author     Linus Torvalds <torvalds@linux-foundation.org>  2025-12-16 19:34:09 +1200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-12-16 19:34:09 +1200
commit     53ec4a79ff4b36d9711cfe030eeebc36afbc51dd (patch)
tree       861e0e24084acba34a3cb19bce5c764933974745 /scripts/lib/abi/git:
parent     115fada16b5a9a5ee371ad656e56419fe0e63cfc (diff)
parent     95d7a890e4b03e198836d49d699408fd1867cb55 (diff)
Merge tag 'v6.19-rc1-ksmbd-server-fixes' of git://git.samba.org/ksmbd

Pull smb server fixes from Steve French:

 - Fix set xattr name validation

 - Fix session refcount leak

 - Minor cleanup

 - smbdirect (RDMA) fixes: improve receive completion, and connect

* tag 'v6.19-rc1-ksmbd-server-fixes' of git://git.samba.org/ksmbd:
  ksmbd: fix buffer validation by including null terminator size in EA length
  ksmbd: Fix refcount leak when invalid session is found on session lookup
  ksmbd: remove redundant DACL check in smb_check_perm_dacl
  ksmbd: convert comma to semicolon
  smb: server: defer the initial recv completion logic to smb_direct_negotiate_recv_work()
  smb: server: initialize recv_io->cqe.done = recv_done just once
  smb: smbdirect: introduce smbdirect_socket.connect.{lock,work}
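The first item concerns validating the length of a client-supplied extended-attribute (EA) name. As a rough illustration only (a hedged sketch with hypothetical names, not the actual ksmbd patch), the pattern being fixed is a length check that forgets the trailing NUL terminator, letting a name that exactly fills the buffer pass validation and be read one byte past the end:

	/* Illustrative sketch only -- hypothetical names, not the ksmbd code.
	 * The point: space consumed by an EA entry must include the '\0'
	 * terminator that follows the name bytes.
	 */
	#include <stddef.h>
	#include <stdbool.h>

	struct ea_entry_example {
		unsigned char name_len;   /* length of name, excluding '\0' */
		char name[];              /* name bytes followed by '\0'    */
	};

	static bool ea_entry_fits(const struct ea_entry_example *ea,
				  size_t buf_remaining)
	{
		/* +1 accounts for the NUL terminator after the name. */
		size_t needed = sizeof(*ea) + (size_t)ea->name_len + 1;

		return needed <= buf_remaining;
	}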
Diffstat (limited to 'scripts/lib/abi/git:')
0 files changed, 0 insertions, 0 deletions
-rw-r--r--Documentation/ABI/stable/sysfs-class-backlight7
-rw-r--r--Documentation/ABI/stable/sysfs-driver-mlxreg-io98
-rw-r--r--Documentation/ABI/testing/configfs-tsm-report (renamed from Documentation/ABI/testing/configfs-tsm)0
-rw-r--r--Documentation/ABI/testing/debugfs-alienware-wmi64
-rw-r--r--Documentation/ABI/testing/debugfs-pcie-ptm70
-rw-r--r--Documentation/ABI/testing/debugfs-scmi-raw91
-rw-r--r--Documentation/ABI/testing/debugfs-turris-mox-rwtm14
-rw-r--r--Documentation/ABI/testing/sysfs-bus-cxl4
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci-devices-aer (renamed from Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats)44
-rw-r--r--Documentation/ABI/testing/sysfs-bus-wmi2
-rw-r--r--Documentation/ABI/testing/sysfs-class-led6
-rw-r--r--Documentation/ABI/testing/sysfs-class-power56
-rw-r--r--Documentation/ABI/testing/sysfs-class-power-gaokun27
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu64
-rw-r--r--Documentation/ABI/testing/sysfs-devices-virtual-misc-tdx_guest63
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd4
-rw-r--r--Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon28
-rw-r--r--Documentation/ABI/testing/sysfs-driver-qat_ras8
-rw-r--r--Documentation/ABI/testing/sysfs-driver-ufs81
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-acpi21
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm9
-rw-r--r--Documentation/ABI/testing/sysfs-fs-erofs8
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs67
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-hardlockup_count7
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-mm-damon6
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-mm-mempolicy-weighted-interleave35
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-mm-numa16
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-rcu_stall_count6
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-reboot10
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-slab96
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-softlockup_count7
-rw-r--r--Documentation/ABI/testing/sysfs-platform-alienware-wmi14
-rw-r--r--Documentation/ABI/testing/sysfs-platform-oxp25
-rw-r--r--Documentation/Makefile9
-rw-r--r--Documentation/PCI/controller/index.rst10
-rw-r--r--Documentation/PCI/controller/rcar-pcie-firmware.rst32
-rw-r--r--Documentation/PCI/endpoint/pci-nvme-function.rst2
-rw-r--r--Documentation/PCI/index.rst1
-rw-r--r--Documentation/PCI/pcieaer-howto.rst17
-rw-r--r--Documentation/RCU/listRCU.rst10
-rw-r--r--Documentation/RCU/whatisRCU.rst3
-rw-r--r--Documentation/admin-guide/LSM/ipe.rst69
-rw-r--r--Documentation/admin-guide/README.rst2
-rw-r--r--Documentation/admin-guide/blockdev/index.rst1
-rw-r--r--Documentation/admin-guide/blockdev/zoned_loop.rst169
-rw-r--r--Documentation/admin-guide/blockdev/zram.rst58
-rw-r--r--Documentation/admin-guide/bug-hunting.rst2
-rw-r--r--Documentation/admin-guide/cgroup-v1/cgroups.rst2
-rw-r--r--Documentation/admin-guide/cgroup-v1/cpusets.rst2
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst114
-rw-r--r--Documentation/admin-guide/gpio/gpio-aggregator.rst107
-rw-r--r--Documentation/admin-guide/hw-vuln/index.rst3
-rw-r--r--Documentation/admin-guide/hw-vuln/indirect-target-selection.rst168
-rw-r--r--Documentation/admin-guide/hw-vuln/old_microcode.rst21
-rw-r--r--Documentation/admin-guide/hw-vuln/rsb.rst268
-rw-r--r--Documentation/admin-guide/kdump/kdump.rst32
-rw-r--r--Documentation/admin-guide/kdump/vmcoreinfo.rst4
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt87
-rw-r--r--Documentation/admin-guide/laptops/alienware-wmi.rst127
-rw-r--r--Documentation/admin-guide/laptops/index.rst1
-rw-r--r--Documentation/admin-guide/media/c3-isp.dot26
-rw-r--r--Documentation/admin-guide/media/c3-isp.rst101
-rw-r--r--Documentation/admin-guide/media/mgb4.rst9
-rw-r--r--Documentation/admin-guide/media/pci-cardlist.rst1
-rw-r--r--Documentation/admin-guide/media/v4l-drivers.rst1
-rw-r--r--Documentation/admin-guide/mm/damon/index.rst11
-rw-r--r--Documentation/admin-guide/mm/damon/usage.rst12
-rw-r--r--Documentation/admin-guide/mm/index.rst1
-rw-r--r--Documentation/admin-guide/mm/kho.rst115
-rw-r--r--Documentation/admin-guide/mm/multigen_lru.rst5
-rw-r--r--Documentation/admin-guide/mm/pagemap.rst1
-rw-r--r--Documentation/admin-guide/namespaces/resource-control.rst24
-rw-r--r--Documentation/admin-guide/pm/cpufreq.rst8
-rw-r--r--Documentation/admin-guide/pm/intel_idle.rst21
-rw-r--r--Documentation/admin-guide/pm/intel_pstate.rst104
-rw-r--r--Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst10
-rw-r--r--Documentation/admin-guide/quickly-build-trimmed-linux.rst4
-rw-r--r--Documentation/admin-guide/reporting-issues.rst6
-rw-r--r--Documentation/admin-guide/sysctl/vm.rst38
-rw-r--r--Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst4
-rw-r--r--Documentation/admin-guide/xfs.rst48
-rw-r--r--Documentation/arch/arm64/cpu-feature-registers.rst13
-rw-r--r--Documentation/arch/arm64/silicon-errata.rst2
-rw-r--r--Documentation/arch/arm64/sme.rst8
-rw-r--r--Documentation/arch/openrisc/openrisc_port.rst18
-rw-r--r--Documentation/arch/powerpc/htm.rst104
-rw-r--r--Documentation/arch/powerpc/kvm-nested.rst40
-rw-r--r--Documentation/arch/riscv/hwprobe.rst2
-rw-r--r--Documentation/arch/x86/amd-debugging.rst368
-rw-r--r--Documentation/arch/x86/amd_hsmp.rst30
-rw-r--r--Documentation/arch/x86/cpuinfo.rst77
-rw-r--r--Documentation/arch/x86/index.rst2
-rw-r--r--Documentation/arch/x86/resume.svg4
-rw-r--r--Documentation/arch/x86/suspend.svg4
-rw-r--r--Documentation/arch/x86/x86_64/5level-paging.rst9
-rw-r--r--Documentation/arch/x86/x86_64/fsgs.rst2
-rw-r--r--Documentation/bpf/bpf_devel_QA.rst8
-rw-r--r--Documentation/bpf/bpf_iterators.rst117
-rw-r--r--Documentation/bpf/kfuncs.rst17
-rw-r--r--Documentation/conf.py164
-rw-r--r--Documentation/core-api/dma-api.rst71
-rw-r--r--Documentation/core-api/folio_queue.rst3
-rw-r--r--Documentation/core-api/genericirq.rst2
-rw-r--r--Documentation/core-api/index.rst1
-rw-r--r--Documentation/core-api/irq/concepts.rst27
-rw-r--r--Documentation/core-api/irq/irq-domain.rst203
-rw-r--r--Documentation/core-api/kho/bindings/kho.yaml43
-rw-r--r--Documentation/core-api/kho/bindings/memblock/memblock.yaml39
-rw-r--r--Documentation/core-api/kho/bindings/memblock/reserve-mem.yaml40
-rw-r--r--Documentation/core-api/kho/bindings/sub-fdt.yaml27
-rw-r--r--Documentation/core-api/kho/concepts.rst74
-rw-r--r--Documentation/core-api/kho/fdt.rst80
-rw-r--r--Documentation/core-api/kho/index.rst13
-rw-r--r--Documentation/core-api/printk-formats.rst35
-rw-r--r--Documentation/dev-tools/kunit/run_wrapper.rst2
-rw-r--r--Documentation/dev-tools/kunit/usage.rst38
-rw-r--r--Documentation/devicetree/bindings/arm/altera.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/altera/socfpga-clk-manager.yaml102
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic.yaml26
-rw-r--r--Documentation/devicetree/bindings/arm/atmel,sama5d2-secumod.yaml49
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-sysregs.txt25
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.yaml234
-rw-r--r--Documentation/devicetree/bindings/arm/freescale/fsl,imx51-m4if.yaml41
-rw-r--r--Documentation/devicetree/bindings/arm/freescale/m4if.txt12
-rw-r--r--Documentation/devicetree/bindings/arm/freescale/tigerp.txt12
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.yaml61
-rw-r--r--Documentation/devicetree/bindings/arm/intel,socfpga.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek.yaml11
-rw-r--r--Documentation/devicetree/bindings/arm/psci.yaml30
-rw-r--r--Documentation/devicetree/bindings/arm/qcom.yaml14
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.yaml37
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip/pmu.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml8
-rw-r--r--Documentation/devicetree/bindings/arm/stm32/stm32.yaml9
-rw-r--r--Documentation/devicetree/bindings/arm/sunxi.yaml25
-rw-r--r--Documentation/devicetree/bindings/arm/tegra.yaml19
-rw-r--r--Documentation/devicetree/bindings/arm/ti/k3.yaml32
-rw-r--r--Documentation/devicetree/bindings/arm/vt8500.yaml10
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-dm816.txt21
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-st.txt35
-rw-r--r--Documentation/devicetree/bindings/ata/apm,xgene-ahci.yaml58
-rw-r--r--Documentation/devicetree/bindings/ata/apm-xgene.txt77
-rw-r--r--Documentation/devicetree/bindings/ata/arasan,cf-spear1340.yaml70
-rw-r--r--Documentation/devicetree/bindings/ata/cavium,ebt3000-compact-flash.yaml59
-rw-r--r--Documentation/devicetree/bindings/ata/cavium-compact-flash.txt30
-rw-r--r--Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml1
-rw-r--r--Documentation/devicetree/bindings/ata/marvell,orion-sata.yaml83
-rw-r--r--Documentation/devicetree/bindings/ata/marvell.txt22
-rw-r--r--Documentation/devicetree/bindings/ata/pata-arasan.txt37
-rw-r--r--Documentation/devicetree/bindings/ata/rockchip,dwc-ahci.yaml3
-rw-r--r--Documentation/devicetree/bindings/ata/st,ahci.yaml72
-rw-r--r--Documentation/devicetree/bindings/ata/ti,dm816-ahci.yaml43
-rw-r--r--Documentation/devicetree/bindings/bus/microsoft,vmbus.yaml16
-rw-r--r--Documentation/devicetree/bindings/bus/nvidia,tegra210-aconnect.yaml1
-rw-r--r--Documentation/devicetree/bindings/cache/andestech,ax45mp-cache.yaml20
-rw-r--r--Documentation/devicetree/bindings/cache/marvell,feroceon-cache.txt16
-rw-r--r--Documentation/devicetree/bindings/cache/marvell,kirkwood-cache.yaml45
-rw-r--r--Documentation/devicetree/bindings/cache/marvell,tauros2-cache.txt17
-rw-r--r--Documentation/devicetree/bindings/cache/marvell,tauros2-cache.yaml39
-rw-r--r--Documentation/devicetree/bindings/cache/qcom,llcc.yaml2
-rw-r--r--Documentation/devicetree/bindings/cache/sifive,ccache0.yaml44
-rw-r--r--Documentation/devicetree/bindings/clock/allwinner,sun8i-a83t-de2-clk.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/altr_socfpga.txt30
-rw-r--r--Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.txt31
-rw-r--r--Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.yaml47
-rw-r--r--Documentation/devicetree/bindings/clock/fsl,vf610-ccm.yaml58
-rw-r--r--Documentation/devicetree/bindings/clock/maxim,max77686.txt114
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,videocc.yaml20
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,rzv2h-cpg.yaml5
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml69
-rw-r--r--Documentation/devicetree/bindings/clock/sophgo,cv1800-clk.yaml16
-rw-r--r--Documentation/devicetree/bindings/clock/sophgo,sg2044-clk.yaml99
-rw-r--r--Documentation/devicetree/bindings/clock/spacemit,k1-pll.yaml50
-rw-r--r--Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt71
-rw-r--r--Documentation/devicetree/bindings/clock/thead,th1520-clk-ap.yaml17
-rw-r--r--Documentation/devicetree/bindings/clock/vf610-clock.txt41
-rw-r--r--Documentation/devicetree/bindings/counter/fsl,ftm-quaddec.yaml36
-rw-r--r--Documentation/devicetree/bindings/counter/ftm-quaddec.txt18
-rw-r--r--Documentation/devicetree/bindings/cpu/cpu-topology.txt553
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt250
-rw-r--r--Documentation/devicetree/bindings/crypto/amd,ccp-seattle-v1a.yaml38
-rw-r--r--Documentation/devicetree/bindings/crypto/amd-ccp.txt17
-rw-r--r--Documentation/devicetree/bindings/crypto/artpec6-crypto.txt16
-rw-r--r--Documentation/devicetree/bindings/crypto/axis,artpec6-crypto.yaml39
-rw-r--r--Documentation/devicetree/bindings/crypto/brcm,spu-crypto.txt22
-rw-r--r--Documentation/devicetree/bindings/crypto/brcm,spum-crypto.yaml44
-rw-r--r--Documentation/devicetree/bindings/crypto/fsl,sec-v4.0-mon.yaml5
-rw-r--r--Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml10
-rw-r--r--Documentation/devicetree/bindings/crypto/fsl-sec6.txt157
-rw-r--r--Documentation/devicetree/bindings/crypto/hisilicon,hip06-sec.yaml134
-rw-r--r--Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt67
-rw-r--r--Documentation/devicetree/bindings/crypto/img,hash-accelerator.yaml69
-rw-r--r--Documentation/devicetree/bindings/crypto/img-hash.txt27
-rw-r--r--Documentation/devicetree/bindings/crypto/marvell,orion-crypto.yaml133
-rw-r--r--Documentation/devicetree/bindings/crypto/marvell-cesa.txt44
-rw-r--r--Documentation/devicetree/bindings/crypto/mediatek-crypto.txt25
-rw-r--r--Documentation/devicetree/bindings/crypto/mv_cesa.txt32
-rw-r--r--Documentation/devicetree/bindings/crypto/qcom-qce.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml18
-rw-r--r--Documentation/devicetree/bindings/display/bridge/renesas,dsi.yaml67
-rw-r--r--Documentation/devicetree/bindings/display/fsl,tcon.txt17
-rw-r--r--Documentation/devicetree/bindings/display/fsl,vf610-tcon.yaml43
-rw-r--r--Documentation/devicetree/bindings/display/imx/fsl,imx-display-subsystem.yaml36
-rw-r--r--Documentation/devicetree/bindings/display/imx/fsl,imx-parallel-display.yaml74
-rw-r--r--Documentation/devicetree/bindings/display/imx/fsl,imx6q-ipu.yaml97
-rw-r--r--Documentation/devicetree/bindings/display/imx/fsl,imx6q-ldb.yaml193
-rw-r--r--Documentation/devicetree/bindings/display/imx/fsl,imx6qp-pre.yaml55
-rw-r--r--Documentation/devicetree/bindings/display/imx/fsl,imx6qp-prg.yaml54
-rw-r--r--Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt160
-rw-r--r--Documentation/devicetree/bindings/display/imx/ldb.txt146
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,color.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,merge.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,mt8195-hdmi-ddc.yaml41
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,mt8195-hdmi.yaml151
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,padding.yaml10
-rw-r--r--Documentation/devicetree/bindings/display/msm/dp-controller.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/msm/hdmi.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/msm/mdp4.yaml9
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml12
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml181
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,sar2130p-mdss.yaml439
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,sc7280-dpu.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,sm8350-mdss.yaml13
-rw-r--r--Documentation/devicetree/bindings/display/panel/boe,td4320.yaml65
-rw-r--r--Documentation/devicetree/bindings/display/panel/himax,hx8279.yaml75
-rw-r--r--Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/novatek,nt37801.yaml69
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/truly,nt35597-2K-display.yaml97
-rw-r--r--Documentation/devicetree/bindings/display/panel/visionox,g2647fb105.yaml79
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/cdn-dp-rockchip.txt74
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml25
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml20
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip,rk3399-cdn-dp.yaml170
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/sitronix,st7571.yaml73
-rw-r--r--Documentation/devicetree/bindings/display/truly,nt35597.txt59
-rw-r--r--Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.txt44
-rw-r--r--Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.yaml90
-rw-r--r--Documentation/devicetree/bindings/example-schema.yaml15
-rw-r--r--Documentation/devicetree/bindings/firmware/google,gs101-acpm-ipc.yaml35
-rw-r--r--Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt57
-rw-r--r--Documentation/devicetree/bindings/firmware/intel,stratix10-svc.yaml93
-rw-r--r--Documentation/devicetree/bindings/firmware/nxp,imx95-scmi.yaml23
-rw-r--r--Documentation/devicetree/bindings/fpga/intel,stratix10-soc-fpga-mgr.yaml36
-rw-r--r--Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt18
-rw-r--r--Documentation/devicetree/bindings/gpio/atmel,at91rm9200-gpio.yaml16
-rw-r--r--Documentation/devicetree/bindings/gpio/blaize,blzp1600-gpio.yaml77
-rw-r--r--Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml22
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mxs.yaml70
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml4
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-vf610.yaml7
-rw-r--r--Documentation/devicetree/bindings/gpio/maxim,max77759-gpio.yaml44
-rw-r--r--Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.yaml3
-rw-r--r--Documentation/devicetree/bindings/gpio/nxp,pcf8575.yaml26
-rw-r--r--Documentation/devicetree/bindings/gpio/realtek,otto-gpio.yaml8
-rw-r--r--Documentation/devicetree/bindings/gpio/renesas,em-gio.yaml20
-rw-r--r--Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml24
-rw-r--r--Documentation/devicetree/bindings/gpio/sifive,gpio.yaml6
-rw-r--r--Documentation/devicetree/bindings/gpio/spacemit,k1-gpio.yaml96
-rw-r--r--Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml24
-rw-r--r--Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml48
-rw-r--r--Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml1
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml4
-rw-r--r--Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml90
-rw-r--r--Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml82
-rw-r--r--Documentation/devicetree/bindings/hwinfo/via,vt8500-scc-id.yaml37
-rw-r--r--Documentation/devicetree/bindings/hwmon/pmbus/adi,lt3074.yaml50
-rw-r--r--Documentation/devicetree/bindings/hwmon/pmbus/mps,mpq8785.yaml74
-rw-r--r--Documentation/devicetree/bindings/hwmon/sophgo,sg2042-hwmon-mcu.yaml6
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,amc6821.yaml18
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml5
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,tmp102.yaml4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml1
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml1
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-wmt.txt24
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,riic.yaml1
-rw-r--r--Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml12
-rw-r--r--Documentation/devicetree/bindings/i2c/wm,wm8505-i2c.yaml47
-rw-r--r--Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml45
-rw-r--r--Documentation/devicetree/bindings/iio/adc/qcom,spmi-rradc.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/dlg,da7280.txt108
-rw-r--r--Documentation/devicetree/bindings/input/dlg,da7280.yaml248
-rw-r--r--Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml9
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt37
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.yaml54
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt25
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.yaml49
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/altr,msi-controller.yaml (renamed from Documentation/devicetree/bindings/pci/altr,msi-controller.yaml)2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt27
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.yaml46
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,nvic.txt36
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,nvic.yaml61
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt38
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.yaml61
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.txt25
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.yaml46
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2500-scu-ic.yaml48
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2xxx-scu-ic.txt23
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt131
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.yaml162
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt55
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.yaml81
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-mx.txt18
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-pic.txt25
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-pic.yaml50
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/chrp,open-pic.yaml63
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/cirrus,clps711x-intc.txt41
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/cirrus,ep7209-intc.yaml71
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/cnxt,cx92755-ic.yaml47
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/csky,apb-intc.txt62
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/csky,apb-intc.yaml54
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/csky,mpintc.txt52
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/csky,mpintc.yaml43
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/digicolor-ic.txt21
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/econet,en751221-intc.yaml78
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ezchip,nps400-ic.txt17
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ezchip,nps400-ic.yaml34
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/faraday,ftintc010.txt25
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/faraday,ftintc010.yaml51
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/fsl,tzic.yaml48
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt30
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.yaml47
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.txt105
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.yaml79
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/jcore,aic.txt26
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/jcore,aic.yaml43
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt18
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.yaml43
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,ap806-gicp.yaml50
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,ap806-sei.yaml58
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,armada-8k-pic.txt25
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,armada-8k-pic.yaml52
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,cp110-icu.yaml98
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,gicp.txt27
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt112
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt42
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.yaml54
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,orion-bridge-intc.yaml52
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,orion-intc.txt48
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,sei.txt36
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/microchip,pic32-evic.txt67
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/microchip,pic32mzda-evic.yaml60
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt41
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.yaml82
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/open-pic.txt97
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/opencores,or1k-pic.txt23
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/opencores,or1k-pic.yaml38
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/openrisc,ompic.txt22
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/openrisc,ompic.yaml45
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/qca,ar7100-cpu-intc.yaml61
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/qca,ar7100-misc-intc.yaml52
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/qca,ath79-cpu-intc.txt44
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt45
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/snps,arc700-intc.txt24
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/snps,arc700-intc.yaml42
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt46
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.yaml48
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/snps,archs-intc.txt22
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/snps,archs-intc.yaml48
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.txt43
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.yaml64
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml4
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/st,spear300-shirq.yaml67
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt44
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800-irqc.yaml49
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800.txt14
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-mswi.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,cp-intc.txt27
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,cp-intc.yaml50
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.txt36
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.yaml63
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,omap-intc-irq.txt28
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,omap-intc-irq.yaml52
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,omap2-intc.txt27
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu.txt31
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu.yaml55
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/via,vt8500-intc.txt16
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/via,vt8500-intc.yaml76
-rw-r--r--Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml4
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/ti,lp8864.yaml80
-rw-r--r--Documentation/devicetree/bindings/leds/ti,tps61310.yaml120
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml170
-rw-r--r--Documentation/devicetree/bindings/mailbox/sophgo,cv1800b-mailbox.yaml60
-rw-r--r--Documentation/devicetree/bindings/media/amlogic,c3-isp.yaml88
-rw-r--r--Documentation/devicetree/bindings/media/amlogic,c3-mipi-adapter.yaml111
-rw-r--r--Documentation/devicetree/bindings/media/amlogic,c3-mipi-csi2.yaml127
-rw-r--r--Documentation/devicetree/bindings/media/cec/nvidia,tegra114-cec.yaml14
-rw-r--r--Documentation/devicetree/bindings/media/fsl,imx-capture-subsystem.yaml37
-rw-r--r--Documentation/devicetree/bindings/media/fsl,imx6-mipi-csi2.yaml143
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ad5820.txt28
-rw-r--r--Documentation/devicetree/bindings/media/i2c/adi,ad5820.yaml56
-rw-r--r--Documentation/devicetree/bindings/media/i2c/adi,adp1653.txt (renamed from Documentation/devicetree/bindings/media/i2c/adp1653.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/adi,adv7180.yaml (renamed from Documentation/devicetree/bindings/media/i2c/adv7180.yaml)2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/adi,adv7343.txt (renamed from Documentation/devicetree/bindings/media/i2c/adv7343.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/adi,adv748x.yaml (renamed from Documentation/devicetree/bindings/media/i2c/adv748x.yaml)2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/adi,adv7604.yaml (renamed from Documentation/devicetree/bindings/media/i2c/adv7604.yaml)2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/aptina,mt9v032.txt (renamed from Documentation/devicetree/bindings/media/i2c/mt9v032.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/maxim,max2175.txt (renamed from Documentation/devicetree/bindings/media/i2c/max2175.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/micron,mt9m111.txt (renamed from Documentation/devicetree/bindings/media/i2c/mt9m111.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/nxp,tda1997x.txt (renamed from Documentation/devicetree/bindings/media/i2c/tda1997x.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/onnn,mt9m001.txt (renamed from Documentation/devicetree/bindings/media/i2c/mt9m001.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov02e10.yaml152
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov2640.txt (renamed from Documentation/devicetree/bindings/media/i2c/ov2640.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov2659.txt (renamed from Documentation/devicetree/bindings/media/i2c/ov2659.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov7670.txt (renamed from Documentation/devicetree/bindings/media/i2c/ov7670.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov7740.txt (renamed from Documentation/devicetree/bindings/media/i2c/ov7740.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov9650.txt (renamed from Documentation/devicetree/bindings/media/i2c/ov9650.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/sony,imx219.yaml (renamed from Documentation/devicetree/bindings/media/i2c/imx219.yaml)7
-rw-r--r--Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/st,vd55g1.yaml133
-rw-r--r--Documentation/devicetree/bindings/media/i2c/st,vd56g3.yaml139
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ti,ds90ub953.yaml77
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ti,ds90ub960.yaml16
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ti,ths8200.txt (renamed from Documentation/devicetree/bindings/media/i2c/ths8200.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ti,tvp514x.txt (renamed from Documentation/devicetree/bindings/media/i2c/tvp514x.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ti,tvp5150.txt (renamed from Documentation/devicetree/bindings/media/i2c/tvp5150.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ti,tvp7002.txt (renamed from Documentation/devicetree/bindings/media/i2c/tvp7002.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/i2c/toshiba,tc358743.txt (renamed from Documentation/devicetree/bindings/media/i2c/tc358743.txt)0
-rw-r--r--Documentation/devicetree/bindings/media/imx.txt53
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,mdp3-fg.yaml8
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,mdp3-hdr.yaml8
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,mdp3-rsz.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,mdp3-stitch.yaml8
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,mdp3-tcc.yaml8
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,mdp3-tdshp.yaml8
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml8
-rw-r--r--Documentation/devicetree/bindings/media/qcom,msm8953-camss.yaml15
-rw-r--r--Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml20
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml7
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml20
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml20
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml20
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sm8250-camss.yaml30
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sm8550-iris.yaml39
-rw-r--r--Documentation/devicetree/bindings/media/qcom,x1e80100-camss.yaml367
-rw-r--r--Documentation/devicetree/bindings/media/renesas,fcp.yaml25
-rw-r--r--Documentation/devicetree/bindings/media/renesas,isp.yaml63
-rw-r--r--Documentation/devicetree/bindings/media/renesas,rzg2l-cru.yaml65
-rw-r--r--Documentation/devicetree/bindings/media/renesas,rzg2l-csi2.yaml62
-rw-r--r--Documentation/devicetree/bindings/media/renesas,vsp1.yaml25
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml1
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml1
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/renesas,rzg3e-xspi.yaml135
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/st,stm32mp25-omm.yaml226
-rw-r--r--Documentation/devicetree/bindings/mfd/aspeed,ast2x00-scu.yaml9
-rw-r--r--Documentation/devicetree/bindings/mfd/atmel,at91sam9260-gpbr.yaml1
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,bcm59056.txt39
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,bcm59056.yaml76
-rw-r--r--Documentation/devicetree/bindings/mfd/iqs62x.yaml177
-rw-r--r--Documentation/devicetree/bindings/mfd/maxim,max77759.yaml99
-rw-r--r--Documentation/devicetree/bindings/mfd/mediatek,mt8195-scpsys.yaml1
-rw-r--r--Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml6
-rw-r--r--Documentation/devicetree/bindings/mfd/netronix,ntxec.yaml39
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml1
-rw-r--r--Documentation/devicetree/bindings/mfd/rohm,bd9571mwv.yaml50
-rw-r--r--Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml10
-rw-r--r--Documentation/devicetree/bindings/mfd/rohm,bd96802-pmic.yaml101
-rw-r--r--Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml26
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml40
-rw-r--r--Documentation/devicetree/bindings/mfd/syscon.yaml7
-rw-r--r--Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml202
-rw-r--r--Documentation/devicetree/bindings/mips/cpus.yaml3
-rw-r--r--Documentation/devicetree/bindings/misc/ti,fpc202.yaml94
-rw-r--r--Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml22
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl,esdhc.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml27
-rw-r--r--Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt29
-rw-r--r--Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.yaml66
-rw-r--r--Documentation/devicetree/bindings/mmc/mtk-sd.yaml2
-rw-r--r--Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml4
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-msm.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci.txt13
-rw-r--r--Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml7
-rw-r--r--Documentation/devicetree/bindings/mmc/spacemit,sdhci.yaml53
-rw-r--r--Documentation/devicetree/bindings/mmc/vt8500-sdmmc.txt23
-rw-r--r--Documentation/devicetree/bindings/mmc/wm,wm8505-sdhc.yaml66
-rw-r--r--Documentation/devicetree/bindings/mtd/fsl,vf610-nfc.yaml89
-rw-r--r--Documentation/devicetree/bindings/mtd/loongson,ls1b-nand-controller.yaml72
-rw-r--r--Documentation/devicetree/bindings/mtd/qcom,nandc.yaml30
-rw-r--r--Documentation/devicetree/bindings/mtd/vf610-nfc.txt59
-rw-r--r--Documentation/devicetree/bindings/net/aeonsemi,as21xxx.yaml122
-rw-r--r--Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml13
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml17
-rw-r--r--Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml23
-rw-r--r--Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/can/nxp,sja1000.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml171
-rw-r--r--Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml5
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-controller.yaml124
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-phy.yaml10
-rw-r--r--Documentation/devicetree/bindings/net/network-class.yaml46
-rw-r--r--Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml203
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwmac.yaml27
-rw-r--r--Documentation/devicetree/bindings/net/ti,dp83822.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/vertexcom-mse102x.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/via,vt8500-rhine.yaml41
-rw-r--r--Documentation/devicetree/bindings/net/via-rhine.txt17
-rw-r--r--Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ipq5332-wifi.yaml315
-rw-r--r--Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml5
-rw-r--r--Documentation/devicetree/bindings/net/wireless/wireless-controller.yaml23
-rw-r--r--Documentation/devicetree/bindings/numa.txt319
-rw-r--r--Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml2
-rw-r--r--Documentation/devicetree/bindings/nvmem/maxim,max77759-nvmem.yaml32
-rw-r--r--Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml4
-rw-r--r--Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml25
-rw-r--r--Documentation/devicetree/bindings/opp/opp-v1.yaml18
-rw-r--r--Documentation/devicetree/bindings/opp/opp-v2-qcom-adreno.yaml96
-rw-r--r--Documentation/devicetree/bindings/pci/apple,pcie.yaml33
-rw-r--r--Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml87
-rw-r--r--Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml16
-rw-r--r--Documentation/devicetree/bindings/pci/intel,keembay-pcie-ep.yaml26
-rw-r--r--Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml38
-rw-r--r--Documentation/devicetree/bindings/pci/marvell,armada8k-pcie.yaml100
-rw-r--r--Documentation/devicetree/bindings/pci/marvell,kirkwood-pcie.yaml277
-rw-r--r--Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml56
-rw-r--r--Documentation/devicetree/bindings/pci/mvebu-pci.txt310
-rw-r--r--Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie-ep.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/pci-armada8k.txt48
-rw-r--r--Documentation/devicetree/bindings/pci/pci-ep.yaml68
-rw-r--r--Documentation/devicetree/bindings/pci/pci-iommu.txt171
-rw-r--r--Documentation/devicetree/bindings/pci/pci-msi.txt220
-rw-r--r--Documentation/devicetree/bindings/pci/pci.txt84
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie-sa8775p.yaml10
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml9
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie-sc8180x.yaml10
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml9
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie-sm8250.yaml9
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml9
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie.yaml65
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-pci-ep.yaml34
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-pci-host.yaml46
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip-dw-pcie-common.yaml10
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml60
-rw-r--r--Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/snps,dw-pcie-common.yaml3
-rw-r--r--Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml4
-rw-r--r--Documentation/devicetree/bindings/pci/v3,v360epc-pci.yaml100
-rw-r--r--Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt76
-rw-r--r--Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml116
-rw-r--r--Documentation/devicetree/bindings/pinctrl/amlogic,pinctrl-a4.yaml8
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx7ulp-iomuxc1.yaml99
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx7ulp-pinctrl.txt53
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,vf610-iomuxc.yaml83
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,vf610-pinctrl.txt41
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt65xx-pinctrl.yaml95
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt6779-pinctrl.yaml5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt6893-pinctrl.yaml193
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml56
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml70
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt8192-pinctrl.yaml78
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt8196-pinctrl.yaml236
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,qcs615-tlmm.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,qcs8300-tlmm.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.yaml4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/spacemit,k1-pinctrl.yaml18
-rw-r--r--Documentation/devicetree/bindings/power/allwinner,sun50i-h6-prcm-ppu.yaml42
-rw-r--r--Documentation/devicetree/bindings/power/mediatek,power-controller.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/qcom,rpmpd.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml42
-rw-r--r--Documentation/devicetree/bindings/power/reset/toradex,smarc-ec.yaml52
-rw-r--r--Documentation/devicetree/bindings/power/rockchip,power-controller.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq24190.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq25980.yaml36
-rw-r--r--Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml14
-rw-r--r--Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml18
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,max77705.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,max8971.yaml68
-rw-r--r--Documentation/devicetree/bindings/power/supply/pegatron,chagall-ec.yaml49
-rw-r--r--Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml2
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/pmc.txt63
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/pmc.yaml152
-rw-r--r--Documentation/devicetree/bindings/pwm/loongson,ls7a-pwm.yaml67
-rw-r--r--Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml1
-rw-r--r--Documentation/devicetree/bindings/pwm/nxp,mc33xs2410.yaml118
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,rzg2l-gpt.yaml378
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml9
-rw-r--r--Documentation/devicetree/bindings/pwm/via,vt8500-pwm.yaml43
-rw-r--r--Documentation/devicetree/bindings/pwm/vt8500-pwm.txt18
-rw-r--r--Documentation/devicetree/bindings/regulator/adi,adp5055-regulator.yaml157
-rw-r--r--Documentation/devicetree/bindings/regulator/brcm,bcm59054.yaml56
-rw-r--r--Documentation/devicetree/bindings/regulator/brcm,bcm59056.yaml51
-rw-r--r--Documentation/devicetree/bindings/regulator/mediatek,mt6357-regulator.yaml12
-rw-r--r--Documentation/devicetree/bindings/regulator/rohm,bd96802-regulator.yaml44
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sm8350-pas.yaml54
-rw-r--r--Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml4
-rw-r--r--Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml22
-rw-r--r--Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml3
-rw-r--r--Documentation/devicetree/bindings/reset/renesas,rzv2h-usb2phy-reset.yaml56
-rw-r--r--Documentation/devicetree/bindings/reset/sophgo,sg2042-reset.yaml7
-rw-r--r--Documentation/devicetree/bindings/reset/thead,th1520-reset.yaml44
-rw-r--r--Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml1
-rw-r--r--Documentation/devicetree/bindings/riscv/sophgo.yaml4
-rw-r--r--Documentation/devicetree/bindings/rng/rockchip,rk3588-rng.yaml5
-rw-r--r--Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml12
-rw-r--r--Documentation/devicetree/bindings/soc/amlogic/amlogic,meson-gx-clk-measure.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/fsl,qman-fqd.yaml4
-rw-r--r--Documentation/devicetree/bindings/soc/google/google,gs101-pmu-intr-gen.yaml35
-rw-r--r--Documentation/devicetree/bindings/soc/mediatek/mediatek,mt8183-dvfsrc.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,rpm.yaml15
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml24
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,saw2.yaml3
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml5
-rw-r--r--Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/renesas/renesas.yaml32
-rw-r--r--Documentation/devicetree/bindings/soc/rockchip/grf.yaml7
-rw-r--r--Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml15
-rw-r--r--Documentation/devicetree/bindings/soc/sophgo/sophgo,cv1800b-rtc.yaml86
-rw-r--r--Documentation/devicetree/bindings/soc/sophgo/sophgo,sg2044-top-syscon.yaml49
-rw-r--r--Documentation/devicetree/bindings/soc/spacemit/spacemit,k1-syscon.yaml80
-rw-r--r--Documentation/devicetree/bindings/soc/ti/ti,j721e-system-controller.yaml23
-rw-r--r--Documentation/devicetree/bindings/sound/audio-graph-card2.yaml8
-rw-r--r--Documentation/devicetree/bindings/sound/cirrus,cs48l32.yaml195
-rw-r--r--Documentation/devicetree/bindings/sound/everest,es8375.yaml71
-rw-r--r--Documentation/devicetree/bindings/sound/everest,es8389.yaml50
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,mqs.yaml10
-rw-r--r--Documentation/devicetree/bindings/sound/loongson,ls1b-ac97.yaml68
-rw-r--r--Documentation/devicetree/bindings/sound/maxim,max98925.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml7
-rw-r--r--Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml7
-rw-r--r--Documentation/devicetree/bindings/sound/mt8195-mt6359.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra186-asrc.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra186-dspk.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-admaif.yaml17
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-adx.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-amx.yaml6
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-dmic.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-mbdrc.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-mixer.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-mvc.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-ope.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-peq.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-sfc.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml108
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,sm8250.yaml3
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml6
-rw-r--r--Documentation/devicetree/bindings/sound/realtek,alc203.yaml36
-rw-r--r--Documentation/devicetree/bindings/sound/richtek,rt9123.yaml56
-rw-r--r--Documentation/devicetree/bindings/sound/richtek,rt9123p.yaml48
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip,rk3576-sai.yaml144
-rw-r--r--Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml19
-rw-r--r--Documentation/devicetree/bindings/spi/fsl,dspi.yaml14
-rw-r--r--Documentation/devicetree/bindings/spi/nuvoton,wpcm450-fiu.yaml5
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml18
-rw-r--r--Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml8
-rw-r--r--Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml43
-rw-r--r--Documentation/devicetree/bindings/spi/samsung,spi.yaml1
-rw-r--r--Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml23
-rw-r--r--Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml13
-rw-r--r--Documentation/devicetree/bindings/spi/spi-rockchip.yaml1
-rw-r--r--Documentation/devicetree/bindings/spi/st,stm32mp25-ospi.yaml1
-rw-r--r--Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml1
-rw-r--r--Documentation/devicetree/bindings/thermal/airoha,en7581-thermal.yaml48
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-tsens.yaml2
-rw-r--r--Documentation/devicetree/bindings/timer/altr,timer-1.0.txt18
-rw-r--r--Documentation/devicetree/bindings/timer/altr,timer-1.0.yaml39
-rw-r--r--Documentation/devicetree/bindings/timer/arm,mps2-timer.txt28
-rw-r--r--Documentation/devicetree/bindings/timer/arm,mps2-timer.yaml49
-rw-r--r--Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt29
-rw-r--r--Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.yaml45
-rw-r--r--Documentation/devicetree/bindings/timer/cnxt,cx92755-timer.yaml49
-rw-r--r--Documentation/devicetree/bindings/timer/csky,gx6605s-timer.txt42
-rw-r--r--Documentation/devicetree/bindings/timer/csky,gx6605s-timer.yaml40
-rw-r--r--Documentation/devicetree/bindings/timer/csky,mptimer.txt42
-rw-r--r--Documentation/devicetree/bindings/timer/csky,mptimer.yaml46
-rw-r--r--Documentation/devicetree/bindings/timer/digicolor-timer.txt18
-rw-r--r--Documentation/devicetree/bindings/timer/econet,en751221-timer.yaml80
-rw-r--r--Documentation/devicetree/bindings/timer/ezchip,nps400-timer.yaml45
-rw-r--r--Documentation/devicetree/bindings/timer/ezchip,nps400-timer0.txt17
-rw-r--r--Documentation/devicetree/bindings/timer/ezchip,nps400-timer1.txt15
-rw-r--r--Documentation/devicetree/bindings/timer/fsl,gtm.txt30
-rw-r--r--Documentation/devicetree/bindings/timer/fsl,gtm.yaml83
-rw-r--r--Documentation/devicetree/bindings/timer/fsl,vf610-pit.yaml54
-rw-r--r--Documentation/devicetree/bindings/timer/img,pistachio-gptimer.txt28
-rw-r--r--Documentation/devicetree/bindings/timer/img,pistachio-gptimer.yaml69
-rw-r--r--Documentation/devicetree/bindings/timer/jcore,pit.txt24
-rw-r--r--Documentation/devicetree/bindings/timer/jcore,pit.yaml43
-rw-r--r--Documentation/devicetree/bindings/timer/lsi,zevio-timer.txt33
-rw-r--r--Documentation/devicetree/bindings/timer/lsi,zevio-timer.yaml56
-rw-r--r--Documentation/devicetree/bindings/timer/marvell,armada-370-timer.yaml88
-rw-r--r--Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt44
-rw-r--r--Documentation/devicetree/bindings/timer/marvell,orion-timer.txt16
-rw-r--r--Documentation/devicetree/bindings/timer/marvell,orion-timer.yaml43
-rw-r--r--Documentation/devicetree/bindings/timer/nxp,s32g2-stm.yaml64
-rw-r--r--Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml11
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,ostm.yaml12
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,tpu.yaml56
-rw-r--r--Documentation/devicetree/bindings/timer/sifive,clint.yaml1
-rw-r--r--Documentation/devicetree/bindings/timer/snps,arc-timer.txt27
-rw-r--r--Documentation/devicetree/bindings/timer/snps,arc-timer.yaml45
-rw-r--r--Documentation/devicetree/bindings/timer/snps,archs-gfrc.txt14
-rw-r--r--Documentation/devicetree/bindings/timer/snps,archs-gfrc.yaml30
-rw-r--r--Documentation/devicetree/bindings/timer/snps,archs-rtc.txt14
-rw-r--r--Documentation/devicetree/bindings/timer/snps,archs-rtc.yaml30
-rw-r--r--Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.txt17
-rw-r--r--Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.yaml40
-rw-r--r--Documentation/devicetree/bindings/timer/st,spear-timer.txt16
-rw-r--r--Documentation/devicetree/bindings/timer/st,spear-timer.yaml36
-rw-r--r--Documentation/devicetree/bindings/timer/thead,c900-aclint-mtimer.yaml1
-rw-r--r--Documentation/devicetree/bindings/timer/ti,keystone-timer.txt29
-rw-r--r--Documentation/devicetree/bindings/timer/ti,keystone-timer.yaml63
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml6
-rw-r--r--Documentation/devicetree/bindings/ufs/qcom,ufs.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/cypress,hx3.yaml19
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml3
-rw-r--r--Documentation/devicetree/bindings/usb/microchip,usb5744.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/xlnx,usb2.yaml1
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml16
-rw-r--r--Documentation/devicetree/bindings/virtio/pci-iommu.yaml10
-rw-r--r--Documentation/devicetree/bindings/watchdog/fsl,scu-wdt.yaml1
-rw-r--r--Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.yaml2
-rw-r--r--Documentation/devicetree/bindings/watchdog/nxp,s32g2-swt.yaml54
-rw-r--r--Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml4
-rw-r--r--Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml11
-rw-r--r--Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml1
-rw-r--r--Documentation/devicetree/bindings/writing-schema.rst11
-rw-r--r--Documentation/devicetree/overlay-notes.rst12
-rw-r--r--Documentation/doc-guide/sphinx.rst14
-rw-r--r--Documentation/driver-api/basics.rst3
-rw-r--r--Documentation/driver-api/coco/index.rst12
-rw-r--r--Documentation/driver-api/coco/measurement-registers.rst12
-rw-r--r--Documentation/driver-api/cxl/access-coordinates.rst91
-rw-r--r--Documentation/driver-api/cxl/allocation/dax.rst60
-rw-r--r--Documentation/driver-api/cxl/allocation/hugepages.rst32
-rw-r--r--Documentation/driver-api/cxl/allocation/page-allocator.rst85
-rw-r--r--Documentation/driver-api/cxl/allocation/reclaim.rst51
-rw-r--r--Documentation/driver-api/cxl/devices/device-types.rst165
-rw-r--r--Documentation/driver-api/cxl/index.rst46
-rw-r--r--Documentation/driver-api/cxl/linux/access-coordinates.rst178
-rw-r--r--Documentation/driver-api/cxl/linux/cxl-driver.rst630
-rw-r--r--Documentation/driver-api/cxl/linux/dax-driver.rst43
-rw-r--r--Documentation/driver-api/cxl/linux/early-boot.rst137
-rw-r--r--Documentation/driver-api/cxl/linux/example-configurations/hb-interleave.rst314
-rw-r--r--Documentation/driver-api/cxl/linux/example-configurations/intra-hb-interleave.rst291
-rw-r--r--Documentation/driver-api/cxl/linux/example-configurations/multi-interleave.rst401
-rw-r--r--Documentation/driver-api/cxl/linux/example-configurations/single-device.rst246
-rw-r--r--Documentation/driver-api/cxl/linux/memory-hotplug.rst78
-rw-r--r--Documentation/driver-api/cxl/linux/overview.rst103
-rw-r--r--Documentation/driver-api/cxl/maturity-map.rst6
-rw-r--r--Documentation/driver-api/cxl/platform/acpi.rst76
-rw-r--r--Documentation/driver-api/cxl/platform/acpi/cedt.rst62
-rw-r--r--Documentation/driver-api/cxl/platform/acpi/dsdt.rst28
-rw-r--r--Documentation/driver-api/cxl/platform/acpi/hmat.rst32
-rw-r--r--Documentation/driver-api/cxl/platform/acpi/slit.rst21
-rw-r--r--Documentation/driver-api/cxl/platform/acpi/srat.rst71
-rw-r--r--Documentation/driver-api/cxl/platform/bios-and-efi.rst262
-rw-r--r--Documentation/driver-api/cxl/platform/cdat.rst118
-rw-r--r--Documentation/driver-api/cxl/platform/example-configs.rst13
-rw-r--r--Documentation/driver-api/cxl/platform/example-configurations/flexible.rst296
-rw-r--r--Documentation/driver-api/cxl/platform/example-configurations/hb-interleave.rst107
-rw-r--r--Documentation/driver-api/cxl/platform/example-configurations/multi-dev-per-hb.rst90
-rw-r--r--Documentation/driver-api/cxl/platform/example-configurations/one-dev-per-hb.rst136
-rw-r--r--Documentation/driver-api/cxl/theory-of-operation.rst (renamed from Documentation/driver-api/cxl/memory-devices.rst)27
-rw-r--r--Documentation/driver-api/dmaengine/provider.rst8
-rw-r--r--Documentation/driver-api/driver-model/devres.rst3
-rw-r--r--Documentation/driver-api/early-userspace/buffer-format.rst34
-rw-r--r--Documentation/driver-api/gpio/index.rst2
-rw-r--r--Documentation/driver-api/index.rst1
-rw-r--r--Documentation/driver-api/ipmi.rst29
-rw-r--r--Documentation/driver-api/ntb.rst2
-rw-r--r--Documentation/driver-api/thermal/intel_dptf.rst21
-rw-r--r--Documentation/driver-api/usb/usb.rst1
-rw-r--r--Documentation/edac/memory_repair.rst31
-rw-r--r--Documentation/edac/scrub.rst76
-rw-r--r--Documentation/fb/sstfb.rst2
-rw-r--r--Documentation/filesystems/bcachefs/casefolding.rst18
-rw-r--r--Documentation/filesystems/bcachefs/future/idle_work.rst78
-rw-r--r--Documentation/filesystems/bcachefs/index.rst7
-rw-r--r--Documentation/filesystems/debugfs.rst19
-rw-r--r--Documentation/filesystems/erofs.rst1
-rw-r--r--Documentation/filesystems/ext4/atomic_writes.rst225
-rw-r--r--Documentation/filesystems/ext4/overview.rst1
-rw-r--r--Documentation/filesystems/ext4/super.rst20
-rw-r--r--Documentation/filesystems/f2fs.rst52
-rw-r--r--Documentation/filesystems/fscrypt.rst189
-rw-r--r--Documentation/filesystems/fuse-passthrough.rst133
-rw-r--r--Documentation/filesystems/index.rst2
-rw-r--r--Documentation/filesystems/iomap/design.rst16
-rw-r--r--Documentation/filesystems/locking.rst54
-rw-r--r--Documentation/filesystems/mount_api.rst16
-rw-r--r--Documentation/filesystems/netfs_library.rst1013
-rw-r--r--Documentation/filesystems/porting.rst46
-rw-r--r--Documentation/filesystems/relay.rst36
-rw-r--r--Documentation/filesystems/resctrl.rst (renamed from Documentation/arch/x86/resctrl.rst)6
-rw-r--r--Documentation/filesystems/vfs.rst43
-rw-r--r--Documentation/firmware-guide/acpi/dsd/data-node-references.rst26
-rw-r--r--Documentation/firmware-guide/acpi/dsd/graph.rst11
-rw-r--r--Documentation/firmware-guide/acpi/dsd/leds.rst7
-rw-r--r--Documentation/gpu/amdgpu/amd-hardware-list-info.rst23
-rw-r--r--Documentation/gpu/amdgpu/amdgpu-glossary.rst75
-rw-r--r--Documentation/gpu/amdgpu/apu-asic-info-table.csv2
-rw-r--r--Documentation/gpu/amdgpu/debugfs.rst210
-rw-r--r--Documentation/gpu/amdgpu/debugging.rst7
-rw-r--r--Documentation/gpu/amdgpu/display/dc-debug.rst2
-rw-r--r--Documentation/gpu/amdgpu/driver-core.rst81
-rw-r--r--Documentation/gpu/amdgpu/driver-misc.rst17
-rw-r--r--Documentation/gpu/amdgpu/gc/index.rst52
-rw-r--r--Documentation/gpu/amdgpu/gc/mes.rst38
-rw-r--r--Documentation/gpu/amdgpu/index.rst5
-rw-r--r--Documentation/gpu/amdgpu/pipe_and_queue_abstraction.svg1279
-rw-r--r--Documentation/gpu/automated_testing.rst4
-rw-r--r--Documentation/gpu/driver-uapi.rst5
-rw-r--r--Documentation/gpu/drm-kms-helpers.rst15
-rw-r--r--Documentation/gpu/nouveau.rst3
-rw-r--r--Documentation/gpu/nova/core/todo.rst16
-rw-r--r--Documentation/gpu/rfc/i915_scheduler.rst2
-rw-r--r--Documentation/gpu/todo.rst13
-rw-r--r--Documentation/gpu/vgaarbiter.rst6
-rw-r--r--Documentation/gpu/xe/index.rst1
-rw-r--r--Documentation/gpu/xe/xe_configfs.rst10
-rw-r--r--Documentation/gpu/xe/xe_firmware.rst6
-rw-r--r--Documentation/gpu/xe/xe_pcode.rst7
-rw-r--r--Documentation/hid/intel-thc-hid.rst8
-rw-r--r--Documentation/hwmon/acpi_power_meter.rst29
-rw-r--r--Documentation/hwmon/asus_ec_sensors.rst2
-rw-r--r--Documentation/hwmon/ina238.rst15
-rw-r--r--Documentation/hwmon/index.rst5
-rw-r--r--Documentation/hwmon/kbatt.rst60
-rw-r--r--Documentation/hwmon/kfan.rst39
-rw-r--r--Documentation/hwmon/lt3074.rst72
-rw-r--r--Documentation/hwmon/max34440.rst30
-rw-r--r--Documentation/hwmon/max77705.rst39
-rw-r--r--Documentation/hwmon/mpq8785.rst27
-rw-r--r--Documentation/hwmon/oxp-sensors.rst89
-rw-r--r--Documentation/i2c/busses/i2c-parport.rst2
-rw-r--r--Documentation/index.rst2
-rw-r--r--Documentation/input/devices/amijoy.rst125
-rw-r--r--Documentation/kbuild/makefiles.rst4
-rw-r--r--Documentation/kbuild/reproducible-builds.rst17
-rw-r--r--Documentation/leds/index.rst2
-rw-r--r--Documentation/leds/leds-class-multicolor.rst82
-rw-r--r--Documentation/mm/damon/design.rst15
-rw-r--r--Documentation/mm/damon/index.rst6
-rw-r--r--Documentation/netlink/genetlink-c.yaml3
-rw-r--r--Documentation/netlink/genetlink-legacy.yaml3
-rw-r--r--Documentation/netlink/netlink-raw.yaml3
-rw-r--r--Documentation/netlink/specs/devlink.yaml24
-rw-r--r--Documentation/netlink/specs/ethtool.yaml31
-rw-r--r--Documentation/netlink/specs/netdev.yaml12
-rw-r--r--Documentation/netlink/specs/nl80211.yaml68
-rw-r--r--Documentation/netlink/specs/ovpn.yaml367
-rw-r--r--Documentation/netlink/specs/ovs_datapath.yaml10
-rw-r--r--Documentation/netlink/specs/ovs_vport.yaml9
-rw-r--r--Documentation/netlink/specs/rt-addr.yaml (renamed from Documentation/netlink/specs/rt_addr.yaml)24
-rw-r--r--Documentation/netlink/specs/rt-link.yaml (renamed from Documentation/netlink/specs/rt_link.yaml)268
-rw-r--r--Documentation/netlink/specs/rt-neigh.yaml (renamed from Documentation/netlink/specs/rt_neigh.yaml)26
-rw-r--r--Documentation/netlink/specs/rt-route.yaml (renamed from Documentation/netlink/specs/rt_route.yaml)22
-rw-r--r--Documentation/netlink/specs/rt-rule.yaml (renamed from Documentation/netlink/specs/rt_rule.yaml)8
-rw-r--r--Documentation/netlink/specs/tc.yaml524
-rw-r--r--Documentation/networking/arcnet-hardware.rst2
-rw-r--r--Documentation/networking/dccp.rst219
-rw-r--r--Documentation/networking/device_drivers/ethernet/huawei/hinic3.rst137
-rw-r--r--Documentation/networking/device_drivers/ethernet/index.rst2
-rw-r--r--Documentation/networking/device_drivers/ethernet/meta/fbnic.rst60
-rw-r--r--Documentation/networking/device_drivers/ethernet/ti/icssg_prueth.rst56
-rw-r--r--Documentation/networking/devlink/devlink-info.rst4
-rw-r--r--Documentation/networking/devlink/devlink-trap.rst2
-rw-r--r--Documentation/networking/devlink/index.rst1
-rw-r--r--Documentation/networking/devlink/ixgbe.rst171
-rw-r--r--Documentation/networking/devmem.rst150
-rw-r--r--Documentation/networking/index.rst1
-rw-r--r--Documentation/networking/ip-sysctl.rst8
-rw-r--r--Documentation/networking/net_cachelines/net_device.rst3
-rw-r--r--Documentation/networking/net_cachelines/snmp.rst2
-rw-r--r--Documentation/networking/netdev-features.rst5
-rw-r--r--Documentation/networking/netdevices.rst75
-rw-r--r--Documentation/networking/netmem.rst23
-rw-r--r--Documentation/networking/rds.rst8
-rw-r--r--Documentation/networking/rxrpc.rst39
-rw-r--r--Documentation/networking/timestamping.rst8
-rw-r--r--Documentation/networking/tproxy.rst4
-rw-r--r--Documentation/networking/xfrm_device.rst10
-rw-r--r--Documentation/power/energy-model.rst8
-rw-r--r--Documentation/power/runtime_pm.rst2
-rw-r--r--Documentation/process/1.Intro.rst12
-rw-r--r--Documentation/process/adding-syscalls.rst84
-rw-r--r--Documentation/process/changes.rst6
-rw-r--r--Documentation/process/debugging/driver_development_debugging_guide.rst2
-rw-r--r--Documentation/process/debugging/gdb-kernel-debugging.rst34
-rw-r--r--Documentation/process/embargoed-hardware-issues.rst1
-rw-r--r--Documentation/rust/coding-guidelines.rst29
-rw-r--r--Documentation/rust/quick-start.rst44
-rw-r--r--Documentation/rust/testing.rst80
-rw-r--r--Documentation/scheduler/sched-ext.rst14
-rw-r--r--Documentation/scheduler/sched-stats.rst2
-rw-r--r--Documentation/scsi/scsi_mid_low_api.rst18
-rw-r--r--Documentation/sphinx/automarkup.py97
-rw-r--r--Documentation/sphinx/kerneldoc.py219
-rw-r--r--Documentation/staging/rpmsg.rst46
-rw-r--r--Documentation/staging/speculation.rst1
-rw-r--r--Documentation/tools/rtla/common_timerlat_description.rst10
-rw-r--r--Documentation/tools/rtla/rtla-timerlat.rst9
-rw-r--r--Documentation/trace/coresight/panic.rst4
-rw-r--r--Documentation/trace/ftrace.rst13
-rw-r--r--Documentation/trace/index.rst98
-rw-r--r--Documentation/trace/tracepoints.rst17
-rw-r--r--Documentation/translations/it_IT/process/changes.rst6
-rw-r--r--Documentation/translations/sp_SP/process/2.Process.rst11
-rw-r--r--Documentation/translations/sp_SP/process/howto.rst10
-rw-r--r--Documentation/translations/sp_SP/process/kernel-docs.rst5
-rw-r--r--Documentation/translations/sp_SP/process/submitting-patches.rst13
-rw-r--r--Documentation/translations/zh_CN/admin-guide/README.rst2
-rw-r--r--Documentation/translations/zh_CN/admin-guide/bug-hunting.rst2
-rw-r--r--Documentation/translations/zh_CN/arch/openrisc/openrisc_port.rst12
-rw-r--r--Documentation/translations/zh_CN/core-api/irq/irq-domain.rst8
-rw-r--r--Documentation/translations/zh_CN/core-api/printk-formats.rst3
-rw-r--r--Documentation/translations/zh_CN/dev-tools/gdb-kernel-debugging.rst34
-rw-r--r--Documentation/translations/zh_CN/devicetree/overlay-notes.rst12
-rw-r--r--Documentation/translations/zh_CN/driver-api/gpio/index.rst2
-rw-r--r--Documentation/translations/zh_CN/how-to.rst459
-rw-r--r--Documentation/translations/zh_CN/index.rst24
-rw-r--r--Documentation/translations/zh_CN/networking/index.rst160
-rw-r--r--Documentation/translations/zh_CN/networking/msg_zerocopy.rst223
-rw-r--r--Documentation/translations/zh_TW/admin-guide/bug-hunting.rst2
-rw-r--r--Documentation/translations/zh_TW/arch/openrisc/openrisc_port.rst12
-rw-r--r--Documentation/translations/zh_TW/dev-tools/gdb-kernel-debugging.rst34
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst19
-rw-r--r--Documentation/userspace-api/media/v4l/meta-formats.rst1
-rw-r--r--Documentation/userspace-api/media/v4l/metafmt-c3-isp.rst86
-rw-r--r--Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst128
-rw-r--r--Documentation/userspace-api/mseal.rst2
-rw-r--r--Documentation/userspace-api/netlink/netlink-raw.rst2
-rw-r--r--Documentation/virt/hyperv/vmbus.rst28
-rw-r--r--Documentation/virt/kvm/api.rst794
-rw-r--r--Documentation/virt/kvm/devices/vcpu.rst24
-rw-r--r--Documentation/virt/kvm/x86/index.rst1
-rw-r--r--Documentation/virt/kvm/x86/intel-tdx.rst255
-rw-r--r--Documentation/wmi/devices/alienware-wmi.rst425
-rw-r--r--Documentation/wmi/devices/dell-wmi-ddv.rst46
-rw-r--r--Documentation/wmi/devices/msi-wmi-platform.rst4
-rw-r--r--LICENSES/deprecated/CC0-1.0129
-rw-r--r--MAINTAINERS873
-rw-r--r--Makefile18
-rw-r--r--arch/Kconfig8
-rw-r--r--arch/alpha/include/asm/pgtable.h7
-rw-r--r--arch/alpha/include/uapi/asm/socket.h2
-rw-r--r--arch/alpha/kernel/perf_event.c11
-rw-r--r--arch/arc/include/asm/hugepage.h2
-rw-r--r--arch/arc/include/asm/pgtable-levels.h2
-rw-r--r--arch/arc/include/asm/syscall.h25
-rw-r--r--arch/arc/kernel/intc-arcv2.c2
-rw-r--r--arch/arc/kernel/intc-compact.c5
-rw-r--r--arch/arc/kernel/mcip.c3
-rw-r--r--arch/arc/kernel/perf_event.c6
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/boot/compressed/Makefile2
-rw-r--r--arch/arm/boot/compressed/efi-header.S6
-rw-r--r--arch/arm/boot/dts/allwinner/sun7i-a20-bananapi.dts27
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-a83t.dtsi4
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-neo-air.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h3.dtsi2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-r40.dtsi2
-rw-r--r--arch/arm/boot/dts/amlogic/Makefile1
-rw-r--r--arch/arm/boot/dts/amlogic/meson8-fernsehfee3.dts306
-rw-r--r--arch/arm/boot/dts/amlogic/meson8.dtsi28
-rw-r--r--arch/arm/boot/dts/amlogic/meson8b.dtsi10
-rw-r--r--arch/arm/boot/dts/broadcom/Makefile2
-rw-r--r--arch/arm/boot/dts/broadcom/bcm2166x-common.dtsi7
-rw-r--r--arch/arm/boot/dts/broadcom/bcm2166x-pinctrl.dtsi297
-rw-r--r--arch/arm/boot/dts/broadcom/bcm28155-ap.dts68
-rw-r--r--arch/arm/boot/dts/broadcom/bcm2837-rpi-2-b.dts130
-rw-r--r--arch/arm/boot/dts/broadcom/bcm59056.dtsi91
-rw-r--r--arch/arm/boot/dts/intel/socfpga/Makefile1
-rw-r--r--arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_de10nano.dts95
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-db.dtsi2
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-dir665.dts2
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-mv88f6281gtw-ge.dts2
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood.dtsi2
-rw-r--r--arch/arm/boot/dts/marvell/orion5x.dtsi4
-rw-r--r--arch/arm/boot/dts/mediatek/mt2701-evb.dts1
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts209
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts32
-rw-r--r--arch/arm/boot/dts/microchip/at91sam9263ek.dts2
-rw-r--r--arch/arm/boot/dts/microchip/sama7d65.dtsi346
-rw-r--r--arch/arm/boot/dts/microchip/tny_a9263.dts2
-rw-r--r--arch/arm/boot/dts/microchip/usb_a9260.dts8
-rw-r--r--arch/arm/boot/dts/microchip/usb_a9260_common.dtsi9
-rw-r--r--arch/arm/boot/dts/microchip/usb_a9263.dts4
-rw-r--r--arch/arm/boot/dts/microchip/usb_a9g20.dts17
-rw-r--r--arch/arm/boot/dts/microchip/usb_a9g20_common.dtsi27
-rw-r--r--arch/arm/boot/dts/microchip/usb_a9g20_lpw.dts10
-rw-r--r--arch/arm/boot/dts/nuvoton/nuvoton-common-npcm7xx.dtsi108
-rw-r--r--arch/arm/boot/dts/nuvoton/nuvoton-npcm730-gbs.dts12
-rw-r--r--arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts120
-rw-r--r--arch/arm/boot/dts/nuvoton/nuvoton-npcm750.dtsi65
-rw-r--r--arch/arm/boot/dts/nvidia/Makefile1
-rw-r--r--arch/arm/boot/dts/nvidia/tegra124-apalis-eval.dts5
-rw-r--r--arch/arm/boot/dts/nvidia/tegra124-apalis-v1.2-eval.dts5
-rw-r--r--arch/arm/boot/dts/nvidia/tegra20.dtsi2
-rw-r--r--arch/arm/boot/dts/nvidia/tegra30-apalis-eval.dts5
-rw-r--r--arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1-eval.dts5
-rw-r--r--arch/arm/boot/dts/nvidia/tegra30-asus-tf300tl.dts857
-rw-r--r--arch/arm/boot/dts/nvidia/tegra30.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx25.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx31-lite.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx31.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx35.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx51-digi-connectcore-som.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx51.dtsi4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53.dtsi4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-apalis-eval.dts9
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-mccmon6.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qp-prtwd3.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7d-remarkable2.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7d.dtsi6
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7s.dtsi1
-rw-r--r--arch/arm/boot/dts/nxp/lpc/lpc32xx.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/ls/Makefile9
-rw-r--r--arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-hdmi.dtso32
-rw-r--r--arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-lvds-tm070jvhg33.dtso47
-rw-r--r--arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-dc44.dtso55
-rw-r--r--arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-fc21.dtso55
-rw-r--r--arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a.dts5
-rw-r--r--arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts6
-rw-r--r--arch/arm/boot/dts/nxp/mxs/imx23-xfi3.dts12
-rw-r--r--arch/arm/boot/dts/nxp/mxs/imx28-btt3.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/mxs/imx28-cfa10036.dts6
-rw-r--r--arch/arm/boot/dts/qcom/Makefile1
-rw-r--r--arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts22
-rw-r--r--arch/arm/boot/dts/qcom/msm8926.dtsi11
-rw-r--r--arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts1
-rw-r--r--arch/arm/boot/dts/qcom/qcom-apq8026-samsung-milletwifi.dts2
-rw-r--r--arch/arm/boot/dts/qcom/qcom-apq8064-ifc6410.dts1
-rw-r--r--arch/arm/boot/dts/qcom/qcom-apq8064-lg-nexus4-mako.dts359
-rw-r--r--arch/arm/boot/dts/qcom/qcom-apq8064.dtsi104
-rw-r--r--arch/arm/boot/dts/qcom/qcom-apq8074-dragonboard.dts4
-rw-r--r--arch/arm/boot/dts/qcom/qcom-ipq4018-ap120c-ac.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/qcom-ipq4018-jalapeno.dts4
-rw-r--r--arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-dempsey.dts1
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-makepeace.dts1
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-moneypenny.dts1
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8226-samsung-matisse-common.dtsi19
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8226.dtsi164
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8926-htc-memul.dts14
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-superman-lte.dts1
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-tesla.dts1
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8926-motorola-peregrine.dts2
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8926-samsung-matisselte.dts5
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8960.dtsi101
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974.dtsi21
-rw-r--r--arch/arm/boot/dts/qcom/qcom-sdx55.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/qcom-sdx65.dtsi2
-rw-r--r--arch/arm/boot/dts/renesas/Makefile1
-rw-r--r--arch/arm/boot/dts/renesas/r9a06g032-rzn1d400-db.dts124
-rw-r--r--arch/arm/boot/dts/renesas/r9a06g032-rzn1d400-eb.dts244
-rw-r--r--arch/arm/boot/dts/renesas/r9a06g032.dtsi46
-rw-r--r--arch/arm/boot/dts/rockchip/rk3036-kylin.dts18
-rw-r--r--arch/arm/boot/dts/rockchip/rk3036.dtsi40
-rw-r--r--arch/arm/boot/dts/rockchip/rk3066a-marsboard.dts37
-rw-r--r--arch/arm/boot/dts/rockchip/rk3128.dtsi8
-rw-r--r--arch/arm/boot/dts/rockchip/rk3188.dtsi1
-rw-r--r--arch/arm/boot/dts/rockchip/rk322x.dtsi1
-rw-r--r--arch/arm/boot/dts/rockchip/rk3288.dtsi5
-rw-r--r--arch/arm/boot/dts/rockchip/rv1108.dtsi1
-rw-r--r--arch/arm/boot/dts/rockchip/rv1126-sonoff-ihost.dtsi14
-rw-r--r--arch/arm/boot/dts/samsung/s5pv210-aries.dtsi2
-rw-r--r--arch/arm/boot/dts/st/Makefile4
-rw-r--r--arch/arm/boot/dts/st/spear1310-evb.dts8
-rw-r--r--arch/arm/boot/dts/st/spear1340-evb.dts8
-rw-r--r--arch/arm/boot/dts/st/spear13xx.dtsi8
-rw-r--r--arch/arm/boot/dts/st/spear300-evb.dts6
-rw-r--r--arch/arm/boot/dts/st/spear310-evb.dts6
-rw-r--r--arch/arm/boot/dts/st/spear320-evb.dts6
-rw-r--r--arch/arm/boot/dts/st/spear320-hmi.dts6
-rw-r--r--arch/arm/boot/dts/st/spear3xx.dtsi6
-rw-r--r--arch/arm/boot/dts/st/spear600.dtsi8
-rw-r--r--arch/arm/boot/dts/st/stm32f746.dtsi34
-rw-r--r--arch/arm/boot/dts/st/stm32h7-pinctrl.dtsi34
-rw-r--r--arch/arm/boot/dts/st/stm32h743.dtsi8
-rw-r--r--arch/arm/boot/dts/st/stm32h743i-disco.dts2
-rw-r--r--arch/arm/boot/dts/st/stm32h743i-eval.dts2
-rw-r--r--arch/arm/boot/dts/st/stm32h747i-disco.dts136
-rw-r--r--arch/arm/boot/dts/st/stm32h750i-art-pi.dts8
-rw-r--r--arch/arm/boot/dts/st/stm32mp131.dtsi5
-rw-r--r--arch/arm/boot/dts/st/stm32mp133.dtsi2
-rw-r--r--arch/arm/boot/dts/st/stm32mp135f-dk.dts2
-rw-r--r--arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi2
-rw-r--r--arch/arm/boot/dts/st/stm32mp157a-iot-box.dts2
-rw-r--r--arch/arm/boot/dts/st/stm32mp157c-dk2.dts2
-rw-r--r--arch/arm/boot/dts/st/stm32mp157c-ultra-fly-sbc.dts1152
-rw-r--r--arch/arm/boot/dts/st/stm32mp15xx-dhcor-avenger96.dtsi2
-rw-r--r--arch/arm/boot/dts/ti/davinci/da850-evm.dts2
-rw-r--r--arch/arm/boot/dts/ti/omap/am335x-evm.dts2
-rw-r--r--arch/arm/boot/dts/ti/omap/omap3-n900.dts2
-rw-r--r--arch/arm/boot/dts/ti/omap/omap3.dtsi4
-rw-r--r--arch/arm/boot/dts/ti/omap/omap4-l4.dtsi4
-rw-r--r--arch/arm/boot/dts/ti/omap/omap4-panda-common.dtsi39
-rw-r--r--arch/arm/boot/dts/ti/omap/omap4-panda-es.dts32
-rw-r--r--arch/arm/boot/dts/ti/omap/omap5-l4.dtsi4
-rw-r--r--arch/arm/boot/dts/vt8500/Makefile3
-rw-r--r--arch/arm/boot/dts/vt8500/vt8500.dtsi9
-rw-r--r--arch/arm/boot/dts/vt8500/wm8505.dtsi9
-rw-r--r--arch/arm/boot/dts/vt8500/wm8650.dtsi9
-rw-r--r--arch/arm/boot/dts/vt8500/wm8750.dtsi9
-rw-r--r--arch/arm/boot/dts/vt8500/wm8850.dtsi9
-rw-r--r--arch/arm/boot/dts/vt8500/wm8950-apc-rock.dts21
-rw-r--r--arch/arm/boot/dts/vt8500/wm8950.dtsi11
-rw-r--r--arch/arm/common/sa1111.c12
-rw-r--r--arch/arm/common/scoop.c7
-rw-r--r--arch/arm/configs/at91_dt_defconfig1
-rw-r--r--arch/arm/configs/collie_defconfig1
-rw-r--r--arch/arm/configs/davinci_all_defconfig2
-rw-r--r--arch/arm/configs/dove_defconfig1
-rw-r--r--arch/arm/configs/exynos_defconfig6
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig2
-rw-r--r--arch/arm/configs/lpc18xx_defconfig1
-rw-r--r--arch/arm/configs/lpc32xx_defconfig1
-rw-r--r--arch/arm/configs/milbeaut_m10v_defconfig6
-rw-r--r--arch/arm/configs/mmp2_defconfig1
-rw-r--r--arch/arm/configs/multi_v4t_defconfig1
-rw-r--r--arch/arm/configs/multi_v5_defconfig2
-rw-r--r--arch/arm/configs/multi_v7_defconfig24
-rw-r--r--arch/arm/configs/mvebu_v5_defconfig1
-rw-r--r--arch/arm/configs/mxs_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig12
-rw-r--r--arch/arm/configs/orion5x_defconfig1
-rw-r--r--arch/arm/configs/pxa168_defconfig1
-rw-r--r--arch/arm/configs/pxa910_defconfig1
-rw-r--r--arch/arm/configs/pxa_defconfig8
-rw-r--r--arch/arm/configs/s5pv210_defconfig1
-rw-r--r--arch/arm/configs/sama7_defconfig2
-rw-r--r--arch/arm/configs/shmobile_defconfig28
-rw-r--r--arch/arm/configs/spitz_defconfig3
-rw-r--r--arch/arm/configs/stm32_defconfig1
-rw-r--r--arch/arm/configs/wpcm450_defconfig2
-rw-r--r--arch/arm/crypto/Kconfig59
-rw-r--r--arch/arm/crypto/Makefile20
-rw-r--r--arch/arm/crypto/aes-ce-glue.c104
-rw-r--r--arch/arm/crypto/aes-neonbs-glue.c118
-rw-r--r--arch/arm/crypto/blake2b-neon-glue.c21
-rw-r--r--arch/arm/crypto/chacha-glue.c352
-rw-r--r--arch/arm/crypto/ghash-ce-glue.c104
-rw-r--r--arch/arm/crypto/poly1305-glue.c274
-rw-r--r--arch/arm/crypto/sha1-ce-glue.c36
-rw-r--r--arch/arm/crypto/sha1.h14
-rw-r--r--arch/arm/crypto/sha1_glue.c33
-rw-r--r--arch/arm/crypto/sha1_neon_glue.c39
-rw-r--r--arch/arm/crypto/sha2-ce-glue.c109
-rw-r--r--arch/arm/crypto/sha256_glue.c117
-rw-r--r--arch/arm/crypto/sha256_glue.h15
-rw-r--r--arch/arm/crypto/sha256_neon_glue.c92
-rw-r--r--arch/arm/crypto/sha512-glue.c36
-rw-r--r--arch/arm/crypto/sha512-neon-glue.c43
-rw-r--r--arch/arm/crypto/sha512.h6
-rw-r--r--arch/arm/include/asm/pgtable-3level.h1
-rw-r--r--arch/arm/include/asm/pgtable.h1
-rw-r--r--arch/arm/include/asm/simd.h8
-rw-r--r--arch/arm/include/asm/syscall.h37
-rw-r--r--arch/arm/lib/Makefile6
-rw-r--r--arch/arm/lib/crc-t10dif.c (renamed from arch/arm/lib/crc-t10dif-glue.c)6
-rw-r--r--arch/arm/lib/crc32.c (renamed from arch/arm/lib/crc32-glue.c)6
-rw-r--r--arch/arm/lib/crypto/.gitignore3
-rw-r--r--arch/arm/lib/crypto/Kconfig31
-rw-r--r--arch/arm/lib/crypto/Makefile32
-rw-r--r--arch/arm/lib/crypto/blake2s-core.S (renamed from arch/arm/crypto/blake2s-core.S)0
-rw-r--r--arch/arm/lib/crypto/blake2s-glue.c (renamed from arch/arm/crypto/blake2s-glue.c)0
-rw-r--r--arch/arm/lib/crypto/chacha-glue.c138
-rw-r--r--arch/arm/lib/crypto/chacha-neon-core.S (renamed from arch/arm/crypto/chacha-neon-core.S)2
-rw-r--r--arch/arm/lib/crypto/chacha-scalar-core.S (renamed from arch/arm/crypto/chacha-scalar-core.S)5
-rw-r--r--arch/arm/lib/crypto/poly1305-armv4.pl (renamed from arch/arm/crypto/poly1305-armv4.pl)4
-rw-r--r--arch/arm/lib/crypto/poly1305-glue.c80
-rw-r--r--arch/arm/lib/crypto/sha256-armv4.pl (renamed from arch/arm/crypto/sha256-armv4.pl)20
-rw-r--r--arch/arm/lib/crypto/sha256-ce.S (renamed from arch/arm/crypto/sha2-ce-core.S)10
-rw-r--r--arch/arm/lib/crypto/sha256.c64
-rw-r--r--arch/arm/mach-aspeed/Kconfig1
-rw-r--r--arch/arm/mach-davinci/Kconfig7
-rw-r--r--arch/arm/mach-davinci/Makefile1
-rw-r--r--arch/arm/mach-davinci/cputype.h1
-rw-r--r--arch/arm/mach-davinci/da830.c506
-rw-r--r--arch/arm/mach-davinci/da850.c1
-rw-r--r--arch/arm/mach-davinci/da8xx.h2
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c1
-rw-r--r--arch/arm/mach-davinci/irqs.h27
-rw-r--r--arch/arm/mach-davinci/mux.h404
-rw-r--r--arch/arm/mach-davinci/psc.h3
-rw-r--r--arch/arm/mach-exynos/suspend.c5
-rw-r--r--arch/arm/mach-imx/avic.c4
-rw-r--r--arch/arm/mach-imx/gpc.c5
-rw-r--r--arch/arm/mach-imx/tzic.c4
-rw-r--r--arch/arm/mach-omap1/irq.c3
-rw-r--r--arch/arm/mach-omap2/clockdomain.h1
-rw-r--r--arch/arm/mach-omap2/clockdomains33xx_data.c2
-rw-r--r--arch/arm/mach-omap2/cm33xx.c14
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c5
-rw-r--r--arch/arm/mach-omap2/pmic-cpcap.c6
-rw-r--r--arch/arm/mach-pxa/irq.c5
-rw-r--r--arch/arm/mach-s3c/gpio-samsung.c10
-rw-r--r--arch/arm/mach-stm32/board-dt.c1
-rw-r--r--arch/arm/mm/flush.c4
-rw-r--r--arch/arm/mm/mmu.c2
-rw-r--r--arch/arm/plat-orion/gpio.c24
-rw-r--r--arch/arm/probes/uprobes/core.c4
-rw-r--r--arch/arm64/Kconfig66
-rw-r--r--arch/arm64/Kconfig.platforms2
-rw-r--r--arch/arm64/Makefile21
-rw-r--r--arch/arm64/boot/dts/airoha/en7581-evb.dts30
-rw-r--r--arch/arm64/boot/dts/airoha/en7581.dtsi105
-rw-r--r--arch/arm64/boot/dts/allwinner/Makefile5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi3
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a133-liontron-h-a133l.dts211
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts12
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts19
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi6
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts38
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts16
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi22
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616-bigtreetech-cb1.dtsi5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616-x96-mate.dts5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi21
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h618-longan-module-3h.dtsi5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero2w.dts5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h618-transpeed-8k618-t.dts5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h618-yuzukihd-chameleon.dts222
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts10
-rw-r--r--arch/arm64/boot/dts/allwinner/sun55i-a523.dtsi639
-rw-r--r--arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts318
-rw-r--r--arch/arm64/boot/dts/allwinner/sun55i-h728-x96qpro+.dts287
-rw-r--r--arch/arm64/boot/dts/allwinner/sun55i-t527-avaota-a1.dts327
-rw-r--r--arch/arm64/boot/dts/amazon/alpine-v2.dtsi2
-rw-r--r--arch/arm64/boot/dts/amazon/alpine-v3.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/Makefile4
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a4-reset.h93
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi133
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a5-reset.h95
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi8
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi5
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-s6-s905x5-bl209.dts42
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-s6.dtsi97
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-s7-s805x3-bp201.dts41
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-s7.dtsi99
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-s7d-s905x5m-bm202.dts41
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-s7d.dtsi99
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-a1.dtsi1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg.dtsi12
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi16
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a.dtsi1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-a311d-libretech-cc.dts6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-odroid.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi16
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s805y-xiaomi-aquaman.dts262
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s805y.dtsi10
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi28
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-s4.dtsi5
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-ac2xx.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-bananapi.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-s905d3-libretech-cc.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1.dtsi1
-rw-r--r--arch/arm64/boot/dts/apple/s5l8960x.dtsi13
-rw-r--r--arch/arm64/boot/dts/apple/s800-0-3.dtsi13
-rw-r--r--arch/arm64/boot/dts/apple/s8001.dtsi13
-rw-r--r--arch/arm64/boot/dts/apple/t6001.dtsi1
-rw-r--r--arch/arm64/boot/dts/apple/t6002.dtsi1
-rw-r--r--arch/arm64/boot/dts/apple/t600x-die0.dtsi57
-rw-r--r--arch/arm64/boot/dts/apple/t7000.dtsi13
-rw-r--r--arch/arm64/boot/dts/apple/t7001.dtsi16
-rw-r--r--arch/arm64/boot/dts/apple/t8010.dtsi13
-rw-r--r--arch/arm64/boot/dts/apple/t8011.dtsi16
-rw-r--r--arch/arm64/boot/dts/apple/t8012.dtsi13
-rw-r--r--arch/arm64/boot/dts/apple/t8015.dtsi32
-rw-r--r--arch/arm64/boot/dts/apple/t8103-j293.dts10
-rw-r--r--arch/arm64/boot/dts/apple/t8103.dtsi58
-rw-r--r--arch/arm64/boot/dts/apple/t8112-j493.dts10
-rw-r--r--arch/arm64/boot/dts/apple/t8112.dtsi57
-rw-r--r--arch/arm64/boot/dts/arm/corstone1000.dtsi1
-rw-r--r--arch/arm64/boot/dts/arm/foundation-v8.dtsi1
-rw-r--r--arch/arm64/boot/dts/arm/fvp-base-revc.dts101
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi1
-rw-r--r--arch/arm64/boot/dts/arm/morello.dtsi22
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts1
-rw-r--r--arch/arm64/boot/dts/blaize/blaize-blzp1600-cb2.dts36
-rw-r--r--arch/arm64/boot/dts/blaize/blaize-blzp1600.dtsi12
-rw-r--r--arch/arm64/boot/dts/broadcom/Makefile1
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts8
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2712.dtsi155
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2837-rpi-2-b.dts2
-rw-r--r--arch/arm64/boot/dts/exynos/Makefile3
-rw-r--r--arch/arm64/boot/dts/exynos/exynos7870-a2corelte.dts630
-rw-r--r--arch/arm64/boot/dts/exynos/exynos7870-j6lte.dts613
-rw-r--r--arch/arm64/boot/dts/exynos/exynos7870-on7xelte.dts662
-rw-r--r--arch/arm64/boot/dts/exynos/exynos7870-pinctrl.dtsi1021
-rw-r--r--arch/arm64/boot/dts/exynos/exynos7870.dtsi712
-rw-r--r--arch/arm64/boot/dts/exynos/exynos850.dtsi14
-rw-r--r--arch/arm64/boot/dts/exynos/exynosautov9.dtsi48
-rw-r--r--arch/arm64/boot/dts/exynos/exynosautov920.dtsi536
-rw-r--r--arch/arm64/boot/dts/exynos/google/gs101-pixel-common.dtsi4
-rw-r--r--arch/arm64/boot/dts/exynos/google/gs101.dtsi8
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile40
-rw-r--r--arch/arm64/boot/dts/freescale/imx-pcie0-ep.dtso (renamed from arch/arm64/boot/dts/freescale/imx8mp-evk-pcie-ep.dtso)6
-rw-r--r--arch/arm64/boot/dts/freescale/imx-pcie1-ep.dtso15
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-apalis-eval.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-apalis-ixora-v1.1.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-apalis-ixora-v1.2.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi70
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dxl-evk.dts12
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dxl-ss-hsio.dtsi25
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dxp-tqma8xdps-mb-smarc-2.dts16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dxp-tqma8xdps.dtsi24
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi18
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi13
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-peb-av-10.dtso2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi31
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi18
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2-common.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi12
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk.dts22
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-libra-rdk-fpsc-lvds-etml1010g3dra.dtso44
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-libra-rdk-fpsc.dts290
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-nitrogen-enc-carrier-board.dts452
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-nitrogen-som.dtsi409
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi28
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-phycore-fpsc.dtsi796
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-toradex-smarc-dev.dts304
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-toradex-smarc.dtsi1314
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mp-ras314-imx219.dtso107
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi12
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi36
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-evk.dts20
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi40
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-apalis.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-ss-hsio.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp-mek-pcie-ep.dtso22
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp-mek.dts45
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp-ss-hsio.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp-tqma8xqps-mb-smarc-2.dts16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp-tqma8xqps.dtsi14
-rw-r--r--arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi29
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-phyboard-nash.dts317
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-phyboard-segin.dts243
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-phycore-som.dtsi165
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352-mba91xxca.dts749
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts8
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts8
-rw-r--r--arch/arm64/boot/dts/freescale/imx94-clock.h193
-rw-r--r--arch/arm64/boot/dts/freescale/imx94-pinfunc.h1570
-rw-r--r--arch/arm64/boot/dts/freescale/imx94-power.h41
-rw-r--r--arch/arm64/boot/dts/freescale/imx94.dtsi1148
-rw-r--r--arch/arm64/boot/dts/freescale/imx943-evk.dts195
-rw-r--r--arch/arm64/boot/dts/freescale/imx943.dtsi148
-rw-r--r--arch/arm64/boot/dts/freescale/imx95-15x15-evk.dts23
-rw-r--r--arch/arm64/boot/dts/freescale/imx95-19x19-evk-sof.dts84
-rw-r--r--arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts30
-rw-r--r--arch/arm64/boot/dts/freescale/imx95-tqma9596sa-mb-smarc-2.dts324
-rw-r--r--arch/arm64/boot/dts/freescale/imx95-tqma9596sa.dtsi698
-rw-r--r--arch/arm64/boot/dts/freescale/imx95.dtsi38
-rw-r--r--arch/arm64/boot/dts/freescale/s32gxxxa-rdb.dtsi5
-rw-r--r--arch/arm64/boot/dts/freescale/tqma8xxs-mb-smarc-2.dtsi194
-rw-r--r--arch/arm64/boot/dts/freescale/tqma8xxs.dtsi768
-rw-r--r--arch/arm64/boot/dts/intel/Makefile1
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex.dtsi6
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi4
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex5_socdk.dts51
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_nand.dts89
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi8
-rw-r--r--arch/arm64/boot/dts/marvell/armada-7040-db.dts1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-db.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/cn9130-db.dtsi1
-rw-r--r--arch/arm64/boot/dts/mediatek/Makefile4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6357.dtsi10
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6359.dtsi4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6893-pinfunc.h1356
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4-2g5.dts11
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dts400
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dtsi450
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7988a.dtsi115
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi11
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183.dtsi4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-ponyta-sku0.dts18
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-ponyta-sku1.dts22
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-ponyta.dtsi49
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie.dtsi45
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188-evb.dts6
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188.dtsi345
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195.dtsi51
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8196-pinfunc.h1574
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8365-evk.dts40
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8390-genio-common.dtsi169
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts127
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts59
-rw-r--r--arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi2
-rw-r--r--arch/arm64/boot/dts/nuvoton/nuvoton-common-npcm8xx.dtsi65
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts115
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi6
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p3509-0000+p3636-0001.dts8
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186.dtsi162
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts6
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi6
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194.dtsi30
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts6
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi75
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts6
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi9
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234.dtsi22
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile61
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dts21
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-schneider-hmibsc.dts41
-rw-r--r--arch/arm64/boot/dts/qcom/apq8039-t2.dts28
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c.dts11
-rw-r--r--arch/arm64/boot/dts/qcom/ipq5018-rdp432-c2.dts40
-rw-r--r--arch/arm64/boot/dts/qcom/ipq5018.dtsi246
-rw-r--r--arch/arm64/boot/dts/qcom/ipq5332-rdp441.dts76
-rw-r--r--arch/arm64/boot/dts/qcom/ipq5332.dtsi387
-rw-r--r--arch/arm64/boot/dts/qcom/ipq5424-rdp466.dts91
-rw-r--r--arch/arm64/boot/dts/qcom/ipq5424.dtsi733
-rw-r--r--arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/ipq6018-mp5496.dtsi44
-rw-r--r--arch/arm64/boot/dts/qcom/ipq6018.dtsi37
-rw-r--r--arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi55
-rw-r--r--arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts12
-rw-r--r--arch/arm64/boot/dts/qcom/ipq9574.dtsi146
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-asus-z00l.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-huawei-g7.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-lg-c50.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-lg-m216.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-modem-qdsp6.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-motorola-common.dtsi11
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-mtp.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-gt5-common.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-j5-common.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-wingtech-wt865x8.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi64
-rw-r--r--arch/arm64/boot/dts/qcom/msm8917-xiaomi-riva.dts27
-rw-r--r--arch/arm64/boot/dts/qcom/msm8917.dtsi11
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939-huawei-kiwi.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939.dtsi75
-rw-r--r--arch/arm64/boot/dts/qcom/msm8953.dtsi158
-rw-r--r--arch/arm64/boot/dts/qcom/msm8976.dtsi17
-rw-r--r--arch/arm64/boot/dts/qcom/msm8992-lg-h815.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-oneplus-common.dtsi11
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-oneplus3.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-oneplus3t.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts11
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi21
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-natrium.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-scorpio.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998-fxtec-pro1.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998-lenovo-miix-630.dts8
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998-mtp.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998.dtsi23
-rw-r--r--arch/arm64/boot/dts/qcom/pm8937.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/qcm2290.dtsi75
-rw-r--r--arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts250
-rw-r--r--arch/arm64/boot/dts/qcom/qcm6490-idp.dts28
-rw-r--r--arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/qcs615.dtsi72
-rw-r--r--arch/arm64/boot/dts/qcom/qcs6490-rb3gen2-industrial-mezzanine.dtso21
-rw-r--r--arch/arm64/boot/dts/qcom/qcs6490-rb3gen2-vision-mezzanine.dtso89
-rw-r--r--arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts287
-rw-r--r--arch/arm64/boot/dts/qcom/qcs8300-pmics.dtsi51
-rw-r--r--arch/arm64/boot/dts/qcom/qcs8300-ride.dts37
-rw-r--r--arch/arm64/boot/dts/qcom/qcs8300.dtsi2057
-rw-r--r--arch/arm64/boot/dts/qcom/qdu1000.dtsi9
-rw-r--r--arch/arm64/boot/dts/qcom/qrb2210-rb1.dts85
-rw-r--r--arch/arm64/boot/dts/qcom/qrb4210-rb2.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/qrb5165-rb5-vision-mezzanine.dtso11
-rw-r--r--arch/arm64/boot/dts/qcom/qrb5165-rb5.dts31
-rw-r--r--arch/arm64/boot/dts/qcom/sa8155p-adp.dts5
-rw-r--r--arch/arm64/boot/dts/qcom/sa8540p-ride.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi138
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p.dtsi1179
-rw-r--r--arch/arm64/boot/dts/qcom/sar2130p.dtsi455
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts8
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-el2.dtso22
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel360.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180.dtsi15
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-rt5682-3mic.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-wcd9385.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280-idp.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280.dtsi285
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x.dtsi13
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-crd.dts30
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-el2.dtso44
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts22
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-microsoft-arcata.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-microsoft-blackrock.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi58
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp.dtsi673
-rw-r--r--arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sdm630-sony-xperia-nile.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sdm630.dtsi20
-rw-r--r--arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts46
-rw-r--r--arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/sdm660.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/sdm670.dtsi215
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso11
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-db845c.dts29
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-mtp.dts9
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-samsung-starqltechn.dts600
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-common.dtsi43
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-ebbg.dts23
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-tianma.dts23
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts10
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi27
-rw-r--r--arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts17
-rw-r--r--arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts11
-rw-r--r--arch/arm64/boot/dts/qcom/sdx75-idp.dts18
-rw-r--r--arch/arm64/boot/dts/qcom/sdx75.dtsi49
-rw-r--r--arch/arm64/boot/dts/qcom/sm4450.dtsi68
-rw-r--r--arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm6115.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/sm6115p-lenovo-j606f.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm6125-xiaomi-ginkgo.dts295
-rw-r--r--arch/arm64/boot/dts/qcom/sm6125.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts7
-rw-r--r--arch/arm64/boot/dts/qcom/sm6350.dtsi212
-rw-r--r--arch/arm64/boot/dts/qcom/sm7325-nothing-spacewar.dts210
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-hdk.dts5
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts8
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-mtp.dts8
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150.dtsi21
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi5
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250.dtsi25
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350.dtsi34
-rw-r--r--arch/arm64/boot/dts/qcom/sm8450.dtsi88
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-hdk.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-mtp.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-qrd.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts12
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550.dtsi704
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650.dtsi2675
-rw-r--r--arch/arm64/boot/dts/qcom/sm8750-mtp.dts22
-rw-r--r--arch/arm64/boot/dts/qcom/sm8750-qrd.dts21
-rw-r--r--arch/arm64/boot/dts/qcom/sm8750.dtsi728
-rw-r--r--arch/arm64/boot/dts/qcom/x1-crd.dtsi1749
-rw-r--r--arch/arm64/boot/dts/qcom/x1-el2.dtso52
-rw-r--r--arch/arm64/boot/dts/qcom/x1e001de-devkit.dts148
-rw-r--r--arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s-oled.dts12
-rw-r--r--arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts1126
-rw-r--r--arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dtsi1576
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts314
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-crd.dts1270
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts22
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-hp-elitebook-ultra-g1q.dts30
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-hp-omnibook-x14.dts173
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts492
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi23
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi5
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-qcp.dts154
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100.dtsi1111
-rw-r--r--arch/arm64/boot/dts/qcom/x1p42100-crd.dts17
-rw-r--r--arch/arm64/boot/dts/qcom/x1p42100.dtsi81
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile7
-rw-r--r--arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0.dtsi60
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779f4.dtsi17
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g0.dtsi30
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g3-sparrow-hawk-fan-pwm.dtso43
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g3-sparrow-hawk.dts772
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779h0.dtsi21
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044.dtsi115
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044l2-smarc.dts7
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g054.dtsi115
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g054l2-smarc.dts7
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g047.dtsi259
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g047e57-smarc.dts102
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g056.dtsi282
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g056n48-rzv2n-evk.dts114
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g057.dtsi21
-rw-r--r--arch/arm64/boot/dts/renesas/renesas-smarc2.dtsi47
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi5
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi215
-rw-r--r--arch/arm64/boot/dts/renesas/white-hawk-ard-audio-da7212.dtso2
-rw-r--r--arch/arm64/boot/dts/renesas/white-hawk-single.dtsi8
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile15
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-cobra-ltk050h3146w-a2.dts39
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-cobra-ltk050h3146w.dts39
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-cobra-ltk050h3148w.dts39
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-cobra-ltk500hd1829.dts73
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-cobra.dtsi566
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi3
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-engicam-ctouch2.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-engicam-px30-core-edimm2.2.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-pp1516-ltk050h3146w-a2.dts39
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-pp1516-ltk050h3148w.dts39
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-pp1516.dtsi602
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi22
-rw-r--r--arch/arm64/boot/dts/rockchip/px30.dtsi6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-evb-ind.dts494
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts8
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi48
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3528-radxa-e20c.dts149
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3528.dtsi519
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3562-evb2-v10.dts456
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3562-pinctrl.dtsi2352
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3562.dtsi1185
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-pinetab2.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dts19
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi5
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts17
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568.dtsi8
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts89
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts17
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3576-roc-pc.dts47
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3576.dtsi502
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-armsom-w3.dts101
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-base.dtsi150
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts62
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts47
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-evb2-v10.dts931
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi28
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts8
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi75
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi36
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-rock-5b-plus.dts113
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts952
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dtsi945
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou-video-demo.dtso153
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts8
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588j.dtsi53
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-evb1-v10.dts55
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts8
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts77
-rw-r--r--arch/arm64/boot/dts/st/stm32mp211.dtsi8
-rw-r--r--arch/arm64/boot/dts/st/stm32mp231.dtsi9
-rw-r--r--arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi51
-rw-r--r--arch/arm64/boot/dts/st/stm32mp251.dtsi240
-rw-r--r--arch/arm64/boot/dts/st/stm32mp257f-ev1.dts40
-rw-r--r--arch/arm64/boot/dts/tesla/fsd-evb.dts20
-rw-r--r--arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi112
-rw-r--r--arch/arm64/boot/dts/tesla/fsd.dtsi50
-rw-r--r--arch/arm64/boot/dts/ti/Makefile28
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts12
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-main.dtsi96
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi36
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-pocketbeagle2.dts521
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-verdin-yavia.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi25
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625-beagleplay-csi2-ov5640.dtso31
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625-beagleplay-csi2-tevi-ov5640.dtso31
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-main.dtsi14
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi25
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi107
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-thermal.dtsi57
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi25
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a7-sk.dts149
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a7.dtsi4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi11
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-verdin-dahlia.dtsi228
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-verdin-dev.dtsi245
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-verdin-ivy.dtsi629
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-verdin-mallow.dtsi213
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-verdin-nonwifi.dtsi15
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-verdin-wifi.dtsi31
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-verdin-yavia.dtsi219
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-verdin.dtsi1404
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-sk.dts128
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-dahlia.dts22
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-dev.dts22
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-ivy.dts22
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-mallow.dts22
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-yavia.dts22
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-dahlia.dts22
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-dev.dts22
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-ivy.dts22
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-mallow.dts22
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-yavia.dts22
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra-gpio-fan.dtso14
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi76
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-imx219.dtso34
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-ov5640.dtso34
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-tevi-ov5640.dtso34
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-main.dtsi13
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-evm-pcie0-ep.dtso2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-evm.dts21
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-sk.dts20
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-main.dtsi4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am654-base-board-rocktech-rk101-panel.dtso12
-rw-r--r--arch/arm64/boot/dts/ti/k3-am654-base-board.dts1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am68-phyboard-izar.dts575
-rw-r--r--arch/arm64/boot/dts/ti/k3-am68-phycore-som.dtsi601
-rw-r--r--arch/arm64/boot/dts/ti/k3-am68-sk-base-board-pcie1-ep.dtso2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts13
-rw-r--r--arch/arm64/boot/dts/ti/k3-am69-sk.dts1
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-evm-pcie1-ep.dtso2
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-main.dtsi13
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-common-proc-board-infotainment.dtso57
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts7
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-evm-pcie0-ep.dtso2
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-evm-pcie1-ep.dtso2
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-main.dtsi40
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-sk-csi2-dual-imx219.dtso35
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-sk.dts31
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-evm-pcie1-ep.dtso2
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi27
-rw-r--r--arch/arm64/boot/dts/ti/k3-j722s-evm-csi2-quad-rpi-cam-imx219.dtso329
-rw-r--r--arch/arm64/boot/dts/ti/k3-j722s-evm-csi2-quad-tevi-ov5640.dtso323
-rw-r--r--arch/arm64/boot/dts/ti/k3-j722s-evm.dts46
-rw-r--r--arch/arm64/boot/dts/ti/k3-j722s-main.dtsi14
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-evm-usxgmii-exp1-exp2.dtso1
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi6
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-usb0-type-a.dtso29
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi19
-rw-r--r--arch/arm64/configs/defconfig64
-rw-r--r--arch/arm64/crypto/Kconfig53
-rw-r--r--arch/arm64/crypto/Makefile20
-rw-r--r--arch/arm64/crypto/aes-glue.c124
-rw-r--r--arch/arm64/crypto/chacha-neon-glue.c237
-rw-r--r--arch/arm64/crypto/ghash-ce-glue.c143
-rw-r--r--arch/arm64/crypto/poly1305-glue.c232
-rw-r--r--arch/arm64/crypto/polyval-ce-glue.c73
-rw-r--r--arch/arm64/crypto/sha1-ce-glue.c70
-rw-r--r--arch/arm64/crypto/sha2-ce-glue.c192
-rw-r--r--arch/arm64/crypto/sha256-glue.c194
-rw-r--r--arch/arm64/crypto/sha3-ce-glue.c111
-rw-r--r--arch/arm64/crypto/sha512-ce-glue.c49
-rw-r--r--arch/arm64/crypto/sha512-glue.c35
-rw-r--r--arch/arm64/crypto/sm3-ce-glue.c48
-rw-r--r--arch/arm64/crypto/sm3-neon-glue.c48
-rw-r--r--arch/arm64/crypto/sm4-ce-glue.c100
-rw-r--r--arch/arm64/hyperv/mshyperv.c53
-rw-r--r--arch/arm64/include/asm/cpu.h1
-rw-r--r--arch/arm64/include/asm/cputype.h4
-rw-r--r--arch/arm64/include/asm/el2_setup.h28
-rw-r--r--arch/arm64/include/asm/esr.h73
-rw-r--r--arch/arm64/include/asm/fixmap.h6
-rw-r--r--arch/arm64/include/asm/fpsimd.h64
-rw-r--r--arch/arm64/include/asm/hardirq.h4
-rw-r--r--arch/arm64/include/asm/hugetlb.h29
-rw-r--r--arch/arm64/include/asm/insn.h1
-rw-r--r--arch/arm64/include/asm/kvm_arm.h189
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h7
-rw-r--r--arch/arm64/include/asm/kvm_host.h96
-rw-r--r--arch/arm64/include/asm/kvm_nested.h100
-rw-r--r--arch/arm64/include/asm/kvm_pgtable.h7
-rw-r--r--arch/arm64/include/asm/kvm_pkvm.h8
-rw-r--r--arch/arm64/include/asm/kvm_ras.h2
-rw-r--r--arch/arm64/include/asm/mem_encrypt.h2
-rw-r--r--arch/arm64/include/asm/mmu.h11
-rw-r--r--arch/arm64/include/asm/pgtable-types.h20
-rw-r--r--arch/arm64/include/asm/pgtable.h243
-rw-r--r--arch/arm64/include/asm/ptdump.h24
-rw-r--r--arch/arm64/include/asm/rqspinlock.h2
-rw-r--r--arch/arm64/include/asm/rsi_cmds.h2
-rw-r--r--arch/arm64/include/asm/rwonce.h4
-rw-r--r--arch/arm64/include/asm/sections.h1
-rw-r--r--arch/arm64/include/asm/spectre.h3
-rw-r--r--arch/arm64/include/asm/syscall.h29
-rw-r--r--arch/arm64/include/asm/sysreg.h54
-rw-r--r--arch/arm64/include/asm/thread_info.h18
-rw-r--r--arch/arm64/include/asm/vdso/gettimeofday.h31
-rw-r--r--arch/arm64/include/asm/virt.h3
-rw-r--r--arch/arm64/include/asm/vmalloc.h45
-rw-r--r--arch/arm64/include/asm/vncr_mapping.h5
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h9
-rw-r--r--arch/arm64/kernel/asm-offsets.c2
-rw-r--r--arch/arm64/kernel/cpu_errata.c16
-rw-r--r--arch/arm64/kernel/cpufeature.c39
-rw-r--r--arch/arm64/kernel/cpuinfo.c110
-rw-r--r--arch/arm64/kernel/efi-header.S6
-rw-r--r--arch/arm64/kernel/efi.c8
-rw-r--r--arch/arm64/kernel/entry-common.c48
-rw-r--r--arch/arm64/kernel/fpsimd.c376
-rw-r--r--arch/arm64/kernel/head.S6
-rw-r--r--arch/arm64/kernel/hyp-stub.S2
-rw-r--r--arch/arm64/kernel/image-vars.h67
-rw-r--r--arch/arm64/kernel/kaslr.c2
-rw-r--r--arch/arm64/kernel/pi/kaslr_early.c4
-rw-r--r--arch/arm64/kernel/pi/map_kernel.c27
-rw-r--r--arch/arm64/kernel/pi/map_range.c4
-rw-r--r--arch/arm64/kernel/pi/pi.h3
-rw-r--r--arch/arm64/kernel/process.c124
-rw-r--r--arch/arm64/kernel/proton-pack.c15
-rw-r--r--arch/arm64/kernel/ptrace.c137
-rw-r--r--arch/arm64/kernel/setup.c10
-rw-r--r--arch/arm64/kernel/signal.c149
-rw-r--r--arch/arm64/kernel/signal32.c11
-rw-r--r--arch/arm64/kernel/traps.c4
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S28
-rw-r--r--arch/arm64/kvm/Kconfig1
-rw-r--r--arch/arm64/kvm/Makefile2
-rw-r--r--arch/arm64/kvm/arch_timer.c4
-rw-r--r--arch/arm64/kvm/arm.c104
-rw-r--r--arch/arm64/kvm/at.c186
-rw-r--r--arch/arm64/kvm/config.c1085
-rw-r--r--arch/arm64/kvm/emulate-nested.c590
-rw-r--r--arch/arm64/kvm/handle_exit.c84
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/fault.h70
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h173
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/mem_protect.h14
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/memory.h58
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/mm.h4
-rw-r--r--arch/arm64/kvm/hyp/nvhe/Makefile6
-rw-r--r--arch/arm64/kvm/hyp/nvhe/ffa.c9
-rw-r--r--arch/arm64/kvm/hyp/nvhe/host.S2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-init.S4
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c20
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp.lds.S2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mem_protect.c521
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mm.c97
-rw-r--r--arch/arm64/kvm/hyp/nvhe/pkvm.c47
-rw-r--r--arch/arm64/kvm/hyp/nvhe/setup.c27
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c14
-rw-r--r--arch/arm64/kvm/hyp/pgtable.c6
-rw-r--r--arch/arm64/kvm/hyp/vgic-v3-sr.c44
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c48
-rw-r--r--arch/arm64/kvm/hyp/vhe/tlb.c4
-rw-r--r--arch/arm64/kvm/hypercalls.c10
-rw-r--r--arch/arm64/kvm/mmu.c50
-rw-r--r--arch/arm64/kvm/nested.c848
-rw-r--r--arch/arm64/kvm/pkvm.c150
-rw-r--r--arch/arm64/kvm/pmu-emul.c60
-rw-r--r--arch/arm64/kvm/reset.c2
-rw-r--r--arch/arm64/kvm/sys_regs.c279
-rw-r--r--arch/arm64/kvm/trace_arm.h6
-rw-r--r--arch/arm64/kvm/vgic/vgic-debug.c227
-rw-r--r--arch/arm64/kvm/vgic/vgic-init.c31
-rw-r--r--arch/arm64/kvm/vgic/vgic-its.c95
-rw-r--r--arch/arm64/kvm/vgic/vgic-kvm-device.c12
-rw-r--r--arch/arm64/kvm/vgic/vgic-v3-nested.c3
-rw-r--r--arch/arm64/kvm/vgic/vgic-v4.c92
-rw-r--r--arch/arm64/kvm/vgic/vgic.h33
-rw-r--r--arch/arm64/lib/Makefile7
-rw-r--r--arch/arm64/lib/crc-t10dif.c (renamed from arch/arm64/lib/crc-t10dif-glue.c)6
-rw-r--r--arch/arm64/lib/crc32-core.S (renamed from arch/arm64/lib/crc32.S)0
-rw-r--r--arch/arm64/lib/crc32.c (renamed from arch/arm64/lib/crc32-glue.c)0
-rw-r--r--arch/arm64/lib/crypto/.gitignore3
-rw-r--r--arch/arm64/lib/crypto/Kconfig20
-rw-r--r--arch/arm64/lib/crypto/Makefile24
-rw-r--r--arch/arm64/lib/crypto/chacha-neon-core.S (renamed from arch/arm64/crypto/chacha-neon-core.S)2
-rw-r--r--arch/arm64/lib/crypto/chacha-neon-glue.c119
-rw-r--r--arch/arm64/lib/crypto/poly1305-armv8.pl (renamed from arch/arm64/crypto/poly1305-armv8.pl)0
-rw-r--r--arch/arm64/lib/crypto/poly1305-glue.c73
-rw-r--r--arch/arm64/lib/crypto/sha2-armv8.pl (renamed from arch/arm64/crypto/sha512-armv8.pl)2
-rw-r--r--arch/arm64/lib/crypto/sha256-ce.S (renamed from arch/arm64/crypto/sha2-ce-core.S)41
-rw-r--r--arch/arm64/lib/crypto/sha256.c75
-rw-r--r--arch/arm64/lib/insn.c60
-rw-r--r--arch/arm64/lib/xor-neon.c2
-rw-r--r--arch/arm64/mm/hugetlbpage.c73
-rw-r--r--arch/arm64/mm/init.c20
-rw-r--r--arch/arm64/mm/mmap.c2
-rw-r--r--arch/arm64/mm/mmu.c93
-rw-r--r--arch/arm64/mm/pageattr.c6
-rw-r--r--arch/arm64/mm/proc.S19
-rw-r--r--arch/arm64/mm/ptdump.c50
-rw-r--r--arch/arm64/net/bpf_jit_comp.c299
-rw-r--r--arch/arm64/tools/cpucaps2
-rw-r--r--arch/arm64/tools/sysreg1019
-rw-r--r--arch/arm64/xen/hypercall.S21
-rw-r--r--arch/csky/include/asm/pgalloc.h2
-rw-r--r--arch/csky/include/asm/pgtable.h5
-rw-r--r--arch/csky/include/asm/syscall.h13
-rw-r--r--arch/csky/kernel/perf_event.c3
-rw-r--r--arch/hexagon/configs/comet_defconfig3
-rw-r--r--arch/hexagon/include/asm/pgtable.h3
-rw-r--r--arch/hexagon/include/asm/syscall.h21
-rw-r--r--arch/loongarch/Kconfig1
-rw-r--r--arch/loongarch/configs/loongson3_defconfig2
-rw-r--r--arch/loongarch/include/asm/asm-prototypes.h8
-rw-r--r--arch/loongarch/include/asm/fpu.h39
-rw-r--r--arch/loongarch/include/asm/kvm_host.h2
-rw-r--r--arch/loongarch/include/asm/kvm_vcpu.h2
-rw-r--r--arch/loongarch/include/asm/lbt.h10
-rw-r--r--arch/loongarch/include/asm/pgalloc.h2
-rw-r--r--arch/loongarch/include/asm/pgtable.h7
-rw-r--r--arch/loongarch/include/asm/ptrace.h6
-rw-r--r--arch/loongarch/include/asm/syscall.h15
-rw-r--r--arch/loongarch/include/asm/uprobes.h1
-rw-r--r--arch/loongarch/kernel/Makefile8
-rw-r--r--arch/loongarch/kernel/efi-header.S4
-rw-r--r--arch/loongarch/kernel/entry.S22
-rw-r--r--arch/loongarch/kernel/fpu.S6
-rw-r--r--arch/loongarch/kernel/genex.S7
-rw-r--r--arch/loongarch/kernel/head.S2
-rw-r--r--arch/loongarch/kernel/kfpu.c22
-rw-r--r--arch/loongarch/kernel/lbt.S4
-rw-r--r--arch/loongarch/kernel/perf_event.c3
-rw-r--r--arch/loongarch/kernel/process.c33
-rw-r--r--arch/loongarch/kernel/signal.c21
-rw-r--r--arch/loongarch/kernel/time.c2
-rw-r--r--arch/loongarch/kernel/traps.c20
-rw-r--r--arch/loongarch/kernel/uprobes.c11
-rw-r--r--arch/loongarch/kvm/Makefile2
-rw-r--r--arch/loongarch/kvm/exit.c37
-rw-r--r--arch/loongarch/kvm/intc/ipi.c4
-rw-r--r--arch/loongarch/kvm/main.c4
-rw-r--r--arch/loongarch/kvm/mmu.c15
-rw-r--r--arch/loongarch/kvm/vcpu.c8
-rw-r--r--arch/loongarch/lib/crc32-loongarch.c4
-rw-r--r--arch/loongarch/mm/hugetlbpage.c2
-rw-r--r--arch/loongarch/mm/init.c3
-rw-r--r--arch/loongarch/mm/pgtable.c9
-rw-r--r--arch/loongarch/power/hibernate.c3
-rw-r--r--arch/m68k/coldfire/gpio.c8
-rw-r--r--arch/m68k/coldfire/m5272.c2
-rw-r--r--arch/m68k/configs/amcore_defconfig2
-rw-r--r--arch/m68k/configs/amiga_defconfig8
-rw-r--r--arch/m68k/configs/apollo_defconfig8
-rw-r--r--arch/m68k/configs/atari_defconfig8
-rw-r--r--arch/m68k/configs/bvme6000_defconfig8
-rw-r--r--arch/m68k/configs/hp300_defconfig8
-rw-r--r--arch/m68k/configs/mac_defconfig8
-rw-r--r--arch/m68k/configs/multi_defconfig8
-rw-r--r--arch/m68k/configs/mvme147_defconfig8
-rw-r--r--arch/m68k/configs/mvme16x_defconfig8
-rw-r--r--arch/m68k/configs/q40_defconfig8
-rw-r--r--arch/m68k/configs/sun3_defconfig8
-rw-r--r--arch/m68k/configs/sun3x_defconfig8
-rw-r--r--arch/m68k/include/asm/mcf_pgalloc.h8
-rw-r--r--arch/m68k/include/asm/mcf_pgtable.h6
-rw-r--r--arch/m68k/include/asm/motorola_pgalloc.h10
-rw-r--r--arch/m68k/include/asm/motorola_pgtable.h6
-rw-r--r--arch/m68k/include/asm/sun3_pgtable.h6
-rw-r--r--arch/m68k/include/asm/syscall.h7
-rw-r--r--arch/m68k/kernel/setup_mm.c2
-rw-r--r--arch/m68k/kernel/setup_no.c3
-rw-r--r--arch/m68k/kernel/uboot.c2
-rw-r--r--arch/m68k/mac/config.c2
-rw-r--r--arch/m68k/mm/motorola.c9
-rw-r--r--arch/microblaze/include/asm/pgtable.h8
-rw-r--r--arch/microblaze/include/asm/syscall.h7
-rw-r--r--arch/microblaze/kernel/timer.c2
-rw-r--r--arch/microblaze/mm/pgtable.c2
-rw-r--r--arch/mips/ath25/ar2315.c4
-rw-r--r--arch/mips/ath25/ar5312.c4
-rw-r--r--arch/mips/bcm47xx/setup.c2
-rw-r--r--arch/mips/cavium-octeon/Kconfig6
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-md5.c121
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha1.c138
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha256.c250
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha512.c157
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c25
-rw-r--r--arch/mips/configs/ath79_defconfig1
-rw-r--r--arch/mips/configs/bigsur_defconfig2
-rw-r--r--arch/mips/configs/cavium_octeon_defconfig1
-rw-r--r--arch/mips/configs/decstation_64_defconfig1
-rw-r--r--arch/mips/configs/decstation_defconfig1
-rw-r--r--arch/mips/configs/decstation_r4k_defconfig1
-rw-r--r--arch/mips/configs/fuloong2e_defconfig1
-rw-r--r--arch/mips/configs/gcw0_defconfig1
-rw-r--r--arch/mips/configs/gpr_defconfig3
-rw-r--r--arch/mips/configs/ip22_defconfig1
-rw-r--r--arch/mips/configs/ip27_defconfig1
-rw-r--r--arch/mips/configs/ip28_defconfig1
-rw-r--r--arch/mips/configs/ip30_defconfig1
-rw-r--r--arch/mips/configs/ip32_defconfig1
-rw-r--r--arch/mips/configs/lemote2f_defconfig2
-rw-r--r--arch/mips/configs/mtx1_defconfig3
-rw-r--r--arch/mips/configs/omega2p_defconfig1
-rw-r--r--arch/mips/configs/rb532_defconfig3
-rw-r--r--arch/mips/configs/rt305x_defconfig1
-rw-r--r--arch/mips/configs/sb1250_swarm_defconfig1
-rw-r--r--arch/mips/configs/vocore2_defconfig1
-rw-r--r--arch/mips/configs/xway_defconfig1
-rw-r--r--arch/mips/crypto/Kconfig33
-rw-r--r--arch/mips/crypto/Makefile17
-rw-r--r--arch/mips/crypto/chacha-glue.c146
-rw-r--r--arch/mips/crypto/poly1305-glue.c192
-rw-r--r--arch/mips/include/asm/idle.h5
-rw-r--r--arch/mips/include/asm/pgalloc.h2
-rw-r--r--arch/mips/include/asm/pgtable.h9
-rw-r--r--arch/mips/include/asm/ptrace.h3
-rw-r--r--arch/mips/include/asm/socket.h9
-rw-r--r--arch/mips/include/asm/syscall.h43
-rw-r--r--arch/mips/include/uapi/asm/socket.h2
-rw-r--r--arch/mips/kernel/genex.S71
-rw-r--r--arch/mips/kernel/idle.c7
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c3
-rw-r--r--arch/mips/kernel/smp-cps.c4
-rw-r--r--arch/mips/kernel/traps.c10
-rw-r--r--arch/mips/lantiq/irq.c2
-rw-r--r--arch/mips/lib/Makefile2
-rw-r--r--arch/mips/lib/crc32-mips.c4
-rw-r--r--arch/mips/lib/crypto/.gitignore2
-rw-r--r--arch/mips/lib/crypto/Kconfig12
-rw-r--r--arch/mips/lib/crypto/Makefile19
-rw-r--r--arch/mips/lib/crypto/chacha-core.S (renamed from arch/mips/crypto/chacha-core.S)0
-rw-r--r--arch/mips/lib/crypto/chacha-glue.c29
-rw-r--r--arch/mips/lib/crypto/poly1305-glue.c33
-rw-r--r--arch/mips/lib/crypto/poly1305-mips.pl (renamed from arch/mips/crypto/poly1305-mips.pl)12
-rw-r--r--arch/mips/mm/pgtable-32.c10
-rw-r--r--arch/mips/mm/pgtable-64.c9
-rw-r--r--arch/mips/pci/pci-ar2315.c4
-rw-r--r--arch/mips/pci/pci-rt3883.c7
-rw-r--r--arch/mips/ralink/irq.c2
-rw-r--r--arch/nios2/include/asm/pgtable.h22
-rw-r--r--arch/nios2/include/asm/syscall.h16
-rw-r--r--arch/nios2/kernel/cpuinfo.c5
-rw-r--r--arch/nios2/kernel/irq.c3
-rw-r--r--arch/nios2/mm/tlb.c18
-rw-r--r--arch/openrisc/include/asm/cacheflush.h17
-rw-r--r--arch/openrisc/include/asm/cpuinfo.h24
-rw-r--r--arch/openrisc/include/asm/pgtable.h2
-rw-r--r--arch/openrisc/include/asm/syscall.h13
-rw-r--r--arch/openrisc/kernel/Makefile2
-rw-r--r--arch/openrisc/kernel/cacheinfo.c104
-rw-r--r--arch/openrisc/kernel/dma.c18
-rw-r--r--arch/openrisc/kernel/setup.c45
-rw-r--r--arch/openrisc/mm/cache.c56
-rw-r--r--arch/openrisc/mm/init.c5
-rw-r--r--arch/openrisc/mm/ioremap.c2
-rw-r--r--arch/parisc/boot/compressed/Makefile1
-rw-r--r--arch/parisc/configs/generic-32bit_defconfig4
-rw-r--r--arch/parisc/configs/generic-64bit_defconfig2
-rw-r--r--arch/parisc/include/asm/alternative.h4
-rw-r--r--arch/parisc/include/asm/assembly.h4
-rw-r--r--arch/parisc/include/asm/barrier.h4
-rw-r--r--arch/parisc/include/asm/cache.h4
-rw-r--r--arch/parisc/include/asm/current.h4
-rw-r--r--arch/parisc/include/asm/dwarf.h4
-rw-r--r--arch/parisc/include/asm/fixmap.h4
-rw-r--r--arch/parisc/include/asm/ftrace.h4
-rw-r--r--arch/parisc/include/asm/jump_label.h4
-rw-r--r--arch/parisc/include/asm/kexec.h4
-rw-r--r--arch/parisc/include/asm/kgdb.h2
-rw-r--r--arch/parisc/include/asm/linkage.h4
-rw-r--r--arch/parisc/include/asm/page.h6
-rw-r--r--arch/parisc/include/asm/pdc.h4
-rw-r--r--arch/parisc/include/asm/pdcpat.h4
-rw-r--r--arch/parisc/include/asm/pgalloc.h2
-rw-r--r--arch/parisc/include/asm/pgtable.h14
-rw-r--r--arch/parisc/include/asm/prefetch.h4
-rw-r--r--arch/parisc/include/asm/processor.h8
-rw-r--r--arch/parisc/include/asm/psw.h4
-rw-r--r--arch/parisc/include/asm/signal.h4
-rw-r--r--arch/parisc/include/asm/smp.h4
-rw-r--r--arch/parisc/include/asm/spinlock_types.h4
-rw-r--r--arch/parisc/include/asm/syscall.h19
-rw-r--r--arch/parisc/include/asm/thread_info.h4
-rw-r--r--arch/parisc/include/asm/traps.h2
-rw-r--r--arch/parisc/include/asm/unistd.h4
-rw-r--r--arch/parisc/include/asm/vdso.h4
-rw-r--r--arch/parisc/include/uapi/asm/pdc.h4
-rw-r--r--arch/parisc/include/uapi/asm/signal.h4
-rw-r--r--arch/parisc/include/uapi/asm/socket.h2
-rw-r--r--arch/parisc/kernel/unaligned.c2
-rw-r--r--arch/parisc/math-emu/driver.c16
-rw-r--r--arch/powerpc/Kconfig11
-rw-r--r--arch/powerpc/boot/Makefile1
-rw-r--r--arch/powerpc/boot/rs6000.h6
-rwxr-xr-xarch/powerpc/boot/wrapper6
-rw-r--r--arch/powerpc/configs/44x/sam440ep_defconfig1
-rw-r--r--arch/powerpc/configs/44x/warp_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc832x_rdb_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_itx_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc837x_rdb_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/ge_imp3a_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/stx_gp3_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/xes_mpc85xx_defconfig1
-rw-r--r--arch/powerpc/configs/86xx-hw.config1
-rw-r--r--arch/powerpc/configs/amigaone_defconfig1
-rw-r--r--arch/powerpc/configs/chrp32_defconfig1
-rw-r--r--arch/powerpc/configs/fsl-emb-nonhw.config1
-rw-r--r--arch/powerpc/configs/g5_defconfig3
-rw-r--r--arch/powerpc/configs/gamecube_defconfig1
-rw-r--r--arch/powerpc/configs/linkstation_defconfig2
-rw-r--r--arch/powerpc/configs/mpc83xx_defconfig1
-rw-r--r--arch/powerpc/configs/mpc866_ads_defconfig1
-rw-r--r--arch/powerpc/configs/mvme5100_defconfig2
-rw-r--r--arch/powerpc/configs/pasemi_defconfig1
-rw-r--r--arch/powerpc/configs/pmac32_defconfig2
-rw-r--r--arch/powerpc/configs/powernv_defconfig2
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig2
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig3
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig3
-rw-r--r--arch/powerpc/configs/ps3_defconfig2
-rw-r--r--arch/powerpc/configs/skiroot_defconfig2
-rw-r--r--arch/powerpc/configs/storcenter_defconfig1
-rw-r--r--arch/powerpc/configs/wii_defconfig1
-rw-r--r--arch/powerpc/crypto/Kconfig44
-rw-r--r--arch/powerpc/crypto/Makefile6
-rw-r--r--arch/powerpc/crypto/aes.c8
-rw-r--r--arch/powerpc/crypto/aes_cbc.c4
-rw-r--r--arch/powerpc/crypto/aes_ctr.c4
-rw-r--r--arch/powerpc/crypto/aes_xts.c4
-rw-r--r--arch/powerpc/crypto/chacha-p10-glue.c221
-rw-r--r--arch/powerpc/crypto/ghash.c91
-rw-r--r--arch/powerpc/crypto/md5-glue.c99
-rw-r--r--arch/powerpc/crypto/poly1305-p10-glue.c186
-rw-r--r--arch/powerpc/crypto/sha1-spe-glue.c130
-rw-r--r--arch/powerpc/crypto/sha1.c101
-rw-r--r--arch/powerpc/crypto/sha256-spe-glue.c235
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/guest-state-buffer.h35
-rw-r--r--arch/powerpc/include/asm/hvcall.h13
-rw-r--r--arch/powerpc/include/asm/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h20
-rw-r--r--arch/powerpc/include/asm/preempt.h16
-rw-r--r--arch/powerpc/include/asm/rtas.h4
-rw-r--r--arch/powerpc/include/asm/syscall.h20
-rw-r--r--arch/powerpc/include/uapi/asm/papr-indices.h41
-rw-r--r--arch/powerpc/include/uapi/asm/papr-physical-attestation.h31
-rw-r--r--arch/powerpc/include/uapi/asm/papr-platform-dump.h16
-rw-r--r--arch/powerpc/kernel/Makefile2
-rw-r--r--arch/powerpc/kernel/fadump.c6
-rw-r--r--arch/powerpc/kernel/interrupt.c6
-rw-r--r--arch/powerpc/kernel/iommu.c5
-rw-r--r--arch/powerpc/kernel/module_64.c4
-rw-r--r--arch/powerpc/kernel/proc_powerpc.c3
-rw-r--r--arch/powerpc/kernel/process.c8
-rw-r--r--arch/powerpc/kernel/rtas.c8
-rw-r--r--arch/powerpc/kernel/trace/ftrace_entry.S2
-rw-r--r--arch/powerpc/kexec/crash.c5
-rw-r--r--arch/powerpc/kvm/Kconfig13
-rw-r--r--arch/powerpc/kvm/book3s_hv.c20
-rw-r--r--arch/powerpc/kvm/book3s_hv_nestedv2.c6
-rw-r--r--arch/powerpc/kvm/booke.c8
-rw-r--r--arch/powerpc/kvm/guest-state-buffer.c39
-rw-r--r--arch/powerpc/kvm/test-guest-state-buffer.c214
-rw-r--r--arch/powerpc/kvm/timing.h4
-rw-r--r--arch/powerpc/lib/Makefile6
-rw-r--r--arch/powerpc/lib/crc-t10dif.c (renamed from arch/powerpc/lib/crc-t10dif-glue.c)18
-rw-r--r--arch/powerpc/lib/crc-vpmsum-template.S (renamed from arch/powerpc/lib/crc32-vpmsum_core.S)0
-rw-r--r--arch/powerpc/lib/crc32.c (renamed from arch/powerpc/lib/crc32-glue.c)17
-rw-r--r--arch/powerpc/lib/crc32c-vpmsum_asm.S2
-rw-r--r--arch/powerpc/lib/crct10dif-vpmsum_asm.S2
-rw-r--r--arch/powerpc/lib/crypto/Kconfig22
-rw-r--r--arch/powerpc/lib/crypto/Makefile10
-rw-r--r--arch/powerpc/lib/crypto/chacha-p10-glue.c100
-rw-r--r--arch/powerpc/lib/crypto/chacha-p10le-8x.S (renamed from arch/powerpc/crypto/chacha-p10le-8x.S)6
-rw-r--r--arch/powerpc/lib/crypto/poly1305-p10-glue.c96
-rw-r--r--arch/powerpc/lib/crypto/poly1305-p10le_64.S (renamed from arch/powerpc/crypto/poly1305-p10le_64.S)0
-rw-r--r--arch/powerpc/lib/crypto/sha256-spe-asm.S (renamed from arch/powerpc/crypto/sha256-spe-asm.S)0
-rw-r--r--arch/powerpc/lib/crypto/sha256.c70
-rw-r--r--arch/powerpc/lib/vmx-helper.c2
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c7
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c20
-rw-r--r--arch/powerpc/mm/fault.c5
-rw-r--r--arch/powerpc/mm/nohash/8xx.c32
-rw-r--r--arch/powerpc/mm/pgtable-frag.c30
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.c46
-rw-r--r--arch/powerpc/net/bpf_jit.h20
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c33
-rw-r--r--arch/powerpc/net/bpf_jit_comp32.c6
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c15
-rw-r--r--arch/powerpc/perf/Makefile2
-rw-r--r--arch/powerpc/perf/core-book3s.c9
-rw-r--r--arch/powerpc/perf/core-fsl-emb.c6
-rw-r--r--arch/powerpc/perf/kvm-hv-pmu.c435
-rw-r--r--arch/powerpc/platforms/44x/gpio.c7
-rw-r--r--arch/powerpc/platforms/44x/uic.c7
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads_cpld.c3
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c12
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pic.c4
-rw-r--r--arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c13
-rw-r--r--arch/powerpc/platforms/85xx/socrates_fpga_pic.c4
-rw-r--r--arch/powerpc/platforms/8xx/cpm1-ic.c5
-rw-r--r--arch/powerpc/platforms/8xx/cpm1.c12
-rw-r--r--arch/powerpc/platforms/8xx/pic.c5
-rw-r--r--arch/powerpc/platforms/embedded6xx/flipper-pic.c7
-rw-r--r--arch/powerpc/platforms/embedded6xx/hlwd-pic.c7
-rw-r--r--arch/powerpc/platforms/powermac/pic.c7
-rw-r--r--arch/powerpc/platforms/powermac/setup.c4
-rw-r--r--arch/powerpc/platforms/powermac/smp.c2
-rw-r--r--arch/powerpc/platforms/powermac/time.c3
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig2
-rw-r--r--arch/powerpc/platforms/powernv/opal-irqchip.c3
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c2
-rw-r--r--arch/powerpc/platforms/ps3/device-init.c3
-rw-r--r--arch/powerpc/platforms/ps3/interrupt.c2
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig2
-rw-r--r--arch/powerpc/platforms/pseries/Makefile3
-rw-r--r--arch/powerpc/platforms/pseries/htmdump.c395
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c2
-rw-r--r--arch/powerpc/platforms/pseries/msi.c9
-rw-r--r--arch/powerpc/platforms/pseries/papr-indices.c488
-rw-r--r--arch/powerpc/platforms/pseries/papr-phy-attest.c288
-rw-r--r--arch/powerpc/platforms/pseries/papr-platform-dump.c411
-rw-r--r--arch/powerpc/platforms/pseries/papr-rtas-common.c311
-rw-r--r--arch/powerpc/platforms/pseries/papr-rtas-common.h61
-rw-r--r--arch/powerpc/platforms/pseries/papr-vpd.c352
-rw-r--r--arch/powerpc/sysdev/cpm2_pic.c5
-rw-r--r--arch/powerpc/sysdev/cpm_common.c6
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c7
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c2
-rw-r--r--arch/powerpc/sysdev/ge/ge_pic.c7
-rw-r--r--arch/powerpc/sysdev/i8259.c4
-rw-r--r--arch/powerpc/sysdev/ipic.c7
-rw-r--r--arch/powerpc/sysdev/mpic.c17
-rw-r--r--arch/powerpc/sysdev/tsi108_pci.c4
-rw-r--r--arch/powerpc/sysdev/xive/common.c2
-rw-r--r--arch/powerpc/xmon/xmon.c2
-rw-r--r--arch/riscv/Kconfig.socs1
-rw-r--r--arch/riscv/boot/dts/renesas/r9a07g043f.dtsi3
-rw-r--r--arch/riscv/boot/dts/sophgo/Makefile1
-rw-r--r--arch/riscv/boot/dts/sophgo/cv1800b.dtsi39
-rw-r--r--arch/riscv/boot/dts/sophgo/cv180x-cpus.dtsi36
-rw-r--r--arch/riscv/boot/dts/sophgo/cv180x.dtsi (renamed from arch/riscv/boot/dts/sophgo/cv18xx.dtsi)97
-rw-r--r--arch/riscv/boot/dts/sophgo/cv1812h.dtsi39
-rw-r--r--arch/riscv/boot/dts/sophgo/cv181x.dtsi2
-rw-r--r--arch/riscv/boot/dts/sophgo/sg2002.dtsi39
-rw-r--r--arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts72
-rw-r--r--arch/riscv/boot/dts/sophgo/sg2042.dtsi32
-rw-r--r--arch/riscv/boot/dts/sophgo/sg2044-cpus.dtsi3002
-rw-r--r--arch/riscv/boot/dts/sophgo/sg2044-reset.h128
-rw-r--r--arch/riscv/boot/dts/sophgo/sg2044-sophgo-srd3-10.dts32
-rw-r--r--arch/riscv/boot/dts/sophgo/sg2044.dtsi86
-rw-r--r--arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts11
-rw-r--r--arch/riscv/boot/dts/spacemit/k1-pinctrl.dtsi3
-rw-r--r--arch/riscv/boot/dts/spacemit/k1.dtsi132
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-common.dtsi52
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-deepcomputing-fml13v01.dts19
-rw-r--r--arch/riscv/boot/dts/thead/th1520.dtsi21
-rw-r--r--arch/riscv/configs/defconfig3
-rw-r--r--arch/riscv/crypto/Kconfig23
-rw-r--r--arch/riscv/crypto/Makefile6
-rw-r--r--arch/riscv/crypto/chacha-riscv64-glue.c101
-rw-r--r--arch/riscv/crypto/ghash-riscv64-glue.c58
-rw-r--r--arch/riscv/crypto/sha256-riscv64-glue.c137
-rw-r--r--arch/riscv/crypto/sha512-riscv64-glue.c45
-rw-r--r--arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S4
-rw-r--r--arch/riscv/crypto/sm3-riscv64-glue.c47
-rw-r--r--arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S4
-rw-r--r--arch/riscv/include/asm/alternative-macros.h19
-rw-r--r--arch/riscv/include/asm/asm-prototypes.h2
-rw-r--r--arch/riscv/include/asm/cacheflush.h15
-rw-r--r--arch/riscv/include/asm/kgdb.h9
-rw-r--r--arch/riscv/include/asm/kvm_aia.h3
-rw-r--r--arch/riscv/include/asm/kvm_host.h17
-rw-r--r--arch/riscv/include/asm/kvm_vcpu_sbi.h3
-rw-r--r--arch/riscv/include/asm/kvm_vcpu_vector.h6
-rw-r--r--arch/riscv/include/asm/pgtable-64.h2
-rw-r--r--arch/riscv/include/asm/pgtable.h2
-rw-r--r--arch/riscv/include/asm/syscall.h26
-rw-r--r--arch/riscv/kernel/Makefile4
-rw-r--r--arch/riscv/kernel/efi-header.S8
-rw-r--r--arch/riscv/kernel/entry.S20
-rw-r--r--arch/riscv/kernel/head.S10
-rw-r--r--arch/riscv/kernel/kgdb.c6
-rw-r--r--arch/riscv/kernel/module-sections.c13
-rw-r--r--arch/riscv/kernel/module.c11
-rw-r--r--arch/riscv/kernel/probes/uprobes.c10
-rw-r--r--arch/riscv/kernel/process.c27
-rw-r--r--arch/riscv/kernel/setup.c36
-rw-r--r--arch/riscv/kernel/traps.c64
-rw-r--r--arch/riscv/kernel/traps_misaligned.c19
-rw-r--r--arch/riscv/kernel/unaligned_access_speed.c35
-rw-r--r--arch/riscv/kvm/Kconfig2
-rw-r--r--arch/riscv/kvm/aia_device.c38
-rw-r--r--arch/riscv/kvm/vcpu.c62
-rw-r--r--arch/riscv/kvm/vcpu_sbi.c32
-rw-r--r--arch/riscv/kvm/vcpu_sbi_hsm.c13
-rw-r--r--arch/riscv/kvm/vcpu_sbi_system.c10
-rw-r--r--arch/riscv/kvm/vcpu_vector.c13
-rw-r--r--arch/riscv/kvm/vm.c13
-rw-r--r--arch/riscv/lib/Makefile1
-rw-r--r--arch/riscv/lib/crypto/Kconfig16
-rw-r--r--arch/riscv/lib/crypto/Makefile7
-rw-r--r--arch/riscv/lib/crypto/chacha-riscv64-glue.c75
-rw-r--r--arch/riscv/lib/crypto/chacha-riscv64-zvkb.S (renamed from arch/riscv/crypto/chacha-riscv64-zvkb.S)71
-rw-r--r--arch/riscv/lib/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S (renamed from arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S)8
-rw-r--r--arch/riscv/lib/crypto/sha256.c67
-rw-r--r--arch/riscv/mm/cacheflush.c2
-rw-r--r--arch/riscv/mm/init.c26
-rw-r--r--arch/riscv/mm/ptdump.c46
-rw-r--r--arch/riscv/net/bpf_jit.h15
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c332
-rw-r--r--arch/riscv/net/bpf_jit_core.c3
-rw-r--r--arch/s390/Kconfig20
-rw-r--r--arch/s390/Makefile2
-rw-r--r--arch/s390/boot/ipl_parm.c7
-rw-r--r--arch/s390/boot/printk.c7
-rw-r--r--arch/s390/boot/startup.c17
-rw-r--r--arch/s390/boot/string.c12
-rw-r--r--arch/s390/configs/debug_defconfig33
-rw-r--r--arch/s390/configs/defconfig29
-rw-r--r--arch/s390/configs/zfcpdump_defconfig1
-rw-r--r--arch/s390/crypto/Kconfig33
-rw-r--r--arch/s390/crypto/Makefile4
-rw-r--r--arch/s390/crypto/chacha-glue.c124
-rw-r--r--arch/s390/crypto/ghash_s390.c104
-rw-r--r--arch/s390/crypto/hmac_s390.c174
-rw-r--r--arch/s390/crypto/paes_s390.c1815
-rw-r--r--arch/s390/crypto/sha.h22
-rw-r--r--arch/s390/crypto/sha1_s390.c20
-rw-r--r--arch/s390/crypto/sha256_s390.c143
-rw-r--r--arch/s390/crypto/sha3_256_s390.c58
-rw-r--r--arch/s390/crypto/sha3_512_s390.c65
-rw-r--r--arch/s390/crypto/sha512_s390.c62
-rw-r--r--arch/s390/crypto/sha_common.c84
-rw-r--r--arch/s390/hypfs/inode.c2
-rw-r--r--arch/s390/include/asm/asce.h36
-rw-r--r--arch/s390/include/asm/cpacf.h18
-rw-r--r--arch/s390/include/asm/cpufeature.h1
-rw-r--r--arch/s390/include/asm/diag288.h41
-rw-r--r--arch/s390/include/asm/futex.h6
-rw-r--r--arch/s390/include/asm/gmap.h2
-rw-r--r--arch/s390/include/asm/gmap_helpers.h15
-rw-r--r--arch/s390/include/asm/machine.h1
-rw-r--r--arch/s390/include/asm/march.h4
-rw-r--r--arch/s390/include/asm/mmu_context.h17
-rw-r--r--arch/s390/include/asm/nospec-branch.h4
-rw-r--r--arch/s390/include/asm/pci_dma.h3
-rw-r--r--arch/s390/include/asm/pgalloc.h2
-rw-r--r--arch/s390/include/asm/pgtable.h11
-rw-r--r--arch/s390/include/asm/pkey.h15
-rw-r--r--arch/s390/include/asm/ptrace.h47
-rw-r--r--arch/s390/include/asm/string.h20
-rw-r--r--arch/s390/include/asm/syscall.h21
-rw-r--r--arch/s390/include/asm/thread_info.h5
-rw-r--r--arch/s390/include/asm/tlb.h5
-rw-r--r--arch/s390/include/asm/uaccess.h12
-rw-r--r--arch/s390/include/asm/uv.h6
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/cert_store.c2
-rw-r--r--arch/s390/kernel/cpufeature.c5
-rw-r--r--arch/s390/kernel/crash_dump.c2
-rw-r--r--arch/s390/kernel/debug.c2
-rw-r--r--arch/s390/kernel/entry.S23
-rw-r--r--arch/s390/kernel/ipl.c27
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c13
-rw-r--r--arch/s390/kernel/perf_cpum_cf_events.c171
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c8
-rw-r--r--arch/s390/kernel/processor.c18
-rw-r--r--arch/s390/kernel/ptrace.c33
-rw-r--r--arch/s390/kernel/smp.c2
-rw-r--r--arch/s390/kernel/uv.c144
-rw-r--r--arch/s390/kvm/Makefile2
-rw-r--r--arch/s390/kvm/diag.c30
-rw-r--r--arch/s390/kvm/gaccess.c3
-rw-r--r--arch/s390/kvm/gmap-vsie.c1
-rw-r--r--arch/s390/kvm/gmap.c121
-rw-r--r--arch/s390/kvm/gmap.h39
-rw-r--r--arch/s390/kvm/intercept.c11
-rw-r--r--arch/s390/kvm/interrupt.c8
-rw-r--r--arch/s390/kvm/kvm-s390.c20
-rw-r--r--arch/s390/kvm/kvm-s390.h42
-rw-r--r--arch/s390/kvm/priv.c6
-rw-r--r--arch/s390/kvm/pv.c61
-rw-r--r--arch/s390/kvm/trace-s390.h4
-rw-r--r--arch/s390/kvm/vsie.c19
-rw-r--r--arch/s390/lib/Makefile3
-rw-r--r--arch/s390/lib/crc32.c (renamed from arch/s390/lib/crc32-glue.c)21
-rw-r--r--arch/s390/lib/crypto/Kconfig13
-rw-r--r--arch/s390/lib/crypto/Makefile7
-rw-r--r--arch/s390/lib/crypto/chacha-glue.c56
-rw-r--r--arch/s390/lib/crypto/chacha-s390.S (renamed from arch/s390/crypto/chacha-s390.S)0
-rw-r--r--arch/s390/lib/crypto/chacha-s390.h (renamed from arch/s390/crypto/chacha-s390.h)0
-rw-r--r--arch/s390/lib/crypto/sha256.c47
-rw-r--r--arch/s390/lib/string.c47
-rw-r--r--arch/s390/lib/uaccess.c5
-rw-r--r--arch/s390/mm/Makefile2
-rw-r--r--arch/s390/mm/dump_pagetables.c46
-rw-r--r--arch/s390/mm/extmem.c18
-rw-r--r--arch/s390/mm/fault.c1
-rw-r--r--arch/s390/mm/gmap.c185
-rw-r--r--arch/s390/mm/gmap_helpers.c221
-rw-r--r--arch/s390/mm/init.c1
-rw-r--r--arch/s390/mm/pgalloc.c21
-rw-r--r--arch/s390/mm/pgtable.c1
-rw-r--r--arch/s390/net/bpf_jit_comp.c138
-rw-r--r--arch/s390/pci/pci.c45
-rw-r--r--arch/s390/pci/pci_bus.h7
-rw-r--r--arch/s390/pci/pci_clp.c2
-rw-r--r--arch/s390/pci/pci_event.c22
-rw-r--r--arch/s390/pci/pci_mmio.c12
-rw-r--r--arch/s390/tools/gen_facilities.c3
-rw-r--r--arch/sh/boards/mach-se/7343/irq.c7
-rw-r--r--arch/sh/boards/mach-se/7722/irq.c4
-rw-r--r--arch/sh/boards/mach-x3proto/gpio.c2
-rw-r--r--arch/sh/configs/ap325rxa_defconfig1
-rw-r--r--arch/sh/configs/ecovec24_defconfig1
-rw-r--r--arch/sh/configs/edosk7705_defconfig1
-rw-r--r--arch/sh/configs/espt_defconfig1
-rw-r--r--arch/sh/configs/hp6xx_defconfig2
-rw-r--r--arch/sh/configs/kfr2r09-romimage_defconfig1
-rw-r--r--arch/sh/configs/landisk_defconfig1
-rw-r--r--arch/sh/configs/lboxre2_defconfig1
-rw-r--r--arch/sh/configs/magicpanelr2_defconfig2
-rw-r--r--arch/sh/configs/migor_defconfig2
-rw-r--r--arch/sh/configs/r7780mp_defconfig1
-rw-r--r--arch/sh/configs/r7785rp_defconfig1
-rw-r--r--arch/sh/configs/rts7751r2d1_defconfig1
-rw-r--r--arch/sh/configs/rts7751r2dplus_defconfig1
-rw-r--r--arch/sh/configs/sdk7780_defconfig1
-rw-r--r--arch/sh/configs/se7206_defconfig3
-rw-r--r--arch/sh/configs/se7712_defconfig1
-rw-r--r--arch/sh/configs/se7721_defconfig1
-rw-r--r--arch/sh/configs/se7724_defconfig1
-rw-r--r--arch/sh/configs/sh03_defconfig1
-rw-r--r--arch/sh/configs/sh2007_defconfig2
-rw-r--r--arch/sh/configs/sh7724_generic_defconfig1
-rw-r--r--arch/sh/configs/sh7763rdp_defconfig1
-rw-r--r--arch/sh/configs/sh7770_generic_defconfig1
-rw-r--r--arch/sh/configs/titan_defconfig1
-rw-r--r--arch/sh/include/asm/pgtable_32.h8
-rw-r--r--arch/sh/include/asm/syscall_32.h24
-rw-r--r--arch/sparc/configs/sparc64_defconfig3
-rw-r--r--arch/sparc/crypto/Kconfig10
-rw-r--r--arch/sparc/crypto/Makefile2
-rw-r--r--arch/sparc/crypto/aes_asm.S3
-rw-r--r--arch/sparc/crypto/aes_glue.c3
-rw-r--r--arch/sparc/crypto/camellia_asm.S3
-rw-r--r--arch/sparc/crypto/camellia_glue.c3
-rw-r--r--arch/sparc/crypto/des_asm.S3
-rw-r--r--arch/sparc/crypto/des_glue.c3
-rw-r--r--arch/sparc/crypto/md5_asm.S3
-rw-r--r--arch/sparc/crypto/md5_glue.c142
-rw-r--r--arch/sparc/crypto/sha1_asm.S3
-rw-r--r--arch/sparc/crypto/sha1_glue.c112
-rw-r--r--arch/sparc/crypto/sha256_glue.c210
-rw-r--r--arch/sparc/crypto/sha512_asm.S3
-rw-r--r--arch/sparc/crypto/sha512_glue.c105
-rw-r--r--arch/sparc/include/asm/opcodes.h (renamed from arch/sparc/crypto/opcodes.h)6
-rw-r--r--arch/sparc/include/asm/pgtable_32.h15
-rw-r--r--arch/sparc/include/asm/pgtable_64.h2
-rw-r--r--arch/sparc/include/asm/syscall.h22
-rw-r--r--arch/sparc/include/uapi/asm/socket.h2
-rw-r--r--arch/sparc/kernel/Makefile1
-rw-r--r--arch/sparc/kernel/perf_event.c3
-rw-r--r--arch/sparc/kernel/setup.c46
-rw-r--r--arch/sparc/lib/Makefile3
-rw-r--r--arch/sparc/lib/crc32.c (renamed from arch/sparc/lib/crc32_glue.c)6
-rw-r--r--arch/sparc/lib/crc32c_asm.S3
-rw-r--r--arch/sparc/lib/crypto/Kconfig8
-rw-r--r--arch/sparc/lib/crypto/Makefile4
-rw-r--r--arch/sparc/lib/crypto/sha256.c64
-rw-r--r--arch/sparc/lib/crypto/sha256_asm.S (renamed from arch/sparc/crypto/sha256_asm.S)5
-rw-r--r--arch/sparc/mm/init_64.c29
-rw-r--r--arch/sparc/mm/srmmu.c2
-rw-r--r--arch/um/Makefile5
-rw-r--r--arch/um/include/asm/fpu/api.h2
-rw-r--r--arch/um/include/asm/pgtable-2level.h1
-rw-r--r--arch/um/include/asm/pgtable-4level.h9
-rw-r--r--arch/um/include/asm/pgtable.h18
-rw-r--r--arch/um/include/asm/syscall-generic.h19
-rw-r--r--arch/um/include/asm/uaccess.h2
-rw-r--r--arch/um/kernel/trap.c26
-rw-r--r--arch/um/kernel/um_arch.c2
-rw-r--r--arch/x86/Kconfig65
-rw-r--r--arch/x86/Kconfig.assembler14
-rw-r--r--arch/x86/Kconfig.cpu24
-rw-r--r--arch/x86/Kconfig.cpufeatures4
-rw-r--r--arch/x86/Makefile6
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/boot/bioscall.S4
-rw-r--r--arch/x86/boot/boot.h6
-rw-r--r--arch/x86/boot/compressed/Makefile10
-rw-r--r--arch/x86/boot/compressed/head_64.S1
-rw-r--r--arch/x86/boot/compressed/kaslr.c50
-rw-r--r--arch/x86/boot/compressed/mem.c2
-rw-r--r--arch/x86/boot/compressed/misc.c1
-rw-r--r--arch/x86/boot/compressed/misc.h8
-rw-r--r--arch/x86/boot/compressed/pgtable.h18
-rw-r--r--arch/x86/boot/compressed/pgtable_64.c12
-rw-r--r--arch/x86/boot/compressed/sev-handle-vc.c134
-rw-r--r--arch/x86/boot/compressed/sev.c263
-rw-r--r--arch/x86/boot/compressed/sev.h25
-rw-r--r--arch/x86/boot/compressed/string.c8
-rw-r--r--arch/x86/boot/copy.S8
-rw-r--r--arch/x86/boot/header.S16
-rw-r--r--arch/x86/boot/startup/Makefile30
-rw-r--r--arch/x86/boot/startup/efi-mixed.S (renamed from drivers/firmware/efi/libstub/x86-mixed.S)0
-rw-r--r--arch/x86/boot/startup/gdt_idt.c71
-rw-r--r--arch/x86/boot/startup/la57toggle.S (renamed from arch/x86/boot/compressed/la57toggle.S)1
-rw-r--r--arch/x86/boot/startup/map_kernel.c217
-rw-r--r--arch/x86/boot/startup/sev-shared.c (renamed from arch/x86/coco/sev/shared.c)894
-rw-r--r--arch/x86/boot/startup/sev-startup.c368
-rw-r--r--arch/x86/boot/startup/sme.c (renamed from arch/x86/mm/mem_encrypt_identity.c)30
-rw-r--r--arch/x86/boot/string.c2
-rw-r--r--arch/x86/boot/video.c2
-rw-r--r--arch/x86/coco/core.c2
-rw-r--r--arch/x86/coco/sev/Makefile23
-rw-r--r--arch/x86/coco/sev/core.c2055
-rw-r--r--arch/x86/coco/sev/sev-nmi.c108
-rw-r--r--arch/x86/coco/sev/vc-handle.c1061
-rw-r--r--arch/x86/coco/sev/vc-shared.c504
-rw-r--r--arch/x86/coco/tdx/tdx.c50
-rw-r--r--arch/x86/configs/i386_defconfig1
-rw-r--r--arch/x86/crypto/Kconfig131
-rw-r--r--arch/x86/crypto/Makefile23
-rw-r--r--arch/x86/crypto/aegis128-aesni-glue.c13
-rw-r--r--arch/x86/crypto/aes-ctr-avx-x86_64.S47
-rw-r--r--arch/x86/crypto/aes-xts-avx-x86_64.S206
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c174
-rw-r--r--arch/x86/crypto/aria_aesni_avx2_glue.c22
-rw-r--r--arch/x86/crypto/aria_aesni_avx_glue.c20
-rw-r--r--arch/x86/crypto/aria_gfni_avx512_glue.c22
-rw-r--r--arch/x86/crypto/camellia_aesni_avx2_glue.c21
-rw-r--r--arch/x86/crypto/camellia_aesni_avx_glue.c21
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c21
-rw-r--r--arch/x86/crypto/cast6_avx_glue.c20
-rw-r--r--arch/x86/crypto/chacha_glue.c311
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_asm.S5
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c289
-rw-r--r--arch/x86/crypto/poly1305_glue.c290
-rw-r--r--arch/x86/crypto/polyval-clmulni_glue.c72
-rw-r--r--arch/x86/crypto/serpent_avx2_glue.c21
-rw-r--r--arch/x86/crypto/serpent_avx_glue.c21
-rw-r--r--arch/x86/crypto/serpent_sse2_glue.c21
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c89
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c467
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c75
-rw-r--r--arch/x86/crypto/sm3_avx_glue.c54
-rw-r--r--arch/x86/crypto/sm4_aesni_avx2_glue.c31
-rw-r--r--arch/x86/crypto/sm4_aesni_avx_glue.c31
-rw-r--r--arch/x86/crypto/twofish_avx_glue.c21
-rw-r--r--arch/x86/entry/entry.S9
-rw-r--r--arch/x86/entry/entry_64.S20
-rw-r--r--arch/x86/entry/vdso/vma.c35
-rw-r--r--arch/x86/entry/vsyscall/vsyscall_64.c2
-rw-r--r--arch/x86/events/amd/brs.c12
-rw-r--r--arch/x86/events/amd/core.c16
-rw-r--r--arch/x86/events/amd/ibs.c33
-rw-r--r--arch/x86/events/amd/iommu.c2
-rw-r--r--arch/x86/events/amd/lbr.c21
-rw-r--r--arch/x86/events/amd/power.c11
-rw-r--r--arch/x86/events/amd/uncore.c117
-rw-r--r--arch/x86/events/core.c95
-rw-r--r--arch/x86/events/intel/bts.c151
-rw-r--r--arch/x86/events/intel/core.c440
-rw-r--r--arch/x86/events/intel/cstate.c3
-rw-r--r--arch/x86/events/intel/ds.c106
-rw-r--r--arch/x86/events/intel/knc.c24
-rw-r--r--arch/x86/events/intel/lbr.c46
-rw-r--r--arch/x86/events/intel/p4.c28
-rw-r--r--arch/x86/events/intel/p6.c13
-rw-r--r--arch/x86/events/intel/pt.c37
-rw-r--r--arch/x86/events/intel/uncore.c15
-rw-r--r--arch/x86/events/intel/uncore_discovery.c11
-rw-r--r--arch/x86/events/intel/uncore_nhmex.c71
-rw-r--r--arch/x86/events/intel/uncore_snb.c43
-rw-r--r--arch/x86/events/intel/uncore_snbep.c158
-rw-r--r--arch/x86/events/msr.c4
-rw-r--r--arch/x86/events/perf_event.h83
-rw-r--r--arch/x86/events/perf_event_flags.h41
-rw-r--r--arch/x86/events/probe.c4
-rw-r--r--arch/x86/events/rapl.c9
-rw-r--r--arch/x86/events/utils.c1
-rw-r--r--arch/x86/events/zhaoxin/core.c20
-rw-r--r--arch/x86/hyperv/hv_apic.c11
-rw-r--r--arch/x86/hyperv/hv_init.c134
-rw-r--r--arch/x86/hyperv/hv_spinlock.c7
-rw-r--r--arch/x86/hyperv/hv_vtl.c66
-rw-r--r--arch/x86/hyperv/ivm.c16
-rw-r--r--arch/x86/include/asm/acrn.h2
-rw-r--r--arch/x86/include/asm/alternative.h43
-rw-r--r--arch/x86/include/asm/amd/fch.h13
-rw-r--r--arch/x86/include/asm/amd/hsmp.h (renamed from arch/x86/include/asm/amd_hsmp.h)2
-rw-r--r--arch/x86/include/asm/amd/ibs.h (renamed from arch/x86/include/asm/amd-ibs.h)5
-rw-r--r--arch/x86/include/asm/amd/nb.h (renamed from arch/x86/include/asm/amd_nb.h)2
-rw-r--r--arch/x86/include/asm/amd/node.h (renamed from arch/x86/include/asm/amd_node.h)0
-rw-r--r--arch/x86/include/asm/apic.h20
-rw-r--r--arch/x86/include/asm/arch_hweight.h6
-rw-r--r--arch/x86/include/asm/asm.h24
-rw-r--r--arch/x86/include/asm/bitops.h7
-rw-r--r--arch/x86/include/asm/boot.h10
-rw-r--r--arch/x86/include/asm/coco.h2
-rw-r--r--arch/x86/include/asm/cpufeatures.h36
-rw-r--r--arch/x86/include/asm/cpuid/api.h96
-rw-r--r--arch/x86/include/asm/cpuid/types.h95
-rw-r--r--arch/x86/include/asm/debugreg.h16
-rw-r--r--arch/x86/include/asm/elf.h4
-rw-r--r--arch/x86/include/asm/entry-common.h5
-rw-r--r--arch/x86/include/asm/fpu/api.h3
-rw-r--r--arch/x86/include/asm/fpu/sched.h38
-rw-r--r--arch/x86/include/asm/fpu/types.h24
-rw-r--r--arch/x86/include/asm/fpu/xstate.h3
-rw-r--r--arch/x86/include/asm/fred.h1
-rw-r--r--arch/x86/include/asm/fsgsbase.h4
-rw-r--r--arch/x86/include/asm/inat.h6
-rw-r--r--arch/x86/include/asm/intel-family.h2
-rw-r--r--arch/x86/include/asm/io.h6
-rw-r--r--arch/x86/include/asm/kexec.h7
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h3
-rw-r--r--arch/x86/include/asm/kvm_host.h57
-rw-r--r--arch/x86/include/asm/linkage.h10
-rw-r--r--arch/x86/include/asm/mem_encrypt.h2
-rw-r--r--arch/x86/include/asm/microcode.h6
-rw-r--r--arch/x86/include/asm/mmu.h4
-rw-r--r--arch/x86/include/asm/mmu_context.h15
-rw-r--r--arch/x86/include/asm/mshyperv.h10
-rw-r--r--arch/x86/include/asm/msr-index.h24
-rw-r--r--arch/x86/include/asm/msr.h218
-rw-r--r--arch/x86/include/asm/mwait.h66
-rw-r--r--arch/x86/include/asm/nmi.h49
-rw-r--r--arch/x86/include/asm/nospec-branch.h26
-rw-r--r--arch/x86/include/asm/page_32_types.h1
-rw-r--r--arch/x86/include/asm/page_64.h2
-rw-r--r--arch/x86/include/asm/page_64_types.h11
-rw-r--r--arch/x86/include/asm/page_types.h4
-rw-r--r--arch/x86/include/asm/paravirt.h63
-rw-r--r--arch/x86/include/asm/paravirt_types.h12
-rw-r--r--arch/x86/include/asm/percpu.h20
-rw-r--r--arch/x86/include/asm/perf_event.h1
-rw-r--r--arch/x86/include/asm/pgalloc.h19
-rw-r--r--arch/x86/include/asm/pgtable-2level_types.h2
-rw-r--r--arch/x86/include/asm/pgtable-3level_types.h4
-rw-r--r--arch/x86/include/asm/pgtable.h31
-rw-r--r--arch/x86/include/asm/pgtable_64.h2
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h26
-rw-r--r--arch/x86/include/asm/posted_intr.h83
-rw-r--r--arch/x86/include/asm/processor.h16
-rw-r--r--arch/x86/include/asm/resctrl.h21
-rw-r--r--arch/x86/include/asm/set_memory.h2
-rw-r--r--arch/x86/include/asm/setup.h3
-rw-r--r--arch/x86/include/asm/sev-common.h2
-rw-r--r--arch/x86/include/asm/sev-internal.h105
-rw-r--r--arch/x86/include/asm/sev.h66
-rw-r--r--arch/x86/include/asm/shared/tdx.h10
-rw-r--r--arch/x86/include/asm/simd.h6
-rw-r--r--arch/x86/include/asm/smap.h12
-rw-r--r--arch/x86/include/asm/spec-ctrl.h2
-rw-r--r--arch/x86/include/asm/special_insns.h21
-rw-r--r--arch/x86/include/asm/string_32.h15
-rw-r--r--arch/x86/include/asm/suspend_32.h1
-rw-r--r--arch/x86/include/asm/suspend_64.h1
-rw-r--r--arch/x86/include/asm/svm.h10
-rw-r--r--arch/x86/include/asm/switch_to.h4
-rw-r--r--arch/x86/include/asm/syscall.h43
-rw-r--r--arch/x86/include/asm/tdx.h77
-rw-r--r--arch/x86/include/asm/tdx_global_metadata.h (renamed from arch/x86/virt/vmx/tdx/tdx_global_metadata.h)19
-rw-r--r--arch/x86/include/asm/text-patching.h29
-rw-r--r--arch/x86/include/asm/trace/common.h12
-rw-r--r--arch/x86/include/asm/trace/fpu.h5
-rw-r--r--arch/x86/include/asm/trace/irq_vectors.h1
-rw-r--r--arch/x86/include/asm/tsc.h55
-rw-r--r--arch/x86/include/asm/uaccess_64.h6
-rw-r--r--arch/x86/include/asm/vdso.h8
-rw-r--r--arch/x86/include/asm/vdso/processor.h8
-rw-r--r--arch/x86/include/asm/vmx.h2
-rw-r--r--arch/x86/include/asm/x86_init.h1
-rw-r--r--arch/x86/include/asm/xen/hypervisor.h2
-rw-r--r--arch/x86/include/uapi/asm/kvm.h72
-rw-r--r--arch/x86/include/uapi/asm/setup_data.h13
-rw-r--r--arch/x86/include/uapi/asm/svm.h2
-rw-r--r--arch/x86/include/uapi/asm/vmx.h5
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/acpi/boot.c11
-rw-r--r--arch/x86/kernel/acpi/cppc.c10
-rw-r--r--arch/x86/kernel/acpi/cstate.c2
-rw-r--r--arch/x86/kernel/acpi/madt_wakeup.c2
-rw-r--r--arch/x86/kernel/acpi/sleep.c1
-rw-r--r--arch/x86/kernel/alternative.c783
-rw-r--r--arch/x86/kernel/amd_gart_64.c2
-rw-r--r--arch/x86/kernel/amd_nb.c11
-rw-r--r--arch/x86/kernel/amd_node.c2
-rw-r--r--arch/x86/kernel/aperture_64.c2
-rw-r--r--arch/x86/kernel/apic/apic.c17
-rw-r--r--arch/x86/kernel/apic/apic_noop.c8
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c9
-rw-r--r--arch/x86/kernel/apic/io_apic.c4
-rw-r--r--arch/x86/kernel/apic/vector.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c2
-rw-r--r--arch/x86/kernel/asm-offsets.c8
-rw-r--r--arch/x86/kernel/asm-offsets_32.c9
-rw-r--r--arch/x86/kernel/callthunks.c6
-rw-r--r--arch/x86/kernel/cet.c3
-rw-r--r--arch/x86/kernel/cpu/Makefile5
-rw-r--r--arch/x86/kernel/cpu/amd.c112
-rw-r--r--arch/x86/kernel/cpu/amd_cache_disable.c301
-rw-r--r--arch/x86/kernel/cpu/aperfmperf.c29
-rw-r--r--arch/x86/kernel/cpu/bugs.c1512
-rw-r--r--arch/x86/kernel/cpu/bus_lock.c19
-rw-r--r--arch/x86/kernel/cpu/cacheinfo.c1059
-rw-r--r--arch/x86/kernel/cpu/common.c227
-rw-r--r--arch/x86/kernel/cpu/cpu.h9
-rw-r--r--arch/x86/kernel/cpu/cpuid-deps.c5
-rw-r--r--arch/x86/kernel/cpu/cpuid_0x2_table.c128
-rw-r--r--arch/x86/kernel/cpu/feat_ctl.c5
-rw-r--r--arch/x86/kernel/cpu/hygon.c7
-rw-r--r--arch/x86/kernel/cpu/intel.c134
-rw-r--r--arch/x86/kernel/cpu/intel_epb.c12
-rw-r--r--arch/x86/kernel/cpu/mce/amd.c22
-rw-r--r--arch/x86/kernel/cpu/mce/core.c66
-rw-r--r--arch/x86/kernel/cpu/mce/inject.c35
-rw-r--r--arch/x86/kernel/cpu/mce/intel.c32
-rw-r--r--arch/x86/kernel/cpu/mce/internal.h2
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c29
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c61
-rw-r--r--arch/x86/kernel/cpu/microcode/intel-ucode-defs.h150
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c4
-rw-r--r--arch/x86/kernel/cpu/microcode/internal.h1
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c13
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c2
-rw-r--r--arch/x86/kernel/cpu/resctrl/Makefile2
-rw-r--r--arch/x86/kernel/cpu/resctrl/core.c42
-rw-r--r--arch/x86/kernel/cpu/resctrl/ctrlmondata.c635
-rw-r--r--arch/x86/kernel/cpu/resctrl/internal.h397
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c921
-rw-r--r--arch/x86/kernel/cpu/resctrl/pseudo_lock.c1119
-rw-r--r--arch/x86/kernel/cpu/resctrl/pseudo_lock_trace.h (renamed from arch/x86/kernel/cpu/resctrl/trace.h)26
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c4167
-rw-r--r--arch/x86/kernel/cpu/scattered.c3
-rw-r--r--arch/x86/kernel/cpu/sgx/driver.h1
-rw-r--r--arch/x86/kernel/cpu/sgx/ioctl.c30
-rw-r--r--arch/x86/kernel/cpu/sgx/main.c5
-rw-r--r--arch/x86/kernel/cpu/topology.c3
-rw-r--r--arch/x86/kernel/cpu/topology_amd.c7
-rw-r--r--arch/x86/kernel/cpu/tsx.c21
-rw-r--r--arch/x86/kernel/cpu/umwait.c6
-rw-r--r--arch/x86/kernel/cpu/zhaoxin.c1
-rw-r--r--arch/x86/kernel/crash.c26
-rw-r--r--arch/x86/kernel/dumpstack.c2
-rw-r--r--arch/x86/kernel/e820.c43
-rw-r--r--arch/x86/kernel/early_printk.c19
-rw-r--r--arch/x86/kernel/fpu/context.h4
-rw-r--r--arch/x86/kernel/fpu/core.c128
-rw-r--r--arch/x86/kernel/fpu/init.c21
-rw-r--r--arch/x86/kernel/fpu/regset.c22
-rw-r--r--arch/x86/kernel/fpu/signal.c29
-rw-r--r--arch/x86/kernel/fpu/xstate.c177
-rw-r--r--arch/x86/kernel/fpu/xstate.h31
-rw-r--r--arch/x86/kernel/fred.c21
-rw-r--r--arch/x86/kernel/ftrace.c20
-rw-r--r--arch/x86/kernel/head32.c4
-rw-r--r--arch/x86/kernel/head64.c284
-rw-r--r--arch/x86/kernel/head_32.S8
-rw-r--r--arch/x86/kernel/head_64.S10
-rw-r--r--arch/x86/kernel/hpet.c5
-rw-r--r--arch/x86/kernel/i8253.c3
-rw-r--r--arch/x86/kernel/irq.c63
-rw-r--r--arch/x86/kernel/jailhouse.c2
-rw-r--r--arch/x86/kernel/jump_label.c6
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c58
-rw-r--r--arch/x86/kernel/kprobes/core.c4
-rw-r--r--arch/x86/kernel/kprobes/opt.c6
-rw-r--r--arch/x86/kernel/kvm.c33
-rw-r--r--arch/x86/kernel/kvmclock.c6
-rw-r--r--arch/x86/kernel/machine_kexec_32.c4
-rw-r--r--arch/x86/kernel/machine_kexec_64.c70
-rw-r--r--arch/x86/kernel/mmconf-fam10h_64.c8
-rw-r--r--arch/x86/kernel/module.c8
-rw-r--r--arch/x86/kernel/nmi.c87
-rw-r--r--arch/x86/kernel/nmi_selftest.c52
-rw-r--r--arch/x86/kernel/paravirt.c3
-rw-r--r--arch/x86/kernel/process.c44
-rw-r--r--arch/x86/kernel/process_32.c7
-rw-r--r--arch/x86/kernel/process_64.c28
-rw-r--r--arch/x86/kernel/reboot_fixups_32.c2
-rw-r--r--arch/x86/kernel/relocate_kernel_32.S6
-rw-r--r--arch/x86/kernel/relocate_kernel_64.S250
-rw-r--r--arch/x86/kernel/setup.c78
-rw-r--r--arch/x86/kernel/shstk.c18
-rw-r--r--arch/x86/kernel/signal.c6
-rw-r--r--arch/x86/kernel/smpboot.c72
-rw-r--r--arch/x86/kernel/static_call.c6
-rw-r--r--arch/x86/kernel/trace_clock.c2
-rw-r--r--arch/x86/kernel/tracepoint.c21
-rw-r--r--arch/x86/kernel/traps.c21
-rw-r--r--arch/x86/kernel/tsc.c5
-rw-r--r--arch/x86/kernel/tsc_sync.c15
-rw-r--r--arch/x86/kernel/uprobes.c5
-rw-r--r--arch/x86/kernel/vmlinux.lds.S24
-rw-r--r--arch/x86/kvm/Kconfig12
-rw-r--r--arch/x86/kvm/Makefile1
-rw-r--r--arch/x86/kvm/cpuid.c72
-rw-r--r--arch/x86/kvm/cpuid.h33
-rw-r--r--arch/x86/kvm/ioapic.c7
-rw-r--r--arch/x86/kvm/ioapic.h2
-rw-r--r--arch/x86/kvm/irq.c3
-rw-r--r--arch/x86/kvm/irq_comm.c37
-rw-r--r--arch/x86/kvm/lapic.c43
-rw-r--r--arch/x86/kvm/lapic.h6
-rw-r--r--arch/x86/kvm/mmu.h9
-rw-r--r--arch/x86/kvm/mmu/mmu.c114
-rw-r--r--arch/x86/kvm/mmu/mmu_internal.h5
-rw-r--r--arch/x86/kvm/mmu/page_track.c3
-rw-r--r--arch/x86/kvm/mmu/spte.c29
-rw-r--r--arch/x86/kvm/mmu/spte.h1
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c76
-rw-r--r--arch/x86/kvm/smm.c1
-rw-r--r--arch/x86/kvm/smm.h3
-rw-r--r--arch/x86/kvm/svm/avic.c71
-rw-r--r--arch/x86/kvm/svm/nested.c36
-rw-r--r--arch/x86/kvm/svm/sev.c232
-rw-r--r--arch/x86/kvm/svm/svm.c289
-rw-r--r--arch/x86/kvm/svm/svm.h16
-rw-r--r--arch/x86/kvm/trace.h13
-rw-r--r--arch/x86/kvm/vmx/common.h182
-rw-r--r--arch/x86/kvm/vmx/main.c1123
-rw-r--r--arch/x86/kvm/vmx/nested.c65
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c137
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c349
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c21
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_service_interface.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c53
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c48
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c73
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h38
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h23
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h41
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h44
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h188
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h16
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h23
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h2
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h1
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h2
-rw-r--r--drivers/gpu/drm/amd/include/v11_structs.h8
-rw-r--r--drivers/gpu/drm/amd/include/v12_structs.h8
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c62
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c45
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h5
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c358
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c42
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c8
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c36
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c63
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h30
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h25
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c41
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c19
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c123
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c38
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c155
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h29
-rw-r--r--drivers/gpu/drm/ast/Kconfig2
-rw-r--r--drivers/gpu/drm/ast/ast_cursor.c45
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h17
-rw-r--r--drivers/gpu/drm/ast/ast_mm.c26
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c77
-rw-r--r--drivers/gpu/drm/ast/ast_post.c24
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h2
-rw-r--r--drivers/gpu/drm/bridge/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c3
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c41
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c33
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c194
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h3
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c52
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c47
-rw-r--r--drivers/gpu/drm/bridge/aux-bridge.c3
-rw-r--r--drivers/gpu/drm/bridge/aux-hpd-bridge.c1
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c207
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h2
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c77
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c6
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c5
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c9
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c9
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.c11
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.h5
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c9
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c3
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qm-ldb.c32
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c22
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c3
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c3
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c3
-rw-r--r--drivers/gpu/drm/bridge/ite-it6263.c7
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c47
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c3
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c3
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c3
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c5
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c16
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c3
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c1
-rw-r--r--drivers/gpu/drm/bridge/microchip-lvds.c3
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c3
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c5
-rw-r--r--drivers/gpu/drm/bridge/panel.c3
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c1
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c3
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c10
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c6
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c1
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c14
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c5
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c3
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c3
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c2
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c41
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c45
-rw-r--r--drivers/gpu/drm/bridge/tda998x_drv.c8
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c3
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c38
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c115
-rw-r--r--drivers/gpu/drm/bridge/ti-tdp158.c6
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c5
-rw-r--r--drivers/gpu/drm/bridge/ti-tpd12s015.c3
-rw-r--r--drivers/gpu/drm/ci/arm64.config2
-rw-r--r--drivers/gpu/drm/ci/build-igt.sh2
-rw-r--r--drivers/gpu/drm/ci/build.sh20
-rw-r--r--drivers/gpu/drm/ci/build.yml14
-rw-r--r--drivers/gpu/drm/ci/container.yml24
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml55
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh11
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml4
-rwxr-xr-xdrivers/gpu/drm/ci/lava-submit.sh5
-rw-r--r--drivers/gpu/drm/ci/test.yml76
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt8
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-fails.txt8
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt32
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt13
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt9
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt22
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt313
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt30
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-skips.txt2
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c160
-rw-r--r--drivers/gpu/drm/display/drm_dp_cec.c37
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c467
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c116
-rw-r--r--drivers/gpu/drm/display/drm_dp_tunnel.c20
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_helper.c168
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_state_helper.c294
-rw-r--r--drivers/gpu/drm/drm_atomic.c59
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c3
-rw-r--r--drivers/gpu/drm/drm_blend.c6
-rw-r--r--drivers/gpu/drm/drm_bridge.c167
-rw-r--r--drivers/gpu/drm/drm_bridge_helper.c58
-rw-r--r--drivers/gpu/drm/drm_client.c10
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c257
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_debugfs.c38
-rw-r--r--drivers/gpu/drm/drm_displayid_internal.h31
-rw-r--r--drivers/gpu/drm/drm_draw.c100
-rw-r--r--drivers/gpu/drm/drm_drv.c83
-rw-r--r--drivers/gpu/drm/drm_edid.c102
-rw-r--r--drivers/gpu/drm/drm_file.c40
-rw-r--r--drivers/gpu/drm/drm_format_helper.c378
-rw-r--r--drivers/gpu/drm/drm_format_internal.h160
-rw-r--r--drivers/gpu/drm/drm_gem.c26
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c6
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c147
-rw-r--r--drivers/gpu/drm/drm_gpusvm.c41
-rw-r--r--drivers/gpu/drm/drm_internal.h4
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c6
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c37
-rw-r--r--drivers/gpu/drm/drm_mode_config.c7
-rw-r--r--drivers/gpu/drm/drm_panel.c146
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c6
-rw-r--r--drivers/gpu/drm/drm_panic.c142
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs104
-rw-r--r--drivers/gpu/drm/drm_plane.c52
-rw-r--r--drivers/gpu/drm/drm_prime.c7
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c11
-rw-r--r--drivers/gpu/drm/drm_syncobj.c47
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c4
-rw-r--r--drivers/gpu/drm/gma500/Kconfig2
-rw-r--r--drivers/gpu/drm/gma500/mmu.c41
-rw-r--r--drivers/gpu/drm/gma500/mmu.h2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c7
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c31
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c33
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h1
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig1
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Makefile3
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c16
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h10
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c91
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h36
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c94
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h130
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c71
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c104
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c74
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c87
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h12
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c3
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c4
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c4
-rw-r--r--drivers/gpu/drm/i915/Makefile5
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c2
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c62
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c32
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c8
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c73
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c1222
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.h18
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c102
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c191
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c47
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c635
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c45
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c63
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c47
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c924
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h30
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c215
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h34
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c44
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c969
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.h82
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c129
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c44
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.c68
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.h24
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_snapshot.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c152
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c296
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c42
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c48
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c140
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c330
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c218
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c64
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c52
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c79
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c60
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c38
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c124
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c181
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h63
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c276
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c670
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c672
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_load_detect.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c161
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c230
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.c340
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.h58
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c204
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c133
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c140
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c91
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c524
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_uapi.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c288
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c68
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga_regs.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c325
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.c171
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.h14
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c4
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c21
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c741
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h24
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c160
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.h6
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c118
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.h5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ioctls.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c35
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_throttle.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c56
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c26
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_wopcm.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c9
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c54
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h9
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c49
-rw-r--r--drivers/gpu/drm/i915/i915_driver.h1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c48
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c158
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c4
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c8
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h150
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h108
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c1
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c15
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h3
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h11
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c18
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/librapl.c4
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c21
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.c316
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.h56
-rw-r--r--drivers/gpu/drm/imagination/Makefile2
-rw-r--r--drivers/gpu/drm/imagination/pvr_debugfs.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.c147
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.h40
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.c16
-rw-r--r--drivers/gpu/drm/imagination/pvr_free_list.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.c67
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.h85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c26
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_mips.c85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_riscv.c165
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_startstop.c17
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c31
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.h2
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_util.c66
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.c18
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.h6
-rw-r--r--drivers/gpu/drm/imagination/pvr_hwrt.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_job.c7
-rw-r--r--drivers/gpu/drm/imagination/pvr_mmu.c8
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.c136
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.h3
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c4
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h153
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_riscv.h41
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.c3
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c3
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c5
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c4
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c4
-rw-r--r--drivers/gpu/drm/loongson/Kconfig2
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c10
-rw-r--r--drivers/gpu/drm/mediatek/Makefile8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c120
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c31
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c413
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.h14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h2
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c3
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.c3
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c32
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c195
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h13
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c2
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_catalog.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_catalog.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_catalog.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_catalog.c9
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c44
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c96
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c11
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c73
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.h21
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c19
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h433
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c43
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c35
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c230
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c34
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h16
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c50
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c121
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c51
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c131
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.h27
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c146
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c55
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c17
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c18
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.h4
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c18
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c50
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c133
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h26
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c107
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c73
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hpd.c89
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c14
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c6
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c1
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c13
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml7
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c14
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/coreca7d.c122
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcca7d.c98
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c50
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headca7d.c297
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimm.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c25
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c209
-rw-r--r--drivers/gpu/drm/nouveau/gv100_fence.c93
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h22
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h868
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h137
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h15
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h173
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/chan.h76
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push906f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/pushc97b.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/layout.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h24
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h132
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h43
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h166
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h335
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h216
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h65
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h48
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h162
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h95
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h148
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h97
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h79
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h170
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h82
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h119
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h44
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h124
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h45
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h74
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h86
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h174
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c87
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c213
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c39
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan.c156
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan506f.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan906f.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chanc36f.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvif/conn.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/outp.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvif/user.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c508
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c275
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c358
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c112
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c)43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c148
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c)394
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c)60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c)417
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c356
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c)1559
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c)37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h741
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h260
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h350
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h)64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h825
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h)55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c)34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c691
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c)118
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c263
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c217
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c216
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h355
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h318
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h)241
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h634
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h249
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c271
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c306
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c2
-rw-r--r--drivers/gpu/drm/nova/Kconfig14
-rw-r--r--drivers/gpu/drm/nova/Makefile3
-rw-r--r--drivers/gpu/drm/nova/driver.rs69
-rw-r--r--drivers/gpu/drm/nova/file.rs74
-rw-r--r--drivers/gpu/drm/nova/gem.rs49
-rw-r--r--drivers/gpu/drm/nova/nova.rs18
-rw-r--r--drivers/gpu/drm/nova/uapi.rs61
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c3
-rw-r--r--drivers/gpu/drm/panel/Kconfig39
-rw-r--r--drivers/gpu/drm/panel/Makefile4
-rw-r--r--drivers/gpu/drm/panel/panel-abt-y030xx067a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c11
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c11
-rw-r--r--drivers/gpu/drm/panel/panel-auo-a030jtn01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c125
-rw-r--r--drivers/gpu/drm/panel/panel-boe-td4320.c247
-rw-r--r--drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ebbg-ft8719.c11
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c19
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8279.c1296
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c441
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c4
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c9
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36523.c1683
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt37801.c340
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c238
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c108
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c41
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c129
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-r63353.c68
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c6
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-g2647fb105.c280
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c71
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h19
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c12
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.c4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_features.h3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c152
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h36
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c13
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.h13
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c76
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c10
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.c227
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.h82
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.c6
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c19
-rw-r--r--drivers/gpu/drm/panthor/panthor_regs.h4
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c13
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c2
-rw-r--r--drivers/gpu/drm/qxl/Kconfig2
-rw-r--r--drivers/gpu/drm/radeon/Kconfig2
-rw-r--r--drivers/gpu/drm/radeon/atombios.h3
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c8
-rw-r--r--drivers/gpu/drm/radeon/cik.c42
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c42
-rw-r--r--drivers/gpu/drm/radeon/sid.h2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c5
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c10
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c3
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c3
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Kconfig15
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c2
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c120
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h1
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c9
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c5
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig2
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c103
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c23
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c55
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c315
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c11
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c94
-rw-r--r--drivers/gpu/drm/scheduler/.kunitconfig12
-rw-r--r--drivers/gpu/drm/scheduler/Makefile2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c35
-rw-r--r--drivers/gpu/drm/scheduler/tests/Makefile7
-rw-r--r--drivers/gpu/drm/scheduler/tests/mock_scheduler.c359
-rw-r--r--drivers/gpu/drm/scheduler/tests/sched_tests.h226
-rw-r--r--drivers/gpu/drm/scheduler/tests/tests_basic.c476
-rw-r--r--drivers/gpu/drm/sitronix/Kconfig51
-rw-r--r--drivers/gpu/drm/sitronix/Makefile3
-rw-r--r--drivers/gpu/drm/sitronix/st7571-i2c.c1000
-rw-r--r--drivers/gpu/drm/sitronix/st7586.c (renamed from drivers/gpu/drm/tiny/st7586.c)0
-rw-r--r--drivers/gpu/drm/sitronix/st7735r.c (renamed from drivers/gpu/drm/tiny/st7735r.c)0
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c13
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.c13
-rw-r--r--drivers/gpu/drm/sti/Makefile2
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c14
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c14
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c15
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c15
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c14
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c14
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c14
-rw-r--r--drivers/gpu/drm/stm/lvds.c11
-rw-r--r--drivers/gpu/drm/sysfb/Kconfig76
-rw-r--r--drivers/gpu/drm/sysfb/Makefile12
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb.c35
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_helper.h184
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_modeset.c320
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c107
-rw-r--r--drivers/gpu/drm/sysfb/efidrm.c389
-rw-r--r--drivers/gpu/drm/sysfb/ofdrm.c (renamed from drivers/gpu/drm/tiny/ofdrm.c)376
-rw-r--r--drivers/gpu/drm/sysfb/simpledrm.c (renamed from drivers/gpu/drm/tiny/simpledrm.c)258
-rw-r--r--drivers/gpu/drm/sysfb/vesadrm.c554
-rw-r--r--drivers/gpu/drm/tegra/dc.c17
-rw-r--r--drivers/gpu/drm/tegra/dp.c67
-rw-r--r--drivers/gpu/drm/tegra/dp.h2
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c11
-rw-r--r--drivers/gpu/drm/tegra/dsi.c4
-rw-r--r--drivers/gpu/drm/tegra/falcon.c20
-rw-r--r--drivers/gpu/drm/tegra/falcon.h1
-rw-r--r--drivers/gpu/drm/tegra/gem.c1
-rw-r--r--drivers/gpu/drm/tegra/hub.c4
-rw-r--r--drivers/gpu/drm/tegra/hub.h3
-rw-r--r--drivers/gpu/drm/tegra/rgb.c14
-rw-r--r--drivers/gpu/drm/tegra/sor.c4
-rw-r--r--drivers/gpu/drm/tests/Makefile2
-rw-r--r--drivers/gpu/drm/tests/drm_atomic_test.c153
-rw-r--r--drivers/gpu/drm/tests/drm_bridge_test.c417
-rw-r--r--drivers/gpu/drm/tests/drm_client_modeset_test.c12
-rw-r--r--drivers/gpu/drm/tests/drm_cmdline_parser_test.c10
-rw-r--r--drivers/gpu/drm/tests/drm_gem_shmem_test.c31
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c158
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c83
-rw-r--r--drivers/gpu/drm/tests/drm_modes_test.c26
-rw-r--r--drivers/gpu/drm/tests/drm_probe_helper_test.c8
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c3
-rw-r--r--drivers/gpu/drm/tiny/Kconfig64
-rw-r--r--drivers/gpu/drm/tiny/Makefile4
-rw-r--r--drivers/gpu/drm/tiny/appletbdrm.c30
-rw-r--r--drivers/gpu/drm/tiny/cirrus-qemu.c145
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c46
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c5
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_backup.c52
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c2
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c24
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h20
-rw-r--r--drivers/gpu/drm/udl/udl_main.c191
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c22
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c6
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c126
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c62
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h22
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c27
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c64
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h26
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c50
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_output.c62
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c154
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c37
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c22
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h7
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c15
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c9
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c16
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c38
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c9
-rw-r--r--drivers/gpu/drm/vkms/Kconfig15
-rw-r--r--drivers/gpu/drm/vkms/Makefile5
-rw-r--r--drivers/gpu/drm/vkms/tests/.kunitconfig4
-rw-r--r--drivers/gpu/drm/vkms/tests/Makefile3
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_config_test.c951
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.c640
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.h437
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.c61
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.h26
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c2
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c45
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h17
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c176
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig2
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c844
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h81
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c52
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c874
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h71
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c63
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c85
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c7
-rw-r--r--drivers/gpu/drm/xe/Kconfig18
-rw-r--r--drivers/gpu/drm/xe/Makefile7
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h1
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h1
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h48
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h76
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h6
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c10
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c45
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.c71
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rps.c17
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_wa.c6
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c133
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c2
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_alu_commands.h79
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h1
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gpu_commands.h1
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mi_commands.h9
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h9
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h14
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pcode_regs.h3
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c6
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c11
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c7
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c515
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h27
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c393
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.h10
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c250
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.h24
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c64
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_device.c34
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c105
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h41
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c7
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c18
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.h3
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c4
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c11
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.c6
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c22
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c11
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c86
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c39
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c82
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c28
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c13
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c20
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c66
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle.c90
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c12
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c48
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c87
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.c102
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_debugfs.c159
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c203
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.h7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity_types.h12
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c3
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c20
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c24
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c12
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c109
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c125
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c2
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c219
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h5
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h9
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c2
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c244
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h4
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c21
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c11
-rw-r--r--drivers/gpu/drm/xe/xe_module.c12
-rw-r--r--drivers/gpu/drm/xe/xe_module.h1
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c7
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c33
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.c21
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h8
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c84
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h2
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.c77
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c252
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_debugfs.c13
-rw-r--r--drivers/gpu/drm/xe/xe_query.c2
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr.c3
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c20
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c3
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c3
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.c2
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c69
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.h1
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c159
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h89
-rw-r--r--drivers/gpu/drm/xe/xe_trace_lrc.h8
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c8
-rw-r--r--drivers/gpu/drm/xe/xe_uc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c94
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c51
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vram.c5
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c16
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules4
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c9
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp_audio.c5
-rw-r--r--drivers/gpu/host1x/bus.c11
-rw-r--r--drivers/gpu/host1x/cdma.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c8
-rw-r--r--drivers/gpu/nova-core/Kconfig1
-rw-r--r--drivers/gpu/nova-core/driver.rs9
-rw-r--r--drivers/gpu/nova-core/firmware.rs44
-rw-r--r--drivers/gpu/nova-core/gpu.rs88
-rw-r--r--drivers/gpu/nova-core/nova_core.rs2
-rw-r--r--drivers/gpu/nova-core/regs.rs82
-rw-r--r--drivers/gpu/nova-core/regs/macros.rs380
-rw-r--r--drivers/hid/Kconfig2
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c12
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.c9
-rw-r--r--drivers/hid/bpf/progs/XPPen__ACK05.bpf.c1
-rw-r--r--drivers/hid/hid-appletb-kbd.c9
-rw-r--r--drivers/hid/hid-asus.c111
-rw-r--r--drivers/hid/hid-core.c9
-rw-r--r--drivers/hid/hid-corsair-void.c4
-rw-r--r--drivers/hid/hid-cp2112.c66
-rw-r--r--drivers/hid/hid-hyperv.c4
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-kysona.c46
-rw-r--r--drivers/hid/hid-lg4ff.c6
-rw-r--r--drivers/hid/hid-magicmouse.c74
-rw-r--r--drivers/hid/hid-mcp2200.c23
-rw-r--r--drivers/hid/hid-mcp2221.c10
-rw-r--r--drivers/hid/hid-multitouch.c12
-rw-r--r--drivers/hid/hid-quirks.c7
-rw-r--r--drivers/hid/hid-steam.c2
-rw-r--r--drivers/hid/hid-thrustmaster.c1
-rw-r--r--drivers/hid/hid-uclogic-core.c7
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c7
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c4
-rw-r--r--drivers/hid/usbhid/hid-core.c25
-rw-r--r--drivers/hid/wacom_sys.c11
-rw-r--r--drivers/hv/Kconfig7
-rw-r--r--drivers/hv/channel.c65
-rw-r--r--drivers/hv/connection.c23
-rw-r--r--drivers/hv/hv_common.c86
-rw-r--r--drivers/hv/hyperv_vmbus.h6
-rw-r--r--drivers/hv/vmbus_drv.c202
-rw-r--r--drivers/hwmon/Kconfig40
-rw-r--r--drivers/hwmon/Makefile4
-rw-r--r--drivers/hwmon/aht10.c16
-rw-r--r--drivers/hwmon/amc6821.c50
-rw-r--r--drivers/hwmon/asus-ec-sensors.c53
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c7
-rw-r--r--drivers/hwmon/fam15h_power.c6
-rw-r--r--drivers/hwmon/gpio-fan.c103
-rw-r--r--drivers/hwmon/hwmon-vid.c4
-rw-r--r--drivers/hwmon/ina238.c214
-rw-r--r--drivers/hwmon/ina2xx.c8
-rw-r--r--drivers/hwmon/isl28022.c8
-rw-r--r--drivers/hwmon/k10temp.c9
-rw-r--r--drivers/hwmon/kbatt.c147
-rw-r--r--drivers/hwmon/kfan.c246
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/lm90.c2
-rw-r--r--drivers/hwmon/ltc2992.c30
-rw-r--r--drivers/hwmon/max6639.c16
-rw-r--r--drivers/hwmon/max77705-hwmon.c221
-rw-r--r--drivers/hwmon/nct7363.c2
-rw-r--r--drivers/hwmon/oxp-sensors.c716
-rw-r--r--drivers/hwmon/pmbus/Kconfig18
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/lm25066.c2
-rw-r--r--drivers/hwmon/pmbus/lt3074.c122
-rw-r--r--drivers/hwmon/pmbus/max34440.c119
-rw-r--r--drivers/hwmon/pmbus/mpq7932.c4
-rw-r--r--drivers/hwmon/pmbus/mpq8785.c91
-rw-r--r--drivers/hwmon/pmbus/pmbus.h19
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c69
-rw-r--r--drivers/hwmon/pmbus/tda38640.c2
-rw-r--r--drivers/hwmon/pmbus/tps25990.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c16
-rw-r--r--drivers/hwmon/pwm-fan.c4
-rw-r--r--drivers/hwmon/qnap-mcu-hwmon.c1
-rw-r--r--drivers/hwmon/spd5118.c357
-rw-r--r--drivers/hwmon/tmp102.c5
-rw-r--r--drivers/hwmon/xgene-hwmon.c39
-rw-r--r--drivers/hwtracing/intel_th/Kconfig1
-rw-r--r--drivers/hwtracing/intel_th/msu.c31
-rw-r--r--drivers/i2c/algos/i2c-algo-pcf.c3
-rw-r--r--drivers/i2c/busses/Kconfig15
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c3
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c223
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c2
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c3
-rw-r--r--drivers/i2c/busses/i2c-davinci.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-amdisp.c205
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c4
-rw-r--r--drivers/i2c/busses/i2c-imx.c12
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c7
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c102
-rw-r--r--drivers/i2c/busses/i2c-mlxbf.c87
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c18
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c166
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.h13
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-pasemi-core.c119
-rw-r--r--drivers/i2c/busses/i2c-pasemi-pci.c10
-rw-r--r--drivers/i2c/busses/i2c-piix4.c20
-rw-r--r--drivers/i2c/busses/i2c-powermac.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c19
-rw-r--r--drivers/i2c/busses/i2c-riic.c53
-rw-r--r--drivers/i2c/busses/i2c-rzv2m.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c3
-rw-r--r--drivers/i2c/busses/i2c-tegra.c5
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c5
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c3
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c24
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c24
-rw-r--r--drivers/i2c/busses/i2c-via.c15
-rw-r--r--drivers/i2c/busses/i2c-viai2c-wmt.c20
-rw-r--r--drivers/i2c/busses/i2c-viapro.c33
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c17
-rw-r--r--drivers/i2c/busses/i2c-virtio.c7
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c57
-rw-r--r--drivers/i2c/busses/i2c-xiic.c4
-rw-r--r--drivers/i2c/busses/scx200_acb.c6
-rw-r--r--drivers/i2c/i2c-atr.c572
-rw-r--r--drivers/i2c/i2c-core-base.c67
-rw-r--r--drivers/i2c/i2c-core-of.c1
-rw-r--r--drivers/i2c/i2c-core-slave.c12
-rw-r--r--drivers/i2c/i2c-core-smbus.c3
-rw-r--r--drivers/i2c/i2c-smbus.c21
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c10
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c6
-rw-r--r--drivers/i3c/master/Kconfig4
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c32
-rw-r--r--drivers/i3c/master/svc-i3c-master.c109
-rw-r--r--drivers/idle/intel_idle.c139
-rw-r--r--drivers/iio/accel/adis16201.c4
-rw-r--r--drivers/iio/accel/adxl355_core.c2
-rw-r--r--drivers/iio/accel/adxl367.c10
-rw-r--r--drivers/iio/accel/fxls8962af-core.c7
-rw-r--r--drivers/iio/adc/ad7266.c2
-rw-r--r--drivers/iio/adc/ad7380.c32
-rw-r--r--drivers/iio/adc/ad7606.c11
-rw-r--r--drivers/iio/adc/ad7606_spi.c2
-rw-r--r--drivers/iio/adc/ad7768-1.c2
-rw-r--r--drivers/iio/adc/dln2-adc.c2
-rw-r--r--drivers/iio/adc/qcom-spmi-iadc.c4
-rw-r--r--drivers/iio/adc/qcom-spmi-rradc.c4
-rw-r--r--drivers/iio/adc/rockchip_saradc.c17
-rw-r--r--drivers/iio/adc/stm32-adc-core.c7
-rw-r--r--drivers/iio/chemical/pms7003.c5
-rw-r--r--drivers/iio/chemical/sps30.c2
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c4
-rw-r--r--drivers/iio/imu/adis16550.c2
-rw-r--r--drivers/iio/imu/bmi270/bmi270_core.c6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c6
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c7
-rw-r--r--drivers/iio/light/hid-sensor-prox.c22
-rw-r--r--drivers/iio/light/opt3001.c5
-rw-r--r--drivers/iio/magnetometer/ak8974.c4
-rw-r--r--drivers/iio/pressure/mprls0025pa.h17
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c2
-rw-r--r--drivers/infiniband/core/cm.c78
-rw-r--r--drivers/infiniband/core/cm_trace.h2
-rw-r--r--drivers/infiniband/core/cma.c29
-rw-r--r--drivers/infiniband/core/cma_trace.h2
-rw-r--r--drivers/infiniband/core/device.c6
-rw-r--r--drivers/infiniband/core/iwcm.c29
-rw-r--r--drivers/infiniband/core/mad_rmpp.c2
-rw-r--r--drivers/infiniband/core/ucaps.c2
-rw-r--r--drivers/infiniband/core/umem_odp.c269
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/verbs.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.c20
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c7
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h1
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c10
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h1
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c18
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h1
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c2
-rw-r--r--drivers/infiniband/hw/hns/Makefile1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h20
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c26
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_trace.h216
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c2
-rw-r--r--drivers/infiniband/hw/irdma/main.c129
-rw-r--r--drivers/infiniband/hw/irdma/main.h3
-rw-r--r--drivers/infiniband/hw/irdma/osdep.h2
-rw-r--r--drivers/infiniband/hw/irdma/pble.c2
-rw-r--r--drivers/infiniband/hw/irdma/type.h4
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c1
-rw-r--r--drivers/infiniband/hw/mana/cq.c4
-rw-r--r--drivers/infiniband/hw/mana/device.c174
-rw-r--r--drivers/infiniband/hw/mana/main.c92
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h7
-rw-r--r--drivers/infiniband/hw/mana/mr.c29
-rw-r--r--drivers/infiniband/hw/mana/qp.c5
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c8
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c60
-rw-r--r--drivers/infiniband/hw/mlx5/main.c29
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h13
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c6
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c65
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c30
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c4
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c14
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c2
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig2
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h35
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c68
-rw-r--r--drivers/infiniband/sw/rxe/rxe_odp.c144
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c17
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c40
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h2
-rw-r--r--drivers/infiniband/sw/siw/Kconfig1
-rw-r--r--drivers/infiniband/sw/siw/siw.h24
-rw-r--r--drivers/infiniband/sw/siw/siw_cq.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c28
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.h1
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c8
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c65
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c127
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c19
-rw-r--r--drivers/input/joystick/magellan.c2
-rw-r--r--drivers/input/joystick/xpad.c101
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/gpio_keys.c6
-rw-r--r--drivers/input/keyboard/matrix_keypad.c30
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c4
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c25
-rw-r--r--drivers/input/misc/hisi_powerkey.c2
-rw-r--r--drivers/input/misc/ims-pcu.c6
-rw-r--r--drivers/input/misc/sparcspkr.c22
-rw-r--r--drivers/input/mouse/synaptics.c5
-rw-r--r--drivers/input/rmi4/rmi_f34.c135
-rw-r--r--drivers/input/touchscreen/cyttsp5.c7
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c7
-rw-r--r--drivers/iommu/Kconfig158
-rw-r--r--drivers/iommu/Makefile6
-rw-r--r--drivers/iommu/amd/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h2
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h10
-rw-r--r--drivers/iommu/amd/init.c105
-rw-r--r--drivers/iommu/amd/io_pgtable.c38
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c12
-rw-r--r--drivers/iommu/amd/iommu.c109
-rw-r--r--drivers/iommu/amd/ppr.c2
-rw-r--r--drivers/iommu/apple-dart.c3
-rw-r--r--drivers/iommu/arm/Kconfig144
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c92
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c161
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h39
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c32
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c9
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c44
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c6
-rw-r--r--drivers/iommu/dma-iommu.c497
-rw-r--r--drivers/iommu/exynos-iommu.c16
-rw-r--r--drivers/iommu/fsl_pamu_domain.c2
-rw-r--r--drivers/iommu/intel/Makefile7
-rw-r--r--drivers/iommu/intel/dmar.c14
-rw-r--r--drivers/iommu/intel/iommu.c280
-rw-r--r--drivers/iommu/intel/iommu.h62
-rw-r--r--drivers/iommu/intel/irq_remapping.c41
-rw-r--r--drivers/iommu/intel/nested.c20
-rw-r--r--drivers/iommu/intel/pasid.c13
-rw-r--r--drivers/iommu/intel/pasid.h1
-rw-r--r--drivers/iommu/intel/prq.c7
-rw-r--r--drivers/iommu/intel/svm.c9
-rw-r--r--drivers/iommu/io-pgtable-arm.c58
-rw-r--r--drivers/iommu/io-pgtable-dart.c23
-rw-r--r--drivers/iommu/iommu-pages.c119
-rw-r--r--drivers/iommu/iommu-pages.h195
-rw-r--r--drivers/iommu/iommu-sva.c18
-rw-r--r--drivers/iommu/iommu.c240
-rw-r--r--drivers/iommu/iommufd/device.c59
-rw-r--r--drivers/iommu/iommufd/eventq.c48
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h6
-rw-r--r--drivers/iommu/iommufd/selftest.c57
-rw-r--r--drivers/iommu/ipmmu-vmsa.c28
-rw-r--r--drivers/iommu/mtk_iommu.c63
-rw-r--r--drivers/iommu/riscv/Makefile2
-rw-r--r--drivers/iommu/riscv/iommu.c43
-rw-r--r--drivers/iommu/rockchip-iommu.c14
-rw-r--r--drivers/iommu/s390-iommu.c345
-rw-r--r--drivers/iommu/sun50i-iommu.c6
-rw-r--r--drivers/iommu/tegra-smmu.c111
-rw-r--r--drivers/iommu/virtio-iommu.c187
-rw-r--r--drivers/irqchip/Kconfig9
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/exynos-combiner.c2
-rw-r--r--drivers/irqchip/irq-al-fic.c20
-rw-r--r--drivers/irqchip/irq-alpine-msi.c7
-rw-r--r--drivers/irqchip/irq-apple-aic.c4
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c12
-rw-r--r--drivers/irqchip/irq-aspeed-i2c-ic.c2
-rw-r--r--drivers/irqchip/irq-aspeed-intc.c2
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c2
-rw-r--r--drivers/irqchip/irq-aspeed-vic.c4
-rw-r--r--drivers/irqchip/irq-ath79-misc.c4
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c2
-rw-r--r--drivers/irqchip/irq-atmel-aic.c19
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c28
-rw-r--r--drivers/irqchip/irq-bcm2712-mip.c7
-rw-r--r--drivers/irqchip/irq-bcm2835.c2
-rw-r--r--drivers/irqchip/irq-bcm2836.c2
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c2
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c2
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c24
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c10
-rw-r--r--drivers/irqchip/irq-clps711x.c4
-rw-r--r--drivers/irqchip/irq-crossbar.c6
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c5
-rw-r--r--drivers/irqchip/irq-csky-mpintc.c2
-rw-r--r--drivers/irqchip/irq-davinci-cp-intc.c6
-rw-r--r--drivers/irqchip/irq-digicolor.c2
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c5
-rw-r--r--drivers/irqchip/irq-econet-en751221.c310
-rw-r--r--drivers/irqchip/irq-ftintc010.c5
-rw-r--r--drivers/irqchip/irq-gic-v2m.c22
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its-msi-parent.c41
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c76
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c20
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-goldfish-pic.c7
-rw-r--r--drivers/irqchip/irq-hip04.c6
-rw-r--r--drivers/irqchip/irq-i8259.c4
-rw-r--r--drivers/irqchip/irq-idt3243x.c2
-rw-r--r--drivers/irqchip/irq-imgpdc.c2
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c4
-rw-r--r--drivers/irqchip/irq-imx-intmux.c2
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c2
-rw-r--r--drivers/irqchip/irq-imx-mu-msi.c2
-rw-r--r--drivers/irqchip/irq-ingenic-tcu.c13
-rw-r--r--drivers/irqchip/irq-ingenic.c4
-rw-r--r--drivers/irqchip/irq-ixp4xx.c2
-rw-r--r--drivers/irqchip/irq-jcore-aic.c5
-rw-r--r--drivers/irqchip/irq-keystone.c4
-rw-r--r--drivers/irqchip/irq-lan966x-oic.c20
-rw-r--r--drivers/irqchip/irq-loongarch-avec.c2
-rw-r--r--drivers/irqchip/irq-loongarch-cpu.c2
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c2
-rw-r--r--drivers/irqchip/irq-loongson-htvec.c2
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c11
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c4
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c2
-rw-r--r--drivers/irqchip/irq-lpc32xx.c4
-rw-r--r--drivers/irqchip/irq-ls-extirq.c4
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c10
-rw-r--r--drivers/irqchip/irq-ls1x.c4
-rw-r--r--drivers/irqchip/irq-mchp-eic.c5
-rw-r--r--drivers/irqchip/irq-meson-gpio.c2
-rw-r--r--drivers/irqchip/irq-mips-cpu.c13
-rw-r--r--drivers/irqchip/irq-mips-gic.c15
-rw-r--r--drivers/irqchip/irq-mmp.c12
-rw-r--r--drivers/irqchip/irq-mscc-ocelot.c7
-rw-r--r--drivers/irqchip/irq-msi-lib.c9
-rw-r--r--drivers/irqchip/irq-mst-intc.c4
-rw-r--r--drivers/irqchip/irq-mtk-cirq.c5
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c4
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c28
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c2
-rw-r--r--drivers/irqchip/irq-mvebu-odmi.c29
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c4
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c24
-rw-r--r--drivers/irqchip/irq-mxs.c4
-rw-r--r--drivers/irqchip/irq-nvic.c2
-rw-r--r--drivers/irqchip/irq-omap-intc.c4
-rw-r--r--drivers/irqchip/irq-or1k-pic.c4
-rw-r--r--drivers/irqchip/irq-orion.c6
-rw-r--r--drivers/irqchip/irq-owl-sirq.c4
-rw-r--r--drivers/irqchip/irq-pic32-evic.c6
-rw-r--r--drivers/irqchip/irq-pruss-intc.c7
-rw-r--r--drivers/irqchip/irq-qcom-mpm.c5
-rw-r--r--drivers/irqchip/irq-realtek-rtl.c2
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c6
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c4
-rw-r--r--drivers/irqchip/irq-renesas-rza1.c6
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c6
-rw-r--r--drivers/irqchip/irq-renesas-rzv2h.c13
-rw-r--r--drivers/irqchip/irq-riscv-imsic-platform.c2
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.c12
-rw-r--r--drivers/irqchip/irq-riscv-intc.c2
-rw-r--r--drivers/irqchip/irq-sa11x0.c2
-rw-r--r--drivers/irqchip/irq-sg2042-msi.c155
-rw-r--r--drivers/irqchip/irq-sni-exiu.c6
-rw-r--r--drivers/irqchip/irq-sp7021-intc.c4
-rw-r--r--drivers/irqchip/irq-starfive-jh8100-intc.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c25
-rw-r--r--drivers/irqchip/irq-stm32mp-exti.c9
-rw-r--r--drivers/irqchip/irq-sun4i.c2
-rw-r--r--drivers/irqchip/irq-sun6i-r.c4
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c11
-rw-r--r--drivers/irqchip/irq-tb10x.c21
-rw-r--r--drivers/irqchip/irq-tegra.c5
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c10
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c7
-rw-r--r--drivers/irqchip/irq-ts4800.c2
-rw-r--r--drivers/irqchip/irq-uniphier-aidet.c2
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c4
-rw-r--r--drivers/irqchip/irq-vf610-mscm-ir.c6
-rw-r--r--drivers/irqchip/irq-vic.c5
-rw-r--r--drivers/irqchip/irq-vt8500.c153
-rw-r--r--drivers/irqchip/irq-wpcm450-aic.c2
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c4
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c5
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c4
-rw-r--r--drivers/irqchip/irq-zevio.c4
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/leds/.kunitconfig4
-rw-r--r--drivers/leds/Kconfig11
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/blink/leds-lgm-sso.c6
-rw-r--r--drivers/leds/flash/Kconfig11
-rw-r--r--drivers/leds/flash/Makefile1
-rw-r--r--drivers/leds/flash/leds-tps6131x.c815
-rw-r--r--drivers/leds/led-class-flash.c15
-rw-r--r--drivers/leds/led-class-multicolor.c3
-rw-r--r--drivers/leds/led-core.c43
-rw-r--r--drivers/leds/led-test.c132
-rw-r--r--drivers/leds/led-triggers.c13
-rw-r--r--drivers/leds/leds-cros_ec.c21
-rw-r--r--drivers/leds/leds-lp8860.c214
-rw-r--r--drivers/leds/leds-pca9532.c11
-rw-r--r--drivers/leds/leds-pca955x.c28
-rw-r--r--drivers/leds/leds-pca995x.c2
-rw-r--r--drivers/leds/leds-tca6507.c11
-rw-r--r--drivers/leds/leds-turris-omnia.c4
-rw-r--r--drivers/leds/rgb/leds-mt6370-rgb.c16
-rw-r--r--drivers/leds/rgb/leds-ncp5623.c5
-rw-r--r--drivers/leds/rgb/leds-pwm-multicolor.c7
-rw-r--r--drivers/leds/trigger/ledtrig-backlight.c48
-rw-r--r--drivers/mailbox/Kconfig14
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/cv1800-mailbox.c220
-rw-r--r--drivers/mailbox/imx-mailbox.c21
-rw-r--r--drivers/mailbox/mailbox.c199
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c51
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c16
-rw-r--r--drivers/mailbox/qcom-ipcc.c4
-rw-r--r--drivers/mcb/mcb-parse.c2
-rw-r--r--drivers/md/Kconfig2
-rw-r--r--drivers/md/bcache/btree.c3
-rw-r--r--drivers/md/bcache/super.c6
-rw-r--r--drivers/md/dm-bufio.c200
-rw-r--r--drivers/md/dm-core.h4
-rw-r--r--drivers/md/dm-delay.c17
-rw-r--r--drivers/md/dm-dust.c4
-rw-r--r--drivers/md/dm-ebs-target.c3
-rw-r--r--drivers/md/dm-flakey.c118
-rw-r--r--drivers/md/dm-integrity.c18
-rw-r--r--drivers/md/dm-ioctl.c1
-rw-r--r--drivers/md/dm-linear.c4
-rw-r--r--drivers/md/dm-log-writes.c4
-rw-r--r--drivers/md/dm-mpath.c243
-rw-r--r--drivers/md/dm-raid.c3
-rw-r--r--drivers/md/dm-raid1.c5
-rw-r--r--drivers/md/dm-rq.c4
-rw-r--r--drivers/md/dm-stripe.c5
-rw-r--r--drivers/md/dm-switch.c4
-rw-r--r--drivers/md/dm-table.c274
-rw-r--r--drivers/md/dm-vdo/indexer/volume.c24
-rw-r--r--drivers/md/dm-verity-fec.c4
-rw-r--r--drivers/md/dm-verity-target.c15
-rw-r--r--drivers/md/dm-verity-verify-sig.c17
-rw-r--r--drivers/md/dm-zone.c98
-rw-r--r--drivers/md/dm-zoned-target.c3
-rw-r--r--drivers/md/dm.c73
-rw-r--r--drivers/md/dm.h6
-rw-r--r--drivers/md/md-bitmap.c5
-rw-r--r--drivers/md/md.c190
-rw-r--r--drivers/md/md.h18
-rw-r--r--drivers/md/persistent-data/Kconfig2
-rw-r--r--drivers/md/raid1.c29
-rw-r--r--drivers/md/raid10.c10
-rw-r--r--drivers/md/raid5.c8
-rw-r--r--drivers/media/cec/i2c/Kconfig1
-rw-r--r--drivers/media/cec/platform/cros-ec/cros-ec-cec.c5
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c4
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c4
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c11
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c4
-rw-r--r--drivers/media/dvb-frontends/dib8000.c5
-rw-r--r--drivers/media/i2c/Kconfig48
-rw-r--r--drivers/media/i2c/Makefile4
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c4
-rw-r--r--drivers/media/i2c/ccs-pll.c53
-rw-r--r--drivers/media/i2c/ccs-pll.h29
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c55
-rw-r--r--drivers/media/i2c/ccs/ccs-quirk.c3
-rw-r--r--drivers/media/i2c/ccs/ccs-reg-access.c9
-rw-r--r--drivers/media/i2c/ccs/ccs.h2
-rw-r--r--drivers/media/i2c/ds90ub913.c92
-rw-r--r--drivers/media/i2c/ds90ub953.c252
-rw-r--r--drivers/media/i2c/ds90ub953.h104
-rw-r--r--drivers/media/i2c/ds90ub960.c2210
-rw-r--r--drivers/media/i2c/imx219.c38
-rw-r--r--drivers/media/i2c/imx283.c2
-rw-r--r--drivers/media/i2c/imx334.c1035
-rw-r--r--drivers/media/i2c/imx335.c5
-rw-r--r--drivers/media/i2c/lt6911uxe.c4
-rw-r--r--drivers/media/i2c/max96714.c2
-rw-r--r--drivers/media/i2c/max96717.c2
-rw-r--r--drivers/media/i2c/ov02c10.c1013
-rw-r--r--drivers/media/i2c/ov02e10.c969
-rw-r--r--drivers/media/i2c/ov08x40.c1324
-rw-r--r--drivers/media/i2c/ov13b10.c176
-rw-r--r--drivers/media/i2c/ov2740.c4
-rw-r--r--drivers/media/i2c/ov5675.c5
-rw-r--r--drivers/media/i2c/ov8856.c9
-rw-r--r--drivers/media/i2c/rdacm20.c7
-rw-r--r--drivers/media/i2c/rdacm21.c7
-rw-r--r--drivers/media/i2c/tc358743.c4
-rw-r--r--drivers/media/i2c/vd55g1.c1965
-rw-r--r--drivers/media/i2c/vd56g3.c1586
-rw-r--r--drivers/media/pci/Kconfig1
-rw-r--r--drivers/media/pci/Makefile2
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c2
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c5
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-bus.c2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-bus.h7
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-buttress.c6
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-buttress.h5
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.c4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.h3
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.c45
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.h10
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.c5
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.h8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.c8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.h4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6.c13
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.c21
-rw-r--r--drivers/media/pci/pt3/pt3.c17
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c4
-rw-r--r--drivers/media/pci/sta2x11/Kconfig16
-rw-r--r--drivers/media/pci/sta2x11/Makefile2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c1270
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.h29
-rw-r--r--drivers/media/pci/tw5864/tw5864-core.c13
-rw-r--r--drivers/media/pci/zoran/zoran_card.c2
-rw-r--r--drivers/media/pci/zoran/zr36016.c2
-rw-r--r--drivers/media/pci/zoran/zr36050.c2
-rw-r--r--drivers/media/pci/zoran/zr36060.c2
-rw-r--r--drivers/media/platform/amlogic/Kconfig1
-rw-r--r--drivers/media/platform/amlogic/Makefile2
-rw-r--r--drivers/media/platform/amlogic/c3/Kconfig5
-rw-r--r--drivers/media/platform/amlogic/c3/Makefile5
-rw-r--r--drivers/media/platform/amlogic/c3/isp/Kconfig18
-rw-r--r--drivers/media/platform/amlogic/c3/isp/Makefile10
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c804
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-common.h340
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-core.c641
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-dev.c421
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-params.c1008
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-regs.h618
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-resizer.c892
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c326
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/Kconfig16
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/Makefile3
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/c3-mipi-adap.c842
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/Kconfig16
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/Makefile3
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c828
-rw-r--r--drivers/media/platform/amphion/vdec.c2
-rw-r--r--drivers/media/platform/amphion/vpu.h1
-rw-r--r--drivers/media/platform/amphion/vpu_core.c7
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c39
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c8
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc.c9
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c5
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h4
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c73
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h1
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h8
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c33
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h7
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c19
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c652
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c2
-rw-r--r--drivers/media/platform/nuvoton/npcm-video.c19
-rw-r--r--drivers/media/platform/nxp/dw100/dw100.c8
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h1
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c132
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h5
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c14
-rw-r--r--drivers/media/platform/qcom/camss/Makefile2
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-680.c422
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c4
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h1
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c131
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c28
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h1
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-680.c244
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c6
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h1
-rw-r--r--drivers/media/platform/qcom/camss/camss.c359
-rw-r--r--drivers/media/platform/qcom/camss/camss.h1
-rw-r--r--drivers/media/platform/qcom/iris/Makefile4
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.h2
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.c4
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_common.h4
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_gen2.c (renamed from drivers/media/platform/qcom/iris/iris_platform_sm8550.c)119
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_qcs8300.h124
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8650.h13
-rw-r--r--drivers/media/platform/qcom/iris/iris_probe.c59
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu2.c1
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3.c122
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3x.c275
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.c4
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.h3
-rw-r--r--drivers/media/platform/qcom/venus/core.c16
-rw-r--r--drivers/media/platform/qcom/venus/core.h2
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c38
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c18
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe.c1
-rw-r--r--drivers/media/platform/renesas/Kconfig18
-rw-r--r--drivers/media/platform/renesas/Makefile2
-rw-r--r--drivers/media/platform/renesas/rcar-csi2.c8
-rw-r--r--drivers/media/platform/renesas/rcar-isp/Kconfig18
-rw-r--r--drivers/media/platform/renesas/rcar-isp/Makefile4
-rw-r--r--drivers/media/platform/renesas/rcar-isp/csisp.c (renamed from drivers/media/platform/renesas/rcar-isp.c)57
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c8
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c182
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c23
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-vin.h41
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c139
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru-regs.h91
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h39
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c165
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c13
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c295
-rw-r--r--drivers/media/platform/renesas/vsp1/Makefile2
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1.h4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_brx.c9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_dl.c7
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.c30
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.h8
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c70
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.c30
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.h3
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_hsit.c11
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_iif.c121
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_iif.h29
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.c187
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.h6
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_regs.h8
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rpf.c38
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rwpf.c51
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_sru.c9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_uds.c9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c50
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_wpf.c53
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c2
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h7
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-capture.c6
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.c8
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.h4
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-capture.c12
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h1
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c14
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h1
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c5
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-debug.c8
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c14
-rw-r--r--drivers/media/platform/st/sti/delta/delta-debug.c8
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c18
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c10
-rw-r--r--drivers/media/platform/synopsys/hdmirx/Kconfig1
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c14
-rw-r--r--drivers/media/platform/ti/am437x/am437x-vpfe.c2
-rw-r--r--drivers/media/platform/ti/cal/cal-camerarx.c266
-rw-r--r--drivers/media/platform/ti/cal/cal-video.c157
-rw-r--r--drivers/media/platform/ti/cal/cal.c45
-rw-r--r--drivers/media/platform/ti/cal/cal.h3
-rw-r--r--drivers/media/platform/ti/davinci/vpif.c4
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccdc.c8
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.c6
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.c19
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c4
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c1
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c4
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu_hw.c34
-rw-r--r--drivers/media/rc/keymaps/rc-hauppauge.c42
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_channel.c2
-rw-r--r--drivers/media/test-drivers/vim2m.c327
-rw-r--r--drivers/media/test-drivers/vivid/Kconfig3
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-cap.c20
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c8
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c3
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c2
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c7
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.c167
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.h6
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c91
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c38
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c115
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h5
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c99
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c14
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c7
-rw-r--r--drivers/memory/Kconfig23
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/bt1-l2-ctl.c2
-rw-r--r--drivers/memory/mtk-smi.c52
-rw-r--r--drivers/memory/omap-gpmc.c21
-rw-r--r--drivers/memory/renesas-rpc-if-regs.h147
-rw-r--r--drivers/memory/renesas-rpc-if.c714
-rw-r--r--drivers/memory/renesas-xspi-if-regs.h105
-rw-r--r--drivers/memory/stm32_omm.c479
-rw-r--r--drivers/memory/tegra/Kconfig8
-rw-r--r--drivers/mfd/88pm860x-core.c4
-rw-r--r--drivers/mfd/88pm886.c14
-rw-r--r--drivers/mfd/Kconfig55
-rw-r--r--drivers/mfd/Makefile6
-rw-r--r--drivers/mfd/aat2870-core.c4
-rw-r--r--drivers/mfd/ab8500-core.c6
-rw-r--r--drivers/mfd/arizona-irq.c3
-rw-r--r--drivers/mfd/as3722.c4
-rw-r--r--drivers/mfd/bcm590xx.c66
-rw-r--r--drivers/mfd/db8500-prcmu.c6
-rw-r--r--drivers/mfd/exynos-lpass.c31
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c5
-rw-r--r--drivers/mfd/lp8788-irq.c2
-rw-r--r--drivers/mfd/max14577.c1
-rw-r--r--drivers/mfd/max77541.c2
-rw-r--r--drivers/mfd/max77705.c4
-rw-r--r--drivers/mfd/max77759.c690
-rw-r--r--drivers/mfd/max8925-core.c4
-rw-r--r--drivers/mfd/max8925-i2c.c1
-rw-r--r--drivers/mfd/max8997-irq.c4
-rw-r--r--drivers/mfd/max8998-irq.c2
-rw-r--r--drivers/mfd/mt6358-irq.c6
-rw-r--r--drivers/mfd/mt6397-irq.c6
-rw-r--r--drivers/mfd/qcom-pm8xxx.c6
-rw-r--r--drivers/mfd/rohm-bd96801.c565
-rw-r--r--drivers/mfd/rt5033.c6
-rw-r--r--drivers/mfd/sec-acpm.c442
-rw-r--r--drivers/mfd/sec-common.c301
-rw-r--r--drivers/mfd/sec-core.c481
-rw-r--r--drivers/mfd/sec-core.h23
-rw-r--r--drivers/mfd/sec-i2c.c239
-rw-r--r--drivers/mfd/sec-irq.c460
-rw-r--r--drivers/mfd/sm501.c50
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c5
-rw-r--r--drivers/mfd/stm32-lptimer.c33
-rw-r--r--drivers/mfd/stmfx.c2
-rw-r--r--drivers/mfd/stmpe-spi.c2
-rw-r--r--drivers/mfd/stmpe.c4
-rw-r--r--drivers/mfd/tc3589x.c6
-rw-r--r--drivers/mfd/tps65010.c9
-rw-r--r--drivers/mfd/tps65217.c2
-rw-r--r--drivers/mfd/tps6586x.c2
-rw-r--r--drivers/mfd/twl4030-irq.c4
-rw-r--r--drivers/mfd/twl6030-irq.c5
-rw-r--r--drivers/mfd/ucb1x00-core.c7
-rw-r--r--drivers/mfd/wm831x-irq.c15
-rw-r--r--drivers/mfd/wm8994-irq.c4
-rw-r--r--drivers/misc/Kconfig12
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cs5535-mfgpt.c1
-rw-r--r--drivers/misc/hi6421v600-irq.c5
-rw-r--r--drivers/misc/lkdtm/heap.c17
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c8
-rw-r--r--drivers/misc/mei/hw-me-regs.h1
-rw-r--r--drivers/misc/mei/pci-me.c1
-rw-r--r--drivers/misc/mei/vsc-tp.c40
-rw-r--r--drivers/misc/pci_endpoint_test.c21
-rw-r--r--drivers/misc/ti_fpc202.c438
-rw-r--r--drivers/misc/uacce/uacce.c40
-rw-r--r--drivers/mmc/core/block.c16
-rw-r--r--drivers/mmc/core/card.h6
-rw-r--r--drivers/mmc/core/core.c48
-rw-r--r--drivers/mmc/core/core.h10
-rw-r--r--drivers/mmc/core/host.h8
-rw-r--r--drivers/mmc/core/mmc.c103
-rw-r--r--drivers/mmc/core/mmc_ops.c6
-rw-r--r--drivers/mmc/core/mmc_ops.h2
-rw-r--r--drivers/mmc/core/mmc_test.c16
-rw-r--r--drivers/mmc/core/queue.c6
-rw-r--r--drivers/mmc/core/quirks.h10
-rw-r--r--drivers/mmc/core/sd.c65
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/core/slot-gpio.c8
-rw-r--r--drivers/mmc/host/Kconfig16
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/alcor.c3
-rw-r--r--drivers/mmc/host/bcm2835.c8
-rw-r--r--drivers/mmc/host/cavium-thunderx.c4
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/mtk-sd.c219
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c30
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c243
-rw-r--r--drivers/mmc/host/sdhci-msm.c16
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c40
-rw-r--r--drivers/mmc/host/sdhci-of-k1.c304
-rw-r--r--drivers/mmc/host/sdhci-omap.c2
-rw-r--r--drivers/mmc/host/sdhci.c12
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sdhci_am654.c35
-rw-r--r--drivers/mmc/host/sunplus-mmc.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c6
-rw-r--r--drivers/mtd/devices/Kconfig2
-rw-r--r--drivers/mtd/inftlcore.c9
-rw-r--r--drivers/mtd/mtdchar.c2
-rw-r--r--drivers/mtd/mtdcore.c152
-rw-r--r--drivers/mtd/mtdcore.h2
-rw-r--r--drivers/mtd/mtdpart.c16
-rw-r--r--drivers/mtd/nand/Makefile3
-rw-r--r--drivers/mtd/nand/ecc-mxic.c2
-rw-r--r--drivers/mtd/nand/qpic_common.c8
-rw-r--r--drivers/mtd/nand/raw/Kconfig9
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c5
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c248
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c6
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c13
-rw-r--r--drivers/mtd/nand/raw/loongson1-nand-controller.c836
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c18
-rw-r--r--drivers/mtd/nand/raw/r852.c3
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c2
-rw-r--r--drivers/mtd/nand/spi/alliancememory.c20
-rw-r--r--drivers/mtd/nand/spi/ato.c14
-rw-r--r--drivers/mtd/nand/spi/core.c20
-rw-r--r--drivers/mtd/nand/spi/esmt.c22
-rw-r--r--drivers/mtd/nand/spi/foresee.c16
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c60
-rw-r--r--drivers/mtd/nand/spi/macronix.c20
-rw-r--r--drivers/mtd/nand/spi/micron.c38
-rw-r--r--drivers/mtd/nand/spi/paragon.c20
-rw-r--r--drivers/mtd/nand/spi/skyhigh.c20
-rw-r--r--drivers/mtd/nand/spi/toshiba.c22
-rw-r--r--drivers/mtd/nand/spi/winbond.c128
-rw-r--r--drivers/mtd/nand/spi/xtx.c20
-rw-r--r--drivers/mtd/spi-nor/macronix.c73
-rw-r--r--drivers/net/Kconfig15
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bareudp.c16
-rw-r--r--drivers/net/bonding/bond_alb.c8
-rw-r--r--drivers/net/bonding/bond_main.c193
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c8
-rw-r--r--drivers/net/can/dev/dev.c12
-rw-r--r--drivers/net/can/dev/netlink.c74
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c4
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c10
-rw-r--r--drivers/net/can/kvaser_pciefd.c188
-rw-r--r--drivers/net/can/m_can/m_can.c11
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c6
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c280
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c13
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-timestamp.c2
-rw-r--r--drivers/net/can/slcan/slcan-core.c26
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c46
-rw-r--r--drivers/net/can/usb/esd_usb.c6
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c4
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c6
-rw-r--r--drivers/net/can/usb/gs_usb.c8
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c6
-rw-r--r--drivers/net/can/xilinx_can.c16
-rw-r--r--drivers/net/dsa/b53/b53_common.c278
-rw-r--r--drivers/net/dsa/b53/b53_priv.h4
-rw-r--r--drivers/net/dsa/b53/b53_regs.h21
-rw-r--r--drivers/net/dsa/bcm_sf2.c2
-rw-r--r--drivers/net/dsa/dsa_loop.c2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.h2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c24
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h5
-rw-r--r--drivers/net/dsa/microchip/Kconfig1
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c194
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c274
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h44
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c30
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.h7
-rw-r--r--drivers/net/dsa/mt7530-mmio.c1
-rw-r--r--drivers/net/dsa/mt7530.c276
-rw-r--r--drivers/net/dsa/mt7530.h60
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c15
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h16
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c11
-rw-r--r--drivers/net/dsa/ocelot/felix.c11
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c5
-rw-r--r--drivers/net/dsa/qca/ar9331.c4
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c4
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c6
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c5
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c6
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c46
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h7
-rw-r--r--drivers/net/ethernet/airoha/Kconfig7
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c514
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h102
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c188
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.h4
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c485
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe_debugfs.c9
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h203
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c2
-rw-r--r--drivers/net/ethernet/amd/pds_core/adminq.c40
-rw-r--r--drivers/net/ethernet/amd/pds_core/auxbus.c4
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c16
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h4
-rw-r--r--drivers/net/ethernet/amd/pds_core/debugfs.c5
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h122
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dcb.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c126
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c292
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c128
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c204
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-platform.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-smn.h30
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h132
-rw-r--r--drivers/net/ethernet/apple/bmac.c60
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig6
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c176
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.h78
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c36
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c13
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h3
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c289
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c31
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c40
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c29
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c277
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h32
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c23
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c25
-rw-r--r--drivers/net/ethernet/cavium/Kconfig2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c18
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c3
-rw-r--r--drivers/net/ethernet/cortina/gemini.c37
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c2
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c16
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h20
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c30
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c41
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c41
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig12
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c168
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h51
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c90
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h20
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_hw.h12
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_pf.c369
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c50
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c78
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c107
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h14
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.c93
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.h3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c12
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp.c462
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp_private.h104
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c53
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c9
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c24
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c82
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h1
-rw-r--r--drivers/net/ethernet/huawei/Kconfig1
-rw-r--r--drivers/net/ethernet/huawei/Makefile1
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Kconfig20
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Makefile21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.c53
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.h27
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c25
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h53
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c32
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h13
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h113
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c24
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h81
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h58
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_irq.c62
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.c414
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.h21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_main.c354
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c16
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h15
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h13
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h105
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c78
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c233
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h41
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h82
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h120
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c68
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h54
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.c341
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.h90
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c670
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.h135
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.c29
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.h76
-rw-r--r--drivers/net/ethernet/ibm/Kconfig13
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c358
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h65
-rw-r--r--drivers/net/ethernet/intel/Kconfig3
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c75
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c7
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c45
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h67
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.c47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c207
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc_int.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c71
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c81
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c65
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c82
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sbq_cmd.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c266
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c54
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c5
-rw-r--r--drivers/net/ethernet/intel/idpf/Kconfig1
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h39
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_api.h3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c14
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c67
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h4
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h13
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c161
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c10
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c873
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.h362
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c25
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c189
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h18
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c161
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h84
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c615
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h314
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c78
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c20
-rw-r--r--drivers/net/ethernet/intel/igb/igb_xsk.c1
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h17
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h61
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c81
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c91
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c129
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h16
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c247
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h52
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.c557
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/region.c290
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h24
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c56
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c1339
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c257
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c707
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c51
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c282
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h175
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c21
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c58
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c6
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c58
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h88
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c37
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c37
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c42
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c14
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.c3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c45
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c231
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h72
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c13
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c127
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c467
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c414
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1348
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c290
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c716
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c515
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c191
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c122
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c3
-rw-r--r--drivers/net/ethernet/meta/Kconfig1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h14
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h36
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c258
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c178
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c500
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h56
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c335
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h48
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c142
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c13
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c52
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c24
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c18
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c50
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c62
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h7
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h5
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c63
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c27
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c19
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c132
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c6
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c33
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c48
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c99
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h17
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c25
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c138
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c7
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig1
-rw-r--r--drivers/net/ethernet/realtek/r8169.h7
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c434
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c205
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h15
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c70
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c11
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c2
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c108
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c146
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c88
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c58
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c295
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c89
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h62
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c174
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c157
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c374
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h64
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c45
-rw-r--r--drivers/net/ethernet/ti/cpsw.c26
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c70
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h6
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c121
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c48
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c16
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c8
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.h58
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h33
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c110
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig4
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c357
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h5
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c194
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.c176
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.h77
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.c30
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.c909
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.h18
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h118
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c97
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c5
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c385
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h15
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c38
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c23
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c12
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c64
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c209
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c47
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h118
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c8
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c61
-rw-r--r--drivers/net/geneve.c16
-rw-r--r--drivers/net/gtp.c18
-rw-r--r--drivers/net/hamradio/baycom_epp.c5
-rw-r--r--drivers/net/hyperv/hyperv_net.h13
-rw-r--r--drivers/net/hyperv/netvsc.c57
-rw-r--r--drivers/net/hyperv/netvsc_drv.c68
-rw-r--r--drivers/net/hyperv/rndis_filter.c24
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.11.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.2.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.5.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.9.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.0.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.5.c1
-rw-r--r--drivers/net/ipa/ipa_data.h2
-rw-r--r--drivers/net/ipa/ipa_mem.c21
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c2
-rw-r--r--drivers/net/macvlan.c20
-rw-r--r--drivers/net/mctp/mctp-usb.c2
-rw-r--r--drivers/net/mdio/Kconfig48
-rw-r--r--drivers/net/mdio/Makefile1
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c2
-rw-r--r--drivers/net/mdio/mdio-mux-meson-gxl.c3
-rw-r--r--drivers/net/mdio/mdio-realtek-rtl9300.c522
-rw-r--r--drivers/net/mdio/mdio-thunder.c10
-rw-r--r--drivers/net/mdio/of_mdio.c2
-rw-r--r--drivers/net/netdevsim/ipsec.c15
-rw-r--r--drivers/net/netdevsim/netdev.c4
-rw-r--r--drivers/net/ovpn/Makefile22
-rw-r--r--drivers/net/ovpn/bind.c55
-rw-r--r--drivers/net/ovpn/bind.h101
-rw-r--r--drivers/net/ovpn/crypto.c210
-rw-r--r--drivers/net/ovpn/crypto.h145
-rw-r--r--drivers/net/ovpn/crypto_aead.c389
-rw-r--r--drivers/net/ovpn/crypto_aead.h29
-rw-r--r--drivers/net/ovpn/io.c458
-rw-r--r--drivers/net/ovpn/io.h34
-rw-r--r--drivers/net/ovpn/main.c279
-rw-r--r--drivers/net/ovpn/main.h14
-rw-r--r--drivers/net/ovpn/netlink-gen.c213
-rw-r--r--drivers/net/ovpn/netlink-gen.h41
-rw-r--r--drivers/net/ovpn/netlink.c1258
-rw-r--r--drivers/net/ovpn/netlink.h18
-rw-r--r--drivers/net/ovpn/ovpnpriv.h55
-rw-r--r--drivers/net/ovpn/peer.c1364
-rw-r--r--drivers/net/ovpn/peer.h163
-rw-r--r--drivers/net/ovpn/pktid.c129
-rw-r--r--drivers/net/ovpn/pktid.h86
-rw-r--r--drivers/net/ovpn/proto.h118
-rw-r--r--drivers/net/ovpn/skb.h61
-rw-r--r--drivers/net/ovpn/socket.c233
-rw-r--r--drivers/net/ovpn/socket.h49
-rw-r--r--drivers/net/ovpn/stats.c21
-rw-r--r--drivers/net/ovpn/stats.h47
-rw-r--r--drivers/net/ovpn/tcp.c598
-rw-r--r--drivers/net/ovpn/tcp.h36
-rw-r--r--drivers/net/ovpn/udp.c449
-rw-r--r--drivers/net/ovpn/udp.h25
-rw-r--r--drivers/net/pfcp.c23
-rw-r--r--drivers/net/phy/Kconfig29
-rw-r--r--drivers/net/phy/Makefile22
-rw-r--r--drivers/net/phy/air_en8811h.c103
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c6
-rw-r--r--drivers/net/phy/as21xxx.c1087
-rw-r--r--drivers/net/phy/bcm87xx.c14
-rw-r--r--drivers/net/phy/dp83640.c13
-rw-r--r--drivers/net/phy/dp83822.c35
-rw-r--r--drivers/net/phy/dp83867.c76
-rw-r--r--drivers/net/phy/fixed_phy.c40
-rw-r--r--drivers/net/phy/icplus.c6
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c111
-rw-r--r--drivers/net/phy/marvell10g.c12
-rw-r--r--drivers/net/phy/mdio_bus.c476
-rw-r--r--drivers/net/phy/mdio_bus_provider.c484
-rw-r--r--drivers/net/phy/mdio_device.c1
-rw-r--r--drivers/net/phy/mediatek/Kconfig20
-rw-r--r--drivers/net/phy/mediatek/Makefile3
-rw-r--r--drivers/net/phy/mediatek/mtk-2p5ge.c321
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c91
-rw-r--r--drivers/net/phy/micrel.c30
-rw-r--r--drivers/net/phy/microchip.c48
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c5
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c20
-rw-r--r--drivers/net/phy/mxl-86110.c616
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c54
-rw-r--r--drivers/net/phy/nxp-tja11xx.c6
-rw-r--r--drivers/net/phy/phy_device.c159
-rw-r--r--drivers/net/phy/phy_led_triggers.c23
-rw-r--r--drivers/net/phy/phylink.c45
-rw-r--r--drivers/net/phy/realtek/realtek_main.c337
-rw-r--r--drivers/net/phy/teranetics.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c25
-rw-r--r--drivers/net/ppp/ppp_synctty.c5
-rw-r--r--drivers/net/tap.c14
-rw-r--r--drivers/net/team/team_core.c8
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/Kconfig4
-rw-r--r--drivers/net/usb/aqc111.c10
-rw-r--r--drivers/net/usb/asix.h1
-rw-r--r--drivers/net/usb/asix_common.c22
-rw-r--r--drivers/net/usb/asix_devices.c17
-rw-r--r--drivers/net/usb/lan78xx.c471
-rw-r--r--drivers/net/usb/r8152.c98
-rw-r--r--drivers/net/usb/rndis_host.c16
-rw-r--r--drivers/net/veth.c57
-rw-r--r--drivers/net/virtio_net.c92
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c9
-rw-r--r--drivers/net/vmxnet3/vmxnet3_xdp.c2
-rw-r--r--drivers/net/vrf.c4
-rw-r--r--drivers/net/vxlan/vxlan_core.c560
-rw-r--r--drivers/net/vxlan/vxlan_private.h11
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c18
-rw-r--r--drivers/net/wireguard/allowedips.c102
-rw-r--r--drivers/net/wireguard/allowedips.h4
-rw-r--r--drivers/net/wireguard/cookie.c4
-rw-r--r--drivers/net/wireguard/netlink.c47
-rw-r--r--drivers/net/wireguard/noise.c4
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c48
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c32
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c62
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h34
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c22
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c302
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h16
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c25
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c52
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c14
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c50
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig10
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.c1155
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c103
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h18
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c329
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h169
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c497
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h17
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c154
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h53
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c1097
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h8
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c596
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h41
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c209
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c153
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h13
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c121
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h27
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c511
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h30
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c1439
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h56
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c66
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c238
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c526
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c558
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h119
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h4
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c44
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c308
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h87
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c378
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c227
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c168
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c228
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c169
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c170
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c150
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h192
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c120
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h492
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h)59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c154
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c263
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h247
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/fw.c124
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c102
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c235
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c65
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c105
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.c122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c204
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c194
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c)200
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c2378
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h123
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c187
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c150
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c308
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c228
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c167
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c2
-rw-r--r--drivers/net/wireless/intersil/p54/p54.h1
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c42
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c74
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c21
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c44
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c51
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c158
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/testmode.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c196
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c120
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c120
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c195
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h51
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c1
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c35
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c61
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723ds.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723du.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.c59
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812au.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.c12
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814ae.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814au.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821au.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c5
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c27
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c63
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c1037
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h190
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c418
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c493
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h144
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c174
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c480
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h94
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c58
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c38
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c36
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c131
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c147
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c46
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c24
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c30
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c32
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c296
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h31
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c3
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c4
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c12
-rw-r--r--drivers/net/xen-netfront.c17
-rw-r--r--drivers/nfc/s3fwrn5/core.c2
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c2
-rw-r--r--drivers/nfc/s3fwrn5/firmware.h2
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c2
-rw-r--r--drivers/nfc/s3fwrn5/nci.c2
-rw-r--r--drivers/nfc/s3fwrn5/nci.h2
-rw-r--r--drivers/nfc/s3fwrn5/phy_common.c4
-rw-r--r--drivers/nfc/s3fwrn5/phy_common.h4
-rw-r--r--drivers/nfc/s3fwrn5/s3fwrn5.h2
-rw-r--r--drivers/nfc/virtual_ncidev.c2
-rw-r--r--drivers/ntb/msi.c22
-rw-r--r--drivers/nvme/common/auth.c15
-rw-r--r--drivers/nvme/host/Kconfig5
-rw-r--r--drivers/nvme/host/auth.c30
-rw-r--r--drivers/nvme/host/core.c247
-rw-r--r--drivers/nvme/host/fc.c13
-rw-r--r--drivers/nvme/host/multipath.c225
-rw-r--r--drivers/nvme/host/nvme.h34
-rw-r--r--drivers/nvme/host/pci.c314
-rw-r--r--drivers/nvme/host/sysfs.c35
-rw-r--r--drivers/nvme/host/tcp.c171
-rw-r--r--drivers/nvme/target/Kconfig3
-rw-r--r--drivers/nvme/target/admin-cmd.c31
-rw-r--r--drivers/nvme/target/auth.c26
-rw-r--r--drivers/nvme/target/core.c97
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/fabrics-cmd.c12
-rw-r--r--drivers/nvme/target/fc.c156
-rw-r--r--drivers/nvme/target/fcloop.c497
-rw-r--r--drivers/nvme/target/loop.c29
-rw-r--r--drivers/nvme/target/nvmet.h24
-rw-r--r--drivers/nvme/target/pci-epf.c141
-rw-r--r--drivers/nvme/target/rdma.c10
-rw-r--r--drivers/nvme/target/tcp.c105
-rw-r--r--drivers/nvmem/Kconfig12
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/core.c40
-rw-r--r--drivers/nvmem/max77759-nvmem.c145
-rw-r--r--drivers/nvmem/qfprom.c26
-rw-r--r--drivers/nvmem/rockchip-otp.c17
-rw-r--r--drivers/of/device.c31
-rw-r--r--drivers/of/fdt.c34
-rw-r--r--drivers/of/kexec.c42
-rw-r--r--drivers/of/of_reserved_mem.c80
-rw-r--r--drivers/of/unittest.c10
-rw-r--r--drivers/opp/core.c428
-rw-r--r--drivers/opp/cpu.c30
-rw-r--r--drivers/opp/of.c205
-rw-r--r--drivers/opp/opp.h1
-rw-r--r--drivers/pci/Kconfig1
-rw-r--r--drivers/pci/bus.c4
-rw-r--r--drivers/pci/controller/Kconfig11
-rw-r--r--drivers/pci/controller/cadence/Kconfig16
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c40
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c36
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c124
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c12
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h25
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c8
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c4
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c213
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c7
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-amd-mdb.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-debugfs.c252
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c30
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c83
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c29
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h32
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c106
-rw-r--r--drivers/pci/controller/dwc/pcie-hisi.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c9
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c3
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-visconti.c4
-rw-r--r--drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c12
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c11
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil.h2
-rw-r--r--drivers/pci/controller/pci-aardvark.c14
-rw-r--r--drivers/pci/controller/pci-ftpci100.c4
-rw-r--r--drivers/pci/controller/pci-host-common.c30
-rw-r--r--drivers/pci/controller/pci-host-common.h20
-rw-r--r--drivers/pci/controller/pci-host-generic.c2
-rw-r--r--drivers/pci/controller/pci-hyperv.c113
-rw-r--r--drivers/pci/controller/pci-mvebu.c32
-rw-r--r--drivers/pci/controller/pci-tegra.c63
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c2
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c1
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c53
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c4
-rw-r--r--drivers/pci/controller/pcie-altera.c2
-rw-r--r--drivers/pci/controller/pcie-apple.c312
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c4
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c6
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c9
-rw-r--r--drivers/pci/controller/pcie-mediatek.c6
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c8
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c10
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c4
-rw-r--r--drivers/pci/controller/pcie-rockchip.h7
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c10
-rw-r--r--drivers/pci/controller/pcie-xilinx-dma-pl.c14
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c11
-rw-r--r--drivers/pci/controller/pcie-xilinx.c5
-rw-r--r--drivers/pci/controller/plda/pcie-microchip-host.c1
-rw-r--r--drivers/pci/controller/plda/pcie-plda-host.c16
-rw-r--r--drivers/pci/devres.c215
-rw-r--r--drivers/pci/ecam.c2
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c26
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c26
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c22
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c73
-rw-r--r--drivers/pci/hotplug/pciehp.h1
-rw-r--r--drivers/pci/hotplug/pciehp_core.c29
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c78
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c3
-rw-r--r--drivers/pci/iomap.c16
-rw-r--r--drivers/pci/msi/api.c8
-rw-r--r--drivers/pci/msi/msi.c182
-rw-r--r--drivers/pci/msi/msi.h2
-rw-r--r--drivers/pci/of.c44
-rw-r--r--drivers/pci/p2pdma.c38
-rw-r--r--drivers/pci/pci-acpi.c23
-rw-r--r--drivers/pci/pci-driver.c8
-rw-r--r--drivers/pci/pci-sysfs.c4
-rw-r--r--drivers/pci/pci.c92
-rw-r--r--drivers/pci/pci.h82
-rw-r--r--drivers/pci/pcie/aer.c442
-rw-r--r--drivers/pci/pcie/bwctrl.c86
-rw-r--r--drivers/pci/pcie/dpc.c75
-rw-r--r--drivers/pci/pcie/err.c1
-rw-r--r--drivers/pci/pcie/ptm.c300
-rw-r--r--drivers/pci/pcie/tlp.c6
-rw-r--r--drivers/pci/probe.c3
-rw-r--r--drivers/pci/pwrctrl/Kconfig22
-rw-r--r--drivers/pci/pwrctrl/Makefile8
-rw-r--r--drivers/pci/pwrctrl/core.c2
-rw-r--r--drivers/pci/quirks.c45
-rw-r--r--drivers/pci/setup-bus.c20
-rw-r--r--drivers/pci/tph.c44
-rw-r--r--drivers/pcmcia/cardbus.c1
-rw-r--r--drivers/perf/Kconfig2
-rw-r--r--drivers/perf/amlogic/meson_ddr_pmu_core.c2
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c3
-rw-r--r--drivers/perf/arm-cmn.c18
-rw-r--r--drivers/perf/arm-ni.c40
-rw-r--r--drivers/perf/arm_pmuv3.c3
-rw-r--r--drivers/perf/arm_v6_pmu.c3
-rw-r--r--drivers/perf/arm_v7_pmu.c3
-rw-r--r--drivers/perf/arm_xscale_pmu.c6
-rw-r--r--drivers/phy/phy-can-transceiver.c22
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c3
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c135
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c2
-rw-r--r--drivers/phy/starfive/phy-jh7110-usb.c7
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c46
-rw-r--r--drivers/phy/tegra/xusb.c8
-rw-r--r--drivers/pinctrl/Kconfig4
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c8
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c9
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c6
-rw-r--r--drivers/pinctrl/core.c29
-rw-r--r--drivers/pinctrl/freescale/Kconfig11
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx-scmi.c4
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c6
-rw-r--r--drivers/pinctrl/mediatek/Kconfig22
-rw-r--r--drivers/pinctrl/mediatek/Makefile2
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c37
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.h7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-airoha.c178
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c18
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6893.c879
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8196.c1860
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c9
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c27
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h2283
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h3085
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c29
-rw-r--r--drivers/pinctrl/meson/Kconfig24
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-a4.c22
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c8
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c43
-rw-r--r--drivers/pinctrl/nomadik/Kconfig6
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c12
-rw-r--r--drivers/pinctrl/pinconf.h17
-rw-r--r--drivers/pinctrl/pinctrl-amd.c56
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c30
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c20
-rw-r--r--drivers/pinctrl/pinctrl-at91.c21
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c35
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c17
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c8
-rw-r--r--drivers/pinctrl/pinctrl-keembay.c2
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c8
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c8
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c17
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c8
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c26
-rw-r--r--drivers/pinctrl/pinctrl-scmi.c1
-rw-r--r--drivers/pinctrl/pinctrl-single.c9
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c13
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c23
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c29
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcm2290.c70
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs615.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs8300.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8750.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c7
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c7
-rw-r--r--drivers/pinctrl/qcom/tlmm-test.c1
-rw-r--r--drivers/pinctrl/renesas/Kconfig1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c299
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c52
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c294
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h28
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c34
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h8
-rw-r--r--drivers/pinctrl/spacemit/pinctrl-k1.c10
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c7
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c7
-rw-r--r--drivers/pinctrl/uniphier/Kconfig2
-rw-r--r--drivers/platform/arm64/Kconfig2
-rw-r--r--drivers/platform/arm64/acer-aspire1-ec.c10
-rw-r--r--drivers/platform/arm64/huawei-gaokun-ec.c2
-rw-r--r--drivers/platform/chrome/Kconfig5
-rw-r--r--drivers/platform/chrome/Makefile3
-rw-r--r--drivers/platform/chrome/chromeos_of_hw_prober.c33
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c52
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c24
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test_util.h5
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c6
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c24
-rw-r--r--drivers/platform/cznic/Kconfig17
-rw-r--r--drivers/platform/cznic/Makefile3
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-base.c4
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-gpio.c21
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-keyctl.c162
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-trng.c17
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu.h33
-rw-r--r--drivers/platform/cznic/turris-signing-key.c193
-rw-r--r--drivers/platform/mellanox/Kconfig13
-rw-r--r--drivers/platform/mellanox/Makefile1
-rw-r--r--drivers/platform/mellanox/mlx-platform.c1546
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c4
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c155
-rw-r--r--drivers/platform/mellanox/mlxreg-dpu.c613
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c8
-rw-r--r--drivers/platform/mellanox/nvsw-sn2201.c112
-rw-r--r--drivers/platform/surface/Kconfig2
-rw-r--r--drivers/platform/x86/Kconfig40
-rw-r--r--drivers/platform/x86/Makefile12
-rw-r--r--drivers/platform/x86/acerhdf.c4
-rw-r--r--drivers/platform/x86/amd/Kconfig11
-rw-r--r--drivers/platform/x86/amd/Makefile1
-rw-r--r--drivers/platform/x86/amd/amd_isp4.c311
-rw-r--r--drivers/platform/x86/amd/hsmp/Kconfig2
-rw-r--r--drivers/platform/x86/amd/hsmp/Makefile1
-rw-r--r--drivers/platform/x86/amd/hsmp/acpi.c274
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.c27
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.h10
-rw-r--r--drivers/platform/x86/amd/hsmp/hwmon.c121
-rw-r--r--drivers/platform/x86/amd/hsmp/plat.c16
-rw-r--r--drivers/platform/x86/amd/pmc/mp1_stb.c2
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c10
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c9
-rw-r--r--drivers/platform/x86/amd/pmf/auto-mode.c4
-rw-r--r--drivers/platform/x86/amd/pmf/cnqf.c8
-rw-r--r--drivers/platform/x86/amd/pmf/core.c16
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h1
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c12
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c29
-rw-r--r--drivers/platform/x86/asus-laptop.c9
-rw-r--r--drivers/platform/x86/asus-wmi.c162
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c10
-rw-r--r--drivers/platform/x86/dasharo-acpi.c360
-rw-r--r--drivers/platform/x86/dell/Kconfig3
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c1159
-rw-r--r--drivers/platform/x86/dell/dell-pc.c67
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c246
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c2
-rw-r--r--drivers/platform/x86/eeepc-laptop.c4
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c33
-rw-r--r--drivers/platform/x86/ideapad-laptop.c16
-rw-r--r--drivers/platform/x86/intel/hid.c21
-rw-r--r--drivers/platform/x86/intel/ifs/core.c5
-rw-r--r--drivers/platform/x86/intel/ifs/load.c21
-rw-r--r--drivers/platform/x86/intel/ifs/runtest.c17
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c9
-rw-r--r--drivers/platform/x86/intel/int3472/Makefile3
-rw-r--r--drivers/platform/x86/intel/int3472/clk_and_regulator.c173
-rw-r--r--drivers/platform/x86/intel/int3472/common.c9
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c69
-rw-r--r--drivers/platform/x86/intel/int3472/discrete_quirks.c21
-rw-r--r--drivers/platform/x86/intel/int3472/led.c3
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.c3
-rw-r--r--drivers/platform/x86/intel/pmc/Kconfig4
-rw-r--r--drivers/platform/x86/intel/pmc/Makefile8
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c16
-rw-r--r--drivers/platform/x86/intel/pmc/cnp.c7
-rw-r--r--drivers/platform/x86/intel/pmc/core.c250
-rw-r--r--drivers/platform/x86/intel/pmc/core.h22
-rw-r--r--drivers/platform/x86/intel/pmc/core_ssram.c332
-rw-r--r--drivers/platform/x86/intel/pmc/mtl.c10
-rw-r--r--drivers/platform/x86/intel/pmc/ssram_telemetry.c204
-rw-r--r--drivers/platform/x86/intel/pmc/ssram_telemetry.h24
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c38
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c15
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c106
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.c38
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.h1
-rw-r--r--drivers/platform/x86/intel/turbo_max_3.c5
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c34
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h20
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c49
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c24
-rw-r--r--drivers/platform/x86/intel/vsec.c9
-rw-r--r--drivers/platform/x86/intel_ips.c36
-rw-r--r--drivers/platform/x86/msi-wmi-platform.c99
-rw-r--r--drivers/platform/x86/oxpec.c1054
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/portwell-ec.c291
-rw-r--r--drivers/platform/x86/silicom-platform.c11
-rw-r--r--drivers/platform/x86/sony-laptop.c175
-rw-r--r--drivers/platform/x86/think-lmi.c26
-rw-r--r--drivers/platform/x86/think-lmi.h1
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c56
-rw-r--r--drivers/platform/x86/topstar-laptop.c4
-rw-r--r--drivers/platform/x86/tuxedo/Kconfig8
-rw-r--r--drivers/platform/x86/tuxedo/Makefile8
-rw-r--r--drivers/platform/x86/tuxedo/nb04/Kconfig17
-rw-r--r--drivers/platform/x86/tuxedo/nb04/Makefile10
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_ab.c923
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_util.c91
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_util.h109
-rw-r--r--drivers/platform/x86/x86-android-tablets/dmi.c14
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c124
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h3
-rw-r--r--drivers/platform/x86/xo15-ebook.c10
-rw-r--r--drivers/pmdomain/amlogic/meson-ee-pwrc.c78
-rw-r--r--drivers/pmdomain/arm/Kconfig6
-rw-r--r--drivers/pmdomain/bcm/bcm2835-power.c16
-rw-r--r--drivers/pmdomain/core.c133
-rw-r--r--drivers/pmdomain/governor.c2
-rw-r--r--drivers/pmdomain/mediatek/mt6893-pm-domains.h585
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c17
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.h2
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c16
-rw-r--r--drivers/pmdomain/renesas/rcar-gen4-sysc.c5
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.c5
-rw-r--r--drivers/pmdomain/rockchip/pm-domains.c48
-rw-r--r--drivers/pmdomain/sunxi/Kconfig10
-rw-r--r--drivers/pmdomain/sunxi/Makefile1
-rw-r--r--drivers/pmdomain/sunxi/sun50i-h6-prcm-ppu.c208
-rw-r--r--drivers/pmdomain/ti/omap_prm.c8
-rw-r--r--drivers/pnp/quirks.c2
-rw-r--r--drivers/power/reset/Kconfig13
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-reset.c5
-rw-r--r--drivers/power/reset/reboot-mode.c25
-rw-r--r--drivers/power/reset/syscon-reboot.c98
-rw-r--r--drivers/power/reset/tdx-ec-poweroff.c150
-rw-r--r--drivers/power/supply/Kconfig37
-rw-r--r--drivers/power/supply/Makefile3
-rw-r--r--drivers/power/supply/bq24190_charger.c14
-rw-r--r--drivers/power/supply/bq27xxx_battery.c2
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c13
-rw-r--r--drivers/power/supply/chagall-battery.c291
-rw-r--r--drivers/power/supply/collie_battery.c1
-rw-r--r--drivers/power/supply/cros_charge-control.c23
-rw-r--r--drivers/power/supply/gpio-charger.c4
-rw-r--r--drivers/power/supply/huawei-gaokun-battery.c645
-rw-r--r--drivers/power/supply/max17040_battery.c5
-rw-r--r--drivers/power/supply/max77705_charger.c20
-rw-r--r--drivers/power/supply/max8971_charger.c752
-rw-r--r--drivers/power/supply/power_supply_sysfs.c34
-rw-r--r--drivers/power/supply/qcom_pmi8998_charger.c4
-rw-r--r--drivers/power/supply/rk817_charger.c2
-rw-r--r--drivers/power/supply/rt9471.c12
-rw-r--r--drivers/power/supply/test_power.c21
-rw-r--r--drivers/power/supply/wm831x_power.c20
-rw-r--r--drivers/powercap/intel_rapl_common.c1
-rw-r--r--drivers/powercap/intel_rapl_msr.c7
-rw-r--r--drivers/pps/generators/pps_gen_tio.c2
-rw-r--r--drivers/ptp/Kconfig4
-rw-r--r--drivers/ptp/ptp_chardev.c16
-rw-r--r--drivers/ptp/ptp_clockmatrix.c14
-rw-r--r--drivers/ptp/ptp_fc3.c1
-rw-r--r--drivers/ptp/ptp_idt82p33.c15
-rw-r--r--drivers/ptp/ptp_ocp.c79
-rw-r--r--drivers/pwm/Kconfig113
-rw-r--r--drivers/pwm/Makefile11
-rw-r--r--drivers/pwm/core.c129
-rw-r--r--drivers/pwm/pwm-adp5585.c1
-rw-r--r--drivers/pwm/pwm-axi-pwmgen.c10
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c6
-rw-r--r--drivers/pwm/pwm-loongson.c290
-rw-r--r--drivers/pwm/pwm-mc33xs2410.c391
-rw-r--r--drivers/pwm/pwm-mediatek.c8
-rw-r--r--drivers/pwm/pwm-meson.c123
-rw-r--r--drivers/pwm/pwm-pca9685.c8
-rw-r--r--drivers/pwm/pwm-pxa.c18
-rw-r--r--drivers/pwm/pwm-rcar.c24
-rw-r--r--drivers/pwm/pwm-rzg2l-gpt.c447
-rw-r--r--drivers/pwm/pwm-stm32-lp.c219
-rw-r--r--drivers/pwm/pwm-stm32.c27
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c20
-rw-r--r--drivers/rapidio/rio.c103
-rw-r--r--drivers/rapidio/rio.h2
-rw-r--r--drivers/rapidio/rio_cm.c6
-rw-r--r--drivers/ras/amd/atl/internal.h7
-rw-r--r--drivers/ras/amd/atl/umc.c19
-rw-r--r--drivers/ras/amd/fmpm.c9
-rw-r--r--drivers/regulator/Kconfig23
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/adp5055-regulator.c424
-rw-r--r--drivers/regulator/bcm590xx-regulator.c1289
-rw-r--r--drivers/regulator/bd96801-regulator.c455
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/da9121-regulator.c2
-rw-r--r--drivers/regulator/gpio-regulator.c10
-rw-r--r--drivers/regulator/max20086-regulator.c11
-rw-r--r--drivers/regulator/pca9450-regulator.c27
-rw-r--r--drivers/regulator/pf9453-regulator.c3
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c69
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c83
-rw-r--r--drivers/regulator/s5m8767.c146
-rw-r--r--drivers/regulator/tps65219-regulator.c242
-rw-r--r--drivers/remoteproc/Makefile6
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c98
-rw-r--r--drivers/remoteproc/qcom_wcnss.c3
-rw-r--r--drivers/remoteproc/qcom_wcnss_iris.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c7
-rw-r--r--drivers/remoteproc/stm32_rproc.c8
-rw-r--r--drivers/remoteproc/ti_k3_common.c551
-rw-r--r--drivers/remoteproc/ti_k3_common.h118
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c616
-rw-r--r--drivers/remoteproc/ti_k3_m4_remoteproc.c583
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c1018
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c34
-rw-r--r--drivers/reset/Kconfig17
-rw-r--r--drivers/reset/Makefile2
-rw-r--r--drivers/reset/reset-rzv2h-usb2phy.c236
-rw-r--r--drivers/reset/reset-th1520.c135
-rw-r--r--drivers/rpmsg/qcom_smd.c10
-rw-r--r--drivers/rpmsg/rpmsg_core.c63
-rw-r--r--drivers/rpmsg/rpmsg_internal.h6
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c24
-rw-r--r--drivers/s390/block/Kconfig3
-rw-r--r--drivers/s390/block/dcssblk.c4
-rw-r--r--drivers/s390/char/con3270.c17
-rw-r--r--drivers/s390/char/diag_ftp.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c74
-rw-r--r--drivers/s390/crypto/ap_bus.h30
-rw-r--r--drivers/s390/crypto/pkey_api.c50
-rw-r--r--drivers/s390/crypto/pkey_base.c34
-rw-r--r--drivers/s390/crypto/pkey_base.h37
-rw-r--r--drivers/s390/crypto/pkey_cca.c136
-rw-r--r--drivers/s390/crypto/pkey_ep11.c117
-rw-r--r--drivers/s390/crypto/pkey_pckmo.c9
-rw-r--r--drivers/s390/crypto/pkey_sysfs.c4
-rw-r--r--drivers/s390/crypto/pkey_uv.c44
-rw-r--r--drivers/s390/crypto/zcrypt_api.c167
-rw-r--r--drivers/s390/crypto/zcrypt_api.h16
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c486
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h49
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c39
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c454
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.h27
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c36
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c109
-rw-r--r--drivers/s390/net/ctcm_mpc.c2
-rw-r--r--drivers/s390/net/ism_drv.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c14
-rw-r--r--drivers/s390/virtio/virtio_ccw.c16
-rw-r--r--drivers/scsi/Kconfig3
-rw-r--r--drivers/scsi/aha152x.c1
-rw-r--r--drivers/scsi/bnx2fc/Kconfig1
-rw-r--r--drivers/scsi/bnx2i/Kconfig1
-rw-r--r--drivers/scsi/dc395x.c697
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.c6
-rw-r--r--drivers/scsi/fnic/fip.c8
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h51
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c101
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c15
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c273
-rw-r--r--drivers/scsi/imm.c1
-rw-r--r--drivers/scsi/isci/remote_device.c30
-rw-r--r--drivers/scsi/isci/remote_device.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c136
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c38
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c30
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c9
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c5
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c8
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c73
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c3
-rw-r--r--drivers/scsi/mvsas/mv_64xx.h4
-rw-r--r--drivers/scsi/myrb.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c1
-rw-r--r--drivers/scsi/ppa.c1
-rw-r--r--drivers/scsi/qedf/qedf_main.c2
-rw-r--r--drivers/scsi/qedi/qedi_dbg.c22
-rw-r--r--drivers/scsi/qedi/qedi_dbg.h12
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c53
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c90
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c50
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c129
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c5
-rw-r--r--drivers/scsi/scsi.c36
-rw-r--r--drivers/scsi/scsi_debug.c361
-rw-r--r--drivers/scsi/scsi_devinfo.c27
-rw-r--r--drivers/scsi/scsi_ioctl.c2
-rw-r--r--drivers/scsi/scsi_lib.c12
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_transport_fc.c2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c7
-rw-r--r--drivers/scsi/scsi_transport_srp.c2
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sd_zbc.c6
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c153
-rw-r--r--drivers/scsi/storvsc_drv.c1
-rw-r--r--drivers/sh/intc/irqdomain.c5
-rw-r--r--drivers/soc/Kconfig2
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c461
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c17
-rw-r--r--drivers/soc/dove/pmu.c7
-rw-r--r--drivers/soc/fsl/Kconfig2
-rw-r--r--drivers/soc/fsl/qbman/qman.c2
-rw-r--r--drivers/soc/fsl/qe/qe_ic.c19
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c42
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.h2
-rw-r--r--drivers/soc/imx/soc-imx8m.c177
-rw-r--r--drivers/soc/mediatek/mtk-dvfsrc.c53
-rw-r--r--drivers/soc/qcom/ice.c350
-rw-r--r--drivers/soc/qcom/llcc-qcom.c497
-rw-r--r--drivers/soc/qcom/pmic_glink.c4
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c30
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c11
-rw-r--r--drivers/soc/qcom/smem.c2
-rw-r--r--drivers/soc/qcom/smp2p.c4
-rw-r--r--drivers/soc/qcom/smsm.c2
-rw-r--r--drivers/soc/qcom/socinfo.c1
-rw-r--r--drivers/soc/renesas/Kconfig53
-rw-r--r--drivers/soc/renesas/Makefile1
-rw-r--r--drivers/soc/renesas/r9a09g056-sys.c75
-rw-r--r--drivers/soc/renesas/rz-sysc.c3
-rw-r--r--drivers/soc/renesas/rz-sysc.h1
-rw-r--r--drivers/soc/samsung/exynos-pmu.c78
-rw-r--r--drivers/soc/samsung/exynos-pmu.h1
-rw-r--r--drivers/soc/samsung/exynos-usi.c2
-rw-r--r--drivers/soc/sophgo/Kconfig34
-rw-r--r--drivers/soc/sophgo/Makefile4
-rw-r--r--drivers/soc/sophgo/cv1800-rtcsys.c63
-rw-r--r--drivers/soc/sophgo/sg2044-topsys.c45
-rw-r--r--drivers/soc/tegra/pmc.c5
-rw-r--r--drivers/soc/ti/k3-ringacc.c2
-rw-r--r--drivers/soc/ti/k3-socinfo.c2
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c3
-rw-r--r--drivers/soc/ti/ti_sci_inta_msi.c10
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c8
-rw-r--r--drivers/soc/vt8500/Kconfig19
-rw-r--r--drivers/soc/vt8500/Makefile2
-rw-r--r--drivers/soc/vt8500/wmt-socinfo.c125
-rw-r--r--drivers/soundwire/bus.c9
-rw-r--r--drivers/soundwire/intel_auxdevice.c36
-rw-r--r--drivers/spi/Kconfig4
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/atmel-quadspi.c26
-rw-r--r--drivers/spi/spi-amd-pci.c70
-rw-r--r--drivers/spi/spi-amd.c227
-rw-r--r--drivers/spi/spi-amd.h44
-rw-r--r--drivers/spi/spi-axi-spi-engine.c91
-rw-r--r--drivers/spi/spi-cadence-quadspi.c2
-rw-r--r--drivers/spi/spi-cavium-thunderx.c4
-rw-r--r--drivers/spi/spi-cs42l43.c4
-rw-r--r--drivers/spi/spi-dw-core.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c46
-rw-r--r--drivers/spi/spi-fsl-qspi.c85
-rw-r--r--drivers/spi/spi-gpio.c2
-rw-r--r--drivers/spi/spi-imx.c5
-rw-r--r--drivers/spi/spi-intel-pci.c8
-rw-r--r--drivers/spi/spi-intel-platform.c9
-rw-r--r--drivers/spi/spi-intel.c9
-rw-r--r--drivers/spi/spi-intel.h4
-rw-r--r--drivers/spi/spi-loopback-test.c10
-rw-r--r--drivers/spi/spi-mem.c6
-rw-r--r--drivers/spi/spi-meson-spicc.c241
-rw-r--r--drivers/spi/spi-nxp-fspi.c189
-rw-r--r--drivers/spi/spi-offload.c5
-rw-r--r--drivers/spi/spi-pci1xxxx.c24
-rw-r--r--drivers/spi/spi-qpic-snand.c172
-rw-r--r--drivers/spi/spi-rpc-if.c16
-rw-r--r--drivers/spi/spi-sh-msiof.c397
-rw-r--r--drivers/spi/spi-stm32-ospi.c6
-rw-r--r--drivers/spi/spi-sun4i.c6
-rw-r--r--drivers/spi/spi-tegra114.c6
-rw-r--r--drivers/spi/spi-tegra210-quad.c286
-rw-r--r--drivers/spi/spi-xcomm.c8
-rw-r--r--drivers/spi/spi.c19
-rw-r--r--drivers/ssb/driver_gpio.c8
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c14
-rw-r--r--drivers/staging/gpib/common/iblib.c2
-rw-r--r--drivers/staging/iio/adc/ad7816.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig12
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile1
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c1612
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.h1768
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c73
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h244
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c92
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c60
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c3
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c64
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c239
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.h18
-rw-r--r--drivers/staging/media/starfive/camss/stf-isp.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c4
-rw-r--r--drivers/target/target_core_configfs.c20
-rw-r--r--drivers/target/target_core_device.c89
-rw-r--r--drivers/target/target_core_spc.c134
-rw-r--r--drivers/target/target_core_stat.c69
-rw-r--r--drivers/target/target_core_transport.c119
-rw-r--r--drivers/tee/amdtee/core.c16
-rw-r--r--drivers/tee/optee/smc_abi.c3
-rw-r--r--drivers/tee/tee_core.c11
-rw-r--r--drivers/thermal/Kconfig11
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/airoha_thermal.c489
-rw-r--r--drivers/thermal/amlogic_thermal.c16
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/intel/int340x_thermal/platform_temperature_control.c243
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c18
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h3
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c10
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c33
-rw-r--r--drivers/thermal/intel/intel_hfi.c14
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c4
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c5
-rw-r--r--drivers/thermal/intel/therm_throt.c10
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c2
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c18
-rw-r--r--drivers/thermal/qcom/lmh.c3
-rw-r--r--drivers/thermal/qcom/tsens-v1.c62
-rw-r--r--drivers/thermal/qcom/tsens.c27
-rw-r--r--drivers/thermal/qcom/tsens.h4
-rw-r--r--drivers/thermal/tegra/soctherm.c2
-rw-r--r--drivers/tty/serial/msm_serial.c6
-rw-r--r--drivers/tty/serial/sifive.c6
-rw-r--r--drivers/tty/vt/selection.c5
-rw-r--r--drivers/ufs/core/ufs-mcq.c18
-rw-r--r--drivers/ufs/core/ufs-sysfs.c187
-rw-r--r--drivers/ufs/core/ufshcd-priv.h1
-rw-r--r--drivers/ufs/core/ufshcd.c226
-rw-r--r--drivers/ufs/host/ufs-exynos.c85
-rw-r--r--drivers/ufs/host/ufs-exynos.h6
-rw-r--r--drivers/ufs/host/ufs-qcom.c309
-rw-r--r--drivers/ufs/host/ufs-qcom.h29
-rw-r--r--drivers/uio/uio_hv_generic.c46
-rw-r--r--drivers/usb/atm/cxacru.c2
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c2
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c31
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.h6
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c12
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c3
-rw-r--r--drivers/usb/cdns3/core.h3
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c44
-rw-r--r--drivers/usb/class/cdc-wdm.c21
-rw-r--r--drivers/usb/class/usbtmc.c59
-rw-r--r--drivers/usb/core/quirks.c9
-rw-r--r--drivers/usb/dwc3/core.h4
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c4
-rw-r--r--drivers/usb/dwc3/gadget.c66
-rw-r--r--drivers/usb/gadget/composite.c12
-rw-r--r--drivers/usb/gadget/function/f_ecm.c7
-rw-r--r--drivers/usb/gadget/function/f_hid.c12
-rw-r--r--drivers/usb/gadget/function/f_midi2.c2
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c4
-rw-r--r--drivers/usb/host/ohci-pci.c23
-rw-r--r--drivers/usb/host/uhci-platform.c2
-rw-r--r--drivers/usb/host/xhci-dbgcap.c19
-rw-r--r--drivers/usb/host/xhci-dbgcap.h3
-rw-r--r--drivers/usb/host/xhci-hub.c30
-rw-r--r--drivers/usb/host/xhci-ring.c30
-rw-r--r--drivers/usb/host/xhci-tegra.c3
-rw-r--r--drivers/usb/host/xhci.c18
-rw-r--r--drivers/usb/host/xhci.h5
-rw-r--r--drivers/usb/misc/onboard_usb_dev.c10
-rw-r--r--drivers/usb/misc/usbtest.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h5
-rw-r--r--drivers/usb/serial/option.c3
-rw-r--r--drivers/usb/serial/usb-serial-simple.c7
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/storage/usb.c20
-rw-r--r--drivers/usb/typec/class.c24
-rw-r--r--drivers/usb/typec/class.h1
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c8
-rw-r--r--drivers/usb/typec/ucsi/displayport.c21
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c34
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c2
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_main.c17
-rw-r--r--drivers/vfio/pci/Kconfig2
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c121
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h14
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c371
-rw-r--r--drivers/vfio/pci/mlx5/cmd.h35
-rw-r--r--drivers/vfio/pci/mlx5/main.c87
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c12
-rw-r--r--drivers/vfio/vfio_iommu_type1.c51
-rw-r--r--drivers/vhost/net.c30
-rw-r--r--drivers/vhost/scsi.c264
-rw-r--r--drivers/vhost/vhost.c28
-rw-r--r--drivers/vhost/vringh.c19
-rw-r--r--drivers/video/backlight/backlight.c93
-rw-r--r--drivers/video/backlight/lcd.c108
-rw-r--r--drivers/video/backlight/qcom-wled.c6
-rw-r--r--drivers/video/console/vgacon.c2
-rw-r--r--drivers/video/fbdev/arkfb.c5
-rw-r--r--drivers/video/fbdev/carminefb.c8
-rw-r--r--drivers/video/fbdev/carminefb.h2
-rw-r--r--drivers/video/fbdev/core/fb_backlight.c12
-rw-r--r--drivers/video/fbdev/core/fb_info.c1
-rw-r--r--drivers/video/fbdev/core/fbcon.c7
-rw-r--r--drivers/video/fbdev/core/fbcvt.c2
-rw-r--r--drivers/video/fbdev/core/fbmem.c104
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c8
-rw-r--r--drivers/video/fbdev/geode/display_gx.c1
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c3
-rw-r--r--drivers/video/fbdev/geode/lxfb_ops.c23
-rw-r--r--drivers/video/fbdev/geode/suspend_gx.c10
-rw-r--r--drivers/video/fbdev/geode/video_gx.c16
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c2
-rw-r--r--drivers/video/fbdev/via/via-gpio.c10
-rw-r--r--drivers/video/screen_info_generic.c36
-rw-r--r--drivers/virt/coco/Kconfig6
-rw-r--r--drivers/virt/coco/Makefile2
-rw-r--r--drivers/virt/coco/arm-cca-guest/arm-cca-guest.c8
-rw-r--r--drivers/virt/coco/guest/Kconfig17
-rw-r--r--drivers/virt/coco/guest/Makefile4
-rw-r--r--drivers/virt/coco/guest/report.c (renamed from drivers/virt/coco/tsm.c)63
-rw-r--r--drivers/virt/coco/guest/tsm-mr.c251
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c12
-rw-r--r--drivers/virt/coco/tdx-guest/Kconfig1
-rw-r--r--drivers/virt/coco/tdx-guest/tdx-guest.c259
-rw-r--r--drivers/virtio/Kconfig64
-rw-r--r--drivers/virtio/Makefile5
-rw-r--r--drivers/virtio/virtio.c6
-rw-r--r--drivers/virtio/virtio_pci_modern.c17
-rw-r--r--drivers/virtio/virtio_ring.c2
-rw-r--r--drivers/virtio/virtio_rtc_arm.c23
-rw-r--r--drivers/virtio/virtio_rtc_class.c262
-rw-r--r--drivers/virtio/virtio_rtc_driver.c1407
-rw-r--r--drivers/virtio/virtio_rtc_internal.h122
-rw-r--r--drivers/virtio/virtio_rtc_ptp.c347
-rw-r--r--drivers/w1/slaves/w1_ds2406.c12
-rw-r--r--drivers/watchdog/Kconfig26
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/apple_wdt.c7
-rw-r--r--drivers/watchdog/arm_smc_wdt.c17
-rw-r--r--drivers/watchdog/cros_ec_wdt.c30
-rw-r--r--drivers/watchdog/da9052_wdt.c27
-rw-r--r--drivers/watchdog/diag288_wdt.c53
-rw-r--r--drivers/watchdog/exar_wdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c25
-rw-r--r--drivers/watchdog/intel_oc_wdt.c233
-rw-r--r--drivers/watchdog/lenovo_se30_wdt.c2
-rw-r--r--drivers/watchdog/pcwd_usb.c6
-rw-r--r--drivers/watchdog/pretimeout_noop.c2
-rw-r--r--drivers/watchdog/pretimeout_panic.c2
-rw-r--r--drivers/watchdog/qcom-wdt.c7
-rw-r--r--drivers/watchdog/s32g_wdt.c315
-rw-r--r--drivers/watchdog/s3c2410_wdt.c39
-rw-r--r--drivers/watchdog/stm32_iwdg.c2
-rw-r--r--drivers/watchdog/wdt_pci.c2
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/balloon.c37
-rw-r--r--drivers/xen/swiotlb-xen.c2
-rw-r--r--drivers/xen/xenbus/xenbus.h2
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c9
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c14
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c1
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c18
-rw-r--r--fs/9p/vfs_addr.c6
-rw-r--r--fs/Kconfig2
-rw-r--r--fs/Makefile1
-rw-r--r--fs/afs/Kconfig1
-rw-r--r--fs/afs/Makefile1
-rw-r--r--fs/afs/cm_security.c340
-rw-r--r--fs/afs/dir.c2
-rw-r--r--fs/afs/dir_silly.c6
-rw-r--r--fs/afs/dynroot.c4
-rw-r--r--fs/afs/internal.h20
-rw-r--r--fs/afs/main.c1
-rw-r--r--fs/afs/misc.c27
-rw-r--r--fs/afs/mntpt.c1
-rw-r--r--fs/afs/rxrpc.c40
-rw-r--r--fs/afs/server.c2
-rw-r--r--fs/afs/write.c9
-rw-r--r--fs/aio.c1
-rw-r--r--fs/anon_inodes.c45
-rw-r--r--fs/autofs/dev-ioctl.c3
-rw-r--r--fs/bcachefs/Kconfig15
-rw-r--r--fs/bcachefs/Makefile4
-rw-r--r--fs/bcachefs/alloc_background.c238
-rw-r--r--fs/bcachefs/alloc_background.h10
-rw-r--r--fs/bcachefs/alloc_foreground.c630
-rw-r--r--fs/bcachefs/alloc_foreground.h81
-rw-r--r--fs/bcachefs/alloc_types.h16
-rw-r--r--fs/bcachefs/async_objs.c132
-rw-r--r--fs/bcachefs/async_objs.h44
-rw-r--r--fs/bcachefs/async_objs_types.h25
-rw-r--r--fs/bcachefs/backpointers.c407
-rw-r--r--fs/bcachefs/backpointers.h17
-rw-r--r--fs/bcachefs/bcachefs.h295
-rw-r--r--fs/bcachefs/bcachefs_format.h111
-rw-r--r--fs/bcachefs/bkey.c47
-rw-r--r--fs/bcachefs/bkey.h4
-rw-r--r--fs/bcachefs/bkey_methods.c26
-rw-r--r--fs/bcachefs/bset.c64
-rw-r--r--fs/bcachefs/bset.h22
-rw-r--r--fs/bcachefs/btree_cache.c207
-rw-r--r--fs/bcachefs/btree_gc.c116
-rw-r--r--fs/bcachefs/btree_gc.h3
-rw-r--r--fs/bcachefs/btree_io.c384
-rw-r--r--fs/bcachefs/btree_io.h12
-rw-r--r--fs/bcachefs/btree_iter.c386
-rw-r--r--fs/bcachefs/btree_iter.h116
-rw-r--r--fs/bcachefs/btree_journal_iter.c26
-rw-r--r--fs/bcachefs/btree_key_cache.c89
-rw-r--r--fs/bcachefs/btree_key_cache.h3
-rw-r--r--fs/bcachefs/btree_locking.c250
-rw-r--r--fs/bcachefs/btree_locking.h72
-rw-r--r--fs/bcachefs/btree_node_scan.c26
-rw-r--r--fs/bcachefs/btree_trans_commit.c115
-rw-r--r--fs/bcachefs/btree_types.h33
-rw-r--r--fs/bcachefs/btree_update.c133
-rw-r--r--fs/bcachefs/btree_update.h82
-rw-r--r--fs/bcachefs/btree_update_interior.c205
-rw-r--r--fs/bcachefs/btree_update_interior.h6
-rw-r--r--fs/bcachefs/btree_write_buffer.c34
-rw-r--r--fs/bcachefs/btree_write_buffer.h1
-rw-r--r--fs/bcachefs/buckets.c236
-rw-r--r--fs/bcachefs/buckets.h18
-rw-r--r--fs/bcachefs/buckets_waiting_for_journal.c3
-rw-r--r--fs/bcachefs/chardev.c9
-rw-r--r--fs/bcachefs/checksum.c255
-rw-r--r--fs/bcachefs/checksum.h5
-rw-r--r--fs/bcachefs/clock.c47
-rw-r--r--fs/bcachefs/clock.h1
-rw-r--r--fs/bcachefs/compress.c24
-rw-r--r--fs/bcachefs/darray.h59
-rw-r--r--fs/bcachefs/data_update.c379
-rw-r--r--fs/bcachefs/data_update.h15
-rw-r--r--fs/bcachefs/debug.c115
-rw-r--r--fs/bcachefs/debug.h20
-rw-r--r--fs/bcachefs/dirent.c213
-rw-r--r--fs/bcachefs/dirent.h33
-rw-r--r--fs/bcachefs/disk_accounting.c162
-rw-r--r--fs/bcachefs/disk_accounting.h34
-rw-r--r--fs/bcachefs/disk_groups.c177
-rw-r--r--fs/bcachefs/ec.c326
-rw-r--r--fs/bcachefs/ec.h10
-rw-r--r--fs/bcachefs/ec_types.h7
-rw-r--r--fs/bcachefs/enumerated_ref.c144
-rw-r--r--fs/bcachefs/enumerated_ref.h66
-rw-r--r--fs/bcachefs/enumerated_ref_types.h19
-rw-r--r--fs/bcachefs/errcode.c4
-rw-r--r--fs/bcachefs/errcode.h26
-rw-r--r--fs/bcachefs/error.c207
-rw-r--r--fs/bcachefs/error.h28
-rw-r--r--fs/bcachefs/extent_update.c67
-rw-r--r--fs/bcachefs/extent_update.h2
-rw-r--r--fs/bcachefs/extents.c188
-rw-r--r--fs/bcachefs/extents.h10
-rw-r--r--fs/bcachefs/extents_types.h1
-rw-r--r--fs/bcachefs/fast_list.c156
-rw-r--r--fs/bcachefs/fast_list.h41
-rw-r--r--fs/bcachefs/fs-io-buffered.c47
-rw-r--r--fs/bcachefs/fs-io-direct.c7
-rw-r--r--fs/bcachefs/fs-io-pagecache.c20
-rw-r--r--fs/bcachefs/fs-io.c64
-rw-r--r--fs/bcachefs/fs-ioctl.c235
-rw-r--r--fs/bcachefs/fs-ioctl.h75
-rw-r--r--fs/bcachefs/fs.c521
-rw-r--r--fs/bcachefs/fsck.c550
-rw-r--r--fs/bcachefs/fsck.h6
-rw-r--r--fs/bcachefs/inode.c248
-rw-r--r--fs/bcachefs/inode.h54
-rw-r--r--fs/bcachefs/inode_format.h12
-rw-r--r--fs/bcachefs/io_misc.c2
-rw-r--r--fs/bcachefs/io_read.c359
-rw-r--r--fs/bcachefs/io_read.h25
-rw-r--r--fs/bcachefs/io_write.c97
-rw-r--r--fs/bcachefs/io_write.h28
-rw-r--r--fs/bcachefs/io_write_types.h32
-rw-r--r--fs/bcachefs/journal.c227
-rw-r--r--fs/bcachefs/journal.h15
-rw-r--r--fs/bcachefs/journal_io.c452
-rw-r--r--fs/bcachefs/journal_io.h1
-rw-r--r--fs/bcachefs/journal_reclaim.c94
-rw-r--r--fs/bcachefs/journal_sb.c2
-rw-r--r--fs/bcachefs/journal_seq_blacklist.c14
-rw-r--r--fs/bcachefs/journal_seq_blacklist.h1
-rw-r--r--fs/bcachefs/journal_types.h2
-rw-r--r--fs/bcachefs/lru.c6
-rw-r--r--fs/bcachefs/migrate.c121
-rw-r--r--fs/bcachefs/migrate.h3
-rw-r--r--fs/bcachefs/move.c334
-rw-r--r--fs/bcachefs/move.h17
-rw-r--r--fs/bcachefs/move_types.h8
-rw-r--r--fs/bcachefs/movinggc.c232
-rw-r--r--fs/bcachefs/movinggc.h10
-rw-r--r--fs/bcachefs/namei.c288
-rw-r--r--fs/bcachefs/namei.h7
-rw-r--r--fs/bcachefs/nocow_locking.c4
-rw-r--r--fs/bcachefs/nocow_locking.h2
-rw-r--r--fs/bcachefs/opts.c170
-rw-r--r--fs/bcachefs/opts.h43
-rw-r--r--fs/bcachefs/printbuf.h8
-rw-r--r--fs/bcachefs/quota.c6
-rw-r--r--fs/bcachefs/rebalance.c252
-rw-r--r--fs/bcachefs/rebalance.h16
-rw-r--r--fs/bcachefs/rebalance_types.h6
-rw-r--r--fs/bcachefs/recovery.c159
-rw-r--r--fs/bcachefs/recovery.h3
-rw-r--r--fs/bcachefs/recovery_passes.c649
-rw-r--r--fs/bcachefs/recovery_passes.h31
-rw-r--r--fs/bcachefs/recovery_passes_format.h106
-rw-r--r--fs/bcachefs/recovery_passes_types.h93
-rw-r--r--fs/bcachefs/reflink.c14
-rw-r--r--fs/bcachefs/replicas.c35
-rw-r--r--fs/bcachefs/sb-counters_format.h3
-rw-r--r--fs/bcachefs/sb-downgrade.c15
-rw-r--r--fs/bcachefs/sb-errors.c22
-rw-r--r--fs/bcachefs/sb-errors.h1
-rw-r--r--fs/bcachefs/sb-errors_format.h25
-rw-r--r--fs/bcachefs/sb-members.c104
-rw-r--r--fs/bcachefs/sb-members.h103
-rw-r--r--fs/bcachefs/sb-members_format.h6
-rw-r--r--fs/bcachefs/sb-members_types.h1
-rw-r--r--fs/bcachefs/six.c7
-rw-r--r--fs/bcachefs/snapshot.c625
-rw-r--r--fs/bcachefs/snapshot.h116
-rw-r--r--fs/bcachefs/snapshot_format.h4
-rw-r--r--fs/bcachefs/snapshot_types.h57
-rw-r--r--fs/bcachefs/str_hash.c368
-rw-r--r--fs/bcachefs/str_hash.h37
-rw-r--r--fs/bcachefs/subvolume.c111
-rw-r--r--fs/bcachefs/subvolume.h5
-rw-r--r--fs/bcachefs/subvolume_types.h27
-rw-r--r--fs/bcachefs/super-io.c90
-rw-r--r--fs/bcachefs/super-io.h1
-rw-r--r--fs/bcachefs/super.c912
-rw-r--r--fs/bcachefs/super.h9
-rw-r--r--fs/bcachefs/sysfs.c133
-rw-r--r--fs/bcachefs/tests.c4
-rw-r--r--fs/bcachefs/thread_with_file.c4
-rw-r--r--fs/bcachefs/trace.h123
-rw-r--r--fs/bcachefs/util.c41
-rw-r--r--fs/bcachefs/util.h58
-rw-r--r--fs/bcachefs/xattr.c29
-rw-r--r--fs/bcachefs/xattr.h4
-rw-r--r--fs/bcachefs/xattr_format.h8
-rw-r--r--fs/bfs/inode.c30
-rw-r--r--fs/binfmt_elf.c147
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/btrfs/Kconfig34
-rw-r--r--fs/btrfs/async-thread.c3
-rw-r--r--fs/btrfs/backref.c12
-rw-r--r--fs/btrfs/backref.h4
-rw-r--r--fs/btrfs/bio.c55
-rw-r--r--fs/btrfs/bio.h3
-rw-r--r--fs/btrfs/block-group.c196
-rw-r--r--fs/btrfs/block-group.h11
-rw-r--r--fs/btrfs/block-rsv.c11
-rw-r--r--fs/btrfs/block-rsv.h1
-rw-r--r--fs/btrfs/btrfs_inode.h7
-rw-r--r--fs/btrfs/compression.c77
-rw-r--r--fs/btrfs/compression.h11
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/defrag.c143
-rw-r--r--fs/btrfs/delalloc-space.c51
-rw-r--r--fs/btrfs/delalloc-space.h4
-rw-r--r--fs/btrfs/delayed-inode.c73
-rw-r--r--fs/btrfs/delayed-ref.c9
-rw-r--r--fs/btrfs/delayed-ref.h1
-rw-r--r--fs/btrfs/dev-replace.c22
-rw-r--r--fs/btrfs/dev-replace.h2
-rw-r--r--fs/btrfs/direct-io.c75
-rw-r--r--fs/btrfs/discard.c19
-rw-r--r--fs/btrfs/disk-io.c201
-rw-r--r--fs/btrfs/disk-io.h5
-rw-r--r--fs/btrfs/extent-io-tree.c510
-rw-r--r--fs/btrfs/extent-io-tree.h165
-rw-r--r--fs/btrfs/extent-tree.c162
-rw-r--r--fs/btrfs/extent-tree.h4
-rw-r--r--fs/btrfs/extent_io.c961
-rw-r--r--fs/btrfs/extent_io.h11
-rw-r--r--fs/btrfs/extent_map.c175
-rw-r--r--fs/btrfs/extent_map.h47
-rw-r--r--fs/btrfs/fiemap.c9
-rw-r--r--fs/btrfs/file-item.c49
-rw-r--r--fs/btrfs/file-item.h6
-rw-r--r--fs/btrfs/file.c775
-rw-r--r--fs/btrfs/free-space-cache.c52
-rw-r--r--fs/btrfs/free-space-tree.c62
-rw-r--r--fs/btrfs/fs.h7
-rw-r--r--fs/btrfs/inode-item.c31
-rw-r--r--fs/btrfs/inode.c700
-rw-r--r--fs/btrfs/ioctl.c29
-rw-r--r--fs/btrfs/locking.c8
-rw-r--r--fs/btrfs/locking.h2
-rw-r--r--fs/btrfs/lzo.c5
-rw-r--r--fs/btrfs/messages.h83
-rw-r--r--fs/btrfs/ordered-data.c73
-rw-r--r--fs/btrfs/qgroup.c55
-rw-r--r--fs/btrfs/raid56.c219
-rw-r--r--fs/btrfs/reflink.c15
-rw-r--r--fs/btrfs/relocation.c114
-rw-r--r--fs/btrfs/scrub.c474
-rw-r--r--fs/btrfs/send.c88
-rw-r--r--fs/btrfs/space-info.c174
-rw-r--r--fs/btrfs/space-info.h12
-rw-r--r--fs/btrfs/subpage.c10
-rw-r--r--fs/btrfs/super.c31
-rw-r--r--fs/btrfs/sysfs.c27
-rw-r--r--fs/btrfs/tests/btrfs-tests.c32
-rw-r--r--fs/btrfs/tests/extent-io-tests.c61
-rw-r--r--fs/btrfs/tests/extent-map-tests.c102
-rw-r--r--fs/btrfs/tests/inode-tests.c107
-rw-r--r--fs/btrfs/transaction.c72
-rw-r--r--fs/btrfs/tree-checker.c24
-rw-r--r--fs/btrfs/tree-log.c66
-rw-r--r--fs/btrfs/volumes.c434
-rw-r--r--fs/btrfs/volumes.h11
-rw-r--r--fs/btrfs/zlib.c9
-rw-r--r--fs/btrfs/zoned.c47
-rw-r--r--fs/btrfs/zstd.c10
-rw-r--r--fs/buffer.c101
-rw-r--r--fs/cachefiles/internal.h1
-rw-r--r--fs/cachefiles/io.c16
-rw-r--r--fs/cachefiles/key.c3
-rw-r--r--fs/cachefiles/namei.c14
-rw-r--r--fs/ceph/Kconfig2
-rw-r--r--fs/ceph/addr.c6
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/configfs/Kconfig1
-rw-r--r--fs/configfs/dir.c4
-rw-r--r--fs/configfs/item.c2
-rw-r--r--fs/coredump.c461
-rw-r--r--fs/crypto/fscrypt_private.h75
-rw-r--r--fs/crypto/hkdf.c4
-rw-r--r--fs/crypto/inline_crypt.c44
-rw-r--r--fs/crypto/keyring.c132
-rw-r--r--fs/crypto/keysetup.c63
-rw-r--r--fs/crypto/keysetup_v1.c4
-rw-r--r--fs/dax.c6
-rw-r--r--fs/dcache.c12
-rw-r--r--fs/debugfs/inode.c6
-rw-r--r--fs/devpts/inode.c4
-rw-r--r--fs/dlm/Kconfig1
-rw-r--r--fs/dlm/config.c3
-rw-r--r--fs/dlm/lowcomms.c7
-rw-r--r--fs/ecryptfs/inode.c16
-rw-r--r--fs/efivarfs/internal.h1
-rw-r--r--fs/efivarfs/super.c206
-rw-r--r--fs/erofs/Kconfig16
-rw-r--r--fs/erofs/Makefile1
-rw-r--r--fs/erofs/compress.h10
-rw-r--r--fs/erofs/data.c5
-rw-r--r--fs/erofs/decompressor_crypto.c181
-rw-r--r--fs/erofs/decompressor_deflate.c20
-rw-r--r--fs/erofs/erofs_fs.h8
-rw-r--r--fs/erofs/fileio.c11
-rw-r--r--fs/erofs/fscache.c6
-rw-r--r--fs/erofs/internal.h3
-rw-r--r--fs/erofs/super.c66
-rw-r--r--fs/erofs/sysfs.c67
-rw-r--r--fs/erofs/zdata.c111
-rw-r--r--fs/erofs/zmap.c5
-rw-r--r--fs/eventpoll.c13
-rw-r--r--fs/exec.c129
-rw-r--r--fs/exfat/nls.c1
-rw-r--r--fs/exfat/super.c30
-rw-r--r--fs/exportfs/expfs.c6
-rw-r--r--fs/ext2/super.c3
-rw-r--r--fs/ext4/bitmap.c8
-rw-r--r--fs/ext4/block_validity.c5
-rw-r--r--fs/ext4/ext4.h91
-rw-r--r--fs/ext4/ext4_jbd2.c3
-rw-r--r--fs/ext4/ext4_jbd2.h4
-rw-r--r--fs/ext4/extents.c177
-rw-r--r--fs/ext4/extents_status.c35
-rw-r--r--fs/ext4/fast_commit.c460
-rw-r--r--fs/ext4/file.c14
-rw-r--r--fs/ext4/ialloc.c11
-rw-r--r--fs/ext4/inline.c3
-rw-r--r--fs/ext4/inode.c585
-rw-r--r--fs/ext4/ioctl.c16
-rw-r--r--fs/ext4/mballoc.c21
-rw-r--r--fs/ext4/mmp.c2
-rw-r--r--fs/ext4/move_extent.c11
-rw-r--r--fs/ext4/namei.c12
-rw-r--r--fs/ext4/orphan.c13
-rw-r--r--fs/ext4/readpage.c28
-rw-r--r--fs/ext4/resize.c2
-rw-r--r--fs/ext4/super.c84
-rw-r--r--fs/ext4/xattr.c10
-rw-r--r--fs/f2fs/acl.c33
-rw-r--r--fs/f2fs/acl.h10
-rw-r--r--fs/f2fs/checkpoint.c242
-rw-r--r--fs/f2fs/compress.c166
-rw-r--r--fs/f2fs/data.c248
-rw-r--r--fs/f2fs/dir.c243
-rw-r--r--fs/f2fs/extent_cache.c10
-rw-r--r--fs/f2fs/f2fs.h307
-rw-r--r--fs/f2fs/file.c216
-rw-r--r--fs/f2fs/gc.c149
-rw-r--r--fs/f2fs/inline.c310
-rw-r--r--fs/f2fs/inode.c117
-rw-r--r--fs/f2fs/namei.c131
-rw-r--r--fs/f2fs/node.c610
-rw-r--r--fs/f2fs/node.h12
-rw-r--r--fs/f2fs/recovery.c178
-rw-r--r--fs/f2fs/segment.c219
-rw-r--r--fs/f2fs/segment.h132
-rw-r--r--fs/f2fs/shrinker.c13
-rw-r--r--fs/f2fs/super.c168
-rw-r--r--fs/f2fs/sysfs.c41
-rw-r--r--fs/f2fs/xattr.c116
-rw-r--r--fs/f2fs/xattr.h24
-rw-r--r--fs/file.c2
-rw-r--r--fs/file_table.c2
-rw-r--r--fs/filesystems.c14
-rw-r--r--fs/fs_context.c6
-rw-r--r--fs/fs_parser.c55
-rw-r--r--fs/fuse/dev.c182
-rw-r--r--fs/fuse/dev_uring.c34
-rw-r--r--fs/fuse/dir.c51
-rw-r--r--fs/fuse/file.c474
-rw-r--r--fs/fuse/fuse_dev_i.h9
-rw-r--r--fs/fuse/fuse_i.h10
-rw-r--r--fs/fuse/inode.c11
-rw-r--r--fs/fuse/readdir.c40
-rw-r--r--fs/fuse/virtio_fs.c3
-rw-r--r--fs/gfs2/Kconfig1
-rw-r--r--fs/gfs2/aops.c86
-rw-r--r--fs/gfs2/aops.h3
-rw-r--r--fs/gfs2/bmap.c9
-rw-r--r--fs/gfs2/glock.c3
-rw-r--r--fs/gfs2/glops.c9
-rw-r--r--fs/gfs2/incore.h9
-rw-r--r--fs/gfs2/inode.c99
-rw-r--r--fs/gfs2/inode.h1
-rw-r--r--fs/gfs2/lock_dlm.c11
-rw-r--r--fs/gfs2/log.c7
-rw-r--r--fs/gfs2/log.h11
-rw-r--r--fs/gfs2/lops.c17
-rw-r--r--fs/gfs2/lops.h2
-rw-r--r--fs/gfs2/meta_io.c2
-rw-r--r--fs/gfs2/meta_io.h4
-rw-r--r--fs/gfs2/ops_fstype.c69
-rw-r--r--fs/gfs2/recovery.c28
-rw-r--r--fs/gfs2/recovery.h2
-rw-r--r--fs/gfs2/super.c118
-rw-r--r--fs/gfs2/sys.c5
-rw-r--r--fs/gfs2/trans.c21
-rw-r--r--fs/gfs2/trans.h2
-rw-r--r--fs/gfs2/util.c2
-rw-r--r--fs/gfs2/xattr.c11
-rw-r--r--fs/gfs2/xattr.h2
-rw-r--r--fs/hfs/bnode.c6
-rw-r--r--fs/hfsplus/bnode.c6
-rw-r--r--fs/hfsplus/wrapper.c46
-rw-r--r--fs/internal.h7
-rw-r--r--fs/ioctl.c15
-rw-r--r--fs/iomap/buffered-io.c104
-rw-r--r--fs/iomap/trace.h27
-rw-r--r--fs/isofs/export.c2
-rw-r--r--fs/isofs/inode.c7
-rw-r--r--fs/isofs/isofs.h4
-rw-r--r--fs/isofs/rock.c40
-rw-r--r--fs/isofs/rock.h6
-rw-r--r--fs/isofs/util.c49
-rw-r--r--fs/jbd2/commit.c6
-rw-r--r--fs/jbd2/journal.c23
-rw-r--r--fs/jbd2/recovery.c10
-rw-r--r--fs/jbd2/revoke.c15
-rw-r--r--fs/jbd2/transaction.c5
-rw-r--r--fs/jfs/jfs_discard.c3
-rw-r--r--fs/jfs/jfs_dmap.c6
-rw-r--r--fs/jfs/jfs_dtree.c18
-rw-r--r--fs/jfs/jfs_metapage.c106
-rw-r--r--fs/kernfs/dir.c33
-rw-r--r--fs/kernfs/file.c3
-rw-r--r--fs/kernfs/kernfs-internal.h16
-rw-r--r--fs/kernfs/mount.c17
-rw-r--r--fs/libfs.c13
-rw-r--r--fs/mount.h5
-rw-r--r--fs/mpage.c13
-rw-r--r--fs/namei.c324
-rw-r--r--fs/namespace.c198
-rw-r--r--fs/netfs/buffered_read.c56
-rw-r--r--fs/netfs/buffered_write.c5
-rw-r--r--fs/netfs/direct_read.c16
-rw-r--r--fs/netfs/direct_write.c12
-rw-r--r--fs/netfs/fscache_cache.c2
-rw-r--r--fs/netfs/fscache_cookie.c2
-rw-r--r--fs/netfs/fscache_io.c10
-rw-r--r--fs/netfs/internal.h42
-rw-r--r--fs/netfs/main.c5
-rw-r--r--fs/netfs/misc.c219
-rw-r--r--fs/netfs/objects.c48
-rw-r--r--fs/netfs/read_collect.c199
-rw-r--r--fs/netfs/read_pgpriv2.c4
-rw-r--r--fs/netfs/read_retry.c26
-rw-r--r--fs/netfs/read_single.c6
-rw-r--r--fs/netfs/write_collect.c83
-rw-r--r--fs/netfs/write_issue.c38
-rw-r--r--fs/netfs/write_retry.c19
-rw-r--r--fs/nfs/Kconfig2
-rw-r--r--fs/nfs/client.c15
-rw-r--r--fs/nfs/delegation.c25
-rw-r--r--fs/nfs/dir.c15
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/filelayout/filelayoutdev.c6
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c8
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c8
-rw-r--r--fs/nfs/fscache.c1
-rw-r--r--fs/nfs/inode.c51
-rw-r--r--fs/nfs/internal.h8
-rw-r--r--fs/nfs/localio.c51
-rw-r--r--fs/nfs/namespace.c1
-rw-r--r--fs/nfs/netns.h6
-rw-r--r--fs/nfs/nfs3acl.c2
-rw-r--r--fs/nfs/nfs42.h1
-rw-r--r--fs/nfs/nfs42proc.c29
-rw-r--r--fs/nfs/nfs42xdr.c64
-rw-r--r--fs/nfs/nfs4_fs.h3
-rw-r--r--fs/nfs/nfs4file.c10
-rw-r--r--fs/nfs/nfs4proc.c93
-rw-r--r--fs/nfs/nfs4session.h4
-rw-r--r--fs/nfs/nfs4trace.h34
-rw-r--r--fs/nfs/nfs4xdr.c1
-rw-r--r--fs/nfs/pnfs.c51
-rw-r--r--fs/nfs/pnfs.h4
-rw-r--r--fs/nfs/pnfs_nfs.c43
-rw-r--r--fs/nfs/read.c3
-rw-r--r--fs/nfs/super.c19
-rw-r--r--fs/nfs/symlink.c20
-rw-r--r--fs/nfs/sysfs.c28
-rw-r--r--fs/nfs/unlink.c11
-rw-r--r--fs/nfs/write.c54
-rw-r--r--fs/nfs_common/nfslocalio.c99
-rw-r--r--fs/nfsd/Kconfig3
-rw-r--r--fs/nfsd/Makefile1
-rw-r--r--fs/nfsd/debugfs.c47
-rw-r--r--fs/nfsd/export.c3
-rw-r--r--fs/nfsd/filecache.c32
-rw-r--r--fs/nfsd/filecache.h3
-rw-r--r--fs/nfsd/localio.c70
-rw-r--r--fs/nfsd/nfs3proc.c72
-rw-r--r--fs/nfsd/nfs3xdr.c4
-rw-r--r--fs/nfsd/nfs4callback.c132
-rw-r--r--fs/nfsd/nfs4proc.c39
-rw-r--r--fs/nfsd/nfs4recover.c74
-rw-r--r--fs/nfsd/nfs4state.c42
-rw-r--r--fs/nfsd/nfs4xdr.c25
-rw-r--r--fs/nfsd/nfsctl.c25
-rw-r--r--fs/nfsd/nfsd.h34
-rw-r--r--fs/nfsd/nfsfh.h7
-rw-r--r--fs/nfsd/nfsproc.c53
-rw-r--r--fs/nfsd/nfssvc.c8
-rw-r--r--fs/nfsd/nfsxdr.c4
-rw-r--r--fs/nfsd/state.h23
-rw-r--r--fs/nfsd/trace.h302
-rw-r--r--fs/nfsd/vfs.c107
-rw-r--r--fs/nfsd/vfs.h10
-rw-r--r--fs/nfsd/xdr4.h4
-rw-r--r--fs/nfsd/xdr4cb.h5
-rw-r--r--fs/nilfs2/btree.c4
-rw-r--r--fs/nilfs2/direct.c3
-rw-r--r--fs/nilfs2/mdt.c2
-rw-r--r--fs/nilfs2/segment.c16
-rw-r--r--fs/nilfs2/segment.h1
-rw-r--r--fs/nilfs2/the_nilfs.c3
-rw-r--r--fs/notify/fanotify/fanotify.c3
-rw-r--r--fs/notify/fanotify/fanotify.h9
-rw-r--r--fs/notify/fanotify/fanotify_user.c57
-rw-r--r--fs/ntfs3/attrib.c72
-rw-r--r--fs/ntfs3/file.c118
-rw-r--r--fs/ntfs3/frecord.c74
-rw-r--r--fs/ntfs3/fslog.c32
-rw-r--r--fs/ntfs3/index.c8
-rw-r--r--fs/ntfs3/inode.c5
-rw-r--r--fs/ntfs3/namei.c2
-rw-r--r--fs/ntfs3/ntfs_fs.h5
-rw-r--r--fs/ocfs2/alloc.c1
-rw-r--r--fs/ocfs2/cluster/tcp.c2
-rw-r--r--fs/ocfs2/filecheck.c2
-rw-r--r--fs/ocfs2/journal.c82
-rw-r--r--fs/ocfs2/journal.h1
-rw-r--r--fs/ocfs2/ocfs2.h17
-rw-r--r--fs/ocfs2/quota_local.c11
-rw-r--r--fs/ocfs2/stackglue.c3
-rw-r--r--fs/ocfs2/suballoc.c38
-rw-r--r--fs/ocfs2/suballoc.h1
-rw-r--r--fs/ocfs2/super.c3
-rw-r--r--fs/omfs/inode.c176
-rw-r--r--fs/open.c14
-rw-r--r--fs/orangefs/inode.c9
-rw-r--r--fs/orangefs/orangefs-kernel.h8
-rw-r--r--fs/orangefs/orangefs-mod.c3
-rw-r--r--fs/orangefs/super.c189
-rw-r--r--fs/overlayfs/export.c6
-rw-r--r--fs/overlayfs/namei.c14
-rw-r--r--fs/overlayfs/overlayfs.h4
-rw-r--r--fs/overlayfs/readdir.c21
-rw-r--r--fs/overlayfs/super.c5
-rw-r--r--fs/pidfs.c165
-rw-r--r--fs/pipe.c3
-rw-r--r--fs/pnode.c17
-rw-r--r--fs/pnode.h2
-rw-r--r--fs/proc/base.c14
-rw-r--r--fs/proc/meminfo.c3
-rw-r--r--fs/proc/page.c161
-rw-r--r--fs/proc/task_mmu.c29
-rw-r--r--fs/proc/task_nommu.c4
-rw-r--r--fs/proc_namespace.c12
-rw-r--r--fs/quota/dquot.c2
-rw-r--r--fs/read_write.c4
-rw-r--r--fs/readdir.c47
-rw-r--r--fs/resctrl/Kconfig39
-rw-r--r--fs/resctrl/Makefile6
-rw-r--r--fs/resctrl/ctrlmondata.c661
-rw-r--r--fs/resctrl/internal.h426
-rw-r--r--fs/resctrl/monitor.c929
-rw-r--r--fs/resctrl/monitor_trace.h33
-rw-r--r--fs/resctrl/pseudo_lock.c1105
-rw-r--r--fs/resctrl/rdtgroup.c4353
-rw-r--r--fs/select.c4
-rw-r--r--fs/smb/client/cached_dir.c39
-rw-r--r--fs/smb/client/cifsencrypt.c16
-rw-r--r--fs/smb/client/cifsfs.c4
-rw-r--r--fs/smb/client/cifsglob.h24
-rw-r--r--fs/smb/client/cifspdu.h13
-rw-r--r--fs/smb/client/cifsproto.h9
-rw-r--r--fs/smb/client/cifssmb.c82
-rw-r--r--fs/smb/client/connect.c69
-rw-r--r--fs/smb/client/dir.c23
-rw-r--r--fs/smb/client/file.c46
-rw-r--r--fs/smb/client/fs_context.c25
-rw-r--r--fs/smb/client/fs_context.h47
-rw-r--r--fs/smb/client/inode.c25
-rw-r--r--fs/smb/client/misc.c8
-rw-r--r--fs/smb/client/namespace.c4
-rw-r--r--fs/smb/client/readdir.c10
-rw-r--r--fs/smb/client/reparse.c63
-rw-r--r--fs/smb/client/reparse.h5
-rw-r--r--fs/smb/client/sess.c85
-rw-r--r--fs/smb/client/smb1ops.c276
-rw-r--r--fs/smb/client/smb2inode.c2
-rw-r--r--fs/smb/client/smb2ops.c21
-rw-r--r--fs/smb/client/smb2pdu.c104
-rw-r--r--fs/smb/client/smb2proto.h3
-rw-r--r--fs/smb/common/smb2pdu.h6
-rw-r--r--fs/smb/server/Kconfig1
-rw-r--r--fs/smb/server/auth.c48
-rw-r--r--fs/smb/server/auth.h2
-rw-r--r--fs/smb/server/connection.c4
-rw-r--r--fs/smb/server/connection.h1
-rw-r--r--fs/smb/server/crypto_ctx.c8
-rw-r--r--fs/smb/server/crypto_ctx.h4
-rw-r--r--fs/smb/server/mgmt/user_session.c20
-rw-r--r--fs/smb/server/mgmt/user_session.h1
-rw-r--r--fs/smb/server/oplock.c41
-rw-r--r--fs/smb/server/oplock.h1
-rw-r--r--fs/smb/server/server.c1
-rw-r--r--fs/smb/server/smb2pdu.c100
-rw-r--r--fs/smb/server/smb2pdu.h3
-rw-r--r--fs/smb/server/smb_common.h2
-rw-r--r--fs/smb/server/transport_ipc.c7
-rw-r--r--fs/smb/server/transport_tcp.c14
-rw-r--r--fs/smb/server/transport_tcp.h1
-rw-r--r--fs/smb/server/vfs.c32
-rw-r--r--fs/smb/server/vfs_cache.c41
-rw-r--r--fs/splice.c2
-rw-r--r--fs/squashfs/Kconfig21
-rw-r--r--fs/squashfs/block.c28
-rw-r--r--fs/squashfs/super.c5
-rw-r--r--fs/stat.c73
-rw-r--r--fs/super.c327
-rw-r--r--fs/sysfs/group.c6
-rw-r--r--fs/tracefs/inode.c2
-rw-r--r--fs/ubifs/compress.c247
-rw-r--r--fs/udf/truncate.c2
-rw-r--r--fs/ufs/super.c307
-rw-r--r--fs/ufs/ufs.h9
-rw-r--r--fs/userfaultfd.c28
-rw-r--r--fs/vboxsf/file.c47
-rw-r--r--fs/xattr.c28
-rw-r--r--fs/xfs/Kconfig2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c5
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h6
-rw-r--r--fs/xfs/libxfs/xfs_log_rlimit.c4
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c343
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.h25
-rw-r--r--fs/xfs/scrub/fscounters.c4
-rw-r--r--fs/xfs/scrub/orphanage.c7
-rw-r--r--fs/xfs/scrub/scrub.c2
-rw-r--r--fs/xfs/xfs_aops.c22
-rw-r--r--fs/xfs/xfs_bio_io.c30
-rw-r--r--fs/xfs/xfs_bmap_item.c10
-rw-r--r--fs/xfs/xfs_bmap_item.h3
-rw-r--r--fs/xfs/xfs_buf.c123
-rw-r--r--fs/xfs/xfs_buf.h4
-rw-r--r--fs/xfs/xfs_buf_item.c19
-rw-r--r--fs/xfs/xfs_buf_item.h3
-rw-r--r--fs/xfs/xfs_buf_mem.c2
-rw-r--r--fs/xfs/xfs_discard.c17
-rw-r--r--fs/xfs/xfs_dquot.c3
-rw-r--r--fs/xfs/xfs_extfree_item.c10
-rw-r--r--fs/xfs/xfs_extfree_item.h3
-rw-r--r--fs/xfs/xfs_file.c87
-rw-r--r--fs/xfs/xfs_filestream.c15
-rw-r--r--fs/xfs/xfs_fsmap.c51
-rw-r--r--fs/xfs/xfs_globals.c2
-rw-r--r--fs/xfs/xfs_inode.h14
-rw-r--r--fs/xfs/xfs_inode_item.c6
-rw-r--r--fs/xfs/xfs_iomap.c190
-rw-r--r--fs/xfs/xfs_iomap.h1
-rw-r--r--fs/xfs/xfs_iops.c76
-rw-r--r--fs/xfs/xfs_iops.h3
-rw-r--r--fs/xfs/xfs_log.c34
-rw-r--r--fs/xfs/xfs_log_cil.c4
-rw-r--r--fs/xfs/xfs_log_priv.h13
-rw-r--r--fs/xfs/xfs_message.c16
-rw-r--r--fs/xfs/xfs_message.h4
-rw-r--r--fs/xfs/xfs_mount.c161
-rw-r--r--fs/xfs/xfs_mount.h28
-rw-r--r--fs/xfs/xfs_mru_cache.c15
-rw-r--r--fs/xfs/xfs_notify_failure.c6
-rw-r--r--fs/xfs/xfs_pnfs.c2
-rw-r--r--fs/xfs/xfs_refcount_item.c10
-rw-r--r--fs/xfs/xfs_refcount_item.h3
-rw-r--r--fs/xfs/xfs_reflink.c146
-rw-r--r--fs/xfs/xfs_reflink.h6
-rw-r--r--fs/xfs/xfs_rmap_item.c10
-rw-r--r--fs/xfs/xfs_rmap_item.h3
-rw-r--r--fs/xfs/xfs_super.c136
-rw-r--r--fs/xfs/xfs_sysctl.h2
-rw-r--r--fs/xfs/xfs_sysfs.c32
-rw-r--r--fs/xfs/xfs_trace.h115
-rw-r--r--fs/xfs/xfs_trans_ail.c39
-rw-r--r--fs/xfs/xfs_trans_priv.h28
-rw-r--r--fs/xfs/xfs_zone_alloc.c116
-rw-r--r--fs/xfs/xfs_zone_gc.c29
-rw-r--r--fs/zonefs/super.c34
-rw-r--r--include/acpi/acbuffer.h2
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acexcep.h2
-rw-r--r--include/acpi/acnames.h2
-rw-r--r--include/acpi/acoutput.h2
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h4
-rw-r--r--include/acpi/acrestyp.h2
-rw-r--r--include/acpi/actbl.h8
-rw-r--r--include/acpi/actbl1.h47
-rw-r--r--include/acpi/actbl2.h335
-rw-r--r--include/acpi/actbl3.h4
-rw-r--r--include/acpi/actypes.h10
-rw-r--r--include/acpi/acuuid.h2
-rw-r--r--include/acpi/cppc_acpi.h30
-rw-r--r--include/acpi/platform/acenv.h2
-rw-r--r--include/acpi/platform/acenvex.h2
-rw-r--r--include/acpi/platform/acgcc.h10
-rw-r--r--include/acpi/platform/acgccex.h2
-rw-r--r--include/acpi/platform/aclinux.h2
-rw-r--r--include/acpi/platform/aclinuxex.h2
-rw-r--r--include/acpi/platform/aczephyr.h2
-rw-r--r--include/asm-generic/hugetlb.h5
-rw-r--r--include/asm-generic/memory_model.h10
-rw-r--r--include/asm-generic/mshyperv.h10
-rw-r--r--include/asm-generic/pgalloc.h11
-rw-r--r--include/asm-generic/simd.h9
-rw-r--r--include/asm-generic/syscall.h30
-rw-r--r--include/asm-generic/tlb.h46
-rw-r--r--include/asm-generic/vmlinux.lds.h7
-rw-r--r--include/crypto/acompress.h109
-rw-r--r--include/crypto/algapi.h37
-rw-r--r--include/crypto/blake2b.h31
-rw-r--r--include/crypto/chacha.h89
-rw-r--r--include/crypto/ctr.h50
-rw-r--r--include/crypto/ghash.h4
-rw-r--r--include/crypto/hash.h176
-rw-r--r--include/crypto/internal/acompress.h128
-rw-r--r--include/crypto/internal/blake2b.h92
-rw-r--r--include/crypto/internal/blockhash.h52
-rw-r--r--include/crypto/internal/chacha.h43
-rw-r--r--include/crypto/internal/engine.h5
-rw-r--r--include/crypto/internal/geniv.h1
-rw-r--r--include/crypto/internal/hash.h117
-rw-r--r--include/crypto/internal/poly1305.h28
-rw-r--r--include/crypto/internal/scompress.h17
-rw-r--r--include/crypto/internal/sha2.h66
-rw-r--r--include/crypto/internal/simd.h10
-rw-r--r--include/crypto/internal/skcipher.h49
-rw-r--r--include/crypto/krb5.h5
-rw-r--r--include/crypto/md5.h3
-rw-r--r--include/crypto/null.h3
-rw-r--r--include/crypto/poly1305.h67
-rw-r--r--include/crypto/polyval.h8
-rw-r--r--include/crypto/rng.h8
-rw-r--r--include/crypto/scatterwalk.h65
-rw-r--r--include/crypto/sha1.h9
-rw-r--r--include/crypto/sha1_base.h81
-rw-r--r--include/crypto/sha2.h62
-rw-r--r--include/crypto/sha256_base.h135
-rw-r--r--include/crypto/sha3.h20
-rw-r--r--include/crypto/sha512_base.h88
-rw-r--r--include/crypto/sig.h2
-rw-r--r--include/crypto/sm3.h4
-rw-r--r--include/crypto/sm3_base.h92
-rw-r--r--include/crypto/streebog.h5
-rw-r--r--include/cxl/features.h7
-rw-r--r--include/drm/Makefile2
-rw-r--r--include/drm/bridge/analogix_dp.h7
-rw-r--r--include/drm/display/drm_dp.h1
-rw-r--r--include/drm/display/drm_dp_helper.h101
-rw-r--r--include/drm/display/drm_hdmi_helper.h6
-rw-r--r--include/drm/drm_atomic.h3
-rw-r--r--include/drm/drm_bridge.h195
-rw-r--r--include/drm/drm_bridge_helper.h12
-rw-r--r--include/drm/drm_device.h41
-rw-r--r--include/drm/drm_drv.h5
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_file.h3
-rw-r--r--include/drm/drm_gem.h18
-rw-r--r--include/drm/drm_gem_shmem_helper.h45
-rw-r--r--include/drm/drm_gpusvm.h47
-rw-r--r--include/drm/drm_kunit_helpers.h11
-rw-r--r--include/drm/drm_mipi_dsi.h23
-rw-r--r--include/drm/drm_mode_config.h10
-rw-r--r--include/drm/drm_panel.h49
-rw-r--r--include/drm/drm_panic.h12
-rw-r--r--include/drm/drm_plane.h17
-rw-r--r--include/drm/drm_print.h20
-rw-r--r--include/drm/drm_probe_helper.h2
-rw-r--r--include/drm/gpu_scheduler.h112
-rw-r--r--include/drm/intel/intel-gtt.h2
-rw-r--r--include/drm/intel/pciids.h5
-rw-r--r--include/drm/ttm/ttm_backup.h18
-rw-r--r--include/drm/ttm/ttm_bo.h2
-rw-r--r--include/drm/ttm/ttm_tt.h2
-rw-r--r--include/dt-bindings/arm/qcom,ids.h1
-rw-r--r--include/dt-bindings/clock/qcom,sm6350-videocc.h27
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g047-cpg.h3
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g056-cpg.h24
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g057-cpg.h4
-rw-r--r--include/dt-bindings/clock/rk3036-cru.h1
-rw-r--r--include/dt-bindings/clock/rockchip,rk3528-cru.h6
-rw-r--r--include/dt-bindings/clock/rockchip,rk3576-cru.h10
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov920.h51
-rw-r--r--include/dt-bindings/clock/sophgo,sg2044-clk.h153
-rw-r--r--include/dt-bindings/clock/sophgo,sg2044-pll.h27
-rw-r--r--include/dt-bindings/clock/spacemit,k1-syscon.h247
-rw-r--r--include/dt-bindings/clock/stm32h7-clks.h4
-rw-r--r--include/dt-bindings/clock/thead,th1520-clk-ap.h34
-rw-r--r--include/dt-bindings/memory/mediatek,mt6893-memory-port.h288
-rw-r--r--include/dt-bindings/power/mediatek,mt6893-power.h35
-rw-r--r--include/dt-bindings/power/rockchip,rk3562-power.h35
-rw-r--r--include/dt-bindings/reset/sun50i-h616-ccu.h1
-rw-r--r--include/dt-bindings/reset/thead,th1520-reset.h16
-rw-r--r--include/dt-bindings/sound/cs48l32.h20
-rw-r--r--include/hyperv/hvgdk_mini.h6
-rw-r--r--include/keys/rxrpc-type.h17
-rw-r--r--include/kunit/clk.h1
-rw-r--r--include/kunit/test.h2
-rw-r--r--include/kvm/arm_vgic.h3
-rw-r--r--include/linux/acpi.h23
-rw-r--r--include/linux/adreno-smmu-priv.h6
-rw-r--r--include/linux/alloc_tag.h12
-rw-r--r--include/linux/arch_topology.h8
-rw-r--r--include/linux/arm-smccc.h64
-rw-r--r--include/linux/arm_sdei.h4
-rw-r--r--include/linux/auxiliary_bus.h17
-rw-r--r--include/linux/backing-dev.h1
-rw-r--r--include/linux/backlight.h32
-rw-r--r--include/linux/binfmts.h1
-rw-r--r--include/linux/bio.h26
-rw-r--r--include/linux/bitfield.h21
-rw-r--r--include/linux/bitmap-str.h10
-rw-r--r--include/linux/bitops.h1
-rw-r--r--include/linux/bits.h57
-rw-r--r--include/linux/blk-mq.h10
-rw-r--r--include/linux/blk_types.h10
-rw-r--r--include/linux/blkdev.h109
-rw-r--r--include/linux/bpf-cgroup.h17
-rw-r--r--include/linux/bpf.h20
-rw-r--r--include/linux/bpf_verifier.h24
-rw-r--r--include/linux/btf.h1
-rw-r--r--include/linux/buffer_head.h9
-rw-r--r--include/linux/build_bug.h10
-rw-r--r--include/linux/bus/stm32_firewall_device.h15
-rw-r--r--include/linux/can/dev.h28
-rw-r--r--include/linux/ceph/osd_client.h6
-rw-r--r--include/linux/cgroup-defs.h101
-rw-r--r--include/linux/cgroup.h52
-rw-r--r--include/linux/cleanup.h19
-rw-r--r--include/linux/codetag.h8
-rw-r--r--include/linux/compiler-version.h30
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/compiler_types.h13
-rw-r--r--include/linux/configfs.h8
-rw-r--r--include/linux/coredump.h1
-rw-r--r--include/linux/cpu.h4
-rw-r--r--include/linux/cpufreq.h105
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpumask.h94
-rw-r--r--include/linux/cpuset.h9
-rw-r--r--include/linux/crash_core.h7
-rw-r--r--include/linux/crash_dump.h2
-rw-r--r--include/linux/crc16.h9
-rw-r--r--include/linux/crc32.h28
-rw-r--r--include/linux/crypto.h85
-rw-r--r--include/linux/damon.h6
-rw-r--r--include/linux/dcache.h110
-rw-r--r--include/linux/dccp.h289
-rw-r--r--include/linux/device-mapper.h9
-rw-r--r--include/linux/device.h38
-rw-r--r--include/linux/device/devres.h41
-rw-r--r--include/linux/device_cgroup.h7
-rw-r--r--include/linux/dma-buf.h31
-rw-r--r--include/linux/dma-fence-unwrap.h2
-rw-r--r--include/linux/dma-fence.h25
-rw-r--r--include/linux/dma-map-ops.h54
-rw-r--r--include/linux/dma-mapping.h97
-rw-r--r--include/linux/dmapool.h21
-rw-r--r--include/linux/edac.h7
-rw-r--r--include/linux/energy_model.h2
-rw-r--r--include/linux/entry-common.h43
-rw-r--r--include/linux/ethtool.h98
-rw-r--r--include/linux/execmem.h11
-rw-r--r--include/linux/exportfs.h10
-rw-r--r--include/linux/f2fs_fs.h1
-rw-r--r--include/linux/fanotify.h5
-rw-r--r--include/linux/fb.h12
-rw-r--r--include/linux/file.h2
-rw-r--r--include/linux/file_ref.h19
-rw-r--r--include/linux/find.h25
-rw-r--r--include/linux/firmware/cirrus/cs_dsp_test_utils.h1
-rw-r--r--include/linux/firmware/imx/sm.h19
-rw-r--r--include/linux/firmware/samsung/exynos-acpm-protocol.h6
-rw-r--r--include/linux/folio_queue.h42
-rw-r--r--include/linux/fs.h72
-rw-r--r--include/linux/fs_parser.h7
-rw-r--r--include/linux/fscache.h5
-rw-r--r--include/linux/fsl/ntmp.h121
-rw-r--r--include/linux/fsnotify_backend.h16
-rw-r--r--include/linux/ftrace.h11
-rw-r--r--include/linux/futex.h36
-rw-r--r--include/linux/fwnode.h5
-rw-r--r--include/linux/gfp.h8
-rw-r--r--include/linux/gpio/consumer.h12
-rw-r--r--include/linux/gpio/driver.h5
-rw-r--r--include/linux/habanalabs/hl_boot_if.h2
-rw-r--r--include/linux/hid.h9
-rw-r--r--include/linux/highmem-internal.h13
-rw-r--r--include/linux/highmem.h37
-rw-r--r--include/linux/hmm-dma.h33
-rw-r--r--include/linux/hmm.h24
-rw-r--r--include/linux/hrtimer.h2
-rw-r--r--include/linux/huge_mm.h17
-rw-r--r--include/linux/hugetlb.h20
-rw-r--r--include/linux/hung_task.h99
-rw-r--r--include/linux/hyperv.h15
-rw-r--r--include/linux/i2c-atr.h73
-rw-r--r--include/linux/i2c-smbus.h6
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/ieee80211.h80
-rw-r--r--include/linux/ima.h3
-rw-r--r--include/linux/inet.h2
-rw-r--r--include/linux/intel_vsec.h5
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/io-pgtable.h8
-rw-r--r--include/linux/io.h21
-rw-r--r--include/linux/io_uring/cmd.h9
-rw-r--r--include/linux/io_uring_types.h15
-rw-r--r--include/linux/iomap.h5
-rw-r--r--include/linux/iommu.h77
-rw-r--r--include/linux/ipmi.h13
-rw-r--r--include/linux/irq.h28
-rw-r--r--include/linux/irqchip/irq-davinci-aintc.h27
-rw-r--r--include/linux/irqchip/irq-msi-lib.h (renamed from drivers/irqchip/irq-msi-lib.h)6
-rw-r--r--include/linux/irqdomain.h499
-rw-r--r--include/linux/jbd2.h5
-rw-r--r--include/linux/jiffies.h2
-rw-r--r--include/linux/kernel.h14
-rw-r--r--include/linux/kexec.h48
-rw-r--r--include/linux/kexec_handover.h109
-rw-r--r--include/linux/khugepaged.h8
-rw-r--r--include/linux/kvm_dirty_ring.h11
-rw-r--r--include/linux/kvm_host.h35
-rw-r--r--include/linux/lcd.h21
-rw-r--r--include/linux/led-class-flash.h16
-rw-r--r--include/linux/leds.h6
-rw-r--r--include/linux/libata.h19
-rw-r--r--include/linux/list.h8
-rw-r--r--include/linux/livepatch_sched.h14
-rw-r--r--include/linux/llist.h23
-rw-r--r--include/linux/local_lock.h58
-rw-r--r--include/linux/local_lock_internal.h207
-rw-r--r--include/linux/mailbox_controller.h3
-rw-r--r--include/linux/maple_tree.h4
-rw-r--r--include/linux/mdio.h5
-rw-r--r--include/linux/memblock.h41
-rw-r--r--include/linux/memcontrol.h65
-rw-r--r--include/linux/memory.h10
-rw-r--r--include/linux/mempolicy.h4
-rw-r--r--include/linux/mfd/aat2870.h3
-rw-r--r--include/linux/mfd/bcm590xx.h28
-rw-r--r--include/linux/mfd/max14577-private.h2
-rw-r--r--include/linux/mfd/max14577.h2
-rw-r--r--include/linux/mfd/max77686-private.h2
-rw-r--r--include/linux/mfd/max77686.h2
-rw-r--r--include/linux/mfd/max77693-private.h2
-rw-r--r--include/linux/mfd/max77693.h2
-rw-r--r--include/linux/mfd/max77759.h165
-rw-r--r--include/linux/mfd/max8997-private.h2
-rw-r--r--include/linux/mfd/max8997.h2
-rw-r--r--include/linux/mfd/max8998-private.h2
-rw-r--r--include/linux/mfd/max8998.h2
-rw-r--r--include/linux/mfd/rohm-bd96801.h2
-rw-r--r--include/linux/mfd/rohm-bd96802.h74
-rw-r--r--include/linux/mfd/rohm-generic.h3
-rw-r--r--include/linux/mfd/samsung/core.h7
-rw-r--r--include/linux/mfd/samsung/irq.h103
-rw-r--r--include/linux/mfd/samsung/rtc.h37
-rw-r--r--include/linux/mfd/samsung/s2mpg10.h454
-rw-r--r--include/linux/mfd/stm32-lptimer.h37
-rw-r--r--include/linux/micrel_phy.h1
-rw-r--r--include/linux/misc_cgroup.h4
-rw-r--r--include/linux/mlx5/driver.h1
-rw-r--r--include/linux/mm.h440
-rw-r--r--include/linux/mm_inline.h2
-rw-r--r--include/linux/mm_types.h49
-rw-r--r--include/linux/mman.h2
-rw-r--r--include/linux/mmap_lock.h231
-rw-r--r--include/linux/mmc/card.h1
-rw-r--r--include/linux/mmc/slot-gpio.h4
-rw-r--r--include/linux/mmu_notifier.h3
-rw-r--r--include/linux/mmzone.h110
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/module.h7
-rw-r--r--include/linux/mount.h90
-rw-r--r--include/linux/mroute_base.h5
-rw-r--r--include/linux/msi.h23
-rw-r--r--include/linux/mtd/nand-qpic-common.h4
-rw-r--r--include/linux/mtd/partitions.h2
-rw-r--r--include/linux/mtd/spinand.h123
-rw-r--r--include/linux/mutex.h32
-rw-r--r--include/linux/namei.h18
-rw-r--r--include/linux/net.h19
-rw-r--r--include/linux/net/intel/iidc.h109
-rw-r--r--include/linux/net/intel/iidc_rdma.h68
-rw-r--r--include/linux/net/intel/iidc_rdma_ice.h70
-rw-r--r--include/linux/net_tstamp.h7
-rw-r--r--include/linux/netdevice.h52
-rw-r--r--include/linux/netdevice_xmit.h6
-rw-r--r--include/linux/netfilter.h15
-rw-r--r--include/linux/netfs.h45
-rw-r--r--include/linux/netlink.h3
-rw-r--r--include/linux/nfs.h7
-rw-r--r--include/linux/nfs4.h2
-rw-r--r--include/linux/nfs_fs_sb.h14
-rw-r--r--include/linux/nfslocalio.h26
-rw-r--r--include/linux/nodemask.h20
-rw-r--r--include/linux/numa_memblks.h1
-rw-r--r--include/linux/nvme.h77
-rw-r--r--include/linux/of_reserved_mem.h26
-rw-r--r--include/linux/oid_registry.h1
-rw-r--r--include/linux/overflow.h44
-rw-r--r--include/linux/page-flags-layout.h4
-rw-r--r--include/linux/page-flags.h28
-rw-r--r--include/linux/page_table_check.h30
-rw-r--r--include/linux/pageblock-flags.h8
-rw-r--r--include/linux/pagemap.h102
-rw-r--r--include/linux/panic.h2
-rw-r--r--include/linux/part_stat.h2
-rw-r--r--include/linux/pci-ecam.h6
-rw-r--r--include/linux/pci-epc.h11
-rw-r--r--include/linux/pci-epf.h3
-rw-r--r--include/linux/pci-p2pdma.h85
-rw-r--r--include/linux/pci.h70
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/pds/pds_adminq.h3
-rw-r--r--include/linux/pe.h279
-rw-r--r--include/linux/percpu-defs.h2
-rw-r--r--include/linux/percpu-rwsem.h20
-rw-r--r--include/linux/percpu.h4
-rw-r--r--include/linux/perf_event.h299
-rw-r--r--include/linux/pgalloc_tag.h8
-rw-r--r--include/linux/pgtable.h128
-rw-r--r--include/linux/phy.h70
-rw-r--r--include/linux/phy_fixed.h30
-rw-r--r--include/linux/phylink.h31
-rw-r--r--include/linux/pid.h2
-rw-r--r--include/linux/pidfs.h8
-rw-r--r--include/linux/pinctrl/machine.h19
-rw-r--r--include/linux/platform_data/mlxreg.h4
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h19
-rw-r--r--include/linux/platform_data/x86/int3472.h (renamed from drivers/platform/x86/intel/int3472/common.h)73
-rw-r--r--include/linux/platform_data/x86/intel_pmc_ipc.h4
-rw-r--r--include/linux/pm_domain.h10
-rw-r--r--include/linux/pm_opp.h32
-rw-r--r--include/linux/pm_runtime.h6
-rw-r--r--include/linux/pm_wakeup.h15
-rw-r--r--include/linux/poison.h4
-rw-r--r--include/linux/power_supply.h4
-rw-r--r--include/linux/property.h7
-rw-r--r--include/linux/psp-sev.h3
-rw-r--r--include/linux/ptdump.h15
-rw-r--r--include/linux/ptp_clock_kernel.h18
-rw-r--r--include/linux/pwm.h10
-rw-r--r--include/linux/ratelimit.h37
-rw-r--r--include/linux/ratelimit_types.h5
-rw-r--r--include/linux/rcuref.h22
-rw-r--r--include/linux/regmap.h3
-rw-r--r--include/linux/regulator/max8952.h2
-rw-r--r--include/linux/regulator/pca9450.h5
-rw-r--r--include/linux/relay.h3
-rw-r--r--include/linux/resctrl.h38
-rw-r--r--include/linux/resctrl_types.h16
-rw-r--r--include/linux/reset.h6
-rw-r--r--include/linux/restart_block.h2
-rw-r--r--include/linux/ring_buffer.h1
-rw-r--r--include/linux/rio_drv.h5
-rw-r--r--include/linux/rmap.h2
-rw-r--r--include/linux/rpmsg.h22
-rw-r--r--include/linux/rtnetlink.h2
-rw-r--r--include/linux/scatterlist.h23
-rw-r--r--include/linux/sched.h32
-rw-r--r--include/linux/sched/task_stack.h2
-rw-r--r--include/linux/sched/topology.h6
-rw-r--r--include/linux/scmi_imx_protocol.h42
-rw-r--r--include/linux/screen_info.h9
-rw-r--r--include/linux/security.h12
-rw-r--r--include/linux/semaphore.h15
-rw-r--r--include/linux/shmem_fs.h7
-rw-r--r--include/linux/skbuff.h41
-rw-r--r--include/linux/skbuff_ref.h4
-rw-r--r--include/linux/sm501.h3
-rw-r--r--include/linux/soc/mediatek/mtk_wed.h2
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h8
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h11
-rw-r--r--include/linux/socket.h2
-rw-r--r--include/linux/sony-laptop.h39
-rw-r--r--include/linux/sort.h10
-rw-r--r--include/linux/soundwire/sdw_intel.h2
-rw-r--r--include/linux/spi/sh_msiof.h125
-rw-r--r--include/linux/spi/spi.h78
-rw-r--r--include/linux/stat.h1
-rw-r--r--include/linux/stmmac.h4
-rw-r--r--include/linux/string_helpers.h1
-rw-r--r--include/linux/sunrpc/svc.h44
-rw-r--r--include/linux/sunrpc/svc_rdma.h6
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/sunrpc/svcsock.h4
-rw-r--r--include/linux/sunrpc/xprt.h17
-rw-r--r--include/linux/suspend.h9
-rw-r--r--include/linux/swap.h12
-rw-r--r--include/linux/sysfs.h27
-rw-r--r--include/linux/tcp.h5
-rw-r--r--include/linux/tfrc.h51
-rw-r--r--include/linux/tick.h7
-rw-r--r--include/linux/timekeeper_internal.h8
-rw-r--r--include/linux/timer.h42
-rw-r--r--include/linux/topology.h14
-rw-r--r--include/linux/tpm.h21
-rw-r--r--include/linux/tpm_svsm.h149
-rw-r--r--include/linux/tracepoint.h38
-rw-r--r--include/linux/tsm-mr.h89
-rw-r--r--include/linux/tsm.h22
-rw-r--r--include/linux/turris-signing-key.h35
-rw-r--r--include/linux/types.h4
-rw-r--r--include/linux/ubsan.h6
-rw-r--r--include/linux/udp.h19
-rw-r--r--include/linux/uio.h18
-rw-r--r--include/linux/unroll.h4
-rw-r--r--include/linux/uprobes.h6
-rw-r--r--include/linux/util_macros.h69
-rw-r--r--include/linux/vermagic.h1
-rw-r--r--include/linux/virtio.h3
-rw-r--r--include/linux/virtio_config.h2
-rw-r--r--include/linux/virtio_vsock.h1
-rw-r--r--include/linux/vm_event_item.h2
-rw-r--r--include/linux/vmalloc.h18
-rw-r--r--include/linux/workqueue.h6
-rw-r--r--include/linux/xarray.h24
-rw-r--r--include/linux/zpool.h4
-rw-r--r--include/linux/zsmalloc.h3
-rw-r--r--include/media/v4l2-common.h58
-rw-r--r--include/media/vsp1.h4
-rw-r--r--include/memory/renesas-rpc-if.h4
-rw-r--r--include/net/af_rxrpc.h54
-rw-r--r--include/net/af_vsock.h1
-rw-r--r--include/net/bluetooth/bluetooth.h4
-rw-r--r--include/net/bluetooth/hci.h8
-rw-r--r--include/net/bluetooth/hci_core.h70
-rw-r--r--include/net/bluetooth/hci_drv.h76
-rw-r--r--include/net/bluetooth/hci_mon.h2
-rw-r--r--include/net/bluetooth/hci_sync.h3
-rw-r--r--include/net/cfg80211.h44
-rw-r--r--include/net/checksum.h12
-rw-r--r--include/net/devlink.h10
-rw-r--r--include/net/dropreason-core.h10
-rw-r--r--include/net/dsa.h5
-rw-r--r--include/net/fib_rules.h2
-rw-r--r--include/net/flow.h2
-rw-r--r--include/net/inet_hashtables.h7
-rw-r--r--include/net/ip6_fib.h1
-rw-r--r--include/net/ip_fib.h3
-rw-r--r--include/net/ip_tunnels.h7
-rw-r--r--include/net/l3mdev.h27
-rw-r--r--include/net/lwtunnel.h13
-rw-r--r--include/net/mac80211.h28
-rw-r--r--include/net/mana/gdma.h47
-rw-r--r--include/net/mana/hw_channel.h9
-rw-r--r--include/net/mana/mana.h7
-rw-r--r--include/net/mptcp.h13
-rw-r--r--include/net/net_namespace.h4
-rw-r--r--include/net/netdev_lock.h50
-rw-r--r--include/net/netdev_queues.h28
-rw-r--r--include/net/netdev_rx_queue.h6
-rw-r--r--include/net/netfilter/nf_tables.h12
-rw-r--r--include/net/netfilter/nft_fib.h9
-rw-r--r--include/net/netlink.h22
-rw-r--r--include/net/netmem.h34
-rw-r--r--include/net/netns/ipv4.h11
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/nexthop.h2
-rw-r--r--include/net/p8022.h16
-rw-r--r--include/net/page_pool/helpers.h11
-rw-r--r--include/net/page_pool/types.h6
-rw-r--r--include/net/route.h3
-rw-r--r--include/net/rps.h29
-rw-r--r--include/net/rstreason.h2
-rw-r--r--include/net/sch_generic.h23
-rw-r--r--include/net/scm.h121
-rw-r--r--include/net/sctp/checksum.h29
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sctp/sm.h1
-rw-r--r--include/net/sctp/structs.h5
-rw-r--r--include/net/secure_seq.h4
-rw-r--r--include/net/sock.h86
-rw-r--r--include/net/strparser.h2
-rw-r--r--include/net/tcp.h3
-rw-r--r--include/net/udp.h1
-rw-r--r--include/net/udp_tunnel.h15
-rw-r--r--include/net/vxlan.h5
-rw-r--r--include/net/xdp.h4
-rw-r--r--include/net/xdp_sock.h3
-rw-r--r--include/net/xfrm.h20
-rw-r--r--include/net/xsk_buff_pool.h4
-rw-r--r--include/rdma/ib_cm.h17
-rw-r--r--include/rdma/ib_umem_odp.h25
-rw-r--r--include/rdma/ib_verbs.h25
-rw-r--r--include/rdma/rdma_cm.h1
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/scsi/scsi_proto.h3
-rw-r--r--include/soc/mscc/ocelot.h7
-rw-r--r--include/soc/qcom/ice.h34
-rw-r--r--include/soc/qcom/qcom-spmi-pmic.h2
-rw-r--r--include/sound/core.h1
-rw-r--r--include/sound/cs-amp-lib.h2
-rw-r--r--include/sound/cs35l56.h29
-rw-r--r--include/sound/cs42l52.h29
-rw-r--r--include/sound/cs42l56.h45
-rw-r--r--include/sound/cs42l73.h19
-rw-r--r--include/sound/cs48l32.h47
-rw-r--r--include/sound/cs48l32_registers.h530
-rw-r--r--include/sound/dmaengine_pcm.h2
-rw-r--r--include/sound/gus.h22
-rw-r--r--include/sound/hdaudio.h6
-rw-r--r--include/sound/hdaudio_ext.h6
-rw-r--r--include/sound/jack.h6
-rw-r--r--include/sound/pcm.h4
-rw-r--r--include/sound/sdca_asoc.h42
-rw-r--r--include/sound/sdca_function.h71
-rw-r--r--include/sound/snd_wavefront.h4
-rw-r--r--include/sound/soc-acpi.h13
-rw-r--r--include/sound/soc-dapm.h4
-rw-r--r--include/sound/soc.h15
-rw-r--r--include/sound/soc_sdw_utils.h6
-rw-r--r--include/sound/sof.h1
-rw-r--r--include/sound/tas2781-comlib-i2c.h37
-rw-r--r--include/sound/tas2781.h81
-rw-r--r--include/sound/tpa6130a2-plat.h17
-rw-r--r--include/sound/ump_convert.h2
-rw-r--r--include/sound/ump_msg.h4
-rw-r--r--include/target/target_core_base.h26
-rw-r--r--include/trace/bpf_probe.h8
-rw-r--r--include/trace/define_trace.h17
-rw-r--r--include/trace/events/afs.h11
-rw-r--r--include/trace/events/block.h17
-rw-r--r--include/trace/events/btrfs.h91
-rw-r--r--include/trace/events/cgroup.h12
-rw-r--r--include/trace/events/erofs.h2
-rw-r--r--include/trace/events/exceptions.h (renamed from arch/x86/include/asm/trace/exceptions.h)27
-rw-r--r--include/trace/events/f2fs.h5
-rw-r--r--include/trace/events/fs_dax.h78
-rw-r--r--include/trace/events/huge_memory.h12
-rw-r--r--include/trace/events/io_uring.h2
-rw-r--r--include/trace/events/irq_matrix.h8
-rw-r--r--include/trace/events/mmflags.h4
-rw-r--r--include/trace/events/netfs.h11
-rw-r--r--include/trace/events/rpcgss.h4
-rw-r--r--include/trace/events/rxrpc.h163
-rw-r--r--include/trace/events/sched.h191
-rw-r--r--include/trace/events/sock.h1
-rw-r--r--include/trace/events/sunrpc.h17
-rw-r--r--include/trace/events/tcp.h101
-rw-r--r--include/trace/events/tsm_mr.h80
-rw-r--r--include/trace/events/xdp.h26
-rw-r--r--include/trace/misc/fs.h21
-rw-r--r--include/uapi/asm-generic/socket.h2
-rw-r--r--include/uapi/cxl/features.h21
-rw-r--r--include/uapi/drm/amdgpu_drm.h317
-rw-r--r--include/uapi/drm/asahi_drm.h1194
-rw-r--r--include/uapi/drm/drm.h4
-rw-r--r--include/uapi/drm/drm_fourcc.h45
-rw-r--r--include/uapi/drm/ivpu_accel.h4
-rw-r--r--include/uapi/drm/nova_drm.h101
-rw-r--r--include/uapi/drm/panthor_drm.h23
-rw-r--r--include/uapi/drm/virtgpu_drm.h6
-rw-r--r--include/uapi/drm/xe_drm.h6
-rw-r--r--include/uapi/linux/blktrace_api.h2
-rw-r--r--include/uapi/linux/bpf.h22
-rw-r--r--include/uapi/linux/cec-funcs.h40
-rw-r--r--include/uapi/linux/devlink.h15
-rw-r--r--include/uapi/linux/dm-ioctl.h9
-rw-r--r--include/uapi/linux/ethtool.h134
-rw-r--r--include/uapi/linux/ethtool_netlink_generated.h19
-rw-r--r--include/uapi/linux/fib_rules.h4
-rw-r--r--include/uapi/linux/fs.h1
-rw-r--r--include/uapi/linux/fscrypt.h6
-rw-r--r--include/uapi/linux/fuse.h6
-rw-r--r--include/uapi/linux/futex.h9
-rw-r--r--include/uapi/linux/if_addr.h4
-rw-r--r--include/uapi/linux/if_addrlabel.h4
-rw-r--r--include/uapi/linux/if_alg.h6
-rw-r--r--include/uapi/linux/if_arcnet.h6
-rw-r--r--include/uapi/linux/if_bonding.h6
-rw-r--r--include/uapi/linux/if_bridge.h10
-rw-r--r--include/uapi/linux/if_fc.h6
-rw-r--r--include/uapi/linux/if_hippi.h6
-rw-r--r--include/uapi/linux/if_link.h15
-rw-r--r--include/uapi/linux/if_packet.h4
-rw-r--r--include/uapi/linux/if_plip.h4
-rw-r--r--include/uapi/linux/if_slip.h4
-rw-r--r--include/uapi/linux/if_x25.h6
-rw-r--r--include/uapi/linux/if_xdp.h6
-rw-r--r--include/uapi/linux/io_uring.h16
-rw-r--r--include/uapi/linux/ip6_tunnel.h4
-rw-r--r--include/uapi/linux/isst_if.h26
-rw-r--r--include/uapi/linux/kfd_ioctl.h5
-rw-r--r--include/uapi/linux/kvm.h4
-rw-r--r--include/uapi/linux/landlock.h87
-rw-r--r--include/uapi/linux/media/amlogic/c3-isp-config.h564
-rw-r--r--include/uapi/linux/neighbour.h4
-rw-r--r--include/uapi/linux/net_dropmon.h4
-rw-r--r--include/uapi/linux/net_tstamp.h6
-rw-r--r--include/uapi/linux/netdev.h1
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h22
-rw-r--r--include/uapi/linux/netfilter/nfnetlink.h2
-rw-r--r--include/uapi/linux/netlink_diag.h4
-rw-r--r--include/uapi/linux/nl80211.h6
-rw-r--r--include/uapi/linux/ovpn.h109
-rw-r--r--include/uapi/linux/pci_regs.h12
-rw-r--r--include/uapi/linux/perf_event.h657
-rw-r--r--include/uapi/linux/pidfd.h18
-rw-r--r--include/uapi/linux/pkt_cls.h5
-rw-r--r--include/uapi/linux/pkt_sched.h5
-rw-r--r--include/uapi/linux/prctl.h7
-rw-r--r--include/uapi/linux/ptrace.h7
-rw-r--r--include/uapi/linux/rxrpc.h77
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--include/uapi/linux/stat.h8
-rw-r--r--include/uapi/linux/taskstats.h47
-rw-r--r--include/uapi/linux/tcp.h1
-rw-r--r--include/uapi/linux/ublk_cmd.h128
-rw-r--r--include/uapi/linux/udp.h1
-rw-r--r--include/uapi/linux/vhost.h4
-rw-r--r--include/uapi/linux/videodev2.h18
-rw-r--r--include/uapi/linux/virtio_gpu.h3
-rw-r--r--include/uapi/linux/virtio_pci.h1
-rw-r--r--include/uapi/linux/virtio_rtc.h237
-rw-r--r--include/uapi/linux/wireguard.h9
-rw-r--r--include/uapi/rdma/ib_user_verbs.h16
-rw-r--r--include/ufs/ufs.h37
-rw-r--r--include/ufs/ufs_quirks.h6
-rw-r--r--include/ufs/ufshcd.h15
-rw-r--r--include/vdso/unaligned.h12
-rw-r--r--include/video/mach64.h3
-rw-r--r--include/video/pixel_format.h41
-rw-r--r--init/Kconfig85
-rw-r--r--init/main.c22
-rw-r--r--io_uring/Makefile6
-rw-r--r--io_uring/advise.c4
-rw-r--r--io_uring/cancel.c2
-rw-r--r--io_uring/cmd_net.c83
-rw-r--r--io_uring/epoll.c4
-rw-r--r--io_uring/eventfd.c66
-rw-r--r--io_uring/eventfd.h3
-rw-r--r--io_uring/fdinfo.c90
-rw-r--r--io_uring/fs.c10
-rw-r--r--io_uring/futex.c10
-rw-r--r--io_uring/io-wq.c65
-rw-r--r--io_uring/io-wq.h5
-rw-r--r--io_uring/io_uring.c370
-rw-r--r--io_uring/io_uring.h4
-rw-r--r--io_uring/kbuf.c150
-rw-r--r--io_uring/kbuf.h8
-rw-r--r--io_uring/memmap.c13
-rw-r--r--io_uring/memmap.h4
-rw-r--r--io_uring/msg_ring.c2
-rw-r--r--io_uring/net.c76
-rw-r--r--io_uring/nop.c2
-rw-r--r--io_uring/notif.c1
-rw-r--r--io_uring/opdef.c11
-rw-r--r--io_uring/openclose.c139
-rw-r--r--io_uring/openclose.h3
-rw-r--r--io_uring/poll.c4
-rw-r--r--io_uring/rsrc.c200
-rw-r--r--io_uring/rsrc.h28
-rw-r--r--io_uring/rw.c8
-rw-r--r--io_uring/rw.h2
-rw-r--r--io_uring/splice.c4
-rw-r--r--io_uring/sqpoll.c2
-rw-r--r--io_uring/statx.c2
-rw-r--r--io_uring/sync.c6
-rw-r--r--io_uring/tctx.c2
-rw-r--r--io_uring/timeout.c13
-rw-r--r--io_uring/timeout.h13
-rw-r--r--io_uring/truncate.c2
-rw-r--r--io_uring/uring_cmd.c96
-rw-r--r--io_uring/uring_cmd.h6
-rw-r--r--io_uring/waitid.c2
-rw-r--r--io_uring/xattr.c8
-rw-r--r--io_uring/zcrx.c407
-rw-r--r--io_uring/zcrx.h30
-rw-r--r--ipc/mqueue.c5
-rw-r--r--ipc/shm.c5
-rw-r--r--kernel/Kconfig.kexec34
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/audit.c6
-rw-r--r--kernel/audit_watch.c16
-rw-r--r--kernel/bpf/Makefile3
-rw-r--r--kernel/bpf/bpf_struct_ops.c2
-rw-r--r--kernel/bpf/btf.c52
-rw-r--r--kernel/bpf/cgroup.c70
-rw-r--r--kernel/bpf/core.c29
-rw-r--r--kernel/bpf/dmabuf_iter.c150
-rw-r--r--kernel/bpf/hashtab.c150
-rw-r--r--kernel/bpf/helpers.c133
-rw-r--r--kernel/bpf/inode.c2
-rw-r--r--kernel/bpf/preload/bpf_preload_kern.c1
-rw-r--r--kernel/bpf/queue_stack_maps.c35
-rw-r--r--kernel/bpf/ringbuf.c17
-rw-r--r--kernel/bpf/rqspinlock.c2
-rw-r--r--kernel/bpf/syscall.c18
-rw-r--r--kernel/bpf/sysfs_btf.c32
-rw-r--r--kernel/bpf/verifier.c636
-rw-r--r--kernel/cgroup/cgroup-internal.h6
-rw-r--r--kernel/cgroup/cgroup.c185
-rw-r--r--kernel/cgroup/cpuset-internal.h1
-rw-r--r--kernel/cgroup/cpuset.c515
-rw-r--r--kernel/cgroup/misc.c4
-rw-r--r--kernel/cgroup/rstat.c466
-rw-r--r--kernel/configs/debug.config5
-rw-r--r--kernel/configs/xen.config3
-rw-r--r--kernel/cpu.c5
-rw-r--r--kernel/crash_dump_dm_crypt.c464
-rw-r--r--kernel/crash_reserve.c2
-rw-r--r--kernel/delayacct.c51
-rw-r--r--kernel/dma/coherent.c12
-rw-r--r--kernel/dma/contiguous.c3
-rw-r--r--kernel/dma/direct.c44
-rw-r--r--kernel/dma/mapping.c43
-rw-r--r--kernel/entry/common.c49
-rw-r--r--kernel/events/core.c675
-rw-r--r--kernel/events/ring_buffer.c29
-rw-r--r--kernel/events/uprobes.c372
-rw-r--r--kernel/exit.c80
-rw-r--r--kernel/fork.c388
-rw-r--r--kernel/futex/core.c805
-rw-r--r--kernel/futex/futex.h74
-rw-r--r--kernel/futex/pi.c308
-rw-r--r--kernel/futex/requeue.c460
-rw-r--r--kernel/futex/waitwake.c207
-rw-r--r--kernel/gcov/gcc_4_7.c4
-rw-r--r--kernel/hung_task.c55
-rw-r--r--kernel/irq/autoprobe.c26
-rw-r--r--kernel/irq/chip.c631
-rw-r--r--kernel/irq/cpuhotplug.c12
-rw-r--r--kernel/irq/debugfs.c7
-rw-r--r--kernel/irq/generic-chip.c47
-rw-r--r--kernel/irq/internals.h48
-rw-r--r--kernel/irq/irqdesc.c176
-rw-r--r--kernel/irq/irqdomain.c130
-rw-r--r--kernel/irq/manage.c1166
-rw-r--r--kernel/irq/msi.c194
-rw-r--r--kernel/irq/pm.c38
-rw-r--r--kernel/irq/proc.c67
-rw-r--r--kernel/irq/resend.c50
-rw-r--r--kernel/irq/spurious.c104
-rw-r--r--kernel/kcsan/kcsan_test.c2
-rw-r--r--kernel/kexec_core.c54
-rw-r--r--kernel/kexec_file.c127
-rw-r--r--kernel/kexec_handover.c1266
-rw-r--r--kernel/kexec_internal.h16
-rw-r--r--kernel/livepatch/transition.c49
-rw-r--r--kernel/locking/lockdep.c76
-rw-r--r--kernel/locking/lockdep_internals.h1
-rw-r--r--kernel/locking/lockdep_proc.c2
-rw-r--r--kernel/locking/mutex.c26
-rw-r--r--kernel/locking/percpu-rwsem.c13
-rw-r--r--kernel/locking/rtmutex_api.c33
-rw-r--r--kernel/locking/semaphore.c57
-rw-r--r--kernel/module/Kconfig5
-rw-r--r--kernel/module/internal.h7
-rw-r--r--kernel/module/main.c28
-rw-r--r--kernel/module/strict_rwx.c47
-rw-r--r--kernel/nsproxy.c30
-rw-r--r--kernel/padata.c3
-rw-r--r--kernel/panic.c38
-rw-r--r--kernel/params.c51
-rw-r--r--kernel/pid.c6
-rw-r--r--kernel/power/energy_model.c72
-rw-r--r--kernel/power/hibernate.c39
-rw-r--r--kernel/power/main.c39
-rw-r--r--kernel/power/power.h8
-rw-r--r--kernel/power/process.c8
-rw-r--r--kernel/power/snapshot.c42
-rw-r--r--kernel/power/suspend.c7
-rw-r--r--kernel/power/swap.c103
-rw-r--r--kernel/power/wakelock.c3
-rw-r--r--kernel/ptrace.c179
-rw-r--r--kernel/rcu/rcu.h18
-rw-r--r--kernel/rcu/rcuscale.c2
-rw-r--r--kernel/rcu/rcutorture.c208
-rw-r--r--kernel/rcu/srcutree.c2
-rw-r--r--kernel/rcu/tree.c84
-rw-r--r--kernel/rcu/tree.h3
-rw-r--r--kernel/rcu/tree_exp.h2
-rw-r--r--kernel/rcu/tree_nocb.h10
-rw-r--r--kernel/rcu/tree_plugin.h2
-rw-r--r--kernel/rcu/tree_stall.h4
-rw-r--r--kernel/relay.c111
-rw-r--r--kernel/sched/core.c157
-rw-r--r--kernel/sched/cpufreq_schedutil.c58
-rw-r--r--kernel/sched/debug.c8
-rw-r--r--kernel/sched/ext.c1938
-rw-r--r--kernel/sched/ext.h13
-rw-r--r--kernel/sched/ext_idle.c314
-rw-r--r--kernel/sched/ext_idle.h3
-rw-r--r--kernel/sched/fair.c49
-rw-r--r--kernel/sched/isolation.c2
-rw-r--r--kernel/sched/rt.c105
-rw-r--r--kernel/sched/sched.h40
-rw-r--r--kernel/sched/syscalls.c5
-rw-r--r--kernel/sched/topology.c154
-rw-r--r--kernel/signal.c11
-rw-r--r--kernel/sys.c4
-rw-r--r--kernel/sysctl-test.c49
-rw-r--r--kernel/sysctl.c108
-rw-r--r--kernel/time/alarmtimer.c84
-rw-r--r--kernel/time/clocksource.c2
-rw-r--r--kernel/time/hrtimer.c2
-rw-r--r--kernel/time/jiffies.c5
-rw-r--r--kernel/time/posix-timers.c23
-rw-r--r--kernel/time/sleep_timeout.c2
-rw-r--r--kernel/time/tick-common.c22
-rw-r--r--kernel/time/timekeeping.c50
-rw-r--r--kernel/time/timer.c78
-rw-r--r--kernel/time/vsyscall.c4
-rw-r--r--kernel/trace/blktrace.c11
-rw-r--r--kernel/trace/bpf_trace.c321
-rw-r--r--kernel/trace/fgraph.c2
-rw-r--r--kernel/trace/fprobe.c171
-rw-r--r--kernel/trace/ftrace.c395
-rw-r--r--kernel/trace/pid_list.c8
-rw-r--r--kernel/trace/ring_buffer.c239
-rw-r--r--kernel/trace/rv/rv.c7
-rw-r--r--kernel/trace/trace.c309
-rw-r--r--kernel/trace/trace.h30
-rw-r--r--kernel/trace/trace_branch.c4
-rw-r--r--kernel/trace/trace_dynevent.c16
-rw-r--r--kernel/trace/trace_dynevent.h1
-rw-r--r--kernel/trace/trace_entries.h16
-rw-r--r--kernel/trace/trace_eprobe.c3
-rw-r--r--kernel/trace/trace_events.c39
-rw-r--r--kernel/trace/trace_events_filter.c4
-rw-r--r--kernel/trace/trace_events_hist.c179
-rw-r--r--kernel/trace/trace_events_synth.c1
-rw-r--r--kernel/trace/trace_events_trigger.c66
-rw-r--r--kernel/trace/trace_fprobe.c26
-rw-r--r--kernel/trace/trace_functions.c30
-rw-r--r--kernel/trace/trace_functions_graph.c49
-rw-r--r--kernel/trace/trace_irqsoff.c47
-rw-r--r--kernel/trace/trace_kdb.c9
-rw-r--r--kernel/trace/trace_kprobe.c2
-rw-r--r--kernel/trace/trace_mmiotrace.c12
-rw-r--r--kernel/trace/trace_osnoise.c9
-rw-r--r--kernel/trace/trace_output.c64
-rw-r--r--kernel/trace/trace_probe.c9
-rw-r--r--kernel/trace/trace_sched_wakeup.c18
-rw-r--r--kernel/trace/trace_stack.c24
-rw-r--r--kernel/trace/trace_uprobe.c4
-rw-r--r--kernel/vhost_task.c2
-rw-r--r--kernel/vmcore_info.c4
-rw-r--r--kernel/watchdog.c94
-rw-r--r--kernel/workqueue.c17
-rw-r--r--lib/Kconfig57
-rw-r--r--lib/Kconfig.debug29
-rw-r--r--lib/Kconfig.ubsan12
-rw-r--r--lib/Makefile1
-rw-r--r--lib/alloc_tag.c136
-rw-r--r--lib/asn1_decoder.c1
-rw-r--r--lib/codetag.c5
-rw-r--r--lib/crc16.c9
-rw-r--r--lib/crc32.c10
-rw-r--r--lib/crypto/Kconfig89
-rw-r--r--lib/crypto/Makefile24
-rw-r--r--lib/crypto/aescfb.c2
-rw-r--r--lib/crypto/aesgcm.c2
-rw-r--r--lib/crypto/blake2s.c2
-rw-r--r--lib/crypto/chacha.c40
-rw-r--r--lib/crypto/chacha20poly1305-selftest.c8
-rw-r--r--lib/crypto/chacha20poly1305.c55
-rw-r--r--lib/crypto/curve25519.c2
-rw-r--r--lib/crypto/libchacha.c2
-rw-r--r--lib/crypto/poly1305-generic.c24
-rw-r--r--lib/crypto/poly1305.c75
-rw-r--r--lib/crypto/sha256-generic.c137
-rw-r--r--lib/crypto/sha256.c150
-rw-r--r--lib/crypto/sm3.c (renamed from crypto/sm3.c)79
-rw-r--r--lib/devres.c1
-rw-r--r--lib/errseq.c13
-rw-r--r--lib/find_bit.c11
-rw-r--r--lib/iov_iter.c61
-rw-r--r--lib/kstrtox.c4
-rw-r--r--lib/kunit/executor.c2
-rw-r--r--lib/kunit/static_stub.c2
-rw-r--r--lib/llist.c22
-rw-r--r--lib/maple_tree.c191
-rw-r--r--lib/oid_registry.c25
-rw-r--r--lib/pldmfw/pldmfw.c6
-rw-r--r--lib/raid6/algos.c6
-rw-r--r--lib/raid6/avx512.c4
-rw-r--r--lib/raid6/recov_avx512.c6
-rw-r--r--lib/raid6/test/Makefile3
-rw-r--r--lib/ratelimit.c75
-rw-r--r--lib/rbtree.c8
-rw-r--r--lib/scatterlist.c23
-rw-r--r--lib/string.c13
-rw-r--r--lib/string_helpers.c39
-rw-r--r--lib/test_fortify/Makefile5
-rw-r--r--lib/test_kmod.c64
-rw-r--r--lib/test_sysctl.c131
-rw-r--r--lib/test_ubsan.c18
-rw-r--r--lib/test_vmalloc.c22
-rw-r--r--lib/test_xarray.c17
-rw-r--r--lib/tests/Makefile1
-rw-r--r--lib/tests/crc_kunit.c6
-rw-r--r--lib/tests/overflow_kunit.c4
-rw-r--r--lib/tests/printf_kunit.c39
-rw-r--r--lib/tests/randstruct_kunit.c334
-rw-r--r--lib/tests/slub_kunit.c1
-rw-r--r--lib/tests/stackinit_kunit.c10
-rw-r--r--lib/tests/test_bits.c30
-rw-r--r--lib/tests/usercopy_kunit.c1
-rw-r--r--lib/ubsan.c8
-rw-r--r--lib/ucs2_string.c1
-rw-r--r--lib/vsprintf.c50
-rw-r--r--lib/xarray.c9
-rw-r--r--lib/zlib_inflate/inflate_syms.c1
-rw-r--r--mm/Kconfig40
-rw-r--r--mm/Makefile4
-rw-r--r--mm/cma.c35
-rw-r--r--mm/cma.h6
-rw-r--r--mm/compaction.c30
-rw-r--r--mm/damon/Kconfig4
-rw-r--r--mm/damon/core.c49
-rw-r--r--mm/damon/paddr.c1
-rw-r--r--mm/damon/sysfs-schemes.c47
-rw-r--r--mm/damon/tests/core-kunit.h70
-rw-r--r--mm/debug.c6
-rw-r--r--mm/debug_page_alloc.c2
-rw-r--r--mm/debug_vm_pgtable.c18
-rw-r--r--mm/dmapool.c15
-rw-r--r--mm/execmem.c42
-rw-r--r--mm/filemap.c46
-rw-r--r--mm/gup.c147
-rw-r--r--mm/hmm.c262
-rw-r--r--mm/huge_memory.c56
-rw-r--r--mm/hugetlb.c220
-rw-r--r--mm/hugetlb_cma.c11
-rw-r--r--mm/hugetlb_vmemmap.c6
-rw-r--r--mm/internal.h36
-rw-r--r--mm/io-mapping.c9
-rw-r--r--mm/kasan/Makefile3
-rw-r--r--mm/kasan/kasan_test_c.c21
-rw-r--r--mm/kasan/shadow.c92
-rw-r--r--mm/khugepaged.c69
-rw-r--r--mm/kmemleak.c9
-rw-r--r--mm/kmsan/core.c12
-rw-r--r--mm/kmsan/hooks.c6
-rw-r--r--mm/kmsan/init.c3
-rw-r--r--mm/kmsan/instrumentation.c4
-rw-r--r--mm/kmsan/kmsan.h1
-rw-r--r--mm/kmsan/report.c6
-rw-r--r--mm/kmsan/shadow.c7
-rw-r--r--mm/maccess.c2
-rw-r--r--mm/madvise.c101
-rw-r--r--mm/memblock.c338
-rw-r--r--mm/memcontrol-v1.c11
-rw-r--r--mm/memcontrol.c780
-rw-r--r--mm/memfd.c1
-rw-r--r--mm/memory.c446
-rw-r--r--mm/memory_hotplug.c8
-rw-r--r--mm/mempolicy.c558
-rw-r--r--mm/memremap.c8
-rw-r--r--mm/migrate.c91
-rw-r--r--mm/mincore.c22
-rw-r--r--mm/mm_init.c60
-rw-r--r--mm/mmap.c306
-rw-r--r--mm/mmap_lock.c273
-rw-r--r--mm/mmu_gather.c1
-rw-r--r--mm/mmu_notifier.c2
-rw-r--r--mm/mprotect.c2
-rw-r--r--mm/mremap.c7
-rw-r--r--mm/nommu.c46
-rw-r--r--mm/numa.c4
-rw-r--r--mm/numa_memblks.c22
-rw-r--r--mm/page-writeback.c37
-rw-r--r--mm/page_alloc.c280
-rw-r--r--mm/page_io.c3
-rw-r--r--mm/page_owner.c2
-rw-r--r--mm/page_table_check.c34
-rw-r--r--mm/ptdump.c62
-rw-r--r--mm/readahead.c20
-rw-r--r--mm/rmap.c30
-rw-r--r--mm/secretmem.c14
-rw-r--r--mm/shmem.c56
-rw-r--r--mm/show_mem.c22
-rw-r--r--mm/slab_common.c2
-rw-r--r--mm/slub.c53
-rw-r--r--mm/swap.c8
-rw-r--r--mm/swap.h22
-rw-r--r--mm/swap_state.c9
-rw-r--r--mm/swapfile.c218
-rw-r--r--mm/truncate.c22
-rw-r--r--mm/userfaultfd.c27
-rw-r--r--mm/vma.c256
-rw-r--r--mm/vma.h24
-rw-r--r--mm/vma_exec.c161
-rw-r--r--mm/vma_init.c151
-rw-r--r--mm/vmalloc.c293
-rw-r--r--mm/vmscan.c177
-rw-r--r--mm/vmstat.c4
-rw-r--r--mm/workingset.c4
-rw-r--r--mm/zpdesc.h7
-rw-r--r--mm/zpool.c8
-rw-r--r--mm/zsmalloc.c40
-rw-r--r--mm/zswap.c2
-rw-r--r--net/802/Makefile5
-rw-r--r--net/802/p8022.c64
-rw-r--r--net/8021q/vlan.c1
-rw-r--r--net/9p/client.c6
-rw-r--r--net/Kconfig7
-rw-r--r--net/Makefile1
-rw-r--r--net/batman-adv/Kconfig2
-rw-r--r--net/batman-adv/hard-interface.c32
-rw-r--r--net/batman-adv/main.c4
-rw-r--r--net/batman-adv/main.h3
-rw-r--r--net/batman-adv/mesh-interface.c15
-rw-r--r--net/batman-adv/send.c4
-rw-r--r--net/batman-adv/translation-table.c2
-rw-r--r--net/bluetooth/Makefile3
-rw-r--r--net/bluetooth/af_bluetooth.c87
-rw-r--r--net/bluetooth/hci_conn.c292
-rw-r--r--net/bluetooth/hci_core.c45
-rw-r--r--net/bluetooth/hci_drv.c105
-rw-r--r--net/bluetooth/hci_event.c133
-rw-r--r--net/bluetooth/hci_sock.c12
-rw-r--r--net/bluetooth/hci_sync.c203
-rw-r--r--net/bluetooth/iso.c56
-rw-r--r--net/bluetooth/l2cap_core.c37
-rw-r--r--net/bluetooth/mgmt.c12
-rw-r--r--net/bluetooth/mgmt_util.c2
-rw-r--r--net/bpf/test_run.c8
-rw-r--r--net/bridge/br.c22
-rw-r--r--net/bridge/br_arp_nd_proxy.c7
-rw-r--r--net/bridge/br_input.c3
-rw-r--r--net/bridge/br_mdb.c28
-rw-r--r--net/bridge/br_mst.c4
-rw-r--r--net/bridge/br_multicast.c119
-rw-r--r--net/bridge/br_nf_core.c7
-rw-r--r--net/bridge/br_private.h42
-rw-r--r--net/bridge/br_switchdev.c13
-rw-r--r--net/bridge/br_vlan.c4
-rw-r--r--net/bridge/netfilter/nf_conntrack_bridge.c12
-rw-r--r--net/can/bcm.c79
-rw-r--r--net/can/gw.c149
-rw-r--r--net/can/j1939/socket.c1
-rw-r--r--net/ceph/Kconfig2
-rw-r--r--net/ceph/osd_client.c23
-rw-r--r--net/core/datagram.c90
-rw-r--r--net/core/dev.c233
-rw-r--r--net/core/dev.h22
-rw-r--r--net/core/dev_api.c50
-rw-r--r--net/core/dev_ioctl.c6
-rw-r--r--net/core/devmem.c139
-rw-r--r--net/core/devmem.h86
-rw-r--r--net/core/dst_cache.c30
-rw-r--r--net/core/fib_rules.c62
-rw-r--r--net/core/filter.c95
-rw-r--r--net/core/link_watch.c28
-rw-r--r--net/core/lock_debug.c6
-rw-r--r--net/core/lwtunnel.c41
-rw-r--r--net/core/neighbour.c16
-rw-r--r--net/core/net-procfs.c9
-rw-r--r--net/core/net_namespace.c171
-rw-r--r--net/core/netdev-genl-gen.c13
-rw-r--r--net/core/netdev-genl-gen.h1
-rw-r--r--net/core/netdev-genl.c246
-rw-r--r--net/core/netmem_priv.h33
-rw-r--r--net/core/page_pool.c89
-rw-r--r--net/core/pktgen.c103
-rw-r--r--net/core/rtnetlink.c75
-rw-r--r--net/core/scm.c122
-rw-r--r--net/core/secure_seq.c42
-rw-r--r--net/core/selftests.c18
-rw-r--r--net/core/skbuff.c214
-rw-r--r--net/core/skmsg.c56
-rw-r--r--net/core/sock.c121
-rw-r--r--net/core/sock_diag.c2
-rw-r--r--net/core/sysctl_net_core.c6
-rw-r--r--net/core/utils.c8
-rw-r--r--net/core/xdp.c72
-rw-r--r--net/dccp/Kconfig46
-rw-r--r--net/dccp/Makefile30
-rw-r--r--net/dccp/ackvec.c403
-rw-r--r--net/dccp/ackvec.h136
-rw-r--r--net/dccp/ccid.c219
-rw-r--r--net/dccp/ccid.h262
-rw-r--r--net/dccp/ccids/Kconfig55
-rw-r--r--net/dccp/ccids/ccid2.c794
-rw-r--r--net/dccp/ccids/ccid2.h121
-rw-r--r--net/dccp/ccids/ccid3.c866
-rw-r--r--net/dccp/ccids/ccid3.h148
-rw-r--r--net/dccp/ccids/lib/loss_interval.c184
-rw-r--r--net/dccp/ccids/lib/loss_interval.h69
-rw-r--r--net/dccp/ccids/lib/packet_history.c439
-rw-r--r--net/dccp/ccids/lib/packet_history.h142
-rw-r--r--net/dccp/ccids/lib/tfrc.c46
-rw-r--r--net/dccp/ccids/lib/tfrc.h73
-rw-r--r--net/dccp/ccids/lib/tfrc_equation.c702
-rw-r--r--net/dccp/dccp.h483
-rw-r--r--net/dccp/diag.c85
-rw-r--r--net/dccp/feat.c1581
-rw-r--r--net/dccp/feat.h133
-rw-r--r--net/dccp/input.c739
-rw-r--r--net/dccp/ipv4.c1101
-rw-r--r--net/dccp/ipv6.c1174
-rw-r--r--net/dccp/ipv6.h27
-rw-r--r--net/dccp/minisocks.c266
-rw-r--r--net/dccp/options.c609
-rw-r--r--net/dccp/output.c708
-rw-r--r--net/dccp/proto.c1293
-rw-r--r--net/dccp/qpolicy.c136
-rw-r--r--net/dccp/sysctl.c107
-rw-r--r--net/dccp/timer.c272
-rw-r--r--net/dccp/trace.h82
-rw-r--r--net/devlink/dev.c2
-rw-r--r--net/devlink/health.c52
-rw-r--r--net/devlink/netlink_gen.c29
-rw-r--r--net/devlink/param.c46
-rw-r--r--net/dsa/dsa.c59
-rw-r--r--net/dsa/port.c10
-rw-r--r--net/dsa/tag_8021q.c2
-rw-r--r--net/dsa/tag_ksz.c19
-rw-r--r--net/dsa/user.c41
-rw-r--r--net/ethtool/cmis.h1
-rw-r--r--net/ethtool/cmis_cdb.c20
-rw-r--r--net/ethtool/common.c30
-rw-r--r--net/ethtool/ioctl.c101
-rw-r--r--net/ethtool/mm.c279
-rw-r--r--net/ethtool/netlink.c225
-rw-r--r--net/ethtool/netlink.h4
-rw-r--r--net/ethtool/phy.c342
-rw-r--r--net/ethtool/tsinfo.c23
-rw-r--r--net/hsr/hsr_device.c11
-rw-r--r--net/hsr/hsr_main.c9
-rw-r--r--net/hsr/hsr_main.h1
-rw-r--r--net/hsr/hsr_slave.c2
-rw-r--r--net/ieee802154/nl-phy.c6
-rw-r--r--net/ipv4/Kconfig2
-rw-r--r--net/ipv4/af_inet.c5
-rw-r--r--net/ipv4/devinet.c4
-rw-r--r--net/ipv4/esp4.c53
-rw-r--r--net/ipv4/fib_frontend.c8
-rw-r--r--net/ipv4/fib_semantics.c50
-rw-r--r--net/ipv4/gre_demux.c2
-rw-r--r--net/ipv4/inet_connection_sock.c23
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/inet_hashtables.c36
-rw-r--r--net/ipv4/inet_timewait_sock.c4
-rw-r--r--net/ipv4/ip_gre.c27
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_tunnel.c29
-rw-r--r--net/ipv4/ip_vti.c9
-rw-r--r--net/ipv4/ipip.c9
-rw-r--r--net/ipv4/ipmr.c20
-rw-r--r--net/ipv4/netfilter/ip_tables.c2
-rw-r--r--net/ipv4/netfilter/nf_dup_ipv4.c6
-rw-r--r--net/ipv4/netfilter/nft_fib_ipv4.c11
-rw-r--r--net/ipv4/nexthop.c38
-rw-r--r--net/ipv4/proc.c1
-rw-r--r--net/ipv4/route.c26
-rw-r--r--net/ipv4/tcp.c53
-rw-r--r--net/ipv4/tcp_fastopen.c1
-rw-r--r--net/ipv4/tcp_input.c110
-rw-r--r--net/ipv4/tcp_ipv4.c7
-rw-r--r--net/ipv4/tcp_minisocks.c9
-rw-r--r--net/ipv4/tcp_offload.c2
-rw-r--r--net/ipv4/tcp_output.c5
-rw-r--r--net/ipv4/udp.c227
-rw-r--r--net/ipv4/udp_offload.c233
-rw-r--r--net/ipv4/udp_tunnel_core.c15
-rw-r--r--net/ipv4/xfrm4_input.c18
-rw-r--r--net/ipv6/addrconf.c36
-rw-r--r--net/ipv6/addrlabel.c8
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/esp6.c53
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/ioam6_iptunnel.c76
-rw-r--r--net/ipv6/ip6_fib.c115
-rw-r--r--net/ipv6/ip6_gre.c22
-rw-r--r--net/ipv6/ip6_output.c5
-rw-r--r--net/ipv6/ip6_tunnel.c24
-rw-r--r--net/ipv6/ip6_vti.c27
-rw-r--r--net/ipv6/ip6mr.c12
-rw-r--r--net/ipv6/netfilter.c12
-rw-r--r--net/ipv6/netfilter/ip6_tables.c2
-rw-r--r--net/ipv6/netfilter/nf_dup_ipv6.c6
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c17
-rw-r--r--net/ipv6/route.c433
-rw-r--r--net/ipv6/seg6_hmac.c13
-rw-r--r--net/ipv6/sit.c23
-rw-r--r--net/ipv6/tcp_ipv6.c5
-rw-r--r--net/ipv6/tcpv6_offload.c2
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/ipv6/udp_offload.c5
-rw-r--r--net/ipv6/xfrm6_input.c18
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l3mdev/l3mdev.c4
-rw-r--r--net/llc/af_llc.c8
-rw-r--r--net/mac80211/cfg.c78
-rw-r--r--net/mac80211/chan.c3
-rw-r--r--net/mac80211/debugfs_sta.c6
-rw-r--r--net/mac80211/ibss.c19
-rw-r--r--net/mac80211/ieee80211_i.h16
-rw-r--r--net/mac80211/iface.c90
-rw-r--r--net/mac80211/link.c90
-rw-r--r--net/mac80211/main.c6
-rw-r--r--net/mac80211/mesh.c10
-rw-r--r--net/mac80211/mesh_hwmp.c6
-rw-r--r--net/mac80211/mesh_pathtbl.c2
-rw-r--r--net/mac80211/mesh_plink.c10
-rw-r--r--net/mac80211/mlme.c16
-rw-r--r--net/mac80211/parse.c3
-rw-r--r--net/mac80211/rate.c12
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c13
-rw-r--r--net/mac80211/scan.c18
-rw-r--r--net/mac80211/spectmgmt.c55
-rw-r--r--net/mac80211/sta_info.c28
-rw-r--r--net/mac80211/sta_info.h11
-rw-r--r--net/mac80211/status.c8
-rw-r--r--net/mac80211/tdls.c4
-rw-r--r--net/mac80211/tx.c35
-rw-r--r--net/mac80211/util.c25
-rw-r--r--net/mctp/af_mctp.c3
-rw-r--r--net/mctp/device.c17
-rw-r--r--net/mctp/neigh.c5
-rw-r--r--net/mctp/route.c4
-rw-r--r--net/mpls/af_mpls.c8
-rw-r--r--net/mptcp/mib.c1
-rw-r--r--net/mptcp/mib.h1
-rw-r--r--net/mptcp/pm.c5
-rw-r--r--net/mptcp/pm_userspace.c6
-rw-r--r--net/mptcp/protocol.c12
-rw-r--r--net/mptcp/protocol.h10
-rw-r--r--net/mptcp/sched.c35
-rw-r--r--net/mptcp/subflow.c18
-rw-r--r--net/ncsi/internal.h23
-rw-r--r--net/ncsi/ncsi-pkt.h23
-rw-r--r--net/ncsi/ncsi-rsp.c39
-rw-r--r--net/netfilter/Kconfig6
-rw-r--r--net/netfilter/core.c3
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h2
-rw-r--r--net/netfilter/ipvs/Kconfig2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c27
-rw-r--r--net/netfilter/nf_conntrack_core.c10
-rw-r--r--net/netfilter/nf_conntrack_standalone.c88
-rw-r--r--net/netfilter/nf_dup_netdev.c22
-rw-r--r--net/netfilter/nf_flow_table_core.c10
-rw-r--r--net/netfilter/nf_tables_api.c428
-rw-r--r--net/netfilter/nf_tables_offload.c51
-rw-r--r--net/netfilter/nf_tables_trace.c54
-rw-r--r--net/netfilter/nfnetlink.c1
-rw-r--r--net/netfilter/nft_chain_filter.c94
-rw-r--r--net/netfilter/nft_flow_offload.c2
-rw-r--r--net/netfilter/nft_inner.c18
-rw-r--r--net/netfilter/nft_quota.c20
-rw-r--r--net/netfilter/nft_set_pipapo.c64
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.c3
-rw-r--r--net/netfilter/nft_tunnel.c8
-rw-r--r--net/netfilter/xt_IDLETIMER.c12
-rw-r--r--net/netfilter/xt_TCPOPTSTRIP.c4
-rw-r--r--net/netfilter/xt_cgroup.c26
-rw-r--r--net/netfilter/xt_mark.c2
-rw-r--r--net/netlabel/netlabel_kapi.c3
-rw-r--r--net/netlink/policy.c5
-rw-r--r--net/openvswitch/Kconfig2
-rw-r--r--net/openvswitch/actions.c89
-rw-r--r--net/openvswitch/datapath.c33
-rw-r--r--net/openvswitch/datapath.h52
-rw-r--r--net/openvswitch/flow.c2
-rw-r--r--net/openvswitch/flow_netlink.c6
-rw-r--r--net/packet/af_packet.c21
-rw-r--r--net/packet/internal.h1
-rw-r--r--net/rds/connection.c6
-rw-r--r--net/rds/page.c25
-rw-r--r--net/rxrpc/Kconfig23
-rw-r--r--net/rxrpc/Makefile6
-rw-r--r--net/rxrpc/af_rxrpc.c130
-rw-r--r--net/rxrpc/ar-internal.h83
-rw-r--r--net/rxrpc/call_accept.c34
-rw-r--r--net/rxrpc/call_object.c24
-rw-r--r--net/rxrpc/conn_event.c134
-rw-r--r--net/rxrpc/conn_object.c2
-rw-r--r--net/rxrpc/insecure.c13
-rw-r--r--net/rxrpc/io_thread.c12
-rw-r--r--net/rxrpc/key.c187
-rw-r--r--net/rxrpc/oob.c379
-rw-r--r--net/rxrpc/output.c60
-rw-r--r--net/rxrpc/peer_object.c22
-rw-r--r--net/rxrpc/protocol.h20
-rw-r--r--net/rxrpc/recvmsg.c132
-rw-r--r--net/rxrpc/rxgk.c1371
-rw-r--r--net/rxrpc/rxgk_app.c286
-rw-r--r--net/rxrpc/rxgk_common.h139
-rw-r--r--net/rxrpc/rxgk_kdf.c288
-rw-r--r--net/rxrpc/rxkad.c296
-rw-r--r--net/rxrpc/rxperf.c78
-rw-r--r--net/rxrpc/security.c3
-rw-r--r--net/rxrpc/sendmsg.c25
-rw-r--r--net/rxrpc/server_key.c42
-rw-r--r--net/rxrpc/txbuf.c8
-rw-r--r--net/sched/Kconfig14
-rw-r--r--net/sched/Makefile1
-rw-r--r--net/sched/act_api.c16
-rw-r--r--net/sched/act_mirred.c28
-rw-r--r--net/sched/bpf_qdisc.c475
-rw-r--r--net/sched/cls_api.c66
-rw-r--r--net/sched/sch_api.c11
-rw-r--r--net/sched/sch_codel.c7
-rw-r--r--net/sched/sch_drr.c16
-rw-r--r--net/sched/sch_ets.c17
-rw-r--r--net/sched/sch_fq.c2
-rw-r--r--net/sched/sch_fq_codel.c8
-rw-r--r--net/sched/sch_fq_pie.c2
-rw-r--r--net/sched/sch_frag.c10
-rw-r--r--net/sched/sch_generic.c7
-rw-r--r--net/sched/sch_hfsc.c46
-rw-r--r--net/sched/sch_hhf.c2
-rw-r--r--net/sched/sch_htb.c13
-rw-r--r--net/sched/sch_pie.c2
-rw-r--r--net/sched/sch_qfq.c18
-rw-r--r--net/sched/sch_sfq.c66
-rw-r--r--net/sctp/Kconfig2
-rw-r--r--net/sctp/associola.c18
-rw-r--r--net/sctp/offload.c1
-rw-r--r--net/sctp/sm_make_chunk.c8
-rw-r--r--net/sctp/socket.c31
-rw-r--r--net/sctp/transport.c2
-rw-r--r--net/smc/af_smc.c5
-rw-r--r--net/strparser/strparser.c13
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c59
-rw-r--r--net/sunrpc/cache.c23
-rw-r--r--net/sunrpc/clnt.c9
-rw-r--r--net/sunrpc/rpc_pipe.c12
-rw-r--r--net/sunrpc/svc.c80
-rw-r--r--net/sunrpc/svc_xprt.c11
-rw-r--r--net/sunrpc/svcsock.c17
-rw-r--r--net/sunrpc/xdr.c1
-rw-r--r--net/sunrpc/xprt.c3
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c8
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c2
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c16
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c15
-rw-r--r--net/sunrpc/xprtsock.c16
-rw-r--r--net/tipc/crypto.c7
-rw-r--r--net/tipc/link.c3
-rw-r--r--net/tipc/monitor.c3
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/tls/tls_main.c6
-rw-r--r--net/tls/tls_strp.c3
-rw-r--r--net/tls/tls_sw.c15
-rw-r--r--net/unix/af_unix.c241
-rw-r--r--net/vmw_vsock/af_vsock.c33
-rw-r--r--net/vmw_vsock/virtio_transport_common.c52
-rw-r--r--net/wireless/nl80211.c42
-rw-r--r--net/wireless/scan.c2
-rw-r--r--net/xdp/xsk.c8
-rw-r--r--net/xdp/xsk_buff_pool.c7
-rw-r--r--net/xfrm/espintcp.c4
-rw-r--r--net/xfrm/xfrm_device.c18
-rw-r--r--net/xfrm/xfrm_interface_core.c34
-rw-r--r--net/xfrm/xfrm_ipcomp.c3
-rw-r--r--net/xfrm/xfrm_nat_keepalive.c30
-rw-r--r--net/xfrm/xfrm_policy.c7
-rw-r--r--net/xfrm/xfrm_state.c52
-rw-r--r--net/xfrm/xfrm_user.c77
-rw-r--r--rust/Makefile22
-rw-r--r--rust/bindings/bindings_helper.h40
-rw-r--r--rust/bindings/lib.rs1
-rw-r--r--rust/ffi.rs2
-rw-r--r--rust/helpers/auxiliary.c23
-rw-r--r--rust/helpers/clk.c66
-rw-r--r--rust/helpers/cpufreq.c10
-rw-r--r--rust/helpers/cpumask.c25
-rw-r--r--rust/helpers/dma.c16
-rw-r--r--rust/helpers/drm.c23
-rw-r--r--rust/helpers/helpers.c7
-rw-r--r--rust/helpers/io.c34
-rw-r--r--rust/helpers/mm.c50
-rw-r--r--rust/helpers/mutex.c10
-rw-r--r--rust/helpers/pci.c5
-rw-r--r--rust/helpers/platform.c5
-rw-r--r--rust/helpers/xarray.c28
-rw-r--r--rust/kernel/alloc.rs4
-rw-r--r--rust/kernel/alloc/allocator_test.rs2
-rw-r--r--rust/kernel/alloc/kbox.rs80
-rw-r--r--rust/kernel/alloc/kvec.rs430
-rw-r--r--rust/kernel/alloc/kvec/errors.rs61
-rw-r--r--rust/kernel/auxiliary.rs362
-rw-r--r--rust/kernel/block/mq/gen_disk.rs2
-rw-r--r--rust/kernel/clk.rs334
-rw-r--r--rust/kernel/configfs.rs1049
-rw-r--r--rust/kernel/cpu.rs30
-rw-r--r--rust/kernel/cpufreq.rs1321
-rw-r--r--rust/kernel/cpumask.rs330
-rw-r--r--rust/kernel/device.rs135
-rw-r--r--rust/kernel/device_id.rs4
-rw-r--r--rust/kernel/devres.rs56
-rw-r--r--rust/kernel/dma.rs16
-rw-r--r--rust/kernel/drm/device.rs202
-rw-r--r--rust/kernel/drm/driver.rs166
-rw-r--r--rust/kernel/drm/file.rs99
-rw-r--r--rust/kernel/drm/gem/mod.rs332
-rw-r--r--rust/kernel/drm/ioctl.rs162
-rw-r--r--rust/kernel/drm/mod.rs19
-rw-r--r--rust/kernel/firmware.rs8
-rw-r--r--rust/kernel/fs/file.rs10
-rw-r--r--rust/kernel/kunit.rs37
-rw-r--r--rust/kernel/lib.rs59
-rw-r--r--rust/kernel/list.rs112
-rw-r--r--rust/kernel/list/arc.rs6
-rw-r--r--rust/kernel/miscdevice.rs55
-rw-r--r--rust/kernel/mm.rs296
-rw-r--r--rust/kernel/mm/mmput_async.rs68
-rw-r--r--rust/kernel/mm/virt.rs471
-rw-r--r--rust/kernel/net/phy.rs1
-rw-r--r--rust/kernel/opp.rs1146
-rw-r--r--rust/kernel/page.rs2
-rw-r--r--rust/kernel/pci.rs72
-rw-r--r--rust/kernel/platform.rs65
-rw-r--r--rust/kernel/prelude.rs7
-rw-r--r--rust/kernel/print.rs27
-rw-r--r--rust/kernel/rbtree.rs23
-rw-r--r--rust/kernel/revocable.rs28
-rw-r--r--rust/kernel/static_assert.rs9
-rw-r--r--rust/kernel/std_vendor.rs2
-rw-r--r--rust/kernel/str.rs128
-rw-r--r--rust/kernel/sync/arc.rs25
-rw-r--r--rust/kernel/sync/rcu.rs5
-rw-r--r--rust/kernel/task.rs247
-rw-r--r--rust/kernel/time.rs167
-rw-r--r--rust/kernel/time/hrtimer.rs24
-rw-r--r--rust/kernel/time/hrtimer/arc.rs2
-rw-r--r--rust/kernel/time/hrtimer/pin.rs2
-rw-r--r--rust/kernel/time/hrtimer/pin_mut.rs4
-rw-r--r--rust/kernel/time/hrtimer/tbox.rs2
-rw-r--r--rust/kernel/types.rs54
-rw-r--r--rust/kernel/uaccess.rs6
-rw-r--r--rust/kernel/workqueue.rs50
-rw-r--r--rust/kernel/xarray.rs275
-rw-r--r--rust/macros/helpers.rs17
-rw-r--r--rust/macros/kunit.rs67
-rw-r--r--rust/macros/lib.rs15
-rw-r--r--rust/macros/module.rs66
-rw-r--r--rust/macros/paste.rs2
-rw-r--r--rust/pin-init/README.md14
-rw-r--r--rust/pin-init/examples/linked_list.rs1
-rw-r--r--rust/pin-init/examples/mutex.rs1
-rw-r--r--rust/pin-init/examples/pthread_mutex.rs6
-rw-r--r--rust/pin-init/examples/static_init.rs1
-rw-r--r--rust/pin-init/internal/src/lib.rs6
-rw-r--r--rust/pin-init/internal/src/pinned_drop.rs3
-rw-r--r--rust/pin-init/internal/src/zeroable.rs27
-rw-r--r--rust/pin-init/src/alloc.rs8
-rw-r--r--rust/pin-init/src/lib.rs146
-rw-r--r--rust/pin-init/src/macros.rs91
-rw-r--r--rust/uapi/lib.rs1
-rw-r--r--rust/uapi/uapi_helper.h2
-rw-r--r--samples/Kconfig20
-rw-r--r--samples/Makefile2
-rw-r--r--samples/bpf/Makefile2
-rw-r--r--samples/bpf/sockex2_kern.c1
-rw-r--r--samples/damon/Kconfig13
-rw-r--r--samples/damon/Makefile1
-rw-r--r--samples/damon/mtier.c178
-rw-r--r--samples/damon/prcl.c2
-rw-r--r--samples/ftrace/sample-trace-array.c2
-rw-r--r--samples/hung_task/Makefile2
-rw-r--r--samples/hung_task/hung_task_mutex.c66
-rw-r--r--samples/hung_task/hung_task_tests.c97
-rw-r--r--samples/livepatch/livepatch-callbacks-busymod.c1
-rw-r--r--samples/livepatch/livepatch-callbacks-demo.c1
-rw-r--r--samples/livepatch/livepatch-callbacks-mod.c1
-rw-r--r--samples/livepatch/livepatch-sample.c1
-rw-r--r--samples/livepatch/livepatch-shadow-fix1.c1
-rw-r--r--samples/livepatch/livepatch-shadow-fix2.c1
-rw-r--r--samples/rust/Kconfig23
-rw-r--r--samples/rust/Makefile2
-rw-r--r--samples/rust/rust_configfs.rs192
-rw-r--r--samples/rust/rust_driver_auxiliary.rs120
-rw-r--r--samples/rust/rust_driver_pci.rs5
-rw-r--r--samples/tsm-mr/Makefile2
-rw-r--r--samples/tsm-mr/tsm_mr_sample.c131
-rw-r--r--scripts/Makefile.btf2
-rw-r--r--scripts/Makefile.build11
-rw-r--r--scripts/Makefile.compiler10
-rw-r--r--scripts/Makefile.extrawarn56
-rw-r--r--scripts/Makefile.gcc-plugins24
-rw-r--r--scripts/Makefile.kcov9
-rw-r--r--scripts/Makefile.lib21
-rw-r--r--scripts/Makefile.ubsan6
-rw-r--r--scripts/Makefile.vmlinux6
-rw-r--r--scripts/Makefile.vmlinux_o4
-rw-r--r--scripts/basic/Makefile5
-rwxr-xr-xscripts/bpf_doc.py119
-rwxr-xr-xscripts/checkpatch.pl41
-rwxr-xr-xscripts/find-unused-docs.sh2
-rw-r--r--scripts/gcc-plugins/Kconfig14
-rw-r--r--scripts/gcc-plugins/Makefile4
-rw-r--r--scripts/gcc-plugins/arm_ssp_per_task_plugin.c107
-rw-r--r--scripts/gcc-plugins/gcc-common.h77
-rw-r--r--scripts/gcc-plugins/randomize_layout_plugin.c40
-rw-r--r--scripts/gcc-plugins/sancov_plugin.c134
-rw-r--r--scripts/gcc-plugins/structleak_plugin.c257
-rw-r--r--scripts/gdb/linux/cpus.py4
-rw-r--r--scripts/gdb/linux/pgtable.py4
-rw-r--r--scripts/gdb/linux/symbols.py38
-rw-r--r--scripts/gdb/linux/utils.py22
-rwxr-xr-xscripts/generate_rust_analyzer.py25
-rw-r--r--scripts/generate_rust_target.rs4
-rw-r--r--scripts/genksyms/keywords.c7
-rw-r--r--scripts/genksyms/parse.y5
l---------[-rwxr-xr-x]scripts/kernel-doc2440
-rwxr-xr-xscripts/kernel-doc.pl2439
-rwxr-xr-xscripts/kernel-doc.py315
-rw-r--r--scripts/lib/kdoc/kdoc_files.py291
-rw-r--r--scripts/lib/kdoc/kdoc_output.py793
-rw-r--r--scripts/lib/kdoc/kdoc_parser.py1745
-rw-r--r--scripts/lib/kdoc/kdoc_re.py273
-rwxr-xr-xscripts/min-tool-version.sh6
-rw-r--r--scripts/package/kernel.spec1
-rwxr-xr-xscripts/package/mkdebian2
-rw-r--r--scripts/rustdoc_test_builder.rs8
-rw-r--r--scripts/rustdoc_test_gen.rs16
-rw-r--r--scripts/spelling.txt2
-rw-r--r--security/Kconfig.hardening76
-rw-r--r--security/apparmor/apparmorfs.c4
-rw-r--r--security/inode.c2
-rw-r--r--security/integrity/ima/Kconfig11
-rw-r--r--security/integrity/ima/ima.h6
-rw-r--r--security/integrity/ima/ima_kexec.c196
-rw-r--r--security/integrity/ima/ima_main.c4
-rw-r--r--security/integrity/ima/ima_queue.c5
-rw-r--r--security/ipe/audit.c19
-rw-r--r--security/ipe/fs.c25
-rw-r--r--security/ipe/policy.c17
-rw-r--r--security/ipe/policy_fs.c28
-rw-r--r--security/landlock/audit.c4
-rw-r--r--security/landlock/domain.c4
-rw-r--r--security/landlock/domain.h2
-rw-r--r--security/landlock/id.c33
-rw-r--r--security/landlock/syscalls.c30
-rw-r--r--security/lsm_audit.c19
-rw-r--r--security/security.c36
-rw-r--r--security/selinux/hooks.c266
-rw-r--r--security/selinux/ibpkey.c13
-rw-r--r--security/selinux/include/classmap.h2
-rw-r--r--security/selinux/include/netnode.h2
-rw-r--r--security/selinux/include/objsec.h16
-rw-r--r--security/selinux/include/policycap.h1
-rw-r--r--security/selinux/include/policycap_names.h1
-rw-r--r--security/selinux/include/security.h2
-rw-r--r--security/selinux/netif.c6
-rw-r--r--security/selinux/netnode.c15
-rw-r--r--security/selinux/netport.c14
-rw-r--r--security/selinux/nlmsgtab.c1
-rw-r--r--security/selinux/selinuxfs.c4
-rw-r--r--security/selinux/ss/services.c22
-rw-r--r--security/smack/smack_lsm.c9
-rw-r--r--security/smack/smackfs.c12
-rw-r--r--sound/atmel/ac97c.c9
-rw-r--r--sound/core/control.c14
-rw-r--r--sound/core/device.c23
-rw-r--r--sound/core/jack.c19
-rw-r--r--sound/core/oss/pcm_oss.c3
-rw-r--r--sound/core/pcm_dmaengine.c21
-rw-r--r--sound/core/pcm_memory.c2
-rw-r--r--sound/core/pcm_misc.c30
-rw-r--r--sound/core/pcm_native.c11
-rw-r--r--sound/core/seq/seq_clientmgr.c52
-rw-r--r--sound/core/seq/seq_queue.c16
-rw-r--r--sound/core/seq/seq_queue.h1
-rw-r--r--sound/core/seq/seq_ump_convert.c18
-rw-r--r--sound/core/seq/seq_ump_convert.h1
-rw-r--r--sound/core/seq_device.c2
-rw-r--r--sound/hda/ext/hdac_ext_controller.c19
-rw-r--r--sound/hda/hda_bus_type.c6
-rw-r--r--sound/hda/hdac_device.c2
-rw-r--r--sound/hda/hdac_stream.c19
-rw-r--r--sound/hda/intel-dsp-config.c10
-rw-r--r--sound/hda/intel-nhlt.c19
-rw-r--r--sound/hda/intel-sdw-acpi.c2
-rw-r--r--sound/i2c/other/pt2258.c8
-rw-r--r--sound/isa/azt2320.c4
-rw-r--r--sound/isa/gus/gus_io.c229
-rw-r--r--sound/isa/gus/gus_main.c14
-rw-r--r--sound/isa/gus/gus_reset.c16
-rw-r--r--sound/isa/msnd/Makefile2
-rw-r--r--sound/isa/msnd/msnd.h4
-rw-r--r--sound/isa/msnd/msnd_midi.c163
-rw-r--r--sound/isa/msnd/msnd_pinnacle.c5
-rw-r--r--sound/isa/sb/emu8000.c3
-rw-r--r--sound/isa/sb/jazz16.c5
-rw-r--r--sound/isa/sb/sb16.c5
-rw-r--r--sound/isa/sb/sb8.c5
-rw-r--r--sound/isa/sb/sb8_midi.c3
-rw-r--r--sound/isa/sb/sb_mixer.c5
-rw-r--r--sound/mips/snd-n64.c9
-rw-r--r--sound/pci/ad1889.c7
-rw-r--r--sound/pci/ali5451/ali5451.c6
-rw-r--r--sound/pci/als300.c2
-rw-r--r--sound/pci/als4000.c2
-rw-r--r--sound/pci/asihpi/asihpi.c9
-rw-r--r--sound/pci/atiixp.c7
-rw-r--r--sound/pci/atiixp_modem.c7
-rw-r--r--sound/pci/au88x0/au88x0.c7
-rw-r--r--sound/pci/au88x0/au88x0_a3d.c10
-rw-r--r--sound/pci/aw2/aw2-alsa.c7
-rw-r--r--sound/pci/azt3328.c2
-rw-r--r--sound/pci/bt87x.c7
-rw-r--r--sound/pci/ca0106/ca0106_main.c2
-rw-r--r--sound/pci/cmipci.c2
-rw-r--r--sound/pci/cs4281.c13
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c2
-rw-r--r--sound/pci/cs46xx/dsp_spos_scb_lib.c7
-rw-r--r--sound/pci/cs5530.c7
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c2
-rw-r--r--sound/pci/ctxfi/cttimer.c2
-rw-r--r--sound/pci/echoaudio/echoaudio.c2
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c2
-rw-r--r--sound/pci/emu10k1/emu10k1x.c2
-rw-r--r--sound/pci/ens1370.c2
-rw-r--r--sound/pci/es1938.c2
-rw-r--r--sound/pci/es1968.c8
-rw-r--r--sound/pci/fm801.c2
-rw-r--r--sound/pci/hda/Kconfig37
-rw-r--r--sound/pci/hda/Makefile8
-rw-r--r--sound/pci/hda/cirrus_scodec_test.c117
-rw-r--r--sound/pci/hda/cs35l41_hda.c51
-rw-r--r--sound/pci/hda/cs35l41_hda_property.c6
-rw-r--r--sound/pci/hda/cs35l56_hda.c38
-rw-r--r--sound/pci/hda/cs35l56_hda_i2c.c3
-rw-r--r--sound/pci/hda/cs35l56_hda_spi.c3
-rw-r--r--sound/pci/hda/hda_acpi.c325
-rw-r--r--sound/pci/hda/hda_bind.c4
-rw-r--r--sound/pci/hda/hda_codec.c31
-rw-r--r--sound/pci/hda/hda_cs_dsp_ctl.c249
-rw-r--r--sound/pci/hda/hda_cs_dsp_ctl.h39
-rw-r--r--sound/pci/hda/hda_intel.c32
-rw-r--r--sound/pci/hda/hda_local.h2
-rw-r--r--sound/pci/hda/hda_tegra.c55
-rw-r--r--sound/pci/hda/patch_hdmi.c13
-rw-r--r--sound/pci/hda/patch_realtek.c133
-rw-r--r--sound/pci/hda/tas2781-spi.h157
-rw-r--r--sound/pci/hda/tas2781_hda.c377
-rw-r--r--sound/pci/hda/tas2781_hda.h90
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c498
-rw-r--r--sound/pci/hda/tas2781_hda_spi.c893
-rw-r--r--sound/pci/hda/tas2781_spi_fwlib.c2006
-rw-r--r--sound/pci/ice1712/ice1712.c2
-rw-r--r--sound/pci/ice1712/ice1724.c2
-rw-r--r--sound/pci/intel8x0.c2
-rw-r--r--sound/pci/intel8x0m.c2
-rw-r--r--sound/pci/korg1212/korg1212.c75
-rw-r--r--sound/pci/lola/lola.c16
-rw-r--r--sound/pci/lx6464es/lx6464es.c2
-rw-r--r--sound/pci/maestro3.c2
-rw-r--r--sound/pci/nm256/nm256.c2
-rw-r--r--sound/pci/oxygen/oxygen_lib.c2
-rw-r--r--sound/pci/riptide/riptide.c2
-rw-r--r--sound/pci/rme32.c2
-rw-r--r--sound/pci/rme96.c2
-rw-r--r--sound/pci/rme9652/hdsp.c2
-rw-r--r--sound/pci/rme9652/hdspm.c7
-rw-r--r--sound/pci/rme9652/rme9652.c2
-rw-r--r--sound/pci/sis7019.c2
-rw-r--r--sound/pci/sonicvibes.c2
-rw-r--r--sound/pci/trident/trident_main.c2
-rw-r--r--sound/pci/via82xx.c2
-rw-r--r--sound/pci/via82xx_modem.c2
-rw-r--r--sound/pci/vx222/vx222.c2
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c2
-rw-r--r--sound/sh/Kconfig2
-rw-r--r--sound/sh/aica.c8
-rw-r--r--sound/soc/amd/acp-da7219-max98357a.c8
-rw-r--r--sound/soc/amd/acp-es8336.c4
-rw-r--r--sound/soc/amd/acp-rt5645.c6
-rw-r--r--sound/soc/amd/acp/acp-i2s.c2
-rw-r--r--sound/soc/amd/acp/acp-legacy-common.c2
-rw-r--r--sound/soc/amd/acp/acp-rembrandt.c4
-rw-r--r--sound/soc/amd/acp/acp-renoir.c2
-rw-r--r--sound/soc/amd/acp/acp-sdw-legacy-mach.c4
-rw-r--r--sound/soc/amd/acp/acp-sdw-sof-mach.c10
-rw-r--r--sound/soc/amd/acp/acp-sof-mach.c2
-rw-r--r--sound/soc/amd/acp/acp63.c4
-rw-r--r--sound/soc/amd/acp/acp70.c4
-rw-r--r--sound/soc/amd/ps/pci-ps.c5
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c9
-rw-r--r--sound/soc/apple/mca.c23
-rw-r--r--sound/soc/codecs/Kconfig52
-rw-r--r--sound/soc/codecs/Makefile14
-rw-r--r--sound/soc/codecs/ac97.c10
-rw-r--r--sound/soc/codecs/adau7118.c6
-rw-r--r--sound/soc/codecs/ak4458.c10
-rw-r--r--sound/soc/codecs/ak5386.c28
-rw-r--r--sound/soc/codecs/aw88081.c10
-rw-r--r--sound/soc/codecs/aw88166.c7
-rw-r--r--sound/soc/codecs/aw88395/aw88395_device.c7
-rw-r--r--sound/soc/codecs/aw88399.c7
-rw-r--r--sound/soc/codecs/cs-amp-lib-test.c70
-rw-r--r--sound/soc/codecs/cs-amp-lib.c3
-rw-r--r--sound/soc/codecs/cs35l56-i2c.c23
-rw-r--r--sound/soc/codecs/cs35l56-sdw.c91
-rw-r--r--sound/soc/codecs/cs35l56-shared.c257
-rw-r--r--sound/soc/codecs/cs35l56-spi.c3
-rw-r--r--sound/soc/codecs/cs35l56.c48
-rw-r--r--sound/soc/codecs/cs35l56.h1
-rw-r--r--sound/soc/codecs/cs42l43-jack.c10
-rw-r--r--sound/soc/codecs/cs42l52.c112
-rw-r--r--sound/soc/codecs/cs42l56.c90
-rw-r--r--sound/soc/codecs/cs42l73.c70
-rw-r--r--sound/soc/codecs/cs48l32-tables.c540
-rw-r--r--sound/soc/codecs/cs48l32.c4073
-rw-r--r--sound/soc/codecs/cs48l32.h403
-rw-r--r--sound/soc/codecs/es8375.c793
-rw-r--r--sound/soc/codecs/es8375.h123
-rw-r--r--sound/soc/codecs/es8389.c962
-rw-r--r--sound/soc/codecs/es8389.h140
-rw-r--r--sound/soc/codecs/hdmi-codec.c23
-rw-r--r--sound/soc/codecs/idt821034.c17
-rw-r--r--sound/soc/codecs/lpass-wsa-macro.c117
-rw-r--r--sound/soc/codecs/pcm6240.c3
-rw-r--r--sound/soc/codecs/peb2466.c15
-rw-r--r--sound/soc/codecs/rt5665.c96
-rw-r--r--sound/soc/codecs/rt5665.h3
-rw-r--r--sound/soc/codecs/rt5668.c43
-rw-r--r--sound/soc/codecs/rt5668.h3
-rw-r--r--sound/soc/codecs/rt5677-spi.c4
-rw-r--r--sound/soc/codecs/rt5677.c7
-rw-r--r--sound/soc/codecs/rt712-sdca-dmic.c8
-rw-r--r--sound/soc/codecs/rt712-sdca.c8
-rw-r--r--sound/soc/codecs/rt722-sdca-sdw.c20
-rw-r--r--sound/soc/codecs/rt722-sdca-sdw.h1
-rw-r--r--sound/soc/codecs/rt722-sdca.c322
-rw-r--r--sound/soc/codecs/rt722-sdca.h6
-rw-r--r--sound/soc/codecs/rt9123.c503
-rw-r--r--sound/soc/codecs/rt9123p.c171
-rw-r--r--sound/soc/codecs/sma1307.c1
-rw-r--r--sound/soc/codecs/sta32x.c6
-rw-r--r--sound/soc/codecs/tas2764-quirks.h180
-rw-r--r--sound/soc/codecs/tas2764.c139
-rw-r--r--sound/soc/codecs/tas2764.h3
-rw-r--r--sound/soc/codecs/tas2770.c151
-rw-r--r--sound/soc/codecs/tas2770.h6
-rw-r--r--sound/soc/codecs/tas2781-comlib-i2c.c369
-rw-r--r--sound/soc/codecs/tas2781-comlib.c397
-rw-r--r--sound/soc/codecs/tas2781-fmwlib.c25
-rw-r--r--sound/soc/codecs/tas2781-i2c.c158
-rw-r--r--sound/soc/codecs/tlv320adc3xxx.c8
-rw-r--r--sound/soc/codecs/tlv320aic3x.c6
-rw-r--r--sound/soc/codecs/tpa6130a2.c54
-rw-r--r--sound/soc/codecs/twl4030.c79
-rw-r--r--sound/soc/codecs/wcd-mbhc-v2.c2
-rw-r--r--sound/soc/codecs/wcd-mbhc-v2.h2
-rw-r--r--sound/soc/codecs/wcd9335.c15
-rw-r--r--sound/soc/codecs/wcd937x.c2
-rw-r--r--sound/soc/codecs/wcd938x-sdw.c2
-rw-r--r--sound/soc/codecs/wcd938x.c69
-rw-r--r--sound/soc/codecs/wcd939x.c18
-rw-r--r--sound/soc/codecs/wm5100.c10
-rw-r--r--sound/soc/codecs/wm8903.c12
-rw-r--r--sound/soc/codecs/wm8962.c11
-rw-r--r--sound/soc/codecs/wm8996.c10
-rw-r--r--sound/soc/codecs/wm_adsp.c28
-rw-r--r--sound/soc/codecs/wsa883x.c2
-rw-r--r--sound/soc/codecs/wsa884x.c2
-rw-r--r--sound/soc/codecs/zl38060.c12
-rw-r--r--sound/soc/dwc/dwc-i2s.c13
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c15
-rw-r--r--sound/soc/fsl/fsl_qmc_audio.c3
-rw-r--r--sound/soc/fsl/fsl_rpmsg.c28
-rw-r--r--sound/soc/fsl/fsl_rpmsg.h2
-rw-r--r--sound/soc/fsl/fsl_sai.c90
-rw-r--r--sound/soc/fsl/fsl_sai.h6
-rw-r--r--sound/soc/fsl/fsl_xcvr.c2
-rw-r--r--sound/soc/fsl/imx-card.c9
-rw-r--r--sound/soc/fsl/imx-pcm-rpmsg.c21
-rw-r--r--sound/soc/generic/simple-card-utils.c27
-rw-r--r--sound/soc/generic/test-component.c11
-rw-r--r--sound/soc/intel/atom/sst/sst.h6
-rw-r--r--sound/soc/intel/atom/sst/sst_drv_interface.c24
-rw-r--r--sound/soc/intel/atom/sst/sst_pci.c59
-rw-r--r--sound/soc/intel/atom/sst/sst_pvt.c33
-rw-r--r--sound/soc/intel/avs/Makefile6
-rw-r--r--sound/soc/intel/avs/avs.h13
-rw-r--r--sound/soc/intel/avs/board_selection.c181
-rw-r--r--sound/soc/intel/avs/boards/Kconfig8
-rw-r--r--sound/soc/intel/avs/boards/da7219.c11
-rw-r--r--sound/soc/intel/avs/boards/dmic.c12
-rw-r--r--sound/soc/intel/avs/boards/es8336.c11
-rw-r--r--sound/soc/intel/avs/boards/hdaudio.c25
-rw-r--r--sound/soc/intel/avs/boards/i2s_test.c15
-rw-r--r--sound/soc/intel/avs/boards/max98357a.c11
-rw-r--r--sound/soc/intel/avs/boards/max98373.c11
-rw-r--r--sound/soc/intel/avs/boards/max98927.c11
-rw-r--r--sound/soc/intel/avs/boards/nau8825.c11
-rw-r--r--sound/soc/intel/avs/boards/pcm3168a.c16
-rw-r--r--sound/soc/intel/avs/boards/probe.c5
-rw-r--r--sound/soc/intel/avs/boards/rt274.c11
-rw-r--r--sound/soc/intel/avs/boards/rt286.c11
-rw-r--r--sound/soc/intel/avs/boards/rt298.c11
-rw-r--r--sound/soc/intel/avs/boards/rt5514.c11
-rw-r--r--sound/soc/intel/avs/boards/rt5663.c11
-rw-r--r--sound/soc/intel/avs/boards/rt5682.c11
-rw-r--r--sound/soc/intel/avs/boards/ssm4567.c11
-rw-r--r--sound/soc/intel/avs/core.c68
-rw-r--r--sound/soc/intel/avs/dsp.c2
-rw-r--r--sound/soc/intel/avs/lnl.c27
-rw-r--r--sound/soc/intel/avs/loader.c11
-rw-r--r--sound/soc/intel/avs/messages.h34
-rw-r--r--sound/soc/intel/avs/mtl.c200
-rw-r--r--sound/soc/intel/avs/path.c125
-rw-r--r--sound/soc/intel/avs/path.h5
-rw-r--r--sound/soc/intel/avs/pcm.c208
-rw-r--r--sound/soc/intel/avs/probes.c2
-rw-r--r--sound/soc/intel/avs/ptl.c98
-rw-r--r--sound/soc/intel/avs/registers.h40
-rw-r--r--sound/soc/intel/avs/tgl.c2
-rw-r--r--sound/soc/intel/avs/topology.c4
-rw-r--r--sound/soc/intel/avs/topology.h2
-rw-r--r--sound/soc/intel/avs/utils.h16
-rw-r--r--sound/soc/intel/boards/Kconfig8
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c13
-rw-r--r--sound/soc/intel/boards/sof_sdw.c49
-rw-r--r--sound/soc/intel/catpt/dsp.c2
-rw-r--r--sound/soc/intel/common/Makefile2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-arl-match.c9
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-lnl-match.c15
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-mtl-match.c49
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-ptl-match.c127
-rw-r--r--sound/soc/intel/common/sof-function-topology-lib.c135
-rw-r--r--sound/soc/intel/common/sof-function-topology-lib.h15
-rw-r--r--sound/soc/loongson/Kconfig10
-rw-r--r--sound/soc/loongson/Makefile2
-rw-r--r--sound/soc/loongson/loongson1_ac97.c398
-rw-r--r--sound/soc/loongson/loongson_i2s_pci.c13
-rw-r--r--sound/soc/mediatek/Kconfig1
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-pcm.c571
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-mt6359.c6
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-mt6359.c45
-rw-r--r--sound/soc/meson/meson-card-utils.c2
-rw-r--r--sound/soc/qcom/lpass.h3
-rw-r--r--sound/soc/qcom/qdsp6/q6apm-lpass-dais.c2
-rw-r--r--sound/soc/qcom/sc8280xp.c2
-rw-r--r--sound/soc/qcom/sdm845.c4
-rw-r--r--sound/soc/renesas/Kconfig7
-rw-r--r--sound/soc/renesas/rcar/Makefile3
-rw-r--r--sound/soc/renesas/rcar/adg.c32
-rw-r--r--sound/soc/renesas/rcar/core.c9
-rw-r--r--sound/soc/renesas/rcar/msiof.c566
-rw-r--r--sound/soc/renesas/rz-ssi.c2
-rw-r--r--sound/soc/rockchip/Kconfig10
-rw-r--r--sound/soc/rockchip/Makefile2
-rw-r--r--sound/soc/rockchip/rockchip_sai.c1555
-rw-r--r--sound/soc/rockchip/rockchip_sai.h251
-rw-r--r--sound/soc/sdca/Makefile2
-rw-r--r--sound/soc/sdca/sdca_asoc.c1311
-rw-r--r--sound/soc/sdca/sdca_functions.c10
-rw-r--r--sound/soc/sdca/sdca_regmap.c3
-rw-r--r--sound/soc/sdw_utils/soc_sdw_bridge_cs35l56.c4
-rw-r--r--sound/soc/sdw_utils/soc_sdw_cs42l43.c10
-rw-r--r--sound/soc/sdw_utils/soc_sdw_cs_amp.c24
-rw-r--r--sound/soc/sdw_utils/soc_sdw_rt_amp.c2
-rw-r--r--sound/soc/sdw_utils/soc_sdw_rt_dmic.c2
-rw-r--r--sound/soc/sdw_utils/soc_sdw_utils.c202
-rw-r--r--sound/soc/soc-ac97.c15
-rw-r--r--sound/soc/soc-core.c130
-rw-r--r--sound/soc/soc-dapm.c84
-rw-r--r--sound/soc/soc-devres.c7
-rw-r--r--sound/soc/soc-pcm.c5
-rw-r--r--sound/soc/soc-utils.c13
-rw-r--r--sound/soc/sof/amd/Kconfig7
-rw-r--r--sound/soc/sof/amd/acp-dsp-offset.h10
-rw-r--r--sound/soc/sof/amd/acp.c135
-rw-r--r--sound/soc/sof/amd/acp.h7
-rw-r--r--sound/soc/sof/amd/pci-acp70.c10
-rw-r--r--sound/soc/sof/core.c10
-rw-r--r--sound/soc/sof/imx/imx8.c24
-rw-r--r--sound/soc/sof/intel/hda-bus.c2
-rw-r--r--sound/soc/sof/intel/hda.c16
-rw-r--r--sound/soc/sof/intel/hda.h1
-rw-r--r--sound/soc/sof/intel/pci-ptl.c30
-rw-r--r--sound/soc/sof/intel/ptl.c23
-rw-r--r--sound/soc/sof/ipc4-control.c11
-rw-r--r--sound/soc/sof/ipc4-pcm.c6
-rw-r--r--sound/soc/sof/sof-pci-dev.c16
-rw-r--r--sound/soc/sof/topology.c115
-rw-r--r--sound/soc/starfive/jh7110_tdm.c13
-rw-r--r--sound/soc/stm/stm32_sai.c18
-rw-r--r--sound/soc/stm/stm32_sai_sub.c16
-rw-r--r--sound/soc/sunxi/sun8i-codec.c13
-rw-r--r--sound/soc/tegra/tegra186_asrc.c18
-rw-r--r--sound/soc/tegra/tegra186_asrc.h12
-rw-r--r--sound/soc/tegra/tegra210_admaif.c223
-rw-r--r--sound/soc/tegra/tegra210_admaif.h78
-rw-r--r--sound/soc/tegra/tegra210_adx.c229
-rw-r--r--sound/soc/tegra/tegra210_adx.h36
-rw-r--r--sound/soc/tegra/tegra210_ahub.c850
-rw-r--r--sound/soc/tegra/tegra210_ahub.h52
-rw-r--r--sound/soc/tegra/tegra210_amx.c229
-rw-r--r--sound/soc/tegra/tegra210_amx.h34
-rw-r--r--sound/soc/tegra/tegra210_i2s.c231
-rw-r--r--sound/soc/tegra/tegra210_i2s.h51
-rw-r--r--sound/soc/tegra/tegra_audio_graph_card.c14
-rw-r--r--sound/soc/tegra/tegra_cif.h30
-rw-r--r--sound/soc/tegra/tegra_isomgr_bw.c7
-rw-r--r--sound/soc/ti/davinci-mcasp.c8
-rw-r--r--sound/sparc/amd7930.c9
-rw-r--r--sound/sparc/dbri.c9
-rw-r--r--sound/usb/endpoint.c7
-rw-r--r--sound/usb/fcp.c3
-rw-r--r--sound/usb/format.c3
-rw-r--r--sound/usb/midi.c21
-rw-r--r--sound/usb/mixer.c22
-rw-r--r--sound/usb/mixer_quirks.c154
-rw-r--r--sound/usb/mixer_scarlett2.c3
-rw-r--r--sound/usb/mixer_us16x08.c32
-rw-r--r--sound/usb/quirks-table.h51
-rw-r--r--sound/usb/quirks.c5
-rw-r--r--sound/virtio/virtio_kctl.c8
-rw-r--r--sound/x86/intel_hdmi_audio.c7
-rw-r--r--tools/Makefile16
-rw-r--r--tools/arch/arm64/include/asm/cputype.h2
-rw-r--r--tools/arch/arm64/include/asm/sysreg.h65
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h5
-rw-r--r--tools/arch/arm64/include/uapi/asm/unistd.h24
-rw-r--r--tools/arch/x86/include/asm/amd/ibs.h (renamed from tools/arch/x86/include/asm/amd-ibs.h)2
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h52
-rw-r--r--tools/arch/x86/include/asm/inat.h6
-rw-r--r--tools/arch/x86/include/asm/msr-index.h39
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h5
-rw-r--r--tools/arch/x86/include/uapi/asm/svm.h2
-rw-r--r--tools/arch/x86/kcpuid/cpuid.csv791
-rw-r--r--tools/arch/x86/kcpuid/kcpuid.c375
-rw-r--r--tools/arch/x86/lib/insn.c7
-rw-r--r--tools/arch/x86/lib/memset_64.S3
-rw-r--r--tools/arch/x86/lib/x86-opcode-map.txt60
-rw-r--r--tools/arch/x86/tools/gen-insn-attr-x86.awk7
-rw-r--r--tools/bootconfig/Makefile4
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst10
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool4
-rw-r--r--tools/bpf/bpftool/cgroup.c14
-rw-r--r--tools/bpf/bpftool/link.c3
-rw-r--r--tools/bpf/bpftool/prog.c12
-rw-r--r--tools/build/Makefile.feature4
-rw-r--r--tools/hv/hv_kvp_daemon.c172
-rw-r--r--tools/include/linux/bits.h5
-rw-r--r--tools/include/linux/cfi_types.h45
-rw-r--r--tools/include/linux/compiler.h22
-rw-r--r--tools/include/nolibc/Makefile34
-rw-r--r--tools/include/nolibc/arch-aarch64.h1
-rw-r--r--tools/include/nolibc/arch-arm.h2
-rw-r--r--tools/include/nolibc/arch-i386.h2
-rw-r--r--tools/include/nolibc/arch-loongarch.h7
-rw-r--r--tools/include/nolibc/arch-m68k.h141
-rw-r--r--tools/include/nolibc/arch-powerpc.h2
-rw-r--r--tools/include/nolibc/arch-riscv.h1
-rw-r--r--tools/include/nolibc/arch-sparc.h191
-rw-r--r--tools/include/nolibc/arch-x86_64.h1
-rw-r--r--tools/include/nolibc/arch.h4
-rw-r--r--tools/include/nolibc/compiler.h9
-rw-r--r--tools/include/nolibc/crt.h5
-rw-r--r--tools/include/nolibc/ctype.h6
-rw-r--r--tools/include/nolibc/dirent.h10
-rw-r--r--tools/include/nolibc/elf.h15
-rw-r--r--tools/include/nolibc/errno.h6
-rw-r--r--tools/include/nolibc/fcntl.h69
-rw-r--r--tools/include/nolibc/getopt.h101
-rw-r--r--tools/include/nolibc/math.h31
-rw-r--r--tools/include/nolibc/nolibc.h21
-rw-r--r--tools/include/nolibc/poll.h55
-rw-r--r--tools/include/nolibc/sched.h50
-rw-r--r--tools/include/nolibc/signal.h6
-rw-r--r--tools/include/nolibc/std.h6
-rw-r--r--tools/include/nolibc/stddef.h24
-rw-r--r--tools/include/nolibc/stdint.h4
-rw-r--r--tools/include/nolibc/stdio.h167
-rw-r--r--tools/include/nolibc/stdlib.h54
-rw-r--r--tools/include/nolibc/string.h40
-rw-r--r--tools/include/nolibc/sys.h423
-rw-r--r--tools/include/nolibc/sys/auxv.h41
-rw-r--r--tools/include/nolibc/sys/ioctl.h29
-rw-r--r--tools/include/nolibc/sys/mman.h82
-rw-r--r--tools/include/nolibc/sys/mount.h37
-rw-r--r--tools/include/nolibc/sys/prctl.h36
-rw-r--r--tools/include/nolibc/sys/random.h34
-rw-r--r--tools/include/nolibc/sys/reboot.h34
-rw-r--r--tools/include/nolibc/sys/resource.h53
-rw-r--r--tools/include/nolibc/sys/stat.h94
-rw-r--r--tools/include/nolibc/sys/syscall.h19
-rw-r--r--tools/include/nolibc/sys/sysmacros.h20
-rw-r--r--tools/include/nolibc/sys/time.h49
-rw-r--r--tools/include/nolibc/sys/timerfd.h87
-rw-r--r--tools/include/nolibc/sys/types.h7
-rw-r--r--tools/include/nolibc/sys/utsname.h42
-rw-r--r--tools/include/nolibc/sys/wait.h116
-rw-r--r--tools/include/nolibc/time.h189
-rw-r--r--tools/include/nolibc/types.h32
-rw-r--r--tools/include/nolibc/unistd.h40
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h1
-rw-r--r--tools/include/uapi/asm-generic/socket.h2
-rw-r--r--tools/include/uapi/asm-generic/unistd.h4
-rw-r--r--tools/include/uapi/linux/bits.h8
-rw-r--r--tools/include/uapi/linux/bpf.h22
-rw-r--r--tools/include/uapi/linux/fanotify.h274
-rw-r--r--tools/include/uapi/linux/fs.h19
-rw-r--r--tools/include/uapi/linux/if_xdp.h6
-rw-r--r--tools/include/uapi/linux/in.h2
-rw-r--r--tools/include/uapi/linux/kvm.h9
-rw-r--r--tools/include/uapi/linux/mount.h235
-rw-r--r--tools/include/uapi/linux/netdev.h1
-rw-r--r--tools/include/uapi/linux/nsfs.h45
-rw-r--r--tools/include/uapi/linux/perf_event.h655
-rw-r--r--tools/include/uapi/linux/prctl.h45
-rw-r--r--tools/include/uapi/linux/stat.h99
-rw-r--r--tools/include/vdso/unaligned.h12
-rw-r--r--tools/lib/bpf/bpf_core_read.h6
-rw-r--r--tools/lib/bpf/bpf_helpers.h8
-rw-r--r--tools/lib/bpf/btf.c226
-rw-r--r--tools/lib/bpf/libbpf.c87
-rw-r--r--tools/lib/bpf/libbpf.h11
-rw-r--r--tools/lib/bpf/libbpf.map4
-rw-r--r--tools/lib/bpf/libbpf_internal.h9
-rw-r--r--tools/lib/bpf/linker.c6
-rw-r--r--tools/lib/bpf/netlink.c20
-rw-r--r--tools/lib/bpf/nlattr.c15
-rw-r--r--tools/lib/perf/Documentation/libperf.txt1
-rw-r--r--tools/lib/perf/Makefile14
-rw-r--r--tools/lib/perf/cpumap.c10
-rw-r--r--tools/lib/perf/include/perf/cpumap.h2
-rw-r--r--tools/lib/perf/include/perf/event.h12
-rw-r--r--tools/lib/perf/include/perf/threadmap.h1
-rw-r--r--tools/lib/perf/threadmap.c17
-rw-r--r--tools/memory-model/Documentation/README7
-rw-r--r--tools/memory-model/Documentation/explanation.txt2
-rw-r--r--tools/memory-model/Documentation/locking.txt5
-rw-r--r--tools/memory-model/Documentation/ordering.txt22
-rw-r--r--tools/memory-model/Documentation/recipes.txt4
-rw-r--r--tools/memory-model/Documentation/references.txt3
-rw-r--r--tools/memory-model/Documentation/simple.txt4
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_array.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_array.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_array.j22
-rw-r--r--tools/net/ynl/Makefile.deps17
-rw-r--r--tools/net/ynl/generated/Makefile7
-rw-r--r--tools/net/ynl/lib/ynl-priv.h19
-rw-r--r--tools/net/ynl/lib/ynl.c162
-rw-r--r--tools/net/ynl/lib/ynl.h18
-rwxr-xr-xtools/net/ynl/pyynl/cli.py15
-rwxr-xr-xtools/net/ynl/pyynl/ethtool.py22
-rw-r--r--tools/net/ynl/pyynl/lib/__init__.py5
-rw-r--r--tools/net/ynl/pyynl/lib/ynl.py39
-rwxr-xr-xtools/net/ynl/pyynl/ynl_gen_c.py908
-rwxr-xr-xtools/net/ynl/pyynl/ynl_gen_rst.py2
-rw-r--r--tools/net/ynl/samples/.gitignore6
-rw-r--r--tools/net/ynl/samples/devlink.c7
-rw-r--r--tools/net/ynl/samples/rt-addr.c80
-rw-r--r--tools/net/ynl/samples/rt-link.c184
-rw-r--r--tools/net/ynl/samples/rt-route.c80
-rw-r--r--tools/net/ynl/samples/tc.c80
-rw-r--r--tools/objtool/arch/x86/decode.c33
-rw-r--r--tools/objtool/arch/x86/special.c2
-rw-r--r--tools/objtool/check.c64
-rw-r--r--tools/objtool/elf.c38
-rw-r--r--tools/objtool/include/objtool/arch.h3
-rw-r--r--tools/objtool/include/objtool/elf.h1
-rw-r--r--tools/perf/Documentation/perf-amd-ibs.txt9
-rw-r--r--tools/perf/Documentation/perf-c2c.txt11
-rw-r--r--tools/perf/Documentation/perf-config.txt4
-rw-r--r--tools/perf/Documentation/perf-list.txt9
-rw-r--r--tools/perf/Documentation/perf-lock.txt15
-rw-r--r--tools/perf/Documentation/perf-mem.txt32
-rw-r--r--tools/perf/Documentation/perf-record.txt16
-rw-r--r--tools/perf/Documentation/perf-report.txt1
-rw-r--r--tools/perf/Documentation/perf-stat.txt7
-rw-r--r--tools/perf/Documentation/perf-trace.txt9
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt24
-rw-r--r--tools/perf/MANIFEST6
-rw-r--r--tools/perf/Makefile.config7
-rw-r--r--tools/perf/Makefile.perf5
-rw-r--r--tools/perf/arch/arm/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl1
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/sh/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/sparc/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_32.tbl3
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl1
-rw-r--r--tools/perf/arch/x86/include/arch-tests.h1
-rw-r--r--tools/perf/arch/x86/tests/Build1
-rw-r--r--tools/perf/arch/x86/tests/amd-ibs-period.c1032
-rw-r--r--tools/perf/arch/x86/tests/arch-tests.c2
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c16
-rw-r--r--tools/perf/arch/x86/util/mem-events.c6
-rw-r--r--tools/perf/arch/x86/util/mem-events.h1
-rw-r--r--tools/perf/arch/x86/util/pmu.c288
-rw-r--r--tools/perf/arch/xtensa/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/bench/Build1
-rw-r--r--tools/perf/bench/evlist-open-close.c42
-rw-r--r--tools/perf/bench/futex-hash.c7
-rw-r--r--tools/perf/bench/futex-lock-pi.c5
-rw-r--r--tools/perf/bench/futex-requeue.c6
-rw-r--r--tools/perf/bench/futex-wake-parallel.c9
-rw-r--r--tools/perf/bench/futex-wake.c4
-rw-r--r--tools/perf/bench/futex.c67
-rw-r--r--tools/perf/bench/futex.h5
-rw-r--r--tools/perf/builtin-c2c.c3
-rw-r--r--tools/perf/builtin-check.c38
-rw-r--r--tools/perf/builtin-ftrace.c101
-rw-r--r--tools/perf/builtin-list.c12
-rw-r--r--tools/perf/builtin-lock.c79
-rw-r--r--tools/perf/builtin-record.c80
-rw-r--r--tools/perf/builtin-report.c21
-rw-r--r--tools/perf/builtin-script.c4
-rw-r--r--tools/perf/builtin-stat.c37
-rw-r--r--tools/perf/builtin-top.c9
-rw-r--r--tools/perf/builtin-trace.c78
-rw-r--r--tools/perf/builtin-version.c30
-rw-r--r--tools/perf/builtin.h9
-rwxr-xr-xtools/perf/check-headers.sh3
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json26
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json489
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/cache.json284
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/floating-point.json29
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/frontend.json78
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/memory.json82
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/other.json209
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/pipeline.json308
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json54
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/cache.json107
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/floating-point.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/memory.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/other.json102
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json55
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/virtual-memory.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/arl-metrics.json566
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/cache.json200
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/frontend.json39
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/memory.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/other.json197
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/pipeline.json230
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/other.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/pipeline.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json256
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json180
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json268
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/cache.json404
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json389
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/other.json404
-rw-r--r--tools/perf/pmu-events/arch/x86/clearwaterforest/cache.json35
-rw-r--r--tools/perf/pmu-events/arch/x86/clearwaterforest/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/clearwaterforest/other.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/clearwaterforest/pipeline.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/cache.json296
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/memory.json261
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/other.json404
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json31
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json284
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/emr-metrics.json475
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json78
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json231
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/other.json332
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json259
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/virtual-memory.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/cache.json155
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/counter.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/frontend.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/grr-metrics.json204
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/other.json29
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/pipeline.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json45
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json338
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/cache.json305
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/counter.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/floating-point.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/frontend.json105
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/gnr-metrics.json487
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/memory.json206
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/other.json243
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json261
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/uncore-cache.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/uncore-interconnect.json90
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/uncore-memory.json240
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/virtual-memory.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json206
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json222
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/cache.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json385
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/memory.json160
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/other.json220
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/cache.json273
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json399
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/memory.json190
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/other.json463
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json76
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json80
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/frontend.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/other.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/cache.json182
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/frontend.json39
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/lnl-metrics.json560
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/memory.json75
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/other.json358
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json253
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv14
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/cache.json352
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/frontend.json82
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/memory.json90
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/mtl-metrics.json553
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/other.json149
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json252
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/cache.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/other.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/cache.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/other.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/cache.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/memory.json160
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/other.json220
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json385
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/frontend.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/other.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json343
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json78
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json231
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/other.json382
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json259
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json469
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/cache.json25
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/memory.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/other.json49
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json204
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json240
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json367
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/cache.json74
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/other.json74
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json385
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/cache.json296
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/memory.json261
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/other.json404
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json31
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json383
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/other.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/other.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/cache.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/other.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json8
-rw-r--r--tools/perf/pmu-events/empty-pmu-events.c282
-rwxr-xr-xtools/perf/pmu-events/jevents.py72
-rw-r--r--tools/perf/pmu-events/pmu-events.h26
-rwxr-xr-xtools/perf/python/counting.py36
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py5
-rw-r--r--tools/perf/tests/Build1
-rw-r--r--tools/perf/tests/builtin-test.c1
-rw-r--r--tools/perf/tests/demangle-java-test.c22
-rw-r--r--tools/perf/tests/demangle-ocaml-test.c7
-rw-r--r--tools/perf/tests/demangle-rust-v0-test.c74
-rw-r--r--tools/perf/tests/dwarf-unwind.c35
-rw-r--r--tools/perf/tests/pmu-events.c129
-rwxr-xr-xtools/perf/tests/shell/amd-ibs-swfilt.sh67
-rw-r--r--tools/perf/tests/shell/lib/perf_metric_validation.py12
-rw-r--r--tools/perf/tests/shell/lib/probe_vfs_getname.sh20
-rw-r--r--tools/perf/tests/shell/lib/stat_output.sh5
-rwxr-xr-xtools/perf/tests/shell/perf-report-hierarchy.sh43
-rwxr-xr-xtools/perf/tests/shell/probe_vfs_getname.sh8
-rwxr-xr-xtools/perf/tests/shell/record+script_probe_vfs_getname.sh8
-rwxr-xr-xtools/perf/tests/shell/record.sh41
-rwxr-xr-xtools/perf/tests/shell/record_lbr.sh5
-rwxr-xr-xtools/perf/tests/shell/record_offcpu.sh71
-rwxr-xr-xtools/perf/tests/shell/stat+event_uniquifying.sh69
-rwxr-xr-xtools/perf/tests/shell/stat+json_output.sh5
-rwxr-xr-xtools/perf/tests/shell/stat_all_metrics.sh118
-rwxr-xr-xtools/perf/tests/shell/stat_metrics_values.sh17
-rwxr-xr-xtools/perf/tests/shell/test_brstack.sh72
-rwxr-xr-xtools/perf/tests/shell/test_data_symbol.sh29
-rwxr-xr-xtools/perf/tests/shell/test_intel_pt.sh5
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh9
-rwxr-xr-xtools/perf/tests/shell/trace_summary.sh77
-rw-r--r--tools/perf/tests/switch-tracking.c2
-rw-r--r--tools/perf/tests/tests.h1
-rw-r--r--tools/perf/trace/beauty/include/linux/socket.h2
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/fcntl.h4
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/fs.h21
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/mount.h10
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/prctl.h11
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/stat.h99
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/vhost.h4
-rw-r--r--tools/perf/trace/beauty/include/uapi/sound/asound.h8
-rw-r--r--tools/perf/ui/Build1
-rw-r--r--tools/perf/ui/browser.c10
-rw-r--r--tools/perf/ui/browser.h1
-rw-r--r--tools/perf/ui/browsers/annotate-data.c1
-rw-r--r--tools/perf/ui/browsers/annotate.c24
-rw-r--r--tools/perf/ui/browsers/header.c1
-rw-r--r--tools/perf/ui/browsers/hists.c123
-rw-r--r--tools/perf/ui/browsers/map.c4
-rw-r--r--tools/perf/ui/hist.c307
-rw-r--r--tools/perf/ui/keysyms.c44
-rw-r--r--tools/perf/ui/keysyms.h2
-rw-r--r--tools/perf/ui/stdio/hist.c57
-rw-r--r--tools/perf/util/Build11
-rw-r--r--tools/perf/util/amd-sample-raw.c79
-rw-r--r--tools/perf/util/annotate.c1
-rw-r--r--tools/perf/util/annotate.h1
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.h17
-rw-r--r--tools/perf/util/arm-spe.c96
-rw-r--r--tools/perf/util/bpf-trace-summary.c458
-rw-r--r--tools/perf/util/bpf_lock_contention.c116
-rw-r--r--tools/perf/util/bpf_off_cpu.c119
-rw-r--r--tools/perf/util/bpf_skel/lock_contention.bpf.c107
-rw-r--r--tools/perf/util/bpf_skel/lock_data.h1
-rw-r--r--tools/perf/util/bpf_skel/off_cpu.bpf.c98
-rw-r--r--tools/perf/util/bpf_skel/syscall_summary.bpf.c153
-rw-r--r--tools/perf/util/bpf_skel/syscall_summary.h27
-rw-r--r--tools/perf/util/bpf_skel/vmlinux/vmlinux.h9
-rw-r--r--tools/perf/util/demangle-cxx.h2
-rw-r--r--tools/perf/util/demangle-rust-v0.c2042
-rw-r--r--tools/perf/util/demangle-rust-v0.h88
-rw-r--r--tools/perf/util/demangle-rust.c269
-rw-r--r--tools/perf/util/demangle-rust.h8
-rw-r--r--tools/perf/util/dso.c45
-rw-r--r--tools/perf/util/dsos.c3
-rw-r--r--tools/perf/util/event.c6
-rw-r--r--tools/perf/util/evlist.c101
-rw-r--r--tools/perf/util/evlist.h6
-rw-r--r--tools/perf/util/evsel.c210
-rw-r--r--tools/perf/util/evsel.h19
-rw-r--r--tools/perf/util/evsel_config.h1
-rw-r--r--tools/perf/util/fncache.c69
-rw-r--r--tools/perf/util/fncache.h1
-rw-r--r--tools/perf/util/hist.c78
-rw-r--r--tools/perf/util/hist.h27
-rw-r--r--tools/perf/util/hwmon_pmu.c43
-rw-r--r--tools/perf/util/intel-pt.c205
-rw-r--r--tools/perf/util/intel-tpebs.c731
-rw-r--r--tools/perf/util/intel-tpebs.h40
-rw-r--r--tools/perf/util/lock-contention.h9
-rw-r--r--tools/perf/util/machine.c59
-rw-r--r--tools/perf/util/machine.h1
-rw-r--r--tools/perf/util/maps.c9
-rw-r--r--tools/perf/util/mem-events.c188
-rw-r--r--tools/perf/util/mem-events.h57
-rw-r--r--tools/perf/util/metricgroup.c107
-rw-r--r--tools/perf/util/metricgroup.h2
-rw-r--r--tools/perf/util/mutex.h11
-rw-r--r--tools/perf/util/off_cpu.h3
-rw-r--r--tools/perf/util/parse-events.c213
-rw-r--r--tools/perf/util/parse-events.h3
-rw-r--r--tools/perf/util/parse-events.l1
-rw-r--r--tools/perf/util/pmu.c236
-rw-r--r--tools/perf/util/pmu.h9
-rw-r--r--tools/perf/util/pmus.c21
-rw-r--r--tools/perf/util/print-events.h3
-rw-r--r--tools/perf/util/python.c178
-rw-r--r--tools/perf/util/record.h2
-rw-r--r--tools/perf/util/rwsem.c4
-rw-r--r--tools/perf/util/rwsem.h10
-rw-r--r--tools/perf/util/session.c18
-rw-r--r--tools/perf/util/sort.c116
-rw-r--r--tools/perf/util/sort.h3
-rw-r--r--tools/perf/util/srccode.c4
-rw-r--r--tools/perf/util/stat-display.c230
-rw-r--r--tools/perf/util/stat.c40
-rw-r--r--tools/perf/util/stat.h1
-rw-r--r--tools/perf/util/symbol-elf.c71
-rw-r--r--tools/perf/util/symbol-minimal.c167
-rw-r--r--tools/perf/util/symbol.c90
-rw-r--r--tools/perf/util/thread.c9
-rw-r--r--tools/perf/util/thread.h2
-rw-r--r--tools/perf/util/tool.c11
-rw-r--r--tools/perf/util/tool_pmu.c25
-rw-r--r--tools/perf/util/trace.h38
-rw-r--r--tools/perf/util/unwind-libunwind-local.c2
-rw-r--r--tools/power/acpi/common/cmfsize.c2
-rw-r--r--tools/power/acpi/common/getopt.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c4
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixdir.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixmap.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixxf.c2
-rw-r--r--tools/power/acpi/tools/acpidump/acpidump.h2
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apfiles.c4
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c2
-rw-r--r--tools/power/cpupower/Makefile13
-rw-r--r--tools/power/cpupower/README28
-rw-r--r--tools/power/cpupower/bindings/python/Makefile8
-rw-r--r--tools/power/cpupower/bindings/python/README13
-rw-r--r--tools/power/cpupower/cpupower-service.conf32
-rw-r--r--tools/power/cpupower/cpupower.service.in16
-rw-r--r--tools/power/cpupower/cpupower.sh26
-rwxr-xr-xtools/power/pm-graph/sleepgraph.py3
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c15
-rw-r--r--tools/power/x86/intel-speed-select/isst-core-tpmi.c12
-rw-r--r--tools/power/x86/intel-speed-select/isst-display.c20
-rw-r--r--tools/power/x86/intel-speed-select/isst.h3
-rw-r--r--tools/sched_ext/Makefile23
-rw-r--r--tools/sched_ext/include/scx/common.bpf.h2
-rw-r--r--tools/sched_ext/scx_flatcg.bpf.c2
-rw-r--r--tools/sched_ext/scx_qmap.bpf.c4
-rw-r--r--tools/sched_ext/scx_show_state.py14
-rw-r--r--tools/scripts/syscall.tbl1
-rw-r--r--tools/testing/crypto/chacha20-s390/test-cipher.c10
-rw-r--r--tools/testing/cxl/Kbuild1
-rw-r--r--tools/testing/cxl/test/cxl.c1
-rw-r--r--tools/testing/cxl/test/mem.c3
-rw-r--r--tools/testing/cxl/test/mock.c1
-rw-r--r--tools/testing/kunit/configs/all_tests.config5
-rw-r--r--tools/testing/kunit/kunit_json.py10
-rw-r--r--tools/testing/kunit/kunit_kernel.py8
-rw-r--r--tools/testing/kunit/kunit_parser.py4
-rwxr-xr-xtools/testing/kunit/kunit_tool_test.py4
-rw-r--r--tools/testing/kunit/qemu_configs/powerpc.py1
-rw-r--r--tools/testing/kunit/qemu_configs/powerpc32.py17
-rw-r--r--tools/testing/kunit/qemu_configs/powerpcle.py14
-rw-r--r--tools/testing/kunit/qemu_configs/riscv32.py17
-rw-r--r--tools/testing/kunit/qemu_configs/sh.py4
-rw-r--r--tools/testing/kunit/qemu_configs/sparc.py2
-rw-r--r--tools/testing/kunit/qemu_configs/sparc64.py16
-rw-r--r--tools/testing/memblock/internal.h6
-rw-r--r--tools/testing/memblock/linux/mutex.h14
-rw-r--r--tools/testing/memblock/tests/alloc_api.c22
-rw-r--r--tools/testing/memblock/tests/alloc_helpers_api.c4
-rw-r--r--tools/testing/memblock/tests/alloc_nid_api.c20
-rw-r--r--tools/testing/memblock/tests/basic_api.c102
-rw-r--r--tools/testing/radix-tree/maple.c126
-rw-r--r--tools/testing/selftests/.gitignore1
-rw-r--r--tools/testing/selftests/Makefile4
-rw-r--r--tools/testing/selftests/arm64/Makefile2
-rw-r--r--tools/testing/selftests/arm64/abi/tpidr2.c14
-rw-r--r--tools/testing/selftests/arm64/fp/fp-ptrace.c62
-rw-r--r--tools/testing/selftests/bpf/DENYLIST1
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.aarch642
-rw-r--r--tools/testing/selftests/bpf/Makefile16
-rw-r--r--tools/testing/selftests/bpf/bench.c16
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_htab_mem.c3
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_sockmap.c598
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_trigger.c42
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_uprobes.sh2
-rw-r--r--tools/testing/selftests/bpf/bpf_experimental.h5
-rw-r--r--tools/testing/selftests/bpf/config5
-rw-r--r--tools/testing/selftests/bpf/config.aarch641
-rw-r--r--tools/testing/selftests/bpf/config.s390x1
-rw-r--r--tools/testing/selftests/bpf/config.x86_641
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/attach_probe.c84
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_nf.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c231
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c101
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_split.c58
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_sysfs.c81
-rw-r--r--tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c285
-rw-r--r--tools/testing/selftests/bpf/prog_tests/dynptr.c13
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fd_htab_lookup.c192
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fill_link_info.c18
-rw-r--r--tools/testing/selftests/bpf/prog_tests/for_each.c37
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_list.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rbtree.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/res_spin_lock.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_assign.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c447
-rw-r--r--tools/testing/selftests/bpf/prog_tests/socket_helpers.h84
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h25
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c298
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_listen.c457
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_redir.c465
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c11
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_btf_ext.c64
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_veristat.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_metadata.c22
-rw-r--r--tools/testing/selftests/bpf/progs/bench_sockmap_prog.c65
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h (renamed from tools/testing/selftests/bpf/bpf_arena_spin_lock.h)15
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_misc.h7
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_qdisc_common.h27
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_qdisc_fail__incompl_ops.c41
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c126
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c756
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tracing_net.h1
-rw-r--r--tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c22
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c9
-rw-r--r--tools/testing/selftests/bpf/progs/dmabuf_iter.c101
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_success.c230
-rw-r--r--tools/testing/selftests/bpf/progs/fd_htab_lookup.c25
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_hash_modify.c30
-rw-r--r--tools/testing/selftests/bpf/progs/iters.c2
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list_peek.c113
-rw-r--r--tools/testing/selftests/bpf/progs/prepare.c1
-rw-r--r--tools/testing/selftests/bpf/progs/raw_tp_null.c2
-rw-r--r--tools/testing/selftests/bpf/progs/raw_tp_null_fail.c2
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_fail.c29
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_search.c206
-rw-r--r--tools/testing/selftests/bpf/progs/res_spin_lock.c10
-rw-r--r--tools/testing/selftests/bpf/progs/set_global_vars.c41
-rw-r--r--tools/testing/selftests/bpf/progs/setget_sockopt.c11
-rw-r--r--tools/testing/selftests/bpf/progs/sock_iter_batch.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_ext.c22
-rw-r--r--tools/testing/selftests/bpf/progs/test_module_attach.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_ktls.c36
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_redir.c68
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c4
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bpf_trap.c71
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c12
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_load_acquire.c48
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_precision.c58
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_store_release.c39
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_metadata.c13
-rw-r--r--tools/testing/selftests/bpf/progs/xsk_xdp_progs.c50
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_testmod.c22
-rw-r--r--tools/testing/selftests/bpf/test_loader.c14
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c8
-rw-r--r--tools/testing/selftests/bpf/veristat.c101
-rw-r--r--tools/testing/selftests/bpf/xsk_xdp_common.h1
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.c118
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.h2
-rw-r--r--tools/testing/selftests/cgroup/Makefile21
-rw-r--r--tools/testing/selftests/cgroup/lib/cgroup_util.c (renamed from tools/testing/selftests/cgroup/cgroup_util.c)118
-rw-r--r--tools/testing/selftests/cgroup/lib/include/cgroup_util.h (renamed from tools/testing/selftests/cgroup/cgroup_util.h)13
-rw-r--r--tools/testing/selftests/cgroup/lib/libcgroup.mk19
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_prs.sh617
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c100
-rw-r--r--tools/testing/selftests/coredump/stackdump_test.c477
-rwxr-xr-xtools/testing/selftests/cpufreq/cpufreq.sh18
-rw-r--r--tools/testing/selftests/damon/Makefile2
-rw-r--r--tools/testing/selftests/damon/_chk_dependency.sh52
-rw-r--r--tools/testing/selftests/damon/_damon_sysfs.py9
-rw-r--r--tools/testing/selftests/damon/_debugfs_common.sh64
-rw-r--r--tools/testing/selftests/drivers/net/.gitignore2
-rw-r--r--tools/testing/selftests/drivers/net/Makefile6
l---------tools/testing/selftests/drivers/net/dsa/tc_taprio.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/hds.py33
-rw-r--r--tools/testing/selftests/drivers/net/hw/Makefile3
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/devmem.py45
-rw-r--r--tools/testing/selftests/drivers/net/hw/iou-zcrx.c27
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/iou-zcrx.py132
-rw-r--r--tools/testing/selftests/drivers/net/hw/lib/py/__init__.py1
-rw-r--r--tools/testing/selftests/drivers/net/hw/lib/py/linkconfig.py222
-rw-r--r--tools/testing/selftests/drivers/net/hw/ncdevmem.c382
-rw-r--r--tools/testing/selftests/drivers/net/hw/nic_link_layer.py113
-rw-r--r--tools/testing/selftests/drivers/net/hw/nic_performance.py137
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_input_xfrm.py5
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/xsk_reconfig.py60
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/env.py2
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/load.py20
-rw-r--r--tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/napi_id.py23
-rw-r--r--tools/testing/selftests/drivers/net/napi_id_helper.c83
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/peer.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/psfp.sh8
-rwxr-xr-xtools/testing/selftests/drivers/net/ping.py61
-rwxr-xr-xtools/testing/selftests/drivers/net/queues.py4
-rw-r--r--tools/testing/selftests/drivers/net/team/Makefile2
-rw-r--r--tools/testing/selftests/drivers/net/team/config1
-rwxr-xr-xtools/testing/selftests/drivers/net/team/propagation.sh80
-rw-r--r--tools/testing/selftests/filesystems/.gitignore1
-rw-r--r--tools/testing/selftests/filesystems/Makefile2
-rw-r--r--tools/testing/selftests/filesystems/anon_inode_test.c69
-rw-r--r--tools/testing/selftests/filesystems/eventfd/eventfd_test.c7
-rw-r--r--tools/testing/selftests/filesystems/file_stressor.c2
-rw-r--r--tools/testing/selftests/filesystems/mount-notify/.gitignore1
-rw-r--r--tools/testing/selftests/filesystems/mount-notify/Makefile9
-rw-r--r--tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c93
-rw-r--r--tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c557
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/Makefile2
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c2
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c2
-rw-r--r--tools/testing/selftests/filesystems/statmount/Makefile6
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount.h36
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount_test_ns.c86
-rw-r--r--tools/testing/selftests/filesystems/utils.c88
-rw-r--r--tools/testing/selftests/filesystems/utils.h3
-rw-r--r--tools/testing/selftests/filesystems/wrappers.h (renamed from tools/testing/selftests/filesystems/overlayfs/wrappers.h)46
-rw-r--r--tools/testing/selftests/ftrace/Makefile2
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/dynevent_limitations.tc23
-rw-r--r--tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc20
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi-filter.tc177
-rw-r--r--tools/testing/selftests/futex/functional/.gitignore6
-rw-r--r--tools/testing/selftests/futex/functional/Makefile7
-rw-r--r--tools/testing/selftests/futex/functional/futex_numa.c262
-rw-r--r--tools/testing/selftests/futex/functional/futex_numa_mpol.c231
-rw-r--r--tools/testing/selftests/futex/functional/futex_priv_hash.c292
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_wouldblock.c2
-rwxr-xr-xtools/testing/selftests/futex/functional/run.sh7
-rw-r--r--tools/testing/selftests/futex/include/futex2test.h70
-rw-r--r--tools/testing/selftests/gpio/Makefile2
-rw-r--r--tools/testing/selftests/gpio/config1
-rwxr-xr-xtools/testing/selftests/gpio/gpio-aggregator.sh727
-rw-r--r--tools/testing/selftests/hid/config.common1
-rw-r--r--tools/testing/selftests/kexec/Makefile7
-rw-r--r--tools/testing/selftests/kexec/test_kexec_jump.c72
-rwxr-xr-xtools/testing/selftests/kexec/test_kexec_jump.sh42
-rw-r--r--tools/testing/selftests/kmod/config5
-rw-r--r--tools/testing/selftests/kselftest_harness.h170
-rw-r--r--tools/testing/selftests/kselftest_harness/.gitignore2
-rw-r--r--tools/testing/selftests/kselftest_harness/Makefile7
-rw-r--r--tools/testing/selftests/kselftest_harness/harness-selftest.c136
-rw-r--r--tools/testing/selftests/kselftest_harness/harness-selftest.expected64
-rwxr-xr-xtools/testing/selftests/kselftest_harness/harness-selftest.sh13
-rw-r--r--tools/testing/selftests/kvm/Makefile2
-rw-r--r--tools/testing/selftests/kvm/Makefile.kvm69
-rw-r--r--tools/testing/selftests/kvm/access_tracking_perf_test.c281
-rw-r--r--tools/testing/selftests/kvm/arm64/host_sve.c127
-rw-r--r--tools/testing/selftests/kvm/arm64/page_fault_test.c2
-rw-r--r--tools/testing/selftests/kvm/arm64/set_id_regs.c85
-rw-r--r--tools/testing/selftests/kvm/include/arm64/processor.h67
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h41
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/kvm_util_arch.h7
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/processor.h141
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/ucall.h20
-rw-r--r--tools/testing/selftests/kvm/include/lru_gen_util.h51
-rw-r--r--tools/testing/selftests/kvm/include/riscv/processor.h23
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h1
-rw-r--r--tools/testing/selftests/kvm/include/x86/processor.h1
-rw-r--r--tools/testing/selftests/kvm/include/x86/sev.h53
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/processor.c60
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c29
-rw-r--r--tools/testing/selftests/kvm/lib/loongarch/exception.S59
-rw-r--r--tools/testing/selftests/kvm/lib/loongarch/processor.c346
-rw-r--r--tools/testing/selftests/kvm/lib/loongarch/ucall.c38
-rw-r--r--tools/testing/selftests/kvm/lib/lru_gen_util.c387
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/handlers.S139
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/processor.c2
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c42
-rw-r--r--tools/testing/selftests/kvm/lib/x86/processor.c4
-rw-r--r--tools/testing/selftests/kvm/lib/x86/sev.c76
-rw-r--r--tools/testing/selftests/kvm/riscv/arch_timer.c2
-rw-r--r--tools/testing/selftests/kvm/riscv/ebreak_test.c2
-rw-r--r--tools/testing/selftests/kvm/riscv/get-reg-list.c132
-rw-r--r--tools/testing/selftests/kvm/riscv/sbi_pmu_test.c24
-rw-r--r--tools/testing/selftests/kvm/rseq_test.c31
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/fastops_test.c165
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_cpuid.c21
-rw-r--r--tools/testing/selftests/kvm/x86/kvm_buslock_test.c135
-rw-r--r--tools/testing/selftests/kvm/x86/monitor_mwait_test.c108
-rw-r--r--tools/testing/selftests/kvm/x86/sev_init2_tests.c13
-rw-r--r--tools/testing/selftests/kvm/x86/sev_smoke_test.c75
-rw-r--r--tools/testing/selftests/landlock/audit.h21
-rw-r--r--tools/testing/selftests/landlock/audit_test.c154
-rw-r--r--tools/testing/selftests/landlock/fs_test.c3
-rw-r--r--tools/testing/selftests/lib/config1
-rw-r--r--tools/testing/selftests/mincore/mincore_selftest.c19
-rw-r--r--tools/testing/selftests/mm/.gitignore2
-rw-r--r--tools/testing/selftests/mm/Makefile2
-rwxr-xr-xtools/testing/selftests/mm/charge_reserved_hugetlb.sh4
-rw-r--r--tools/testing/selftests/mm/compaction_test.c19
-rw-r--r--tools/testing/selftests/mm/cow.c342
-rw-r--r--tools/testing/selftests/mm/guard-regions.c90
-rw-r--r--tools/testing/selftests/mm/gup_longterm.c160
-rwxr-xr-xtools/testing/selftests/mm/hugetlb_reparenting_test.sh100
-rw-r--r--tools/testing/selftests/mm/madv_populate.c18
-rw-r--r--tools/testing/selftests/mm/map_fixed_noreplace.c2
-rw-r--r--tools/testing/selftests/mm/merge.c455
-rw-r--r--tools/testing/selftests/mm/mlock2-tests.c2
-rw-r--r--tools/testing/selftests/mm/pagemap_ioctl.c16
-rw-r--r--tools/testing/selftests/mm/pfnmap.c249
-rw-r--r--tools/testing/selftests/mm/pkey-powerpc.h14
-rw-r--r--tools/testing/selftests/mm/pkey_util.c1
-rwxr-xr-xtools/testing/selftests/mm/run_vmtests.sh8
-rw-r--r--tools/testing/selftests/mm/thuge-gen.c4
-rw-r--r--tools/testing/selftests/mm/uffd-unit-tests.c202
-rwxr-xr-xtools/testing/selftests/mm/va_high_addr_switch.sh26
-rw-r--r--tools/testing/selftests/mm/vm_util.c62
-rw-r--r--tools/testing/selftests/mm/vm_util.h41
-rw-r--r--tools/testing/selftests/mount_setattr/Makefile2
-rw-r--r--tools/testing/selftests/mount_setattr/mount_setattr_test.c61
-rw-r--r--tools/testing/selftests/nci/nci_dev.c2
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile5
-rw-r--r--tools/testing/selftests/net/af_unix/scm_rights.c80
-rwxr-xr-xtools/testing/selftests/net/bareudp.sh49
-rwxr-xr-xtools/testing/selftests/net/busy_poll_test.sh2
-rw-r--r--tools/testing/selftests/net/can/.gitignore2
-rw-r--r--tools/testing/selftests/net/can/Makefile11
-rw-r--r--tools/testing/selftests/net/can/test_raw_filter.c405
-rwxr-xr-xtools/testing/selftests/net/can/test_raw_filter.sh45
-rw-r--r--tools/testing/selftests/net/config1
-rwxr-xr-xtools/testing/selftests/net/fib_rule_tests.sh37
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh123
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_igmp.sh80
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_mld.sh81
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_vlan_aware.sh96
-rw-r--r--tools/testing/selftests/net/forwarding/config1
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_taprio.sh421
-rw-r--r--tools/testing/selftests/net/forwarding/tsn_lib.sh26
-rwxr-xr-xtools/testing/selftests/net/gre_ipv6_lladdr.sh177
-rwxr-xr-xtools/testing/selftests/net/icmp_redirect.sh2
-rwxr-xr-xtools/testing/selftests/net/ipv6_route_update_soft_lockup.sh1
-rw-r--r--tools/testing/selftests/net/lib.sh47
-rw-r--r--tools/testing/selftests/net/lib/.gitignore1
-rw-r--r--tools/testing/selftests/net/lib/Makefile1
-rw-r--r--tools/testing/selftests/net/lib/ksft.h56
-rw-r--r--tools/testing/selftests/net/lib/py/ksft.py24
-rw-r--r--tools/testing/selftests/net/lib/py/ynl.py4
-rw-r--r--tools/testing/selftests/net/lib/xdp_helper.c (renamed from tools/testing/selftests/drivers/net/xdp_helper.c)82
-rw-r--r--tools/testing/selftests/net/mptcp/Makefile2
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh37
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c21
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_diag.c231
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_inq.c16
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh44
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_lib.sh10
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_sockopt.c16
-rw-r--r--tools/testing/selftests/net/net_helper.sh25
-rw-r--r--tools/testing/selftests/net/netfilter/Makefile2
-rwxr-xr-xtools/testing/selftests/net/netfilter/br_netfilter.sh3
-rwxr-xr-xtools/testing/selftests/net/netfilter/bridge_brouter.sh2
-rw-r--r--tools/testing/selftests/net/netfilter/config1
-rwxr-xr-xtools/testing/selftests/net/netfilter/conntrack_resize.sh427
-rwxr-xr-xtools/testing/selftests/net/netfilter/conntrack_vrf.sh37
-rwxr-xr-xtools/testing/selftests/net/netfilter/ipvs.sh6
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_concat_range.sh204
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_fib.sh635
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_interface_stress.sh154
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_nat_zones.sh2
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_queue.sh38
-rwxr-xr-xtools/testing/selftests/net/netfilter/rpath.sh18
-rw-r--r--tools/testing/selftests/net/ovpn/.gitignore2
-rw-r--r--tools/testing/selftests/net/ovpn/Makefile32
-rw-r--r--tools/testing/selftests/net/ovpn/common.sh108
-rw-r--r--tools/testing/selftests/net/ovpn/config10
-rw-r--r--tools/testing/selftests/net/ovpn/data64.key5
-rw-r--r--tools/testing/selftests/net/ovpn/ovpn-cli.c2383
-rw-r--r--tools/testing/selftests/net/ovpn/tcp_peers.txt5
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-chachapoly.sh9
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-close-socket-tcp.sh9
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-close-socket.sh45
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-float.sh9
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-tcp.sh9
-rwxr-xr-xtools/testing/selftests/net/ovpn/test.sh117
-rw-r--r--tools/testing/selftests/net/ovpn/udp_peers.txt6
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh1
-rw-r--r--tools/testing/selftests/net/reuseport_addr_any.c36
-rw-r--r--tools/testing/selftests/net/skf_net_off.c244
-rwxr-xr-xtools/testing/selftests/net/skf_net_off.sh30
-rwxr-xr-xtools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh5
-rwxr-xr-xtools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh5
-rwxr-xr-xtools/testing/selftests/net/srv6_end_flavors_test.sh4
-rwxr-xr-xtools/testing/selftests/net/srv6_end_next_csid_l3vpn_test.sh77
-rwxr-xr-xtools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh83
-rwxr-xr-xtools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh74
-rwxr-xr-xtools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh83
-rwxr-xr-xtools/testing/selftests/net/test_bridge_neigh_suppress.sh125
-rw-r--r--tools/testing/selftests/net/tls.c36
-rwxr-xr-xtools/testing/selftests/net/udpgro.sh2
-rwxr-xr-xtools/testing/selftests/net/udpgro_bench.sh2
-rwxr-xr-xtools/testing/selftests/net/udpgro_frglist.sh2
-rwxr-xr-xtools/testing/selftests/net/udpgro_fwd.sh2
-rw-r--r--tools/testing/selftests/nolibc/Makefile28
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test-linkage.c2
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test.c331
-rwxr-xr-xtools/testing/selftests/nolibc/run-tests.sh7
-rw-r--r--tools/testing/selftests/pcie_bwctrl/Makefile3
-rw-r--r--tools/testing/selftests/perf_events/watermark_signal.c2
-rw-r--r--tools/testing/selftests/pid_namespace/pid_max.c1
-rw-r--r--tools/testing/selftests/pidfd/pidfd.h22
-rw-r--r--tools/testing/selftests/pidfd/pidfd_bind_mount.c74
-rw-r--r--tools/testing/selftests/pidfd/pidfd_info_test.c13
-rw-r--r--tools/testing/selftests/ptrace/Makefile2
-rw-r--r--tools/testing/selftests/ptrace/set_syscall_info.c519
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/console-badness.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/srcu_lockdep.sh42
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/torture.sh89
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE012
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot2
-rwxr-xr-xtools/testing/selftests/run_kselftest.sh9
-rw-r--r--tools/testing/selftests/sched_ext/Makefile3
-rw-r--r--tools/testing/selftests/sched_ext/allowed_cpus.bpf.c144
-rw-r--r--tools/testing/selftests/sched_ext/allowed_cpus.c84
-rw-r--r--tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c74
-rw-r--r--tools/testing/selftests/sched_ext/enq_select_cpu.c88
-rw-r--r--tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c43
-rw-r--r--tools/testing/selftests/sched_ext/enq_select_cpu_fails.c61
-rw-r--r--tools/testing/selftests/seccomp/seccomp_benchmark.c2
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c13
-rwxr-xr-xtools/testing/selftests/sysctl/sysctl.sh30
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/infra/actions.json22
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json477
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/codel.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json22
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_codel.json22
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json22
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/hhf.json22
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/pie.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json36
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.sh4
-rw-r--r--tools/testing/selftests/thermal/intel/power_floor/power_floor_test.c2
-rw-r--r--tools/testing/selftests/thermal/intel/workload_hint/workload_hint_test.c4
-rw-r--r--tools/testing/selftests/timens/clock_nanosleep.c4
-rw-r--r--tools/testing/selftests/timens/exec.c2
-rw-r--r--tools/testing/selftests/timens/futex.c2
-rw-r--r--tools/testing/selftests/timens/gettime_perf.c2
-rw-r--r--tools/testing/selftests/timens/procfs.c2
-rw-r--r--tools/testing/selftests/timens/timens.c2
-rw-r--r--tools/testing/selftests/timens/timer.c4
-rw-r--r--tools/testing/selftests/timens/timerfd.c6
-rw-r--r--tools/testing/selftests/timens/vfork_exec.c2
-rw-r--r--tools/testing/selftests/tpm2/.gitignore3
-rwxr-xr-xtools/testing/selftests/tpm2/test_smoke.sh2
-rw-r--r--tools/testing/selftests/ublk/Makefile21
-rw-r--r--tools/testing/selftests/ublk/fault_inject.c103
-rw-r--r--tools/testing/selftests/ublk/file_backed.c17
-rw-r--r--tools/testing/selftests/ublk/kublk.c505
-rw-r--r--tools/testing/selftests/ublk/kublk.h69
-rw-r--r--tools/testing/selftests/ublk/null.c55
-rw-r--r--tools/testing/selftests/ublk/stripe.c54
-rwxr-xr-xtools/testing/selftests/ublk/test_common.sh177
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_04.sh40
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_05.sh44
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_06.sh41
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_07.sh28
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_08.sh32
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_09.sh28
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_10.sh30
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_11.sh44
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_01.sh8
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_02.sh8
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_03.sh8
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_04.sh9
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_05.sh8
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_01.sh45
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_02.sh47
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_03.sh45
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_04.sh44
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_05.sh73
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_01.sh12
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_02.sh13
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_03.sh12
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_04.sh21
-rwxr-xr-xtools/testing/selftests/wireguard/netns.sh29
-rw-r--r--tools/testing/selftests/wireguard/qemu/Makefile3
-rw-r--r--tools/testing/selftests/wireguard/qemu/debug.config1
-rw-r--r--tools/testing/selftests/x86/Makefile3
-rw-r--r--tools/testing/selftests/x86/apx.c10
-rw-r--r--tools/testing/selftests/x86/bugs/Makefile3
-rwxr-xr-xtools/testing/selftests/x86/bugs/common.py164
-rwxr-xr-xtools/testing/selftests/x86/bugs/its_indirect_alignment.py150
-rwxr-xr-xtools/testing/selftests/x86/bugs/its_permutations.py109
-rwxr-xr-xtools/testing/selftests/x86/bugs/its_ret_alignment.py139
-rwxr-xr-xtools/testing/selftests/x86/bugs/its_sysfs.py65
-rw-r--r--tools/testing/selftests/x86/lam.c9
-rw-r--r--tools/testing/selftests/x86/xstate.c3
-rw-r--r--tools/testing/selftests/x86/xstate.h2
-rw-r--r--tools/testing/shared/linux.c4
-rw-r--r--tools/testing/shared/linux/cleanup.h2
-rw-r--r--tools/testing/vma/Makefile2
-rw-r--r--tools/testing/vma/vma.c127
-rw-r--r--tools/testing/vma/vma_internal.h286
-rw-r--r--tools/testing/vsock/timeout.c18
-rw-r--r--tools/testing/vsock/timeout.h1
-rw-r--r--tools/testing/vsock/util.c38
-rw-r--r--tools/testing/vsock/util.h2
-rw-r--r--tools/testing/vsock/vsock_test.c131
-rw-r--r--tools/tracing/rtla/README.txt7
-rw-r--r--tools/tracing/rtla/src/osnoise_hist.c5
-rw-r--r--tools/tracing/rtla/src/osnoise_top.c5
-rw-r--r--tools/tracing/rtla/src/timerlat_bpf.c1
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c5
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c5
-rw-r--r--tools/tracing/rtla/src/utils.c2
-rw-r--r--tools/tracing/rtla/src/utils.h6
-rw-r--r--tools/tracing/rtla/tests/engine.sh7
-rw-r--r--tools/tracing/rtla/tests/hwnoise.t4
-rw-r--r--tools/tracing/rtla/tests/osnoise.t6
-rw-r--r--tools/tracing/rtla/tests/timerlat.t12
-rw-r--r--usr/include/Makefile4
-rw-r--r--virt/kvm/Kconfig2
-rw-r--r--virt/kvm/dirty_ring.c11
-rw-r--r--virt/kvm/eventfd.c10
-rw-r--r--virt/kvm/kvm_main.c107
11932 files changed, 556183 insertions, 233339 deletions
diff --git a/.clang-format b/.clang-format
index fe1aa1a30d40..48405c54ef27 100644
--- a/.clang-format
+++ b/.clang-format
@@ -92,6 +92,7 @@ ForEachMacros:
- '__rq_for_each_bio'
- '__shost_for_each_device'
- '__sym_for_each'
+ - '_for_each_counter'
- 'apei_estatus_for_each_section'
- 'ata_for_each_dev'
- 'ata_for_each_link'
@@ -141,11 +142,14 @@ ForEachMacros:
- 'damon_for_each_target_safe'
- 'damos_for_each_filter'
- 'damos_for_each_filter_safe'
+ - 'damos_for_each_ops_filter'
+ - 'damos_for_each_ops_filter_safe'
- 'damos_for_each_quota_goal'
- 'damos_for_each_quota_goal_safe'
- 'data__for_each_file'
- 'data__for_each_file_new'
- 'data__for_each_file_start'
+ - 'def_for_each_cpu'
- 'device_for_each_child_node'
- 'device_for_each_child_node_scoped'
- 'dma_fence_array_for_each'
@@ -176,6 +180,7 @@ ForEachMacros:
- 'drm_for_each_privobj'
- 'drm_gem_for_each_gpuvm_bo'
- 'drm_gem_for_each_gpuvm_bo_safe'
+ - 'drm_gpusvm_for_each_range'
- 'drm_gpuva_for_each_op'
- 'drm_gpuva_for_each_op_from_reverse'
- 'drm_gpuva_for_each_op_reverse'
@@ -216,8 +221,10 @@ ForEachMacros:
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
+ - 'for_each_active_irq'
- 'for_each_active_route'
- 'for_each_aggr_pgid'
+ - 'for_each_alloc_capable_rdt_resource'
- 'for_each_and_bit'
- 'for_each_andnot_bit'
- 'for_each_available_child_of_node'
@@ -228,6 +235,7 @@ ForEachMacros:
- 'for_each_btf_ext_rec'
- 'for_each_btf_ext_sec'
- 'for_each_bvec'
+ - 'for_each_capable_rdt_resource'
- 'for_each_card_auxs'
- 'for_each_card_auxs_safe'
- 'for_each_card_components'
@@ -241,6 +249,7 @@ ForEachMacros:
- 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
- 'for_each_child_of_node_scoped'
+ - 'for_each_child_of_node_with_prefix'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_clear_bitrange'
@@ -296,6 +305,7 @@ ForEachMacros:
- 'for_each_group_member_head'
- 'for_each_hstate'
- 'for_each_hwgpio'
+ - 'for_each_hwgpio_in_range'
- 'for_each_if'
- 'for_each_inject_fn'
- 'for_each_insn'
@@ -304,6 +314,7 @@ ForEachMacros:
- 'for_each_intid'
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
+ - 'for_each_irq_desc'
- 'for_each_irq_nr'
- 'for_each_lang'
- 'for_each_link_ch_maps'
@@ -324,6 +335,8 @@ ForEachMacros:
- 'for_each_missing_reg'
- 'for_each_mle_subelement'
- 'for_each_mod_mem_type'
+ - 'for_each_mon_capable_rdt_resource'
+ - 'for_each_mp_bvec'
- 'for_each_net'
- 'for_each_net_continue_reverse'
- 'for_each_net_rcu'
@@ -351,6 +364,7 @@ ForEachMacros:
- 'for_each_node_by_name'
- 'for_each_node_by_type'
- 'for_each_node_mask'
+ - 'for_each_node_numadist'
- 'for_each_node_state'
- 'for_each_node_with_cpus'
- 'for_each_node_with_property'
@@ -359,6 +373,8 @@ ForEachMacros:
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
- 'for_each_of_cpu_node'
+ - 'for_each_of_graph_port'
+ - 'for_each_of_graph_port_endpoint'
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
@@ -372,9 +388,11 @@ ForEachMacros:
- 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
- 'for_each_online_cpu'
+ - 'for_each_online_cpu_wrap'
- 'for_each_online_node'
- 'for_each_online_pgdat'
- 'for_each_or_bit'
+ - 'for_each_page_ext'
- 'for_each_path'
- 'for_each_pci_bridge'
- 'for_each_pci_dev'
@@ -382,8 +400,10 @@ ForEachMacros:
- 'for_each_physmem_range'
- 'for_each_populated_zone'
- 'for_each_possible_cpu'
+ - 'for_each_possible_cpu_wrap'
- 'for_each_present_blessed_reg'
- 'for_each_present_cpu'
+ - 'for_each_present_section_nr'
- 'for_each_prime_number'
- 'for_each_prime_number_from'
- 'for_each_probe_cache_entry'
@@ -396,6 +416,7 @@ ForEachMacros:
- 'for_each_prop_dlc_cpus'
- 'for_each_prop_dlc_platforms'
- 'for_each_property_of_node'
+ - 'for_each_rdt_resource'
- 'for_each_reg'
- 'for_each_reg_filtered'
- 'for_each_reloc'
@@ -434,10 +455,10 @@ ForEachMacros:
- 'for_each_subelement_id'
- 'for_each_sublist'
- 'for_each_subsystem'
+ - 'for_each_suite'
- 'for_each_supported_activate_fn'
- 'for_each_supported_inject_fn'
- 'for_each_sym'
- - 'for_each_test'
- 'for_each_thread'
- 'for_each_token'
- 'for_each_unicast_dest_pgid'
@@ -499,8 +520,10 @@ ForEachMacros:
- 'idr_for_each_entry_continue'
- 'idr_for_each_entry_continue_ul'
- 'idr_for_each_entry_ul'
+ - 'iio_for_each_active_channel'
- 'in_dev_for_each_ifa_rcu'
- 'in_dev_for_each_ifa_rtnl'
+ - 'in_dev_for_each_ifa_rtnl_net'
- 'inet_bind_bucket_for_each'
- 'interval_tree_for_each_span'
- 'intlist__for_each_entry'
@@ -542,7 +565,6 @@ ForEachMacros:
- 'list_for_each_prev'
- 'list_for_each_prev_safe'
- 'list_for_each_rcu'
- - 'list_for_each_reverse'
- 'list_for_each_safe'
- 'llist_for_each'
- 'llist_for_each_entry'
@@ -552,6 +574,7 @@ ForEachMacros:
- 'map__for_each_symbol'
- 'map__for_each_symbol_by_name'
- 'mas_for_each'
+ - 'mas_for_each_rev'
- 'mci_for_each_dimm'
- 'media_device_for_each_entity'
- 'media_device_for_each_intf'
@@ -561,10 +584,15 @@ ForEachMacros:
- 'media_pipeline_for_each_entity'
- 'media_pipeline_for_each_pad'
- 'mlx5_lag_for_each_peer_mdev'
+ - 'mptcp_for_each_subflow'
- 'msi_domain_for_each_desc'
- 'msi_for_each_desc'
- 'mt_for_each'
+ - 'nanddev_io_for_each_block'
- 'nanddev_io_for_each_page'
+ - 'neigh_for_each_in_bucket'
+ - 'neigh_for_each_in_bucket_rcu'
+ - 'neigh_for_each_in_bucket_safe'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
- 'netdev_for_each_lower_private_rcu'
@@ -604,11 +632,11 @@ ForEachMacros:
- 'perf_evlist__for_each_entry_safe'
- 'perf_evlist__for_each_evsel'
- 'perf_evlist__for_each_mmap'
+ - 'perf_evsel_for_each_per_thread_period_safe'
- 'perf_hpp_list__for_each_format'
- 'perf_hpp_list__for_each_format_safe'
- 'perf_hpp_list__for_each_sort_list'
- 'perf_hpp_list__for_each_sort_list_safe'
- - 'perf_tool_event__for_each_event'
- 'plist_for_each'
- 'plist_for_each_continue'
- 'plist_for_each_entry'
@@ -627,7 +655,6 @@ ForEachMacros:
- 'rdma_for_each_block'
- 'rdma_for_each_port'
- 'rdma_umem_for_each_dma_block'
- - 'resort_rb__for_each_entry'
- 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu'
@@ -658,6 +685,7 @@ ForEachMacros:
- 'shost_for_each_device'
- 'sk_for_each'
- 'sk_for_each_bound'
+ - 'sk_for_each_bound_safe'
- 'sk_for_each_entry_offset_rcu'
- 'sk_for_each_from'
- 'sk_for_each_rcu'
@@ -680,7 +708,11 @@ ForEachMacros:
- 'tb_property_for_each'
- 'tcf_act_for_each_action'
- 'tcf_exts_for_each_action'
+ - 'test_suite__for_each_test_case'
+ - 'tool_pmu__for_each_event'
+ - 'ttm_bo_lru_for_each_reserved_guarded'
- 'ttm_resource_manager_for_each_res'
+ - 'udp_lrpa_for_each_entry_rcu'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
@@ -690,7 +722,15 @@ ForEachMacros:
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
+ - 'vkms_config_for_each_connector'
+ - 'vkms_config_for_each_crtc'
+ - 'vkms_config_for_each_encoder'
+ - 'vkms_config_for_each_plane'
+ - 'vkms_config_connector_for_each_possible_encoder'
+ - 'vkms_config_encoder_for_each_possible_crtc'
+ - 'vkms_config_plane_for_each_possible_crtc'
- 'while_for_each_ftrace_op'
+ - 'workloads__for_each'
- 'xa_for_each'
- 'xa_for_each_marked'
- 'xa_for_each_range'
diff --git a/.clippy.toml b/.clippy.toml
index 815c94732ed7..137f41d203de 100644
--- a/.clippy.toml
+++ b/.clippy.toml
@@ -7,5 +7,5 @@ check-private-items = true
disallowed-macros = [
# The `clippy::dbg_macro` lint only works with `std::dbg!`, thus we simulate
# it here, see: https://github.com/rust-lang/rust-clippy/issues/11303.
- { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool" },
+ { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool", allow-invalid = true },
]
diff --git a/.gitignore b/.gitignore
index f2f63e47fb88..bf5ee6e01cd4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,6 +40,7 @@
*.o
*.o.*
*.patch
+*.pyc
*.rmeta
*.rpm
*.rsi
diff --git a/.mailmap b/.mailmap
index 4f7cd8e23177..074082ce9299 100644
--- a/.mailmap
+++ b/.mailmap
@@ -102,6 +102,7 @@ Ard Biesheuvel <ardb@kernel.org> <ard.biesheuvel@linaro.org>
Arnaud Patard <arnaud.patard@rtp-net.org>
Arnd Bergmann <arnd@arndb.de>
Arun Kumar Neelakantam <quic_aneela@quicinc.com> <aneela@codeaurora.org>
+Asahi Lina <lina+kernel@asahilina.net> <lina@asahilina.net>
Ashok Raj Nagarajan <quic_arnagara@quicinc.com> <arnagara@codeaurora.org>
Ashwin Chaugule <quic_ashwinc@quicinc.com> <ashwinc@codeaurora.org>
Asutosh Das <quic_asutoshd@quicinc.com> <asutoshd@codeaurora.org>
@@ -134,6 +135,7 @@ Ben Widawsky <bwidawsk@kernel.org> <benjamin.widawsky@intel.com>
Benjamin Poirier <benjamin.poirier@gmail.com> <bpoirier@suse.de>
Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@gmail.com>
Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@redhat.com>
+Benno Lossin <lossin@kernel.org> <benno.lossin@proton.me>
Bingwu Zhang <xtex@aosc.io> <xtexchooser@duck.com>
Bingwu Zhang <xtex@aosc.io> <xtex@xtexx.eu.org>
Bjorn Andersson <andersson@kernel.org> <bjorn@kryo.se>
@@ -154,6 +156,9 @@ Brian King <brking@us.ibm.com>
Brian Silverman <bsilver16384@gmail.com> <brian.silverman@bluerivertech.com>
Bryan Tan <bryan-bt.tan@broadcom.com> <bryantan@vmware.com>
Cai Huoqing <cai.huoqing@linux.dev> <caihuoqing@baidu.com>
+Casey Connolly <casey.connolly@linaro.org> <caleb.connolly@linaro.org>
+Casey Connolly <casey.connolly@linaro.org> <caleb@connolly.tech>
+Casey Connolly <casey.connolly@linaro.org> <caleb@postmarketos.org>
Can Guo <quic_cang@quicinc.com> <cang@codeaurora.org>
Carl Huang <quic_cjhuang@quicinc.com> <cjhuang@codeaurora.org>
Carlos Bilbao <carlos.bilbao@kernel.org> <carlos.bilbao@amd.com>
@@ -312,6 +317,7 @@ Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
Jan Kuliga <jtkuliga.kdev@gmail.com> <jankul@alatek.krakow.pl>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com>
+Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@opinsys.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
@@ -322,6 +328,7 @@ Jayachandran C <c.jayachandran@gmail.com> <jchandra@broadcom.com>
Jayachandran C <c.jayachandran@gmail.com> <jchandra@digeo.com>
Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
<jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
+Jean-Michel Hautbois <jeanmichel.hautbois@yoseli.org> <jeanmichel.hautbois@ideasonboard.com>
Jean Tourrilhes <jt@hpl.hp.com>
Jeevan Shriram <quic_jshriram@quicinc.com> <jshriram@codeaurora.org>
Jeff Garzik <jgarzik@pretzel.yyz.us>
@@ -413,6 +420,8 @@ Krishna Manikandan <quic_mkrishn@quicinc.com> <mkrishn@codeaurora.org>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
Krzysztof Kozlowski <krzk@kernel.org> <krzysztof.kozlowski@canonical.com>
+Krzysztof Wilczyński <kwilczynski@kernel.org> <krzysztof.wilczynski@linux.com>
+Krzysztof Wilczyński <kwilczynski@kernel.org> <kw@linux.com>
Kshitiz Godara <quic_kgodara@quicinc.com> <kgodara@codeaurora.org>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Kuogee Hsieh <quic_khsieh@quicinc.com> <khsieh@codeaurora.org>
@@ -438,12 +447,16 @@ Linus Lüssing <linus.luessing@c0d3.blue> <ll@simonwunderlich.de>
Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Lior David <quic_liord@quicinc.com> <liord@codeaurora.org>
+Loic Poulain <loic.poulain@oss.qualcomm.com> <loic.poulain@linaro.org>
+Loic Poulain <loic.poulain@oss.qualcomm.com> <loic.poulain@intel.com>
Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
Lorenzo Stoakes <lorenzo.stoakes@oracle.com> <lstoakes@gmail.com>
Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
Luca Weiss <luca@lucaweiss.eu> <luca@z3ntu.xyz>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Luo Jie <quic_luoj@quicinc.com> <luoj@codeaurora.org>
+Lance Yang <lance.yang@linux.dev> <ioworker0@gmail.com>
+Lance Yang <lance.yang@linux.dev> <mingzhe.yang@ly.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
Maharaja Kennadyrajan <quic_mkenna@quicinc.com> <mkenna@codeaurora.org>
@@ -451,6 +464,7 @@ Maheshwar Ajja <quic_majja@quicinc.com> <majja@codeaurora.org>
Malathi Gottam <quic_mgottam@quicinc.com> <mgottam@codeaurora.org>
Manikanta Pubbisetty <quic_mpubbise@quicinc.com> <mpubbise@codeaurora.org>
Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
+Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
Manoj Basapathi <quic_manojbm@quicinc.com> <manojbm@codeaurora.org>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
@@ -480,6 +494,7 @@ Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
Matthieu Baerts <matttbe@kernel.org> <matthieu.baerts@tessares.net>
Matthieu CASTET <castet.matthieu@free.fr>
Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
+Mattijs Korpershoek <mkorpershoek@kernel.org> <mkorpershoek@baylibre.com>
Matt Ranostay <matt@ranostay.sg> <matt.ranostay@konsulko.com>
Matt Ranostay <matt@ranostay.sg> <matt@ranostay.consulting>
Matt Ranostay <matt@ranostay.sg> Matthew Ranostay <mranostay@embeddedalley.com>
@@ -503,6 +518,7 @@ Mayuresh Janorkar <mayur@ti.com>
Md Sadre Alam <quic_mdalam@quicinc.com> <mdalam@codeaurora.org>
Miaoqing Pan <quic_miaoqing@quicinc.com> <miaoqing@codeaurora.org>
Michael Buesch <m@bues.ch>
+Michael Riesch <michael.riesch@collabora.com> <michael.riesch@wolfvision.net>
Michal Simek <michal.simek@amd.com> <michal.simek@xilinx.com>
Michel Dänzer <michel@tungstengraphics.com>
Michel Lespinasse <michel@lespinasse.org>
@@ -537,6 +553,8 @@ Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.vnet.ibm.com>
Neeraj Upadhyay <neeraj.upadhyay@kernel.org> <quic_neeraju@quicinc.com>
Neeraj Upadhyay <neeraj.upadhyay@kernel.org> <neeraju@codeaurora.org>
Neil Armstrong <neil.armstrong@linaro.org> <narmstrong@baylibre.com>
+NeilBrown <neil@brown.name> <neilb@suse.de>
+NeilBrown <neil@brown.name> <neilb@cse.unsw.edu.au>
Nguyen Anh Quynh <aquynh@gmail.com>
Nicholas Piggin <npiggin@gmail.com> <npiggen@suse.de>
Nicholas Piggin <npiggin@gmail.com> <npiggin@kernel.dk>
@@ -685,6 +703,8 @@ Simon Wunderlich <sw@simonwunderlich.de> <simon.wunderlich@saxnet.de>
Simon Wunderlich <sw@simonwunderlich.de> <simon@open-mesh.com>
Simon Wunderlich <sw@simonwunderlich.de> <siwu@hrz.tu-chemnitz.de>
Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
+Srinivas Kandagatla <srini@kernel.org> <srinivas.kandagatla@st.com>
+Srinivas Kandagatla <srini@kernel.org> <srinivas.kandagatla@linaro.org>
Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
Sriram Yagnaraman <sriram.yagnaraman@ericsson.com> <sriram.yagnaraman@est.tech>
@@ -712,6 +732,7 @@ Sven Eckelmann <sven@narfation.org> <sven.eckelmann@gmx.de>
Sven Eckelmann <sven@narfation.org> <sven.eckelmann@open-mesh.com>
Sven Eckelmann <sven@narfation.org> <sven.eckelmann@openmesh.com>
Sven Eckelmann <sven@narfation.org> <sven@open-mesh.com>
+Sven Peter <sven@kernel.org> <sven@svenpeter.dev>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Tamizh Chelvam Raja <quic_tamizhr@quicinc.com> <tamizhr@codeaurora.org>
Taniya Das <quic_tdas@quicinc.com> <tdas@codeaurora.org>
@@ -744,6 +765,7 @@ Tvrtko Ursulin <tursulin@ursulin.net> <tvrtko@ursulin.net>
Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
+Uwe Kleine-König <u.kleine-koenig@baylibre.com> <ukleinek@baylibre.com>
Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Uwe Kleine-König <ukleinek@strlen.de>
Uwe Kleine-König <ukl@pengutronix.de>
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 000000000000..30b8ae1659f8
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,2 @@
+[MASTER]
+init-hook='import sys; sys.path += ["scripts/lib/kdoc", "scripts/lib/abi"]'
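The new .pylintrc consists of a single init-hook, which pylint evaluates before linting; its only effect is to extend sys.path so the in-tree helper packages resolve. A minimal sketch of the equivalent Python, assuming pylint is invoked from the top of the source tree:

    # Equivalent of the .pylintrc init-hook above (illustrative sketch, run from
    # the top of the kernel tree): make the in-tree helper libraries importable
    # so pylint can resolve imports from the kdoc and ABI helper packages when
    # checking the scripts that use them.
    import sys

    sys.path += ["scripts/lib/kdoc", "scripts/lib/abi"]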
diff --git a/CREDITS b/CREDITS
index 1b77fba6c27e..45446ae322ec 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2071,6 +2071,10 @@ S: 660 Harvard Ave. #7
S: Santa Clara, CA 95051
S: USA
+N: Joonsoo Kim
+E: iamjoonsoo.kim@lge.com
+D: Slab allocators
+
N: Kukjin Kim
E: kgene@kernel.org
D: Samsung S3C, S5P and Exynos ARM architectures
@@ -2332,7 +2336,7 @@ D: Author of the dialog utility, foundation
D: for Menuconfig's lxdialog.
N: Christoph Lameter
-E: christoph@lameter.com
+E: cl@gentwo.org
D: Digiboard PC/Xe and PC/Xi, Digiboard EPCA
D: NUMA support, Slab allocators, Page migration
D: Scalability, Time subsystem
diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block
index 3879963f0f01..4ba771b56b3b 100644
--- a/Documentation/ABI/stable/sysfs-block
+++ b/Documentation/ABI/stable/sysfs-block
@@ -77,7 +77,7 @@ Description:
What: /sys/block/<disk>/diskseq
Date: February 2021
-Contact: Matteo Croce <mcroce@microsoft.com>
+Contact: Matteo Croce <teknoraver@meta.com>
Description:
The /sys/block/<disk>/diskseq files reports the disk
sequence number, which is a monotonically increasing
@@ -547,6 +547,21 @@ Description:
[RO] Maximum size in bytes of a single element in a DMA
scatter/gather list.
+What: /sys/block/<disk>/queue/max_write_streams
+Date: November 2024
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] Maximum number of write streams supported, 0 if not
+ supported. If supported, valid values are 1 through
+ max_write_streams, inclusive.
+
+What: /sys/block/<disk>/queue/write_stream_granularity
+Date: November 2024
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] Granularity of a write stream in bytes. The granularity
+ of a write stream is the size that should be discarded or
+ overwritten together to avoid write amplification in the device.
What: /sys/block/<disk>/queue/max_segments
Date: March 2010
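The two queue attributes documented above are plain read-only sysfs files. A hypothetical reader is sketched below; the disk name and the choice of Python are purely illustrative and not part of the patch:

    # Illustrative only: read the new write-stream queue attributes for one disk.
    # "nvme0n1" is an assumed example device; per the ABI text, max_write_streams
    # reports 0 when the device does not support write streams.
    from pathlib import Path

    queue = Path("/sys/block/nvme0n1/queue")
    max_streams = int((queue / "max_write_streams").read_text())
    granularity = int((queue / "write_stream_granularity").read_text())
    print(f"max_write_streams={max_streams}, granularity={granularity} bytes")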
diff --git a/Documentation/ABI/stable/sysfs-class-backlight b/Documentation/ABI/stable/sysfs-class-backlight
index 6102d6bebdf9..40b8c46b95b2 100644
--- a/Documentation/ABI/stable/sysfs-class-backlight
+++ b/Documentation/ABI/stable/sysfs-class-backlight
@@ -26,7 +26,12 @@ Date: March 2006
KernelVersion: 2.6.17
Contact: Richard Purdie <rpurdie@rpsys.net>
Description:
- Show the actual brightness by querying the hardware.
+ Show the actual brightness by querying the hardware. Due
+ to implementation differences in hardware this may not
+ match the value in 'brightness'. For example some hardware
+ may treat blanking differently or have custom power saving
+ features. Userspace should generally use the values in
+ 'brightness' to make decisions.
Users: HAL
What: /sys/class/backlight/<backlight>/max_brightness
diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
index 2cdfd09123da..f59461111221 100644
--- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io
+++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
@@ -715,3 +715,101 @@ Description: This file shows 1 in case the system reset happened due to the
switch board.
The file is read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/global_wp_request
+Date: May 2025
+KernelVersion: 6.16
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description: This file when written 1 activates request to allow access to
+ the write protected flashes. Such request can be performed only
+ for system equipped with BMC (Board Management Controller),
+ which can grant access to protected flashes. In case BMC allows
+ access - it will respond with "global_wp_response". BMC decides
+ regarding time window of granted access. After granted window is
+ expired, BMC will change value back to 0.
+ Default value is 0.
+
+ The file is read/write.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/global_wp_response
+Date: May 2025
+KernelVersion: 6.16
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description: This file, when set 1, indicates that access to protected
+ flashes have been granted to host CPU by BMC.
+ Default value is 0.
+
+ The file is read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/shutdown_unlock
+Date: May 2025
+KernelVersion: 6.16
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description: When ASICs are getting overheated, system protection
+ hardware mechanism enforces system reboot. After system
+ reboot ASICs come up in locked state. To unlock ASICs,
+ this file should be written 1
+ Default value is 0.
+
+ The file is read/write.
+
+What: /sys/devices/platform/mlxplat/i2c_mlxcpld.*/i2c-*/i2c-*/*-00**/mlxreg-io.*/hwmon/hwmon*/boot_progress
+Date: May 2025
+KernelVersion: 6.16
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description: These files show the Data Process Unit board boot progress
+ state. Valid states are:
+ - 4 : OS starting.
+ - 5 : OS running.
+ - 6 : Low-Power Standby.
+
+ The file is read only.
+
+What: /sys/devices/platform/mlxplat/i2c_mlxcpld.*/i2c-*/i2c-*/*-00**/mlxreg-io.*/hwmon/hwmon*/dpu_id
+Date: May 2025
+KernelVersion: 6.16
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description: This file shows hardware Id of Data Process Unit board.
+
+ The file is read only.
+
+What: /sys/devices/platform/mlxplat/i2c_mlxcpld.*/i2c-*/i2c-*/*-00**/mlxreg-io.*/hwmon/hwmon*/reset_aux_pwr_or_reload
+What: /sys/devices/platform/mlxplat/i2c_mlxcpld.*/i2c-*/i2c-*/*-00**/mlxreg-io.*/hwmon/hwmon*/reset_dpu_thermal
+What: /sys/devices/platform/mlxplat/i2c_mlxcpld.*/i2c-*/i2c-*/*-00**/mlxreg-io.*/hwmon/hwmon*/reset_from_main_board
+Date: May 2025
+KernelVersion: 6.16
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description: These files expose the cause of the most recent reset of the Data
+ Processing Unit (DPU) board. The possible causes are:
+ - Power auxiliary outage or power reload.
+ - Thermal shutdown.
+ - Reset request from the main board.
+ Value 1 in file means this is reset cause, 0 - otherwise. Only one of
+ the above causes could be 1 at the same time, representing only last
+ reset cause.
+
+ The files are read only.
+
+What: /sys/devices/platform/mlxplat/i2c_mlxcpld.*/i2c-*/i2c-*/*-00**/mlxreg-io.*/hwmon/hwmon*/perst_rst
+What: /sys/devices/platform/mlxplat/i2c_mlxcpld.*/i2c-*/i2c-*/*-00**/mlxreg-io.*/hwmon/hwmon*/phy_rst
+What: /sys/devices/platform/mlxplat/i2c_mlxcpld.*/i2c-*/i2c-*/*-00**/mlxreg-io.*/hwmon/hwmon*/tpm_rst
+What: /sys/devices/platform/mlxplat/i2c_mlxcpld.*/i2c-*/i2c-*/*-00**/mlxreg-io.*/hwmon/hwmon*/usbphy_rst
+Date: May 2025
+KernelVersion: 6.16
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description: These files allow to reset hardware components of Data Process
+ Unit board. Respectively PCI, Ethernet PHY, TPM and USB PHY
+ resets.
+ Default values for all the attributes is 1. Writing 0 will
+ cause reset of the related component.
+
+ The files are read/write.
+
+What: /sys/devices/platform/mlxplat/i2c_mlxcpld.*/i2c-*/i2c-*/*-00**/mlxreg-io.*/hwmon/hwmon*/ufm_upgrade
+Date: May 2025
+KernelVersion: 6.16
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description: These files show status of Unified Fabric Manager upgrade.
+ state. 0 - means upgrade is done, 1 - otherwise.
+
+ The file is read only.
diff --git a/Documentation/ABI/testing/configfs-tsm b/Documentation/ABI/testing/configfs-tsm-report
index 534408bc1408..534408bc1408 100644
--- a/Documentation/ABI/testing/configfs-tsm
+++ b/Documentation/ABI/testing/configfs-tsm-report
diff --git a/Documentation/ABI/testing/debugfs-alienware-wmi b/Documentation/ABI/testing/debugfs-alienware-wmi
new file mode 100644
index 000000000000..c7f525d6baac
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-alienware-wmi
@@ -0,0 +1,64 @@
+What: /sys/kernel/debug/alienware-wmi-<wmi_device_name>/system_description
+Date: March 2025
+KernelVersion: 6.15
+Contact: Kurt Borja <kuurtb@gmail.com>
+Description:
+ This file exposes the raw ``system_description`` number reported
+ by the WMAX device.
+
+ Only present on devices with the AWCC interface.
+
+ See Documentation/admin-guide/laptops/alienware-wmi.rst for
+ details.
+
+ RO
+
+What: /sys/kernel/debug/alienware-wmi-<wmi_device_name>/hwmon_data
+Date: March 2025
+KernelVersion: 6.15
+Contact: Kurt Borja <kuurtb@gmail.com>
+Description:
+ This file exposes HWMON private data.
+
+ Includes fan sensor count, temperature sensor count, internal
+ fan IDs and internal temp IDs.
+
+ See Documentation/admin-guide/laptops/alienware-wmi.rst for
+ details.
+
+ RO
+
+What: /sys/kernel/debug/alienware-wmi-<wmi_device_name>/pprof_data
+Date: March 2025
+KernelVersion: 6.15
+Contact: Kurt Borja <kuurtb@gmail.com>
+Description:
+ This file exposes Platform Profile private data.
+
+ Includes internal mapping to platform profiles and thermal
+ profile IDs.
+
+ See Documentation/admin-guide/laptops/alienware-wmi.rst for
+ details.
+
+ RO
+
+What: /sys/kernel/debug/alienware-wmi-<wmi_device_name>/gpio_ctl/total_gpios
+Date: May 2025
+KernelVersion: 6.16
+Contact: Kurt Borja <kuurtb@gmail.com>
+Description:
+ Total number of GPIO pins reported by the device.
+
+ RO
+
+What: /sys/kernel/debug/alienware-wmi-<wmi_device_name>/gpio_ctl/pinX
+Date: May 2025
+KernelVersion: 6.16
+Contact: Kurt Borja <kuurtb@gmail.com>
+Description:
+ This file controls GPIO pin X status.
+
+ See Documentation/wmi/devices/alienware-wmi.rst for details.
+
+ RW
diff --git a/Documentation/ABI/testing/debugfs-pcie-ptm b/Documentation/ABI/testing/debugfs-pcie-ptm
new file mode 100644
index 000000000000..602d41363571
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-pcie-ptm
@@ -0,0 +1,70 @@
+What: /sys/kernel/debug/pcie_ptm_*/local_clock
+Date: May 2025
+Contact: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Description:
+ (RO) PTM local clock in nanoseconds. Applicable for both Root
+ Complex and Endpoint controllers.
+
+What: /sys/kernel/debug/pcie_ptm_*/master_clock
+Date: May 2025
+Contact: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Description:
+ (RO) PTM master clock in nanoseconds. Applicable only for
+ Endpoint controllers.
+
+What: /sys/kernel/debug/pcie_ptm_*/t1
+Date: May 2025
+Contact: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Description:
+ (RO) PTM T1 timestamp in nanoseconds. Applicable only for
+ Endpoint controllers.
+
+What: /sys/kernel/debug/pcie_ptm_*/t2
+Date: May 2025
+Contact: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Description:
+ (RO) PTM T2 timestamp in nanoseconds. Applicable only for
+ Root Complex controllers.
+
+What: /sys/kernel/debug/pcie_ptm_*/t3
+Date: May 2025
+Contact: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Description:
+ (RO) PTM T3 timestamp in nanoseconds. Applicable only for
+ Root Complex controllers.
+
+What: /sys/kernel/debug/pcie_ptm_*/t4
+Date: May 2025
+Contact: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Description:
+ (RO) PTM T4 timestamp in nanoseconds. Applicable only for
+ Endpoint controllers.
+
+What: /sys/kernel/debug/pcie_ptm_*/context_update
+Date: May 2025
+Contact: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Description:
+ (RW) Control the PTM context update mode. Applicable only for
+ Endpoint controllers.
+
+ Following values are supported:
+
+ * auto = PTM context auto update trigger for every 10ms
+
+ * manual = PTM context manual update. Writing 'manual' to this
+ file triggers PTM context update (default)
+
+What: /sys/kernel/debug/pcie_ptm_*/context_valid
+Date: May 2025
+Contact: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Description:
+ (RW) Control the PTM context validity (local clock timing).
+ Applicable only for Root Complex controllers. PTM context is
+ invalidated by hardware if the Root Complex enters low power
+ mode or changes link frequency.
+
+ Following values are supported:
+
+ * 0 = PTM context invalid (default)
+
+ * 1 = PTM context valid
diff --git a/Documentation/ABI/testing/debugfs-scmi-raw b/Documentation/ABI/testing/debugfs-scmi-raw
index 97678cc9535c..5847b96b3896 100644
--- a/Documentation/ABI/testing/debugfs-scmi-raw
+++ b/Documentation/ABI/testing/debugfs-scmi-raw
@@ -31,6 +31,42 @@ Description: SCMI Raw asynchronous message injection/snooping facility; write
(receiving an EOF at each message boundary).
Users: Debugging, any userspace test suite
+What: /sys/kernel/debug/scmi/<n>/raw/message_poll
+Date: June 2025
+KernelVersion: 6.16
+Contact: cristian.marussi@arm.com
+Description: SCMI Raw message injection/snooping facility using polling mode;
+ write a complete SCMI command message (header included) in
+ little-endian binary format to have it sent to the configured
+ backend SCMI server for instance <n>, using polling mode on
+ the reception path. (if transport is polling capable)
+ Any subsequently received response can be read from this same
+ entry if it arrived within the configured timeout.
+ Each write to the entry causes one command request to be built
+ and sent while the replies are read back one message at time
+ (receiving an EOF at each message boundary).
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/raw/message_poll_async
+Date: June 2025
+KernelVersion: 6.16
+Contact: cristian.marussi@arm.com
+Description: SCMI Raw asynchronous message injection/snooping facility using
+ polling-mode; write a complete SCMI asynchronous command message
+ (header included) in little-endian binary format to have it sent
+ to the configured backend SCMI server for instance <n>, using
+ polling-mode on the reception path of the immediate part of the
+ asynchronous command. (if transport is polling capable)
+ Any subsequently received response can be read from this same
+ entry if it arrived within the configured timeout.
+ Any additional delayed response received afterwards can be read
+ from this same entry too if it arrived within the configured
+ timeout.
+ Each write to the entry causes one command request to be built
+ and sent while the replies are read back one message at time
+ (receiving an EOF at each message boundary).
+Users: Debugging, any userspace test suite
+
What: /sys/kernel/debug/scmi/<n>/raw/errors
Date: March 2023
KernelVersion: 6.3
@@ -115,3 +151,58 @@ Description: SCMI Raw asynchronous message injection/snooping facility; write
exist only if the transport is configured to have more than
one default channel.
Users: Debugging, any userspace test suite
+
+
+What: /sys/kernel/debug/scmi/<n>/raw/channels/<m>/message_poll
+Date: June 2025
+KernelVersion: 6.16
+Contact: cristian.marussi@arm.com
+Description: SCMI Raw message injection/snooping facility using polling mode;
+ write a complete SCMI command message (header included) in
+ little-endian binary format to have it sent to the configured
+ backend SCMI server for instance <n> through the <m> transport
+ channel, using polling mode on the reception path.
+ (if transport is polling capable)
+ Any subsequently received response can be read from this same
+ entry if it arrived on channel <m> within the configured
+ timeout.
+ Each write to the entry causes one command request to be built
+ and sent while the replies are read back one message at a time
+ (receiving an EOF at each message boundary).
+ Channel identifier <m> matches the SCMI protocol number which
+ has been associated with this transport channel in the DT
+ description, with base protocol number 0x10 being the default
+ channel for this instance.
+ Note that these per-channel entries rooted at <..>/channels
+ exist only if the transport is configured to have more than
+ one default channel.
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/raw/channels/<m>/message_poll_async
+Date: June 2025
+KernelVersion: 6.16
+Contact: cristian.marussi@arm.com
+Description: SCMI Raw asynchronous message injection/snooping facility using
+ polling-mode; write a complete SCMI asynchronous command message
+ (header included) in little-endian binary format to have it sent
+ to the configured backend SCMI server for instance <n> through
+ the <m> transport channel, using polling mode on the reception
+ path of the immediate part of the asynchronous command.
+ (if transport is polling capable)
+ Any subsequently received response can be read from this same
+ entry if it arrived on channel <m> within the configured
+ timeout.
+ Any additional delayed response received afterwards can be read
+ from this same entry too if it arrived within the configured
+ timeout.
+ Each write to the entry causes one command request to be built
+ and sent while the replies are read back one message at a time
+ (receiving an EOF at each message boundary).
+ Channel identifier <m> matches the SCMI protocol number which
+ has been associated with this transport channel in the DT
+ description, with base protocol number 0x10 being the default
+ channel for this instance.
+ Note that these per-channel entries rooted at <..>/channels
+ exist only if the transport is configured to have more than
+ one default channel.
+Users: Debugging, any userspace test suite
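
A rough shell sketch of one way these raw injection entries might be driven,
assuming SCMI instance 0 and a polling-capable transport; the 4-byte header
written below is a placeholder, not a valid SCMI command.

    RAW=/sys/kernel/debug/scmi/0/raw

    # Write a complete little-endian SCMI message (header included).
    # The bytes here are illustrative only.
    printf '\x12\x00\x00\x40' > "$RAW/message_poll"

    # Read the reply back from the same entry; each read returns one
    # message and hits EOF at the message boundary.
    od -A x -t x1 "$RAW/message_poll"
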
diff --git a/Documentation/ABI/testing/debugfs-turris-mox-rwtm b/Documentation/ABI/testing/debugfs-turris-mox-rwtm
deleted file mode 100644
index 813987d5de4e..000000000000
--- a/Documentation/ABI/testing/debugfs-turris-mox-rwtm
+++ /dev/null
@@ -1,14 +0,0 @@
-What: /sys/kernel/debug/turris-mox-rwtm/do_sign
-Date: Jun 2020
-KernelVersion: 5.8
-Contact: Marek Behún <kabel@kernel.org>
-Description:
-
- ======= ===========================================================
- (Write) Message to sign with the ECDSA private key stored in
- device's OTP. The message must be exactly 64 bytes
- (since this is intended for SHA-512 hashes).
- (Read) The resulting signature, 136 bytes. This contains the
- R and S values of the ECDSA signature, both in
- big-endian format.
- ======= ===========================================================
diff --git a/Documentation/ABI/testing/sysfs-bus-cxl b/Documentation/ABI/testing/sysfs-bus-cxl
index 99bb3faf7a0e..6b4e8c7a963d 100644
--- a/Documentation/ABI/testing/sysfs-bus-cxl
+++ b/Documentation/ABI/testing/sysfs-bus-cxl
@@ -242,7 +242,7 @@ Description:
decoding a Host Physical Address range. Note that this number
may be elevated without any regionX objects active or even
enumerated, as this may be due to decoders established by
- platform firwmare or a previous kernel (kexec).
+ platform firmware or a previous kernel (kexec).
What: /sys/bus/cxl/devices/decoderX.Y
@@ -572,7 +572,7 @@ Description:
What: /sys/bus/cxl/devices/regionZ/accessY/read_bandwidth
- /sys/bus/cxl/devices/regionZ/accessY/write_banwidth
+ /sys/bus/cxl/devices/regionZ/accessY/write_bandwidth
Date: Jan, 2024
KernelVersion: v6.9
Contact: linux-cxl@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats b/Documentation/ABI/testing/sysfs-bus-pci-devices-aer
index d1f67bb81d5d..5ed284523956 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats
+++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-aer
@@ -117,3 +117,47 @@ Date: July 2018
KernelVersion: 4.19.0
Contact: linux-pci@vger.kernel.org, rajatja@google.com
Description: Total number of ERR_NONFATAL messages reported to rootport.
+
+PCIe AER ratelimits
+-------------------
+
+These attributes show up under all the devices that are AER capable.
+They represent configurable ratelimits of logs per error type.
+
+See Documentation/PCI/pcieaer-howto.rst for more info on ratelimits.
+
+What: /sys/bus/pci/devices/<dev>/aer/correctable_ratelimit_interval_ms
+Date: May 2025
+KernelVersion: 6.16.0
+Contact: linux-pci@vger.kernel.org
+Description: Writing 0 disables AER correctable error log ratelimiting.
+ Writing a positive value sets the ratelimit interval in ms.
+ Default is DEFAULT_RATELIMIT_INTERVAL (5000 ms).
+
+What: /sys/bus/pci/devices/<dev>/aer/correctable_ratelimit_burst
+Date: May 2025
+KernelVersion: 6.16.0
+Contact: linux-pci@vger.kernel.org
+Description: Ratelimit burst for correctable error logs. Writing a value
+ changes the number of errors (burst) allowed per interval
+ before ratelimiting. Reading gets the current ratelimit
+ burst. Default is DEFAULT_RATELIMIT_BURST (10).
+
+What: /sys/bus/pci/devices/<dev>/aer/nonfatal_ratelimit_interval_ms
+Date: May 2025
+KernelVersion: 6.16.0
+Contact: linux-pci@vger.kernel.org
+Description: Writing 0 disables AER non-fatal uncorrectable error log
+ ratelimiting. Writing a positive value sets the ratelimit
+ interval in ms. Default is DEFAULT_RATELIMIT_INTERVAL
+ (5000 ms).
+
+What: /sys/bus/pci/devices/<dev>/aer/nonfatal_ratelimit_burst
+Date: May 2025
+KernelVersion: 6.16.0
+Contact: linux-pci@vger.kernel.org
+Description: Ratelimit burst for non-fatal uncorrectable error logs.
+ Writing a value changes the number of errors (burst)
+ allowed per interval before ratelimiting. Reading gets the
+ current ratelimit burst. Default is DEFAULT_RATELIMIT_BURST
+ (10).
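
A short shell sketch of tuning these ratelimits; the device address below is
an arbitrary example and must be replaced with a real <dev>.

    AER="/sys/bus/pci/devices/0000:00:1c.0/aer"

    cat "$AER/correctable_ratelimit_interval_ms"           # default 5000 ms
    echo 10000 > "$AER/correctable_ratelimit_interval_ms"  # widen the window to 10 s
    echo 5     > "$AER/correctable_ratelimit_burst"        # 5 logs allowed per window
    echo 0     > "$AER/nonfatal_ratelimit_interval_ms"     # disable non-fatal ratelimiting
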
diff --git a/Documentation/ABI/testing/sysfs-bus-wmi b/Documentation/ABI/testing/sysfs-bus-wmi
index aadb35b82198..d71a219c610e 100644
--- a/Documentation/ABI/testing/sysfs-bus-wmi
+++ b/Documentation/ABI/testing/sysfs-bus-wmi
@@ -76,6 +76,6 @@ Date: May 2017
Contact: Darren Hart (VMware) <dvhart@infradead.org>
Description:
This file contains a boolean flags signaling the data block
- aassociated with the given WMI device is writable. If the
+ associated with the given WMI device is writable. If the
given WMI device is not associated with a data block, then
this file will not exist.
diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led
index 2e24ac3bd7ef..0313b82644f2 100644
--- a/Documentation/ABI/testing/sysfs-class-led
+++ b/Documentation/ABI/testing/sysfs-class-led
@@ -72,6 +72,12 @@ Description:
/sys/class/leds/<led> once a given trigger is selected. For
their documentation see `sysfs-class-led-trigger-*`.
+ Writing "none" removes the trigger for this LED.
+
+ Writing "default" sets the trigger to the LED's default trigger
+ (which would often be configured in the device tree for the
+ hardware).
+
What: /sys/class/leds/<led>/inverted
Date: January 2011
KernelVersion: 2.6.38
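
A small shell sketch of the two new trigger keywords; <led> stands for an
existing LED class device and is left as a placeholder.

    LED="/sys/class/leds/<led>"    # substitute a real LED name

    echo none > "$LED/trigger"     # detach whatever trigger is active
    echo default > "$LED/trigger"  # restore the LED's default (often DT-configured) trigger
    cat "$LED/trigger"             # the active trigger is shown in [brackets]
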
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
index 2a5c1a09a28f..87a058e14e7e 100644
--- a/Documentation/ABI/testing/sysfs-class-power
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -456,7 +456,7 @@ Description:
"Over voltage", "Under voltage", "Unspecified failure", "Cold",
"Watchdog timer expire", "Safety timer expire",
"Over current", "Calibration required", "Warm",
- "Cool", "Hot", "No battery"
+ "Cool", "Hot", "No battery", "Blown fuse", "Cell imbalance"
What: /sys/class/power_supply/<supply_name>/precharge_current
Date: June 2017
@@ -508,11 +508,12 @@ Description:
Access: Read, Write
Valid values:
- ================ ====================================
- auto: Charge normally, respect thresholds
- inhibit-charge: Do not charge while AC is attached
- force-discharge: Force discharge while AC is attached
- ================ ====================================
+ ===================== ========================================
+ auto: Charge normally, respect thresholds
+ inhibit-charge: Do not charge while AC is attached
+ inhibit-charge-awake: inhibit-charge only when device is awake
+ force-discharge: Force discharge while AC is attached
+ ===================== ========================================
What: /sys/class/power_supply/<supply_name>/technology
Date: May 2007
@@ -822,3 +823,46 @@ Description:
Each entry is a link to the device which registered the extension.
Access: Read
+
+What: /sys/class/power_supply/max8971-charger/fast_charge_timer
+Date: May 2025
+KernelVersion: 6.15.0
+Contact: Svyatoslav Ryhel <clamor95@gmail.com>
+Description:
+ This entry shows and sets the maximum time the max8971
+ charger operates in fast-charge mode. When the timer expires
+ the device will terminate fast-charge mode (charging current
+ will drop to 0 A) and will trigger an interrupt.
+
+ Valid values:
+
+ - 4 - 10 (hours), step by 1
+ - 0: disabled.
+
+What: /sys/class/power_supply/max8971-charger/top_off_threshold_current
+Date: May 2025
+KernelVersion: 6.15.0
+Contact: Svyatoslav Ryhel <clamor95@gmail.com>
+Description:
+ This entry shows and sets the charging current threshold for
+ entering top-off charging mode. When charging current in fast
+ charge mode drops below this value, the charger will trigger
+ interrupt and start top-off charging mode.
+ an interrupt and start top-off charging mode.
+ Valid values:
+
+ - 50000 - 200000 (microamps), step by 50000 (rounded down)
+
+What: /sys/class/power_supply/max8971-charger/top_off_timer
+Date: May 2025
+KernelVersion: 6.15.0
+Contact: Svyatoslav Ryhel <clamor95@gmail.com>
+Description:
+ This entry shows and sets the maximum time the max8971
+ charger operates in top-off charge mode. When the timer expires
+ the device will terminate top-off charge mode (charging current
+ will drop to 0 A) and will trigger an interrupt.
+
+ Valid values:
+
+ - 0 - 70 (minutes), step by 10 (rounded down)
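
A brief shell sketch of configuring the max8971 charger with the units and
ranges documented above.

    CHG=/sys/class/power_supply/max8971-charger

    echo 6      > "$CHG/fast_charge_timer"          # 6 hours; 0 would disable the timer
    echo 100000 > "$CHG/top_off_threshold_current"  # 100 mA, expressed in microamps
    echo 30     > "$CHG/top_off_timer"              # 30 minutes
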
diff --git a/Documentation/ABI/testing/sysfs-class-power-gaokun b/Documentation/ABI/testing/sysfs-class-power-gaokun
new file mode 100644
index 000000000000..0633aed7b355
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-power-gaokun
@@ -0,0 +1,27 @@
+What: /sys/class/power_supply/gaokun-ec-battery/smart_charge_delay
+Date: March 2025
+KernelVersion: 6.15
+Contact: Pengyu Luo <mitltlatltl@gmail.com>
+Description:
+ This entry allows configuration of smart charging delay.
+
+ Smart charging behavior: once the power adapter has been connected
+ for the configured delay (in hours), battery charging will follow the rules of
+ charge_control_start_threshold and charge_control_end_threshold.
+ For more information about charge control, please refer to
+ sysfs-class-power.
+
+ Access: Read, Write
+
+ Valid values: In hours (non-negative)
+
+What: /sys/class/power_supply/gaokun-ec-battery/battery_adaptive_charge
+Date: March 2025
+KernelVersion: 6.15
+Contact: Pengyu Luo <mitltlatltl@gmail.com>
+Description:
+ This entry allows enabling battery adaptive charging.
+
+ Access: Read, Write
+
+ Valid values: 0 (disabled) or 1 (enabled)
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 206079d3bd5b..bf85f4de6862 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -111,6 +111,7 @@ What: /sys/devices/system/cpu/cpuidle/available_governors
/sys/devices/system/cpu/cpuidle/current_driver
/sys/devices/system/cpu/cpuidle/current_governor
/sys/devices/system/cpu/cpuidle/current_governer_ro
+ /sys/devices/system/cpu/cpuidle/intel_c1_demotion
Date: September 2007
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Discover cpuidle policy and mechanism
@@ -132,7 +133,11 @@ Description: Discover cpuidle policy and mechanism
current_governor_ro: (RO) displays current idle policy.
- See Documentation/admin-guide/pm/cpuidle.rst and
+ intel_c1_demotion: (RW) enables/disables the C1 demotion
+ feature on Intel CPUs.
+
+ See Documentation/admin-guide/pm/cpuidle.rst,
+ Documentation/admin-guide/pm/intel_idle.rst, and
Documentation/driver-api/pm/cpuidle.rst for more information.
@@ -268,6 +273,60 @@ Description: Discover CPUs in the same CPU frequency coordination domain
This file is only present if the acpi-cpufreq or the cppc-cpufreq
drivers are in use.
+What: /sys/devices/system/cpu/cpuX/cpufreq/auto_select
+Date: May 2025
+Contact: linux-pm@vger.kernel.org
+Description: Autonomous selection enable
+
+ Read/write interface to control autonomous selection enable.
+ Read returns autonomous selection status:
+ 0: autonomous selection is disabled
+ 1: autonomous selection is enabled
+
+ Write 'y' or '1' or 'on' to enable autonomous selection.
+ Write 'n' or '0' or 'off' to disable autonomous selection.
+
+ This file is only present if the cppc-cpufreq driver is in use.
+
+What: /sys/devices/system/cpu/cpuX/cpufreq/auto_act_window
+Date: May 2025
+Contact: linux-pm@vger.kernel.org
+Description: Autonomous activity window
+
+ This file indicates a moving utilization sensitivity window to
+ the platform's autonomous selection policy.
+
+ Read/write an integer representing the autonomous activity window (in
+ microseconds) from/to this file. The max value to write is
+ 1270000000 but the max significand is 127. This means that if 128
+ is written to this file, 127 will be stored. If the value is
+ greater than 130, only the first two digits will be saved as
+ significand.
+
+ Writing a zero value to this file enables the platform to
+ determine an appropriate Activity Window depending on the workload.
+
+ Writing to this file only has meaning when Autonomous Selection is
+ enabled.
+
+ This file is only present if the cppc-cpufreq driver is in use.
+
+What: /sys/devices/system/cpu/cpuX/cpufreq/energy_performance_preference_val
+Date: May 2025
+Contact: linux-pm@vger.kernel.org
+Description: Energy performance preference
+
+ Read/write an 8-bit integer from/to this file. This file
+ represents a range of values from 0 (performance preference) to
+ 0xFF (energy efficiency preference) that influences the rate of
+ performance increase/decrease and the result of the hardware's
+ energy efficiency and performance optimization policies.
+
+ Writing to this file only has meaning when Autonomous Selection is
+ enabled.
+
+ This file is only present if the cppc-cpufreq driver is in use.
+
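
A tentative shell sketch of the cppc-cpufreq flow described above for one CPU
(cpu0 is an arbitrary choice); the decimal write format is assumed.

    CPU=/sys/devices/system/cpu/cpu0/cpufreq

    echo 1 > "$CPU/auto_select"            # enable autonomous selection first
    echo 10000 > "$CPU/auto_act_window"    # ~10 ms activity window, in microseconds
    echo 128 > "$CPU/energy_performance_preference_val"  # mid-range: 0=performance, 255=energy
    grep . "$CPU/auto_select" "$CPU/auto_act_window" "$CPU/energy_performance_preference_val"
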
What: /sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1}
Date: August 2008
@@ -485,6 +544,7 @@ What: /sys/devices/system/cpu/cpuX/regs/
/sys/devices/system/cpu/cpuX/regs/identification/
/sys/devices/system/cpu/cpuX/regs/identification/midr_el1
/sys/devices/system/cpu/cpuX/regs/identification/revidr_el1
+ /sys/devices/system/cpu/cpuX/regs/identification/aidr_el1
/sys/devices/system/cpu/cpuX/regs/identification/smidr_el1
Date: June 2016
Contact: Linux ARM Kernel Mailing list <linux-arm-kernel@lists.infradead.org>
@@ -511,11 +571,13 @@ Description: information about CPUs heterogeneity.
What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/gather_data_sampling
+ /sys/devices/system/cpu/vulnerabilities/indirect_target_selection
/sys/devices/system/cpu/vulnerabilities/itlb_multihit
/sys/devices/system/cpu/vulnerabilities/l1tf
/sys/devices/system/cpu/vulnerabilities/mds
/sys/devices/system/cpu/vulnerabilities/meltdown
/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
+ /sys/devices/system/cpu/vulnerabilities/old_microcode
/sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling
/sys/devices/system/cpu/vulnerabilities/retbleed
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
diff --git a/Documentation/ABI/testing/sysfs-devices-virtual-misc-tdx_guest b/Documentation/ABI/testing/sysfs-devices-virtual-misc-tdx_guest
new file mode 100644
index 000000000000..8fca56c8c9df
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-virtual-misc-tdx_guest
@@ -0,0 +1,63 @@
+What: /sys/devices/virtual/misc/tdx_guest/measurements/MRNAME[:HASH]
+Date: April, 2025
+KernelVersion: v6.16
+Contact: linux-coco@lists.linux.dev
+Description:
+ Value of a TDX measurement register (MR). MRNAME and HASH above
+ are placeholders. The optional suffix :HASH is used for MRs
+ that have associated hash algorithms. See below for a complete
+ list of TDX MRs exposed via sysfs. Refer to Intel TDX Module
+ ABI Specification for the definition of TDREPORT and the full
+ list of TDX measurements.
+
+ Intel TDX Module ABI Specification can be found at:
+ https://www.intel.com/content/www/us/en/developer/tools/trust-domain-extensions/documentation.html#architecture
+
+ See also:
+ https://docs.kernel.org/driver-api/coco/measurement-registers.html
+
+What: /sys/devices/virtual/misc/tdx_guest/measurements/mrconfigid
+Date: April, 2025
+KernelVersion: v6.16
+Contact: linux-coco@lists.linux.dev
+Description:
+ (RO) MRCONFIGID - 48-byte immutable storage typically used for
+ software-defined ID for non-owner-defined configuration of the
+ guest TD – e.g., run-time or OS configuration.
+
+What: /sys/devices/virtual/misc/tdx_guest/measurements/mrowner
+Date: April, 2025
+KernelVersion: v6.16
+Contact: linux-coco@lists.linux.dev
+Description:
+ (RO) MROWNER - 48-byte immutable storage typically used for
+ software-defined ID for the guest TD’s owner.
+
+What: /sys/devices/virtual/misc/tdx_guest/measurements/mrownerconfig
+Date: April, 2025
+KernelVersion: v6.16
+Contact: linux-coco@lists.linux.dev
+Description:
+ (RO) MROWNERCONFIG - 48-byte immutable storage typically used
+ for software-defined ID for owner-defined configuration of the
+ guest TD – e.g., specific to the workload rather than the
+ run-time or OS.
+
+What: /sys/devices/virtual/misc/tdx_guest/measurements/mrtd:sha384
+Date: April, 2025
+KernelVersion: v6.16
+Contact: linux-coco@lists.linux.dev
+Description:
+ (RO) MRTD - Measurement of the initial contents of the TD.
+
+What: /sys/devices/virtual/misc/tdx_guest/measurements/rtmr[0123]:sha384
+Date: April, 2025
+KernelVersion: v6.16
+Contact: linux-coco@lists.linux.dev
+Description:
+ (RW) RTMR[0123] - 4 Run-Time extendable Measurement Registers.
+ Read from any of these returns the current value of the
+ corresponding RTMR. Write extends the written buffer to the
+ RTMR. All writes must start at offset 0 and be 48 bytes in
+ size. Partial writes will result in EINVAL returned by the
+ write() syscall.
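
A sketch of extending a run-time measurement register and reading it back;
digest.bin is assumed to hold exactly 48 bytes (a SHA-384 digest).

    M=/sys/devices/virtual/misc/tdx_guest/measurements

    # Extend RTMR2 with a single 48-byte write starting at offset 0.
    dd if=digest.bin of="$M/rtmr2:sha384" bs=48 count=1 conv=notrunc

    od -A x -t x1 "$M/mrtd:sha384"    # initial TD measurement (read-only)
    od -A x -t x1 "$M/rtmr2:sha384"   # RTMR2 after the extend
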
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd b/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd
index 2a19584d091e..8c9718d83e9d 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd
+++ b/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd
@@ -1,6 +1,6 @@
What: /sys/bus/hid/drivers/hid-appletb-kbd/<dev>/mode
-Date: September, 2023
-KernelVersion: 6.5
+Date: March, 2025
+KernelVersion: 6.15
Contact: linux-input@vger.kernel.org
Description:
The set of keys displayed on the Touch Bar.
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
index 9bce281314df..4ca917ac6382 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
+++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
@@ -111,7 +111,7 @@ Description: RO. Package current voltage in millivolt.
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/temp2_input
Date: March 2025
-KernelVersion: 6.14
+KernelVersion: 6.15
Contact: intel-xe@lists.freedesktop.org
Description: RO. Package temperature in millidegree Celsius.
@@ -119,8 +119,32 @@ Description: RO. Package temperature in millidegree Celsius.
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/temp3_input
Date: March 2025
-KernelVersion: 6.14
+KernelVersion: 6.15
Contact: intel-xe@lists.freedesktop.org
Description: RO. VRAM temperature in millidegree Celsius.
Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/fan1_input
+Date: March 2025
+KernelVersion: 6.16
+Contact: intel-xe@lists.freedesktop.org
+Description: RO. Fan 1 speed in RPM.
+
+ Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/fan2_input
+Date: March 2025
+KernelVersion: 6.16
+Contact: intel-xe@lists.freedesktop.org
+Description: RO. Fan 2 speed in RPM.
+
+ Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/fan3_input
+Date: March 2025
+KernelVersion: 6.16
+Contact: intel-xe@lists.freedesktop.org
+Description: RO. Fan 3 speed in RPM.
+
+ Only supported for particular Intel Xe graphics platforms.
diff --git a/Documentation/ABI/testing/sysfs-driver-qat_ras b/Documentation/ABI/testing/sysfs-driver-qat_ras
index 176dea1e9c0a..82ceb04445ec 100644
--- a/Documentation/ABI/testing/sysfs-driver-qat_ras
+++ b/Documentation/ABI/testing/sysfs-driver-qat_ras
@@ -4,7 +4,7 @@ KernelVersion: 6.7
Contact: qat-linux@intel.com
Description: (RO) Reports the number of correctable errors detected by the device.
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat_ras/errors_nonfatal
Date: January 2024
@@ -12,7 +12,7 @@ KernelVersion: 6.7
Contact: qat-linux@intel.com
Description: (RO) Reports the number of non fatal errors detected by the device.
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat_ras/errors_fatal
Date: January 2024
@@ -20,7 +20,7 @@ KernelVersion: 6.7
Contact: qat-linux@intel.com
Description: (RO) Reports the number of fatal errors detected by the device.
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat_ras/reset_error_counters
Date: January 2024
@@ -38,4 +38,4 @@ Description: (WO) Write to resets all error counters of a device.
# cat /sys/bus/pci/devices/<BDF>/qat_ras/errors_fatal
0
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index ae0191295d29..d4140dc6c5ba 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -1604,3 +1604,84 @@ Description:
prevent the UFS from frequently performing clock gating/ungating.
The attribute is read/write.
+
+What: /sys/bus/platform/drivers/ufshcd/*/device_lvl_exception_count
+What: /sys/bus/platform/devices/*.ufs/device_lvl_exception_count
+Date: March 2025
+Contact: Bao D. Nguyen <quic_nguyenb@quicinc.com>
+Description:
+ This attribute is applicable to ufs devices compliant with the
+ JEDEC specification version 4.1 or later. The
+ device_lvl_exception_count is a counter indicating the number of
+ times device level exceptions have occurred since the last
+ time this counter was reset. Writing a 0 value to this
+ attribute will reset the device_lvl_exception_count. If the
+ device_lvl_exception_count reads a positive value, the user
+ application should read the device_lvl_exception_id attribute to
+ know more information about the exception.
+
+ The attribute is read/write.
+
+What: /sys/bus/platform/drivers/ufshcd/*/device_lvl_exception_id
+What: /sys/bus/platform/devices/*.ufs/device_lvl_exception_id
+Date: March 2025
+Contact: Bao D. Nguyen <quic_nguyenb@quicinc.com>
+Description:
+ Reading the device_lvl_exception_id returns the
+ qDeviceLevelExceptionID attribute defined in the ufs device
+ JEDEC specification version 4.1. The definition of the
+ qDeviceLevelExceptionID is vendor specific. Refer to the
+ device manufacturer's datasheet for
+ more information on the meaning of the qDeviceLevelExceptionID
+ attribute value.
+
+ The attribute is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/wb_resize_enable
+What: /sys/bus/platform/devices/*.ufs/wb_resize_enable
+Date: April 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ The host can enable the WriteBooster buffer resize by setting this
+ attribute.
+
+ ======== ======================================
+ idle There is no resize operation
+ decrease Decrease WriteBooster buffer size
+ increase Increase WriteBooster buffer size
+ ======== ======================================
+
+ The file is write only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/attributes/wb_resize_hint
+What: /sys/bus/platform/devices/*.ufs/attributes/wb_resize_hint
+Date: April 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ wb_resize_hint indicates which type of WriteBooster buffer resize
+ is recommended by the device.
+
+ ========= ======================================
+ keep Recommend to keep the buffer size
+ decrease Recommend to decrease the buffer size
+ increase Recommend to increase the buffer size
+ ========= ======================================
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/attributes/wb_resize_status
+What: /sys/bus/platform/devices/*.ufs/attributes/wb_resize_status
+Date: April 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ The host can check the resize operation status of the WriteBooster
+ buffer by reading this attribute.
+
+ ================ ========================================
+ idle Resize operation is not issued
+ in_progress Resize operation in progress
+ complete_success Resize operation completed successfully
+ general_failure Resize operation general failure
+ ================ ========================================
+
+ The file is read only.
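
A possible resize sequence from userspace, shown with the platform-device
path form; the actual *.ufs device name is platform specific, so <addr> below
is only a placeholder.

    UFS="/sys/bus/platform/devices/<addr>.ufs"   # substitute the real device

    cat "$UFS/attributes/wb_resize_hint"         # keep / decrease / increase
    echo decrease > "$UFS/wb_resize_enable"      # request a buffer decrease
    cat "$UFS/attributes/wb_resize_status"       # idle / in_progress / complete_success / general_failure
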
diff --git a/Documentation/ABI/testing/sysfs-firmware-acpi b/Documentation/ABI/testing/sysfs-firmware-acpi
index 5249ad5a96d9..f4de60c4134d 100644
--- a/Documentation/ABI/testing/sysfs-firmware-acpi
+++ b/Documentation/ABI/testing/sysfs-firmware-acpi
@@ -248,3 +248,24 @@ Description:
# cat ff_pwr_btn
7 enabled
+What: /sys/firmware/acpi/memory_ranges/rangeX
+Date: February 2025
+Contact: Tony Luck <tony.luck@intel.com>
+Description:
+ On systems with the ACPI MRRM table reports the parameters for
+ On systems with the ACPI MRRM table, this reports the parameters for
+
+ base: Starting system physical address.
+
+ length: Length of this range in bytes.
+
+ node: NUMA node that this range belongs to. Negative numbers
+ indicate that the node number could not be determined (e.g.
+ for an address range that is reserved for future hot add of
+ memory).
+
+ local_region_id: ID associated with access by agents
+ local to this range of addresses.
+
+ remote_region_id: ID associated with access by agents
+ non-local to this range of addresses.
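
A small shell sketch that walks every reported range and prints the fields
listed above.

    for r in /sys/firmware/acpi/memory_ranges/range*; do
        echo "== $r"
        for f in base length node local_region_id remote_region_id; do
            printf '  %-18s %s\n' "$f:" "$(cat "$r/$f")"
        done
    done
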
diff --git a/Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm b/Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
index ea5e5b489bc7..26741cb84504 100644
--- a/Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
+++ b/Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
@@ -12,15 +12,6 @@ Contact: Marek Behún <kabel@kernel.org>
Description: (Read) MAC addresses burned into eFuses of this Turris Mox board.
Format: %pM
-What: /sys/firmware/turris-mox-rwtm/pubkey
-Date: August 2019
-KernelVersion: 5.4
-Contact: Marek Behún <kabel@kernel.org>
-Description: (Read) ECDSA public key (in pubkey hex compressed form) computed
- as pair to the ECDSA private key burned into eFuses of this
- Turris Mox Board.
- Format: string
-
What: /sys/firmware/turris-mox-rwtm/ram_size
Date: August 2019
KernelVersion: 5.4
diff --git a/Documentation/ABI/testing/sysfs-fs-erofs b/Documentation/ABI/testing/sysfs-fs-erofs
index b134146d735b..bf3b6299c15e 100644
--- a/Documentation/ABI/testing/sysfs-fs-erofs
+++ b/Documentation/ABI/testing/sysfs-fs-erofs
@@ -27,3 +27,11 @@ Description: Writing to this will drop compression-related caches,
- 1 : invalidate cached compressed folios
- 2 : drop in-memory pclusters
- 3 : drop in-memory pclusters and cached compressed folios
+
+What: /sys/fs/erofs/accel
+Date: May 2025
+Contact: "Bo Liu" <liubo03@inspur.com>
+Description: Used to set or show the hardware accelerators in effect;
+ multiple accelerators are separated by '\n'.
+ Supported accelerator(s): qat_deflate.
+ Disable all accelerators with an empty string (echo > accel).
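
A short shell sketch of selecting and clearing the accelerator.

    echo qat_deflate > /sys/fs/erofs/accel   # use the QAT deflate accelerator
    cat /sys/fs/erofs/accel                  # accelerators in effect, one per line
    echo > /sys/fs/erofs/accel               # empty string disables all accelerators
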
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 59adb7dc6f9e..bf03263b9f46 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -270,7 +270,7 @@ Description: Shows all enabled kernel features.
inode_checksum, flexible_inline_xattr, quota_ino,
inode_crtime, lost_found, verity, sb_checksum,
casefold, readonly, compression, test_dummy_encryption_v2,
- atomic_write, pin_file, encrypted_casefold.
+ atomic_write, pin_file, encrypted_casefold, linear_lookup.
What: /sys/fs/f2fs/<disk>/inject_rate
Date: May 2016
@@ -710,32 +710,34 @@ Description: Support configuring fault injection type, should be
enabled with fault_injection option, fault type value
is shown below, it supports single or combined type.
- =========================== ===========
+ =========================== ==========
Type_Name Type_Value
- =========================== ===========
- FAULT_KMALLOC 0x000000001
- FAULT_KVMALLOC 0x000000002
- FAULT_PAGE_ALLOC 0x000000004
- FAULT_PAGE_GET 0x000000008
- FAULT_ALLOC_BIO 0x000000010 (obsolete)
- FAULT_ALLOC_NID 0x000000020
- FAULT_ORPHAN 0x000000040
- FAULT_BLOCK 0x000000080
- FAULT_DIR_DEPTH 0x000000100
- FAULT_EVICT_INODE 0x000000200
- FAULT_TRUNCATE 0x000000400
- FAULT_READ_IO 0x000000800
- FAULT_CHECKPOINT 0x000001000
- FAULT_DISCARD 0x000002000
- FAULT_WRITE_IO 0x000004000
- FAULT_SLAB_ALLOC 0x000008000
- FAULT_DQUOT_INIT 0x000010000
- FAULT_LOCK_OP 0x000020000
- FAULT_BLKADDR_VALIDITY 0x000040000
- FAULT_BLKADDR_CONSISTENCE 0x000080000
- FAULT_NO_SEGMENT 0x000100000
- FAULT_INCONSISTENT_FOOTER 0x000200000
- =========================== ===========
+ =========================== ==========
+ FAULT_KMALLOC 0x00000001
+ FAULT_KVMALLOC 0x00000002
+ FAULT_PAGE_ALLOC 0x00000004
+ FAULT_PAGE_GET 0x00000008
+ FAULT_ALLOC_BIO 0x00000010 (obsolete)
+ FAULT_ALLOC_NID 0x00000020
+ FAULT_ORPHAN 0x00000040
+ FAULT_BLOCK 0x00000080
+ FAULT_DIR_DEPTH 0x00000100
+ FAULT_EVICT_INODE 0x00000200
+ FAULT_TRUNCATE 0x00000400
+ FAULT_READ_IO 0x00000800
+ FAULT_CHECKPOINT 0x00001000
+ FAULT_DISCARD 0x00002000
+ FAULT_WRITE_IO 0x00004000
+ FAULT_SLAB_ALLOC 0x00008000
+ FAULT_DQUOT_INIT 0x00010000
+ FAULT_LOCK_OP 0x00020000
+ FAULT_BLKADDR_VALIDITY 0x00040000
+ FAULT_BLKADDR_CONSISTENCE 0x00080000
+ FAULT_NO_SEGMENT 0x00100000
+ FAULT_INCONSISTENT_FOOTER 0x00200000
+ FAULT_TIMEOUT 0x00400000 (1000ms)
+ FAULT_VMALLOC 0x00800000
+ =========================== ==========
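
Combined fault types are simply the OR of the hexadecimal values in the table
above; a sketch, assuming the fault type attribute is the inject_type entry of
this sysfs directory and that the filesystem is mounted with fault_injection.

    # FAULT_KMALLOC | FAULT_TIMEOUT = 0x00000001 | 0x00400000 = 0x00400001 (4194305)
    printf '0x%08x\n' $(( 0x00000001 | 0x00400000 ))

    # Assumed attribute name and decimal write format.
    echo 4194305 > "/sys/fs/f2fs/<disk>/inject_type"
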
What: /sys/fs/f2fs/<disk>/discard_io_aware_gran
Date: January 2023
@@ -846,3 +848,16 @@ Description: For several zoned storage devices, vendors will provide extra space
reserved_blocks. However, it is not enough, since this extra space should
not be shown to users. So, with this new sysfs node, we can hide the space
by substracting reserved_blocks from total bytes.
+
+What: /sys/fs/f2fs/<disk>/encoding_flags
+Date: April 2025
+Contact: "Chao Yu" <chao@kernel.org>
+Description: This is a read-only entry to show the value of sb.s_encoding_flags, the
+ value is hexadecimal.
+
+ ============================ ==========
+ Flag_Name Flag_Value
+ ============================ ==========
+ SB_ENC_STRICT_MODE_FL 0x00000001
+ SB_ENC_NO_COMPAT_FALLBACK_FL 0x00000002
+ ============================ ==========
diff --git a/Documentation/ABI/testing/sysfs-kernel-hardlockup_count b/Documentation/ABI/testing/sysfs-kernel-hardlockup_count
new file mode 100644
index 000000000000..dfdd4078b077
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-hardlockup_count
@@ -0,0 +1,7 @@
+What: /sys/kernel/hardlockup_count
+Date: May 2025
+KernelVersion: 6.16
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:
+ Shows how many times the system has detected a hard lockup since last boot.
+ Available only if CONFIG_HARDLOCKUP_DETECTOR is enabled.
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon
index 293197f180ad..5697ab154c1f 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-damon
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon
@@ -283,6 +283,12 @@ Contact: SeongJae Park <sj@kernel.org>
Description: Writing to and reading from this file sets and gets the current
value of the goal metric.
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/quotas/goals/<G>/nid
+Date: Apr 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: Writing to and reading from this file sets and gets the nid
+ parameter of the goal.
+
What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/quotas/weights/sz_permil
Date: Mar 2022
Contact: SeongJae Park <sj@kernel.org>
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-mempolicy-weighted-interleave b/Documentation/ABI/testing/sysfs-kernel-mm-mempolicy-weighted-interleave
index 0b7972de04e9..649c0e9b895c 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-mempolicy-weighted-interleave
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-mempolicy-weighted-interleave
@@ -20,6 +20,35 @@ Description: Weight configuration interface for nodeN
Minimum weight: 1
Maximum weight: 255
- Writing an empty string or `0` will reset the weight to the
- system default. The system default may be set by the kernel
- or drivers at boot or during hotplug events.
+ Writing invalid values (i.e. any values not in [1,255],
+ empty string, ...) will return -EINVAL.
+
+ Changing the weight to a valid value will automatically
+ switch the system to manual mode as well.
+
+What: /sys/kernel/mm/mempolicy/weighted_interleave/auto
+Date: May 2025
+Contact: Linux memory management mailing list <linux-mm@kvack.org>
+Description: Auto-weighting configuration interface
+
+ Configuration mode for weighted interleave. 'true' indicates
+ that the system is in auto mode, and 'false' indicates that
+ the system is in manual mode.
+
+ In auto mode, all node weights are re-calculated and overwritten
+ (visible via the nodeN interfaces) whenever new bandwidth data
+ is made available during either boot or hotplug events.
+
+ In manual mode, node weights can only be updated by the user.
+ Note that nodes that are onlined with previously set weights
+ will reuse those weights. If they were not previously set or
+ are onlined with missing bandwidth data, the weights will use
+ a default weight of 1.
+
+ Writing any true value string (e.g. Y or 1) will enable auto
+ mode, while writing any false value string (e.g. N or 0) will
+ enable manual mode. All other strings are ignored and will
+ return -EINVAL.
+
+ Writing a new weight to a node directly via the nodeN interface
+ will also automatically switch the system to manual mode.
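
A short shell sketch of switching between the two modes; node0 is an
arbitrary example node.

    WI=/sys/kernel/mm/mempolicy/weighted_interleave

    cat "$WI/auto"         # true = auto mode, false = manual mode
    echo 4 > "$WI/node0"   # weight in [1,255]; also switches the system to manual mode
    echo Y > "$WI/auto"    # back to auto mode; weights are recalculated from bandwidth data
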
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-numa b/Documentation/ABI/testing/sysfs-kernel-mm-numa
index 77e559d4ed80..90e375ff54cb 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-numa
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-numa
@@ -16,9 +16,13 @@ Description: Enable/disable demoting pages during reclaim
Allowing page migration during reclaim enables these
systems to migrate pages from fast tiers to slow tiers
when the fast tier is under pressure. This migration
- is performed before swap. It may move data to a NUMA
- node that does not fall into the cpuset of the
- allocating process which might be construed to violate
- the guarantees of cpusets. This should not be enabled
- on systems which need strict cpuset location
- guarantees.
+ is performed before swap if an eligible numa node is
+ present in cpuset.mems for the cgroup (or if cpuset v1
+ is being used). If cpuset.mems changes at runtime, it
+ may move data to a NUMA node that does not fall into the
+ cpuset of the new cpuset.mems, which might be construed
+ to violate the guarantees of cpusets. Shared memory,
+ such as libraries, owned by another cgroup may still be
+ demoted and result in memory use on a node not present
+ in cpuset.mems. This should not be enabled on systems
+ which need strict cpuset location guarantees.
diff --git a/Documentation/ABI/testing/sysfs-kernel-rcu_stall_count b/Documentation/ABI/testing/sysfs-kernel-rcu_stall_count
new file mode 100644
index 000000000000..a4a97a7f4a4d
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-rcu_stall_count
@@ -0,0 +1,6 @@
+What: /sys/kernel/rcu_stall_count
+Date: May 2025
+KernelVersion: 6.16
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:
+ Shows how many times the system has detected an RCU stall since last boot.
diff --git a/Documentation/ABI/testing/sysfs-kernel-reboot b/Documentation/ABI/testing/sysfs-kernel-reboot
index e117aba46be0..52571fd5ddba 100644
--- a/Documentation/ABI/testing/sysfs-kernel-reboot
+++ b/Documentation/ABI/testing/sysfs-kernel-reboot
@@ -1,7 +1,7 @@
What: /sys/kernel/reboot
Date: November 2020
KernelVersion: 5.11
-Contact: Matteo Croce <mcroce@microsoft.com>
+Contact: Matteo Croce <teknoraver@meta.com>
Description: Interface to set the kernel reboot behavior, similarly to
what can be done via the reboot= cmdline option.
(see Documentation/admin-guide/kernel-parameters.txt)
@@ -9,25 +9,25 @@ Description: Interface to set the kernel reboot behavior, similarly to
What: /sys/kernel/reboot/mode
Date: November 2020
KernelVersion: 5.11
-Contact: Matteo Croce <mcroce@microsoft.com>
+Contact: Matteo Croce <teknoraver@meta.com>
Description: Reboot mode. Valid values are: cold warm hard soft gpio
What: /sys/kernel/reboot/type
Date: November 2020
KernelVersion: 5.11
-Contact: Matteo Croce <mcroce@microsoft.com>
+Contact: Matteo Croce <teknoraver@meta.com>
Description: Reboot type. Valid values are: bios acpi kbd triple efi pci
What: /sys/kernel/reboot/cpu
Date: November 2020
KernelVersion: 5.11
-Contact: Matteo Croce <mcroce@microsoft.com>
+Contact: Matteo Croce <teknoraver@meta.com>
Description: CPU number to use to reboot.
What: /sys/kernel/reboot/force
Date: November 2020
KernelVersion: 5.11
-Contact: Matteo Croce <mcroce@microsoft.com>
+Contact: Matteo Croce <teknoraver@meta.com>
Description: Don't wait for any other CPUs on reboot and
avoid anything that could hang.
diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab
index cd5fb8fa3ddf..658999be5164 100644
--- a/Documentation/ABI/testing/sysfs-kernel-slab
+++ b/Documentation/ABI/testing/sysfs-kernel-slab
@@ -2,7 +2,7 @@ What: /sys/kernel/slab
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The /sys/kernel/slab directory contains a snapshot of the
internal state of the SLUB allocator for each cache. Certain
@@ -14,7 +14,7 @@ What: /sys/kernel/slab/<cache>/aliases
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The aliases file is read-only and specifies how many caches
have merged into this cache.
@@ -23,7 +23,7 @@ What: /sys/kernel/slab/<cache>/align
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The align file is read-only and specifies the cache's object
alignment in bytes.
@@ -32,7 +32,7 @@ What: /sys/kernel/slab/<cache>/alloc_calls
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The alloc_calls file is read-only and lists the kernel code
locations from which allocations for this cache were performed.
@@ -43,7 +43,7 @@ What: /sys/kernel/slab/<cache>/alloc_fastpath
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The alloc_fastpath file shows how many objects have been
allocated using the fast path. It can be written to clear the
@@ -54,7 +54,7 @@ What: /sys/kernel/slab/<cache>/alloc_from_partial
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The alloc_from_partial file shows how many times a cpu slab has
been full and it has been refilled by using a slab from the list
@@ -66,7 +66,7 @@ What: /sys/kernel/slab/<cache>/alloc_refill
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The alloc_refill file shows how many times the per-cpu freelist
was empty but there were objects available as the result of
@@ -77,7 +77,7 @@ What: /sys/kernel/slab/<cache>/alloc_slab
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The alloc_slab file is shows how many times a new slab had to
be allocated from the page allocator. It can be written to
@@ -88,7 +88,7 @@ What: /sys/kernel/slab/<cache>/alloc_slowpath
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The alloc_slowpath file shows how many objects have been
allocated using the slow path because of a refill or
@@ -100,7 +100,7 @@ What: /sys/kernel/slab/<cache>/cache_dma
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The cache_dma file is read-only and specifies whether objects
are from ZONE_DMA.
@@ -110,7 +110,7 @@ What: /sys/kernel/slab/<cache>/cpu_slabs
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The cpu_slabs file is read-only and displays how many cpu slabs
are active and their NUMA locality.
@@ -119,7 +119,7 @@ What: /sys/kernel/slab/<cache>/cpuslab_flush
Date: April 2009
KernelVersion: 2.6.31
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The file cpuslab_flush shows how many times a cache's cpu slabs
have been flushed as the result of destroying or shrinking a
@@ -132,7 +132,7 @@ What: /sys/kernel/slab/<cache>/ctor
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The ctor file is read-only and specifies the cache's object
constructor function, which is invoked for each object when a
@@ -142,7 +142,7 @@ What: /sys/kernel/slab/<cache>/deactivate_empty
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The deactivate_empty file shows how many times an empty cpu slab
was deactivated. It can be written to clear the current count.
@@ -152,7 +152,7 @@ What: /sys/kernel/slab/<cache>/deactivate_full
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The deactivate_full file shows how many times a full cpu slab
was deactivated. It can be written to clear the current count.
@@ -162,7 +162,7 @@ What: /sys/kernel/slab/<cache>/deactivate_remote_frees
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The deactivate_remote_frees file shows how many times a cpu slab
has been deactivated and contained free objects that were freed
@@ -173,7 +173,7 @@ What: /sys/kernel/slab/<cache>/deactivate_to_head
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The deactivate_to_head file shows how many times a partial cpu
slab was deactivated and added to the head of its node's partial
@@ -184,7 +184,7 @@ What: /sys/kernel/slab/<cache>/deactivate_to_tail
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The deactivate_to_tail file shows how many times a partial cpu
slab was deactivated and added to the tail of its node's partial
@@ -195,7 +195,7 @@ What: /sys/kernel/slab/<cache>/destroy_by_rcu
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The destroy_by_rcu file is read-only and specifies whether
slabs (not objects) are freed by rcu.
@@ -204,7 +204,7 @@ What: /sys/kernel/slab/<cache>/free_add_partial
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The free_add_partial file shows how many times an object has
been freed in a full slab so that it had to added to its node's
@@ -215,7 +215,7 @@ What: /sys/kernel/slab/<cache>/free_calls
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The free_calls file is read-only and lists the locations of
object frees if slab debugging is enabled (see
@@ -225,7 +225,7 @@ What: /sys/kernel/slab/<cache>/free_fastpath
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The free_fastpath file shows how many objects have been freed
using the fast path because it was an object from the cpu slab.
@@ -236,7 +236,7 @@ What: /sys/kernel/slab/<cache>/free_frozen
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The free_frozen file shows how many objects have been freed to
a frozen slab (i.e. a remote cpu slab). It can be written to
@@ -247,7 +247,7 @@ What: /sys/kernel/slab/<cache>/free_remove_partial
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The free_remove_partial file shows how many times an object has
been freed to a now-empty slab so that it had to be removed from
@@ -259,7 +259,7 @@ What: /sys/kernel/slab/<cache>/free_slab
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The free_slab file shows how many times an empty slab has been
freed back to the page allocator. It can be written to clear
@@ -270,7 +270,7 @@ What: /sys/kernel/slab/<cache>/free_slowpath
Date: February 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The free_slowpath file shows how many objects have been freed
using the slow path (i.e. to a full or partial slab). It can
@@ -281,7 +281,7 @@ What: /sys/kernel/slab/<cache>/hwcache_align
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The hwcache_align file is read-only and specifies whether
objects are aligned on cachelines.
@@ -301,7 +301,7 @@ What: /sys/kernel/slab/<cache>/object_size
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The object_size file is read-only and specifies the cache's
object size.
@@ -310,7 +310,7 @@ What: /sys/kernel/slab/<cache>/objects
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The objects file is read-only and displays how many objects are
active and from which nodes they are from.
@@ -319,7 +319,7 @@ What: /sys/kernel/slab/<cache>/objects_partial
Date: April 2008
KernelVersion: 2.6.26
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The objects_partial file is read-only and displays how many
objects are on partial slabs and from which nodes they are
@@ -329,7 +329,7 @@ What: /sys/kernel/slab/<cache>/objs_per_slab
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The file objs_per_slab is read-only and specifies how many
objects may be allocated from a single slab of the order
@@ -339,7 +339,7 @@ What: /sys/kernel/slab/<cache>/order
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The order file specifies the page order at which new slabs are
allocated. It is writable and can be changed to increase the
@@ -356,7 +356,7 @@ What: /sys/kernel/slab/<cache>/order_fallback
Date: April 2008
KernelVersion: 2.6.26
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The order_fallback file shows how many times an allocation of a
new slab has not been possible at the cache's order and instead
@@ -369,7 +369,7 @@ What: /sys/kernel/slab/<cache>/partial
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The partial file is read-only and displays how long many
partial slabs there are and how long each node's list is.
@@ -378,7 +378,7 @@ What: /sys/kernel/slab/<cache>/poison
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The poison file specifies whether objects should be poisoned
when a new slab is allocated.
@@ -387,7 +387,7 @@ What: /sys/kernel/slab/<cache>/reclaim_account
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The reclaim_account file specifies whether the cache's objects
are reclaimable (and grouped by their mobility).
@@ -396,7 +396,7 @@ What: /sys/kernel/slab/<cache>/red_zone
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The red_zone file specifies whether the cache's objects are red
zoned.
@@ -405,7 +405,7 @@ What: /sys/kernel/slab/<cache>/remote_node_defrag_ratio
Date: January 2008
KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The file remote_node_defrag_ratio specifies the percentage of
times SLUB will attempt to refill the cpu slab with a partial
@@ -419,7 +419,7 @@ What: /sys/kernel/slab/<cache>/sanity_checks
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The sanity_checks file specifies whether expensive checks
should be performed on free and, at minimum, enables double free
@@ -430,7 +430,7 @@ What: /sys/kernel/slab/<cache>/shrink
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The shrink file is used to reclaim unused slab cache
memory from a cache. Empty per-cpu or partial slabs
@@ -446,7 +446,7 @@ What: /sys/kernel/slab/<cache>/slab_size
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The slab_size file is read-only and specifies the object size
with metadata (debugging information and alignment) in bytes.
@@ -455,7 +455,7 @@ What: /sys/kernel/slab/<cache>/slabs
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The slabs file is read-only and displays how long many slabs
there are (both cpu and partial) and from which nodes they are
@@ -465,7 +465,7 @@ What: /sys/kernel/slab/<cache>/store_user
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The store_user file specifies whether the location of
allocation or free should be tracked for a cache.
@@ -474,7 +474,7 @@ What: /sys/kernel/slab/<cache>/total_objects
Date: April 2008
KernelVersion: 2.6.26
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The total_objects file is read-only and displays how many total
objects a cache has and from which nodes they are from.
@@ -483,7 +483,7 @@ What: /sys/kernel/slab/<cache>/trace
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
The trace file specifies whether object allocations and frees
should be traced.
@@ -492,7 +492,7 @@ What: /sys/kernel/slab/<cache>/validate
Date: May 2007
KernelVersion: 2.6.22
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
- Christoph Lameter <cl@linux-foundation.org>
+ Christoph Lameter <cl@gentwo.org>
Description:
Writing to the validate file causes SLUB to traverse all of its
cache's objects and check the validity of metadata.
@@ -506,14 +506,14 @@ Description:
What: /sys/kernel/slab/<cache>/slabs_cpu_partial
Date: Aug 2011
-Contact: Christoph Lameter <cl@linux.com>
+Contact: Christoph Lameter <cl@gentwo.org>
Description:
This read-only file shows the number of partialli allocated
frozen slabs.
What: /sys/kernel/slab/<cache>/cpu_partial
Date: Aug 2011
-Contact: Christoph Lameter <cl@linux.com>
+Contact: Christoph Lameter <cl@gentwo.org>
Description:
This read-only file shows the number of per cpu partial
pages to keep around.
diff --git a/Documentation/ABI/testing/sysfs-kernel-softlockup_count b/Documentation/ABI/testing/sysfs-kernel-softlockup_count
new file mode 100644
index 000000000000..337ff5531b5f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-softlockup_count
@@ -0,0 +1,7 @@
+What: /sys/kernel/softlockup_count
+Date: May 2025
+KernelVersion: 6.16
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:
+ Shows how many times the system has detected a soft lockup since last boot.
+ Available only if CONFIG_SOFTLOCKUP_DETECTOR is enabled.
diff --git a/Documentation/ABI/testing/sysfs-platform-alienware-wmi b/Documentation/ABI/testing/sysfs-platform-alienware-wmi
new file mode 100644
index 000000000000..4877b3745f4e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-alienware-wmi
@@ -0,0 +1,14 @@
+What: /sys/class/hwmon/hwmonX/fanY_boost
+Date: March 2025
+KernelVersion: 6.15
+Contact: Kurt Borja <kuurtb@gmail.com>
+Description:
+ This file exposes fan boost control for Dell gaming laptops with
+ the AWCC WMI interface.
+
+ See Documentation/admin-guide/laptops/alienware-wmi.rst for
+ details.
+
+ Integer value in the range 0 to 255
+
+ RW
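+
+		For example, to set fan Y on hwmon device X to a mid-range
+		boost (X and Y are placeholders, as in the attribute path
+		above)::
+
+		  # echo 128 > /sys/class/hwmon/hwmonX/fanY_boost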
diff --git a/Documentation/ABI/testing/sysfs-platform-oxp b/Documentation/ABI/testing/sysfs-platform-oxp
new file mode 100644
index 000000000000..b3f39fc21dfa
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-oxp
@@ -0,0 +1,25 @@
+What: /sys/devices/platform/<platform>/tt_toggle
+Date: Jun 2023
+KernelVersion: 6.5
+Contact: "Antheas Kapenekakis" <lkml@antheas.dev>
+Description:
+ Takeover TDP controls from the device. OneXPlayer devices have a
+ turbo button that can be used to switch between two TDP modes
+ (usually 15W and 25W). By setting this attribute to 1, this
+ functionality is disabled, handing TDP control over to (Windows)
+ userspace software and the Turbo button turns into a keyboard
+ shortcut over the AT keyboard of the device. In addition,
+ using this setting is a prerequisite for PWM control for most
+ newer models (otherwise it NOOPs).
+
+What: /sys/devices/platform/<platform>/tt_led
+Date: April 2025
+KernelVersion: 6.16
+Contact: "Antheas Kapenekakis" <lkml@antheas.dev>
+Description:
+ Some OneXPlayer devices (e.g., X1 series) feature a little LED
+ nested in the Turbo button. This LED is illuminated when the
+ device is in the higher TDP mode (e.g., 25W). Once tt_toggle
+ is engaged, this LED is left dangling to its last state. This
+ attribute allows userspace to control the LED state manually
+ (either with 1 or 0). Only a subset of devices contain this LED.
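+
+	A minimal usage sketch (``<platform>`` is the device-specific platform
+	device name; tt_led is only meaningful once tt_toggle is engaged)::
+
+	  # echo 1 > /sys/devices/platform/<platform>/tt_toggle
+	  # echo 1 > /sys/devices/platform/<platform>/tt_led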
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 63094646df28..d30d66ddf1ad 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -60,9 +60,8 @@ endif #HAVE_LATEXMK
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
-KERNELDOC = $(srctree)/scripts/kernel-doc
-KERNELDOC_CONF = -D kerneldoc_srctree=$(srctree) -D kerneldoc_bin=$(KERNELDOC)
-ALLSPHINXOPTS = $(KERNELDOC_CONF) $(PAPEROPT_$(PAPER)) $(SPHINXOPTS)
+ALLSPHINXOPTS = -D kerneldoc_srctree=$(srctree) -D kerneldoc_bin=$(KERNELDOC)
+ALLSPHINXOPTS += $(PAPEROPT_$(PAPER)) $(SPHINXOPTS)
ifneq ($(wildcard $(srctree)/.config),)
ifeq ($(CONFIG_RUST),y)
# Let Sphinx know we will include rustdoc
@@ -83,9 +82,11 @@ loop_cmd = $(echo-cmd) $(cmd_$(1)) || exit;
# $5 reST source folder relative to $(src),
# e.g. "userspace-api/media" for the linux-tv book-set at ./Documentation/userspace-api/media
+PYTHONPYCACHEPREFIX ?= $(abspath $(BUILDDIR)/__pycache__)
+
quiet_cmd_sphinx = SPHINX $@ --> file://$(abspath $(BUILDDIR)/$3/$4)
cmd_sphinx = $(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/userspace-api/media $2 && \
- PYTHONDONTWRITEBYTECODE=1 \
+ PYTHONPYCACHEPREFIX="$(PYTHONPYCACHEPREFIX)" \
BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(src)/$5/$(SPHINX_CONF)) \
$(PYTHON3) $(srctree)/scripts/jobserver-exec \
$(CONFIG_SHELL) $(srctree)/Documentation/sphinx/parallel-wrapper.sh \
diff --git a/Documentation/PCI/controller/index.rst b/Documentation/PCI/controller/index.rst
new file mode 100644
index 000000000000..c2ce9ccdcfa0
--- /dev/null
+++ b/Documentation/PCI/controller/index.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================================
+PCI Native Host Bridge and Endpoint Drivers
+===========================================
+
+.. toctree::
+ :maxdepth: 2
+
+ rcar-pcie-firmware
diff --git a/Documentation/PCI/controller/rcar-pcie-firmware.rst b/Documentation/PCI/controller/rcar-pcie-firmware.rst
new file mode 100644
index 000000000000..67d3bf66e315
--- /dev/null
+++ b/Documentation/PCI/controller/rcar-pcie-firmware.rst
@@ -0,0 +1,32 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================================================
+Firmware of PCIe controller for Renesas R-Car V4H
+=================================================
+
+Renesas R-Car V4H (r8a779g0) has a PCIe controller, requiring a specific
+firmware download during startup.
+
+However, Renesas currently cannot distribute the firmware free of charge.
+
+The firmware file "104_PCIe_fw_addr_data_ver1.05.txt" (note that the file name
+might be different between different datasheet revisions) can be found in the
+datasheet encoded as text, and as such, the file's content must be converted
+back to binary form. This can be achieved using the following example script:
+
+.. code-block:: sh
+
+ $ awk '/^\s*0x[0-9A-Fa-f]{4}\s+0x[0-9A-Fa-f]{4}/ { print substr($2,5,2) substr($2,3,2) }' \
+ 104_PCIe_fw_addr_data_ver1.05.txt | \
+ xxd -p -r > rcar_gen4_pcie.bin
+
+Once the text content has been converted into a binary firmware file, verify
+its checksum as follows:
+
+.. code-block:: sh
+
+ $ sha1sum rcar_gen4_pcie.bin
+ 1d0bd4b189b4eb009f5d564b1f93a79112994945 rcar_gen4_pcie.bin
+
+The resulting binary file called "rcar_gen4_pcie.bin" should be placed in the
+"/lib/firmware" directory before the driver runs.
diff --git a/Documentation/PCI/endpoint/pci-nvme-function.rst b/Documentation/PCI/endpoint/pci-nvme-function.rst
index df57b8e7d066..a68015317f7f 100644
--- a/Documentation/PCI/endpoint/pci-nvme-function.rst
+++ b/Documentation/PCI/endpoint/pci-nvme-function.rst
@@ -8,6 +8,6 @@ PCI NVMe Function
The PCI NVMe endpoint function implements a PCI NVMe controller using the NVMe
subsystem target core code. The driver for this function resides with the NVMe
-subsystem as drivers/nvme/target/nvmet-pciep.c.
+subsystem as drivers/nvme/target/pci-epf.c.
See Documentation/nvme/nvme-pci-endpoint-target.rst for more details.
diff --git a/Documentation/PCI/index.rst b/Documentation/PCI/index.rst
index 5e7c4e6e726b..5d720d2a415e 100644
--- a/Documentation/PCI/index.rst
+++ b/Documentation/PCI/index.rst
@@ -17,5 +17,6 @@ PCI Bus Subsystem
pci-error-recovery
pcieaer-howto
endpoint/index
+ controller/index
boot-interrupts
tph
diff --git a/Documentation/PCI/pcieaer-howto.rst b/Documentation/PCI/pcieaer-howto.rst
index f013f3b27c82..4b71e2f43ca7 100644
--- a/Documentation/PCI/pcieaer-howto.rst
+++ b/Documentation/PCI/pcieaer-howto.rst
@@ -85,12 +85,27 @@ In the example, 'Requester ID' means the ID of the device that sent
the error message to the Root Port. Please refer to PCIe specs for other
fields.
+AER Ratelimits
+--------------
+
+Since error messages can be generated for each transaction, we may see
+large volumes of errors reported. To prevent spammy devices from flooding
+the console/stalling execution, messages are throttled by device and error
+type (correctable vs. non-fatal uncorrectable). Fatal errors, including
+DPC errors, are not ratelimited.
+
+AER uses the default ratelimit of DEFAULT_RATELIMIT_BURST (10 events) over
+DEFAULT_RATELIMIT_INTERVAL (5 seconds).
+
+Ratelimits are exposed in the form of sysfs attributes and configurable.
+See Documentation/ABI/testing/sysfs-bus-pci-devices-aer.
+
AER Statistics / Counters
-------------------------
When PCIe AER errors are captured, the counters / statistics are also exposed
in the form of sysfs attributes which are documented at
-Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats
+Documentation/ABI/testing/sysfs-bus-pci-devices-aer.
Developer Guide
===============
diff --git a/Documentation/RCU/listRCU.rst b/Documentation/RCU/listRCU.rst
index ed5c9d8c9afe..d8bb98623c12 100644
--- a/Documentation/RCU/listRCU.rst
+++ b/Documentation/RCU/listRCU.rst
@@ -334,7 +334,7 @@ If the system-call audit module were to ever need to reject stale data, one way
to accomplish this would be to add a ``deleted`` flag and a ``lock`` spinlock to the
``audit_entry`` structure, and modify audit_filter_task() as follows::
- static enum audit_state audit_filter_task(struct task_struct *tsk)
+ static struct audit_entry *audit_filter_task(struct task_struct *tsk, char **key)
{
struct audit_entry *e;
enum audit_state state;
@@ -346,16 +346,18 @@ to accomplish this would be to add a ``deleted`` flag and a ``lock`` spinlock to
if (e->deleted) {
spin_unlock(&e->lock);
rcu_read_unlock();
- return AUDIT_BUILD_CONTEXT;
+ return NULL;
}
rcu_read_unlock();
if (state == AUDIT_STATE_RECORD)
*key = kstrdup(e->rule.filterkey, GFP_ATOMIC);
- return state;
+ /* As long as e->lock is held, e is valid and
+ * its value is not stale */
+ return e;
}
}
rcu_read_unlock();
- return AUDIT_BUILD_CONTEXT;
+ return NULL;
}
The ``audit_del_rule()`` function would need to set the ``deleted`` flag under the
diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst
index 53faeed7c190..be2eb6be16ec 100644
--- a/Documentation/RCU/whatisRCU.rst
+++ b/Documentation/RCU/whatisRCU.rst
@@ -15,6 +15,9 @@ to start learning about RCU:
| 2014 Big API Table https://lwn.net/Articles/609973/
| 6. The RCU API, 2019 Edition https://lwn.net/Articles/777036/
| 2019 Big API Table https://lwn.net/Articles/777165/
+| 7. The RCU API, 2024 Edition https://lwn.net/Articles/988638/
+| 2024 Background Information https://lwn.net/Articles/988641/
+| 2024 Big API Table https://lwn.net/Articles/988666/
For those preferring video:
diff --git a/Documentation/admin-guide/LSM/ipe.rst b/Documentation/admin-guide/LSM/ipe.rst
index f93a467db628..dc7088451f9d 100644
--- a/Documentation/admin-guide/LSM/ipe.rst
+++ b/Documentation/admin-guide/LSM/ipe.rst
@@ -423,7 +423,7 @@ Field descriptions:
Event Example::
- type=1422 audit(1653425529.927:53): policy_name="boot_verified" policy_version=0.0.0 policy_digest=sha256:820EEA5B40CA42B51F68962354BA083122A20BB846F26765076DD8EED7B8F4DB auid=4294967295 ses=4294967295 lsm=ipe res=1
+ type=1422 audit(1653425529.927:53): policy_name="boot_verified" policy_version=0.0.0 policy_digest=sha256:820EEA5B40CA42B51F68962354BA083122A20BB846F26765076DD8EED7B8F4DB auid=4294967295 ses=4294967295 lsm=ipe res=1 errno=0
type=1300 audit(1653425529.927:53): arch=c000003e syscall=1 success=yes exit=2567 a0=3 a1=5596fcae1fb0 a2=a07 a3=2 items=0 ppid=184 pid=229 auid=4294967295 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=pts0 ses=4294967295 comm="python3" exe="/usr/bin/python3.10" key=(null)
type=1327 audit(1653425529.927:53): PROCTITLE proctitle=707974686F6E3300746573742F6D61696E2E7079002D66002E2E
@@ -433,24 +433,55 @@ This record will always be emitted in conjunction with a ``AUDITSYSCALL`` record
Field descriptions:
-+----------------+------------+-----------+---------------------------------------------------+
-| Field | Value Type | Optional? | Description of Value |
-+================+============+===========+===================================================+
-| policy_name | string | No | The policy_name |
-+----------------+------------+-----------+---------------------------------------------------+
-| policy_version | string | No | The policy_version |
-+----------------+------------+-----------+---------------------------------------------------+
-| policy_digest | string | No | The policy hash |
-+----------------+------------+-----------+---------------------------------------------------+
-| auid | integer | No | The login user ID |
-+----------------+------------+-----------+---------------------------------------------------+
-| ses | integer | No | The login session ID |
-+----------------+------------+-----------+---------------------------------------------------+
-| lsm | string | No | The lsm name associated with the event |
-+----------------+------------+-----------+---------------------------------------------------+
-| res | integer | No | The result of the audited operation(success/fail) |
-+----------------+------------+-----------+---------------------------------------------------+
-
++----------------+------------+-----------+-------------------------------------------------------------+
+| Field | Value Type | Optional? | Description of Value |
++================+============+===========+=============================================================+
+| policy_name | string | Yes | The policy_name |
++----------------+------------+-----------+-------------------------------------------------------------+
+| policy_version | string | Yes | The policy_version |
++----------------+------------+-----------+-------------------------------------------------------------+
+| policy_digest | string | Yes | The policy hash |
++----------------+------------+-----------+-------------------------------------------------------------+
+| auid | integer | No | The login user ID |
++----------------+------------+-----------+-------------------------------------------------------------+
+| ses | integer | No | The login session ID |
++----------------+------------+-----------+-------------------------------------------------------------+
+| lsm | string | No | The lsm name associated with the event |
++----------------+------------+-----------+-------------------------------------------------------------+
+| res | integer | No | The result of the audited operation(success/fail) |
++----------------+------------+-----------+-------------------------------------------------------------+
+| errno | integer | No | Error code from policy loading operations (see table below) |
++----------------+------------+-----------+-------------------------------------------------------------+
+
+Policy error codes (errno):
+
+The following table lists the error codes that may appear in the errno field while loading or updating the policy:
+
++----------------+--------------------------------------------------------+
+| Error Code | Description |
++================+========================================================+
+| 0 | Success |
++----------------+--------------------------------------------------------+
+| -EPERM | Insufficient permission |
++----------------+--------------------------------------------------------+
+| -EEXIST | Same name policy already deployed |
++----------------+--------------------------------------------------------+
+| -EBADMSG | Policy is invalid |
++----------------+--------------------------------------------------------+
+| -ENOMEM | Out of memory (OOM) |
++----------------+--------------------------------------------------------+
+| -ERANGE | Policy version number overflow |
++----------------+--------------------------------------------------------+
+| -EINVAL | Policy version parsing error |
++----------------+--------------------------------------------------------+
+| -ENOKEY | Key used to sign the IPE policy not found in keyring |
++----------------+--------------------------------------------------------+
+| -EKEYREJECTED | Policy signature verification failed |
++----------------+--------------------------------------------------------+
+| -ESTALE | Attempting to update an IPE policy with older version |
++----------------+--------------------------------------------------------+
+| -ENOENT | Policy was deleted while updating |
++----------------+--------------------------------------------------------+
1404 AUDIT_MAC_STATUS
^^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 70b02f30013a..05301f03b717 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -259,7 +259,7 @@ Configuring the kernel
Compiling the kernel
--------------------
- - Make sure you have at least gcc 5.1 available.
+ - Make sure you have at least gcc 8.1 available.
For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
- Do a ``make`` to create a compressed kernel image. It is also possible to do
diff --git a/Documentation/admin-guide/blockdev/index.rst b/Documentation/admin-guide/blockdev/index.rst
index 957ccf617797..3262397ebe8f 100644
--- a/Documentation/admin-guide/blockdev/index.rst
+++ b/Documentation/admin-guide/blockdev/index.rst
@@ -11,6 +11,7 @@ Block Devices
nbd
paride
ramdisk
+ zoned_loop
zram
drbd/index
diff --git a/Documentation/admin-guide/blockdev/zoned_loop.rst b/Documentation/admin-guide/blockdev/zoned_loop.rst
new file mode 100644
index 000000000000..9c7aa3b482f3
--- /dev/null
+++ b/Documentation/admin-guide/blockdev/zoned_loop.rst
@@ -0,0 +1,169 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+Zoned Loop Block Device
+=======================
+
+.. Contents:
+
+ 1) Overview
+ 2) Creating a Zoned Device
+ 3) Deleting a Zoned Device
+ 4) Example
+
+
+1) Overview
+-----------
+
+The zoned loop block device driver (zloop) allows a user to create a zoned block
+device using one regular file per zone as backing storage. This driver does not
+directly control any hardware and uses read, write and truncate operations to
+regular files of a file system to emulate a zoned block device.
+
+Using zloop, zoned block devices with a configurable capacity, zone size and
+number of conventional zones can be created. The storage for each zone of the
+device is implemented using a regular file with a maximum size equal to the zone
+size. The size of a file backing a conventional zone is always equal to the zone
+size. The size of a file backing a sequential zone indicates the amount of data
+sequentially written to the file, that is, the size of the file directly
+indicates the position of the write pointer of the zone.
+
+When resetting a sequential zone, its backing file size is truncated to zero.
+Conversely, for a zone finish operation, the backing file is truncated to the
+zone size. With this, the maximum capacity of a zloop zoned block device can be
+configured to be larger than the storage space available on the backing file
+system. Of course, with such a configuration, writing more data than
+the storage space available on the backing file system will result in write
+errors.
+
+The zoned loop block device driver implements a complete zone transition state
+machine. That is, zones can be empty, implicitly opened, explicitly opened,
+closed or full. The current implementation does not support any limits on the
+maximum number of open and active zones.
+
+No user tools are necessary to create and delete zloop devices.
+
+2) Creating a Zoned Device
+--------------------------
+
+Once the zloop module is loaded (or if zloop is compiled in the kernel), the
+character device file /dev/zloop-control can be used to add a zloop device.
+This is done by writing an "add" command directly to the /dev/zloop-control
+device::
+
+ $ modprobe zloop
+ $ ls -l /dev/zloop*
+ crw-------. 1 root root 10, 123 Jan 6 19:18 /dev/zloop-control
+
+ $ mkdir -p <base directory>/<device ID>
+ $ echo "add [options]" > /dev/zloop-control
+
+The options available for the add command can be listed by reading the
+/dev/zloop-control device::
+
+ $ cat /dev/zloop-control
+ add id=%d,capacity_mb=%u,zone_size_mb=%u,zone_capacity_mb=%u,conv_zones=%u,base_dir=%s,nr_queues=%u,queue_depth=%u,buffered_io
+ remove id=%d
+
+In more details, the options that can be used with the "add" command are as
+follows.
+
+================ ===========================================================
+id Device number (the X in /dev/zloopX).
+ Default: automatically assigned.
+capacity_mb Device total capacity in MiB. This is always rounded up to
+ the nearest higher multiple of the zone size.
+ Default: 16384 MiB (16 GiB).
+zone_size_mb Device zone size in MiB. Default: 256 MiB.
+zone_capacity_mb Device zone capacity (must always be equal to or lower than
+                 the zone size). Default: zone size.
+conv_zones       Total number of conventional zones starting from sector 0.
+ Default: 8.
+base_dir         Path to the base directory in which to create the directory
+ containing the zone files of the device.
+ Default=/var/local/zloop.
+ The device directory containing the zone files is always
+ named with the device ID. E.g. the default zone file
+ directory for /dev/zloop0 is /var/local/zloop/0.
+nr_queues Number of I/O queues of the zoned block device. This value is
+                 always capped by the number of online CPUs.
+                 Default: 1.
+queue_depth Maximum I/O queue depth per I/O queue.
+ Default: 64
+buffered_io Do buffered IOs instead of direct IOs (default: false)
+================ ===========================================================
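+
+For example, the following creates a 1 GiB device with device ID 5, 32 MiB
+zones and a dedicated base directory (the path below is only illustrative; the
+per-device directory must exist before the add command is issued)::
+
+ $ mkdir -p /mnt/zones/5
+ $ echo "add id=5,capacity_mb=1024,zone_size_mb=32,base_dir=/mnt/zones" > /dev/zloop-control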
+
+3) Deleting a Zoned Device
+--------------------------
+
+Deleting an unused zoned loop block device is done by issuing the "remove"
+command to /dev/zloop-control, specifying the ID of the device to remove::
+
+ $ echo "remove id=X" > /dev/zloop-control
+
+The remove command does not have any option.
+
+A zoned device that was removed can be re-added without any change to the
+state of the device zones: the device zones are restored to their last state
+before the device was removed. Re-adding a zoned device after it was removed
+must always be done using the same configuration as when the device was first
+added. If a zone configuration change is detected, an error will be returned and
+the zoned device will not be created.
+
+To fully delete a zoned device, after executing the remove operation, the device
+base directory containing the backing files of the device zones must be deleted.
+
+4) Example
+----------
+
+The following sequence of commands creates a 2GB zoned device with zones of 64
+MB and a zone capacity of 63 MB::
+
+ $ modprobe zloop
+ $ mkdir -p /var/local/zloop/0
+ $ echo "add capacity_mb=2048,zone_size_mb=64,zone_capacity_mb=63" > /dev/zloop-control
+
+For the device created (/dev/zloop0), the zone backing files are all created
+under the default base directory (/var/local/zloop)::
+
+ $ ls -l /var/local/zloop/0
+ total 0
+ -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000000
+ -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000001
+ -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000002
+ -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000003
+ -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000004
+ -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000005
+ -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000006
+ -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000007
+ -rw-------. 1 root root 0 Jan 6 22:23 seq-000008
+ -rw-------. 1 root root 0 Jan 6 22:23 seq-000009
+ ...
+
+The zoned device created (/dev/zloop0) can then be used normally::
+
+ $ lsblk -z
+ NAME ZONED ZONE-SZ ZONE-NR ZONE-AMAX ZONE-OMAX ZONE-APP ZONE-WGRAN
+ zloop0 host-managed 64M 32 0 0 1M 4K
+ $ blkzone report /dev/zloop0
+ start: 0x000000000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)]
+ start: 0x000020000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)]
+ start: 0x000040000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)]
+ start: 0x000060000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)]
+ start: 0x000080000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)]
+ start: 0x0000a0000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)]
+ start: 0x0000c0000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)]
+ start: 0x0000e0000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)]
+ start: 0x000100000, len 0x020000, cap 0x01f800, wptr 0x000000 reset:0 non-seq:0, zcond: 1(em) [type: 2(SEQ_WRITE_REQUIRED)]
+ start: 0x000120000, len 0x020000, cap 0x01f800, wptr 0x000000 reset:0 non-seq:0, zcond: 1(em) [type: 2(SEQ_WRITE_REQUIRED)]
+ ...
+
+Deleting this device is done using the command::
+
+ $ echo "remove id=0" > /dev/zloop-control
+
+The removed device can be re-added using the same "add" command as when
+the device was first created. To fully delete a zoned device, its backing files
+should also be deleted after executing the remove command::
+
+ $ rm -r /var/local/zloop/0
diff --git a/Documentation/admin-guide/blockdev/zram.rst b/Documentation/admin-guide/blockdev/zram.rst
index 9bdb30901a93..3e273c1bb749 100644
--- a/Documentation/admin-guide/blockdev/zram.rst
+++ b/Documentation/admin-guide/blockdev/zram.rst
@@ -317,6 +317,26 @@ a single line of text and contains the following stats separated by whitespace:
Optional Feature
================
+IDLE pages tracking
+-------------------
+
+zram has built-in support for idle pages tracking (that is, allocated but
+not used pages). This feature is useful for e.g. zram writeback and
+recompression. In order to mark pages as idle, execute the following command::
+
+ echo all > /sys/block/zramX/idle
+
+This will mark all allocated zram pages as idle. The idle mark will be
+removed only when the page (block) is accessed (e.g. overwritten or freed).
+Additionally, when CONFIG_ZRAM_TRACK_ENTRY_ACTIME is enabled, pages can be
+marked as idle based on how many seconds have passed since the last access to
+a particular zram page::
+
+ echo 86400 > /sys/block/zramX/idle
+
+In this example, all pages which haven't been accessed in more than 86400
+seconds (one day) will be marked idle.
+
writeback
---------
@@ -331,24 +351,7 @@ If admin wants to use incompressible page writeback, they could do it via::
echo huge > /sys/block/zramX/writeback
-To use idle page writeback, first, user need to declare zram pages
-as idle::
-
- echo all > /sys/block/zramX/idle
-
-From now on, any pages on zram are idle pages. The idle mark
-will be removed until someone requests access of the block.
-IOW, unless there is access request, those pages are still idle pages.
-Additionally, when CONFIG_ZRAM_TRACK_ENTRY_ACTIME is enabled pages can be
-marked as idle based on how long (in seconds) it's been since they were
-last accessed::
-
- echo 86400 > /sys/block/zramX/idle
-
-In this example all pages which haven't been accessed in more than 86400
-seconds (one day) will be marked idle.
-
-Admin can request writeback of those idle pages at right timing via::
+Admin can request writeback of idle pages at the right time via::
echo idle > /sys/block/zramX/writeback
@@ -369,6 +372,23 @@ they could write a page index into the interface::
echo "page_index=1251" > /sys/block/zramX/writeback
+In Linux 6.16 this interface underwent some rework. First, the interface
+now supports `key=value` format for all of its parameters (`type=huge_idle`,
+etc.). Second, support for `page_indexes` was introduced, which specifies a
+`LOW-HIGH` range (or ranges) of pages to be written back. This reduces the
+number of syscalls and, more importantly, enables an optimal post-processing
+target selection strategy. Usage example::
+
+ echo "type=idle" > /sys/block/zramX/writeback
+ echo "page_indexes=1-100 page_indexes=200-300" > \
+ /sys/block/zramX/writeback
+
+We also now permit multiple page_index params per call and a mix of
+single pages and page ranges::
+
+ echo page_index=42 page_index=99 page_indexes=100-200 \
+ page_indexes=500-700 > /sys/block/zramX/writeback
+
If there are lots of write IO with flash device, potentially, it has
flash wearout problem so that admin needs to design write limitation
to guarantee storage health for entire product life.
@@ -482,8 +502,6 @@ attempt to recompress:::
echo "type=huge_idle max_pages=42" > /sys/block/zramX/recompress
-Recompression of idle pages requires memory tracking.
-
During re-compression for every page, that matches re-compression criteria,
ZRAM iterates the list of registered alternative compression algorithms in
order of their priorities. ZRAM stops either when re-compression was
diff --git a/Documentation/admin-guide/bug-hunting.rst b/Documentation/admin-guide/bug-hunting.rst
index ce6f4e8ca487..30858757c9f2 100644
--- a/Documentation/admin-guide/bug-hunting.rst
+++ b/Documentation/admin-guide/bug-hunting.rst
@@ -196,7 +196,7 @@ will see the assembler code for the routine shown, but if your kernel has
debug symbols the C code will also be available. (Debug symbols can be enabled
in the kernel hacking menu of the menu configuration.) For example::
- $ objdump -r -S -l --disassemble net/dccp/ipv4.o
+ $ objdump -r -S -l --disassemble net/ipv4/tcp.o
.. note::
diff --git a/Documentation/admin-guide/cgroup-v1/cgroups.rst b/Documentation/admin-guide/cgroup-v1/cgroups.rst
index a3e2edb3d274..463f98453323 100644
--- a/Documentation/admin-guide/cgroup-v1/cgroups.rst
+++ b/Documentation/admin-guide/cgroup-v1/cgroups.rst
@@ -13,7 +13,7 @@ Portions Copyright (c) 2004-2006 Silicon Graphics, Inc.
Modified by Paul Jackson <pj@sgi.com>
-Modified by Christoph Lameter <cl@linux.com>
+Modified by Christoph Lameter <cl@gentwo.org>
.. CONTENTS:
diff --git a/Documentation/admin-guide/cgroup-v1/cpusets.rst b/Documentation/admin-guide/cgroup-v1/cpusets.rst
index f401af5e2f09..c7909e5ac136 100644
--- a/Documentation/admin-guide/cgroup-v1/cpusets.rst
+++ b/Documentation/admin-guide/cgroup-v1/cpusets.rst
@@ -10,7 +10,7 @@ Written by Simon.Derr@bull.net
- Portions Copyright (c) 2004-2006 Silicon Graphics, Inc.
- Modified by Paul Jackson <pj@sgi.com>
-- Modified by Christoph Lameter <cl@linux.com>
+- Modified by Christoph Lameter <cl@gentwo.org>
- Modified by Paul Menage <menage@google.com>
- Modified by Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 1a16ce68a4d7..0cc35a14afbe 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1076,7 +1076,7 @@ cpufreq governor about the minimum desired frequency which should always be
provided by a CPU, as well as the maximum desired frequency, which should not
be exceeded by a CPU.
-WARNING: cgroup2 cpu controller doesn't yet fully support the control of
+WARNING: cgroup2 cpu controller doesn't yet support the (bandwidth) control of
realtime processes. For a kernel built with the CONFIG_RT_GROUP_SCHED option
enabled for group scheduling of realtime processes, the cpu controller can only
be enabled when all RT processes are in the root cgroup. Be aware that system
@@ -1095,19 +1095,34 @@ realtime processes irrespective of CONFIG_RT_GROUP_SCHED.
CPU Interface Files
~~~~~~~~~~~~~~~~~~~
-All time durations are in microseconds.
+The interaction of a process with the cpu controller depends on its scheduling
+policy and the underlying scheduler. From the point of view of the cpu controller,
+processes can be categorized as follows:
+
+* Processes under the fair-class scheduler
+* Processes under a BPF scheduler with the ``cgroup_set_weight`` callback
+* Everything else: ``SCHED_{FIFO,RR,DEADLINE}`` and processes under a BPF scheduler
+ without the ``cgroup_set_weight`` callback
+
+For details on when a process is under the fair-class scheduler or a BPF scheduler,
+check out :ref:`Documentation/scheduler/sched-ext.rst <sched-ext>`.
+
+For each of the following interface files, the above categories
+will be referred to. All time durations are in microseconds.
cpu.stat
A read-only flat-keyed file.
This file exists whether the controller is enabled or not.
- It always reports the following three stats:
+ It always reports the following three stats, which account for all the
+ processes in the cgroup:
- usage_usec
- user_usec
- system_usec
- and the following five when the controller is enabled:
+ and the following five when the controller is enabled, which account for
+ only the processes under the fair-class scheduler:
- nr_periods
- nr_throttled
@@ -1125,6 +1140,10 @@ All time durations are in microseconds.
If the cgroup has been configured to be SCHED_IDLE (cpu.idle = 1),
then the weight will show as a 0.
+ This file affects only processes under the fair-class scheduler and a BPF
+ scheduler with the ``cgroup_set_weight`` callback depending on what the
+ callback actually does.
+
cpu.weight.nice
A read-write single value file which exists on non-root
cgroups. The default is "0".
@@ -1137,6 +1156,10 @@ All time durations are in microseconds.
granularity is coarser for the nice values, the read value is
the closest approximation of the current weight.
+ This file affects only processes under the fair-class scheduler and a BPF
+ scheduler with the ``cgroup_set_weight`` callback depending on what the
+ callback actually does.
+
cpu.max
A read-write two value file which exists on non-root cgroups.
The default is "max 100000".
@@ -1149,43 +1172,55 @@ All time durations are in microseconds.
$PERIOD duration. "max" for $MAX indicates no limit. If only
one number is written, $MAX is updated.
+ This file affects only processes under the fair-class scheduler.
+
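+	A minimal usage sketch, allowing the cgroup at most 25ms of CPU time
+	in every 100ms period (the cgroup path below is illustrative)::
+
+	  echo "25000 100000" > /sys/fs/cgroup/example/cpu.max
+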
cpu.max.burst
A read-write single value file which exists on non-root
cgroups. The default is "0".
The burst in the range [0, $MAX].
+ This file affects only processes under the fair-class scheduler.
+
cpu.pressure
A read-write nested-keyed file.
Shows pressure stall information for CPU. See
:ref:`Documentation/accounting/psi.rst <psi>` for details.
+ This file accounts for all the processes in the cgroup.
+
cpu.uclamp.min
- A read-write single value file which exists on non-root cgroups.
- The default is "0", i.e. no utilization boosting.
+ A read-write single value file which exists on non-root cgroups.
+ The default is "0", i.e. no utilization boosting.
+
+ The requested minimum utilization (protection) as a percentage
+ rational number, e.g. 12.34 for 12.34%.
- The requested minimum utilization (protection) as a percentage
- rational number, e.g. 12.34 for 12.34%.
+ This interface allows reading and setting minimum utilization clamp
+ values similar to the sched_setattr(2). This minimum utilization
+ value is used to clamp the task specific minimum utilization clamp,
+ including those of realtime processes.
- This interface allows reading and setting minimum utilization clamp
- values similar to the sched_setattr(2). This minimum utilization
- value is used to clamp the task specific minimum utilization clamp.
+ The requested minimum utilization (protection) is always capped by
+ the current value for the maximum utilization (limit), i.e.
+ `cpu.uclamp.max`.
- The requested minimum utilization (protection) is always capped by
- the current value for the maximum utilization (limit), i.e.
- `cpu.uclamp.max`.
+ This file affects all the processes in the cgroup.
cpu.uclamp.max
- A read-write single value file which exists on non-root cgroups.
- The default is "max". i.e. no utilization capping
+ A read-write single value file which exists on non-root cgroups.
+ The default is "max". i.e. no utilization capping
+
+ The requested maximum utilization (limit) as a percentage rational
+ number, e.g. 98.76 for 98.76%.
- The requested maximum utilization (limit) as a percentage rational
- number, e.g. 98.76 for 98.76%.
+ This interface allows reading and setting maximum utilization clamp
+ values similar to the sched_setattr(2). This maximum utilization
+ value is used to clamp the task specific maximum utilization clamp,
+ including those of realtime processes.
- This interface allows reading and setting maximum utilization clamp
- values similar to the sched_setattr(2). This maximum utilization
- value is used to clamp the task specific maximum utilization clamp.
+ This file affects all the processes in the cgroup.
cpu.idle
A read-write single value file which exists on non-root cgroups.
@@ -1197,7 +1232,7 @@ All time durations are in microseconds.
own relative priorities, but the cgroup itself will be treated as
very low priority relative to its peers.
-
+ This file affects only processes under the fair-class scheduler.
Memory
------
@@ -1299,6 +1334,18 @@ PAGE_SIZE multiple when read back.
monitors the limited cgroup to alleviate heavy reclaim
pressure.
+ If memory.high is opened with O_NONBLOCK then the synchronous
+ reclaim is bypassed. This is useful for admin processes that
+ need to dynamically adjust the job's memory limits without
+ expending their own CPU resources on memory reclamation. The
+ job will trigger the reclaim and/or get throttled on its
+ next charge request.
+
+ Please note that with O_NONBLOCK, there is a chance that the
+	  target memory cgroup may take an indefinite amount of time to
+ reduce usage below the limit due to delayed charge request or
+ busy-hitting its memory to slow down reclaim.
+
memory.max
A read-write single value file which exists on non-root
cgroups. The default is "max".
@@ -1316,6 +1363,18 @@ PAGE_SIZE multiple when read back.
Caller could retry them differently, return into userspace
as -ENOMEM or silently ignore in cases like disk readahead.
+ If memory.max is opened with O_NONBLOCK, then the synchronous
+ reclaim and oom-kill are bypassed. This is useful for admin
+ processes that need to dynamically adjust the job's memory limits
+ without expending their own CPU resources on memory reclamation.
+ The job will trigger the reclaim and/or oom-kill on its next
+ charge request.
+
+ Please note that with O_NONBLOCK, there is a chance that the
+	  target memory cgroup may take an indefinite amount of time to
+ reduce usage below the limit due to delayed charge request or
+ busy-hitting its memory to slow down reclaim.
+
memory.reclaim
A write-only nested-keyed file which exists for all cgroups.
@@ -1348,6 +1407,9 @@ The following nested keys are defined.
same semantics as vm.swappiness applied to memcg reclaim with
all the existing limitations and potential future extensions.
+ The valid range for swappiness is [0-200, max], setting
+ swappiness=max exclusively reclaims anonymous memory.
+
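+
+	  A minimal usage sketch, reclaiming up to 512M while targeting only
+	  anonymous memory (the cgroup path below is illustrative)::
+
+	    echo "512M swappiness=max" > /sys/fs/cgroup/example/memory.reclaim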
memory.peak
A read-write single value file which exists on non-root cgroups.
@@ -1670,6 +1732,12 @@ The following nested keys are defined.
numa_hint_faults (npn)
Number of NUMA hinting faults.
+ numa_task_migrated (npn)
+		Number of task migrations by NUMA balancing.
+
+ numa_task_swapped (npn)
+		Number of task swaps by NUMA balancing.
+
pgdemote_kswapd
Number of pages demoted by kswapd.
@@ -3019,7 +3087,7 @@ Filesystem Support for Writeback
--------------------------------
A filesystem can support cgroup writeback by updating
-address_space_operations->writepage[s]() to annotate bio's using the
+address_space_operations->writepages() to annotate bio's using the
following two functions.
wbc_init_bio(@wbc, @bio)
diff --git a/Documentation/admin-guide/gpio/gpio-aggregator.rst b/Documentation/admin-guide/gpio/gpio-aggregator.rst
index 5cd1e7221756..8374a9df9105 100644
--- a/Documentation/admin-guide/gpio/gpio-aggregator.rst
+++ b/Documentation/admin-guide/gpio/gpio-aggregator.rst
@@ -69,6 +69,113 @@ write-only attribute files in sysfs.
$ echo gpio-aggregator.0 > delete_device
+Aggregating GPIOs using Configfs
+--------------------------------
+
+**Group:** ``/config/gpio-aggregator``
+
+ This is the root directory of the gpio-aggregator configfs tree.
+
+**Group:** ``/config/gpio-aggregator/<example-name>``
+
+ This directory represents a GPIO aggregator device. You can assign any
+ name to ``<example-name>`` (e.g. ``agg0``), except names starting with
+ ``_sysfs`` prefix, which are reserved for auto-generated configfs
+ entries corresponding to devices created via Sysfs.
+
+**Attribute:** ``/config/gpio-aggregator/<example-name>/live``
+
+  The ``live`` attribute allows the user to trigger the actual creation of the
+  device once it's fully configured. Accepted values are:
+
+ * ``1``, ``yes``, ``true`` : enable the virtual device
+ * ``0``, ``no``, ``false`` : disable the virtual device
+
+**Attribute:** ``/config/gpio-aggregator/<example-name>/dev_name``
+
+ The read-only ``dev_name`` attribute exposes the name of the device as it
+ will appear in the system on the platform bus (e.g. ``gpio-aggregator.0``).
+ This is useful for identifying a character device for the newly created
+ aggregator. If it's ``gpio-aggregator.0``,
+ ``/sys/devices/platform/gpio-aggregator.0/gpiochipX`` path tells you that the
+ GPIO device id is ``X``.
+
+You must create a subdirectory for each virtual line you want to
+instantiate, named exactly ``line0``, ``line1``, ..., ``lineY``, where
+``Y+1`` (Y >= 0) is the number of lines to instantiate. Configure all lines
+before activating the device by setting ``live`` to 1.
+
+**Group:** ``/config/gpio-aggregator/<example-name>/<lineY>/``
+
+ This directory represents a GPIO line to include in the aggregator.
+
+**Attribute:** ``/config/gpio-aggregator/<example-name>/<lineY>/key``
+
+**Attribute:** ``/config/gpio-aggregator/<example-name>/<lineY>/offset``
+
+ The default values after creating the ``<lineY>`` directory are:
+
+ * ``key`` : <empty>
+ * ``offset`` : -1
+
+  ``key`` must always be explicitly configured, while ``offset`` depends on the
+  lookup pattern used. Two configuration patterns exist for each ``<lineY>``:
+
+ (a). For lookup by GPIO line name:
+
+ * Set ``key`` to the line name.
+ * Ensure ``offset`` remains -1 (the default).
+
+ (b). For lookup by GPIO chip name and the line offset within the chip:
+
+ * Set ``key`` to the chip name.
+ * Set ``offset`` to the line offset (0 <= ``offset`` < 65535).
+
+**Attribute:** ``/config/gpio-aggregator/<example-name>/<lineY>/name``
+
+ The ``name`` attribute sets a custom name for lineY. If left unset, the
+ line will remain unnamed.
+
+Once the configuration is done, the ``live`` attribute must be set to 1
+in order to instantiate the aggregator device. It can be set back to 0 to
+destroy the virtual device. The module will synchronously wait for the new
+aggregator device to be successfully probed; if this doesn't happen, writing
+to ``live`` will result in an error. This is different behaviour from the
+case when you create the device using the sysfs ``new_device`` interface.
+
+.. note::
+
+ For aggregators created via Sysfs, the configfs entries are
+ auto-generated and appear as ``/config/gpio-aggregator/_sysfs.<N>/``. You
+ cannot add or remove line directories with mkdir(2)/rmdir(2). To modify
+ lines, you must use the "delete_device" interface to tear down the
+ existing device and reconfigure it from scratch. However, you can still
+ toggle the aggregator with the ``live`` attribute and adjust the
+ ``key``, ``offset``, and ``name`` attributes for each line when ``live``
+ is set to 0 by hand (i.e. it's not waiting for deferred probe).
+
+Sample configuration commands
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: sh
+
+ # Create a directory for an aggregator device
+ $ mkdir /sys/kernel/config/gpio-aggregator/agg0
+
+ # Configure each line
+ $ mkdir /sys/kernel/config/gpio-aggregator/agg0/line0
+ $ echo gpiochip0 > /sys/kernel/config/gpio-aggregator/agg0/line0/key
+ $ echo 6 > /sys/kernel/config/gpio-aggregator/agg0/line0/offset
+ $ echo test0 > /sys/kernel/config/gpio-aggregator/agg0/line0/name
+ $ mkdir /sys/kernel/config/gpio-aggregator/agg0/line1
+ $ echo gpiochip0 > /sys/kernel/config/gpio-aggregator/agg0/line1/key
+ $ echo 7 > /sys/kernel/config/gpio-aggregator/agg0/line1/offset
+ $ echo test1 > /sys/kernel/config/gpio-aggregator/agg0/line1/name
+
+ # Activate the aggregator device
+ $ echo 1 > /sys/kernel/config/gpio-aggregator/agg0/live
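+
+    # Optionally, read back the name of the platform device that was created
+    # (the exact value is system-dependent, e.g. gpio-aggregator.0)
+    $ cat /sys/kernel/config/gpio-aggregator/agg0/dev_name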
+
+
Generic GPIO Driver
-------------------
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
index ff0b440ef2dc..09890a8f3ee9 100644
--- a/Documentation/admin-guide/hw-vuln/index.rst
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -22,3 +22,6 @@ are configurable at compile, boot or run time.
srso
gather_data_sampling
reg-file-data-sampling
+ rsb
+ old_microcode
+ indirect-target-selection
diff --git a/Documentation/admin-guide/hw-vuln/indirect-target-selection.rst b/Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
new file mode 100644
index 000000000000..d9ca64108d23
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
@@ -0,0 +1,168 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Indirect Target Selection (ITS)
+===============================
+
+ITS is a vulnerability in some Intel CPUs that support Enhanced IBRS and were
+released before Alder Lake. ITS may allow an attacker to control the prediction
+of indirect branches and RETs located in the lower half of a cacheline.
+
+ITS is assigned CVE-2024-28956 with a CVSS score of 4.7 (Medium).
+
+Scope of Impact
+---------------
+- **eIBRS Guest/Host Isolation**: Indirect branches in KVM/kernel may still be
+ predicted with unintended target corresponding to a branch in the guest.
+
+- **Intra-Mode BTI**: In-kernel training such as through cBPF or other native
+ gadgets.
+
+- **Indirect Branch Prediction Barrier (IBPB)**: After an IBPB, indirect
+ branches may still be predicted with targets corresponding to direct branches
+ executed prior to the IBPB. This is fixed by the IPU 2025.1 microcode, which
+ should be available via distro updates. Alternatively microcode can be
+ obtained from Intel's github repository [#f1]_.
+
+Affected CPUs
+-------------
+Below is the list of ITS affected CPUs [#f2]_ [#f3]_:
+
+ ======================== ============ ==================== ===============
+ Common name Family_Model eIBRS Intra-mode BTI
+ Guest/Host Isolation
+ ======================== ============ ==================== ===============
+ SKYLAKE_X (step >= 6) 06_55H Affected Affected
+ ICELAKE_X 06_6AH Not affected Affected
+ ICELAKE_D 06_6CH Not affected Affected
+ ICELAKE_L 06_7EH Not affected Affected
+ TIGERLAKE_L 06_8CH Not affected Affected
+ TIGERLAKE 06_8DH Not affected Affected
+ KABYLAKE_L (step >= 12) 06_8EH Affected Affected
+ KABYLAKE (step >= 13) 06_9EH Affected Affected
+ COMETLAKE 06_A5H Affected Affected
+ COMETLAKE_L 06_A6H Affected Affected
+ ROCKETLAKE 06_A7H Not affected Affected
+ ======================== ============ ==================== ===============
+
+- All affected CPUs enumerate Enhanced IBRS feature.
+- IBPB isolation is affected on all ITS affected CPUs, and needs a microcode
+  update for mitigation.
+- None of the affected CPUs enumerate BHI_CTRL which was introduced in Golden
+ Cove (Alder Lake and Sapphire Rapids). This can help guests to determine the
+ host's affected status.
+- Intel Atom CPUs are not affected by ITS.
+
+Mitigation
+----------
+As only the indirect branches and RETs that have their last byte of instruction
+in the lower half of the cacheline are vulnerable to ITS, the basic idea behind
+the mitigation is to not allow indirect branches in the lower half.
+
+This is achieved by relying on existing retpoline support in the kernel, and in
+compilers. ITS-vulnerable retpoline sites are runtime patched to point to newly
+added ITS-safe thunks. These safe thunks consist of an indirect branch in the
+second half of the cacheline. Not all retpoline sites are patched to thunks; if
+a retpoline site is evaluated to be ITS-safe, it is replaced with an inline
+indirect branch.
+
+Dynamic thunks
+~~~~~~~~~~~~~~
+From a dynamically allocated pool of safe-thunks, each vulnerable site is
+replaced with a new thunk, such that they get a unique address. This could
+improve the branch prediction accuracy. Also, it is a defense-in-depth measure
+against aliasing.
+
+Note, for simplicity, indirect branches in eBPF programs are always replaced
+with a jump to a static thunk in __x86_indirect_its_thunk_array. If required,
+in future this can be changed to use dynamic thunks.
+
+All vulnerable RETs are replaced with a static thunk; they do not use dynamic
+thunks. This is because RETs mostly get their prediction from the RSB, which
+does not depend on the source address. RETs that underflow the RSB may benefit
+from dynamic thunks. But RETs significantly outnumber indirect branches, and
+any benefit from a unique source address could be outweighed by the increased
+icache footprint and iTLB pressure.
+
+Retpoline
+~~~~~~~~~
+The retpoline sequence also mitigates ITS-unsafe indirect branches. For this
+reason, when retpoline is enabled, the ITS mitigation only relocates the RETs
+to safe thunks, unless the user requested the RSB-stuffing mitigation.
+
+RSB Stuffing
+~~~~~~~~~~~~
+RSB-stuffing via Call Depth Tracking is a mitigation for Retbleed RSB-underflow
+attacks, and it also mitigates RETs that are vulnerable to ITS.
+
+Mitigation in guests
+^^^^^^^^^^^^^^^^^^^^
+All guests deploy ITS mitigation by default, irrespective of eIBRS enumeration
+and Family/Model of the guest. This is because eIBRS feature could be hidden
+from a guest. One exception to this is when a guest enumerates BHI_DIS_S, which
+indicates that the guest is running on an unaffected host.
+
+To prevent guests from unnecessarily deploying the mitigation on unaffected
+platforms, Intel has defined ITS_NO bit(62) in MSR IA32_ARCH_CAPABILITIES. When
+a guest sees this bit set, it should not enumerate the ITS bug. Note, this bit
+is not set by any hardware, but is **intended for VMMs to synthesize** it for
+guests as per the host's affected status.
+
+Mitigation options
+^^^^^^^^^^^^^^^^^^
+The ITS mitigation can be controlled using the "indirect_target_selection"
+kernel parameter. The available options are:
+
+ ======== ===================================================================
+ on (default) Deploy the "Aligned branch/return thunks" mitigation.
+ If spectre_v2 mitigation enables retpoline, aligned-thunks are only
+ deployed for the affected RET instructions. Retpoline mitigates
+ indirect branches.
+
+ off Disable ITS mitigation.
+
+ vmexit Equivalent to "=on" if the CPU is affected by guest/host isolation
+ part of ITS. Otherwise, mitigation is not deployed. This option is
+ useful when host userspace is not in the threat model, and only
+ attacks from guest to host are considered.
+
+ stuff Deploy RSB-fill mitigation when retpoline is also deployed.
+ Otherwise, deploy the default mitigation. When retpoline mitigation
+ is enabled, RSB-stuffing via Call-Depth-Tracking also mitigates
+ ITS.
+
+ force Force the ITS bug and deploy the default mitigation.
+ ======== ===================================================================
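+
+For example, to limit the mitigation to the guest/host attack surface, boot
+with ``indirect_target_selection=vmexit`` on the kernel command line and
+verify the resulting state afterwards (an illustrative check)::
+
+    $ cat /sys/devices/system/cpu/vulnerabilities/indirect_target_selection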
+
+Sysfs reporting
+---------------
+
+The sysfs file showing ITS mitigation status is:
+
+ /sys/devices/system/cpu/vulnerabilities/indirect_target_selection
+
+Note, microcode mitigation status is not reported in this file.
+
+The possible values in this file are:
+
+.. list-table::
+
+ * - Not affected
+ - The processor is not vulnerable.
+ * - Vulnerable
+ - System is vulnerable and no mitigation has been applied.
+ * - Vulnerable, KVM: Not affected
+ - System is vulnerable to intra-mode BTI, but not affected by eIBRS
+ guest/host isolation.
+ * - Mitigation: Aligned branch/return thunks
+ - The mitigation is enabled, affected indirect branches and RETs are
+ relocated to safe thunks.
+ * - Mitigation: Retpolines, Stuffing RSB
+ - The mitigation is enabled using retpoline and RSB stuffing.
+
+References
+----------
+.. [#f1] Microcode repository - https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files
+
+.. [#f2] Affected Processors list - https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
+
+.. [#f3] Affected Processors list (machine readable) - https://github.com/intel/Intel-affected-processor-list
diff --git a/Documentation/admin-guide/hw-vuln/old_microcode.rst b/Documentation/admin-guide/hw-vuln/old_microcode.rst
new file mode 100644
index 000000000000..6ded8f86b8d0
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/old_microcode.rst
@@ -0,0 +1,21 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+Old Microcode
+=============
+
+The kernel keeps a table of released microcode. Systems that had
+microcode older than this at boot will say "Vulnerable". This means
+that the system was vulnerable to some known CPU issue. It could be
+security or functional, the kernel does not know or care.
+
+You should update the CPU microcode to mitigate any exposure. This is
+usually accomplished by updating the files in
+/lib/firmware/intel-ucode/ via normal distribution updates. Intel also
+distributes these files in a github repo:
+
+ https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files.git
+
+Just like all the other hardware vulnerabilities, exposure is
+determined at boot. Runtime microcode updates do not change the status
+of this vulnerability.
diff --git a/Documentation/admin-guide/hw-vuln/rsb.rst b/Documentation/admin-guide/hw-vuln/rsb.rst
new file mode 100644
index 000000000000..21dbf9cf25f8
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/rsb.rst
@@ -0,0 +1,268 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+RSB-related mitigations
+=======================
+
+.. warning::
+ Please keep this document up-to-date, otherwise you will be
+ volunteered to update it and convert it to a very long comment in
+ bugs.c!
+
+Since 2018 there have been many Spectre CVEs related to the Return Stack
+Buffer (RSB) (sometimes referred to as the Return Address Stack (RAS) or
+Return Address Predictor (RAP) on AMD).
+
+Information about these CVEs and how to mitigate them is scattered
+amongst a myriad of microarchitecture-specific documents.
+
+This document attempts to consolidate all the relevant information in
+one place and clarify the reasoning behind the current RSB-related
+mitigations. It's meant to be as concise as possible, focused only on
+the current kernel mitigations: what are the RSB-related attack vectors
+and how are they currently being mitigated?
+
+It's *not* meant to describe how the RSB mechanism operates or how the
+exploits work. More details about those can be found in the references
+below.
+
+Rather, this is basically a glorified comment, but too long to actually
+be one. So when the next CVE comes along, a kernel developer can
+quickly refer to this as a refresher to see what we're actually doing
+and why.
+
+At a high level, there are two classes of RSB attacks: RSB poisoning
+(Intel and AMD) and RSB underflow (Intel only). They must each be
+considered individually for each attack vector (and microarchitecture
+where applicable).
+
+----
+
+RSB poisoning (Intel and AMD)
+=============================
+
+SpectreRSB
+~~~~~~~~~~
+
+RSB poisoning is a technique used by SpectreRSB [#spectre-rsb]_ where
+an attacker poisons an RSB entry to cause a victim's return instruction
+to speculate to an attacker-controlled address. This can happen when
+there are unbalanced CALLs/RETs after a context switch or VMEXIT.
+
+* All attack vectors can potentially be mitigated by flushing out any
+ poisoned RSB entries using an RSB filling sequence
+ [#intel-rsb-filling]_ [#amd-rsb-filling]_ when transitioning between
+ untrusted and trusted domains. But this has a performance impact and
+ should be avoided whenever possible.
+
+ .. DANGER::
+ **FIXME**: Currently we're flushing 32 entries. However, some CPU
+ models have more than 32 entries. The loop count needs to be
+ increased for those. More detailed information is needed about RSB
+ sizes.
+
+* On context switch, the user->user mitigation requires ensuring the
+ RSB gets filled or cleared whenever IBPB gets written [#cond-ibpb]_
+ during a context switch:
+
+ * AMD:
+ On Zen 4+, IBPB (or SBPB [#amd-sbpb]_ if used) clears the RSB.
+ This is indicated by IBPB_RET in CPUID [#amd-ibpb-rsb]_.
+
+    On Zen < 4, the RSB filling sequence [#amd-rsb-filling]_ must
+ always be done in addition to IBPB [#amd-ibpb-no-rsb]_. This is
+ indicated by X86_BUG_IBPB_NO_RET.
+
+ * Intel:
+ IBPB always clears the RSB:
+
+ "Software that executed before the IBPB command cannot control
+ the predicted targets of indirect branches executed after the
+ command on the same logical processor. The term indirect branch
+ in this context includes near return instructions, so these
+ predicted targets may come from the RSB." [#intel-ibpb-rsb]_
+
+* On context switch, user->kernel attacks are prevented by SMEP. User
+ space can only insert user space addresses into the RSB. Even
+ non-canonical addresses can't be inserted due to the page gap at the
+ end of the user canonical address space reserved by TASK_SIZE_MAX.
+ A SMEP #PF at instruction fetch prevents the kernel from speculatively
+ executing user space.
+
+ * AMD:
+ "Finally, branches that are predicted as 'ret' instructions get
+ their predicted targets from the Return Address Predictor (RAP).
+ AMD recommends software use a RAP stuffing sequence (mitigation
+ V2-3 in [2]) and/or Supervisor Mode Execution Protection (SMEP)
+ to ensure that the addresses in the RAP are safe for
+ speculation. Collectively, we refer to these mitigations as "RAP
+ Protection"." [#amd-smep-rsb]_
+
+ * Intel:
+ "On processors with enhanced IBRS, an RSB overwrite sequence may
+ not suffice to prevent the predicted target of a near return
+ from using an RSB entry created in a less privileged predictor
+ mode. Software can prevent this by enabling SMEP (for
+ transitions from user mode to supervisor mode) and by having
+ IA32_SPEC_CTRL.IBRS set during VM exits." [#intel-smep-rsb]_
+
+* On VMEXIT, guest->host attacks are mitigated by eIBRS (and PBRSB
+ mitigation if needed):
+
+ * AMD:
+ "When Automatic IBRS is enabled, the internal return address
+ stack used for return address predictions is cleared on VMEXIT."
+ [#amd-eibrs-vmexit]_
+
+ * Intel:
+ "On processors with enhanced IBRS, an RSB overwrite sequence may
+ not suffice to prevent the predicted target of a near return
+ from using an RSB entry created in a less privileged predictor
+ mode. Software can prevent this by enabling SMEP (for
+ transitions from user mode to supervisor mode) and by having
+ IA32_SPEC_CTRL.IBRS set during VM exits. Processors with
+ enhanced IBRS still support the usage model where IBRS is set
+ only in the OS/VMM for OSes that enable SMEP. To do this, such
+ processors will ensure that guest behavior cannot control the
+ RSB after a VM exit once IBRS is set, even if IBRS was not set
+ at the time of the VM exit." [#intel-eibrs-vmexit]_
+
+ Note that some Intel CPUs are susceptible to Post-barrier Return
+ Stack Buffer Predictions (PBRSB) [#intel-pbrsb]_, where the last
+ CALL from the guest can be used to predict the first unbalanced RET.
+ In this case the PBRSB mitigation is needed in addition to eIBRS.
+
+AMD RETBleed / SRSO / Branch Type Confusion
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+On AMD, poisoned RSB entries can also be created by the AMD RETBleed
+variant [#retbleed-paper]_ [#amd-btc]_ or by Speculative Return Stack
+Overflow [#amd-srso]_ (Inception [#inception-paper]_). The kernel
+protects itself by replacing every RET in the kernel with a branch to a
+single safe RET.
+
+----
+
+RSB underflow (Intel only)
+==========================
+
+RSB Alternate (RSBA) ("Intel Retbleed")
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some Intel Skylake-generation CPUs are susceptible to the Intel variant
+of RETBleed [#retbleed-paper]_ (Return Stack Buffer Underflow
+[#intel-rsbu]_). If a RET is executed when the RSB buffer is empty due
+to mismatched CALLs/RETs or returning from a deep call stack, the branch
+predictor can fall back to using the Branch Target Buffer (BTB). If a
+user forces a BTB collision then the RET can speculatively branch to a
+user-controlled address.
+
+* Note that RSB filling doesn't fully mitigate this issue. If there
+ are enough unbalanced RETs, the RSB may still underflow and fall back
+ to using a poisoned BTB entry.
+
+* On context switch, user->user underflow attacks are mitigated by the
+ conditional IBPB [#cond-ibpb]_ on context switch which effectively
+ clears the BTB:
+
+ * "The indirect branch predictor barrier (IBPB) is an indirect branch
+ control mechanism that establishes a barrier, preventing software
+ that executed before the barrier from controlling the predicted
+ targets of indirect branches executed after the barrier on the same
+ logical processor." [#intel-ibpb-btb]_
+
+* On context switch and VMEXIT, user->kernel and guest->host RSB
+ underflows are mitigated by IBRS or eIBRS:
+
+ * "Enabling IBRS (including enhanced IBRS) will mitigate the "RSBU"
+ attack demonstrated by the researchers. As previously documented,
+ Intel recommends the use of enhanced IBRS, where supported. This
+ includes any processor that enumerates RRSBA but not RRSBA_DIS_S."
+ [#intel-rsbu]_
+
+ However, note that eIBRS and IBRS do not mitigate intra-mode attacks.
+ Like RRSBA below, this is mitigated by clearing the BHB on kernel
+ entry.
+
+ As an alternative to classic IBRS, call depth tracking (combined with
+ retpolines) can be used to track kernel returns and fill the RSB when
+ it gets close to being empty.
+
+Restricted RSB Alternate (RRSBA)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some newer Intel CPUs have Restricted RSB Alternate (RRSBA) behavior,
+which, similar to RSBA described above, also falls back to using the BTB
+on RSB underflow. The only difference is that the predicted targets are
+restricted to the current domain when eIBRS is enabled:
+
+* "Restricted RSB Alternate (RRSBA) behavior allows alternate branch
+ predictors to be used by near RET instructions when the RSB is
+ empty. When eIBRS is enabled, the predicted targets of these
+ alternate predictors are restricted to those belonging to the
+  indirect branch predictor entries of the current prediction domain."
+  [#intel-eibrs-rrsba]_
+
+When a CPU with RRSBA is vulnerable to Branch History Injection
+[#bhi-paper]_ [#intel-bhi]_, an RSB underflow could be used for an
+intra-mode BTI attack. This is mitigated by clearing the BHB on
+kernel entry.
+
+However if the kernel uses retpolines instead of eIBRS, it needs to
+disable RRSBA:
+
+* "Where software is using retpoline as a mitigation for BHI or
+ intra-mode BTI, and the processor both enumerates RRSBA and
+ enumerates RRSBA_DIS controls, it should disable this behavior."
+ [#intel-retpoline-rrsba]_
+
+----
+
+References
+==========
+
+.. [#spectre-rsb] `Spectre Returns! Speculation Attacks using the Return Stack Buffer <https://arxiv.org/pdf/1807.07940.pdf>`_
+
+.. [#intel-rsb-filling] "Empty RSB Mitigation on Skylake-generation" in `Retpoline: A Branch Target Injection Mitigation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/retpoline-branch-target-injection-mitigation.html#inpage-nav-5-1>`_
+
+.. [#amd-rsb-filling] "Mitigation V2-3" in `Software Techniques for Managing Speculation <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/software-techniques-for-managing-speculation.pdf>`_
+
+.. [#cond-ibpb] Whether IBPB is written depends on whether the prev and/or next task is protected from Spectre attacks. It typically requires opting in per task or system-wide. For more details see the documentation for the ``spectre_v2_user`` cmdline option in Documentation/admin-guide/kernel-parameters.txt.
+
+.. [#amd-sbpb] IBPB without flushing of branch type predictions. Only exists for AMD.
+
+.. [#amd-ibpb-rsb] "Function 8000_0008h -- Processor Capacity Parameters and Extended Feature Identification" in `AMD64 Architecture Programmer's Manual Volume 3: General-Purpose and System Instructions <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/24594.pdf>`_. SBPB behaves the same way according to `this email <https://lore.kernel.org/5175b163a3736ca5fd01cedf406735636c99a>`_.
+
+.. [#amd-ibpb-no-rsb] `Spectre Attacks: Exploiting Speculative Execution <https://comsec.ethz.ch/wp-content/files/ibpb_sp25.pdf>`_
+
+.. [#intel-ibpb-rsb] "Introduction" in `Post-barrier Return Stack Buffer Predictions / CVE-2022-26373 / INTEL-SA-00706 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/post-barrier-return-stack-buffer-predictions.html>`_
+
+.. [#amd-smep-rsb] "Existing Mitigations" in `Technical Guidance for Mitigating Branch Type Confusion <https://www.amd.com/content/dam/amd/en/documents/resources/technical-guidance-for-mitigating-branch-type-confusion.pdf>`_
+
+.. [#intel-smep-rsb] "Enhanced IBRS" in `Indirect Branch Restricted Speculation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-restricted-speculation.html>`_
+
+.. [#amd-eibrs-vmexit] "Extended Feature Enable Register (EFER)" in `AMD64 Architecture Programmer's Manual Volume 2: System Programming <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/24593.pdf>`_
+
+.. [#intel-eibrs-vmexit] "Enhanced IBRS" in `Indirect Branch Restricted Speculation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-restricted-speculation.html>`_
+
+.. [#intel-pbrsb] `Post-barrier Return Stack Buffer Predictions / CVE-2022-26373 / INTEL-SA-00706 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/post-barrier-return-stack-buffer-predictions.html>`_
+
+.. [#retbleed-paper] `RETBleed: Arbitrary Speculative Code Execution with Return Instruction <https://comsec.ethz.ch/wp-content/files/retbleed_sec22.pdf>`_
+
+.. [#amd-btc] `Technical Guidance for Mitigating Branch Type Confusion <https://www.amd.com/content/dam/amd/en/documents/resources/technical-guidance-for-mitigating-branch-type-confusion.pdf>`_
+
+.. [#amd-srso] `Technical Update Regarding Speculative Return Stack Overflow <https://www.amd.com/content/dam/amd/en/documents/corporate/cr/speculative-return-stack-overflow-whitepaper.pdf>`_
+
+.. [#inception-paper] `Inception: Exposing New Attack Surfaces with Training in Transient Execution <https://comsec.ethz.ch/wp-content/files/inception_sec23.pdf>`_
+
+.. [#intel-rsbu] `Return Stack Buffer Underflow / Return Stack Buffer Underflow / CVE-2022-29901, CVE-2022-28693 / INTEL-SA-00702 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/return-stack-buffer-underflow.html>`_
+
+.. [#intel-ibpb-btb] `Indirect Branch Predictor Barrier <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-predictor-barrier.html>`_
+
+.. [#intel-eibrs-rrsba] "Guidance for RSBU" in `Return Stack Buffer Underflow / Return Stack Buffer Underflow / CVE-2022-29901, CVE-2022-28693 / INTEL-SA-00702 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/return-stack-buffer-underflow.html>`_
+
+.. [#bhi-paper] `Branch History Injection: On the Effectiveness of Hardware Mitigations Against Cross-Privilege Spectre-v2 Attacks <http://download.vusec.net/papers/bhi-spectre-bhb_sec22.pdf>`_
+
+.. [#intel-bhi] `Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/branch-history-injection.html>`_
+
+.. [#intel-retpoline-rrsba] "Retpoline" in `Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/branch-history-injection.html>`_
diff --git a/Documentation/admin-guide/kdump/kdump.rst b/Documentation/admin-guide/kdump/kdump.rst
index 1f7f14c6e184..20fabdf6567e 100644
--- a/Documentation/admin-guide/kdump/kdump.rst
+++ b/Documentation/admin-guide/kdump/kdump.rst
@@ -547,6 +547,38 @@ from within add_taint() whenever the value set in this bitmask matches with the
bit flag being set by add_taint().
This will cause a kdump to occur at the add_taint()->panic() call.
+Write the dump file to encrypted disk volume
+============================================
+
+CONFIG_CRASH_DM_CRYPT can be enabled to support saving the dump file to an
+encrypted disk volume (only x86_64 supported for now). User space can interact
+with /sys/kernel/config/crash_dm_crypt_keys for setup:
+
+1. Tell the first kernel what logon keys are needed to unlock the disk volumes:
+ # Add key #1
+ mkdir /sys/kernel/config/crash_dm_crypt_keys/7d26b7b4-e342-4d2d-b660-7426b0996720
+ # Add key #1's description
+ echo cryptsetup:7d26b7b4-e342-4d2d-b660-7426b0996720 > /sys/kernel/config/crash_dm_crypt_keys/description
+
+ # how many keys do we have now?
+ cat /sys/kernel/config/crash_dm_crypt_keys/count
+ 1
+
+ # Add key #2 in the same way
+
+ # how many keys do we have now?
+ cat /sys/kernel/config/crash_dm_crypt_keys/count
+ 2
+
+ # To support CPU/memory hot-plugging, re-use keys already saved to reserved
+ # memory
+   echo true > /sys/kernel/config/crash_dm_crypt_keys/reuse
+
+2. Load the dump-capture kernel
+
+3. After the dump-capture kernel gets booted, restore the keys to the user keyring:
+   echo yes > /sys/kernel/config/crash_dm_crypt_keys/restore
+
Contact
=======
diff --git a/Documentation/admin-guide/kdump/vmcoreinfo.rst b/Documentation/admin-guide/kdump/vmcoreinfo.rst
index 0f714fc945ac..8cf4614385b7 100644
--- a/Documentation/admin-guide/kdump/vmcoreinfo.rst
+++ b/Documentation/admin-guide/kdump/vmcoreinfo.rst
@@ -331,8 +331,8 @@ PG_lru|PG_private|PG_swapcache|PG_swapbacked|PG_slab|PG_hwpoision|PG_head_mask|P
Page attributes. These flags are used to filter various unnecessary for
dumping pages.
-PAGE_BUDDY_MAPCOUNT_VALUE(~PG_buddy)|PAGE_OFFLINE_MAPCOUNT_VALUE(~PG_offline)
------------------------------------------------------------------------------
+PAGE_BUDDY_MAPCOUNT_VALUE(~PG_buddy)|PAGE_OFFLINE_MAPCOUNT_VALUE(~PG_offline)|PAGE_OFFLINE_MAPCOUNT_VALUE(~PG_unaccepted)
+-------------------------------------------------------------------------------------------------------------------------
More page attributes. These flags are used to filter various unnecessary for
dumping pages.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 76e538c77e31..a3ea40b22fb9 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1407,18 +1407,15 @@
earlyprintk=serial[,0x...[,baudrate]]
earlyprintk=ttySn[,baudrate]
earlyprintk=dbgp[debugController#]
+ earlyprintk=mmio32,membase[,{nocfg|baudrate}]
earlyprintk=pciserial[,force],bus:device.function[,{nocfg|baudrate}]
earlyprintk=xdbc[xhciController#]
earlyprintk=bios
- earlyprintk=mmio,membase[,{nocfg|baudrate}]
earlyprintk is useful when the kernel crashes before
the normal console is initialized. It is not enabled by
default because it has some cosmetic problems.
- Only 32-bit memory addresses are supported for "mmio"
- and "pciserial" devices.
-
Use "nocfg" to skip UART configuration, assume
BIOS/firmware has configured UART correctly.
@@ -1831,6 +1828,13 @@
lz4: Select LZ4 compression algorithm to
compress/decompress hibernation image.
+ hibernate.pm_test_delay=
+ [HIBERNATION]
+ Sets the number of seconds to remain in a hibernation test
+ mode before resuming the system (see
+ /sys/power/pm_test). Only available when CONFIG_PM_DEBUG
+ is set. Default value is 5.
+
highmem=nn[KMG] [KNL,BOOT,EARLY] forces the highmem zone to have an exact
size of <nn>. This works even on boxes that have no
highmem otherwise. This also works to reduce highmem
@@ -2205,6 +2209,23 @@
different crypto accelerators. This option can be used
to achieve best performance for particular HW.
+ indirect_target_selection= [X86,Intel] Mitigation control for Indirect
+			Target Selection (ITS) bug in Intel CPUs. Updated
+ microcode is also required for a fix in IBPB.
+
+ on: Enable mitigation (default).
+ off: Disable mitigation.
+ force: Force the ITS bug and deploy default
+ mitigation.
+ vmexit: Only deploy mitigation if CPU is affected by
+ guest/host isolation part of ITS.
+ stuff: Deploy RSB-fill mitigation when retpoline is
+ also deployed. Otherwise, deploy the default
+ mitigation.
+
+ For details see:
+ Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
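+
+			For example, a host that only cares about the
+			guest/host isolation part of the issue might boot
+			with:
+
+				indirect_target_selection=vmexit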
+
init= [KNL]
Format: <full_path>
Run specified binary instead of /sbin/init as init
@@ -2728,6 +2749,31 @@
kgdbwait [KGDB,EARLY] Stop kernel execution and enter the
kernel debugger at the earliest opportunity.
+ kho= [KEXEC,EARLY]
+ Format: { "0" | "1" | "off" | "on" | "y" | "n" }
+ Enables or disables Kexec HandOver.
+ "0" | "off" | "n" - kexec handover is disabled
+ "1" | "on" | "y" - kexec handover is enabled
+
+ kho_scratch= [KEXEC,EARLY]
+ Format: ll[KMG],mm[KMG],nn[KMG] | nn%
+ Defines the size of the KHO scratch region. The KHO
+ scratch regions are physically contiguous memory
+ ranges that can only be used for non-kernel
+ allocations. That way, even when memory is heavily
+ fragmented with handed over memory, the kexeced
+ kernel will always have enough contiguous ranges to
+ bootstrap itself.
+
+ It is possible to specify the exact amount of
+ memory in the form of "ll[KMG],mm[KMG],nn[KMG]"
+ where the first parameter defines the size of a low
+ memory scratch area, the second parameter defines
+ the size of a global scratch area and the third
+ parameter defines the size of additional per-node
+			scratch areas. The form "nn%" defines a scale factor
+			(as a percentage) of the memory that was used during boot.
+
kmac= [MIPS] Korina ethernet MAC address.
Configure the RouterBoard 532 series on-chip
Ethernet adapter MAC address.
@@ -3696,6 +3742,7 @@
expose users to several CPU vulnerabilities.
Equivalent to: if nokaslr then kpti=0 [ARM64]
gather_data_sampling=off [X86]
+ indirect_target_selection=off [X86]
kvm.nx_huge_pages=off [X86]
l1tf=off [X86]
mds=off [X86]
@@ -5657,6 +5704,31 @@
			are zero, rcutorture acts as if they are all
			non-zero.
+ rcutorture.gpwrap_lag= [KNL]
+ Enable grace-period wrap lag testing. Setting
+ to false prevents the gpwrap lag test from
+ running. Default is true.
+
+ rcutorture.gpwrap_lag_gps= [KNL]
+ Set the value for grace-period wrap lag during
+			active lag testing periods. This controls how many
+			grace-period differences are tolerated between the
+			rdp's and the rnp's gp_seq before the overflow flag
+			is set. The default is 8.
+
+ rcutorture.gpwrap_lag_cycle_mins= [KNL]
+ Set the total cycle duration for gpwrap lag
+ testing in minutes. This is the total time for
+ one complete cycle of active and inactive
+ testing periods. Default is 30 minutes.
+
+ rcutorture.gpwrap_lag_active_mins= [KNL]
+ Set the duration for which gpwrap lag is active
+ within each cycle, in minutes. During this time,
+ the grace-period wrap lag will be set to the
+ value specified by gpwrap_lag_gps. Default is
+ 5 minutes.
+
rcutorture.irqreader= [KNL]
Run RCU readers from irq handlers, or, more
accurately, from a timer handler. Not all RCU
@@ -6253,7 +6325,7 @@
port and the regular usb controller gets disabled.
root= [KNL] Root filesystem
- Usually this a a block device specifier of some kind,
+ Usually this is a block device specifier of some kind,
see the early_lookup_bdev comment in
block/early-lookup.c for details.
Alternatively this can be "ram" for the legacy initial
@@ -6280,6 +6352,11 @@
Memory area to be used by remote processor image,
managed by CMA.
+ rt_group_sched= [KNL] Enable or disable SCHED_RR/FIFO group scheduling
+ when CONFIG_RT_GROUP_SCHED=y. Defaults to
+ !CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED.
+ Format: <bool>
+
rw [KNL] Mount root device read-write on boot
S [KNL] Run init in single mode
diff --git a/Documentation/admin-guide/laptops/alienware-wmi.rst b/Documentation/admin-guide/laptops/alienware-wmi.rst
new file mode 100644
index 000000000000..27a32a8057da
--- /dev/null
+++ b/Documentation/admin-guide/laptops/alienware-wmi.rst
@@ -0,0 +1,127 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+====================
+Alienware WMI Driver
+====================
+
+Kurt Borja <kuurtb@gmail.com>
+
+This is a driver for the "WMAX" WMI device, which is found in most Dell gaming
+laptops and controls various special features.
+
+Before the launch of M-Series laptops (~2018), the "WMAX" device controlled
+basic RGB lighting, deep sleep mode, HDMI mode and amplifier status.
+
+Later, this device was completely repurposed. Now it mostly deals with thermal
+profiles, sensor monitoring and overclocking. This interface is named "AWCC" and
+is known to be used by the AWCC OEM application to control these features.
+
+The alienware-wmi driver controls both interfaces.
+
+AWCC Interface
+==============
+
+WMI device documentation: Documentation/wmi/devices/alienware-wmi.rst
+
+Supported devices
+-----------------
+
+- Alienware M-Series laptops
+- Alienware X-Series laptops
+- Alienware Aurora Desktops
+- Dell G-Series laptops
+
+If you believe your device supports the AWCC interface and you don't have any of
+the features described in this document, try the following alienware-wmi module
+parameters:
+
+- ``force_platform_profile=1``: Forces probing for platform profile support
+- ``force_hwmon=1``: Forces probing for HWMON support
+
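+As a rough example, assuming the module is loaded manually rather than
+auto-loaded, both probes can be forced with::
+
+   modprobe alienware-wmi force_platform_profile=1 force_hwmon=1
+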
+If the module loads successfully with these parameters, consider submitting a
+patch adding your model to the ``awcc_dmi_table`` located in
+``drivers/platform/x86/dell/alienware-wmi-wmax.c`` or contacting the maintainer
+for further guidance.
+
+Status
+------
+
+The following features are currently supported:
+
+- :ref:`Platform Profile <platform-profile>`:
+
+ - Thermal profile control
+
+ - G-Mode toggling
+
+- :ref:`HWMON <hwmon>`:
+
+ - Sensor monitoring
+
+ - Manual fan control
+
+.. _platform-profile:
+
+Platform Profile
+----------------
+
+The AWCC interface exposes various firmware defined thermal profiles. These are
+exposed to user-space through the Platform Profile class interface. Refer to
+:ref:`sysfs-class-platform-profile <abi_file_testing_sysfs_class_platform_profile>`
+for more information.
+
+The name of the platform-profile class device exported by this driver is
+"alienware-wmi" and it's path can be found with:
+
+::
+
+ grep -l "alienware-wmi" /sys/class/platform-profile/platform-profile-*/name | sed 's|/[^/]*$||'
+
+If the device supports G-Mode, it is also toggled when selecting the
+``performance`` profile.
+
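+For example, assuming the device found above is ``platform-profile-0``, the
+available profiles can be listed and the active one changed with::
+
+   cat /sys/class/platform-profile/platform-profile-0/choices
+   echo performance > /sys/class/platform-profile/platform-profile-0/profile
+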
+.. note::
+ You may set the ``force_gmode`` module parameter to always try to toggle this
+ feature, without checking if your model supports it.
+
+.. _hwmon:
+
+HWMON
+-----
+
+The AWCC interface also supports sensor monitoring and manual fan control. Both
+of these features are exposed to user-space through the HWMON interface.
+
+The name of the hwmon class device exported by this driver is "alienware_wmi"
+and its path can be found with:
+
+::
+
+ grep -l "alienware_wmi" /sys/class/hwmon/hwmon*/name | sed 's|/[^/]*$||'
+
+Sensor monitoring is done through the standard HWMON interface. Refer to
+:ref:`sysfs-class-hwmon <abi_file_testing_sysfs_class_hwmon>` for more
+information.
+
+Manual fan control, on the other hand, is not exposed directly by the AWCC
+interface. Instead it lets us control a fan `boost` value. This `boost` value
+has the following approximate behavior over the fan pwm:
+
+::
+
+ pwm = pwm_base + (fan_boost / 255) * (pwm_max - pwm_base)
+
+Due to the above behavior, the fan `boost` control is exposed to user-space
+through the following custom hwmon sysfs attribute:
+
+=============================== ======= =======================================
+Name Perm Description
+=============================== ======= =======================================
+fan[1-4]_boost RW Fan boost value.
+
+ Integer value between 0 and 255
+=============================== ======= =======================================
+
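+For example, assuming the hwmon device found above is ``hwmon5`` (the number
+varies between systems), fan 1 can be monitored and pushed to its maximum
+boost with::
+
+   cat /sys/class/hwmon/hwmon5/fan1_input
+   echo 255 > /sys/class/hwmon/hwmon5/fan1_boost
+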
+.. note::
+ In some devices, manual fan control only works reliably if the ``custom``
+ platform profile is selected.
diff --git a/Documentation/admin-guide/laptops/index.rst b/Documentation/admin-guide/laptops/index.rst
index e71c8984c23e..db842b629303 100644
--- a/Documentation/admin-guide/laptops/index.rst
+++ b/Documentation/admin-guide/laptops/index.rst
@@ -7,6 +7,7 @@ Laptop Drivers
.. toctree::
:maxdepth: 1
+ alienware-wmi
asus-laptop
disk-shock-protection
laptop-mode
diff --git a/Documentation/admin-guide/media/c3-isp.dot b/Documentation/admin-guide/media/c3-isp.dot
new file mode 100644
index 000000000000..42dc931ee84a
--- /dev/null
+++ b/Documentation/admin-guide/media/c3-isp.dot
@@ -0,0 +1,26 @@
+digraph board {
+ rankdir=TB
+ n00000001 [label="{{<port0> 0 | <port1> 1} | c3-isp-core\n/dev/v4l-subdev0 | {<port2> 2 | <port3> 3 | <port4> 4 | <port5> 5}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000001:port3 -> n00000008:port0
+ n00000001:port4 -> n0000000b:port0
+ n00000001:port5 -> n0000000e:port0
+ n00000001:port2 -> n00000027
+ n00000008 [label="{{<port0> 0} | c3-isp-resizer0\n/dev/v4l-subdev1 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000008:port1 -> n00000016 [style=bold]
+ n0000000b [label="{{<port0> 0} | c3-isp-resizer1\n/dev/v4l-subdev2 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000000b:port1 -> n0000001a [style=bold]
+ n0000000e [label="{{<port0> 0} | c3-isp-resizer2\n/dev/v4l-subdev3 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000000e:port1 -> n00000023 [style=bold]
+ n00000011 [label="{{<port0> 0} | c3-mipi-adapter\n/dev/v4l-subdev4 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000011:port1 -> n00000001:port0 [style=bold]
+ n00000016 [label="c3-isp-cap0\n/dev/video0", shape=box, style=filled, fillcolor=yellow]
+ n0000001a [label="c3-isp-cap1\n/dev/video1", shape=box, style=filled, fillcolor=yellow]
+ n0000001e [label="{{<port0> 0} | c3-mipi-csi2\n/dev/v4l-subdev5 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000001e:port1 -> n00000011:port0 [style=bold]
+ n00000023 [label="c3-isp-cap2\n/dev/video2", shape=box, style=filled, fillcolor=yellow]
+ n00000027 [label="c3-isp-stats\n/dev/video3", shape=box, style=filled, fillcolor=yellow]
+ n0000002b [label="c3-isp-params\n/dev/video4", shape=box, style=filled, fillcolor=yellow]
+ n0000002b -> n00000001:port1
+ n0000003f [label="{{} | imx290 2-001a\n/dev/v4l-subdev6 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000003f:port0 -> n0000001e:port0 [style=bold]
+}
diff --git a/Documentation/admin-guide/media/c3-isp.rst b/Documentation/admin-guide/media/c3-isp.rst
new file mode 100644
index 000000000000..ac508b8c6831
--- /dev/null
+++ b/Documentation/admin-guide/media/c3-isp.rst
@@ -0,0 +1,101 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+
+.. include:: <isonum.txt>
+
+=================================================
+Amlogic C3 Image Signal Processing (C3ISP) driver
+=================================================
+
+Introduction
+============
+
+This file documents the Amlogic C3ISP driver located under
+drivers/media/platform/amlogic/c3/isp.
+
+The current version of the driver supports the C3ISP found on the
+Amlogic C308L processor.
+
+The driver implements V4L2, Media controller and V4L2 subdev interfaces.
+Camera sensors using the in-kernel V4L2 subdev interface are supported.
+
+The driver has been tested on AW419-C308L-Socket platform.
+
+Amlogic C3 ISP
+==============
+
+The Camera hardware found on C308L processors and supported by
+the driver consists of:
+
+- 1 MIPI-CSI-2 module: handles the physical layer of the MIPI CSI-2 receiver and
+ receives data from the connected camera sensor.
+- 1 MIPI-ADAPTER module: organizes MIPI data to meet ISP input requirements and
+  sends the MIPI data to the ISP.
+- 1 ISP (Image Signal Processing) module: contains a pipeline of image processing
+  hardware blocks. The ISP pipeline ends in three resizers, each of them
+  connected to a DMA interface which writes the output data to memory.
+
+A high-level functional view of the C3 ISP is presented below::
+
+                                                                +---------+    +-------+
+                                                             +->| Resizer |--->| WRMIF |
+                                                             |  +---------+    +-------+
+ +--------+    +------------+    +--------------+    +-----+ |  +---------+    +-------+
+ | Sensor |--->| MIPI CSI-2 |--->| MIPI ADAPTER |--->| ISP |-+->| Resizer |--->| WRMIF |
+ +--------+    +------------+    +--------------+    +-----+ |  +---------+    +-------+
+                                                             |  +---------+    +-------+
+                                                             +->| Resizer |--->| WRMIF |
+                                                                +---------+    +-------+
+
+Driver architecture and design
+==============================
+
+With the goal to model the hardware links between the modules and to expose a
+clean, logical and usable interface, the driver registers the following V4L2
+sub-devices:
+
+- 1 `c3-mipi-csi2` sub-device - the MIPI CSI-2 receiver
+- 1 `c3-mipi-adapter` sub-device - the MIPI adapter
+- 1 `c3-isp-core` sub-device - the ISP core
+- 3 `c3-isp-resizer` sub-devices - the ISP resizers
+
+The `c3-isp-core` sub-device is linked to 2 video device nodes for statistics
+capture and parameters programming:
+
+- the `c3-isp-stats` capture video device node for statistics capture
+- the `c3-isp-params` output video device for parameters programming
+
+Each `c3-isp-resizer` sub-device is linked to a capture video device node where
+frames are captured from:
+
+- `c3-isp-resizer0` is linked to the `c3-isp-cap0` capture video device
+- `c3-isp-resizer1` is linked to the `c3-isp-cap1` capture video device
+- `c3-isp-resizer2` is linked to the `c3-isp-cap2` capture video device
+
+The media controller pipeline graph is as follows (with an IMX290 camera
+sensor connected):
+
+.. _isp_topology_graph:
+
+.. kernel-figure:: c3-isp.dot
+ :alt: c3-isp.dot
+ :align: center
+
+ Media pipeline topology
+
+Implementation
+==============
+
+Runtime configuration of the ISP hardware is performed on the `c3-isp-params`
+video device node using the :ref:`V4L2_META_FMT_C3ISP_PARAMS
+<v4l2-meta-fmt-c3isp-params>` as data format. The buffer structure is defined by
+:c:type:`c3_isp_params_cfg`.
+
+Statistics are captured from the `c3-isp-stats` video device node using the
+:ref:`V4L2_META_FMT_C3ISP_STATS <v4l2-meta-fmt-c3isp-stats>` data format.
+
+The final picture size and format is configured using the V4L2 video
+capture interface on the `c3-isp-cap[0, 2]` video device nodes.
+
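+As a rough usage sketch (device numbers and the pixel format are only
+examples and depend on the board and pipeline configuration), frames can be
+captured from the first video node with::
+
+   v4l2-ctl -d /dev/video0 --set-fmt-video=width=1920,height=1080,pixelformat=NV12
+   v4l2-ctl -d /dev/video0 --stream-mmap --stream-count=10 --stream-to=frame.raw
+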
+The Amlogic C3 ISP is supported by `libcamera <https://libcamera.org>`_ with a
+dedicated pipeline handler and algorithms that perform run-time image correction
+and enhancement.
diff --git a/Documentation/admin-guide/media/mgb4.rst b/Documentation/admin-guide/media/mgb4.rst
index f69d331e3cb1..5ac69b833a7a 100644
--- a/Documentation/admin-guide/media/mgb4.rst
+++ b/Documentation/admin-guide/media/mgb4.rst
@@ -1,8 +1,17 @@
.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
The mgb4 driver
===============
+Copyright |copy| 2023 - 2025 Digiteq Automotive
+ author: Martin Tůma <martin.tuma@digiteqautomotive.com>
+
+This is a v4l2 device driver for the Digiteq Automotive FrameGrabber 4, a PCIe
+card capable of capturing and generating FPD-Link III and GMSL2/3 video streams
+as used in the automotive industry.
+
sysfs interface
---------------
diff --git a/Documentation/admin-guide/media/pci-cardlist.rst b/Documentation/admin-guide/media/pci-cardlist.rst
index 7d8e3c8987db..239879634ea5 100644
--- a/Documentation/admin-guide/media/pci-cardlist.rst
+++ b/Documentation/admin-guide/media/pci-cardlist.rst
@@ -86,7 +86,6 @@ saa7134 Philips SAA7134
saa7164 NXP SAA7164
smipcie SMI PCIe DVBSky cards
solo6x10 Bluecherry / Softlogic 6x10 capture cards (MPEG-4/H.264)
-sta2x11_vip STA2X11 VIP Video For Linux
tw5864 Techwell TW5864 video/audio grabber and encoder
tw686x Intersil/Techwell TW686x
tw68 Techwell tw68x Video For Linux
diff --git a/Documentation/admin-guide/media/v4l-drivers.rst b/Documentation/admin-guide/media/v4l-drivers.rst
index e8761561b2fe..3bac5165b134 100644
--- a/Documentation/admin-guide/media/v4l-drivers.rst
+++ b/Documentation/admin-guide/media/v4l-drivers.rst
@@ -10,6 +10,7 @@ Video4Linux (V4L) driver-specific documentation
:maxdepth: 2
bttv
+ c3-isp
cafe_ccic
cx88
fimc
diff --git a/Documentation/admin-guide/mm/damon/index.rst b/Documentation/admin-guide/mm/damon/index.rst
index 33d37bb2fb4e..bc7e976120e0 100644
--- a/Documentation/admin-guide/mm/damon/index.rst
+++ b/Documentation/admin-guide/mm/damon/index.rst
@@ -1,12 +1,11 @@
.. SPDX-License-Identifier: GPL-2.0
-==========================
-DAMON: Data Access MONitor
-==========================
+================================================================
+DAMON: Data Access MONitoring and Access-aware System Operations
+================================================================
-:doc:`DAMON </mm/damon/index>` allows light-weight data access monitoring.
-Using DAMON, users can analyze the memory access patterns of their systems and
-optimize those.
+:doc:`DAMON </mm/damon/index>` is a Linux kernel subsystem for efficient data
+access monitoring and access-aware system operations.
.. toctree::
:maxdepth: 2
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
index ced2013db3df..d960aba72b82 100644
--- a/Documentation/admin-guide/mm/damon/usage.rst
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -81,7 +81,7 @@ comma (",").
│ │ │ │ │ │ │ :ref:`quotas <sysfs_quotas>`/ms,bytes,reset_interval_ms,effective_bytes
│ │ │ │ │ │ │ │ weights/sz_permil,nr_accesses_permil,age_permil
│ │ │ │ │ │ │ │ :ref:`goals <sysfs_schemes_quota_goals>`/nr_goals
- │ │ │ │ │ │ │ │ │ 0/target_metric,target_value,current_value
+ │ │ │ │ │ │ │ │ │ 0/target_metric,target_value,current_value,nid
│ │ │ │ │ │ │ :ref:`watermarks <sysfs_watermarks>`/metric,interval_us,high,mid,low
│ │ │ │ │ │ │ :ref:`{core_,ops_,}filters <sysfs_filters>`/nr_filters
│ │ │ │ │ │ │ │ 0/type,matching,allow,memcg_path,addr_start,addr_end,target_idx,min,max
@@ -390,11 +390,11 @@ number (``N``) to the file creates the number of child directories named ``0``
to ``N-1``. Each directory represents each goal and current achievement.
Among the multiple feedback, the best one is used.
-Each goal directory contains three files, namely ``target_metric``,
-``target_value`` and ``current_value``. Users can set and get the three
-parameters for the quota auto-tuning goals that specified on the :ref:`design
-doc <damon_design_damos_quotas_auto_tuning>` by writing to and reading from each
-of the files. Note that users should further write
+Each goal directory contains four files, namely ``target_metric``,
+``target_value``, ``current_value`` and ``nid``. Users can set and get the
+four parameters for the quota auto-tuning goals that are specified in the
+:ref:`design doc <damon_design_damos_quotas_auto_tuning>` by writing to and
+reading from each of the files. Note that users should further write
``commit_schemes_quota_goals`` to the ``state`` file of the :ref:`kdamond
directory <sysfs_kdamond>` to pass the feedback to DAMON.
diff --git a/Documentation/admin-guide/mm/index.rst b/Documentation/admin-guide/mm/index.rst
index 8b35795b664b..2d2f6c222308 100644
--- a/Documentation/admin-guide/mm/index.rst
+++ b/Documentation/admin-guide/mm/index.rst
@@ -42,3 +42,4 @@ the Linux memory management.
transhuge
userfaultfd
zswap
+ kho
diff --git a/Documentation/admin-guide/mm/kho.rst b/Documentation/admin-guide/mm/kho.rst
new file mode 100644
index 000000000000..6dc18ed4b886
--- /dev/null
+++ b/Documentation/admin-guide/mm/kho.rst
@@ -0,0 +1,115 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+====================
+Kexec Handover Usage
+====================
+
+Kexec HandOver (KHO) is a mechanism that allows Linux to preserve memory
+regions, which could contain serialized system states, across kexec.
+
+This document expects that you are familiar with the base KHO
+:ref:`concepts <kho-concepts>`. If you have not read
+them yet, please do so now.
+
+Prerequisites
+=============
+
+KHO is available when the kernel is compiled with ``CONFIG_KEXEC_HANDOVER``
+set to y. Every KHO producer may have its own config option that you
+need to enable if you would like to preserve its respective state across
+kexec.
+
+To use KHO, please boot the kernel with the ``kho=on`` command line
+parameter. You may use ``kho_scratch`` parameter to define size of the
+scratch regions. For example ``kho_scratch=16M,512M,256M`` will reserve a
+16 MiB low memory scratch area, a 512 MiB global scratch region, and 256 MiB
+per NUMA node scratch regions on boot.
+
+Perform a KHO kexec
+===================
+
+First, before you perform a KHO kexec, you need to move the system into
+the :ref:`KHO finalization phase <kho-finalization-phase>` ::
+
+ $ echo 1 > /sys/kernel/debug/kho/out/finalize
+
+After this command, the KHO FDT is available in
+``/sys/kernel/debug/kho/out/fdt``. Other subsystems may also register
+their own preserved sub FDTs under
+``/sys/kernel/debug/kho/out/sub_fdts/``.
+
+Next, load the target payload and kexec into it. It is important that you
+use the ``-s`` parameter to use the in-kernel kexec file loader, as user
+space kexec tooling currently has no support for KHO with the user space
+based file loader ::
+
+ # kexec -l /path/to/bzImage --initrd /path/to/initrd -s
+ # kexec -e
+
+The new kernel will boot up and contain some of the previous kernel's state.
+
+For example, if you used ``reserve_mem`` command line parameter to create
+an early memory reservation, the new kernel will have that memory at the
+same physical address as the old kernel.
+
+Abort a KHO kexec
+=================
+
+You can move the system out of KHO finalization phase again by calling ::
+
+  $ echo 0 > /sys/kernel/debug/kho/out/finalize
+
+After this command, the KHO FDT is no longer available in
+``/sys/kernel/debug/kho/out/fdt``.
+
+debugfs Interfaces
+==================
+
+Currently KHO creates the following debugfs interfaces. Notice that these
+interfaces may change in the future. They will be moved to sysfs once KHO is
+stabilized.
+
+``/sys/kernel/debug/kho/out/finalize``
+ Kexec HandOver (KHO) allows Linux to transition the state of
+ compatible drivers into the next kexec'ed kernel. To do so,
+ device drivers will instruct KHO to preserve memory regions,
+ which could contain serialized kernel state.
+ While the state is serialized, they are unable to perform
+ any modifications to state that was serialized, such as
+ handed over memory allocations.
+
+ When this file contains "1", the system is in the transition
+	state. When it contains "0", it is not. To switch between the
+ two states, echo the respective number into this file.
+
+``/sys/kernel/debug/kho/out/fdt``
+ When KHO state tree is finalized, the kernel exposes the
+ flattened device tree blob that carries its current KHO
+ state in this file. Kexec user space tooling can use this
+ as input file for the KHO payload image.
+
+``/sys/kernel/debug/kho/out/scratch_len``
+ Lengths of KHO scratch regions, which are physically contiguous
+ memory regions that will always stay available for future kexec
+ allocations. Kexec user space tools can use this file to determine
+ where it should place its payload images.
+
+``/sys/kernel/debug/kho/out/scratch_phys``
+ Physical locations of KHO scratch regions. Kexec user space tools
+	can use this file in conjunction with scratch_len to determine where
+ it should place its payload images.
+
+``/sys/kernel/debug/kho/out/sub_fdts/``
+ In the KHO finalization phase, KHO producers register their own
+ FDT blob under this directory.
+
+``/sys/kernel/debug/kho/in/fdt``
+ When the kernel was booted with Kexec HandOver (KHO),
+ the state tree that carries metadata about the previous
+ kernel's state is in this file in the format of flattened
+	device tree. This file may disappear when all consumers of
+	it have finished interpreting their metadata.
+
+``/sys/kernel/debug/kho/in/sub_fdts/``
+ Similar to ``kho/out/sub_fdts/``, but contains sub FDT blobs
+ of KHO producers passed from the old kernel.
diff --git a/Documentation/admin-guide/mm/multigen_lru.rst b/Documentation/admin-guide/mm/multigen_lru.rst
index 33e068830497..9cb54b4ff5d9 100644
--- a/Documentation/admin-guide/mm/multigen_lru.rst
+++ b/Documentation/admin-guide/mm/multigen_lru.rst
@@ -151,8 +151,9 @@ generations less than or equal to ``min_gen_nr``.
``min_gen_nr`` should be less than ``max_gen_nr-1``, since
``max_gen_nr`` and ``max_gen_nr-1`` are not fully aged (equivalent to
the active list) and therefore cannot be evicted. ``swappiness``
-overrides the default value in ``/proc/sys/vm/swappiness``.
-``nr_to_reclaim`` limits the number of pages to evict.
+overrides the default value in ``/proc/sys/vm/swappiness`` and the valid
+range is [0-200, max], with max being exclusively used for the reclamation
+of anonymous memory. ``nr_to_reclaim`` limits the number of pages to evict.
A typical use case is that a job scheduler runs this command before it
tries to land a new job on a server. If it fails to materialize enough
diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst
index afce291649dd..e60e9211fd9b 100644
--- a/Documentation/admin-guide/mm/pagemap.rst
+++ b/Documentation/admin-guide/mm/pagemap.rst
@@ -250,6 +250,7 @@ Following flags about pages are currently supported:
- ``PAGE_IS_PFNZERO`` - Page has zero PFN
- ``PAGE_IS_HUGE`` - Page is PMD-mapped THP or Hugetlb backed
- ``PAGE_IS_SOFT_DIRTY`` - Page is soft-dirty
+- ``PAGE_IS_GUARD`` - Page is a part of a guard region
The ``struct pm_scan_arg`` is used as the argument of the IOCTL.
diff --git a/Documentation/admin-guide/namespaces/resource-control.rst b/Documentation/admin-guide/namespaces/resource-control.rst
index 369556e00f0c..553a44803231 100644
--- a/Documentation/admin-guide/namespaces/resource-control.rst
+++ b/Documentation/admin-guide/namespaces/resource-control.rst
@@ -1,17 +1,17 @@
-===========================
-Namespaces research control
-===========================
+====================================
+User namespaces and resource control
+====================================
-There are a lot of kinds of objects in the kernel that don't have
-individual limits or that have limits that are ineffective when a set
-of processes is allowed to switch user ids. With user namespaces
-enabled in a kernel for people who don't trust their users or their
-users programs to play nice this problems becomes more acute.
+The kernel contains many kinds of objects that either don't have
+individual limits or that have limits which are ineffective when
+a set of processes is allowed to switch their UID. On a system
+where the admins don't trust their users or their users' programs,
+user namespaces expose the system to potential misuse of resources.
-Therefore it is recommended that memory control groups be enabled in
-kernels that enable user namespaces, and it is further recommended
-that userspace configure memory control groups to limit how much
-memory user's they don't trust to play nice can use.
+In order to mitigate this, we recommend that admins enable memory
+control groups on any system that enables user namespaces.
+Furthermore, we recommend that admins configure the memory control
+groups to limit the maximum memory usable by any untrusted user.
Memory control groups can be configured by installing the libcgroup
package present on most distros editing /etc/cgrules.conf,
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index 3950583f2b15..2d74af7f0efe 100644
--- a/Documentation/admin-guide/pm/cpufreq.rst
+++ b/Documentation/admin-guide/pm/cpufreq.rst
@@ -231,7 +231,7 @@ are the following:
present).
The existence of the limit may be a result of some (often unintentional)
- BIOS settings, restrictions coming from a service processor or another
+ BIOS settings, restrictions coming from a service processor or other
BIOS/HW-based mechanisms.
This does not cover ACPI thermal limitations which can be discovered
@@ -258,8 +258,8 @@ are the following:
extension on ARM). If one cannot be determined, this attribute should
not be present.
- Note, that failed attempt to retrieve current frequency for a given
- CPU(s) will result in an appropriate error, i.e: EAGAIN for CPU that
+	Note that a failed attempt to retrieve the current frequency for a given
+ CPU(s) will result in an appropriate error, i.e.: EAGAIN for CPU that
remains idle (raised on ARM).
``cpuinfo_max_freq``
@@ -499,7 +499,7 @@ This governor exposes the following tunables:
represented by it to be 1.5 times as high as the transition latency
(the default)::
- # echo `$(($(cat cpuinfo_transition_latency) * 3 / 2)) > ondemand/sampling_rate
+ # echo `$(($(cat cpuinfo_transition_latency) * 3 / 2))` > ondemand/sampling_rate
``up_threshold``
If the estimated CPU load is above this value (in percent), the governor
diff --git a/Documentation/admin-guide/pm/intel_idle.rst b/Documentation/admin-guide/pm/intel_idle.rst
index 5940528146eb..ed6f055d4b14 100644
--- a/Documentation/admin-guide/pm/intel_idle.rst
+++ b/Documentation/admin-guide/pm/intel_idle.rst
@@ -38,6 +38,27 @@ instruction at all.
only way to pass early-configuration-time parameters to it is via the kernel
command line.
+Sysfs Interface
+===============
+
+The ``intel_idle`` driver exposes the following ``sysfs`` attributes in
+``/sys/devices/system/cpu/cpuidle/``:
+
+``intel_c1_demotion``
+ Enable or disable C1 demotion for all CPUs in the system. This file is
+ only exposed on platforms that support the C1 demotion feature and where
+ it was tested. Value 0 means that C1 demotion is disabled, value 1 means
+ that it is enabled. Write 0 or 1 to disable or enable C1 demotion for
+ all CPUs.
+
+ The C1 demotion feature involves the platform firmware demoting deep
+ C-state requests from the OS (e.g., C6 requests) to C1. The idea is that
+ firmware monitors CPU wake-up rate, and if it is higher than a
+ platform-specific threshold, the firmware demotes deep C-state requests
+ to C1. For example, Linux requests C6, but firmware noticed too many
+ wake-ups per second, and it keeps the CPU in C1. When the CPU stays in
+ C1 long enough, the platform promotes it back to C6. This may improve
+ some workloads' performance, but it may also increase power consumption.
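+
+For example, C1 demotion can be disabled system-wide with::
+
+    echo 0 > /sys/devices/system/cpu/cpuidle/intel_c1_demotion
+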
.. _intel-idle-enumeration-of-states:
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index 78fc83ed2a7e..26e702c7016e 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -329,6 +329,106 @@ information listed above is the same for all of the processors supporting the
HWP feature, which is why ``intel_pstate`` works with all of them.]
+Support for Hybrid Processors
+=============================
+
+Some processors supported by ``intel_pstate`` contain two or more types of CPU
+cores differing by the maximum turbo P-state, performance vs power characteristics,
+cache sizes, and possibly other properties. They are commonly referred to as
+hybrid processors. To support them, ``intel_pstate`` requires HWP to be enabled
+and it assumes the HWP performance units to be the same for all CPUs in the
+system, so a given HWP performance level always represents approximately the
+same physical performance regardless of the core (CPU) type.
+
+Hybrid Processors with SMT
+--------------------------
+
+On systems where SMT (Simultaneous Multithreading), also referred to as
+HyperThreading (HT) in the context of Intel processors, is enabled on at least
+one core, ``intel_pstate`` assigns performance-based priorities to CPUs. Namely,
+the priority of a given CPU reflects its highest HWP performance level which
+causes the CPU scheduler to generally prefer more performant CPUs, so the less
+performant CPUs are used when the other ones are fully loaded. However, SMT
+siblings (that is, logical CPUs sharing one physical core) are treated in a
+special way such that if one of them is in use, the effective priority of the
+other ones is lowered below the priorities of the CPUs located in the other
+physical cores.
+
+This approach maximizes performance in the majority of cases, but unfortunately
+it also leads to excessive energy usage in some important scenarios, like video
+playback, which is not generally desirable. While there is no other viable
+choice with SMT enabled because the effective capacity and utilization of SMT
+siblings are hard to determine, hybrid processors without SMT can be handled in
+more energy-efficient ways.
+
+.. _CAS:
+
+Capacity-Aware Scheduling Support
+---------------------------------
+
+The capacity-aware scheduling (CAS) support in the CPU scheduler is enabled by
+``intel_pstate`` by default on hybrid processors without SMT. CAS generally
+causes the scheduler to put tasks on a CPU so long as there is a sufficient
+amount of spare capacity on it, and if the utilization of a given task is too
+high for it, the task will need to go somewhere else.
+
+Since CAS takes CPU capacities into account, it does not require CPU
+prioritization and it allows tasks to be distributed more symmetrically among
+the more performant and less performant CPUs. Once placed on a CPU with enough
+capacity to accommodate it, a task may just continue to run there regardless of
+whether or not the other CPUs are fully loaded, so on average CAS reduces the
+utilization of the more performant CPUs which causes the energy usage to be more
+balanced because the more performant CPUs are generally less energy-efficient
+than the less performant ones.
+
+In order to use CAS, the scheduler needs to know the capacity of each CPU in
+the system and it needs to be able to compute scale-invariant utilization of
+CPUs, so ``intel_pstate`` provides it with the requisite information.
+
+First of all, the capacity of each CPU is represented by the ratio of its highest
+HWP performance level, multiplied by 1024, to the highest HWP performance level
+of the most performant CPU in the system, which works because the HWP performance
+units are the same for all CPUs. Second, the frequency-invariance computations,
+carried out by the scheduler to always express CPU utilization in the same units
+regardless of the frequency it is currently running at, are adjusted to take the
+CPU capacity into account. All of this happens when ``intel_pstate`` has
+registered itself with the ``CPUFreq`` core and it has figured out that it is
+running on a hybrid processor without SMT.
+
+Energy-Aware Scheduling Support
+-------------------------------
+
+If ``CONFIG_ENERGY_MODEL`` has been set during kernel configuration and
+``intel_pstate`` runs on a hybrid processor without SMT, in addition to enabling
+`CAS <CAS_>`_ it registers an Energy Model for the processor. This allows the
+Energy-Aware Scheduling (EAS) support to be enabled in the CPU scheduler if
+``schedutil`` is used as the ``CPUFreq`` governor which requires ``intel_pstate``
+to operate in the `passive mode <Passive Mode_>`_.
+
+The Energy Model registered by ``intel_pstate`` is artificial (that is, it is
+based on abstract cost values and it does not include any real power numbers)
+and it is relatively simple to avoid unnecessary computations in the scheduler.
+There is a performance domain in it for every CPU in the system and the cost
+values for these performance domains have been chosen so that running a task on
+a less performant (small) CPU appears to be always cheaper than running that
+task on a more performant (big) CPU. However, for two CPUs of the same type,
+the cost difference depends on their current utilization, and the CPU whose
+current utilization is higher generally appears to be a more expensive
+destination for a given task. This helps to balance the load among CPUs of the
+same type.
+
+Since EAS works on top of CAS, high-utilization tasks are always migrated to
+CPUs with enough capacity to accommodate them, but thanks to EAS, low-utilization
+tasks tend to be placed on the CPUs that look less expensive to the scheduler.
+Effectively, this causes the less performant and less loaded CPUs to be
+preferred as long as they have enough spare capacity to run the given task
+which generally leads to reduced energy usage.
+
+The Energy Model created by ``intel_pstate`` can be inspected by looking at
+the ``energy_model`` directory in ``debugfs`` (typically mounted on
+``/sys/kernel/debug/``).
+
+
User Space Interface in ``sysfs``
=================================
@@ -697,8 +797,8 @@ of them have to be prepended with the ``intel_pstate=`` prefix.
Limits`_ for details).
``no_cas``
- Do not enable capacity-aware scheduling (CAS) which is enabled by
- default on hybrid systems.
+ Do not enable `capacity-aware scheduling <CAS_>`_ which is enabled by
+ default on hybrid systems without SMT.
Diagnostics and Tuning
======================
diff --git a/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst b/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst
index 5151ec312dc0..d367ba4d744a 100644
--- a/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst
+++ b/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst
@@ -91,12 +91,22 @@ Attributes in each directory:
``domain_id``
This attribute is used to get the power domain id of this instance.
+``die_id``
+ This attribute is used to get the Linux die id of this instance.
+ This attribute is only present for domains with core agents and
+ when the CPUID leaf 0x1f presents die ID.
+
``fabric_cluster_id``
This attribute is used to get the fabric cluster id of this instance.
``package_id``
This attribute is used to get the package id of this instance.
+``agent_types``
+ This attribute displays all the hardware agents present within the
+ domain. Each agent has the capability to control one or more hardware
+ subsystems, which include: core, cache, memory, and I/O.
+
The other attributes are same as presented at package_*_die_* level.
In most of current use cases, the "max_freq_khz" and "min_freq_khz"
diff --git a/Documentation/admin-guide/quickly-build-trimmed-linux.rst b/Documentation/admin-guide/quickly-build-trimmed-linux.rst
index 07cfd8863b46..4a5ffb0996a3 100644
--- a/Documentation/admin-guide/quickly-build-trimmed-linux.rst
+++ b/Documentation/admin-guide/quickly-build-trimmed-linux.rst
@@ -347,7 +347,7 @@ again.
[:ref:`details<uninstall>`]
-.. _submit_improvements:
+.. _submit_improvements_qbtl:
Did you run into trouble following any of the above steps that is not cleared up
by the reference section below? Or do you have ideas how to improve the text?
@@ -1070,7 +1070,7 @@ complicated, and harder to follow.
That being said: this of course is a balancing act. Hence, if you think an
additional use-case is worth describing, suggest it to the maintainers of this
-document, as :ref:`described above <submit_improvements>`.
+document, as :ref:`described above <submit_improvements_qbtl>`.
..
diff --git a/Documentation/admin-guide/reporting-issues.rst b/Documentation/admin-guide/reporting-issues.rst
index 2fd5a030235a..9a847506f6ec 100644
--- a/Documentation/admin-guide/reporting-issues.rst
+++ b/Documentation/admin-guide/reporting-issues.rst
@@ -41,7 +41,7 @@ If you are facing multiple issues with the Linux kernel at once, report each
separately. While writing your report, include all information relevant to the
issue, like the kernel and the distro used. In case of a regression, CC the
regressions mailing list (regressions@lists.linux.dev) to your report. Also try
-to pin-point the culprit with a bisection; if you succeed, include its
+to pinpoint the culprit with a bisection; if you succeed, include its
commit-id and CC everyone in the sign-off-by chain.
Once the report is out, answer any questions that come up and help where you
@@ -206,7 +206,7 @@ Reporting issues only occurring in older kernel version lines
This subsection is for you, if you tried the latest mainline kernel as outlined
above, but failed to reproduce your issue there; at the same time you want to
see the issue fixed in a still supported stable or longterm series or vendor
-kernels regularly rebased on those. If that the case, follow these steps:
+kernels regularly rebased on those. If that is the case, follow these steps:
* Prepare yourself for the possibility that going through the next few steps
might not get the issue solved in older releases: the fix might be too big
@@ -312,7 +312,7 @@ small modifications to a kernel based on a recent Linux version; that for
example often holds true for the mainline kernels shipped by Debian GNU/Linux
Sid or Fedora Rawhide. Some developers will also accept reports about issues
with kernels from distributions shipping the latest stable kernel, as long as
-its only slightly modified; that for example is often the case for Arch Linux,
+it's only slightly modified; that for example is often the case for Arch Linux,
regular Fedora releases, and openSUSE Tumbleweed. But keep in mind, you better
want to use a mainline Linux and avoid using a stable kernel for this
process, as outlined in the section 'Install a fresh kernel for testing' in more
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index 8290177b4f75..9bef46151d53 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -75,6 +75,7 @@ Currently, these files are in /proc/sys/vm:
- unprivileged_userfaultfd
- user_reserve_kbytes
- vfs_cache_pressure
+- vfs_cache_pressure_denom
- watermark_boost_factor
- watermark_scale_factor
- zone_reclaim_mode
@@ -131,6 +132,12 @@ to latency spikes in unsuspecting applications. The kernel employs
various heuristics to avoid wasting CPU cycles if it detects that
proactive compaction is not being effective.
+Setting the value above 80 will, in addition to lowering the acceptable level
+of fragmentation, make the compaction code more sensitive to increases in
+fragmentation, i.e. compaction will trigger more often, but reduce
+fragmentation by a smaller amount.
+This makes the fragmentation level more stable over time.
+
Be careful when setting it to extreme values like 100, as that may
cause excessive background compaction activity.
@@ -1017,19 +1024,28 @@ vfs_cache_pressure
This percentage value controls the tendency of the kernel to reclaim
the memory which is used for caching of directory and inode objects.
-At the default value of vfs_cache_pressure=100 the kernel will attempt to
-reclaim dentries and inodes at a "fair" rate with respect to pagecache and
-swapcache reclaim. Decreasing vfs_cache_pressure causes the kernel to prefer
-to retain dentry and inode caches. When vfs_cache_pressure=0, the kernel will
-never reclaim dentries and inodes due to memory pressure and this can easily
-lead to out-of-memory conditions. Increasing vfs_cache_pressure beyond 100
-causes the kernel to prefer to reclaim dentries and inodes.
+At the default value of vfs_cache_pressure=vfs_cache_pressure_denom the kernel
+will attempt to reclaim dentries and inodes at a "fair" rate with respect to
+pagecache and swapcache reclaim. Decreasing vfs_cache_pressure causes the
+kernel to prefer to retain dentry and inode caches. When vfs_cache_pressure=0,
+the kernel will never reclaim dentries and inodes due to memory pressure and
+this can easily lead to out-of-memory conditions. Increasing vfs_cache_pressure
+beyond vfs_cache_pressure_denom causes the kernel to prefer to reclaim dentries
+and inodes.
+
+Increasing vfs_cache_pressure significantly beyond vfs_cache_pressure_denom may
+have negative performance impact. Reclaim code needs to take various locks to
+find freeable directory and inode objects. When vfs_cache_pressure equals
+(10 * vfs_cache_pressure_denom), it will look for ten times more freeable
+objects than there are.
-Increasing vfs_cache_pressure significantly beyond 100 may have negative
-performance impact. Reclaim code needs to take various locks to find freeable
-directory and inode objects. With vfs_cache_pressure=1000, it will look for
-ten times more freeable objects than there are.
+Note: This setting should always be used together with vfs_cache_pressure_denom.
+
+vfs_cache_pressure_denom
+========================
+Defaults to 100 (the minimum allowed value). It takes effect only in
+combination with the corresponding vfs_cache_pressure setting.
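+
+As a sketch of how the two knobs combine (the values are illustrative only),
+a reclaim bias of half the default can be expressed with finer granularity by
+raising the denominator::
+
+    sysctl -w vm.vfs_cache_pressure_denom=1000
+    sysctl -w vm.vfs_cache_pressure=500
+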
watermark_boost_factor
======================
diff --git a/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst b/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
index 03c55151346c..d8946b084b1e 100644
--- a/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
+++ b/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
@@ -267,7 +267,7 @@ culprit might be known already. For further details on what actually qualifies
as a regression check out Documentation/admin-guide/reporting-regressions.rst.
If you run into any problems while following this guide or have ideas how to
-improve it, :ref:`please let the kernel developers know <submit_improvements>`.
+improve it, :ref:`please let the kernel developers know <submit_improvements_vbbr>`.
.. _introprep_bissbs:
@@ -1055,7 +1055,7 @@ follow these instructions.
[:ref:`details <introoptional_bisref>`]
-.. _submit_improvements:
+.. _submit_improvements_vbbr:
Conclusion
----------
diff --git a/Documentation/admin-guide/xfs.rst b/Documentation/admin-guide/xfs.rst
index b67772cf36d6..a18328a5fb93 100644
--- a/Documentation/admin-guide/xfs.rst
+++ b/Documentation/admin-guide/xfs.rst
@@ -124,6 +124,14 @@ When mounting an XFS filesystem, the following options are accepted.
controls the size of each buffer and so is also relevant to
this case.
+ lifetime (default) or nolifetime
+	Enable data placement based on write lifetime hints provided
+	by the user. This turns on co-allocation of data with similar
+	lifetimes when statistically favorable, to reduce garbage
+	collection cost.
+
+ These options are only available for zoned rt file systems.
+
logbsize=value
Set the size of each in-memory log buffer. The size may be
specified in bytes, or in kilobytes with a "k" suffix.
@@ -143,6 +151,25 @@ When mounting an XFS filesystem, the following options are accepted.
optional, and the log section can be separate from the data
section or contained within it.
+ max_atomic_write=value
+ Set the maximum size of an atomic write. The size may be
+ specified in bytes, in kilobytes with a "k" suffix, in megabytes
+ with a "m" suffix, or in gigabytes with a "g" suffix. The size
+ cannot be larger than the maximum write size, larger than the
+ size of any allocation group, or larger than the size of a
+ remapping operation that the log can complete atomically.
+
+ The default value is to set the maximum I/O completion size
+ to allow each CPU to handle one at a time.
+
+ max_open_zones=value
+ Specify the max number of zones to keep open for writing on a
+	zoned rt device. A larger number of open zones aids file data
+	separation but may impact performance on HDDs.
+
+ If ``max_open_zones`` is not specified, the value is determined
+ by the capabilities and the size of the zoned rt device.
+
noalign
Data allocations will not be aligned at stripe unit
boundaries. This is only relevant to filesystems created
@@ -542,3 +569,24 @@ The interesting knobs for XFS workqueues are as follows:
nice Relative priority of scheduling the threads. These are the
same nice levels that can be applied to userspace processes.
============ ===========
+
+Zoned Filesystems
+=================
+
+For zoned file systems, the following attributes are exposed in:
+
+ /sys/fs/xfs/<dev>/zoned/
+
+ max_open_zones (Min: 1 Default: Varies Max: UINTMAX)
+ This read-only attribute exposes the maximum number of open zones
+ available for data placement. The value is determined at mount time and
+ is limited by the capabilities of the backing zoned device, file system
+ size and the max_open_zones mount option.
+
+ zonegc_low_space (Min: 0 Default: 0 Max: 100)
+	Define a percentage for how much of the unused space GC should keep
+ available for writing. A high value will reclaim more of the space
+ occupied by unused blocks, creating a larger buffer against write
+ bursts at the cost of increased write amplification. Regardless
+ of this value, garbage collection will always aim to free a minimum
+ amount of blocks to keep max_open_zones open for data placement purposes.
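+
+A short sketch of inspecting and tuning these attributes (the device name
+``sda`` is a placeholder for whatever zoned rt device backs the filesystem,
+and the percentage written is an example value only)::
+
+    cat /sys/fs/xfs/sda/zoned/max_open_zones
+    cat /sys/fs/xfs/sda/zoned/zonegc_low_space
+    # zonegc_low_space is a tunable percentage per the description above
+    echo 25 > /sys/fs/xfs/sda/zoned/zonegc_low_space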
diff --git a/Documentation/arch/arm64/cpu-feature-registers.rst b/Documentation/arch/arm64/cpu-feature-registers.rst
index 253e9743de2f..add66afc7b03 100644
--- a/Documentation/arch/arm64/cpu-feature-registers.rst
+++ b/Documentation/arch/arm64/cpu-feature-registers.rst
@@ -72,14 +72,15 @@ there are some issues with their usage.
process could be migrated to another CPU by the time it uses the
register value, unless the CPU affinity is set. Hence, there is no
guarantee that the value reflects the processor that it is
- currently executing on. The REVIDR is not exposed due to this
- constraint, as REVIDR makes sense only in conjunction with the
- MIDR. Alternately, MIDR_EL1 and REVIDR_EL1 are exposed via sysfs
- at::
+ currently executing on. REVIDR and AIDR are not exposed due to this
+ constraint, as these registers only make sense in conjunction with
+ the MIDR. Alternately, MIDR_EL1, REVIDR_EL1, and AIDR_EL1 are exposed
+ via sysfs at::
/sys/devices/system/cpu/cpu$ID/regs/identification/
- \- midr
- \- revidr
+ \- midr_el1
+ \- revidr_el1
+ \- aidr_el1
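+
+   For example, the exposed copies can be read directly from sysfs (the CPU
+   number is illustrative)::
+
+     cat /sys/devices/system/cpu/cpu0/regs/identification/midr_el1
+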
3. Implementation
--------------------
diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index f968c13b46a7..b18ef4064bc0 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -57,6 +57,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Ampere | AmpereOne AC04 | AC04_CPU_10 | AMPERE_ERRATUM_AC03_CPU_38 |
+----------------+-----------------+-----------------+-----------------------------+
+| Ampere | AmpereOne AC04 | AC04_CPU_23 | AMPERE_ERRATUM_AC04_CPU_23 |
++----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 |
+----------------+-----------------+-----------------+-----------------------------+
diff --git a/Documentation/arch/arm64/sme.rst b/Documentation/arch/arm64/sme.rst
index b2fa01f85cb5..4cb38330e704 100644
--- a/Documentation/arch/arm64/sme.rst
+++ b/Documentation/arch/arm64/sme.rst
@@ -69,8 +69,8 @@ model features for SME is included in Appendix A.
vectors from 0 to VL/8-1 stored in the same endianness invariant format as is
used for SVE vectors.
-* On thread creation TPIDR2_EL0 is preserved unless CLONE_SETTLS is specified,
- in which case it is set to 0.
+* On thread creation PSTATE.ZA and TPIDR2_EL0 are preserved unless CLONE_VM
+ is specified, in which case PSTATE.ZA is set to 0 and TPIDR2_EL0 is set to 0.
2. Vector lengths
------------------
@@ -115,7 +115,7 @@ be zeroed.
5. Signal handling
-------------------
-* Signal handlers are invoked with streaming mode and ZA disabled.
+* Signal handlers are invoked with PSTATE.SM=0, PSTATE.ZA=0, and TPIDR2_EL0=0.
* A new signal frame record TPIDR2_MAGIC is added formatted as a struct
tpidr2_context to allow access to TPIDR2_EL0 from signal handlers.
@@ -241,7 +241,7 @@ prctl(PR_SME_SET_VL, unsigned long arg)
length, or calling PR_SME_SET_VL with the PR_SME_SET_VL_ONEXEC flag,
does not constitute a change to the vector length for this purpose.
- * Changing the vector length causes PSTATE.ZA and PSTATE.SM to be cleared.
+ * Changing the vector length causes PSTATE.ZA to be cleared.
Calling PR_SME_SET_VL with vl equal to the thread's current vector
length, or calling PR_SME_SET_VL with the PR_SME_SET_VL_ONEXEC flag,
does not constitute a change to the vector length for this purpose.
diff --git a/Documentation/arch/openrisc/openrisc_port.rst b/Documentation/arch/openrisc/openrisc_port.rst
index 1565b9546e38..60b0a9e51d70 100644
--- a/Documentation/arch/openrisc/openrisc_port.rst
+++ b/Documentation/arch/openrisc/openrisc_port.rst
@@ -7,10 +7,10 @@ target architecture, specifically, is the 32-bit OpenRISC 1000 family (or1k).
For information about OpenRISC processors and ongoing development:
- ======= =============================
+ ======= ==============================
website https://openrisc.io
- email openrisc@lists.librecores.org
- ======= =============================
+ email linux-openrisc@vger.kernel.org
+ ======= ==============================
---------------------------------------------------------------------
@@ -27,11 +27,11 @@ Toolchain binaries can be obtained from openrisc.io or our github releases page.
Instructions for building the different toolchains can be found on openrisc.io
or Stafford's toolchain build and release scripts.
- ========== =================================================
- binaries https://github.com/openrisc/or1k-gcc/releases
+ ========== ==========================================================
+ binaries https://github.com/stffrdhrn/or1k-toolchain-build/releases
toolchains https://openrisc.io/software
building https://github.com/stffrdhrn/or1k-toolchain-build
- ========== =================================================
+ ========== ==========================================================
2) Building
@@ -40,6 +40,12 @@ Build the Linux kernel as usual::
make ARCH=openrisc CROSS_COMPILE="or1k-linux-" defconfig
make ARCH=openrisc CROSS_COMPILE="or1k-linux-"
+If you want to embed initramfs in the kernel, also pass ``CONFIG_INITRAMFS_SOURCE``. For example::
+
+ make ARCH=openrisc CROSS_COMPILE="or1k-linux-" CONFIG_INITRAMFS_SOURCE="path/to/rootfs path/to/devnodes"
+
+For more information on this, please check Documentation/filesystems/ramfs-rootfs-initramfs.rst.
+
3) Running on FPGA (optional)
The OpenRISC community typically uses FuseSoC to manage building and programming
diff --git a/Documentation/arch/powerpc/htm.rst b/Documentation/arch/powerpc/htm.rst
new file mode 100644
index 000000000000..fcb4eb6306b1
--- /dev/null
+++ b/Documentation/arch/powerpc/htm.rst
@@ -0,0 +1,104 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. _htm:
+
+===================================
+HTM (Hardware Trace Macro)
+===================================
+
+Athira Rajeev, 2 Mar 2025
+
+.. contents::
+ :depth: 3
+
+
+Basic overview
+==============
+
+H_HTM is used as an interface for executing Hardware Trace Macro (HTM)
+functions, including setup, configuration, control and dumping of the HTM data.
+To use HTM, the HTM buffers must first be set up, and HTM operations can then
+be controlled using the H_HTM hcall. The hcall can be invoked for any core/chip
+of the system from within a partition itself. To use this feature, a debugfs
+folder called "htmdump" is present under /sys/kernel/debug/powerpc.
+
+
+HTM debugfs example usage
+=========================
+
+.. code-block:: sh
+
+ # ls /sys/kernel/debug/powerpc/htmdump/
+ coreindexonchip htmcaps htmconfigure htmflags htminfo htmsetup
+ htmstart htmstatus htmtype nodalchipindex nodeindex trace
+
+Details on each file:
+
+* nodeindex, nodalchipindex, coreindexonchip: specify which partition to configure the HTM for.
+* htmtype: specifies the type of HTM. Supported target is hardwareTarget.
+* trace: is used to read the HTM data.
+* htmconfigure: configure/deconfigure the HTM. Writing 1 to the file will configure the trace, writing 0 will deconfigure it.
+* htmstart: start/stop the HTM. Writing 1 to the file will start the tracing, writing 0 will stop the tracing.
+* htmstatus: get the status of the HTM. This is needed to understand the HTM state after each operation.
+* htmsetup: set the HTM buffer size. The value is the buffer size expressed as a power of 2.
+* htminfo: provides the system processor configuration details. This is needed to understand the appropriate values for nodeindex, nodalchipindex, coreindexonchip.
+* htmcaps: provides the HTM capabilities like minimum/maximum buffer size, what kind of tracing the HTM supports, etc.
+* htmflags: allows passing flags to the hcall. Currently supports controlling the wrapping of the HTM buffer.
+
+To see the system processor configuration details:
+
+.. code-block:: sh
+
+ # cat /sys/kernel/debug/powerpc/htmdump/htminfo > htminfo_file
+
+The result can be interpreted using hexdump.
+
+To collect HTM traces for a partition represented by nodeindex as
+zero, nodalchipindex as 1, and coreindexonchip as 12:
+
+.. code-block:: sh
+
+ # cd /sys/kernel/debug/powerpc/htmdump/
+ # echo 2 > htmtype
+ # echo 33 > htmsetup ( sets 8GB memory for HTM buffer, number is size in power of 2 )
+
+This requires a CEC reboot to get the HTM buffers allocated.
+
+.. code-block:: sh
+
+ # cd /sys/kernel/debug/powerpc/htmdump/
+ # echo 2 > htmtype
+ # echo 0 > nodeindex
+ # echo 1 > nodalchipindex
+ # echo 12 > coreindexonchip
+ # echo 1 > htmflags # to set noWrap for HTM buffers
+ # echo 1 > htmconfigure # Configure the HTM
+ # echo 1 > htmstart # Start the HTM
+ # echo 0 > htmstart # Stop the HTM
+ # echo 0 > htmconfigure # Deconfigure the HTM
+ # cat htmstatus # Dump the status of HTM entries as data
+
+The above sets the htmtype and core details, followed by executing the respective HTM operations.
+
+Read the HTM trace data
+========================
+
+After starting the trace collection, run the workload
+of interest. Stop the trace collection after required period
+of time, and read the trace file.
+
+.. code-block:: sh
+
+ # cat /sys/kernel/debug/powerpc/htmdump/trace > trace_file
+
+This trace file will contain the relevant instruction traces
+collected during the workload execution, and can be used as an
+input file for trace decoders to interpret the data.
+
+Benefits of using HTM debugfs interface
+=======================================
+
+It is now possible to collect traces for a particular core/chip
+from within any partition of the system and decode them. Through
+this enablement, a small partition can be dedicated to collecting and
+analyzing the trace data, providing important information for performance
+analysis, software tuning, or hardware debug.
diff --git a/Documentation/arch/powerpc/kvm-nested.rst b/Documentation/arch/powerpc/kvm-nested.rst
index 5defd13cc6c1..574592505604 100644
--- a/Documentation/arch/powerpc/kvm-nested.rst
+++ b/Documentation/arch/powerpc/kvm-nested.rst
@@ -208,13 +208,9 @@ associated values for each ID in the GSB::
flags:
Bit 0: getGuestWideState: Request state of the Guest instead
of an individual VCPU.
- Bit 1: takeOwnershipOfVcpuState Indicate the L1 is taking
- over ownership of the VCPU state and that the L0 can free
- the storage holding the state. The VCPU state will need to
- be returned to the Hypervisor via H_GUEST_SET_STATE prior
- to H_GUEST_RUN_VCPU being called for this VCPU. The data
- returned in the dataBuffer is in a Hypervisor internal
- format.
+ Bit 1: getHostWideState: Request stats of the Host. This causes
+ the guestId and vcpuId parameters to be ignored and attempting
+ to get the VCPU/Guest state will cause an error.
Bits 2-63: Reserved
guestId: ID obtained from H_GUEST_CREATE
vcpuId: ID of the vCPU pass to H_GUEST_CREATE_VCPU
@@ -406,9 +402,10 @@ the partition like the timebase offset and partition scoped page
table information.
+--------+-------+----+--------+----------------------------------+
-| ID | Size | RW | Thread | Details |
-| | Bytes | | Guest | |
-| | | | Scope | |
+| ID | Size | RW |(H)ost | Details |
+| | Bytes | |(G)uest | |
+| | | |(T)hread| |
+| | | |Scope | |
+========+=======+====+========+==================================+
| 0x0000 | | RW | TG | NOP element |
+--------+-------+----+--------+----------------------------------+
@@ -434,6 +431,29 @@ table information.
| | | | |- 0x8 Table size. |
+--------+-------+----+--------+----------------------------------+
| 0x0007-| | | | Reserved |
+| 0x07FF | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x0800 | 0x08 | R | H | Current usage in bytes of the |
+| | | | | L0's Guest Management Space |
+| | | | | for an L1-Lpar. |
++--------+-------+----+--------+----------------------------------+
+| 0x0801 | 0x08 | R | H | Max bytes available in the |
+| | | | | L0's Guest Management Space for |
+| | | | | an L1-Lpar |
++--------+-------+----+--------+----------------------------------+
+| 0x0802 | 0x08 | R | H | Current usage in bytes of the |
+| | | | | L0's Guest Page Table Management |
+| | | | | Space for an L1-Lpar |
++--------+-------+----+--------+----------------------------------+
+| 0x0803 | 0x08 | R | H | Max bytes available in the L0's |
+| | | | | Guest Page Table Management |
+| | | | | Space for an L1-Lpar |
++--------+-------+----+--------+----------------------------------+
+| 0x0804 | 0x08 | R | H | Cumulative Reclaimed bytes from |
+| | | | | L0 Guest's Page Table Management |
+| | | | | Space due to overcommit |
++--------+-------+----+--------+----------------------------------+
+| 0x0805-| | | | Reserved |
| 0x0BFF | | | | |
+--------+-------+----+--------+----------------------------------+
| 0x0C00 | 0x10 | RW | T |Run vCPU Input Buffer: |
diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst
index 53607d962653..f60bf5991755 100644
--- a/Documentation/arch/riscv/hwprobe.rst
+++ b/Documentation/arch/riscv/hwprobe.rst
@@ -51,7 +51,7 @@ The following keys are defined:
* :c:macro:`RISCV_HWPROBE_KEY_MARCHID`: Contains the value of ``marchid``, as
defined by the RISC-V privileged architecture specification.
-* :c:macro:`RISCV_HWPROBE_KEY_MIMPLID`: Contains the value of ``mimplid``, as
+* :c:macro:`RISCV_HWPROBE_KEY_MIMPID`: Contains the value of ``mimpid``, as
defined by the RISC-V privileged architecture specification.
* :c:macro:`RISCV_HWPROBE_KEY_BASE_BEHAVIOR`: A bitmask containing the base
diff --git a/Documentation/arch/x86/amd-debugging.rst b/Documentation/arch/x86/amd-debugging.rst
new file mode 100644
index 000000000000..d92bf59d62c7
--- /dev/null
+++ b/Documentation/arch/x86/amd-debugging.rst
@@ -0,0 +1,368 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Debugging AMD Zen systems
++++++++++++++++++++++++++
+
+Introduction
+============
+
+This document describes techniques that are useful for debugging issues with
+AMD Zen systems. It is intended for use by developers and technical users
+to help identify and resolve issues.
+
+S3 vs s2idle
+============
+
+On AMD systems, it's not possible to simultaneously support suspend-to-RAM (S3)
+and suspend-to-idle (s2idle). To confirm which mode your system supports,
+check the output of ``cat /sys/power/mem_sleep``. If it shows ``s2idle [deep]`` then
+*S3* is supported. If it shows ``[s2idle]`` then *s2idle* is
+supported.
+
+On systems that support *S3*, the firmware will be utilized to put all hardware into
+the appropriate low power state.
+
+On systems that support *s2idle*, the kernel will be responsible for transitioning devices
+into the appropriate low power state. When all devices are in the appropriate low
+power state, the hardware will transition into a hardware sleep state.
+
+After a suspend cycle you can tell how much time was spent in a hardware sleep
+state by looking at ``cat /sys/power/suspend_stats/last_hw_sleep``.
+
+This flowchart explains how the AMD s2idle suspend flow works.
+
+.. kernel-figure:: suspend.svg
+
+This flowchart explains how the AMD s2idle resume flow works.
+
+.. kernel-figure:: resume.svg
+
+s2idle debugging tool
+=====================
+
+As there are a lot of places where problems can occur, a debugging tool has been
+created at
+`amd-debug-tools <https://git.kernel.org/pub/scm/linux/kernel/git/superm1/amd-debug-tools.git/about/>`_
+that can help test for common problems and offer suggestions.
+
+If you have an s2idle issue, it's best to start with this and follow instructions
+from its findings. If you continue to have an issue, raise a bug with the
+report generated from this script to
+`drm/amd gitlab <https://gitlab.freedesktop.org/drm/amd/-/issues/new?issuable_template=s2idle_BUG_TEMPLATE>`_.
+
+Spurious s2idle wakeups from an IRQ
+===================================
+
+Spurious wakeups will generally record the offending IRQ number in
+``/sys/power/pm_wakeup_irq``. This can be matched to ``/proc/interrupts`` to
+determine what device woke the system.
+
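+One way to do the matching (a sketch; the exact field layout of
+``/proc/interrupts`` may vary by architecture)::
+
+    irq=$(cat /sys/power/pm_wakeup_irq)
+    grep "^ *${irq}:" /proc/interrupts
+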
+If this isn't enough to debug the problem, then the following sysfs files
+can be set to add more verbosity to the wakeup process: ::
+
+ # echo 1 | sudo tee /sys/power/pm_debug_messages
+ # echo 1 | sudo tee /sys/power/pm_print_times
+
+After making those changes, the kernel will display messages that can
+be traced back to kernel s2idle loop code as well as display any active
+GPIO sources while waking up.
+
+If the wakeup is caused by the ACPI SCI, additional ACPI debugging may be
+needed. These commands can enable additional trace data: ::
+
+ # echo enable | sudo tee /sys/module/acpi/parameters/trace_state
+ # echo 1 | sudo tee /sys/module/acpi/parameters/aml_debug_output
+ # echo 0x0800000f | sudo tee /sys/module/acpi/parameters/debug_level
+ # echo 0xffff0000 | sudo tee /sys/module/acpi/parameters/debug_layer
+
+Spurious s2idle wakeups from a GPIO
+===================================
+
+If a GPIO is active when waking up the system, ideally you would look at the
+schematic to determine what device it is associated with. If the schematic
+is not available, another tactic is to look at the ACPI _EVT() entry
+to determine what device is notified when that GPIO is active.
+
+For a hypothetical example, say that GPIO 59 woke up the system. You can
+look at the SSDT to determine what device is notified when GPIO 59 is active.
+
+First convert the GPIO number into hex. ::
+
+ $ python3 -c "print(hex(59))"
+ 0x3b
+
+Next determine which ACPI table has the ``_EVT`` entry. For example: ::
+
+ $ sudo grep EVT /sys/firmware/acpi/tables/SSDT*
+ grep: /sys/firmware/acpi/tables/SSDT27: binary file matches
+
+Decode this table::
+
+ $ sudo cp /sys/firmware/acpi/tables/SSDT27 .
+ $ sudo iasl -d SSDT27
+
+Then look at the table and find the matching entry for GPIO 0x3b. ::
+
+ Case (0x3B)
+ {
+ M000 (0x393B)
+ M460 (" Notify (\\_SB.PCI0.GP17.XHC1, 0x02)\n", Zero, Zero, Zero, Zero, Zero, Zero)
+ Notify (\_SB.PCI0.GP17.XHC1, 0x02) // Device Wake
+ }
+
+You can see in this case that the device ``\_SB.PCI0.GP17.XHC1`` is notified
+when GPIO 59 is active. It's obvious this is an XHCI controller, but to go a
+step further you can figure out which XHCI controller it is by matching it to
+ACPI. ::
+
+ $ grep "PCI0.GP17.XHC1" /sys/bus/acpi/devices/*/path
+ /sys/bus/acpi/devices/device:2d/path:\_SB_.PCI0.GP17.XHC1
+ /sys/bus/acpi/devices/device:2e/path:\_SB_.PCI0.GP17.XHC1.RHUB
+ /sys/bus/acpi/devices/device:2f/path:\_SB_.PCI0.GP17.XHC1.RHUB.PRT1
+ /sys/bus/acpi/devices/device:30/path:\_SB_.PCI0.GP17.XHC1.RHUB.PRT1.CAM0
+ /sys/bus/acpi/devices/device:31/path:\_SB_.PCI0.GP17.XHC1.RHUB.PRT1.CAM1
+ /sys/bus/acpi/devices/device:32/path:\_SB_.PCI0.GP17.XHC1.RHUB.PRT2
+ /sys/bus/acpi/devices/LNXPOWER:0d/path:\_SB_.PCI0.GP17.XHC1.PWRS
+
+Here you can see it matches to ``device:2d``. Look at the ``physical_node``
+to determine what PCI device that actually is. ::
+
+ $ ls -l /sys/bus/acpi/devices/device:2d/physical_node
+ lrwxrwxrwx 1 root root 0 Feb 12 13:22 /sys/bus/acpi/devices/device:2d/physical_node -> ../../../../../pci0000:00/0000:00:08.1/0000:c2:00.4
+
+So there you have it: the PCI device associated with this GPIO wakeup was ``0000:c2:00.4``.
+
+The ``amd_s2idle.py`` script will capture most of these artifacts for you.
+
+s2idle PM debug messages
+========================
+
+During the s2idle flow on AMD systems, the ACPI LPS0 driver is responsible
+for checking all uPEP constraints. Failing uPEP constraints does not prevent
+s0i3 entry. This means that if some constraints are not met, it is possible
+the kernel may attempt to enter s2idle even if there are some known issues.
+
+To activate PM debugging, either specify the ``pm_debug_messages`` kernel
+command-line option at boot or write to ``/sys/power/pm_debug_messages``.
+Unmet constraints will be displayed in the kernel log and can be
+viewed by logging tools that process the kernel ring buffer, like ``dmesg`` or
+``journalctl``.
+
+If the system freezes on entry/exit before these messages are flushed, a
+useful debugging tactic is to unbind the ``amd_pmc`` driver to prevent
+the platform from being notified to start s0i3 entry. This will stop the
+system from freezing on entry or exit and let you view all the failed
+constraints. ::
+
+ cd /sys/bus/platform/drivers/amd_pmc
+ ls | grep AMD | sudo tee unbind
+
+After doing this, run the suspend cycle and look specifically for errors around: ::
+
+ ACPI: LPI: Constraint not met; min power state:%s current power state:%s
+
+Historical examples of s2idle issues
+====================================
+
+To help understand the types of issues that can occur and how to debug them,
+here are some historical examples of s2idle issues that have been resolved.
+
+Core offlining
+--------------
+An end user had reported that taking a core offline would prevent the system
+from properly entering s0i3. This was debugged using internal AMD tools
+to capture and display a stream of metrics from the hardware showing what changed
+when a core was offlined. It was determined that the hardware didn't get
+notification that the offline cores were in the deepest state, and so it
+prevented the CPU from going into the deepest state. The issue was traced to a
+missing command to put cores into C3 upon offline.
+
+`commit d6b88ce2eb9d2 ("ACPI: processor idle: Allow playing dead in C3 state") <https://git.kernel.org/torvalds/c/d6b88ce2eb9d2>`_
+
+Corruption after resume
+-----------------------
+A big problem that occurred with Rembrandt was that there was graphical
+corruption after resume. This happened because of a misalignment of PSP
+and driver responsibility. The PSP will save and restore DMCUB, but the
+driver assumed it needed to reset DMCUB on resume.
+This actually was a misalignment for earlier silicon as well, but was not
+observed.
+
+`commit 79d6b9351f086 ("drm/amd/display: Don't reinitialize DMCUB on s0ix resume") <https://git.kernel.org/torvalds/c/79d6b9351f086>`_
+
+Back to Back suspends fail
+--------------------------
+When using a wakeup source that triggers an IRQ to wake the system, a bug in
+the pinctrl-amd driver may capture the wrong state of the IRQ and prevent the
+system from going back to sleep properly.
+
+`commit b8c824a869f22 ("pinctrl: amd: Don't save/restore interrupt status and wake status bits") <https://git.kernel.org/torvalds/c/b8c824a869f22>`_
+
+Spurious timer based wakeup after 5 minutes
+-------------------------------------------
+The HPET was being used to program the wakeup source for the system, however
+this was causing a spurious wakeup after 5 minutes. The correct alarm to use
+was the ACPI alarm.
+
+`commit 3d762e21d5637 ("rtc: cmos: Use ACPI alarm for non-Intel x86 systems too") <https://git.kernel.org/torvalds/c/3d762e21d5637>`_
+
+Disk disappears after resume
+----------------------------
+After resuming from s2idle, the NVME disk would disappear. This was due to the
+BIOS not specifying the _DSD StorageD3Enable property. This caused the NVME
+driver not to put the disk into the expected state at suspend and to fail
+on resume.
+
+`commit e79a10652bbd3 ("ACPI: x86: Force StorageD3Enable on more products") <https://git.kernel.org/torvalds/c/e79a10652bbd3>`_
+
+Spurious IRQ1
+-------------
+A number of Renoir, Lucienne, Cezanne, & Barcelo platforms have a
+platform firmware bug where IRQ1 is triggered during s0i3 resume.
+
+This was fixed in the platform firmware, but a number of systems didn't
+receive any more platform firmware updates.
+
+`commit 8e60615e89321 ("platform/x86/amd: pmc: Disable IRQ1 wakeup for RN/CZN") <https://git.kernel.org/torvalds/c/8e60615e89321>`_
+
+Hardware timeout
+----------------
+The hardware performs many actions besides accepting the values from
+the amd-pmc driver. As the communication path with the hardware is a mailbox,
+it's possible that it might not respond quickly enough.
+This issue manifested as a failure to suspend: ::
+
+ PM: dpm_run_callback(): acpi_subsys_suspend_noirq+0x0/0x50 returns -110
+ amd_pmc AMDI0005:00: PM: failed to suspend noirq: error -110
+
+The timing problem was identified by comparing the values of the idle mask.
+
+`commit 3c3c8e88c8712 ("platform/x86: amd-pmc: Increase the response register timeout") <https://git.kernel.org/torvalds/c/3c3c8e88c8712>`_
+
+Failed to reach hardware sleep state with panel on
+--------------------------------------------------
+On some Strix systems certain panels were observed to block the system from
+entering a hardware sleep state if the internal panel was on during the sequence.
+
+Even though the panel got turned off during suspend it exposed a timing problem
+where an interrupt caused the display hardware to wake up and block low power
+state entry.
+
+`commit 40b8c14936bd2 ("drm/amd/display: Disable unneeded hpd interrupts during dm_init") <https://git.kernel.org/torvalds/c/40b8c14936bd2>`_
+
+Runtime power consumption issues
+================================
+
+Runtime power consumption is influenced by many factors, including but not
+limited to the configuration of the PCIe Active State Power Management (ASPM),
+the display brightness, the EPP policy of the CPU, and the power management
+of the devices.
+
+ASPM
+----
+For the best runtime power consumption, ASPM should be programmed as intended
+by the BIOS from the hardware vendor. To accomplish this the Linux kernel
+should be compiled with ``CONFIG_PCIEASPM_DEFAULT`` set to ``y`` and the
+sysfs file ``/sys/module/pcie_aspm/parameters/policy`` should not be modified.
+
+Most notably, if L1.2 is not configured properly for any devices, the SoC
+will not be able to enter the deepest idle state.
+
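+A quick way to confirm that the firmware-provided ASPM configuration is being
+used (the exact list of policies shown may vary by kernel configuration)::
+
+    $ cat /sys/module/pcie_aspm/parameters/policy
+    [default] performance powersave powersupersave
+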
+EPP Policy
+----------
+The ``energy_performance_preference`` sysfs file can be used to set a bias
+towards efficiency or performance for a CPU. Biasing more heavily towards
+performance has a direct impact on battery life.
+
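+A sketch of inspecting and changing the bias (the accepted values depend on
+the cpufreq driver in use, for example an EPP-capable amd-pstate
+configuration; ``power`` is shown as an example value)::
+
+    $ cat /sys/devices/system/cpu/cpu0/cpufreq/energy_performance_preference
+    # echo power | sudo tee /sys/devices/system/cpu/cpu0/cpufreq/energy_performance_preference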
+
+BIOS debug messages
+===================
+
+Most OEM machines don't have a serial UART for outputting kernel or BIOS
+debug messages. However, BIOS debug messages are useful for understanding
+both BIOS bugs and bugs with the Linux kernel drivers that call BIOS AML.
+
+As the BIOS on most OEM AMD systems is based on an AMD reference BIOS,
+the infrastructure used for exporting debugging messages is often the same
+as in the AMD reference BIOS.
+
+Manually Parsing
+----------------
+There is generally an ACPI method ``\M460`` that different paths of the AML
+will call to emit a message to the BIOS serial log. This method takes
+7 arguments, with the first being a string and the rest being optional
+integers::
+
+ Method (M460, 7, Serialized)
+
+Here is an example of a string that BIOS AML may call out using ``\M460``::
+
+ M460 (" OEM-ASL-PCIe Address (0x%X)._REG (%d %d) PCSA = %d\n", DADR, Arg0, Arg1, PCSA, Zero, Zero)
+
+Normally when executed, the ``\M460`` method would populate the additional
+arguments into the string. In order to get these messages from the Linux
+kernel, a hook has been added into ACPICA that can capture the *arguments*
+sent to ``\M460`` and print them to the kernel ring buffer.
+For example the following message could be emitted into kernel ring buffer::
+
+ extrace-0174 ex_trace_args : " OEM-ASL-PCIe Address (0x%X)._REG (%d %d) PCSA = %d\n", ec106000, 2, 1, 1, 0, 0
+
+In order to get these messages, you need to compile with ``CONFIG_ACPI_DEBUG``
+and then turn on the following ACPICA tracing parameters.
+This can be done either on the kernel command line or at runtime:
+
+* ``acpi.trace_method_name=\M460``
+* ``acpi.trace_state=method``
+
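+For example, enabling them at runtime could look like this (a sketch assuming
+the standard ACPICA trace module parameters under
+``/sys/module/acpi/parameters``)::
+
+    # echo "\M460" | sudo tee /sys/module/acpi/parameters/trace_method_name
+    # echo "method" | sudo tee /sys/module/acpi/parameters/trace_state
+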
+NOTE: These can be very noisy at bootup. If you turn these parameters on
+the kernel command line, please also consider turning up ``CONFIG_LOG_BUF_SHIFT``
+to a larger size such as 17 to avoid losing early boot messages.
+
+Tool assisted Parsing
+---------------------
+As mentioned above, parsing by hand can be tedious, especially with a lot of
+messages. To help with this, a tool has been created at
+`amd-debug-tools <https://git.kernel.org/pub/scm/linux/kernel/git/superm1/amd-debug-tools.git/about/>`_
+to help parse the messages.
+
+Random reboot issues
+====================
+
+When a random reboot occurs, the high-level reason for the reboot is stored
+in a register that will persist onto the next boot.
+
+There are 6 classes of reasons for the reboot:
+ * Software induced
+ * Power state transition
+ * Pin induced
+ * Hardware induced
+ * Remote reset
+ * Internal CPU event
+
+.. csv-table::
+ :header: "Bit", "Type", "Reason"
+ :align: left
+
+ "0", "Pin", "thermal pin BP_THERMTRIP_L was tripped"
+ "1", "Pin", "power button was pressed for 4 seconds"
+ "2", "Pin", "shutdown pin was tripped"
+ "4", "Remote", "remote ASF power off command was received"
+ "9", "Internal", "internal CPU thermal limit was tripped"
+ "16", "Pin", "system reset pin BP_SYS_RST_L was tripped"
+ "17", "Software", "software issued PCI reset"
+ "18", "Software", "software wrote 0x4 to reset control register 0xCF9"
+ "19", "Software", "software wrote 0x6 to reset control register 0xCF9"
+ "20", "Software", "software wrote 0xE to reset control register 0xCF9"
+ "21", "ACPI-state", "ACPI power state transition occurred"
+ "22", "Pin", "keyboard reset pin KB_RST_L was tripped"
+ "23", "Internal", "internal CPU shutdown event occurred"
+ "24", "Hardware", "system failed to boot before failed boot timer expired"
+ "25", "Hardware", "hardware watchdog timer expired"
+ "26", "Remote", "remote ASF reset command was received"
+ "27", "Internal", "an uncorrected error caused a data fabric sync flood event"
+ "29", "Internal", "FCH and MP1 failed warm reset handshake"
+ "30", "Internal", "a parity error occurred"
+ "31", "Internal", "a software sync flood event occurred"
+
+This information is read by the kernel at bootup and printed into
+the syslog. When a random reboot occurs, this message can be helpful
+to determine the next component to debug.
diff --git a/Documentation/arch/x86/amd_hsmp.rst b/Documentation/arch/x86/amd_hsmp.rst
index 2fd917638e42..a094f55c10b0 100644
--- a/Documentation/arch/x86/amd_hsmp.rst
+++ b/Documentation/arch/x86/amd_hsmp.rst
@@ -71,6 +71,28 @@ Note: lseek() is not supported as entire metrics table is read.
Metrics table definitions will be documented as part of Public PPR.
The same is defined in the amd_hsmp.h header.
+2. HSMP telemetry sysfs files
+
+The following sysfs files are available at /sys/devices/platform/AMDI0097:0X/.
+
+* c0_residency_input: Percentage of cores in C0 state.
+* prochot_status: Reports 1 if the processor is at thermal threshold value,
+ 0 otherwise.
+* smu_fw_version: SMU firmware version.
+* protocol_version: HSMP interface version.
+* ddr_max_bw: Theoretical maximum DDR bandwidth in GB/s.
+* ddr_utilised_bw_input: Current utilized DDR bandwidth in GB/s.
+* ddr_utilised_bw_perc_input(%): Percentage of current utilized DDR bandwidth.
+* mclk_input: Memory clock in MHz.
+* fclk_input: Fabric clock in MHz.
+* clk_fmax: Maximum frequency of socket in MHz.
+* clk_fmin: Minimum frequency of socket in MHz.
+* cclk_freq_limit_input: Core clock frequency limit per socket in MHz.
+* pwr_current_active_freq_limit: Current active frequency limit of socket
+ in MHz.
+* pwr_current_active_freq_limit_source: Source of current active frequency
+ limit.
+
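+A sketch of reading the ``*_input`` telemetry values for one socket (``:00``
+stands in for the socket instance ``0X`` above)::
+
+    grep . /sys/devices/platform/AMDI0097:00/*_input
+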
ACPI device object format
=========================
The ACPI object format expected from the amd_hsmp driver
@@ -116,6 +138,14 @@ for socket with ID00 is given below::
})
}
+HSMP HWMON interface
+====================
+HSMP power sensors are registered with the hwmon interface. A separate hwmon
+directory is created for each socket and the following files are generated
+within the hwmon directory.
+- power1_input (read only)
+- power1_cap_max (read only)
+- power1_cap (read, write)
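+
+A sketch of reading and capping the socket power through these files (the
+``hwmon3`` index and the cap value are illustrative; hwmon power values are
+in microwatts)::
+
+    cat /sys/class/hwmon/hwmon3/power1_input
+    echo 200000000 > /sys/class/hwmon/hwmon3/power1_cap
+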
An example
==========
diff --git a/Documentation/arch/x86/cpuinfo.rst b/Documentation/arch/x86/cpuinfo.rst
index 6ef426a52cdc..dd8b7806944e 100644
--- a/Documentation/arch/x86/cpuinfo.rst
+++ b/Documentation/arch/x86/cpuinfo.rst
@@ -79,8 +79,9 @@ feature flags.
How are feature flags created?
==============================
-a: Feature flags can be derived from the contents of CPUID leaves.
-------------------------------------------------------------------
+Feature flags can be derived from the contents of CPUID leaves
+--------------------------------------------------------------
+
These feature definitions are organized mirroring the layout of CPUID
leaves and grouped in words with offsets as mapped in enum cpuid_leafs
in cpufeatures.h (see arch/x86/include/asm/cpufeatures.h for details).
@@ -89,8 +90,9 @@ cpufeatures.h, and if it is detected at run time, the flags will be
displayed accordingly in /proc/cpuinfo. For example, the flag "avx2"
comes from X86_FEATURE_AVX2 in cpufeatures.h.
-b: Flags can be from scattered CPUID-based features.
-----------------------------------------------------
+Flags can be from scattered CPUID-based features
+------------------------------------------------
+
Hardware features enumerated in sparsely populated CPUID leaves get
software-defined values. Still, CPUID needs to be queried to determine
if a given feature is present. This is done in init_scattered_cpuid_features().
@@ -104,8 +106,9 @@ has only one feature and would waste 31 bits of space in the x86_capability[]
array. Since there is a struct cpuinfo_x86 for each possible CPU, the wasted
memory is not trivial.
-c: Flags can be created synthetically under certain conditions for hardware features.
--------------------------------------------------------------------------------------
+Flags can be created synthetically under certain conditions for hardware features
+---------------------------------------------------------------------------------
+
Examples of conditions include whether certain features are present in
MSR_IA32_CORE_CAPS or specific CPU models are identified. If the needed
conditions are met, the features are enabled by the set_cpu_cap or
@@ -114,8 +117,8 @@ the feature X86_FEATURE_SPLIT_LOCK_DETECT will be enabled and
"split_lock_detect" will be displayed. The flag "ring3mwait" will be
displayed only when running on INTEL_XEON_PHI_[KNL|KNM] processors.
-d: Flags can represent purely software features.
-------------------------------------------------
+Flags can represent purely software features
+--------------------------------------------
These flags do not represent hardware features. Instead, they represent a
software feature implemented in the kernel. For example, Kernel Page Table
Isolation is purely software feature and its feature flag X86_FEATURE_PTI is
@@ -130,14 +133,18 @@ x86_cap/bug_flags[] arrays in kernel/cpu/capflags.c. The names in the
resulting x86_cap/bug_flags[] are used to populate /proc/cpuinfo. The naming
of flags in the x86_cap/bug_flags[] are as follows:
-a: The name of the flag is from the string in X86_FEATURE_<name> by default.
-----------------------------------------------------------------------------
-By default, the flag <name> in /proc/cpuinfo is extracted from the respective
-X86_FEATURE_<name> in cpufeatures.h. For example, the flag "avx2" is from
-X86_FEATURE_AVX2.
+Flags do not appear by default in /proc/cpuinfo
+-----------------------------------------------
+
+Feature flags are omitted by default from /proc/cpuinfo as it does not make
+sense for the feature to be exposed to userspace in most cases. For example,
+X86_FEATURE_ALWAYS is defined in cpufeatures.h but that flag is an internal
+kernel feature used in the alternative runtime patching functionality. So the
+flag does not appear in /proc/cpuinfo.
+
+Specify a flag name if absolutely needed
+----------------------------------------
-b: The naming can be overridden.
---------------------------------
If the comment on the line for the #define X86_FEATURE_* starts with a
double-quote character (""), the string inside the double-quote characters
will be the name of the flags. For example, the flag "sse4_1" comes from
@@ -148,36 +155,31 @@ needed. For instance, /proc/cpuinfo is a userspace interface and must remain
constant. If, for some reason, the naming of X86_FEATURE_<name> changes, one
shall override the new naming with the name already used in /proc/cpuinfo.
-c: The naming override can be "", which means it will not appear in /proc/cpuinfo.
-----------------------------------------------------------------------------------
-The feature shall be omitted from /proc/cpuinfo if it does not make sense for
-the feature to be exposed to userspace. For example, X86_FEATURE_ALWAYS is
-defined in cpufeatures.h but that flag is an internal kernel feature used
-in the alternative runtime patching functionality. So, its name is overridden
-with "". Its flag will not appear in /proc/cpuinfo.
-
Flags are missing when one or more of these happen
==================================================
-a: The hardware does not enumerate support for it.
---------------------------------------------------
+The hardware does not enumerate support for it
+----------------------------------------------
+
For example, when a new kernel is running on old hardware or the feature is
not enabled by boot firmware. Even if the hardware is new, there might be a
problem enabling the feature at run time, the flag will not be displayed.
-b: The kernel does not know about the flag.
--------------------------------------------
+The kernel does not know about the flag
+---------------------------------------
+
For example, when an old kernel is running on new hardware.
-c: The kernel disabled support for it at compile-time.
-------------------------------------------------------
-For example, if 5-level-paging is not enabled when building (i.e.,
-CONFIG_X86_5LEVEL is not selected) the flag "la57" will not show up [#f1]_.
+The kernel disabled support for it at compile-time
+--------------------------------------------------
+
+For example, if Linear Address Masking (LAM) is not enabled when building (i.e.,
+CONFIG_ADDRESS_MASKING is not selected), the flag "lam" will not show up.
Even though the feature will still be detected via CPUID, the kernel disables
-it by clearing via setup_clear_cpu_cap(X86_FEATURE_LA57).
+it by clearing via setup_clear_cpu_cap(X86_FEATURE_LAM).
-d: The feature is disabled at boot-time.
-----------------------------------------
+The feature is disabled at boot-time
+------------------------------------
A feature can be disabled either using a command-line parameter or because
it failed to be enabled. The command-line parameter clearcpuid= can be used
to disable features using the feature number as defined in
@@ -190,12 +192,11 @@ disable specific features. The list of parameters includes, but is not limited
to, nofsgsbase, nosgx, noxsave, etc. 5-level paging can also be disabled using
"no5lvl".
-e: The feature was known to be non-functional.
-----------------------------------------------
+The feature was known to be non-functional
+------------------------------------------
+
The feature was known to be non-functional because a dependency was
missing at runtime. For example, AVX flags will not show up if XSAVE feature
is disabled since they depend on XSAVE feature. Another example would be broken
CPUs and them missing microcode patches. Due to that, the kernel decides not to
enable a feature.
-
-.. [#f1] 5-level paging uses linear address of 57 bits.
diff --git a/Documentation/arch/x86/index.rst b/Documentation/arch/x86/index.rst
index 8ac64d7de4dc..8ea762494bcc 100644
--- a/Documentation/arch/x86/index.rst
+++ b/Documentation/arch/x86/index.rst
@@ -25,13 +25,13 @@ x86-specific Documentation
shstk
iommu
intel_txt
+ amd-debugging
amd-memory-encryption
amd_hsmp
tdx
pti
mds
microcode
- resctrl
tsx_async_abort
buslock
usb-legacy-support
diff --git a/Documentation/arch/x86/resume.svg b/Documentation/arch/x86/resume.svg
new file mode 100644
index 000000000000..ad8839f762bf
--- /dev/null
+++ b/Documentation/arch/x86/resume.svg
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Do not edit this file with editors other than draw.io -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="582px" height="1152px" viewBox="-0.5 -0.5 582 1152" content="&lt;mxfile host=&quot;confluence.amd.com&quot; agent=&quot;Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36 Edg/134.0.0.0&quot; version=&quot;24.7.10&quot; scale=&quot;1&quot; border=&quot;0&quot;&gt;&#10; &lt;diagram id=&quot;lFF5s3GfZ4Py3fAf5dUH&quot; name=&quot;Page-1&quot;&gt;&#10; &lt;mxGraphModel dx=&quot;2364&quot; dy=&quot;1473&quot; grid=&quot;1&quot; gridSize=&quot;10&quot; guides=&quot;1&quot; tooltips=&quot;1&quot; connect=&quot;1&quot; arrows=&quot;1&quot; fold=&quot;1&quot; page=&quot;0&quot; pageScale=&quot;1&quot; pageWidth=&quot;850&quot; pageHeight=&quot;1100&quot; math=&quot;0&quot; shadow=&quot;0&quot;&gt;&#10; &lt;root&gt;&#10; &lt;mxCell id=&quot;0&quot; /&gt;&#10; &lt;mxCell id=&quot;1&quot; parent=&quot;0&quot; /&gt;&#10; &lt;mxCell id=&quot;jeVlbFHk8Qahp5zcIn_D-10&quot; value=&quot;&quot; style=&quot;edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;labelBackgroundColor=none;strokeColor=#E07A5F;fontColor=default;&quot; edge=&quot;1&quot; parent=&quot;1&quot; source=&quot;jeVlbFHk8Qahp5zcIn_D-1&quot; target=&quot;jeVlbFHk8Qahp5zcIn_D-9&quot;&gt;&#10; &lt;mxGeometry relative=&quot;1&quot; as=&quot;geometry&quot; /&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;jeVlbFHk8Qahp5zcIn_D-1&quot; value=&quot;Wakeup event occurs&quot; style=&quot;strokeWidth=2;html=1;shape=mxgraph.flowchart.start_2;whiteSpace=wrap;labelBackgroundColor=none;fillColor=#0CF232;strokeColor=#E07A5F;fontColor=#393C56;&quot; vertex=&quot;1&quot; parent=&quot;1&quot;&gt;&#10; &lt;mxGeometry x=&quot;-240&quot; y=&quot;-190&quot; width=&quot;100&quot; height=&quot;100&quot; as=&quot;geometry&quot; /&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;jeVlbFHk8Qahp5zcIn_D-8&quot; value=&quot;&quot; style=&quot;edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;labelBackgroundColor=none;strokeColor=#E07A5F;fontColor=default;&quot; edge=&quot;1&quot; parent=&quot;1&quot; source=&quot;jeVlbFHk8Qahp5zcIn_D-3&quot; target=&quot;jeVlbFHk8Qahp5zcIn_D-4&quot;&gt;&#10; &lt;mxGeometry relative=&quot;1&quot; as=&quot;geometry&quot; /&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;jeVlbFHk8Qahp5zcIn_D-56&quot; value=&quot;&quot; style=&quot;edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;labelBackgroundColor=none;strokeColor=#E07A5F;fontColor=default;&quot; edge=&quot;1&quot; parent=&quot;1&quot; source=&quot;jeVlbFHk8Qahp5zcIn_D-3&quot; target=&quot;jeVlbFHk8Qahp5zcIn_D-13&quot;&gt;&#10; &lt;mxGeometry relative=&quot;1&quot; as=&quot;geometry&quot; /&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;jeVlbFHk8Qahp5zcIn_D-3&quot; value=&quot;MP1 hands off control to OS&quot; style=&quot;rounded=1;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=2;labelBackgroundColor=none;fillColor=#F2CC8F;strokeColor=#E07A5F;fontColor=#393C56;&quot; vertex=&quot;1&quot; parent=&quot;1&quot;&gt;&#10; &lt;mxGeometry x=&quot;80&quot; y=&quot;-190&quot; width=&quot;100&quot; height=&quot;100&quot; as=&quot;geometry&quot; /&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;jeVlbFHk8Qahp5zcIn_D-4&quot; value=&quot;&amp;lt;span style=&amp;quot;background-color: rgb(232, 205, 151);&amp;quot;&amp;gt;&amp;lt;span style=&amp;quot;color: rgb(0, 0, 0); font-family: Helvetica; font-size: 12px; font-style: normal; 
[Embedded draw.io flowchart; the mxGraph XML source is omitted here and summarized instead.]

Flowchart: handling of a wakeup event.
Wakeup event occurs -> MP0/MP1 boot process -> MP1 hands off control to OS -> OS moves one core out of ACPI C3 -> OS checks all wake sources.
Checks, in order: "IRQ other than ACPI SCI active?" (yes -> resume); "ACPI fixed event active?" (yes -> resume); "GPIO IRQ shared with SCI?" (yes -> "Any GPIO w/ WAKESTS active?", yes -> resume); "Check for ACPI Notify() events"; "Any GPE pending?" (yes -> resume); "Any PM wakeup event pending?" (yes -> resume).
On any "yes": uPEP driver removes OS_HINT -> Kernel resumes system. If every check is "no": OS moves the active core back to ACPI C3 -> MP1 puts the system back to sleep.
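To make the decision sequence in the flowchart easier to follow, here is a minimal, self-contained C sketch of the same checks. It is an illustration only: every helper below is a hypothetical stub named after a flowchart node, not a real kernel or ACPI API, and a real implementation would query platform state instead of returning constants.

/*
 * Minimal sketch of the wakeup-evaluation flow shown in the flowchart above.
 * All helpers are hypothetical stubs named after the flowchart nodes; they
 * are NOT real kernel/ACPI APIs.
 */
#include <stdbool.h>
#include <stdio.h>

static bool irq_other_than_acpi_sci_active(void) { return false; } /* stub */
static bool acpi_fixed_event_active(void)        { return false; } /* stub */
static bool gpio_irq_shared_with_sci(void)       { return false; } /* stub */
static bool any_gpio_wakests_active(void)        { return false; } /* stub */
static void check_acpi_notify_events(void)       { }               /* stub */
static bool any_gpe_pending(void)                { return false; } /* stub */
static bool any_pm_wakeup_event_pending(void)    { return false; } /* stub */

enum wake_action { RESUME_SYSTEM, BACK_TO_SLEEP };

static enum wake_action evaluate_wakeup(void)
{
	/* "OS checks all wake sources" */
	if (irq_other_than_acpi_sci_active())   /* IRQ other than ACPI SCI active? */
		return RESUME_SYSTEM;
	if (acpi_fixed_event_active())          /* ACPI fixed event active? */
		return RESUME_SYSTEM;
	/* GPIO IRQ shared with SCI?  If so, also look at the GPIO WAKESTS bits. */
	if (gpio_irq_shared_with_sci() && any_gpio_wakests_active())
		return RESUME_SYSTEM;
	check_acpi_notify_events();             /* "Check for ACPI Notify() events" */
	if (any_gpe_pending())                  /* Any GPE pending? */
		return RESUME_SYSTEM;
	if (any_pm_wakeup_event_pending())      /* Any PM wakeup event pending? */
		return RESUME_SYSTEM;
	/* All checks negative: the core re-enters ACPI C3 and the system sleeps again. */
	return BACK_TO_SLEEP;
}

int main(void)
{
	/* Per the flowchart, RESUME_SYSTEM corresponds to the uPEP driver removing
	 * OS_HINT before the kernel resumes; BACK_TO_SLEEP means MP1 re-enters sleep. */
	puts(evaluate_wakeup() == RESUME_SYSTEM ? "resume system" : "back to sleep");
	return 0;
}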
2E98TnxHQ2JTVJMBaIQlzsO2KhWy6oBhAbkaAy9A2rT2FtatgZH8u8ew+doXdCY+mPP/9S8uJBc0xGNUGCwWPkvkrYxcIHz6avvz1uWKSqEqptgeiuitHsR5jwU4Z3pPrVS1hKEE4SDNr1fvcg6tr2bdN2+INgQOLo++ipq0yH8YUMqen55551zbcHv/5umL9tk7doaO93hSTIH85doRbdJtFPF6/rysM4FyuQgwq9/gpBTd5/5Cx3k82V/SVaMqUHvZIlI7c9kmAsVic76WBjiR8ZrOj5WGROppFxK2nCrPUucglrXZ3iF2x2kUzJwjnpk7gIn+jfFy7doGZdJ9CZ87+4dQlSSHjbmlxdH4R35dodWrRyJ02Ytc5NncJETRgVQvWqlzCFiiWYFMmfUyQ02HRE0u9//ElXr9+ljV8fphGTV9Dd+7/pPoPd6tOpPahMiTyGxSU2wYBcBoz6hOYv3+7WJhBKaKvq9E7tN5TFy861+w9+p692fU8j41ZxVfc2QRVpZP8WplIbyugYNY227jqmq79900o0qHsQQd3VJmwmQ2KX0fL1e3R/b9e0Eg3v25ySPfO0Wz8kwZjMXgxAm55x9M2+U65cPTrWpgHhjUTmvFd5rt64Q0Ehsa7J89ILaSlhdAiFvz+bQAZIvjL23rn3KwVHTXeTXLJmzkATP2wnbMO4dec+9f5ogWIQ1qaM6VPT8um9KF/uLIaYeEsw2oLRjuCoBNqx94SuvrffKqJgmPz5fyQBNiUmwYCYp8zbSB9NXK5rBggZ8yukZTUhCQTSBex0H4xb6iZJjh7QkrD4jdKKL/ZS2MCZup+tJD0eKWLTXTmzN+XPnVUSjJ1VzyOYmdGhVK+a+W5spw6jvFt2HqVW3eNc0gpONObFdqXB0Uto8epvXJ95a+z96++HNGjMYpq7dJuuKWVL5KHpo0MIxGYnGe3KDWqUVNQlqE285EuCQfk8icxKKkpMgvli6yHq0CdeRwpYqCDASmXy24FcyQvjcEj/BF15ON1cMaO3csrHJp59z4qA1TLYzQ9/HxwRRN3a15QEY2fkeAQzL7YL1axU1E4xtvNi0Xd7fzZhh1GTKjlhYrbrNdX1d2+NvVt3H6OW3SbpJib07vnju3p8IoETqJYRk+jg0XOudmJnXjQ5giqXKZAoBINKeCdwYwa2ItgoeCmxCIZ3cABpdFZMZ6pRsbDt+YIPIBGNiV9NsTPW6b6HJDS0d1M3FevQsfOK+8Ovv/2h5IcKtnBiOFUtX0io/kHRS5TDADXBzhY/spObmiRVJBM4/UUw8D9o0CGaYHRVB39xXHdlZ4M/TIOO0S41Cb97auzl7WLY5ZdN60mw73iToCZ16pugM0o3rlWaJn/Ugaur+1qCQdvhXtAgOIZu3b7v6kqz+mUVtc+fBMMjvoHdGlFE+1pe2fVwCtSkcyz9+NPjI26jDWjZum8Vg76aXs+VhVYk9CKosyKJHS8jn6EngmAglu/Ye1IxNG7fc5yu37znwkjrIFSzUhFLg6jd0wwrsVtksNg87ATUDj52KvjFzF6y1fWZp8Ze9pQKBXqrcqmNgl2nRfgk2n/kR1c7zaQtJwgGRsm67cYozoFWOy1+TwwJhocL/HWWJ0TSixntqaO8uRUzbQ3FTP9c9xPcBUDu2gQVctvuY7Tn0Bnatf8UFS/0Gk0a2p5L/rx6tAcQ+N3IDyxJEwx24AXLtys+G+ypAQ8UiOkdmlVRHMFYC7ma398Ew5Oa2EXP2mcg3qoSjh1CY8VcnAIsmx5JZYobn7TYKX/Op1sVZ73alYspamWpIjkpVcrnuUUkFsGYeecmBsFs232cmodP1El2vjw4OP7DRWoXOZWK5H+ValcprnhYZ86U1ivJiB0wnuQb3KIqDY9q5ja2SZZgRHw2jBYDdozZMZ0pb86X3bL4m2B4vi8sedy4dY8ah4xTXMjVZFfyuH3nATULn0jfff/YTgLHqSVx3Sndv/49dsjE27yJRTD+VpHYnd/XpO7tOIh8j2sEYQNnuUjSzL6WJAnGzGcDDnCwVWTKkIYgju7cd5JgVGMTdM1Fk7oRFpU2fbnjqMtdGqoX/CqwoNWEex+vZnnB9W8ceXZuXZ1ezfr4byKDZJSHlSqMFj07Ue0ae4+evKAY+e5p/EWc8EYWxcIJgmENmWiLmbTgtAQD57h2veJ17gA5smWiNXOifKIeiWLtaT4ciS9ZvUtxPNV6eZudECY5gjFyDoJUEvu/96h00Zxu4iB2+u5D5upONQAyvvl0SnfD05LENvLyjv/6hNajPp3ru80Jnv3EjrF3w7ZD1Dby8WkUKjA7YfF0Uop+5wTB8OwRc8d1oVqV+aeAThMMzyZUvWJhmhUd6vLSFcUrsfKBVH65eps2bj+iOH1q7VloQ77cWWnBhPAnx5MX/hr9Ri7S4Vu/RknlHlDa1CkMcQfjjpi0gqbO36TLY3SMh0yJTTDsEbSZAZlnLLRj7GUXtKd2HF9NdF8TzIkfLlFQ6Did9Gkl5TlNMDyJysh24StcPS1n98HT1CQ0luAyYZTqVi1Oowa0NPWVSlISDG9RwY190aQIypAulSWWIJmug2YpV+DVZOZlmpgEw/N9qVquIM0eG2bo0ckjCVFfBl8vaEvwLTL4sj3fn/pZuSSK435tMttMkM8fBGPkoOYtnt5+z5Nw1TKxGXV5rwb16FDb0l6XpAiGtcDbdQ4CQLCyNwoeS7fvPo5pYjTIiUkwPJ8NK5WH1xdRYy+7mFKnSq7c/YGTnT+SNwQDMR52Nhis53y6jb7cedStC7B1LI3vaWorc5pgeIsWMYR8fUnWF+MnctghciqbpAiGNWx6cuoBSSFswAxas/nxfRkjPTgxCYb1fUmfLhWtntmHe9KlTiBc8OsYNZ02bz/imlNWaoCakV1MCGC0dm5fJXiWP5LIhPa0XaIXLv1BMInhGe4JbrjbBJX9zeK5lTtv127eJWzwvFvcZnaYJEMwPAu8p/orG4LBaHElFsHwVD8j12t2srAemfjdSvJBHva0ygmHQTsT2ymCsRMywmmC4fnA/FdVJN7YQVLcvucE9RuxyC0gFhztcE+PtYMmGYLh+X54eurBGrCMFldiEQzvRIjnfckbdF5ITRFjrzcqiR3iEM3rBMHg+HRor6bCd6qcJhiekTcpEYw6lka31Xl9STIEwzvi81S8ZAfa3wTDShOiag4GnGccFrFN8SSfhRO7UbUKYpfdRIlDNJ+3BIMrIfB9gqcubgWXLZnX8loI2zanCYa9Y4b6/el7JDo2vHwIUNU0bLzOkM67zyQJ5l/0eGSVGBIMz/fFm4FXvzW7WIg8vGNITyVCX7T3vyBROU0wPO/p/7ofjNnYsnZDnleyJBgi5YIX7w5OYhAML+iPLxaslRT0X9tNA4FgeAcMScmTl52XPJUvblgHalLnTVdWSTBE5C8Viafe+IJc1DLMjL28EyhPTuXM2osdu13veMqUIbXlxbtAIBhgxR4wOHEXKTp+DW3acYQa1ihJNSoWoVw5XrIMeO7JvOOZLVg7TJIhGF8aeVlrvpEPiNMSDM+PR
RtM3M6g//33Q93bOfjWytjLLmpfT3ae8drI5hAoBGP3fpSdOYC8vBNJp6SkJ4pgeDuupway+P+/LoDgxWoyGgCnCYbVYUWMs0YTjneF3qo8HsEBU9hieAGc7Ux2XtwafG90OhYoBMMbJ4S0XDYtkhAD2dvEhvJAeaw9Dm1YuGIHHfz+HO0//CNd/OUWTRraznYo2EtXblLd9mN0jwKydrwkI8EAqCfJ0Y6303irovCeWDEz9vJUNJHo+yKLgHcXyCywUqAQDLDj2d0iO9WhvmENvIrbAuII6ZegRABQE2+TuffgN2odEacY+tUk6gGuHXueZz0bWiRJEQyvQzPGhNhiXju+CE5KMLxTHKOb0yILGnl4PjEI2r16dhRBSuOl3QdO07tdJrjeXkIeO/e7eGUavVJg5vMRSATDi1nsLbEbvVJgFGDLG9cIjDk2J4RtgCSkJjyvsnpWFOGAQU1JimCcuOxo9uSCUwTDUx984UlrpJaYGXuNXhWoUDofzYwOofRprS+RaknG6FUBuJMvi+9JmTKm4RJdIBEMADB6VWD+hHB6o5j9N8d5rwqYBYLiqVJ2pChefTwpKEkRDAaGF66hWvlCymNonoRrMHs0yimC4fm+WN2cFpVieJKRlbHXKIAXnn+NGdxa+F0k6OTdh8xze18JE312TJhhHBb0LdAIxoiI7bz7DNygFsXN20A4OWKT3bktemdrz3dnqE2PON2FYaOoBEmOYIwCTpktBjyP2XvYAiW4sTZZPXvpFMHwdHBfObnxpDwrYy8wOXD0rBKkW3vLXMXK7GVHLBQE9Jq2YDMtXbub+4ys1WNegUgw6LORKonfzF52VF/TXLZ2N035eKMuyL06ZkZ3g7Tzn6ceg2QQs7pTi6qEE01twn3AmYu/ojFTV7u9W47XIprWfez/kiRVJLXRvgiZCdUIz66WKPSaoXDgBMHwnK2snOJEpRc1H+9ZDCvPXnyLY2VEudOGCGXr1h6j42RP+4IDr50gl7D3alg+xh5oEoyKFe71hA2YpTwDa5TsvAeOMqqULUjxIztaqrcgKjxF/KHmRFVtg/ZtavwNIVa/O3aOu4HgyZXwdjW5Y5zkJBgVAIjjIf1nKIvCboK0M2NMKBXOZx77xAmC4R0Niyx+O33keema2Zq0ZRupOXbqR16IzFNHdKRKb+YXOhkJVIIBVmZqjl3co8LqK++IGz2Py5YHksETOINjlnDJw6x+kNCHvZpSx+ZVDDeQJEsw6sBgt8aLdhgkqwT9tldIPercupqb+Mf71gmC8SYKnVX/1N+NPIRFwjigDFzLx3vcwyYtd4tjbNUGTOweHetQp5ZVDZ+G4ZURyASj4oFTwNiZ65SngR8+fGQFte53hK8cFNGYcufIbOs7NbNR3GqjwvAEDWJg44KjWUrSBKN2DLrh5h1HCc8pfLP/lE5shy5brmRealSzNFWvUEiIWNRyfU0wTj+6pR1o3imBlbGXN1FgkN647TBt+Pow4Yj/6vU7Ov0bKhMcxfD+TsO3S1HRAtk9ckuXBPMYfaieO/eeVF63ANEj2NOtO48jMEJyQAyjN4rmojpVixNeujA74BBlHEgzZ87/QgtX7qS1Xx5QbkqrRIfNGWSCp23x9Avsl7DtWaUngmCsOil/lwhIBPyDgCQY/+Aua5UIBAQCkmACYphlJyUC/kFAEox/cJe1SgQCAgFJMAExzLKTEgH/ICAJxj+4y1olAgGBgCSYgBhm2UmJgH8QkATjH9xlrRKBgEBAEkxADLPspETAPwhIgvEP7rJWiUBAICAJJiCGWXZSIuAfBCTB+Ad3WatEICAQ+M8TTECMguykREAiIISA9fVKoWJkJomAREAi4I6AJBg5KyQCEgHHEJAE4xi0smCJgERAEoycAxIBiYBjCEiCcQxaWbBEQCIgCUbOAYmARMAxBCTBOAatLFgiIBGQBCPngERAIuAYApJgHINWFiwRkAhIgpFzQCIgEXAMAUkwjkErC5YISAT+D0udTC/E19fWAAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-11"><g><path d="M 261 51 L 314.63 51" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 319.88 51 L 312.88 54.5 L 314.63 51 L 312.88 47.5 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-9"><g><rect x="161" y="1" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 51px; margin-left: 162px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">MP0/MP1 boot process</div></div></div></foreignObject><image x="162" y="37" width="98" height="32" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAACACAYAAAARZ/7/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXmAXFWV9++8qk434kgYRiBARBwGlXGbL5h09ZJpDUsAQ4BQ3R0gJIojqEjEEXH/5BPkY5yRgY9FkC1sSVfJpsgutOnqqupARkBEPmAEJ8gSZFgidrq76p3hVld1Xr2679V7tXS97jrvr6Tr3O137ru/d8899xyCPIKAICAICAKCgAYBElQEAUFAEBAEBAEdAkIQMi8EAUFAEBAEtAgIQcjEEAQEAUFAEBCCkDkgCAgCgoAg4B0B2UF4x0okBQFBQBBoKgSEIJpK3TJYQUAQEAS8IyAE4R0rkRQEBAFBoKkQEIJoKnXLYAUBQUAQ8I5A4AliUVd0gQG6D8CupcPikRBaDk8kbnrN+5CdJdu7ei8k4PQSCcK61FBsja5kd/ex8zIcHgLwtxX2IQvgJYBfByjBwC/awvzg4GD8zxXWp4rRoq7o/zKIPgvGIQD2BRAGkAVjC4EGTYOvnL8np+PxuGq/qsemo7dM5qUjw/GEqlT0VxW0RYUXLPhcS+s73riemfsAvGaCDxlJxDfXroXSmtq7e79GjPOnfnF5F+rZD6m7MQjMcIJA0WJUDYQLelb+zZxM5pcAfWSaCULX7VEGfsITLd8ZGbnxTT/jWtjVd4ABXkdAe7lyDKSZ+OSRofgT5WTdfo909p0C4h9PyvBj4+Hwks2D6/9UniBEf35wb++M9hHRdQDmCEH4QS4YstFoNLTlBVoBwofTidh3gtEr917MdIIAMX6QHI59q1qw27t6DybgF/mXr7i6+u4gXLpOv4dBx6U2bvi1l/F1dEePZKbr9bstxxq2MdOa9PDALV7asMv09PSExzK73wQgmqMH4KJ0Ira2IOe+g4DozyPoke7oQjD9HMDu+SKyg/CIXQDEqL2rfyFg/nvuw20G7cJmPEEAtTEzOZqX1OxqGEHkpvYTZGaOSCZv+YPrV3zpAjIpTngVjI0AvQLwuwEcDOCvbHW9ZjIfVTAL+XmhFi6O7hcy6ZcA9lMmLGY+Nj0c/5lXghD9lUe7o+PYfdkI3wngQIu0EER56AIh0d7d9yVivgBAKP9OOpqsA9FhSydmAUHgTTJ5aTIZT1UKrqt5qRKC8PeFQAt6Vu42x+QDYJpnAlg2NZHyAyKigbG/7LJq8+YrJnRjXLjwmN2MOS132MxK28B8xvj2Xa+zlotEojshjNPApLa4U0TBwKOGOXFoMnnrVj84tndGjyIitfsIMbAljGxPInHz770ThOjPDe9FPcfuY2RCP1XHSiWkLmcQfqZqw2Rn8jnOzCSIya/i3QoaJ+DsZCL2vUpnQKQruhSg2/PmJXVoawJomarP7w7CH0FYu03tndFeIroGwE6WH0ZN5uUjw3F1WF/yRLp6vwjgYqs8EVYlh2I3O2HS0d27ghnKHDXVDhPOSg/F/sUPjsU7L76jNbxzdHDw2u2uBCH68wRx++K+D8Lk2wg4QFNAdhCeUGy8kBBEHXWgs2ET6DIGn7zjvICT4204YvP98Tcq6ApFuvsuAvNp+e3fM2B+HqCeBhBEbr8S6Y5+FUznWXcSDFyTTsTUmNk6xkWLVu5htGTvB/Chwt8ZOCediH3XLmvDRtfO4+ZE6OCRkfUve8Gxq+v4XbOYuKvwdasjGNHfJJJO+nPAWX0oLCOia13Ok4QgvEzSAMgIQdRRCdpDTubPgpQ76pTHUcVmJvsCq15kmrQVntQggoBu0bd7BxX6ZjXx5P/2bNbgJZs2xp8tpxZNO1kiOjE5NLChXFn1u5t7q+sOQvTnCG8kEv1rChuXMvNxNlOjMi8alr8JQXiZpAGQEYKooxJ0BMHAUeqw1XpnoVIzk828NA7wchD1g7G6UQSh2tUcmusWBGrv6r2KgE/vUAHd1Bp+efXg4GDGi1o6OnvPZcI3LbLx1vDW472Ud3NvdSMI0V+pZnJnQwZ9EQTlkTfXJvECEU7nyfsIhfs2QhBeJngAZIQg6qgEF4IYLXZLrcjMVGxeAnImFmNO9vyGE4T9ghJK7wzYTTw5UwbTienhgRu9qqS9s+9wIlbukzkPCwae4onQ4nJmJo17q94EprnomCeIptefVUcli0j+RwLumwjzSW3ZLNkuZDacIHp6ou8cy9BqMJ8Mog/kz7MyAD8DGPeYZF4xMhT/XRlTp+tUVQ4YoZbw0SB8BkTvt5w9qg+gPxDTUBbmVZntu444OXF4fRc+vrh/fpiz/WBSFxHVuU/BiWMUwHMA3QvDvGqfPfBEuQumke7ea4vWELdOMB5obeHlVV6O9TpMX3Iz8pBaLTAT4VDKdrHN9wtTYmIhujg1NHB6pLv3mgAShFq8j0onYmoxzz3qEJNMVi6m8/J/ep1MOjSZHHjI6yyotI5IJLo3wjQIxv7KvdXJNOVE8KK/Yg1pCOKVty/Drd1nHsfUYqS5se97vnudE67ENemA8elIV9+RAF8NQLlOOz3K4SOdBX1mU2LgKT/tq8U6ZJpnE7AqHwWgXPE/AjizgFc5Yevvi7qjBxpMP8q7gE+6oro/j5PBa5Mb4w86kZ8QRDkIa/S70wKjFkq7GcavF47OvJRKxO8uUe70eTFNoaZZMEoWBPvXP4BnzXBm8cjgLc97hV+RJLVkN1o9ZZhpRbmLc1bsdO6thfZFf1OacF3QLfrOEOOybKbl29Zb9IEhCOA6Bj9KIGXuUuFbvDyvEfGq5FBcXUQt+3Qsjn6SzdyFz73KChcLZBm4ri3Mp3v8GqdIV98qgJUHoP1uULmmM8qsPTY693zdzkUIohx8Nfq9zAJTfPvZ31ZNa15SppUAEETJ2YJuEW7v7DuBiG/YAbX/S4M5M8EE3Q7CJwv1eCHacu6tHgmiqfVnfUUiXdG1gPHhjEFnP7Rxwxb76xMgglBxz9qK3LAZz8HAr8A0BpgfBWiBhjy2gnhZaii+yW1p0Llf5+WzICjHi2HVDoH348lwMiULOxPunGjl48t4NWq9BfNtZRj4LYEezsUvI/4wGB+zuZ4r0SyBrhgb3WWtnSQindGTQcbCyfpYlc3/O/f/pwFD7T7yP/PTrS3vuNjqHl6j5bPqamasiUntIDQX3Dxvu53MS0p7jSYI2+3k/CQqtVOW7DL8EWSuXvtZQm762sJl2GeZF/dWLwTR7Prz8/YGiCAs3eaHTcJqeyyv3LlBa8u/gXGizVXb9TJmZHH/P8A077aEE5lchAkbQsicOTR0y4tWzFRso+dfpN63XYEvtJu6lCv8nPDLpzs5W+S9/5S3nvW+UQagfzUnwufZY6A5XTDNRQ8Afy2diCsTlfaRQ2o/M92nrNsOQlVVqZnJbl5i4Mh0IqbuE6DBBEGR7t4fgfFlK1Q6L61qPJCsdfsZryrnxb3VC0E0u/78vApBIwh1eD7WxlGnr/Tc4v0CzgTROUUk4XAZc9KLC+tBtNyCizocPjOViF3qdtDtcNvc8XJpR8cxu5tGy70EfNTSlicz2CSJ8U8Bf
p+l7AsGZw8dHr75tzqdCkH4mek+ZT0QRCVmCpt5qTgCqZ8FUxvuu8Kb1PmX5OsgUq6O1sMy7QT000832P3W48W91QdBNK3+/LwKASMIT/HBbOHJC8PVXsbs6IhG2CC1e3hXQdDjhc+ceCQS3R+hXEyw90zhynw7TKxMpeKKaKaeju6+fuacabbwjqldyprkUMxirnXWTmdnf7dJ5h3WvoLw76mh2Fd0RCYE4Wem+5QtRxAaM8VWMmlJMjnwuFNTNg+cEpOKnwWzWoJQZwDj48Z7YfDRDJwKYO+SfjN/IzUcV4eCRbeo/fSzVgTh1b3VK0E0s/78vAoBIghflyk1C3/urpFyBrGOX3Pvx9et/hxJlIacKTE59/SsaRvLvBUH6FM72ue7kMUKO5G46Md+fqmCYj6DDPekUnHlTVX0CEH4mek+ZcsRhKpOM7lOSyVilzg1Zbt9PG41L+Ummt2H2a8Xk88xuom7beX99LNWBFFCrmXuXYj+3E0xXqdKgAjC18Ktc4Kwh+jX3eepJIx/V9eK92UQGiRg/o5dBJ2aGh64vPB/nYzfu0OqrkWd0S6DcjuenfN1l6wjhTaFILzO8grkPC4wtlwOrl8ENg+hYvNSwAjibmT5hFQq/t866BpBELa8GS+yQUvSGwfUZSjtI/pz1p+f1yEoBMGMG9LDMRWGpmg36zYW+1mZ+uj58ztaj3ns3uvfUuU6OqIfg0H3MfA35RZbt3a0uwPbx53GNbzsHNa1qYsA7eT9JwThZ6b7lPWywPgxU9i/gHVfKn4W3hqkHNUh4unSj59+1moHUbRb8+A1JfqrPqWr0l1gCKKSiL82d2z7bX1Nsq6KFu0c2dhDx9jmaPH5mSpR+oHoZYnSef855Y0RgvCCaIUyXhYYVbVXM5PNvKRNWepn4a0BQagDtJcIeIaJ72My7pi/h/lUuav8upehkq87PzsmP+6tBXWL/iqc+LZigSEID5co7SNu7+pdRsBUEikA/xmmTHfBbbXc734QLOf6Xe53P215XSeEIPyg6lPWxwJjNzOV5CZQobSLg9vpL5Z5VbzDl51rBjqfw3cVr8Vk93MPwo97awUE0XT68zMXAkMQtnAvXsbQ0dH3cTb4XksQwukjCDsZ2WOcedgFO42xZJ1wqEsIwsssqVDGK0FozEwl21Qv5iU/X9QNJ4iSm9T4lcHblw0P/2ybV7j93KT2497qlyCaUX9edRQoE1MFBKE50J1Ggij+CKzFR1VBb5pwGtpIyEIQfma6T1mvBKE1M3GxB4MX89KMIoiuXttXd/HW3QvU2lhMmkXAr3urX4JoRv150U9BJjA7CDExTanNq6VBCMLPTPcp65Mg3MwUdvOSYxY6r4pv+A6iNJqr51AjBTV4jebq1721QoJoKv35eRUCQxA1OKQGbF/1pR86FR9S288i7R5TtTukLr1PQYwrk8Oxf7LrVQjCz0z3KeuHINzMFCXmJZc81jOFIKYzH4Rf99ZKCKLZ9OfnVQgKQTgtgm5j0TiQFJliNB8pWWZalh4euMsPRh85dNXO7/zL2K0MHFIoZ++v3WOKgD/B5EOSyfgjftrS7rwJX0kPxS4QgvCDZJWyfghCZ6YoXIKxmZdcU5TOFIIoPXT3nftY5/2ltaMWuQ/6ONgT/VX5AuSLB4Ug7F//5Ua34ODoLnO24863k+10FGTt9wXkolw5FBv3+4yO5qqDTeNTHR8fnXtCy06vX74jNad79rkZRBCocU5qbUY6+0vu55ZrBQRhP1eZ1frz+uoHhyDgK/+7LSimGq62vGaX8buMYRymC33uhJmXHCray3Q+PnhU2+o8bnxi98uY8NmpvkioDa9TubZyfhcYu5lC5VEIsXGCaZhX57OfoVz+6plEECVhyyevuJ6TTsS+W+a2K0U6o2eB6DyLxp7NGrxk08a4irs/9dh04BhSQKd50V9t3ocAEYTKa6sNgmcfaf7D4jaAenb8po9yUJdgfXBoqy7B+iazUUqwvtrMd8+1+F1gNGamLDPWE2FlPnpj2S+gmUQQufGW5q8eZeZPp4fjMSeS0CVmcSKWStxbCwoW/Xme6q6CgSIIlUSH+dv77IUfOl3oVJGJyaCLir6ygXFmWqnLVugQ7jvD4LPSibiy6zuG9nAK983M/enhuPWCXg5jh3Df25hpTdlMivpw31thGEtTGzf8WmvVKH0/tWbc2syU2tYy60xMeYKwmyksHzDlE4TPNIJwmPCjaqfEWb7IGqXSJfHJ42Y4c7g9XWml7q1VEkRT6c/LKx0wglBdVhnebsiOTfzzpk23vmodg0p4FTbpcuthsfqdgWsmRueeokvRqX53ShjEjNuyIeMMu7lJ5ZzY8oKxnIj/X2l6UrpifHSX05zackoYpEv3muubIryQ8WkG/19bFjuVce4bqaH4vzrmp+7sOwXEP7ZgpAjlmNTGDUkvum+kzKwkCF0grQLIXtJpzjSCyE3g7uhCMP3clo1L/bSNgDSDngVYJZg/WJOm0TEdZKXurdUQRDPqr9wiECCCiANQSX3m5PucAXgzYDw6uUM32wH6gC2fiVo7R8xw9rhy+dJrkXK0XDKjfL9rkXJU+Ym4EpHLB2tWhdgBaAxkPpMdyxxvJ9pyc2I6fp+VBKExMxWw9HRPYCYSRG7r3B09kjmX7H1XH5PHdWtdqXtrNQTRrPpz01lQCIIIx+VyQTPOKCUB/QjUgj0R5pMeGoy/5GVe5iOurrOnEfVQVi26sfE2/nyZfNSFqqijq+/zml2Bh6ag8lb/28To3O847VIKlTjs8Kfa0OWb99KB6ZCZzQRRaqbw6K0wUwlCTZhF3dEDiekqmkzo7vqw2lkQn2zPKWwtVKl7aw0Ioin156SwoBAEA0fNn8d3bnnJ+AIxn6vZjVqHsA3E30cGF/tIxpMr//HF/fNbmH/IzCsAhMvNZYCfJMJXk0PxO/2EIi+8MwbTFeo4zwvpqfcmxMaXhoc3PFy+X5MS+Q+tGzU7fPWzNmio17rrKTdrCcJPvHY7wDOZINRYcrbZF0MHEZlrwLlLQ/vmX7IsGFsINGgafOX8PTntFjXWHqfJj3trtQTRzPrTvfBBIoh0IqZMmbkQ5BPc8kWCuQKg/fNzbBTMTzLhBnM8s65as4kiipCZjRKwAkTvB2O3PD4qCvJzb9+vuJdB6+fPyz7sJQKyy2JKC7v6/i7MvJoJhwE4wEJ+KrbZU8S4J0O0blNi4Gm/JFQgvbBpngXGkaBcUqOptMKVJC2qJzEU6g48QUwHCNKGICAICAKCQCkCQhAyKwQBQUAQEAS0CAhByMQQBAQBQUAQEIKQOSAICAKCgCDgHQHZQXjHSiQFAUFAEGgqBIQgmkrdMlhBQBAQBLwjIAThHSuRFAQEAUGgqRAQgmgqdctgBQFBQBDwjoAQhHesRFIQEAQEgaZCQAiiqdQtgxUEBAFBwDsCQhDesRJJQUAQEASaCgEhiKZStwxWEBAEBAHvCAhBeMdKJAUBQUAQaCoEhCCaSt0yWEFAEBAEvCMgBOEdK5EUBAQBQaCpEBCCaCp1y2AFAUFAEPCOgBCE
d6xEUhAQBASBpkJACKKp1C2DFQQEAUHAOwJCEN6xEklBQBAQBJoKASGIplK3DFYQEAQEAe8ICEF4x0okBQFBQBBoKgSEIJpK3TJYQUAQEAS8IyAE4R0rkRQEBAFBoKkQEIJoKnXLYAUBQUAQ8I6AEIR3rERSEBAEBIGmQkAIoqnULYMVBAQBQcA7AkIQ3rESSUFAEBAEmgoBIYimUrcMVhAQBAQB7wgIQXjHSiQFAUFAEGgqBIQgmkrdMlhBQBAQBLwjIAThHatAS/b0RN85NkG3g/DJfEf/M0yZ7qGhW15U/+/sjL7fNOjLMLEUhPkAQgBGwfwkE24wxzPrNm269VUvg+zuPnZehsNDAP42J894oLWFlw8Oxt/qWNzbzSZ+AGARgDCAlwE8wIRL5u/J6Xg8ntW1EY1GQ1teDB1EZK4B4xAA++bLA4RXwfz/CTSQnQgNjIysV3VW/CxY8LmW1tY3PgHCSibutrSl+vYSgCQD10+Mzr178+YrJvw25DoW4GUCHgPhujkhvm1wMP5nv/Ur+QUHR3dpHaNlYJzEwEcA7JGvJzcGAp5g5oHsROY2r3q19qPe9VcyZikz/QgIQUw/5nVp0YkgMpnQ6zCMs0F8xtSCq+9BhoFL28L8rXKLlo4gshMTvaHWlq+DodpR5GN/xhk4Mp2I3W/9IbeYvkAriHKkMkk47k+GiG6eIDrzoY0btpQTtrf1/IvUC+CHAPb2UPaPAJ2aSgz8IkeD5R/qWBz9BJu4BKAPlBfHNhB/HxlcnErFRz3II6fnLH0fjFMA7OShTIYYl2UzLd8eGbnxzXLy9a6/XPvye7AQEIIIlj4q7o2OIELIHmpy6BtM+Kz3innEDGePGxm85XmnMiUEAR5kGMME/roDOaj1NTnehiM23x9/o1Cv6vP2DF1EwEnO5Rx7/gIZvCq5Mf6Al7FFItG/RohuBLDUi7xFJkvAOWOjc891201EItGdKGScw+C1/sdSHnPVn/wYBgAc7HMMit2egkFHpzcO/M6pbL3r99tnkW88AkIQjddBTXqgIYhnGbiLgC9YGsgAvBkwHgVxK0z8IwjvtXeAgPvG2jhqXcytMqUEgW0AWgHMyckxnoOBX+X+vaON01KJ2CWFenImjO0UZ+TMSfZnGwFpBj2b6yfQCcZ+moV3GzOtSQ8P3OIGomtbOfMVNgL0CoPfRUAXgH1s9WUZ/LV0Iv4jXTvKZDVnpzcuBvhzJb8TXiXGf+TGAoQYfBABf6/ZzT1BZuaIZPKWP/hsI2cmBBn/ASDrjhcPjrfhaJ1eXcZQk/prMsmlkmlHQAhi2iGvT4MagrA2lGXGbdmQcYbNLEORxf0fg2leB+BD1gIEumxO+OXTBwcHM/YeawiiIDJKwNf3nseXWM4aqKtrxX7ZrPF6KhX/byXY09MTHs/scRGDP2+r+wVm+tL8vczb7WcVH1/cPz+UNS8gwtE2otgKw1ia2rjh1zpkndviJ03G6e/ZCw9Y21Imr+dfytn2rwDwbkudLxicPXR4+Obf2tqhSHf0q2A6r7hf+vpV2YULj9nNmNPyfwg5M5HVHHdPa5iP05n42rv6ewjmnRazUpaB89rCfL5OfuHi6H5hky63EXCWiE5MDg1ssGNV7/rrM+ul1nojIARRb4SnqX4XgihrInEwLbxJJi9NJuMpzwTB/I3UcPz8cvb6RZ3RQwyi24tt6J7MLBTp6lU7InWGsMP+znw7TKzU2fE7OqIRNuhuAO+yjON+ZLmvQFg6FUW6owvB9HMAu0/9TnRxamjgdOv4FnVFFxig+wDs6qd+ANTeHV1NTJdaF30irEkOxW4oXcB7LyRAtT35aPpiL6N2TnO24zaAenb8xne0hneODg5eu90q395V3/qn6TWQZmqMgBBEjQFtVHVOBMHANROjc08p543T3tP/XmTMewg4oNwi5LCDeDZr8JJNG+PKlOL4qC/6scwe6wA+3iL0X8jyklQq/owH/Ki9q1d9fX/bIjtqMi8fGY6rhdr6UKS77yIwn1b4Y84WHzYOSw9ueK5cW+3dvV8jhiK8wmMfY0n9AFxNRfb+lY6l9KzmI4eu2vmdfxm71bobYOCodCKmCMz1iXRFlwI5Ms6Z/xjYEka2J5G4+feFgvWuv1wf5ffgIiAEEVzd+OqZA0FsJZOWJJMDj3uprGRBJDyDDPekUvE/Wss7EES8Nbz1eJ1Jylq2fXHfB8nkXwKYZ/l70flEub4uWrRyD6Mlq7yhpsxiigjTidjJ1q/7SCS6N8I0CMb+UwRBOCs9FPuXcm2o35WZJmSS6qs6/1BPlpmWpYcH7nL63WkH4NSepo0Sby+dbpnpxPTwgDp0d30W9Ry7j5EJ3QeQOst5hEFPtJB54dBQ/JVCwXrXX66P8ntwERCCCK5ufPVMTxB0U2v45dXlFu1CQ5rFW+uaqiMIJnwrPRRTrqquT3tn3wlEPGVC0X3RlqtD/d7R2XsuE765Q5YfGw+Hl2weXP+nqfF09R5MgHJRnTw8B3wRptrtbM/sfj0BHQA2g/k3HDI2FDyB2jujRxGROiCfPEdwIFS38UzuqHa/CUC0IEeMHySHY9+ylKP2zt7riHBi4W9evJK84JiXqXf9ProiokFCQAgiSNqooi/VfAUWmu3sPOqvTGpTZot/nFqICF9JD8UusHZNSxAeTR4dnb0/KXa71dvEy0HRXrr4v2aCDxlJxDdPEUR37xnEsHoe/crg7cuGh3+mvK6qfuwkpby//vyO1mMeu/f6t/xU3t7d+01inGshu5Jzgo7uvn7mHLFaD7VHGbidwOtawzsP2s8V/PSh3vX76YvIBgcBIYjg6KKqnmgI4nUy6dBkcuAhPxVHunuvBWP1VBnCutRQbE0ZgnjLZF46MhxPuH8tr2kby7wVB+hTli/hi9KJmLo74Ovp6lrxvgxCg4TcrfDcY7fL28fCjBvSwzF158LLpTfX/ui+/AF+GjAe9DWQyZ5/TFm0LAQxEkLL4YnETa8V/pb3fLqDgHaH+tVFx98SY4BDdNv8PcynnG6t68rXu37/mEiJICAgBBEELdSgD7p7EGY4s9jtwpuu2VLTDUrOFjQ7iJKvd13d2l2OjzOBMiRlJ4gSswkxrkwOx/6pBnBP3mguDm1Si2oLdRSFSSn8sbOz/yCTTHXgvJeHxtQu6Q4y6dqxsV0eLOekoOqrd/0e+iwiAUNACCJgCqm0O+ViMXmtt+SgekecpamYQYElCAvZ1JKMvJKdV4w9yGkJQpVzuQ/iVq0K43F1mDLnFmJzOQnXu34PYxeRACEgBBEgZVTTFSEIwOrZM1sJojBHcgu5aa4lYFXRXQ33SeQ5PEm9669mrkvZ6UNACGL6sK5rSzUjiJILU57OIAJnYnLwDqqriYkrNJdVOTGovad/X5rgw2DwajDUeYZbED/Xm+eavtS7/iqHL8XriYAQRD3Rnca67QRBwJ9g8iHJZPwRH93QuTuWHCJXbmKq3SG1xiW36I6CGnM9D6l1l8tqecbhQ2dFoiqmUlvbqwdkKbSKGH26WFtebmE7tV/v+isdt5SrDwJCEPXBddp
r1ewgtHcY3DrW1XX8rllM3AWQyuUw+TCdmhoeuNxarlKCUHXYQzpU7Bra2Xc4ESuX3Jzbp44QS91H4dvNtb2rdxkRrgHzSyB6WAX2aw1vvU7dLSlx2dWc10z7RChukCLdvYeCoWJtTYUMUXcoeCK0uNq8Ggr2OtffYPikeSGIWTIHdDZ3As5OJmLf8zpETVwhremoKoKYzotyNhIB8CIbtMQt5LUdKzevrkhn3ykg/rGljK+LePlyhXAd6qLc8wA9wmze19byys2FC4653RKbnwJTROUKYiDZFt66yvMFyO7oGmK6xtLPIr3Wu36v80/kgoeAEETwdFJRj/Rul6Vxfdwq7+jq/R4D/3uHjL58VQQw976IAAAF9ElEQVShCbXh13avDlDDpnkPgA9ObXQ0oTY0YSyUuOewHvlgd3cCpG5Sl+yoOjr6PsRGLmyI9ev8nHQi9l2vdy0ikej+COXCebzHMpYis161N7bLEX+9669oQkuhQCAgBBEINVTfCQe/fBXm+7T0cMz6lattrLNzxd+bFLrX5mOvXUyrIYg6BesbB3h5KhFXUVunHl1bvoL1dfYdS8TrnUJ1qCRBMLAeRMutuwgQL0sNxTeV06pDKPJRhnFEOrFhsFBecykw6yfmUwkBAM9a78jUu/5yOMjvwUVACCK4uvHVM5eLW1sZOMGe6tNaeT6g20+Lzh6A32UM4zBdWs9qCEK1qwv3zcCjhplZ7pQwJ99ffbhvwDGPgi7cd7mESKqt/Je9upR2YAErdQg9p2Xr562mHYexlM3elgv33RntJcqZflxDlzuQ6lYvRKQP+V18+bHe9fuayCIcKASEIAKljso7U+Zmr1Pu43wOZeMnAL/P0rrrF2q1BOGSMEhFGF27zzyO2cNEqDazCP+QGf0lCYPcv9jVQeqPwPhyMbr8sEnhk0eG1v/Gag7KJQx6UWW5o6tsuyltwiCXsbwG5jNh4iZ7nopFi054Vyg8cQ4TVMKksJfdR2dnf7dJ5h22vBavMNMZE9t3ieluSi/qjh5oMNYBdJClDW1o9HrXX/nMlpKNREAIopHo17BtB4IYt5hHVGtTqTzz6TU/qb9kRVeMj+5ymlN4hmoJQnWkRilHR4mwKjkUu9kNyjJtPc9AgkBvAvxuEBaDsZutPtd2ytQ/CsIjYFJEpHKwfhSgBZqUo6NM/M/pofhlDmNxILqc9DaAfwMY+bDurDLhqXOTPUrrctRtveuv4WyXqqYLASGI6UK6zu04eDGtNYHPEPBRj81nQbhg/C9zv+kWu6cWBFEgiTnbSS2IvZp80+W6rL6eVxdyM5QTnjS1kEojqtry86iF+wvpofg6t4PnKupXfVGhMM5MJWIqu5xjIMGcjjPGpQCr29N+H5Wi9Lq2MJ+uS1GqKqt3/X47LPKNR0AIovE6qEkPtKElgKMoy8MUNi5l5uPcF2F+kgx8MbkxrqKRukY7rRVBqIFPmnRILdoqjejeHsDIEOgnY2Hju9bcDx7KWdu60JZvWlucgTQTnzwyFH/CZ/1ex6KArqSNU9/egZwDYK6XfgFQCZ/O1Jnu7OXz+qhb/R77K2IBQUAIIiCKqLYbTgSRT0tJ7V39CwnmWhDUxamCCeVlAA8wcPX8efyg1/DQtSSIwrjVDd3W1jc+AcJKJu4GsK/FDPMyAY8x80B2InPbpk23vloNXqqtlp1eX5qPY6RMMXvmyTMLxhYCDZoGXzl/T057xcTaH1V/uO21RSEYJzOZnQDtYzmIVuag/2LQHSbo6k2JgafLEbJurMV4lbQxCvDzxMZwFuZVme27jniJ5mofww591L7+avQnZacPASGI6cO6ri2VIYi6ti2VCwKCwOxEQAhiluhVCGKWKFKGIQgECAEhiAApo5quCEFUg56UFQQEAR0CQhCzZF4IQcwSRcowBIEAISAEESBlVNMVIYhq0JOygoAgIDuIWTwHhCBmsXJlaIJAgxCQHUSDgK91s0IQtUZU6hMEBAEhiFkyB4QgZokiZRiCQIAQEIIIkDKq6YoQRDXoSVlBQBCQM4hZPAeEIGaxcmVogkCDEJAdRIOAl2YFAUFAEAg6AkIQQdeQ9E8QEAQEgQYhIATRIOClWUFAEBAEgo6AEETQNST9EwQEAUGgQQgIQTQIeGlWEBAEBIGgIyAEEXQNSf8EAUFAEGgQAkIQDQJemhUEBAFBIOgICEEEXUPSP0FAEBAEGoSAEESDgJdmBQFBQBAIOgJCEEHXkPRPEBAEBIEGISAE0SDgpVlBQBAQBIKOgBBE0DUk/RMEBAFBoEEICEE0CHhpVhAQBASBoCMgBBF0DUn/BAFBQBBoEAJCEA0CXpoVBAQBQSDoCAhBBF1D0j9BQBAQBBqEgBBEg4CXZgUBQUAQCDoCQhBB15D0TxAQBASBBiHwPxbskUN/+NYeAAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-27"><g><path d="M 371 251 L 371 294.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 371 299.88 L 367.5 292.88 L 371 294.63 L 374.5 292.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-13"><g><rect x="321" y="151" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 201px; margin-left: 322px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: 
rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">OS checks all wake sources</div></div></div></foreignObject><image x="322" y="187" width="98" height="32" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAACACAYAAAARZ/7/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXmcHEXZ//fpmckmoCT8UG4UPF/xeNVA9o6rQCAiJCTMbsKNqCDkB4IoL4KirwgvIh78QARBLiXZHcMhKHKIS3ZnZzYQRUREROUykCAvAYQ9ZrqfH9Uzvenpqe6unp3ZY7b689l/dqqqq771dH2rnnoOgn40AhoBjYBGQCMgQYA0KhoBjYBGQCOgEZAhoAlCy4VGQCOgEdAISBHQBKEFQyOgEdAIaAQ0QWgZ0AhoBDQCGgF1BPQJQh0rXVIjoBHQCMwoBDRBzKjp1oPVCGgENALqCGiCUMdKl9QIaAQ0AjMKAU0QM2q69WA1AhoBjYA6AjOWIBobV+5kxPOfAtHBAOYD2BVAvAhdHsBGAv7CTGs5QXdle9c8BYDVoVUruWDBYTvEGxKLmXEogEZPP0wAzwP8FBj3GojdPjy83R82bLgqp9b61C3V0ZF800iObgPhE04vGTg0299z+9Tt9eT3rKm988vEuGisJ4TrM309x01+z+qjB83tndeBcWwYvu3ty3bJc7wPwDvrWX5nFEEkk8nYP5/HQRbTuQTsCyCmLtb8GDO+nhve/uZqLNCN7cm9DabvAtg/Wj+whYBLzFzi0sHBn72i3v+pVVITRGXzoQmiMtxUa2mCKEVqxhCEWJCJ6RoCmlSFRV6OHwPhpExf6v5K2mluTs6BYXwDxKe7TiyVNLXRYj5uMJ26txYnm0o6FKWOJogoaG0tqwmiMtxUa2mCmHkEQc1tXUcDfBmAN/sKCuFFML1c+J23BbBTgFDlCfjGyNC8i6KcJuxFMW/8EOCjVQU2pNwQMx+fTad6phtJaIKoTAI0QVSGm2otTRAziyCoqS15OoGEzta5X3AQyDPh7phl/E8iMfuB3t7rht3QzJ//ucScOS/vbYqdPqMTwByPkJkEumpkaO5piiRBze3JM8F0oUelZDLwgEG4HGa+b9as7Ta5+kLzO1bukMibHybCUT79eIlhLM72rxlU/QimQjlNEJXNgiaIynBTraUJYgYRREt753Jm3ChZ3G81DT5j/brUP1QER1wkxxoSl4BxlHdxB/HZmb7Ud8J28K2ty99vUezu4iW089rNIByT6esR/w+9AG9uTv4fxOhKAIeX9JtxX0OCl/T2pv6tMp6pUEYTRGWzoAmiMtxUa2mCmCEE0dyeXAAmYRGzo2vIQwT812678OWpVEpYCEV5qKk9eSwx/dBDOENEOHqgr2dtUGMtbZ1fZ+A8V5lXDDY+lU6vEZYQyo99hxGj6wEkXZVGAV6S6U/9WrmhSS6oCaKyCdAEURluqrU0QcwAgijo+unnAA50kwMTn5ztS4nFNXS37idQPqeSR6x4fvFg783Pyup9aNHR277p9ZFbGDhg7HeiyzJ93adW0hfZaYSBa7P9PSdU0p7qx1PNcpogKkNTE0RluKnW0gQxAwiipb1rBTP/1K0OItAVs+KbTu3t7RU+DuN5qLm987tgfMHdiLi0Hujv+bqsYanNNNNR2XT3zyrpSEdHR3wkv9P1AB/hqn+/wcOHpNO/eLWSNie6jiaIyhDXBFEZbqq1NEHUOUEUVDBYC9Bi11D/nDeMAx9Yt+YZVUEJKrfvwhV7xC3rLgDvc5V7xMrF9h8cXL3JW9eHIJZn0903V9qf5tauE0G2ZdZGgPIg6wlzJH/E+vW3vFhpmyr17PuYRHwpEXUx8CGXtdcQgCcButsi66rBvtSfg04zYQTR2HjkdkZ8NAnCpwH64JgFmm1txuuJ8BPO45eZTEq8t6LHGYv9DqL3grFDsaFIYwl7uTB4aGh4+eMgrGTidgBvLxpNFB0hMcDAjbmheb8OM3iohCD8rOeIcTVbfKofhqLf8dkvNcZgnODpN2DPA54kxl15ouvX93f/dSJPrx0dx80eyb/WAdBSgNsAepvHSvFVgJ8GjHsZtHqPXcwHVdTKmiDqnCBaWpLNbJDQxW/nDDVodx/2cfv9XvahAiYRHTXQ173GW0d4bVPCXEfAe8Z+G4eKqdI+j6eeWLApkfsmAZ+VXPp7mzYB7uN47Phs75onZe/1IwixSM6aveU0EL4WaJZcaPQFgD6d6e/+ZZTFSYwlFs+dz4TPq/iiMJBl4hMG+1KPRsFQOGY++xwJC7iLAeymUPefAJ0UNJ6oBCEW+VlzXr4M4M+53x9CDtTUmuwkoksU+y10thVhpIBJSZHCxiF/LohPUpAPV11+jAycMrAu9dsgWdEEUe8EIbkMJosPGhhIZaIKY1D5poVd7yOLfwNgl63l6KaG+KZjvWqsgkpox5s8F8ubQXxIpi+1vpr9qkVbRdIVdzoiHEmUZzMDR2b7e4QzX8kjIwgCH8OgTwG2WbHqk2fwWdn+1PdUSKKlpWtfNrAG4HeovqBYTpwqvpTp7xFGCqF3WEWLM6FCPCjie0wCzh8Zmvct2WkiCkH4kQNANzbErZNlVm82qW3El0B0fkQPfzHMoWrc8/nh1bxwxUdg8c8rmDunyVBZ0QRRxwRRPHamUFhkig8/PBqP77ehd/W/In6ogcWjvkt2LyJ2wMx0em54bk+YaqGafY/Slo81mGjCBEGYCafBNELgvbjgpe51RtwMwzgos27N793vlREEgOdKCRd5Bv5EoAdFXQbvQ8D7Jbt+JV+QgLHkAX4CRINiLAC/FYSFLpWT03VTxax5/v7JuQ3DlCoxSnBaKKhm1gH0AoO3I6ANwO6eOTEZ/OVsf0qEYil5VAnCVmvN2XIOA+eWLvT+5CBe5GOEYYLxDBOyBCqEdyH+IBgflpwmleYiigza/WpZ9nY24r8CsLenrrhTfEr4Ejl9s2WR8FHJ/ImqL1ngAwb7UxtkfdAEUccEIVXlAKmG+OYjqnA5XSZPLa2d32LCV1w/+AqfWDRmDeNWgDokgrkFhDWGRdcPD8/dMFXIorFj2e5GPn4ngA+4+mwy41Yrxl/y+pHYi75J5xUv8Lc6JjLfBgsr3bpuH4JwXpMnxhVmPnGuN95U8f7nUgBLPaIsPb05ZXwWGPs9+VzuG967G7GTfnojPmGQ8SPPjlV4r6/IplO/kC0w4rQ4mt/pUgYL9ZXr4ccsxqlv2xX3uXXh9o79eToEjKsAvNVVYaPB5qJ0eu2f3K0oEoSfU2bP6Gz+3IZ7U8WIAaU9tO9kZiWET85Ht/7CvaaBT8t8hsQcDufpi1QgIZcjqvwkHZUUnPI2prkdr2DCZ0rkELjSGs19zefejZraO1uI8SOP/AIB6l1NEHVMEC0tyQ/DoHsYeI
szTGJcMJDuOadS4Qyq19TatYyIS/wfgiKStrau2Mci67YQVY29azYYv2SDb2uIbfuw18u7FmORtSnx3TDBfO7uu+LigAs/2eJU5qcRQBChfiXSHTrhCeS5I5NJ/dM7Fp8FRilMyb4dyZ3jeax2E7vQt1ujuU/JFibZHRiAe2FyVyaT+l+/uZOebiQLmQJBVEQOol9NbZ37vxHEUtznzCr209fwwjUOam5NngWyIwQ4z3Ns0H7Zdd3CUGHcT0tL1wfYsNW5Yz5NDJyf7e8R91SB6r729uRb84zbARKRkosPD8aQWNzff9NL3s5pgqhjgmhsTbYZZF9Qi1hK9sOEs7J9Pd8et5RKGpB8UOKFJ2XS3cLbWfoU7y5E7CT3rjyoewVVDtMvDcaaiTph2OHQE6a4Oxjrp/C1yA3NOzHshCNf/Et3lb4EwXx2Jp0SoVECP/zmtuRBAAmydRazLWTRooGB7ge8YEoWGCVVkdNOc3PyXYiRWKCEpYx4/AwSqLm961IwrxqTP+BxxI0D/S7r3X2VGD78wzR4P/fuPYQg/Jw5A08OTh+a2jtPJ8ZWtZZiKHGprIzDjLts0S5Y7ImTgPOoENdY4abWriOJbLN35/mHFc8vlPktaYKoY4Joaus8hICSo38tcww0tiXnG6B7AGwfhZAKEV3pFBDEyWZeRPIaAqHHsPjCdDr1l4h1lYs3tSYPJSJhhuuERA/U3ZYtyh5fFAYe51xsoWMG7EMQZQuiX4eL6q91APZyLcbSfBLek1DQCcDvfeWnKb6jIb5t0n26a25O7oY49YLxrijy4JRdsDC5V8yyicgZk8lMh2TT3ULNZz9BBCG7P2DCr3INfISfWimEoFTVs9Tc1nkdgP1E7hIGPQQYN1QrPlhLS9ciNngZgH3EfQ0xromiFZB8p76yrAlCE4TyIhlWsFKCcNot2qcvBvi0YrIgb0DBoC7YdwFmglc90Jt6PqyvUX9vauv8AQHC07v48J0wsVzV76Ctbfk7TBhCXWGC6EEC0pzntY6axYcgVBckSM1kmcp8S6TlKjhVek+nDDwTh9nR37/272OLd7mKZjNZtN/AQPcjKvgLVdhwfscbCWgBsAHMf+SYscatqvEjiPGSg00+5SrTmlolqWBSjTKaICpHsa7yQUyXE4RsumyrKGv4o7C4E8QHg+1dpEpCo41k8eHVNOOVhQZhwjnZvp4LKhe10po+C7fyO1QXfkFUecR6Cdij2IOyXbnKmCQnltE3LGcOdpvwlqlogKp7t8sIghk/JUCY1LrijnFfDIklMj2733glJxhRVPi0/B6MH1n5+B0yR1AV/CazjCaIytGvL4Jo7VpMxCJA39jCypJdZeVwldb0ufM4I9vXI2zyx/XYhGG+9iFmWkaE5SGEUVWfilp4fnvBUF3g/UBUrS+ZI3FhfgdgRDJ7ZuZtiOy0sGMOmOzRs3vVE2LhzqZ7jgm7T4kiKBKCeAJs98kdlFI0WUkwSGpq6/zvolWSX7eeBeEOZqydHd+mf7IMKMIwK/l+GF0g7Omqo1VMYQAWf68rghivykcRs7FiUa2YorbvLi8sdxIjOIwYZwH0H+Vtce/obCxV0TWH9UOyW65o1x30nrBQG2F9VCUI2akyrG3V3z0GENTU2nmDnbfD+bgYVw+ke4TnedUeyUV2QNvRZaIgZ3QTMT6p0GnhP7IBjB+bufyttQ7z4u2PnUL4n/kdLKNhbzDvBuIFBN7HEzZFNgxNEAqTK4rUFUHIdr4ipEC1P1IH26b2zq8Q41surH0taRTnI7RYMXyDSKr+fY9Tmm+oj9BGPQWiHMmjtu2UrzeCUCWsSvFyydyXiSGsvGTP6wAaSlSTilZh7sZsI4o4VoHpqxHCWYi4UlmG8cVs/xoRHSDU27wSLGzfCxOHg+kLPk6TKs1qglBBqd4Iwse72dfmWREjaTHZu7yWOuNpP6yuj8er8iVvUPuaIMLQL/zuPkFMPkHwoAnjmBhb3wbREtcIpJ7sKiN0FmNinFk8tarciVWUjjesP664Vj/wOBSGVc2DIdRiIkyMYxKtCSIMteLvdXWCEGOSHMFfqUUsJsnlp3h92QIt4seQZV3EoHcCLHZ3w1Y83+GXO0Jx3iAP+V2dsCLVutgNGsskniD+Fqd8e1/fzSKsR9UeWbytWpxe5SomHrTi5uFCpmRqVkg82aMO3FY9DdNCAlaCsMgnjIXTrG+okKjvFeXtmFLbbLkAjNNDDDeGAH6WQRsMUB9x/v7h4R0ej8956UMec3RNEIoTUXcEIfNkVfW6VMTMLtbc1nkKABFue+yjkEVzlTjTvWYxHzSYTvVHeZ+sbDHkt9uBqCqL36RdUgNSPwbZ2FV37E0ewwUC/gWLDxgYSD00Xvy99SflkhrYYMXzS10bDtlFszCJXpVN97hlZTzDp9bWFbtYMBeDRHRfmi+JjxXJmS2oM02tyS4iusF1AhDF8yC+jy26ySLKbBOf87TfhXmUE7H2gyidibojCJ98EE/D5P0ymdQT4/kqnLotLYftaBmJuwn4T1d70pwTsqiv1fLullySV4UgWlsPfbNFs4U12Mec8VVi5trU1vkTAhbJnKcm6gQhCb9iMvMyv1hK45EPyZ1UZDNX+1KdcC2Ynxf+IyKwX0N88w1OLDGFUBvwkc+qfgNunArxscwbPHHGqmLYUIxh9qs38owI3xDniWTarQmicqmuO4Io7O7LwjCIGzOlMBEKUEpNAf1yTsgEnIE/GFZu0cDALZsV3udbRBIsMPKC5Nd4S2vnj0uDo5V7Dgf1va3tiO1N5O50x8Bxm4VOFEHM71j5lln5/G8AEsmNCk8FuThEuA7L4F+9YQIqooc+RMBfifnydDr1tNOs97QiotNGjUkkmdMStaUKQYj+FD3hRW6SOWMkH/ANFMKTGyIK8kLAElF5YzFYB7sdAYPmu3Hh8ncbVkzkWhjLe1GNjZDETNkkwnEDfT3u0BmBn5FkI6VVTIoLT30ShB3KAqs9l3WR4u/4LpztncuZcaMnzHFgxjpJ0DugAusSd59ku0QGLs329wiv7HE/kvDkkbyCJaq+ksVyoghC0IE3PhKAqLtp2aagbPH3cTRblenvuVxlQqS7ZU9sL1WCEHr7xJwtVxJwvOvdo8x8TDad6vb2RxImJNJJS1Xlp4KDu4wkjlJU0pXNvyYIxYmoS4KwTxF2chFLBO5zOxAFJmMJwcwvEFpo9FFJsDfxqiEC/mu3XfhylVSI7r75JIKp6mW8bLFTPYXJo6eWhuqYQIJAY2vyAIPswH5ju2mArhodmrsqLPCgvyzxzQ3xbY90671lhgPCsk05WF8h1MVql669jJRVCcLud3mQQfHvR6x4frHXSEJ6yU7UPfL63KOVMJLFoaqCk6r3ni3qHVJr64p2i6w73A6OQTkh9B1E6SpYtwQhhuljCip+ujVvGKeq5qguptu8kIATy60o1BaaprbkGQQSUWXdpoLCdvx2g/m/VAPv+eVDoAgfs+LmAdJw38RnZ/pS3wmwc3fSVV7rWpBHmWmlOwf3RBJEIThi+YkyKHObg1ExM5zYc
e/vwm3IYl4ymE6JQI0lj0/K23tGZnMyyImxuJgLEhtLiCOsoGYlNn/encskCkGIjjW1dp5EZBtTjMkdga6YFd90qjdHiuTUqHrqLgv5LYtVpSp3nhNEWXQE1dN3QCZEX38lTRAziCBs9UJb58nFnMCu3aMNgtAl3wTDuLLBmP07rwWE2KXPnv3ie0yKfa54TPdmSkNY0nc31IXLc9sS43DJh1KMd2NcbRJ+67bIsE8Ls17aETGjBWR9BkyfkFiMPEpW/pMDAzc/VclH6FfH57LTZJ9ELb4JZBQTBkWJvBtVpeFzohQ8d4fBOFNC0NSyMPlxtnB5ued64KaAmts7v1tMmuSClh+0KH7CYN/qP7rJtWDfjwMAusaTJ2Q8CYPG3uuTqEpKcCJhkDErcQcVMgM6j29SJVHAd/NE+H6mr+eM8TrM+ajtxOn7G2zypbLgkWITlbD4bAYLL3ZpAEw/WdMEMbMIQoxW7GgPISIRjngsLHfZomingqRipi3bX2HnAJvripyBIoYxUF3rNxpsLEmn19hpOav9BKXpdKUDFTtNvxSUUvKayBOEg0nAiVIUeZaBfpG2spgKVBCxN76RiD2QiSO/PMiPIjDlqOs9AalNfdWWUU8QYmAyNYtfyHO5Os7ZULnSshI3gLkRIBHa3LsIV3PDEhQf6lWA/wgYxWi5LDLyCWunnTzfwQsMDLsCNsIvRpsmiJlHEPaIxa4iZlrfI7JTVap4hPqstfwYGThlYF1KWGxEDidQDGPwpWIYgyjhvcv6Y3/koGPX93c/Xm1iKDn9VJgs3ta/G7RUlllsMghCjKmlPXkwMwkjA//Ngi+YWx3SwvAu7NxJpBHtDCvr+T0wxHYlBFG4qC8/1fj4B/ndtakO4xE2qLNa2eTES8e3seI+jseOMXLWOW6rPD+zbU0QM5QgisOmprYVCwjWJUI9G5EoHiGic3fb2boj6qWy7MsqHJ2NcwA+QpL4PeRj5MeY6bw9duW11eiLypcvVAmxeO58Johcy2HE5ptT2nnXZBGEs1lIMF/MzMsVxiKqvAribyKPy1TzYYhKUcNDCMJn4hMG+1KP+s1JhQQBn/ziLzGMxbLEPi0tXfuywT+JkPlwiIEfcy7xVW8ecRX5CitTVF9+iwChMg6TP9Hc35jxFecbKXMqZdzXkOAlvb2pf5dshto7rwNDxDorPD5Z9aTOpBEcPcPGO1V+r+tL6iCQhb413pBYzGyHcBaZqoRKybmnEPcTG4tJW35Zyzj4IqbTcP71NgM4gMFtAL1dot7aBPDfQLjVZOO29f3df63k9FINobNxm5U4ksFdnqiZQ2B+TFy652Oxq8MMACaTIBwcRKrMWMLsssdih0IZU00I44HnAX4YMK5tiFt3eheSKFgWTU4PIuDoogrEUV+aYDxDoF7L4Kv32JmzYYRfKUGI/vp4JN/VEOfDfcZHC9q63h1nPpYJB9ohsxk7FMduY0TAoyDcMNLAt1cjknAYrraRhml+BkQHv3GH+B5XMMEhAE++4VB3N4NW77GL+aAbS0naWWk4dH2CmNkniDD5079rBDQCGgGNwNgBSkOhEdAIaAQ0AhoBCQIzVsWkpUEjoBHQCGgEghHQBKElRCOgEdAIaASkCGiC0IKhEdAIaAQ0ApogtAxoBDQCGgGNgDoC+gShjpUuqRHQCGgEZhQCmiBm1HTrwWoENAIaAXUENEGoY6VLagQ0AhqBGYWAJogZNd16sBoBjYBGQB0BTRDqWOmSGgGNgEZgRiGgCWJGTbcerEZAI6ARUEdAE4Q6VrqkRkAjoBGYUQhogphR060HqxHQCGgE1BHQBKGOlS6pEdAIaARmFAKaIGbUdOvBagQ0AhoBdQQ0QahjpUtqBDQCGoEZhYAmiBk13XqwGgGNgEZAHQFNEOpY6ZIaAY2ARmBGIaAJYkZNtx6sRkAjoBFQR0AThDpWuqRGQCOgEZhRCGiCqIPpbmrrPISAX4wNhXFfQ4KX9Pam/l0Hw9ND0AhoBCYJAU0QkwR8NV+rCaKaaOq2NAIaAQcBTRB1IAuaIOpgEvUQNAJTEAFNEFNwUqJ2SRNEVMR0eY2ARkAFAU0QKihN8TKaIKb4BOnuaQSmKQKaIKbpxLm7rQmiDiZRD0EjMAUR0AQxBSclapc0QURFTJfXCGgEVBDQBKGC0hQvowliik+Q7p5GYJoioAlimk6cVjHVwcTpIWgEpjgCdUEQTa1dRxLxT7dizQ+PxuP7behd/a8w/JvaOn9AwKlj5QhPIM8dmUzqn1HrMuGsbF/Pt/3qzZ//ucTs2S/PZ0KSwR8DYU8wdnCVHwL4WWIjDcbqkZG5v92w4aqcQj8iO8qJvjTM2XIOA+cCiLnecS9M7spkUv8b8l5a0Nb1bgP8aQIOBbAngDl2HcKLYP4Lgbrzo7mfrV9/y4thY6jm742NK3eKJcwuBneB6L0ujPMANgIYBCjVELfuHI8z4b4LV+wRZ3MFmLoAvAfAm4vjGALwJEB3w7Cu2X0nPJpKpUyVMTa3d14HxrEuebw+09dznEpdUUa1fkdH8k0jOboNhE8U2/5bnPLtfX03P9fYntzbYFwE0H72nBbmcz2DrsgNzft1kEwuWHDYDrFEfCkRdTHwIQA7eTGxyLpqsC/1ZwCsOi6nnN3vvLEYsI4H6KOu9sXcPkVMfZbBV++xM2dVMff2YaLkJ+rYJ6N8fRDEwq73kcW/AbBLEcTXLOaDBtOp/iBQJR+JKK5Ut63tiO1N5O4EqLH4jlfI4oMGBlIZ7zubm5NzYOAEEH0NwFsjTPQLAE7bfRfuCRL2ClRM1NyePBNMF1ZCDmIBIaZrCGhSGEueGFeY+cS5g4M/e0WhfMVF9u1I7pwwje8z83IAcYWGXgXxN5HHZZlMSizqSk9hAaXvAtjfg59f/UfI4NMG1qV+G7Yoqi7wfi9SrS8jCJj8McRoKYCLx8je+yKmkzLp7ivLF9Ujt6NE7psEfNa37tZKJsB9HI8dn+1d86QK6PY3FMcqMH3VRcQBVfkxMnCKCuZOIxMlPyrjnSpl6oIgZAt92G5eTEBTObHY88KEM7J9Pd8LmqTGtuR8A3QPgO0L5XgwhsTi/v6bXnLXa+xYtruRj/3cRSRR594k0FUjQ3NP89u5RSQIH3Lg3nwcKx/oTT3v18FkMhn753N0CgP/o7AIeJqhv8OgwzPr1vw+KgAq5Zubk+9CjG4DsLdK+dIy4WMvlqfmtq6jAb5MbZEqeUuegG+MDM27KGgHrrrA14IgCLg0ZG7/YRq83/p1qX+439/Skmxmg34OYNeI2G9m4Mhsf8+9QfWaOlbsSXlzTQXfkAnC90Zfn/eVsJP4BMlPRHgmv3hdEISAsaWt8+sMnOeCNNUQ33xEb2+vOHpKn6bWrmVEvFbyY3jd9s4vk30ML9IDcGm2v+c0d1vz90/ObRimFAMHeN4h1A1iIV4PkDglCIIRJ4sW15HZXWWUmVZm0903ywYSgSCoqT15LDH9sHSBV1og/U4dokuvAvxHwHjE7h/xB8H4sIRENoP4kExfan01RT8A55J+MXg7AtoA7O59
PwPX5obmnRiwkASNP8/Anwj0IAAzYPyhZD+JBDEKYMRFfGLxvo9ArwFWE0D/AaIrMn3dQh07phpqbk8uANPtAHYsk3GCIJI0mEYIvBcXTpyOGs4pvhmGcZDfxqGlZdnb2Yj/SkL8JhjPMCFLoFeC5hagq0aH5q7ym9sJkp9qivyEtVU3BNHYmmwziH79xvF428IiFX6XUHb/MAZ78B1GR8dxs0fyr6UA+lSxyigDB3t3Qk0eEimWvdU0+AzvLsx5dWtr8r0m0RUEfLxUCvhOmFguU4WoEkRLe+dyZtzoIYdBK24ePth787NBUievixeY6fTc8Nwe78dXONVhVVGtVribKDyPWPH84rD3RfkCmluTQn0n1B7OXcoQEU4beX3edZJFgZoXrvgwLOsGAB9wvcdXRSjKNLUmDyWiNR7SywP0HSsXv9CrPgtQiZgM/nK2PyVUVGXPJBKE0xdBYj9g0zrXLWvivgWGmXOfMAun4/idHhxNZtxqxfhLXhm3ZcKk88D4QokKkPk2WFjplW2fhdu3ffHVN7av/KDB+WsA2scFbjDmEyA/UeR5KpXo9etGAAAR/ElEQVStG4KY37HyLbPy+d8AJC7GxCNdtB3wJXcI7nkJvIdoa1v+jjxivQTsMbbo5WL7Dw6u3uQ0Ii66jIQpjs5jixARdY+8PvdohePuHMToegBJpz0GnonD7OjvX/t3rwCpEMR4yMG+jLWsuwC8z/XuR2Hykkwm9USQQDe1de5PwM/cO0wGzs/294j7mMiXlN53SchaFFmV6e+5PLBfHSv2RN66iwqXy/bDklOg+H9Ly2E7WkbibgL+09XmS0R89EBf6pdB72leuOIjsPjnAL/DVW6jweaidHrtn7x1J5sgFE5SY12WnNpNMJ+7+664OODOTHYSGwV4SaY/JTZ4Y49kg6XSPooX2T8E+GhXc0/D5P288joR8jOVFvyofakbghC7h6a2TnFxerwDAjEuGEj3nCMDpaUl+WEYdA8DbymuDutAWDhW1ucyTvxe3E0KdY+9YxUfVba/5wT3gldcGMXiMavY5mayaL+Bge6CGibkKTsRAVvIokUDA90PeKuGEYRskRZ3JionB/Gu5rbOUwAIvbvzRFIVtbR3HsWM61w7fKkuOwwT2e+CiClhrnMt9C9Z4AMG+1MbwtorH5f8HqmlvWsFs20l55xQTCIcN9DX47Kc839ba+uKdousOwBsN1aK8P1MX88ZXpKcZIIIPEW5RyjbAKmSi9w4hG5qiG861lEJy9oPUxW5+yesqYxZiTvchhTiDmigv+fr3nHUWn7C5HAq/15PBAGJuesdDfFtk7291w2XLaqlprEvAfQFgMXlq20JxYyfZtM9x0h2udTc3nUpmFcV2zSJ6KiBvm6hfhh7BAFZBh1P4H0AejsR9c+KbToq6E7EXb+9fdkueY73AXin838GDs329wh9b8kTRBAyHTEDfzCs/JKBgZufChNOccyfNYxfvWGyKe5H7CfqCUDWBgIIOKxPITgpWaGJNlpauvZlg29h4DUAvzOAh9jkS92qDvkO01/d59N3r8z4qkAnlyDkBCkbk3eT9Ia1nTIx29h7SJeBxzkXW+icwiWkLD0BBMlKeRvlqmPJd1ZV+Ykiy1OxbF0RhFf1E6CW8Zw2+GETRjIGoZd2zFbl9xBeVVaQ6mc8E14Nghgxsbf3AjEKOYj+l1trRVsIHAxaWju/xYSvuDAJNQRQwa+19dA3WzRbkObHtpZXunRXaR4SdSKY6ahsuluozZQfyYlQqgKdTIII2BSVjbP8/i4aaQpcTRjihG2C6EEC0pzntUX/mzJtgOyUHga+ZO7KFv9ay09YH6f673VFEJLdnsnMy7Lp1NZsawC89w9C+PbYhT/7zEa6jghHFSdNuiMq/9DZ95QynskfL0HA4LOZ6Ua3jh3A32IwF8nuMfz66j2VeXd6qmMstxhTd2YMe4dEFy6qbAFhjWHR9cPDczeE3fsEjH8xEQsCctRLz7FB+2XXdQtHL+VHckcmzKnLHCsnlSBCHD2dwX5o0dHbvun1EXHyGrPOY8I52b6eC5QBCSgoux9UMT33Nild/CUn11rKTzXwmMw26oogBJDeiy3ZxWPZrrgoNE3tnacTY6t1iUSYyi7OqqQqEX0veFq/+B4LsUNBfKRtWujyclZWMQFPAzAk5pyRdOeiT5Kd/0YG7iJQqId3qWDz2wEc6Pyvmievog27cJR8m8/HNATCQ7Doek7QXdneNUK1pnRB3tzadSKIf7S13cqIraOjIz6S3/Emt+EBCGVe0pNKEIonI+nmhWm5nxl21AVOdmp74yrsLoBCVaLudzE4QQWZG/PPkN1L1lJ+oo59qpWvO4Io2+FL8jN7Pvqxk4K3rvdYKzmhRNK7Fief5nes3GGWmX8HW9jLIDtcwEc8YQmkchKBIHzlTJwAEDcOVPZg9YZ+qJ4EV4Kd79uLJxRxEe61s5fV2QzCzczGdXvsYj4Y6KXuNVUeR75vlcVfpUzQFKjWlzqX+txxed9XNG9dB2Cv4m8mMx2STXcLk9dxPxK15rjbHGtAQsr2xrLgE1V1+alexyenpbojCMlR3qsSKLt/cOI2lVvElF7ale1sFBcLcTJIzNlyEDHOBGHf6F7IBeGokCDEBb0IOzEWekLV2kS8s2zBqZ6cVpUgRLcihgBxRrKFgEvMXOJSWSiQ8hMj7mtI8JJKYjiVYSnbvExeLCZf+SojiLIoApXdS/mJ0mQQRK3kp3qfy+S0VHcEYe8GPAH43JeKkkvmMRNVrxqAgH/B4gMGBlIPFXcZJUEBg8xondNCy8Lkx9nC5UV1keos216iIDvQ2ZiTWXSCoL+LwIAk7MELzknOM8TMK7x3M7LOTSeCcDBvbE++j5jOJOBwxROFqPoIG9TpvVuoKUEAZRf1qicAP0FSrT+uE0SdEkQt5Ef1g5+q5eqTIAper1I/haJp490A5hW25aXBx7z3EC5y8VpWhJnDUXNb58mBgc8KUuFEGN0AYhHo7/6G2LYPm+Yr21ds5mo3uzX2kczRrWDNlFs0MHDL5iDhVF1wpqKAi9hRz2wy3kMmL2VCFwHvDwniV+blXVOCmGp3EIoqJskdQa1VTFU/barIazXkR+U9U7lMXRJEuQBvVRX53T84k+S9hyDG1QPpns+Wq66CLyub2lY0Eiyhky0G87PfIKJY/p5gXAsL62bNsp70U1WMy4oJeJqJDsv2df/OGZfEWU3Y4kudtdwC29La+WMmfMb5HwH3/HubhsMevvtG4TswrR77Dska/ijYEieqZZL4QWI8JV7Y1bukLgvPAke23CCOl5BV64/nBFHrS+oyJ1agqgRUqdBWIj+Vvmuq1KtLgvC7TJ4Tf+EPpZYk5Yu83z1EjnPvL431RJd5A5c5k1pQVe10PcBHuCb6VYBOyPR3i6iXoRY04yIImW67EHJ8NYiWuPr0isHGp9LpNcIhT/qUWXYpxLiaKsId1A+xO3x2I44D0f9zq/G8BOj1iPeqHVXHKvH4lkYNluQnUc4HITM/lVlKiT6PhyBk5qOVmLk2tXX
+hIBFAD/FoIcA44Zs/5pBH6wCc62ozkO1yqnKT7XeN1nt1CVBCDDLwigwnTSaMNa64zXJnG8k5oj2JbdhsUiA4kSLlfpXOJModa6KGH+oTBUW5ZLa5/K8EBfIEvFuXJE3uXd0NpZuuDf1skwIJSFDpHFzwgTYng/CN8D8vHCMAuMBmPyTKHkY5P1b0UhsfpKImhnYuzinImeAyiPUhhcScJar8FjiHPG/iXaUKzPT9vfoLxufbGGtBUGIF3tPlkA0fyCpr0PRzLZK3uso4iHibW1PwF8s4M+Gxdc6d4piHPZJv4byoyKEU7lM3RKExBIiZTCusAjC6akQ8dXHh6H8HgLHEXCMK/tWYCwhVa/ZIMGQxD9St2Lyt66i5tbkWSA7UdDWh/nsTDolQpeXnWzGE3TQeYEsLk7UBcUPq/HeEZSFKQFKCEK6WClarzl9FpuO0dyOV7hVdX7RhiUB6u43ePiQdPoXr4YtJBIyF2pE6QlkPCcImyDK41NFijVWzCEhNitOfKoSa0OJ/Ieedr34SNSqZd7rtZafsDmb6r/XLUGU7VAITzBTN4Gd4H2+F1+SBf7+YkpJJ2NdYJgIyYcaSYfq57ijbMUUsIDJF2sExeSn5vbO73qtoIhw9EBfjyyXRpnMN7UlzyCQSMU6FuwOzCdm0qlrxvuBSOYq0kKisiDXJlifXEVZHk8MSguvnUJ2m5dvZGaR/nTrUyOCWLAwuVfMIuGc6PhC2EErQ3Jq2P2SEiZKQ3W0ti5/v0UxYUyy1ckNuGdkNif9TrvuYfuEIv+dOZpb5E6BOxHyM14Zn8z6dUsQheNjSb5pcakq/orqFf9LZulRvWTHLU+76BSRXLIpfzyCHDhGP/eEli4ceHy8VcOiuXoFrLE1eYBBdvY1d56GuxrifLjs0lz2sQJQygbWvHBFCyzrFk+4byULKpUPQxoIEHiUrPwnw4IRikxl3pDfMtNln3DfrzLTcWHewz7hvn0JWe4DEJzwpqgP/xKIzi9LgVojgrBPEeVJukSypLMzfanvBNyzUVNrspOIrnXJnywhlmxjIqzzbmyIWycH+aGIXBxk0KUlJzb7ohursukel1c8MBHyoyLHU7VMfROEx9y1ZI2XhOh2qwTKwiJsrRwai8dH6EwGruRc4myZQ5ad7L0hfgqYzvSz3fdLoxqVIIqOe1e6Q6MLCyvZB+QMW3IKED8NMXAR5xLfi5AwRyTzUT59qHw4Ugst8GMW49S37Yr7JJ7SVPBPMX6smqfBL2GQX75te5GKGcdzIUKw27s7cBH1yZPuKztiJ2+YdDERRC5p54RW8xOETRDyPBl2X63R3NfcO/XCySH5puE8fZGAc1USBvmcAgT3PMiIrcr2rxGZCUvUon7OksL4wO/0MRHyoyLHU7FMXRNEc3NyN8SpF4x3lYEfEkOpzHrHaUBR/ywXOruRITA/BjKECarIdLUdMZpAdvIh9wf+Kgib3H33sxSJShCiEz5qLN+QyoJUZs15+TKAPycR5NKUm4XkOK0Sj3GxeHxndnzzuaphz1U+Gjt7W7mFVqEq4UVi/I5Bdh5lO/Ul4aNg7OBpO2z3W42Uo6IHgekvRZ+KYR9Wu3KJOF0dslN4Ch8X4gYbY7ZVPGOZ9MB0GYiFWXLBvLqGJwhbjvxTjqqmYQ087QW0b88tGOvstL0CDwsfA2HPcpkRPkHW0sy61B9l8jRB8qMiylOuTF0ThDRAWmEKQh1vJLpJu6LfLt47s7ZOeM7LP+DCglq+swsUBX6MLOMYK8Zt7uCBfuGYKyGIwkLUeRKRnQhorH9BWe9skthmywVgnB59TCKss1oC+Uq+kn07kjvH81gNUEcF9fMimczI0LyLQqK+Uktb1+clpwKVV4oF85Lc0LyvhkWWrRDnISY+OWbFHrHIErr7CSEImyTkWfNCMbHjghm0NCwybqXtF3cIf4dBh/vlvHY6OUHyE4rJVCtQ1wRhC29ZNE57mX/Yib/kNyE+9xBh3tMlzdkf+uwtp4Eg0muqBJF7Acz/DQvXCPPPsstuHx+ESgmikJqRhF/GWJRVO1Ur8zHZdKrbB5uieoZ+4MlFHCDb/BgZOGVgXeq3Kj4glX4kYifIMTqPgFMjxLt6hAw+LUrfhBrDYLpKcKwKUTKQjbHxf9PpNQ+qjs2+V3iOOt9Y6AXObw2uJ06kOCnTl7q/7A6jxicIp1+NjUduF4vnzmfC50O81UWVvJ9qzv97PHI7SuS+ScBnFec28jsmSn5UZWAqlKt7gpBd+qkkH5GfPtQzbrknV9wvxGcljmRwF4je61JviDAbTxFTHxirR0bm/ta9u5SYmErDdVdKEPYpQu7xXRZywius9gK2CXvDMk4ArP0BEqG2HRIcAvhZEP1GJWJqtT+EYk7ixQCLnN6NRUsYJ1jhqwA/DRj3wrCu2X0nPBoUzTWgb7SgrevdceZjmWyCFbmtnfELk9THiXFXnuj69f3df62UGMVYRk1aysyf98jOJoCF6uyK3NC8XztyM1kE4eAUIOsF1Spwez4Wu/qBdWueqWTexf1ewwgdAsYxwu8FwM4ukhaY/A2Mn5i5/K3eOxDV902Q/Kh2Z1LL1T1BTCq6+uUaAY2ARmAaI6AJYhpPnu66RkAjoBGoJQKaIGqJrm5bI6AR0AhMYwQ0QUzjydNd1whoBDQCtURAE0Qt0dVtawQ0AhqBaYyAJohpPHm66xoBjYBGoJYIaIKoJbq6bY2ARkAjMI0R0AQxjSdPd10joBHQCNQSAU0QtURXt60R0AhoBKYxApogpvHk6a5rBDQCGoFaIqAJopbo6rY1AhoBjcA0RkATxDSePN11jYBGQCNQSwQ0QdQSXd22RkAjoBGYxghogpjGk6e7rhHQCGgEaomAJohaoqvb1ghoBDQC0xgBTRDTePJ01zUCGgGNQC0R0ARRS3R12xoBjYBGYBojoAliGk+e7rpGQCOgEaglApogaomublsjoBHQCExjBDRBTOPJ013XCGgENAK1REATRC3R1W1rBDQCGoFpjIAmiGk8ebrrGgGNgEaglghogqglurptjYBGQCMwjRHQBDGNJ093XSOgEdAI1BIBTRC1RFe3rRHQCGgEpjECmiCm8eTprmsENAIagVoioAmilujqtjUCGgGNwDRGQBPENJ483XWNgEZAI1BLBP4/pzMUf7+OmDIAAAAASUVORK5CYII="/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-29"><g><path d="M 371 551 L 371 594.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 371 599.88 L 367.5 592.88 L 371 594.63 L 374.5 592.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g 
data-cell-id="jeVlbFHk8Qahp5zcIn_D-44"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 562px; margin-left: 382px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">no</div></div></div></foreignObject><image x="375.5" y="556" width="13" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADQAAAA/CAYAAACvv+soAAAAAXNSR0IArs4c6QAABF9JREFUaEPtWF9oW1Uc/n43uc3UgYrI/NMxUfBBxJfaNUtSiboVKVunmUm7qtgHGfPFoi+iqDDElw3BjeGD+uJ07Zorq7M6FYbEpvlXiTIf6hBfRFlZVGZbtX+S3J/cuIwsPdecW0tuLec83vM757vf953zO79zCOus0TrjA0VorTuqHFIONVkBteSaLLhjOOWQY8maPEA51GTBHcMphxxL1uQByqH2+/o2e0xzkBh7QNgMwANgDsDXDLxenL/us3z+reJKjGlr26f7fDP3g7CXiTsBbAHgvTTXBQK+BeHYoo/H8meMGRHGZYc6OyM3l9ibBHDHFYGEdzPJ+MC2bdGroGkHQPxsDYhgTj4Hwv5M0vhSllQ0GvX8PE0xAIcB3Cgxbp6Bt7mov5zLHZ+tjZcitOTjQX2RhojRLQFmhVw0mXtyKWOiUXx7OHqTXqJjDOxoFLu8n89B8/Rnxk98U+1rTAgYBmgO4H1OABnImkvFnZOTo7/ZjesIR1q1kucDgDqczF0XWwDxrkzSmLS+yxCy9oN2aa84wS0zcySbMj4SDarsl6tn3mPmXieT2sROkVnqTqdP/ihDqDpHGcAYNH6ldROmrI/nz3sCJplvArhbBMTAkexEfFDU5w/2Roh4GECLoP8XML+wpHtP5RPDv4bDAxuKxYX2MpkHCfALkwHjnRa98LQsoTKYX2q9BYcMw7CIXW7+cN9tKJmfE3CnAMjweQv9iUSiVNvXtj16bcsCTgMUEIyZQpl3ZzLGD/V9VmIijY4w4SnBuFky+SFJQpxe2oBuu1QZCMZeY8KLy0AYX/h03p1IGH9cIUIotp2ATwTuzGqs7UylTljZVti2bn3kBq1F/1jklLUipAgx4flsMn7QDsQf7H2MiN+XJWQrAGjI573wZL2j9fMGOnv7mCt41hlY0zgnRwjoyU7Ex2wJhWK7CFi++QUO3dP1xDUb/1ocFaVpZno8mxo53ihJhEJ7bi/BkyBUDvbaNi1D6HcyqSudHvlqNQh1dOzdRHp5XLDnGuJU8UOh/uvLKH4qSPd/yhC6aIJ35CaM/GoQsq1IrMO4AU4VPxyOblws0ikQHqj/J0VI5JLfwR76l+Xy/1xyayEpNFzbThyyHPaHYocJeGa5281J26tPKBjtIaKTgvrwPx2sBBxoelKwXLFSt6aXz9jUgCstfQpk0oOuEKosu2BsPxGO2lTxjotTEN7IJOPPuUbIKlB9C2Ss7GJXV/AAZzWz2JVOjxZcI2T9UiAQ2cKa9zSAuxqVO3b9DHwPjR7Ojo98Z8W4Sqiyn/65tQ4BZD2KOGoMnKUyP1p71XCdkMWg5pHkEIBbJVjNgfhVlHA0kzHma+PXBKHqD1nEfpr23EtkDoArjybCZ6wWD39Yf8eqzqEeGiWWg6shyiFX5ZcAVw5JiORqiHLIVfklwJVDEiK5GqIcclV+CXDlkIRIroYoh1yVXwJcOSQhkqshyiFX5ZcAVw5JiORqyLpz6G8sXc9b1Ei9agAAAABJRU5ErkJggg=="/></switch></g></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-34"><g><path d="M 321 501 L 51 501 L 51 407.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 402.12 L 54.5 409.12 L 51 407.37 L 47.5 409.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-41"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 512px; margin-left: 312px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: 
nowrap;">yes</div></div></div></foreignObject><image x="303" y="506" width="18" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEgAAAA/CAYAAABDyo4+AAAAAXNSR0IArs4c6QAACIdJREFUeF7tWXuMHHUd/3xnd++uFAQC6bUUMUAkJmIDOdvbZ11oqS1IgTaz2yttqY80VatEYzRSFZpIExSDNJAWYgSDxe4OtEV5BUqz3D5m75rTEBrARgFTtekZLMfJ9fZ2dr7mt7d73Z3Xzt1Wc01m/tz9Pj/zfQ/BexwRIA8fZwQ8gFpEiAeQB1B7RcSLIC+CvAhqDwEvgtrDz6tBXgR5EdQeAl4EtYefV4O8CPIiqD0EZnMEybLsO36CbiDgKwBuBNBds/c0gPcBekUn/fGBrPI2AJ4OEpOyfZ8ncB+gLwfoCgAXNMgYZeCExFRg0lOd/rmZTObJcaMOCkXlFwFaZVJO9IiaTX3bjWHhaOI+Bu41CQd2FHLp+6wcC8XkL4CxB6DPtHKcgaIOumswlzrWilb8Hw4nF7PEvwZwrRv6Gs2/ANx9+QJOK4pSqfNRMJK8k4h/ayHoqF72LR8Y+N1JJyU9y+ULO8YhQA4b6D4inVcWCopq+J1C0cQ3APwcwJxpOHCKiDcWssoLTjzBSHINET9piBa3aiogPDQxdtE9Q0OPlwUTLVkqX+nT6TUAVxqkTAB8m5pTXnaSHg7LIZZI0HyimY5fQgVrVVUR6TL1hKLJTQDvmSY4df5RZtpczKf2W9kUiaz9rE6+VwBc5hYRC7oKM7YV82lhI0Sbp1AsuQvM24zEDDxRzKW/6pRmdukFYJuaSz/aBM7SdddD1wWY89pw4Kju11YNZPb/3RSZNn7MQNfbmiR98Uj/vuPVOag3IkclqkbBXIOw9yoSLxvsV96zUhKNrr+4gvJLAPU2/s/AcT8q8Vzu2Xfrv/f0bAl0njfyFDMnLWSNEnB/qYv3DB1SRkTado7TVga2W6UKWdS2nnjfpR2a9hpAiwzyTwH4scTSgXx+3wnxskUBf/+k7zI/V74Mpu9Z6WCmDcV8am8VIIc6UiGiDYVsap8VQPbA0tOd/pN3ZTIZrc5nn4oYhiTdofbvKxh11OT/HsDFhv9M9TEclq+DRK8ycGnTyyJsL2bTO+2iKBxLbADjIQB/YmaVWMr7fOU358/3DYtiPTVJh6KJbwJ4xCzI7OyU05HE/Uy4x8BTYeY1xbwiHJt6grHE94nxgEUaP1DMpX9ok8Z26T/BwC3FXPpQwwuwBAjAIVQ4qarKv2eQatUaVH2i0bVXafBlCPhkq3SZpLdOLwCmtxuPb+4qaR8rAH3JYKTJUaMTwUhyFRH/AYCv8T9i7Czk0yIFq49Diom/RaN4nQh7SZcO11PNDWBTAMXjcX9J6/4NwOtNjExb1Xzqscbf7dKLgV3FXPruRlqn+qCDbxrIKUN2xvZG5R4J9KpFmimd/uH1DWls22wsZI8C/CYIByVdf3F8/JJj9bZupG1aVkNReSVAzwHoaCbk5zv9c+XGSTNsnV6Ws09vfM3lkubvtxgl3LxEaxrG4c4A35bJKP+pE7TR5sVU/QwTP2ic2psAckibYdJpWaGQOuqcXlyY6MLNohM1RZt9FMwcIOCvftJi2ex+0ZmmnlozeGaGs5CYgQ5WfNJ3RIsXQk3nDjdzjcNYYJp9hBKHNDnrAAmBsdiaBRr7RX0SO950pvW6PW+Rrt1cKOz/mxmgcPJallhM1s3DXENI26RXU5Q1em7XANpBBzYR1Ciz1hziAG0CIBpE47LqqJ6IUqWxCzeaAJos1vOeBiAbJJwSBTWAwLtWwyEAY9GcYq+90SyAqw0yLdOkTeDs2CkSWbeAJf1GZqwGsLThemDFc4IlWmZ5UQzHkuuYqwtsc2sFdlSYD1lM3Y4D5aIVG+eeP1Y6wMBNBks+JJ1WFAqpI/8jUBzFigE5UJI2EbOYz0ypyMBqS4B6e/u6pUBFDGGGcwEXCNIbDP56k2bCX6BxXFWVf9hZFIwmHiZAnE+aHgZ+Wsylf+LmrOLkrUinMe30FX7SF4FpMYOjBLoGRN+y2wSEvHhcPr9UpudA1XuU0TZrgASVTZ2xttHF7SgYTSwnQJwqDCMEhhm4s3EqriuprUDPAvRpAEMgVpl9RyS99NbChf4P6ncbh0FUiBrS/drtFsttVY19MKDCTLfaHu0ddicjSB/rzCsH8krO+Q3bvykAoyB+UJ/wPybuT2KxDZw38jkwP0pA0EKuKaXtysIkL7/DLG3vCpQOZzIHP5yKHA1RBt1ro6O6qNsCFArJc+CDeHvma2NzIFrOPlZgBSPyaiISi+9MWu8ZkRZD4pIld1widQSet3F2+iWO8Es1m/6u42cfh2vjlEIm/KCYTf/MjQWiQ05o3btMNcwN8xmaYRDfqmaVQSNbKCYvAZPY29q5Nwmx9nNQo1KHa2OdzHb2sfO5eheaM/Iwg7cYu6QLnGzrVZ23zUlaiDnKEiWK/SnxocA8SRuMbLEAmnc0F05W9QYjcoKIfgFgoQse0wrgxLM4Ls/3abSTgI0A/C7kCxKNGLsrWuBHAwN7P6rztPyy6rBWOM4+boyqRlPnyA0g9DFxDMCnGhwaBXAMzC9oPt+v6ruRG7l1GlGXfAH/7SC6RVxEavtZHTBxzPsnAX9m5lSlrB0cHDzwgVF+S4AcupnjOXY6jsxmWkeAqkW1PG83E75mdMLNQX82O+7WNkeAwrHEWmY8ZdGWXX0ScmvEbKabPNr3bAl0dJy6QFUV8QWAq18VSpTgyRuy8WAuSFzPPrPZeTe2TX72mf5By/Lu40bhuUZTBcjhHGHyh4E3JL28olA4MHyuOTsTeycjqLevmwKVfgKuaSHEdoqdifJzgacKkNPKf8YJfgeErWpWef1ccOxs2VgDaHPXhDa2Q5wdAMxvWAFOAvxHBu0un77oZbtPI2fLmNkop+WgOBuN/n/a5AHUAm0PIA+g9hLSiyAvgrwIag8BL4Law8+rQV4EeRHUHgJeBLWHn1eDvAhqL4L+C38I2WDElOGjAAAAAElFTkSuQmCC"/></switch></g></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-15"><g><path d="M 371 451 L 421 501 L 371 551 L 321 501 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: 
visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 501px; margin-left: 322px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">ACPI fixed <br />event active</div></div></div></foreignObject><image x="322" y="487" width="98" height="32" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAACACAYAAAARZ/7/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfQuYHFWZ9vvV9GQSkNuPEK4KruuuKKwazMz0TIfRABIEAgk9k3BnUUDIwyoL+gu44iq46v+I5kFRlLuQTLfhogGEAM5menp6InFdREBWQLknyHITJpnpru/n1HTP1FSduk11D1Dz1fPwANPnUuc9p857znclyCMICAKCgCAgCGgQIEFFEBAEBAFBQBDQISAEIetCEBAEBAFBQIuAEIQsDEFAEBAEBAEhCFkDgoAgIAgIAuERkBtEeKykpCAgCAgCMwoBIYgZNd0yWEFAEBAEwiMgBBEeKykpCAgCgsCMQkAIYkZNtwxWEBAEBIHwCAhBhMeq7iXTnd0XM/DVyQ3TTS2pTSf39fWV696ho8H584/ZOdXSvIgZRwE4EMBuAOZUi6n+nwWwEcy3m+XU2qGhVZuivlMms2T3Mqf6Afxd1LrV8hUAzwP8MkAFBm6fneJf9/Xl/xa2vdbO7DwDtA7ATrU6DBxVKuR+GbaNqOXa27NzYOAYEM4GaH8A21XbGB4bD93Xktp63pbKrNOJ8a3x9gnXDfbnTona30wqr5nPx1JUzvT33/zcTMJhOsYqBDEdKGv6aG1dPtdortwD4MOOnzeTSQuLxd4HG/Fq2Wy26ZlNOMg06RICPg6gKWQ/aqO+xyQ+d6g//1DIOqgDQei6GmbgJzza/JWhoRtfDXqX6SaItgU9HySTc5q5tfEA1v1tm5ZjthneerYQRNAMTv5dCCIaXnFKC0HEQS9G3fbO7GEA3QZglrMZAr5WLOQujtG8tmprJrsfMV1FQFuMtstgugym+dXBwbw6Dfs+DSKIap/0OAw6dnD96v/ye4npJIh0+phdTaP5bgL+ye+diPHT4kDus22Z7i8KQQStIiGIaAjVr7QQRP2wDN1SV1dXamt57nUAH1etNArAsJ3mHzRHmw6eikjH4yWovbPnRIAvt4k6dEU3AfT62A+8LYC5PoP6FSp8/OBg/n/9Bt5YgrB6fojM8uHF4s1/8XqP6SSI9o6eM0D8I9e7EF4E0yvW34l3YOCSUn/uMiGI0J/NeEG5QUTHbKo1hCCmilyMep2dS99XRlMfAXtXN+N7x/5NC6vNVojohGJ/7+oY3dSqUnsmex6YvqkRJ70MwrVMdOXec81H8/m8EiONP11dp8weHd3ycZNMVVfdOiaJowhYt3U2Zzfekx/b+DSPliCiydlpXtfynWeZ/AGY5vlv6kSOdL0HUe/WN3Y4cePGKxXRup5pJYhM97VgnGx7ic0gnDTYn7tbsa7z5YQgoq9wIYjomE21hhDEVJGLUa+9s/tsAOo0P0YLjEtNA69MEjWgPsrqdKZ7KTNusCmfVZcVBn7Mo81fDiPDV6/Y1pE9koiutSt6x14e3xvsz52r2/zUz3UgCDvS6j26iegax3iGTebFQwN5pYh+ywiiqyv7rq2jdBsIn7TPbXEgd2GM5SJVHQgIQUzfkhCCmD6srZ4s65YmrAFoUbXrCjMvYcIzDkub2Mrq9gXLPgrT/BWAXW3DHGbis0r9+eu8NnUvSNoz2flgUpY/9vZeNdg4YmBgtbJUcj11JgiLknQ3IgauKRVyp+nGNF03CB1BMNPS0kDvzdO8zBLdnRDE9E2vEMT0YW31lE5n29kgtWlvr/6fgadSqHSlUsZm1+kzhrK6ama5CkSLbUOsEOicYqH3iqjkUGsjnek+gRnqJjEhbmK+DSaW65TWDSAI6C3A+IGRVGrhxr5Vf3VO6VtKEA02p53m5fu26E4IYvqmQQhi+rAeI4iO7kuYcMFEt7y2JbVttq/v2i0ueTQwZWW1zkpKnbJHh3c8w0tWHwYK3SkZwKtk8mHFYn7Q2UYjCEL10dbZ/X0CzrH195IJPmSokN8oBBFmJt+5ZYQgpm/uhCCmD2v9yZfpzMGB3h+P3S56PswGK4V1TYQzJWW1xkpKNR9bZDVxi+hZxsw/s98ivExzG0YQTvNQ4HWT+bChgXxhOglCQ+pBK2oSkQUpqdVcjpTnrmTw5+wNE+iKWalN54RxqNTe+kBXjgzvsCLosGAdCMrGIsA8FaCP2SzblCPlX4ip3zT4p3vvxiWnkUMQEPbfJ/VDNB+Mnau/Pw3CWsPk7w0M5P+o/iYEEQXZeGWFIOLhF6l2W0f2KCJS8uiaeGbSpq3RTyhJ0PgNI2xnVUctRTS72+rkW1KbjwuzoQT1MybiKd9GoFeZaQ0x7tlzT/PPug1iGglCieu03tGNFDE1miCsDbFryV5GOXWnw/HOVzFfm8P29uz70URqLbzHNq8PmqnyoqG+m5/2mmtrLaawAkxfCTCNrjbBj5CBs4vr87+OIr5UjptPP0fdbxo/fB/ALj5rb9z/xmzCfg59nXhSB320U/xdCGKKwEWtpj/Vuzd/jR195JN/W0fP8UTWCX/8YaYTSgO9N0Z977jlp5Eg3hIR03QQhJqD6uFCmT3XQqEoQiyZI6NHbNhwy4u6eZo37/Tmlm1euYGZe2y/+xoVWH11LduHypXVALVGnP8KCJeNvLHjBUE3E9WuujVsKdNKAk4K69GvHAwr4OsMol/YLOqEICJOVNjiQhBhkYpZTneq123abh8JZUkazbM63dH9EyZ8pvbKBPwVJh9SLOZ/F3MYkas3iCCorbNbeYSfWnuhmrK/UFjzuPMlG3qD6OhZRISjVZ8MbibgUwD2sL3DXQDZnfiGOUXfK/Wt/rO1GYf3pKb2TPd3wfj8pPExf3lwIK9iObl9LDq6zySyzKntBgWe5VW76fSS97KRugPAfg4cK2A8xYSSdXMEb09AJ4C93IsiWHylyGvWnFcuB/h0zaLazMB9qh8C78tjPji1WF
Ygwi+YsQDAjtW6QhCRv8xwFYQgwuEUu5TT9wHAExWDF25Yn3/C3riH/iC0slqvRPa28Ik9sIAGGkEQ8xdk920yLbHJvuPdM+5raebFuiB+jSSIyXPn9oMICgoYgSDgEcZjMwzjMGe4kY6OpR8yqUk5542TVZBj47yDszu0bKE8A4fYxlVhxq1mE5/vXKvK5Lg1s3x/g8tXAaSCPdaeCoO/WCrkv+u1PPR6ERUckk/ba3ess4srx25CL/cwY6XLD2esAyGIBn3IQhANAjbExuFpt5/OuJTAoZXV+tAW0fUY9YKlAQShPUn73bKSQhBqTlo7socYZMXwGhc1wWFmPKbwpZ9j7DZTe7REYp9njbisAuaL9toD3/FTQFcVzD8E+ERbe0+iwgsHB/N/cq4lD6J7CBVerCtfq+/hhyMEUa+PVdOOEEQDwa017fR9ADAC8OLBQl75Q7ie9vbsnkhRHxjvn/iR70QFS4MC5DVgQ46FUD3fp+rb8X9BpDyT7WE/njW4cujAwJo/6F42SQQx5ijoEjVViHBKsT9n6Z3aOrPnEujbNozULWBFaSDnjhFVBUzvWxIsKqrhrULHG7Oa19oDQXqRtuYAFKgXqfXT1pHtIaLrHUEu5QYR6yv1riwE0SBgJ53M3Db7QSIjas/0rATzCvsJMEwYcN1mWIscOg1DdXURlyDU6XRkxNgHBh/NwJkA9nR14iOHV2UTRhDQbcZQPjOp8iLDnLWL03s+jP+LZtP2vAF4rSN3G27R5pgIddebAGQn2gkfVkZv6ScipkZ920IQjULW52TGwMpSIfcvfl23dXYf/Ga+htvtJ6UwymrtZkj4Uqk/p06U0/40OpprkFw9iQShxtTRsSxjkrm25pGv/qbWlQF80KFDCIx2a8Xaciv9PUWgXotIY2Dh8k3R3Y6jWthprPTkBtGgL1sIokHA1prVnMw8Hbrsr9LZedxOFYze6TA1DLp5QOsDES16al0RaTBBhAo5nrQbRHWC1Kb+7wRc5DNhw0Q4sdifW+M3qbq1xoRzVTjyKIuho+Oo7UyarWJ1HTRez+YIapF1R7bTICvUjAonr57n2KCFpfW9D4ftK53OfgQGrWPg3dU6QhBhwYtYTggiImBRinv4PhRHZuNwvxDZ4+TiTkkaqKyOK9KJMr4wZRtEEM8AOH+v3TkXxns3oQThJWoan5aw3tY60+o340o6zXMDp1tn5qsiFduj2bZ19Cwh4nHCYuBRHm1aECX3idKXUHNlPQEfEIIInJZYBYQgYsHnX1l3mg8jJhonCEdgv7G/+yurdadBJYZR6S0fuPuGajKgBg7a0XQdCMLK4UzAn5h4HZOxVpe7wm9ESSUINea2zmWtBFN5WY/n27ZWSYATnR0vHT51WyGO26vLUsrHPNnrHTSm3HKDqNuETW5ICKJBwKpmNb4P9ejNMzCealyvBEyWH0RUEJNMECpUxVPP0RUEfNaOCwPfKhVyXw4T9kIIIuqKmjnlhSAaNNfK6WjWFtwBULreXQTdQpwRYxPoSR0J0iQThE5ZXQUntOmoEESk5TSjCgtBNGi6Nb4P9ezJV1mts4CKaikS9LLtnd3LAb6QmG5jg29radr2ARWy3Fnv7aATSSpBeJi7jk9BWDGTBh/PuFZB6yLodxExBSH09vpdCKIx86HzY1DhkZ8FSP072kO8gy38sarrq6zWO9qhbtFcdcp3r1uKEIQ+wmxtAUQJteFYNDorppFqmVnjZQNSwqpyGqugCjMdWRroVbqNuj5tnd1HEqAC7dWeyPoDjZ4tcht1HVSCGxOCaMDkemQ8C+UJrXsdXfKfAGW1jqDqdirU5K1Qom6tdZYQRGMIQusHQbByXxPjEts6CgwLrrEKAjfIdyad7vk4G6xiRNUC7UVelxqrKyGIBuxj1lpqULszulmN7wPiiHjmdS1/96xy+V6ADrAB66us1om4iKh36xs7nBgmFLPPBOqjigIrBgu5HzjrCUHUnyD0sYy4b2Q2jm56tZxyhrxg4L8Nc/TQYvGWzbp57eo6ZfbW8ut5gI6Y+D1caBd7e1WiuYuAnQj4owk8bJh8jT2KsI6M4PCVCNo8NHlVhCCCQJvi70IQUwTOq5reikgfuTVK12m3T4Q6KU6yMbe355ELYFLMnij918qmM91LmXHDpGBxwMNlw/jUb9avfkoIgm4D4ZM1HOoZzbXapo6gJymktQH9AkRNGou70Epu29pw5isfYeDTpULuHtu6cHltI4Kpq0e0YyGIqXzMIeoIQYQAKUoRnfhFxcIpFXKnhTE59OpLL9bxz1ndvmDZR51xeQAMM/OppYF8Lur7eETT9A0EJzeI+t4gwmz+HmlKfUVNUwkPPun2oM9699vKyOihzoRGGpFp6IOLh9WWEESUTSpCWSGICGCFKao5iflGbg3TpirjcTMJ8qym9kz2PDB90xH9tEyMKyrl5ouGhm58NegdqmkhT35Tbvw9TfrJu1pSfKwuD4NqVwiifgThESZbmz5UZYVD2VTinpq3sZoOv1SjHqJDuqElZZ7lNb+qURVAjwxaaU9SpQwpvCLIVqPyrgLRYtvaU0mCjnfcNiYtzWr6VBXq3JnMSAgi6COe4u9CEFMETldN7/tQPyc1nW4jyLM6IHPXyyBcy0RX6ryTle6juVI+ghhfAugfNWMODAQnBFE3gtBt4L6xlnRJefzCb3jkvlYXzfsZTStKhdUbnLfO1kx2P2JS2f1U1rfxJyiIosdt9DUQfx1lXG4Paz92QFFJjOgqR7a+Wn9CEHXcxxzz2KCWZ2CzmkBkkdOF+sGmt46Cr7K6dsJDilS6SuVta8+jYO/OYYbLKoH8eJpH53spxSdV+Fi/BC9yg7BCXhxVKuRUADvtE9bM1SMn9TWjwzue4WV04KGHUiLGZaWBvN3UdPzdfJLyKJOWF8FYD9ALIG6BiYPePGDs4x4YPQ7DPHpwff73fuvZQ5+lqrxGQIlBTwC8CwgLHGbezmaFIBq038oNon7A6kxLAzfvqN07vaSt+kSXD/b3nuOnUwgQE0V5DZWY/mfmSPM5YcRTcoOITxAeJ/tQ+Rp0uoUAURPGdFf8c4DfF2VhjJVV5EDHOlOgerRDbZnsycT0Q4fRg1+3Sl+xmpn3tUUpEIKIPlGhaghBhIIpuFCcLHDBrU+U8PDQDgwDXmuh6n17PgFn2GzRw7xCBdapzvhXnajBqwEhiHgE4aFwDswQZ58PTYY5BEV6bW09fntqHv16NcbTRHpT75USSa9lb8ZLTKXp6jUmunC0xbx+1jDdbLMWE4II8wVPoYwQxBRA01XR6wf0vgFxuvTIqBWkrHZ1qcQPs2e/Mo8JWQar+P27Vf+piaCqIidsBPPtZjm1NkpI5lqHQhDxCEInWnLmoA5aT1Xd2K0AddnKhsoVoeq2bKUjwTiJx5TDap3U1simN/NQPwbG1ZXR8q1Oa6Wg97L/bgUdfJ7aiHE2gAW2fobB/Ahbt9bydaoPieYaBdl4ZYUg4uEntQUBQUAQSCwCQhCJnVoZmCAgCAgC8RAQgoiHn9QWBAQBQSCxCAhBJHZqZ
[Embedded draw.io/SVG flowchart: the image data cannot be reproduced as text. The recoverable decision nodes are "IRQ other than ACPI SCI active" and "GPIO IRQ shared with SCI", each with yes/no branches.]
requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 1112px; margin-left: 432px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">no</div></div></div></foreignObject><image x="425.5" y="1106" width="13" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADQAAAA/CAYAAACvv+soAAAAAXNSR0IArs4c6QAABF9JREFUaEPtWF9oW1Uc/n43uc3UgYrI/NMxUfBBxJfaNUtSiboVKVunmUm7qtgHGfPFoi+iqDDElw3BjeGD+uJ07Zorq7M6FYbEpvlXiTIf6hBfRFlZVGZbtX+S3J/cuIwsPdecW0tuLec83vM757vf953zO79zCOus0TrjA0VorTuqHFIONVkBteSaLLhjOOWQY8maPEA51GTBHcMphxxL1uQByqH2+/o2e0xzkBh7QNgMwANgDsDXDLxenL/us3z+reJKjGlr26f7fDP3g7CXiTsBbAHgvTTXBQK+BeHYoo/H8meMGRHGZYc6OyM3l9ibBHDHFYGEdzPJ+MC2bdGroGkHQPxsDYhgTj4Hwv5M0vhSllQ0GvX8PE0xAIcB3Cgxbp6Bt7mov5zLHZ+tjZcitOTjQX2RhojRLQFmhVw0mXtyKWOiUXx7OHqTXqJjDOxoFLu8n89B8/Rnxk98U+1rTAgYBmgO4H1OABnImkvFnZOTo7/ZjesIR1q1kucDgDqczF0XWwDxrkzSmLS+yxCy9oN2aa84wS0zcySbMj4SDarsl6tn3mPmXieT2sROkVnqTqdP/ihDqDpHGcAYNH6ldROmrI/nz3sCJplvArhbBMTAkexEfFDU5w/2Roh4GECLoP8XML+wpHtP5RPDv4bDAxuKxYX2MpkHCfALkwHjnRa98LQsoTKYX2q9BYcMw7CIXW7+cN9tKJmfE3CnAMjweQv9iUSiVNvXtj16bcsCTgMUEIyZQpl3ZzLGD/V9VmIijY4w4SnBuFky+SFJQpxe2oBuu1QZCMZeY8KLy0AYX/h03p1IGH9cIUIotp2ATwTuzGqs7UylTljZVti2bn3kBq1F/1jklLUipAgx4flsMn7QDsQf7H2MiN+XJWQrAGjI573wZL2j9fMGOnv7mCt41hlY0zgnRwjoyU7Ex2wJhWK7CFi++QUO3dP1xDUb/1ocFaVpZno8mxo53ihJhEJ7bi/BkyBUDvbaNi1D6HcyqSudHvlqNQh1dOzdRHp5XLDnGuJU8UOh/uvLKH4qSPd/yhC6aIJ35CaM/GoQsq1IrMO4AU4VPxyOblws0ikQHqj/J0VI5JLfwR76l+Xy/1xyayEpNFzbThyyHPaHYocJeGa5281J26tPKBjtIaKTgvrwPx2sBBxoelKwXLFSt6aXz9jUgCstfQpk0oOuEKosu2BsPxGO2lTxjotTEN7IJOPPuUbIKlB9C2Ss7GJXV/AAZzWz2JVOjxZcI2T9UiAQ2cKa9zSAuxqVO3b9DHwPjR7Ojo98Z8W4Sqiyn/65tQ4BZD2KOGoMnKUyP1p71XCdkMWg5pHkEIBbJVjNgfhVlHA0kzHma+PXBKHqD1nEfpr23EtkDoArjybCZ6wWD39Yf8eqzqEeGiWWg6shyiFX5ZcAVw5JiORqiHLIVfklwJVDEiK5GqIcclV+CXDlkIRIroYoh1yVXwJcOSQhkqshyiFX5ZcAVw5JiORqyLpz6G8sXc9b1Ei9agAAAABJRU5ErkJggg=="/></switch></g></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-23"><g><path d="M 371 1051 L 421 1101 L 371 1151 L 321 1101 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 1101px; margin-left: 322px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Any PM<br />wakeup event<br />pending</div></div></div></foreignObject><image x="322" y="1080" width="98" height="46" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXmcJFWV7ncis6oaUZYHsqOD+nPcBh1bu/aeGhp4gEDTDVnVLLIIgqM8GJ1BRuCNG+C4/GRkGlAUkUXoypRmGUaWRqbozMrMal47iPwYR0FGFhsamG5AqO6qzDiPm5VZHRVxI+JGZlZVZtaJ/6ryLud858b94px777kEeQQBQUAQEAQEAQ0CJKgIAoKAICAICAI6BIQgZFwIAoKAICAIaBEQgpCBIQgIAoKAICAEIWNAEBAEBAFBwBwB8SDMsZKSgoAgIAgsKASEIBaUuUVZQUAQEATMERCCMMdKSgoCgoAgsKAQEIJYUOYWZQUBQUAQMEdACMIcq5YouWRp4qCYTb8AcJBDoVfJ5iOy2VSuJZScJyW6+gaPIeCuGrofB/A8wJvAeMAm3PWOffFIKpUqmrTZ1T/4RWJ8U1eWGJdnR5MXm7QTVmbxwIl7thcKvwDoYHdZBo7NZ5L/GtaG/N4cCAhBNIed6iZld9/g5wCs9jRItDqXHj4PANetswXWUB0IQofYcwAuOGBfToYRRRBBADwWQ9uRmcwtW2o1S1ff4KEE/BuAdiGIWtFs7PpCEI1tn7pK192d2Akx3AbQkZqGnypavGzD+tRTde10ATU2SwRRQTA5sYjP3vhA6hU/SIMJAq/bzEeMjaYytZqkq2/wewSojwnPIx5Ereg2Vn0hiMayx6xK09OT6GaL7gWwS7mjCddX4Lm5TPKqWRWihRufZYIAQNdOjO967saN107qYAwhCNQjzBQUXlIyCUG01gAXgmgtewZq09M7eBkTLpomB8KV4NKXYDlUwNmJRTgq6Ct1AcEVWVUdQUSZMBcvPrutvX3LXhS3+pntLwP0PpcQRSKcnk0nb66GIIDa7RsUXhKCiDxkGr6CEETDm6g+AnZ2nri31VZ8AMCHyi/yMzG2TrYt+8dgvKfciyxW1wB3rQTh7FqFA8miK5lwlvP/DPzKsicPz2Zv3+wWVeNBKA9xO4C31cm+1N0/dCWYzy23pzwZC0CsIksUQqwBaqk6RwgIQcwR0PPdTXdf4giA7pz2FhgP2oW2FdQ2+c8EnDEtnyxWV22qehKEEmLxoYldO7ZRioHDHEIVmXllfjTl2S2lIYgnAf4VQCsr9Qn4ajaT/Eo1Sro/MgCoD453O3fECUFUg2zj1hGCaFzb1E2ygYGB+PbC3jcAfNL0RFHe9tjVmziWiNY6vgJlsbpK5OtNEEoMD7FPxfmvzGeS5xt4EE8C9DWAf1iPMKJLliKBzmPwF8okURJHCKLKwdOg1YQgGtQw9RSrr+/4dxUQGyHgwHK7Ewx8Ip9JPtDdndgfcRpxhJlUEVmsrsIAs0EQ2kVhxoMdbbx8ZCT1J6eYOg/CpthKiws3Oc4sVBtGdIeXHoNlnQrbTglBVDFYmqSKEESTGKoWMb1nH/jRiXh82caRW19SW2NccWX1HRh5sXpgIPHW7ZN0JwiHlGV9Mk6F/nR67Sb195IlK/aw2uOnEUh5Me91xMVfAPiXDLpmcny3e/126Kg2NLuwfMMtQXhpQiV12QI6GwShwVXZR3umQUcQygaTHP8H57bUasJMHsyIVqNg/xNi9JAQRC1vZ2PXFYJobPvULJ12gnGtM2jCGNMehqkAfgTxxht7vtS+aOv5IPyjgxT8mn0RoE/lMsPqEJbnwJ5OFwauz2eSZ0Y54OcNq0UnRJ0Cc0cQmEG+FVkCCOKDMw62+XggQbZ2jZEJgJfHqfirAsfTQhCmb0nzlROCaD6bRZJYd/ahEl6qNKQNY0RcrNYRBIrFoxGLfRXAYAShCwy+MJ9JXaGb9D3eEOEJFHggl0upE8cmD3X1DV7nXJhnwoX5dPJbJpWDyswZQUQIMSkP4o1Yx6QrNcYWG3zYWCa10VBnT3jJnowd2t6+3RKCMESwSYsJQTSp4UzF9p561YcnXGckVPORFqs1BPEcwI+6Tm2Pg/AImH6tFsUZ/DECPggg7tLHdwLr6Rn6EFusckntVa5TJKJTsunhNSaYaHJRbSablmWzw4+Z1J9rgvBZI0p1xDefNDIyUnDK4+dBqDCfexxEIUVdeEmlZenvX7mPEESto6ax6wtBNLZ9apJOE2uHX/xZ42movo0Xq/Wx8mnxXyPgsvY4X+VeWP340lUHttn2da6tnICPBzMwcPqi7YXXUwAdvQMcuqUj/sJp7glTB15P/9AqZlYHzcp79/keFHF8LpdSifJqembDg/DKC/hN7iEEMTN/UoQwky68lMuk7u3vX7mvEERNQ6bhKwtBNLyJqhdQs4XV98tc7blv34afA9Szo0fz2HwAQWyGZa3IrV+T9dNEu98/IHSkmTSNvB3ddl9mOiU/OvzT6lHeUbPeBNHTs2Iv22q7n4APO+Tz3YUURBCaMKJpmMkVXtqxwUEIoh6jprHbEIJobPtULZ1uMgSCv5b1J3F5ufpaDBPEhyCKzDg3P5r8flh9zUL5VrLp8Gx2+GF3XU2YyCjMpNnua0QsYbJXfq8nQSjPKm4XbwRoYEb/zHfCxok6jyeIIFQb1YSZNCfwp89gCEGYjozmLScE0by2C5S8a+nQ+8kuxer3dRQMDBlp4vu+oR535/rdUuYLyJrJO+jQlWehGQgPM3X3Dp0DYgdZhdeJMjxqJAhaPHDiHm2F4keIcAq4tLC/k6v/LQzryHxmzZhOLgOCcKXpDg+vubzQGbvbhCCijI7mLCsE0Zx2C5Vac+/DJrZoWX798H/6VdbH980Wq308CO1iqq7/vr6Tdi9i8h6AOiu/B53KdXscDDwTR3Egk7nt97r2NbqVtmqaeEehYJcLzHI21yKIv5RLp77jt6U3jCA0YaawBXoXEc84PwMhCNOR0bzlhCCa13a+kuvXE2A0WXf1Dp1MVFrEdT6hi9XaMwqEi/Pp5OUmEPuccfC9nUy3NTdoPUHjHT2mtmqOjd36gol8JmVmkSAKIL7ogH3w3aBLg8IIQhdmCtqI4N5B5U7xIQRhMiqau4wQRHPbTyu97sSx6VZQ/ZWk4YvVPgTxhXw6qc4zhD5RCUI1qNma60uCbo/KL59RqKABBWaBINRVo3mb+OyxdOrxMNkMCcI4zOQKL3lOmwtBhFmk+X8Xgmh+G3o00Nz4ZbwYq1/cRmg4ppoJ3il4NfXdROgXZtLcpFeX1Bpu4GsmCMLLzHgZjA2WhfuKE7F1UTwcE4KIEGZyh5c86T2EIFpw8nCpJATRYjbWnX2oj4rBC7rVTPC1EoQ2lMb0mdzo8A+cbXf2JRZboHUAdp/6f7hHVA1mNS5SV9PljDomBKENM2kwc4eXdLfRCUHUbLKGb0AIouFNFE1A3cGqaC34lg70QuaDIJSkmkSEd3fEd06MjPxkW0UT98QZ5RRxFOyaiCDcYSYvZjPTwGs9LiGIKKOjOcsKQTSn3bRS+4SH6qmh72L1fBGEZvF5xm4tjZcRtnOnaryahSA0YSb3DrfQ8
JICSQii6qHSNBWFIJrGVOGC6s8xqJg2vRJe212CVX6k/WbmSfIPzcwXQfhszZ0mMu+Cffje/+hYTdVoFoIICzN5wks+t9AJQVQ7UpqnnhBE89gqVFLN2YfQxWW/RjULu6qob3vzRRClyc6zNZenQyY9fYNfYeDLFT3rmVrDjV2TEYRvmMm1e8k3tYcQROgr2fQFhCCa3oRTCvjkUtJeLGOqsvfksaqpX6yeT4LQbM0thUwWWfYzrkuMjHdzmWLkLNdMBOEOMzl2gD01Mx26v9coBFHNKGmuOkIQzWUvX2k7exN9FpHKmbRzpVA1N4c5O9CdifDbSjqfBKFYy3PHA9MpTPZvZu5eqm9qjWb2IHRhJuVdkW2POK+gDRpDQhAtMnkEqCEE0Ro21lwbCtNsnUEI6NpV5T2L1fNMENCk3rhenScgwjVlBasOt5kOkWbyIMoE4QozIUWEYWYMl9OhB95fLQRhOjKat5wQRPPablpy/dmH+izG6jwT3TmC+SYIDQaPAfyM48KiuqfWaHYPQrOb6Ukw/rDjXvHg8yJCEC0weYSoIATRAjbWnn3QHH6qRlWfvE6er/H5Jgilmyb1xrTKs5Fao9kJQhdmcuoUdl5ECKKaN6q56ghBNJe9PNJOnX3Y6xYACcePdV2M1eyO8ixWNwRB9CS62Sqtw+ziAmpWUmu0CEG4w0wVtUJDlEIQTT55GIgvBGEAUiMX0Z59MLgbIYpOursaAMw4cNYIBOHj7cxaao1WIAhdVtySXgZXkgpBRHmLmrOsEERz2m1a6lpugTNVPeCE9vRidSMQRClk0j/4RWJ8M0qoxBSHsHLNtkhd0UeT3NH33msnBkIQYSOi+X8XgmhiG/p8Mc/KYqzmStAZX+aNQhAaj2rWUmu0ggdRItW+QXeYKTS8pOoJQTTx5GEouhCEIVBSrDkQ8BJEfXZzNYf2IqUgUF8EhCDqi6e0Ns8IeC4GYjolPzr803kWS7oXBJoSASGIpjSbCK1DQBNyq+tuLkFdEFhoCAhBLDSLt7C+nnUSotW59PB5pT058ggCgkBkBIQgIkMmFRoRAeU9dGyjFAOHleULTBPRiDqITIJAoyEgBNFoFhF5QhHo7k68p6Nj8qWRkTu2JhKJ2LMvxA5m276agK7pysx3wsaJuVxqPLRBKSAICAJaBIQgZGA0HQJd/YMXEeOyAMFftdg6enR0TbrplBOBBYEGQkAIooGMIaKYIaC/p2K6bhHEX8qlU9+RtQczPKWUIOCHgBCEjI2mQ0B3YrmsRBGEKybe2O2ijRuvnWw6xURgQaDBEBCCaDCDiDjhCHQNrPozKthXAFgG4G0A1DrDQ2ThG9n1SRVWkl1L4TBKCUEgFAEhiFCIpIAgIAgIAgsTASGIhWl30VoQEAQEgVAEhCBCIZICgoAgIAgsTASEIBam3UVrQUAQEARCERCCCIVICggCgoAgsDAREIJYmHYXrQUBQUAQCEVACCIUIikgCAgCgsDCREAIYmHaXbQWBAQBQSAUASGIUIikgCAgCAgCCxMBIYiFaXfRWhAQBASBUASEIEIhkgKCgCAgCCxMBIQgmtzuXf2DXyTGN6fVINyQSydPb3K1RHxBQBBoAASEIBrACLWIIARRC3pSVxCoHYHO/sQHYjZ9vliIXTI2dusLtbfYOC0IQTSOLaqSRAiiKtikkiBQMwL9/Sv3LXLb/2XwpwH8IU6F/nR67aaaG26gBoQgGsgY1YgiBFENalJHEKgNgb6+oQ8XwfcB2Lvc0pNCELVhKrVnAQEhiFkAVZoUBEIQ6OxLLLZA6wDsLgQhw6VhERCCaFjTiGAtjIAQRAsbt5VUE4JoJWuKLs2CgBBEs1hqgcspBLHAB4CoPy8ICEHMC+zSaVQEhCCiIiblBYHaERCCqB3DWW9hYGAgvr2w1y0AEpXOGLgyn0meH9b54oET92wvFH4B0MGOutfnM8kzwy6919TdYoMPG8ukNvr129l54t5WvHA0iD4BYDGA/QDEHeVfAPhJBm4rWrHUw+vXPBOmg/q9GoIYGEi8dXvBuvrN/j7p7IMYP2Kbz8vlUuNBfScSidgzm2IfI7JPB+MwAO906PICAY+CcGN7jO8YGUn9yUQPtWWwwPE0gHc77HFsPpP813rX9/TFeLCjjZeXZaWuvlVLCLYaQ4c4dqm8BvCvieiaKHqZyG5aZsmSFXvE2uLHgfApEP05GHuU6yp7/TdA99tkXzuWTv1n0BhefGhi1/Zt+Pmb5XsqfRPj8uxo8mJTWVS5at+/2Rg/pTE9SXeCSjZTz4xdRQo7qz1+GoFOAvBeAG8rl1Pv3S8ZdM3k+G73btx47aQfBl19g8cQcJchRqFzgmE781qs6be5dvcOnQPi7+9AkcdiaDsyk7llSxCymi8A9U6Z1e1N9FlE9wLYeaoPzk4swlEbH0i94u5THaKxmL4L4FAAMUNrF5lxRzFmfT6MKKISxOLFZ7e17/TKaoDProIcqGdp4q/ZxlUAvc9Al9dA/HUUsDqMdBqBILZvRzvFrauZ+YQQW70Gxtcmtu32vaAJxQAfoyKdnSfvEotPXsqEv3F9VGjrM5Bn4jPH0qnH/Tro6Rv8CgNfjvreONvr6zv+XQXERgg4sPz/V8nmI7LZVM6n31kbP34E8cYbe77Uvmjr+SD8o4MU/GB5EaBP5TLD/6YjWCEIo+HaWIU0E70Rc3uJBSDgJdh8WDabeiRIS/ek7PP1Rd19g599k0S+DWCnKlH7o8XW8tHRNf/P96smQqoNP3IA6KaOuP3ZoK/97u7EThSzLmWw+rI2Jbqy2Dxmx4snjI2sfdZPj/kmCAt8hk10XZnITc2VnFjEZ+s+DEwbCCvX0zP0cbawBuB3hZV1/a68igtymeTVusmup2foQ2zxLwDsVa73us18xNhoKmPaj+bjzPdDabbHj44gUCwejVjsqwAGTXUCUGDwhflM6go3bkIQEVBslKI6d5mZTsmPDv80QEbq6h28kQinuMuE1dUMxAkGPpHPJB9wttXTP3g8M27SkMNrAH4LkCKhYmmyZfujoNIXuZdIZoY/PCqZehCKHDp22noxA5fMnODDycGfWKBY9WViKBf9KdUugz9GwAc1X7qPk104Kptd+wedXeaVIMBZZtpEhOMdsqkJdhSg3zN4F5oKN1UmU0cxunZifNdzZ8OT6O5PLAGTCrG5+y0A/ASIxsC0HeC3g7DUEXKqyFcE8Zdy6dR33JOdmrARw20AHVkpbBqeVeUHBk5ftL3wegqgo6frEy7Mp5Pfctt3LsaP5r18DuBHnfoBGAfhETD9OmSsaj8yu5eu+kvY/Jkp/ew9y7q3l/V9lRl3EdEb5b/HOU7/nB9Z89+NMldWI0fTh5iU0j29g5cx4SLHQA9cS9CtP5i+JF1Lh95PdunLa9+pOvzoRDy+bOPIrS9V2vj40lUHxm1bnbJ8v8MoW4hw3vY3dhvWTSblL6wzGPxPLld4AuDluUxKhbSqJQjq7k/8PZi+4fr6N/kC9qnLv7EZ571jPzyYSqUU0U0/U/Hetq8RcI6rv/s64nyCzlOZX4KY
AWuBGNcUC22XjI399NXKLypu/uwmtd5S8jLU+lHlKTL4i/lMSoUR6/b09Kx8J1vxnwP4gKPRkmyFycmvbthw+8vOzpR8T/8Rh1hkfd/lbYwz86r8aMoTO9d4AJ6x7KeQ9z3AZrJpWTY7/JirzpyMHw1BOMV4jYDL2uN8lXvsqXe1zbavY5TW0nY8RKtz6eHz/NZyZJG6bkN99hvq6hs8lAAVNyyzuXfSdkrR6VlDcP4avA7R1Tt0MhHfPF1DM5C6+wY/B2C1o9VXLbaOHh1doxZhA5+epYlD2KbbAexSKRi0gGjgQdRCDtCv1eABFHkol0v9T4Ay1NWfOI2YVIij4hkViXB6Np3cgV+5gQYhiHEm/mw+nbrBb2Lo7k68BzG60zVxP1W0eNmG9SnlRdX8qMXficm9rmHCWY7G1ER/Rn40lQxagP74QGKfeAG3AjRQqavWJOyJyaPdpLJkaeKgmE3qY+egclmtN6xTyDvG+R4Ucbx7rWmuxk8AQWyGZa3IrV+T9TOMikJ0bKPUDJIgPIECD+Ryqed09YQgah7mc9dAd3dif8RpBIz3lHsNjKd29Q9+nhiVL77NAKvFKRUWUc8mtmhZfv2w2gnifqirb/A6As4o/1Bk5pXOrzOd642QrxFnJ9qdIYyb86PJU7ULZ8FrELpJWnVn4jmoctTdP3QlmM91yBgYKnIBpvBSnoQKa5Uf/YJ+QxAE85dyoymVOp2DRm9X36pOgn2PI82CKn5uLpO8qh6jXrM+4Bsq0k7eUySmJv53VMYpEZ2STQ+v8Y61vW8AWO3sKT0mYSZteEof1p2z8eNDEGqzx7n50aRjE4veQt19iSOAEvFXQkZbyabDs9nhh3U1hCDqMdLnqA3dpAqmz+RGh3/gFsFTlvEggHtApcXkqZeE6fj86PBad93SVtW2olpr+FD5N8+X49TLQ59ioA/ARwnY1WY+Icrin8crCFiHCPIgdOsgTPj5ZAefZLKwqvnC9PUA/Ext+pXaAATxmD0ZO9QkXbP2Cz9krSjKq+DeYeTnAQS1qdmldHdHfOfEyMhPtjnrdfUmjiUiNdbLGw+CvW9VV0NgWg9qLsePliBCvAAnDpodWYosfbdZC0FEGdENUNYT+gFSHfHNJ42MjBSc4rm9DfXFxMwp57ZVv68obyhL30etcNSDIGolB6WDZ/KI8MJVMNCRty5kNt8EQcBXs5nkV0xtpxkLQZ6nabPQTXTss/gb1Kg7jMrAM3EUBzKZ237vrKf56Alc8yqNC7fXCrqlI/7Cae53bS7Hj48HoZ0DdLj19Z20exGT9wDUOf2hKAShdna2xuNZNPOZzFwvTilExG3FX1qF+PrpWKzP16B7MdzPS6kV0VoJghk3E6B2cTl2v3A6hrblYedDnLK79SVg3Z/e0rHi0ftvej2Kjl39gxcR47IdddjzNTvPBBF5i2fnwMoDZowZoMhMx+RHh1XoqepH8yVbVbsa+XzXF7r6Br9HgFqQnXoCQqKaidgTZq00M5fjx4dYL86nk5ebGENbXwiidQjCdPupa/ItffW9saj96be+sf12xyKV52tQ035dvhgrg1ftrGorFLsJOBGA2jpYOempAsPOk74zxrsmxPQEuLTA7d4aabxQrjrQhu3AvwOsfzd54WaW4Y8AWOIgCM+BxHkmiKfseGFp0DkNt871+tJ3t6vZQKG+6O8GrOldcib4M/NbiHCsc7OD3xZuTZ++4TZNaEVbdq7Hj489vpBPJ9V5htBHCEIPUct4EEo9z3ZXl2vuWUB2TLyuup6vIo+HUkXMWfU/MfHq3rYVP9gCHcjES8ClifPPAg/TRSGIwFeBRyYW4TiT9YeQbYOhL1xIAc/lKvNMEA9ZvO2Y0dG71BkVo8c0dGbUmKNQxMNYkZr3C1VpzhL5hpk06yPa1DZzPX6iTvBGhC8eROt4EMrgmu2uM0IZuvWHSt6mrt6hI4lYHUoqLda54+TuNQ7TuLDaZx2z7fMJGAJwQKQ3ulK4eoJQh3Y6ZpxFMNypM9cv+LwSRBVkr0zT3T/4EzBOm7Yp4YZcOnl6VTYuV5oPgii9O57dcN5zABoi8Q3NzfX4EYKoZdT5120pD8J9AM69MOcikBlegjv264y3a74WQ2PWUfPnOEykvmJVHqnKFsVoIabphnisCOvUGNvfAtFyR/tqX/gRufVr/iNoSM31Cy4EMWWN+SIIzc4kT+iopyfRzVYpB1n5jI5/DrK5Hj9CEEIQJggEnlNwLZbOWEM4+PBP7uxch3CSi/ecRfBhuqkFwtjPnDsitMITXgarLJzIA/SQxTQ6OrpmU1f/4AXEUPvxp57IHsSO3Efag0rMd8LGiUEJ9GYrxu5nxGYjiCipJkwGbqWMhiDm5K5jzdkGT5hVs+jsu/NrrsePEESUUWZetqU8iNIXmOukcyVUFLT+UIHLuw4xtSvFHboKOkyk8s607bT1B47DdJXmt4Kwhm26q82afGSffWKb3SkqpicJt7sfjSA22vHCcY4FV81hNbXjJvgAkZswlWwqHXh2NPlp8+FlXnI+CYKB3/JkbKnJGYiKRr29x77NpkUqJPlXO4hcf/bGHIXS+J0Z6jRMIBmlD7+ynjAqMJ2yRrMN1C+1Rqn5uR4/QhD1GAHeNlqPINy5ksqTa6Fg7+VMTayb5P3WIVzEEbhP3OuGl0C/F0U+OSQ1xbR1at3m6o6D9/Ss2Mu22u4n4MOOIfA0irwsl0s94Te0enoHfzgj3UOVsXqToetzP4fxfRA9PYmPwKJ1DOy5w/HSH3TSkFHkHWma7ajGaSqC8NDo4buN1ATXKGU8B9scW8U163va1BrO/uZy/AhBRLG0edmWIwiNq1x6+WHz/o58TdqXzvvS8932ZPwsq62wdvpylZDDYp6zEkDoROwylzfTbBQPwmehtHxoSaVamM4Yy8D1k+O7neOXiVSTEj3wq9Fn2FXSLahLnZ5VWWyZ7XWL2l68zXmwSvuC+5xo1/XjJndVxu8krIYgirpUFEGvUT0Ogena1yaSjJCqpdKmWlOwLf75m2lO1EHRRwj4HTFfNTqaetpPr6m1thmpN6bfE89ZCZ9MBc625338BOxCcmMQlWDkJLU5yTRcSfcXuNr/bYE/4Mj4qv1i1LjFT4HwBTB+vCPvjv7UqALBZ++3NsWBH2iak63R1iB8CMIn9DXBzKfmR1PDOnk0C5dq0r00n0mqy1cC8xVV2isnt3PmBdLm+6n1oJNnAotGECqApj0NrMNFM5FGqh/ywujyF0X/yPDkwArMMTYtkjsnkfK044h/xXnK2O9UtluveR8/QhA1z80t50EoRDRpBtYQ420gqOs+AydcdziJCPcylw4cTVUNuGtCt3AZ8fSxbr2gLgShZNdN1gAes+OFI3WHxEremIVbPbugiI/JpVMbwkZfKWdRYe8rGaxuQqs84wzrqHxmzYizvg+5hoYxAvSK4kGoZsZt5uVjo6l1YXp19iYOs6iU2K3ijYWmpwhr0/m7pn1FQMb3TkzdW2Cr3UbOk/RrO+I7n+zOxeSWy/uBwmPMuJyI1NW+5RsUzch0bse
P58rRwFxKbr3Fg9CP0JYkCI2bvrk8uEsDPGiRWReqcEAXGqv2xF2BLQzryHxmzVjQJFG6b+B55a2QSg3gvKtaVfM9yGWQ7ntGt129g58hKqUin74VjkDXtMdfOM+dS6dMtu7JUOH3W1h0nE/G20p/1NWbGCSi62ccAgzYQeXN8YNxInwym07e5oddd3fifyFGygNSV7rOeCKEmKbI30Cv8qUxP5tx50Kd12Z8JtYiAZduH9/tsqDLiXzwMCY/hYPLG9tKhPWOj6RIayI6sjPBWTFilPETdYKvN0GYelVRPhQaoWxLEkRpcM1My+3EOnCA67I67qjszSHkNmJP/9Aq5tJ9EY5rOen3ZNmfzq5PqTSTd0MUAAAV4ElEQVQVM0IzpQvcn6cuML5DQJfPoPDd6hiVIMqHne5w3hcQ9PXs4wUoMbeA+QLYuMVzB4D/HcqbEeB9+NwdoPo594D9MOzc9bXjghxc6Xc/dlSCKGP/4pu6nX/Avpx09/fsHzGk8hS50nxHSmFi+tLrvYASjd1tMf5+dDT1X662Au57Nvc+VJs+Gy0q3RlnvVUV5mr8zDVBaC5MKoL5kgP2w7f9diea2r6RyrUqQXgzke5APdAL0G3Pq1Q1OT1dvk3tbp/J/gUAWYDUJKSuLfwQQH/huUxdLYRzKTxQuTTIN1dQVIJQvfb2ruq3yb57Rp4en0tlVHnthSo78HRe46h0+jBAizVekLqM5+/y6dQ1fi+Az0U5leKbGXiQQK/6XLH5OEAZgM+etpdPDFq3SK0EB9A2LdvUGZX1U7bit5c9lB35saYKRrqnIeqLH3BtrWrqWQYyCo/AK1EJuTgKx6fTazeZ9q+7xncHLsE3ren6mIvxM9cE4XsrZWnc0CsMLsTYOsvkkjBTu8xHuZYlCF9PwCAcoNmJVPpitsGHjWVSG8MM1du76mM22SpG7byaMqya+r0Apiu4ja5Gwb6PgPeWK/leXlINQUxdBDT4XTD+1ilU0AL01KRB10a8AL7SvLrf+YJcJqlulwtc3DY+ZOgQvBKyAPMxzgOGETyILQS6hMEXR7CZsU4mhvcr09Of+AQzqbvNd4/ezo4Dk1HrasJ9qomqt/LO9viZa4Lwe4dmvE8B65VR7TFf5VuWILQ3uxnemKVfhwg+Pe024JK+ofda4BsCwkbOKupO5wds4i+MpVOPRzmlWyVBoJwOWqWmrlx+VCLBoPWSqXuZafDN9Rx1udL+JoNWXXbDxGcqvUzKqzIR0pSoA393FNv43IdHUs97dq+ZexAl8qd47GUqFG8EqD9I1mp0MtVdV650bzLzt5n5eI1npqvyGoi/jgJWB52WD5JJtwMJ8E+tYaLfbI6feSAI6K53deIQdFWwCV6NUKZlCUKBq/kKMlpg03kf1Ri7sr5g2XQWE6tJ553TL3jJFeX/YuC2ohVLPbx+zTPOAeG+89fvVrFqCaKET29iiIhudFyzqP59X0ecT3Bf7u6UTW2ZjS/a0hmDdSaT3QuQSkJY2dHzGsBPM+huG/TjDZnh34V5DX4vwlSiQz6HwCsdGW/Vvv4/gLDOBv/LWDqlroYteSW1EoTyDpXNnnseRzDjcwCpWwFVWKkIxjOwcK/N/KOxTOqX1epUy0uvdhjF2opDDB4C6N0A9i63pz4wngf4UcC6viNu3xNkPxMZfHbkRbpUya+f2Rg/80EQSj+1oYBi1hnM9lkgep8rK7PxhUUmNpmPMi1NEPMBqPTZ+Aho1iCMw4eNr51IKAjUDwEhiPphKS01CQJCEE1iKBFz3hEQgph3E4gAc42AEMRcIy79NSsCQhDNajmRu2oEhCCqhk4qLjAEhCAWmMFFXUAIQkaBIGCGgBCEGU5SqoUQEIJoIWOKKrOKgBDErMIrjTciAkIQjWgVkakRERCCaESriEyzioAQxKzCK423EAJCEC1kTFHFDAEhCDOcpJQgIAQhY2DBISAEseBMLgpXiYAQRJXASTVBQBAQBFodASGIVrew6CcICAKCQJUICEFUCZxUEwQEAUGg1REQgmh1C4t+goAgIAhUiYAQRJXASTVBQBAQBFodASGIVrew6CcICAKCQJUICEFUCZxUEwQEAUGg1REQgmh1C4t+goAgIAhUiYAQRJXASTVBQBAQBFodASGIVrew6CcICAKCQJUICEFUCZxUEwQEAUGg1REQgmh1C4t+goAgIAhUiYAQRJXASTVBQBAQBFodASGIVrew6CcICAKCQJUICEFUCZxUEwQEAUGg1REQgmh1C4t+s45AZ19isQVaB2D3cmdPxqnQn06v3eTuvKt/8IvE+Ob0/wk35NLJ02ddSOlAEKgCASGIKkCTKoKAEwEhCBkPrYqAEESrWlb0mjMEhCDmDGrpaI4REIKYY8Clu9ZDQAii9WwqGk0hIAQhI0EQqBEBIYgaAZTqDYuAEETDmkYEaxYEohBEs+gkcgoC4kHIGBAE6oCAEEQdQJQmGhIB8SAa0iwiVDMhIATRTNYSWaMgIAQRBS0pKwhoEBCCkGHRqggIQbSAZbv6Bo8h4K6KKky4MJ9Ofkv93d2d2AkWVoDwOYD+AsDbyuVeAPAgE646cB/Op1KpYrVQJBKJ2DObYh8jsk8H4zAA7wQQr/RDwKMg3Nge4ztGRlJ/MulnYCDx1u2TdCcIh5TLzzh8tmTJij2s9vhpBDoJwHtn6sW/ZNA1k+O73btx47WTJv25y5T6L1hHAvYZIFoCxh7lMs+CcLdl8z+Pjqb+S/0vCkGYHpSbb/3VuKE4PsGMTwH0UQB7l/QnvAzmDW583XoxcGw+k/zXarCXOo2DgBBE49iiakn8CKKnZ+jjbNk3AvS+4Mb5NyB8JpdOPRRRCOpZmvhrtnFVeB+lll8D8ddRwOpcLjUe1JffBPnGG3u+1L5o6/kg/KODFPyaehGgT+Uyw/8GgE10U2T37CYafPNU9PcAvD2gTgFMV8C2v2zH8IF6n6SeL/0VBXT1JgaJ6F9C9FeQTo8bIQiT0dV8ZYQgms9mHol1BEFkZWDbtwPYy1BFNWFfkMskrzaZTEtfmDHrUgafDyBm2Ee5GI/Z8eIJYyNrn/Wrp5sgUSwejVjsqwDUBG76FBh8YT6TuiJML9XntgJdScCppjoR40dF8A0WkfLg6pZqYz70X7z47Lb2t2y9HIzPm+oPYJyZz7CI/oyBf6oYRTwI0+HZ2OWEIBrbPkbSeQgC+CGAv6Kp0Evl2czAgwR6FeB3AegFsJOrg3EifDKbTt4W1HFpItnpldUAn+0pR3iZGCrE85SaZBj8MQI+6Ag5Vao8TnbhqGx27R90fWkmyOcAfhSgIx3lx0F4BEy/Dulriw0+bCyT2uinV6BOwDR2BD6IgS6n90KEu5ixFMBu5fZrzsU01/orz6G7P/H3YPqGhxxKYSWsB+hFgN8OwlJHyE2pPA5W3gT9pRCE0SvbNIWEIJrGVP6CugnCVfJFAOcfsC8nnesM5a/lvyPgEtfkvRmWdURu/Zr/8OnRZyLh39iM896xHx50r2dMrRe0fY2Ac1yTz30dcT5Bty6hmSCd4rxGwGXtcb7KXf
fjS1cd2Gbb1zFKayE7HqLVufTweX5eRE//4CnM+IlLvj8CfOYB+2KdUydFJh1v2TrEjCsdXoOzt9kgiFnVv6s3cSwRrXF9NGjHjkkYTjyIFphY5CR1axjRjyAY+C0sOi6/fvg//Sb7crz5+pkTA93SEX/htJGRkYK7nmZBVhV5AEUeyuVS/xOAKHX1J04jJhXCqnguRSKcnk0nb3bXCyAIRWArcuvXZH29gUMTu3Zso9QMkiA8gQIP5HKp59z1enpW7GVbbfcT8GHHb4+jyMtzudQTfv109yeWgEktxLrDeLNJEHXXv0zgd9OUZ1R6DMYOupYOvR823+HyVCv1ZZG6BaYX8SBawIg+BLGFYR2Zz6wZC1GRunsTF4JKoYXKs5lsWpbNDj/mqkvd/UNXgvlc50QaFCpy1+/qG1SehPJaKlNRdmIRjtr4QOoVZ1kfgigy49z8aPL7YWbr7kscAdCdANrLZbeSTYdns8MPewiif2gVMyuSqqylvGqxdfTo6Jp0WD9dvYkhIrrR0Y+qMlsEMVf6j9vMy8dGUyqFeeDT27uq3yb7bgC7OAuKBxGGXHP8LgTRHHYKlFJLECEhFWeDKiwTt+37ALx/etp2bJWt/G/J0sRBMZt+AeCg8v98PQA/gTVtTDDwiXwm+UAoQQR4Ae7++vqOf1cBsRECDpzWSbP1cmBgIL69sNctABI72vD3oNz9lLYRx3Cba21kdgiiAfWfWrvwfDQoD0Q8iBaYW4QgWsCIGoJ4lWw+IptN5UzV6+ob/B4BKkZf+bK/uyO+c2Jk5CfbKv8px6nXTn9pR5iwKm3oJmRiXJ4dTV4cShBAqiO++SRd6MtLECftXsTkPQB1BhFEd3dif8RpBIz3TJdjOiU/OvxTY+x6h04mKnkglWd2CGIW9NcQaZGZV+ZHU9PnasJw6OxN9FlE9wLYOQjrsHbk98ZDQAii8WwSWSIvQfBYDG1HZjK3bDFtrKt3aCURT+9eUjFonowtHRu7VR2oKz09vYOXMeGiyt8ErPvTWzpWPHr/Ta+b9qPKdfUPXkSMy4LISBdiYsLF+XTycpO+tPU1X7WayW0TW7QsYN3G031PT+IjsGgdA3uWf5wVgpgN/bv6Bg8lQJ0TKYXiGHgmjuJAJnPb701wVmU6O0/cm9qK651rEeJBmKLX2OWEIBrbPkbSeba5Mm7OjybVXn6jw2Gll9x7beaMraH6UAz/DrD+3UjIGYX4IwCWOAjCQ2g+BPGFfDqpzjOEPqYEYUKMYZ1pJsjZIoi669/dO3QOiJ1rOg9ZvO2Y0dG7XgvTu/L7wYd/cue3vrH9duemACEIU/Qau5wQRGPbx0g6zUE54y/tSgeaUMPrNvMRY6OpjCoTsu3USM6AQp4J1XSC92vTtL4n9QXjwY42Xm6aEsQHm9khiAhx/Qj6z/TmqtBfYdDdP/gTME6r2EMIotZXojHqC0E0hh1qkiIoF5Npw/39K/ctcFzt2nm37iUXgvBHMixvkrNmDbmYIi38mhKEe2IH4YZcOnm66biplBOCiIpYc5QXgmgOOwVKKQThhcd0glzoHoQQRAtMALOoghDELII7V03XgyDUoSeyWW1h3bcst2sNwpNdFc6ssfXW1XSClxCTHgFT/DwbBiTEVO+h3NTtCUE0tfmmhHcThEoglx1NfjqKalOZX/l+Rz6hGbt5dAuR1fRjKpPpBFczQbhSpQcdcvPrq6/Ps6W2mdYgPk+M7zp0k0Vq00G6AMoJQbSAkTXbXD1nGMLU7PLs5edHJ+LxZRtHbn2pUrend/CHTDhruq0qvzbDZFG/zxVBaIgxNLGfW37NAn/zEETv0JFErNKFlE6RV7PNdfHAiXu2Fwq/AOjgCjaySG0yyhu/jBBE49soVELNQbmnihYv27A+pTKqmjzU1Td4HQFnOF7w6/OZ5JnOrbKaLZF+KTmC+qycvFUnl58F6BFme92ithdvcx6AmyuC0O3hB9NncqPDPzABruTBTSW623GAcJZSbUSZdE3x05DbBMDLc5mUOvhm9MhBOSOYmrKQEERTmm2m0BqCKBLRKdn0sMrOGfroU2h46/f0DH2IrdI6xXRyOgYuzWeS6vIeozMX3d2J9yBWStfxDgcZXZnPJNW9EtOP6QTnp1yE+h5yRATPaOp8yN43AKxutqs8TeNBDAycvmh74fUUQEfvEN881UjpgiFPfq3SYJBUG6FvXuMXEIJofBuFSqjLxcTAryx78vBs9vbNIQ1Qd//gd8H4W0e5x+zJ2KHOU9Tqt/L1pbeCaLmj7GYQH5NLpzaECaom04nC3lcy+G8cZccZ1lH5zJqReSIIaBL7GeeY8klW1zQEoTDvqSFZYffSVX8J21bexoyMtkIQYW9Dc/wuBNEcdgqU0v8+CLqpI25/NuDQF3X3DX72zRw633am+w7andTZmzjMolKW1OnLhkxSQzuuspyZWpz5Ttg40X0FaQQPQItNlPp+xMfAye4kgs7Oyt6QwuIDLiGaiiB06b7fvLUvNN1558DKA6xC7GfOfFcOr1A8iBaYW4QgWsCIQRcGMZBn4jPH0qnHnap2dp68C7VNfsN9iY8qb09MHr1hw+0v66Dx8QJU0S1gvgA2bnFP9qqvWHzyUiYozyFu4n1EmeD1cmq25QaEPXzudtDeoT11YY66kIiuA7Cfpv+mIgglv8+FQX9kpv9z4H72nc4Lk5T+T/8Rh1hkfb98O6EHAvEgWmBikQuDWsOIGoKYBGA57jcogqAWrEfBtB2wPwzQYs01oEbhosW6C3l2QOm8BhQBfY0z8d/l06lr6jHBu9uohmB6+gePZ8ZNmqtY1Q12+alrVLVXbrq7bzqCmErb7Qk1TukVfuWoe7zJGkRrTC0QD6IFDKnZ5jpCoIcYUCm0nV/sAdrS72HRCQFXjc6oq0iifRtd+2YoYrAKCMcBXJDLJNXtctrF7WomeKccVdbX3XoXpp5ar1jDzAcB1FMu3IwEgdJVqju98j2eumu8cnlSmP5bAP4HgL7ol6YlrAH5vXEREIJoXNsYS+YhCMaDdqFthdVWWArwjwG8PaCxAjGuKRbaLhkb++mrxp2qG3ZKoRZSBKHWMPY3qesX8qqHB1AHgig10dmf+AAxqW2/01dw+uj2GhNdPNlh39g+TmtBOKSZCULJbnLftAOLx9iiwTae3OrK41VkpmPyo8P3mIwJKdO4CAhBNK5tjCXTEUQlI+nUWsPEWQCdSsAHyx7FOJh/Awu3xlG8OZ1eu8m4M01B9eUZX7SlMwbrTCa7F6ADHGGa1wB+mkF326Afb8gM/85kS2yVHsC0dLXWVxPlM89TFzE+B2ApgH3KX9Ul7Jhwsz1RuEGt1TRzsj4/uyudJop0HBinMuGjYOxRKlsKN/EGIly1/z64V61NaBI9Rj5sWMv4k7qzh4AQxOxhO2ctBxHEnAkhHS1YBNyH7Qh4CTYfls2mHlmwoLSI4kIQLWBIIYgWMGITq6BJV+K7BtPEai5I0YUgWsDsQhAtYMR5UqEUHitQCsCfA3gEzL+GjWtzudRzpiJ19w2qMNzqSvlqr6I17U/Kz
R0CQhBzh/Ws9SQEMWvQLoiG3XdCEOPy7GhS7YALfXSH7KLUD+1ACswrAkIQ8wp/fToXgqgPjgu1FfelSaWT8XHrf+dH1vx3ECZqc0L7Tq+sxtS22MrzKtl8RDabyi1UPFtJbyGIFrCmEEQLGHEeVejsSyy2QOsA7L5DDP6NzTjvHfvhQecp6srvKsGjVaRvE+E455kJAl3THn/hPGdm3nlUTbquEQEhiBoBbITqQhCNYIWmliHwFDUxfjl1ilw9rM7UqAOBe3s0JuTiKBxf67bppkayxYQXgmgBgwpBtIAR51mFqcVq62qAP1mNKGphejLOpz48knq+mvpSpzEREIJoTLtEkkoIIhJcUtgHgYinqCutbAXjMth8lTtJowDd/AgIQTS/DT13Uke58KYF1BcV6oxAKSdTxyt/DcKJmpPxBQB/IKY0GLdu377rv2/ceK1K1idPCyIgBNGCRhWVBAFBQBCoBwJCEPVAUdoQBAQBQaAFERCCaEGjikqCgCAgCNQDASGIeqAobQgCgoAg0IIICEG0oFFFJUFAEBAE6oGAEEQ9UJQ2BAFBQBBoQQSEIFrQqKKSICAICAL1QEAIoh4oShuCgCAgCLQgAkIQLWhUUUkQEAQEgXogIARRDxSlDUFAEBAEWhABIYgWNKqoJAgIAoJAPRAQgqgHitKGICAICAItiIAQRAsaVVQSBAQBQaAeCAhB1ANFaUMQEAQEgRZE4P8DLfZkEZ5avf0AAAAASUVORK5CYII="/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-24"><g><path d="M 31.61 171 L 70.39 171 C 87.29 171 101 184.43 101 201 C 101 217.57 87.29 231 70.39 231 L 31.61 231 C 14.71 231 1 217.57 1 201 C 1 184.43 14.71 171 31.61 171 Z" fill="#0cf232" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 201px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Kernel resumes system</div></div></div></foreignObject><image x="2" y="187" width="98" height="32" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAACACAYAAAARZ/7/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tnX18XFWZx3/PnUkTRGlZVgpYQNQPu6Cia6GZZJIYF6iAQEthJuGlUD+sgMqCb/iCrKLAIotvdAsIyktBoZmhpXUriK01NpNJUiyLCMgigkrlpSxbCkKaZOY+y5nMpDd3zr1z72Rmkpl57n/JnHPuOd/z3PM7r88hyCMEhIAQEAJCQEOAhIoQEAJCQAgIAR0BEQixCyEgBISAENASEIEQwxACQkAICAERCLEBISAEhIAQ8E5ARhDeWUlIISAEhEBdERCBqKvqlsIKASEgBLwTEIHwzkpCCgEhIATqioAIRF1VtxRWCAgBIeCdwLQKRKg9+kViXGPJ7h+DlGrv61vzvPciTA7Z2h49lRl3AtjD+gsT7htr5DO2bozvLDZtiVc6Au3tS/ZPcbAPwLtzqTJw8mAi9l+le4ukJASEwFQI1JRAOIkDgNhoE58n4jAVUyltXBGI0vKU1IRAOQjUjECEwl1LiPh2AG+zgRJxKIflTDFNEYgpApToQqACBGpCIFrbIx9jJjWttLeIQwWspgSvEIEoAURJQgiUmUDVC0SoLXoMAT8BsO9kVnRnY9D8VG9v/G9lZijJF0FABKIIaBJFCFSYQFULREtHdytM814RhwpbTQleJwJRAoiShBAoM4GqFYiW9sgCMKkdLzJyKLORlCN5EYhyUJU0hUBpCVSlQIg4lNYIpiM1EYjpoC7vFAL+CFSdQITD3UeaZK4DcIC1qMT4EZt80cBAfNgfAgk9HQREIKaDurxTCPgjUFUC0dLR/U8w+R6A31UucejsjLx1JGUcD5gfB+hDAOZm35UC8Gdi6jMN/tGB+/FgPB5Pe8EdaoueRMBPc2GZ8KXBvth/tLRE9oCBcwH6PAjvBJAC+CmGsTpt0E0Pbl71rIrjFF/9FolEAs++QCFifBpAB4D9AAQAKKH8E4PWWNPykl97mMw7ng8cSWQuA+NYAAcDCGbDvUjAIyDcMSvAa71uCii3QGTqcYzWgfDP2XxOHMJsbo8cbmQOaNLRmQOVhJfBvIVBN44Nz/n51q03jzlxKod92N/V3Hz63EBDuovBXSD6BzD2sdjgcwCGAIo3Bs37vfBubovMN0AbLLv8dpjgY4cS8a1e7MFPfPvhV+vhx6y9nwJStkrvt2xJf/FN293EhOudvqsFC07Zx5gVPIcYZ4HoH7MHYTPfJANr04ZxXe578VIma5hy2Lc9D/OPicxuHKGTwDibgSMs7YpqQ14g4HFm7kmPpdZu2XLvy37LUK7wVSMQoY6uw2DyWgIOLYc4ZIw3iAvB9G+asxQa/vwEGfh0cnP8VwDYrYJ0DTwHUncZqcA9ADXr4/Ijo8Hg0Vt77/5fJ4FY0NZ1qAFeSUCogIGkiegeTpmfGhiI/58PY6LWjshH2MT1QOajLPS8BuIrkMKKQiO56RAIpPnDCNBiANfaT9pPFIzpgoH+npvsBS2nfeTedVRnZL+GtPF9Zj7VIsBuzD3x9tPA617mJ76DQKxvaev6GMC3Ani7W4EI2DAW5LMf7I2/sLsDZHyKmK8q8F2mCPjGyPCca9wE3vbustl37j2ZDkWargDjfEebm5ypFDFuTKcaLhsa+smrhT64cv9eFQLR0hJ5DwKkppUOnwyEbh4dnn2hD4PQ8gx1dr+TUulVzo21YzWkQfje6BtzLnXLg72BB+hbBJ7PyPTGtY8y9mQidrn6UScQBvgxh7Mfjpll4LeGmVqUTK75cyHDUg0iBYwrGXxxdkRSKIrldx4yg+nThnrXbHOKNB0CQcByBr7l8qE+kzb46C2b489Y811u+1DvcrZxL9i5NxXE6blG1R7DTwNfcoFgXgTCewikXOrkRp2uhVIiMdLEEezY+43GPXZex+DzPNpgmoHvDyZilxTqtJXbvrN1+ncIUA+AY7zUojUMA0/CoMWDm3t+7zduKcPPeIFobV1yMBvB+8olDs7pIw3Gs0wYJNCrDN6LgDYA8/IrwF2o8gQiM6VhnTbgrYDxW4BV70oZ0zCZdHQy2fOoTiCI8FPmzKjBuoNrGwMJlddMOoQOyzsmsqzWamY1bP9kb2+vGp5rn/nzz2uYtcfOFRj/MCc/hJeJ8RCDVCMaYPCRBLxX8/E/TmbqBCcxmgaBGAUwYumFbmdgE4FeB8xQZoREdONAX89F1salEvaRmX7YRXFNh+E1gH8HGBk7cLNBBm4bG55zvq6jMp0CAeBuAGrUlvONpqZRx+2duBFAGIxD7AJAwJeZ2ATT1ZbfXgOwEaCXLN+K3XPCq2TycclkfGA67dvlGxoG8xMg4yEAaTcGAPeONmHxdLoImtEC4fBxpgl088jw7IunOnJw+DDTzFhrBvgSe08SaojRfvr7DU7dAtCRFgNMM/iLg4n4d3VGmT+CyIWip8lEdzLZ82DuP8qwmppePrSh4eX/yTXizvEzc1uDTIHzh/ru/p21YVPzqtuep3MAfN82NN9uFR9NfqmlPfIF24epkn7CZFx00AHYZF97GZ8fbvgmITOMVusfueeBxiCfppsnnwaByOVJ2c91nDYvs06DHdXRfSCM9Ji1F14p+2gJR84FkZrWyrEbJsLFI2/MuV1j49TS0f1BmOYdAN5nYe3YME6zQExwZ+Amc3Tsa7Y5dmppjy4EQ5XH2uFR60BGlskOMF847wD0WG0vu6bxZRB9dZLdEa2wC72FU0XsO9TW3UkwVcc2J4xqdHN1U5Cv0X0PCzoihwRNusnWSVBTw2cl+3pWlXJU4CetGSsQShxMI7iOgA/YCvTHANILE4nVT/spqLbhzvcmmwbzZfMOwLVuC9DZhcobAF5qSfcvSPPRAwPxp+zvcmjgdzCM4wcTq4YKlcNJILzs3NI5MMwtkuveq2lMVLCNSHNXgfULCrVHziGmG6wfBRGWJftiP7a/a7oEwq2nnVdvFbCPzs5lTSOp1+MAnWh5/4UDidj1bnahpr2QMh+wrskxsHwwEVNTgpOeGSAQqqf8lYG++Ledpn5a2qJqk8UKTZm3g/ikgb74Fh0P1aFq2OOVmwj4+O7fd6/feWBRFvsOtUWvI0CNRscfd9HKBFEdklm7sBagTktZ1jcG94z09t6+q1A7UY7fZ6RAjAYQcF3AZV4HE6cXWgh1A6Z2ihgN6Y2Te2He1zSyveb11gVi67qB9d36Bp7uagy+eI7bVE8uDV388fWEsYXJ5L3b3cqZ6WUFsBqg43PhmPHjwf7Y2ZqPlVrau5ar3polTdepItu7KdQWVSOJyywGnhxtwgn2YfI0CUTB6YdcvitlH+o91JDebGnoPe8wym9UeSiAhuMTibt2WOtl+gWC70cap7p9r21tp74rhUAvAQda8q5G8xcO9sd+4CqW4+52fgZgVjbc82zQ0Zr5+4rY9xELl+751jdG7rWOBry6sm9pixwHZNZbM2Vh4Nkg0p2l6BAXIyAzTiCYKEpsfmeyiuYVrWCPpBCM1vaub
mZWPdvcsN5xBOCUVn4a+p6LtoFnOmuwv0f5kCr4aONnt8oWjAygNRy9igmXToRlbGps4EX2oa4a5gZM+iWQmRNWT9ppBOD0Xk0aowx8bDARU2I88UyPQOgbUF1ZKmUfGg6vm8zHDfXHE4XqtrW16yg2WDVErwN4yAAe5jQvtzfE0ywQnqZJNNuSAcJTSHHnwED8r64C0dF1GJms7Hb/bDityFbKvnVlYY/fe3PnknlGKrABILU+8zCDHm8g87q+vvhLheyhHL/PNIHYAfCLti2Vap/wagBq2GWdo/Q8RaMBp3q6t1iHpWrqYTARO7fQ7gdrWppej/bj1jTwr5BJC61rDwV6SJPOUQDw3IiodEPhrjOJMmI4/jgIRCgcOZmI1kyIpscP1Jr3zs7O4Ehq37vUEY3c/4nx78n+mJonnlaBcBk52fFXzD7C4ZPfZlKTchnz4d2ZcN+V5LchmGaBcOrNTyqGzm4AxBuD288oNMrWiKxWICpo3xQKR+8gwlm7P7mZsSvJr+3MNIGw5z8zUpi3H7677QV8Ln/hFI+awdTxbtspdUDa2s7YO42x+63bWpnwucG+2Pf8ANR+3Jp99BqB8HVznib+M2Yw1eG13HnxHQTCPtJQ2w3/9pbGUx75xZ2qh+r5CbVHLyWG2reeU6S8edTpGEG4rb1MFv7K2kdrW/RyBr5uA/wKCKsMk1bu2jV761Q2ZEyvQHgftbW0R28HQ22syPZj9GsqdkP0KhCVtG/NCFRle5iBdQRe2Rjcs3e61hU8f8hq6cRP4FKH1Vw5an3FpAXj8YVhugfARyfnw/u6QS6ew3znAwAVPB9gfTeDG2g8PxNuP3S95fwG3vtHo96Xdw4CeJLHAh1DQ3erE6gFHy8Coe/B8R8AQx0E9PnwBwEssAhE3tz4tAiEx2F+pe0jewZCTZEc5AB6GISHYdJKbqAHBntXKTt1PZxpTWdaBcKhM6IrZ55AeJxG9SIQlbZv3RqlrcwpBh4jRg8HaO2Bc80nvXpm8PkxTin4TBUI7W6icPjU95oU+IXND5PveXKHnTpTAjkRmbByoC+2zJqYlwba7eWViK+dAy4NEZVK3ohpWgTC453XlbaPTCfA+UZEXS1sB2ENs3H7gfunf1OoYZlWgfA4TaQKWV6ByHO9Ujrr1ti3StzJb5zDi9UZj/Vk0u0jI7N/NZURYykLNhMFwnWraWt79CxmqKtFrfvtt8MwjhvYvOq/vcCpdANQiQZ+qgIjArGbYKXtI/dm5SOKmNTaWCHXKdbqfoWA76THGpY7uWaYVoHQdJicbLXWBEKVU52vCaTN7xFlDgta2yy3T1b5Ubs1SKmr+vrWPO+lTStXmJkmEGkCrhwZnnOVk4Kqfc+Nb9l5JzN3WaHkjud7OXVY6QZABEJGEG4jTNvHTc3tkcOI6QsEnObNL1gmhUfZoKjONYMIBFDpDpC9wc4IhWleTIA6O2W7w8axeX+ODF6a3BzfVC4BKJTuTBMIT4u3ukNCqqAMXDmYiH2t0PzsVD+YQlDtv1erQHhd1PXLQ4Wvsikmz2cTimHhFCfjZfRF41BK82ImdDm4NLFG127amKq9+4mft644g0cQ5bRvFzugUGf3wTTGH4XB54Ch1utyp6110XzNjpTS/lRaVSkQKuO6E8IZH0aEpcm+mNoW6/i0tkY+CIM2MPD32UBpZjppsL/n/lIDVulVg0DoDveok9rJ/tgnysFkJgtEpe3DK9/MqWtz14fA5lIwljj0RPNOYftp4HV58RN/pgpEpe3ba53mXOukKbCUGF1Zt/+To3s4he31fX7DVa1AuDjDKnjyV3N6FeXsTVSDQGRENxz9IRP+ZcKIfOxA8Wt4M1kgKm0fftmp8BlfW89hGYj+09oD1W1NVq7yvRwkc8pHKNx1PBGrsxq5OXTHEdVMFYhK23cxdao67Dq/VMqzq59di0W+WxutagVClWb81GFQ9fqtTsuAAq449P5vCrsDsBPMNiTKH87eBPyPCfzeMPm2ZDL+sDVstQhES7jrfBBb3RoUcuynM6qcOwN1UG4bQA8zmxuaGl5abT3wNJMFopL2EWrrbiZOn0BELQwcnj2wqe4k8fKoA31XE/AlS2AvO8b8HdRsj36WGFZHlFUpEJWy74wgs3kimFqUiyUGkk3B7UsLHfjL1WGoPbKMmG6z1Om0THGq91e1QGREIhw51qCM7xLrPF5BVxwa52CvGmyc2N+/qs/Ll5npkeTvqNK6lagWgWht7XofGxmXBROLaF7XdXLMdHv6dU7kZrJAqLJUyj7yetw+R21eDmFqWTOdOtjfo07Nuz4OzgSrUiAqZd9TPbHtZ0qvUP1N9feqFwglcvkO4jJYXL1A6s5U+NoJpR+9PJQeHVtovzKwWgQi6z75bhAtshiWK0erAarDSKOpucsZ/EnL/4cZxgmDiVW91rAzXSAqZR/N4UibQfTzN2+52zPLx1dHRXPY9NcG7zqpv/+nal995tF5DvDiXVTFDYe7200y1wPYy0uPdiZPMVXKvjUHLX2d1coTGMCX54SpioI1fi0IhIOb3MyuJjePp2q+77tgfGYyULqzMWh+yu2u38xtVAYtnzRfDzh6nqwWgXAakXm83YpC4UiUKDM03j2ac5jum+kCkZ0PLrt9ZF083wdQq8UOC66jqbC63Xy6k/zZTtQk32OFOlCZUZTjTY6oyhFEpex7/NT23JUAn+G3o6V3+e3NJ1UphSGXVk0IRMaY2yMLwKQW0mx7jJ1dcTiuYYB/wwhcOJhYpXzQT3Jp4HSYyW30UU0C4TAKUIjVpS2XwMRded5Cm8/cKxAcu5IJauRgvVbScfRRBQLhvMZVYvvQH/50vqBJNfjjd4UbPwT4XZaG4TmD0wv7+1c/Zm8sNL1SlczTZJifsN+rnrljoWlnlIiVbzLdHdJVKxCVsm+HkddLzPTZsV2zY9qb/9ojhxuMlbbLyIZN5kVD/fEN5RCAQmnWjECM9/h0N6FhlJnPHuyPq7th8x5nYcms0KirQTdnrjhU1yOa+LB2GxroaRjm4oHNcXWrW95TTQKhMu9yBab6edwvEFO2rOYHAJqvuXJ0mIk/P9gXv1HHpBoEwr3jUTr7cJj6GMc2+YpXvOno7RAmfEhznazrupvrQTHGn2Dg12AayaQ/fpJ74ipP1fkBYZgZJ2frsmoFolL27TwCzRCcdJVs9vpUNYKcm/+t+Pc1V6jR9/N7LQmEW8PmetikpaP7n2DyPbbemEeOShzoNDc3H9UmELmPaNYuuhlA1CMIazDlKuCSgURM3S6ndSpXLQKREYky24d6x1Gdkf2CKXV/s/U2Mc/kU+qyqpHhOde4+fAprhzjrseDafqWxdNqVQtEJexbvcPh5kmvlaquKL2jKcgXuU13e02s2HA1JRC7P2ZTLfpNmmoqtADd3HzmXtQwdgUB6mCY28nGHOsUMW5Mpxouc/KBkwtYjQKh8p6911oJxLUA3uHFyMbvyOZzh/rij7uFryaBUOUop33kOKmRBAfo69mrKr3YoIr6KBl8sX2ayIm9D99AytvoDU1B/qpqoGx+
kqpeIMpt3zn+2W/ogjdH2Fe+eT/8HC/fEAB1QdIl8/bnWCFHjB7TKzpYzQmEIhEKRy8gytxvO9k5FvNXBvrj17i54shMr4zQSWCcrfalA9jPks6Lb95D/Ucwbk2Ppdbadys51UK1CkSuPGpOOti0ozkA41wmMwzQPIuIquHyXxi03gTduiXR84dCrk5UutUmEBMsymAfdrvJ9jyPB1idJWnOei/Ore1keAPGRhjmLfPm4vEiGpGMvycD9K9gHAvg4OwUoRr5/YlBa9IG3fTg5lXPTojX5LsaakIgymnf9jrN+JBr3PkREE7XfEPDAG8jNvrTMG9J7dp7SLy5Fq1pElEICAEhIAQqQWBaRxCVKKC8QwgIASEgBIojIAJRHDeJJQSEgBCoeQIiEDVfxVJAISAEhEBxBEQgiuMmsYSAEBACNU9ABKLmq1gKKASEgBAojoAIRHHcJJYQEAJCoOYJiEDUfBVLAYWAEBACxREQgSiOm8QSAkJACNQ8ARGImq9iKaAQEAJCoDgCIhDFcZNYQkAICIGaJyACUfNVLAUUAkJACBRHQASiOG4SSwgIASFQ8wREIGq+iqWAQkAICIHiCIhAFMdNYgkBISAEap6ACETNV7EUUAgIASFQHAERiOK4SSwhIASEQM0TEIGo+SqWAgoBISAEiiMgAlEcN4klBISAEKh5AiIQNV/FUkAhIASEQHEERCCK4yaxhIAQEAI1T0AEouarWAooBISAECiOgAhEcdwkVo0TOKqj+8Agm5+jNK9MJuMP13hxpXhCQEtABEIMQwhYCDQ3n7mXMWv0M2D6AoCUCT52KBHfKpCEQD0SEIGox1qXMmsJtLaesi8bDb8CcHg2wA4RCDGWeiYgAlHPtS9ln0SgvX3J/ikO9gF4twiEGIcQAEQgxAqEQJaACISYghCYTEAEQixCCIhAiA0IAVmkFhsQAm4EZAQh9iEEZAQhNiAEtAREIMQwhIAIhNgAgPnHRGY3jtBJYJzNwBEA5mbBpAG8QMDjzNyTHkut3bLl3pedoC3oiBwSMOmXAA7JhWHClwb7Yv/hB3RLS2QPBLAaoOMn0mE6a7C/5ye6dEqV/+a2yHwDtAHA3l7yy8DJg4nYf7mFjUQigWefDxxJZC4D41gABwMIZuO8SMAjINwxK8Bre3vjf/Py3s7OyFtHxmgdCP+cDf/HIKXa+/rWPK/+VuI2xg2fJpinAvSe7PtSAD/FMFanDbrpwc2rntW8i5rbI4cZoH+FieNAOBBAAMBrAJ5k4hVNAdzjNZ9eyiJhqoeArEFUT12VJKeZhiZNV4BxPoA9PCSaIsaN6VTDZUNDP3lV18CE2qK3EPDxid8YmxobeJGfRkXTUD+TNvjoLZvjz1jfWer8l1ggqLUj8hE2cT1A/+iB7WsgvgIprBgYiA+7hXcSiFQq8AoM4xsg/qxFhHRJpQj4xsjwnGu2br15TAVoaYn8HQWNG5j5tKwoOGXhOTJ4aXJzfJOHMkmQGiIgAlFDlVmoKKpBQIB6ABxTKKz9dwaehEGLBzf3/N7+W0tb5DiA1gGYlf3N9/mBUHv0i8S4ZmL0ANw2mIidC4Bz/ytH/kslEGoERAHjSgZfXKCx1aDnITOYPm2od802p3rRCQTS6RM5GPgOMU7wWJ9pEH9loC/+7dbWJQeZRnAdAR/wGPc1BpYMJmIbPYaXYDVAQASiBirRSxHmzz+vYdYeO1cAfJ4t/DCYnwAZDwFQDUgjgDA4M2WkphosD/eONmHx1o3xndb/NjefPtdoSKuG4325/6veajIRu9xT3o6JzJ61C/cB1JoNPwrwooFE/Oe5+OXKf6iz+52U4s+o0RQzv4UIJwPYy5KP9YDxvxPlMOgHA5tX/be1XC55UxvJXybGQwxSI6EAg48k4L2a3v7jZKZOSCbX/FnHTCMQfwX4EeuUXHZaaCNALwH8dhA6wNjHlt4OkBEhNr/EyEx/jT+MP8HAr8E0ApgfAGh+fh45OdqEE+z176WOJUx1EhCBqM56853rUFt3J8G8zzKtlGbg6qYgX6ObClJrC0GTbprUiABpIjor2dezyp6BUFv0OgIu2v1/741Ja2ukhQ1SYpBrmB81xwLHDA3d/WIuvXLnX72nyEVqammPfAFMV08WVH7CZFx00AHYFI/H1brOxLNgwSn7GLMavknITPNZRfiBxiCfpqsPjUBYk9xBhItG3pjTk5s+Uj9m1nUMfBlEX7W9Z3T3aI9/YxLOGeqLP25NMOOLykzfAVCn5f95wu3bECVCVREQgaiq6io+s3kNONGKgb4e1aBPTOHYU1cLwbN2Ye3kRoLXNwb3jPT23r7LGl7TyL9KJh+XTMYHCuW6tS16OQNfz4Ujxr8n+2OqUZt4yp3/YgXCYYpqI9LcNTAQ/z+XslOoPXIOMd1gFW0iLEv2xX5sj+ciENtBfNJAX3yL7l26xf8JzsCGkSaOOI0I1OgKKfMBAg7NxWFg+WAipqbR5KkDAiIQdVDJRyxcuudb3xi51zoa8LIbJ9MLta0vMPBsEOnORGL101Z0WTGxThOpY/oFp5na2s7YO42x+wFqzqaXJyyVyH+RAkEt7V3LwXyhhYXrVJHN3CjUFlUjict2/18/8nIRiAsHErHr3cw4FO46k4jtovOcwemF/f2rH3ONmz8y1HYQ6uAzqssiikDUQbXrGhd22UJqRdLcuWSekQpsAEitTTzMoMcbyLyury/+kh2dfaEZKDzNFGqLHkPAzyxTHnnz3JXKv98pJs0W37TTCMDJzDRpjDLwMftisINAaHd62d+lG+WwZhOAdgQS7jofxD+Y+K2IHWp18InVbBFFIGq2aicVjELh6B1EOMsyVeC4K6lYJK2tXe9jg9WZiH2zabxuMh831B9POKWZv3YBXY+4Ivn3KxChcORkIlozMb9PeAop7hwYiP/VK8POzs7gSGrfuwBEcnF0U2x6gdBP99nf3dZ26rtSCPQSMmccMo/XDkKoLXoSAT8VgfBao7UVTgSiturTsTSt7V3dzJlpBuui6DAD6wi8sjG4Z699XcEvms7OZU0jqdfjAJ3o1tjlftPsftpOJh2dTPY8an93JfLvVyBaw9GrmHDpRFmBDX97S+Mpj/ziztf9sAu1Ry8lxlW74+Q3/DqB0AmJ7r2acmlHKbq4IhB+arL2wopA1F6dakuU3TmznoCQQ5FTDDxGjB4O0NoD55pP2nffeEGVP9/NQwE0HJ9I3LXDHj///IRzj7gS+fcjELqeP8B/AAx1n4TPhz8IYIFFIPKYaafZPJ5Y91Mue8ZFIHxWZY0FF4GosQp1K0443H2kSaY60HaAh2IrVwvryaTbR0Zm/8q6fdItrmZO3WmaKW+Bt9C0R7nz76chLbDt1ANe1yCT3GiokFNZh/FTLhGIqVZdbcUXgait+ixYGrW/PZA2v0eExT5O/Co3ELcGKXVVzveP04vGe9ZzVwJ8Ri6MbmtkS0vkHQhSLxjKb5B6PC24ljP/fhrSGSEQHvxCKbB+yiUCUfATqqsAIhB1Vd27C5tpaE3zYgKWWhaVC9Hw5JNHM3X0yGgwePT
W3rsnTiTbF3i97qrJZbAc+ffTkIpAeHMyWMig5PeZTUAEYmbXTyVyR6HO7oNpjD8Kg88BQ82Huznx2w7DOM7ubsKaUc3i86RFUc0oYyondEuW/6kKRDFebL1WsHaKSUYQXvFJuCIJiEAUCa5Woym/Qk1NLx+apsBSYnSB8M68sno4he128lmzTpHnWqNYvlPJvx+B0B3eI8aPkv2xTxSbd7d4IhDloCppFiIgAlGIUH3/Ti3t0YVg3GGdhlKeXXks0GH1lWTH1ByOtBmU8a+05/hvPDHNZN+y6nW7ZhFV4Sv/fgRC5aU1HP0hE/5lIl9lPEQmAlFE7UuUKRMQgZgywpmfQKij6zBi80Qwtai7ghi5rY7OAAAFx0lEQVRINgW3L+3t7U15yX2oPbKMmG6zhC3ozlvjeiM3zfRL2/0RBX02VSr/fgWixX7KGHA8x+HCObebSx2U2wbQw8zmhqaGl1Zb60cEwoulSphSExCBKDXRGZjeVE/8alw1FBQIhcHuekONFNjkGybvXvLgjmOKJ5a95t+vQGhOjivPh1cOJmJfc3OCaDWRlpbIexDI3Mh3UO7/ul1fIhAz8MOqgyyJQNRBJWtcLfjyGZQnMMAzZjDV4XbBTWYKxu56g7EJxCvevG1NuQvPXS5U0NlcpfLvVyCy7rTvBtEiixm5ele1mptarB9NzV3O4E9a/j/MME4YTKzqnRw278pRJUYFrz9Vafgtl/W9clCuDhoIt+FtfRe/PkqvO5sAwFNDpnf5jXhjcPsZhaaoNK6mnyfCJmacmSXvaUqmUvnXNKQFfUk1hyPHGpS5TW9i55fb7XsWi6NQOBIlykzd7d41xrwOJk63X0EqI4j6+FZnWillBDHTaqRM+QmHu9tNMtdbLuVRb3qJmT47tmt2THdSurk9crjBWAnQkdYersm8aKg/vsFLVh1cTWejenM2pwJXIv/zO0//+1mp1C8BOiJXNrUziU2+yOnOaIdRgIq+A8yXwMRd9rjNzWfuFQiOXckENXIIehl9iEB4sTYJU2oCIhClJjpz01M7er4Lhrpe0/68BvDvACPrJI/frmaIAMzND0o3jw7PvnAKrjcmkizkWsP27rLnX+dsMJuH18av8VQ3iJqXJxPxO615U6Osxl0Ut92+lwsyDMLDYPrd+D+crvPEMBN/frAvfqPOhEQgZu6HVcs5E4Go5dq1lS3TyKSMGwBWp6f9PuqK0juagnyR7kpMp8QcpodUcE+uNazpViL/oXD0AiKscHJD4rQld3wqjm4GEPULFoByZXLJQCKmbpfT3vAnAlEEVYkyZQIiEFNGWF0JRCKRwLbn6YI3pzauBDDHY+7V/QaXzNufY8V5eLXdmzDeCt42mIid63W3Ty6f5c6/BxFyXH/J5k0JxLUA3uGFLQODTHyu/U5oe1wRCC80JUypCYhAlJpolaSnThw3Nu78CAinM5lhgOZZFkuHAd5GbPSnYd6S2rX3kNcpJV3xda43AF40kIirg3RFPeXMv2ro//oCLWbmzwH0fgBv251JZ/fluTAqb8GmHc0BGOdq2KrpvL8waL0JunVLoucPXkRSBKIoM5FIUyQgAjFFgBK9MAGNQJTMtUbht0sIISAEiiUgAlEsOYnnmYDdu2sZXWt4zpMEFAJCoDABEYjCjCTEFAhoFqkLutaYwuskqhAQAiUkIAJRQpiSVD6BfHcUfD/SONXpXIEwFAJCYOYQEIGYOXVRczlRi7Wz9ti5AuDzsoVLE9FZyb4e5WpDHiEgBGY4ARGIGV5B1ZK95s4l8wKjPJpM3ps5UNbSEnk3G/Qt69WmDPzWMMcWJpP3bq+Wckk+hUA9ExCBqOfaL2HZQ+GuJUS82iVJXw4CS5g1SUoICIEiCYhAFAlOok0mEGqLHkPAzyxeWm2I/LnoEL5CQAhMPwERiOmvg5rIgebOBWu5YqNNfN7WjfGdNVFYKYQQqBMCIhB1UtHlLmZn5+I5I6nGb4N4MRj7AEgBvJWIrn3Hfry2GBcd5c6zpC8EhIA7AREIsRAhIASEgBDQEhCBEMMQAkJACAgBEQixASEgBISAEPBOQEYQ3llJSCEgBIRAXREQgair6pbCCgEhIAS8ExCB8M5KQgoBISAE6oqACERdVbcUVggIASHgnYAIhHdWElIICAEhUFcERCDqqrqlsEJACAgB7wREILyzkpBCQAgIgboiIAJRV9UthRUCQkAIeCcgAuGdlYQUAkJACNQVARGIuqpuKawQEAJCwDsBEQjvrCSkEBACQqCuCIhA1FV1S2GFgBAQAt4JiEB4ZyUhhYAQEAJ1RUAEoq6qWworBISAEPBOQATCOysJKQSEgBCoKwL/D5yktjRWSi00AAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-26"><g><path d="M 51 301 L 51 237.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 232.12 L 54.5 239.12 L 51 237.37 L 47.5 239.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-25"><g><rect x="1" y="301" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 351px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: 
normal; overflow-wrap: normal;">uPEP driver removes OS_HINT</div></div></div></foreignObject><image x="2" y="330" width="98" height="46" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tnXucW2Wd/z/fk2SmLAp0UcpVRPfnT/GGW+hkkkkdLWCLlEshmRYolJ+7XLSLq7uIIquooKvu6tIfF0WUq9BJpLSIIBRxnMltytZFRURXQAUpFFkExGlncs53fTLJzJnkOck5uUwymW/+6auT5/J93t8n53Oe2/chyEcICAEhIASEgIYACRUhIASEgBAQAjoCIhDSL4SAEBACQkBLQARCOoYQEAJCQAiIQEgfEAJCQAgIAfcEZAThnpWkFAJCQAjMKwIiEPPK3dJYISAEhIB7AiIQ7llJSiEgBITAvCIgAjGv3C2NFQJCQAi4JyAC4Z5VU1MG+2IrCbizjkrGADwD8A4w7rcId77uADyUSCRMN2X29EUXG6CtABa6SV9TGsKNmZH4Ol3eVre/pvbUkSkYiX2MGF+cKqICmzqqyWedzbrqtVXytxcBEYg28UcDHpC6lvwewIUHH8DxakLRAQJRV/tnuxvM5kN7NuuabY5SX3MJiEA0l6/r0pskEMX64+ML+Jzt9ydedDKoQwXCdftdO6pBCWfzoT2bdTUIjxTTJgREINrEEU0WCAB07fjY3uu3b792QtfkDheIqu2f7W4wmw/t2axrtjlKfc0lIALRXL6uS9cJBAMnZJPx77opZPHicwJdXS/sR34jwmx9GqA3l+QzibAuPRK/xa1AeKnfjY2V0rS6/fXa7zX/bD60Z7MurxwkfXsTEIFoE//U+4C0N6O3N7oHGbSBCX9n/zsDPzGsiWPT6Tt2ljZbN4KYSwJRb/tnuxvIQ3u2iUt9tRAQgaiFWhPyNFIglHmLj47u3b2LEgwcYzPXZOZV2VSibLdUJwlELe1vgksrFikCMdvEpb5aCIhA1EKtCXkaLRDKxN6+6HKAtgDoKprMwIZsMv7hTh5BFNvmpf1NcKkIxGxDlfoaTkAEouFIayuwGQKxuH/Na7pyuR8A9I4pqxgPdAf4xKGhxJ/slnbaCCI/ivDQ/tq8VnsuGUHUzk5yzh4BEYgKrMt+xA4PV6civORvhkD090dftXuCtoDw3mkbedSHwIpk8tYXOl0gvLS/ET+5o5auPsTP5mpYWAPKbxLYA0COgZ8DfBNPdF03Ovrtl1RdXgSiNG1xbain5/S9KDBxMQFnA9gPwBiYH2XCLdZ47sZt2+543k1doVC0lw36PoC9ChxesZiXj6YSSS9c+vpOeUMOviECDinkG2fg/dlk/P5K5eT9lDNWANbZAP0tgEWF9DkAvyWmEcvg6w7Zn7PVzvMU64lEVh2QY/8IgDfm/zb9230ltDQWYQufB9ADwA/gWQAPMOEqL3V4YTNX04pAzDuBwGN+ykVGRjbtmJ8CoW9/PT9g9aD2+ScuY8L5hQeOU3HPAfT/MsnB7wUjsQvdnqTWCQQZxlOw+Dt/EZ436Cvju7r9e0aHhm7YVU2M1HpV1y7cDVCoWBYxPp9OxT/phUswPHA6Edt2yXF6fAGOczp/ozZTwI/1YPoXAK+uXhc/SgY+lB5O/DD/yK/w0QmEOTER83UHPg7GRwD4NNldCVp1OzsnhQjEfBOIeTTFpB1BeBwFVvupB5cOvIUsjgN4W7W0he/HmPlsg+j1DPzrVJ5KYUhKwnIQ8GEGzgVwuEOdJhGdkR4Z3OhmBKHS9PbFPgTgyuny9CNNpzb2969bsDv3SgKg46dEBvhMOhm/VJcn2L/69ZQzNwKk3uK9fEwQvjr+530udjrToworEwjwEMNIEfjjDuKgNKeioHkxslPSikB0sED09kYPgp+GwPgbWzMT3f6dpw0NDanh+9SnE9cgvLS/lh90KLTqUDb8d2se1CbAjwJGVpXL4CMJeKttdPECGGkQ3l+LQIDwPBj7FgUHQAqgxwujiTCAx6wJ39Gjo7epqRNX01mh0MDb2OAfFKaqVDZPb9MFoVT5DyjY9YIFPmY0mdheyrYiN8aTTMgS6CUG70VAH4CDy/1T+eBnuUDgZQDdUxs2GL+BgR/ly7XwbhBeD2B9Jhm/qpa+0Kl5RCA6WCBCkYHVzPkh/9RwmgkXZUfiXyptdicKhJf2e/2Bq4OJ3X/14s3MPGDPS8DWnMHnbhtOPGH/+5IlJ+9rdAU+S5Nv/uXTGx5GENPl8ij7fauzQxt/U/ybGjWNmXjd6EjikeLfqk0xqXS6EYDTjjcdq97wwLkg/prNtntg4pRMJqGCSE59nLdfY7Pl4wtLuakj8D2RNW83OPdNgI60FWUy+GPZZOIrOns0AlFMNkbAxw86gK+yrWdQX98ph5mm8cdMJvE/XvtCJ6cXgehQgQiFTt7PMgL3EfBOWxNfIouXp9OJTKcLhNf2e/2R94SjxxiU30KsFqLzH2JcxxZfUPpQtJVNvZHoP4PpC2Ui4VEgGPgV/Mb77OLg1AY3AqHylgsq/3Tc71+2fei2P1Tik19L8OF2gFbY0mnfxstsAUwwX3LwgfhypQXowkL21QCvtdXxO5i8LJNJ/LrUPkeBYP5EJpVQUXQrrmF47Q+dml4EogMFIr+bxjJvAqh/RvOYt6gdNroHWLNjMVU7ld3IXVy1tN/LD7y/v9+/O7foRoBPK+ZjIGuNTxxf3DnkVJ4aeQT2+OPXCzuPppN5FAiqML9fWrdbgViyNHqYzyI1TXRYoQxX00yl01MMPOmH2Z9M3v643ZaenjWLjICpdjTZ1msqTxXZ8xdGYXcREJwSZQcODgLxhGnwMs0oxYv751VaEYjOEAha3L9m30DOPIIIZ4ARs7/ZFpr4AsNYkU1uHNU1eY4LRN3t9/Kr12znnLEoXK0szXw/4E0gPG1DdSsQahAU7It90y5ebqaZyhe46dZu/7Nnla5zaab8HEcATgzdjnIcBEK7/lbNX/P5exGINhaIBnZME8SfyIwk/s1paN2OAjGb7fdSVzAcPYGINhWniZzemJ3KnByB7HcrgOhUGm8C8YTlzy0dHdr0lBu7PQiE7vT9w/YF79L6NDvFnMRSJz7XZ5PxD3iZ7tGIs1YsdQLBhE9mR+Lq/IN8XBIQgeh8gciB+OKD98dXKs3xdrBAuGq/y99LPlkoHLucCRdP55k+c+C2nGAk9hFiTC+wehEIj1t1vQiE5vT5OMAnZpIJdZCu7FPWbwi/Ro77M5mEuqxq6tPXd9pCExP32Le1MuGj2ZH4V90yU+nC4RNebdECFeH43VP5mM7LpAa/bi9HKxAeoiN7samT04pAdK5AqKtGsxbxOfYdLU7NbcddTHX+8Dy130NdFAzHbspP5RU+anE6nYr/vYcyEAwPrCBi9aCb3NHkRSAAT1MlXgRCK4BEV2ZGBi/QvemXl61Pq3nzV1Xd
C9BvvXBjcICA9wE40Ma/7FCfRiA8Tct5samT04pAdIpAEJ5nzu+P32YYuNcc920t7oN304HnvEDU2X43jFQa3eE7p63DlcrUvHk739dd5/3VngWiPPSGdppJcwLbcVG7qSNUjbhqBMLxXIZb38/HdCIQbSwQ1Xb+NLLDtqNAzGb73bKcDwKhaaN2mqk8hpPz6WsRCLc9rL3SiUCIQOQJiEC4+2HOB4FQJMp2JmmmmUJ9sUsZ+PTUVE+FrbciEO76V7ulEoEQgRCB8PCrnC8CUbYVt2TxWbPo7HgI0+EFpKlTPjLF5KFTV0gqAiECIQLh8bfUG4ndAMZZU9kqLDA7Fd0TjvYZlA+xvWc+jZdFao/1eV2DmFxrKduKO+M2wjL7wdrQGsX2h0LRI2DQVgZeU/ibyUwrs6nBezzid5VcBMIVpqqJRCAqICqLL+Nxe2EoHPvGjHuhK+Rv5Eniql7XJJApJvfUgpHYxcS4fCqHx36h8pWFxm4zgVA2lh5KY2Dq3EKwL3YFAWpnU/FTMdCdOkVNAXOYgDcVM9SyuO/WSyIQbklVTicCUWkE0RdbSYD9/uYfGbxrZSp1p4oMWfHzjmPX7vmqP+++Y8ad0CIQjsxaLZDV/Gn/vmyLKrCDDVqWHR78hctyyg6NtdsIQrWjLPRGYZppvNu/u+Smwqrt1wUDRJVRh45lQWjuJWAhAb+0gF8YFl+fTicesqcXgXDZE6skE4HwJhCuT7Bqwh/bb7Wacd1n/o2yXIxUNLETssm42ivf9I+MINwj1oUR9/I2rIl51HZTTAUa1BsZ2ADm9YX/57exMvOuGdNjLs9llIfkwEsGG8enUhvVzW+uPqFI7Axm3GALdqjdWisC4Qpn1UQiEBUQlc+zouKpUltR6g1RhXa+ZEbxMoLoiBGEWjEoeXCqdrmNK6TvG204xaQa1dsXXQ7ko9Z2qf+rm+bUv7aT5K7jUIXDp7zVIt99Mw65AVt3L+Co061z9g7T07/qYCPnV2sW9suZfmyOTxxbGiRRBKLqs99VAhGICph0b4oq3n+VDk3BcDRGRNeXBcwTgegUgYDuYQfwUM6PNQ8OJZ5xaKhz32hTgSgNvcHAT4jZAtG78m10CK3h1P7eSOwrYPzjzO/p5m6/9cGhoUTZyLqYToUUJ4M2zFjTA0xmrM+m4rZ7KCZziEC4ev5XTSQCURlR+VzxZPrNOcO44MHhjU/as6twxP5A4NOOdxOLQHSMQEyOInR3O+jvTVbbY3fl6J8Ko0p/GYg2FQhlZ+l5hxm2VwjDoXO2wyhAjUn+k+Fbn01u3FYa0qMnEj2cmFSU2akw35Pa5PyyJgJR9dnvKoEIRBVMFQ745ADeDhg/UfOhmmslTWJczQR1R+9kfP05JhCuepDHRE5z9a1eg/HYjHxydbdD1x4vXgnwOZr8TzGQVFdngvjtYByhCcE+na2dBaI89EbRbrdTrjPw9EaiS8Ck1tb20wilChczDNBzIO62XQdakpQeh2GdlBlO/EznOxGIWnp0eR4RiOocqbcv9sG/7Ff/csUf+MxyTAIug5W7ng2/uoDljSIQk4A6SSBUexxuOqvWq14m4BoGVIC/hfnEbSwQusOBBW+6unFOB6N36ep3weLvFO7RrsZLIw50amZ44385ZRSB8IjUIbkIhDuOFFoafQ9buAqgN1fJ8hwz/0M2lYhHIqv2z7Ff7dAQgShA6zSBUM2KRqO+p3aQuqTpCgCvrdw/+FGyjDNNw7IM0Na5IBCqPZodSGqK5zPpZPxSdz+h8lQ9PafvRYGJz9GkUE5d3VqhvBwxrjFzgUtGR7/9UqV6RSBq9crMfCIQHjgWrotcTuDzAfpbAIsKb3/Pg/mXRHRNl483FxfbyjqpTDF13AjC3n3Um/a4SScx8/kg+r9g7GvrH9sYdM3E2D7f37792ol2juaq+0lotm1XDK3h4WcFFRW2ezetBONMBg4HsL9tG+uzf7mH+jEwvmVO5DZXu9K1WK8IhBcPOKcVgWgMRylFCAgBIdBxBEQgOs6l0iAhIASEQGMIiEA0hqOUIgSEgBDoOAIiEB3nUmmQEBACQqAxBEQgGsNRShECQkAIdBwBEYiOc6k0SAgIASHQGAIiEI3hKKUIASEgBDqOgAhEx7lUGiQEhIAQaAwBEYjGcJRShIAQEAIdR0AEouNcKg0SAkJACDSGgAhEYzhKKUJACAiBjiMgAtFxLpUGCQEhIAQaQ0AEojEcpRQhIASEQMcREIHoOJdKg4SAEBACjSEgAtEYjlKKEBACQqDjCIhAdJxLpUFCQAgIgcYQEIFoDEcpRQgIASHQcQREIDrOpdIgISAEhEBjCIhANIajlCIEhIAQ6DgCIhAd51JpkBAQAkKgMQREIBrDUUoRAkJACHQcARGIjnOpNEgICAEh0BgCIhCN4SilCAEhIAQ6joAIRMe5VBokBISAEGgMARGIxnCUUoSAEBACHUdABKLjXCoNEgJCQAg0hoAIRGM4SilCQAgIgY4jIALRcS6VBgkBISAEGkNABKIxHKUUISAEhEDHERCB6DiXSoOEgBAQAo0hIALRGI5NL6W/P/qq3RO0BYT3Fip7zE+5yMjIph09kejhBuOLAC0DsAcIz4N5G4OumRjb5/vbt1874WRgvtycsQKwzgbobwEsKqTNAfgtMY1YBl93yP6cTSQSppuGBvtiKwm4s5iWCRdlR+JfUv+PRqO+3z+D5cz4EIiWgLFvPp0Lm5Wt4yadxMznA/R2AK8GoGx6hoH7mPjfRkcSvwDAbuy0p1F2PbnDdySRtQ6MYwAcCsBvs+2XBBo0J3yDo6O3PVup/MVHR/fu2oW7AQoV0xHj8+lU/JNe7Orv7/fvzu13q8I2xRLYkE3GP1ypnIptAZ4l4Kcg3NTl481DQ4k/ebGpmFa1sXs3rQTjTAbeYes3eX8Q8AgzD5oTuc3btt3xfC11SJ7WExCBaL0PXFmgEwiY/G746CQAX84Lg+7DdF4mNfj10q96e6N7wI/1YPqXwoO2ih38KBn4UHo48cNqD2AngQiFBo5iw7oJoDdXqexhNiiWHR5UD/u8fPT2Dbwf4G8BeG2FvCYDNy3w8wVuH3z5h+nTdAoRPg/gjS6ckSOi2yeILnxweOOTTulDfbFLGfj09Pc86kNgRTJ56wsu6sgn6es75Q05+IYIOKSQ5yWyeHk6ncg4lEGhpdH3sIWrXDBWRbwM4s8hhyszmcSYG7vy/dCkz4FxrmOfm1lQjhjXmLnAJaOj337JTR2Spn0IiEC0jy8qWqITCAI2MPCvFX6oT5gGL9s2nHjCXniwf/XrKWduBKjHY/NNEL46/ud9Lq40KtEJBCz6NRHf4E6M8lY9QlbuuHR60++CfdGPEOiLU2/01Y3+Dkw+s9pDTzHdlaMNBJwJwFe92BkpniaD16aHEw/o8oVCA29jg38AYL/C969YzMtHU4mk23p6wwPngvhrNpFJjy/AcdvvT7xYWoYSfPIZlzFYjS48toVHLb956ujQpqcq2dbbG/1r+GgQwNFu21BMx8CvYNBJNtH3WoSkbwEBEYgWQK+
lSo1AjAPYbXvg7mTgAQK9AljB/Bsk0TWZkcEL7G/8odCqQ9nw3w3g8BI7TDCeZEKWQC8xeC8C+gAcXG4vXTs+tvd6J5EoFQgi3MmMoO1haQL8KGBkVdkMPpKAt5YKADGus8D3E9H1NhFUb7opgB4v2Kim3IoP4aKpJhGdkR4Z3OjEOj9FsosSjPx0UunnZQKyDHoCxN0AwmAcpnnwvsxM67KpwU26BzZ8uB2gFbaHZNXpoWLa/v51C3bnXkkAdPxUfttUnb2+xYvPCXTt8eKVAJ9T1hLC88T4cb4tgM+JtU2Qf6tjVqGOMTA/CjJ+nJ/uq8iLh8YX4CSdwNXym5A8zScgAtF8xg2pQSMQ0w9D0BVsWpfY35iPWrr6EBjmxINDiWeKCR0eiiYzNls+vrB0pKGmdnoia95ucO6bAB1pa4jJ4I9lk4mv6BpXKhAlaTabBn+0tK6eyJp3EJsJAt5krweABSAAYIyAz7DJG+ztVFNETz2NdSD6/zNHUnwPTJyiG0Wouf3x3KINDLWWMePzNDP9wyEHWltK11sUT59pfZUIakrP/oa+E4axPDO88b/KRKJ8BPDTcb9/2fah2/5QrVMElw68haz8COSAQtqdZNGydHrw4ZK81BuJ/jOYvjDTLn7UYlzwugPxQGlbliw5eV+jK/BZQn6ayN6We7v9fKpuei7Yt7qfYKkXi+JUpprO+8ICP39Rl37J0uhhfou+XiLAVYW7Ghf5fnYJiEDMLu+aa3MSCAaunxjb59xKUz7FSoOR2Mcov5g99THBfMnBB+LLlRagCwvZVwO81pb3dzB5WSaT+HVpo5wFovLIo7cvuhygLQC6SsocI8La9Ej8dgeA1BuOXgTKPySLnx1s0DLdlEZPOHqMQfl6bOs2rqZZqLcv9sGyNR/mLbCwplSM1EPSZ5F6yKvRh/qMM/D+bDJ+f7WO0NsX+xCAK6fT6QWvpy+62ABtBbDQVub9MHkgk0n8T4V6KBiJnkVMV9sf+kRYlx6J36Lx6RUEqNHo5IfoytLRaWmewmL9ZoD6be24q9u/Z3Ro6IZd1RjI960nIALReh+4ssBBIKotWk6V3dOzZpERMNWD6W3TFVZ+YNsNK7x13kXITxVNPiOAz6ST8Us1D5MZu5gK3z9sTfiOrrQDqK/vtIUmJu4pWxsh/EdmJP7RSovjmjdu7cN4cmfQohsBPs1mt6PYaZxDwb6Yevu+xPbdmMV84mgqoR7UUx9dXexiF1J+A0Hp9BTTGdnU4LdL7KHeyMAGMK+3/b24dqOdKirNX94WLlvneMexa/d81Z9332EfDTBwQjYZ/261zlsq+gw86YfZn0ze/ni1vPJ96wmIQLTeB64s0AuE+50xocjAamZWb4bFKQUvD8W8jeVlsHbKRDeCcBKT0sb3RmI3gHGW7e8vWOBjRpOJ7ZVALe5f85quXO4HAKktl/mP7iGmERKVdH0mGb/KlSMA6MRWjeSyyfgHSkUsGI6eQERqjaLAXc/MXrdmgVu72UAzQjGdRgBObXMzytH1PdYLVlk1Pf2rDjZyvq0AqbWchxj0SICsK0ZGEs+55S3pWkdABKJ17D3VrP+R4pZsKq524FTb96/eer9JwNm2h6f2gVbJKM22S+3OHI1AuJ5aCYVjlzPh4mk73Imglo/mLTcYHjidKC+URRGp6Y1WY6dWLDViMg7wiZlk4vtOrMunAunWbv+zZw0NDamzKVOfMvEh/Bo57s9kEr9327l0Zy00ZzYoGI7dRIQzbNxkV5JbyHM4nQjEHHGe9gHosKultEm6qRsmfDQ7Ev+ql+aHwye82qIFalrh3VP5NOcsyra5ephWKH84sqs5a7cCEQrHvsGEv7MJkKvySzkF+2JHE/A923qJ40gn2BdzPX+vaYfJzKuyqcTUwcOiLaUiRcDWP/1V98k/ve/mV7z4NRiJXUyMyysx0YxAVfIxBrYQ+MZu/55Dsq7ghfrcSCsCMTf8hHqG+Zo3f9XqewFyM089rQXgAAHvA3Bg8Y+6E8KaEcTUqe9quMsEgnBjZiS+rlo+NwKh3TrqYk1AV7eOqdO8fE842mcQqRHDnoWyHNdjNIvO2rS6N3+A/xsw1EFGjx8+AsASm0CUHerTrUGVVJJj4OfEGGQfbT5kkfUrtyfvPRoryWeRgAjELMKupyo3D0Cn8h12utRjznRezQO8XCDcTROpQpsrEGXhSmAPA+IFSCSy6oAc+0fsp6+dBEITesNxmqn0BLbTonaFbc9emuGUVivo4fDqIy2y1O6vqReECpW9DOAusuiG3bv3/qGbXXaNMFzKaCwBEYjG8mxaaXNaIBgPdAf4RDfhL+a0QFSY8itvV/k2UY2QOJ6+boVAqM5d4TxIpb6vDjd+y0+5y1XssKb9SKTghhMQgWg40uYUKAJRmasbPvWs45TWrh1BVNjZo9mZVDZ1FApFe9nIT0XtNVlf+ZbToh2tEohi/XmhsKwPE6DOxpSeZHdyVsXwJM355Uip9RAQgaiH3izmdfMAdDJHM8Xkautorc0rm2KaLyOICmcDNGcbyhafNYvO2nMmyi+NFLta/VzIR8H+1YfSBL8PBp8FhlrP0AeOnMzgePK8TjskexMIiEA0AWoziqxHIEKh6BEwaCsDrynYZjLTymxq8J5m2Nq+AqGJb1TjIrXmPEVVppottlNbjTU7zZxCa+Rdpju8pmJXpVPxv2+GT92WqWI2LVjw/JtM8q0lxgAIry/L6+IUttv6JF1zCYhANJdvw0qvRyDUXnwKmMP2OEe1Ls66aVC7CoSyvXTLac1bQ8MDK4hYbfnNH4Aj4A+w+Jh0OvGQE6OyQ2m2cwvl22adY0kVyy/bsuthpObGjw1IQ72R2LFg3GSfhlKRXXnCt7TavRoNqF+KqJOACESdAGcrez0CodveCVR/AJW2rSA09xKwkIBfWsAvDIuvL30otrVAzPJBOTtDTeiNqWmmsrMSDvd42MsrDweOiqMOh75aDNehLiV6CqCHmK2tCwLP3V48mJcfLbF1PJh6ASxmIL3Av3Nt6cE9p99CMBJdR5yPyFv8NHWKc7Z+k/OhHhGIOeLlegRCNbE8+BteMtg4PpXaqLZquvqEIrEzmKHudCiG69CekG5rgSiPkup5q6taoPVb1r0A3lIE5xRqoxSsJjbRBj/8l9pjULmNV6RZ+FZH6i/LJuOfcnG6Pm9ab2/0b+DLBxR8na0tM8KS13tie7bXwFx1ZknkioAIhCtMrU9Ur0CEw6e81SLffTMOuQFbdy/gqJv4/JMxdfxqzcIW7A8/Nscnji29UrKdBaJJwfqqhs8o9qDy0Bs8yozPE5G6WrRwkE4fWqNMbFRQPwO3gehE23c7QbwyM5LYVq3XOoQ9H2MYx2WTG4eK+TWHAj3FfCqPR4UnLH9uabULiqrZL983n4AIRPMZN6SGegUif21nJPYVMP5xpkF0c7ff+mClMwr528oM2jAzRAXUPRLrs6m47cazyZLbWSCUfbpw3wz8xLByJ6bTmyqdLteH+wYc71HQOb9kOumPRBhmxgmFtI6hNXRlObTFTZwkCoajsZ
[Embedded draw.io SVG flowchart removed (base64 image data and SVG markup). Recoverable node labels: decision "Any GPIO w/ WAKESTS active" (yes/no), process "Check for ACPI Notify() events", decision "Any GPE pending" (yes/no).]
qGmfiz+ZGUtfqvDDLAgHXa1SBpxhIF69rJX4XGCqWVPnSo8rmt6FATIlkxVRjWfCqXTlqf98k3Hc4uhZZgwiDHzXbXIcJ9EsGVAjtqDcb6Y8w6CSXq0ZnFKNEYs5Wun77VIS6etLvMw7gwmw6qW6X0y5uz7ZAKAOKdeaN7wL8UR8GqetPr2VABfibumWvPQWiKJKd8166iqfuGi9fHlQNxUaAPw/QRU5hWqoVIN+3LgGZYmpd33huWYVAMFab+Y4TjI78QoB/6HRvcKmCPDGuLeQ7vjQ29uPNnitVN8zsuJtYrWHs5yWv05SXPW8zBEK1wct9yzvayuvJNE4rGKY54xrWNhUI//bjMTZooIMnN9nieBWY6djc6NB9Xt4JSdO6BEQgWtc3nlumE4hyRM6ptYaJTwB0GgHvKI0oxsG8HgZWRFG4dWTkjmc8V6ZJqH55Rudu7IrAOJPJ7AFof8sUzBaA/8Kge03QD9emh37nZUtsswSibJ6qf6JAxzPzOSB6Cxh7lEYHL4J5LYOunRzf/f51666fbOdgfU5+L9sPxmlMeJ/dfiJcs9/euF+tTWgCPc4I01LPuyV5m0tABKK5/AOp3U0gAqlAChECLgTsh+0IeAEmH57JpB4RcO1NQASivf1XbL0IRAic2MYmaOJ4Vdzv0cbm7dRNF4EIgftFIELgxCaZMLUwTykAbwHwCJh/CxPXZ7Opp702Kd47cC6A5eX0tV5F67U+STd7BEQgZo91w2oSgWgY2p2iYHtEW2JcnhlNqh1wVR/dITs/+atWIAmaSkAEoqn4g6lcBCIYjjtrKbG+gYuIcUXZ/uLJ+Kjxkdzwyj+5MXE4O7KZTF6UyaSyOyvPMNktAhECb4pAhMCJTTShYhdWsS283mScf+C+WG09RV1upgpSaBToSiIcbz0zQaBr50SfO98ambeJpknVdRIQgagTYCtkF4FoBS+0dRtcT1ET49cMUifxlXDsqeL7AZhfYTEhG0X+xHq3Tbc1yZA1XgQiBA4VgQiBE5tsQo2nyKdbrRamJ6N8mktwwyZbKNXXQkAEohZqLZZHBKLFHNKmzfF3inzayE1gXAaTr7EHaWxTDNJsCwERiBC8DiIQIXBiC5lQjMnU+dKHQFimORmfB/BnYhoBY8W2bbv9Qp0mb6HmS1MCJCACESBMKUoICAEhECYCIhBh8qbYIgSEgBAIkIAIRIAwpSghIASEQJgIiECEyZtiixAQAkIgQAIiEAHClKKEgBAQAmEiIAIRJm+KLUJACAiBAAmIQAQIU4oSAkJACISJgAhEmLwptggBISAEAiQgAhEgTClKCAgBIRAmAiIQYfKm2CIEhIAQCJCACESAMKUoISAEhECYCIhAhMmbYosQEAJCIEACIhABwpSihIAQEAJhIiACESZvii1CQAgIgQAJiEAECFOKEgJCQAiEiYAIRJiFIhQEAAAALElEQVS8KbYIASEgBAIkIAIRIEwpSggIASEQJgIiEGHyptgiBISAEAiQwP8DT5NGvC3HI6sAAAAASUVORK5CYII="/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-63"><g><path d="M 531 301 L 531 237.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 531 232.12 L 534.5 239.12 L 531 237.37 L 527.5 239.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-54"><g><rect x="481" y="301" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 351px; margin-left: 482px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">OS moves active <br />core back to<br />ACPI C3</div></div></div></foreignObject><image x="482" y="330" width="98" height="46" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXmcXFWV//e8qk4noBAGJawCjj8XxpVIeo8t+x5IqO6EnUERhIEBQQbFHXBchm1ABGXYSXcXRMEIAhHbdFdXNZCRQQYZB5EdEmXIInS6u+qdX251Vef1q/te3Vf1Xq/n/ZNPuu76vcv33nPPQpBPEBAEBAFBQBDQIECCiiAgCAgCgoAgoENACELmhSAgCAgCgoAWASEImRiCgCAgCAgCQhAyBwQBQUAQEATMEZAbhDlWklIQEAQEgRmFgBDEjBpu6awgIAgIAuYICEGYYyUpBQFBQBCYUQgIQcyo4ZbOCgKCgCBgjsC4EsT8AxPbz9psHczMRxNxHUC7A5hTaG4OwBsAvwjCz5mslXvMs/+YTCbV30P9EolE7KXX8ckY02ImHALCXmDs6KhkEwOvg/GYZeGh3FDskf7+5WtDbYQUJggIAhOGQGtr4l2Dw3QfCPsXG8HA0Znerl9MWKMmYcXjQRBU37x0Acj+OjEOBhAPgMOrBHx/Vpz/o7s7+bcA+bRJGxoSc2DhdBB9HcB7A5SnSCrDsL6U6e14DAAHyCtJBQFBYBwRqGtJ7BOz6fxcNnap18FOCMJsQCIliP0WLt0jlrOvIsIxAGJmTdKm+gsz/1MmleyqdHNubGzfjy37doA+XEU7ciDcaQ/VnNvff9fGKsqRrIKAIBAyAi0ti3fJcc3XGPx5AC/GKdvS07PidV01QhBm4EdGEI0LE/uzTXcA2NWnKZsA+svI71wLYGcfIskxcPvsOJ8b9DZR39x2IAF3AdjJDJayqVYhx+3pdPL/yqaUBIKAIBA5As3N7Z/IgR8CMK9Q2Z+EIKqHPRKCqG9qX0zEtwJ4t6uJapN/3CL611kx+9fujV69Dbz6qrWXbfFZBJwOYK67i0x4YLiWj1+zKrnBpPuNjYv3ZCv+AIB9XOnXb3l7uBXg5UOx+PNrupe/WbydtLaeOvud7MD74sCBDPufdLcOBm4ZHpj7hTVrbho2aYekEQQEgegQqGtOzLdAjwDYQQgiPJxDJ4iGlsQCMKmHHtdpnZ+wKX56f8/y35uIidQVcHOWvkTApaXvFnTT0MD25xhsztTQ0nYlGP/shIwZd3K25mxDMRHVNyWOIiJFeMXJp4obYqZlmVTnivCGQ0oSBASBShAIShCV1DET84RKEB6n9RyBruGcfWk6nRwICrKHeCgH4kvSPckf+pHNgoWJvWM2/RrA3sV6iahz8J3tTzIglzFNLYjMfgZgu60/8IPIYUkl/QqKg6QXBAQBbwSEIKKZHaERRGtra3woO+9aBp/laGqOgMsGB+ZeHnRDdnbX41ayDpZ1aHp1x++8oKlvaj+MiNVtpvhAvo5sOqCvr/PpCuDU3UbessEH9fcm11RQnmQRBASBkBAQgggJSFcxoRFEY2OigS361dgTNh6qjfNxQR+VdV2tb2o7kwjXjX3Eprtr42tP6e7uzmrztLR9mRjfc5z4+2OoOay39+63KoFT10dmWiJipkrQlDyCQHgICEGEh6WzpLAIghpa2q8F8zmOwkM9XevU0gD43gjqSwgCv7V481Gp1P2bKoFzfuuy98zKZn8Not3AlH8kJ9jf7OtNKm2tUD53m53GOwU7jmNBOBugjzmUAJQR36NMuH6PnTmjMy5csODYHa1Z8VOIcSIor+qrDBQVsb7IwM9zlnXN46s7Xq6kE/Pnn1FTW7vhs2zxqVvasdCljbaWgKdAuH2wln9RTrlAQ8Jv28yH9qeSvUHa1ty85P1ZxLoJ2KOQb2iLgsQRmd6uVX7l5OdZ1joMsE8DaF+HVkweK2LqsS3+qRfOJm2sq1s2L1aTa2dwO4g+5DDSVHW8BqAfoGRt3H4wjMOVSZtUGjWOs2dvmM+EBIM/ozEgHQD4FWIrBcbywcHtf1ONZCCvBm/nEgQsceGg1ucfwfzLbCz2U695Wd/cdhQB9xv2b8x+ZKLm2tjSvpSZ7yweShl4OY5ca2/vvc8b1plPVteUaLYof3jetpDvdbbogMzqzj/4laPWbKwmfgwI/+jCR4nqXwDoYZvsm/p7kqqcSGyzQiEInawf8D/dBwG4mLagHbUcwKzi3wj4Vl9v1zc9bhDnE+NKx2/ViJgqaXLgPB4EsbKhuf0IgP+jnIEfAY8Mx/nkx7uTb6jKlWbYy29YXyTmyzVaZc72ZRWWgwNzv2e66CswPBxg4Cc8XPM1LwWBEWt7PLBl8jeOjjHjir5U11eDgFnf1H4CUX5xFz7uG5qNw70IKt+XOM4B09fK4FQs71mycHbf6uRvTBfnfq2JnWty1tXMvMTQYHQTiL+DLK6L8p2rgnEsgqpU1M/bfRfuCuLxQBFDDfMPDHHIAtSFnP1PbrXyqAlCt68x04mZVKdSmTf+GpvaLmfCV7Zm8N8b6+pO2C4WH76MCUpcX9awmJURL/Hp/T3JZ4wbZZgwFIJwMy2AHBGd2NfT2WHYDqNkoyd40MdNFr6GudWZ31QDyqhNYScqIQjmRVuM8z5AICUqKztZVHsUSQzO5gTe2uGd2jkbrmHwGYaGikoN+epMb9dF5Ta9/GaXpdsZOKgCDJ5mi9q8TlANzW1nA3lxYnEzDiQaVGrKg9m3kwAdOUoyfgeJ1qV7UTbXAVBdwL4ow8mrht6Z+5VypNrQkPgAYnSfRt3aoEruzsaxrEj6BhmMk9S1Lt7dysbuqaDvxTqUEspNgwPbn1cOAzU1G5rbjwP4ZjMSdswAdaOw6BjnnImaIPLtLZWMJGvj6473Emu7gdfsWb5744hBLzoAfr/xII4kVLeKi9K9XT8qt3aDlBsKQdQ3t11DwLmOio2uUEEaWkwbpC7daVSRFzN+bsf4osdWJ/9cSRuizKMRi6kbk7JEL/qsygK8BrD+C5Q3LmwC57W0xliqE/AvTGyD6buO39TVfdWIcSIrVyMHahbqRrL50L6+ZNqrnz62JYqd3gRjtaqDwdsR0AxA+dxyf69ZbC1KpTqecP/Q2Nj+UbZYaZ8VVaWNxEOjc2Rh+0fIzuffpfA3T3GnT19yYLzMhAyBNvr3xf/QoeZh7WZKash0E8C/B6y80oRfHVHY3fi0q+AXDY85DFnVfFG3uqIhmnPYjFS+G1valjBDiWOLc7lYRpaB/yaQmgtKQ/FjYHxSk+5pO549rL97xSsqY8PCpZ+CzWeOFGK/p3AgKEoXNjLjfiJ6p1DJAMfp6kx3xwvq/yYipnwdzYlDgTyxF8sNtLcVtDB/OZqf8Byy3JpOJ191z3tvEwElCubnQNQPpsH82iUsdPmPU8UZaXcG2b+qJoiPH3zStu96Z/BnYyc/BzrxBWlwqeggL3zzdLJV35RoJ6LbnWKpQn05gH8Hxo/tbHzlZHHGpyGIIjzqdH+jPTT89cce+5ky6it+SrvqYDBUH522J8qAzyqQw1vqfWj3XdHpFAUURAv/AiIlvtlKMETXpXs6FeGXyDVHZPR0D4BDX
OP2NBPOzPR09bnzKd84xHQzAfXuPM4FX/xNdwNg4NpMb9d5JnOloan9CyD+8da0enVkjw3S7wBBdS3LPmZx9maAPu1oS47BX870Jp3izNGfG5oSyv/XjQ6MB4hw3uA7c2/VnLqpYeHST8JWbmHwUUcdZYnbBBtnGo+59vOcxRd4HZ6amhIfyhHdQMBnx9bnr/I9spnbSg7vnKNZYtyQHR7+lmtOo7Hx2J0Qm3UtMx83dm7i6nRP1wUlcywiQzmt1ILpzHSqU41n2a/kQOuxtjwOKp745B2Ovob9LbJ+7LptDDDz0kwqafo249uHqglCPbhRTW41AR901BToGlYWZUeCkSsYP+y0smbCxZmeru/rysk/oJqIWUZOvo8S4X6yrUdTqQ7lwyWShx+//nos2rIng1KxzGgt60B8VLonqZwMlnwKn5o5628k4DTHhvrUUDx+wJru5X91Z9Brk6FraDaf4fcAreqZtc36K8A432TBl4ot2bNNzjbmSS+GewE6zPH3c9K9XdeX9KVUiSEH5kt33xU/8JOpFx6yfwTwSY4yX0KOD0ink88569GRHQBte8Zs3q1L90LWfsi5roKQZLk1pdatVZNTD/ajJGRqIzSCMd2mnriK9fg94CoV+MHsvNsAPt7RLrWRnebnXy0/Z+ZsuA4jItLip31HDKrFZHqDUJU2Nrd9k4FvONbHytr4tonu7ls3++GswXhoy6F0Ubo3qYhy9MubCAzvdAMTPhcEH5VWiXrjWSwHqNUxFhl7aPhIN+mWmxO636smCI22iBIz3Jbu6VIaLaF/9aXiA7WL+54uR0hi/cWFQTaS4wPYROrxh7lzPG8YeoIob5CnHYcRcdo5mVSX4zRdOiQl12BAe43WiezUA5npZMyPwzYb7mDmdkcrXrM4d3Aqde9/O1umeSA0EjO5xVNeG5dugwzyPjWiFVaz0nkr0ilMaA5Qxtp91b7F+C0+zZgHUuDQvO+tJ5sO7uvrfNxdr0ZkCDBfkk4l1bua7yFMPWjHbVv5WPpIsVwtzhHdIPIEUarCbyRmKhVP6SUrGnzKHgidGBfet5RI9X2Fv4f2Blw1QWiYG34n+moZQ3lszHK8B8Dfj5ZlSEhK1GEx3YQRUUdA77L8LGB9Fzk7GaVGiYYgjAZbqwbsI+90joOGdLWbmGZT0Z6I/MZYt1l4zBeqb25TYqnRm025g4Cqt3RT1WuMaBQrtDcA37641CCB0luOZr4aq+0WbstKfPs2gP+0gCc5x9eGMf8aGxOftC06jcCfBmhPIuqdFVt7ounjq24deol6NXP6D1nLOsRUrdp9gldKGH/bpvbYpx6+Q+GS/6K8QWjXVnkxU8kDt5fGpbt/QQ5dxf5Xessptx/PKIIogEELmtv/X4z4LDDUlTeoh9eqXY/7nuxKxR5Gp5WRa/xOdzuv/QCMRH2axa4liFJ1veBvTfp2svbKrnkgfNoejh0YwMe/F7nqyOeWTG+XchBpLFbU3NpKNv+mpqPfbdNsZc3/ma3jHp1WUrkFH9bvpgShFbH5vHHp2qduK0R0NwHrADxBRN2cte9zEmWUBKE/eJR5c2lI7IY4dYPxgUKftG9IWlGXj8jca/zcN7pKbTbc5c9EgnBiQE1NS3exkTuMiNp55Gbh9kCrGxOl3vgTZPmCME5zY07zVVh/N7S03QrGKcXyTE7cKq0JQeiUEUzLdwNY+ogMrWtmzQOh742lZJPwuEE1Nx+/Qw7DDzpVO5lwQaan66ogG6h289ecLEtPd/lalDfhDsum2zZv3n6NgYpokKZFntaUIHRvlFF4H4iaIDS3bH8j3abE0USkHHmOSCoYj9bW8CK34aPmkJFjpqMyqc4HgwziiLpyfLXD75yRSLZcHVUThLqqwqJHGHjP6MZE+Gqmp+uKcpVX8rtO1k6Mn/alulSQkGq/PGGwZe/PrB7USKlo+hBG+DYVJddxj4ml62gJQRieREwIolptDmd7NfJrbzVUt5GRz+mzBDuPtB7vNQ8B9GKQCcTgGhrR5hqNeUIaoz6NjNhdzQAIT8Km27iGHsp0d6h2GN9kgrS5mrQjltZvftBG7GgQn1Bwgz8qqtWJmDQbt+dbRTVti5ogPG7oXsoGJTdUL8UEzVpQh6CVgFWiIOKHDzNvQ4Sjna6OKjHqC/0GUc2bQCUTIqgWUyV1FPMoVbJX1mIfMH0JjDaNXvYQM5+cSSU7q6nHmVcjrzUSE6kyoiQI0xOjCQ6axexjp1Di40srZtI8oHueoHTvZibtNkrj8R7mEyNFV+w6EFYwW7fusUvuiSBWykZt9E9E81uX7Tgrl30/29jbory7kU8xoIxTdTYQo6XpCMK9Xgn4K2w+qK8v+WQIbR0tImqCUBVpNOu03pw1ChaeYuKAxn6BIAvjLbjqG4ROhqZ7RArUM5/EhYV2rzNJFFdWdxPy7gFs++ZSYyd/Fw5B+116CjbXCJvCBOFpy6KZX1oxU6mmiff7yEQQhJoHPvYgftNkPQH/lhuuudYwfknQKZf3wVQzZ/2hxLgQhP00ByGjMnUEodkAfSO9GVWkSTQeBNFQ+q6gFTNpFCA8D3nTniDUWLmNQcJ6INFNFnddUZ1IdHV7GFYZa6WYTP4ZShC+GJZoJmlERxpNF08fXRNFEIXxp7qWxEeI6UIClBGYyZuXyurrnsRkbmnSUOPCxGfZxvUBY7XnrcxB+RvFqFX0dCcID9cbY8RMGpsPXy3EmUEQ7geZiHwx6R4XgdKTotKc2Jx9W0WjWwZYc5RpOhP+NYx3Ea1b8/Iqb8brd4YShK9tQIlqrOvxWTMvfK2Og4i4jAeugoR5R4prrQ9Sjo9hQjsB/1DG39YYVxMVVOnMQg3NbV/c4mH0B2VuDEUPs2tArNyv/LY2tu1TudzGHdzq5to3CJcn06gOdONxg1DglWjWud4INe9bf85ZfICXZfp43bAqnStVi5hUxWFGbvPriEbtUTmmKzkpelivGsvy/dqge6wNQ9ZXrHOyEkSYj9Rue4pym4bmgTDHzIuL7gRKH/r8VRA1ihUVaY5Uuui88uXnrb15X7B9EhiLPVSwy1phm7SrvnlpHcFWmjLOMLp59zME6xbYWD1rlv2Cl7tx0zep6fJIXcRUcxgZc7gpcQVURqXXHdSs3FowGdsw04RCEB5Xr40WW0emUh3KqK3qr+A3aDmIFjkK8zx5lvpA8XaSFaRxOtXGmUAQWg+pFWqP1be0jXHDzsAfeTi20M8flsY3/6jdgsaBo+8mqlW9NNT4CjJXqkmbV5B4DaeC6N+dJ/ww3vc8XF9sAuj0dG+n8rNVVoPKmCBK1S8R9M1QvUMNZWkFAx/aIhX4Hxv4Qxy5a5xxGcbrBqHGtMSwrTB3NGukrCGp/rCy9fBTzRwKI29YBAGtOT24e2g2jikXJMakI40tbScy41bTiHI6F+QmbifKtUWnIhl0wvvVMVlvEPmFUeLXPlpDOSdOJbfUgphpqDY+mA/itNUFfFnDQv0Ns7w7E/e4FYhG+UvaobhxWTbf4tTQyZ/UOXc4ETUwsI/yyprp7VIxJ0w+pS75XQIu
diSu+pFXO4eByzK9XV83IYf8XND5RNM4zQzDfsbEIHFcCcLteqMgZtpsW3uM9SJc3n+Y9mYe0JCwMB4ftS1+gEaCgD1JwP8S8/WpVPIlk4nmlSY0ghi5RbRdCcY/j62selsBDze4vnJrbbAP5VM+bh1SdPlbCXAaZ3VlN6Qg9UxmgtC52mCmZUFCrlbhmsXtuiCvxsrMm13RuoxEiRrnhoFvvJpDS4lqbTV2LWreRCGj1unem0Tbc85jnXNIL1cbGkNBX4t493oxcdw4ngShcQiZ3wOQw77OIFU6mxjNXqCLORHU7Ys6SHx7Czlc6ig/lH0pTIJAwZpPyTWdbooVd9xRG7e/WEn4xMKmpCI4OV1imDiz0oGmDkj9HI8trYQk9EQVbuS8yUwQETnrM3YS536DUgtQLQhHtC4jv1UqT1PTkn+wKaa8Am81cisEWjK58XrM9f/MDQ0f7PSiqdmMAxFR2GFzC6RzIAFbYxTknTqaW+96Gf55EoQuXr2hsz69j7FSd/TjSRAKQzdBMvFpxHS4w9WNsXv2uqbEQRblY0444mSYH6z1rtR5RW182xPKeZwtd3gNlSDywLUkFoBJ+Z5x+TjiJ2zCKaZh8RRLU4zOLXhgHRNgpBgxrdxC9iYsqFCJ/4IcLzdxlVGQB6v4wSrKmfNRL9BiLzcY+cXrdrVh6IhwBHuXqw1DubqJJXWx7Tp330x4YLiWj6/E3TeBbpgVX3uuiZM493Wcgf8iZhtEn8q3z9A5YaEvXjfesoeZ/Ny06FqXe2at51yPoFXPkJ09vK9vha/ldr3G5bfhqdR3qmm9HwC3DA/M/UI5lx+KHDhG9xDwCXclXqJWL3ffRDipr6drjE2Ts8zCursIRJc5RMtayYGbIMqp2gdx960Ds9T1Bvds8QasDhsjTkSDeEBQ7tMtuN9XcwRcNjgw93K/MWloSPwdYqQMdVXwr+I3YDMv6k8lHzHZc/zShE4QqrLGlsQRzKQiRzk3U/WTilS1CuCra+Podd8o1IR49VVrLyZuA+XjBqgoVu5vFXLc7o5P69XJwg1E+UTR6Zvn/eEw497heOzJNd3LVSCe/ANda+sxc4eGane3LT6RgBMB7Oaqw+QWE3h8JjtBeAcMYs8Yzd5BZmC0UTpB9PBrNJIkoOzW+wDBTzBi52R6O1QMjTEPtl7Gbn6HFv37GT9rM8593654VGMpXbBPsH7iCgajdY0edJJ5RloEbuThmkt0BnnKvXmsNn42mC70st3wU9bQ3djyYTKZv11bg+vce8FIfTX/Bs6vPUcwK33AII2vJN/YHtUShAfpOYcikLaZR0AlNf1WWowLU6nk/7jG2ceGxfz2UW7uREIQeZIwi62qNCfUaV4BoeI0KAb2itegIqrdPjvO5wYVVQV0c1AOszzRBYjDa1LeaJrJThAjY7t4T7biD2jjK5uHHPUNZOQFmtYOZSRxWY0RXZk+YR7HhE/Nh3e18RkQ9ioth56HZR+TXp38vbYO/QmxQGp4kxj/yaB8+FsC782EfaMOJ6knrXyT1Kb9LMj6z3x4XhU2llEPwh4uF/mbQFjr8FaqRH2+Pth8Qo6O+KJiUvjFGPxpnU2IHwnr49XnAX0TTBsYnI2x9bmiVmW1BJFfByXu3kdH31hsOubw4x2SVSV7hYFeR/jb/bVq0IR0HNklPT0rVMCzqr/ICEK1rK7uhO1i8eHLmHBWGQOgch1RJHLe7rtwV6V+aepaln3c4px6y3C9j5SruuT3TWB8e2jz3GvKXccDlzwFREzFPhWC3d8NUEvwfvKzsGLHp1d3/C5oXq1MOl9IeY0Rr7oKsY3vqSBQvNqBnodFx5Xriy7yV4C+Z5W9z+DA3O+FNeeMIy1qG8nPkm2dbMe4mRijYVaZcWcm1XWynyaUj3ShDBxlXaR7iAy3Fut0XhcGQWhcbxQq07uvNxnvyvHJr4F+O547rhiz26S+cmkiJYhi5XkRA+hyIigbBtOIbiq7Cuz9ddO3gnKdzfucmf3WYiJ8M6BrgZGTFejurEXfMg10Uq49ut+nwg2i2O4R2xQ6GwQV03quQX9VlL7vz4rz1UFvgc6ydRo0XsFYDNqUT6IOM1Qz/B0ClFfgMW9eHmXk4wXnsjWXmvpIUnhxjL5BgIr3bVKHqvppsvi8vtXJ35iqoJr2OR/Sc/b680BQ6q0mLj/+okRCsHGzersr0WozfANSfs1iOfsqIhxjELhrE4i/gyyuK/deWI6Ene83YRCEh/2XsaKE1zjl/b4x/4CZlxjul8YYmc6NYrpxIYhiZWpQNmfpsxbTYpu4kYBdHBNTvU+8QcAzNvhXiMdWROj2mOpbl+5Jw3wIE6tYrvu62lIgBH6F2ErZxCtmx/k31WxqpgMzlQjCOa6DWeswgFWM4kYAOxcWfn5MAfQBlKyN2w+GgaFG3mysMVJuHPL+tgbpKDBOVnYLjr6orGu3xKH+Exj/kRvO/rzSmL+FmNZFvOpcotVNAL8EWKtg2TfvPg/PVHprLtfX4u9K3h+fVXMCg5Uixocc4i2lU/8iMfWAsXxwcPvfOG8wmrCtOSKc2tfTdadJ3flwornc50B0BEZi2hdJSmHwewJ1ZoeG7wqC84hyi3Uas63K/bCLiEdVoEMiCKWG7NYI83WtYYJLMY3CN1aTa8+PC0g9fhe96RbWFT8FWLeEta50bRtXgggCjqQVBAQBQUAQmFgEhCAmFn+pXRAQBASBSYuAEMSkHRppmCAgCAgCE4uAEMTE4i+1CwKCgCAwaREQgpi0QyMNEwQEAUFgYhEQgphY/KV2QUAQEAQmLQJCEJN2aKRhgoAgIAhMLAJCEBOLv9QuCAgCgsCkRUAIYtIOjTRMEBAEBIGJRUAIYmLxl9oFAUFAEJi0CAhBTNqhkYYJAoKAIDCxCAhBTCz+UrsgIAgIApMWASGISTs00jBBQBAQBCYWASGIicVfahcEBAFBYNIiIAQxaYdGGiYICAKCwMQiIAQxsfhL7YKAICAITFoEhCAm7dBIwwQBQUAQmFgEhCAmFn+pXRAQBASBSYuAEMSkHRppmCAgCAgCE4uAEMTE4i+1CwKCgCAwaREQgpi0QyMNEwQEAUFgYhEQgphY/KX2ChFobU28a3CY7gNh/2IRDByd6e36RYVFzohs9S1tXybG90Y7S7gt3dN16ozovHQyMAJCEIEhkwyTAQEhiMpGYSoQRF1LYp+YTefnsrFL+/uXr62sp5IrDASEIMJAUcoYdwSEICqDfDITREvL4l1yXPM1Bn8ewItxyrb09Kx4vbKeSq4wEBCCCANFKWPcERCCqAzyyUoQzc3tn8iBHwIwr9CzPwlBVDbGYeYSgggTTSlr3BAQgqgM6slKEHXNifkW6BEAOwhBVDa2UeQSgogCVSkzcgSEICqDWAiiMtxmai4hiJk68lO830IQlQ2gEERluM3UXEIQM3Xkp3i/hSAqG0AhiMpwm6m5hCBm6shP8X4LQVQ2gEIQleE2U3MJQRiM/Pz5Z9TU1m74LAjLmLgFwJ4A4gByAN4A0MfAHcMDc3+1Zs1NwwZFjklSLJ8tVgZ
LCwHsDCBWSLSWgKdAuH2wln+xZlVyg0n5mg10VCtE6ZlbeWMpOgDAHBDeBPNjDLqhXB8WLDh2x1hN/BgQ/hFEHwJjx0J7BgC8ANDDNtk39fck/wCATdpaSZpyBLFgYWLvmE1fAuMIEPYo4LkJ4N8TqDM7NHzXY4/97M1K6lZ56uqWzbPi2SNBdASA+QB2LcyJYpFrAf4TA/fmrFjy8dUdL1daV5jzrxKCyGOdtX60pT8nOftAjJ+yzeem00k19oG/+ua2owi43zDjWzb4oP7e5Bq/9D7zM6tUZ4mpJwf75uzmHforWauGbZ02yYQgfIYykUjEXnmd2gD8AMBuBqP+KkBnpns7f2myOTY0JObAwukc9GxmAAAgAElEQVQg+jqA9xqUP8DAT3i45mv9/Xdt9EuvIwjk+DOI0TGF/szR5mc6M53qvNH9W13dCdvF4sOXMeEs10aoLwbIMPHp/T3JZwz6FTiJF0FQjlfBsr4F4vPLtHMAhBtrY/y17u7k30wbMEKudCWAAx0kXi57jhk/z8Ws84MQRRTzLyhBKHKaNWfDdQCfESY5qLLCJIj9Fi7dI2bb3yJAkZg6vJX7XgVw0e67cFcymVQHPfk0CAhBeEyLhobE3yFGdwE4NODMyRFw2eDA3Mv9Tij7tSZ2rsnS7QwcFLB8lfxptqgts7pTndK1n44gCLiWgX/N3xr0359zFh/w2Orkn50/Nza278cWOgB+f8C2qpPlRenerh+ZEGaQsnUEQeCTbaKlxDjcvCzut+O54/q7V7xSJg81NLd90Zdcy1f6msXWolSq44lySaOaf0EIwoscALqjNm5/MQix6vobFkE0LkzszzbdUbjFlYPW+XuOgdtnx/ncavsSpNKplFYIQjNa8w9MbF+7mZLazTsvjsFqgP7C4O0IaAawu6uYHIO/nOlNqpNmydfYuHhPtuIPANin5Eez8lU2381Gs4EOARgE8O5CnesYeJRAbwN2PUAfBtEN6Z7Oc52beUNLYgGYlH+jnVxtzQL8HIj6wTQI8HtBWOgQORWT50B8Sbon+cMwSUJHEACU1e0ujnaqNq4BrP8CcS1sfAaEvdyYE/DI4GxO+InvGlvaljBDbUJuct0E4I8APVkQOcbA9r4g+rCWiBmP1tbwIr8NKcr5Z0oQebHWnPVfZeDSsTelcMhBjUHDwqWfgs1njoyH/R6AjgQwqzA+G5lxPxG9U/j/AMfp6kx3xwtjDi/e45IDQR10Ump+EnhvBuod83+0GCY8MFzLx5uKb6fSBl9tW4UgXAi2trbGh7LzrmWwEqU4Pn7WZpz7vl3xqPNKmhcDvEFHgXGTS0z0msW5g1Ope//bWcqIPJfuAXCIq+qnmXBmpqerz72RKrEGMd1MIxPc+T1tx7OH6U6/Hhuoypsj0DWcsy91yo7VFR1Wbvjx7qR6U8l/HkSWJcYN2eHhb7ll+AqLl17D/hZZP3bdNgaYeWkmlTSVN5ed1z79y/eRgRt5uOYSlyiOGhYu/SRs+3YAHx1TCeHqdE/XBToSU9jEbVtZ+X7EkectIpw7+M7cTt1NUYkPKWadxmB1YyuSsso+BPCidG/yV7pORj3/DAmCGloSF4Lpuy4xWtfQbD4jio20EkO5EYKxFY7Ow0uOCB0xZC9yu+lwiOyucYt0CXTDrPjac7u7u9VbhXwFBIQgXFOhsTHRwBapSbed46dVyHF7Op38P6+Zoz1pE13nPpHXN7WdSYTrgi68/HV/m/VXgKFk68UHbMBjY/PaQBm4ZXhg7hfKPdDlN6rhnW5gwuccfVYb/WmZVLLL7zagxGfxLJYD1FrMy0DGHho+spqH4RKidXlzLfxe9sZSEN90Ft4RisV6PoI2NLedDeTHrPhttNg6MpXq6Cm3kxTEHz9zzidiXNGX6vqqLm/k86+8N9dxJweFQ1CCKLzfLQfRIuf8NBFp1rUu3t3Kxu4BqM6Z12Ze1J9KKmtu+YQgtHOAGlrarwXzOY6N7Y+IW4e4r7a63CWnM2CMTF+JDmZtxgNbNH0aK9k489f+bTbcwcztjvq9byqlG+hGsvnQvr5kutwKaGxs/yhb/GvH6azsxusss6Eh8QHESOV/X3HjJqIT+3o6O8rVbfK79w2Cbhoa2P6ccgSoaR+gIfTW1lNnD2bfThbEHyNN06TzarMi2sHsTncDSIyOOePOTKrrZA3JRjr/VP1lbhBU35I4hZjUm5FTlBbZzaGISVCC0BEpA5dleruUwkdZ7Tnt+DPfBxvLKtXKMpm3Uy2N3CAcI9bQkNgNceoG4wOji5lwcaan6/smA1tQrVSb4t7FTZGZjsqkOh/ML87mtgMJUBpORTmrr7hBV6dm4wZr2qjfQLk/hprDenvvfqtcfxqb277JwDcqIbJiHncZAK+sjW+b6O6+dXO5+sv97kEQ2kd2r7JK24en7eHYgU4X0/mTaoz+kUfemvYlYHub+bj+VLK3XBuLv5dsyh7vEFHPv3IEoXtnGS/5fFCCqG9uu4YA9V5W/ErGrtz4aG6GRqq05cqdTr8LQThGU7OBryObDujr63zaZNDVaXFzdqc7CFA3hDVg/j3HrI6itlFjU9vlTPjK1rLMN+xiHt2JVLfxatVAvU+uY7qnzRuAKEdPhU2JZovy4rpt1d8YeDmOXGtv773Pm+Dpl0ZLEAFO9qpszab0ts18aJDN36QfpgQR9fzzI4iJJAePsfD05trcfPwOOQw/6BQR+YntvMaouXnJ+7OIdRPydjIjn4eat8k4T8c0QhBOgmhpO58YTs2j31q8+ahU6n6lqVLV9/GDT9r2Xe8M/sypGcXAtZnervOCFtzQ1P4FEP/Yka9kMVWzyWsWTs55EzJt74isN77acaMaYuCITG/XKtMyvNLpCZCWZFKdK0zL1m00TLgg09N1lWkZJumMCSLC+Vdsp07ExIw7CVAq3Y7HXu6JoWaRyW3TBINyaYLcIBobE5+ERY8w8J5CuRXNK734UCLsOcdKCMKBRkNL261gnDJ6mDA8cZeb/Or3+a3L3jMrm/01QB+v9rRS5zqZAyi5GntsoCdmUp1qI/D9NOUrUdhKwPprubzO35l5GyIc7XygZSajNpSrR9O/wOIB3W1MWQf3pbpUwJqqPjXeNdlcAwHLACj1za2aTF4ipgjnnw9BPAfOK2S41ZiNH+KrAqqQOQhBaG5ar7NFB/jZBXm1seRWb6CGHEZ/p0oZQhBbR4rqm9puJ8KJxT+FtVmo8lS0rCzHldbL348SUIUxlDWLyYwgDOsLaMAUaK7r3ksCFVBIrCGIP9vx7EIDg7cx1bkPBQgQo1mdQIeGNs6zrfjHLdAeTLwAjAVA3tbCyxhRydp0thCRzj9PgvAFn7uHZuOYKNRa3dUGJAi3i46KgwuZ3u4qmaPTIY8QhPeGo338rXTQIyYIJd8/OtPbpQza8l85X0V+/ZiiBFHRJlHf0vYVYlw+ikcZgii4dDiPAKVJ5jaQNJseGoKoRiRoVulIKo2mnTO7MkqrHaNGzXxJOpX8nolmUJB2TFqCACqaR9X0fTLnFYKYHgRR8rgqBGG27EpEDB
4EEdQXlaN29X6ltMaK6r7aG8TEEwT352CdHGP7+y7bgnWwrEPTqzt+Z4ZoZakmzQ0CwRVHKuvx1MglBDFKEKX66iJiGp3Ek+5UNZ4iJg/DqtIVPuImRbmCyAD0W4splUp1vF7f0nYR5b3nFj7tDSLa+VesWn+D2OqPSrNRA+NgHzBpCELeIMbMayEIBxxT5ZHa/UhHwF9h80F9fUnlDyj/VXWDaGo/jIiVuCpvsa0rf6LPPxE+Uo+xclbGiTVz1t9IwGmuPq8HoYNtur/GGn5y551j67y8gprKuaOcfz4EscaOZ49xvN1QfXPbt2nEB1PxU95oz8mkupyac6FOgYAE4bYnqviR2m1Pofxy/W2b2mOfeviOt0Pt4BQtTAjCMXAl8mggsJprXn5PuAXMb4DoCeXYrza+7nZgr7jbIrfSG0q9Sx2SgT/ycGyh08CrGoLQqBHmmHlxmL6Uql0vmv4FVsXVaZa5taw8XF/8Cjk+wc/1irN/pgQR5fwr+hgy8cXU2HjsTrZV8zABn3D04yXk+IB0OvlctWOnyx+IIBa2f4TsvJV/0TFj4LFXbdCpnle6JqPAZDKUKQThJAjXyVl5Bw2qPldqDIdkbXzd8WqBTrihnKEWk1YlN6ARmoJVWX3bFj+w5TSqHKA9ScD/EvP1qVTypWonv9bdt4+PI119ms2/RBtMM55BN8oS7SQPLSbURzz/FAYmBJFP15Q4moiUW5RRbSxTP16VjG0QghBDuUoQriyPEIQDN42rDPXrOenerutN4NX5WnJaZupcbTDTsiDGXToZsamrDbemk0+fSnwCAQi+MZaKKgITrlcbPVyJ9A3NxuGGapmaPvKY/KZW635zIx95rianDAO3eo/1kHNHPf+CEISHaG2ImU/OpJLK0WGoXxCCyPej1NXGH7KWdUiQgEya95jAtjShgjAJCxOCcAzKyIYw7zaAjy/+WYlvjJ31NbUvJuLlDl9LY1x1ROSsT+sOpBoRk+p7XVPiIIvovrH6/GaO8FR+vStmXlEb3/aECH0xGcvK65uX1hFs5SNrB8cUGHMY0FnaBpRR6+T5XnYQiHr+BSGI/BiWOlxUf/Z0MV/N/haUICJx1gd+EDksEWd9W0dSCMI1q3UTzySgTGExqQ11NAiQkmfOqll3ltPHvM7dt4lDNC93315+7KslCA93ykbR8jzcaQ+E6U7ZJx6EUss8Nr26Q8XV0H66sQKgPYE2NrX9xOXy/C2GdVimt6Pfb0MciROCC8B0hSYEpufbVuTzr7y77zHd0s3XKGInuAminN8uj/mZZfDFmd6kcpXi6dHVy9132DFLqiHMyZJXCKJ0JKihpe1KMP557E/8hE3x0/t7lv/eOflGgpCosKF0syvkYcCAQfwsWTi7b3XyN+7J3dSU+FCO6AYCPutq7jNkZw/v61vxorsb1RKE9y1A/cIrLcaFqVTyf1z1UuPCxGfZxvX5CHVjPvPbh8niKBMwaBOYzx/avMPtTrffPmPlKTppbGlfysx3uqKqPU+W/XndWKk6Xn6D6sH4oSbAU7FrfmrDkc4/0zeIYkMLt96fO2N7AAiV7PM3G83DM5gv3X1X/MBLO8wrYJBX/O/82LxmLSLify8NTxru/DSZw1MhjRCEZpR8Qz4CrzDQS6CNPmE2B4hwUl9P1726SRBSyNF1ID4q3ZN8TFdHGAShyvUJtal+HsWiEH51f41PH6Unm44ju8Qd4auaBaLpnwpCr2J0H+goVxmprXKEh9W1T4WmvGronblf0cWQWLDg2B2tWTUrPTb7tQD6VPkjddofBehjJWEtCcrfkfJ1VAxC5esWJMr5F5QgVK+ampa22GSvHONTK+QAUFrFCFV53raENjA4G2Prc+4gTT7z0zjkqImEoJq5OpXzCkF4jN7IyYlUGNG2gAM8wMRfzPQkbzO45t4NUEvA8lWxz8KKHe9n3RoWQYyQROII5nxQeKe83rDZW42wDDMYJdP0709k0zK27H93RQrzKy9LwLcGB+Z+zy/AUFPT0k/bZCvx4a5GjduaKAumq7iGfoSs/RABHyz8tJ5sOrivr/Nxr/Kimn+VEITapnW36iABegxw87g5bc3p5eixoP2l1tt7DepxJsltEQmrYEhnGSo2BCx+6icXgvAZQ78YtrpsKqwmE5/e35N8xmRqjMhR6WwQVPjJuQZ5NhHw/Vlxvtov6L0qJ0yCUOUpH0Q1zD9g5iUambqu6ZtA/B1kcV0Uj346gohTtiWbjQ1S3PoRMx/nCuvqaqO3SE/XmQXN7R+0wLf5iI3cG88qm/gCNRd0j90mTgujmH8VEgQKrtvVo74zlrfRe4zBvM4n0YWqdeb1i/kQfH7ys0S4sK8n+UDUfqZM+z8Z0wlBGIxKQeXvUAJOUgdqADsXNp8cGC8TqNu2+Kd77MwZL3mpXzX5zS5rHQawCks5tnzgjYIYI1kbtx8sRwzFesImiGK5Sm0zVpNrZ6iwp6Q8084r/KZOY28A/BRg3RKkrQZDUJLEiyAKYiyqb166gGCrWBtKrDTSxry4gh9j0A3DA3N/VS4sqbvS4vuCZdPnmFjd/PYcJcuRsv+HgXtzVizpVrd0Ry8LEqM7zPlXKUEoLOqbEu1EdLtDS0/9+aHaOB9nOi/LjbU6NFHMOo3Z/hwo/47l9Io7alPkVc6IM8VcgoAlIPoQGDsW0g4Ayg0KPcyg5XvsknuikrVarv3T7XchiOk2otIfQUAQEARCQkAIIiQgpRhBQBAQBKYbAkIQ021EpT+CgCAgCISEgBBESEBKMYKAICAITDcEhCCm24hKfwQBQUAQCAkBIYiQgJRiBAFBQBCYbggIQUy3EZX+CAKCgCAQEgJCECEBKcUIAoKAIDDdEBCCmG4jKv0RBAQBQSAkBIQgQgJSihEEBAFBYLohIAQx3UZU+iMICAKCQEgICEGEBKQUIwgIAoLAdENACGK6jaj0RxAQBASBkBAQgggJSClGEBAEBIHphoAQxHQbUemPICAICAIhISAEERKQUowgIAgIAtMNASGI6Tai0h9BQBAQBEJCQAgiJCClGEFAEBAEphsCQhDTbUSlP4KAICAIhISAEERIQEoxgoAgIAhMNwSEIKbbiEp/BAFBQBAICQEhiJCAlGIEAUFAEJhuCAhBTLcRlf4IAoKAIBASAkIQIQEpxQgCgoAgMN0QEIKYbiMq/REEBAFBICQEhCBCAnIyFNPY3PZNBr4xti10d2187Snd3d3ZqNu4YMGxO8Zraw5jxtEAPg1gZwBzCvWq+l8DsAbMv7Sz8ZX9/cvXBm1TS8viXbIc7wHw90HzFtLnALwB8HqAehn45ew4/6a7O/k30/LqmhPzLdAjAHYo5mHg6Exv1y9MywgrXSKRiL30Oj4ZY1rMhEMA7A5g3mj5hDfBeIFAvyVGcvPm7desWXPTcLX1z59/Rk3NnPWHEvgsgPZ11DkA4AWAHrbJvqm/J/kHAFxtfZJ/YhAQgpgY3EOvta5u2TyrJrcKwEddha8jmw7o6+t8OvRKAagN6tW1+Ixt0+UE7AcgZliP2qhX2cQX9PcknzHMg
xAIQlfVAAM/4eGar/X337WxXFsmA0EoMrZm1VxEwBcAzC3XZsfvfwHzt2Hj5nQ6qTbzoB/VNyXaiOjfAby3XGYGMkx8epAxLlem/D5+CAhBjB/WkdbU0Jw4FKD7AMxyV0TAt/p6u74ZdgPqWhL7ENPNBNRXUXYWTFfBtr9hsmFFRBCF5tPzsOi49OqO3/n1ZyIJoqEhMQdxnAOmrwF4d+W487NkWyf39XU+blqGujXM2mb9FWCcH+AgoIpXRHRRurfrR3KbMEV7cqQTgpgc41BVK1pbW+OD2Xm3AXx8oSAlQrAci/hpezh2YCUiHY+GUUNz+0kAX1dmk1oL0NsjZfC2Y0QfpQX/Cjk+IZ1O/p8fGNESRL7mZ8jOHt7Xt+JFr3ZMFEHs15rYuSZLtzNwkA9GajN+A6At0kZW69sp5nNn2wTQ6eneznsMNm5qaEp8E0Rf1ZDDKwz0EmgjwO8FYSEYO7oqy4H4knRP8ocGdVW1HiRzeAgIQYSH5YSV1Ny85P1ZxLoJ2KOwGf965F86oNCoHBGd2NfT2RFCI6mhJXEhmL6r2SjWg3ArE920xzz7j8lkUomRRr/W1lNnDw9v3s8mW+VVt44x4igCHhmczYk1q5IbvNqpJQjCbemerlMN+0bzW5ftOMvmD8K2L9ryJnJUSTuIOgff2f4kL1n9RBBEY+PiPdmKPwBgH/fGy8DjBLpyaLb9sAY7ampauguT/Y8MfEkjjhogwkl9PV33+uFX35Q4mojU/Cm+Kan59TxZ9uf7Vid/49z0ldjx5desRUSsxFC7OspdB8s6tNwNzXAcJdk4ICAEMQ4gR11FQ3Pb2QDUaX6EFhhX2BY2EON7W+sO57G6saVtCTPuGLtRIMfAjTxcc4mJDF81sb4pcRQR3ep86B1pPK5O93Rd4HXKDIEgnMNRlKff4urPgM28qD+VVA/RJd94E8T8AxPb126mZOnNQb9Be823hobE3yFGNwI4zpXmNYtzB6dS9/63Lq+qf9ZmPLDl4bnR8XvZm1ZDS2IBmNTD/U6j+YiuS/d0niu3iKh3hXDKF4IIB8cJKyUvk47hXoAOKzQix8xKo+VVl6ZN1Y/VDQuXfgq2/asxCx4YYOIvZnqStwVd9NoNBNhosXVkKtWhNJVKvpAJIk9JuhsRA7dkertO1/VpPAlCiQ+HsvOu5by2kPPj7mwcyx7vTr4RZPLl3xHmbLgO4DPGMKXPrUnzvuVLoM5y3YcXEJ5DllvT6eSrQdotaScGASGIicE9tFobGxMNbJHatLdThTLwchy51njcWjc4TPeBsH+xsmoeq/NEZGE5iBY5Gp8j0Ll9vZ03BCWHYhmNLW0nMkPdJLaKm5jvg41lukfrCAgCeg0wfmooHj9gTffyv7oHazwJoq4pcZBFeeUDh2gHa+x49pj+7hWvVDKRChpQK13KBV6bPtU3tylFhNO21sUPIoclJkoF9QvbP0I2K5HnLoX8b9vMh/ankr2VtF3yjC8CQhDji3fotTU2tV3OhK84Fu/K2vi2ie7uWzfXt7R9eayYCRU/Vuu0pNQpe3hg7heq0atvbU28y01kADaSzYf29SXTbsCiIAhVR31z2zUEKNFH8XvLBh/U35tcM1EEobkdqqa8xbAOy/R29Fczmeqb2hcT8fKxWm+lYsjm5uN3yGH4QYDqRutjOjOd6lSiqrKfbrwmymakbGMlQQkCQhBTeFJoT76OxdvY2P5RtvKnt6IMuKLHao2WlEKtapHV1ltE+1JmvtN5i/C67URGEKVk6nnSHa8bhPt2mMcrJBm+x7vCn3MWH/DY6uSfi2Mzotq6cSHs3EIiamDQ39tsn2J6AyhVoECOmY7KpDofnMJLb8Y0XQhiCg91QbNkhWNjHbNp60+gPHrDMO26RkygsiZr4+uOD8NCe4TosvcpNUlmupcYq3bbzX7BrQWlKh1HglDiOq119HgRhOZWE6p4prG5/YtMfD6YfsnglbPjnAliUW4yfzQ3z9fZogMyqzuVhbV8kxwBIYhJPkBezdOf6ks3/4am9i+A+MeOcgKf/Oub2k8gyp/wt0oZmE7MpDrvGm/4xpEgJlTEpBXtgPtjqDmst/fut8Yb90rqK9xSfg5Qq2PieL4vVVKH5IkWASGIaPGNrHTdqZ41m7bmio+gj9WNTW0/YcLnip0h4K+w+aC+vuSTkXXQo+CICKLkIbb42N/be+/z7qaMxw2isTHxSVj0CAPvGcWd8dO+VNfnxxvzSurbb+HSPWps+2aXaq6vhlol9UieaBEQgogW38hKL1EfBErkx6pyj/cD48dq/SOyt4ZPZB0uFBwFQSxYmNg7ZpN6q9l760kXj9bW8CKdyGU8CKK+ue0oAu4fg2eAx+Gox0FXfn6u2LGPw7aVfyhl1R93pMsy+OJMb/KqSjXeJqJPM71OIYgpOAN0m7af3n5jS8kjsPFjtd61RfB3jLBgjoAgqKGl7Uow/tnZRr9b1rgQROmj+dAWi+kjMr1dyiHjpPlGDiA73a38Nvo06i/M/E+ZVLJLyGHSDJ1RQ4QgjGCaXIk02i1DAC9K9yaVPUTJ19CQ2A1x6gbjA1t/NNNlj2BDrgrMMNtTsO34F41/IV/L4gkiCM83kaoArTKzh5qyo1T+X7Jxym674TGd0kGV1Uv2iBEQgogY4CiK12i3lBMZUUNL+7VgPsfRHqPHat1mSBMoC6+WINSGNjRk7QWLj2HgTAC7lYwR8yXpVFK5KdHGMRgPgmhoabsVjFOKbZvIdx+/ORzAeeLTZPF5br9NUawPKTM8BIQgwsNyXErS2T4wcG2mt+s8vwbUN7cduCVewy+dhlEmj9XazZBwcaan6/vj0mFXJQE2pIqaZ+IwcCIIAsCf4pRt6elZ8XpFHYsoU0FZ4l4CXmGQsp+IbVGZ/TQB/+B6g1AtkHeIiMYhqmKFIKJCNqJyNe8JRrrxerXJ8pbVWhuIYN5TQ0UiYoIwcjk+HgQRxLI7VIBDKiwfXbCm5htMUD6knI/VRt5jQ2qGFFMlAkIQVQI4ntk9bB/6hmbjcD8X2cU2akKSln2srlakEzY+ERGEchx30e67cJeJnHxcCKL0kXpSvkGUGV8vb7l/yFrWIY+v7ng57Pkh5YWLgBBEuHhGWpruNG8iJholCJdjv5G/+z9W624eSgzzt21qj33q4TsKwYAi7faYwkMgiHxAHQKeY+JHmKyVutgVfj0aF4IoNU6cqi4qVKChi0H5+CFbv0musjt+M3py1yQEMbnHZ0zrNLYPYbTe0zGeKlyvxji97CCCgjgeBFHXlGi2KO+lV0XiK3C5uZO8oH2KMr1HvPTQXLVE2faZXrYQxBSZAR7O1UJpfblbiNtj7ERq1EwGkdd4EITWAn4CtceqnGg6l+FTym1Ilf2fstmFIKbI0Gk9e4bXdl81WZ0GlM6tRzXNaWhuWwbwV4npPrb4vtrYtk8pl+XuMmcKQajwrIPZt5MAHemQy4S6qSrxYRZZpYH0VyLcT7b1aCrVobSktOq91YyvxvX8pNTKqqaP0zGvEMTUGFWdHUMWwGsAqX+DfcTbu4LK+z5W
6w3twvPmqnt897qlzBSCUAOq2VSNNNZMJ4Pu0BGVjYsQhOmoTK50QhCTazy0rfGIeGYc1ctdqC74T5nHah1BhaZVo4lboQ6xWu2smUQQOlzUBj6rZt1ZIbhZ141pyUFBOQ20LTrNAj7CwIfAeM7C5mNSqfs3BVk69S1tXyHG5Y7bkGfEviDlStpoERCCiBbfUErX2D6gGhHP/NZl75mVzf4aoI87Guj7WK09bfrEMQ7Qca0vJADnpHu7rp+pIibVbw9Hi6F4RG1qWvIPNsUeBrCrA+MSUWMh8ty9jjRGFvjOcfMQlwWOSxJgTknSkBAQgggJyKiK8XCGpvXcGqQNGpsIEOOKvlTXV3XlqMhitdtsuIOZ2x2/54hwal9P15hYEYHa0dK2hBl3uGIue+rJz6QbhMKxqWlpi032ymLMcfU3BjL20PCRjz32szeDYF1MqxQeajdT0uWKG6yxkK9WtVrVOdms8SvBbKbmEYKY5COvEzP4eW417Y5erONvWd2wcOmnYNtK9bIYwlRVN8DMp1XiqbOhJbEATL9wlZdjxjmZVJczyNFot2YaQagDwlB23rUMVhbJzm8VctyeTif/z3TMVbp8CNE5G64D+AxnPgb+y7KHD+7r+9k659894gOxpu4AAASJSURBVGK/hBwfkE4nnytXtwcZBb6FlKtHfo8GASGIaHANrVSN7YOv51bTij1uJuUsq6mhJXEhOG/0FHPUlSXGDblszaX9/XdtLNeGRCIRe+V1Uo7orgbwblf6h2rjfJxX6MuZRhAKm8bGxXuyFX8AwD5jseJnycLZpg7wPIL4FEl+aSaVHBt/olBZQcy03OXH65HB2Zzws+BX5EIWXesMNpUvknB1uqfrgii0pcrNPfk9GAJCEMHwGtfUetuH8IzUdG8b5SyrvU6gBWDWg3ArE92ks05Wbx81ueyRxLgYoA9rwHyG7OzhfX0rXvQCeiYShMLC47alfsptiRPxOIGuHJptP+zesNV4zZmzYZ9cPvY02lyivHx+EF+S7kn+0GvD9hAvquRP2BQ/vb9n+e9deamuZdnHiHM3ElDvuqlUJR4b1wUolanok/JNVgR01rTljNqC9MXDwtX3sTq/WTUk5iBOKsiOCn/pvEk4q3ep4fJ7NbeF0fRKxEE5Pq6c2GKmEsTITSLRwBbd43pYHjvkhDfBtGHkj1wLYGffMSL+yu4748pyPqjULca24vcR8AnNHHuFgV4CbWTwdgQ0A9hddwBAjheVG+Mgc1jSRouAEES0+FZTuk4NsezmHbRCt5V0Pj/RdemeznP9RABlxERBmpED4U57qOZcE/HUTCYIBaqPmCgI5irtazbzqf2ppIpQZ2QYV9e6eHcrG7sHoLqglQHcz/HY0kx3xwvB80qOiUJACGKikC9TbzVR4IJ0ycNCu1wAotEqlFtna1bNRQSoOMRzA9SdA5BhWF/K9HY8ZrpJzXSCUPgqcn51LT5j23T5lhgf+/ncEHTDsZ6Af8sN11xrQsjuAkZujzgHTF/zuxE68q2Hsn+w+fp0OqkcJco3hRAQgpikg6V/H9DbBlTTBQ8tlXKP1SVVKjn17Nkb5jMhweDPFEQbTvFGQeSENWD+pZ2Nr+zvX742aNuFIMYilhcTxrNHgugIgD4BuER5eZETXiDQb4mR3Lx5+zVr1tw0HBR3d/p8qNGsdRhgnwbQvgDmFdIo4n8DQB9Aydq4/aCXwkG1bZD80SMgBBE9xlKDICAICAJTEgEhiCk5bNJoQUAQEASiR0AIInqMpQZBQBAQBKYkAkIQU3LYpNGCgCAgCESPgBBE9BhLDYKAICAITEkEhCCm5LBJowUBQUAQiB4BIYjoMZYaBAFBQBCYkggIQUzJYZNGCwKCgCAQPQJCENFjLDUIAoKAIDAlERCCmJLDJo0WBAQBQSB6BIQgosdYahAEBAFBYEoiIAQxJYdNGi0ICAKCQPQICEFEj7HUIAgIAoLAlERACGJKDps0WhAQBASB6BEQgogeY6lBEBAEBIEpiYAQxJQcNmm0ICAICALRIyAEET3GUoMgIAgIAlMSASGIKTls0mhBQBAQBKJHQAgieoylBkFAEBAEpiQCQhBTctik0YKAICAIRI+AEET0GEsNgoAgIAhMSQSEIKbksEmjBQFBQBCIHgEhiOgxlhoEAUFAEJiSCAhBTMlhk0YLAoKAIBA9Av8fOT8cXA9P5EoAAAAASUVORK5CYII="/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-55"><g><path d="M 511.61 171 L 550.39 171 C 567.29 171 581 184.43 581 201 C 581 217.57 567.29 231 550.39 231 L 511.61 231 C 494.71 231 481 217.57 481 201 C 481 184.43 494.71 171 511.61 171 Z" fill="#f27979" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 201px; margin-left: 482px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">MP1 puts system back to sleep</div></div></div></foreignObject><image x="482" y="187" width="98" height="32" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAACACAYAAAARZ/7/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXtgXEX1/+fc3U1SUChflAIWER+o+OJLgbxrFKgtAi0tm7RApfxQHlKpIogiX/EBAiKi/VaQNwWFJmuhIMqjiLFJNptC+QLW6hcRVJ4t8i1QaJpk954fc7Ob3L079965d3dDkp37V5uduffM58zMZ+bMOWcI+tEIaAQ0AhoBjYAEAdKoaAQ0AhoBjYBGQIaAJgjdLzQCGgGNgEZAioAmCN0xNAIaAY2ARkAThO4DGgGNgEZAI6COgN5BqGOlS2oENAIagYpCQBNERalbN1YjoBHQCKgjoAlCHStdUiOgEdAIVBQCmiAqSt26sRoBjYBGQB2Bogiirrn1G8S4TPY5Yvww2dPxbXVR3EvOaFn0rqp0+vcAfdJZioFjUt0dv5HVrmtqPZqAu4uQoR/ASwC/CMaDJuHu9+6FxxKJRKaId0qr1tfHP4gI/R7Ae8F4qDrGczs7E2+U+jv6fRoBjYBGQBWBshEEwH0RxOZ0d9+2VVUYt3J1Ta2HE/BbAFVjTBAykZ4HcO70vbijVEQxY8apsdiUV68h4GTrg5ogiu0yVv36+vgUGDgFhMHe7sS1JXmpfsm4ROCQmQv3ibJ5NmV4ZTKZeGxcCjkBhSojQeBNk3l2X0+iu1hc6ppaf0bAWbL3lHkH4SV6x2ANn7rhwcRrRbaP6pvj54DpEgARTRBFogkgHo9Hnn/JOIrZvBSgjzDhvFRXx4+Kf7N+w3hDoLb2hF2MqsGvgukcAGkTfERfd2LDeJNzospTToJAKcxMXual4cV2WU1MPnqlawf7d126YcO1Q2E7QENz6wJm3Apgysg79A4iLJzDO4fm1qvBOD33Ek0QRcE5bis3NBy7BxuxPwA4ICvkVk0QpVVXWQkC4ORgDY4sZpXtZV4KQxBehOKEVph+qqq27kFRo5nZvFCsRh1lMkRYkuzq+GUYtTQ0xz/PTIIcdsurrwkiDJwjdeqbW28G4yRNEEXBOO4rNzfP3yvN0S4AH9AEUR51lZogBgEMAHhnVtzXyeTZyWSiN6T4VN/cthzMS7P1xUrdGDHFhNhBBCEIu8zCnk0GLWfCF+1/Z+BxwxyalUzeuSVAG6muOX4SMV2Vt3MYmdH0IXUALAuKaoIoBr2JU1cTRPl1VWqC+DvAjwM0Pyc6Ad9Ldnd8N0xTamsXTTNimQcBfDxbX/xbrBb2G51Lg5mYwhKE+N6Mw+O7Vu+gBANH2NqTYeb5qZ6EkreURTQR4yIGL7MTnd5BhOkh8jqaIEqH5Xh+kyaI8munDARB3wf4ulGPo/Bmpvqm+GyA7sq+K0Ogsxh8tm1LGfgMohiCEOpwyGRpiIHlqe4OMeF7Pg0NbYewYd4iMVWJndeoh5Y2MflB6fm7Joii4JswlTVBlF9VJScIkyLzDU7faotZCGtmcpqXNsIwvgDTTLydBCE9NPeZ0IULXoz5cmZeACDqUOt9TNxOTDeN/F0TRFE9XxNEUfBNmMqaIMqvqpITRJTSzUMc/abdLTWMmanAvES0AmnzUkToj28nQbS0xN8xMER3gfDZUfW4x3zIy1s10wKXgf6pl8WmvDo7L6BvDAhCItffhe66uu54UQjX2Bj/sGnQV2FiNgj7ZM1h/WD+KxN+aQ6mV65ff+crKl20tik+wwCttR3GB/I28avvgbFcPMLK3q6OJW6yi74XiWXaGNwGog+DsXu2bBrACwD6AEpUR817xyqY0TJvDtDRYHyBAREwOi0rkwjafImATczcnhlKr/HSy6Ez4/tFTCsgc9RMG8IN2IoxiWA1QHNyODLTiame9l/JcC2V/JK+4NkFVSwGwi362RcjBxOZS8CW+Xhf20JuMwFPgHBLVYTXqOrbb3wJchvi2JkEcwFAH8x+Lw3wUwxjdcagax5et+pZSeOotjn+UQP0FcfY3AbgSSZeURPBr1Xl9Bu/5SKIj+UFtoWY8BymnEGA50Yp87jDa2HMTUwuk1He5GoHXVaegRQTn9LXldgkyhZEfIfAy0/Rzt/dOnA6HXkVhvE9EH9NstuxvybNwFU1Uf62X2f0m+D9ZPerXyqCOKQlvmcsY/zUZacnE3MbiH+ANFb09iZE1H3JH6ttGfoBGKdJHRoKv5gmxtWZdOyCvr5fvS6bYOqaWm8YCcoUBUL0N4lOnskYfNj6dYlnCvp/CeUvMUFQw8z4Z9jEzyVm36L0XYrxlVtA5tzo6+vj/0FR4ypmPs71/HJY6hfI4MXJdYmHiu2QZSGI7ZHqIUdqjEArRgAF5iVzKHJ4VdWAMS4JwmOAOTrKywCWOaOwxwtBRJCZZXLkW05PLe9Oxn1mNHNcX+cdz7muyCfADiKb6kScd+V86gOMLe5MR7Ho4c7ESwEq+RYVEwIi1A7gcN/CjgIMPAmD5qXWtf/FWVdyjhZ0fMKZZoeBm1LdHacMH8kNP+WQv1QEoeQs4gq6f5+XEQQymaM4GrmCGEcq6jMD4m/1diV+3NAw/72mEb2LgE8p1t3GwPxUd4dw7An9lIUghJnCGf0cJFhJZl7q7Wo/q7l5/p5vN0HU18ffgyh1giG2hbknUR3dcnxnZ6cwQeQ9oqPsSNN1xNwDEzfIVprjhCCeYeBeAr5sa4DY8m4AjMdBXA0Tnwbhfc42ErB2oIbjbvEufjsAv97rV7+lZUnNwND2pSD60PC7zM8AuX9bf1gP0Gj6BTbX9/Ykbsh918U7Tfy8DeA/AcZG8R8G70JAE4DpTpnFBDnUP/W0YoIm7e+0YnCmvLYC4FMd37LMfCDjUQBiAqkWFkGwZTIajsQfebhzsAbznHqReAciiBlY4FW1A78DqCH7KWuH39uduG8E0zLJX9ey8H2U5q+K3RQz70SEYwDsYpPjHsD49wgEBv2id92q/1HEFiC8QoxHGSR2QhEGH0zAxyS76U1kpo9MJu/4p6z/SgjieYCfsJvkhvsXHgToZYDfDcJMmzkz99qtICNObJ6X5z3J+AcM/BFMA4D5KYBmFMoY3kEo9/FyEkR+/qQA21iZeUl0Psmh1JibmBqa2xYyswiMGxmMQchP1pnGCUHYRcswY00mYnzNYQel+pkLD4QpPLFGXI+tegS6uiq6+SwZSfpN8MUShLN+0EPq+sb4KSC6xqbTfiIsG9g+9WbJhO+GQVhnDGnz65oWthDM39nMShkGLqmJ8mUyk544W4iadI3TBZuITkx2ta9yfqQwfY36ZNLQEK9ngwQZ5CbmjWKH39d3++bcd8otv/hOyEPqwtQ2ltD8V5Nx1nv3xkPOHGuHHnrs7kZV7PsEy8xnJ+H7q6N8nEwfPmbPrUQ4a2D71HZ7/8rmDvsmiESSU/t3bF6O/IhJOClnns7hbeWiMjPCQ7LFpusC4vYba87fy0YQEm8f1W2sw7zETwxGo4dt6Lz93283QYjQftOIPeDY5hU9MYwzgsgQcNFA/9SL3VbDLqYDVxzGM0FYu4/0mwmAjrINjqW93
R0/9xpMYiWLtHk/Afvnyqm6O6sM0oIJnGiF2EXbTTjO92RX9mvyJwm+pzq6c7yz8+Yd9vKSSV65Hzc0tX6XgQtHVpmSzM3llj8sQbiYqB5Ehtt6exP/56EbWXCrayYFD4LYAuKje7sS62Xfkh3+21bznjv1cvTJshGEaFQYM5Nz+2sfdG8nQbgwNMB8F0wsKuaQcjwRhKqpRNYZ4TKJjWeCEP2NYpl1toledSGD+qbWMwGsGB3opclg/MlZi3d+x/aBO+27ARVvHCGH83yBgWejyLR0d69+2j4hScxESmampqbjd8tg6F6AarPvKyCWsZA/JEE4zzbFazxNRY5JnOqaWsVO4gKbzqXphDwIwn/x0dh2ApFlpbA/LxicmdXTs/rPnguXgsSm8gWCyiJFlCk3QTjSdPO9yGCB12Ra1xg/hojuyG6xBhn4fO6gZYwJgma0LNo9ls4cSIQTwWiVeJFsZRhzUt2r+lQBl5UbRwSxhUw6LJlst2zufk/BfSCEp5Dmlt7ehEiJPvKMZ4KQ9CnlLMTDgY8sJvI3ATxqAI9xhpcXs1gQoEk93zxcSPOwbpk/3UhH1gIkziYeY9CmGJk/6+pKCAeJvKfwPhd/M1NhbrTCOmMlf1ATk8TFN3AuNck78uaoHMAuBCH19HLqRbbLkTkByMZnfWPbaSD+xchvAUz7sveVlSAkZia/CUgwtM0Fb9S8JIQvBUH4TXoBfh/xMPDa9qu8b/wQBN1WHd18kuwcQUpsM9s+SiYLn/q9sr9LB8t4JojGxmPeaVKNuHDq06NtLI9XkkpfyJahusbWW6yFSfbx8koK8N68og0NbR9nw9LfHtkffMlRknpftiIeE/mDEoRj8SmWx9IFjReeLS0t0YH0HreJrPK5crKs1S7xUlJzn/N7TU0L3p9GpJNgxR9Zj1eMib1+qeeSshKEEFyxQ1ltdHoIOW2644gg0iA+f/qe+EkpLg0qtVJVJoxiVnm598smVyacnerquNIuw3gmCCGn06aelf1VEFYZJq3csWPXDaXyTlLRjSWTxBkCQD8DdxF4ZXV0507nuYLqu3PlZOcvXin6Jd5Prgu+sZA/KEE0NLZezITzRyZ2YO0bO1Uf+8QDt4odoPJT19x6PjEuHq1QaMaRjS/V6w8k7ZIuvGQCl3ouGQuCUDYzORi+YDUzDghCRK2mTOJTnV4Eyr1LUrDUSlWRRdKBXyWTZiWT7Q+r1M+VcXoMQRKlPN4JIu+6V3nj+0F4DCat5Bjdn+pcJVwbR/z9g+ClWjbrOXMPAXUudUSg4p+J0c4RWrPPNPPJMIuVugJ7t/s5SmH8hLt9eyzkD0IQspU/wH8DDHGfRMCHDwRwqI0gCm7PlC7AFCPWg7TLKXip55KyE0QAM5PTvFQA+pgTBOEVZrwCxnrDwP2ZwchauytfwF7lWrzUSlWRS9KBnzGj6ZleAW+y9zpXZQAKYkLGO0GIdtU1ts0n4pttqeq9YNwCwh3Mxs377JV5JMzErKKjxsaFB5tkiuC9vRXKC5/6e8ikmwcGdv2D6o5HYlN3MzMVHPD6mT3KLX+QiTRwtL0C4LYiBZkUitmhB2nXhCcIa/A5T9aZTu/taRd+5yOP07wk246VgiBUvUGC9Y/iSo8TgnBNF+LVuoKDTsmh2EQgCNHG2ub4AcQkzsDcVu0yKF4l4IrMUGy5S2qLojqH8J6LZMwriTDPJ72C/Tsi7ceNUUpfnMut5SbE8Mp62kqAj8+VkbnrSgJElQ5cyyl/kIl0XBCEx+2Xdv0EaddkIQinmangsMbPvCSA0ARR1FyTV9kvmZjqlyYTQWTbbCVDI6ZzCBA5b3KXX/lBspENapWltvCrqPK7NdGa5jICFtsOlf2qKuXkkZiORmKPch9wHvCqetXk6pdD/iATqSaIxBt+nUX2e9lNTOKjEjPTi2zQYbbB5Gte0gQRRr3udUpGEM7d4QQ8g3BDycryudnYnzI8jwltLikX7NU3mtH0nKBmuoCapbqWhfvSEH8OBp8EhrCHj95nXviyLTCM2c50E/ZiksPnvENRyS6jmAjdkslfLEEUmwHBS28uSTqPSXV3CI85zydIuybFDkI0wsvMVGBecrmFTu8g/LqW+u/ODkzAv2HyEclkYjRnkf/rZO6MBZcnTRQTk19zLa8fc8dBYHMxGOLWxJx7qL2qbyCU33eC/C5yNtXUvLJ/hiKLidEmy5XlFsBo/45X5LPknKIgtUYQme1li5E/yEQqC94jxvXJno4vhZVdE4QPcoWBNnC1Y0sCbEbMTI7tq2vIvyaI0nVlyQpH2ZUuJ4UkqhaQnC/VFcZLKEcrW4uLxrY5RCxWXrn8NJ71g+ZiCoOq2F089wKWgOi/7St4kbgwjOtkGBkkdai+uXUWGCJX1gh5iRgKHorM9HKwqG2MNxlk5Vfaefi9ozFITpdVVXfNEG0KJH8QghCyNDS2XpeXqbjIIDJNECUkCKeZyZYC4BlHcJw0dF2IogkixJBzqSL10w54f7jqzkCit0AutXXNrV8jxk9sTSkZQdQ1LawlzhxJRPUMHJC1rf+XItLCNHoJAefZyoc67M9bzQtCZfMoMNULCy0DyZrolsXKAYzN8SV5NxQCvoQsy9CazWLwe8f9Eb45m6wFwRjIH5QgCqKMAb/AXVk3yHlziUC550SWYGZzbU3s5dV2/WgTk1jZNbd+gxiXqQ6OgtxMTCeSaXba02d7pR3WBKE4bSkUc4n0dCVn2SsLA8zkqRqkemNakOppFylVPB+XZHqlIwhnHw64qizwQIP7Ltqvrbnfi434VSVupzzO8Sx2CmzyVfnp7RXSceSnywkcsawqf1CCkESOi2CWi1LdHd9RjWuRxczIvL40QYQjCIc3ExJEaGeGuBRFmA88VyeaIFSnGP9yLl4dIs330lRPx2guF5dXNTYu+JhJkQccPvpS+7vUFKWQnVR8urFxYbNJ5j221NLizyUjiELTCl432Diqp2dVlz+K0kXSHw3ecXRPz90iLiHUI0m1EChnUAHBAEoxLgUTKOMhEK9467Y1kS68KtsY3zOWsZI/KEFk02nfDqK5NsV4Zle1K1Ac1g+mpy1n8Bm2v/czjCNT3as688sWXk2s6mIftF15u8+m1qNLeX3xmHgx5Rog8Wb6Oxj/HL3f2Xt1ogki1HwjreSVjpiBE7xuoqodTgj3a1tGT/GNv6QN43Nu9+gWXHMJ+A5Mj1veAhGEl81cltVUNcOnLKNtKezzstgEKOAllCBP+V0YvCjrFJJU0y8S4SFmnJAtr2SSGSv5JfOBby6p2sb4EQaRCD4c8fxSzHNFdY3xViK6Kc9rzCWbs95BhNhBiE4myc000lf93M40QYwJQYiPuN21nL3D17gO4PfbpPFd4UpWtSKZ8NNkmF9KrkuIdAcjqSuEZ0us5rVWIhY5nd4tabUnQUj62CZkeG5vb+IpGYINza0nMkNEUdsuaXG/QEYIPnyXcQEOSimZVbTosnN6mZm+NrRj1w5ZpLQI9DMYKwE62L7CNZnn9vUk1qp8tzD1hr2WeurosZBfsuCE8Exik89y
y6jrsguwdqVgPhcmbnPWra09YZdIdOgiJoidQ1Rl96EJIjxBOM1MObx9D9I0QagMcbUyLjsI281V1nu2EZAS1y9mr9v8rNy1k64d7N91qVd6B89AJdv1iW8lotuPhyOZRwLUhGcQCP3M1vWS1mA2wUf0dSc2SFfCzpTHw4XEdbAvAJQmxrqq2JQzcsnuXEwPw7Xyr6CEJR/hIMnVkCXL7pttk/Do+QkY4npN55N3Fap1XSUgrv+cVljUXzf2OhKX1pGf/VJrOL5ddvldzqesfjt8jadQn/ndZHfiVrtsHlfMimLDebeY/jRcx+06T/Qz8ddTXYmrZX1QE0RIgpCxvgWwwuGgJghZVwz3NxcvpmUm8P8CXIyeAeHKwe1Tz1fJ/VM/c+F/wuRfO3YfPg0YTr0dzdClYJyULexJEC7nI7bvFCakO6Qlvmc0jdsdVzaqgpsWzhUD/VMvU8FB9aWWjtLGVQCL6Omgj7ii9JaaKJ8luxLT7WUu5iFRXCm1hv29YyF/XWPr6UTWpU2O+7iHJXEz+Q2b4ujat8yJ4p6XoI9IZXJub3fHVW6H25ogQhKE0ITMzORnXhL1NEEE7cfu5d06MGW4h6LGVcws0kxIB93wW/mvZOBMp3nIT8IAuXlEttKraqL8bTHBOWIb/HabsushR0Rzu2VN7CQ4QhcSIK729IpOtjdzIxm8LCgOfjjlfrfiLV6k098ybVwEYKpiPXFh07nT9+KOMIkEZebAoKk1xkp+BRIqSB7pkE0QxOUA3qOCLYsdNfEpftmcNUEURxBOM5PfgLd0pwlCpQurlfHpwFTXtPBQgrkMBBF4tXv2reJS+ocYuHGfvfgPYSaf7HusfEcG6CtgHAFg36xtV6zM/sGgOzIGXWM/8A5IENZnhm3yJK6HFKYxu/nF8zAzO+nMAVj4uourNUU21ZztWZh3/gUYD8Iwb5g+DZuKwEFNWeLwecapserq1z4DwiImsxGg6TYS6wf4OWKjJwPzhvSO3fqK2cnIUm8APLe3OyEC6UI95ZRfkOjzL9E8Zj4boE/k58/yvwZWyBat2VobgXGKBFtL3wy6xwTduL67/W8qLrGaIEJ1E11pvCBQTAceL23QcpQHAQlBlCy1Rnkk1m8tFwJFubmWSyj93vIjoAmi/BhP1C84s7uWwnV3omJR6XJrgqjQHqAJokIV79NsySG1b2oNjeTkRUATxOTVrWfLNEFUqOJ9ml2YjoLvRQYL3OIKNIqTGwFNEJNbv66t0wRRoYr3aLY4rK2a8toKgE/NFssQ0YnJrnaRakM/FYiAJogKVLposiaIClV8ttkiXUpkkAeTyTutgLL6+vgH2KBL7VebMvC4YQ7NSibv3FLZaFVu6zVBVKjuNUFUqOKzza5rbJtPxKs9UPBNn1LZCFZG6zVBVIaeC1qpCaJCFZ8jiKZWt5Q32RLBUnRUNpqTt/WaICavbj1bpgmiQhWfMzE1xWcYIJHAbzcJEh2DNXzqhgcTr1U2Srr1miAqtA9ogqhQxWeb3dIyb+pAuvrHIJ6XjZRPA7yBiC5/z568ZiyiwytbAxOj9ZogJoaetJQaAY2ARmDMEdAEMeaQ6w9qBDQCGoGJgYAmiImhJy2lRkAjoBEYcwQ0QYw55PqDGgGNgEZgYiCgCWJi6ElLqRHQCGgExhwBTRBjDrn+oEZAI6ARmBgIaIKYGHrSUmoENAIagTFHQBPEmEOuP6gR0AhoBCYGApogJoaetJQaAY2ARmDMEdAEMeaQ6w9qBDQCGoGJgYAmiImhJy2lRkAjoBEYcwQqgiDqmlqPJuDuEXQZD1XHeG5nZ+KNMUd8gnxQ52qaIIpSELO2MDHfVhN8RF93YoNCdV2kghHQBFHByvdq+kQgiPr6+BQYOAWEwd7uxLValXIENEHonhEWAU0QYZGb5PXGM0HE4/HI8y8ZRzGblwL0ESacl+rq+NEkV0no5mmCCA1dxVfUBFHxXUAOwHgmiPrm1qvBOD0nuSYI706sCUIP8rAIaIIIi9wkrzfOCeJmME7SBKHWCTVBqOGkSxUioAlC9wopApogJk/H0AQxeXQ51i3RBDHWiE+Q72mCmCCKUhBTE4QCSLqIFAFNELpj6B3EJO8DmiAmuYLL2DxNEGUEdyK/Wu8gJrL28mXXBDF5dDnWLal4gpgx49RY9U5b5zLjawB9AsA7AWTAeJZAnabB1++zJ6eKuMSd6loW7ot0Zr4Bms3AAQD2BBDJKjsD4CUCNjFze2YovWb9+jtfCdsRDj302N0jseg8Impj4JMApmXf1Q/gHwA9YJJ5bV9X4i8A2O07YQmibmbbR2HyGgL2t727n5lPTvUkOry+GUQWT3wIK3u7Opb4YXjIzIX7RDmzEExtGJZX6F48I1jBMG+YPg2bitC/nxgFv9fWLpoWiWXaGNwGog+DsXu2UBrACwD6AEpUR817VYI9iyUI4Vb87IuRg4nMJWAcAWBfANGsTJsJeAKEW6oivEZFHi9ArH6XNuYA5skAHWTrv6Lt/ySmrqBjsrl5/l5pjnYB+ID17fxAWaprWngowVwG4LO2720D+E9EdHUp2hW4E4yTChVNEIODxkfZMG8RvvQ++tgI4qW9XYk/BtAbNcxsbWaTrwDo4AD10sS4OpOOXdDX96vXVevV1p6wC8WGfkDAlwBM8amXAbiLo5GTU52r/iErG4YgGhrm78tG9HcYJsHcUxQ5iJfIZCmGIGqb4wcYTD8BcLiNqL1euZEMXpZcl/hDGIJT1eEhLfE9Yxnjp8y8wDYBe1XfBuIfII0Vvb0JQWrSpwiCoIaZ8c+wiZ8rjBHxbSV5ZEJaQY9RLAXTf9mI2qPt/FcycKaKTtwIYmAAVRQ1rmLm43z6wTYwvj+4Y+rPNmy4dkhVn5OhXMUSBAO3EOG/1TqjpWoxAM/t7e64ym+SELuSqp1e/SGsXcnITiFQf2HgccNMz00m7/inX8WGhng9G/RrAHv7lXX8voWBE1LdHQ866wUlCFdyIP5yqiux0g8zL7lLSBBU39S2GOAVAfSeEy1NwPcG+qdeVo5Jor4+/kFE6C4HuSqqkzvTUSx6uDPxkqxCGIIQEzZFjIsYLFbWud2uqjx9ZjRzXF/nHc+pVKhrWfg+SmdWAVSrUt5WJgPClYPbp57vpRMZQRjgk02iG7KLBNXPdgzW8KkbHky8plphoperVIJ4DWRtkXe2KVBMlg8R6HWA3w+gUbIS7yfC4mRXx2oPxVN9c/wcMF1SMLAIr4D5fwFjo1WfuBrMtQB9ULZiJMb1VbEtZ3R2dorttfSpb44fCqbfANjDUUAMnmcA9IBpgMD7MVAnmRi3wDBm965b9T/2+kEIQqx8o2ncDlCL7R39XAJyEO9raVlSMzC0fSmIPjT8fvMzQO7f1h/WA/TYyLfZXN/bkxCD3/646wVIM/BnAj1imReJPwHGgRL9Zwh07UD/rstKSRIzDo/vWr2DEgzLfGN/LDNHrr8weBcCmgBMd3YGBm4a6p96mkyuoARhLXCmvLYC4FMLOh3hFWI8yiDRtyIMPpiAj0n67yYy00f6LXBcFhb
is5aZlwkpMSa92g7QtYP9uy5100kBQYCTzPQiEcROLfeIBWAPQE9nvyXMTc4xJQat57c85oUJ+VNlEkT+vPE0GeaXnFtVMUHuSNPXCbjA0flfMDgzq6dn9Z9lGpcMRtGpnjbZPP29e+MhmS1bmIeMWPpbAJ/j+NYWMumwZLJ9mFAcT23L/OlGOnovgI/bfsowY40Z4XPXr0uIQTzyWJN+hi4E46t532G+CyYW2c0UqgThMrkp77bCjJr65tbAgXJ1jfFjiGiVY9JPA/Rjcyh6idOc52HyyDD4G6nuhDBRleSpb4yfAqJrbAsKsRBZNrB96s2SSY/qZy48EKYwjebp/XUyeXYymegt6CfBkvW5EClmlP2ZAAAR1klEQVT/1WScJevD4tzLqIp9n4DTHIui+6ujfJzbuYRL33Htv2Ig1TYv+oTB6RscZltPnRQSRB5CUpOuOHd57kVB2NYuw74zL7n+S9KJyvSSSicIv1UO1TXGW4noJvvE4rGyp/rmtuVgXmrT17+Q4cN6exNP+eiQ6prjJxHT9fZBxkwnpnrafyWr29DU+l0GLrSTA5gvmL43Lvc4VJVNAIMAz+3tTtyXe5cKQbwd5CDkC0oQDQ3H7mEasQcI+JQNq61EvDjZlfitl17qZy78T5j86+yuMlfUc5EQZKxau6P0mwmAjrLVW9rb3fFzr/cIswzS5v12ZwAGlqe6O4RJKO8JsoOQL3DwIDLc1tub+D8PmXL9V5hgc2dgGSIsSXZ1/FJWr6659RvEuCxg/x0+k0obVwG8WGWceRCE7y7XxfT3TMbgw5wLsCB6nyhlK5kgtoD46N6uxHrfibupVayOxE4i90hX9jNaFr2rKp3+PUDCeyj3+A72XMGmpuN3y2DoXrstlgnfTnV1/LBg0NcummbEMuLsYGT34GVmsNeX2/Tpturo5pNy5iw/gnAZpGXdOeTaEJggmtsWMrOYpEY8x7wmLifWjY0Lm00y7wGwy8hvhJ/2dnWcXczZiniX8FiiWGadbaJXTsVd39R6JgBxnpJ9uC+C2Jzu7tu22tsQgCBkCxy/RZT9U1RXMFY4OViDI512e9FuZ/8NYr7J7lruoWGzqfWIM6Jkd8d3nfpzJQjmb/X2JARBuXrziXfVNS2sJZhip75bmHE9UchAJmfFEoRbZ5KBdOjM+H4Rk34PYD+vDlJfH/8PitCXGRDueQeCkIlw5nPd3aufVu0kzskPLi6bWZPJHbZJT3liEbI0OCZNBp7kocjMvr7bN4vfvQjCxUadZvB5qe7ElX4DThULt3JBCEK+Qud7kcECL88fx7cLJ07CU0hzS29v4vli2iOZvN40mWf39SS6/d7b0NB2CBt8JwNvAnjUAB7jDC93tkuVICT93HMHoDhWBhn4vNMRwtn/AKjutEc+W/gOfmIwGj1sQ+ft/7bL5kIQG82hyOG5/u6FdUtLS3RwaI+rmfDFUS6ujDtlKpUgPG37ks4iVkY3EHCy7bdEdXTL8V4HyH4DXPa7MkE0tf6MgLNG3xFs0mtqWvD+DAxhXsmA6BECejjNq3NmBDeCGOqfep/kAHPMyEG0NwhBiHamEekkYJ8cVl5mOzed1TbGmwwiYYLLOTZIJ76gOm9sPOadJtUIJ4NP23Tp6ZUU9BuqBFGw6AhBgmIyHUjvcRuAeE5OYvww2dPxbZvcBeNJ7H5T3R2nBFlcSHQrJVcZQQRZIA7vIloPJ0CMl6psO15kgw5LrWsX8UST9qlUgvijwTuO7um5e5uqZusa204gsswU1uNccau+x6+cCkF8ctbind+xfUCsHEe8XtxMUX7fc/tdRhBEOA6MT/CwuW3U9ZHxnd6ejouCDO6wcgUliLrGtjlELCbgnLyhBrbMfFiqNOOSsyTRzFdBWGWYtHLHjl03FOM1pUoQDY2tFzPh/JGJHVj7xk7Vxz7xwK1ih6L81DW3nk+Mi22Ed091dOd4Z+fNO8TfXEypZ6e6OsTuU/mRkivT6b097eLAf+QpZpeWe0nWIWSdzYqQYaajUz3twvQ0aZ/KJAjFSFu71p0rSAL+DZOPSCYTo+6V4boJNTYu3IsN87PMOCG7khwNdJPIKlsRMdOCVE+7MDmV5HGJPRDnNTOc7rsMpMzBoaOKiQAPInSQHUR9Y9tpIP6FbbKSmiH8vi9bGbuZ//ze5fw9exAqTJjvdanbD8JjMGklx+j+VOcqERvjaTfP67sKXkzS9oH/BhgiODDgw8JF+FAb5nlnI7Jd3Vsbw/sB8o35sQvC4BgBn7N7GUl2K5CMl2fMaHqmapyG+KZ0R10BF1VVJEGEWW2rrsLcRpLoYNvTxt4R4gMA/hAzHfTWlvUQR9qCwuoSghiL1Uzg4DTFA7+AM420eBCCKPCUKeI+cpXdXdj21TW2zSfimxUD+LaAcAezcfM+e2Ue8UsDotJ3A+s7WEP/HqV0c1fXHS+Kai6eUsHe6FZabUEV2IKgaDorTRvG0VsqlSACX1EpmZT9DoWptjn+UWI6hwjH2PLpBFO/jCAUVoTBPlJY2mfCEPmjBgDsZKu5lWHMSXWv6iv22371xw1BFEE2sjaKFCDEJM66Rjxz/LAQpigCrsgMxZa7pWbRBOGZi0kB4uEi5VwgKAsxxgU1QSgCLtkWuxKESAIXyZhXEmFewDQFW4a99fDuEbHGH0FY6Q1Axm0wLddPexCRZ2CUItS+xcYNQQDlcFQYXVgAIkdQLoGgHy4b2aBW2aGpJghNEH6dx31DFrbmBKpX19R6NAF350Qup4nJJbBKhtbmtwJ9/k6gbpOpcyhmPCzc81RWKRKyKvmBmcsOIi/3jSzQiRlLUz0dNpt/6TvKuCGIEGdZQdCwsqhuNvanDM9jQptLSgv7Kzea0fQcp209LEGU6hDe2WYVeYLg5FfWJ5urX3Xrd5m7dLnwURJojApV5A4izOGi85CagWejyLTYYxxEZHHVDqxx5CQSqnyegV8Cxn1DUdq4ofN2kc5besioQhBv2yE148rBHVPPy3nUyIKVRDpqr1QkpejXQQiidIfUhRHPIqI+2dMhsueOyWNNUuaOg8DmYjDmy3MFoSAwU2VClnnGlat9DQ3xA2HQWgbelQWu5Ascu0Kc4yWMB6Kqx9SYdIQx/EhFEgSFcN+TTDQFUauS4B+x4r4OaT5bNShLhSBknTXMrqiuqfVGAmYB/E+2kt0Zt+TOEPwiqXN9tLYxfoRBVhbSEc8rImof2L7r4mJcM73GQBCCcPqvh/U+k0Q8gwmBXTNLNbatXEEvYAmIREbiUewlfVuFIIRcDY2t141FMJgLloHPBVWxlCyoArs6S3btJYmDUW3D21WuIglCtvr3UsCwB8O0lQAfP2KmcgT2yLwcgrp/SlcpLmaMgsEMzvM19+tQUl90W94nVYKwokzT05Yz+AzbNweZ+QupnkS7nxxhfg9CEOM9UM5K48CZI4moXlwmlQ0YE3ciqDwi4OwSAs6zFc7zGBJ/VyWIwkUQggaUis/los5FoNxzIssus7m2Jvby6tE0LrL8U8ECPa12DacpEfmodiPgf03gL4bJNzldzyUEkS
GiE5Nd7SJ5o9JTiiBCpQ+Ns0IVSRAiejhIB2loaPs4Gyz81HPpfwvqSydcl+Rpbn1A8h1xZC29HU2yWwk0mLN3SIjI4Fx+obxVlSpBiLa4JI57ElHjc24XEhUzDoIQhDTVRkDvI2mqhRBRxrI2F+uG6zxfAxCaIGT9j4GLUt0d31GNu5DFdMiSCBbmkcLrBhtH9fSsEje/KT0Nza0nMkO4BueCIKWrenmqjfzcY0EXiIB6faXGjNNClUoQ4gDgccMcmpVM3ik8h1wfkXcoNuXVaxxpNgryuMgibYPYcIevPn3tVmYW11+OPi4EIcsPpZ6sT5JbBvkruCAEIYSVDFYQ6Oqq6Oazyp2OxO+wUGb6Kz5ZH63o7WoXqU6UA9ZknUySwiPQRClxFCjw8VfeQYhb3QzcDqK5NllVk1rCZTfZzzCOTHWv6rS3v7FxwcdMijyQF+QGrB2o4bjKhTwuqe4fzQwOzXIGbLrkYuo3mef29STW+s3NEjNqQfZjv3dM1N8rliCEwsQEziaf5XY+kLXzngsikUZiJLWEbEKSpr8AlFbRw3cP0E/A1nWhebd3MeOXqZ6OL8gmImm6b+Jv9XYlfuwxcclSmA8y0yJ7JHZQgrDaUDi5iOtGF6Z6EiMeZKUYKM4dhCx61v4dl3Tf25hpiV/0uYtXmvSSpTBtyzo2/O6tu8IbbPWVMqjKdm4yLFQJQnxfdqYkDnVh0DyfvEPS1PiQ3DWSbSfVN7eKPi/uJrGviG6tjppf9rrb2rrtzqDleeclgLhHQupB55bNVaVdUv0H3IGG6RfjpU5FE4RFEsDatMGnOXO7Cw+dSHXsCjBOzJ+0uXOwBvNkqxx5Th1+xCSc1NeV2ORUevZq0s+D+RLXO389OqPLxJdh4BpzcOg7zpWU6yVIRVwYZG+TLC2y6k4tyICoK0hUiE3I8FyvOzfcLgxyu/87e+XmyQy+1BGLIG6c8yPhIM2R7r4A9wt6RLcdvivauE7lnoogBOGyCxDt2Qrmc2HitoJssbUn7BKJDl3EBHEOJW5qzD2euw+XXYBY2zzCiCxNda8SqV3ydmhuwYRiHLvtPnwuDHr5rbYtm74Xd9gj0rOLwzYQiXTq9jTfgXZ4gTrCOCxcmQTB+C0IYsWWU3xGDEjASFnZTWF+CiCRc8je2a2b4WDQcc7rOXN69cipk3d9okU4bB4Eoo84bjgTEcpPAviALWukZ1oAjytHVa/RlK5Wg+4gcqvCwvsArBEeyI7tN04kh6miiriW9QWAxA1h66piU87IJYcbXbG6XAWrfuVoWa6cdNl9DYudf8UnrKtjCQdJIvNdiSsIQYhPelyBKn4ezgvF9KdhAd3GCsRlPF9PdSWu9tKnR/+12g7GOoBetq7nNfFpEN5X+D4xLs15vesSWZnyS8gOqYXgAGIjJe3fAotA1cMlQYolXxz49fW3+/eKJAgCvsnAG2+lbr5ccu+wm05cI1XzVtHBcurYq25jIpESucdgvt/mI+6bWCxAcF5e27y22CEJAi67mpKuumT263ylyS/OsVbeTW1nSHYFKuNQEO4VQ/1T/6sc7rsu93qryGWRo0hfPdA/9bJS3EmdI4mqHXQtgFZVIWzlAl0cFbb/ZhnUc9EmykgIYiuBLmCwGG/2TABeTQ3UphCYjcsqFUkQ2TOEy+ub2j4P8I15qS0K1SS9s9ZnVfRpMH7hajbKrywmnlszhnHhw+tWPSs57M4w83w/O76419plmy8T1bdNYQlCfExuznE3zYUYGbLrLUde4+fGLMwUBluTn8h5lHfmI5NFuCtH2PhKT8+qR0LIqlxF7CQ4Qhdm7/kYzejr/YaNZPAy553q9ipBdxC5usP3MpMgCLGQeo9KQwRWTHyKzKTqVV/0X4oN/YBgncOptN23D+e+JyMIE3wERSOvUDpzC0DNXrKFbZMKXuO9TCUTxI+EcoY75uAXSZw1jJp80gA/xTBWZwy6RkzcQRWZ9X6aTYC4N1eYs/a0TUabCXiCmdszQ+k1jrOCgtvLggSeibOTaFXsBAYL++mHbaaIfrAwo+E36Ujker82FUMQLp5fJb/sPTvRi7spPgtgmk1HKrey0aFNbR+KMp/EZKWM3t9mUhD3hDxJjPvTRCvXd7f/rVhvpSD9J3ud6xyARSxBbXaVmzN3bgP4X4DxIAzzhunTsKkU2Vy95BP6jNZsrY3AOIXJbARoum0St+Rh0D0m6MZisbLMWwN0NBhfEHEhznEj0tOAcaNk3Lg2wY0g+roTGwQJPv8SZjPjTICasn3AMgnDwH0m8/V93YlHx1L/QfpKuctWBEGUG0T9fo2ARmD8IuBFEONX6vEhmSaI8aEHLYVGQCNQJgQ0QYQHVhNEeOx0TY2ARmACIKAJIrySNEGEx07X1AhoBCYAApogwitJE0R47HRNjYBGYAIgoAkivJI0QYTHTtfUCGgEJgACmiDCK0kTRHjsdE2NgEZgAiCgCSK8kjRBhMdO19QIaAQmAAKaIMIrSRNEeOx0TY2ARmACIKAJIrySNEGEx07X1AhoBCYAApogwitJE0R47HRNjYBGQCMwqRHQBDGp1asbpxHQCGgEwiOgCSI8drqmRkAjoBGY1AhogpjU6tWN0whoBDQC4RHQBBEeO11TI6AR0AhMagQ0QUxq9erGaQQ0AhqB8AhoggiPna6pEdAIaAQmNQKaICa1enXjNAIaAY1AeAQ0QYTHTtfUCGgENAKTGgFNEJNavbpxGgGNgEYgPAKaIMJjp2tqBDQCGoFJjYAmiEmtXt04jYBGQCMQHgFNEOGx0zU1AhoBjcCkRkATxKRWr26cRkAjoBEIj4AmiPDY6ZoaAY2ARmBSI6AJYlKrVzdOI6AR0AiER0ATRHjsdE2NgEZAIzCpEdAEManVqxunEdAIaATCI/D/Ac6OmMpaS1RPAAAAAElFTkSuQmCC"/></switch></g></g></g></g></g></g></svg> \ No newline at end of file
diff --git a/Documentation/arch/x86/suspend.svg b/Documentation/arch/x86/suspend.svg
new file mode 100644
index 000000000000..a69073c018d5
--- /dev/null
+++ b/Documentation/arch/x86/suspend.svg
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Do not edit this file with editors other than draw.io -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="407px" height="1132px" viewBox="-0.5 -0.5 407 1132" content="&lt;mxfile host=&quot;confluence.amd.com&quot; agent=&quot;Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36 Edg/134.0.0.0&quot; version=&quot;24.7.10&quot; scale=&quot;1&quot; border=&quot;0&quot;&gt;&#10; &lt;diagram id=&quot;46NsKM0iVOHTgNer6hpB&quot; name=&quot;Page-1&quot;&gt;&#10; &lt;mxGraphModel dx=&quot;1964&quot; dy=&quot;1073&quot; grid=&quot;1&quot; gridSize=&quot;10&quot; guides=&quot;1&quot; tooltips=&quot;1&quot; connect=&quot;1&quot; arrows=&quot;1&quot; fold=&quot;1&quot; page=&quot;0&quot; pageScale=&quot;1&quot; pageWidth=&quot;850&quot; pageHeight=&quot;1100&quot; math=&quot;0&quot; shadow=&quot;0&quot;&gt;&#10; &lt;root&gt;&#10; &lt;mxCell id=&quot;0&quot; /&gt;&#10; &lt;mxCell id=&quot;1&quot; parent=&quot;0&quot; /&gt;&#10; &lt;mxCell id=&quot;8N6JJebqrzA787TgpwUj-21&quot; value=&quot;&quot; style=&quot;edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;labelBackgroundColor=none;strokeColor=#E07A5F;fontColor=default;&quot; edge=&quot;1&quot; parent=&quot;1&quot; source=&quot;8N6JJebqrzA787TgpwUj-4&quot; target=&quot;8N6JJebqrzA787TgpwUj-12&quot;&gt;&#10; &lt;mxGeometry relative=&quot;1&quot; as=&quot;geometry&quot; /&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;8N6JJebqrzA787TgpwUj-4&quot; value=&quot;SFH driver notifies MP2 to stop all sensor collection&quot; style=&quot;rounded=1;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=2;labelBackgroundColor=none;fillColor=#F2CC8F;strokeColor=#E07A5F;fontColor=#393C56;&quot; vertex=&quot;1&quot; parent=&quot;1&quot;&gt;&#10; &lt;mxGeometry x=&quot;120&quot; y=&quot;420&quot; width=&quot;100&quot; height=&quot;100&quot; as=&quot;geometry&quot; /&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;8N6JJebqrzA787TgpwUj-6&quot; style=&quot;edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;exitPerimeter=0;labelBackgroundColor=none;strokeColor=#E07A5F;fontColor=default;&quot; edge=&quot;1&quot; parent=&quot;1&quot; source=&quot;8N6JJebqrzA787TgpwUj-51&quot; target=&quot;8N6JJebqrzA787TgpwUj-4&quot;&gt;&#10; &lt;mxGeometry relative=&quot;1&quot; as=&quot;geometry&quot;&gt;&#10; &lt;mxPoint x=&quot;330&quot; y=&quot;400&quot; as=&quot;sourcePoint&quot; /&gt;&#10; &lt;mxPoint x=&quot;170&quot; y=&quot;450&quot; as=&quot;targetPoint&quot; /&gt;&#10; &lt;/mxGeometry&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;8N6JJebqrzA787TgpwUj-37&quot; value=&quot;no&quot; style=&quot;edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=none;fontColor=#393C56;fontStyle=1;&quot; vertex=&quot;1&quot; connectable=&quot;0&quot; parent=&quot;8N6JJebqrzA787TgpwUj-6&quot;&gt;&#10; &lt;mxGeometry x=&quot;-0.2133&quot; y=&quot;-1&quot; relative=&quot;1&quot; as=&quot;geometry&quot;&gt;&#10; &lt;mxPoint x=&quot;-22&quot; y=&quot;16&quot; as=&quot;offset&quot; /&gt;&#10; &lt;/mxGeometry&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;8N6JJebqrzA787TgpwUj-9&quot; value=&quot;Abort suspend; details logged in dmesg&quot; style=&quot;strokeWidth=2;html=1;shape=mxgraph.flowchart.terminator;whiteSpace=wrap;labelBackgroundColor=none;fillColor=#F27979;strokeColor=#E07A5F;fontColor=#393C56;&quot; vertex=&quot;1&quot; parent=&quot;1&quot;&gt;&#10; 
[Embedded draw.io/SVG flowchart (XML geometry and base64 image data omitted): AMD s2idle suspend flow. Main path, top to bottom: "Suspend initiated from userspace" -> "GPU driver shuts down clocks and sends SMU messages" -> "SFH driver notifies MP2 to stop all sensor collection" -> "All devices go into deepest D-state or F-state" -> "GPIO driver suspends non-wake GPIOs" -> "ACPI s2idle driver notifies EC using _DSM" -> "uPEP driver (amd-pmc) sends OS_HINT" -> "Put all x86 CPU cores into ACPI C3" -> "s2idle loop waiting for IRQ to wake". Several steps feed a "Failures?" decision: "yes" leads to "Abort suspend; details logged in dmesg", "no" continues down the flow.]
NiNS75y2ohiCjw1VxaIYiaG9LwDmn0+sabrrv0REv7TWCcWgFBGKuJJpsgPH0IB888hZYw1X1F3TDdSowjDYpS7jDWgnFjbjR7V5BLj4a2BXtY2fhqt3TChiTsbkcZ3kIQBsNUu0mEIGp3bH17NgUIwljPX2sEoQbFeWIcxxIwfVljDOg3bspGJMOwPl/0hxW4sQe8Kgua8kIQM3BDCOiyEMQMnA9CEFsGvdoShHu6OYZwORxPjAv01t/ayZkl4GvDQzte7rZrmKjLdiGIGbghCEHIoLsREIIIIAhD24SJnlGO6mkztRKwGITDPG5NvNXlGPyFTG96zKZCVEwTPSJSnkJAJIgZOA8m4rSpeRIZ5ZJ6yqiYmpLtNzLhk8VpQMAD/9y2/rjf/uLm17bi1KBkctFuNnLzQPgUQHM0jhRLrLA1r8ogl9RbcQRrpGohiBoZyCjd0Jw2c8y8INOXLjFICyqzufmEnXIYvReghkK6aUkQGuvmJ5HltoGB9N+jYDqZafOuQBzX5m2uenLMdHSmr1O5E4HmVVlk2xZVTiLZcSaIlTPE/CeX1JM5tFO+bCGIKT9EE99A3WZiarhWbE0UKSRKcCFvbyf7klrjH8nIBYi3nY67DsLXwPwCiB4B42Hk+EdFFxeJROpfELOU24/WvMdZxGKwj/I8g/Ud7IbWhe+z7Nj/uuNHsMeivPzJLt+LHBaGudkoViqGchO/1qZ7iUIQ030EK2y/xu3D77OWdbjX26df8U3N7V8txJAoJpmWEoTOzxERdQ6/vsPJppHolJdWa1bdqtIAQ7yqPr5dqmjHkEik3o44dYPx3gJgkaQ2nfW7lyA0ZBfJ1YaKPIeYY8j3DpEgKlxYNZZNCKLGBtS0Ozq/O6Y+iLQbSTRnfVPmDkLdwyVa2r8Lxudc2I0FJzLBs7E5dT6BlG+ooifUHJjPHOhL/zDodB6FiDQEU3bHoPMJZVqHkh5GsrtczeBPl/RZVEwmU6Bm0whB1OzQhnZMuzGGRSzLq0qoE8AhnhqmpQSh+pBMLvygTTFlWe6OUKcCGJ0Y5n4k0bqoCbZ9p8fdt9bbbFNLxyJmx63HFiIh/uJAT/q/QyLElbn89rHUhs7dN5gv2WN3XOGOt+EZO+UM8Ow3nPxdURYESQgidCHVcgIhiFoe3ZC+Nekjj/nFWMig3NUAAASdSURBVKZE66L9YNvL9fEZIrn7nkoShIOSRgpQfx5i4HIerbvSG286wNjNV/rQq6Lg4J0dHf2azlLaCZxUN/pNAs4sidVA+N5AT9f5XmLxCxhEhBUxZC/0uigptEm5/ygtvzh3hCBm8A4hz1xn9OCrzidaUnPBpHzvjAW8KYAyBMKjYHqMwdsT0AxgjwDApq0Eofqk4iHM2mbjNQCfoemjO1Sncq6n4jwnNSFHcwz89+z4+kv8XHg3JFOHWkQqnrY75KiqUrnVeBJEg2AaBnE9mBsAUncWcU+bAgMt+UhETh3hIUfhjtgnr5hm+A4hEsQMnwD503N5VLQwWJhwj6U2HMYxhbTTmiDGSGLbV74BxnmaGN1hkORAuHLk9R2/FHK5TY0tqVOJ6ToNSYTVoX5fxxa166LJuTMnk4sOsMlWRORWm4WV3wXQUwD/x1hCkSDCMKvp34Uganp4zTsXGpt4S1FZMF0J2/4K4rS0Qmd9U07F5EKKmlpTB7FNV/mo0jSg8hNk4Zz+1Wn1DLUkzKjfCBQ86v7IvA5H3XUjj9Z92avu8qsjypgW3XfUbfuKino3FtRoihgOmk9kSTmhCAhBTCic074wamhJ7W2BPgN23HgrF9ZKveGoPxjWypxFNxSfwo7Dm+tUJghnEFOpVOzZF7EPbOt0wD4EIPX0882FER4C+FkQ/ZLZumnP3XKPBFwAB00Kmtvc8b4486lMOByEvVwuNpRzvhcIeByE5cP1/DPDaH/e+vJjytYZgH24S2U1NqZ1NHpt8W7CxEX6tJ/l0gFjBIQgjKGShIJA7SPgNbYjxg/6+7o+Vfs9lx7qEBCCkHkhCAgCRQSoMdm+nAgnFf/gNcYTqGYWAkIQM2u8pbc1jEDBS++NAJ4F8AgR3Tsr9uLP/V5UeaHQWJWX+HuqYeikaz4ICEHI1BAEagQBr98qP2M6v+56jeyi5q8RGKUbLgSEIGQ6CAI1goDOHQeYvzjQl1avkgJfV+ntYejW+viLp5pKIDUCo3RDCELmgCBQewg4/pRGd17qjm+hXqAFWWorA8G62RvbifhKAG9zobIelnXEwOoVv6k9pKRHpgiIBGGKlKQTBKYBAkFW1CWW2sofFNv7g+gDGoO9ISb+fKYnvXQadFmaOIkICEFMIrhStCCwNRCoxDLe1c5NAC0Z6O28OUwttTX6JnVWFwEhiOriLbUJAlVBIIIVdbE9yjAvYxOfMdiTfrwqjZRKpjwCQhBTfoikgYJA5Qg4RGHnUgQsBOg9AHYZK43wMpj/wMDKnBVLmwaLqrw1knO6ISAEMd1GTNorCAgCgkCVEBCCqBLQUo0gIAgIAtMNASGI6TZi0l5BQBAQBKqEgBBElYCWagQBQUAQmG4ICEFMtxGT9goCgoAgUCUEhCCqBLRUIwgIAoLAdENACGK6jZi0VxAQBASBKiEgBFEloKUaQUAQEASmGwJCENNtxKS9goAgIAhUCQEhiCoBLdUIAoKAIDDdEBCCmG4jJu0VBAQBQaBKCAhBVAloqUYQEAQEgemGgBDEdBsxaa8gIAgIAlVCQAiiSkBLNYKAICAITDcE/j8AD6rFgBlvJgAAAABJRU5ErkJggg=="/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-12"><g><path d="M 211 301 L 261 351 L 211 401 L 161 351 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 351px; margin-left: 162px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div 
style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Failures?</div></div></div></foreignObject><image x="162" y="344.5" width="98" height="17" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAABECAYAAACbHqJdAAAAAXNSR0IArs4c6QAAEYRJREFUeF7tnX90HNV1x793dmSJkGJoSAyEH/lVTpJSmoONtSvt6ojfhNgxAXYlEwxOSUKaUtOkpTmltE0bk4SQk5xwCBRSBwwES7sYDHVLiF2ieH9o5dQpJYSmSSAtuBhMXWMIyJJ255a32hWj3ZndmZFmdq298+fO+/l5b+c789699xHkEgJCQAgIASFgQYCEihAQAkJACAgBKwIiEDIvhIAQEAJCwJKACIRMDCEgBISAEBCBkDkgBISAEBACzgnIF4RzVpJSCAgBIdBWBEQg2mq4pbNCQAgIAecERCCcs5KUQkAICIG2IiAC0VbDLZ0VAkJACDgnIALhnFVgKSOxxF1gXOFHhUz4Qj6d/JofZTspMxxL/DkxbpxJS9g4mk6urc4bi110bIH1NID3Vu4x8NF8JvmPTuqRNE0hQMujA7+jM1/BhPMAnAzgt8otKQJ4AUAOoFSnbjwyMpL6TVNaKZU6JiAC4RhVcAlFIAARiODm23zU1B2Lf5CYNhAQdljeOAPf4amOvxob+94rDvNIsoAJiEAEDNxJdSIQIhBO5kmLpKFINPFZADcBOMxDm55kjRL5HcP/4SGvZPGZgAiEz4C9FC8CIQLhZd40IQ9FYvE/A9NXAISq6i+C8GsAWTBNAPx2EPrAeFttO+kZaHTJ6I6hf2tCH6TKOgREIFpwetQIBOOxzg5etRDWbGUPogUnnMcmhaOD3QTjEQBHmYooArRBp6kvptMP7DEXHY/HQ88+jzM1ws0AvX92tTwy2YULd21PHfDYHMnmAwERCB+gzrXIhSwQTtnIHoRTUs1JF4nED4OGTSBaNUsciP9iNJ36OgC2a9np/fFj9AI2AdRvzsuMq/PZ5N83p0dSqxUBEYgWnBciELLE1ILTclaTenriEdbo+wCOqNxg4M6p8SOv2rXrjqlG7Q/3D74LBeNRmrZ0Kl+cm+zCBfIV0YhecPdFIIJj7bgmEQgRCMeTpUkJa5YKgb1k0Fm53PCTTpsUiSb+CMAtpvR7WKOzZMPaKUH/04lA+M/YdQ0iECIQridNgBn6+/v1icI77gMQN739P4IiLh4dTY07bUpPz8DprPEPABxZzjPJwEfymeR2p2VIOn8JiED4y9dT6a0gEN3dq5doemEFiD4CYCmA4wDopg69CPDTDGwuaqHUj3cMPeeks35sUteU6XJT303+6rQV573u7o8fQR1T1xHwCQDvADAO5p8z4V5jsrBx584H99nxUZu3z+0JLSMy1oJxDoCTTKxfJOAJEO5eFOItXg0Vlp4dX9w5QSvBuJyBUwEsKben5MBGwFPMPFycKmyp11aVp79/bdfE1OtXQ8MpYF4G0NHE2JDLJv/SyRyopJF9Jje0mpNWBKI53OvW2kyBUA5PGtM3AJxtYbpo1+4iM7YUQ9rnGgnFQhQI0rTdMPh+gN9jDYi3duqHx0dG7jpYdZ96+uJnsIFv11r1WJb0Koi/hAJucfqm3t8ff+tEkb4ExlUO/RQKxLitWOi43m8Htp6e+Ieg0TYGji73dr8BPmcsk9rVgn/LtmySCEQLDnuTBGKuDk+K5PMaa6uy2aF/tcO60ASCgGsYpYfvB236XCSiy3Lp4SHzfWUFRCFtPYOvcSHE5SJ4zNCLl4yNPLC73vSNROK/jRANl8Xe1Uxn4BfQ6EI/9wNq9yD4iUldP2vXyKb/ddVYSewbAREI39B6L7gZAtETS1zMjHss3jJfhXpYgB4HoJYjQmDjNFDJjr3Wc7bB8s5CEwgQ9pmcv9T6exagZ8pfE70AnjamQmePjW16sTIjli79dMeiww7cAvCna2YJYR8xfsIg5WQWYvAyAn63anlPZXuKjMIFudwD/2010+rUUVr6Amk/KY0ncSeAXjDeXStU/vkm9PRcdBJr+j/PElaiW0bTw+vqmch6/1dJTi8ERCC8UPM5T9ACcXrf4Am6YTwK4AOmru0nwrqJ148ctjJbLL8Bf4LBXzUFZFPZJwFeNZpJKRPImmvBCcRMD3mM9dBgfmTovyo/qeWd8SJOHEunnjKBsPE+5p8bjHUnHofHUqmUEuKZa/nyj71NW9TxdzT9pWL2WH60U+dLrPYlwtHBfoKhHsAVES8y8JUunW+0Sr+8L/5u3aDbGaU9kMpl+fUz1+lv82XzvMbFc7PZzT+ba/mSf/4IiEDMH8t5KylogbAwN3xFY21FNjukoqnWvXr64meyQQ+a7eGJ8WW7DcuFKBCl5RhdO88sDnbQuqPxpRpoW5X38XYUeWB0NPV/dWBTOBa/gphuNT/0ibA2l07eW50vHE18iwD1Nj59OXg7VxvZiw5iy2wHNtv9k0ZTw/J+d/9Fx2uF0P0AdZsSjBNhTS6d3OypUMnkGwERCN/Qei/Yh1hMT+tUiFWHPlAtLFmkFF5LAbTCzcOkktbK5JEZ9+azycutlgoWokAQ8Le5TPKLDkacIrGBm8F8tSlt3aWiqjIpHE2oL4nr3/y91rns1HPXHP7W1yceNH8NOA2VHonGzwfoIQCLVB0MPKej2J/JbH7GQf/qJgn3DXwABm+Z7RxXWuZq6H0917olvzcCIhDeuPmaK0iBKIVMCNEfMBAFcBoBiw3mS8ayqYzTTs7FTBTzcB6Em/qt+uQmv4WD2GsG8/lOeKllnJBB/wKU1vvVVbT7ArBjb1FGje9AyXJpih4C4cxKOcx0WT47/L1GY1p+w98GkNqbeJxBT3WQ8a10OvVSo7z17pc9r+8vm0tXkhbBfP3xx+Gm6mW1udQleeePgAjE/LGct5KCFIj5aPScHrCHvkD82tALfY0sihTncG/8o0T0wMw+AuFXKHD/6Gjqf5yOg9UXm8WSHoV7E3cT4bIZgQjAKsmuD5FYfDmY1EFPyj+kcqkN/WtHM0m1ZGYbt8kpF0nnDwERCH+4zqlUEQh3ntRuBGrevyBcOOX19CZuYMJ1lTYQsO03b+n82BM/uOc1NxMmHEtcR4wb3sxTu0/QExsYZGa1N2He1FaH9DxE4I2d+uEjFn4ZbprhKK2ltRLwKkBXjmaG1ReFiIMjks1JJALRHO51aw16k9oLgqX9q4/uKBQjBKwGoPYvKkdLqr+8bXjyBbgHkerU9146MjJSqMfRJjzFLwHth+7584cALDcJxFgIHR/OZO7bX/mtbPm0tc4JbwUGfkaMYQ7RlhOWGL+Y72UeZWrb+ZYD9zD
zgKmP+4l4TS6d+if3/ZYcQRMQgQiauIP6WkUg1Ab25OQrSwxNP1UDncDEy8GlB9O76nrltpNA2CyRVQ+z1b6Ag6ngNImlEUJv7+Aygwy14azCpDS6lL/LVjLoromJxT90EpG1UYEWEV8nmfnyfDalnPfkOgQIiEC04CA1UyCUT0TIMK4hQL31He8JjwhEDbZmCIRqRGk8i8Y3iXChC49ttT/wXZ0KN1hZvjmdE9VLagC7DujntC5J5w8BEQh/uM6p1GYIhAo2F9Kn1jPhDy28duv1R715qqWNE2cSiUC0jEBUGmIS/jVVm8X1xvZ50nhNbkfqMbcT2tJ8mukzo9nh292WJembR0AEonnsbWsOWiBsnJdq2zcdVkJ5CucB+pHGlM1mh/aEY4lriXGjCIT9ZLI0PSV8IZ9Ofi3gKUjh/sGTaIrPg8ZXgKH2M2pDprzZqL3QtPPdnhcdjV56VBFTj5gc4orMtDKfHVZHlMp1iBAQgWjBgQpSINRGYsdhL99eDlNtpvEyCENs0MMd2tTjxxwT2mu3ienGimjBbVI73IOwcl4jxj/ksslPNXMKqvHv6tp3cpFCa4gxACrtL82+HHhhV2exCOUtkVqbOdAe6xaB8AjOz2xBCoTV0ZFvmOx/H0X+eIPQDzMImi0Qkd6Bq0D85lnGLkxPVSd6ehPfYcIn5/MLyGp+uKnHz/lVp2yKxBLngnG3eRlKhRLhqVCfOeBgo/aJQDQidGjcF4FowXEKVCCqbPMBPIsinzU6mvqVQzQ1TlmBm7lGEysJeNjU3h9pfHBlNvuw2h+pe1m92c9H+60qrREyD8d0qqhK5XAd6jS33SrKLrOxravjpc0VU1sV0oLYWAGmiDrsiYFcl753TSNT3Eqbw7H4WmK609QHeftvNJEW6H0RiBYc2KAEwsY23+5wG0tSpZPnOorqiMhT5vMN3M1pY+FagXDu3awepgar8BfHzmf7Lb8gegZOYa1U14xHMQPr85nkXzt1GItE4u9DqBSuY8YogIGb85mkOleidM3VY9sioKAIRAs+J4JokghEEJRd1hGcQNQG6nPp3WsRPK70qAvUUa67Nx7ViFR48cPLqOuGHDcNh2/tt/yCUHGvNGwC0SrT/b0gXjmaTu1sNE2UoE8WltzMYGVpVrnGGdoF+czQSOWHaPTi9xQQGiHghPJvrmI+1QgM4FhwG/VB7h9aBEQgWnC8ghII1fWadXFgP0P7cD4zNFYPjTpHefcL+DyYvmxhFmu7xOPHJnUkEn8ndBoB432VNiuhm+ji+K7tqQM2/aBwbzxBVFpKmW3FMw8CZ8euuzd+jkalaKkzdTo8vc26vcwPwcBq8xGk01+GSzYCfKlbIbIO+Q1H3uIt+FeSJs2RgAjEHAH6kT1QgbCM2UPPkGZ8KrcjpcJAzIqVo4ThuRcoDMbX64RxsA0v7odAqHX5cDSxwcISa0tB09ZVn5OtwlDoHR1/Y+vz4aNA2HwFqGm0H8zXwsB91edN1/FRsf366O0djBlkbDWf0wHgJWb63NTBxUkrT+np88ixEaBl5i8Ug3nVWDalzrBwfFmee+HQ4stxJZLQdwIiEL4jdl9BkALRIGaPOiYzB1A51LNxCkC/V3WCHKCiknJpXf2Icm9tlyR8EgjYHMSjmlMAeBeg/bvNEZ5FYtzKVIonNR2G20eBUMWrt/TOg5SqOr2tMlHGQXgcTD+d/sH4fYCWWnyljTPxn+bTqdvsvpAiscQ3wPgTi/uvAvxTQHty+h6/XX1MAlhSm5bumBxffLXb0BsiEO7/962YQwSiBUclSIFQ3XcZs8dMrACmb3IH3YqC8ajpIJiXyaBzc7nhH1fj9UsgStY90cRn39iHuKmB45e5SUUC1sMo3MmarjZ+3xuEQFREYtFBuuONs6UTHqago1DZJee8gnYrwMp72u2ljii9u0vndVZHlDYqTASiEaFD474IRAuOU9ACoRAsjw6crIE31lk2mvVgBbDdIP68Om/ZKqwC23gJ+ygQqn3U0xc/gw18G6D3Nxjal5j5j/PZVDIWu+iYAuvqeNXABEK1rbSPs4eUQChRe6eTqchAnomvrDrn2jZruY7PvPEFsh7AkU7qAKDOp7j2+GM56TXCqwiEQ9ItnkwEogUHqBkCUXlgqf0FzaBPMnEMwEkzSxulMBv8nwxsLmqhVPW6fvW51upBZkxOrdi588F9ZsQ+C0SpqrJ3+PlUsvah02aWTsp9IKLbFoV4S+XNuMak1uclpuopp9qrd+3vDkG7ksnoBUgFSaxsYqvloGcZtNUAfXdnZviXTk1izfWUQm93HjgDhNUWdYwDvJtYyxZhbCgcPGrM7ZJSdZ9EIFrwweKhSSIQHqBJFiEgBIRAOxAQgWiHUZY+CgEhIAQ8EBCB8ABNsggBISAE2oGACEQ7jLL0UQgIASHggYAIhAdokkUICAEh0A4ERCDaYZSlj0JACAgBDwREIDxAkyxCQAgIgXYgIALRDqMsfRQCQkAIeCAgAuEBmmQRAkJACLQDARGIdhhl6aMQEAJCwAMBEQgP0CSLEBACQqAdCIhAtMMoSx+FgBAQAh4IiEB4gCZZhIAQEALtQEAEoh1GWfooBISAEPBAQATCAzTJIgSEgBBoBwIiEO0wytJHISAEhIAHAiIQHqBJFiEgBIRAOxAQgWiHUZY+CgEhIAQ8EBCB8ABNsggBISAE2oGACEQ7jLL0UQgIASHggYAIhAdokkUICAEh0A4ERCDaYZSlj0JACAgBDwREIDxAkyxCQAgIgXYgIALRDqMsfRQCQkAIeCDw/9PUXuoYdJv9AAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-18"><g><path d="M 261 201 L 356 201.5 L 356 87.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 356 82.12 L 359.5 89.12 L 356 87.37 L 352.5 89.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="8N6JJebqrzA787TgpwUj-19"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 
1px; height: 1px; padding-top: 212px; margin-left: 307px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">yes</div></div></div></foreignObject><image x="298" y="206" width="18" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEgAAAA/CAYAAABDyo4+AAAAAXNSR0IArs4c6QAACIdJREFUeF7tWXuMHHUd/3xnd++uFAQC6bUUMUAkJmIDOdvbZ11oqS1IgTaz2yttqY80VatEYzRSFZpIExSDNJAWYgSDxe4OtEV5BUqz3D5m75rTEBrARgFTtekZLMfJ9fZ2dr7mt7d73Z3Xzt1Wc01m/tz9Pj/zfQ/BexwRIA8fZwQ8gFpEiAeQB1B7RcSLIC+CvAhqDwEvgtrDz6tBXgR5EdQeAl4EtYefV4O8CPIiqD0EZnMEybLsO36CbiDgKwBuBNBds/c0gPcBekUn/fGBrPI2AJ4OEpOyfZ8ncB+gLwfoCgAXNMgYZeCExFRg0lOd/rmZTObJcaMOCkXlFwFaZVJO9IiaTX3bjWHhaOI+Bu41CQd2FHLp+6wcC8XkL4CxB6DPtHKcgaIOumswlzrWilb8Hw4nF7PEvwZwrRv6Gs2/ANx9+QJOK4pSqfNRMJK8k4h/ayHoqF72LR8Y+N1JJyU9y+ULO8YhQA4b6D4inVcWCopq+J1C0cQ3APwcwJxpOHCKiDcWssoLTjzBSHINET9piBa3aiogPDQxdtE9Q0OPlwUTLVkqX+nT6TUAVxqkTAB8m5pTXnaSHg7LIZZI0HyimY5fQgVrVVUR6TL1hKLJTQDvmSY4df5RZtpczKf2W9kUiaz9rE6+VwBc5hYRC7oKM7YV82lhI0Sbp1AsuQvM24zEDDxRzKW/6pRmdukFYJuaSz/aBM7SdddD1wWY89pw4Kju11YNZPb/3RSZNn7MQNfbmiR98Uj/vuPVOag3IkclqkbBXIOw9yoSLxvsV96zUhKNrr+4gvJLAPU2/s/AcT8q8Vzu2Xfrv/f0bAl0njfyFDMnLWSNEnB/qYv3DB1SRkTado7TVga2W6UKWdS2nnjfpR2a9hpAiwzyTwH4scTSgXx+3wnxskUBf/+k7zI/V74Mpu9Z6WCmDcV8am8VIIc6UiGiDYVsap8VQPbA0tOd/pN3ZTIZrc5nn4oYhiTdofbvKxh11OT/HsDFhv9M9TEclq+DRK8ycGnTyyJsL2bTO+2iKBxLbADjIQB/YmaVWMr7fOU358/3DYtiPTVJh6KJbwJ4xCzI7OyU05HE/Uy4x8BTYeY1xbwiHJt6grHE94nxgEUaP1DMpX9ok8Z26T/BwC3FXPpQwwuwBAjAIVQ4qarKv2eQatUaVH2i0bVXafBlCPhkq3SZpLdOLwCmtxuPb+4qaR8rAH3JYKTJUaMTwUhyFRH/AYCv8T9i7Czk0yIFq49Diom/RaN4nQh7SZcO11PNDWBTAMXjcX9J6/4NwOtNjExb1Xzqscbf7dKLgV3FXPruRlqn+qCDbxrIKUN2xvZG5R4J9KpFmimd/uH1DWls22wsZI8C/CYIByVdf3F8/JJj9bZupG1aVkNReSVAzwHoaCbk5zv9c+XGSTNsnV6Ws09vfM3lkubvtxgl3LxEaxrG4c4A35bJKP+pE7TR5sVU/QwTP2ic2psAckibYdJpWaGQOuqcXlyY6MLNohM1RZt9FMwcIOCvftJi2ex+0ZmmnlozeGaGs5CYgQ5WfNJ3RIsXQk3nDjdzjcNYYJp9hBKHNDnrAAmBsdiaBRr7RX0SO950pvW6PW+Rrt1cKOz/mxmgcPJallhM1s3DXENI26RXU5Q1em7XANpBBzYR1Ciz1hziAG0CIBpE47LqqJ6IUqWxCzeaAJos1vOeBiAbJJwSBTWAwLtWwyEAY9GcYq+90SyAqw0yLdOkTeDs2CkSWbeAJf1GZqwGsLThemDFc4IlWmZ5UQzHkuuYqwtsc2sFdlSYD1lM3Y4D5aIVG+eeP1Y6wMBNBks+JJ1WFAqpI/8jUBzFigE5UJI2EbOYz0ypyMBqS4B6e/u6pUBFDGGGcwEXCNIbDP56k2bCX6BxXFWVf9hZFIwmHiZAnE+aHgZ+Wsylf+LmrOLkrUinMe30FX7SF4FpMYOjBLoGRN+y2wSEvHhcPr9UpudA1XuU0TZrgASVTZ2xttHF7SgYTSwnQJwqDCMEhhm4s3EqriuprUDPAvRpAEMgVpl9RyS99NbChf4P6ncbh0FUiBrS/drtFsttVY19MKDCTLfaHu0ddicjSB/rzCsH8krO+Q3bvykAoyB+UJ/wPybuT2KxDZw38jkwP0pA0EKuKaXtysIkL7/DLG3vCpQOZzIHP5yKHA1RBt1ro6O6qNsCFArJc+CDeHvma2NzIFrOPlZgBSPyaiISi+9MWu8ZkRZD4pIld1widQSet3F2+iWO8Es1m/6u42cfh2vjlEIm/KCYTf/MjQWiQ05o3btMNcwN8xmaYRDfqmaVQSNbKCYvAZPY29q5Nwmx9nNQo1KHa2OdzHb2sfO5eheaM/Iwg7cYu6QLnGzrVZ23zUlaiDnKEiWK/SnxocA8SRuMbLEAmnc0F05W9QYjcoKIfgFgoQse0wrgxLM4Ls/3abSTgI0A/C7kCxKNGLsrWuBHAwN7P6rztPyy6rBWOM4+boyqRlPnyA0g9DFxDMCnGhwaBXAMzC9oPt+v6ruRG7l1GlGXfAH/7SC6RVxEavtZHTBxzPsnAX9m5lSlrB0cHDzwgVF+S4AcupnjOXY6jsxmWkeAqkW1PG83E75mdMLNQX82O+7WNkeAwrHEWmY8ZdGWXX0ScmvEbKabPNr3bAl0dJy6QFUV8QWAq18VSpTgyRuy8WAuSFzPPrPZeTe2TX72mf5By/Lu40bhuUZTBcjhHGHyh4E3JL28olA4MHyuOTsTeycjqLevmwKVfgKuaSHEdoqdifJzgacKkNPKf8YJfgeErWpWef1ccOxs2VgDaHPXhDa2Q5wdAMxvWAFOAvxHBu0un77oZbtPI2fLmNkop+WgOBuN/n/a5AHUAm0PIA+g9hLSiyAvgrwIag8BL4Law8+rQV4EeRHUHgJeBLWHn1eDvAhqL4L+C38I2WDElOGjAAAAAElFTkSuQmCC"/></switch></g></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-24"><g><path 
d="M 211 401 L 211 426 L 51 426 L 51 444.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 449.88 L 47.5 442.88 L 51 444.63 L 54.5 442.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="8N6JJebqrzA787TgpwUj-38"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 442px; margin-left: 132px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">no</div></div></div></foreignObject><image x="125.5" y="436" width="13" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADQAAAA/CAYAAACvv+soAAAAAXNSR0IArs4c6QAABF9JREFUaEPtWF9oW1Uc/n43uc3UgYrI/NMxUfBBxJfaNUtSiboVKVunmUm7qtgHGfPFoi+iqDDElw3BjeGD+uJ07Zorq7M6FYbEpvlXiTIf6hBfRFlZVGZbtX+S3J/cuIwsPdecW0tuLec83vM757vf953zO79zCOus0TrjA0VorTuqHFIONVkBteSaLLhjOOWQY8maPEA51GTBHcMphxxL1uQByqH2+/o2e0xzkBh7QNgMwANgDsDXDLxenL/us3z+reJKjGlr26f7fDP3g7CXiTsBbAHgvTTXBQK+BeHYoo/H8meMGRHGZYc6OyM3l9ibBHDHFYGEdzPJ+MC2bdGroGkHQPxsDYhgTj4Hwv5M0vhSllQ0GvX8PE0xAIcB3Cgxbp6Bt7mov5zLHZ+tjZcitOTjQX2RhojRLQFmhVw0mXtyKWOiUXx7OHqTXqJjDOxoFLu8n89B8/Rnxk98U+1rTAgYBmgO4H1OABnImkvFnZOTo7/ZjesIR1q1kucDgDqczF0XWwDxrkzSmLS+yxCy9oN2aa84wS0zcySbMj4SDarsl6tn3mPmXieT2sROkVnqTqdP/ihDqDpHGcAYNH6ldROmrI/nz3sCJplvArhbBMTAkexEfFDU5w/2Roh4GECLoP8XML+wpHtP5RPDv4bDAxuKxYX2MpkHCfALkwHjnRa98LQsoTKYX2q9BYcMw7CIXW7+cN9tKJmfE3CnAMjweQv9iUSiVNvXtj16bcsCTgMUEIyZQpl3ZzLGD/V9VmIijY4w4SnBuFky+SFJQpxe2oBuu1QZCMZeY8KLy0AYX/h03p1IGH9cIUIotp2ATwTuzGqs7UylTljZVti2bn3kBq1F/1jklLUipAgx4flsMn7QDsQf7H2MiN+XJWQrAGjI573wZL2j9fMGOnv7mCt41hlY0zgnRwjoyU7Ex2wJhWK7CFi++QUO3dP1xDUb/1ocFaVpZno8mxo53ihJhEJ7bi/BkyBUDvbaNi1D6HcyqSudHvlqNQh1dOzdRHp5XLDnGuJU8UOh/uvLKH4qSPd/yhC6aIJ35CaM/GoQsq1IrMO4AU4VPxyOblws0ikQHqj/J0VI5JLfwR76l+Xy/1xyayEpNFzbThyyHPaHYocJeGa5281J26tPKBjtIaKTgvrwPx2sBBxoelKwXLFSt6aXz9jUgCstfQpk0oOuEKosu2BsPxGO2lTxjotTEN7IJOPPuUbIKlB9C2Ss7GJXV/AAZzWz2JVOjxZcI2T9UiAQ2cKa9zSAuxqVO3b9DHwPjR7Ojo98Z8W4Sqiyn/65tQ4BZD2KOGoMnKUyP1p71XCdkMWg5pHkEIBbJVjNgfhVlHA0kzHma+PXBKHqD1nEfpr23EtkDoArjybCZ6wWD39Yf8eqzqEeGiWWg6shyiFX5ZcAVw5JiORqiHLIVfklwJVDEiK5GqIcclV+CXDlkIRIroYoh1yVXwJcOSQhkqshyiFX5ZcAVw5JiORqyLpz6G8sXc9b1Ei9agAAAABJRU5ErkJggg=="/></switch></g></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-26"><g><path d="M 261 351 L 356 351 L 356 87.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 356 82.12 L 359.5 89.12 L 356 87.37 L 352.5 89.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="8N6JJebqrzA787TgpwUj-35"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 362px; margin-left: 307px;"><div data-drawio-colors="color: 
#393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">yes</div></div></div></foreignObject><image x="298" y="356" width="18" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEgAAAA/CAYAAABDyo4+AAAAAXNSR0IArs4c6QAACIdJREFUeF7tWXuMHHUd/3xnd++uFAQC6bUUMUAkJmIDOdvbZ11oqS1IgTaz2yttqY80VatEYzRSFZpIExSDNJAWYgSDxe4OtEV5BUqz3D5m75rTEBrARgFTtekZLMfJ9fZ2dr7mt7d73Z3Xzt1Wc01m/tz9Pj/zfQ/BexwRIA8fZwQ8gFpEiAeQB1B7RcSLIC+CvAhqDwEvgtrDz6tBXgR5EdQeAl4EtYefV4O8CPIiqD0EZnMEybLsO36CbiDgKwBuBNBds/c0gPcBekUn/fGBrPI2AJ4OEpOyfZ8ncB+gLwfoCgAXNMgYZeCExFRg0lOd/rmZTObJcaMOCkXlFwFaZVJO9IiaTX3bjWHhaOI+Bu41CQd2FHLp+6wcC8XkL4CxB6DPtHKcgaIOumswlzrWilb8Hw4nF7PEvwZwrRv6Gs2/ANx9+QJOK4pSqfNRMJK8k4h/ayHoqF72LR8Y+N1JJyU9y+ULO8YhQA4b6D4inVcWCopq+J1C0cQ3APwcwJxpOHCKiDcWssoLTjzBSHINET9piBa3aiogPDQxdtE9Q0OPlwUTLVkqX+nT6TUAVxqkTAB8m5pTXnaSHg7LIZZI0HyimY5fQgVrVVUR6TL1hKLJTQDvmSY4df5RZtpczKf2W9kUiaz9rE6+VwBc5hYRC7oKM7YV82lhI0Sbp1AsuQvM24zEDDxRzKW/6pRmdukFYJuaSz/aBM7SdddD1wWY89pw4Kju11YNZPb/3RSZNn7MQNfbmiR98Uj/vuPVOag3IkclqkbBXIOw9yoSLxvsV96zUhKNrr+4gvJLAPU2/s/AcT8q8Vzu2Xfrv/f0bAl0njfyFDMnLWSNEnB/qYv3DB1SRkTado7TVga2W6UKWdS2nnjfpR2a9hpAiwzyTwH4scTSgXx+3wnxskUBf/+k7zI/V74Mpu9Z6WCmDcV8am8VIIc6UiGiDYVsap8VQPbA0tOd/pN3ZTIZrc5nn4oYhiTdofbvKxh11OT/HsDFhv9M9TEclq+DRK8ycGnTyyJsL2bTO+2iKBxLbADjIQB/YmaVWMr7fOU358/3DYtiPTVJh6KJbwJ4xCzI7OyU05HE/Uy4x8BTYeY1xbwiHJt6grHE94nxgEUaP1DMpX9ok8Z26T/BwC3FXPpQwwuwBAjAIVQ4qarKv2eQatUaVH2i0bVXafBlCPhkq3SZpLdOLwCmtxuPb+4qaR8rAH3JYKTJUaMTwUhyFRH/AYCv8T9i7Czk0yIFq49Diom/RaN4nQh7SZcO11PNDWBTAMXjcX9J6/4NwOtNjExb1Xzqscbf7dKLgV3FXPruRlqn+qCDbxrIKUN2xvZG5R4J9KpFmimd/uH1DWls22wsZI8C/CYIByVdf3F8/JJj9bZupG1aVkNReSVAzwHoaCbk5zv9c+XGSTNsnV6Ws09vfM3lkubvtxgl3LxEaxrG4c4A35bJKP+pE7TR5sVU/QwTP2ic2psAckibYdJpWaGQOuqcXlyY6MLNohM1RZt9FMwcIOCvftJi2ex+0ZmmnlozeGaGs5CYgQ5WfNJ3RIsXQk3nDjdzjcNYYJp9hBKHNDnrAAmBsdiaBRr7RX0SO950pvW6PW+Rrt1cKOz/mxmgcPJallhM1s3DXENI26RXU5Q1em7XANpBBzYR1Ciz1hziAG0CIBpE47LqqJ6IUqWxCzeaAJos1vOeBiAbJJwSBTWAwLtWwyEAY9GcYq+90SyAqw0yLdOkTeDs2CkSWbeAJf1GZqwGsLThemDFc4IlWmZ5UQzHkuuYqwtsc2sFdlSYD1lM3Y4D5aIVG+eeP1Y6wMBNBks+JJ1WFAqpI/8jUBzFigE5UJI2EbOYz0ypyMBqS4B6e/u6pUBFDGGGcwEXCNIbDP56k2bCX6BxXFWVf9hZFIwmHiZAnE+aHgZ+Wsylf+LmrOLkrUinMe30FX7SF4FpMYOjBLoGRN+y2wSEvHhcPr9UpudA1XuU0TZrgASVTZ2xttHF7SgYTSwnQJwqDCMEhhm4s3EqriuprUDPAvRpAEMgVpl9RyS99NbChf4P6ncbh0FUiBrS/drtFsttVY19MKDCTLfaHu0ddicjSB/rzCsH8krO+Q3bvykAoyB+UJ/wPybuT2KxDZw38jkwP0pA0EKuKaXtysIkL7/DLG3vCpQOZzIHP5yKHA1RBt1ro6O6qNsCFArJc+CDeHvma2NzIFrOPlZgBSPyaiISi+9MWu8ZkRZD4pIld1widQSet3F2+iWO8Es1m/6u42cfh2vjlEIm/KCYTf/MjQWiQ05o3btMNcwN8xmaYRDfqmaVQSNbKCYvAZPY29q5Nwmx9nNQo1KHa2OdzHb2sfO5eheaM/Iwg7cYu6QLnGzrVZ23zUlaiDnKEiWK/SnxocA8SRuMbLEAmnc0F05W9QYjcoKIfgFgoQse0wrgxLM4Ls/3abSTgI0A/C7kCxKNGLsrWuBHAwN7P6rztPyy6rBWOM4+boyqRlPnyA0g9DFxDMCnGhwaBXAMzC9oPt+v6ruRG7l1GlGXfAH/7SC6RVxEavtZHTBxzPsnAX9m5lSlrB0cHDzwgVF+S4AcupnjOXY6jsxmWkeAqkW1PG83E75mdMLNQX82O+7WNkeAwrHEWmY8ZdGWXX0ScmvEbKabPNr3bAl0dJy6QFUV8QWAq18VSpTgyRuy8WAuSFzPPrPZeTe2TX72mf5By/Lu40bhuUZTBcjhHGHyh4E3JL28olA4MHyuOTsTeycjqLevmwKVfgKuaSHEdoqdifJzgacKkNPKf8YJfgeErWpWef1ccOxs2VgDaHPXhDa2Q5wdAMxvWAFOAvxHBu0un77oZbtPI2fLmNkop+WgOBuN/n/a5AHUAm0PIA+g9hLSiyAvgrwIag8BL4Law8+rQV4EeRHUHgJeBLWHn1eDvAhqL4L+C38I2WDElOGjAAAAAElFTkSuQmCC"/></switch></g></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-30"><g><path d="M 101 501 L 154.63 501" fill="none" stroke="#e07a5f" stroke-miterlimit="10" 
pointer-events="stroke"/><path d="M 159.88 501 L 152.88 504.5 L 154.63 501 L 152.88 497.5 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-28"><g><rect x="1" y="451" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 501px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">All devices go into deepest D-state or F-state</div></div></div></foreignObject><image x="2" y="480" width="98" height="46" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tvXuAXEWZNv68p3suASVhUW4miq6XFQVdA5lrh8FwCxAuCT2TIAlgXEDIgqLIT8RdXcHLuqvCD4ygXBKEZKYJgkaQizg709PdEwjyIYsIiAoYSJAvhAiTmek+75c6fcnp03VO1+lbuid1/iFM16l666k69VS99V4I+tEIaAQ0AhoBjYAEAdKoaAQ0AhoBjYBGQIaAJgg9LzQCGgGNgEZAioAmCD0xNAIaAY2ARkAThJ4DGgGNgEZAI6COgD5BqGOlS2oENAIagT0KAU0Qe9Rw685qBDQCGgF1BDRBqGOlS2oENAIagT0KAU0Qe9Rw685qBDQCGgF1BMoiiM7u3q8x8O/5zdEdLcHNZw8ODibVxcgv2R7q/RIxvpP7K+PhliY+dXAw8ndnnR2h3lvBODv3d8Kq+PDAOaW2Xen3nPIx4fLE8MB/Vrqdeqqvvbt3AQE/Vxm/epJbyzJ1EejpCb9tfJLuAeGT2V4ycEoiOvCLqdvr8ntWMkG0tS05wGhKPQTgow4xtpBJ82Kx/idLFU8TRKnI1cd7miDqYxy0FLsQaASC6OgIT4OB5SBMxKORG+th/EomiI7u8AkA3QOg2dkRAr4eiw58rdQOaoIoFbn6eE8TRH2Mg5aiMQgiHA4H/vqKcTKz+W2A/qmetAwlEURPT09wPHnAKoDPzAzBJAADQCDz/0+ak4FjRkfXbC5lkmqCKAW1+nlHE0T9jIWWJI1APZ8gOkK9K8G4IKf6qiM1dEkE0d296H1JBAYJmJXuFP86/V+al+lkiojOig33ry1lgmqCKAW1+nlHE0T9jIWWpCEIIu8eteFPEB3dvRcBuC47+YjxTdPAtryLZZR+Wa0JQn/WGgGNwJ6CQD0bsvg+QVgXKQGsA2h+9rTAzAuZ8FcD9CCAfTN/L/myWhPEnvJp6H5qBDQCU4ogOjvDHWzQrwDsYymXgBeDSPUEg8YWpxlZqZfVmiD0R6MR0AjsKQhMLYLo6r2aCVfsGjxe3xLcOzw4eOuOgoUdKOmyWhPEnvJp6H5qBDQCU4YgpL4PTBfER/pvEMPc2dn3UTasC+v9y7msrnOCoDndfR8wwJ8m8KkAvR9AEMAYCI/D5Otg4mfxeGRMYFDu4FvWF0ljPmCeC9AnAByQwVY4Iv6FmIZNg38y60BORCKRlNvnJrE8E0UjLcEtZ/p1auz02CSISsu9pJ49+7ymlpZtR4OwhIlDAN6TwVj07xUAMQZumxyb8auNG28UFnS+HmFW+OLLgSOIzHPAONZWv6hnMwFPgLC6OcB3y5wzVRqbfUx4ess4LQBjGQOH28bN6gMBTzFzf2oyefeGDT97TaXOSpXp6Ai/nwP0GQJOAXAIgGkAkgA/xzDWpQy64ZGhtS+K9kKhhQclOTgM4B+t9j2cVmXyzZlz+n6BpuBpIHwaRB8CYz/n/E3BvCm5Y9/RUsZSFRM/VkzF+nzk3MWzAiafT+CFNvyyc3OICdd7fY8yWTz7oej8K+QKcmoxmPoAfBDA2zP1irXozwA9AMO8aeYBeMprrbDL4usOor0rfAoR3WUzZ827Z5DcT4gZlTthqA5mvRKENTFS5veJcJoNA0m3+GkyjWWxWP8jpRKEhWUQK8D0VdtAe0DIT5OBi2JDkd+kNX+FT2eobzEz/zQre1Y9GI2ue151bLq7z9w3hcn7AGpz2wSUShBi4X7pZeoF8F0A71KQ6a8AXRCP9v/Src+OOqhzbvhoNnG9sDdXqH87iL+BJK7LEn6xd6yPP0XfAOP8zMJb7JUkMVamkk1Xjo7e/kaxwuX8fmRP+MDAJF1XfP4iycAPW4P8lVQq9fZSCCK9iJpfJ2BphtyLif5XAJfNPIgHVBevYhXaf68EQYj6diTpagIuLN6nXWuAU85KE0RbKHyowfQ9AMd4r0s5SZ4kgy/xWiuyJZUJQr4DLVz8O7r6zgfxj2yg+L6srkeC6Jwb/iSbdBuAgxUn5hYYxulg8zx7KBAVE7b2nsWHUDK11rYIKzaJFAjfn3hrxhWy3ZjsBMhMZyVG+m9XbaC9u/cYAsSCnHWQ/FPK4HkbhiJ/ytZRCkF0dIT/AQEScpygKkuOoICrxsdmXO21AxWESwHjKgZfovgR2cTgUTOYOmN08K6XvGTL9KE/86H66gYDz8Cg0xJD/b/39aJi4Y65i/8ZJt8J8PsUXxHFHjI49TmTAsIhVvkEUcK3khUpxcDq1iBfXOrJza1v5RIEzNRFCBi3+vwmtzPTOYmRfrGpzj0VJAjq6O5b
CrCwKM2eFlSHNynuiMfHZnzH67tRJoj2uX0fJtNSHx2UlUC2uBT6SAB+L6vrjSA6QuE5YBIxW7KqsywE4li+ETD+D4hbAHSB8V7bDv0ZIrwGRkcOsyJOMJ2dC9/DRvBeAIc6RjoFxotMSBDoDQbvQ0A3gJmFM4JunBibvkIy8NQR6rsWzCt2vePvhNfe3XsNARfn+gPckogOLLfv4P0ShKWO2UERhqXuyX/S+A0B9KpHn1MM/lIiGhG7qIJHqKyap227DuDzZPUT4zEGCYILMPgIAj4i2SE+RWbyxFjsrr/4bGMMzE+DjMcAQeCF88Q2FoMTrTht40ORbapfuUo5jzklTgr/S6BHRT3yvvPTAAm1Zto6sYiKqTPUu4gZYiMl1Fb2R2xeBMYjYBon8HsZaJctbEy4d7KFz6wkDuUQBAP/h4BxAHNsHdpOQMKaN55jit8nDeP4rMpOvN/Tc07r+ORbK0D0gXR95tFA9t/WHzYA9HiuLTY3xEciNzm/jI5Q+Itg+pZkw2MfVzHnDgPj47IxIdCN42PTL3EjCWWCcPo+ACjYOaY7X+BlLf7s67K6nghC6FCN5qb1lJ7M2UdM9p+mxie/4NQfz5kbfm/AtI57Qg1V8HidIFwWyhQz7jYDfJl9l56pmNpCSw4zOHkTQEfY5XNbMCUngJfZoHkqO9fZPUve0ZxM/hogoVMXzwTAp8ajEWHVlnv8EISYLxPJA65l8GfzweKnTcbF7z4YD9tVDpYa6hVLty9i1bzT9s4mg1PHjYys+1+1D0lev3g3M+b/QbDURNnoAOKn+1uCfIZsd9vevbiHYApizy6MYjf8rdYgf0dWXsyToEk3OEixLAdT2XxL32HRnQCOz5sfjLtTAePz9oVL/F5UjeoVONM6pZhiLtg3UikirA0gednw8F0v22W0qRSvcYwlCLSyObj5Yr/3YzIM0uuSerC+gjuI/EpfZabPT+6YPuBcVNtCSw43OCVOwc74dCvi0YHr3WQrRQ2dUfcLR2Q7EScB+i9zMvgtp7rSQ2XtublSIggXcAt2jlkAnLpusXPy41ldTwTR3tV7AZHlFJhdKFJgvnLmwfium67Ua8fqRRASK7CibeUmf9L4IcBC35t9XkCK58XjkefsE1OQUPMO3Lvzwqoz93eboYHbJBZ/l8TfkhK/H4Jwmk1n2n8IKe6LxyP/1/2jkpzqiK6LD/eL003uDqatOzzb4Z8jqixavwgL0B4Kn01MP7Qv+kQ4JzY8IO5x8h7nyQoSWZzvZMbiboB6dv3m70TnNV7it85Q71nMuNU+f6mISs4yEpj2+lcYuLJgd+pCEJlAc2tAdKpNJnE5elk8OiAwlN6LibJtPQtnGsnAnQ71zZjJfOroSET4VpX9VIggnkKKT3V+U3bhpKe1IqcuvwTR2Xn6/qbR9AABH7O1vZWIl8aGI0L96/q4qBrdNldC+1P8kXzE0p1jtqaOjvC7EKRBMISFT+bh+5DCIpXLvnohCNliSkT9429NX1rM4kL+8QNuBCGPjuuqKioYNNlJx021VximXWlRKlBPCQ/62MjAVySLpWq474I6LV180Dg+Mbj2z8VmpoRQnadaiUoNnqoiR5vU3t0rThJioczO49hEK060qz8OP27p3m97a/xn9tOAaihpJ+mWYjjghpN8M8D3wMSSYt+hRRJ7bbuNmYVFjK378tD7MqJn4KpEdODfVAwIhGUVAiRU2O/eBbWarMXmifi9AgShTFgybYsZTM51u8PyTRAOY5P0Bly+cZFh09W1OGSSuT7ry2aVIfwgPjxwqXOslAiiYHdUXGUk+zCVL6vrhSDausLdBllOgXtngH6DTD4hFovEVSalLOKtG0FITl3SE4BXu4V18BMTweC8jYNr/mZ/T7KrLqpmkhCYKxaqJwjZRkLlEj/bl4w6Tywq4t5HPClmWpAY6b9P/I/sdz8fkksdEwyclIgOiFD31iNdfBQv/zO75wcBEndYjzPoqSYyrxkejryqMse8ypQ7fyVm6653ECWsEQWiSxbWrSb42NFoZGO5WJRNED7MeyW4e/bDD0FY9xfJNyMAnWxjbeXNd+adwvWZ8ByS3BOPR4Q1We4pShBSyxfg2kR0QFiDuD4SXbfyZXW9EEThDpVHA2iaH43esVVlwkpMQt1OEGKnehMB52brZcnlb7E2JQYCb5rMJ4yORKL2d+XmyPDUkxaaOHPBTjrbhipBSOaI8iYivTD3BHck97+NAKEu2wjm33HAWJu9TymQ2eUj8MI1fae2/x0AwtlykpMTtXf1ribCWbbxq6pVUrG5IH4v8Ffxschl8XX2XXZJLZvnbqdLL7llBi5QVH8Ww6NcgvDTH0k/KkYQMoz8WiIKrCQkVrDxEeWKEoRkZytddJwDJJs0qpfV9UAQsoWBFYjRiUNnV++PmfCZ3MIhsWJyIZJLE8MD3y828e2/d3Wd8naTWoW11VG5v7t8YIXmyJ4qwEIC87DGUiaIUO/niWG3PPofg3csGBn5+XY//XYr61wgCXjw73u1nP7EA7e96af+9lDvFcS4etc7hSo5yXciio8xcM9Oi51VLcG9B0W0AT/tllNWttP0s8jlyN45RhKS6ewMfxwGPcjAOzLvSRebYv2R7o4VncSK113eJTUzLXKaq7q1KbnkrhhBtHf1zSdi8Y1n70SLnv5lckoMTqSbV0+CcPF9cN05FiyOhSlJlS6r64EgZIstE3wv2u2OD0ymQpHunID7AZKaVLpNTAY3UdpaJeer4bYoSNp03b1LVEGeO31Vgig4WjN+mhgZWKaisy6+IBTu/AF+FjCEI6HPh4WJoM3EsfAk6WLtZm8nbXrI6OcA3T3rAPOZajiEZRuULgA+FrkcQTgXJAlBlGMZJ9lQ5Yfy8XnqcRvYMk8QearLYpOnmgQh2dhJ1cjFZJRtgCEhY0+CkPk++PFpkFuoFL+srgeCkJm6qV482gfHuVjKCMLF0qbYGKv97rIDczFHlqqZJHcbnt7xigRRoJYhxk9iIwP/otYx71K+nZH8NfrHICVDTrPNrq7FR5hkCqcyFWdKcUpaTybdOj4+/TfFjB78iZe1DAoO2e5nhBmR7xzMBXNTThD5RgmAFB+VPvj59lXqy5YpkyB83YVUkyAqiY9zg+abICSXRn7GxK1s0YtePyCodLIUoac6QQhMJPcKBZddMiIppvNUIQjpB1vBTFq7gyAEpkX9COSTUZiD3hyk5NVO0ill7op3JJsOX4tctt3dThBlkI0dO00QhTOpYO2UkL/rCUJqIlfqbHW8V+wUogmiQkCLajx0uBIDhALVkcQSSOog6XVqkl1sTlWCyOKQiUV0SSYWkdMD322AN5HBS2NDkYfLnQFThyD8GYZUScXki1wb9gQhCd7pShAuDkzlztvs+56e1Zog4GtCljEohX4Ijl18e1ffp4isAH+Zp3imQLUThNQ6qKoqJj8mtGVg6nyV2nsWv4cm+XgYfLZLyAP7OyKG1wnxobW/LUeGtPmsVjFpFZP7LFLRvrgRhMyPQYSX3gSQ+K+/h3i6LcyveNfzsroeCKJSO1z
nAitbpCRWIL4uxfwNRn7pAnM32zFTcpHl6SCZrVmFIETZal5Sy5zXKnnHUSrmwgGttfW1D6YosJQYfSAr3Hb+o+CFXax9cTqkptQQpcM+W48fS5zcWNb4ktrpT1Gq5ZkTn6miYqrcJXWhP4Xs+5AShNyrt/jlstuklTmMAe711QNBWKEWCm3bXcOLuPXdaWopvaSWfcwV1Md7LSYSVWLu9CIxUlCKqaVKEIXmo/Bt5mq1RbgFzK+A6FER2K8luGW1iOHjNDEuFmiu2KJbhd+pI9R7HBir7fGLhDc5Twbmjo6u2Vxqmy4EKfV892qjYEGSXVIXBvIsaYNTTVKfKgThtBgj4G8w+dhYLLIruJ/CpJFuICRWmlKCkNl0F7uY9FyECoO8ieLunrih3i8R4zu5Or0ChIV6b7WH0/bSuSvgllfEaaIKyD2T3epVPYVUyDsSmUG/n4B9CfiDCfzeMPmWYpPHGXojS2LOxUHVD0SZICpg012YvGhXEqRKhJ4XG4VMBFzhKPeSiLLJbD7Y2vTqumwgOYtI2TwZTCJq72wGYq3BLUtVA821h8LnENMttnlUERWjJLGTL49bqSmk5FvcAxzlfI1HNe8gdrujnHRSuERu9bPgFsb/AVxj+dQJQUgu+iaYaYmqw4w0Po3LyUBiMfaGwcbJIyNrRTYvpUcSmE3JYakwpIJwBMOS8STdbPMgVnKQFIKqEoTkAly87unRbQeiWOBBWagIP/GBRFuyGEFOoizXY7tSF8rOSSLxTyhqQWivQx4fSR6LSRJqoyDMdbFJLImt5Wth9qp/qpwgpJtJn74iVgTlyf1X2h14oRpqw+Wj8q1acQ6WNK6LS0ynOlExicVhGgJYB9D8bH8YSJgTkycXSxPpFuzM7aK0q2vRR0wKPJDn5AY8ON7KYZW4+JlLSRGDyB5q+LHUxORxxWR19lMEjDNM+oxIZUrArHTf3UNrSBYmpWB9UhNaP8H6uvoWEvEaW/KiwgyHBpwRRreAeEF8OLKh2ILlEop8jGGcmIiuHcy+L9nV+QqeJsnU+Cev4G7F5M7+Xk6wSdcw7C6LUVWC9XmooVUxyJabKgQh+iOPll1usL7CSMiirQIVk2Qnq3QxWWzAXE4m0svqeiEI0ae2rvCxBpFwfrLFXafbWoLmhW5Zr6w495twGYiucoZL9rCkEfro74HxuXwsvdvK7HKnkUHX5u0IrMB1WJEYGbBn93MdJok6Riy8Iv2n5dLvxwJI9QRhTfbOcAcbVkDEfbLCiYvJYsSY2d2KccklVhKXbM1NWz5rV+3Ixk8xexu1d4V7iSzVz66x58IIoy5Oh0pE5BL1t6Rc4bLBlYX7Lhau3lKrdfeKtJoi9Wt+4h9/4b6TDL48EY2IkDG+w30z8+LESOTnxdYWld8biSCKhURxCfctzV7nxMYl3Ler5VweQch9H/zp3b0GSx6vpvCyup4Iwi23gzhJMPHy0eHIU/Y+W0naW5r+G2wFbrMnm0nvwz0un11OAeKtRxmBFYnoWrHrzfvQRD5aYhKB/uwJjQTzF11k8+ROJzqyR0a1/+wriJ4fgkjr+GXEyI+aFFw+Orzmd/Y+p5PMiMxzJDJs2T2WpTHt3RMSYSuYL4OJO5yhr9vaPrVPIDh5FRNEEqOgDQjXRV8aQhlwTS5jbT6sXMJY5Uj2pBxWWnlhlCQMckvi45EsKd1cCQmDRMIrWXIiMZYvbjJOJeL/v9D7XD3UvTIOk3QPCJ/MlnfzLPd7h+Bs3+/7EvVc0bwTbgmD3PKbZ1Lunsvgbzuy+ImMc1+OD0f+S0bieQQhifCnHIFVZZDk1lGFl9X1RBCiXx4pMe1pFMVuyS1dZQ6eYjtxj/Sm4ryXS79ppTk0cZTUTBL0PAzztPhQRCyuSo/LLjjzrj8LNp8E4YWvaP8lBqIizSrA7wRhrsNkWpQZI8LS2PDAOllnPVOaAmMgPA6mDFbmxwCaLUk5OsbEX0gMR1a6AOpCdFbp7QD/DjCezKyyIhOeiEArUnk6nsoujKJymfoy06hCylGRNTCXf7wmKUf9bm5UJnhdnyC6+s4HsfOkn3MrIMZQc9O0zzqCPVIFUo6K/ZlbemILVjtByHwffF1qqQyUxOoEzuxb9UYQol9H9oQPDCaxJj/7V9EebyLGrUy4IrdrUTBfLTHBfKYJQQ50RimOVhJdeGY9owviI/03FO1tpoBfgsiScPMOEmlEhVrLzyMW7gsTw5FVXmqM9Om4pPotAlLJjJZO71mQ2U+1LyJF6erWIF/sprpUrUhWzmecqGwVAxCbDfD/l6tT4UI0E3FUjIc9JayK+KmdKsOBiVb+rMq9m0qF2TL1TBAeBJ4R39WbnDq7+z4rORWoQCM2B/89OTbjq15xwHIEUW4WOBWJRBkXD+08+/p6JAghu0deV0n3eZiDgWVImocRkNOjFjtBZCsSag5qmvwGASJ4nTMBvAzupNvxUnVsXE54RUNrOOsvhSBEHV45imV9cFPzufXXVr/Qrb9LBZcS27hg5wlE3D/NUGkDgEjSctnMg3igmhFefcSJSopwOONjM77TtNfrIiR7zuRc1XFNtNXE/F1mXiQ5jcm+l6eJ8MXYcETk9Xa9r1DEs6BYPROES3pbGyfjxSBSPdHouudl/U+rKq3Nj1AzF6i1ne+IOR1g419HRtY+WgzPHEG4xLNXNjks1lD2d5dkNXmX1fVKENk+pPW0wbMJdCbSnqpvz/wmHJseZsL1sw7khPjYVaK5emFnqUfGaQEYyzh9IXugbRJs3pmH+o9g3JyaTN5dzFpJZYwK9aHFQ2tUiiCy9Yh7n6Zpr5+QiWMkVDHZPqfAeJFAg8LCKouxSr/sZUT9wdatbQEYy5nMLoBm2khYqINeYNB6E3Tzhmj/s6UsWJYVW8u2o0FYImljDOCXiI2RFMybkjv2Ha10NFcPTKgtFP6wwcZ5gHk8QCItsLhnSQL8HMNY10ST12eDBhZ8iz7zM6RjUqXCBCwC0Yds6kFxKvvzztzoDzBozayDUo9WkxzrnCCs4cos9CK9rbgnsasfVUzMaU533weCzGczWSH/7euSiBz8DDHuTxKt8jOniyYM8vvx6fIaAY3A1EGgIOlSBUOyTx2Upm5PNEFM3bHVPdMIlItAYbgZhTu0chvV79cPApog6mcstCQagYoikFFv/jgdIgSPEtF9zYHNv1QNASK5kyopxlJFO6UrqykCmiBqCrduTCNQOwScITyEh7zXZadTMqeTnd/3a9dT3VK1ENAEUS1kdb0agd2MgNQykfnL8ZGIsErytBSS++P4N1jYzRDo5stEQBNEmQDq1zUC9YqANCgbYJlDJycnvy6zerMsyFq39RKxCI9h92OoSCKjesVKyyVHQBOEnhkagSmMgJcXtTBrBdEomMYt02k2PwGif5L43RTzIp/CCO7ZXdMEsWePv+79HoBAJuz37fakRD66vR2gFfFo/23F1FI+6tRFGwQBTRANMlBaTI1AOQj48KLONiPCXiRM4vOcASnLkUO/21gIaIJorPHS0moEykIgz7MZ9I95HrtWMEj+AwPrUk
Yg8sjQ2hfLaky/3PAIaIJo+CHUHdAIaAQ0AtVBQBNEdXDVtWoENAIagYZHQBNEww+h7oBGQCOgEagOApogqoOrrlUjoBHQCDQ8ApogGn4IdQc0AhoBjUB1ENAEUR1cda0aAY2ARqDhEdAE0fBDqDugEdAIaASqg4AmiOrgqmvVCGgENAINj4AmiIYfQt0BjYBGQCNQHQQ0QVQHV12rRkAjoBFoeAQ0QTT8EOoOaAQ0AhqB6iDQkATRHur9EjFE0pP0Q1gVHx44pzoQ6Vo1Av4RKJijfqqwYiLRNgL/0QT/CsHAXYnBtX/R0VT9gKjLVgIBTRCVQFHX0XAIiKB1QTYvpRSvisUij1e6A2URhFQYfpqZ/n3WwbwuEomISKtT7unoCE+DgeUgTMSjkRvrrYPhcDjw4iZaBMJhiejAV+tNvmrIowmiGqjqOusWgba2T+1jNE98DkxfBJA0wceORiMbKy1w5Qkie1jGg5NBXvbIYOSVSsu8u+oTC+9fXzFOZja/DdA/MeHyxPDAf+4ueSTtUnv34jmA+QMC2vckjYUmiDqahVqU6iLQ2Xn6/mw0/QbAoZmWtjYaQQi5GXgGBp2WGOr/fXURq03tHaHelWBckG2t3giiPdT3r8RWCtaAJeMepNLWBFGbb0C3UgcIhEILD0pycBiAyIMgnloShFJb1m76r8n9gOb9mXgBiJeJXbUEvqfITJ4Yi90l7iYa+ukI9d4Kxtn1SxB77p2nJoiG/rS08H4QaASCcPZHEMZLL5NYPH8A4O2O3+9vCfIZg4ORv/vBod7KaoKotxHZJY8miPodGy1ZhRFoRILIQtAxd3EnTPNnjrzSKWasSIwM/KjCUNW0Ok0QNYXbV2OaIHzBpQs3MgKNTBAC985Q7yJm3AZgmm0cnjQnA8eMjq7Z3KhjowmifkdOE0T9jo2WrMIINDpB9PT0BCeSB1zL4M/aoEkR0Vmx4f61FYarZtVpgqgZ1L4bqhuCSNulpxbDxBKQdSkndklJBv4X4NU82fyT0dHb3xA9LNdRzrJnfjlwBJF5DhjHAngPgGAGvc0EPAHC6uYA312ufrenJ/y28aQxHzDPBegTtiTxSQB/IaZh0+CfzDqQE6r27QULHePhliY+NSOrZZJHMC8B8Elbe9sB/h0RrSy3X7XAb/Yx4ekt47QAjGUMHG7rh/ABeIWAp5i5PzWZvHvDhp+95jbz27rDsw3QgwD2Vfk6GDglER34hUpZrzISM1elS+pi7XZ1LfqISYEHABy8qyyvbwnuHR4cvHVHsffL+X327POagq1b2wIwljNxKO+7sZz78Gdi3J8kWrUh2v+sm2Of9U1M0j0ga34WfxSshnp6zmkdT77ZA9BpAHcD9G7HnY2Y/y8AxkMMWjProNSjXt+bk7Q8hcz//lyLzplz+n6BpuBpIHwaRB8CY79M4TEAfwboAZPMG0eHI8I6jYsDU/0Su50ghF16IDh5FRPErii7SMt6/ipAn45H+3/ZHuq9rERPauqcGz6aTVzvYhnibHc7iL+BJK6LxyNiEJUfy+kniBVgEg5w6MdNAAAgAElEQVQ1zstFST38NBm4KDYUEWaYnpPDjSDGx9FMQeOHzHxGziRPLvF2MP5jYseMazZuvHFSuVM7Z3C18bMWjxR9A4zzHaoUNzGTxFiZSjZdmd1A2AtONYIQp4jx5AGrAD4z208GXgwi1RONrnvex1j6KUrtXeFeIvpvAO9SeZGBBBMvHx2OPOUsX0mCsPxagskrQSzMZBW+sxxqnt9bJQnCxxpnCeeFnQr2lSyzWwmifW7fh8nkAQAfVezUGDOfaxAdwsC3c+8o7DDEgk0B4yoGi5112p5Z+eFRM5g6Y3TwrpdUXmnvWXwIJVNrAWpTKW8rkwLh+xNvzbjCa+GWEYQBPtckugnAMT7aHJho5fM2PhTZVuydWuDX0RH+BwSo32cfsh+V1DdgqhGE6Gx7V9+niPin9nnDzAsTI5GfFxtHv79bVlSbcBmIrvL/3WCMiS9MDEdW2Tc9lSKIjrmL/xkm37lTw/A+v/3KlE8y+PJENCJ8HPI2ZZUiiM7OviPZwNoSZBQb0svi0YEfFtswlth3pdd2G0F0di58DxvBe21OS1mBUwA/DRiJNJvyEQR8xHa62ApGDISTVAlCHI2bp227DuDzClAhvEaMxxj0J/EBSNrLvqJkd+7ZL8aLTEgQ6A0G70NAN4CZhSNFN06MTV/hRhKFunSOMdPLRFhkq0tMsBGAns+0JY7z+/ttS5SvBX4ebYyB+WmQ8RiAFIhbAHSB8d7CBYsHJ1pxmp3w0mTNnxOnEWbeiwinANgng8MEwOsB4285XAz6UXxo7W+Vvh6PQtVSMYkmOzvDH4dBDzLwjqwITPhKYnjgm+XK7Xzf5WI8Bdtctt4hPgyMj0tOfVsZxvxEdO1otm5LHTT51goQfSD9N/NoIPtv6w8bdqqKdoU/YXNDfCQiNj+5x+M7s1S3DDwivjNLNPB7mfAJm0rHXpVU9dfRFV4OMuakC7LoV+bf1v8/CxjipJ/5mZ9tadrrOqeKryMUngMmoa50fndJgJ8D0SiYxgF+JwhzJfKJ+f7l+HDkv3YXSewWghCLQcte225j5j77SBHwYNLg8zcMRcRinXuE7s5obvoPgqV2KNz9e58gqCMU/iKYvpX/Lj9tMi5+98F42KmL9GjP0+7c0pvvoAjDutfIPsIU8W4zwJc5+yXmbltoyWEGJ28C6Ii8d8BfSkQj37PjkP235LLVXkyqcknb0wu5rFOGTX+NFHu0JWSsBX7t3Yt7CKbYMGQtdFIMfKs1yN+R3QPNmRt+b9CkG5xYe13YNvoltef4K5yiZXPJ62+Wzry5Sdx3iLuz7Go4mDLwaclchjgZ7EjSFwi4Ml9dTHe0BDefPTg4KBbvgsfvJbV1WT+5/0omfCb/m8EN5sTkv7ncSVF7qLeTGMIkOF9jQXRdfLj/YrdFuJQ7TxcCs77N5OTk150yiu/zhU34pEHGjxynDaE1WVyN06HKfNgtBNHWFT7WILrHvtsgxk/Y5Is9dP0uC5W367uLiuEhpLgvHo/8Xw+QqD0UPpuYxBEvt2gR4ZzY8ID9eJ+rQrJrTIH5ypkH47teF2KZi+wfArzUJs8LSPG8eDzynFNGD4KQHunt73d0hN+PgIV9NtyE+PlPKYPnyT76WuHX3t17DQHiI00/RT5aUUQQcvMO3A1Qj20Bc72wnSoEIVXRKF6UqiwK2TLt3b3HEPBLAM2Zv6mY1FJHV/hykLUhyz4vs0Hz3EKD+CWIzs6+j7LBv7bvzBm4KhEd+LdiO+1QKPzOJOMX+epfHg2gaX40esdWGT5+CcKFwCz1eGIkIlTqrneMR/aEDwwmscY+p8WdhDkxebKXMYafcfVTtuYE4XLJpgSAOHk0TXv9BgLOzeuk++6JOkJ914J5ha28kqoou0y1d/eKk4vYEWUejk204kSn3r6tbckBRlPqofzdibeqyN6HzKllvRUMLNs48PVYdOBrygTB/OX4SESEQfe85G7vXtxGMO9zWPasiEcHrne0VRP8D
j9u6d5ve2v8Z/bTgKo1UUd3+ATAIjxrEfO6sNUE4WdpsKwFP0+MXadYxVOK7FtgprMSI/23yyTwSxAdXX3ng9juHKhCXLmmJXc4fzKDyblud4x+CUJCYL5URZlNnCBAYYklnt1mylxzgujuXvS+JAKDBMwqpfOy3YNb8CyhhgiYJIAW+uoM0O4nANnkldQxsVO/eVIiOiDIIPd0hvoWM1sXh1kVmOsJwO0zLayDn5gIBudtHFyzS0cOwOUEofyRSHc4kh1orfCT7Yi9FhQ7fm09C2caycCDAIm7iccZ9FQTmdcMD0deVSDWipieysazmncQ6U3W/ncACOe2LcAzPBmYW0mHOUkfIi3BLWe6qYpsOFBHd++tAOYB/Be27hOM1fZ7CDtmfgmis7PvODZ4IQChlp1JjJtiIwNfUaU/yanYcx74Joju3q8x8O+2sVHaAOetJ446xF1ZLUyZnRjWnCDau8KnENFd2YXUr4me7ONwIwhnWyA8hyT3xOORv6pOJll7xPimY0JSe3fvTfaTDQO3JKIDy4vt5u1ySMjzTZP5hNGRSNReTkYQ5HLacOunRH1QoAaoKX5dvauJcJZ9wat0xNKpcoIQCrh2B14A/hikZGh4+K6XVed2sXLtXX0LiXidrVxRFWaxOmW/+yWIUtrI21QU+sZUjCCkm50Swpe3dYW7DaJfAdhbyO53nSwXo+z7NSeIzq7eq5lwxa4O+GdG1aOvsy1xCf73vVpOf+KB2970A2B7qPcKYlztJnN395n7pjB5n12vyYRLE8MDwnxO+enqOuXtJrUKq4ejci8xXRAf6b+hCEFIicSr4fTOOzhkP10x04LESL9QPVlPrfCz2io8gYk/jzFwz04rlFUtwb0Hy3UEmyoEITcTlZ82lSefpKDkBClKCSvD34LxIzMZXF+JE8tUIgiZhsT5XamMieT7lGouVOoqp0ytCaJg5yMup2MjA//ipxPtXX3ziVgspK7x2aUnDad5mnKjBWZueZdakkkhar4fIF+hmBncRMDxdisjyWlFpmLy1KHKullsp1NL/IR8sjsYh9yWVz0x+jlAd886wHxG1fM8W8+UJogqXFJbJ5WCO7iC2fQSCOuZsa41uFe0FBKvBUFYprWpNw9npoXE6APhEFtPKnaCcO78ARSaUiusOxKTbKiqXRWqVy5SU4IotiipSl2gQ5Rcnvl2xlFtPF0u7zjv1xnLV1OSvkkWuv8xeMeCkZGfb1etu5jqrJb4ZWXu6lp8hEmmuHC2m+G6dUn0dT2ZdOv4+PTfqHiETxWCkJ1YnafjIqbQbpgWqKmEpVjTON1BjBMV5paw798Ixo+LhUCx11Upgsjm0jCNlkPB/C4QzyHwEY6wFrJuVIwg2rt7FxBQcYdFIfTuSKSkCUJh1kuK1BdBlLh7LPAWtZHR7iAIgbOIyRVImd8nwmk+PHeFU+DNQUpe7aWDnzoEUWDoUZDlrFIEIcbEf9gY64sRMbMSDOMLiejaDV53ceUQhOV7kcIZYPqcw6HWz5etCcIFLU0QfqbRrrKaIErDLftW0QtViyhM8xIChG+IxANcKsAmMnhpbCjysOzXqUIQEjWG2F3m3XlVkiCyWGYXY2J8MRPLTCVkTVIYUIyPzfiO2ymvFILIJFLq3WmqfQ2Ad/qYjkkwhFpMnFKz/h2aIDRBVO+I5tdszsdklhYtEs1VqfpM9MsIQCdnX7AfYSulDlQSxrsQtfcsfg9N8vEw+GyXcA72GrbAME6QhcuYKgQhMT8tuMCsBkHYQbZUTztoLkFEX8ZxLmEssq94euv7JQgrLMter38TjM8XOWWOAfwSgzYaoGHi5P/s2LHfM8FpWw93RPitJkEU3QxV4BupWhU1PUFYx1VH/tlSEoAX7KAkenqZ81UpF+IqyEti46RKsVxQaUuUcX78Iom9Xxv4YhZTtcRPtd+inFgcWltf+2CKAksll43pqly8sKcCQVjqngDWATQ/h1sJ5tt+MFcoS11diw8ykZoPwr8ANFsSmdnVT8cvQbR3hfuIaLXtBCBETIL4YTbpDpMovldw2gtuF+Z+N3R+/CCcBjQE/A0mHxuLRXbFllIAtF6K1JwgCkxGS9CfF3hCunh4dnb1/jgvXksJbakMlPAcpabUEAEflO3GVerwU0ay0HmGMpDVLbG8KtiF1go/P313lKWOUK/YvYrFIqeGciPMqUAQnZ3hDjYs+/hswEFhI+/b56YMzIu+auV2MVOr80OgwHXT5IcgMuFV7hWR522CbCKTz4jFIvGiwgGoJkHIN4vVibSr0tdyy9SeIJwmqoDfxa3AKc3tFCJxyd9CJs2Lxfqf9AFcNtyE8Fp9SUSZZDYfbG16dV3Wo1SmrgH4PqSwyE8eiQzR3E/AvjuP7n8wgd8bJt/i3H1IFjrfrvgqTnC1ws8K+87myWDqEIcEBmKtwS1LFTx2rWFsD4XPIaZbbGMqVRk0OkG4xPgRZpSnxqMRQRoVe9Kh1w2hfpwLmCL8SyAA8yTVnBNtcxd9wDADIuJpLn+EmxWOH4KQ3L+kvOKjyQCROABWTMU0u2fJO5qTyV8DJJJcpR+FuGJOOUXECNPge3eG+REBDh8n4Flivn5kJPJCxQZZoaKaE0RHR/hdCNIgGO8vZbctdd5xO0GUEdQrK5skLorYsV2biA6IvBK5p6O79yIA19n+9IbBxskjI2uHFcbBKtIZ6j2LGSJEQfbyT+ocI9cve0fMtMsgi4cFFL5fTlA0P/ipkJUXhqo7wkYnCGno7SqdiiXfacpPzgk/d1h+CEISR8n3BlMSn61iBJGOflwQ/81v2B2Z/4nffqouO57lak4QZQIod9xxIQhLX2tgDYhOtaGwBcQL4sMRYXrn+bjkAB5jGCcmomsH7S/LUkEK2/TxVg6rJOTJeE4KL2Z7KOLHUhOTxzmjOLpcQI6ZzKeOjkREek3PRxJNV7oLrRV+8vhc6jGzCggGkDoONjJBuOQWqFooaKmfDFH/+FvTl6r4nEg3gkyLEiP9IsxO3uOHIJynWr86/q6uxSGTzPV2FR2AShIEZNGqAfXAnelESKY4Edqs9/iuluDenyrFEbHYeuD1++4gCLjk1R1MBrHkkcHIKy4CZ9MeClVCNvx2uqhHlEnZYAkdtUKcH3l7zPeIvNkS1ZHQh38PDJGcxvbQbS1B80Kv3NZWtjaDri2Ib89YkRgZsEettOp1s1BR6Zc0C5fHLrQW+MlPNFAicnnIb0iDyklw8x2iRPVjq1SwPstip3XrMhCJsC2OlJrqi46q3PZykvAnqlFJC0J+e8USchKELHpAVq6CKAriB8Uoxpn7mzsljpivk0nHxWL9j8hw8hu00GVjlSLgqvGxGVd7EaxLVkXlzV8p41x3BOGVhEaWl9k9EUmmax4E4XIKEC9uBfNlMHGHc7H3yCHruWi5nALEDH6UEVghcxhqC4UPJSYR6C8X5jvNee6njyImjCKK6SUzD+IBeyiKTOrIPqEPdYT59lSF1Qo/l53dq8z0+ckd0wdkH5XAzmCsciRbcv2YZPphhTwkJX1zpRKEIIRgcPs/UBN/
mMg8FQyRe7rAD8TP6bSkDriHP3FNeiPaEd8ONU1+qyC5F+EH8eGBS2UOcwW5QICnkOJTZblQXOJDjQlfC07xtbI7P3Fp3mTylxksQvpI8957hZeX3cXBME6PD62NuWErPwVYbLbeYHxxZCTyB8e7Hvneq7sRqEeC8E5jCbzEQNRKGeieynBXv4rEqXfJ9JZ9fwyEx8H0u/QfzI+5mOmJSJZfSAxHVnoB6pFmUKz6r4ExBNCrVupME0c5YsJkGe95GOZp8aFIRqb8FmWX1EJwAE25kva2RErDdK5qZ1J3pR1hjfBzOYFZPdoO8O8AI2NcYPVHWLEcUDgW7h+T3JggWz9Z4cEJ5tdi0chtpS6quZ1uqPdLxBC5Oarw+MuRXo4AcnWJVWN+2kwxn5nbABJ3i85F2DMHi2QBztSPTQAJQhpqbpr22Yx6xSs+lOo8eZWBHbaUAyLOkVT9JQSRRD4Wfxae4q8ANA4yn0uNJ890qoJd0rVmhyO3xnmmBCbEg0guqmSUXj/zYbeomLICumRSKyb/dgJWMoS9Nfa1CiskMkmrIuhGAML70u/jK4F4ecnU6XkYdIZXXmSZLp1AVzJYxMRXiWMk+u+rT7XAr8T5kB1LkaJ0dWuQL/ZS57V39V5AZBkTSL2AvdQbfiaN5ATh53W3ssJa7U5OmhcWyYZYibaydcgyK/qp/0k2qNctm5yoSK5ytjeRn/HNZ3woh6w8zMHAMmPS/IpdpeuV07uz8/T9TaPpAQI+Juu4l/qsMxQ+iZnEhiO9Vvl6arcRcBNrtxKEEMqfyzw/TaaxLGWYZp4npAJBONr6rt38zmvMRLo/Jl4+Ohx5ys/YZo7a36A0keXfmcgrkuaSlhV1u2ylYOA1Slr256Fq9Mk2VlXDL9PGBTt3oVcBmKGIucjvcZlTrSZ7V4GEVJPieIpWYYJQjmukiJfvYp2dfUeywTcX5HN2r0mEav8xTzZ9dXT09jeKNOhJQrIFOKN2vpqAC93URo42/8iMK2YdzOuE6rXg1FLEGixzihAZ8WRhXzzvsiwVF/N3mXmRoqzbQfwNJHGdHzN534Oq8MJuJ4isjGLAJ1J0GjN/Ni/6oqUq4Q0MWjk5NuNXQhetEs3Vq++Wnrd1a1sAxnImswugmbZFXBxTX2DQehN084Zo/7N+kv4427XUM+O0AIxlnM4DfaBt97p5Zx7qP4Jxs5/ol17WOFZEy1dwAjMuAqg7o1ZKgfEiDPzKZP7JaDTyWFl9qgF+YoxaWrYdDcISyRhZIRSIjZEUzJuSO/YdVbGsyY5NGiNrrl0K0GH5qjfv/MQK35RVpEyCEPPibwBFGfjlZCsPqVjCqcpWRjma0933gSDz2Uw43lKPMvbL1GepXEjcHxBWj7fwL/zKnL5TIpHe95MO9aHrAmw55aVSnwHRSUg7qmbVqOKE/OedDnUPMGjNrINSj9rv5CQm3EXN0tMOgOblYJwEsjJi5k6hKqG4hZ9ToCnVx+A+gP7R1seMuoqfAIxbWoLmfV6n4DLGz/erdUMQviXfg1+opbnmHgyz7rpGYI9HQBNEA04BTRANOGhaZI1AAyKgCaIBB00TRAMOmhZZI9CACGiCaMBB0wTRgIOmRdYINCACmiAacNA0QTTgoGmRNQINiIAmiAYcNE0QDThoWmSNQAMioAmiAQdNE0QDDpoWWSPQgAhogmjAQdME0YCDpkXWCDQgApogGnDQNEE04KBpkTUCDYiAJogGHDRNEA04aFpkjUADIqAJogEHTYusEdAIaARqgYAmiFqgrNvQCGgENAINiIAmiAYcNC2yRkAjoBGoBQKaIGqBsm5DI6AR0Ag0IAKaIBpw0LTIGgGNgEagFghogqgFyroNjYBGQCPQgAhogmjAQdMiawQ0AhqBWiCgCaIWKOs2NAIaAY1AAyKgCaIBB02LrBHQCGgEaoGAJohaoKzb0AhoBDQCDYiAJogGHDQtskZAI6ARqAUCmiBqgbJuQyOgEdAINCACmiAacNC0yBoBjYBGoBYIaIKoBcq6DY2ARkAj0IAIaIJowEHTImsENAIagVogoAmiFijv4W10hHpvBePsasDAhMsTwwP/WY26dZ0agT0dAU0Qe/oMqEH/NUG4gxwOhwMvbqJFIByWiA58tQbD4auJepfPV2d0Yd8IaILwDZl+wS8CmiCkiFF79+I5gPkDAtpBWBUfHjjHL7ZVLF/v8lWx67rqLAKaIPRcqDoCmiAKIW4P9f0rMX8fQMD6tc4Iot7lq/qk1Q1kpqUGQiNQZQQKCILxcEsTnzo4GPl7lZuu2+rbQ71fIsZ3cgLWHUHUt3x1O7BTTDB9gphiA1qP3dEEITtB1PcCXO8EVo/zfCrKpAliKo5qnfVJE4QmiDqbklocRQQ0QSgCpYuVjoAmCE0Qpc8e/ebuREATxO5Efw9pWxOEJog9ZKpPuW5qgqjDIZ09+7ymlpZtR7PBwuxxLoADc9YuwGYCngBh9XgL/2LjQ5FtKl3o6Qm/bXyS7gHhk5nyfwxSMjQ8fNfLbaHwoYZ1YUrzAEwD4TUwb2DQysmxGb/auPHGSZU23Mo0MkEIP4CXNuNQmMZygI8DcIiFUfrZDvALgPEQg9bMOij1aCQSSSnj4AWqwkW+mCetrdtmMyHM4KNAOASM/WzVjgH8ErExAsaa8fHpv/EaS1/WZgryCTmseZc05gPmuQB9AsABGfmSAP5CTMOmwT+ZdSAnvLArZ/7pd0tHQBNE6dhV/M2OjvA0GFgOon8D8E6FBsYY+DFPNn11dPT2N7zKywgCKT4KAToNwHdti15+NUwXxEf6b1CQxbVIoxJERyh8FJiuA/BRtf7z02TgothQ5DcA2PlOpRbgEuZJVpRXAVwy8yAekC3GlZJPNGbJGMQKMAnnv7cXx88bu+Lv6xLVQEATRDVQLaHOI3vCBzYlaTUDx5bw+pNsUG9iqP/3bu/KCIKAaxn4tis5AH9KGTxvw1DkTyXIlHulAQmC2rvCvUR0iwc2bpCkCLhqfGzG1c7deiUW4LaehTONZOBOgNpKHJMUgW4cH5t+STXkEzK19yw+hJKptSXImALh+xNvzbii3FNridjo1xwIaIKogynR2bnwPWwE7wVwaIE4lroHQwC9yuB9COgGMFMi9iaDjVNHRtY+KuuShCAmAIzbdndbGHiYQG8CZjtA/wSilfHh/otlu2E/sDUaQXSEwnPA9AsA+zv6uRlATIyF+DuB38vCC7pwhzzBzMsSI5F++/sdXeHlIGNO+m/8cQCZf1v//yxgiJNH5md+tqVpr+sGB2/dkf3T7GPC01t2UESyiRBqrVcAbMjKBrA4gXbaVDp2USaYaUlipP+uSson6vKYyykwXmRCgkBveM9lunFibPoKTRJ+vrLqlNUEUR1clWtN62jpTgDHO156kgkXJIYHYs4FWtwZENNNVoiG/OdJM5icPzp410tOASQEkS0idpTXcMq8Mh6PjGX/eOTcxbNgpCYfGYyIhaesp5EIoqfnnNbx5Ju3A7Qwt1YDz5BhLI4PrX3cORbiHqC5desyEAmvaJsqhWMTrTjR7Y6
oFD+DgnfSAt6dMvhSt1NeV1f4QymilQQcnT+IfB9SWGQfc/vvpcjnQmApZtxtBvgyiYzUFlpymMHJmwA6wtZ+isFfSkQj3ytr4umXy0ZAE0TZEJZXQXtX7wVEEHrudMiF9DMw0crneV1AWwvTXq9/E4zP571L+EF8eOBS50LmRhAM3DI5NuP8au7WGokg2uf2fZhM/jWAgzJjsYVMmheL9T/pNdLtXeE+IloNoDlTboKBkxLRgYdk7/ldgNvalhxgNKVEXbn7ECLqH39r+tJiY2fdBwRoFYCwjfReDCLVE42ue74S8ok6JASWAvOVMw/Gd70uoDMX2T8EeKlNlheQ4nnxeOS58r4w/XY5CGiCKAe9Mt8VO67mHbgXIKEKsB4GEubE5MkbNvzstWLVW9ZOe227jZn7bGU3GZw6bmRk3f/a33chiDfI5BNisUi8WFvl/O5L967WUM4CS624eqn2rr75RCzUS2nCVrTWSS/CWAfQ/GxrxPhmbGTgK5VYgNu7e48h4Jc2AlIirmzbbV3hboPoVzsNEvbO/O11Mum4WKz/kUrIJyMwQF1VNGfO6fsZzU3r7adiAr4eiw58TX30dMlKI6AJotKI+qhP8tFPAHxqPBoRH7LS09nZ91E2rB1vTl8uy5EgJwgeDaBpfjR6x1alxkos1FAE0d27gICf7+qqOkbtod4riCHubF4C6HEG35OIDgiyKXj8niA6O8MfNw06l8BHAPQeIoo2BzafNTg4KMxFiz6h0MKDkhwcBvCP2cIMnFIx+UJ9i5n5p7bTrO8TQGdBHfzERDA4b+Pgmr8V7aAuUBUENEFUBVa1Sju7eq9mwhWlLEbZd3p6eoLjyf3vsKsPAF7fEtw7bL/glBEEM36aGBlYVu4ldLHeNhJBdHb2HckGPwBgRqZfrlZJxfrt9btfgiinLfFulQmC2rt7xZ3YuTbyuSURHVjuZ251dy96XxKBQQJmZep502Q+YXQkEi23//r90hDQBFEabmW/dfhxS/d+21vjP7NbpDBwbSI6cInfyju6+s4H8Y9s7xWoYKQEUaNsbI1EEDK1XxpXfhqEmzkQiCQG1/7Fz8InG8+pRBDd3Wfum8LkfXazViZcmhgeEBf3yk9X1ylvN6lVnLiOyr1UAT8cZQF0wQIENEHspkkxu2fJO5qTyV8DdHi5H4NEv7zVBB87Go1s3HXSKPCkBjOdlRjpv73aEDTSJbXAojPUexYzbnUYDuyCKW16/AADayZbeUjVm92Ocy0IIu1p/doHTQROAfGnLNNlmzFEpVRMkp2/6Or9AAkiVX4Y3ERpa76Dsy953eMoV6wLloyAJoiSoSvvRb9Hfq/W2rrDsw3QgwD2zZRTIwgPHXR5vct/u1YEIcNUoR8Fpy3r8n/atmsYfJ4rSeyqOJU5XaxKUmDtI0NrX1Ros9Dip/R8EDS7Z8l+zank+9jEew2ywln8MwNi45ENayEVqVIEIZl/KhColSkdF7X6dSlPBDRB7KYJUmWCENZQeReQUhWTJggx+lKLKCsG08vUu5N0r1EMe5KdSU8S0ZXvOtBc72XaWc4JQhBY07TXTyDGF0E4sgRvb0tWTRC76eNvoGY1QeymwaoyQRRc7mmCcB1oT5PZ3GIMXA5AhLcIKkyZFAOrW4N8sVvWvBIJgjrnho9mE9dn1EUKolhFLC9mkHWiyAYa1AShit4eXE4TxG4a/CoThFYxqY+rsk+F8LLekXyrmwiLwDjZJeRJrmUCrWwObr5YZopaAkFQR3fvhZ6BFdMtC7PXTQA2glj4t/xPS2DvJ1KpN/atlpmriopTfTh0yXpCQBPEbhqNSrsncxgAAAbTSURBVF5SO/0pCPgbTD42FouI0BDWsyecIGo9lOkxNI/eGfL70xnLm9zuPCOLqzObX4Jo717cRjDvs90zpU8G4N8SjFtgYqi52fyz24nF74bEj3zCRwMGPcjAOzL9TjHTgsRIv5BXPw2MgCaI3TR4mZg/EYDETtR6iPGT2MjAv/gVqT3U+3li5OLWMPAMTwbmjo6uEcHlNEH4BbSE8m1tn9qHgpPXE+Es++tu5p5+FuC0r8sBqwA+01b3doCWx6P9Io5XQWhxZxeqSRDCi5qaUkMEfDDbrsxZswRY9Su7GQFNELtxAHa7o9wUu6Qudygz4TJOyCROamdgf2Y+U9VRyzpRpFL3gq2L4wzrY1V8eEAkfsp7/BCEzIyUgasS0QGRN6QoOYiGJQ6AFbuDkG12AO9ggLKxyhDN/QTsS8AfTOD3hsm32E/C5Y6xft8fApog/OFV0dKyUBuyMMxejcpMDFVDbXhZsVSyo7Uycy1X5ko4Exb01cVM0w9BSPxcPAMBynDo6O69CLCCQuaeSlkxiQol9b9hsHHyyMhaEd5D6ZH4n/jup1JDupAyApoglKGqfMEqBeuT6r31HYTa+DlPdX6CJ7oQzFcSwwPfLOcEIdlI+NLxd3SE348AiXhd764WQXR1LfqISQERomSXkxvw4Hgrh1UcCdOJkILizsKeve+x1MTkcSqBK9VGV5fyi4AmCL+IVbi8LNw3E+6dbOEzSwn37WY5owlCbeA6O8MdbFhRT/fZ9YZaVFJJyG/XWEKS0NiRluCWM2UWT5JLYKEeUgrTLsiBA3QnAR9zIsBMi5xJg7Jl/MiXeYc6Qr3fA+Nz+e3QbS1B80K3y3Pr9NERnkYGXcuEz9jeFXkkViRGBuwhZNQGUZeqGAKaICoGZWkVuScMcs/R654EBk+RmTwxFrurIMSBJgi18XEJoe6V9Ca9wAXoYgb+Pc9pjfkemFgiS8ojiZ+1BYZxenxorUgQlfe4xIcSvhY38GTTl2X5yEX47EBL8CIwfdEtJ7TXRbIf+bLCupwCxDXJo4zAikR07QbV5Ffk4/ShNrK6VCkIaIIoBbUKv1OhlKNbQLwgPhwRH2HBowlCfdBk6pLM23lpM60wHGx+AmTFOCowcfUaD4naSDSRSR1K4yDzudR48sysesUjPtQYWAQSNB4T71upPBntICsiqj0J1XYQNoPx/iwSTJCqv8TvfuXL1umRrlXkaM2lzwVxC0wcBcIhhSNDz8MwT4sPRX6nPmq6ZDUQ0ARRDVRLqDOTjP4OgEL+X+enYQTOjA+t/a3bu5og/KGaWSBFIENnXmqVijaRwUtjQ5GH3Qp3dp6+v2k0PSBT/Yh3GMjL+OYzPpSjWX6aTGOZGeDuPHNoj3DvfuWzN9gxd/E/w+Q7AX6fClj5ZQQ50Blec9l/nfqNUhHQBFEqclV4zzKzNOgiEEQWsmw+Aq+WthPwn81B/oGXjldUoAnC/4C19yw+hJKpWzKkbd+Nu1WWJKJ1k0SXqQTtK0JCBfcX6fzXr18CgjBvteW/du3bq2D+D5i4Sai5Ck4FhOeQ5J54PPJXWQ1+5bPXYfmFNE1+gwDh1+M8XcmaSxJjZSrZdKVMZeZ/9PQblUBAE0QlUKxwHZkcvfMBFjmERTrSAzPqgowKAjGAIi1B875ixJAVTRNE6YN05NzFswJmKkzAIoBERjZ7lNTNO3Mp/5FA/anJQL/dOVGlRVF30D
QvB+Mkp1rILRy7uF8INjd9isF9IPoQGPtl2hJhNv5CTMNgrBkfn/4be75qSVrQFBHOiQ0PiExw0qcU+ewVifuTlnFaAMYyBg61zWVRzMIOjJtTk8m7tbWSyoypbRlNELXFW7emEdAIaAQaBgFNEA0zVFpQjYBGQCNQWwQ0QdQWb92aRkAjoBFoGAQ0QTTMUGlBNQIaAY1AbRHQBFFbvHVrGgGNgEagYRDQBNEwQ6UF1QhoBDQCtUVAE0Rt8dataQQ0AhqBhkFAE0TDDJUWVCOgEdAI1BYBTRC1xVu3phHQCGgEGgYBTRANM1RaUI2ARkAjUFsENEHUFm/dmkZAI6ARaBgENEE0zFBpQTUCGgGNQG0R0ARRW7x1axoBjYBGoGEQ0ATRMEOlBdUIaAQ0ArVFQBNEbfHWrWkENAIagYZBQBNEwwyVFlQjoBHQCNQWAU0QtcVbt6YR0AhoBBoGAU0QDTNUWlCNgEZAI1BbBDRB1BZv3ZpGQCOgEWgYBDRBNMxQaUE1AhoBjUBtEdAEUVu8dWsaAY2ARqBhENAE0TBDpQXVCGgENAK1RUATRG3x1q1pBDQCGoGGQUATRMMMlRZUI6AR0AjUFoH/Bzc2K9R2TznNAAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-29"><g><path d="M 211 451 L 261 501 L 211 551 L 161 501 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 501px; margin-left: 162px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Failures?</div></div></div></foreignObject><image x="162" y="494.5" width="98" height="17" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAABECAYAAACbHqJdAAAAAXNSR0IArs4c6QAAEYRJREFUeF7tnX90HNV1x793dmSJkGJoSAyEH/lVTpJSmoONtSvt6ojfhNgxAXYlEwxOSUKaUtOkpTmltE0bk4SQk5xwCBRSBwwES7sYDHVLiF2ieH9o5dQpJYSmSSAtuBhMXWMIyJJ255a32hWj3ZndmZFmdq298+fO+/l5b+c789699xHkEgJCQAgIASFgQYCEihAQAkJACAgBKwIiEDIvhIAQEAJCwJKACIRMDCEgBISAEBCBkDkgBISAEBACzgnIF4RzVpJSCAgBIdBWBEQg2mq4pbNCQAgIAecERCCcs5KUQkAICIG2IiAC0VbDLZ0VAkJACDgnIALhnFVgKSOxxF1gXOFHhUz4Qj6d/JofZTspMxxL/DkxbpxJS9g4mk6urc4bi110bIH1NID3Vu4x8NF8JvmPTuqRNE0hQMujA7+jM1/BhPMAnAzgt8otKQJ4AUAOoFSnbjwyMpL6TVNaKZU6JiAC4RhVcAlFIAARiODm23zU1B2Lf5CYNhAQdljeOAPf4amOvxob+94rDvNIsoAJiEAEDNxJdSIQIhBO5kmLpKFINPFZADcBOMxDm55kjRL5HcP/4SGvZPGZgAiEz4C9FC8CIQLhZd40IQ9FYvE/A9NXAISq6i+C8GsAWTBNAPx2EPrAeFttO+kZaHTJ6I6hf2tCH6TKOgREIFpwetQIBOOxzg5etRDWbGUPogUnnMcmhaOD3QTjEQBHmYooArRBp6kvptMP7DEXHY/HQ88+jzM1ws0AvX92tTwy2YULd21PHfDYHMnmAwERCB+gzrXIhSwQTtnIHoRTUs1JF4nED4OGTSBaNUsciP9iNJ36OgC2a9np/fFj9AI2AdRvzsuMq/PZ5N83p0dSqxUBEYgWnBciELLE1ILTclaTenriEdbo+wCOqNxg4M6p8SOv2rXrjqlG7Q/3D74LBeNRmrZ0Kl+cm+zCBfIV0YhecPdFIIJj7bgmEQgRCMeTpUkJa5YKgb1k0Fm53PCTTpsUiSb+CMAtpvR7WKOzZMPaKUH/04lA+M/YdQ0iECIQridNgBn6+/v1icI77gMQN739P4IiLh4dTY07bUpPz8DprPEPABxZzjPJwEfymeR2p2VIOn8JiED4y9dT6a0gEN3dq5doemEFiD4CYCmA4wDopg69CPDTDGwuaqHUj3cMPeeks35sUteU6XJT303+6rQV573u7o8fQR1T1xHwCQDvADAO5p8z4V5jsrBx584H99nxUZu3z+0JLSMy1oJxDoCTTKxfJOAJEO5eFOItXg0Vlp4dX9w5QSvBuJyBUwEsKben5MBGwFPMPFycKmyp11aVp79/bdfE1OtXQ8MpYF4G0NHE2JDLJv/SyRyopJF9Jje0mpNWBKI53OvW2kyBUA5PGtM3AJxtYbpo1+4iM7YUQ9rnGgnFQhQI0rTdMPh+gN9jDYi3duqHx0dG7jpYdZ96+uJnsIFv11r1WJb0Koi/hAJucfqm3t8ff+tEkb4ExlUO/RQKxLitWOi43m8Htp6e+Ieg0TYGji73dr8BPmcsk9rVgn/LtmySCEQLDnuTBGKuDk+K5PMaa6uy2aF/tcO60ASCgGsYpYfvB236XCSiy3Lp4SHzfWUFRCFtPYOvcSHE5SJ4zNCLl4yNPLC73vSNROK/jRANl8Xe1Uxn4BfQ6EI/9wNq9yD4iUldP2vXyKb/ddVYSewbAREI39B6L7gZAtETS1zMjHss3jJfhXpYgB4HoJYjQmDjNFDJjr3Wc7bB8s5CEwgQ9pmcv9T6exagZ8pfE70AnjamQmePjW16sTIjli7
[Figure residue: embedded drawio SVG flowchart (base64 PNG data removed). Recoverable node and edge labels: "Suspend initiated from userspace" -> "GPU driver shuts down clocks and sends SMU messages" -> "GPIO driver suspends non-wake GPIOs", with decision branches labeled "yes" / "no".]
dLF8GYxiEw4IShEdfYld+CYx1AL3I4O0I6AKwu0Taz2qsLcxkVj9s/5u4as8Zx92bVQGdxX9npuOymb5bPFYMxeKpG4lwnLOMX13JQp1k4DD7puEkCCL8lNm6Nexk6+8ZBoYI9BrA7wSh23Zt3QIt49o5LZtOHRgY0IN+AUHLdSZSi5khSEuoE+0/S7VIIIG1AeKPgPExSbnHzKg+f3RgzTP2ymFJu57rw4lFRyI5D0wiPpJdFvl1T3gKQAZMEwTem/M3PeepdBM07dCRdat/bW87DEEIMozqWAVQj62NMfYgB2sT3urVKwA+yTGnMTA/DtJ+VZBVK4A4GHuXb2o8MNmGI73UtkHXjq0cdSSSZ4NhmZgIAAAgAElEQVTpkrL+rG+b/wBoj1nliVvB3A7Qe2U3VbG5uq11Cb5/A/hRgObbMQThETD9VoxFsncVi/rGBvPAW7QhDgoPim9Xtk4K37tQne9Q6HBmEYSl49361ZuYude+IAh4QNf45PXr0uJDmfrl9Xst/0mwrrXFt4ItBTxuEHndnXVaO8Sx+B5jwinZwf5hJ5O3J5L7EtN1ElWMdEPqjKcuZsJ5UwQBeL4lyN4fbHU93zAKt65fANg1X4cfnYxGD9gwsOrvxTacBGGfNwNZpsjJo4OrxCKeeusoXGWXb15U33dsSpvIpAOGh/vyH1mNfh3dSz4O0xRqMvtGaV339VzuQqfeurPzqJ0QmXM5Mx9dsgYI3x8Z7D/LPpcwBNGI9VGErL1n0e6aHr3HcWM2mLHWjPA5znVfUOd8DYx/LdnQmO8Qb3UjI2nxTmH9ghKE9XYwTmnHTVq0c87IUP8P3E61sa4lPQRTqKOKZG4wcElblL81MJD+h3NZzOtO7h016WpHP0J1edzwYN/qGi0jmb5dMMGTJpunvHs3PJhOp8WbQ8lPqPe0Fv0rAJ/tIArXtS7D19bo6wRcPCfKVzqxsNSnpnldmeaC6IqRwb7T3fDuTKSOY8YNjv3uWYBP3H1XPGCfV34/faWXGZe7aAhmFkG0x5MHaUR32E+Egr3Z5NPti94hV6+Tgms011g8dQoRrnAA3T/Zxid5nWRcr62SDSnWlTqQgJ8BmOO2advn0l72hlCyhXu+Q8TivccS8c1TNSQLzY0gAmAM2amebY/ctfiw84/7O68EWDzoFX/+D6PyU2zZRx2GIBqxPooT7OxKfZ2Br9nmbID5gt13w3dkG1mhnGzdTwK8cGQoLQg2MEFUSg6ig1hX6jICxIaW//lscKJI4Xa9tvSmwne1RrdJDgzcMF6DtUQdid7LwbzC1tZfYfABIyPpP/q0T7FEcjkxXWvfG9xu8B4EIW50R42sWy0OmtKfFHfCH6Fzz8hI+m/OSuIwZGot9xPwL7a/bYTBC73m5XE7nTkEIdscxKnWnMwd7mftIDbtlq1euZqAz5aA6nKDkKp/AvZlLXD5TedZjY2DM5nbflccQ0dH8l2I0gAY4uoqfm+YzIeOZtJDshUTS6TOJMb3Cn/bBLB4vBJqNPF7jjU6QKZbF59lrCslbjbF+RvMvCibSU9ZLBU+5hIrJvFveR1r7uDh4ds3eX041kNtBLfZr87MuDmb6Rd6UE/rqqAffGdn74dZY3EL2nJ7YP7KSCYtQrh79mFZu5nmfQA+OLVXARcOD/V/vfj/QQmiUetDjKu9fenOWosh3g6m3tsYuD43tsPJfpYt8s2Jbm2NvrC8qPrzu0EUrGB+APAym5x8bw6i7EcPXrbN296cuN1+Cg4aKbajK3koYB0GrcMTA09HYfQMDd32ZND14rrxSiwBAawYGeq/MkjbXV3H7Gggdw9A7cXyTDg/O9j/TWd9F4IQt78V2Uy/zUhF3rMTBwCvkEkHDw/3PVRGEIneJczWIbCoLXlNY+3wTGb1oN+8YvFkLxHduOWwatWYOQTR1bX4PToiAwTsUZhsqGundHNxIYjykz3KTl5+gMv6c56oZeauYDplJNN3dflCc5jGMh7cvH/cA8J3phYp0+Jspm+Ns65kk3nK0PgAp2pCdoMIcwtwqszAeLC1hRfKVAl++LkQZGk+D+D3uqYd4me+WmzLeRIXqsl/bN161KP33/SGtRkHzAfRqPVhkXY8eQQRCZkWP3pfPbQdu07HpiEefTkX6R4dXfWCKOdFEC767FCPpWW5Jvzf2azh59VqkQcAEm8TjzBoYwuZlw0Opl+sZO3Y64hHc4rQFxn4BICPgWBE2DgkDPl0JFI3gCFUq/mfy14iJQiPW4BzbpJ9TxqOXbqXoPQw4IWb7IA3owjC+aGEPVFIAXQRatlGF9CM1C4AucDKr8llqh8g3RrddIzzcdd52xB+E8yctputuvlSSDY0aR8SgvC80TgXXNlcakgQPT0ntE3ob6QBOnzLR+mtjy0jyniyi4huJUDchh4mogHWzTuK6smgBNHQ9eFU0YDvgYHFHirVkmmLDcaAJtSYBogeJiDDOt9WNPV1Iwhhey95XA5MDsVt02lUUU+rpGqJI0z9qgjC5RuX9S+9rUgSOkm0EfAzXPH9fmfSDULyUYbWSTpUNFLWd7kWB3JicwJe7utQfmUrezx2OV043h8sFRG3GL/S9Og6wLL8EPdw6Ym9/GQvv6VICOIpM6p3O6193D6ksvo1JAhxC6IWY53dMYtdbkxhPnR72SAE0cj1Ie3LRZVR6ZxlBEGEo8H4CAMXlLzBMb46kum/yE+d53WDKfxN+AndsdmSZmVrdJuBGr0rVApBRfWqIQg3dZRsIH4qwGIdyfukl8pZOufOzuTHoNEDDLyjUGDGqJjKzDvFw+lwpl84CwX+xeK984lYmArmr+uSG4TUUshF7ePXsURoZeqBIOanoh+HD4Ml/Dfb5vzVoeMtWxSS9l0XTpkfhEMd4TffuhJEufrHVRfrN063vwchiEauj0Ri0a46R4X++J+LY641KbroyNeLpzSn5V/QNz87vgVLwrs8nCzzpsmMPo7Q2j12Np/weHivVLS1qEfx+JJdWTM/zYxjAXyyxHw6hIqJCWdlB/uFP4PvLyhBxOK9i4j4tql1EvLbFfUkh7CZQRBSkCqwkCnbACRClX6UFeboDbLhCMGUmbs65lamXrGdzB11yx6fy24oHqf6ajf4aut7fS2dnb37s8b3F220gzoH+n6BtgJB5NXI9VEwb91yQwQMZlqQzfQJk9ea/HzMMMv7CGgUYK8Yjy/ZzyRTPDgHcfAT4VHuIpNumJjY/pd+D/E1AcHWiMDjTV3bLUK8L8DvY6ZPbLY03B/Anp7e+mEIIsR+EpggnHHmKri9u3l9T/uc1E1EENIHJom5a4n6TPb+UIzb5LwVlXlHO8xbvR6dq93gq63v9bFL1F81P91MA4IoWR9BxlPtBulDEMIXYALA1rZ+XmZo87NDq0fD9G3FyzLMS4lwZFDvXgDCWurHUdIvrscmVRg/tSeSHySms4lwhMzpM9A8FUEEgqlYiEKV9incRAQhffR1qi2cD/AOAim5JTitHOyWOZKHcm8zWkewvrBWSIogwq16CQGUyOctJgjhoX0pSLsVpnmX4/R/X2
uUj67EOi0fWNE8gwBhNuv0CncD8FnSeNnwurSw3KvZr0LSEv0LIwexx22Jy6YIIpRcFEGEMJv081OIJVLnEePiggRK3hCcD5l2cim3bPAO6lftBl9tfa8V5nzPmQ0qJplpd4NUTBY5TL65w3lCxVMWBdlSdQWz4/eQqRU9lXJ8CDRe7hISxV5dGiok1K5kK5z3yOefbI74/B6fNl7Y7APyJwINmUwDuRbtIRF9oKpHaqVisti1pr+gAgmzydT7kdqpOvLa1JwmokVVkdf7Q3Gu5e8QeT21s3+/kOLVbvDV1veUXRM+Uvutj7fskZpx6eT4DucW9f8uD81ljp/VfPDC56Kt7aV9DIosI0YvCHuVtRfACzvIGOSe2lbNvzFwM6Ddm4vSYxsGVolw41IHzKD7UdA3BLdxB61fCxWsxKS25mrcuqiYRKOOU7SrSafXAikPN4GyUBsye/tKLKYKY7Z7PluRKe1OSvaxuj0m67q5k91BULbJu71DOIjD19mv2g2+2vqeBJGPR2R/sBV23lLHQK+PbVKnNQy8n4A/mMDvozAuKzpIBVHpNHJ9xONHbGtSm7C6ExYz1i+MiWSxTqwr9WMCDgb4Lwx6BNBuLL4hBN2ApGFuiPom3tx+WR0ekqkjkToYDOHZO6WG8vp+ghDD1IGq3ONY3Jh+BJ3PCupfMt0IwmnEASCUQ6XARnJjnUEE4TRR9Q4tIVsvznATrt6PjXSEKg5U4sloqZJg8rts8ZqkITLKBct3mbno57UWfc1UtNgA3pvVbvDV1vf6yGvhfyD5ACrS+TdyfXTGUz9iwue3YBMuJpHU0crmzRyUIMR71qS+8+UMFvlXir9JZj4+m0mLUN5lP+vQw+bhYOoQZrMMDLdFNy0LGuE3lkieQEzX2xoOvek5B+WSrCtQyJ5iWzLiDuNJHTTciOgvqHxkfkJuURncvjOJ1/7MIQipp2AIU1cRJTJikojjk3cqE78QoTaYaaksjIUb2LLEPn5hK5y6XuEJqYH3tUV8lfowSDbPp0A4C4wfb4nS6O92X+0GX219v1OgJGjdY2YucmAxbIRv/bKTY2lE2yA3CNGHLNRGvdaHM1SGeCANEyW3kENCBOfbroBPyRoKugFZ8+5Zshd0874SZ0WRryGqHSJLSFS24QQ4pNhlGFQefnK3/93FMzmUI2yYsD1h8JXNI0T98gNwCFNXl0CYM4cgxHZeVQTGrpQI+y08Q7f8Ghusz/fDdj7EMrCaGNtO5bDwELhTnUSEe5lxRHGyQdzuq93gq63v96FLNjsgoF2+PCZOaaiOoBtSnYL1SdeH7GATPFhfT3Qyt9NVjhtISaiOEBuQJR5ZOGkCXTUn+sLpzpuBPH4aThge7N8SVdhD6JITbSjPflnTMkfHMCpkt5QDb/UNQsxVEtjPIAqGdzy+JGGSZa1WPEiIJmcUQSAeX/whkyLCWcrmcMMDehRLHxpIP++y1igWT6aIrKtqaYIZj3wQsnDOTLg718rHVBLu2+0jso9ZsniFOZ1ID2qlCPV6ZC7zEi8FI5DbfbUbfLX1/QjCLdw3EZYND/ZPeZE627FyVjyLc0AkwkS4Br0LShDWaVoSDr5e60Ma7pv4KyOD6f/2CHshW/eTzptOWIIopFddBaKFNpxFyPUlzujALvLaBOIFI4Np4a3t+nN5SJbGEPNbN/a/u6gqXW9B9rrW3KP0PbCV7rUkv4xb5OKw+DrnEqa+i2xcswkW++roSL4XEStyrj2F7MwjiPwtQpYFih8nDacNr0v/0v7BCHDHdfq3ws2hPG91RQmD5H0JNOPx5PsNoqsI+JRD0BvJ1D8TIAVn+TVxS0PS94fin2VRH7dUDaa3rnaDr7Z+kA9dfkiAyE72n60tuMJpmy8scCKtLd8FW1n4tnzUdUsYVPv14RLnXyTeudqczH3VGe7edd1XkTDILptY15J2gim8uYupKV3DwrucTF9kpjNz49v3yx64ReItjbHSkSN6zGReOJpJPxBknXiVkRCu2DYeNgnLRwfTG511CzleDgPzJQB9QNq2y+0+zAYvazdsfZfcDq+D+BvQcYX9ET6f7EukUiaRulfm5T6zbhACQJ+UelvSYbqnm9wiB5+c1DVKKRnoxFQclORaXfyT5y1AdjIqVvR7+5jqexo7ytk/Ho+Uo2NBUjcKZ8KJNk46b4JhbhBiPI1cHx5JXYKmWZUeUsJuQAU5iINMmcqWgYuyQ/1fddxqhEWSOHWL7HbO3+sA/3YqtadIYQuIFLw7lxelaybHtl9RC4upwolZvEe+29GPAcbTTMjmU+oiAjY/AbJIwa59EB7mTxRiZBWSfeF/NR5fkMn8VIQKmfpViG9V9T2+D5HBLsugp7zSBduGP/MIQgzeAl3XnElM/A4WApyrGNb1MH/y8SEIUaQQl/5WgBJ+HZT/nR+HFjnGmQfYqx3Xm0CAB6dy6xqrp8CWH9XeAKqtHwbfzkTyMGYSeamnTrHB6rurJMMSRKPXRwjnrhIovEJsV7qBudxqpAlqKvxei3MQN6Ub26J8eiWe225rohDcTqTmdObu9ltGrzPR+SIHuMZ8ny3yqfR9pFJ8i4OosH4x651IBevM2+42P/FesZqZ956yfJxpbxD2mRVyIac2bxCXlbi8S6fPj5OpHW9opqmBxBU1MEGI5vK6PToNBLEwigm9vRaSIKNvz4ny98MuamneA5/3h+JA5O8Q3t7T9klUu8FXW9/vy3T+PWSoBOk1295mJQTR6PUh8iJHormLmCDMTcvVpqUgWbm6Db3lgtHRW8SJuOxX4QZktVO47Yo80bZNiAcm23Ck83ZW+F5P2Txm8Q4U5BsSXYi0mufsviv31yPCa0ci+UkwfuiqNnJgycBNhqZ9TSSokrwXSlXA1eAruq+mvlDVEZPIJBnz+bYs0su1mjfOGaM1IHy6UH5m3iDskxUAThp0JDOfCqL3TwXbIrwE5vUMukokPxFX0yDRXL2ALJyE5gOcLFyFdynotcWVUzySD29OAZpujZr3hCWGko3aGZnRCm1QniLUOVbZ7cMZvM9rftVu8NXWD0sQxfJWOlHD+DyIDgOwj+1UaKkwCNSnT+Zu8UtNWylBlJz2dK3u60P0J95WonNajmVwb8m6FwHumB/frI29U49ErvXLtlfNBuSSytdg8JezQ+liatwSsVpWQK2vfgqEpUxmHKDdbQQzBvAzxFrGgHmdPr7jaC1USl7rqjCHQwuxoYR6q/hNi2ovEPAoM/cZOX2tY/2UWVWSxHGwGnyrJQhRXxDz089TjBinAei2zc9aJ0y42ZzUV4q5ScY68wmi0k1F1VMIKAQUAgqBtwaBmsdiemumoXpVCCgEFAIKgVojoAii1oiq9hQCCgGFQJMgoAiiSQSppqEQUAgoBGqNgCKIWiOq2lMIKAQUAk2CgCKIJhGkmoZCQCGgEKg1Aoogao2oak8hoBBQCDQJAoogmkSQahoKAYWAQqDWCCiCqDWiqj2FgEJAIdAkCCiCaBJBqmkoBBQCCoFaI6AIotaIqvYUAgoBhUCTIKAIokkEqaahEFAIKARqj
YAiiFojqtpTCCgEFAJNgoAiiCYRpJqGQkAhoBCoNQKKIGqNqGpPIaAQUAg0CQKKIJpEkGoaCgGFgEKg1ggogqg1oqo9hYBCQCHQJAgogmgSQappKAQUAgqBWiOgCKLWiKr2FAIKAYVAkyCgCKJJBKmmoRBQCCgEao2AIohaI6raUwgoBBQCTYKAIogmEaSahkJAIaAQqDUCiiBqjahqTyGgEFAINAkCiiCaRJBqGgoBhYBCoNYIKIKoNaKqPYWAQkAh0CQIKIJoEkE26zQ6EqkbwFhenB8Tzs0O9n+7Gea7f/eSPaIGf4aIFzOwL4BdAEQKc9MBPEvAH5j4ASbtrj12Np9Ip9NGmLknEot21Tk6COCfnfUYeDoKo2do6LYnw7TpVrYj3nsyiH9Y9nfGg60tvHBgIP0P5996epJvm8jRHSB8eupvhJUjg/0nVDumWCL1ZWJ8y9bOyyb4oNGh9IZq254t9RVBzBZJz9B5NiFBUGd3KsEmfxeg/UKK5W8AvgqDV42MpMeC1PUiCFGfmY7LZvpuCdKWV5menhPaJvQ30gAdrgiiWjSnT31FENNHFmokEgSaiSA6OpL/RFHtB8x8tO2mUIncHzMpcuzo4KpH/Sr7EQSAdGt00zEDAwPixlLxL9bd+0Ey+RcAdlUEUTGM066iIohpJxI1IDsCzUIQ+/ckd4nqWAVQT40kvAmadtTIutXDXu35EUSt1Eyu6iXrmqJUTDWSecObUQTRcMhVh2EQaAaCmDv3pJY5W716BcAnOeauM+F+MH5IBv9qcnLHTRs2XJMTZZLJZOT5542dchzdD4RTiHEwgKi9PgNPIKodkh1Y/Wc3TP0IAoBBRMcND/atDiMXe1lP9ZIiiEphnRb1FEFMCzGoQbgh0AwE0R5PHqQR3QFgq+I8xebOFEkGUROJOrGeJXuRbqwGqN2OFTGundOy6VQ3FZGUIAgvgfH2Le3Qra3RF5ZXqmbq7Oz9MGuWemmnQpuTAOZMta9uEDP2A1cEMWNFNzsGPtMJoqenJzqh77wS4GNsEvsrDD5gZCT9xzBSTCSS79QZdzpIYhOZdMDwcN9jsrZkBEGEW5gtq6Hie8FThsYHrF+XfirMeIplO7pSpwG4ovD/LwMYAHCUIohK0JxedRRBTC95qNE4EJjpBNHevnRnajHWEbCPbWorRob6r6xE2NLbiIfpr4wgGPg6gfezWRxVrGbq6EhuhQhuA2i+NR/Gg0w0QuDzFUFUIuHpVUcRxPSShxpNsxFEV3KuBnoAwI6Fqb1hMh86mkkPVSLsuQcmt58zjrsB6izWZ8bN2Uz/8db27PhJCYJwLpn0aqnPQmVqJol6aQUTtinxP1AqpkpEPS3qKIKoUAziQ22doAVgHM/ARwHsXGhKODI9T8BGZu4zcvra9etvf6nCbmA5EunafMD8LECfsPUjzBL/QkyDpsbX7rELZ4M6UZVtGo4PWDhwRUw+mcCLAOxV0J1b8wKwjglXhunPMXea19X7Pg38OQIvBOi9hcfXMRAegclXwMTtRTv/Sm8QjZKPn1zbywniFTLp4OHhvof86rr9PdaVuoyAL+TlQUyMdXNatjp1YOCG8aAEAaI7HWapFamZHOolS91lRvgziiAqle70qqcIIqQ8rA3boG+AcbL90dGjGZ0YVxl6ywWjo7e8FrQ76+oexQow/QeAbf3r8eOk4bThdelfyk6S9vpuBCHKjOt0MQFfdFrMlPfPj5OpHR9mo7OIxzAvJcKR3n4AW9oOSxCNko+/PPIlOjuTH4NGDzDwDludilVMQfstlnO7QbRFtr7c4dhmMPOibCb906B9lKmXwPfAwGKO0pcUQQRFcXqXUwQRQj7C0QkR6gNwYIhqVlHLJFGjI7Pr+n7vV9fNYsWvnjBZBOHSyTd3OK9oLimrIyMImMZpiGg3OK1kfPp8nZlOyGb61viNrbM7+Wk26SYAu/mVLfzdsvMHmycFDbXRKPkEHL9VrKvrmB0N5O5x4LqRTP0zw8Nr/hKmrUrKuhGECFfi9F1g4PrsUP+JfgeM4jhk6iXxtlIW4kKpmCoR3bSoowgioBg8bNnHwPw4SPtVfoPmVgBxMPYuPyXzwGQbjtzw8/Srbt12di7ak7Xo3cjH5rH/DDCeZkKWQK8xeDsCugDsXt4WXTM5tv0KN5JwbhoM/IaACQDzbG29TkCWQU95zwm/1zXtkIfWrX7abU4dieQ8MN1pM4MsFtUB3gBov5H1IUiV8iaZHcUKbrGYGiWfgMulpFhnV+rrDHyttC4/bjJOf/dueDCoarCSvr0Iosz7mfBH6NwzMpIWIT18fzL1krCmUgThC92MKaAIIqCoYl1Legim2LiLtuwGA5e0RflbsiBk87qTe0dNupqBg2xdeFqLWHrzcUo76zBjrRnhcyRmiNSeWPoRjfXrHHF9DAZ/OTuU/p5sej7OUy8y05m58e37nQTTnlj6UY0NEbfnw452XVUm8+Yd9XZtTstdBMTsOIBwszGR+zfn+4zALWKSGLdQQ5X93AiiEfIJuFTKiokbIXTzPoclU7Hc3xi4GRqtrCQYn9+YvAhC4uAWWM3kpl4Sb0eKIPykMnP+rggioKwKD4OnTxUnumJksE/8f5nlSLFMweJkbWl4Bb6rNbpNUvagKIk+aYD5gt13w3e8TpmFh+wfALzMNh1XW3sPgtgIgxd62edLbzgeKoRYPHUKkWUjX4xS6jsnj9sA3AnCeritq3wCLhVpsVi8dxER3+DznpR/qAffrpnm3ePjb3/CS1UYZDxeBCHqV6pmKlMvMZ0ykum7WrSpCCKIZGZGGUUQAeT00YOXbfO2Nydut5/sGTgiO9Qv1Caev46u5KGA5UVreZa6xb4R9vJai/Hz0tO5t6rI3rHspE7AhcND/V93DtCFIMZM5oWjmbQwyfSZU4ljlCj7lBnVu0cH1jxjrygzySSivok3t1/mt/HJyRVSgmiEfPwwCfL3Ct5hdAZ+pzF+Zkbo5kpuGH4EUamayaFeeo41OqD4vqYIIshqmBllFEEEkJMsZn3QMMntPYt21/TIAwCJt4lHGLSxhczLBgfTL9q77kz0LmHmm20n7dDetuVt8KOT0egBGwZW/d3el5QgPG4BToja48kujeheANsU/iaNsy8p9xqZfOjwcHokAOxwkqtFsBKnsEbIJ8h4g5Rpbz92O2rJfaNgpjoVeiNIXQCvMHBdC+nfHRxc81yQOn4EIVEzTQK8cGQoLeQr/ZXjXXorVgQRRDIzo4wiiGByolg8dSMRjisWD2OVFKALinWlriPgs7b2Q1mUiHpdXYvfoyMyQMAehXakTlnS8AuMbw5n+rd4v3oMWtKPlCDKVWY8GkHL/KGhW0U4Bt+fzALIRcVUb/n4jjVsAXHji7TMORpkfgmgD4QM/y1uFj9oi/L5svcvv8OAE8OySKw+6tMy3w6bekn0rQgi7GqYvuUVQQSUjeSEL2qOMXAHgVe2RrcZkL0rBGneZSM8KzvYf2mQ+sUy8fgR25rUJtRen5yq5/h4xb9LT5VMi4OYq7rULyOIfAyinW4VgUltpHd5
dqj/jDBz6oynfsSEz0+14RJWop7yCTPeSsrm35DQRaz1MrEwoZZYppW3bFmfGXy015uR3w3C2tDLczk8ZuYiB46OrnpBNh8HAZSolxRBVLICpm8dRRABZeNijWOvbemLidHHEVobRl8sOZGLdu8DKJSdPINbCDjE7mtAkpuBZNMwmGlBNtN3TxA4JPXLCEJGVkwITXqxROpMYkxZY7k9UtdTPkEwqWUZQRjjOsWIsBgMkaHNizA8fSqCEEQYNZNEnVeWcEjdIGq5Gt7athRBhMA/Hl+yn0mmeHAO4uz1OoC7yKQbJia2/6XXo6wkHEOIUfkUleT3DbLBe7UapL5LkLhAD/v2vmNdqQUETHn3euWkrpd8aieMylqyDBjm6MeDSdy+3uVsRTi45cZ2OFm2xoIQhGgvqJrJuVZlb3GKICqT83SspQgipFSCh4soaVjkD/5xlPSLZY+LiiDchRCGIEQr9ZBPyCVSt+KF8CvnFMKv2JMHuT7+ByWIcjWT3MDBvvm7WeQpgqjbEmh4w4ogKoQ8H9DOPIMA4XtQTJTi19qzpPGy4XXpB+0FFUHUjiCKLdVSPn5CbfDfqSORPBtMl9gftmWqRDGuoAQhUzMxcFh2qF+YXlu/IOolUa5qgkaMbXYAABzvSURBVKhVnuxE6sslMaGAP0VJTwS1AGuwXKdld4ogqhcLxXqW7Ek5PgQaLwfjYz5B/ESMoUNH1q3+dbFrCUFIrYKqH2q+hSAqIq++gtR/K1RMLmOuWj6V4h6L984nMi8AtF0AFibBj2s8viCT+alQP1b8k/mXAHIHzKAEIQYjcZorMSoIol4KSxAAyizQvHJYhwHNaeAARRBh4LPKKoIIDZl3BeEF3Nb20j4GRZYRoxdkhcsu/TnMCCURP0M9GoedQpANvlqCkPomeCS2cesvFu89lsjyD7F+Xm8QQXCoRD5B2pWVkWy4T0dh9AwN3fZkpW0W65V59rv4sYQhCD81k+Nm4BoePMwNwiKUfPjyKS94Kx1rLtLtZkUVBDtpnuwQvj5B+pgNZRRB1FfK1JFIHQzGjXY1lPMDkGUdq3YjrHaDr0F9mW9CaN+OznjqYiacVyuCcFJ1EPlUukRiXakDCfiZLT9z4FhHfn3GEqnziHHxVLkaEISXmqmc8N0TDIUliLIHcqDqG3RHR/JdiNIAGCLfSP5w4ZFYyQ/v2fp3RRABJG+drNg8HEwiquhcBobbopuWBU3yHkskTyCm621dlXwA0tNOIbZ+MXFOgGGiQDQiKNyOBPzBBH6vmXz98HD6EXv9RtwgrJOhw0QVkD98us0t6C2k3vIJgr2sTCHw4C8AK7JvcZe6AyaWhpGrs22pj4nL5hfmBiH6cW7WxbcNh3rJM+hkWIKQeNyLoVSVM0PmhQ+JT1Clsp0t9RRBBJB0LJ48gohEzoN8wLmQYZGDvDE4YtuIXl7TWDs8k1k9GGCIVpHOROo4ZoiAcMXAeJPOh0ZRrlEEIZn3JDMtDeqQ19mZ7GDNCumx3dT+KlFTNUI+QWVgL5ffyHdeCfAxtn+fZObjs5m0yCtS0S8eX/whkyL3l5hbu2x+YQlComayvN910r9ge/D1zD4XliDm9ix9xxxd/wVAIjOj9RNOgJqZO3h4+PZNYUESasTWrV+9iZl7bXWrvpWEHUczlFcEEUCKEkc2gwgnDA/2T+nGvZop28Akwe1kHz0BD0y0cdIrf0Sx33zMp6hwdLOH4v6VMZk72BlSu1EEUR4S2vrws+Zk7nC/NKwuH7n0DaIR8gmwTKRF4vElCZPMu+wkB+BlZj4hm0kLr3fXaMCyBl1Cwrtu2GEJQnKbfYOZkgQ+G4RP58fknb86LEFYhxtpzozgwSrtWHUmUouZIZJT2WJd5bPdVXNzq3QNzOR6iiACSM/lJLgJxAtGBtPrvZpwiUpa5n0qvrqOROp7YPxraXt0U2vU/KJXzB2xEZNGl9tDUojkRcxYkc30/9A5vkYRhOi3PZ48SCMrmq3tY/WeUzKZjDzzLM4B0UXOGEXyYH3Sk3qt5RNgpZQXEWtnUt/5cgaf6virTqAfRSj3jaBml+2J5L4aY6Uj94dgmIuyQ/1flZFNWIIQY5S8CdwurFwB7CjWFREdNzzYt9oNkEoIQnorynewVte0070SUhXHYX0HETq9kJzJHggxcKTiioTcxJUUQQQUrstJ0DW5jrU5yj9o18XqcgsQ3/3DjMiK7NBqQUYlJ07RBzGJQH/2hDzCPM319tFIgnDL7SBuEkx84uhgeqNdBFYQu9aW74KtwIhFVdlUEbfH+0bIJ+BSKSvW2XnUThxpuQuM/SVt6CB+kEA3kcnrxsd3fK7oES1O85OTr+3MFDkQhFM2h43/uBMTv1tmJQQhic20ZdgB1KuVEIToINaVPItA35bIXeTJ6Adp17Rq4xsHBta+UhxQT8+RO0wYLfsw0yKCtWbKPM2Bym4ilcq7meopggguTZcTvtXA6wD/FtAeyzfH7xS3ZgA7lzfvvVg90nOKtw+RfnMdQC9aKTpNfFJqRgt6Epp55Mi69G9l02skQYj+XdQi4k8ih/ZTADJg0hm8HwEfAmD3Ei6Zgod1V0PkE3y5lJYUiZZMLXoHAf9SaRvOenk9vb7QK7d1JQQhN5oorOwAeasrJQhLrbjVq5cx+KSQ0W29IO2fbOOTgqhpayWXZmpHEUQIabpkbgvagkhRemNblE/3C9Hc0b3k4zD5JwC/J2jjtiPek9DoaLsjnrONRhOE6H//nuQuUR2rSrPr+c7uWWLcENTMtVHy8R21SwGRC0Kbk7vc7XYUol2hPlxrtPCKhwbSz3vVq4QgRHsSNZNF6My8KJtJT8XGkvVdKUFYh4m5J7XM2frlfwXThT4Op35w6WC6FKb5NfXu4AeV+98VQYTEztKPP0enbD7lCv34DgGriyTw5+y+K/cHTVBfQWIZnRhXGXrLBaOjt7wWctMIZeFRKcEUYgmtKMQS2tYbOx7kaOR46OZHggbrE+01Sj4B5S4rZuURJxiXEONgr9uSpLI4ZDykaTh3eF2/sG7zfeCulCCkaqYA6iUx5moIojjneDz5fgN0MREWhsUIwhCC+CSn+rIKmc3aqoogKhS9dR1uffVTICxlMuMAiZDMxYexMYCfIdYyBszr9PEdR/1SbLoNw1LPTNACMI5nYF8Au9iu3y9szkP9JzB+bOT0tX6WQcU+Kt3ga1U/H5o7upxAwvxzH1ueZpF/4EEmXLnHLpwVZBo2WF9xjI2ST4XLx6omZNsyTt2FEO3iDUmsIbta0gDwPAEbmek2PUJ3B3mstY+pUoKQqZlE1NjsUP+JfsRUC4KYkuMWjA4DuAtEu4DxdtscRSBMcYt6BMw/M/XoXdV4YFcjz2asqwiiGaWq5qQQUAgoBGqAgCKIGoComlAIKAQUAs2IgCKIZpSqmpNCQCGgEKgBAoogagCiakIhoBBQCDQjAoogmlGqak4KAYWAQqAGCCiCqAGIqgmFgEJAIdCMCCiCaEapqjkpBBQCCoEaIKAIogYgqiYUAgoBhUAzIqAIohmlquakEFAIKAR
qgIAiiBqAqJpQCCgEFALNiIAiiGaUqpqTQkAhoBCoAQKKIGoAompCIaAQUAg0IwKKIJpRqmpOCgGFgEKgBggogqgBiKoJhYBCQCHQjAgogmhGqao5KQQUAgqBGiCgCKIGIKomFAIKAYVAMyKgCKIZparmpBBQCCgEaoCAIogagKiaUAgoBBQCzYiAIohmlKqak0JAIaAQqAECiiBqAKJqQiGgEFAINCMCiiCaUapqTgoBhYBCoAYIKIKoAYiqCYWAQkAh0IwIKIJoRqmqOSkEFAIKgRogoAiiBiCqJhQCCgGFQDMioAiiGaWq5qQQUAgoBGqAgCKIGoComlAIKAQUAs2IgCKIZpSqmpNCQCGgEKgBAoogagCiakIhoBBQCDQjAoogmlGqak4KAYWAQqAGCCiCqAGIqgmFgEJAIdCMCCiCaIBUY12pBQT8tNgVE87NDvZ/W/x/MpmM/O15HMqM00A0D4y3W+UIL4F5PYOuyo3tcO+GDdfknEPt6Um+bdKgI5n5VIA+AmBbAAaA5xm4n4n/e3Qw/XsAXME0aV5X7/s08OcIOALAXgC2so3tDwTq0ydzt6xff/tLFbSPuXNPaom2vdwegXYiEycA7AkguqUP/JkY9+lEK9cP9f1f2HnUu/3CnCnWs2RP6MYiDXQoA/sC2AVApPB3Sx4EbGTmPiOnr60UL7FWnn6OPkXA5wB02/p5HeDfEtFVcyK8dmAg/Q/Rt9e6CyKvefOOenukJXokCJ8D0fun1iYwBuDPAN1vknlNpWusQfIJMlVVxgUBRRANWBpuH2pnZ+/+rJk3AvQBn2E8xhqlsuv6xGZv0UdHV+9hAP8YwDs96hoM3NgW5dOLm0aQ6bYnkvsS03UExAKU14lxlaG3XDA6estrAcpb44/Fkyki+i6AdwWpw0CWiU8cHUxvDFC+3u1bc+jsTiXY5O8CtF+AMRWLVIIXOhLJT4LxwwBr5UVm/lI2k+6PdaUOdzuYeI23vf3Y7SLR3EVMOHWKsD0qhJRNI+QfQhyqqBcCiiAasD5kBAGT/kjENxRO/UFGsZFM/TPDw2v+GutKnkmgbwX5eAsN/wQGHz8ykhYnP9efdZt5jk5j4L+mbgtBRpbnrCeh0dEj61b/2q+PZ57FOSC6yHbKDtrLGBN/MTuYXul2mxBzqGf7YqDi5Dtn61e+CcaZFczBmisDv9FMfeHw8Jq/+EyeOrpSXwTwnRAyMcB8ATR6HIzbi+3bb65ufeYPLVgN8HuCCqVQTqytc0aG+n/gddNrhHxCjlsV90BAEUQDloeTIIjwU2brdL5ToXsD4McBLZvfPHg/Aj7kJABiXGuCf05E19s2C/FhZsQGzeDtCPi0rd3i7AwiOm54sG+111roSCTPBtMlkk3PUmEA2mN5LuCPgPExyYa1CcQLRgbT6103oERqMTNuctQ1wHiaCVkC5W8h7n28zNDmZ4dWj8r66Kxz+9btzQ2nvFrwDzacWsHcDtB7ZWQu5DmnZdOpAwMDeki8RPHXSdyqQE8V5N4FYHdbO0K1tQHAvKAE0ZFIzgPTnZL1owP8RxCNgmkC4HeC0G1TOU2tMxB/ZWQw/d9uJNEA+TTgi549XSiCaICsnQTh6HKtofFZ69eln7L/e3ti6UeJjTQB+zg+ehNAC4AxAi5kgy+33wwKJ7QTQPQ/pZsw3wMDi91uES4f7ovMdGZufPt+5xuIeP+YyGEFiL7q2OwfM6P6/NGBNc84obV02nNa7gfwiS1/4wFDw+ec8xd/F32M6/RvBFxQusHSra3RF5Y7N9Z6ty/G1N6VnKuBHgCw45Y50JMmm6e8ezc8mE6nxcZc8hMqG61F/wrAZzuIYhOZdMDwcF+eeB2/ju4lH4dp3uvYsF8H8Teg4wqHLKmzO/kpNnGlmxrK6wbR2bloT9aidyP/hlL8WeowPZe70PluItbZX5/FpzXSfui4bYwx85JsJj315lZsrBHyacDnPKu6UATRAHG7EwRdMzm2/QrZA7QYVkdX8lCA7gAwxzHMMSIsGx7sv81l+NQRT54Lsm4Dxd9zrNEBtneMqT/s371kj6hp3gfgg7byG2HwwpGR9B+9IIp1pQ4k4Bb7JsbARdmhfkEcJY/jhbI/s83nMTMXOXB0dNULHn0Enku928/fHnovB/MK23j/CoMP8MPJendJJJcT07X2GxozHZfN9An8Sn5CjdW69as3MXOv7Q+bGDg2O9T/cze89u9J7hLVsQqgHmcZN4Lo6emJTuZ2uooJn7fVERv9Z8VbhpfKSNafeJMwJ3OHO0mlAfJpwNc8u7pQBNEAebsQhO/m2NV1zI4GcvcA1F4yTML3Rwb7z/L6cGPdvR8kk38BYNdC3UkGDpNtLh1dqdMAXGHfiPxURfbxdCZSxzFDvKcULXeeMjQ+wHkriCVSZxLje1N1CStHBvtP8BNBe/vSnbUWQ2yKHy6WlW2s9W5/bs/Sd8zR9V8A9FHbmFeMDPVf6TcH8XeZPJlwfnaw/5vO+p2dyQ7WSNwetiv8zWDGimym/4d+fXV0JN+LCAnZv9te1o0gOjt7P8yatVa2qDx9VEX2diX9SVWa9ZaPHy7q7+ERUAQRHrPQNWQEIdRDw0P9X/drrCORugGM5bZyL5vgg0aH0kK/7PqTbWYMHJEd6hc65qnf3AOT288Zx93CKGdq83W5Abh1JmsDTKeMZPqutteJJVJfJoZ4XC/+0q3RTcd46eALBcVDrSCgAwD+C4MeAbQbne8Q9W6/oyP5TxShL3JeRfYxEIwIG4cMDd32pJ8ci38vk6cLSXbGUxcz4bwt7fLwZBs+s+Hn6VeD9CUhfbgSRFfq6wx8zSZ/6Q3Aq99ORxsA39Ua3SY5MHDDeLFeveUTBBdVJhwCiiDC4VVRaQlBuJ7mnR1INorRCFrmDw3d+rLXYPJvBHQHyHq0tn4ygpDo1AMRkP84Ubb5x+K9i4jYrhbztUoKA3i92w8zFreyQQjC5aYx5TsTZBxdXYvfoyMyQMAeU/K3+d8U/026TiTl/Ppsjye7NLJuPNsU1trTURg9dvKcCfLxm+ds+7siiAZIvMzMFSj7eNyGUX7qKj+ZyeoGJYhYvPdYIr7ZRiJPcC7S7fMuUNZl+cfPj05GowdsGFj192Lhed3JvSOmpfrY29aAsOD6tbDxN/XoXWH7tQ+k3u3XYqkEIQiJevANk/nQ0Ux6KOgYPnrwsm3e9ubE7Qwc5EUQEiIxmGlBNtN3T9C+RLn2nkW7a3p0nU22ZYegmSCfMHOeDWUVQTRAypIbxJ+ipCcGB9c859d9GUEE1NsHJYjyGwqeZeA+ApV5bnuPlYUX9CE2opGRIMW6Uv9ZsEpya+4ZEO5ixm1t0a2H7CoKP6ysh+D6th9gCGVFKB5fsitr5qeZcSyAT5ZYfUnkKXnMfcqM6t0yyzCvAXXGUz+yPzzLVEzOkz+ASaEeArQpYg8yaWbemsjyuC++mUDyTjQd5RNkerO2jCKIBoi+nCA4kJpIDK3eBCF546gVIlJVlXivaJmgW4nxmQAdCfv7DWD8KGiIinq37zZmQchv6tpuEeJ9AX
4fM32CgP1LwofIKssIolwVF/hAYe/CuXZkBOFjgh1ARO5FZP29VfKpaiKzuLIiiAYIv+wjZDzY2sILg4S/aDaCEHB3dCS3QhQrwPQfITzJhX9BlqH9W3ZotXDEc40vVe/2C0uG2hPJDxLT2dbJuRhDK+x6khGE8zE/xHqZ7gTRCPmHFYEq746AIogGrA5FEHKQLUc4A0cT4+yCc1fRTNZLKrqwAJsY2+Fbbv4jxcr1al/4jUQM81IiHBky1MYmKwyjPX7WLCSIesunAZ/0rOlCEUQDRD2jCCLgG0etYbNUD+PUTcBSEA72OZEbDP5ydii9xafCZ0C1aj/v3cw/CRCr6AWA/0SgIZNpINeiPSQe7AM9Ur+1KqaK1FnVrodayafacaj6pQgogmjAipjOBOF8yCTggX9s3XrUo/ff9EYDoHHrwnrYNWHMB+ELAM2VxDLydTT0GH9F7Rf8PdZKvJT/xsDNgHZvLkqPbRhYJcKfS1VgAQliPhELf5Upx8N6PVLH4r0lfRHwd5h80PBw+pEmlv9bOLWZ1bUiiAbIazoThMS79Y/QuWdkJP23BkATqIt8KBBDhEW3h4+oyBxT1mHQ9jsTvUuYLZPgLbkeCD+Czmf5Rcot9huEIDo7kx+DRg8w8I5CvdBmrj09J7RN6G+kATq82Lfs0VjSl8HMi2SxlAIJqw6FgsqnDl3P+iYVQTRgCUxrgsjHUrLHRxJmjgtHhtLC6Snwz/LcJVwI5udB9DAYD8HgHxc3TuGFjIgmNqtuwBSRbCMRmIcF9UJu7178Ps2M/NKeP8K+4dW7fRGvaELf6VaR42lqw3WJOeQGWjx+xLYmtYmbgTB1zf8kKj2XECuBQ3qIZoM6yknDhxBdMTLYd3qYBE0iXIep8d2bTZhFZNpHCPg/Yr4yk0n/VYyn3vIJvFBVwVAIKIIIBVdlhaczQcjiHBFR38Sb2y/zewQuoiGidGpzWu4qTTBU6tDX0ZF8F6I0AIYIfS1+oU6qfh6/9W5f6t0MXJ4d6j8j6KqQxDySEoRoL9aVuowAsUkXfuFCbUjiY7mF2qgmAOEUzUn8T0qCQ9ZbPkFloMqFQ0ARRDi8Kio9nQkiH6E09T0w/tU2Ob9osSU4xLqSZxFIpFDdonphPnkkk76uWFB2Ag9DRJINRjhiLc5m+taIPurdvuykLfI5DGf6vxBkUbhEZ3UlCFmwPiKcMDzYP+X17tZvwatZeEJPBTcUZd1iMbXHkwdpZEUNzqeUtX7ekYbtfcvDkvOa1ug2xxYdHestnyAyUGXCI6AIIjxmoWtMc4JAPL74QyZFRJ6G3WyT8w0tLcp2dC/phGmKrGXFSKCFbGm5g4eHbxdmnVM/uQ7fO8FMcbdyhi9nSbiSerYvDV0BPIGodkh2YPWfvRZF3i+DBAkLMikx5WXGzdlM//FOdY5buG9o2lEj61YPu/WXf0inawCknGXcCMIan4ZVIFpoq2MQcNHE2A4Xe90k86oj6gNwoP2AYTIvHM2kRd6Mhsk/9IepKvgioAjCF6LqC0x3ghAzlNwCxD+PMfAtzrVc6sw37eGM5nr7kKui4JqURgxAJNuhltwlBJxcsrlKQp7Xu/3yiKVihPywSVguy5VdSE16GJgvcc0l7eEE55YwiED/zoZ5vTNhUHti6Uc01q9zy5HtlTDIpS8xv7s0xtmZTPoPji/BI0GR/PZRb/lU/6WqFpwIKIJowJqYCQRhbWZbvXoFwCdJINEZ+B2BHhZvBwUfgLgk5ajBwH+3RTdd4BbCW67OsHosTWtJ7JWus5ifuyyfcz3bd8uzYGFSmjI1AjY/AaIPlKVWFbcO4J9tSZP+V+PxBZnMT1+XLcVYPHUKkZWrw+lEGCTlaDH7oNW0X05ql6yCxWE9w8CQSAnrkdpWqMxGotAXu8UZq6d8GvApz7ouFEE0QOQzgSAEDIUT7zfBODOkh7CoboBw6eSbO5zn87hdzKwmktvbdN6BBfEYa5SSZcYrtFDX9gtRa0Vuim0Djzhf8HUmOl/kD9eY77OZsPoF4qNYV/JMAok8GtGAfY6B8Q0QH2w3DfYjCNF2ZyJ5GDOJnOG2lKoBewWPmlHjaJ+ggnWVT9CRqnLBEFAEEQynqkrNFIIobrD53MZ0mfOR0x0Efpw0nDa8Li3MUF1jJNnrd3b27s8a/zh4H5a660eca/kPp7pLNq56tt+RSH5ShCd3VRuVDkjcvm4yNO1rD61b/bTksTuINZdvvmlbl8+SxsvmaFhflg+EcFZ2sP9Sv8Us/A5amL/DzIsDkpJbnmzXruopH7/5qb8HR0ARRHCsKi45wwjCmqdISv/MC9gXpnYiYB4IkEhfWTw1jwH8DIh+wazdsMeuxsPpdFoE0wv7o3ldve+LMi9nwiEg7GULsSHae56AjSDcONHKdwbNpmYbRN3aF7etlq1eOZSAZeLgDWAX263rBQIeZeY+SRTaMrPSoNZcjj67AexcmOvrAP8WjCth4nbxNhE03LuXwIQJdKTF6GWIvNgk1GLF/izZAPwooF3fGjXvCRJ4UtJX3eQTdiGq8nIEFEGolaEQaEIEJH4bNfM8b0K41JRcEFAEoZaGQqAJEZBkeKsolWwTQqOmFAIBRRAhwFJFFQKNQqCjKyWcDEUYFGFe+mvW+OaRdenfBu2/oyt5KGA5v80RdRioKJXs/2/vjlEaiIIwjs+sG4KNnsAD2FtExAPYiIUpFC9gKdgpWOYq0ZwgtRK0t7axELTQUhDyZBMDi0lMJr4Ub+ffjy9vflN8xe6s8/4eddUUICCqOVe6Slxg/H+RS6eevx5Pe3243O6UJbu5/z5xOq4fUYCAiIjJUQjEEmjsNPdVtfiMyGj/4T1Itnd/136Y8Ru6vds8l6Ct8qdPVPWkd3vdjnU/zvEhQED4mDNdJiYw/MR1vysim6Wrv4WgZ1+f6zeTdk2KTeW8VrsKKqe/Xk/t1vNwuOCbRonJcd2YAgRETE3OQiCiwF9b1IPXWiV7HD5fCGsapCEqG+MLjvokWf/A8vwiYgsclbgAAZH4ALl+dQUGzxJWPy6CyOUCm+0FzKyt8+ri0VkUAQIiCiOHILA0AcsW9egSpq3zpd2cg5MXICCSHyENeBAoNtufX1a2VMLRhM32n81m6Ylo5x+bzR4o6dEgQEAYsChFAAEEPAkQEJ6mTa8IIICAQYCAMGBRigACCHgSICA8TZteEUAAAYMAAWHAohQBBBDwJEBAeJo2vSKAAAIGAQLCgEUpAggg4EmAgPA0bXpFAAEEDAIEhAGLUgQQQMCTAAHhadr0igACCBgECAgDFqUIIICAJwECwtO06RUBBBAwCBAQBixKEUAAAU8CBISnadMrAgggYBAgIAxYlCKAAAKeBAgIT9OmVwQQQMAgQEAYsChFAAEEPAkQEJ6mTa8IIICAQeAbGWDfHvqz/kUAAAAASUVORK5CYII="/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-51"><g><path d="M 211 151 L 261 201 L 211 251 L 
161 201 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 201px; margin-left: 162px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Failures?</div></div></div></foreignObject><image x="162" y="194.5" width="98" height="17" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAABECAYAAACbHqJdAAAAAXNSR0IArs4c6QAAEYRJREFUeF7tnX90HNV1x793dmSJkGJoSAyEH/lVTpJSmoONtSvt6ojfhNgxAXYlEwxOSUKaUtOkpTmltE0bk4SQk5xwCBRSBwwES7sYDHVLiF2ieH9o5dQpJYSmSSAtuBhMXWMIyJJ255a32hWj3ZndmZFmdq298+fO+/l5b+c789699xHkEgJCQAgIASFgQYCEihAQAkJACAgBKwIiEDIvhIAQEAJCwJKACIRMDCEgBISAEBCBkDkgBISAEBACzgnIF4RzVpJSCAgBIdBWBEQg2mq4pbNCQAgIAecERCCcs5KUQkAICIG2IiAC0VbDLZ0VAkJACDgnIALhnFVgKSOxxF1gXOFHhUz4Qj6d/JofZTspMxxL/DkxbpxJS9g4mk6urc4bi110bIH1NID3Vu4x8NF8JvmPTuqRNE0hQMujA7+jM1/BhPMAnAzgt8otKQJ4AUAOoFSnbjwyMpL6TVNaKZU6JiAC4RhVcAlFIAARiODm23zU1B2Lf5CYNhAQdljeOAPf4amOvxob+94rDvNIsoAJiEAEDNxJdSIQIhBO5kmLpKFINPFZADcBOMxDm55kjRL5HcP/4SGvZPGZgAiEz4C9FC8CIQLhZd40IQ9FYvE/A9NXAISq6i+C8GsAWTBNAPx2EPrAeFttO+kZaHTJ6I6hf2tCH6TKOgREIFpwetQIBOOxzg5etRDWbGUPogUnnMcmhaOD3QTjEQBHmYooArRBp6kvptMP7DEXHY/HQ88+jzM1ws0AvX92tTwy2YULd21PHfDYHMnmAwERCB+gzrXIhSwQTtnIHoRTUs1JF4nED4OGTSBaNUsciP9iNJ36OgC2a9np/fFj9AI2AdRvzsuMq/PZ5N83p0dSqxUBEYgWnBciELLE1ILTclaTenriEdbo+wCOqNxg4M6p8SOv2rXrjqlG7Q/3D74LBeNRmrZ0Kl+cm+zCBfIV0YhecPdFIIJj7bgmEQgRCMeTpUkJa5YKgb1k0Fm53PCTTpsUiSb+CMAtpvR7WKOzZMPaKUH/04lA+M/YdQ0iECIQridNgBn6+/v1icI77gMQN739P4IiLh4dTY07bUpPz8DprPEPABxZzjPJwEfymeR2p2VIOn8JiED4y9dT6a0gEN3dq5doemEFiD4CYCmA4wDopg69CPDTDGwuaqHUj3cMPeeks35sUteU6XJT303+6rQV573u7o8fQR1T1xHwCQDvADAO5p8z4V5jsrBx584H99nxUZu3z+0JLSMy1oJxDoCTTKxfJOAJEO5eFOItXg0Vlp4dX9w5QSvBuJyBUwEsKben5MBGwFPMPFycKmyp11aVp79/bdfE1OtXQ8MpYF4G0NHE2JDLJv/SyRyopJF9Jje0mpNWBKI53OvW2kyBUA5PGtM3AJxtYbpo1+4iM7YUQ9rnGgnFQhQI0rTdMPh+gN9jDYi3duqHx0dG7jpYdZ96+uJnsIFv11r1WJb0Koi/hAJucfqm3t8ff+tEkb4ExlUO/RQKxLitWOi43m8Htp6e+Ieg0TYGji73dr8BPmcsk9rVgn/LtmySCEQLDnuTBGKuDk+K5PMaa6uy2aF/tcO60ASCgGsYpYfvB236XCSiy3Lp4SHzfWUFRCFtPYOvcSHE5SJ4zNCLl4yNPLC73vSNROK/jRANl8Xe1Uxn4BfQ6EI/9wNq9yD4iUldP2vXyKb/ddVYSewbAREI39B6L7gZAtETS1zMjHss3jJfhXpYgB4HoJYjQmDjNFDJjr3Wc7bB8s5CEwgQ9pmcv9T6exagZ8pfE70AnjamQmePjW16sTIjli79dMeiww7cAvCna2YJYR8xfsIg5WQWYvAyAn63anlPZXuKjMIFudwD/2010+rUUVr6Amk/KY0ncSeAXjDeXStU/vkm9PRcdBJr+j/PElaiW0bTw+vqmch6/1dJTi8ERCC8UPM5T9ACcXrf4Am6YTwK4AOmru0nwrqJ148ctjJbLL8Bf4LBXzUFZFPZJwFeNZpJKRPImmvBCcRMD3mM9dBgfmTovyo/qeWd8SJOHEunnjKBsPE+5p8bjHUnHofHUqmUEuKZa/nyj71NW9TxdzT9pWL2WH60U+dLrPYlwtHBfoKhHsAVES8y8JUunW+0Sr+8L/5u3aDbGaU9kMpl+fUz1+lv82XzvMbFc7PZzT+ba/mSf/4IiEDMH8t5KylogbAwN3xFY21FNjukoqnWvXr64meyQQ+a7eGJ8WW7DcuFKBCl5RhdO88sDnbQuqPxpRpoW5X38XYUeWB0NPV/dWBTOBa/gphuNT/0ibA2l07eW50vHE18iwD1Nj59OXg7VxvZiw5iy2wHNtv9k0ZTw/J+d/9Fx2uF0P0AdZsSjBNhTS6d3OypUMnkGwERCN/Qei/Yh1hMT+tUiFWHPlAtLFmkFF5LAbTCzcOkktbK5JEZ9+azycutlgoWokAQ8Le5TPKLDkacIrGBm8F8tSlt3aWiqjIpHE2oL4nr3/y91rns1HPXHP7W1yceNH8NOA2VHonGzwfoIQCLVB0MPKej2J/JbH7GQf/qJgn3DXwABm+Z7Rx
XWuZq6H0917olvzcCIhDeuPmaK0iBKIVMCNEfMBAFcBoBiw3mS8ayqYzTTs7FTBTzcB6Em/qt+uQmv4WD2GsG8/lOeKllnJBB/wKU1vvVVbT7ArBjb1FGje9AyXJpih4C4cxKOcx0WT47/L1GY1p+w98GkNqbeJxBT3WQ8a10OvVSo7z17pc9r+8vm0tXkhbBfP3xx+Gm6mW1udQleeePgAjE/LGct5KCFIj5aPScHrCHvkD82tALfY0sihTncG/8o0T0wMw+AuFXKHD/6Gjqf5yOg9UXm8WSHoV7E3cT4bIZgQjAKsmuD5FYfDmY1EFPyj+kcqkN/WtHM0m1ZGYbt8kpF0nnDwERCH+4zqlUEQh3ntRuBGrevyBcOOX19CZuYMJ1lTYQsO03b+n82BM/uOc1NxMmHEtcR4wb3sxTu0/QExsYZGa1N2He1FaH9DxE4I2d+uEjFn4ZbprhKK2ltRLwKkBXjmaG1ReFiIMjks1JJALRHO51aw16k9oLgqX9q4/uKBQjBKwGoPYvKkdLqr+8bXjyBbgHkerU9146MjJSqMfRJjzFLwHth+7584cALDcJxFgIHR/OZO7bX/mtbPm0tc4JbwUGfkaMYQ7RlhOWGL+Y72UeZWrb+ZYD9zDzgKmP+4l4TS6d+if3/ZYcQRMQgQiauIP6WkUg1Ab25OQrSwxNP1UDncDEy8GlB9O76nrltpNA2CyRVQ+z1b6Ag6ngNImlEUJv7+Aygwy14azCpDS6lL/LVjLoromJxT90EpG1UYEWEV8nmfnyfDalnPfkOgQIiEC04CA1UyCUT0TIMK4hQL31He8JjwhEDbZmCIRqRGk8i8Y3iXChC49ttT/wXZ0KN1hZvjmdE9VLagC7DujntC5J5w8BEQh/uM6p1GYIhAo2F9Kn1jPhDy28duv1R715qqWNE2cSiUC0jEBUGmIS/jVVm8X1xvZ50nhNbkfqMbcT2tJ8mukzo9nh292WJembR0AEonnsbWsOWiBsnJdq2zcdVkJ5CucB+pHGlM1mh/aEY4lriXGjCIT9ZLI0PSV8IZ9Ofi3gKUjh/sGTaIrPg8ZXgKH2M2pDprzZqL3QtPPdnhcdjV56VBFTj5gc4orMtDKfHVZHlMp1iBAQgWjBgQpSINRGYsdhL99eDlNtpvEyCENs0MMd2tTjxxwT2mu3ienGimjBbVI73IOwcl4jxj/ksslPNXMKqvHv6tp3cpFCa4gxACrtL82+HHhhV2exCOUtkVqbOdAe6xaB8AjOz2xBCoTV0ZFvmOx/H0X+eIPQDzMImi0Qkd6Bq0D85lnGLkxPVSd6ehPfYcIn5/MLyGp+uKnHz/lVp2yKxBLngnG3eRlKhRLhqVCfOeBgo/aJQDQidGjcF4FowXEKVCCqbPMBPIsinzU6mvqVQzQ1TlmBm7lGEysJeNjU3h9pfHBlNvuw2h+pe1m92c9H+60qrREyD8d0qqhK5XAd6jS33SrKLrOxravjpc0VU1sV0oLYWAGmiDrsiYFcl753TSNT3Eqbw7H4WmK609QHeftvNJEW6H0RiBYc2KAEwsY23+5wG0tSpZPnOorqiMhT5vMN3M1pY+FagXDu3awepgar8BfHzmf7Lb8gegZOYa1U14xHMQPr85nkXzt1GItE4u9DqBSuY8YogIGb85mkOleidM3VY9sioKAIRAs+J4JokghEEJRd1hGcQNQG6nPp3WsRPK70qAvUUa67Nx7ViFR48cPLqOuGHDcNh2/tt/yCUHGvNGwC0SrT/b0gXjmaTu1sNE2UoE8WltzMYGVpVrnGGdoF+czQSOWHaPTi9xQQGiHghPJvrmI+1QgM4FhwG/VB7h9aBEQgWnC8ghII1fWadXFgP0P7cD4zNFYPjTpHefcL+DyYvmxhFmu7xOPHJnUkEn8ndBoB432VNiuhm+ji+K7tqQM2/aBwbzxBVFpKmW3FMw8CZ8euuzd+jkalaKkzdTo8vc26vcwPwcBq8xGk01+GSzYCfKlbIbIO+Q1H3uIt+FeSJs2RgAjEHAH6kT1QgbCM2UPPkGZ8KrcjpcJAzIqVo4ThuRcoDMbX64RxsA0v7odAqHX5cDSxwcISa0tB09ZVn5OtwlDoHR1/Y+vz4aNA2HwFqGm0H8zXwsB91edN1/FRsf366O0djBlkbDWf0wHgJWb63NTBxUkrT+np88ixEaBl5i8Ug3nVWDalzrBwfFmee+HQ4stxJZLQdwIiEL4jdl9BkALRIGaPOiYzB1A51LNxCkC/V3WCHKCiknJpXf2Icm9tlyR8EgjYHMSjmlMAeBeg/bvNEZ5FYtzKVIonNR2G20eBUMWrt/TOg5SqOr2tMlHGQXgcTD+d/sH4fYCWWnyljTPxn+bTqdvsvpAiscQ3wPgTi/uvAvxTQHty+h6/XX1MAlhSm5bumBxffLXb0BsiEO7/962YQwSiBUclSIFQ3XcZs8dMrACmb3IH3YqC8ajpIJiXyaBzc7nhH1fj9UsgStY90cRn39iHuKmB45e5SUUC1sMo3MmarjZ+3xuEQFREYtFBuuONs6UTHqago1DZJee8gnYrwMp72u2ljii9u0vndVZHlDYqTASiEaFD474IRAuOU9ACoRAsjw6crIE31lk2mvVgBbDdIP68Om/ZKqwC23gJ+ygQqn3U0xc/gw18G6D3Nxjal5j5j/PZVDIWu+iYAuvqeNXABEK1rbSPs4eUQChRe6eTqchAnomvrDrn2jZruY7PvPEFsh7AkU7qAKDOp7j2+GM56TXCqwiEQ9ItnkwEogUHqBkCUXlgqf0FzaBPMnEMwEkzSxulMBv8nwxsLmqhVPW6fvW51upBZkxOrdi588F9ZsQ+C0SpqrJ3+PlUsvah02aWTsp9IKLbFoV4S+XNuMak1uclpuopp9qrd+3vDkG7ksnoBUgFSaxsYqvloGcZtNUAfXdnZviXTk1izfWUQm93HjgDhNUWdYwDvJtYyxZhbCgcPGrM7ZJSdZ9EIFrwweKhSSIQHqBJFiEgBIRAOxAQgWiHUZY+CgEhIAQ8EBCB8ABNsggBISAE2oGACEQ7jLL0UQgIASHggYAIhAdokkUICAEh0A4ERCDaYZSlj0JACAgBDwREIDxAkyxCQAgIgXYgIALRDqMsfRQCQkAIeCAgAuEBmmQRAkJACLQDARGIdhhl6aMQEAJCwAMBEQgP0CSLEBACQqAdCIhAtMMoSx+FgBAQAh4IiEB4gCZZhIAQEALtQEAEoh1GWfooBISAEPBAQATCAzTJIgSEgBBoBwIiEO0wytJHISAEhIAHAiIQHqBJFiEgBIRAOxAQgWiHUZY+CgEhIAQ8EBCB8ABNsggBISAE2oGACEQ7jLL0UQgIASHggYAIhAdokkUICAEh0A4ERCDaYZSlj0JACAgBDwREIDxAkyxCQAgIgXYgIALRDqMsfRQCQkAIeCDw/9PUXuoYdJv9AAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-53"><g><path d="M 51 851 L 51 
884.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 889.88 L 47.5 882.88 L 51 884.63 L 54.5 882.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-54"><g><rect x="1" y="751" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 801px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">ACPI s2idle driver notifies EC using _DSM</div></div></div></foreignObject><image x="2" y="780" width="98" height="46" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXmAXEWd/+f7uiczAeVYlBtB13XXYz02yNxxNOEUCBB6JuEKKyooEUUXkWNXWQEPdkHzA6MoiqCQmTZcolyRHTNH94SNusgiIorKZYJsQITJzHS/74960z1587ree/X6mu7p7/sHMl1Vr+rzrVefqm99D4I8goAgIAgIAoKABgESVAQBQUAQEAQEAR0CQhAyLwQBQUAQEAS0CAhByMQQBAQBQUAQEIKQOSAICAKCgCBgjoCcIMyxkpKCgCAgCDQUAkIQDSVuGawgIAgIAuYICEGYYyUlBQFBQBBoKASEIBpK3DJYQUAQEATMESiZIA5ZnHh9zKafAHi967V/IZuPGB1Npsy7UlzJRCIR++MzeGeM6QQmHA5gfwB7zbRGeA6M3xPop8RIbt++6+bNm6+divK2np7Eqyam6HYQ3helnqfsFgZeAGOTZeGezMTUXZs23fqcaXvd3Sfsk+H4EIC/zddhwvnpoYEvm7ZRq+V6ek5vmcyOH87MJwNoBbAvgLjT35z8QNyfodi6Bzaue8JkHDqZMXBsenjghyb1o5Rp6+79NDG+5Jpz300NDZyua6O9u/d6MFaZlI3Sh3orq5nP22zwoWPDyc3lHks131Xuvs91eyUTRHtX79kAri4YCNHVqaH+cwBwJQZ5yCHH72EtaDqPgDMB7BbhHc+C+d9h47pUKjluUq9MBOF9VYaI1k8RnWey6M1Hgli06MNNTQu3fYxA/2oowywzbsvGrHPDMBOCMJnZc1emmot2Nd81d4hW5s0lEUR7e2IhYlgP0JGa7j2etXjJpo3Jx8vZdeedcawGO4vKq4tvmx8h2zptdLT/gbA2KkQQ+dduI+JTR4eSPwrqx3wjiPb2xN8gRt8HcEQY/prfQzETgigC1SpWqeaiXc13VRHCqryqJILo6Ei0s0V3A9gl19tJAAtcPV+dGh64plwjeXdPYu+mDN3AwKEBbapTwZ8AYoDV+PYGsNCn/IsAnZEa7v9B0EmnwgShurYVxMekhpKb/MY1nwhi0dLErs3bKamV47RKaSNAz4K4GUAn2FFfxjzYBGImBFGur64y7VRz0a7muyqD1ty1WhpBdPZexoQLZ8iBsAYMpVbKkQSPTrbgqM0bki+UOsSOjhMOZCv+YwBv8bSVZeABAl052WLfq3kXdXau2IfJ/gADn9KoMsaJcOro0MB6vz5qCYJxf3MTLxscTP7VZGw9PcftNjnZvL9t8TkE/POMjj1XmYG0PTl1tN+9xDwiCGrr6v13Ai724PYQE85KDw2Mesn63YtXHBDL2lcR4TgPUTxkxzNHjg3e8qRXBkIQJrNy7spUc9Gu5rvmDtHKvLlogmhtXbmX1ZTdAOBtqmsMPBFj62Tbsr8Nxhtz3S3LZbX/jpN+R5b9odGNyf8yuevIqTW+AeBED5xPW5w9bGRk/f/qYC4HQbjbbV+8ogO2fSuAPV1/V/r11emRga/r+jBfCKKzc/lbbYrdm7uIdoZKwH0TLZwI2kgoY4Qnn8Z5ILrUTRIEXDI6PPC5ynweZq3KJbUZTu5SsmhHx2wuahRNEO1diSMAun3mtMC43840HU9NU1/J7ZCnx1PiZXVPT098MrPXGgZ/ZDZAPJiJY+UDg8k/RQFOXYwuWPjC1QB/2F2PiPonXt71VJ2FU7kJQr23o7t3OTNunKX+CjiVzBeC6Ojq/dwrur/PurD/I7K8JJVKPhYmRyW75p1euJGZ+1xlH7KnYkvHxm7eEla/Ur8LQURHVggiOmZzUaMoglCL9kRmr+8CfFK+08S4fHRk4KK2zsSxRHSLa5dX0mV1a2fiUIscInLfI2y245njdKoFExBzFlB3EtDmKj9uMy8bG0ne522jEgThc8H/DFu0JL2x/1fePswHgujqOmn3LKbuAkiZsuZPD5FOAAUbE+Alm/mIsZHksInsK1FGCCI6qkIQ0TGbixpFEURX1/I3ZBAbJOCAXKcnX7kHeH96eGBDe3tiP8Rp0KVmUkWKuqz2WUS3Mawj08PrxkoBrK2z7wQivnn2pTrd1BzfsmpwcDDjbrsSBKHab+/sOxPEs1RKfrb684Eg2hb3vZlsVj4z++Twjby4a3FgWp4e6Vebkjl5hCCiwy4EER2zuahRFEEU+j7wg5Px+JLNgzf/WSmV2rv71oB59Y4BFXdZrbGSKlllle+TutdYsB0/BqjDBbz2tFMpgmjr6j2GgDvcg
memU9Ij/cr8c9YzFwShVDrxlm2tMVhnMHE3gAO9DmzEuCdD9N1Nw/2/CbsHauvsO5KIlaNa3iLpcTueWRzlJKi9fNY4DJZ6Sa0uxuOcXQEbK0H0D7kTbIaB/wX4Bp5a8K2xse//RQmpmgThjCtjHQnY/wzQP7mcQtWm5g/ENGRb/K0D9uZ0MpnMVnNRcVSAzS+8ly1WToLKqXTaYXXaMu1ehvXV9PA6ZanHUQiioOwOVexLHYt7u9nG5TkHS+VcqVSN9zPhmjwGYe/SrQV5jUgU/KY1K3veBCCRr8fAmvTwwMeD2lH3a088EzuYyD4d7Fho7vjOgC0EPAjCDQtifJu5UUyBc+9v45TpHhq65ZnW7sRbLMexk5Y489qRD29i0Nqp8d3udqvZIxOEdrH03DNo1AAzJ4wogLd19X6V4FhF5Z/IO86g93V09X2Uic8F048YfGdLnNM6AVSVIHy8o6tMENTWmeglov8EsJ+JzJQVFhOfMTaUfNivfPviFe+CzWcD/E4Ar2VgexNletSkNXmHKqOMI6gpu5GAN83UYTorNdKvjA9mnmIJorX15F1i8alLmaDuvKa9ufXPswB9IDXc/6O27t7zKu1JHd3/hx8hC2ebGnCY4u9TjjoWJ97LNq4BHDL1e7JE9APO2B+Nx7PNnsgAvp7UOoLITk31xpqbPgPGuRoTaPX+mTUnjCBU4cK7MR6LoenI4eGbtplio9GshBnpmOKW78KLIP48Mrg6zMlXM/9/iyy/BzFSloBX+Jr+e76lyASh833Iq5fyo1jUs/I1CzKZnwD09hlwI15W6/TVQHShmQo3qFxVCcInHES1CMLPWsgQx3Em/mh6KPndsNOEYXsFxVq7EosskLon2j33Y5aZjkmP9N9VKkHkVGADecs8gz6OM/M/W0QHMfDFHXMdZQ210daz4iDKZNe5724M+qaKZEG4avLl3S6MGl7GsH04p4aFz5+fMzwIIlR3kxsszn7Cppi6W8yHjjEnCPAgwxoh8Gd8yEFNvxkTeyOC6Oh7G1uO+jNvWRh5M1qoMvbXnCjCp5h1KYPV6cLr4xMCP4/Z8eyJQSdvHUEQsCY3T/38wgo0KJEJonBXr1+0O2b7SKgBR7qs7uhIvBMW3cfAa/JoEeNboyMDHzKdvOUqVzGC8MbwCbhwrRZBaK2r1ELDeIIJaQI5ahUQ/yMY6iTgnWxluSPyk513p6fMq+PI9gwPr/9dKQQR5GcD8COAlVbtv3LSPJiAt7pOF9vAGAXh/ZUgiMB+uWTC4F0I6MrFIvPAR9dOju+6ugIkQe3diX8B0xc0i9yLADY4Do/g14KwGIw9XB1TJvJvdp1QIxAEVNvKiXLa34rxe1j4qfP/Nt4DwkHue08TgtDdd5qoh/LjUfHEJjIvJQE6Ov83v1hpfpaU098VniPGzxikIlDENPMt3/zDZGeOGh295Q+6b0WzZikn5glX9ImtDNxPoJcAu805+RGt9YZHikQQXt+H6fHo7dC19wcRLqt1+nloVAnlIoGgdipBENo7EMJjyHBPKpV8ytufahCEsu6KLWhSPgpKt517eDBr4QO6kCkKl+0Z+lTO6c21e9Rf9pcqK+dewLbvyS0suebMDQv8DAB8zGcd/4yMxWd6x56zglPOfioOWOHuj8pzgvDx/3HiUdkxPk8jE2rtXvmPFmeuA+hgF95ZBn86PZy8slQZuOt3dq7otsm+0xVJQf38IpjPndy++w1uQnKCaj6N91lkff2VO5w3aPoRhSDy1ccJ+Mx++/A1rvsW6upa/vps1no+lUr+nypoQhCqnOYE4L5bDYROY4CxlWxaMjra/5CXrfWkyo/YjHNety/u994dBcy3e5rjfKKxWny6I1kCfZWz9sVuNZX6tmBlp7xuA5EIQmPC6itU/SWw+WV1wcWfS6dYzklu0lYlCKKts/csIifIoWuB8V9Yq0EQbV29SwlQMaHy4VJMfAyovTNxPsjZReYfX3NdE7x1ZXx2XZMAL0sNJ1W4l1lPlDsInSm1Oq2yzecE6Hr9d89lIgjNN5AF88X774srgi6gcxfZXwP4VBcoxv4mJjJydtwWbgbRMlf5rbCs41Mb1ylveO0TcCKKThDMF6RGkiqKbmBAUFOC0ESmNr471Rju3IUslnvnj0ZFqnDagCz35QnNBzpq606sIqavuU7tWSKcPjo08D2T+a/KMPCdqfHdzjQ9TRoThM73AWAtCPnO6hZ5vw/aO0BN3YqFAw77IMpJEDkd/+kg+n8e9YyvH4bqX1UIorv3XGLs2GUGLHRuzHQnSz9rrDCsfRcWjWNhVOdG3QlCN6/Dwp7k+zgdjfb5b8xyDFU/loEgdJgC5qoina9POb3ONRoC38Wq4NvuWtFKsNWdUf4eSRWJShDGKmtTgvCZC6FWSFr1lN4aUWPhiUBVkQc7TZga/abbJ35c2KV5wednTBCaI5RqLNC/oaPw4sfYTNUbN5+AP8PmQ0dHk78odpEptl6pBKH0ky9nxl8XB5Yy7I/pLD3CmL1KBDE7rwGQbI5vPcnrF6LBkdq7eq8HsATgPzDoF4B1Q6m+Kvn3dHQn3s9MyuvcvaAE7ohNTxAayxNlaXPK6FD/OpP5op/jpauYOrr7VjCz2hnmT5iRTwCFbcwyRzcZnm+ZwjtGc+2A3hQ+MkGYzk1jFZMabKGWJBwzzRzQkpfmhGJMqnlBmJ5y9AQR3cjHmCA0eR9C1Qi6ixvTy+qCxCrAjB1vSTO7iMpViOYauouoCkFMOw+6gxZW3CopTBw+cauKCrCoO0F4FwS/S2+/fups38twglA7xevcJxO1gUgPD5wRpk5x91NDfpEtc3Tj1n0PUf0GWjsTXRY5kaB3zr0j0gmCCRelhwaU/0PoY3qCUA1pTm6+asz8iwu1HXpVcQH5BNw5RplvOuy1GyTG99IjA6dFmUNGBOHjVGbE4G2dfScTOTsh9xPqWa3xgagtFVPotDQrwMD/UJZPDItFVA2C8MkOmH1FLfhzML5uZ+J3VjPmUe5ORDkNuoMaZhh8fno4eVXUEO06gtDshO9sju+cGBy8fruZBB1HOWPVnElGOZ2JNxM+mR4aUGM2fjo7j321TS3KMfE9M5XKYOgRFEnBtHMaf5ZoBBEhO2AUgnBOEV7/qwATfc1CnGXmE9IjyVkOsKpd71xTRhB/3an5+AfvvfElU9yc/nX3XkiMy3bU4YI5a+pQGvZeI4LQ6xvNjuH6RSf8OFrzdxBhyIb//jyUkG2+JszpRTVVDYJQR3+fUNzu0TwJwp3MWN8S32k4ykIaDslMCWrv6jsR4Os8SaGMyEG1YqhiorbO3huIcEr+zcWYUhd4iJd4B6FZgFX37gFIa9LohyuDmwhOGl6VwtV5ou70dW13dPS9my1W1m5OJsdi1L9vP+zUnV/18sStrpwgUQgi0kkoKkFoTje+xhqaS2dtWe1JE/wbwFKRqCM+jqPpIS6CKHDq058g9FEagl5uRBCa3bzxBZH+chvhx7bCk4fWISoiskUVL4OKSYVBeBrgJ8HYYCH2w+3bd/kfU0uCKhIE1GmxaYJu
IsZRBmBlAN4MxjezU5nbouTYDmhb3Wd8VOPtqRJBnZcaHlBWHKFpbE0Ioly7rIJFokSC8LF0MRCHQRFDw4OgljQm6EWpfz2nqSgEEUmbEJUgNBoT3/VK45ejvdQuwxoSJJIC/E3mv8FsUeQf/OitKcJqmfwebCuvYXGVIK4gpILJm0otU+oldanvryZBqHdFD+vgjFDF/UkzrE/l4+1EHbdjyrrT85drwidEvgsx+UCEIKJKaLr8fCcIZ4xeJ1aNmklDJL4nm3lLEBpriuJmVWGtwFOI7phdzPG/HJ1tNILIY+Y4wmVxIjH+JWd5ZRISIKPMKSfGd/tSlBNSQDKn0PzTOhkLQfjMfDlBKGBCTyAay6QC1VGh6t1fdT4vCcJHPVSONTffhu9ltd4CKrqZVlBn1WVgBpn1SodKhDvItu4fGVmnAsfNUmE0KkG4sXNUT9tpMUFFN8VhnrAJXpgjee629pywv5WJ/UATa+hpsvnE0dFkKuqkm2cEEbqgRcWnlPKNcILQ+DYUXD5rLp19c5uU67RqKjeT+W/SVqCKycfG+zkwFZFjmlUoBnVZ5grJEHxZrbmojnQ5FQaALhyI7pQiBFGApJPn20b2SBA+BNAiTeRTEy9sKP8a2HzbrOis0wxtZN3lJ2PTD8TEqihsHhWoQ0u8g9DEIZuz+zfd2DV3JJEJTHNpWzN3EPkxey0w3abGGkszv9AaTnOaS3llMFCx2HKm8z9sbgcShMb3IfRy2e+FPsl/AtvTEZQCdUHT1o8YOG+Fjr0wbwW0TlJCEMFQTsdIyt4AUI+rZOii1tm54mCbbBXRc8bKJlf/bmT55JDQA4GdMv1ACkwGA9K++r2wwJS7RILQhTT3C/wWNskr8fv0iS++EcDrc+37mnb6vV+zwNYcQRRYYLr8FgrD0gRHlVA4dHT2fpMJH5zBpIi5ZipP0/kf1p4vQfjEUoocI93dAV0GNcD/stpHxfUXi62jR0bWDYUNLuj3zs7lb7Uppkz13IuTj4laQfINtcW9v7mJl5km8Cilr6pupc1cp+8ALBWJcvF0dEfEYrDf742S6jeO1sXL/86yY8pkbyZ/RNCilssN8YPCwG10Y3Pc/mipuJp+IJokRqEOoB4MCpzaSnWU81GvBoa10e70p3Nn3EPA7q+oBn9tA7+ybP5OqdEIdLvhKJFPVV+jnEKiWiF5sSi2vmb9mSHCAstOAwMazfoXeOrw+dby4TpUUqInAfoFs31fS9Oz692bZtP5H7Yu+RKEzoqo1FguOp+IMM9VXcRI01g5foP3iZIJvwWtEU4QmlSxkXaFUXSsPgHbypq7wPQD0aXIjbJb1/r5lHiCUPNWc3qPvDHq6O49hRkqBEreuMA4+FzYwqFR//4qY1mHP7Bx3RNhdZ3ddFfv53I5JPLFa+4EMS2HxBEAqVOuE8BSEWEc8c+5c6uHrWH5Aeo0Igxcmh4e+DcT022nP+2JNyJGKm/F6/Lt6sjZdP6HycqPIHRBpSLrGTUv17WrigVcVvfEJzN7rWGwyvDlfkwiIBZ0wS8Wu9J5W/bUYaOjt271VmoEgtA58gQFw/NipF1oNbmifaKAZgm4dGJ8t8uiWD4FTe4IH4huTprGPdI7FpaBIHQnXOV5O9HCic0bkqF3gDk1kAqI9zYXTj/LTk4dVg5/lVIWO90iFzFYX6S1qNgThHPSaV25l9WUVbkrcjjyGDMuJyKVWjQXJsQsvL1vBFziY1JDSZWKNfBR36hmLRxnWEelh9cNuitHmP+B79QShN73IVzHFjZAB/DCGCyzsj/p2vAPERwtraLSlTfZ9nUu783861RmsBU693hVoBEIwtnVFQaIy4L4gtRQ8j9CdjgFIb/9dlU+Yc7LntAmygeiVzfyYCaOld74+K75mU/L+p2CpEllIIjpgHa9V4LxidnfRLgKzslWZtGaWfpuQOWRWJ0eGfi6yXdqUEbXv1B/lZw5cz+ApZ531OQJQvXRo056nggbmXFsrv+RTtq60PIMPAqLjktv7P9VAO76+cZ8u8qb7o3GEGX+B8laSxBa3wcDHZvBpHI8dRdsx48B6nCVD738bu9OHAImFVfGHZdHNZF9JeXpAwS6crLFvte7u1InhoULX3hL1sk9jV5NBrTQRbBRCEIXIhpAhhhrM1NTl+h2niqHMzVNfaEgeQ7hK6mhgU+6iUWf8AcP2fHMkUHpE03mlcmpzy9h0PRirMuMpt+AOP4h2kRJuV6UhyDgcwpQkP43I7Za55CoEtITkwr0p+6RZp4opw9TvH02bs58yWaaLh4b+/509sHph9oXr3gnbPsGn5SuNUsQPsnP8uMystbLF/Y5Baift4H5PNi4qSCHhH+e9K3wOX1UjCD0MUOipQsNm2Aa/aqaPzc1x7esCrJOygnqBxqrF/eX4DLDZZWWcO+AnK8ZEF+4/964MjQByxTdDsL7Zl40zy6p8+PS7XByv6mwGo+BaAxMEyBuBnMrQG/UmLhqo9Pq5R42W8J/190ZRP1AAtNAAk8yMOykW/VPteqeg2XLSR2wMXLSU4Kx0UnrqeSxI92mBzT6HSz7uNTG5C/D0YxWIqB/4yD8Aky/DE6HOvO+miUIn01tjvboam+azjAE/e5Ac/VmcJv+t/0OHzNydVr7VHoouVb3vqjz36/PBScIre+DweIdBor7d59gZEY3+gFqoihdUGWftplPHxtJKv1iYGyfRjlB5Hd6msxVUbB9iC3q9R6XAz+yKK1rypaDIFSzjpwzljcTW1jvXiRgLUP5g+TyVZTpBJF/sb/FV1jX1O+KHOjE1MZ1PzcpXUwZn6i7gU0x4ccWkHGpamqWINRANJfy6s9FX/pPfw90LeBoNaI+oXHJKkYQpWSBMx1lgId2aBhw9Q6Vle2pLXiPbdNlr6TIfHfACUHXpecJ+M/sVNMazxHYt/sNRhAODrmInd/2UQfosBpn4Js81fSvOlx9Ek6ZTpmwxeb89NDAl92Fiv1AnIx/z5D6aL8K4LXBHeRHyLZOy1q2bYHuqxRBqD7kVHmfp2kiWmgAnJ+qx6Bq9CJq4xbL2lcR4biQ7zEDpqtg259FnNaCsSr3tpomCP3GOTwqdRCSrrl2hds8PKiOsuBk4jPGhpIPB5Urdv5725x1gvDZ5UXSsZlOLa/52HS96IA7F+rxzNEgej9A7wBYfdSvnunH9DH89wT6KTGS27fvujmqpUwjEkQOPzqkq+/v4syrmHA4CAe5Qmyo4Hx/IuBhEG6YaOYfBlnXVDJCablOEF6CmczSccz8ERD9/cy4nfnEmxi0dmp8t7vVXCp3NNegb8hRT0zQMWCcxsBbPCrULa/kof4tGN8uY3Rd009alaPW7sSbLdDHwDgUwIE59aOjnmRY67MWfSNvClur0Vx1A9b5ppRq9p9/j1Jvxlu2tcZgncFkdwK0v2sT8CLAf2TQnTbo25uG+38TpvFQ7VaEIKLMBCkrCAgCgoAgML8RCA33Pb+HL6MTBAQBQUAQ8ENACELmhiAgCAgCgoAWASEImRiCgCAgCAgCQhAyBwQBQUAQEATMEZAThDlWUlI
QEAQEgYZCQAiiocQtgxUEBAFBwBwBIQhzrKSkICAICAINhYAQREOJWwYrCAgCgoA5AkIQ5lhJSUFAEBAEGgoBIYiGErcMVhAQBAQBcwSEIMyxkpKCgCAgCDQUAkIQDSVuGawgIAgIAuYICEGYYyUlBQFBQBBoKASEIBpK3DJYQUAQEATMERCCMMdKSgoCgoAg0FAICEE0lLhlsIKAICAImCMgBGGOlZQUBAQBQaChEBCCaChxy2AFAUFAEDBHQAjCHCspKQgIAoJAQyEgBNFQ4pbBCgKCgCBgjoAQhDlWxiXbunqPIeCOmQqM+5ubeNngYPKvIY1QW9eKQwj2xwG8D8BeufJZAH8C+A+wYqvZtvcvsn3jMcyXgj09iVdNTNHtIAfPSj2/jVOme2jolmeKfcG7F684IJ7lo4h4OQN/D2BfAPFce+MAP8lMY0R0x2SLfe/mDckXin1XI9bTzQMGjk0PD/ywEfEwHbMQhClSANrbEwth4QwQJlPDyWv9qhZDEGoCb8/QGgJOAxDTtU3An2HzobZFBwhBmAmulgli0aIPNzW1bDuBCJ8D6B/MRuSUyjBwY9ayPvvAxnVPRKjXsEWFIIoTvRCEAW6JRCL21J+so5ntL6oPmQnnp4cGvlxGgqD27t4rwfhEUHcYeJSnYoupKXuIEISB4ADUKEFQx+LEe9nGNRGJwTvoF5noogP2tr+WTCbVKVMeHwSEIIqbGkIQBri1d/euBeOsfNFyE0Tb4r43k80/AbCPpzvj06olYoCbATz00k7Ny3d6eeJ9QhAGgqtBglCnhuaFz5//ikA/61IheQeTUynShPMD8a5g7OEz4iwI30SGP5lKJdV8kUeDgBBEcdNCCMIAt/bu3uvBWFUxgvDeWQBZMF8GG1/UffTFqLAMhjkvi2hPEITvpoYGTq/2gBU5LFj4wtUAf1jz7qfAuIabrJvTg+v+AIDdZRwV5JR1GBFfAuBthfXp2snxXVdv3nztVLXHVQ/vE4IoTkpCEAa4RSUIgyZnFWnr7v00Mb604488FkPTkcPDN22L2paUn41ADREEtXcn/gVMX/DcMY0TcAlneY3JCcBRdz5DZzPwRQALXaPNMmN1emTg6zIHBIFyISAEYYBktQmCGd9Ljwyoy+pZu0iDrkoRDwK1QhBtnYljiWidZ1HfysDJ6eGBDVEF19Hdu5wZN3ra+yOyvCSVSj4WtT0pLwjoEBCCMJgX1SYIzJEKxACKuitSCwTR0XH8nrbVdC8B73AB+BciPml0KPmjIkGltq7efyfg4ln1CV9JDQ18UjYXRaIq1TzTSQAJRUAIIhSimi1QCwTR3tV7NoCrZ4HEfEFqJKnUikWfEh3fCdu+B8CbXW0/nrV4yaaNycdrVijSsbpBoGwnCM2HOMt56JBDjt/DWhBfRaCTALwJwKtzKG0B+GcMWjs1vtvdxV6yKd3sE8/EDiayTwfjUAAHzliJEJ4D868J1J+divWPjd28JUhCkU0jPTv+sEvk1q7EIgt0H4DdTWeK23IqrP2wNgOxArYQ8CAINyyI8W0Gzn3a1y1amti1eYKOAeM0Bt7udfoj4GFm7s9OZW7btOnW58L6XOzvc02Kf+WIAAAgAElEQVQQCocF2/FjgDpcY3jInootDZuHJmPu6Or9XM4iKl88S0SnjA71K3VWyY93cxT1dFts/XLPnyiX1N3dJ+yT4fgQgL91APQ4uipijtl8JoFPAHBQTs2XszzDRiZcc8DenC7W9Li9PfFGjtEHCTjW1X4G4McY1vqsRd/I+7+E9bXUCVBxgnj55df8eUHL8x8H4d9cpODX72cB+kBquF8du412Vs5i9zQtJ8LlMwINRiVDROuniM7zczKaxwQR1f7+RRB/HhlcbXKBqmB3sMvS58E406Mf95NKhhhrs5mmi8fGvv+XUie0t/5cE0RbV+9SAtR8XpDvGzEuHx0ZuKgcY+3o6HubbfF6Av6HCHeQbd0/MrJOeXQbfT9hfSh2gc+3G7V+peZPOQhCjWl7hi4j4KMBJsq5ofMjZFunjY72PxCGcf73d/ck9o5N0dVEOM7PWTZXVjlKfq0lzhdls9lXB5GZ6bv9ylWUIJDNHo1YTJnl9UboaIbB56eHk1eFTXIT7+OA9z5NFp86ujF5v9GiEjSAOjhBKC9wilmXMliF8dB6avsPkcfsePbEscFbngyCob098TeIUT+ApRHknduk4VFYdFx6Y/+votYNKj/XBNHR2XsZEy509fElm/mIsZHkcDnHWam2oi7w3n5EqV/J+VMqQcDOno2YdT1ArRGwfpGZTk+P9N8SVqd98Yp3weYfAPyGsLKu3zdYnP2ETbHb/U47EdrSFq0kQTwF8IMAHel68zgIvwDTL9UixeCDCXirho232eBDx4aTm/0G6BxBt1OS4aiTvM+LBKQZ9DjIcTDrBOP1moVRK8CentNbJqZeXg2iv5tu2H4vkP9/5w+bAPrFzEvZ3pQaSV6X/3eYCqitZ8VBlGHlNZ0zU+R3AjhkxyD4N4D1X+5BMeO29Ej/XepvYe17wQi0vyc8Rwyl4lM66yCZPEx25qjR0VuUjX7BE/COcTA/ArJ+BuXfESgPHpxswXHljDM0lwTx9sNO3flVL0/c6p6jeW/4cqiXSv34TepHWeB17ZnWr/T8KYUgWJ3OAOW06PpGYbrG/CpjWYcHhUTp6DjhQLbiPwbwFg+G6qTwvwT6b/V3/XrJjwCkYrZNq6vN476ZiB+VJAh3BxSYly2I8zVenbbS5zXZ9nUFCz3R1amh/nN0p4ienp74ZGavNQz+iGeUTzPTxw7Y177dq/9z9IZZ+yrN8W0rLOuI1MZ1P/dDLOolddQFvMAPIsSKKWL7Pvb3/IjNOOd1++J+L1bT90VNykJGqYncp417muN8ou5eoq1rRQ/BVpM8b5ufZeALLXH+kq78IYsTr4/b9A2P3MuqP1fynEuCKNAPT3/BdzbHd04MDl6/3egLneNCpgu86bfjd4dR6flTCkF4xvYsM507tX3XAe99aWv3yrdbnP2+xpFxdWp44BodRk6/MvQDAIe7flc+LbdlY9a5XmIJWMemq9chQagF+PjUxnWjfpNIexogPIYM96RSyae89Vo7E4daROpY5XIUMlKDUHtXr9IfXjGrLvPtsLHST89ezwThcyG+AVnuS6WS/xew/lBbd2IVMX3NvegT4fTRoYHveeu1dfV+lQBF6NNPAMHni+QucG8DqGdHe+VdQOeSIHTYM7AmPTyg1Hx18VSPICo7f8pEEA8jy8uC/Ey0p4GARbuju/cUZlzv2ohlCbh0Yny3y/wMdnLhWi7iaRPn2eriOiMIY+/O9q7EEYCz6Ocv854nmw7zXvKo08NEZq/vAqysofJPFAchnf34uM28bGwkqSyLCp46Jghq7+5bA+bVrkEFqoo8g9dgxaOTLTjKrQbyUaUYhVL2yp2BJ+LI9gwPr/9dOVbQyAYHUV8acNrTEkRIoMeor690+WoQRDXmTxkIInCNcMtBY9b8uB3PLPbe4Wkt3EI2qzObKxXTa6cXbmTmvllzoK4IIu
AU4J3YXV3L35BBbJCAA/K/6eK1+wS28z3C6T6g1taVe1lNWeW9OhPThoHvpIcHztCptOqVIJQaJ2aTCgKo7l/Uk/U7AfgtNJo2Jhl4v9v7V/vxMZ2SHulXx+3Ap7XnhP2tTOw+gNRd0S8Y9HAT2V8dGko+G1bX5Pe5JIgCVaCaXIRPpocGlAFGXTzVIIhqzJ+SCSLCwtvameiyiO5+RVOxc07I2jtVTbm/kM1HjI4mUyaTQ1mwseUE+dxzpnyEfpq8o9J3EMnm+NaTBgcHM2Gd6eo6afcspu5yWwloCaKz72QinlFxFLvjLLQu4Qcn4/Elmwdv/rO3r/VKELnwDsqCYvoYGoGw8xhMn9j2vAlAIv83jZkmtXX23kCEU1zkXhGrpLB55P295giizpLUVIMg1Mys9PwplSCimCZrNrtagihYgyIu7rpvs67uIJhwUXpoQPknhD6mAuzo7P0mEz64o8HidNYa+3Rfy6l6JQjvBCTgvr/u1Hz8g/fe+FKoQFwF2rp7LyTGZUGYd3T3rWB2iNutEx1n4HYCf7c5vvPgXFzMzjFBFPhAhIWKjyKXapStEkGg0vPHdH1RmOqMC5hpuYm5qk/9grXFsZTMvJQE6OiAjVeoiNu6e88lxpV1eYKIcpw2EaAO1GIv/UxVWgr4eiQI7e4CheazoTPQKVBghlsQbTZn+XQnAW0+bU6b7DH6OUa3HbCX/WixnqZmfZ4uVXOX1HIHoQ21Xun5Y7K+5OeVhiCyzHRM3sw8bP5p6hcQxKKela9ZkMn8BCAVZWD6K4tAQvk6bZ19RxKxSps6vTGLeAoJG0tFVUxRcr6aCFBbpsgPTrtL8Dn+1ydBVDQXszYHc2fnioNtspWhgcqnHPa8COBOsun6iYld/6vYECthL5lLgtBtQojxrdGRgQ+F9btWfq/WCUKNt5Lzx2R9CSCIUL8st7xMCGL67i2+0XU/qFzfjQw73O8qMIQQgpidgL7YI7uWIHzIRgiiYLnSEoQqFWqnrV/5VCa0b8cpc9nQ0C0qTETZnrkliMJ7NfGDCE7WVKn5U3MEURiPLRIJ5T8QIYjhAXV8cp6KnyB8LG+EIMwJIl9yOqCZ/XECTp1lZRG89PuGPymWMeaSIHRqPvGkNsvmV+75IwRR3BckKqZ8xMZp+1btEW++EESxp63iptZMLWrrWXEgTfHhsHgVGCqsiDsTmrf5UM/2KP2ZS4JQ/Wzv7DsTxO4sb2WPxdTembgEhMNBWG/Z9o+3b9/j0XKp7KqpYvKRa1nmT80RhKiY/Bdc3UQwEWA5L6k1/hS+F1H1SBA656Na0H8rL9CWlufelKXYqcToAznhkmc/Bl7YpiQx1wShc5aLYjIZNk6ds1U5Tyk1QBCzICh2/pisL/kXmdwhBMnFpL7yxaKm7EaaTn3gPHJJHYCqqQC9IR2KNt303P4T8GfYfOjoaHJHEL5cf+uRIFTXC0yCy3yBFbZ4GfxO7d29h4Fxg1sNVc4Fbq4JQkXRRQzrPUErQwO4GWDnFNFEIFBuBTc1x7esMvE/CntPYQgVMxWRale3SYmaTyKkf8bzx3R9Ue8zWeBLJQifDVzkMPAFJ9Qyf+N1pWJSQmkTRznjaK4a9cZWsmnJ6Gj/Q2ELg+v3fLgO5Sj3pIpiy2zf19L07Pr8AuScxtg+GkztABYxMNoS33qq6QLV1p04nZi+43pnURd2pifTMi9SoVDqbPwZuDQ9PKBypBSdt8EhHws3g2iZqxOTAC9LDSeVJ2/JjzeQZJR86bpdsg77asyfWiOI3AbOEwqe70IWy81zrxQ6sTa0matDEGoxsh338n1mjmYRTV11qRrnY6gNnSt+1IVJZbdCzAnX8boZvD0B50r12NaoYeYVQfjY+P/FYuvokZF1KnNZMY8+Sm+Zd5AFkYaBn1q8/ZiRkTuUmXLgo0uWpCWIzsSxRFS0x7/J/KlFgtDgEynUhu7bbHiCqFCwvsBdV72qmHx2mFtBfExqKLkp7AP3Cas+zrCOSg+vG8zX19j7R4r5VEAwgDa4WVh/a/UEMX3ydRZBlQbUfUEfGunYb8wd3b3LmXGjpz3jgHKmWHpP7ACMTqFOxFFdMDlNcMNqzJ9aJAjd/RER9U+8vOupYUYGvikPyrxBqDsVk5rYunDfKqmHZWeW+SW0yX0Q+nDfgG+eA1XPSxBhl4wR8zWgkvkgfLAyiZNEbZ2JXiJH9bNjUdNEm/QhbSMi0of8hnEMr7CFbq7vIPL9C0iIEymtq0P6cZwHpn/1Jtoi0NoF8S3nmKr2wrBzvjVt/nS6dnJ819V+i5hKA/zk0zgPRJcWhKPWEEQ15k8tEoSjZtKE+wbzxfvviysCIg34rWON7Umdn9ABCYNUBNCP778PD3jBVRdPWcSvYMYKz6QNXcgKLuqAwLjwtUQQAVhtA/N5sHGTV+fZ2nryLrH41KVMUAmZ4q6FxBerzs4V3TbZdwLYxVXeN7mKs/h0J95iMb4L0MGuOmXdBdcKQajxhWRBfAqMa7jJujk9uE5l7XPfTVBn54p9bCt7MphULon9vIu7MtaYaOFEObPxqXf4xLJSyaC+wVNNF3jziKvov1aWrvDNq+wTHr3S86dWCcLplyZhEBHWxZA5z+s4GpDMa3pKyAliGocypRwdJ8Kpo0MD64N2U5rLXlVcRah9GqAMMTYuaFr4kXwwuloiCAOs3GlgVXrVdwC0SJMGdpyJP5UeSq71wUpZlFwJhkql6n1eBPiXgJW7HOfXqs0TAJUq0fME705Ndr3uMhUP1pd/meGHmcu7rMKgHxEwFuVZ/ieA2AkQDuwd4jtyN7J8ckgCqKjQzZRv6+w7gYhvduVqyf+m+jkC0O98UsmOg+lqEKvgmtMpMf3zZ1R0/tQqQShIOjuXv9Wm2L2aEDUGKUcxOUsuhvPQdDLUpYopP7hp9QSpBatXk286DAO1u11lEoArQIC5d/Cs4HW1RhB5kliwna7NYRWGjfd3tRCclxoeUNnlfK1upndD1tcAVt7TUR+1K72hJc7n6FKURm0sX77WCEL1K0hNFHGcGRB/HhlcYWr5ErF9p7ijHtvp+cvBODfCd6Y2FB+N2bGHbLLV4hdGENOnlQrNn1omiGmSiBTHLC/GAYecwZ+ZkasQxOwp7ug7nyFFECqNaMHRW/NBZAj0zYm49W+63A8+H5Au/aZLJrOzoNUiQajOFoGVYoM0E58xNpR82GRxyb3jrFdOIEr/vJtJHQAqrex5OtWgYX3fYrVIEPnOTidjsi7KZUcM8i73ji/DjNtj4ItGRpK/LhUjk/quufNVAOoEGPDwIyCclRpK/rTgDiMk33ql5k+tE4QCM0IcqgwBl0yM7/alpp2eV+G+v5QXRrF+YX7CrOsThHtQjtVE8wvvBWElE3cDONClJtlCwIPM3J+dyty2adOtz5l8FN4y0zpzUnlg3+dRj8wKn1CrBJEfj8Iq3rKtNQbrDCa7E6D9XSoMpQ76I4PutEHf3jTc/5tibPVny6PgHeMAP0lsjWRhX5fZvvtYmNVGMfJSdWqZIPJjykUI6HnFx+Q4gLsAeo1nf
jl4MdMYEd0x2WLfW+67BlN8FZ6TWTqOmT8Cor8HY49c3S0A/4xBa6fGd7s7L8+oBOGeozu+59LnTz0QRG7s1NqdeLPF1ocB+3CA3phbxzIAP8aw1jfR1DX5u4moBi6mcnYRTtQqUl4QEAQEAUGgFhAoSApW5nDyZTtB1AJY0gdBQBAQBBoIgcJUrRGdhsOwEoIIQ0h+FwQEAUGgAgjkVNHfnA5hg/8morsWxLb8yNSPRYUysZqyGwC8Lde9SJnvTIYkBGGCkpQRBAQBQaDMCHjvZxizjV3CXud1sotaP6x99bsQhAlKUkYQEAQEgTIj0N6e2A9xGgRDXURPP8wXpEaSyiopMIhje3fiEDCpZGp77uhW+aL45tsUgiiz0KU5QUAQEARMEHCiHEztuZYJypEw/yjH27WZqalLdNaWyjqwqeWFXiK+ymNuXNZEW0IQJhKUMoKAICAIVBCBIC9qZdYKojEwTTgOimz/E4j+QeNVHxbloOgRyAmiaOikoiAgCAgCpSOQC/utwq+41EXG7b4I0OrUcL+K7Ft0bhG/twlBGMtBCgoCgoAgUBkEInhR5zuQfSWeV9om/rBplINiei4EUQxqUkcQEAQEgQog4BCFnU0QsBygv53lUU94Dsy/ZmB91oolH9i47okKdGFWk0IQlUZY2hcEBAFBoE4REIKoU8FJtwUBQUAQqDQCQhCVRljaFwQEAUGgThEQgqhTwUm3BQFBQBCoNAJCEJVGWNoXBAQBQaBOERCCqFPBSbcFAUFAEKg0AkIQlUZY2hcEBAFBoE4REIKoU8FJtwUBQUAQqDQCQhCVRljaFwQEAUGgThEQgqhTwUm3BQFBQBCoNAJCEJVGWNoXBAQBQaBOERCCqFPBSbcFAUFAEKg0AkIQlUZY2hcEBAFBoE4REIKoU8FJtwUBQUAQqDQCQhCVRljaFwQEAUGgThEQgqhTwc2Hbrd19R5DwB1FjkVl0noWsP8ExgaOWesO2Mt+NJlMqkQqZXtUDuDmnbd1s20dC9hLAXodgFfPvGA6Rv+fABpm4EdTLbxx84bkC1E70N7dez0Yq3T1mOmU9Ei/yjhW8tO2uO/NZPNPAOzjaWybDT50bDi5ueSXSAPzBgEhiHkjyvobSIkEoRvwU2D+Imxcl0olx0tBpLX15F2seOZiEJ81ixDCG80w4/YY+KKRkeSvw4tPlwgiCADJ5vjWkwYHBzOm7fmVa+/sOxPEX9f8LgRRKrjzsL4QxDwUar0MqQIEkR/6Q2xRb3pj/6+KwILau/reD/C3Aby2iPr5KhkQfx4ZXGFCVoEnCOCJOLI9w8Prf1dCf9DTc3rLROalJEBHC0GUgmTj1BWCaBxZ19xIK0gQaqxbGTg5PTywIcLAqa07sYqYvgZgYYR6vkWJ8S22+Zwwkgg5QaAcaqYA9ZLqv5wgyiHwedaGEMQ8E2g9DUdHEAwcmx4e+GHIOGhRz8o9Yhn7b2KM94LsDwL0LgAxT72tID4mNZTcZIJLe3fiEDCpd+/pKf8UAV+HTbctWDDx5ODgbc/nf+/pSbxqctI6CBYfx4BSR+1X8C7mC1IjyS8BYF/VT8AdxHQduqk5vmVVKWqmAPWSEITJBGnAMkIQDSj0WhlyCQThHQK1diaWWkTXA9jX8+NDdjxz5NjgLU8Gjbu9PbEQFm4G0TJXuSyYL4ONL4adAFSdRCIRe+oZOpuBL3pOIKG7c80J4kUAzQAW5PrzeNbiJZs2Jh8vRn7O+GJYD9CRufpTACwXqYb2sZj3Sp36RkAIor7lV9e9LyNBODi0tyfeiBjdDuAts4AhfCU1NPDJoB18R0einS26G8Au+boMXJoeHvi3oHoaAeTVVN+adaIhujo11H+OX1tegmAgTYxJEBbn3pElolNGh/rXFSP0jo6+t7HlWC/lT0e3AugBsHuuPSGIYoCd53WEIOa5gGt5eOUmCIck9GqirWTTktHR/of88Gjr7j2XGFe6fn/InootHRu7eUtUDLWnEcJjyHBPKpV8StdewQmCcT+Au0C4Ykf54tVM7V29ZwO4Ok8GzLiQCJcLQUSVbmOVF4JoLHnX1GgrQRBqgG1diU8S6MvuHTwBl4wOD3zODwCNiqck09KO7r4VzPw9Vx+eJ5sOGx3tf8CYICy+AEy3uXwWilIzFaqX+C6bcblFpHxQ5ARRU19FbXVGCKK25NFQvakUQXR0HL+nbTXdS8A7dgDKD07G40s2D978Z5MFmhnfS48MnBZRvTTTtMtiyALoJQZnYmx9cGRk3ZDJ+8G4f7Ip1rcgk/mOyyy1KDWTRr202ganLdB9QhAN9clFHqwQRGTIpEK5EKgUQaj+dXT1fo6Bz7r6+pLNfMTYSHJY1/+Ozt5vMuGD5VAxFYOPTsXU3MTLJqask92ObQx8Jz08cEYU4vKolxx1W9aym4UgipFUY9URgmgsedfUaCtJEK2diS6LnEvnnfODZsL56aEBpXoqeNo6+04mclRC+ScL4gtSQ8n/iLIYFwuwH0Fst60DZoXGCLnL8L5fp15CFsvtGN4iBFGstBqnnhBE48i65kZaUYJoXbkXNWU3EvCmGYIIUBsdsjjx+phNysrn9S6gMsRYm5maumTTplufqySAfgQB7JzxeD9nmfmE9EjSKIaVTr2UGh64prUrsUgIopISnR9tC0HMDznW5SgqSRDKgW1iim4H4X0z4DDuV2qbwcHkXzWAUXtn4nwQfUHzmwqbcT/Y+tZki31vMcH4wgTkRxCqr14HtyhqJp16SVlzCUGESUR+VwgIQcg8mDMEKkkQalAay6TfxinTPTR0yzO6QS9amti1aYJuIsZRIaA8SUwbbOJbpuKxlN/FdxRggwiiIESGoZpJo166szm+c2Jw8PrtQhBRpNO4ZYUgGlf2cz7yWiMIBYg6eWzP0BoClAWTN3SHHjMn5LfyW6BksSeMIILQBNkzUjMVqJeYzkqN9H9DDUIIYs6nf110QAiiLsQ0PztZaYLo6Oy9jAkX5tEj4M+w+dDR0eQvQhCljsWJ97JNXwXwtojoZxl4wCK+dL+9cbdpfooggnBOQ94w3SGe2apOW3fvp4mhYkCp5xm2aEk+wq0QRESpNmhxIYgGFXwtDLvSBNHW3XshMS5zjTVSOAkVW+mJZ2IHA/aZBJwYMS+EMn56BISzUkPJn4bhHUYQmkisgZ7ehXcwPKNekhNEmDTkd9emSsAQBOYGgUoThOYOYtYuOsqonUB8T1kH2Za9jIhOBOOdhiHB1QX3hfvvjSuDThNhBKFRM00CvCw1nFSmvAVPwQnBpV4Sgogi+cYuKyeIxpb/nI5+Dggi8JI6Chh5wmDCUli8KoQwQn0qwggiqprJrV5iTcIhUTFFkXbjlhWCaFzZz/nIK0kQPT098YnMnjepKNz5gTLwKE/FFhcTgC8MLJW7Ot6yrZUsuoAYhwGIe+psY1hHpofXjenaMiEIUzWTxsS3IK6UEESYROV3hYAQhMyDOUOgkgTR1XXS7llM3QVQ68wAg/0gyoZDa/fKt1uc/X7hBbd/NFYTgtCpmV65EH+/N2ued/HXZaMT
giibuOd1Q0IQ81q8tT24yhLE8jdkEBsk4IA8Cir95+jIwIeqgUpHxwkHshX/sTs3hU7Vk++LCUHo1EwMrEkPD3zcPaYw9ZLcQVRjBsyPdwhBzA851uUoKkkQbZ2JY4nollm+DJ6LWgVaT89xu23PLFCWTksJaAKwd27HreqW9GhCfk/qdvzOwu9NOepz2ilUM82OUmuiXhKCKEmsDVVZCKKhxF1bg60gQVB7d98aMK92jVhr4rqoZ+VrFmQyPwHo7a6TxuWjIwMXlYqW5s5AJaXW5tw2JYgwNZNHdeQbHlxUTKVKtzHqC0E0hpxrcpSVIght4D2fHblmwYXyig6I2WSMZWvPCftbmfhGdwDAUgkiTM3kcY7zTTAkBGEsxoYuKATR0OKf28FXiiA8i2R+kKtVFFPdiDW5I8Zt5mVjI0mVUKfoRxNy3DernOkJQnVGo2Yai6HpyHh8amp2gEL/S3EhiKLF2lAVhSAaSty1NdhKEIRPTupfZSzr8Ac2rntCSxAdiXa2nNwRu+z4nQcnW3BcCZFbqb2790owPuFq0zerXRSC0Jx6nGRIIIy7QngHZp8Tgqitb6FWeyMEUauSaYB+lZsgdJZDALIM/nR6OHmlH6TKh6F5pxduZOY+T5kfIMtnplLJ/4sqjo7u3uXMuNHtbR2UFzsKQejUTMS43Lbwgiv2UmD+aiGIqBJtzPJCEI0p95oYdbkIQnk1P/kMDgXoOgD7ugdHwH0TLZwIOwm0L17xLti2OkXsORscfoTI+sx+e9t3mgTeUyG2KUbn5NKdLnS19UdkeUkqlXxMB35UgtBcgG/Otbto+r/+6iX1qxBETXwCNd8JIYiaF9H87WCxBDEd5iKzB8fiBxHTYcw4A4SDNEg9THbmqNHRW/5ggmJHd++HmPE1jRe0qv4UA98j4luaY1OPDg7e9nyuTVrUs3KPBTa/CWyfCsYKALt53jdOhFNHhwbW+/UjKkFoL9d3NB4aDlwIwmRGSBkhCJkDc4aAjiDK1xn6HSw6MbVx3c8jtEltXYlzCaRCZHtDZURoZlbRDIPPTw8nrwrKbR2VINQbCkKA519rkFBICKJYcTZWPSGIxpJ3TY22cgTBQxyPnZYeXPf7IgZMrZ2JpRbR9V51VRFtPcvMH0uPJAeCyMFZ7A0d5dx90PlZqN9NUpIKQRQhzQasIgTRgEKvlSFXgCAeIqKLTe8LgnBQHsmTGfoAA58GsF9EzJ5n4Bs81XT52Nj3/2JStxiC8FEzhaqXVH+EIEykImWEIGQOzBkCJREE4TlmJ9XnJsvCPdnJ2H2ViNLqJA3aYr2J2D4aTO2Akwdib08uiBcB/iNgbSDLvmPipd2HNm++dioKsMUQhFbNZKBeEoKIIpnGLisE0djyl9ELAoKAIOCLgBCETA5BQBAQBAQBLQJCEDIxBAFBQBAQBIQgZA4IAoKAICAImCMgJwhzrKSkICAICAINhYAQREOJWwYrCAgCgoA5AkIQ5ljNy5IlmZoWgQgTzk8PDXy5iKpSRRAQBKqMgBBElQGvtdcJQdSaRKQ/gkDtICAEUTuymJOeCEHMCezyUkGgLhAQgqgLMUknBQFBQBCoPgJCENXHXN4oCAgCgkBdICAEURdikk4KAoKAIFB9BIQgqo+5vFEQEAQEgbpAQAiiLsQknRQEBAFBoPoICEFUH3N5oyAgCAgCdYGAEERdiEk6KQgIAoJA9REQgqg+5vJGQUAQEATqAgEhiLoQk3RSEBAEBIHqIyAEUX3M5Y2CgCAgCNQFAkIQdSEm6aQgIAgIAtVHQAii+pjLGwUBQUAQqAsEhCDqQkzSSUFAEBAEqo+AEET1MZc3CgKCgCBQFwgIQdSFmKSTgoAgIAhUH4H/D7YtySn4oWQAAAABSURBVLYKOmtRAAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-55"><g><path d="M 51 991 L 51 1024.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 1029.88 L 47.5 1022.88 L 51 1024.63 L 54.5 1022.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-56"><g><rect x="1" y="891" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 941px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">uPEP driver (amd-pmc) sends OS_HINT</div></div></div></foreignObject><image x="2" y="920" width="98" height="46" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXmcXEW1//fc7plJRFkeSsKqqM+nuKERZu0wGggJsiZ2TwIE8OljjSg8EUWeuPtckfxYBFEIIMl0S1hEkEXeODO9TDA+UETkg4DsRHlhUSaZ6b7nR/UyuXO7bnfd7p6enp5z/8kn07Wd76mqb9WpU6cI8gkCgoAgIAgIAhoESFARBAQBQUAQEAR0CAhBSL8QBAQBQUAQ0CIgBCEdQxAQBAQBQUAIQvqAICAICAKCgDkCsoMwx0pSCgKCgCAwqxAQgphV6hZhBQFBQBAwR0AIwhwrSSkICAKCwKxCQAhiVqlbhBUEBAFBwBwBIQhzrKY0ZUdP5AgCbqmiklEAzwH8LBh324Rb9tkd98VisYxJme094QUW6C4Au5ikrygNYW1yKHqSLu90y1+RPFVk6ghFPkeMb08UUQKbKqrJZq1nXdW2VfI3FgJCEA2ijxpMkDpJngZwzl67c7QcUTQBQVQlf727QT0n7XrWVW8cpb6pRUAIYmrxNS59igiiUH90bA6fvOnu2EteDWpSgjCW31hRNUpYz0m7nnXVCB4ppkEQEIJoEEVMMUEAoCvGRndavWnTFeM6kZucIMrKX+9uUM9Ju5511RtHqW9qERCCmFp8jUvXEQQDR6aGo78wKWTBgpNbWlu37EZBK8RsXwDQO135MkQ4KTEUvc6UIPzUb9LGUmmmW/5q2+83fz0n7XrW5RcHSd/YCAhBNIh+qp0gnWJ0dobnkkVrmPBJ598ZuN+yxxcnEjdudout20HMJIKoVv56dwOZtOuNuNRXCQJCEJWgNgV5akkQqnkLDg7v1LaVYgwc4mhuhpmXpeKxIm+pZiKISuSfApWWLFIIot6IS32VICAEUQlqU5Cn1gShmtjZE14C0M0AWgtNZmBNajj66WbeQRRk8yP/FKhUCKLeoEp9NUdACKLmkFZW4FQQxILelW9sTad/DdD7JlrFuKethY8aGIj9w9nSZttBZHcRPuSvTGuV55IdROXYSc76ISAEUQLrokHsMbl6FeEn/1QQRG9v+PXbxulmED6yvY08EkDL0uHh67c0O0H4kb8WQ+6AhSv2DnJmBWysBGWdBOYCSDPwR4Cv4fHWK0dGfvayqssPQbjTFs6G2tuP25Faxs8j4OMAdgMwCuaHmHCdPZZeu3HjjS+Y1NXVFe5ki34FYMc8Dv+0mZeMxGPDfnDp6Vn+1jQCAwTsnc83xsBHU8PRu0uVk9VT2loK2B8H6IMA5uXTpwH8lZiGbIuv3Hs+p8rd5ynUEwot2z3NwSEAb8v+bfvY/WfXwkiIbXwTQDuAIIDnAdzDhEv81OEHm5maVghi1hEE/hKkdGhoaMOzs5Mg9PJXM4DVRB0Ijn+dCaflJxyv4v4G0L8nh/t/2RGKnGN6k1pHEGRZT8Hmn79GPG/VV8a3tgV3CA8MXL21HBmp86rWrbgNoK5CWcT4ZiIe/aIfXDq6+44jYoeXHCfG5uAwr/s3ypkCQawG038BeEP5uvghsnBGYjD2P9kpv8SnI4jM+Hgk0NbyeTDOAhDQZDcitPLtbJ4UQhCzjSBmkYlJu4PwuQssN9Q7Fva9i2yOAnhPubT530eZ+eMW0VsY+O+JPKXCkLjCchDwaQZOAbCfR50ZIjo+MdS/3mQHodJ09kTOAHDx9vL0O00vGXt7T5qzLf3PGECHT5AM8JXEcPTLujwdvSveQunMeoDUKt7PlwHhwrFXdz7P606PKqyIIMADDCtO4M97kIPinJKE5qeRzZJWCKKJCaKzM7wngjQAxtsdYsbagpuPHRgYUNv3ia8ZzyD8yF/JgO7qWvZmtoK3aSbqDMAPAVZKlcvgDxHwbsfuYgsYCRA+WglBgPACGLsWCAdAHKBH87uJbgB/sccDB4+MrFOmEyNzVldX33vY4l/nTVUqm6/VdJ4oVf7d8+3aYoMPGRmObXJjWxI3xpNMSBHoZQbvSEAPgL2K9VP64mcxQeAVAG0TDhuMx2HhN9lybRwEwlsArE4ORy+ppC80ax4hiCYmiK5Q3wrm7JZ/YjvNhHNTQ9HvuMVuRoLwI7/fAa4uJra97qVrmbnPmZeAu9IWn7JxMPaY8+8HHnjMrlZry1cpt/IvNm/42EFsL5dHOBhYkRpY/3jhb2rXNJrBPiNDsQcLfytnYlLpdDsAL483HVad3X2ngPhHjrbdjgyWJ5MxFURy4vN2v8ZNdoDPceOmrsC3h1a+1+L0TwD6kKOoDIM/lxqO/UDXHg1BFJKNEvD5PXfnSxznGdTTs3zfTMZ6MZmM/Z/fvtDM6YUgmpQgurqO2c22Wu4k4P0OEV8mm5ckErFksxOEX/n9DvL27vAhFmVdiNVBdPYjxpVs85nuSdFRNnWGwp8F07eKSMInQTDwMILWoU5y8JLBhCBU3mJC5d+PBYOLNg2s+3spfLJnCQHcANBSRzrtaryoLUAGzOfvtQe+W+oAOn+QfSnAqxx1PIEML0omY4+42+dJEMxfSMZjKopuyTMMv/2hWdMLQTQhQWS9aezMNQD1ThKP+WblYaObwKY6FlO5W9m19OKqRH4/A7y3tze4LT1vLcDHFvIxkLLHxg8veA55lad2Hi1zX7w873m0PZlPgqAS9n133aYEceDC8L4Bm5SZaN98GUZmJrd5ioEng8j0Dg/f8KizLe3tK+dZLRnl0eQ4ryltKnLmz+/CbiWgY4KUPXDwIIjHMhYv0uxS/Kh/VqUVgmgOgqAFvSt3bUln9ifC8WBEnCvbvIhbGNbS1PD6EZ3IM5wgqpbfz6jXuHNOOhQuV5bG3g/4IwhfbqimBKE2QR09kZ84ycvEzFR8wE3XtwWfP9F9zqUx+XnuALwwNN3leBCE9vytnL5m8+9CEA1MEDXsmBkQfyE5FPue19a6EQminvL7qaujO3wkEW0omIm8VsxeZeZ2ILtdDyA8kcYfQTxmB9MLRwY2PGXSbh8Eobt9/4DzwNtdn8ZTzIssdeRzVWo4+gk/5h4NOWvJUkcQTPhiaiiq7j/IZ4iAEETzE0QaxOftNR8/KGXjbWKCMJLfcLxkk3V1R77BhPO259l+58C0nI5Q5CxibD9g9UMQPl11/RCE5vb5GMBHJYdj6iJd0VfUbwiPIM29yWRMPVY18fX0HLtLBuO3O91amXB2aih6oSlmKl1395FvsGmOinB80EQ+plOT8f7LneVoCcJHdGQ/bWrmtEIQzUsQ6qnRlE18stOjxUvcRvRiqnLg+ZLfR13U0R25JmvKy3/qcDoRj/6HjzLQ0d23lIjVRJfzaPJDEIAvU4kfgtASINHFyaH+M3Ur/eKy9Wk1K39V1R0A/dUPbgxuIeBQAHs48C+61KchCF9mOT9taua0QhDNQhCEF5iz/vEbLQt3ZMYCdxX84E068IwniCrlN8FIpdFdvvNyHS5Vpmbl7f1ed5XvV/smiOLQG1ozk+YGtueh9pTuUDXkqiEIz3sZprqfj
emEIBqYIMp5/tSywzYiQdRTflMsZwNBaGTUmpmKYzh5374WgjDtYY2VTghCCCKLgBCE2cCcDQShkCjyTNKYmbp6Il9m4IIJU08J11shCLP+1WiphCCEIIQgfIzK2UIQRa64rsNnzaGz5yVMjwXIlJp8xMTko1OXSCoEIQQhBOFzLHWGIleDceJEthIHzF5Ft3eHeyzKhtjeIZvGzyG1z/r8nkHkzlqKXHEnvUZY1H6wNrRGQf6urvD+sOguBt6Y/1uGmY5Ixftv9wm/UXIhCCOYyiYSgigBUVF8GZ/uhV3dkR9Pehe6RP5a3iQuq3VNAjExmaPWEYqcR4xvTOTw2S9UvqLQ2A1GEKqN7ktpDEzcW+joiVxEgPJsKnwlA92pW9TUkhkk4B2FDJUc7ptqSQjCFKnS6YQgSu0geiJHEOB8v/k3Fm89Ih6/RUWGLPm9b/GqHV7/6rYbJ70JLQThidl0E2Q5fTp/L3JRBZ5lixalBvv/ZFhO0aWxRttBKDmKQm/kzUxjbcFtrpcKy8qvCwaIMrsOHZZ5ormDgF0I+LMN/Mmy+apEInafM70QhGFPLJNMCMIfQRjfYNWEP3a+ajXpuc/sirKYjFQ0sSNTw1HlKz/ln+wgzCHWhRH3sxrWxDxqOBNTHg3qDPWtAfPq/P+zbqzMvHWSeczwXkZxSA68bLF1eDy+Xr38ZvR1hSLHM+NqR7BDrWutEIQRnGUTCUGUgKjYzoqSt0odRakVogrtfP6k4mUH0RQ7CHVi4Jo4lVymcYX0faMBTUxKqM6e8BIgG7W2Vf1fvTSn/nXcJDeOQ9XdvfzdNgXunHTJDbhr2xwOe7065+ww7b3L9rLSQXVm4Xyc6XeZsfHF7iCJQhBl536jBEIQJWDSrRRVvP8yHZo6usMRIrqqKGCeEESzEAR0kx3AA+kgVt47EHvOQ1DvvtGgBOEOvcHA/cRsg+gDWRk9Qmt4yd8ZivwAjM9M/p2ubQvapw8MxIp21oV0KqQ4WbRm0pkekGHG6lQ86niHIpdDCMJo/i+bSAiiNETFtuJc+pvSlnXmvYPrn3RmV+GIgy0tF3i+TSwE0TQEkdtF6N520L+brNxjt6bpP/O7ymAREA1KEKqd7vsOk9peIgyHTtkeuwC1J/ktI7A6Nbx+ozukR3sovB8xqSizE2G+c9zkvVgTgig79xslEIIoA1OJCz5pgDcB1v3KHqp5VjJDjEuZoN7ozcXXn2EEYdSDfCbystVP9xmMTzGyydXbDq1zX7oY4JM1+Z9iYFg9nQni94KxvyYE+/ZsjUwQxaE3Cu02NblOgqczFD4QTOpsbTcNUapwMYMA/Q3EbY7nQF1J6VFY9tHJwdgfdLoTgqikRxfnEYIojyN19kROf81f/bslB/jkcjIEfB12+iq2guoBlrcJQeQAaiaCUPJ4vHRWrle9QsBlDKgAf7tkEzcwQeguB+a1afTinA6MzoUrPgCbf55/R7scXhpyoI8lB9f/r1dGIQifkHokF4Iww5G6FoY/zDYuAeidZbL8jZk/lYrHoqHQsvlpDioPDSGIPGjNRhBKrHA4HHjqWVKPNF0E4E2l+wc/RLZ1QsaybQt010wgCCWPxgNJmXi+khiOftlsCBWnam8/bkdqGf8a5Yhy4unWEuWliXFZJt1y/sjIz14uVa8QRKVamZxPCMIHjvnnIpcQ+DSAPghgXn719wKY/0xEl7UG+KbCYVtRJxUTU9PtIJzdR620xzJ0NDOfBqJ/A2NXR//YyKDLxkd3/tWmTVeMN3I0V92Q0Lhtlwyt4WNYQUWFbdtGR4BxAgP7AZjvcGN9/rV3qP8Cxk8z4+mbyj3pWqhXCMKPBrzTCkHUBkcpRRAQBASBpkNACKLpVCoCCQKCgCBQGwSEIGqDo5QiCAgCgkDTISAE0XQqFYEEAUFAEKgNAkIQtcFRShEEBAFBoOkQEIJoOpWKQIKAICAI1AYBIYja4CilCAKCgCDQdAgIQTSdSkUgQUAQEARqg4AQRG1wlFIEAUFAEGg6BIQgmk6lIpAgIAgIArVBQAiiNjhKKYKAICAINB0CQhBNp1IRSBAQBASB2iAgBFEbHKUUQUAQEASaDgEhiKZTqQgkCAgCgkBtEBCCqA2OUoogIAgIAk2HgBBE06lUBBIEBAFBoDYICEHUBkcpRRAQBASBpkNACKLpVCoCCQKCgCBQGwSEIGqDo5QiCAgCgkDTISAE0XQqFYEEAUFAEKgNAkIQtcFRShEEBAFBoOkQqClBLDg4vFPbVooxcAiAUZv5qJF47K6mQ60BBAqFlu2e5uAQgLflm7PFBh8yMhzb1ADNkyZUiEBHT/hsAn0HQACgK8ZGd1q9adMV4xUWJ9kaFIGZMn5rSRDUGQp/FkzfUp2biPq3vbrTKuncU9NDZ0oHmxrpm7fUAw88ZlerteVWAjrUIosIqxJD0RuaV+LZKdlMGb81I4iOnhXtBPt2ALsAkNXsFPf7mdLBphiGpiy+o7tvGRGvA9DKwMMIWoemBtY/3pTCzlKhZsr4rQlBKNNS61bcBFBvVt+EHyaHomcD4Fmq/ykXe6Z0sCkHogkr6OwMz4WFdSA6SonHwFXjozufIrvx5lH2TBm/NSGIyXZTPIEML0omY480jzobT5KZ0sEaD7mZ0aLu7hUhm+xbAeyoTE3MvCIVj90yM1ovrSyHwEwZv1UTRGdn+O0I0K8B7JPbPOArieHol8sBJL9Xh8BM6WDVSTl7cy9YcHJL2+teupaZ+/K7iJQ9Nn74xo03vjB7UWkeyWfK+K2WIKgzFPkBGJ/Jq+6xjMWLNg7GHmseVTamJDOlgzUmejOjVV1d4U626Ff5XUSGGatT8eiPZkbrpZWlEJgp47cqgujuXv5umwJ3Atgjd/ZAFyeH+s+Us4epHxwzpYNNPRLNW0P2LCKAGwBamt9F3G/Z44sTiRs3N6/Us0OymTJ+qyEI9+7hZbJ5SSIRS84OFU+vlDOlg00vSjO/9q5Q3wpmvi53LwKyi5j5Ks1KMFPGb8UEceDC8L4BO3v2sG9OZ3w7MlieTMZGfeqQOnpXvBnpzDILtISB/QDMzw8IVVQGwHMEPMjM/Znx9E0mdtje3vDrt43TzSB8JN+evwQpHRoa2vBsQUHj3HIGwV4O0NsBBAGkAX6EYd2QsejyewfXP6mRhdpD4XdZoE/BxhIQ9s639RUADzPxxXMC+PnAQOwfPnFA1u7c9tKH2eKTgGy75+V2ZngBjDsZ1kWp4fUbFdj17GAdPZEjCJg4IGXCuamhqLrMhbzHzTEgnAHQewG8IS/38wDuYcIle8/nVCwWU3os+3WEIp8jxrcLCRk4MjUc/UW1deXuFwRPJMbxIHongLk5feOvDNyUsayLPPRdts0FvYGwkolDAN6c70/ZvgsgwcC146M7/8qvJ1JnZ3hPBGkADNVHleoTY3Nw2Ka7Yy+VbVgFCcLhcOCp57EfbOsTr9W1GMBb8lip0l4B+AnAuptB6/bePfNbU73qxtGBPX3/aoH/nYAjJ9WT7e/8ZwL1p8fGf2Yy3gvll+o/Sk8tc19cQuDTAPrg
xPjKyoWHGXy9PZZe66c+p1z1Gr/t7SvnBVoyfQzuA9G/gbFrvh2qPz8DYASgWFvQvr2SecgpU8UE0dkTOQPAxY7CVieHo5f46JPUtTASYpu/D9CHfORLE+OyTLrl/JGRn73slc+LINLpwIuwrK+A+Kz8IPYqIq0O3LeN7vztwqDu7Az/CwWtS5n5Yw4C0+V/hixelRiM3WMoF3UtDH+YbVwCZCcvry9DRD/ntH16MJhpq9dNai+C6OrqO4At+5oybVaT2kMgnJociv2mHB4eA/zWzp6+jwL8UwBvKlUGAXeNB/mEewdiamKGmvCefM46nZi/4SAvXRFF+i7X1uxk+ixFAHwXwJ7l0gN4GqBTk8P9v/RhhqWOnshPCPh4vvwxgI9KDsfU2URNv85Q+CAwqTH9HrOC+SGycEZiMPY/PuRBeyi8HzEpmdRlwHKf0XgvRxA52fCj8n0VaTBdCNu+wMdity7j94De8PyWjPVDZl5eZu4qwPEKiL+GNC72IcskfVREEJrJ19fFOMW0ra978ZtgqElabZ19fwzcb9npoxKJDX/VZdYRBDKZwzkY+D4xDjOsMAPiLySHYt/r6lq2j20Fbybg/YZ5X2FgWWo4enep9NlVx9wXz2XgAkOlq+LutjjzGZsCN9cj1IaOIIisYdj2jQB2M8RD7SzPSQ5HLy01mRQRBPNRILydQGpXoXZ5ZT9FEtvmcBhbdnm1be5LFzH4ZMN+lmHgh6nh6DnlJjy1WECAfgZgSdkGTU6QIeDr20Z3/obpbqKjO3wkEW3YLgNd3xZ8/sSBgQG1YqzFRx3d4QgRXeXYLZiWayyPItSnn6UzGPhv//XQo7DoY8nB9f9bqmG6/kNEapevSFztGo0+Jtw23sbHltup1Wv85r1F1XhXFhafHw+kg1hZWDT5yVwRQbi8K9RYGgmgZenw8PVbDCqfFJJjUvr81hKwHsj+nbgNzO0OE9Dk5IwrW1s2n6YbKBqCeBrg3xcO/PIFqa3l3QD9DeA3gbDQsV0r1LUFZIWJbTWJqxhTuY/xOCz8BkzbAPv9AC0onsDKmgO8schte0u1TRHPuxwrV18kbaCniSRFBAH8GMBBBLzDUc5mBu4h0MsAvxVAt2ZAlg0d4R7gANSN4qMdZSkz4CbAuj/bP1Q9nDVzTlpoEPB5JrYLoV+0+gYO1uwqyp6luWKOTYYyZw4cVH2KwTsS0ANgLxfeGQZ/LjUc+4GJHnp6lr81jcAAIWvOVF9NvQU7Q+EDwaTMeG6yV2bCRG58KEsn78u5VX/BjFho/hgzn5CKx/pLyFOmr/MfHOP+vWDsr+k/m0F8RHIopsys2s+g/2RAUF6WcTV2S8gEpynVo7q6jN8S/U2Z/CZwK9HfKr5sWRFBuJVAjCsT8eh/mHT29p7wAgukAvipkBz5jx612T51nz1wj86m2d5+3I5WS/oLAH/WNQlvJpsWJRL9OUJxfBqCcP68hQhnbnt1537nKi5vT/88iL7omnDGVNiDXAH8W5tw4shQ7EFngQcsXLF30M4oc0vuNnnuK2kOcF2GKuR5BcxnjW3d5Rpn29Tq64ln8BGLrB/lJ2C3yHUjCFfFavL49F67c9SpO4X/1jT9JwHnu3UGy1ritRLUDPBCdWp1f7k9Nv4ll41YOUssBuMa1wSnAtxZeT1uAfPqvfZAv7ONnvou4Y3X29sbHEvPW8NZO7bz44dsxpnuPpw1Qz1HR4Bxhcs89ozFmcXx+A1/LDduNH05w8zLanFxrrf3pDnb0v/8GUDLCu1Q4T3IslYkB9ff595JZXf/c7acAKILJxNF6cVQVyiynBnXuib9vzHTWeNbd4q6d1M5mbEaRF9y5XnADqaXjgxseEqHW8n+w7jJDvA5bjf8bD8I4hww/Zerrz5gjwcOHhlZp4iy6KvX+O3sDn8CRJc75iS10Pr0tld3vlqzC6XOhSv2h61Mv5NMhWUXPjoZfRNEvkPFADp8okMxHZ+K96vtdrmPOkN9a9RgdSQ0vXlNHaHwicR0pXPyZo+6SxBEyVWI27XQKVDBdOG17ezoXfEWpO07nCtrBtakhqOfdgPjDqeQ/30zLOuY5OD6hBeQXV3L3sxW8DbNVrPuBJGNE2TR0anB/j95tNfDdOFtIvEY4BOmPi/Tj+ZMrNCkkvrOH1xe7rDxqyp+PxYMLto0sO7vbrmKd8/ZFHcjw33JZOz/vPSmXaX7cAvv6IlcRIByIc9+Xv2q3AB0/96xsO9dZLNyNtm90Ae9Fl3OvB3d4T4iUpNQfuGEMQY+qjOp5hZP9h35HW+hmAeR4aPKRVzo6IkcTICaWyZ2Nwx8PTUcVcRRFMrHs/8wn7/XHvhuiUN13W7Ak4jrNX518y2Asue9fuaiUn3GN0G09y7by0oHB7d7L+GfNvOSkXhsuFznXNC78o2t6fSvAXqfI21ZYQtpe3qO3SWD8dsBap8YKIQvpoai33TXXYIgytbX0d13HFHWtdD5Ga343AMZ4FvbgjuEBwau3uosTDPRZIhwUmIo6q63CFZXYMTC7/UmiC0Ma2lqeP1IGb1TZ3f4XFA2yu/EpO01CekHeHkPOY0ZRtVl5Baan4TUwXFhsnuWLVqkIb6iBY6fYHoa2YxNRZo++RuLtx4Rj9+iTJEVfx3dfUuJWJmXciY6xj1tLXxUOe8X3UKKGN9MxKNq9z3p05B3WVPRpLESihzPjKsdC0NP3LT9h/lm2FhZ7qBWeQdZLRllup04pGeP+aVe41e1iVoyg45Fp/E4L8bd11FAVgX+CaI73GNR9nbnDnklPmYH0wu9tnxORWe9gAJ0OgPKxWx/EDIBzhw6PHzDo6Y9vDMUuRqMEyfSE9Ymh6LKLXTS50EQRgNSZwZTAdNSw9FPlD287O47BcTbb7t6DLiu7sg3mHDe9kaXPa9wyqfbiRl3HFOsC+ncZxC5nmN+KVK3gvSy72oGuPLcOj4x1L++VLu1+iY8gjT3JpOxp0vl1ayitVgWu5zCxE49UXWxa7giMDoiFe9XUZBLfu1VjLuSsrtcmP2cJ3aEIucRZ3c1TwF0H4NvLrgkF+rMB/K8DaCuwt9K7QB0bdWVAaZTk/F+ZXaZ9On6j6k5TpkPt6V3u145v5WbX+o1fjXu7MYL8pyXId/IwD8B/M4C7uMMrylHlJMmmnIds0gBuVDEjvj0/lnJb53O9NURhH41726PbjXqZcoqwsc94DQEoZvMvFZfXlhpJox6EoRve6bpzkozwL1W864FgWZwA7G24OZjy3n7mN4p0ew0PM/AdHpTE9DW9G7XEqAmy01g/gMHrPUlTHQTxWj65Itk0+JEov/easZTfhJR0RB2zpdj7JVkUq9msVVRPy2ekPW61bhJPxlEptd0EVpUj2YBWs/x29195BtsmqN2eAdtx7tyryQTnVVHEK6LTMou/4/XtR3z+zuvVSw15V81BGE6CWsmDE/7aiUEoRnsxuUX6qtm6+lXScU7CP+LgvwbBxMLC2Wa4fHAQvcBYDFBmNfl7humdnpjgghFziKG0/OoJmYeE31o9K0M8BOXCE3KMF6dZxNm7678lAO
BWGpgvXIlryh0v9s05qX3cu139x+vcyLNAsOXjoryawii3uO3qyfy5bwbvBOmF0FYb9m0duvWnTaZukyXw9n9u28Tk+kE7bchJdJTd/eK3dmyP8KM4/JMut2f2YeJycBtLdsM0wlD1+aiyVSzg3Cv2gj4O2w+JJGIKa8Ro+99i1ft8PpXt6ntY8H1tqKVmUllRW6ujOtS8egJfiYN05Vk0QA1tIkrOYoIwnHju5ScpvouKr8CHEzw1qXRrVqZcHZqKKq8iar6uopt/JPLm7jJj3Xjc3iw3N0AZ2bNyv8ZBu4gkM9nVFndTj+0UDYD2p1BkYelzwWsCUHUe/y6I2ZrlD0Kwn2waS230B3VEHrDEoQxSPg7AAAbC0lEQVQaAK+mrT0CxPsB/K/M9EECDnCELdAPAj8EYehtZTphVEoQGpv+pDAgpqPdNWEVEYRGDpOii9qiuSindQwoVbhm1aW1pWpWgEZmojoQBHV0R64hwvEFOf24d5sAXyqNB0FMhDyppvzcZS/jC4WZ/O5ibZoC68uFJylaUFbT0Ml5tQsikwm+VBNM8tdr/Drbmd9BqYN69x0UnTibQdjAbF1dZTgUf4fUfg5xDPpBNqYRMX2WCEdqLqgZFJE9Zjc+pDbdkgtBbI9ZpZRQKhaTmZK0uzKticRkgHrVOZU7iKmcoE0w1LqXG+6QTMp3hA25qFw4E1d5DxDR+XvOt2/VuZAKQXijX26B587pM0RJIfuLBHw/M96yplRoIq9W+jIxeXiKaCfoUp1SebUEMvaFRNkbsn5Cbagwx6rN2+PxCEEUPLrquYPwvXLV7WZ0hC0EYTyh+PKgMiEJlWZ7QDucC0C5k5uEN1GXGK+ZE+Qz3e6xQhDG+jQ1EW9fWAMqJpzJjkI14gG2KGLiEOFssU+CMHcD81zlLVzxAdj8c4/bwM5szwP8FwIN20wD4y3WverikukZiHbFZ3ioJzuI2u8gTF1JG5cgivt+PU1MmjOnKSEI5wBUu5at6Vd7iLAcDHUx1h0yZNIwJ9BlrcHnz3R6jZmOV1MCK5eumv6T3S27nHB0ForpMDHp5M4Gonzeegdl+Ggm9BHw7jKEXvIWuq4OXwShCqhG4Xl/5ptc4ShUsU8zcB1g/Wo8SA9sGlinnlXUek2Y1t/IBGF6YFtqMGjMfaYrkHJjrOh394CoZGLUuFNq3VdNBuh0mJh0fZ+n/5Da907Ot/IdGXIXXe0Pvxby+9+LnEVy6Yrcfru6Iz9mwicLxUy112M1/ceUIBp1/GbNkPbWD4LtVWCo0Cm6QJplLwpXvINQGd0K9zoD0HVE1+MnKokKnPVjpPls08sbTUEQxbfRfcfW0dwqrxtBeN0OLzX5FN8E1oezqGaAT+UZRH7yUBfDVNjwwufLhTJbhronQ7gKzM+B6LcqsF9bcPM15e5qTKUXUyWkoeKjUXD8EuehvSrH7VnV4XYNNry8WEmbTCf4kv3UYAehiSbRcOM3e6b0DE4C0f9zxrLyS9C+dxCVuiHqDrgZ8PUQu/bSyAw8g9CaCzxiNnl15lqsYkwHoWZLbXQj3VG++02DbHRJ3c30hiYId1gKwOgSnxNn0wtfbt2YnuGY6jS7I8o9aboEoEWKuxjYjZmPNQmbo/JndxSZzG3grLdh7nONR83lwores8iGjSB8xUGs9yLDP3UvLKvpP6YEU8/xmw2rw5nDiKhTPaaWHzcqqKDJp8bdtwjZ86TC58tj0j9BFN+k9gxs5pRAG0fJ56TY1dX3HraygcW2b51mIEFoOyLwp7RlHVrObbCAqebyTB13EDAKf1Foqy7EhFf4jGoG+FTvIDRyKBGNt+x+Qka4R7/pGY7JrFFIUwvPrHI7el18IyLq3/bqTqtML3flXgNsuXXyA0P6qAjV9B9Tgqjn+K10QV7QcbXnJb4Jwm1L9rqw4u6oukB9fmzZWV/t1710LTP3TSp7hhKEjuxMY9R4XJypJ0GoHcD9lj2+OJG4UXmWlfrcb5ertJ5hlKsZ4FNNELld8Ly1AB9bENhXsL7c4kq9b1EICmgcqmOqYjG5dzR+dvUeBOO+I6PTf9l3QZwdqqMnfDaB1BO3BY/HDJhPScZjPykiUgMTUanOatr/6jV+NXp/2WLr8Hh8/ZDJYqDam+W+CUJz5d8oTITHtuxhBK1DUwPrHy8lbC5eO/0ADPXmxCS3WK+DwkY+pM7Lqh04THx6aii21vOQPveSmXqYRT124/zqShC5iunatqB9eonIn9TZEznd/ZpXqRvtpgNU11+mmiBUnbpw3+XCwKt8uhfB1ALJ68Ert3ydriCQfm3JXuNLH76crhgb3Wl1uRW+JuS39vJjd/fyd9sUUPGe9nC0Qz0wdVy5Fxc7F67ocr9cWGpxUk3/8bODUJ2/MxRRc9JnHDKN1nr8anedwINkpw/zek1zYvegeX7ANNxQoQzfBKE9SzC88q+PKaJ/gEc1MP806UfB/C3Pt2Q9QjHMAIKAx9sOXm/wej0EUtDlNBBE1tUsxcSfcD+glD3EbBlX9s9TJr3fUebcqZoBXg+C8JgY8g9JBT8xMrTuD05yz11AU+FQSK12nROkUfj4gnKLvIE8QmubrCqdaTx25ipMuvZxnTzZzaUAnZmPD7Q97E2JsNqaXYAqapSBb/N4y4XuS1z5R3xW5x/xcfr6l9x9VNN/fBJE3cavPhSK/oGqwuIz98a99WPXdQJffS67BPTbobIdxB3S2jBqZomYIhkwnmRCKvdkJQJg+4MgeqfrNakM1CM1wNsc23StJ8lMIIgslt7PPebiqzD9odRTgg791ZMgnK+1qSZMesbR+wlWlH0HoJoBXieCQMknR4GnGBjOP73q9YytLxOLxjnDaNduOrY9Vvg5vZqNS5W27MNMrXNfuhi598HdX5qBPxLot9k6vZ+sVRfyvjcnuPl8L6+vavqPX4Ko1/j1eJwohyHhBWL8jkHqGdXcs7CED2oiU5R9dEvXXyoiiGIPGu8XuNyV+owp4sz+CueeAo1bzHcw8Mb8j9r3KGYKQWQ7pebVrHKDWz2qbgFpZhyZT1tHguABAv2GAaUPk5u2qutW9Oi8HzfqehGEwju39Sf1jGiknK5cv5c1QxSNmeJX30o+hemzPdnklfRBRz3PkMWrEoOxe0rVnbcIfBOMs3xGUCgsQi4ce3Xn80qZvupNEJVi53f8HtAbnh9Mq/fZJz1pbKrqNAFf2Ta687fLmQ3dBVZEEJrXpHytaDpD4YPA+JGn2WhyK9Xq4tqMZV2gPHw0h91aH+SZRBBKXB/hR9JguhC2fQGCdJnj8aT6EQTjHjvdcozVkl4I8E/LxO7xMplpO3c1A7yeBKEa7zd+kZc5rtwoL9qx+3isqVzZzt/VM5WUzlwFUMhwAk8T0Q3jROeYet+plULO/EEq5tPEy22l28kPkYUzEoOx/ykXQbia/pOd7Cs85K7H+FXzLgfogvzTs9tNe6XBe4As/rQJdjXbQaiCig7NfNpEHTFfVqmzPwDzHZ3yeQJ+z8z9mfH0TcWP1E9+11rnNjfTCCKvnGycFQv0KXA2jLcKcaxW6G
mAH2FYN2QsurwwGP0G+/IzWUyaOEo8gpQ7axj75Gs7hBMcV/1HwfwQLKwLInPd0NCGZ03rrnSAZvuk67XBeoR3z+4mFpzc0jL3xSUEuPty1kRDoAHb4iv3ns+pEm8iayHSnPn5fqzJFPtCuuxkZ2fCBCx/zRFBmXPnOcoohMDpz4wH+t3veZjWlSXX57EfbOsTgH0wQPs44gqNAvwUiH7tNyJpNf2nGoKo5/jNzm1paynA6uU7FS9LnW0VdvKvvHbT/QnAuhuW/ZO95uFBv33OqcOKdhCqgGKfcHMzk2knknSNgYDJGxeN0dLma0VxmPTy73M3Hwoi0XQhUDFBqAa7vJIquiE5XYJLveYICEGYY1XrlK6H531dUKx1W6S82YdAVQRR7P1A17cFnz+xXFyZ2QfzzJZYCGJ69Of2gfdziW16Wiy1NhsCVRGExifc+GZoswHZzPIIQUyPdjt7wipO0s15l+4MEU5KDEWvm57WSK2zEYFqCaJwQ1TFR1KHTOri1NdTw9EvlfM2mI1gz1SZhSDqrzn3BTbZPdRfB1JjhRfl3MC5PAeeQIYXJZOxRwTg5kBACKL+euzuXhGyyb4VwI7ZG8fMK1Lx2C31b4nUOJsRqHoHocBzR1uUXURzdSkhiPrq07178Bv9tL6tldqaGYGaEIQCyLXi8R3zo5lBnumyCUHUV4Pt3eFDLMqePajLULIjry/8UpsDgZoRhDqw7uiJfJWA81X5fiJVikYaGwEhiPrpx/Us7xgzn5CKx1T0XvkEgbojUEuCcAcxG7WZjxqJx+6qu1RSYU0REIKoKZwlC+vojpxKhItzUQXMwm7Xr3VS02xDoKYEMdvAE3kFAUFAEGhmBIQgmlm7IpsgIAgIAlUgIARRBXiSVRAQBASBZkZACKKZtSuyCQKCgCBQBQJCEFWAJ1kFAUFAEGhmBIQgmlm7IpsgIAgIAlUgIARRBXiSVRAQBASBZkZACKKZtSuyCQKCgCBQBQJCEFWAJ1kFAUFAEGhmBIQgmlm7IpsgIAgIAlUgIARRBXiSVRAQBASBZkZACKKZtSuyCQKCgCBQBQJCEFWAJ1kFAUFAEGhmBIQgmlm7IpsgIAgIAlUgIARRBXiSVRAQBASBZkZACKLBtRsOhwNPPm+9gzL2ChAOBuhtAOY5mv0KA89aTAmbeMP4HB7cdHfspakQ64CFK/YOZvgwIl7OwH4A5ufeLch+aQDPEPBnJr6Lybp173n2w7FYLDMVbalFme094QUWSL1XskuhPAaOTA1Hf1FN+b294ddvG6ebQfjIRDmMe9pa+KiBgdg/vMruDEWuBuNE0zxF73RkM9bmDYmithDWJoeiJznbHgot2z3NwSEAqk9O9bfFBh8yMhzbNNUVSfnbERCCaNDe0N5+3I5W69gpYPo0gD19NDPNhDsZgS+MDK37AwD2kVeXlLoWRkJs8/cB+pDPsp4G8CVkeF0yGRv1mXfKkzcnQWCUmVek4rFbqgFQCKIa9JonrxBEg+lS7RieepYir61qLwLwpiqal2HGTZkWXn3vQOy5Ssrp7Az/CwWtS5n5Y46dQiVFPWBT4LiRoXW/ryTzVOVpUoJQcD1gB9NLRwY2PFUpdkIQlSLXXPmEIBpIn8o0sTVNawg4ocSErEw2zwG0Ldd0ViTyhhJiPEMWr0oMxu7xI+oBveH5wTTWAdTrJ1+JtJthWcckB9cnalRe1cU0MUGAQJe1Bp8/c2BgQJn+fH9CEL4ha8oMQhANolb1WH3LNrqeGIdpmvQiAz+xbLpszz3tx912fUUsYxlrkc38eQIO0JDLK8x0Uirev8FE3AULTm5pnfvSxQCf7EqfNV+B8SPK8O/GxnbZvGnTFeMqjdr5PPdcZrdxDn4IhFOJsRhA0JmfgYcRtA5NDax/3KQdU52mmQkCQFVvwpsQhF/9+D1j8Vu+pK89AkIQtcfUd4klJuRRMH+1rQUXlzrcdFRInQtX7A/bvgbAe1wN2QziI5JDsY3lGtjeHT7EIroZwNxCWjW5MwXCpmaijt4Vb6F0Zj1A7c76iHFla8vm0ypd2ZZru5/fm5wg1OHT/ZY9vjiRuHGzH1xUWiEIv4g1Z3ohiOnXK3WGwp8F07cmr/zpUbKxIpHov9dvE7NeNGnrUoBXufI+SHb6sERiw1+9yuzt7Q1uS89bC/CxjjRPIMOLksnYI37aEgqF35Rm/MJFEpvJpkWJRP8DfsqairTNThBZzAg/TA5Fz/brrCAEMRU9buaVKQQxzTrrXLjiA7DtXwHYzblah0VHpwb7/1Rp87x2JeVs0+3tK+dRS2aQgHc46l6dHI5eUklbtLsRwrmpoeh3KimvlnmakCCUp9hWp9tupaYmIYha9rSZW5YQxDTqzmO1XhM3RSVWV9cxu9lWy50EvN8h5stk85JEIpbUia6ZNP9pMy8ZiceGK4FKna20bsVtAHVNECDjulQ8qg7iq3XBraRJE3makCD+QsAaBv7bZR5M2WPjh2/ceOMLpoAJQZgi1dzphCCmUb9dXX3vYYt/7dw91NpG39Hdt4yI1wFonRCV6OLkUP+ZuglaM2m+SDYtrsTUVaivoydyEQH/kfe+YmIMtrbMPW1g4Gq12p22rykJwk4vYqvlPLeDAQNfTw1Hv2RKykIQ09YtG6piIYhpVEdXT+TLDFxgurqvpKnaW73AYxmLF20cjD3mLrOrK7w/LLqLgTfWwsRUSZvrlacZCSJI6dB4oLUNafsOl5nwZYutw+Px9ermc9lPCKIsRLMigRDENKlZZ3oBODE2B4fVOlRGZ3ffKSD+kUPUDBEdnxjqX+8Wv6fn2F0yGL/ddbBc9nB7mmCsqtpmJYihoQ3PdnSH+4hIebNt3zmCB8bm4GiT/iUEUVXXaprMQhDTpMqOhX3vIjtrXtq90AQG1qSGoyq0Rk0/v3VpdjbKMvGQzThznz1wTyPHV/IDXDMThHJSaJn74uUEfHwSJsxfSMZj3y5nahKC8NOTmjetEMQ06baju28pEaugcIVgd2Cm41Px/p/VuknaXUGJ4HHqDoPGRFFo1tMMXAeL1jZ6ML5yODYzQSjZPfS4hWEtTQ2vHymFjxBEud4zO34XgpgmPXeEImcR4weO6qvyFiojBnV0R64hwvGOdH9R9mpljtDlzR9uX10mjMcoCPcBfKNl27dt3brrw4Wb1dMEq69qdQThqwA/iesTzbVIp12hyPHMUHqcWIjAwNQkBOFHuc2bVghimnTb1R35BhPOc1Q/peGM855EynOp8D3LFi0qddeia2H4I2zTtQD2MIQpzcAfLcYv7QBd1+g7jNlAEMrU1Pa6l65l5j6HDjMg/kJyKPY9L1OTEIRhj2/yZEIQ06TgogEIlFzRV9vMjlDkc8RQtufCZ0RIKuw4tYx/Le+mOhF6w7A92RhSLZT+vtdOxbCcKUk2GwhCAdfZGX47AqTOu/ZxAFky9IoQxJR0uRlXqBDENKlsphBEAZ4DDzxm10BL68dA9qcAeqfP8N9qZ3HpnCB/0TCmVF20MlsIQoHZ0R05lQgXu/R2R1uQP6bTiRBEXbpgw1ciB
DFNKpppBOGEKRfrCT3EVh8THwxgLxMYVfA4yvDH/MZ0Mim7kjTNfkjtxKSzMzwXFtaB6CinqYnBn0sNx5xnYdmfhSAq6VHNl0cIYpp02tUd+TETPumo/jE7mF5YzSMvpURxn3kQ8HfYfEgiEbuvWgjy71h0EGE5GIeXIYyGuVMxmwhC6bi7e/m7bQrc6TpTUu90LEkOrv/fSYTifv5U8+So334j4b79Ijb96YUgpkkHlZ4JVNhc315MFdaTzaYC/lmt6RO8nktl4Krx0Z1PmW6Pp9lGEFlTU0/4bAKpQIkOryYUmZpkB1HNCGievEIQ06RLzYPzGWY6IhXvv73WTeruPvINNs1Rdy4OcpT9G4u3HhGP3/JKresrlJc1awRxDpj+y/V4UMmAgVPVHne5s5EgcuZB+jmAQyeZmhirU/HoxG17IYh69cLGrkcIYpr0o7vdTIxvJuLRL9a6SR51XZmIR1UAvan+tO9dTJWsfoSZjQSh8NGFmAfwjMWZxfH4DX/MphETk5+u1LRphSCmSbX6IHpTE4upo7vvOCK+zimq7tZ27na3fT5gzQd4BwAP1WKX4RF36ta24A7h6YzoOlsJwtPUxHwzbKxMJmOjQhDTNDE0WLVCENOoEF00Vz8RN02anjXzBHADQEsd6bXRXN1B/Rh4MohM7/DwDY+a1FUqTdFFPYObxdXWWS7/bCYIRdptWynGwCFOUxMRTkoMRa8TgijXe2bH70IQ06hn7XsQRP3bXt1pVa0OcHUvunm9OdHREzmYgF86IoBmmHlZKh67pVqYOkKR84jxjYlyhCCKIPXr5aM5x/J12bIzFD4QTOpsauI1QwDZ52URpPPBOHGikeLFVO0QmJH5hSCmV23UGepbA+bVjmaMMfMJqXisv9qmqcttVmvLrQR0OMryPCA+cGF434CdvXG77/aJfLvZodL25F7O2+16AOFCGdwAr8rN5h1EXg/U2R0+F5R9D307FxD1M7MNYKUQRKW9vjnyCUFMsx49fdOJj0gOxTZW2jzPN6kZV7a2bD5tYGAg7S7b4wnUqglLKyPTqcl4/+WVyleLfEIQQP586CaAep2mJgCKIFqEIGrR02ZuGUIQDaA7D9/0zQwclxqO3u23iTlXRutSgFe58pa9pNbdvSJkk30rgB0debcw80mpeEyZI3y9I+1h6/Z80c6vrNWkF4LIodfRs6KdYCv36l088RQTUzVdbcbmFYJoANV5rfYBjBLwFc7wGuVZYtLU9lB4P4uxFqAPudKPEmFVYih6Q6ly1C5iLD1vDYNPc6VLE+jHARr/mmngPa+2+H0f2UTuStIIQWzfH+hMTZMwFYKopIvN+DxCEA2iwtxWn64AENE06W9gXEhM0T33tB93v+hWiI0E0GcAqNhIzluyqrhRZv54Kh6LmuwAurqO2Y0DLbeCcYCmLWkQ30Oga8nmwa1bd3m2cKDe23vSnLGxl+cxBQ4G4VSAPuBuCwF3bZvDYZNnL6daNUIQ2xH2OK/ankAIYqq7Y0OWLwTRQGpRLqkUsL7OYPXsqHuSL7Q0A+A5gLZl/0C8Exi7lhDjGbJ4VWIwdo8fUbu6lr3ZtoI3E/B+P/lKpVXB+iw7fVQiseGvtSqzmnKEICaj52FezCUSgqimq83YvEIQjac66loY/jDbuCQfVrvSFmaYcVMmYJ117+D6JyspRL0FYbWOrwFnX6LzIiyTonNtaeHV9w7EnjPJUI80QhBFKFNHT+SrBJxf/AvWJoeiJ1WjF79uvNXUJXlrg4AQRG1wrHkpuQtupNwMvwpgTx8VpJlxcwD8xXg89mcf+bySUnto5XsJmW8RY7ErplK54jMM3GtZODcxGB0yMW+VK7CWvwtBFKOpzIu21XJn0c5RdhC17HozpiwhiMZXFR3Y0/evAbKPAuNogN4MYL5jRf8KwE8ANAzwTW3BHQamKnyFOidp2UoLKRfoTd2tUO9AzHNAmDV/EfAgM92QDtBtle5e6qEWIQg9yrrLlWJiqkePbLw6hCAaTyfSIkFAEBAEGgIBIYiGUIM0QhAQBASBxkNACKLxdCItEgQEAUGgIRAQgmgINUgjBAFBQBBoPASEIBpPJ9IiQUAQEAQaAgEhiIZQgzRCEBAEBIHGQ0AIovF0MqtapHnTYErlZ8K5qaHod6a0EilcEGgSBIQgmkSRM1UMIYiZqjlp92xAQAhiNmi5gWUUgmhg5UjTZj0CQhCzvgsIAIKAICAI6BEQgpCeIQgIAoKAIKBFQAhCOoYgIAgIAoKAEIT0AUFAEBAEBAFzBGQHYY6VpBQEBAFBYFYhIAQxq9QtwgoCgoAgYI6AEIQ5VpJSEBAEBIFZhYAQxKxStwgrCAgCgoA5AkIQ5lhJSkFAEBAEZhUCQhCzSt0irCAgCAgC5ggIQZhjJSkFAUFAEJhVCAhBzCp1i7CCgCAgCJgjIARhjpWkFAQEAUFgViEgBDGr1C3CCgKCgCBgjoAQhDlWklIQEAQEgVmFwP8HEnP+awTfrQQAAAAASUVORK5CYII="/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-57"><g><path d="M 101 1081 L 154.63 1081" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 159.88 1081 L 152.88 1084.5 L 154.63 1081 L 152.88 1077.5 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-58"><g><rect x="1" y="1031" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 1081px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; 
text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Put all x86 CPU cores into ACPI C3</div></div></div></foreignObject><image x="2" y="1060" width="98" height="46" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXlgnFW59+95Z7IUlMIFgWIRQS8I+rl8hWaZpDeylEWg0DpJSqnUjUUqXLigH4siCi4Xr0tvEUX5EBCaZCjQiuxibDKZSfmqgIi4olKWFrmlrZAmmXmfr2cyk75557zbLMlM+rx/tZmzPO/vnPf8znnOsxDkEQQEAUFAEBAENAiQoCIICAKCgCAgCOgQEIKQeSEICAKCgCCgRUAIQiaGICAICAKCgBCEzAFBQBAQBAQB/wjICcI/VlJSEBAEBIHdCgEhiN1quOVlBQFBQBDwj4AQhH+spKQgIAgIArsVAkIQu9Vwy8sKAoKAIOAfgSkhiLa26FuGR2kNCMf6FzWv5CYGtoKx3jDwcGp49MH16+99rYj2pKpPBBpaonMM0KMA9slW2WKCTxjsj22wN9HU2v5jMM4Z/zvhtkRfzzKfXVVksTlzzq2pmfH6SQQsBdAM4EAAIQBpAK8AGGDgjtGhvR/asOHm0WJfYu7cM/cN19YsYXAHiI4AY99sm0Ngfg7AT1Oh0I+eWNf1QrF9FVs/Go2G/v4yPhhiWsiEEwHMBnCAZfxfA+OvBPolMWI7dszcUAhGja3tnyPGN4qQdztArxL4z0z8aJqNNev7u/8IgP202djSfhoBay1lHb8BP+3lyrS2LpyV4nAfgHdVwjdTzQRhxz1FRKtHiS6vhA9FNymamqIzYOCTIIwk+mM3B5k4lVR2NyYIaoxE24novwG8zceYvArg4tmzuCcWiynyCPQ0NCzZKxQevY4JFwAIe1ROEeOmdKrm6sHBO7cF6qgEhRWJGbU1lxNwHoC9AzT5Kpi/DBO3JBKxIb/1SkAQuq6eIYMvHlgX+4UXUQhB+B2pAsqV6ATh1PMWIl460Bf7WQGilaWK2lW9+IpxKrP5dYDew4TPJ/t6/rMsnU1Co7sjQahTQ+0er38VjEuypwW/SKdB+CFSfGmQBbBpXueHYPLdAB/mtyNVjoGnDDO1YGDgnr8FqVdo2cymJ4zlYPoCgLcW2g7Az5FpfGxgoPsJP22UiSBU12q8vj3y5t5Xup1shCD8jFKBZcpMEEqqzSA+LdEXW1+giCWt1tTafhMY5+caFYKoOhUTNbVGLwPT1/LIgaBUJuuUuoLBexEyatP98ycQ3TwyNHO5H3VKU2t0Lph+qmlnCIQnwfQbENcBiIBxqF0mAh4drufohsdiW0s6kW2NHdMWPbAmRbczcIJLP+pU8ApADLDSWCh13AyH8krt88lEf/fdnjv44lVMbtCkQXxFoi/2TSc5hCDKOLO0BMF4vK6GF/T2xv7pp+u2tjP2Hhmpm20afBEBH7cfwRlImiOjp1bCvYRdDy8EUV0EEYl0tppk3g9gL8vc3ALm5bMPQrdVfZTRwb+EYw0yvm/b/Y8w0+JkvPset/nd0LZwtpEKPwjgfZZyjuqjhtboUQbjNoCOtrbLwHXJ/p4vei20fr41XZnm5oWHsBF+AMBRtt/TDDxBoG+N1JuPaEiKIpHOWUzmJxj4D406aogISwf6ela7yaY5QQS6A2hrW1Y/MrLtADbCp+7E6BqNynCbwcap8XiXug/Ie4QgCp05PuqVgiCs3TTN62yGad5r23GlmbE8Ge/5vg+RylpECKJ6L6nb2trCw6kDbgP4LMsk8TyhNjVF340QrZm4gPLASD1OcdnZU1Nr+7fA+HdLX0PM/PFkPNbjtNjPOT46s24HxWw7+efTBh+3fl3s+VJPbof+ANBfyDA/7UeHr2Rqaor+C0L0AwAftcn4ksHp+fH46t86yV4sQUxYP8bk6AZw/IT+mNfAxGKdalAIotSzytJeqQlCNd3c2r6IGXdMOL4GPJWU65WFIKqXIFpaFh2WQqiXgINz88PvCbC5tf1sZvzYogJ6w2Q+aTAe69fNtUhk0XtNCj0C4KDs756qjlw7mnshpdE5PxHvVgtwyR5FmCOpA1YwWF2cWx7uTYWx+InemLLi8v1k7nZmbF0J8LnWSkTUPfzmzKVOKrlSEkRm/dCfiBxPJUIQvoc4eMFyEETmsiyE1QCdbJHoZTbouOS67t8Fl7J0NYQgqpcgGlvajydAGTzUZmeE7znV1BR9O8LUC8a7x2eTy6Ld3NL+pZ2KeqXuGHsCbHDGTjr73wUgOl6d8ZNkvOdjpVQzNUSiJxiUORlZ7xE2mOHUGYO992ws5KvJWkDdT0Cjpf6QybxgMB5T5tR5T6kJQnXQ1NJ+IYCVE3lPT7JCEIWMtM865SCIzABHOs4D8QSVEgOnJ/t71IXflD1CEFVNEHZ79z+HKdXa13fPy14TSjfPnU4fLS1n7ZPG6IMANWTbHQF4QaI/9pBXP7nfs/NfLXAvAZQC+MG68B6X9fb+eIffNtzKOWzCtjCMk5P9XYPF9NEY6VhIxKssRKxUVnfVhTed09vbm7K3XQ6CaJzXcSSZ/HMAs8b7c/DbEYIoZrQ96paLIDSDBmY6OxnvvtMqUt7kCrBTU+141Q9spVUm5zF1fK+v3zqHCVEG/xsI77Q4WalXGQJ4I7ERB2PV8PDMX/ixsqkEM9fGSPR0Iuqy7WSfMcOpk/3sZLN3BGoxeIdlbjxLZuoUq4moZk49b4ZT8/z0EYQg8jHlp0fC4eM29K76Rxk/xUBNNzdHm9ggRVi7LuuJVib6ui8q9pSi7jVqd+ABgJTjYe5xvEcpB0FondSAWF1481l2khKCCDR1ghWeVILQ+Bx4LfBeb+NVf6oJYpdDHikrFj8OXblX9uXYVQkE4agLJ3wn0ddzqduCpYizbo+tdzBzh2Ws1WVwZzIes3rHork5+kEY9CgD++V29jutdD6S7O95zGueaO8vmBbpLJnsp19i/Ggg3vNprz4m8/fGlvbvEqDIIPe43qkEla25peMzTHwJmH7G4Pvrw5x0smqcNIJw2DwKQQQd3QDlJ5UgNComrwXe61W86k8lQYyZSYbutqgqvF7H/nuaQDcPD8282Ok0UQkEoYR2MAl1NU9U9TSXx8rBTGsWqtvZel2gZgGlxpb2LxNwtZ8dcXOk/YdM+NR42TJcMAedCNbyGhWY4uDBEGpO7u+/a0sxbRdSd9IIQlRMhQxPcXXKRhD5zjPaHY7XAu/1dl71lY318Oiby0H0r2NtmR8Gcv/O/GE9QE/uWgz
M9Yl47Bavfr1+dzY/HI8RpPpVpwT1cauThTrO74qTs6sDV5v9SiGIDEloL025d6QeZ+jMSTWWQgoLx/IOhJIm4Lrhob2vdyDRXEiOW60qMCcSev/8pXu+5c3hey1mqiO2Uwo1tEaPNECfhYmTQBmLKhX7KROLiQk/MUdSt5XT50dzklKub1N2yikHQegswYjx1YF4z1X2b09OEF6rURG/l4MgtDpMwp+Q4rZEIvaiVVyvBd7r1YLWn6xLaofwA/elDb7UyR4+EokekSa6iYAPT3xvfhBpLNLZgFcSQaibTI3vQJrBn0v2x75lfaes6m0ViBZY/u7p0+BkiqmcMUNsfK6mpv4JdRGsys2YsfWoNPhLAE6zeji7eTfPaVu8X20q9XOA3p+Va9y8UjnCEdMtNgsf3RQdYmAFpfnaICE9vOZ67nfd/V45zGh9y5O/GQzkKKfrJ8+KDEgz80K72lHVFYLwO1IFlCsHQTRG2s8nypioqZ1V9tFbQQRd4PN2D/bJ6XHJPRkE0dCw+ACjJq304uMeuD5VIcphaQZCdNsEE0nghTDSbf39q/9if/8KIwg0N5+5v2nUPELAByyy5jlbNbZELyWQioGVmyNaItFN6QxJ1L9+MQjqXidIzCHlWXx7fZgvctKnay5HMxfhNFozl4iVH0WA/grzR/D6jDWbD/spx6uJkv5e6hOE/mQJ50tyieZa0vGc0FgpCUKFNtj4EpZhLMKm1Tbb0Y56OhKExl5/M5l03MBA9zN+RrIhEm0xKGOhsme2/Otk0nxd8LRKIwglr07VZCXIseB3pnq/XXGSXDxlnTDL+DaEjK/tDC7XCaDGA9sNMIxPJ9Z1KXWiYxhpjfrmzwSsYODrtjm9mYHHCbTNLe5TOWIxlXpB9jMn3cqUUp6G1sXvJ07HCDh8Qp8uBg9ygih2BF3qF0sQSsf/ZmroHWHgeIb5WRUh1d4dA7eODu19nk5HPB0JQi0ypkEfJ/DRAB1CRP21oU1n62zIdUOjM/Fz8iGpRIJQqibNpXDGMqm+Bo8Pp0gFgFP5CTIPA39A2Dgx2dv1Vz9TXW1EXniJFhHhqxNi9XtXVurNy91CfmvwHAEwvOvkwM+RgQs1ISyoeV70w2ziRvs3QKCbasObLvI7/l6vYT8FE/APmHzCwEBs112aVyMl/L0YglBj+cor6f1HOXw0COcTY74mlpvr/BCCKOFg2psKbOUTXJY8e3ZrE9ORIIJDNLHGNCAI6Dxy1T0BgXoBvtyiWvIVEC6H0Nj9Fqn8He0anDcy0G/Z1bdkk+TYiz6ENC9JJGL/Y/9BGyYjW8jPaSAb08geS8jTmivInMlL/AT4dhgM0o/fsmUM961E8JwfQhB+R6qAcuUkCBUPn9L80UQi9icn0YQg8pGZDgSh3soh8qrthf2H3nayDFPEw8SfHOyLPWtHc+686KFGmm4gwhl+LqpdCMJ1o2PtV+/45+yJHPSz1fhAFH0pHFQG101eMY1NrOsr5LgQROkAz2upTATxOhjXw+Qbvaw4hCDGhmTM0/q1w02ETgfxkqyaYvySv8pUTLl5plM1Weegb29rVUl3se3lJ6LqZe/GLgfRdRMMJzR6bQeCSBNh2UBfz0/8fop5VjgOVnx+2/NYkKcbQQTKSCkEUcgs8lmnBAShYrO8pMJEgPGYgdBPd+zY6yk/YSIyH31AKyT7awWtPxlWTC7Q05y2xfvWplOHsYlDDaL/DeBDDCiTSp0PxHhTVUoQyJo83wdQmw2XQGoXnWXYzpBfD9eF+aN+8pY4mMfmGQ9oCaKAxV13l+HX69vr022MdCwhYitZpZnptGS8W+WumPSnBCqmTQD/A6B+Bn42Ws/rgiRYEoIo45AXe0ldrGhBF/hqIwi1MNXMeP0kYlwGwjEuGbxcoaxWgshsAsZiNamL6V2WRoQnRkKhU/zGN8q2oRL85E5V28jkkwYGYgm/c1Cpm0ImqZhPKvNb5rEH7NOF5AD4/rrwntEggfYUoVFNep3VGkcXi8yv7NZyGiu3soQT9ytbMZfUfvtwKycEUQoUHdoQgihbTmpHqxaP4dzp24UXQJkTxbipcLUShEP46LHFOUCmteZI+/VMuNKC3S8N3nFaPL52e4DPQ6m8lKObynqYeyYEgNMGiSsggGOQ4IAB5M8U1ZHYdPOkDoKJhiBKEpdKexc4hTnsVY7YSX+EIMpCENTU0v6ZnX4MN3icGLLqOWwAsdoJ/7IutOfT6fS2fVIcVukV3zW+03UIlV6hZq45sb3uIHyrmfIsdwpYtJVQja3tlxBjl1e3zbFS+z0U0Fc5CSITPib1RgwglaIz+5Q2FpOK95RCarUyoSXCWjKNx+PxLhVWPc+HZKpPEM3NHcewwSq5095e30uQBVYX0sRvgqog/fgtKwQxtq0MlA87qIpqMu4gGls6Gwim0gfvYxn89M6cAr8mGLfCxLraWvOvAbx51VepzaVRyQShsWJKZ30K9rB8yE8Z5uj8gYF7N7t9KCUjiHyvW7uJKDVG2m8nwtmWxTewismBIC5N9vV82++C4FZOsyiXZNec61MXTtzplDLVBKHLHcGEorHWOLw6foelGFOvNnZLgshLLBSQIPIjb7oTTLkJwiFvsi9zvdwEmQ5mrnrVEveC6Osq38UE8vQRFjxv3ArM0OZ1gtCeMhA8H4RmDEt6kdzc3PE+NjIJdca90dUCXluz+YISOORRU2vHCjAvt25wiOjsgb5ulfdjwjPVBKGJn6UW8luT/T2fLCY3hua9HCMaeC3upfh9tyQIjf7Qt25ZE3nT8wRSboLQ5h0IoGtXEynIkblCTxC6oH25bGfrdV7Wbikts4v2laRMp4tUqeT7EORfQGsugQNnlNO04Ts9qp/FxGEj4ltl59aHQyykZ8zR0PGDg6s2VRpB6FK8AnCU1w+++ox9wTcKfvryW0YIYgwp31nCtGkJpzhYn25xCWreqMvHW00qJl0sJuuFtC6gn3KqdFM1aY77wRdtXRBFTQhpfb6FQI5u+TvwgCdjP4uGzhFROQ2aI6OnFhpu3NEZ0eVydqpPEAorTYrjNDOWJ+M9E9Ie+8FVlXHAtuhTid/+deV2S4IoYremvwCdYoLQLGSBVAsO6Ter5g7C7+KvTVPqomrS+0G4546wfWTUFIl+HkRfs/zd0VRWs+g5BpzM21Hr76CWJ/p7bixmgbDXdczkBzyGNHfoQom49e8SSt2dvMsQ7jsoTjoTZgCe4eN1/TiQZOANSdB38Cq/WxJEJiJnmHrBeHcOIB8xb7RJYDL1AxKEUxISr8Fy+l1r+eASrNDajiIHDtHdtlDZY6/lkB6zwlRMOtWSNn1o1j/kBzaTU23ZHEY6hyyld2eTL/Lw2KfG1ug5xPQ9q1WZWwj2Y+Z1Hhw2zYcBHLlrjOgvMOijiXVdv3Ye/4WHsBF+AMBRuTJep6NC55qq19yc399Ye45BBbXdqfetMc1bLImScuVcx0QVqoQThEMuEiXeFnWXMvsgdMdiMWUk4fqonB8G4zaAjp5QsIBow159Bf19tySIbORPu226wu6+lGFc9MS6rh
esQKrLz3BNzTVMuMAe9dEPQWji2DyLNC9wixcVZCAdEr6rPAQ/4NGaKwYH79xmb0+9U6gufCGYLnPKN+BkXldJBKHPKOcca8nhtOQYfsMtFhNg/Huyv2u9/VIye1mu0o2eNzE/CfJyVNjHpbm1fREz7rCZKm8H8VeQwkorKalwHn9/CccaZHwf4MMsbY0w88eS8ZgK4FeWp6k1OhdMP50QPn2sJzXvniDQt0bqzUfs3snjSZUyuaczwQ+tIfoz9UF8RaIv9k23y94KIQintLdZzPk5sPEdA+aDO3bs83Iu0sNYiJsts9igeQxeCqZjNetKQSeRUg/27koQcAmQlgJ4A2A8pT7uncnTjybgvZYBTBPje0xQ9uBj3rFeJ4hIx3kgtusls/4IlCLGutqaGRcE8ZrVLCxnM0Mll7EkTMqUyqSlBBm/Gvt4eS9iNFrSVuaa2g7CJuupiglXJft6VHjrCU+lEIQuJ7WfMN66nNRu4bGdd8xqq4HXwFiXSeVKXKdUyeDMvLCPw3ZmWpaMdyvPbLeHmlqjl4Ezaqm8NkgFCQQ975YPAvAfjLCYBSVrlqq81Q9ybCeDD20d+z2Dz4Ga98pVT4H4ytkH4lteO+9KIQgleCTSebRJ5hpXHIIBvYWIlw70xX4WrFrpS++2BJE5HvpzLLOinslFDDN1KxthZe435lTmQRAOFhqWdot3OFK7kroZW7/L4HNdPkCHGcTPkWl8zAxxi9Whix3MOiuBIBx04b4C3DmkHnVVa7ip4nx8li+RwUsH1sUe91E2E+jvxZfpQk3CIK/qaRC+PfLm3lf6jUvm1aDX7y5qIq+q9t9fMpmXDcZjKiuiY3KlXKVKIggl09yWjsNDMO/MUxMFRUGp6UzjY7pEXYGbKkGF3ZkgFHxBQlO8ysyfTcZjPa2tCw+c4HXsbS2i1Ufnxo9d0nsGGeMC0mK+CuYvw8QtSnWRd9ntEDCuEghCe+EcQGerzTCnzBTDqZMHe+/ZqMM9QyxhLAfTF3ymAQ0UIdTeZ4B81Krqi8z8H2p++llgg8wrr7IZQtuEfzNNup6Qif1lP/m4NfE6Af+VHq1ZoVOFOlWsNIJQcmZT9y4G8GUAb/fCzfa772jUAdstqvjuThAZ8MaD24EvADLRTseinGaOx/x7IrqpNsT35byQ8xySvAki09zYZRRdDUDpHK2RVEvqkZq5M6mtWcLgDhAdAca+2Vmi1Fp/I6Y+5Tg2PDzzF9adpsZqR7sjn2qC0KmWVHRfg9Pz4/HVv/X5RegsjNSg3zwyNHO52w4847GcMk4GOKrubG1qExUl9M8E6k6NjN5ZqOmn5R2ooTV6pMHGuQCrzGfvtOjtlX/AOgbuGB3a+6HJOjW44ZuZQ+HUqSD6CEAfAPhtE8h0TCX3VwL9khixHTtmbihE7kokiBwumeyDm4zDic1TiekEBo7Iqp/CFuzU2G0sFgefc73gYlNCEAVLKxUFAUFAEBAEJg0BIYhJg1o6EgQEAUGguhAQgqiu8RJpBQFBQBCYNASEICYNaulIEBAEBIHqQkAIorrGS6QVBAQBQWDSEBCCmDSopSNBQBAQBKoLASGI6hovkVYQEAQEgUlDQAhi0qCWjgQBQUAQqC4EhCCqa7xEWkFAEBAEJg0BIYhJg1o6EgQEAUGguhAQgqiu8RJpBQFBQBCYNASEICYNaulIEBAEBIHqQkAIorrGS6QVBAQBQWDSEBCCmDSopSNBQBAQBKoLASGI6hovkVYQEAQEgUlDQAhi0qCWjgQBQUAQqC4EhCCqa7xEWkFAEBAEJg0BIYhJg1o6EgQEAUGguhAQgqiu8RJpBQFBQBCYNASEICYNaulIEBAEBIHqQkAIorrGS6QVBAQBQWDSEBCCmDSopSM/CDS2tn+OGN8YL0u4LdHXs8xPXSlTGgTa2trCw6kDbgP4LGuLBFw70N/zpdL04t7KMfM6Dw6n+RQiXsTAEQAOAhDO1hoCeCMzDRLR2pF685ENj8W2BpWrsaX9NALWBq1nKT8E4BWAXwbjMZOw9h2z8GQsFkv7bTNvvgN/DlOqta/vnpf9tlHOckIQ5URX2g6MQDUQRENr9KiQSZekU6GrBwdXbQr8khVeobm5431s8M8B7G8T9RlzNHR8ud55zpxza2rqtywkwpcAek8AmFIM3JE2jGueWNf1gt96JSAIXVcvArh89izu8UMUQhB+R0vKCQIAKpkgWlsXzkpzzRcY/GkAf6uknV4pJ09TS/uFAFZq2kwT0dkDfd1dpewPADXPi36YTdwYkBjsYmxnoqsOPtD8nq/FufgThBsMPSP1fK7XyUYIosQzSZqb3ghUKkG0tHR8IA1+GMAB2RGoKFVAqWbFnOOjM2t34AG1ZmfbHAVgAAiN/Z8fRBqLEomYUq8U/ahTQ92M1z/PwDUWFZK9XaWyeQWg4cwPxDPB2Neh8zQIP0SKL/WSsUwnCItYdPPI0MzlGzbcrDDUPkIQRU8haWB3QqBSCaKhJTrHAD0KYJ/pTBANkWiLQfQQgD0zazFRN7N5JEDvz773NjL5pIGBWKLYeanIoXbG1pUAn6tp60UwbuQaY1Wyt+tvipmsZdraom/ZMWrMJ+JrAbwvv76PxVlzgmDg9GR/z0/9vFtG/tot+1PYaGU2r9GcftJEWDbQ1/MTIQg/iEoZQaBKEdhNCIKaWjtWgHl5bpiIsNRkHEvAx8f/VprLampqjV4Gpq/tOp1kehhSl+Gc5hVeJwBVOBqNhl58mS5k4OsAZlimV5oZy5Pxnu87Ls5FEoS13aam6AwyaAUTPmX9OwNPGebo/IGBezfr5JATRJUuCCK2IGBFYHcgiIaGxQcYNenHLDvyLSb4BGK8nYjusSzkRV9WN0aipxORusuwLuqbGViS7O9RMgR6mlvbFzHjDlt7f0eaj0skYn/SLs4lJAjVvlLP1e2gGAMnTCQqXpiMx7TWUkIQgYZZCgsClYnA7kAQza0dncys1CG5+4bBEGpOTqdH90CYesF4d3Z0irqsbm4+c3/TqHmEgA9YRnsbEZ810Bf7WYEzgBpb2r9MwNUT6hO+k+jrudSuolJldHcQQVRMOjmbWqInAbQGQG3udwZWJPt7LpYTRIEjK9UEgUpHYLoTxJjvw/53Ka2NZmFTi+8tVjVTMZfVWisp5isS8Zjyf5lw1xBkXmR8J0xTGRIcaan3fNrg49aviz1vb6scBDGnbfF+tanUzy13NuqNHq+r4QW9vbF/5slg9/sRPwh/Q650iy+8HDqawIsBng/gnZbj4yYCngbh9toQ36cD3quX8fbJXAbOHAkPGbeiILwG5t8TqDs9GuoOYvdtPzLmdiQNDUv2oprRK7MfmbIvHwLzc0z4iTmSum39+ntfc5LZVVagaCzGj8fDdBoYH2NAXUjmrHUyFiQEPMvM3enR1H1usnrh7vW730tqdUk5PEprQDg22+YEq6K5c8/c16gNn0Mg5ex1OIC3ZsttAvhXDLppdGjvh1wtTIKZQWbUMYP9sQ1u76jkCtWEzwDhEyA6wmKNk1Kms8TUl4Z5S2rHPoNusnnhGPR3j
e/DCAMfyal7NDvjgi6rNVZSStSiVVa5921uaf9S1iIq9yfH0045CEIzLxXnZU5i/f13bRGCCDoz88tnbaLpu3rrhLwKrwO4evYs/r4f2+fMYvsSLSLCVwG8y4e4KSJaPUp0uR8nHB1BkGFshMl3A3yYvj++vy68Z7S398c7bL8HtQ/fDuKvIIWVfi74VF+ZCZ2mr4Bxnk1/6wRNihg3pVM1Vw8O3rnNB36BihRLEG++ud8/autfvxiEL1pIwUmGVwH6RKK/W6k18nauAc0gXQlC7W5DpnktAUtdzDmtcgZyuAoEsqawRhc+YdHW3E8owBxVJ07yNLa0H0+AwntcBUOMrw7Ee64q9h1UfUV0psGrCXiKCGvJNB6Px7uUV7Kv8S1WxaQnCGfvaLmDCDDqyhIAYfoWGMoRKasH9dsA3VEXNj/jdprImMalaAUBHwvePl4ig5cOrIs97iaRfcAJuJiRWXyPcqin3eFkrCJCxnUMVrrLgFjwoBlOf3Sw956NbrI2NUX/BSHqBnC8X5Rz5Rj4Aww6I7mu+3dB6wbBDw6hNnQnCKTTpyIUUmaP7QFkSjH488n+2Lfti0ipCKJ5XvRYNkldoKpwEUGeNAO314f5okJOyX470u3qdYt/Y0v7dwm4yNJu4J1/c6T9eiZcaWnjDZP5pMF4rN+vvKUqN2mO12EnAAAcJElEQVQnCFExFT9kHjbR2wlIMuh5ENcBiIBxqH3hJNBNteFNF/X29qrj+oTHwcIgV8ZX+wC2M9OyZLxbWXRoH80O+DWLGkE5F8UB+kv2NBEB8Gd7+AJXLAivEUOpSJRONcTgowl4r2ZX+iyZqVMGBu5RNuR5j0sfGdUXyPgVgLQb3gD3jtTjDC9v0SCzo4gTxIsAPw3QyZb+hkB4Eky/8cBKu/tvmtf5IZh8/lh75n4AnWrZ+W5jxloiejPb3xCH6TvJ3q6/Wt/XwbpGFVEOXWoM42AaJvChDDTqTj1MeGC0js8qJc5WGTW7eu2ibfeRUO8QxLP6/fOX7vmWN4fvtVr5qI0Gj4bmBVHjBplPbmXLQRBNTdG32y70lQixuvDms3TrkpwgfI5mY0v0UgL9p23Rf5WZLhndMbPHpo+lSKSjwSTzVptzypDJvGAwHlMOTeOPuoAbSR2wgsEX2MR5iZk+e/BB5hq7eiqjEkib3ybCGTaZNsMwTkqs6/q17tU0A54txoMcDnVaFxC1Cx5K4x2DfbFnLW052IfzcybjoncchMftso7p22uUBYc6qVhPGw/Xhfmj2suxls42gvmARa2kdqtfqw/zN3Tl586LHho26Qd2E74gC4SfqVAEQVibV4R/fW2Yb7S/ixrXGtO8xfYeyiNsZaKvW+2OtZekhVxSjxGMqZzOrDGNlPNUVwipy+0B2ZT6c+PLpE4/Sr36NusLuW1+/ODqUibP92Hnnd/ASD1OsROS/v7Av2e1ClWS4nDfRNWuo3q1yNfyrl4Ogsi3BAOY8PlkX49a2/IeIQjvcYJafEImqeBg6lSQe55Fmhc42TCrQs3NCw9hI6wWOYv6Jn/CNkSiJxiUMT2z2Fz7UsNQU0v7Z3Z6ld4woS7zGphYrNPz6wgio44JGyfad5c6aDQLkSr2GNLckUjE/sftQ29sjZ5DTN+zLvpOnpx56gKPBVL1m10g7gOobZccpf3AS0AQisDPTKzrGnDCSnuaJPwJKW5LJGJK95/3BCWIjLrUwCoQLbA0pk6Qlyf6e9QYOVrrNLQtnG2kQncD1GCtq9v8+Pi8XIvodrxuC5rmEtj3ZbVubhdyj1HsO+fql5ogHM13XTzPhSB8jKbG7G0Lwzg52d816FW9MdKxkIhXWY/+1lAADqGLXR1obH3q7Ku1JxVVT0cQAcIka3ZzcFUVecuavxt0OOr7CjFgt2Zh4IUw0m39/av/4jVWfn4vkiA8vWdzMmiscl4nk+YPDHQ/UQqCaG6ONrGRCVmxV649Bq5L9veoy3NPU86mpui7Ecpsmt4xLo/LxsQPtroymh3vZjLpuIGB7md84uB5+sq1oyUIl911oe/kt14pCWLMxDZ9+8TNkxpp582kw3pRUTG+pjzct+7Wnxg/qq3ZfIFOZ2cffO0FG+HSZF+PunRE47yOI8nMhC6eZam7PNHfc6PfieRgwXFrsr/nk3kXm/l2zb4v4TQnKc9YLvZ30LQxwVxRlddhzkxnJ+Pdd3phkt3dPgqQugt6kkHP1pD53b6+2Ktedf38XhRBeJwCrP23tCw6LIVQLwEHWxZwR5IMeoIoxYWubuPkx5TWD85j82BZ/XDqjVj2biVbzV1llDkZhbDadtfj67JauyBbvlW/cpeqXJEEQXPaFu9bk0p/kAhngzOGEVavcCWm50ZXThAeo6lZwEcAXpDoj6ndl6+nsaX9K8q/QNnqA/i1StyR7OuJZwgi0rGEKOMdmnkK3fHmW1/w0yPh8HEbelf9wyqkZsCfN8OpeV4WRWOyZsIP7AppEGDBy8mgc3jSmBFSY6T99szE3oVLWaySfA2gpVBRBOFyGWiXo6XlrH3SGH3QqsZxM3EMQhC6tgsx5dSRGJjOT8S7fxAUV115bd4HH+03RTrOA7E1xpGvy+oiF+RSvPKENgJaqQXtXxl4XJHoi33T7cQoBOFFEJGOk4lYRU/MXa76XlD9jFhzpP2HEwNoFaYz11h6aC1f8gbcxcTNLr+dhAh49J971J359CN3vOHnXXNlGlvbryTG9bvq5L+z7jJNOe8xsGanRc1tdeE9ezV+GUHEKKhsMQTBhKuSfT3Kv8Xz0Z6iXCJ5BiGI5uboB2HQowzslxUk7xTnKaDTDr+EGfbs9wl+N09a4vIRBlznA+F23+EHo2LKlJEgUiC+cvaB+JaXb5YQhBdB5KtkfmnwjtPi8bXbixl8VVd3hC70UsyvSkIz4I4mbtb30+38Af4jYPwiOA78QQBzLQSR58mZtXy6n8ZMK3WPytL1W2J0c4juO/gA8w9ekz24nPk1iiSIcdWilyzlJAjNQvgyG3RcIT4jeSfXABsONwz0Fkl0V1140zleql2Hez3Py+pquIPwmjcev6uoA0mT+FybZaJjNSEIb4KYsNstdNes60a7CBR4KaYz0dOpJPwucHZ5HTwwi5yv49W1F1+RSOfRJpnKusuPA5ci7PvJpB8PD8/8RbnCQPjFL+gC7wfvUqmYNDvTgi8eizmRuk0eDYn5UhPl2tSeQD0s4XSbLHXfOBDvUY6xk/4UfYIgvMaswvJgvWHg4fRI6NGg/hxCEB7D3tTa/mMwzhkvVsIjdNkJQkM2fhc4PwtWCb8YxwXKxd/DrXtlrvl/w5S6vtTJ1f3it9sSRGmCuems5Uox3RwD46nGdXczQGEq31IIWwl3IkIQ05kgNJY/fhe4SiGInBzZWEEXZ2MF2ZPVO42ir/AjQT5mv/jtvgThHPjNL84Ofkd+q7uVcz2FOESMnVae1EFBFILwQKxUF7PTUcU0RRd41NjWeQiN8okw+Bww1H2G3XzPCrerZ3nRH4z/WEzKQs2XL4eSKSjBBLmkrnQVk4OBQtChcijvYSab
bwHl2wzcr4BNkei1IJwIwmrDNB/YsWPfP+hUonKC8EZ0yv0gNAxakZfUGnPcNDOdlox3P2iF2e8O2D40Oue1qdTP5uRTMZvq6187PE2hpcToAGXCrk98fHhhe0/FsRJ+8Qu6wPs5sZXwDsIesbTgS2q7P0Wxd3QOfgxKZfgKQJ4OfPnjyCokSC6UuvrZ9bJad1FdiAmw03xyCDyoPaUIQXh/lVNPEGOe0KstogY2c1WTjkD3ZnIsAL8ylANXNqdtqT6wRps5LgH/gMknDAzEniwFQag28kxyS2Sx4j0NfJegptb2+WDcbo0vVMqAa9OCIPKdM7WbCS/Uy7Fp0Po+FEHw2uQ/Lu05ENTvUoZxop9w+l6Y6TK6AXrrLCEILzSBKSeIUtiM253hrAk6ptxRLsClu8YByTXsgcPw5i4gVWawjQA9yWw+Wl/z6uqc+WLmNMTmqWBqUiGWGBioD29e6mXemOuvsTW6jJhutfTvK1mO93ScHieISnaU08RSKkrF4xCpwPWyWhvQLkAYEqd55BD/ytHxVgjC+4uccoLQ2mMH2NHobLKtvg66CRxUt69LZciAv1AbAQhCt7sLEr9HDbcuho/d96NYj22NmkAIwvataUJtBN4la9SvReHsYEWkjdzqvXSMlXDwiYBb6BYHH5xtBhunxuNdKtprIY8+CrJbLgZNxsAg91iFCGmvI5fUPlDU7Go8Y5jkmtUsVhN2DGUK1ue8K7E7/gUgCIcd0GYQn5boi633gtIhrPkQwzgl2d/Vm6uvsUcPFPMpj2CAwGpBp3eZDiqmjLqwHMH6fHgru80Rnfol6GZJ175ereN+WZ2dQ102AwjPSLxO7+eQd8MxqKZqR04QXitKBaiYlIhO4b7dEt5kd8v5GdE0OwZduG8GnjLM1AKnhDpZ6PThvgHnPAtFEITq00FWP3GSqDESbSfKqH52WR1pokk6kKYvItKH/HZOiOI9BSeWqBaC8ApL4UD2jtnrrCg4hftm5s5kPLY2KKbjc7m1YwWYl1vqF6LCzOtefzJxv6x2SVgVKG3uWBZKXA6mL9iTZnnl0BCC8J5JU65iyonYGGk/nwgrJya8ob+YbJ6vSZJDTfM6PwjTVJel77O8ZtCEQSoC6cWzZ3GPPYyE8pxOI3wDMzrzEga57Oj9LnBOQ+OS3GgLmC+HibvseSgaGpbsFQqPXscElRApbF0AnE4fkUhnq0nm/dZw1ACcEjRlmmxojR5lMG4D6GgvzL2nnr6EX/wm24pJZ8UG5qtnH4QbnEKQOCUMYsZ96ZBxif1SdixfurGAiP8737udbh4Zmrm8UA92/SasdE5qGi2AZxhwjyyPL4JxI9cYq5K9XSorotXCiiKRzlmmkV4CJpWS9+322aSsvYbrOeqWhU8IwvsrrRiC8EqzCcY6gF5l8F4EtACYbXs91+iJJUo5OkSEpQN9PVarqwli+F3g3IbGQ1ZrGk2VCvMDAM3RpBwdYuL/SPbFbnLoS1kkqfzf/675fTvAvwGMbE6AjCljM4AD8ssWt3DZ2/OL32QTxJy2xfvVplI/B+j9E2QmFWqBtjI4FWLjU3YdeilSjvpZ7Lw+9XxDjmDpQr3a11pHAa6X1RYtgAozf5JLHxYzXFZr1oEevjkPIc1LPBJsiYrJa1BRISqmnJwZktjj9a+CcYk937THu6QJuG54aO/r3XZYY+oRUgumit1uTc3pA6rM7vocu99DoQucV4dZWW/OyupV3P67r8xlmUU2ZXwP4KVBO1D5iBm4vT7MF+lSlBbQXqZKpRKE+lZcCDUju9PFbNZE+jZ7GlEfGKngbz0j9XxBMfmoHUxLPRdvH/KNF9EHm8z87Jl7xU1NFEQGACqK6leQwg26bI9536pcUnvCWzEnCIuk1Dwv+mE2caMt37TDy/BzZODCgXUxFfXU09HHkvdXpRHNO5pqOkkR6IfDYeOL9twPOoH8LnCeIwOgAFkVAEkm/qTfaJLZPs7feQK5DsDefuQCoNJyXq5Tzfms71jML36TfYJQAh/TFj0wnMKqvKxh2bdxc/jK5MJmvoGZF2lOexo8+DkiXDbQF1MpdT3ntRvu2jS2ASwF/Y6p3kPbf87qMTWYcRXAZ3mcEOwipZixJgS+Kh6P/d6vvKJi8kaqEgkiI3VGH/sKNRomfYqJWwEcYvmwNgFYx8Ado0N7P1SIXladVurqtn4YhMW69gl4mpm706Op+9avv/c1byjHSvhd4Py2p8opWcP1WxpCMD7JZEYAUuq13EW0Ugf9nUH3m6D/u76/+4+FLCgT8cjrYwjgjcRGPA3zltSOfQYLwdzPO/vFbyoIQsmvdrsUMj7ObH4KRO+xLWSeod3HYl6lowQsAtERYOybxUWd+v4K0CMMWnXwrPT/K1V4dc39QOCkXH7GTpd50cuzWtduNkx/204fnjMAbgFI5dWwqjcz85GZBolo7Ui9+UghJywhCO9RrViC8BZdSggCgoAgIAiUEwEhiHKiK20LAoKAIFDFCAhBVPHgieiCgCAgCJQTASGIcqIrbQsCgoAgUMUICEFU8eCJ6IKAICAIlBMBIYhyoittCwKCgCBQxQgIQVTx4InogoAgIAiUEwEhiHKiK20LAoKAIFDFCAhBVPHgieiCgCAgCJQTASGIcqIrbQsCgoAgUMUICEFU8eCJ6IKAICAIlBMBIYhyoittCwKCgCBQxQgIQVTx4InogoAgIAiUEwEhiHKiK20LAoKAIFDFCAhBVPHgieiCgCAgCJQTASGIcqIrbQsCgoAgUMUICEFU8eCJ6IKAICAIlBMBIYhyoittCwKCgCBQxQgIQVTx4InogoAgIAiUEwEhiHKiK20LAoKAIFDFCAhBVPHgieiCgCAgCJQTASGIcqIrbQsCgoAgUMUICEFU8eCJ6IKAICAIlBMBIYhyoittCwKCgCBQxQgIQVTx4InogoAgIAiUEwEhiHKiK21PGgLHzOs8OJzmU4h4EQNHADgIQDgrwBDAG5lpkIjWjtSbj2x4LLa1FMKpfkMmn0fghQDeCWBGtt1NBDwNwu21Ib6vtzf2z1L0J20IApOJgBDEZKItfZUUgTlzzq2pqd+ykAhfAug9ARpPMXBH2jCueWJd1wsB6o0XPaYtemBolFYS4QwAIY82toP4K0hhZSIRGyqkP6kjCEwFAkIQU4G69FksAtQ8L/phNnFjQGKw97udia46+EDze7FYLO1XqKZ5nR+CyXcDfJjfOmPleNAMpz862HvPxmD1pLQgMDUICEFMDe7Sa4EIqFND3YzXP8/ANRYVkr01tdi/AtBw5gfimWDs69BlGoQfIsWX+tndz23pODwE836A/tXW3hAIT4LpN+pEweCjCXivRsZnyUydMjBwz98KhECqCQKThoAQxKRBLR0Vi4Aih9oZW1cCfK6mrRfBuJFrjFXJ3i61+LK1TFtb9C07Ro35RHwtgPfl16ebR4ZmLt+w4eZRJzlVG8MpuhvAiZYyaQZ+wKM1VwwO3rnNWre1deGsNMI3MKPTqoYi0E214U0X9fb2porFROoLAuVEQAiinOhK26VEgJpao5e
B6Ws2nf8QAddymlf4OQFEo9HQiy/ThQx83XKhrORMM2N5Mt7zfSehm1s7Opn5J5b+0yC+ItEX+6adkHJtOJDaZjLpuIGB7mdKCZC0JQiUGgEhiFIjKu2VBYHGSPR0IuqyLeqbGViS7O95LGinza3ti5hxh629vyPNxyUSsT/Z22tqis5ACKsBOnn8N+Y1MLHYi5gaGhYfYNSklYzjJxcmfD7Z1/OfQeWW8oLAZCIgBDGZaEtfBSHQ3Hzm/qZR8wgBH7A0sI2Izxroi/2soEYBamxp/zIBV0+oT/hOoq/nUvuJoKElOscAPQpgn2z5EYAXJPpjD/npv7Gl/bsEXGQpG6sLbz5L1Ex+0JMyU4WAEMRUIS/9+kagqaX9QgArJ1RgviIRj33DSbXjp/GM74RpPgzgSEv559MGH7d+Xex5axuNkY4lRBn1Uvbhp0fC4eM29K76h5++GlvbP0cMJe/Yw3i8roYXiH+EH/SkzFQhIAQxVchLv74QmHN8dGbtDjwAULOlwjPmaOj4wcFVm3w14lKouaX9S1mLqFypNBGdPdDXrdRZ409z88JDmELHg4wmgJuJ8Kva0OZlfk8AzZH265lwZa5BAh795x51Zz79yB1vFPsOUl8QKBcCQhDlQlbaLQkCjS3txxOg1Ei144sr46sD8Z6rStFBc3PH+0yDVxPwFBHWkmk8Ho93vVzMycQul+7+goEVyf6ei0vxDtKGIFAuBIQgyoWstFsSBOw7bwBvmMwnDcZj/SXpYBIaaYxEO4jodgvJDZnMCwbjMXWnIY8gULEICEFU7NCIYO+fv3TPt7w5fC8DJ+TQYOAPPBqaVwr1UrkRVia1G1+mcwB8B8Bbx09ARN3Db85c6uZzUW7ZpH1BwA8CQhB+UJIyU4KAcjRLcbgPwLt2CcD314X3jPb2/njHlAjl0akihb9uCh2kAgeC+PKJsme8954yzNQC8aSuxNETmewICEHInKhYBDSmpWqBrUjdfUMk2mIQKZPXPR0AVY5496VrePkTvbFXKhZ0EUwQsCAgBCHToWIR0BJEhTqYNba0n0bAWgcwRwn4toHw1/v779pSsYCLYIKADQEhCJkSFYuAbtFlwqXJvp5vV5rQeX4OegGHGPghj9Z8wR63qdLeR+QRBBQCQhAyDyoWAS1BAKcn+3t+WmlCN7d0/BfDPAVEg2AVRZbfBsI8XRRZuYeotNETeZwQEIKQuVGxCOh8IKophpG6sP77SzjWIKyw561QjnLD9RwtVWa7ih1EEayqERCCqOrhm97CV9MdhNtINDVF/wUh6gZwvLVcNZHd9J5p8nZygpA5UHUItLQsOiyFUC8BB+eEJ8aPBuI9n662l2ls63wnUubDBBy+S/Zg8Zyq7Z1F3upHQE4Q1T+G0/YNWlrO2ieN0QcBarAsqhXtB+F6ksgPOlh1XuHTdrLJi2kREIKQiVGxCLS1tYWHU/vfBSCaE7KaPKntwGpVZkxnJ+Pdd1bsIIhguzUCQhC79fBX/ss3RTrOA7E1y1vJd91Nkei1IJwIwmrDNB/YsWPfP5QjDIbOM1zuISp/Du7OEgpB7M6jXwXvrtt1UwmjuerCiZfrlCIEUQUTTkScgIAQhEyIikZAm+oT+F3KME58Yl3XC8UK39QSPQmgNdZw4gDdVRfedE4u18OYDPQJEI4B89EgOpBNOjcZ774nSP8NbQtnG6nwOgCHjtdjOj8R7/5BkHakrCAwWQgIQUwW0tJPwQg0t3Z0MmeyuYUsdxHXJft7vlhM3obMwm9gFYgWWITLSyUaiZz+VpPqlXPev42XI1qZ6OtWKUTZ74tp/DpGGPhIITm1/fYp5QSBYhAQgigGPak7KQjMnXvmvkZtzf0ENFo63GawcWo83qWivRbyUFNr9DIwfc1KPE6pQDU5pbWpSZ0EURfuI6P738SET+0iGfwJKW5LJGIvFvICUkcQKDcCQhDlRljaLwkCjZHo6USk0oDOsDS4GYZxZmJd10DQTppb2xcx4w5be46JfHSqKAZ8n2K0/RVwCgn6nlJeECgGASGIYtCTupOGwJw559bUzti6EuBzbZ1uB/FXkMLKRCI25CVQRq0UxuVg+gKAsLU8gW6qDW+6SJdnuq0t+pbhUVoDwrGWOkNEWDrQ17Pard+meZ3NMM17AexvKfeSwen58fjq33rJLL8LAlOFgBDEVCEv/QZGQFkc1e2gmDXDnKWRF8G4kWuMVcnerr/Z7gYoEumcZRrpJWBSeaDfbu/cT2ykSKSz1STzfgB7WUkCzF+uq8HK3t7YP63tKlIZSdGFDKj82eMZ5QCkibBsoK9H3avIIwhULAJCEBU7NCKYDoFsXCPlWHaSC0LqJPEKQAywmuMH2lRJ9qoPIc1LEonY/3igTo0t0UsI9A376QPAEAhPguk3mTaI/xcYH9T0mwbz1bMPwg2xWCwtoywIVDICQhCVPDoimxYBNzVRQMhSWfXUDX7UU9m2qaml/TM7M8fd4EE6OlGGCPg/b5/FNwo5BBwpKT4lCAhBTAns0mkpEJg7L3poyDSuAvisgIt1ihlrQuCr4vHY7wuRpaE1ehQx3WKzrHJqSp0UkibxuYN9sWcL6U/qCAJTgYAQxFSgLn2WFIG2tmX1w6k32gA6A+AWgPYDcIClkyGANzLTIBGtHak3HylRHgaa29Lxrwb4EwScDuCdFqLaDuAPxHg4RXTb+v7uPwbxmSgpQNKYIFAgAkIQBQIn1QQBQUAQmO4ICEFM9xGW9xMEBAFBoEAEhCAKBE6qCQKCgCAw3REQgpjuIyzvJwgIAoJAgQgIQRQInFQTBAQBQWC6IyAEMd1HWN5PEBAEBIECERCCKBA4qSYICAKCwHRHQAhiuo+wvJ8gIAgIAgUiIARRIHBSTRAQBASB6Y6AEMR0H2F5P0FAEBAECkRACKJA4KSaICAICALTHQEhiOk+wvJ+goAgIAgUiIAQRIHASTVBQBAQBKY7AkIQ032E5f0EAUFAECgQASGIAoGTaoKAICAITHcEhCCm+wjL+wkCgoAgUCACQhAFAifVBAFBQBCY7ggIQUz3EZb3EwQEAUGgQASEIAoETqoJAoKAIDDdERCCmO4jLO8nCAgCgkCBCAhBFAicVBMEBAFBYLojIAQx3UdY3k8QEAQEgQIREIIoEDipJggIAoLAdEdACGK6j7C8nyAgCAgCBSLw/wHDpzQgBC3utgAAAABJRU5ErkJggg=="/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-59"><g><path d="M 191.61 1051 L 230.39 1051 C 247.29 1051 261 1064.43 261 1081 C 261 1097.57 247.29 1111 230.39 1111 L 191.61 1111 C 174.71 1111 161 1097.57 161 1081 C 161 1064.43 174.71 1051 191.61 1051 Z" fill="#0cf232" stroke="#e07a5f" stroke-width="2" 
stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 1081px; margin-left: 162px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">s2idle loop waiting for IRQ <br />to wake</div></div></div></foreignObject><image x="162" y="1060" width="98" height="46" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfQl4ZFWV/++8qnS6QQVGZWdAxxWXUVvI3gZZ7JalpZtKuoGGRkZkaUUckFHAFXWUUZQPbQWRTSGpkk1RBBQzSVWq0tijIoOMo6IDgg36b6CFdJKqd/7cV1XJy6v7tqqXpKpy3vfxfXTqLuf+7nv3d+85555DkEcQEAQEAUFAENAgQIKKICAICAKCgCCgQ0AIQt4LQUAQEAQEAS0CQhDyYggCgoAgIAgIQcg7IAgIAoKAIBAcATlBBMdKSgoCgoAgsKgQEIJYVNMtgxUEBAFBIDgCQhDBsZKSgoAgIAgsKgSEIBbVdMtgBQFBQBAIjoAQRHCs6rZkb+/GpZOF8Xcx80kA2gDsCyBuCUz4Gxh/BPFgnmID9w8PPBpkIL29iRdNTNEdILyzXJ6B43Lp5A+C1A9Tpr2n7yPE+MJ0HcL12ZHkRl0bHT1914FxapCyYWSY77I9PWv2yXN8BMA/zTW+8z026a95EBCCaOC5XL78jJaWZds/QKBLAOweYCgFZtxeiBnn+RGFEEQANGsoIgRRA3hSdd4QEIKYN6ij7aijI/EPiNF3AaysouXtRLxhdCT1Q7e6QhBVoBqiihBECLCk6IIhIASxYNBX3/HyIxK7te6kFANHVrRSVCkNA/QUiFsBdIHxCgAxR9knQXxsdiS1RSeJEET18xOkphBEEJSkzEIjIASx0DMQvn9q7+77NAEXO6o+yIQzcyPJUQBs/+2QFesOiBXMy4nwHgdRPGjG86vGhm59zCmGEET4iQlTQwgiDFpSdqEQEIJYKOSr7Lera+0bTIrdUzJEW60QcO/EUk5s/UnqGbdmE4lE7LHHcQGILrWTBAGfGk0nP1mlOJFUEyN1Eca5cgKIZJKkkUWJgBBEg017Z3ffJxn4hE3s/0OBD89mU7/zG4oyarfu8syNzNxvK/ugORU7Ymzs5m1+9efqdyEIIYi5erek3doQEIKoDb95rd3dfeIeBUzdBZByZS2fHkKdADq6EysBugPAklITz5nMK8cyqfS8DsbWmRCEEMRCvXvSrzcCQhAN9Ia0r+h/PZn8UwD7VLu4a3XfTGtzmcFbFwoKIQghiIV696RfIYgFeweUSie+dHtbDMbpTNwD4EDnBTZi3J0nun5LevB/ncZlp+DtXf2riFhdVCt7JD1ixvMrdEZmt0Frjc+EC3MjyS/a69RqpFaG8TgX1sHEehC9DsAyAHkG/hvgG3hqybfGxr77rOpzPgnCGlfeWAWYpwH0NgB7lcadB/AnYhoxDf7WAXtzLpVKFebq5YnCSD2NMZNSGb4GwItL8o4D+CNA98Awr9l/LzxUy1gs1WTrM4exwery4goAe9vewW0EPADCDROt/AMvO5jP+/X7OOV7RkZufUKV6+pKvNY06EMwsRKEA0r9jYP5YSZ8x5zMX79ly21/m6v5kXanNRQCxRwgQO1diT4i+hKA/YK0z0COiU8fG0k95Fa+Y8W6t8LkcwB+C4CXM7CzhfK95Y8qSD9tbev3opbCMBUXlNLWlc7MZga/GQVBtLWd9JJYfOpSJpw1TYZ6wZ4C6L3Z9OAP23v6Lpjrm9QdHYlliGMT2LpUWF5IPSDjh8nAOaPDqZ/5EXcQ3J1laiGItp7EwQbTlwEcoXFf1onzIBl8btixWJgZOB1EH1fvW4BxjjNwNU+1XFIm/xAbFYsg8vnY0zCMT4H4PJ/3R202vr40zhcNDaX+HkA2KVIFAqJiqgI0rypu3kIBuxln4rNzI6nr52JRUjK0dSeWG6B7AexRkqnATMfmMoN31UoQJRVYEsAbA4+X+TSD6CAG/n26TsShNtp71x1E+cKA3XYTUL4CCJdPPr/7x7ZuvWoqYJ1AxaokCOro7t8A8JXBSG6WKHnlsTYxvvsXgozlkN7E3i15ukF718Z/hA+yQX254cHfhCGIGApHmRz7KBP+xb+L6d3NmBkvnBDmFB28bSkpBBHxO9DZ07eWGTeWVCrl1gtgPMqEHIEstQqI3wSGOgko1Yv92c4wVuXSA2MRi2Y15/SCYuDROAq96fQtf6iFIDo71xzIRvxHAA52yF0A+GHAyKm/M/jtBLzBtjvcDsYoCEfPBUF4ymWbEwa/hIBuAPtX4k5XTY7vtinIwhp0zqogCOroSZwPps9rTg2W6o5APwdQ8Hi3CgS6amJ8t3O9xuKBWTm2l3UR0xszPG6wsTqTGVAyVTwaFeYjDNxFwNm2wnmAtwLGr6xLnybeAcJBzsaCuHkHnRcpNxsBIYgI34hDDz3+pbElLeqOgtJtlx4eKhh475bh1CPOrtRHsjNP/1q69FYMrmc9dFNrfNupQ0NDSi8e2WPprE3zbgCv9+srjA3CxX3Wup+RN/j9zrErnIwlLeqy3/u1KpKIThAuN86teFRmjC/QzAm19ax/k8H5awB6uw34AoM/kkunlFonkicsQbR3JY4jogHHhiIP0H+YU/HPO1U6Hio1z7EUbTT0PQDvcgzU9SKmUnkR0zUEtDvrhLmIOQtvfcww6lix7i0wzRucp1QCbV4S3/bBqL+ZSCa7gRsRgohw8tq7+44gQMU3KruQBrljQB1
diQtB1s6w/DzBBh3udUQPK7ZaxJcse+ZKgM+w1Z0EeHU2nfqxjryCRnNt60ocaZDlOjt9GiLGt9jkD2azKWUw1T3uO+KICKLC+K1218wX778vLvMy2pYM2V8HeINN8MD3TYLMTRiC6Ow8fk/TaLmHgH+2te0bT0uVLdmtvveCY8ArbXUfN7hwVCZzy387ZW3v6juTCEqFZQ/Nkpxcymd4GaCt92uXpz8HhrIdzNQlfCU7kvywU2Wq24CUZCkQcOnE+O6fdTvllOKQDZZsMOUhPEsmrxwdTWWD4C9lgiEgBBEMp0Cl2nv6ziPGzC7TY6GzN6gMx0ZL4Sf2XREznZzLDKpgfJE8OtUXEQ1OPL/bBt2HGPQE0dvbG5/I73U9wCeWBVUGd3Ny6hg/L5NiNNqnv0nAabMGGQFB6DAFgquKSqecO+274ihvnYciiJ7+dcz8HdvCWyDCxtGRpPqb79PVta7HJPNOAC+ZLqxZuNWJa8lO/OgF76fOsHOpyrucJLVk5EYQDFw7Nb77+/3UecquhLx59yxnC6IrsyODH5wr+50v0E1YQAgiwknV7FhTrfEnTwxw7KWO7r7rABwO8J8Y9EvAuCEqO0RnT+JoZlJ2kbJhWo3ac0cclCC6u9e+Mo/YEMFyRVRPgYhOHh0ZVOoQ36ezs/+NbFh3O/a0LV4154PorFxUQ58AKtvgBybj8cO3Dt38V9+B+RQIShAq18dE/rkUQMfMNMl3oYC1HqczZ+/U0dN/BZg32TD+HfLcm82m/lz+m+YE7HrCdBuebj45oBs1gCfJpMNHRwcfDIKvxj26YkxB2pEy7ggIQUT4drR39a8h4ltsTc65V5Kf+B0r1nXCNG+btQAD40TYMDqStMs6q6mgBFHSjatLdpZawc3o7SZn8QSy500AEhEShApoqHTi0ycTtTPNpZOnh9ldasgvslvnQQlCIwOqOV22dSW6DSKlSty1hPMkA0fn0kl1crWezq6+zzLhYzYiGouhZVU6fdN2v/es/Lt2PsF3tsZ3TQwNXbdzplxlQqqwtjfNxdGKMQWVW8rpERCCiPDNOHRF4hUxk9RuWIXXLj/Ki+cXYHzDzMfvnM+YR6UdoVJTzezOrctqfGEunbrca7EMShCaRaViMfCDOIxqLkhGOV1IEiZ8ODeSVGMO/HR1Hfdik5aqi4nvmK7ElXdGAjdoKxiUIDSXI6uyTy3vXf+yJfn8TwF6c1kM+87+zUdt2PVFz0/cZndrZeCKXDp5btjxdXT1vx/E37DVm3UJTv1d+36FVKvq5qeaeQ47vsVUXggi2tl2C8Vt7+UxEO5kxi1L47uk7buqCEVR/vInAHyNw18+EDm4fsCVKUepvavvBiKcXJZdGadHM8n3hRlLxSJYow1Ct+t+wV57N0B/CiMXg1uo6M2jUrhaDzE+N5pJXhSmHV3ZoARRudhWp+byO6npCARVkqHmtLLdBB85lk5tLWOhIYinyaSjRkcH7w+DbZANQ5j2pOxsBIQgIn4jlKGvZYJuIsa7AzRd9PNmXF2Yyt/uZ9QN0J61hnV09ylf8sscLpHKm+iCbDr59SBqliAniKBhO/zkrri8VyNBaC4D+okQ/PeAjgd+DQYliAo9O+O+1hZeXc3tYa/FNKg8fuNSv2vwD0IQocPGqL4qT7AIavcLMpRFX0YIYg5egfBhHSwhVNyfHMP411x6QGV5m5X0J4iYrq6GQGhbiBCEC+LNRBA2spljgqjIdaF5vyrUUEHe+SgJNEh/i62MEMQczrh1Ea6AE4hxPmAFrHOm/dT1HiokQrmBkm+4iqd0gqPRQP7yTkGEIBYBQWBmtz3HBFFh3BeCmMOFJ8KmhSAiBNOrKUv1tJNWEFR0UxwFxks9yoe6udvWu2Z/Ix/7nibW0ONk8gnVXB5qMoKoUHHM07S7dhN0QY5yh1znKqbqThDdfV8lQN19KD4RnfAW+v2ol/6FIBZmJqira90+JgqrQHgfQMs1kSuD3MKGcvWDybfPujBU1E/9igp8QpBMczoIghCEqheFkbDCqFmjDaKzM/EWGHQvAy8rjU0bkHBhpr7Ya1CCiM5IXXmfwu5QEKWR2nmfgoC/wuQjR0dTvyxj7ny/dGUCzE+Fk0S1nlcB+lqURYQg6mDaizGSCjcA1GsTx3dR6+pa93aTTBXiYtrLplT/xyjwSdls6v9VO7ygBNHe0/cxYnx2up8qjKjtXf0nEVk3hX13gUEISRfSXHdZq1psoqgXlCCCLLZB5HHBZNr1V3chrxqPNCWL022Zgd/yVGyF3cVb836FvsOgc2eu1vMqCIaLsYwQRESzXrQBGOq26wrAVEHLYjGYRzujpLp117Zi7asNM6ZyD0znj/Ba1Fxi7Kgz9o2tcfPsarxc7LIFJojKJEZh/fQrLrV5qQmCEEREt49RWlRVOIc9XlAN/o8J/MYw+Vr7Trja1ycoQSymi3JhQ5kE8Zaqdn6kXnmvJkhEgkBHR2I/xGkIjFeVGiww85pcJvX9IB2EcRl1Ccccae6CoAShGTfC7Na1lwtrVDEpvDu6+84BrKBz5edZg41jMpmBkSDzocp09vSdzAwVAqXsXBB6l+vWV1CC0JJdyFOaugMxObXn5ll5FgiBQm0w0/ow6Wh1LsbBQ23w6ORSvDtoVjpn6HogXP2g78FiLicniIhmX3cRySsYnrNb7UKryRVdyvJ1M4hW29rwjYAZdphBCcK6d+GM8+MT58kmi/5iYQQE0dW19g0mxVTo9ZlLbsC9E0s5EWQBKhr+4yqJkj350X8VJqeOiuK+SlCCKBLVXATrqwxsN0fB+rTxlVyC9alQ7JtymaT9Frb21dXNL4BN2XTya2HfdSnvjoAQRIRvh+5DBvFHsyOp//C511AR8tstppE+HHPwKKVBhxuCIKD/WHkoH8f6+4dSf3Hps5yW9dqKpEkREESRuPq+DMaHZvfvr4JTJEwGXeHIbBZ48QqCcSiC0If73sFMG/129y6qyCdhGCuzwwO/cMqqe7+Y8KOpVj6xmnDfbnkaPMJ9P8nASfYYUU4ZXbz2fpM3jHfdPzzwaBD8pUwwBIQgguEUqJQuRDSAPDE256emPqXbeaocztQy9fmK5DmacMz6hD940C0pSyChXQqFIYjiYqzLdqbP6+yeKKkkTDQEAZdTgOLqnzNim3QXEt2S30SdtSwMQShU3BIGqXerkG+5WJcwiGLGaQxWqVztObhVxjnXTYt7wiD3HN1dXYnXFog2E3CY43V6iMz8u0dHb60IceJBEKqJHSD+DPK40hGxljpXJA5j07jakd8iVPjzWr6LxVZXCCLiGdclzyl1ocJq/A5EY2CasFIoMrcBpGwWtmxyVmnth6XRq0cifVAdMVfGYpru3yUhUfn3xxhIW+lW3VOtzowlIoJQDXb0JA4Fkwq4Zw9YWOyL8DcwrPSZXiktAfoDDPM92eHUryMBPISbq62/KFKOqkH7pk+NKOXokyA+NjuSUlEBKh4Xgpi0JduyiIKs6AL0SCm96Tu18xhgTFHN22JrRwgi+hmn9p7EqcSkYh45800H6U2b8F2nHw
7SWJAyURCE6qe4+zScmdj8RFCLwGaGug9SylcRIUFYJLFi3VthsjOrmp9cpd8VOdAJOnVMwAa0xcKeIMrCdHb3n6U5FQQRReWt/tLU+O6X+CXjUY2V1Dg3AdQTpPHZZfhhGLETvTDTEQQB55rAex2Z87y6j9QxI/w4m7+GEMQczXFnZ/8hbPC3nblzPbobZ+Bqnmq5xKkuUHU0se8jkzwqglACJRKJ2GNPUN8Li/1XAbzcW0h+mEzjlIJhmgbo3rkiCGvBK6ryPkNFIgpC3JZqUKe+iQL4KgnC6lqpwQymq9RrESR8i8rwF2PjA5nMwM/DyF50iKBzQFDRa3cPUFeR/ReXxPkrfm7WbipMKnCG4sbXmVmFjPEITeOu8gogpxQJiIAQRECgqixGh3b3vzrOfCoT3gXCQbYQGyo4318IeAiEGyZa+QdeRsC5jFAaJUGUcVILwGSB3sPMZ4HotdPjttQ6vIVBm6fGd/+x2s1GHc3Va67USax1go4F4xQGDgawt20h2vZCHurfg/HtCKPrRnmCsLc1+90CXmOzNewA8Fti3J0nun5LevB/qwn+aJ/LibyxCmCV1EmlIy1jZr3DAEYBSrXGzbv8iGFWm1N0BwhKbWQ9NhUmtXevO5RgnusIS7PthXtG9zHw7QP24Z955RWv8nuVag4EhCDklRAEBIF5RyCkE8S8yycdFhEQgpA3QRAQBOYdASGIeYe8qg6FIKqCTSoJAoJALQgIQdSC3vzVFYKYP6ylJ0FAECghIATRGK+CEERjzJNIKQg0FQJCEI0xnUIQjTFPIqUg0FQICEE0xnQKQTTGPImUgkBTISAE0RjTKQTRGPMkUgoCTYWAEERjTKcQRGPMk0gpCDQVAkIQjTGdQhCNMU8ipSDQVAgIQTTGdApBNMY8iZSCgCAgCMw7AkIQ8w65dCgICAKCQGMgIATRGPMkUgoCgoAgMO8ICEHMO+TSoSAgCAgCjYGAEERjzJNIKQgIAoLAvCMgBDHvkEuHgoAgIAg0BgJCEI0xTyKlICAICALzjoAQxLxDLh0KAoKAINAYCAhBNMY8iZSCgCAgCMw7AkIQ8w65dCgICAKCQGMgIATRGPMkUgoCgoAgMO8ICEHMO+T+HbZ39x1LwPenSzLua23h1UNDqb/71/YvMdft+0vQ+CU6OhLLYOB4EM4B6E0AXlwa1TiAvwB0X2t84vyhodufbvzReo+grTux3ADdC2CPckkGjsulkz9wq1nxDlYP0g6AngL4VwxOIx67NTc08CcAXH2TFTWpvXfdgcgX1higlQwcDGBvALGZOefHmGmMiL4/udS8Z+tPUs9E2P+CNSUEsWDQu3dc7QJeWrROB2Eym05dFfjjjJiA6hDSSEVqX9H/ejI5CeCNbg0TcO/fd2k9/oF7bnwu0s7rsLEFJogKRBjIMfHpYyOph2qBy/qeYrQewKcB7BeirTwT7gEbn86lB7ZETFYhxKi9qBBE7RhG3kJYgkgkErE//8U4htn8d4Bex4QLcyPJLwpBRD416Ow8fk/TaLmHgH/2ap0Y3xrNJN8XvQT112K9EUQJoR3MtDGXGby1CsSoo7v/aIC/EZIYnF0VmHF7IWacd//wwKNVyLHgVYQgFnwKKgUISxAdPX2bwTiz3JIQxNxNakdX//tB1sIx+yH8DUxFtQLxbgx8NjeSvHzuJKmfliMiiEJJNTcRfGTc6lD1OKs+CeJjsyMptYsP9KhTA8WMSxl8rk2FVLHwz5aVdwWwl0cH25l5Yy6TUiq3KFVfgcZUSyEhiFrQm6O6VRDEdWCcKgQxRxNia7ajp28W1gCeBOGU7Ejynkb7+KNCKyKC2G6CjxxLp7aGkUudnh/bhoNh0vkATgQQn12fRyeX4t1BbALLj0jstmQnKdVsn0aGpxm4xjBp8377mX9MpVKK0Kaf3t6NSyfyz/Uy0QeIcVSlHFC2qQuy6eTXG+k9EYII8zbWaVnnouV3gqjTYdS9WLokN8T43GgmeVHdCz+HAi4kQdiH1d7ddwQB3wWwp+3vBSI6eXRkcMALguXLz2hZsuyZKwE+w1Euz8ClS+P8paBOIm09iYOJ6RoC2h1tjRNhw+hI8pY5nI5ImxaCiBTOhWlMCGJ+cNdmQWNaW6Wee36Enode6oUg1FA7e/pOZsZ1s9VDdFNrfNupQ0NDeTc42rv6ziTClQ610pMMnJRLJ38SFkYPVdXjBheOymRu+e+wbS5EeSGIhUA94j6FICIG1KU5SZOpB6aeCKK7+8Q9Cpi6C6C2GWn5gcl4/PCtQzf/VTeCjo7EqxCjnwL4R9vvoe0XzrbdTiVENDjx/G4btm69amp+3tzqexGCqB67uqkpBDE/UyEEUf8EoSTU2Il+H6d8z8jIrU9oRkAdPX1fBuNDtt8KRNg4OpL8Tq1vlrJrtO6kFANH2toaN5lXj2VS6u5IXT91SRDtXf0nEbFtcrx3AHaE27v7vkrAB6f/Rvgd8tybzab+7DcTzrp+uny1Q1i69JnlTEgw+B0gHATGS+0vAsCPERsZMG6emNjtZ0F2DX5Gat1C5Tk2wvXZkeTGchm/9mfh2dP3EWJ8ofw3+wUoNf6WZU+vJPBZAL3N5smx4wWD4W8ZfJM5mb9+y5bb/uaHve53ZYB89Ak6jID3Alhh81jZAfCviWjzkhjfXtYNO8flN39BZGp3jD9AHV9jq8KttfWZw9hgNSf2canmtxHwAAg3TLTyD4IYV1UlzTsxvSgqnbhhzSEdDmAZLI8r3sKgzVPju/84yDvpNe56OkGEJYhDVyReETOt08MrpsfIfAdMrM9mU8qwXPPT1bWuxyTzTgAvmWnMX+1Vc8cRNFCfBFG8iKQmbZ/SGJ8zmVeOZVJprzG7LJyB6mqOps+SyStHR1NZZ58zF9Lo4wBeHmIengJw7v77cNLpBeEgOc+b1PVAEB09iXeA8Q1178Jn/HkwXQ7T/ESYDy5E+08x8wdymVSyvbvvGPsN9HojiCrem3EGruaplkvGxr77bMh3//co8DsQo/cAuMwiBt3DdGY2M/jNEO9wRdF6Ioje3t74RH7PmwAkbIK6niA0bsuTAK/OplM/rgUTe11rQ7DLMzcyc7/t70+SSYePjg4+GFU/c9FOXRKE9ijvc/lLgVO64WonFgszJnzYzye98iXnsRhaVqXTN223A9/Wu2Z/Ix/73mwdZ6ipKRDoqonx3c5127n57fAXlCCYVxPRAZ6LjgYOJvxoqpVPDLAjpo7uvrNDtl8A88Uw6GEwbit3X08EcUhvYu+WPN3gUDUEfXEeZIP6csODv3GroDtBEHAFA//uSg7AIwWDD98ynHokqCC6cvVEEDp7gtut9pJragqgY2bGFVxbEQazzp7+dcyWVqQcnkMtTDWTcxgZqilblwShBtLZ3fdJBj5hG1SqNf7kid6eCP1riFjnQuZft1KVckUunVSXZaYfF32i+r10yQdbinFhLFpSJ4tOlws0k8y03s37xZ8gNi6dmHp+E4heXezLPAwo/7/1ByXHL6cFZ3NLNpO6pvxvv/btY9aoWG4GoHal5R1pAQS1w
GTANEHgV3DRva8cm2hGjAAk39nTt5YZN2oWtR2kQiiAHmHwSwjoBrC/TVY1B8qH/tBICaKrfxWRNV4wuIWAdwHY19bv3QCp2D/lZ5zj9JXc0MAfy3/o7FxzIBvxH6EYw2f2Y6l7MKzeG5dxlcs/brCxOpMZ+HlFG3oV0yQAdemsPA/KI+c+Aj0HmO3WyY9oc3ZkUKlja7q8VS8E4WYUdtsoFDd78WG7emmubsB3d699ZR6xIQLU5qq4QjC+k8skT6kVf937ENXf6pYg2roS3QaROuapW4pAAFtChf1hZmny9GLQ7CQmGTja6d7moo++vWDwh912YV1didcWiDYTcNjsSeO7UMBandolzAKu2gxrpA7TvocO3gojYMb4AufYLVVKHBeA6RLHhaEHzanYEWNjN2/TvcAdK9a9Faap5tzux74DxJ9BHlc6sKLOFYnD2MTX3NRcUZwg7HJWY6S26uTpeygSi/15kAln5kaSo84FwsOP/kEznl81NnTrY078PE6V6sT6VS6YF9vxO2TFugNgFKbuH0r9pdbFpB4IwsOt1PWUVLHGWKv23Ozq33zUhl1f9PzEbbNPkHotRa3zEWX9uiWI5b3rX7Ykn/8pQG8uDVi7aJfB0Lu3TUPlaYfQsHvFQtbWtn4vo6Wg/KGnA7QFdVcrBf263q4XZeDROAq96fQtf3BOaJgFfIEIwlLp7L8vLvOwpVBHT+J8MH3edqwuMPOaXCY1E6m2NHg3Pa2fH7pS3cTzuBmgXieO9UAQLv71ycmlfIaXus3aDe/y9OfAOG+WWoLwlexI8sNOUnEjCAaunRrf/f21GqK9Fp0FIgha3rv+pUsK+Vcy0xoCTtbETfK8mFbpDAPPNabWhbezq+9qJvzL9AnCYw2ota+o6tctQagzQ3t3n7qNeFp5sF63Vjs7E2+BQfcy8DKrvDq2k+UhUnw8dgbtXYnjiEgF9bL0g+qjyqWTp9s/wtItzR8CWFJqMZSRSbNbeZpMOmp0dPD+hiOIgF4eOlJlwkW5keTnnGPu7Ex0sGGdGMueHuqEsimXSVbGPXJUdvFjV7Ynz6CFYT+isCeIYugG/AggpWosvZbImZNTxwTx7HIhTe1FKxeCcHW0CDv2eSCIKEVSbeUZfGEunVLxsLQqNM3p2NcDrRYh57u/WmSdXnOjaGSu2tC4u97ZGt81MTR03c6KRXW2a+x2gD4EsDLQWZ5QHvo+6ujpvwLMm0ptaq/mKwIyDTqNwG/FXwhkAAAexUlEQVQH6EAiSi+JbTvZyyZil7GnZ80+eY6PAPgn22KhjZlf5ycI11OAc060HiUOl9tync6uvs8y4WMzbQSPoaPqdHT3nQNYN2Gnn4UmCM2mIrSHTGdn/xvZsDz6ptVuunHpCWJ+VBgRnSCiXEb+DNCZ2fSg2tC52lcq3zkIQThmoZ5PEHCqfjzUMo7TBj9QgJGIwbxhxttI753gVGV5qX5qeYObhSDC4lPxEWoIQqceDLu4a42AC3yC0JCe1jPO673Su21yxUZJHwZkfoygdUIQeYC3gnEFTNwWxKVac6FuvgliTlVataxXDXGC0BiPtbtX5wKjVEQH7MPve/Rxuo7I0k2qRzv5laqfyo8vCqCbhSAA/KfBO4/NZL6vLsP5PhXHag1BaNyTA91dsXeuMwKGJRm/wYRRMWnlASo84/z6tE5HlSHGK/z6q3UND9K/X5mICMI73Dfxbo5LqCWxeNRg419bWpb9UqdZ8JJd49Qy3wThqmb2w3y+fq/rE4QCwbnAsOYjq3hBS/aG9p6+84jxZZvOocLvuGIBi9CLoXjT+m+vMRE7DsQnlbxtpv2g3dIy1rOKKWymtEAEUYzCabfvPGLG8yt03jpeH0aFEXABTxAaJ4uqPWQ09quKhcwlkODJucygim46p09EBOG7OCvPqxaTP8pglYjJFtab/gAy35sdSf1nmIHOt01gvvsLg4Vb2boniIqPQ5Me07HDmn7RnHWdxmfNCcX3JdUAOeNNYeIVBlkhJ97KgPK+8koiopSjjWeDcLEhuL1ggQiiq+L+ilfsHNf3vmIzsYAEEebE6PchaxbgYAThkxfar9+gv88XQZTl6exJHM1M6q7MdA5sAONMfHZuJKW8BQPd6xAvJv8ZrnuC0OzEnmCDDrfdKq2wP5QjNyovGmopDBPwmiIUs412FXrrgLmZp2MQMc4H4RCPm6qeMyAEUYSn8hSH+1pbeHXQ+PtlkBcRQVRsLsKov/yXhXAl5psglHQuFypD5VvQOBFUfcrzQ8zlHoTn/Sy/Nufj97onCGsBcQTgY6bpo7PGyDztouo08BHwV5h85Ohoyrpl7NxBBEj+4ns5y2XSCmA8CrJOFNMxcYQghCCCfOSaBbjCRrPYCKLkBv9pAi52YBg4TLfWsUHj4h5kjvzK6G5tA/CN8ODX7lz/3hgE4XFPobOz/xA2WKV73L14SJh9E9Jph7CRi/OehZ9hNGiMIJWU5HEr7AOxCvT3n62xXR8oFJ7doyncXEXFdAcI7yx/mG4kLyomvfp0+rTX3Tc7IKWLE4nXAli6Z3K785KkspNNLOWEX9wvfSwmeN72r3ZBdt610q1V1bY9l/UagiAqmX5GVeRmfyiD5rRDlGOtVKquvIN0tXevayOYdzn0noUXIj/+gmBcCxPDS5aYf3RTi4RZMOrZSI25IYhVRKwSupcN+GKktn31TlWI8ySsii7CE0RRPan/LtXFp49mMykVpt7THqHxEAuUojTMolzUZOx1PcAqZ3b5CXXRNkx/UZZtCIJwMyYviz/1q9mhfSsXeTc7xBRPvWF2rCe60i1wmcsE7wDo9Gx6UMXZ8TWKCUGUXlsNwVTcggf8TnMV34BuN7iQbq46eaoNBFdxCla5NqZiK+wxrRYrQXiqmgxjZXZ44BdeC6Y+H0RwG1jxIqN5LcO4u2DQN+8fHnjU2Z8+H4R7LLYoF/ha22oIglCDrLgpy3TmZItxiz1eky5EhuaikWXkNkzut0WL9bwd7KKrvDSXTqp8EL7koOSvUIUVK4oXEwCXOFqbsunk14K+4HJRLvGiiSkKpP4KimnQcgthpLbL1tl5/J6m0XIPAf88S+ZgIWGUqtlpyyiA+KPZkdR/+H3fjnUpz4w7YuCLMpnU/yhZJKNc0LeoxnKalzBlMDabBKWaKEZ8dbnDUGmHwEYCTrHpkj3j4mv80EPfgNSGghCCmH4rKi8thQu1oUtWv5AniKL6o+J+h2eYd90nol18Ne67i/gEUVQ1Fe2UAw6PwklmPiWXSQ16LT/tvesOQt68e8bb0Sq9nYg3jI6k1P0c7aOLtVUqOA7iT7TGsHkib3wJ4DPsDQQN8lnjkhlJ9YY5QVTsMgm/Y6ZBAl9UQsL1DoNmgVcXapTrazljnac3geZDLzDTsbnMoLJJ+D6uweSEIKax0wXrC5oXuOQhouZiOtJucb/QlMH6tLrrxU4QLoEN1WvgGiLd/uG2dyX6iegGWzBOiySYeWMuk1KbUK2moHR570Oly3vOHChPl/JxzCQJAv4PBT48m039znfhqIMCDUMQpR2ZPd/0
cwDUf6UgZu5G5ko7hAN5n9vTGh25FfE1SBhlRQ4co+9VHH+tAIK0Vpc0qFYjtZ+7bpj2g1x089ydOXM6uxi53cJ9wzCOzw4PqJwJHrs4ugpAn7PAQhNEcWfbdyaRFURw5gZ9gOx6buG+CbR5SXzbB51BIhc7QSisu7rWvsGkmPJotCd0Ut+qrzrYLdmQigpLjM2FfMvFXmlf29pOekksPnUpE85y5ECxv5ah7mnUAT+gsQjC4e5qB1Bnfyj/7pKntvyz8+Jdxby4HCULDHyTp1o+qntxDj30+JfGWuPngOl8XXY1rx1umAVcQ5zqTw+hwKvddilh2p8vglBCuyUMItC/ccG81pkwqK1n/ZsMzl8D0Nt1H1M9EIR7wiB+mAycMzqc+plzd+qeZAoPkZl/9+jorfYMdtbQhSAsGKijK3EhyMpBYn+2M4xVufTAmNei60ESqtrTDFxjmLR5v/3MPzrzoJScElROkk+6pCNWGf42ZtNJpQYLZLcUggiJQEdHYj/EaQiMV1VU9TkFVMRlKjcQ8Pa0TsddamIczA+DjP9SqUettJGMdpCVWtB+tNwBwja77G65EcIs4NbCWhnMTf25dB+D1A5oeEnLsrPKwczCtD+fBOG24y7hHCTlqAmgZXpqF1jFVJYjopSjnhfAhCCKaNdosIZ1kl329IUlBxZbvKdZK87swIKugQSdq5T7piDkUjhvxRvqBOFxEvCNoaRNLxhCT118cZ75KhcNTvaFP8Bk8cNkGqeYMe62Bw90y1ERZgFXnbsdrWcEmx1iJEz7800QRbfFxHkEUj7sbh+oE/NxMD4D4qPsl6bq4QRRFrRoJ4ndBFBPgBfGUYQfhhE70ctlUwhiBjIXg3UhqE1LvYN+6WzDz+F0DStVbyFmnKdzia2h3Tmp2lAE4b5b9r7kpuq52CFC+dtbR9ClT58LgnJvdRqkdBP0FJg/DRPXKPVIhbHbJc92mAW81Cm19yROJaav6+JCOXM4hGl/AQhCDSnMB/o4GbxhiYEtFW6ehA/nRpIqo1gkT62LsJV61qBzQFCOFcWb/96POjV9cUmcv+IXl6pW2fwE8fp9od1cnbLVarAut2fFXFu6fQ2RpTZ6XQiMVG6KNCP2b4ZZmGDDCiw4y4FCBRcE8O28YXyhnomi4QjC5WWsSBHqnEyXxCuhE7iodpV9Ib6k5SQG94PotbY49Uqt8ydiGgHj5omJ3X5mzwWsScGp3dWEWcDt41TJ7g0mFZtGhYKwR5KdRYRh2l8ggrCGNR0UEdgAWOljy2PaAfCvwfhaOTnMfCyQUfVRtEsYqwBOKK0IgL1Lp9KS6gKjAKVa4+ZdfsRQnv+oZAuxCE4XrTeCUIK5nqpdcnr7jJvae9cdiHxhjQFaycDBtjlTVXcw8AQYW4jo+5NLzXvsYT6sjUEcm8B0SeXGkm5qjW87NWhmymrmp5Y6DUcQtQxW6jYvAprLdqFckZsXGRlZvSBgucQyX8bMa0vq02cNNo7JZAZUKuK6fIQg6nJaRKiwCGiiZfrapcL2IeUFgSgQUEQRM81zDaL9OW+eFiQ9ahT9VtOGEEQ1qEmdyBHo6O67BoDKLKdCFPyCDf5Odjj166AddXQnVgJ0R/miE2viFQVtS8oJAoJAEQEhCHkT6gIBTTrGwLHyXYySgevXBQAihCBQhwgIQdThpCxGkTTx8gNdbrIuR/Ukzgdbl6PK7seRh2xejHMiYxYEhCDkHagLBJReNm6adwN4vU2gp5jpvKmduyXt3mDl3y1vspaWT2jCG9zdGucTgnoA1QUAIoQgUIcICEHU4aQsVpF0cYtKWBTdWmE8qP7tcVtdHSj+AMN8Txj7xWLFW8YtCPghIAThh5D8Pm8IlMIcXMTFPMMhb6tbYj7IBvXlhgd/M29CS0eCQBMjIATRxJPboEMLc4u6PMRxBq7mqZZLvCJuNigeIrYgsGAICEEsGPTSsRcCiUQi9ugTsbcTeD1gHgHQP9puoVZ941hQFwQEgeAICEEEx0pKCgKCgCCwqBAQglhU0y2DFQQEAUEgOAJCEMGxkpKCgCAgCCwqBIQgFtV0y2AFAUFAEAiOgBBEcKykpCAgCAgCiwoBIYhFNd0yWEFAEBAEgiMgBBEcKykpCAgCgsCiQkAIYlFNtwxWEBAEBIHgCAhBBMdKSgoCgoAgsKgQEIJYVNMtgxUEBAFBIDgCQhDBsZKSgoAgIAgsKgSEIBbVdMtgBQFBQBAIjoAQRHCspKQgIAgIAosKASGIRTXdMlhBQBAQBIIjIAQRHCspKQgIAoLAokJACGJRTbcMtlEQ6O1NvGhiiu4A4Z1lmRk4LpdO/qBRxiByNj4CQhCNP4euI+joSCyDgdNBmMymU1c18VCbbmhCEE03pQ05ICGIhpw2b6FVNrY//8U4htn8d4Bex4QLcyPJLzbhUJt2SEIQTTu1DTUwIYiGmq5gwnb09G0G48xp1YQQRDDg6qiUEEQdTcYiFkUIogknv6On7zowThWCaNzJFYJo3LlrJsmFIJppNktjEYJo/EkVgmj8OWyGEQhBNMMsOsYgBNH4kyoE0fhz2AwjEIJohlkUgmi6WRSCaLopbcgBCUE05LR5Cy0niMafVCGIxp/DZhiBEEQTzKJuMfEcFuH67Ehyo1cZ5Sr76BOxtxOZG8E4EsCBAOJWHcLfwPw/BBosTMUGx8Zu3hY1jG1diW6D6McAdi21/QQbdHhuePA3fn11dPW/H8TfsJXbboKPHEuntoaty8C1uXTydADsUpfae9cdiHxhjQFaycDBAPYGECuVLwD4CwEPMfNgYSp/+5Ytt/3NT45qCaJ9Rf/rYfLtBLzG1sc4M5+Wy6SSHuOwilv95o1VgHkaQG8DsFepnTyAPxHTiGnwtw7Ym3OpVEqNTZ4mRkAIogkmN0qCsIjhcVpLhM8B+KcA8OSJ6JYpogvuHx54NED5QEWW965/2ZJ8/qcAvblcgZlOzmUGv+vTALV3911DwGn2ckHq9vb2xifye94EIFFe3Ino5NGRwQFNn9S5oq+HTf4SQG8PNKhioTwxNhfyLRePjX33Wbd61RBEZ+eaA9mI/whFkio/gcjBulQZxyYwXQLgxf7j4YfJwDmjw6mf+ZGOf1tSol4REIKo15kJIVdUBKHa2ZmnKwg4xbYDDirJ42TwhtHh1H1BK4Rd6APs5qEjFtUPMb41mkm+z6vPjo7EfojTEBivUuUYeDSOQm86fcsf7PWWLz+jZckuT38OjPOqwMlqioFfGWZ+9ejorX/SyRSWIFzJgfjs3Ejqeq9FvL133UGULwwA1BZy7gogXD75/O4f27r1qqmQdaV4AyAgBNEAk+QnYm/vxqUTU89vAtGri2XNw4Dy/1t/2ALQL6fbYXNLNpO6Ztaid0Rit9adlGJY6iTns4OAHIMeAXErgC4wXqFZHHcw08ZcZvBWP5mD/N7Z07+Omb8z0w+PxdCyKp2+abtb/c7O/kPY4HsA7D67jH/d9q7EcUSkZC+ph/jO1viuiaGh63ba2qKOnsT5YPp8xfhLqjfAeNAqr7BibgNIEU5RPWdviPG
tJS1PnjU0NKTUN7OeMARxSG9i73geNwPUa2tknAOQgwuxqGYKYDzKhByBnmXwSwjoBrB/JfZ01eT4bpuEJIK81Y1VRgiiseYrkLRhjdRKtTKZ3+sKBp/l6OBxZvrAAfuadzj1zYesWHdArGBeToT3OBbKJ2EYK7PDA78IJKxHoe7uta/MIzZEwAGlYr62BI39odyDrw2jvbvvqwR8cFokpjOzmcFv2kVs604sN0D3Athj5u/0B5PNM/9xX9yn08u3tZ30EqMl/1GAz3cQxZNk0uGjo4NFQrE9QQliuZ7YxwFckE0nv+51cnCpW2DG7WaML9gynHrEyWltPevfZHD+GodarcDgj+TSqS/XOudSv74QEIKor/mIRJqwBNHWlTjSILoDwLIZAXjMjBdOGBu69TEPoaiju+/sFwzJl82qy3wHTKzPZlNqoar6sU5G+edSAB3jtWiXf9PYEGb17WWH6O4+cY8Cpu6yqVl0izd19PRfAeZNtob/DwU+PJtN/c5noNTekziVmL5lJ1Q3mYIQRC3koGRt7+n7CDG+YJO7AOaL998Xl3kZoEuG7K8DvKEKHKp+H6Ti/CMgBDH/mM95j2EIorio7nU9wCdW+bEro/CnCbjYVn/cZF49lkmpnXZNj2YRS7XGnzxRp5Zpa1u/F7UUhqc9eJh/ASKlCrPUTQxckUsnz9UJVHEyYNzX2sKrh4ZSfy+Xd7FvbMqmk18LMkgNCYEJF+VGksohYNbjRxAui3Sgk4PqSGFltBR+AuCNMx0HVxUdeujxLzWWtNxJQHu5PgGfGk0nPxkECynTGAgIQTTGPIWSMgxBKLdIMvmnAPaxdRJ40XNbbIIYlIMMqtLdlR+YjMcP3zp081+d9R1lC0x0JrH5AZsn1H8avPPYTOb7O5x1O7r7zgFwpddi19GR+AeK0dkMKPfPt4BQiHHhXU4jtte4nHMDF5djL4KwjOTLnrkS4DNsfeUZfGEunbo8iFdRpX0HQU9C011qbESucxNkrqVM/SEgBFF/c1KzRKEIoqv/JCLLEGw9bp47fkJ1dvV9lgkfmynnvpD7tWX/XbPrfs5kXjmWSaWd7ThOG0+YFFtpcEGdbMpuq1o7hEaV5dpHGNl1ZWsliKnx3X9cKzko87nTFbgaQtfYiOYMt1pxl/rVISAEUR1udV0rDEF0dvVdzYR/sS3sOs8d3/G2d/cdQcAPASwpFfY1KPs2WirgNB7r8ltU2B9KKqKdBfogMT5baqrAzGtymdT3Z5OQ0xgeDblFTRBEOAGMN3FRnVe+iKdY/ePZTPLSICcHJZOLquvDuZGkOn0Efrq6jnuxSUtVhrt3TFfSGPYDNygF6w4BIYi6m5LaBQpKEDojsJee3ksyzW5SnUYiSZEZxP1Uc4fBsje0d/WvImK1iFkLqm58zvarxcAFH+rqWrcPG+Y7mXFSaTGdcQYIoWIquitjudO9loGcOTl1TJAb2kWCqPAOU3++GyDtnQy3eWdwCwHvArBvuQwxPjeaSV5U+1ssLdQDAkIQ9TALEcsQnCA0eY+rTC7U07NmnzzHR+y3r6MiiIoFjfA75Lk3m039uQyd4wQzfVKoqKsxPjtOKNpTht8UKZvB83lj3xjxwQC/mpne9sKJ6pBZIUp0jYQjCHcxmD+azaSUR5JbSJDpunpXXb8RBvw9QBiXgC1JsTpAQAiiDiYhahHqhiCqJBsnHpqTziQDR+fSSeWFYz1O+0M5btObj9qw64uen7itfAHQaWOpULdoyMftZNDWk3g9MZ1PhOPAeGlV81gdQagYSBMAdrH1uZ1hrMqlB8b85BCC8ENIfp8+EQoUzYdA3RBEsNhJgSbAeQHOrsqoIBDHKcFhQC8w07G5zOBdqmNNUEBXN9qyoB6XBP3G8qQV6hB4+XTB8ARhhbcAGTfBNO+0q3eUmqg1zifYXXN1AglB+E2T/C4E0cTvQN0QREQ2CGshd95gtpFAW++a/Y18fBiwwn9U2Bmcdgg7uTjvWfgF9etYse6tMPl7AL/S5xXa9sJFst8TKG0yDU21GPcr19xavJis8Be22Ee6i27M2JTLJO2RbCvE1BBEZA4FTfxZLcqhiYqpCac9OEFU3lSu1kCruU8xa6deK8xOVZBdVeRmfyj36bRDEHDv33dpPf4fJmMFx01tz3Ac6ubykp243RHzSHXzZwa+Axg/norTg1uHblbhvLW2gJoIgnH55M7dLyzHPNJdVgPwuMGFozKZW/7bDfPOzsRbYNC9DLysVCbSuap1rqV+/SAgBFE/cxGZJEEJQnXodCEtL54P3HPjc2EEqtilA3+FyUeOjqZmggSGaVBT1s2Y7FAhVSzybnYI1cXsWE98FwpY6xYiRHO5TO3or0aePxw0rEgtBKEz+uvCpBDR4MTzu21wC55XceNcsVlE9qIap1iq1xkCQhB1NiFRiBOKIOr8opwdj47uxErAihll3bVQp52l8V0unHUK0HgpqbIOErGM3AbRy+zRYr0WSV2cp7Dupdp7AyFsEDqCcAm0OMnMp+QyqUHd+6SNcQVvctS1UyKauwnYg4D/MYHfGCZfG+WmIIrvQdqoHgEhiOqxq9uaoQhCE2oj7G5SGW3jpnk3gNeXQanmZq4foM67DgCPmcA5BLqtHPHVTUVWeR+CLwbon2yJhZ4lk1eOjqayOjm0l8s8Yjvp2ujs7H8jG1ZYkz2nf6+RIKxTYO+6g5A31UI9nUWOgd8ibrwrNzTwR50sztAiAJ412DgmkxlQrsqBns6evpOZcZ3tXkaFd1mghqRQ3SIgBFG3U1O9YGEIYo6C9U0CvDqbTqmUoZE9ml38djAue8EvSAWIU6cK1zsMlZfD+Ocg2r2cHEiRjVeuCV2gviBJiMqDV/GTWnd55kZm7p8FSAQEYZ2QKhdrEGjzkvi2D+oCG3Z1rX2DSTGVN2Pmkhtw78RSTmz9SeoZv0krOQYoTzBbsD/8V2Fy6qigF/b8+pDfFx4BIYiFn4PIJXAShN/tVp0e2y/jWUlofbjvgO6W1Qxck+9B5SywvJcAuBqZnXYIZ99+xnldfb9dermPYjpP+jIYKqPdTIgMpSZjfCeXSaoMfrOM2n7RXJ3yW30YuBlEq22/qXSj65yhRabnrqdPyfSh2W3Rja1x82wvV1nVFxl0xewQLYqc/T2oqplzqbNwCAhBLBz2c9ZzReIb4CEUeLVbzgKPhEFPATh3/3046cwPoG5OFxC/jBnrKhIGER+bHUmpsBCRP54+/C72h7IQlQEFp8ULpBrp7O77JAOfmD0o/rlJOHVsJPWQc7Cl1KRHg/nzAL1OC4aLzGEJQrXd3r2ujWCqXf10MqMi0U8dNTp6m7qDMetxOQUorvo5I7Yplx5QcziLuNp6EgcTk8r5PR3mWzWqnBuCnj4ifymkwTlDQAhizqBduIZdsqqptJaPA5QnxvCSlmVn2VNpuiSfKQ8iaMrRcSJsGB1J3jJXo/fKv+13CnDaIaZlDHh7uqMj8SrESNkQ/tExvlnpOS3CZPNtIIsUbEmYoG5A/7YUjqQc1FAbgrwagihFaXXm5lAr/KW5dP
LjOtfbjp7EoWBSsapm7CLlwVkpVDEM0FNW+lQT7wDhoMq5pT/AMN+THU79eq7mXdpdGASEIBYG9zntVadfdux6tfr2op8/bQbQp8k37SfzU8x0avmGsl/hWn53OQn4xlByCVKnFtBrc+nk6UHiGLV39a8hYmWYfXHIMexgIhXELmMw3227g/CIGc+vcGbuq5Ig0Nl5/J6m0XIPAf9sk8/TAB3i8p9myIoc6IQoUsyGxFOKzwMCQhDzAPICdFFOb6lyEtt3sJYoXjkfEolE7LEnSBGESiO6XwDZ8wS6eiJufFyXxCdA/dBFNKHFVRu+Oafd7BB+t6edAnb0JN4Bxjdc1UazK+QZuLFgGJ+4f3jgUY2xW0ts1RKE6roUnXbAkUJ2aHIp3uNmgFZ5s6ll6jMEy05S8c5oJkmdRDcX8i0Xj41999nQkygVGgIBIYiGmKbqhFT6YoNJ5Q54J4C9bK34JnaxvG5anzkMhPVM3OOISrqNgAeYebAwlb99vr1WKt1dLdarSBGqQ815+qg2QZLCp2XZ0ysJUHmZOwHsbTt1eeFTkddad7GtFoIoyfZNmwuvgqLA4I/k0qkve71Nlqpxgo4F4xQGDnaOS4UPAePbCzHv1X0FUqsWBIQgakFP6goCgoAg0MQICEE08eTK0AQBQUAQqAUBIYha0JO6goAgIAg0MQJCEE08uTI0QUAQEARqQUAIohb0pK4gIAgIAk2MgBBEE0+uDE0QEAQEgVoQEIKoBT2pKwgIAoJAEyMgBNHEkytDEwQEAUGgFgSEIGpBT+oKAoKAINDECAhBNPHkytAEAUFAEKgFASGIWtCTuoKAICAINDECQhBNPLkyNEFAEBAEakFACKIW9KSuICAICAJNjIAQRBNPrgxNEBAEBIFaEBCCqAU9qSsICAKCQBMjIATRxJMrQxMEBAFBoBYEhCBqQU/qCgKCgCDQxAgIQTTx5MrQBAFBQBCoBQEhiFrQk7qCgCAgCDQxAkIQTTy5MjRBQBAQBGpBQAiiFvSkriAgCAgCTYyAEEQTT64MTRAQBASBWhAQgqgFPakrCAgCgkATIyAE0cSTK0MTBAQBQaAWBP4/px9TTYaZec0AAAAASUVORK5CYII="/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-65"><g><path d="M 211 701 L 211 726 L 51 726 L 51 744.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 749.88 L 47.5 742.88 L 51 744.63 L 54.5 742.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="8N6JJebqrzA787TgpwUj-66"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 737px; margin-left: 143px;"><div data-drawio-colors="color: #393C56; background-color: rgb(255, 255, 255); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; background-color: rgb(255, 255, 255); white-space: nowrap;">no</div></div></div></foreignObject><image x="136.5" y="731" width="13" height="15.75" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADQAAAA/CAYAAACvv+soAAAAAXNSR0IArs4c6QAABKNJREFUaEPtWH1MU1cU/wFmWAoraJawyFacgIIoLszMBHCZm07M5A+D4seyaMQvVDY+WoFCakVWSoFaUReNcTP6h44lOpdpNsVghpqp6BgwjNvYMqe2dmUw5pCvdrtNSlree7nvMdYH5N2/mt5zzzm/8zvn3POun9PpdGICLT8J0BhnU2JojBMEiSGJIR9HQEo5HwdcsDmJIcEh8/EBiSEfB1ywOYkhwSHz8QHBDFkf2/FJ7XnUX/kGVqsdDocDQUEyzIyZjjWr38aCV+dh0qSAEcEYGBjE7dutuFh3Fd82tcFi+R2Dg4MuXVOmKBA1Q4nUpQuRlJSIYHkQq40hQHb7H9i6XYsHD6xegstSX0NxURZ6e/tw9FgtTp3+YsgIm0alchrU+Zl4eV4cb1AkKHWXr8Nk/hidnX9SzwUGPoO05W9gc2YG5HKZlzwvQO+/tx67dftx7fodqjEiEBIiR0W5GglzZ1Hl7R2dKN17EDdufkeVHS5AgqfTZiMmOnJoiwpo8ZtJrpT67NwlQQbjZ0fDaNgFhSKE89xjmx2a4mq0fv+jIN2ewmFhChgNasTFRrn+pgIi9eBwOF21ImT5+/tDX5aHlORXWI+Retmz9wAu1V0TopZVdnpkBKqMBQgPf44OyK2BOJiclIhNmatAFJDV3HwPxuqjaG+/z2poZXoqcv5NV7ZVf+UGtDoz+vsHGNuhoc9i25Y1SEmZj1BFCPr6+tHW9hMOfngSLa0/sOpLW74IqrxMfoAImC2bMrBubRrIb8/1yGJDTm4Zfr3/iGFo0esLXDkeEODd9f568jfy8vVobrnHOEOCZdCrEBERztgjjclk/gjnPr/M2CPNwVRZxA/QnPgYVFUWcrbKw0dO4fiJMwwjiYnxqNCrIJNN9tq7easZ+epyBjvEqUpDARISuJtJV1c3VLsMrEyRjKDWEPEka9s6vLM2jTPXv/yqAbrSGt6AuAKwZHEySjRZDEaHKyZ1t3tPDaOuZ8dF8QNEWjCpH67VcLUR6oIKXoB6nvaisKiStU1rS3birSXJ1Cbx8KEV23fqQC55zzV1ahgdUHCwHOZqDWJjZ4wKoI6OLmTt0DJqjo8dtwPd3U+Qm/8Bo93LJgfSAZFL0mwqxqyZL40KIK6JhI8dtwM9PU+hLjSisbGF4RO1hvgYEpJyEw4QV7qM25QTvSmMdsqRpCdTde2nFxj575O2/X8A+rrhFgo1VYx75L9erBs3pPu+yxFaSOvOzillnQFHOvqQqbtmX4k4gAioM2cvosp0jHWKH8lwmrFqGbJ3vCseIDKgFpeYRvRhN7z4oqOU2FetAWHJ5/eQpzMWiw15qnL8/Mtv1HGHS+DFF55HuV6FSOU0l4iogIgD5KtVq9uPpqa7gkERZspKc70+NUQHRFC4H0kOHDoJm62DCow8CWxYvwLpK5aCPJh4rjEByO0QAdZ2tx3nL9S7aovrGWthynzGN5Zbh+B3OWr4RBaQAIlMANW8xBA1RCILSAyJTADVvMQQNUQiC0gMiUwA1fzEY4gKeZwJ+I0zf6nuSoCoIRJZQGJIZAKo5iWGqCESWUBiSGQCqOYnHEP/ADZNm1jj/+tzAAAAAElFTkSuQmCC"/></switch></g></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-60"><g><path d="M 211 601 L 261 651 L 211 701 L 161 651 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 651px; margin-left: 162px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Failures?</div></div></div></foreignObject><image x="162" y="644.5" width="98" height="17" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAABECAYAAACbHqJdAAAAAXNSR0IArs4c6QAAEYRJREFUeF7tnX90HNV1x793dmSJkGJoSAyEH/lVTpJSmoONtSvt6ojfhNgxAXYlEwxOSUKaUtOkpTmltE0bk4SQk5xwCBRSBwwES7sYDHVLiF2ieH9o5dQpJYSmSSAtuBhMXWMIyJJ255a32hWj3ZndmZFmdq298+fO+/l5b+c789699xHkEgJCQAgIASFgQYCEihAQAkJACAgBKwIiEDIvhIAQEAJCwJKACIRMDCEgBISAEBCBkDkgBISAEBACzgnIF4RzVpJSCAgBIdBWBEQg2mq4pbNCQAgIAecERCCcs5KUQkAICIG2IiAC0VbDLZ0VAkJACDgnIALhnFVgKSOxxF1gXOFHhUz4Qj6d/JofZTspMxxL/DkxbpxJS9g4mk6urc4bi110bIH1NID3Vu4x8NF8JvmPTuqRNE0hQMujA7+jM1/BhPMAnAzgt8otKQJ4AUAOoFSnbjwyMpL6TVNaKZU6JiAC4RhVcAlFIAARiODm23zU1B2Lf5CYNhAQdljeOAPf4amOvxob+94rDvNIsoAJiEAEDNxJdSIQIhBO5kmLpKFINPFZADcBOMxDm55kjRL5HcP/4SGvZPGZgAiEz4C9FC8CIQLhZd40IQ9FYvE/A9NXAISq6i+C8GsAWTBNAPx2EPrAeFttO+kZaHTJ6I6hf2tCH6TKOgREIFpwetQIBOOxzg5etRDWbGUPogUnnMcmhaOD3QTjEQBHmYooArRBp6kvptMP7DEXHY/HQ88+jzM1ws0AvX92tTwy2YULd21PHfDYHMnmAwERCB+gzrXIhSwQTtnIHoRTUs1JF4nED4OGTSBaNUsciP9iNJ36OgC2a9np/fFj9AI2AdRvzsuMq/PZ5N83p0dSqxUBEYgWnBciELLE1ILTclaTenriEdbo+wCOqNxg4M6p8SOv2rXrjqlG7Q/3D74LBeNRmrZ0Kl+cm+zCBfIV0YhecPdFIIJj7bgmEQgRCMeTpUkJa5YKgb1k0Fm53PCTTpsUiSb+CMAtpvR7WKOzZMPaKUH/04lA+M/YdQ0iECIQridNgBn6+/v1icI77gMQN739P4IiLh4dTY07bUpPz8DprPEPABxZzjPJwEfymeR2p2VIOn8JiED4y9dT6a0gEN3dq5doemEFiD4CYCmA4wDopg69CPDTDGwuaqHUj3cMPeeks35sUteU6XJT303+6rQV573u7o8fQR1T1xHwCQDvADAO5p8z4V5jsrBx584H99nxUZu3z+0JLSMy1oJxDoCTTKxfJOAJEO5eFOItXg0Vlp4dX9w5QSvBuJyBUwEsKben5MBGwFPMPFycKmyp11aVp79/bdfE1OtXQ8MpYF4G0NHE2JDLJv/SyRyopJF9Jje0mpNWBKI53OvW2kyBUA5PGtM3AJxtYbpo1+4iM7YUQ9rnGgnFQhQI0rTdMPh+gN9jDYi3duqHx0dG7jpYdZ96+uJnsIFv11r1WJb0Koi/hAJucfqm3t8ff+tEkb4ExlUO/RQKxLitWOi43m8Htp6e+Ieg0TYGji73dr8BPmcsk9rVgn/LtmySCEQLDnuTBGKuDk+K5PMaa6uy2aF/tcO60ASCgGsYpYfvB236XCSiy3Lp4SHzfWUFRCFtPYOvcSHE5SJ4zNCLl4yNPLC73vSNROK/jRANl8Xe1Uxn4BfQ6EI/9wNq9yD4iUldP2vXyKb/ddVYSewbAREI39B6L7gZAtETS1zMjHss3jJfhXpYgB4HoJYjQmDjNFDJjr3Wc7bB8s5CEwgQ9pmcv9T6exagZ8pfE70AnjamQmePjW16sTIjli79dMeiww7cAvCna2YJYR8xfsIg5WQWYvAyAn63anlPZXuKjMIFudwD/2010+rUUVr6Amk/KY0ncSeAXjDeXStU/vkm9PRcdBJr+j/PElaiW0bTw+vqmch6/1dJTi8ERCC8UPM5T9ACcXrf4Am6YTwK4AOmru0nwrqJ148ctjJbLL8Bf4LBXzUFZFPZJwFeNZpJKRPImmvBCcRMD3mM9dBgfmTovyo/qeWd8SJOHEunnjKBsPE+5p8bjHUnHofHUqmUEuKZa/nyj71NW9TxdzT9pWL2WH60U+dLrPYlwtHBfoKhHsAVES8y8JUunW+0Sr+8L/5u3aDbGaU9kMpl+fUz1+lv82XzvMbFc7PZzT+ba/mSf/4IiEDMH8t5KylogbAwN3xFY21FNjukoqnWvXr64meyQQ+a7eGJ8WW7DcuFKBCl5RhdO88sDnbQuqPxpRpoW5X38XYUeWB0NPV/dWBTOBa/gphuNT/0ibA2l07eW50vHE18iwD1Nj59OXg7VxvZiw5iy2wHNtv9k0ZTw/J+d/9Fx2uF0P0AdZsSjBNhTS6d3OypUMnkGwERCN/Qei/Yh1hMT+tUiFWHPlAtLFmkFF5LAbTCzcOkktbK5JEZ9+azycutlgoWokAQ8Le5TPKLDkacIrGBm8F8tSlt3aWiqjIpHE2oL4nr3/y91rns1HPXHP7W1yceNH8NOA2VHonGzwfoIQCLVB0MPKej2J/JbH7GQf/qJgn3DXwABm+Z7RxXWuZq6H0917olvzcCIhDeuPmaK0iBKIVMCNEfMBAFcBoBiw3mS8ayqYzTTs7FTBTzcB6Em/qt+uQmv4WD2GsG8/lOeKllnJBB/wKU1vvVVbT7ArBjb1FGje9AyXJpih4C4cxKOcx0WT47/L1GY1p+w98GkNqbeJxBT3WQ8a10OvVSo7z17pc9r+8vm0tXkhbBfP3xx+Gm6mW1udQleeePgAjE/LGct5KCFIj5aPScHrCHvkD82tALfY0sihTncG/8o0T0wMw+AuFXKHD/6Gjqf5yOg9UXm8WSHoV7E3cT4bIZgQjAKsmuD5FYfDmY1EFPyj+kcqkN/WtHM0m1ZGYbt8kpF0nnDwERCH+4zqlUEQh3ntRuBGrevyBcOOX19CZuYMJ1lTYQsO03b+n82BM/uOc1NxMmHEtcR4wb3sxTu0/QExsYZGa1N2He1FaH9DxE4I2d+uEjFn4ZbprhKK2ltRLwKkBXjmaG1ReFiIMjks1JJALRHO51aw16k9oLgqX9q4/uKBQjBKwGoPYvKkdLqr+8bXjyBbgHkerU9146MjJSqMfRJjzFLwHth+7584cALDcJxFgIHR/OZO7bX/mtbPm0tc4JbwUGfkaMYQ7RlhOWGL+Y72UeZWrb+ZYD9zDzgKmP+4l4TS6d+if3/ZYcQRMQgQiauIP6WkUg1Ab25OQrSwxNP1UDncDEy8GlB9O76nrltpNA2CyRVQ+z1b6Ag6ngNImlEUJv7+Aygwy14azCpDS6lL/LVjLoromJxT90EpG1UYEWEV8nmfnyfDalnPfkOgQIiEC04CA1UyCUT0TIMK4hQL31He8JjwhEDbZmCIRqRGk8i8Y3iXChC49ttT/wXZ0KN1hZvjmdE9VLagC7DujntC5J5w8BEQh/uM6p1GYIhAo2F9Kn1
jPhDy28duv1R715qqWNE2cSiUC0jEBUGmIS/jVVm8X1xvZ50nhNbkfqMbcT2tJ8mukzo9nh292WJembR0AEonnsbWsOWiBsnJdq2zcdVkJ5CucB+pHGlM1mh/aEY4lriXGjCIT9ZLI0PSV8IZ9Ofi3gKUjh/sGTaIrPg8ZXgKH2M2pDprzZqL3QtPPdnhcdjV56VBFTj5gc4orMtDKfHVZHlMp1iBAQgWjBgQpSINRGYsdhL99eDlNtpvEyCENs0MMd2tTjxxwT2mu3ienGimjBbVI73IOwcl4jxj/ksslPNXMKqvHv6tp3cpFCa4gxACrtL82+HHhhV2exCOUtkVqbOdAe6xaB8AjOz2xBCoTV0ZFvmOx/H0X+eIPQDzMImi0Qkd6Bq0D85lnGLkxPVSd6ehPfYcIn5/MLyGp+uKnHz/lVp2yKxBLngnG3eRlKhRLhqVCfOeBgo/aJQDQidGjcF4FowXEKVCCqbPMBPIsinzU6mvqVQzQ1TlmBm7lGEysJeNjU3h9pfHBlNvuw2h+pe1m92c9H+60qrREyD8d0qqhK5XAd6jS33SrKLrOxravjpc0VU1sV0oLYWAGmiDrsiYFcl753TSNT3Eqbw7H4WmK609QHeftvNJEW6H0RiBYc2KAEwsY23+5wG0tSpZPnOorqiMhT5vMN3M1pY+FagXDu3awepgar8BfHzmf7Lb8gegZOYa1U14xHMQPr85nkXzt1GItE4u9DqBSuY8YogIGb85mkOleidM3VY9sioKAIRAs+J4JokghEEJRd1hGcQNQG6nPp3WsRPK70qAvUUa67Nx7ViFR48cPLqOuGHDcNh2/tt/yCUHGvNGwC0SrT/b0gXjmaTu1sNE2UoE8WltzMYGVpVrnGGdoF+czQSOWHaPTi9xQQGiHghPJvrmI+1QgM4FhwG/VB7h9aBEQgWnC8ghII1fWadXFgP0P7cD4zNFYPjTpHefcL+DyYvmxhFmu7xOPHJnUkEn8ndBoB432VNiuhm+ji+K7tqQM2/aBwbzxBVFpKmW3FMw8CZ8euuzd+jkalaKkzdTo8vc26vcwPwcBq8xGk01+GSzYCfKlbIbIO+Q1H3uIt+FeSJs2RgAjEHAH6kT1QgbCM2UPPkGZ8KrcjpcJAzIqVo4ThuRcoDMbX64RxsA0v7odAqHX5cDSxwcISa0tB09ZVn5OtwlDoHR1/Y+vz4aNA2HwFqGm0H8zXwsB91edN1/FRsf366O0djBlkbDWf0wHgJWb63NTBxUkrT+np88ixEaBl5i8Ug3nVWDalzrBwfFmee+HQ4stxJZLQdwIiEL4jdl9BkALRIGaPOiYzB1A51LNxCkC/V3WCHKCiknJpXf2Icm9tlyR8EgjYHMSjmlMAeBeg/bvNEZ5FYtzKVIonNR2G20eBUMWrt/TOg5SqOr2tMlHGQXgcTD+d/sH4fYCWWnyljTPxn+bTqdvsvpAiscQ3wPgTi/uvAvxTQHty+h6/XX1MAlhSm5bumBxffLXb0BsiEO7/962YQwSiBUclSIFQ3XcZs8dMrACmb3IH3YqC8ajpIJiXyaBzc7nhH1fj9UsgStY90cRn39iHuKmB45e5SUUC1sMo3MmarjZ+3xuEQFREYtFBuuONs6UTHqago1DZJee8gnYrwMp72u2ljii9u0vndVZHlDYqTASiEaFD474IRAuOU9ACoRAsjw6crIE31lk2mvVgBbDdIP68Om/ZKqwC23gJ+ygQqn3U0xc/gw18G6D3Nxjal5j5j/PZVDIWu+iYAuvqeNXABEK1rbSPs4eUQChRe6eTqchAnomvrDrn2jZruY7PvPEFsh7AkU7qAKDOp7j2+GM56TXCqwiEQ9ItnkwEogUHqBkCUXlgqf0FzaBPMnEMwEkzSxulMBv8nwxsLmqhVPW6fvW51upBZkxOrdi588F9ZsQ+C0SpqrJ3+PlUsvah02aWTsp9IKLbFoV4S+XNuMak1uclpuopp9qrd+3vDkG7ksnoBUgFSaxsYqvloGcZtNUAfXdnZviXTk1izfWUQm93HjgDhNUWdYwDvJtYyxZhbCgcPGrM7ZJSdZ9EIFrwweKhSSIQHqBJFiEgBIRAOxAQgWiHUZY+CgEhIAQ8EBCB8ABNsggBISAE2oGACEQ7jLL0UQgIASHggYAIhAdokkUICAEh0A4ERCDaYZSlj0JACAgBDwREIDxAkyxCQAgIgXYgIALRDqMsfRQCQkAIeCAgAuEBmmQRAkJACLQDARGIdhhl6aMQEAJCwAMBEQgP0CSLEBACQqAdCIhAtMMoSx+FgBAQAh4IiEB4gCZZhIAQEALtQEAEoh1GWfooBISAEPBAQATCAzTJIgSEgBBoBwIiEO0wytJHISAEhIAHAiIQHqBJFiEgBIRAOxAQgWiHUZY+CgEhIAQ8EBCB8ABNsggBISAE2oGACEQ7jLL0UQgIASHggYAIhAdokkUICAEh0A4ERCDaYZSlj0JACAgBDwREIDxAkyxCQAgIgXYgIALRDqMsfRQCQkAIeCDw/9PUXuoYdJv9AAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-61"><g><path d="M 261 651 L 356 651 L 356 87.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 356 82.12 L 359.5 89.12 L 356 87.37 L 352.5 89.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-62"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 662px; margin-left: 308px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); 
line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">yes</div></div></div></foreignObject><image x="299" y="656" width="18" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEgAAAA/CAYAAABDyo4+AAAAAXNSR0IArs4c6QAACIdJREFUeF7tWXuMHHUd/3xnd++uFAQC6bUUMUAkJmIDOdvbZ11oqS1IgTaz2yttqY80VatEYzRSFZpIExSDNJAWYgSDxe4OtEV5BUqz3D5m75rTEBrARgFTtekZLMfJ9fZ2dr7mt7d73Z3Xzt1Wc01m/tz9Pj/zfQ/BexwRIA8fZwQ8gFpEiAeQB1B7RcSLIC+CvAhqDwEvgtrDz6tBXgR5EdQeAl4EtYefV4O8CPIiqD0EZnMEybLsO36CbiDgKwBuBNBds/c0gPcBekUn/fGBrPI2AJ4OEpOyfZ8ncB+gLwfoCgAXNMgYZeCExFRg0lOd/rmZTObJcaMOCkXlFwFaZVJO9IiaTX3bjWHhaOI+Bu41CQd2FHLp+6wcC8XkL4CxB6DPtHKcgaIOumswlzrWilb8Hw4nF7PEvwZwrRv6Gs2/ANx9+QJOK4pSqfNRMJK8k4h/ayHoqF72LR8Y+N1JJyU9y+ULO8YhQA4b6D4inVcWCopq+J1C0cQ3APwcwJxpOHCKiDcWssoLTjzBSHINET9piBa3aiogPDQxdtE9Q0OPlwUTLVkqX+nT6TUAVxqkTAB8m5pTXnaSHg7LIZZI0HyimY5fQgVrVVUR6TL1hKLJTQDvmSY4df5RZtpczKf2W9kUiaz9rE6+VwBc5hYRC7oKM7YV82lhI0Sbp1AsuQvM24zEDDxRzKW/6pRmdukFYJuaSz/aBM7SdddD1wWY89pw4Kju11YNZPb/3RSZNn7MQNfbmiR98Uj/vuPVOag3IkclqkbBXIOw9yoSLxvsV96zUhKNrr+4gvJLAPU2/s/AcT8q8Vzu2Xfrv/f0bAl0njfyFDMnLWSNEnB/qYv3DB1SRkTado7TVga2W6UKWdS2nnjfpR2a9hpAiwzyTwH4scTSgXx+3wnxskUBf/+k7zI/V74Mpu9Z6WCmDcV8am8VIIc6UiGiDYVsap8VQPbA0tOd/pN3ZTIZrc5nn4oYhiTdofbvKxh11OT/HsDFhv9M9TEclq+DRK8ycGnTyyJsL2bTO+2iKBxLbADjIQB/YmaVWMr7fOU358/3DYtiPTVJh6KJbwJ4xCzI7OyU05HE/Uy4x8BTYeY1xbwiHJt6grHE94nxgEUaP1DMpX9ok8Z26T/BwC3FXPpQwwuwBAjAIVQ4qarKv2eQatUaVH2i0bVXafBlCPhkq3SZpLdOLwCmtxuPb+4qaR8rAH3JYKTJUaMTwUhyFRH/AYCv8T9i7Czk0yIFq49Diom/RaN4nQh7SZcO11PNDWBTAMXjcX9J6/4NwOtNjExb1Xzqscbf7dKLgV3FXPruRlqn+qCDbxrIKUN2xvZG5R4J9KpFmimd/uH1DWls22wsZI8C/CYIByVdf3F8/JJj9bZupG1aVkNReSVAzwHoaCbk5zv9c+XGSTNsnV6Ws09vfM3lkubvtxgl3LxEaxrG4c4A35bJKP+pE7TR5sVU/QwTP2ic2psAckibYdJpWaGQOuqcXlyY6MLNohM1RZt9FMwcIOCvftJi2ex+0ZmmnlozeGaGs5CYgQ5WfNJ3RIsXQk3nDjdzjcNYYJp9hBKHNDnrAAmBsdiaBRr7RX0SO950pvW6PW+Rrt1cKOz/mxmgcPJallhM1s3DXENI26RXU5Q1em7XANpBBzYR1Ciz1hziAG0CIBpE47LqqJ6IUqWxCzeaAJos1vOeBiAbJJwSBTWAwLtWwyEAY9GcYq+90SyAqw0yLdOkTeDs2CkSWbeAJf1GZqwGsLThemDFc4IlWmZ5UQzHkuuYqwtsc2sFdlSYD1lM3Y4D5aIVG+eeP1Y6wMBNBks+JJ1WFAqpI/8jUBzFigE5UJI2EbOYz0ypyMBqS4B6e/u6pUBFDGGGcwEXCNIbDP56k2bCX6BxXFWVf9hZFIwmHiZAnE+aHgZ+Wsylf+LmrOLkrUinMe30FX7SF4FpMYOjBLoGRN+y2wSEvHhcPr9UpudA1XuU0TZrgASVTZ2xttHF7SgYTSwnQJwqDCMEhhm4s3EqriuprUDPAvRpAEMgVpl9RyS99NbChf4P6ncbh0FUiBrS/drtFsttVY19MKDCTLfaHu0ddicjSB/rzCsH8krO+Q3bvykAoyB+UJ/wPybuT2KxDZw38jkwP0pA0EKuKaXtysIkL7/DLG3vCpQOZzIHP5yKHA1RBt1ro6O6qNsCFArJc+CDeHvma2NzIFrOPlZgBSPyaiISi+9MWu8ZkRZD4pIld1widQSet3F2+iWO8Es1m/6u42cfh2vjlEIm/KCYTf/MjQWiQ05o3btMNcwN8xmaYRDfqmaVQSNbKCYvAZPY29q5Nwmx9nNQo1KHa2OdzHb2sfO5eheaM/Iwg7cYu6QLnGzrVZ23zUlaiDnKEiWK/SnxocA8SRuMbLEAmnc0F05W9QYjcoKIfgFgoQse0wrgxLM4Ls/3abSTgI0A/C7kCxKNGLsrWuBHAwN7P6rztPyy6rBWOM4+boyqRlPnyA0g9DFxDMCnGhwaBXAMzC9oPt+v6ruRG7l1GlGXfAH/7SC6RVxEavtZHTBxzPsnAX9m5lSlrB0cHDzwgVF+S4AcupnjOXY6jsxmWkeAqkW1PG83E75mdMLNQX82O+7WNkeAwrHEWmY8ZdGWXX0ScmvEbKabPNr3bAl0dJy6QFUV8QWAq18VSpTgyRuy8WAuSFzPPrPZeTe2TX72mf5By/Lu40bhuUZTBcjhHGHyh4E3JL28olA4MHyuOTsTeycjqLevmwKVfgKuaSHEdoqdifJzgacKkNPKf8YJfgeErWpWef1ccOxs2VgDaHPXhDa2Q5wdAMxvWAFOAvxHBu0un77oZbtPI2fLmNkop+WgOBuN/n/a5AHUAm0PIA+g9hLSiyAvgrwIag8BL4Law8+rQV4EeRHUHgJeBLWHn1eDvAhqL4L+C38I2WDElOGjAAAAAElFTkSuQmCC"/></switch></g></g></g></g></g></g></svg> \ No newline at end of file
diff --git a/Documentation/arch/x86/x86_64/5level-paging.rst b/Documentation/arch/x86/x86_64/5level-paging.rst
index 71f882f4a173..ad7ddc13f79d 100644
--- a/Documentation/arch/x86/x86_64/5level-paging.rst
+++ b/Documentation/arch/x86/x86_64/5level-paging.rst
@@ -22,15 +22,6 @@ QEMU 2.9 and later support 5-level paging.
Virtual memory layout for 5-level paging is described in
Documentation/arch/x86/x86_64/mm.rst
-
-Enabling 5-level paging
-=======================
-CONFIG_X86_5LEVEL=y enables the feature.
-
-Kernel with CONFIG_X86_5LEVEL=y still able to boot on 4-level hardware.
-In this case additional page table level -- p4d -- will be folded at
-runtime.
-
User-space and large virtual address space
==========================================
On x86, 5-level paging enables 56-bit userspace virtual address space.
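A quick illustrative sketch of that claim (not part of the patch; the kernel only hands out addresses above the 47-bit boundary when the mmap() hint itself lies above it)::

    #include <stddef.h>
    #include <sys/mman.h>

    /* A hint above the 47-bit boundary opts this mapping in to the larger
     * 56-bit address space; a NULL hint keeps the legacy layout. */
    void *map_high(size_t len)
    {
            return mmap((void *)(1UL << 48), len, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }
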
diff --git a/Documentation/arch/x86/x86_64/fsgs.rst b/Documentation/arch/x86/x86_64/fsgs.rst
index d07e445dac5c..6bda4d16d3f7 100644
--- a/Documentation/arch/x86/x86_64/fsgs.rst
+++ b/Documentation/arch/x86/x86_64/fsgs.rst
@@ -130,7 +130,7 @@ instructions. Clang 5 supports them as well.
=================== ===========================
_readfsbase_u64() Read the FS base register
- _readfsbase_u64() Read the GS base register
+ _readgsbase_u64() Read the GS base register
_writefsbase_u64() Write the FS base register
_writegsbase_u64() Write the GS base register
=================== ===========================
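For instance, a minimal user-space sketch using these intrinsics (compile with ``-mfsgsbase``; the CPU and kernel must support and enable FSGSBASE)::

    #include <immintrin.h>

    static unsigned long long current_gs_base(void)
    {
            /* reads the GS base directly, without a syscall */
            return _readgsbase_u64();
    }
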
diff --git a/Documentation/bpf/bpf_devel_QA.rst b/Documentation/bpf/bpf_devel_QA.rst
index de27e1620821..0acb4c9b8d90 100644
--- a/Documentation/bpf/bpf_devel_QA.rst
+++ b/Documentation/bpf/bpf_devel_QA.rst
@@ -382,6 +382,14 @@ In case of new BPF instructions, once the changes have been accepted
into the Linux kernel, please implement support into LLVM's BPF back
end. See LLVM_ section below for further information.
+Q: What "BPF_INTERNAL" symbol namespace is for?
+-----------------------------------------------
+A: Symbols exported as BPF_INTERNAL can only be used by BPF infrastructure
+like preload kernel modules with light skeleton. Most symbols outside
+of BPF_INTERNAL are not expected to be used by code outside of BPF either.
+Symbols may lack the designation because they predate the namespaces,
+or due to an oversight.
+
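For illustration only (``bpf_internal_helper`` is a hypothetical symbol; the namespace macros are the generic ones from the module/export headers), exporting into and importing from that namespace looks roughly like::

    /* exporting side */
    EXPORT_SYMBOL_NS_GPL(bpf_internal_helper, "BPF_INTERNAL");

    /* consuming module, e.g. the BPF preload module */
    MODULE_IMPORT_NS("BPF_INTERNAL");
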
Stable submission
=================
diff --git a/Documentation/bpf/bpf_iterators.rst b/Documentation/bpf/bpf_iterators.rst
index 7f514cb6b052..189e3ec1c6c8 100644
--- a/Documentation/bpf/bpf_iterators.rst
+++ b/Documentation/bpf/bpf_iterators.rst
@@ -2,10 +2,117 @@
BPF Iterators
=============
+--------
+Overview
+--------
+
+BPF supports two separate entities collectively known as "BPF iterators": BPF
+iterator *program type* and *open-coded* BPF iterators. The former is
+a stand-alone BPF program type which, when attached and activated by user,
+will be called once for each entity (task_struct, cgroup, etc) that is being
+iterated. The latter is a set of BPF-side APIs implementing iterator
+functionality and available across multiple BPF program types. Open-coded
+iterators provide similar functionality to BPF iterator programs, but give
+more flexibility and control to all other BPF program types. BPF iterator
+programs, on the other hand, can be used to implement anonymous or BPF
+FS-mounted special files, whose contents are generated by the attached BPF
+iterator program, backed by seq_file functionality. Both are useful depending
+on specific needs.
+
+When adding a new BPF iterator program, it is expected that similar
+functionality will be added as an open-coded iterator for maximum flexibility.
+It's also expected that iteration logic and code will be maximally shared and
+reused between the two iterator API surfaces.
-----------
-Motivation
-----------
+------------------------
+Open-coded BPF Iterators
+------------------------
+
+Open-coded BPF iterators are implemented as tightly-coupled trios of kfuncs
+(constructor, next element fetch, destructor) and an iterator-specific type
+describing on-the-stack iterator state, which is guaranteed by the BPF
+verifier to not be tampered with outside of the corresponding
+constructor/destructor/next APIs.
+
+Each kind of open-coded BPF iterator has its own associated
+struct bpf_iter_<type>, where <type> denotes a specific type of iterator.
+bpf_iter_<type> state needs to live on the BPF program stack, so make sure it's
+small enough to fit on the BPF stack. For performance reasons it's best to
+avoid dynamic memory allocation for iterator state and to size the state
+struct big enough to fit everything necessary. But if necessary, dynamic
+memory allocation is a way to bypass BPF stack limitations. Note that the
+state struct size is part of the iterator's user-visible API; changing it will
+break backwards compatibility, so be deliberate about designing it.
+
+All kfuncs (constructor, next, destructor) have to be named consistently as
+bpf_iter_<type>_{new,next,destroy}(), respectively. <type> represents the
+iterator type, and the iterator state should be represented as a matching
+`struct bpf_iter_<type>` state type. Also, all iter kfuncs should have
+a pointer to this `struct bpf_iter_<type>` as the very first argument.
+
+Additionally:
+ - Constructor, i.e., `bpf_iter_<type>_new()`, can have an arbitrary number
+ of extra arguments. Return type is not enforced either.
+ - Next method, i.e., `bpf_iter_<type>_next()`, has to return a pointer
+ type and should have exactly one argument: `struct bpf_iter_<type> *`
+ (const/volatile/restrict and typedefs are ignored).
+ - Destructor, i.e., `bpf_iter_<type>_destroy()`, should return void and
+ should have exactly one argument, similar to the next method.
+ - `struct bpf_iter_<type>` size is enforced to be positive and
+ a multiple of 8 bytes (to fit stack slots correctly).
+
+Such strictness and consistency allow generic helpers to be built that
+abstract away important but boilerplate details, so that open-coded iterators
+can be used effectively and ergonomically (see libbpf's bpf_for_each() macro).
+This is enforced by the kernel at kfunc registration time.
+
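A minimal BPF-side usage sketch, using the numbers iterator as an example and assuming its kfunc declarations are visible to the program (e.g. via vmlinux.h or the selftests' bpf_experimental.h)::

    struct bpf_iter_num it;
    int *v, sum = 0;

    bpf_iter_num_new(&it, 0, 10);
    while ((v = bpf_iter_num_next(&it)))
            sum += *v;                      /* visits 0 through 9 */
    bpf_iter_num_destroy(&it);

libbpf's ``bpf_for()`` and ``bpf_for_each()`` macros wrap this new/next/destroy pattern.
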
+The constructor/next/destructor implementation contract is as follows:
+ - constructor, `bpf_iter_<type>_new()`, always initializes iterator state on
+ the stack. If any of the input arguments are invalid, the constructor should
+ make sure to still initialize it such that subsequent next() calls will
+ return NULL. I.e., on error, *return an error and construct an empty
+ iterator*. The constructor kfunc is marked with the KF_ITER_NEW flag.
+
+ - next method, `bpf_iter_<type>_next()`, accepts a pointer to the iterator
+ state and produces an element. The next method should always return a
+ pointer. The contract with the BPF verifier is that the next method
+ *guarantees* that it will eventually return NULL when elements are
+ exhausted. Once NULL is returned, subsequent next calls *should keep
+ returning NULL*. The next method is marked with KF_ITER_NEXT (and, being a
+ NULL-returning kfunc, should of course also have KF_RET_NULL).
+
+ - destructor, `bpf_iter_<type>_destroy()`, is always called once, even if
+ the constructor failed or next returned nothing. The destructor frees up any
+ resources and marks the stack space used by `struct bpf_iter_<type>` as
+ usable for something else. The destructor is marked with KF_ITER_DESTROY.
+
+Any open-coded BPF iterator implementation has to implement at least these
+three methods. It is enforced that for any given type of iterator only the
+applicable constructor/destructor/next are callable. I.e., the verifier
+ensures you can't pass a number iterator's state into, say, a cgroup
+iterator's next method.
+
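On the kernel side, the shape of such a trio and its registration might look like the following sketch (``bpf_iter_foo`` and its element type are purely illustrative; the flags are the ones described above)::

    struct bpf_iter_foo {
            __u64 __opaque[2];              /* size must be a multiple of 8 */
    } __attribute__((aligned(8)));

    __bpf_kfunc int bpf_iter_foo_new(struct bpf_iter_foo *it, u32 first)
    {
            /* on invalid input, still construct an empty iterator */
            ...
    }

    __bpf_kfunc struct foo *bpf_iter_foo_next(struct bpf_iter_foo *it)
    {
            /* return NULL once exhausted, and keep returning NULL */
            ...
    }

    __bpf_kfunc void bpf_iter_foo_destroy(struct bpf_iter_foo *it)
    {
            /* release anything held in *it */
            ...
    }

    BTF_KFUNCS_START(foo_iter_kfuncs)
    BTF_ID_FLAGS(func, bpf_iter_foo_new, KF_ITER_NEW)
    BTF_ID_FLAGS(func, bpf_iter_foo_next, KF_ITER_NEXT | KF_RET_NULL)
    BTF_ID_FLAGS(func, bpf_iter_foo_destroy, KF_ITER_DESTROY)
    BTF_KFUNCS_END(foo_iter_kfuncs)
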
+From a 10,000-foot BPF verification point of view, next methods are the
+points at which a verification state forks, which is conceptually similar to
+what the verifier does when validating conditional jumps. The verifier
+branches at the `call bpf_iter_<type>_next` instruction and simulates two
+outcomes: NULL (iteration is done) and non-NULL (a new element is returned).
+NULL is simulated first and is supposed to reach exit without looping. After
+that, the non-NULL case is validated and it either reaches exit (for trivial
+examples with no real loop), or reaches another `call bpf_iter_<type>_next`
+instruction with a state equivalent to an already (partially) validated one.
+State equivalency at that point means we technically are going to be looping
+forever without "breaking out" of the established "state envelope" (i.e.,
+subsequent iterations don't add any new knowledge or constraints to the
+verifier state, so running 1, 2, 10, or a million of them doesn't matter).
+But taking into account the contract stating that the iterator next method
+*has to* return NULL eventually, we can conclude that the loop body is safe
+and will eventually terminate. Given we validated the logic outside of the
+loop (the NULL case), and concluded that the loop body is safe (though
+potentially looping many times), the verifier can claim safety of the overall
+program logic.
+
+------------------------
+BPF Iterators Motivation
+------------------------
There are a few existing ways to dump kernel data into user space. The most
popular one is the ``/proc`` system. For example, ``cat /proc/net/tcp6`` dumps
@@ -323,8 +430,8 @@ Now, in the userspace program, pass the pointer of struct to the
::
- link = bpf_program__attach_iter(prog, &opts); iter_fd =
- bpf_iter_create(bpf_link__fd(link));
+ link = bpf_program__attach_iter(prog, &opts);
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
If both *tid* and *pid* are zero, an iterator created from this struct
``bpf_iter_attach_opts`` will include every opened file of every task in the
diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst
index a8f5782bd833..ae468b781d31 100644
--- a/Documentation/bpf/kfuncs.rst
+++ b/Documentation/bpf/kfuncs.rst
@@ -160,6 +160,23 @@ Or::
...
}
+2.2.6 __prog Annotation
+---------------------------
+This annotation is used to indicate that the argument needs to be fixed up to
+the bpf_prog_aux of the calling BPF program. Any value passed into this
+argument is ignored and rewritten by the verifier.
+
+An example is given below::
+
+ __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
+ int (callback_fn)(void *map, int *key, void *value),
+ unsigned int flags,
+ void *aux__prog)
+ {
+ struct bpf_prog_aux *aux = aux__prog;
+ ...
+ }
+
.. _BPF_kfunc_nodef:
2.3 Using an existing kernel function
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 3dad1f90b098..12de52a2b17e 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -28,16 +28,6 @@ def have_command(cmd):
"""
return shutil.which(cmd) is not None
-# Get Sphinx version
-major, minor, patch = sphinx.version_info[:3]
-
-#
-# Warn about older versions that we don't want to support for much
-# longer.
-#
-if (major < 2) or (major == 2 and minor < 4):
- print('WARNING: support for Sphinx < 2.4 will be removed soon.')
-
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
@@ -57,76 +47,71 @@ extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include',
'maintainers_include', 'sphinx.ext.autosectionlabel',
'kernel_abi', 'kernel_feat', 'translations']
-if major >= 3:
- if (major > 3) or (minor > 0 or patch >= 2):
- # Sphinx c function parser is more pedantic with regards to type
- # checking. Due to that, having macros at c:function cause problems.
- # Those needed to be scaped by using c_id_attributes[] array
- c_id_attributes = [
- # GCC Compiler types not parsed by Sphinx:
- "__restrict__",
-
- # include/linux/compiler_types.h:
- "__iomem",
- "__kernel",
- "noinstr",
- "notrace",
- "__percpu",
- "__rcu",
- "__user",
- "__force",
- "__counted_by_le",
- "__counted_by_be",
-
- # include/linux/compiler_attributes.h:
- "__alias",
- "__aligned",
- "__aligned_largest",
- "__always_inline",
- "__assume_aligned",
- "__cold",
- "__attribute_const__",
- "__copy",
- "__pure",
- "__designated_init",
- "__visible",
- "__printf",
- "__scanf",
- "__gnu_inline",
- "__malloc",
- "__mode",
- "__no_caller_saved_registers",
- "__noclone",
- "__nonstring",
- "__noreturn",
- "__packed",
- "__pure",
- "__section",
- "__always_unused",
- "__maybe_unused",
- "__used",
- "__weak",
- "noinline",
- "__fix_address",
- "__counted_by",
-
- # include/linux/memblock.h:
- "__init_memblock",
- "__meminit",
-
- # include/linux/init.h:
- "__init",
- "__ref",
-
- # include/linux/linkage.h:
- "asmlinkage",
-
- # include/linux/btf.h
- "__bpf_kfunc",
- ]
-
-else:
- extensions.append('cdomain')
+# Since Sphinx version 3, the C function parser is more pedantic with regard
+# to type checking. Due to that, having macros at c:function causes problems.
+# Those need to be escaped by using the c_id_attributes[] array.
+c_id_attributes = [
+ # GCC Compiler types not parsed by Sphinx:
+ "__restrict__",
+
+ # include/linux/compiler_types.h:
+ "__iomem",
+ "__kernel",
+ "noinstr",
+ "notrace",
+ "__percpu",
+ "__rcu",
+ "__user",
+ "__force",
+ "__counted_by_le",
+ "__counted_by_be",
+
+ # include/linux/compiler_attributes.h:
+ "__alias",
+ "__aligned",
+ "__aligned_largest",
+ "__always_inline",
+ "__assume_aligned",
+ "__cold",
+ "__attribute_const__",
+ "__copy",
+ "__pure",
+ "__designated_init",
+ "__visible",
+ "__printf",
+ "__scanf",
+ "__gnu_inline",
+ "__malloc",
+ "__mode",
+ "__no_caller_saved_registers",
+ "__noclone",
+ "__nonstring",
+ "__noreturn",
+ "__packed",
+ "__pure",
+ "__section",
+ "__always_unused",
+ "__maybe_unused",
+ "__used",
+ "__weak",
+ "noinline",
+ "__fix_address",
+ "__counted_by",
+
+ # include/linux/memblock.h:
+ "__init_memblock",
+ "__meminit",
+
+ # include/linux/init.h:
+ "__init",
+ "__ref",
+
+ # include/linux/linkage.h:
+ "asmlinkage",
+
+ # include/linux/btf.h
+ "__bpf_kfunc",
+]
# Ensure that autosectionlabel will produce unique names
autosectionlabel_prefix_document = True
@@ -149,10 +134,6 @@ if 'SPHINX_IMGMATH' in os.environ:
else:
sys.stderr.write("Unknown env SPHINX_IMGMATH=%s ignored.\n" % env_sphinx_imgmath)
-# Always load imgmath for Sphinx <1.8 or for epub docs
-load_imgmath = (load_imgmath or (major == 1 and minor < 8)
- or 'epub' in sys.argv)
-
if load_imgmath:
extensions.append("sphinx.ext.imgmath")
math_renderer = 'imgmath'
@@ -322,14 +303,6 @@ if "DOCS_CSS" in os.environ:
for l in css:
html_css_files.append(l)
-if major <= 1 and minor < 8:
- html_context = {
- 'css_files': [],
- }
-
- for l in html_css_files:
- html_context['css_files'].append('_static/' + l)
-
if html_theme == 'alabaster':
html_theme_options = {
'description': get_cline_version(),
@@ -409,11 +382,6 @@ latex_elements = {
''',
}
-# Fix reference escape troubles with Sphinx 1.4.x
-if major == 1:
- latex_elements['preamble'] += '\\renewcommand*{\\DUrole}[2]{ #2 }\n'
-
-
# Load kerneldoc specific LaTeX settings
latex_elements['preamble'] += '''
% Load kerneldoc specific LaTeX settings
@@ -540,7 +508,7 @@ pdf_documents = [
# kernel-doc extension configuration for running Sphinx directly (e.g. by Read
# the Docs). In a normal build, these are supplied from the Makefile via command
# line arguments.
-kerneldoc_bin = '../scripts/kernel-doc'
+kerneldoc_bin = '../scripts/kernel-doc.py'
kerneldoc_srctree = '..'
# ------------------------------------------------------------------------------
diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst
index 8e3cce3d0a23..2ad08517e626 100644
--- a/Documentation/core-api/dma-api.rst
+++ b/Documentation/core-api/dma-api.rst
@@ -530,6 +530,77 @@ routines, e.g.:::
....
}
+Part Ie - IOVA-based DMA mappings
+---------------------------------
+
+These APIs allow a very efficient mapping when using an IOMMU. They are an
+optional path that requires extra code and are only recommended for drivers
+where DMA mapping performance, or the space usage for storing the DMA
+addresses, matters. All the considerations from the previous section apply
+here as well.
+
+::
+
+ bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t size);
+
+Is used to try to allocate IOVA space for a mapping operation. If it returns
+false, this API can't be used for the given device and the normal streaming
+DMA mapping API should be used. The ``struct dma_iova_state`` is allocated
+by the driver and must be kept around until unmap time.
+
+::
+
+ static inline bool dma_use_iova(struct dma_iova_state *state)
+
+Can be used by the driver, after a call to dma_iova_try_alloc(), to check
+whether the IOVA-based API is being used. This can be useful in the unmap path.
+
+::
+
+ int dma_iova_link(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+
+Is used to link ranges to the IOVA previously allocated. The start of all
+but the first call to dma_iova_link for a given state must be aligned
+to the DMA merge boundary returned by ``dma_get_merge_boundary()``, and
+the size of all but the last range must be aligned to the DMA merge boundary
+as well.
+
+::
+
+ int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size);
+
+Must be called to sync the IOMMU page tables for the IOVA range mapped by one or
+more calls to ``dma_iova_link()``.
+
+For drivers that use a one-shot mapping, all ranges can be unmapped and the
+IOVA freed by calling:
+
+::
+
+ void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
+ size_t mapped_len, enum dma_data_direction dir,
+ unsigned long attrs);
+
+Alternatively drivers can dynamically manage the IOVA space by unmapping
+and mapping individual regions. In that case
+
+::
+
+ void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+
+is used to unmap a range previously mapped, and
+
+::
+
+ void dma_iova_free(struct device *dev, struct dma_iova_state *state);
+
+is used to free the IOVA space. All regions must have been unmapped using
+``dma_iova_unlink()`` before calling ``dma_iova_free()``.
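Putting the pieces above together, a one-shot mapping might look like the following sketch (error handling condensed; whether the device is programmed with ``state.addr`` directly is an assumption of this sketch, see the kerneldoc of the functions for details)::

    struct dma_iova_state state = {};
    int ret;

    if (!dma_iova_try_alloc(dev, &state, phys, size))
            return -EIO;            /* fall back to dma_map_*() instead */

    ret = dma_iova_link(dev, &state, phys, 0, size, DMA_TO_DEVICE, 0);
    if (ret) {
            dma_iova_free(dev, &state);
            return ret;
    }

    ret = dma_iova_sync(dev, &state, 0, size);
    if (ret) {
            dma_iova_destroy(dev, &state, size, DMA_TO_DEVICE, 0);
            return ret;
    }

    /* ... device performs DMA against the allocated IOVA (state.addr) ... */

    dma_iova_destroy(dev, &state, size, DMA_TO_DEVICE, 0);
    return 0;
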
Part II - Non-coherent DMA allocations
--------------------------------------
diff --git a/Documentation/core-api/folio_queue.rst b/Documentation/core-api/folio_queue.rst
index 1fe7a9bc4b8d..83cfbc157e49 100644
--- a/Documentation/core-api/folio_queue.rst
+++ b/Documentation/core-api/folio_queue.rst
@@ -151,19 +151,16 @@ The marks can be set by::
void folioq_mark(struct folio_queue *folioq, unsigned int slot);
void folioq_mark2(struct folio_queue *folioq, unsigned int slot);
- void folioq_mark3(struct folio_queue *folioq, unsigned int slot);
Cleared by::
void folioq_unmark(struct folio_queue *folioq, unsigned int slot);
void folioq_unmark2(struct folio_queue *folioq, unsigned int slot);
- void folioq_unmark3(struct folio_queue *folioq, unsigned int slot);
And the marks can be queried by::
bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot);
bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot);
- bool folioq_is_marked3(const struct folio_queue *folioq, unsigned int slot);
The marks can be used for any purpose and are not interpreted by this API.
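Purely as an illustration (``fq``, ``slot`` and the writeback helper are hypothetical; the mark's meaning is defined by the caller alone)::

    folioq_mark(fq, slot);                  /* caller-defined meaning */
    ...
    if (folioq_is_marked(fq, slot)) {
            start_writeback(folioq_folio(fq, slot));
            folioq_unmark(fq, slot);
    }
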
diff --git a/Documentation/core-api/genericirq.rst b/Documentation/core-api/genericirq.rst
index 25f94dfd66fa..582bde9bf5a9 100644
--- a/Documentation/core-api/genericirq.rst
+++ b/Documentation/core-api/genericirq.rst
@@ -410,8 +410,6 @@ which are used in the generic IRQ layer.
.. kernel-doc:: include/linux/interrupt.h
:internal:
-.. kernel-doc:: include/linux/irqdomain.h
-
Public Functions Provided
=========================
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index e9789bd381d8..7a4ca18ca6e2 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -115,6 +115,7 @@ more memory-management documentation in Documentation/mm/index.rst.
pin_user_pages
boot-time-mm
gfp_mask-from-fs-io
+ kho/index
Interfaces for kernel debugging
===============================
diff --git a/Documentation/core-api/irq/concepts.rst b/Documentation/core-api/irq/concepts.rst
index 4273806a606b..7c4564f3cbdf 100644
--- a/Documentation/core-api/irq/concepts.rst
+++ b/Documentation/core-api/irq/concepts.rst
@@ -2,23 +2,24 @@
What is an IRQ?
===============
-An IRQ is an interrupt request from a device.
-Currently they can come in over a pin, or over a packet.
-Several devices may be connected to the same pin thus
-sharing an IRQ.
+An IRQ is an interrupt request from a device. Currently, they can come
+in over a pin, or over a packet. Several devices may be connected to
+the same pin, thus sharing an IRQ; for example, on the legacy PCI bus all
+devices typically share 4 lanes/pins. Note that each device can request an
+interrupt on each of the lanes.
An IRQ number is a kernel identifier used to talk about a hardware
-interrupt source. Typically this is an index into the global irq_desc
-array, but except for what linux/interrupt.h implements the details
-are architecture specific.
+interrupt source. Typically, this is an index into the global irq_desc
+array or sparse_irqs tree. But except for what linux/interrupt.h
+implements, the details are architecture specific.
An IRQ number is an enumeration of the possible interrupt sources on a
-machine. Typically what is enumerated is the number of input pins on
-all of the interrupt controller in the system. In the case of ISA
-what is enumerated are the 16 input pins on the two i8259 interrupt
-controllers.
+machine. Typically, what is enumerated is the number of input pins on
+all of the interrupt controllers in the system. In the case of ISA,
+what is enumerated are the 8 input pins on each of the two i8259
+interrupt controllers.
Architectures can assign additional meaning to the IRQ numbers, and
-are encouraged to in the case where there is any manual configuration
-of the hardware involved. The ISA IRQs are a classic example of
+are encouraged to do so in the case where there is any manual configuration
+of the hardware involved. The ISA IRQs are a classic example of
assigning this kind of additional meaning.
diff --git a/Documentation/core-api/irq/irq-domain.rst b/Documentation/core-api/irq/irq-domain.rst
index f88a6ee67a35..a01c6ead1bc0 100644
--- a/Documentation/core-api/irq/irq-domain.rst
+++ b/Documentation/core-api/irq/irq-domain.rst
@@ -1,59 +1,77 @@
===============================================
-The irq_domain interrupt number mapping library
+The irq_domain Interrupt Number Mapping Library
===============================================
The current design of the Linux kernel uses a single large number
-space where each separate IRQ source is assigned a different number.
-This is simple when there is only one interrupt controller, but in
-systems with multiple interrupt controllers the kernel must ensure
+space where each separate IRQ source is assigned a unique number.
+This is simple when there is only one interrupt controller. But in
+systems with multiple interrupt controllers, the kernel must ensure
that each one gets assigned non-overlapping allocations of Linux
IRQ numbers.
The number of interrupt controllers registered as unique irqchips
-show a rising tendency: for example subdrivers of different kinds
+shows a rising tendency. For example, subdrivers of different kinds
such as GPIO controllers avoid reimplementing identical callback
mechanisms as the IRQ core system by modelling their interrupt
-handlers as irqchips, i.e. in effect cascading interrupt controllers.
+handlers as irqchips, i.e., in effect cascading interrupt controllers.
-Here the interrupt number loose all kind of correspondence to
-hardware interrupt numbers: whereas in the past, IRQ numbers could
-be chosen so they matched the hardware IRQ line into the root
-interrupt controller (i.e. the component actually fireing the
-interrupt line to the CPU) nowadays this number is just a number.
+So in the past, IRQ numbers could be chosen so that they match the
+hardware IRQ line into the root interrupt controller (i.e. the
+component actually firing the interrupt line to the CPU). Nowadays,
+this number is just a number and has lost all correspondence to
+hardware interrupt numbers.
-For this reason we need a mechanism to separate controller-local
-interrupt numbers, called hardware irq's, from Linux IRQ numbers.
+For this reason, we need a mechanism to separate controller-local
+interrupt numbers, called hardware IRQs, from Linux IRQ numbers.
The irq_alloc_desc*() and irq_free_desc*() APIs provide allocation of
-irq numbers, but they don't provide any support for reverse mapping of
+IRQ numbers, but they don't provide any support for reverse mapping of
the controller-local IRQ (hwirq) number into the Linux IRQ number
space.
-The irq_domain library adds mapping between hwirq and IRQ numbers on
-top of the irq_alloc_desc*() API. An irq_domain to manage mapping is
-preferred over interrupt controller drivers open coding their own
+The irq_domain library adds a mapping between hwirq and IRQ numbers on
+top of the irq_alloc_desc*() API. An irq_domain to manage the mapping
+is preferred over interrupt controller drivers open coding their own
reverse mapping scheme.
-irq_domain also implements translation from an abstract irq_fwspec
-structure to hwirq numbers (Device Tree and ACPI GSI so far), and can
-be easily extended to support other IRQ topology data sources.
+irq_domain also implements a translation from an abstract struct
+irq_fwspec to hwirq numbers (Device Tree, non-DT firmware node, ACPI
+GSI, and software node so far), and can be easily extended to support
+other IRQ topology data sources. The implementation is performed
+without any extra platform support code.
-irq_domain usage
+irq_domain Usage
================
-
-An interrupt controller driver creates and registers an irq_domain by
-calling one of the irq_domain_add_*() or irq_domain_create_*() functions
-(each mapping method has a different allocator function, more on that later).
-The function will return a pointer to the irq_domain on success. The caller
-must provide the allocator function with an irq_domain_ops structure.
+struct irq_domain could be defined as an irq domain controller. That
+is, it handles the mapping between hardware and virtual interrupt
+numbers for a given interrupt domain. The domain structure is
+generally created by the PIC code for a given PIC instance (though a
+domain can cover more than one PIC if they have a flat number model).
+It is the domain callbacks that are responsible for setting the
+irq_chip on a given irq_desc after it has been mapped.
+
+The host code and data structures use a fwnode_handle pointer to
+identify the domain. In some cases, and in order to preserve source
+code compatibility, this fwnode pointer is "upgraded" to a DT
+device_node. For those firmware infrastructures that do not provide a
+unique identifier for an interrupt controller, the irq_domain code
+offers a fwnode allocator.
+
+An interrupt controller driver creates and registers a struct irq_domain
+by calling one of the irq_domain_create_*() functions (each mapping
+method has a different allocator function, more on that later). The
+function will return a pointer to the struct irq_domain on success. The
+caller must provide the allocator function with a struct irq_domain_ops
+pointer.
In most cases, the irq_domain will begin empty without any mappings
between hwirq and IRQ numbers. Mappings are added to the irq_domain
by calling irq_create_mapping() which accepts the irq_domain and a
-hwirq number as arguments. If a mapping for the hwirq doesn't already
-exist then it will allocate a new Linux irq_desc, associate it with
-the hwirq, and call the .map() callback so the driver can perform any
-required hardware setup.
+hwirq number as arguments. If a mapping for the hwirq doesn't already
+exist, irq_create_mapping() allocates a new Linux irq_desc, associates
+it with the hwirq, and calls the :c:member:`irq_domain_ops.map()`
+callback. In there, the driver can perform any required hardware
+setup.
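As a sketch of that sequence (the ``foo_*`` names, ``priv`` and ``FOO_NR_IRQS`` are illustrative)::

    static const struct irq_domain_ops foo_domain_ops = {
            .map   = foo_irq_map,           /* sets chip/handler for one hwirq */
            .xlate = irq_domain_xlate_onecell,
    };

    /* in the driver's probe path */
    domain = irq_domain_create_linear(dev_fwnode(dev), FOO_NR_IRQS,
                                      &foo_domain_ops, priv);
    if (!domain)
            return -ENOMEM;

    virq = irq_create_mapping(domain, hwirq);   /* Linux IRQ for this hwirq */
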
Once a mapping has been established, it can be retrieved or used via a
variety of methods:
@@ -63,8 +81,6 @@ variety of methods:
mapping.
- irq_find_mapping() returns a Linux IRQ number for a given domain and
hwirq number, and 0 if there was no mapping
-- irq_linear_revmap() is now identical to irq_find_mapping(), and is
- deprecated
- generic_handle_domain_irq() handles an interrupt described by a
domain and a hwirq number
@@ -77,9 +93,10 @@ be allocated.
If the driver has the Linux IRQ number or the irq_data pointer, and
needs to know the associated hwirq number (such as in the irq_chip
-callbacks) then it can be directly obtained from irq_data->hwirq.
+callbacks) then it can be directly obtained from
+:c:member:`irq_data.hwirq`.
-Types of irq_domain mappings
+Types of irq_domain Mappings
============================
There are several mechanisms available for reverse mapping from hwirq
@@ -92,7 +109,6 @@ Linear
::
- irq_domain_add_linear()
irq_domain_create_linear()
The linear reverse map maintains a fixed size table indexed by the
@@ -105,19 +121,13 @@ map are fixed time lookup for IRQ numbers, and irq_descs are only
allocated for in-use IRQs. The disadvantage is that the table must be
as large as the largest possible hwirq number.
-irq_domain_add_linear() and irq_domain_create_linear() are functionally
-equivalent, except for the first argument is different - the former
-accepts an Open Firmware specific 'struct device_node', while the latter
-accepts a more general abstraction 'struct fwnode_handle'.
-
-The majority of drivers should use the linear map.
+The majority of drivers should use the Linear map.
Tree
----
::
- irq_domain_add_tree()
irq_domain_create_tree()
The irq_domain maintains a radix tree map from hwirq numbers to Linux
@@ -129,11 +139,6 @@ since it doesn't need to allocate a table as large as the largest
hwirq number. The disadvantage is that hwirq to IRQ number lookup is
dependent on how many entries are in the table.
-irq_domain_add_tree() and irq_domain_create_tree() are functionally
-equivalent, except for the first argument is different - the former
-accepts an Open Firmware specific 'struct device_node', while the latter
-accepts a more general abstraction 'struct fwnode_handle'.
-
Very few drivers should need this mapping.
No Map
@@ -141,7 +146,7 @@ No Map
::
- irq_domain_add_nomap()
+ irq_domain_create_nomap()
The No Map mapping is to be used when the hwirq number is
programmable in the hardware. In this case it is best to program the
@@ -159,8 +164,6 @@ Legacy
::
- irq_domain_add_simple()
- irq_domain_add_legacy()
irq_domain_create_simple()
irq_domain_create_legacy()
@@ -189,13 +192,13 @@ supported. For example, ISA controllers would use the legacy map for
mapping Linux IRQs 0-15 so that existing ISA drivers get the correct IRQ
numbers.
-Most users of legacy mappings should use irq_domain_add_simple() or
-irq_domain_create_simple() which will use a legacy domain only if an IRQ range
-is supplied by the system and will otherwise use a linear domain mapping.
-The semantics of this call are such that if an IRQ range is specified then
-descriptors will be allocated on-the-fly for it, and if no range is
-specified it will fall through to irq_domain_add_linear() or
-irq_domain_create_linear() which means *no* irq descriptors will be allocated.
+Most users of legacy mappings should use irq_domain_create_simple()
+which will use a legacy domain only if an IRQ range is supplied by the
+system and will otherwise use a linear domain mapping. The semantics of
+this call are such that if an IRQ range is specified then descriptors
+will be allocated on-the-fly for it, and if no range is specified it
+will fall through to irq_domain_create_linear() which means *no* irq
+descriptors will be allocated.
A typical use case for simple domains is where an irqchip provider
is supporting both dynamic and static IRQ assignments.
@@ -206,13 +209,7 @@ that the driver using the simple domain call irq_create_mapping()
before any irq_find_mapping() since the latter will actually work
for the static IRQ assignment case.
-irq_domain_add_simple() and irq_domain_create_simple() as well as
-irq_domain_add_legacy() and irq_domain_create_legacy() are functionally
-equivalent, except for the first argument is different - the former
-accepts an Open Firmware specific 'struct device_node', while the latter
-accepts a more general abstraction 'struct fwnode_handle'.
-
-Hierarchy IRQ domain
+Hierarchy IRQ Domain
--------------------
On some architectures, there may be multiple interrupt controllers
@@ -253,20 +250,40 @@ There are four major interfaces to use hierarchy irq_domain:
4) irq_domain_deactivate_irq(): deactivate interrupt controller hardware
to stop delivering the interrupt.
-Following changes are needed to support hierarchy irq_domain:
+The following is needed to support hierarchy irq_domain:
-1) a new field 'parent' is added to struct irq_domain; it's used to
+1) The :c:member:`parent` field in struct irq_domain is used to
maintain irq_domain hierarchy information.
-2) a new field 'parent_data' is added to struct irq_data; it's used to
- build hierarchy irq_data to match hierarchy irq_domains. The irq_data
- is used to store irq_domain pointer and hardware irq number.
-3) new callbacks are added to struct irq_domain_ops to support hierarchy
- irq_domain operations.
-
-With support of hierarchy irq_domain and hierarchy irq_data ready, an
-irq_domain structure is built for each interrupt controller, and an
+2) The :c:member:`parent_data` field in struct irq_data is used to
+ build hierarchy irq_data to match hierarchy irq_domains. The
+ irq_data is used to store irq_domain pointer and hardware irq
+ number.
+3) The :c:member:`alloc()`, :c:member:`free()`, and other callbacks in
+ struct irq_domain_ops are used to support hierarchy irq_domain operations.
+
+With the support of hierarchy irq_domain and hierarchy irq_data ready,
+an irq_domain structure is built for each interrupt controller, and an
irq_data structure is allocated for each irq_domain associated with an
-IRQ. Now we could go one step further to support stacked(hierarchy)
+IRQ.
+
+For an interrupt controller driver to support hierarchy irq_domain, it
+needs to:
+
+1) Implement irq_domain_ops.alloc() and irq_domain_ops.free()
+2) Optionally, implement irq_domain_ops.activate() and
+ irq_domain_ops.deactivate().
+3) Optionally, implement an irq_chip to manage the interrupt controller
+ hardware.
+4) There is no need to implement irq_domain_ops.map() and
+ irq_domain_ops.unmap(). They are unused with hierarchy irq_domain.
+
+Note the hierarchy irq_domain is in no way x86-specific, and is
+heavily used to support other architectures, such as ARM, ARM64 etc.
+
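A rough sketch of such a driver's ``.alloc()`` callback (the ``foo_*`` names are illustrative): it associates its own irq_chip with each irq_data and then lets the parent domain allocate its part of the hierarchy::

    static int foo_domain_alloc(struct irq_domain *d, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
    {
            struct irq_fwspec *fwspec = arg;
            irq_hw_number_t hwirq = fwspec->param[0];
            unsigned int i;

            for (i = 0; i < nr_irqs; i++)
                    irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
                                                  &foo_irq_chip, d->host_data);

            /* e.g. the GIC below us allocates its own irq_data for these */
            return irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
    }

    static const struct irq_domain_ops foo_hier_domain_ops = {
            .alloc = foo_domain_alloc,
            .free  = irq_domain_free_irqs_common,
    };
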
+Stacked irq_chip
+~~~~~~~~~~~~~~~~
+
+Now, we could go one step further to support stacked (hierarchy)
irq_chip. That is, an irq_chip is associated with each irq_data along
the hierarchy. A child irq_chip may implement a required action by
itself or by cooperating with its parent irq_chip.
@@ -276,22 +293,28 @@ with the hardware managed by itself and may ask for services from its
parent irq_chip when needed. So we could achieve a much cleaner
software architecture.
-For an interrupt controller driver to support hierarchy irq_domain, it
-needs to:
-
-1) Implement irq_domain_ops.alloc and irq_domain_ops.free
-2) Optionally implement irq_domain_ops.activate and
- irq_domain_ops.deactivate.
-3) Optionally implement an irq_chip to manage the interrupt controller
- hardware.
-4) No need to implement irq_domain_ops.map and irq_domain_ops.unmap,
- they are unused with hierarchy irq_domain.
-
-Hierarchy irq_domain is in no way x86 specific, and is heavily used to
-support other architectures, such as ARM, ARM64 etc.
-
Debugging
=========
Most of the internals of the IRQ subsystem are exposed in debugfs by
turning CONFIG_GENERIC_IRQ_DEBUGFS on.
+
+Structures and Public Functions Provided
+========================================
+
+This chapter contains the autogenerated documentation of the structures
+and exported kernel API functions which are used for IRQ domains.
+
+.. kernel-doc:: include/linux/irqdomain.h
+
+.. kernel-doc:: kernel/irq/irqdomain.c
+ :export:
+
+Internal Functions Provided
+===========================
+
+This chapter contains the autogenerated documentation of the internal
+functions.
+
+.. kernel-doc:: kernel/irq/irqdomain.c
+ :internal:
diff --git a/Documentation/core-api/kho/bindings/kho.yaml b/Documentation/core-api/kho/bindings/kho.yaml
new file mode 100644
index 000000000000..11e8ab7b219d
--- /dev/null
+++ b/Documentation/core-api/kho/bindings/kho.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+title: Kexec HandOver (KHO) root tree
+
+maintainers:
+ - Mike Rapoport <rppt@kernel.org>
+ - Changyuan Lyu <changyuanl@google.com>
+
+description: |
+ System memory preserved by KHO across kexec.
+
+properties:
+ compatible:
+ enum:
+ - kho-v1
+
+ preserved-memory-map:
+ description: |
+ physical address (u64) of an in-memory structure describing all preserved
+ folios and memory ranges.
+
+patternProperties:
+ "$[0-9a-f_]+^":
+ $ref: sub-fdt.yaml#
+ description: physical address of a KHO user's own FDT.
+
+required:
+ - compatible
+ - preserved-memory-map
+
+additionalProperties: false
+
+examples:
+ - |
+ kho {
+ compatible = "kho-v1";
+ preserved-memory-map = <0xf0be16 0x1000000>;
+
+ memblock {
+ fdt = <0x80cc16 0x1000000>;
+ };
+ };
diff --git a/Documentation/core-api/kho/bindings/memblock/memblock.yaml b/Documentation/core-api/kho/bindings/memblock/memblock.yaml
new file mode 100644
index 000000000000..d388c28eb91d
--- /dev/null
+++ b/Documentation/core-api/kho/bindings/memblock/memblock.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+title: Memblock reserved memory
+
+maintainers:
+ - Mike Rapoport <rppt@kernel.org>
+
+description: |
+ Memblock can serialize its current memory reservations created with
+ reserve_mem command line option across kexec through KHO.
+ The post-KHO kernel can then consume these reservations and they are
+ guaranteed to have the same physical address.
+
+properties:
+ compatible:
+ enum:
+ - reserve-mem-v1
+
+patternProperties:
+ "$[0-9a-f_]+^":
+ $ref: reserve-mem.yaml#
+ description: reserved memory regions
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ memblock {
+ compatible = "memblock-v1";
+ n1 {
+ compatible = "reserve-mem-v1";
+ start = <0xc06b 0x4000000>;
+ size = <0x04 0x00>;
+ };
+ };
diff --git a/Documentation/core-api/kho/bindings/memblock/reserve-mem.yaml b/Documentation/core-api/kho/bindings/memblock/reserve-mem.yaml
new file mode 100644
index 000000000000..10282d3d1bcd
--- /dev/null
+++ b/Documentation/core-api/kho/bindings/memblock/reserve-mem.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+title: Memblock reserved memory regions
+
+maintainers:
+ - Mike Rapoport <rppt@kernel.org>
+
+description: |
+ Memblock can serialize its current memory reservations created with
+ reserve_mem command line option across kexec through KHO.
+ This object describes each such region.
+
+properties:
+ compatible:
+ enum:
+ - reserve-mem-v1
+
+ start:
+ description: |
+ physical address (u64) of the reserved memory region.
+
+ size:
+ description: |
+ size (u64) of the reserved memory region.
+
+required:
+ - compatible
+ - start
+ - size
+
+additionalProperties: false
+
+examples:
+ - |
+ n1 {
+ compatible = "reserve-mem-v1";
+ start = <0xc06b 0x4000000>;
+ size = <0x04 0x00>;
+ };
diff --git a/Documentation/core-api/kho/bindings/sub-fdt.yaml b/Documentation/core-api/kho/bindings/sub-fdt.yaml
new file mode 100644
index 000000000000..b9a3d2d24850
--- /dev/null
+++ b/Documentation/core-api/kho/bindings/sub-fdt.yaml
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+title: KHO users' FDT address
+
+maintainers:
+ - Mike Rapoport <rppt@kernel.org>
+ - Changyuan Lyu <changyuanl@google.com>
+
+description: |
+ Physical address of an FDT blob registered by a KHO user.
+
+properties:
+ fdt:
+ description: |
+ physical address (u64) of an FDT blob.
+
+required:
+ - fdt
+
+additionalProperties: false
+
+examples:
+ - |
+ memblock {
+ fdt = <0x80cc16 0x1000000>;
+ };
diff --git a/Documentation/core-api/kho/concepts.rst b/Documentation/core-api/kho/concepts.rst
new file mode 100644
index 000000000000..36d5c05cfb30
--- /dev/null
+++ b/Documentation/core-api/kho/concepts.rst
@@ -0,0 +1,74 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+.. _kho-concepts:
+
+=======================
+Kexec Handover Concepts
+=======================
+
+Kexec HandOver (KHO) is a mechanism that allows Linux to preserve memory
+regions, which could contain serialized system states, across kexec.
+
+It introduces multiple concepts:
+
+KHO FDT
+=======
+
+Every KHO kexec carries a KHO specific flattened device tree (FDT) blob
+that describes preserved memory regions. These regions contain either
+serialized subsystem states, or in-memory data that shall not be touched
+across kexec. After KHO, subsystems can retrieve and restore preserved
+memory regions from the KHO FDT.
+
+KHO only uses the FDT container format and libfdt library, but does not
+adhere to the same property semantics that normal device trees do: Properties
+are passed in native endianness and standardized properties like ``regs`` and
+``ranges`` do not exist, hence there are no ``#...-cells`` properties.
+
+KHO is still under development. The FDT schema is unstable and may change
+in the future.
+
+Scratch Regions
+===============
+
+To boot into kexec, we need to have a physically contiguous memory range that
+contains no handed over memory. Kexec then places the target kernel and initrd
+into that region. The new kernel exclusively uses this region for memory
+allocations during boot, up to the initialization of the page allocator.
+
+We guarantee that we always have such regions through the scratch regions: On
+first boot KHO allocates several physically contiguous memory regions. Since
+after kexec these regions will be used by early memory allocations, there is a
+scratch region per NUMA node plus a scratch region to satisfy allocation
+requests that do not require a particular NUMA node assignment.
+By default, the size of the scratch regions is calculated based on the amount
+of memory allocated during boot. The ``kho_scratch`` kernel command line
+option may be used to explicitly define the size of the scratch regions.
+The scratch regions are declared as CMA when the page allocator is initialized
+so that their memory can be used during the system lifetime. CMA gives us the
+guarantee that no handover pages land in that region, because handover pages
+must be at a static physical memory location and CMA enforces that only
+movable pages can be located inside.
+
+After KHO kexec, we ignore the ``kho_scratch`` kernel command line option and
+instead reuse the exact same region that was originally allocated. This allows
+us to recursively execute any number of KHO kexecs. Because we used this region
+for boot memory allocations and as target memory for kexec blobs, some parts
+of that memory region may be reserved. These reservations are irrelevant for
+the next KHO, because kexec can overwrite even the original kernel.
+
+.. _kho-finalization-phase:
+
+KHO finalization phase
+======================
+
+To enable a user space based kexec file loader, the kernel needs to be able to
+provide the FDT that describes the current kernel's state before
+performing the actual kexec. The process of generating that FDT is
+called serialization. When the FDT is generated, some properties
+of the system may become immutable because they are already written down
+in the FDT. That state is called the KHO finalization phase.
+
+Public API
+==========
+.. kernel-doc:: kernel/kexec_handover.c
+ :export:
diff --git a/Documentation/core-api/kho/fdt.rst b/Documentation/core-api/kho/fdt.rst
new file mode 100644
index 000000000000..62505285d60d
--- /dev/null
+++ b/Documentation/core-api/kho/fdt.rst
@@ -0,0 +1,80 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+=======
+KHO FDT
+=======
+
+KHO uses the flattened device tree (FDT) container format and libfdt
+library to create and parse the data that is passed between the
+kernels. The properties in the KHO FDT are stored in native endianness.
+It includes the physical address of an in-memory structure describing
+all preserved memory regions, as well as physical addresses of KHO users'
+own FDTs. Interpreting those sub FDTs is the responsibility of KHO users.
+
+KHO nodes and properties
+========================
+
+Property ``preserved-memory-map``
+---------------------------------
+
+KHO saves a special property named ``preserved-memory-map`` under the root node.
+This property contains the physical address of an in-memory structure that KHO
+uses to preserve memory regions across kexec.
+
+Property ``compatible``
+-----------------------
+
+The ``compatible`` property determines compatibility between the kernel
+that created the KHO FDT and the kernel that attempts to load it.
+If the kernel that loads the KHO FDT is not compatible with it, the entire
+KHO process will be bypassed.
+
+Property ``fdt``
+----------------
+
+Generally, a KHO user serializes its state into its own FDT and instructs
+KHO to preserve the underlying memory, such that after kexec, the new kernel
+can recover its state from the preserved FDT.
+
+A KHO user can thus create a node in the KHO root tree and save the physical
+address of its own FDT in that node's ``fdt`` property.
+
+Examples
+========
+
+The following example demonstrates a KHO FDT that preserves two memory
+regions created with the ``reserve_mem`` kernel command line parameter::
+
+ /dts-v1/;
+
+ / {
+ compatible = "kho-v1";
+
+ preserved-memory-map = <0x40be16 0x1000000>;
+
+ memblock {
+ fdt = <0x1517 0x1000000>;
+ };
+ };
+
+where the ``memblock`` node contains an FDT that the memblock subsystem
+requested for preservation. The FDT contains the following
+serialized data::
+
+ /dts-v1/;
+
+ / {
+ compatible = "memblock-v1";
+
+ n1 {
+ compatible = "reserve-mem-v1";
+ start = <0xc06b 0x4000000>;
+ size = <0x04 0x00>;
+ };
+
+ n2 {
+ compatible = "reserve-mem-v1";
+ start = <0xc067 0x4000000>;
+ size = <0x04 0x00>;
+ };
+ };
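+
+A post-KHO consumer can walk such a sub FDT with plain libfdt calls. The
+sketch below is illustrative only: the helper name is made up, it assumes the
+node layout of the example above, and it relies on KHO properties being
+stored in native endianness (so no ``fdt64_to_cpu()`` is needed)::
+
+    /* Hypothetical helper: dump the reserve_mem regions from the example. */
+    #include <linux/errno.h>
+    #include <linux/libfdt.h>
+    #include <linux/printk.h>
+    #include <linux/types.h>
+
+    static int dump_reserve_mem(const void *fdt)
+    {
+            int node, len;
+
+            fdt_for_each_subnode(node, fdt, 0) {
+                    const u64 *start, *size;
+
+                    if (fdt_node_check_compatible(fdt, node, "reserve-mem-v1"))
+                            continue;
+
+                    start = fdt_getprop(fdt, node, "start", &len);
+                    size = fdt_getprop(fdt, node, "size", &len);
+                    if (!start || !size)
+                            return -EINVAL;
+
+                    /* Native-endian values, read directly. */
+                    pr_info("%s: start %#llx size %#llx\n",
+                            fdt_get_name(fdt, node, NULL),
+                            (unsigned long long)*start,
+                            (unsigned long long)*size);
+            }
+
+            return 0;
+    }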
diff --git a/Documentation/core-api/kho/index.rst b/Documentation/core-api/kho/index.rst
new file mode 100644
index 000000000000..0c63b0c5c143
--- /dev/null
+++ b/Documentation/core-api/kho/index.rst
@@ -0,0 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+========================
+Kexec Handover Subsystem
+========================
+
+.. toctree::
+ :maxdepth: 1
+
+ concepts
+ fdt
+
+.. only:: subproject and html
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index 4bdc394e86af..4b7f3646ec6c 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -571,9 +571,8 @@ struct clk
::
%pC pll1
- %pCn pll1
-For printing struct clk structures. %pC and %pCn print the name of the clock
+For printing struct clk structures. %pC prints the name of the clock
(Common Clock Framework) or a unique 32-bit ID (legacy clock framework).
Passed by reference.
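+
+A minimal, hypothetical use from a platform driver (``my_clk`` and ``pdev``
+are illustrative names, not part of the format description)::
+
+    struct clk *my_clk = devm_clk_get(&pdev->dev, NULL);
+
+    if (!IS_ERR(my_clk))
+        pr_debug("using clock %pC\n", my_clk);
+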
@@ -648,6 +647,38 @@ Examples::
%p4cc Y10 little-endian (0x20303159)
%p4cc NV12 big-endian (0xb231564e)
+Generic FourCC code
+-------------------
+
+::
+ %p4c[h[R]lb] gP00 (0x67503030)
+
+Print a generic FourCC code as both ASCII characters and its numerical
+value in hexadecimal.
+
+The generic FourCC code is always printed in big-endian format, with the
+most significant byte first. This is the opposite of V4L/DRM FourCCs.
+
+The additional ``h``, ``hR``, ``l``, and ``b`` specifiers define what
+endianness is used to load the stored bytes. The data might be interpreted
+using the host, reversed host, little-endian, or big-endian byte order.
+
+Passed by reference.
+
+Examples for a little-endian machine, given &(u32)0x67503030::
+
+ %p4ch gP00 (0x67503030)
+ %p4chR 00Pg (0x30305067)
+ %p4cl gP00 (0x67503030)
+ %p4cb 00Pg (0x30305067)
+
+Examples for a big-endian machine, given &(u32)0x67503030::
+
+ %p4ch gP00 (0x67503030)
+ %p4chR 00Pg (0x30305067)
+ %p4cl 00Pg (0x30305067)
+ %p4cb gP00 (0x67503030)
+
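+In kernel code the value is passed by reference; a minimal, illustrative
+snippet (the variable name is arbitrary)::
+
+    u32 fourcc = 0x67503030;
+
+    pr_info("FourCC: %p4ch (%#010x)\n", &fourcc, fourcc);
+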
Rust
----
diff --git a/Documentation/dev-tools/kunit/run_wrapper.rst b/Documentation/dev-tools/kunit/run_wrapper.rst
index 19ddf5e07013..6697c71ee8ca 100644
--- a/Documentation/dev-tools/kunit/run_wrapper.rst
+++ b/Documentation/dev-tools/kunit/run_wrapper.rst
@@ -182,6 +182,8 @@ via UML. To run tests on qemu, by default it requires two flags:
is ignored), the tests will run via UML. Non-UML architectures,
for example: i386, x86_64, arm and so on; run on qemu.
+ ``--arch help`` lists all valid ``--arch`` values.
+
- ``--cross_compile``: Specifies the Kbuild toolchain. It passes the
same argument as passed to the ``CROSS_COMPILE`` variable used by
Kbuild. As a reminder, this will be the prefix for the toolchain
diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst
index 22955d56b379..038f480074fd 100644
--- a/Documentation/dev-tools/kunit/usage.rst
+++ b/Documentation/dev-tools/kunit/usage.rst
@@ -670,28 +670,50 @@ with ``kunit_remove_action``.
Testing Static Functions
------------------------
-If we do not want to expose functions or variables for testing, one option is to
-conditionally export the used symbol. For example:
+If you want to test static functions without exposing those functions outside of
+testing, one option is to conditionally export the symbol. When KUnit is enabled,
+the symbol is exposed but remains static otherwise. To use this method, follow
+the template below.
.. code-block:: c
- /* In my_file.c */
+ /* In the file containing functions to test "my_file.c" */
- VISIBLE_IF_KUNIT int do_interesting_thing();
+ #include <kunit/visibility.h>
+ #include <my_file.h>
+ ...
+ VISIBLE_IF_KUNIT int do_interesting_thing()
+ {
+ ...
+ }
EXPORT_SYMBOL_IF_KUNIT(do_interesting_thing);
- /* In my_file.h */
+ /* In the header file "my_file.h" */
#if IS_ENABLED(CONFIG_KUNIT)
int do_interesting_thing(void);
#endif
-Alternatively, you could conditionally ``#include`` the test file at the end of
-your .c file. For example:
+ /* In the KUnit test file "my_file_test.c" */
+
+ #include <kunit/visibility.h>
+ #include <my_file.h>
+ ...
+ MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+ ...
+ // Use do_interesting_thing() in tests
+
+For a full example, see this `patch <https://lore.kernel.org/all/20221207014024.340230-3-rmoar@google.com/>`_
+where a test is modified to conditionally expose static functions for testing
+using the macros above.
+
+As an **alternative** to the method above, you could conditionally ``#include``
+the test file at the end of your .c file. This is not recommended but works
+if needed. For example:
.. code-block:: c
- /* In my_file.c */
+ /* In "my_file.c" */
static int do_interesting_thing();
diff --git a/Documentation/devicetree/bindings/arm/altera.yaml b/Documentation/devicetree/bindings/arm/altera.yaml
index 8c7575455422..30c44a0e6407 100644
--- a/Documentation/devicetree/bindings/arm/altera.yaml
+++ b/Documentation/devicetree/bindings/arm/altera.yaml
@@ -47,6 +47,7 @@ properties:
- novtech,chameleon96
- samtec,vining
- terasic,de0-atlas
+ - terasic,de10-nano
- terasic,socfpga-cyclone5-sockit
- const: altr,socfpga-cyclone5
- const: altr,socfpga
diff --git a/Documentation/devicetree/bindings/arm/altera/socfpga-clk-manager.yaml b/Documentation/devicetree/bindings/arm/altera/socfpga-clk-manager.yaml
index 572381306681..a758f4bb2bb3 100644
--- a/Documentation/devicetree/bindings/arm/altera/socfpga-clk-manager.yaml
+++ b/Documentation/devicetree/bindings/arm/altera/socfpga-clk-manager.yaml
@@ -9,20 +9,120 @@ title: Altera SOCFPGA Clock Manager
maintainers:
- Dinh Nguyen <dinguyen@kernel.org>
-description: test
+description:
+ This binding describes the Altera SOCFPGA Clock Manager and its associated
+ tree of clocks, PLLs, and clock gates for the Cyclone5, Arria5 and Arria10
+ chip families.
properties:
compatible:
items:
- const: altr,clk-mgr
+
reg:
maxItems: 1
+ clocks:
+ type: object
+ additionalProperties: false
+
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ patternProperties:
+ "^osc[0-9]$":
+ type: object
+
+ "^[a-z0-9,_]+(clk|pll|clk_gate|clk_divided)(@[a-f0-9]+)?$":
+ type: object
+ $ref: '#/$defs/clock-props'
+ unevaluatedProperties: false
+
+ properties:
+ compatible:
+ enum:
+ - altr,socfpga-pll-clock
+ - altr,socfpga-perip-clk
+ - altr,socfpga-gate-clk
+ - altr,socfpga-a10-pll-clock
+ - altr,socfpga-a10-perip-clk
+ - altr,socfpga-a10-gate-clk
+ - fixed-clock
+
+ clocks:
+ description: one or more phandles to input clocks
+ minItems: 1
+ maxItems: 5
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ patternProperties:
+ "^[a-z0-9,_]+(clk|pll)(@[a-f0-9]+)?$":
+ type: object
+ $ref: '#/$defs/clock-props'
+ unevaluatedProperties: false
+
+ properties:
+ compatible:
+ enum:
+ - altr,socfpga-perip-clk
+ - altr,socfpga-gate-clk
+ - altr,socfpga-a10-perip-clk
+ - altr,socfpga-a10-gate-clk
+
+ clocks:
+ description: one or more phandles to input clocks
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - compatible
+ - clocks
+ - "#clock-cells"
+
+ required:
+ - compatible
+ - "#clock-cells"
+
required:
- compatible
+ - reg
additionalProperties: false
+$defs:
+ clock-props:
+ properties:
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 0
+
+ clk-gate:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - description: gating register offset
+ - description: bit index
+
+ div-reg:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - description: divider register offset
+ - description: bit shift
+ - description: bit width
+
+ fixed-divider:
+ $ref: /schemas/types.yaml#/definitions/uint32
+
examples:
- |
clkmgr@ffd04000 {
diff --git a/Documentation/devicetree/bindings/arm/amlogic.yaml b/Documentation/devicetree/bindings/arm/amlogic.yaml
index 0647851ae1f5..05edf22e6c30 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.yaml
+++ b/Documentation/devicetree/bindings/arm/amlogic.yaml
@@ -27,6 +27,7 @@ properties:
items:
- enum:
- minix,neo-x8
+ - tcu,fernsehfee3
- const: amlogic,meson8
- description: Boards with the Amlogic Meson8m2 SoC
@@ -73,6 +74,13 @@ properties:
- const: amlogic,s805x
- const: amlogic,meson-gxl
+ - description: Boards with the Amlogic Meson GXL S805Y SoC
+ items:
+ - enum:
+ - xiaomi,aquaman
+ - const: amlogic,s805y
+ - const: amlogic,meson-gxl
+
- description: Boards with the Amlogic Meson GXL S905W SoC
items:
- enum:
@@ -237,6 +245,24 @@ properties:
- amlogic,aq222
- const: amlogic,s4
+ - description: Boards with the Amlogic S6 S905X5 SoC
+ items:
+ - enum:
+ - amlogic,bl209
+ - const: amlogic,s6
+
+ - description: Boards with the Amlogic S7 S805X3 SoC
+ items:
+ - enum:
+ - amlogic,bp201
+ - const: amlogic,s7
+
+ - description: Boards with the Amlogic S7D S905X5M SoC
+ items:
+ - enum:
+ - amlogic,bm202
+ - const: amlogic,s7d
+
- description: Boards with the Amlogic T7 A311D2 SoC
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/atmel,sama5d2-secumod.yaml b/Documentation/devicetree/bindings/arm/atmel,sama5d2-secumod.yaml
new file mode 100644
index 000000000000..ad4a98a4ee67
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/atmel,sama5d2-secumod.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/atmel,sama5d2-secumod.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip AT91 Security Module (SECUMOD)
+
+maintainers:
+ - Nicolas Ferre <nicolas.ferre@microchip.com>
+
+description:
+ The Security Module also offers the PIOBU pins which can be used as GPIO pins.
+ Note that they maintain their voltage during Backup/Self-refresh.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: atmel,sama5d2-secumod
+ - const: syscon
+ - items:
+ - enum:
+ - microchip,sama7d65-secumod
+ - microchip,sama7g5-secumod
+ - const: atmel,sama5d2-secumod
+ - const: syscon
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ security-module@fc040000 {
+ compatible = "atmel,sama5d2-secumod", "syscon";
+ reg = <0xfc040000 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
index d3821f651e72..5ce54f9befe6 100644
--- a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
@@ -46,28 +46,3 @@ Examples:
reg = <0xffffe800 0x200>;
};
-Security Module (SECUMOD)
-
-The Security Module macrocell provides all necessary secure functions to avoid
-voltage, temperature, frequency and mechanical attacks on the chip. It also
-embeds secure memories that can be scrambled.
-
-The Security Module also offers the PIOBU pins which can be used as GPIO pins.
-Note that they maintain their voltage during Backup/Self-refresh.
-
-required properties:
-- compatible: Should be "atmel,<chip>-secumod", "syscon".
- <chip> can be "sama5d2".
-- reg: Should contain registers location and length
-- gpio-controller: Marks the port as GPIO controller.
-- #gpio-cells: There are 2. The pin number is the
- first, the second represents additional
- parameters such as GPIO_ACTIVE_HIGH/LOW.
-
-
- secumod@fc040000 {
- compatible = "atmel,sama5d2-secumod", "syscon";
- reg = <0xfc040000 0x100>;
- gpio-controller;
- #gpio-cells = <2>;
- };
diff --git a/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml b/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml
index e4ff71f006b8..2729a542c4f3 100644
--- a/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml
+++ b/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml
@@ -52,6 +52,7 @@ properties:
- description: BCM2837 based Boards
items:
- enum:
+ - raspberrypi,2-model-b-rev2
- raspberrypi,3-model-a-plus
- raspberrypi,3-model-b
- raspberrypi,3-model-b-plus
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index 2e666b2a4dcd..2e9ab9583005 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -10,9 +10,9 @@ maintainers:
- Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
description: |+
- The device tree allows to describe the layout of CPUs in a system through
- the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
- defining properties for every cpu.
+ The device tree allows describing the layout of CPUs in a system through the
+ "cpus" node, which in turn contains a number of subnodes (ie "cpu") defining
+ properties for every cpu.
Bindings for CPU nodes follow the Devicetree Specification, available from:
@@ -41,45 +41,40 @@ description: |+
properties:
reg:
maxItems: 1
- description: |
- Usage and definition depend on ARM architecture version and
- configuration:
+ description: >
+ Usage and definition depend on ARM architecture version and configuration:
- On uniprocessor ARM architectures previous to v7
- this property is required and must be set to 0.
+ On uniprocessor ARM architectures previous to v7 this property is required
+ and must be set to 0.
- On ARM 11 MPcore based systems this property is
- required and matches the CPUID[11:0] register bits.
+ On ARM 11 MPcore based systems this property is required and matches the
+ CPUID[11:0] register bits.
- Bits [11:0] in the reg cell must be set to
- bits [11:0] in CPU ID register.
+ Bits [11:0] in the reg cell must be set to bits [11:0] in CPU ID register.
All other bits in the reg cell must be set to 0.
- On 32-bit ARM v7 or later systems this property is
- required and matches the CPU MPIDR[23:0] register
- bits.
+ On 32-bit ARM v7 or later systems this property is required and matches
+ the CPU MPIDR[23:0] register bits.
- Bits [23:0] in the reg cell must be set to
- bits [23:0] in MPIDR.
+ Bits [23:0] in the reg cell must be set to bits [23:0] in MPIDR.
All other bits in the reg cell must be set to 0.
- On ARM v8 64-bit systems this property is required
- and matches the MPIDR_EL1 register affinity bits.
+ On ARM v8 64-bit systems this property is required and matches the
+ MPIDR_EL1 register affinity bits.
* If cpus node's #address-cells property is set to 2
- The first reg cell bits [7:0] must be set to
- bits [39:32] of MPIDR_EL1.
+ The first reg cell bits [7:0] must be set to bits [39:32] of
+ MPIDR_EL1.
- The second reg cell bits [23:0] must be set to
- bits [23:0] of MPIDR_EL1.
+ The second reg cell bits [23:0] must be set to bits [23:0] of
+ MPIDR_EL1.
* If cpus node's #address-cells property is set to 1
- The reg cell bits [23:0] must be set to bits [23:0]
- of MPIDR_EL1.
+ The reg cell bits [23:0] must be set to bits [23:0] of MPIDR_EL1.
All other bits in the reg cells must be set to 0.
@@ -273,103 +268,122 @@ properties:
description:
The DT specification defines this as 64-bit always, but some 32-bit Arm
systems have used a 32-bit value which must be supported.
- Required for systems that have an "enable-method"
- property value of "spin-table".
cpu-idle-states:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
maxItems: 1
- description: |
- List of phandles to idle state nodes supported
- by this cpu (see ./idle-states.yaml).
+ description:
+ List of phandles to idle state nodes supported by this cpu (see
+ ./idle-states.yaml).
capacity-dmips-mhz:
description:
u32 value representing CPU capacity (see ../cpu/cpu-capacity.txt) in
- DMIPS/MHz, relative to highest capacity-dmips-mhz
- in the system.
+ DMIPS/MHz, relative to highest capacity-dmips-mhz in the system.
cci-control-port: true
dynamic-power-coefficient:
$ref: /schemas/types.yaml#/definitions/uint32
- description:
- A u32 value that represents the running time dynamic
- power coefficient in units of uW/MHz/V^2. The
- coefficient can either be calculated from power
+ description: >
+ A u32 value that represents the running time dynamic power coefficient in
+ units of uW/MHz/V^2. The coefficient can either be calculated from power
measurements or derived by analysis.
- The dynamic power consumption of the CPU is
- proportional to the square of the Voltage (V) and
- the clock frequency (f). The coefficient is used to
+ The dynamic power consumption of the CPU is proportional to the square of
+ the Voltage (V) and the clock frequency (f). The coefficient is used to
calculate the dynamic power as below -
Pdyn = dynamic-power-coefficient * V^2 * f
where voltage is in V, frequency is in MHz.
+ interconnects:
+ minItems: 1
+ maxItems: 3
+
+ nvmem-cells:
+ maxItems: 1
+
+ nvmem-cell-names:
+ const: speed_grade
+
performance-domains:
maxItems: 1
- description:
- List of phandles and performance domain specifiers, as defined by
- bindings of the performance domain provider. See also
- dvfs/performance-domain.yaml.
power-domains:
- description:
- List of phandles and PM domain specifiers, as defined by bindings of the
- PM domain provider (see also ../power_domain.txt).
+ minItems: 1
+ maxItems: 2
power-domain-names:
description:
- A list of power domain name strings sorted in the same order as the
- power-domains property.
-
For PSCI based platforms, the name corresponding to the index of the PSCI
PM domain provider, must be "psci". For SCMI based platforms, the name
corresponding to the index of an SCMI performance domain provider, must be
"perf".
+ minItems: 1
+ maxItems: 2
+ items:
+ enum: [ psci, perf, cpr ]
- qcom,saw:
- $ref: /schemas/types.yaml#/definitions/phandle
- description: |
- Specifies the SAW* node associated with this CPU.
+ resets:
+ maxItems: 1
- Required for systems that have an "enable-method" property
- value of "qcom,kpss-acc-v1" or "qcom,kpss-acc-v2"
+ arm-supply:
+ deprecated: true
+ description: Use 'cpu-supply' instead
- * arm/msm/qcom,saw2.txt
+ cpu0-supply:
+ deprecated: true
+ description: Use 'cpu-supply' instead
- qcom,acc:
+ mem-supply: true
+
+ proc-supply:
+ deprecated: true
+ description: Use 'cpu-supply' instead
+
+ sram-supply:
+ deprecated: true
+ description: Use 'mem-supply' instead
+
+ mediatek,cci:
$ref: /schemas/types.yaml#/definitions/phandle
- description: |
- Specifies the ACC* node associated with this CPU.
+ description: Link to Mediatek Cache Coherent Interconnect
- Required for systems that have an "enable-method" property
- value of "qcom,kpss-acc-v1", "qcom,kpss-acc-v2", "qcom,msm8226-smp" or
- "qcom,msm8916-smp".
+ qcom,saw:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Specifies the SAW node associated with this CPU.
- * arm/msm/qcom,kpss-acc.txt
+ qcom,acc:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Specifies the ACC node associated with this CPU.
+
+ qcom,freq-domain:
+ description: Specifies the QCom CPUFREQ HW associated with the CPU.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
rockchip,pmu:
$ref: /schemas/types.yaml#/definitions/phandle
- description: |
+ description: >
Specifies the syscon node controlling the cpu core power domains.
- Optional for systems that have an "enable-method"
- property value of "rockchip,rk3066-smp"
- While optional, it is the preferred way to get access to
- the cpu-core power-domains.
+ Optional for systems that have an "enable-method" property value of
+ "rockchip,rk3066-smp". While optional, it is the preferred way to get
+ access to the cpu-core power-domains.
secondary-boot-reg:
$ref: /schemas/types.yaml#/definitions/uint32
- description: |
+ description: >
Required for systems that have an "enable-method" property value of
"brcm,bcm11351-cpu-method", "brcm,bcm23550" or "brcm,bcm-nsp-smp".
- This includes the following SoCs: |
- BCM11130, BCM11140, BCM11351, BCM28145, BCM28155, BCM21664, BCM23550
+ This includes the following SoCs:
+ BCM11130, BCM11140, BCM11351, BCM28145, BCM28155, BCM21664, BCM23550,
BCM58522, BCM58525, BCM58535, BCM58622, BCM58623, BCM58625, BCM88312
The secondary-boot-reg property is a u32 value that specifies the
@@ -378,22 +392,66 @@ properties:
formed by encoding the target CPU id into the low bits of the
physical start address it should jump to.
-if:
- # If the enable-method property contains one of those values
- properties:
- enable-method:
- contains:
- enum:
- - brcm,bcm11351-cpu-method
- - brcm,bcm23550
- - brcm,bcm-nsp-smp
- # and if enable-method is present
- required:
- - enable-method
-
-then:
- required:
- - secondary-boot-reg
+ thermal-idle:
+ type: object
+
+allOf:
+ - $ref: /schemas/cpu.yaml#
+ - $ref: /schemas/opp/opp-v1.yaml#
+ - if:
+ # If the enable-method property contains one of those values
+ properties:
+ enable-method:
+ contains:
+ enum:
+ - brcm,bcm11351-cpu-method
+ - brcm,bcm23550
+ - brcm,bcm-nsp-smp
+ # and if enable-method is present
+ required:
+ - enable-method
+ then:
+ required:
+ - secondary-boot-reg
+ - if:
+ properties:
+ enable-method:
+ enum:
+ - spin-table
+ - renesas,r9a06g032-smp
+ required:
+ - enable-method
+ then:
+ required:
+ - cpu-release-addr
+ - if:
+ properties:
+ enable-method:
+ enum:
+ - qcom,kpss-acc-v1
+ - qcom,kpss-acc-v2
+ - qcom,msm8226-smp
+ - qcom,msm8916-smp
+ required:
+ - enable-method
+ then:
+ required:
+ - qcom,acc
+ - qcom,saw
+ else:
+ if:
+ # 2 Qualcomm platforms bootloaders need qcom,acc and qcom,saw yet use
+ # "spin-table" or "psci" enable-methods. Disallowing the properties for
+ # all other CPUs is the best we can do as there's not any way to
+ # distinguish these Qualcomm platforms.
+ not:
+ properties:
+ compatible:
+ const: arm,cortex-a53
+ then:
+ properties:
+ qcom,acc: false
+ qcom,saw: false
required:
- device_type
@@ -403,7 +461,7 @@ required:
dependencies:
rockchip,pmu: [enable-method]
-additionalProperties: true
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,imx51-m4if.yaml b/Documentation/devicetree/bindings/arm/freescale/fsl,imx51-m4if.yaml
new file mode 100644
index 000000000000..1f515bea3959
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,imx51-m4if.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/freescale/fsl,imx51-m4if.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Multi Master Multi Memory Interface (M4IF) and Tigerp module
+
+description: Collects i.MX devices that only have compatible and reg properties
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - fsl,imx51-m4if
+ - fsl,imx51-tigerp
+ - fsl,imx51-aipstz
+ - fsl,imx53-aipstz
+ - fsl,imx7d-pcie-phy
+ - items:
+ - const: fsl,imx53-tigerp
+ - const: fsl,imx51-tigerp
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ m4if@83fd8000 {
+ compatible = "fsl,imx51-m4if";
+ reg = <0x83fd8000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/freescale/m4if.txt b/Documentation/devicetree/bindings/arm/freescale/m4if.txt
deleted file mode 100644
index 93bd7b867a53..000000000000
--- a/Documentation/devicetree/bindings/arm/freescale/m4if.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-* Freescale Multi Master Multi Memory Interface (M4IF) module
-
-Required properties:
-- compatible : Should be "fsl,imx51-m4if"
-- reg : Address and length of the register set for the device
-
-Example:
-
-m4if: m4if@83fd8000 {
- compatible = "fsl,imx51-m4if";
- reg = <0x83fd8000 0x1000>;
-};
diff --git a/Documentation/devicetree/bindings/arm/freescale/tigerp.txt b/Documentation/devicetree/bindings/arm/freescale/tigerp.txt
deleted file mode 100644
index 19e2aad63d6e..000000000000
--- a/Documentation/devicetree/bindings/arm/freescale/tigerp.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-* Freescale Tigerp platform module
-
-Required properties:
-- compatible : Should be "fsl,imx51-tigerp"
-- reg : Address and length of the register set for the device
-
-Example:
-
-tigerp: tigerp@83fa0000 {
- compatible = "fsl,imx51-tigerp";
- reg = <0x83fa0000 0x28>;
-};
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index 1b90870958a2..d3b5e6923e41 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -1120,6 +1120,12 @@ properties:
- const: avnet,sm2s-imx8mp # SM2S-IMX8PLUS SoM
- const: fsl,imx8mp
+ - description: Boundary Devices Nitrogen8M Plus ENC Carrier Board
+ items:
+ - const: boundary,imx8mp-nitrogen-enc-carrier-board
+ - const: boundary,imx8mp-nitrogen-som
+ - const: fsl,imx8mp
+
- description: Boundary Device Nitrogen8MP Universal SMARC Carrier Board
items:
- const: boundary,imx8mp-nitrogen-smarc-universal-board
@@ -1156,6 +1162,13 @@ properties:
- const: kontron,imx8mp-osm-s # Kontron i.MX8MP OSM-S SoM
- const: fsl,imx8mp
+ - description: PHYTEC phyCORE-i.MX8MP FPSC based boards
+ items:
+ - enum:
+ - phytec,imx8mp-libra-rdk-fpsc # i.MX 8M Plus Libra RDK
+ - const: phytec,imx8mp-phycore-fpsc # phyCORE-i.MX 8M Plus FPSC
+ - const: fsl,imx8mp
+
- description: PHYTEC phyCORE-i.MX8MP SoM based boards
items:
- const: phytec,imx8mp-phyboard-pollux-rdk # phyBOARD-Pollux RDK
@@ -1176,6 +1189,12 @@ properties:
- const: polyhex,imx8mp-debix-som-a # Polyhex Debix SOM A
- const: fsl,imx8mp
+ - description: Toradex Boards with SMARC iMX8M Plus Modules
+ items:
+ - const: toradex,smarc-imx8mp-dev # Toradex SMARC iMX8M Plus on Toradex SMARC Development Board
+ - const: toradex,smarc-imx8mp # Toradex SMARC iMX8M Plus Module
+ - const: fsl,imx8mp
+
- description: Toradex Boards with Verdin iMX8M Plus Modules
items:
- enum:
@@ -1333,6 +1352,22 @@ properties:
- const: tq,imx8qxp-tqma8xqp # TQ-Systems GmbH TQMa8XQP SOM (with i.MX8QXP)
- const: fsl,imx8qxp
+ - description:
+ TQMa8XxS is a series of SOMs featuring NXP i.MX8X system-on-chip
+ variants. It has the SMARC-2.0 form factor and is designed to be placed on
+ different carrier boards. MB-SMARC-2 is a carrier reference design.
+ oneOf:
+ - items:
+ - enum:
+ - tq,imx8qxp-tqma8xqps-mb-smarc-2 # TQ-Systems GmbH TQMa8QXPS SOM on MB-SMARC-2
+ - const: tq,imx8qxp-tqma8xqps # TQ-Systems GmbH TQMa8QXPS SOM
+ - const: fsl,imx8qxp
+ - items:
+ - enum:
+ - tq,imx8dxp-tqma8xdps-mb-smarc-2 # TQ-Systems GmbH TQMa8XDPS SOM on MB-SMARC-2
+ - const: tq,imx8dxp-tqma8xdps # TQ-Systems GmbH TQMa8XDPS SOM
+ - const: fsl,imx8dxp
+
- description: i.MX8ULP based Boards
items:
- enum:
@@ -1347,6 +1382,12 @@ properties:
- fsl,imx93-14x14-evk # i.MX93 14x14 EVK Board
- const: fsl,imx93
+ - description: i.MX94 based Boards
+ items:
+ - enum:
+ - fsl,imx943-evk # i.MX943 EVK Board
+ - const: fsl,imx94
+
- description: i.MX95 based Boards
items:
- enum:
@@ -1374,12 +1415,16 @@ properties:
All SOM and CPU variants use the same device tree hence only one
compatible is needed. Bootloader disables all features not present
in the assembled SOC.
+ MBa91xxCA mainboard can be used as starterkit for the SOM
+ soldered on an adapter board or for the connector variant
+ to evaluate RGB display support.
MBa93xxCA mainboard can be used as starterkit for the SOM
soldered on an adapter board or for the connector variant
MBa93xxLA mainboard is a single board computer using the solderable
SOM variant
items:
- enum:
+ - tq,imx93-tqma9352-mba91xxca # TQ-Systems GmbH i.MX93 TQMa93xxCA/LA SOM on MBa91xxCA
- tq,imx93-tqma9352-mba93xxca # TQ-Systems GmbH i.MX93 TQMa93xxCA/LA SOM on MBa93xxCA
- tq,imx93-tqma9352-mba93xxla # TQ-Systems GmbH i.MX93 TQMa93xxLA SOM on MBa93xxLA SBC
- const: tq,imx93-tqma9352 # TQ-Systems GmbH i.MX93 TQMa93xxCA/LA SOM
@@ -1387,8 +1432,10 @@ properties:
- description: PHYTEC phyCORE-i.MX93 SoM based boards
items:
- - const: phytec,imx93-phyboard-segin # phyBOARD-Segin with i.MX93
- - const: phytec,imx93-phycore-som # phyCORE-i.MX93 SoM
+ - enum:
+ - phytec,imx93-phyboard-nash # phyBOARD-Nash-i.MX93
+ - phytec,imx93-phyboard-segin # phyBOARD-Segin with i.MX93
+ - const: phytec,imx93-phycore-som # phyCORE-i.MX93 SoM
- const: fsl,imx93
- description: Variscite VAR-SOM-MX93 based boards
@@ -1404,6 +1451,16 @@ properties:
- const: fsl,imx93
- description:
+ TQMa95xxSA is a series of SOMs featuring NXP i.MX95 SoC variants.
+ It has the SMARC form factor and is designed to be placed on
+ different carrier boards. MB-SMARC-2 is a carrier reference design.
+ items:
+ - enum:
+ - tq,imx95-tqma9596sa-mb-smarc-2 # TQ-Systems GmbH i.MX95 TQMa95xxSA SOM on MB-SMARC-2
+ - const: tq,imx95-tqma9596sa # TQ-Systems GmbH i.MX95 TQMa95xxSA SOM
+ - const: fsl,imx95
+
+ - description:
Freescale Vybrid Platform Device Tree Bindings
For the Vybrid SoC family all variants with DDR controller are supported,
diff --git a/Documentation/devicetree/bindings/arm/intel,socfpga.yaml b/Documentation/devicetree/bindings/arm/intel,socfpga.yaml
index 2ee0c740eb56..c75cd7d29f1a 100644
--- a/Documentation/devicetree/bindings/arm/intel,socfpga.yaml
+++ b/Documentation/devicetree/bindings/arm/intel,socfpga.yaml
@@ -25,6 +25,7 @@ properties:
items:
- enum:
- intel,socfpga-agilex5-socdk
+ - intel,socfpga-agilex5-socdk-nand
- const: intel,socfpga-agilex5
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/arm/mediatek.yaml b/Documentation/devicetree/bindings/arm/mediatek.yaml
index 108ae5e0185d..a7e0a72f6e4c 100644
--- a/Documentation/devicetree/bindings/arm/mediatek.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek.yaml
@@ -105,6 +105,10 @@ properties:
- bananapi,bpi-r4
- const: mediatek,mt7988a
- items:
+ - const: bananapi,bpi-r4-2g5
+ - const: bananapi,bpi-r4
+ - const: mediatek,mt7988a
+ - items:
- enum:
- mediatek,mt8127-moose
- const: mediatek,mt8127
@@ -285,6 +289,13 @@ properties:
- const: google,steelix-sku393218
- const: google,steelix
- const: mediatek,mt8186
+ - description: Google Ponyta
+ items:
+ - enum:
+ - google,ponyta-sku0
+ - google,ponyta-sku1
+ - const: google,ponyta
+ - const: mediatek,mt8186
- description: Google Rusty (Lenovo 100e Chromebook Gen 4)
items:
- const: google,steelix-sku196609
diff --git a/Documentation/devicetree/bindings/arm/psci.yaml b/Documentation/devicetree/bindings/arm/psci.yaml
index cbb012e217ab..7360a2849b5b 100644
--- a/Documentation/devicetree/bindings/arm/psci.yaml
+++ b/Documentation/devicetree/bindings/arm/psci.yaml
@@ -191,27 +191,27 @@ examples:
#size-cells = <0>;
#address-cells = <1>;
- CPU0: cpu@0 {
+ cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0>;
enable-method = "psci";
- power-domains = <&CPU_PD0>;
+ power-domains = <&cpu_pd0>;
power-domain-names = "psci";
};
- CPU1: cpu@1 {
+ cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x100>;
enable-method = "psci";
- power-domains = <&CPU_PD1>;
+ power-domains = <&cpu_pd1>;
power-domain-names = "psci";
};
idle-states {
- CPU_PWRDN: cpu-power-down {
+ cpu_pwrdn: cpu-power-down {
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x0000001>;
entry-latency-us = <10>;
@@ -222,7 +222,7 @@ examples:
domain-idle-states {
- CLUSTER_RET: cluster-retention {
+ cluster_ret: cluster-retention {
compatible = "domain-idle-state";
arm,psci-suspend-param = <0x1000011>;
entry-latency-us = <500>;
@@ -230,7 +230,7 @@ examples:
min-residency-us = <2000>;
};
- CLUSTER_PWRDN: cluster-power-down {
+ cluster_pwrdn: cluster-power-down {
compatible = "domain-idle-state";
arm,psci-suspend-param = <0x1000031>;
entry-latency-us = <2000>;
@@ -244,21 +244,21 @@ examples:
compatible = "arm,psci-1.0";
method = "smc";
- CPU_PD0: power-domain-cpu0 {
+ cpu_pd0: power-domain-cpu0 {
#power-domain-cells = <0>;
- domain-idle-states = <&CPU_PWRDN>;
- power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&cpu_pwrdn>;
+ power-domains = <&cluster_pd>;
};
- CPU_PD1: power-domain-cpu1 {
+ cpu_pd1: power-domain-cpu1 {
#power-domain-cells = <0>;
- domain-idle-states = <&CPU_PWRDN>;
- power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&cpu_pwrdn>;
+ power-domains = <&cluster_pd>;
};
- CLUSTER_PD: power-domain-cluster {
+ cluster_pd: power-domain-cluster {
#power-domain-cells = <0>;
- domain-idle-states = <&CLUSTER_RET>, <&CLUSTER_PWRDN>;
+ domain-idle-states = <&cluster_ret>, <&cluster_pwrdn>;
};
};
...
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index 618a87693ac1..56f78f0f3803 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -90,6 +90,7 @@ description: |
sm6350
sm6375
sm7125
+ sm7150
sm7225
sm7325
sm8150
@@ -1020,6 +1021,7 @@ properties:
- items:
- enum:
- sony,pdx201
+ - xiaomi,ginkgo
- xiaomi,laurel-sprout
- const: qcom,sm6125
@@ -1041,6 +1043,11 @@ properties:
- items:
- enum:
+ - google,sunfish
+ - const: qcom,sm7150
+
+ - items:
+ - enum:
- fairphone,fp4
- const: qcom,sm7225
@@ -1123,14 +1130,18 @@ properties:
- items:
- enum:
- - lenovo,thinkpad-t14s
+ - lenovo,thinkpad-t14s-lcd
+ - lenovo,thinkpad-t14s-oled
+ - const: lenovo,thinkpad-t14s
- const: qcom,x1e78100
- const: qcom,x1e80100
- items:
- enum:
- asus,vivobook-s15
+ - asus,zenbook-a14-ux3407ra
- dell,xps13-9345
+ - hp,elitebook-ultra-g1q
- hp,omnibook-x14
- lenovo,yoga-slim7x
- microsoft,romulus13
@@ -1141,6 +1152,7 @@ properties:
- items:
- enum:
+ - asus,zenbook-a14-ux3407qa
- qcom,x1p42100-crd
- const: qcom,x1p42100
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index 650fb833d96e..5772d905f390 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -946,6 +946,11 @@ properties:
- const: radxa,rock-5b
- const: rockchip,rk3588
+ - description: Radxa ROCK 5B+
+ items:
+ - const: radxa,rock-5b-plus
+ - const: rockchip,rk3588
+
- description: Radxa ROCK 5C
items:
- const: radxa,rock-5c
@@ -1047,6 +1052,11 @@ properties:
- const: rockchip,rk3399-evb
- const: rockchip,rk3399
+ - description: Rockchip RK3399 Industry Evaluation board
+ items:
+ - const: rockchip,rk3399-evb-ind
+ - const: rockchip,rk3399
+
- description: Rockchip RK3399 Sapphire standalone
items:
- const: rockchip,rk3399-sapphire
@@ -1057,6 +1067,11 @@ properties:
- const: rockchip,rk3399-sapphire-excavator
- const: rockchip,rk3399
+ - description: Rockchip RK3562 Evaluation board 2
+ items:
+ - const: rockchip,rk3562-evb2-v10
+ - const: rockchip,rk3562
+
- description: Rockchip RK3566 BOX Evaluation Demo board
items:
- const: rockchip,rk3566-box-demo
@@ -1074,7 +1089,9 @@ properties:
- description: Rockchip RK3588 Evaluation board
items:
- - const: rockchip,rk3588-evb1-v10
+ - enum:
+ - rockchip,rk3588-evb1-v10
+ - rockchip,rk3588-evb2-v10
- const: rockchip,rk3588
- description: Rockchip RK3588S Evaluation board
@@ -1109,6 +1126,24 @@ properties:
- rockchip,rv1126
- rockchip,rv1109
+ - description: Theobroma Systems PX30-Cobra
+ items:
+ - enum:
+ - tsd,px30-cobra-ltk050h3146w
+ - tsd,px30-cobra-ltk050h3146w-a2
+ - tsd,px30-cobra-ltk050h3148w
+ - tsd,px30-cobra-ltk500hd1829
+ - const: tsd,px30-cobra
+ - const: rockchip,px30
+
+ - description: Theobroma Systems PX30-PP1516
+ items:
+ - enum:
+ - tsd,px30-pp1516-ltk050h3146w-a2
+ - tsd,px30-pp1516-ltk050h3148w
+ - const: tsd,px30-pp1516
+ - const: rockchip,px30
+
- description: Theobroma Systems PX30-uQ7 with Haikou baseboard
items:
- const: tsd,px30-ringneck-haikou
diff --git a/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml b/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml
index 52016a141227..46c1af851be7 100644
--- a/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml
@@ -25,6 +25,7 @@ select:
- rockchip,rk3288-pmu
- rockchip,rk3368-pmu
- rockchip,rk3399-pmu
+ - rockchip,rk3562-pmu
- rockchip,rk3568-pmu
- rockchip,rk3576-pmu
- rockchip,rk3588-pmu
@@ -43,6 +44,7 @@ properties:
- rockchip,rk3288-pmu
- rockchip,rk3368-pmu
- rockchip,rk3399-pmu
+ - rockchip,rk3562-pmu
- rockchip,rk3568-pmu
- rockchip,rk3576-pmu
- rockchip,rk3588-pmu
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml
index fab29f95d8e6..b3be184c7e56 100644
--- a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml
+++ b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml
@@ -212,6 +212,14 @@ properties:
- samsung,exynos7-espresso # Samsung Exynos7 Espresso
- const: samsung,exynos7
+ - description: Exynos7870 based boards
+ items:
+ - enum:
+ - samsung,a2corelte # Samsung Galaxy A2 Core
+ - samsung,j6lte # Samsung Galaxy J6
+ - samsung,on7xelte # Samsung Galaxy J7 Prime
+ - const: samsung,exynos7870
+
- description: Exynos7885 based boards
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
index 5fee2f38ff25..408532504a24 100644
--- a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
@@ -44,6 +44,10 @@ properties:
- const: st,stm32h743
- items:
- enum:
+ - st,stm32h747i-disco
+ - const: st,stm32h747
+ - items:
+ - enum:
- st,stm32h750i-art-pi
- const: st,stm32h750
- items:
@@ -184,6 +188,11 @@ properties:
- const: phytec,phycore-stm32mp157c-som
- const: st,stm32mp157
+ - description: Ultratronik STM32MP1 SBC based Boards
+ items:
+ - const: ultratronik,stm32mp157c-ultra-fly-sbc
+ - const: st,stm32mp157
+
- description: ST STM32MP257 based Boards
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml
index f536cdd2c1a6..7807ea613258 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.yaml
+++ b/Documentation/devicetree/bindings/arm/sunxi.yaml
@@ -492,6 +492,11 @@ properties:
- const: lamobo,lamobo-r1
- const: allwinner,sun7i-a20
+ - description: Liontron H-A133L
+ items:
+ - const: liontron,h-a133l
+ - const: allwinner,sun50i-a100
+
- description: HAOYU Electronics Marsboard A10
items:
- const: haoyu,a10-marsboard
@@ -845,6 +850,11 @@ properties:
- const: allwinner,r7-tv-dongle
- const: allwinner,sun5i-a10s
+ - description: Radxa Cubie A5E
+ items:
+ - const: radxa,cubie-a5e
+ - const: allwinner,sun55i-a527
+
- description: Remix Mini PC
items:
- const: jide,remix-mini-pc
@@ -966,6 +976,11 @@ properties:
- const: hechuang,x96-mate
- const: allwinner,sun50i-h616
+ - description: X96Q Pro+
+ items:
+ - const: amediatech,x96q-pro-plus
+ - const: allwinner,sun55i-h728
+
- description: Xunlong OrangePi
items:
- const: xunlong,orangepi
@@ -1081,4 +1096,14 @@ properties:
- const: xunlong,orangepi-zero3
- const: allwinner,sun50i-h618
+ - description: YuzukiHD Avaota A1
+ items:
+ - const: yuzukihd,avaota-a1
+ - const: allwinner,sun55i-t527
+
+ - description: YuzukiHD Chameleon
+ items:
+ - const: yuzukihd,chameleon
+ - const: allwinner,sun50i-h618
+
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml
index 65e0ff1fdf1e..9cae3268a827 100644
--- a/Documentation/devicetree/bindings/arm/tegra.yaml
+++ b/Documentation/devicetree/bindings/arm/tegra.yaml
@@ -52,17 +52,14 @@ properties:
- nvidia,cardhu-a04
- const: nvidia,cardhu
- const: nvidia,tegra30
- - items:
- - const: asus,tf201
- - const: nvidia,tegra30
- - items:
- - const: asus,tf300t
- - const: nvidia,tegra30
- - items:
- - const: asus,tf300tg
- - const: nvidia,tegra30
- - items:
- - const: asus,tf700t
+ - description: ASUS Transformers Device family
+ items:
+ - enum:
+ - asus,tf201
+ - asus,tf300t
+ - asus,tf300tg
+ - asus,tf300tl
+ - asus,tf700t
- const: nvidia,tegra30
- description: LG Optimus 4X P880
items:
diff --git a/Documentation/devicetree/bindings/arm/ti/k3.yaml b/Documentation/devicetree/bindings/arm/ti/k3.yaml
index 18f155cd06c8..bf6003d8fb76 100644
--- a/Documentation/devicetree/bindings/arm/ti/k3.yaml
+++ b/Documentation/devicetree/bindings/arm/ti/k3.yaml
@@ -46,6 +46,7 @@ properties:
- description: K3 AM625 SoC
items:
- enum:
+ - beagle,am62-pocketbeagle2
- beagle,am625-beagleplay
- ti,am625-sk
- ti,am62-lp-sk
@@ -75,6 +76,30 @@ properties:
- const: toradex,verdin-am62 # Verdin AM62 Module
- const: ti,am625
+ - description: K3 AM62P5 SoC Toradex Verdin Modules and Carrier Boards
+ items:
+ - enum:
+ - toradex,verdin-am62p-nonwifi-dahlia # Verdin AM62P Module on Dahlia
+ - toradex,verdin-am62p-nonwifi-dev # Verdin AM62P Module on Verdin Development Board
+ - toradex,verdin-am62p-nonwifi-ivy # Verdin AM62P Module on Ivy
+ - toradex,verdin-am62p-nonwifi-mallow # Verdin AM62P Module on Mallow
+ - toradex,verdin-am62p-nonwifi-yavia # Verdin AM62P Module on Yavia
+ - const: toradex,verdin-am62p-nonwifi # Verdin AM62P Module without Wi-Fi / BT
+ - const: toradex,verdin-am62p # Verdin AM62P Module
+ - const: ti,am62p5
+
+ - description: K3 AM62P5 SoC Toradex Verdin Modules and Carrier Boards with Wi-Fi / BT
+ items:
+ - enum:
+ - toradex,verdin-am62p-wifi-dahlia # Verdin AM62P Wi-Fi / BT Module on Dahlia
+ - toradex,verdin-am62p-wifi-dev # Verdin AM62P Wi-Fi / BT M. on Verdin Development B.
+ - toradex,verdin-am62p-wifi-ivy # Verdin AM62P Wi-Fi / BT Module on Ivy
+ - toradex,verdin-am62p-wifi-mallow # Verdin AM62P Wi-Fi / BT Module on Mallow
+ - toradex,verdin-am62p-wifi-yavia # Verdin AM62P Wi-Fi / BT Module on Yavia
+ - const: toradex,verdin-am62p-wifi # Verdin AM62P Wi-Fi / BT Module
+ - const: toradex,verdin-am62p # Verdin AM62P Module
+ - const: ti,am62p5
+
- description: K3 AM642 SoC
items:
- enum:
@@ -139,6 +164,13 @@ properties:
- ti,j721s2-evm
- const: ti,j721s2
+ - description: K3 J721s2 SoC Phytec SoM based boards
+ items:
+ - enum:
+ - phytec,am68-phyboard-izar
+ - const: phytec,am68-phycore-som
+ - const: ti,j721s2
+
- description: K3 J722S SoC and Boards
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/vt8500.yaml b/Documentation/devicetree/bindings/arm/vt8500.yaml
index 5d5ad5a60451..fa47b8989bbf 100644
--- a/Documentation/devicetree/bindings/arm/vt8500.yaml
+++ b/Documentation/devicetree/bindings/arm/vt8500.yaml
@@ -7,14 +7,13 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: VIA/Wondermedia VT8500 Platforms
maintainers:
- - Tony Prisk <linux@prisktech.co.nz>
-description: test
+ - Alexey Charkov <alchark@gmail.com>
properties:
$nodename:
const: '/'
compatible:
- items:
+ oneOf:
- enum:
- via,vt8500
- wm,wm8505
@@ -22,4 +21,9 @@ properties:
- wm,wm8750
- wm,wm8850
+ - description: VIA APC Rock and Paper boards
+ items:
+ - const: via,apc-rock
+ - const: wm,wm8950
+
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/ata/ahci-dm816.txt b/Documentation/devicetree/bindings/ata/ahci-dm816.txt
deleted file mode 100644
index f8c535f3541f..000000000000
--- a/Documentation/devicetree/bindings/ata/ahci-dm816.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Device tree binding for the TI DM816 AHCI SATA Controller
----------------------------------------------------------
-
-Required properties:
- - compatible: must be "ti,dm816-ahci"
- - reg: physical base address and size of the register region used by
- the controller (as defined by the AHCI 1.1 standard)
- - interrupts: interrupt specifier (refer to the interrupt binding)
- - clocks: list of phandle and clock specifier pairs (or only
- phandles for clock providers with '0' defined for
- #clock-cells); two clocks must be specified: the functional
- clock and an external reference clock
-
-Example:
-
- sata: sata@4a140000 {
- compatible = "ti,dm816-ahci";
- reg = <0x4a140000 0x10000>;
- interrupts = <16>;
- clocks = <&sysclk5_ck>, <&sata_refclk>;
- };
diff --git a/Documentation/devicetree/bindings/ata/ahci-st.txt b/Documentation/devicetree/bindings/ata/ahci-st.txt
deleted file mode 100644
index 909c9935360d..000000000000
--- a/Documentation/devicetree/bindings/ata/ahci-st.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-STMicroelectronics STi SATA controller
-
-This binding describes a SATA device.
-
-Required properties:
- - compatible : Must be "st,ahci"
- - reg : Physical base addresses and length of register sets
- - interrupts : Interrupt associated with the SATA device
- - interrupt-names : Associated name must be; "hostc"
- - clocks : The phandle for the clock
- - clock-names : Associated name must be; "ahci_clk"
- - phys : The phandle for the PHY port
- - phy-names : Associated name must be; "ahci_phy"
-
-Optional properties:
- - resets : The power-down, soft-reset and power-reset lines of SATA IP
- - reset-names : Associated names must be; "pwr-dwn", "sw-rst" and "pwr-rst"
-
-Example:
-
- /* Example for stih407 family silicon */
- sata0: sata@9b20000 {
- compatible = "st,ahci";
- reg = <0x9b20000 0x1000>;
- interrupts = <GIC_SPI 159 IRQ_TYPE_NONE>;
- interrupt-names = "hostc";
- phys = <&phy_port0 PHY_TYPE_SATA>;
- phy-names = "ahci_phy";
- resets = <&powerdown STIH407_SATA0_POWERDOWN>,
- <&softreset STIH407_SATA0_SOFTRESET>,
- <&softreset STIH407_SATA0_PWR_SOFTRESET>;
- reset-names = "pwr-dwn", "sw-rst", "pwr-rst";
- clocks = <&clk_s_c0_flexgen CLK_ICN_REG>;
- clock-names = "ahci_clk";
- };
diff --git a/Documentation/devicetree/bindings/ata/apm,xgene-ahci.yaml b/Documentation/devicetree/bindings/ata/apm,xgene-ahci.yaml
new file mode 100644
index 000000000000..7dc942808656
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/apm,xgene-ahci.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/apm,xgene-ahci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: APM X-Gene 6.0 Gb/s SATA host controller
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+allOf:
+ - $ref: ahci-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - apm,xgene-ahci
+ - apm,xgene-ahci-pcie
+
+ reg:
+ minItems: 4
+ items:
+ - description: AHCI memory resource
+ - description: Host controller core
+ - description: Host controller diagnostic
+ - description: Host controller AXI
+ - description: Host controller MUX
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+ - phys
+ - phy-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sata@1a400000 {
+ compatible = "apm,xgene-ahci";
+ reg = <0x1a400000 0x1000>,
+ <0x1f220000 0x1000>,
+ <0x1f22d000 0x1000>,
+ <0x1f22e000 0x1000>,
+ <0x1f227000 0x1000>;
+ clocks = <&sataclk 0>;
+ dma-coherent;
+ interrupts = <0x0 0x87 0x4>;
+ phys = <&phy2 0>;
+ phy-names = "sata-phy";
+ };
diff --git a/Documentation/devicetree/bindings/ata/apm-xgene.txt b/Documentation/devicetree/bindings/ata/apm-xgene.txt
deleted file mode 100644
index 02e690a675db..000000000000
--- a/Documentation/devicetree/bindings/ata/apm-xgene.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-* APM X-Gene 6.0 Gb/s SATA host controller nodes
-
-SATA host controller nodes are defined to describe on-chip Serial ATA
-controllers. Each SATA controller (pair of ports) have its own node.
-
-Required properties:
-- compatible : Shall contain:
- * "apm,xgene-ahci"
-- reg : First memory resource shall be the AHCI memory
- resource.
- Second memory resource shall be the host controller
- core memory resource.
- Third memory resource shall be the host controller
- diagnostic memory resource.
- 4th memory resource shall be the host controller
- AXI memory resource.
- 5th optional memory resource shall be the host
- controller MUX memory resource if required.
-- interrupts : Interrupt-specifier for SATA host controller IRQ.
-- clocks : Reference to the clock entry.
-- phys : A list of phandles + phy-specifiers, one for each
- entry in phy-names.
-- phy-names : Should contain:
- * "sata-phy" for the SATA 6.0Gbps PHY
-
-Optional properties:
-- dma-coherent : Present if dma operations are coherent
-- status : Shall be "ok" if enabled or "disabled" if disabled.
- Default is "ok".
-
-Example:
- sataclk: sataclk {
- compatible = "fixed-clock";
- #clock-cells = <1>;
- clock-frequency = <100000000>;
- clock-output-names = "sataclk";
- };
-
- phy2: phy@1f22a000 {
- compatible = "apm,xgene-phy";
- reg = <0x0 0x1f22a000 0x0 0x100>;
- #phy-cells = <1>;
- };
-
- phy3: phy@1f23a000 {
- compatible = "apm,xgene-phy";
- reg = <0x0 0x1f23a000 0x0 0x100>;
- #phy-cells = <1>;
- };
-
- sata2: sata@1a400000 {
- compatible = "apm,xgene-ahci";
- reg = <0x0 0x1a400000 0x0 0x1000>,
- <0x0 0x1f220000 0x0 0x1000>,
- <0x0 0x1f22d000 0x0 0x1000>,
- <0x0 0x1f22e000 0x0 0x1000>,
- <0x0 0x1f227000 0x0 0x1000>;
- interrupts = <0x0 0x87 0x4>;
- dma-coherent;
- clocks = <&sataclk 0>;
- phys = <&phy2 0>;
- phy-names = "sata-phy";
- };
-
- sata3: sata@1a800000 {
- compatible = "apm,xgene-ahci-pcie";
- reg = <0x0 0x1a800000 0x0 0x1000>,
- <0x0 0x1f230000 0x0 0x1000>,
- <0x0 0x1f23d000 0x0 0x1000>,
- <0x0 0x1f23e000 0x0 0x1000>,
- <0x0 0x1f237000 0x0 0x1000>;
- interrupts = <0x0 0x88 0x4>;
- dma-coherent;
- clocks = <&sataclk 0>;
- phys = <&phy3 0>;
- phy-names = "sata-phy";
- };
diff --git a/Documentation/devicetree/bindings/ata/arasan,cf-spear1340.yaml b/Documentation/devicetree/bindings/ata/arasan,cf-spear1340.yaml
new file mode 100644
index 000000000000..4d7017452dda
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/arasan,cf-spear1340.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/arasan,cf-spear1340.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Arasan PATA Compact Flash Controller
+
+maintainers:
+ - Viresh Kumar <viresh.kumar@linaro.org>
+
+properties:
+ compatible:
+ const: arasan,cf-spear1340
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ arasan,broken-udma:
+ description: UDMA mode is unusable
+ type: boolean
+
+ arasan,broken-mwdma:
+ description: MWDMA mode is unusable
+ type: boolean
+
+ arasan,broken-pio:
+ description: PIO mode is unusable
+ type: boolean
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ items:
+ - const: data
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+allOf:
+ - if:
+ not:
+ required:
+ - arasan,broken-udma
+ - arasan,broken-mwdma
+ then:
+ required:
+ - dmas
+ - dma-names
+
+examples:
+ - |
+ cf@fc000000 {
+ compatible = "arasan,cf-spear1340";
+ reg = <0xfc000000 0x1000>;
+ interrupts = <12>;
+ dmas = <&dma 23>;
+ dma-names = "data";
+ };
diff --git a/Documentation/devicetree/bindings/ata/cavium,ebt3000-compact-flash.yaml b/Documentation/devicetree/bindings/ata/cavium,ebt3000-compact-flash.yaml
new file mode 100644
index 000000000000..349f289b81e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/cavium,ebt3000-compact-flash.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/cavium,ebt3000-compact-flash.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cavium Compact Flash
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+description:
+ The Cavium Compact Flash device is connected to the Octeon Boot Bus, and is
+ thus a child of the Boot Bus device. It can read and write industry standard
+ compact flash devices.
+
+properties:
+ compatible:
+ const: cavium,ebt3000-compact-flash
+
+ reg:
+ description: The base address of the CF chip select banks.
+ items:
+ - description: CF chip select bank 0
+ - description: CF chip select bank 1
+
+ cavium,bus-width:
+ description: The width of the connection to the CF devices.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [8, 16]
+
+ cavium,true-ide:
+ description: True IDE mode when present.
+ type: boolean
+
+ cavium,dma-engine-handle:
+ description: A phandle for the DMA Engine connected to this device.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ compact-flash@5,0 {
+ compatible = "cavium,ebt3000-compact-flash";
+ reg = <5 0 0x10000>, <6 0 0x10000>;
+ cavium,bus-width = <16>;
+ cavium,true-ide;
+ cavium,dma-engine-handle = <&dma0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/ata/cavium-compact-flash.txt b/Documentation/devicetree/bindings/ata/cavium-compact-flash.txt
deleted file mode 100644
index 3bacc8e0931e..000000000000
--- a/Documentation/devicetree/bindings/ata/cavium-compact-flash.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-* Compact Flash
-
-The Cavium Compact Flash device is connected to the Octeon Boot Bus,
-and is thus a child of the Boot Bus device. It can read and write
-industry standard compact flash devices.
-
-Properties:
-- compatible: "cavium,ebt3000-compact-flash";
-
- Compatibility with many Cavium evaluation boards.
-
-- reg: The base address of the CF chip select banks. Depending on
- the device configuration, there may be one or two banks.
-
-- cavium,bus-width: The width of the connection to the CF devices. Valid
- values are 8 and 16.
-
-- cavium,true-ide: Optional, if present the CF connection is in True IDE mode.
-
-- cavium,dma-engine-handle: Optional, a phandle for the DMA Engine connected
- to this device.
-
-Example:
- compact-flash@5,0 {
- compatible = "cavium,ebt3000-compact-flash";
- reg = <5 0 0x10000>, <6 0 0x10000>;
- cavium,bus-width = <16>;
- cavium,true-ide;
- cavium,dma-engine-handle = <&dma0>;
- };
diff --git a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
index 6ad78429dc74..c92341888a28 100644
--- a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
+++ b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Ceva AHCI SATA Controller
maintainers:
- - Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
description: |
diff --git a/Documentation/devicetree/bindings/ata/marvell,orion-sata.yaml b/Documentation/devicetree/bindings/ata/marvell,orion-sata.yaml
new file mode 100644
index 000000000000..f656ea9223d6
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/marvell,orion-sata.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/marvell,orion-sata.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Orion SATA
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+allOf:
+ - $ref: sata-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - marvell,orion-sata
+ - marvell,armada-370-sata
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 8
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: '0'
+ - const: '1'
+ - const: '2'
+ - const: '3'
+ - const: '4'
+ - const: '5'
+ - const: '6'
+ - const: '7'
+
+ interrupts:
+ maxItems: 1
+
+ nr-ports:
+ description:
+ Number of SATA ports in use.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 8
+
+ phys:
+ minItems: 1
+ maxItems: 8
+
+ phy-names:
+ minItems: 1
+ items:
+ - const: port0
+ - const: port1
+ - const: port2
+ - const: port3
+ - const: port4
+ - const: port5
+ - const: port6
+ - const: port7
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - nr-ports
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sata@80000 {
+ compatible = "marvell,orion-sata";
+ reg = <0x80000 0x5000>;
+ interrupts = <21>;
+ phys = <&sata_phy0>, <&sata_phy1>;
+ phy-names = "port0", "port1";
+ nr-ports = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/ata/marvell.txt b/Documentation/devicetree/bindings/ata/marvell.txt
deleted file mode 100644
index b460edd12766..000000000000
--- a/Documentation/devicetree/bindings/ata/marvell.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-* Marvell Orion SATA
-
-Required Properties:
-- compatibility : "marvell,orion-sata" or "marvell,armada-370-sata"
-- reg : Address range of controller
-- interrupts : Interrupt controller is using
-- nr-ports : Number of SATA ports in use.
-
-Optional Properties:
-- phys : List of phandles to sata phys
-- phy-names : Should be "0", "1", etc, one number per phandle
-
-Example:
-
- sata@80000 {
- compatible = "marvell,orion-sata";
- reg = <0x80000 0x5000>;
- interrupts = <21>;
- phys = <&sata_phy0>, <&sata_phy1>;
- phy-names = "0", "1";
- nr-ports = <2>;
- }
diff --git a/Documentation/devicetree/bindings/ata/pata-arasan.txt b/Documentation/devicetree/bindings/ata/pata-arasan.txt
deleted file mode 100644
index 872edc105680..000000000000
--- a/Documentation/devicetree/bindings/ata/pata-arasan.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-* ARASAN PATA COMPACT FLASH CONTROLLER
-
-Required properties:
-- compatible: "arasan,cf-spear1340"
-- reg: Address range of the CF registers
-- interrupt: Should contain the CF interrupt number
-- clock-frequency: Interface clock rate, in Hz, one of
- 25000000
- 33000000
- 40000000
- 50000000
- 66000000
- 75000000
- 100000000
- 125000000
- 150000000
- 166000000
- 200000000
-
-Optional properties:
-- arasan,broken-udma: if present, UDMA mode is unusable
-- arasan,broken-mwdma: if present, MWDMA mode is unusable
-- arasan,broken-pio: if present, PIO mode is unusable
-- dmas: one DMA channel, as described in bindings/dma/dma.txt
- required unless both UDMA and MWDMA mode are broken
-- dma-names: the corresponding channel name, must be "data"
-
-Example:
-
- cf@fc000000 {
- compatible = "arasan,cf-spear1340";
- reg = <0xfc000000 0x1000>;
- interrupt-parent = <&vic1>;
- interrupts = <12>;
- dmas = <&dma-controller 23>;
- dma-names = "data";
- };
diff --git a/Documentation/devicetree/bindings/ata/rockchip,dwc-ahci.yaml b/Documentation/devicetree/bindings/ata/rockchip,dwc-ahci.yaml
index 13eaa8d9a16e..b5ecaabfe2e2 100644
--- a/Documentation/devicetree/bindings/ata/rockchip,dwc-ahci.yaml
+++ b/Documentation/devicetree/bindings/ata/rockchip,dwc-ahci.yaml
@@ -20,6 +20,7 @@ select:
contains:
enum:
- rockchip,rk3568-dwc-ahci
+ - rockchip,rk3576-dwc-ahci
- rockchip,rk3588-dwc-ahci
required:
- compatible
@@ -29,6 +30,7 @@ properties:
items:
- enum:
- rockchip,rk3568-dwc-ahci
+ - rockchip,rk3576-dwc-ahci
- rockchip,rk3588-dwc-ahci
- const: snps,dwc-ahci
@@ -83,6 +85,7 @@ allOf:
contains:
enum:
- rockchip,rk3568-dwc-ahci
+ - rockchip,rk3576-dwc-ahci
then:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/ata/st,ahci.yaml b/Documentation/devicetree/bindings/ata/st,ahci.yaml
new file mode 100644
index 000000000000..6e8e4b4f3d6c
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/st,ahci.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/st,ahci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STi SATA controller
+
+maintainers:
+ - Patrice Chotard <patrice.chotard@foss.st.com>
+
+allOf:
+ - $ref: ahci-common.yaml#
+
+properties:
+ compatible:
+ const: st,ahci
+
+ interrupt-names:
+ items:
+ - const: hostc
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ahci_clk
+
+ resets:
+ items:
+ - description: Power-down line
+ - description: Soft-reset line
+ - description: Power-reset line
+
+ reset-names:
+ items:
+ - const: pwr-dwn
+ - const: sw-rst
+ - const: pwr-rst
+
+required:
+ - compatible
+ - interrupt-names
+ - phys
+ - phy-names
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/phy/phy.h>
+ #include <dt-bindings/reset/stih407-resets.h>
+ #include <dt-bindings/clock/stih407-clks.h>
+
+ sata@9b20000 {
+ compatible = "st,ahci";
+ reg = <0x9b20000 0x1000>;
+ interrupts = <GIC_SPI 159 IRQ_TYPE_NONE>;
+ interrupt-names = "hostc";
+ phys = <&phy_port0 PHY_TYPE_SATA>;
+ phy-names = "sata-phy";
+ resets = <&powerdown STIH407_SATA0_POWERDOWN>,
+ <&softreset STIH407_SATA0_SOFTRESET>,
+ <&softreset STIH407_SATA0_PWR_SOFTRESET>;
+ reset-names = "pwr-dwn", "sw-rst", "pwr-rst";
+ clocks = <&clk_s_c0_flexgen CLK_ICN_REG>;
+ clock-names = "ahci_clk";
+ };
diff --git a/Documentation/devicetree/bindings/ata/ti,dm816-ahci.yaml b/Documentation/devicetree/bindings/ata/ti,dm816-ahci.yaml
new file mode 100644
index 000000000000..d0ff9e78afe6
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/ti,dm816-ahci.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/ti,dm816-ahci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI DM816 AHCI SATA Controller
+
+maintainers:
+ - Bartosz Golaszewski <brgl@bgdev.pl>
+
+allOf:
+ - $ref: ahci-common.yaml#
+
+properties:
+ compatible:
+ const: ti,dm816-ahci
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: functional clock
+ - description: external reference clock
+
+ ti,hwmods:
+ const: sata
+
+required:
+ - compatible
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sata@4a140000 {
+ compatible = "ti,dm816-ahci";
+ reg = <0x4a140000 0x10000>;
+ interrupts = <16>;
+ clocks = <&sysclk5_ck>, <&sata_refclk>;
+ };
diff --git a/Documentation/devicetree/bindings/bus/microsoft,vmbus.yaml b/Documentation/devicetree/bindings/bus/microsoft,vmbus.yaml
index a8d40c766dcd..0bea4f5287ce 100644
--- a/Documentation/devicetree/bindings/bus/microsoft,vmbus.yaml
+++ b/Documentation/devicetree/bindings/bus/microsoft,vmbus.yaml
@@ -10,8 +10,8 @@ maintainers:
- Saurabh Sengar <ssengar@linux.microsoft.com>
description:
- VMBus is a software bus that implement the protocols for communication
- between the root or host OS and guest OSs (virtual machines).
+ VMBus is a software bus that implements the protocols for communication
+  between the root or host OS and guest OSes (virtual machines).
properties:
compatible:
@@ -25,9 +25,16 @@ properties:
'#size-cells':
const: 1
+ dma-coherent: true
+
+ interrupts:
+ maxItems: 1
+ description: Interrupt is used to report a message from the host.
+
required:
- compatible
- ranges
+ - interrupts
- '#address-cells'
- '#size-cells'
@@ -35,6 +42,8 @@ additionalProperties: false
examples:
- |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
soc {
#address-cells = <2>;
#size-cells = <1>;
@@ -49,6 +58,9 @@ examples:
#address-cells = <2>;
#size-cells = <1>;
ranges = <0x0f 0xf0000000 0x0f 0xf0000000 0x10000000>;
+ dma-coherent;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_PPI 2 IRQ_TYPE_EDGE_RISING>;
};
};
};
diff --git a/Documentation/devicetree/bindings/bus/nvidia,tegra210-aconnect.yaml b/Documentation/devicetree/bindings/bus/nvidia,tegra210-aconnect.yaml
index 26362c9006e2..81a65e9f93f1 100644
--- a/Documentation/devicetree/bindings/bus/nvidia,tegra210-aconnect.yaml
+++ b/Documentation/devicetree/bindings/bus/nvidia,tegra210-aconnect.yaml
@@ -21,6 +21,7 @@ properties:
- const: nvidia,tegra210-aconnect
- items:
- enum:
+ - nvidia,tegra264-aconnect
- nvidia,tegra234-aconnect
- nvidia,tegra186-aconnect
- nvidia,tegra194-aconnect
diff --git a/Documentation/devicetree/bindings/cache/andestech,ax45mp-cache.yaml b/Documentation/devicetree/bindings/cache/andestech,ax45mp-cache.yaml
index d2cbe49f4e15..4de5bb2e5f24 100644
--- a/Documentation/devicetree/bindings/cache/andestech,ax45mp-cache.yaml
+++ b/Documentation/devicetree/bindings/cache/andestech,ax45mp-cache.yaml
@@ -28,6 +28,9 @@ select:
properties:
compatible:
items:
+ - enum:
+ - andestech,qilai-ax45mp-cache
+ - renesas,r9a07g043f-ax45mp-cache
- const: andestech,ax45mp-cache
- const: cache
@@ -65,12 +68,27 @@ required:
- cache-size
- cache-unified
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: andestech,qilai-ax45mp-cache
+
+ then:
+ properties:
+ cache-sets:
+ const: 2048
+ cache-size:
+ const: 2097152
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
cache-controller@13400000 {
- compatible = "andestech,ax45mp-cache", "cache";
+ compatible = "renesas,r9a07g043f-ax45mp-cache", "andestech,ax45mp-cache",
+ "cache";
reg = <0x13400000 0x100000>;
interrupts = <508 IRQ_TYPE_LEVEL_HIGH>;
cache-line-size = <64>;
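
For the new andestech,qilai-ax45mp-cache entry, the added if/then clause pins the cache geometry to 2048 sets and 2 MiB. A hedged sketch of such a node (register range and cache-level are illustrative assumptions, not taken from a real board; interrupts omitted for brevity):

    cache-controller@e0500000 {
        compatible = "andestech,qilai-ax45mp-cache", "andestech,ax45mp-cache", "cache";
        reg = <0xe0500000 0x100000>;
        cache-line-size = <64>;
        cache-level = <2>;
        cache-sets = <2048>;
        cache-size = <2097152>;
        cache-unified;
    };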
diff --git a/Documentation/devicetree/bindings/cache/marvell,feroceon-cache.txt b/Documentation/devicetree/bindings/cache/marvell,feroceon-cache.txt
deleted file mode 100644
index 0d244b999d10..000000000000
--- a/Documentation/devicetree/bindings/cache/marvell,feroceon-cache.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-* Marvell Feroceon Cache
-
-Required properties:
-- compatible : Should be either "marvell,feroceon-cache" or
- "marvell,kirkwood-cache".
-
-Optional properties:
-- reg : Address of the L2 cache control register. Mandatory for
- "marvell,kirkwood-cache", not used by "marvell,feroceon-cache"
-
-
-Example:
- l2: l2-cache@20128 {
- compatible = "marvell,kirkwood-cache";
- reg = <0x20128 0x4>;
- };
diff --git a/Documentation/devicetree/bindings/cache/marvell,kirkwood-cache.yaml b/Documentation/devicetree/bindings/cache/marvell,kirkwood-cache.yaml
new file mode 100644
index 000000000000..2bfa3c29f6a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/cache/marvell,kirkwood-cache.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/cache/marvell,kirkwood-cache.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Feroceon/Kirkwood Cache
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+properties:
+ compatible:
+ enum:
+ - marvell,feroceon-cache
+ - marvell,kirkwood-cache
+
+ reg:
+ maxItems: 1
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: marvell,kirkwood-cache
+ then:
+ required:
+ - reg
+ else:
+ properties:
+ reg: false
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ l2-cache@20128 {
+ compatible = "marvell,kirkwood-cache";
+ reg = <0x20128 0x4>;
+ };
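
Per the else branch above, the plain Feroceon variant must not carry a reg property, so its node reduces to the compatible string alone; a minimal sketch:

    l2-cache {
        compatible = "marvell,feroceon-cache";
    };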
diff --git a/Documentation/devicetree/bindings/cache/marvell,tauros2-cache.txt b/Documentation/devicetree/bindings/cache/marvell,tauros2-cache.txt
deleted file mode 100644
index 31af1cbb60bd..000000000000
--- a/Documentation/devicetree/bindings/cache/marvell,tauros2-cache.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-* Marvell Tauros2 Cache
-
-Required properties:
-- compatible : Should be "marvell,tauros2-cache".
-- marvell,tauros2-cache-features : Specify the features supported for the
- tauros2 cache.
- The features including
- CACHE_TAUROS2_PREFETCH_ON (1 << 0)
- CACHE_TAUROS2_LINEFILL_BURST8 (1 << 1)
- The definition can be found at
- arch/arm/include/asm/hardware/cache-tauros2.h
-
-Example:
- L2: l2-cache {
- compatible = "marvell,tauros2-cache";
- marvell,tauros2-cache-features = <0x3>;
- };
diff --git a/Documentation/devicetree/bindings/cache/marvell,tauros2-cache.yaml b/Documentation/devicetree/bindings/cache/marvell,tauros2-cache.yaml
new file mode 100644
index 000000000000..9f7f0d031631
--- /dev/null
+++ b/Documentation/devicetree/bindings/cache/marvell,tauros2-cache.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/cache/marvell,tauros2-cache.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Tauros2 Cache
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+properties:
+ compatible:
+ const: marvell,tauros2-cache
+
+ marvell,tauros2-cache-features:
+ description: >
+ Specify the features supported for the tauros2 cache. The features include:
+
+ - CACHE_TAUROS2_PREFETCH_ON (1 << 0)
+ - CACHE_TAUROS2_LINEFILL_BURST8 (1 << 1)
+
+ The definition can be found at arch/arm/include/asm/hardware/cache-tauros2.h
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 0x3
+
+required:
+ - compatible
+ - marvell,tauros2-cache-features
+
+additionalProperties: false
+
+examples:
+ - |
+ l2-cache {
+ compatible = "marvell,tauros2-cache";
+ marvell,tauros2-cache-features = <0x3>;
+ };
diff --git a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
index e5effbb4a606..37e3ebd55487 100644
--- a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
+++ b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
@@ -40,6 +40,7 @@ properties:
- qcom,sm8450-llcc
- qcom,sm8550-llcc
- qcom,sm8650-llcc
+ - qcom,sm8750-llcc
- qcom,x1e80100-llcc
reg:
@@ -274,6 +275,7 @@ allOf:
- qcom,sm8450-llcc
- qcom,sm8550-llcc
- qcom,sm8650-llcc
+ - qcom,sm8750-llcc
then:
properties:
reg:
diff --git a/Documentation/devicetree/bindings/cache/sifive,ccache0.yaml b/Documentation/devicetree/bindings/cache/sifive,ccache0.yaml
index 7e8cebe21584..579bacb66f34 100644
--- a/Documentation/devicetree/bindings/cache/sifive,ccache0.yaml
+++ b/Documentation/devicetree/bindings/cache/sifive,ccache0.yaml
@@ -39,6 +39,7 @@ properties:
- const: cache
- items:
- enum:
+ - eswin,eic7700-l3-cache
- starfive,jh7100-ccache
- starfive,jh7110-ccache
- const: sifive,ccache0
@@ -55,10 +56,10 @@ properties:
enum: [2, 3]
cache-sets:
- enum: [1024, 2048]
+ enum: [1024, 2048, 4096]
cache-size:
- const: 2097152
+ enum: [2097152, 4194304]
cache-unified: true
@@ -89,6 +90,7 @@ allOf:
compatible:
contains:
enum:
+ - eswin,eic7700-l3-cache
- sifive,fu740-c000-ccache
- starfive,jh7100-ccache
- starfive,jh7110-ccache
@@ -112,6 +114,22 @@ allOf:
properties:
compatible:
contains:
+ const: eswin,eic7700-l3-cache
+
+ then:
+ properties:
+ cache-size:
+ const: 4194304
+
+ else:
+ properties:
+ cache-size:
+ const: 2097152
+
+ - if:
+ properties:
+ compatible:
+ contains:
enum:
- sifive,fu740-c000-ccache
- starfive,jh7100-ccache
@@ -122,7 +140,15 @@ allOf:
cache-sets:
const: 2048
- else:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - microchip,mpfs-ccache
+ - sifive,fu540-c000-ccache
+
+ then:
properties:
cache-sets:
const: 1024
@@ -131,6 +157,18 @@ allOf:
properties:
compatible:
contains:
+ enum:
+ - eswin,eic7700-l3-cache
+
+ then:
+ properties:
+ cache-sets:
+ const: 4096
+
+ - if:
+ properties:
+ compatible:
+ contains:
const: sifive,ccache0
then:
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun8i-a83t-de2-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun8i-a83t-de2-clk.yaml
index 70369bd633e4..7fcd55d468d4 100644
--- a/Documentation/devicetree/bindings/clock/allwinner,sun8i-a83t-de2-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun8i-a83t-de2-clk.yaml
@@ -25,6 +25,7 @@ properties:
- const: allwinner,sun50i-a64-de2-clk
- const: allwinner,sun50i-h5-de2-clk
- const: allwinner,sun50i-h6-de3-clk
+ - const: allwinner,sun50i-h616-de33-clk
- items:
- const: allwinner,sun8i-r40-de2-clk
- const: allwinner,sun8i-h3-de2-clk
diff --git a/Documentation/devicetree/bindings/clock/altr_socfpga.txt b/Documentation/devicetree/bindings/clock/altr_socfpga.txt
deleted file mode 100644
index f72e80e0dade..000000000000
--- a/Documentation/devicetree/bindings/clock/altr_socfpga.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Device Tree Clock bindings for Altera's SoCFPGA platform
-
-This binding uses the common clock binding[1].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-- compatible : shall be one of the following:
- "altr,socfpga-pll-clock" - for a PLL clock
- "altr,socfpga-perip-clock" - The peripheral clock divided from the
- PLL clock.
- "altr,socfpga-gate-clk" - Clocks that directly feed peripherals and
- can get gated.
-
-- reg : shall be the control register offset from CLOCK_MANAGER's base for the clock.
-- clocks : shall be the input parent clock phandle for the clock. This is
- either an oscillator or a pll output.
-- #clock-cells : from common clock binding, shall be set to 0.
-
-Optional properties:
-- fixed-divider : If clocks have a fixed divider value, use this property.
-- clk-gate : For "socfpga-gate-clk", clk-gate contains the gating register
- and the bit index.
-- div-reg : For "socfpga-gate-clk" and "socfpga-periph-clock", div-reg contains
- the divider register, bit shift, and width.
-- clk-phase : For the sdmmc_clk, contains the value of the clock phase that controls
- the SDMMC CIU clock. The first value is the clk_sample(smpsel), and the second
- value is the cclk_in_drv(drvsel). The clk-phase is used to enable the correct
- hold/delay times that is needed for the SD/MMC CIU clock. The values of both
- can be 0-315 degrees, in 45 degree increments.
diff --git a/Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.txt b/Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.txt
deleted file mode 100644
index 4acfc8f641b6..000000000000
--- a/Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Broadcom BCM2835 auxiliary peripheral support
-
-This binding uses the common clock binding:
- Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-The auxiliary peripherals (UART, SPI1, and SPI2) have a small register
-area controlling clock gating to the peripherals, and providing an IRQ
-status register.
-
-Required properties:
-- compatible: Should be "brcm,bcm2835-aux"
-- #clock-cells: Should be <1>. The permitted clock-specifier values can be
- found in include/dt-bindings/clock/bcm2835-aux.h
-- reg: Specifies base physical address and size of the registers
-- clocks: The parent clock phandle
-
-Example:
-
- clocks: cprman@7e101000 {
- compatible = "brcm,bcm2835-cprman";
- #clock-cells = <1>;
- reg = <0x7e101000 0x2000>;
- clocks = <&clk_osc>;
- };
-
- aux: aux@7e215004 {
- compatible = "brcm,bcm2835-aux";
- #clock-cells = <1>;
- reg = <0x7e215000 0x8>;
- clocks = <&clocks BCM2835_CLOCK_VPU>;
- };
diff --git a/Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.yaml b/Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.yaml
new file mode 100644
index 000000000000..0f4050ffa41c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/brcm,bcm2835-aux-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM2835 auxiliary peripheral clock
+
+maintainers:
+ - Stefan Wahren <wahrenst@gmx.net>
+ - Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com>
+
+description:
+ The auxiliary peripherals (UART, SPI1, and SPI2) have a small register
+ area controlling clock gating to the peripherals, and providing an IRQ
+ status register.
+
+properties:
+ compatible:
+ const: brcm,bcm2835-aux
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/bcm2835.h>
+ clock@7e215000 {
+ compatible = "brcm,bcm2835-aux";
+ reg = <0x7e215000 0x8>;
+ #clock-cells = <1>;
+ clocks = <&clocks BCM2835_CLOCK_VPU>;
+ };
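
A consumer picks one of the gated aux clocks by index; a hedged sketch of the aux UART doing so (node address/size are simplified placeholders and interrupt wiring is left out; the BCM2835_AUX_CLOCK_UART specifier is assumed to come from dt-bindings/clock/bcm2835-aux.h):

    #include <dt-bindings/clock/bcm2835-aux.h>

    serial@7e215040 {
        compatible = "brcm,bcm2835-aux-uart";
        reg = <0x7e215040 0x40>;
        clocks = <&aux BCM2835_AUX_CLOCK_UART>;
    };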
diff --git a/Documentation/devicetree/bindings/clock/fsl,vf610-ccm.yaml b/Documentation/devicetree/bindings/clock/fsl,vf610-ccm.yaml
new file mode 100644
index 000000000000..29ae5be51acf
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/fsl,vf610-ccm.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/fsl,vf610-ccm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock for Freescale Vybrid VF610 SOC
+
+description:
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. See include/dt-bindings/clock/vf610-clock.h
+  for the full list of VF610 clock IDs.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,vf610-ccm
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ items:
+ - description: external crystal oscillator 32KHz, recommended
+ - description: external crystal oscillator 24MHz, recommended
+ - description: audio
+ - description: enet
+ minItems: 2
+
+ clock-names:
+ items:
+ - const: sxosc
+ - const: fxosc
+    - const: audio_ext
+    - const: enet_ext
+ minItems: 2
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@4006b000 {
+ compatible = "fsl,vf610-ccm";
+ reg = <0x4006b000 0x1000>;
+ #clock-cells = <1>;
+ clocks = <&sxosc>, <&fxosc>;
+ clock-names = "sxosc", "fxosc";
+ };
+
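
The consumer-side example from the old text binding (see the vf610-clock.txt removal further down) still illustrates how a peripheral selects its clock by ID from include/dt-bindings/clock/vf610-clock.h:

    uart1: serial@40028000 {
        compatible = "fsl,vf610-uart";
        reg = <0x40028000 0x1000>;
        interrupts = <0 62 0x04>;
        clocks = <&clks VF610_CLK_UART1>;
        clock-names = "ipg";
    };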
diff --git a/Documentation/devicetree/bindings/clock/maxim,max77686.txt b/Documentation/devicetree/bindings/clock/maxim,max77686.txt
deleted file mode 100644
index c10849efb444..000000000000
--- a/Documentation/devicetree/bindings/clock/maxim,max77686.txt
+++ /dev/null
@@ -1,114 +0,0 @@
-Binding for Maxim MAX77686/MAX77802/MAX77620 32k clock generator block
-
-This is a part of device tree bindings of MAX77686/MAX77802/MAX77620
-multi-function device. More information can be found in MFD DT binding
-doc as follows:
- bindings/mfd/max77686.txt for MAX77686 and
- bindings/mfd/max77802.txt for MAX77802 and
- bindings/mfd/max77620.txt for MAX77620.
-
-The MAX77686 contains three 32.768khz clock outputs that can be controlled
-(gated/ungated) over I2C. Clocks are defined as preprocessor macros in
-dt-bindings/clock/maxim,max77686.h.
-
-
-The MAX77802 contains two 32.768khz clock outputs that can be controlled
-(gated/ungated) over I2C. Clocks are defined as preprocessor macros in
-dt-bindings/clock/maxim,max77802.h.
-
-The MAX77686 contains one 32.768khz clock outputs that can be controlled
-(gated/ungated) over I2C. Clocks are defined as preprocessor macros in
-dt-bindings/clock/maxim,max77620.h.
-
-Following properties should be presend in main device node of the MFD chip.
-
-Required properties:
-
-- #clock-cells: from common clock binding; shall be set to 1.
-
-Optional properties:
-- clock-output-names: From common clock binding.
-
-Each clock is assigned an identifier and client nodes can use this identifier
-to specify the clock which they consume. Following indices are allowed:
- - 0: 32khz_ap clock (max77686, max77802), 32khz_out0 (max77620)
- - 1: 32khz_cp clock (max77686, max77802),
- - 2: 32khz_pmic clock (max77686).
-
-Clocks are defined as preprocessor macros in above dt-binding header for
-respective chips.
-
-Example:
-
-1. With MAX77686:
-
-#include <dt-bindings/clock/maxim,max77686.h>
-/* ... */
-
- Node of the MFD chip
- max77686: max77686@9 {
- compatible = "maxim,max77686";
- interrupt-parent = <&wakeup_eint>;
- interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
- reg = <0x09>;
- #clock-cells = <1>;
-
- /* ... */
- };
-
- Clock consumer node
-
- foo@0 {
- compatible = "bar,foo";
- /* ... */
- clock-names = "my-clock";
- clocks = <&max77686 MAX77686_CLK_PMIC>;
- };
-
-2. With MAX77802:
-
-#include <dt-bindings/clock/maxim,max77802.h>
-/* ... */
-
- Node of the MFD chip
- max77802: max77802@9 {
- compatible = "maxim,max77802";
- interrupt-parent = <&wakeup_eint>;
- interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
- reg = <0x09>;
- #clock-cells = <1>;
-
- /* ... */
- };
-
- Clock consumer node
-
- foo@0 {
- compatible = "bar,foo";
- /* ... */
- clock-names = "my-clock";
- clocks = <&max77802 MAX77802_CLK_32K_AP>;
- };
-
-
-3. With MAX77620:
-
-#include <dt-bindings/clock/maxim,max77620.h>
-/* ... */
-
- Node of the MFD chip
- max77620: max77620@3c {
- compatible = "maxim,max77620";
- reg = <0x3c>;
- #clock-cells = <1>;
- /* ... */
- };
-
- Clock consumer node
-
- foo@0 {
- compatible = "bar,foo";
- /* ... */
- clock-names = "my-clock";
- clocks = <&max77620 MAX77620_CLK_32K_OUT0>;
- };
diff --git a/Documentation/devicetree/bindings/clock/qcom,videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,videocc.yaml
index 340c7e5cf980..5f7738d6835c 100644
--- a/Documentation/devicetree/bindings/clock/qcom,videocc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,videocc.yaml
@@ -14,6 +14,7 @@ description: |
domains on Qualcomm SoCs.
See also::
+ include/dt-bindings/clock/qcom,sm6350-videocc.h
include/dt-bindings/clock/qcom,videocc-sc7180.h
include/dt-bindings/clock/qcom,videocc-sc7280.h
include/dt-bindings/clock/qcom,videocc-sdm845.h
@@ -26,6 +27,7 @@ properties:
- qcom,sc7180-videocc
- qcom,sc7280-videocc
- qcom,sdm845-videocc
+ - qcom,sm6350-videocc
- qcom,sm8150-videocc
- qcom,sm8250-videocc
@@ -91,6 +93,24 @@ allOf:
properties:
compatible:
enum:
+ - qcom,sm6350-videocc
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Video AHB clock from GCC
+ - description: Board XO source
+ - description: Sleep Clock source
+ clock-names:
+ items:
+ - const: iface
+ - const: bi_tcxo
+ - const: sleep_clk
+
+ - if:
+ properties:
+ compatible:
+ enum:
- qcom,sm8150-videocc
then:
properties:
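
A hedged sketch of an SM6350 video clock controller node wired to the three inputs listed above (the register address and the exact GCC/RPMh clock specifiers are assumptions, not copied from an SM6350 device tree):

    videocc: clock-controller@aaf0000 {
        compatible = "qcom,sm6350-videocc";
        reg = <0x0aaf0000 0x10000>;
        clocks = <&gcc GCC_VIDEO_AHB_CLK>,
                 <&rpmhcc RPMH_CXO_CLK>,
                 <&sleep_clk>;
        clock-names = "iface", "bi_tcxo", "sleep_clk";
        #clock-cells = <1>;
        #reset-cells = <1>;
        #power-domain-cells = <1>;
    };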
diff --git a/Documentation/devicetree/bindings/clock/renesas,rzv2h-cpg.yaml b/Documentation/devicetree/bindings/clock/renesas,rzv2h-cpg.yaml
index c3fe76abd549..f261445bf341 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rzv2h-cpg.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,rzv2h-cpg.yaml
@@ -4,13 +4,13 @@
$id: http://devicetree.org/schemas/clock/renesas,rzv2h-cpg.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Renesas RZ/{G3E,V2H(P)} Clock Pulse Generator (CPG)
+title: Renesas RZ/{G3E,V2H(P),V2N} Clock Pulse Generator (CPG)
maintainers:
- Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
description:
- On Renesas RZ/{G3E,V2H(P)} SoCs, the CPG (Clock Pulse Generator) handles
+ On Renesas RZ/{G3E,V2H(P),V2N} SoCs, the CPG (Clock Pulse Generator) handles
generation and control of clock signals for the IP modules, generation and
control of resets, and control over booting, low power consumption and power
supply domains.
@@ -19,6 +19,7 @@ properties:
compatible:
enum:
- renesas,r9a09g047-cpg # RZ/G3E
+ - renesas,r9a09g056-cpg # RZ/V2N
- renesas,r9a09g057-cpg # RZ/V2H
reg:
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml
index 3330b2727474..6961a68098f4 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml
@@ -8,6 +8,7 @@ title: Samsung ExynosAuto v920 SoC clock controller
maintainers:
- Sunyeal Hong <sunyeal.hong@samsung.com>
+ - Shin Son <shin.son@samsung.com>
- Chanwoo Choi <cw00.choi@samsung.com>
- Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -32,6 +33,9 @@ properties:
compatible:
enum:
- samsung,exynosautov920-cmu-top
+ - samsung,exynosautov920-cmu-cpucl0
+ - samsung,exynosautov920-cmu-cpucl1
+ - samsung,exynosautov920-cmu-cpucl2
- samsung,exynosautov920-cmu-peric0
- samsung,exynosautov920-cmu-peric1
- samsung,exynosautov920-cmu-misc
@@ -74,6 +78,71 @@ allOf:
compatible:
contains:
enum:
+ - samsung,exynosautov920-cmu-cpucl0
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (38.4 MHz)
+ - description: CMU_CPUCL0 SWITCH clock (from CMU_TOP)
+ - description: CMU_CPUCL0 CLUSTER clock (from CMU_TOP)
+ - description: CMU_CPUCL0 DBG clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: switch
+ - const: cluster
+ - const: dbg
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynosautov920-cmu-cpucl1
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (38.4 MHz)
+ - description: CMU_CPUCL1 SWITCH clock (from CMU_TOP)
+ - description: CMU_CPUCL1 CLUSTER clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: switch
+ - const: cluster
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynosautov920-cmu-cpucl2
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (38.4 MHz)
+ - description: CMU_CPUCL2 SWITCH clock (from CMU_TOP)
+ - description: CMU_CPUCL2 CLUSTER clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: switch
+ - const: cluster
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- samsung,exynosautov920-cmu-peric0
- samsung,exynosautov920-cmu-peric1
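
A hedged sketch of a CMU_CPUCL0 node consuming the four inputs described above (the unit address and the CMU_TOP clock specifier macros are assumptions for illustration only):

    cmu_cpucl0: clock-controller@1ec00000 {
        compatible = "samsung,exynosautov920-cmu-cpucl0";
        reg = <0x1ec00000 0x8000>;
        #clock-cells = <1>;
        clocks = <&xtcxo>,
                 <&cmu_top DOUT_CLKCMU_CPUCL0_SWITCH>,
                 <&cmu_top DOUT_CLKCMU_CPUCL0_CLUSTER>,
                 <&cmu_top DOUT_CLKCMU_CPUCL0_DBG>;
        clock-names = "oscclk", "switch", "cluster", "dbg";
    };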
diff --git a/Documentation/devicetree/bindings/clock/sophgo,cv1800-clk.yaml b/Documentation/devicetree/bindings/clock/sophgo,cv1800-clk.yaml
index 59ef41adb539..379ce3e9e391 100644
--- a/Documentation/devicetree/bindings/clock/sophgo,cv1800-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/sophgo,cv1800-clk.yaml
@@ -11,10 +11,18 @@ maintainers:
properties:
compatible:
- enum:
- - sophgo,cv1800-clk
- - sophgo,cv1810-clk
- - sophgo,sg2000-clk
+ oneOf:
+ - enum:
+ - sophgo,cv1800b-clk
+ - sophgo,cv1812h-clk
+ - sophgo,sg2000-clk
+ - items:
+ - const: sophgo,sg2002-clk
+ - const: sophgo,sg2000-clk
+ - const: sophgo,cv1800-clk
+ deprecated: true
+ - const: sophgo,cv1810-clk
+ deprecated: true
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/clock/sophgo,sg2044-clk.yaml b/Documentation/devicetree/bindings/clock/sophgo,sg2044-clk.yaml
new file mode 100644
index 000000000000..272e58bdb62c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/sophgo,sg2044-clk.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/sophgo,sg2044-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo SG2044 Clock Controller
+
+maintainers:
+ - Inochi Amaoto <inochiama@gmail.com>
+
+description: |
+ The Sophgo SG2044 clock controller requires an external oscillator
+ as input clock.
+
+ All available clocks are defined as preprocessor macros in
+ include/dt-bindings/clock/sophgo,sg2044-clk.h
+
+properties:
+ compatible:
+ const: sophgo,sg2044-clk
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: fpll0
+ - description: fpll1
+ - description: fpll2
+ - description: dpll0
+ - description: dpll1
+ - description: dpll2
+ - description: dpll3
+ - description: dpll4
+ - description: dpll5
+ - description: dpll6
+ - description: dpll7
+ - description: mpll0
+ - description: mpll1
+ - description: mpll2
+ - description: mpll3
+ - description: mpll4
+ - description: mpll5
+
+ clock-names:
+ items:
+ - const: fpll0
+ - const: fpll1
+ - const: fpll2
+ - const: dpll0
+ - const: dpll1
+ - const: dpll2
+ - const: dpll3
+ - const: dpll4
+ - const: dpll5
+ - const: dpll6
+ - const: dpll7
+ - const: mpll0
+ - const: mpll1
+ - const: mpll2
+ - const: mpll3
+ - const: mpll4
+ - const: mpll5
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sophgo,sg2044-pll.h>
+
+ clock-controller@50002000 {
+ compatible = "sophgo,sg2044-clk";
+ reg = <0x50002000 0x1000>;
+ #clock-cells = <1>;
+ clocks = <&syscon CLK_FPLL0>, <&syscon CLK_FPLL1>,
+ <&syscon CLK_FPLL2>, <&syscon CLK_DPLL0>,
+ <&syscon CLK_DPLL1>, <&syscon CLK_DPLL2>,
+ <&syscon CLK_DPLL3>, <&syscon CLK_DPLL4>,
+ <&syscon CLK_DPLL5>, <&syscon CLK_DPLL6>,
+ <&syscon CLK_DPLL7>, <&syscon CLK_MPLL0>,
+ <&syscon CLK_MPLL1>, <&syscon CLK_MPLL2>,
+ <&syscon CLK_MPLL3>, <&syscon CLK_MPLL4>,
+ <&syscon CLK_MPLL5>;
+ clock-names = "fpll0", "fpll1", "fpll2", "dpll0",
+ "dpll1", "dpll2", "dpll3", "dpll4",
+ "dpll5", "dpll6", "dpll7", "mpll0",
+ "mpll1", "mpll2", "mpll3", "mpll4",
+ "mpll5";
+ };
diff --git a/Documentation/devicetree/bindings/clock/spacemit,k1-pll.yaml b/Documentation/devicetree/bindings/clock/spacemit,k1-pll.yaml
new file mode 100644
index 000000000000..06bafd68c00a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/spacemit,k1-pll.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/spacemit,k1-pll.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SpacemiT K1 PLL
+
+maintainers:
+ - Haylen Chu <heylenay@4d2.org>
+
+properties:
+ compatible:
+ const: spacemit,k1-pll
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description: External 24MHz oscillator
+
+ spacemit,mpmu:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the "Main PMU (MPMU)" syscon. It is used to check PLL
+ lock status.
+
+ "#clock-cells":
+ const: 1
+ description:
+ See <dt-bindings/clock/spacemit,k1-syscon.h> for valid indices.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - spacemit,mpmu
+ - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@d4090000 {
+ compatible = "spacemit,k1-pll";
+ reg = <0xd4090000 0x1000>;
+ clocks = <&vctcxo_24m>;
+ spacemit,mpmu = <&sysctl_mpmu>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt b/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt
deleted file mode 100644
index cac24ee10b72..000000000000
--- a/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-STMicroelectronics STM32H7 Reset and Clock Controller
-=====================================================
-
-The RCC IP is both a reset and a clock controller.
-
-Please refer to clock-bindings.txt for common clock controller binding usage.
-Please also refer to reset.txt for common reset controller binding usage.
-
-Required properties:
-- compatible: Should be:
- "st,stm32h743-rcc"
-
-- reg: should be register base and length as documented in the
- datasheet
-
-- #reset-cells: 1, see below
-
-- #clock-cells : from common clock binding; shall be set to 1
-
-- clocks: External oscillator clock phandle
- - high speed external clock signal (HSE)
- - low speed external clock signal (LSE)
- - external I2S clock (I2S_CKIN)
-
-Optional properties:
-- st,syscfg: phandle for pwrcfg, mandatory to disable/enable backup domain
- write protection (RTC clock).
-
-Example:
-
- rcc: reset-clock-controller@58024400 {
- compatible = "st,stm32h743-rcc", "st,stm32-rcc";
- reg = <0x58024400 0x400>;
- #reset-cells = <1>;
- #clock-cells = <1>;
- clocks = <&clk_hse>, <&clk_lse>, <&clk_i2s_ckin>;
-
- st,syscfg = <&pwrcfg>;
-};
-
-The peripheral clock consumer should specify the desired clock by
-having the clock ID in its "clocks" phandle cell.
-
-Example:
-
- timer5: timer@40000c00 {
- compatible = "st,stm32-timer";
- reg = <0x40000c00 0x400>;
- interrupts = <50>;
- clocks = <&rcc TIM5_CK>;
- };
-
-Specifying softreset control of devices
-=======================================
-
-Device nodes should specify the reset channel required in their "resets"
-property, containing a phandle to the reset device node and an index specifying
-which channel to use.
-The index is the bit number within the RCC registers bank, starting from RCC
-base address.
-It is calculated as: index = register_offset / 4 * 32 + bit_offset.
-Where bit_offset is the bit offset within the register.
-
-For example, for CRC reset:
- crc = AHB4RSTR_offset / 4 * 32 + CRCRST_bit_offset = 0x88 / 4 * 32 + 19 = 1107
-
-Example:
-
- timer2 {
- resets = <&rcc STM32H7_APB1L_RESET(TIM2)>;
- };
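
Spelling out the index arithmetic above for the CRC case, with the computed value as it would appear in a consumer node:

    /*
     * index = register_offset / 4 * 32 + bit_offset
     * CRC:   AHB4RSTR offset 0x88, CRCRST bit 19
     *        0x88 / 4 * 32 + 19 = 34 * 32 + 19 = 1107
     */
    crc {
        resets = <&rcc 1107>;
    };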
diff --git a/Documentation/devicetree/bindings/clock/thead,th1520-clk-ap.yaml b/Documentation/devicetree/bindings/clock/thead,th1520-clk-ap.yaml
index 0129bd0ba4b3..9d058c00ab3d 100644
--- a/Documentation/devicetree/bindings/clock/thead,th1520-clk-ap.yaml
+++ b/Documentation/devicetree/bindings/clock/thead,th1520-clk-ap.yaml
@@ -8,7 +8,8 @@ title: T-HEAD TH1520 AP sub-system clock controller
description: |
The T-HEAD TH1520 AP sub-system clock controller configures the
- CPU, DPU, GMAC and TEE PLLs.
+  CPU, DPU, GMAC and TEE PLLs. Additionally, the VO sub-system clock
+  controller provides the clock gates for the HDMI, MIPI and the GPU.
SoC reference manual
https://openbeagle.org/beaglev-ahead/beaglev-ahead/-/blob/main/docs/TH1520%20System%20User%20Manual.pdf
@@ -20,14 +21,24 @@ maintainers:
properties:
compatible:
- const: thead,th1520-clk-ap
+ enum:
+ - thead,th1520-clk-ap
+ - thead,th1520-clk-vo
reg:
maxItems: 1
clocks:
items:
- - description: main oscillator (24MHz)
+ - description: |
+ One input clock:
+ - For "thead,th1520-clk-ap": the clock input must be the 24 MHz
+ main oscillator.
+ - For "thead,th1520-clk-vo": the clock input must be the VIDEO_PLL,
+ which is configured by the AP clock controller. According to the
+ TH1520 manual, VIDEO_PLL is a Silicon Creations Sigma-Delta PLL
+ (integer PLL) typically running at 792 MHz (FOUTPOSTDIV), with
+ a maximum FOUTVCO of 2376 MHz.
"#clock-cells":
const: 1
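
A hedged sketch of the VO clock controller fed by VIDEO_PLL from the AP controller (the register address is a placeholder; the CLK_VIDEO_PLL specifier is assumed to come from dt-bindings/clock/thead,th1520-clk-ap.h):

    clk_vo: clock-controller@ff010000 {
        compatible = "thead,th1520-clk-vo";
        reg = <0xff010000 0x1000>;
        clocks = <&clk CLK_VIDEO_PLL>;
        #clock-cells = <1>;
    };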
diff --git a/Documentation/devicetree/bindings/clock/vf610-clock.txt b/Documentation/devicetree/bindings/clock/vf610-clock.txt
deleted file mode 100644
index 109ffa3a5b66..000000000000
--- a/Documentation/devicetree/bindings/clock/vf610-clock.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-* Clock bindings for Freescale Vybrid VF610 SOC
-
-Required properties:
-- compatible: Should be "fsl,vf610-ccm"
-- reg: Address and length of the register set
-- #clock-cells: Should be <1>
-
-Optional properties:
-- clocks: list of clock identifiers which are external input clocks to the
- given clock controller. Please refer the next section to find
- the input clocks for a given controller.
-- clock-names: list of names of clocks which are external input clocks to the
- given clock controller.
-
-Input clocks for top clock controller:
- - sxosc (external crystal oscillator 32KHz, recommended)
- - fxosc (external crystal oscillator 24MHz, recommended)
- - audio_ext
- - enet_ext
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/vf610-clock.h
-for the full list of VF610 clock IDs.
-
-Examples:
-
-clks: ccm@4006b000 {
- compatible = "fsl,vf610-ccm";
- reg = <0x4006b000 0x1000>;
- #clock-cells = <1>;
- clocks = <&sxosc>, <&fxosc>;
- clock-names = "sxosc", "fxosc";
-};
-
-uart1: serial@40028000 {
- compatible = "fsl,vf610-uart";
- reg = <0x40028000 0x1000>;
- interrupts = <0 62 0x04>;
- clocks = <&clks VF610_CLK_UART1>;
- clock-names = "ipg";
-};
diff --git a/Documentation/devicetree/bindings/counter/fsl,ftm-quaddec.yaml b/Documentation/devicetree/bindings/counter/fsl,ftm-quaddec.yaml
new file mode 100644
index 000000000000..384ca63b64d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/counter/fsl,ftm-quaddec.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/counter/fsl,ftm-quaddec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: FlexTimer Quadrature decoder counter
+
+description:
+ Exposes a simple counter for the quadrature decoder mode.
+
+maintainers:
+ - Frank Li <Frank.li@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,ftm-quaddec
+
+ reg:
+ maxItems: 1
+
+ big-endian: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ counter@29d0000 {
+ compatible = "fsl,ftm-quaddec";
+ reg = <0x29d0000 0x10000>;
+ big-endian;
+ };
diff --git a/Documentation/devicetree/bindings/counter/ftm-quaddec.txt b/Documentation/devicetree/bindings/counter/ftm-quaddec.txt
deleted file mode 100644
index 4d18cd722074..000000000000
--- a/Documentation/devicetree/bindings/counter/ftm-quaddec.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-FlexTimer Quadrature decoder counter
-
-This driver exposes a simple counter for the quadrature decoder mode.
-
-Required properties:
-- compatible: Must be "fsl,ftm-quaddec".
-- reg: Must be set to the memory region of the flextimer.
-
-Optional property:
-- big-endian: Access the device registers in big-endian mode.
-
-Example:
- counter0: counter@29d0000 {
- compatible = "fsl,ftm-quaddec";
- reg = <0x0 0x29d0000 0x0 0x10000>;
- big-endian;
- status = "disabled";
- };
diff --git a/Documentation/devicetree/bindings/cpu/cpu-topology.txt b/Documentation/devicetree/bindings/cpu/cpu-topology.txt
deleted file mode 100644
index 9bd530a35d14..000000000000
--- a/Documentation/devicetree/bindings/cpu/cpu-topology.txt
+++ /dev/null
@@ -1,553 +0,0 @@
-===========================================
-CPU topology binding description
-===========================================
-
-===========================================
-1 - Introduction
-===========================================
-
-In a SMP system, the hierarchy of CPUs is defined through three entities that
-are used to describe the layout of physical CPUs in the system:
-
-- socket
-- cluster
-- core
-- thread
-
-The bottom hierarchy level sits at core or thread level depending on whether
-symmetric multi-threading (SMT) is supported or not.
-
-For instance in a system where CPUs support SMT, "cpu" nodes represent all
-threads existing in the system and map to the hierarchy level "thread" above.
-In systems where SMT is not supported "cpu" nodes represent all cores present
-in the system and map to the hierarchy level "core" above.
-
-CPU topology bindings allow one to associate cpu nodes with hierarchical groups
-corresponding to the system hierarchy; syntactically they are defined as device
-tree nodes.
-
-Currently, only ARM/RISC-V intend to use this cpu topology binding but it may be
-used for any other architecture as well.
-
-The cpu nodes, as per bindings defined in [4], represent the devices that
-correspond to physical CPUs and are to be mapped to the hierarchy levels.
-
-A topology description containing phandles to cpu nodes that are not compliant
-with bindings standardized in [4] is therefore considered invalid.
-
-===========================================
-2 - cpu-map node
-===========================================
-
-The ARM/RISC-V CPU topology is defined within the cpu-map node, which is a direct
-child of the cpus node and provides a container where the actual topology
-nodes are listed.
-
-- cpu-map node
-
- Usage: Optional - On SMP systems provide CPUs topology to the OS.
- Uniprocessor systems do not require a topology
- description and therefore should not define a
- cpu-map node.
-
- Description: The cpu-map node is just a container node where its
- subnodes describe the CPU topology.
-
- Node name must be "cpu-map".
-
- The cpu-map node's parent node must be the cpus node.
-
- The cpu-map node's child nodes can be:
-
- - one or more cluster nodes or
- - one or more socket nodes in a multi-socket system
-
- Any other configuration is considered invalid.
-
-The cpu-map node can only contain 4 types of child nodes:
-
-- socket node
-- cluster node
-- core node
-- thread node
-
-whose bindings are described in paragraph 3.
-
-The nodes describing the CPU topology (socket/cluster/core/thread) can
-only be defined within the cpu-map node and every core/thread in the
-system must be defined within the topology. Any other configuration is
-invalid and therefore must be ignored.
-
-===========================================
-2.1 - cpu-map child nodes naming convention
-===========================================
-
-cpu-map child nodes must follow a naming convention where the node name
-must be "socketN", "clusterN", "coreN", "threadN" depending on the node type
-(ie socket/cluster/core/thread) (where N = {0, 1, ...} is the node number; nodes
-which are siblings within a single common parent node must be given a unique and
-sequential N value, starting from 0).
-cpu-map child nodes which do not share a common parent node can have the same
-name (ie same number N as other cpu-map child nodes at different device tree
-levels) since name uniqueness will be guaranteed by the device tree hierarchy.
-
-===========================================
-3 - socket/cluster/core/thread node bindings
-===========================================
-
-Bindings for socket/cluster/cpu/thread nodes are defined as follows:
-
-- socket node
-
- Description: must be declared within a cpu-map node, one node
- per physical socket in the system. A system can
- contain single or multiple physical socket.
- The association of sockets and NUMA nodes is beyond
- the scope of this bindings, please refer [2] for
- NUMA bindings.
-
- This node is optional for a single socket system.
-
- The socket node name must be "socketN" as described in 2.1 above.
- A socket node can not be a leaf node.
-
- A socket node's child nodes must be one or more cluster nodes.
-
- Any other configuration is considered invalid.
-
-- cluster node
-
- Description: must be declared within a cpu-map node, one node
- per cluster. A system can contain several layers of
- clustering within a single physical socket and cluster
- nodes can be contained in parent cluster nodes.
-
- The cluster node name must be "clusterN" as described in 2.1 above.
- A cluster node can not be a leaf node.
-
- A cluster node's child nodes must be:
-
- - one or more cluster nodes; or
- - one or more core nodes
-
- Any other configuration is considered invalid.
-
-- core node
-
- Description: must be declared in a cluster node, one node per core in
- the cluster. If the system does not support SMT, core
- nodes are leaf nodes, otherwise they become containers of
- thread nodes.
-
- The core node name must be "coreN" as described in 2.1 above.
-
- A core node must be a leaf node if SMT is not supported.
-
- Properties for core nodes that are leaf nodes:
-
- - cpu
- Usage: required
- Value type: <phandle>
- Definition: a phandle to the cpu node that corresponds to the
- core node.
-
- If a core node is not a leaf node (CPUs supporting SMT) a core node's
- child nodes can be:
-
- - one or more thread nodes
-
- Any other configuration is considered invalid.
-
-- thread node
-
- Description: must be declared in a core node, one node per thread
- in the core if the system supports SMT. Thread nodes are
- always leaf nodes in the device tree.
-
- The thread node name must be "threadN" as described in 2.1 above.
-
- A thread node must be a leaf node.
-
- A thread node must contain the following property:
-
- - cpu
- Usage: required
- Value type: <phandle>
- Definition: a phandle to the cpu node that corresponds to
- the thread node.
-
-===========================================
-4 - Example dts
-===========================================
-
-Example 1 (ARM 64-bit, 16-cpu system, two clusters of clusters in a single
-physical socket):
-
-cpus {
- #size-cells = <0>;
- #address-cells = <2>;
-
- cpu-map {
- socket0 {
- cluster0 {
- cluster0 {
- core0 {
- thread0 {
- cpu = <&CPU0>;
- };
- thread1 {
- cpu = <&CPU1>;
- };
- };
-
- core1 {
- thread0 {
- cpu = <&CPU2>;
- };
- thread1 {
- cpu = <&CPU3>;
- };
- };
- };
-
- cluster1 {
- core0 {
- thread0 {
- cpu = <&CPU4>;
- };
- thread1 {
- cpu = <&CPU5>;
- };
- };
-
- core1 {
- thread0 {
- cpu = <&CPU6>;
- };
- thread1 {
- cpu = <&CPU7>;
- };
- };
- };
- };
-
- cluster1 {
- cluster0 {
- core0 {
- thread0 {
- cpu = <&CPU8>;
- };
- thread1 {
- cpu = <&CPU9>;
- };
- };
- core1 {
- thread0 {
- cpu = <&CPU10>;
- };
- thread1 {
- cpu = <&CPU11>;
- };
- };
- };
-
- cluster1 {
- core0 {
- thread0 {
- cpu = <&CPU12>;
- };
- thread1 {
- cpu = <&CPU13>;
- };
- };
- core1 {
- thread0 {
- cpu = <&CPU14>;
- };
- thread1 {
- cpu = <&CPU15>;
- };
- };
- };
- };
- };
- };
-
- CPU0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x0 0x0>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x0 0x1>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU2: cpu@100 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x0 0x100>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU3: cpu@101 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x0 0x101>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU4: cpu@10000 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x0 0x10000>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU5: cpu@10001 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x0 0x10001>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU6: cpu@10100 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x0 0x10100>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU7: cpu@10101 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x0 0x10101>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU8: cpu@100000000 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x1 0x0>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU9: cpu@100000001 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x1 0x1>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU10: cpu@100000100 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x1 0x100>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU11: cpu@100000101 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x1 0x101>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU12: cpu@100010000 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x1 0x10000>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU13: cpu@100010001 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x1 0x10001>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU14: cpu@100010100 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x1 0x10100>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-
- CPU15: cpu@100010101 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x1 0x10101>;
- enable-method = "spin-table";
- cpu-release-addr = <0 0x20000000>;
- };
-};
-
-Example 2 (ARM 32-bit, dual-cluster, 8-cpu system, no SMT):
-
-cpus {
- #size-cells = <0>;
- #address-cells = <1>;
-
- cpu-map {
- cluster0 {
- core0 {
- cpu = <&CPU0>;
- };
- core1 {
- cpu = <&CPU1>;
- };
- core2 {
- cpu = <&CPU2>;
- };
- core3 {
- cpu = <&CPU3>;
- };
- };
-
- cluster1 {
- core0 {
- cpu = <&CPU4>;
- };
- core1 {
- cpu = <&CPU5>;
- };
- core2 {
- cpu = <&CPU6>;
- };
- core3 {
- cpu = <&CPU7>;
- };
- };
- };
-
- CPU0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a15";
- reg = <0x0>;
- };
-
- CPU1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a15";
- reg = <0x1>;
- };
-
- CPU2: cpu@2 {
- device_type = "cpu";
- compatible = "arm,cortex-a15";
- reg = <0x2>;
- };
-
- CPU3: cpu@3 {
- device_type = "cpu";
- compatible = "arm,cortex-a15";
- reg = <0x3>;
- };
-
- CPU4: cpu@100 {
- device_type = "cpu";
- compatible = "arm,cortex-a7";
- reg = <0x100>;
- };
-
- CPU5: cpu@101 {
- device_type = "cpu";
- compatible = "arm,cortex-a7";
- reg = <0x101>;
- };
-
- CPU6: cpu@102 {
- device_type = "cpu";
- compatible = "arm,cortex-a7";
- reg = <0x102>;
- };
-
- CPU7: cpu@103 {
- device_type = "cpu";
- compatible = "arm,cortex-a7";
- reg = <0x103>;
- };
-};
-
-Example 3: HiFive Unleashed (RISC-V 64 bit, 4 core system)
-
-{
- #address-cells = <2>;
- #size-cells = <2>;
- compatible = "sifive,fu540g", "sifive,fu500";
- model = "sifive,hifive-unleashed-a00";
-
- ...
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
- cpu-map {
- socket0 {
- cluster0 {
- core0 {
- cpu = <&CPU1>;
- };
- core1 {
- cpu = <&CPU2>;
- };
- core2 {
- cpu0 = <&CPU2>;
- };
- core3 {
- cpu0 = <&CPU3>;
- };
- };
- };
- };
-
- CPU1: cpu@1 {
- device_type = "cpu";
- compatible = "sifive,rocket0", "riscv";
- reg = <0x1>;
- }
-
- CPU2: cpu@2 {
- device_type = "cpu";
- compatible = "sifive,rocket0", "riscv";
- reg = <0x2>;
- }
- CPU3: cpu@3 {
- device_type = "cpu";
- compatible = "sifive,rocket0", "riscv";
- reg = <0x3>;
- }
- CPU4: cpu@4 {
- device_type = "cpu";
- compatible = "sifive,rocket0", "riscv";
- reg = <0x4>;
- }
- }
-};
-===============================================================================
-[1] ARM Linux kernel documentation
- Documentation/devicetree/bindings/arm/cpus.yaml
-[2] Devicetree NUMA binding description
- Documentation/devicetree/bindings/numa.txt
-[3] RISC-V Linux kernel documentation
- Documentation/devicetree/bindings/riscv/cpus.yaml
-[4] https://www.devicetree.org/specifications/
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
deleted file mode 100644
index e0a4ba599abc..000000000000
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
+++ /dev/null
@@ -1,250 +0,0 @@
-Binding for MediaTek's CPUFreq driver
-=====================================
-
-Required properties:
-- clocks: A list of phandle + clock-specifier pairs for the clocks listed in clock names.
-- clock-names: Should contain the following:
- "cpu" - The multiplexer for clock input of CPU cluster.
- "intermediate" - A parent of "cpu" clock which is used as "intermediate" clock
- source (usually MAINPLL) when the original CPU PLL is under
- transition and not stable yet.
- Please refer to Documentation/devicetree/bindings/clock/clock-bindings.txt for
- generic clock consumer properties.
-- operating-points-v2: Please refer to Documentation/devicetree/bindings/opp/opp-v2.yaml
- for detail.
-- proc-supply: Regulator for Vproc of CPU cluster.
-
-Optional properties:
-- sram-supply: Regulator for Vsram of CPU cluster. When present, the cpufreq driver
- needs to do "voltage tracking" to step by step scale up/down Vproc and
- Vsram to fit SoC specific needs. When absent, the voltage scaling
- flow is handled by hardware, hence no software "voltage tracking" is
- needed.
-- mediatek,cci:
- Used to confirm the link status between cpufreq and mediatek cci. Because
- cpufreq and mediatek cci could share the same regulator in some MediaTek SoCs.
- To prevent the issue of high frequency and low voltage, we need to use this
- property to make sure mediatek cci is ready.
- For details of mediatek cci, please refer to
- Documentation/devicetree/bindings/interconnect/mediatek,cci.yaml
-- #cooling-cells:
- For details, please refer to
- Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml
-
-Example 1 (MT7623 SoC):
-
- cpu_opp_table: opp_table {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp-598000000 {
- opp-hz = /bits/ 64 <598000000>;
- opp-microvolt = <1050000>;
- };
-
- opp-747500000 {
- opp-hz = /bits/ 64 <747500000>;
- opp-microvolt = <1050000>;
- };
-
- opp-1040000000 {
- opp-hz = /bits/ 64 <1040000000>;
- opp-microvolt = <1150000>;
- };
-
- opp-1196000000 {
- opp-hz = /bits/ 64 <1196000000>;
- opp-microvolt = <1200000>;
- };
-
- opp-1300000000 {
- opp-hz = /bits/ 64 <1300000000>;
- opp-microvolt = <1300000>;
- };
- };
-
- cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a7";
- reg = <0x0>;
- clocks = <&infracfg CLK_INFRA_CPUSEL>,
- <&apmixedsys CLK_APMIXED_MAINPLL>;
- clock-names = "cpu", "intermediate";
- operating-points-v2 = <&cpu_opp_table>;
- #cooling-cells = <2>;
- };
- cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a7";
- reg = <0x1>;
- operating-points-v2 = <&cpu_opp_table>;
- };
- cpu@2 {
- device_type = "cpu";
- compatible = "arm,cortex-a7";
- reg = <0x2>;
- operating-points-v2 = <&cpu_opp_table>;
- };
- cpu@3 {
- device_type = "cpu";
- compatible = "arm,cortex-a7";
- reg = <0x3>;
- operating-points-v2 = <&cpu_opp_table>;
- };
-
-Example 2 (MT8173 SoC):
- cpu_opp_table_a: opp_table_a {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp-507000000 {
- opp-hz = /bits/ 64 <507000000>;
- opp-microvolt = <859000>;
- };
-
- opp-702000000 {
- opp-hz = /bits/ 64 <702000000>;
- opp-microvolt = <908000>;
- };
-
- opp-1001000000 {
- opp-hz = /bits/ 64 <1001000000>;
- opp-microvolt = <983000>;
- };
-
- opp-1105000000 {
- opp-hz = /bits/ 64 <1105000000>;
- opp-microvolt = <1009000>;
- };
-
- opp-1183000000 {
- opp-hz = /bits/ 64 <1183000000>;
- opp-microvolt = <1028000>;
- };
-
- opp-1404000000 {
- opp-hz = /bits/ 64 <1404000000>;
- opp-microvolt = <1083000>;
- };
-
- opp-1508000000 {
- opp-hz = /bits/ 64 <1508000000>;
- opp-microvolt = <1109000>;
- };
-
- opp-1573000000 {
- opp-hz = /bits/ 64 <1573000000>;
- opp-microvolt = <1125000>;
- };
- };
-
- cpu_opp_table_b: opp_table_b {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp-507000000 {
- opp-hz = /bits/ 64 <507000000>;
- opp-microvolt = <828000>;
- };
-
- opp-702000000 {
- opp-hz = /bits/ 64 <702000000>;
- opp-microvolt = <867000>;
- };
-
- opp-1001000000 {
- opp-hz = /bits/ 64 <1001000000>;
- opp-microvolt = <927000>;
- };
-
- opp-1209000000 {
- opp-hz = /bits/ 64 <1209000000>;
- opp-microvolt = <968000>;
- };
-
- opp-1404000000 {
- opp-hz = /bits/ 64 <1007000000>;
- opp-microvolt = <1028000>;
- };
-
- opp-1612000000 {
- opp-hz = /bits/ 64 <1612000000>;
- opp-microvolt = <1049000>;
- };
-
- opp-1807000000 {
- opp-hz = /bits/ 64 <1807000000>;
- opp-microvolt = <1089000>;
- };
-
- opp-1989000000 {
- opp-hz = /bits/ 64 <1989000000>;
- opp-microvolt = <1125000>;
- };
- };
-
- cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x000>;
- enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
- clocks = <&infracfg CLK_INFRA_CA53SEL>,
- <&apmixedsys CLK_APMIXED_MAINPLL>;
- clock-names = "cpu", "intermediate";
- operating-points-v2 = <&cpu_opp_table_a>;
- };
-
- cpu1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x001>;
- enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
- clocks = <&infracfg CLK_INFRA_CA53SEL>,
- <&apmixedsys CLK_APMIXED_MAINPLL>;
- clock-names = "cpu", "intermediate";
- operating-points-v2 = <&cpu_opp_table_a>;
- };
-
- cpu2: cpu@100 {
- device_type = "cpu";
- compatible = "arm,cortex-a72";
- reg = <0x100>;
- enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
- clocks = <&infracfg CLK_INFRA_CA72SEL>,
- <&apmixedsys CLK_APMIXED_MAINPLL>;
- clock-names = "cpu", "intermediate";
- operating-points-v2 = <&cpu_opp_table_b>;
- };
-
- cpu3: cpu@101 {
- device_type = "cpu";
- compatible = "arm,cortex-a72";
- reg = <0x101>;
- enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
- clocks = <&infracfg CLK_INFRA_CA72SEL>,
- <&apmixedsys CLK_APMIXED_MAINPLL>;
- clock-names = "cpu", "intermediate";
- operating-points-v2 = <&cpu_opp_table_b>;
- };
-
- &cpu0 {
- proc-supply = <&mt6397_vpca15_reg>;
- };
-
- &cpu1 {
- proc-supply = <&mt6397_vpca15_reg>;
- };
-
- &cpu2 {
- proc-supply = <&da9211_vcpu_reg>;
- sram-supply = <&mt6397_vsramca7_reg>;
- };
-
- &cpu3 {
- proc-supply = <&da9211_vcpu_reg>;
- sram-supply = <&mt6397_vsramca7_reg>;
- };
diff --git a/Documentation/devicetree/bindings/crypto/amd,ccp-seattle-v1a.yaml b/Documentation/devicetree/bindings/crypto/amd,ccp-seattle-v1a.yaml
new file mode 100644
index 000000000000..32bf3a1c3b42
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/amd,ccp-seattle-v1a.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/amd,ccp-seattle-v1a.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AMD Cryptographic Coprocessor (ccp)
+
+maintainers:
+ - Tom Lendacky <thomas.lendacky@amd.com>
+
+properties:
+ compatible:
+ const: amd,ccp-seattle-v1a
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ dma-coherent: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ crypto@e0100000 {
+ compatible = "amd,ccp-seattle-v1a";
+ reg = <0xe0100000 0x10000>;
+ interrupts = <0 3 4>;
+ dma-coherent;
+ };
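Editor's note: the converted example above gives reg as a single address/size pair; on a SoC bus described with #address-cells = <2> and #size-cells = <2>, as in the amd-ccp.txt example removed below, the same device would use 64-bit cells. A minimal sketch under that assumption:

    bus {
        #address-cells = <2>;
        #size-cells = <2>;

        crypto@e0100000 {
            compatible = "amd,ccp-seattle-v1a";
            reg = <0x0 0xe0100000 0x0 0x10000>;
            interrupts = <0 3 4>;
            dma-coherent;
        };
    };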
diff --git a/Documentation/devicetree/bindings/crypto/amd-ccp.txt b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
deleted file mode 100644
index d87579d63da6..000000000000
--- a/Documentation/devicetree/bindings/crypto/amd-ccp.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-* AMD Cryptographic Coprocessor driver (ccp)
-
-Required properties:
-- compatible: Should be "amd,ccp-seattle-v1a"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the CCP interrupt
-
-Optional properties:
-- dma-coherent: Present if dma operations are coherent
-
-Example:
- ccp@e0100000 {
- compatible = "amd,ccp-seattle-v1a";
- reg = <0 0xe0100000 0 0x10000>;
- interrupt-parent = <&gic>;
- interrupts = <0 3 4>;
- };
diff --git a/Documentation/devicetree/bindings/crypto/artpec6-crypto.txt b/Documentation/devicetree/bindings/crypto/artpec6-crypto.txt
deleted file mode 100644
index d9cca4875bd6..000000000000
--- a/Documentation/devicetree/bindings/crypto/artpec6-crypto.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-Axis crypto engine with PDMA interface.
-
-Required properties:
-- compatible : Should be one of the following strings:
- "axis,artpec6-crypto" for the version in the Axis ARTPEC-6 SoC
- "axis,artpec7-crypto" for the version in the Axis ARTPEC-7 SoC.
-- reg: Base address and size for the PDMA register area.
-- interrupts: Interrupt handle for the PDMA interrupt line.
-
-Example:
-
-crypto@f4264000 {
- compatible = "axis,artpec6-crypto";
- reg = <0xf4264000 0x1000>;
- interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
-};
diff --git a/Documentation/devicetree/bindings/crypto/axis,artpec6-crypto.yaml b/Documentation/devicetree/bindings/crypto/axis,artpec6-crypto.yaml
new file mode 100644
index 000000000000..c91f81e3c39e
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/axis,artpec6-crypto.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/axis,artpec6-crypto.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Axis ARTPEC6 crypto engine with PDMA interface
+
+maintainers:
+ - Lars Persson <lars.persson@axis.com>
+
+properties:
+ compatible:
+ enum:
+ - axis,artpec6-crypto
+ - axis,artpec7-crypto
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ crypto@f4264000 {
+ compatible = "axis,artpec6-crypto";
+ reg = <0xf4264000 0x1000>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/crypto/brcm,spu-crypto.txt b/Documentation/devicetree/bindings/crypto/brcm,spu-crypto.txt
deleted file mode 100644
index 29b6007568eb..000000000000
--- a/Documentation/devicetree/bindings/crypto/brcm,spu-crypto.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-The Broadcom Secure Processing Unit (SPU) hardware supports symmetric
-cryptographic offload for Broadcom SoCs. A SoC may have multiple SPU hardware
-blocks.
-
-Required properties:
-- compatible: Should be one of the following:
- brcm,spum-crypto - for devices with SPU-M hardware
- brcm,spu2-crypto - for devices with SPU2 hardware
- brcm,spu2-v2-crypto - for devices with enhanced SPU2 hardware features like SHA3
- and Rabin Fingerprint support
- brcm,spum-nsp-crypto - for the Northstar Plus variant of the SPU-M hardware
-
-- reg: Should contain SPU registers location and length.
-- mboxes: The mailbox channel to be used to communicate with the SPU.
- Mailbox channels correspond to DMA rings on the device.
-
-Example:
- crypto@612d0000 {
- compatible = "brcm,spum-crypto";
- reg = <0 0x612d0000 0 0x900>;
- mboxes = <&pdc0 0>;
- };
diff --git a/Documentation/devicetree/bindings/crypto/brcm,spum-crypto.yaml b/Documentation/devicetree/bindings/crypto/brcm,spum-crypto.yaml
new file mode 100644
index 000000000000..9a5fb61727fa
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/brcm,spum-crypto.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/brcm,spum-crypto.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom SPU Crypto Offload
+
+maintainers:
+ - Rob Rice <rob.rice@broadcom.com>
+
+description:
+ The Broadcom Secure Processing Unit (SPU) hardware supports symmetric
+ cryptographic offload for Broadcom SoCs. A SoC may have multiple SPU hardware
+ blocks.
+
+properties:
+ compatible:
+ enum:
+ - brcm,spum-crypto
+ - brcm,spu2-crypto
+ - brcm,spu2-v2-crypto # enhanced SPU2 hardware features like SHA3 and Rabin Fingerprint support
+ - brcm,spum-nsp-crypto # Northstar Plus variant of the SPU-M hardware
+
+ reg:
+ maxItems: 1
+
+ mboxes:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - mboxes
+
+additionalProperties: false
+
+examples:
+ - |
+ crypto@612d0000 {
+ compatible = "brcm,spum-crypto";
+ reg = <0x612d0000 0x900>;
+ mboxes = <&pdc0 0>;
+ };
diff --git a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0-mon.yaml b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0-mon.yaml
index e879bc0be8e2..9f8e6689cd94 100644
--- a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0-mon.yaml
+++ b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0-mon.yaml
@@ -83,6 +83,8 @@ properties:
by SNVS ONOFF, the driver can report the status of POWER key and wakeup
system if pressed after system suspend.
+ $ref: /schemas/input/input.yaml
+
properties:
compatible:
const: fsl,sec-v4.0-pwrkey
@@ -111,6 +113,9 @@ properties:
maxItems: 1
default: 116
+ power-off-time-sec:
+ enum: [0, 5, 10, 15]
+
required:
- compatible
- interrupts
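Editor's note: the new power-off-time-sec property (inherited from the input.yaml schema referenced above) sets how long the power key must be held before the SNVS logic powers the system off, limited here to 0, 5, 10 or 15 seconds. A hedged sketch of a power-key sub-node using it; the node name, interrupt cells and keycode are illustrative only:

    snvs-powerkey {
        compatible = "fsl,sec-v4.0-pwrkey";
        interrupts = <0 4 0x4>;
        linux,keycode = <116>;
        power-off-time-sec = <10>;
    };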
diff --git a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml
index f0c4a7c83568..75afa441e019 100644
--- a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml
+++ b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml
@@ -38,7 +38,9 @@ properties:
compatible:
oneOf:
- items:
- - const: fsl,sec-v5.4
+ - enum:
+ - fsl,sec-v5.4
+ - fsl,sec-v6.0
- const: fsl,sec-v5.0
- const: fsl,sec-v4.0
- items:
@@ -94,6 +96,12 @@ patternProperties:
compatible:
oneOf:
- items:
+ - const: fsl,sec-v6.0-job-ring
+ - const: fsl,sec-v5.2-job-ring
+ - const: fsl,sec-v5.0-job-ring
+ - const: fsl,sec-v4.4-job-ring
+ - const: fsl,sec-v4.0-job-ring
+ - items:
- const: fsl,sec-v5.4-job-ring
- const: fsl,sec-v5.0-job-ring
- const: fsl,sec-v4.0-job-ring
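Editor's note: with fsl,sec-v6.0 now accepted, the C29x-style SEC 6 node and job ring from the fsl-sec6.txt binding removed below map onto this schema roughly as follows (interrupt cells depend on the interrupt parent and are taken from the removed example):

    crypto@a0000 {
        compatible = "fsl,sec-v6.0", "fsl,sec-v5.0", "fsl,sec-v4.0";
        #address-cells = <1>;
        #size-cells = <1>;
        reg = <0xa0000 0x20000>;
        ranges = <0 0xa0000 0x20000>;

        jr@1000 {
            compatible = "fsl,sec-v6.0-job-ring",
                         "fsl,sec-v5.2-job-ring",
                         "fsl,sec-v5.0-job-ring",
                         "fsl,sec-v4.4-job-ring",
                         "fsl,sec-v4.0-job-ring";
            reg = <0x1000 0x1000>;
            interrupts = <49 2 0 0>;
        };
    };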
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec6.txt b/Documentation/devicetree/bindings/crypto/fsl-sec6.txt
deleted file mode 100644
index 73b0eb950bb3..000000000000
--- a/Documentation/devicetree/bindings/crypto/fsl-sec6.txt
+++ /dev/null
@@ -1,157 +0,0 @@
-SEC 6 is as Freescale's Cryptographic Accelerator and Assurance Module (CAAM).
-Currently Freescale powerpc chip C29X is embedded with SEC 6.
-SEC 6 device tree binding include:
- -SEC 6 Node
- -Job Ring Node
- -Full Example
-
-=====================================================================
-SEC 6 Node
-
-Description
-
- Node defines the base address of the SEC 6 block.
- This block specifies the address range of all global
- configuration registers for the SEC 6 block.
- For example, In C293, we could see three SEC 6 node.
-
-PROPERTIES
-
- - compatible
- Usage: required
- Value type: <string>
- Definition: Must include "fsl,sec-v6.0".
-
- - fsl,sec-era
- Usage: optional
- Value type: <u32>
- Definition: A standard property. Define the 'ERA' of the SEC
- device.
-
- - #address-cells
- Usage: required
- Value type: <u32>
- Definition: A standard property. Defines the number of cells
- for representing physical addresses in child nodes.
-
- - #size-cells
- Usage: required
- Value type: <u32>
- Definition: A standard property. Defines the number of cells
- for representing the size of physical addresses in
- child nodes.
-
- - reg
- Usage: required
- Value type: <prop-encoded-array>
- Definition: A standard property. Specifies the physical
- address and length of the SEC 6 configuration registers.
-
- - ranges
- Usage: required
- Value type: <prop-encoded-array>
- Definition: A standard property. Specifies the physical address
- range of the SEC 6.0 register space (-SNVS not included). A
- triplet that includes the child address, parent address, &
- length.
-
- Note: All other standard properties (see the Devicetree Specification)
- are allowed but are optional.
-
-EXAMPLE
- crypto@a0000 {
- compatible = "fsl,sec-v6.0";
- fsl,sec-era = <6>;
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xa0000 0x20000>;
- ranges = <0 0xa0000 0x20000>;
- };
-
-=====================================================================
-Job Ring (JR) Node
-
- Child of the crypto node defines data processing interface to SEC 6
- across the peripheral bus for purposes of processing
- cryptographic descriptors. The specified address
- range can be made visible to one (or more) cores.
- The interrupt defined for this node is controlled within
- the address range of this node.
-
- - compatible
- Usage: required
- Value type: <string>
- Definition: Must include "fsl,sec-v6.0-job-ring".
-
- - reg
- Usage: required
- Value type: <prop-encoded-array>
- Definition: Specifies a two JR parameters: an offset from
- the parent physical address and the length the JR registers.
-
- - interrupts
- Usage: required
- Value type: <prop_encoded-array>
- Definition: Specifies the interrupts generated by this
- device. The value of the interrupts property
- consists of one interrupt specifier. The format
- of the specifier is defined by the binding document
- describing the node's interrupt parent.
-
-EXAMPLE
- jr@1000 {
- compatible = "fsl,sec-v6.0-job-ring";
- reg = <0x1000 0x1000>;
- interrupts = <49 2 0 0>;
- };
-
-===================================================================
-Full Example
-
-Since some chips may contain more than one SEC, the dtsi contains
-only the node contents, not the node itself. A chip using the SEC
-should include the dtsi inside each SEC node. Example:
-
-In qoriq-sec6.0.dtsi:
-
- compatible = "fsl,sec-v6.0";
- fsl,sec-era = <6>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- jr@1000 {
- compatible = "fsl,sec-v6.0-job-ring",
- "fsl,sec-v5.2-job-ring",
- "fsl,sec-v5.0-job-ring",
- "fsl,sec-v4.4-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x1000 0x1000>;
- };
-
- jr@2000 {
- compatible = "fsl,sec-v6.0-job-ring",
- "fsl,sec-v5.2-job-ring",
- "fsl,sec-v5.0-job-ring",
- "fsl,sec-v4.4-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x2000 0x1000>;
- };
-
-In the C293 device tree, we add the include of public property:
-
- crypto@a0000 {
- /include/ "qoriq-sec6.0.dtsi"
- }
-
- crypto@a0000 {
- reg = <0xa0000 0x20000>;
- ranges = <0 0xa0000 0x20000>;
-
- jr@1000 {
- interrupts = <49 2 0 0>;
- };
-
- jr@2000 {
- interrupts = <50 2 0 0>;
- };
- };
diff --git a/Documentation/devicetree/bindings/crypto/hisilicon,hip06-sec.yaml b/Documentation/devicetree/bindings/crypto/hisilicon,hip06-sec.yaml
new file mode 100644
index 000000000000..2bfac9d1c020
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/hisilicon,hip06-sec.yaml
@@ -0,0 +1,134 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/hisilicon,hip06-sec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Hisilicon hip06/hip07 Security Accelerator
+
+maintainers:
+ - Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+properties:
+ compatible:
+ enum:
+ - hisilicon,hip06-sec
+ - hisilicon,hip07-sec
+
+ reg:
+ items:
+ - description: Registers for backend processing engines
+ - description: Registers for common functionality
+ - description: Registers for queue 0
+ - description: Registers for queue 1
+ - description: Registers for queue 2
+ - description: Registers for queue 3
+ - description: Registers for queue 4
+ - description: Registers for queue 5
+ - description: Registers for queue 6
+ - description: Registers for queue 7
+ - description: Registers for queue 8
+ - description: Registers for queue 9
+ - description: Registers for queue 10
+ - description: Registers for queue 11
+ - description: Registers for queue 12
+ - description: Registers for queue 13
+ - description: Registers for queue 14
+ - description: Registers for queue 15
+
+ interrupts:
+ items:
+ - description: SEC unit error queue interrupt
+ - description: Completion interrupt for queue 0
+ - description: Error interrupt for queue 0
+ - description: Completion interrupt for queue 1
+ - description: Error interrupt for queue 1
+ - description: Completion interrupt for queue 2
+ - description: Error interrupt for queue 2
+ - description: Completion interrupt for queue 3
+ - description: Error interrupt for queue 3
+ - description: Completion interrupt for queue 4
+ - description: Error interrupt for queue 4
+ - description: Completion interrupt for queue 5
+ - description: Error interrupt for queue 5
+ - description: Completion interrupt for queue 6
+ - description: Error interrupt for queue 6
+ - description: Completion interrupt for queue 7
+ - description: Error interrupt for queue 7
+ - description: Completion interrupt for queue 8
+ - description: Error interrupt for queue 8
+ - description: Completion interrupt for queue 9
+ - description: Error interrupt for queue 9
+ - description: Completion interrupt for queue 10
+ - description: Error interrupt for queue 10
+ - description: Completion interrupt for queue 11
+ - description: Error interrupt for queue 11
+ - description: Completion interrupt for queue 12
+ - description: Error interrupt for queue 12
+ - description: Completion interrupt for queue 13
+ - description: Error interrupt for queue 13
+ - description: Completion interrupt for queue 14
+ - description: Error interrupt for queue 14
+ - description: Completion interrupt for queue 15
+ - description: Error interrupt for queue 15
+
+ dma-coherent: true
+
+ iommus:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - dma-coherent
+
+additionalProperties: false
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ crypto@400d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x400 0xd0000000 0x0 0x10000
+ 0x400 0xd2000000 0x0 0x10000
+ 0x400 0xd2010000 0x0 0x10000
+ 0x400 0xd2020000 0x0 0x10000
+ 0x400 0xd2030000 0x0 0x10000
+ 0x400 0xd2040000 0x0 0x10000
+ 0x400 0xd2050000 0x0 0x10000
+ 0x400 0xd2060000 0x0 0x10000
+ 0x400 0xd2070000 0x0 0x10000
+ 0x400 0xd2080000 0x0 0x10000
+ 0x400 0xd2090000 0x0 0x10000
+ 0x400 0xd20a0000 0x0 0x10000
+ 0x400 0xd20b0000 0x0 0x10000
+ 0x400 0xd20c0000 0x0 0x10000
+ 0x400 0xd20d0000 0x0 0x10000
+ 0x400 0xd20e0000 0x0 0x10000
+ 0x400 0xd20f0000 0x0 0x10000
+ 0x400 0xd2100000 0x0 0x10000>;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+ dma-coherent;
+ iommus = <&p1_smmu_alg_a 0x600>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt b/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt
deleted file mode 100644
index d28fd1af01b4..000000000000
--- a/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-* Hisilicon hip07 Security Accelerator (SEC)
-
-Required properties:
-- compatible: Must contain one of
- - "hisilicon,hip06-sec"
- - "hisilicon,hip07-sec"
-- reg: Memory addresses and lengths of the memory regions through which
- this device is controlled.
- Region 0 has registers to control the backend processing engines.
- Region 1 has registers for functionality common to all queues.
- Regions 2-18 have registers for the 16 individual queues which are isolated
- both in hardware and within the driver.
-- interrupts: Interrupt specifiers.
- Refer to interrupt-controller/interrupts.txt for generic interrupt client node
- bindings.
- Interrupt 0 is for the SEC unit error queue.
- Interrupt 2N + 1 is the completion interrupt for queue N.
- Interrupt 2N + 2 is the error interrupt for queue N.
-- dma-coherent: The driver assumes coherent dma is possible.
-
-Optional properties:
-- iommus: The SEC units are behind smmu-v3 iommus.
- Refer to iommu/arm,smmu-v3.txt for more information.
-
-Example:
-
-p1_sec_a: crypto@400d2000000 {
- compatible = "hisilicon,hip07-sec";
- reg = <0x400 0xd0000000 0x0 0x10000
- 0x400 0xd2000000 0x0 0x10000
- 0x400 0xd2010000 0x0 0x10000
- 0x400 0xd2020000 0x0 0x10000
- 0x400 0xd2030000 0x0 0x10000
- 0x400 0xd2040000 0x0 0x10000
- 0x400 0xd2050000 0x0 0x10000
- 0x400 0xd2060000 0x0 0x10000
- 0x400 0xd2070000 0x0 0x10000
- 0x400 0xd2080000 0x0 0x10000
- 0x400 0xd2090000 0x0 0x10000
- 0x400 0xd20a0000 0x0 0x10000
- 0x400 0xd20b0000 0x0 0x10000
- 0x400 0xd20c0000 0x0 0x10000
- 0x400 0xd20d0000 0x0 0x10000
- 0x400 0xd20e0000 0x0 0x10000
- 0x400 0xd20f0000 0x0 0x10000
- 0x400 0xd2100000 0x0 0x10000>;
- interrupt-parent = <&p1_mbigen_sec_a>;
- iommus = <&p1_smmu_alg_a 0x600>;
- dma-coherent;
- interrupts = <576 4>,
- <577 1>, <578 4>,
- <579 1>, <580 4>,
- <581 1>, <582 4>,
- <583 1>, <584 4>,
- <585 1>, <586 4>,
- <587 1>, <588 4>,
- <589 1>, <590 4>,
- <591 1>, <592 4>,
- <593 1>, <594 4>,
- <595 1>, <596 4>,
- <597 1>, <598 4>,
- <599 1>, <600 4>,
- <601 1>, <602 4>,
- <603 1>, <604 4>,
- <605 1>, <606 4>,
- <607 1>, <608 4>;
-};
diff --git a/Documentation/devicetree/bindings/crypto/img,hash-accelerator.yaml b/Documentation/devicetree/bindings/crypto/img,hash-accelerator.yaml
new file mode 100644
index 000000000000..46617561ef94
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/img,hash-accelerator.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/img,hash-accelerator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Imagination Technologies hardware hash accelerator
+
+maintainers:
+ - James Hartley <james.hartley@imgtec.com>
+
+description:
+ The hash accelerator provides hardware hashing acceleration for
+ SHA1, SHA224, SHA256 and MD5 hashes.
+
+properties:
+ compatible:
+ const: img,hash-accelerator
+
+ reg:
+ items:
+ - description: Register base address and size
+ - description: DMA port specifier
+
+ interrupts:
+ maxItems: 1
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ items:
+ - const: tx
+
+ clocks:
+ items:
+ - description: System clock for hash block registers
+ - description: Hash clock for data path
+
+ clock-names:
+ items:
+ - const: sys
+ - const: hash
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - dmas
+ - dma-names
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/mips-gic.h>
+ #include <dt-bindings/clock/pistachio-clk.h>
+
+ hash@18149600 {
+ compatible = "img,hash-accelerator";
+ reg = <0x18149600 0x100>, <0x18101100 0x4>;
+ interrupts = <GIC_SHARED 59 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dma 8 0xffffffff 0>;
+ dma-names = "tx";
+ clocks = <&cr_periph SYS_CLK_HASH>, <&clk_periph PERIPH_CLK_ROM>;
+ clock-names = "sys", "hash";
+ };
diff --git a/Documentation/devicetree/bindings/crypto/img-hash.txt b/Documentation/devicetree/bindings/crypto/img-hash.txt
deleted file mode 100644
index 91a3d757d641..000000000000
--- a/Documentation/devicetree/bindings/crypto/img-hash.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Imagination Technologies hardware hash accelerator
-
-The hash accelerator provides hardware hashing acceleration for
-SHA1, SHA224, SHA256 and MD5 hashes
-
-Required properties:
-
-- compatible : "img,hash-accelerator"
-- reg : Offset and length of the register set for the module, and the DMA port
-- interrupts : The designated IRQ line for the hashing module.
-- dmas : DMA specifier as per Documentation/devicetree/bindings/dma/dma.txt
-- dma-names : Should be "tx"
-- clocks : Clock specifiers
-- clock-names : "sys" Used to clock the hash block registers
- "hash" Used to clock data through the accelerator
-
-Example:
-
- hash: hash@18149600 {
- compatible = "img,hash-accelerator";
- reg = <0x18149600 0x100>, <0x18101100 0x4>;
- interrupts = <GIC_SHARED 59 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dma 8 0xffffffff 0>;
- dma-names = "tx";
- clocks = <&cr_periph SYS_CLK_HASH>, <&clk_periph PERIPH_CLK_ROM>;
- clock-names = "sys", "hash";
- };
diff --git a/Documentation/devicetree/bindings/crypto/marvell,orion-crypto.yaml b/Documentation/devicetree/bindings/crypto/marvell,orion-crypto.yaml
new file mode 100644
index 000000000000..b44d36c50ec4
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/marvell,orion-crypto.yaml
@@ -0,0 +1,133 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/marvell,orion-crypto.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Cryptographic Engines And Security Accelerator
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Boris Brezillon <bbrezillon@kernel.org>
+
+description: |
+ Marvell Cryptographic Engines And Security Accelerator
+
+properties:
+ compatible:
+ enum:
+ - marvell,armada-370-crypto
+ - marvell,armada-xp-crypto
+ - marvell,armada-375-crypto
+ - marvell,armada-38x-crypto
+ - marvell,dove-crypto
+ - marvell,kirkwood-crypto
+ - marvell,orion-crypto
+
+ reg:
+ minItems: 1
+ items:
+ - description: Registers region
+ - description: SRAM region
+ deprecated: true
+
+ reg-names:
+ minItems: 1
+ items:
+ - const: regs
+ - const: sram
+ deprecated: true
+
+ interrupts:
+ description: One interrupt for each CESA engine
+ minItems: 1
+ maxItems: 2
+
+ clocks:
+ description: One or two clocks for each CESA engine
+ minItems: 1
+ maxItems: 4
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: cesa0
+ - const: cesa1
+ - const: cesaz0
+ - const: cesaz1
+
+ marvell,crypto-srams:
+ description: Phandle(s) to crypto SRAM.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 1
+ maxItems: 2
+ items:
+ maxItems: 1
+
+ marvell,crypto-sram-size:
+ description: SRAM size reserved for crypto operations.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0x800
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - marvell,crypto-srams
+
+allOf:
+ - if:
+ not:
+ properties:
+ compatible:
+ enum:
+ - marvell,kirkwood-crypto
+ - marvell,orion-crypto
+ then:
+ required:
+ - clocks
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - marvell,armada-370-crypto
+ - marvell,armada-375-crypto
+ - marvell,armada-38x-crypto
+ - marvell,armada-xp-crypto
+ then:
+ required:
+ - clock-names
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - marvell,armada-375-crypto
+ - marvell,armada-38x-crypto
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ clock-names:
+ minItems: 4
+ else:
+ properties:
+ clocks:
+ maxItems: 2
+ clock-names:
+ maxItems: 2
+
+additionalProperties: false
+
+examples:
+ - |
+ crypto@30000 {
+ compatible = "marvell,orion-crypto";
+ reg = <0x30000 0x10000>;
+ reg-names = "regs";
+ interrupts = <22>;
+ marvell,crypto-srams = <&crypto_sram>;
+ marvell,crypto-sram-size = <0x600>;
+ };
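Editor's note: the example keeps only the clock-less Orion case. For the Armada SoCs, which the conditional blocks above require clocks and clock-names for, the node from the removed marvell-cesa.txt remains a valid instance of this schema:

    crypto@90000 {
        compatible = "marvell,armada-xp-crypto";
        reg = <0x90000 0x10000>;
        reg-names = "regs";
        interrupts = <48>, <49>;
        clocks = <&gateclk 23>, <&gateclk 23>;
        clock-names = "cesa0", "cesa1";
        marvell,crypto-srams = <&crypto_sram0>, <&crypto_sram1>;
        marvell,crypto-sram-size = <0x600>;
    };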
diff --git a/Documentation/devicetree/bindings/crypto/marvell-cesa.txt b/Documentation/devicetree/bindings/crypto/marvell-cesa.txt
deleted file mode 100644
index 28d3f2496b89..000000000000
--- a/Documentation/devicetree/bindings/crypto/marvell-cesa.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-Marvell Cryptographic Engines And Security Accelerator
-
-Required properties:
-- compatible: should be one of the following string
- "marvell,orion-crypto"
- "marvell,kirkwood-crypto"
- "marvell,dove-crypto"
- "marvell,armada-370-crypto"
- "marvell,armada-xp-crypto"
- "marvell,armada-375-crypto"
- "marvell,armada-38x-crypto"
-- reg: base physical address of the engine and length of memory mapped
- region. Can also contain an entry for the SRAM attached to the CESA,
- but this representation is deprecated and marvell,crypto-srams should
- be used instead
-- reg-names: "regs". Can contain an "sram" entry, but this representation
- is deprecated and marvell,crypto-srams should be used instead
-- interrupts: interrupt number
-- clocks: reference to the crypto engines clocks. This property is not
- required for orion and kirkwood platforms
-- clock-names: "cesaX" and "cesazX", X should be replaced by the crypto engine
- id.
- This property is not required for the orion and kirkwoord
- platforms.
- "cesazX" clocks are not required on armada-370 platforms
-- marvell,crypto-srams: phandle to crypto SRAM definitions
-
-Optional properties:
-- marvell,crypto-sram-size: SRAM size reserved for crypto operations, if not
- specified the whole SRAM is used (2KB)
-
-
-Examples:
-
- crypto@90000 {
- compatible = "marvell,armada-xp-crypto";
- reg = <0x90000 0x10000>;
- reg-names = "regs";
- interrupts = <48>, <49>;
- clocks = <&gateclk 23>, <&gateclk 23>;
- clock-names = "cesa0", "cesa1";
- marvell,crypto-srams = <&crypto_sram0>, <&crypto_sram1>;
- marvell,crypto-sram-size = <0x600>;
- };
diff --git a/Documentation/devicetree/bindings/crypto/mediatek-crypto.txt b/Documentation/devicetree/bindings/crypto/mediatek-crypto.txt
deleted file mode 100644
index 450da3661cad..000000000000
--- a/Documentation/devicetree/bindings/crypto/mediatek-crypto.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-MediaTek cryptographic accelerators
-
-Required properties:
-- compatible: Should be "mediatek,eip97-crypto"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the five crypto engines interrupts in numeric
- order. These are global system and four descriptor rings.
-- clocks: the clock used by the core
-- clock-names: Must contain "cryp".
-- power-domains: Must contain a reference to the PM domain.
-
-
-Example:
- crypto: crypto@1b240000 {
- compatible = "mediatek,eip97-crypto";
- reg = <0 0x1b240000 0 0x20000>;
- interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_LOW>,
- <GIC_SPI 83 IRQ_TYPE_LEVEL_LOW>,
- <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>,
- <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>,
- <GIC_SPI 97 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&ethsys CLK_ETHSYS_CRYPTO>;
- clock-names = "cryp";
- power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
- };
diff --git a/Documentation/devicetree/bindings/crypto/mv_cesa.txt b/Documentation/devicetree/bindings/crypto/mv_cesa.txt
deleted file mode 100644
index d9b92e2f3138..000000000000
--- a/Documentation/devicetree/bindings/crypto/mv_cesa.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Marvell Cryptographic Engines And Security Accelerator
-
-Required properties:
-- compatible: should be one of the following string
- "marvell,orion-crypto"
- "marvell,kirkwood-crypto"
- "marvell,dove-crypto"
-- reg: base physical address of the engine and length of memory mapped
- region. Can also contain an entry for the SRAM attached to the CESA,
- but this representation is deprecated and marvell,crypto-srams should
- be used instead
-- reg-names: "regs". Can contain an "sram" entry, but this representation
- is deprecated and marvell,crypto-srams should be used instead
-- interrupts: interrupt number
-- clocks: reference to the crypto engines clocks. This property is only
- required for Dove platforms
-- marvell,crypto-srams: phandle to crypto SRAM definitions
-
-Optional properties:
-- marvell,crypto-sram-size: SRAM size reserved for crypto operations, if not
- specified the whole SRAM is used (2KB)
-
-Examples:
-
- crypto@30000 {
- compatible = "marvell,orion-crypto";
- reg = <0x30000 0x10000>;
- reg-names = "regs";
- interrupts = <22>;
- marvell,crypto-srams = <&crypto_sram>;
- marvell,crypto-sram-size = <0x600>;
- };
diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
index 3f35122f7873..e009cb712fb8 100644
--- a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
@@ -45,6 +45,7 @@ properties:
- items:
- enum:
+ - qcom,qcs615-qce
- qcom,qcs8300-qce
- qcom,sa8775p-qce
- qcom,sc7280-qce
diff --git a/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
index 350fb8f400f0..5952e6448ed4 100644
--- a/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
@@ -111,11 +111,27 @@ properties:
unevaluatedProperties: false
port@1:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description:
DSI output port node to the panel or the next bridge
in the chain
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ description: array of physical DSI data lane indexes.
+ minItems: 1
+ items:
+ - const: 1
+ - const: 2
+ - const: 3
+ - const: 4
+
required:
- port@0
- port@1
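Editor's note: the newly documented data-lanes property enumerates the physical DSI data lanes driven on the output endpoint. A hedged sketch of a four-lane port@1, with illustrative endpoint labels:

    port@1 {
        reg = <1>;

        dsi_out: endpoint {
            data-lanes = <1 2 3 4>;
            remote-endpoint = <&panel_in>;
        };
    };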
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dsi.yaml b/Documentation/devicetree/bindings/display/bridge/renesas,dsi.yaml
index e08c24633926..5a99d9b9635e 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,dsi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,dsi.yaml
@@ -128,7 +128,7 @@ required:
- power-domains
- ports
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
@@ -180,4 +180,69 @@ examples:
};
};
};
+
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi1: dsi@10860000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,r9a07g044-mipi-dsi", "renesas,rzg2l-mipi-dsi";
+ reg = <0x10860000 0x20000>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "seq0", "seq1", "vin1", "rcv",
+ "ferr", "ppi", "debug";
+ clocks = <&cpg CPG_MOD R9A07G044_MIPI_DSI_PLLCLK>,
+ <&cpg CPG_MOD R9A07G044_MIPI_DSI_SYSCLK>,
+ <&cpg CPG_MOD R9A07G044_MIPI_DSI_ACLK>,
+ <&cpg CPG_MOD R9A07G044_MIPI_DSI_PCLK>,
+ <&cpg CPG_MOD R9A07G044_MIPI_DSI_VCLK>,
+ <&cpg CPG_MOD R9A07G044_MIPI_DSI_LPCLK>;
+ clock-names = "pllclk", "sysclk", "aclk", "pclk", "vclk", "lpclk";
+ resets = <&cpg R9A07G044_MIPI_DSI_CMN_RSTB>,
+ <&cpg R9A07G044_MIPI_DSI_ARESET_N>,
+ <&cpg R9A07G044_MIPI_DSI_PRESET_N>;
+ reset-names = "rst", "arst", "prst";
+ power-domains = <&cpg>;
+
+ panel@0 {
+ compatible = "rocktech,jh057n00900";
+ reg = <0>;
+ vcc-supply = <&reg_2v8_p>;
+ iovcc-supply = <&reg_1v8_p>;
+ reset-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi1_out>;
+ };
+ };
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi1_in: endpoint {
+ remote-endpoint = <&du_out_dsi1>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi1_out: endpoint {
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/display/fsl,tcon.txt b/Documentation/devicetree/bindings/display/fsl,tcon.txt
deleted file mode 100644
index 475008747801..000000000000
--- a/Documentation/devicetree/bindings/display/fsl,tcon.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Device Tree bindings for Freescale TCON Driver
-
-Required properties:
-- compatible: Should be one of
- * "fsl,vf610-tcon".
-
-- reg: Address and length of the register set for tcon.
-- clocks: From common clock binding: handle to tcon ipg clock.
-- clock-names: From common clock binding: Shall be "ipg".
-
-Examples:
-timing-controller@4003d000 {
- compatible = "fsl,vf610-tcon";
- reg = <0x4003d000 0x1000>;
- clocks = <&clks VF610_CLK_TCON0>;
- clock-names = "ipg";
-};
diff --git a/Documentation/devicetree/bindings/display/fsl,vf610-tcon.yaml b/Documentation/devicetree/bindings/display/fsl,vf610-tcon.yaml
new file mode 100644
index 000000000000..06bd680524a5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/fsl,vf610-tcon.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/fsl,vf610-tcon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale TCON
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,vf610-tcon
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ipg
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/vf610-clock.h>
+
+ timing-controller@4003d000 {
+ compatible = "fsl,vf610-tcon";
+ reg = <0x4003d000 0x1000>;
+ clocks = <&clks VF610_CLK_TCON0>;
+ clock-names = "ipg";
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx-display-subsystem.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx-display-subsystem.yaml
new file mode 100644
index 000000000000..92a0a797d099
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx-display-subsystem.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx-display-subsystem.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX DRM master device
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+ The freescale i.MX DRM master device is a virtual device needed to list all
+ IPU or other display interface nodes that comprise the graphics subsystem.
+
+properties:
+ compatible:
+ const: fsl,imx-display-subsystem
+
+ ports:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ Should contain a list of phandles pointing to display
+ interface ports of IPU devices.
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ display-subsystem {
+ compatible = "fsl,imx-display-subsystem";
+ ports = <&ipu_di0>;
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx-parallel-display.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx-parallel-display.yaml
new file mode 100644
index 000000000000..bbcfe7e2958b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx-parallel-display.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx-parallel-display.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Parallel display support
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx-parallel-display
+
+ interface-pix-fmt:
+ $ref: /schemas/types.yaml#/definitions/string
+ enum:
+ - rgb24
+ - rgb565
+ - bgr666
+ - lvds666
+
+ ddc:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle describing the i2c bus handling the display data channel
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: input port connected to the IPU display interface
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: output port connected to a panel
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ display {
+ compatible = "fsl,imx-parallel-display";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interface-pix-fmt = "rgb24";
+
+ port@0 {
+ reg = <0>;
+
+ endpoint {
+ remote-endpoint = <&ipu_di0_disp0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx6q-ipu.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx6q-ipu.yaml
new file mode 100644
index 000000000000..ec78645d4de0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx6q-ipu.yaml
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx6q-ipu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX IPUv3
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - fsl,imx51-ipu
+ - fsl,imx53-ipu
+ - fsl,imx6q-ipu
+ - items:
+ - const: fsl,imx6qp-ipu
+ - const: fsl,imx6q-ipu
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+ clocks:
+ maxItems: 3
+
+ clock-names:
+ items:
+ - const: bus
+ - const: di0
+ - const: di1
+
+ resets:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ fsl,prg:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle to prg node associated with this IPU instance
+
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: CSI0
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: CSI1
+
+ port@2:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: DI0
+
+ port@3:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: DI1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ display-controller@18000000 {
+ compatible = "fsl,imx53-ipu";
+ reg = <0x18000000 0x080000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <11 10>;
+ resets = <&src 2>;
+
+ port@2 {
+ reg = <2>;
+
+ endpoint {
+ remote-endpoint = <&display_in>;
+ };
+ };
+ };
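Editor's note: for the i.MX6QP variant, the removed fsl-imx-drm.txt additionally required an fsl,prg link to the associated Prefetch Resolve Gasket. A sketch under that assumption; the register, interrupt and clock values are illustrative (loosely based on i.MX6Q, assuming the imx6qdl-clock.h binding header), and the prg1 label is hypothetical:

    display-controller@2400000 {
        compatible = "fsl,imx6qp-ipu", "fsl,imx6q-ipu";
        reg = <0x02400000 0x400000>;
        #address-cells = <1>;
        #size-cells = <0>;
        interrupts = <6 5>;
        resets = <&src 2>;
        clocks = <&clks IMX6QDL_CLK_IPU1>,
                 <&clks IMX6QDL_CLK_IPU1_DI0>,
                 <&clks IMX6QDL_CLK_IPU1_DI1>;
        clock-names = "bus", "di0", "di1";
        fsl,prg = <&prg1>;
    };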
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx6q-ldb.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx6q-ldb.yaml
new file mode 100644
index 000000000000..1646f41d8f72
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx6q-ldb.yaml
@@ -0,0 +1,193 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx6q-ldb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale LVDS Display Bridge (ldb)
+
+description:
+ The LVDS Display Bridge device tree node contains up to two lvds-channel
+ nodes describing each of the two LVDS encoder channels of the bridge.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - fsl,imx53-ldb
+ - items:
+ - enum:
+ - fsl,imx6q-ldb
+ - const: fsl,imx53-ldb
+
+ reg:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ gpr:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ The phandle points to the iomuxc-gpr region containing the LVDS
+ control register.
+
+ clocks:
+ minItems: 6
+ maxItems: 8
+
+ clock-names:
+ oneOf:
+ - items:
+ - const: di0_pll
+ - const: di1_pll
+ - const: di0_sel
+ - const: di1_sel
+ - const: di0
+ - const: di1
+ - items:
+ - const: di0_pll
+ - const: di1_pll
+ - const: di0_sel
+ - const: di1_sel
+ - const: di2_sel
+ - const: di3_sel
+ - const: di0
+ - const: di1
+
+ fsl,dual-channel:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ If present, only LVDS channel 0 should be configured; one input is
+ distributed to both outputs in dual-channel mode.
+
+patternProperties:
+ '^lvds-channel@[0-1]$':
+ type: object
+ description:
+ Each LVDS Channel has to contain either an of graph link to a panel device node
+ or a display-timings node that describes the video timings for the connected
+ LVDS display as well as the fsl,data-mapping and fsl,data-width properties.
+
+ properties:
+ reg:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ display-timings:
+ $ref: /schemas/display/panel/display-timings.yaml#
+
+ fsl,data-mapping:
+ enum:
+ - spwg
+ - jeida
+
+ fsl,data-width:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: should be <18> or <24>
+ enum:
+ - 18
+ - 24
+
+ fsl,panel:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle to lcd panel
+
+ patternProperties:
+ '^port@[0-4]$':
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ On i.MX5, the internal two-input-multiplexer is used. Due to hardware
+ limitations, only one input port (port@[0,1]) can be used for each channel
+ (lvds-channel@[0,1], respectively).
+ On i.MX6, there should be four input ports (port@[0-3]) that correspond
+ to the four LVDS multiplexer inputs.
+ A single output port (port@2 on i.MX5, port@4 on i.MX6) must be connected
+ to a panel input port. Optionally, the output port can be left out if
+ display-timings are used instead.
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - gpr
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx5-clock.h>
+
+ ldb@53fa8008 {
+ compatible = "fsl,imx53-ldb";
+ reg = <0x53fa8008 0x4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpr = <&gpr>;
+ clocks = <&clks IMX5_CLK_LDB_DI0_SEL>,
+ <&clks IMX5_CLK_LDB_DI1_SEL>,
+ <&clks IMX5_CLK_IPU_DI0_SEL>,
+ <&clks IMX5_CLK_IPU_DI1_SEL>,
+ <&clks IMX5_CLK_LDB_DI0_GATE>,
+ <&clks IMX5_CLK_LDB_DI1_GATE>;
+ clock-names = "di0_pll", "di1_pll",
+ "di0_sel", "di1_sel",
+ "di0", "di1";
+
+ /* Using an of-graph endpoint link to connect the panel */
+ lvds-channel@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ endpoint {
+ remote-endpoint = <&ipu_di0_lvds0>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+
+ /* Using display-timings and fsl,data-mapping/width instead */
+ lvds-channel@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ fsl,data-mapping = "spwg";
+ fsl,data-width = <24>;
+
+ display-timings {/* ... */
+ };
+
+ port@1 {
+ reg = <1>;
+
+ endpoint {
+ remote-endpoint = <&ipu_di1_lvds1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx6qp-pre.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx6qp-pre.yaml
new file mode 100644
index 000000000000..73bc73ff6e69
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx6qp-pre.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx6qp-pre.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX PRE (Prefetch Resolve Engine)
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx6qp-pre
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: axi
+ fsl,iram:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle pointing to the mmio-sram device node, that should be
+ used for the PRE SRAM double buffer.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx6qdl-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ pre@21c8000 {
+ compatible = "fsl,imx6qp-pre";
+ reg = <0x021c8000 0x1000>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clks IMX6QDL_CLK_PRE0>;
+ clock-names = "axi";
+ fsl,iram = <&ocram2>;
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx6qp-prg.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx6qp-prg.yaml
new file mode 100644
index 000000000000..582da8c489f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx6qp-prg.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx6qp-prg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX PRG (Prefetch Resolve Gasket)
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx6qp-prg
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: ipg
+ - const: axi
+
+ fsl,pres:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ maxItems: 1
+ description:
+ phandles to the PRE units attached to this PRG, with the fixed
+ PRE as the first entry and the muxable PREs following.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx6qdl-clock.h>
+
+ prg@21cc000 {
+ compatible = "fsl,imx6qp-prg";
+ reg = <0x021cc000 0x1000>;
+ clocks = <&clks IMX6QDL_CLK_PRG0_APB>, <&clks IMX6QDL_CLK_PRG0_AXI>;
+ clock-names = "ipg", "axi";
+ fsl,pres = <&pre1>, <&pre2>, <&pre3>;
+ };
+
diff --git a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt b/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt
deleted file mode 100644
index 269b1ae2fca9..000000000000
--- a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt
+++ /dev/null
@@ -1,160 +0,0 @@
-Freescale i.MX DRM master device
-================================
-
-The freescale i.MX DRM master device is a virtual device needed to list all
-IPU or other display interface nodes that comprise the graphics subsystem.
-
-Required properties:
-- compatible: Should be "fsl,imx-display-subsystem"
-- ports: Should contain a list of phandles pointing to display interface ports
- of IPU devices
-
-example:
-
-display-subsystem {
- compatible = "fsl,imx-display-subsystem";
- ports = <&ipu_di0>;
-};
-
-
-Freescale i.MX IPUv3
-====================
-
-Required properties:
-- compatible: Should be "fsl,<chip>-ipu" where <chip> is one of
- - imx51
- - imx53
- - imx6q
- - imx6qp
-- reg: should be register base and length as documented in the
- datasheet
-- interrupts: Should contain sync interrupt and error interrupt,
- in this order.
-- resets: phandle pointing to the system reset controller and
- reset line index, see reset/fsl,imx-src.txt for details
-Additional required properties for fsl,imx6qp-ipu:
-- fsl,prg: phandle to prg node associated with this IPU instance
-Optional properties:
-- port@[0-3]: Port nodes with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
- Ports 0 and 1 should correspond to CSI0 and CSI1,
- ports 2 and 3 should correspond to DI0 and DI1, respectively.
-
-example:
-
-ipu: ipu@18000000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx53-ipu";
- reg = <0x18000000 0x080000000>;
- interrupts = <11 10>;
- resets = <&src 2>;
-
- ipu_di0: port@2 {
- reg = <2>;
-
- ipu_di0_disp0: endpoint {
- remote-endpoint = <&display_in>;
- };
- };
-};
-
-Freescale i.MX PRE (Prefetch Resolve Engine)
-============================================
-
-Required properties:
-- compatible: should be "fsl,imx6qp-pre"
-- reg: should be register base and length as documented in the
- datasheet
-- clocks : phandle to the PRE axi clock input, as described
- in Documentation/devicetree/bindings/clock/clock-bindings.txt and
- Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
-- clock-names: should be "axi"
-- interrupts: should contain the PRE interrupt
-- fsl,iram: phandle pointing to the mmio-sram device node, that should be
- used for the PRE SRAM double buffer.
-
-example:
-
-pre@21c8000 {
- compatible = "fsl,imx6qp-pre";
- reg = <0x021c8000 0x1000>;
- interrupts = <GIC_SPI 90 IRQ_TYPE_EDGE_RISING>;
- clocks = <&clks IMX6QDL_CLK_PRE0>;
- clock-names = "axi";
- fsl,iram = <&ocram2>;
-};
-
-Freescale i.MX PRG (Prefetch Resolve Gasket)
-============================================
-
-Required properties:
-- compatible: should be "fsl,imx6qp-prg"
-- reg: should be register base and length as documented in the
- datasheet
-- clocks : phandles to the PRG ipg and axi clock inputs, as described
- in Documentation/devicetree/bindings/clock/clock-bindings.txt and
- Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
-- clock-names: should be "ipg" and "axi"
-- fsl,pres: phandles to the PRE units attached to this PRG, with the fixed
- PRE as the first entry and the muxable PREs following.
-
-example:
-
-prg@21cc000 {
- compatible = "fsl,imx6qp-prg";
- reg = <0x021cc000 0x1000>;
- clocks = <&clks IMX6QDL_CLK_PRG0_APB>,
- <&clks IMX6QDL_CLK_PRG0_AXI>;
- clock-names = "ipg", "axi";
- fsl,pres = <&pre1>, <&pre2>, <&pre3>;
-};
-
-Parallel display support
-========================
-
-Required properties:
-- compatible: Should be "fsl,imx-parallel-display"
-Optional properties:
-- interface-pix-fmt: How this display is connected to the
- display interface. Currently supported types: "rgb24", "rgb565", "bgr666"
- and "lvds666".
-- ddc: phandle describing the i2c bus handling the display data
- channel
-- port@[0-1]: Port nodes with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
- Port 0 is the input port connected to the IPU display interface,
- port 1 is the output port connected to a panel.
-
-example:
-
-disp0 {
- compatible = "fsl,imx-parallel-display";
- interface-pix-fmt = "rgb24";
-
- port@0 {
- reg = <0>;
-
- display_in: endpoint {
- remote-endpoint = <&ipu_di0_disp0>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- display_out: endpoint {
- remote-endpoint = <&panel_in>;
- };
- };
-};
-
-panel {
- ...
-
- port {
- panel_in: endpoint {
- remote-endpoint = <&display_out>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/display/imx/ldb.txt b/Documentation/devicetree/bindings/display/imx/ldb.txt
deleted file mode 100644
index 03653a291b54..000000000000
--- a/Documentation/devicetree/bindings/display/imx/ldb.txt
+++ /dev/null
@@ -1,146 +0,0 @@
-Device-Tree bindings for LVDS Display Bridge (ldb)
-
-LVDS Display Bridge
-===================
-
-The LVDS Display Bridge device tree node contains up to two lvds-channel
-nodes describing each of the two LVDS encoder channels of the bridge.
-
-Required properties:
- - #address-cells : should be <1>
- - #size-cells : should be <0>
- - compatible : should be "fsl,imx53-ldb" or "fsl,imx6q-ldb".
- Both LDB versions are similar, but i.MX6 has an additional
- multiplexer in the front to select any of the four IPU display
- interfaces as input for each LVDS channel.
- - gpr : should be <&gpr> on i.MX53 and i.MX6q.
- The phandle points to the iomuxc-gpr region containing the LVDS
- control register.
-- clocks, clock-names : phandles to the LDB divider and selector clocks and to
- the display interface selector clocks, as described in
- Documentation/devicetree/bindings/clock/clock-bindings.txt
- The following clocks are expected on i.MX53:
- "di0_pll" - LDB LVDS channel 0 mux
- "di1_pll" - LDB LVDS channel 1 mux
- "di0" - LDB LVDS channel 0 gate
- "di1" - LDB LVDS channel 1 gate
- "di0_sel" - IPU1 DI0 mux
- "di1_sel" - IPU1 DI1 mux
- On i.MX6q the following additional clocks are needed:
- "di2_sel" - IPU2 DI0 mux
- "di3_sel" - IPU2 DI1 mux
- The needed clock numbers for each are documented in
- Documentation/devicetree/bindings/clock/imx5-clock.yaml, and in
- Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
-
-Optional properties:
- - pinctrl-names : should be "default" on i.MX53, not used on i.MX6q
- - pinctrl-0 : a phandle pointing to LVDS pin settings on i.MX53,
- not used on i.MX6q
- - fsl,dual-channel : boolean. if it exists, only LVDS channel 0 should
- be configured - one input will be distributed on both outputs in dual
- channel mode
-
-LVDS Channel
-============
-
-Each LVDS Channel has to contain either an of graph link to a panel device node
-or a display-timings node that describes the video timings for the connected
-LVDS display as well as the fsl,data-mapping and fsl,data-width properties.
-
-Required properties:
- - reg : should be <0> or <1>
- - port: Input and output port nodes with endpoint definitions as defined in
- Documentation/devicetree/bindings/graph.txt.
- On i.MX5, the internal two-input-multiplexer is used. Due to hardware
- limitations, only one input port (port@[0,1]) can be used for each channel
- (lvds-channel@[0,1], respectively).
- On i.MX6, there should be four input ports (port@[0-3]) that correspond
- to the four LVDS multiplexer inputs.
- A single output port (port@2 on i.MX5, port@4 on i.MX6) must be connected
- to a panel input port. Optionally, the output port can be left out if
- display-timings are used instead.
-
-Optional properties (required if display-timings are used):
- - display-timings : A node that describes the display timings as defined in
- Documentation/devicetree/bindings/display/panel/display-timing.txt.
- - fsl,data-mapping : should be "spwg" or "jeida"
- This describes how the color bits are laid out in the
- serialized LVDS signal.
- - fsl,data-width : should be <18> or <24>
-
-example:
-
-gpr: iomuxc-gpr@53fa8000 {
- /* ... */
-};
-
-ldb: ldb@53fa8008 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx53-ldb";
- gpr = <&gpr>;
- clocks = <&clks IMX5_CLK_LDB_DI0_SEL>,
- <&clks IMX5_CLK_LDB_DI1_SEL>,
- <&clks IMX5_CLK_IPU_DI0_SEL>,
- <&clks IMX5_CLK_IPU_DI1_SEL>,
- <&clks IMX5_CLK_LDB_DI0_GATE>,
- <&clks IMX5_CLK_LDB_DI1_GATE>;
- clock-names = "di0_pll", "di1_pll",
- "di0_sel", "di1_sel",
- "di0", "di1";
-
- /* Using an of-graph endpoint link to connect the panel */
- lvds-channel@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- port@0 {
- reg = <0>;
-
- lvds0_in: endpoint {
- remote-endpoint = <&ipu_di0_lvds0>;
- };
- };
-
- port@2 {
- reg = <2>;
-
- lvds0_out: endpoint {
- remote-endpoint = <&panel_in>;
- };
- };
- };
-
- /* Using display-timings and fsl,data-mapping/width instead */
- lvds-channel@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- fsl,data-mapping = "spwg";
- fsl,data-width = <24>;
-
- display-timings {
- /* ... */
- };
-
- port@1 {
- reg = <1>;
-
- lvds1_in: endpoint {
- remote-endpoint = <&ipu_di1_lvds1>;
- };
- };
- };
-};
-
-panel: lvds-panel {
- /* ... */
-
- port {
- panel_in: endpoint {
- remote-endpoint = <&lvds0_out>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml
index 5d2089dc596e..daf90ebb39bf 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml
@@ -27,6 +27,10 @@ properties:
- mediatek,mt8195-mdp3-aal
- items:
- enum:
+ - mediatek,mt8188-mdp3-aal
+ - const: mediatek,mt8195-mdp3-aal
+ - items:
+ - enum:
- mediatek,mt2712-disp-aal
- mediatek,mt6795-disp-aal
- const: mediatek,mt8173-disp-aal
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,color.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,color.yaml
index 6160439ce4d7..5564f4063317 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,color.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,color.yaml
@@ -29,6 +29,10 @@ properties:
- mediatek,mt8195-mdp3-color
- items:
- enum:
+ - mediatek,mt8188-mdp3-color
+ - const: mediatek,mt8195-mdp3-color
+ - items:
+ - enum:
- mediatek,mt7623-disp-color
- mediatek,mt2712-disp-color
- const: mediatek,mt2701-disp-color
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,merge.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,merge.yaml
index 0de9f64f3f84..3798a25402d3 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,merge.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,merge.yaml
@@ -26,6 +26,10 @@ properties:
- mediatek,mt8195-disp-merge
- mediatek,mt8195-mdp3-merge
- items:
+ - enum:
+ - mediatek,mt8188-mdp3-merge
+ - const: mediatek,mt8195-mdp3-merge
+ - items:
- const: mediatek,mt6795-disp-merge
- const: mediatek,mt8173-disp-merge
- items:
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,mt8195-hdmi-ddc.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,mt8195-hdmi-ddc.yaml
new file mode 100644
index 000000000000..bde4dc556d4f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,mt8195-hdmi-ddc.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,mt8195-hdmi-ddc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek HDMI MT8195 series HDMI Display Data Channel (DDC)
+
+maintainers:
+ - AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ - CK Hu <ck.hu@mediatek.com>
+
+properties:
+ compatible:
+ oneOf:
+ - const: mediatek,mt8195-hdmi-ddc
+ - items:
+ - const: mediatek,mt8188-hdmi-ddc
+ - const: mediatek,mt8195-hdmi-ddc
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ hdmi {
+ hdmi_ddc: i2c {
+ compatible = "mediatek,mt8195-hdmi-ddc";
+ clocks = <&clk26m>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,mt8195-hdmi.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,mt8195-hdmi.yaml
new file mode 100644
index 000000000000..1b382f99d3ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,mt8195-hdmi.yaml
@@ -0,0 +1,151 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,mt8195-hdmi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT8195 series HDMI-TX Encoder
+
+maintainers:
+ - AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ - CK Hu <ck.hu@mediatek.com>
+
+description:
+ The MediaTek HDMI-TX v2 encoder can generate HDMI format data based on
+ the HDMI Specification 2.0b.
+
+properties:
+ compatible:
+ enum:
+ - mediatek,mt8188-hdmi-tx
+ - mediatek,mt8195-hdmi-tx
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: HDMI Peripheral Bus (APB) clock
+ - description: HDCP and HDMI_TOP clock
+ - description: HDCP, HDMI_TOP and HDMI Audio reference clock
+ - description: VPP HDMI Split clock
+
+ clock-names:
+ items:
+ - const: bus
+ - const: hdcp
+ - const: hdcp24m
+ - const: hdmi-split
+
+ i2c:
+ type: object
+ $ref: /schemas/display/mediatek/mediatek,mt8195-hdmi-ddc.yaml
+ unevaluatedProperties: false
+ description: HDMI DDC I2C controller
+
+ phys:
+ maxItems: 1
+ description: PHY providing the TMDS and pixel clocks to the controller
+
+ phy-names:
+ items:
+ - const: hdmi
+
+ power-domains:
+ maxItems: 1
+
+ '#sound-dai-cells':
+ const: 1
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Input port, usually connected to the output port of a DPI
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Output port that must be connected either to the input port of
+ an HDMI connector node containing a ddc-i2c-bus, or to the input
+ port of an attached bridge chip, such as a SlimPort transmitter.
+
+ required:
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+ - phys
+ - phy-names
+ - ports
+
+allOf:
+ - $ref: /schemas/sound/dai-common.yaml#
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mt8195-clk.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/mt8195-power.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ hdmi@1c300000 {
+ compatible = "mediatek,mt8195-hdmi-tx";
+ reg = <0 0x1c300000 0 0x1000>;
+ clocks = <&topckgen CLK_TOP_HDMI_APB>,
+ <&topckgen CLK_TOP_HDCP>,
+ <&topckgen CLK_TOP_HDCP_24M>,
+ <&vppsys1 CLK_VPP1_VPP_SPLIT_HDMI>;
+ clock-names = "bus", "hdcp", "hdcp24m", "hdmi-split";
+ interrupts = <GIC_SPI 677 IRQ_TYPE_LEVEL_HIGH 0>;
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi";
+ power-domains = <&spm MT8195_POWER_DOMAIN_HDMI_TX>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_pins>;
+ #sound-dai-cells = <1>;
+
+ hdmitx_ddc: i2c {
+ compatible = "mediatek,mt8195-hdmi-ddc";
+ clocks = <&clk26m>;
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ hdmi_in: endpoint {
+ remote-endpoint = <&dpi1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ hdmi_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,padding.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,padding.yaml
index be07bbdc54e3..86787866ced0 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,padding.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,padding.yaml
@@ -20,9 +20,13 @@ description:
properties:
compatible:
- enum:
- - mediatek,mt8188-disp-padding
- - mediatek,mt8195-mdp3-padding
+ oneOf:
+ - enum:
+ - mediatek,mt8188-disp-padding
+ - mediatek,mt8195-mdp3-padding
+ - items:
+ - const: mediatek,mt8188-mdp3-padding
+ - const: mediatek,mt8195-mdp3-padding
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
index e00b88332f2f..246bbb509bea 100644
--- a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
@@ -31,6 +31,7 @@ properties:
- qcom,sm8650-dp
- items:
- enum:
+ - qcom,sar2130p-dp
- qcom,sm6350-dp
- qcom,sm8150-dp
- qcom,sm8250-dp
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
index 2aab33cd0017..82fe95a6d959 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
@@ -23,6 +23,8 @@ properties:
- qcom,msm8996-dsi-ctrl
- qcom,msm8998-dsi-ctrl
- qcom,qcm2290-dsi-ctrl
+ - qcom,sa8775p-dsi-ctrl
+ - qcom,sar2130p-dsi-ctrl
- qcom,sc7180-dsi-ctrl
- qcom,sc7280-dsi-ctrl
- qcom,sdm660-dsi-ctrl
@@ -314,6 +316,8 @@ allOf:
contains:
enum:
- qcom,msm8998-dsi-ctrl
+ - qcom,sa8775p-dsi-ctrl
+ - qcom,sar2130p-dsi-ctrl
- qcom,sc7180-dsi-ctrl
- qcom,sc7280-dsi-ctrl
- qcom,sdm845-dsi-ctrl
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
index 321470435e65..3c75ff42999a 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
@@ -17,6 +17,8 @@ properties:
enum:
- qcom,dsi-phy-7nm
- qcom,dsi-phy-7nm-8150
+ - qcom,sa8775p-dsi-phy-5nm
+ - qcom,sar2130p-dsi-phy-5nm
- qcom,sc7280-dsi-phy-7nm
- qcom,sm6375-dsi-phy-7nm
- qcom,sm8350-dsi-phy-5nm
diff --git a/Documentation/devicetree/bindings/display/msm/hdmi.yaml b/Documentation/devicetree/bindings/display/msm/hdmi.yaml
index d4a2033afea8..dfec6c3480f3 100644
--- a/Documentation/devicetree/bindings/display/msm/hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/msm/hdmi.yaml
@@ -66,21 +66,6 @@ properties:
maxItems: 1
description: hpd pin
- qcom,hdmi-tx-mux-en-gpios:
- maxItems: 1
- deprecated: true
- description: HDMI mux enable pin
-
- qcom,hdmi-tx-mux-sel-gpios:
- maxItems: 1
- deprecated: true
- description: HDMI mux select pin
-
- qcom,hdmi-tx-mux-lpm-gpios:
- maxItems: 1
- deprecated: true
- description: HDMI mux lpm pin
-
'#sound-dai-cells':
const: 1
@@ -89,12 +74,12 @@ properties:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
- $ref: /schemas/graph.yaml#/$defs/port-base
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller.
port@1:
- $ref: /schemas/graph.yaml#/$defs/port-base
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoints of the controller.
diff --git a/Documentation/devicetree/bindings/display/msm/mdp4.yaml b/Documentation/devicetree/bindings/display/msm/mdp4.yaml
index 35204a287579..03ee09faa335 100644
--- a/Documentation/devicetree/bindings/display/msm/mdp4.yaml
+++ b/Documentation/devicetree/bindings/display/msm/mdp4.yaml
@@ -18,9 +18,10 @@ properties:
clocks:
minItems: 6
- maxItems: 6
+ maxItems: 8
clock-names:
+ minItems: 6
items:
- const: core_clk
- const: iface_clk
@@ -28,6 +29,12 @@ properties:
- const: lut_clk
- const: hdmi_clk
- const: tv_clk
+ - const: lcdc_clk
+ - const: pxo
+ description: XO used to drive the internal LVDS PLL
+
+ '#clock-cells':
+ const: 0
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml
index 7c6462caa442..db9c43b20e2a 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml
@@ -84,6 +84,18 @@ properties:
items:
- description: MDSS_CORE reset
+ interconnects:
+ minItems: 1
+ items:
+ - description: Interconnect path from mdp0 (or a single mdp) port to the data bus
+ - description: Interconnect path from CPU to the reg bus
+
+ interconnect-names:
+ minItems: 1
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
index 5fac3e266703..1053b3bc4908 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
@@ -52,12 +52,23 @@ patternProperties:
items:
- const: qcom,sa8775p-dp
+ "^dsi@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ contains:
+ const: qcom,sa8775p-dsi-ctrl
+
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
- const: qcom,sa8775p-edp-phy
+ contains:
+ enum:
+ - qcom,sa8775p-dsi-phy-5nm
+ - qcom,sa8775p-edp-phy
required:
- compatible
@@ -139,6 +150,20 @@ examples:
remote-endpoint = <&mdss0_dp0_in>;
};
};
+
+ port@1 {
+ reg = <1>;
+ dpu_intf1_out: endpoint {
+ remote-endpoint = <&mdss0_dsi0_in>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ dpu_intf2_out: endpoint {
+ remote-endpoint = <&mdss0_dsi1_in>;
+ };
+ };
};
mdss0_mdp_opp_table: opp-table {
@@ -186,6 +211,160 @@ examples:
vdda-pll-supply = <&vreg_l4a>;
};
+ dsi@ae94000 {
+ compatible = "qcom,sa8775p-dsi-ctrl", "qcom,mdss-dsi-ctrl";
+ reg = <0x0ae94000 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4>;
+
+ clocks = <&dispc_byte_clk>,
+ <&dispcc_intf_clk>,
+ <&dispcc_pclk>,
+ <&dispcc_esc_clk>,
+ <&dispcc_ahb_clk>,
+ <&gcc_bus_clk>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+ assigned-clocks = <&dispcc_byte_clk>,
+ <&dispcc_pclk>;
+ assigned-clock-parents = <&mdss0_dsi0_phy 0>, <&mdss0_dsi0_phy 1>;
+ phys = <&mdss0_dsi0_phy>;
+
+ operating-points-v2 = <&dsi0_opp_table>;
+ power-domains = <&rpmhpd SA8775P_MMCX>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdss0_dsi0_in: endpoint {
+ remote-endpoint = <&dpu_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ mdss0_dsi0_out: endpoint { };
+ };
+ };
+
+ dsi0_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-358000000 {
+ opp-hz = /bits/ 64 <358000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+ };
+ };
+
+ mdss0_dsi0_phy: phy@ae94400 {
+ compatible = "qcom,sa8775p-dsi-phy-5nm";
+ reg = <0x0ae94400 0x200>,
+ <0x0ae94600 0x280>,
+ <0x0ae94900 0x27c>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&dispcc_iface_clk>,
+ <&rpmhcc_ref_clk>;
+ clock-names = "iface", "ref";
+
+ vdds-supply = <&vreg_dsi_supply>;
+ };
+
+ dsi@ae96000 {
+ compatible = "qcom,sa8775p-dsi-ctrl", "qcom,mdss-dsi-ctrl";
+ reg = <0x0ae96000 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4>;
+
+ clocks = <&dispc_byte_clk>,
+ <&dispcc_intf_clk>,
+ <&dispcc_pclk>,
+ <&dispcc_esc_clk>,
+ <&dispcc_ahb_clk>,
+ <&gcc_bus_clk>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+ assigned-clocks = <&dispcc_byte_clk>,
+ <&dispcc_pclk>;
+ assigned-clock-parents = <&mdss0_dsi1_phy 0>, <&mdss0_dsi1_phy 1>;
+ phys = <&mdss0_dsi1_phy>;
+
+ operating-points-v2 = <&dsi1_opp_table>;
+ power-domains = <&rpmhpd SA8775P_MMCX>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdss0_dsi1_in: endpoint {
+ remote-endpoint = <&dpu_intf2_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ mdss0_dsi1_out: endpoint { };
+ };
+ };
+
+ dsi1_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-358000000 {
+ opp-hz = /bits/ 64 <358000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+ };
+ };
+
+ mdss0_dsi1_phy: phy@ae96400 {
+ compatible = "qcom,sa8775p-dsi-phy-5nm";
+ reg = <0x0ae96400 0x200>,
+ <0x0ae96600 0x280>,
+ <0x0ae96900 0x27c>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&dispcc_iface_clk>,
+ <&rpmhcc_ref_clk>;
+ clock-names = "iface", "ref";
+
+ vdds-supply = <&vreg_dsi_supply>;
+ };
+
displayport-controller@af54000 {
compatible = "qcom,sa8775p-dp";
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sar2130p-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sar2130p-mdss.yaml
new file mode 100644
index 000000000000..870144b53cec
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sar2130p-mdss.yaml
@@ -0,0 +1,439 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/msm/qcom,sar2130p-mdss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SAR2130P Display MDSS
+
+maintainers:
+ - Dmitry Baryshkov <lumag@kernel.org>
+
+description:
+ SAR2130P MSM Mobile Display Subsystem (MDSS), which encapsulates sub-blocks
+ such as the DPU display controller, DSI and DP interfaces, etc.
+
+$ref: /schemas/display/msm/mdss-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,sar2130p-mdss
+
+ clocks:
+ items:
+ - description: Display MDSS AHB
+ - description: Display AHB
+ - description: Display hf AXI
+ - description: Display core
+
+ iommus:
+ maxItems: 1
+
+ interconnects:
+ items:
+ - description: Interconnect path from mdp0 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
+
+ interconnect-names:
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
+
+patternProperties:
+ "^display-controller@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ const: qcom,sar2130p-dpu
+
+ "^displayport-controller@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ contains:
+ const: qcom,sar2130p-dp
+
+ "^dsi@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ contains:
+ const: qcom,sar2130p-dsi-ctrl
+
+ "^phy@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ const: qcom,sar2130p-dsi-phy-5nm
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/qcom,rpmhpd.h>
+ #include <dt-bindings/phy/phy-qcom-qmp.h>
+
+ display-subsystem@ae00000 {
+ compatible = "qcom,sar2130p-mdss";
+ reg = <0x0ae00000 0x1000>;
+ reg-names = "mdss";
+
+ interconnects = <&mmss_noc_master_mdp &mc_virt_slave_ebi1>,
+ <&gem_noc_master_appss_proc &config_noc_slave_display_cfg>;
+ interconnect-names = "mdp0-mem", "cpu-cfg";
+
+ resets = <&dispcc_disp_cc_mdss_core_bcr>;
+
+ power-domains = <&dispcc_mdss_gdsc>;
+
+ clocks = <&dispcc_disp_cc_mdss_ahb_clk>,
+ <&gcc_gcc_disp_ahb_clk>,
+ <&gcc_gcc_disp_hf_axi_clk>,
+ <&dispcc_disp_cc_mdss_mdp_clk>;
+ clock-names = "iface", "bus", "nrt_bus", "core";
+
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ iommus = <&apps_smmu 0x1c00 0x2>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ display-controller@ae01000 {
+ compatible = "qcom,sar2130p-dpu";
+ reg = <0x0ae01000 0x8f000>,
+ <0x0aeb0000 0x2008>;
+ reg-names = "mdp", "vbif";
+
+ clocks = <&gcc_gcc_disp_ahb_clk>,
+ <&gcc_gcc_disp_hf_axi_clk>,
+ <&dispcc_disp_cc_mdss_ahb_clk>,
+ <&dispcc_disp_cc_mdss_mdp_lut_clk>,
+ <&dispcc_disp_cc_mdss_mdp_clk>,
+ <&dispcc_disp_cc_mdss_vsync_clk>;
+ clock-names = "bus",
+ "nrt_bus",
+ "iface",
+ "lut",
+ "core",
+ "vsync";
+
+ assigned-clocks = <&dispcc_disp_cc_mdss_vsync_clk>;
+ assigned-clock-rates = <19200000>;
+
+ operating-points-v2 = <&mdp_opp_table>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dpu_intf0_out: endpoint {
+ remote-endpoint = <&mdss_dp0_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ dpu_intf1_out: endpoint {
+ remote-endpoint = <&mdss_dsi0_in>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ dpu_intf2_out: endpoint {
+ remote-endpoint = <&mdss_dsi1_in>;
+ };
+ };
+ };
+
+ mdp_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-325000000 {
+ opp-hz = /bits/ 64 <325000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-375000000 {
+ opp-hz = /bits/ 64 <375000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-514000000 {
+ opp-hz = /bits/ 64 <514000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+
+ displayport-controller@ae90000 {
+ compatible = "qcom,sar2130p-dp",
+ "qcom,sm8350-dp";
+ reg = <0xae90000 0x200>,
+ <0xae90200 0x200>,
+ <0xae90400 0xc00>,
+ <0xae91000 0x400>,
+ <0xae91400 0x400>;
+
+ interrupt-parent = <&mdss>;
+ interrupts = <12>;
+ clocks = <&dispcc_disp_cc_mdss_ahb_clk>,
+ <&dispcc_disp_cc_mdss_dptx0_aux_clk>,
+ <&dispcc_disp_cc_mdss_dptx0_link_clk>,
+ <&dispcc_disp_cc_mdss_dptx0_link_intf_clk>,
+ <&dispcc_disp_cc_mdss_dptx0_pixel0_clk>;
+ clock-names = "core_iface",
+ "core_aux",
+ "ctrl_link",
+ "ctrl_link_iface",
+ "stream_pixel";
+
+ assigned-clocks = <&dispcc_disp_cc_mdss_dptx0_link_clk_src>,
+ <&dispcc_disp_cc_mdss_dptx0_pixel0_clk_src>;
+ assigned-clock-parents = <&usb_dp_qmpphy_QMP_USB43DP_DP_LINK_CLK>,
+ <&usb_dp_qmpphy_QMP_USB43DP_DP_VCO_DIV_CLK>;
+
+ phys = <&usb_dp_qmpphy QMP_USB43DP_DP_PHY>;
+ phy-names = "dp";
+
+ #sound-dai-cells = <0>;
+
+ operating-points-v2 = <&dp_opp_table>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdss_dp0_in: endpoint {
+ remote-endpoint = <&dpu_intf0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ mdss_dp0_out: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_dp_in>;
+ };
+ };
+ };
+
+ dp_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-162000000 {
+ opp-hz = /bits/ 64 <162000000>;
+ required-opps = <&rpmhpd_opp_low_svs_d1>;
+ };
+
+ opp-270000000 {
+ opp-hz = /bits/ 64 <270000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-540000000 {
+ opp-hz = /bits/ 64 <540000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-810000000 {
+ opp-hz = /bits/ 64 <810000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+
+ dsi@ae94000 {
+ compatible = "qcom,sar2130p-dsi-ctrl",
+ "qcom,mdss-dsi-ctrl";
+ reg = <0x0ae94000 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4>;
+
+ clocks = <&dispcc_disp_cc_mdss_byte0_clk>,
+ <&dispcc_disp_cc_mdss_byte0_intf_clk>,
+ <&dispcc_disp_cc_mdss_pclk0_clk>,
+ <&dispcc_disp_cc_mdss_esc0_clk>,
+ <&dispcc_disp_cc_mdss_ahb_clk>,
+ <&gcc_gcc_disp_hf_axi_clk>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+
+ assigned-clocks = <&dispcc_disp_cc_mdss_byte0_clk_src>,
+ <&dispcc_disp_cc_mdss_pclk0_clk_src>;
+ assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+
+ operating-points-v2 = <&dsi_opp_table>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ phys = <&mdss_dsi0_phy>;
+ phy-names = "dsi";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdss_dsi0_in: endpoint {
+ remote-endpoint = <&dpu_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mdss_dsi0_out: endpoint {
+ };
+ };
+ };
+
+ dsi_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-187500000 {
+ opp-hz = /bits/ 64 <187500000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-358000000 {
+ opp-hz = /bits/ 64 <358000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+ };
+ };
+
+ mdss_dsi0_phy: phy@ae94400 {
+ compatible = "qcom,sar2130p-dsi-phy-5nm";
+ reg = <0x0ae95000 0x200>,
+ <0x0ae95200 0x280>,
+ <0x0ae95500 0x400>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&dispcc_disp_cc_mdss_ahb_clk>,
+ <&rpmhcc_rpmh_cxo_clk>;
+ clock-names = "iface", "ref";
+ };
+
+ dsi@ae96000 {
+ compatible = "qcom,sar2130p-dsi-ctrl",
+ "qcom,mdss-dsi-ctrl";
+ reg = <0x0ae96000 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <5>;
+
+ clocks = <&dispcc_disp_cc_mdss_byte1_clk>,
+ <&dispcc_disp_cc_mdss_byte1_intf_clk>,
+ <&dispcc_disp_cc_mdss_pclk1_clk>,
+ <&dispcc_disp_cc_mdss_esc1_clk>,
+ <&dispcc_disp_cc_mdss_ahb_clk>,
+ <&gcc_gcc_disp_hf_axi_clk>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+
+ assigned-clocks = <&dispcc_disp_cc_mdss_byte1_clk_src>,
+ <&dispcc_disp_cc_mdss_pclk1_clk_src>;
+ assigned-clock-parents = <&mdss_dsi1_phy 0>, <&mdss_dsi1_phy 1>;
+
+ operating-points-v2 = <&dsi_opp_table>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ phys = <&mdss_dsi1_phy>;
+ phy-names = "dsi";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdss_dsi1_in: endpoint {
+ remote-endpoint = <&dpu_intf2_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mdss_dsi1_out: endpoint {
+ };
+ };
+ };
+ };
+
+ mdss_dsi1_phy: phy@ae97000 {
+ compatible = "qcom,sar2130p-dsi-phy-5nm";
+ reg = <0x0ae97000 0x200>,
+ <0x0ae97200 0x280>,
+ <0x0ae97500 0x400>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&dispcc_disp_cc_mdss_ahb_clk>,
+ <&rpmhcc_rpmh_cxo_clk>;
+ clock-names = "iface", "ref";
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sc7280-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sc7280-dpu.yaml
index 6902795b4e2c..df9ec15ad6c3 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sc7280-dpu.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sc7280-dpu.yaml
@@ -17,6 +17,7 @@ $ref: /schemas/display/msm/dpu-common.yaml#
properties:
compatible:
enum:
+ - qcom,sar2130p-dpu
- qcom,sc7280-dpu
- qcom,sc8280xp-dpu
- qcom,sm8350-dpu
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8350-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8350-mdss.yaml
index 163fc83c1e80..68176de854b3 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sm8350-mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8350-mdss.yaml
@@ -38,12 +38,16 @@ properties:
maxItems: 1
interconnects:
- maxItems: 2
+ items:
+ - description: Interconnect path from the MDP0 port to the data bus
+ - description: Interconnect path from the MDP1 port to the data bus
+ - description: Interconnect path from the CPU to the reg bus
interconnect-names:
items:
- const: mdp0-mem
- const: mdp1-mem
+ - const: cpu-cfg
patternProperties:
"^display-controller@[0-9a-f]+$":
@@ -88,6 +92,7 @@ examples:
#include <dt-bindings/clock/qcom,gcc-sm8350.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interconnect/qcom,sm8350.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
@@ -97,8 +102,10 @@ examples:
reg-names = "mdss";
interconnects = <&mmss_noc MASTER_MDP0 0 &mc_virt SLAVE_EBI1 0>,
- <&mmss_noc MASTER_MDP1 0 &mc_virt SLAVE_EBI1 0>;
- interconnect-names = "mdp0-mem", "mdp1-mem";
+ <&mmss_noc MASTER_MDP1 0 &mc_virt SLAVE_EBI1 0>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "mdp0-mem", "mdp1-mem", "cpu-cfg";
power-domains = <&dispcc MDSS_GDSC>;
resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
diff --git a/Documentation/devicetree/bindings/display/panel/boe,td4320.yaml b/Documentation/devicetree/bindings/display/panel/boe,td4320.yaml
new file mode 100644
index 000000000000..c6bff0ece360
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/boe,td4320.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/boe,td4320.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BOE TD4320 MIPI-DSI panels
+
+maintainers:
+ - Barnabas Czeman <barnabas.czeman@mainlining.org>
+
+description:
+ BOE TD4320 6.3" 1080x2340 panel found in Xiaomi Redmi Note 7 smartphone.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: boe,td4320
+
+ reg:
+ maxItems: 1
+
+ iovcc-supply:
+ description: I/O voltage rail
+
+ vsn-supply:
+ description: Negative source voltage rail
+
+ vsp-supply:
+ description: Positive source voltage rail
+
+required:
+ - compatible
+ - reg
+ - reset-gpios
+ - port
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "boe,td4320";
+ reg = <0>;
+ backlight = <&backlight>;
+ reset-gpios = <&tlmm 45 GPIO_ACTIVE_LOW>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8279.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8279.yaml
new file mode 100644
index 000000000000..f619aea82bdf
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/himax,hx8279.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/himax,hx8279.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Himax HX8279/HX8279-D based MIPI-DSI panels
+
+maintainers:
+ - AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+
+description:
+ The Himax HX8279 is a 1803-channel output source driver with MIPI
+ TCON, which generates the horizontal and vertical control timing for
+ the source and gate drivers.
+ This driver IC is best suited for 1200x1920, 1080x1920, 1200x1600,
+ and 600x1024 panels and outputs full RGB888 over a single or dual,
+ two- or four-lane, MIPI-DSI video interface.
+
+allOf:
+ - $ref: panel-common-dual.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - aoly,sl101pm1794fog-v15
+ - startek,kd070fhfid078
+ - const: himax,hx8279
+
+ reg:
+ maxItems: 1
+
+ iovcc-supply:
+ description: I/O voltage supply
+
+ vdd-supply:
+ description: Panel power supply
+
+required:
+ - compatible
+ - reg
+ - backlight
+ - reset-gpios
+ - iovcc-supply
+ - vdd-supply
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "startek,kd070fhfid078", "himax,hx8279";
+ reg = <0>;
+ backlight = <&backlight>;
+ enable-gpios = <&pio 25 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 45 GPIO_ACTIVE_HIGH>;
+ iovcc-supply = <&vreg_lcm_vio>;
+ vdd-supply = <&vreg_lcm_vdd>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml
index e2a2dd4ef5fa..5fcea62fd58f 100644
--- a/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml
+++ b/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml
@@ -23,6 +23,7 @@ properties:
maxItems: 1
backlight: true
+ port: true
reset-gpios: true
iovcc-supply:
description: regulator that supplies the iovcc voltage
diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
index af9e0ea0e72f..b0e2c82232d3 100644
--- a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
+++ b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
@@ -22,6 +22,7 @@ properties:
maxItems: 1
backlight: true
+ port: true
reset-gpios: true
iovcc-supply:
description: regulator that supplies the iovcc voltage
diff --git a/Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml b/Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
index bbaaa783d184..2219d3d4ac43 100644
--- a/Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
+++ b/Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: LG SW43408 1080x2160 DSI panel
maintainers:
- - Caleb Connolly <caleb.connolly@linaro.org>
+ - Casey Connolly <casey.connolly@linaro.org>
description:
This panel is used on the Pixel 3, it is a 60hz OLED panel which
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt37801.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt37801.yaml
new file mode 100644
index 000000000000..1b38c1d0af68
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt37801.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/novatek,nt37801.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Novatek NT37801 AMOLED DSI Panel
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+description:
+ Naming is inconclusive and different sources claim this is either Novatek
+ NT37801 or NT37810 AMOLED DSI Panel.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: novatek,nt37801
+
+ reg:
+ maxItems: 1
+ description: DSI virtual channel
+
+ vci-supply: true
+ vdd-supply: true
+ vddio-supply: true
+ port: true
+ reset-gpios: true
+
+required:
+ - compatible
+ - reg
+ - vci-supply
+ - vdd-supply
+ - vddio-supply
+ - port
+ - reset-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "novatek,nt37801";
+ reg = <0>;
+
+ vci-supply = <&vreg_l13b_3p0>;
+ vdd-supply = <&vreg_l11b_1p2>;
+ vddio-supply = <&vreg_l12b_1p8>;
+
+ reset-gpios = <&tlmm 98 GPIO_ACTIVE_LOW>;
+
+ port {
+ endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index b0de4fd6f3d4..5542c9229d54 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -226,6 +226,8 @@ properties:
- netron-dy,e231732
# Newhaven Display International 480 x 272 TFT LCD panel
- newhaven,nhd-4.3-480272ef-atxl
+ # NLT Technologies, Ltd. 15.6" WXGA (1366×768) LVDS TFT LCD panel
+ - nlt,nl13676bc25-03f
# New Vision Display 7.0" 800 RGB x 480 TFT LCD panel
- nvd,9128
# OKAYA Electric America, Inc. RS800480T-7X0GP 7" WVGA LCD panel
@@ -246,6 +248,8 @@ properties:
- osddisplays,osd070t1718-19ts
# One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel
- osddisplays,osd101t2045-53ts
+ # POWERTIP PH128800T004-ZZA01 10.1" WXGA TFT LCD panel
+ - powertip,ph128800t004-zza01
# POWERTIP PH128800T006-ZHC01 10.1" WXGA TFT LCD panel
- powertip,ph128800t006-zhc01
# POWERTIP PH800480T013-IDF2 7.0" WVGA TFT LCD panel
@@ -284,6 +288,8 @@ properties:
- startek,kd070wvfpa
# Team Source Display Technology TST043015CMHX 4.3" WQVGA TFT LCD panel
- team-source-display,tst043015cmhx
+ # Tianma Micro-electronics P0700WXF1MBAA 7.0" WXGA (1280x800) LVDS TFT LCD panel
+ - tianma,p0700wxf1mbaa
# Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
- tianma,tm070jdhg30
# Tianma Micro-electronics TM070JDHG34-00 7.0" WXGA (1280x800) LVDS TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
index 684c2896d238..31f0c0f038e4 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
@@ -19,6 +19,8 @@ properties:
- const: samsung,atna33xc20
- items:
- enum:
+ # Samsung 14" WQXGA+ (2880×1800 pixels) eDP AMOLED panel
+ - samsung,atna40yk20
# Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
- samsung,atna45af01
# Samsung 14.5" 3K (2944x1840 pixels) eDP AMOLED panel
diff --git a/Documentation/devicetree/bindings/display/panel/truly,nt35597-2K-display.yaml b/Documentation/devicetree/bindings/display/panel/truly,nt35597-2K-display.yaml
new file mode 100644
index 000000000000..36be09c900f2
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/truly,nt35597-2K-display.yaml
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/truly,nt35597-2K-display.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Truly NT35597 DSI 2K display
+
+maintainers:
+ - Neil Armstrong <neil.armstrong@linaro.org>
+
+description: |
+ Truly NT35597 DSI 2K display is used on the Qualcomm SDM845 MTP board.
+
+allOf:
+ - $ref: panel-common-dual.yaml#
+
+properties:
+ compatible:
+ const: truly,nt35597-2K-display
+
+ reg:
+ maxItems: 1
+
+ vdda-supply:
+ description: regulator that provides the Power IC supply voltage
+
+ vdispp-supply:
+ description: regulator that provides the supply voltage for positive LCD bias
+
+ vdispn-supply:
+ description: regulator that provides the supply voltage for negative LCD bias
+
+ reset-gpios: true
+
+ mode-gpios:
+ description:
+ GPIO for choosing the display mode between single DSI and dual DSI.
+ This should be low for dual DSI mode and high for single DSI mode.
+
+ ports:
+ required:
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - reg
+ - vdda-supply
+ - reset-gpios
+ - mode-gpios
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "truly,nt35597-2K-display";
+ reg = <0>;
+
+ vdda-supply = <&pm8998_l14>;
+ vdispp-supply = <&lab_regulator>;
+ vdispn-supply = <&ibb_regulator>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+ mode-gpios = <&tlmm 52 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ panel0_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ panel1_in: endpoint {
+ remote-endpoint = <&dsi1_out>;
+ };
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/visionox,g2647fb105.yaml b/Documentation/devicetree/bindings/display/panel/visionox,g2647fb105.yaml
new file mode 100644
index 000000000000..49dcd9b8f670
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/visionox,g2647fb105.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/visionox,g2647fb105.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Visionox G2647FB105 6.47" 1080x2340 MIPI-DSI Panel
+
+maintainers:
+ - Alexander Baransky <sanyapilot496@gmail.com>
+
+description:
+ The Visionox G2647FB105 is a 6.47 inch 1080x2340 MIPI-DSI CMD mode OLED panel.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: visionox,g2647fb105
+
+ reg:
+ maxItems: 1
+
+ vdd3p3-supply:
+ description: 3.3V source voltage rail
+
+ vddio-supply:
+ description: I/O source voltage rail
+
+ vsn-supply:
+ description: Negative source voltage rail
+
+ vsp-supply:
+ description: Positive source voltage rail
+
+ reset-gpios: true
+ port: true
+
+required:
+ - compatible
+ - reg
+ - vdd3p3-supply
+ - vddio-supply
+ - vsn-supply
+ - vsp-supply
+ - reset-gpios
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "visionox,g2647fb105";
+ reg = <0>;
+
+ vdd3p3-supply = <&vreg_l7c_3p0>;
+ vddio-supply = <&vreg_l13a_1p8>;
+ vsn-supply = <&vreg_ibb>;
+ vsp-supply = <&vreg_lab>;
+
+ reset-gpios = <&pm6150l_gpios 9 GPIO_ACTIVE_LOW>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/rockchip/cdn-dp-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/cdn-dp-rockchip.txt
deleted file mode 100644
index 8df7d2e393d6..000000000000
--- a/Documentation/devicetree/bindings/display/rockchip/cdn-dp-rockchip.txt
+++ /dev/null
@@ -1,74 +0,0 @@
-Rockchip RK3399 specific extensions to the cdn Display Port
-================================
-
-Required properties:
-- compatible: must be "rockchip,rk3399-cdn-dp"
-
-- reg: physical base address of the controller and length
-
-- clocks: from common clock binding: handle to dp clock.
-
-- clock-names: from common clock binding:
- Required elements: "core-clk" "pclk" "spdif" "grf"
-
-- resets : a list of phandle + reset specifier pairs
-- reset-names : string of reset names
- Required elements: "apb", "core", "dptx", "spdif"
-- power-domains : power-domain property defined with a phandle
- to respective power domain.
-- assigned-clocks: main clock, should be <&cru SCLK_DP_CORE>
-- assigned-clock-rates : the DP core clk frequency, shall be: 100000000
-
-- rockchip,grf: this soc should set GRF regs, so need get grf here.
-
-- ports: contain a port nodes with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
- contained 2 endpoints, connecting to the output of vop.
-
-- phys: from general PHY binding: the phandle for the PHY device.
-
-- extcon: extcon specifier for the Power Delivery
-
-- #sound-dai-cells = it must be 1 if your system is using 2 DAIs: I2S, SPDIF
-
--------------------------------------------------------------------------------
-
-Example:
- cdn_dp: dp@fec00000 {
- compatible = "rockchip,rk3399-cdn-dp";
- reg = <0x0 0xfec00000 0x0 0x100000>;
- interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru SCLK_DP_CORE>, <&cru PCLK_DP_CTRL>,
- <&cru SCLK_SPDIF_REC_DPTX>, <&cru PCLK_VIO_GRF>;
- clock-names = "core-clk", "pclk", "spdif", "grf";
- assigned-clocks = <&cru SCLK_DP_CORE>;
- assigned-clock-rates = <100000000>;
- power-domains = <&power RK3399_PD_HDCP>;
- phys = <&tcphy0_dp>, <&tcphy1_dp>;
- resets = <&cru SRST_DPTX_SPDIF_REC>;
- reset-names = "spdif";
- extcon = <&fusb0>, <&fusb1>;
- rockchip,grf = <&grf>;
- #address-cells = <1>;
- #size-cells = <0>;
- #sound-dai-cells = <1>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- dp_in: port {
- #address-cells = <1>;
- #size-cells = <0>;
- dp_in_vopb: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&vopb_out_dp>;
- };
-
- dp_in_vopl: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&vopl_out_dp>;
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml
index 60dedf9b2be7..d99b23b88cc5 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml
@@ -15,6 +15,7 @@ properties:
enum:
- rockchip,rk3288-dp
- rockchip,rk3399-edp
+ - rockchip,rk3588-edp
clocks:
minItems: 2
@@ -31,16 +32,23 @@ properties:
maxItems: 1
resets:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
reset-names:
- const: dp
+ minItems: 1
+ items:
+ - const: dp
+ - const: apb
rockchip,grf:
$ref: /schemas/types.yaml#/definitions/phandle
description:
This SoC makes use of GRF regs.
+ aux-bus:
+ $ref: /schemas/display/dp-aux-bus.yaml#
+
required:
- compatible
- clocks
@@ -52,6 +60,19 @@ required:
allOf:
- $ref: /schemas/display/bridge/analogix,dp.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,rk3588-edp
+ then:
+ properties:
+ resets:
+ minItems: 2
+ reset-names:
+ minItems: 2
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml
index 5b87b0f1963e..290376bec079 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml
@@ -23,13 +23,11 @@ properties:
maxItems: 1
clocks:
- minItems: 1
items:
- description: The HDMI controller main clock
- description: The HDMI PHY reference clock
clock-names:
- minItems: 1
items:
- const: pclk
- const: ref
@@ -58,6 +56,12 @@ properties:
- port@0
- port@1
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to GRF used to control the polarity of hsync/vsync of the
+ rk3036 HDMI.
+
required:
- compatible
- reg
@@ -77,6 +81,8 @@ allOf:
const: rockchip,rk3036-inno-hdmi
then:
+ required:
+ - rockchip,grf
properties:
power-domains: false
@@ -87,11 +93,6 @@ allOf:
const: rockchip,rk3128-inno-hdmi
then:
- properties:
- clocks:
- minItems: 2
- clock-names:
- minItems: 2
required:
- power-domains
@@ -106,10 +107,11 @@ examples:
compatible = "rockchip,rk3036-inno-hdmi";
reg = <0x20034000 0x4000>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_HDMI>;
- clock-names = "pclk";
+ clocks = <&cru PCLK_HDMI>, <&cru SCLK_LCDC>;
+ clock-names = "pclk", "ref";
pinctrl-names = "default";
pinctrl-0 = <&hdmi_ctl>;
+ rockchip,grf = <&grf>;
#sound-dai-cells = <0>;
ports {
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3399-cdn-dp.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3399-cdn-dp.yaml
new file mode 100644
index 000000000000..1a33128e77f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3399-cdn-dp.yaml
@@ -0,0 +1,170 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/rockchip/rockchip,rk3399-cdn-dp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RK3399 specific extensions to the CDN Display Port
+
+maintainers:
+ - Andy Yan <andy.yan@rock-chip.com>
+ - Heiko Stuebner <heiko@sntech.de>
+ - Sandy Huang <hjc@rock-chips.com>
+
+allOf:
+ - $ref: /schemas/sound/dai-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: rockchip,rk3399-cdn-dp
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: DP core work clock
+ - description: APB clock
+ - description: SPDIF interface clock
+ - description: GRF clock
+
+ clock-names:
+ items:
+ - const: core-clk
+ - const: pclk
+ - const: spdif
+ - const: grf
+
+ extcon:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 1
+ items:
+ - description: Extcon device providing the cable state for DP PHY device 0
+ - description: Extcon device providing the cable state for DP PHY device 1
+ description:
+ List of phandles to the extcon devices providing the cable state for the DP PHYs.
+
+ interrupts:
+ maxItems: 1
+
+ phys:
+ minItems: 1
+ items:
+ - description: DP output to the DP PHY device 0
+ - description: DP output to the DP PHY device 1
+ description:
+ The RK3399 has two DP-USB PHYs. Specify the single PHY to use, or
+ specify both PHYs here to let the driver determine which PHY to use.
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Input of the CDN DP
+
+ properties:
+ endpoint@0:
+ description: Connection to the VOPB
+
+ endpoint@1:
+ description: Connection to the VOPL
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Output of the CDN DP
+
+ required:
+ - port@0
+ - port@1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 4
+
+ reset-names:
+ items:
+ - const: spdif
+ - const: dptx
+ - const: apb
+ - const: core
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to GRF register to control HPD.
+
+ "#sound-dai-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - phys
+ - ports
+ - resets
+ - reset-names
+ - rockchip,grf
+ - "#sound-dai-cells"
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3399-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/rk3399-power.h>
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ dp@fec00000 {
+ compatible = "rockchip,rk3399-cdn-dp";
+ reg = <0x0 0xfec00000 0x0 0x100000>;
+ assigned-clocks = <&cru SCLK_DP_CORE>;
+ assigned-clock-rates = <100000000>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_DP_CORE>, <&cru PCLK_DP_CTRL>, <&cru SCLK_SPDIF_REC_DPTX>,
+ <&cru PCLK_VIO_GRF>;
+ clock-names = "core-clk", "pclk", "spdif", "grf";
+ power-domains = <&power RK3399_PD_HDCP>;
+ phys = <&tcphy0_dp>, <&tcphy1_dp>;
+ resets = <&cru SRST_DPTX_SPDIF_REC>, <&cru SRST_P_UPHY0_DPTX>,
+ <&cru SRST_P_UPHY0_APB>, <&cru SRST_DP_CORE>;
+ reset-names = "spdif", "dptx", "apb", "core";
+ rockchip,grf = <&grf>;
+ #sound-dai-cells = <1>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dp_in: port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dp_in_vopb: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vopb_out_dp>;
+ };
+
+ dp_in_vopl: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vopl_out_dp>;
+ };
+ };
+
+ dp_out: port@1 {
+ reg = <1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
index b339b7e708c6..8b5f58103dda 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
@@ -73,12 +73,6 @@ properties:
port:
$ref: /schemas/graph.yaml#/properties/port
- assigned-clocks:
- maxItems: 2
-
- assigned-clock-rates:
- maxItems: 2
-
iommus:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/sitronix,st7571.yaml b/Documentation/devicetree/bindings/display/sitronix,st7571.yaml
new file mode 100644
index 000000000000..4fea782fccd7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sitronix,st7571.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/sitronix,st7571.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sitronix ST7571 Display Controller
+
+maintainers:
+ - Marcus Folkesson <marcus.folkesson@gmail.com>
+
+description:
+ Sitronix ST7571 is a driver and controller for 4-level gray
+ scale and monochrome dot matrix LCD panels.
+
+allOf:
+ - $ref: panel/panel-common.yaml#
+
+properties:
+ compatible:
+ const: sitronix,st7571
+
+ reg:
+ maxItems: 1
+
+ sitronix,grayscale:
+ type: boolean
+ description:
+ Display supports 4-level grayscale.
+
+ reset-gpios: true
+ width-mm: true
+ height-mm: true
+ panel-timing: true
+
+required:
+ - compatible
+ - reg
+ - reset-gpios
+ - width-mm
+ - height-mm
+ - panel-timing
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ display@3f {
+ compatible = "sitronix,st7571";
+ reg = <0x3f>;
+ reset-gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
+ width-mm = <37>;
+ height-mm = <27>;
+
+ panel-timing {
+ hactive = <128>;
+ vactive = <96>;
+ hback-porch = <0>;
+ vback-porch = <0>;
+ clock-frequency = <0>;
+ hfront-porch = <0>;
+ hsync-len = <0>;
+ vfront-porch = <0>;
+ vsync-len = <0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/truly,nt35597.txt b/Documentation/devicetree/bindings/display/truly,nt35597.txt
deleted file mode 100644
index f39c77ee36ea..000000000000
--- a/Documentation/devicetree/bindings/display/truly,nt35597.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-Truly model NT35597 DSI display driver
-
-The Truly NT35597 is a generic display driver, currently only configured
-for use in the 2K display on the Qualcomm SDM845 MTP board.
-
-Required properties:
-- compatible: should be "truly,nt35597-2K-display"
-- vdda-supply: phandle of the regulator that provides the supply voltage
- Power IC supply
-- vdispp-supply: phandle of the regulator that provides the supply voltage
- for positive LCD bias
-- vdispn-supply: phandle of the regulator that provides the supply voltage
- for negative LCD bias
-- reset-gpios: phandle of gpio for reset line
- This should be 8mA, gpio can be configured using mux, pinctrl, pinctrl-names
- (active low)
-- mode-gpios: phandle of the gpio for choosing the mode of the display
- for single DSI or Dual DSI
- This should be low for dual DSI and high for single DSI mode
-- ports: This device has two video ports driven by two DSIs. Their connections
- are modeled using the OF graph bindings specified in
- Documentation/devicetree/bindings/graph.txt.
- - port@0: DSI input port driven by master DSI
- - port@1: DSI input port driven by secondary DSI
-
-Example:
-
- dsi@ae94000 {
- panel@0 {
- compatible = "truly,nt35597-2K-display";
- reg = <0>;
- vdda-supply = <&pm8998_l14>;
- vdispp-supply = <&lab_regulator>;
- vdispn-supply = <&ibb_regulator>;
- pinctrl-names = "default", "suspend";
- pinctrl-0 = <&dpu_dsi_active>;
- pinctrl-1 = <&dpu_dsi_suspend>;
-
- reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
- mode-gpios = <&tlmm 52 GPIO_ACTIVE_HIGH>;
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- panel0_in: endpoint {
- remote-endpoint = <&dsi0_out>;
- };
- };
-
- port@1 {
- reg = <1>;
- panel1_in: endpoint {
- remote-endpoint = <&dsi1_out>;
- };
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.txt b/Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.txt
deleted file mode 100644
index 447fb44e7abe..000000000000
--- a/Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-* NVIDIA Tegra APB DMA controller
-
-Required properties:
-- compatible: Should be "nvidia,<chip>-apbdma"
-- reg: Should contain DMA registers location and length. This should include
- all of the per-channel registers.
-- interrupts: Should contain all of the per-channel DMA interrupts.
-- clocks: Must contain one entry, for the module clock.
- See ../clocks/clock-bindings.txt for details.
-- resets : Must contain an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
-- reset-names : Must include the following entries:
- - dma
-- #dma-cells : Must be <1>. This dictates the length of DMA specifiers in
- client nodes' dmas properties. The specifier represents the DMA request
- select value for the peripheral. For more details, consult the Tegra TRM's
- documentation of the APB DMA channel control register REQ_SEL field.
-
-Examples:
-
-apbdma: dma@6000a000 {
- compatible = "nvidia,tegra20-apbdma";
- reg = <0x6000a000 0x1200>;
- interrupts = < 0 136 0x04
- 0 137 0x04
- 0 138 0x04
- 0 139 0x04
- 0 140 0x04
- 0 141 0x04
- 0 142 0x04
- 0 143 0x04
- 0 144 0x04
- 0 145 0x04
- 0 146 0x04
- 0 147 0x04
- 0 148 0x04
- 0 149 0x04
- 0 150 0x04
- 0 151 0x04 >;
- clocks = <&tegra_car 34>;
- resets = <&tegra_car 34>;
- reset-names = "dma";
- #dma-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.yaml b/Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.yaml
new file mode 100644
index 000000000000..a2ffd5209b3b
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.yaml
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/nvidia,tegra20-apbdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra APB DMA Controller
+
+description:
+ The NVIDIA Tegra APB DMA controller is a hardware component that
+ enables direct memory access (DMA) on Tegra systems. It facilitates
+ data transfer between I/O devices and main memory without constant
+ CPU intervention.
+
+maintainers:
+ - Jonathan Hunter <jonathanh@nvidia.com>
+
+properties:
+ compatible:
+ oneOf:
+ - const: nvidia,tegra20-apbdma
+ - items:
+ - const: nvidia,tegra30-apbdma
+ - const: nvidia,tegra20-apbdma
+
+ reg:
+ maxItems: 1
+
+ "#dma-cells":
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ description:
+ Should contain all of the per-channel DMA interrupts in
+ ascending order with respect to the DMA channel index.
+ minItems: 1
+ maxItems: 32
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: dma
+
+required:
+ - compatible
+ - reg
+ - "#dma-cells"
+ - clocks
+ - interrupts
+ - resets
+ - reset-names
+
+allOf:
+ - $ref: dma-controller.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/reset/tegra186-reset.h>
+ dma-controller@6000a000 {
+ compatible = "nvidia,tegra30-apbdma", "nvidia,tegra20-apbdma";
+ reg = <0x6000a000 0x1200>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car 34>;
+ resets = <&tegra_car 34>;
+ reset-names = "dma";
+ #dma-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/example-schema.yaml b/Documentation/devicetree/bindings/example-schema.yaml
index 484f8babcda4..c731d5045e80 100644
--- a/Documentation/devicetree/bindings/example-schema.yaml
+++ b/Documentation/devicetree/bindings/example-schema.yaml
@@ -178,7 +178,9 @@ properties:
description: Child nodes are just another property from a json-schema
perspective.
type: object # DT nodes are json objects
- # Child nodes also need additionalProperties or unevaluatedProperties
+ # Child nodes also need additionalProperties or unevaluatedProperties, where
+ # 'false' should be used in most cases (see 'child-node-with-own-schema'
+ # below).
additionalProperties: false
properties:
vendor,a-child-node-property:
@@ -189,6 +191,17 @@ properties:
required:
- vendor,a-child-node-property
+ child-node-with-own-schema:
+ description: |
+      A child node with its own compatible and device schema (one ending in
+      'additionalProperties: false' or 'unevaluatedProperties: false') can
+      mention only the compatible here and use 'additionalProperties: true'.
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ const: vendor,sub-device
+
# Describe the relationship between different properties
dependencies:
# 'vendor,bool-property' is only allowed when 'vendor,string-array-property'
diff --git a/Documentation/devicetree/bindings/firmware/google,gs101-acpm-ipc.yaml b/Documentation/devicetree/bindings/firmware/google,gs101-acpm-ipc.yaml
index 2cdad1bbae73..9785aac3b5f3 100644
--- a/Documentation/devicetree/bindings/firmware/google,gs101-acpm-ipc.yaml
+++ b/Documentation/devicetree/bindings/firmware/google,gs101-acpm-ipc.yaml
@@ -27,6 +27,15 @@ properties:
mboxes:
maxItems: 1
+ pmic:
+ description: Child node describing the main PMIC.
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: samsung,s2mpg10-pmic
+
shmem:
description:
List of phandle pointing to the shared memory (SHM) area. The memory
@@ -43,8 +52,34 @@ additionalProperties: false
examples:
- |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
power-management {
compatible = "google,gs101-acpm-ipc";
mboxes = <&ap2apm_mailbox>;
shmem = <&apm_sram>;
+
+ pmic {
+ compatible = "samsung,s2mpg10-pmic";
+ interrupts-extended = <&gpa0 6 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ LDO1 {
+ regulator-name = "vdd_ldo1";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-always-on;
+ };
+
+ // ...
+
+ BUCK1 {
+ regulator-name = "vdd_mif";
+ regulator-min-microvolt = <450000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt b/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt
deleted file mode 100644
index 6eff1afd8daf..000000000000
--- a/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-Intel Service Layer Driver for Stratix10 SoC
-============================================
-Intel Stratix10 SoC is composed of a 64 bit quad-core ARM Cortex A53 hard
-processor system (HPS) and Secure Device Manager (SDM). When the FPGA is
-configured from HPS, there needs to be a way for HPS to notify SDM the
-location and size of the configuration data. Then SDM will get the
-configuration data from that location and perform the FPGA configuration.
-
-To meet the whole system security needs and support virtual machine requesting
-communication with SDM, only the secure world of software (EL3, Exception
-Layer 3) can interface with SDM. All software entities running on other
-exception layers must channel through the EL3 software whenever it needs
-service from SDM.
-
-Intel Stratix10 service layer driver, running at privileged exception level
-(EL1, Exception Layer 1), interfaces with the service providers and provides
-the services for FPGA configuration, QSPI, Crypto and warm reset. Service layer
-driver also manages secure monitor call (SMC) to communicate with secure monitor
-code running in EL3.
-
-Required properties:
--------------------
-The svc node has the following mandatory properties, must be located under
-the firmware node.
-
-- compatible: "intel,stratix10-svc" or "intel,agilex-svc"
-- method: smc or hvc
- smc - Secure Monitor Call
- hvc - Hypervisor Call
-- memory-region:
- phandle to the reserved memory node. See
- Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
- for details
-
-Example:
--------
-
- reserved-memory {
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- service_reserved: svcbuffer@0 {
- compatible = "shared-dma-pool";
- reg = <0x0 0x0 0x0 0x1000000>;
- alignment = <0x1000>;
- no-map;
- };
- };
-
- firmware {
- svc {
- compatible = "intel,stratix10-svc";
- method = "smc";
- memory-region = <&service_reserved>;
- };
- };
diff --git a/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.yaml b/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.yaml
new file mode 100644
index 000000000000..fac1e955852e
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/firmware/intel,stratix10-svc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Service Layer Driver for Stratix10 SoC
+
+maintainers:
+ - Dinh Nguyen <dinguyen@kernel.org>
+ - Mahesh Rao <mahesh.rao@altera.com>
+
+description: >
+ Intel Stratix10 SoC is composed of a 64 bit quad-core ARM Cortex A53 hard
+ processor system (HPS) and Secure Device Manager (SDM). When the FPGA is
+ configured from HPS, there needs to be a way for HPS to notify SDM the
+ location and size of the configuration data. Then SDM will get the
+ configuration data from that location and perform the FPGA configuration.
+
+ To meet the whole system security needs and support virtual machine requesting
+ communication with SDM, only the secure world of software (EL3, Exception
+ Layer 3) can interface with SDM. All software entities running on other
+ exception layers must channel through the EL3 software whenever it needs
+ service from SDM.
+
+ Intel Stratix10 service layer driver, running at privileged exception level
+ (EL1, Exception Layer 1), interfaces with the service providers and provides
+ the services for FPGA configuration, QSPI, Crypto and warm reset. Service layer
+ driver also manages secure monitor call (SMC) to communicate with secure monitor
+ code running in EL3.
+
+properties:
+ compatible:
+ enum:
+ - intel,stratix10-svc
+ - intel,agilex-svc
+
+ method:
+ description: |
+ Supervisory call method to be used to communicate with the
+ secure service layer.
+ Permitted values are:
+ - "smc" : SMC #0, following the SMCCC
+ - "hvc" : HVC #0, following the SMCCC
+
+ $ref: /schemas/types.yaml#/definitions/string-array
+ enum:
+ - smc
+ - hvc
+
+ memory-region:
+ maxItems: 1
+ description:
+ reserved memory region for the service layer driver to
+ communicate with the secure device manager.
+
+ fpga-mgr:
+ $ref: /schemas/fpga/intel,stratix10-soc-fpga-mgr.yaml
+ description: Optional child node for fpga manager to perform fabric configuration.
+
+required:
+ - compatible
+ - method
+ - memory-region
+
+additionalProperties: false
+
+examples:
+ - |
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ service_reserved: svcbuffer@0 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x0 0x0 0x1000000>;
+ alignment = <0x1000>;
+ no-map;
+ };
+ };
+
+ firmware {
+ svc {
+ compatible = "intel,stratix10-svc";
+ method = "smc";
+ memory-region = <&service_reserved>;
+
+ fpga-mgr {
+ compatible = "intel,stratix10-soc-fpga-mgr";
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/firmware/nxp,imx95-scmi.yaml b/Documentation/devicetree/bindings/firmware/nxp,imx95-scmi.yaml
index 1a95010a546b..2bda2e0e1369 100644
--- a/Documentation/devicetree/bindings/firmware/nxp,imx95-scmi.yaml
+++ b/Documentation/devicetree/bindings/firmware/nxp,imx95-scmi.yaml
@@ -11,6 +11,18 @@ maintainers:
- Peng Fan <peng.fan@nxp.com>
properties:
+ protocol@80:
+ description:
+ SCMI LMM protocol which is for boot, shutdown, and reset of other logical
+ machines (LM). It is usually used to allow one LM to manage another used
+ as an offload or accelerator engine.
+ $ref: '/schemas/firmware/arm,scmi.yaml#/$defs/protocol-node'
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ const: 0x80
+
protocol@81:
$ref: '/schemas/firmware/arm,scmi.yaml#/$defs/protocol-node'
unevaluatedProperties: false
@@ -19,6 +31,17 @@ properties:
reg:
const: 0x81
+ protocol@82:
+ description:
+ SCMI CPU Protocol which allows an agent to start or stop a CPU. It is
+ used to manage auxiliary CPUs in a LM.
+ $ref: '/schemas/firmware/arm,scmi.yaml#/$defs/protocol-node'
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ const: 0x82
+
protocol@84:
$ref: '/schemas/firmware/arm,scmi.yaml#/$defs/protocol-node'
unevaluatedProperties: false
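As a rough illustration of how the two new protocol entries are consumed, the sketch below shows an SCMI node carrying LMM (0x80) and CPU (0x82) protocol sub-nodes. The parent node shape and the omitted transport properties (mboxes/shmem) are assumptions for illustration only, not part of this schema change.

    scmi {
        compatible = "arm,scmi";
        /* mailbox/shmem transport properties omitted for brevity */
        #address-cells = <1>;
        #size-cells = <0>;

        /* LMM protocol: boot, shutdown and reset of other logical machines */
        protocol@80 {
            reg = <0x80>;
        };

        /* CPU protocol: start/stop auxiliary CPUs within a logical machine */
        protocol@82 {
            reg = <0x82>;
        };
    };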
diff --git a/Documentation/devicetree/bindings/fpga/intel,stratix10-soc-fpga-mgr.yaml b/Documentation/devicetree/bindings/fpga/intel,stratix10-soc-fpga-mgr.yaml
new file mode 100644
index 000000000000..6e536d6b28a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/intel,stratix10-soc-fpga-mgr.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/fpga/intel,stratix10-soc-fpga-mgr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Stratix10 SoC FPGA Manager
+
+maintainers:
+ - Mahesh Rao <mahesh.rao@altera.com>
+ - Adrian Ng Ho Yin <adrian.ho.yin.ng@altera.com>
+ - Niravkumar L Rabara <nirav.rabara@altera.com>
+
+description:
+ The Intel Stratix10 SoC consists of a 64-bit quad-core ARM Cortex A53 hard
+ processor system (HPS) and a Secure Device Manager (SDM). The Stratix10
+ SoC FPGA Manager driver is used to configure/reconfigure the FPGA fabric
+ on the die.The driver communicates with SDM/ATF via the stratix10-svc
+  on the die. The driver communicates with SDM/ATF via the stratix10-svc
+
+properties:
+ compatible:
+ enum:
+ - intel,stratix10-soc-fpga-mgr
+ - intel,agilex-soc-fpga-mgr
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ fpga-mgr {
+ compatible = "intel,stratix10-soc-fpga-mgr";
+ };
diff --git a/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt b/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt
deleted file mode 100644
index 0f874137ca46..000000000000
--- a/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-Intel Stratix10 SoC FPGA Manager
-
-Required properties:
-The fpga_mgr node has the following mandatory property, must be located under
-firmware/svc node.
-
-- compatible : should contain "intel,stratix10-soc-fpga-mgr" or
- "intel,agilex-soc-fpga-mgr"
-
-Example:
-
- firmware {
- svc {
- fpga_mgr: fpga-mgr {
- compatible = "intel,stratix10-soc-fpga-mgr";
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/gpio/atmel,at91rm9200-gpio.yaml b/Documentation/devicetree/bindings/gpio/atmel,at91rm9200-gpio.yaml
index 3dd70933ed8e..d810043b56b6 100644
--- a/Documentation/devicetree/bindings/gpio/atmel,at91rm9200-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/atmel,at91rm9200-gpio.yaml
@@ -69,13 +69,13 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
gpio@fffff400 {
- compatible = "atmel,at91rm9200-gpio";
- reg = <0xfffff400 0x200>;
- interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>;
- #gpio-cells = <2>;
- gpio-controller;
- interrupt-controller;
- #interrupt-cells = <2>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
+ compatible = "atmel,at91rm9200-gpio";
+ reg = <0xfffff400 0x200>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
};
...
diff --git a/Documentation/devicetree/bindings/gpio/blaize,blzp1600-gpio.yaml b/Documentation/devicetree/bindings/gpio/blaize,blzp1600-gpio.yaml
new file mode 100644
index 000000000000..a05f6ea619c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/blaize,blzp1600-gpio.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/blaize,blzp1600-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Blaize BLZP1600 GPIO controller
+
+description:
+ Blaize BLZP1600 GPIO controller is an implementation of the VeriSilicon
+  APB GPIO v0.2 IP block. It has 32 ports, each of which is intended to be
+  represented as a child node with the generic GPIO-controller properties
+ as described in this binding's file.
+
+maintainers:
+ - Nikolaos Pasaloukos <nikolaos.pasaloukos@blaize.com>
+ - James Cowgill <james.cowgill@blaize.com>
+ - Matt Redfearn <matt.redfearn@blaize.com>
+ - Neil Jones <neil.jones@blaize.com>
+
+properties:
+ $nodename:
+ pattern: "^gpio@[0-9a-f]+$"
+
+ compatible:
+ enum:
+ - blaize,blzp1600-gpio
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ ngpios:
+ default: 32
+ minimum: 1
+ maximum: 32
+
+ interrupts:
+ maxItems: 1
+
+ gpio-line-names: true
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - '#gpio-cells'
+
+dependencies:
+ interrupt-controller: [ interrupts ]
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ gpio: gpio@4c0000 {
+ compatible = "blaize,blzp1600-gpio";
+ reg = <0x004c0000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml b/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml
index 0e5c22929bde..ab35bcf98101 100644
--- a/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml
+++ b/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml
@@ -71,15 +71,15 @@ unevaluatedProperties: false
examples:
- |
spi {
- #address-cells = <1>;
- #size-cells = <0>;
-
- gpio5: gpio5@0 {
- compatible = "fairchild,74hc595";
- reg = <0>;
- gpio-controller;
- #gpio-cells = <2>;
- registers-number = <4>;
- spi-max-frequency = <100000>;
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio5@0 {
+ compatible = "fairchild,74hc595";
+ reg = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ registers-number = <4>;
+ spi-max-frequency = <100000>;
+ };
};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mxs.yaml b/Documentation/devicetree/bindings/gpio/gpio-mxs.yaml
index 8ff54369d16c..b58e08c8ecd8 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mxs.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-mxs.yaml
@@ -84,52 +84,52 @@ examples:
reg = <0x80018000 0x2000>;
gpio@0 {
- compatible = "fsl,imx28-gpio";
- reg = <0>;
- interrupts = <127>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
+ compatible = "fsl,imx28-gpio";
+ reg = <0>;
+ interrupts = <127>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
gpio@1 {
- compatible = "fsl,imx28-gpio";
- reg = <1>;
- interrupts = <126>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
+ compatible = "fsl,imx28-gpio";
+ reg = <1>;
+ interrupts = <126>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
gpio@2 {
- compatible = "fsl,imx28-gpio";
- reg = <2>;
- interrupts = <125>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
+ compatible = "fsl,imx28-gpio";
+ reg = <2>;
+ interrupts = <125>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
gpio@3 {
- compatible = "fsl,imx28-gpio";
- reg = <3>;
- interrupts = <124>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
+ compatible = "fsl,imx28-gpio";
+ reg = <3>;
+ interrupts = <124>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
gpio@4 {
- compatible = "fsl,imx28-gpio";
- reg = <4>;
- interrupts = <123>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
+ compatible = "fsl,imx28-gpio";
+ reg = <4>;
+ interrupts = <123>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml b/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
index 7b1eb08fa055..4d3f52f8d1b8 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
@@ -17,6 +17,9 @@ properties:
compatible:
oneOf:
- items:
+ - const: toradex,ecgpiol16
+ - const: nxp,pcal6416
+ - items:
- const: diodes,pi4ioe5v6534q
- const: nxp,pcal6534
- items:
@@ -132,6 +135,7 @@ allOf:
- maxim,max7325
- maxim,max7326
- maxim,max7327
+ - toradex,ecgpiol16
then:
properties:
reset-gpios: false
diff --git a/Documentation/devicetree/bindings/gpio/gpio-vf610.yaml b/Documentation/devicetree/bindings/gpio/gpio-vf610.yaml
index 4fb32e9aec0a..a31f64b6d40b 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-vf610.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-vf610.yaml
@@ -70,6 +70,13 @@ properties:
minItems: 1
maxItems: 4
+ gpio-reserved-ranges: true
+
+ ngpios:
+ minimum: 1
+ maximum: 32
+ default: 32
+
patternProperties:
"^.+-hog(-[0-9]+)?$":
type: object
diff --git a/Documentation/devicetree/bindings/gpio/maxim,max77759-gpio.yaml b/Documentation/devicetree/bindings/gpio/maxim,max77759-gpio.yaml
new file mode 100644
index 000000000000..55734190d5eb
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/maxim,max77759-gpio.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/maxim,max77759-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Maxim Integrated MAX77759 GPIO
+
+maintainers:
+ - André Draszik <andre.draszik@linaro.org>
+
+description: |
+ This module is part of the MAX77759 PMIC. For additional information, see
+ Documentation/devicetree/bindings/mfd/maxim,max77759.yaml.
+
+ The MAX77759 is a PMIC integrating, amongst others, a GPIO controller
+ including interrupt support for 2 GPIO lines.
+
+properties:
+ compatible:
+ const: maxim,max77759-gpio
+
+ "#gpio-cells":
+ const: 2
+
+ gpio-controller: true
+
+ gpio-line-names:
+ minItems: 1
+ maxItems: 2
+
+ "#interrupt-cells":
+ const: 2
+
+ interrupt-controller: true
+
+required:
+ - compatible
+ - "#gpio-cells"
+ - gpio-controller
+ - "#interrupt-cells"
+ - interrupt-controller
+
+additionalProperties: false
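The new schema ships without an example, so the following is a minimal sketch of a conforming node; it is assumed to sit under the MAX77759 MFD parent described in mfd/maxim,max77759.yaml, and the line names are hypothetical.

    gpio {
        compatible = "maxim,max77759-gpio";
        gpio-controller;
        #gpio-cells = <2>;
        interrupt-controller;
        #interrupt-cells = <2>;
        /* hypothetical line names; the controller exposes up to two lines */
        gpio-line-names = "otg-enable", "boost-enable";
    };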
diff --git a/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.yaml b/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.yaml
index 4ef06b2ff1ff..065f5761a93f 100644
--- a/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.yaml
@@ -111,6 +111,9 @@ properties:
gpio-controller: true
+ gpio-ranges:
+ maxItems: 1
+
"#gpio-cells":
description: |
Indicates how many cells are used in a consumer's GPIO specifier. In the
diff --git a/Documentation/devicetree/bindings/gpio/nxp,pcf8575.yaml b/Documentation/devicetree/bindings/gpio/nxp,pcf8575.yaml
index 8bca574bb66d..5a6ecaa7b44b 100644
--- a/Documentation/devicetree/bindings/gpio/nxp,pcf8575.yaml
+++ b/Documentation/devicetree/bindings/gpio/nxp,pcf8575.yaml
@@ -128,17 +128,17 @@ additionalProperties: false
examples:
- |
i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- pcf8575: gpio@20 {
- compatible = "nxp,pcf8575";
- reg = <0x20>;
- interrupt-parent = <&irqpin2>;
- interrupts = <3 0>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio@20 {
+ compatible = "nxp,pcf8575";
+ reg = <0x20>;
+ interrupt-parent = <&irqpin2>;
+ interrupts = <3 0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
};
diff --git a/Documentation/devicetree/bindings/gpio/realtek,otto-gpio.yaml b/Documentation/devicetree/bindings/gpio/realtek,otto-gpio.yaml
index 39fd959c45d2..728099c65824 100644
--- a/Documentation/devicetree/bindings/gpio/realtek,otto-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/realtek,otto-gpio.yaml
@@ -81,7 +81,7 @@ dependencies:
examples:
- |
- gpio@3500 {
+ gpio@3500 {
compatible = "realtek,rtl8380-gpio", "realtek,otto-gpio";
reg = <0x3500 0x1c>;
gpio-controller;
@@ -91,9 +91,9 @@ examples:
#interrupt-cells = <2>;
interrupt-parent = <&rtlintc>;
interrupts = <23>;
- };
+ };
- |
- gpio@3300 {
+ gpio@3300 {
compatible = "realtek,rtl9300-gpio", "realtek,otto-gpio";
reg = <0x3300 0x1c>, <0x3338 0x8>;
gpio-controller;
@@ -103,6 +103,6 @@ examples:
#interrupt-cells = <2>;
interrupt-parent = <&rtlintc>;
interrupts = <13>;
- };
+ };
...
diff --git a/Documentation/devicetree/bindings/gpio/renesas,em-gio.yaml b/Documentation/devicetree/bindings/gpio/renesas,em-gio.yaml
index 8bdef812c87c..49fb8f613ead 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,em-gio.yaml
+++ b/Documentation/devicetree/bindings/gpio/renesas,em-gio.yaml
@@ -57,14 +57,14 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
gpio0: gpio@e0050000 {
- compatible = "renesas,em-gio";
- reg = <0xe0050000 0x2c>, <0xe0050040 0x20>;
- interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pfc 0 0 32>;
- ngpios = <32>;
- interrupt-controller;
- #interrupt-cells = <2>;
+ compatible = "renesas,em-gio";
+ reg = <0xe0050000 0x2c>, <0xe0050040 0x20>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 0 32>;
+ ngpios = <32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
diff --git a/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml b/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
index cc7a950a6030..d32e103a64aa 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
@@ -138,16 +138,16 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/r8a77470-sysc.h>
gpio3: gpio@e6053000 {
- compatible = "renesas,gpio-r8a77470", "renesas,rcar-gen2-gpio";
- reg = <0xe6053000 0x50>;
- interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 909>;
- power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
- resets = <&cpg 909>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pfc 0 96 30>;
- gpio-reserved-ranges = <17 10>;
- interrupt-controller;
- #interrupt-cells = <2>;
+ compatible = "renesas,gpio-r8a77470", "renesas,rcar-gen2-gpio";
+ reg = <0xe6053000 0x50>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 909>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 909>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 96 30>;
+ gpio-reserved-ranges = <17 10>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
diff --git a/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml b/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
index fc095646adea..4bdc201b719e 100644
--- a/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
@@ -76,8 +76,8 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/clock/sifive-fu540-prci.h>
- gpio@10060000 {
+ #include <dt-bindings/clock/sifive-fu540-prci.h>
+ gpio@10060000 {
compatible = "sifive,fu540-c000-gpio", "sifive,gpio0";
interrupt-parent = <&plic>;
interrupts = <7>, <8>, <9>, <10>, <11>, <12>, <13>, <14>, <15>, <16>,
@@ -88,6 +88,6 @@ examples:
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
- };
+ };
...
diff --git a/Documentation/devicetree/bindings/gpio/spacemit,k1-gpio.yaml b/Documentation/devicetree/bindings/gpio/spacemit,k1-gpio.yaml
new file mode 100644
index 000000000000..ec0232e72c71
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/spacemit,k1-gpio.yaml
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/spacemit,k1-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SpacemiT K1 GPIO controller
+
+maintainers:
+ - Yixun Lan <dlan@gentoo.org>
+
+description:
+ The controller's registers are organized as sets of eight 32-bit
+  registers, with each set controlling one port of 32 pins. A single
+ interrupt line is shared for all of the pins by the controller.
+
+properties:
+ $nodename:
+ pattern: "^gpio@[0-9a-f]+$"
+
+ compatible:
+ const: spacemit,k1-gpio
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: GPIO Core Clock
+ - description: GPIO Bus Clock
+
+ clock-names:
+ items:
+ - const: core
+ - const: bus
+
+ resets:
+ maxItems: 1
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 3
+ description:
+ The first two cells are the GPIO bank index and offset inside the bank,
+ the third cell should specify GPIO flag.
+
+ gpio-ranges: true
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 3
+ description:
+ The first two cells are the GPIO bank index and offset inside the bank,
+ the third cell should specify interrupt flag. The controller does not
+ support level interrupts, so flags of IRQ_TYPE_LEVEL_HIGH,
+ IRQ_TYPE_LEVEL_LOW should not be used.
+      Refer to <dt-bindings/interrupt-controller/irq.h> for valid flags.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - gpio-controller
+ - "#gpio-cells"
+ - interrupts
+ - interrupt-controller
+ - "#interrupt-cells"
+ - gpio-ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ gpio@d4019000 {
+ compatible = "spacemit,k1-gpio";
+ reg = <0xd4019000 0x800>;
+      clocks = <&ccu 9>, <&ccu 61>;
+ clock-names = "core", "bus";
+ gpio-controller;
+ #gpio-cells = <3>;
+ interrupts = <58>;
+ interrupt-controller;
+ interrupt-parent = <&plic>;
+ #interrupt-cells = <3>;
+ gpio-ranges = <&pinctrl 0 0 0 32>,
+ <&pinctrl 1 0 32 32>,
+ <&pinctrl 2 0 64 32>,
+ <&pinctrl 3 0 96 32>;
+ };
+...
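Because this controller uses a three-cell GPIO specifier (bank index, offset within the bank, flags), a consumer reference looks slightly different from the usual two-cell form. A hedged sketch of a client follows; the controller label, bank and offset are illustrative only.

    #include <dt-bindings/gpio/gpio.h>

    leds {
        compatible = "gpio-leds";

        led-0 {
            /* bank 0, pin 5 within that bank, active high */
            gpios = <&gpio 0 5 GPIO_ACTIVE_HIGH>;
        };
    };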
diff --git a/Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml b/Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml
index b085450b527f..712063417bc8 100644
--- a/Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml
+++ b/Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml
@@ -48,22 +48,22 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/interrupt-controller/irq.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
- soc {
+ soc {
#address-cells = <2>;
#size-cells = <2>;
gpio: gpio@28020000 {
- compatible = "toshiba,gpio-tmpv7708";
- reg = <0 0x28020000 0 0x1000>;
- #gpio-cells = <0x2>;
- gpio-ranges = <&pmux 0 0 32>;
- gpio-controller;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupt-parent = <&gic>;
+ compatible = "toshiba,gpio-tmpv7708";
+ reg = <0 0x28020000 0 0x1000>;
+ #gpio-cells = <0x2>;
+ gpio-ranges = <&pmux 0 0 32>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gic>;
};
- };
+ };
...
diff --git a/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml b/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
index d3d8a2e143ed..8fbf12ca067e 100644
--- a/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
+++ b/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
@@ -126,29 +126,29 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
- gpio@a0020000 {
- compatible = "xlnx,xps-gpio-1.00.a";
- reg = <0xa0020000 0x10000>;
- #gpio-cells = <2>;
- #interrupt-cells = <0x2>;
- clocks = <&zynqmp_clk 71>;
- gpio-controller;
- interrupt-controller;
- interrupt-names = "ip2intc_irpt";
- interrupt-parent = <&gic>;
- interrupts = <0 89 4>;
- xlnx,all-inputs = <0x0>;
- xlnx,all-inputs-2 = <0x0>;
- xlnx,all-outputs = <0x0>;
- xlnx,all-outputs-2 = <0x0>;
- xlnx,dout-default = <0x0>;
- xlnx,dout-default-2 = <0x0>;
- xlnx,gpio-width = <0x20>;
- xlnx,gpio2-width = <0x20>;
- xlnx,interrupt-present = <0x1>;
- xlnx,is-dual = <0x1>;
- xlnx,tri-default = <0xFFFFFFFF>;
- xlnx,tri-default-2 = <0xFFFFFFFF>;
- };
+ gpio@a0020000 {
+ compatible = "xlnx,xps-gpio-1.00.a";
+ reg = <0xa0020000 0x10000>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <0x2>;
+ clocks = <&zynqmp_clk 71>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-names = "ip2intc_irpt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 89 4>;
+ xlnx,all-inputs = <0x0>;
+ xlnx,all-inputs-2 = <0x0>;
+ xlnx,all-outputs = <0x0>;
+ xlnx,all-outputs-2 = <0x0>;
+ xlnx,dout-default = <0x0>;
+ xlnx,dout-default-2 = <0x0>;
+ xlnx,gpio-width = <0x20>;
+ xlnx,gpio2-width = <0x20>;
+ xlnx,interrupt-present = <0x1>;
+ xlnx,is-dual = <0x1>;
+ xlnx,tri-default = <0xFFFFFFFF>;
+ xlnx,tri-default-2 = <0xFFFFFFFF>;
+ };
...
diff --git a/Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml b/Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml
index bb93baa88879..e13e9d6dd148 100644
--- a/Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml
+++ b/Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml
@@ -12,7 +12,6 @@ description:
PS_MODE). Every pin can be configured as input/output.
maintainers:
- - Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index 019bd28a29f1..b8d659d272d0 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -25,6 +25,8 @@ properties:
- realtek,rtd1619-mali
- renesas,r9a07g044-mali
- renesas,r9a07g054-mali
+ - renesas,r9a09g047-mali
+ - renesas,r9a09g056-mali
- renesas,r9a09g057-mali
- rockchip,px30-mali
- rockchip,rk3562-mali
@@ -145,6 +147,8 @@ allOf:
enum:
- renesas,r9a07g044-mali
- renesas,r9a07g054-mali
+ - renesas,r9a09g047-mali
+ - renesas,r9a09g056-mali
- renesas,r9a09g057-mali
then:
properties:
diff --git a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
index dc078ceeca9a..43c6d2d72456 100644
--- a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
+++ b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Broadcom V3D GPU
maintainers:
- - Eric Anholt <eric@anholt.net>
+ - Maíra Canal <mcanal@igalia.com>
- Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
properties:
@@ -22,20 +22,12 @@ properties:
- brcm,7278-v3d
reg:
- items:
- - description: hub register (required)
- - description: core0 register (required)
- - description: GCA cache controller register (if GCA controller present)
- - description: bridge register (if no external reset controller)
minItems: 2
+ maxItems: 4
reg-names:
- items:
- - const: hub
- - const: core0
- - enum: [ bridge, gca ]
- - enum: [ bridge, gca ]
minItems: 2
+ maxItems: 4
interrupts:
items:
@@ -58,6 +50,76 @@ required:
- reg-names
- interrupts
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,2711-v3d
+ then:
+ properties:
+ reg:
+ items:
+ - description: hub register
+ - description: core0 register
+ reg-names:
+ items:
+ - const: hub
+ - const: core0
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,2712-v3d
+ then:
+ properties:
+ reg:
+ items:
+ - description: hub register
+ - description: core0 register
+ - description: SMS state manager register
+ reg-names:
+ items:
+ - const: hub
+ - const: core0
+ - const: sms
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,7268-v3d
+ then:
+ properties:
+ reg:
+ items:
+ - description: hub register
+ - description: core0 register
+ - description: GCA cache controller register
+ - description: bridge register
+ reg-names:
+ items:
+ - const: hub
+ - const: core0
+ - const: gca
+ - const: bridge
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,7278-v3d
+ then:
+ properties:
+ reg:
+ items:
+ - description: hub register
+ - description: core0 register
+ - description: bridge register
+ reg-names:
+ items:
+ - const: hub
+ - const: core0
+ - const: bridge
+
additionalProperties: false
examples:
@@ -66,9 +128,9 @@ examples:
compatible = "brcm,7268-v3d";
reg = <0xf1200000 0x4000>,
<0xf1208000 0x4000>,
- <0xf1204000 0x100>,
- <0xf1204100 0x100>;
- reg-names = "hub", "core0", "bridge", "gca";
+ <0xf1204100 0x100>,
+ <0xf1204000 0x100>;
+ reg-names = "hub", "core0", "gca", "bridge";
interrupts = <0 78 4>,
<0 77 4>;
};
diff --git a/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml b/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml
index 256e252f8087..4450e2e73b3c 100644
--- a/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml
+++ b/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml
@@ -12,10 +12,28 @@ maintainers:
properties:
compatible:
- items:
- - enum:
- - ti,am62-gpu
- - const: img,img-axe # IMG AXE GPU model/revision is fully discoverable
+ oneOf:
+ - items:
+ - enum:
+ - ti,am62-gpu
+ - const: img,img-axe-1-16m
+ # This deprecated element must be kept around to allow old kernels to
+ # work with newer dts.
+ - const: img,img-axe
+ - const: img,img-rogue
+ - items:
+ - enum:
+ - ti,j721s2-gpu
+ - const: img,img-bxs-4-64
+ - const: img,img-rogue
+
+ # This legacy combination of compatible strings was introduced early on
+ # before the more specific GPU identifiers were used.
+ - items:
+ - enum:
+ - ti,am62-gpu
+ - const: img,img-axe
+ deprecated: true
reg:
maxItems: 1
@@ -35,6 +53,18 @@ properties:
maxItems: 1
power-domains:
+ minItems: 1
+ maxItems: 2
+
+ power-domain-names:
+ items:
+ - const: a
+ - const: b
+ minItems: 1
+
+ dma-coherent: true
+
+ resets:
maxItems: 1
required:
@@ -47,11 +77,49 @@ required:
additionalProperties: false
allOf:
+ # Constraints added alongside the new compatible strings that would otherwise
+ # create an ABI break.
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: img,img-rogue
+ then:
+ required:
+ - power-domains
+ - power-domain-names
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: img,img-axe-1-16m
+ then:
+ properties:
+ power-domains:
+ maxItems: 1
+ power-domain-names:
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: img,img-bxs-4-64
+ then:
+ properties:
+ power-domains:
+ minItems: 2
+ power-domain-names:
+ minItems: 2
+
- if:
properties:
compatible:
contains:
- const: ti,am62-gpu
+ enum:
+ - ti,am62-gpu
+ - ti,j721s2-gpu
then:
properties:
clocks:
@@ -64,10 +132,12 @@ examples:
#include <dt-bindings/soc/ti,sci_pm_domain.h>
gpu@fd00000 {
- compatible = "ti,am62-gpu", "img,img-axe";
+ compatible = "ti,am62-gpu", "img,img-axe-1-16m", "img,img-axe",
+ "img,img-rogue";
reg = <0x0fd00000 0x20000>;
clocks = <&k3_clks 187 0>;
clock-names = "core";
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&k3_pds 187 TI_SCI_PD_EXCLUSIVE>;
+ power-domain-names = "a";
};
diff --git a/Documentation/devicetree/bindings/hwinfo/via,vt8500-scc-id.yaml b/Documentation/devicetree/bindings/hwinfo/via,vt8500-scc-id.yaml
new file mode 100644
index 000000000000..b0f425a4a882
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwinfo/via,vt8500-scc-id.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwinfo/via,vt8500-scc-id.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: VIA/WonderMedia SoC system configuration information
+
+maintainers:
+ - Alexey Charkov <alchark@gmail.com>
+
+description:
+  The system configuration controller on VIA/WonderMedia SoCs contains a chip
+  identifier and revision used to differentiate between hardware versions of
+  on-chip IP blocks whose peculiarities may or may not be captured by their
+  respective DT compatible strings.
+
+properties:
+ compatible:
+ items:
+ - const: via,vt8500-scc-id
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ chipid@d8120000 {
+ compatible = "via,vt8500-scc-id";
+ reg = <0xd8120000 0x4>;
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/pmbus/adi,lt3074.yaml b/Documentation/devicetree/bindings/hwmon/pmbus/adi,lt3074.yaml
new file mode 100644
index 000000000000..bf028a8718f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/pmbus/adi,lt3074.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/pmbus/adi,lt3074.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices LT3074 voltage regulator
+
+maintainers:
+ - Cedric Encarnacion <cedricjustine.encarnacion@analog.com>
+
+description: |
+ The LT3074 is a low voltage, ultra-low noise and ultra-fast transient
+ response linear regulator. It allows telemetry for input/output voltage,
+ output current and temperature through the PMBus serial interface.
+
+ Datasheet:
+ https://www.analog.com/en/products/lt3074.html
+
+allOf:
+ - $ref: /schemas/regulator/regulator.yaml#
+
+properties:
+ compatible:
+ enum:
+ - adi,lt3074
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ regulator@6d {
+ compatible = "adi,lt3074";
+ reg = <0x6d>;
+ regulator-name = "vout";
+ regulator-max-microvolt = <1250000>;
+ regulator-min-microvolt = <1150000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/pmbus/mps,mpq8785.yaml b/Documentation/devicetree/bindings/hwmon/pmbus/mps,mpq8785.yaml
new file mode 100644
index 000000000000..90970a0433e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/pmbus/mps,mpq8785.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/pmbus/mps,mpq8785.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Monolithic Power Systems Multiphase Voltage Regulators with PMBus
+
+maintainers:
+ - Charles Hsu <ythsu0511@gmail.com>
+
+description:
+ Monolithic Power Systems digital multiphase voltage regulators with PMBus.
+
+properties:
+ compatible:
+ enum:
+ - mps,mpm3695
+ - mps,mpm3695-25
+ - mps,mpm82504
+ - mps,mpq8785
+
+ reg:
+ maxItems: 1
+
+ mps,vout-fb-divider-ratio-permille:
+ description:
+ The feedback resistor divider ratio, expressed in permille
+ (Vfb / Vout * 1000). This value is written to the PMBUS_VOUT_SCALE_LOOP
+ register and is required for correct output voltage presentation.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 4095
+ default: 706
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ - mps,mpm3695
+ - mps,mpm82504
+ then:
+ properties:
+ mps,vout-fb-divider-ratio-permille:
+ maximum: 1023
+
+ - if:
+ properties:
+ compatible:
+ const: mps,mpq8785
+ then:
+ properties:
+ mps,vout-fb-divider-ratio-permille:
+ maximum: 2047
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@30 {
+ compatible = "mps,mpm82504";
+ reg = <0x30>;
+ mps,vout-fb-divider-ratio-permille = <600>;
+ };
+ };
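The ratio is simply Vfb / Vout x 1000. As a worked example with assumed (not binding-mandated) divider resistors of 2.94 kOhm on top and 7.06 kOhm on the bottom, Vfb / Vout = 7.06 / (2.94 + 7.06) = 0.706, which is written as 706:

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        pmic@58 {
            compatible = "mps,mpq8785";
            reg = <0x58>;                /* placeholder I2C address */
            /* Rtop = 2.94 kOhm, Rbottom = 7.06 kOhm (assumed):
             * Vfb / Vout = 7.06 / 10.00 = 0.706 -> 706 permille
             */
            mps,vout-fb-divider-ratio-permille = <706>;
        };
    };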
diff --git a/Documentation/devicetree/bindings/hwmon/sophgo,sg2042-hwmon-mcu.yaml b/Documentation/devicetree/bindings/hwmon/sophgo,sg2042-hwmon-mcu.yaml
index f0667ac41d75..b76805d39427 100644
--- a/Documentation/devicetree/bindings/hwmon/sophgo,sg2042-hwmon-mcu.yaml
+++ b/Documentation/devicetree/bindings/hwmon/sophgo,sg2042-hwmon-mcu.yaml
@@ -11,7 +11,11 @@ maintainers:
properties:
compatible:
- const: sophgo,sg2042-hwmon-mcu
+ oneOf:
+ - items:
+ - const: sophgo,sg2044-hwmon-mcu
+ - const: sophgo,sg2042-hwmon-mcu
+ - const: sophgo,sg2042-hwmon-mcu
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/hwmon/ti,amc6821.yaml b/Documentation/devicetree/bindings/hwmon/ti,amc6821.yaml
index 5d33f1a23d03..9ca7356760a7 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,amc6821.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,amc6821.yaml
@@ -28,6 +28,17 @@ properties:
i2c-mux:
type: object
+ fan:
+ $ref: fan-common.yaml#
+ unevaluatedProperties: false
+
+ "#pwm-cells":
+ const: 2
+ description: |
+ Number of cells in a PWM specifier.
+ - cell 0: PWM period in nanoseconds
+ - cell 1: PWM polarity: 0 or PWM_POLARITY_INVERTED
+
required:
- compatible
- reg
@@ -50,9 +61,14 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- fan@18 {
+ fan_controller: fan@18 {
compatible = "ti,amc6821";
reg = <0x18>;
+ #pwm-cells = <2>;
+
+ fan {
+ pwms = <&fan_controller 40000 0>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml b/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml
index bc03781342c0..d1fb7b9abda0 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml
@@ -19,6 +19,7 @@ description: |
properties:
compatible:
enum:
+ - silergy,sq52206
- silergy,sy24655
- ti,ina209
- ti,ina219
@@ -58,6 +59,9 @@ properties:
shunt voltage, and a value of 4 maps to ADCRANGE=0 such that a wider
voltage range is used.
+    For SQ52206, the shunt-gain value 1 maps to ADCRANGE=10/11, the value 2
+    maps to ADCRANGE=01, and the value 4 maps to ADCRANGE=00.
+
The default value is device dependent, and is defined by the reset value
of PGA/ADCRANGE in the respective configuration registers.
$ref: /schemas/types.yaml#/definitions/uint32
@@ -97,6 +101,7 @@ allOf:
compatible:
contains:
enum:
+ - silergy,sq52206
- silergy,sy24655
- ti,ina209
- ti,ina219
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tmp102.yaml b/Documentation/devicetree/bindings/hwmon/ti,tmp102.yaml
index 7e5b62a0215d..4c89448eba0d 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,tmp102.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,tmp102.yaml
@@ -23,6 +23,9 @@ properties:
"#thermal-sensor-cells":
const: 1
+ vcc-supply:
+ description: Power supply for tmp102
+
required:
- compatible
- reg
@@ -42,6 +45,7 @@ examples:
reg = <0x48>;
interrupt-parent = <&gpio7>;
interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+ vcc-supply = <&supply>;
#thermal-sensor-cells = <1>;
};
};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml
index fda0467cdd95..23fe8ff76645 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml
@@ -52,6 +52,7 @@ properties:
- const: mediatek,mt8173-i2c
- items:
- enum:
+ - mediatek,mt6893-i2c
- mediatek,mt8195-i2c
- const: mediatek,mt8192-i2c
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
index 8101afa6f146..2f1e97969c3f 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
@@ -37,6 +37,7 @@ properties:
- rockchip,px30-i2c
- rockchip,rk3308-i2c
- rockchip,rk3328-i2c
+ - rockchip,rk3528-i2c
- rockchip,rk3562-i2c
- rockchip,rk3568-i2c
- rockchip,rk3576-i2c
diff --git a/Documentation/devicetree/bindings/i2c/i2c-wmt.txt b/Documentation/devicetree/bindings/i2c/i2c-wmt.txt
deleted file mode 100644
index 94a425eaa6c7..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-wmt.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-* Wondermedia I2C Controller
-
-Required properties :
-
- - compatible : should be "wm,wm8505-i2c"
- - reg : Offset and length of the register set for the device
- - interrupts : <IRQ> where IRQ is the interrupt number
- - clocks : phandle to the I2C clock source
-
-Optional properties :
-
- - clock-frequency : desired I2C bus clock frequency in Hz.
- Valid values are 100000 and 400000.
- Default to 100000 if not specified, or invalid value.
-
-Example :
-
- i2c_0: i2c@d8280000 {
- compatible = "wm,wm8505-i2c";
- reg = <0xd8280000 0x1000>;
- interrupts = <19>;
- clocks = <&clki2c0>;
- clock-frequency = <400000>;
- };
diff --git a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
index 1b7fed232642..cc39511a49d6 100644
--- a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
+++ b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
@@ -29,6 +29,7 @@ properties:
- enum:
- renesas,riic-r9a08g045 # RZ/G3S
- renesas,riic-r9a09g047 # RZ/G3E
+ - renesas,riic-r9a09g056 # RZ/V2N
- const: renesas,riic-r9a09g057 # RZ/V2H(P)
- const: renesas,riic-r9a09g057 # RZ/V2H(P)
diff --git a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
index bc5d0fb5abfe..d904191bb0c6 100644
--- a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
@@ -32,15 +32,13 @@ properties:
- const: renesas,r9a06g032-i2c # RZ/N1D
- const: renesas,rzn1-i2c # RZ/N1
- const: snps,designware-i2c
- - description: Microsemi Ocelot SoCs I2C controller
- items:
- - const: mscc,ocelot-i2c
- - const: snps,designware-i2c
- description: Baikal-T1 SoC System I2C controller
const: baikal,bt1-sys-i2c
- - description: T-HEAD TH1520 SoCs I2C controller
- items:
- - const: thead,th1520-i2c
+ - items:
+ - enum:
+ - mscc,ocelot-i2c
+ - sophgo,sg2044-i2c
+ - thead,th1520-i2c
- const: snps,designware-i2c
reg:
diff --git a/Documentation/devicetree/bindings/i2c/wm,wm8505-i2c.yaml b/Documentation/devicetree/bindings/i2c/wm,wm8505-i2c.yaml
new file mode 100644
index 000000000000..e498ce47b885
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/wm,wm8505-i2c.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/wm,wm8505-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: I2C Controller on WonderMedia WM8505 and related SoCs
+
+maintainers:
+ - Alexey Charkov <alchark@gmail.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ const: wm,wm8505-i2c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-frequency:
+ enum: [100000, 400000]
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c_0: i2c@d8280000 {
+ compatible = "wm,wm8505-i2c";
+ reg = <0xd8280000 0x1000>;
+ interrupts = <19>;
+ clocks = <&clki2c0>;
+ clock-frequency = <400000>;
+ };
diff --git a/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml b/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
index 4fbdcdac0aee..853092f7522d 100644
--- a/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
+++ b/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
@@ -9,14 +9,17 @@ title: Silvaco I3C master
maintainers:
- Conor Culhane <conor.culhane@silvaco.com>
-allOf:
- - $ref: i3c.yaml#
-
properties:
compatible:
- enum:
- - nuvoton,npcm845-i3c
- - silvaco,i3c-master-v1
+ oneOf:
+ - enum:
+ - nuvoton,npcm845-i3c
+ - silvaco,i3c-master-v1
+ - items:
+ - enum:
+ - nxp,imx94-i3c
+ - nxp,imx95-i3c
+ - const: silvaco,i3c-master-v1
reg:
maxItems: 1
@@ -25,12 +28,14 @@ properties:
maxItems: 1
clocks:
+ minItems: 2
items:
- description: system clock
- description: bus clock
- description: other (slower) events clock
clock-names:
+ minItems: 2
items:
- const: pclk
- const: fast_clk
@@ -46,6 +51,34 @@ required:
- clock-names
- clocks
+allOf:
+ - $ref: i3c.yaml#
+ - if:
+ properties:
+ compatible:
+ enum:
+ - nuvoton,npcm845-i3c
+ - silvaco,i3c-master-v1
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ clock-names:
+ minItems: 3
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nxp,imx94-i3c
+ - nxp,imx95-i3c
+ then:
+ properties:
+ clocks:
+ maxItems: 2
+ clock-names:
+ maxItems: 2
+
unevaluatedProperties: false
examples:
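With the new i.MX94/i.MX95 entries only the system and bus clocks are allowed, unlike the three-clock Silvaco/Nuvoton case; a hedged sketch follows, where the unit address, interrupt number and clock phandles are placeholders.

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    i3c@4d0d0000 {
        compatible = "nxp,imx95-i3c", "silvaco,i3c-master-v1";
        reg = <0x4d0d0000 0x1000>;                /* placeholder address */
        interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&clk_pclk>, <&clk_fast>;        /* placeholder phandles */
        clock-names = "pclk", "fast_clk";
        #address-cells = <3>;
        #size-cells = <0>;
    };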
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-rradc.yaml b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-rradc.yaml
index f39bc92c2b99..862e450da214 100644
--- a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-rradc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-rradc.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm's SPMI PMIC Round Robin ADC
maintainers:
- - Caleb Connolly <caleb.connolly@linaro.org>
+ - Casey Connolly <casey.connolly@linaro.org>
description: |
The Qualcomm SPMI Round Robin ADC (RRADC) provides interface to clients to
diff --git a/Documentation/devicetree/bindings/input/dlg,da7280.txt b/Documentation/devicetree/bindings/input/dlg,da7280.txt
deleted file mode 100644
index 96ee5d50e111..000000000000
--- a/Documentation/devicetree/bindings/input/dlg,da7280.txt
+++ /dev/null
@@ -1,108 +0,0 @@
-Dialog Semiconductor DA7280 Haptics bindings
-
-Required properties:
-- compatible: Should be "dlg,da7280".
-- reg: Specifies the I2C slave address.
-
-- interrupt-parent : Specifies the phandle of the interrupt controller to
- which the IRQs from DA7280 are delivered to.
-
-- dlg,actuator-type: Set Actuator type. it should be one of:
- "LRA" - Linear Resonance Actuator type.
- "ERM-bar" - Bar type Eccentric Rotating Mass.
- "ERM-coin" - Coin type Eccentric Rotating Mass.
-
-- dlg,const-op-mode: Haptic operation mode for FF_CONSTANT.
- Possible values:
- 1 - Direct register override(DRO) mode triggered by i2c(default),
- 2 - PWM data source mode controlled by PWM duty,
-- dlg,periodic-op-mode: Haptic operation mode for FF_PERIODIC.
- Possible values:
- 1 - Register triggered waveform memory(RTWM) mode, the pattern
- assigned to the PS_SEQ_ID played as much times as PS_SEQ_LOOP,
- 2 - Edge triggered waveform memory(ETWM) mode, external GPI(N)
- control are required to enable/disable and it needs to keep
- device enabled by sending magnitude (X > 0),
- the pattern is assigned to the GPI(N)_SEQUENCE_ID below.
- The default value is 1 for both of the operation modes.
- For more details, please see the datasheet.
-
-- dlg,nom-microvolt: Nominal actuator voltage rating.
- Valid values: 0 - 6000000.
-- dlg,abs-max-microvolt: Absolute actuator maximum voltage rating.
- Valid values: 0 - 6000000.
-- dlg,imax-microamp: Actuator max current rating.
- Valid values: 0 - 252000.
- Default: 130000.
-- dlg,impd-micro-ohms: the impedance of the actuator in micro ohms.
- Valid values: 0 - 1500000000.
-
-Optional properties:
-- pwms : phandle to the physical PWM(Pulse Width Modulation) device.
- PWM properties should be named "pwms". And number of cell is different
- for each pwm device.
- (See Documentation/devicetree/bindings/pwm/pwm.txt
- for further information relating to pwm properties)
-
-- dlg,ps-seq-id: the PS_SEQ_ID(pattern ID in waveform memory inside chip)
- to play back when RTWM-MODE is enabled.
- Valid range: 0 - 15.
-- dlg,ps-seq-loop: the PS_SEQ_LOOP, Number of times the pre-stored sequence
- pointed to by PS_SEQ_ID or GPI(N)_SEQUENCE_ID is repeated.
- Valid range: 0 - 15.
-- dlg,gpiN-seq-id: the GPI(N)_SEQUENCE_ID, pattern to play
- when gpi0 is triggered, 'N' must be 0 - 2.
- Valid range: 0 - 15.
-- dlg,gpiN-mode: the pattern mode which can select either
- "Single-pattern" or "Multi-pattern", 'N' must be 0 - 2.
-- dlg,gpiN-polarity: gpiN polarity which can be chosen among
- "Rising-edge", "Falling-edge" and "Both-edge",
- 'N' must be 0 - 2
- Haptic will work by this edge option in case of ETWM mode.
-
-- dlg,resonant-freq-hz: use in case of LRA.
- the frequency range: 50 - 300.
- Default: 205.
-
-- dlg,bemf-sens-enable: Enable for internal loop computations.
-- dlg,freq-track-enable: Enable for resonant frequency tracking.
-- dlg,acc-enable: Enable for active acceleration.
-- dlg,rapid-stop-enable: Enable for rapid stop.
-- dlg,amp-pid-enable: Enable for the amplitude PID.
-- dlg,mem-array: Customized waveform memory(patterns) data downloaded to
- the device during initialization. This is an array of 100 values(u8).
-
-For further information, see device datasheet.
-
-======
-
-Example:
-
- haptics: da7280-haptics@4a {
- compatible = "dlg,da7280";
- reg = <0x4a>;
- interrupt-parent = <&gpio6>;
- interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
- dlg,actuator-type = "LRA";
- dlg,dlg,const-op-mode = <1>;
- dlg,dlg,periodic-op-mode = <1>;
- dlg,nom-microvolt = <2000000>;
- dlg,abs-max-microvolt = <2000000>;
- dlg,imax-microamp = <170000>;
- dlg,resonant-freq-hz = <180>;
- dlg,impd-micro-ohms = <10500000>;
- dlg,freq-track-enable;
- dlg,rapid-stop-enable;
- dlg,mem-array = <
- 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
- 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
- 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
- 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
- 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
- 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
- 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
- 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
- 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
- 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
- >;
- };
diff --git a/Documentation/devicetree/bindings/input/dlg,da7280.yaml b/Documentation/devicetree/bindings/input/dlg,da7280.yaml
new file mode 100644
index 000000000000..0d06755aaaa8
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/dlg,da7280.yaml
@@ -0,0 +1,248 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/dlg,da7280.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Dialog Semiconductor DA7280 Low Power High-Definition Haptic Driver
+
+maintainers:
+ - Roy Im <roy.im.opensource@diasemi.com>
+
+properties:
+ compatible:
+ const: dlg,da7280
+
+ reg:
+ maxItems: 1
+ description: I2C address of the device.
+
+ interrupts:
+ maxItems: 1
+
+ dlg,actuator-type:
+ enum:
+ - LRA # Linear Resonance Actuator type
+ - ERM-bar # Bar type Eccentric Rotating Mass
+ - ERM-coin # Coin type Eccentric Rotating Mass
+
+ dlg,const-op-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 1 # Direct register override (DRO) mode triggered by i2c (default)
+ - 2 # PWM data source mode controlled by PWM duty
+ description:
+ Haptic operation mode for FF_CONSTANT
+
+ dlg,periodic-op-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+      - 1 # Register triggered waveform memory(RTWM) mode, the pattern
+          # assigned to PS_SEQ_ID is played as many times as PS_SEQ_LOOP
+      - 2 # Edge triggered waveform memory(ETWM) mode, external GPI(N)
+          # control is required to enable/disable it, and the device must be
+          # kept enabled by sending a magnitude (X > 0);
+          # the pattern is assigned to the GPI(N)_SEQUENCE_ID below
+ default: 1
+ description:
+ Haptic operation mode for FF_PERIODIC.
+ The default value is 1 for both of the operation modes.
+ For more details, please see the datasheet
+
+ dlg,nom-microvolt:
+ minimum: 0
+ maximum: 6000000
+ description:
+ Nominal actuator voltage rating
+
+ dlg,abs-max-microvolt:
+ minimum: 0
+ maximum: 6000000
+ description:
+ Absolute actuator maximum voltage rating
+
+ dlg,imax-microamp:
+ minimum: 0
+ maximum: 252000
+ default: 130000
+ description:
+ Actuator max current rating
+
+ dlg,impd-micro-ohms:
+ minimum: 0
+ maximum: 1500000000
+ description:
+ Impedance of the actuator
+
+ pwms:
+ maxItems: 1
+
+ dlg,ps-seq-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description:
+ The PS_SEQ_ID(pattern ID in waveform memory inside chip)
+ to play back when RTWM-MODE is enabled
+
+ dlg,ps-seq-loop:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description:
+      The PS_SEQ_LOOP, the number of times the pre-stored sequence pointed to
+      by PS_SEQ_ID or GPI(N)_SEQUENCE_ID is repeated
+
+ dlg,gpi0-seq-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description:
+ the GPI0_SEQUENCE_ID, pattern to play when gpi0 is triggered
+
+ dlg,gpi1-seq-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description:
+ the GPI1_SEQUENCE_ID, pattern to play when gpi1 is triggered
+
+ dlg,gpi2-seq-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description:
+ the GPI2_SEQUENCE_ID, pattern to play when gpi2 is triggered
+
+ dlg,gpi0-mode:
+ enum:
+ - Single-pattern
+ - Multi-pattern
+ description:
+ Pattern mode for gpi0
+
+ dlg,gpi1-mode:
+ enum:
+ - Single-pattern
+ - Multi-pattern
+ description:
+ Pattern mode for gpi1
+
+ dlg,gpi2-mode:
+ enum:
+ - Single-pattern
+ - Multi-pattern
+ description:
+ Pattern mode for gpi2
+
+ dlg,gpi0-polarity:
+ enum:
+ - Rising-edge
+ - Falling-edge
+ - Both-edge
+ description:
+      gpi0 polarity; the haptic will act on this edge in ETWM mode
+
+ dlg,gpi1-polarity:
+ enum:
+ - Rising-edge
+ - Falling-edge
+ - Both-edge
+ description:
+      gpi1 polarity; the haptic will act on this edge in ETWM mode
+
+ dlg,gpi2-polarity:
+ enum:
+ - Rising-edge
+ - Falling-edge
+ - Both-edge
+ description:
+      gpi2 polarity; the haptic will act on this edge in ETWM mode
+
+ dlg,resonant-freq-hz:
+ minimum: 50
+ maximum: 300
+ default: 205
+
+ dlg,bemf-sens-enable:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Enable for internal loop computations
+
+ dlg,freq-track-enable:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Enable for resonant frequency tracking
+
+ dlg,acc-enable:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Enable for active acceleration
+
+ dlg,rapid-stop-enable:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Enable for rapid stop
+
+ dlg,amp-pid-enable:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Enable for the amplitude PID
+
+ dlg,mem-array:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 100
+ description:
+ Customized waveform memory (patterns) data downloaded to the device during initialization.
+      Each entry value must be between 0 and 255.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - dlg,actuator-type
+ - dlg,const-op-mode
+ - dlg,periodic-op-mode
+ - dlg,nom-microvolt
+ - dlg,abs-max-microvolt
+ - dlg,imax-microamp
+ - dlg,impd-micro-ohms
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ haptics@4a {
+ compatible = "dlg,da7280";
+ reg = <0x4a>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ dlg,actuator-type = "LRA";
+ dlg,const-op-mode = <1>;
+ dlg,periodic-op-mode = <1>;
+ dlg,nom-microvolt = <2000000>;
+ dlg,abs-max-microvolt = <2000000>;
+ dlg,imax-microamp = <170000>;
+ dlg,resonant-freq-hz = <180>;
+ dlg,impd-micro-ohms = <10500000>;
+ dlg,freq-track-enable;
+ dlg,rapid-stop-enable;
+ dlg,mem-array = <0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
+ 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
+ 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
+ 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
+ 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
+ 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
+ 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
+ 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
+ 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
+ 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00>;
+ };
+ };
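
The schema above also allows FF_CONSTANT to be driven from a PWM duty cycle
(dlg,const-op-mode = <2>) together with the optional pwms property. A minimal
sketch of such a node, not part of the patch; the &pwm0 phandle, its specifier
cells and the period value are assumptions for illustration:

    haptics@4a {
        compatible = "dlg,da7280";
        reg = <0x4a>;
        interrupt-parent = <&gpio6>;
        interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
        dlg,actuator-type = "LRA";
        dlg,const-op-mode = <2>;        /* PWM data source mode */
        dlg,periodic-op-mode = <1>;
        pwms = <&pwm0 0 20000>;         /* assumed PWM provider and period */
        dlg,nom-microvolt = <2000000>;
        dlg,abs-max-microvolt = <2000000>;
        dlg,imax-microamp = <170000>;
        dlg,impd-micro-ohms = <10500000>;
    };
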
diff --git a/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml b/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
index 517a4ac1bea3..e365413732e7 100644
--- a/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
+++ b/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Mediatek's Keypad Controller
maintainers:
- - Mattijs Korpershoek <mkorpershoek@baylibre.com>
+ - Mattijs Korpershoek <mkorpershoek@kernel.org>
allOf:
- $ref: /schemas/input/matrix-keymap.yaml#
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
index 70a922e213f2..ab821490284a 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
@@ -103,16 +103,9 @@ properties:
minimum: 0
maximum: 255
- touchscreen-size-x: true
- touchscreen-size-y: true
- touchscreen-fuzz-x: true
- touchscreen-fuzz-y: true
- touchscreen-inverted-x: true
- touchscreen-inverted-y: true
- touchscreen-swapped-x-y: true
interrupt-controller: true
-additionalProperties: false
+unevaluatedProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt b/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt
deleted file mode 100644
index 5a4dd263fc12..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-TB10x Top Level Interrupt Controller
-====================================
-
-The Abilis TB10x SOC contains a custom interrupt controller. It performs
-one-to-one mapping of external interrupt sources to CPU interrupts and
-provides support for reconfigurable trigger modes.
-
-Required properties
--------------------
-
-- compatible: Should be "abilis,tb10x-ictl"
-- reg: specifies physical base address and size of register range.
-- interrupt-congroller: Identifies the node as an interrupt controller.
-- #interrupt cells: Specifies the number of cells used to encode an interrupt
- source connected to this controller. The value shall be 2.
-- interrupts: Specifies the list of interrupt lines which are handled by
- the interrupt controller in the parent controller's notation. Interrupts
- are mapped one-to-one to parent interrupts.
-
-Example
--------
-
-intc: interrupt-controller { /* Parent interrupt controller */
- interrupt-controller;
- #interrupt-cells = <1>; /* For example below */
- /* ... */
-};
-
-tb10x_ictl: pic@2000 { /* TB10x interrupt controller */
- compatible = "abilis,tb10x-ictl";
- reg = <0x2000 0x20>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupt-parent = <&intc>;
- interrupts = <5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
- 20 21 22 23 24 25 26 27 28 29 30 31>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.yaml b/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.yaml
new file mode 100644
index 000000000000..cd2c49670e7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/abilis,tb10x-ictl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TB10x Top Level Interrupt Controller
+
+maintainers:
+ - Christian Ruppert <christian.ruppert@abilis.com>
+
+description:
+ The Abilis TB10x SOC contains a custom interrupt controller. It performs
+ one-to-one mapping of external interrupt sources to CPU interrupts and
+ provides support for reconfigurable trigger modes.
+
+properties:
+ compatible:
+ const: abilis,tb10x-ictl
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ interrupts:
+ description: A one-to-one mapping of external interrupt sources to parent
+ interrupts.
+ minItems: 1
+ maxItems: 32
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@2000 {
+ compatible = "abilis,tb10x-ictl";
+ reg = <0x2000 0x20>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <5>, <6>, <7>, <8>, <9>, <10>, <11>, <12>, <13>, <14>,
+ <15>, <16>, <17>, <18>, <19>, <20>, <21>, <22>, <23>,
+ <24>, <25>, <26>, <27>, <28>, <29>, <30>, <31>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt b/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
deleted file mode 100644
index 5669764f9cc9..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Alpine MSIX controller
-
-See arm,gic-v3.txt for SPI and MSI definitions.
-
-Required properties:
-
-- compatible: should be "al,alpine-msix"
-- reg: physical base address and size of the registers
-- interrupt-controller: identifies the node as an interrupt controller
-- msi-controller: identifies the node as an PCI Message Signaled Interrupt
- controller
-- al,msi-base-spi: SPI base of the MSI frame
-- al,msi-num-spis: number of SPIs assigned to the MSI frame, relative to SPI0
-
-Example:
-
-msix: msix {
- compatible = "al,alpine-msix";
- reg = <0x0 0xfbe00000 0x0 0x100000>;
- interrupt-parent = <&gic>;
- interrupt-controller;
- msi-controller;
- al,msi-base-spi = <160>;
- al,msi-num-spis = <160>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.yaml b/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.yaml
new file mode 100644
index 000000000000..9f1ff8ec686f
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/al,alpine-msix.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Alpine MSIX controller
+
+maintainers:
+ - Antoine Tenart <atenart@kernel.org>
+
+properties:
+ compatible:
+ const: al,alpine-msix
+
+ reg:
+ maxItems: 1
+
+ interrupt-parent: true
+
+ msi-controller: true
+
+ al,msi-base-spi:
+ description: SPI base of the MSI frame
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ al,msi-num-spis:
+ description: number of SPIs assigned to the MSI frame, relative to SPI0
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+required:
+ - compatible
+ - reg
+ - msi-controller
+ - al,msi-base-spi
+ - al,msi-num-spis
+
+additionalProperties: false
+
+examples:
+ - |
+ msi-controller@fbe00000 {
+ compatible = "al,alpine-msix";
+ reg = <0xfbe00000 0x100000>;
+ interrupt-parent = <&gic>;
+ msi-controller;
+ al,msi-base-spi = <160>;
+ al,msi-num-spis = <160>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/altr,msi-controller.yaml b/Documentation/devicetree/bindings/interrupt-controller/altr,msi-controller.yaml
index 98814862d006..d046954b8a27 100644
--- a/Documentation/devicetree/bindings/pci/altr,msi-controller.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/altr,msi-controller.yaml
@@ -2,7 +2,7 @@
# Copyright (C) 2015, 2024, Intel Corporation
%YAML 1.2
---
-$id: http://devicetree.org/schemas/altr,msi-controller.yaml#
+$id: http://devicetree.org/schemas/interrupt-controller/altr,msi-controller.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Altera PCIe MSI controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt b/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt
deleted file mode 100644
index c676b03c752e..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Amazon's Annapurna Labs Fabric Interrupt Controller
-
-Required properties:
-
-- compatible: should be "amazon,al-fic"
-- reg: physical base address and size of the registers
-- interrupt-controller: identifies the node as an interrupt controller
-- #interrupt-cells : must be 2. Specifies the number of cells needed to encode
- an interrupt source. Supported trigger types are low-to-high edge
- triggered and active high level-sensitive.
-- interrupts: describes which input line in the interrupt parent, this
- fic's output is connected to. This field property depends on the parent's
- binding
-
-Please refer to interrupts.txt in this directory for details of the common
-Interrupt Controllers bindings used by client devices.
-
-Example:
-
-amazon_fic: interrupt-controller@fd8a8500 {
- compatible = "amazon,al-fic";
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x0 0xfd8a8500 0x0 0x1000>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 0x0 IRQ_TYPE_LEVEL_HIGH>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.yaml b/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.yaml
new file mode 100644
index 000000000000..26bc05dee0bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/amazon,al-fic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amazon Annapurna Labs Fabric Interrupt Controller
+
+maintainers:
+ - Talel Shenhar <talel@amazon.com>
+
+properties:
+ compatible:
+ const: amazon,al-fic
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ interrupt-controller@fd8a8500 {
+ compatible = "amazon,al-fic";
+ reg = <0xfd8a8500 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 0x0 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,nvic.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,nvic.txt
deleted file mode 100644
index 386ab37a383f..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,nvic.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-* ARM Nested Vector Interrupt Controller (NVIC)
-
-The NVIC provides an interrupt controller that is tightly coupled to
-Cortex-M based processor cores. The NVIC implemented on different SoCs
-vary in the number of interrupts and priority bits per interrupt.
-
-Main node required properties:
-
-- compatible : should be one of:
- "arm,v6m-nvic"
- "arm,v7m-nvic"
- "arm,v8m-nvic"
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. The type shall be a <u32> and the value shall be 2.
-
- The 1st cell contains the interrupt number for the interrupt type.
-
- The 2nd cell is the priority of the interrupt.
-
-- reg : Specifies base physical address(s) and size of the NVIC registers.
- This is at a fixed address (0xe000e100) and size (0xc00).
-
-- arm,num-irq-priority-bits: The number of priority bits implemented by the
- given SoC
-
-Example:
-
- intc: interrupt-controller@e000e100 {
- compatible = "arm,v7m-nvic";
- #interrupt-cells = <2>;
- #address-cells = <1>;
- interrupt-controller;
- reg = <0xe000e100 0xc00>;
- arm,num-irq-priority-bits = <4>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,nvic.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,nvic.yaml
new file mode 100644
index 000000000000..d89eca956c5f
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,nvic.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/arm,nvic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Nested Vector Interrupt Controller (NVIC)
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+description:
+ The NVIC provides an interrupt controller that is tightly coupled to Cortex-M
+ based processor cores. The NVIC implemented on different SoCs vary in the
+  based processor cores. The NVICs implemented on different SoCs vary in the
+
+properties:
+ compatible:
+ enum:
+ - arm,v6m-nvic
+ - arm,v7m-nvic
+ - arm,v8m-nvic
+
+ reg:
+ maxItems: 1
+
+ '#address-cells':
+ const: 0
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+ description: |
+ Number of cells to encode an interrupt source:
+ first = interrupt number, second = priority.
+
+ arm,num-irq-priority-bits:
+ description: Number of priority bits implemented by the SoC
+ minimum: 1
+ maximum: 8
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+ - arm,num-irq-priority-bits
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@e000e100 {
+ compatible = "arm,v7m-nvic";
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0xe000e100 0xc00>;
+ arm,num-irq-priority-bits = <4>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt
deleted file mode 100644
index ea939f54c5eb..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-* ARM Versatile FPGA interrupt controller
-
-One or more FPGA IRQ controllers can be synthesized in an ARM reference board
-such as the Integrator or Versatile family. The output of these different
-controllers are OR:ed together and fed to the CPU tile's IRQ input. Each
-instance can handle up to 32 interrupts.
-
-Required properties:
-- compatible: "arm,versatile-fpga-irq"
-- interrupt-controller: Identifies the node as an interrupt controller
-- #interrupt-cells: The number of cells to define the interrupts. Must be 1
- as the FPGA IRQ controller has no configuration options for interrupt
- sources. The cell is a u32 and defines the interrupt number.
-- reg: The register bank for the FPGA interrupt controller.
-- clear-mask: a u32 number representing the mask written to clear all IRQs
- on the controller at boot for example.
-- valid-mask: a u32 number representing a bit mask determining which of
- the interrupts are valid. Unconnected/unused lines are set to 0, and
- the system till not make it possible for devices to request these
- interrupts.
-
-The "oxsemi,ox810se-rps-irq" compatible is deprecated.
-
-Example:
-
-pic: pic@14000000 {
- compatible = "arm,versatile-fpga-irq";
- #interrupt-cells = <1>;
- interrupt-controller;
- reg = <0x14000000 0x100>;
- clear-mask = <0xffffffff>;
- valid-mask = <0x003fffff>;
-};
-
-Optional properties:
-- interrupts: if the FPGA IRQ controller is cascaded, i.e. if its IRQ
- output is simply connected to the input of another IRQ controller,
- then the parent IRQ shall be specified in this property.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.yaml
new file mode 100644
index 000000000000..8d581b3aac3a
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/arm,versatile-fpga-irq.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Versatile FPGA IRQ Controller
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description:
+ One or more FPGA IRQ controllers can be synthesized in an ARM reference board
+  such as the Integrator or Versatile family. The outputs of these different
+  controllers are ORed together and fed to the CPU tile's IRQ input. Each
+ instance can handle up to 32 interrupts.
+
+properties:
+ compatible:
+ const: arm,versatile-fpga-irq
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+ clear-mask:
+ description: A mask written to clear all IRQs on the controller at boot.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ valid-mask:
+ description:
+ A bit mask determining which interrupts are valid; unused lines are set to 0.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ interrupts:
+ maxItems: 1
+
+additionalProperties: false
+
+required:
+ - compatible
+ - interrupt-controller
+ - '#interrupt-cells'
+ - reg
+ - clear-mask
+ - valid-mask
+
+examples:
+ - |
+ interrupt-controller@14000000 {
+ compatible = "arm,versatile-fpga-irq";
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ reg = <0x14000000 0x100>;
+ clear-mask = <0xffffffff>;
+ valid-mask = <0x003fffff>;
+ };
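
When the controller is cascaded into a parent interrupt controller (the case
the optional interrupts property above covers), the node also names its parent
line. A hedged sketch; the parent phandle and the IRQ number are assumptions:

    interrupt-controller@14000000 {
        compatible = "arm,versatile-fpga-irq";
        #interrupt-cells = <1>;
        interrupt-controller;
        reg = <0x14000000 0x100>;
        clear-mask = <0xffffffff>;
        valid-mask = <0x003fffff>;
        interrupt-parent = <&vic>;      /* assumed parent controller */
        interrupts = <31>;              /* assumed parent IRQ line */
    };
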
diff --git a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.txt
deleted file mode 100644
index 033cc82e5684..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Device tree configuration for the I2C Interrupt Controller on the AST24XX and
-AST25XX SoCs.
-
-Required Properties:
-- #address-cells : should be 1
-- #size-cells : should be 1
-- #interrupt-cells : should be 1
-- compatible : should be "aspeed,ast2400-i2c-ic"
- or "aspeed,ast2500-i2c-ic"
-- reg : address start and range of controller
-- interrupts : interrupt number
-- interrupt-controller : denotes that the controller receives and fires
- new interrupts for child busses
-
-Example:
-
-i2c_ic: interrupt-controller@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- #interrupt-cells = <1>;
- compatible = "aspeed,ast2400-i2c-ic";
- reg = <0x0 0x40>;
- interrupts = <12>;
- interrupt-controller;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.yaml b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.yaml
new file mode 100644
index 000000000000..6cff6a7231bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/aspeed,ast2400-i2c-ic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Aspeed I2C Interrupt Controller (AST24XX/AST25XX)
+
+maintainers:
+ - Ryan Chen <ryan_chen@aspeedtech.com>
+
+properties:
+ compatible:
+ enum:
+ - aspeed,ast2400-i2c-ic
+ - aspeed,ast2500-i2c-ic
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#interrupt-cells'
+ - interrupts
+ - interrupt-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@0 {
+ compatible = "aspeed,ast2400-i2c-ic";
+ reg = <0x0 0x40>;
+ #interrupt-cells = <1>;
+ interrupts = <12>;
+ interrupt-controller;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2500-scu-ic.yaml b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2500-scu-ic.yaml
new file mode 100644
index 000000000000..d5287a2bf866
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2500-scu-ic.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2025 Eddie James
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/aspeed,ast2500-scu-ic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Aspeed AST25XX and AST26XX SCU Interrupt Controller
+
+maintainers:
+ - Eddie James <eajames@linux.ibm.com>
+
+properties:
+ compatible:
+ enum:
+ - aspeed,ast2500-scu-ic
+ - aspeed,ast2600-scu-ic0
+ - aspeed,ast2600-scu-ic1
+
+ reg:
+ maxItems: 1
+
+ '#interrupt-cells':
+ const: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+required:
+ - compatible
+ - reg
+ - '#interrupt-cells'
+ - interrupts
+ - interrupt-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@18 {
+ compatible = "aspeed,ast2500-scu-ic";
+ reg = <0x18 0x4>;
+ #interrupt-cells = <1>;
+ interrupts = <21>;
+ interrupt-controller;
+ };
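
The text binding removed just below showed this controller nested inside its
parent SCU syscon node rather than standalone. A rough sketch of that
arrangement, with the syscon compatible strings assumed and the addresses and
ranges taken from the old example:

    syscon@1e6e2000 {
        compatible = "aspeed,ast2500-scu", "syscon", "simple-mfd";
        reg = <0x1e6e2000 0x1a8>;
        #address-cells = <1>;
        #size-cells = <1>;
        ranges = <0 0x1e6e2000 0x1a8>;

        interrupt-controller@18 {
            compatible = "aspeed,ast2500-scu-ic";
            reg = <0x18 0x4>;
            #interrupt-cells = <1>;
            interrupts = <21>;
            interrupt-controller;
        };
    };
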
diff --git a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2xxx-scu-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2xxx-scu-ic.txt
deleted file mode 100644
index 251ed44171db..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2xxx-scu-ic.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Aspeed AST25XX and AST26XX SCU Interrupt Controller
-
-Required Properties:
- - #interrupt-cells : must be 1
- - compatible : must be "aspeed,ast2500-scu-ic",
- "aspeed,ast2600-scu-ic0" or
- "aspeed,ast2600-scu-ic1"
- - interrupts : interrupt from the parent controller
- - interrupt-controller : indicates that the controller receives and
- fires new interrupts for child busses
-
-Example:
-
- syscon@1e6e2000 {
- ranges = <0 0x1e6e2000 0x1a8>;
-
- scu_ic: interrupt-controller@18 {
- #interrupt-cells = <1>;
- compatible = "aspeed,ast2500-scu-ic";
- interrupts = <21>;
- interrupt-controller;
- };
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
deleted file mode 100644
index bdd173056f72..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+++ /dev/null
@@ -1,131 +0,0 @@
-BCM2835 Top-Level ("ARMCTRL") Interrupt Controller
-
-The BCM2835 contains a custom top-level interrupt controller, which supports
-72 interrupt sources using a 2-level register scheme. The interrupt
-controller, or the HW block containing it, is referred to occasionally
-as "armctrl" in the SoC documentation, hence naming of this binding.
-
-The BCM2836 contains the same interrupt controller with the same
-interrupts, but the per-CPU interrupt controller is the root, and an
-interrupt there indicates that the ARMCTRL has an interrupt to handle.
-
-Required properties:
-
-- compatible : should be "brcm,bcm2835-armctrl-ic" or
- "brcm,bcm2836-armctrl-ic"
-- reg : Specifies base physical address and size of the registers.
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. The value shall be 2.
-
- The 1st cell is the interrupt bank; 0 for interrupts in the "IRQ basic
- pending" register, or 1/2 respectively for interrupts in the "IRQ pending
- 1/2" register.
-
- The 2nd cell contains the interrupt number within the bank. Valid values
- are 0..7 for bank 0, and 0..31 for bank 1.
-
-Additional required properties for brcm,bcm2836-armctrl-ic:
-- interrupts : Specifies the interrupt on the parent for this interrupt
- controller to handle.
-
-The interrupt sources are as follows:
-
-Bank 0:
-0: ARM_TIMER
-1: ARM_MAILBOX
-2: ARM_DOORBELL_0
-3: ARM_DOORBELL_1
-4: VPU0_HALTED
-5: VPU1_HALTED
-6: ILLEGAL_TYPE0
-7: ILLEGAL_TYPE1
-
-Bank 1:
-0: TIMER0
-1: TIMER1
-2: TIMER2
-3: TIMER3
-4: CODEC0
-5: CODEC1
-6: CODEC2
-7: VC_JPEG
-8: ISP
-9: VC_USB
-10: VC_3D
-11: TRANSPOSER
-12: MULTICORESYNC0
-13: MULTICORESYNC1
-14: MULTICORESYNC2
-15: MULTICORESYNC3
-16: DMA0
-17: DMA1
-18: VC_DMA2
-19: VC_DMA3
-20: DMA4
-21: DMA5
-22: DMA6
-23: DMA7
-24: DMA8
-25: DMA9
-26: DMA10
-27: DMA11-14 - shared interrupt for DMA 11 to 14
-28: DMAALL - triggers on all dma interrupts (including channel 15)
-29: AUX
-30: ARM
-31: VPUDMA
-
-Bank 2:
-0: HOSTPORT
-1: VIDEOSCALER
-2: CCP2TX
-3: SDC
-4: DSI0
-5: AVE
-6: CAM0
-7: CAM1
-8: HDMI0
-9: HDMI1
-10: PIXELVALVE1
-11: I2CSPISLV
-12: DSI1
-13: PWA0
-14: PWA1
-15: CPR
-16: SMI
-17: GPIO0
-18: GPIO1
-19: GPIO2
-20: GPIO3
-21: VC_I2C
-22: VC_SPI
-23: VC_I2SPCM
-24: VC_SDIO
-25: VC_UART
-26: SLIMBUS
-27: VEC
-28: CPG
-29: RNG
-30: VC_ARASANSDIO
-31: AVSPMON
-
-Example:
-
-/* BCM2835, first level */
-intc: interrupt-controller {
- compatible = "brcm,bcm2835-armctrl-ic";
- reg = <0x7e00b200 0x200>;
- interrupt-controller;
- #interrupt-cells = <2>;
-};
-
-/* BCM2836, second level */
-intc: interrupt-controller {
- compatible = "brcm,bcm2836-armctrl-ic";
- reg = <0x7e00b200 0x200>;
- interrupt-controller;
- #interrupt-cells = <2>;
-
- interrupt-parent = <&local_intc>;
- interrupts = <8>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.yaml b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.yaml
new file mode 100644
index 000000000000..625eb22bedf0
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.yaml
@@ -0,0 +1,162 @@
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/brcm,bcm2835-armctrl-ic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BCM2835 ARMCTRL Interrupt Controller
+
+maintainers:
+ - Florian Fainelli <florian.fainelli@broadcom.com>
+ - Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com>
+
+description: >
+ The BCM2835 contains a custom top-level interrupt controller, which supports
+ 72 interrupt sources using a 2-level register scheme. The interrupt
+ controller, or the HW block containing it, is referred to occasionally as
+ "armctrl" in the SoC documentation, hence naming of this binding.
+
+ The BCM2836 contains the same interrupt controller with the same interrupts,
+ but the per-CPU interrupt controller is the root, and an interrupt there
+ indicates that the ARMCTRL has an interrupt to handle.
+
+ The interrupt sources are as follows:
+
+ Bank 0:
+ 0: ARM_TIMER
+ 1: ARM_MAILBOX
+ 2: ARM_DOORBELL_0
+ 3: ARM_DOORBELL_1
+ 4: VPU0_HALTED
+ 5: VPU1_HALTED
+ 6: ILLEGAL_TYPE0
+ 7: ILLEGAL_TYPE1
+
+ Bank 1:
+ 0: TIMER0
+ 1: TIMER1
+ 2: TIMER2
+ 3: TIMER3
+ 4: CODEC0
+ 5: CODEC1
+ 6: CODEC2
+ 7: VC_JPEG
+ 8: ISP
+ 9: VC_USB
+ 10: VC_3D
+ 11: TRANSPOSER
+ 12: MULTICORESYNC0
+ 13: MULTICORESYNC1
+ 14: MULTICORESYNC2
+ 15: MULTICORESYNC3
+ 16: DMA0
+ 17: DMA1
+ 18: VC_DMA2
+ 19: VC_DMA3
+ 20: DMA4
+ 21: DMA5
+ 22: DMA6
+ 23: DMA7
+ 24: DMA8
+ 25: DMA9
+ 26: DMA10
+ 27: DMA11-14 - shared interrupt for DMA 11 to 14
+ 28: DMAALL - triggers on all dma interrupts (including channel 15)
+ 29: AUX
+ 30: ARM
+ 31: VPUDMA
+
+ Bank 2:
+ 0: HOSTPORT
+ 1: VIDEOSCALER
+ 2: CCP2TX
+ 3: SDC
+ 4: DSI0
+ 5: AVE
+ 6: CAM0
+ 7: CAM1
+ 8: HDMI0
+ 9: HDMI1
+ 10: PIXELVALVE1
+ 11: I2CSPISLV
+ 12: DSI1
+ 13: PWA0
+ 14: PWA1
+ 15: CPR
+ 16: SMI
+ 17: GPIO0
+ 18: GPIO1
+ 19: GPIO2
+ 20: GPIO3
+ 21: VC_I2C
+ 22: VC_SPI
+ 23: VC_I2SPCM
+ 24: VC_SDIO
+ 25: VC_UART
+ 26: SLIMBUS
+ 27: VEC
+ 28: CPG
+ 29: RNG
+ 30: VC_ARASANSDIO
+ 31: AVSPMON
+
+properties:
+ compatible:
+ enum:
+ - brcm,bcm2835-armctrl-ic
+ - brcm,bcm2836-armctrl-ic
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+ description: >
+ The 1st cell is the interrupt bank; 0 for interrupts in the "IRQ basic
+ pending" register, or 1/2 respectively for interrupts in the "IRQ pending
+ 1/2" register.
+
+ The 2nd cell contains the interrupt number within the bank. Valid values
+ are 0..7 for bank 0, and 0..31 for bank 1.
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,bcm2836-armctrl-ic
+ then:
+ required:
+ - interrupts
+ else:
+ properties:
+ interrupts: false
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@7e00b200 {
+ compatible = "brcm,bcm2835-armctrl-ic";
+ reg = <0x7e00b200 0x200>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ - |
+ interrupt-controller@7e00b200 {
+ compatible = "brcm,bcm2836-armctrl-ic";
+ reg = <0x7e00b200 0x200>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <8>;
+ };
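
On the consumer side, the two-cell encoding documented above is
<bank number-within-bank>. A peripheral wired to bank 1, interrupt 16 (DMA0 in
the table) would reference it roughly as follows; the node is hypothetical and
shown only for the cell layout:

    /* hypothetical consumer of bank 1, interrupt 16 (DMA0) */
    dma-controller@7e007000 {
        interrupt-parent = <&intc>;
        interrupts = <1 16>;
    };
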
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt
deleted file mode 100644
index 2bc19b1ac877..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-Broadcom BCM6345-style Level 1 interrupt controller
-
-This block is a first level interrupt controller that is typically connected
-directly to one of the HW INT lines on each CPU.
-
-Key elements of the hardware design include:
-
-- 32, 64 or 128 incoming level IRQ lines
-
-- Most onchip peripherals are wired directly to an L1 input
-
-- A separate instance of the register set for each CPU, allowing individual
- peripheral IRQs to be routed to any CPU
-
-- Contains one or more enable/status word pairs per CPU
-
-- No atomic set/clear operations
-
-- No polarity/level/edge settings
-
-- No FIFO or priority encoder logic; software is expected to read all
- 2-4 status words to determine which IRQs are pending
-
-Required properties:
-
-- compatible: should be "brcm,bcm<soc>-l1-intc", "brcm,bcm6345-l1-intc"
-- reg: specifies the base physical address and size of the registers;
- the number of supported IRQs is inferred from the size argument
-- interrupt-controller: identifies the node as an interrupt controller
-- #interrupt-cells: specifies the number of cells needed to encode an interrupt
- source, should be 1.
-- interrupts: specifies the interrupt line(s) in the interrupt-parent controller
- node; valid values depend on the type of parent interrupt controller
-
-If multiple reg ranges and interrupt-parent entries are present on an SMP
-system, the driver will allow IRQ SMP affinity to be set up through the
-/proc/irq/ interface. In the simplest possible configuration, only one
-reg range and one interrupt-parent is needed.
-
-The driver operates in native CPU endian by default, there is no support for
-specifying an alternative endianness.
-
-Example:
-
-periph_intc: interrupt-controller@10000000 {
- compatible = "brcm,bcm63168-l1-intc", "brcm,bcm6345-l1-intc";
- reg = <0x10000020 0x20>,
- <0x10000040 0x20>;
-
- interrupt-controller;
- #interrupt-cells = <1>;
-
- interrupt-parent = <&cpu_intc>;
- interrupts = <2>, <3>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.yaml
new file mode 100644
index 000000000000..ca6a2ff43acd
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/brcm,bcm6345-l1-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM6345-style Level 1 interrupt controller
+
+maintainers:
+ - Simon Arlott <simon@octiron.net>
+
+description: >
+ This block is a first level interrupt controller that is typically connected
+ directly to one of the HW INT lines on each CPU.
+
+ Key elements of the hardware design include:
+
+ - 32, 64 or 128 incoming level IRQ lines
+
+ - Most onchip peripherals are wired directly to an L1 input
+
+ - A separate instance of the register set for each CPU, allowing individual
+ peripheral IRQs to be routed to any CPU
+
+ - Contains one or more enable/status word pairs per CPU
+
+ - No atomic set/clear operations
+
+ - No polarity/level/edge settings
+
+ - No FIFO or priority encoder logic; software is expected to read all
+ 2-4 status words to determine which IRQs are pending
+
+ If multiple reg ranges and interrupt-parent entries are present on an SMP
+ system, the driver will allow IRQ SMP affinity to be set up through the
+ /proc/irq/ interface. In the simplest possible configuration, only one
+ reg range and one interrupt-parent is needed.
+
+  The driver operates in native CPU endianness by default; there is no support
+  for specifying an alternative endianness.
+
+properties:
+ compatible:
+ const: brcm,bcm6345-l1-intc
+
+ reg:
+ description: One entry per CPU core
+ minItems: 1
+ maxItems: 2
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 1
+
+ interrupts:
+ description: One entry per CPU core
+ minItems: 1
+ maxItems: 2
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@10000000 {
+ compatible = "brcm,bcm6345-l1-intc";
+ reg = <0x10000020 0x20>,
+ <0x10000040 0x20>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupts = <2>, <3>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-mx.txt b/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-mx.txt
deleted file mode 100644
index d4de980e55fa..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-mx.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-* Xtensa Interrupt Distributor and Programmable Interrupt Controller (MX)
-
-Required properties:
-- compatible: Should be "cdns,xtensa-mx".
-
-Remaining properties have exact same meaning as in Xtensa PIC
-(see cdns,xtensa-pic.txt).
-
-Examples:
- pic: pic {
- compatible = "cdns,xtensa-mx";
- /* one cell: internal irq number,
- * two cells: second cell == 0: internal irq number
- * second cell == 1: external irq number
- */
- #interrupt-cells = <2>;
- interrupt-controller;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-pic.txt b/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-pic.txt
deleted file mode 100644
index 026ef4cfc1d5..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-pic.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-* Xtensa built-in Programmable Interrupt Controller (PIC)
-
-Required properties:
-- compatible: Should be "cdns,xtensa-pic".
-- interrupt-controller: Identifies the node as an interrupt controller.
-- #interrupt-cells: The number of cells to define the interrupts.
- It may be either 1 or 2.
- When it's 1, the first cell is the internal IRQ number.
- When it's 2, the first cell is the IRQ number, and the second cell
- specifies whether it's internal (0) or external (1).
- Periferals are usually connected to a fixed external IRQ, but for different
- core variants it may be mapped to different internal IRQ.
- IRQ sensitivity and priority are fixed for each core variant and may not be
- changed at runtime.
-
-Examples:
- pic: pic {
- compatible = "cdns,xtensa-pic";
- /* one cell: internal irq number,
- * two cells: second cell == 0: internal irq number
- * second cell == 1: external irq number
- */
- #interrupt-cells = <2>;
- interrupt-controller;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-pic.yaml b/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-pic.yaml
new file mode 100644
index 000000000000..6773207fee01
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-pic.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2025 Max Filippov <jcmvbkbc@gmail.com>
+
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/cdns,xtensa-pic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xtensa Interrupt Controllers
+
+maintainers:
+ - Max Filippov <jcmvbkbc@gmail.com>
+
+description:
+ Xtensa Interrupt Distributor and Programmable Interrupt Controller (MX) and
+ Xtensa built-in Programmable Interrupt Controller (PIC)
+
+properties:
+ compatible:
+ enum:
+ - cdns,xtensa-mx
+ - cdns,xtensa-pic
+
+ '#interrupt-cells':
+ enum: [ 1, 2 ]
+ description:
+ Number of cells to define the interrupts. When 1, the first cell is the
+ internal IRQ number; when 2, the second cell specifies internal (0) or
+ external (1).
+
+ interrupt-controller: true
+
+required:
+ - compatible
+ - '#interrupt-cells'
+ - interrupt-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller {
+ compatible = "cdns,xtensa-pic";
+ /* one cell: internal irq number,
+ * two cells: second cell == 0: internal irq number
+ * second cell == 1: external irq number
+ */
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/chrp,open-pic.yaml b/Documentation/devicetree/bindings/interrupt-controller/chrp,open-pic.yaml
new file mode 100644
index 000000000000..f0d9bbd7d510
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/chrp,open-pic.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/chrp,open-pic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Open PIC Interrupt Controller
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+description:
+ This binding specifies what properties must be available in the device tree
+ representation of an Open PIC compliant interrupt controller. This binding is
+ based on the binding defined for Open PIC in [1] and is a superset of that
+ binding.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: fsl,mpic
+ - const: chrp,open-pic
+ - const: chrp,open-pic
+
+ device_type:
+    const: open-pic
+ deprecated: true
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#address-cells':
+ const: 0
+
+ '#interrupt-cells':
+ const: 2
+
+ pic-no-reset:
+ description: Indicates the PIC shall not be reset during runtime initialization.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#address-cells'
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@40000 {
+ compatible = "chrp,open-pic";
+ reg = <0x40000 0x40000>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ pic-no-reset;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/cirrus,clps711x-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/cirrus,clps711x-intc.txt
deleted file mode 100644
index 969b4582ec60..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/cirrus,clps711x-intc.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-Cirrus Logic CLPS711X Interrupt Controller
-
-Required properties:
-
-- compatible: Should be "cirrus,ep7209-intc".
-- reg: Specifies base physical address of the registers set.
-- interrupt-controller: Identifies the node as an interrupt controller.
-- #interrupt-cells: Specifies the number of cells needed to encode an
- interrupt source. The value shall be 1.
-
-The interrupt sources are as follows:
-ID Name Description
----------------------------
-1: BLINT Battery low (FIQ)
-3: MCINT Media changed (FIQ)
-4: CSINT CODEC sound
-5: EINT1 External 1
-6: EINT2 External 2
-7: EINT3 External 3
-8: TC1OI TC1 under flow
-9: TC2OI TC2 under flow
-10: RTCMI RTC compare match
-11: TINT 64Hz tick
-12: UTXINT1 UART1 transmit FIFO half empty
-13: URXINT1 UART1 receive FIFO half full
-14: UMSINT UART1 modem status changed
-15: SSEOTI SSI1 end of transfer
-16: KBDINT Keyboard
-17: SS2RX SSI2 receive FIFO half or greater full
-18: SS2TX SSI2 transmit FIFO less than half empty
-28: UTXINT2 UART2 transmit FIFO half empty
-29: URXINT2 UART2 receive FIFO half full
-32: DAIINT DAI interface (FIQ)
-
-Example:
- intc: interrupt-controller {
- compatible = "cirrus,ep7312-intc", "cirrus,ep7209-intc";
- reg = <0x80000000 0x4000>;
- interrupt-controller;
- #interrupt-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/cirrus,ep7209-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/cirrus,ep7209-intc.yaml
new file mode 100644
index 000000000000..d3cc49d29e10
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/cirrus,ep7209-intc.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/cirrus,ep7209-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic CLPS711X Interrupt Controller
+
+maintainers:
+ - Alexander Shiyan <shc_work@mail.ru>
+
+description: >
+ Cirrus Logic CLPS711X Interrupt Controller
+
+ The interrupt sources are as follows:
+ ID Name Description
+ ---------------------------
+ 1: BLINT Battery low (FIQ)
+ 3: MCINT Media changed (FIQ)
+ 4: CSINT CODEC sound
+ 5: EINT1 External 1
+ 6: EINT2 External 2
+ 7: EINT3 External 3
+ 8: TC1OI TC1 under flow
+ 9: TC2OI TC2 under flow
+ 10: RTCMI RTC compare match
+ 11: TINT 64Hz tick
+ 12: UTXINT1 UART1 transmit FIFO half empty
+ 13: URXINT1 UART1 receive FIFO half full
+ 14: UMSINT UART1 modem status changed
+ 15: SSEOTI SSI1 end of transfer
+ 16: KBDINT Keyboard
+ 17: SS2RX SSI2 receive FIFO half or greater full
+ 18: SS2TX SSI2 transmit FIFO less than half empty
+ 28: UTXINT2 UART2 transmit FIFO half empty
+ 29: URXINT2 UART2 receive FIFO half full
+ 32: DAIINT DAI interface (FIQ)
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: cirrus,ep7312-intc
+ - const: cirrus,ep7209-intc
+ - items:
+ - const: cirrus,ep7209-intc
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@80000000 {
+ compatible = "cirrus,ep7312-intc", "cirrus,ep7209-intc";
+ reg = <0x80000000 0x4000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/cnxt,cx92755-ic.yaml b/Documentation/devicetree/bindings/interrupt-controller/cnxt,cx92755-ic.yaml
new file mode 100644
index 000000000000..3f016cf47812
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/cnxt,cx92755-ic.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/cnxt,cx92755-ic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Conexant Digicolor Interrupt Controller
+
+maintainers:
+ - Baruch Siach <baruch@tkos.co.il>
+
+description: Conexant Digicolor Interrupt Controller
+
+properties:
+ compatible:
+ const: cnxt,cx92755-ic
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+ syscon:
+ description: A phandle to the syscon node describing UC registers
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+ - syscon
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@f0000040 {
+ compatible = "cnxt,cx92755-ic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0xf0000040 0x40>;
+ syscon = <&uc_regs>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/csky,apb-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/csky,apb-intc.txt
deleted file mode 100644
index 44286dcbac62..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/csky,apb-intc.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-==============================
-C-SKY APB Interrupt Controller
-==============================
-
-C-SKY APB Interrupt Controller is a simple soc interrupt controller
-on the apb bus and we only use it as root irq controller.
-
- - csky,apb-intc is used in a lot of csky fpgas and socs, it support 64 irq nums.
- - csky,dual-apb-intc consists of 2 apb-intc and 128 irq nums supported.
- - csky,gx6605s-intc is gx6605s soc internal irq interrupt controller, 64 irq nums.
-
-=============================
-intc node bindings definition
-=============================
-
- Description: Describes APB interrupt controller
-
- PROPERTIES
-
- - compatible
- Usage: required
- Value type: <string>
- Definition: must be "csky,apb-intc"
- "csky,dual-apb-intc"
- "csky,gx6605s-intc"
- - #interrupt-cells
- Usage: required
- Value type: <u32>
- Definition: must be <1>
- - reg
- Usage: required
- Value type: <u32 u32>
- Definition: <phyaddr size> in soc from cpu view
- - interrupt-controller:
- Usage: required
- - csky,support-pulse-signal:
- Usage: select
- Description: to support pulse signal flag
-
-Examples:
----------
-
- intc: interrupt-controller@500000 {
- compatible = "csky,apb-intc";
- #interrupt-cells = <1>;
- reg = <0x00500000 0x400>;
- interrupt-controller;
- };
-
- intc: interrupt-controller@500000 {
- compatible = "csky,dual-apb-intc";
- #interrupt-cells = <1>;
- reg = <0x00500000 0x400>;
- interrupt-controller;
- };
-
- intc: interrupt-controller@500000 {
- compatible = "csky,gx6605s-intc";
- #interrupt-cells = <1>;
- reg = <0x00500000 0x400>;
- interrupt-controller;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/csky,apb-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/csky,apb-intc.yaml
new file mode 100644
index 000000000000..902648ead975
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/csky,apb-intc.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/csky,apb-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: C-SKY APB Interrupt Controller
+
+maintainers:
+ - Guo Ren <guoren@kernel.org>
+
+description: >
+  The C-SKY APB Interrupt Controller is a simple SoC interrupt controller on the
+  APB bus; it is only used as the root IRQ controller.
+
+  - csky,apb-intc is used in many C-SKY FPGAs and SoCs; it supports 64 IRQs.
+  - csky,dual-apb-intc consists of two apb-intc blocks and supports 128 IRQs.
+  - csky,gx6605s-intc is the gx6605s SoC's internal interrupt controller with 64 IRQs.
+
+properties:
+ compatible:
+ enum:
+ - csky,apb-intc
+ - csky,dual-apb-intc
+ - csky,gx6605s-intc
+
+ reg:
+ maxItems: 1
+
+ '#interrupt-cells':
+ const: 1
+
+ interrupt-controller: true
+
+ csky,support-pulse-signal:
+ type: boolean
+ description: Support for pulse signal flag.
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - '#interrupt-cells'
+ - interrupt-controller
+
+examples:
+ - |
+ intc: interrupt-controller@500000 {
+ compatible = "csky,apb-intc";
+ #interrupt-cells = <1>;
+ reg = <0x00500000 0x400>;
+ interrupt-controller;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/csky,mpintc.txt b/Documentation/devicetree/bindings/interrupt-controller/csky,mpintc.txt
deleted file mode 100644
index e6bbcae4d07f..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/csky,mpintc.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================================
-C-SKY Multi-processors Interrupt Controller
-===========================================
-
-C-SKY Multi-processors Interrupt Controller is designed for ck807/ck810/ck860
-SMP soc, and it also could be used in non-SMP system.
-
-Interrupt number definition:
- 0-15 : software irq, and we use 15 as our IPI_IRQ.
- 16-31 : private irq, and we use 16 as the co-processor timer.
- 31-1024: common irq for soc ip.
-
-Interrupt trigger mode: (Defined in dt-bindings/interrupt-controller/irq.h)
- IRQ_TYPE_LEVEL_HIGH (default)
- IRQ_TYPE_LEVEL_LOW
- IRQ_TYPE_EDGE_RISING
- IRQ_TYPE_EDGE_FALLING
-
-=============================
-intc node bindings definition
-=============================
-
- Description: Describes SMP interrupt controller
-
- PROPERTIES
-
- - compatible
- Usage: required
- Value type: <string>
- Definition: must be "csky,mpintc"
- - #interrupt-cells
- Usage: required
- Value type: <u32>
- Definition: <2>
- - interrupt-controller:
- Usage: required
-
-Examples: ("interrupts = <irq_num IRQ_TYPE_XXX>")
----------
-#include <dt-bindings/interrupt-controller/irq.h>
-
- intc: interrupt-controller {
- compatible = "csky,mpintc";
- #interrupt-cells = <2>;
- interrupt-controller;
- };
-
- device: device-example {
- ...
- interrupts = <34 IRQ_TYPE_EDGE_RISING>;
- interrupt-parent = <&intc>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/csky,mpintc.yaml b/Documentation/devicetree/bindings/interrupt-controller/csky,mpintc.yaml
new file mode 100644
index 000000000000..3df7739e31c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/csky,mpintc.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/csky,mpintc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: C-SKY Multi-processors Interrupt Controller
+
+maintainers:
+ - Guo Ren <guoren@kernel.org>
+
+description: >
+  The C-SKY Multi-processor Interrupt Controller is designed for ck807/ck810/ck860
+  SMP SoCs and can also be used in non-SMP systems.
+
+ Interrupt number definition:
+ 0-15 : software irq, and we use 15 as our IPI_IRQ.
+ 16-31 : private irq, and we use 16 as the co-processor timer.
+ 31-1024: common irq for soc ip.
+
+properties:
+ compatible:
+ const: csky,mpintc
+
+ '#interrupt-cells':
+ const: 2
+
+ interrupt-controller: true
+
+required:
+ - compatible
+ - "#interrupt-cells"
+ - interrupt-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller {
+ compatible = "csky,mpintc";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
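The removed csky,mpintc.txt also carried a consumer example showing the two-cell specifier (hwirq number plus trigger type); reproduced here as a sketch, assuming the controller node above is labelled intc:

    #include <dt-bindings/interrupt-controller/irq.h>

    device-example {
        interrupt-parent = <&intc>;
        interrupts = <34 IRQ_TYPE_EDGE_RISING>;
    };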
diff --git a/Documentation/devicetree/bindings/interrupt-controller/digicolor-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/digicolor-ic.txt
deleted file mode 100644
index 42d41ec84c7b..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/digicolor-ic.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Conexant Digicolor Interrupt Controller
-
-Required properties:
-
-- compatible : should be "cnxt,cx92755-ic"
-- reg : Specifies base physical address and size of the interrupt controller
- registers (IC) area
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. The value shall be 1.
-- syscon: A phandle to the syscon node describing UC registers
-
-Example:
-
- intc: interrupt-controller@f0000040 {
- compatible = "cnxt,cx92755-ic";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0xf0000040 0x40>;
- syscon = <&uc_regs>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/econet,en751221-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/econet,en751221-intc.yaml
new file mode 100644
index 000000000000..5536319c49c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/econet,en751221-intc.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/econet,en751221-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: EcoNet EN751221 Interrupt Controller
+
+maintainers:
+ - Caleb James DeLisle <cjd@cjdns.fr>
+
+description:
+ The EcoNet EN751221 Interrupt Controller is a simple interrupt controller
+ designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can
+ be routed to either VPE but not both, so to support per-CPU interrupts, a
+ secondary IRQ number is allocated to control masking/unmasking on VPE#1. For
+ lack of a better term we call these "shadow interrupts". The assignment of
+ shadow interrupts is defined by the SoC integrator when wiring the interrupt
+ lines, so they are configurable in the device tree.
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ const: econet,en751221-intc
+
+ reg:
+ maxItems: 1
+
+ "#interrupt-cells":
+ const: 1
+
+ interrupt-controller: true
+
+ interrupts:
+ maxItems: 1
+ description: Interrupt line connecting this controller to its parent.
+
+ econet,shadow-interrupts:
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ description:
+ An array of interrupt number pairs where each pair represents a shadow
+ interrupt relationship. The first number in each pair is the primary IRQ,
+ and the second is its shadow IRQ used for VPE#1 control. For example,
+ <8 3> means IRQ 8 is shadowed by IRQ 3, so IRQ 3 cannot be mapped, but
+ when VPE#1 requests IRQ 8, it will manipulate the IRQ 3 mask bit.
+ minItems: 1
+ maxItems: 20
+ items:
+ items:
+ - description: primary per-CPU IRQ
+ - description: shadow IRQ number
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - "#interrupt-cells"
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@1fb40000 {
+ compatible = "econet,en751221-intc";
+ reg = <0x1fb40000 0x100>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+
+ econet,shadow-interrupts = <7 2>, <8 3>, <13 12>, <30 29>;
+ };
+...
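A hypothetical consumer sketch for this controller (the device, address and IRQ number are assumptions, and the controller node is assumed to be labelled intc): the consumer refers only to the primary IRQ, for example IRQ 8; its shadow IRQ 3 from the econet,shadow-interrupts pairs above stays internal to the controller driver.

    uart@1fbf0000 {
        compatible = "ns16550a";        /* hypothetical peripheral */
        reg = <0x1fbf0000 0x30>;
        interrupt-parent = <&intc>;
        interrupts = <8>;               /* primary IRQ; IRQ 3 is its shadow */
    };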
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ezchip,nps400-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/ezchip,nps400-ic.txt
deleted file mode 100644
index 888b2b9f7064..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/ezchip,nps400-ic.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-EZchip NPS Interrupt Controller
-
-Required properties:
-
-- compatible : should be "ezchip,nps400-ic"
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. The value shall be 1.
-
-
-Example:
-
-intc: interrupt-controller {
- compatible = "ezchip,nps400-ic";
- interrupt-controller;
- #interrupt-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ezchip,nps400-ic.yaml b/Documentation/devicetree/bindings/interrupt-controller/ezchip,nps400-ic.yaml
new file mode 100644
index 000000000000..589c6ebf6c1a
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/ezchip,nps400-ic.yaml
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ezchip,nps400-ic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: EZchip NPS Interrupt Controller
+
+maintainers:
+ - Noam Camus <noamc@ezchip.com>
+
+properties:
+ compatible:
+ const: ezchip,nps400-ic
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+required:
+ - compatible
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller {
+ compatible = "ezchip,nps400-ic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/faraday,ftintc010.txt b/Documentation/devicetree/bindings/interrupt-controller/faraday,ftintc010.txt
deleted file mode 100644
index 24428d47f487..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/faraday,ftintc010.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-* Faraday Technologt FTINTC010 interrupt controller
-
-This interrupt controller is a stock IP block from Faraday Technology found
-in the Gemini SoCs and other designs.
-
-Required properties:
-- compatible: must be one of
- "faraday,ftintc010"
- "cortina,gemini-interrupt-controller" (deprecated)
-- reg: The register bank for the interrupt controller.
-- interrupt-controller: Identifies the node as an interrupt controller
-- #interrupt-cells: The number of cells to define the interrupts.
- Must be 2 as the controller can specify level or rising edge
- IRQs. The bindings follows the standard binding for controllers
- with two cells specified in
- interrupt-controller/interrupts.txt
-
-Example:
-
-interrupt-controller@48000000 {
- compatible = "faraday,ftintc010"
- reg = <0x48000000 0x1000>;
- interrupt-controller;
- #interrupt-cells = <2>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/faraday,ftintc010.yaml b/Documentation/devicetree/bindings/interrupt-controller/faraday,ftintc010.yaml
new file mode 100644
index 000000000000..980e5c45f25b
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/faraday,ftintc010.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/faraday,ftintc010.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Faraday Technology FTINTC010 interrupt controller
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description:
+ This interrupt controller is a stock IP block from Faraday Technology found
+ in the Gemini SoCs and other designs.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: moxa,moxart-ic
+ - const: faraday,ftintc010
+ - enum:
+ - faraday,ftintc010
+ - cortina,gemini-interrupt-controller
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@48000000 {
+ compatible = "faraday,ftintc010";
+ reg = <0x48000000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
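A hypothetical FTINTC010 consumer using the two-cell specifier (IRQ number plus trigger type, level or rising edge); the device, address and IRQ number are assumptions, and the controller node above is assumed to be labelled intc:

    #include <dt-bindings/interrupt-controller/irq.h>

    timer@48001000 {
        compatible = "faraday,fttmr010";    /* hypothetical consumer */
        reg = <0x48001000 0x1000>;
        interrupt-parent = <&intc>;
        interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
    };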
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
index 6076ddf56bb5..c49688be1058 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
@@ -19,6 +19,7 @@ properties:
- fsl,imx8mp-irqsteer
- fsl,imx8qm-irqsteer
- fsl,imx8qxp-irqsteer
+ - fsl,imx94-irqsteer
- const: fsl,imx-irqsteer
reg:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,tzic.yaml b/Documentation/devicetree/bindings/interrupt-controller/fsl,tzic.yaml
new file mode 100644
index 000000000000..5f2c8761a31d
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,tzic.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/fsl,tzic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale tzic Interrupt controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - fsl,imx51-tzic
+ - fsl,imx53-tzic
+ - const: fsl,tzic
+ - items:
+ - const: fsl,imx50-tzic
+ - const: fsl,imx53-tzic
+ - const: fsl,tzic
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ tz-interrupt-controller@fffc000 {
+ compatible = "fsl,imx53-tzic", "fsl,tzic";
+ reg = <0x0fffc000 0x4000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt b/Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt
deleted file mode 100644
index 35f752706e7d..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Android Goldfish PIC
-
-Android Goldfish programmable interrupt device used by Android
-emulator.
-
-Required properties:
-
-- compatible : should contain "google,goldfish-pic"
-- reg : <registers mapping>
-- interrupts : <interrupt mapping>
-
-Example for mips when used in cascade mode:
-
- cpuintc {
- #interrupt-cells = <0x1>;
- #address-cells = <0>;
- interrupt-controller;
- compatible = "mti,cpu-interrupt-controller";
- };
-
- interrupt-controller@1f000000 {
- compatible = "google,goldfish-pic";
- reg = <0x1f000000 0x1000>;
-
- interrupt-controller;
- #interrupt-cells = <0x1>;
-
- interrupt-parent = <&cpuintc>;
- interrupts = <0x2>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.yaml b/Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.yaml
new file mode 100644
index 000000000000..ac3c3c3ca186
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/google,goldfish-pic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Android Goldfish PIC
+
+maintainers:
+ - Miodrag Dinic <miodrag.dinic@mips.com>
+
+description:
+  Android Goldfish programmable interrupt device used by the Android emulator.
+
+properties:
+ compatible:
+ const: google,goldfish-pic
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+
+examples:
+ - |
+ interrupt-controller@1f000000 {
+ compatible = "google,goldfish-pic";
+ reg = <0x1f000000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupts = <2>;
+ };
+
+additionalProperties: false
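The removed google,goldfish-pic.txt also showed the MIPS cascade arrangement, with the PIC chained off the CPU interrupt controller; roughly:

    cpuintc: interrupt-controller {
        compatible = "mti,cpu-interrupt-controller";
        interrupt-controller;
        #interrupt-cells = <1>;
        #address-cells = <0>;
    };

    interrupt-controller@1f000000 {
        compatible = "google,goldfish-pic";
        reg = <0x1f000000 0x1000>;
        interrupt-controller;
        #interrupt-cells = <1>;
        interrupt-parent = <&cpuintc>;
        interrupts = <2>;
    };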
diff --git a/Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.txt
deleted file mode 100644
index 5dc2a55ad811..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.txt
+++ /dev/null
@@ -1,105 +0,0 @@
-* ImgTec Powerdown Controller (PDC) Interrupt Controller Binding
-
-This binding specifies what properties must be available in the device tree
-representation of a PDC IRQ controller. This has a number of input interrupt
-lines which can wake the system, and are passed on through output interrupt
-lines.
-
-Required properties:
-
- - compatible: Specifies the compatibility list for the interrupt controller.
- The type shall be <string> and the value shall include "img,pdc-intc".
-
- - reg: Specifies the base PDC physical address(s) and size(s) of the
- addressable register space. The type shall be <prop-encoded-array>.
-
- - interrupt-controller: The presence of this property identifies the node
- as an interrupt controller. No property value shall be defined.
-
- - #interrupt-cells: Specifies the number of cells needed to encode an
- interrupt source. The type shall be a <u32> and the value shall be 2.
-
- - num-perips: Number of waking peripherals.
-
- - num-syswakes: Number of SysWake inputs.
-
- - interrupts: List of interrupt specifiers. The first specifier shall be the
- shared SysWake interrupt, and remaining specifies shall be PDC peripheral
- interrupts in order.
-
-* Interrupt Specifier Definition
-
- Interrupt specifiers consists of 2 cells encoded as follows:
-
- - <1st-cell>: The interrupt-number that identifies the interrupt source.
- 0-7: Peripheral interrupts
- 8-15: SysWake interrupts
-
- - <2nd-cell>: The level-sense information, encoded using the Linux interrupt
- flags as follows (only 4 valid for peripheral interrupts):
- 0 = none (decided by software)
- 1 = low-to-high edge triggered
- 2 = high-to-low edge triggered
- 3 = both edge triggered
- 4 = active-high level-sensitive (required for perip irqs)
- 8 = active-low level-sensitive
-
-* Examples
-
-Example 1:
-
- /*
- * TZ1090 PDC block
- */
- pdc: pdc@02006000 {
- // This is an interrupt controller node.
- interrupt-controller;
-
- // Three cells to encode interrupt sources.
- #interrupt-cells = <2>;
-
- // Offset address of 0x02006000 and size of 0x1000.
- reg = <0x02006000 0x1000>;
-
- // Compatible with Meta hardware trigger block.
- compatible = "img,pdc-intc";
-
- // Three peripherals are connected.
- num-perips = <3>;
-
- // Four SysWakes are connected.
- num-syswakes = <4>;
-
- interrupts = <18 4 /* level */>, /* Syswakes */
- <30 4 /* level */>, /* Peripheral 0 (RTC) */
- <29 4 /* level */>, /* Peripheral 1 (IR) */
- <31 4 /* level */>; /* Peripheral 2 (WDT) */
- };
-
-Example 2:
-
- /*
- * An SoC peripheral that is wired through the PDC.
- */
- rtc0 {
- // The interrupt controller that this device is wired to.
- interrupt-parent = <&pdc>;
-
- // Interrupt source Peripheral 0
- interrupts = <0 /* Peripheral 0 (RTC) */
- 4> /* IRQ_TYPE_LEVEL_HIGH */
- };
-
-Example 3:
-
- /*
- * An interrupt generating device that is wired to a SysWake pin.
- */
- touchscreen0 {
- // The interrupt controller that this device is wired to.
- interrupt-parent = <&pdc>;
-
- // Interrupt source SysWake 0 that is active-low level-sensitive
- interrupts = <8 /* SysWake0 */
- 8 /* IRQ_TYPE_LEVEL_LOW */>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.yaml
new file mode 100644
index 000000000000..99e7a4281595
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/img,pdc-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ImgTec Powerdown Controller (PDC) Interrupt Controller
+
+maintainers:
+ - James Hogan <jhogan@kernel.org>
+
+description:
+ ImgTec Powerdown Controller (PDC) Interrupt Controller has a number of input
+ interrupt lines which can wake the system, and are passed on through output
+ interrupt lines.
+
+properties:
+ compatible:
+ const: img,pdc-intc
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ description: >
+ <1st-cell>: The interrupt-number that identifies the interrupt source.
+ 0-7: Peripheral interrupts
+ 8-15: SysWake interrupts
+
+ <2nd-cell>: The level-sense information, encoded using the Linux interrupt
+ flags as follows (only 4 valid for peripheral interrupts):
+ 0 = none (decided by software)
+ 1 = low-to-high edge triggered
+ 2 = high-to-low edge triggered
+ 3 = both edge triggered
+ 4 = active-high level-sensitive (required for perip irqs)
+ 8 = active-low level-sensitive
+ const: 2
+
+ num-perips:
+ description: Number of waking peripherals
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 8
+
+ num-syswakes:
+ description: Number of SysWake inputs
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 8
+
+ interrupts:
+ description:
+ First entry is syswake IRQ. Subsequent entries are 1 per peripheral.
+ minItems: 2
+ maxItems: 9
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+ - num-perips
+ - num-syswakes
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@2006000 {
+ compatible = "img,pdc-intc";
+ reg = <0x02006000 0x1000>;
+ interrupts = <18 4>, <30 4>, <29 4>, <31 4>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-perips = <3>;
+ num-syswakes = <4>;
+ };
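The consumer examples from the removed img,pdc-intc.txt map onto the two-cell specifier documented above (first cell selects peripheral 0-7 or SysWake 8-15, second cell the sense); roughly, assuming the PDC node is labelled pdc:

    rtc0 {
        interrupt-parent = <&pdc>;
        interrupts = <0 4>;     /* Peripheral 0 (RTC), active-high level */
    };

    touchscreen0 {
        interrupt-parent = <&pdc>;
        interrupts = <8 8>;     /* SysWake 0, active-low level */
    };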
diff --git a/Documentation/devicetree/bindings/interrupt-controller/jcore,aic.txt b/Documentation/devicetree/bindings/interrupt-controller/jcore,aic.txt
deleted file mode 100644
index ee2ad36f8df8..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/jcore,aic.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-J-Core Advanced Interrupt Controller
-
-Required properties:
-
-- compatible: Should be "jcore,aic1" for the (obsolete) first-generation aic
- with 8 interrupt lines with programmable priorities, or "jcore,aic2" for
- the "aic2" core with 64 interrupts.
-
-- reg: Memory region(s) for configuration. For SMP, there should be one
- region per cpu, indexed by the sequential, zero-based hardware cpu
- number.
-
-- interrupt-controller: Identifies the node as an interrupt controller
-
-- #interrupt-cells: Specifies the number of cells needed to encode an
- interrupt source. The value shall be 1.
-
-
-Example:
-
-aic: interrupt-controller@200 {
- compatible = "jcore,aic2";
- reg = < 0x200 0x30 0x500 0x30 >;
- interrupt-controller;
- #interrupt-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/jcore,aic.yaml b/Documentation/devicetree/bindings/interrupt-controller/jcore,aic.yaml
new file mode 100644
index 000000000000..df8abc24591c
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/jcore,aic.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2018 Linaro Ltd.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/jcore,aic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: J-Core Advanced Interrupt Controller
+
+maintainers:
+ - Rich Felker <dalias@libc.org>
+
+properties:
+ compatible:
+ enum:
+ - jcore,aic1
+ - jcore,aic2
+
+ reg:
+ description: Memory region(s) for configuration. For SMP, there should be one
+ region per CPU, indexed by the sequential, zero-based hardware CPU number.
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ aic: interrupt-controller@200 {
+ compatible = "jcore,aic2";
+ reg = <0x200 0x30>, <0x500 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt
deleted file mode 100644
index aee38e7c13e7..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-TI-NSPIRE interrupt controller
-
-Required properties:
-- compatible: Compatible property value should be "lsi,zevio-intc".
-
-- reg: Physical base address of the controller and length of memory mapped
- region.
-
-- interrupt-controller : Identifies the node as an interrupt controller
-
-Example:
-
-interrupt-controller {
- compatible = "lsi,zevio-intc";
- interrupt-controller;
- reg = <0xDC000000 0x1000>;
- #interrupt-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.yaml
new file mode 100644
index 000000000000..e66b25f579c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2025 Daniel Tang <dt.tangr@gmail.com>
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/lsi,zevio-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI-NSPIRE Interrupt Controller
+
+maintainers:
+ - Daniel Tang <dt.tangr@gmail.com>
+
+description: |
+ TI-NSPIRE interrupt controller
+
+properties:
+ compatible:
+ const: lsi,zevio-intc
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@dc000000 {
+ compatible = "lsi,zevio-intc";
+ interrupt-controller;
+ reg = <0xdc000000 0x1000>;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,ap806-gicp.yaml b/Documentation/devicetree/bindings/interrupt-controller/marvell,ap806-gicp.yaml
new file mode 100644
index 000000000000..5faedd95b9a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,ap806-gicp.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/marvell,ap806-gicp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell GICP Controller
+
+maintainers:
+ - Thomas Petazzoni <thomas.petazzoni@bootlin.com>
+
+description:
+  GICP is a Marvell extension of the GIC that allows GIC SPI interrupts to be
+  triggered by a memory transaction. It is used by the ICU located in the
+  Marvell CP110 to turn wired interrupts inside the CP into GIC SPI
+  interrupts.
+
+properties:
+ compatible:
+ const: marvell,ap806-gicp
+
+ reg:
+ maxItems: 1
+
+ marvell,spi-ranges:
+ description: Tuples of GIC SPI interrupt ranges available for this GICP
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: SPI interrupt base
+ - description: Number of interrupts in the range
+
+ msi-controller: true
+
+required:
+ - compatible
+ - reg
+ - msi-controller
+ - marvell,spi-ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ msi-controller@3f0040 {
+ compatible = "marvell,ap806-gicp";
+ reg = <0x3f0040 0x10>;
+ marvell,spi-ranges = <64 64>, <288 64>;
+ msi-controller;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,ap806-sei.yaml b/Documentation/devicetree/bindings/interrupt-controller/marvell,ap806-sei.yaml
new file mode 100644
index 000000000000..e812f9a86307
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,ap806-sei.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/marvell,ap806-sei.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell SEI (System Error Interrupt) Controller
+
+maintainers:
+ - Miquel Raynal <miquel.raynal@bootlin.com>
+
+description: >
+ Marvell SEI (System Error Interrupt) controller is an interrupt aggregator. It
+ receives interrupts from several sources and aggregates them to a single
+ interrupt line (an SPI) on the parent interrupt controller.
+
+  This interrupt controller can handle up to 64 SEIs: one set comes from the AP
+  and is wired, while a second set comes from the CPs by means of MSIs.
+
+properties:
+ compatible:
+ const: marvell,ap806-sei
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#interrupt-cells':
+ const: 1
+
+ interrupt-controller: true
+
+ msi-controller: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#interrupt-cells'
+ - interrupt-controller
+ - msi-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ interrupt-controller@3f0200 {
+ compatible = "marvell,ap806-sei";
+ reg = <0x3f0200 0x40>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ msi-controller;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,armada-8k-pic.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,armada-8k-pic.txt
deleted file mode 100644
index 86a7b4cd03f5..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/marvell,armada-8k-pic.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Marvell Armada 7K/8K PIC Interrupt controller
----------------------------------------------
-
-This is the Device Tree binding for the PIC, a secondary interrupt
-controller available on the Marvell Armada 7K/8K ARM64 SoCs, and
-typically connected to the GIC as the primary interrupt controller.
-
-Required properties:
-- compatible: should be "marvell,armada-8k-pic"
-- interrupt-controller: identifies the node as an interrupt controller
-- #interrupt-cells: the number of cells to define interrupts on this
- controller. Should be 1
-- reg: the register area for the PIC interrupt controller
-- interrupts: the interrupt to the primary interrupt controller,
- typically the GIC
-
-Example:
-
- pic: interrupt-controller@3f0100 {
- compatible = "marvell,armada-8k-pic";
- reg = <0x3f0100 0x10>;
- #interrupt-cells = <1>;
- interrupt-controller;
- interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,armada-8k-pic.yaml b/Documentation/devicetree/bindings/interrupt-controller/marvell,armada-8k-pic.yaml
new file mode 100644
index 000000000000..5a455f7353db
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,armada-8k-pic.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/marvell,armada-8k-pic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Armada 7K/8K PIC Interrupt controller
+
+maintainers:
+ - Thomas Petazzoni <thomas.petazzoni@bootlin.com>
+
+description:
+ The Marvell Armada 7K/8K PIC is a secondary interrupt controller available on
+ the Marvell Armada 7K/8K ARM64 SoCs, and typically connected to the GIC as the
+ primary interrupt controller.
+
+properties:
+ compatible:
+ const: marvell,armada-8k-pic
+
+ reg:
+ maxItems: 1
+
+ "#interrupt-cells":
+ const: 1
+
+ interrupt-controller: true
+
+ interrupts:
+ maxItems: 1
+ description: Interrupt to the primary interrupt controller (GIC).
+
+required:
+ - compatible
+ - reg
+ - "#interrupt-cells"
+ - interrupt-controller
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ interrupt-controller@3f0100 {
+ compatible = "marvell,armada-8k-pic";
+ reg = <0x3f0100 0x10>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,cp110-icu.yaml b/Documentation/devicetree/bindings/interrupt-controller/marvell,cp110-icu.yaml
new file mode 100644
index 000000000000..9d4f06f45372
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,cp110-icu.yaml
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/marvell,cp110-icu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+maintainers:
+ - Miquel Raynal <miquel.raynal@bootlin.com>
+ - Thomas Petazzoni <thomas.petazzoni@bootlin.com>
+
+title: Marvell ICU Interrupt Controller
+
+description:
+ The Marvell ICU (Interrupt Consolidation Unit) controller is responsible for
+ collecting all wired-interrupt sources in the CP and communicating them to the
+ GIC in the AP. The unit translates interrupt requests on input wires to MSG
+ memory mapped transactions to the GIC. These messages access different GIC
+ memory areas depending on their type (NSR, SR, SEI, REI, etc).
+
+properties:
+ compatible:
+ const: marvell,cp110-icu
+
+ reg:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 1
+
+ ranges: true
+
+patternProperties:
+ "^interrupt-controller@":
+ type: object
+ description: Interrupt group child nodes
+ additionalProperties: false
+
+ properties:
+ compatible:
+ enum:
+ - marvell,cp110-icu-nsr
+ - marvell,cp110-icu-sr
+ - marvell,cp110-icu-sei
+ - marvell,cp110-icu-rei
+
+ reg:
+ maxItems: 1
+
+ '#interrupt-cells':
+ const: 2
+
+ interrupt-controller: true
+
+ msi-parent:
+ maxItems: 1
+ description: Phandle to the GICP controller
+
+ required:
+ - compatible
+ - reg
+ - '#interrupt-cells'
+ - interrupt-controller
+ - msi-parent
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@1e0000 {
+ compatible = "marvell,cp110-icu";
+ reg = <0x1e0000 0x440>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ interrupt-controller@10 {
+ compatible = "marvell,cp110-icu-nsr";
+ reg = <0x10 0x20>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ msi-parent = <&gicp>;
+ };
+
+ interrupt-controller@50 {
+ compatible = "marvell,cp110-icu-sei";
+ reg = <0x50 0x10>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ msi-parent = <&sei>;
+ };
+ };
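Consumer nodes reference the group sub-nodes directly, and per the removed marvell,icu.txt each interrupt number used across the icu_* groups must be unique within [0:206]. A sketch, assuming the NSR sub-node above is labelled icu_nsr:

    #include <dt-bindings/interrupt-controller/irq.h>

    device-example {
        interrupt-parent = <&icu_nsr>;
        interrupts = <106 IRQ_TYPE_LEVEL_HIGH>;
    };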
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,gicp.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,gicp.txt
deleted file mode 100644
index 64a00ceb7da4..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/marvell,gicp.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Marvell GICP Controller
------------------------
-
-GICP is a Marvell extension of the GIC that allows to trigger GIC SPI
-interrupts by doing a memory transaction. It is used by the ICU
-located in the Marvell CP110 to turn wired interrupts inside the CP
-into GIC SPI interrupts.
-
-Required properties:
-
-- compatible: Must be "marvell,ap806-gicp"
-
-- reg: Must be the address and size of the GICP SPI registers
-
-- marvell,spi-ranges: tuples of GIC SPI interrupts ranges available
- for this GICP
-
-- msi-controller: indicates that this is an MSI controller
-
-Example:
-
-gicp_spi: gicp-spi@3f0040 {
- compatible = "marvell,ap806-gicp";
- reg = <0x3f0040 0x10>;
- marvell,spi-ranges = <64 64>, <288 64>;
- msi-controller;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt
deleted file mode 100644
index 1c94a57a661e..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt
+++ /dev/null
@@ -1,112 +0,0 @@
-Marvell ICU Interrupt Controller
---------------------------------
-
-The Marvell ICU (Interrupt Consolidation Unit) controller is
-responsible for collecting all wired-interrupt sources in the CP and
-communicating them to the GIC in the AP, the unit translates interrupt
-requests on input wires to MSG memory mapped transactions to the GIC.
-These messages will access a different GIC memory area depending on
-their type (NSR, SR, SEI, REI, etc).
-
-Required properties:
-
-- compatible: Should be "marvell,cp110-icu"
-
-- reg: Should contain ICU registers location and length.
-
-Subnodes: Each group of interrupt is declared as a subnode of the ICU,
-with their own compatible.
-
-Required properties for the icu_nsr/icu_sei subnodes:
-
-- compatible: Should be one of:
- * "marvell,cp110-icu-nsr"
- * "marvell,cp110-icu-sr"
- * "marvell,cp110-icu-sei"
- * "marvell,cp110-icu-rei"
-
-- #interrupt-cells: Specifies the number of cells needed to encode an
- interrupt source. The value shall be 2.
-
- The 1st cell is the index of the interrupt in the ICU unit.
-
- The 2nd cell is the type of the interrupt. See arm,gic.txt for
- details.
-
-- interrupt-controller: Identifies the node as an interrupt
- controller.
-
-- msi-parent: Should point to the GICP controller, the GIC extension
- that allows to trigger interrupts using MSG memory mapped
- transactions.
-
-Note: each 'interrupts' property referring to any 'icu_xxx' node shall
- have a different number within [0:206].
-
-Example:
-
-icu: interrupt-controller@1e0000 {
- compatible = "marvell,cp110-icu";
- reg = <0x1e0000 0x440>;
-
- CP110_LABEL(icu_nsr): interrupt-controller@10 {
- compatible = "marvell,cp110-icu-nsr";
- reg = <0x10 0x20>;
- #interrupt-cells = <2>;
- interrupt-controller;
- msi-parent = <&gicp>;
- };
-
- CP110_LABEL(icu_sei): interrupt-controller@50 {
- compatible = "marvell,cp110-icu-sei";
- reg = <0x50 0x10>;
- #interrupt-cells = <2>;
- interrupt-controller;
- msi-parent = <&sei>;
- };
-};
-
-node1 {
- interrupt-parent = <&icu_nsr>;
- interrupts = <106 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-node2 {
- interrupt-parent = <&icu_sei>;
- interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-/* Would not work with the above nodes */
-node3 {
- interrupt-parent = <&icu_nsr>;
- interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-The legacy bindings were different in this way:
-
-- #interrupt-cells: The value was 3.
- The 1st cell was the group type of the ICU interrupt. Possible
- group types were:
- ICU_GRP_NSR (0x0) : Shared peripheral interrupt, non-secure
- ICU_GRP_SR (0x1) : Shared peripheral interrupt, secure
- ICU_GRP_SEI (0x4) : System error interrupt
- ICU_GRP_REI (0x5) : RAM error interrupt
- The 2nd cell was the index of the interrupt in the ICU unit.
- The 3rd cell was the type of the interrupt. See arm,gic.txt for
- details.
-
-Example:
-
-icu: interrupt-controller@1e0000 {
- compatible = "marvell,cp110-icu";
- reg = <0x1e0000 0x440>;
-
- #interrupt-cells = <3>;
- interrupt-controller;
- msi-parent = <&gicp>;
-};
-
-node1 {
- interrupt-parent = <&icu>;
- interrupts = <ICU_GRP_NSR 106 IRQ_TYPE_LEVEL_HIGH>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
deleted file mode 100644
index 0ebfc952cb34..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-
-* Marvell ODMI for MSI support
-
-Some Marvell SoCs have an On-Die Message Interrupt (ODMI) controller
-which can be used by on-board peripheral for MSI interrupts.
-
-Required properties:
-
-- compatible : The value here should contain:
-
- "marvell,ap806-odmi-controller", "marvell,odmi-controller".
-
-- interrupt,controller : Identifies the node as an interrupt controller.
-
-- msi-controller : Identifies the node as an MSI controller.
-
-- marvell,odmi-frames : Number of ODMI frames available. Each frame
- provides a number of events.
-
-- reg : List of register definitions, one for each
- ODMI frame.
-
-- marvell,spi-base : List of GIC base SPI interrupts, one for each
- ODMI frame. Those SPI interrupts are 0-based,
- i.e marvell,spi-base = <128> will use SPI #96.
- See Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
- for details about the GIC Device Tree binding.
-
-Example:
-
- odmi: odmi@300000 {
- compatible = "marvell,ap806-odmi-controller",
- "marvell,odmi-controller";
- interrupt-controller;
- msi-controller;
- marvell,odmi-frames = <4>;
- reg = <0x300000 0x4000>,
- <0x304000 0x4000>,
- <0x308000 0x4000>,
- <0x30C000 0x4000>;
- marvell,spi-base = <128>, <136>, <144>, <152>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.yaml b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.yaml
new file mode 100644
index 000000000000..9ec1ed4a5155
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/marvell,odmi-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell ODMI controller
+
+maintainers:
+ - Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+
+description:
+ Some Marvell SoCs have an On-Die Message Interrupt (ODMI) controller which can
+ be used by on-board peripherals for MSI interrupts.
+
+properties:
+ compatible:
+ const: marvell,odmi-controller
+
+ reg:
+ description: List of register definitions, one for each ODMI frame.
+
+ msi-controller: true
+
+ marvell,odmi-frames:
+ description: Number of ODMI frames available. Each frame provides a number of events.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ marvell,spi-base:
+ description: >
+ List of GIC base SPI interrupts, one for each ODMI frame. Those SPI
+ interrupts are 0-based, i.e. marvell,spi-base = <128> will use SPI #96.
+ See Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
+ for details.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+
+required:
+ - compatible
+ - reg
+ - msi-controller
+ - marvell,odmi-frames
+ - marvell,spi-base
+
+additionalProperties: false
+
+examples:
+ - |
+ msi-controller@300000 {
+ compatible = "marvell,odmi-controller";
+ msi-controller;
+ marvell,odmi-frames = <4>;
+ reg = <0x300000 0x4000>, <0x304000 0x4000>, <0x308000 0x4000>, <0x30C000 0x4000>;
+ marvell,spi-base = <128>, <136>, <144>, <152>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,orion-bridge-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/marvell,orion-bridge-intc.yaml
new file mode 100644
index 000000000000..e1310ec65382
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,orion-bridge-intc.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/marvell,orion-bridge-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Orion SoC Bridge Interrupt Controller
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+properties:
+ compatible:
+ const: marvell,orion-bridge-intc
+
+ reg:
+ minItems: 1
+ maxItems: 2
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+ interrupts:
+ description: Bridge interrupt of the main interrupt controller
+
+ marvell,#interrupts:
+ description: Number of interrupts provided by bridge interrupt controller.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 32
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@20110 {
+ compatible = "marvell,orion-bridge-intc";
+ reg = <0x20110 0x8>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupts = <0>;
+ /* Dove bridge provides 5 interrupts */
+ marvell,#interrupts = <5>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,orion-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,orion-intc.txt
deleted file mode 100644
index 2c11ac76fac9..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/marvell,orion-intc.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Marvell Orion SoC interrupt controllers
-
-* Main interrupt controller
-
-Required properties:
-- compatible: shall be "marvell,orion-intc"
-- reg: base address(es) of interrupt registers starting with CAUSE register
-- interrupt-controller: identifies the node as an interrupt controller
-- #interrupt-cells: number of cells to encode an interrupt source, shall be 1
-
-The interrupt sources map to the corresponding bits in the interrupt
-registers, i.e.
-- 0 maps to bit 0 of first base address,
-- 1 maps to bit 1 of first base address,
-- 32 maps to bit 0 of second base address, and so on.
-
-Example:
- intc: interrupt-controller {
- compatible = "marvell,orion-intc";
- interrupt-controller;
- #interrupt-cells = <1>;
- /* Dove has 64 first level interrupts */
- reg = <0x20200 0x10>, <0x20210 0x10>;
- };
-
-* Bridge interrupt controller
-
-Required properties:
-- compatible: shall be "marvell,orion-bridge-intc"
-- reg: base address of bridge interrupt registers starting with CAUSE register
-- interrupts: bridge interrupt of the main interrupt controller
-- interrupt-controller: identifies the node as an interrupt controller
-- #interrupt-cells: number of cells to encode an interrupt source, shall be 1
-
-Optional properties:
-- marvell,#interrupts: number of interrupts provided by bridge interrupt
- controller, defaults to 32 if not set
-
-Example:
- bridge_intc: interrupt-controller {
- compatible = "marvell,orion-bridge-intc";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0x20110 0x8>;
- interrupts = <0>;
- /* Dove bridge provides 5 interrupts */
- marvell,#interrupts = <5>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,sei.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,sei.txt
deleted file mode 100644
index 0beafed502f5..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/marvell,sei.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Marvell SEI (System Error Interrupt) Controller
------------------------------------------------
-
-Marvell SEI (System Error Interrupt) controller is an interrupt
-aggregator. It receives interrupts from several sources and aggregates
-them to a single interrupt line (an SPI) on the parent interrupt
-controller.
-
-This interrupt controller can handle up to 64 SEIs, a set comes from the
-AP and is wired while a second set comes from the CPs by the mean of
-MSIs.
-
-Required properties:
-
-- compatible: should be one of:
- * "marvell,ap806-sei"
-- reg: SEI registers location and length.
-- interrupts: identifies the parent IRQ that will be triggered.
-- #interrupt-cells: number of cells to define an SEI wired interrupt
- coming from the AP, should be 1. The cell is the IRQ
- number.
-- interrupt-controller: identifies the node as an interrupt controller
- for AP interrupts.
-- msi-controller: identifies the node as an MSI controller for the CPs
- interrupts.
-
-Example:
-
- sei: interrupt-controller@3f0200 {
- compatible = "marvell,ap806-sei";
- reg = <0x3f0200 0x40>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
- #interrupt-cells = <1>;
- interrupt-controller;
- msi-controller;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/microchip,pic32-evic.txt b/Documentation/devicetree/bindings/interrupt-controller/microchip,pic32-evic.txt
deleted file mode 100644
index c3a1b37c4c35..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/microchip,pic32-evic.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-Microchip PIC32 Interrupt Controller
-====================================
-
-The Microchip PIC32 contains an Enhanced Vectored Interrupt Controller (EVIC).
-It handles all internal and external interrupts. This controller exists outside
-of the CPU and is the arbitrator of all interrupts (including interrupts from
-the CPU itself) before they are presented to the CPU.
-
-External interrupts have a software configurable edge polarity. Non external
-interrupts have a type and polarity that is determined by the source of the
-interrupt.
-
-Required properties
--------------------
-
-- compatible: Should be "microchip,pic32mzda-evic"
-- reg: Specifies physical base address and size of register range.
-- interrupt-controller: Identifies the node as an interrupt controller.
-- #interrupt cells: Specifies the number of cells used to encode an interrupt
- source connected to this controller. The value shall be 2 and interrupt
- descriptor shall have the following format:
-
- <hw_irq irq_type>
-
- hw_irq - represents the hardware interrupt number as in the data sheet.
- irq_type - is used to describe the type and polarity of an interrupt. For
- internal interrupts use IRQ_TYPE_EDGE_RISING for non persistent interrupts and
- IRQ_TYPE_LEVEL_HIGH for persistent interrupts. For external interrupts use
- IRQ_TYPE_EDGE_RISING or IRQ_TYPE_EDGE_FALLING to select the desired polarity.
-
-Optional properties
--------------------
-- microchip,external-irqs: u32 array of external interrupts with software
- polarity configuration. This array corresponds to the bits in the INTCON
- SFR.
-
-Example
--------
-
-evic: interrupt-controller@1f810000 {
- compatible = "microchip,pic32mzda-evic";
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x1f810000 0x1000>;
- microchip,external-irqs = <3 8 13 18 23>;
-};
-
-Each device/peripheral must request its interrupt line with the associated type
-and polarity.
-
-Internal interrupt DTS snippet
-------------------------------
-
-device@1f800000 {
- ...
- interrupts = <113 IRQ_TYPE_LEVEL_HIGH>;
- ...
-};
-
-External interrupt DTS snippet
-------------------------------
-
-device@1f800000 {
- ...
- interrupts = <3 IRQ_TYPE_EDGE_RISING>;
- ...
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/microchip,pic32mzda-evic.yaml b/Documentation/devicetree/bindings/interrupt-controller/microchip,pic32mzda-evic.yaml
new file mode 100644
index 000000000000..74bfc42693f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/microchip,pic32mzda-evic.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/microchip,pic32mzda-evic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip PIC32 EVIC Interrupt Controller
+
+maintainers:
+ - Cristian Birsan <cristian.birsan@microchip.com>
+
+description: >
+ The Microchip PIC32 contains an Enhanced Vectored Interrupt Controller (EVIC).
+ It handles all internal and external interrupts. This controller exists
+ outside of the CPU and is the arbitrator of all interrupts (including
+ interrupts from the CPU itself) before they are presented to the CPU.
+
+  External interrupts have a software-configurable edge polarity. Non-external
+  interrupts have a type and polarity determined by the source of the
+  interrupt.
+
+properties:
+ compatible:
+ items:
+ - const: microchip,pic32mzda-evic
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ interrupts:
+ maxItems: 1
+
+ microchip,external-irqs:
+ description:
+ External interrupts with software polarity configuration corresponding to
+ the INTCON SFR bits.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@1f810000 {
+ compatible = "microchip,pic32mzda-evic";
+ reg = <0x1f810000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ microchip,external-irqs = <3 8 13 18 23>;
+ };
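The removed binding text showed how consumers choose the type cell: persistent internal interrupts use IRQ_TYPE_LEVEL_HIGH, non-persistent internal ones IRQ_TYPE_EDGE_RISING, and external interrupts select the desired edge polarity. Roughly (device addresses are placeholders):

    #include <dt-bindings/interrupt-controller/irq.h>

    device@1f800000 {
        /* internal, persistent interrupt */
        interrupts = <113 IRQ_TYPE_LEVEL_HIGH>;
    };

    device@1f820000 {
        /* external interrupt 3, rising-edge polarity */
        interrupts = <3 IRQ_TYPE_EDGE_RISING>;
    };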
diff --git a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
deleted file mode 100644
index 2ff356640100..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-NVIDIA Legacy Interrupt Controller
-
-All Tegra SoCs contain a legacy interrupt controller that routes
-interrupts to the GIC, and also serves as a wakeup source. It is also
-referred to as "ictlr", hence the name of the binding.
-
-The HW block exposes a number of interrupt controllers, each
-implementing a set of 32 interrupts.
-
-Required properties:
-
-- compatible : should be: "nvidia,tegra<chip>-ictlr". The LIC on
- subsequent SoCs remained backwards-compatible with Tegra30, so on
- Tegra generations later than Tegra30 the compatible value should
- include "nvidia,tegra30-ictlr".
-- reg : Specifies base physical address and size of the registers.
- Each controller must be described separately (Tegra20 has 4 of them,
- whereas Tegra30 and later have 5).
-- interrupt-controller : Identifies the node as an interrupt controller.
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. The value must be 3.
-
-Notes:
-
-- Because this HW ultimately routes interrupts to the GIC, the
- interrupt specifier must be that of the GIC.
-- Only SPIs can use the ictlr as an interrupt parent. SGIs and PPIs
- are explicitly forbidden.
-
-Example:
-
- ictlr: interrupt-controller@60004000 {
- compatible = "nvidia,tegra20-ictlr", "nvidia,tegra-ictlr";
- reg = <0x60004000 64>,
- <0x60004100 64>,
- <0x60004200 64>,
- <0x60004300 64>;
- interrupt-controller;
- #interrupt-cells = <3>;
- interrupt-parent = <&intc>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.yaml b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.yaml
new file mode 100644
index 000000000000..074a873880e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/nvidia,tegra20-ictlr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra20 Legacy Interrupt Controller
+
+maintainers:
+ - Thierry Reding <treding@nvidia.com>
+ - Jonathan Hunter <jonathanh@nvidia.com>
+
+description: >
+ All Tegra SoCs contain a legacy interrupt controller that routes interrupts to
+ the GIC, and also serves as a wakeup source. It is also referred to as
+ "ictlr", hence the name of the binding.
+
+ The HW block exposes a number of interrupt controllers, each implementing a
+ set of 32 interrupts.
+
+ Notes:
+ - Because this HW ultimately routes interrupts to the GIC, the
+ interrupt specifier must be that of the GIC.
+ - Only SPIs can use the ictlr as an interrupt parent. SGIs and PPIs
+ are explicitly forbidden.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - nvidia,tegra114-ictlr
+ - nvidia,tegra124-ictlr
+ - const: nvidia,tegra30-ictlr
+ - enum:
+ - nvidia,tegra20-ictlr
+ - nvidia,tegra30-ictlr
+
+ reg:
+ description: Each entry is a block of 32 interrupts
+ minItems: 4
+ maxItems: 5
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 3
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: nvidia,tegra20-ictlr
+ then:
+ properties:
+ reg:
+ maxItems: 4
+ else:
+ properties:
+ reg:
+ minItems: 5
+
+examples:
+ - |
+ interrupt-controller@60004000 {
+ compatible = "nvidia,tegra20-ictlr";
+ reg = <0x60004000 64>,
+ <0x60004100 64>,
+ <0x60004200 64>,
+ <0x60004300 64>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
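Because the ictlr forwards its inputs to the GIC, a consumer node uses the normal three-cell GIC SPI specifier; a sketch with a hypothetical device, assuming the controller above is labelled ictlr:

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    serial@70006000 {
        compatible = "nvidia,tegra20-uart";     /* hypothetical consumer */
        reg = <0x70006000 0x40>;
        interrupt-parent = <&ictlr>;
        interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
    };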
diff --git a/Documentation/devicetree/bindings/interrupt-controller/open-pic.txt b/Documentation/devicetree/bindings/interrupt-controller/open-pic.txt
deleted file mode 100644
index ccbbfdc53c72..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/open-pic.txt
+++ /dev/null
@@ -1,97 +0,0 @@
-* Open PIC Binding
-
-This binding specifies what properties must be available in the device tree
-representation of an Open PIC compliant interrupt controller. This binding is
-based on the binding defined for Open PIC in [1] and is a superset of that
-binding.
-
-Required properties:
-
- NOTE: Many of these descriptions were paraphrased here from [1] to aid
- readability.
-
- - compatible: Specifies the compatibility list for the PIC. The type
- shall be <string> and the value shall include "open-pic".
-
- - reg: Specifies the base physical address(s) and size(s) of this
- PIC's addressable register space. The type shall be <prop-encoded-array>.
-
- - interrupt-controller: The presence of this property identifies the node
- as an Open PIC. No property value shall be defined.
-
- - #interrupt-cells: Specifies the number of cells needed to encode an
- interrupt source. The type shall be a <u32> and the value shall be 2.
-
- - #address-cells: Specifies the number of cells needed to encode an
- address. The type shall be <u32> and the value shall be 0. As such,
- 'interrupt-map' nodes do not have to specify a parent unit address.
-
-Optional properties:
-
- - pic-no-reset: The presence of this property indicates that the PIC
- shall not be reset during runtime initialization. No property value shall
- be defined. The presence of this property also mandates that any
- initialization related to interrupt sources shall be limited to sources
- explicitly referenced in the device tree.
-
-* Interrupt Specifier Definition
-
- Interrupt specifiers consists of 2 cells encoded as
- follows:
-
- - <1st-cell>: The interrupt-number that identifies the interrupt source.
-
- - <2nd-cell>: The level-sense information, encoded as follows:
- 0 = low-to-high edge triggered
- 1 = active low level-sensitive
- 2 = active high level-sensitive
- 3 = high-to-low edge triggered
-
-* Examples
-
-Example 1:
-
- /*
- * An Open PIC interrupt controller
- */
- mpic: pic@40000 {
- // This is an interrupt controller node.
- interrupt-controller;
-
- // No address cells so that 'interrupt-map' nodes which reference
- // this Open PIC node do not need a parent address specifier.
- #address-cells = <0>;
-
- // Two cells to encode interrupt sources.
- #interrupt-cells = <2>;
-
- // Offset address of 0x40000 and size of 0x40000.
- reg = <0x40000 0x40000>;
-
- // Compatible with Open PIC.
- compatible = "open-pic";
-
- // The PIC shall not be reset.
- pic-no-reset;
- };
-
-Example 2:
-
- /*
- * An interrupt generating device that is wired to an Open PIC.
- */
- serial0: serial@4500 {
- // Interrupt source '42' that is active high level-sensitive.
- // Note that there are only two cells as specified in the interrupt
- // parent's '#interrupt-cells' property.
- interrupts = <42 2>;
-
- // The interrupt controller that this device is wired to.
- interrupt-parent = <&mpic>;
- };
-
-* References
-
-[1] Devicetree Specification
- (https://www.devicetree.org/specifications/)
-
diff --git a/Documentation/devicetree/bindings/interrupt-controller/opencores,or1k-pic.txt b/Documentation/devicetree/bindings/interrupt-controller/opencores,or1k-pic.txt
deleted file mode 100644
index 55c04faa3f3f..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/opencores,or1k-pic.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-OpenRISC 1000 Programmable Interrupt Controller
-
-Required properties:
-
-- compatible : should be "opencores,or1k-pic-level" for variants with
- level triggered interrupt lines, "opencores,or1k-pic-edge" for variants with
- edge triggered interrupt lines or "opencores,or1200-pic" for machines
- with the non-spec compliant or1200 type implementation.
-
- "opencores,or1k-pic" is also provided as an alias to "opencores,or1200-pic",
- but this is only for backwards compatibility.
-
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. The value shall be 1.
-
-Example:
-
-intc: interrupt-controller {
- compatible = "opencores,or1k-pic-level";
- interrupt-controller;
- #interrupt-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/opencores,or1k-pic.yaml b/Documentation/devicetree/bindings/interrupt-controller/opencores,or1k-pic.yaml
new file mode 100644
index 000000000000..995b68c3aed4
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/opencores,or1k-pic.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/opencores,or1k-pic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OpenRISC 1000 Programmable Interrupt Controller
+
+maintainers:
+ - Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+
+properties:
+ compatible:
+ enum:
+ - opencores,or1k-pic-level
+ - opencores,or1k-pic-edge
+ - opencores,or1200-pic
+ - opencores,or1k-pic
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+required:
+ - compatible
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller {
+ compatible = "opencores,or1k-pic-level";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
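
A consumer wired to this PIC references it with a single-cell specifier, since
'#interrupt-cells' is 1. A minimal sketch (the 'intc' label and the UART node
are illustrative, not part of the binding):

    intc: interrupt-controller {
        compatible = "opencores,or1k-pic-level";
        interrupt-controller;
        #interrupt-cells = <1>;
    };

    serial@90000000 {
        compatible = "ns16550a";        /* hypothetical peripheral */
        reg = <0x90000000 0x100>;
        interrupt-parent = <&intc>;
        interrupts = <2>;               /* PIC input line 2 */
    };
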
diff --git a/Documentation/devicetree/bindings/interrupt-controller/openrisc,ompic.txt b/Documentation/devicetree/bindings/interrupt-controller/openrisc,ompic.txt
deleted file mode 100644
index caec07cc7149..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/openrisc,ompic.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Open Multi-Processor Interrupt Controller
-
-Required properties:
-
-- compatible : This should be "openrisc,ompic"
-- reg : Specifies base physical address and size of the register space. The
- size is based on the number of cores the controller has been configured
- to handle, this should be set to 8 bytes per cpu core.
-- interrupt-controller : Identifies the node as an interrupt controller.
-- #interrupt-cells : This should be set to 0 as this will not be an irq
- parent.
-- interrupts : Specifies the interrupt line to which the ompic is wired.
-
-Example:
-
-ompic: interrupt-controller@98000000 {
- compatible = "openrisc,ompic";
- reg = <0x98000000 16>;
- interrupt-controller;
- #interrupt-cells = <0>;
- interrupts = <1>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/openrisc,ompic.yaml b/Documentation/devicetree/bindings/interrupt-controller/openrisc,ompic.yaml
new file mode 100644
index 000000000000..4efbfba3aa6b
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/openrisc,ompic.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/openrisc,ompic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Open Multi-Processor Interrupt Controller
+
+maintainers:
+ - Stafford Horne <shorne@gmail.com>
+
+properties:
+ compatible:
+ items:
+ - const: openrisc,ompic
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 0
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@98000000 {
+ compatible = "openrisc,ompic";
+ reg = <0x98000000 16>;
+ interrupt-controller;
+ #interrupt-cells = <0>;
+ interrupts = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ar7100-cpu-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/qca,ar7100-cpu-intc.yaml
new file mode 100644
index 000000000000..ab32a91af4c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/qca,ar7100-cpu-intc.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/qca,ar7100-cpu-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Atheros ath79 CPU interrupt controller
+
+maintainers:
+ - Alban Bedel <albeu@free.fr>
+
+description:
+  On most SoCs the IRQ controller needs to flush the DDR FIFO before running
+  the interrupt handler of some devices. This is configured using the
+  qca,ddr-wb-channels and qca,ddr-wb-channel-interrupts properties.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: qca,ar9132-cpu-intc
+ - const: qca,ar7100-cpu-intc
+ - items:
+ - const: qca,ar7100-cpu-intc
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+ qca,ddr-wb-channel-interrupts:
+ description: List of interrupts needing a write buffer flush
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+
+ qca,ddr-wb-channels:
+    description: List of write buffer channel phandles for each interrupt. If
+      qca,ddr-wb-channel-interrupts is not present, each entry's interrupt
+      defaults to its index.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+
+required:
+ - compatible
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller {
+ compatible = "qca,ar9132-cpu-intc", "qca,ar7100-cpu-intc";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ qca,ddr-wb-channel-interrupts = <2>, <3>, <4>, <5>;
+ qca,ddr-wb-channels = <&ddr_ctrl 3>, <&ddr_ctrl 2>,
+ <&ddr_ctrl 0>, <&ddr_ctrl 1>;
+ };
+
+ ddr_ctrl: memory-controller {
+ #qca,ddr-wb-channel-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ar7100-misc-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/qca,ar7100-misc-intc.yaml
new file mode 100644
index 000000000000..ae813189f5ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/qca,ar7100-misc-intc.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/qca,ar7100-misc-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Atheros AR7xxx/AR9XXX MISC interrupt controller
+
+maintainers:
+ - Alban Bedel <albeu@free.fr>
+ - Alexander Couzens <lynxis@fe80.eu>
+
+description:
+ The Qualcomm Atheros AR7xxx/AR9XXX MISC interrupt controller is a secondary
+ controller for lower priority interrupts.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: qca,ar9132-misc-intc
+ - const: qca,ar7100-misc-intc
+ - const: qca,ar7240-misc-intc
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - "#interrupt-cells"
+
+examples:
+ - |
+ interrupt-controller@18060010 {
+ compatible = "qca,ar9132-misc-intc", "qca,ar7100-misc-intc";
+ reg = <0x18060010 0x4>;
+ interrupts = <6>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-cpu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-cpu-intc.txt
deleted file mode 100644
index aabce7810d29..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-cpu-intc.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-Binding for Qualcomm Atheros AR7xxx/AR9XXX CPU interrupt controller
-
-On most SoC the IRQ controller need to flush the DDR FIFO before running
-the interrupt handler of some devices. This is configured using the
-qca,ddr-wb-channels and qca,ddr-wb-channel-interrupts properties.
-
-Required Properties:
-
-- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-cpu-intc"
- as fallback
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode interrupt
- source, should be 1 for intc
-
-Please refer to interrupts.txt in this directory for details of the common
-Interrupt Controllers bindings used by client devices.
-
-Optional Properties:
-
-- qca,ddr-wb-channel-interrupts: List of the interrupts needing a write
- buffer flush
-- qca,ddr-wb-channels: List of phandles to the write buffer channels for
- each interrupt. If qca,ddr-wb-channel-interrupts is not present the interrupt
- default to the entry's index.
-
-Example:
-
- interrupt-controller {
- compatible = "qca,ar9132-cpu-intc", "qca,ar7100-cpu-intc";
-
- interrupt-controller;
- #interrupt-cells = <1>;
-
- qca,ddr-wb-channel-interrupts = <2>, <3>, <4>, <5>;
- qca,ddr-wb-channels = <&ddr_ctrl 3>, <&ddr_ctrl 2>,
- <&ddr_ctrl 0>, <&ddr_ctrl 1>;
- };
-
- ...
-
- ddr_ctrl: memory-controller@18000000 {
- ...
- #qca,ddr-wb-channel-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
deleted file mode 100644
index ad70006c1848..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-Binding for Qualcomm Atheros AR7xxx/AR9XXX MISC interrupt controller
-
-The MISC interrupt controller is a secondary controller for lower priority
-interrupt.
-
-Required Properties:
-- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" or
- "qca,<soctype>-cpu-intc", "qca,ar7240-misc-intc"
-- reg: Base address and size of the controllers memory area
-- interrupts: Interrupt specifier for the controllers interrupt.
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode interrupt
- source, should be 1
-
-Compatible fallback depends on the SoC. Use ar7100 for ar71xx and ar913x,
-use ar7240 for all other SoCs.
-
-Please refer to interrupts.txt in this directory for details of the common
-Interrupt Controllers bindings used by client devices.
-
-Example:
-
- interrupt-controller@18060010 {
- compatible = "qca,ar9132-misc-intc", "qca,ar7100-misc-intc";
- reg = <0x18060010 0x4>;
-
- interrupt-parent = <&cpuintc>;
- interrupts = <6>;
-
- interrupt-controller;
- #interrupt-cells = <1>;
- };
-
-Another example:
-
- interrupt-controller@18060010 {
- compatible = "qca,ar9331-misc-intc", qca,ar7240-misc-intc";
- reg = <0x18060010 0x4>;
-
- interrupt-parent = <&cpuintc>;
- interrupts = <6>;
-
- interrupt-controller;
- #interrupt-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
index 3dfe425909d1..ffc4768bad06 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
@@ -70,6 +70,7 @@ properties:
- sophgo,cv1812h-plic
- sophgo,sg2002-plic
- sophgo,sg2042-plic
+ - sophgo,sg2044-plic
- thead,th1520-plic
- const: thead,c900-plic
- items:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,arc700-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,arc700-intc.txt
deleted file mode 100644
index 9a5d562435ea..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,arc700-intc.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-* ARC700 incore Interrupt Controller
-
- The core interrupt controller provides 32 prioritised interrupts (2 levels)
- to ARC700 core.
-
-Properties:
-
-- compatible: "snps,arc700-intc"
-- interrupt-controller: This is an interrupt controller.
-- #interrupt-cells: Must be <1>.
-
- Single Cell "interrupts" property of a device specifies the IRQ number
- between 0 to 31
-
- intc accessed via the special ARC AUX register interface, hence "reg" property
- is not specified.
-
-Example:
-
- intc: interrupt-controller {
- compatible = "snps,arc700-intc";
- interrupt-controller;
- #interrupt-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,arc700-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/snps,arc700-intc.yaml
new file mode 100644
index 000000000000..000a734d997c
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,arc700-intc.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/snps,arc700-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARC700 incore Interrupt Controller
+
+maintainers:
+ - Vineet Gupta <vgupta@kernel.org>
+
+description: >
+  The core interrupt controller provides 32 prioritized interrupts (2 levels)
+  to the ARC700 core.
+
+  The intc is accessed via the special ARC AUX register interface, hence
+  the "reg" property is not specified.
+
+properties:
+ compatible:
+ const: snps,arc700-intc
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ description: An interrupt number 0-31
+ const: 1
+
+required:
+ - compatible
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller {
+ compatible = "snps,arc700-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
deleted file mode 100644
index a5c1db95b3ec..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-* ARC-HS Interrupt Distribution Unit
-
- This optional 2nd level interrupt controller can be used in SMP configurations
- for dynamic IRQ routing, load balancing of common/external IRQs towards core
- intc.
-
-Properties:
-
-- compatible: "snps,archs-idu-intc"
-- interrupt-controller: This is an interrupt controller.
-- #interrupt-cells: Must be <1> or <2>.
-
- Value of the first cell specifies the "common" IRQ from peripheral to IDU.
- Number N of the particular interrupt line of IDU corresponds to the line N+24
- of the core interrupt controller.
-
- The (optional) second cell specifies any of the following flags:
- - bits[3:0] trigger type and level flags
- 1 = low-to-high edge triggered
- 2 = NOT SUPPORTED (high-to-low edge triggered)
- 4 = active high level-sensitive <<< DEFAULT
- 8 = NOT SUPPORTED (active low level-sensitive)
- When no second cell is specified, the interrupt is assumed to be level
- sensitive.
-
- The interrupt controller is accessed via the special ARC AUX register
- interface, hence "reg" property is not specified.
-
-Example:
- core_intc: core-interrupt-controller {
- compatible = "snps,archs-intc";
- interrupt-controller;
- #interrupt-cells = <1>;
- };
-
- idu_intc: idu-interrupt-controller {
- compatible = "snps,archs-idu-intc";
- interrupt-controller;
- interrupt-parent = <&core_intc>;
- #interrupt-cells = <1>;
- };
-
- some_device: serial@c0fc1000 {
- interrupt-parent = <&idu_intc>;
- interrupts = <0>; /* upstream idu IRQ #24 */
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.yaml
new file mode 100644
index 000000000000..286a964f23e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/snps,archs-idu-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARC-HS Interrupt Distribution Unit
+
+maintainers:
+ - Vineet Gupta <vgupta@kernel.org>
+
+description: >
+ ARC-HS Interrupt Distribution Unit is an optional 2nd level interrupt
+ controller which can be used in SMP configurations for dynamic IRQ routing,
+ load balancing of common/external IRQs towards core intc.
+
+ The interrupt controller is accessed via the special ARC AUX register
+ interface, hence "reg" property is not specified.
+
+properties:
+ compatible:
+ const: snps,archs-idu-intc
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ description: |
+ Number of interrupt specifier cells:
+ - 1: only a common IRQ is specified.
+ - 2: a second cell encodes trigger type and level flags:
+ 1 = low-to-high edge triggered
+ 4 = active high level-sensitive (default)
+ enum: [1, 2]
+
+required:
+ - compatible
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller {
+ compatible = "snps,archs-idu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-intc.txt
deleted file mode 100644
index 69f326d6a5ad..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-intc.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-* ARC-HS incore Interrupt Controller (Provided by cores implementing ARCv2 ISA)
-
-Properties:
-
-- compatible: "snps,archs-intc"
-- interrupt-controller: This is an interrupt controller.
-- #interrupt-cells: Must be <1>.
-
- Single Cell "interrupts" property of a device specifies the IRQ number
- between 16 to 256
-
- intc accessed via the special ARC AUX register interface, hence "reg" property
- is not specified.
-
-Example:
-
- intc: interrupt-controller {
- compatible = "snps,archs-intc";
- interrupt-controller;
- #interrupt-cells = <1>;
- interrupts = <16 17 18 19 20 21 22 23 24 25>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-intc.yaml
new file mode 100644
index 000000000000..9d248ef7fe3d
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-intc.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/snps,archs-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARC-HS incore Interrupt Controller
+
+maintainers:
+ - Vineet Gupta <vgupta@kernel.org>
+
+description:
+ ARC-HS incore Interrupt Controller provided by cores implementing ARCv2 ISA.
+  The intc is accessed via the special ARC AUX register interface, hence
+  the "reg" property is not specified.
+
+properties:
+ compatible:
+ const: snps,archs-intc
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+ interrupts:
+ description: List of IRQ numbers between 16 and 256
+ items:
+ items:
+ - minimum: 16
+ maximum: 256
+
+required:
+ - compatible
+ - interrupt-controller
+ - '#interrupt-cells'
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller {
+ compatible = "snps,archs-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupts = <16>, <17>, <18>, <19>, <20>, <21>, <22>, <23>, <24>, <25>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.txt
deleted file mode 100644
index 2db59df9408f..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-Synopsys DesignWare APB interrupt controller (dw_apb_ictl)
-
-Synopsys DesignWare provides interrupt controller IP for APB known as
-dw_apb_ictl. The IP is used as secondary interrupt controller in some SoCs with
-APB bus, e.g. Marvell Armada 1500. It can also be used as primary interrupt
-controller in some SoCs, e.g. Hisilicon SD5203.
-
-Required properties:
-- compatible: shall be "snps,dw-apb-ictl"
-- reg: physical base address of the controller and length of memory mapped
- region starting with ENABLE_LOW register
-- interrupt-controller: identifies the node as an interrupt controller
-- #interrupt-cells: number of cells to encode an interrupt-specifier, shall be 1
-
-Additional required property when it's used as secondary interrupt controller:
-- interrupts: interrupt reference to primary interrupt controller
-
-The interrupt sources map to the corresponding bits in the interrupt
-registers, i.e.
-- 0 maps to bit 0 of low interrupts,
-- 1 maps to bit 1 of low interrupts,
-- 32 maps to bit 0 of high interrupts,
-- 33 maps to bit 1 of high interrupts,
-- (optional) fast interrupts start at 64.
-
-Example:
- /* dw_apb_ictl is used as secondary interrupt controller */
- aic: interrupt-controller@3000 {
- compatible = "snps,dw-apb-ictl";
- reg = <0x3000 0xc00>;
- interrupt-controller;
- #interrupt-cells = <1>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- /* dw_apb_ictl is used as primary interrupt controller */
- vic: interrupt-controller@10130000 {
- compatible = "snps,dw-apb-ictl";
- reg = <0x10130000 0x1000>;
- interrupt-controller;
- #interrupt-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.yaml b/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.yaml
new file mode 100644
index 000000000000..6b59b600a037
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/snps,dw-apb-ictl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare APB interrupt controller
+
+maintainers:
+ - Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ - Zhen Lei <thunder.leizhen@huawei.com>
+
+description:
+ Synopsys DesignWare provides interrupt controller IP for APB known as
+ dw_apb_ictl. The IP is used as secondary interrupt controller in some SoCs
+ with APB bus, e.g. Marvell Armada 1500. It can also be used as primary
+ interrupt controller in some SoCs, e.g. Hisilicon SD5203.
+
+properties:
+ compatible:
+ const: snps,dw-apb-ictl
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+ interrupts:
+ maxItems: 1
+ description: >
+ Interrupt input connected to the primary interrupt controller when used
+      as a secondary controller. The interrupt sources map to the corresponding
+      bits in the low and high interrupt registers (0 to bit 0 low, 1 to bit 1
+      low, 32 to bit 0 high, 33 to bit 1 high; fast interrupts start at 64).
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ interrupt-controller@3000 {
+ compatible = "snps,dw-apb-ictl";
+ reg = <0x3000 0xc00>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ - |
+ interrupt-controller@10130000 {
+ compatible = "snps,dw-apb-ictl";
+ reg = <0x10130000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
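
To illustrate the source-to-bit mapping noted above, a client on the secondary
controller uses the bit index of its source directly as the specifier. A rough
sketch, assuming the first controller node above carries an 'aic' label and
using a hypothetical client device:

    serial@4000 {
        compatible = "snps,dw-apb-uart";    /* illustrative client */
        reg = <0x4000 0x100>;
        interrupt-parent = <&aic>;
        interrupts = <33>;                  /* bit 1 of the high interrupt register */
    };
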
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml b/Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml
index e1ffd55fa7bf..f6b8b1d92f79 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml
@@ -18,7 +18,9 @@ allOf:
properties:
compatible:
- const: sophgo,sg2042-msi
+ enum:
+ - sophgo,sg2042-msi
+ - sophgo,sg2044-msi
reg:
items:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,spear300-shirq.yaml b/Documentation/devicetree/bindings/interrupt-controller/st,spear300-shirq.yaml
new file mode 100644
index 000000000000..27d36173366a
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,spear300-shirq.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/st,spear300-shirq.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SPEAr3xx Shared IRQ controller
+
+maintainers:
+ - Viresh Kumar <vireshk@kernel.org>
+ - Shiraz Hashim <shiraz.linux.kernel@gmail.com>
+
+description: |
+  The SPEAr3xx architecture includes shared/multiplexed irqs for a certain set
+  of devices. The multiplexer provides a single interrupt to the parent
+  interrupt controller (VIC) on behalf of a group of devices.
+
+  There can be multiple groups available on SPEAr3xx variants, but not more
+  than 4. The number of devices in a group can differ; further, they may share
+  the same set of status/mask registers spanning across different bit masks.
+  Also, in some cases a group may not have enable or other registers. This
+  makes the software a little complex.
+
+ A single node in the device tree is used to describe the shared interrupt
+ multiplexer (one node for all groups). A group in the interrupt controller
+ shares config/control registers with other groups. For example, a 32-bit
+ interrupt enable/disable config register can accommodate up to 4 interrupt
+ groups.
+
+properties:
+ compatible:
+ enum:
+ - st,spear300-shirq
+ - st,spear310-shirq
+ - st,spear320-shirq
+
+ reg:
+ maxItems: 1
+
+ '#interrupt-cells':
+ const: 1
+
+ interrupt-controller: true
+
+ interrupts:
+ description: Interrupt specifier array for SHIRQ groups
+ minItems: 1
+ maxItems: 4
+
+required:
+ - compatible
+ - reg
+ - '#interrupt-cells'
+ - interrupt-controller
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@b3000000 {
+ compatible = "st,spear320-shirq";
+ reg = <0xb3000000 0x1000>;
+ interrupts = <28 29 30 1>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
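
Per the original text binding, the single specifier cell carries the offset
(starting from 0) of the multiplexed interrupt across all groups. A sketch of
a client node, assuming the controller above is labelled 'shirq' and using an
illustrative device and offset:

    serial@a3000000 {
        compatible = "arm,pl011", "arm,primecell";   /* illustrative */
        reg = <0xa3000000 0x1000>;
        interrupt-parent = <&shirq>;
        interrupts = <12>;                           /* offset in the shared IRQ space */
    };
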
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt b/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt
deleted file mode 100644
index a407c499b3cc..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-* SPEAr Shared IRQ layer (shirq)
-
-SPEAr3xx architecture includes shared/multiplexed irqs for certain set
-of devices. The multiplexor provides a single interrupt to parent
-interrupt controller (VIC) on behalf of a group of devices.
-
-There can be multiple groups available on SPEAr3xx variants but not
-exceeding 4. The number of devices in a group can differ, further they
-may share same set of status/mask registers spanning across different
-bit masks. Also in some cases the group may not have enable or other
-registers. This makes software little complex.
-
-A single node in the device tree is used to describe the shared
-interrupt multiplexor (one node for all groups). A group in the
-interrupt controller shares config/control registers with other groups.
-For example, a 32-bit interrupt enable/disable config register can
-accommodate up to 4 interrupt groups.
-
-Required properties:
- - compatible: should be, either of
- - "st,spear300-shirq"
- - "st,spear310-shirq"
- - "st,spear320-shirq"
- - interrupt-controller: Identifies the node as an interrupt controller.
- - #interrupt-cells: should be <1> which basically contains the offset
- (starting from 0) of interrupts for all the groups.
- - reg: Base address and size of shirq registers.
- - interrupts: The list of interrupts generated by the groups which are
- then connected to a parent interrupt controller. Each group is
- associated with one of the interrupts, hence number of interrupts (to
- parent) is equal to number of groups. The format of the interrupt
- specifier depends in the interrupt parent controller.
-
-Example:
-
-The following is an example from the SPEAr320 SoC dtsi file.
-
-shirq: interrupt-controller@b3000000 {
- compatible = "st,spear320-shirq";
- reg = <0xb3000000 0x1000>;
- interrupts = <28 29 30 1>;
- #interrupt-cells = <1>;
- interrupt-controller;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800-irqc.yaml b/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800-irqc.yaml
new file mode 100644
index 000000000000..f1a15d725cd6
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800-irqc.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/technologic,ts4800-irqc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TS-4800 FPGA Interrupt Controller
+
+maintainers:
+ - Damien Riegel <damien.riegel@savoirfairelinux.com>
+
+description:
+  The TS-4800 FPGA has an internal interrupt controller. When one of the
+  interrupts is triggered, the SoC is notified, usually using a GPIO as the
+  parent interrupt source.
+
+properties:
+ compatible:
+ const: technologic,ts4800-irqc
+
+ reg:
+ maxItems: 1
+
+ '#interrupt-cells':
+ const: 1
+
+ interrupt-controller: true
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@1000 {
+ compatible = "technologic,ts4800-irqc";
+ reg = <0x1000 0x80>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupts = <10>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800.txt b/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800.txt
deleted file mode 100644
index 341ae5909333..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-TS-4800 FPGA interrupt controller
-
-TS-4800 FPGA has an internal interrupt controller. When one of the
-interrupts is triggered, the SoC is notified, usually using a GPIO as
-parent interrupt source.
-
-Required properties:
-- compatible: should be "technologic,ts4800-irqc"
-- interrupt-controller: identifies the node as an interrupt controller
-- reg: physical base address of the controller and length of memory mapped
- region
-- #interrupt-cells: specifies the number of cells needed to encode an interrupt
- source, should be 1.
-- interrupts: specifies the interrupt line in the interrupt-parent controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-mswi.yaml b/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-mswi.yaml
index 065f2544b63b..d6fb08a54167 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-mswi.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-mswi.yaml
@@ -14,6 +14,7 @@ properties:
items:
- enum:
- sophgo,sg2042-aclint-mswi
+ - sophgo,sg2044-aclint-mswi
- const: thead,c900-aclint-mswi
reg:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,cp-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,cp-intc.txt
deleted file mode 100644
index 597e8a089fe4..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,cp-intc.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* TI Common Platform Interrupt Controller
-
-Common Platform Interrupt Controller (cp_intc) is used on
-OMAP-L1x SoCs and can support several configurable number
-of interrupts.
-
-Main node required properties:
-
-- compatible : should be:
- "ti,cp-intc"
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. The type shall be a <u32> and the value shall be 1.
-
- The cell contains the interrupt number in the range [0-128].
-- ti,intc-size: Number of interrupts handled by the interrupt controller.
-- reg: physical base address and size of the intc registers map.
-
-Example:
-
- intc: interrupt-controller@1 {
- compatible = "ti,cp-intc";
- interrupt-controller;
- #interrupt-cells = <1>;
- ti,intc-size = <101>;
- reg = <0xfffee000 0x2000>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,cp-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/ti,cp-intc.yaml
new file mode 100644
index 000000000000..77d018d20f9f
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,cp-intc.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/ti,cp-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI Common Platform Interrupt Controller
+
+maintainers:
+ - Bartosz Golaszewski <brgl@bgdev.pl>
+
+description:
+  The Common Platform Interrupt Controller (cp_intc) is used on OMAP-L1x SoCs
+  and supports a configurable number of interrupts.
+
+properties:
+ compatible:
+ const: ti,cp-intc
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+ description: Encodes an interrupt number in the range 0–128.
+
+ ti,intc-size:
+ description: Number of interrupts handled by the interrupt controller.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+ - ti,intc-size
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@fffee000 {
+ compatible = "ti,cp-intc";
+ reg = <0xfffee000 0x2000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ ti,intc-size = <101>;
+ };
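
A device wired to this controller encodes its interrupt number (0-128) in the
single specifier cell. A minimal sketch, assuming the controller node above is
labelled 'intc' and using an illustrative client:

    serial@1d0c000 {
        compatible = "ti,da830-uart", "ns16550a";   /* illustrative */
        reg = <0x1d0c000 0x100>;
        interrupt-parent = <&intc>;
        interrupts = <25>;
    };
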
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.txt
deleted file mode 100644
index 5f94d7739d8d..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Keystone 2 IRQ controller IP
-
-On Keystone SOCs, DSP cores can send interrupts to ARM
-host using the IRQ controller IP. It provides 28 IRQ signals to ARM.
-The IRQ handler running on HOST OS can identify DSP signal source by
-analyzing SRCCx bits in IPCARx registers. This is one of the component
-used by the IPC mechanism used on Keystone SOCs.
-
-Required Properties:
-- compatible: should be "ti,keystone-irq"
-- ti,syscon-dev : phandle and offset pair. The phandle to syscon used to
- access device control registers and the offset inside
- device control registers range.
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode interrupt
- source should be 1.
-- interrupts: interrupt reference to primary interrupt controller
-
-Please refer to interrupts.txt in this directory for details of the common
-Interrupt Controllers bindings used by client devices.
-
-Example:
- kirq0: keystone_irq0@26202a0 {
- compatible = "ti,keystone-irq";
- ti,syscon-dev = <&devctrl 0x2a0>;
- interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
- interrupt-controller;
- #interrupt-cells = <1>;
- };
-
- dsp0: dsp0 {
- compatible = "linux,rproc-user";
- ...
- interrupt-parent = <&kirq0>;
- interrupts = <10 2>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.yaml b/Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.yaml
new file mode 100644
index 000000000000..27d448d1786a
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/ti,keystone-irq.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Keystone 2 IRQ controller IP
+
+maintainers:
+ - Grygorii Strashko <grygorii.strashko@ti.com>
+
+description:
+  On Keystone SoCs, DSP cores can send interrupts to the ARM host using the
+  IRQ controller IP, which provides 28 IRQ signals to the ARM. The IRQ handler
+  running on the host OS can identify the DSP signal source by analyzing the
+  SRCCx bits in the IPCARx registers. This is one of the components used by
+  the IPC mechanism on Keystone SoCs.
+
+properties:
+ compatible:
+ const: ti,keystone-irq
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+ interrupts:
+ maxItems: 1
+
+ ti,syscon-dev:
+ description: Phandle and offset to syscon device
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: Phandle to syscon device control registers
+ - description: Offset to control register
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+ - interrupts
+ - ti,syscon-dev
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ interrupt-controller@2a0 {
+ compatible = "ti,keystone-irq";
+ reg = <0x2a0 0x4>;
+ ti,syscon-dev = <&devctrl 0x2a0>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
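
The removed text binding also showed a DSP consumer hanging off this
controller. A minimal sketch of such a client, assuming the controller node is
labelled 'kirq0' (the remoteproc node is illustrative, taken from the removed
example, and uses a single-cell specifier as required by '#interrupt-cells'):

    dsp0 {
        compatible = "linux,rproc-user";    /* as in the removed example */
        interrupt-parent = <&kirq0>;
        interrupts = <10>;
    };
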
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,omap-intc-irq.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,omap-intc-irq.txt
deleted file mode 100644
index 38ce5d037722..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,omap-intc-irq.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Omap2/3 intc controller
-
-On TI omap2 and 3 the intc interrupt controller can provide
-96 or 128 IRQ signals to the ARM host depending on the SoC.
-
-Required Properties:
-- compatible: should be one of
- "ti,omap2-intc"
- "ti,omap3-intc"
- "ti,dm814-intc"
- "ti,dm816-intc"
- "ti,am33xx-intc"
-
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode interrupt
- source, should be 1 for intc
-- interrupts: interrupt reference to primary interrupt controller
-
-Please refer to interrupts.txt in this directory for details of the common
-Interrupt Controllers bindings used by client devices.
-
-Example:
- intc: interrupt-controller@48200000 {
- compatible = "ti,omap3-intc";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0x48200000 0x1000>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,omap-intc-irq.yaml b/Documentation/devicetree/bindings/interrupt-controller/ti,omap-intc-irq.yaml
new file mode 100644
index 000000000000..cb118180621f
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,omap-intc-irq.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/ti,omap-intc-irq.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI OMAP Interrupt Controller
+
+maintainers:
+ - Tony Lindgren <tony@atomide.com>
+
+description:
+  On TI OMAP2 and OMAP3 the intc interrupt controller can provide 96 or 128
+  IRQ signals to the ARM host, depending on the SoC.
+
+properties:
+ compatible:
+ enum:
+ - ti,omap2-intc
+ - ti,omap3-intc
+ - ti,dm814-intc
+ - ti,dm816-intc
+ - ti,am33xx-intc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@48200000 {
+ compatible = "ti,omap3-intc";
+ reg = <0x48200000 0x1000>;
+ interrupts = <32>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,omap2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,omap2-intc.txt
deleted file mode 100644
index f2583e6ec060..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,omap2-intc.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* OMAP Interrupt Controller
-
-OMAP2/3 are using a TI interrupt controller that can support several
-configurable number of interrupts.
-
-Main node required properties:
-
-- compatible : should be:
- "ti,omap2-intc"
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. The type shall be a <u32> and the value shall be 1.
-
- The cell contains the interrupt number in the range [0-128].
-- ti,intc-size: Number of interrupts handled by the interrupt controller.
-- reg: physical base address and size of the intc registers map.
-
-Example:
-
- intc: interrupt-controller@1 {
- compatible = "ti,omap2-intc";
- interrupt-controller;
- #interrupt-cells = <1>;
- ti,intc-size = <96>;
- reg = <0x48200000 0x1000>;
- };
-
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu.txt
deleted file mode 100644
index 422d6908f8b2..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-TI OMAP4 Wake-up Generator
-
-All TI OMAP4/5 (and their derivatives) an interrupt controller that
-routes interrupts to the GIC, and also serves as a wakeup source. It
-is also referred to as "WUGEN-MPU", hence the name of the binding.
-
-Required properties:
-
-- compatible : should contain at least "ti,omap4-wugen-mpu" or
- "ti,omap5-wugen-mpu"
-- reg : Specifies base physical address and size of the registers.
-- interrupt-controller : Identifies the node as an interrupt controller.
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. The value must be 3.
-
-Notes:
-
-- Because this HW ultimately routes interrupts to the GIC, the
- interrupt specifier must be that of the GIC.
-- Only SPIs can use the WUGEN as an interrupt parent. SGIs and PPIs
- are explicitly forbidden.
-
-Example:
-
- wakeupgen: interrupt-controller@48281000 {
- compatible = "ti,omap5-wugen-mpu", "ti,omap4-wugen-mpu";
- interrupt-controller;
- #interrupt-cells = <3>;
- reg = <0x48281000 0x1000>;
- interrupt-parent = <&gic>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu.yaml b/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu.yaml
new file mode 100644
index 000000000000..6e3d6e6d9e07
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/ti,omap4-wugen-mpu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI OMAP4 Wake-up Generator
+
+maintainers:
+ - Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+description: >
+  All TI OMAP4/5 SoCs (and their derivatives) have an interrupt controller
+  that routes interrupts to the GIC and also serves as a wakeup source. It is
+  also referred to as "WUGEN-MPU", hence the name of the binding.
+
+ Notes:
+
+ - Because this HW ultimately routes interrupts to the GIC, the interrupt
+ specifier must be that of the GIC.
+ - Only SPIs can use the WUGEN as an interrupt parent. SGIs and PPIs are
+ explicitly forbidden.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: ti,omap5-wugen-mpu
+ - const: ti,omap4-wugen-mpu
+ - const: ti,omap4-wugen-mpu
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 3
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@48281000 {
+ compatible = "ti,omap5-wugen-mpu", "ti,omap4-wugen-mpu";
+ reg = <0x48281000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/via,vt8500-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/via,vt8500-intc.txt
deleted file mode 100644
index 0a4ce1051b02..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/via,vt8500-intc.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-VIA/Wondermedia VT8500 Interrupt Controller
------------------------------------------------------
-
-Required properties:
-- compatible : "via,vt8500-intc"
-- reg : Should contain 1 register ranges(address and length)
-- #interrupt-cells : should be <1>
-
-Example:
-
- intc: interrupt-controller@d8140000 {
- compatible = "via,vt8500-intc";
- interrupt-controller;
- reg = <0xd8140000 0x10000>;
- #interrupt-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/via,vt8500-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/via,vt8500-intc.yaml
new file mode 100644
index 000000000000..bc14c74bf7d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/via,vt8500-intc.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/via,vt8500-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: VIA and WonderMedia SoCs Interrupt Controller
+
+description:
+ This is the interrupt controller used in single-core ARM SoCs made by
+ VIA and WonderMedia (up to and including WM8950). Each block handles
+ up to 64 interrupt sources (level or edge triggered) and can generate
+ up to 8 interrupts to its parent when used in a chained configuration.
+
+maintainers:
+ - Alexey Charkov <alchark@gmail.com>
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ const: via,vt8500-intc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description:
+          Interrupt number raised by the IRQ0 output of this controller.
+          Only used if this controller is chained.
+      - description:
+          Interrupt number raised by the IRQ1 output of this controller.
+          Only used if this controller is chained.
+      - description:
+          Interrupt number raised by the IRQ2 output of this controller.
+          Only used if this controller is chained.
+      - description:
+          Interrupt number raised by the IRQ3 output of this controller.
+          Only used if this controller is chained.
+      - description:
+          Interrupt number raised by the IRQ4 output of this controller.
+          Only used if this controller is chained.
+      - description:
+          Interrupt number raised by the IRQ5 output of this controller.
+          Only used if this controller is chained.
+      - description:
+          Interrupt number raised by the IRQ6 output of this controller.
+          Only used if this controller is chained.
+      - description:
+          Interrupt number raised by the IRQ7 output of this controller.
+          Only used if this controller is chained.
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@d8140000 {
+ compatible = "via,vt8500-intc";
+ interrupt-controller;
+ reg = <0xd8140000 0x10000>;
+ #interrupt-cells = <1>;
+ };
+...
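
When chained, the controller's eight IRQ outputs are simply listed in
'interrupts' towards the parent, as described for the property above. A sketch
of a secondary instance (the parent label, addresses and line numbers are
illustrative):

    interrupt-controller@d8150000 {
        compatible = "via,vt8500-intc";
        interrupt-controller;
        reg = <0xd8150000 0x10000>;
        #interrupt-cells = <1>;
        interrupt-parent = <&intc0>;
        interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>;
    };
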
diff --git a/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml b/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
index ea6b0f5f24de..75750c64157c 100644
--- a/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
+++ b/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
@@ -74,6 +74,7 @@ properties:
- mediatek,mt2712-m4u # generation two
- mediatek,mt6779-m4u # generation two
- mediatek,mt6795-m4u # generation two
+ - mediatek,mt6893-iommu-mm # generation two
- mediatek,mt8167-m4u # generation two
- mediatek,mt8173-m4u # generation two
- mediatek,mt8183-m4u # generation two
@@ -131,6 +132,7 @@ properties:
dt-binding/memory/mt2712-larb-port.h for mt2712,
dt-binding/memory/mt6779-larb-port.h for mt6779,
dt-binding/memory/mt6795-larb-port.h for mt6795,
+ dt-binding/memory/mediatek,mt6893-memory-port.h for mt6893,
dt-binding/memory/mt8167-larb-port.h for mt8167,
dt-binding/memory/mt8173-larb-port.h for mt8173,
dt-binding/memory/mt8183-larb-port.h for mt8183,
@@ -157,6 +159,7 @@ allOf:
- mediatek,mt2701-m4u
- mediatek,mt2712-m4u
- mediatek,mt6795-m4u
+ - mediatek,mt6893-iommu-mm
- mediatek,mt8173-m4u
- mediatek,mt8186-iommu-mm
- mediatek,mt8188-iommu-vdo
@@ -173,6 +176,7 @@ allOf:
properties:
compatible:
enum:
+ - mediatek,mt6893-iommu-mm
- mediatek,mt8186-iommu-mm
- mediatek,mt8188-iommu-vdo
- mediatek,mt8188-iommu-vpp
diff --git a/Documentation/devicetree/bindings/leds/backlight/ti,lp8864.yaml b/Documentation/devicetree/bindings/leds/backlight/ti,lp8864.yaml
new file mode 100644
index 000000000000..d44232d462bd
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/backlight/ti,lp8864.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/backlight/ti,lp8864.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments - LP8864/LP8866 4/6-Channel LED Driver family
+
+maintainers:
+ - Andrew Davis <afd@ti.com>
+ - Alexander Sverdlin <alexander.sverdlin@siemens.com>
+
+description: |
+ LP8866-Q1, LP8866S-Q1, LP8864-Q1, LP8864S-Q1 are display LED-backlight drivers
+ with 4/6 channels. LED brightness can be controlled globally through the I2C
+ interface or PWM input.
+
+ For more product information please see the links below:
+ https://www.ti.com/product/LP8864-Q1
+ https://www.ti.com/product/LP8864S-Q1
+ https://www.ti.com/product/LP8866-Q1
+ https://www.ti.com/product/LP8866S-Q1
+
+properties:
+ compatible:
+ const: ti,lp8864
+
+ reg:
+ maxItems: 1
+ description: I2C slave address
+
+ enable-gpios:
+ maxItems: 1
+ description: GPIO pin to enable (active high) / disable the device
+
+ vled-supply:
+ description: LED supply
+
+ led:
+ type: object
+ $ref: common.yaml#
+ properties:
+ function: true
+ color: true
+ label: true
+ linux,default-trigger: true
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - led
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@3a {
+ compatible = "ti,lp8864";
+ reg = <0x3a>;
+ enable-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ vled-supply = <&vbatt>;
+
+ led {
+ function = LED_FUNCTION_BACKLIGHT;
+ color = <LED_COLOR_ID_WHITE>;
+ linux,default-trigger = "backlight";
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/leds/ti,tps61310.yaml b/Documentation/devicetree/bindings/leds/ti,tps61310.yaml
new file mode 100644
index 000000000000..118f9c8bfdf7
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/ti,tps61310.yaml
@@ -0,0 +1,120 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/ti,tps61310.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments TPS6131X flash LED driver
+
+maintainers:
+ - Matthias Fend <matthias.fend@emfend.at>
+
+description: |
+ The TPS61310/TPS61311 is a flash LED driver with I2C interface.
+ Its power stage is capable of supplying a maximum total current of roughly 1500mA.
+ The TPS6131x provides three constant-current sinks, capable of sinking
+ up to 2 x 400mA (LED1 and LED3) and 800mA (LED2) in flash mode.
+ In torch mode, each sink (LED1, LED2, LED3) supports currents up to 175mA.
+ Since the three current sinks share most of the control components such as
+ flash timer, control logic, safety timer and the operating mode, they cannot
+ be used completely independently of each other. Therefore, only one LED is
+ supported, but the current sinks can be combined accordingly.
+
+ The data sheet can be found at:
+ https://www.ti.com/lit/ds/symlink/tps61310.pdf
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - ti,tps61311
+ - const: ti,tps61310
+ - items:
+ - const: ti,tps61310
+
+ reg:
+ maxItems: 1
+
+ reset-gpios:
+ maxItems: 1
+ description: GPIO connected to NRESET pin
+
+ ti,valley-current-limit:
+ type: boolean
+ description:
+ Reduce the valley peak current limit from 1750mA to 1250mA (TPS61310) or
+ from 2480mA to 1800mA (TPS61311).
+
+ led:
+ type: object
+ $ref: common.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ led-sources:
+ minItems: 1
+ maxItems: 3
+ items:
+ enum: [1, 2, 3]
+
+ led-max-microamp:
+ oneOf:
+ - minimum: 50000
+ maximum: 350000
+ multipleOf: 50000
+ - minimum: 25000
+ maximum: 525000
+ multipleOf: 25000
+
+ flash-max-microamp:
+ oneOf:
+ - minimum: 50000
+ maximum: 800000
+ multipleOf: 50000
+ - minimum: 25000
+ maximum: 1500000
+ multipleOf: 25000
+
+ flash-max-timeout-us:
+ enum: [ 5300, 10700, 16000, 21300, 26600, 32000, 37300, 68200, 71500,
+ 102200, 136300, 170400, 204500, 340800, 579300, 852000 ]
+
+ required:
+ - led-sources
+ - led-max-microamp
+ - flash-max-microamp
+ - flash-max-timeout-us
+
+required:
+ - compatible
+ - reg
+ - led
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@33 {
+ compatible = "ti,tps61311", "ti,tps61310";
+ reg = <0x33>;
+
+ reset-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
+
+ led {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ led-sources = <1>, <2>, <3>;
+ led-max-microamp = <525000>;
+ flash-max-microamp = <1500000>;
+ flash-max-timeout-us = <852000>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
index a58a018f3f7b..ac726136f7e5 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
@@ -49,6 +49,7 @@ properties:
- qcom,qcs615-apss-shared
- qcom,sc7180-apss-shared
- qcom,sc8180x-apss-shared
+ - qcom,sm7150-apss-shared
- qcom,sm8150-apss-shared
- const: qcom,sdm845-apss-shared
- items:
@@ -72,6 +73,7 @@ properties:
description: phandles to the parent clocks of the clock driver
minItems: 2
maxItems: 3
+ deprecated: true
'#mbox-cells':
const: 1
@@ -82,6 +84,23 @@ properties:
clock-names:
minItems: 2
maxItems: 3
+ deprecated: true
+
+ clock-controller:
+ type: object
+ additionalProperties: false
+ properties:
+ clocks:
+ description: phandles to the parent clocks of the clock driver
+ minItems: 2
+ maxItems: 3
+
+ '#clock-cells':
+ enum: [0, 1]
+
+ clock-names:
+ minItems: 2
+ maxItems: 3
required:
- compatible
@@ -90,6 +109,76 @@ required:
additionalProperties: false
+# Clocks should be specified either on the parent node or on the child node
+oneOf:
+ - required:
+ - clock-controller
+ properties:
+ clocks: false
+ clock-names: false
+ '#clock-cells': false
+ - properties:
+ clock-controller: false
+
+$defs:
+ msm8916-apcs-clock-controller:
+ properties:
+ clocks:
+ items:
+ - description: primary pll parent of the clock driver
+ - description: auxiliary parent
+ clock-names:
+ items:
+ - const: pll
+ - const: aux
+ '#clock-cells':
+ const: 0
+
+ msm8939-apcs-clock-controller:
+ properties:
+ clocks:
+ items:
+ - description: primary pll parent of the clock driver
+ - description: auxiliary parent
+ - description: reference clock
+ clock-names:
+ items:
+ - const: pll
+ - const: aux
+ - const: ref
+ '#clock-cells':
+ const: 0
+
+ sdx55-apcs-clock-controller:
+ properties:
+ clocks:
+ items:
+ - description: reference clock
+ - description: primary pll parent of the clock driver
+ - description: auxiliary parent
+ clock-names:
+ items:
+ - const: ref
+ - const: pll
+ - const: aux
+ '#clock-cells':
+ const: 0
+
+ ipq6018-apcs-clock-controller:
+ properties:
+ clocks:
+ items:
+ - description: primary pll parent of the clock driver
+ - description: XO clock
+ - description: GCC GPLL0 clock source
+ clock-names:
+ items:
+ - const: pll
+ - const: xo
+ - const: gpll0
+ '#clock-cells':
+ const: 1
+
allOf:
- if:
properties:
@@ -98,15 +187,10 @@ allOf:
enum:
- qcom,msm8916-apcs-kpss-global
then:
+ $ref: "#/$defs/msm8916-apcs-clock-controller"
properties:
- clocks:
- items:
- - description: primary pll parent of the clock driver
- - description: auxiliary parent
- clock-names:
- items:
- - const: pll
- - const: aux
+ clock-controller:
+ $ref: "#/$defs/msm8916-apcs-clock-controller"
- if:
properties:
@@ -115,17 +199,10 @@ allOf:
enum:
- qcom,msm8939-apcs-kpss-global
then:
+ $ref: "#/$defs/msm8939-apcs-clock-controller"
properties:
- clocks:
- items:
- - description: primary pll parent of the clock driver
- - description: auxiliary parent
- - description: reference clock
- clock-names:
- items:
- - const: pll
- - const: aux
- - const: ref
+ clock-controller:
+ $ref: "#/$defs/msm8939-apcs-clock-controller"
- if:
properties:
@@ -134,17 +211,10 @@ allOf:
enum:
- qcom,sdx55-apcs-gcc
then:
+ $ref: "#/$defs/sdx55-apcs-clock-controller"
properties:
- clocks:
- items:
- - description: reference clock
- - description: primary pll parent of the clock driver
- - description: auxiliary parent
- clock-names:
- items:
- - const: ref
- - const: pll
- - const: aux
+ clock-controller:
+ $ref: "#/$defs/sdx55-apcs-clock-controller"
- if:
properties:
@@ -153,17 +223,10 @@ allOf:
enum:
- qcom,ipq6018-apcs-apps-global
then:
+ $ref: "#/$defs/ipq6018-apcs-clock-controller"
properties:
- clocks:
- items:
- - description: primary pll parent of the clock driver
- - description: XO clock
- - description: GCC GPLL0 clock source
- clock-names:
- items:
- - const: pll
- - const: xo
- - const: gpll0
+ clock-controller:
+ $ref: "#/$defs/ipq6018-apcs-clock-controller"
- if:
properties:
@@ -179,19 +242,7 @@ allOf:
properties:
clocks: false
clock-names: false
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,ipq6018-apcs-apps-global
- then:
- properties:
- '#clock-cells':
- const: 1
- else:
- properties:
+ clock-controller: false
'#clock-cells':
const: 0
@@ -219,6 +270,23 @@ examples:
- |
#define GCC_APSS_AHB_CLK_SRC 1
#define GCC_GPLL0_AO_OUT_MAIN 123
+ mailbox@b011000 {
+ compatible = "qcom,qcs404-apcs-apps-global",
+ "qcom,msm8916-apcs-kpss-global", "syscon";
+ reg = <0x0b011000 0x1000>;
+ #mbox-cells = <1>;
+
+ apcs_clk: clock-controller {
+ clocks = <&apcs_hfpll>, <&gcc GCC_GPLL0_AO_OUT_MAIN>;
+ clock-names = "pll", "aux";
+ #clock-cells = <0>;
+ };
+ };
+
+ # Example apcs with qcs404 (deprecated: use clock-controller subnode)
+ - |
+ #define GCC_APSS_AHB_CLK_SRC 1
+ #define GCC_GPLL0_AO_OUT_MAIN 123
apcs: mailbox@b011000 {
compatible = "qcom,qcs404-apcs-apps-global",
"qcom,msm8916-apcs-kpss-global", "syscon";
diff --git a/Documentation/devicetree/bindings/mailbox/sophgo,cv1800b-mailbox.yaml b/Documentation/devicetree/bindings/mailbox/sophgo,cv1800b-mailbox.yaml
new file mode 100644
index 000000000000..24e126bd3a20
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/sophgo,cv1800b-mailbox.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/sophgo,cv1800b-mailbox.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo CV1800/SG2000 mailbox controller
+
+maintainers:
+ - Yuntao Dai <d1581209858@live.com>
+ - Junhui Liu <junhui.liu@pigmoral.tech>
+
+description:
+ Mailboxes integrated in Sophgo CV1800/SG2000 SoCs have 8 channels, each
+ backed by an 8-byte FIFO. Any processor can write to an arbitrary channel
+ and raise an interrupt towards the receiver. A processor may also send
+ messages to itself.
+
+properties:
+ compatible:
+ const: sophgo,cv1800b-mailbox
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ "#mbox-cells":
+ const: 2
+ description: |
+ <&phandle channel target>
+ phandle : Label name of mailbox controller
+ channel : 0-7, Channel index
+ target : 0-3, Target processor ID
+
+ Sophgo CV1800/SG2000 SoCs include the following processors, numbered as:
+ <0> Cortex-A53 (Only available on CV181X/SG200X)
+ <1> C906B
+ <2> C906L
+ <3> 8051
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#mbox-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ mailbox@1900000 {
+ compatible = "sophgo,cv1800b-mailbox";
+ reg = <0x01900000 0x1000>;
+ interrupts = <101 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ };
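
To make the two-cell specifier described above concrete, a hedged consumer sketch follows. The client node name, the `mbox` label on the controller and the chosen channel/target values are illustrative assumptions and not part of the binding; only the <channel target> cell layout comes from the schema.

    mbox: mailbox@1900000 {
        compatible = "sophgo,cv1800b-mailbox";
        reg = <0x01900000 0x1000>;
        interrupts = <101 IRQ_TYPE_LEVEL_HIGH>;
        #mbox-cells = <2>;
    };

    /* Hypothetical client: channel 2, target <1> (C906B core) */
    my-client {
        mboxes = <&mbox 2 1>;
        mbox-names = "tx";
    };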
diff --git a/Documentation/devicetree/bindings/media/amlogic,c3-isp.yaml b/Documentation/devicetree/bindings/media/amlogic,c3-isp.yaml
new file mode 100644
index 000000000000..123bf462f098
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/amlogic,c3-isp.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/amlogic,c3-isp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amlogic C3 Image Signal Processing Unit
+
+maintainers:
+ - Keke Li <keke.li@amlogic.com>
+
+description:
+ Amlogic ISP is the RAW image processing module
+ and supports three-channel image output.
+
+properties:
+ compatible:
+ enum:
+ - amlogic,c3-isp
+
+ reg:
+ maxItems: 1
+
+ reg-names:
+ items:
+ - const: isp
+
+ power-domains:
+ maxItems: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: vapb
+ - const: isp0
+
+ interrupts:
+ maxItems: 1
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: input port node.
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - power-domains
+ - clocks
+ - clock-names
+ - interrupts
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/amlogic,c3-peripherals-clkc.h>
+ #include <dt-bindings/power/amlogic,c3-pwrc.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ isp: isp@ff000000 {
+ compatible = "amlogic,c3-isp";
+ reg = <0x0 0xff000000 0x0 0xf000>;
+ reg-names = "isp";
+ power-domains = <&pwrc PWRC_C3_ISP_TOP_ID>;
+ clocks = <&clkc_periphs CLKID_VAPB>,
+ <&clkc_periphs CLKID_ISP0>;
+ clock-names = "vapb", "isp0";
+ assigned-clocks = <&clkc_periphs CLKID_VAPB>,
+ <&clkc_periphs CLKID_ISP0>;
+ assigned-clock-rates = <0>, <400000000>;
+ interrupts = <GIC_SPI 145 IRQ_TYPE_EDGE_RISING>;
+
+ port {
+ c3_isp_in: endpoint {
+ remote-endpoint = <&c3_adap_out>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/amlogic,c3-mipi-adapter.yaml b/Documentation/devicetree/bindings/media/amlogic,c3-mipi-adapter.yaml
new file mode 100644
index 000000000000..ba43bc6709a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/amlogic,c3-mipi-adapter.yaml
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/amlogic,c3-mipi-adapter.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amlogic C3 MIPI adapter receiver
+
+maintainers:
+ - Keke Li <keke.li@amlogic.com>
+
+description:
+ The MIPI adapter converts the MIPI CSI-2 data
+ into an ISP-supported data format.
+
+properties:
+ compatible:
+ enum:
+ - amlogic,c3-mipi-adapter
+
+ reg:
+ maxItems: 3
+
+ reg-names:
+ items:
+ - const: top
+ - const: fd
+ - const: rd
+
+ power-domains:
+ maxItems: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: vapb
+ - const: isp0
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: input port node.
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: output port node.
+
+ required:
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - power-domains
+ - clocks
+ - clock-names
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/amlogic,c3-peripherals-clkc.h>
+ #include <dt-bindings/power/amlogic,c3-pwrc.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ adap: adap@ff010000 {
+ compatible = "amlogic,c3-mipi-adapter";
+ reg = <0x0 0xff010000 0x0 0x100>,
+ <0x0 0xff01b000 0x0 0x100>,
+ <0x0 0xff01d000 0x0 0x200>;
+ reg-names = "top", "fd", "rd";
+ power-domains = <&pwrc PWRC_C3_ISP_TOP_ID>;
+ clocks = <&clkc_periphs CLKID_VAPB>,
+ <&clkc_periphs CLKID_ISP0>;
+ clock-names = "vapb", "isp0";
+ assigned-clocks = <&clkc_periphs CLKID_VAPB>,
+ <&clkc_periphs CLKID_ISP0>;
+ assigned-clock-rates = <0>, <400000000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ c3_adap_in: endpoint {
+ remote-endpoint = <&c3_mipi_csi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ c3_adap_out: endpoint {
+ remote-endpoint = <&c3_isp_in>;
+ };
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/amlogic,c3-mipi-csi2.yaml b/Documentation/devicetree/bindings/media/amlogic,c3-mipi-csi2.yaml
new file mode 100644
index 000000000000..b0129beab0c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/amlogic,c3-mipi-csi2.yaml
@@ -0,0 +1,127 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/amlogic,c3-mipi-csi2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amlogic C3 MIPI CSI-2 receiver
+
+maintainers:
+ - Keke Li <keke.li@amlogic.com>
+
+description:
+ The MIPI CSI-2 receiver contains the CSI-2 RX PHY and host controller.
+ It receives MIPI data from the image sensor and forwards it
+ to the MIPI adapter.
+
+properties:
+ compatible:
+ enum:
+ - amlogic,c3-mipi-csi2
+
+ reg:
+ maxItems: 3
+
+ reg-names:
+ items:
+ - const: aphy
+ - const: dphy
+ - const: host
+
+ power-domains:
+ maxItems: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: vapb
+ - const: phy0
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: input port node, connected to sensor.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - data-lanes
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: output port node
+
+ required:
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - power-domains
+ - clocks
+ - clock-names
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/amlogic,c3-peripherals-clkc.h>
+ #include <dt-bindings/power/amlogic,c3-pwrc.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ csi: csi@ff018000 {
+ compatible = "amlogic,c3-mipi-csi2";
+ reg = <0x0 0xff018000 0x0 0x400>,
+ <0x0 0xff019000 0x0 0x300>,
+ <0x0 0xff01a000 0x0 0x100>;
+ reg-names = "aphy", "dphy", "host";
+ power-domains = <&pwrc PWRC_C3_MIPI_ISP_WRAP_ID>;
+ clocks = <&clkc_periphs CLKID_VAPB>,
+ <&clkc_periphs CLKID_CSI_PHY0>;
+ clock-names = "vapb", "phy0";
+ assigned-clocks = <&clkc_periphs CLKID_VAPB>,
+ <&clkc_periphs CLKID_CSI_PHY0>;
+ assigned-clock-rates = <0>, <200000000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ c3_mipi_csi_in: endpoint {
+ remote-endpoint = <&imx290_out>;
+ data-lanes = <1 2 3 4>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ c3_mipi_csi_out: endpoint {
+ remote-endpoint = <&c3_adap_in>;
+ };
+ };
+ };
+ };
+ };
+...
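
To make the sensor-to-receiver link in the example above concrete, here is a hedged sketch of the remote side of `&imx290_out`. The I2C bus, device address and clock phandle are assumptions, and the sensor node is abridged (required regulator supplies omitted); none of it is part of this binding.

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        camera@1a {
            compatible = "sony,imx290lqr";
            reg = <0x1a>;
            clocks = <&cam_xclk>;
            clock-names = "xclk";
            /* regulator supplies required by the sensor binding omitted */

            port {
                imx290_out: endpoint {
                    data-lanes = <1 2 3 4>;
                    link-frequencies = /bits/ 64 <222750000>;
                    remote-endpoint = <&c3_mipi_csi_in>;
                };
            };
        };
    };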
diff --git a/Documentation/devicetree/bindings/media/cec/nvidia,tegra114-cec.yaml b/Documentation/devicetree/bindings/media/cec/nvidia,tegra114-cec.yaml
index a6b73498bc21..4b46aa755ccd 100644
--- a/Documentation/devicetree/bindings/media/cec/nvidia,tegra114-cec.yaml
+++ b/Documentation/devicetree/bindings/media/cec/nvidia,tegra114-cec.yaml
@@ -14,10 +14,16 @@ allOf:
properties:
compatible:
- enum:
- - nvidia,tegra114-cec
- - nvidia,tegra124-cec
- - nvidia,tegra210-cec
+ oneOf:
+ - enum:
+ - nvidia,tegra114-cec
+ - nvidia,tegra124-cec
+ - nvidia,tegra210-cec
+ - items:
+ - enum:
+ - nvidia,tegra186-cec
+ - nvidia,tegra194-cec
+ - const: nvidia,tegra210-cec
clocks:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/fsl,imx-capture-subsystem.yaml b/Documentation/devicetree/bindings/media/fsl,imx-capture-subsystem.yaml
new file mode 100644
index 000000000000..25e65a344a0a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/fsl,imx-capture-subsystem.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/fsl,imx-capture-subsystem.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX Media Video Device
+
+description:
+ This is the media controller node for video capture support. It is a
+ virtual device that lists the camera serial interface nodes that the
+ media device will control.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx-capture-subsystem
+
+ ports:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ Should contain a list of phandles pointing to camera
+ sensor interface ports of IPU devices.
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ capture-subsystem {
+ compatible = "fsl,imx-capture-subsystem";
+ ports = <&ipu1_csi0>, <&ipu1_csi1>;
+ };
diff --git a/Documentation/devicetree/bindings/media/fsl,imx6-mipi-csi2.yaml b/Documentation/devicetree/bindings/media/fsl,imx6-mipi-csi2.yaml
new file mode 100644
index 000000000000..65255f576f26
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/fsl,imx6-mipi-csi2.yaml
@@ -0,0 +1,143 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/fsl,imx6-mipi-csi2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MIPI CSI-2 Receiver core in the i.MX SoC
+
+description:
+ This is the device node for the MIPI CSI-2 Receiver core in the i.MX
+ SoC. It is a Synopsys DesignWare MIPI CSI-2 host controller core
+ combined with a D-PHY core mixed into the same register block. In
+ addition, the device includes i.MX-specific "CSI2IPU gasket"
+ glue logic, also controlled from the same register block. The CSI2IPU
+ gasket demultiplexes the four virtual channel streams from the host
+ controller's 32-bit output image bus onto four 16-bit parallel busses
+ to the i.MX IPU CSIs.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx6-mipi-csi2
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: hsi_tx (the D-PHY clock)
+ - description: video_27m (D-PHY PLL reference clock)
+ - description: eim_podf;
+ - description: eim_podf
+ clock-names:
+ items:
+ - const: dphy
+ - const: ref
+ - const: pix
+
+ interrupts:
+ items:
+ - description: CSI-2 ERR1 irq
+ - description: CSI-2 ERR2 irq
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port node, single endpoint describing the CSI-2 transmitter.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ const: 0
+
+ data-lanes:
+ minItems: 1
+ items:
+ - const: 1
+ - const: 2
+ - const: 3
+ - const: 4
+
+ required:
+ - data-lanes
+
+patternProperties:
+ '^port@[1-4]$':
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ ports 1 through 4 are output ports connecting with parallel bus sink
+ endpoint nodes and correspond to the four MIPI CSI-2 virtual channel
+ outputs.
+
+ properties:
+ endpoint@0:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ endpoint@1:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx6qdl-clock.h>
+
+ mipi@21dc000 {
+ compatible = "fsl,imx6-mipi-csi2";
+ reg = <0x021dc000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clks IMX6QDL_CLK_HSI_TX>,
+ <&clks IMX6QDL_CLK_VIDEO_27M>,
+ <&clks IMX6QDL_CLK_EIM_PODF>;
+ clock-names = "dphy", "ref", "pix";
+
+ port@0 {
+ reg = <0>;
+
+ endpoint {
+ remote-endpoint = <&ov5640_to_mipi_csi2>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&ipu1_csi0_mux_from_mipi_vc0>;
+ };
+
+ endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&ipu1_csi1_mux_from_mipi_vc0>;
+ };
+ };
+ };
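
The example above wires up only virtual channel 0. As a hedged sketch of what the gasket's remaining outputs described in this binding look like, an additional output port for virtual channel 1 might be added as follows; the `ipu1_csi0_mux_from_mipi_vc1` endpoint label is an assumption.

    /* Hypothetical extra output port: MIPI CSI-2 virtual channel 1 */
    port@2 {
        reg = <2>;
        #address-cells = <1>;
        #size-cells = <0>;

        endpoint@0 {
            reg = <0>;
            remote-endpoint = <&ipu1_csi0_mux_from_mipi_vc1>;
        };
    };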
diff --git a/Documentation/devicetree/bindings/media/i2c/ad5820.txt b/Documentation/devicetree/bindings/media/i2c/ad5820.txt
deleted file mode 100644
index 5764cbedf9b7..000000000000
--- a/Documentation/devicetree/bindings/media/i2c/ad5820.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Analog Devices AD5820 autofocus coil
-
-Required Properties:
-
- - compatible: Must contain one of:
- - "adi,ad5820"
- - "adi,ad5821"
- - "adi,ad5823"
-
- - reg: I2C slave address
-
- - VANA-supply: supply of voltage for VANA pin
-
-Optional properties:
-
- - enable-gpios : GPIO spec for the XSHUTDOWN pin. The XSHUTDOWN signal is
-active low, a high level on the pin enables the device.
-
-Example:
-
- ad5820: coil@c {
- compatible = "adi,ad5820";
- reg = <0x0c>;
-
- VANA-supply = <&vaux4>;
- enable-gpios = <&msmgpio 26 GPIO_ACTIVE_HIGH>;
- };
-
diff --git a/Documentation/devicetree/bindings/media/i2c/adi,ad5820.yaml b/Documentation/devicetree/bindings/media/i2c/adi,ad5820.yaml
new file mode 100644
index 000000000000..0c8f24f692ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/adi,ad5820.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/adi,ad5820.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD5820 autofocus coil
+
+maintainers:
+ - Pavel Machek <pavel@ucw.cz>
+
+description:
+ The AD5820 is a current sink driver designed for precise control of
+ voice coil motors (VCMs) in camera autofocus systems.
+
+properties:
+ compatible:
+ enum:
+ - adi,ad5820
+ - adi,ad5821
+ - adi,ad5823
+
+ reg:
+ maxItems: 1
+
+ enable-gpios:
+ maxItems: 1
+ description:
+ GPIO spec for the XSHUTDOWN pin. The XSHUTDOWN signal is active low;
+ a high level on the pin enables the device.
+
+ VANA-supply:
+ description: supply of voltage for VANA pin
+
+required:
+ - compatible
+ - reg
+ - VANA-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ coil@c {
+ compatible = "adi,ad5820";
+ reg = <0x0c>;
+
+ enable-gpios = <&msmgpio 26 GPIO_ACTIVE_HIGH>;
+ VANA-supply = <&vaux4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/adp1653.txt b/Documentation/devicetree/bindings/media/i2c/adi,adp1653.txt
index 4cce0de40ee9..4cce0de40ee9 100644
--- a/Documentation/devicetree/bindings/media/i2c/adp1653.txt
+++ b/Documentation/devicetree/bindings/media/i2c/adi,adp1653.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7180.yaml b/Documentation/devicetree/bindings/media/i2c/adi,adv7180.yaml
index 9ee1483775f6..dee8ce7cb7ba 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv7180.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/adi,adv7180.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/media/i2c/adv7180.yaml#
+$id: http://devicetree.org/schemas/media/i2c/adi,adv7180.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analog Devices ADV7180 analog video decoder family
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7343.txt b/Documentation/devicetree/bindings/media/i2c/adi,adv7343.txt
index 5653bc2428b8..5653bc2428b8 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv7343.txt
+++ b/Documentation/devicetree/bindings/media/i2c/adi,adv7343.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/adv748x.yaml b/Documentation/devicetree/bindings/media/i2c/adi,adv748x.yaml
index d6353081402b..254987350321 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv748x.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/adi,adv748x.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/media/i2c/adv748x.yaml#
+$id: http://devicetree.org/schemas/media/i2c/adi,adv748x.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analog Devices ADV748X video decoder with HDMI receiver
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7604.yaml b/Documentation/devicetree/bindings/media/i2c/adi,adv7604.yaml
index 7589d377c686..6c403003cdda 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv7604.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/adi,adv7604.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/media/i2c/adv7604.yaml#
+$id: http://devicetree.org/schemas/media/i2c/adi,adv7604.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analog Devices ADV7604/10/11/12 video decoder with HDMI receiver
diff --git a/Documentation/devicetree/bindings/media/i2c/mt9v032.txt b/Documentation/devicetree/bindings/media/i2c/aptina,mt9v032.txt
index 100f0ae43269..100f0ae43269 100644
--- a/Documentation/devicetree/bindings/media/i2c/mt9v032.txt
+++ b/Documentation/devicetree/bindings/media/i2c/aptina,mt9v032.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/max2175.txt b/Documentation/devicetree/bindings/media/i2c/maxim,max2175.txt
index 02b4e9cd7b1b..02b4e9cd7b1b 100644
--- a/Documentation/devicetree/bindings/media/i2c/max2175.txt
+++ b/Documentation/devicetree/bindings/media/i2c/maxim,max2175.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/mt9m111.txt b/Documentation/devicetree/bindings/media/i2c/micron,mt9m111.txt
index d0bed6fa901a..d0bed6fa901a 100644
--- a/Documentation/devicetree/bindings/media/i2c/mt9m111.txt
+++ b/Documentation/devicetree/bindings/media/i2c/micron,mt9m111.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/tda1997x.txt b/Documentation/devicetree/bindings/media/i2c/nxp,tda1997x.txt
index e76167999d76..e76167999d76 100644
--- a/Documentation/devicetree/bindings/media/i2c/tda1997x.txt
+++ b/Documentation/devicetree/bindings/media/i2c/nxp,tda1997x.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/mt9m001.txt b/Documentation/devicetree/bindings/media/i2c/onnn,mt9m001.txt
index c920552b03ef..c920552b03ef 100644
--- a/Documentation/devicetree/bindings/media/i2c/mt9m001.txt
+++ b/Documentation/devicetree/bindings/media/i2c/onnn,mt9m001.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov02e10.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov02e10.yaml
new file mode 100644
index 000000000000..03d476bcf805
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov02e10.yaml
@@ -0,0 +1,152 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (c) 2025 Linaro Ltd.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/ovti,ov02e10.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Omnivision OV02E10 CMOS Sensor
+
+maintainers:
+ - Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+
+description: |
+ The Omnivision OV02E10 and OV02C10 sensors are 2-megapixel CMOS image sensors which support:
+ - Automatic black level calibration (ABLC)
+ - Programmable controls for frame rate, mirror and flip, binning, cropping
+ and windowing
+ - OV02C10
+ - 10 bit RAW Bayer 1920x1080 60 fps 2-lane @ 800 Mbps/lane
+ - 10 bit RAW Bayer 1920x1080 60 fps 1-lane @ 1500 Mbps/lane
+ - 10 bit RAW Bayer 1280x720 60 fps cropped 1-lane @ 960 Mbps/lane
+ - 10 bit RGB/BW 640x480 60 fps bin2 or skip2 1-lane @ 800 Mbps/lane
+ - 10 bit RGB/BW 480x270 60 fps bin4 or skip4 1-lane @ 800 Mbps/lane
+ - OV02E10
+ - 10 bit RAW Bayer 1920x1088 60 fps 2-lane @ 720 Mbps/lane
+ - 10 bit RAW Bayer 1280x1080 60 fps 2-lane @ 720 Mbps/lane
+ - 10 bit Quad Bayer 960x540 60 fps 2-lane 360 Mbps/lane
+ - 8 bit Quad Bayer 480x270 1/3/5/10 fps sub2 288 Mbps/lane
+ - 8 bit Quad Bayer 232x132 1/3/5/10 fps sub4 144 Mbps/lane
+ - Dynamic defect pixel cancellation
+ - Standard SCCB command interface
+
+allOf:
+ - $ref: /schemas/media/video-interface-devices.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ovti,ov02c10
+ - ovti,ov02e10
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ avdd-supply:
+ description: Analogue circuit voltage supply.
+
+ dovdd-supply:
+ description: I/O circuit voltage supply.
+
+ dvdd-supply:
+ description: Digital circuit voltage supply.
+
+ reset-gpios:
+ description: Active low GPIO connected to XSHUTDOWN pad of the sensor.
+
+ port:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ additionalProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ additionalProperties: false
+
+ properties:
+ data-lanes:
+ items:
+ - const: 1
+ - const: 2
+ link-frequencies: true
+ remote-endpoint: true
+
+ required:
+ - data-lanes
+ - link-frequencies
+ - remote-endpoint
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - port
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ov02e10: camera@10 {
+ compatible = "ovti,ov02e10";
+ reg = <0x10>;
+
+ reset-gpios = <&tlmm 237 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_rgb_default>;
+
+ clocks = <&ov02e10_clk>;
+
+ assigned-clocks = <&ov02e10_clk>;
+ assigned-clock-parents = <&ov02e10_clk_parent>;
+ assigned-clock-rates = <19200000>;
+
+ avdd-supply = <&vreg_l7b_2p8>;
+ dvdd-supply = <&vreg_l7b_1p8>;
+ dovdd-supply = <&vreg_l3m_1p8>;
+
+ port {
+ ov02e10_ep: endpoint {
+ remote-endpoint = <&csiphy4_ep>;
+ data-lanes = <1 2>;
+ link-frequencies = /bits/ 64 <400000000>;
+ };
+ };
+ };
+
+ ov02c10: camera@36 {
+ compatible = "ovti,ov02c10";
+ reg = <0x36>;
+
+ reset-gpios = <&tlmm 237 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_rgb_default>;
+
+ clocks = <&ov02c10_clk>;
+
+ assigned-clocks = <&ov02c10_clk>;
+ assigned-clock-parents = <&ov02c10_clk_parent>;
+ assigned-clock-rates = <19200000>;
+
+ avdd-supply = <&vreg_l7b_2p8>;
+ dvdd-supply = <&vreg_l7b_1p8>;
+ dovdd-supply = <&vreg_l3m_1p8>;
+
+ port {
+ ov02c10_ep: endpoint {
+ remote-endpoint = <&csiphy4_ep>;
+ data-lanes = <1 2>;
+ link-frequencies = /bits/ 64 <400000000>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/i2c/ov2640.txt b/Documentation/devicetree/bindings/media/i2c/ovti,ov2640.txt
index 989ce6cb6ac3..989ce6cb6ac3 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov2640.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov2640.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/ov2659.txt b/Documentation/devicetree/bindings/media/i2c/ovti,ov2659.txt
index 92989a619f29..92989a619f29 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov2659.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov2659.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/ov7670.txt b/Documentation/devicetree/bindings/media/i2c/ovti,ov7670.txt
index 2c972a56f3cb..2c972a56f3cb 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov7670.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov7670.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/ov7740.txt b/Documentation/devicetree/bindings/media/i2c/ovti,ov7740.txt
index af781c3a5f0e..af781c3a5f0e 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov7740.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov7740.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/ov9650.txt b/Documentation/devicetree/bindings/media/i2c/ovti,ov9650.txt
index 506dfc52872a..506dfc52872a 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov9650.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov9650.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/imx219.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx219.yaml
index 07d088cf66e0..38c3759bcd9f 100644
--- a/Documentation/devicetree/bindings/media/i2c/imx219.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx219.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/media/i2c/imx219.yaml#
+$id: http://devicetree.org/schemas/media/i2c/sony,imx219.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Sony 1/4.0-Inch 8Mpixel CMOS Digital Image Sensor
@@ -16,6 +16,9 @@ description: |-
Image data is sent through MIPI CSI-2, which is configured as either 2 or
4 data lanes.
+allOf:
+ - $ref: /schemas/media/video-interface-devices.yaml#
+
properties:
compatible:
const: sony,imx219
@@ -79,7 +82,7 @@ required:
- VDDL-supply
- port
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml
index fa69bd21c8da..990acf89af8f 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml
@@ -136,7 +136,7 @@ examples:
port {
imx290_ep: endpoint {
data-lanes = <1 2 3 4>;
- link-frequencies = /bits/ 64 <445500000>;
+ link-frequencies = /bits/ 64 <222750000 148500000>;
remote-endpoint = <&csiphy0_ep>;
};
};
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml
index 34962c5c7006..7c11e871dca6 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Sony IMX415 CMOS Image Sensor
maintainers:
- - Michael Riesch <michael.riesch@wolfvision.net>
+ - Michael Riesch <michael.riesch@collabora.com>
description: |-
The Sony IMX415 is a diagonal 6.4 mm (Type 1/2.8) CMOS active pixel type
diff --git a/Documentation/devicetree/bindings/media/i2c/st,vd55g1.yaml b/Documentation/devicetree/bindings/media/i2c/st,vd55g1.yaml
new file mode 100644
index 000000000000..3c071e6fbea6
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/st,vd55g1.yaml
@@ -0,0 +1,133 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (c) 2025 STMicroelectronics SA.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/st,vd55g1.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics VD55G1 Global Shutter Image Sensor
+
+maintainers:
+ - Benjamin Mugnier <benjamin.mugnier@foss.st.com>
+ - Sylvain Petinot <sylvain.petinot@foss.st.com>
+
+description: |-
+ The STMicroelectronics VD55G1 is a global shutter image sensor with an active
+ array size of 804H x 704V. It is programmable through an I2C interface. The I2C
+ address is fixed to 0x10.
+
+ Image data is sent through MIPI CSI-2, which is configured as only 1 data
+ lane. The sensor provides 4 GPIOs that can be used for external LED signals
+ (synchronized with sensor integration periods).
+
+allOf:
+ - $ref: /schemas/media/video-interface-devices.yaml#
+
+properties:
+ compatible:
+ const: st,vd55g1
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ vcore-supply:
+ description: Digital core power supply (1.15V)
+
+ vddio-supply:
+ description: Digital IO power supply (1.8V)
+
+ vana-supply:
+ description: Analog power supply (2.8V)
+
+ reset-gpios:
+ description: Sensor reset active low GPIO (XSHUTDOWN)
+ maxItems: 1
+
+ st,leds:
+ description:
+ List sensor's GPIOs used to control strobe light sources during exposure
+ time. The numbers identify the sensor pins to which the illumination
+ system is connected. GPIOs are active-high.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 4
+ items:
+ minimum: 0
+ maximum: 3
+
+ port:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ additionalProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ items:
+ - const: 1
+
+ link-frequencies:
+ maxItems: 1
+ items:
+ minimum: 125000000
+ maximum: 600000000
+
+ lane-polarities:
+ minItems: 1
+ maxItems: 2
+
+ required:
+ - data-lanes
+ - link-frequencies
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - vcore-supply
+ - vddio-supply
+ - vana-supply
+ - reset-gpios
+ - port
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ camera-sensor@10 {
+ compatible = "st,vd55g1";
+ reg = <0x10>;
+
+ clocks = <&camera_clk_12M>;
+
+ vcore-supply = <&camera_vcore_v1v15>;
+ vddio-supply = <&camera_vddio_v1v8>;
+ vana-supply = <&camera_vana_v2v8>;
+
+ reset-gpios = <&gpio 5 GPIO_ACTIVE_LOW>;
+ st,leds = <2>;
+
+ orientation = <2>;
+ rotation = <0>;
+
+ port {
+ endpoint {
+ data-lanes = <1>;
+ link-frequencies = /bits/ 64 <600000000>;
+ remote-endpoint = <&csiphy0_ep>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/st,vd56g3.yaml b/Documentation/devicetree/bindings/media/i2c/st,vd56g3.yaml
new file mode 100644
index 000000000000..c6673b8539db
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/st,vd56g3.yaml
@@ -0,0 +1,139 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (c) 2024 STMicroelectronics SA.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/st,vd56g3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics VD56G3 Global Shutter Image Sensor
+
+maintainers:
+ - Benjamin Mugnier <benjamin.mugnier@foss.st.com>
+ - Sylvain Petinot <sylvain.petinot@foss.st.com>
+
+description: |-
+ The STMicroelectronics VD56G3 is a 1.5 M pixel global shutter image sensor
+ with an active array size of 1124 x 1364 (portrait orientation). It is
+ programmable through I2C; the address is fixed to 0x10. The sensor output is
+ available via CSI-2, which is configured as either 1 or 2 data lanes. The
+ sensor provides 8 GPIOs that can be used for external LED signals
+ (synchronized with sensor integration periods).
+
+allOf:
+ - $ref: /schemas/media/video-interface-devices.yaml#
+
+properties:
+ compatible:
+ enum:
+ - st,vd56g3
+ - st,vd66gy
+ description:
+ Two variants are available; VD56G3 is a monochrome sensor while VD66GY
+ is a colour variant.
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ vcore-supply:
+ description: Digital core power supply (1.15V)
+
+ vddio-supply:
+ description: Digital IO power supply (1.8V)
+
+ vana-supply:
+ description: Analog power supply (2.8V)
+
+ reset-gpios:
+ description: Sensor reset active low GPIO (XSHUTDOWN)
+ maxItems: 1
+
+ st,leds:
+ description:
+ List sensor's GPIOs used to control strobe light sources during exposure
+ time. The numbers identify the sensor pins to which the illumination system
+ is connected. GPIOs are active-high.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8
+ items:
+ minimum: 0
+ maximum: 7
+
+ port:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ additionalProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ maxItems: 2
+ items:
+ enum: [1, 2]
+
+ link-frequencies:
+ maxItems: 1
+ items:
+ enum: [402000000, 750000000]
+
+ lane-polarities:
+ minItems: 1
+ maxItems: 3
+ description: Any lane can be inverted or not.
+
+ required:
+ - data-lanes
+ - link-frequencies
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - vcore-supply
+ - vddio-supply
+ - vana-supply
+ - reset-gpios
+ - port
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ camera-sensor@10 {
+ compatible = "st,vd56g3";
+ reg = <0x10>;
+
+ clocks = <&camera_clk_12M>;
+
+ vcore-supply = <&camera_vcore_v1v15>;
+ vddio-supply = <&camera_vddio_v1v8>;
+ vana-supply = <&camera_vana_v2v8>;
+
+ reset-gpios = <&gpio 5 GPIO_ACTIVE_LOW>;
+ st,leds = <6>;
+
+ orientation = <2>;
+ rotation = <0>;
+
+ port {
+ endpoint {
+ data-lanes = <1 2>;
+ link-frequencies = /bits/ 64 <402000000>;
+ remote-endpoint = <&csiphy0_ep>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/ti,ds90ub953.yaml b/Documentation/devicetree/bindings/media/i2c/ti,ds90ub953.yaml
index 2030366994d1..2e129bf573b7 100644
--- a/Documentation/devicetree/bindings/media/i2c/ti,ds90ub953.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ti,ds90ub953.yaml
@@ -38,6 +38,13 @@ properties:
'#clock-cells':
const: 0
+ reg:
+ maxItems: 1
+ description:
+ The strap I2C address of the serializer. Can be used by the deserializer
+ to communicate over the back-channel when the forward-channel is not yet
+ active.
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -81,51 +88,57 @@ examples:
- |
#include <dt-bindings/gpio/gpio.h>
- serializer {
- compatible = "ti,ds90ub953-q1";
+ link {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ serializer@18 {
+ compatible = "ti,ds90ub953-q1";
+ reg = <0x18>;
- gpio-controller;
- #gpio-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
- #clock-cells = <0>;
+ #clock-cells = <0>;
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
- port@0 {
- reg = <0>;
- ub953_in: endpoint {
- clock-lanes = <0>;
- data-lanes = <1 2 3 4>;
- remote-endpoint = <&sensor_out>;
+ port@0 {
+ reg = <0>;
+ ub953_in: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&sensor_out>;
+ };
};
- };
- port@1 {
- reg = <1>;
- endpoint {
- remote-endpoint = <&deser_fpd_in>;
+ port@1 {
+ reg = <1>;
+ endpoint {
+ remote-endpoint = <&deser_fpd_in>;
+ };
};
};
- };
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
- sensor@1a {
- compatible = "sony,imx274";
- reg = <0x1a>;
+ sensor@1a {
+ compatible = "sony,imx274";
+ reg = <0x1a>;
- reset-gpios = <&serializer 0 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&serializer 0 GPIO_ACTIVE_LOW>;
- clocks = <&serializer>;
- clock-names = "inck";
+ clocks = <&serializer>;
+ clock-names = "inck";
- port {
- sensor_out: endpoint {
- remote-endpoint = <&ub953_in>;
+ port {
+ sensor_out: endpoint {
+ remote-endpoint = <&ub953_in>;
+ };
};
};
};
diff --git a/Documentation/devicetree/bindings/media/i2c/ti,ds90ub960.yaml b/Documentation/devicetree/bindings/media/i2c/ti,ds90ub960.yaml
index 0b71e6f911a8..4dcbd2b039a5 100644
--- a/Documentation/devicetree/bindings/media/i2c/ti,ds90ub960.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ti,ds90ub960.yaml
@@ -68,6 +68,12 @@ properties:
description: The link number
maxItems: 1
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
i2c-alias:
$ref: /schemas/types.yaml#/definitions/uint32
description:
@@ -107,7 +113,8 @@ properties:
maximum: 14
description: Manual EQ level
- serializer:
+ patternProperties:
+ '^serializer(@[0-9a-f]+)*$':
type: object
description: FPD-Link Serializer node
@@ -115,7 +122,6 @@ properties:
- reg
- i2c-alias
- ti,rx-mode
- - serializer
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -309,13 +315,17 @@ examples:
/* Link 0 has DS90UB953 serializer and IMX274 sensor */
link@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
reg = <0>;
i2c-alias = <0x44>;
ti,rx-mode = <3>;
- serializer1: serializer {
+ serializer1: serializer@30 {
compatible = "ti,ds90ub953-q1";
+ reg = <0x30>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/media/i2c/ths8200.txt b/Documentation/devicetree/bindings/media/i2c/ti,ths8200.txt
index 285f6ae7dfa9..285f6ae7dfa9 100644
--- a/Documentation/devicetree/bindings/media/i2c/ths8200.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ti,ths8200.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/tvp514x.txt b/Documentation/devicetree/bindings/media/i2c/ti,tvp514x.txt
index 46752cc71f2e..46752cc71f2e 100644
--- a/Documentation/devicetree/bindings/media/i2c/tvp514x.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ti,tvp514x.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/tvp5150.txt b/Documentation/devicetree/bindings/media/i2c/ti,tvp5150.txt
index 94b908ace53c..94b908ace53c 100644
--- a/Documentation/devicetree/bindings/media/i2c/tvp5150.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ti,tvp5150.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/tvp7002.txt b/Documentation/devicetree/bindings/media/i2c/ti,tvp7002.txt
index 5f28b5d9abcc..5f28b5d9abcc 100644
--- a/Documentation/devicetree/bindings/media/i2c/tvp7002.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ti,tvp7002.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/tc358743.txt b/Documentation/devicetree/bindings/media/i2c/toshiba,tc358743.txt
index 59102edcf01e..59102edcf01e 100644
--- a/Documentation/devicetree/bindings/media/i2c/tc358743.txt
+++ b/Documentation/devicetree/bindings/media/i2c/toshiba,tc358743.txt
diff --git a/Documentation/devicetree/bindings/media/imx.txt b/Documentation/devicetree/bindings/media/imx.txt
deleted file mode 100644
index 77f4b0a7fd2b..000000000000
--- a/Documentation/devicetree/bindings/media/imx.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-Freescale i.MX Media Video Device
-=================================
-
-Video Media Controller node
----------------------------
-
-This is the media controller node for video capture support. It is a
-virtual device that lists the camera serial interface nodes that the
-media device will control.
-
-Required properties:
-- compatible : "fsl,imx-capture-subsystem";
-- ports : Should contain a list of phandles pointing to camera
- sensor interface ports of IPU devices
-
-example:
-
-capture-subsystem {
- compatible = "fsl,imx-capture-subsystem";
- ports = <&ipu1_csi0>, <&ipu1_csi1>;
-};
-
-
-mipi_csi2 node
---------------
-
-This is the device node for the MIPI CSI-2 Receiver core in the i.MX
-SoC. This is a Synopsys Designware MIPI CSI-2 host controller core
-combined with a D-PHY core mixed into the same register block. In
-addition this device consists of an i.MX-specific "CSI2IPU gasket"
-glue logic, also controlled from the same register block. The CSI2IPU
-gasket demultiplexes the four virtual channel streams from the host
-controller's 32-bit output image bus onto four 16-bit parallel busses
-to the i.MX IPU CSIs.
-
-Required properties:
-- compatible : "fsl,imx6-mipi-csi2";
-- reg : physical base address and length of the register set;
-- clocks : the MIPI CSI-2 receiver requires three clocks: hsi_tx
- (the D-PHY clock), video_27m (D-PHY PLL reference
- clock), and eim_podf;
-- clock-names : must contain "dphy", "ref", "pix";
-- port@* : five port nodes must exist, containing endpoints
- connecting to the source and sink devices according to
- of_graph bindings. The first port is an input port,
- connecting with a MIPI CSI-2 source, and ports 1
- through 4 are output ports connecting with parallel
- bus sink endpoint nodes and correspond to the four
- MIPI CSI-2 virtual channel outputs.
-
-Optional properties:
-- interrupts : must contain two level-triggered interrupts,
- in order: 100 and 101;
diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-fg.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-fg.yaml
index 03f31b009085..40fda59fa8a8 100644
--- a/Documentation/devicetree/bindings/media/mediatek,mdp3-fg.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-fg.yaml
@@ -16,8 +16,12 @@ description:
properties:
compatible:
- enum:
- - mediatek,mt8195-mdp3-fg
+ oneOf:
+ - enum:
+ - mediatek,mt8195-mdp3-fg
+ - items:
+ - const: mediatek,mt8188-mdp3-fg
+ - const: mediatek,mt8195-mdp3-fg
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-hdr.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-hdr.yaml
index d4609bba6578..d9f926c20220 100644
--- a/Documentation/devicetree/bindings/media/mediatek,mdp3-hdr.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-hdr.yaml
@@ -16,8 +16,12 @@ description:
properties:
compatible:
- enum:
- - mediatek,mt8195-mdp3-hdr
+ oneOf:
+ - enum:
+ - mediatek,mt8195-mdp3-hdr
+ - items:
+ - const: mediatek,mt8188-mdp3-hdr
+ - const: mediatek,mt8195-mdp3-hdr
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-rsz.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-rsz.yaml
index f5676bec4326..8124c39d73e9 100644
--- a/Documentation/devicetree/bindings/media/mediatek,mdp3-rsz.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-rsz.yaml
@@ -20,6 +20,7 @@ properties:
- mediatek,mt8183-mdp3-rsz
- items:
- enum:
+ - mediatek,mt8188-mdp3-rsz
- mediatek,mt8195-mdp3-rsz
- const: mediatek,mt8183-mdp3-rsz
diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-stitch.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-stitch.yaml
index d815bea29154..1d8e7e202c42 100644
--- a/Documentation/devicetree/bindings/media/mediatek,mdp3-stitch.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-stitch.yaml
@@ -16,8 +16,12 @@ description:
properties:
compatible:
- enum:
- - mediatek,mt8195-mdp3-stitch
+ oneOf:
+ - enum:
+ - mediatek,mt8195-mdp3-stitch
+ - items:
+ - const: mediatek,mt8188-mdp3-stitch
+ - const: mediatek,mt8195-mdp3-stitch
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-tcc.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-tcc.yaml
index 14ea556d4f82..6cff7c073ce4 100644
--- a/Documentation/devicetree/bindings/media/mediatek,mdp3-tcc.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-tcc.yaml
@@ -17,8 +17,12 @@ description:
properties:
compatible:
- enum:
- - mediatek,mt8195-mdp3-tcc
+ oneOf:
+ - enum:
+ - mediatek,mt8195-mdp3-tcc
+ - items:
+ - const: mediatek,mt8188-mdp3-tcc
+ - const: mediatek,mt8195-mdp3-tcc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-tdshp.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-tdshp.yaml
index 8ab7f2d8e148..cdfa27324738 100644
--- a/Documentation/devicetree/bindings/media/mediatek,mdp3-tdshp.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-tdshp.yaml
@@ -16,8 +16,12 @@ description:
properties:
compatible:
- enum:
- - mediatek,mt8195-mdp3-tdshp
+ oneOf:
+ - enum:
+ - mediatek,mt8195-mdp3-tdshp
+ - items:
+ - const: mediatek,mt8188-mdp3-tdshp
+ - const: mediatek,mt8195-mdp3-tdshp
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml
index 53a679338402..b6269f4f9fd6 100644
--- a/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml
@@ -20,6 +20,7 @@ properties:
- mediatek,mt8183-mdp3-wrot
- items:
- enum:
+ - mediatek,mt8188-mdp3-wrot
- mediatek,mt8195-mdp3-wrot
- const: mediatek,mt8183-mdp3-wrot
diff --git a/Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml b/Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml
index 3469a43f00d4..7c8e0a905d89 100644
--- a/Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml
@@ -93,6 +93,10 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
@@ -112,6 +116,10 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
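
The `bus-type` additions repeated across the CAMSS schemas below all describe the same endpoint-level property. A hedged sketch of a sensor-facing CSIPHY endpoint selecting D-PHY operation is shown here; the port index, endpoint labels and lane layout are assumptions, and the numeric values mirror the MEDIA_BUS_TYPE_* constants noted in the schema comments (1 = C-PHY, 4 = D-PHY).

    /* The symbolic constants are normally provided by
     * <dt-bindings/media/video-interfaces.h>; the raw value is used here. */
    port@0 {
        reg = <0>;
        csiphy0_ep: endpoint {
            bus-type = <4>;          /* MEDIA_BUS_TYPE_CSI2_DPHY */
            clock-lanes = <7>;
            data-lanes = <0 1 2 3>;
            remote-endpoint = <&sensor_out>;
        };
    };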
diff --git a/Documentation/devicetree/bindings/media/qcom,msm8953-camss.yaml b/Documentation/devicetree/bindings/media/qcom,msm8953-camss.yaml
index 8856fba385b1..6d776b0ca711 100644
--- a/Documentation/devicetree/bindings/media/qcom,msm8953-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,msm8953-camss.yaml
@@ -112,6 +112,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
@@ -131,6 +136,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
@@ -150,6 +160,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
diff --git a/Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml b/Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml
index 644646de338a..a2025952fe95 100644
--- a/Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml
@@ -115,6 +115,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
@@ -134,6 +139,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
@@ -153,6 +163,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
@@ -172,6 +187,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
diff --git a/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml b/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml
index 83c4a5d95f02..bfd8b1ad4731 100644
--- a/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml
@@ -18,7 +18,12 @@ allOf:
properties:
compatible:
- const: qcom,sc7180-venus
+ oneOf:
+ - items:
+ - enum:
+ - qcom,qcs615-venus
+ - const: qcom,sc7180-venus
+ - const: qcom,sc7180-venus
power-domains:
minItems: 2
diff --git a/Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml
index 9936f0132417..d195f1bfb23d 100644
--- a/Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml
@@ -143,6 +143,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- clock-lanes
- data-lanes
@@ -166,6 +171,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- clock-lanes
- data-lanes
@@ -189,6 +199,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- clock-lanes
- data-lanes
@@ -212,6 +227,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- clock-lanes
- data-lanes
diff --git a/Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml
index 68d8670557f5..6e6ad8390e44 100644
--- a/Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml
@@ -121,6 +121,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
@@ -140,6 +145,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
@@ -159,6 +169,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
@@ -178,6 +193,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
diff --git a/Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml
index 289494f561e5..82bf4689d330 100644
--- a/Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml
@@ -108,6 +108,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
@@ -127,6 +132,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
@@ -146,6 +156,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
@@ -165,6 +180,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- data-lanes
diff --git a/Documentation/devicetree/bindings/media/qcom,sm8250-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sm8250-camss.yaml
index a372d991e652..ebf68ff4ab96 100644
--- a/Documentation/devicetree/bindings/media/qcom,sm8250-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sm8250-camss.yaml
@@ -128,6 +128,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- clock-lanes
- data-lanes
@@ -151,6 +156,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- clock-lanes
- data-lanes
@@ -174,6 +184,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- clock-lanes
- data-lanes
@@ -197,6 +212,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- clock-lanes
- data-lanes
@@ -220,6 +240,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- clock-lanes
- data-lanes
@@ -243,6 +268,11 @@ properties:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- clock-lanes
- data-lanes
diff --git a/Documentation/devicetree/bindings/media/qcom,sm8550-iris.yaml b/Documentation/devicetree/bindings/media/qcom,sm8550-iris.yaml
index e424ea84c211..c79bf2101812 100644
--- a/Documentation/devicetree/bindings/media/qcom,sm8550-iris.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sm8550-iris.yaml
@@ -14,12 +14,17 @@ description:
The iris video processing unit is a video encode and decode accelerator
present on Qualcomm platforms.
-allOf:
- - $ref: qcom,venus-common.yaml#
-
properties:
compatible:
- const: qcom,sm8550-iris
+ oneOf:
+ - items:
+ - enum:
+ - qcom,sa8775p-iris
+ - const: qcom,sm8550-iris
+ - enum:
+ - qcom,qcs8300-iris
+ - qcom,sm8550-iris
+ - qcom,sm8650-iris
power-domains:
maxItems: 4
@@ -49,11 +54,15 @@ properties:
- const: video-mem
resets:
- maxItems: 1
+ minItems: 1
+ maxItems: 3
reset-names:
+ minItems: 1
items:
- const: bus
+ - const: xo
+ - const: core
iommus:
maxItems: 2
@@ -75,6 +84,26 @@ required:
- iommus
- dma-coherent
+allOf:
+ - $ref: qcom,venus-common.yaml#
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,sm8650-iris
+ then:
+ properties:
+ resets:
+ minItems: 3
+ reset-names:
+ minItems: 3
+ else:
+ properties:
+ resets:
+ maxItems: 1
+ reset-names:
+ maxItems: 1
+
unevaluatedProperties: false
examples:
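
The if/then block added above encodes the reset wiring: qcom,sm8650-iris takes three resets named "bus", "xo" and "core", while the other variants keep the single "bus" reset. A hedged sketch of the sm8650 case follows; the node name, unit address and the BUS_RESET/XO_RESET/CORE_RESET specifiers are placeholders, not real dt-binding constants.

    video-codec@aa00000 {
        compatible = "qcom,sm8650-iris";
        /* other required properties omitted for brevity */
        resets = <&gcc BUS_RESET>,      /* placeholder specifiers, */
                 <&gcc XO_RESET>,       /* not real dt-binding     */
                 <&videocc CORE_RESET>; /* constants               */
        reset-names = "bus", "xo", "core";
    };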
diff --git a/Documentation/devicetree/bindings/media/qcom,x1e80100-camss.yaml b/Documentation/devicetree/bindings/media/qcom,x1e80100-camss.yaml
new file mode 100644
index 000000000000..113565cf2a99
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/qcom,x1e80100-camss.yaml
@@ -0,0 +1,367 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/qcom,x1e80100-camss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm X1E80100 Camera Subsystem (CAMSS)
+
+maintainers:
+ - Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+
+description:
+ The CAMSS IP is a CSI decoder and ISP present on Qualcomm platforms.
+
+properties:
+ compatible:
+ const: qcom,x1e80100-camss
+
+ reg:
+ maxItems: 17
+
+ reg-names:
+ items:
+ - const: csid0
+ - const: csid1
+ - const: csid2
+ - const: csid_lite0
+ - const: csid_lite1
+ - const: csid_wrapper
+ - const: csiphy0
+ - const: csiphy1
+ - const: csiphy2
+ - const: csiphy4
+ - const: csitpg0
+ - const: csitpg1
+ - const: csitpg2
+ - const: vfe0
+ - const: vfe1
+ - const: vfe_lite0
+ - const: vfe_lite1
+
+ clocks:
+ maxItems: 29
+
+ clock-names:
+ items:
+ - const: camnoc_nrt_axi
+ - const: camnoc_rt_axi
+ - const: core_ahb
+ - const: cpas_ahb
+ - const: cpas_fast_ahb
+ - const: cpas_vfe0
+ - const: cpas_vfe1
+ - const: cpas_vfe_lite
+ - const: cphy_rx_clk_src
+ - const: csid
+ - const: csid_csiphy_rx
+ - const: csiphy0
+ - const: csiphy0_timer
+ - const: csiphy1
+ - const: csiphy1_timer
+ - const: csiphy2
+ - const: csiphy2_timer
+ - const: csiphy4
+ - const: csiphy4_timer
+ - const: gcc_axi_hf
+ - const: gcc_axi_sf
+ - const: vfe0
+ - const: vfe0_fast_ahb
+ - const: vfe1
+ - const: vfe1_fast_ahb
+ - const: vfe_lite
+ - const: vfe_lite_ahb
+ - const: vfe_lite_cphy_rx
+ - const: vfe_lite_csid
+
+ interrupts:
+ maxItems: 13
+
+ interrupt-names:
+ items:
+ - const: csid0
+ - const: csid1
+ - const: csid2
+ - const: csid_lite0
+ - const: csid_lite1
+ - const: csiphy0
+ - const: csiphy1
+ - const: csiphy2
+ - const: csiphy4
+ - const: vfe0
+ - const: vfe1
+ - const: vfe_lite0
+ - const: vfe_lite1
+
+ interconnects:
+ maxItems: 4
+
+ interconnect-names:
+ items:
+ - const: ahb
+ - const: hf_mnoc
+ - const: sf_mnoc
+ - const: sf_icp_mnoc
+
+ iommus:
+ maxItems: 8
+
+ power-domains:
+ items:
+ - description: IFE0 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: IFE1 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: Titan Top GDSC - Titan ISP Block, Global Distributed Switch Controller.
+
+ power-domain-names:
+ items:
+ - const: ife0
+ - const: ife1
+ - const: top
+
+ vdd-csiphy-0p8-supply:
+ description:
+ Phandle to a 0.8V regulator supply to a PHY.
+
+ vdd-csiphy-1p2-supply:
+ description:
+ Phandle to a 1.2V regulator supply to a PHY.
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ description:
+ CSI input ports.
+
+ patternProperties:
+ "^port@[0-3]+$":
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+
+ description:
+ Input port for receiving CSI data from a CSIPHY.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - interconnects
+ - interconnect-names
+ - iommus
+ - power-domains
+ - power-domain-names
+ - vdd-csiphy-0p8-supply
+ - vdd-csiphy-1p2-supply
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/qcom,x1e80100-gcc.h>
+ #include <dt-bindings/clock/qcom,x1e80100-camcc.h>
+ #include <dt-bindings/interconnect/qcom,icc.h>
+ #include <dt-bindings/interconnect/qcom,x1e80100-rpmh.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ camss: isp@acb6000 {
+ compatible = "qcom,x1e80100-camss";
+
+ reg = <0 0x0acb7000 0 0x2000>,
+ <0 0x0acb9000 0 0x2000>,
+ <0 0x0acbb000 0 0x2000>,
+ <0 0x0acc6000 0 0x1000>,
+ <0 0x0acca000 0 0x1000>,
+ <0 0x0acb6000 0 0x1000>,
+ <0 0x0ace4000 0 0x1000>,
+ <0 0x0ace6000 0 0x1000>,
+ <0 0x0ace8000 0 0x1000>,
+ <0 0x0acec000 0 0x4000>,
+ <0 0x0acf6000 0 0x1000>,
+ <0 0x0acf7000 0 0x1000>,
+ <0 0x0acf8000 0 0x1000>,
+ <0 0x0ac62000 0 0x4000>,
+ <0 0x0ac71000 0 0x4000>,
+ <0 0x0acc7000 0 0x2000>,
+ <0 0x0accb000 0 0x2000>;
+
+ reg-names = "csid0",
+ "csid1",
+ "csid2",
+ "csid_lite0",
+ "csid_lite1",
+ "csid_wrapper",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "csiphy4",
+ "csitpg0",
+ "csitpg1",
+ "csitpg2",
+ "vfe0",
+ "vfe1",
+ "vfe_lite0",
+ "vfe_lite1";
+
+ clocks = <&camcc CAM_CC_CAMNOC_AXI_NRT_CLK>,
+ <&camcc CAM_CC_CAMNOC_AXI_RT_CLK>,
+ <&camcc CAM_CC_CORE_AHB_CLK>,
+ <&camcc CAM_CC_CPAS_AHB_CLK>,
+ <&camcc CAM_CC_CPAS_FAST_AHB_CLK>,
+ <&camcc CAM_CC_CPAS_IFE_0_CLK>,
+ <&camcc CAM_CC_CPAS_IFE_1_CLK>,
+ <&camcc CAM_CC_CPAS_IFE_LITE_CLK>,
+ <&camcc CAM_CC_CPHY_RX_CLK_SRC>,
+ <&camcc CAM_CC_CSID_CLK>,
+ <&camcc CAM_CC_CSID_CSIPHY_RX_CLK>,
+ <&camcc CAM_CC_CSIPHY0_CLK>,
+ <&camcc CAM_CC_CSI0PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY1_CLK>,
+ <&camcc CAM_CC_CSI1PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY2_CLK>,
+ <&camcc CAM_CC_CSI2PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY4_CLK>,
+ <&camcc CAM_CC_CSI4PHYTIMER_CLK>,
+ <&gcc GCC_CAMERA_HF_AXI_CLK>,
+ <&gcc GCC_CAMERA_SF_AXI_CLK>,
+ <&camcc CAM_CC_IFE_0_CLK>,
+ <&camcc CAM_CC_IFE_0_FAST_AHB_CLK>,
+ <&camcc CAM_CC_IFE_1_CLK>,
+ <&camcc CAM_CC_IFE_1_FAST_AHB_CLK>,
+ <&camcc CAM_CC_IFE_LITE_CLK>,
+ <&camcc CAM_CC_IFE_LITE_AHB_CLK>,
+ <&camcc CAM_CC_IFE_LITE_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_LITE_CSID_CLK>;
+
+ clock-names = "camnoc_nrt_axi",
+ "camnoc_rt_axi",
+ "core_ahb",
+ "cpas_ahb",
+ "cpas_fast_ahb",
+ "cpas_vfe0",
+ "cpas_vfe1",
+ "cpas_vfe_lite",
+ "cphy_rx_clk_src",
+ "csid",
+ "csid_csiphy_rx",
+ "csiphy0",
+ "csiphy0_timer",
+ "csiphy1",
+ "csiphy1_timer",
+ "csiphy2",
+ "csiphy2_timer",
+ "csiphy4",
+ "csiphy4_timer",
+ "gcc_axi_hf",
+ "gcc_axi_sf",
+ "vfe0",
+ "vfe0_fast_ahb",
+ "vfe1",
+ "vfe1_fast_ahb",
+ "vfe_lite",
+ "vfe_lite_ahb",
+ "vfe_lite_cphy_rx",
+ "vfe_lite_csid";
+
+ interrupts = <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 431 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 359 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 122 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 360 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-names = "csid0",
+ "csid1",
+ "csid2",
+ "csid_lite0",
+ "csid_lite1",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "csiphy4",
+ "vfe0",
+ "vfe1",
+ "vfe_lite0",
+ "vfe_lite1";
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_CAMERA_CFG QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mmss_noc MASTER_CAMNOC_HF QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&mmss_noc MASTER_CAMNOC_SF QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&mmss_noc MASTER_CAMNOC_ICP QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+
+ interconnect-names = "ahb",
+ "hf_mnoc",
+ "sf_mnoc",
+ "sf_icp_mnoc";
+
+ iommus = <&apps_smmu 0x800 0x60>,
+ <&apps_smmu 0x860 0x60>,
+ <&apps_smmu 0x1800 0x60>,
+ <&apps_smmu 0x1860 0x60>,
+ <&apps_smmu 0x18e0 0x00>,
+ <&apps_smmu 0x1980 0x20>,
+ <&apps_smmu 0x1900 0x00>,
+ <&apps_smmu 0x19a0 0x20>;
+
+ power-domains = <&camcc CAM_CC_IFE_0_GDSC>,
+ <&camcc CAM_CC_IFE_1_GDSC>,
+ <&camcc CAM_CC_TITAN_TOP_GDSC>;
+
+ power-domain-names = "ife0",
+ "ife1",
+ "top";
+
+ vdd-csiphy-0p8-supply = <&csiphy_0p8_supply>;
+ vdd-csiphy-1p2-supply = <&csiphy_1p2_supply>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ csiphy_ep0: endpoint {
+ clock-lanes = <7>;
+ data-lanes = <0 1>;
+ remote-endpoint = <&sensor_ep>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/renesas,fcp.yaml b/Documentation/devicetree/bindings/media/renesas,fcp.yaml
index f94dacd96278..7bf1266223e8 100644
--- a/Documentation/devicetree/bindings/media/renesas,fcp.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,fcp.yaml
@@ -30,14 +30,24 @@ properties:
- renesas,r9a07g043u-fcpvd # RZ/G2UL
- renesas,r9a07g044-fcpvd # RZ/G2{L,LC}
- renesas,r9a07g054-fcpvd # RZ/V2L
+ - renesas,r9a09g057-fcpvd # RZ/V2H(P)
- const: renesas,fcpv # Generic FCP for VSP fallback
reg:
maxItems: 1
- clocks: true
+ clocks:
+ minItems: 1
+ items:
+ - description: Main clock
+ - description: Register access clock
+ - description: Video clock
- clock-names: true
+ clock-names:
+ items:
+ - const: aclk
+ - const: pclk
+ - const: vclk
iommus:
maxItems: 1
@@ -66,18 +76,11 @@ allOf:
- renesas,r9a07g043u-fcpvd
- renesas,r9a07g044-fcpvd
- renesas,r9a07g054-fcpvd
+ - renesas,r9a09g057-fcpvd
then:
properties:
clocks:
- items:
- - description: Main clock
- - description: Register access clock
- - description: Video clock
- clock-names:
- items:
- - const: aclk
- - const: pclk
- - const: vclk
+ minItems: 3
required:
- clock-names
else:
diff --git a/Documentation/devicetree/bindings/media/renesas,isp.yaml b/Documentation/devicetree/bindings/media/renesas,isp.yaml
index c4de4555b753..d25e020f5e5e 100644
--- a/Documentation/devicetree/bindings/media/renesas,isp.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,isp.yaml
@@ -25,19 +25,55 @@ properties:
- renesas,r8a779h0-isp # V4M
- const: renesas,rcar-gen4-isp # Generic R-Car Gen4
reg:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
+
+ reg-names:
+ minItems: 1
+ items:
+ - const: cs
+ - const: core
interrupts:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: cs
+ - const: core
clocks:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: cs
+ - const: core
power-domains:
maxItems: 1
resets:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
+
+ reset-names:
+ minItems: 1
+ items:
+ - const: cs
+ - const: core
+
+ renesas,vspx:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ A phandle to the companion VSPX responsible for the Streaming Bridge
+ functionality. The Streaming Bridge is responsible for feeding image
+ and configuration data to the ISP when operating in memory-to-memory
+ mode.
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -103,10 +139,14 @@ properties:
required:
- compatible
- reg
+ - reg-names
- interrupts
+ - interrupt-names
- clocks
+ - clock-names
- power-domains
- resets
+ - reset-names
- ports
additionalProperties: false
@@ -119,11 +159,18 @@ examples:
isp1: isp@fed20000 {
compatible = "renesas,r8a779a0-isp", "renesas,rcar-gen4-isp";
- reg = <0xfed20000 0x10000>;
- interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 613>;
+ reg = <0xfed20000 0x10000>, <0xfee00000 0x100000>;
+ reg-names = "cs", "core";
+ interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cs", "core";
+ clocks = <&cpg CPG_MOD 613>, <&cpg CPG_MOD 17>;
+ clock-names = "cs", "core";
power-domains = <&sysc R8A779A0_PD_A3ISP01>;
- resets = <&cpg 613>;
+ resets = <&cpg 613>, <&cpg 17>;
+ reset-names = "cs", "core";
+
+ renesas,vspx = <&vspx1>;
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/media/renesas,rzg2l-cru.yaml b/Documentation/devicetree/bindings/media/renesas,rzg2l-cru.yaml
index bc1245127025..47e18690fa57 100644
--- a/Documentation/devicetree/bindings/media/renesas,rzg2l-cru.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,rzg2l-cru.yaml
@@ -17,24 +17,43 @@ description:
properties:
compatible:
- items:
- - enum:
- - renesas,r9a07g043-cru # RZ/G2UL
- - renesas,r9a07g044-cru # RZ/G2{L,LC}
- - renesas,r9a07g054-cru # RZ/V2L
- - const: renesas,rzg2l-cru
+ oneOf:
+ - items:
+ - enum:
+ - renesas,r9a07g043-cru # RZ/G2UL
+ - renesas,r9a07g044-cru # RZ/G2{L,LC}
+ - renesas,r9a07g054-cru # RZ/V2L
+ - const: renesas,rzg2l-cru
+ - const: renesas,r9a09g047-cru # RZ/G3E
reg:
maxItems: 1
interrupts:
- maxItems: 3
+ oneOf:
+ - items:
+ - description: CRU Interrupt for image_conv
+ - description: CRU Interrupt for image_conv_err
+ - description: CRU AXI master error interrupt
+ - items:
+ - description: CRU Interrupt for image_conv
+ - description: CRU AXI master error interrupt
+ - description: CRU Video Data AXI Master Address 0 Write End interrupt
+ - description: CRU Statistics data AXI master addr 0 write end interrupt
+ - description: CRU Video statistics data AXI master addr 0 write end interrupt
interrupt-names:
- items:
- - const: image_conv
- - const: image_conv_err
- - const: axi_mst_err
+ oneOf:
+ - items:
+ - const: image_conv
+ - const: image_conv_err
+ - const: axi_mst_err
+ - items:
+ - const: image_conv
+ - const: axi_mst_err
+ - const: vd_addr_wend
+ - const: sd_addr_wend
+ - const: vsd_addr_wend
clocks:
items:
@@ -109,6 +128,10 @@ allOf:
- renesas,r9a07g054-cru
then:
properties:
+ interrupts:
+ maxItems: 3
+ interrupt-names:
+ maxItems: 3
ports:
required:
- port@0
@@ -122,10 +145,30 @@ allOf:
- renesas,r9a07g043-cru
then:
properties:
+ interrupts:
+ maxItems: 3
+ interrupt-names:
+ maxItems: 3
ports:
properties:
port@0: false
+ required:
+ - port@1
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a09g047-cru
+ then:
+ properties:
+ interrupts:
+ minItems: 5
+ interrupt-names:
+ minItems: 5
+ ports:
+ properties:
+ port@0: false
required:
- port@1
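
To make the new RZ/G3E case concrete, a hedged sketch of a renesas,r9a09g047-cru node with the five interrupts the schema now expects; the unit address and GIC SPI numbers are placeholders, only the interrupt-names follow the binding:

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    cru: video@16000000 {                                  /* placeholder address */
        compatible = "renesas,r9a09g047-cru";
        reg = <0x16000000 0x10000>;
        interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,    /* placeholder SPI numbers */
                     <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
        interrupt-names = "image_conv", "axi_mst_err", "vd_addr_wend",
                          "sd_addr_wend", "vsd_addr_wend";
        /* clocks, resets, power-domains and the port@1 endpoint omitted for brevity */
    };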
diff --git a/Documentation/devicetree/bindings/media/renesas,rzg2l-csi2.yaml b/Documentation/devicetree/bindings/media/renesas,rzg2l-csi2.yaml
index 7faa12fecd5b..c5c511c9f0db 100644
--- a/Documentation/devicetree/bindings/media/renesas,rzg2l-csi2.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,rzg2l-csi2.yaml
@@ -17,12 +17,17 @@ description:
properties:
compatible:
- items:
- - enum:
- - renesas,r9a07g043-csi2 # RZ/G2UL
- - renesas,r9a07g044-csi2 # RZ/G2{L,LC}
- - renesas,r9a07g054-csi2 # RZ/V2L
- - const: renesas,rzg2l-csi2
+ oneOf:
+ - items:
+ - enum:
+ - renesas,r9a07g043-csi2 # RZ/G2UL
+ - renesas,r9a07g044-csi2 # RZ/G2{L,LC}
+ - renesas,r9a07g054-csi2 # RZ/V2L
+ - const: renesas,rzg2l-csi2
+ - items:
+ - const: renesas,r9a09g047-csi2 # RZ/G3E
+ - const: renesas,r9a09g057-csi2
+ - const: renesas,r9a09g057-csi2 # RZ/V2H(P)
reg:
maxItems: 1
@@ -31,16 +36,24 @@ properties:
maxItems: 1
clocks:
- items:
- - description: Internal clock for connecting CRU and MIPI
- - description: CRU Main clock
- - description: CRU Register access clock
+ oneOf:
+ - items:
+ - description: Internal clock for connecting CRU and MIPI
+ - description: CRU Main clock
+ - description: CRU Register access clock
+ - items:
+ - description: CRU Main clock
+ - description: CRU Register access clock
clock-names:
- items:
- - const: system
- - const: video
- - const: apb
+ oneOf:
+ - items:
+ - const: system
+ - const: video
+ - const: apb
+ - items:
+ - const: video
+ - const: apb
power-domains:
maxItems: 1
@@ -48,7 +61,7 @@ properties:
resets:
items:
- description: CRU_PRESETN reset terminal
- - description: CRU_CMN_RSTB reset terminal
+ - description: D-PHY reset (CRU_CMN_RSTB or CRU_n_S_RESETN)
reset-names:
items:
@@ -101,6 +114,25 @@ required:
- reset-names
- ports
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a09g057-csi2
+ then:
+ properties:
+ clocks:
+ maxItems: 2
+ clock-names:
+ maxItems: 2
+ else:
+ properties:
+ clocks:
+ minItems: 3
+ clock-names:
+ minItems: 3
+
additionalProperties: false
examples:
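
A minimal sketch of the two-clock RZ/V2H(P) case handled by the new allOf block; the unit address and CPG specifiers are placeholders, the clock-names are the ones the schema accepts for this variant:

    csi2: csi2@16000400 {                                  /* placeholder address */
        compatible = "renesas,r9a09g057-csi2";
        reg = <0x16000400 0x400>;
        clocks = <&cpg 0>, <&cpg 1>;                       /* placeholder specifiers */
        clock-names = "video", "apb";
        /* interrupts, resets, power-domains and ports omitted for brevity */
    };

The older RZ/G2L-class devices keep the three-clock "system"/"video"/"apb" form.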
diff --git a/Documentation/devicetree/bindings/media/renesas,vsp1.yaml b/Documentation/devicetree/bindings/media/renesas,vsp1.yaml
index 1a03e67462a4..fcf7219b1f40 100644
--- a/Documentation/devicetree/bindings/media/renesas,vsp1.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,vsp1.yaml
@@ -25,6 +25,7 @@ properties:
- enum:
- renesas,r9a07g043u-vsp2 # RZ/G2UL
- renesas,r9a07g054-vsp2 # RZ/V2L
+ - renesas,r9a09g057-vsp2 # RZ/V2H(P)
- const: renesas,r9a07g044-vsp2 # RZ/G2L fallback
reg:
@@ -33,8 +34,18 @@ properties:
interrupts:
maxItems: 1
- clocks: true
- clock-names: true
+ clocks:
+ minItems: 1
+ items:
+ - description: Main clock
+ - description: Register access clock
+ - description: Video clock
+
+ clock-names:
+ items:
+ - const: aclk
+ - const: pclk
+ - const: vclk
power-domains:
maxItems: 1
@@ -78,15 +89,7 @@ allOf:
then:
properties:
clocks:
- items:
- - description: Main clock
- - description: Register access clock
- - description: Video clock
- clock-names:
- items:
- - const: aclk
- - const: pclk
- - const: vclk
+ minItems: 3
required:
- clock-names
else:
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
index 2f36ac23604c..0762e0ff66ef 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
@@ -33,6 +33,7 @@ properties:
- mediatek,mt2712-smi-common
- mediatek,mt6779-smi-common
- mediatek,mt6795-smi-common
+ - mediatek,mt6893-smi-common
- mediatek,mt8167-smi-common
- mediatek,mt8173-smi-common
- mediatek,mt8183-smi-common
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
index 2381660b324c..2e7fac4b5094 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
@@ -21,6 +21,7 @@ properties:
- mediatek,mt2712-smi-larb
- mediatek,mt6779-smi-larb
- mediatek,mt6795-smi-larb
+ - mediatek,mt6893-smi-larb
- mediatek,mt8167-smi-larb
- mediatek,mt8173-smi-larb
- mediatek,mt8183-smi-larb
diff --git a/Documentation/devicetree/bindings/memory-controllers/renesas,rzg3e-xspi.yaml b/Documentation/devicetree/bindings/memory-controllers/renesas,rzg3e-xspi.yaml
new file mode 100644
index 000000000000..2bfe63ec62dc
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/renesas,rzg3e-xspi.yaml
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/renesas,rzg3e-xspi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas Expanded Serial Peripheral Interface (xSPI)
+
+maintainers:
+ - Biju Das <biju.das.jz@bp.renesas.com>
+
+description: |
+  Renesas xSPI allows a SPI flash connected to the SoC to be accessed via
+  memory mapping or the manual command mode.
+
+  The flash chip itself should be represented by a subnode of the xSPI node.
+ The flash interface is selected based on the "compatible" property of this
+ subnode:
+ - "jedec,spi-nor";
+
+allOf:
+ - $ref: /schemas/spi/spi-controller.yaml#
+
+properties:
+ compatible:
+ const: renesas,r9a09g047-xspi # RZ/G3E
+
+ reg:
+ items:
+ - description: xSPI registers
+ - description: direct mapping area
+
+ reg-names:
+ items:
+ - const: regs
+ - const: dirmap
+
+ interrupts:
+ items:
+ - description: Interrupt pulse signal by factors excluding errors
+ - description: Interrupt pulse signal by error factors
+
+ interrupt-names:
+ items:
+ - const: pulse
+ - const: err_pulse
+
+ clocks:
+ items:
+ - description: AHB clock
+ - description: AXI clock
+ - description: SPI clock
+ - description: Double speed SPI clock
+
+ clock-names:
+ items:
+ - const: ahb
+ - const: axi
+ - const: spi
+ - const: spix2
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ items:
+ - description: Hardware reset
+ - description: AXI reset
+
+ reset-names:
+ items:
+ - const: hresetn
+ - const: aresetn
+
+ renesas,xspi-cs-addr-sys:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: |
+      Phandle to the system controller (sys) that allows configuring the
+      xSPI CS0 and CS1 addresses.
+
+patternProperties:
+ "flash@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ contains:
+ const: jedec,spi-nor
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - interrupt-names
+ - clocks
+ - clock-names
+ - power-domains
+ - resets
+ - reset-names
+ - '#address-cells'
+ - '#size-cells'
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/renesas,r9a09g047-cpg.h>
+
+ spi@11030000 {
+ compatible = "renesas,r9a09g047-xspi";
+ reg = <0x11030000 0x10000>, <0x20000000 0x10000000>;
+ reg-names = "regs", "dirmap";
+ interrupts = <GIC_SPI 228 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 229 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "pulse", "err_pulse";
+ clocks = <&cpg CPG_MOD 0x9f>, <&cpg CPG_MOD 0xa0>,
+ <&cpg CPG_CORE 9>, <&cpg CPG_MOD 0xa1>;
+ clock-names = "ahb", "axi", "spi", "spix2";
+ power-domains = <&cpg>;
+ resets = <&cpg 0xa3>, <&cpg 0xa4>;
+ reset-names = "hresetn", "aresetn";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/memory-controllers/st,stm32mp25-omm.yaml b/Documentation/devicetree/bindings/memory-controllers/st,stm32mp25-omm.yaml
new file mode 100644
index 000000000000..344878db8818
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/st,stm32mp25-omm.yaml
@@ -0,0 +1,226 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/st,stm32mp25-omm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STM32 Octo Memory Manager (OMM)
+
+maintainers:
+ - Patrice Chotard <patrice.chotard@foss.st.com>
+
+description: |
+ The STM32 Octo Memory Manager is a low-level interface that enables an
+ efficient OCTOSPI pin assignment with a full I/O matrix (before alternate
+  function map) and multiplexing of single/dual/quad/octal SPI interfaces over
+  the same bus. It supports up to:
+ - Two single/dual/quad/octal SPI interfaces
+ - Two ports for pin assignment
+
+properties:
+ compatible:
+ const: st,stm32mp25-omm
+
+ "#address-cells":
+ const: 2
+
+ "#size-cells":
+ const: 1
+
+ ranges:
+ description: |
+ Reflects the memory layout per OSPI instance.
+ Format:
+ <chip-select> 0 <registers base address> <size>
+ minItems: 2
+ maxItems: 2
+
+ reg:
+ items:
+ - description: OMM registers
+ - description: OMM memory map area
+
+ reg-names:
+ items:
+ - const: regs
+ - const: memory_map
+
+ memory-region:
+ description:
+      Memory region shared between the 2 OCTOSPI instances.
+      One or two phandles to nodes describing memory mapped regions,
+      depending on the number of child nodes.
+ minItems: 1
+ maxItems: 2
+
+ memory-region-names:
+ description:
+      Identify to which OSPI instance the memory region belongs.
+ items:
+ enum: [ospi1, ospi2]
+ minItems: 1
+ maxItems: 2
+
+ clocks:
+ maxItems: 3
+
+ clock-names:
+ items:
+ - const: omm
+ - const: ospi1
+ - const: ospi2
+
+ resets:
+ maxItems: 3
+
+ reset-names:
+ items:
+ - const: omm
+ - const: ospi1
+ - const: ospi2
+
+ access-controllers:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ st,syscfg-amcr:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ The Address Mapping Control Register (AMCR) is used to split the 256MB
+      memory map area shared between the 2 OSPI instances. The Octo Memory
+      Manager sets the AMCR depending on the memory-region configuration.
+ The memory split bitmask description is:
+ - 000: OCTOSPI1 (256 Mbytes), OCTOSPI2 unmapped
+ - 001: OCTOSPI1 (192 Mbytes), OCTOSPI2 (64 Mbytes)
+ - 010: OCTOSPI1 (128 Mbytes), OCTOSPI2 (128 Mbytes)
+ - 011: OCTOSPI1 (64 Mbytes), OCTOSPI2 (192 Mbytes)
+ - 1xx: OCTOSPI1 unmapped, OCTOSPI2 (256 Mbytes)
+ items:
+ - items:
+ - description: phandle to syscfg
+ - description: register offset within syscfg
+ - description: register bitmask for memory split
+
+ st,omm-req2ack-ns:
+ description:
+ In multiplexed mode (MUXEN = 1), this field defines the time in
+ nanoseconds between two transactions.
+ default: 0
+
+ st,omm-cssel-ovr:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Configure the chip select selector override for the 2 OCTOSPIs.
+      - 0: OCTOSPI1 chip select sent to NCS1, OCTOSPI2 chip select sent to NCS1
+      - 1: OCTOSPI1 chip select sent to NCS2, OCTOSPI2 chip select sent to NCS1
+      - 2: OCTOSPI1 chip select sent to NCS1, OCTOSPI2 chip select sent to NCS2
+      - 3: OCTOSPI1 chip select sent to NCS2, OCTOSPI2 chip select sent to NCS2
+ minimum: 0
+ maximum: 3
+ default: 0
+
+ st,omm-mux:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+      Configure the muxing between the 2 OCTOSPI buses and the 2 output ports.
+ - 0: direct mode
+ - 1: mux OCTOSPI1 and OCTOSPI2 to port 1
+ - 2: swapped mode
+ - 3: mux OCTOSPI1 and OCTOSPI2 to port 2
+ minimum: 0
+ maximum: 3
+ default: 0
+
+patternProperties:
+ ^spi@[0-9]:
+ type: object
+ $ref: /schemas/spi/st,stm32mp25-ospi.yaml#
+ description: Required spi child node
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - st,syscfg-amcr
+ - ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/st,stm32mp25-rcc.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/reset/st,stm32mp25-rcc.h>
+ ommanager@40500000 {
+ compatible = "st,stm32mp25-omm";
+ reg = <0x40500000 0x400>, <0x60000000 0x10000000>;
+ reg-names = "regs", "memory_map";
+ ranges = <0 0 0x40430000 0x400>,
+ <1 0 0x40440000 0x400>;
+ memory-region = <&mm_ospi1>, <&mm_ospi2>;
+ memory-region-names = "ospi1", "ospi2";
+ pinctrl-0 = <&ospi_port1_clk_pins_a
+ &ospi_port1_io03_pins_a
+ &ospi_port1_cs0_pins_a>;
+ pinctrl-1 = <&ospi_port1_clk_sleep_pins_a
+ &ospi_port1_io03_sleep_pins_a
+ &ospi_port1_cs0_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ clocks = <&rcc CK_BUS_OSPIIOM>,
+ <&scmi_clk CK_SCMI_OSPI1>,
+ <&scmi_clk CK_SCMI_OSPI2>;
+ clock-names = "omm", "ospi1", "ospi2";
+ resets = <&rcc OSPIIOM_R>,
+ <&scmi_reset RST_SCMI_OSPI1>,
+ <&scmi_reset RST_SCMI_OSPI2>;
+ reset-names = "omm", "ospi1", "ospi2";
+ access-controllers = <&rifsc 111>;
+ power-domains = <&CLUSTER_PD>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ st,syscfg-amcr = <&syscfg 0x2c00 0x7>;
+ st,omm-req2ack-ns = <0>;
+ st,omm-mux = <0>;
+ st,omm-cssel-ovr = <0>;
+
+ spi@0 {
+ compatible = "st,stm32mp25-ospi";
+ reg = <0 0 0x400>;
+ memory-region = <&mm_ospi1>;
+ interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&hpdma 2 0x62 0x00003121 0x0>,
+ <&hpdma 2 0x42 0x00003112 0x0>;
+ dma-names = "tx", "rx";
+ clocks = <&scmi_clk CK_SCMI_OSPI1>;
+ resets = <&scmi_reset RST_SCMI_OSPI1>, <&scmi_reset RST_SCMI_OSPI1DLL>;
+ access-controllers = <&rifsc 74>;
+ power-domains = <&CLUSTER_PD>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ st,syscfg-dlyb = <&syscfg 0x1000>;
+ };
+
+ spi@1 {
+ compatible = "st,stm32mp25-ospi";
+ reg = <1 0 0x400>;
+ memory-region = <&mm_ospi1>;
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&hpdma 3 0x62 0x00003121 0x0>,
+ <&hpdma 3 0x42 0x00003112 0x0>;
+ dma-names = "tx", "rx";
+ clocks = <&scmi_clk CK_KER_OSPI2>;
+ resets = <&scmi_reset RST_SCMI_OSPI2>, <&scmi_reset RST_SCMI_OSPI1DLL>;
+ access-controllers = <&rifsc 75>;
+ power-domains = <&CLUSTER_PD>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ st,syscfg-dlyb = <&syscfg 0x1000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/aspeed,ast2x00-scu.yaml b/Documentation/devicetree/bindings/mfd/aspeed,ast2x00-scu.yaml
index c800d5e53b65..5eccd10d95ce 100644
--- a/Documentation/devicetree/bindings/mfd/aspeed,ast2x00-scu.yaml
+++ b/Documentation/devicetree/bindings/mfd/aspeed,ast2x00-scu.yaml
@@ -66,8 +66,15 @@ patternProperties:
- compatible
'^interrupt-controller@[0-9a-f]+$':
- description: See Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2xxx-scu-ic.txt
type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ contains:
+ enum:
+ - aspeed,ast2500-scu-ic
+ - aspeed,ast2600-scu-ic0
+ - aspeed,ast2600-scu-ic1
'^silicon-id@[0-9a-f]+$':
description: Unique hardware silicon identifiers within the SoC
diff --git a/Documentation/devicetree/bindings/mfd/atmel,at91sam9260-gpbr.yaml b/Documentation/devicetree/bindings/mfd/atmel,at91sam9260-gpbr.yaml
index f805545aa62a..f6f47999c6c1 100644
--- a/Documentation/devicetree/bindings/mfd/atmel,at91sam9260-gpbr.yaml
+++ b/Documentation/devicetree/bindings/mfd/atmel,at91sam9260-gpbr.yaml
@@ -19,6 +19,7 @@ properties:
- items:
- enum:
- atmel,at91sam9260-gpbr
+ - microchip,sama7d65-gpbr
- const: syscon
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/mfd/brcm,bcm59056.txt b/Documentation/devicetree/bindings/mfd/brcm,bcm59056.txt
deleted file mode 100644
index be51a15e05f9..000000000000
--- a/Documentation/devicetree/bindings/mfd/brcm,bcm59056.txt
+++ /dev/null
@@ -1,39 +0,0 @@
--------------------------------
-BCM590xx Power Management Units
--------------------------------
-
-Required properties:
-- compatible: "brcm,bcm59056"
-- reg: I2C slave address
-- interrupts: interrupt for the PMU. Generic interrupt client node bindings
- are described in interrupt-controller/interrupts.txt
-
-------------------
-Voltage Regulators
-------------------
-
-Optional child nodes:
-- regulators: container node for regulators following the generic
- regulator binding in regulator/regulator.txt
-
- The valid regulator node names for BCM59056 are:
- rfldo, camldo1, camldo2, simldo1, simldo2, sdldo, sdxldo,
- mmcldo1, mmcldo2, audldo, micldo, usbldo, vibldo,
- csr, iosr1, iosr2, msr, sdsr1, sdsr2, vsr,
- gpldo1, gpldo2, gpldo3, gpldo4, gpldo5, gpldo6,
- vbus
-
-Example:
- pmu: bcm59056@8 {
- compatible = "brcm,bcm59056";
- reg = <0x08>;
- interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
- regulators {
- rfldo_reg: rfldo {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3300000>;
- };
-
- ...
- };
- };
diff --git a/Documentation/devicetree/bindings/mfd/brcm,bcm59056.yaml b/Documentation/devicetree/bindings/mfd/brcm,bcm59056.yaml
new file mode 100644
index 000000000000..b67d7a723fc2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/brcm,bcm59056.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/brcm,bcm59056.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM590xx Power Management Units
+
+maintainers:
+ - Artur Weber <aweber.kernel@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - brcm,bcm59054
+ - brcm,bcm59056
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ regulators:
+ type: object
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,bcm59054
+ then:
+ properties:
+ regulators:
+ $ref: /schemas/regulator/brcm,bcm59054.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,bcm59056
+ then:
+ properties:
+ regulators:
+ $ref: /schemas/regulator/brcm,bcm59056.yaml#
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@8 {
+ compatible = "brcm,bcm59056";
+ reg = <0x08>;
+ interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+
+ regulators {
+ rfldo {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/iqs62x.yaml b/Documentation/devicetree/bindings/mfd/iqs62x.yaml
index e79ce447a800..f242dd0e18fd 100644
--- a/Documentation/devicetree/bindings/mfd/iqs62x.yaml
+++ b/Documentation/devicetree/bindings/mfd/iqs62x.yaml
@@ -60,43 +60,34 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- iqs620a@44 {
- compatible = "azoteq,iqs620a";
- reg = <0x44>;
- interrupt-parent = <&gpio>;
- interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
-
- keys {
- compatible = "azoteq,iqs620a-keys";
-
- linux,keycodes = <KEY_SELECT>,
- <KEY_MENU>,
- <KEY_OK>,
- <KEY_MENU>;
-
- hall-switch-south {
- linux,code = <SW_LID>;
- azoteq,use-prox;
- };
- };
-
- iqs620a_pwm: pwm {
- compatible = "azoteq,iqs620a-pwm";
- #pwm-cells = <2>;
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ iqs620a@44 {
+ compatible = "azoteq,iqs620a";
+ reg = <0x44>;
+ interrupt-parent = <&gpio>;
+ interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
+
+ keys {
+ compatible = "azoteq,iqs620a-keys";
+
+ linux,keycodes = <KEY_SELECT>,
+ <KEY_MENU>,
+ <KEY_OK>,
+ <KEY_MENU>;
+
+ hall-switch-south {
+ linux,code = <SW_LID>;
+ azoteq,use-prox;
+ };
};
- };
-
- pwmleds {
- compatible = "pwm-leds";
- led-1 {
- pwms = <&iqs620a_pwm 0 1000000>;
- max-brightness = <255>;
+ iqs620a_pwm: pwm {
+ compatible = "azoteq,iqs620a-pwm";
+ #pwm-cells = <2>;
};
+ };
};
- |
@@ -105,37 +96,37 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- iqs620a@44 {
- compatible = "azoteq,iqs620a";
- reg = <0x44>;
- interrupt-parent = <&gpio>;
- interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
-
- firmware-name = "iqs620a_coil.bin";
-
- keys {
- compatible = "azoteq,iqs620a-keys";
-
- linux,keycodes = <0>,
- <0>,
- <0>,
- <0>,
- <0>,
- <0>,
- <KEY_MUTE>;
-
- hall-switch-north {
- linux,code = <SW_DOCK>;
- };
-
- hall-switch-south {
- linux,code = <SW_TABLET_MODE>;
- };
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ iqs620a@44 {
+ compatible = "azoteq,iqs620a";
+ reg = <0x44>;
+ interrupt-parent = <&gpio>;
+ interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
+
+ firmware-name = "iqs620a_coil.bin";
+
+ keys {
+ compatible = "azoteq,iqs620a-keys";
+
+ linux,keycodes = <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <KEY_MUTE>;
+
+ hall-switch-north {
+ linux,code = <SW_DOCK>;
+ };
+
+ hall-switch-south {
+ linux,code = <SW_TABLET_MODE>;
+ };
};
+ };
};
- |
@@ -144,36 +135,36 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- iqs624@44 {
- compatible = "azoteq,iqs624";
- reg = <0x44>;
- interrupt-parent = <&gpio>;
- interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
-
- keys {
- compatible = "azoteq,iqs624-keys";
-
- linux,keycodes = <BTN_0>,
- <0>,
- <BTN_1>,
- <0>,
- <0>,
- <0>,
- <0>,
- <0>,
- <0>,
- <0>,
- <0>,
- <0>,
- <0>,
- <0>,
- <KEY_VOLUMEUP>,
- <KEY_VOLUMEDOWN>;
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ iqs624@44 {
+ compatible = "azoteq,iqs624";
+ reg = <0x44>;
+ interrupt-parent = <&gpio>;
+ interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
+
+ keys {
+ compatible = "azoteq,iqs624-keys";
+
+ linux,keycodes = <BTN_0>,
+ <0>,
+ <BTN_1>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <KEY_VOLUMEUP>,
+ <KEY_VOLUMEDOWN>;
};
+ };
};
...
diff --git a/Documentation/devicetree/bindings/mfd/maxim,max77759.yaml b/Documentation/devicetree/bindings/mfd/maxim,max77759.yaml
new file mode 100644
index 000000000000..525de9ab3c2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/maxim,max77759.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/maxim,max77759.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Maxim Integrated MAX77759 PMIC for USB Type-C applications
+
+maintainers:
+ - André Draszik <andre.draszik@linaro.org>
+
+description: |
+ This is a part of device tree bindings for the MAX77759 companion Power
+ Management IC for USB Type-C applications.
+
+ The MAX77759 includes Battery Charger, Fuel Gauge, temperature sensors, USB
+ Type-C Port Controller (TCPC), NVMEM, and a GPIO expander.
+
+properties:
+ compatible:
+ const: maxim,max77759
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ reg:
+ maxItems: 1
+
+ gpio:
+ $ref: /schemas/gpio/maxim,max77759-gpio.yaml
+
+ nvmem-0:
+ $ref: /schemas/nvmem/maxim,max77759-nvmem.yaml
+
+required:
+ - compatible
+ - interrupts
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@66 {
+ compatible = "maxim,max77759";
+ reg = <0x66>;
+ interrupts-extended = <&gpa8 3 IRQ_TYPE_LEVEL_LOW>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ gpio {
+ compatible = "maxim,max77759-gpio";
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ nvmem-0 {
+ compatible = "maxim,max77759-nvmem";
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reboot-mode@0 {
+ reg = <0x0 0x4>;
+ };
+
+ boot-reason@4 {
+ reg = <0x4 0x4>;
+ };
+
+ shutdown-user-flag@8 {
+ reg = <0x8 0x1>;
+ };
+
+ rsoc@10 {
+ reg = <0xa 0x2>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/mediatek,mt8195-scpsys.yaml b/Documentation/devicetree/bindings/mfd/mediatek,mt8195-scpsys.yaml
index 768390b92682..0e1d43c96fb9 100644
--- a/Documentation/devicetree/bindings/mfd/mediatek,mt8195-scpsys.yaml
+++ b/Documentation/devicetree/bindings/mfd/mediatek,mt8195-scpsys.yaml
@@ -18,6 +18,7 @@ properties:
compatible:
items:
- enum:
+ - mediatek,mt6893-scpsys
- mediatek,mt8167-scpsys
- mediatek,mt8173-scpsys
- mediatek,mt8183-scpsys
diff --git a/Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml b/Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml
index 8bd1abfc44d9..b613da83dca4 100644
--- a/Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml
+++ b/Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml
@@ -76,12 +76,6 @@ additionalProperties: false
examples:
- |
- ocelot_clock: ocelot-clock {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <125000000>;
- };
-
spi {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/mfd/netronix,ntxec.yaml b/Documentation/devicetree/bindings/mfd/netronix,ntxec.yaml
index 59a630025f52..37fbb953ea12 100644
--- a/Documentation/devicetree/bindings/mfd/netronix,ntxec.yaml
+++ b/Documentation/devicetree/bindings/mfd/netronix,ntxec.yaml
@@ -48,29 +48,18 @@ examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- ec: embedded-controller@43 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_ntxec>;
-
- compatible = "netronix,ntxec";
- reg = <0x43>;
- system-power-controller;
- interrupt-parent = <&gpio4>;
- interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
- #pwm-cells = <2>;
- };
- };
-
- backlight {
- compatible = "pwm-backlight";
- pwms = <&ec 0 50000>;
- power-supply = <&backlight_regulator>;
- };
-
- backlight_regulator: regulator-dummy {
- compatible = "regulator-fixed";
- regulator-name = "backlight";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ec: embedded-controller@43 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ntxec>;
+
+ compatible = "netronix,ntxec";
+ reg = <0x43>;
+ system-power-controller;
+ interrupt-parent = <&gpio4>;
+ interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
+ #pwm-cells = <2>;
+ };
};
diff --git a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
index 7e7225aadae3..14ae3f00ef7e 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
+++ b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
@@ -41,6 +41,7 @@ properties:
- qcom,sm8450-tcsr
- qcom,tcsr-apq8064
- qcom,tcsr-apq8084
+ - qcom,tcsr-ipq5018
- qcom,tcsr-ipq5332
- qcom,tcsr-ipq5424
- qcom,tcsr-ipq6018
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd9571mwv.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd9571mwv.yaml
index 534cf03f36bb..47611c2a982c 100644
--- a/Documentation/devicetree/bindings/mfd/rohm,bd9571mwv.yaml
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd9571mwv.yaml
@@ -99,29 +99,29 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- pmic: pmic@30 {
- compatible = "rohm,bd9571mwv";
- reg = <0x30>;
- interrupt-parent = <&gpio2>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
- interrupt-controller;
- #interrupt-cells = <2>;
- gpio-controller;
- #gpio-cells = <2>;
- rohm,ddr-backup-power = <0xf>;
- rohm,rstbmode-pulse;
-
- regulators {
- dvfs: dvfs {
- regulator-name = "dvfs";
- regulator-min-microvolt = <750000>;
- regulator-max-microvolt = <1030000>;
- regulator-boot-on;
- regulator-always-on;
- };
- };
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic: pmic@30 {
+ compatible = "rohm,bd9571mwv";
+ reg = <0x30>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ rohm,ddr-backup-power = <0xf>;
+ rohm,rstbmode-pulse;
+
+ regulators {
+ dvfs: dvfs {
+ regulator-name = "dvfs";
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1030000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml
index efee3de0d9ad..0e06570483ae 100644
--- a/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml
@@ -4,19 +4,21 @@
$id: http://devicetree.org/schemas/mfd/rohm,bd96801-pmic.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: ROHM BD96801 Scalable Power Management Integrated Circuit
+title: ROHM BD96801/BD96805 Scalable Power Management Integrated Circuit
maintainers:
- Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
description:
- BD96801 is an automotive grade single-chip power management IC.
- It integrates 4 buck converters and 3 LDOs with safety features like
+ BD96801 and BD96805 are automotive grade, single-chip power management ICs.
+ They both integrate 4 buck converters and 3 LDOs with safety features like
over-/under voltage and over current detection and a watchdog.
properties:
compatible:
- const: rohm,bd96801
+ enum:
+ - rohm,bd96801
+ - rohm,bd96805
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd96802-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd96802-pmic.yaml
new file mode 100644
index 000000000000..6cbea796d12f
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd96802-pmic.yaml
@@ -0,0 +1,101 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/rohm,bd96802-pmic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BD96802 / BD96806 Scalable Power Management Integrated Circuit
+
+maintainers:
+ - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+
+description: |
+ BD96802Qxx-C and BD96806 are automotive grade configurable Power Management
+ Integrated Circuits supporting Functional Safety features for application
+  processors, SoCs and FPGAs.
+
+properties:
+ compatible:
+ enum:
+ - rohm,bd96802
+ - rohm,bd96806
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ The PMIC provides intb and errb IRQ lines. The errb IRQ line is used
+ for fatal IRQs which will cause the PMIC to shut down power outputs.
+ In many systems this will shut down the SoC contolling the PMIC and
+ connecting/handling the errb can be omitted. However, there are cases
+ where the SoC is not powered by the PMIC or has a short time backup
+ energy to handle shutdown of critical hardware. In that case it may be
+ useful to connect the errb and handle errb events.
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - enum: [intb, errb]
+ - const: errb
+
+ regulators:
+ $ref: ../regulator/rohm,bd96802-regulator.yaml
+ description:
+ List of child nodes that specify the regulators.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - regulators
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/leds/common.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pmic: pmic@62 {
+ reg = <0x62>;
+ compatible = "rohm,bd96802";
+ interrupt-parent = <&gpio1>;
+ interrupts = <29 IRQ_TYPE_LEVEL_LOW>, <6 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "intb", "errb";
+
+ regulators {
+ buck1 {
+ regulator-name = "buck1";
+ regulator-ramp-delay = <1250>;
+ /* 0.5V min INITIAL - 150 mV tune */
+ regulator-min-microvolt = <350000>;
+ /* 3.3V + 150mV tune */
+ regulator-max-microvolt = <3450000>;
+
+ /* These can be set only when PMIC is in STBY */
+ rohm,initial-voltage-microvolt = <500000>;
+ regulator-ov-error-microvolt = <230000>;
+ regulator-uv-error-microvolt = <230000>;
+ regulator-temp-protection-kelvin = <1>;
+ regulator-temp-warn-kelvin = <0>;
+ };
+ buck2 {
+ regulator-name = "buck2";
+ regulator-min-microvolt = <350000>;
+ regulator-max-microvolt = <3450000>;
+
+ rohm,initial-voltage-microvolt = <3000000>;
+ regulator-ov-error-microvolt = <18000>;
+ regulator-uv-error-microvolt = <18000>;
+ regulator-temp-protection-kelvin = <1>;
+ regulator-temp-warn-kelvin = <1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml b/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
index ac5d0c149796..d6b9e2914796 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
+++ b/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
@@ -20,6 +20,7 @@ description: |
properties:
compatible:
enum:
+ - samsung,s2mpg10-pmic
- samsung,s2mps11-pmic
- samsung,s2mps13-pmic
- samsung,s2mps14-pmic
@@ -58,11 +59,12 @@ properties:
reset (setting buck voltages to default values).
type: boolean
+ system-power-controller: true
+
wakeup-source: true
required:
- compatible
- - reg
- regulators
additionalProperties: false
@@ -72,6 +74,28 @@ allOf:
properties:
compatible:
contains:
+ const: samsung,s2mpg10-pmic
+ then:
+ properties:
+ reg: false
+ samsung,s2mps11-acokb-ground: false
+ samsung,s2mps11-wrstbi-ground: false
+
+ oneOf:
+ - required: [interrupts]
+ - required: [interrupts-extended]
+
+ else:
+ properties:
+ system-power-controller: false
+
+ required:
+ - reg
+
+ - if:
+ properties:
+ compatible:
+ contains:
const: samsung,s2mps11-pmic
then:
properties:
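
A hedged sketch of what the new samsung,s2mpg10-pmic branch permits: no reg, a mandatory interrupt (interrupts or interrupts-extended), and system-power-controller becomes valid; the interrupt parent and specifier are placeholders and the regulator children are elided:

    pmic {
        compatible = "samsung,s2mpg10-pmic";
        interrupts-extended = <&gic_alive 0>;   /* placeholder parent/specifier */
        system-power-controller;
        wakeup-source;

        regulators {
            /* buck/LDO nodes per the matching regulator schema */
        };
    };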
diff --git a/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml b/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
index d41308856408..4eabafb8079d 100644
--- a/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
+++ b/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
@@ -21,7 +21,12 @@ maintainers:
properties:
compatible:
- const: st,stm32-lptimer
+ oneOf:
+ - items:
+ - const: st,stm32mp25-lptimer
+ - const: st,stm32-lptimer
+ - items:
+ - const: st,stm32-lptimer
reg:
maxItems: 1
@@ -48,13 +53,21 @@ properties:
minItems: 1
maxItems: 2
+ power-domains:
+ maxItems: 1
+
pwm:
type: object
additionalProperties: false
properties:
compatible:
- const: st,stm32-pwm-lp
+ oneOf:
+ - items:
+ - const: st,stm32mp25-pwm-lp
+ - const: st,stm32-pwm-lp
+ - items:
+ - const: st,stm32-pwm-lp
"#pwm-cells":
const: 3
@@ -69,7 +82,12 @@ properties:
properties:
compatible:
- const: st,stm32-lptimer-counter
+ oneOf:
+ - items:
+ - const: st,stm32mp25-lptimer-counter
+ - const: st,stm32-lptimer-counter
+ - items:
+ - const: st,stm32-lptimer-counter
required:
- compatible
@@ -80,7 +98,12 @@ properties:
properties:
compatible:
- const: st,stm32-lptimer-timer
+ oneOf:
+ - items:
+ - const: st,stm32mp25-lptimer-timer
+ - const: st,stm32-lptimer-timer
+ - items:
+ - const: st,stm32-lptimer-timer
required:
- compatible
@@ -92,13 +115,18 @@ patternProperties:
properties:
compatible:
- const: st,stm32-lptimer-trigger
+ oneOf:
+ - items:
+ - const: st,stm32mp25-lptimer-trigger
+ - const: st,stm32-lptimer-trigger
+ - items:
+ - const: st,stm32-lptimer-trigger
reg:
description: Identify trigger hardware block.
items:
minimum: 0
- maximum: 2
+ maximum: 4
required:
- compatible
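
A minimal sketch of the new STM32MP25 compatible strings with their generic fallbacks, as allowed by the oneOf entries above; the address and clock specifier are placeholders and the remaining required properties are elided:

    lptimer@40090000 {
        compatible = "st,stm32mp25-lptimer", "st,stm32-lptimer";
        reg = <0x40090000 0x400>;               /* placeholder address/size */
        clocks = <&rcc 0>;                      /* placeholder clock specifier */
        /* clock-names etc. as required by the existing lptimer binding */

        pwm {
            compatible = "st,stm32mp25-pwm-lp", "st,stm32-pwm-lp";
            #pwm-cells = <3>;
        };
    };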
diff --git a/Documentation/devicetree/bindings/mfd/syscon.yaml b/Documentation/devicetree/bindings/mfd/syscon.yaml
index c6bbb19c3e3e..27672adeb1fe 100644
--- a/Documentation/devicetree/bindings/mfd/syscon.yaml
+++ b/Documentation/devicetree/bindings/mfd/syscon.yaml
@@ -84,6 +84,7 @@ select:
- mediatek,mt2701-pctl-a-syscfg
- mediatek,mt2712-pctl-a-syscfg
- mediatek,mt6397-pctl-pmic-syscfg
+ - mediatek,mt7988-topmisc
- mediatek,mt8135-pctl-a-syscfg
- mediatek,mt8135-pctl-b-syscfg
- mediatek,mt8173-pctl-a-syscfg
@@ -98,6 +99,8 @@ select:
- mstar,msc313-pmsleep
- nuvoton,ma35d1-sys
- nuvoton,wpcm450-shm
+ - qcom,apq8064-mmss-sfpb
+ - qcom,apq8064-sps-sic
- rockchip,px30-qos
- rockchip,rk3036-qos
- rockchip,rk3066-qos
@@ -187,9 +190,11 @@ properties:
- mediatek,mt2701-pctl-a-syscfg
- mediatek,mt2712-pctl-a-syscfg
- mediatek,mt6397-pctl-pmic-syscfg
+ - mediatek,mt7988-topmisc
- mediatek,mt8135-pctl-a-syscfg
- mediatek,mt8135-pctl-b-syscfg
- mediatek,mt8173-pctl-a-syscfg
+ - mediatek,mt8365-infracfg-nao
- mediatek,mt8365-syscfg
- microchip,lan966x-cpu-syscon
- microchip,mpfs-sysreg-scb
@@ -201,6 +206,8 @@ properties:
- mstar,msc313-pmsleep
- nuvoton,ma35d1-sys
- nuvoton,wpcm450-shm
+ - qcom,apq8064-mmss-sfpb
+ - qcom,apq8064-sps-sic
- rockchip,px30-qos
- rockchip,rk3036-qos
- rockchip,rk3066-qos
diff --git a/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml b/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml
index 3f7661bdd202..45f015d63df1 100644
--- a/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml
+++ b/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml
@@ -316,106 +316,106 @@ additionalProperties: false
examples:
- |
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- pmic@30 {
- compatible = "x-powers,axp152";
- reg = <0x30>;
- interrupts = <0>;
- interrupt-controller;
- #interrupt-cells = <1>;
- };
- };
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@30 {
+ compatible = "x-powers,axp152";
+ reg = <0x30>;
+ interrupts = <0>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
- |
- #include <dt-bindings/interrupt-controller/irq.h>
-
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- pmic@34 {
- compatible = "x-powers,axp209";
- reg = <0x34>;
- interrupt-parent = <&nmi_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
- interrupt-controller;
- #interrupt-cells = <1>;
-
- ac_power_supply: ac-power {
- compatible = "x-powers,axp202-ac-power-supply";
- };
-
- axp_adc: adc {
- compatible = "x-powers,axp209-adc";
- #io-channel-cells = <1>;
- };
-
- axp_gpio: gpio {
- compatible = "x-powers,axp209-gpio";
- gpio-controller;
- #gpio-cells = <2>;
-
- gpio0-adc-pin {
- pins = "GPIO0";
- function = "adc";
- };
- };
-
- battery_power_supply: battery-power {
- compatible = "x-powers,axp209-battery-power-supply";
- };
-
- regulators {
- /* Default work frequency for buck regulators */
- x-powers,dcdc-freq = <1500>;
-
- reg_dcdc2: dcdc2 {
- regulator-always-on;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1450000>;
- regulator-name = "vdd-cpu";
- };
-
- reg_dcdc3: dcdc3 {
- regulator-always-on;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1400000>;
- regulator-name = "vdd-int-dll";
- };
-
- reg_ldo1: ldo1 {
- /* LDO1 is a fixed output regulator */
- regulator-always-on;
- regulator-min-microvolt = <1300000>;
- regulator-max-microvolt = <1300000>;
- regulator-name = "vdd-rtc";
- };
-
- reg_ldo2: ldo2 {
- regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "avcc";
- };
-
- reg_ldo3: ldo3 {
- regulator-name = "ldo3";
- };
-
- reg_ldo4: ldo4 {
- regulator-name = "ldo4";
- };
-
- reg_ldo5: ldo5 {
- regulator-name = "ldo5";
- };
- };
-
- usb_power_supply: usb-power {
- compatible = "x-powers,axp202-usb-power-supply";
- };
- };
- };
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@34 {
+ compatible = "x-powers,axp209";
+ reg = <0x34>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ ac_power_supply: ac-power {
+ compatible = "x-powers,axp202-ac-power-supply";
+ };
+
+ axp_adc: adc {
+ compatible = "x-powers,axp209-adc";
+ #io-channel-cells = <1>;
+ };
+
+ axp_gpio: gpio {
+ compatible = "x-powers,axp209-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio0-adc-pin {
+ pins = "GPIO0";
+ function = "adc";
+ };
+ };
+
+ battery_power_supply: battery-power {
+ compatible = "x-powers,axp209-battery-power-supply";
+ };
+
+ regulators {
+ /* Default work frequency for buck regulators */
+ x-powers,dcdc-freq = <1500>;
+
+ reg_dcdc2: dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-name = "vdd-cpu";
+ };
+
+ reg_dcdc3: dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-int-dll";
+ };
+
+ reg_ldo1: ldo1 {
+ /* LDO1 is a fixed output regulator */
+ regulator-always-on;
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-name = "vdd-rtc";
+ };
+
+ reg_ldo2: ldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "avcc";
+ };
+
+ reg_ldo3: ldo3 {
+ regulator-name = "ldo3";
+ };
+
+ reg_ldo4: ldo4 {
+ regulator-name = "ldo4";
+ };
+
+ reg_ldo5: ldo5 {
+ regulator-name = "ldo5";
+ };
+ };
+
+ usb_power_supply: usb-power {
+ compatible = "x-powers,axp202-usb-power-supply";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mips/cpus.yaml b/Documentation/devicetree/bindings/mips/cpus.yaml
index a85137add668..471373ad0cfb 100644
--- a/Documentation/devicetree/bindings/mips/cpus.yaml
+++ b/Documentation/devicetree/bindings/mips/cpus.yaml
@@ -50,6 +50,7 @@ properties:
device_type: true
allOf:
+ - $ref: /schemas/opp/opp-v1.yaml#
- if:
properties:
compatible:
@@ -68,7 +69,7 @@ required:
- compatible
- reg
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/misc/ti,fpc202.yaml b/Documentation/devicetree/bindings/misc/ti,fpc202.yaml
new file mode 100644
index 000000000000..a8cb10f2d0df
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/ti,fpc202.yaml
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/misc/ti,fpc202.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI FPC202 dual port controller with expanded IOs
+
+maintainers:
+ - Romain Gantois <romain.gantois@bootlin.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-atr.yaml#
+
+properties:
+ compatible:
+ const: ti,fpc202
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+ enable-gpios:
+ description:
+ Specifier for the GPIO connected to the EN pin.
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ "^i2c@[0-1]$":
+ $ref: /schemas/i2c/i2c-controller.yaml#
+ description: Downstream device ports 0 and 1
+
+ properties:
+ reg:
+ maxItems: 1
+ description:
+ Downstream port ID
+
+ required:
+ - "#address-cells"
+ - "#size-cells"
+ - reg
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - "#gpio-cells"
+ - "#address-cells"
+ - "#size-cells"
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c-atr@f {
+ compatible = "ti,fpc202";
+ reg = <0xf>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml b/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml
index 9075add020bf..8e79de97b242 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml
@@ -38,6 +38,15 @@ allOf:
- items:
- const: clk_out_sd1
- const: clk_in_sd1
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,rzn1-sdhci
+ then:
+ properties:
+ interrupts:
+ minItems: 2
properties:
compatible:
@@ -46,6 +55,10 @@ properties:
- const: arasan,sdhci-4.9a # generic Arasan SDHCI 4.9a PHY
- const: arasan,sdhci-5.1 # generic Arasan SDHCI 5.1 PHY
- items:
+ - const: renesas,r9a06g032-sdhci # Renesas RZ/N1D SoC
+ - const: renesas,rzn1-sdhci # Renesas RZ/N1 family
+ - const: arasan,sdhci-8.9a
+ - items:
- const: rockchip,rk3399-sdhci-5.1 # rk3399 eMMC PHY
- const: arasan,sdhci-5.1
description:
@@ -109,7 +122,14 @@ properties:
- const: gate
interrupts:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: int
+ - const: wakeup
phys:
maxItems: 1
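
A hedged sketch of the new Renesas RZ/N1D entry using the optional second (wakeup) interrupt; the register range and GIC SPI numbers are placeholders, and the remaining required properties (clocks, clock-names) are omitted here:

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    mmc@40100000 {                                         /* placeholder address */
        compatible = "renesas,r9a06g032-sdhci", "renesas,rzn1-sdhci",
                     "arasan,sdhci-8.9a";
        reg = <0x40100000 0x1000>;
        interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,     /* placeholder SPI numbers */
                     <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
        interrupt-names = "int", "wakeup";
        /* clocks and clock-names as required by the Arasan binding */
    };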
diff --git a/Documentation/devicetree/bindings/mmc/fsl,esdhc.yaml b/Documentation/devicetree/bindings/mmc/fsl,esdhc.yaml
index b86ffb53b18b..62087cf920df 100644
--- a/Documentation/devicetree/bindings/mmc/fsl,esdhc.yaml
+++ b/Documentation/devicetree/bindings/mmc/fsl,esdhc.yaml
@@ -24,6 +24,7 @@ properties:
- fsl,t1040-esdhc
- fsl,t4240-esdhc
- fsl,ls1012a-esdhc
+ - fsl,ls1021a-esdhc
- fsl,ls1028a-esdhc
- fsl,ls1088a-esdhc
- fsl,ls1043a-esdhc
diff --git a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml
index 3f48d8292d5b..ee2ddef36369 100644
--- a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml
@@ -52,9 +52,14 @@ properties:
- const: core
- const: axi
+ dma-coherent: true
+
interrupts:
maxItems: 1
+ iommus:
+ maxItems: 1
+
marvell,pad-type:
$ref: /schemas/types.yaml#/definitions/string
enum:
@@ -142,7 +147,7 @@ properties:
This property provides the re-tuning counter.
allOf:
- - $ref: mmc-controller.yaml#
+ - $ref: sdhci-common.yaml#
- if:
properties:
compatible:
@@ -164,26 +169,6 @@ allOf:
marvell,pad-type: false
- - if:
- properties:
- compatible:
- contains:
- enum:
- - marvell,armada-cp110-sdhci
- - marvell,armada-ap807-sdhci
- - marvell,armada-ap806-sdhci
-
- then:
- properties:
- clocks:
- minItems: 2
-
- clock-names:
- items:
- - const: core
- - const: axi
-
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt b/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt
deleted file mode 100644
index f064528effed..000000000000
--- a/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-* Microchip PIC32 SDHCI Controller
-
-This file documents differences between the core properties in mmc.txt
-and the properties used by the sdhci-pic32 driver.
-
-Required properties:
-- compatible: Should be "microchip,pic32mzda-sdhci"
-- interrupts: Should contain interrupt
-- clock-names: Should be "base_clk", "sys_clk".
- See: Documentation/devicetree/bindings/resource-names.txt
-- clocks: Phandle to the clock.
- See: Documentation/devicetree/bindings/clock/clock-bindings.txt
-- pinctrl-names: A pinctrl state names "default" must be defined.
-- pinctrl-0: Phandle referencing pin configuration of the SDHCI controller.
- See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
-
-Example:
-
- sdhci@1f8ec000 {
- compatible = "microchip,pic32mzda-sdhci";
- reg = <0x1f8ec000 0x100>;
- interrupts = <191 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rootclk REF4CLK>, <&rootclk PB5CLK>;
- clock-names = "base_clk", "sys_clk";
- bus-width = <4>;
- cap-sd-highspeed;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_sdhc1>;
- };
diff --git a/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.yaml b/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.yaml
new file mode 100644
index 000000000000..ca0ca7df9ee9
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/microchip,sdhci-pic32.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip PIC32 SDHCI Controller
+
+description:
+ The Microchip PIC32 family of microcontrollers (MCUs) includes models with
+ Secure Digital Host Controller Interface (SDHCI) controllers, allowing them
+ to interface with Secure Digital (SD) cards. This interface is used for reading,
+ writing, and managing data on SD cards, enabling storage and data transfer
+ capabilities in embedded systems.
+
+allOf:
+ - $ref: mmc-controller.yaml
+
+maintainers:
+ - Ulf Hansson <ulf.hansson@linaro.org>
+
+properties:
+ compatible:
+ const: microchip,pic32mzda-sdhci
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: base_clk
+ - const: sys_clk
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - pinctrl-names
+ - pinctrl-0
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/microchip,pic32-clock.h>
+ mmc@1f8ec000 {
+ compatible = "microchip,pic32mzda-sdhci";
+ reg = <0x1f8ec000 0x100>;
+ interrupts = <191 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rootclk REF4CLK>, <&rootclk PB5CLK>;
+ clock-names = "base_clk", "sys_clk";
+ bus-width = <4>;
+ cap-sd-highspeed;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhc1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.yaml b/Documentation/devicetree/bindings/mmc/mtk-sd.yaml
index 0debccbd6519..6dd26ad31491 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.yaml
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.yaml
@@ -32,6 +32,7 @@ properties:
- const: mediatek,mt2701-mmc
- items:
- enum:
+ - mediatek,mt6893-mmc
- mediatek,mt8186-mmc
- mediatek,mt8188-mmc
- mediatek,mt8192-mmc
@@ -299,6 +300,7 @@ allOf:
properties:
compatible:
enum:
+ - mediatek,mt6893-mmc
- mediatek,mt8186-mmc
- mediatek,mt8188-mmc
- mediatek,mt8195-mmc
diff --git a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
index 773baa6c2656..7563623876fc 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
+++ b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
@@ -69,7 +69,9 @@ properties:
- renesas,sdhi-r9a09g011 # RZ/V2M
- const: renesas,rzg2l-sdhi
- items:
- - const: renesas,sdhi-r9a09g047 # RZ/G3E
+ - enum:
+ - renesas,sdhi-r9a09g047 # RZ/G3E
+ - renesas,sdhi-r9a09g056 # RZ/V2N
- const: renesas,sdhi-r9a09g057 # RZ/V2H(P)
reg:
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
index eed9063e9bb3..2b2cbce2458b 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
@@ -60,6 +60,7 @@ properties:
- qcom,sm6125-sdhci
- qcom,sm6350-sdhci
- qcom,sm6375-sdhci
+ - qcom,sm7150-sdhci
- qcom,sm8150-sdhci
- qcom,sm8250-sdhci
- qcom,sm8350-sdhci
diff --git a/Documentation/devicetree/bindings/mmc/sdhci.txt b/Documentation/devicetree/bindings/mmc/sdhci.txt
deleted file mode 100644
index 0e9923a64024..000000000000
--- a/Documentation/devicetree/bindings/mmc/sdhci.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-The properties specific for SD host controllers. For properties shared by MMC
-host controllers refer to the mmc[1] bindings.
-
- [1] Documentation/devicetree/bindings/mmc/mmc.txt
-
-Optional properties:
-- sdhci-caps-mask: The sdhci capabilities register is incorrect. This 64bit
- property corresponds to the bits in the sdhci capability register. If the bit
- is on in the mask then the bit is incorrect in the register and should be
- turned off, before applying sdhci-caps.
-- sdhci-caps: The sdhci capabilities register is incorrect. This 64bit
- property corresponds to the bits in the sdhci capability register. If the
- bit is on in the property then the bit should be turned on.
diff --git a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
index e6e604072d3c..f882219a0a26 100644
--- a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
@@ -19,6 +19,9 @@ properties:
- rockchip,rk3562-dwcmshc
- rockchip,rk3576-dwcmshc
- const: rockchip,rk3588-dwcmshc
+ - items:
+ - const: sophgo,sg2044-dwcmshc
+ - const: sophgo,sg2042-dwcmshc
- enum:
- rockchip,rk3568-dwcmshc
- rockchip,rk3588-dwcmshc
@@ -117,10 +120,6 @@ allOf:
required:
- power-domains
- else:
- properties:
- power-domains: false
-
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/mmc/spacemit,sdhci.yaml b/Documentation/devicetree/bindings/mmc/spacemit,sdhci.yaml
new file mode 100644
index 000000000000..13d9382058fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/spacemit,sdhci.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/spacemit,sdhci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SpacemiT SDHCI Controller
+
+maintainers:
+ - Yixun Lan <dlan@gentoo.org>
+
+allOf:
+ - $ref: mmc-controller.yaml#
+
+properties:
+ compatible:
+ const: spacemit,k1-sdhci
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: core clock, used by internal controller
+ - description: io clock, output for SD, SDIO, eMMC device
+
+ clock-names:
+ items:
+ - const: core
+ - const: io
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mmc@d4281000 {
+ compatible = "spacemit,k1-sdhci";
+ reg = <0xd4281000 0x200>;
+ interrupts = <101>;
+ interrupt-parent = <&plic>;
+ clocks = <&clk_apmu 10>, <&clk_apmu 13>;
+ clock-names = "core", "io";
+ };
diff --git a/Documentation/devicetree/bindings/mmc/vt8500-sdmmc.txt b/Documentation/devicetree/bindings/mmc/vt8500-sdmmc.txt
deleted file mode 100644
index d7fb6abb3eb8..000000000000
--- a/Documentation/devicetree/bindings/mmc/vt8500-sdmmc.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-* Wondermedia WM8505/WM8650 SD/MMC Host Controller
-
-This file documents differences between the core properties described
-by mmc.txt and the properties used by the wmt-sdmmc driver.
-
-Required properties:
-- compatible: Should be "wm,wm8505-sdhc".
-- interrupts: Two interrupts are required - regular irq and dma irq.
-
-Optional properties:
-- sdon-inverted: SD_ON bit is inverted on the controller
-
-Examples:
-
-sdhc@d800a000 {
- compatible = "wm,wm8505-sdhc";
- reg = <0xd800a000 0x1000>;
- interrupts = <20 21>;
- clocks = <&sdhc>;
- bus-width = <4>;
- sdon-inverted;
-};
-
diff --git a/Documentation/devicetree/bindings/mmc/wm,wm8505-sdhc.yaml b/Documentation/devicetree/bindings/mmc/wm,wm8505-sdhc.yaml
new file mode 100644
index 000000000000..5b55174e9088
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/wm,wm8505-sdhc.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/wm,wm8505-sdhc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: WonderMedia SoC SDHCI Controller
+
+maintainers:
+ - Alexey Charkov <alchark@gmail.com>
+
+allOf:
+ - $ref: mmc-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: wm,wm8505-sdhc
+ - items:
+ - const: wm,wm8650-sdhc
+ - const: wm,wm8505-sdhc
+ - items:
+ - const: wm,wm8750-sdhc
+ - const: wm,wm8505-sdhc
+ - items:
+ - const: wm,wm8850-sdhc
+ - const: wm,wm8505-sdhc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: SDMMC controller interrupt
+ - description: SDMMC controller DMA interrupt
+
+ sdon-inverted:
+ type: boolean
+ description: All chips before (not including) WM8505 rev. A2 treated their
+ "clock stop" bit (register offset 0x08 a.k.a. SDMMC_BUSMODE, bit 0x10)
+ as "set 1 to disable SD clock", while all the later versions treated it
+ as "set 0 to disable SD clock". Set this property for later versions of
+ wm,wm8505-sdhc. On wm,wm8650-sdhc and later this property is implied and
+ does not need to be set explicitly
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mmc@d800a000 {
+ compatible = "wm,wm8505-sdhc";
+ reg = <0xd800a000 0x1000>;
+ interrupts = <20>, <21>;
+ clocks = <&sdhc>;
+ bus-width = <4>;
+ sdon-inverted;
+ };
diff --git a/Documentation/devicetree/bindings/mtd/fsl,vf610-nfc.yaml b/Documentation/devicetree/bindings/mtd/fsl,vf610-nfc.yaml
new file mode 100644
index 000000000000..480a5c87859d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/fsl,vf610-nfc.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/fsl,vf610-nfc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale's NAND flash controller (NFC)
+
+description:
+ This variant of the Freescale NAND flash controller (NFC) can be found on
+ Vybrid (vf610), MPC5125, MCF54418 and Kinetis K70.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,vf610-nfc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: nfc
+
+patternProperties:
+ "^nand@[a-f0-9]$":
+ type: object
+ $ref: raw-nand-chip.yaml
+
+ properties:
+ compatible:
+ const: fsl,vf610-nfc-nandcs
+
+ reg:
+ const: 0
+
+ nand-ecc-strength:
+ enum: [24, 32]
+
+ nand-ecc-step-size:
+ const: 2048
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+allOf:
+ - $ref: nand-controller.yaml
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/vf610-clock.h>
+
+ nand-controller@400e0000 {
+ compatible = "fsl,vf610-nfc";
+ reg = <0x400e0000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks VF610_CLK_NFC>;
+ clock-names = "nfc";
+ assigned-clocks = <&clks VF610_CLK_NFC>;
+ assigned-clock-rates = <33000000>;
+
+ nand@0 {
+ compatible = "fsl,vf610-nfc-nandcs";
+ reg = <0>;
+ nand-bus-width = <8>;
+ nand-ecc-mode = "hw";
+ nand-ecc-strength = <32>;
+ nand-ecc-step-size = <2048>;
+ nand-on-flash-bbt;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/loongson,ls1b-nand-controller.yaml b/Documentation/devicetree/bindings/mtd/loongson,ls1b-nand-controller.yaml
new file mode 100644
index 000000000000..a09e92e416c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/loongson,ls1b-nand-controller.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/loongson,ls1b-nand-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson-1 NAND Controller
+
+maintainers:
+ - Keguang Zhang <keguang.zhang@gmail.com>
+
+description:
+ The Loongson-1 NAND controller abstracts all supported operations,
+ meaning it does not support low-level access to raw NAND flash chips.
+ Moreover, the controller is paired with the DMA engine to perform
+ READ and PROGRAM functions.
+
+allOf:
+ - $ref: nand-controller.yaml
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - loongson,ls1b-nand-controller
+ - loongson,ls1c-nand-controller
+ - items:
+ - enum:
+ - loongson,ls1a-nand-controller
+ - const: loongson,ls1b-nand-controller
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: nand
+ - const: nand-dma
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ const: rxtx
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - dmas
+ - dma-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ nand-controller@1fe78000 {
+ compatible = "loongson,ls1b-nand-controller";
+ reg = <0x1fe78000 0x24>, <0x1fe78040 0x4>;
+ reg-names = "nand", "nand-dma";
+ dmas = <&dma 0>;
+ dma-names = "rxtx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>;
+ label = "ls1x-nand";
+ nand-use-soft-ecc-engine;
+ nand-ecc-algo = "hamming";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/qcom,nandc.yaml b/Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
index 35b4206ea918..5511389960f0 100644
--- a/Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
+++ b/Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
@@ -11,12 +11,18 @@ maintainers:
properties:
compatible:
- enum:
- - qcom,ipq806x-nand
- - qcom,ipq4019-nand
- - qcom,ipq6018-nand
- - qcom,ipq8074-nand
- - qcom,sdx55-nand
+ oneOf:
+ - items:
+ - enum:
+ - qcom,sdx75-nand
+ - const: qcom,sdx55-nand
+ - items:
+ - enum:
+ - qcom,ipq806x-nand
+ - qcom,ipq4019-nand
+ - qcom,ipq6018-nand
+ - qcom,ipq8074-nand
+ - qcom,sdx55-nand
reg:
maxItems: 1
@@ -100,6 +106,18 @@ allOf:
compatible:
contains:
enum:
+ - qcom,sdx75-nand
+
+ then:
+ properties:
+ iommus:
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,ipq4019-nand
- qcom,ipq6018-nand
- qcom,ipq8074-nand
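
The new qcom,sdx75-nand entry keeps qcom,sdx55-nand as a fallback and, per the
added conditional, may carry a single iommus entry. A rough sketch, with
hypothetical register range, clock phandles and stream ID:

    nand-controller@1cc8000 {
        compatible = "qcom,sdx75-nand", "qcom,sdx55-nand";
        reg = <0x01cc8000 0x10000>;
        clocks = <&rpmhcc RPMH_QPIC_CLK>, <&sleep_clk>;   /* assumed clock sources */
        clock-names = "core", "aon";
        iommus = <&apps_smmu 0x280 0x0>;                  /* hypothetical stream ID */
        #address-cells = <1>;
        #size-cells = <0>;
    };
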
diff --git a/Documentation/devicetree/bindings/mtd/vf610-nfc.txt b/Documentation/devicetree/bindings/mtd/vf610-nfc.txt
deleted file mode 100644
index 7db5e6e609df..000000000000
--- a/Documentation/devicetree/bindings/mtd/vf610-nfc.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-Freescale's NAND flash controller (NFC)
-
-This variant of the Freescale NAND flash controller (NFC) can be found on
-Vybrid (vf610), MPC5125, MCF54418 and Kinetis K70.
-
-Required properties:
-- compatible: Should be set to "fsl,vf610-nfc".
-- reg: address range of the NFC.
-- interrupts: interrupt of the NFC.
-- #address-cells: shall be set to 1. Encode the nand CS.
-- #size-cells : shall be set to 0.
-- assigned-clocks: main clock from the SoC, for Vybrid <&clks VF610_CLK_NFC>;
-- assigned-clock-rates: The NAND bus timing is derived from this clock
- rate and should not exceed maximum timing for any NAND memory chip
- in a board stuffing. Typical NAND memory timings derived from this
- clock are found in the SoC hardware reference manual. Furthermore,
- there might be restrictions on maximum rates when using hardware ECC.
-
-- #address-cells, #size-cells : Must be present if the device has sub-nodes
- representing partitions.
-
-Required children nodes:
-Children nodes represent the available nand chips. Currently the driver can
-only handle one NAND chip.
-
-Required properties:
-- compatible: Should be set to "fsl,vf610-nfc-cs".
-- nand-bus-width: see nand-controller.yaml
-- nand-ecc-mode: see nand-controller.yaml
-
-Required properties for hardware ECC:
-- nand-ecc-strength: supported strengths are 24 and 32 bit (see nand-controller.yaml)
-- nand-ecc-step-size: step size equals page size, currently only 2k pages are
- supported
-- nand-on-flash-bbt: see nand-controller.yaml
-
-Example:
-
- nfc: nand@400e0000 {
- compatible = "fsl,vf610-nfc";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x400e0000 0x4000>;
- interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks VF610_CLK_NFC>;
- clock-names = "nfc";
- assigned-clocks = <&clks VF610_CLK_NFC>;
- assigned-clock-rates = <33000000>;
-
- nand@0 {
- compatible = "fsl,vf610-nfc-nandcs";
- reg = <0>;
- nand-bus-width = <8>;
- nand-ecc-mode = "hw";
- nand-ecc-strength = <32>;
- nand-ecc-step-size = <2048>;
- nand-on-flash-bbt;
- };
- };
diff --git a/Documentation/devicetree/bindings/net/aeonsemi,as21xxx.yaml b/Documentation/devicetree/bindings/net/aeonsemi,as21xxx.yaml
new file mode 100644
index 000000000000..69eb29dc4d7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/aeonsemi,as21xxx.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/aeonsemi,as21xxx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Aeonsemi AS21XXX Ethernet PHY
+
+maintainers:
+ - Christian Marangi <ansuelsmth@gmail.com>
+
+description: |
+  Aeonsemi AS21xxx Ethernet PHYs require firmware to be loaded to actually
+ work. The same firmware is compatible with various PHYs of the same family.
+
+  A PHY with no firmware loaded will be exposed on the MDIO bus with ID
+ 0x7500 0x7500 or 0x7500 0x9410 on C45 registers.
+
+  This can be done, and is implemented by OEMs, in 2 different ways:
+  - SPI flash attached directly to the PHY, containing the firmware. The PHY
+    will load the firmware by itself in this configuration.
+ - Manually provided firmware loaded from a file in the filesystem.
+
+ Each PHY can support up to 5 LEDs.
+
+ AS2xxx PHY Name logic:
+
+ AS21x1xxB1
+ ^ ^^
+ | |J: Supports SyncE/PTP
+ | |P: No SyncE/PTP support
+ | 1: Supports 2nd Serdes
+ | 2: Not 2nd Serdes support
+ 0: 10G, 5G, 2.5G
+ 5: 5G, 2.5G
+ 2: 2.5G
+
+allOf:
+ - $ref: ethernet-phy.yaml#
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ethernet-phy-id7500.9410
+ - ethernet-phy-id7500.9402
+ - ethernet-phy-id7500.9412
+ - ethernet-phy-id7500.9422
+ - ethernet-phy-id7500.9432
+ - ethernet-phy-id7500.9442
+ - ethernet-phy-id7500.9452
+ - ethernet-phy-id7500.9462
+ - ethernet-phy-id7500.9472
+ - ethernet-phy-id7500.9482
+ - ethernet-phy-id7500.9492
+ required:
+ - compatible
+
+properties:
+ reg:
+ maxItems: 1
+
+ firmware-name:
+    description: Specify the name of the PHY firmware to load
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+if:
+ properties:
+ compatible:
+ contains:
+ const: ethernet-phy-id7500.9410
+then:
+ required:
+ - firmware-name
+else:
+ properties:
+ firmware-name: false
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@1f {
+ compatible = "ethernet-phy-id7500.9410",
+ "ethernet-phy-ieee802.3-c45";
+
+ reg = <31>;
+ firmware-name = "as21x1x_fw.bin";
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <0>;
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <1>;
+ default-state = "keep";
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
index 0fdd11265417..6d22131ac2f9 100644
--- a/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
+++ b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
@@ -57,6 +57,16 @@ properties:
- const: hsi-mac
- const: xfp-mac
+ memory-region:
+ items:
+ - description: QDMA0 buffer memory
+ - description: QDMA1 buffer memory
+
+ memory-region-names:
+ items:
+ - const: qdma0-buf
+ - const: qdma1-buf
+
"#address-cells":
const: 1
@@ -140,6 +150,9 @@ examples:
<GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ memory-region = <&qdma0_buf>, <&qdma1_buf>;
+ memory-region-names = "qdma0-buf", "qdma1-buf";
+
airoha,npu = <&npu>;
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
index 7fe0352dff0f..7b6a2fde8175 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
@@ -23,6 +23,7 @@ properties:
- allwinner,sun20i-d1-emac
- allwinner,sun50i-h6-emac
- allwinner,sun50i-h616-emac0
+ - allwinner,sun55i-a523-emac0
- const: allwinner,sun50i-a64-emac
reg:
diff --git a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
index d02e9dd847ef..3ab60c70286f 100644
--- a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
@@ -48,6 +48,18 @@ properties:
description:
The GPIO number of the NXP chipset used for BT_WAKE_IN.
+ interrupts:
+ maxItems: 1
+ description:
+      Host wakeup by a falling-edge interrupt on this pin, which is
+      connected to the BT_WAKE_OUT pin of the NXP chipset.
+
+ interrupt-names:
+ items:
+ - const: wakeup
+
+ wakeup-source: true
+
nxp,wakeout-pin:
$ref: /schemas/types.yaml#/definitions/uint8
description:
@@ -61,6 +73,7 @@ unevaluatedProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
serial {
bluetooth {
compatible = "nxp,88w8987-bt";
@@ -70,5 +83,9 @@ examples:
nxp,wakein-pin = /bits/ 8 <18>;
nxp,wakeout-pin = /bits/ 8 <19>;
local-bd-address = [66 55 44 33 22 11];
+ interrupt-parent = <&gpio>;
+ interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "wakeup";
+ wakeup-source;
};
};
diff --git a/Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml b/Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml
index 660e2ca42daf..a3db6d594c8c 100644
--- a/Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml
+++ b/Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/net/brcm,asp-v2.0.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Broadcom ASP 2.0 Ethernet controller
+title: Broadcom ASP Ethernet controller
maintainers:
- Justin Chen <justin.chen@broadcom.com>
@@ -17,16 +17,16 @@ properties:
oneOf:
- items:
- enum:
+ - brcm,bcm74110-asp
+ - const: brcm,asp-v3.0
+ - items:
+ - enum:
- brcm,bcm74165b0-asp
- const: brcm,asp-v2.2
- items:
- enum:
- brcm,bcm74165-asp
- const: brcm,asp-v2.1
- - items:
- - enum:
- - brcm,bcm72165-asp
- - const: brcm,asp-v2.0
"#address-cells":
const: 1
@@ -39,11 +39,9 @@ properties:
ranges: true
interrupts:
- minItems: 1
items:
- description: RX/TX interrupt
- - description: Port 0 Wake-on-LAN
- - description: Port 1 Wake-on-LAN
+ - description: Wake-on-LAN interrupt
clocks:
maxItems: 1
@@ -106,16 +104,17 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
ethernet@9c00000 {
- compatible = "brcm,bcm72165-asp", "brcm,asp-v2.0";
+ compatible = "brcm,bcm74165-asp", "brcm,asp-v2.1";
reg = <0x9c00000 0x1fff14>;
- interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&intc GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+ <&aon_pm_l2_intc 14>;
ranges = <0x0 0x9c00000 0x1fff14>;
clocks = <&scmi 14>;
#address-cells = <1>;
#size-cells = <1>;
mdio@c614 {
- compatible = "brcm,asp-v2.0-mdio";
+ compatible = "brcm,asp-v2.1-mdio";
reg = <0xc614 0x8>;
reg-names = "mdio";
#address-cells = <1>;
@@ -127,7 +126,7 @@ examples:
};
mdio@ce14 {
- compatible = "brcm,asp-v2.0-mdio";
+ compatible = "brcm,asp-v2.1-mdio";
reg = <0xce14 0x8>;
reg-names = "mdio";
#address-cells = <1>;
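
For the new brcm,bcm74110-asp entry on asp-v3.0, which this change pairs with a
single RX/TX interrupt plus one Wake-on-LAN interrupt, a node might be shaped as
below; the values simply mirror the v2.1 example above and are not taken from a
real board:

    ethernet@9c00000 {
        compatible = "brcm,bcm74110-asp", "brcm,asp-v3.0";
        reg = <0x9c00000 0x1fff14>;
        ranges = <0x0 0x9c00000 0x1fff14>;
        interrupts-extended = <&intc GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,  /* RX/TX */
                              <&aon_pm_l2_intc 14>;                    /* Wake-on-LAN */
        clocks = <&scmi 14>;
        #address-cells = <1>;
        #size-cells = <1>;

        mdio@c614 {
            compatible = "brcm,asp-v3.0-mdio";
            reg = <0xc614 0x8>;
            reg-names = "mdio";
            #address-cells = <1>;
            #size-cells = <0>;
        };
    };
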
diff --git a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml
index 63bee5b542f5..43516dd357b8 100644
--- a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml
@@ -22,9 +22,9 @@ properties:
- brcm,genet-mdio-v3
- brcm,genet-mdio-v4
- brcm,genet-mdio-v5
- - brcm,asp-v2.0-mdio
- brcm,asp-v2.1-mdio
- brcm,asp-v2.2-mdio
+ - brcm,asp-v3.0-mdio
- brcm,unimac-mdio
- brcm,bcm6846-mdio
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml b/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml
index e0ec53bc10c6..1525a50ded47 100644
--- a/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml
+++ b/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/can/microchip,mcp2510.yaml#
+$id: http://devicetree.org/schemas/net/can/microchip,mcp2510.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Microchip MCP251X stand-alone CAN controller
diff --git a/Documentation/devicetree/bindings/net/can/nxp,sja1000.yaml b/Documentation/devicetree/bindings/net/can/nxp,sja1000.yaml
index 144a3785132c..ec0c2168e4b9 100644
--- a/Documentation/devicetree/bindings/net/can/nxp,sja1000.yaml
+++ b/Documentation/devicetree/bindings/net/can/nxp,sja1000.yaml
@@ -16,9 +16,7 @@ properties:
- nxp,sja1000
- technologic,sja1000
- items:
- - enum:
- - renesas,r9a06g032-sja1000 # RZ/N1D
- - renesas,r9a06g033-sja1000 # RZ/N1S
+ - const: renesas,r9a06g032-sja1000 # RZ/N1D
- const: renesas,rzn1-sja1000 # RZ/N1
reg:
diff --git a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
index f6884f6e59e7..f4ac21c68427 100644
--- a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
+++ b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
@@ -42,19 +42,80 @@ properties:
- renesas,r9a07g054-canfd # RZ/V2L
- const: renesas,rzg2l-canfd # RZ/G2L family
+ - const: renesas,r9a09g047-canfd # RZ/G3E
+
reg:
maxItems: 1
- interrupts: true
+ interrupts:
+ oneOf:
+ - items:
+ - description: Channel interrupt
+ - description: Global interrupt
+ - items:
+ - description: CAN global error interrupt
+ - description: CAN receive FIFO interrupt
+ - description: CAN0 error interrupt
+ - description: CAN0 transmit interrupt
+ - description: CAN0 transmit/receive FIFO receive completion interrupt
+ - description: CAN1 error interrupt
+ - description: CAN1 transmit interrupt
+ - description: CAN1 transmit/receive FIFO receive completion interrupt
+ - description: CAN2 error interrupt
+ - description: CAN2 transmit interrupt
+ - description: CAN2 transmit/receive FIFO receive completion interrupt
+ - description: CAN3 error interrupt
+ - description: CAN3 transmit interrupt
+ - description: CAN3 transmit/receive FIFO receive completion interrupt
+ - description: CAN4 error interrupt
+ - description: CAN4 transmit interrupt
+ - description: CAN4 transmit/receive FIFO receive completion interrupt
+ - description: CAN5 error interrupt
+ - description: CAN5 transmit interrupt
+ - description: CAN5 transmit/receive FIFO receive completion interrupt
+ minItems: 8
+
+ interrupt-names:
+ oneOf:
+ - items:
+ - const: ch_int
+ - const: g_int
+ - items:
+ - const: g_err
+ - const: g_recc
+ - const: ch0_err
+ - const: ch0_rec
+ - const: ch0_trx
+ - const: ch1_err
+ - const: ch1_rec
+ - const: ch1_trx
+ - const: ch2_err
+ - const: ch2_rec
+ - const: ch2_trx
+ - const: ch3_err
+ - const: ch3_rec
+ - const: ch3_trx
+ - const: ch4_err
+ - const: ch4_rec
+ - const: ch4_trx
+ - const: ch5_err
+ - const: ch5_rec
+ - const: ch5_trx
+ minItems: 8
clocks:
maxItems: 3
clock-names:
- items:
- - const: fck
- - const: canfd
- - const: can_clk
+ oneOf:
+ - items:
+ - const: fck
+ - const: canfd
+ - const: can_clk
+ - items:
+ - const: fck
+ - const: ram_clk
+ - const: can_clk
power-domains:
maxItems: 1
@@ -117,48 +178,38 @@ allOf:
then:
properties:
interrupts:
- items:
- - description: CAN global error interrupt
- - description: CAN receive FIFO interrupt
- - description: CAN0 error interrupt
- - description: CAN0 transmit interrupt
- - description: CAN0 transmit/receive FIFO receive completion interrupt
- - description: CAN1 error interrupt
- - description: CAN1 transmit interrupt
- - description: CAN1 transmit/receive FIFO receive completion interrupt
+ maxItems: 8
interrupt-names:
- items:
- - const: g_err
- - const: g_recc
- - const: ch0_err
- - const: ch0_rec
- - const: ch0_trx
- - const: ch1_err
- - const: ch1_rec
- - const: ch1_trx
+ maxItems: 8
resets:
+ minItems: 2
maxItems: 2
reset-names:
- items:
- - const: rstp_n
- - const: rstc_n
+ minItems: 2
+ maxItems: 2
required:
- reset-names
- else:
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rcar-gen3-canfd
+ - renesas,rcar-gen4-canfd
+ then:
properties:
interrupts:
- items:
- - description: Channel interrupt
- - description: Global interrupt
+ minItems: 2
+ maxItems: 2
interrupt-names:
- items:
- - const: ch_int
- - const: g_int
+ minItems: 2
+ maxItems: 2
resets:
maxItems: 1
@@ -167,20 +218,54 @@ allOf:
properties:
compatible:
contains:
+ const: renesas,r9a09g047-canfd
+ then:
+ properties:
+ interrupts:
+ minItems: 20
+
+ interrupt-names:
+ minItems: 20
+
+ resets:
+ minItems: 2
+ maxItems: 2
+
+ reset-names:
+ minItems: 2
+ maxItems: 2
+
+ required:
+ - reset-names
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rcar-gen3-canfd
+ - renesas,rzg2l-canfd
+ then:
+ patternProperties:
+ "^channel[2-7]$": false
+
+ - if:
+ properties:
+ compatible:
+ contains:
const: renesas,r8a779h0-canfd
then:
patternProperties:
"^channel[4-7]$": false
- else:
- if:
- not:
- properties:
- compatible:
- contains:
- const: renesas,rcar-gen4-canfd
- then:
- patternProperties:
- "^channel[2-7]$": false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a09g047-canfd
+ then:
+ patternProperties:
+ "^channel[6-7]$": false
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
index ea979bcae1d6..51205f9f2985 100644
--- a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
@@ -96,6 +96,10 @@ properties:
Built-in switch of the Airoha EN7581 SoC
const: airoha,en7581-switch
+ - description:
+ Built-in switch of the Airoha AN7583 SoC
+ const: airoha,an7583-switch
+
reg:
maxItems: 1
@@ -291,6 +295,7 @@ allOf:
enum:
- mediatek,mt7988-switch
- airoha,en7581-switch
+ - airoha,an7583-switch
then:
$ref: "#/$defs/mt7530-dsa-port"
properties:
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index 45819b235800..7cbf11bbe99c 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -16,30 +16,6 @@ properties:
label:
description: Human readable label on a port of a box.
- local-mac-address:
- description:
- Specifies the MAC address that was assigned to the network device.
- $ref: /schemas/types.yaml#/definitions/uint8-array
- minItems: 6
- maxItems: 6
-
- mac-address:
- description:
- Specifies the MAC address that was last used by the boot
- program; should be used in cases where the MAC address assigned
- to the device by the boot program is different from the
- local-mac-address property.
- $ref: /schemas/types.yaml#/definitions/uint8-array
- minItems: 6
- maxItems: 6
-
- max-frame-size:
- $ref: /schemas/types.yaml#/definitions/uint32
- description:
- Maximum transfer unit (IEEE defined MTU), rather than the
- maximum frame size (there\'s contradiction in the Devicetree
- Specification).
-
max-speed:
$ref: /schemas/types.yaml#/definitions/uint32
description:
@@ -74,19 +50,17 @@ properties:
- rev-rmii
- moca
- # RX and TX delays are added by the MAC when required
+ # RX and TX delays are provided by the PCB. See below
- rgmii
- # RGMII with internal RX and TX delays provided by the PHY,
- # the MAC should not add the RX or TX delays in this case
+ # RX and TX delays are not provided by the PCB. This is the most
+ # frequent case. See below
- rgmii-id
- # RGMII with internal RX delay provided by the PHY, the MAC
- # should not add an RX delay in this case
+ # TX delay is provided by the PCB. See below
- rgmii-rxid
- # RGMII with internal TX delay provided by the PHY, the MAC
- # should not add an TX delay in this case
+ # RX delay is provided by the PCB. See below
- rgmii-txid
- rtbi
- smii
@@ -197,7 +171,7 @@ properties:
description:
Link speed.
$ref: /schemas/types.yaml#/definitions/uint32
- enum: [10, 100, 1000, 2500, 10000]
+ enum: [10, 100, 1000, 2500, 5000, 10000]
full-duplex:
$ref: /schemas/types.yaml#/definitions/flag
@@ -262,6 +236,7 @@ dependencies:
pcs-handle-names: [pcs-handle]
allOf:
+ - $ref: /schemas/net/network-class.yaml#
- if:
properties:
phy-mode:
@@ -286,4 +261,89 @@ allOf:
additionalProperties: true
+# Informative
+# ===========
+#
+# 'phy-mode' & 'phy-connection-type' properties 'rgmii', 'rgmii-id',
+# 'rgmii-rxid', and 'rgmii-txid' are frequently used wrongly by
+# developers. This informative section clarifies their usage.
+#
+# The RGMII specification requires a 2ns delay between the data and
+# clock signals on the RGMII bus. How this delay is implemented is not
+# specified.
+#
+# One option is to make the clock traces on the PCB longer than the
+# data traces. A sufficiently difference in length can provide the 2ns
+# delay. If both the RX and TX delays are implemented in this manner,
+# 'rgmii' should be used, so indicating the PCB adds the delays.
+#
+# If the PCB does not add these delays via extra long traces,
+# 'rgmii-id' should be used. Here, 'id' refers to 'internal delay',
+# where either the MAC or PHY adds the delay.
+#
+# If only one of the two delays is implemented via extra long clock
+# lines, either 'rgmii-rxid' or 'rgmii-txid' should be used,
+# indicating the MAC or PHY should implement one of the delays
+# internally, while the PCB implements the other delay.
+#
+# Device Tree describes hardware, and in this case, it describes the
+# PCB between the MAC and the PHY, if the PCB implements delays or
+# not.
+#
+# In practice, very few PCBs make use of extra long clock lines. Hence
+# any RGMII phy mode other than 'rgmii-id' is probably wrong, and is
+# unlikely to be accepted during review without details provided in
+# the commit description and comments in the .dts file.
+#
+# When the PCB does not implement the delays, the MAC or PHY must. As
+# such, this is software configuration, and so not described in Device
+# Tree.
+#
+# The following describes how Linux implements the configuration of
+# the MAC and PHY to add these delays when the PCB does not. As stated
+# above, developers often get this wrong, and the aim of this section
+# is to reduce the frequency of these errors by Linux developers. Other
+# users of the Device Tree may implement it differently, and still be
+# consistent with both the normative and informative description
+# above.
+#
+# By default in Linux, when using phylib/phylink, the MAC is expected
+# to read the 'phy-mode' from Device Tree, not implement any delays,
+# and pass the value to the PHY. The PHY will then implement delays as
+# specified by the 'phy-mode'. The PHY should always be reconfigured
+# to implement the needed delays, replacing any setting performed by
+# strapping or the bootloader, etc.
+#
+# Experience to date is that all PHYs which implement RGMII also
+# implement the ability to add or not add the needed delays. Hence
+# this default is expected to work in all cases. Ignoring this default
+# is likely to be questioned by reviewers, and requires a strong argument
+# to be accepted.
+#
+# There are a small number of cases where the MAC has hard coded
+# delays which cannot be disabled. The 'phy-mode' only describes the
+# PCB. The inability to disable the delays in the MAC does not change
+# the meaning of 'phy-mode'. It does however mean that a 'phy-mode' of
+# 'rgmii' is now invalid; it cannot be supported, since both the PCB
+# and the MAC and PHY adding delays cannot result in a functional
+# link. Thus the MAC should report a fatal error for any modes which
+# cannot be supported. When the MAC implements the delay, it must
+# ensure that the PHY does not also implement the same delay. So it
+# must modify the phy-mode it passes to the PHY, removing the delay it
+# has added. Failure to remove the delay will result in a
+# non-functioning link.
+#
+# Sometimes there is a need to fine tune the delays. Often the MAC or
+# PHY can perform this fine tuning. In the MAC node, the Device Tree
+# properties 'rx-internal-delay-ps' and 'tx-internal-delay-ps' should
+# be used to indicate fine tuning performed by the MAC. The values
+# expected here are small. A value of 2000ps, i.e. 2ns, and a phy-mode
+# of 'rgmii' will not be accepted by reviewers.
+#
+# If the PHY is to perform fine tuning, the properties
+# 'rx-internal-delay-ps' and 'tx-internal-delay-ps' in the PHY node
+# should be used. When the PHY is implementing delays, e.g. 'rgmii-id'
+# these properties should have a value near to 2000ps. If the PCB is
+# implementing delays, e.g. 'rgmii', a small value can be used to fine
+# tune the delay added by the PCB.
...
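
To illustrate the common case described in the informative text above (no
extra-long clock traces on the PCB, so the PHY adds the delays and may fine tune
them), a sketch with a made-up MAC compatible and placeholder values:

    ethernet@30be0000 {
        compatible = "vendor,soc-ethernet";   /* hypothetical MAC */
        reg = <0x30be0000 0x10000>;
        phy-mode = "rgmii-id";                /* PCB adds no delays */
        phy-handle = <&ethphy0>;

        mdio {
            #address-cells = <1>;
            #size-cells = <0>;

            ethphy0: ethernet-phy@0 {
                reg = <0>;
                /* optional fine tuning of the ~2ns delays added by the PHY */
                rx-internal-delay-ps = <1800>;
                tx-internal-delay-ps = <2000>;
            };
        };
    };
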
diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
index 824bbe4333b7..71e2cd32580f 100644
--- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
@@ -238,6 +238,16 @@ properties:
peak-to-peak specified in ANSI X3.263. When omitted, the PHYs default
will be left as is.
+ mac-termination-ohms:
+ maximum: 200
+ description:
+ The xMII signals need series termination on the driver side to match both
+ the output driver impedance and the line characteristic impedance, to
+ prevent reflections and EMI problems. Select a resistance value which is
+      supported by the built-in resistors of the PHY, otherwise the resistors may
+      have to be placed on board. When omitted, the PHY's default will be left as
+      is.
+
leds:
type: object
diff --git a/Documentation/devicetree/bindings/net/network-class.yaml b/Documentation/devicetree/bindings/net/network-class.yaml
new file mode 100644
index 000000000000..06461fb92eb8
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/network-class.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/network-class.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Network Class Common Properties
+
+maintainers:
+ - Devicetree Specification Mailing List <devicetree-spec@vger.kernel.org>
+
+properties:
+ address-bits:
+ description:
+      Specifies the number of address bits required to address the device
+ described by this node, e.g. size of the MAC address.
+ default: 48
+ const: 48
+
+ local-mac-address:
+ description:
+ Specifies MAC address that was assigned to the network device described by
+ the node containing this property.
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ minItems: 6
+ maxItems: 6
+
+ mac-address:
+ description:
+ Specifies the MAC address that was last used by the boot program. This
+ property should be used in cases where the MAC address assigned to the
+ device by the boot program is different from the
+ local-mac-address property. This property shall be used only if the value
+      differs from the local-mac-address property value.
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ minItems: 6
+ maxItems: 6
+
+ max-frame-size:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Maximum transfer unit (IEEE defined MTU), rather than the
+      maximum frame size (there's a contradiction in the Devicetree
+ Specification).
+
+additionalProperties: true
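
As a brief illustration of the two MAC address properties carried by this common
schema, using a fictitious controller node and addresses:

    ethernet@ff0e0000 {
        compatible = "vendor,soc-ethernet";   /* hypothetical */
        reg = <0xff0e0000 0x1000>;
        /* address assigned to this interface, e.g. provisioned in an EEPROM */
        local-mac-address = [00 11 22 33 44 55];
        /* address last used by the boot program, only if it differs */
        mac-address = [66 55 44 33 22 11];
    };
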
diff --git a/Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml b/Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml
new file mode 100644
index 000000000000..c498a9999289
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml
@@ -0,0 +1,203 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/renesas,r9a09g057-gbeth.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: GBETH glue layer for Renesas RZ/V2H(P) (and similar SoCs)
+
+maintainers:
+ - Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,r9a09g056-gbeth
+ - renesas,r9a09g057-gbeth
+ - renesas,rzv2h-gbeth
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r9a09g056-gbeth # RZ/V2N
+ - renesas,r9a09g057-gbeth # RZ/V2H(P)
+ - const: renesas,rzv2h-gbeth
+ - const: snps,dwmac-5.20
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: CSR clock
+ - description: AXI system clock
+ - description: PTP clock
+ - description: TX clock
+ - description: RX clock
+ - description: TX clock phase-shifted by 180 degrees
+ - description: RX clock phase-shifted by 180 degrees
+
+ clock-names:
+ items:
+ - const: stmmaceth
+ - const: pclk
+ - const: ptp_ref
+ - const: tx
+ - const: rx
+ - const: tx-180
+ - const: rx-180
+
+ interrupts:
+ minItems: 11
+
+ interrupt-names:
+ items:
+ - const: macirq
+ - const: eth_wake_irq
+ - const: eth_lpi
+ - const: rx-queue-0
+ - const: rx-queue-1
+ - const: rx-queue-2
+ - const: rx-queue-3
+ - const: tx-queue-0
+ - const: tx-queue-1
+ - const: tx-queue-2
+ - const: tx-queue-3
+
+ resets:
+ items:
+ - description: AXI power-on system reset
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - resets
+
+allOf:
+ - $ref: snps,dwmac.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/renesas-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ ethernet@15c30000 {
+ compatible = "renesas,r9a09g057-gbeth", "renesas,rzv2h-gbeth", "snps,dwmac-5.20";
+ reg = <0x15c30000 0x10000>;
+ clocks = <&cpg CPG_MOD 0xbd>, <&cpg CPG_MOD 0xbc>,
+ <&ptp_clock>, <&cpg CPG_MOD 0xb8>,
+ <&cpg CPG_MOD 0xb9>, <&cpg CPG_MOD 0xba>,
+ <&cpg CPG_MOD 0xbb>;
+ clock-names = "stmmaceth", "pclk", "ptp_ref",
+ "tx", "rx", "tx-180", "rx-180";
+ resets = <&cpg 0xb0>;
+ interrupts = <GIC_SPI 765 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 767 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 766 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 772 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 773 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 774 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 745 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 768 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 769 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 770 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 771 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_wake_irq", "eth_lpi",
+ "rx-queue-0", "rx-queue-1", "rx-queue-2",
+ "rx-queue-3", "tx-queue-0", "tx-queue-1",
+ "tx-queue-2", "tx-queue-3";
+ phy-mode = "rgmii-id";
+ snps,multicast-filter-bins = <256>;
+ snps,perfect-filter-entries = <128>;
+ rx-fifo-depth = <8192>;
+ tx-fifo-depth = <8192>;
+ snps,fixed-burst;
+ snps,force_thresh_dma_mode;
+ snps,axi-config = <&stmmac_axi_setup>;
+ snps,mtl-rx-config = <&mtl_rx_setup>;
+ snps,mtl-tx-config = <&mtl_tx_setup>;
+ snps,txpbl = <32>;
+ snps,rxpbl = <32>;
+ phy-handle = <&phy0>;
+
+ stmmac_axi_setup: stmmac-axi-config {
+ snps,lpi_en;
+ snps,wr_osr_lmt = <0xf>;
+ snps,rd_osr_lmt = <0xf>;
+ snps,blen = <16 8 4 0 0 0 0>;
+ };
+
+ mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <4>;
+ snps,rx-sched-sp;
+
+ queue0 {
+ snps,dcb-algorithm;
+ snps,priority = <0x1>;
+ snps,map-to-dma-channel = <0>;
+ };
+
+ queue1 {
+ snps,dcb-algorithm;
+ snps,priority = <0x2>;
+ snps,map-to-dma-channel = <1>;
+ };
+
+ queue2 {
+ snps,dcb-algorithm;
+ snps,priority = <0x4>;
+ snps,map-to-dma-channel = <2>;
+ };
+
+ queue3 {
+ snps,dcb-algorithm;
+ snps,priority = <0x8>;
+ snps,map-to-dma-channel = <3>;
+ };
+ };
+
+ mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <4>;
+
+ queue0 {
+ snps,dcb-algorithm;
+ snps,priority = <0x1>;
+ };
+
+ queue1 {
+ snps,dcb-algorithm;
+ snps,priority = <0x2>;
+ };
+
+ queue2 {
+ snps,dcb-algorithm;
+ snps,priority = <0x4>;
+ };
+
+ queue3 {
+ snps,dcb-algorithm;
+ snps,priority = <0x1>;
+ };
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 78b3030dc56d..90b79283e228 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -75,6 +75,7 @@ properties:
- qcom,sm8150-ethqos
- renesas,r9a06g032-gmac
- renesas,rzn1-gmac
+ - renesas,rzv2h-gbeth
- rockchip,px30-gmac
- rockchip,rk3128-gmac
- rockchip,rk3228-gmac
@@ -114,19 +115,25 @@ properties:
interrupts:
minItems: 1
- items:
- - description: Combined signal for various interrupt events
- - description: The interrupt to manage the remote wake-up packet detection
- - description: The interrupt that occurs when Rx exits the LPI state
- - description: The interrupt that occurs when HW safety error triggered
+ maxItems: 11
interrupt-names:
minItems: 1
+ maxItems: 11
items:
- - const: macirq
- - enum: [eth_wake_irq, eth_lpi, sfty]
- - enum: [eth_wake_irq, eth_lpi, sfty]
- - enum: [eth_wake_irq, eth_lpi, sfty]
+ oneOf:
+ - description: Combined signal for various interrupt events
+ const: macirq
+ - description: The interrupt to manage the remote wake-up packet detection
+ const: eth_wake_irq
+ - description: The interrupt that occurs when Rx exits the LPI state
+ const: eth_lpi
+ - description: The interrupt that occurs when HW safety error triggered
+ const: sfty
+ - description: Per channel receive completion interrupt
+ pattern: '^rx-queue-[0-3]$'
+ - description: Per channel transmit completion interrupt
+ pattern: '^tx-queue-[0-3]$'
clocks:
minItems: 1
@@ -703,7 +710,7 @@ examples:
};
};
- mdio0 {
+ mdio {
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,dwmac-mdio";
diff --git a/Documentation/devicetree/bindings/net/ti,dp83822.yaml b/Documentation/devicetree/bindings/net/ti,dp83822.yaml
index 50c24248df26..28a0bddb9af9 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83822.yaml
+++ b/Documentation/devicetree/bindings/net/ti,dp83822.yaml
@@ -122,6 +122,9 @@ properties:
- free-running
- recovered
+ mac-termination-ohms:
+ enum: [43, 44, 46, 48, 50, 53, 55, 58, 61, 65, 69, 73, 78, 84, 91, 99]
+
required:
- reg
@@ -137,6 +140,7 @@ examples:
rx-internal-delay-ps = <1>;
tx-internal-delay-ps = <1>;
ti,gpio2-clk-out = "xi";
+ mac-termination-ohms = <43>;
};
};
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
index b11894fbaec4..7b3d948f187d 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
@@ -143,6 +143,8 @@ properties:
label:
description: label associated with this port
+ fixed-link: true
+
ti,mac-only:
$ref: /schemas/types.yaml#/definitions/flag
description:
diff --git a/Documentation/devicetree/bindings/net/vertexcom-mse102x.yaml b/Documentation/devicetree/bindings/net/vertexcom-mse102x.yaml
index 4158673f723c..8359de7ad272 100644
--- a/Documentation/devicetree/bindings/net/vertexcom-mse102x.yaml
+++ b/Documentation/devicetree/bindings/net/vertexcom-mse102x.yaml
@@ -63,7 +63,7 @@ examples:
compatible = "vertexcom,mse1021";
reg = <0>;
interrupt-parent = <&gpio>;
- interrupts = <23 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <23 IRQ_TYPE_LEVEL_HIGH>;
spi-cpha;
spi-cpol;
spi-max-frequency = <7142857>;
diff --git a/Documentation/devicetree/bindings/net/via,vt8500-rhine.yaml b/Documentation/devicetree/bindings/net/via,vt8500-rhine.yaml
new file mode 100644
index 000000000000..e663d5a2f014
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/via,vt8500-rhine.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/via,vt8500-rhine.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: VIA Rhine 10/100 Network Controller
+
+description:
+ VIA's Ethernet controller integrated into VIA VT8500,
+ WonderMedia WM8950 and related SoCs
+
+maintainers:
+ - Alexey Charkov <alchark@gmail.com>
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+properties:
+ compatible:
+ const: via,vt8500-rhine
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - reg
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ ethernet@d8004000 {
+ compatible = "via,vt8500-rhine";
+ reg = <0xd8004000 0x100>;
+ interrupts = <10>;
+ };
diff --git a/Documentation/devicetree/bindings/net/via-rhine.txt b/Documentation/devicetree/bindings/net/via-rhine.txt
deleted file mode 100644
index 334eca2bf937..000000000000
--- a/Documentation/devicetree/bindings/net/via-rhine.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-* VIA Rhine 10/100 Network Controller
-
-Required properties:
-- compatible : Should be "via,vt8500-rhine" for integrated
- Rhine controllers found in VIA VT8500, WonderMedia WM8950
- and similar. These are listed as 1106:3106 rev. 0x84 on the
- virtual PCI bus under vendor-provided kernels
-- reg : Address and length of the io space
-- interrupts : Should contain the controller interrupt line
-
-Examples:
-
-ethernet@d8004000 {
- compatible = "via,vt8500-rhine";
- reg = <0xd8004000 0x100>;
- interrupts = <10>;
-};
diff --git a/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml b/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
index a3607d55ef36..7c8100e59a6c 100644
--- a/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
@@ -16,7 +16,7 @@ description:
binding.
allOf:
- - $ref: ieee80211.yaml#
+ - $ref: /schemas/net/wireless/wireless-controller.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml
index 9e557cb838c7..dc68dd59988f 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml
@@ -21,6 +21,12 @@ properties:
reg:
maxItems: 1
+ firmware-name:
+ maxItems: 1
+ description:
+      If present, a board- or platform-specific string used to look up
+      use-case-specific firmware files for the device.
+
vddaon-supply:
description: VDD_AON supply regulator handle
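
A minimal sketch of the new firmware-name property on an ath12k PCI child node;
the PCI device-ID compatible, regulator phandle and variant string are assumptions
for illustration only:

    wifi@0 {
        compatible = "pci17cb,1107";            /* assumed WCN7850 PCI device ID */
        reg = <0x0 0x0 0x0 0x0 0x0>;
        firmware-name = "board-variant-xyz";    /* hypothetical lookup string */
        vddaon-supply = <&vreg_pmu_aon_0p59>;   /* hypothetical regulator */
    };
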
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ipq5332-wifi.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ipq5332-wifi.yaml
new file mode 100644
index 000000000000..363a0ecb6ad9
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ipq5332-wifi.yaml
@@ -0,0 +1,315 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/wireless/qcom,ipq5332-wifi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies ath12k wireless devices (AHB)
+
+maintainers:
+ - Jeff Johnson <jjohnson@kernel.org>
+
+description:
+ Qualcomm Technologies IEEE 802.11be AHB devices.
+
+properties:
+ compatible:
+ enum:
+ - qcom,ipq5332-wifi
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: XO clock used for copy engine
+
+ clock-names:
+ items:
+ - const: xo
+
+ interrupts:
+ items:
+ - description: Fatal interrupt
+ - description: Ready interrupt
+ - description: Spawn acknowledge interrupt
+ - description: Stop acknowledge interrupt
+ - description: misc-pulse1 interrupt events
+ - description: misc-latch interrupt events
+ - description: sw exception interrupt events
+ - description: interrupt event for ring CE0
+ - description: interrupt event for ring CE1
+ - description: interrupt event for ring CE2
+ - description: interrupt event for ring CE3
+ - description: interrupt event for ring CE4
+ - description: interrupt event for ring CE5
+ - description: interrupt event for ring CE6
+ - description: interrupt event for ring CE7
+ - description: interrupt event for ring CE8
+ - description: interrupt event for ring CE9
+ - description: interrupt event for ring CE10
+ - description: interrupt event for ring CE11
+ - description: interrupt event for ring host2wbm-desc-feed
+ - description: interrupt event for ring host2reo-re-injection
+ - description: interrupt event for ring host2reo-command
+ - description: interrupt event for ring host2rxdma-monitor-ring1
+ - description: interrupt event for ring reo2ost-exception
+ - description: interrupt event for ring wbm2host-rx-release
+ - description: interrupt event for ring reo2host-status
+ - description: interrupt event for ring reo2host-destination-ring4
+ - description: interrupt event for ring reo2host-destination-ring3
+ - description: interrupt event for ring reo2host-destination-ring2
+ - description: interrupt event for ring reo2host-destination-ring1
+ - description: interrupt event for ring rxdma2host-monitor-destination-mac3
+ - description: interrupt event for ring rxdma2host-monitor-destination-mac2
+ - description: interrupt event for ring rxdma2host-monitor-destination-mac1
+ - description: interrupt event for ring host2rxdma-host-buf-ring-mac3
+ - description: interrupt event for ring host2rxdma-host-buf-ring-mac2
+ - description: interrupt event for ring host2rxdma-host-buf-ring-mac1
+ - description: interrupt event for ring host2tcl-input-ring4
+ - description: interrupt event for ring host2tcl-input-ring3
+ - description: interrupt event for ring host2tcl-input-ring2
+ - description: interrupt event for ring host2tcl-input-ring1
+ - description: interrupt event for ring wbm2host-tx-completions-ring4
+ - description: interrupt event for ring wbm2host-tx-completions-ring3
+ - description: interrupt event for ring wbm2host-tx-completions-ring2
+ - description: interrupt event for ring wbm2host-tx-completions-ring1
+ - description: interrupt event for ring host2tx-monitor-ring1
+ - description: interrupt event for ring txmon2host-monitor-destination-mac3
+ - description: interrupt event for ring txmon2host-monitor-destination-mac2
+ - description: interrupt event for ring txmon2host-monitor-destination-mac1
+ - description: interrupt event for umac-reset
+
+ interrupt-names:
+ items:
+ - const: fatal
+ - const: ready
+ - const: spawn
+ - const: stop-ack
+ - const: misc-pulse1
+ - const: misc-latch
+ - const: sw-exception
+ - const: ce0
+ - const: ce1
+ - const: ce2
+ - const: ce3
+ - const: ce4
+ - const: ce5
+ - const: ce6
+ - const: ce7
+ - const: ce8
+ - const: ce9
+ - const: ce10
+ - const: ce11
+ - const: host2wbm-desc-feed
+ - const: host2reo-re-injection
+ - const: host2reo-command
+ - const: host2rxdma-monitor-ring1
+ - const: reo2ost-exception
+ - const: wbm2host-rx-release
+ - const: reo2host-status
+ - const: reo2host-destination-ring4
+ - const: reo2host-destination-ring3
+ - const: reo2host-destination-ring2
+ - const: reo2host-destination-ring1
+ - const: rxdma2host-monitor-destination-mac3
+ - const: rxdma2host-monitor-destination-mac2
+ - const: rxdma2host-monitor-destination-mac1
+ - const: host2rxdma-host-buf-ring-mac3
+ - const: host2rxdma-host-buf-ring-mac2
+ - const: host2rxdma-host-buf-ring-mac1
+ - const: host2tcl-input-ring4
+ - const: host2tcl-input-ring3
+ - const: host2tcl-input-ring2
+ - const: host2tcl-input-ring1
+ - const: wbm2host-tx-completions-ring4
+ - const: wbm2host-tx-completions-ring3
+ - const: wbm2host-tx-completions-ring2
+ - const: wbm2host-tx-completions-ring1
+ - const: host2tx-monitor-ring1
+ - const: txmon2host-monitor-destination-mac3
+ - const: txmon2host-monitor-destination-mac2
+ - const: txmon2host-monitor-destination-mac1
+ - const: umac-reset
+
+ memory-region:
+ description:
+ Memory regions used by the ath12k firmware.
+ items:
+ - description: Q6 memory region
+ - description: m3 dump memory region
+ - description: Q6 caldata memory region
+ - description: Multi Link Operation (MLO) Global memory region
+
+ memory-region-names:
+ items:
+ - const: q6-region
+ - const: m3-dump
+ - const: q6-caldb
+ - const: mlo-global-mem
+
+ qcom,calibration-variant:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ String to uniquely identify variant of the calibration data for designs
+ with colliding bus and device ids
+
+ qcom,rproc:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+      Phandle to the Qualcomm Hexagon DSP (Q6 remote processor), which is utilized
+      for offloading WiFi processing tasks; this Q6 remote processor operates in
+      conjunction with WiFi.
+
+ qcom,smem-states:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: States used by the AP to signal the remote processor
+ items:
+ - description: Shutdown WCSS pd
+ - description: Stop WCSS pd
+ - description: Spawn WCSS pd
+
+ qcom,smem-state-names:
+ description:
+ Names of the states used by the AP to signal the remote processor
+ items:
+ - const: shutdown
+ - const: stop
+ - const: spawn
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - memory-region
+ - memory-region-names
+ - qcom,rproc
+ - qcom,smem-states
+ - qcom,smem-state-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/qcom,ipq5332-gcc.h>
+
+ wifi0: wifi@c000000 {
+ compatible = "qcom,ipq5332-wifi";
+ reg = <0x0c000000 0x1000000>;
+ clocks = <&gcc GCC_XO_CLK>;
+ clock-names = "xo";
+ interrupts-extended = <&wcss_smp2p_in 8 IRQ_TYPE_NONE>,
+ <&wcss_smp2p_in 9 IRQ_TYPE_NONE>,
+ <&wcss_smp2p_in 12 IRQ_TYPE_NONE>,
+ <&wcss_smp2p_in 11 IRQ_TYPE_NONE>,
+ <&intc GIC_SPI 559 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 560 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 561 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 422 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 423 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 424 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 425 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 426 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 427 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 428 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 429 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 430 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 431 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 432 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 433 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 491 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 495 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 493 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 544 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 457 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 497 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 454 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 453 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 452 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 451 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 488 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 488 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 484 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 554 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 554 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 549 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 507 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 500 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 499 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 498 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 450 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 449 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 447 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 543 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 486 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 486 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 482 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 419 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "fatal",
+ "ready",
+ "spawn",
+ "stop-ack",
+ "misc-pulse1",
+ "misc-latch",
+ "sw-exception",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring4",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "host2tx-monitor-ring1",
+ "txmon2host-monitor-destination-mac3",
+ "txmon2host-monitor-destination-mac2",
+ "txmon2host-monitor-destination-mac1",
+ "umac-reset";
+
+ memory-region = <&q6_region>, <&m3_dump>, <&q6_caldb>, <&mlo_mem>;
+ memory-region-names = "q6-region", "m3-dump", "q6-caldb", "mlo-global-mem";
+ qcom,calibration-variant = "RDP441_1";
+ qcom,rproc = <&q6v5_wcss>;
+ qcom,smem-states = <&wcss_smp2p_out 8>,
+ <&wcss_smp2p_out 9>,
+ <&wcss_smp2p_out 10>;
+ qcom,smem-state-names = "shutdown",
+ "stop",
+ "spawn";
+ };
diff --git a/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml b/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
index 84e5659e50ef..6c0888ae4c4e 100644
--- a/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
@@ -71,15 +71,12 @@ properties:
"Platform Data Set" in Silabs jargon). Default depends of "compatible"
string. For "silabs,wf200", the default is 'wf200.pds'.
- local-mac-address: true
-
- mac-address: true
-
required:
- compatible
- reg
allOf:
+ - $ref: /schemas/net/wireless/wireless-controller.yaml#
- $ref: /schemas/spi/spi-peripheral-props.yaml#
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/net/wireless/wireless-controller.yaml b/Documentation/devicetree/bindings/net/wireless/wireless-controller.yaml
new file mode 100644
index 000000000000..7379f6c1aa05
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/wireless-controller.yaml
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/wireless/wireless-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Wireless Controller Common Properties
+
+maintainers:
+ - Lorenzo Bianconi <lorenzo@kernel.org>
+
+properties:
+ $nodename:
+ pattern: "^wifi(@.*)?$"
+
+allOf:
+ - $ref: ieee80211.yaml#
+ - $ref: /schemas/net/network-class.yaml#
+
+additionalProperties: true
+
+...
+
diff --git a/Documentation/devicetree/bindings/numa.txt b/Documentation/devicetree/bindings/numa.txt
deleted file mode 100644
index 42f282c2f3cc..000000000000
--- a/Documentation/devicetree/bindings/numa.txt
+++ /dev/null
@@ -1,319 +0,0 @@
-==============================================================================
-NUMA binding description.
-==============================================================================
-
-==============================================================================
-1 - Introduction
-==============================================================================
-
-Systems employing a Non Uniform Memory Access (NUMA) architecture contain
-collections of hardware resources including processors, memory, and I/O buses,
-that comprise what is commonly known as a NUMA node.
-Processor accesses to memory within the local NUMA node is generally faster
-than processor accesses to memory outside of the local NUMA node.
-DT defines interfaces that allow the platform to convey NUMA node
-topology information to OS.
-
-==============================================================================
-2 - numa-node-id
-==============================================================================
-
-For the purpose of identification, each NUMA node is associated with a unique
-token known as a node id. For the purpose of this binding
-a node id is a 32-bit integer.
-
-A device node is associated with a NUMA node by the presence of a
-numa-node-id property which contains the node id of the device.
-
-Example:
- /* numa node 0 */
- numa-node-id = <0>;
-
- /* numa node 1 */
- numa-node-id = <1>;
-
-==============================================================================
-3 - distance-map
-==============================================================================
-
-The optional device tree node distance-map describes the relative
-distance (memory latency) between all numa nodes.
-
-- compatible : Should at least contain "numa-distance-map-v1".
-
-- distance-matrix
- This property defines a matrix to describe the relative distances
- between all numa nodes.
- It is represented as a list of node pairs and their relative distance.
-
- Note:
- 1. Each entry represents distance from first node to second node.
- The distances are equal in either direction.
- 2. The distance from a node to self (local distance) is represented
- with value 10 and all internode distance should be represented with
- a value greater than 10.
- 3. distance-matrix should have entries in lexicographical ascending
- order of nodes.
- 4. There must be only one device node distance-map which must
- reside in the root node.
- 5. If the distance-map node is not present, a default
- distance-matrix is used.
-
-Example:
- 4 nodes connected in mesh/ring topology as below,
-
- 0_______20______1
- | |
- | |
- 20 20
- | |
- | |
- |_______________|
- 3 20 2
-
- if relative distance for each hop is 20,
- then internode distance would be,
- 0 -> 1 = 20
- 1 -> 2 = 20
- 2 -> 3 = 20
- 3 -> 0 = 20
- 0 -> 2 = 40
- 1 -> 3 = 40
-
- and dt presentation for this distance matrix is,
-
- distance-map {
- compatible = "numa-distance-map-v1";
- distance-matrix = <0 0 10>,
- <0 1 20>,
- <0 2 40>,
- <0 3 20>,
- <1 0 20>,
- <1 1 10>,
- <1 2 20>,
- <1 3 40>,
- <2 0 40>,
- <2 1 20>,
- <2 2 10>,
- <2 3 20>,
- <3 0 20>,
- <3 1 40>,
- <3 2 20>,
- <3 3 10>;
- };
-
-==============================================================================
-4 - Empty memory nodes
-==============================================================================
-
-Empty memory nodes, which no memory resides in, are allowed. There are no
-device nodes for these empty memory nodes. However, the NUMA node IDs and
-distance maps are still valid and memory may be added into them through
-hotplug afterwards.
-
-Example:
-
- memory@0 {
- device_type = "memory";
- reg = <0x0 0x0 0x0 0x80000000>;
- numa-node-id = <0>;
- };
-
- memory@80000000 {
- device_type = "memory";
- reg = <0x0 0x80000000 0x0 0x80000000>;
- numa-node-id = <1>;
- };
-
- /* Empty memory node 2 and 3 */
- distance-map {
- compatible = "numa-distance-map-v1";
- distance-matrix = <0 0 10>,
- <0 1 20>,
- <0 2 40>,
- <0 3 20>,
- <1 0 20>,
- <1 1 10>,
- <1 2 20>,
- <1 3 40>,
- <2 0 40>,
- <2 1 20>,
- <2 2 10>,
- <2 3 20>,
- <3 0 20>,
- <3 1 40>,
- <3 2 20>,
- <3 3 10>;
- };
-
-==============================================================================
-5 - Example dts
-==============================================================================
-
-Dual socket system consists of 2 boards connected through ccn bus and
-each board having one socket/soc of 8 cpus, memory and pci bus.
-
- memory@c00000 {
- device_type = "memory";
- reg = <0x0 0xc00000 0x0 0x80000000>;
- /* node 0 */
- numa-node-id = <0>;
- };
-
- memory@10000000000 {
- device_type = "memory";
- reg = <0x100 0x0 0x0 0x80000000>;
- /* node 1 */
- numa-node-id = <1>;
- };
-
- cpus {
- #address-cells = <2>;
- #size-cells = <0>;
-
- cpu@0 {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0x0>;
- enable-method = "psci";
- /* node 0 */
- numa-node-id = <0>;
- };
- cpu@1 {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0x1>;
- enable-method = "psci";
- numa-node-id = <0>;
- };
- cpu@2 {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0x2>;
- enable-method = "psci";
- numa-node-id = <0>;
- };
- cpu@3 {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0x3>;
- enable-method = "psci";
- numa-node-id = <0>;
- };
- cpu@4 {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0x4>;
- enable-method = "psci";
- numa-node-id = <0>;
- };
- cpu@5 {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0x5>;
- enable-method = "psci";
- numa-node-id = <0>;
- };
- cpu@6 {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0x6>;
- enable-method = "psci";
- numa-node-id = <0>;
- };
- cpu@7 {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0x7>;
- enable-method = "psci";
- numa-node-id = <0>;
- };
- cpu@8 {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0x8>;
- enable-method = "psci";
- /* node 1 */
- numa-node-id = <1>;
- };
- cpu@9 {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0x9>;
- enable-method = "psci";
- numa-node-id = <1>;
- };
- cpu@a {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0xa>;
- enable-method = "psci";
- numa-node-id = <1>;
- };
- cpu@b {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0xb>;
- enable-method = "psci";
- numa-node-id = <1>;
- };
- cpu@c {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0xc>;
- enable-method = "psci";
- numa-node-id = <1>;
- };
- cpu@d {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0xd>;
- enable-method = "psci";
- numa-node-id = <1>;
- };
- cpu@e {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0xe>;
- enable-method = "psci";
- numa-node-id = <1>;
- };
- cpu@f {
- device_type = "cpu";
- compatible = "arm,armv8";
- reg = <0x0 0xf>;
- enable-method = "psci";
- numa-node-id = <1>;
- };
- };
-
- pcie0: pcie0@848000000000 {
- compatible = "arm,armv8";
- device_type = "pci";
- bus-range = <0 255>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0x8480 0x00000000 0 0x10000000>; /* Configuration space */
- ranges = <0x03000000 0x8010 0x00000000 0x8010 0x00000000 0x70 0x00000000>;
- /* node 0 */
- numa-node-id = <0>;
- };
-
- pcie1: pcie1@948000000000 {
- compatible = "arm,armv8";
- device_type = "pci";
- bus-range = <0 255>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0x9480 0x00000000 0 0x10000000>; /* Configuration space */
- ranges = <0x03000000 0x9010 0x00000000 0x9010 0x00000000 0x70 0x00000000>;
- /* node 1 */
- numa-node-id = <1>;
- };
-
- distance-map {
- compatible = "numa-distance-map-v1";
- distance-matrix = <0 0 10>,
- <0 1 20>,
- <1 1 10>;
- };
diff --git a/Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml b/Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml
index 8b3826243ddd..38e3ad50ff4f 100644
--- a/Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml
+++ b/Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml
@@ -27,7 +27,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32-array
items:
- minimum: 0
- maximum: 7
+ maximum: 31
description:
Offset in bit within the address range specified by reg.
- minimum: 1
diff --git a/Documentation/devicetree/bindings/nvmem/maxim,max77759-nvmem.yaml b/Documentation/devicetree/bindings/nvmem/maxim,max77759-nvmem.yaml
new file mode 100644
index 000000000000..1e3bd4433007
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/maxim,max77759-nvmem.yaml
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/maxim,max77759-nvmem.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Maxim Integrated MAX77759 Non Volatile Memory
+
+maintainers:
+ - André Draszik <andre.draszik@linaro.org>
+
+description: |
+ This module is part of the MAX77759 PMIC. For additional information, see
+ Documentation/devicetree/bindings/mfd/maxim,max77759.yaml.
+
+ The MAX77759 is a PMIC integrating, amongst others, Non Volatile Memory
+ (NVMEM) with 30 bytes of storage which can be used by software to store
+ information or communicate with a boot loader.
+
+properties:
+ compatible:
+ const: maxim,max77759-nvmem
+
+ wp-gpios: false
+
+required:
+ - compatible
+
+allOf:
+ - $ref: nvmem.yaml#
+
+unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
index 39c209249c9c..3f6dc6a3a9f1 100644
--- a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
+++ b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
@@ -19,6 +19,7 @@ properties:
- enum:
- qcom,apq8064-qfprom
- qcom,apq8084-qfprom
+ - qcom,ipq5018-qfprom
- qcom,ipq5332-qfprom
- qcom,ipq5424-qfprom
- qcom,ipq6018-qfprom
@@ -28,6 +29,8 @@ properties:
- qcom,msm8226-qfprom
- qcom,msm8916-qfprom
- qcom,msm8917-qfprom
+ - qcom,msm8937-qfprom
+ - qcom,msm8960-qfprom
- qcom,msm8974-qfprom
- qcom,msm8976-qfprom
- qcom,msm8996-qfprom
@@ -51,6 +54,7 @@ properties:
- qcom,sm8450-qfprom
- qcom,sm8550-qfprom
- qcom,sm8650-qfprom
+ - qcom,x1e80100-qfprom
- const: qcom,qfprom
reg:
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml b/Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml
index a44d44b32809..dc89020b0950 100644
--- a/Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml
@@ -14,6 +14,7 @@ properties:
enum:
- rockchip,px30-otp
- rockchip,rk3308-otp
+ - rockchip,rk3576-otp
- rockchip,rk3588-otp
reg:
@@ -62,6 +63,8 @@ allOf:
properties:
clocks:
maxItems: 3
+ clock-names:
+ maxItems: 3
resets:
maxItems: 1
reset-names:
@@ -73,11 +76,33 @@ allOf:
compatible:
contains:
enum:
+ - rockchip,rk3576-otp
+ then:
+ properties:
+ clocks:
+ maxItems: 3
+ clock-names:
+ maxItems: 3
+ resets:
+ minItems: 2
+ maxItems: 2
+ reset-names:
+ items:
+ - const: otp
+ - const: apb
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- rockchip,rk3588-otp
then:
properties:
clocks:
minItems: 4
+ clock-names:
+ minItems: 4
resets:
minItems: 3
reset-names:
diff --git a/Documentation/devicetree/bindings/opp/opp-v1.yaml b/Documentation/devicetree/bindings/opp/opp-v1.yaml
index 07e26c267815..61c080e50859 100644
--- a/Documentation/devicetree/bindings/opp/opp-v1.yaml
+++ b/Documentation/devicetree/bindings/opp/opp-v1.yaml
@@ -18,9 +18,21 @@ description: |+
This binding only supports voltage-frequency pairs.
-select: true
+deprecated: true
properties:
+ clock-latency:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The latency in nanoseconds for clock changes. Use OPP tables for new
+ designs instead.
+
+ voltage-tolerance:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 10
+ description:
+ The voltage tolerance in percent. Use OPP tables for new designs instead.
+
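+  # Hedged illustration (not part of the schema; all values are assumptions):
+  # in the deprecated style these two properties accompany, a CPU node could
+  # carry something like
+  #
+  #   cpu@0 {
+  #       operating-points = <998400 1075000>, <1094400 1150000>;
+  #       clock-latency = <50000>;
+  #       voltage-tolerance = <2>;
+  #   };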
operating-points:
$ref: /schemas/types.yaml#/definitions/uint32-matrix
items:
@@ -28,8 +40,12 @@ properties:
- description: Frequency in kHz
- description: Voltage for OPP in uV
+dependencies:
+ clock-latency: [ operating-points ]
+ voltage-tolerance: [ operating-points ]
additionalProperties: true
+
examples:
- |
cpus {
diff --git a/Documentation/devicetree/bindings/opp/opp-v2-qcom-adreno.yaml b/Documentation/devicetree/bindings/opp/opp-v2-qcom-adreno.yaml
new file mode 100644
index 000000000000..a27ba7b663d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/opp/opp-v2-qcom-adreno.yaml
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/opp/opp-v2-qcom-adreno.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Adreno compatible OPP supply
+
+description:
+  Adreno GPUs present in Qualcomm's Snapdragon chipsets use OPP-specific
+  ACD-related information tailored to the specific chipset. This binding
+  provides the information needed to describe such a hardware value.
+
+maintainers:
+ - Rob Clark <robdclark@gmail.com>
+
+allOf:
+ - $ref: opp-v2-base.yaml#
+
+properties:
+ compatible:
+ contains:
+ const: operating-points-v2-adreno
+
+patternProperties:
+ '^opp-[0-9]+$':
+ type: object
+ additionalProperties: false
+
+ properties:
+ opp-hz: true
+
+ opp-level: true
+
+ opp-peak-kBps: true
+
+ opp-supported-hw: true
+
+ qcom,opp-acd-level:
+ description: |
+          A positive value representing the ACD (Adaptive Clock Distribution,
+          a fancy name for clock throttling during voltage droop) level associated
+          with this OPP node. This value is shared with a co-processor inside the
+          GPU (called the Graphics Management Unit, a.k.a. GMU) during wake up.
+          It may not be present for some OPPs, and the GMU will disable ACD while
+          transitioning to such an OPP. This value encodes a voltage threshold,
+          delay cycles and calibration margins identified by characterization of
+          the SoC, so it has no unit. This data is passed to the GMU firmware via
+          the 'HFI_H2F_MSG_ACD' packet.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ required:
+ - opp-hz
+ - opp-level
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/power/qcom-rpmpd.h>
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2-adreno", "operating-points-v2";
+
+ opp-687000000 {
+ opp-hz = /bits/ 64 <687000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ opp-peak-kBps = <8171875>;
+ qcom,opp-acd-level = <0x882e5ffd>;
+ };
+
+ opp-550000000 {
+ opp-hz = /bits/ 64 <550000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ opp-peak-kBps = <6074219>;
+ qcom,opp-acd-level = <0xc0285ffd>;
+ };
+
+ opp-390000000 {
+ opp-hz = /bits/ 64 <390000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ opp-peak-kBps = <3000000>;
+ qcom,opp-acd-level = <0xc0285ffd>;
+ };
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D1>;
+ opp-peak-kBps = <2136719>;
+ /* Intentionally left out qcom,opp-acd-level property here */
+ };
+
+ };
diff --git a/Documentation/devicetree/bindings/pci/apple,pcie.yaml b/Documentation/devicetree/bindings/pci/apple,pcie.yaml
index c8775f9cb071..c0852be04f6d 100644
--- a/Documentation/devicetree/bindings/pci/apple,pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/apple,pcie.yaml
@@ -17,6 +17,10 @@ description: |
implements its root ports. But the ATU found on most DesignWare
PCIe host bridges is absent.
+ On systems derived from T602x, the PHY registers are in a region
+ separate from the port registers. In that case, there is one PHY
+ register range per port register range.
+
All root ports share a single ECAM space, but separate GPIOs are
used to take the PCI devices on those ports out of reset. Therefore
the standard "reset-gpios" and "max-link-speed" properties appear on
@@ -30,16 +34,18 @@ description: |
properties:
compatible:
- items:
- - enum:
- - apple,t8103-pcie
- - apple,t8112-pcie
- - apple,t6000-pcie
- - const: apple,pcie
+ oneOf:
+ - items:
+ - enum:
+ - apple,t8103-pcie
+ - apple,t8112-pcie
+ - apple,t6000-pcie
+ - const: apple,pcie
+ - const: apple,t6020-pcie
reg:
minItems: 3
- maxItems: 6
+ maxItems: 10
reg-names:
minItems: 3
@@ -50,6 +56,10 @@ properties:
- const: port1
- const: port2
- const: port3
+ - const: phy0
+ - const: phy1
+ - const: phy2
+ - const: phy3
ranges:
minItems: 2
@@ -98,6 +108,15 @@ allOf:
maxItems: 5
interrupts:
maxItems: 3
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: apple,t6020-pcie
+ then:
+ properties:
+ reg-names:
+ minItems: 10
examples:
- |
diff --git a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
index 29f0e1eb5096..c4f9674e8695 100644
--- a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -186,49 +186,48 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
scb {
- #address-cells = <2>;
- #size-cells = <1>;
- pcie0: pcie@7d500000 {
- compatible = "brcm,bcm2711-pcie";
- reg = <0x0 0x7d500000 0x9310>;
- device_type = "pci";
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "pcie", "msi";
- interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH
- 0 0 0 2 &gicv2 GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH
- 0 0 0 3 &gicv2 GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH
- 0 0 0 4 &gicv2 GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
-
- msi-parent = <&pcie0>;
- msi-controller;
- ranges = <0x02000000 0x0 0xf8000000 0x6 0x00000000 0x0 0x04000000>;
- dma-ranges = <0x42000000 0x1 0x00000000 0x0 0x40000000 0x0 0x80000000>,
- <0x42000000 0x1 0x80000000 0x3 0x00000000 0x0 0x80000000>;
- brcm,enable-ssc;
- brcm,scb-sizes = <0x0000000080000000 0x0000000080000000>;
-
- /* PCIe bridge, Root Port */
- pci@0,0 {
- #address-cells = <3>;
- #size-cells = <2>;
- reg = <0x0 0x0 0x0 0x0 0x0>;
- compatible = "pciclass,0604";
- device_type = "pci";
- vpcie3v3-supply = <&vreg7>;
- ranges;
-
- /* PCIe endpoint */
- pci-ep@0,0 {
- assigned-addresses =
- <0x82010000 0x0 0xf8000000 0x6 0x00000000 0x0 0x2000>;
- reg = <0x0 0x0 0x0 0x0 0x0>;
- compatible = "pci14e4,1688";
- };
- };
+ #address-cells = <2>;
+ #size-cells = <1>;
+ pcie0: pcie@7d500000 {
+ compatible = "brcm,bcm2711-pcie";
+ reg = <0x0 0x7d500000 0x9310>;
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pcie", "msi";
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH
+ 0 0 0 2 &gicv2 GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH
+ 0 0 0 3 &gicv2 GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH
+ 0 0 0 4 &gicv2 GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+
+ msi-parent = <&pcie0>;
+ msi-controller;
+ ranges = <0x02000000 0x0 0xf8000000 0x6 0x00000000 0x0 0x04000000>;
+ dma-ranges = <0x42000000 0x1 0x00000000 0x0 0x40000000 0x0 0x80000000>,
+ <0x42000000 0x1 0x80000000 0x3 0x00000000 0x0 0x80000000>;
+ brcm,enable-ssc;
+ brcm,scb-sizes = <0x0000000080000000 0x0000000080000000>;
+
+ /* PCIe bridge, Root Port */
+ pci@0,0 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ compatible = "pciclass,0604";
+ device_type = "pci";
+ vpcie3v3-supply = <&vreg7>;
+ ranges;
+
+ /* PCIe endpoint */
+ pci-ep@0,0 {
+ assigned-addresses = <0x82010000 0x0 0xf8000000 0x6 0x00000000 0x0 0x2000>;
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ compatible = "pci14e4,1688";
+ };
};
+ };
};
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml
index 98651ab22103..8735293962ee 100644
--- a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml
@@ -37,14 +37,14 @@ examples:
#size-cells = <2>;
pcie-ep@fc000000 {
- compatible = "cdns,cdns-pcie-ep";
- reg = <0x0 0xfc000000 0x0 0x01000000>,
- <0x0 0x80000000 0x0 0x40000000>;
- reg-names = "reg", "mem";
- cdns,max-outbound-regions = <16>;
- max-functions = /bits/ 8 <8>;
- phys = <&pcie_phy0>;
- phy-names = "pcie-phy";
+ compatible = "cdns,cdns-pcie-ep";
+ reg = <0x0 0xfc000000 0x0 0x01000000>,
+ <0x0 0x80000000 0x0 0x40000000>;
+ reg-names = "reg", "mem";
+ cdns,max-outbound-regions = <16>;
+ max-functions = /bits/ 8 <8>;
+ phys = <&pcie_phy0>;
+ phy-names = "pcie-phy";
};
};
...
diff --git a/Documentation/devicetree/bindings/pci/intel,keembay-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/intel,keembay-pcie-ep.yaml
index 730e63fd7669..b19f61ae72fb 100644
--- a/Documentation/devicetree/bindings/pci/intel,keembay-pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/intel,keembay-pcie-ep.yaml
@@ -53,17 +53,17 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
pcie-ep@37000000 {
- compatible = "intel,keembay-pcie-ep";
- reg = <0x37000000 0x00001000>,
- <0x37100000 0x00001000>,
- <0x37300000 0x00001000>,
- <0x36000000 0x01000000>,
- <0x37800000 0x00000200>;
- reg-names = "dbi", "dbi2", "atu", "addr_space", "apb";
- interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 108 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "pcie", "pcie_ev", "pcie_err", "pcie_mem_access";
- num-lanes = <2>;
+ compatible = "intel,keembay-pcie-ep";
+ reg = <0x37000000 0x00001000>,
+ <0x37100000 0x00001000>,
+ <0x37300000 0x00001000>,
+ <0x36000000 0x01000000>,
+ <0x37800000 0x00000200>;
+ reg-names = "dbi", "dbi2", "atu", "addr_space", "apb";
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pcie", "pcie_ev", "pcie_err", "pcie_mem_access";
+ num-lanes = <2>;
};
diff --git a/Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml b/Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml
index 1fd557504b10..dd71e3d6bf94 100644
--- a/Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml
@@ -75,23 +75,23 @@ examples:
#define KEEM_BAY_A53_PCIE
#define KEEM_BAY_A53_AUX_PCIE
pcie@37000000 {
- compatible = "intel,keembay-pcie";
- reg = <0x37000000 0x00001000>,
- <0x37300000 0x00001000>,
- <0x36e00000 0x00200000>,
- <0x37800000 0x00000200>;
- reg-names = "dbi", "atu", "config", "apb";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
- ranges = <0x02000000 0 0x36000000 0x36000000 0 0x00e00000>;
- interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "pcie", "pcie_ev", "pcie_err";
- clocks = <&scmi_clk KEEM_BAY_A53_PCIE>,
- <&scmi_clk KEEM_BAY_A53_AUX_PCIE>;
- clock-names = "master", "aux";
- reset-gpios = <&pca2 9 GPIO_ACTIVE_LOW>;
- num-lanes = <2>;
+ compatible = "intel,keembay-pcie";
+ reg = <0x37000000 0x00001000>,
+ <0x37300000 0x00001000>,
+ <0x36e00000 0x00200000>,
+ <0x37800000 0x00000200>;
+ reg-names = "dbi", "atu", "config", "apb";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ ranges = <0x02000000 0 0x36000000 0x36000000 0 0x00e00000>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pcie", "pcie_ev", "pcie_err";
+ clocks = <&scmi_clk KEEM_BAY_A53_PCIE>,
+ <&scmi_clk KEEM_BAY_A53_AUX_PCIE>;
+ clock-names = "master", "aux";
+ reset-gpios = <&pca2 9 GPIO_ACTIVE_LOW>;
+ num-lanes = <2>;
};
diff --git a/Documentation/devicetree/bindings/pci/marvell,armada8k-pcie.yaml b/Documentation/devicetree/bindings/pci/marvell,armada8k-pcie.yaml
new file mode 100644
index 000000000000..f3ba9230ce2a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/marvell,armada8k-pcie.yaml
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/marvell,armada8k-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Armada 7K/8K PCIe interface
+
+maintainers:
+ - Thomas Petazzoni <thomas.petazzoni@bootlin.com>
+
+description:
+ This PCIe host controller is based on the Synopsys DesignWare PCIe IP.
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - marvell,armada8k-pcie
+ required:
+ - compatible
+
+allOf:
+ - $ref: snps,dw-pcie.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - marvell,armada8k-pcie
+ - const: snps,dw-pcie
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: ctrl
+ - const: config
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: core
+ - const: reg
+
+ interrupts:
+ maxItems: 1
+
+ msi-parent:
+ maxItems: 1
+
+ phys:
+ minItems: 1
+ maxItems: 4
+
+ phy-names:
+ minItems: 1
+ maxItems: 4
+
+ marvell,reset-gpio:
+ maxItems: 1
+ deprecated: true
+
+required:
+ - interrupt-map
+ - clocks
+ - msi-parent
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ pcie@f2600000 {
+ compatible = "marvell,armada8k-pcie", "snps,dw-pcie";
+ reg = <0xf2600000 0x10000>, <0xf6f00000 0x80000>;
+ reg-names = "ctrl", "config";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ device_type = "pci";
+ dma-coherent;
+ msi-parent = <&gic_v2m0>;
+
+ ranges = <0x81000000 0 0xf9000000 0xf9000000 0 0x10000>, /* downstream I/O */
+ <0x82000000 0 0xf6000000 0xf6000000 0 0xf00000>; /* non-prefetchable memory */
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ num-lanes = <1>;
+ clocks = <&cpm_syscon0 1 13>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/pci/marvell,kirkwood-pcie.yaml b/Documentation/devicetree/bindings/pci/marvell,kirkwood-pcie.yaml
new file mode 100644
index 000000000000..7be695320ddf
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/marvell,kirkwood-pcie.yaml
@@ -0,0 +1,277 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/marvell,kirkwood-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell EBU PCIe interfaces
+
+maintainers:
+ - Thomas Petazzoni <thomas.petazzoni@bootlin.com>
+ - Pali Rohár <pali@kernel.org>
+
+allOf:
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
+
+properties:
+ compatible:
+ enum:
+ - marvell,armada-370-pcie
+ - marvell,armada-xp-pcie
+ - marvell,dove-pcie
+ - marvell,kirkwood-pcie
+
+ ranges:
+ description: >
+ The ranges describing the MMIO registers have the following layout:
+
+ 0x82000000 0 r MBUS_ID(0xf0, 0x01) r 0 s
+
+ where:
+
+      * r is a 32-bit value that gives the offset of the MMIO registers of
+        this PCIe interface, from the base of the internal registers.
+
+      * s is a 32-bit value that gives the size of this MMIO register area.
+ This range entry translates the '0x82000000 0 r' PCI address into the
+ 'MBUS_ID(0xf0, 0x01) r' CPU address, which is part of the internal
+ register window (as identified by MBUS_ID(0xf0, 0x01)).
+
+ The ranges describing the MBus windows have the following layout:
+
+ 0x8t000000 s 0 MBUS_ID(w, a) 0 1 0
+
+ where:
+
+ * t is the type of the MBus window (as defined by the standard PCI DT
+ bindings), 1 for I/O and 2 for memory.
+
+ * s is the PCI slot that corresponds to this PCIe interface
+
+ * w is the 'target ID' value for the MBus window
+
+      * a is the 'attribute' value for the MBus window.
+
+      Since the location and size of the different MBus windows are not fixed in
+      hardware, and only determined at runtime, those ranges cover the full first
+ 4 GB of the physical address space, and do not translate into a valid CPU
+ address.
+
+ msi-parent:
+ maxItems: 1
+
+patternProperties:
+ '^pcie@':
+ type: object
+ allOf:
+ - $ref: /schemas/pci/pci-bus-common.yaml#
+ - $ref: /schemas/pci/pci-device.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: intx
+ - const: error
+
+ reset-delay-us:
+ default: 100000
+        description: Delay in microseconds to wait after reset de-assertion.
+
+ marvell,pcie-port:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 3
+        description: The physical PCIe port number.
+
+ marvell,pcie-lane:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 3
+        description: The physical PCIe lane number, for ports having multiple lanes. Assumed to be 0 if absent.
+
+ interrupt-controller:
+ type: object
+ additionalProperties: false
+
+ properties:
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+ required:
+ - assigned-addresses
+ - clocks
+ - interrupt-map
+ - marvell,pcie-port
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pcie@f001000000000000 {
+ compatible = "marvell,armada-xp-pcie";
+ device_type = "pci";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ bus-range = <0x00 0xff>;
+ msi-parent = <&mpic>;
+
+ ranges =
+ <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000 /* Port 0.0 registers */
+ 0x82000000 0 0x42000 MBUS_ID(0xf0, 0x01) 0x42000 0 0x00002000 /* Port 2.0 registers */
+ 0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000 /* Port 0.1 registers */
+ 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */
+ 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */
+ 0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */
+ 0x82000000 0 0x82000 MBUS_ID(0xf0, 0x01) 0x82000 0 0x00002000 /* Port 3.0 registers */
+ 0x82000000 0 0x84000 MBUS_ID(0xf0, 0x01) 0x84000 0 0x00002000 /* Port 1.1 registers */
+ 0x82000000 0 0x88000 MBUS_ID(0xf0, 0x01) 0x88000 0 0x00002000 /* Port 1.2 registers */
+ 0x82000000 0 0x8c000 MBUS_ID(0xf0, 0x01) 0x8c000 0 0x00002000 /* Port 1.3 registers */
+ 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
+ 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */
+ 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
+ 0x81000000 0x2 0 MBUS_ID(0x04, 0xd0) 0 1 0 /* Port 0.1 IO */
+ 0x82000000 0x3 0 MBUS_ID(0x04, 0xb8) 0 1 0 /* Port 0.2 MEM */
+ 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */
+ 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
+ 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */
+
+ 0x82000000 0x5 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
+ 0x81000000 0x5 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */
+ 0x82000000 0x6 0 MBUS_ID(0x08, 0xd8) 0 1 0 /* Port 1.1 MEM */
+ 0x81000000 0x6 0 MBUS_ID(0x08, 0xd0) 0 1 0 /* Port 1.1 IO */
+ 0x82000000 0x7 0 MBUS_ID(0x08, 0xb8) 0 1 0 /* Port 1.2 MEM */
+ 0x81000000 0x7 0 MBUS_ID(0x08, 0xb0) 0 1 0 /* Port 1.2 IO */
+ 0x82000000 0x8 0 MBUS_ID(0x08, 0x78) 0 1 0 /* Port 1.3 MEM */
+ 0x81000000 0x8 0 MBUS_ID(0x08, 0x70) 0 1 0 /* Port 1.3 IO */
+
+ 0x82000000 0x9 0 MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */
+ 0x81000000 0x9 0 MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO */
+
+ 0x82000000 0xa 0 MBUS_ID(0x08, 0xf8) 0 1 0 /* Port 3.0 MEM */
+ 0x81000000 0xa 0 MBUS_ID(0x08, 0xf0) 0 1 0 /* Port 3.0 IO */>;
+
+ pcie@1,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+ reg = <0x0800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
+ 0x81000000 0 0 0x81000000 0x1 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &mpic 58>;
+ marvell,pcie-port = <0>;
+ marvell,pcie-lane = <0>;
+ num-lanes = <1>;
+ /* low-active PERST# reset on GPIO 25 */
+ reset-gpios = <&gpio0 25 1>;
+ /* wait 20ms for device settle after reset deassertion */
+ reset-delay-us = <20000>;
+ clocks = <&gateclk 5>;
+ };
+
+ pcie@2,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+ 0x81000000 0 0 0x81000000 0x2 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &mpic 59>;
+ marvell,pcie-port = <0>;
+ marvell,pcie-lane = <1>;
+ num-lanes = <1>;
+ clocks = <&gateclk 6>;
+ };
+
+ pcie@3,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
+ reg = <0x1800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0
+ 0x81000000 0 0 0x81000000 0x3 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &mpic 60>;
+ marvell,pcie-port = <0>;
+ marvell,pcie-lane = <2>;
+ num-lanes = <1>;
+ clocks = <&gateclk 7>;
+ };
+
+ pcie@4,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
+ reg = <0x2000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0 0 0x82000000 0x4 0 1 0
+ 0x81000000 0 0 0x81000000 0x4 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &mpic 61>;
+ marvell,pcie-port = <0>;
+ marvell,pcie-lane = <3>;
+ num-lanes = <1>;
+ clocks = <&gateclk 8>;
+ };
+
+ pcie@5,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
+ reg = <0x2800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
+ 0x81000000 0 0 0x81000000 0x5 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &mpic 62>;
+ marvell,pcie-port = <1>;
+ marvell,pcie-lane = <0>;
+ num-lanes = <1>;
+ clocks = <&gateclk 9>;
+ };
+
+ pcie@6,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82003000 0 0x84000 0 0x2000>;
+ reg = <0x3000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0 0 0x82000000 0x6 0 1 0
+ 0x81000000 0 0 0x81000000 0x6 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &mpic 63>;
+ marvell,pcie-port = <1>;
+ marvell,pcie-lane = <1>;
+ num-lanes = <1>;
+ clocks = <&gateclk 10>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml b/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
index 103574d18dbc..47b0bad690d5 100644
--- a/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
+++ b/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
@@ -50,7 +50,7 @@ properties:
items:
pattern: '^fic[0-3]$'
- dma-coherent: true
+ dma-noncoherent: true
ranges:
minItems: 1
@@ -65,33 +65,33 @@ unevaluatedProperties: false
examples:
- |
soc {
- #address-cells = <2>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ pcie0: pcie@2030000000 {
+ compatible = "microchip,pcie-host-1.0";
+ reg = <0x0 0x70000000 0x0 0x08000000>,
+ <0x0 0x43008000 0x0 0x00002000>,
+ <0x0 0x4300a000 0x0 0x00002000>;
+ reg-names = "cfg", "bridge", "ctrl";
+ device_type = "pci";
+ #address-cells = <3>;
#size-cells = <2>;
- pcie0: pcie@2030000000 {
- compatible = "microchip,pcie-host-1.0";
- reg = <0x0 0x70000000 0x0 0x08000000>,
- <0x0 0x43008000 0x0 0x00002000>,
- <0x0 0x4300a000 0x0 0x00002000>;
- reg-names = "cfg", "bridge", "ctrl";
- device_type = "pci";
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- interrupts = <119>;
- interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- interrupt-map = <0 0 0 1 &pcie_intc0 0>,
- <0 0 0 2 &pcie_intc0 1>,
- <0 0 0 3 &pcie_intc0 2>,
- <0 0 0 4 &pcie_intc0 3>;
- interrupt-parent = <&plic0>;
- msi-parent = <&pcie0>;
- msi-controller;
- bus-range = <0x00 0x7f>;
- ranges = <0x03000000 0x0 0x78000000 0x0 0x78000000 0x0 0x04000000>;
- pcie_intc0: interrupt-controller {
- #address-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-controller;
- };
+ #interrupt-cells = <1>;
+ interrupts = <119>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+ <0 0 0 2 &pcie_intc0 1>,
+ <0 0 0 3 &pcie_intc0 2>,
+ <0 0 0 4 &pcie_intc0 3>;
+ interrupt-parent = <&plic0>;
+ msi-parent = <&pcie0>;
+ msi-controller;
+ bus-range = <0x00 0x7f>;
+ ranges = <0x03000000 0x0 0x78000000 0x0 0x78000000 0x0 0x04000000>;
+ pcie_intc0: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
};
+ };
};
diff --git a/Documentation/devicetree/bindings/pci/mvebu-pci.txt b/Documentation/devicetree/bindings/pci/mvebu-pci.txt
deleted file mode 100644
index 6d022a9d36ee..000000000000
--- a/Documentation/devicetree/bindings/pci/mvebu-pci.txt
+++ /dev/null
@@ -1,310 +0,0 @@
-* Marvell EBU PCIe interfaces
-
-Mandatory properties:
-
-- compatible: one of the following values:
- marvell,armada-370-pcie
- marvell,armada-xp-pcie
- marvell,dove-pcie
- marvell,kirkwood-pcie
-- #address-cells, set to <3>
-- #size-cells, set to <2>
-- #interrupt-cells, set to <1>
-- bus-range: PCI bus numbers covered
-- device_type, set to "pci"
-- ranges: ranges describing the MMIO registers to control the PCIe
- interfaces, and ranges describing the MBus windows needed to access
- the memory and I/O regions of each PCIe interface.
-- msi-parent: Link to the hardware entity that serves as the Message
- Signaled Interrupt controller for this PCI controller.
-
-The ranges describing the MMIO registers have the following layout:
-
- 0x82000000 0 r MBUS_ID(0xf0, 0x01) r 0 s
-
-where:
-
- * r is a 32-bits value that gives the offset of the MMIO
- registers of this PCIe interface, from the base of the internal
- registers.
-
- * s is a 32-bits value that give the size of this MMIO
- registers area. This range entry translates the '0x82000000 0 r' PCI
- address into the 'MBUS_ID(0xf0, 0x01) r' CPU address, which is part
- of the internal register window (as identified by MBUS_ID(0xf0,
- 0x01)).
-
-The ranges describing the MBus windows have the following layout:
-
- 0x8t000000 s 0 MBUS_ID(w, a) 0 1 0
-
-where:
-
- * t is the type of the MBus window (as defined by the standard PCI DT
- bindings), 1 for I/O and 2 for memory.
-
- * s is the PCI slot that corresponds to this PCIe interface
-
- * w is the 'target ID' value for the MBus window
-
- * a the 'attribute' value for the MBus window.
-
-Since the location and size of the different MBus windows is not fixed in
-hardware, and only determined in runtime, those ranges cover the full first
-4 GB of the physical address space, and do not translate into a valid CPU
-address.
-
-In addition, the device tree node must have sub-nodes describing each
-PCIe interface, having the following mandatory properties:
-
-- reg: used only for interrupt mapping, so only the first four bytes
- are used to refer to the correct bus number and device number.
-- assigned-addresses: reference to the MMIO registers used to control
- this PCIe interface.
-- clocks: the clock associated to this PCIe interface
-- marvell,pcie-port: the physical PCIe port number
-- status: either "disabled" or "okay"
-- device_type, set to "pci"
-- #address-cells, set to <3>
-- #size-cells, set to <2>
-- #interrupt-cells, set to <1>
-- ranges, translating the MBus windows ranges of the parent node into
- standard PCI addresses.
-- interrupt-map-mask and interrupt-map, standard PCI properties to
- define the mapping of the PCIe interface to interrupt numbers.
-
-and the following optional properties:
-- marvell,pcie-lane: the physical PCIe lane number, for ports having
- multiple lanes. If this property is not found, we assume that the
- value is 0.
-- num-lanes: number of SerDes PCIe lanes for this link (1 or 4)
-- reset-gpios: optional GPIO to PERST#
-- reset-delay-us: delay in us to wait after reset de-assertion, if not
- specified will default to 100ms, as required by the PCIe specification.
-- interrupt-names: list of interrupt names, supported are:
- - "intx" - interrupt line triggered by one of the legacy interrupt
-- interrupts or interrupts-extended: List of the interrupt sources which
- corresponding to the "interrupt-names". If non-empty then also additional
- 'interrupt-controller' subnode must be defined.
-
-Example:
-
-pcie-controller {
- compatible = "marvell,armada-xp-pcie";
- device_type = "pci";
-
- #address-cells = <3>;
- #size-cells = <2>;
-
- bus-range = <0x00 0xff>;
- msi-parent = <&mpic>;
-
- ranges =
- <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000 /* Port 0.0 registers */
- 0x82000000 0 0x42000 MBUS_ID(0xf0, 0x01) 0x42000 0 0x00002000 /* Port 2.0 registers */
- 0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000 /* Port 0.1 registers */
- 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */
- 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */
- 0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */
- 0x82000000 0 0x82000 MBUS_ID(0xf0, 0x01) 0x82000 0 0x00002000 /* Port 3.0 registers */
- 0x82000000 0 0x84000 MBUS_ID(0xf0, 0x01) 0x84000 0 0x00002000 /* Port 1.1 registers */
- 0x82000000 0 0x88000 MBUS_ID(0xf0, 0x01) 0x88000 0 0x00002000 /* Port 1.2 registers */
- 0x82000000 0 0x8c000 MBUS_ID(0xf0, 0x01) 0x8c000 0 0x00002000 /* Port 1.3 registers */
- 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
- 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */
- 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
- 0x81000000 0x2 0 MBUS_ID(0x04, 0xd0) 0 1 0 /* Port 0.1 IO */
- 0x82000000 0x3 0 MBUS_ID(0x04, 0xb8) 0 1 0 /* Port 0.2 MEM */
- 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */
- 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
- 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */
-
- 0x82000000 0x5 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
- 0x81000000 0x5 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */
- 0x82000000 0x6 0 MBUS_ID(0x08, 0xd8) 0 1 0 /* Port 1.1 MEM */
- 0x81000000 0x6 0 MBUS_ID(0x08, 0xd0) 0 1 0 /* Port 1.1 IO */
- 0x82000000 0x7 0 MBUS_ID(0x08, 0xb8) 0 1 0 /* Port 1.2 MEM */
- 0x81000000 0x7 0 MBUS_ID(0x08, 0xb0) 0 1 0 /* Port 1.2 IO */
- 0x82000000 0x8 0 MBUS_ID(0x08, 0x78) 0 1 0 /* Port 1.3 MEM */
- 0x81000000 0x8 0 MBUS_ID(0x08, 0x70) 0 1 0 /* Port 1.3 IO */
-
- 0x82000000 0x9 0 MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */
- 0x81000000 0x9 0 MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO */
-
- 0x82000000 0xa 0 MBUS_ID(0x08, 0xf8) 0 1 0 /* Port 3.0 MEM */
- 0x81000000 0xa 0 MBUS_ID(0x08, 0xf0) 0 1 0 /* Port 3.0 IO */>;
-
- pcie@1,0 {
- device_type = "pci";
- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
- reg = <0x0800 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
- 0x81000000 0 0 0x81000000 0x1 0 1 0>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 58>;
- marvell,pcie-port = <0>;
- marvell,pcie-lane = <0>;
- num-lanes = <1>;
- /* low-active PERST# reset on GPIO 25 */
- reset-gpios = <&gpio0 25 1>;
- /* wait 20ms for device settle after reset deassertion */
- reset-delay-us = <20000>;
- clocks = <&gateclk 5>;
- };
-
- pcie@2,0 {
- device_type = "pci";
- assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
- reg = <0x1000 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
- 0x81000000 0 0 0x81000000 0x2 0 1 0>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 59>;
- marvell,pcie-port = <0>;
- marvell,pcie-lane = <1>;
- num-lanes = <1>;
- clocks = <&gateclk 6>;
- };
-
- pcie@3,0 {
- device_type = "pci";
- assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
- reg = <0x1800 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0
- 0x81000000 0 0 0x81000000 0x3 0 1 0>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 60>;
- marvell,pcie-port = <0>;
- marvell,pcie-lane = <2>;
- num-lanes = <1>;
- clocks = <&gateclk 7>;
- };
-
- pcie@4,0 {
- device_type = "pci";
- assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
- reg = <0x2000 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0x4 0 1 0
- 0x81000000 0 0 0x81000000 0x4 0 1 0>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 61>;
- marvell,pcie-port = <0>;
- marvell,pcie-lane = <3>;
- num-lanes = <1>;
- clocks = <&gateclk 8>;
- };
-
- pcie@5,0 {
- device_type = "pci";
- assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
- reg = <0x2800 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
- 0x81000000 0 0 0x81000000 0x5 0 1 0>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 62>;
- marvell,pcie-port = <1>;
- marvell,pcie-lane = <0>;
- num-lanes = <1>;
- clocks = <&gateclk 9>;
- };
-
- pcie@6,0 {
- device_type = "pci";
- assigned-addresses = <0x82003000 0 0x84000 0 0x2000>;
- reg = <0x3000 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0x6 0 1 0
- 0x81000000 0 0 0x81000000 0x6 0 1 0>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 63>;
- marvell,pcie-port = <1>;
- marvell,pcie-lane = <1>;
- num-lanes = <1>;
- clocks = <&gateclk 10>;
- };
-
- pcie@7,0 {
- device_type = "pci";
- assigned-addresses = <0x82003800 0 0x88000 0 0x2000>;
- reg = <0x3800 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0x7 0 1 0
- 0x81000000 0 0 0x81000000 0x7 0 1 0>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 64>;
- marvell,pcie-port = <1>;
- marvell,pcie-lane = <2>;
- num-lanes = <1>;
- clocks = <&gateclk 11>;
- };
-
- pcie@8,0 {
- device_type = "pci";
- assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>;
- reg = <0x4000 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0x8 0 1 0
- 0x81000000 0 0 0x81000000 0x8 0 1 0>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 65>;
- marvell,pcie-port = <1>;
- marvell,pcie-lane = <3>;
- num-lanes = <1>;
- clocks = <&gateclk 12>;
- };
-
- pcie@9,0 {
- device_type = "pci";
- assigned-addresses = <0x82004800 0 0x42000 0 0x2000>;
- reg = <0x4800 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0
- 0x81000000 0 0 0x81000000 0x9 0 1 0>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 99>;
- marvell,pcie-port = <2>;
- marvell,pcie-lane = <0>;
- num-lanes = <1>;
- clocks = <&gateclk 26>;
- };
-
- pcie@a,0 {
- device_type = "pci";
- assigned-addresses = <0x82005000 0 0x82000 0 0x2000>;
- reg = <0x5000 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0xa 0 1 0
- 0x81000000 0 0 0x81000000 0xa 0 1 0>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 103>;
- marvell,pcie-port = <3>;
- marvell,pcie-lane = <0>;
- num-lanes = <1>;
- clocks = <&gateclk 27>;
- };
-};
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie-ep.yaml
index a24fb8307d29..6d6052a2748f 100644
--- a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie-ep.yaml
@@ -74,7 +74,7 @@ properties:
reset-gpios:
description: Must contain a phandle to a GPIO controller followed by GPIO
- that is being used as PERST input signal. Please refer to pci.txt.
+      that is used as the PERST input signal.
phys:
minItems: 1
diff --git a/Documentation/devicetree/bindings/pci/pci-armada8k.txt b/Documentation/devicetree/bindings/pci/pci-armada8k.txt
deleted file mode 100644
index ff25a134befa..000000000000
--- a/Documentation/devicetree/bindings/pci/pci-armada8k.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-* Marvell Armada 7K/8K PCIe interface
-
-This PCIe host controller is based on the Synopsys DesignWare PCIe IP
-and thus inherits all the common properties defined in snps,dw-pcie.yaml.
-
-Required properties:
-- compatible: "marvell,armada8k-pcie"
-- reg: must contain two register regions
- - the control register region
- - the config space region
-- reg-names:
- - "ctrl" for the control register region
- - "config" for the config space region
-- interrupts: Interrupt specifier for the PCIe controller
-- clocks: reference to the PCIe controller clocks
-- clock-names: mandatory if there is a second clock, in this case the
- name must be "core" for the first clock and "reg" for the second
- one
-
-Optional properties:
-- phys: phandle(s) to PHY node(s) following the generic PHY bindings.
- Either 1, 2 or 4 PHYs might be needed depending on the number of
- PCIe lanes.
-- phy-names: names of the PHYs corresponding to the number of lanes.
- Must be "cp0-pcie0-x4-lane0-phy", "cp0-pcie0-x4-lane1-phy" for
- 2 PHYs.
-
-Example:
-
- pcie@f2600000 {
- compatible = "marvell,armada8k-pcie", "snps,dw-pcie";
- reg = <0 0xf2600000 0 0x10000>, <0 0xf6f00000 0 0x80000>;
- reg-names = "ctrl", "config";
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- device_type = "pci";
- dma-coherent;
-
- bus-range = <0 0xff>;
- ranges = <0x81000000 0 0xf9000000 0 0xf9000000 0 0x10000 /* downstream I/O */
- 0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>; /* non-prefetchable memory */
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
- num-lanes = <1>;
- clocks = <&cpm_syscon0 1 13>;
- };
diff --git a/Documentation/devicetree/bindings/pci/pci-ep.yaml b/Documentation/devicetree/bindings/pci/pci-ep.yaml
index f75000e3093d..214caa4ec3d5 100644
--- a/Documentation/devicetree/bindings/pci/pci-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/pci-ep.yaml
@@ -17,6 +17,24 @@ properties:
$nodename:
pattern: "^pcie-ep@"
+ iommu-map:
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: Device ID (see msi-map) base
+ maximum: 0x7ffff
+ - description: phandle to IOMMU
+ - description: IOMMU specifier base (currently always 1 cell)
+ - description: Number of Device IDs
+ maximum: 0x80000
+
+ iommu-map-mask:
+ description:
+ A mask to be applied to each Device ID prior to being mapped to an
+ IOMMU specifier per the iommu-map property.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 0x7ffff
+
max-functions:
description: Maximum number of functions that can be configured
$ref: /schemas/types.yaml#/definitions/uint8
@@ -35,6 +53,56 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [ 1, 2, 3, 4 ]
+ msi-map:
+ description: |
+ Maps a Device ID to an MSI and associated MSI specifier data.
+
+ A PCI Endpoint (EP) can use MSI as a doorbell function. This is achieved by
+ mapping the MSI controller's address into PCI BAR<n>. The PCI Root Complex
+      can write to this BAR<n>, triggering the EP to generate an IRQ. This notifies
+ the EP-side driver of an event, eliminating the need for the driver to
+ continuously poll for status changes.
+
+ However, the EP cannot rely on Requester ID (RID) because the RID is
+ determined by the PCI topology of the host system. Since the EP may be
+ connected to different PCI hosts, the RID can vary between systems and is
+ therefore not a reliable identifier.
+
+ Each EP can support up to 8 physical functions and up to 65,536 virtual
+ functions. To uniquely identify each child device, a device ID is defined
+ as
+ - Bits [2:0] for the function number (func)
+ - Bits [18:3] for the virtual function index (vfunc)
+
+ The resulting device ID is computed as:
+
+ (func & 0x7) | (vfunc << 3)
+
+ The property is an arbitrary number of tuples of
+      (device-id-base, msi, msi-base, length).
+
+ Any Device ID id in the interval [id-base, id-base + length) is
+ associated with the listed MSI, with the MSI specifier
+ (id - id-base + msi-base).
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: The Device ID base matched by the entry
+ maximum: 0x7ffff
+ - description: phandle to msi-controller node
+ - description: (optional) The msi-specifier produced for the first
+ Device ID matched by the entry. Currently, msi-specifier is 0 or
+ 1 cells.
+ - description: The length of consecutive Device IDs following the
+ Device ID base
+ maximum: 0x80000
+
+ msi-map-mask:
+ description: A mask to be applied to each Device ID prior to being
+ mapped to an msi-specifier per the msi-map property.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 0x7ffff
+
num-lanes:
description: maximum number of lanes
$ref: /schemas/types.yaml#/definitions/uint32
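
The msi-map and iommu-map properties added to pci-ep.yaml above share one lookup scheme: mask the Device ID, find the tuple whose [id-base, id-base + length) interval contains it, and add the offset into the target specifier space. A minimal sketch of that arithmetic follows; the map entries and helper names are made up for illustration and are not part of the binding.

def device_id(func, vfunc=0):
    """Device ID = (func & 0x7) | (vfunc << 3), per the msi-map description."""
    return (func & 0x7) | (vfunc << 3)

def resolve(dev_id, entries, mask=0x7ffff):
    """Walk (id-base, target, specifier-base, length) tuples for a Device ID."""
    dev_id &= mask                          # msi-map-mask / iommu-map-mask step
    for id_base, target, spec_base, length in entries:
        if id_base <= dev_id < id_base + length:
            return target, dev_id - id_base + spec_base
    return None

# Hypothetical map: all Device IDs go to "msi0", specifiers starting at 16.
msi_map = [(0x0, "msi0", 16, 0x80000)]

print(hex(device_id(func=2, vfunc=5)))    # 0x2a
print(resolve(device_id(2, 5), msi_map))  # ('msi0', 58)
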
diff --git a/Documentation/devicetree/bindings/pci/pci-iommu.txt b/Documentation/devicetree/bindings/pci/pci-iommu.txt
deleted file mode 100644
index 0def586fdcdf..000000000000
--- a/Documentation/devicetree/bindings/pci/pci-iommu.txt
+++ /dev/null
@@ -1,171 +0,0 @@
-This document describes the generic device tree binding for describing the
-relationship between PCI(e) devices and IOMMU(s).
-
-Each PCI(e) device under a root complex is uniquely identified by its Requester
-ID (AKA RID). A Requester ID is a triplet of a Bus number, Device number, and
-Function number.
-
-For the purpose of this document, when treated as a numeric value, a RID is
-formatted such that:
-
-* Bits [15:8] are the Bus number.
-* Bits [7:3] are the Device number.
-* Bits [2:0] are the Function number.
-* Any other bits required for padding must be zero.
-
-IOMMUs may distinguish PCI devices through sideband data derived from the
-Requester ID. While a given PCI device can only master through one IOMMU, a
-root complex may split masters across a set of IOMMUs (e.g. with one IOMMU per
-bus).
-
-The generic 'iommus' property is insufficient to describe this relationship,
-and a mechanism is required to map from a PCI device to its IOMMU and sideband
-data.
-
-For generic IOMMU bindings, see
-Documentation/devicetree/bindings/iommu/iommu.txt.
-
-
-PCI root complex
-================
-
-Optional properties
--------------------
-
-- iommu-map: Maps a Requester ID to an IOMMU and associated IOMMU specifier
- data.
-
- The property is an arbitrary number of tuples of
- (rid-base,iommu,iommu-base,length).
-
- Any RID r in the interval [rid-base, rid-base + length) is associated with
- the listed IOMMU, with the IOMMU specifier (r - rid-base + iommu-base).
-
-- iommu-map-mask: A mask to be applied to each Requester ID prior to being
- mapped to an IOMMU specifier per the iommu-map property.
-
-
-Example (1)
-===========
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- iommu: iommu@a {
- reg = <0xa 0x1>;
- compatible = "vendor,some-iommu";
- #iommu-cells = <1>;
- };
-
- pci: pci@f {
- reg = <0xf 0x1>;
- compatible = "vendor,pcie-root-complex";
- device_type = "pci";
-
- /*
- * The sideband data provided to the IOMMU is the RID,
- * identity-mapped.
- */
- iommu-map = <0x0 &iommu 0x0 0x10000>;
- };
-};
-
-
-Example (2)
-===========
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- iommu: iommu@a {
- reg = <0xa 0x1>;
- compatible = "vendor,some-iommu";
- #iommu-cells = <1>;
- };
-
- pci: pci@f {
- reg = <0xf 0x1>;
- compatible = "vendor,pcie-root-complex";
- device_type = "pci";
-
- /*
- * The sideband data provided to the IOMMU is the RID with the
- * function bits masked out.
- */
- iommu-map = <0x0 &iommu 0x0 0x10000>;
- iommu-map-mask = <0xfff8>;
- };
-};
-
-
-Example (3)
-===========
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- iommu: iommu@a {
- reg = <0xa 0x1>;
- compatible = "vendor,some-iommu";
- #iommu-cells = <1>;
- };
-
- pci: pci@f {
- reg = <0xf 0x1>;
- compatible = "vendor,pcie-root-complex";
- device_type = "pci";
-
- /*
- * The sideband data provided to the IOMMU is the RID,
- * but the high bits of the bus number are flipped.
- */
- iommu-map = <0x0000 &iommu 0x8000 0x8000>,
- <0x8000 &iommu 0x0000 0x8000>;
- };
-};
-
-
-Example (4)
-===========
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- iommu_a: iommu@a {
- reg = <0xa 0x1>;
- compatible = "vendor,some-iommu";
- #iommu-cells = <1>;
- };
-
- iommu_b: iommu@b {
- reg = <0xb 0x1>;
- compatible = "vendor,some-iommu";
- #iommu-cells = <1>;
- };
-
- iommu_c: iommu@c {
- reg = <0xc 0x1>;
- compatible = "vendor,some-iommu";
- #iommu-cells = <1>;
- };
-
- pci: pci@f {
- reg = <0xf 0x1>;
- compatible = "vendor,pcie-root-complex";
- device_type = "pci";
-
- /*
- * Devices with bus number 0-127 are mastered via IOMMU
- * a, with sideband data being RID[14:0].
- * Devices with bus number 128-255 are mastered via
- * IOMMU b, with sideband data being RID[14:0].
- * No devices master via IOMMU c.
- */
- iommu-map = <0x0000 &iommu_a 0x0000 0x8000>,
- <0x8000 &iommu_b 0x0000 0x8000>;
- };
-};
diff --git a/Documentation/devicetree/bindings/pci/pci-msi.txt b/Documentation/devicetree/bindings/pci/pci-msi.txt
deleted file mode 100644
index b73d839657b6..000000000000
--- a/Documentation/devicetree/bindings/pci/pci-msi.txt
+++ /dev/null
@@ -1,220 +0,0 @@
-This document describes the generic device tree binding for describing the
-relationship between PCI devices and MSI controllers.
-
-Each PCI device under a root complex is uniquely identified by its Requester ID
-(AKA RID). A Requester ID is a triplet of a Bus number, Device number, and
-Function number.
-
-For the purpose of this document, when treated as a numeric value, a RID is
-formatted such that:
-
-* Bits [15:8] are the Bus number.
-* Bits [7:3] are the Device number.
-* Bits [2:0] are the Function number.
-* Any other bits required for padding must be zero.
-
-MSIs may be distinguished in part through the use of sideband data accompanying
-writes. In the case of PCI devices, this sideband data may be derived from the
-Requester ID. A mechanism is required to associate a device with both the MSI
-controllers it can address, and the sideband data that will be associated with
-its writes to those controllers.
-
-For generic MSI bindings, see
-Documentation/devicetree/bindings/interrupt-controller/msi.txt.
-
-
-PCI root complex
-================
-
-Optional properties
--------------------
-
-- msi-map: Maps a Requester ID to an MSI controller and associated
- msi-specifier data. The property is an arbitrary number of tuples of
- (rid-base,msi-controller,msi-base,length), where:
-
- * rid-base is a single cell describing the first RID matched by the entry.
-
- * msi-controller is a single phandle to an MSI controller
-
- * msi-base is an msi-specifier describing the msi-specifier produced for the
- first RID matched by the entry.
-
- * length is a single cell describing how many consecutive RIDs are matched
- following the rid-base.
-
- Any RID r in the interval [rid-base, rid-base + length) is associated with
- the listed msi-controller, with the msi-specifier (r - rid-base + msi-base).
-
-- msi-map-mask: A mask to be applied to each Requester ID prior to being mapped
- to an msi-specifier per the msi-map property.
-
-- msi-parent: Describes the MSI parent of the root complex itself. Where
- the root complex and MSI controller do not pass sideband data with MSI
- writes, this property may be used to describe the MSI controller(s)
- used by PCI devices under the root complex, if defined as such in the
- binding for the root complex.
-
-
-Example (1)
-===========
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- msi: msi-controller@a {
- reg = <0xa 0x1>;
- compatible = "vendor,some-controller";
- msi-controller;
- #msi-cells = <1>;
- };
-
- pci: pci@f {
- reg = <0xf 0x1>;
- compatible = "vendor,pcie-root-complex";
- device_type = "pci";
-
- /*
- * The sideband data provided to the MSI controller is
- * the RID, identity-mapped.
- */
- msi-map = <0x0 &msi_a 0x0 0x10000>,
- };
-};
-
-
-Example (2)
-===========
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- msi: msi-controller@a {
- reg = <0xa 0x1>;
- compatible = "vendor,some-controller";
- msi-controller;
- #msi-cells = <1>;
- };
-
- pci: pci@f {
- reg = <0xf 0x1>;
- compatible = "vendor,pcie-root-complex";
- device_type = "pci";
-
- /*
- * The sideband data provided to the MSI controller is
- * the RID, masked to only the device and function bits.
- */
- msi-map = <0x0 &msi_a 0x0 0x100>,
- msi-map-mask = <0xff>
- };
-};
-
-
-Example (3)
-===========
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- msi: msi-controller@a {
- reg = <0xa 0x1>;
- compatible = "vendor,some-controller";
- msi-controller;
- #msi-cells = <1>;
- };
-
- pci: pci@f {
- reg = <0xf 0x1>;
- compatible = "vendor,pcie-root-complex";
- device_type = "pci";
-
- /*
- * The sideband data provided to the MSI controller is
- * the RID, but the high bit of the bus number is
- * ignored.
- */
- msi-map = <0x0000 &msi 0x0000 0x8000>,
- <0x8000 &msi 0x0000 0x8000>;
- };
-};
-
-
-Example (4)
-===========
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- msi: msi-controller@a {
- reg = <0xa 0x1>;
- compatible = "vendor,some-controller";
- msi-controller;
- #msi-cells = <1>;
- };
-
- pci: pci@f {
- reg = <0xf 0x1>;
- compatible = "vendor,pcie-root-complex";
- device_type = "pci";
-
- /*
- * The sideband data provided to the MSI controller is
- * the RID, but the high bit of the bus number is
- * negated.
- */
- msi-map = <0x0000 &msi 0x8000 0x8000>,
- <0x8000 &msi 0x0000 0x8000>;
- };
-};
-
-
-Example (5)
-===========
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- msi_a: msi-controller@a {
- reg = <0xa 0x1>;
- compatible = "vendor,some-controller";
- msi-controller;
- #msi-cells = <1>;
- };
-
- msi_b: msi-controller@b {
- reg = <0xb 0x1>;
- compatible = "vendor,some-controller";
- msi-controller;
- #msi-cells = <1>;
- };
-
- msi_c: msi-controller@c {
- reg = <0xc 0x1>;
- compatible = "vendor,some-controller";
- msi-controller;
- #msi-cells = <1>;
- };
-
- pci: pci@f {
- reg = <0xf 0x1>;
- compatible = "vendor,pcie-root-complex";
- device_type = "pci";
-
- /*
- * The sideband data provided to MSI controller a is the
- * RID, but the high bit of the bus number is negated.
- * The sideband data provided to MSI controller b is the
- * RID, identity-mapped.
- * MSI controller c is not addressable.
- */
- msi-map = <0x0000 &msi_a 0x8000 0x08000>,
- <0x8000 &msi_a 0x0000 0x08000>,
- <0x0000 &msi_b 0x0000 0x10000>;
- };
-};
diff --git a/Documentation/devicetree/bindings/pci/pci.txt b/Documentation/devicetree/bindings/pci/pci.txt
deleted file mode 100644
index 6a8f2874a24d..000000000000
--- a/Documentation/devicetree/bindings/pci/pci.txt
+++ /dev/null
@@ -1,84 +0,0 @@
-PCI bus bridges have standardized Device Tree bindings:
-
-PCI Bus Binding to: IEEE Std 1275-1994
-https://www.devicetree.org/open-firmware/bindings/pci/pci2_1.pdf
-
-And for the interrupt mapping part:
-
-Open Firmware Recommended Practice: Interrupt Mapping
-https://www.devicetree.org/open-firmware/practice/imap/imap0_9d.pdf
-
-Additionally to the properties specified in the above standards a host bridge
-driver implementation may support the following properties:
-
-- linux,pci-domain:
- If present this property assigns a fixed PCI domain number to a host bridge,
- otherwise an unstable (across boots) unique number will be assigned.
- It is required to either not set this property at all or set it for all
- host bridges in the system, otherwise potentially conflicting domain numbers
- may be assigned to root buses behind different host bridges. The domain
- number for each host bridge in the system must be unique.
-- max-link-speed:
- If present this property specifies PCI gen for link capability. Host
- drivers could add this as a strategy to avoid unnecessary operation for
- unsupported link speed, for instance, trying to do training for
- unsupported link speed, etc. Must be '4' for gen4, '3' for gen3, '2'
- for gen2, and '1' for gen1. Any other values are invalid.
-- reset-gpios:
- If present this property specifies PERST# GPIO. Host drivers can parse the
- GPIO and apply fundamental reset to endpoints.
-- supports-clkreq:
- If present this property specifies that CLKREQ signal routing exists from
- root port to downstream device and host bridge drivers can do programming
- which depends on CLKREQ signal existence. For example, programming root port
- not to advertise ASPM L1 Sub-States support if there is no CLKREQ signal.
-
-PCI-PCI Bridge properties
--------------------------
-
-PCIe root ports and switch ports may be described explicitly in the device
-tree, as children of the host bridge node. Even though those devices are
-discoverable by probing, it might be necessary to describe properties that
-aren't provided by standard PCIe capabilities.
-
-Required properties:
-
-- reg:
- Identifies the PCI-PCI bridge. As defined in the IEEE Std 1275-1994
- document, it is a five-cell address encoded as (phys.hi phys.mid
- phys.lo size.hi size.lo). phys.hi should contain the device's BDF as
- 0b00000000 bbbbbbbb dddddfff 00000000. The other cells should be zero.
-
- The bus number is defined by firmware, through the standard bridge
- configuration mechanism. If this port is a switch port, then firmware
- allocates the bus number and writes it into the Secondary Bus Number
- register of the bridge directly above this port. Otherwise, the bus
- number of a root port is the first number in the bus-range property,
- defaulting to zero.
-
- If firmware leaves the ARI Forwarding Enable bit set in the bridge
- above this port, then phys.hi contains the 8-bit function number as
- 0b00000000 bbbbbbbb ffffffff 00000000. Note that the PCIe specification
- recommends that firmware only leaves ARI enabled when it knows that the
- OS is ARI-aware.
-
-Optional properties:
-
-- external-facing:
- When present, the port is external-facing. All bridges and endpoints
- downstream of this port are external to the machine. The OS can, for
- example, use this information to identify devices that cannot be
- trusted with relaxed DMA protection, as users could easily attach
- malicious devices to this port.
-
-Example:
-
-pcie@10000000 {
- compatible = "pci-host-ecam-generic";
- ...
- pcie@0008 {
- /* Root port 00:01.0 is external-facing */
- reg = <0x00000800 0 0 0 0>;
- external-facing;
- };
-};
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sa8775p.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sa8775p.yaml
index efde49d1bef8..e3fa232da2ca 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sa8775p.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sa8775p.yaml
@@ -45,9 +45,10 @@ properties:
interrupts:
minItems: 8
- maxItems: 8
+ maxItems: 9
interrupt-names:
+ minItems: 8
items:
- const: msi0
- const: msi1
@@ -57,6 +58,7 @@ properties:
- const: msi5
- const: msi6
- const: msi7
+ - const: global
resets:
maxItems: 1
@@ -129,7 +131,8 @@ examples:
<GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0",
"msi1",
"msi2",
@@ -137,7 +140,8 @@ examples:
"msi4",
"msi5",
"msi6",
- "msi7";
+ "msi7",
+ "global";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc GIC_SPI 434 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml
index 76cb9fbfd476..ff508f592a1a 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml
@@ -54,9 +54,10 @@ properties:
interrupts:
minItems: 8
- maxItems: 8
+ maxItems: 9
interrupt-names:
+ minItems: 8
items:
- const: msi0
- const: msi1
@@ -66,6 +67,7 @@ properties:
- const: msi5
- const: msi6
- const: msi7
+ - const: global
resets:
maxItems: 1
@@ -149,9 +151,10 @@ examples:
<GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0", "msi1", "msi2", "msi3",
- "msi4", "msi5", "msi6", "msi7";
+ "msi4", "msi5", "msi6", "msi7", "global";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sc8180x.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sc8180x.yaml
index baf1813ec0ac..331fc25d7a17 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sc8180x.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sc8180x.yaml
@@ -49,9 +49,10 @@ properties:
interrupts:
minItems: 8
- maxItems: 8
+ maxItems: 9
interrupt-names:
+ minItems: 8
items:
- const: msi0
- const: msi1
@@ -61,6 +62,7 @@ properties:
- const: msi5
- const: msi6
- const: msi7
+ - const: global
resets:
maxItems: 1
@@ -136,7 +138,8 @@ examples:
<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0",
"msi1",
"msi2",
@@ -144,7 +147,8 @@ examples:
"msi4",
"msi5",
"msi6",
- "msi7";
+ "msi7",
+ "global";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc 0 149 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml
index 9d569644fda9..a604f2a79de3 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml
@@ -49,9 +49,10 @@ properties:
interrupts:
minItems: 8
- maxItems: 8
+ maxItems: 9
interrupt-names:
+ minItems: 8
items:
- const: msi0
- const: msi1
@@ -61,6 +62,7 @@ properties:
- const: msi5
- const: msi6
- const: msi7
+ - const: global
resets:
maxItems: 1
@@ -128,9 +130,10 @@ examples:
<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0", "msi1", "msi2", "msi3",
- "msi4", "msi5", "msi6", "msi7";
+ "msi4", "msi5", "msi6", "msi7", "global";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc 0 149 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8250.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8250.yaml
index 4d060bce6f9d..af4dae68d508 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8250.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8250.yaml
@@ -61,9 +61,10 @@ properties:
interrupts:
minItems: 8
- maxItems: 8
+ maxItems: 9
interrupt-names:
+ minItems: 8
items:
- const: msi0
- const: msi1
@@ -73,6 +74,7 @@ properties:
- const: msi5
- const: msi6
- const: msi7
+ - const: global
resets:
maxItems: 1
@@ -143,9 +145,10 @@ examples:
<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0", "msi1", "msi2", "msi3",
- "msi4", "msi5", "msi6", "msi7";
+ "msi4", "msi5", "msi6", "msi7", "global";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc 0 149 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml
index 2a4cc41fc710..dde3079adbb3 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml
@@ -51,9 +51,10 @@ properties:
interrupts:
minItems: 8
- maxItems: 8
+ maxItems: 9
interrupt-names:
+ minItems: 8
items:
- const: msi0
- const: msi1
@@ -63,6 +64,7 @@ properties:
- const: msi5
- const: msi6
- const: msi7
+ - const: global
resets:
maxItems: 1
@@ -132,9 +134,10 @@ examples:
<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0", "msi1", "msi2", "msi3",
- "msi4", "msi5", "msi6", "msi7";
+ "msi4", "msi5", "msi6", "msi7", "global";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc 0 149 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
index 8f628939209e..0e1808105a81 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
@@ -21,6 +21,7 @@ properties:
- qcom,pcie-apq8064
- qcom,pcie-apq8084
- qcom,pcie-ipq4019
+ - qcom,pcie-ipq5018
- qcom,pcie-ipq6018
- qcom,pcie-ipq8064
- qcom,pcie-ipq8064-v2
@@ -168,6 +169,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,pcie-ipq5018
- qcom,pcie-ipq6018
- qcom,pcie-ipq8074-gen3
- qcom,pcie-ipq9574
@@ -175,14 +177,16 @@ allOf:
properties:
reg:
minItems: 5
- maxItems: 5
+ maxItems: 6
reg-names:
+ minItems: 5
items:
- const: dbi # DesignWare PCIe registers
- const: elbi # External local bus interface registers
- const: atu # ATU address space
- const: parf # Qualcomm specific registers
- const: config # PCIe configuration space
+ - const: mhi # MHI registers
- if:
properties:
@@ -327,6 +331,53 @@ allOf:
compatible:
contains:
enum:
+ - qcom,pcie-ipq5018
+ then:
+ properties:
+ clocks:
+ minItems: 6
+ maxItems: 6
+ clock-names:
+ items:
+ - const: iface # PCIe to SysNOC BIU clock
+ - const: axi_m # AXI Master clock
+ - const: axi_s # AXI Slave clock
+ - const: ahb # AHB clock
+ - const: aux # Auxiliary clock
+ - const: axi_bridge # AXI bridge clock
+ resets:
+ minItems: 8
+ maxItems: 8
+ reset-names:
+ items:
+ - const: pipe # PIPE reset
+ - const: sleep # Sleep reset
+ - const: sticky # Core sticky reset
+ - const: axi_m # AXI master reset
+ - const: axi_s # AXI slave reset
+ - const: ahb # AHB reset
+ - const: axi_m_sticky # AXI master sticky reset
+ - const: axi_s_sticky # AXI slave sticky reset
+ interrupts:
+ minItems: 9
+ maxItems: 9
+ interrupt-names:
+ items:
+ - const: msi0
+ - const: msi1
+ - const: msi2
+ - const: msi3
+ - const: msi4
+ - const: msi5
+ - const: msi6
+ - const: msi7
+ - const: global
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,pcie-msm8996
then:
properties:
@@ -562,6 +613,7 @@ allOf:
enum:
- qcom,pcie-apq8064
- qcom,pcie-ipq4019
+ - qcom,pcie-ipq5018
- qcom,pcie-ipq8064
- qcom,pcie-ipq8064v2
- qcom,pcie-ipq8074
@@ -589,7 +641,11 @@ allOf:
compatible:
contains:
enum:
+ - qcom,pcie-ipq6018
+ - qcom,pcie-ipq8074
+ - qcom,pcie-ipq8074-gen3
- qcom,pcie-msm8996
+ - qcom,pcie-msm8998
- qcom,pcie-sdm845
then:
oneOf:
@@ -602,8 +658,9 @@ allOf:
- properties:
interrupts:
minItems: 8
- maxItems: 8
+ maxItems: 9
interrupt-names:
+ minItems: 8
items:
- const: msi0
- const: msi1
@@ -613,6 +670,7 @@ allOf:
- const: msi5
- const: msi6
- const: msi7
+ - const: global
- if:
properties:
@@ -622,11 +680,8 @@ allOf:
- qcom,pcie-apq8064
- qcom,pcie-apq8084
- qcom,pcie-ipq4019
- - qcom,pcie-ipq6018
- qcom,pcie-ipq8064
- qcom,pcie-ipq8064-v2
- - qcom,pcie-ipq8074
- - qcom,pcie-ipq8074-gen3
- qcom,pcie-qcs404
then:
properties:
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-ep.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-ep.yaml
index 32a3b7665ff5..6b91581c30ae 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/rcar-pci-ep.yaml
@@ -73,21 +73,21 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/r8a774c0-sysc.h>
- pcie0_ep: pcie-ep@fe000000 {
- compatible = "renesas,r8a774c0-pcie-ep",
- "renesas,rcar-gen3-pcie-ep";
- reg = <0xfe000000 0x80000>,
- <0xfe100000 0x100000>,
- <0xfe200000 0x200000>,
- <0x30000000 0x8000000>,
- <0x38000000 0x8000000>;
- reg-names = "apb-base", "memory0", "memory1", "memory2", "memory3";
- interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
- resets = <&cpg 319>;
- power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
- clocks = <&cpg CPG_MOD 319>;
- clock-names = "pcie";
- max-functions = /bits/ 8 <1>;
+ pcie0_ep: pcie-ep@fe000000 {
+ compatible = "renesas,r8a774c0-pcie-ep",
+ "renesas,rcar-gen3-pcie-ep";
+ reg = <0xfe000000 0x80000>,
+ <0xfe100000 0x100000>,
+ <0xfe200000 0x200000>,
+ <0x30000000 0x8000000>,
+ <0x38000000 0x8000000>;
+ reg-names = "apb-base", "memory0", "memory1", "memory2", "memory3";
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&cpg 319>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ clocks = <&cpg CPG_MOD 319>;
+ clock-names = "pcie";
+ max-functions = /bits/ 8 <1>;
};
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
index 666f013e3af8..7896576920aa 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
+++ b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
@@ -113,27 +113,27 @@ examples:
pcie: pcie@fe000000 {
compatible = "renesas,pcie-r8a7791", "renesas,pcie-rcar-gen2";
reg = <0 0xfe000000 0 0x80000>;
- #address-cells = <3>;
- #size-cells = <2>;
- bus-range = <0x00 0xff>;
- device_type = "pci";
- ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
- <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
- <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
- <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
- dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>,
- <0x42000000 2 0x00000000 2 0x00000000 0 0x40000000>;
- interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
- clock-names = "pcie", "pcie_bus";
- power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
- resets = <&cpg 319>;
- vpcie3v3-supply = <&pcie_3v3>;
- vpcie12v-supply = <&pcie_12v>;
- };
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x00 0xff>;
+ device_type = "pci";
+ ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
+ <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
+ <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
+ <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+ dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>,
+ <0x42000000 2 0x00000000 2 0x00000000 0 0x40000000>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
+ clock-names = "pcie", "pcie_bus";
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 319>;
+ vpcie3v3-supply = <&pcie_3v3>;
+ vpcie12v-supply = <&pcie_12v>;
+ };
};
diff --git a/Documentation/devicetree/bindings/pci/rockchip-dw-pcie-common.yaml b/Documentation/devicetree/bindings/pci/rockchip-dw-pcie-common.yaml
index cc9adfc7611c..fde9b87508b3 100644
--- a/Documentation/devicetree/bindings/pci/rockchip-dw-pcie-common.yaml
+++ b/Documentation/devicetree/bindings/pci/rockchip-dw-pcie-common.yaml
@@ -65,7 +65,11 @@ properties:
tx_cpl_timeout, cor_err_sent, nf_err_sent, f_err_sent, cor_err_rx,
nf_err_rx, f_err_rx, radm_qoverflow
- description:
- eDMA write channel 0 interrupt
+          If the matching interrupt name is "msi", then this is the combined
+          MSI line interrupt, used to deliver MSI interrupts to the GIC via a
+          GIC SPI interrupt instead of a GIC ITS interrupt.
+          If the matching interrupt name is "dma0", then this is the eDMA write
+          channel 0 interrupt.
- description:
eDMA write channel 1 interrupt
- description:
@@ -81,7 +85,9 @@ properties:
- const: msg
- const: legacy
- const: err
- - const: dma0
+ - enum:
+ - msi
+ - dma0
- const: dma1
- const: dma2
- const: dma3
diff --git a/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml b/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml
index 550d8a684af3..6c6d828ce964 100644
--- a/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml
@@ -16,16 +16,14 @@ description: |+
PCIe IP and thus inherits all the common properties defined in
snps,dw-pcie.yaml.
-allOf:
- - $ref: /schemas/pci/snps,dw-pcie.yaml#
- - $ref: /schemas/pci/rockchip-dw-pcie-common.yaml#
-
properties:
compatible:
oneOf:
- const: rockchip,rk3568-pcie
- items:
- enum:
+ - rockchip,rk3562-pcie
+ - rockchip,rk3576-pcie
- rockchip,rk3588-pcie
- const: rockchip,rk3568-pcie
@@ -71,8 +69,58 @@ properties:
vpcie3v3-supply: true
-required:
- - msi-map
+allOf:
+ - $ref: /schemas/pci/snps,dw-pcie.yaml#
+ - $ref: /schemas/pci/rockchip-dw-pcie-common.yaml#
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,rk3562-pcie
+ - rockchip,rk3576-pcie
+ then:
+ required:
+ - msi-map
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,rk3562-pcie
+ - rockchip,rk3576-pcie
+ then:
+ properties:
+ interrupts:
+ minItems: 6
+ maxItems: 6
+ interrupt-names:
+ items:
+ - const: sys
+ - const: pmc
+ - const: msg
+ - const: legacy
+ - const: err
+ - const: msi
+ else:
+ properties:
+ interrupts:
+ minItems: 5
+ interrupt-names:
+ minItems: 5
+ items:
+ - const: sys
+ - const: pmc
+ - const: msg
+ - const: legacy
+ - const: err
+ - const: dma0
+ - const: dma1
+ - const: dma2
+ - const: dma3
+
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml b/Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml
index 844fc7142302..d35ff807936b 100644
--- a/Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml
@@ -81,10 +81,10 @@ unevaluatedProperties: false
examples:
- |
+ #include <dt-bindings/clock/sifive-fu740-prci.h>
bus {
#address-cells = <2>;
#size-cells = <2>;
- #include <dt-bindings/clock/sifive-fu740-prci.h>
pcie@e00000000 {
compatible = "sifive,fu740-pcie";
diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie-common.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie-common.yaml
index dc05761c5cf9..34594972d8db 100644
--- a/Documentation/devicetree/bindings/pci/snps,dw-pcie-common.yaml
+++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie-common.yaml
@@ -115,7 +115,7 @@ properties:
above for new bindings.
oneOf:
- description: See native 'dbi' clock for details
- enum: [ pcie, pcie_apb_sys, aclk_dbi ]
+ enum: [ pcie, pcie_apb_sys, aclk_dbi, reg ]
- description: See native 'mstr/slv' clock for details
enum: [ pcie_bus, pcie_inbound_axi, pcie_aclk, aclk_mst, aclk_slv ]
- description: See native 'pipe' clock for details
@@ -201,6 +201,7 @@ properties:
oneOf:
- pattern: '^pcie(-?phy[0-9]*)?$'
- pattern: '^p2u-[0-7]$'
+ - pattern: '^cp[01]-pcie[0-2]-x[124](-lane[0-3])?-phy$' # marvell,armada8k-pcie
reset-gpio:
deprecated: true
diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
index 1117a86fb6f7..69e82f438f58 100644
--- a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
@@ -105,6 +105,8 @@ properties:
Vendor-specific CSR names. Consider using the generic names above
for new bindings.
oneOf:
+ - description: See native 'dbi' CSR region for details.
+ enum: [ ctrl ]
- description: See native 'elbi/app' CSR region for details.
enum: [ apb, mgmt, link, ulreg, appl ]
- description: See native 'atu' CSR region for details.
@@ -117,7 +119,7 @@ properties:
const: slcr
allOf:
- contains:
- const: dbi
+ enum: [ dbi, ctrl ]
- contains:
const: config
diff --git a/Documentation/devicetree/bindings/pci/v3,v360epc-pci.yaml b/Documentation/devicetree/bindings/pci/v3,v360epc-pci.yaml
new file mode 100644
index 000000000000..38cac88f17bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/v3,v360epc-pci.yaml
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/v3,v360epc-pci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: V3 Semiconductor V360 EPC PCI bridge
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description:
+  This bridge is found in the ARM Integrator/AP (Application Platform).
+
+allOf:
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: arm,integrator-ap-pci
+ - const: v3,v360epc-pci
+
+ reg:
+ items:
+ - description: V3 host bridge controller
+ - description: Configuration space
+
+ clocks:
+ maxItems: 1
+
+ dma-ranges:
+ maxItems: 2
+ description:
+ The inbound ranges must be aligned to a 1MB boundary, and may be 1MB, 2MB,
+ 4MB, 8MB, 16MB, 32MB, 64MB, 128MB, 256MB, 512MB, 1GB or 2GB in size. The
+ memory should be marked as pre-fetchable.
+
+ interrupts:
+ description: Bus Error IRQ
+ maxItems: 1
+
+ ranges:
+ description:
+ The non-prefetchable and prefetchable memory windows must each be exactly
+ 256MB (0x10000000) in size. The prefetchable memory window must be
+ immediately adjacent to the non-prefetchable memory window.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - dma-ranges
+ - "#interrupt-cells"
+ - interrupt-map
+ - interrupt-map-mask
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ pci@62000000 {
+ compatible = "arm,integrator-ap-pci", "v3,v360epc-pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0x62000000 0x10000>, <0x61000000 0x01000000>;
+ device_type = "pci";
+ interrupt-parent = <&pic>;
+ interrupts = <17>; /* Bus error IRQ */
+ clocks = <&pciclk>;
+ ranges = <0x01000000 0 0x00000000 0x60000000 0 0x01000000>, /* 16 MiB @ LB 60000000 */
+ <0x02000000 0 0x40000000 0x40000000 0 0x10000000>, /* 256 MiB @ LB 40000000 1:1 */
+ <0x42000000 0 0x50000000 0x50000000 0 0x10000000>; /* 256 MiB @ LB 50000000 1:1 */
+ dma-ranges = <0x02000000 0 0x20000000 0x20000000 0 0x20000000>, /* EBI: 512 MB @ LB 20000000 1:1 */
+ <0x02000000 0 0x80000000 0x80000000 0 0x40000000>; /* CM alias: 1GB @ LB 80000000 */
+ interrupt-map-mask = <0xf800 0 0 0x7>;
+ interrupt-map =
+ /* IDSEL 9 */
+ <0x4800 0 0 1 &pic 13>, /* INT A on slot 9 is irq 13 */
+ <0x4800 0 0 2 &pic 14>, /* INT B on slot 9 is irq 14 */
+ <0x4800 0 0 3 &pic 15>, /* INT C on slot 9 is irq 15 */
+ <0x4800 0 0 4 &pic 16>, /* INT D on slot 9 is irq 16 */
+ /* IDSEL 10 */
+ <0x5000 0 0 1 &pic 14>, /* INT A on slot 10 is irq 14 */
+ <0x5000 0 0 2 &pic 15>, /* INT B on slot 10 is irq 15 */
+ <0x5000 0 0 3 &pic 16>, /* INT C on slot 10 is irq 16 */
+ <0x5000 0 0 4 &pic 13>, /* INT D on slot 10 is irq 13 */
+ /* IDSEL 11 */
+ <0x5800 0 0 1 &pic 15>, /* INT A on slot 11 is irq 15 */
+ <0x5800 0 0 2 &pic 16>, /* INT B on slot 11 is irq 16 */
+ <0x5800 0 0 3 &pic 13>, /* INT C on slot 11 is irq 13 */
+ <0x5800 0 0 4 &pic 14>, /* INT D on slot 11 is irq 14 */
+ /* IDSEL 12 */
+ <0x6000 0 0 1 &pic 16>, /* INT A on slot 12 is irq 16 */
+ <0x6000 0 0 2 &pic 13>, /* INT B on slot 12 is irq 13 */
+ <0x6000 0 0 3 &pic 14>, /* INT C on slot 12 is irq 14 */
+ <0x6000 0 0 4 &pic 15>; /* INT D on slot 12 is irq 15 */
+ };
+...
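
In the example node above, each interrupt-map unit address encodes the slot's device number in bits [15:11] of the high address cell, and interrupt-map-mask <0xf800 0 0 0x7> keeps only that field plus the INTx pin, so the four slots swizzle INT A-D across IRQs 13-16. A short sketch reproducing those numbers; the helper names are illustrative only, not part of the binding.

def phys_hi(bus, dev, fn):
    """PCI unit address high cell: bus[23:16], device[15:11], function[10:8]."""
    return (bus << 16) | (dev << 11) | (fn << 8)

def integrator_ap_irq(slot, intx):
    """INT A-D (intx 1-4) on slots 9-12 rotate through IRQs 13-16."""
    return 13 + (slot - 9 + intx - 1) % 4

for slot in range(9, 13):
    masked = phys_hi(0, slot, 0) & 0xf800   # interrupt-map-mask = <0xf800 0 0 0x7>
    irqs = [integrator_ap_irq(slot, intx) for intx in range(1, 5)]
    print(hex(masked), irqs)
# 0x4800 [13, 14, 15, 16]
# 0x5000 [14, 15, 16, 13]
# 0x5800 [15, 16, 13, 14]
# 0x6000 [16, 13, 14, 15]
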
diff --git a/Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt b/Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt
deleted file mode 100644
index 11063293f761..000000000000
--- a/Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-V3 Semiconductor V360 EPC PCI bridge
-
-This bridge is found in the ARM Integrator/AP (Application Platform)
-
-Required properties:
-- compatible: should be one of:
- "v3,v360epc-pci"
- "arm,integrator-ap-pci", "v3,v360epc-pci"
-- reg: should contain two register areas:
- first the base address of the V3 host bridge controller, 64KB
- second the configuration area register space, 16MB
-- interrupts: should contain a reference to the V3 error interrupt
- as routed on the system.
-- bus-range: see pci.txt
-- ranges: this follows the standard PCI bindings in the IEEE Std
- 1275-1994 (see pci.txt) with the following restriction:
- - The non-prefetchable and prefetchable memory windows must
- each be exactly 256MB (0x10000000) in size.
- - The prefetchable memory window must be immediately adjacent
- to the non-prefetcable memory window
-- dma-ranges: three ranges for the inbound memory region. The ranges must
- be aligned to a 1MB boundary, and may be 1MB, 2MB, 4MB, 8MB, 16MB, 32MB,
- 64MB, 128MB, 256MB, 512MB, 1GB or 2GB in size. The memory should be marked
- as pre-fetchable. Two ranges are supported by the hardware.
-
-Integrator-specific required properties:
-- syscon: should contain a link to the syscon device node, since
- on the Integrator, some registers in the syscon are required to
- operate the V3 host bridge.
-
-Example:
-
-pci: pciv3@62000000 {
- compatible = "arm,integrator-ap-pci", "v3,v360epc-pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0x62000000 0x10000>, <0x61000000 0x01000000>;
- interrupt-parent = <&pic>;
- interrupts = <17>; /* Bus error IRQ */
- clocks = <&pciclk>;
- bus-range = <0x00 0xff>;
- ranges = 0x01000000 0 0x00000000 /* I/O space @00000000 */
- 0x60000000 0 0x01000000 /* 16 MiB @ LB 60000000 */
- 0x02000000 0 0x40000000 /* non-prefectable memory @40000000 */
- 0x40000000 0 0x10000000 /* 256 MiB @ LB 40000000 1:1 */
- 0x42000000 0 0x50000000 /* prefetchable memory @50000000 */
- 0x50000000 0 0x10000000>; /* 256 MiB @ LB 50000000 1:1 */
- dma-ranges = <0x02000000 0 0x20000000 /* EBI memory space */
- 0x20000000 0 0x20000000 /* 512 MB @ LB 20000000 1:1 */
- 0x02000000 0 0x80000000 /* Core module alias memory */
- 0x80000000 0 0x40000000>; /* 1GB @ LB 80000000 */
- interrupt-map-mask = <0xf800 0 0 0x7>;
- interrupt-map = <
- /* IDSEL 9 */
- 0x4800 0 0 1 &pic 13 /* INT A on slot 9 is irq 13 */
- 0x4800 0 0 2 &pic 14 /* INT B on slot 9 is irq 14 */
- 0x4800 0 0 3 &pic 15 /* INT C on slot 9 is irq 15 */
- 0x4800 0 0 4 &pic 16 /* INT D on slot 9 is irq 16 */
- /* IDSEL 10 */
- 0x5000 0 0 1 &pic 14 /* INT A on slot 10 is irq 14 */
- 0x5000 0 0 2 &pic 15 /* INT B on slot 10 is irq 15 */
- 0x5000 0 0 3 &pic 16 /* INT C on slot 10 is irq 16 */
- 0x5000 0 0 4 &pic 13 /* INT D on slot 10 is irq 13 */
- /* IDSEL 11 */
- 0x5800 0 0 1 &pic 15 /* INT A on slot 11 is irq 15 */
- 0x5800 0 0 2 &pic 16 /* INT B on slot 11 is irq 16 */
- 0x5800 0 0 3 &pic 13 /* INT C on slot 11 is irq 13 */
- 0x5800 0 0 4 &pic 14 /* INT D on slot 11 is irq 14 */
- /* IDSEL 12 */
- 0x6000 0 0 1 &pic 16 /* INT A on slot 12 is irq 16 */
- 0x6000 0 0 2 &pic 13 /* INT B on slot 12 is irq 13 */
- 0x6000 0 0 3 &pic 14 /* INT C on slot 12 is irq 14 */
- 0x6000 0 0 4 &pic 15 /* INT D on slot 12 is irq 15 */
- >;
-};
diff --git a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
index d674a24c8ccc..9823456addea 100644
--- a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
+++ b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
@@ -76,64 +76,62 @@ unevaluatedProperties: false
examples:
- |
-
versal {
- #address-cells = <2>;
- #size-cells = <2>;
- cpm_pcie: pcie@fca10000 {
- compatible = "xlnx,versal-cpm-host-1.00";
- device_type = "pci";
- #address-cells = <3>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- interrupts = <0 72 4>;
- interrupt-parent = <&gic>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie_intc_0 0>,
- <0 0 0 2 &pcie_intc_0 1>,
- <0 0 0 3 &pcie_intc_0 2>,
- <0 0 0 4 &pcie_intc_0 3>;
- bus-range = <0x00 0xff>;
- ranges = <0x02000000 0x0 0xe0010000 0x0 0xe0010000 0x0 0x10000000>,
- <0x43000000 0x80 0x00000000 0x80 0x00000000 0x0 0x80000000>;
- msi-map = <0x0 &its_gic 0x0 0x10000>;
- reg = <0x0 0xfca10000 0x0 0x1000>,
- <0x6 0x00000000 0x0 0x10000000>;
- reg-names = "cpm_slcr", "cfg";
- pcie_intc_0: interrupt-controller {
- #address-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-controller;
- };
- };
-
- cpm5_pcie: pcie@fcdd0000 {
- compatible = "xlnx,versal-cpm5-host";
- device_type = "pci";
- #address-cells = <3>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- interrupts = <0 72 4>;
- interrupt-parent = <&gic>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie_intc_1 0>,
- <0 0 0 2 &pcie_intc_1 1>,
- <0 0 0 3 &pcie_intc_1 2>,
- <0 0 0 4 &pcie_intc_1 3>;
- bus-range = <0x00 0xff>;
- ranges = <0x02000000 0x0 0xe0000000 0x0 0xe0000000 0x0 0x10000000>,
- <0x43000000 0x80 0x00000000 0x80 0x00000000 0x0 0x80000000>;
- msi-map = <0x0 &its_gic 0x0 0x10000>;
- reg = <0x00 0xfcdd0000 0x00 0x1000>,
- <0x06 0x00000000 0x00 0x1000000>,
- <0x00 0xfce20000 0x00 0x1000000>;
- reg-names = "cpm_slcr", "cfg", "cpm_csr";
-
- pcie_intc_1: interrupt-controller {
- #address-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-controller;
- };
- };
-
+ #address-cells = <2>;
+ #size-cells = <2>;
+ pcie@fca10000 {
+ compatible = "xlnx,versal-cpm-host-1.00";
+ device_type = "pci";
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ interrupts = <0 72 4>;
+ interrupt-parent = <&gic>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc_0 0>,
+ <0 0 0 2 &pcie_intc_0 1>,
+ <0 0 0 3 &pcie_intc_0 2>,
+ <0 0 0 4 &pcie_intc_0 3>;
+ bus-range = <0x00 0xff>;
+ ranges = <0x02000000 0x0 0xe0010000 0x0 0xe0010000 0x0 0x10000000>,
+ <0x43000000 0x80 0x00000000 0x80 0x00000000 0x0 0x80000000>;
+ msi-map = <0x0 &its_gic 0x0 0x10000>;
+ reg = <0x0 0xfca10000 0x0 0x1000>,
+ <0x6 0x00000000 0x0 0x10000000>;
+ reg-names = "cpm_slcr", "cfg";
+ pcie_intc_0: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
+
+ pcie@fcdd0000 {
+ compatible = "xlnx,versal-cpm5-host";
+ device_type = "pci";
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ interrupts = <0 72 4>;
+ interrupt-parent = <&gic>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc_1 0>,
+ <0 0 0 2 &pcie_intc_1 1>,
+ <0 0 0 3 &pcie_intc_1 2>,
+ <0 0 0 4 &pcie_intc_1 3>;
+ bus-range = <0x00 0xff>;
+ ranges = <0x02000000 0x0 0xe0000000 0x0 0xe0000000 0x0 0x10000000>,
+ <0x43000000 0x80 0x00000000 0x80 0x00000000 0x0 0x80000000>;
+ msi-map = <0x0 &its_gic 0x0 0x10000>;
+ reg = <0x00 0xfcdd0000 0x00 0x1000>,
+ <0x06 0x00000000 0x00 0x1000000>,
+ <0x00 0xfce20000 0x00 0x1000000>;
+ reg-names = "cpm_slcr", "cfg", "cpm_csr";
+
+ pcie_intc_1: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/pinctrl/amlogic,pinctrl-a4.yaml b/Documentation/devicetree/bindings/pinctrl/amlogic,pinctrl-a4.yaml
index 8eb50cad61d5..a6ef4797e5c5 100644
--- a/Documentation/devicetree/bindings/pinctrl/amlogic,pinctrl-a4.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/amlogic,pinctrl-a4.yaml
@@ -14,7 +14,12 @@ allOf:
properties:
compatible:
- const: amlogic,pinctrl-a4
+ oneOf:
+ - const: amlogic,pinctrl-a4
+ - items:
+ - enum:
+ - amlogic,pinctrl-a5
+ - const: amlogic,pinctrl-a4
"#address-cells":
const: 2
@@ -65,6 +70,7 @@ patternProperties:
patternProperties:
"^group-[0-9a-z-]+$":
type: object
+ unevaluatedProperties: false
allOf:
- $ref: /schemas/pinctrl/pincfg-node.yaml
- $ref: /schemas/pinctrl/pinmux-node.yaml
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx7ulp-iomuxc1.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx7ulp-iomuxc1.yaml
new file mode 100644
index 000000000000..957918b73a93
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx7ulp-iomuxc1.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/fsl,imx7ulp-iomuxc1.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX7ULP IOMUX Controller
+
+description: |
+ i.MX 7ULP has three IOMUXC instances: IOMUXC0 for M4 ports, IOMUXC1 for A7
+ ports and IOMUXC DDR for DDR interface.
+
+ Note: This binding doc is only for the IOMUXC1 support in A7 Domain and it
+ only supports generic pin config.
+
+ Please refer to fsl,imx-pinctrl.txt in this directory for common binding
+ part and usage.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx7ulp-iomuxc1
+
+ reg:
+ maxItems: 1
+
+patternProperties:
+ 'grp$':
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use below standard properties.
+
+ properties:
+ fsl,pins:
+ description:
+            Each entry consists of 5 integers which represent the mux
+ and config setting for one pin. The first 4 integers
+ <mux_conf_reg input_reg mux_mode input_val> are specified
+ using a PIN_FUNC_ID macro, which can be found in
+ imx7ulp-pinfunc.h in the device tree source folder.
+ The last integer CONFIG is the pad setting value like
+ pull-up on this pin.
+
+ Please refer to i.MX7ULP Reference Manual for detailed
+ CONFIG settings.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: |
+ "mux_conf_reg" indicates the offset of mux register.
+ - description: |
+ "input_reg" indicates the offset of select input register.
+ - description: |
+ "mux_mode" indicates the mux value to be applied.
+ - description: |
+ "input_val" indicates the select input value to be applied.
+ - description: |
+ CONFIG bits definition:
+ PAD_CTL_OBE (1 << 17)
+ PAD_CTL_IBE (1 << 16)
+ PAD_CTL_LK (1 << 16)
+ PAD_CTL_DSE_HI (1 << 6)
+ PAD_CTL_DSE_STD (0 << 6)
+ PAD_CTL_ODE (1 << 5)
+ PAD_CTL_PUSH_PULL (0 << 5)
+ PAD_CTL_SRE_SLOW (1 << 2)
+ PAD_CTL_SRE_STD (0 << 2)
+ PAD_CTL_PE (1 << 0)
+
+ required:
+ - fsl,pins
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: pinctrl.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ pinctrl@40ac0000 {
+ compatible = "fsl,imx7ulp-iomuxc1";
+ reg = <0x40ac0000 0x1000>;
+
+ lpuart4grp {
+ fsl,pins = <
+ 0x000c 0x0248 0x4 0x1 0x1
+ 0x0008 0x024c 0x4 0x1 0x1
+ >;
+ };
+ };
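
The lpuart4grp example above writes the <mux_conf_reg input_reg mux_mode input_val config> tuple as raw numbers. A small sketch that unpacks one of those entries into the fields named in the description; the field and variable names are illustrative only.

from collections import namedtuple

Pin = namedtuple("Pin", "mux_conf_reg input_reg mux_mode input_val config")

entry = Pin(0x000c, 0x0248, 0x4, 0x1, 0x1)   # first line of the example above

PAD_CTL_PE = 1 << 0                          # pull enable, per the CONFIG bits list

print(f"mux reg @+0x{entry.mux_conf_reg:04x}, "
      f"input reg @+0x{entry.input_reg:04x}, "
      f"mux mode {entry.mux_mode}, "
      f"pull enabled: {bool(entry.config & PAD_CTL_PE)}")
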
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx7ulp-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx7ulp-pinctrl.txt
deleted file mode 100644
index bfa3703a7446..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx7ulp-pinctrl.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-* Freescale i.MX7ULP IOMUX Controller
-
-i.MX 7ULP has three IOMUXC instances: IOMUXC0 for M4 ports, IOMUXC1 for A7
-ports and IOMUXC DDR for DDR interface.
-
-Note:
-This binding doc is only for the IOMUXC1 support in A7 Domain and it only
-supports generic pin config.
-
-Please refer to fsl,imx-pinctrl.txt in this directory for common binding
-part and usage.
-
-Required properties:
-- compatible: "fsl,imx7ulp-iomuxc1".
-- fsl,pins: Each entry consists of 5 integers which represents the mux
- and config setting for one pin. The first 4 integers
- <mux_conf_reg input_reg mux_mode input_val> are specified
- using a PIN_FUNC_ID macro, which can be found in
- imx7ulp-pinfunc.h in the device tree source folder.
- The last integer CONFIG is the pad setting value like
- pull-up on this pin.
-
- Please refer to i.MX7ULP Reference Manual for detailed
- CONFIG settings.
-
-CONFIG bits definition:
-PAD_CTL_OBE (1 << 17)
-PAD_CTL_IBE (1 << 16)
-PAD_CTL_LK (1 << 16)
-PAD_CTL_DSE_HI (1 << 6)
-PAD_CTL_DSE_STD (0 << 6)
-PAD_CTL_ODE (1 << 5)
-PAD_CTL_PUSH_PULL (0 << 5)
-PAD_CTL_SRE_SLOW (1 << 2)
-PAD_CTL_SRE_STD (0 << 2)
-PAD_CTL_PE (1 << 0)
-
-Examples:
-#include "imx7ulp-pinfunc.h"
-
-/* Pin Controller Node */
-iomuxc1: pinctrl@40ac0000 {
- compatible = "fsl,imx7ulp-iomuxc1";
- reg = <0x40ac0000 0x1000>;
-
- /* Pin Configuration Node */
- pinctrl_lpuart4: lpuart4grp {
- fsl,pins = <
- IMX7ULP_PAD_PTC3__LPUART4_RX 0x1
- IMX7ULP_PAD_PTC2__LPUART4_TX 0x1
- >;
- };
-};
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,vf610-iomuxc.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,vf610-iomuxc.yaml
new file mode 100644
index 000000000000..3e13587df310
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,vf610-iomuxc.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/fsl,vf610-iomuxc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Vybrid VF610 IOMUX Controller
+
+description:
+ Please refer to fsl,imx-pinctrl.txt in this directory for common binding part
+ and usage.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,vf610-iomuxc
+
+ reg:
+ maxItems: 1
+
+patternProperties:
+ 'grp$':
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use below standard properties.
+
+ properties:
+ fsl,pins:
+ description:
+            Array of two-integer entries, each representing the mux and config
+            setting for one pin. The format is fsl,pins = <PIN_FUNC_ID CONFIG>,
+            where PIN_FUNC_ID selects a pin working in a specific function and
+            CONFIG is the pad setting value such as pull-up, speed or ODE for
+            this pin. Please refer to the Vybrid VF610 datasheet for the valid
+            pad config settings.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description:
+                PIN_FUNC_ID; refer to vf610-pinfunc.h in the device tree source
+                folder for all available PIN_FUNC_IDs for Vybrid VF610.
+ - description: |
+ CONFIG bits definition is
+ PAD_CTL_SPEED_LOW (1 << 12)
+ PAD_CTL_SPEED_MED (2 << 12)
+ PAD_CTL_SPEED_HIGH (3 << 12)
+ PAD_CTL_SRE_FAST (1 << 11)
+ PAD_CTL_SRE_SLOW (0 << 11)
+ PAD_CTL_ODE (1 << 10)
+ PAD_CTL_HYS (1 << 9)
+ PAD_CTL_DSE_DISABLE (0 << 6)
+ PAD_CTL_DSE_150ohm (1 << 6)
+ PAD_CTL_DSE_75ohm (2 << 6)
+ PAD_CTL_DSE_50ohm (3 << 6)
+ PAD_CTL_DSE_37ohm (4 << 6)
+ PAD_CTL_DSE_30ohm (5 << 6)
+ PAD_CTL_DSE_25ohm (6 << 6)
+ PAD_CTL_DSE_20ohm (7 << 6)
+ PAD_CTL_PUS_100K_DOWN (0 << 4)
+ PAD_CTL_PUS_47K_UP (1 << 4)
+ PAD_CTL_PUS_100K_UP (2 << 4)
+ PAD_CTL_PUS_22K_UP (3 << 4)
+ PAD_CTL_PKE (1 << 3)
+ PAD_CTL_PUE (1 << 2)
+ PAD_CTL_OBE_ENABLE (1 << 1)
+ PAD_CTL_IBE_ENABLE (1 << 0)
+ PAD_CTL_OBE_IBE_ENABLE (3 << 0)
+
+ required:
+ - fsl,pins
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: pinctrl.yaml#
+
+unevaluatedProperties: false
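
Since this schema carries no example node, here is a brief sketch of how a CONFIG value is composed from the PAD_CTL bits listed above; the particular combination is only an illustration, not a recommended setting.

PAD_CTL_SPEED_MED      = 2 << 12
PAD_CTL_SRE_FAST       = 1 << 11
PAD_CTL_PUS_100K_UP    = 2 << 4
PAD_CTL_PKE            = 1 << 3
PAD_CTL_PUE            = 1 << 2
PAD_CTL_OBE_IBE_ENABLE = 3 << 0

config = (PAD_CTL_SPEED_MED | PAD_CTL_SRE_FAST | PAD_CTL_PUS_100K_UP
          | PAD_CTL_PKE | PAD_CTL_PUE | PAD_CTL_OBE_IBE_ENABLE)

# The device tree would then use: fsl,pins = <PIN_FUNC_ID 0x282f>;
print(hex(config))   # 0x282f
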
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,vf610-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,vf610-pinctrl.txt
deleted file mode 100644
index ddcdeb697c29..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/fsl,vf610-pinctrl.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-Freescale Vybrid VF610 IOMUX Controller
-
-Please refer to fsl,imx-pinctrl.txt in this directory for common binding part
-and usage.
-
-Required properties:
-- compatible: "fsl,vf610-iomuxc"
-- fsl,pins: two integers array, represents a group of pins mux and config
- setting. The format is fsl,pins = <PIN_FUNC_ID CONFIG>, PIN_FUNC_ID is
- a pin working on a specific function, CONFIG is the pad setting value
- such as pull-up, speed, ode for this pin. Please refer to Vybrid VF610
- datasheet for the valid pad config settings.
-
-CONFIG bits definition:
-PAD_CTL_SPEED_LOW (1 << 12)
-PAD_CTL_SPEED_MED (2 << 12)
-PAD_CTL_SPEED_HIGH (3 << 12)
-PAD_CTL_SRE_FAST (1 << 11)
-PAD_CTL_SRE_SLOW (0 << 11)
-PAD_CTL_ODE (1 << 10)
-PAD_CTL_HYS (1 << 9)
-PAD_CTL_DSE_DISABLE (0 << 6)
-PAD_CTL_DSE_150ohm (1 << 6)
-PAD_CTL_DSE_75ohm (2 << 6)
-PAD_CTL_DSE_50ohm (3 << 6)
-PAD_CTL_DSE_37ohm (4 << 6)
-PAD_CTL_DSE_30ohm (5 << 6)
-PAD_CTL_DSE_25ohm (6 << 6)
-PAD_CTL_DSE_20ohm (7 << 6)
-PAD_CTL_PUS_100K_DOWN (0 << 4)
-PAD_CTL_PUS_47K_UP (1 << 4)
-PAD_CTL_PUS_100K_UP (2 << 4)
-PAD_CTL_PUS_22K_UP (3 << 4)
-PAD_CTL_PKE (1 << 3)
-PAD_CTL_PUE (1 << 2)
-PAD_CTL_OBE_ENABLE (1 << 1)
-PAD_CTL_IBE_ENABLE (1 << 0)
-PAD_CTL_OBE_IBE_ENABLE (3 << 0)
-
-Please refer to vf610-pinfunc.h in device tree source folder
-for all available PIN_FUNC_ID for Vybrid VF610.
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt65xx-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt65xx-pinctrl.yaml
index bccff08a5ba3..b9680b896f12 100644
--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt65xx-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt65xx-pinctrl.yaml
@@ -136,75 +136,44 @@ examples:
#address-cells = <2>;
#size-cells = <2>;
- syscfg_pctl_a: syscfg-pctl-a@10005000 {
- compatible = "mediatek,mt8135-pctl-a-syscfg", "syscon";
- reg = <0 0x10005000 0 0x1000>;
- };
-
- syscfg_pctl_b: syscfg-pctl-b@1020c020 {
- compatible = "mediatek,mt8135-pctl-b-syscfg", "syscon";
- reg = <0 0x1020C020 0 0x1000>;
- };
-
pinctrl@1c20800 {
- compatible = "mediatek,mt8135-pinctrl";
- reg = <0 0x1000B000 0 0x1000>;
- mediatek,pctl-regmap = <&syscfg_pctl_a>, <&syscfg_pctl_b>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
-
- i2c0_pins_a: i2c0-pins {
- pins1 {
- pinmux = <MT8135_PIN_100_SDA0__FUNC_SDA0>,
- <MT8135_PIN_101_SCL0__FUNC_SCL0>;
- bias-disable;
- };
- };
-
- i2c1_pins_a: i2c1-pins {
- pins {
- pinmux = <MT8135_PIN_195_SDA1__FUNC_SDA1>,
- <MT8135_PIN_196_SCL1__FUNC_SCL1>;
- bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ compatible = "mediatek,mt8135-pinctrl";
+ reg = <0 0x1000B000 0 0x1000>;
+ mediatek,pctl-regmap = <&syscfg_pctl_a>, <&syscfg_pctl_b>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+
+ i2c0_pins_a: i2c0-pins {
+ pins1 {
+ pinmux = <MT8135_PIN_100_SDA0__FUNC_SDA0>,
+ <MT8135_PIN_101_SCL0__FUNC_SCL0>;
+ bias-disable;
+ };
};
- };
- i2c2_pins_a: i2c2-pins {
- pins1 {
- pinmux = <MT8135_PIN_193_SDA2__FUNC_SDA2>;
- bias-pull-down;
+ i2c1_pins_a: i2c1-pins {
+ pins {
+ pinmux = <MT8135_PIN_195_SDA1__FUNC_SDA1>,
+ <MT8135_PIN_196_SCL1__FUNC_SCL1>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
};
- pins2 {
- pinmux = <MT8135_PIN_49_WATCHDOG__FUNC_GPIO49>;
- bias-pull-up;
- };
- };
-
- i2c3_pins_a: i2c3-pins {
- pins1 {
- pinmux = <MT8135_PIN_40_DAC_CLK__FUNC_GPIO40>,
- <MT8135_PIN_41_DAC_WS__FUNC_GPIO41>;
- bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- };
-
- pins2 {
- pinmux = <MT8135_PIN_35_SCL3__FUNC_SCL3>,
- <MT8135_PIN_36_SDA3__FUNC_SDA3>;
- output-low;
- bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- };
+ i2c2_pins_a: i2c2-pins {
+ pins1 {
+ pinmux = <MT8135_PIN_193_SDA2__FUNC_SDA2>;
+ bias-pull-down;
+ };
- pins3 {
- pinmux = <MT8135_PIN_57_JTCK__FUNC_GPIO57>,
- <MT8135_PIN_60_JTDI__FUNC_JTDI>;
- drive-strength = <32>;
+ pins2 {
+ pinmux = <MT8135_PIN_49_WATCHDOG__FUNC_GPIO49>;
+ bias-pull-up;
+ };
};
- };
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt6779-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt6779-pinctrl.yaml
index 3bbc00df5548..f4bab7a132d3 100644
--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt6779-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt6779-pinctrl.yaml
@@ -245,9 +245,4 @@ examples:
};
};
};
-
- mmc0 {
- pinctrl-0 = <&mmc0_pins_default>;
- pinctrl-names = "default";
- };
};
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt6893-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt6893-pinctrl.yaml
new file mode 100644
index 000000000000..fa189fe00624
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt6893-pinctrl.yaml
@@ -0,0 +1,193 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/mediatek,mt6893-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT6893 Pin Controller
+
+maintainers:
+ - AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+
+description:
+  The MediaTek MT6893 pin controller is used to control SoC pins.
+
+properties:
+ compatible:
+ const: mediatek,mt6893-pinctrl
+
+ reg:
+ items:
+ - description: pin controller base
+ - description: rm group IO
+ - description: bm group IO
+ - description: lm group IO
+ - description: lb group IO
+ - description: rt group IO
+ - description: lt group IO
+ - description: tm group IO
+ - description: External Interrupt (EINT) controller base
+
+ reg-names:
+ items:
+ - const: base
+ - const: rm
+ - const: bm
+ - const: lm
+ - const: lb
+ - const: rt
+ - const: lt
+ - const: tm
+ - const: eint
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ description:
+      Number of cells in a GPIO specifier. Since the generic GPIO binding is
+      used, this must be 2. See the generic GPIO binding documentation for a
+      description of the individual cells.
+ const: 2
+
+ gpio-ranges:
+ maxItems: 1
+
+ gpio-line-names: true
+
+ interrupts:
+ description: The interrupt outputs to sysirq
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+# PIN CONFIGURATION NODES
+patternProperties:
+ '-pins$':
+ type: object
+ additionalProperties: false
+
+ patternProperties:
+ '^pins':
+ type: object
+ allOf:
+ - $ref: /schemas/pinctrl/pincfg-node.yaml
+ - $ref: /schemas/pinctrl/pinmux-node.yaml
+ description:
+          A pinctrl node should contain at least one subnode representing the
+ pinctrl groups available on the machine. Each subnode will list the
+ pins it needs, and how they should be configured, with regard to muxer
+ configuration, pullups, drive strength, input enable/disable and input
+ schmitt.
+
+ properties:
+ pinmux:
+ description:
+ Integer array, represents gpio pin number and mux setting.
+            Supported pin numbers and muxes are defined as macros in
+            arch/arm64/boot/dts/mediatek/mt6893-pinfunc.h for this SoC.
+
+ drive-strength:
+ enum: [2, 4, 6, 8, 10, 12, 14, 16]
+
+ drive-strength-microamp:
+ enum: [125, 250, 500, 1000]
+
+ bias-pull-down:
+ oneOf:
+ - type: boolean
+ - enum: [75000, 5000]
+ description: Pull down RSEL type resistance values (in ohms)
+ description:
+ For normal pull down type there is no need to specify a resistance
+ value, hence this can be specified as a boolean property.
+ For RSEL pull down type a resistance value (in ohms) can be added.
+
+ bias-pull-up:
+ oneOf:
+ - type: boolean
+ - enum: [10000, 5000, 4000, 3000]
+ description: Pull up RSEL type resistance values (in ohms)
+ description:
+ For normal pull up type there is no need to specify a resistance
+ value, hence this can be specified as a boolean property.
+ For RSEL pull up type a resistance value (in ohms) can be added.
+
+ bias-disable: true
+
+ output-high: true
+
+ output-low: true
+
+ input-enable: true
+
+ input-disable: true
+
+ input-schmitt-enable: true
+
+ input-schmitt-disable: true
+
+ required:
+ - pinmux
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/pinctrl/mt65xx.h>
+ #define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+ #define PINMUX_GPIO99__FUNC_SCL0 (MTK_PIN_NO(99) | 1)
+ #define PINMUX_GPIO100__FUNC_SDA0 (MTK_PIN_NO(100) | 1)
+
+ pio: pinctrl@10005000 {
+ compatible = "mediatek,mt6893-pinctrl";
+ reg = <0x10005000 0x1000>,
+ <0x11c20000 0x0200>,
+ <0x11d10000 0x0200>,
+ <0x11e20000 0x0200>,
+ <0x11e70000 0x0200>,
+ <0x11ea0000 0x0200>,
+ <0x11f20000 0x0200>,
+ <0x11f30000 0x0200>,
+ <0x1100b000 0x1000>;
+ reg-names = "base", "rm", "bm", "lm", "lb", "rt",
+ "lt", "tm", "eint";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pio 0 0 220>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH 0>;
+ #interrupt-cells = <2>;
+
+ gpio-pins {
+ pins {
+ pinmux = <PINMUX_GPIO0__FUNC_GPIO0>;
+ bias-pull-up = <4000>;
+ drive-strength = <6>;
+ };
+ };
+
+ i2c0-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO99__FUNC_SCL0>,
+ <PINMUX_GPIO100__FUNC_SDA0>;
+ bias-pull-down = <75000>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+ };
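As a usage sketch (not part of the binding), a client device would normally pick up one of the example groups above through the standard pinctrl consumer properties; the i2c0 node, its unit address and the i2c0_pins label are hypothetical here:

    i2c0: i2c@11007000 {
            pinctrl-names = "default";
            pinctrl-0 = <&i2c0_pins>;
    };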
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
index d74cae9d4d65..9acca85184fa 100644
--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
@@ -366,34 +366,34 @@ examples:
#size-cells = <2>;
pio: pinctrl@10211000 {
- compatible = "mediatek,mt7622-pinctrl";
- reg = <0 0x10211000 0 0x1000>;
- gpio-controller;
- #gpio-cells = <2>;
-
- pinctrl_eth_default: eth-pins {
- mux-mdio {
- groups = "mdc_mdio";
- function = "eth";
- drive-strength = <12>;
+ compatible = "mediatek,mt7622-pinctrl";
+ reg = <0 0x10211000 0 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ pinctrl_eth_default: eth-pins {
+ mux-mdio {
+ groups = "mdc_mdio";
+ function = "eth";
+ drive-strength = <12>;
+ };
+
+ mux-gmac2 {
+ groups = "rgmii_via_gmac2";
+ function = "eth";
+ drive-strength = <12>;
+ };
+
+ mux-esw {
+ groups = "esw";
+ function = "eth";
+ drive-strength = <8>;
+ };
+
+ conf-mdio {
+ pins = "MDC";
+ bias-pull-up;
+ };
};
-
- mux-gmac2 {
- groups = "rgmii_via_gmac2";
- function = "eth";
- drive-strength = <12>;
- };
-
- mux-esw {
- groups = "esw";
- function = "eth";
- drive-strength = <8>;
- };
-
- conf-mdio {
- pins = "MDC";
- bias-pull-up;
- };
- };
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml
index 8507bd15f243..464879274cae 100644
--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml
@@ -195,43 +195,43 @@ examples:
#size-cells = <2>;
pio: pinctrl@10005000 {
- compatible = "mediatek,mt8183-pinctrl";
- reg = <0 0x10005000 0 0x1000>,
- <0 0x11f20000 0 0x1000>,
- <0 0x11e80000 0 0x1000>,
- <0 0x11e70000 0 0x1000>,
- <0 0x11e90000 0 0x1000>,
- <0 0x11d30000 0 0x1000>,
- <0 0x11d20000 0 0x1000>,
- <0 0x11c50000 0 0x1000>,
- <0 0x11f30000 0 0x1000>,
- <0 0x1000b000 0 0x1000>;
- reg-names = "iocfg0", "iocfg1", "iocfg2",
- "iocfg3", "iocfg4", "iocfg5",
- "iocfg6", "iocfg7", "iocfg8",
- "eint";
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pio 0 0 192>;
- interrupt-controller;
- interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
- #interrupt-cells = <2>;
-
- i2c0_pins_a: i2c0-pins {
- pins1 {
- pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
- <PINMUX_GPIO49__FUNC_SDA5>;
- mediatek,pull-up-adv = <3>;
- drive-strength-microamp = <1000>;
+ compatible = "mediatek,mt8183-pinctrl";
+ reg = <0 0x10005000 0 0x1000>,
+ <0 0x11f20000 0 0x1000>,
+ <0 0x11e80000 0 0x1000>,
+ <0 0x11e70000 0 0x1000>,
+ <0 0x11e90000 0 0x1000>,
+ <0 0x11d30000 0 0x1000>,
+ <0 0x11d20000 0 0x1000>,
+ <0 0x11c50000 0 0x1000>,
+ <0 0x11f30000 0 0x1000>,
+ <0 0x1000b000 0 0x1000>;
+ reg-names = "iocfg0", "iocfg1", "iocfg2",
+ "iocfg3", "iocfg4", "iocfg5",
+ "iocfg6", "iocfg7", "iocfg8",
+ "eint";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pio 0 0 192>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
+
+ i2c0_pins_a: i2c0-pins {
+ pins1 {
+ pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
+ <PINMUX_GPIO49__FUNC_SDA5>;
+ mediatek,pull-up-adv = <3>;
+ drive-strength-microamp = <1000>;
+ };
};
- };
- i2c1_pins_a: i2c1-pins {
- pins {
- pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
- <PINMUX_GPIO51__FUNC_SDA3>;
- mediatek,pull-down-adv = <2>;
+ i2c1_pins_a: i2c1-pins {
+ pins {
+ pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
+ <PINMUX_GPIO51__FUNC_SDA3>;
+ mediatek,pull-down-adv = <2>;
+ };
};
- };
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt8192-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt8192-pinctrl.yaml
index 1686427eb854..949dcd6fd847 100644
--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt8192-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt8192-pinctrl.yaml
@@ -142,43 +142,43 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/pinctrl/mt8192-pinfunc.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
- pio: pinctrl@10005000 {
- compatible = "mediatek,mt8192-pinctrl";
- reg = <0x10005000 0x1000>,
- <0x11c20000 0x1000>,
- <0x11d10000 0x1000>,
- <0x11d30000 0x1000>,
- <0x11d40000 0x1000>,
- <0x11e20000 0x1000>,
- <0x11e70000 0x1000>,
- <0x11ea0000 0x1000>,
- <0x11f20000 0x1000>,
- <0x11f30000 0x1000>,
- <0x1000b000 0x1000>;
- reg-names = "iocfg0", "iocfg_rm", "iocfg_bm",
- "iocfg_bl", "iocfg_br", "iocfg_lm",
- "iocfg_lb", "iocfg_rt", "iocfg_lt",
- "iocfg_tl", "eint";
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pio 0 0 220>;
- interrupt-controller;
- interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH 0>;
- #interrupt-cells = <2>;
-
- spi1-default-pins {
- pins-cs-mosi-clk {
- pinmux = <PINMUX_GPIO157__FUNC_SPI1_A_CSB>,
- <PINMUX_GPIO159__FUNC_SPI1_A_MO>,
- <PINMUX_GPIO156__FUNC_SPI1_A_CLK>;
- bias-disable;
- };
-
- pins-miso {
- pinmux = <PINMUX_GPIO158__FUNC_SPI1_A_MI>;
- bias-pull-down;
- };
- };
+ #include <dt-bindings/pinctrl/mt8192-pinfunc.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ pio: pinctrl@10005000 {
+ compatible = "mediatek,mt8192-pinctrl";
+ reg = <0x10005000 0x1000>,
+ <0x11c20000 0x1000>,
+ <0x11d10000 0x1000>,
+ <0x11d30000 0x1000>,
+ <0x11d40000 0x1000>,
+ <0x11e20000 0x1000>,
+ <0x11e70000 0x1000>,
+ <0x11ea0000 0x1000>,
+ <0x11f20000 0x1000>,
+ <0x11f30000 0x1000>,
+ <0x1000b000 0x1000>;
+ reg-names = "iocfg0", "iocfg_rm", "iocfg_bm",
+ "iocfg_bl", "iocfg_br", "iocfg_lm",
+ "iocfg_lb", "iocfg_rt", "iocfg_lt",
+ "iocfg_tl", "eint";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pio 0 0 220>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH 0>;
+ #interrupt-cells = <2>;
+
+ spi1-default-pins {
+ pins-cs-mosi-clk {
+ pinmux = <PINMUX_GPIO157__FUNC_SPI1_A_CSB>,
+ <PINMUX_GPIO159__FUNC_SPI1_A_MO>,
+ <PINMUX_GPIO156__FUNC_SPI1_A_CLK>;
+ bias-disable;
};
+
+ pins-miso {
+ pinmux = <PINMUX_GPIO158__FUNC_SPI1_A_MI>;
+ bias-pull-down;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt8196-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt8196-pinctrl.yaml
new file mode 100644
index 000000000000..9082bd625e2f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt8196-pinctrl.yaml
@@ -0,0 +1,236 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/mediatek,mt8196-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT8196 Pin Controller
+
+maintainers:
+ - Lei Xue <lei.xue@mediatek.com>
+ - Cathy Xu <ot_cathy.xu@mediatek.com>
+
+description:
+  The MediaTek MT8196 pin controller is used to control SoC pins.
+
+properties:
+ compatible:
+ const: mediatek,mt8196-pinctrl
+
+ reg:
+ items:
+ - description: gpio base
+ - description: rt group IO
+ - description: rm1 group IO
+ - description: rm2 group IO
+ - description: rb group IO
+ - description: bm1 group IO
+ - description: bm2 group IO
+ - description: bm3 group IO
+ - description: lt group IO
+ - description: lm1 group IO
+ - description: lm2 group IO
+ - description: lb1 group IO
+ - description: lb2 group IO
+ - description: tm1 group IO
+ - description: tm2 group IO
+ - description: tm3 group IO
+ - description: eint0 group IO
+ - description: eint1 group IO
+ - description: eint2 group IO
+ - description: eint3 group IO
+ - description: eint4 group IO
+
+ reg-names:
+ items:
+ - const: base
+ - const: rt
+ - const: rm1
+ - const: rm2
+ - const: rb
+ - const: bm1
+ - const: bm2
+ - const: bm3
+ - const: lt
+ - const: lm1
+ - const: lm2
+ - const: lb1
+ - const: lb2
+ - const: tm1
+ - const: tm2
+ - const: tm3
+ - const: eint0
+ - const: eint1
+ - const: eint2
+ - const: eint3
+ - const: eint4
+
+ interrupts:
+ description: The interrupt outputs to sysirq.
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ description:
+ Number of cells in GPIO specifier, should be two. The first cell is the
+ pin number, the second cell is used to specify optional parameters which
+ are defined in <dt-bindings/gpio/gpio.h>.
+ const: 2
+
+ gpio-ranges:
+ maxItems: 1
+
+ gpio-line-names: true
+
+# PIN CONFIGURATION NODES
+patternProperties:
+ '-pins$':
+ type: object
+ additionalProperties: false
+
+ patternProperties:
+ '^pins':
+ type: object
+ $ref: /schemas/pinctrl/pincfg-node.yaml
+ additionalProperties: false
+ description:
+ A pinctrl node should contain at least one subnode representing the
+ pinctrl groups available on the machine. Each subnode will list the
+ pins it needs, and how they should be configured, with regard to muxer
+ configuration, pullups, drive strength, input enable/disable and input
+ schmitt.
+
+ properties:
+ pinmux:
+ description:
+ Integer array, represents gpio pin number and mux setting.
+            Supported pin numbers and muxes vary between SoCs; for this SoC
+            they are defined as macros in
+            arch/arm64/boot/dts/mediatek/mt8196-pinfunc.h.
+
+ drive-strength:
+ enum: [2, 4, 6, 8, 10, 12, 14, 16]
+
+ bias-pull-down:
+ oneOf:
+ - type: boolean
+ - enum: [100, 101, 102, 103]
+            description: mt8196 pull-down PUPD/R0/R1 type define values.
+          - enum: [75000, 5000]
+            description: mt8196 pull-down RSEL type resistance values (in ohms).
+        description: |
+          For the normal pull-down type there is no need to add an R1R0 define
+          or a resistance value.
+          For the PUPD/R0/R1 pull-down type, an R1R0 define can be added to
+          select a different resistance; mt8196 supports the
+          "MTK_PUPD_SET_R1R0_00", "MTK_PUPD_SET_R1R0_01",
+          "MTK_PUPD_SET_R1R0_10" and "MTK_PUPD_SET_R1R0_11" defines.
+          For the PD/RSEL pull-down type, a resistance value (in ohms) can be
+          added, selected through the "mediatek,rsel-resistance-in-si-unit"
+          property; mt8196 supports the values 75000 and 5000 ohms.
+
+ bias-pull-up:
+ oneOf:
+ - type: boolean
+ - enum: [100, 101, 102, 103]
+            description: mt8196 pull-up PUPD/R0/R1 type define values.
+          - enum: [1000, 1500, 2000, 3000, 4000, 5000, 75000]
+            description: mt8196 pull-up RSEL type resistance values (in ohms).
+        description: |
+          For the normal pull-up type there is no need to add an R1R0 define
+          or a resistance value.
+          For the PUPD/R0/R1 pull-up type, an R1R0 define can be added to
+          select a different resistance; mt8196 supports the
+          "MTK_PUPD_SET_R1R0_00", "MTK_PUPD_SET_R1R0_01",
+          "MTK_PUPD_SET_R1R0_10" and "MTK_PUPD_SET_R1R0_11" defines.
+          For the PU/RSEL pull-up type, a resistance value (in ohms) can be
+          added, selected through the "mediatek,rsel-resistance-in-si-unit"
+          property; mt8196 supports the values 1000, 1500, 2000, 3000, 4000,
+          5000 and 75000 ohms.
+
+ bias-disable: true
+
+ output-high: true
+
+ output-low: true
+
+ input-enable: true
+
+ input-disable: true
+
+ input-schmitt-enable: true
+
+ input-schmitt-disable: true
+
+ required:
+ - pinmux
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/pinctrl/mt65xx.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #define PINMUX_GPIO99__FUNC_SCL0 (MTK_PIN_NO(99) | 1)
+ #define PINMUX_GPIO100__FUNC_SDA0 (MTK_PIN_NO(100) | 1)
+
+ pio: pinctrl@1002d000 {
+ compatible = "mediatek,mt8196-pinctrl";
+ reg = <0x1002d000 0x1000>,
+ <0x12000000 0x1000>,
+ <0x12020000 0x1000>,
+ <0x12040000 0x1000>,
+ <0x12060000 0x1000>,
+ <0x12820000 0x1000>,
+ <0x12840000 0x1000>,
+ <0x12860000 0x1000>,
+ <0x13000000 0x1000>,
+ <0x13020000 0x1000>,
+ <0x13040000 0x1000>,
+ <0x130f0000 0x1000>,
+ <0x13110000 0x1000>,
+ <0x13800000 0x1000>,
+ <0x13820000 0x1000>,
+ <0x13860000 0x1000>,
+ <0x12080000 0x1000>,
+ <0x12880000 0x1000>,
+ <0x13080000 0x1000>,
+ <0x13880000 0x1000>,
+ <0x1c54a000 0x1000>;
+ reg-names = "base", "rt", "rm1", "rm2", "rb" , "bm1",
+ "bm2", "bm3", "lt", "lm1", "lm2", "lb1",
+ "lb2", "tm1", "tm2", "tm3", "eint0", "eint1",
+ "eint2", "eint3", "eint4";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pio 0 0 271>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH 0>;
+ #interrupt-cells = <2>;
+
+ i2c0-pins {
+ pins {
+ pinmux = <PINMUX_GPIO99__FUNC_SCL0>,
+ <PINMUX_GPIO100__FUNC_SDA0>;
+ bias-disable;
+ };
+ };
+ };
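The bias-pull-up / bias-pull-down properties above accept either a bare boolean, a PUPD/R0/R1 define or an RSEL resistance. A short sketch contrasting the two non-boolean forms, reusing the pin macros from the example (the MTK_PUPD_SET_R1R0_* macros come from dt-bindings/pinctrl/mt65xx.h and map onto the 100-103 enum values; the ohm form assumes the controller opts into SI-unit RSEL values via "mediatek,rsel-resistance-in-si-unit"):

    sda-pins {
            pins {
                    pinmux = <PINMUX_GPIO100__FUNC_SDA0>;
                    bias-pull-up = <MTK_PUPD_SET_R1R0_01>;  /* R1R0 define */
            };
    };

    scl-pins {
            pins {
                    pinmux = <PINMUX_GPIO99__FUNC_SCL0>;
                    bias-pull-up = <4000>;                  /* RSEL, ohms */
            };
    };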
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,qcs615-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,qcs615-tlmm.yaml
index 1ce4b5df584a..2791e578c1de 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,qcs615-tlmm.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,qcs615-tlmm.yaml
@@ -110,7 +110,7 @@ examples:
<0x03c00000 0x300000>;
reg-names = "east", "west", "south";
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
- gpio-ranges = <&tlmm 0 0 123>;
+ gpio-ranges = <&tlmm 0 0 124>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,qcs8300-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,qcs8300-tlmm.yaml
index bb0d7132886a..489b41dcc179 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,qcs8300-tlmm.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,qcs8300-tlmm.yaml
@@ -106,7 +106,7 @@ examples:
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
- gpio-ranges = <&tlmm 0 0 133>;
+ gpio-ranges = <&tlmm 0 0 134>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
index 768bb3c2b456..5156d54b240b 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
@@ -27,6 +27,7 @@ properties:
- renesas,r9a07g044-pinctrl # RZ/G2{L,LC}
- renesas,r9a08g045-pinctrl # RZ/G3S
- renesas,r9a09g047-pinctrl # RZ/G3E
+ - renesas,r9a09g056-pinctrl # RZ/V2N
- renesas,r9a09g057-pinctrl # RZ/V2H(P)
- items:
@@ -145,6 +146,7 @@ allOf:
contains:
enum:
- renesas,r9a09g047-pinctrl
+ - renesas,r9a09g056-pinctrl
- renesas,r9a09g057-pinctrl
then:
properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.yaml
index 816688580e33..aa882b5bfe97 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.yaml
@@ -13,9 +13,7 @@ maintainers:
properties:
compatible:
items:
- - enum:
- - renesas,r9a06g032-pinctrl # RZ/N1D
- - renesas,r9a06g033-pinctrl # RZ/N1S
+ - const: renesas,r9a06g032-pinctrl # RZ/N1D
- const: renesas,rzn1-pinctrl # Generic RZ/N1
reg:
diff --git a/Documentation/devicetree/bindings/pinctrl/spacemit,k1-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/spacemit,k1-pinctrl.yaml
index b01ecd83b71b..d80e88aa07b4 100644
--- a/Documentation/devicetree/bindings/pinctrl/spacemit,k1-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/spacemit,k1-pinctrl.yaml
@@ -17,6 +17,19 @@ properties:
items:
- description: pinctrl io memory base
+ clocks:
+ items:
+ - description: Functional Clock
+ - description: Bus Clock
+
+ clock-names:
+ items:
+ - const: func
+ - const: bus
+
+ resets:
+ maxItems: 1
+
patternProperties:
'-cfg$':
type: object
@@ -94,6 +107,8 @@ patternProperties:
required:
- compatible
- reg
+ - clocks
+ - clock-names
additionalProperties: false
@@ -108,6 +123,9 @@ examples:
pinctrl@d401e000 {
compatible = "spacemit,k1-pinctrl";
reg = <0x0 0xd401e000 0x0 0x400>;
+ clocks = <&syscon_apbc 42>,
+ <&syscon_apbc 94>;
+ clock-names = "func", "bus";
uart0_2_cfg: uart0-2-cfg {
uart0-2-pins {
diff --git a/Documentation/devicetree/bindings/power/allwinner,sun50i-h6-prcm-ppu.yaml b/Documentation/devicetree/bindings/power/allwinner,sun50i-h6-prcm-ppu.yaml
new file mode 100644
index 000000000000..73a9b4d6220e
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/allwinner,sun50i-h6-prcm-ppu.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/allwinner,sun50i-h6-prcm-ppu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner SoCs PRCM power domain controller
+
+maintainers:
+ - Andre Przywara <andre.przywara@arm.com>
+
+description:
+ The Allwinner Power Reset Clock Management (PRCM) unit contains bits to
+ control a few power domains.
+
+properties:
+ compatible:
+ enum:
+ - allwinner,sun50i-h6-prcm-ppu
+ - allwinner,sun50i-h616-prcm-ppu
+ - allwinner,sun55i-a523-prcm-ppu
+
+ reg:
+ maxItems: 1
+
+ '#power-domain-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ prcm_ppu: power-controller@7010210 {
+ compatible = "allwinner,sun50i-h616-prcm-ppu";
+ reg = <0x7010210 0x10>;
+ #power-domain-cells = <1>;
+ };
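A consumer would then reference one of the PRCM power domains by index through the single power-domain cell; the sketch below is illustrative only (the consumer node and the domain index are not taken from the binding):

    /* Sketch: hypothetical consumer of domain 0 */
    codec@1c0e000 {
            power-domains = <&prcm_ppu 0>;
    };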
diff --git a/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml b/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml
index 591a080ca3ff..9c7cc632abee 100644
--- a/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml
+++ b/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml
@@ -25,6 +25,7 @@ properties:
enum:
- mediatek,mt6735-power-controller
- mediatek,mt6795-power-controller
+ - mediatek,mt6893-power-controller
- mediatek,mt8167-power-controller
- mediatek,mt8173-power-controller
- mediatek,mt8183-power-controller
@@ -88,6 +89,7 @@ $defs:
description: |
Power domain index. Valid values are defined in:
"include/dt-bindings/power/mt6795-power.h" - for MT8167 type power domain.
+ "include/dt-bindings/power/mediatek,mt6893-power.h" - for MT6893 type power domain.
"include/dt-bindings/power/mt8167-power.h" - for MT8167 type power domain.
"include/dt-bindings/power/mt8173-power.h" - for MT8173 type power domain.
"include/dt-bindings/power/mt8183-power.h" - for MT8183 type power domain.
diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
index 655687369a23..1bf65f2a583a 100644
--- a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
+++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
@@ -50,6 +50,7 @@ properties:
- qcom,sdx55-rpmhpd
- qcom,sdx65-rpmhpd
- qcom,sdx75-rpmhpd
+ - qcom,sm4450-rpmhpd
- qcom,sm6115-rpmpd
- qcom,sm6125-rpmpd
- qcom,sm6350-rpmhpd
diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml b/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml
index 19d3093e6cd2..ccd555870094 100644
--- a/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml
+++ b/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml
@@ -21,7 +21,9 @@ description: |+
properties:
compatible:
- const: syscon-reboot
+ enum:
+ - syscon-reboot
+ - google,gs101-reboot
mask:
$ref: /schemas/types.yaml#/definitions/uint32
@@ -49,12 +51,6 @@ properties:
priority:
default: 192
-oneOf:
- - required:
- - offset
- - required:
- - reg
-
required:
- compatible
@@ -63,12 +59,29 @@ additionalProperties: false
allOf:
- $ref: restart-handler.yaml#
- if:
- not:
- required:
- - mask
+ properties:
+ compatible:
+ contains:
+ const: google,gs101-reboot
then:
- required:
- - value
+ properties:
+ mask: false
+ offset: false
+ reg: false
+ value: false
+
+ else:
+ if:
+ not:
+ required:
+ - mask
+ then:
+ required:
+ - value
+
+ oneOf:
+ - required: [offset]
+ - required: [reg]
examples:
- |
@@ -78,3 +91,8 @@ examples:
offset = <0x0>;
mask = <0x1>;
};
+
+ - |
+ reboot {
+ compatible = "google,gs101-reboot";
+ };
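For the non-gs101 case the reworked schema keeps the original constraints: either offset or reg must be given, and value becomes mandatory when mask is absent. A sketch of that value-only form (the 0x44 offset and 0x1 value are illustrative):

    reboot {
            compatible = "syscon-reboot";
            offset = <0x44>;
            value = <0x1>;
    };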
diff --git a/Documentation/devicetree/bindings/power/reset/toradex,smarc-ec.yaml b/Documentation/devicetree/bindings/power/reset/toradex,smarc-ec.yaml
new file mode 100644
index 000000000000..ffcd5f2c2bf6
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/toradex,smarc-ec.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/reset/toradex,smarc-ec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Toradex Embedded Controller
+
+maintainers:
+ - Emanuele Ghidoli <emanuele.ghidoli@toradex.com>
+ - Francesco Dolcini <francesco.dolcini@toradex.com>
+
+description: |
+ The Toradex Embedded Controller (EC) is used on Toradex SMARC modules,
+ primarily to manage power and reset functionalities.
+
+ The EC provides the following functions:
+ - Reads the SMARC POWER_BTN# and RESET_IN# signals and controls the PMIC accordingly.
+ - Controls the SoC boot mode signals based on the SMARC BOOT_SEL# and FORCE_RECOV# inputs.
+ - Manages the CARRIER_STDBY# signal in response to relevant SoC signals.
+
+ The EC runs a small firmware, factory programmed into its internal flash, and communicates over I2C.
+ It allows software to control power-off and reset functionalities of the module.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - toradex,smarc-imx95-ec
+ - toradex,smarc-imx8mp-ec
+ - const: toradex,smarc-ec
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-controller@28 {
+ compatible = "toradex,smarc-imx95-ec", "toradex,smarc-ec";
+ reg = <0x28>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml b/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml
index ebab98987e49..f494b7710c09 100644
--- a/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml
+++ b/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml
@@ -40,6 +40,7 @@ properties:
- rockchip,rk3366-power-controller
- rockchip,rk3368-power-controller
- rockchip,rk3399-power-controller
+ - rockchip,rk3562-power-controller
- rockchip,rk3568-power-controller
- rockchip,rk3576-power-controller
- rockchip,rk3588-power-controller
diff --git a/Documentation/devicetree/bindings/power/supply/bq24190.yaml b/Documentation/devicetree/bindings/power/supply/bq24190.yaml
index 07adf88997b4..307c99c07721 100644
--- a/Documentation/devicetree/bindings/power/supply/bq24190.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq24190.yaml
@@ -19,6 +19,7 @@ properties:
- ti,bq24190
- ti,bq24192
- ti,bq24192i
+ - ti,bq24193
- ti,bq24196
- ti,bq24296
- ti,bq24297
diff --git a/Documentation/devicetree/bindings/power/supply/bq25980.yaml b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
index b70ce8d7f86c..256adbef55eb 100644
--- a/Documentation/devicetree/bindings/power/supply/bq25980.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
@@ -87,28 +87,28 @@ unevaluatedProperties: false
examples:
- |
bat: battery {
- compatible = "simple-battery";
- constant-charge-current-max-microamp = <4000000>;
- constant-charge-voltage-max-microvolt = <8400000>;
- precharge-current-microamp = <160000>;
- charge-term-current-microamp = <160000>;
+ compatible = "simple-battery";
+ constant-charge-current-max-microamp = <4000000>;
+ constant-charge-voltage-max-microvolt = <8400000>;
+ precharge-current-microamp = <160000>;
+ charge-term-current-microamp = <160000>;
};
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- bq25980: charger@65 {
- compatible = "ti,bq25980";
- reg = <0x65>;
- interrupt-parent = <&gpio1>;
- interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
- ti,watchdog-timeout-ms = <0>;
- ti,sc-ocp-limit-microamp = <2000000>;
- ti,sc-ovp-limit-microvolt = <17800000>;
- monitored-battery = <&bat>;
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bq25980: charger@65 {
+ compatible = "ti,bq25980";
+ reg = <0x65>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
+ ti,watchdog-timeout-ms = <0>;
+ ti,sc-ocp-limit-microamp = <2000000>;
+ ti,sc-ovp-limit-microvolt = <17800000>;
+ monitored-battery = <&bat>;
+ };
};
...
diff --git a/Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml b/Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml
index 741022b4449d..cb04fb25d8ac 100644
--- a/Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml
+++ b/Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml
@@ -48,14 +48,14 @@ examples:
#include <dt-bindings/iio/adc/ingenic,adc.h>
simple_battery: battery {
- compatible = "simple-battery";
- voltage-min-design-microvolt = <3600000>;
- voltage-max-design-microvolt = <4200000>;
+ compatible = "simple-battery";
+ voltage-min-design-microvolt = <3600000>;
+ voltage-max-design-microvolt = <4200000>;
};
ingenic-battery {
- compatible = "ingenic,jz4740-battery";
- io-channels = <&adc INGENIC_ADC_BATTERY>;
- io-channel-names = "battery";
- monitored-battery = <&simple_battery>;
+ compatible = "ingenic,jz4740-battery";
+ io-channels = <&adc INGENIC_ADC_BATTERY>;
+ io-channel-names = "battery";
+ monitored-battery = <&simple_battery>;
};
diff --git a/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml b/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml
index 06595a953659..bc7ed7b22085 100644
--- a/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml
+++ b/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml
@@ -61,13 +61,13 @@ additionalProperties: false
examples:
- |
i2c {
- #address-cells = <1>;
- #size-cells = <0>;
- charger: battery-charger@68 {
- compatible = "lltc,ltc4162-l";
- reg = <0x68>;
- lltc,rsnsb-micro-ohms = <10000>;
- lltc,rsnsi-micro-ohms = <16000>;
- lltc,cell-count = <2>;
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+ charger: battery-charger@68 {
+ compatible = "lltc,ltc4162-l";
+ reg = <0x68>;
+ lltc,rsnsb-micro-ohms = <10000>;
+ lltc,rsnsi-micro-ohms = <16000>;
+ lltc,cell-count = <2>;
+ };
};
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max77705.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max77705.yaml
index bce7fabbd9d3..e3b84068993b 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max77705.yaml
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max77705.yaml
@@ -37,8 +37,8 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
- #address-cells = <1>;
- #size-cells = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
charger@69 {
compatible = "maxim,max77705-charger";
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max8971.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max8971.yaml
new file mode 100644
index 000000000000..2244cc3d45a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max8971.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/maxim,max8971.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Maxim MAX8971 IC charger
+
+maintainers:
+ - Svyatoslav Ryhel <clamor95@gmail.com>
+
+description:
+ The MAX8971 is a compact, high-frequency, high-efficiency switch-mode charger
+ for a one-cell lithium-ion (Li+) battery.
+
+allOf:
+ - $ref: power-supply.yaml#
+
+properties:
+ compatible:
+ const: maxim,max8971
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ monitored-battery: true
+
+ port:
+ description:
+ An optional port node to link the extcon device to detect type of plug.
+ $ref: /schemas/graph.yaml#/properties/port
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ charger@35 {
+ compatible = "maxim,max8971";
+ reg = <0x35>;
+
+ interrupt-parent = <&gpio>;
+ interrupts = <74 IRQ_TYPE_LEVEL_LOW>;
+
+ monitored-battery = <&battery>;
+
+ port {
+ charger_input: endpoint {
+ remote-endpoint = <&extcon_output>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/power/supply/pegatron,chagall-ec.yaml b/Documentation/devicetree/bindings/power/supply/pegatron,chagall-ec.yaml
new file mode 100644
index 000000000000..defb0861e268
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/pegatron,chagall-ec.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/pegatron,chagall-ec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Pegatron Chagall EC
+
+maintainers:
+ - Svyatoslav Ryhel <clamor95@gmail.com>
+
+description:
+ Pegatron Chagall EC is based on an 8-bit programmable microcontroller from
+ Infineon/Cypress Semiconductor, it communicates over I2C and is used in the
+ Pegatron Chagall tablet for fuel gauge and battery control functions.
+
+$ref: /schemas/power/supply/power-supply.yaml
+
+properties:
+ compatible:
+ const: pegatron,chagall-ec
+
+ reg:
+ maxItems: 1
+
+ monitored-battery: true
+ power-supplies: true
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ embedded-controller@10 {
+ compatible = "pegatron,chagall-ec";
+ reg = <0x10>;
+
+ monitored-battery = <&battery>;
+ power-supplies = <&mains>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml b/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml
index a0f9d49ff8fb..90c7dc7632c5 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm PMI8998/PM660 Switch-Mode Battery Charger "2"
maintainers:
- - Caleb Connolly <caleb.connolly@linaro.org>
+ - Casey Connolly <casey.connolly@linaro.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt b/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt
deleted file mode 100644
index 07256b7ffcaa..000000000000
--- a/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-* Power Management Controller
-
-Properties:
-- compatible: "fsl,<chip>-pmc".
-
- "fsl,mpc8349-pmc" should be listed for any chip whose PMC is
- compatible. "fsl,mpc8313-pmc" should also be listed for any chip
- whose PMC is compatible, and implies deep-sleep capability.
-
- "fsl,mpc8548-pmc" should be listed for any chip whose PMC is
- compatible. "fsl,mpc8536-pmc" should also be listed for any chip
- whose PMC is compatible, and implies deep-sleep capability.
-
- "fsl,mpc8641d-pmc" should be listed for any chip whose PMC is
- compatible; all statements below that apply to "fsl,mpc8548-pmc" also
- apply to "fsl,mpc8641d-pmc".
-
- Compatibility does not include bit assignments in SCCR/PMCDR/DEVDISR; these
- bit assignments are indicated via the sleep specifier in each device's
- sleep property.
-
-- reg: For devices compatible with "fsl,mpc8349-pmc", the first resource
- is the PMC block, and the second resource is the Clock Configuration
- block.
-
- For devices compatible with "fsl,mpc8548-pmc", the first resource
- is a 32-byte block beginning with DEVDISR.
-
-- interrupts: For "fsl,mpc8349-pmc"-compatible devices, the first
- resource is the PMC block interrupt.
-
-- fsl,mpc8313-wakeup-timer: For "fsl,mpc8313-pmc"-compatible devices,
- this is a phandle to an "fsl,gtm" node on which timer 4 can be used as
- a wakeup source from deep sleep.
-
-Sleep specifiers:
-
- fsl,mpc8349-pmc: Sleep specifiers consist of one cell. For each bit
- that is set in the cell, the corresponding bit in SCCR will be saved
- and cleared on suspend, and restored on resume. This sleep controller
- supports disabling and resuming devices at any time.
-
- fsl,mpc8536-pmc: Sleep specifiers consist of three cells, the third of
- which will be ORed into PMCDR upon suspend, and cleared from PMCDR
- upon resume. The first two cells are as described for fsl,mpc8578-pmc.
- This sleep controller only supports disabling devices during system
- sleep, or permanently.
-
- fsl,mpc8548-pmc: Sleep specifiers consist of one or two cells, the
- first of which will be ORed into DEVDISR (and the second into
- DEVDISR2, if present -- this cell should be zero or absent if the
- hardware does not have DEVDISR2) upon a request for permanent device
- disabling. This sleep controller does not support configuring devices
- to disable during system sleep (unless supported by another compatible
- match), or dynamically.
-
-Example:
-
- power@b00 {
- compatible = "fsl,mpc8313-pmc", "fsl,mpc8349-pmc";
- reg = <0xb00 0x100 0xa00 0x100>;
- interrupts = <80 8>;
- };
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/pmc.yaml b/Documentation/devicetree/bindings/powerpc/fsl/pmc.yaml
new file mode 100644
index 000000000000..276ece7f01db
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/pmc.yaml
@@ -0,0 +1,152 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/powerpc/fsl/pmc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Power Management Controller
+
+maintainers:
+ - J. Neuschäfer <j.ne@posteo.net>
+
+description: |
+ The Power Management Controller in several MPC8xxx SoCs helps save power by
+ controlling chip-wide low-power states as well as peripheral clock gating.
+
+ Sleep of peripheral devices is configured by the `sleep` property, for
+ example `sleep = <&pmc 0x00000030>`. Any cells after the &pmc phandle are
+ called a sleep specifier.
+
+ For "fsl,mpc8349-pmc", sleep specifiers consist of one cell. For each bit that
+ is set in the cell, the corresponding bit in SCCR will be saved and cleared
+ on suspend, and restored on resume. This sleep controller supports disabling
+ and resuming devices at any time.
+
+ For "fsl,mpc8536-pmc", sleep specifiers consist of three cells, the third of
+ which will be ORed into PMCDR upon suspend, and cleared from PMCDR upon
+ resume. The first two cells are as described for fsl,mpc8548-pmc. This
+ sleep controller only supports disabling devices during system sleep, or
+ permanently.
+
+ For "fsl,mpc8548-pmc" or "fsl,mpc8641d-pmc", Sleep specifiers consist of one
+ or two cells, the first of which will be ORed into DEVDISR (and the second
+ into DEVDISR2, if present -- this cell should be zero or absent if the
+ hardware does not have DEVDISR2) upon a request for permanent device
+ disabling. This sleep controller does not support configuring devices to
+ disable during system sleep (unless supported by another compatible match),
+ or dynamically.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: fsl,mpc8315-pmc
+ - const: fsl,mpc8313-pmc
+ - const: fsl,mpc8349-pmc
+
+ - items:
+ - enum:
+ - fsl,mpc8313-pmc
+ - fsl,mpc8323-pmc
+ - fsl,mpc8360-pmc
+ - fsl,mpc8377-pmc
+ - fsl,mpc8378-pmc
+ - fsl,mpc8379-pmc
+ - const: fsl,mpc8349-pmc
+
+ - items:
+ - const: fsl,p1022-pmc
+ - const: fsl,mpc8536-pmc
+ - const: fsl,mpc8548-pmc
+
+ - items:
+ - enum:
+ - fsl,mpc8536-pmc
+ - fsl,mpc8568-pmc
+ - fsl,mpc8569-pmc
+ - const: fsl,mpc8548-pmc
+
+ - enum:
+ - fsl,mpc8548-pmc
+ - fsl,mpc8641d-pmc
+
+ description: |
+ "fsl,mpc8349-pmc" should be listed for any chip whose PMC is
+ compatible. "fsl,mpc8313-pmc" should also be listed for any chip
+ whose PMC is compatible, and implies deep-sleep capability.
+
+ "fsl,mpc8548-pmc" should be listed for any chip whose PMC is
+ compatible. "fsl,mpc8536-pmc" should also be listed for any chip
+ whose PMC is compatible, and implies deep-sleep capability.
+
+ "fsl,mpc8641d-pmc" should be listed for any chip whose PMC is
+ compatible; all statements below that apply to "fsl,mpc8548-pmc" also
+ apply to "fsl,mpc8641d-pmc".
+
+ Compatibility does not include bit assignments in SCCR/PMCDR/DEVDISR; these
+ bit assignments are indicated via the sleep specifier in each device's
+ sleep property.
+
+ reg:
+ minItems: 1
+ maxItems: 2
+
+ interrupts:
+ maxItems: 1
+
+ fsl,mpc8313-wakeup-timer:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ For "fsl,mpc8313-pmc"-compatible devices, this is a phandle to an
+ "fsl,gtm" node on which timer 4 can be used as a wakeup source from deep
+ sleep.
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,mpc8349-pmc
+ then:
+ properties:
+ reg:
+ items:
+ - description: PMC block
+ - description: Clock Configuration block
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,mpc8548-pmc
+ - fsl,mpc8641d-pmc
+ then:
+ properties:
+ reg:
+ items:
+ - description: 32-byte block beginning with DEVDISR
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ pmc: power@b00 {
+ compatible = "fsl,mpc8377-pmc", "fsl,mpc8349-pmc";
+ reg = <0xb00 0x100>, <0xa00 0x100>;
+ interrupts = <80 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ - |
+ power@e0070 {
+ compatible = "fsl,mpc8548-pmc";
+ reg = <0xe0070 0x20>;
+ };
+
+...
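As the description notes, peripherals opt into PMC-managed sleep with a sleep property carrying a sleep specifier. A one-cell MPC8349-style sketch (the ethernet node is hypothetical; the 0x00000030 mask reuses the example value from the description):

    enet0: ethernet@25000 {
            sleep = <&pmc 0x00000030>;
    };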
diff --git a/Documentation/devicetree/bindings/pwm/loongson,ls7a-pwm.yaml b/Documentation/devicetree/bindings/pwm/loongson,ls7a-pwm.yaml
new file mode 100644
index 000000000000..5d64fb40a0d6
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/loongson,ls7a-pwm.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/loongson,ls7a-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson PWM Controller
+
+maintainers:
+ - Binbin Zhou <zhoubinbin@loongson.cn>
+
+description:
+ The Loongson PWM has one pulse width output signal and one pulse input
+ signal to be measured.
+  It can be found on Loongson-2K series CPUs and Loongson LS7A bridge chips.
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: loongson,ls7a-pwm
+ - items:
+ - enum:
+ - loongson,ls2k0500-pwm
+ - loongson,ls2k1000-pwm
+ - loongson,ls2k2000-pwm
+ - const: loongson,ls7a-pwm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#pwm-cells':
+ description:
+      The first cell must be 0 and selects the PWM output signal;
+      the second cell is the period in nanoseconds;
+      the third cell carries flags, of which this binding supports
+      PWM_POLARITY_INVERTED.
+ const: 3
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/loongson,ls2k-clk.h>
+
+ pwm@1fe22000 {
+ compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm";
+ reg = <0x1fe22000 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ };
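A consumer of this controller passes the three cells described above; a sketch, assuming the controller node carries a label such as pwm1 and using an arbitrary 500000 ns period (PWM_POLARITY_INVERTED comes from dt-bindings/pwm/pwm.h):

    fan {
            compatible = "pwm-fan";
            pwms = <&pwm1 0 500000 PWM_POLARITY_INVERTED>;
    };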
diff --git a/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml b/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
index 195e4371196b..68ef30414325 100644
--- a/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
+++ b/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
@@ -27,6 +27,7 @@ properties:
- const: mediatek,mt8173-disp-pwm
- items:
- enum:
+ - mediatek,mt6893-disp-pwm
- mediatek,mt8186-disp-pwm
- mediatek,mt8188-disp-pwm
- mediatek,mt8192-disp-pwm
diff --git a/Documentation/devicetree/bindings/pwm/nxp,mc33xs2410.yaml b/Documentation/devicetree/bindings/pwm/nxp,mc33xs2410.yaml
new file mode 100644
index 000000000000..1729fe5c3dfb
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/nxp,mc33xs2410.yaml
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/nxp,mc33xs2410.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: High-side switch MC33XS2410
+
+maintainers:
+ - Dimitri Fedrau <dima.fedrau@gmail.com>
+
+allOf:
+ - $ref: pwm.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+ compatible:
+ const: nxp,mc33xs2410
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 10000000
+
+ spi-cpha: true
+
+ spi-cs-setup-delay-ns:
+ minimum: 100
+ default: 100
+
+ spi-cs-hold-delay-ns:
+ minimum: 10
+ default: 10
+
+ spi-cs-inactive-delay-ns:
+ minimum: 300
+ default: 300
+
+ reset-gpios:
+ description:
+ GPIO connected to the active low reset pin.
+ maxItems: 1
+
+ "#pwm-cells":
+ const: 3
+
+ pwm-names:
+ items:
+ - const: di0
+ - const: di1
+ - const: di2
+ - const: di3
+
+ pwms:
+ description:
+      Direct inputs (di0-3) are used to directly turn the outputs on or off.
+ maxItems: 4
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ description:
+ The external clock can be used if the internal clock doesn't meet
+ timing requirements over temperature and voltage operating range.
+ maxItems: 1
+
+ vdd-supply:
+ description:
+ Logic supply voltage
+
+ vspi-supply:
+ description:
+ Supply voltage for SPI
+
+ vpwr-supply:
+ description:
+ Power switch supply
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pwm@0 {
+ compatible = "nxp,mc33xs2410";
+ reg = <0x0>;
+ spi-max-frequency = <4000000>;
+ spi-cpha;
+ spi-cs-setup-delay-ns = <100>;
+ spi-cs-hold-delay-ns = <10>;
+ spi-cs-inactive-delay-ns = <300>;
+ reset-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
+ #pwm-cells = <3>;
+ pwm-names = "di0", "di1", "di2", "di3";
+ pwms = <&pwm0 0 1000000>,
+ <&pwm1 0 1000000>,
+ <&pwm2 0 1000000>,
+ <&pwm3 0 1000000>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&clk_ext_fixed>;
+ vdd-supply = <&reg_3v3>;
+ vspi-supply = <&reg_3v3>;
+ vpwr-supply = <&reg_24v0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pwm/renesas,rzg2l-gpt.yaml b/Documentation/devicetree/bindings/pwm/renesas,rzg2l-gpt.yaml
new file mode 100644
index 000000000000..13b807765a30
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/renesas,rzg2l-gpt.yaml
@@ -0,0 +1,378 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/renesas,rzg2l-gpt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/G2L General PWM Timer (GPT)
+
+maintainers:
+ - Biju Das <biju.das.jz@bp.renesas.com>
+
+description: |
+  The RZ/G2L General PWM Timer (GPT) is composed of 8 channels, each with a
+  32-bit timer (GPT32E). It supports the following functions:
+ * 32 bits x 8 channels.
+ * Up-counting or down-counting (saw waves) or up/down-counting
+ (triangle waves) for each counter.
+ * Clock sources independently selectable for each channel.
+ * Two I/O pins per channel.
+ * Two output compare/input capture registers per channel.
+ * For the two output compare/input capture registers of each channel,
+ four registers are provided as buffer registers and are capable of
+ operating as comparison registers when buffering is not in use.
+ * In output compare operation, buffer switching can be at crests or
+ troughs, enabling the generation of laterally asymmetric PWM waveforms.
+ * Registers for setting up frame cycles in each channel (with capability
+ for generating interrupts at overflow or underflow)
+ * Generation of dead times in PWM operation.
+ * Synchronous starting, stopping and clearing counters for arbitrary
+ channels.
+ * Starting, stopping, clearing and up/down counters in response to input
+ level comparison.
+ * Starting, clearing, stopping and up/down counters in response to a
+ maximum of four external triggers.
+ * Output pin disable function by dead time error and detected
+ short-circuits between output pins.
+ * A/D converter start triggers can be generated (GPT32E0 to GPT32E3)
+ * Enables the noise filter for input capture and external trigger
+ operation.
+
+  The following PWM channels are supported:
+ pwm0 - GPT32E0.GTIOC0A channel
+ pwm1 - GPT32E0.GTIOC0B channel
+ pwm2 - GPT32E1.GTIOC1A channel
+ pwm3 - GPT32E1.GTIOC1B channel
+ pwm4 - GPT32E2.GTIOC2A channel
+ pwm5 - GPT32E2.GTIOC2B channel
+ pwm6 - GPT32E3.GTIOC3A channel
+ pwm7 - GPT32E3.GTIOC3B channel
+ pwm8 - GPT32E4.GTIOC4A channel
+ pwm9 - GPT32E4.GTIOC4B channel
+ pwm10 - GPT32E5.GTIOC5A channel
+ pwm11 - GPT32E5.GTIOC5B channel
+ pwm12 - GPT32E6.GTIOC6A channel
+ pwm13 - GPT32E6.GTIOC6B channel
+ pwm14 - GPT32E7.GTIOC7A channel
+ pwm15 - GPT32E7.GTIOC7B channel
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r9a07g044-gpt # RZ/G2{L,LC}
+ - renesas,r9a07g054-gpt # RZ/V2L
+ - const: renesas,rzg2l-gpt
+
+ reg:
+ maxItems: 1
+
+ '#pwm-cells':
+ const: 3
+
+ interrupts:
+ items:
+ - description: GPT32E0.GTCCRA input capture/compare match
+ - description: GPT32E0.GTCCRB input capture/compare
+ - description: GPT32E0.GTCCRC compare match
+ - description: GPT32E0.GTCCRD compare match
+ - description: GPT32E0.GTCCRE compare match
+ - description: GPT32E0.GTCCRF compare match
+ - description: GPT32E0.GTADTRA compare match
+ - description: GPT32E0.GTADTRB compare match
+ - description: GPT32E0.GTCNT overflow/GTPR compare match
+ - description: GPT32E0.GTCNT underflow
+ - description: GPT32E1.GTCCRA input capture/compare match
+ - description: GPT32E1.GTCCRB input capture/compare
+ - description: GPT32E1.GTCCRC compare match
+ - description: GPT32E1.GTCCRD compare match
+ - description: GPT32E1.GTCCRE compare match
+ - description: GPT32E1.GTCCRF compare match
+ - description: GPT32E1.GTADTRA compare match
+ - description: GPT32E1.GTADTRB compare match
+ - description: GPT32E1.GTCNT overflow/GTPR compare match
+ - description: GPT32E1.GTCNT underflow
+ - description: GPT32E2.GTCCRA input capture/compare match
+ - description: GPT32E2.GTCCRB input capture/compare
+ - description: GPT32E2.GTCCRC compare match
+ - description: GPT32E2.GTCCRD compare match
+ - description: GPT32E2.GTCCRE compare match
+ - description: GPT32E2.GTCCRF compare match
+ - description: GPT32E2.GTADTRA compare match
+ - description: GPT32E2.GTADTRB compare match
+ - description: GPT32E2.GTCNT overflow/GTPR compare match
+ - description: GPT32E2.GTCNT underflow
+ - description: GPT32E3.GTCCRA input capture/compare match
+ - description: GPT32E3.GTCCRB input capture/compare
+ - description: GPT32E3.GTCCRC compare match
+ - description: GPT32E3.GTCCRD compare match
+ - description: GPT32E3.GTCCRE compare match
+ - description: GPT32E3.GTCCRF compare match
+ - description: GPT32E3.GTADTRA compare match
+ - description: GPT32E3.GTADTRB compare match
+ - description: GPT32E3.GTCNT overflow/GTPR compare match
+ - description: GPT32E3.GTCNT underflow
+ - description: GPT32E4.GTCCRA input capture/compare match
+ - description: GPT32E4.GTCCRB input capture/compare
+ - description: GPT32E4.GTCCRC compare match
+ - description: GPT32E4.GTCCRD compare match
+ - description: GPT32E4.GTCCRE compare match
+ - description: GPT32E4.GTCCRF compare match
+ - description: GPT32E4.GTADTRA compare match
+ - description: GPT32E4.GTADTRB compare match
+ - description: GPT32E4.GTCNT overflow/GTPR compare match
+ - description: GPT32E4.GTCNT underflow
+ - description: GPT32E5.GTCCRA input capture/compare match
+ - description: GPT32E5.GTCCRB input capture/compare
+ - description: GPT32E5.GTCCRC compare match
+ - description: GPT32E5.GTCCRD compare match
+ - description: GPT32E5.GTCCRE compare match
+ - description: GPT32E5.GTCCRF compare match
+ - description: GPT32E5.GTADTRA compare match
+ - description: GPT32E5.GTADTRB compare match
+ - description: GPT32E5.GTCNT overflow/GTPR compare match
+ - description: GPT32E5.GTCNT underflow
+ - description: GPT32E6.GTCCRA input capture/compare match
+ - description: GPT32E6.GTCCRB input capture/compare
+ - description: GPT32E6.GTCCRC compare match
+ - description: GPT32E6.GTCCRD compare match
+ - description: GPT32E6.GTCCRE compare match
+ - description: GPT32E6.GTCCRF compare match
+ - description: GPT32E6.GTADTRA compare match
+ - description: GPT32E6.GTADTRB compare match
+ - description: GPT32E6.GTCNT overflow/GTPR compare match
+ - description: GPT32E6.GTCNT underflow
+ - description: GPT32E7.GTCCRA input capture/compare match
+ - description: GPT32E7.GTCCRB input capture/compare
+ - description: GPT32E7.GTCCRC compare match
+ - description: GPT32E7.GTCCRD compare match
+ - description: GPT32E7.GTCCRE compare match
+ - description: GPT32E7.GTCCRF compare match
+ - description: GPT32E7.GTADTRA compare match
+ - description: GPT32E7.GTADTRB compare match
+ - description: GPT32E7.GTCNT overflow/GTPR compare match
+ - description: GPT32E7.GTCNT underflow
+
+ interrupt-names:
+ items:
+ - const: ccmpa0
+ - const: ccmpb0
+ - const: cmpc0
+ - const: cmpd0
+ - const: cmpe0
+ - const: cmpf0
+ - const: adtrga0
+ - const: adtrgb0
+ - const: ovf0
+ - const: unf0
+ - const: ccmpa1
+ - const: ccmpb1
+ - const: cmpc1
+ - const: cmpd1
+ - const: cmpe1
+ - const: cmpf1
+ - const: adtrga1
+ - const: adtrgb1
+ - const: ovf1
+ - const: unf1
+ - const: ccmpa2
+ - const: ccmpb2
+ - const: cmpc2
+ - const: cmpd2
+ - const: cmpe2
+ - const: cmpf2
+ - const: adtrga2
+ - const: adtrgb2
+ - const: ovf2
+ - const: unf2
+ - const: ccmpa3
+ - const: ccmpb3
+ - const: cmpc3
+ - const: cmpd3
+ - const: cmpe3
+ - const: cmpf3
+ - const: adtrga3
+ - const: adtrgb3
+ - const: ovf3
+ - const: unf3
+ - const: ccmpa4
+ - const: ccmpb4
+ - const: cmpc4
+ - const: cmpd4
+ - const: cmpe4
+ - const: cmpf4
+ - const: adtrga4
+ - const: adtrgb4
+ - const: ovf4
+ - const: unf4
+ - const: ccmpa5
+ - const: ccmpb5
+ - const: cmpc5
+ - const: cmpd5
+ - const: cmpe5
+ - const: cmpf5
+ - const: adtrga5
+ - const: adtrgb5
+ - const: ovf5
+ - const: unf5
+ - const: ccmpa6
+ - const: ccmpb6
+ - const: cmpc6
+ - const: cmpd6
+ - const: cmpe6
+ - const: cmpf6
+ - const: adtrga6
+ - const: adtrgb6
+ - const: ovf6
+ - const: unf6
+ - const: ccmpa7
+ - const: ccmpb7
+ - const: cmpc7
+ - const: cmpd7
+ - const: cmpe7
+ - const: cmpf7
+ - const: adtrga7
+ - const: adtrgb7
+ - const: ovf7
+ - const: unf7
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+ - power-domains
+ - resets
+
+allOf:
+ - $ref: pwm.yaml#
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r9a07g044-cpg.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ gpt: pwm@10048000 {
+ compatible = "renesas,r9a07g044-gpt", "renesas,rzg2l-gpt";
+ reg = <0x10048000 0x800>;
+ interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 219 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 220 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 221 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 222 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 223 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 224 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 225 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 226 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 227 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 231 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 232 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 233 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 234 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 235 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 236 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 237 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 238 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 239 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 240 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 244 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 245 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 246 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 247 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 248 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 249 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 250 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 251 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 252 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 253 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 257 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 258 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 259 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 260 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 261 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 262 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 263 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 265 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 266 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 270 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 271 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 272 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 273 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 274 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 275 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 276 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 277 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 278 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 279 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 283 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 284 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 285 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 286 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 287 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 288 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 289 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 290 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 291 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 292 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 296 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 297 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 298 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 299 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 300 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 301 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 302 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 303 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 304 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 305 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 309 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 310 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 311 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 312 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 313 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 314 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 315 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 316 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 317 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 318 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ccmpa0", "ccmpb0", "cmpc0", "cmpd0",
+ "cmpe0", "cmpf0", "adtrga0", "adtrgb0",
+ "ovf0", "unf0",
+ "ccmpa1", "ccmpb1", "cmpc1", "cmpd1",
+ "cmpe1", "cmpf1", "adtrga1", "adtrgb1",
+ "ovf1", "unf1",
+ "ccmpa2", "ccmpb2", "cmpc2", "cmpd2",
+ "cmpe2", "cmpf2", "adtrga2", "adtrgb2",
+ "ovf2", "unf2",
+ "ccmpa3", "ccmpb3", "cmpc3", "cmpd3",
+ "cmpe3", "cmpf3", "adtrga3", "adtrgb3",
+ "ovf3", "unf3",
+ "ccmpa4", "ccmpb4", "cmpc4", "cmpd4",
+ "cmpe4", "cmpf4", "adtrga4", "adtrgb4",
+ "ovf4", "unf4",
+ "ccmpa5", "ccmpb5", "cmpc5", "cmpd5",
+ "cmpe5", "cmpf5", "adtrga5", "adtrgb5",
+ "ovf5", "unf5",
+ "ccmpa6", "ccmpb6", "cmpc6", "cmpd6",
+ "cmpe6", "cmpf6", "adtrga6", "adtrgb6",
+ "ovf6", "unf6",
+ "ccmpa7", "ccmpb7", "cmpc7", "cmpd7",
+ "cmpe7", "cmpf7", "adtrga7", "adtrgb7",
+ "ovf7", "unf7";
+ clocks = <&cpg CPG_MOD R9A07G044_GPT_PCLK>;
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G044_GPT_RST_C>;
+ #pwm-cells = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
index a4dfa09344dd..f85ee5d20ccb 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
@@ -9,15 +9,6 @@ title: Renesas R-Car Timer Pulse Unit PWM Controller
maintainers:
- Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
-select:
- properties:
- compatible:
- contains:
- const: renesas,tpu
- required:
- - compatible
- - '#pwm-cells'
-
properties:
compatible:
items:
diff --git a/Documentation/devicetree/bindings/pwm/via,vt8500-pwm.yaml b/Documentation/devicetree/bindings/pwm/via,vt8500-pwm.yaml
new file mode 100644
index 000000000000..d9146ad715ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/via,vt8500-pwm.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/via,vt8500-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: VIA/Wondermedia VT8500/WM8xxx series SoC PWM controller
+
+maintainers:
+ - Alexey Charkov <alchark@gmail.com>
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: via,vt8500-pwm
+
+ reg:
+ maxItems: 1
+
+ '#pwm-cells':
+ const: 3
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ pwm1: pwm@d8220000 {
+ compatible = "via,vt8500-pwm";
+ reg = <0xd8220000 0x1000>;
+ #pwm-cells = <3>;
+ clocks = <&clkpwm>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt b/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
deleted file mode 100644
index 4fba93ce1985..000000000000
--- a/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-VIA/Wondermedia VT8500/WM8xxx series SoC PWM controller
-
-Required properties:
-- compatible: should be "via,vt8500-pwm"
-- reg: physical base address and length of the controller's registers
-- #pwm-cells: should be 3. See pwm.yaml in this directory for a description of
- the cells format. The only third cell flag supported by this binding is
- PWM_POLARITY_INVERTED.
-- clocks: phandle to the PWM source clock
-
-Example:
-
-pwm1: pwm@d8220000 {
- #pwm-cells = <3>;
- compatible = "via,vt8500-pwm";
- reg = <0xd8220000 0x1000>;
- clocks = <&clkpwm>;
-};
diff --git a/Documentation/devicetree/bindings/regulator/adi,adp5055-regulator.yaml b/Documentation/devicetree/bindings/regulator/adi,adp5055-regulator.yaml
new file mode 100644
index 000000000000..9c4ead4c9fd1
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/adi,adp5055-regulator.yaml
@@ -0,0 +1,157 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/adi,adp5055-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADP5055 Triple Buck Regulator
+
+maintainers:
+ - Alexis Czezar Torreno <alexisczezar.torreno@analog.com>
+
+description: |
+  The ADP5055 combines three high-performance buck regulators. The device enables
+ direct connection to high input voltages up to 18 V with no preregulators.
+ https://www.analog.com/media/en/technical-documentation/data-sheets/adp5055.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,adp5055
+
+ reg:
+ enum:
+ - 0x70
+ - 0x71
+
+ adi,tset-us:
+ description:
+      Setting time used by the device. This is selected by soldering a specific
+      resistor value on the CFG2 pin.
+ enum: [2600, 20800]
+ default: 2600
+
+ adi,ocp-blanking:
+ description:
+      If present, overcurrent protection (OCP) blanking is enabled for all regulators.
+ type: boolean
+
+ adi,delay-power-good:
+ description:
+ Configures delay timer of the power good (PWRGD) pin. Delay is based on
+ Tset which can be 2.6 ms or 20.8 ms.
+ type: boolean
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ '^buck[0-2]$':
+ type: object
+ $ref: regulator.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ enable-gpios:
+ maxItems: 1
+ description:
+ GPIO specifier to enable the GPIO control for each regulator. The
+          GPIO specifier for hardware enable control of the regulator. The
+          driver supports two enable modes: hardware only (GPIOs) or software
+          only (registers). Pure hardware enabling requires every regulator to
+          have this property; if at least one regulator lacks it, the driver
+          automatically switches to software-only mode.
+ adi,dvs-limit-upper-microvolt:
+ description:
+ Configure the allowable upper side limit of the voltage output of each
+ regulator in microvolt. Relative to the default Vref trimming value.
+          Vref = 600 mV. Voltages are in 12 mV steps; the value is auto-adjusted.
+ Vout_high = Vref_trim + dvs-limit-upper.
+ minimum: 12000
+ maximum: 192000
+ default: 192000
+
+ adi,dvs-limit-lower-microvolt:
+ description:
+ Configure the allowable lower side limit of the voltage output of each
+ regulator in microvolt. Relative to the default Vref trimming value.
+          Vref = 600 mV. Voltages are in 12 mV steps; the value is auto-adjusted.
+ Vout_low = Vref_trim + dvs-limit-lower.
+ minimum: -190500
+ maximum: -10500
+ default: -190500
+
+ adi,fast-transient:
+ description:
+ Configures the fast transient sensitivity for each regulator.
+ "none" - No fast transient.
+ "3G_1.5%" - 1.5% window with 3*350uA/V
+ "5G_1.5%" - 1.5% window with 5*350uA/V
+ "5G_2.5%" - 2.5% window with 5*350uA/V
+ enum: [none, 3G_1.5%, 5G_1.5%, 5G_2.5%]
+ default: 5G_2.5%
+
+ adi,mask-power-good:
+ description:
+          If present, masks the individual regulator's PWRGD signal to the
+          external PWRGD hardware pin.
+ type: boolean
+
+ required:
+ - regulator-name
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ regulator@70 {
+ compatible = "adi,adp5055";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adi,tset-us = <2600>;
+ adi,ocp-blanking;
+ adi,delay-power-good;
+
+ buck0 {
+ regulator-name = "buck0";
+ enable-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+ adi,dvs-limit-upper-microvolt = <192000>;
+ adi,dvs-limit-lower-microvolt = <(-190500)>;
+ adi,fast-transient = "5G_2.5%";
+ adi,mask-power-good;
+ };
+
+ buck1 {
+ regulator-name = "buck1";
+ enable-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>;
+ adi,dvs-limit-upper-microvolt = <192000>;
+ adi,dvs-limit-lower-microvolt = <(-190500)>;
+ adi,fast-transient = "5G_2.5%";
+ adi,mask-power-good;
+ };
+
+ buck2 {
+ regulator-name = "buck2";
+ enable-gpios = <&gpio 19 GPIO_ACTIVE_HIGH>;
+ adi,dvs-limit-upper-microvolt = <192000>;
+ adi,dvs-limit-lower-microvolt = <(-190500)>;
+ adi,fast-transient = "5G_2.5%";
+ adi,mask-power-good;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/brcm,bcm59054.yaml b/Documentation/devicetree/bindings/regulator/brcm,bcm59054.yaml
new file mode 100644
index 000000000000..5b46d7fca05e
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/brcm,bcm59054.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/brcm,bcm59054.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM59054 Power Management Unit regulators
+
+description: |
+ This is a part of device tree bindings for the BCM59054 power
+ management unit.
+
+ See Documentation/devicetree/bindings/mfd/brcm,bcm59056.yaml for
+ additional information and example.
+
+maintainers:
+ - Artur Weber <aweber.kernel@gmail.com>
+
+patternProperties:
+ "^(cam|sim|mmc)ldo[1-2]$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+ "^(rf|sd|sdx|aud|mic|usb|vib|tcx)ldo$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+ "^(c|mm|v)sr$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+ "^(io|sd)sr[1-2]$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+ "^gpldo[1-3]$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+ "^lvldo[1-2]$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+properties:
+ vbus:
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+additionalProperties: false
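
The pattern properties above only constrain which regulator node names are accepted. A minimal sketch of how a BCM59054 regulators sub-node might look, assuming the usual "regulators" child of the PMIC node from the MFD binding; the node names follow the patterns above, while the voltage constraints are illustrative assumptions:

    regulators {
        camldo1 {
            /* matches "^(cam|sim|mmc)ldo[1-2]$" */
            regulator-name = "camldo1";
            regulator-min-microvolt = <3300000>;
            regulator-max-microvolt = <3300000>;
        };

        csr {
            /* matches "^(c|mm|v)sr$" */
            regulator-name = "csr";
            regulator-min-microvolt = <700000>;
            regulator-max-microvolt = <1440000>;
        };

        vbus {
            /* explicit "vbus" property above */
            regulator-name = "usb_vbus";
        };
    };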
diff --git a/Documentation/devicetree/bindings/regulator/brcm,bcm59056.yaml b/Documentation/devicetree/bindings/regulator/brcm,bcm59056.yaml
new file mode 100644
index 000000000000..7a5e36394d21
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/brcm,bcm59056.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/brcm,bcm59056.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM59056 Power Management Unit regulators
+
+description: |
+ This is a part of device tree bindings for the BCM59056 power
+ management unit.
+
+ See Documentation/devicetree/bindings/mfd/brcm,bcm59056.yaml for
+ additional information and example.
+
+maintainers:
+ - Artur Weber <aweber.kernel@gmail.com>
+
+patternProperties:
+ "^(cam|sim|mmc)ldo[1-2]$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+ "^(rf|sd|sdx|aud|mic|usb|vib)ldo$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+ "^(c|m|v)sr$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+ "^(io|sd)sr[1-2]$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+ "^gpldo[1-6]$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+properties:
+ vbus:
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+additionalProperties: false
diff --git a/Documentation/devicetree/bindings/regulator/mediatek,mt6357-regulator.yaml b/Documentation/devicetree/bindings/regulator/mediatek,mt6357-regulator.yaml
index 6327bb2f6ee0..698266c09e25 100644
--- a/Documentation/devicetree/bindings/regulator/mediatek,mt6357-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/mediatek,mt6357-regulator.yaml
@@ -33,7 +33,7 @@ patternProperties:
"^ldo-v(camio18|aud28|aux18|io18|io28|rf12|rf18|cn18|cn28|fe28)$":
type: object
- $ref: fixed-regulator.yaml#
+ $ref: regulator.yaml#
unevaluatedProperties: false
description:
Properties for single fixed LDO regulator.
@@ -112,7 +112,6 @@ examples:
regulator-enable-ramp-delay = <220>;
};
mt6357_vfe28_reg: ldo-vfe28 {
- compatible = "regulator-fixed";
regulator-name = "vfe28";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
@@ -125,14 +124,12 @@ examples:
regulator-enable-ramp-delay = <110>;
};
mt6357_vrf18_reg: ldo-vrf18 {
- compatible = "regulator-fixed";
regulator-name = "vrf18";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-enable-ramp-delay = <110>;
};
mt6357_vrf12_reg: ldo-vrf12 {
- compatible = "regulator-fixed";
regulator-name = "vrf12";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
@@ -157,14 +154,12 @@ examples:
regulator-enable-ramp-delay = <264>;
};
mt6357_vcn28_reg: ldo-vcn28 {
- compatible = "regulator-fixed";
regulator-name = "vcn28";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
regulator-enable-ramp-delay = <264>;
};
mt6357_vcn18_reg: ldo-vcn18 {
- compatible = "regulator-fixed";
regulator-name = "vcn18";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -183,7 +178,6 @@ examples:
regulator-enable-ramp-delay = <264>;
};
mt6357_vcamio_reg: ldo-vcamio18 {
- compatible = "regulator-fixed";
regulator-name = "vcamio";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -212,28 +206,24 @@ examples:
regulator-always-on;
};
mt6357_vaux18_reg: ldo-vaux18 {
- compatible = "regulator-fixed";
regulator-name = "vaux18";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-enable-ramp-delay = <264>;
};
mt6357_vaud28_reg: ldo-vaud28 {
- compatible = "regulator-fixed";
regulator-name = "vaud28";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
regulator-enable-ramp-delay = <264>;
};
mt6357_vio28_reg: ldo-vio28 {
- compatible = "regulator-fixed";
regulator-name = "vio28";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
regulator-enable-ramp-delay = <264>;
};
mt6357_vio18_reg: ldo-vio18 {
- compatible = "regulator-fixed";
regulator-name = "vio18";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd96802-regulator.yaml b/Documentation/devicetree/bindings/regulator/rohm,bd96802-regulator.yaml
new file mode 100644
index 000000000000..671eaf1096d3
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/rohm,bd96802-regulator.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/rohm,bd96802-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BD96802 Power Management Integrated Circuit regulators
+
+maintainers:
+ - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+
+description:
+ This module is part of the ROHM BD96802 MFD device. For more details
+ see Documentation/devicetree/bindings/mfd/rohm,bd96802-pmic.yaml.
+
+  The regulator controller is represented as a sub-node of the PMIC node
+  in the device tree.
+
+  Regulator nodes should be named buck1 and buck2.
+
+patternProperties:
+ "^buck[1-2]$":
+ type: object
+ description:
+ Properties for single BUCK regulator.
+ $ref: regulator.yaml#
+
+ properties:
+ rohm,initial-voltage-microvolt:
+ description:
+        Initial voltage for the regulator. The voltage can be tuned +/-150 mV
+        from this value. Note that this can be modified via I2C only when the
+        PMIC is in the STBY state.
+ minimum: 500000
+ maximum: 3300000
+
+ rohm,keep-on-stby:
+ description:
+ Keep the regulator powered when PMIC transitions to STBY state.
+ type: boolean
+
+ unevaluatedProperties: false
+
+additionalProperties: false
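
A minimal sketch of the buck1/buck2 sub-nodes described above, assuming they sit in a "regulators" child of the BD96802 PMIC node as in the MFD binding; the initial voltages are illustrative assumptions within the documented 500000-3300000 range:

    regulators {
        buck1 {
            regulator-name = "buck1";
            rohm,initial-voltage-microvolt = <1000000>;
            /* keep this rail powered when the PMIC enters STBY */
            rohm,keep-on-stby;
        };

        buck2 {
            regulator-name = "buck2";
            rohm,initial-voltage-microvolt = <1800000>;
        };
    };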
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml
index 56ff6386534d..5dcc2a32c080 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml
@@ -16,6 +16,9 @@ description:
properties:
compatible:
enum:
+ - qcom,sc8180x-adsp-pas
+ - qcom,sc8180x-cdsp-pas
+ - qcom,sc8180x-slpi-pas
- qcom,sm8150-adsp-pas
- qcom,sm8150-cdsp-pas
- qcom,sm8150-mpss-pas
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sm8350-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sm8350-pas.yaml
index fd3423e6051b..6d09823153fc 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sm8350-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sm8350-pas.yaml
@@ -15,16 +15,20 @@ description:
properties:
compatible:
- enum:
- - qcom,sar2130p-adsp-pas
- - qcom,sm8350-adsp-pas
- - qcom,sm8350-cdsp-pas
- - qcom,sm8350-slpi-pas
- - qcom,sm8350-mpss-pas
- - qcom,sm8450-adsp-pas
- - qcom,sm8450-cdsp-pas
- - qcom,sm8450-mpss-pas
- - qcom,sm8450-slpi-pas
+ oneOf:
+ - enum:
+ - qcom,sar2130p-adsp-pas
+ - qcom,sm8350-adsp-pas
+ - qcom,sm8350-cdsp-pas
+ - qcom,sm8350-slpi-pas
+ - qcom,sm8350-mpss-pas
+ - qcom,sm8450-adsp-pas
+ - qcom,sm8450-cdsp-pas
+ - qcom,sm8450-mpss-pas
+ - qcom,sm8450-slpi-pas
+ - items:
+ - const: qcom,sc8280xp-slpi-pas
+ - const: qcom,sm8350-slpi-pas
reg:
maxItems: 1
@@ -61,14 +65,15 @@ allOf:
- if:
properties:
compatible:
- enum:
- - qcom,sar2130p-adsp-pas
- - qcom,sm8350-adsp-pas
- - qcom,sm8350-cdsp-pas
- - qcom,sm8350-slpi-pas
- - qcom,sm8450-adsp-pas
- - qcom,sm8450-cdsp-pas
- - qcom,sm8450-slpi-pas
+ contains:
+ enum:
+ - qcom,sar2130p-adsp-pas
+ - qcom,sm8350-adsp-pas
+ - qcom,sm8350-cdsp-pas
+ - qcom,sm8350-slpi-pas
+ - qcom,sm8450-adsp-pas
+ - qcom,sm8450-cdsp-pas
+ - qcom,sm8450-slpi-pas
then:
properties:
interrupts:
@@ -102,12 +107,13 @@ allOf:
- if:
properties:
compatible:
- enum:
- - qcom,sar2130p-adsp-pas
- - qcom,sm8350-adsp-pas
- - qcom,sm8350-slpi-pas
- - qcom,sm8450-adsp-pas
- - qcom,sm8450-slpi-pas
+ contains:
+ enum:
+ - qcom,sar2130p-adsp-pas
+ - qcom,sm8350-adsp-pas
+ - qcom,sm8350-slpi-pas
+ - qcom,sm8450-adsp-pas
+ - qcom,sm8450-slpi-pas
then:
properties:
power-domains:
diff --git a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
index 370af61d8f28..843679c557e7 100644
--- a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
@@ -139,6 +139,10 @@ properties:
If defined, when remoteproc is probed, it loads the default firmware and
starts the remote processor.
+ firmware-name:
+ maxItems: 1
+ description: Default name of the remote processor firmware.
+
required:
- compatible
- reg
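
A minimal fragment showing how the new firmware-name property might be used; the m4_rproc label and the firmware file name are illustrative assumptions, not part of the binding:

    &m4_rproc {
        /* Firmware loaded and started automatically when the remoteproc probes */
        firmware-name = "stm32-m4-demo.elf";
    };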
diff --git a/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml b/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
index 695ef38a7bb3..150e95c0d9be 100644
--- a/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
@@ -12,14 +12,20 @@ maintainers:
properties:
compatible:
- enum:
- - amlogic,meson8b-reset # Reset Controller on Meson8b and compatible SoCs
- - amlogic,meson-gxbb-reset # Reset Controller on GXBB and compatible SoCs
- - amlogic,meson-axg-reset # Reset Controller on AXG and compatible SoCs
- - amlogic,meson-a1-reset # Reset Controller on A1 and compatible SoCs
- - amlogic,meson-s4-reset # Reset Controller on S4 and compatible SoCs
- - amlogic,c3-reset # Reset Controller on C3 and compatible SoCs
- - amlogic,t7-reset
+ oneOf:
+ - enum:
+ - amlogic,meson8b-reset # Reset Controller on Meson8b and compatible SoCs
+ - amlogic,meson-gxbb-reset # Reset Controller on GXBB and compatible SoCs
+ - amlogic,meson-axg-reset # Reset Controller on AXG and compatible SoCs
+ - amlogic,meson-a1-reset # Reset Controller on A1 and compatible SoCs
+ - amlogic,meson-s4-reset # Reset Controller on S4 and compatible SoCs
+ - amlogic,c3-reset # Reset Controller on C3 and compatible SoCs
+ - amlogic,t7-reset
+ - items:
+ - enum:
+ - amlogic,a4-reset
+ - amlogic,a5-reset
+ - const: amlogic,meson-s4-reset
reg:
maxItems: 1
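
A minimal sketch of an A4 reset controller node using the new fallback compatible added above; the unit address and register size are illustrative assumptions:

    reset-controller@2000 {
        compatible = "amlogic,a4-reset", "amlogic,meson-s4-reset";
        reg = <0x2000 0x98>;
        #reset-cells = <1>;
    };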
diff --git a/Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml b/Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml
index c3b33bbc7319..84c4801df8d9 100644
--- a/Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml
@@ -24,6 +24,9 @@ properties:
- microchip,sam9x60-rstc
- microchip,sama7g5-rstc
- items:
+ - const: microchip,sama7d65-rstc
+ - const: microchip,sama7g5-rstc
+ - items:
- const: atmel,sama5d3-rstc
- const: atmel,at91sam9g45-rstc
- items:
diff --git a/Documentation/devicetree/bindings/reset/renesas,rzv2h-usb2phy-reset.yaml b/Documentation/devicetree/bindings/reset/renesas,rzv2h-usb2phy-reset.yaml
new file mode 100644
index 000000000000..c79f61c2373b
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/renesas,rzv2h-usb2phy-reset.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/renesas,rzv2h-usb2phy-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/V2H(P) USB2PHY Port reset Control
+
+maintainers:
+ - Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+
+description:
+ The RZ/V2H(P) USB2PHY Control mainly controls Port reset and power down of the
+ USB2.0 PHY.
+
+properties:
+ compatible:
+ const: renesas,r9a09g057-usb2phy-reset # RZ/V2H(P)
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ '#reset-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+ - power-domains
+ - '#reset-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/renesas,r9a09g057-cpg.h>
+
+ reset-controller@15830000 {
+ compatible = "renesas,r9a09g057-usb2phy-reset";
+ reg = <0x15830000 0x10000>;
+ clocks = <&cpg CPG_MOD 0xb6>;
+ resets = <&cpg 0xaf>;
+ power-domains = <&cpg>;
+ #reset-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/reset/sophgo,sg2042-reset.yaml b/Documentation/devicetree/bindings/reset/sophgo,sg2042-reset.yaml
index 76e1931f0908..1d1b84575960 100644
--- a/Documentation/devicetree/bindings/reset/sophgo,sg2042-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/sophgo,sg2042-reset.yaml
@@ -11,7 +11,12 @@ maintainers:
properties:
compatible:
- const: sophgo,sg2042-reset
+ oneOf:
+ - items:
+ - enum:
+ - sophgo,sg2044-reset
+ - const: sophgo,sg2042-reset
+ - const: sophgo,sg2042-reset
reg:
maxItems: 1
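
A minimal sketch of an SG2044 reset controller using the new fallback compatible; the base address and size are illustrative assumptions:

    rstgen: reset-controller@30013000 {
        compatible = "sophgo,sg2044-reset", "sophgo,sg2042-reset";
        reg = <0x30013000 0x1000>;
        #reset-cells = <1>;
    };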
diff --git a/Documentation/devicetree/bindings/reset/thead,th1520-reset.yaml b/Documentation/devicetree/bindings/reset/thead,th1520-reset.yaml
new file mode 100644
index 000000000000..f2e91d0add7a
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/thead,th1520-reset.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/thead,th1520-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: T-HEAD TH1520 SoC Reset Controller
+
+description:
+ The T-HEAD TH1520 reset controller is a hardware block that asserts/deasserts
+ resets for SoC subsystems.
+
+maintainers:
+ - Michal Wilczynski <m.wilczynski@samsung.com>
+
+properties:
+ compatible:
+ enum:
+ - thead,th1520-reset
+
+ reg:
+ maxItems: 1
+
+ "#reset-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ rst: reset-controller@ffef528000 {
+ compatible = "thead,th1520-reset";
+ reg = <0xff 0xef528000 0x0 0x1000>;
+ #reset-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml
index 1f1b42dde94d..1db85fc9966f 100644
--- a/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Zynq UltraScale+ MPSoC and Versal reset
maintainers:
- - Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
description: |
diff --git a/Documentation/devicetree/bindings/riscv/sophgo.yaml b/Documentation/devicetree/bindings/riscv/sophgo.yaml
index a14cb10ff3f0..b4c4d7a7d7ad 100644
--- a/Documentation/devicetree/bindings/riscv/sophgo.yaml
+++ b/Documentation/devicetree/bindings/riscv/sophgo.yaml
@@ -35,6 +35,10 @@ properties:
- enum:
- milkv,pioneer
- const: sophgo,sg2042
+ - items:
+ - enum:
+ - sophgo,srd3-10
+ - const: sophgo,sg2044
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/rng/rockchip,rk3588-rng.yaml b/Documentation/devicetree/bindings/rng/rockchip,rk3588-rng.yaml
index ca71b400bcae..fcc5be80142d 100644
--- a/Documentation/devicetree/bindings/rng/rockchip,rk3588-rng.yaml
+++ b/Documentation/devicetree/bindings/rng/rockchip,rk3588-rng.yaml
@@ -4,9 +4,9 @@
$id: http://devicetree.org/schemas/rng/rockchip,rk3588-rng.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Rockchip RK3588 TRNG
+title: Rockchip RK3576/RK3588 TRNG
-description: True Random Number Generator on Rockchip RK3588 SoC
+description: True Random Number Generator on Rockchip RK3576/RK3588 SoCs
maintainers:
- Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
@@ -14,6 +14,7 @@ maintainers:
properties:
compatible:
enum:
+ - rockchip,rk3576-rng
- rockchip,rk3588-rng
reg:
diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
index 1aa3480d8d81..1ee0aed5057d 100644
--- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
@@ -17,9 +17,7 @@ allOf:
properties:
compatible:
items:
- - enum:
- - renesas,r9a06g032-uart
- - renesas,r9a06g033-uart
+ - const: renesas,r9a06g032-uart
- const: renesas,rzn1-uart
- const: snps,dw-apb-uart
then:
@@ -45,15 +43,11 @@ properties:
compatible:
oneOf:
- items:
- - enum:
- - renesas,r9a06g032-uart
- - renesas,r9a06g033-uart
+ - const: renesas,r9a06g032-uart
- const: renesas,rzn1-uart
- const: snps,dw-apb-uart
- items:
- - enum:
- - renesas,r9a06g032-uart
- - renesas,r9a06g033-uart
+ - const: renesas,r9a06g032-uart
- const: renesas,rzn1-uart
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/soc/amlogic/amlogic,meson-gx-clk-measure.yaml b/Documentation/devicetree/bindings/soc/amlogic/amlogic,meson-gx-clk-measure.yaml
index 77c281153010..39d4637c2d08 100644
--- a/Documentation/devicetree/bindings/soc/amlogic/amlogic,meson-gx-clk-measure.yaml
+++ b/Documentation/devicetree/bindings/soc/amlogic/amlogic,meson-gx-clk-measure.yaml
@@ -22,6 +22,8 @@ properties:
- amlogic,meson-axg-clk-measure
- amlogic,meson-g12a-clk-measure
- amlogic,meson-sm1-clk-measure
+ - amlogic,c3-clk-measure
+ - amlogic,s4-clk-measure
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml
index 31295be91013..234089b5954d 100644
--- a/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml
+++ b/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale Layerscape Reset Registers Module
maintainers:
- - Frank Li
+ - Frank Li <Frank.Li@nxp.com>
description:
Reset Module includes chip reset, service processor control and Reset Control
diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,qman-fqd.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,qman-fqd.yaml
index de0b4ae740ff..a975bce59975 100644
--- a/Documentation/devicetree/bindings/soc/fsl/fsl,qman-fqd.yaml
+++ b/Documentation/devicetree/bindings/soc/fsl/fsl,qman-fqd.yaml
@@ -50,7 +50,7 @@ required:
- compatible
allOf:
- - $ref: reserved-memory.yaml
+ - $ref: /schemas/reserved-memory/reserved-memory.yaml
unevaluatedProperties: false
@@ -61,7 +61,7 @@ examples:
#size-cells = <2>;
qman-fqd {
- compatible = "shared-dma-pool";
+ compatible = "fsl,qman-fqd";
size = <0 0x400000>;
alignment = <0 0x400000>;
no-map;
diff --git a/Documentation/devicetree/bindings/soc/google/google,gs101-pmu-intr-gen.yaml b/Documentation/devicetree/bindings/soc/google/google,gs101-pmu-intr-gen.yaml
new file mode 100644
index 000000000000..2be022ca6a7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/google/google,gs101-pmu-intr-gen.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/google/google,gs101-pmu-intr-gen.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Google Power Management Unit (PMU) Interrupt Generation
+
+description: |
+  Interrupt generation block used for handshaking with the PMU through interrupts.
+
+maintainers:
+ - Peter Griffin <peter.griffin@linaro.org>
+
+properties:
+ compatible:
+ items:
+ - const: google,gs101-pmu-intr-gen
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ pmu_intr_gen: syscon@17470000 {
+ compatible = "google,gs101-pmu-intr-gen", "syscon";
+ reg = <0x17470000 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/mediatek/mediatek,mt8183-dvfsrc.yaml b/Documentation/devicetree/bindings/soc/mediatek/mediatek,mt8183-dvfsrc.yaml
index 1ad5b61b249f..4c96d4917967 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/mediatek,mt8183-dvfsrc.yaml
+++ b/Documentation/devicetree/bindings/soc/mediatek/mediatek,mt8183-dvfsrc.yaml
@@ -23,6 +23,7 @@ properties:
compatible:
oneOf:
- enum:
+ - mediatek,mt6893-dvfsrc
- mediatek,mt8183-dvfsrc
- mediatek,mt8195-dvfsrc
- items:
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,rpm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,rpm.yaml
index b00be9e01206..3e8d99cb4dc3 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,rpm.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,rpm.yaml
@@ -36,6 +36,13 @@ properties:
- const: err
- const: wakeup
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ram
+
qcom,ipc:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
@@ -46,6 +53,14 @@ properties:
description:
Three entries specifying the outgoing ipc bit used for signaling the RPM.
+ clock-controller:
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ contains:
+ const: qcom,rpmcc
+
patternProperties:
"^regulators(-[01])?$":
type: object
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml
index af632d0e0355..036562eb5140 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml
@@ -44,7 +44,13 @@ description: |
properties:
compatible:
- const: qcom,rpmh-rsc
+ oneOf:
+ - items:
+ - enum:
+ - qcom,sc7180-rpmh-apps-rsc
+ - qcom,sdm845-rpmh-apps-rsc
+ - const: qcom,rpmh-rsc
+ - const: qcom,rpmh-rsc
interrupts:
minItems: 1
@@ -124,7 +130,21 @@ required:
- qcom,tcs-offset
- reg
- reg-names
- - power-domains
+
+allOf:
+ # Some platforms may lack a OSI-mode PSCI implementation, which implies the
+ # system power domain can't provide feedback about entering power collapse
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sc7180-rpmh-apps-rsc
+ - qcom,sdm845-rpmh-apps-rsc
+ then:
+ required:
+ - power-domains
additionalProperties: false
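
A minimal sketch of an SDM845 apps RSC node relying on the new SoC-specific compatible, which per the allOf above no longer requires power-domains; the register layout, interrupt numbers and TCS configuration are illustrative assumptions:

    #include <dt-bindings/interrupt-controller/arm-gic.h>
    #include <dt-bindings/soc/qcom,rpmh-rsc.h>

    apps_rsc: rsc@179c0000 {
        compatible = "qcom,sdm845-rpmh-apps-rsc", "qcom,rpmh-rsc";
        reg = <0x179c0000 0x10000>,
              <0x179d0000 0x10000>,
              <0x179e0000 0x10000>;
        reg-names = "drv-0", "drv-1", "drv-2";
        interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
        label = "apps_rsc";
        qcom,tcs-offset = <0xd00>;
        qcom,drv-id = <2>;
        qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>,
                          <WAKE_TCS 3>, <CONTROL_TCS 1>;
        /* power-domains omitted: assumed non-OSI PSCI platform */
    };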
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,saw2.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,saw2.yaml
index ca4bce817273..c2f1f5946cfa 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,saw2.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,saw2.yaml
@@ -73,9 +73,10 @@ examples:
#size-cells = <0>;
cpu@0 {
- compatible = "qcom,kryo";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "qcom,kpss-acc-v2";
+ qcom,acc = <&acc0>;
qcom,saw = <&saw0>;
reg = <0x0>;
operating-points-v2 = <&cpu_opp_table>;
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml
index fd6db0ca98eb..4fcae6bedfff 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml
@@ -54,7 +54,7 @@ properties:
- compatible
wifi:
- additionalProperties: false
+ unevaluatedProperties: false
type: object
properties:
compatible:
@@ -88,6 +88,9 @@ properties:
- qcom,smem-states
- qcom,smem-state-names
+ allOf:
+ - $ref: /schemas/net/wireless/wireless-controller.yaml#
+
required:
- compatible
- qcom,mmio
diff --git a/Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml b/Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml
index e0f7503a9f35..c41dcaea568a 100644
--- a/Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml
+++ b/Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml
@@ -25,6 +25,7 @@ properties:
items:
- enum:
- renesas,r9a09g047-sys # RZ/G3E
+ - renesas,r9a09g056-sys # RZ/V2N
- renesas,r9a09g057-sys # RZ/V2H
reg:
diff --git a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
index 51a4c48eea6d..5e6e6e6208dc 100644
--- a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
+++ b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
@@ -375,6 +375,13 @@ properties:
- renesas,r8a779g3 # ES3.x
- const: renesas,r8a779g0
+ - description: R-Car V4H (R8A779G3)
+ items:
+ - enum:
+ - retronix,sparrow-hawk # Sparrow Hawk board
+ - const: renesas,r8a779g3 # ES3.x
+ - const: renesas,r8a779g0
+
- description: R-Car V4M (R8A779H0)
items:
- enum:
@@ -551,6 +558,21 @@ properties:
- renesas,r9a09g047e58 # Quad Cortex-A55 + Cortex-M33 + Ethos-U55 (21mm BGA)
- const: renesas,r9a09g047
+ - description: RZ/V2N (R9A09G056)
+ items:
+ - enum:
+ - renesas,rzv2n-evk # RZ/V2N EVK (RTK0EF0186C03000BJ)
+ - enum:
+ - renesas,r9a09g056n41 # RZ/V2N
+ - renesas,r9a09g056n42 # RZ/V2N with Mali-G31 support
+ - renesas,r9a09g056n43 # RZ/V2N with Mali-C55 support
+ - renesas,r9a09g056n44 # RZ/V2N with Mali-G31 + Mali-C55 support
+ - renesas,r9a09g056n45 # RZ/V2N with cryptographic extension support
+ - renesas,r9a09g056n46 # RZ/V2N with Mali-G31 + cryptographic extension support
+ - renesas,r9a09g056n47 # RZ/V2N with Mali-C55 + cryptographic extension support
+ - renesas,r9a09g056n48 # RZ/V2N with Mali-G31 + Mali-C55 + cryptographic extension support
+ - const: renesas,r9a09g056
+
- description: RZ/V2H(P) (R9A09G057)
items:
- enum:
@@ -570,6 +592,16 @@ properties:
- const: renesas,r9a09g057h48
- const: renesas,r9a09g057
+ - description: RZ/T2H (R9A09G077)
+ items:
+ - enum:
+ - renesas,rzt2h-evk # RZ/T2H Evaluation Board
+ - enum:
+ - renesas,r9a09g077m04 # RZ/T2H with Single Cortex-A55 + Dual Cortex-R52 - no security
+ - renesas,r9a09g077m24 # RZ/T2H with Dual Cortex-A55 + Dual Cortex-R52 - no security
+ - renesas,r9a09g077m44 # RZ/T2H with Quad Cortex-A55 + Dual Cortex-R52 - no security
+ - const: renesas,r9a09g077
+
additionalProperties: true
...
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
index 2f61c1b95fea..8cbf5b6772dd 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -18,6 +18,12 @@ properties:
- rockchip,rk3528-ioc-grf
- rockchip,rk3528-vo-grf
- rockchip,rk3528-vpu-grf
+ - rockchip,rk3562-ioc-grf
+ - rockchip,rk3562-peri-grf
+ - rockchip,rk3562-pipephy-grf
+ - rockchip,rk3562-pmu-grf
+ - rockchip,rk3562-sys-grf
+ - rockchip,rk3562-usbphy-grf
- rockchip,rk3566-pipe-grf
- rockchip,rk3568-pcie3-phy-grf
- rockchip,rk3568-pipe-grf
@@ -82,6 +88,7 @@ properties:
- rockchip,rk3368-pmugrf
- rockchip,rk3399-grf
- rockchip,rk3399-pmugrf
+ - rockchip,rk3562-pmu-grf
- rockchip,rk3568-grf
- rockchip,rk3568-pmugrf
- rockchip,rk3576-ioc-grf
diff --git a/Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml b/Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml
index 204da6fe458d..3109df43d502 100644
--- a/Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml
+++ b/Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml
@@ -129,6 +129,11 @@ properties:
description:
Node for reboot method
+ google,pmu-intr-gen-syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to PMU interrupt generation interface.
+
required:
- compatible
- reg
@@ -189,6 +194,16 @@ allOf:
properties:
dp-phy: false
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - google,gs101-pmu
+ then:
+ required:
+ - google,pmu-intr-gen-syscon
+
examples:
- |
#include <dt-bindings/clock/exynos5250.h>
diff --git a/Documentation/devicetree/bindings/soc/sophgo/sophgo,cv1800b-rtc.yaml b/Documentation/devicetree/bindings/soc/sophgo/sophgo,cv1800b-rtc.yaml
new file mode 100644
index 000000000000..5cf186c396c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/sophgo/sophgo,cv1800b-rtc.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/sophgo/sophgo,cv1800b-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Real Time Clock of the Sophgo CV1800 SoC
+
+description:
+ The RTC (Real Time Clock) is an independently powered module in the chip. It
+  contains a 32 kHz oscillator and a Power-On-Reset (POR) sub-module, which can
+  be used for timekeeping and generating scheduled alarms. In addition, the
+ hardware state machine provides triggering and timing control for chip
+ power-on, power-off and reset.
+
+ Furthermore, the 8051 subsystem is located within RTCSYS and is independently
+ powered. System software can use the 8051 to manage wake conditions and wake
+ the system while the system is asleep, and communicate with external devices
+ through peripheral controllers.
+
+ Technical Reference Manual available at
+ https://github.com/sophgo/sophgo-doc/tree/main/SG200X/TRM
+
+maintainers:
+ - sophgo@lists.linux.dev
+
+allOf:
+ - $ref: /schemas/rtc/rtc.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: sophgo,cv1800b-rtc
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: RTC Alarm
+ - description: RTC Longpress
+ - description: VBAT DET
+
+ interrupt-names:
+ items:
+ - const: alarm
+ - const: longpress
+ - const: vbat
+
+ clocks:
+ items:
+ - description: RTC clock source
+ - description: DW8051 MCU clock source
+
+ clock-names:
+ items:
+ - const: rtc
+ - const: mcu
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sophgo,cv1800.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ rtc@5025000 {
+ compatible = "sophgo,cv1800b-rtc", "syscon";
+ reg = <0x5025000 0x2000>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>,
+ <18 IRQ_TYPE_LEVEL_HIGH>,
+ <19 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "alarm", "longpress", "vbat";
+ clocks = <&clk CLK_RTC_25M>,
+ <&clk CLK_SRC_RTC_SYS_0>;
+ clock-names = "rtc", "mcu";
+ };
diff --git a/Documentation/devicetree/bindings/soc/sophgo/sophgo,sg2044-top-syscon.yaml b/Documentation/devicetree/bindings/soc/sophgo/sophgo,sg2044-top-syscon.yaml
new file mode 100644
index 000000000000..a82cc3cae576
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/sophgo/sophgo,sg2044-top-syscon.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/sophgo/sophgo,sg2044-top-syscon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo SG2044 SoC TOP system controller
+
+maintainers:
+ - Inochi Amaoto <inochiama@gmail.com>
+
+description:
+ The Sophgo SG2044 TOP system controller is a hardware block grouping
+ multiple small functions, such as clocks and some other internal
+  multiple small functions, such as clocks and other internal
+  functions.
+properties:
+ compatible:
+ items:
+ - const: sophgo,sg2044-top-syscon
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+ description:
+ See <dt-bindings/clock/sophgo,sg2044-pll.h> for valid clock.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ syscon@50000000 {
+ compatible = "sophgo,sg2044-top-syscon", "syscon";
+ reg = <0x50000000 0x1000>;
+ #clock-cells = <1>;
+ clocks = <&osc>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/spacemit/spacemit,k1-syscon.yaml b/Documentation/devicetree/bindings/soc/spacemit/spacemit,k1-syscon.yaml
new file mode 100644
index 000000000000..30aaf49da03d
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/spacemit/spacemit,k1-syscon.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/spacemit/spacemit,k1-syscon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SpacemiT K1 SoC System Controller
+
+maintainers:
+ - Haylen Chu <heylenay@4d2.org>
+
+description:
+  System controllers found on the SpacemiT K1 SoC, providing clock, reset
+  and power-management functions.
+
+properties:
+ compatible:
+ enum:
+ - spacemit,k1-syscon-apbc
+ - spacemit,k1-syscon-apmu
+ - spacemit,k1-syscon-mpmu
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 4
+
+ clock-names:
+ items:
+ - const: osc
+ - const: vctcxo_1m
+ - const: vctcxo_3m
+ - const: vctcxo_24m
+
+ "#clock-cells":
+ const: 1
+ description:
+ See <dt-bindings/clock/spacemit,k1-syscon.h> for valid indices.
+
+ "#power-domain-cells":
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - "#clock-cells"
+ - "#reset-cells"
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: spacemit,k1-syscon-apbc
+ then:
+ properties:
+ "#power-domain-cells": false
+ else:
+ required:
+ - "#power-domain-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ system-controller@d4050000 {
+ compatible = "spacemit,k1-syscon-mpmu";
+ reg = <0xd4050000 0x209c>;
+ clocks = <&osc>, <&vctcxo_1m>, <&vctcxo_3m>, <&vctcxo_24m>;
+ clock-names = "osc", "vctcxo_1m", "vctcxo_3m", "vctcxo_24m";
+ #clock-cells = <1>;
+ #power-domain-cells = <1>;
+ #reset-cells = <1>;
+ };
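
Per the allOf above, the APBC instance must not have "#power-domain-cells"; a minimal sketch, with the base address and size as illustrative assumptions:

    system-controller@d4015000 {
        compatible = "spacemit,k1-syscon-apbc";
        reg = <0xd4015000 0x1000>;
        clocks = <&osc>, <&vctcxo_1m>, <&vctcxo_3m>, <&vctcxo_24m>;
        clock-names = "osc", "vctcxo_1m", "vctcxo_3m", "vctcxo_24m";
        #clock-cells = <1>;
        #reset-cells = <1>;
    };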
diff --git a/Documentation/devicetree/bindings/soc/ti/ti,j721e-system-controller.yaml b/Documentation/devicetree/bindings/soc/ti/ti,j721e-system-controller.yaml
index 378e9cc5fac2..f3bd0be3b279 100644
--- a/Documentation/devicetree/bindings/soc/ti/ti,j721e-system-controller.yaml
+++ b/Documentation/devicetree/bindings/soc/ti/ti,j721e-system-controller.yaml
@@ -26,6 +26,7 @@ properties:
compatible:
items:
- enum:
+ - ti,am654-system-controller
- ti,j7200-system-controller
- ti,j721e-system-controller
- ti,j721s2-system-controller
@@ -68,6 +69,23 @@ patternProperties:
description:
The node corresponding to SoC chip identification.
+ "^pcie-ctrl@[0-9a-f]+$":
+ type: object
+ description:
+ The node corresponding to PCIe control register.
+
+ "^clock@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/soc/ti/ti,am654-serdes-ctrl.yaml#
+ description:
+ This is the Serdes Control region.
+
+ "^dss-oldi-io-ctrl@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/mfd/syscon.yaml#
+ description:
+ This is the DSS OLDI CTRL region.
+
required:
- compatible
- reg
@@ -110,5 +128,10 @@ examples:
compatible = "ti,am654-chipid";
reg = <0x14 0x4>;
};
+
+ pcie0_ctrl: pcie-ctrl@4070 {
+ compatible = "ti,j784s4-pcie-ctrl", "syscon";
+ reg = <0x4070 0x4>;
+ };
};
...
diff --git a/Documentation/devicetree/bindings/sound/audio-graph-card2.yaml b/Documentation/devicetree/bindings/sound/audio-graph-card2.yaml
index 94588353f852..40eb1d7d6cf1 100644
--- a/Documentation/devicetree/bindings/sound/audio-graph-card2.yaml
+++ b/Documentation/devicetree/bindings/sound/audio-graph-card2.yaml
@@ -18,11 +18,7 @@ properties:
label:
maxItems: 1
routing:
- description: |
- A list of the connections between audio components.
- Each entry is a pair of strings, the first being the
- connection's sink, the second being the connection's source.
- $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ $ref: audio-graph.yaml#/properties/routing
aux-devs:
description: |
List of phandles pointing to auxiliary devices, such
@@ -39,6 +35,8 @@ properties:
description: Codec to Codec node
hp-det-gpios:
$ref: audio-graph.yaml#/properties/hp-det-gpios
+ mic-det-gpios:
+ $ref: audio-graph.yaml#/properties/mic-det-gpios
widgets:
$ref: audio-graph.yaml#/properties/widgets
diff --git a/Documentation/devicetree/bindings/sound/cirrus,cs48l32.yaml b/Documentation/devicetree/bindings/sound/cirrus,cs48l32.yaml
new file mode 100644
index 000000000000..bf087b57aaf6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cirrus,cs48l32.yaml
@@ -0,0 +1,195 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/cirrus,cs48l32.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic CS48L32 audio DSP
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ The CS48L32 is a high-performance low-power audio DSP for smartphones and
+ other portable audio devices. The CS48L32 combines a programmable Halo Core
+ DSP with a variety of power-efficient fixed-function audio processors.
+
+ See also the binding headers:
+
+    include/dt-bindings/sound/cs48l32.h
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - cirrus,cs48l32
+
+ reg:
+ description: SPI chip-select number.
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 25000000
+
+ vdd-a-supply:
+ description: Regulator supplying VDD_A
+
+ vdd-d-supply:
+ description: Regulator supplying VDD_D
+
+ vdd-io-supply:
+ description: Regulator supplying VDD_IO
+
+ vdd-cp-supply:
+ description: Regulator supplying VDD_CP
+
+ reset-gpios:
+ description:
+ One entry specifying the GPIO controlling /RESET. Although optional,
+ it is strongly recommended to use a hardware reset.
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: The clock supplied on MCLK1
+
+ clock-names:
+ const: mclk1
+
+ '#sound-dai-cells':
+ const: 1
+
+ cirrus,in-type:
+ description: |
+ A list of input type settings for each ADC input.
+ Inputs are one of these types:
+ CS48L32_IN_TYPE_DIFF : analog differential (default)
+ CS48L32_IN_TYPE_SE : analog single-ended
+
+ The type of the left (L) and right (R) channel on each input is
+ independently configured, as are the two groups of pins muxable to
+ the input (referred to in the datasheet as "1" and "2").
+
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - description:
+ IN1L_1 analog input type. One of the CS48L32_IN_TYPE_xxx.
+ minimum: 0
+ maximum: 1
+ default: 0
+ - description:
+ IN1R_1 analog input type. One of the CS48L32_IN_TYPE_xxx.
+ minimum: 0
+ maximum: 1
+ default: 0
+ - description:
+ IN1L_2 analog input type. One of the CS48L32_IN_TYPE_xxx.
+ minimum: 0
+ maximum: 1
+ default: 0
+ - description:
+ IN1R_2 analog input type. One of the CS48L32_IN_TYPE_xxx.
+ minimum: 0
+ maximum: 1
+ default: 0
+
+ cirrus,pdm-sup:
+ description: |
+      Indicates which MICBIAS output supplies bias to the microphone.
+ There is one cell per input (IN1, IN2, ...).
+
+ One of the CS48L32_MICBIAS_xxx values.
+ CS48L32_PDM_SUP_VOUT_MIC : mic biased from VOUT_MIC
+ CS48L32_PDM_SUP_MICBIAS1 : mic biased from MICBIAS1
+
+ Also see the INn_PDM_SUP field in the datasheet.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - description: IN1 PDM supply source
+ minimum: 0
+ maximum: 1
+ default: 0
+ - description: IN2 PDM supply source
+ minimum: 0
+ maximum: 1
+ default: 0
+
+required:
+ - compatible
+ - reg
+ - vdd-a-supply
+ - vdd-d-supply
+ - vdd-io-supply
+ - vdd-cp-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/sound/cs48l32.h>
+
+ spi@e0006000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xe0006000 0x1000>;
+
+ codec@1 {
+ compatible = "cirrus,cs48l32";
+
+ reg = <0x1>;
+ spi-max-frequency = <2500000>;
+
+ vdd-a-supply = <&regulator_1v8>;
+ vdd-d-supply = <&regulator_1v2>;
+ vdd-io-supply = <&regulator_1v8>;
+ vdd-cp-supply = <&regulator_1v8>;
+
+ reset-gpios = <&gpio 0 0>;
+
+ clocks = <&clks 0>;
+ clock-names = "mclk1";
+
+ interrupt-parent = <&gpio0>;
+ interrupts = <56 8>;
+
+ #sound-dai-cells = <1>;
+
+ cirrus,in-type = <
+ CS48L32_IN_TYPE_DIFF CS48L32_IN_TYPE_DIFF
+ CS48L32_IN_TYPE_SE CS48L32_IN_TYPE_SE
+ >;
+
+ cirrus,pdm-sup = <
+ CS48L32_PDM_SUP_MICBIAS1 CS48L32_PDM_SUP_MICBIAS1
+ >;
+ };
+ };
+
+#
+# Minimal config
+#
+ - |
+ #include <dt-bindings/sound/cs48l32.h>
+
+ spi@e0006000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xe0006000 0x1000>;
+
+ codec@1 {
+ compatible = "cirrus,cs48l32";
+
+ reg = <0x1>;
+
+ vdd-a-supply = <&regulator_1v8>;
+ vdd-d-supply = <&regulator_1v2>;
+ vdd-io-supply = <&regulator_1v8>;
+ vdd-cp-supply = <&regulator_1v8>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/everest,es8375.yaml b/Documentation/devicetree/bindings/sound/everest,es8375.yaml
new file mode 100644
index 000000000000..4a3d671c66b1
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/everest,es8375.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/everest,es8375.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Everest ES8375 audio CODEC
+
+maintainers:
+ - Michael Zhang <zhangyi@everest-semi.com>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ const: everest,es8375
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: clock for master clock (MCLK)
+
+ clock-names:
+ items:
+ - const: mclk
+
+ vdda-supply:
+ description:
+ Analogue power supply.
+
+ vddd-supply:
+ description:
+ Interface power supply.
+
+ everest,mclk-src:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ description: |
+      Selects which pin of the MCLK/SCLK pair is used as the internal clock.
+ 0 represents selecting MCLK.
+ 1 represents selecting SCLK.
+ enum: [0, 1]
+ default: 0
+
+ "#sound-dai-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - "#sound-dai-cells"
+ - vdda-supply
+ - vddd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ es8375: codec@18 {
+ compatible = "everest,es8375";
+ reg = <0x18>;
+ vdda-supply = <&vdd3v3>;
+ vddd-supply = <&vdd3v3>;
+ #sound-dai-cells = <0>;
+ };
+ };
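
Since everest,mclk-src is a uint8, a node selecting SCLK as the internal clock would encode it as an 8-bit cell; a minimal variant of the example above, reusing the same assumed I2C address and supplies:

    codec@18 {
        compatible = "everest,es8375";
        reg = <0x18>;
        vdda-supply = <&vdd3v3>;
        vddd-supply = <&vdd3v3>;
        /* 1 selects SCLK as the internal clock source */
        everest,mclk-src = /bits/ 8 <1>;
        #sound-dai-cells = <0>;
    };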
diff --git a/Documentation/devicetree/bindings/sound/everest,es8389.yaml b/Documentation/devicetree/bindings/sound/everest,es8389.yaml
new file mode 100644
index 000000000000..a673df485ab3
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/everest,es8389.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/everest,es8389.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Everest ES8389 audio CODEC
+
+maintainers:
+ - Michael Zhang <zhangyi@everest-semi.com>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ const: everest,es8389
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: clock for master clock (MCLK)
+
+ clock-names:
+ items:
+ - const: mclk
+
+ "#sound-dai-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - "#sound-dai-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ es8389: codec@10 {
+ compatible = "everest,es8389";
+ reg = <0x10>;
+ #sound-dai-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/fsl,mqs.yaml b/Documentation/devicetree/bindings/sound/fsl,mqs.yaml
index 8c22e8348b14..1415247c92c8 100644
--- a/Documentation/devicetree/bindings/sound/fsl,mqs.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,mqs.yaml
@@ -28,6 +28,9 @@ properties:
- fsl,imx95-aonmix-mqs
- fsl,imx95-netcmix-mqs
+ "#sound-dai-cells":
+ const: 0
+
clocks:
minItems: 1
maxItems: 2
@@ -49,12 +52,17 @@ properties:
resets:
maxItems: 1
+ port:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- clocks
- clock-names
allOf:
+ - $ref: dai-common.yaml#
- if:
properties:
compatible:
@@ -86,7 +94,7 @@ allOf:
required:
- gpr
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/sound/loongson,ls1b-ac97.yaml b/Documentation/devicetree/bindings/sound/loongson,ls1b-ac97.yaml
new file mode 100644
index 000000000000..1c6a2771f942
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/loongson,ls1b-ac97.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/loongson,ls1b-ac97.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson-1 AC97 Controller
+
+maintainers:
+ - Keguang Zhang <keguang.zhang@gmail.com>
+
+description:
+ The Loongson-1 AC97 controller supports 2-channel stereo output and input.
+ It is paired with the DMA engine to handle playback and capture functions.
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: loongson,ls1b-ac97
+ - items:
+ - enum:
+ - loongson,ls1a-ac97
+ - loongson,ls1c-ac97
+ - const: loongson,ls1b-ac97
+
+ reg:
+ maxItems: 3
+
+ reg-names:
+ items:
+ - const: ac97
+ - const: audio-tx
+ - const: audio-rx
+
+ dmas:
+ maxItems: 2
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+ '#sound-dai-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - dmas
+ - dma-names
+ - '#sound-dai-cells'
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ audio-controller@1fe74000 {
+ compatible = "loongson,ls1b-ac97";
+ reg = <0x1fe74000 0x60>, <0x1fe72420 0x4>, <0x1fe74c4c 0x4>;
+ reg-names = "ac97", "audio-tx", "audio-rx";
+ dmas = <&dma 1>, <&dma 2>;
+ dma-names = "tx", "rx";
+ #sound-dai-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/maxim,max98925.yaml b/Documentation/devicetree/bindings/sound/maxim,max98925.yaml
index 32fd86204a7a..121e8d2d44da 100644
--- a/Documentation/devicetree/bindings/sound/maxim,max98925.yaml
+++ b/Documentation/devicetree/bindings/sound/maxim,max98925.yaml
@@ -77,11 +77,11 @@ additionalProperties: false
examples:
- |
+ #include <dt-bindings/gpio/gpio.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
- #include <dt-bindings/gpio/gpio.h>
audio-codec@3a {
compatible = "maxim,max98927";
reg = <0x3a>;
diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml
index 76d5a437dc8f..7ba2ea2dfa0b 100644
--- a/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml
+++ b/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml
@@ -96,10 +96,9 @@ patternProperties:
mediatek,clk-provider:
$ref: /schemas/types.yaml#/definitions/string
description: Indicates dai-link clock master.
- items:
- enum:
- - cpu
- - codec
+ enum:
+ - cpu
+ - codec
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml b/Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml
index cbc641ecbe94..037f21443ad1 100644
--- a/Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml
+++ b/Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml
@@ -124,10 +124,9 @@ patternProperties:
mediatek,clk-provider:
$ref: /schemas/types.yaml#/definitions/string
description: Indicates dai-link clock master.
- items:
- enum:
- - cpu
- - codec
+ enum:
+ - cpu
+ - codec
required:
- link-name
diff --git a/Documentation/devicetree/bindings/sound/mt8195-mt6359.yaml b/Documentation/devicetree/bindings/sound/mt8195-mt6359.yaml
index 2af1d8ffbd8b..356e1feee962 100644
--- a/Documentation/devicetree/bindings/sound/mt8195-mt6359.yaml
+++ b/Documentation/devicetree/bindings/sound/mt8195-mt6359.yaml
@@ -21,6 +21,7 @@ properties:
- mediatek,mt8195_mt6359_rt1019_rt5682
- mediatek,mt8195_mt6359_rt1011_rt5682
- mediatek,mt8195_mt6359_max98390_rt5682
+ - mediatek,mt8195_mt6359
model:
$ref: /schemas/types.yaml#/definitions/string
@@ -44,6 +45,8 @@ properties:
- Right Spk
# Sources
+ - Headphone L
+ - Headphone R
- Headset Mic
- HPOL
- HPOR
@@ -88,6 +91,7 @@ patternProperties:
link-name:
description: Indicates dai-link name and PCM stream name
enum:
+ - DL_SRC_BE
- DPTX_BE
- ETDM1_IN_BE
- ETDM2_IN_BE
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml
index b4bee466d67a..da89523ccf5f 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml
@@ -23,6 +23,7 @@ properties:
enum:
- nvidia,tegra210-audio-graph-card
- nvidia,tegra186-audio-graph-card
+ - nvidia,tegra264-audio-graph-card
clocks:
minItems: 2
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra186-asrc.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra186-asrc.yaml
index e15f387c4c29..66b56e71599b 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra186-asrc.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra186-asrc.yaml
@@ -31,7 +31,9 @@ properties:
compatible:
oneOf:
- - const: nvidia,tegra186-asrc
+ - enum:
+ - nvidia,tegra186-asrc
+ - nvidia,tegra264-asrc
- items:
- enum:
- nvidia,tegra234-asrc
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra186-dspk.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra186-dspk.yaml
index e1362c77472b..46ba167081ef 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra186-dspk.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra186-dspk.yaml
@@ -29,6 +29,7 @@ properties:
- const: nvidia,tegra186-dspk
- items:
- enum:
+ - nvidia,tegra264-dspk
- nvidia,tegra234-dspk
- nvidia,tegra194-dspk
- const: nvidia,tegra186-dspk
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-admaif.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-admaif.yaml
index 15ab40aeab1e..b32f33214ba6 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-admaif.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-admaif.yaml
@@ -26,6 +26,7 @@ properties:
- enum:
- nvidia,tegra210-admaif
- nvidia,tegra186-admaif
+ - nvidia,tegra264-admaif
- items:
- enum:
- nvidia,tegra234-admaif
@@ -39,6 +40,19 @@ properties:
dma-names: true
+ interconnects:
+ items:
+ - description: APE read memory client
+ - description: APE write memory client
+
+ interconnect-names:
+ items:
+ - const: dma-mem # read
+ - const: write
+
+ iommus:
+ maxItems: 1
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
description: |
@@ -74,6 +88,9 @@ then:
Should be "tx1", "tx2" ... "tx10" for DMA Tx channel
minItems: 1
maxItems: 20
+ interconnects: false
+ interconnect-names: false
+ iommus: false
else:
properties:
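
To illustrate the new memory-path properties, a Tegra264 ADMAIF node could be sketched as below; the unit address, the &mc/&emc/&smmu phandles and their specifier cells are placeholders rather than values from this patch, and the dmas/ports the binding requires are omitted.

    admaif@290f000 {
        compatible = "nvidia,tegra264-admaif";
        reg = <0x0290f000 0x1000>;                       /* placeholder */
        /* read path "dma-mem", write path "write", as required above */
        interconnects = <&mc 0 &emc 0>, <&mc 1 &emc 1>;  /* placeholder specifiers */
        interconnect-names = "dma-mem", "write";
        iommus = <&smmu 0x12>;                           /* placeholder stream ID */
    };
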
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-adx.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-adx.yaml
index e4c871797fa6..19a80929f93e 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-adx.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-adx.yaml
@@ -27,7 +27,9 @@ properties:
compatible:
oneOf:
- - const: nvidia,tegra210-adx
+ - enum:
+ - nvidia,tegra210-adx
+ - nvidia,tegra264-adx
- items:
- enum:
- nvidia,tegra234-adx
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml
index c4abac81f207..1c9f24d26819 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml
@@ -27,6 +27,7 @@ properties:
- nvidia,tegra210-ahub
- nvidia,tegra186-ahub
- nvidia,tegra234-ahub
+ - nvidia,tegra264-ahub
- items:
- const: nvidia,tegra194-ahub
- const: nvidia,tegra186-ahub
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-amx.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-amx.yaml
index 021b72546ba4..89712102cfdf 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-amx.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-amx.yaml
@@ -26,11 +26,13 @@ properties:
compatible:
oneOf:
- - const: nvidia,tegra210-amx
+ - enum:
+ - nvidia,tegra210-amx
+ - nvidia,tegra194-amx
+ - nvidia,tegra264-amx
- items:
- const: nvidia,tegra186-amx
- const: nvidia,tegra210-amx
- - const: nvidia,tegra194-amx
- items:
- const: nvidia,tegra234-amx
- const: nvidia,tegra194-amx
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-dmic.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-dmic.yaml
index bff551c35da7..bb8088878d4b 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-dmic.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-dmic.yaml
@@ -28,6 +28,7 @@ properties:
- const: nvidia,tegra210-dmic
- items:
- enum:
+ - nvidia,tegra264-dmic
- nvidia,tegra234-dmic
- nvidia,tegra194-dmic
- nvidia,tegra186-dmic
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml
index a82f11fb6c9a..903e815af8fd 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml
@@ -25,7 +25,9 @@ properties:
compatible:
oneOf:
- - const: nvidia,tegra210-i2s
+ - enum:
+ - nvidia,tegra210-i2s
+ - nvidia,tegra264-i2s
- items:
- enum:
- nvidia,tegra234-i2s
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-mbdrc.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-mbdrc.yaml
index 5b9198602fc6..4c121b9cde1e 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-mbdrc.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-mbdrc.yaml
@@ -23,6 +23,7 @@ properties:
- const: nvidia,tegra210-mbdrc
- items:
- enum:
+ - nvidia,tegra264-mbdrc
- nvidia,tegra234-mbdrc
- nvidia,tegra194-mbdrc
- nvidia,tegra186-mbdrc
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-mixer.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-mixer.yaml
index 049898f02e85..56b4c4fc123c 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-mixer.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-mixer.yaml
@@ -28,6 +28,7 @@ properties:
- const: nvidia,tegra210-amixer
- items:
- enum:
+ - nvidia,tegra264-amixer
- nvidia,tegra234-amixer
- nvidia,tegra194-amixer
- nvidia,tegra186-amixer
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-mvc.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-mvc.yaml
index d0280d8aa3af..bde4ac6319b1 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-mvc.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-mvc.yaml
@@ -31,6 +31,7 @@ properties:
- const: nvidia,tegra210-mvc
- items:
- enum:
+ - nvidia,tegra264-mvc
- nvidia,tegra234-mvc
- nvidia,tegra194-mvc
- nvidia,tegra186-mvc
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-ope.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-ope.yaml
index 9017fb6d575d..756c3096a2d6 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-ope.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-ope.yaml
@@ -25,6 +25,7 @@ properties:
- const: nvidia,tegra210-ope
- items:
- enum:
+ - nvidia,tegra264-ope
- nvidia,tegra234-ope
- nvidia,tegra194-ope
- nvidia,tegra186-ope
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-peq.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-peq.yaml
index 1e373c49d639..2f11a484dc2e 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-peq.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-peq.yaml
@@ -24,6 +24,7 @@ properties:
- const: nvidia,tegra210-peq
- items:
- enum:
+ - nvidia,tegra264-peq
- nvidia,tegra234-peq
- nvidia,tegra194-peq
- nvidia,tegra186-peq
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-sfc.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-sfc.yaml
index 185ca0be4f02..959aa7fffdac 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-sfc.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-sfc.yaml
@@ -28,6 +28,7 @@ properties:
- const: nvidia,tegra210-sfc
- items:
- enum:
+ - nvidia,tegra264-sfc
- nvidia,tegra234-sfc
- nvidia,tegra194-sfc
- nvidia,tegra186-sfc
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml
index 3ca9affb79a2..8a8767589ee0 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml
@@ -20,11 +20,13 @@ properties:
compatible:
oneOf:
- - const: nvidia,tegra30-hda
+ - enum:
+ - nvidia,tegra30-hda
+ - nvidia,tegra194-hda
+ - nvidia,tegra234-hda
+ - nvidia,tegra264-hda
- items:
- enum:
- - nvidia,tegra234-hda
- - nvidia,tegra194-hda
- nvidia,tegra186-hda
- nvidia,tegra210-hda
- nvidia,tegra124-hda
@@ -43,15 +45,12 @@ properties:
maxItems: 1
clocks:
- minItems: 2
+ minItems: 1
maxItems: 3
clock-names:
- minItems: 2
- items:
- - const: hda
- - const: hda2hdmi
- - const: hda2codec_2x
+ minItems: 1
+ maxItems: 3
resets:
minItems: 2
@@ -59,10 +58,7 @@ properties:
reset-names:
minItems: 2
- items:
- - const: hda
- - const: hda2hdmi
- - const: hda2codec_2x
+ maxItems: 3
power-domains:
maxItems: 1
@@ -93,6 +89,92 @@ required:
additionalProperties: false
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nvidia,tegra30-hda
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ clock-names:
+ items:
+ - const: hda
+ - const: hda2hdmi
+ - const: hda2codec_2x
+ resets:
+ minItems: 3
+ reset-names:
+ items:
+ - const: hda
+ - const: hda2hdmi
+ - const: hda2codec_2x
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nvidia,tegra194-hda
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ clock-names:
+ items:
+ - const: hda
+ - const: hda2hdmi
+ - const: hda2codec_2x
+ resets:
+ maxItems: 2
+ reset-names:
+ items:
+ - const: hda
+ - const: hda2hdmi
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nvidia,tegra234-hda
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: hda
+ - const: hda2codec_2x
+ resets:
+ maxItems: 2
+ reset-names:
+ items:
+ - const: hda
+ - const: hda2codec_2x
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nvidia,tegra264-hda
+ then:
+ properties:
+ clocks:
+ maxItems: 1
+ clock-names:
+ items:
+ - const: hda
+ resets:
+ maxItems: 2
+ reset-names:
+ items:
+ - const: hda
+ - const: hda2codec_2x
+ power-domains: false
+
examples:
- |
#include<dt-bindings/clock/tegra124-car-common.h>
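
To make the per-chip constraints above concrete, a Tegra264 HDA node carries exactly one "hda" clock and the two permitted resets, roughly as sketched here; every address, phandle and specifier value is a placeholder, not data from the patch.

    hda@3510000 {
        compatible = "nvidia,tegra264-hda";
        reg = <0x3510000 0x10000>;          /* placeholder */
        interrupts = <0 60 4>;              /* placeholder specifier */
        clocks = <&bpmp 1>;                 /* placeholder: exactly one clock */
        clock-names = "hda";
        resets = <&bpmp 2>, <&bpmp 3>;      /* placeholders: two resets */
        reset-names = "hda", "hda2codec_2x";
        /* power-domains is disallowed for tegra264 by the schema above */
    };
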
diff --git a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
index b9e33a7429b0..22fe6814b706 100644
--- a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
@@ -28,9 +28,12 @@ properties:
- qcom,sm8750-sndcard
- const: qcom,sm8450-sndcard
- enum:
+ - fairphone,fp5-sndcard
- qcom,apq8096-sndcard
- qcom,qcm6490-idp-sndcard
- qcom,qcs6490-rb3gen2-sndcard
+ - qcom,qcs9075-sndcard
+ - qcom,qcs9100-sndcard
- qcom,qrb4210-rb2-sndcard
- qcom,qrb5165-rb5-sndcard
- qcom,sc7180-qdsp6-sndcard
diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml b/Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml
index 10531350c336..ab1c6285dbf8 100644
--- a/Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml
@@ -23,9 +23,15 @@ properties:
- qcom,wcd9380-codec
- qcom,wcd9385-codec
+ mux-controls:
+    description: A reference to the audio mux switch used for
+      switching between CTIA and OMTP headset types
+ maxItems: 1
+
us-euro-gpios:
description: GPIO spec for swapping gnd and mic segments
maxItems: 1
+ deprecated: true
required:
- compatible
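
A rough sketch of a board using the new mux-controls property instead of the deprecated us-euro-gpios follows; &us_euro_mux is an assumed mux-controller node, and the supplies, reset GPIO and other required properties are left out for brevity.

    wcd938x: audio-codec {
        compatible = "qcom,wcd9385-codec";
        /* swaps GND/MIC for CTIA vs OMTP headsets via an external mux */
        mux-controls = <&us_euro_mux>;      /* assumed mux-controller phandle */
    };
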
diff --git a/Documentation/devicetree/bindings/sound/realtek,alc203.yaml b/Documentation/devicetree/bindings/sound/realtek,alc203.yaml
new file mode 100644
index 000000000000..6b90788b45eb
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/realtek,alc203.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/realtek,alc203.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek ALC203 AC97 Audio Codec
+
+maintainers:
+ - Keguang Zhang <keguang.zhang@gmail.com>
+
+description:
+ ALC203 is a full duplex AC97 2.3 compatible stereo audio codec.
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ const: realtek,alc203
+
+ '#sound-dai-cells':
+ const: 0
+
+required:
+ - compatible
+ - '#sound-dai-cells'
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ audio-codec {
+ compatible = "realtek,alc203";
+ #sound-dai-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/richtek,rt9123.yaml b/Documentation/devicetree/bindings/sound/richtek,rt9123.yaml
new file mode 100644
index 000000000000..5acb05cdfefd
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/richtek,rt9123.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/richtek,rt9123.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT9123 Audio Amplifier
+
+maintainers:
+ - ChiYuan Huang <cy_huang@richtek.com>
+
+description:
+ RT9123 is a 3.2W mono Class-D audio amplifier that features high efficiency
+ and performance with ultra-low quiescent current. The digital audio interface
+  supports various formats, including I2S, left-justified, right-justified, and
+ TDM formats.
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - richtek,rt9123
+
+ reg:
+ maxItems: 1
+
+ '#sound-dai-cells':
+ const: 0
+
+ enable-gpios:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - '#sound-dai-cells'
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ amplifier@5e {
+ compatible = "richtek,rt9123";
+ reg = <0x5e>;
+ enable-gpios = <&gpio 26 GPIO_ACTIVE_HIGH>;
+ #sound-dai-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/richtek,rt9123p.yaml b/Documentation/devicetree/bindings/sound/richtek,rt9123p.yaml
new file mode 100644
index 000000000000..693511dfdda4
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/richtek,rt9123p.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/richtek,rt9123p.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT9123P Audio Amplifier
+
+maintainers:
+ - ChiYuan Huang <cy_huang@richtek.com>
+
+description:
+  RT9123P is an RT9123 variant that does not support I2C control.
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - richtek,rt9123p
+
+ '#sound-dai-cells':
+ const: 0
+
+ enable-gpios:
+ maxItems: 1
+
+ enable-delay-ms:
+ description:
+      Delay time applied on 'ENABLE' pin changes, giving the I2S clocks time to
+      become ready and so preventing speaker pop noise. The unit is milliseconds.
+
+required:
+ - compatible
+ - '#sound-dai-cells'
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ amplifier {
+ compatible = "richtek,rt9123p";
+ enable-gpios = <&gpio 26 GPIO_ACTIVE_HIGH>;
+ #sound-dai-cells = <0>;
+ };
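
The enable-delay-ms property described above can be added to the example node as shown below; the 10 ms value is an assumption chosen only to illustrate the property and should be tuned per board.

    #include <dt-bindings/gpio/gpio.h>

    amplifier {
        compatible = "richtek,rt9123p";
        enable-gpios = <&gpio 26 GPIO_ACTIVE_HIGH>;
        enable-delay-ms = <10>;     /* assumed value: wait for I2S clocks before enabling */
        #sound-dai-cells = <0>;
    };
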
diff --git a/Documentation/devicetree/bindings/sound/rockchip,rk3576-sai.yaml b/Documentation/devicetree/bindings/sound/rockchip,rk3576-sai.yaml
new file mode 100644
index 000000000000..149da9a91451
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip,rk3576-sai.yaml
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/rockchip,rk3576-sai.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip Serial Audio Interface Controller
+
+description:
+ The Rockchip Serial Audio Interface (SAI) controller is a flexible audio
+  controller that implements the I2S, I2S/TDM, and PDM standards.
+
+maintainers:
+ - Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ const: rockchip,rk3576-sai
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ dmas:
+ minItems: 1
+ maxItems: 2
+
+ dma-names:
+ minItems: 1
+ items:
+ - enum: [tx, rx]
+ - const: rx
+
+ clocks:
+ items:
+ - description: master audio clock
+ - description: AHB clock driving the interface
+
+ clock-names:
+ items:
+ - const: mclk
+ - const: hclk
+
+ resets:
+ minItems: 1
+ items:
+ - description: reset for the mclk domain
+ - description: reset for the hclk domain
+
+ reset-names:
+ minItems: 1
+ items:
+ - const: m
+ - const: h
+
+ port:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
+ power-domains:
+ maxItems: 1
+
+ "#sound-dai-cells":
+ const: 0
+
+ rockchip,sai-rx-route:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ Defines the mapping of the controller's SDI ports to actual input lanes,
+ as well as the number of input lanes.
+ rockchip,sai-rx-route = <3> would mean sdi3 is receiving from data0, and
+ that there is only one receiving lane.
+      If this property is absent, only one receiving lane is used, provided the
+      controller has capture capabilities.
+ maxItems: 4
+ items:
+ minimum: 0
+ maximum: 3
+
+ rockchip,sai-tx-route:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ Defines the mapping of the controller's SDO ports to actual output lanes,
+ as well as the number of output lanes.
+ rockchip,sai-tx-route = <3> would mean sdo3 is sending to data0, and
+ that there is only one transmitting lane.
+      If this property is absent, only one transmitting lane is used, provided the
+      controller has playback capabilities.
+ maxItems: 4
+ items:
+ minimum: 0
+ maximum: 3
+
+required:
+ - compatible
+ - reg
+ - dmas
+ - dma-names
+ - clocks
+ - clock-names
+ - "#sound-dai-cells"
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rockchip,rk3576-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/pinctrl/rockchip.h>
+ #include <dt-bindings/power/rockchip,rk3576-power.h>
+ #include <dt-bindings/reset/rockchip,rk3576-cru.h>
+
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ sai1: sai@2a610000 {
+ compatible = "rockchip,rk3576-sai";
+ reg = <0x0 0x2a610000 0x0 0x1000>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru MCLK_SAI1_8CH>, <&cru HCLK_SAI1_8CH>;
+ clock-names = "mclk", "hclk";
+ dmas = <&dmac0 2>, <&dmac0 3>;
+ dma-names = "tx", "rx";
+ power-domains = <&power RK3576_PD_AUDIO>;
+ resets = <&cru SRST_M_SAI1_8CH>, <&cru SRST_H_SAI1_8CH>;
+ reset-names = "m", "h";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sai1m0_lrck
+ &sai1m0_sclk
+ &sai1m0_sdi0
+ &sai1m0_sdo0
+ &sai1m0_sdo1
+ &sai1m0_sdo2
+ &sai1m0_sdo3>;
+ rockchip,sai-tx-route = <3 1 2 0>;
+ #sound-dai-cells = <0>;
+ };
+ };
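
For the receive direction, which the example above does not exercise, a board wiring two capture lanes so that sdi3 carries data0 and sdi2 carries data1 could add the following property to the node; the lane assignment is purely illustrative.

    /* sdi3 <- data0, sdi2 <- data1: two receive lanes (illustrative) */
    rockchip,sai-rx-route = <3 2>;
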
diff --git a/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml b/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml
index 3591c8c49bfe..95d947fda6a7 100644
--- a/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml
+++ b/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml
@@ -15,13 +15,18 @@ description:
properties:
compatible:
- enum:
- - qcom,soundwire-v1.3.0
- - qcom,soundwire-v1.5.0
- - qcom,soundwire-v1.5.1
- - qcom,soundwire-v1.6.0
- - qcom,soundwire-v1.7.0
- - qcom,soundwire-v2.0.0
+ oneOf:
+ - enum:
+ - qcom,soundwire-v1.3.0
+ - qcom,soundwire-v1.5.0
+ - qcom,soundwire-v1.5.1
+ - qcom,soundwire-v1.6.0
+ - qcom,soundwire-v1.7.0
+ - qcom,soundwire-v2.0.0
+ - items:
+ - enum:
+ - qcom,soundwire-v2.1.0
+ - const: qcom,soundwire-v2.0.0
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/spi/fsl,dspi.yaml b/Documentation/devicetree/bindings/spi/fsl,dspi.yaml
index 7ca8fceda717..bf9cce53c48d 100644
--- a/Documentation/devicetree/bindings/spi/fsl,dspi.yaml
+++ b/Documentation/devicetree/bindings/spi/fsl,dspi.yaml
@@ -105,12 +105,12 @@ examples:
big-endian;
flash@0 {
- compatible = "jedec,spi-nor";
- reg = <0>;
- spi-max-frequency = <16000000>;
- spi-cpol;
- spi-cpha;
- spi-cs-setup-delay-ns = <100>;
- spi-cs-hold-delay-ns = <50>;
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <16000000>;
+ spi-cpol;
+ spi-cpha;
+ spi-cs-setup-delay-ns = <100>;
+ spi-cs-hold-delay-ns = <50>;
};
};
diff --git a/Documentation/devicetree/bindings/spi/nuvoton,wpcm450-fiu.yaml b/Documentation/devicetree/bindings/spi/nuvoton,wpcm450-fiu.yaml
index 4e0d391e1d69..c97bf48b56b4 100644
--- a/Documentation/devicetree/bindings/spi/nuvoton,wpcm450-fiu.yaml
+++ b/Documentation/devicetree/bindings/spi/nuvoton,wpcm450-fiu.yaml
@@ -59,8 +59,3 @@ examples:
reg = <0>;
};
};
-
- shm: syscon@c8001000 {
- compatible = "nuvoton,wpcm450-shm", "syscon";
- reg = <0xc8001000 0x1000>;
- };
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml b/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml
index 48e97e240265..8b3640280559 100644
--- a/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml
+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml
@@ -10,9 +10,6 @@ maintainers:
- Thierry Reding <thierry.reding@gmail.com>
- Jonathan Hunter <jonathanh@nvidia.com>
-allOf:
- - $ref: spi-controller.yaml#
-
properties:
compatible:
enum:
@@ -47,6 +44,9 @@ properties:
- const: rx
- const: tx
+ iommus:
+ maxItems: 1
+
patternProperties:
"@[0-9a-f]+$":
type: object
@@ -69,6 +69,18 @@ required:
unevaluatedProperties: false
+allOf:
+ - $ref: spi-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ const: nvidia,tegra234-qspi
+ then:
+ properties:
+ iommus: false
+
examples:
- |
#include <dt-bindings/clock/tegra210-car.h>
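
Because the new conditional only permits iommus on the Tegra234 QSPI, such a node might look like the sketch below; the unit address, interrupt/clock specifiers, assumed clock names and SMMU stream ID are placeholders, and the remaining required properties are trimmed.

    spi@3270000 {
        compatible = "nvidia,tegra234-qspi";
        reg = <0x3270000 0x1000>;           /* placeholder */
        interrupts = <0 35 4>;              /* placeholder specifier */
        clocks = <&bpmp 1>, <&bpmp 2>;      /* placeholders */
        clock-names = "qspi", "qspi_out";   /* assumed names */
        iommus = <&smmu 0x10>;              /* placeholder stream ID */
        #address-cells = <1>;
        #size-cells = <0>;
    };
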
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml b/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml
index aa3f93319203..cb1f15224b45 100644
--- a/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml
@@ -21,8 +21,12 @@ allOf:
properties:
compatible:
- enum:
- - qcom,ipq9574-snand
+ oneOf:
+ - items:
+ - enum:
+ - qcom,ipq5018-snand
+ - const: qcom,ipq9574-snand
+ - const: qcom,ipq9574-snand
reg:
maxItems: 1
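
With the compatible list reworked above, an IPQ5018 controller lists its own compatible first and falls back to the IPQ9574 one, roughly as sketched here; the address is a placeholder and the clock/DMA properties and flash child node are omitted for brevity.

    spi@79b0000 {
        compatible = "qcom,ipq5018-snand", "qcom,ipq9574-snand";
        reg = <0x079b0000 0x10000>;     /* placeholder */
        #address-cells = <1>;
        #size-cells = <0>;
    };
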
diff --git a/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml b/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
index 49649fc3f95a..e0c7047ae8ad 100644
--- a/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
+++ b/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
@@ -4,14 +4,11 @@
$id: http://devicetree.org/schemas/spi/renesas,sh-msiof.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Renesas MSIOF SPI controller
+title: Renesas MSIOF SPI / I2S controller
maintainers:
- Geert Uytterhoeven <geert+renesas@glider.be>
-allOf:
- - $ref: spi-controller.yaml#
-
properties:
compatible:
oneOf:
@@ -146,24 +143,38 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
default: 64
+ # for MSIOF-I2S
+ port:
+ $ref: ../sound/audio-graph-port.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- reg
- interrupts
- clocks
- power-domains
- - '#address-cells'
- - '#size-cells'
-
-if:
- not:
- properties:
- compatible:
- contains:
- const: renesas,sh-mobile-msiof
-then:
- required:
- - resets
+
+allOf:
+  # additional "required"
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: renesas,sh-mobile-msiof
+ then:
+ required:
+ - resets
+
+  # If it doesn't have a "port" node, it is "MSIOF-SPI"
+ - if:
+ not:
+ required:
+ - port
+ then:
+ allOf:
+ - $ref: spi-controller.yaml#
unevaluatedProperties: false
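
The schema now treats an MSIOF instance with a "port" node as the I2S variant, so a sound-mode node might look roughly like the sketch below; the compatible strings, address and &codec_ep label are assumptions, and the interrupts, clocks, resets and power-domains the binding requires are not shown.

    msiof1: msiof@e6e90000 {
        compatible = "renesas,msiof-r8a7796", "renesas,rcar-gen3-msiof"; /* assumed */
        reg = <0xe6e90000 0x0064>;          /* placeholder */
        /* the presence of "port" selects MSIOF-I2S; spi-controller.yaml is not applied */
        port {
            msiof1_ep: endpoint {
                remote-endpoint = <&codec_ep>; /* assumed codec endpoint */
            };
        };
    };
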
diff --git a/Documentation/devicetree/bindings/spi/samsung,spi.yaml b/Documentation/devicetree/bindings/spi/samsung,spi.yaml
index 3c206a64d60a..fe298d47b1a9 100644
--- a/Documentation/devicetree/bindings/spi/samsung,spi.yaml
+++ b/Documentation/devicetree/bindings/spi/samsung,spi.yaml
@@ -29,6 +29,7 @@ properties:
- items:
- enum:
- samsung,exynos8895-spi
+ - samsung,exynosautov920-spi
- const: samsung,exynos850-spi
- const: samsung,exynos7-spi
deprecated: true
diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
index bccd00a1ddd0..0543c526b783 100644
--- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
+++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
@@ -56,19 +56,18 @@ properties:
enum:
- snps,dw-apb-ssi
- snps,dwc-ssi-1.01a
- - description: Microsemi Ocelot/Jaguar2 SoC SPI Controller
- items:
- - enum:
- - mscc,ocelot-spi
- - mscc,jaguar2-spi
- - const: snps,dw-apb-ssi
- description: Microchip Sparx5 SoC SPI Controller
const: microchip,sparx5-spi
- description: Amazon Alpine SPI Controller
const: amazon,alpine-dw-apb-ssi
- - description: Renesas RZ/N1 SPI Controller
+ - description: Vendor controllers which use snps,dw-apb-ssi as fallback
items:
- - const: renesas,rzn1-spi
+ - enum:
+ - mscc,ocelot-spi
+ - mscc,jaguar2-spi
+ - renesas,rzn1-spi
+ - sophgo,sg2042-spi
+ - thead,th1520-spi
- const: snps,dw-apb-ssi
- description: Intel Keem Bay SPI Controller
const: intel,keembay-ssi
@@ -84,14 +83,8 @@ properties:
const: canaan,k210-spi
- description: Renesas RZ/N1 SPI Controller
items:
- - enum:
- - renesas,r9a06g032-spi # RZ/N1D
- - renesas,r9a06g033-spi # RZ/N1S
+ - const: renesas,r9a06g032-spi # RZ/N1D
- const: renesas,rzn1-spi # RZ/N1
- - description: T-HEAD TH1520 SoC SPI Controller
- items:
- - const: thead,th1520-spi
- - const: snps,dw-apb-ssi
reg:
minItems: 1
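
Under the consolidated entry above, a SoC-specific controller simply lists its own compatible followed by the snps,dw-apb-ssi fallback, e.g. for the SG2042; the address, interrupt and clock values below are placeholders.

    spi@4140000 {
        compatible = "sophgo,sg2042-spi", "snps,dw-apb-ssi";
        reg = <0x4140000 0x1000>;       /* placeholder */
        interrupts = <54>;              /* placeholder */
        clocks = <&clk 1>;              /* placeholder */
        #address-cells = <1>;
        #size-cells = <0>;
    };
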
diff --git a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
index 0bb443b8decd..8fc17e16efb2 100644
--- a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
@@ -8,12 +8,13 @@ title: Peripheral-specific properties for a SPI bus.
description:
Many SPI controllers need to add properties to peripheral devices. They could
- be common properties like spi-max-frequency, spi-cpha, etc. or they could be
- controller specific like delay in clock or data lines, etc. These properties
- need to be defined in the peripheral node because they are per-peripheral and
- there can be multiple peripherals attached to a controller. All those
- properties are listed here. The controller specific properties should go in
- their own separate schema that should be referenced from here.
+ be common properties like spi-max-frequency, spi-cs-high, etc. or they could
+ be controller specific like delay in clock or data lines, etc. These
+ properties need to be defined in the peripheral node because they are
+ per-peripheral and there can be multiple peripherals attached to a
+ controller. All those properties are listed here. The controller specific
+ properties should go in their own separate schema that should be referenced
+ from here.
maintainers:
- Mark Brown <broonie@kernel.org>
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
index 104f5ffdd04e..748faf7f7081 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
@@ -34,6 +34,7 @@ properties:
- rockchip,rk3328-spi
- rockchip,rk3368-spi
- rockchip,rk3399-spi
+ - rockchip,rk3528-spi
- rockchip,rk3562-spi
- rockchip,rk3568-spi
- rockchip,rk3576-spi
diff --git a/Documentation/devicetree/bindings/spi/st,stm32mp25-ospi.yaml b/Documentation/devicetree/bindings/spi/st,stm32mp25-ospi.yaml
index 5f276f27dc4c..272bc308726b 100644
--- a/Documentation/devicetree/bindings/spi/st,stm32mp25-ospi.yaml
+++ b/Documentation/devicetree/bindings/spi/st,stm32mp25-ospi.yaml
@@ -68,6 +68,7 @@ required:
- compatible
- reg
- clocks
+ - resets
- interrupts
- st,syscfg-dlyb
diff --git a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
index a7236f7db4ec..e7f7cf72719e 100644
--- a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
+++ b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
@@ -50,6 +50,7 @@ properties:
- enum:
- allwinner,sun50i-a100-system-control
- allwinner,sun50i-h6-system-control
+ - allwinner,sun55i-a523-system-control
- const: allwinner,sun50i-a64-system-control
reg:
diff --git a/Documentation/devicetree/bindings/thermal/airoha,en7581-thermal.yaml b/Documentation/devicetree/bindings/thermal/airoha,en7581-thermal.yaml
new file mode 100644
index 000000000000..ca0242ef0378
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/airoha,en7581-thermal.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/airoha,en7581-thermal.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Airoha EN7581 Thermal Sensor and Monitor
+
+maintainers:
+ - Christian Marangi <ansuelsmth@gmail.com>
+
+properties:
+ compatible:
+ const: airoha,en7581-thermal
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ airoha,chip-scu:
+ description: phandle to the chip SCU syscon
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ '#thermal-sensor-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - airoha,chip-scu
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    thermal-sensor@1efbd000 {
+ compatible = "airoha,en7581-thermal";
+ reg = <0x1efbd000 0xd5c>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ airoha,chip-scu = <&chip_scu>;
+
+ #thermal-sensor-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
index f9d8012c8cf5..0e653bbe9884 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
@@ -39,6 +39,7 @@ properties:
- description: v1 of TSENS
items:
- enum:
+ - qcom,ipq5018-tsens
- qcom,msm8937-tsens
- qcom,msm8956-tsens
- qcom,msm8976-tsens
@@ -251,6 +252,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq5018-tsens
- qcom,ipq8064-tsens
- qcom,msm8960-tsens
- qcom,tsens-v0_1
diff --git a/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt b/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt
deleted file mode 100644
index e698e3488735..000000000000
--- a/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-Altera Timer
-
-Required properties:
-
-- compatible : should be "altr,timer-1.0"
-- reg : Specifies base physical address and size of the registers.
-- interrupts : Should contain the timer interrupt number
-- clock-frequency : The frequency of the clock that drives the counter, in Hz.
-
-Example:
-
-timer {
- compatible = "altr,timer-1.0";
- reg = <0x00400000 0x00000020>;
- interrupt-parent = <&cpu>;
- interrupts = <11>;
- clock-frequency = <125000000>;
-};
diff --git a/Documentation/devicetree/bindings/timer/altr,timer-1.0.yaml b/Documentation/devicetree/bindings/timer/altr,timer-1.0.yaml
new file mode 100644
index 000000000000..576260c72d42
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/altr,timer-1.0.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/altr,timer-1.0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera Timer
+
+maintainers:
+ - Dinh Nguyen <dinguyen@kernel.org>
+
+properties:
+ compatible:
+ const: altr,timer-1.0
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clock-frequency:
+ description: Frequency of the clock that drives the counter, in Hz.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@400000 {
+ compatible = "altr,timer-1.0";
+ reg = <0x00400000 0x00000020>;
+ interrupts = <11>;
+ clock-frequency = <125000000>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/arm,mps2-timer.txt b/Documentation/devicetree/bindings/timer/arm,mps2-timer.txt
deleted file mode 100644
index 48f84d74edde..000000000000
--- a/Documentation/devicetree/bindings/timer/arm,mps2-timer.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-ARM MPS2 timer
-
-The MPS2 platform has simple general-purpose 32 bits timers.
-
-Required properties:
-- compatible : Should be "arm,mps2-timer"
-- reg : Address and length of the register set
-- interrupts : Reference to the timer interrupt
-
-Required clocking property, have to be one of:
-- clocks : The input clock of the timer
-- clock-frequency : The rate in HZ in input of the ARM MPS2 timer
-
-Examples:
-
-timer1: mps2-timer@40000000 {
- compatible = "arm,mps2-timer";
- reg = <0x40000000 0x1000>;
- interrupts = <8>;
- clocks = <&sysclk>;
-};
-
-timer2: mps2-timer@40001000 {
- compatible = "arm,mps2-timer";
- reg = <0x40001000 0x1000>;
- interrupts = <9>;
- clock-frequency = <25000000>;
-};
diff --git a/Documentation/devicetree/bindings/timer/arm,mps2-timer.yaml b/Documentation/devicetree/bindings/timer/arm,mps2-timer.yaml
new file mode 100644
index 000000000000..64c6aedd7e8e
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/arm,mps2-timer.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/arm,mps2-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM MPS2 timer
+
+maintainers:
+ - Vladimir Murzin <vladimir.murzin@arm.com>
+
+description:
+  The MPS2 platform has simple general-purpose 32-bit timers.
+
+properties:
+ compatible:
+ const: arm,mps2-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-frequency:
+ description: Rate in Hz of the timer input clock
+
+oneOf:
+ - required: [clocks]
+ - required: [clock-frequency]
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@40000000 {
+ compatible = "arm,mps2-timer";
+ reg = <0x40000000 0x1000>;
+ interrupts = <8>;
+ clocks = <&sysclk>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt b/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt
deleted file mode 100644
index d4c62e7b1714..000000000000
--- a/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-* Cirrus Logic CLPS711X Timer Counter
-
-Required properties:
-- compatible: Shall contain "cirrus,ep7209-timer".
-- reg : Address and length of the register set.
-- interrupts: The interrupt number of the timer.
-- clocks : phandle of timer reference clock.
-
-Note: Each timer should have an alias correctly numbered in "aliases" node.
-
-Example:
- aliases {
- timer0 = &timer1;
- timer1 = &timer2;
- };
-
- timer1: timer@80000300 {
- compatible = "cirrus,ep7312-timer", "cirrus,ep7209-timer";
- reg = <0x80000300 0x4>;
- interrupts = <8>;
- clocks = <&clks 5>;
- };
-
- timer2: timer@80000340 {
- compatible = "cirrus,ep7312-timer", "cirrus,ep7209-timer";
- reg = <0x80000340 0x4>;
- interrupts = <9>;
- clocks = <&clks 6>;
- };
diff --git a/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.yaml b/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.yaml
new file mode 100644
index 000000000000..507b777e16bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/cirrus,clps711x-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic CLPS711X Timer Counter
+
+maintainers:
+ - Alexander Shiyan <shc_work@mail.ru>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - cirrus,ep7312-timer
+ - const: cirrus,ep7209-timer
+ - const: cirrus,ep7209-timer
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@80000300 {
+ compatible = "cirrus,ep7312-timer", "cirrus,ep7209-timer";
+ reg = <0x80000300 0x4>;
+ interrupts = <8>;
+ clocks = <&clks 5>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/cnxt,cx92755-timer.yaml b/Documentation/devicetree/bindings/timer/cnxt,cx92755-timer.yaml
new file mode 100644
index 000000000000..8f1a5af32a36
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/cnxt,cx92755-timer.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/cnxt,cx92755-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Conexant Digicolor SoCs Timer Controller
+
+maintainers:
+ - Baruch Siach <baruch@tkos.co.il>
+
+properties:
+ compatible:
+ const: cnxt,cx92755-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: Contains 8 interrupts, one for each timer
+ items:
+ - description: interrupt for timer 0
+ - description: interrupt for timer 1
+ - description: interrupt for timer 2
+ - description: interrupt for timer 3
+ - description: interrupt for timer 4
+ - description: interrupt for timer 5
+ - description: interrupt for timer 6
+ - description: interrupt for timer 7
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@f0000fc0 {
+ compatible = "cnxt,cx92755-timer";
+ reg = <0xf0000fc0 0x40>;
+ interrupts = <19>, <31>, <34>, <35>, <52>, <53>, <54>, <55>;
+ clocks = <&main_clk>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/csky,gx6605s-timer.txt b/Documentation/devicetree/bindings/timer/csky,gx6605s-timer.txt
deleted file mode 100644
index 6b04344f4bea..000000000000
--- a/Documentation/devicetree/bindings/timer/csky,gx6605s-timer.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-=================
-gx6605s SOC Timer
-=================
-
-The timer is used in gx6605s soc as system timer and the driver
-contain clk event and clk source.
-
-==============================
-timer node bindings definition
-==============================
-
- Description: Describes gx6605s SOC timer
-
- PROPERTIES
-
- - compatible
- Usage: required
- Value type: <string>
- Definition: must be "csky,gx6605s-timer"
- - reg
- Usage: required
- Value type: <u32 u32>
- Definition: <phyaddr size> in soc from cpu view
- - clocks
- Usage: required
- Value type: phandle + clock specifier cells
- Definition: must be input clk node
- - interrupt
- Usage: required
- Value type: <u32>
- Definition: must be timer irq num defined by soc
-
-Examples:
----------
-
- timer0: timer@20a000 {
- compatible = "csky,gx6605s-timer";
- reg = <0x0020a000 0x400>;
- clocks = <&dummy_apb_clk>;
- interrupts = <10>;
- interrupt-parent = <&intc>;
- };
diff --git a/Documentation/devicetree/bindings/timer/csky,gx6605s-timer.yaml b/Documentation/devicetree/bindings/timer/csky,gx6605s-timer.yaml
new file mode 100644
index 000000000000..888fc8113996
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/csky,gx6605s-timer.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/csky,gx6605s-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: gx6605s SOC Timer
+
+maintainers:
+ - Guo Ren <guoren@kernel.org>
+
+properties:
+ compatible:
+ const: csky,gx6605s-timer
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@20a000 {
+ compatible = "csky,gx6605s-timer";
+ reg = <0x0020a000 0x400>;
+ clocks = <&dummy_apb_clk>;
+ interrupts = <10>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/csky,mptimer.txt b/Documentation/devicetree/bindings/timer/csky,mptimer.txt
deleted file mode 100644
index f5c7e99cf52b..000000000000
--- a/Documentation/devicetree/bindings/timer/csky,mptimer.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-============================
-C-SKY Multi-processors Timer
-============================
-
-C-SKY multi-processors timer is designed for C-SKY SMP system and the
-regs is accessed by cpu co-processor 4 registers with mtcr/mfcr.
-
- - PTIM_CTLR "cr<0, 14>" Control reg to start reset timer.
- - PTIM_TSR "cr<1, 14>" Interrupt cleanup status reg.
- - PTIM_CCVR "cr<3, 14>" Current counter value reg.
- - PTIM_LVR "cr<6, 14>" Window value reg to trigger next event.
-
-==============================
-timer node bindings definition
-==============================
-
- Description: Describes SMP timer
-
- PROPERTIES
-
- - compatible
- Usage: required
- Value type: <string>
- Definition: must be "csky,mptimer"
- - clocks
- Usage: required
- Value type: <node>
- Definition: must be input clk node
- - interrupts
- Usage: required
- Value type: <u32>
- Definition: must be timer irq num defined by soc
-
-Examples:
----------
-
- timer: timer {
- compatible = "csky,mptimer";
- clocks = <&dummy_apb_clk>;
- interrupts = <16>;
- interrupt-parent = <&intc>;
- };
diff --git a/Documentation/devicetree/bindings/timer/csky,mptimer.yaml b/Documentation/devicetree/bindings/timer/csky,mptimer.yaml
new file mode 100644
index 000000000000..12cc5282c8f8
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/csky,mptimer.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/csky,mptimer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: C-SKY Multi-processors Timer
+
+maintainers:
+ - Flavio Suligoi <f.suligoi@asem.it>
+ - Guo Ren <guoren@kernel.org>
+
+description: |
+  The C-SKY multi-processor timer is designed for C-SKY SMP systems; its registers
+  are accessed through CPU co-processor 4 registers using mtcr/mfcr.
+
+ - PTIM_CTLR "cr<0, 14>" Control reg to start reset timer.
+ - PTIM_TSR "cr<1, 14>" Interrupt cleanup status reg.
+ - PTIM_CCVR "cr<3, 14>" Current counter value reg.
+ - PTIM_LVR "cr<6, 14>" Window value reg to trigger next event.
+
+properties:
+ compatible:
+ items:
+ - const: csky,mptimer
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ timer {
+ compatible = "csky,mptimer";
+ clocks = <&dummy_apb_clk>;
+ interrupts = <16>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/digicolor-timer.txt b/Documentation/devicetree/bindings/timer/digicolor-timer.txt
deleted file mode 100644
index d1b659bbc29f..000000000000
--- a/Documentation/devicetree/bindings/timer/digicolor-timer.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-Conexant Digicolor SoCs Timer Controller
-
-Required properties:
-
-- compatible : should be "cnxt,cx92755-timer"
-- reg : Specifies base physical address and size of the "Agent Communication"
- timer registers
-- interrupts : Contains 8 interrupts, one for each timer
-- clocks: phandle to the main clock
-
-Example:
-
- timer@f0000fc0 {
- compatible = "cnxt,cx92755-timer";
- reg = <0xf0000fc0 0x40>;
- interrupts = <19>, <31>, <34>, <35>, <52>, <53>, <54>, <55>;
- clocks = <&main_clk>;
- };
diff --git a/Documentation/devicetree/bindings/timer/econet,en751221-timer.yaml b/Documentation/devicetree/bindings/timer/econet,en751221-timer.yaml
new file mode 100644
index 000000000000..c1e7c2b6afde
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/econet,en751221-timer.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/econet,en751221-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: EcoNet EN751221 High Precision Timer (HPT)
+
+maintainers:
+ - Caleb James DeLisle <cjd@cjdns.fr>
+
+description:
+ The EcoNet High Precision Timer (HPT) is a timer peripheral found in various
+ EcoNet SoCs, including the EN751221 and EN751627 families. It provides per-VPE
+ count/compare registers and a per-CPU control register, with a single interrupt
+ line using a percpu-devid interrupt mechanism.
+
+properties:
+ compatible:
+ oneOf:
+ - const: econet,en751221-timer
+ - items:
+ - const: econet,en751627-timer
+ - const: econet,en751221-timer
+
+ reg:
+ minItems: 1
+ maxItems: 2
+
+ interrupts:
+ maxItems: 1
+ description: A percpu-devid timer interrupt shared across CPUs.
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: econet,en751627-timer
+ then:
+ properties:
+ reg:
+ items:
+ - description: VPE timers 0 and 1
+ - description: VPE timers 2 and 3
+ else:
+ properties:
+ reg:
+ items:
+ - description: VPE timers 0 and 1
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@1fbf0400 {
+ compatible = "econet,en751627-timer", "econet,en751221-timer";
+ reg = <0x1fbf0400 0x100>, <0x1fbe0000 0x100>;
+ interrupt-parent = <&intc>;
+ interrupts = <30>;
+ clocks = <&hpt_clock>;
+ };
+ - |
+    timer@1fbe0400 {
+ compatible = "econet,en751221-timer";
+ reg = <0x1fbe0400 0x100>;
+ interrupt-parent = <&intc>;
+ interrupts = <30>;
+ clocks = <&hpt_clock>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/timer/ezchip,nps400-timer.yaml b/Documentation/devicetree/bindings/timer/ezchip,nps400-timer.yaml
new file mode 100644
index 000000000000..317c5010c4c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/ezchip,nps400-timer.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/ezchip,nps400-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: EZChip NPS400 Timers
+
+maintainers:
+ - Noam Camus <noamca@mellanox.com>
+
+properties:
+ compatible:
+ enum:
+ - ezchip,nps400-timer0
+ - ezchip,nps400-timer1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ezchip,nps400-timer0
+ then:
+ required: [ interrupts ]
+
+examples:
+ - |
+ timer {
+ compatible = "ezchip,nps400-timer0";
+ interrupts = <3>;
+ clocks = <&sysclk>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/ezchip,nps400-timer0.txt b/Documentation/devicetree/bindings/timer/ezchip,nps400-timer0.txt
deleted file mode 100644
index e3cfce8fecc5..000000000000
--- a/Documentation/devicetree/bindings/timer/ezchip,nps400-timer0.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-NPS Network Processor
-
-Required properties:
-
-- compatible : should be "ezchip,nps400-timer0"
-
-Clocks required for compatible = "ezchip,nps400-timer0":
-- interrupts : The interrupt of the first timer
-- clocks : Must contain a single entry describing the clock input
-
-Example:
-
-timer {
- compatible = "ezchip,nps400-timer0";
- interrupts = <3>;
- clocks = <&sysclk>;
-};
diff --git a/Documentation/devicetree/bindings/timer/ezchip,nps400-timer1.txt b/Documentation/devicetree/bindings/timer/ezchip,nps400-timer1.txt
deleted file mode 100644
index c0ab4190b8fb..000000000000
--- a/Documentation/devicetree/bindings/timer/ezchip,nps400-timer1.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-NPS Network Processor
-
-Required properties:
-
-- compatible : should be "ezchip,nps400-timer1"
-
-Clocks required for compatible = "ezchip,nps400-timer1":
-- clocks : Must contain a single entry describing the clock input
-
-Example:
-
-timer {
- compatible = "ezchip,nps400-timer1";
- clocks = <&sysclk>;
-};
diff --git a/Documentation/devicetree/bindings/timer/fsl,gtm.txt b/Documentation/devicetree/bindings/timer/fsl,gtm.txt
deleted file mode 100644
index fc1c571f7412..000000000000
--- a/Documentation/devicetree/bindings/timer/fsl,gtm.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-* Freescale General-purpose Timers Module
-
-Required properties:
- - compatible : should be
- "fsl,<chip>-gtm", "fsl,gtm" for SOC GTMs
- "fsl,<chip>-qe-gtm", "fsl,qe-gtm", "fsl,gtm" for QE GTMs
- "fsl,<chip>-cpm2-gtm", "fsl,cpm2-gtm", "fsl,gtm" for CPM2 GTMs
- - reg : should contain gtm registers location and length (0x40).
- - interrupts : should contain four interrupts.
- - clock-frequency : specifies the frequency driving the timer.
-
-Example:
-
-timer@500 {
- compatible = "fsl,mpc8360-gtm", "fsl,gtm";
- reg = <0x500 0x40>;
- interrupts = <90 8 78 8 84 8 72 8>;
- interrupt-parent = <&ipic>;
- /* filled by u-boot */
- clock-frequency = <0>;
-};
-
-timer@440 {
- compatible = "fsl,mpc8360-qe-gtm", "fsl,qe-gtm", "fsl,gtm";
- reg = <0x440 0x40>;
- interrupts = <12 13 14 15>;
- interrupt-parent = <&qeic>;
- /* filled by u-boot */
- clock-frequency = <0>;
-};
diff --git a/Documentation/devicetree/bindings/timer/fsl,gtm.yaml b/Documentation/devicetree/bindings/timer/fsl,gtm.yaml
new file mode 100644
index 000000000000..1f35f1ee0be2
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/fsl,gtm.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/fsl,gtm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale General-purpose Timers Module
+
+maintainers:
+ - J. Neuschäfer <j.ne@posteo.net>
+
+properties:
+ compatible:
+ oneOf:
+ # for SoC GTMs
+ - items:
+ - enum:
+ - fsl,mpc8308-gtm
+ - fsl,mpc8313-gtm
+ - fsl,mpc8315-gtm
+ - fsl,mpc8360-gtm
+ - const: fsl,gtm
+
+ # for QE GTMs
+ - items:
+ - enum:
+ - fsl,mpc8360-qe-gtm
+ - fsl,mpc8569-qe-gtm
+ - const: fsl,qe-gtm
+ - const: fsl,gtm
+
+ # for CPM2 GTMs (no known examples)
+ - items:
+ # - enum:
+ # - fsl,<chip>-cpm2-gtm
+ - const: fsl,cpm2-gtm
+ - const: fsl,gtm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: Interrupt for timer 1 (e.g. GTM1 or GTM5)
+ - description: Interrupt for timer 2 (e.g. GTM2 or GTM6)
+ - description: Interrupt for timer 3 (e.g. GTM3 or GTM7)
+ - description: Interrupt for timer 4 (e.g. GTM4 or GTM8)
+
+ clock-frequency: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clock-frequency
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ timer@500 {
+ compatible = "fsl,mpc8360-gtm", "fsl,gtm";
+ reg = <0x500 0x40>;
+ interrupts = <90 IRQ_TYPE_LEVEL_LOW>,
+ <78 IRQ_TYPE_LEVEL_LOW>,
+ <84 IRQ_TYPE_LEVEL_LOW>,
+ <72 IRQ_TYPE_LEVEL_LOW>;
+ /* filled by u-boot */
+ clock-frequency = <0>;
+ };
+
+ - |
+ timer@440 {
+ compatible = "fsl,mpc8360-qe-gtm", "fsl,qe-gtm", "fsl,gtm";
+ reg = <0x440 0x40>;
+ interrupts = <12>, <13>, <14>, <15>;
+ /* filled by u-boot */
+ clock-frequency = <0>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/timer/fsl,vf610-pit.yaml b/Documentation/devicetree/bindings/timer/fsl,vf610-pit.yaml
new file mode 100644
index 000000000000..bee2c35bd0e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/fsl,vf610-pit.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/fsl,vf610-pit.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Periodic Interrupt Timer (PIT)
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+ The PIT module is an array of timers that can be used to raise interrupts
+ and trigger DMA channels.
+
+properties:
+ compatible:
+ enum:
+ - fsl,vf610-pit
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: pit
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/vf610-clock.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ timer@40037000 {
+ compatible = "fsl,vf610-pit";
+ reg = <0x40037000 0x1000>;
+ interrupts = <39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks VF610_CLK_PIT>;
+ clock-names = "pit";
+ };
diff --git a/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.txt b/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.txt
deleted file mode 100644
index 7afce80bf6a0..000000000000
--- a/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Pistachio general-purpose timer based clocksource
-
-Required properties:
- - compatible: "img,pistachio-gptimer".
- - reg: Address range of the timer registers.
- - interrupts: An interrupt for each of the four timers
- - clocks: Should contain a clock specifier for each entry in clock-names
- - clock-names: Should contain the following entries:
- "sys", interface clock
- "slow", slow counter clock
- "fast", fast counter clock
- - img,cr-periph: Must contain a phandle to the peripheral control
- syscon node.
-
-Example:
- timer: timer@18102000 {
- compatible = "img,pistachio-gptimer";
- reg = <0x18102000 0x100>;
- interrupts = <GIC_SHARED 60 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SHARED 61 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SHARED 62 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SHARED 63 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk_periph PERIPH_CLK_COUNTER_FAST>,
- <&clk_periph PERIPH_CLK_COUNTER_SLOW>,
- <&cr_periph SYS_CLK_TIMER>;
- clock-names = "fast", "slow", "sys";
- img,cr-periph = <&cr_periph>;
- };
diff --git a/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.yaml b/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.yaml
new file mode 100644
index 000000000000..a8654bcf68a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/img,pistachio-gptimer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Pistachio general-purpose timer
+
+maintainers:
+ - Ezequiel Garcia <ezequiel.garcia@imgtec.com>
+
+properties:
+ compatible:
+ const: img,pistachio-gptimer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: Timer0 interrupt
+ - description: Timer1 interrupt
+ - description: Timer2 interrupt
+ - description: Timer3 interrupt
+
+ clocks:
+ items:
+ - description: Fast counter clock
+ - description: Slow counter clock
+ - description: Interface clock
+
+ clock-names:
+ items:
+ - const: fast
+ - const: slow
+ - const: sys
+
+ img,cr-periph:
+ description: Peripheral control syscon phandle
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - img,cr-periph
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/mips-gic.h>
+ #include <dt-bindings/clock/pistachio-clk.h>
+
+ timer@18102000 {
+ compatible = "img,pistachio-gptimer";
+ reg = <0x18102000 0x100>;
+ interrupts = <GIC_SHARED 60 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 61 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 62 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 63 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_periph PERIPH_CLK_COUNTER_FAST>,
+ <&clk_periph PERIPH_CLK_COUNTER_SLOW>,
+ <&cr_periph SYS_CLK_TIMER>;
+ clock-names = "fast", "slow", "sys";
+ img,cr-periph = <&cr_periph>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/jcore,pit.txt b/Documentation/devicetree/bindings/timer/jcore,pit.txt
deleted file mode 100644
index af5dd35469d7..000000000000
--- a/Documentation/devicetree/bindings/timer/jcore,pit.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-J-Core Programmable Interval Timer and Clocksource
-
-Required properties:
-
-- compatible: Must be "jcore,pit".
-
-- reg: Memory region(s) for timer/clocksource registers. For SMP,
- there should be one region per cpu, indexed by the sequential,
- zero-based hardware cpu number.
-
-- interrupts: An interrupt to assign for the timer. The actual pit
- core is integrated with the aic and allows the timer interrupt
- assignment to be programmed by software, but this property is
- required in order to reserve an interrupt number that doesn't
- conflict with other devices.
-
-
-Example:
-
-timer@200 {
- compatible = "jcore,pit";
- reg = < 0x200 0x30 0x500 0x30 >;
- interrupts = < 0x48 >;
-};
diff --git a/Documentation/devicetree/bindings/timer/jcore,pit.yaml b/Documentation/devicetree/bindings/timer/jcore,pit.yaml
new file mode 100644
index 000000000000..9e6e25b75293
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/jcore,pit.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/jcore,pit.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: J-Core Programmable Interval Timer and Clocksource
+
+maintainers:
+ - Rich Felker <dalias@libc.org>
+
+properties:
+ compatible:
+ const: jcore,pit
+
+ reg:
+ description:
+ Memory region(s) for timer/clocksource registers. For SMP, there should be
+ one region per cpu, indexed by the sequential, zero-based hardware cpu
+ number.
+
+ interrupts:
+ description:
+ An interrupt to assign for the timer. The actual pit core is integrated
+ with the aic and allows the timer interrupt assignment to be programmed by
+ software, but this property is required in order to reserve an interrupt
+ number that doesn't conflict with other devices.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@200 {
+ compatible = "jcore,pit";
+ reg = <0x200 0x30 0x500 0x30>;
+ interrupts = <0x48>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/lsi,zevio-timer.txt b/Documentation/devicetree/bindings/timer/lsi,zevio-timer.txt
deleted file mode 100644
index b2d07ad90e9a..000000000000
--- a/Documentation/devicetree/bindings/timer/lsi,zevio-timer.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-TI-NSPIRE timer
-
-Required properties:
-
-- compatible : should be "lsi,zevio-timer".
-- reg : The physical base address and size of the timer (always first).
-- clocks: phandle to the source clock.
-
-Optional properties:
-
-- interrupts : The interrupt number of the first timer.
-- reg : The interrupt acknowledgement registers
- (always after timer base address)
-
-If any of the optional properties are not given, the timer is added as a
-clock-source only.
-
-Example:
-
-timer {
- compatible = "lsi,zevio-timer";
- reg = <0x900D0000 0x1000>, <0x900A0020 0x8>;
- interrupts = <19>;
- clocks = <&timer_clk>;
-};
-
-Example (no clock-events):
-
-timer {
- compatible = "lsi,zevio-timer";
- reg = <0x900D0000 0x1000>;
- clocks = <&timer_clk>;
-};
diff --git a/Documentation/devicetree/bindings/timer/lsi,zevio-timer.yaml b/Documentation/devicetree/bindings/timer/lsi,zevio-timer.yaml
new file mode 100644
index 000000000000..358455d8e7a8
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/lsi,zevio-timer.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/lsi,zevio-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI-NSPIRE timer
+
+maintainers:
+ - Daniel Tang <dt.tangr@gmail.com>
+
+properties:
+ compatible:
+ const: lsi,zevio-timer
+
+ reg:
+ minItems: 1
+ items:
+ - description: Timer registers
+ - description: Interrupt acknowledgement registers (optional)
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+allOf:
+ - if:
+ required: [ interrupts ]
+ then:
+ properties:
+ reg:
+ minItems: 2
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@900d0000 {
+ compatible = "lsi,zevio-timer";
+ reg = <0x900D0000 0x1000>, <0x900A0020 0x8>;
+ interrupts = <19>;
+ clocks = <&timer_clk>;
+ };
+ - |
+ timer@900d0000 {
+ compatible = "lsi,zevio-timer";
+ reg = <0x900D0000 0x1000>;
+ clocks = <&timer_clk>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/marvell,armada-370-timer.yaml b/Documentation/devicetree/bindings/timer/marvell,armada-370-timer.yaml
new file mode 100644
index 000000000000..bc0677fe86eb
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/marvell,armada-370-timer.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/marvell,armada-370-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Armada 370, 375, 380 and XP Timers
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: marvell,armada-380-timer
+ - const: marvell,armada-xp-timer
+ - items:
+ - const: marvell,armada-375-timer
+ - const: marvell,armada-370-timer
+ - enum:
+ - marvell,armada-370-timer
+ - marvell,armada-xp-timer
+
+ reg:
+ items:
+ - description: Global timer registers
+ - description: Local/private timer registers
+
+ interrupts:
+ items:
+ - description: Global timer interrupt 0
+ - description: Global timer interrupt 1
+ - description: Global timer interrupt 2
+ - description: Global timer interrupt 3
+ - description: First private timer interrupt
+ - description: Second private timer interrupt
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: nbclk
+ - const: fixed
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - marvell,armada-375-timer
+ - marvell,armada-xp-timer
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ clock-names:
+ minItems: 2
+ required:
+ - clock-names
+ else:
+ properties:
+ clocks:
+ maxItems: 1
+ clock-names:
+ maxItems: 1
+
+examples:
+ - |
+ timer@20300 {
+ compatible = "marvell,armada-xp-timer";
+ reg = <0x20300 0x30>, <0x21040 0x30>;
+ interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
+ clocks = <&coreclk 2>, <&refclk>;
+ clock-names = "nbclk", "fixed";
+ };
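+    # The former marvell,armada-370-xp-timer.txt binding also carried an
+    # Armada 370 example (single clock input, no clock-names); it is kept
+    # here as an illustrative sketch.
+  - |
+    timer@20300 {
+        compatible = "marvell,armada-370-timer";
+        reg = <0x20300 0x30>, <0x21040 0x30>;
+        interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
+        clocks = <&coreclk 2>;
+    };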
diff --git a/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt b/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
deleted file mode 100644
index e9c78ce880e6..000000000000
--- a/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-Marvell Armada 370 and Armada XP Timers
----------------------------------------
-
-Required properties:
-- compatible: Should be one of the following
- "marvell,armada-370-timer",
- "marvell,armada-375-timer",
- "marvell,armada-xp-timer".
-- interrupts: Should contain the list of Global Timer interrupts and
- then local timer interrupts
-- reg: Should contain location and length for timers register. First
- pair for the Global Timer registers, second pair for the
- local/private timers.
-
-Clocks required for compatible = "marvell,armada-370-timer":
-- clocks : Must contain a single entry describing the clock input
-
-Clocks required for compatibles = "marvell,armada-xp-timer",
- "marvell,armada-375-timer":
-- clocks : Must contain an entry for each entry in clock-names.
-- clock-names : Must include the following entries:
- "nbclk" (L2/coherency fabric clock),
- "fixed" (Reference 25 MHz fixed-clock).
-
-Examples:
-
-- Armada 370:
-
- timer {
- compatible = "marvell,armada-370-timer";
- reg = <0x20300 0x30>, <0x21040 0x30>;
- interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
- clocks = <&coreclk 2>;
- };
-
-- Armada XP:
-
- timer {
- compatible = "marvell,armada-xp-timer";
- reg = <0x20300 0x30>, <0x21040 0x30>;
- interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
- clocks = <&coreclk 2>, <&refclk>;
- clock-names = "nbclk", "fixed";
- };
diff --git a/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt b/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt
deleted file mode 100644
index cd1a0c256f94..000000000000
--- a/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-Marvell Orion SoC timer
-
-Required properties:
-- compatible: shall be "marvell,orion-timer"
-- reg: base address of the timer register starting with TIMERS CONTROL register
-- interrupts: should contain the interrupts for Timer0 and Timer1
-- clocks: phandle of timer reference clock (tclk)
-
-Example:
- timer: timer {
- compatible = "marvell,orion-timer";
- reg = <0x20300 0x20>;
- interrupt-parent = <&bridge_intc>;
- interrupts = <1>, <2>;
- clocks = <&core_clk 0>;
- };
diff --git a/Documentation/devicetree/bindings/timer/marvell,orion-timer.yaml b/Documentation/devicetree/bindings/timer/marvell,orion-timer.yaml
new file mode 100644
index 000000000000..f973afffa5ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/marvell,orion-timer.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/marvell,orion-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Orion SoC timer
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+properties:
+ compatible:
+ const: marvell,orion-timer
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: Timer0 interrupt
+ - description: Timer1 interrupt
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@20300 {
+ compatible = "marvell,orion-timer";
+ reg = <0x20300 0x20>;
+ interrupts = <1>, <2>;
+ clocks = <&core_clk 0>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/nxp,s32g2-stm.yaml b/Documentation/devicetree/bindings/timer/nxp,s32g2-stm.yaml
new file mode 100644
index 000000000000..b44b9794bb85
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/nxp,s32g2-stm.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/nxp,s32g2-stm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP System Timer Module (STM)
+
+maintainers:
+ - Daniel Lezcano <daniel.lezcano@kernel.org>
+
+description:
+ The System Timer Module supports commonly required system and application
+ software timing functions. STM includes a 32-bit count-up timer and four
+ 32-bit compare channels with a separate interrupt source for each channel.
+ The timer is driven by the STM module clock divided by an 8-bit prescale
+ value.
+
+properties:
+ compatible:
+ oneOf:
+ - const: nxp,s32g2-stm
+ - items:
+ - const: nxp,s32g3-stm
+ - const: nxp,s32g2-stm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Counter clock
+ - description: Module clock
+ - description: Register clock
+
+ clock-names:
+ items:
+ - const: counter
+ - const: module
+ - const: register
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ timer@4011c000 {
+ compatible = "nxp,s32g2-stm";
+ reg = <0x4011c000 0x3000>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 0x3b>, <&clks 0x3c>, <&clks 0x3c>;
+ clock-names = "counter", "module", "register";
+ };
diff --git a/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml b/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml
index 891cca009528..6b80b060672e 100644
--- a/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml
@@ -18,9 +18,14 @@ description: |
properties:
compatible:
- enum:
- - nxp,imx95-sysctr-timer
- - nxp,sysctr-timer
+ oneOf:
+ - enum:
+ - nxp,imx95-sysctr-timer
+ - nxp,sysctr-timer
+ - items:
+ - enum:
+ - nxp,imx94-sysctr-timer
+ - const: nxp,imx95-sysctr-timer
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/timer/renesas,ostm.yaml b/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
index 9ba858f094ab..0983c1efec80 100644
--- a/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
@@ -26,6 +26,7 @@ properties:
- renesas,r9a07g043-ostm # RZ/G2UL and RZ/Five
- renesas,r9a07g044-ostm # RZ/G2{L,LC}
- renesas,r9a07g054-ostm # RZ/V2L
+ - renesas,r9a09g056-ostm # RZ/V2N
- renesas,r9a09g057-ostm # RZ/V2H(P)
- const: renesas,ostm # Generic
@@ -54,12 +55,11 @@ required:
if:
properties:
compatible:
- contains:
- enum:
- - renesas,r9a07g043-ostm
- - renesas,r9a07g044-ostm
- - renesas,r9a07g054-ostm
- - renesas,r9a09g057-ostm
+ not:
+ contains:
+ enum:
+ - renesas,r7s72100-ostm
+ - renesas,r7s9210-ostm
then:
required:
- resets
diff --git a/Documentation/devicetree/bindings/timer/renesas,tpu.yaml b/Documentation/devicetree/bindings/timer/renesas,tpu.yaml
deleted file mode 100644
index 7a473b302775..000000000000
--- a/Documentation/devicetree/bindings/timer/renesas,tpu.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/timer/renesas,tpu.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Renesas H8/300 Timer Pulse Unit
-
-maintainers:
- - Yoshinori Sato <ysato@users.sourceforge.jp>
-
-description:
- The TPU is a 16bit timer/counter with configurable clock inputs and
- programmable compare match.
- This implementation supports only cascade mode.
-
-select:
- properties:
- compatible:
- contains:
- const: renesas,tpu
- '#pwm-cells': false
- required:
- - compatible
-
-properties:
- compatible:
- const: renesas,tpu
-
- reg:
- items:
- - description: First channel
- - description: Second channel
-
- clocks:
- maxItems: 1
-
- clock-names:
- const: fck
-
-required:
- - compatible
- - reg
- - clocks
- - clock-names
-
-additionalProperties: false
-
-examples:
- - |
- tpu: tpu@ffffe0 {
- compatible = "renesas,tpu";
- reg = <0xffffe0 16>, <0xfffff0 12>;
- clocks = <&pclk>;
- clock-names = "fck";
- };
diff --git a/Documentation/devicetree/bindings/timer/sifive,clint.yaml b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
index 653e2e0ca878..d85a1a088b35 100644
--- a/Documentation/devicetree/bindings/timer/sifive,clint.yaml
+++ b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
@@ -30,6 +30,7 @@ properties:
- items:
- enum:
- canaan,k210-clint # Canaan Kendryte K210
+ - eswin,eic7700-clint # ESWIN EIC7700
- sifive,fu540-c000-clint # SiFive FU540
- spacemit,k1-clint # SpacemiT K1
- starfive,jh7100-clint # StarFive JH7100
diff --git a/Documentation/devicetree/bindings/timer/snps,arc-timer.txt b/Documentation/devicetree/bindings/timer/snps,arc-timer.txt
deleted file mode 100644
index b02ab0af10ce..000000000000
--- a/Documentation/devicetree/bindings/timer/snps,arc-timer.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Synopsys ARC Local Timer with Interrupt Capabilities
-- Found on all ARC CPUs (ARC700/ARCHS)
-- Can be optionally programmed to interrupt on Limit
-- Two identical copies TIMER0 and TIMER1 exist in ARC cores and historically
- TIMER0 used as clockevent provider (true for all ARC cores)
- TIMER1 used for clocksource (mandatory for ARC700, optional for ARC HS)
-
-Required properties:
-
-- compatible : should be "snps,arc-timer"
-- interrupts : single Interrupt going into parent intc
- (16 for ARCHS cores, 3 for ARC700 cores)
-- clocks : phandle to the source clock
-
-Example:
-
- timer0 {
- compatible = "snps,arc-timer";
- interrupts = <3>;
- interrupt-parent = <&core_intc>;
- clocks = <&core_clk>;
- };
-
- timer1 {
- compatible = "snps,arc-timer";
- clocks = <&core_clk>;
- };
diff --git a/Documentation/devicetree/bindings/timer/snps,arc-timer.yaml b/Documentation/devicetree/bindings/timer/snps,arc-timer.yaml
new file mode 100644
index 000000000000..0d1e37db6f8e
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/snps,arc-timer.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/snps,arc-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys ARC Local Timer
+
+maintainers:
+ - Vineet Gupta <vgupta@synopsys.com>
+
+description: >
+ Synopsys ARC Local Timer with Interrupt Capabilities
+
+ - Found on all ARC CPUs (ARC700/ARCHS)
+ - Can be optionally programmed to interrupt on Limit
+ - Two identical copies TIMER0 and TIMER1 exist in ARC cores and historically
+ TIMER0 used as clockevent provider (true for all ARC cores)
+ TIMER1 used for clocksource (mandatory for ARC700, optional for ARC HS)
+
+properties:
+ compatible:
+ const: snps,arc-timer
+
+ interrupts:
+ maxItems: 1
+ description: A single timer interrupt going into the parent interrupt controller.
+ Use <16> for ARCHS cores, <3> for ARC700 cores.
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ timer0 {
+ compatible = "snps,arc-timer";
+ interrupts = <3>;
+ clocks = <&core_clk>;
+ };
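+    # Sketch of the clocksource-only TIMER1 instance from the former text
+    # binding: no interrupt is wired up, so it serves purely as a clocksource.
+  - |
+    timer1 {
+        compatible = "snps,arc-timer";
+        clocks = <&core_clk>;
+    };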
diff --git a/Documentation/devicetree/bindings/timer/snps,archs-gfrc.txt b/Documentation/devicetree/bindings/timer/snps,archs-gfrc.txt
deleted file mode 100644
index b6cd1b3922de..000000000000
--- a/Documentation/devicetree/bindings/timer/snps,archs-gfrc.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-Synopsys ARC Free Running 64-bit Global Timer for ARC HS CPUs
-- clocksource provider for SMP SoC
-
-Required properties:
-
-- compatible : should be "snps,archs-gfrc"
-- clocks : phandle to the source clock
-
-Example:
-
- gfrc {
- compatible = "snps,archs-gfrc";
- clocks = <&core_clk>;
- };
diff --git a/Documentation/devicetree/bindings/timer/snps,archs-gfrc.yaml b/Documentation/devicetree/bindings/timer/snps,archs-gfrc.yaml
new file mode 100644
index 000000000000..fb16f4aba1c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/snps,archs-gfrc.yaml
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/snps,archs-gfrc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys ARC Free Running 64-bit Global Timer for ARC HS CPUs
+
+maintainers:
+ - Vineet Gupta <vgupta@synopsys.com>
+
+properties:
+ compatible:
+ const: snps,archs-gfrc
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ timer {
+ compatible = "snps,archs-gfrc";
+ clocks = <&core_clk>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/snps,archs-rtc.txt b/Documentation/devicetree/bindings/timer/snps,archs-rtc.txt
deleted file mode 100644
index 47bd7a702f3f..000000000000
--- a/Documentation/devicetree/bindings/timer/snps,archs-rtc.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-Synopsys ARC Free Running 64-bit Local Timer for ARC HS CPUs
-- clocksource provider for UP SoC
-
-Required properties:
-
-- compatible : should be "snps,archs-rtc"
-- clocks : phandle to the source clock
-
-Example:
-
- rtc {
- compatible = "snps,arc-rtc";
- clocks = <&core_clk>;
- };
diff --git a/Documentation/devicetree/bindings/timer/snps,archs-rtc.yaml b/Documentation/devicetree/bindings/timer/snps,archs-rtc.yaml
new file mode 100644
index 000000000000..7478810eb24a
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/snps,archs-rtc.yaml
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/snps,archs-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys ARC Free Running 64-bit Local Timer for ARC HS CPUs
+
+maintainers:
+ - Vineet Gupta <vgupta@synopsys.com>
+
+properties:
+ compatible:
+ const: snps,archs-rtc
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ rtc {
+ compatible = "snps,archs-rtc";
+ clocks = <&core_clk>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.txt b/Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.txt
deleted file mode 100644
index ac44c4b67530..000000000000
--- a/Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Milbeaut SoCs Timer Controller
-
-Required properties:
-
-- compatible : should be "socionext,milbeaut-timer".
-- reg : Specifies base physical address and size of the registers.
-- interrupts : The interrupt of the first timer.
-- clocks: phandle to the input clk.
-
-Example:
-
-timer {
- compatible = "socionext,milbeaut-timer";
- reg = <0x1e000050 0x20>
- interrupts = <0 91 4>;
- clocks = <&clk 4>;
-};
diff --git a/Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.yaml b/Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.yaml
new file mode 100644
index 000000000000..9ab72b762314
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/socionext,milbeaut-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Milbeaut SoCs Timer Controller
+
+maintainers:
+ - Sugaya Taichi <sugaya.taichi@socionext.com>
+
+properties:
+ compatible:
+ const: socionext,milbeaut-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@1e000050 {
+ compatible = "socionext,milbeaut-timer";
+ reg = <0x1e000050 0x20>;
+ interrupts = <0 91 4>;
+ clocks = <&clk 4>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/st,spear-timer.txt b/Documentation/devicetree/bindings/timer/st,spear-timer.txt
deleted file mode 100644
index b5238a07da17..000000000000
--- a/Documentation/devicetree/bindings/timer/st,spear-timer.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-* SPEAr ARM Timer
-
-** Timer node required properties:
-
-- compatible : Should be:
- "st,spear-timer"
-- reg: Address range of the timer registers
-- interrupt: Should contain the timer interrupt number
-
-Example:
-
- timer@f0000000 {
- compatible = "st,spear-timer";
- reg = <0xf0000000 0x400>;
- interrupts = <2>;
- };
diff --git a/Documentation/devicetree/bindings/timer/st,spear-timer.yaml b/Documentation/devicetree/bindings/timer/st,spear-timer.yaml
new file mode 100644
index 000000000000..9f26b5f2b38a
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/st,spear-timer.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/st,spear-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SPEAr ARM Timer
+
+maintainers:
+ - Viresh Kumar <vireshk@kernel.org>
+ - Shiraz Hashim <shiraz.linux.kernel@gmail.com>
+
+properties:
+ compatible:
+ const: st,spear-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@f0000000 {
+ compatible = "st,spear-timer";
+ reg = <0xf0000000 0x400>;
+ interrupts = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/thead,c900-aclint-mtimer.yaml b/Documentation/devicetree/bindings/timer/thead,c900-aclint-mtimer.yaml
index 2e92bcdeb423..4ed30efe4052 100644
--- a/Documentation/devicetree/bindings/timer/thead,c900-aclint-mtimer.yaml
+++ b/Documentation/devicetree/bindings/timer/thead,c900-aclint-mtimer.yaml
@@ -14,6 +14,7 @@ properties:
items:
- enum:
- sophgo,sg2042-aclint-mtimer
+ - sophgo,sg2044-aclint-mtimer
- const: thead,c900-aclint-mtimer
reg:
diff --git a/Documentation/devicetree/bindings/timer/ti,keystone-timer.txt b/Documentation/devicetree/bindings/timer/ti,keystone-timer.txt
deleted file mode 100644
index d3905a5412b8..000000000000
--- a/Documentation/devicetree/bindings/timer/ti,keystone-timer.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-* Device tree bindings for Texas instruments Keystone timer
-
-This document provides bindings for the 64-bit timer in the KeyStone
-architecture devices. The timer can be configured as a general-purpose 64-bit
-timer, dual general-purpose 32-bit timers. When configured as dual 32-bit
-timers, each half can operate in conjunction (chain mode) or independently
-(unchained mode) of each other.
-
-It is global timer is a free running up-counter and can generate interrupt
-when the counter reaches preset counter values.
-
-Documentation:
-https://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf
-
-Required properties:
-
-- compatible : should be "ti,keystone-timer".
-- reg : specifies base physical address and count of the registers.
-- interrupts : interrupt generated by the timer.
-- clocks : the clock feeding the timer clock.
-
-Example:
-
-timer@22f0000 {
- compatible = "ti,keystone-timer";
- reg = <0x022f0000 0x80>;
- interrupts = <GIC_SPI 110 IRQ_TYPE_EDGE_RISING>;
- clocks = <&clktimer15>;
-};
diff --git a/Documentation/devicetree/bindings/timer/ti,keystone-timer.yaml b/Documentation/devicetree/bindings/timer/ti,keystone-timer.yaml
new file mode 100644
index 000000000000..1caf5ce64f01
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/ti,keystone-timer.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/ti,keystone-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI Keystone timer
+
+maintainers:
+ - Alexander A. Klimov <grandmaster@al2klimov.de>
+ - Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
+
+description: >
+ A 64-bit timer in the KeyStone architecture devices. The timer can be
+ configured as a general-purpose 64-bit timer, dual general-purpose 32-bit
+ timers. When configured as dual 32-bit timers, each half can operate in
+ conjunction (chain mode) or independently (unchained mode) of each other.
+
+ The global timer is a free-running up-counter and can generate an interrupt
+ when the counter reaches preset counter values.
+
+ Documentation:
+ https://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf
+
+properties:
+ compatible:
+ const: ti,keystone-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ items:
+ - const: irq
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: timer
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ timer@22f0000 {
+ compatible = "ti,keystone-timer";
+ reg = <0x022f0000 0x80>;
+ interrupts = <110 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clktimer15>;
+ };
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 8da408107e55..8dc81b1ca48e 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -173,6 +173,8 @@ properties:
- maxim,ds3502
# Temperature Sensor, I2C interface
- maxim,max1619
+ # Digital temperature sensor with 0.1°C accuracy
+ - maxim,max30208
# 3-Channel Remote Temperature Sensor
- maxim,max31730
# 10-bit 10 kOhm linear programmable voltage divider
@@ -293,8 +295,6 @@ properties:
- mps,mp5990
# Monolithic Power Systems Inc. digital step-down converter mp9941
- mps,mp9941
- # Monolithic Power Systems Inc. synchronous step-down converter mpq8785
- - mps,mpq8785
# Temperature sensor with integrated fan control
- national,lm63
# Serial Interface ACPI-Compatible Microprocessor System Hardware Monitor
@@ -343,6 +343,8 @@ properties:
- sensortek,stk8ba50
# SGX Sensortech VZ89X Sensors
- sgx,vz89x
+ # SGX Sensortech VZ89TE Sensors
+ - sgx,vz89te
# Silicon Labs EM3581 Zigbee SoC with SPI interface
- silabs,em3581
# Silicon Labs SI3210 Programmable CMOS SLIC/CODEC with SPI interface
diff --git a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
index a03fff5df5ef..6c6043d9809e 100644
--- a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
@@ -43,6 +43,7 @@ properties:
- qcom,sm8450-ufshc
- qcom,sm8550-ufshc
- qcom,sm8650-ufshc
+ - qcom,sm8750-ufshc
- const: qcom,ufshc
- const: jedec,ufs-2.0
@@ -158,6 +159,7 @@ allOf:
- qcom,sm8450-ufshc
- qcom,sm8550-ufshc
- qcom,sm8650-ufshc
+ - qcom,sm8750-ufshc
then:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/usb/cypress,hx3.yaml b/Documentation/devicetree/bindings/usb/cypress,hx3.yaml
index 1033b7a4b8f9..d6eac1213228 100644
--- a/Documentation/devicetree/bindings/usb/cypress,hx3.yaml
+++ b/Documentation/devicetree/bindings/usb/cypress,hx3.yaml
@@ -14,9 +14,22 @@ allOf:
properties:
compatible:
- enum:
- - usb4b4,6504
- - usb4b4,6506
+ oneOf:
+ - enum:
+ - usb4b4,6504
+ - usb4b4,6506
+ - items:
+ - enum:
+ - usb4b4,6500
+ - usb4b4,6508
+ - const: usb4b4,6504
+ - items:
+ - enum:
+ - usb4b4,6502
+ - usb4b4,6503
+ - usb4b4,6507
+ - usb4b4,650a
+ - const: usb4b4,6506
reg: true
diff --git a/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
index b5843f4d17d8..36f5c644d959 100644
--- a/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xilinx SuperSpeed DWC3 USB SoC controller
maintainers:
- - Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
@@ -27,6 +26,8 @@ properties:
ranges: true
+ dma-coherent: true
+
power-domains:
description: specifies a phandle to PM domain provider node
maxItems: 1
diff --git a/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
index e2a72deae776..c68c04da3399 100644
--- a/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
+++ b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
@@ -17,7 +17,6 @@ description:
maintainers:
- Michal Simek <michal.simek@amd.com>
- - Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
diff --git a/Documentation/devicetree/bindings/usb/xlnx,usb2.yaml b/Documentation/devicetree/bindings/usb/xlnx,usb2.yaml
index a7f75fe36665..f295aa9d9ee7 100644
--- a/Documentation/devicetree/bindings/usb/xlnx,usb2.yaml
+++ b/Documentation/devicetree/bindings/usb/xlnx,usb2.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xilinx udc controller
maintainers:
- - Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 86f6a19b28ae..c01adbaacbbb 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -129,6 +129,8 @@ patternProperties:
description: Andes Technology Corporation
"^anvo,.*":
description: Anvo-Systems Dresden GmbH
+ "^aoly,.*":
+ description: Shenzhen Aoly Technology Co., Ltd.
"^aosong,.*":
description: Guangzhou Aosong Electronic Co., Ltd.
"^apm,.*":
@@ -432,6 +434,8 @@ patternProperties:
description: EBV Elektronik
"^eckelmann,.*":
description: Eckelmann AG
+ "^econet,.*":
+ description: EcoNet (HK) Limited
"^edgeble,.*":
description: Edgeble AI Technologies Pvt. Ltd.
"^edimax,.*":
@@ -864,6 +868,8 @@ patternProperties:
description: Linux-specific binding
"^linx,.*":
description: Linx Technologies
+ "^liontron,.*":
+ description: Shenzhen Liontron Technology Co., Ltd
"^liteon,.*":
description: LITE-ON Technology Corp.
"^litex,.*":
@@ -1158,6 +1164,8 @@ patternProperties:
description: Parallax Inc.
"^pda,.*":
description: Precision Design Associates, Inc.
+ "^pegatron,.*":
+ description: Pegatron Corporation
"^pericom,.*":
description: Pericom Technology Inc.
"^pervasive,.*":
@@ -1262,6 +1270,8 @@ patternProperties:
description: Renesas Electronics Corporation
"^rervision,.*":
description: Shenzhen Rervision Technology Co., Ltd.
+ "^retronix,.*":
+ description: Retronix Technology Inc.
"^revotics,.*":
description: Revolution Robotics, Inc. (Revotics)
"^rex,.*":
@@ -1494,6 +1504,8 @@ patternProperties:
description: Toby Churchill Ltd.
"^tcs,.*":
description: Shenzhen City Tang Cheng Technology Co., Ltd.
+ "^tcu,.*":
+ description: TC Unterhaltungselektronik AG
"^tdo,.*":
description: Shangai Top Display Optoelectronics Co., Ltd
"^team-source-display,.*":
@@ -1607,6 +1619,8 @@ patternProperties:
description: Universal Scientific Industrial Co., Ltd.
"^usr,.*":
description: U.S. Robotics Corporation
+ "^ultratronik,.*":
+ description: Ultratronik GmbH
"^utoo,.*":
description: Aigo Digital Technology Co., Ltd.
"^v3,.*":
@@ -1749,6 +1763,8 @@ patternProperties:
description: Y Soft Corporation a.s.
"^yuridenki,.*":
description: Yuridenki-Shokai Co. Ltd.
+ "^yuzukihd,.*":
+ description: YuzukiHD Open Source Hardware
"^zarlink,.*":
description: Zarlink Semiconductor
"^zealz,.*":
diff --git a/Documentation/devicetree/bindings/virtio/pci-iommu.yaml b/Documentation/devicetree/bindings/virtio/pci-iommu.yaml
index 972a785a42de..8bd6ad72ac7a 100644
--- a/Documentation/devicetree/bindings/virtio/pci-iommu.yaml
+++ b/Documentation/devicetree/bindings/virtio/pci-iommu.yaml
@@ -20,6 +20,9 @@ description: |
virtio-iommu node doesn't have an "iommus" property, and is omitted from
the iommu-map property of the root complex.
+allOf:
+ - $ref: /schemas/pci/pci-device.yaml#
+
properties:
# If compatible is present, it should contain the vendor and device ID
# according to the PCI Bus Binding specification. Since PCI provides
@@ -33,12 +36,7 @@ properties:
- const: pci1af4,1057
reg:
- description: |
- PCI address of the IOMMU. As defined in the PCI Bus Binding
- reference, the reg property is a five-cell address encoded as (phys.hi
- phys.mid phys.lo size.hi size.lo). phys.hi should contain the device's
- BDF as 0b00000000 bbbbbbbb dddddfff 00000000. The other cells should be
- zero. See Documentation/devicetree/bindings/pci/pci.txt
+ maxItems: 1
'#iommu-cells':
const: 1
diff --git a/Documentation/devicetree/bindings/watchdog/fsl,scu-wdt.yaml b/Documentation/devicetree/bindings/watchdog/fsl,scu-wdt.yaml
index 8b7aa922249b..1d9f15ec6657 100644
--- a/Documentation/devicetree/bindings/watchdog/fsl,scu-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/fsl,scu-wdt.yaml
@@ -20,6 +20,7 @@ properties:
items:
- enum:
- fsl,imx8dxl-sc-wdt
+ - fsl,imx8qm-sc-wdt
- fsl,imx8qxp-sc-wdt
- const: fsl,imx-sc-wdt
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.yaml b/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.yaml
index 0da953cb7127..8a6c3a75a547 100644
--- a/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.yaml
@@ -35,6 +35,7 @@ properties:
- fsl,imx8mp-wdt
- fsl,imx8mq-wdt
- fsl,ls1012a-wdt
+ - fsl,ls1021a-wdt
- fsl,ls1043a-wdt
- fsl,vf610-wdt
- const: fsl,imx21-wdt
@@ -102,6 +103,7 @@ allOf:
contains:
enum:
- fsl,ls1012a-wdt
+ - fsl,ls1021a-wdt
- fsl,ls1043a-wdt
then:
properties:
diff --git a/Documentation/devicetree/bindings/watchdog/nxp,s32g2-swt.yaml b/Documentation/devicetree/bindings/watchdog/nxp,s32g2-swt.yaml
new file mode 100644
index 000000000000..8f168a05b50c
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/nxp,s32g2-swt.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/nxp,s32g2-swt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP Software Watchdog Timer (SWT)
+
+maintainers:
+ - Daniel Lezcano <daniel.lezcano@kernel.org>
+
+allOf:
+ - $ref: watchdog.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: nxp,s32g2-swt
+ - items:
+ - const: nxp,s32g3-swt
+ - const: nxp,s32g2-swt
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Counter clock
+ - description: Module clock
+ - description: Register clock
+
+ clock-names:
+ items:
+ - const: counter
+ - const: module
+ - const: register
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ watchdog@40100000 {
+ compatible = "nxp,s32g2-swt";
+ reg = <0x40100000 0x1000>;
+ clocks = <&clks 0x3a>, <&clks 0x3b>, <&clks 0x3c>;
+ clock-names = "counter", "module", "register";
+ timeout-sec = <10>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
index 3e0a8747a357..78874b90c88c 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
@@ -76,7 +76,9 @@ properties:
- const: renesas,rcar-gen4-wdt # R-Car Gen4
- items:
- - const: renesas,r9a09g047-wdt # RZ/G3E
+ - enum:
+ - renesas,r9a09g047-wdt # RZ/G3E
+ - renesas,r9a09g056-wdt # RZ/V2N
- const: renesas,r9a09g057-wdt # RZ/V2H(P)
- const: renesas,r9a09g057-wdt # RZ/V2H(P)
diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml
index d175ae968336..53fc64f5b56d 100644
--- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml
@@ -25,6 +25,7 @@ properties:
- samsung,exynos5420-wdt # for Exynos5420
- samsung,exynos7-wdt # for Exynos7
- samsung,exynos850-wdt # for Exynos850
+ - samsung,exynos990-wdt # for Exynos990
- samsung,exynosautov9-wdt # for Exynosautov9
- samsung,exynosautov920-wdt # for Exynosautov920
- items:
@@ -49,14 +50,14 @@ properties:
samsung,cluster-index:
$ref: /schemas/types.yaml#/definitions/uint32
description:
- Index of CPU cluster on which watchdog is running (in case of Exynos850
- or Google gs101).
+ Index of CPU cluster on which watchdog is running (in case of Exynos850,
+ Exynos990 or Google gs101).
samsung,syscon-phandle:
$ref: /schemas/types.yaml#/definitions/phandle
description:
Phandle to the PMU system controller node (in case of Exynos5250,
- Exynos5420, Exynos7, Exynos850 and gs101).
+ Exynos5420, Exynos7, Exynos850, Exynos990 and gs101).
required:
- compatible
@@ -77,6 +78,7 @@ allOf:
- samsung,exynos5420-wdt
- samsung,exynos7-wdt
- samsung,exynos850-wdt
+ - samsung,exynos990-wdt
- samsung,exynosautov9-wdt
- samsung,exynosautov920-wdt
then:
@@ -89,6 +91,7 @@ allOf:
enum:
- google,gs101-wdt
- samsung,exynos850-wdt
+ - samsung,exynos990-wdt
- samsung,exynosautov9-wdt
- samsung,exynosautov920-wdt
then:
@@ -102,7 +105,7 @@ allOf:
- const: watchdog
- const: watchdog_src
samsung,cluster-index:
- enum: [0, 1]
+ enum: [0, 1, 2]
required:
- samsung,cluster-index
else:
diff --git a/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml b/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml
index 1efefd741c06..ef088e0f6917 100644
--- a/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml
@@ -28,6 +28,7 @@ properties:
- rockchip,rk3328-wdt
- rockchip,rk3368-wdt
- rockchip,rk3399-wdt
+ - rockchip,rk3562-wdt
- rockchip,rk3568-wdt
- rockchip,rk3576-wdt
- rockchip,rk3588-wdt
diff --git a/Documentation/devicetree/bindings/writing-schema.rst b/Documentation/devicetree/bindings/writing-schema.rst
index eb8ced400c7e..fc73072f12fc 100644
--- a/Documentation/devicetree/bindings/writing-schema.rst
+++ b/Documentation/devicetree/bindings/writing-schema.rst
@@ -117,9 +117,14 @@ additionalProperties / unevaluatedProperties
should be allowed.
* additionalProperties: true
- Rare case, used for schemas implementing common set of properties. Such
- schemas are supposed to be referenced by other schemas, which then use
- 'unevaluatedProperties: false'. Typically bus or common-part schemas.
+ - Top-level part:
+ Rare case, used for schemas implementing common set of properties. Such
+ schemas are supposed to be referenced by other schemas, which then use
+ 'unevaluatedProperties: false'. Typically bus or common-part schemas.
+ - Nested node:
+ When listing only the expected compatible of the nested node and there
+ is another schema matching that compatible which ends with one of the
+ two cases above ('false').
examples
Optional. A list of one or more DTS hunks implementing this binding only.
diff --git a/Documentation/devicetree/overlay-notes.rst b/Documentation/devicetree/overlay-notes.rst
index e139f22b363e..35e79242af9a 100644
--- a/Documentation/devicetree/overlay-notes.rst
+++ b/Documentation/devicetree/overlay-notes.rst
@@ -38,10 +38,10 @@ Lets take an example where we have a foo board with the following base tree::
};
---- foo.dts ---------------------------------------------------------------
-The overlay bar.dts,
+The overlay bar.dtso,
::
- ---- bar.dts - overlay target location by label ----------------------------
+ ---- bar.dtso - overlay target location by label ---------------------------
/dts-v1/;
/plugin/;
&ocp {
@@ -51,7 +51,7 @@ The overlay bar.dts,
... /* various properties and child nodes */
};
};
- ---- bar.dts ---------------------------------------------------------------
+ ---- bar.dtso --------------------------------------------------------------
when loaded (and resolved as described in [1]) should result in foo+bar.dts::
@@ -88,9 +88,9 @@ in the base DT. In this case, the target path can be provided. The target
location by label syntax is preferred because the overlay can be applied to
any base DT containing the label, no matter where the label occurs in the DT.
-The above bar.dts example modified to use target path syntax is::
+The above bar.dtso example modified to use target path syntax is::
- ---- bar.dts - overlay target location by explicit path --------------------
+ ---- bar.dtso - overlay target location by explicit path -------------------
/dts-v1/;
/plugin/;
&{/ocp} {
@@ -100,7 +100,7 @@ The above bar.dts example modified to use target path syntax is::
... /* various properties and child nodes */
}
};
- ---- bar.dts ---------------------------------------------------------------
+ ---- bar.dtso --------------------------------------------------------------
Overlay in-kernel API
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
index 8081ebfe48bc..5a91df105141 100644
--- a/Documentation/doc-guide/sphinx.rst
+++ b/Documentation/doc-guide/sphinx.rst
@@ -28,7 +28,7 @@ Sphinx Install
==============
The ReST markups currently used by the Documentation/ files are meant to be
-built with ``Sphinx`` version 2.4.4 or higher.
+built with ``Sphinx`` version 3.4.3 or higher.
There's a script that checks for the Sphinx requirements. Please see
:ref:`sphinx-pre-install` for further details.
@@ -42,12 +42,6 @@ with your distributions. In order to do so, it is recommended to install
Sphinx inside a virtual environment, using ``virtualenv-3``
or ``virtualenv``, depending on how your distribution packaged Python 3.
-.. note::
-
- #) It is recommended to use the RTD theme for html output. Depending
- on the Sphinx version, it should be installed separately,
- with ``pip install sphinx_rtd_theme``.
-
In summary, if you want to install the latest version of Sphinx, you
should do::
@@ -162,6 +156,12 @@ By default, the "Alabaster" theme is used to build the HTML documentation;
this theme is bundled with Sphinx and need not be installed separately.
The Sphinx theme can be overridden by using the ``DOCS_THEME`` make variable.
+.. note::
+
+ Some people might prefer to use the RTD theme for html output.
+ Depending on the Sphinx version, it should be installed separately,
+ with ``pip install sphinx_rtd_theme``.
+
There is another make variable ``SPHINXDIRS``, which is useful when test
building a subset of documentation. For example, you can build documents
under ``Documentation/doc-guide`` by running
diff --git a/Documentation/driver-api/basics.rst b/Documentation/driver-api/basics.rst
index d78b7c328ff7..5e9f7aee71a7 100644
--- a/Documentation/driver-api/basics.rst
+++ b/Documentation/driver-api/basics.rst
@@ -108,6 +108,9 @@ Kernel objects manipulation
.. kernel-doc:: lib/kobject.c
:export:
+.. kernel-doc:: lib/kobject_uevent.c
+ :export:
+
Kernel utility functions
------------------------
diff --git a/Documentation/driver-api/coco/index.rst b/Documentation/driver-api/coco/index.rst
new file mode 100644
index 000000000000..af9f08ca0cfd
--- /dev/null
+++ b/Documentation/driver-api/coco/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+Confidential Computing
+======================
+
+.. toctree::
+ :maxdepth: 1
+
+ measurement-registers
+
+.. only:: subproject and html
diff --git a/Documentation/driver-api/coco/measurement-registers.rst b/Documentation/driver-api/coco/measurement-registers.rst
new file mode 100644
index 000000000000..962a44efa2c0
--- /dev/null
+++ b/Documentation/driver-api/coco/measurement-registers.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+=====================
+Measurement Registers
+=====================
+
+.. kernel-doc:: include/linux/tsm-mr.h
+ :internal:
+
+.. kernel-doc:: drivers/virt/coco/guest/tsm-mr.c
+ :export:
diff --git a/Documentation/driver-api/cxl/access-coordinates.rst b/Documentation/driver-api/cxl/access-coordinates.rst
deleted file mode 100644
index b07950ea30c9..000000000000
--- a/Documentation/driver-api/cxl/access-coordinates.rst
+++ /dev/null
@@ -1,91 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-.. include:: <isonum.txt>
-
-==================================
-CXL Access Coordinates Computation
-==================================
-
-Shared Upstream Link Calculation
-================================
-For certain CXL region construction with endpoints behind CXL switches (SW) or
-Root Ports (RP), there is the possibility of the total bandwidth for all
-the endpoints behind a switch being more than the switch upstream link.
-A similar situation can occur within the host, upstream of the root ports.
-The CXL driver performs an additional pass after all the targets have
-arrived for a region in order to recalculate the bandwidths with possible
-upstream link being a limiting factor in mind.
-
-The algorithm assumes the configuration is a symmetric topology as that
-maximizes performance. When asymmetric topology is detected, the calculation
-is aborted. An asymmetric topology is detected during topology walk where the
-number of RPs detected as a grandparent is not equal to the number of devices
-iterated in the same iteration loop. The assumption is made that subtle
-asymmetry in properties does not happen and all paths to EPs are equal.
-
-There can be multiple switches under an RP. There can be multiple RPs under
-a CXL Host Bridge (HB). There can be multiple HBs under a CXL Fixed Memory
-Window Structure (CFMWS).
-
-An example hierarchy:
-
-> CFMWS 0
-> |
-> _________|_________
-> | |
-> ACPI0017-0 ACPI0017-1
-> GP0/HB0/ACPI0016-0 GP1/HB1/ACPI0016-1
-> | | | |
-> RP0 RP1 RP2 RP3
-> | | | |
-> SW 0 SW 1 SW 2 SW 3
-> | | | | | | | |
-> EP0 EP1 EP2 EP3 EP4 EP5 EP6 EP7
-
-Computation for the example hierarchy:
-
-Min (GP0 to CPU BW,
- Min(SW 0 Upstream Link to RP0 BW,
- Min(SW0SSLBIS for SW0DSP0 (EP0), EP0 DSLBIS, EP0 Upstream Link) +
- Min(SW0SSLBIS for SW0DSP1 (EP1), EP1 DSLBIS, EP1 Upstream link)) +
- Min(SW 1 Upstream Link to RP1 BW,
- Min(SW1SSLBIS for SW1DSP0 (EP2), EP2 DSLBIS, EP2 Upstream Link) +
- Min(SW1SSLBIS for SW1DSP1 (EP3), EP3 DSLBIS, EP3 Upstream link))) +
-Min (GP1 to CPU BW,
- Min(SW 2 Upstream Link to RP2 BW,
- Min(SW2SSLBIS for SW2DSP0 (EP4), EP4 DSLBIS, EP4 Upstream Link) +
- Min(SW2SSLBIS for SW2DSP1 (EP5), EP5 DSLBIS, EP5 Upstream link)) +
- Min(SW 3 Upstream Link to RP3 BW,
- Min(SW3SSLBIS for SW3DSP0 (EP6), EP6 DSLBIS, EP6 Upstream Link) +
- Min(SW3SSLBIS for SW3DSP1 (EP7), EP7 DSLBIS, EP7 Upstream link))))
-
-The calculation starts at cxl_region_shared_upstream_perf_update(). A xarray
-is created to collect all the endpoint bandwidths via the
-cxl_endpoint_gather_bandwidth() function. The min() of bandwidth from the
-endpoint CDAT and the upstream link bandwidth is calculated. If the endpoint
-has a CXL switch as a parent, then min() of calculated bandwidth and the
-bandwidth from the SSLBIS for the switch downstream port that is associated
-with the endpoint is calculated. The final bandwidth is stored in a
-'struct cxl_perf_ctx' in the xarray indexed by a device pointer. If the
-endpoint is direct attached to a root port (RP), the device pointer would be an
-RP device. If the endpoint is behind a switch, the device pointer would be the
-upstream device of the parent switch.
-
-At the next stage, the code walks through one or more switches if they exist
-in the topology. For endpoints directly attached to RPs, this step is skipped.
-If there is another switch upstream, the code takes the min() of the current
-gathered bandwidth and the upstream link bandwidth. If there's a switch
-upstream, then the SSLBIS of the upstream switch.
-
-Once the topology walk reaches the RP, whether it's direct attached endpoints
-or walking through the switch(es), cxl_rp_gather_bandwidth() is called. At
-this point all the bandwidths are aggregated per each host bridge, which is
-also the index for the resulting xarray.
-
-The next step is to take the min() of the per host bridge bandwidth and the
-bandwidth from the Generic Port (GP). The bandwidths for the GP is retrieved
-via ACPI tables SRAT/HMAT. The min bandwidth are aggregated under the same
-ACPI0017 device to form a new xarray.
-
-Finally, the cxl_region_update_bandwidth() is called and the aggregated
-bandwidth from all the members of the last xarray is updated for the
-access coordinates residing in the cxl region (cxlr) context.
diff --git a/Documentation/driver-api/cxl/allocation/dax.rst b/Documentation/driver-api/cxl/allocation/dax.rst
new file mode 100644
index 000000000000..c6f7a5da832f
--- /dev/null
+++ b/Documentation/driver-api/cxl/allocation/dax.rst
@@ -0,0 +1,60 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========
+DAX Devices
+===========
+CXL capacity exposed as a DAX device can be accessed directly via mmap.
+Users may wish to use this interface mechanism to write their own userland
+CXL allocator, or to manage shared or persistent memory regions across multiple
+hosts.
+
+If the capacity is shared across hosts or persistent, appropriate flushing
+mechanisms must be employed unless the region supports Snoop Back-Invalidate.
+
+Note that mappings must be aligned (size and base) to the dax device's base
+alignment, which is typically 2MB - but may be configured larger.
+
+::
+
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <stdint.h>
+ #include <sys/mman.h>
+ #include <fcntl.h>
+ #include <unistd.h>
+
+ #define DEVICE_PATH "/dev/dax0.0" // Replace DAX device path
+ #define DEVICE_SIZE (4ULL * 1024 * 1024 * 1024) // 4GB
+
+ int main() {
+ int fd;
+ void* mapped_addr;
+
+ /* Open the DAX device */
+ fd = open(DEVICE_PATH, O_RDWR);
+ if (fd < 0) {
+ perror("open");
+ return -1;
+ }
+
+ /* Map the device into memory */
+ mapped_addr = mmap(NULL, DEVICE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ if (mapped_addr == MAP_FAILED) {
+ perror("mmap");
+ close(fd);
+ return -1;
+ }
+
+ printf("Mapped address: %p\n", mapped_addr);
+
+ /* You can now access the device through the mapped address */
+ uint64_t* ptr = (uint64_t*)mapped_addr;
+ *ptr = 0x1234567890abcdef; // Write a value to the device
+ printf("Value at address %p: 0x%016llx\n", ptr, *ptr);
+
+ /* Clean up */
+ munmap(mapped_addr, DEVICE_SIZE);
+ close(fd);
+ return 0;
+ }
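+
+If the mapping is shared with another host or is persistent, the write above
+must also be written back from the CPU caches before it is globally visible
+or durable (unless the region supports Snoop Back-Invalidate). A minimal,
+x86-only sketch of such a flush using the CLWB intrinsic (hypothetical helper,
+not part of any CXL API; assumes 64-byte cache lines and a compiler/CPU with
+CLWB support, e.g. gcc -mclwb): ::
+
+    #include <immintrin.h>
+    #include <stdint.h>
+    #include <stddef.h>
+
+    static void flush_range(void *addr, size_t len)
+    {
+        /* Round down to the start of the first cache line */
+        uintptr_t p = (uintptr_t)addr & ~(uintptr_t)63;
+
+        for (; p < (uintptr_t)addr + len; p += 64)
+            _mm_clwb((void *)p);   /* write back one cache line */
+        _mm_sfence();              /* order the write-backs */
+    }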
diff --git a/Documentation/driver-api/cxl/allocation/hugepages.rst b/Documentation/driver-api/cxl/allocation/hugepages.rst
new file mode 100644
index 000000000000..1023c6922829
--- /dev/null
+++ b/Documentation/driver-api/cxl/allocation/hugepages.rst
@@ -0,0 +1,32 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========
+Huge Pages
+==========
+
+Contiguous Memory Allocator
+===========================
+CXL Memory onlined as SystemRAM during early boot is eligible for use by CMA,
+as the NUMA node hosting that capacity will be `Online` at the time CMA
+carves out contiguous capacity.
+
+CXL Memory deferred to the CXL Driver for configuration cannot have its
+capacity allocated by CMA - as the NUMA node hosting the capacity is `Offline`
+at :code:`__init` time - when CMA carves out contiguous capacity.
+
+HugeTLB
+=======
+Different huge page sizes allow different memory configurations.
+
+2MB Huge Pages
+--------------
+All CXL capacity regardless of configuration time or memory zone is eligible
+for use as 2MB huge pages.
+
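+A minimal sketch of consuming such capacity (assuming 2MB hugepages have been
+reserved, e.g. via :code:`/proc/sys/vm/nr_hugepages`) is an anonymous mapping
+requested with :code:`MAP_HUGETLB`: ::
+
+    #include <sys/mman.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+        size_t len = 2UL * 1024 * 1024;   /* one 2MB huge page */
+
+        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
+                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
+        if (p == MAP_FAILED) {
+            perror("mmap");   /* e.g. no hugepages reserved */
+            return 1;
+        }
+
+        munmap(p, len);
+        return 0;
+    }
+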
+1GB Huge Pages
+--------------
+CXL capacity onlined in :code:`ZONE_NORMAL` is eligible for 1GB Gigantic Page
+allocation.
+
+CXL capacity onlined in :code:`ZONE_MOVABLE` is not eligible for 1GB Gigantic
+Page allocation.
diff --git a/Documentation/driver-api/cxl/allocation/page-allocator.rst b/Documentation/driver-api/cxl/allocation/page-allocator.rst
new file mode 100644
index 000000000000..7b8fe1b8d5bb
--- /dev/null
+++ b/Documentation/driver-api/cxl/allocation/page-allocator.rst
@@ -0,0 +1,85 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
+The Page Allocator
+==================
+
+The kernel page allocator services all general page allocation requests, such
+as :code:`kmalloc`. CXL configuration steps affect the behavior of the page
+allocator based on the selected `Memory Zone` and `NUMA node` the capacity is
+placed in.
+
+This section mostly focuses on how these configurations affect the page
+allocator (as of Linux v6.15) rather than the overall page allocator behavior.
+
+NUMA nodes and mempolicy
+========================
+Unless a task explicitly registers a mempolicy, the default memory policy
+of the linux kernel is to allocate memory from the `local NUMA node` first,
+and fall back to other nodes only if the local node is pressured.
+
+Generally, we expect to see local DRAM and CXL memory on separate NUMA nodes,
+with the CXL memory being non-local. Technically, however, it is possible
+for a compute node to have no local DRAM, and for CXL memory to be the
+`local` capacity for that compute node.
+
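+A task that wants CXL capacity directly can override the default policy with an
+explicit mempolicy. A minimal sketch using libnuma (assuming the CXL capacity
+is exposed as NUMA node 1 and libnuma is installed; build with
+:code:`-lnuma`): ::
+
+    #include <numa.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+        if (numa_available() < 0) {
+            fprintf(stderr, "NUMA is not supported on this system\n");
+            return 1;
+        }
+
+        /* Allocate 1GB on the (assumed) CXL node 1 */
+        void *p = numa_alloc_onnode(1UL << 30, 1);
+        if (!p) {
+            fprintf(stderr, "allocation on node 1 failed\n");
+            return 1;
+        }
+
+        numa_free(p, 1UL << 30);
+        return 0;
+    }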
+
+Memory Zones
+============
+CXL capacity may be onlined in :code:`ZONE_NORMAL` or :code:`ZONE_MOVABLE`.
+
+As of v6.15, the page allocator attempts to allocate from the highest
+available and compatible ZONE for an allocation from the local node first.
+
+An example of a `zone incompatibility` is attempting to service an allocation
+marked :code:`GFP_KERNEL` from :code:`ZONE_MOVABLE`. Kernel allocations are
+typically not migratable, and as a result can only be serviced from
+:code:`ZONE_NORMAL` or lower.
+
+To simplify this, the page allocator will prefer :code:`ZONE_MOVABLE` over
+:code:`ZONE_NORMAL` by default, but if :code:`ZONE_MOVABLE` is depleted, it
+will fall back to allocating from :code:`ZONE_NORMAL`.
+
+
+Zone and Node Quirks
+====================
+Let's consider a configuration where the local DRAM capacity is largely onlined
+into :code:`ZONE_NORMAL`, with no :code:`ZONE_MOVABLE` capacity present. The
+CXL capacity has the opposite configuration - all onlined in
+:code:`ZONE_MOVABLE`.
+
+Under the default allocation policy, the page allocator will completely skip
+:code:`ZONE_MOVABLE` as a valid allocation target. This is because, as of
+Linux v6.15, the page allocator does (approximately) the following: ::
+
+ for (each zone in local_node):
+
+ for (each node in fallback_order):
+
+ attempt_allocation(gfp_flags);
+
+Because the local node does not have :code:`ZONE_MOVABLE`, the CXL node is
+functionally unreachable for direct allocation. As a result, the only way
+for CXL capacity to be used is via `demotion` in the reclaim path.
+
+This configuration also means that if the DRAM node has :code:`ZONE_MOVABLE`
+capacity, then when that capacity is depleted, the page allocator will actually
+prefer CXL :code:`ZONE_MOVABLE` pages over DRAM :code:`ZONE_NORMAL` pages.
+
+We may wish to invert this priority in future Linux versions.
+
+If `demotion` and `swap` are disabled, Linux will begin to trigger OOM kills
+when the DRAM nodes are depleted. See the reclaim section for more details.
+
+
+CGroups and CPUSets
+===================
+Finally, assuming CXL memory is reachable via the page allocator (i.e. onlined
+in :code:`ZONE_NORMAL`), the :code:`cpusets.mems_allowed` interface may be used
+by containers to limit the accessibility of certain NUMA nodes for tasks in
+that container. Users may wish to utilize this in multi-tenant systems where
+some tasks prefer not to use slower memory.
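+
+A hedged sketch of such a restriction via the cgroup v2 :code:`cpuset.mems`
+file; the cgroup name (:code:`tenant0`) and the assumption that node 0 is the
+DRAM-only node are hypothetical. ::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        /* Restrict allocations for tasks in "tenant0" to NUMA node 0. */
+        FILE *f = fopen("/sys/fs/cgroup/tenant0/cpuset.mems", "w");
+        if (!f) {
+            perror("fopen");  /* requires the cpuset controller enabled */
+            return 1;
+        }
+        fputs("0", f);
+        fclose(f);
+        return 0;
+    }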
+
+In the reclaim section we'll discuss some limitations of this interface to
+prevent demotions of shared data to CXL memory (if demotions are enabled).
+
diff --git a/Documentation/driver-api/cxl/allocation/reclaim.rst b/Documentation/driver-api/cxl/allocation/reclaim.rst
new file mode 100644
index 000000000000..f40f1cae391a
--- /dev/null
+++ b/Documentation/driver-api/cxl/allocation/reclaim.rst
@@ -0,0 +1,51 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======
+Reclaim
+=======
+Another way CXL memory can be utilized *indirectly* is via the reclaim system
+in :code:`mm/vmscan.c`. Reclaim is engaged when memory capacity on the system
+becomes pressured based on global and cgroup-local `watermark` settings.
+
+In this section we won't discuss the `watermark` configurations, just how CXL
+memory can be consumed by various pieces of the reclaim system.
+
+Demotion
+========
+By default, the reclaim system will prefer swap (or zswap) when reclaiming
+memory. Enabling :code:`/sys/kernel/mm/numa/demotion_enabled` will cause vmscan
+to opportunistically prefer distant NUMA nodes to swap or zswap, if capacity
+is available.
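+
+A minimal sketch of flipping this knob from a C program (equivalent to
+:code:`echo 1 > /sys/kernel/mm/numa/demotion_enabled`, and requiring root): ::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        FILE *f = fopen("/sys/kernel/mm/numa/demotion_enabled", "w");
+        if (!f) {
+            perror("fopen");  /* needs root and a kernel with demotion */
+            return 1;
+        }
+        fputs("1", f);        /* write "0" to disable demotion again */
+        fclose(f);
+        return 0;
+    }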
+
+Demotion engages the :code:`mm/memory_tier.c` component to determine the
+next demotion node. The next demotion node is based on the :code:`HMAT`
+or :code:`CDAT` performance data.
+
+cpusets.mems_allowed quirk
+--------------------------
+In Linux v6.15 and below, demotion does not respect :code:`cpusets.mems_allowed`
+when migrating pages. As a result, if demotion is enabled, vmscan cannot
+guarantee isolation of a container's memory from nodes not set in mems_allowed.
+
+In Linux v6.XX and up, demotion does attempt to respect
+:code:`cpusets.mems_allowed`; however, certain classes of shared memory
+originally instantiated by another cgroup (such as common libraries - e.g.
+libc) may still be demoted. As a result, the mems_allowed interface still
+cannot provide perfect isolation from the remote nodes.
+
+ZSwap and Node Preference
+=========================
+In Linux v6.15 and below, ZSwap allocates memory from the local node of the
+processor for the new pages being compressed. Since pages being compressed
+are typically cold, the result is that a cold page becomes promoted - only to
+be later demoted as it ages off the LRU.
+
+In Linux v6.XX, ZSwap tries to prefer the node of the page being compressed
+as the allocation target for the compression page. This helps prevent
+thrashing.
+
+Demotion with ZSwap
+===================
+When enabling both Demotion and ZSwap, you create a situation where ZSwap
+will prefer the slowest form of CXL memory by default until that tier of
+memory is exhausted.
diff --git a/Documentation/driver-api/cxl/devices/device-types.rst b/Documentation/driver-api/cxl/devices/device-types.rst
new file mode 100644
index 000000000000..f5e4330c1cfe
--- /dev/null
+++ b/Documentation/driver-api/cxl/devices/device-types.rst
@@ -0,0 +1,165 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+Devices and Protocols
+=====================
+
+The type of CXL device (Memory, Accelerator, etc) dictates many configuration steps. This section
+covers some basic background on device types and on-device resources used by the platform and OS
+which impact configuration.
+
+Protocols
+=========
+
+There are three core protocols to CXL. For the purpose of this documentation,
+we will only discuss very high level definitions as the specific hardware
+details are largely abstracted away from Linux. See the CXL specification
+for more details.
+
+CXL.io
+------
+The basic interaction protocol, similar to PCIe configuration mechanisms.
+Typically used for initialization, configuration, and I/O access for anything
+other than memory (CXL.mem) or cache (CXL.cache) operations.
+
+The Linux CXL driver exposes access to .io functionality via the various sysfs
+interfaces and /dev/cxl/ devices (which expose direct access to device
+mailboxes).
+
+CXL.cache
+---------
+The mechanism by which a device may coherently access and cache host memory.
+
+Largely transparent to Linux once configured.
+
+CXL.mem
+---------
+The mechanism by which the CPU may coherently access and cache device memory.
+
+Largely transparent to Linux once configured.
+
+
+Device Types
+============
+
+Type-1
+------
+
+A Type-1 CXL device:
+
+* Supports cxl.io and cxl.cache protocols
+* Implements a fully coherent cache
+* Allows Device-to-Host coherence and Host-to-Device snoops.
+* Does NOT have host-managed device memory (HDM)
+
+A typical example of a type-1 device is a Smart NIC - which may want to
+directly operate on host memory (via DMA) to store incoming packets. These
+devices largely rely on CPU-attached memory.
+
+Type-2
+------
+
+A Type-2 CXL Device:
+
+* Supports cxl.io, cxl.cache, and cxl.mem protocols
+* Optionally implements coherent cache and Host-Managed Device Memory
+* Is typically an accelerator device with high-bandwidth memory.
+
+The primary difference between a type-1 and type-2 device is the presence
+of host-managed device memory, which allows the device to operate on a
+local memory bank - while the CPU still has coherent DMA to the same memory.
+
+This allows things like GPUs to expose their memory via DAX devices or file
+descriptors, allowing drivers and programs direct access to device memory
+rather than using block-transfer semantics.
+
+Type-3
+------
+
+A Type-3 CXL Device:
+
+* Supports cxl.io and cxl.mem
+* Implements Host-Managed Device Memory
+* May provide either Volatile or Persistent memory capacity (or both).
+
+A basic example of a type-3 device is a simple memory expander, whose
+local memory capacity is exposed to the CPU for access directly via
+basic coherent DMA.
+
+Switch
+------
+
+A CXL switch is a device capable of routing any CXL (and by extension, PCIe)
+protocol between upstream, downstream, or peer devices. Many devices, such
+as Multi-Logical Devices, imply the presence of switching in some manner.
+
+Logical Devices and Heads
+-------------------------
+
+A CXL device may present one or more "Logical Devices" to one or more hosts
+(via physical "Heads").
+
+A Single-Logical Device (SLD) is a device which presents a single device to
+one or more heads.
+
+A Multi-Logical Device (MLD) is a device which may present multiple logical
+devices to one or more heads.
+
+A Single-Headed Device exposes only a single physical connection.
+
+A Multi-Headed Device exposes multiple physical connections.
+
+MHSLD
+~~~~~
+A Multi-Headed Single-Logical Device (MHSLD) exposes a single logical
+device to multiple heads which may be connected to one or more discrete
+hosts. An example of this would be a simple memory-pool which may be
+statically configured (prior to boot) to expose portions of its memory
+to Linux via :doc:`CEDT <../platform/acpi/cedt>`.
+
+MHMLD
+~~~~~
+A Multi-Headed Multi-Logical Device (MHMLD) exposes multiple logical
+devices to multiple heads which may be connected to one or more discrete
+hosts. An example of this would be a Dynamic Capacity Device which may be
+configured at runtime to expose portions of its memory to Linux.
+
+Example Devices
+===============
+
+Memory Expander
+---------------
+The simplest form of Type-3 device is a memory expander. A memory expander
+exposes Host-Managed Device Memory (HDM) to Linux. This memory may be
+Volatile or Non-Volatile (Persistent).
+
+Memory Expanders will typically be considered a form of Single-Headed,
+Single-Logical Device - as their form factor will typically be an add-in
+card (AIC) or some other similar form factor.
+
+The Linux CXL driver provides support for static or dynamic configuration of
+basic memory expanders. The platform may program decoders prior to OS init
+(e.g. auto-decoders), or the user may program the fabric if the platform
+defers these operations to the OS.
+
+Multiple Memory Expanders may be added to an external chassis and exposed to
+a host via a head attached to a CXL switch. This is a "memory pool", and
+would be considered an MHSLD or MHMLD depending on the management capabilities
+provided by the switch platform.
+
+As of v6.14, Linux does not provide a formalized interface to manage non-DCD
+MHSLD or MHMLD devices.
+
+Dynamic Capacity Device (DCD)
+-----------------------------
+
+A Dynamic Capacity Device is a Type-3 device which provides dynamic management
+of memory capacity. The basic premise of a DCD is to provide an allocator-like
+interface for physical memory capacity to a "Fabric Manager" (an external
+host with privileges to change configurations for other hosts).
+
+A DCD manages "Memory Extents", which may be volatile or persistent. Extents
+may also be exclusive to a single host or shared across multiple hosts.
+
+As of v6.14, Linux does not provide a formalized interface to manage DCD
+devices; however, there is active work on LKML targeting a future release.
diff --git a/Documentation/driver-api/cxl/index.rst b/Documentation/driver-api/cxl/index.rst
index 965ba90e8fb7..9e1414ad3357 100644
--- a/Documentation/driver-api/cxl/index.rst
+++ b/Documentation/driver-api/cxl/index.rst
@@ -4,12 +4,50 @@
Compute Express Link
====================
-.. toctree::
- :maxdepth: 1
+CXL device configuration has a complex handoff between platform (Hardware,
+BIOS, EFI), OS (early boot, core kernel, driver), and user policy decisions
+that have impacts on each other. The docs here break up the configuration steps.
- memory-devices
- access-coordinates
+.. toctree::
+ :maxdepth: 2
+ :caption: Overview
+ theory-of-operation
maturity-map
+.. toctree::
+ :maxdepth: 2
+ :caption: Device Reference
+
+ devices/device-types
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Platform Configuration
+
+ platform/bios-and-efi
+ platform/acpi
+ platform/cdat
+ platform/example-configs
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Linux Kernel Configuration
+
+ linux/overview
+ linux/early-boot
+ linux/cxl-driver
+ linux/dax-driver
+ linux/memory-hotplug
+ linux/access-coordinates
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Memory Allocation
+
+ allocation/dax
+ allocation/page-allocator
+ allocation/reclaim
+   allocation/hugepages
+
.. only:: subproject and html
diff --git a/Documentation/driver-api/cxl/linux/access-coordinates.rst b/Documentation/driver-api/cxl/linux/access-coordinates.rst
new file mode 100644
index 000000000000..341a7c682043
--- /dev/null
+++ b/Documentation/driver-api/cxl/linux/access-coordinates.rst
@@ -0,0 +1,178 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+==================================
+CXL Access Coordinates Computation
+==================================
+
+Latency and Bandwidth Calculation
+=================================
+A memory region's performance coordinates (latency and bandwidth) are typically
+provided via ACPI tables :doc:`SRAT <../platform/acpi/srat>` and
+:doc:`HMAT <../platform/acpi/hmat>`. However, the platform firmware (BIOS) is
+not able to annotate those for CXL devices that are hot-plugged since they do
+not exist during platform firmware initialization. The CXL driver can compute
+the performance coordinates by retrieving data from several components.
+
+The :doc:`SRAT <../platform/acpi/srat>` provides a Generic Port Affinity
+subtable that ties a proximity domain to a device handle, which in this case
+would be the CXL hostbridge. Using this association, the performance
+coordinates for the Generic Port can be retrieved from the
+:doc:`HMAT <../platform/acpi/hmat>` subtable. This piece represents the
+performance coordinates between a CPU and a Generic Port (CXL hostbridge).
+
+The :doc:`CDAT <../platform/cdat>` provides the performance coordinates for
+the CXL device itself. That is the bandwidth and latency to access that device's
+memory region. The DSMAS subtable provides a DSMADHandle that is tied to a
+Device Physical Address (DPA) range. The DSLBIS subtable provides the
+performance coordinates that are tied to a DSMADHandle, and this ties the two
+table entries together to provide the performance coordinates for each DPA
+region. For example, if a device exports a DRAM region and a PMEM region,
+then there would be different performance characteristics for each of those
+regions.
+
+If there's a CXL switch in the topology, then the performance coordinates for the
+switch are provided by the SSLBIS subtable. This provides the bandwidth and latency
+for traversing the switch between the switch upstream port and the switch
+downstream port that points to the endpoint device.
+
+Simple topology example::
+
+ GP0/HB0/ACPI0016-0
+ RP0
+ |
+ | L0
+ |
+ SW 0 / USP0
+ SW 0 / DSP0
+ |
+ | L1
+ |
+ EP0
+
+In this example, there is a CXL switch between an endpoint and a root port.
+Latency in this example is calculated as such:
+
+* L(EP0) - Latency from EP0 CDAT DSMAS+DSLBIS
+* L(L1) - Link latency between EP0 and SW0DSP0
+* L(SW0) - Latency for the switch from SW0 CDAT SSLBIS
+* L(L0) - Link latency between SW0 and RP0
+* L(RP0) - Latency from root port to CPU via SRAT and HMAT (Generic Port)
+
+Total read and write latencies are the sum of all these parts.
+
+Bandwidth in this example is calculated as such:
+
+* B(EP0) - Bandwidth from EP0 CDAT DSMAS+DSLBIS
+* B(L1) - Link bandwidth between EP0 and SW0DSP0
+* B(SW0) - Bandwidth for the switch from SW0 CDAT SSLBIS
+* B(L0) - Link bandwidth between SW0 and RP0
+* B(RP0) - Bandwidth from root port to CPU via SRAT and HMAT (Generic Port)
+
+The total read and write bandwidth is the min() of all these parts.
+
+To calculate the link bandwidth, where LinkOperatingFrequency (GT/s) is the
+current negotiated link speed and PCIeCurrentLinkWidth is the number of lanes
+in the link: ::
+
+    DataRatePerLink (MB/s) = LinkOperatingFrequency / 8
+    Bandwidth (MB/s) = PCIeCurrentLinkWidth * DataRatePerLink
+
+To calculate the link latency: ::
+
+    LinkLatency (picoseconds) = FlitSize / LinkBandwidth (MB/s)
+
+See `CXL Memory Device SW Guide r1.0 <https://www.intel.com/content/www/us/en/content-details/643805/cxl-memory-device-software-guide.html>`_,
+section 2.11.3 and 2.11.4 for details.
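+
+The following standalone sketch works through these formulas for an assumed
+32 GT/s x8 link with a 256-byte flit (illustrative values, not read from any
+device): ::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        double link_speed_gts = 32.0; /* LinkOperatingFrequency (GT/s) */
+        int    link_width     = 8;    /* PCIeCurrentLinkWidth (lanes)  */
+        double flit_bytes     = 256.0;
+
+        /* DataRatePerLink (MB/s) = LinkOperatingFrequency / 8 */
+        double data_rate_mbs = link_speed_gts * 1000.0 / 8.0;
+
+        /* Bandwidth (MB/s) = PCIeCurrentLinkWidth * DataRatePerLink */
+        double bandwidth_mbs = link_width * data_rate_mbs;
+
+        /* LinkLatency (ps) = FlitSize / LinkBandwidth (MB/s) */
+        double latency_ps = flit_bytes * 1e6 / bandwidth_mbs;
+
+        printf("bandwidth: %.0f MB/s, link latency: %.0f ps\n",
+               bandwidth_mbs, latency_ps);
+        return 0;
+    }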
+
+In the end, the access coordinates for a constructed memory region are calculated from one
+or more memory partitions from each of the CXL device(s).
+
+Shared Upstream Link Calculation
+================================
+For certain CXL region constructions with endpoints behind CXL switches (SW) or
+Root Ports (RP), the total bandwidth for all the endpoints behind a switch may
+exceed that of the switch upstream link.
+A similar situation can occur within the host, upstream of the root ports.
+The CXL driver performs an additional pass after all the targets have
+arrived for a region in order to recalculate the bandwidths, with the
+possibility of the upstream link being a limiting factor in mind.
+
+The algorithm assumes the configuration is a symmetric topology, as that
+maximizes performance. When an asymmetric topology is detected, the calculation
+is aborted. An asymmetric topology is detected during the topology walk when
+the number of RPs detected as a grandparent is not equal to the number of
+devices iterated in the same iteration loop. The assumption is made that subtle
+asymmetry in properties does not happen and all paths to EPs are equal.
+
+There can be multiple switches under an RP. There can be multiple RPs under
+a CXL Host Bridge (HB). There can be multiple HBs under a CXL Fixed Memory
+Window Structure (CFMWS) in the :doc:`CEDT <../platform/acpi/cedt>`.
+
+An example hierarchy::
+
+ CFMWS 0
+ |
+ _________|_________
+ | |
+ ACPI0017-0 ACPI0017-1
+ GP0/HB0/ACPI0016-0 GP1/HB1/ACPI0016-1
+ | | | |
+ RP0 RP1 RP2 RP3
+ | | | |
+ SW 0 SW 1 SW 2 SW 3
+ | | | | | | | |
+ EP0 EP1 EP2 EP3 EP4 EP5 EP6 EP7
+
+Computation for the example hierarchy: ::
+
+    Min (GP0 to CPU BW,
+         Min(SW 0 Upstream Link to RP0 BW,
+             Min(SW0SSLBIS for SW0DSP0 (EP0), EP0 DSLBIS, EP0 Upstream Link) +
+             Min(SW0SSLBIS for SW0DSP1 (EP1), EP1 DSLBIS, EP1 Upstream link)) +
+         Min(SW 1 Upstream Link to RP1 BW,
+             Min(SW1SSLBIS for SW1DSP0 (EP2), EP2 DSLBIS, EP2 Upstream Link) +
+             Min(SW1SSLBIS for SW1DSP1 (EP3), EP3 DSLBIS, EP3 Upstream link))) +
+    Min (GP1 to CPU BW,
+         Min(SW 2 Upstream Link to RP2 BW,
+             Min(SW2SSLBIS for SW2DSP0 (EP4), EP4 DSLBIS, EP4 Upstream Link) +
+             Min(SW2SSLBIS for SW2DSP1 (EP5), EP5 DSLBIS, EP5 Upstream link)) +
+         Min(SW 3 Upstream Link to RP3 BW,
+             Min(SW3SSLBIS for SW3DSP0 (EP6), EP6 DSLBIS, EP6 Upstream Link) +
+             Min(SW3SSLBIS for SW3DSP1 (EP7), EP7 DSLBIS, EP7 Upstream link))))
+
+The calculation starts at cxl_region_shared_upstream_perf_update(). An xarray
+is created to collect all the endpoint bandwidths via the
+cxl_endpoint_gather_bandwidth() function. The min() of bandwidth from the
+endpoint CDAT and the upstream link bandwidth is calculated. If the endpoint
+has a CXL switch as a parent, then min() of calculated bandwidth and the
+bandwidth from the SSLBIS for the switch downstream port that is associated
+with the endpoint is calculated. The final bandwidth is stored in a
+'struct cxl_perf_ctx' in the xarray indexed by a device pointer. If the
+endpoint is directly attached to a root port (RP), the device pointer would be an
+RP device. If the endpoint is behind a switch, the device pointer would be the
+upstream device of the parent switch.
+
+At the next stage, the code walks through one or more switches if they exist
+in the topology. For endpoints directly attached to RPs, this step is skipped.
+If there is another switch upstream, the code takes the min() of the current
+gathered bandwidth, the upstream link bandwidth, and the SSLBIS bandwidth of
+that upstream switch.
+
+Once the topology walk reaches the RP, whether via direct attached endpoints
+or by walking through the switch(es), cxl_rp_gather_bandwidth() is called. At
+this point all the bandwidths are aggregated per host bridge, which is
+also the index for the resulting xarray.
+
+The next step is to take the min() of the per host bridge bandwidth and the
+bandwidth from the Generic Port (GP). The bandwidths for the GP are retrieved
+via ACPI tables (:doc:`SRAT <../platform/acpi/srat>` and
+:doc:`HMAT <../platform/acpi/hmat>`). The minimum bandwidths are aggregated
+under the same ACPI0017 device to form a new xarray.
+
+Finally, cxl_region_update_bandwidth() is called and the aggregated
+bandwidth from all the members of the last xarray is updated for the
+access coordinates residing in the cxl region (cxlr) context.
+
+QTG ID
+======
+Each CFMWS entry in the :doc:`CEDT <../platform/acpi/cedt>` has a QTG ID field.
+This field provides the ID that associates with a QoS Throttling Group (QTG)
+for the CFMWS window.
+Once the access coordinates are calculated, an ACPI Device Specific Method can
+be issued to the ACPI0016 device to retrieve the QTG ID based on the access
+coordinates provided. The QTG ID for the device can be used as guidance to
+match the device to the CFMWS and set up the best Linux root decoder for the
+device's performance.
diff --git a/Documentation/driver-api/cxl/linux/cxl-driver.rst b/Documentation/driver-api/cxl/linux/cxl-driver.rst
new file mode 100644
index 000000000000..9759e90c3cf1
--- /dev/null
+++ b/Documentation/driver-api/cxl/linux/cxl-driver.rst
@@ -0,0 +1,630 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+CXL Driver Operation
+====================
+
+The devices described in this section are present in ::
+
+ /sys/bus/cxl/devices/
+ /dev/cxl/
+
+The :code:`cxl-cli` library, maintained as part of the ndctl project, may
+be used to script interactions with these devices.
+
+Drivers
+=======
+The CXL driver is split into a number of drivers.
+
+* cxl_core - fundamental init interface and core object creation
+* cxl_port - initializes root and provides port enumeration interface.
+* cxl_acpi - initializes root decoders and interacts with ACPI data.
+* cxl_pmem / cxl_mem - initialize memory devices
+* cxl_pci - uses cxl_port to enumerate the actual fabric hierarchy.
+
+Driver Devices
+==============
+Here is an example from a single-socket system with 4 host bridges. Two host
+bridges have a single memory device attached, and the devices are interleaved
+into a single memory region. The memory region has been converted to dax. ::
+
+ # ls /sys/bus/cxl/devices/
+ dax_region0 decoder3.0 decoder6.0 mem0 port3
+ decoder0.0 decoder4.0 decoder6.1 mem1 port4
+ decoder1.0 decoder5.0 endpoint5 port1 region0
+ decoder2.0 decoder5.1 endpoint6 port2 root0
+
+
+.. kernel-render:: DOT
+   :alt: Digraph of CXL fabric describing host-bridge interleaving
+   :caption: Digraph of CXL fabric with a host-bridge interleave memory region
+
+ digraph foo {
+ "root0" -> "port1";
+ "root0" -> "port3";
+ "root0" -> "decoder0.0";
+ "port1" -> "endpoint5";
+ "port3" -> "endpoint6";
+ "port1" -> "decoder1.0";
+ "port3" -> "decoder3.0";
+ "endpoint5" -> "decoder5.0";
+ "endpoint6" -> "decoder6.0";
+ "decoder0.0" -> "region0";
+ "decoder0.0" -> "decoder1.0";
+ "decoder0.0" -> "decoder3.0";
+ "decoder1.0" -> "decoder5.0";
+ "decoder3.0" -> "decoder6.0";
+ "decoder5.0" -> "region0";
+ "decoder6.0" -> "region0";
+ "region0" -> "dax_region0";
+ "dax_region0" -> "dax0.0";
+ }
+
+For this section we'll explore the devices present in this configuration, but
+we'll explore more configurations in-depth in example configurations below.
+
+Base Devices
+------------
+Most devices in a CXL fabric are a `port` of some kind (because each
+device mostly routes requests from one device to the next, rather than
+providing a direct service).
+
+Root
+~~~~
+The `CXL Root` is a logical object created by the `cxl_acpi` driver during
+:code:`cxl_acpi_probe` - if the :code:`ACPI0017` `Compute Express Link
+Root Object` Device Class is found.
+
+The Root contains links to:
+
+* `Host Bridge Ports` defined by CHBS in the :doc:`CEDT<../platform/acpi/cedt>`
+
+* `Downstream Ports` typically connected to `Host Bridge Ports`.
+
+* `Root Decoders` defined by the CFMWS in the :doc:`CEDT<../platform/acpi/cedt>`
+
+::
+
+ # ls /sys/bus/cxl/devices/root0
+ decoder0.0 dport0 dport5 port2 subsystem
+ decoders_committed dport1 modalias port3 uevent
+ devtype dport4 port1 port4 uport
+
+ # cat /sys/bus/cxl/devices/root0/devtype
+ cxl_port
+
+ # cat port1/devtype
+ cxl_port
+
+ # cat decoder0.0/devtype
+ cxl_decoder_root
+
+The root is the first `logical port` in the CXL fabric, as presented by the Linux
+CXL driver. The `CXL root` is a special type of `switch port`, in that it
+only has downstream port connections.
+
+Port
+~~~~
+A `port` object is better described as a `switch port`. It may represent a
+host bridge to the root or an actual switch port on a switch. A `switch port`
+contains one or more decoders used to route memory requests to downstream ports,
+which may be connected to another `switch port` or an `endpoint port`.
+
+::
+
+ # ls /sys/bus/cxl/devices/port1
+ decoder1.0 dport0 driver parent_dport uport
+ decoders_committed dport113 endpoint5 subsystem
+ devtype dport2 modalias uevent
+
+ # cat devtype
+ cxl_port
+
+ # cat decoder1.0/devtype
+ cxl_decoder_switch
+
+ # cat endpoint5/devtype
+ cxl_port
+
+CXL `Host Bridges` in the fabric are probed during :code:`cxl_acpi_probe` at
+the time the `CXL Root` is probed. This allows for the immediate logical
+connection between the root and host bridge.
+
+* The root has a downstream port connection to a host bridge
+
+* The host bridge has an upstream port connection to the root.
+
+* The host bridge has one or more downstream port connections to switch
+ or endpoint ports.
+
+A `Host Bridge` is a special type of CXL `switch port`. It is explicitly
+defined in the ACPI specification via `ACPI0016` ID. `Host Bridge` ports
+will be probed at `acpi_probe` time, while similar ports on an actual switch
+will be probed later. Otherwise, switch and host bridge ports look very
+similar - they both contain switch decoders which route accesses between
+upstream and downstream ports.
+
+Endpoint
+~~~~~~~~
+An `endpoint` is a terminal port in the fabric. This is a `logical device`,
+and may be one of many `logical devices` presented by a memory device. It
+is still considered a type of `port` in the fabric.
+
+An `endpoint` contains `endpoint decoders` and the device's Coherent Device
+Attribute Table (which describes the device's capabilities). ::
+
+ # ls /sys/bus/cxl/devices/endpoint5
+ CDAT decoders_committed modalias uevent
+ decoder5.0 devtype parent_dport uport
+ decoder5.1 driver subsystem
+
+ # cat /sys/bus/cxl/devices/endpoint5/devtype
+ cxl_port
+
+ # cat /sys/bus/cxl/devices/endpoint5/decoder5.0/devtype
+ cxl_decoder_endpoint
+
+
+Memory Device (memdev)
+~~~~~~~~~~~~~~~~~~~~~~
+A `memdev` is probed and added by the `cxl_pci` driver in :code:`cxl_pci_probe`
+and is managed by the `cxl_mem` driver. It primarily provides the `IOCTL`
+interface to a memory device, via :code:`/dev/cxl/memN`, and exposes various
+device configuration data. ::
+
+ # ls /sys/bus/cxl/devices/mem0
+ dev firmware_version payload_max security uevent
+ driver label_storage_size pmem serial
+ firmware numa_node ram subsystem
+
+A Memory Device is a discrete base object that is not a port. While the
+physical device it belongs to may also host an `endpoint`, the relationship
+between an `endpoint` and a `memdev` is not captured in sysfs.
+
+Port Relationships
+~~~~~~~~~~~~~~~~~~
+In our example described above, there are four host bridges attached to the
+root, and two of the host bridges have one endpoint attached.
+
+.. kernel-render:: DOT
+   :alt: Digraph of CXL fabric describing host-bridge interleaving
+   :caption: Digraph of CXL fabric with a host-bridge interleave memory region
+
+ digraph foo {
+ "root0" -> "port1";
+ "root0" -> "port2";
+ "root0" -> "port3";
+ "root0" -> "port4";
+ "port1" -> "endpoint5";
+ "port3" -> "endpoint6";
+ }
+
+Decoders
+--------
+A `Decoder` is short for a CXL Host-Managed Device Memory (HDM) Decoder. It is
+a device that routes accesses through the CXL fabric to an endpoint, and at
+the endpoint translates `Host Physical` to `Device Physical` addresses.
+
+The CXL 3.1 specification heavily implies that only endpoint decoders should
+engage in translation of `Host Physical Address` to `Device Physical Address`.
+::
+
+ 8.2.4.20 CXL HDM Decoder Capability Structure
+
+ IMPLEMENTATION NOTE
+ CXL Host Bridge and Upstream Switch Port Decode Flow
+
+ IMPLEMENTATION NOTE
+ Device Decode Logic
+
+These notes imply that there are two logical groups of decoders.
+
+* Routing Decoder - a decoder which routes accesses but does not translate
+ addresses from HPA to DPA.
+
+* Translating Decoder - a decoder which translates accesses from HPA to DPA
+ for an endpoint to service.
+
+The CXL drivers distinguish 3 decoder types: root, switch, and endpoint. Only
+endpoint decoders are Translating Decoders, all others are Routing Decoders.
+
+.. note:: PLATFORM VENDORS BE AWARE
+
+ Linux makes a strong assumption that endpoint decoders are the only decoder
+ in the fabric that actively translates HPA to DPA. Linux assumes routing
+ decoders pass the HPA unchanged to the next decoder in the fabric.
+
+ It is therefore assumed that any given decoder in the fabric will have an
+ address range that is a subset of its upstream port decoder. Any deviation
+    from this scheme is undefined per the specification. Linux prioritizes
+ spec-defined / architectural behavior.
+
+Decoders may have one or more `Downstream Targets` if configured to interleave
+memory accesses. This will be presented in sysfs via the :code:`target_list`
+parameter.
+
+Root Decoder
+~~~~~~~~~~~~
+A `Root Decoder` is a logical construct of the physical address and interleave
+configurations present in the CFMWS field of the :doc:`CEDT
+<../platform/acpi/cedt>`.
+Linux presents this information as a decoder present in the `CXL Root`. We
+consider this a `Root Decoder`, though technically it exists on the boundary
+of the CXL specification and platform-specific CXL root implementations.
+
+Linux considers these logical decoders a type of `Routing Decoder`; a root
+decoder is the first decoder in the CXL fabric to receive a memory access from
+the platform's memory controllers.
+
+`Root Decoders` are created during :code:`cxl_acpi_probe`. One root decoder
+is created per CFMWS entry in the :doc:`CEDT <../platform/acpi/cedt>`.
+
+The :code:`target_list` parameter is filled by the CFMWS target fields. Targets
+of a root decoder are `Host Bridges`, which means interleave done at the root
+decoder level is an `Inter-Host-Bridge Interleave`.
+
+Only root decoders are capable of `Inter-Host-Bridge Interleave`.
+
+Such interleaves must be configured by the platform and described in the ACPI
+CEDT CFMWS, as the target CXL host bridge UIDs in the CFMWS must match the CXL
+host bridge UIDs in the CHBS field of the :doc:`CEDT
+<../platform/acpi/cedt>` and the UID field of CXL Host Bridges defined in
+the :doc:`DSDT <../platform/acpi/dsdt>`.
+
+Interleave settings in a root decoder describe how to interleave accesses among
+the *immediate downstream targets*, not the entire interleave set.
+
+The memory range described in the root decoder is used to
+
+1) Create a memory region (:code:`region0` in this example), and
+
+2) Associate the region with an IO Memory Resource (:code:`kernel/resource.c`)
+
+::
+
+ # ls /sys/bus/cxl/devices/decoder0.0/
+ cap_pmem devtype region0
+ cap_ram interleave_granularity size
+ cap_type2 interleave_ways start
+ cap_type3 locked subsystem
+ create_ram_region modalias target_list
+ delete_region qos_class uevent
+
+ # cat /sys/bus/cxl/devices/decoder0.0/region0/resource
+ 0xc050000000
+
+The IO Memory Resource is created during early boot when the CFMWS region is
+identified in the EFI Memory Map or E820 table (on x86).
+
+Root decoders are defined as a separate devtype, but are also a type
+of `Switch Decoder` due to having downstream targets. ::
+
+ # cat /sys/bus/cxl/devices/decoder0.0/devtype
+ cxl_decoder_root
+
+Switch Decoder
+~~~~~~~~~~~~~~
+Any non-root, non-endpoint decoder is considered a `Switch Decoder`, and will
+present with the type :code:`cxl_decoder_switch`. Both `Host Bridge` and `CXL
+Switch` (device) decoders are of type :code:`cxl_decoder_switch`. ::
+
+ # ls /sys/bus/cxl/devices/decoder1.0/
+ devtype locked size target_list
+ interleave_granularity modalias start target_type
+ interleave_ways region subsystem uevent
+
+ # cat /sys/bus/cxl/devices/decoder1.0/devtype
+ cxl_decoder_switch
+
+ # cat /sys/bus/cxl/devices/decoder1.0/region
+ region0
+
+A `Switch Decoder` has associations between a region defined by a root
+decoder and downstream target ports. Interleaving done within a switch decoder
+is a multi-downstream-port interleave (or `Intra-Host-Bridge Interleave` for
+host bridges).
+
+Interleave settings in a switch decoder describe how to interleave accesses
+among the *immediate downstream targets*, not the entire interleave set.
+
+Switch decoders are created during :code:`cxl_switch_port_probe` in the
+:code:`cxl_port` driver, and are created based on a PCI device's DVSEC
+registers.
+
+Switch decoder programming is validated during probe if the platform programs
+them during boot (See `Auto Decoders` below), or on commit if programmed at
+runtime (See `Runtime Programming` below).
+
+
+Endpoint Decoder
+~~~~~~~~~~~~~~~~
+Any decoder attached to a *terminal* point in the CXL fabric (`An Endpoint`) is
+considered an `Endpoint Decoder`. Endpoint decoders are of type
+:code:`cxl_decoder_endpoint`. ::
+
+ # ls /sys/bus/cxl/devices/decoder5.0
+ devtype locked start
+ dpa_resource modalias subsystem
+ dpa_size mode target_type
+ interleave_granularity region uevent
+ interleave_ways size
+
+ # cat /sys/bus/cxl/devices/decoder5.0/devtype
+ cxl_decoder_endpoint
+
+ # cat /sys/bus/cxl/devices/decoder5.0/region
+ region0
+
+An `Endpoint Decoder` has an association with a region defined by a root
+decoder and describes the device-local resource associated with this region.
+
+Unlike root and switch decoders, endpoint decoders translate `Host Physical` to
+`Device Physical` address ranges. The interleave settings on an endpoint
+therefore describe the entire *interleave set*.
+
+`Device Physical Address` regions must be committed in-order. For example, the
+DPA region starting at 0x80000000 cannot be committed before the DPA region
+starting at 0x0.
+
+As of Linux v6.15, Linux does not support *imbalanced* interleave setups; all
+endpoints in an interleave set are expected to have the same interleave
+settings (granularity and ways must be the same).
+
+Endpoint decoders are created during :code:`cxl_endpoint_port_probe` in the
+:code:`cxl_port` driver, and are created based on a PCI device's DVSEC registers.
+
+Decoder Relationships
+~~~~~~~~~~~~~~~~~~~~~
+In our example described above, there is one root decoder which routes memory
+accesses over two host bridges. Each host bridge has a decoder which routes
+access to their singular endpoint targets. Each endpoint has a decoder which
+translates HPA to DPA and services the memory request.
+
+The driver validates relationships between ports by decoder programming, so
+we can think of decoders being related in a similarly hierarchical fashion to
+ports.
+
+.. kernel-render:: DOT
+   :alt: Digraph of hierarchical relationship between root, switch, and endpoint decoders.
+   :caption: Digraph of CXL root, switch, and endpoint decoders.
+
+ digraph foo {
+ "root0" -> "decoder0.0";
+ "decoder0.0" -> "decoder1.0";
+ "decoder0.0" -> "decoder3.0";
+ "decoder1.0" -> "decoder5.0";
+ "decoder3.0" -> "decoder6.0";
+ }
+
+Regions
+-------
+
+Memory Region
+~~~~~~~~~~~~~
+A `Memory Region` is a logical construct that connects a set of CXL ports in
+the fabric to an IO Memory Resource. It is ultimately used to expose the memory
+on these devices to the DAX subsystem via a `DAX Region`.
+
+An example RAM region: ::
+
+ # ls /sys/bus/cxl/devices/region0/
+ access0 devtype modalias subsystem uuid
+ access1 driver mode target0
+ commit interleave_granularity resource target1
+ dax_region0 interleave_ways size uevent
+
+A memory region can be constructed during endpoint probe, if decoders were
+programmed by BIOS/EFI (see `Auto Decoders`), or by creating a region manually
+via a `Root Decoder`'s :code:`create_ram_region` or :code:`create_pmem_region`
+interfaces.
+
+The interleave settings in a `Memory Region` describe the configuration of the
+`Interleave Set` - and are what can be expected to be seen in the endpoint
+interleave settings.
+
+.. kernel-render:: DOT
+ :alt: Digraph of CXL memory region relationships between root and endpoint decoders.
+ :caption: Regions are created based on root decoder configurations. Endpoint decoders
+ must be programmed with the same interleave settings as the region.
+
+ digraph foo {
+ "root0" -> "decoder0.0";
+ "decoder0.0" -> "region0";
+ "region0" -> "decoder5.0";
+ "region0" -> "decoder6.0";
+ }
+
+DAX Region
+~~~~~~~~~~
+A `DAX Region` is used to convert a CXL `Memory Region` to a DAX device. A
+DAX device may then be accessed directly via a file descriptor interface, or
+converted to System RAM via the DAX kmem driver. See the DAX driver section
+for more details. ::
+
+ # ls /sys/bus/cxl/devices/dax_region0/
+ dax0.0 devtype modalias uevent
+ dax_region driver subsystem
+
+Mailbox Interfaces
+------------------
+A mailbox command interface for each device is exposed in ::
+
+ /dev/cxl/mem0
+ /dev/cxl/mem1
+
+These mailboxes may receive any specification-defined command. Raw commands
+(custom commands) can only be sent to these interfaces if the build config
+:code:`CXL_MEM_RAW_COMMANDS` is set. This is considered a debug and/or
+development interface, not an officially supported mechanism for creation
+of vendor-specific commands (see the `fwctl` subsystem for that).
+
+Decoder Programming
+===================
+
+Runtime Programming
+-------------------
+During probe, the only decoders *required* to be programmed are `Root Decoders`.
+In reality, `Root Decoders` are a logical construct to describe the memory
+region and interleave configuration at the host bridge level - as described
+in the ACPI CEDT CFMWS.
+
+All other `Switch` and `Endpoint` decoders may be programmed by the user
+at runtime - if the platform supports such configurations.
+
+This interaction is what creates a `Software Defined Memory` environment.
+
+See the :code:`cxl-cli` documentation for more information about how to
+configure CXL decoders at runtime.
+
+Auto Decoders
+-------------
+Auto Decoders are decoders programmed by BIOS/EFI at boot time, and are
+almost always locked (cannot be changed). This is done by a platform
+which may have a static configuration - or certain quirks which may prevent
+dynamic runtime changes to the decoders (such as requiring additional
+controller programming within the CPU complex outside the scope of CXL).
+
+Auto Decoders are probed automatically as long as the devices and memory
+regions they are associated with probe without issue. When probing Auto
+Decoders, the driver's primary responsibility is to ensure the fabric is
+sane - as if it were validating runtime programmed regions and decoders.
+
+If Linux cannot validate auto-decoder configuration, the memory will not
+be surfaced as a DAX device - and therefore not be exposed to the page
+allocator - effectively stranding it.
+
+Interleave
+----------
+
+The Linux CXL driver supports `Cross-Link First` interleave. This dictates
+how interleave is programmed at each decoder step, as the driver validates
+the relationships between a decoder and its parent.
+
+For example, in a `Cross-Link First` interleave setup with 16 endpoints
+attached to 4 host bridges, Linux expects the following ways/granularity
+across the root, host bridge, and endpoints respectively.
+
+.. flat-table:: 4x4 cross-link first interleave settings
+
+ * - decoder
+ - ways
+ - granularity
+
+ * - root
+ - 4
+ - 256
+
+ * - host bridge
+ - 4
+ - 1024
+
+ * - endpoint
+ - 16
+ - 256
+
+At the root, a given access will be routed to the
+:code:`((HPA / 256) % 4)th` target host bridge. Within a host bridge, it is
+routed to the :code:`((HPA / 1024) % 4)th` target endpoint. Each endpoint
+translates based on the entire 16 device interleave set.
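+
+A small standalone sketch of this routing arithmetic for the 4x4 example
+above (a hypothetical helper, not code from the CXL driver): ::
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+        uint64_t hpa = 0x12345;       /* example host physical address */
+
+        /* Root decoder: 4 ways at 256B granularity. */
+        unsigned int host_bridge = (hpa / 256) % 4;
+
+        /* Host bridge decoder: 4 ways at 1024B granularity. */
+        unsigned int endpoint = (hpa / 1024) % 4;
+
+        printf("HPA %#llx -> host bridge %u, endpoint %u within it\n",
+               (unsigned long long)hpa, host_bridge, endpoint);
+        return 0;
+    }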
+
+Unbalanced interleave sets are not supported - decoders at a similar point
+in the hierarchy (e.g. all host bridge decoders) must have the same ways and
+granularity configuration.
+
+At Root
+~~~~~~~
+Root decoder interleave is defined by CFMWS field of the :doc:`CEDT
+<../platform/acpi/cedt>`. The CEDT may actually define multiple CFMWS
+configurations to describe the same physical capacity, with the intent to allow
+users to decide at runtime whether to online memory as interleaved or
+non-interleaved. ::
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Window base address : 0000000100000000
+ Window size : 0000000100000000
+ Interleave Members (2^n) : 00
+ Interleave Arithmetic : 00
+ First Target : 00000007
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Window base address : 0000000200000000
+ Window size : 0000000100000000
+ Interleave Members (2^n) : 00
+ Interleave Arithmetic : 00
+ First Target : 00000006
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Window base address : 0000000300000000
+ Window size : 0000000200000000
+ Interleave Members (2^n) : 01
+ Interleave Arithmetic : 00
+ First Target : 00000007
+ Next Target : 00000006
+
+In this example, the CFMWS defines two discrete non-interleaved 4GB regions
+for each host bridge, and one interleaved 8GB region that targets both. This
+would result in 3 root decoders presenting in the root. ::
+
+ # ls /sys/bus/cxl/devices/root0/decoder*
+ decoder0.0 decoder0.1 decoder0.2
+
+ # cat /sys/bus/cxl/devices/decoder0.0/target_list start size
+ 7
+ 0x100000000
+ 0x100000000
+
+ # cat /sys/bus/cxl/devices/decoder0.1/target_list start size
+ 6
+ 0x200000000
+ 0x100000000
+
+ # cat /sys/bus/cxl/devices/decoder0.2/target_list start size
+ 7,6
+ 0x300000000
+ 0x200000000
+
+These decoders are not runtime programmable. They are used to generate a
+`Memory Region` to bring this memory online with runtime programmed settings
+at the `Switch` and `Endpoint` decoders.
+
+At Host Bridge or Switch
+~~~~~~~~~~~~~~~~~~~~~~~~
+`Host Bridge` and `Switch` decoders are programmable via the following fields:
+
+- :code:`start` - the HPA region associated with the memory region
+- :code:`size` - the size of the region
+- :code:`target_list` - the list of downstream ports
+- :code:`interleave_ways` - the number of downstream ports to interleave across
+- :code:`interleave_granularity` - the granularity to interleave at.
+
+Linux expects the :code:`interleave_granularity` of switch decoders to be
+derived from their upstream port connections. In `Cross-Link First` interleave
+configurations, the :code:`interleave_granularity` of a decoder is equal to
+:code:`parent_interleave_granularity * parent_interleave_ways`.
+
+At Endpoint
+~~~~~~~~~~~
+`Endpoint Decoders` are programmed similarly to Host Bridge and Switch decoders,
+with the exception that the ways and granularity are defined by the interleave
+set (e.g. the interleave settings defined by the associated `Memory Region`).
+
+- :code:`start` - the HPA region associated with the memory region
+- :code:`size` - the size of the region
+- :code:`interleave_ways` - the number of endpoints in the interleave set
+- :code:`interleave_granularity` - the granularity to interleave at.
+
+These settings are used by endpoint decoders to *Translate* memory requests
+from HPA to DPA. This is why they must be aware of the entire interleave set.
+
+Linux does not support unbalanced interleave configurations. As a result, all
+endpoints in an interleave set must have the same ways and granularity.
+
+Example Configurations
+======================
+.. toctree::
+ :maxdepth: 1
+
+ example-configurations/single-device.rst
+ example-configurations/hb-interleave.rst
+ example-configurations/intra-hb-interleave.rst
+ example-configurations/multi-interleave.rst
diff --git a/Documentation/driver-api/cxl/linux/dax-driver.rst b/Documentation/driver-api/cxl/linux/dax-driver.rst
new file mode 100644
index 000000000000..10d953a2167b
--- /dev/null
+++ b/Documentation/driver-api/cxl/linux/dax-driver.rst
@@ -0,0 +1,43 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+DAX Driver Operation
+====================
+The `Direct Access Device` driver was originally designed to provide a
+memory-like access mechanism to memory-like block-devices. It was
+extended to support CXL Memory Devices, which provide user-configured
+memory capacity.
+
+The CXL subsystem depends on the DAX subsystem to either:
+
+- Generate a file-like interface to userland via :code:`/dev/daxN.Y`, or
+- Engage the memory-hotplug interface to add CXL memory to the page allocator.
+
+The DAX subsystem exposes this ability through the `cxl_dax_region` driver.
+A `dax_region` provides the translation between a CXL `memory_region` and
+a `DAX Device`.
+
+DAX Device
+==========
+A `DAX Device` is a file-like interface exposed in :code:`/dev/daxN.Y`. A
+memory region exposed via a dax device can be accessed by userland software
+via the :code:`mmap()` system-call. The result is direct mappings to the
+CXL capacity in the task's page tables.
+
+Users wishing to manually handle allocation of CXL memory should use this
+interface.
+
+kmem conversion
+===============
+The :code:`dax_kmem` driver converts a `DAX Device` into a series of `hotplug
+memory blocks` managed by :code:`kernel/memory-hotplug.c`. This capacity
+will be exposed to the kernel page allocator in the user-selected memory
+zone.
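+
+Onlining into a particular zone is normally handled by udev rules or
+:code:`daxctl`; the hedged sketch below drives the underlying sysfs interface
+directly (the memory block number 512 is a hypothetical example): ::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        FILE *f = fopen("/sys/devices/system/memory/memory512/state", "w");
+        if (!f) {
+            perror("fopen");
+            return 1;
+        }
+        /* "online_kernel" would place the block in ZONE_NORMAL instead. */
+        fputs("online_movable", f);
+        fclose(f);
+        return 0;
+    }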
+
+The :code:`memmap_on_memory` setting (both global and DAX device local)
+dictates where the kernel will allocate the :code:`struct folio` descriptors
+for this memory. If :code:`memmap_on_memory` is set, memory
+hotplug will set aside a portion of the memory block capacity to allocate
+folios. If unset, the memory is allocated via a normal :code:`GFP_KERNEL`
+allocation - and as a result will most likely land on the local NUMA node of
+the CPU executing the hotplug operation.
diff --git a/Documentation/driver-api/cxl/linux/early-boot.rst b/Documentation/driver-api/cxl/linux/early-boot.rst
new file mode 100644
index 000000000000..a7fc6fc85fbe
--- /dev/null
+++ b/Documentation/driver-api/cxl/linux/early-boot.rst
@@ -0,0 +1,137 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+Linux Init (Early Boot)
+=======================
+
+Linux configuration is split into two major steps: Early-Boot and everything else.
+
+During early boot, Linux sets up immutable resources (such as NUMA nodes), while
+later operations include things like driver probe and memory hotplug. Linux may
+read EFI and ACPI information throughout this process to configure logical
+representations of the devices.
+
+During Linux Early Boot stage (functions in the kernel that have the __init
+decorator), the system takes the resources created by EFI/BIOS
+(:doc:`ACPI tables <../platform/acpi>`) and turns them into resources that the
+kernel can consume.
+
+
+BIOS, Build and Boot Options
+============================
+
+There are 4 BIOS, build, and boot options that need to be considered, which
+dictate how memory will be managed by Linux during early boot.
+
+* EFI_MEMORY_SP
+
+ * BIOS/EFI Option that dictates whether memory is SystemRAM or
+ Specific Purpose. Specific Purpose memory will be deferred to
+ drivers to manage - and not immediately exposed as system RAM.
+
+* CONFIG_EFI_SOFT_RESERVE
+
+ * Linux Build config option that dictates whether the kernel supports
+ Specific Purpose memory.
+
+* CONFIG_MHP_DEFAULT_ONLINE_TYPE
+
+ * Linux Build config that dictates whether and how Specific Purpose memory
+ converted to a dax device should be managed (left as DAX or onlined as
+ SystemRAM in ZONE_NORMAL or ZONE_MOVABLE).
+
+* nosoftreserve
+
+ * Linux kernel boot option that dictates whether Soft Reserve should be
+ supported. Similar to CONFIG_EFI_SOFT_RESERVE.
+
+Memory Map Creation
+===================
+
+While the kernel parses the EFI memory map, if :code:`Specific Purpose` memory
+is supported and detected, it will set this region aside as
+:code:`SOFT_RESERVED`.
+
+If :code:`EFI_MEMORY_SP=0`, :code:`CONFIG_EFI_SOFT_RESERVE=n`, or
+:code:`nosoftreserve=y` - Linux will default a CXL device memory region to
+SystemRAM. This will expose the memory to the kernel page allocator in
+:code:`ZONE_NORMAL`, making it available for use for most allocations (including
+:code:`struct page` and page tables).
+
+If `Specific Purpose` is set and supported, :code:`CONFIG_MHP_DEFAULT_ONLINE_TYPE_*`
+dictates whether the memory is onlined by default (:code:`_OFFLINE` or
+:code:`_ONLINE_*`), and if online which zone to online this memory to by default
+(:code:`_NORMAL` or :code:`_MOVABLE`).
+
+If placed in :code:`ZONE_MOVABLE`, the memory will not be available for most
+kernel allocations (such as :code:`struct page` or page tables). This may
+significantly impact performance depending on the memory capacity of the system.
+
+
+NUMA Node Reservation
+=====================
+
+Linux refers to the proximity domains (:code:`PXM`) defined in the :doc:`SRAT
+<../platform/acpi/srat>` to create NUMA nodes in :code:`acpi_numa_init`.
+Typically, there is a 1:1 relation between :code:`PXM` and NUMA node IDs.
+
+The SRAT is the only ACPI defined way of defining Proximity Domains. Linux
+chooses to, at most, map those 1:1 with NUMA nodes.
+:doc:`CEDT <../platform/acpi/cedt>` adds a description of SPA ranges which
+Linux may map to one or more NUMA nodes.
+
+If there are CXL ranges in the CFMWS but not in SRAT, then a fake :code:`PXM`
+is created (as of v6.15). In the future, Linux may reject CFMWS not described
+by SRAT due to the ambiguity of proximity domain association.
+
+It is important to note that NUMA node creation cannot be done at runtime. All
+possible NUMA nodes are identified at :code:`__init` time, more specifically
+during :code:`mm_init`. The CEDT and SRAT must contain sufficient :code:`PXM`
+data for Linux to identify NUMA nodes and their associated memory regions.
+
+The relevant code exists in: :code:`linux/drivers/acpi/numa/srat.c`.
+
+See :doc:`Example Platform Configurations <../platform/example-configs>`
+for more info.
+
+Memory Tiers Creation
+=====================
+Memory tiers are a collection of NUMA nodes grouped by performance characteristics.
+During :code:`__init`, Linux initializes the system with a default memory tier that
+contains all nodes marked :code:`N_MEMORY`.
+
+:code:`memory_tier_init` is called at boot for all nodes with memory online by
+default. :code:`memory_tier_late_init` is called during late-init for nodes set up
+during driver configuration.
+
+Nodes are only marked :code:`N_MEMORY` if they have *online* memory.
+
+Tier membership can be inspected in ::
+
+ /sys/devices/virtual/memory_tiering/memory_tierN/nodelist
+ 0-1
+
+If nodes with a clear difference in performance are grouped together, check the
+:doc:`HMAT <../platform/acpi/hmat>` and CDAT information for the CXL nodes. All
+nodes default to the DRAM tier, unless HMAT/CDAT information is reported to the
+memory_tier component via `access_coordinates`.
+
+For more, see :doc:`CXL access coordinates documentation
+<../linux/access-coordinates>`.
+
+Contiguous Memory Allocation
+============================
+The contiguous memory allocator (CMA) enables reservation of contiguous memory
+regions on NUMA nodes during early boot. However, CMA cannot reserve memory
+on NUMA nodes that are not online during early boot. ::
+
+ void __init hugetlb_cma_reserve(int order) {
+ if (!node_online(nid))
+ /* do not allow reservations */
+ }
+
+This means if users intend to defer management of CXL memory to the driver, CMA
+cannot be used to guarantee huge page allocations. If enabling CXL memory as
+SystemRAM in `ZONE_NORMAL` during early boot, CMA reservations per-node can be
+made with the :code:`cma_pernuma` or :code:`numa_cma` kernel command line
+parameters.
diff --git a/Documentation/driver-api/cxl/linux/example-configurations/hb-interleave.rst b/Documentation/driver-api/cxl/linux/example-configurations/hb-interleave.rst
new file mode 100644
index 000000000000..f071490763a2
--- /dev/null
+++ b/Documentation/driver-api/cxl/linux/example-configurations/hb-interleave.rst
@@ -0,0 +1,314 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================
+Inter-Host-Bridge Interleave
+============================
+This cxl-cli configuration dump shows the following host configuration:
+
+* A single socket system with one CXL root
+* CXL Root has Four (4) CXL Host Bridges
+* Two CXL Host Bridges have a single CXL Memory Expander Attached
+* The CXL root is configured to interleave across the two host bridges.
+
+This output is generated by :code:`cxl list -v` and describes the relationships
+between objects exposed in :code:`/sys/bus/cxl/devices/`.
+
+::
+
+ [
+ {
+ "bus":"root0",
+ "provider":"ACPI.CXL",
+ "nr_dports":4,
+ "dports":[
+ {
+ "dport":"pci0000:00",
+ "alias":"ACPI0016:01",
+ "id":0
+ },
+ {
+ "dport":"pci0000:a8",
+ "alias":"ACPI0016:02",
+ "id":4
+ },
+ {
+ "dport":"pci0000:2a",
+ "alias":"ACPI0016:03",
+ "id":1
+ },
+ {
+ "dport":"pci0000:d2",
+ "alias":"ACPI0016:00",
+ "id":5
+ }
+ ],
+
+This chunk shows the CXL "bus" (root0) has 4 downstream ports attached to CXL
+Host Bridges. The `Root` can be considered the singular upstream port attached
+to the platform's memory controller - which routes memory requests to it.
+
+The `ports:root0` section lays out how each of these downstream ports are
+configured. If a port is not configured (id's 0 and 1), they are omitted.
+
+::
+
+ "ports:root0":[
+ {
+ "port":"port1",
+ "host":"pci0000:d2",
+ "depth":1,
+ "nr_dports":3,
+ "dports":[
+ {
+ "dport":"0000:d2:01.1",
+ "alias":"device:02",
+ "id":0
+ },
+ {
+ "dport":"0000:d2:01.3",
+ "alias":"device:05",
+ "id":2
+ },
+ {
+ "dport":"0000:d2:07.1",
+ "alias":"device:0d",
+ "id":113
+ }
+ ],
+
+This chunk shows the available downstream ports associated with the CXL Host
+Bridge :code:`port1`. In this case, :code:`port1` has 3 available downstream
+ports: :code:`dport0`, :code:`dport2`, and :code:`dport113`.
+
+::
+
+ "endpoints:port1":[
+ {
+ "endpoint":"endpoint5",
+ "host":"mem0",
+ "parent_dport":"0000:d2:01.1",
+ "depth":2,
+ "memdev":{
+ "memdev":"mem0",
+ "ram_size":137438953472,
+ "serial":0,
+ "numa_node":0,
+ "host":"0000:d3:00.0"
+ },
+ "decoders:endpoint5":[
+ {
+ "decoder":"decoder5.0",
+ "resource":825975898112,
+ "size":274877906944,
+ "interleave_ways":2,
+ "interleave_granularity":256,
+ "region":"region0",
+ "dpa_resource":0,
+ "dpa_size":137438953472,
+ "mode":"ram"
+ }
+ ]
+ }
+ ],
+
+This chunk shows the endpoints attached to the host bridge :code:`port1`.
+
+:code:`endpoint5` contains a single configured decoder :code:`decoder5.0`
+which has the same interleave configuration as :code:`region0` (shown later).
+
+Next we have the decoders belonging to the host bridge:
+
+::
+
+ "decoders:port1":[
+ {
+ "decoder":"decoder1.0",
+ "resource":825975898112,
+ "size":274877906944,
+ "interleave_ways":1,
+ "region":"region0",
+ "nr_targets":1,
+ "targets":[
+ {
+ "target":"0000:d2:01.1",
+ "alias":"device:02",
+ "position":0,
+ "id":0
+ }
+ ]
+ }
+ ]
+ },
+
+Host Bridge :code:`port1` has a single decoder (:code:`decoder1.0`), whose only
+target is :code:`dport0` - which is attached to :code:`endpoint5`.
+
+The following chunk shows a similar configuration for Host Bridge :code:`port3`,
+the second host bridge with a memory device attached.
+
+::
+
+ {
+ "port":"port3",
+ "host":"pci0000:a8",
+ "depth":1,
+ "nr_dports":1,
+ "dports":[
+ {
+ "dport":"0000:a8:01.1",
+ "alias":"device:c3",
+ "id":0
+ }
+ ],
+ "endpoints:port3":[
+ {
+ "endpoint":"endpoint6",
+ "host":"mem1",
+ "parent_dport":"0000:a8:01.1",
+ "depth":2,
+ "memdev":{
+ "memdev":"mem1",
+ "ram_size":137438953472,
+ "serial":0,
+ "numa_node":0,
+ "host":"0000:a9:00.0"
+ },
+ "decoders:endpoint6":[
+ {
+ "decoder":"decoder6.0",
+ "resource":825975898112,
+ "size":274877906944,
+ "interleave_ways":2,
+ "interleave_granularity":256,
+ "region":"region0",
+ "dpa_resource":0,
+ "dpa_size":137438953472,
+ "mode":"ram"
+ }
+ ]
+ }
+ ],
+ "decoders:port3":[
+ {
+ "decoder":"decoder3.0",
+ "resource":825975898112,
+ "size":274877906944,
+ "interleave_ways":1,
+ "region":"region0",
+ "nr_targets":1,
+ "targets":[
+ {
+ "target":"0000:a8:01.1",
+ "alias":"device:c3",
+ "position":0,
+ "id":0
+ }
+ ]
+ }
+ ]
+ },
+
+
+The next chunk shows the two CXL host bridges without attached endpoints.
+
+::
+
+ {
+ "port":"port2",
+ "host":"pci0000:00",
+ "depth":1,
+ "nr_dports":2,
+ "dports":[
+ {
+ "dport":"0000:00:01.3",
+ "alias":"device:55",
+ "id":2
+ },
+ {
+ "dport":"0000:00:07.1",
+ "alias":"device:5d",
+ "id":113
+ }
+ ]
+ },
+ {
+ "port":"port4",
+ "host":"pci0000:2a",
+ "depth":1,
+ "nr_dports":1,
+ "dports":[
+ {
+ "dport":"0000:2a:01.1",
+ "alias":"device:d0",
+ "id":0
+ }
+ ]
+ }
+ ],
+
+Next we have the `Root Decoder` belonging to :code:`root0`. This root decoder
+applies the interleave across the downstream ports :code:`port1` and
+:code:`port3` - with a granularity of 256 bytes.
+
+This information is generated by the CXL driver reading the ACPI CEDT CFMWS.
+
+::
+
+ "decoders:root0":[
+ {
+ "decoder":"decoder0.0",
+ "resource":825975898112,
+ "size":274877906944,
+ "interleave_ways":2,
+ "interleave_granularity":256,
+ "max_available_extent":0,
+ "volatile_capable":true,
+ "nr_targets":2,
+ "targets":[
+ {
+ "target":"pci0000:a8",
+ "alias":"ACPI0016:02",
+ "position":1,
+ "id":4
+ },
+ {
+ "target":"pci0000:d2",
+ "alias":"ACPI0016:00",
+ "position":0,
+ "id":5
+ }
+ ],
+
+Finally we have the `Memory Region` associated with the `Root Decoder`
+:code:`decoder0.0`. This region describes the overall interleave configuration
+of the interleave set.
+
+::
+
+ "regions:decoder0.0":[
+ {
+ "region":"region0",
+ "resource":825975898112,
+ "size":274877906944,
+ "type":"ram",
+ "interleave_ways":2,
+ "interleave_granularity":256,
+ "decode_state":"commit",
+ "mappings":[
+ {
+ "position":1,
+ "memdev":"mem1",
+ "decoder":"decoder6.0"
+ },
+ {
+ "position":0,
+ "memdev":"mem0",
+ "decoder":"decoder5.0"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
diff --git a/Documentation/driver-api/cxl/linux/example-configurations/intra-hb-interleave.rst b/Documentation/driver-api/cxl/linux/example-configurations/intra-hb-interleave.rst
new file mode 100644
index 000000000000..077dfaf8458d
--- /dev/null
+++ b/Documentation/driver-api/cxl/linux/example-configurations/intra-hb-interleave.rst
@@ -0,0 +1,291 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================
+Intra-Host-Bridge Interleave
+============================
+This cxl-cli configuration dump shows the following host configuration:
+
+* A single socket system with one CXL root
+* CXL Root has Four (4) CXL Host Bridges
+* One (1) CXL Host Bridge has two CXL Memory Expanders attached
+* The Host bridge decoder is programmed to interleave across the expanders.
+
+This output is generated by :code:`cxl list -v` and describes the relationships
+between objects exposed in :code:`/sys/bus/cxl/devices/`.
+
+::
+
+ [
+ {
+ "bus":"root0",
+ "provider":"ACPI.CXL",
+ "nr_dports":4,
+ "dports":[
+ {
+ "dport":"pci0000:00",
+ "alias":"ACPI0016:01",
+ "id":0
+ },
+ {
+ "dport":"pci0000:a8",
+ "alias":"ACPI0016:02",
+ "id":4
+ },
+ {
+ "dport":"pci0000:2a",
+ "alias":"ACPI0016:03",
+ "id":1
+ },
+ {
+ "dport":"pci0000:d2",
+ "alias":"ACPI0016:00",
+ "id":5
+ }
+ ],
+
+This chunk shows the CXL "bus" (root0) has 4 downstream ports attached to CXL
+Host Bridges. The `Root` can be considered the singular upstream port attached
+to the platform's memory controller - which routes memory requests to it.
+
+The `ports:root0` section lays out how each of these downstream ports is
+configured. If a port is not configured (ids 0, 1, and 4), it is omitted.
+
+::
+
+ "ports:root0":[
+ {
+ "port":"port1",
+ "host":"pci0000:d2",
+ "depth":1,
+ "nr_dports":3,
+ "dports":[
+ {
+ "dport":"0000:d2:01.1",
+ "alias":"device:02",
+ "id":0
+ },
+ {
+ "dport":"0000:d2:01.3",
+ "alias":"device:05",
+ "id":2
+ },
+ {
+ "dport":"0000:d2:07.1",
+ "alias":"device:0d",
+ "id":113
+ }
+ ],
+
+This chunk shows the available downstream ports associated with the CXL Host
+Bridge :code:`port1`. In this case, :code:`port1` has 3 available downstream
+ports: :code:`dport0`, :code:`dport2`, and :code:`dport113`.
+
+::
+
+ "endpoints:port1":[
+ {
+ "endpoint":"endpoint5",
+ "host":"mem0",
+ "parent_dport":"0000:d2:01.1",
+ "depth":2,
+ "memdev":{
+ "memdev":"mem0",
+ "ram_size":137438953472,
+ "serial":0,
+ "numa_node":0,
+ "host":"0000:d3:00.0"
+ },
+ "decoders:endpoint5":[
+ {
+ "decoder":"decoder5.0",
+ "resource":825975898112,
+ "size":274877906944,
+ "interleave_ways":2,
+ "interleave_granularity":256,
+ "region":"region0",
+ "dpa_resource":0,
+ "dpa_size":137438953472,
+ "mode":"ram"
+ }
+ ]
+ },
+ {
+ "endpoint":"endpoint6",
+ "host":"mem1",
+            "parent_dport":"0000:d2:01.3",
+ "depth":2,
+ "memdev":{
+ "memdev":"mem1",
+ "ram_size":137438953472,
+ "serial":0,
+ "numa_node":0,
+ "host":"0000:a9:00.0"
+ },
+ "decoders:endpoint6":[
+ {
+ "decoder":"decoder6.0",
+ "resource":825975898112,
+ "size":274877906944,
+ "interleave_ways":2,
+ "interleave_granularity":256,
+ "region":"region0",
+ "dpa_resource":0,
+ "dpa_size":137438953472,
+ "mode":"ram"
+ }
+ ]
+ }
+ ],
+
+This chunk shows the endpoints attached to the host bridge :code:`port1`.
+
+:code:`endpoint5` and :code:`endpoint6` each contain a single configured
+decoder (:code:`decoder5.0` and :code:`decoder6.0` respectively), each with the
+same interleave configuration as the memory region they belong to
+(:code:`region0`, shown later).
+
+Next we have the decoders belonging to the host bridge:
+
+::
+
+ "decoders:port1":[
+ {
+ "decoder":"decoder1.0",
+ "resource":825975898112,
+ "size":274877906944,
+ "interleave_ways":2,
+ "interleave_granularity":256,
+ "region":"region0",
+ "nr_targets":2,
+ "targets":[
+ {
+ "target":"0000:d2:01.1",
+ "alias":"device:02",
+ "position":0,
+ "id":0
+ },
+ {
+ "target":"0000:d2:01.3",
+ "alias":"device:05",
+ "position":1,
+ "id":0
+ }
+ ]
+ }
+ ]
+ },
+
+Host Bridge :code:`port1` has a single decoder (:code:`decoder1.0`) with two
+targets: :code:`dport0` and :code:`dport2` - which are attached to
+:code:`endpoint5` and :code:`endpoint6` respectively.
+
+The host bridge decoder interleaves these devices at a 256 byte granularity.
+
+The next chunk shows the three CXL host bridges without attached endpoints.
+
+::
+
+ {
+ "port":"port2",
+ "host":"pci0000:00",
+ "depth":1,
+ "nr_dports":2,
+ "dports":[
+ {
+ "dport":"0000:00:01.3",
+ "alias":"device:55",
+ "id":2
+ },
+ {
+ "dport":"0000:00:07.1",
+ "alias":"device:5d",
+ "id":113
+ }
+ ]
+ },
+ {
+ "port":"port3",
+ "host":"pci0000:a8",
+ "depth":1,
+ "nr_dports":1,
+ "dports":[
+ {
+ "dport":"0000:a8:01.1",
+ "alias":"device:c3",
+ "id":0
+ }
+            ]
+ },
+ {
+ "port":"port4",
+ "host":"pci0000:2a",
+ "depth":1,
+ "nr_dports":1,
+ "dports":[
+ {
+ "dport":"0000:2a:01.1",
+ "alias":"device:d0",
+ "id":0
+ }
+ ]
+ }
+ ],
+
+Next we have the `Root Decoders` belonging to :code:`root0`. In this
+configuration the root decoder is a pass-through decoder
+(:code:`interleave_ways` is :code:`1`) whose only target is the host bridge
+:code:`pci0000:d2` (:code:`port1`); the interleave itself is applied by the
+host bridge decoder.
+
+This information is generated by the CXL driver reading the ACPI CEDT CFMWS.
+
+::
+
+ "decoders:root0":[
+ {
+ "decoder":"decoder0.0",
+ "resource":825975898112,
+ "size":274877906944,
+ "interleave_ways":1,
+ "max_available_extent":0,
+ "volatile_capable":true,
+                "nr_targets":1,
+                "targets":[
+                    {
+                        "target":"pci0000:d2",
+                        "alias":"ACPI0016:00",
+                        "position":0,
+                        "id":5
+                    }
+                ],
+
+Finally we have the `Memory Region` associated with the `Root Decoder`
+:code:`decoder0.0`. This region describes the overall interleave configuration
+of the interleave set.
+
+::
+
+ "regions:decoder0.0":[
+ {
+ "region":"region0",
+ "resource":825975898112,
+ "size":274877906944,
+ "type":"ram",
+ "interleave_ways":2,
+ "interleave_granularity":256,
+ "decode_state":"commit",
+ "mappings":[
+ {
+ "position":1,
+ "memdev":"mem1",
+ "decoder":"decoder6.0"
+ },
+ {
+ "position":0,
+ "memdev":"mem0",
+ "decoder":"decoder5.0"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
diff --git a/Documentation/driver-api/cxl/linux/example-configurations/multi-interleave.rst b/Documentation/driver-api/cxl/linux/example-configurations/multi-interleave.rst
new file mode 100644
index 000000000000..008f9053c630
--- /dev/null
+++ b/Documentation/driver-api/cxl/linux/example-configurations/multi-interleave.rst
@@ -0,0 +1,401 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+Multi-Level Interleave
+======================
+This cxl-cli configuration dump shows the following host configuration:
+
+* A single socket system with one CXL root
+* CXL Root has Four (4) CXL Host Bridges
+* Two (2) CXL Host Bridges have two CXL Memory Expanders attached each.
+* The CXL root is configured to interleave across the two host bridges.
+* Each host bridge with expanders interleaves across two endpoints.
+
+This output is generated by :code:`cxl list -v` and describes the relationships
+between objects exposed in :code:`/sys/bus/cxl/devices/`.
+
+::
+
+ [
+ {
+ "bus":"root0",
+ "provider":"ACPI.CXL",
+ "nr_dports":4,
+ "dports":[
+ {
+ "dport":"pci0000:00",
+ "alias":"ACPI0016:01",
+ "id":0
+ },
+ {
+ "dport":"pci0000:a8",
+ "alias":"ACPI0016:02",
+ "id":4
+ },
+ {
+ "dport":"pci0000:2a",
+ "alias":"ACPI0016:03",
+ "id":1
+ },
+ {
+ "dport":"pci0000:d2",
+ "alias":"ACPI0016:00",
+ "id":5
+ }
+ ],
+
+This chunk shows the CXL "bus" (root0) has 4 downstream ports attached to CXL
+Host Bridges. The `Root` can be considered the singular upstream port attached
+to the platform's memory controller - which routes memory requests to it.
+
+The `ports:root0` section lays out how each of these downstream ports is
+configured. If a port is not configured (ids 0 and 1), it is omitted.
+
+::
+
+ "ports:root0":[
+ {
+ "port":"port1",
+ "host":"pci0000:d2",
+ "depth":1,
+ "nr_dports":3,
+ "dports":[
+ {
+ "dport":"0000:d2:01.1",
+ "alias":"device:02",
+ "id":0
+ },
+ {
+ "dport":"0000:d2:01.3",
+ "alias":"device:05",
+ "id":2
+ },
+ {
+ "dport":"0000:d2:07.1",
+ "alias":"device:0d",
+ "id":113
+ }
+ ],
+
+This chunk shows the available downstream ports associated with the CXL Host
+Bridge :code:`port1`. In this case, :code:`port1` has 3 available downstream
+ports: :code:`dport0`, :code:`dport2`, and :code:`dport113`.
+
+::
+
+ "endpoints:port1":[
+ {
+ "endpoint":"endpoint5",
+ "host":"mem0",
+ "parent_dport":"0000:d2:01.1",
+ "depth":2,
+ "memdev":{
+ "memdev":"mem0",
+ "ram_size":137438953472,
+ "serial":0,
+ "numa_node":0,
+ "host":"0000:d3:00.0"
+ },
+ "decoders:endpoint5":[
+ {
+ "decoder":"decoder5.0",
+ "resource":825975898112,
+ "size":549755813888,
+ "interleave_ways":4,
+ "interleave_granularity":256,
+ "region":"region0",
+ "dpa_resource":0,
+ "dpa_size":137438953472,
+ "mode":"ram"
+ }
+ ]
+ },
+ {
+ "endpoint":"endpoint6",
+ "host":"mem1",
+ "parent_dport":"0000:d2:01.3",
+ "depth":2,
+ "memdev":{
+ "memdev":"mem1",
+ "ram_size":137438953472,
+ "serial":0,
+ "numa_node":0,
+ "host":"0000:d3:00.0"
+ },
+ "decoders:endpoint6":[
+ {
+ "decoder":"decoder6.0",
+ "resource":825975898112,
+ "size":549755813888,
+ "interleave_ways":4,
+ "interleave_granularity":256,
+ "region":"region0",
+ "dpa_resource":0,
+ "dpa_size":137438953472,
+ "mode":"ram"
+ }
+ ]
+ }
+ ],
+
+This chunk shows the endpoints attached to the host bridge :code:`port1`.
+
+:code:`endpoint5` contains a single configured decoder :code:`decoder5.0`
+which has the same interleave configuration as :code:`region0` (shown later).
+
+:code:`endpoint6` contains a single configured decoder :code:`decoder6.0`
+which has the same interleave configuration as :code:`region0` (shown later).
+
+Next we have the decoders belonging to the host bridge:
+
+::
+
+ "decoders:port1":[
+ {
+ "decoder":"decoder1.0",
+ "resource":825975898112,
+ "size":549755813888,
+ "interleave_ways":2,
+ "interleave_granularity":512,
+ "region":"region0",
+ "nr_targets":2,
+ "targets":[
+ {
+ "target":"0000:d2:01.1",
+ "alias":"device:02",
+ "position":0,
+ "id":0
+ },
+ {
+ "target":"0000:d2:01.3",
+ "alias":"device:05",
+ "position":2,
+ "id":0
+ }
+ ]
+ }
+ ]
+ },
+
+Host Bridge :code:`port1` has a single decoder (:code:`decoder1.0`), whose
+targets are :code:`dport0` and :code:`dport2` - which are attached to
+:code:`endpoint5` and :code:`endpoint6` respectively.
+
+The following chunk shows a similar configuration for Host Bridge :code:`port3`,
+the second host bridge with a memory device attached.
+
+::
+
+ {
+ "port":"port3",
+ "host":"pci0000:a8",
+ "depth":1,
+        "nr_dports":2,
+ "dports":[
+ {
+ "dport":"0000:a8:01.1",
+ "alias":"device:c3",
+ "id":0
+ },
+ {
+ "dport":"0000:a8:01.3",
+ "alias":"device:c5",
+ "id":0
+ }
+ ],
+ "endpoints:port3":[
+ {
+ "endpoint":"endpoint7",
+ "host":"mem2",
+ "parent_dport":"0000:a8:01.1",
+ "depth":2,
+ "memdev":{
+ "memdev":"mem2",
+ "ram_size":137438953472,
+ "serial":0,
+ "numa_node":0,
+ "host":"0000:a9:00.0"
+ },
+ "decoders:endpoint7":[
+ {
+ "decoder":"decoder7.0",
+ "resource":825975898112,
+ "size":549755813888,
+ "interleave_ways":4,
+ "interleave_granularity":256,
+ "region":"region0",
+ "dpa_resource":0,
+ "dpa_size":137438953472,
+ "mode":"ram"
+ }
+ ]
+ },
+ {
+ "endpoint":"endpoint8",
+ "host":"mem3",
+ "parent_dport":"0000:a8:01.3",
+ "depth":2,
+ "memdev":{
+ "memdev":"mem3",
+ "ram_size":137438953472,
+ "serial":0,
+ "numa_node":0,
+ "host":"0000:a9:00.0"
+ },
+ "decoders:endpoint8":[
+ {
+ "decoder":"decoder8.0",
+ "resource":825975898112,
+ "size":549755813888,
+ "interleave_ways":4,
+ "interleave_granularity":256,
+ "region":"region0",
+ "dpa_resource":0,
+ "dpa_size":137438953472,
+ "mode":"ram"
+ }
+ ]
+ }
+ ],
+ "decoders:port3":[
+ {
+ "decoder":"decoder3.0",
+ "resource":825975898112,
+ "size":549755813888,
+ "interleave_ways":2,
+ "interleave_granularity":512,
+ "region":"region0",
+          "nr_targets":2,
+ "targets":[
+ {
+ "target":"0000:a8:01.1",
+ "alias":"device:c3",
+ "position":1,
+ "id":0
+ },
+ {
+ "target":"0000:a8:01.3",
+ "alias":"device:c5",
+ "position":3,
+ "id":0
+ }
+ ]
+ }
+ ]
+ },
+
+
+The next chunk shows the two CXL host bridges without attached endpoints.
+
+::
+
+ {
+ "port":"port2",
+ "host":"pci0000:00",
+ "depth":1,
+ "nr_dports":2,
+ "dports":[
+ {
+ "dport":"0000:00:01.3",
+ "alias":"device:55",
+ "id":2
+ },
+ {
+ "dport":"0000:00:07.1",
+ "alias":"device:5d",
+ "id":113
+ }
+ ]
+ },
+ {
+ "port":"port4",
+ "host":"pci0000:2a",
+ "depth":1,
+ "nr_dports":1,
+ "dports":[
+ {
+ "dport":"0000:2a:01.1",
+ "alias":"device:d0",
+ "id":0
+ }
+ ]
+ }
+ ],
+
+Next we have the `Root Decoders` belonging to :code:`root0`. This root decoder
+applies the interleave across the downstream ports :code:`port1` and
+:code:`port3` - with a granularity of 256 bytes.
+
+This information is generated by the CXL driver reading the ACPI CEDT CFMWS.
+
+::
+
+ "decoders:root0":[
+ {
+ "decoder":"decoder0.0",
+ "resource":825975898112,
+ "size":549755813888,
+ "interleave_ways":2,
+ "interleave_granularity":256,
+ "max_available_extent":0,
+ "volatile_capable":true,
+ "nr_targets":2,
+ "targets":[
+ {
+ "target":"pci0000:a8",
+ "alias":"ACPI0016:02",
+ "position":1,
+ "id":4
+ },
+ {
+ "target":"pci0000:d2",
+ "alias":"ACPI0016:00",
+ "position":0,
+ "id":5
+ }
+ ],
+
+Finally we have the `Memory Region` associated with the `Root Decoder`
+:code:`decoder0.0`. This region describes the overall interleave configuration
+of the interleave set. Here we see a total of four (4) interleave ways spread
+across the four endpoint decoders.
+
+::
+
+ "regions:decoder0.0":[
+ {
+ "region":"region0",
+ "resource":825975898112,
+ "size":549755813888,
+ "type":"ram",
+ "interleave_ways":4,
+ "interleave_granularity":256,
+ "decode_state":"commit",
+ "mappings":[
+ {
+ "position":3,
+ "memdev":"mem3",
+ "decoder":"decoder8.0"
+ },
+ {
+ "position":2,
+ "memdev":"mem1",
+ "decoder":"decoder6.0"
+        },
+ {
+ "position":1,
+ "memdev":"mem2",
+ "decoder":"decoder7.0"
+ },
+ {
+ "position":0,
+ "memdev":"mem0",
+ "decoder":"decoder5.0"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
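+
+As an illustrative footnote (not part of any tool), the mapping from a host
+physical address to a memdev position in this configuration can be reproduced
+with simple modulo arithmetic - assuming the standard modulo interleave
+arithmetic (CEDT CFMWS "Interleave Arithmetic" 0)::
+
+    # Decoder values taken from the example dump above.
+    ROOT_WAYS, ROOT_GRAN = 2, 256   # decoder0.0 selects the host bridge
+    HB_WAYS, HB_GRAN = 2, 512       # decoder1.0/decoder3.0 select the endpoint
+
+    # region0 "mappings" above: position -> memdev
+    POSITION_TO_MEMDEV = {0: "mem0", 1: "mem2", 2: "mem1", 3: "mem3"}
+
+    def route(offset):
+        """Route an offset (HPA minus the region's resource base) to a memdev."""
+        hb_select = (offset // ROOT_GRAN) % ROOT_WAYS  # 0: pci0000:d2, 1: pci0000:a8
+        ep_select = (offset // HB_GRAN) % HB_WAYS      # which dport under that bridge
+        region_pos = (offset // 256) % 4               # equivalent 4-way/256B view
+        assert region_pos == hb_select + 2 * ep_select
+        return POSITION_TO_MEMDEV[region_pos]
+
+    # One full interleave round: ['mem0', 'mem2', 'mem1', 'mem3']
+    print([route(offset) for offset in range(0, 1024, 256)])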
diff --git a/Documentation/driver-api/cxl/linux/example-configurations/single-device.rst b/Documentation/driver-api/cxl/linux/example-configurations/single-device.rst
new file mode 100644
index 000000000000..5fd38eb0aaf4
--- /dev/null
+++ b/Documentation/driver-api/cxl/linux/example-configurations/single-device.rst
@@ -0,0 +1,246 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+Single Device
+=============
+This cxl-cli configuration dump shows the following host configuration:
+
+* A single socket system with one CXL root
+* CXL Root has Four (4) CXL Host Bridges
+* One CXL Host Bridge has a single CXL Memory Expander attached
+* No interleave is present.
+
+This output is generated by :code:`cxl list -v` and describes the relationships
+between objects exposed in :code:`/sys/bus/cxl/devices/`.
+
+::
+
+ [
+ {
+ "bus":"root0",
+ "provider":"ACPI.CXL",
+ "nr_dports":4,
+ "dports":[
+ {
+ "dport":"pci0000:00",
+ "alias":"ACPI0016:01",
+ "id":0
+ },
+ {
+ "dport":"pci0000:a8",
+ "alias":"ACPI0016:02",
+ "id":4
+ },
+ {
+ "dport":"pci0000:2a",
+ "alias":"ACPI0016:03",
+ "id":1
+ },
+ {
+ "dport":"pci0000:d2",
+ "alias":"ACPI0016:00",
+ "id":5
+ }
+ ],
+
+This chunk shows the CXL "bus" (root0) has 4 downstream ports attached to CXL
+Host Bridges. The `Root` can be considered the singular upstream port attached
+to the platform's memory controller - which routes memory requests to it.
+
+The `ports:root0` section lays out how each of these downstream ports is
+configured. If a port is not configured (ids 0, 1, and 4), it is omitted.
+
+::
+
+ "ports:root0":[
+ {
+ "port":"port1",
+ "host":"pci0000:d2",
+ "depth":1,
+ "nr_dports":3,
+ "dports":[
+ {
+ "dport":"0000:d2:01.1",
+ "alias":"device:02",
+ "id":0
+ },
+ {
+ "dport":"0000:d2:01.3",
+ "alias":"device:05",
+ "id":2
+ },
+ {
+ "dport":"0000:d2:07.1",
+ "alias":"device:0d",
+ "id":113
+ }
+ ],
+
+This chunk shows the available downstream ports associated with the CXL Host
+Bridge :code:`port1`. In this case, :code:`port1` has 3 available downstream
+ports: :code:`dport0`, :code:`dport2`, and :code:`dport113`.
+
+::
+
+ "endpoints:port1":[
+ {
+ "endpoint":"endpoint5",
+ "host":"mem0",
+ "parent_dport":"0000:d2:01.1",
+ "depth":2,
+ "memdev":{
+ "memdev":"mem0",
+ "ram_size":137438953472,
+ "serial":0,
+ "numa_node":0,
+ "host":"0000:d3:00.0"
+ },
+ "decoders:endpoint5":[
+ {
+ "decoder":"decoder5.0",
+ "resource":825975898112,
+ "size":137438953472,
+ "interleave_ways":1,
+ "region":"region0",
+ "dpa_resource":0,
+ "dpa_size":137438953472,
+ "mode":"ram"
+ }
+ ]
+ }
+ ],
+
+This chunk shows the endpoints attached to the host bridge :code:`port1`.
+
+:code:`endpoint5` contains a single configured decoder :code:`decoder5.0`
+which has the same interleave configuration as :code:`region0` (shown later).
+
+Next we have the decoders belonging to the host bridge:
+
+::
+
+ "decoders:port1":[
+ {
+ "decoder":"decoder1.0",
+ "resource":825975898112,
+ "size":137438953472,
+ "interleave_ways":1,
+ "region":"region0",
+ "nr_targets":1,
+ "targets":[
+ {
+ "target":"0000:d2:01.1",
+ "alias":"device:02",
+ "position":0,
+ "id":0
+ }
+ ]
+ }
+ ]
+ },
+
+Host Bridge :code:`port1` has a single decoder (:code:`decoder1.0`), whose only
+target is :code:`dport0` - which is attached to :code:`endpoint5`.
+
+The next chunk shows the three CXL host bridges without attached endpoints.
+
+::
+
+ {
+ "port":"port2",
+ "host":"pci0000:00",
+ "depth":1,
+ "nr_dports":2,
+ "dports":[
+ {
+ "dport":"0000:00:01.3",
+ "alias":"device:55",
+ "id":2
+ },
+ {
+ "dport":"0000:00:07.1",
+ "alias":"device:5d",
+ "id":113
+ }
+ ]
+ },
+ {
+ "port":"port3",
+ "host":"pci0000:a8",
+ "depth":1,
+ "nr_dports":1,
+ "dports":[
+ {
+ "dport":"0000:a8:01.1",
+ "alias":"device:c3",
+ "id":0
+ }
+ ]
+ },
+ {
+ "port":"port4",
+ "host":"pci0000:2a",
+ "depth":1,
+ "nr_dports":1,
+ "dports":[
+ {
+ "dport":"0000:2a:01.1",
+ "alias":"device:d0",
+ "id":0
+ }
+ ]
+ }
+ ],
+
+Next we have the `Root Decoders` belonging to :code:`root0`. This root decoder
+is a pass-through decoder because :code:`interleave_ways` is set to :code:`1`.
+
+This information is generated by the CXL driver reading the ACPI CEDT CFMWS.
+
+::
+
+ "decoders:root0":[
+ {
+ "decoder":"decoder0.0",
+ "resource":825975898112,
+ "size":137438953472,
+ "interleave_ways":1,
+ "max_available_extent":0,
+ "volatile_capable":true,
+ "nr_targets":1,
+ "targets":[
+ {
+ "target":"pci0000:d2",
+ "alias":"ACPI0016:00",
+ "position":0,
+ "id":5
+ }
+ ],
+
+Finally we have the `Memory Region` associated with the `Root Decoder`
+:code:`decoder0.0`. This region describes the discrete region associated
+with the lone device.
+
+::
+
+ "regions:decoder0.0":[
+ {
+ "region":"region0",
+ "resource":825975898112,
+ "size":137438953472,
+ "type":"ram",
+ "interleave_ways":1,
+ "decode_state":"commit",
+ "mappings":[
+ {
+ "position":0,
+ "memdev":"mem0",
+ "decoder":"decoder5.0"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
diff --git a/Documentation/driver-api/cxl/linux/memory-hotplug.rst b/Documentation/driver-api/cxl/linux/memory-hotplug.rst
new file mode 100644
index 000000000000..af368c2bc9cf
--- /dev/null
+++ b/Documentation/driver-api/cxl/linux/memory-hotplug.rst
@@ -0,0 +1,78 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============
+Memory Hotplug
+==============
+The final phase of surfacing CXL memory to the kernel page allocator is for
+the `DAX` driver to surface a `Driver Managed` memory region via the
+memory-hotplug component.
+
+There are four major configurations to consider:
+
+1) Default Online Behavior (on/off and zone)
+2) Hotplug Memory Block size
+3) Memory Map Resource location
+4) Driver-Managed Memory Designation
+
+Default Online Behavior
+=======================
+The default-online behavior of hotplug memory is dictated by the following,
+in order of precedence:
+
+- :code:`CONFIG_MHP_DEFAULT_ONLINE_TYPE` Build Configuration
+- :code:`memhp_default_state` Boot parameter
+- :code:`/sys/devices/system/memory/auto_online_blocks` value
+
+These dictate whether hotplugged memory blocks arrive in one of three states:
+
+1) Offline
+2) Online in :code:`ZONE_NORMAL`
+3) Online in :code:`ZONE_MOVABLE`
+
+:code:`ZONE_NORMAL` implies this capacity may be used for almost any allocation,
+while :code:`ZONE_MOVABLE` implies this capacity should only be used for
+migratable allocations.
+
+:code:`ZONE_MOVABLE` attempts to retain the hotplug-ability of a memory block
+so that the entire region may be hot-unplugged at a later time. Any capacity
+onlined into :code:`ZONE_NORMAL` should be considered permanently attached to
+the page allocator.
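+
+A minimal sketch for inspecting the effective runtime default (the sysfs path
+is the standard memory-hotplug location; the value reported depends on the
+build configuration and boot parameters above)::
+
+    from pathlib import Path
+
+    # One of: offline, online, online_kernel, online_movable
+    policy = Path("/sys/devices/system/memory/auto_online_blocks").read_text().strip()
+    print(f"newly hotplugged memory blocks default to: {policy}")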
+
+Hotplug Memory Block Size
+=========================
+By default, on most architectures, the Hotplug Memory Block Size is either
+128MB or 256MB. On x86, the block size increases up to 2GB as total memory
+capacity exceeds 64GB. As of v6.15, Linux does not take into account the
+size and alignment of the ACPI CEDT CFMWS regions (see Early Boot docs) when
+deciding the Hotplug Memory Block Size.
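+
+The block size in use can be read from sysfs - a hedged example, noting the
+value is reported as hexadecimal bytes::
+
+    from pathlib import Path
+
+    path = Path("/sys/devices/system/memory/block_size_bytes")
+    block_size = int(path.read_text(), 16)
+    print(f"memory block size: {block_size >> 20} MiB")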
+
+Memory Map
+==========
+The location of :code:`struct folio` allocations to represent the hotplugged
+memory capacity is dictated by the following system settings:
+
+- :code:`/sys/module/memory_hotplug/parameters/memmap_on_memory`
+- :code:`/sys/bus/dax/devices/daxN.Y/memmap_on_memory`
+
+If both of these parameters are set to true, :code:`struct folio` for this
+capacity will be carved out of the memory block being onlined. This has
+performance implications if the memory is particularly high-latency and
+its :code:`struct folio` becomes hotly contended.
+
+If either parameter is set to false, :code:`struct folio` for this capacity
+will be allocated from the local node of the processor running the hotplug
+procedure. This capacity will be allocated from :code:`ZONE_NORMAL` on
+that node, as it is a :code:`GFP_KERNEL` allocation.
+
+Systems with extremely large amounts of :code:`ZONE_MOVABLE` memory (e.g.
+CXL memory pools) must ensure that there is sufficient local
+:code:`ZONE_NORMAL` capacity to host the memory map for the hotplugged capacity.
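+
+A short sketch for checking both knobs; :code:`dax0.0` is only an example
+device name - enumerate :code:`/sys/bus/dax/devices/` on a real system::
+
+    from pathlib import Path
+
+    module_param = Path("/sys/module/memory_hotplug/parameters/memmap_on_memory")
+    dax_param = Path("/sys/bus/dax/devices/dax0.0/memmap_on_memory")
+
+    print("memory_hotplug module default:", module_param.read_text().strip())
+    print("dax0.0 device setting:", dax_param.read_text().strip())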
+
+Driver Managed Memory
+=====================
+The DAX driver surfaces this memory to memory-hotplug as "Driver Managed". This
+is not a configurable setting, but it's important to note that driver managed
+memory is explicitly excluded from use during kexec. This is required to ensure
+any reset or out-of-band operations that the CXL device may be subject to during
+a functional system-reboot (such as a reset-on-probe) will not cause portions of
+the kexec kernel to be overwritten.
diff --git a/Documentation/driver-api/cxl/linux/overview.rst b/Documentation/driver-api/cxl/linux/overview.rst
new file mode 100644
index 000000000000..648beb2c8c83
--- /dev/null
+++ b/Documentation/driver-api/cxl/linux/overview.rst
@@ -0,0 +1,103 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========
+Overview
+========
+
+This section presents the configuration process of a CXL Type-3 memory device,
+and how it is ultimately exposed to users as either a :code:`DAX` device or
+normal memory pages via the kernel's page allocator.
+
+Portions marked with a bullet are points at which certain kernel objects
+are generated.
+
+1) Early Boot
+
+ a) BIOS, Build, and Boot Parameters
+
+ i) EFI_MEMORY_SP
+ ii) CONFIG_EFI_SOFT_RESERVE
+ iii) CONFIG_MHP_DEFAULT_ONLINE_TYPE
+ iv) nosoftreserve
+
+ b) Memory Map Creation
+
+ i) EFI Memory Map / E820 Consulted for Soft-Reserved
+
+ * CXL Memory is set aside to be handled by the CXL driver
+
+ * Soft-Reserved IO Resource created for CFMWS entry
+
+ c) NUMA Node Creation
+
+ * Nodes created from ACPI CEDT CFMWS and SRAT Proximity domains (PXM)
+
+ d) Memory Tier Creation
+
+ * A default memory_tier is created with all nodes.
+
+ e) Contiguous Memory Allocation
+
+ * Any requested CMA is allocated from Online nodes
+
+ f) Init Finishes, Drivers start probing
+
+2) ACPI and PCI Drivers
+
+ a) Detects PCI device is CXL, marking it for probe by CXL driver
+
+3) CXL Driver Operation
+
+ a) Base device creation
+
+ * root, port, and memdev devices created
+ * CEDT CFMWS IO Resource creation
+
+ b) Decoder creation
+
+ * root, switch, and endpoint decoders created
+
+ c) Logical device creation
+
+ * memory_region and endpoint devices created
+
+ d) Devices are associated with each other
+
+ * If auto-decoder (BIOS-programmed decoders), driver validates
+ configurations, builds associations, and locks configs at probe time.
+
+ * If user-configured, validation and associations are built at
+ decoder-commit time.
+
+ e) Regions surfaced as DAX region
+
+ * dax_region created
+
+ * DAX device created via DAX driver
+
+4) DAX Driver Operation
+
+ a) DAX driver surfaces DAX region as one of two dax device modes
+
+ * kmem - dax device is converted to hotplug memory blocks
+
+ * DAX kmem IO Resource creation
+
+ * hmem - dax device is left as daxdev to be accessed as a file.
+
+ * If hmem, journey ends here.
+
+ b) DAX kmem surfaces memory region to Memory Hotplug to add to page
+ allocator as "driver managed memory"
+
+5) Memory Hotplug
+
+ a) mhp component surfaces a dax device memory region as multiple memory
+ blocks to the page allocator
+
+   * blocks appear in :code:`/sys/bus/memory/devices` and are linked to a NUMA node
+
+ b) blocks are onlined into the requested zone (NORMAL or MOVABLE)
+
+   * Memory is marked "Driver Managed" to prevent kexec from using it as a
+     region for kernel updates
diff --git a/Documentation/driver-api/cxl/maturity-map.rst b/Documentation/driver-api/cxl/maturity-map.rst
index a2288f9df658..1330f3f52129 100644
--- a/Documentation/driver-api/cxl/maturity-map.rst
+++ b/Documentation/driver-api/cxl/maturity-map.rst
@@ -51,9 +51,9 @@ in place, but there are several corner cases that are pending closure.
* [2] CXL Window Enumeration
- * [0] :ref:`Extended-linear memory-side cache <extended-linear>`
+ * [2] :ref:`Extended-linear memory-side cache <extended-linear>`
* [0] Low Memory-hole
- * [0] Hetero-interleave
+ * [X] Hetero-interleave
* [2] Switch Enumeration
@@ -173,7 +173,7 @@ Accelerator
User Flow Support
-----------------
-* [0] HPA->DPA Address translation (need xormaps export solution)
+* [0] Inject & clear poison by HPA
Details
=======
diff --git a/Documentation/driver-api/cxl/platform/acpi.rst b/Documentation/driver-api/cxl/platform/acpi.rst
new file mode 100644
index 000000000000..ee7e6bd4c43d
--- /dev/null
+++ b/Documentation/driver-api/cxl/platform/acpi.rst
@@ -0,0 +1,76 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========
+ACPI Tables
+===========
+
+ACPI is the "Advanced Configuration and Power Interface", which is a standard
+that defines how platforms and OS manage power and configure computer hardware.
+For the purpose of this theory of operation, when referring to "ACPI" we will
+usually refer to "ACPI Tables" - which are the way a platform (BIOS/EFI)
+communicates static configuration information to the operating system.
+
+The following ACPI tables contain *static* configuration and performance data
+about CXL devices.
+
+.. toctree::
+ :maxdepth: 1
+
+ acpi/cedt.rst
+ acpi/srat.rst
+ acpi/hmat.rst
+ acpi/slit.rst
+ acpi/dsdt.rst
+
+The SRAT table may also contain generic port/initiator content that is intended
+to describe the generic port, but not information about the rest of the path to
+the endpoint.
+
+Linux uses these tables to configure kernel resources for statically configured
+(by BIOS/EFI) CXL devices, such as:
+
+- NUMA nodes
+- Memory Tiers
+- NUMA Abstract Distances
+- SystemRAM Memory Regions
+- Weighted Interleave Node Weights
+
+ACPI Debugging
+==============
+
+The :code:`acpidump -b` command dumps the ACPI tables into binary format.
+
+The :code:`iasl -d` command disassembles the files into human readable format.
+
+Example :code:`acpidump -b && iasl -d cedt.dat` ::
+
+ [000h 0000 4] Signature : "CEDT" [CXL Early Discovery Table]
+
+Common Issues
+-------------
+Most failures described here result in a failure of the driver to surface
+memory as a DAX device and/or kmem.
+
+* CEDT CFMWS targets list UIDs do not match CEDT CHBS UIDs.
+* CEDT CFMWS targets list UIDs do not match DSDT CXL Host Bridge UIDs.
+* CEDT CFMWS Restriction Bits are not correct.
+* CEDT CFMWS Memory regions are poorly aligned.
+* CEDT CFMWS Memory regions span a platform memory hole.
+* CEDT CHBS UIDs do not match DSDT CXL Host Bridge UIDs.
+* CEDT CHBS Specification version is incorrect.
+* SRAT is missing regions described in CEDT CFMWS.
+
+ * Result: failure to create a NUMA node for the region, or
+ region is placed in wrong node.
+
+* HMAT is missing data for regions described in CEDT CFMWS.
+
+ * Result: NUMA node being placed in the wrong memory tier.
+
+* SLIT has bad data.
+
+ * Result: Lots of performance mechanisms in the kernel will be very unhappy.
+
+All of these issues will appear to users as if the driver is failing to
+support CXL - when in reality they are all failures of the platform to
+configure the ACPI tables correctly.
diff --git a/Documentation/driver-api/cxl/platform/acpi/cedt.rst b/Documentation/driver-api/cxl/platform/acpi/cedt.rst
new file mode 100644
index 000000000000..1d9c9d3592dc
--- /dev/null
+++ b/Documentation/driver-api/cxl/platform/acpi/cedt.rst
@@ -0,0 +1,62 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================================
+CEDT - CXL Early Discovery Table
+================================
+
+The CXL Early Discovery Table is generated by BIOS to describe the CXL memory
+regions configured at boot by the BIOS.
+
+CHBS
+====
+The CXL Host Bridge Structure describes CXL host bridges. Other than describing
+device register information, it reports the UID for each host bridge. These
+host bridge UIDs will be referenced in other tables.
+
+Example ::
+
+ Subtable Type : 00 [CXL Host Bridge Structure]
+ Reserved : 00
+ Length : 0020
+ Associated host bridge : 00000007 <- Host bridge _UID
+ Specification version : 00000001
+ Reserved : 00000000
+ Register base : 0000010370400000
+ Register length : 0000000000010000
+
+CFMWS
+=====
+The CXL Fixed Memory Window structure describes a memory region associated
+with one or more CXL host bridges (as described by the CHBS). It additionally
+describes any inter-host-bridge interleave configuration that may have been
+programmed by BIOS.
+
+Example ::
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Reserved : 00
+ Length : 002C
+ Reserved : 00000000
+ Window base address : 000000C050000000 <- Memory Region
+ Window size : 0000003CA0000000
+ Interleave Members (2^n) : 01 <- Interleave configuration
+ Interleave Arithmetic : 00
+ Reserved : 0000
+ Granularity : 00000000
+ Restrictions : 0006
+ QtgId : 0001
+ First Target : 00000007 <- Host Bridge _UID
+ Next Target : 00000006 <- Host Bridge _UID
+
+The restriction field dictates what this SPA range may be used for (memory type,
+volatile vs. persistent, etc). One or more bits may be set. ::
+
+ Bit[0]: CXL Type 2 Memory
+ Bit[1]: CXL Type 3 Memory
+ Bit[2]: Volatile Memory
+ Bit[3]: Persistent Memory
+ Bit[4]: Fixed Config (HPA cannot be re-used)
+
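+As an illustrative aid (not part of any tool), the example value :code:`0006`
+above decodes to CXL Type 3 Memory plus Volatile Memory::
+
+    RESTRICTION_BITS = {
+        0: "CXL Type 2 Memory",
+        1: "CXL Type 3 Memory",
+        2: "Volatile Memory",
+        3: "Persistent Memory",
+        4: "Fixed Config (HPA cannot be re-used)",
+    }
+
+    def decode_restrictions(value):
+        return [name for bit, name in RESTRICTION_BITS.items() if value & (1 << bit)]
+
+    print(decode_restrictions(0x0006))  # ['CXL Type 3 Memory', 'Volatile Memory']
+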
+INTRA-host-bridge interleave (multiple devices on one host bridge) is NOT
+reported in this structure, and is solely defined via CXL device decoder
+programming (host bridge and endpoint decoders).
diff --git a/Documentation/driver-api/cxl/platform/acpi/dsdt.rst b/Documentation/driver-api/cxl/platform/acpi/dsdt.rst
new file mode 100644
index 000000000000..b4583b01d67d
--- /dev/null
+++ b/Documentation/driver-api/cxl/platform/acpi/dsdt.rst
@@ -0,0 +1,28 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================================
+DSDT - Differentiated System Description Table
+==============================================
+
+This table describes what peripherals a machine has.
+
+This table's UIDs for CXL devices - specifically host bridges - must be
+consistent with the contents of the CEDT; otherwise the CXL driver will
+fail to probe correctly.
+
+Example Compute Express Link Host Bridge ::
+
+ Scope (_SB)
+ {
+ Device (S0D0)
+ {
+ Name (_HID, "ACPI0016" /* Compute Express Link Host Bridge */) // _HID: Hardware ID
+ Name (_CID, Package (0x02) // _CID: Compatible ID
+ {
+ EisaId ("PNP0A08") /* PCI Express Bus */,
+ EisaId ("PNP0A03") /* PCI Bus */
+ })
+ ...
+ Name (_UID, 0x05) // _UID: Unique ID
+ ...
+ }
diff --git a/Documentation/driver-api/cxl/platform/acpi/hmat.rst b/Documentation/driver-api/cxl/platform/acpi/hmat.rst
new file mode 100644
index 000000000000..095a26f02a37
--- /dev/null
+++ b/Documentation/driver-api/cxl/platform/acpi/hmat.rst
@@ -0,0 +1,32 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================================
+HMAT - Heterogeneous Memory Attribute Table
+===========================================
+
+The Heterogeneous Memory Attributes Table contains information such as cache
+attributes and bandwidth and latency details for memory proximity domains.
+For the purpose of this document, we will only discuss the SLLBI entry.
+
+SLLBI
+=====
+The System Locality Latency and Bandwidth Information records latency and
+bandwidth information for proximity domains.
+
+This table is used by Linux to configure interleave weights and memory tiers.
+
+Example (Heavily truncated for brevity) ::
+
+ Structure Type : 0001 [SLLBI]
+ Data Type : 00 <- Latency
+ Target Proximity Domain List : 00000000
+ Target Proximity Domain List : 00000001
+ Entry : 0080 <- DRAM LTC
+ Entry : 0100 <- CXL LTC
+
+ Structure Type : 0001 [SLLBI]
+ Data Type : 03 <- Bandwidth
+ Target Proximity Domain List : 00000000
+ Target Proximity Domain List : 00000001
+ Entry : 1200 <- DRAM BW
+ Entry : 0200 <- CXL BW
diff --git a/Documentation/driver-api/cxl/platform/acpi/slit.rst b/Documentation/driver-api/cxl/platform/acpi/slit.rst
new file mode 100644
index 000000000000..a56768e8fe41
--- /dev/null
+++ b/Documentation/driver-api/cxl/platform/acpi/slit.rst
@@ -0,0 +1,21 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========================================
+SLIT - System Locality Information Table
+========================================
+
+The system locality information table provides "abstract distances" between
+accessor and memory nodes. Nodes without initiators (CPUs) are an infinite
+(FF) distance away from all other nodes.
+
+The abstract distance described in this table does not describe any real
+latency or bandwidth information.
+
+Example ::
+
+ Signature : "SLIT" [System Locality Information Table]
+ Localities : 0000000000000004
+ Locality 0 : 10 20 20 30
+ Locality 1 : 20 10 30 20
+ Locality 2 : FF FF 0A FF
+ Locality 3 : FF FF FF 0A
diff --git a/Documentation/driver-api/cxl/platform/acpi/srat.rst b/Documentation/driver-api/cxl/platform/acpi/srat.rst
new file mode 100644
index 000000000000..cc98ca0e508e
--- /dev/null
+++ b/Documentation/driver-api/cxl/platform/acpi/srat.rst
@@ -0,0 +1,71 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================
+SRAT - Static Resource Affinity Table
+=====================================
+
+The System/Static Resource Affinity Table describes resource (CPU, Memory)
+affinity to "Proximity Domains". This table is technically optional, but for
+performance information (see "HMAT") to be enumerated by Linux, it must be
+present.
+
+There is a careful dance between the CEDT and SRAT tables and how NUMA nodes are
+created. If things don't look quite the way you expect - check the SRAT Memory
+Affinity entries and CEDT CFMWS to determine what your platform actually
+supports in terms of flexible topologies.
+
+The SRAT may statically assign portions of a CFMWS SPA range to specific
+proximity domains. See the Linux NUMA node creation documentation for more
+information about how this presents in the NUMA topology.
+
+Proximity Domain
+================
+A proximity domain is ROUGHLY equivalent to "NUMA Node" - though a 1-to-1
+mapping is not guaranteed. There are scenarios where "Proximity Domain 4" may
+map to "NUMA Node 3", for example. (See "NUMA Node Creation")
+
+Memory Affinity
+===============
+Generally speaking, if a host does any amount of CXL fabric (decoder)
+programming in BIOS - an SRAT entry for that memory needs to be present.
+
+Example ::
+
+ Subtable Type : 01 [Memory Affinity]
+ Length : 28
+ Proximity Domain : 00000001 <- NUMA Node 1
+ Reserved1 : 0000
+ Base Address : 000000C050000000 <- Physical Memory Region
+ Address Length : 0000003CA0000000
+ Reserved2 : 00000000
+ Flags (decoded below) : 0000000B
+ Enabled : 1
+ Hot Pluggable : 1
+ Non-Volatile : 0
+
+
+Generic Port Affinity
+=====================
+The Generic Port Affinity subtable provides an association between a proximity
+domain and a device handle representing a Generic Port such as a CXL host
+bridge. With the association, latency and bandwidth numbers can be retrieved
+from the SRAT for the path between CPU(s) (initiator) and the Generic Port.
+This is used to construct performance coordinates for hotplugged CXL DEVICES,
+which cannot be enumerated at boot by platform firmware.
+
+Example ::
+
+ Subtable Type : 06 [Generic Port Affinity]
+ Length : 20 <- 32d, length of table
+ Reserved : 00
+ Device Handle Type : 00 <- 0 - ACPI, 1 - PCI
+ Proximity Domain : 00000001
+ Device Handle : ACPI0016:01
+ Flags : 00000001 <- Bit 0 (Enabled)
+ Reserved : 00000000
+
+The Proximity Domain is matched up to the :doc:`HMAT <hmat>` SSLBI Target
+Proximity Domain List for the related latency or bandwidth numbers. Those
+performance numbers are tied to a CXL host bridge via the Device Handle.
+The driver uses the association to retrieve the Generic Port performance
+numbers for the whole CXL path access coordinates calculation.
diff --git a/Documentation/driver-api/cxl/platform/bios-and-efi.rst b/Documentation/driver-api/cxl/platform/bios-and-efi.rst
new file mode 100644
index 000000000000..645322632cc9
--- /dev/null
+++ b/Documentation/driver-api/cxl/platform/bios-and-efi.rst
@@ -0,0 +1,262 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+BIOS/EFI Configuration
+======================
+
+BIOS and EFI are largely responsible for configuring static information about
+devices (or potential future devices) such that Linux can build the appropriate
+logical representations of these devices.
+
+At a high level, this is what occurs during this phase of configuration.
+
+* The platform powers on and BIOS/EFI starts (before the bootloader runs).
+
+* BIOS/EFI do early device probe to determine static configuration
+
+* BIOS/EFI creates ACPI Tables that describe static config for the OS
+
+* BIOS/EFI create the system memory map (EFI Memory Map, E820, etc)
+
+* BIOS/EFI calls :code:`start_kernel` and begins the Linux Early Boot process.
+
+Much of what this section is concerned with is ACPI Table production and
+static memory map configuration. More detail on these tables can be found
+at :doc:`ACPI Tables <acpi>`.
+
+.. note::
+   Platform Vendors should read carefully, as this section has recommendations
+   on physical memory region size and alignment, memory holes, HDM interleave,
+   and what Linux expects of HDM decoders trying to work with these features.
+
+UEFI Settings
+=============
+If your platform supports it, the :code:`uefisettings` command can be used to
+read/write EFI settings. Changes will be reflected on the next reboot. Kexec
+is not a sufficient reboot.
+
+One notable configuration here is the EFI_MEMORY_SP (Specific Purpose) bit.
+When this is enabled, this bit tells Linux to defer management of a memory
+region to a driver (in this case, the CXL driver). Otherwise, the memory is
+treated as "normal memory", and is exposed to the page allocator during
+:code:`__init`.
+
+uefisettings examples
+---------------------
+
+:code:`uefisettings identify` ::
+
+ uefisettings identify
+
+ bios_vendor: xxx
+ bios_version: xxx
+ bios_release: xxx
+ bios_date: xxx
+ product_name: xxx
+ product_family: xxx
+ product_version: xxx
+
+On some AMD platforms, the :code:`EFI_MEMORY_SP` bit is set via the :code:`CXL
+Memory Attribute` field. This may be called something else on your platform.
+
+:code:`uefisettings get "CXL Memory Attribute"` ::
+
+ selector: xxx
+ ...
+ question: Question {
+ name: "CXL Memory Attribute",
+ answer: "Enabled",
+ ...
+ }
+
+Physical Memory Map
+===================
+
+Physical Address Region Alignment
+---------------------------------
+
+As of Linux v6.14, the hotplug memory system requires memory regions to be
+uniform in size and alignment. While the CXL specification allows for memory
+regions as small as 256MB, the supported memory block size and alignment for
+hotplugged memory is architecture-defined.
+
+Linux memory blocks may be as small as 128MB and increase in powers of two.
+
+* On ARM, the default block size and alignment is either 128MB or 256MB.
+
+* On x86, the default block size is 256MB, and increases to 2GB as the
+ capacity of the system increases up to 64GB.
+
+For best support across versions, platform vendors should place CXL memory at
+a 2GB aligned base address, and regions should be 2GB aligned. This also helps
+prevent the creation of thousands of memory devices (one per block).
+
+Memory Holes
+------------
+
+Holes in the memory map are tricky. Consider a 4GB device located at base
+address 0x100000000, but with the following memory map ::
+
+ ---------------------
+ | 0x100000000 |
+ | CXL |
+ | 0x1BFFFFFFF |
+ ---------------------
+ | 0x1C0000000 |
+ | MEMORY HOLE |
+ | 0x1FFFFFFFF |
+ ---------------------
+ | 0x200000000 |
+ | CXL CONT. |
+ | 0x23FFFFFFF |
+ ---------------------
+
+There are two issues to consider:
+
+* decoder programming, and
+* memory block alignment.
+
+If your architecture requires 2GB uniform size and aligned memory blocks, the
+only capacity Linux is capable of mapping (as of v6.14) would be the capacity
+from `0x100000000-0x180000000`. The remaining capacity will be stranded, as
+it cannot form 2GB aligned memory blocks.
+
+Assuming your architecture and memory configuration allows 1GB memory blocks,
+this memory map is supported and this should be presented as multiple CFMWS
+in the CEDT that describe each side of the memory hole separately - along with
+matching decoders.
+
+Multiple decoders can (and should) be used to manage such a memory hole (see
+below), but each chunk of a memory hole should be aligned to a reasonable block
+size (larger alignment is always better). If you intend to have memory holes
+in the memory map, expect to use one decoder per contiguous chunk of host
+physical memory.
+
+As of v6.14, Linux does not provide support for memory hotplug of multiple
+physical memory regions separated by a memory hole when described by a single
+HDM decoder.
+
+
+Decoder Programming
+===================
+If BIOS/EFI intends to program the decoders to be statically configured,
+there are a few things to consider to avoid major pitfalls that will
+prevent Linux compatibility. Some of these recommendations are not
+required "per the specification", but Linux makes no guarantees of support
+otherwise.
+
+
+Translation Point
+-----------------
+Per the specification, the only decoders which **TRANSLATE** Host Physical
+Address (HPA) to Device Physical Address (DPA) are the **Endpoint Decoders**.
+All other decoders in the fabric are intended to route accesses without
+translating the addresses.
+
+This is heavily implied by the specification, see: ::
+
+ CXL Specification 3.1
+ 8.2.4.20: CXL HDM Decoder Capability Structure
+ - Implementation Note: CXL Host Bridge and Upstream Switch Port Decoder Flow
+ - Implementation Note: Device Decoder Logic
+
+Given this, Linux makes a strong assumption that decoders between CPU and
+endpoint will all be programmed with address ranges that are subsets of
+their parent decoder.
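+
+A sketch of that subset expectation (illustrative only, not the driver's
+actual validation code)::
+
+    def is_subset(child_base, child_size, parent_base, parent_size):
+        return (child_base >= parent_base and
+                child_base + child_size <= parent_base + parent_size)
+
+    # e.g. a host bridge decoder must fall entirely within its root decoder
+    assert is_subset(0x100000000, 0xC0000000, 0x100000000, 0xC0000000)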
+
+Due to some ambiguity in how Architecture, ACPI, PCI, and CXL specifications
+"hand off" responsibility between domains, some early adopting platforms
+attempted to do translation at the originating memory controller or host
+bridge. This configuration requires a platform specific extension to the
+driver and is not officially endorsed - despite being supported.
+
+It is *highly recommended* **NOT** to do this; otherwise, you are on your own
+to implement driver support for your platform.
+
+Interleave and Configuration Flexibility
+----------------------------------------
+If providing cross-host-bridge interleave, a CFMWS entry in the :doc:`CEDT
+<acpi/cedt>` must be presented with target host-bridges for the interleaved
+device sets (there may be multiple behind each host bridge).
+
+If providing intra-host-bridge interleaving, only 1 CFMWS entry in the CEDT is
+required for that host bridge - if it covers the entire capacity of the devices
+behind the host bridge.
+
+If intending to provide users flexibility in programming decoders beyond the
+root, you may want to provide multiple CFMWS entries in the CEDT intended for
+different purposes. For example, you may want to consider adding:
+
+1) A CFMWS entry to cover all interleavable host bridges.
+2) A CFMWS entry to cover all devices on a single host bridge.
+3) A CFMWS entry to cover each device.
+
+A platform may choose to add all of these, or change the mode based on a BIOS
+setting. For each CFMWS entry, Linux expects a matching description of the
+memory region in the :doc:`SRAT <acpi/srat>` to determine the number of
+NUMA nodes it should reserve during early boot / init.
+
+As of v6.14, Linux will create a NUMA node for each CEDT CFMWS entry, even if
+a matching SRAT entry does not exist; however, this is not guaranteed in the
+future and such a configuration should be avoided.
+
+Memory Holes
+------------
+If your platform includes memory holes interspersed between your CXL memory, it
+is recommended to utilize multiple decoders to cover these regions of memory,
+rather than try to program the decoders to accept the entire range and expect
+Linux to manage the overlap.
+
+For example, consider the Memory Hole described above ::
+
+ ---------------------
+ | 0x100000000 |
+ | CXL |
+ | 0x1BFFFFFFF |
+ ---------------------
+ | 0x1C0000000 |
+ | MEMORY HOLE |
+ | 0x1FFFFFFFF |
+ ---------------------
+ | 0x200000000 |
+ | CXL CONT. |
+ | 0x23FFFFFFF |
+ ---------------------
+
+Assuming this is provided by a single device attached directly to a host bridge,
+Linux would expect the following decoder programming ::
+
+ ----------------------- -----------------------
+ | root-decoder-0 | | root-decoder-1 |
+ | base: 0x100000000 | | base: 0x200000000 |
+ | size: 0xC0000000 | | size: 0x40000000 |
+ ----------------------- -----------------------
+ | |
+ ----------------------- -----------------------
+ | HB-decoder-0 | | HB-decoder-1 |
+ | base: 0x100000000 | | base: 0x200000000 |
+ | size: 0xC0000000 | | size: 0x40000000 |
+ ----------------------- -----------------------
+ | |
+ ----------------------- -----------------------
+ | ep-decoder-0 | | ep-decoder-1 |
+ | base: 0x100000000 | | base: 0x200000000 |
+ | size: 0xC0000000 | | size: 0x40000000 |
+ ----------------------- -----------------------
+
+The CEDT would then contain two CFMWS entries describing the above root decoders.
+
+Linux makes no guarantee of support for strange memory hole situations.
+
+Multi-Media Devices
+-------------------
+Each CFMWS entry in the CEDT has restriction bits which describe whether
+the described memory region allows volatile or persistent memory (or both). If
+the platform intends to support either:
+
+1) A device with multiple media types, or
+2) Using a persistent memory device as normal memory
+
+then the platform may wish to create multiple CEDT CFMWS entries to describe the same
+memory, with the intent of allowing the end user flexibility in how that memory
+is configured. Linux does not presently have strong requirements in this area.
diff --git a/Documentation/driver-api/cxl/platform/cdat.rst b/Documentation/driver-api/cxl/platform/cdat.rst
new file mode 100644
index 000000000000..34bbe7264d71
--- /dev/null
+++ b/Documentation/driver-api/cxl/platform/cdat.rst
@@ -0,0 +1,118 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================================
+Coherent Device Attribute Table (CDAT)
+======================================
+
+The CDAT provides functional and performance attributes of devices such
+as CXL accelerators, switches, or endpoints. The table formatting is
+similar to ACPI tables. CDAT data may be parsed by BIOS at boot or may
+be enumerated at runtime (after device hotplug, for example).
+
+Terminology:
+DPA - Device Physical Address, used by the CXL device to denote the addresses
+the device supports internally.
+
+DSMADHandle - A device unique handle that is associated with a DPA range
+defined by the DSMAS table.
+
+
+===============================================
+Device Scoped Memory Affinity Structure (DSMAS)
+===============================================
+
+The DSMAS contains information such as DSMADHandle, the DPA Base, and DPA
+Length.
+
+This table is used by Linux in conjunction with the Device Scoped Latency and
+Bandwidth Information Structure (DSLBIS) to determine the performance
+attributes of the CXL device itself.
+
+Example ::
+
+ Structure Type : 00 [DSMAS]
+ Reserved : 00
+ Length : 0018 <- 24d, size of structure
+ DSMADHandle : 01
+ Flags : 00
+ Reserved : 0000
+ DPA Base : 0000000040000000 <- 1GiB base
+ DPA Length : 0000000080000000 <- 2GiB size
+
+
+==================================================================
+Device Scoped Latency and Bandwidth Information Structure (DSLBIS)
+==================================================================
+
+This table is used by Linux in conjunction with DSMAS to determine the
+performance attributes of a CXL device. The DSLBIS contains latency
+and bandwidth information based on DSMADHandle matching.
+
+Example ::
+
+ Structure Type : 01 [DSLBIS]
+ Reserved : 00
+ Length : 18 <- 24d, size of structure
+ Handle : 0001 <- DSMAS handle
+ Flags : 00 <- Matches flag field for HMAT SLLBIS
+ Data Type : 00 <- Latency
+    Entry Base Unit                  : 0000000000001000 <- Entry Base Unit field in HMAT SSLBIS
+ Entry : 010000000000 <- First byte used here, CXL LTC
+ Reserved : 0000
+
+ Structure Type : 01 [DSLBIS]
+ Reserved : 00
+ Length : 18 <- 24d, size of structure
+ Handle : 0001 <- DSMAS handle
+ Flags : 00 <- Matches flag field for HMAT SLLBIS
+ Data Type : 03 <- Bandwidth
+    Entry Base Unit                  : 0000000000001000 <- Entry Base Unit field in HMAT SSLBIS
+ Entry : 020000000000 <- First byte used here, CXL BW
+ Reserved : 0000
+
+
+==================================================================
+Switch Scoped Latency and Bandwidth Information Structure (SSLBIS)
+==================================================================
+
+The SSLBIS contains information about the latency and bandwidth of a switch.
+
+The table is used by Linux to compute the performance coordinates of a CXL path
+from the device to the root port where a switch is part of the path.
+
+Example ::
+
+ Structure Type : 05 [SSLBIS]
+ Reserved : 00
+ Length : 20 <- 32d, length of record, including SSLB entries
+ Data Type : 00 <- Latency
+ Reserved : 000000
+ Entry Base Unit : 00000000000000001000 <- Matches Entry Base Unit in HMAT SSLBIS
+
+ <- SSLB Entry 0
+ Port X ID : 0100 <- First port, 0100h represents an upstream port
+ Port Y ID : 0000 <- Second port, downstream port 0
+ Latency : 0100 <- Port latency
+ Reserved : 0000
+ <- SSLB Entry 1
+ Port X ID : 0100
+ Port Y ID : 0001
+ Latency : 0100
+ Reserved : 0000
+
+
+ Structure Type : 05 [SSLBIS]
+ Reserved : 00
+ Length : 18 <- 24d, length of record, including SSLB entry
+ Data Type : 03 <- Bandwidth
+ Reserved : 000000
+ Entry Base Unit : 00000000000000001000 <- Matches Entry Base Unit in HMAT SSLBIS
+
+ <- SSLB Entry 0
+ Port X ID : 0100 <- First port, 0100h represents an upstream port
+ Port Y ID : FFFF <- Second port, FFFFh indicates any port
+ Bandwidth : 1200 <- Port bandwidth
+ Reserved : 0000
+
+The CXL driver uses a combination of CDAT, HMAT, SRAT, and other data to
+generate "whole path performance" data for a CXL device.
diff --git a/Documentation/driver-api/cxl/platform/example-configs.rst b/Documentation/driver-api/cxl/platform/example-configs.rst
new file mode 100644
index 000000000000..90a10d7473c6
--- /dev/null
+++ b/Documentation/driver-api/cxl/platform/example-configs.rst
@@ -0,0 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Example Platform Configurations
+###############################
+
+.. toctree::
+ :maxdepth: 1
+ :caption: Contents
+
+ example-configurations/one-dev-per-hb.rst
+ example-configurations/multi-dev-per-hb.rst
+ example-configurations/hb-interleave.rst
+ example-configurations/flexible.rst
diff --git a/Documentation/driver-api/cxl/platform/example-configurations/flexible.rst b/Documentation/driver-api/cxl/platform/example-configurations/flexible.rst
new file mode 100644
index 000000000000..dab704b6fcc2
--- /dev/null
+++ b/Documentation/driver-api/cxl/platform/example-configurations/flexible.rst
@@ -0,0 +1,296 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+Flexible Presentation
+=====================
+This system has a single socket with two CXL host bridges. Each host bridge
+has two CXL memory expanders with 4GB of memory each (16GB total).
+
+On this system, the platform designer wanted to provide the user flexibility
+to configure the memory devices in various interleave or NUMA node
+configurations. So they provided every combination.
+
+Things to note:
+
+* Cross-Bridge interleave is described in one CFMWS that covers all capacity.
+* One CFMWS is also described per-host bridge.
+* One CFMWS is also described per-device.
+* This SRAT describes one node for each of the above CFMWS.
+* The HMAT describes performance for each node in the SRAT.
+
+:doc:`CEDT <../acpi/cedt>`::
+
+ Subtable Type : 00 [CXL Host Bridge Structure]
+ Reserved : 00
+ Length : 0020
+ Associated host bridge : 00000007
+ Specification version : 00000001
+ Reserved : 00000000
+ Register base : 0000010370400000
+ Register length : 0000000000010000
+
+ Subtable Type : 00 [CXL Host Bridge Structure]
+ Reserved : 00
+ Length : 0020
+ Associated host bridge : 00000006
+ Specification version : 00000001
+ Reserved : 00000000
+ Register base : 0000010380800000
+ Register length : 0000000000010000
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Reserved : 00
+ Length : 002C
+ Reserved : 00000000
+ Window base address : 0000001000000000
+ Window size : 0000000400000000
+ Interleave Members (2^n) : 01
+ Interleave Arithmetic : 00
+ Reserved : 0000
+ Granularity : 00000000
+ Restrictions : 0006
+ QtgId : 0001
+ First Target : 00000007
+ Second Target : 00000006
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Reserved : 00
+ Length : 002C
+ Reserved : 00000000
+ Window base address : 0000002000000000
+ Window size : 0000000200000000
+ Interleave Members (2^n) : 00
+ Interleave Arithmetic : 00
+ Reserved : 0000
+ Granularity : 00000000
+ Restrictions : 0006
+ QtgId : 0001
+ First Target : 00000007
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Reserved : 00
+ Length : 002C
+ Reserved : 00000000
+ Window base address : 0000002200000000
+ Window size : 0000000200000000
+ Interleave Members (2^n) : 00
+ Interleave Arithmetic : 00
+ Reserved : 0000
+ Granularity : 00000000
+ Restrictions : 0006
+ QtgId : 0001
+ First Target : 00000006
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Reserved : 00
+ Length : 002C
+ Reserved : 00000000
+ Window base address : 0000003000000000
+ Window size : 0000000100000000
+ Interleave Members (2^n) : 00
+ Interleave Arithmetic : 00
+ Reserved : 0000
+ Granularity : 00000000
+ Restrictions : 0006
+ QtgId : 0001
+ First Target : 00000007
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Reserved : 00
+ Length : 002C
+ Reserved : 00000000
+ Window base address : 0000003100000000
+ Window size : 0000000100000000
+ Interleave Members (2^n) : 00
+ Interleave Arithmetic : 00
+ Reserved : 0000
+ Granularity : 00000000
+ Restrictions : 0006
+ QtgId : 0001
+ First Target : 00000007
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Reserved : 00
+ Length : 002C
+ Reserved : 00000000
+ Window base address : 0000003200000000
+ Window size : 0000000100000000
+ Interleave Members (2^n) : 00
+ Interleave Arithmetic : 00
+ Reserved : 0000
+ Granularity : 00000000
+ Restrictions : 0006
+ QtgId : 0001
+ First Target : 00000006
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Reserved : 00
+ Length : 002C
+ Reserved : 00000000
+ Window base address : 0000003300000000
+ Window size : 0000000100000000
+ Interleave Members (2^n) : 00
+ Interleave Arithmetic : 00
+ Reserved : 0000
+ Granularity : 00000000
+ Restrictions : 0006
+ QtgId : 0001
+ First Target : 00000006
+
+:doc:`SRAT <../acpi/srat>`::
+
+ Subtable Type : 01 [Memory Affinity]
+ Length : 28
+ Proximity Domain : 00000001
+ Reserved1 : 0000
+ Base Address : 0000001000000000
+ Address Length : 0000000400000000
+ Reserved2 : 00000000
+ Flags (decoded below) : 0000000B
+ Enabled : 1
+ Hot Pluggable : 1
+ Non-Volatile : 0
+
+ Subtable Type : 01 [Memory Affinity]
+ Length : 28
+ Proximity Domain : 00000002
+ Reserved1 : 0000
+ Base Address : 0000002000000000
+ Address Length : 0000000200000000
+ Reserved2 : 00000000
+ Flags (decoded below) : 0000000B
+ Enabled : 1
+ Hot Pluggable : 1
+ Non-Volatile : 0
+
+ Subtable Type : 01 [Memory Affinity]
+ Length : 28
+ Proximity Domain : 00000003
+ Reserved1 : 0000
+ Base Address : 0000002200000000
+ Address Length : 0000000200000000
+ Reserved2 : 00000000
+ Flags (decoded below) : 0000000B
+ Enabled : 1
+ Hot Pluggable : 1
+ Non-Volatile : 0
+
+ Subtable Type : 01 [Memory Affinity]
+ Length : 28
+ Proximity Domain : 00000004
+ Reserved1 : 0000
+ Base Address : 0000003000000000
+ Address Length : 0000000100000000
+ Reserved2 : 00000000
+ Flags (decoded below) : 0000000B
+ Enabled : 1
+ Hot Pluggable : 1
+ Non-Volatile : 0
+
+ Subtable Type : 01 [Memory Affinity]
+ Length : 28
+ Proximity Domain : 00000005
+ Reserved1 : 0000
+ Base Address : 0000003100000000
+ Address Length : 0000000100000000
+ Reserved2 : 00000000
+ Flags (decoded below) : 0000000B
+ Enabled : 1
+ Hot Pluggable : 1
+ Non-Volatile : 0
+
+ Subtable Type : 01 [Memory Affinity]
+ Length : 28
+ Proximity Domain : 00000006
+ Reserved1 : 0000
+ Base Address : 0000003200000000
+ Address Length : 0000000100000000
+ Reserved2 : 00000000
+ Flags (decoded below) : 0000000B
+ Enabled : 1
+ Hot Pluggable : 1
+ Non-Volatile : 0
+
+ Subtable Type : 01 [Memory Affinity]
+ Length : 28
+ Proximity Domain : 00000007
+ Reserved1 : 0000
+ Base Address : 0000003300000000
+ Address Length : 0000000100000000
+ Reserved2 : 00000000
+ Flags (decoded below) : 0000000B
+ Enabled : 1
+ Hot Pluggable : 1
+ Non-Volatile : 0
+
+:doc:`HMAT <../acpi/hmat>`::
+
+ Structure Type : 0001 [SLLBI]
+ Data Type : 00 [Latency]
+ Target Proximity Domain List : 00000000
+ Target Proximity Domain List : 00000001
+ Target Proximity Domain List : 00000002
+ Target Proximity Domain List : 00000003
+ Target Proximity Domain List : 00000004
+ Target Proximity Domain List : 00000005
+ Target Proximity Domain List : 00000006
+ Target Proximity Domain List : 00000007
+ Entry : 0080
+ Entry : 0100
+ Entry : 0100
+ Entry : 0100
+ Entry : 0100
+ Entry : 0100
+ Entry : 0100
+ Entry : 0100
+
+ Structure Type : 0001 [SLLBI]
+ Data Type : 03 [Bandwidth]
+ Target Proximity Domain List : 00000000
+ Target Proximity Domain List : 00000001
+ Target Proximity Domain List : 00000002
+ Target Proximity Domain List : 00000003
+ Target Proximity Domain List : 00000004
+ Target Proximity Domain List : 00000005
+ Target Proximity Domain List : 00000006
+ Target Proximity Domain List : 00000007
+ Entry : 1200
+ Entry : 0400
+ Entry : 0200
+ Entry : 0200
+ Entry : 0100
+ Entry : 0100
+ Entry : 0100
+ Entry : 0100
+
+:doc:`SLIT <../acpi/slit>`::
+
+ Signature : "SLIT" [System Locality Information Table]
+ Localities : 0000000000000003
+ Locality 0 : 10 20 20 20 20 20 20 20
+ Locality 1 : FF 0A FF FF FF FF FF FF
+ Locality 2 : FF FF 0A FF FF FF FF FF
+ Locality 3 : FF FF FF 0A FF FF FF FF
+ Locality 4 : FF FF FF FF 0A FF FF FF
+ Locality 5 : FF FF FF FF FF 0A FF FF
+ Locality 6 : FF FF FF FF FF FF 0A FF
+ Locality 7 : FF FF FF FF FF FF FF 0A
+
+:doc:`DSDT <../acpi/dsdt>`::
+
+ Scope (_SB)
+ {
+ Device (S0D0)
+ {
+ Name (_HID, "ACPI0016" /* Compute Express Link Host Bridge */) // _HID: Hardware ID
+ ...
+ Name (_UID, 0x07) // _UID: Unique ID
+ }
+ ...
+ Device (S0D5)
+ {
+ Name (_HID, "ACPI0016" /* Compute Express Link Host Bridge */) // _HID: Hardware ID
+ ...
+ Name (_UID, 0x06) // _UID: Unique ID
+ }
+ }
diff --git a/Documentation/driver-api/cxl/platform/example-configurations/hb-interleave.rst b/Documentation/driver-api/cxl/platform/example-configurations/hb-interleave.rst
new file mode 100644
index 000000000000..c474dcf09fb0
--- /dev/null
+++ b/Documentation/driver-api/cxl/platform/example-configurations/hb-interleave.rst
@@ -0,0 +1,107 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================
+Cross-Host-Bridge Interleave
+============================
+This system has a single socket with two CXL host bridges. Each host bridge
+has a single CXL memory expander with 4GB of memory.
+
+Things to note:
+
+* Cross-Bridge interleave is described.
+* The expanders are described by a single CFMWS.
+* This SRAT describes one node for both host bridges.
+* The HMAT describes a single node's performance.
+
+:doc:`CEDT <../acpi/cedt>`::
+
+ Subtable Type : 00 [CXL Host Bridge Structure]
+ Reserved : 00
+ Length : 0020
+ Associated host bridge : 00000007
+ Specification version : 00000001
+ Reserved : 00000000
+ Register base : 0000010370400000
+ Register length : 0000000000010000
+
+ Subtable Type : 00 [CXL Host Bridge Structure]
+ Reserved : 00
+ Length : 0020
+ Associated host bridge : 00000006
+ Specification version : 00000001
+ Reserved : 00000000
+ Register base : 0000010380800000
+ Register length : 0000000000010000
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Reserved : 00
+ Length : 002C
+ Reserved : 00000000
+ Window base address : 0000001000000000
+ Window size : 0000000200000000
+ Interleave Members (2^n) : 01
+ Interleave Arithmetic : 00
+ Reserved : 0000
+ Granularity : 00000000
+ Restrictions : 0006
+ QtgId : 0001
+ First Target : 00000007
+ Second Target : 00000006
+
+:doc:`SRAT <../acpi/srat>`::
+
+ Subtable Type : 01 [Memory Affinity]
+ Length : 28
+ Proximity Domain : 00000001
+ Reserved1 : 0000
+ Base Address : 0000001000000000
+ Address Length : 0000000200000000
+ Reserved2 : 00000000
+ Flags (decoded below) : 0000000B
+ Enabled : 1
+ Hot Pluggable : 1
+ Non-Volatile : 0
+
+:doc:`HMAT <../acpi/hmat>`::
+
+ Structure Type : 0001 [SLLBI]
+ Data Type : 00 [Latency]
+ Target Proximity Domain List : 00000000
+ Target Proximity Domain List : 00000001
+ Target Proximity Domain List : 00000002
+ Entry : 0080
+ Entry : 0100
+
+ Structure Type : 0001 [SLLBI]
+ Data Type : 03 [Bandwidth]
+ Target Proximity Domain List : 00000000
+ Target Proximity Domain List : 00000001
+ Target Proximity Domain List : 00000002
+ Entry : 1200
+ Entry : 0400
+
+:doc:`SLIT <../acpi/slit>`::
+
+ Signature : "SLIT" [System Locality Information Table]
+ Localities : 0000000000000003
+ Locality 0 : 10 20
+ Locality 1 : FF 0A
+
+:doc:`DSDT <../acpi/dsdt>`::
+
+ Scope (_SB)
+ {
+ Device (S0D0)
+ {
+ Name (_HID, "ACPI0016" /* Compute Express Link Host Bridge */) // _HID: Hardware ID
+ ...
+ Name (_UID, 0x07) // _UID: Unique ID
+ }
+ ...
+ Device (S0D5)
+ {
+ Name (_HID, "ACPI0016" /* Compute Express Link Host Bridge */) // _HID: Hardware ID
+ ...
+ Name (_UID, 0x06) // _UID: Unique ID
+ }
+ }
diff --git a/Documentation/driver-api/cxl/platform/example-configurations/multi-dev-per-hb.rst b/Documentation/driver-api/cxl/platform/example-configurations/multi-dev-per-hb.rst
new file mode 100644
index 000000000000..a7854a79dbbd
--- /dev/null
+++ b/Documentation/driver-api/cxl/platform/example-configurations/multi-dev-per-hb.rst
@@ -0,0 +1,90 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================================
+Multiple Devices per Host Bridge
+================================
+
+In this example system we will have a single socket and one CXL host bridge.
+There are two CXL memory expanders, each with 4GB of memory, attached to the
+host bridge.
+
+Things to note:
+
+* Intra-Bridge interleave is not described here.
+* The expanders are described by a single CEDT/CFMWS.
+* This CEDT/SRAT describes one node for both devices.
+* There is only one proximity domain in the HMAT for both devices.
+
+:doc:`CEDT <../acpi/cedt>`::
+
+ Subtable Type : 00 [CXL Host Bridge Structure]
+ Reserved : 00
+ Length : 0020
+ Associated host bridge : 00000007
+ Specification version : 00000001
+ Reserved : 00000000
+ Register base : 0000010370400000
+ Register length : 0000000000010000
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Reserved : 00
+ Length : 002C
+ Reserved : 00000000
+ Window base address : 0000001000000000
+ Window size : 0000000200000000
+ Interleave Members (2^n) : 00
+ Interleave Arithmetic : 00
+ Reserved : 0000
+ Granularity : 00000000
+ Restrictions : 0006
+ QtgId : 0001
+ First Target : 00000007
+
+:doc:`SRAT <../acpi/srat>`::
+
+ Subtable Type : 01 [Memory Affinity]
+ Length : 28
+ Proximity Domain : 00000001
+ Reserved1 : 0000
+ Base Address : 0000001000000000
+ Address Length : 0000000200000000
+ Reserved2 : 00000000
+ Flags (decoded below) : 0000000B
+ Enabled : 1
+ Hot Pluggable : 1
+ Non-Volatile : 0
+
+:doc:`HMAT <../acpi/hmat>`::
+
+ Structure Type : 0001 [SLLBI]
+ Data Type : 00 [Latency]
+ Target Proximity Domain List : 00000000
+ Target Proximity Domain List : 00000001
+ Entry : 0080
+ Entry : 0100
+
+ Structure Type : 0001 [SLLBI]
+ Data Type : 03 [Bandwidth]
+ Target Proximity Domain List : 00000000
+ Target Proximity Domain List : 00000001
+ Entry : 1200
+ Entry : 0200
+
+:doc:`SLIT <../acpi/slit>`::
+
+ Signature : "SLIT" [System Locality Information Table]
+ Localities : 0000000000000003
+ Locality 0 : 10 20
+ Locality 1 : FF 0A
+
+:doc:`DSDT <../acpi/dsdt>`::
+
+ Scope (_SB)
+ {
+ Device (S0D0)
+ {
+ Name (_HID, "ACPI0016" /* Compute Express Link Host Bridge */) // _HID: Hardware ID
+ ...
+ Name (_UID, 0x07) // _UID: Unique ID
+ }
+ ...
+ }
diff --git a/Documentation/driver-api/cxl/platform/example-configurations/one-dev-per-hb.rst b/Documentation/driver-api/cxl/platform/example-configurations/one-dev-per-hb.rst
new file mode 100644
index 000000000000..aebda0eb3e17
--- /dev/null
+++ b/Documentation/driver-api/cxl/platform/example-configurations/one-dev-per-hb.rst
@@ -0,0 +1,136 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+One Device per Host Bridge
+==========================
+
+This system has a single socket with two CXL host bridges. Each host bridge
+has a single CXL memory expander with 4GB of memory.
+
+Things to note:
+
+* Cross-Bridge interleave is not being used.
+* The expanders are in two separate but adjacent memory regions.
+* This CEDT/SRAT describes one node per device.
+* The expanders have the same performance and will be in the same memory tier.
+
+:doc:`CEDT <../acpi/cedt>`::
+
+ Subtable Type : 00 [CXL Host Bridge Structure]
+ Reserved : 00
+ Length : 0020
+ Associated host bridge : 00000007
+ Specification version : 00000001
+ Reserved : 00000000
+ Register base : 0000010370400000
+ Register length : 0000000000010000
+
+ Subtable Type : 00 [CXL Host Bridge Structure]
+ Reserved : 00
+ Length : 0020
+ Associated host bridge : 00000006
+ Specification version : 00000001
+ Reserved : 00000000
+ Register base : 0000010380800000
+ Register length : 0000000000010000
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Reserved : 00
+ Length : 002C
+ Reserved : 00000000
+ Window base address : 0000001000000000
+ Window size : 0000000100000000
+ Interleave Members (2^n) : 00
+ Interleave Arithmetic : 00
+ Reserved : 0000
+ Granularity : 00000000
+ Restrictions : 0006
+ QtgId : 0001
+ First Target : 00000007
+
+ Subtable Type : 01 [CXL Fixed Memory Window Structure]
+ Reserved : 00
+ Length : 002C
+ Reserved : 00000000
+ Window base address : 0000001100000000
+ Window size : 0000000100000000
+ Interleave Members (2^n) : 00
+ Interleave Arithmetic : 00
+ Reserved : 0000
+ Granularity : 00000000
+ Restrictions : 0006
+ QtgId : 0001
+ First Target : 00000006
+
+:doc:`SRAT <../acpi/srat>`::
+
+ Subtable Type : 01 [Memory Affinity]
+ Length : 28
+ Proximity Domain : 00000001
+ Reserved1 : 0000
+ Base Address : 0000001000000000
+ Address Length : 0000000100000000
+ Reserved2 : 00000000
+ Flags (decoded below) : 0000000B
+ Enabled : 1
+ Hot Pluggable : 1
+ Non-Volatile : 0
+
+ Subtable Type : 01 [Memory Affinity]
+ Length : 28
+ Proximity Domain : 00000002
+ Reserved1 : 0000
+ Base Address : 0000001100000000
+ Address Length : 0000000100000000
+ Reserved2 : 00000000
+ Flags (decoded below) : 0000000B
+ Enabled : 1
+ Hot Pluggable : 1
+ Non-Volatile : 0
+
+:doc:`HMAT <../acpi/hmat>`::
+
+ Structure Type : 0001 [SLLBI]
+ Data Type : 00 [Latency]
+ Target Proximity Domain List : 00000000
+ Target Proximity Domain List : 00000001
+ Target Proximity Domain List : 00000002
+ Entry : 0080
+ Entry : 0100
+ Entry : 0100
+
+ Structure Type : 0001 [SLLBI]
+ Data Type : 03 [Bandwidth]
+ Target Proximity Domain List : 00000000
+ Target Proximity Domain List : 00000001
+ Target Proximity Domain List : 00000002
+ Entry : 1200
+ Entry : 0200
+ Entry : 0200
+
+:doc:`SLIT <../acpi/slit>`::
+
+ Signature : "SLIT" [System Locality Information Table]
+ Localities : 0000000000000003
+ Locality 0 : 10 20 20
+ Locality 1 : FF 0A FF
+ Locality 2 : FF FF 0A
+
+:doc:`DSDT <../acpi/dsdt>`::
+
+ Scope (_SB)
+ {
+ Device (S0D0)
+ {
+ Name (_HID, "ACPI0016" /* Compute Express Link Host Bridge */) // _HID: Hardware ID
+ ...
+ Name (_UID, 0x07) // _UID: Unique ID
+ }
+ ...
+ Device (S0D5)
+ {
+ Name (_HID, "ACPI0016" /* Compute Express Link Host Bridge */) // _HID: Hardware ID
+ ...
+ Name (_UID, 0x06) // _UID: Unique ID
+ }
+ }
diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/theory-of-operation.rst
index d732c42526df..40793dad3630 100644
--- a/Documentation/driver-api/cxl/memory-devices.rst
+++ b/Documentation/driver-api/cxl/theory-of-operation.rst
@@ -1,9 +1,9 @@
.. SPDX-License-Identifier: GPL-2.0
.. include:: <isonum.txt>
-===================================
-Compute Express Link Memory Devices
-===================================
+===============================================
+Compute Express Link Driver Theory of Operation
+===============================================
A Compute Express Link Memory Device is a CXL component that implements the
CXL.mem protocol. It contains some amount of volatile memory, persistent memory,
@@ -14,8 +14,8 @@ that optionally define a device's contribution to an interleaved address
range across multiple devices underneath a host-bridge or interleaved
across host-bridges.
-CXL Bus: Theory of Operation
-============================
+The CXL Bus
+===========
Similar to how a RAID driver takes disk objects and assembles them into a new
logical device, the CXL subsystem is tasked to take PCIe and ACPI objects and
assemble them into a CXL.mem decode topology. The need for runtime configuration
@@ -347,6 +347,9 @@ CXL Core
.. kernel-doc:: drivers/cxl/cxl.h
:internal:
+.. kernel-doc:: drivers/cxl/acpi.c
+ :identifiers: add_cxl_resources
+
.. kernel-doc:: drivers/cxl/core/hdm.c
:doc: cxl core hdm
@@ -371,12 +374,26 @@ CXL Core
.. kernel-doc:: drivers/cxl/core/pmem.c
:doc: cxl pmem
+.. kernel-doc:: drivers/cxl/core/pmem.c
+ :identifiers:
+
.. kernel-doc:: drivers/cxl/core/regs.c
:doc: cxl registers
+.. kernel-doc:: drivers/cxl/core/regs.c
+ :identifiers:
+
.. kernel-doc:: drivers/cxl/core/mbox.c
:doc: cxl mbox
+.. kernel-doc:: drivers/cxl/core/mbox.c
+ :identifiers:
+
+.. kernel-doc:: drivers/cxl/core/features.c
+ :doc: cxl features
+
+See :c:func:`devm_cxl_setup_features` for API details.
+
CXL Regions
-----------
.. kernel-doc:: drivers/cxl/core/region.c
diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst
index 3085f8b460fa..8f0910668ca3 100644
--- a/Documentation/driver-api/dmaengine/provider.rst
+++ b/Documentation/driver-api/dmaengine/provider.rst
@@ -217,10 +217,12 @@ Currently, the types available are:
- DMA_ASYNC_TX
- - Must not be set by the device, and will be set by the framework
- if needed
+ - The device supports asynchronous memory-to-memory operations,
+ including memcpy, memset, xor, pq, xor_val, and pq_val.
- - TODO: What is it about?
+ - This capability is automatically set by the DMA engine
+ framework and must not be configured manually by device
+ drivers.
- DMA_SLAVE
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index d75728eb05f8..3d56f94ac2ee 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -391,12 +391,11 @@ PCI
devm_pci_remap_cfgspace() : ioremap PCI configuration space
devm_pci_remap_cfg_resource() : ioremap PCI configuration space resource
- pcim_enable_device() : after success, some PCI ops become managed
+ pcim_enable_device() : after success, the PCI device gets disabled automatically on driver detach
pcim_iomap() : do iomap() on a single BAR
pcim_iomap_regions() : do request_region() and iomap() on multiple BARs
pcim_iomap_table() : array of mapped addresses indexed by BAR
pcim_iounmap() : do iounmap() on a single BAR
- pcim_iounmap_regions() : do iounmap() and release_region() on multiple BARs
pcim_pin_device() : keep PCI device enabled after release
pcim_set_mwi() : enable Memory-Write-Invalidate PCI transaction
diff --git a/Documentation/driver-api/early-userspace/buffer-format.rst b/Documentation/driver-api/early-userspace/buffer-format.rst
index 7f74e301fdf3..726bfa2fe70d 100644
--- a/Documentation/driver-api/early-userspace/buffer-format.rst
+++ b/Documentation/driver-api/early-userspace/buffer-format.rst
@@ -4,20 +4,18 @@ initramfs buffer format
Al Viro, H. Peter Anvin
-Last revision: 2002-01-13
-
-Starting with kernel 2.5.x, the old "initial ramdisk" protocol is
-getting {replaced/complemented} with the new "initial ramfs"
-(initramfs) protocol. The initramfs contents is passed using the same
-memory buffer protocol used by the initrd protocol, but the contents
+With kernel 2.5.x, the old "initial ramdisk" protocol was complemented
+with an "initial ramfs" protocol. The initramfs content is passed
+using the same memory buffer protocol used by initrd, but the content
is different. The initramfs buffer contains an archive which is
-expanded into a ramfs filesystem; this document details the format of
-the initramfs buffer format.
+expanded into a ramfs filesystem; this document details the initramfs
+buffer format.
The initramfs buffer format is based around the "newc" or "crc" CPIO
formats, and can be created with the cpio(1) utility. The cpio
-archive can be compressed using gzip(1). One valid version of an
-initramfs buffer is thus a single .cpio.gz file.
+archive can be compressed using gzip(1), or any other algorithm provided
+via CONFIG_DECOMPRESS_*. One valid version of an initramfs buffer is
+thus a single .cpio.gz file.
The full format of the initramfs buffer is defined by the following
grammar, where::
@@ -25,12 +23,20 @@ grammar, where::
* is used to indicate "0 or more occurrences of"
(|) indicates alternatives
+ indicates concatenation
- GZIP() indicates the gzip(1) of the operand
+ GZIP() indicates gzip compression of the operand
+ BZIP2() indicates bzip2 compression of the operand
+ LZMA() indicates lzma compression of the operand
+ XZ() indicates xz compression of the operand
+ LZO() indicates lzo compression of the operand
+ LZ4() indicates lz4 compression of the operand
+ ZSTD() indicates zstd compression of the operand
ALGN(n) means padding with null bytes to an n-byte boundary
- initramfs := ("\0" | cpio_archive | cpio_gzip_archive)*
+ initramfs := ("\0" | cpio_archive | cpio_compressed_archive)*
- cpio_gzip_archive := GZIP(cpio_archive)
+ cpio_compressed_archive := (GZIP(cpio_archive) | BZIP2(cpio_archive)
+ | LZMA(cpio_archive) | XZ(cpio_archive) | LZO(cpio_archive)
+ | LZ4(cpio_archive) | ZSTD(cpio_archive))
cpio_archive := cpio_file* + (<nothing> | cpio_trailer)
@@ -75,6 +81,8 @@ c_chksum 8 bytes Checksum of data field if c_magic is 070702;
The c_mode field matches the contents of st_mode returned by stat(2)
on Linux, and encodes the file type and file permissions.
+c_mtime is ignored unless CONFIG_INITRAMFS_PRESERVE_MTIME=y is set.
+
The c_filesize should be zero for any file which is not a regular file
or symlink.
diff --git a/Documentation/driver-api/gpio/index.rst b/Documentation/driver-api/gpio/index.rst
index 34b57cee3391..43f6a3afe10b 100644
--- a/Documentation/driver-api/gpio/index.rst
+++ b/Documentation/driver-api/gpio/index.rst
@@ -27,7 +27,7 @@ Core
ACPI support
============
-.. kernel-doc:: drivers/gpio/gpiolib-acpi.c
+.. kernel-doc:: drivers/gpio/gpiolib-acpi-core.c
:export:
Device tree support
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 16e2c4ec3c01..3e2a270bd828 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -81,6 +81,7 @@ Subsystem-specific APIs
acpi/index
backlight/lp855x-driver.rst
clk
+ coco/index
console
crypto/index
dmaengine/index
diff --git a/Documentation/driver-api/ipmi.rst b/Documentation/driver-api/ipmi.rst
index dfa021eacd63..2cc6c898ab90 100644
--- a/Documentation/driver-api/ipmi.rst
+++ b/Documentation/driver-api/ipmi.rst
@@ -45,7 +45,7 @@ manual), choose the 'IPMI SI handler' option. A driver also exists
for direct I2C access to the IPMI management controller. Some boards
support this, but it is unknown if it will work on every board. For
this, choose 'IPMI SMBus handler', but be ready to try to do some
-figuring to see if it will work on your system if the SMBIOS/APCI
+figuring to see if it will work on your system if the SMBIOS/ACPI
information is wrong or not present. It is fairly safe to have both
these enabled and let the drivers auto-detect what is present.
@@ -63,7 +63,7 @@ situation, you need to read the section below named 'The SI Driver' or
IPMI defines a standard watchdog timer. You can enable this with the
'IPMI Watchdog Timer' config option. If you compile the driver into
the kernel, then via a kernel command-line option you can have the
-watchdog timer start as soon as it initializes. It also have a lot
+watchdog timer start as soon as it initializes. It also has a lot
of other options, see the 'Watchdog' section below for more details.
Note that you can also have the watchdog continue to run if it is
closed (by default it is disabled on close). Go into the 'Watchdog
@@ -280,10 +280,8 @@ Creating the User
To use the message handler, you must first create a user using
ipmi_create_user. The interface number specifies which SMI you want
to connect to, and you must supply callback functions to be called
-when data comes in. The callback function can run at interrupt level,
-so be careful using the callbacks. This also allows to you pass in a
-piece of data, the handler_data, that will be passed back to you on
-all calls.
+when data comes in. This also allows you to pass in a piece of data,
+the handler_data, that will be passed back to you on all calls.
Once you are done, call ipmi_destroy_user() to get rid of the user.
@@ -303,8 +301,7 @@ use it for anything you like.
Responses come back in the function pointed to by the ipmi_recv_hndl
field of the "handler" that you passed in to ipmi_create_user().
-Remember again, these may be running at interrupt level. Remember to
-look at the receive type, too.
+Remember to look at the receive type, too.
From userland, you fill out an ipmi_req_t structure and use the
IPMICTL_SEND_COMMAND ioctl. For incoming stuff, you can use select()
@@ -317,13 +314,13 @@ This gives the receiver a place to actually put the message.
If the message cannot fit into the data you provide, you will get an
EMSGSIZE error and the driver will leave the data in the receive
-queue. If you want to get it and have it truncate the message, us
+queue. If you want to get it and have it truncate the message, use
the IPMICTL_RECEIVE_MSG_TRUNC ioctl.
When you send a command (which is defined by the lowest-order bit of
the netfn per the IPMI spec) on the IPMB bus, the driver will
automatically assign the sequence number to the command and save the
-command. If the response is not receive in the IPMI-specified 5
+command. If the response is not received in the IPMI-specified 5
seconds, it will generate a response automatically saying the command
timed out. If an unsolicited response comes in (if it was after 5
seconds, for instance), that response will be ignored.
@@ -367,7 +364,7 @@ channel bitmasks do not overlap.
To respond to a received command, set the response bit in the returned
netfn, use the address from the received message, and use the same
-msgid that you got in the receive message.
+msgid that you got in the received message.
From userland, equivalent IOCTLs are provided to do these functions.
@@ -440,7 +437,7 @@ register would be 0xca6. This defaults to 1.
The regsizes parameter gives the size of a register, in bytes. The
data used by IPMI is 8-bits wide, but it may be inside a larger
-register. This parameter allows the read and write type to specified.
+register. This parameter allows the read and write type to be specified.
It may be 1, 2, 4, or 8. The default is 1.
Since the register size may be larger than 32 bits, the IPMI data may not
@@ -481,8 +478,8 @@ If your IPMI interface does not support interrupts and is a KCS or
SMIC interface, the IPMI driver will start a kernel thread for the
interface to help speed things up. This is a low-priority kernel
thread that constantly polls the IPMI driver while an IPMI operation
-is in progress. The force_kipmid module parameter will all the user to
-force this thread on or off. If you force it off and don't have
+is in progress. The force_kipmid module parameter will allow the user
+to force this thread on or off. If you force it off and don't have
interrupts, the driver will run VERY slowly. Don't blame me,
these interfaces suck.
@@ -583,7 +580,7 @@ kernel command line as::
These are the same options as on the module command line.
The I2C driver does not support non-blocking access or polling, so
-this driver cannod to IPMI panic events, extend the watchdog at panic
+this driver cannot do IPMI panic events, extend the watchdog at panic
time, or other panic-related IPMI functions without special kernel
patches and driver modifications. You can get those at the openipmi
web page.
@@ -610,7 +607,7 @@ Parameters are::
ipmi_ipmb.retry_time_ms=<Time between retries on IPMB>
ipmi_ipmb.max_retries=<Number of times to retry a message>
-Loading the module will not result in the driver automatcially
+Loading the module will not result in the driver automatically
starting unless there is device tree information setting it up. If
you want to instantiate one of these by hand, do::
diff --git a/Documentation/driver-api/ntb.rst b/Documentation/driver-api/ntb.rst
index e991d92b8b1d..a49c41383779 100644
--- a/Documentation/driver-api/ntb.rst
+++ b/Documentation/driver-api/ntb.rst
@@ -35,7 +35,7 @@ anyone who has written a pci driver.
NTB Typical client driver implementation
----------------------------------------
-Primary purpose of NTB is to share some peace of memory between at least two
+Primary purpose of NTB is to share some piece of memory between at least two
systems. So the NTB device features like Scratchpad/Message registers are
mainly used to perform the proper memory window initialization. Typically
there are two types of memory window interfaces supported by the NTB API:
diff --git a/Documentation/driver-api/thermal/intel_dptf.rst b/Documentation/driver-api/thermal/intel_dptf.rst
index 8fb8c5b2d685..ec5769accae0 100644
--- a/Documentation/driver-api/thermal/intel_dptf.rst
+++ b/Documentation/driver-api/thermal/intel_dptf.rst
@@ -191,6 +191,27 @@ ABI.
User space can specify any one of the available workload type using
this interface.
+:file:`/sys/bus/pci/devices/0000\:00\:04.0/ptc_0_control`
+:file:`/sys/bus/pci/devices/0000\:00\:04.0/ptc_1_control`
+:file:`/sys/bus/pci/devices/0000\:00\:04.0/ptc_2_control`
+
+All these controls need admin privileges to update.
+
+``enable`` (RW)
+ 1 for enable, 0 for disable. Shows the current enable status of
+ platform temperature control feature. User space can enable/disable
+ hardware controls.
+
+``temperature_target`` (RW)
+ Update a new temperature target in milli degree celsius for hardware to
+ use for the temperature control.
+
+Given that this is platform temperature control, it is expected that a
+single user-level manager owns and manages the controls. If multiple
+user-level software applications attempt to write different targets, it
+can lead to unexpected behavior.
+
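+As an illustration only (this assumes ``ptc_0_control`` is a directory holding
+the ``enable`` and ``temperature_target`` attributes described above, and that
+the caller has admin privileges), a single user-space manager might program a
+target like this::
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <unistd.h>
+
+    /* Program a 95 degree Celsius target (in milli degree celsius) on the
+     * first platform temperature control instance, then enable it. */
+    static int ptc_write(const char *attr, const char *val)
+    {
+            char path[128];
+            int fd, ret;
+
+            snprintf(path, sizeof(path),
+                     "/sys/bus/pci/devices/0000:00:04.0/ptc_0_control/%s", attr);
+            fd = open(path, O_WRONLY);
+            if (fd < 0)
+                    return -1;
+            ret = write(fd, val, strlen(val)) < 0 ? -1 : 0;
+            close(fd);
+            return ret;
+    }
+
+    int main(void)
+    {
+            if (ptc_write("temperature_target", "95000"))
+                    return 1;
+            return ptc_write("enable", "1") ? 1 : 0;
+    }
+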
+
DPTF Processor thermal RFIM interface
--------------------------------------------
diff --git a/Documentation/driver-api/usb/usb.rst b/Documentation/driver-api/usb/usb.rst
index 89f9c37bb979..976fb4221062 100644
--- a/Documentation/driver-api/usb/usb.rst
+++ b/Documentation/driver-api/usb/usb.rst
@@ -161,6 +161,7 @@ rely on 64bit DMA to eliminate another kind of bounce buffer.
.. kernel-doc:: drivers/usb/core/urb.c
:export:
+.. c:namespace:: usb_core
.. kernel-doc:: drivers/usb/core/message.c
:export:
diff --git a/Documentation/edac/memory_repair.rst b/Documentation/edac/memory_repair.rst
index 52162a422864..5f8da7c9b186 100644
--- a/Documentation/edac/memory_repair.rst
+++ b/Documentation/edac/memory_repair.rst
@@ -119,3 +119,34 @@ sysfs
Sysfs files are documented in
`Documentation/ABI/testing/sysfs-edac-memory-repair`.
+
+Examples
+--------
+
+Memory repair usage takes the form shown in these examples:
+
+1. CXL memory sparing
+
+Memory sparing is defined as a repair function that replaces a portion of
+memory with a portion of functional memory at that same DPA. The subclass
+for this operation, cacheline/row/bank/rank sparing, vary in terms of the
+scope of the sparing being performed.
+
+Memory sparing maintenance operations may be supported by CXL devices that
+implement CXL.mem protocol. A sparing maintenance operation requests the
+CXL device to perform a repair operation on its media. For example, a CXL
+device with DRAM components that support memory sparing features may
+implement sparing maintenance operations.
+
+2. CXL memory Soft Post Package Repair (sPPR)
+
+Post Package Repair (PPR) maintenance operations may be supported by CXL
+devices that implement CXL.mem protocol. A PPR maintenance operation
+requests the CXL device to perform a repair operation on its media.
+For example, a CXL device with DRAM components that support PPR features
+may implement PPR Maintenance operations. Soft PPR (sPPR) is a temporary
+row repair. Soft PPR may be faster, but the repair is lost with a power
+cycle.
+
+Sysfs files for memory repair are documented in
+`Documentation/ABI/testing/sysfs-edac-memory-repair`
diff --git a/Documentation/edac/scrub.rst b/Documentation/edac/scrub.rst
index daab929cdba1..2cfa74fa1ffd 100644
--- a/Documentation/edac/scrub.rst
+++ b/Documentation/edac/scrub.rst
@@ -264,3 +264,79 @@ Sysfs files are documented in
`Documentation/ABI/testing/sysfs-edac-scrub`
`Documentation/ABI/testing/sysfs-edac-ecs`
+
+Examples
+--------
+
+The usage takes the form shown in these examples:
+
+1. CXL memory Patrol Scrub
+
+The following are the identified use cases in which we might increase the
+scrub rate.
+
+- Scrubbing is needed at device granularity because a device is showing
+ unexpectedly high errors.
+
+- Scrubbing may apply to memory that isn't online at all yet. Likely this
+ is a system wide default setting on boot.
+
+- Scrubbing at a higher rate because the monitor software has determined that
+ more reliability is necessary for a particular data set. This is called
+ Differentiated Reliability.
+
+1.1. Device based scrubbing
+
+CXL memory is exposed to the memory management subsystem and ultimately userspace
+via CXL devices. Device-based scrubbing is used for the first use case
+described in "Section 1 CXL Memory Patrol Scrub".
+
+When combining control via the device interfaces and region interfaces, see
+Section 1.2, "Region based scrubbing".
+
+Sysfs files for scrubbing are documented in
+`Documentation/ABI/testing/sysfs-edac-scrub`
+
+1.2. Region based scrubbing
+
+CXL memory is exposed to the memory management subsystem and ultimately userspace
+via CXL regions. CXL Regions represent mapped memory capacity in system
+physical address space. These can incorporate one or more parts of multiple CXL
+memory devices with traffic interleaved across them. The user may want to control
+the scrub rate via this more abstract region instead of having to figure out the
+constituent devices and program them separately. The scrub rate for each device
+covers the whole device. Thus if multiple regions use parts of that device then
+requests for scrubbing of other regions may result in a higher scrub rate than
+requested for this specific region.
+
+Region-based scrubbing is used for the third use case described in
+"Section 1 CXL Memory Patrol Scrub".
+
+Userspace must follow the below set of rules when setting scrub rates for any
+mixture of requirements (an illustrative sketch follows the list).
+
+1. Take each region in turn, from lowest desired scrub rate to highest, and set
+ their scrub rates. Later regions may override the scrub rate on individual
+ devices (and hence potentially whole regions).
+
+2. Take each device for which enhanced scrubbing is required (higher rate) and
+ set those scrub rates. This will override the scrub rates of individual devices,
+ setting them to the maximum rate required for any of the regions they help back,
+ unless a specific rate is already defined.
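+
+A purely illustrative sketch of that ordering; the helpers and fields below are
+invented for this document and do not correspond to real kernel or sysfs
+interfaces::
+
+    /* Step 1: walk regions from lowest desired scrub rate to highest, so
+     * later (faster) regions override earlier ones on shared devices. */
+    for (i = 0; i < nr_regions; i++)                /* pre-sorted ascending */
+            set_region_scrub_rate(&regions[i]);     /* hypothetical helper */
+
+    /* Step 2: devices needing enhanced scrubbing override whatever the
+     * region pass programmed on them. */
+    for (i = 0; i < nr_devices; i++)
+            if (devices[i].needs_enhanced_scrub)
+                    set_device_scrub_rate(&devices[i]); /* hypothetical */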
+
+Sysfs files for scrubbing are documented in
+`Documentation/ABI/testing/sysfs-edac-scrub`
+
+2. CXL memory Error Check Scrub (ECS)
+
+The Error Check Scrub (ECS) feature enables a memory device to perform error
+checking and correction (ECC) and count single-bit errors. The associated
+memory controller sets the ECS mode with a trigger sent to the memory
+device. CXL ECS control allows the host, thus the userspace, to change the
+attributes for error count mode, threshold number of errors per segment
+(indicating how many segments have at least that number of errors) for
+reporting errors, and reset the ECS counter. Thus the responsibility for
+initiating Error Check Scrub on a memory device may lie with the memory
+controller or platform when unexpectedly high error rates are detected.
+
+Sysfs files for scrubbing are documented in
+`Documentation/ABI/testing/sysfs-edac-ecs`
diff --git a/Documentation/fb/sstfb.rst b/Documentation/fb/sstfb.rst
index 88d5a52b13d8..7386eb63bac8 100644
--- a/Documentation/fb/sstfb.rst
+++ b/Documentation/fb/sstfb.rst
@@ -192,7 +192,7 @@ Todo
- Get rid of the previous paragraph.
- Buy more coffee.
- test/port to other arch.
-- try to add panning using tweeks with front and back buffer .
+- try to add panning using tweaks with front and back buffer.
- try to implement accel on voodoo2, this board can actually do a
lot in 2D even if it was sold as a 3D only board ...
diff --git a/Documentation/filesystems/bcachefs/casefolding.rst b/Documentation/filesystems/bcachefs/casefolding.rst
index ba5de97d155f..871a38f557e8 100644
--- a/Documentation/filesystems/bcachefs/casefolding.rst
+++ b/Documentation/filesystems/bcachefs/casefolding.rst
@@ -88,3 +88,21 @@ This would fail if negative dentry's were cached.
This is slightly suboptimal, but could be fixed in future with some vfs work.
+
+References
+----------
+
+(from Peter Anvin, on the list)
+
+It is worth noting that Microsoft has basically declared their
+"recommended" case folding (upcase) table to be permanently frozen (for
+new filesystem instances in the case where they use an on-disk
+translation table created at format time.) As far as I know they have
+never supported anything other than 1:1 conversion of BMP code points,
+nor normalization.
+
+The exFAT specification enumerates the full recommended upcase table,
+although in a somewhat annoying format (basically a hex dump of
+compressed data):
+
+https://learn.microsoft.com/en-us/windows/win32/fileio/exfat-specification
diff --git a/Documentation/filesystems/bcachefs/future/idle_work.rst b/Documentation/filesystems/bcachefs/future/idle_work.rst
new file mode 100644
index 000000000000..59a332509dcd
--- /dev/null
+++ b/Documentation/filesystems/bcachefs/future/idle_work.rst
@@ -0,0 +1,78 @@
+Idle/background work classes design doc:
+
+Right now, our behaviour at idle isn't ideal; it was designed for servers that
+would be under sustained load, to keep pending work at a "medium" level, to
+let work build up so we can process it in more efficient batches, while also
+giving headroom for bursts in load.
+
+But for desktops or mobile - scenarios where work is less sustained and power
+usage is more important - we want to operate differently, with a "rush to
+idle" so the system can go to sleep. We don't want to be dribbling out
+background work while the system should be idle.
+
+The complicating factor is that there are a number of background tasks, which
+form a hierarchy (or a digraph, depending on how you divide it up) - one
+background task may generate work for another.
+
+Thus proper idle detection needs to model this hierarchy.
+
+- Foreground writes
+- Page cache writeback
+- Copygc, rebalance
+- Journal reclaim
+
+When we implement idle detection and rush to idle, we need to be careful not
+to disturb too much the existing behaviour that works reasonably well when the
+system is under sustained load (or perhaps improve it in the case of
+rebalance, which currently does not actively attempt to let work batch up).
+
+SUSTAINED LOAD REGIME
+---------------------
+
+When the system is under continuous load, we want these jobs to run
+continuously - this is perhaps best modelled with a P/D controller, where
+they'll be trying to keep a target value (i.e. fragmented disk space,
+available journal space) roughly in the middle of some range.
+
+The goal under sustained load is to balance our ability to handle load spikes
+without running out of x resource (free disk space, free space in the
+journal), while also letting some work accumulate to be batched (or become
+unnecessary).
+
+For example, we don't want to run copygc too aggressively, because then it
+will be evacuating buckets that would have become empty (been overwritten or
+deleted) anyways, and we don't want to wait until we're almost out of free
+space because then the system will behave unpredictably - suddenly we're doing
+a lot more work to service each write and the system becomes much slower.
+
+IDLE REGIME
+-----------
+
+When the system becomes idle, we should start flushing our pending work
+quicker so the system can go to sleep.
+
+Note that the definition of "idle" depends on where in the hierarchy a task
+is - a task should start flushing work more quickly when the task above it has
+stopped generating new work.
+
+e.g. rebalance should start flushing more quickly when page cache writeback is
+idle, and journal reclaim should only start flushing more quickly when both
+copygc and rebalance are idle.
+
+It's important to let work accumulate when more work is still incoming and we
+still have room, because flushing is always more efficient if we let it batch
+up. New writes may overwrite data before rebalance moves it, and tasks may be
+generating more updates for the btree nodes that journal reclaim needs to flush.
+
+On idle, how much work we do at each interval should be proportional to the
+length of time we have been idle for. If we're idle only for a short duration,
+we shouldn't flush everything right away; the system might wake up and start
+generating new work soon, and flushing immediately might end up doing a lot of
+work that would have been unnecessary if we'd allowed things to batch more.
+
+To summarize, we will need:
+
+ - A list of classes for background tasks that generate work, which will
+ include one "foreground" class.
+ - Tracking for each class - "Am I doing work, or have I gone to sleep?"
+ - And each class should check the class above it when deciding how much work to issue.
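+
+A purely illustrative sketch of those pieces - every name and constant below is
+invented for this document, nothing like it exists in the tree yet::
+
+    /* Hypothetical per-class state for the hierarchy described above. */
+    struct work_class {
+            const char              *name;
+            struct work_class       *above;         /* NULL for foreground */
+            int                     active;         /* generating work now? */
+            unsigned long long      idle_since_ns;
+    };
+
+    #define SUSTAINED_LOAD_BATCH    32ULL           /* batch size under load */
+    #define MAX_BATCH               4096ULL
+    #define IDLE_RAMP_NS            1000000ULL
+
+    /* Only flush aggressively once the class above us has gone quiet, and
+     * scale the batch size with how long we have been idle. */
+    static unsigned long long class_batch_size(struct work_class *c,
+                                               unsigned long long now_ns)
+    {
+            unsigned long long batch;
+
+            if (c->above && c->above->active)
+                    return SUSTAINED_LOAD_BATCH;
+
+            batch = SUSTAINED_LOAD_BATCH +
+                    (now_ns - c->idle_since_ns) / IDLE_RAMP_NS;
+            return batch > MAX_BATCH ? MAX_BATCH : batch;
+    }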
diff --git a/Documentation/filesystems/bcachefs/index.rst b/Documentation/filesystems/bcachefs/index.rst
index 3864d0ae89c1..e5c4c2120b93 100644
--- a/Documentation/filesystems/bcachefs/index.rst
+++ b/Documentation/filesystems/bcachefs/index.rst
@@ -29,3 +29,10 @@ At this moment, only a few of these are described here.
casefolding
errorcodes
+
+Future design
+-------------
+.. toctree::
+ :maxdepth: 1
+
+ future/idle_work
diff --git a/Documentation/filesystems/debugfs.rst b/Documentation/filesystems/debugfs.rst
index 610f718ef8b5..55f807293924 100644
--- a/Documentation/filesystems/debugfs.rst
+++ b/Documentation/filesystems/debugfs.rst
@@ -229,22 +229,15 @@ module is unloaded without explicitly removing debugfs entries, the result
will be a lot of stale pointers and no end of highly antisocial behavior.
So all debugfs users - at least those which can be built as modules - must
be prepared to remove all files and directories they create there. A file
-can be removed with::
+or directory can be removed with::
void debugfs_remove(struct dentry *dentry);
The dentry value can be NULL or an error value, in which case nothing will
-be removed.
-
-Once upon a time, debugfs users were required to remember the dentry
-pointer for every debugfs file they created so that all files could be
-cleaned up. We live in more civilized times now, though, and debugfs users
-can call::
-
- void debugfs_remove_recursive(struct dentry *dentry);
-
-If this function is passed a pointer for the dentry corresponding to the
-top-level directory, the entire hierarchy below that directory will be
-removed.
+be removed. Note that this function will recursively remove all files and
+directories underneath it. Previously, debugfs_remove_recursive() was used
+to perform that task, but this function is now just an alias to
+debugfs_remove(). debugfs_remove_recursive() should be considered
+deprecated.
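+
+For example, a module that creates a small directory tree at init time can now
+tear the whole thing down with one call on exit (an illustrative sketch, not
+taken from any in-tree driver)::
+
+    static struct dentry *my_dir;
+    static u32 my_value;
+
+    static int __init my_module_init(void)
+    {
+            my_dir = debugfs_create_dir("my_driver", NULL);
+            debugfs_create_u32("value", 0644, my_dir, &my_value);
+            return 0;
+    }
+
+    static void __exit my_module_exit(void)
+    {
+            /* Removes "value" and the "my_driver" directory itself. */
+            debugfs_remove(my_dir);
+    }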
.. [1] http://lwn.net/Articles/309298/
diff --git a/Documentation/filesystems/erofs.rst b/Documentation/filesystems/erofs.rst
index c293f8e37468..7ddb235aee9d 100644
--- a/Documentation/filesystems/erofs.rst
+++ b/Documentation/filesystems/erofs.rst
@@ -128,6 +128,7 @@ device=%s Specify a path to an extra device to be used together.
fsid=%s Specify a filesystem image ID for Fscache back-end.
domain_id=%s Specify a domain ID in fscache mode so that different images
with the same blobs under a given domain ID can share storage.
+fsoffset=%llu Specify block-aligned filesystem offset for the primary device.
=================== =========================================================
Sysfs Entries
diff --git a/Documentation/filesystems/ext4/atomic_writes.rst b/Documentation/filesystems/ext4/atomic_writes.rst
new file mode 100644
index 000000000000..f65767df3620
--- /dev/null
+++ b/Documentation/filesystems/ext4/atomic_writes.rst
@@ -0,0 +1,225 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. _atomic_writes:
+
+Atomic Block Writes
+-------------------------
+
+Introduction
+~~~~~~~~~~~~
+
+Atomic (untorn) block writes ensure that either the entire write is committed
+to disk or none of it is. This prevents "torn writes" during power loss or
+system crashes. The ext4 filesystem supports atomic writes (only with Direct
+I/O) on regular files with extents, provided the underlying storage device
+supports hardware atomic writes. This is supported in the following two ways:
+
+1. **Single-fsblock Atomic Writes**:
+ EXT4 has supported atomic write operations on a single filesystem block since
+ v6.13. In this mode, the atomic write unit minimum and maximum sizes are both
+ set to the filesystem block size.
+ e.g. an atomic write of 16KB with a 16KB filesystem block size on a 64KB
+ page size system is possible.
+
+2. **Multi-fsblock Atomic Writes with Bigalloc**:
+ EXT4 now also supports atomic writes spanning multiple filesystem blocks
+ using a feature known as bigalloc. The atomic write unit's minimum and
+ maximum sizes are determined by the filesystem block size and cluster size,
+ based on the underlying device’s supported atomic write unit limits.
+
+Requirements
+~~~~~~~~~~~~
+
+Basic requirements for atomic writes in ext4:
+
+ 1. The extents feature must be enabled (default for ext4)
+ 2. The underlying block device must support atomic writes
+ 3. For single-fsblock atomic writes:
+
+ 1. A filesystem with appropriate block size (up to the page size)
+ 4. For multi-fsblock atomic writes:
+
+ 1. The bigalloc feature must be enabled
+ 2. The cluster size must be appropriately configured
+
+NOTE: EXT4 does not support software- or COW-based atomic writes, which means
+atomic writes on ext4 are only supported if the underlying storage device
+supports them.
+
+Multi-fsblock Implementation Details
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The bigalloc feature changes ext4 to allocate in units of multiple filesystem
+blocks, also known as clusters. With bigalloc, each bit within the block bitmap
+represents a cluster (a power-of-2 number of blocks) rather than an individual
+filesystem block.
+EXT4 supports multi-fsblock atomic writes with bigalloc, subject to the
+following constraints. The minimum atomic write size is the larger of the fs
+block size and the minimum hardware atomic write unit; and the maximum atomic
+write size is the smaller of the bigalloc cluster size and the maximum hardware
+atomic write unit. Bigalloc ensures that all allocations are aligned to the
+cluster size, which satisfies the LBA alignment requirements of the hardware
+device if the start of the partition/logical volume is itself aligned correctly.
+
+Here is the block allocation strategy in bigalloc for atomic writes:
+
+ * For regions with fully mapped extents, no additional work is needed
+ * For append writes, a new mapped extent is allocated
+ * For regions that are entirely holes, unwritten extent is created
+ * For large unwritten extents, the extent gets split into two unwritten
+ extents of appropriate requested size
+ * For mixed mapping regions (combinations of holes, unwritten extents, or
+ mapped extents), ext4_map_blocks() is called in a loop with
+ EXT4_GET_BLOCKS_ZERO flag to convert the region into a single contiguous
+ mapped extent by writing zeroes to it and converting any unwritten extents to
+ written, if found within the range.
+
+Note: Writing on a single contiguous underlying extent, whether mapped or
+unwritten, is not inherently problematic. However, writing to a mixed mapping
+region (i.e. one containing a combination of mapped and unwritten extents)
+must be avoided when performing atomic writes.
+
+The reason is that atomic writes, when issued via pwritev2() with the RWF_ATOMIC
+flag, require that either all data is written or none at all. In the event of
+a system crash or unexpected power loss during the write operation, the affected
+region (when later read) must reflect either the complete old data or the
+complete new data, but never a mix of both.
+
+To enforce this guarantee, we ensure that the write target is backed by
+a single, contiguous extent before any data is written. This is critical because
+ext4 defers the conversion of unwritten extents to written extents until the I/O
+completion path (typically in ->end_io()). If a write is allowed to proceed over
+a mixed mapping region (with mapped and unwritten extents) and a failure occurs
+mid-write, the system could observe partially updated regions after reboot, i.e.
+new data over mapped areas, and stale (old) data over unwritten extents that
+were never marked written. This violates the atomicity and/or torn write
+prevention guarantee.
+
+To prevent such torn writes, ext4 proactively allocates a single contiguous
+extent for the entire requested region in ``ext4_iomap_alloc`` via
+``ext4_map_blocks_atomic()``. EXT4 also force commits the current journalling
+transaction in case if allocation is done over mixed mapping. This ensures any
+pending metadata updates (like unwritten to written extents conversion) in this
+range are in consistent state with the file data blocks, before performing the
+actual write I/O. If the commit fails, the whole I/O must be aborted to prevent
+from any possible torn writes.
+Only after this step, the actual data write operation is performed by the iomap.
+
+Handling Split Extents Across Leaf Blocks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There can be a special edge case where we have logically and physically
+contiguous extents stored in separate leaf nodes of the on-disk extent tree.
+This occurs because on-disk extent tree merges only happen within leaf blocks,
+except for the case where a 2-level tree can get merged and collapsed entirely
+into the inode.
+If such a layout exists and, in the worst case, the extent status cache entries
+are reclaimed due to memory pressure, ``ext4_map_blocks()`` may never return
+a single contiguous extent for these split leaf extents.
+
+To address this edge case, a new get block flag
+``EXT4_GET_BLOCKS_QUERY_LEAF_BLOCKS`` is added to enhance the
+``ext4_map_query_blocks()`` lookup behavior.
+
+This new get block flag allows ``ext4_map_blocks()`` to first check if there is
+an entry in the extent status cache for the full range.
+If not present, it consults the on-disk extent tree using
+``ext4_map_query_blocks()``.
+If the located extent is at the end of a leaf node, it probes the next logical
+block (lblk) to detect a contiguous extent in the adjacent leaf.
+
+For now only one additional leaf block is queried to maintain efficiency, as
+atomic writes are typically constrained to small sizes
+(e.g. [blocksize, clustersize]).
+
+
+Handling Journal transactions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To support multi-fsblock atomic writes, we ensure enough journal credits are
+reserved during:
+
+ 1. Block allocation time in ``ext4_iomap_alloc()``. We first query if there
+ could be a mixed mapping for the underlying requested range. If yes, then we
+ reserve credits of up to ``m_len``, assuming every alternate block can be
+ an unwritten extent followed by a hole.
+
+ 2. During ``->end_io()`` call, we make sure a single transaction is started for
+ doing unwritten-to-written conversion. The loop for conversion is mainly
+ only required to handle a split extent across leaf blocks.
+
+How to
+------
+
+Creating Filesystems with Atomic Write Support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+First check the atomic write units supported by block device.
+See :ref:`atomic_write_bdev_support` for more details.
+
+For single-fsblock atomic writes with a larger block size
+(on systems with block size < page size):
+
+.. code-block:: bash
+
+ # Create an ext4 filesystem with a 16KB block size
+ # (requires page size >= 16KB)
+ mkfs.ext4 -b 16384 /dev/device
+
+For multi-fsblock atomic writes with bigalloc:
+
+.. code-block:: bash
+
+ # Create an ext4 filesystem with bigalloc and 64KB cluster size
+ mkfs.ext4 -F -O bigalloc -b 4096 -C 65536 /dev/device
+
+Where ``-b`` specifies the block size, ``-C`` specifies the cluster size in bytes,
+and ``-O bigalloc`` enables the bigalloc feature.
+
+Application Interface
+~~~~~~~~~~~~~~~~~~~~~
+
+Applications can use the ``pwritev2()`` system call with the ``RWF_ATOMIC`` flag
+to perform atomic writes:
+
+.. code-block:: c
+
+ pwritev2(fd, iov, iovcnt, offset, RWF_ATOMIC);
+
+The write must be aligned to the filesystem's block size and not exceed the
+filesystem's maximum atomic write unit size.
+See ``generic_atomic_write_valid()`` for more details.
+
+The ``statx()`` system call with the ``STATX_WRITE_ATOMIC`` flag can provide the
+following details:
+
+ * ``stx_atomic_write_unit_min``: Minimum size of an atomic write request.
+ * ``stx_atomic_write_unit_max``: Maximum size of an atomic write request.
+ * ``stx_atomic_write_segments_max``: Upper limit for segments. The number of
+ separate memory buffers that can be gathered into a write operation
+ (e.g., the iovcnt parameter for IOV_ITER). Currently, this is always set to one.
+
+The STATX_ATTR_WRITE_ATOMIC flag in ``statx->attributes`` is set if atomic
+writes are supported.
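+
+A minimal sketch of querying these fields (illustrative only; it assumes the
+kernel and C library headers in use are new enough to define
+``STATX_WRITE_ATOMIC``, ``STATX_ATTR_WRITE_ATOMIC`` and the ``stx_atomic_*``
+fields):
+
+.. code-block:: c
+
+    #define _GNU_SOURCE
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <sys/stat.h>
+
+    int report_atomic_limits(const char *path)
+    {
+            struct statx stx;
+
+            if (statx(AT_FDCWD, path, 0, STATX_WRITE_ATOMIC, &stx) != 0)
+                    return -1;
+
+            if (!(stx.stx_attributes & STATX_ATTR_WRITE_ATOMIC)) {
+                    printf("%s: atomic writes not supported\n", path);
+                    return 0;
+            }
+
+            printf("min %u, max %u, max segments %u\n",
+                   stx.stx_atomic_write_unit_min,
+                   stx.stx_atomic_write_unit_max,
+                   stx.stx_atomic_write_segments_max);
+            return 0;
+    }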
+
+.. _atomic_write_bdev_support:
+
+Hardware Support
+----------------
+
+The underlying storage device must support atomic write operations.
+Modern NVMe and SCSI devices often provide this capability.
+The Linux kernel exposes this information through sysfs:
+
+* ``/sys/block/<device>/queue/atomic_write_unit_min`` - Minimum atomic write size
+* ``/sys/block/<device>/queue/atomic_write_unit_max`` - Maximum atomic write size
+
+Nonzero values for these attributes indicate that the device supports
+atomic writes.
+
+See Also
+--------
+
+* :doc:`bigalloc` - Documentation on the bigalloc feature
+* :doc:`allocators` - Documentation on block allocation in ext4
+* Support for atomic block writes in 6.13:
+ https://lwn.net/Articles/1009298/
diff --git a/Documentation/filesystems/ext4/overview.rst b/Documentation/filesystems/ext4/overview.rst
index 0fad6eda6e15..9d4054c17ecb 100644
--- a/Documentation/filesystems/ext4/overview.rst
+++ b/Documentation/filesystems/ext4/overview.rst
@@ -25,3 +25,4 @@ order.
.. include:: inlinedata.rst
.. include:: eainode.rst
.. include:: verity.rst
+.. include:: atomic_writes.rst
diff --git a/Documentation/filesystems/ext4/super.rst b/Documentation/filesystems/ext4/super.rst
index a1eb4a11a1d0..1b240661bfa3 100644
--- a/Documentation/filesystems/ext4/super.rst
+++ b/Documentation/filesystems/ext4/super.rst
@@ -328,9 +328,13 @@ The ext4 superblock is laid out as follows in
- s_checksum_type
- Metadata checksum algorithm type. The only valid value is 1 (crc32c).
* - 0x176
- - __le16
- - s_reserved_pad
- -
+ - __u8
+ - s_encryption_level
+ - Versioning level for encryption.
+ * - 0x177
+ - __u8
+ - s_reserved_pad
+ - Padding to the next 32 bits.
* - 0x178
- __le64
- s_kbytes_written
@@ -466,9 +470,13 @@ The ext4 superblock is laid out as follows in
- s_last_error_time_hi
- Upper 8 bits of the s_last_error_time field.
* - 0x27A
- - __u8
- - s_pad[2]
- - Zero padding.
+ - __u8
+ - s_first_error_errcode
+ -
+ * - 0x27B
+ - __u8
+ - s_last_error_errcode
+ -
* - 0x27C
- __le16
- s_encoding
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index e15c4275862a..440e4ae74e44 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -182,32 +182,34 @@ fault_type=%d Support configuring fault injection type, should be
enabled with fault_injection option, fault type value
is shown below, it supports single or combined type.
- =========================== ===========
+ =========================== ==========
Type_Name Type_Value
- =========================== ===========
- FAULT_KMALLOC 0x000000001
- FAULT_KVMALLOC 0x000000002
- FAULT_PAGE_ALLOC 0x000000004
- FAULT_PAGE_GET 0x000000008
- FAULT_ALLOC_BIO 0x000000010 (obsolete)
- FAULT_ALLOC_NID 0x000000020
- FAULT_ORPHAN 0x000000040
- FAULT_BLOCK 0x000000080
- FAULT_DIR_DEPTH 0x000000100
- FAULT_EVICT_INODE 0x000000200
- FAULT_TRUNCATE 0x000000400
- FAULT_READ_IO 0x000000800
- FAULT_CHECKPOINT 0x000001000
- FAULT_DISCARD 0x000002000
- FAULT_WRITE_IO 0x000004000
- FAULT_SLAB_ALLOC 0x000008000
- FAULT_DQUOT_INIT 0x000010000
- FAULT_LOCK_OP 0x000020000
- FAULT_BLKADDR_VALIDITY 0x000040000
- FAULT_BLKADDR_CONSISTENCE 0x000080000
- FAULT_NO_SEGMENT 0x000100000
- FAULT_INCONSISTENT_FOOTER 0x000200000
- =========================== ===========
+ =========================== ==========
+ FAULT_KMALLOC 0x00000001
+ FAULT_KVMALLOC 0x00000002
+ FAULT_PAGE_ALLOC 0x00000004
+ FAULT_PAGE_GET 0x00000008
+ FAULT_ALLOC_BIO 0x00000010 (obsolete)
+ FAULT_ALLOC_NID 0x00000020
+ FAULT_ORPHAN 0x00000040
+ FAULT_BLOCK 0x00000080
+ FAULT_DIR_DEPTH 0x00000100
+ FAULT_EVICT_INODE 0x00000200
+ FAULT_TRUNCATE 0x00000400
+ FAULT_READ_IO 0x00000800
+ FAULT_CHECKPOINT 0x00001000
+ FAULT_DISCARD 0x00002000
+ FAULT_WRITE_IO 0x00004000
+ FAULT_SLAB_ALLOC 0x00008000
+ FAULT_DQUOT_INIT 0x00010000
+ FAULT_LOCK_OP 0x00020000
+ FAULT_BLKADDR_VALIDITY 0x00040000
+ FAULT_BLKADDR_CONSISTENCE 0x00080000
+ FAULT_NO_SEGMENT 0x00100000
+ FAULT_INCONSISTENT_FOOTER 0x00200000
+ FAULT_TIMEOUT 0x00400000 (1000ms)
+ FAULT_VMALLOC 0x00800000
+ =========================== ==========
mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random
writes towards main area.
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index e80329908549..29e84d125e02 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -70,7 +70,7 @@ Online attacks
--------------
fscrypt (and storage encryption in general) can only provide limited
-protection, if any at all, against online attacks. In detail:
+protection against online attacks. In detail:
Side-channel attacks
~~~~~~~~~~~~~~~~~~~~
@@ -99,16 +99,23 @@ Therefore, any encryption-specific access control checks would merely
be enforced by kernel *code* and therefore would be largely redundant
with the wide variety of access control mechanisms already available.)
-Kernel memory compromise
-~~~~~~~~~~~~~~~~~~~~~~~~
+Read-only kernel memory compromise
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Unless `hardware-wrapped keys`_ are used, an attacker who gains the
+ability to read from arbitrary kernel memory, e.g. by mounting a
+physical attack or by exploiting a kernel security vulnerability, can
+compromise all fscrypt keys that are currently in use. This also
+extends to cold boot attacks; if the system is suddenly powered off,
+keys the system was using may remain in memory for a short time.
-An attacker who compromises the system enough to read from arbitrary
-memory, e.g. by mounting a physical attack or by exploiting a kernel
-security vulnerability, can compromise all encryption keys that are
-currently in use.
+However, if hardware-wrapped keys are used, then the fscrypt master
+keys and file contents encryption keys (but not other types of fscrypt
+subkeys such as filenames encryption keys) are protected from
+compromises of arbitrary kernel memory.
-However, fscrypt allows encryption keys to be removed from the kernel,
-which may protect them from later compromise.
+In addition, fscrypt allows encryption keys to be removed from the
+kernel, which may protect them from later compromise.
In more detail, the FS_IOC_REMOVE_ENCRYPTION_KEY ioctl (or the
FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS ioctl) can wipe a master
@@ -144,6 +151,24 @@ However, these ioctls have some limitations:
accelerator hardware (if used by the crypto API to implement any of
the algorithms), or in other places not explicitly considered here.
+Full system compromise
+~~~~~~~~~~~~~~~~~~~~~~
+
+An attacker who gains "root" access and/or the ability to execute
+arbitrary kernel code can freely exfiltrate data that is protected by
+any in-use fscrypt keys. Thus, usually fscrypt provides no meaningful
+protection in this scenario. (Data that is protected by a key that is
+absent throughout the entire attack remains protected, modulo the
+limitations of key removal mentioned above in the case where the key
+was removed prior to the attack.)
+
+However, if `hardware-wrapped keys`_ are used, such attackers will be
+unable to exfiltrate the master keys or file contents keys in a form
+that will be usable after the system is powered off. This may be
+useful if the attacker is significantly time-limited and/or
+bandwidth-limited, so they can only exfiltrate some data and need to
+rely on a later offline attack to exfiltrate the rest of it.
+
Limitations of v1 policies
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -170,6 +195,10 @@ policies on all new encrypted directories.
Key hierarchy
=============
+Note: this section assumes the use of raw keys rather than
+hardware-wrapped keys. The use of hardware-wrapped keys modifies the
+key hierarchy slightly. For details, see `Hardware-wrapped keys`_.
+
Master Keys
-----------
@@ -832,7 +861,9 @@ a pointer to struct fscrypt_add_key_arg, defined as follows::
struct fscrypt_key_specifier key_spec;
__u32 raw_size;
__u32 key_id;
- __u32 __reserved[8];
+ #define FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED 0x00000001
+ __u32 flags;
+ __u32 __reserved[7];
__u8 raw[];
};
@@ -851,7 +882,7 @@ a pointer to struct fscrypt_add_key_arg, defined as follows::
struct fscrypt_provisioning_key_payload {
__u32 type;
- __u32 __reserved;
+ __u32 flags;
__u8 raw[];
};
@@ -879,24 +910,32 @@ as follows:
Alternatively, if ``key_id`` is nonzero, this field must be 0, since
in that case the size is implied by the specified Linux keyring key.
-- ``key_id`` is 0 if the raw key is given directly in the ``raw``
- field. Otherwise ``key_id`` is the ID of a Linux keyring key of
- type "fscrypt-provisioning" whose payload is
- struct fscrypt_provisioning_key_payload whose ``raw`` field contains
- the raw key and whose ``type`` field matches ``key_spec.type``.
- Since ``raw`` is variable-length, the total size of this key's
- payload must be ``sizeof(struct fscrypt_provisioning_key_payload)``
- plus the raw key size. The process must have Search permission on
- this key.
-
- Most users should leave this 0 and specify the raw key directly.
- The support for specifying a Linux keyring key is intended mainly to
+- ``key_id`` is 0 if the key is given directly in the ``raw`` field.
+ Otherwise ``key_id`` is the ID of a Linux keyring key of type
+ "fscrypt-provisioning" whose payload is struct
+ fscrypt_provisioning_key_payload whose ``raw`` field contains the
+ key, whose ``type`` field matches ``key_spec.type``, and whose
+ ``flags`` field matches ``flags``. Since ``raw`` is
+ variable-length, the total size of this key's payload must be
+ ``sizeof(struct fscrypt_provisioning_key_payload)`` plus the number
+ of key bytes. The process must have Search permission on this key.
+
+ Most users should leave this 0 and specify the key directly. The
+ support for specifying a Linux keyring key is intended mainly to
allow re-adding keys after a filesystem is unmounted and re-mounted,
- without having to store the raw keys in userspace memory.
+ without having to store the keys in userspace memory.
+
+- ``flags`` contains optional flags from ``<linux/fscrypt.h>``:
+
+ - FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED: This denotes that the key is a
+ hardware-wrapped key. See `Hardware-wrapped keys`_. This flag
+ can't be used if FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR is used.
- ``raw`` is a variable-length field which must contain the actual
key, ``raw_size`` bytes long. Alternatively, if ``key_id`` is
- nonzero, then this field is unused.
+ nonzero, then this field is unused. Note that despite being named
+ ``raw``, if FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED is specified then it
+ will contain a wrapped key, not a raw key.
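+
+For illustration, a minimal userspace sketch of adding a hardware-wrapped key
+with this ioctl, assuming ``wrapped_key`` and ``wrapped_size`` hold an
+ephemerally-wrapped key blob obtained elsewhere and ``fd`` refers to a file or
+directory on the target filesystem (these variable names are illustrative
+only)::
+
+    #include <linux/fscrypt.h>
+    #include <stdlib.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+
+    struct fscrypt_add_key_arg *arg;
+
+    /* The struct has a variable-length 'raw' field, so allocate room for it. */
+    arg = calloc(1, sizeof(*arg) + wrapped_size);
+    arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
+    arg->raw_size = wrapped_size;
+    arg->flags = FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED;
+    memcpy(arg->raw, wrapped_key, wrapped_size);
+
+    if (ioctl(fd, FS_IOC_ADD_ENCRYPTION_KEY, arg) != 0)
+        /* handle error */;
+
+    /* On success, arg->key_spec.u.identifier contains the key identifier. */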
For v2 policy keys, the kernel keeps track of which user (identified
by effective user ID) added the key, and only allows the key to be
@@ -908,8 +947,8 @@ prevent that other user from unexpectedly removing it. Therefore,
FS_IOC_ADD_ENCRYPTION_KEY may also be used to add a v2 policy key
*again*, even if it's already added by other user(s). In this case,
FS_IOC_ADD_ENCRYPTION_KEY will just install a claim to the key for the
-current user, rather than actually add the key again (but the raw key
-must still be provided, as a proof of knowledge).
+current user, rather than actually add the key again (but the key must
+still be provided, as a proof of knowledge).
FS_IOC_ADD_ENCRYPTION_KEY returns 0 if either the key or a claim to
the key was either added or already exists.
@@ -918,20 +957,23 @@ FS_IOC_ADD_ENCRYPTION_KEY can fail with the following errors:
- ``EACCES``: FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR was specified, but the
caller does not have the CAP_SYS_ADMIN capability in the initial
- user namespace; or the raw key was specified by Linux key ID but the
+ user namespace; or the key was specified by Linux key ID but the
process lacks Search permission on the key.
+- ``EBADMSG``: invalid hardware-wrapped key
- ``EDQUOT``: the key quota for this user would be exceeded by adding
the key
- ``EINVAL``: invalid key size or key specifier type, or reserved bits
were set
-- ``EKEYREJECTED``: the raw key was specified by Linux key ID, but the
- key has the wrong type
-- ``ENOKEY``: the raw key was specified by Linux key ID, but no key
- exists with that ID
+- ``EKEYREJECTED``: the key was specified by Linux key ID, but the key
+ has the wrong type
+- ``ENOKEY``: the key was specified by Linux key ID, but no key exists
+ with that ID
- ``ENOTTY``: this type of filesystem does not implement encryption
- ``EOPNOTSUPP``: the kernel was not configured with encryption
support for this filesystem, or the filesystem superblock has not
- had encryption enabled on it
+ had encryption enabled on it; or a hardware-wrapped key was specified
+ but the filesystem does not support inline encryption or the hardware
+ does not support hardware-wrapped keys
Legacy method
~~~~~~~~~~~~~
@@ -994,9 +1036,8 @@ or removed by non-root users.
These ioctls don't work on keys that were added via the legacy
process-subscribed keyrings mechanism.
-Before using these ioctls, read the `Kernel memory compromise`_
-section for a discussion of the security goals and limitations of
-these ioctls.
+Before using these ioctls, read the `Online attacks`_ section for a
+discussion of the security goals and limitations of these ioctls.
FS_IOC_REMOVE_ENCRYPTION_KEY
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1316,7 +1357,8 @@ inline encryption hardware doesn't have the needed crypto capabilities
(e.g. support for the needed encryption algorithm and data unit size)
and where blk-crypto-fallback is unusable. (For blk-crypto-fallback
to be usable, it must be enabled in the kernel configuration with
-CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y.)
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y, and the file must be
+protected by a raw key rather than a hardware-wrapped key.)
Currently fscrypt always uses the filesystem block size (which is
usually 4096 bytes) as the data unit size. Therefore, it can only use
@@ -1324,7 +1366,76 @@ inline encryption hardware that supports that data unit size.
Inline encryption doesn't affect the ciphertext or other aspects of
the on-disk format, so users may freely switch back and forth between
-using "inlinecrypt" and not using "inlinecrypt".
+using "inlinecrypt" and not using "inlinecrypt". An exception is that
+files that are protected by a hardware-wrapped key can only be
+encrypted/decrypted by the inline encryption hardware and therefore
+can only be accessed when the "inlinecrypt" mount option is used. For
+more information about hardware-wrapped keys, see below.
+
+Hardware-wrapped keys
+---------------------
+
+fscrypt supports using *hardware-wrapped keys* when the inline
+encryption hardware supports it. Such keys are only present in kernel
+memory in wrapped (encrypted) form; they can only be unwrapped
+(decrypted) by the inline encryption hardware and are temporally bound
+to the current boot. This prevents the keys from being compromised if
+kernel memory is leaked. This is done without limiting the number of
+keys that can be used and while still allowing the execution of
+cryptographic tasks that are tied to the same key but can't use inline
+encryption hardware, e.g. filenames encryption.
+
+Note that hardware-wrapped keys aren't specific to fscrypt; they are a
+block layer feature (part of *blk-crypto*). For more details about
+hardware-wrapped keys, see the block layer documentation at
+:ref:`Documentation/block/inline-encryption.rst
+<hardware_wrapped_keys>`. The rest of this section just focuses on
+the details of how fscrypt can use hardware-wrapped keys.
+
+fscrypt supports hardware-wrapped keys by allowing the fscrypt master
+keys to be hardware-wrapped keys as an alternative to raw keys. To
+add a hardware-wrapped key with `FS_IOC_ADD_ENCRYPTION_KEY`_,
+userspace must specify FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED in the
+``flags`` field of struct fscrypt_add_key_arg and also in the
+``flags`` field of struct fscrypt_provisioning_key_payload when
+applicable. The key must be in ephemerally-wrapped form, not
+long-term wrapped form.
+
+Some limitations apply. First, files protected by a hardware-wrapped
+key are tied to the system's inline encryption hardware. Therefore
+they can only be accessed when the "inlinecrypt" mount option is used,
+and they can't be included in portable filesystem images. Second,
+currently the hardware-wrapped key support is only compatible with
+`IV_INO_LBLK_64 policies`_ and `IV_INO_LBLK_32 policies`_, as it
+assumes that there is just one file contents encryption key per
+fscrypt master key rather than one per file. Future work may address
+this limitation by passing per-file nonces down the storage stack to
+allow the hardware to derive per-file keys.
+
+Implementation-wise, to encrypt/decrypt the contents of files that are
+protected by a hardware-wrapped key, fscrypt uses blk-crypto,
+attaching the hardware-wrapped key to the bio crypt contexts. As is
+the case with raw keys, the block layer will program the key into a
+keyslot when it isn't already in one. However, when programming a
+hardware-wrapped key, the hardware doesn't program the given key
+directly into a keyslot but rather unwraps it (using the hardware's
+ephemeral wrapping key) and derives the inline encryption key from it.
+The inline encryption key is the key that actually gets programmed
+into a keyslot, and it is never exposed to software.
+
+However, fscrypt doesn't just do file contents encryption; it also
+uses its master keys to derive filenames encryption keys, key
+identifiers, and sometimes some more obscure types of subkeys such as
+dirhash keys. So even with file contents encryption out of the
+picture, fscrypt still needs a raw key to work with. To get such a
+key from a hardware-wrapped key, fscrypt asks the inline encryption
+hardware to derive a cryptographically isolated "software secret" from
+the hardware-wrapped key. fscrypt uses this "software secret" to key
+its KDF to derive all subkeys other than file contents keys.
+
+Note that this implies that the hardware-wrapped key feature only
+protects the file contents encryption keys. It doesn't protect other
+fscrypt subkeys such as filenames encryption keys.
Direct I/O support
==================
@@ -1409,7 +1520,7 @@ read the ciphertext into the page cache and decrypt it in-place. The
folio lock must be held until decryption has finished, to prevent the
folio from becoming visible to userspace prematurely.
-For the write path (->writepage()) of regular files, filesystems
+For the write path (->writepages()) of regular files, filesystems
cannot encrypt data in-place in the page cache, since the cached
plaintext must be preserved. Instead, filesystems must encrypt into a
temporary buffer or "bounce page", then write out the temporary
diff --git a/Documentation/filesystems/fuse-passthrough.rst b/Documentation/filesystems/fuse-passthrough.rst
new file mode 100644
index 000000000000..2b0e7c2da54a
--- /dev/null
+++ b/Documentation/filesystems/fuse-passthrough.rst
@@ -0,0 +1,133 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================
+FUSE Passthrough
+================
+
+Introduction
+============
+
+FUSE (Filesystem in Userspace) passthrough is a feature designed to improve the
+performance of FUSE filesystems for I/O operations. Typically, FUSE operations
+involve communication between the kernel and a userspace FUSE daemon, which can
+incur overhead. Passthrough allows certain operations on a FUSE file to bypass
+the userspace daemon and be executed directly by the kernel on an underlying
+"backing file".
+
+This is achieved by the FUSE daemon registering a file descriptor (pointing to
+the backing file on a lower filesystem) with the FUSE kernel module. The kernel
+then receives an identifier (``backing_id``) for this registered backing file.
+When a FUSE file is subsequently opened, the FUSE daemon can, in its response to
+the ``OPEN`` request, include this ``backing_id`` and set the
+``FOPEN_PASSTHROUGH`` flag. This establishes a direct link for specific
+operations.
+
+Currently, passthrough is supported for operations like ``read(2)``/``write(2)``
+(via ``read_iter``/``write_iter``), ``splice(2)``, and ``mmap(2)``.
+
+Enabling Passthrough
+====================
+
+To use FUSE passthrough:
+
+ 1. The FUSE filesystem must be compiled with ``CONFIG_FUSE_PASSTHROUGH``
+ enabled.
+ 2. The FUSE daemon, during the ``FUSE_INIT`` handshake, must negotiate the
+ ``FUSE_PASSTHROUGH`` capability and specify its desired
+ ``max_stack_depth``.
+ 3. The (privileged) FUSE daemon uses the ``FUSE_DEV_IOC_BACKING_OPEN`` ioctl
+ on its connection file descriptor (e.g., ``/dev/fuse``) to register a
+ backing file descriptor and obtain a ``backing_id``.
+ 4. When handling an ``OPEN`` or ``CREATE`` request for a FUSE file, the daemon
+ replies with the ``FOPEN_PASSTHROUGH`` flag set in
+ ``fuse_open_out::open_flags`` and provides the corresponding ``backing_id``
+ in ``fuse_open_out::backing_id``.
+ 5. The FUSE daemon should eventually call ``FUSE_DEV_IOC_BACKING_CLOSE`` with
+ the ``backing_id`` to release the kernel's reference to the backing file
+ when it's no longer needed for passthrough setups.
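+
+As a rough sketch of steps 3 to 5 above, assuming ``dev_fd`` is the daemon's
+``/dev/fuse`` connection and ``lower_fd`` is an open descriptor on the backing
+file (both names are illustrative)::
+
+    #include <linux/fuse.h>
+    #include <sys/ioctl.h>
+
+    /* Step 3: register the backing file and obtain a backing_id. */
+    struct fuse_backing_map map = { .fd = lower_fd };
+    int backing_id = ioctl(dev_fd, FUSE_DEV_IOC_BACKING_OPEN, &map);
+
+    /* Step 4: when replying to OPEN, request passthrough to that file. */
+    struct fuse_open_out open_out = {
+        .open_flags = FOPEN_PASSTHROUGH,
+        .backing_id = backing_id,
+    };
+
+    /* Step 5 (later): drop the kernel's reference when no longer needed. */
+    ioctl(dev_fd, FUSE_DEV_IOC_BACKING_CLOSE, &backing_id);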
+
+Privilege Requirements
+======================
+
+Setting up passthrough functionality currently requires the FUSE daemon to
+possess the ``CAP_SYS_ADMIN`` capability. This requirement stems from several
+security and resource management considerations that are actively being
+discussed and worked on. The primary reasons for this restriction are detailed
+below.
+
+Resource Accounting and Visibility
+----------------------------------
+
+The core mechanism for passthrough involves the FUSE daemon opening a file
+descriptor to a backing file and registering it with the FUSE kernel module via
+the ``FUSE_DEV_IOC_BACKING_OPEN`` ioctl. This ioctl returns a ``backing_id``
+associated with a kernel-internal ``struct fuse_backing`` object, which holds a
+reference to the backing ``struct file``.
+
+A significant concern arises because the FUSE daemon can close its own file
+descriptor to the backing file after registration. The kernel, however, will
+still hold a reference to the ``struct file`` via the ``struct fuse_backing``
+object as long as it's associated with a ``backing_id`` (or subsequently, with
+an open FUSE file in passthrough mode).
+
+This behavior leads to two main issues for unprivileged FUSE daemons:
+
+ 1. **Invisibility to lsof and other inspection tools**: Once the FUSE
+ daemon closes its file descriptor, the open backing file held by the kernel
+ becomes "hidden." Standard tools like ``lsof``, which typically inspect
+ process file descriptor tables, would not be able to identify that this
+ file is still open by the system on behalf of the FUSE filesystem. This
+ makes it difficult for system administrators to track resource usage or
+ debug issues related to open files (e.g., preventing unmounts).
+
+ 2. **Bypassing RLIMIT_NOFILE**: The FUSE daemon process is subject to
+ resource limits, including the maximum number of open file descriptors
+ (``RLIMIT_NOFILE``). If an unprivileged daemon could register backing files
+ and then close its own FDs, it could potentially cause the kernel to hold
+ an unlimited number of open ``struct file`` references without these being
+ accounted against the daemon's ``RLIMIT_NOFILE``. This could lead to a
+ denial-of-service (DoS) by exhausting system-wide file resources.
+
+The ``CAP_SYS_ADMIN`` requirement acts as a safeguard against these issues,
+restricting this powerful capability to trusted processes.
+
+**NOTE**: ``io_uring`` addresses a similar issue by exposing its "fixed files",
+which are visible via ``fdinfo`` and accounted under the registering user's
+``RLIMIT_NOFILE``.
+
+Filesystem Stacking and Shutdown Loops
+--------------------------------------
+
+Another concern relates to the potential for creating complex and problematic
+filesystem stacking scenarios if unprivileged users could set up passthrough.
+A FUSE passthrough filesystem might use a backing file that resides:
+
+ * On the *same* FUSE filesystem.
+ * On another filesystem (like OverlayFS) which itself might have an upper or
+ lower layer that is a FUSE filesystem.
+
+These configurations could create dependency loops, particularly during
+filesystem shutdown or unmount sequences, leading to deadlocks or system
+instability. This is conceptually similar to the risks associated with the
+``LOOP_SET_FD`` ioctl, which also requires ``CAP_SYS_ADMIN``.
+
+To mitigate this, FUSE passthrough already incorporates checks based on
+filesystem stacking depth (``sb->s_stack_depth`` and ``fc->max_stack_depth``).
+For example, during the ``FUSE_INIT`` handshake, the FUSE daemon can negotiate
+the ``max_stack_depth`` it supports. When a backing file is registered via
+``FUSE_DEV_IOC_BACKING_OPEN``, the kernel checks if the backing file's
+filesystem stack depth is within the allowed limit.
+
+The ``CAP_SYS_ADMIN`` requirement provides an additional layer of security,
+ensuring that only privileged users can create these potentially complex
+stacking arrangements.
+
+General Security Posture
+------------------------
+
+As a general principle for new kernel features that allow userspace to instruct
+the kernel to perform direct operations on its behalf based on user-provided
+file descriptors, starting with a higher privilege requirement (like
+``CAP_SYS_ADMIN``) is a conservative and common security practice. This allows
+the feature to be used and tested while further security implications are
+evaluated and addressed.
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index a9cf8e950b15..11a599387266 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -99,6 +99,7 @@ Documentation for filesystem implementations.
fuse
fuse-io
fuse-io-uring
+ fuse-passthrough
inotify
isofs
nilfs2
@@ -113,6 +114,7 @@ Documentation for filesystem implementations.
qnx6
ramfs-rootfs-initramfs
relay
+ resctrl
romfs
smb/index
spufs/index
diff --git a/Documentation/filesystems/iomap/design.rst b/Documentation/filesystems/iomap/design.rst
index e29651a42eec..f2df9b6df988 100644
--- a/Documentation/filesystems/iomap/design.rst
+++ b/Documentation/filesystems/iomap/design.rst
@@ -243,13 +243,25 @@ The fields are as follows:
regular file data.
This is only useful for FIEMAP.
- * **IOMAP_F_PRIVATE**: Starting with this value, the upper bits can
- be set by the filesystem for its own purposes.
+ * **IOMAP_F_BOUNDARY**: This indicates that the I/O and its completion must
+ not be merged with any other I/O or completion. Filesystems must use this when
+ submitting I/O to devices that cannot handle I/O crossing certain LBAs
+ (e.g. ZNS devices). This flag applies only to buffered I/O writeback; all
+ other functions ignore it.
+
+ * **IOMAP_F_PRIVATE**: This flag is reserved for filesystem private use.
* **IOMAP_F_ANON_WRITE**: Indicates that (write) I/O does not have a target
block assigned to it yet and the file system will do that in the bio
submission handler, splitting the I/O as needed.
+ * **IOMAP_F_ATOMIC_BIO**: This indicates that write I/O must be submitted with
+ the ``REQ_ATOMIC`` flag set in the bio. Filesystems need to set this flag to
+ inform iomap that the write I/O operation requires torn-write protection based
+ on a HW-offload mechanism. They must also ensure that any mapping updates upon
+ completion of the I/O are performed in a single metadata update.
+
These flags can be set by iomap itself during file operations.
The filesystem should supply an ``->iomap_end`` function if it needs
to observe these flags:
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index 0ec0bb6eb0fb..2e567e341c3b 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -249,7 +249,6 @@ address_space_operations
========================
prototypes::
- int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*read_folio)(struct file *, struct folio *);
int (*writepages)(struct address_space *, struct writeback_control *);
bool (*dirty_folio)(struct address_space *, struct folio *folio);
@@ -280,7 +279,6 @@ locking rules:
====================== ======================== ========= ===============
ops folio locked i_rwsem invalidate_lock
====================== ======================== ========= ===============
-writepage: yes, unlocks (see below)
read_folio: yes, unlocks shared
writepages:
dirty_folio: maybe
@@ -309,54 +307,6 @@ completion.
->readahead() unlocks the folios that I/O is attempted on like ->read_folio().
-->writepage() is used for two purposes: for "memory cleansing" and for
-"sync". These are quite different operations and the behaviour may differ
-depending upon the mode.
-
-If writepage is called for sync (wbc->sync_mode != WBC_SYNC_NONE) then
-it *must* start I/O against the page, even if that would involve
-blocking on in-progress I/O.
-
-If writepage is called for memory cleansing (sync_mode ==
-WBC_SYNC_NONE) then its role is to get as much writeout underway as
-possible. So writepage should try to avoid blocking against
-currently-in-progress I/O.
-
-If the filesystem is not called for "sync" and it determines that it
-would need to block against in-progress I/O to be able to start new I/O
-against the page the filesystem should redirty the page with
-redirty_page_for_writepage(), then unlock the page and return zero.
-This may also be done to avoid internal deadlocks, but rarely.
-
-If the filesystem is called for sync then it must wait on any
-in-progress I/O and then start new I/O.
-
-The filesystem should unlock the page synchronously, before returning to the
-caller, unless ->writepage() returns special WRITEPAGE_ACTIVATE
-value. WRITEPAGE_ACTIVATE means that page cannot really be written out
-currently, and VM should stop calling ->writepage() on this page for some
-time. VM does this by moving page to the head of the active list, hence the
-name.
-
-Unless the filesystem is going to redirty_page_for_writepage(), unlock the page
-and return zero, writepage *must* run set_page_writeback() against the page,
-followed by unlocking it. Once set_page_writeback() has been run against the
-page, write I/O can be submitted and the write I/O completion handler must run
-end_page_writeback() once the I/O is complete. If no I/O is submitted, the
-filesystem must run end_page_writeback() against the page before returning from
-writepage.
-
-That is: after 2.5.12, pages which are under writeout are *not* locked. Note,
-if the filesystem needs the page to be locked during writeout, that is ok, too,
-the page is allowed to be unlocked at any point in time between the calls to
-set_page_writeback() and end_page_writeback().
-
-Note, failure to run either redirty_page_for_writepage() or the combination of
-set_page_writeback()/end_page_writeback() on a page submitted to writepage
-will leave the page itself marked clean but it will be tagged as dirty in the
-radix tree. This incoherency can lead to all sorts of hard-to-debug problems
-in the filesystem like having dirty inodes at umount and losing written data.
-
->writepages() is used for periodic writeback and for syscall-initiated
sync operations. The address_space should start I/O against at least
``*nr_to_write`` pages. ``*nr_to_write`` must be decremented for each page
@@ -364,8 +314,8 @@ which is written. The address_space implementation may write more (or less)
pages than ``*nr_to_write`` asks for, but it should try to be reasonably close.
If nr_to_write is NULL, all dirty pages must be written.
-writepages should _only_ write pages which are present on
-mapping->io_pages.
+writepages should _only_ write pages which are present in
+mapping->i_pages.
->dirty_folio() is called from various places in the kernel when
the target folio is marked as needing writeback. The folio cannot be
diff --git a/Documentation/filesystems/mount_api.rst b/Documentation/filesystems/mount_api.rst
index d92c276f1575..e149b89118c8 100644
--- a/Documentation/filesystems/mount_api.rst
+++ b/Documentation/filesystems/mount_api.rst
@@ -671,7 +671,6 @@ The members are as follows:
fsparam_bool() fs_param_is_bool
fsparam_u32() fs_param_is_u32
fsparam_u32oct() fs_param_is_u32_octal
- fsparam_u32hex() fs_param_is_u32_hex
fsparam_s32() fs_param_is_s32
fsparam_u64() fs_param_is_u64
fsparam_enum() fs_param_is_enum
@@ -755,21 +754,6 @@ process the parameters it is given.
* ::
- bool validate_constant_table(const struct constant_table *tbl,
- size_t tbl_size,
- int low, int high, int special);
-
- Validate a constant table. Checks that all the elements are appropriately
- ordered, that there are no duplicates and that the values are between low
- and high inclusive, though provision is made for one allowable special
- value outside of that range. If no special value is required, special
- should just be set to lie inside the low-to-high range.
-
- If all is good, true is returned. If the table is invalid, errors are
- logged to the kernel log buffer and false is returned.
-
- * ::
-
bool fs_validate_description(const char *name,
const struct fs_parameter_description *desc);
diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst
index 3886c14f89f4..ddd799df6ce3 100644
--- a/Documentation/filesystems/netfs_library.rst
+++ b/Documentation/filesystems/netfs_library.rst
@@ -1,33 +1,187 @@
.. SPDX-License-Identifier: GPL-2.0
-=================================
-Network Filesystem Helper Library
-=================================
+===================================
+Network Filesystem Services Library
+===================================
.. Contents:
- Overview.
+ - Requests and streams.
+ - Subrequests.
+ - Result collection and retry.
+ - Local caching.
+ - Content encryption (fscrypt).
- Per-inode context.
- Inode context helper functions.
- - Buffered read helpers.
- - Read helper functions.
- - Read helper structures.
- - Read helper operations.
- - Read helper procedure.
- - Read helper cache API.
+ - Inode locking.
+ - Inode writeback.
+ - High-level VFS API.
+ - Unlocked read/write iter.
+ - Pre-locked read/write iter.
+ - Monolithic files API.
+ - Memory-mapped I/O API.
+ - High-level VM API.
+ - Deprecated PG_private2 API.
+ - I/O request API.
+ - Request structure.
+ - Stream structure.
+ - Subrequest structure.
+ - Filesystem methods.
+ - Terminating a subrequest.
+ - Local cache API.
+ - API function reference.
Overview
========
-The network filesystem helper library is a set of functions designed to aid a
-network filesystem in implementing VM/VFS operations. For the moment, that
-just includes turning various VM buffered read operations into requests to read
-from the server. The helper library, however, can also interpose other
-services, such as local caching or local data encryption.
+The network filesystem services library, netfslib, is a set of functions
+designed to aid a network filesystem in implementing VM/VFS API operations. It
+takes over the normal buffered read, readahead, write and writeback and also
+handles unbuffered and direct I/O.
-Note that the library module doesn't link against local caching directly, so
-access must be provided by the netfs.
+The library provides support for (re-)negotiation of I/O sizes and retrying
+failed I/O as well as local caching and will, in the future, provide content
+encryption.
+
+It insulates the filesystem from VM interface changes as much as possible and
+handles VM features such as large multipage folios. The filesystem basically
+just has to provide a way to perform read and write RPC calls.
+
+The way I/O is organised inside netfslib consists of a number of objects:
+
+ * A *request*. A request is used to track the progress of the I/O overall and
+ to hold on to resources. The collection of results is done at the request
+ level. The I/O within a request is divided into a number of parallel
+ streams of subrequests.
+
+ * A *stream*. A non-overlapping series of subrequests. The subrequests
+ within a stream do not have to be contiguous.
+
+ * A *subrequest*. This is the basic unit of I/O. It represents a single RPC
+ call or a single cache I/O operation. The library passes these to the
+ filesystem and the cache to perform.
+
+Requests and Streams
+--------------------
+
+When actually performing I/O (as opposed to just copying into the pagecache),
+netfslib will create one or more requests to track the progress of the I/O and
+to hold resources.
+
+A read operation will have a single stream and the subrequests within that
+stream may be of mixed origins, for instance mixing RPC subrequests and cache
+subrequests.
+
+On the other hand, a write operation may have multiple streams, where each
+stream targets a different destination. For instance, there may be one stream
+writing to the local cache and one to the server. Currently, only two streams
+are allowed, but this could be increased if parallel writes to multiple servers
+is desired.
+
+The subrequests within a write stream do not need to match alignment or size
+with the subrequests in another write stream and netfslib performs the tiling
+of subrequests in each stream over the source buffer independently. Further,
+each stream may contain holes that don't correspond to holes in the other
+stream.
+
+In addition, the subrequests do not need to correspond to the boundaries of the
+folios or vectors in the source/destination buffer. The library handles the
+collection of results and the wrangling of folio flags and references.
+
+Subrequests
+-----------
+
+Subrequests are at the heart of the interaction between netfslib and the
+filesystem using it. Each subrequest is expected to correspond to a single
+read or write RPC or cache operation. The library will stitch together the
+results from a set of subrequests to provide a higher level operation.
+
+Netfslib has two interactions with the filesystem or the cache when setting up
+a subrequest. First, there's an optional preparatory step that allows the
+filesystem to negotiate the limits on the subrequest, both in terms of maximum
+number of bytes and maximum number of vectors (e.g. for RDMA). This may
+involve negotiating with the server (e.g. cifs needing to acquire credits).
+
+And, secondly, there's the issuing step in which the subrequest is handed off
+to the filesystem to perform.
+
+Note that these two steps are done slightly differently between read and write:
+
+ * For reads, the VM/VFS tells us how much is being requested up front, so the
+ library can preset maximum values that the cache and then the filesystem can
+ reduce. The cache also gets consulted first on whether it wants to do
+ a read before the filesystem is consulted.
+
+ * For writeback, it is unknown how much there will be to write until the
+ pagecache is walked, so no limit is set by the library.
+
+Once a subrequest is completed, the filesystem or cache informs the library of
+the completion and then collection is invoked. Depending on whether the
+request is synchronous or asynchronous, the collection of results will be done
+in either the application thread or in a work queue.
+
+Result Collection and Retry
+---------------------------
+
+As subrequests complete, the results are collected and collated by the library
+and folio unlocking is performed progressively (if appropriate). Once the
+request is complete, async completion will be invoked (again, if appropriate).
+It is possible for the filesystem to provide interim progress reports to the
+library to cause folio unlocking to happen earlier if possible.
+
+If any subrequests fail, netfslib can retry them. It will wait until all
+subrequests are completed, offer the filesystem the opportunity to fiddle with
+the resources/state held by the request and poke at the subrequests before
+re-preparing and re-issuing the subrequests.
+
+This allows the tiling of contiguous sets of failed subrequests within a stream
+to be changed, adding more subrequests or ditching excess as necessary (for
+instance, if the network sizes change or the server decides it wants smaller
+chunks).
+
+Further, if one or more contiguous cache-read subrequests fail, the library
+will pass them to the filesystem to perform instead, renegotiating and retiling
+them as necessary to fit with the filesystem's parameters rather than those of
+the cache.
+
+Local Caching
+-------------
+
+One of the services netfslib provides, via ``fscache``, is the option to cache
+on local disk a copy of the data obtained from/written to a network filesystem.
+The library will manage the storing, retrieval and some invalidation of data
+automatically on behalf of the filesystem if a cookie is attached to the
+``netfs_inode``.
+
+Note that local caching used to use the PG_private_2 (aliased as PG_fscache) to
+keep track of a page that was being written to the cache, but this is now
+deprecated as PG_private_2 will be removed.
+
+Instead, folios that are read from the server for which there was no data in
+the cache will be marked as dirty and will have ``folio->private`` set to a
+special value (``NETFS_FOLIO_COPY_TO_CACHE``) and left to writeback to write.
+If the folio is modified before that happens, the special value will be
+cleared and the folio will become normally dirty.
+
+When writeback occurs, folios that are so marked will only be written to the
+cache and not to the server. Writeback handles mixed cache-only writes and
+server-and-cache writes by using two streams, sending one to the cache and one
+to the server. The server stream will have gaps in it corresponding to those
+folios.
+
+Content Encryption (fscrypt)
+----------------------------
+
+Though it does not do so yet, at some point netfslib will acquire the ability
+to do client-side content encryption on behalf of the network filesystem (Ceph,
+for example). fscrypt can be used for this if appropriate (it may not be
+suitable for cifs, for example).
+
+The data will be stored encrypted in the local cache using the same manner of
+encryption as the data written to the server and the library will impose bounce
+buffering and RMW cycles as necessary.
Per-Inode Context
@@ -40,10 +194,13 @@ structure is defined::
struct netfs_inode {
struct inode inode;
const struct netfs_request_ops *ops;
- struct fscache_cookie *cache;
+ struct fscache_cookie * cache;
+ loff_t remote_i_size;
+ unsigned long flags;
+ ...
};
-A network filesystem that wants to use netfs lib must place one of these in its
+A network filesystem that wants to use netfslib must place one of these in its
inode wrapper struct instead of the VFS ``struct inode``. This can be done in
a way similar to the following::
@@ -56,7 +213,8 @@ This allows netfslib to find its state by using ``container_of()`` from the
inode pointer, thereby allowing the netfslib helper functions to be pointed to
directly by the VFS/VM operation tables.
-The structure contains the following fields:
+The structure contains the following fields that are of interest to the
+filesystem:
* ``inode``
@@ -71,6 +229,37 @@ The structure contains the following fields:
Local caching cookie, or NULL if no caching is enabled. This field does not
exist if fscache is disabled.
+ * ``remote_i_size``
+
+ The size of the file on the server. This differs from inode->i_size if
+ local modifications have been made but not yet written back.
+
+ * ``flags``
+
+ A set of flags, some of which the filesystem might be interested in:
+
+ * ``NETFS_ICTX_MODIFIED_ATTR``
+
+ Set if netfslib modifies mtime/ctime. The filesystem is free to ignore
+ this or clear it.
+
+ * ``NETFS_ICTX_UNBUFFERED``
+
+ Do unbuffered I/O upon the file. Like direct I/O but without the
+ alignment limitations. RMW will be performed if necessary. The pagecache
+ will not be used unless mmap() is also used.
+
+ * ``NETFS_ICTX_WRITETHROUGH``
+
+ Do writethrough caching upon the file. I/O will be set up and dispatched
+ as buffered writes are made to the page cache. mmap() does the normal
+ writeback thing.
+
+ * ``NETFS_ICTX_SINGLE_NO_UPLOAD``
+
+ Set if the file has monolithic content that must be read entirely in a
+ single go and must not be written back to the server, though it can be
+ cached (e.g. AFS directories).
Inode Context Helper Functions
------------------------------
@@ -84,117 +273,250 @@ set the operations table pointer::
then a function to cast from the VFS inode structure to the netfs context::
- struct netfs_inode *netfs_node(struct inode *inode);
+ struct netfs_inode *netfs_inode(struct inode *inode);
and finally, a function to get the cache cookie pointer from the context
attached to an inode (or NULL if fscache is disabled)::
struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx);
+Inode Locking
+-------------
+
+A number of functions are provided to manage the locking of i_rwsem for I/O and
+to effectively extend it to provide more separate classes of exclusion::
+
+ int netfs_start_io_read(struct inode *inode);
+ void netfs_end_io_read(struct inode *inode);
+ int netfs_start_io_write(struct inode *inode);
+ void netfs_end_io_write(struct inode *inode);
+ int netfs_start_io_direct(struct inode *inode);
+ void netfs_end_io_direct(struct inode *inode);
+
+The exclusion breaks down into four separate classes:
+
+ 1) Buffered reads and writes.
+
+ Buffered reads can run concurrently with each other and with buffered writes,
+ but buffered writes cannot run concurrently with each other.
+
+ 2) Direct reads and writes.
+
+ Direct (and unbuffered) reads and writes can run concurrently since they do
+ not share local buffering (i.e. the pagecache) and, in a network
+ filesystem, are expected to have exclusion managed on the server (though
+ this may not be the case for, say, Ceph).
+
+ 3) Other major inode modifying operations (e.g. truncate, fallocate).
+
+ These should just access i_rwsem directly.
+
+ 4) mmap().
+
+ mmap'd accesses might operate concurrently with any of the other classes.
+ They might form the buffer for an intra-file loopback DIO read/write. They
+ might be permitted on unbuffered files.
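+
+For instance, a filesystem that wants to do extra work around a buffered read
+while holding the appropriate exclusion might do something like the following
+(a sketch only; ``myfs_file_read_iter`` is a made-up name)::
+
+    static ssize_t myfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+    {
+        struct inode *inode = file_inode(iocb->ki_filp);
+        ssize_t ret;
+
+        ret = netfs_start_io_read(inode);
+        if (ret < 0)
+            return ret;
+        /* ... filesystem-specific work, e.g. revalidation ... */
+        ret = filemap_read(iocb, iter, 0);
+        netfs_end_io_read(inode);
+        return ret;
+    }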
+
+Inode Writeback
+---------------
+
+Netfslib will pin resources on an inode for future writeback (such as pinning
+use of an fscache cookie) when an inode is dirtied. However, this pinning
+needs careful management. To manage the pinning, the following sequence
+occurs:
+
+ 1) An inode state flag ``I_PINNING_NETFS_WB`` is set by netfslib when the
+ pinning begins (when a folio is dirtied, for example) if the cache is
+ active to stop the cache structures from being discarded and the cache
+ space from being culled. This also prevents re-getting of cache resources
+ if the flag is already set.
+
+ 2) This flag is then cleared inside the inode lock during inode writeback in the
+ VM - and the fact that it was set is transferred to ``->unpinned_netfs_wb``
+ in ``struct writeback_control``.
+
+ 3) If ``->unpinned_netfs_wb`` is now set, the write_inode procedure is forced.
+
+ 4) The filesystem's ``->write_inode()`` function is invoked to do the cleanup.
+
+ 5) The filesystem invokes netfs to do its cleanup.
+
+To do the cleanup, netfslib provides a function to do the resource unpinning::
+
+ int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
+
+If the filesystem doesn't need to do anything else, this may be set as its
+``.write_inode`` method.
+
+Further, if an inode is deleted, the filesystem's write_inode method may not
+get called, so::
-Buffered Read Helpers
-=====================
+ void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
-The library provides a set of read helpers that handle the ->read_folio(),
-->readahead() and much of the ->write_begin() VM operations and translate them
-into a common call framework.
+must be called from ``->evict_inode()`` *before* ``clear_inode()`` is called.
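+
+If the filesystem has no extra work of its own to do, a minimal wiring might
+look like this (the ``myfs_*`` names are illustrative and error handling is
+elided; the aux pointer passed to ``netfs_clear_inode_writeback()`` is shown
+as NULL for simplicity)::
+
+    static void myfs_evict_inode(struct inode *inode)
+    {
+        truncate_inode_pages_final(&inode->i_data);
+        netfs_clear_inode_writeback(inode, NULL);
+        clear_inode(inode);
+    }
+
+    static const struct super_operations myfs_super_ops = {
+        .write_inode = netfs_unpin_writeback,
+        .evict_inode = myfs_evict_inode,
+    };
+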
-The following services are provided:
- * Handle folios that span multiple pages.
+High-Level VFS API
+==================
- * Insulate the netfs from VM interface changes.
+Netfslib provides a number of sets of API calls for the filesystem to delegate
+VFS operations to. Netfslib, in turn, will call out to the filesystem and the
+cache to negotiate I/O sizes, issue RPCs and provide places for it to intervene
+at various times.
- * Allow the netfs to arbitrarily split reads up into pieces, even ones that
- don't match folio sizes or folio alignments and that may cross folios.
+Unlocked Read/Write Iter
+------------------------
- * Allow the netfs to expand a readahead request in both directions to meet its
- needs.
+The first API set is for the delegation of operations to netfslib when the
+filesystem is called through the standard VFS read/write_iter methods::
- * Allow the netfs to partially fulfil a read, which will then be resubmitted.
+ ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);
+ ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from);
- * Handle local caching, allowing cached data and server-read data to be
- interleaved for a single request.
+They can be assigned directly to ``.read_iter`` and ``.write_iter``. They
+perform the inode locking themselves and the first two will switch between
+buffered I/O and DIO as appropriate.
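+
+For example (``myfs_file_ops`` is a made-up name; other methods omitted)::
+
+    const struct file_operations myfs_file_ops = {
+        .read_iter  = netfs_file_read_iter,
+        .write_iter = netfs_file_write_iter,
+        /* ... llseek, mmap, fsync, etc. ... */
+    };
+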
- * Handle clearing of bufferage that isn't on the server.
+Pre-Locked Read/Write Iter
+--------------------------
- * Handle retrying of reads that failed, switching reads from the cache to the
- server as necessary.
+The second API set is for the delegation of operations to netfslib when the
+filesystem is called through the standard VFS methods, but needs to do some
+other stuff before or after calling netfslib whilst still inside the locked
+section (e.g. Ceph negotiating caps). The unbuffered read function is::
- * In the future, this is a place that other services can be performed, such as
- local encryption of data to be stored remotely or in the cache.
+ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter);
-From the network filesystem, the helpers require a table of operations. This
-includes a mandatory method to issue a read operation along with a number of
-optional methods.
+This must not be assigned directly to ``.read_iter`` and the filesystem is
+responsible for performing the inode locking before calling it. In the case of
+buffered read, the filesystem should use ``filemap_read()``.
+There are three functions for writes::
-Read Helper Functions
+ ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
+ struct netfs_group *netfs_group);
+ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
+ struct netfs_group *netfs_group);
+ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
+ struct netfs_group *netfs_group);
+
+These must not be assigned directly to ``.write_iter`` and the filesystem is
+responsible for performing the inode locking before calling them.
+
+The first two functions are for buffered writes; the first just adds some
+standard write checks and jumps to the second, but if the filesystem wants to
+do the checks itself, it can use the second directly. The third function is
+for unbuffered or DIO writes.
+
+On all three write functions, there is a writeback group pointer (which should
+be NULL if the filesystem doesn't use this). Writeback groups are set on
+folios when they're modified. If a folio to-be-modified is already marked with
+a different group, it is flushed first. The writeback API allows writing back
+of a specific group.
+
+Memory-Mapped I/O API
---------------------
-Three read helpers are provided::
+An API for support of mmap()'d I/O is provided::
+
+ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
+
+This allows the filesystem to delegate ``.page_mkwrite`` to netfslib. The
+filesystem should not take the inode lock before calling it, but, as with the
+locked write functions above, this does take a writeback group pointer. If the
+page to be made writable is in a different group, it will be flushed first.
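+
+A filesystem that does not use writeback groups might simply wrap it, passing
+NULL as the group (``myfs_page_mkwrite`` is an illustrative name)::
+
+    static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
+    {
+        return netfs_page_mkwrite(vmf, NULL);
+    }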
+
+Monolithic Files API
+--------------------
+
+There is also a special API set for files for which the content must be read in
+a single RPC (and not written back) and is maintained as a monolithic blob
+(e.g. an AFS directory), though it can be stored and updated in the local cache::
+
+ ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter);
+ void netfs_single_mark_inode_dirty(struct inode *inode);
+ int netfs_writeback_single(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct iov_iter *iter);
- void netfs_readahead(struct readahead_control *ractl);
- int netfs_read_folio(struct file *file,
- struct folio *folio);
- int netfs_write_begin(struct netfs_inode *ctx,
- struct file *file,
- struct address_space *mapping,
- loff_t pos,
- unsigned int len,
- struct folio **_folio,
- void **_fsdata);
+The first function reads from a file into the given buffer, reading from the
+cache in preference if the data is cached there; the second function allows the
+inode to be marked dirty, causing a later writeback; and the third function can
+be called from the writeback code to write the data to the cache, if there is
+one.
-Each corresponds to a VM address space operation. These operations use the
-state in the per-inode context.
+The inode should be marked ``NETFS_ICTX_SINGLE_NO_UPLOAD`` if this API is to be
+used. The writeback function requires the buffer to be of ITER_FOLIOQ type.
-For ->readahead() and ->read_folio(), the network filesystem just point directly
-at the corresponding read helper; whereas for ->write_begin(), it may be a
-little more complicated as the network filesystem might want to flush
-conflicting writes or track dirty data and needs to put the acquired folio if
-an error occurs after calling the helper.
+High-Level VM API
+==================
-The helpers manage the read request, calling back into the network filesystem
-through the supplied table of operations. Waits will be performed as
-necessary before returning for helpers that are meant to be synchronous.
+Netfslib also provides a number of sets of API calls for the filesystem to
+delegate VM operations to. Again, netfslib, in turn, will call out to the
+filesystem and the cache to negotiate I/O sizes, issue RPCs and provide places
+for it to intervene at various times::
-If an error occurs, the ->free_request() will be called to clean up the
-netfs_io_request struct allocated. If some parts of the request are in
-progress when an error occurs, the request will get partially completed if
-sufficient data is read.
+ void netfs_readahead(struct readahead_control *);
+ int netfs_read_folio(struct file *, struct folio *);
+ int netfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc);
+ bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
+ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+ bool netfs_release_folio(struct folio *folio, gfp_t gfp);
-Additionally, there is::
+These are ``address_space_operations`` methods and can be set directly in the
+operations table.
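+
+For example (illustrative only; a real filesystem may add other methods such
+as ``.direct_IO`` or ``.migrate_folio``)::
+
+    const struct address_space_operations myfs_aops = {
+        .readahead        = netfs_readahead,
+        .read_folio       = netfs_read_folio,
+        .writepages       = netfs_writepages,
+        .dirty_folio      = netfs_dirty_folio,
+        .release_folio    = netfs_release_folio,
+        .invalidate_folio = netfs_invalidate_folio,
+    };
+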
- * void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
- ssize_t transferred_or_error,
- bool was_async);
+Deprecated PG_private_2 API
+---------------------------
-which should be called to complete a read subrequest. This is given the number
-of bytes transferred or a negative error code, plus a flag indicating whether
-the operation was asynchronous (ie. whether the follow-on processing can be
-done in the current context, given this may involve sleeping).
+There is also a deprecated function for filesystems that still use the
+``->write_begin`` method::
+ int netfs_write_begin(struct netfs_inode *inode, struct file *file,
+ struct address_space *mapping, loff_t pos, unsigned int len,
+ struct folio **_folio, void **_fsdata);
-Read Helper Structures
-----------------------
+It uses the deprecated PG_private_2 flag and so should not be used.
-The read helpers make use of a couple of structures to maintain the state of
-the read. The first is a structure that manages a read request as a whole::
+
+I/O Request API
+===============
+
+The I/O request API comprises a number of structures and a number of functions
+that the filesystem may need to use.
+
+Request Structure
+-----------------
+
+The request structure manages the request as a whole, holding some resources
+and state on behalf of the filesystem and tracking the collection of results::
struct netfs_io_request {
+ enum netfs_io_origin origin;
struct inode *inode;
struct address_space *mapping;
- struct netfs_cache_resources cache_resources;
+ struct netfs_group *group;
+ struct netfs_io_stream io_streams[];
void *netfs_priv;
- loff_t start;
- size_t len;
- loff_t i_size;
- const struct netfs_request_ops *netfs_ops;
+ void *netfs_priv2;
+ unsigned long long start;
+ unsigned long long len;
+ unsigned long long i_size;
unsigned int debug_id;
+ unsigned long flags;
...
};
-The above fields are the ones the netfs can use. They are:
+Many of the fields are for internal use, but the fields shown here are of
+interest to the filesystem:
+
+ * ``origin``
+
+ The origin of the request (readahead, read_folio, DIO read, writeback, ...).
* ``inode``
* ``mapping``
@@ -202,11 +524,19 @@ The above fields are the ones the netfs can use. They are:
The inode and the address space of the file being read from. The mapping
may or may not point to inode->i_data.
- * ``cache_resources``
+ * ``group``
+
+ The writeback group this request is dealing with or NULL. This holds a ref
+ on the group.
- Resources for the local cache to use, if present.
+ * ``io_streams``
+
+ The parallel streams of subrequests available to the request. Currently two
+ are available, but this may be made extensible in future. ``NR_IO_STREAMS``
+ indicates the size of the array.
* ``netfs_priv``
+ * ``netfs_priv2``
The network filesystem's private data. The value for this can be passed in
to the helper functions or set during the request.
@@ -221,37 +551,121 @@ The above fields are the ones the netfs can use. They are:
The size of the file at the start of the request.
- * ``netfs_ops``
-
- A pointer to the operation table. The value for this is passed into the
- helper functions.
-
* ``debug_id``
A number allocated to this operation that can be displayed in trace lines
for reference.
+ * ``flags``
+
+ Flags for managing and controlling the operation of the request. Some of
+ these may be of interest to the filesystem:
+
+ * ``NETFS_RREQ_RETRYING``
+
+ Netfslib sets this when generating retries.
+
+ * ``NETFS_RREQ_PAUSE``
+
+ The filesystem can set this to request to pause the library's subrequest
+ issuing loop - but care needs to be taken as netfslib may also set it.
+
+ * ``NETFS_RREQ_NONBLOCK``
+ * ``NETFS_RREQ_BLOCKED``
+
+ Netfslib sets the first to indicate that non-blocking mode was set by the
+ caller and the filesystem can set the second to indicate that it would
+ have had to block.
+
+ * ``NETFS_RREQ_USE_PGPRIV2``
+
+ The filesystem can set this if it wants to use PG_private_2 to track
+ whether a folio is being written to the cache. This is deprecated as
+ PG_private_2 is going to go away.
+
+If the filesystem wants more private data than is afforded by this structure,
+then it should wrap it and provide its own allocator.
+
+Stream Structure
+----------------
+
+A request is comprised of one or more parallel streams and each stream may be
+aimed at a different target.
+
+For read requests, only stream 0 is used. This can contain a mixture of
+subrequests aimed at different sources. For write requests, stream 0 is used
+for the server and stream 1 is used for the cache. For buffered writeback,
+stream 0 is not enabled unless a normal dirty folio is encountered, at which
+point ->begin_writeback() will be invoked and the filesystem can mark the
+stream available.
+
+The stream struct looks like::
-The second structure is used to manage individual slices of the overall read
-request::
+ struct netfs_io_stream {
+ unsigned char stream_nr;
+ bool avail;
+ size_t sreq_max_len;
+ unsigned int sreq_max_segs;
+ unsigned int submit_extendable_to;
+ ...
+ };
+
+A number of members are available for access/use by the filesystem:
+
+ * ``stream_nr``
+
+ The number of the stream within the request.
+
+ * ``avail``
+
+ True if the stream is available for use. The filesystem should set this on
+ stream zero if in ->begin_writeback().
+
+ * ``sreq_max_len``
+ * ``sreq_max_segs``
+
+ These are set by the filesystem or the cache in ->prepare_read() or
+ ->prepare_write() for each subrequest to indicate the maximum number of
+ bytes and, optionally, the maximum number of segments (if not 0) that that
+ subrequest can support.
+
+ * ``submit_extendable_to``
+
+ The size that a subrequest can be rounded up to beyond the EOF, given the
+ available buffer. This allows the cache to work out if it can do a DIO read
+ or write that straddles the EOF marker.
+
+Subrequest Structure
+--------------------
+
+Individual units of I/O are managed by the subrequest structure. These
+represent slices of the overall request and run independently::
struct netfs_io_subrequest {
struct netfs_io_request *rreq;
- loff_t start;
+ struct iov_iter io_iter;
+ unsigned long long start;
size_t len;
size_t transferred;
unsigned long flags;
+ short error;
unsigned short debug_index;
+ unsigned char stream_nr;
...
};
-Each subrequest is expected to access a single source, though the helpers will
+Each subrequest is expected to access a single source, though the library will
handle falling back from one source type to another. The members are:
* ``rreq``
A pointer to the read request.
+ * ``io_iter``
+
+ An I/O iterator representing a slice of the buffer to be read into or
+ written from.
+
* ``start``
* ``len``
@@ -260,241 +674,295 @@ handle falling back from one source type to another. The members are:
* ``transferred``
- The amount of data transferred so far of the length of this slice. The
- network filesystem or cache should start the operation this far into the
- slice. If a short read occurs, the helpers will call again, having updated
- this to reflect the amount read so far.
+    The amount of data transferred so far for this subrequest.  The length of
+    the transfer made by this issuance of the subrequest should be added to
+    this.  If this is less than ``len`` then the subrequest may be reissued to
+    continue.
* ``flags``
- Flags pertaining to the read. There are two of interest to the filesystem
- or cache:
+   Flags for managing the subrequest.  A number of these are of interest to
+   the filesystem or cache:
+
+ * ``NETFS_SREQ_MADE_PROGRESS``
+
+      Set by the filesystem to indicate that at least one byte of data was read
+ or written.
+
+ * ``NETFS_SREQ_HIT_EOF``
+
+ The filesystem should set this if a read hit the EOF on the file (in which
+ case ``transferred`` should stop at the EOF). Netfslib may expand the
+ subrequest out to the size of the folio containing the EOF on the off
+ chance that a third party change happened or a DIO read may have asked for
+ more than is available. The library will clear any excess pagecache.
* ``NETFS_SREQ_CLEAR_TAIL``
- This can be set to indicate that the remainder of the slice, from
- transferred to len, should be cleared.
+ The filesystem can set this to indicate that the remainder of the slice,
+ from transferred to len, should be cleared. Do not set if HIT_EOF is set.
+
+ * ``NETFS_SREQ_NEED_RETRY``
+
+ The filesystem can set this to tell netfslib to retry the subrequest.
+
+ * ``NETFS_SREQ_BOUNDARY``
+
+ This can be set by the filesystem on a subrequest to indicate that it ends
+ at a boundary with the filesystem structure (e.g. at the end of a Ceph
+ object). It tells netfslib not to retile subrequests across it.
- * ``NETFS_SREQ_SEEK_DATA_READ``
+ * ``error``
- This is a hint to the cache that it might want to try skipping ahead to
- the next data (ie. using SEEK_DATA).
+   This is for the filesystem to store the result of the subrequest.  It
+   should be set to 0 if successful and a negative error code otherwise.
* ``debug_index``
+ * ``stream_nr``
A number allocated to this slice that can be displayed in trace lines for
- reference.
+ reference and the number of the request stream that it belongs to.
+If necessary, the filesystem can get and put extra refs on the subrequest it is
+given::
-Read Helper Operations
-----------------------
+ void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+ enum netfs_sreq_ref_trace what);
+ void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
+ enum netfs_sreq_ref_trace what);
-The network filesystem must provide the read helpers with a table of operations
-through which it can issue requests and negotiate::
+using netfs trace codes to indicate the reason. Care must be taken, however,
+as once control of the subrequest is returned to netfslib, the same subrequest
+can be reissued/retried.
+
+Filesystem Methods
+------------------
+
+The filesystem sets a table of operations in ``netfs_inode`` for netfslib to
+use::
struct netfs_request_ops {
- void (*init_request)(struct netfs_io_request *rreq, struct file *file);
+ mempool_t *request_pool;
+ mempool_t *subrequest_pool;
+ int (*init_request)(struct netfs_io_request *rreq, struct file *file);
void (*free_request)(struct netfs_io_request *rreq);
+ void (*free_subrequest)(struct netfs_io_subrequest *rreq);
void (*expand_readahead)(struct netfs_io_request *rreq);
- bool (*clamp_length)(struct netfs_io_subrequest *subreq);
+ int (*prepare_read)(struct netfs_io_subrequest *subreq);
void (*issue_read)(struct netfs_io_subrequest *subreq);
- bool (*is_still_valid)(struct netfs_io_request *rreq);
- int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
- struct folio **foliop, void **_fsdata);
void (*done)(struct netfs_io_request *rreq);
+ void (*update_i_size)(struct inode *inode, loff_t i_size);
+ void (*post_modify)(struct inode *inode);
+ void (*begin_writeback)(struct netfs_io_request *wreq);
+ void (*prepare_write)(struct netfs_io_subrequest *subreq);
+ void (*issue_write)(struct netfs_io_subrequest *subreq);
+ void (*retry_request)(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream);
+ void (*invalidate_cache)(struct netfs_io_request *wreq);
};
-The operations are as follows:
-
- * ``init_request()``
+The table starts with a pair of optional pointers to memory pools from which
+requests and subrequests can be allocated. If these are not given, netfslib
+has default pools that it will use instead. If the filesystem wraps the netfs
+structs in its own larger structs, then it will need to use its own pools.
+Netfslib will allocate directly from the pools.
- [Optional] This is called to initialise the request structure. It is given
- the file for reference.
+The methods defined in the table are:
+ * ``init_request()``
* ``free_request()``
+ * ``free_subrequest()``
- [Optional] This is called as the request is being deallocated so that the
- filesystem can clean up any state it has attached there.
+ [Optional] A filesystem may implement these to initialise or clean up any
+ resources that it attaches to the request or subrequest.
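+
+   As an illustration only, a minimal sketch (hypothetical "myfs" with an
+   illustrative private struct) of such a pair might be::
+
+	struct myfs_req_state {
+		unsigned long	issued;		/* illustrative private state */
+	};
+
+	static int myfs_init_request(struct netfs_io_request *rreq,
+				     struct file *file)
+	{
+		rreq->netfs_priv = kzalloc(sizeof(struct myfs_req_state),
+					   GFP_KERNEL);
+		return rreq->netfs_priv ? 0 : -ENOMEM;
+	}
+
+	static void myfs_free_request(struct netfs_io_request *rreq)
+	{
+		kfree(rreq->netfs_priv);
+	}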
* ``expand_readahead()``
[Optional] This is called to allow the filesystem to expand the size of a
- readahead read request. The filesystem gets to expand the request in both
- directions, though it's not permitted to reduce it as the numbers may
- represent an allocation already made. If local caching is enabled, it gets
- to expand the request first.
+ readahead request. The filesystem gets to expand the request in both
+ directions, though it must retain the initial region as that may represent
+ an allocation already made. If local caching is enabled, it gets to expand
+ the request first.
Expansion is communicated by changing ->start and ->len in the request
structure. Note that if any change is made, ->len must be increased by at
least as much as ->start is reduced.
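+
+   As an illustration, a minimal sketch (hypothetical "myfs", assuming a
+   256KiB alignment preference) might round the request out like this::
+
+	static void myfs_expand_readahead(struct netfs_io_request *rreq)
+	{
+		unsigned long long start = round_down(rreq->start, SZ_256K);
+		unsigned long long end = round_up(rreq->start + rreq->len,
+						  SZ_256K);
+
+		rreq->start = start;
+		rreq->len = end - start;
+	}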
- * ``clamp_length()``
-
- [Optional] This is called to allow the filesystem to reduce the size of a
- subrequest. The filesystem can use this, for example, to chop up a request
- that has to be split across multiple servers or to put multiple reads in
- flight.
-
- This should return 0 on success and an error code on error.
-
- * ``issue_read()``
+ * ``prepare_read()``
- [Required] The helpers use this to dispatch a subrequest to the server for
- reading. In the subrequest, ->start, ->len and ->transferred indicate what
- data should be read from the server.
+   [Optional] This is called to allow the filesystem to limit the size of a
+   subrequest.  It may also limit the number of individual regions in the
+   iterator, such as may be required by RDMA.  This information should be set
+   on stream zero in::
- There is no return value; the netfs_subreq_terminated() function should be
- called to indicate whether or not the operation succeeded and how much data
- it transferred. The filesystem also should not deal with setting folios
- uptodate, unlocking them or dropping their refs - the helpers need to deal
- with this as they have to coordinate with copying to the local cache.
+ rreq->io_streams[0].sreq_max_len
+ rreq->io_streams[0].sreq_max_segs
- Note that the helpers have the folios locked, but not pinned. It is
- possible to use the ITER_XARRAY iov iterator to refer to the range of the
- inode that is being operated upon without the need to allocate large bvec
- tables.
+ The filesystem can use this, for example, to chop up a request that has to
+ be split across multiple servers or to put multiple reads in flight.
- * ``is_still_valid()``
+ Zero should be returned on success and an error code otherwise.
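+
+   As an illustration, a minimal sketch (hypothetical "myfs", assuming a
+   negotiated ``MYFS_MAX_RSIZE`` limit) might be::
+
+	static int myfs_prepare_read(struct netfs_io_subrequest *subreq)
+	{
+		struct netfs_io_stream *stream = &subreq->rreq->io_streams[0];
+
+		stream->sreq_max_len  = MYFS_MAX_RSIZE;	/* e.g. server rsize */
+		stream->sreq_max_segs = 0;		/* no segment limit */
+		return 0;
+	}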
- [Optional] This is called to find out if the data just read from the local
- cache is still valid. It should return true if it is still valid and false
- if not. If it's not still valid, it will be reread from the server.
+ * ``issue_read()``
- * ``check_write_begin()``
+ [Required] Netfslib calls this to dispatch a subrequest to the server for
+ reading. In the subrequest, ->start, ->len and ->transferred indicate what
+ data should be read from the server and ->io_iter indicates the buffer to be
+ used.
- [Optional] This is called from the netfs_write_begin() helper once it has
- allocated/grabbed the folio to be modified to allow the filesystem to flush
- conflicting state before allowing it to be modified.
+ There is no return value; the ``netfs_read_subreq_terminated()`` function
+ should be called to indicate that the subrequest completed either way.
+ ->error, ->transferred and ->flags should be updated before completing. The
+ termination can be done asynchronously.
- It may unlock and discard the folio it was given and set the caller's folio
- pointer to NULL. It should return 0 if everything is now fine (``*foliop``
- left set) or the op should be retried (``*foliop`` cleared) and any other
- error code to abort the operation.
+ Note: the filesystem must not deal with setting folios uptodate, unlocking
+ them or dropping their refs - the library deals with this as it may have to
+ stitch together the results of multiple subrequests that variously overlap
+ the set of folios.
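+
+   As an illustration, a minimal sketch (hypothetical "myfs";
+   ``myfs_read_from_server()`` is an assumed transport helper returning the
+   number of bytes read or a negative error) might be::
+
+	static void myfs_issue_read(struct netfs_io_subrequest *subreq)
+	{
+		ssize_t ret;
+
+		ret = myfs_read_from_server(subreq->rreq->inode,
+					    subreq->start + subreq->transferred,
+					    subreq->len - subreq->transferred,
+					    &subreq->io_iter);
+		if (ret > 0) {
+			subreq->transferred += ret;
+			set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+		} else if (ret < 0) {
+			subreq->error = ret;
+		}
+		netfs_read_subreq_terminated(subreq);
+	}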
- * ``done``
+ * ``done()``
- [Optional] This is called after the folios in the request have all been
+ [Optional] This is called after the folios in a read request have all been
unlocked (and marked uptodate if applicable).
+ * ``update_i_size()``
+
+ [Optional] This is invoked by netfslib at various points during the write
+ paths to ask the filesystem to update its idea of the file size. If not
+ given, netfslib will set i_size and i_blocks and update the local cache
+ cookie.
+
+ * ``post_modify()``
+
+ [Optional] This is called after netfslib writes to the pagecache or when it
+ allows an mmap'd page to be marked as writable.
+
+ * ``begin_writeback()``
+
+ [Optional] Netfslib calls this when processing a writeback request if it
+ finds a dirty page that isn't simply marked NETFS_FOLIO_COPY_TO_CACHE,
+ indicating it must be written to the server. This allows the filesystem to
+ only set up writeback resources when it knows it's going to have to perform
+ a write.
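+
+   As an illustration, a minimal sketch (hypothetical "myfs") that just marks
+   the server stream available might be::
+
+	static void myfs_begin_writeback(struct netfs_io_request *wreq)
+	{
+		wreq->io_streams[0].avail = true;
+	}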
+
+ * ``prepare_write()``
+
+   [Optional] This is called to allow the filesystem to limit the size of a
+   subrequest.  It may also limit the number of individual regions in the
+   iterator, such as may be required by RDMA.  This information should be set
+   on the stream to which the subrequest belongs::
-Read Helper Procedure
----------------------
-
-The read helpers work by the following general procedure:
-
- * Set up the request.
-
- * For readahead, allow the local cache and then the network filesystem to
- propose expansions to the read request. This is then proposed to the VM.
- If the VM cannot fully perform the expansion, a partially expanded read will
- be performed, though this may not get written to the cache in its entirety.
-
- * Loop around slicing chunks off of the request to form subrequests:
-
- * If a local cache is present, it gets to do the slicing, otherwise the
- helpers just try to generate maximal slices.
-
- * The network filesystem gets to clamp the size of each slice if it is to be
- the source. This allows rsize and chunking to be implemented.
+ rreq->io_streams[subreq->stream_nr].sreq_max_len
+ rreq->io_streams[subreq->stream_nr].sreq_max_segs
- * The helpers issue a read from the cache or a read from the server or just
- clears the slice as appropriate.
+ The filesystem can use this, for example, to chop up a request that has to
+ be split across multiple servers or to put multiple writes in flight.
- * The next slice begins at the end of the last one.
+ This is not permitted to return an error. Instead, in the event of failure,
+ ``netfs_prepare_write_failed()`` must be called.
- * As slices finish being read, they terminate.
+ * ``issue_write()``
- * When all the subrequests have terminated, the subrequests are assessed and
- any that are short or have failed are reissued:
+ [Required] This is used to dispatch a subrequest to the server for writing.
+ In the subrequest, ->start, ->len and ->transferred indicate what data
+ should be written to the server and ->io_iter indicates the buffer to be
+ used.
- * Failed cache requests are issued against the server instead.
+   There is no return value; the ``netfs_write_subrequest_terminated()`` function
+ should be called to indicate that the subrequest completed either way.
+ ->error, ->transferred and ->flags should be updated before completing. The
+ termination can be done asynchronously.
- * Failed server requests just fail.
+ Note: the filesystem must not deal with removing the dirty or writeback
+ marks on folios involved in the operation and should not take refs or pins
+ on them, but should leave retention to netfslib.
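+
+   As an illustration, a minimal sketch (hypothetical "myfs";
+   ``myfs_write_to_server()`` is an assumed transport helper returning the
+   number of bytes written or a negative error) might be::
+
+	static void myfs_issue_write(struct netfs_io_subrequest *subreq)
+	{
+		ssize_t ret;
+
+		ret = myfs_write_to_server(subreq->rreq->inode,
+					   subreq->start + subreq->transferred,
+					   subreq->len - subreq->transferred,
+					   &subreq->io_iter);
+		netfs_write_subrequest_terminated(subreq, ret);
+	}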
- * Short reads against either source will be reissued against that source
- provided they have transferred some more data:
+ * ``retry_request()``
- * The cache may need to skip holes that it can't do DIO from.
+ [Optional] Netfslib calls this at the beginning of a retry cycle. This
+   allows the filesystem to examine the state of the request, of the
+   subrequests in the indicated stream and of its own data, and to make
+   adjustments or renegotiate resources.
+
+ * ``invalidate_cache()``
- * If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the
- end of the slice instead of reissuing.
+ [Optional] This is called by netfslib to invalidate data stored in the local
+ cache in the event that writing to the local cache fails, providing updated
+ coherency data that netfs can't provide.
- * Once the data is read, the folios that have been fully read/cleared:
+Terminating a subrequest
+------------------------
- * Will be marked uptodate.
+When a subrequest completes, there are a number of functions that the
+filesystem or cache can call to inform netfslib of the status change. One function is
+provided to terminate a write subrequest at the preparation stage and acts
+synchronously:
- * If a cache is present, will be marked with PG_fscache.
+ * ``void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq);``
- * Unlocked
+ Indicate that the ->prepare_write() call failed. The ``error`` field should
+ have been updated.
- * Any folios that need writing to the cache will then have DIO writes issued.
+Note that ->prepare_read() can return an error as a read can simply be aborted.
+Dealing with writeback failure is trickier.
- * Synchronous operations will wait for reading to be complete.
+The other functions are used for subrequests that got as far as being issued:
- * Writes to the cache will proceed asynchronously and the folios will have the
- PG_fscache mark removed when that completes.
+ * ``void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq);``
- * The request structures will be cleaned up when everything has completed.
+ Tell netfslib that a read subrequest has terminated. The ``error``,
+ ``flags`` and ``transferred`` fields should have been updated.
+ * ``void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error);``
-Read Helper Cache API
----------------------
+ Tell netfslib that a write subrequest has terminated. Either the amount of
+   data processed or the negative error code can be passed in.  This can be
+   used as a kiocb completion function.
-When implementing a local cache to be used by the read helpers, two things are
-required: some way for the network filesystem to initialise the caching for a
-read request and a table of operations for the helpers to call.
+ * ``void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq);``
-To begin a cache operation on an fscache object, the following function is
-called::
+   This is provided to optionally update netfslib on the incremental progress
+   of a read, allowing some folios to be unlocked early; it does not actually
+   terminate the subrequest.  The ``transferred`` field should have been
+   updated.
- int fscache_begin_read_operation(struct netfs_io_request *rreq,
- struct fscache_cookie *cookie);
+Local Cache API
+---------------
-passing in the request pointer and the cookie corresponding to the file. This
-fills in the cache resources mentioned below.
+Netfslib provides a separate API for a local cache to implement, though some
+of its routines are similar to those of the filesystem request API.
-The netfs_io_request object contains a place for the cache to hang its
+Firstly, the netfs_io_request object contains a place for the cache to hang its
state::
struct netfs_cache_resources {
const struct netfs_cache_ops *ops;
void *cache_priv;
void *cache_priv2;
+ unsigned int debug_id;
+ unsigned int inval_counter;
};
-This contains an operations table pointer and two private pointers. The
-operation table looks like the following::
+This contains an operations table pointer and two private pointers, plus the
+debug ID of the fscache cookie for tracing purposes and an invalidation
+counter that is incremented by calls to ``fscache_invalidate()``, allowing
+cache subrequests to be invalidated after completion.
+
+The cache operation table looks like the following::
struct netfs_cache_ops {
void (*end_operation)(struct netfs_cache_resources *cres);
-
void (*expand_readahead)(struct netfs_cache_resources *cres,
loff_t *_start, size_t *_len, loff_t i_size);
-
enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
- loff_t i_size);
-
+ loff_t i_size);
int (*read)(struct netfs_cache_resources *cres,
loff_t start_pos,
struct iov_iter *iter,
bool seek_data,
netfs_io_terminated_t term_func,
void *term_func_priv);
-
- int (*prepare_write)(struct netfs_cache_resources *cres,
- loff_t *_start, size_t *_len, loff_t i_size,
- bool no_space_allocated_yet);
-
- int (*write)(struct netfs_cache_resources *cres,
- loff_t start_pos,
- struct iov_iter *iter,
- netfs_io_terminated_t term_func,
- void *term_func_priv);
-
- int (*query_occupancy)(struct netfs_cache_resources *cres,
- loff_t start, size_t len, size_t granularity,
- loff_t *_data_start, size_t *_data_len);
+ void (*prepare_write_subreq)(struct netfs_io_subrequest *subreq);
+ void (*issue_write)(struct netfs_io_subrequest *subreq);
};
With a termination handler function pointer::
@@ -511,10 +979,16 @@ The methods defined in the table are:
* ``expand_readahead()``
- [Optional] Called at the beginning of a netfs_readahead() operation to allow
- the cache to expand a request in either direction. This allows the cache to
+ [Optional] Called at the beginning of a readahead operation to allow the
+ cache to expand a request in either direction. This allows the cache to
size the request appropriately for the cache granularity.
+ * ``prepare_read()``
+
+ [Required] Called to configure the next slice of a request. ->start and
+ ->len in the subrequest indicate where and how big the next slice can be;
+ the cache gets to reduce the length to match its granularity requirements.
+
The function is passed pointers to the start and length in its parameters,
plus the size of the file for reference, and adjusts the start and length
appropriately. It should return one of:
@@ -528,12 +1002,6 @@ The methods defined in the table are:
downloaded from the server or read from the cache - or whether slicing
should be given up at the current point.
- * ``prepare_read()``
-
- [Required] Called to configure the next slice of a request. ->start and
- ->len in the subrequest indicate where and how big the next slice can be;
- the cache gets to reduce the length to match its granularity requirements.
-
* ``read()``
[Required] Called to read from the cache. The start file offset is given
@@ -547,44 +1015,33 @@ The methods defined in the table are:
indicating whether the termination is definitely happening in the caller's
context.
- * ``prepare_write()``
+ * ``prepare_write_subreq()``
- [Required] Called to prepare a write to the cache to take place. This
- involves checking to see whether the cache has sufficient space to honour
- the write. ``*_start`` and ``*_len`` indicate the region to be written; the
- region can be shrunk or it can be expanded to a page boundary either way as
- necessary to align for direct I/O. i_size holds the size of the object and
- is provided for reference. no_space_allocated_yet is set to true if the
- caller is certain that no data has been written to that region - for example
- if it tried to do a read from there already.
+   [Required] This is called to allow the cache to limit the size of a
+   subrequest.  It may also limit the number of individual regions in the
+   iterator, such as may be required by DIO/DMA.  This information should be
+   set on the stream to which the subrequest belongs::
- * ``write()``
+ rreq->io_streams[subreq->stream_nr].sreq_max_len
+ rreq->io_streams[subreq->stream_nr].sreq_max_segs
- [Required] Called to write to the cache. The start file offset is given
- along with an iterator to write from, which gives the length also.
-
- Also provided is a pointer to a termination handler function and private
- data to pass to that function. The termination function should be called
- with the number of bytes transferred or an error code, plus a flag
- indicating whether the termination is definitely happening in the caller's
- context.
+   The cache can use this, for example, to chop up a request that has to be
+   split up internally or to put multiple writes in flight.
- * ``query_occupancy()``
+ This is not permitted to return an error. In the event of failure,
+ ``netfs_prepare_write_failed()`` must be called.
- [Required] Called to find out where the next piece of data is within a
- particular region of the cache. The start and length of the region to be
- queried are passed in, along with the granularity to which the answer needs
- to be aligned. The function passes back the start and length of the data,
- if any, available within that region. Note that there may be a hole at the
- front.
+ * ``issue_write()``
- It returns 0 if some data was found, -ENODATA if there was no usable data
- within the region or -ENOBUFS if there is no caching on this file.
+ [Required] This is used to dispatch a subrequest to the cache for writing.
+ In the subrequest, ->start, ->len and ->transferred indicate what data
+ should be written to the cache and ->io_iter indicates the buffer to be
+ used.
-Note that these methods are passed a pointer to the cache resource structure,
-not the read request structure as they could be used in other situations where
-there isn't a read request structure as well, such as writing dirty data to the
-cache.
+   There is no return value; the ``netfs_write_subrequest_terminated()`` function
+ should be called to indicate that the subrequest completed either way.
+ ->error, ->transferred and ->flags should be updated before completing. The
+ termination can be done asynchronously.
API Function Reference
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index 767b2927c762..3616d7161dab 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -1203,3 +1203,49 @@ should use d_drop();d_splice_alias() and return the result of the latter.
If a positive dentry cannot be returned for some reason, in-kernel
clients such as cachefiles, nfsd, smb/server may not perform ideally but
will fail-safe.
+
+---
+
+**mandatory**
+
+lookup_one(), lookup_one_unlocked(), lookup_one_positive_unlocked() now
+take a qstr instead of a name and len. These, not the "one_len"
+versions, should be used whenever accessing a filesystem from outside
+that filesystem, through a mount point - which will have a mnt_idmap.
+
+---
+
+**mandatory**
+
+Functions try_lookup_one_len(), lookup_one_len(),
+lookup_one_len_unlocked() and lookup_positive_unlocked() have been
+renamed to try_lookup_noperm(), lookup_noperm(),
+lookup_noperm_unlocked(), lookup_noperm_positive_unlocked(). They now
+take a qstr instead of separate name and length. QSTR() can be used
+when strlen() is needed for the length.
+
+For try_lookup_noperm() a reference to the qstr is passed in case the
+hash might subsequently be needed.
+
+These functions no longer do any permission checking - they previously
+checked that the caller has 'X' permission on the parent.  They must
+ONLY be used internally by a filesystem on itself when it knows that
+permissions are irrelevant or in a context where permission checks have
+already been performed, such as after vfs_path_parent_lookup().
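+
+As a hedged sketch of the new calling convention (helper name hypothetical;
+assuming the qstr-pointer-plus-base prototypes described above - check
+linux/namei.h and linux/dcache.h for the exact declarations)::
+
+	static struct dentry *myfs_find_child(struct dentry *parent,
+					      const char *name)
+	{
+		return lookup_noperm(&QSTR(name), parent);
+	}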
+
+---
+
+**mandatory**
+
+d_hash_and_lookup() is no longer exported or available outside the VFS.
+Use try_lookup_noperm() instead. This adds name validation and takes
+arguments in the opposite order but is otherwise identical.
+
+Using try_lookup_noperm() will require linux/namei.h to be included.
+
+---
+
+**mandatory**
+
+Calling conventions for ->d_automount() have changed; we should *not* grab
+an extra reference to the new mount - it should be returned with refcount 1.
diff --git a/Documentation/filesystems/relay.rst b/Documentation/filesystems/relay.rst
index 04ad083cfe62..301ff4c6e6c6 100644
--- a/Documentation/filesystems/relay.rst
+++ b/Documentation/filesystems/relay.rst
@@ -32,7 +32,7 @@ functions in the relay interface code - please see that for details.
Semantics
=========
-Each relay channel has one buffer per CPU, each buffer has one or more
+Each relay channel has one buffer per CPU; each buffer has one or more
sub-buffers. Messages are written to the first sub-buffer until it is
too full to contain a new message, in which case it is written to
the next (if available). Messages are never split across sub-buffers.
@@ -40,7 +40,7 @@ At this point, userspace can be notified so it empties the first
sub-buffer, while the kernel continues writing to the next.
When notified that a sub-buffer is full, the kernel knows how many
-bytes of it are padding i.e. unused space occurring because a complete
+bytes of it are padding, i.e., unused space occurring because a complete
message couldn't fit into a sub-buffer. Userspace can use this
knowledge to copy only valid data.
@@ -71,7 +71,7 @@ klog and relay-apps example code
================================
The relay interface itself is ready to use, but to make things easier,
-a couple simple utility functions and a set of examples are provided.
+a couple of simple utility functions and a set of examples are provided.
The relay-apps example tarball, available on the relay sourceforge
site, contains a set of self-contained examples, each consisting of a
@@ -91,7 +91,7 @@ registered will data actually be logged (see the klog and kleak
examples for details).
It is of course possible to use the relay interface from scratch,
-i.e. without using any of the relay-apps example code or klog, but
+i.e., without using any of the relay-apps example code or klog, but
you'll have to implement communication between userspace and kernel,
allowing both to convey the state of buffers (full, empty, amount of
padding). The read() interface both removes padding and internally
@@ -119,7 +119,7 @@ mmap() results in channel buffer being mapped into the caller's
must map the entire file, which is NRBUF * SUBBUFSIZE.
read() read the contents of a channel buffer. The bytes read are
- 'consumed' by the reader, i.e. they won't be available
+ 'consumed' by the reader, i.e., they won't be available
again to subsequent reads. If the channel is being used
in no-overwrite mode (the default), it can be read at any
time even if there's an active kernel writer. If the
@@ -138,7 +138,7 @@ poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are
notified when sub-buffer boundaries are crossed.
close() decrements the channel buffer's refcount. When the refcount
- reaches 0, i.e. when no process or kernel client has the
+ reaches 0, i.e., when no process or kernel client has the
buffer open, the channel buffer is freed.
=========== ============================================================
@@ -149,7 +149,7 @@ host filesystem must be mounted. For example::
.. Note::
- the host filesystem doesn't need to be mounted for kernel
+ The host filesystem doesn't need to be mounted for kernel
clients to create or use channels - it only needs to be
mounted when user space applications need access to the buffer
data.
@@ -301,16 +301,6 @@ user-defined data with a channel, and is immediately available
(including in create_buf_file()) via chan->private_data or
buf->chan->private_data.
-Buffer-only channels
---------------------
-
-These channels have no files associated and can be created with
-relay_open(NULL, NULL, ...). Such channels are useful in scenarios such
-as when doing early tracing in the kernel, before the VFS is up. In these
-cases, one may open a buffer-only channel and then call
-relay_late_setup_files() when the kernel is ready to handle files,
-to expose the buffered data to the userspace.
-
Channel 'modes'
---------------
@@ -325,7 +315,7 @@ section, as it pertains mainly to mmap() implementations.
In 'overwrite' mode, also known as 'flight recorder' mode, writes
continuously cycle around the buffer and will never fail, but will
unconditionally overwrite old data regardless of whether it's actually
-been consumed. In no-overwrite mode, writes will fail, i.e. data will
+been consumed. In no-overwrite mode, writes will fail, i.e., data will
be lost, if the number of unconsumed sub-buffers equals the total
number of sub-buffers in the channel. It should be clear that if
there is no consumer or if the consumer can't consume sub-buffers fast
@@ -344,7 +334,7 @@ initialize the next sub-buffer if appropriate 2) finalize the previous
sub-buffer if appropriate and 3) return a boolean value indicating
whether or not to actually move on to the next sub-buffer.
-To implement 'no-overwrite' mode, the userspace client would provide
+To implement 'no-overwrite' mode, the userspace client provides
an implementation of the subbuf_start() callback something like the
following::
@@ -364,9 +354,9 @@ following::
return 1;
}
-If the current buffer is full, i.e. all sub-buffers remain unconsumed,
+If the current buffer is full, i.e., all sub-buffers remain unconsumed,
the callback returns 0 to indicate that the buffer switch should not
-occur yet, i.e. until the consumer has had a chance to read the
+occur yet, i.e., until the consumer has had a chance to read the
current set of ready sub-buffers. For the relay_buf_full() function
to make sense, the consumer is responsible for notifying the relay
interface when sub-buffers have been consumed via
@@ -400,7 +390,7 @@ consulted.
The default subbuf_start() implementation, used if the client doesn't
define any callbacks, or doesn't define the subbuf_start() callback,
-implements the simplest possible 'no-overwrite' mode, i.e. it does
+implements the simplest possible 'no-overwrite' mode, i.e., it does
nothing but return 0.
Header information can be reserved at the beginning of each sub-buffer
@@ -467,7 +457,7 @@ rather than open and close a new channel for each use. relay_reset()
can be used for this purpose - it resets a channel to its initial
state without reallocating channel buffer memory or destroying
existing mappings. It should however only be called when it's safe to
-do so, i.e. when the channel isn't currently being written to.
+do so, i.e., when the channel isn't currently being written to.
Finally, there are a couple of utility callbacks that can be used for
different purposes. buf_mapped() is called whenever a channel buffer
diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/filesystems/resctrl.rst
index 6768fc1fad16..c7949dd44f2f 100644
--- a/Documentation/arch/x86/resctrl.rst
+++ b/Documentation/filesystems/resctrl.rst
@@ -1,9 +1,9 @@
.. SPDX-License-Identifier: GPL-2.0
.. include:: <isonum.txt>
-===========================================
-User Interface for Resource Control feature
-===========================================
+=====================================================
+User Interface for Resource Control feature (resctrl)
+=====================================================
:Copyright: |copy| 2016 Intel Corporation
:Authors: - Fenghua Yu <fenghua.yu@intel.com>
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index ae79c30b6c0c..fd32a9a17bfb 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -716,9 +716,8 @@ page lookup by address, and keeping track of pages tagged as Dirty or
Writeback.
The first can be used independently to the others. The VM can try to
-either write dirty pages in order to clean them, or release clean pages
-in order to reuse them. To do this it can call the ->writepage method
-on dirty pages, and ->release_folio on clean folios with the private
+release clean pages in order to reuse them. To do this it can call
+->release_folio on clean folios with the private
flag set. Clean pages without PagePrivate and with no external references
will be released without notice being given to the address_space.
@@ -731,8 +730,8 @@ maintains information about the PG_Dirty and PG_Writeback status of each
page, so that pages with either of these flags can be found quickly.
The Dirty tag is primarily used by mpage_writepages - the default
-->writepages method. It uses the tag to find dirty pages to call
-->writepage on. If mpage_writepages is not used (i.e. the address
+->writepages method. It uses the tag to find dirty pages to
+write back. If mpage_writepages is not used (i.e. the address
provides its own ->writepages) , the PAGECACHE_TAG_DIRTY tag is almost
unused. write_inode_now and sync_inode do use it (through
__sync_single_inode) to check if ->writepages has been successful in
@@ -756,23 +755,23 @@ pages, however the address_space has finer control of write sizes.
The read process essentially only requires 'read_folio'. The write
process is more complicated and uses write_begin/write_end or
-dirty_folio to write data into the address_space, and writepage and
+dirty_folio to write data into the address_space, and
writepages to writeback data to storage.
Adding and removing pages to/from an address_space is protected by the
inode's i_mutex.
When data is written to a page, the PG_Dirty flag should be set. It
-typically remains set until writepage asks for it to be written. This
+typically remains set until writepages asks for it to be written. This
should clear PG_Dirty and set PG_Writeback. It can be actually written
at any point after PG_Dirty is clear. Once it is known to be safe,
PG_Writeback is cleared.
Writeback makes use of a writeback_control structure to direct the
-operations. This gives the writepage and writepages operations some
+operations. This gives the writepages operation some
information about the nature of and reason for the writeback request,
and the constraints under which it is being done. It is also used to
-return information back to the caller about the result of a writepage or
+return information back to the caller about the result of a
writepages request.
@@ -819,7 +818,6 @@ cache in your filesystem. The following members are defined:
.. code-block:: c
struct address_space_operations {
- int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*read_folio)(struct file *, struct folio *);
int (*writepages)(struct address_space *, struct writeback_control *);
bool (*dirty_folio)(struct address_space *, struct folio *);
@@ -848,25 +846,6 @@ cache in your filesystem. The following members are defined:
int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
};
-``writepage``
- called by the VM to write a dirty page to backing store. This
- may happen for data integrity reasons (i.e. 'sync'), or to free
- up memory (flush). The difference can be seen in
- wbc->sync_mode. The PG_Dirty flag has been cleared and
- PageLocked is true. writepage should start writeout, should set
- PG_Writeback, and should make sure the page is unlocked, either
- synchronously or asynchronously when the write operation
- completes.
-
- If wbc->sync_mode is WB_SYNC_NONE, ->writepage doesn't have to
- try too hard if there are problems, and may choose to write out
- other pages from the mapping if that is easier (e.g. due to
- internal dependencies). If it chooses not to start writeout, it
- should return AOP_WRITEPAGE_ACTIVATE so that the VM will not
- keep calling ->writepage on that page.
-
- See the file "Locking" for more details.
-
``read_folio``
Called by the page cache to read a folio from the backing store.
The 'file' argument supplies authentication information to network
@@ -909,7 +888,7 @@ cache in your filesystem. The following members are defined:
given and that many pages should be written if possible. If no
->writepages is given, then mpage_writepages is used instead.
This will choose pages from the address space that are tagged as
- DIRTY and will pass them to ->writepage.
+ DIRTY and will write them back.
``dirty_folio``
called by the VM to mark a folio as dirty. This is particularly
@@ -1411,9 +1390,7 @@ defined:
If a vfsmount is returned, the caller will attempt to mount it
on the mountpoint and will remove the vfsmount from its
- expiration list in the case of failure. The vfsmount should be
- returned with 2 refs on it to prevent automatic expiration - the
- caller will clean up the additional ref.
+ expiration list in the case of failure.
This function is only used if DCACHE_NEED_AUTOMOUNT is set on
the dentry. This is set by __d_instantiate() if S_AUTOMOUNT is
diff --git a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst
index 8d8b53e96bcf..ccb4b153e6f2 100644
--- a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst
+++ b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst
@@ -12,11 +12,14 @@ ACPI in general allows referring to device objects in the tree only.
Hierarchical data extension nodes may not be referred to directly, hence this
document defines a scheme to implement such references.
-A reference consist of the device object name followed by one or more
-hierarchical data extension [dsd-guide] keys. Specifically, the hierarchical
-data extension node which is referred to by the key shall lie directly under
-the parent object i.e. either the device object or another hierarchical data
-extension node.
+A reference to a _DSD hierarchical data node is a string consisting of a
+device object reference followed by a dot (".") and a relative path to a data
+node object. Do not use non-string references as this will produce a copy of
+the hierarchical data node, not a reference!
+
+The hierarchical data extension node which is referred to shall be located
+directly under its parent object i.e. either the device object or another
+hierarchical data extension node [dsd-guide].
The keys in the hierarchical data nodes shall consist of the name of the node,
"@" character and the number of the node in hexadecimal notation (without pre-
@@ -33,11 +36,9 @@ extension key.
Example
=======
-In the ASL snippet below, the "reference" _DSD property contains a
-device object reference to DEV0 and under that device object, a
-hierarchical data extension key "node@1" referring to the NOD1 object
-and lastly, a hierarchical data extension key "anothernode" referring to
-the ANOD object which is also the final target node of the reference.
+In the ASL snippet below, the "reference" _DSD property contains a string
+reference to a hierarchical data extension node ANOD under DEV0 under the parent
+of DEV1. ANOD is also the final target node of the reference.
::
Device (DEV0)
@@ -76,10 +77,7 @@ the ANOD object which is also the final target node of the reference.
Name (_DSD, Package () {
ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
Package () {
- Package () {
- "reference", Package () {
- ^DEV0, "node@1", "anothernode"
- }
+ Package () { "reference", "^DEV0.ANOD" }
},
}
})
diff --git a/Documentation/firmware-guide/acpi/dsd/graph.rst b/Documentation/firmware-guide/acpi/dsd/graph.rst
index b9dbfc73ed25..d6ae5ffa748c 100644
--- a/Documentation/firmware-guide/acpi/dsd/graph.rst
+++ b/Documentation/firmware-guide/acpi/dsd/graph.rst
@@ -66,12 +66,9 @@ of that port shall be zero. Similarly, if a port may only have a single
endpoint, the number of that endpoint shall be zero.
The endpoint reference uses property extension with "remote-endpoint" property
-name followed by a reference in the same package. Such references consist of
-the remote device reference, the first package entry of the port data extension
-reference under the device and finally the first package entry of the endpoint
-data extension reference under the port. Individual references thus appear as::
+name followed by a string reference in the same package. [data-node-ref]::
- Package() { device, "port@X", "endpoint@Y" }
+ "device.datanode"
In the above example, "X" is the number of the port and "Y" is the number of
the endpoint.
@@ -109,7 +106,7 @@ A simple example of this is show below::
ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
Package () {
Package () { "reg", 0 },
- Package () { "remote-endpoint", Package() { \_SB.PCI0.ISP, "port@4", "endpoint@0" } },
+ Package () { "remote-endpoint", "\\_SB.PCI0.ISP.EP40" },
}
})
}
@@ -141,7 +138,7 @@ A simple example of this is show below::
ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
Package () {
Package () { "reg", 0 },
- Package () { "remote-endpoint", Package () { \_SB.PCI0.I2C2.CAM0, "port@0", "endpoint@0" } },
+ Package () { "remote-endpoint", "\\_SB.PCI0.I2C2.CAM0.EP00" },
}
})
}
diff --git a/Documentation/firmware-guide/acpi/dsd/leds.rst b/Documentation/firmware-guide/acpi/dsd/leds.rst
index 93db592c93c7..a97cd07d49be 100644
--- a/Documentation/firmware-guide/acpi/dsd/leds.rst
+++ b/Documentation/firmware-guide/acpi/dsd/leds.rst
@@ -15,11 +15,6 @@ Referring to LEDs in Device tree is documented in [video-interfaces], in
"flash-leds" property documentation. In short, LEDs are directly referred to by
using phandles.
-While Device tree allows referring to any node in the tree [devicetree], in
-ACPI references are limited to device nodes only [acpi]. For this reason using
-the same mechanism on ACPI is not possible. A mechanism to refer to non-device
-ACPI nodes is documented in [data-node-ref].
-
ACPI allows (as does DT) using integer arguments after the reference. A
combination of the LED driver device reference and an integer argument,
referring to the "reg" property of the relevant LED, is used to identify
@@ -74,7 +69,7 @@ omitted. ::
Package () {
Package () {
"flash-leds",
- Package () { ^LED, "led@0", ^LED, "led@1" },
+ Package () { "^LED.LED0", "^LED.LED1" },
}
}
})
diff --git a/Documentation/gpu/amdgpu/amd-hardware-list-info.rst b/Documentation/gpu/amdgpu/amd-hardware-list-info.rst
new file mode 100644
index 000000000000..1786544fe7c1
--- /dev/null
+++ b/Documentation/gpu/amdgpu/amd-hardware-list-info.rst
@@ -0,0 +1,23 @@
+=================================================
+ AMD Hardware Components Information per Product
+=================================================
+
+On this page, you can find each AMD product name and the component versions
+that are part of it.
+
+Accelerated Processing Units (APU) Info
+---------------------------------------
+
+.. csv-table::
+ :header-rows: 1
+ :widths: 3, 2, 2, 1, 1, 1, 1
+ :file: ./apu-asic-info-table.csv
+
+Discrete GPU Info
+-----------------
+
+.. csv-table::
+ :header-rows: 1
+ :widths: 3, 2, 2, 1, 1, 1
+ :file: ./dgpu-asic-info-table.csv
+
diff --git a/Documentation/gpu/amdgpu/amdgpu-glossary.rst b/Documentation/gpu/amdgpu/amdgpu-glossary.rst
index 1e9283e076ba..30812d9d53c6 100644
--- a/Documentation/gpu/amdgpu/amdgpu-glossary.rst
+++ b/Documentation/gpu/amdgpu/amdgpu-glossary.rst
@@ -12,18 +12,39 @@ we have a dedicated glossary for Display Core at
The number of CUs that are active on the system. The number of active
CUs may be less than SE * SH * CU depending on the board configuration.
+ BACO
+ Bus Alive, Chip Off
+
+ BOCO
+ Bus Off, Chip Off
+
CE
Constant Engine
+ CIK
+ Sea Islands
+
+ CB
+ Color Buffer
+
CP
Command Processor
CPLIB
Content Protection Library
+ CS
+ Command Submission
+
+ CSB
+ Clear State Indirect Buffer
+
CU
Compute Unit
+ DB
+ Depth Buffer
+
DFS
Digital Frequency Synthesizer
@@ -33,6 +54,9 @@ we have a dedicated glossary for Display Core at
EOP
End Of Pipe/Pipeline
+ FLR
+ Function Level Reset
+
GART
Graphics Address Remapping Table. This is the name we use for the GPUVM
page table used by the GPU kernel driver. It remaps system resources
@@ -45,6 +69,12 @@ we have a dedicated glossary for Display Core at
GC
Graphics and Compute
+ GDS
+ Global Data Share
+
+ GE
+ Geometry Engine
+
GMC
Graphic Memory Controller
@@ -80,6 +110,9 @@ we have a dedicated glossary for Display Core at
KCQ
Kernel Compute Queue
+ KFD
+ Kernel Fusion Driver
+
KGQ
Kernel Graphics Queue
@@ -89,6 +122,9 @@ we have a dedicated glossary for Display Core at
MC
Memory Controller
+ MCBP
+ Mid Command Buffer Preemption
+
ME
MicroEngine (Graphics)
@@ -104,6 +140,9 @@ we have a dedicated glossary for Display Core at
MQD
Memory Queue Descriptor
+ PA
+ Primitive Assembler / Physical Address
+
PFP
Pre-Fetch Parser (Graphics)
@@ -113,24 +152,39 @@ we have a dedicated glossary for Display Core at
PSP
Platform Security Processor
+ RB
+  RB
+    Render Backends.  Some people call them ROPs.
+
RLC
RunList Controller. This name is a remnant of past ages and doesn't have
much meaning today. It's a group of general-purpose helper engines for
the GFX block. It's involved in GFX power management and SR-IOV, among
other things.
+ SC
+ Scan Converter
+
SDMA
System DMA
SE
Shader Engine
+ SGPR
+ Scalar General-Purpose Registers
+
SH
SHader array
+ SI
+ Southern Islands
+
SMU/SMC
System Management Unit / System Management Controller
+ SPI (AMDGPU)
+ Shader Processor Input
+
SRLC
Save/Restore List Control
@@ -143,12 +197,21 @@ we have a dedicated glossary for Display Core at
SS
Spread Spectrum
+ SX
+ Shader Export
+
TA
Trusted Application
+ TC
+ Texture Cache
+
TOC
Table of Contents
+ UMSCH
+ User Mode Scheduler
+
UVD
Unified Video Decoder
@@ -158,5 +221,17 @@ we have a dedicated glossary for Display Core at
VCN
Video Codec Next
+ VGPR
+ Vector General-Purpose Registers
+
+ VMID
+ Virtual Memory ID
+
VPE
Video Processing Engine
+
+ XCC
+ Accelerator Core Complex
+
+ XCP
+ Accelerator Core Partition
diff --git a/Documentation/gpu/amdgpu/apu-asic-info-table.csv b/Documentation/gpu/amdgpu/apu-asic-info-table.csv
index 5dd4b8762d19..1d50b539677f 100644
--- a/Documentation/gpu/amdgpu/apu-asic-info-table.csv
+++ b/Documentation/gpu/amdgpu/apu-asic-info-table.csv
@@ -13,3 +13,5 @@ Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8
Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
Ryzen 8x40 series, Hawk Point, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
Ryzen AI 300 series, Strix Point, 3.5.0, 11.5.0, 4.0.5, 6.1.0, 14.0.0
+Ryzen AI 350 series, Krackan Point, 3.5.0, 11.5.2, 4.0.5, 6.1.2, 14.0.4
+Ryzen AI Max 300 series, Strix Halo, 3.5.1, 11.5.1, 4.0.6, 6.1.1, 14.0.1
diff --git a/Documentation/gpu/amdgpu/debugfs.rst b/Documentation/gpu/amdgpu/debugfs.rst
new file mode 100644
index 000000000000..5150d0a95658
--- /dev/null
+++ b/Documentation/gpu/amdgpu/debugfs.rst
@@ -0,0 +1,210 @@
+==============
+AMDGPU DebugFS
+==============
+
+The amdgpu driver provides a number of debugfs files to aid in debugging
+issues in the driver. These are usually found in
+/sys/kernel/debug/dri/<num>.
+
+DebugFS Files
+=============
+
+amdgpu_benchmark
+----------------
+
+Run benchmarks using the DMA engine the driver uses for GPU memory paging.
+Write a number to the file to run the test. The results are written to the
+kernel log. VRAM is on device memory (dGPUs) or carve out (APUs) and GTT
+(Graphics Translation Tables) is system memory that is accessible by the GPU.
+The following tests are available:
+
+- 1: simple test, VRAM to GTT and GTT to VRAM
+- 2: simple test, VRAM to VRAM
+- 3: GTT to VRAM, buffer size sweep, powers of 2
+- 4: VRAM to GTT, buffer size sweep, powers of 2
+- 5: VRAM to VRAM, buffer size sweep, powers of 2
+- 6: GTT to VRAM, buffer size sweep, common display sizes
+- 7: VRAM to GTT, buffer size sweep, common display sizes
+- 8: VRAM to VRAM, buffer size sweep, common display sizes
+
+amdgpu_test_ib
+--------------
+
+Read this file to run simple IB (Indirect Buffer) tests on all kernel managed
+rings. IBs are command buffers usually generated by userspace applications
+which are submitted to the kernel for execution on a particular GPU engine.
+This just runs the simple IB tests included in the kernel. These tests
+are engine specific and verify that IB submission works.
+
+amdgpu_discovery
+----------------
+
+Provides raw access to the IP discovery binary provided by the GPU. Read this
+file to access the raw binary. This is useful for verifying the contents of
+the IP discovery table. It is chip specific.
+
+amdgpu_vbios
+------------
+
+Provides raw access to the ROM binary image from the GPU. Read this file to
+access the raw binary. This is useful for verifying the contents of the
+video BIOS ROM. It is board specific.
+
+amdgpu_evict_gtt
+----------------
+
+Evict all buffers from the GTT memory pool. Read this file to evict all
+buffers from this pool.
+
+amdgpu_evict_vram
+-----------------
+
+Evict all buffers from the VRAM memory pool. Read this file to evict all
+buffers from this pool.
+
+amdgpu_gpu_recover
+------------------
+
+Trigger a GPU reset.  Read this file to trigger a reset of the entire GPU.
+All work currently running on the GPU will be lost.
+
+amdgpu_ring_<name>
+------------------
+
+Provides read access to the kernel managed ring buffers for each ring <name>.
+These are useful for debugging problems on a particular ring. The ring buffer
+is how the CPU sends commands to the GPU. The CPU writes commands into the
+buffer and then asks the GPU engine to process it. This is the raw binary
+contents of the ring buffer. Use a tool like UMR to decode the rings into human
+readable form.
+
+amdgpu_mqd_<name>
+-----------------
+
+Provides read access to the kernel managed MQD (Memory Queue Descriptor) for
+ring <name> managed by the kernel driver. MQDs define the features of the ring
+and are used to store the ring's state when it is not connected to hardware.
+The driver writes the requested ring features and metadata (GPU addresses of
+the ring itself and associated buffers) to the MQD and the firmware uses the MQD
+to populate the hardware when the ring is mapped to a hardware slot. Only
+available on engines which use MQDs. This provides access to the raw MQD
+binary.
+
+amdgpu_error_<name>
+-------------------
+
+Provides an interface to set an error code on the dma fences associated with
+ring <name>. The error code specified is propagated to all fences associated
+with the ring. Use this to inject a fence error into a ring.
+
+amdgpu_pm_info
+--------------
+
+Provides human readable information about the power management features
+and state of the GPU. This includes current GFX clock, Memory clock,
+voltages, average SoC power, temperature, GFX load, Memory load, SMU
+feature mask, VCN power state, clock and power gating features.
+
+amdgpu_firmware_info
+--------------------
+
+Lists the firmware versions for all firmwares used by the GPU. Only
+entries with a non-0 version are valid. If the version is 0, the firmware
+is not valid for the GPU.
+
+amdgpu_fence_info
+-----------------
+
+Shows the last signalled and emitted fence sequence numbers for each
+kernel driver managed ring. Fences are associated with submissions
+to the engine. Emitted fences have been submitted to the ring
+and signalled fences have been signalled by the GPU. Rings with a
+larger emitted fence value have outstanding work that is still being
+processed by the engine that owns that ring. When the emitted and
+signalled fence values are equal, the ring is idle.
+
+amdgpu_gem_info
+---------------
+
+Lists all of the PIDs using the GPU and the GPU buffers that they have
+allocated. This lists the buffer size, pool (VRAM, GTT, etc.), and buffer
+attributes (CPU access required, CPU cache attributes, etc.).
+
+amdgpu_vm_info
+--------------
+
+Lists all of the PIDs using the GPU and the GPU buffers that they have
+allocated as well as the status of those buffers relative to that process'
+GPU virtual address space (e.g., evicted, idle, invalidated, etc.).
+
+amdgpu_sa_info
+--------------
+
+Prints out all of the suballocations (sa) by the suballocation manager in the
+kernel driver. Prints the GPU address, size, and fence info associated
+with each suballocation. The suballocations are used internally within
+the kernel driver for various things.
+
+amdgpu_<pool>_mm
+----------------
+
+Prints TTM information about the memory pool <pool>.
+
+amdgpu_vram
+-----------
+
+Provides direct access to VRAM. Used by tools like UMR to inspect
+objects in VRAM.
+
+amdgpu_iomem
+------------
+
+Provides direct access to GTT memory. Used by tools like UMR to inspect
+GTT memory.
+
+amdgpu_regs_*
+-------------
+
+Provides direct access to various register apertures on the GPU.  Used
+by tools like UMR to access GPU registers.
+
+amdgpu_regs2
+------------
+
+Provides an IOCTL interface used by UMR for interacting with GPU registers.
+
+
+amdgpu_sensors
+--------------
+
+Provides an interface to query GPU power metrics (temperature, average
+power, etc.). Used by tools like UMR to query GPU power metrics.
+
+
+amdgpu_gca_config
+-----------------
+
+Provides an interface to query GPU details (Graphics/Compute Array config,
+PCI config, GPU family, etc.). Used by tools like UMR to query GPU details.
+
+amdgpu_wave
+-----------
+
+Used to query GFX/compute wave information from the hardware. Used by tools
+like UMR to query GFX/compute wave information.
+
+amdgpu_gpr
+----------
+
+Used to query GFX/compute GPR (General Purpose Register) information from the
+hardware. Used by tools like UMR to query GPRs when debugging shaders.
+
+amdgpu_gprwave
+--------------
+
+Provides an IOCTL interface used by UMR for interacting with shader waves.
+
+amdgpu_fw_attestation
+---------------------
+
+Provides an interface for reading back firmware attestation records.
diff --git a/Documentation/gpu/amdgpu/debugging.rst b/Documentation/gpu/amdgpu/debugging.rst
index e75f97d0e4ea..7cbfea0606e1 100644
--- a/Documentation/gpu/amdgpu/debugging.rst
+++ b/Documentation/gpu/amdgpu/debugging.rst
@@ -2,6 +2,13 @@
GPU Debugging
===============
+General Debugging Options
+=========================
+
+The DebugFS section provides documentation on a number of files to aid in debugging
+issues on the GPU.
+
+
GPUVM Debugging
===============
diff --git a/Documentation/gpu/amdgpu/display/dc-debug.rst b/Documentation/gpu/amdgpu/display/dc-debug.rst
index 013f63b271f3..605dca21f4ae 100644
--- a/Documentation/gpu/amdgpu/display/dc-debug.rst
+++ b/Documentation/gpu/amdgpu/display/dc-debug.rst
@@ -154,7 +154,7 @@ of the display parameters, but the userspace might also cause this issue. One
way to identify the source of the problem is to take a screenshot or make a
desktop video capture when the problem happens; after checking the
screenshot/video recording, if you don't see any of the artifacts, it means
-that the issue is likely on the the driver side. If you can still see the
+that the issue is likely on the driver side. If you can still see the
problem in the data collected, it is an issue that probably happened during
rendering, and the display code just got the framebuffer already corrupted.
diff --git a/Documentation/gpu/amdgpu/driver-core.rst b/Documentation/gpu/amdgpu/driver-core.rst
index 32723a925377..81256318e93c 100644
--- a/Documentation/gpu/amdgpu/driver-core.rst
+++ b/Documentation/gpu/amdgpu/driver-core.rst
@@ -67,36 +67,66 @@ GC (Graphics and Compute)
This is the graphics and compute engine, i.e., the block that
    encompasses the 3D pipeline and shader blocks. This is by far the
largest block on the GPU. The 3D pipeline has tons of sub-blocks. In
- addition to that, it also contains the CP microcontrollers (ME, PFP,
- CE, MEC) and the RLC microcontroller. It's exposed to userspace for
- user mode drivers (OpenGL, Vulkan, OpenCL, etc.)
+ addition to that, it also contains the CP microcontrollers (ME, PFP, CE,
+ MEC) and the RLC microcontroller. It's exposed to userspace for user mode
+ drivers (OpenGL, Vulkan, OpenCL, etc.). More details in :ref:`Graphics (GFX)
+ and Compute <amdgpu-gc>`.
VCN (Video Core Next)
This is the multi-media engine. It handles video and image encode and
decode. It's exposed to userspace for user mode drivers (VA-API,
OpenMAX, etc.)
-Graphics and Compute Microcontrollers
--------------------------------------
-
-CP (Command Processor)
- The name for the hardware block that encompasses the front end of the
- GFX/Compute pipeline. Consists mainly of a bunch of microcontrollers
- (PFP, ME, CE, MEC). The firmware that runs on these microcontrollers
- provides the driver interface to interact with the GFX/Compute engine.
-
- MEC (MicroEngine Compute)
- This is the microcontroller that controls the compute queues on the
- GFX/compute engine.
-
- MES (MicroEngine Scheduler)
- This is a new engine for managing queues. This is currently unused.
-
-RLC (RunList Controller)
- This is another microcontroller in the GFX/Compute engine. It handles
- power management related functionality within the GFX/Compute engine.
- The name is a vestige of old hardware where it was originally added
- and doesn't really have much relation to what the engine does now.
+.. _pipes-and-queues-description:
+
+GFX, Compute, and SDMA Overall Behavior
+=======================================
+
+.. note:: For simplicity, whenever the term block is used in this section, it
+ means GFX, Compute, and SDMA.
+
+GFX, Compute, and SDMA share a similar form of operation that can be
+abstracted to make the behavior of these blocks easier to understand. The
+figure below illustrates the components these blocks have in common:
+
+.. kernel-figure:: pipe_and_queue_abstraction.svg
+
+In the central part of this figure, you can see two hardware elements, one
+called **Pipes** and another called **Queues**; it is important to highlight
+that a Queue must be associated with a Pipe and vice versa. Each hardware IP
+may have a different number of Pipes and, in turn, a different number of
+Queues; for example, GFX 11 has two Pipes and two Queues per Pipe for the GFX
+front end.
+
+A Pipe is the hardware that processes the instructions available in its
+Queues; in other words, it is a thread executing the operations inserted in a
+Queue. One crucial characteristic of Pipes is that they can only execute one
+Queue at a time; even if multiple Queues are mapped to a Pipe, only one Queue
+runs on that Pipe at any given time.
+
+Pipes can swap between Queues at the hardware level, but they only make use
+of Queues that are currently mapped. A Pipe can switch between Queues based
+on any of the following inputs:
+
+1. Command Stream;
+2. Packet by Packet;
+3. Other hardware requests the change (e.g., MES).
+
+Queues within Pipes are defined by Hardware Queue Descriptors (HQDs).
+Associated with the HQD concept, we have the Memory Queue Descriptor (MQD),
+which is responsible for storing, in memory, the state of each of the
+available Queues. The state of a Queue contains information such as the GPU
+virtual address of the queue itself, save areas, doorbell, etc. The MQD also
+stores the HQD registers, which are vital for activating or deactivating a
+given Queue. The scheduling firmware (e.g., MES) is responsible for loading
+HQDs from MQDs and vice versa.
+
+The Queue-switching process can also happen with the firmware requesting the
+preemption or unmapping of a Queue. The firmware waits for the HQD_ACTIVE bit
+to change to low before saving the state into the MQD. To make a different
+Queue become active, the firmware copies the MQD state into the HQD registers
+and loads any additional state. Finally, it sets the HQD_ACTIVE bit to high to
+indicate that the queue is active. The Pipe will then execute work from active
+Queues.
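+
+A rough sketch of that unmap/map sequence is shown below; the types and
+register layout are placeholders for illustration only, not the real MQD/HQD
+definitions:
+
+.. code-block:: c
+
+   #include <stdint.h>
+
+   struct mqd { uint32_t saved_regs[8]; };       /* placeholder queue state  */
+   struct hqd {
+           volatile uint32_t active;             /* HQD_ACTIVE               */
+           volatile uint32_t dequeue_request;    /* preemption request       */
+           uint32_t regs[8];
+   };
+
+   static void switch_queue(struct hqd *hqd, struct mqd *cur, struct mqd *next)
+   {
+           hqd->dequeue_request = 1;      /* ask the Pipe to preempt the Queue */
+           while (hqd->active)            /* wait for HQD_ACTIVE to go low     */
+                   ;
+           for (int i = 0; i < 8; i++)
+                   cur->saved_regs[i] = hqd->regs[i];    /* save state to MQD */
+           for (int i = 0; i < 8; i++)
+                   hqd->regs[i] = next->saved_regs[i];   /* load the next MQD */
+           hqd->active = 1;               /* the Queue is active again         */
+   }
+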
Driver Structure
================
@@ -110,7 +140,8 @@ Some useful constructs:
KIQ (Kernel Interface Queue)
This is a control queue used by the kernel driver to manage other gfx
and compute queues on the GFX/compute engine. You can use it to
- map/unmap additional queues, etc.
+ map/unmap additional queues, etc. This is replaced by MES on
+ GFX 11 and newer hardware.
IB (Indirect Buffer)
A command buffer for a particular engine. Rather than writing
diff --git a/Documentation/gpu/amdgpu/driver-misc.rst b/Documentation/gpu/amdgpu/driver-misc.rst
index e40e15f89fd3..25b0c857816e 100644
--- a/Documentation/gpu/amdgpu/driver-misc.rst
+++ b/Documentation/gpu/amdgpu/driver-misc.rst
@@ -50,23 +50,6 @@ board_info
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
:doc: board_info
-Accelerated Processing Units (APU) Info
----------------------------------------
-
-.. csv-table::
- :header-rows: 1
- :widths: 3, 2, 2, 1, 1, 1, 1
- :file: ./apu-asic-info-table.csv
-
-Discrete GPU Info
------------------
-
-.. csv-table::
- :header-rows: 1
- :widths: 3, 2, 2, 1, 1, 1
- :file: ./dgpu-asic-info-table.csv
-
-
GPU Memory Usage Information
============================
diff --git a/Documentation/gpu/amdgpu/gc/index.rst b/Documentation/gpu/amdgpu/gc/index.rst
new file mode 100644
index 000000000000..ff6e9ef5cbee
--- /dev/null
+++ b/Documentation/gpu/amdgpu/gc/index.rst
@@ -0,0 +1,52 @@
+.. _amdgpu-gc:
+
+========================================
+ drm/amdgpu - Graphics and Compute (GC)
+========================================
+
+The relationship between the CPU and GPU can be described as the
+producer-consumer problem, where the CPU fills out a buffer with operations
+(producer) to be executed by the GPU (consumer). The requested operations in
+the buffer are called Command Packets, which can be summarized as a compressed
+way of transmitting command information to the graphics controller.
+
+The component that acts as the front end between the CPU and the GPU is called
+the Command Processor (CP). This component is responsible for providing greater
+flexibility to the GC since CP makes it possible to program various aspects of
+the GPU pipeline. CP also coordinates the communication between the CPU and GPU
+via a mechanism named **Ring Buffers**, where the CPU appends information to
+the buffer while the GPU removes operations. It is worth highlighting that the
+CPU can place a pointer in the Ring Buffer that points to another region of
+memory outside the Ring Buffer, and the CP can follow it; this mechanism is
+called an **Indirect Buffer (IB)**. The CP receives and parses the Command
+Streams (CS) and writes the operations to the correct hardware blocks.
+
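+As a rough illustration of this producer-consumer mechanism, the sketch below
+shows a generic ring buffer with separate read and write pointers; it is not
+the actual packet format or register interface used by the CP:
+
+.. code-block:: c
+
+   #include <stdint.h>
+
+   /* The CPU (producer) appends command packets at the write pointer; the
+    * CP (consumer) fetches and parses them from the read pointer. */
+   struct ring {
+           uint32_t *buf;
+           uint32_t  size;   /* number of dwords, power of two */
+           uint32_t  wptr;   /* advanced by the CPU            */
+           uint32_t  rptr;   /* advanced by the CP             */
+   };
+
+   static void ring_emit(struct ring *r, uint32_t packet)
+   {
+           r->buf[r->wptr & (r->size - 1)] = packet;
+           r->wptr++;        /* the CP is then notified of the new wptr */
+   }
+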
+Graphics (GFX) and Compute Microcontrollers
+-------------------------------------------
+
+GC is a large block and, as a result, has multiple firmware components
+associated with it. Some of them are:
+
+CP (Command Processor)
+ The name for the hardware block that encompasses the front end of the
+ GFX/Compute pipeline. Consists mainly of a bunch of microcontrollers
+ (PFP, ME, CE, MEC). The firmware that runs on these microcontrollers
+ provides the driver interface to interact with the GFX/Compute engine.
+
+ MEC (MicroEngine Compute)
+ This is the microcontroller that controls the compute queues on the
+ GFX/compute engine.
+
+ MES (MicroEngine Scheduler)
+ This is the engine for managing queues. For more details check
+ :ref:`MicroEngine Scheduler (MES) <amdgpu-mes>`.
+
+RLC (RunList Controller)
+ This is another microcontroller in the GFX/Compute engine. It handles
+ power management related functionality within the GFX/Compute engine.
+ The name is a vestige of old hardware where it was originally added
+ and doesn't really have much relation to what the engine does now.
+
+.. toctree::
+
+ mes.rst
diff --git a/Documentation/gpu/amdgpu/gc/mes.rst b/Documentation/gpu/amdgpu/gc/mes.rst
new file mode 100644
index 000000000000..b99eb211b179
--- /dev/null
+++ b/Documentation/gpu/amdgpu/gc/mes.rst
@@ -0,0 +1,38 @@
+.. _amdgpu-mes:
+
+=============================
+ MicroEngine Scheduler (MES)
+=============================
+
+.. note::
+   Queue and ring buffer are used as synonyms.
+
+.. note::
+ This section assumes that you are familiar with the concept of Pipes, Queues, and GC.
+ If not, check :ref:`GFX, Compute, and SDMA Overall Behavior<pipes-and-queues-description>`
+ and :ref:`drm/amdgpu - Graphics and Compute (GC) <amdgpu-gc>`.
+
+Every GFX block has a pipe component with one or more hardware queues. Pipes
+can switch between queues depending on certain conditions, and one of the
+components that can request a queue switch on a pipe is the MicroEngine
+Scheduler (MES). When the driver is initialized, it creates one MQD per
+hardware queue, and the MQDs are then handed to the MES firmware, which maps
+them as one of the following:
+
+1. Kernel Queues (legacy): These queues are statically mapped to HQDs and never
+   preempted. Even though this is a legacy feature, it is the current default,
+   and most existing hardware supports it. When an application submits work to
+   the kernel driver, it submits all of its command buffers to the kernel
+   queues. The CS IOCTL takes the command buffers from the application and
+   schedules them on the kernel queue.
+
+2. User Queues: These queues are dynamically mapped to HQDs. The userspace
+   application creates its own user queues and submits work directly to them,
+   with no need for an IOCTL on each submission and no need to share a single
+   kernel queue.
+
+In the case of User Queues, MES dynamically maps them to HQDs. If there are
+more MQDs than HQDs, the MES firmware preempts other user queues to make sure
+each queue gets a time slice; in other words, MES is a microcontroller that
+handles the mapping and unmapping of MQDs into HQDs, as well as the
+priorities and oversubscription of MQDs.
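+
+A simplified sketch of this oversubscription behavior is shown below; the
+types and helpers are placeholders for illustration, not the real MES
+firmware interface:
+
+.. code-block:: c
+
+   #include <stddef.h>
+
+   struct mqd;                                          /* opaque placeholder */
+   struct hqd { struct mqd *mapped; };
+
+   static void unmap_queue(struct hqd *h)               { h->mapped = NULL; }
+   static void map_queue(struct hqd *h, struct mqd *m)  { h->mapped = m; }
+
+   /* Round-robin the runnable MQDs through the HQDs so that every queue
+    * eventually gets a time slice. */
+   static void schedule_tick(struct mqd **runnable, int n_mqd,
+                             struct hqd *hqds, int n_hqd)
+   {
+           static int next;
+
+           for (int i = 0; i < n_hqd; i++) {
+                   unmap_queue(&hqds[i]);
+                   map_queue(&hqds[i], runnable[next]);
+                   next = (next + 1) % n_mqd;
+           }
+   }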
diff --git a/Documentation/gpu/amdgpu/index.rst b/Documentation/gpu/amdgpu/index.rst
index 302d039928ee..bb2894b5edaf 100644
--- a/Documentation/gpu/amdgpu/index.rst
+++ b/Documentation/gpu/amdgpu/index.rst
@@ -7,8 +7,10 @@ Next (GCN), Radeon DNA (RDNA), and Compute DNA (CDNA) architectures.
.. toctree::
- module-parameters
driver-core
+ amd-hardware-list-info
+ module-parameters
+ gc/index
display/index
flashing
xgmi
@@ -16,5 +18,6 @@ Next (GCN), Radeon DNA (RDNA), and Compute DNA (CDNA) architectures.
thermal
driver-misc
debugging
+ debugfs
process-isolation
amdgpu-glossary
diff --git a/Documentation/gpu/amdgpu/pipe_and_queue_abstraction.svg b/Documentation/gpu/amdgpu/pipe_and_queue_abstraction.svg
new file mode 100644
index 000000000000..0df3c6b3000b
--- /dev/null
+++ b/Documentation/gpu/amdgpu/pipe_and_queue_abstraction.svg
@@ -0,0 +1,1279 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ width="395.47891mm"
+ height="234.73715mm"
+ viewBox="0 0 395.47891 234.73714"
+ version="1.1"
+ id="svg1"
+ inkscape:version="1.4 (e7c3feb100, 2024-10-09)"
+ sodipodi:docname="pipe_and_queue_abstraction.svg"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:svg="http://www.w3.org/2000/svg">
+ <sodipodi:namedview
+ id="namedview1"
+ pagecolor="#ffffff"
+ bordercolor="#000000"
+ borderopacity="0.25"
+ inkscape:showpageshadow="2"
+ inkscape:pageopacity="0.0"
+ inkscape:pagecheckerboard="0"
+ inkscape:deskcolor="#d1d1d1"
+ inkscape:document-units="mm"
+ inkscape:zoom="1.6489689"
+ inkscape:cx="713.17296"
+ inkscape:cy="466.65527"
+ inkscape:window-width="3840"
+ inkscape:window-height="2083"
+ inkscape:window-x="0"
+ inkscape:window-y="0"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="layer1" />
+ <defs
+ id="defs1">
+ <marker
+ style="overflow:visible"
+ id="Dot"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Dot"
+ markerWidth="1"
+ markerHeight="1"
+ viewBox="0 0 1 1"
+ inkscape:isstock="true"
+ inkscape:collect="always"
+ preserveAspectRatio="xMidYMid">
+ <path
+ transform="scale(0.5)"
+ style="fill:context-stroke;fill-rule:evenodd;stroke:none"
+ d="M 5,0 C 5,2.76 2.76,5 0,5 -2.76,5 -5,2.76 -5,0 c 0,-2.76 2.3,-5 5,-5 2.76,0 5,2.24 5,5 z"
+ sodipodi:nodetypes="sssss"
+ id="path110" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker109"
+ refX="0"
+ refY="0"
+ orient="auto-start-reverse"
+ inkscape:stockid="Stylized triangle arrow"
+ markerWidth="1"
+ markerHeight="1"
+ viewBox="0 0 1 1"
+ inkscape:isstock="true"
+ inkscape:collect="always"
+ preserveAspectRatio="xMidYMid">
+ <path
+ transform="scale(0.5)"
+ style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
+ d="m 6,0 c -3,1 -7,3 -9,5 0,0 0,-4 2,-5 -2,-1 -2,-5 -2,-5 2,2 6,4 9,5 z"
+ id="path109" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="ArrowTriangleStylized"
+ refX="0"
+ refY="0"
+ orient="auto-start-reverse"
+ inkscape:stockid="Stylized triangle arrow"
+ markerWidth="1"
+ markerHeight="1"
+ viewBox="0 0 1 1"
+ inkscape:isstock="true"
+ inkscape:collect="always"
+ preserveAspectRatio="xMidYMid">
+ <path
+ transform="scale(0.5)"
+ style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
+ d="m 6,0 c -3,1 -7,3 -9,5 0,0 0,-4 2,-5 -2,-1 -2,-5 -2,-5 2,2 6,4 9,5 z"
+ id="path108" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="ArrowWide"
+ refX="0"
+ refY="0"
+ orient="auto-start-reverse"
+ inkscape:stockid="Wide arrow"
+ markerWidth="1"
+ markerHeight="1"
+ viewBox="0 0 1 1"
+ inkscape:isstock="true"
+ inkscape:collect="always"
+ preserveAspectRatio="xMidYMid">
+ <path
+ style="fill:none;stroke:context-stroke;stroke-width:1;stroke-linecap:butt"
+ d="M 3,-3 0,0 3,3"
+ transform="rotate(180,0.125,0)"
+ sodipodi:nodetypes="ccc"
+ id="path1" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="Triangle"
+ refX="0"
+ refY="0"
+ orient="auto-start-reverse"
+ inkscape:stockid="Triangle arrow"
+ markerWidth="1"
+ markerHeight="1"
+ viewBox="0 0 1 1"
+ inkscape:isstock="true"
+ inkscape:collect="always"
+ preserveAspectRatio="xMidYMid">
+ <path
+ transform="scale(0.5)"
+ style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ id="path135" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="ArrowWideHeavy"
+ refX="0"
+ refY="0"
+ orient="auto-start-reverse"
+ inkscape:stockid="Wide, heavy arrow"
+ markerWidth="1"
+ markerHeight="1"
+ viewBox="0 0 1 1"
+ inkscape:isstock="true"
+ inkscape:collect="always"
+ preserveAspectRatio="xMidYMid">
+ <path
+ style="fill:context-stroke;fill-rule:evenodd;stroke:none"
+ d="m 1,0 -3,3 h -2 l 3,-3 -3,-3 h 2 z"
+ id="path71" />
+ </marker>
+ </defs>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(149.03517,55.110629)">
+ <circle
+ style="fill:#ffeeaa;fill-opacity:1;stroke:#1a1a1a;stroke-width:0.733436;stroke-dasharray:none;stroke-dashoffset:0"
+ id="path98"
+ cx="-35.757576"
+ cy="-10.495151"
+ r="44.24876" />
+ <rect
+ style="fill:none;stroke:#000000;stroke-width:0.878057;stroke-dasharray:none"
+ id="rect1"
+ width="167.79619"
+ height="24.831829"
+ x="14.21942"
+ y="57.862854" />
+ <g
+ id="g11"
+ transform="translate(24.021362,-46.545299)">
+ <rect
+ style="fill:none;stroke:#006680;stroke-width:0.459999;stroke-dasharray:none"
+ id="rect2"
+ width="131.09708"
+ height="5.8163381"
+ x="23.245802"
+ y="107.16314" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 37.884707,107.26337 v 5.53565"
+ id="path2"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 52.466233,107.26337 v 5.53565"
+ id="path3"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 67.047758,107.26337 v 5.53565"
+ id="path4"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 81.629283,107.26337 v 5.53565"
+ id="path5"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 96.210809,107.26337 v 5.53565"
+ id="path6"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 110.79233,107.26337 v 5.53565"
+ id="path7"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 125.37386,107.26337 v 5.53565"
+ id="path8"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 139.95538,107.26337 v 5.53565"
+ id="path9"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-size:3.175px;text-align:start;writing-mode:lr-tb;direction:ltr;text-anchor:start;fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ x="86.713425"
+ y="110.1963"
+ id="text11"><tspan
+ sodipodi:role="line"
+ id="tspan11"
+ style="stroke:#006680;stroke-width:0.5"
+ x="86.713425"
+ y="110.1963">. . .</tspan></text>
+ </g>
+ <g
+ id="g18"
+ transform="translate(24.021362,-32.25779)">
+ <rect
+ style="fill:none;stroke:#006680;stroke-width:0.459999;stroke-dasharray:none"
+ id="rect11"
+ width="131.09708"
+ height="5.8163381"
+ x="23.245802"
+ y="107.16314" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 37.884707,107.26337 v 5.53565"
+ id="path11"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 52.466233,107.26337 v 5.53565"
+ id="path12"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 67.047758,107.26337 v 5.53565"
+ id="path13"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 81.629283,107.26337 v 5.53565"
+ id="path14"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 96.210809,107.26337 v 5.53565"
+ id="path15"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 110.79233,107.26337 v 5.53565"
+ id="path16"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 125.37386,107.26337 v 5.53565"
+ id="path17"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 139.95538,107.26337 v 5.53565"
+ id="path18"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-size:3.175px;text-align:start;writing-mode:lr-tb;direction:ltr;text-anchor:start;fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ x="86.713425"
+ y="110.1963"
+ id="text18"><tspan
+ sodipodi:role="line"
+ id="tspan18"
+ style="stroke:#006680;stroke-width:0.5"
+ x="86.713425"
+ y="110.1963">. . .</tspan></text>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-size:3.175px;text-align:start;writing-mode:lr-tb;direction:ltr;text-anchor:start;fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ x="113.37768"
+ y="68.463142"
+ id="text19"><tspan
+ sodipodi:role="line"
+ id="tspan19"
+ style="writing-mode:tb-rl;stroke-width:0.5"
+ x="113.37768"
+ y="68.463142">. . .</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="25.005701"
+ y="55.308445"
+ id="text844-2-9"><tspan
+ sodipodi:role="line"
+ x="25.005701"
+ y="55.308445"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan2868">Pipe[0]</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-74.441521"
+ y="63.075123"
+ id="text844-2-9-4"><tspan
+ sodipodi:role="line"
+ x="-74.441521"
+ y="63.075123"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan2868-7">MQD</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="30.264952"
+ y="65.490654"
+ id="text20"><tspan
+ sodipodi:role="line"
+ x="30.264952"
+ y="65.490654"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan20">Queue[0]</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="30.264952"
+ y="79.249001"
+ id="text21"><tspan
+ sodipodi:role="line"
+ x="30.264952"
+ y="79.249001"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan21">Queue[n]</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="30.264952"
+ y="71.84066"
+ id="text22"><tspan
+ sodipodi:role="line"
+ x="30.264952"
+ y="71.84066"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan22">...</tspan></text>
+ <g
+ id="g71"
+ transform="translate(-23.283342)">
+ <rect
+ style="fill:#ffffff;fill-opacity:0;stroke:#000000;stroke-width:0.5;stroke-dasharray:0.5, 0.5;stroke-dashoffset:0"
+ id="rect67"
+ width="18.533583"
+ height="114.96632"
+ x="250.9435"
+ y="54.754276"
+ ry="6.0427966" />
+ </g>
+ <rect
+ style="fill:#1a1a1a;fill-opacity:0;stroke:#000000;stroke-width:0.499999;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect68"
+ width="188.21231"
+ height="139.5948"
+ x="4.0113592"
+ y="37.597778"
+ ry="0" />
+ <g
+ id="g43"
+ transform="translate(0,40.745853)">
+ <rect
+ style="fill:none;stroke:#000000;stroke-width:0.878057;stroke-dasharray:none"
+ id="rect22"
+ width="167.79619"
+ height="24.831829"
+ x="14.21942"
+ y="57.862854" />
+ <g
+ id="g30"
+ transform="translate(24.021362,-46.545299)">
+ <rect
+ style="fill:none;stroke:#006680;stroke-width:0.459999;stroke-dasharray:none"
+ id="rect23"
+ width="131.09708"
+ height="5.8163381"
+ x="23.245802"
+ y="107.16314" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 37.884707,107.26337 v 5.53565"
+ id="path23"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 52.466233,107.26337 v 5.53565"
+ id="path24"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 67.047758,107.26337 v 5.53565"
+ id="path25"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 81.629283,107.26337 v 5.53565"
+ id="path26"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 96.210809,107.26337 v 5.53565"
+ id="path27"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 110.79233,107.26337 v 5.53565"
+ id="path28"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 125.37386,107.26337 v 5.53565"
+ id="path29"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 139.95538,107.26337 v 5.53565"
+ id="path30"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-size:3.175px;text-align:start;writing-mode:lr-tb;direction:ltr;text-anchor:start;fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ x="86.713425"
+ y="110.1963"
+ id="text30"><tspan
+ sodipodi:role="line"
+ id="tspan30"
+ style="stroke:#006680;stroke-width:0.5"
+ x="86.713425"
+ y="110.1963">. . .</tspan></text>
+ </g>
+ <g
+ id="g38"
+ transform="translate(24.021362,-32.25779)">
+ <rect
+ style="fill:none;stroke:#006680;stroke-width:0.459999;stroke-dasharray:none"
+ id="rect30"
+ width="131.09708"
+ height="5.8163381"
+ x="23.245802"
+ y="107.16314" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 37.884707,107.26337 v 5.53565"
+ id="path31"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 52.466233,107.26337 v 5.53565"
+ id="path32"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 67.047758,107.26337 v 5.53565"
+ id="path33"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 81.629283,107.26337 v 5.53565"
+ id="path34"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 96.210809,107.26337 v 5.53565"
+ id="path35"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 110.79233,107.26337 v 5.53565"
+ id="path36"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 125.37386,107.26337 v 5.53565"
+ id="path37"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 139.95538,107.26337 v 5.53565"
+ id="path38"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-size:3.175px;text-align:start;writing-mode:lr-tb;direction:ltr;text-anchor:start;fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ x="86.713425"
+ y="110.1963"
+ id="text38"><tspan
+ sodipodi:role="line"
+ id="tspan38"
+ style="stroke:#006680;stroke-width:0.5"
+ x="86.713425"
+ y="110.1963">. . .</tspan></text>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-size:3.175px;text-align:start;writing-mode:lr-tb;direction:ltr;text-anchor:start;fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ x="113.37768"
+ y="68.463142"
+ id="text39"><tspan
+ sodipodi:role="line"
+ id="tspan39"
+ style="writing-mode:tb-rl;stroke-width:0.5"
+ x="113.37768"
+ y="68.463142">. . .</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="25.005701"
+ y="55.308445"
+ id="text40"><tspan
+ sodipodi:role="line"
+ x="25.005701"
+ y="55.308445"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan40">Pipe[1]</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="30.264952"
+ y="65.490654"
+ id="text41"><tspan
+ sodipodi:role="line"
+ x="30.264952"
+ y="65.490654"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan41">Queue[0]</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="30.264952"
+ y="79.249001"
+ id="text42"><tspan
+ sodipodi:role="line"
+ x="30.264952"
+ y="79.249001"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan42">Queue[n]</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="30.264952"
+ y="71.84066"
+ id="text43"><tspan
+ sodipodi:role="line"
+ x="30.264952"
+ y="71.84066"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan43">...</tspan></text>
+ </g>
+ <g
+ id="g64"
+ transform="translate(0,85.195881)">
+ <rect
+ style="fill:none;stroke:#000000;stroke-width:0.878057;stroke-dasharray:none"
+ id="rect43"
+ width="167.79619"
+ height="24.831829"
+ x="14.21942"
+ y="57.862854" />
+ <g
+ id="g51"
+ transform="translate(24.021362,-46.545299)">
+ <rect
+ style="fill:none;stroke:#006680;stroke-width:0.459999;stroke-dasharray:none"
+ id="rect44"
+ width="131.09708"
+ height="5.8163381"
+ x="23.245802"
+ y="107.16314" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 37.884707,107.26337 v 5.53565"
+ id="path44"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 52.466233,107.26337 v 5.53565"
+ id="path45"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 67.047758,107.26337 v 5.53565"
+ id="path46"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 81.629283,107.26337 v 5.53565"
+ id="path47"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 96.210809,107.26337 v 5.53565"
+ id="path48"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 110.79233,107.26337 v 5.53565"
+ id="path49"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 125.37386,107.26337 v 5.53565"
+ id="path50"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 139.95538,107.26337 v 5.53565"
+ id="path51"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-size:3.175px;text-align:start;writing-mode:lr-tb;direction:ltr;text-anchor:start;fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ x="86.713425"
+ y="110.1963"
+ id="text51"><tspan
+ sodipodi:role="line"
+ id="tspan51"
+ style="stroke:#006680;stroke-width:0.5"
+ x="86.713425"
+ y="110.1963">. . .</tspan></text>
+ </g>
+ <g
+ id="g59"
+ transform="translate(24.021362,-32.25779)">
+ <rect
+ style="fill:none;stroke:#006680;stroke-width:0.459999;stroke-dasharray:none"
+ id="rect51"
+ width="131.09708"
+ height="5.8163381"
+ x="23.245802"
+ y="107.16314" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 37.884707,107.26337 v 5.53565"
+ id="path52"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 52.466233,107.26337 v 5.53565"
+ id="path53"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 67.047758,107.26337 v 5.53565"
+ id="path54"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 81.629283,107.26337 v 5.53565"
+ id="path55"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 96.210809,107.26337 v 5.53565"
+ id="path56"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 110.79233,107.26337 v 5.53565"
+ id="path57"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 125.37386,107.26337 v 5.53565"
+ id="path58"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ d="m 139.95538,107.26337 v 5.53565"
+ id="path59"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-size:3.175px;text-align:start;writing-mode:lr-tb;direction:ltr;text-anchor:start;fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ x="86.713425"
+ y="110.1963"
+ id="text59"><tspan
+ sodipodi:role="line"
+ id="tspan59"
+ style="stroke:#006680;stroke-width:0.5"
+ x="86.713425"
+ y="110.1963">. . .</tspan></text>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-size:3.175px;text-align:start;writing-mode:lr-tb;direction:ltr;text-anchor:start;fill:none;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ x="113.37768"
+ y="68.463142"
+ id="text60"><tspan
+ sodipodi:role="line"
+ id="tspan60"
+ style="writing-mode:tb-rl;stroke-width:0.5"
+ x="113.37768"
+ y="68.463142">. . .</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="25.005701"
+ y="55.308445"
+ id="text61"><tspan
+ sodipodi:role="line"
+ x="25.005701"
+ y="55.308445"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan61">Pipe[n]</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="30.264952"
+ y="65.490654"
+ id="text62"><tspan
+ sodipodi:role="line"
+ x="30.264952"
+ y="65.490654"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan62">Queue[0]</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="30.264952"
+ y="79.249001"
+ id="text63"><tspan
+ sodipodi:role="line"
+ x="30.264952"
+ y="79.249001"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan63">Queue[n]</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="30.264952"
+ y="71.84066"
+ id="text64"><tspan
+ sodipodi:role="line"
+ x="30.264952"
+ y="71.84066"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan64">...</tspan></text>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:7.76111px;font-family:Serif;-inkscape-font-specification:Serif;text-align:start;writing-mode:lr-tb;direction:ltr;text-anchor:start;fill:#000000;fill-opacity:0;stroke:#006680;stroke-width:0.499999;stroke-dasharray:none"
+ x="92.18071"
+ y="128.21965"
+ id="text65"><tspan
+ sodipodi:role="line"
+ id="tspan65"
+ style="font-size:7.76111px;writing-mode:tb-rl;fill:#1a1a1a;stroke:#000000;stroke-width:0.5"
+ x="92.18071"
+ y="128.21965">...</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:7.7611px;font-family:Serif;-inkscape-font-specification:Serif;text-align:start;writing-mode:tb-rl;direction:ltr;text-anchor:start;fill:#1a1a1a;fill-opacity:0;stroke:#000000;stroke-width:0.499999;stroke-dasharray:none"
+ x="198.72205"
+ y="80.708267"
+ id="text66"><tspan
+ sodipodi:role="line"
+ id="tspan66"
+ style="stroke-width:0.5"
+ x="198.72205"
+ y="80.708267" /></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="97.905846"
+ y="44.725101"
+ id="text68"><tspan
+ sodipodi:role="line"
+ x="97.905846"
+ y="44.725101"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan68">Hardware Block</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;writing-mode:vertical-lr;text-orientation:upright;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="236.36934"
+ y="112.10503"
+ id="text68-7"><tspan
+ sodipodi:role="line"
+ x="236.36934"
+ y="112.10503"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;writing-mode:vertical-lr;text-orientation:upright;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan68-4">EXECUTION</tspan></text>
+ <rect
+ style="fill:#1a1a1a;fill-opacity:0;stroke:#000000;stroke-width:0.940575;stroke-dasharray:7.5246, 0.940575;stroke-dashoffset:0"
+ id="rect68-1"
+ width="68.749969"
+ height="141.2751"
+ x="-129.49162"
+ y="37.881134"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-114.4223"
+ y="44.966106"
+ id="text68-2"><tspan
+ sodipodi:role="line"
+ x="-114.4223"
+ y="44.966106"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan68-9">Memory</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:12.7px;font-family:Serif;-inkscape-font-specification:Serif;text-align:start;writing-mode:tb-rl;direction:ltr;text-orientation:upright;text-anchor:start;fill:#1a1a1a;fill-opacity:0;stroke:#000000;stroke-width:0.499999;stroke-dasharray:none;stroke-dashoffset:0"
+ x="212.6013"
+ y="64.823341"
+ id="text69"><tspan
+ sodipodi:role="line"
+ id="tspan69"
+ style="font-size:12.7px;stroke-width:0.5"
+ x="212.6013"
+ y="64.823341" /></text>
+ <g
+ id="g72">
+ <path
+ style="font-weight:bold;font-size:16.9333px;line-height:1.25;-inkscape-font-specification:'sans-serif Bold';text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;stroke-width:0.0690111"
+ d="m 185.24734,83.96512 h 1.28693 c 1.20226,0.01693 2.09973,-1.015998 2.0828,-2.370662 v -9.393759 c -0.0169,-0.711199 0.33866,-1.066798 1.10066,-1.066798 h 0.2032 0.0677 v -1.557864 c -1.0668,0.01693 -1.38853,-0.270933 -1.37159,-1.236131 V 58.96308 c 0.0339,-1.354664 -0.88054,-2.387595 -2.0828,-2.370662 h -1.28693 v 1.676397 h 0.62653 c 0.57573,0 0.77893,0.321733 0.762,1.100664 v 9.122827 c 0,1.219198 0.44027,1.710263 1.64253,1.862663 -1.20226,0.135466 -1.64253,0.626532 -1.64253,1.84573 v 9.122826 c 0.0169,0.660399 -0.23707,0.965198 -0.762,0.965198 h -0.62653 z"
+ id="text70"
+ aria-label="}"
+ sodipodi:nodetypes="ccccsccccccccscscscscc" />
+ <path
+ style="fill:#1a1a1a;fill-opacity:0;stroke:#000000;stroke-width:1.25;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#ArrowWideHeavy)"
+ d="M 190.40199,70.278769 H 224.6663"
+ id="path70" />
+ </g>
+ <path
+ style="font-weight:bold;font-size:16.9333px;line-height:1.25;-inkscape-font-specification:'sans-serif Bold';text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;stroke-width:0.0690111"
+ d="m -88.44114,74.337891 h 1.28693 c 1.20226,0.01693 2.09973,-1.015998 2.0828,-2.370662 V 62.57347 c -0.0169,-0.711199 0.33866,-1.066798 1.10066,-1.066798 h 0.2032 0.0677 v -1.557864 c -1.0668,0.01693 -1.38853,-0.270933 -1.37159,-1.236131 v -9.376826 c 0.0339,-1.354664 -0.88054,-2.387595 -2.0828,-2.370662 h -1.28693 v 1.676397 h 0.62653 c 0.57573,0 0.77893,0.321733 0.762,1.100664 v 9.122827 c 0,1.219198 0.44027,1.710263 1.64253,1.862663 -1.20226,0.135466 -1.64253,0.626532 -1.64253,1.84573 v 9.122826 c 0.0169,0.660399 -0.23707,0.965198 -0.762,0.965198 h -0.62653 z"
+ id="text70-1"
+ aria-label="}"
+ sodipodi:nodetypes="ccccsccccccccscscscscc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16.9333px;font-family:Serif;-inkscape-font-specification:Serif;text-align:start;writing-mode:tb-rl;direction:ltr;text-orientation:upright;text-anchor:start;fill:#1a1a1a;fill-opacity:0;stroke:#000000;stroke-width:1.25;stroke-dasharray:none;stroke-dashoffset:0"
+ x="200.88817"
+ y="38.990276"
+ id="text72"><tspan
+ sodipodi:role="line"
+ id="tspan72"
+ style="stroke-width:1.25"
+ x="200.88817"
+ y="38.990276" /></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:6.35px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="205.12828"
+ y="60.090775"
+ id="text73"><tspan
+ sodipodi:role="line"
+ x="205.12828"
+ y="60.090775"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:6.35px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan73">e.g.,:</tspan><tspan
+ sodipodi:role="line"
+ x="205.12828"
+ y="68.028275"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:6.35px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan74">queue[0]</tspan></text>
+ <g
+ id="g75"
+ transform="translate(0,40.745853)">
+ <path
+ style="font-weight:bold;font-size:16.9333px;line-height:1.25;-inkscape-font-specification:'sans-serif Bold';text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;stroke-width:0.0690111"
+ d="m 185.24734,83.96512 h 1.28693 c 1.20226,0.01693 2.09973,-1.015998 2.0828,-2.370662 v -9.393759 c -0.0169,-0.711199 0.33866,-1.066798 1.10066,-1.066798 h 0.2032 0.0677 v -1.557864 c -1.0668,0.01693 -1.38853,-0.270933 -1.37159,-1.236131 V 58.96308 c 0.0339,-1.354664 -0.88054,-2.387595 -2.0828,-2.370662 h -1.28693 v 1.676397 h 0.62653 c 0.57573,0 0.77893,0.321733 0.762,1.100664 v 9.122827 c 0,1.219198 0.44027,1.710263 1.64253,1.862663 -1.20226,0.135466 -1.64253,0.626532 -1.64253,1.84573 v 9.122826 c 0.0169,0.660399 -0.23707,0.965198 -0.762,0.965198 h -0.62653 z"
+ id="path74"
+ aria-label="}"
+ sodipodi:nodetypes="ccccsccccccccscscscscc" />
+ <path
+ style="fill:#1a1a1a;fill-opacity:0;stroke:#000000;stroke-width:1.25;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#ArrowWideHeavy)"
+ d="M 190.40199,70.278769 H 224.6663"
+ id="path75" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:6.35px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="205.12828"
+ y="100.83664"
+ id="text76"><tspan
+ sodipodi:role="line"
+ x="205.12828"
+ y="100.83664"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:6.35px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan75">e.g.,:</tspan><tspan
+ sodipodi:role="line"
+ x="205.12828"
+ y="108.77414"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:6.35px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan76">queue[4]</tspan></text>
+ <g
+ id="g77"
+ transform="translate(0,85.725048)">
+ <path
+ style="font-weight:bold;font-size:16.9333px;line-height:1.25;-inkscape-font-specification:'sans-serif Bold';text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;stroke-width:0.0690111"
+ d="m 185.24734,83.96512 h 1.28693 c 1.20226,0.01693 2.09973,-1.015998 2.0828,-2.370662 v -9.393759 c -0.0169,-0.711199 0.33866,-1.066798 1.10066,-1.066798 h 0.2032 0.0677 v -1.557864 c -1.0668,0.01693 -1.38853,-0.270933 -1.37159,-1.236131 V 58.96308 c 0.0339,-1.354664 -0.88054,-2.387595 -2.0828,-2.370662 h -1.28693 v 1.676397 h 0.62653 c 0.57573,0 0.77893,0.321733 0.762,1.100664 v 9.122827 c 0,1.219198 0.44027,1.710263 1.64253,1.862663 -1.20226,0.135466 -1.64253,0.626532 -1.64253,1.84573 v 9.122826 c 0.0169,0.660399 -0.23707,0.965198 -0.762,0.965198 h -0.62653 z"
+ id="path76"
+ aria-label="}"
+ sodipodi:nodetypes="ccccsccccccccscscscscc" />
+ <path
+ style="fill:#1a1a1a;fill-opacity:0;stroke:#000000;stroke-width:1.25;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#ArrowWideHeavy)"
+ d="M 190.40199,70.278769 H 224.6663"
+ id="path77" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:6.35px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="205.12828"
+ y="145.81558"
+ id="text78"><tspan
+ sodipodi:role="line"
+ x="205.12828"
+ y="145.81558"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:6.35px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan77">e.g.,:</tspan><tspan
+ sodipodi:role="line"
+ x="205.12828"
+ y="153.75308"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:6.35px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan78">queue[n]</tspan></text>
+ <g
+ id="g81">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-18.502264"
+ y="65.642387"
+ id="text79"><tspan
+ sodipodi:role="line"
+ x="-18.502264"
+ y="65.642387"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;fill:#800000;stroke-width:0.0690111"
+ id="tspan79">HQD</tspan></text>
+ <ellipse
+ style="fill:#800000;fill-opacity:0;stroke:#800000;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="path79"
+ cx="-18.407015"
+ cy="63.2188"
+ rx="13.317666"
+ ry="4.4124799" />
+ <path
+ style="fill:#800000;fill-opacity:0;stroke:#800000;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#Triangle)"
+ d="M -4.6361224,63.2188 H 10.606988"
+ id="path80" />
+ </g>
+ <g
+ id="g82"
+ transform="translate(0,14.287503)">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-18.502264"
+ y="65.642387"
+ id="text81"><tspan
+ sodipodi:role="line"
+ x="-18.502264"
+ y="65.642387"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;fill:#800000;stroke-width:0.0690111"
+ id="tspan81">HQD</tspan></text>
+ <ellipse
+ style="fill:#800000;fill-opacity:0;stroke:#800000;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="ellipse81"
+ cx="-18.407015"
+ cy="63.2188"
+ rx="13.317666"
+ ry="4.4124799" />
+ <path
+ style="fill:#800000;fill-opacity:0;stroke:#800000;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#Triangle)"
+ d="M -4.6361224,63.2188 H 10.606988"
+ id="path81" />
+ </g>
+ <g
+ id="g83"
+ transform="translate(0,40.745853)">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-18.502264"
+ y="65.642387"
+ id="text82"><tspan
+ sodipodi:role="line"
+ x="-18.502264"
+ y="65.642387"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;fill:#800000;stroke-width:0.0690111"
+ id="tspan82">HQD</tspan></text>
+ <ellipse
+ style="fill:#800000;fill-opacity:0;stroke:#800000;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="ellipse82"
+ cx="-18.407015"
+ cy="63.2188"
+ rx="13.317666"
+ ry="4.4124799" />
+ <path
+ style="fill:#800000;fill-opacity:0;stroke:#800000;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#Triangle)"
+ d="M -4.6361224,63.2188 H 10.606988"
+ id="path82" />
+ </g>
+ <g
+ id="g84"
+ transform="translate(0,55.033362)">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-18.502264"
+ y="65.642387"
+ id="text83"><tspan
+ sodipodi:role="line"
+ x="-18.502264"
+ y="65.642387"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;fill:#800000;stroke-width:0.0690111"
+ id="tspan83">HQD</tspan></text>
+ <ellipse
+ style="fill:#800000;fill-opacity:0;stroke:#800000;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="ellipse83"
+ cx="-18.407015"
+ cy="63.2188"
+ rx="13.317666"
+ ry="4.4124799" />
+ <path
+ style="fill:#800000;fill-opacity:0;stroke:#800000;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#Triangle)"
+ d="M -4.6361224,63.2188 H 10.606988"
+ id="path83" />
+ </g>
+ <g
+ id="g85"
+ transform="translate(0,85.195881)">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-18.502264"
+ y="65.642387"
+ id="text84"><tspan
+ sodipodi:role="line"
+ x="-18.502264"
+ y="65.642387"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;fill:#800000;stroke-width:0.0690111"
+ id="tspan84">HQD</tspan></text>
+ <ellipse
+ style="fill:#800000;fill-opacity:0;stroke:#800000;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="ellipse84"
+ cx="-18.407015"
+ cy="63.2188"
+ rx="13.317666"
+ ry="4.4124799" />
+ <path
+ style="fill:#800000;fill-opacity:0;stroke:#800000;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#Triangle)"
+ d="M -4.6361224,63.2188 H 10.606988"
+ id="path84" />
+ </g>
+ <g
+ id="g86"
+ transform="translate(0,99.48339)">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-18.502264"
+ y="65.642387"
+ id="text85"><tspan
+ sodipodi:role="line"
+ x="-18.502264"
+ y="65.642387"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;fill:#800000;stroke-width:0.0690111"
+ id="tspan85">HQD</tspan></text>
+ <ellipse
+ style="fill:#800000;fill-opacity:0;stroke:#800000;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="ellipse85"
+ cx="-18.407015"
+ cy="63.2188"
+ rx="13.317666"
+ ry="4.4124799" />
+ <path
+ style="fill:#800000;fill-opacity:0;stroke:#800000;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#Triangle)"
+ d="M -4.6361224,63.2188 H 10.606988"
+ id="path85" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:5.64444px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-35.2131"
+ y="54.673237"
+ id="text86"><tspan
+ sodipodi:role="line"
+ x="-35.2131"
+ y="54.673237"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:5.64444px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan86">Registers</tspan></text>
+ <path
+ style="fill:#800000;fill-opacity:0;stroke:#1a1a1a;stroke-width:1;stroke-dasharray:1, 1;stroke-dashoffset:0;marker-end:url(#ArrowWide)"
+ d="m -45.247972,57.442462 v 5.888987 h 11.344412"
+ id="path86"
+ sodipodi:nodetypes="ccc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-74.441521"
+ y="82.441582"
+ id="text87"><tspan
+ sodipodi:role="line"
+ x="-74.441521"
+ y="82.441582"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan87">MQD</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-74.441521"
+ y="99.240776"
+ id="text88"><tspan
+ sodipodi:role="line"
+ x="-74.441521"
+ y="99.240776"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan88">MQD</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-74.441521"
+ y="116.03998"
+ id="text89"><tspan
+ sodipodi:role="line"
+ x="-74.441521"
+ y="116.03998"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan89">MQD</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-74.441521"
+ y="132.83917"
+ id="text90"><tspan
+ sodipodi:role="line"
+ x="-74.441521"
+ y="132.83917"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan90">MQD</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-74.441521"
+ y="168.3002"
+ id="text91"><tspan
+ sodipodi:role="line"
+ x="-74.441521"
+ y="168.3002"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:7.05556px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan91">MQD</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.5833px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-74.441521"
+ y="148.1461"
+ id="text92"><tspan
+ sodipodi:role="line"
+ x="-74.441521"
+ y="148.1461"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:10.5833px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;writing-mode:tb-rl;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan92">...</tspan></text>
+ <g
+ id="g97"
+ transform="translate(-5.8208336)">
+ <rect
+ style="fill:#aaffcc;fill-opacity:1;stroke:#1a1a1a;stroke-width:0.5;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect96"
+ width="58.726093"
+ height="27.598055"
+ x="-142.96434"
+ y="46.852512" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333px;line-height:1.25;font-family:sans-serif;text-align:start;letter-spacing:0px;word-spacing:0px;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-139.74622"
+ y="52.62756"
+ id="text93"><tspan
+ sodipodi:role="line"
+ x="-139.74622"
+ y="52.62756"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:4.23333px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:start;text-anchor:start;stroke-width:0.0690111"
+ id="tspan93">HQD Registers</tspan><tspan
+ sodipodi:role="line"
+ x="-139.74622"
+ y="57.91922"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:4.23333px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:start;text-anchor:start;stroke-width:0.0690111"
+ id="tspan95">Queue Address in the GPU</tspan><tspan
+ sodipodi:role="line"
+ x="-139.74622"
+ y="63.210884"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:4.23333px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:start;text-anchor:start;stroke-width:0.0690111"
+ id="tspan96">Doorbell</tspan><tspan
+ sodipodi:role="line"
+ x="-139.74622"
+ y="68.502548"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:4.23333px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:start;text-anchor:start;stroke-width:0.0690111"
+ id="tspan97">...</tspan><tspan
+ sodipodi:role="line"
+ x="-139.74622"
+ y="73.794212"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:4.23333px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:start;text-anchor:start;stroke-width:0.0690111"
+ id="tspan94" /></text>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333px;line-height:1.25;font-family:sans-serif;text-align:start;letter-spacing:0px;word-spacing:0px;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-61.293022"
+ y="-19.380915"
+ id="text93-1"><tspan
+ sodipodi:role="line"
+ x="-61.293022"
+ y="-19.380915"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:4.23333px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan104">SWITCH QUEUE:</tspan><tspan
+ sodipodi:role="line"
+ x="-61.293022"
+ y="-14.089252"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:4.23333px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:start;text-anchor:start;stroke-width:0.0690111"
+ id="tspan101">WAIT FOR HQD_ACTIVE = 0</tspan><tspan
+ sodipodi:role="line"
+ x="-61.293022"
+ y="-8.7975903"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:4.23333px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:start;text-anchor:start;stroke-width:0.0690111"
+ id="tspan105">SAVE QUEUE STATE TO THE MQD</tspan><tspan
+ sodipodi:role="line"
+ x="-61.293022"
+ y="-3.505928"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:4.23333px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:start;text-anchor:start;stroke-width:0.0690111"
+ id="tspan102">COPY NEW MQD STATE</tspan><tspan
+ sodipodi:role="line"
+ x="-61.293022"
+ y="1.7857342"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:4.23333px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:start;text-anchor:start;stroke-width:0.0690111"
+ id="tspan103">SET HQD_ACTIVE = 1</tspan></text>
+ <circle
+ style="fill:#ffeeaa;fill-opacity:1;stroke:#1a1a1a;stroke-width:1.88976;stroke-dasharray:none;stroke-dashoffset:0"
+ id="path97"
+ cx="0"
+ cy="0"
+ r="0"
+ transform="matrix(0.26458333,0,0,0.26458333,-149.03517,37.347779)" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:5.64444px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.0690111"
+ x="-36.659206"
+ y="-44.828983"
+ id="text106"><tspan
+ sodipodi:role="line"
+ x="-36.659206"
+ y="-44.828983"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:5.64444px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';text-align:center;text-anchor:middle;stroke-width:0.0690111"
+ id="tspan106">Firmware</tspan></text>
+ <path
+ style="fill:none;fill-opacity:1;stroke:#1a1a1a;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0;marker-start:url(#marker109);marker-end:url(#ArrowTriangleStylized)"
+ d="M -84.242601,-9.1838245 H -98.041629 V 33.17598"
+ id="path106" />
+ <path
+ style="fill:none;fill-opacity:1;stroke:#1a1a1a;stroke-width:1;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#Dot)"
+ d="M 9.9745536,-9.3442784 H 29.549918 V 37.170287"
+ id="path107"
+ sodipodi:nodetypes="ccc" />
+ </g>
+</svg>
diff --git a/Documentation/gpu/automated_testing.rst b/Documentation/gpu/automated_testing.rst
index 6d7c6086034d..62aa3ede02a5 100644
--- a/Documentation/gpu/automated_testing.rst
+++ b/Documentation/gpu/automated_testing.rst
@@ -115,6 +115,10 @@ created (eg. https://gitlab.freedesktop.org/janedoe/linux/-/pipelines)
5. The various jobs will be run and when the pipeline is finished, all jobs
should be green unless a regression has been found.
+6. Warnings in the pipeline indicate that lockdep issues
+   (see Documentation/locking/lockdep-design.rst) have been detected
+   during the tests.
+
How to update test expectations
===============================
diff --git a/Documentation/gpu/driver-uapi.rst b/Documentation/gpu/driver-uapi.rst
index 971cdb4816fc..1f15a8ca1265 100644
--- a/Documentation/gpu/driver-uapi.rst
+++ b/Documentation/gpu/driver-uapi.rst
@@ -27,3 +27,8 @@ drm/xe uAPI
===========
.. kernel-doc:: include/uapi/drm/xe_drm.h
+
+drm/asahi uAPI
+================
+
+.. kernel-doc:: include/uapi/drm/asahi_drm.h
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index b4ee25af1702..5139705089f2 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -233,6 +233,21 @@ Panel Self Refresh Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_self_refresh_helper.c
:export:
+HDMI Atomic State Helpers
+=========================
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/display/drm_hdmi_state_helper.c
+ :doc: hdmi helpers
+
+Functions Reference
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/display/drm_hdmi_state_helper.c
+ :export:
+
HDCP Helper Functions Reference
===============================
diff --git a/Documentation/gpu/nouveau.rst b/Documentation/gpu/nouveau.rst
index 0f34131ccc27..b8c801e0068c 100644
--- a/Documentation/gpu/nouveau.rst
+++ b/Documentation/gpu/nouveau.rst
@@ -27,3 +27,6 @@ GSP Support
.. kernel-doc:: drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
:doc: GSP message queue element
+
+.. kernel-doc:: drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+ :doc: GSP message handling policy
diff --git a/Documentation/gpu/nova/core/todo.rst b/Documentation/gpu/nova/core/todo.rst
index ca08377d3b73..8a459fc08812 100644
--- a/Documentation/gpu/nova/core/todo.rst
+++ b/Documentation/gpu/nova/core/todo.rst
@@ -102,7 +102,13 @@ Usage:
let boot0 = Boot0::read(&bar);
pr_info!("Revision: {}\n", boot0.revision());
+Note: a work-in-progress implementation currently resides in
+`drivers/gpu/nova-core/regs/macros.rs` and is used in nova-core. It would be
+nice to improve it (possibly using proc macros) and move it to the `kernel`
+crate so it can be used by other components as well.
+
| Complexity: Advanced
+| Contact: Alexandre Courbot
Delay / Sleep abstractions
--------------------------
@@ -190,16 +196,6 @@ Rust abstraction for debugfs APIs.
| Reference: Export GSP log buffers
| Complexity: Intermediate
-Vec extensions
---------------
-
-Implement ``Vec::truncate`` and ``Vec::resize``.
-
-Currently this is used for some experimental code to parse the vBIOS.
-
-| Reference vBIOS support
-| Complexity: Beginner
-
GPU (general)
=============
diff --git a/Documentation/gpu/rfc/i915_scheduler.rst b/Documentation/gpu/rfc/i915_scheduler.rst
index c237ebc024cd..2974525f0ac5 100644
--- a/Documentation/gpu/rfc/i915_scheduler.rst
+++ b/Documentation/gpu/rfc/i915_scheduler.rst
@@ -26,7 +26,7 @@ i915 with the DRM scheduler is:
which configures a slot with N contexts
* After I915_CONTEXT_ENGINES_EXT_PARALLEL a user can submit N batches to
a slot in a single execbuf IOCTL and the batches run on the GPU in
- paralllel
+ parallel
* Initially only for GuC submission but execlists can be supported if
needed
* Convert the i915 to use the DRM scheduler
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 256d0d1cb216..c57777a24e03 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -441,14 +441,15 @@ Contact: Thomas Zimmermann <tzimmermann@suse.de>
Level: Intermediate
-Request memory regions in all drivers
--------------------------------------
+Request memory regions in all fbdev drivers
+--------------------------------------------
-Go through all drivers and add code to request the memory regions that the
-driver uses. This requires adding calls to request_mem_region(),
+Old/ancient fbdev drivers do not request their memory properly.
+Go through these drivers and add code to request the memory regions
+that the driver uses. This requires adding calls to request_mem_region(),
pci_request_region() or similar functions. Use helpers for managed cleanup
-where possible.
-
+where possible. Problematic areas include hardware that has exclusive ranges
+like VGA. VGA16fb does not request the range as expected.
Drivers are pretty bad at doing this and there used to be conflicts among
DRM and fbdev drivers. Still, it's the correct thing to do.
diff --git a/Documentation/gpu/vgaarbiter.rst b/Documentation/gpu/vgaarbiter.rst
index bde3c0afb059..d1e953712cc2 100644
--- a/Documentation/gpu/vgaarbiter.rst
+++ b/Documentation/gpu/vgaarbiter.rst
@@ -11,9 +11,9 @@ Section 7, Legacy Devices.
The Resource Access Control (RAC) module inside the X server [0] existed for
the legacy VGA arbitration task (besides other bus management tasks) when more
-than one legacy device co-exists on the same machine. But the problem happens
+than one legacy device co-exist on the same machine. But the problem happens
when these devices are trying to be accessed by different userspace clients
-(e.g. two server in parallel). Their address assignments conflict. Moreover,
+(e.g. two servers in parallel). Their address assignments conflict. Moreover,
ideally, being a userspace application, it is not the role of the X server to
control bus resources. Therefore an arbitration scheme outside of the X server
is needed to control the sharing of these resources. This document introduces
@@ -106,7 +106,7 @@ In-kernel interface
libpciaccess
------------
-To use the vga arbiter char device it was implemented an API inside the
+To use the vga arbiter char device, an API was implemented inside the
libpciaccess library. One field was added to struct pci_device (each device
on the system)::
diff --git a/Documentation/gpu/xe/index.rst b/Documentation/gpu/xe/index.rst
index 92cfb25e64d3..b2369561f24e 100644
--- a/Documentation/gpu/xe/index.rst
+++ b/Documentation/gpu/xe/index.rst
@@ -25,3 +25,4 @@ DG2, etc is provided to prototype the driver.
xe_debugging
xe_devcoredump
xe-drm-usage-stats.rst
+ xe_configfs
diff --git a/Documentation/gpu/xe/xe_configfs.rst b/Documentation/gpu/xe/xe_configfs.rst
new file mode 100644
index 000000000000..9b9d941eb20e
--- /dev/null
+++ b/Documentation/gpu/xe/xe_configfs.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+.. _xe_configfs:
+
+============
+Xe Configfs
+============
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_configfs.c
+ :doc: Xe Configfs
diff --git a/Documentation/gpu/xe/xe_firmware.rst b/Documentation/gpu/xe/xe_firmware.rst
index afcb561cd37d..5d23e9f27391 100644
--- a/Documentation/gpu/xe/xe_firmware.rst
+++ b/Documentation/gpu/xe/xe_firmware.rst
@@ -31,6 +31,12 @@ GuC Power Conservation (PC)
.. kernel-doc:: drivers/gpu/drm/xe/xe_guc_pc.c
:doc: GuC Power Conservation (PC)
+PCIe Gen5 Limitations
+=====================
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_device_sysfs.c
+ :doc: PCIe Gen5 Limitations
+
Internal API
============
diff --git a/Documentation/gpu/xe/xe_pcode.rst b/Documentation/gpu/xe/xe_pcode.rst
index d2e22cc45061..5937ef3599b0 100644
--- a/Documentation/gpu/xe/xe_pcode.rst
+++ b/Documentation/gpu/xe/xe_pcode.rst
@@ -12,3 +12,10 @@ Internal API
.. kernel-doc:: drivers/gpu/drm/xe/xe_pcode.c
:internal:
+
+==================
+Boot Survivability
+==================
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_survivability_mode.c
+ :doc: Xe Boot Survivability
diff --git a/Documentation/hid/intel-thc-hid.rst b/Documentation/hid/intel-thc-hid.rst
index 6c417205ac6a..dc9250787fc5 100644
--- a/Documentation/hid/intel-thc-hid.rst
+++ b/Documentation/hid/intel-thc-hid.rst
@@ -182,7 +182,7 @@ value and use PIO write (by setting SubIP write opcode) to do a write operation.
THC also includes two GPIO pins, one for interrupt and the other for device reset control.
-Interrupt line can be configured to either level triggerred or edge triggerred by setting MMIO
+Interrupt line can be configured to either level triggered or edge triggered by setting MMIO
Control register.
Reset line is controlled by BIOS (or EFI) through ACPI _RST method, driver needs to call this
@@ -302,10 +302,10 @@ waiting for interrupt ready then read out the data from system memory.
3.3.2 Software DMA channel
~~~~~~~~~~~~~~~~~~~~~~~~~~
-THC supports a software triggerred RxDMA mode to read the touch data from touch IC. This SW RxDMA
+THC supports a software triggered RxDMA mode to read the touch data from touch IC. This SW RxDMA
is the 3rd THC RxDMA engine with the similar functionalities as the existing two RxDMAs, the only
-difference is this SW RxDMA is triggerred by software, and RxDMA2 is triggerred by external Touch IC
-interrupt. It gives a flexiblity to software driver to use RxDMA read Touch IC data in any time.
+difference is this SW RxDMA is triggered by software, and RxDMA2 is triggered by external Touch IC
+interrupt. It gives the software driver the flexibility to use RxDMA to read Touch IC data at any time.
Before software starts a SW RxDMA, it shall stop the 1st and 2nd RxDMA, clear PRD read/write pointer
and quiesce the device interrupt (THC_DEVINT_QUIESCE_HW_STS = 1), other operations are the same with
diff --git a/Documentation/hwmon/acpi_power_meter.rst b/Documentation/hwmon/acpi_power_meter.rst
index 8628c1161015..a91403a2a26f 100644
--- a/Documentation/hwmon/acpi_power_meter.rst
+++ b/Documentation/hwmon/acpi_power_meter.rst
@@ -37,9 +37,16 @@ arbitrary strings that ACPI provides with the meter. The measures/ directory
contains symlinks to the devices that this meter measures.
Some computers have the ability to enforce a power cap in hardware. If this is
-the case, the `power[1-*]_cap` and related sysfs files will appear. When the
-average power consumption exceeds the cap, an ACPI event will be broadcast on
-the netlink event socket and a poll notification will be sent to the
+the case, the `power[1-*]_cap` and related sysfs files will appear.
+For information on enabling the power cap feature, refer to the description
+of the "force_on_cap" option in the "Module Parameters" chapter.
+To use the power cap feature properly, you need to set appropriate value
+(in microWatts) to the `power[1-*]_cap` sysfs files.
+The value must be within the range between the minimum value at `power[1-]_cap_min`
+and the maximum value at `power[1-]_cap_max (both in microWatts)`.
+
+When the average power consumption exceeds the cap, an ACPI event will be
+broadcast on the netlink event socket and a poll notification will be sent to the
appropriate `power[1-*]_alarm` file to indicate that capping has begun, and the
hardware has taken action to reduce power consumption. Most likely this will
result in reduced performance.
@@ -52,3 +59,19 @@ follows:
`power[1-*]_cap` will be notified if the firmware changes the power cap.
`power[1-*]_interval` will be notified if the firmware changes the averaging
interval.
+
+Module Parameters
+-----------------
+
+* force_cap_on: bool
+ Forcefully enable the power capping feature to specify
+ the upper limit of the system's power consumption.
+
+ By default, the driver's power capping feature is only
+ enabled on IBM products.
+ Therefore, on other systems that support power capping,
+ you will need to use the option to enable it.
+
+ Note: power capping is a potentially unsafe feature.
+ Please check the platform specifications to make sure
+ that capping is supported before using this option.
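+
+A minimal usage sketch (the module parameter is only needed on non-IBM
+systems; the hwmon index and the 350 W cap value are purely illustrative)::
+
+  # modprobe acpi_power_meter force_cap_on=1
+  # echo 350000000 > /sys/class/hwmon/hwmon2/power1_cap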
diff --git a/Documentation/hwmon/asus_ec_sensors.rst b/Documentation/hwmon/asus_ec_sensors.rst
index d2be9db29614..816d1f9947ea 100644
--- a/Documentation/hwmon/asus_ec_sensors.rst
+++ b/Documentation/hwmon/asus_ec_sensors.rst
@@ -4,6 +4,7 @@ Kernel driver asus_ec_sensors
=================================
Supported boards:
+ * MAXIMUS VI HERO
* PRIME X470-PRO
* PRIME X570-PRO
* PRIME X670E-PRO WIFI
@@ -20,6 +21,7 @@ Supported boards:
* ROG CROSSHAIR X670E GENE
* ROG MAXIMUS XI HERO
* ROG MAXIMUS XI HERO (WI-FI)
+ * ROG MAXIMUS Z690 FORMULA
* ROG STRIX B550-E GAMING
* ROG STRIX B550-I GAMING
* ROG STRIX X570-E GAMING
diff --git a/Documentation/hwmon/ina238.rst b/Documentation/hwmon/ina238.rst
index d9f479984420..d1b93cf8627f 100644
--- a/Documentation/hwmon/ina238.rst
+++ b/Documentation/hwmon/ina238.rst
@@ -14,6 +14,12 @@ Supported chips:
Datasheet:
https://www.ti.com/lit/gpn/ina238
+ * Silergy SQ52206
+
+ Prefix: 'SQ52206'
+
+ Addresses: I2C 0x40 - 0x4f
+
Author: Nathan Rossi <nathan.rossi@digi.com>
Description
@@ -54,3 +60,12 @@ temp1_input Die temperature measurement (mC)
temp1_max Maximum die temperature threshold (mC)
temp1_max_alarm Maximum die temperature alarm
======================= =======================================================
+
+Additional sysfs entries for sq52206
+------------------------------------
+
+======================= =======================================================
+energy1_input Energy measurement (mJ)
+
+power1_input_highest Peak Power (uW)
+======================= =======================================================
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index f0ddf6222c44..b45bfb4ebf30 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -106,6 +106,8 @@ Hardware Monitoring Kernel Drivers
jc42
k10temp
k8temp
+ kbatt
+ kfan
lan966x
lineage-pem
lm25066
@@ -125,6 +127,7 @@ Hardware Monitoring Kernel Drivers
lm95234
lm95245
lochnagar
+ lt3074
lt7182s
ltc2992
ltc2945
@@ -161,6 +164,7 @@ Hardware Monitoring Kernel Drivers
max6639
max6650
max6697
+ max77705
max8688
mc13783-adc
mc34vr500
@@ -189,7 +193,6 @@ Hardware Monitoring Kernel Drivers
nzxt-kraken3
nzxt-smart2
occ
- oxp-sensors
pc87360
pc87427
pcf8591
diff --git a/Documentation/hwmon/kbatt.rst b/Documentation/hwmon/kbatt.rst
new file mode 100644
index 000000000000..b72718c5ede3
--- /dev/null
+++ b/Documentation/hwmon/kbatt.rst
@@ -0,0 +1,60 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver kbatt
+===================
+
+Supported chips:
+
+ * KEBA battery monitoring controller (IP core in FPGA)
+
+ Prefix: 'kbatt'
+
+Authors:
+
+ Gerhard Engleder <eg@keba.com>
+ Petar Bojanic <boja@keba.com>
+
+Description
+-----------
+
+The KEBA battery monitoring controller is an IP core for FPGAs, which
+monitors the health of a coin cell battery. The coin cell battery is
+typically used to supply the RTC during power off to keep the current
+time. E.g., the CP500 FPGA includes this IP core to monitor the coin cell
+battery of PLCs and the corresponding cp500 driver creates an auxiliary
+device for the kbatt driver.
+
+This driver provides information about the coin cell battery health to
+user space. In particular, user space shall be informed when the coin cell
+battery is nearly empty and needs to be replaced.
+
+The coin cell battery must be tested actively to find out whether it is
+nearly empty or not. Therefore, a load is put on the coin cell battery and the
+resulting voltage is evaluated. This evaluation is done by some hard wired
+analog logic, which compares the voltage to a defined limit. If the
+voltage is above the limit, then the coin cell battery is assumed to be
+ok. If the voltage is below the limit, then the coin cell battery is
+nearly empty (or broken, removed, ...) and shall be replaced by a new one.
+The KEBA battery monitoring controller allows starting the test of the
+coin cell battery and getting the result, i.e. whether the voltage is above
+or below the limit. The actual voltage is not available; only the
+information whether the voltage is below the limit is available.
+
+The test load, which is put on the coin cell battery for the health check,
+is similar to the load during power off. Therefore, the lifetime of the
+coin cell battery is reduced directly by the duration of each test. To
+limit the negative impact on the lifetime, the test is limited to at most
+once every 10 seconds. The test load is put on the coin cell battery for
+100ms. Thus, in the worst case the coin cell battery lifetime is reduced by
+1% of the uptime or 3.65 days per year. As the coin cell battery lasts
+multiple years, this lifetime reduction is negligible.
+
+This driver only provides a single alarm attribute, which is raised when
+the coin cell battery is nearly empty.
+
+====================== ==== ===================================================
+Attribute R/W Contents
+====================== ==== ===================================================
+in0_min_alarm R voltage of coin cell battery under load is below
+ limit
+====================== ==== ===================================================
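+
+A short read example (the hwmon index below is only illustrative and depends
+on the system)::
+
+  # cat /sys/class/hwmon/hwmon3/in0_min_alarm
+  0
+
+A value of 1 means that the battery voltage dropped below the limit during
+the last test and the coin cell battery should be replaced.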
diff --git a/Documentation/hwmon/kfan.rst b/Documentation/hwmon/kfan.rst
new file mode 100644
index 000000000000..ce02dddfb4b8
--- /dev/null
+++ b/Documentation/hwmon/kfan.rst
@@ -0,0 +1,39 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver kfan
+==================
+
+Supported chips:
+
+ * KEBA fan controller (IP core in FPGA)
+
+ Prefix: 'kfan'
+
+Authors:
+
+ Gerhard Engleder <eg@keba.com>
+ Petar Bojanic <boja@keba.com>
+
+Description
+-----------
+
+The KEBA fan controller is an IP core for FPGAs, which monitors the health
+and controls the speed of a fan. The fan is typically used to cool the CPU
+and the whole device. E.g., the CP500 FPGA includes this IP core to monitor
+and control the fan of PLCs and the corresponding cp500 driver creates an
+auxiliary device for the kfan driver.
+
+This driver provides information about the fan health to user space.
+The user space shall be informed if the fan is removed or blocked.
+Additionally, the speed in RPM is reported for fans with tacho signal.
+
+For fan control, PWM is supported. A PWM value of 255 equals 100%. Non-regulable
+fans can be turned on with PWM 255 and turned off with PWM 0.
+
+====================== ==== ===================================================
+Attribute R/W Contents
+====================== ==== ===================================================
+fan1_fault R Fan fault
+fan1_input R Fan tachometer input (in RPM)
+pwm1 RW Fan target duty cycle (0..255)
+====================== ==== ===================================================
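+
+A short usage sketch (the hwmon index below is only illustrative; 128 requests
+roughly 50% duty cycle on the 0..255 scale described above)::
+
+  # cat /sys/class/hwmon/hwmon4/fan1_input
+  4500
+  # echo 128 > /sys/class/hwmon/hwmon4/pwm1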
diff --git a/Documentation/hwmon/lt3074.rst b/Documentation/hwmon/lt3074.rst
new file mode 100644
index 000000000000..234f369153cf
--- /dev/null
+++ b/Documentation/hwmon/lt3074.rst
@@ -0,0 +1,72 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver lt3074
+====================
+
+Supported chips:
+
+ * Analog Devices LT3074
+
+ Prefix: 'lt3074'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.analog.com/en/products/lt3074.html
+
+Authors: Cedric Encarnacion <cedricjustine.encarnacion@analog.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Analog Devices LT3074 Linear
+Regulator with PMBus interface.
+
+The LT3074 is a low voltage, ultra-low noise and ultra-fast transient
+response linear regulator with PMBus serial interface. PMBus telemetry
+feature provides information regarding the output voltage and current,
+input voltage, bias voltage and die temperature.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate
+the devices explicitly. Please see Documentation/i2c/instantiating-devices.rst
+for details.
+
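+For example, a device can be instantiated from user space (the bus number and
+the I2C address below are only illustrative)::
+
+  # echo lt3074 0x6d > /sys/bus/i2c/devices/i2c-1/new_device
+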
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+Sysfs entries
+-------------
+
+======================= =======================================================
+in1_label "vin"
+in1_input Measured input voltage
+in1_max Input overvoltage warning limit
+in1_max_alarm Input overvoltage warning status
+in1_min Input undervoltage warning limit
+in1_min_alarm Input undervoltage warning status
+in2_label "vmon"
+in2_input Measured bias voltage
+in2_max Bias overvoltage warning limit
+in2_min Bias undervoltage warning limit
+in3_label "vout1"
+in3_input Measured output voltage
+in3_max Output overvoltage warning limit
+in3_max_alarm Output overvoltage warning status
+in3_min Output undervoltage warning limit
+in3_min_alarm Output undervoltage warning status
+curr1_label "iout1"
+curr1_input Measured output current.
+curr1_crit Output overcurrent fault limit
+curr1_crit_alarm Output overcurrent fault status
+temp1_input Measured temperature
+temp1_max Maximum temperature limit
+temp1_max_alarm Overtemperature warning status
+======================= =======================================================
diff --git a/Documentation/hwmon/max34440.rst b/Documentation/hwmon/max34440.rst
index 162d289f0814..8591a7152ce5 100644
--- a/Documentation/hwmon/max34440.rst
+++ b/Documentation/hwmon/max34440.rst
@@ -3,6 +3,14 @@ Kernel driver max34440
Supported chips:
+ * ADI ADPM12160
+
+ Prefixes: 'adpm12160'
+
+ Addresses scanned: -
+
+ Datasheet: -
+
* Maxim MAX34440
Prefixes: 'max34440'
@@ -67,13 +75,14 @@ Author: Guenter Roeck <linux@roeck-us.net>
Description
-----------
-This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel
-Power-Supply Manager, MAX34441 PMBus 5-Channel Power-Supply Manager
-and Intelligent Fan Controller, and MAX34446 PMBus Power-Supply Data Logger.
-It also supports the MAX34451, MAX34460, and MAX34461 PMBus Voltage Monitor &
-Sequencers. The MAX34451 supports monitoring voltage or current of 12 channels
-based on GIN pins. The MAX34460 supports 12 voltage channels, and the MAX34461
-supports 16 voltage channels.
+This driver supports multiple devices: hardware monitoring for Maxim MAX34440
+PMBus 6-Channel Power-Supply Manager, MAX34441 PMBus 5-Channel Power-Supply
+Manager and Intelligent Fan Controller, and MAX34446 PMBus Power-Supply Data
+Logger; PMBus Voltage Monitor and Sequencers for MAX34451, MAX34460, and
+MAX34461; PMBus DC/DC Power Module ADPM12160. The MAX34451 supports monitoring
+voltage or current of 12 channels based on GIN pins. The MAX34460 supports 12
+voltage channels, and the MAX34461 supports 16 voltage channels. The ADPM12160
+also monitors both input and output voltage and current.
The driver is a client driver to the core PMBus driver. Please see
Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
@@ -128,7 +137,10 @@ in[1-6]_highest Historical maximum voltage.
in[1-6]_reset_history Write any value to reset history.
======================= =======================================================
-.. note:: MAX34446 only supports in[1-4].
+.. note::
+
+ - MAX34446 only supports in[1-4].
+ - ADPM12160 only supports in[1-2]. Label is "vin1" and "vout1" respectively.
Curr
~~~~
@@ -150,6 +162,7 @@ curr[1-6]_reset_history Write any value to reset history.
- in6 and curr6 attributes only exist for MAX34440.
- MAX34446 only supports curr[1-4].
+ - For ADPM12160, curr[1] is "iin1" and curr[2-6] are "iout[1-5]".
Power
~~~~~
@@ -185,6 +198,7 @@ temp[1-8]_reset_history Write any value to reset history.
.. note::
- temp7 and temp8 attributes only exist for MAX34440.
- MAX34446 only supports temp[1-3].
+ - ADPM12160 only supports temp[1].
.. note::
diff --git a/Documentation/hwmon/max77705.rst b/Documentation/hwmon/max77705.rst
new file mode 100644
index 000000000000..4a7680a340e1
--- /dev/null
+++ b/Documentation/hwmon/max77705.rst
@@ -0,0 +1,39 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver max77705
+======================
+
+Supported chips:
+
+ * Maxim Integrated MAX77705
+
+ Prefix: 'max77705'
+
+ Addresses scanned: none
+
+ Datasheet: Not available
+
+Authors:
+ - Dzmitry Sankouski <dsankouski@gmail.com>
+
+Description
+-----------
+
+The MAX77705 PMIC provides current and voltage measurements besides the fuel
+gauge:
+
+- chip input current
+- system bus current and voltage
+- VBYP voltage
+
+Sysfs Attributes
+----------------
+
+================= ========================================
+in1_label "vbyp"
+in1_input Measured chip vbyp voltage
+in2_label "vsys"
+in2_input Measured chip system bus voltage
+curr1_label "iin"
+curr1_input Measured chip input current.
+curr2_label "isys"
+curr2_input Measured chip system bus current.
+================= ========================================
diff --git a/Documentation/hwmon/mpq8785.rst b/Documentation/hwmon/mpq8785.rst
index bf8176b87086..198d5dfd7c30 100644
--- a/Documentation/hwmon/mpq8785.rst
+++ b/Documentation/hwmon/mpq8785.rst
@@ -5,6 +5,8 @@ Kernel driver mpq8785
Supported chips:
+ * MPS MPM3695 family
+ * MPS MPM82504
* MPS MPQ8785
Prefix: 'mpq8785'
@@ -14,6 +16,22 @@ Author: Charles Hsu <ythsu0511@gmail.com>
Description
-----------
+The MPM3695 family is a scalable, ultra-thin, fully integrated power module with
+a PMBus interface. It offers a complete power solution that achieves up to
+10A (-10 variant), 20A (-25 variant), 25A (-20 variant), 100A (-100 variant)
+of output current with excellent load and line regulation across a wide input
+voltage range. It operates at high efficiency over a wide load range, and can
+be paralleled to deliver higher current. Variants -10, -20 and -100 have a
+different voltage scale configuration register range (10 bits) than the -25
+version (11 bits).
+
+The MPM82504 is a quad 25A, scalable, fully integrated power module with a PMBus
+interface. The device offers a complete power solution that achieves up to 25A
+per output channel. The MPM82504 has four output channels that can be paralleled
+to provide 50A, 75A, or 100A of output current for flexible configurations.
+The device can also operate in parallel with the MPM3695-100 and additional
+MPM82504 devices to provide a higher output current. The MPM82504 operates
+at high efficiency across a wide load range.
+
The MPQ8785 is a fully integrated, PMBus-compatible, high-frequency, synchronous
buck converter. The MPQ8785 offers a very compact solution that achieves up to
40A output current per phase, with excellent load and line regulation over a
@@ -23,19 +41,16 @@ output current load range.
The PMBus interface provides converter configurations and key parameters
monitoring.
-The MPQ8785 adopts MPS's proprietary multi-phase digital constant-on-time (MCOT)
+The devices adopt MPS's proprietary multi-phase digital constant-on-time (MCOT)
control, which provides fast transient response and eases loop stabilization.
-The MCOT scheme also allows multiple MPQ8785 devices to be connected in parallel
-with excellent current sharing and phase interleaving for high-current
+The MCOT scheme also allows multiple devices or channels to be connected in
+parallel with excellent current sharing and phase interleaving for high-current
applications.
Fully integrated protection features include over-current protection (OCP),
over-voltage protection (OVP), under-voltage protection (UVP), and
over-temperature protection (OTP).
-The MPQ8785 requires a minimal number of readily available, standard external
-components, and is available in a TLGA (5mmx6mm) package.
-
Device compliant with:
- PMBus rev 1.3 interface.
diff --git a/Documentation/hwmon/oxp-sensors.rst b/Documentation/hwmon/oxp-sensors.rst
deleted file mode 100644
index 581c4dafbfa1..000000000000
--- a/Documentation/hwmon/oxp-sensors.rst
+++ /dev/null
@@ -1,89 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0-or-later
-
-Kernel driver oxp-sensors
-=========================
-
-Authors:
- - Derek John Clark <derekjohn.clark@gmail.com>
- - Joaquín Ignacio Aramendía <samsagax@gmail.com>
-
-Description:
-------------
-
-Handheld devices from OneNetbook, AOKZOE, AYANEO, And OrangePi provide fan
-readings and fan control through their embedded controllers.
-
-Currently supports OneXPlayer devices, AOKZOE, AYANEO, and OrangePi
-handheld devices. AYANEO devices preceding the AIR and OneXPlayer devices
-preceding the Mini A07 are not supportable as the EC model is different
-and do not have manual control capabilities.
-
-Some OneXPlayer and AOKZOE models have a toggle for changing the behaviour
-of the "Turbo/Silent" button of the device. It will change the key event
-that it triggers with a flip of the `tt_toggle` attribute. See below for
-boards that support this function.
-
-Supported devices
------------------
-
-Currently the driver supports the following handhelds:
-
- - AOKZOE A1
- - AOKZOE A1 PRO
- - AYANEO 2
- - AYANEO 2S
- - AYANEO AIR
- - AYANEO AIR 1S
- - AYANEO AIR Plus (Mendocino)
- - AYANEO AIR Pro
- - AYANEO Flip DS
- - AYANEO Flip KB
- - AYANEO Geek
- - AYANEO Geek 1S
- - AYANEO KUN
- - OneXPlayer 2
- - OneXPlayer 2 Pro
- - OneXPlayer AMD
- - OneXPlayer mini AMD
- - OneXPlayer mini AMD PRO
- - OneXPlayer OneXFly
- - OneXPlayer X1 A
- - OneXPlayer X1 i
- - OneXPlayer X1 mini
- - OrangePi NEO-01
-
-"Turbo/Silent" button behaviour toggle is only supported on:
- - AOK ZOE A1
- - AOK ZOE A1 PRO
- - OneXPlayer 2
- - OneXPlayer 2 Pro
- - OneXPlayer mini AMD (only with updated alpha BIOS)
- - OneXPlayer mini AMD PRO
- - OneXPlayer OneXFly
- - OneXPlayer X1 A
- - OneXPlayer X1 i
- - OneXPlayer X1 mini
-
-Sysfs entries
--------------
-
-The following attributes are supported:
-
-fan1_input
- Read Only. Reads current fan RPM.
-
-pwm1_enable
- Read Write. Enable manual fan control. Write "1" to set to manual, write "0"
- to let the EC control de fan speed. Read this attribute to see current status.
-
-pwm1
- Read Write. Read this attribute to see current duty cycle in the range [0-255].
- When pwm1_enable is set to "1" (manual) write any value in the range [0-255]
- to set fan speed.
-
-tt_toggle
- Read Write. Read this attribute to check the status of the turbo/silent
- button behaviour function. Write "1" to activate the switch and "0" to
- deactivate it. The specific keycodes and behaviour is specific to the device
- both with this function on and off. This attribute is attached to the platform
- driver and not to the hwmon driver (/sys/devices/platform/oxp-platform/tt_toggle)
diff --git a/Documentation/i2c/busses/i2c-parport.rst b/Documentation/i2c/busses/i2c-parport.rst
index a9b4e8133700..4cbf45952d52 100644
--- a/Documentation/i2c/busses/i2c-parport.rst
+++ b/Documentation/i2c/busses/i2c-parport.rst
@@ -84,7 +84,7 @@ Remarks:
\|
must be 74HC05, they must be open collector output.
- - All resitors are 10k.
+ - All resistors are 10k.
- Pins 18-25 of the parallel port connected to GND.
- Pins 4-9 (D2-D7) could be used as VDD is the driver drives them high.
The ADM1032 evaluation board uses D4-D7. Beware that the amount of
diff --git a/Documentation/index.rst b/Documentation/index.rst
index f9f525f4c0dd..c0cf79a87c3a 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -84,7 +84,7 @@ which are kept separately from the kernel's own documentation.
Firmware-related documentation
==============================
The following holds information on the kernel's expectations regarding the
-platform firmwares.
+platform firmware.
.. toctree::
:maxdepth: 1
diff --git a/Documentation/input/devices/amijoy.rst b/Documentation/input/devices/amijoy.rst
index 8df7b11cd98d..a81e9de481c7 100644
--- a/Documentation/input/devices/amijoy.rst
+++ b/Documentation/input/devices/amijoy.rst
@@ -1,14 +1,15 @@
-~~~~~~~~~~~~~~~~~~~~~~~~~
-Amiga joystick extensions
-~~~~~~~~~~~~~~~~~~~~~~~~~
+===============
+Amiga joysticks
+===============
+Pinouts
+=======
-Amiga 4-joystick parport extension
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Amiga 4-joystick parallel port extension
+----------------------------------------
Parallel port pins:
-
===== ======== ==== ==========
Pin Meaning Pin Meaning
===== ======== ==== ==========
@@ -17,11 +18,11 @@ Pin Meaning Pin Meaning
4 Left1 8 Left2
5 Right1 9 Right2
13 Fire1 11 Fire2
-18 Gnd1 18 Gnd2
+19 Gnd1 18 Gnd2
===== ======== ==== ==========
-Amiga digital joystick pinout
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Amiga digital joystick
+----------------------
=== ============
Pin Meaning
@@ -37,8 +38,8 @@ Pin Meaning
9 Thumb button
=== ============
-Amiga mouse pinout
-~~~~~~~~~~~~~~~~~~
+Amiga mouse
+-----------
=== ============
Pin Meaning
@@ -54,8 +55,8 @@ Pin Meaning
9 Right button
=== ============
-Amiga analog joystick pinout
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Amiga analog joystick
+---------------------
=== ==============
Pin Meaning
@@ -71,8 +72,8 @@ Pin Meaning
9 Analog Y
=== ==============
-Amiga lightpen pinout
-~~~~~~~~~~~~~~~~~~~~~
+Amiga lightpen
+--------------
=== =============
Pin Meaning
@@ -88,19 +89,23 @@ Pin Meaning
9 Stylus button
=== =============
--------------------------------------------------------------------------------
+Register addresses
+==================
+
+JOY0DAT/JOY1DAT
+---------------
-======== === ==== ==== ====== ========================================
+======== === ==== ==== ====== ===========================================
NAME rev ADDR type chip Description
-======== === ==== ==== ====== ========================================
-JOY0DAT 00A R Denise Joystick-mouse 0 data (left vert, horiz)
-JOY1DAT 00C R Denise Joystick-mouse 1 data (right vert,horiz)
-======== === ==== ==== ====== ========================================
+======== === ==== ==== ====== ===========================================
+JOY0DAT 00A R Denise Joystick-mouse 0 data (left vert., horiz.)
+JOY1DAT 00C R Denise Joystick-mouse 1 data (right vert., horiz.)
+======== === ==== ==== ====== ===========================================
These addresses each read a 16 bit register. These in turn
are loaded from the MDAT serial stream and are clocked in on
the rising edge of SCLK. MLD output is used to parallel load
- the external parallel-to-serial converter.This in turn is
+ the external parallel-to-serial converter. This in turn is
loaded with the 4 quadrature inputs from each of two game
controller ports (8 total) plus 8 miscellaneous control bits
which are new for LISA and can be read in upper 8 bits of
@@ -108,7 +113,7 @@ JOY1DAT 00C R Denise Joystick-mouse 1 data (right vert,horiz)
Register bits are as follows:
- - Mouse counter usage (pins 1,3 =Yclock, pins 2,4 =Xclock)
+ - Mouse counter usage (pins 1,3 =Yclock, pins 2,4 =Xclock)
======== === === === === === === === === ====== === === === === === === ===
BIT# 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00
@@ -123,7 +128,7 @@ JOY1DAT Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 X7 X6 X5 X4 X3 X2 X1 X0
clocked by 2 of the signals input from the mouse serial
stream. Starting with first bit received:
- +-------------------+-----------------------------------------+
+ +--------+----------+-----------------------------------------+
| Serial | Bit Name | Description |
+========+==========+=========================================+
| 0 | M0H | JOY0DAT Horizontal Clock |
@@ -160,7 +165,8 @@ JOY1DAT Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 X7 X6 X5 X4 X3 X2 X1 X0
| Right | 4 | X1 |
+------------+------+---------------------------------+
--------------------------------------------------------------------------------
+JOYTEST
+-------
======== === ==== ==== ====== =================================================
NAME rev ADDR type chip Description
@@ -177,14 +183,15 @@ JOYTEST 036 W Denise Write to all 4 joystick-mouse counters at once.
JOYxDAT Y7 Y6 Y5 Y4 Y3 Y2 xx xx X7 X6 X5 X4 X3 X2 xx xx
========= === === === === === === === === ====== === === === === === === ===
--------------------------------------------------------------------------------
+POT0DAT/POT1DAT
+---------------
-======= === ==== ==== ====== ========================================
+======= === ==== ==== ====== ===========================================
NAME rev ADDR type chip Description
-======= === ==== ==== ====== ========================================
-POT0DAT h 012 R Paula Pot counter data left pair (vert, horiz)
-POT1DAT h 014 R Paula Pot counter data right pair (vert,horiz)
-======= === ==== ==== ====== ========================================
+======= === ==== ==== ====== ===========================================
+POT0DAT h 012 R Paula Pot counter data left pair (vert., horiz.)
+POT1DAT h 014 R Paula Pot counter data right pair (vert., horiz.)
+======= === ==== ==== ====== ===========================================
These addresses each read a pair of 8 bit pot counters.
(4 counters total). The bit assignment for both
@@ -213,12 +220,13 @@ POT1DAT h 014 R Paula Pot counter data right pair (vert,horiz)
+-------+------+-----+-----+-------+
With normal (NTSC or PAL) horiz. line rate, the pots will
- give a full scale (FF) reading with about 500kohms in one
- frame time. With proportionally faster horiz line times,
+ give a full scale (FF) reading with about 500k ohm in one
+ frame time. With proportionally faster horiz. line times,
the counters will count proportionally faster.
This should be noted when doing variable beam displays.
--------------------------------------------------------------------------------
+POTGO
+-----
====== === ==== ==== ====== ================================================
NAME rev ADDR type chip Description
@@ -227,7 +235,8 @@ POTGO 034 W Paula Pot port (4 bit) bi-direction and data, and pot
counter start.
====== === ==== ==== ====== ================================================
--------------------------------------------------------------------------------
+POTINP
+------
====== === ==== ==== ====== ================================================
NAME rev ADDR type chip Description
@@ -238,26 +247,26 @@ POTINP 016 R Paula Pot pin data read
This register controls a 4 bit bi-direction I/O port
that shares the same 4 pins as the 4 pot counters above.
- +-------+----------+---------------------------------------------+
- | BIT# | FUNCTION | DESCRIPTION |
- +=======+==========+=============================================+
- | 15 | OUTRY | Output enable for Paula pin 33 |
- +-------+----------+---------------------------------------------+
- | 14 | DATRY | I/O data Paula pin 33 |
- +-------+----------+---------------------------------------------+
- | 13 | OUTRX | Output enable for Paula pin 32 |
- +-------+----------+---------------------------------------------+
- | 12 | DATRX | I/O data Paula pin 32 |
- +-------+----------+---------------------------------------------+
- | 11 | OUTLY | Out put enable for Paula pin 36 |
- +-------+----------+---------------------------------------------+
- | 10 | DATLY | I/O data Paula pin 36 |
- +-------+----------+---------------------------------------------+
- | 09 | OUTLX | Output enable for Paula pin 35 |
- +-------+----------+---------------------------------------------+
- | 08 | DATLX | I/O data Paula pin 35 |
- +-------+----------+---------------------------------------------+
- | 07-01 | X | Not used |
- +-------+----------+---------------------------------------------+
- | 00 | START | Start pots (dump capacitors,start counters) |
- +-------+----------+---------------------------------------------+
+ +-------+----------+----------------------------------------------+
+ | BIT# | FUNCTION | DESCRIPTION |
+ +=======+==========+==============================================+
+ | 15 | OUTRY | Output enable for Paula pin 33 |
+ +-------+----------+----------------------------------------------+
+ | 14 | DATRY | I/O data Paula pin 33 |
+ +-------+----------+----------------------------------------------+
+ | 13 | OUTRX | Output enable for Paula pin 32 |
+ +-------+----------+----------------------------------------------+
+ | 12 | DATRX | I/O data Paula pin 32 |
+ +-------+----------+----------------------------------------------+
+ | 11 | OUTLY | Out put enable for Paula pin 36 |
+ +-------+----------+----------------------------------------------+
+ | 10 | DATLY | I/O data Paula pin 36 |
+ +-------+----------+----------------------------------------------+
+ | 09 | OUTLX | Output enable for Paula pin 35 |
+ +-------+----------+----------------------------------------------+
+ | 08 | DATLX | I/O data Paula pin 35 |
+ +-------+----------+----------------------------------------------+
+ | 07-01 | X | Not used |
+ +-------+----------+----------------------------------------------+
+ | 00 | START | Start pots (dump capacitors, start counters) |
+ +-------+----------+----------------------------------------------+
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index 3b9a8bc671e2..38cc656fac20 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -625,10 +625,10 @@ gcc-min-version
Example::
- cflags-$(call gcc-min-version, 70100) := -foo
+ cflags-$(call gcc-min-version, 110100) := -foo
In this example, cflags-y will be assigned the value -foo if $(CC) is gcc and
- $(CONFIG_GCC_VERSION) is >= 7.1.
+ $(CONFIG_GCC_VERSION) is >= 11.1.
clang-min-version
clang-min-version tests if the value of $(CONFIG_CLANG_VERSION) is greater
diff --git a/Documentation/kbuild/reproducible-builds.rst b/Documentation/kbuild/reproducible-builds.rst
index a7762486c93f..f2dcc39044e6 100644
--- a/Documentation/kbuild/reproducible-builds.rst
+++ b/Documentation/kbuild/reproducible-builds.rst
@@ -46,6 +46,21 @@ The kernel embeds the building user and host names in
`KBUILD_BUILD_USER and KBUILD_BUILD_HOST`_ variables. If you are
building from a git commit, you could use its committer address.
+Absolute filenames
+------------------
+
+When the kernel is built out-of-tree, debug information may include
+absolute filenames for the source files. This must be overridden by
+including the ``-fdebug-prefix-map`` option in the `KCFLAGS`_ variable.
+
+Depending on the compiler used, the ``__FILE__`` macro may also expand
+to an absolute filename in an out-of-tree build. Kbuild automatically
+uses the ``-fmacro-prefix-map`` option to prevent this, if it is
+supported.
+
+The Reproducible Builds web site has more information about these
+`prefix-map options`_.
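+
+For example, a minimal sketch of an out-of-tree build that remaps the source
+directory (the output directory is only illustrative)::
+
+  $ make O=/tmp/kbuild KCFLAGS="-fdebug-prefix-map=$PWD=."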
+
Generated files in source packages
----------------------------------
@@ -116,5 +131,7 @@ See ``scripts/setlocalversion`` for details.
.. _KBUILD_BUILD_TIMESTAMP: kbuild.html#kbuild-build-timestamp
.. _KBUILD_BUILD_USER and KBUILD_BUILD_HOST: kbuild.html#kbuild-build-user-kbuild-build-host
+.. _KCFLAGS: kbuild.html#kcflags
+.. _prefix-map options: https://reproducible-builds.org/docs/build-path/
.. _Reproducible Builds project: https://reproducible-builds.org/
.. _SOURCE_DATE_EPOCH: https://reproducible-builds.org/docs/source-date-epoch/
diff --git a/Documentation/leds/index.rst b/Documentation/leds/index.rst
index 0ab0a2128a11..76fae171039c 100644
--- a/Documentation/leds/index.rst
+++ b/Documentation/leds/index.rst
@@ -28,5 +28,5 @@ LEDs
leds-mlxcpld
leds-mt6370-rgb
leds-sc27xx
- leds-st1202.rst
+ leds-st1202
leds-qcom-lpg
diff --git a/Documentation/leds/leds-class-multicolor.rst b/Documentation/leds/leds-class-multicolor.rst
index c57b98bfd387..c6b47b4093c4 100644
--- a/Documentation/leds/leds-class-multicolor.rst
+++ b/Documentation/leds/leds-class-multicolor.rst
@@ -18,24 +18,28 @@ array. These files are children under the LED parent node created by the
led_class framework. The led_class framework is documented in led-class.rst
within this documentation directory.
-Each colored LED will be indexed under the multi_* files. The order of the
-colors will be arbitrary. The multi_index file can be read to determine the
+Each colored LED will be indexed under the ``multi_*`` files. The order of the
+colors will be arbitrary. The ``multi_index`` file can be read to determine the
color name to indexed value.
-The multi_index file is an array that contains the string list of the colors as
-they are defined in each multi_* array file.
+The ``multi_index`` file is an array that contains the string list of the colors as
+they are defined in each ``multi_*`` array file.
-The multi_intensity is an array that can be read or written to for the
+The ``multi_intensity`` is an array that can be read or written to for the
individual color intensities. All elements within this array must be written in
order for the color LED intensities to be updated.
Directory Layout Example
========================
-root:/sys/class/leds/multicolor:status# ls -lR
--rw-r--r-- 1 root root 4096 Oct 19 16:16 brightness
--r--r--r-- 1 root root 4096 Oct 19 16:16 max_brightness
--r--r--r-- 1 root root 4096 Oct 19 16:16 multi_index
--rw-r--r-- 1 root root 4096 Oct 19 16:16 multi_intensity
+.. code-block:: console
+
+ root:/sys/class/leds/multicolor:status# ls -lR
+ -rw-r--r-- 1 root root 4096 Oct 19 16:16 brightness
+ -r--r--r-- 1 root root 4096 Oct 19 16:16 max_brightness
+ -r--r--r-- 1 root root 4096 Oct 19 16:16 multi_index
+ -rw-r--r-- 1 root root 4096 Oct 19 16:16 multi_intensity
+
+..
Multicolor Class Brightness Control
===================================
@@ -43,27 +47,31 @@ The brightness level for each LED is calculated based on the color LED
intensity setting divided by the global max_brightness setting multiplied by
the requested brightness.
-led_brightness = brightness * multi_intensity/max_brightness
+``led_brightness = brightness * multi_intensity/max_brightness``
Example:
A user first writes the multi_intensity file with the brightness levels
for each LED that are necessary to achieve a certain color output from a
multicolor LED group.
-cat /sys/class/leds/multicolor:status/multi_index
-green blue red
+.. code-block:: console
+
+ # cat /sys/class/leds/multicolor:status/multi_index
+ green blue red
-echo 43 226 138 > /sys/class/leds/multicolor:status/multi_intensity
+ # echo 43 226 138 > /sys/class/leds/multicolor:status/multi_intensity
-red -
- intensity = 138
- max_brightness = 255
-green -
- intensity = 43
- max_brightness = 255
-blue -
- intensity = 226
- max_brightness = 255
+ red -
+ intensity = 138
+ max_brightness = 255
+ green -
+ intensity = 43
+ max_brightness = 255
+ blue -
+ intensity = 226
+ max_brightness = 255
+
+..
The user can control the brightness of that multicolor LED group by writing the
global 'brightness' control. Assuming a max_brightness of 255 the user
@@ -71,16 +79,28 @@ may want to dim the LED color group to half. The user would write a value of
128 to the global brightness file then the values written to each LED will be
adjusted base on this value.
-cat /sys/class/leds/multicolor:status/max_brightness
-255
-echo 128 > /sys/class/leds/multicolor:status/brightness
+.. code-block:: console
+
+ # cat /sys/class/leds/multicolor:status/max_brightness
+ 255
+ # echo 128 > /sys/class/leds/multicolor:status/brightness
-adjusted_red_value = 128 * 138/255 = 69
-adjusted_green_value = 128 * 43/255 = 21
-adjusted_blue_value = 128 * 226/255 = 113
+..
+
+.. code-block:: none
+
+ adjusted_red_value = 128 * 138/255 = 69
+ adjusted_green_value = 128 * 43/255 = 21
+ adjusted_blue_value = 128 * 226/255 = 113
+
+..
Reading the global brightness file will return the current brightness value of
the color LED group.
-cat /sys/class/leds/multicolor:status/brightness
-128
+.. code-block:: console
+
+ # cat /sys/class/leds/multicolor:status/brightness
+ 128
+
+..
diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst
index f12d33749329..ddc50db3afa4 100644
--- a/Documentation/mm/damon/design.rst
+++ b/Documentation/mm/damon/design.rst
@@ -54,7 +54,7 @@ monitoring are address-space dependent.
DAMON consolidates these implementations in a layer called DAMON Operations
Set, and defines the interface between it and the upper layer. The upper layer
is dedicated to DAMON's core logic, including the mechanism for control of the
-monitoring accruracy and the overhead.
+monitoring accuracy and the overhead.
Hence, DAMON can easily be extended for any address space and/or available
hardware features by configuring the core logic to use the appropriate
@@ -550,10 +550,10 @@ aggressiveness (the quota) of the corresponding scheme. For example, if DAMOS
is under-achieving the goal, DAMOS automatically increases the quota. If DAMOS
is over-achieving the goal, it decreases the quota.
-The goal can be specified with three parameters, namely ``target_metric``,
-``target_value``, and ``current_value``. The auto-tuning mechanism tries to
-make ``current_value`` of ``target_metric`` be same to ``target_value``.
-Currently, two ``target_metric`` are provided.
+The goal can be specified with four parameters, namely ``target_metric``,
+``target_value``, ``current_value`` and ``nid``. The auto-tuning mechanism
+tries to make ``current_value`` of ``target_metric`` the same as
+``target_value``.
- ``user_input``: User-provided value. Users could use any metric that they
have interest in for the value. User space main workload's latency or
@@ -565,6 +565,11 @@ Currently, two ``target_metric`` are provided.
in microseconds that measured from last quota reset to next quota reset.
DAMOS does the measurement on its own, so only ``target_value`` need to be
set by users at the initial time. In other words, DAMOS does self-feedback.
+- ``node_mem_used_bp``: Specific NUMA node's used memory ratio in bp (1/10,000).
+- ``node_mem_free_bp``: Specific NUMA node's free memory ratio in bp (1/10,000).
+
+``nid`` is required only for ``node_mem_used_bp`` and
+``node_mem_free_bp``, to point to the specific NUMA node.
To know how user-space can set the tuning goal metric, the target value, and/or
the current value via :ref:`DAMON sysfs interface <sysfs_interface>`, refer to
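
As a quick illustration of driving the new goal parameters from user space, here
is a minimal sketch using the DAMON sysfs interface. It assumes a kdamond,
context, scheme and one quota goal have already been created at index 0; the
directory layout is the one described in the sysfs ABI/usage documents, and the
values are purely illustrative:

.. code-block:: console

   # cd /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/quotas/goals/0
   # echo node_mem_used_bp > target_metric
   # echo 9970 > target_value   # aim for ~99.7% of the node's memory being used
   # echo 1 > nid               # the NUMA node the metric refers to
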
diff --git a/Documentation/mm/damon/index.rst b/Documentation/mm/damon/index.rst
index 5a3359704cce..31c1fa955b3d 100644
--- a/Documentation/mm/damon/index.rst
+++ b/Documentation/mm/damon/index.rst
@@ -1,8 +1,8 @@
.. SPDX-License-Identifier: GPL-2.0
-==========================
-DAMON: Data Access MONitor
-==========================
+================================================================
+DAMON: Data Access MONitoring and Access-aware System Operations
+================================================================
DAMON is a Linux kernel subsystem that provides a framework for data access
monitoring and system operations based on the monitoring results. The core
diff --git a/Documentation/netlink/genetlink-c.yaml b/Documentation/netlink/genetlink-c.yaml
index 96fa1f1522ed..5a234e9b5fa2 100644
--- a/Documentation/netlink/genetlink-c.yaml
+++ b/Documentation/netlink/genetlink-c.yaml
@@ -148,6 +148,9 @@ properties:
attr-max-name:
description: The explicit name for the last member of the attribute enum.
type: string
+ header:
+ description: For C-compatible languages, header which already defines this attribute set.
+ type: string
# End genetlink-c
attributes:
description: List of attributes in the space.
diff --git a/Documentation/netlink/genetlink-legacy.yaml b/Documentation/netlink/genetlink-legacy.yaml
index a8c5b521937d..4cbfe666e6f5 100644
--- a/Documentation/netlink/genetlink-legacy.yaml
+++ b/Documentation/netlink/genetlink-legacy.yaml
@@ -193,6 +193,9 @@ properties:
attr-max-name:
description: The explicit name for the last member of the attribute enum.
type: string
+ header:
+ description: For C-compatible languages, header which already defines this attribute set.
+ type: string
# End genetlink-c
attributes:
description: List of attributes in the space.
diff --git a/Documentation/netlink/netlink-raw.yaml b/Documentation/netlink/netlink-raw.yaml
index 1b0772c8e333..e34bf23897fa 100644
--- a/Documentation/netlink/netlink-raw.yaml
+++ b/Documentation/netlink/netlink-raw.yaml
@@ -207,6 +207,9 @@ properties:
attr-max-name:
description: The explicit name for the last member of the attribute enum.
type: string
+ header:
+ description: For C-compatible languages, header which already defines this attribute set.
+ type: string
# End genetlink-c
attributes:
description: List of attributes in the space.
diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml
index bd9726269b4f..05fee1b7fe19 100644
--- a/Documentation/netlink/specs/devlink.yaml
+++ b/Documentation/netlink/specs/devlink.yaml
@@ -202,6 +202,28 @@ definitions:
name: exception
-
name: control
+ -
+ type: enum
+ name: var-attr-type
+ entries:
+ -
+ name: u8
+ value: 1
+ -
+ name: u16
+ -
+ name: u32
+ -
+ name: u64
+ -
+ name: string
+ -
+ name: flag
+ -
+ name: nul_string
+ value: 10
+ -
+ name: binary
attribute-sets:
-
@@ -498,6 +520,7 @@ attribute-sets:
-
name: param-type
type: u8
+ enum: var-attr-type
# TODO: fill in the attributes in between
@@ -592,6 +615,7 @@ attribute-sets:
-
name: fmsg-obj-value-type
type: u8
+ enum: var-attr-type
# TODO: fill in the attributes in between
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
index 655d8d10fe24..9f98715a6512 100644
--- a/Documentation/netlink/specs/ethtool.yaml
+++ b/Documentation/netlink/specs/ethtool.yaml
@@ -89,13 +89,33 @@ definitions:
doc: Group of short_detected states
-
name: phy-upstream-type
- enum-name:
+ enum-name: phy-upstream
+ header: linux/ethtool.h
type: enum
+ name-prefix: phy-upstream
entries: [ mac, phy ]
-
name: tcp-data-split
type: enum
entries: [ unknown, disabled, enabled ]
+ -
+ name: hwtstamp-source
+ doc: Source of the hardware timestamp
+ enum-name: hwtstamp-source
+ name-prefix: hwtstamp-source-
+ type: enum
+ entries:
+ -
+ name: netdev
+ doc: |
+ Hardware timestamp comes from a MAC or a device
+ which has MAC and PHY integrated
+ value: 1
+ -
+ name: phylib
+ doc: |
+ Hardware timestamp comes from one PHY device
+ of the network topology
attribute-sets:
-
@@ -894,6 +914,13 @@ attribute-sets:
name: hwtstamp-provider
type: nest
nested-attributes: ts-hwtstamp-provider
+ -
+ name: hwtstamp-source
+ type: u32
+ enum: hwtstamp-source
+ -
+ name: hwtstamp-phyindex
+ type: u32
-
name: cable-result
attr-cnt-name: __ethtool-a-cable-result-cnt
@@ -1979,6 +2006,8 @@ operations:
- phc-index
- stats
- hwtstamp-provider
+ - hwtstamp-source
+ - hwtstamp-phyindex
dump: *tsinfo-get-op
-
name: cable-test-act
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index f5e0750ab71d..c0ef6d0d7786 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -743,6 +743,18 @@ operations:
- defer-hard-irqs
- gro-flush-timeout
- irq-suspend-timeout
+ -
+ name: bind-tx
+ doc: Bind dmabuf to netdev for TX
+ attribute-set: dmabuf
+ do:
+ request:
+ attributes:
+ - ifindex
+ - fd
+ reply:
+ attributes:
+ - id
kernel-family:
headers: [ "net/netdev_netlink.h"]
diff --git a/Documentation/netlink/specs/nl80211.yaml b/Documentation/netlink/specs/nl80211.yaml
index 1ec49c3562cd..3611b11a7d8f 100644
--- a/Documentation/netlink/specs/nl80211.yaml
+++ b/Documentation/netlink/specs/nl80211.yaml
@@ -204,71 +204,6 @@ definitions:
- sched-scan-random-mac-addr
- no-random-mac-addr
-
- name: ieee80211-mcs-info
- type: struct
- members:
- -
- name: rx-mask
- type: binary
- len: 10
- -
- name: rx-highest
- type: u16
- byte-order: little-endian
- -
- name: tx-params
- type: u8
- -
- name: reserved
- type: binary
- len: 3
- -
- name: ieee80211-vht-mcs-info
- type: struct
- members:
- -
- name: rx-mcs-map
- type: u16
- byte-order: little-endian
- -
- name: rx-highest
- type: u16
- byte-order: little-endian
- -
- name: tx-mcs-map
- type: u16
- byte-order: little-endian
- -
- name: tx-highest
- type: u16
- byte-order: little-endian
- -
- name: ieee80211-ht-cap
- type: struct
- members:
- -
- name: cap-info
- type: u16
- byte-order: little-endian
- -
- name: ampdu-params-info
- type: u8
- -
- name: mcs
- type: binary
- struct: ieee80211-mcs-info
- -
- name: extended-ht-cap-info
- type: u16
- byte-order: little-endian
- -
- name: tx-bf-cap-info
- type: u32
- byte-order: little-endian
- -
- name: antenna-selection-info
- type: u8
- -
name: channel-type
type: enum
entries:
@@ -761,7 +696,6 @@ attribute-sets:
-
name: ht-capability-mask
type: binary
- struct: ieee80211-ht-cap
-
name: noack-map
type: u16
@@ -1382,7 +1316,6 @@ attribute-sets:
-
name: ht-mcs-set
type: binary
- struct: ieee80211-mcs-info
-
name: ht-capa
type: u16
@@ -1395,7 +1328,6 @@ attribute-sets:
-
name: vht-mcs-set
type: binary
- struct: ieee80211-vht-mcs-info
-
name: vht-capa
type: u32
diff --git a/Documentation/netlink/specs/ovpn.yaml b/Documentation/netlink/specs/ovpn.yaml
new file mode 100644
index 000000000000..096c51f0c69a
--- /dev/null
+++ b/Documentation/netlink/specs/ovpn.yaml
@@ -0,0 +1,367 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+#
+# Copyright (c) 2024-2025, OpenVPN Inc.
+#
+
+name: ovpn
+
+protocol: genetlink
+
+doc: Netlink protocol to control OpenVPN network devices
+
+definitions:
+ -
+ type: const
+ name: nonce-tail-size
+ value: 8
+ -
+ type: enum
+ name: cipher-alg
+ entries: [ none, aes-gcm, chacha20-poly1305 ]
+ -
+ type: enum
+ name: del-peer-reason
+ entries:
+ - teardown
+ - userspace
+ - expired
+ - transport-error
+ - transport-disconnect
+ -
+ type: enum
+ name: key-slot
+ entries: [ primary, secondary ]
+
+attribute-sets:
+ -
+ name: peer
+ attributes:
+ -
+ name: id
+ type: u32
+ doc: >-
+ The unique ID of the peer in the device context. To be used to identify
+ peers during operations for a specific device
+ checks:
+ max: 0xFFFFFF
+ -
+ name: remote-ipv4
+ type: u32
+ doc: The remote IPv4 address of the peer
+ byte-order: big-endian
+ display-hint: ipv4
+ -
+ name: remote-ipv6
+ type: binary
+ doc: The remote IPv6 address of the peer
+ display-hint: ipv6
+ checks:
+ exact-len: 16
+ -
+ name: remote-ipv6-scope-id
+ type: u32
+ doc: The scope id of the remote IPv6 address of the peer (RFC2553)
+ -
+ name: remote-port
+ type: u16
+ doc: The remote port of the peer
+ byte-order: big-endian
+ checks:
+ min: 1
+ -
+ name: socket
+ type: u32
+ doc: The socket to be used to communicate with the peer
+ -
+ name: socket-netnsid
+ type: s32
+ doc: The ID of the netns the socket assigned to this peer lives in
+ -
+ name: vpn-ipv4
+ type: u32
+ doc: The IPv4 address assigned to the peer by the server
+ byte-order: big-endian
+ display-hint: ipv4
+ -
+ name: vpn-ipv6
+ type: binary
+ doc: The IPv6 address assigned to the peer by the server
+ display-hint: ipv6
+ checks:
+ exact-len: 16
+ -
+ name: local-ipv4
+ type: u32
+ doc: The local IPv4 to be used to send packets to the peer (UDP only)
+ byte-order: big-endian
+ display-hint: ipv4
+ -
+ name: local-ipv6
+ type: binary
+ doc: The local IPv6 to be used to send packets to the peer (UDP only)
+ display-hint: ipv6
+ checks:
+ exact-len: 16
+ -
+ name: local-port
+ type: u16
+ doc: The local port to be used to send packets to the peer (UDP only)
+ byte-order: big-endian
+ checks:
+ min: 1
+ -
+ name: keepalive-interval
+ type: u32
+ doc: >-
+ The number of seconds after which a keep alive message is sent to the
+ peer
+ -
+ name: keepalive-timeout
+ type: u32
+ doc: >-
+ The number of seconds from the last activity after which the peer is
+ assumed dead
+ -
+ name: del-reason
+ type: u32
+ doc: The reason why a peer was deleted
+ enum: del-peer-reason
+ -
+ name: vpn-rx-bytes
+ type: uint
+ doc: Number of bytes received over the tunnel
+ -
+ name: vpn-tx-bytes
+ type: uint
+ doc: Number of bytes transmitted over the tunnel
+ -
+ name: vpn-rx-packets
+ type: uint
+ doc: Number of packets received over the tunnel
+ -
+ name: vpn-tx-packets
+ type: uint
+ doc: Number of packets transmitted over the tunnel
+ -
+ name: link-rx-bytes
+ type: uint
+ doc: Number of bytes received at the transport level
+ -
+ name: link-tx-bytes
+ type: uint
+ doc: Number of bytes transmitted at the transport level
+ -
+ name: link-rx-packets
+ type: uint
+ doc: Number of packets received at the transport level
+ -
+ name: link-tx-packets
+ type: uint
+ doc: Number of packets transmitted at the transport level
+ -
+ name: keyconf
+ attributes:
+ -
+ name: peer-id
+ type: u32
+ doc: >-
+ The unique ID of the peer in the device context. To be used to
+ identify peers during key operations
+ checks:
+ max: 0xFFFFFF
+ -
+ name: slot
+ type: u32
+ doc: The slot where the key should be stored
+ enum: key-slot
+ -
+ name: key-id
+ doc: >-
+ The unique ID of the key in the peer context. Used to fetch the
+ correct key upon decryption
+ type: u32
+ checks:
+ max: 7
+ -
+ name: cipher-alg
+ type: u32
+ doc: The cipher to be used when communicating with the peer
+ enum: cipher-alg
+ -
+ name: encrypt-dir
+ type: nest
+ doc: Key material for encrypt direction
+ nested-attributes: keydir
+ -
+ name: decrypt-dir
+ type: nest
+ doc: Key material for decrypt direction
+ nested-attributes: keydir
+ -
+ name: keydir
+ attributes:
+ -
+ name: cipher-key
+ type: binary
+ doc: The actual key to be used by the cipher
+ checks:
+ max-len: 256
+ -
+ name: nonce-tail
+ type: binary
+ doc: >-
+ Random nonce to be concatenated to the packet ID, in order to
+ obtain the actual cipher IV
+ checks:
+ exact-len: nonce-tail-size
+ -
+ name: ovpn
+ attributes:
+ -
+ name: ifindex
+ type: u32
+ doc: Index of the ovpn interface to operate on
+ -
+ name: peer
+ type: nest
+ doc: >-
+      The peer object containing the attributes of interest for the specific
+ operation
+ nested-attributes: peer
+ -
+ name: keyconf
+ type: nest
+ doc: Peer specific cipher configuration
+ nested-attributes: keyconf
+
+operations:
+ list:
+ -
+ name: peer-new
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Add a remote peer
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - peer
+ -
+ name: peer-set
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+    doc: Modify a remote peer
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - peer
+ -
+ name: peer-get
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Retrieve data about existing remote peers (or a specific one)
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - peer
+ reply:
+ attributes:
+ - peer
+ dump:
+ request:
+ attributes:
+ - ifindex
+ reply:
+ attributes:
+ - peer
+ -
+ name: peer-del
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Delete existing remote peer
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - peer
+ -
+ name: peer-del-ntf
+ doc: Notification about a peer being deleted
+ notify: peer-get
+ mcgrp: peers
+
+ -
+ name: key-new
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Add a cipher key for a specific peer
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - keyconf
+ -
+ name: key-get
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Retrieve non-sensitive data about peer key and cipher
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - keyconf
+ reply:
+ attributes:
+ - keyconf
+ -
+ name: key-swap
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Swap primary and secondary session keys for a specific peer
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - keyconf
+ -
+ name: key-swap-ntf
+ notify: key-get
+ doc: >-
+      Notification about a key having exhausted its IV space and requiring
+ renegotiation
+ mcgrp: peers
+ -
+ name: key-del
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Delete cipher key for a specific peer
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - keyconf
+
+mcast-groups:
+ list:
+ -
+ name: peers
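
Since the spec is consumed by the in-tree YNL tooling, the new family can be
exercised without writing any C. A hedged sketch, assuming the YNL CLI (its path
moved under tools/net/ynl/pyynl/ in recent trees), an existing ovpn interface
with ifindex 10, and CAP_NET_ADMIN:

.. code-block:: console

   # ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/ovpn.yaml \
         --dump peer-get --json '{"ifindex": 10}'
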
diff --git a/Documentation/netlink/specs/ovs_datapath.yaml b/Documentation/netlink/specs/ovs_datapath.yaml
index edc8c95ca6f5..df6a8f94975e 100644
--- a/Documentation/netlink/specs/ovs_datapath.yaml
+++ b/Documentation/netlink/specs/ovs_datapath.yaml
@@ -35,8 +35,7 @@ definitions:
name: dispatch-upcall-per-cpu
doc: Allow per-cpu dispatch of upcalls
-
- name: datapath-stats
- enum-name: ovs-dp-stats
+ name: ovs-dp-stats
type: struct
members:
-
@@ -52,8 +51,7 @@ definitions:
name: n-flows
type: u64
-
- name: megaflow-stats
- enum-name: ovs-dp-megaflow-stats
+ name: ovs-dp-megaflow-stats
type: struct
members:
-
@@ -88,11 +86,11 @@ attribute-sets:
-
name: stats
type: binary
- struct: datapath-stats
+ struct: ovs-dp-stats
-
name: megaflow-stats
type: binary
- struct: megaflow-stats
+ struct: ovs-dp-megaflow-stats
-
name: user-features
type: u32
diff --git a/Documentation/netlink/specs/ovs_vport.yaml b/Documentation/netlink/specs/ovs_vport.yaml
index 86ba9ac2a521..306da6bb842d 100644
--- a/Documentation/netlink/specs/ovs_vport.yaml
+++ b/Documentation/netlink/specs/ovs_vport.yaml
@@ -23,9 +23,8 @@ definitions:
name-prefix: ovs-vport-type-
entries: [ unspec, netdev, internal, gre, vxlan, geneve ]
-
- name: vport-stats
+ name: ovs-vport-stats
type: struct
- enum-name: ovs-vport-stats
members:
-
name: rx-packets
@@ -106,7 +105,7 @@ attribute-sets:
-
name: stats
type: binary
- struct: vport-stats
+ struct: ovs-vport-stats
-
name: pad
type: unused
@@ -123,12 +122,12 @@ attribute-sets:
operations:
name-prefix: ovs-vport-cmd-
+ fixed-header: ovs-header
list:
-
name: new
doc: Create a new OVS vport
attribute-set: vport
- fixed-header: ovs-header
do:
request:
attributes:
@@ -141,7 +140,6 @@ operations:
name: del
doc: Delete existing OVS vport from a data path
attribute-set: vport
- fixed-header: ovs-header
do:
request:
attributes:
@@ -152,7 +150,6 @@ operations:
name: get
doc: Get / dump OVS vport configuration and state
attribute-set: vport
- fixed-header: ovs-header
do: &vport-get-op
request:
attributes:
diff --git a/Documentation/netlink/specs/rt_addr.yaml b/Documentation/netlink/specs/rt-addr.yaml
index df6b23f06a22..4f86aa1075da 100644
--- a/Documentation/netlink/specs/rt_addr.yaml
+++ b/Documentation/netlink/specs/rt-addr.yaml
@@ -2,6 +2,7 @@
name: rt-addr
protocol: netlink-raw
+uapi-header: linux/rtnetlink.h
protonum: 0
doc:
@@ -49,6 +50,8 @@ definitions:
-
name: ifa-flags
type: flags
+ name-prefix: ifa-f-
+ enum-name:
entries:
-
name: secondary
@@ -124,6 +127,7 @@ attribute-sets:
operations:
fixed-header: ifaddrmsg
enum-model: directional
+ name-prefix: rtm-
list:
-
name: newaddr
@@ -133,11 +137,6 @@ operations:
request:
value: 20
attributes: &ifaddr-all
- - ifa-family
- - ifa-flags
- - ifa-prefixlen
- - ifa-scope
- - ifa-index
- address
- label
- local
@@ -150,11 +149,6 @@ operations:
request:
value: 21
attributes:
- - ifa-family
- - ifa-flags
- - ifa-prefixlen
- - ifa-scope
- - ifa-index
- address
- local
-
@@ -164,8 +158,7 @@ operations:
dump:
request:
value: 22
- attributes:
- - ifa-index
+ attributes: []
reply:
value: 20
attributes: *ifaddr-all
@@ -177,9 +170,7 @@ operations:
do:
request:
value: 58
- attributes:
- - ifa-family
- - ifa-index
+ attributes: []
reply:
value: 58
attributes: &mcaddr-attrs
@@ -188,8 +179,7 @@ operations:
dump:
request:
value: 58
- attributes:
- - ifa-family
+ attributes: []
reply:
value: 58
attributes: *mcaddr-attrs
diff --git a/Documentation/netlink/specs/rt_link.yaml b/Documentation/netlink/specs/rt-link.yaml
index 31238455f8e9..5ec3d35b7a38 100644
--- a/Documentation/netlink/specs/rt_link.yaml
+++ b/Documentation/netlink/specs/rt-link.yaml
@@ -2,6 +2,7 @@
name: rt-link
protocol: netlink-raw
+uapi-header: linux/rtnetlink.h
protonum: 0
doc:
@@ -11,6 +12,9 @@ definitions:
-
name: ifinfo-flags
type: flags
+ header: linux/if.h
+ enum-name: net-device-flags
+ name-prefix: iff-
entries:
-
name: up
@@ -53,6 +57,7 @@ definitions:
-
name: vlan-protocols
type: enum
+ enum-name:
entries:
-
name: 8021q
@@ -299,421 +304,297 @@ definitions:
type: u8
-
name: ipv4-devconf
- type: struct
- members:
+ enum-name:
+ type: enum
+ entries:
-
name: forwarding
- type: u32
-
name: mc-forwarding
- type: u32
-
name: proxy-arp
- type: u32
-
name: accept-redirects
- type: u32
-
name: secure-redirects
- type: u32
-
name: send-redirects
- type: u32
-
name: shared-media
- type: u32
-
name: rp-filter
- type: u32
-
name: accept-source-route
- type: u32
-
name: bootp-relay
- type: u32
-
name: log-martians
- type: u32
-
name: tag
- type: u32
-
name: arpfilter
- type: u32
-
name: medium-id
- type: u32
-
name: noxfrm
- type: u32
-
name: nopolicy
- type: u32
-
name: force-igmp-version
- type: u32
-
name: arp-announce
- type: u32
-
name: arp-ignore
- type: u32
-
name: promote-secondaries
- type: u32
-
name: arp-accept
- type: u32
-
name: arp-notify
- type: u32
-
name: accept-local
- type: u32
-
name: src-vmark
- type: u32
-
name: proxy-arp-pvlan
- type: u32
-
name: route-localnet
- type: u32
-
name: igmpv2-unsolicited-report-interval
- type: u32
-
name: igmpv3-unsolicited-report-interval
- type: u32
-
name: ignore-routes-with-linkdown
- type: u32
-
name: drop-unicast-in-l2-multicast
- type: u32
-
name: drop-gratuitous-arp
- type: u32
-
name: bc-forwarding
- type: u32
-
name: arp-evict-nocarrier
- type: u32
-
name: ipv6-devconf
- type: struct
- members:
+ enum-name:
+ type: enum
+ entries:
-
name: forwarding
- type: u32
-
name: hoplimit
- type: u32
-
name: mtu6
- type: u32
-
name: accept-ra
- type: u32
-
name: accept-redirects
- type: u32
-
name: autoconf
- type: u32
-
name: dad-transmits
- type: u32
-
name: rtr-solicits
- type: u32
-
name: rtr-solicit-interval
- type: u32
-
name: rtr-solicit-delay
- type: u32
-
name: use-tempaddr
- type: u32
-
name: temp-valid-lft
- type: u32
-
name: temp-prefered-lft
- type: u32
-
name: regen-max-retry
- type: u32
-
name: max-desync-factor
- type: u32
-
name: max-addresses
- type: u32
-
name: force-mld-version
- type: u32
-
name: accept-ra-defrtr
- type: u32
-
name: accept-ra-pinfo
- type: u32
-
name: accept-ra-rtr-pref
- type: u32
-
name: rtr-probe-interval
- type: u32
-
name: accept-ra-rt-info-max-plen
- type: u32
-
name: proxy-ndp
- type: u32
-
name: optimistic-dad
- type: u32
-
name: accept-source-route
- type: u32
-
name: mc-forwarding
- type: u32
-
name: disable-ipv6
- type: u32
-
name: accept-dad
- type: u32
-
name: force-tllao
- type: u32
-
name: ndisc-notify
- type: u32
-
name: mldv1-unsolicited-report-interval
- type: u32
-
name: mldv2-unsolicited-report-interval
- type: u32
-
name: suppress-frag-ndisc
- type: u32
-
name: accept-ra-from-local
- type: u32
-
name: use-optimistic
- type: u32
-
name: accept-ra-mtu
- type: u32
-
name: stable-secret
- type: u32
-
name: use-oif-addrs-only
- type: u32
-
name: accept-ra-min-hop-limit
- type: u32
-
name: ignore-routes-with-linkdown
- type: u32
-
name: drop-unicast-in-l2-multicast
- type: u32
-
name: drop-unsolicited-na
- type: u32
-
name: keep-addr-on-down
- type: u32
-
name: rtr-solicit-max-interval
- type: u32
-
name: seg6-enabled
- type: u32
-
name: seg6-require-hmac
- type: u32
-
name: enhanced-dad
- type: u32
-
name: addr-gen-mode
- type: u8
-
name: disable-policy
- type: u32
-
name: accept-ra-rt-info-min-plen
- type: u32
-
name: ndisc-tclass
- type: u32
-
name: rpl-seg-enabled
- type: u32
-
name: ra-defrtr-metric
- type: u32
-
name: ioam6-enabled
- type: u32
-
name: ioam6-id
- type: u32
-
name: ioam6-id-wide
- type: u32
-
name: ndisc-evict-nocarrier
- type: u32
-
name: accept-untracked-na
- type: u32
-
name: ifla-icmp6-stats
- type: struct
- members:
+ enum-name:
+ type: enum
+ entries:
+ -
+ name: num
-
name: inmsgs
- type: u64
-
name: inerrors
- type: u64
-
name: outmsgs
- type: u64
-
name: outerrors
- type: u64
-
name: csumerrors
- type: u64
-
name: ratelimithost
- type: u64
-
name: ifla-inet6-stats
- type: struct
- members:
+ enum-name:
+ type: enum
+ entries:
+ -
+ name: num
-
name: inpkts
- type: u64
-
name: inoctets
- type: u64
-
name: indelivers
- type: u64
-
name: outforwdatagrams
- type: u64
-
name: outpkts
- type: u64
-
name: outoctets
- type: u64
-
name: inhdrerrors
- type: u64
-
name: intoobigerrors
- type: u64
-
name: innoroutes
- type: u64
-
name: inaddrerrors
- type: u64
-
name: inunknownprotos
- type: u64
-
name: intruncatedpkts
- type: u64
-
name: indiscards
- type: u64
-
name: outdiscards
- type: u64
-
name: outnoroutes
- type: u64
-
name: reasmtimeout
- type: u64
-
name: reasmreqds
- type: u64
-
name: reasmoks
- type: u64
-
name: reasmfails
- type: u64
-
name: fragoks
- type: u64
-
name: fragfails
- type: u64
-
name: fragcreates
- type: u64
-
name: inmcastpkts
- type: u64
-
name: outmcastpkts
- type: u64
-
name: inbcastpkts
- type: u64
-
name: outbcastpkts
- type: u64
-
name: inmcastoctets
- type: u64
-
name: outmcastoctets
- type: u64
-
name: inbcastoctets
- type: u64
-
name: outbcastoctets
- type: u64
-
name: csumerrors
- type: u64
-
name: noectpkts
- type: u64
-
name: ect1-pkts
- type: u64
-
name: ect0-pkts
- type: u64
-
name: cepkts
- type: u64
-
name: reasm-overlaps
- type: u64
- name: br-boolopt-multi
type: struct
+ header: linux/if_bridge.h
members:
-
name: optval
@@ -754,6 +635,7 @@ definitions:
-
name: vlan-flags
type: flags
+ enum-name:
entries:
- reorder-hdr
- gvrp
@@ -840,6 +722,7 @@ definitions:
-
name: ifla-vf-link-state-enum
type: enum
+ enum-name:
entries:
- auto
- enable
@@ -906,6 +789,7 @@ definitions:
-
name: rtext-filter
type: flags
+ enum-name:
entries:
- vf
- brvlan
@@ -918,6 +802,7 @@ definitions:
-
name: netkit-policy
type: enum
+ enum-name:
entries:
-
name: forward
@@ -928,6 +813,7 @@ definitions:
-
name: netkit-mode
type: enum
+ enum-name: netkit-mode
entries:
- name: l2
- name: l3
@@ -935,9 +821,18 @@ definitions:
-
name: netkit-scrub
type: enum
+ enum-name:
entries:
- name: none
- name: default
+ -
+ name: ovpn-mode
+ enum-name: ovpn-mode
+ name-prefix: ovpn-mode
+ type: enum
+ entries:
+ - p2p
+ - mp
attribute-sets:
-
@@ -1113,11 +1008,10 @@ attribute-sets:
-
name: prop-list
type: nest
- nested-attributes: link-attrs
+ nested-attributes: prop-list-link-attrs
-
name: alt-ifname
type: string
- multi-attr: true
-
name: perm-address
type: binary
@@ -1164,25 +1058,35 @@ attribute-sets:
name: netns-immutable
type: u8
-
+ name: prop-list-link-attrs
+ subset-of: link-attrs
+ attributes:
+ -
+ name: alt-ifname
+ multi-attr: true
+ -
name: af-spec-attrs
+ name-prefix: af-
+ attr-max-name: af-max
attributes:
-
- name: "inet"
+ name: inet
type: nest
value: 2
nested-attributes: ifla-attrs
-
- name: "inet6"
+ name: inet6
type: nest
value: 10
nested-attributes: ifla6-attrs
-
- name: "mctp"
+ name: mctp
type: nest
value: 45
nested-attributes: mctp-attrs
-
name: vfinfo-list-attrs
+ name-prefix: ifla-vf-
attributes:
-
name: info
@@ -1191,6 +1095,7 @@ attribute-sets:
multi-attr: true
-
name: vfinfo-attrs
+ name-prefix: ifla-vf-
attributes:
-
name: mac
@@ -1245,6 +1150,7 @@ attribute-sets:
type: binary
-
name: vf-stats-attrs
+ name-prefix: ifla-vf-stats-
attributes:
-
name: rx-packets
@@ -1276,6 +1182,8 @@ attribute-sets:
type: u64
-
name: vf-vlan-attrs
+ name-prefix: ifla-vf-vlan-
+ attr-max-name: ifla-vf-vlan-info-max
attributes:
-
name: info
@@ -1284,12 +1192,15 @@ attribute-sets:
multi-attr: true
-
name: vf-ports-attrs
+ name-prefix: ifla-
attributes: []
-
name: port-self-attrs
+ name-prefix: ifla-
attributes: []
-
name: linkinfo-attrs
+ name-prefix: ifla-info-
attributes:
-
name: kind
@@ -1414,6 +1325,8 @@ attribute-sets:
type: indexed-array
sub-type: binary
display-hint: ipv6
+ checks:
+ exact-len: 16
-
name: coupled-control
type: u8
@@ -1585,7 +1498,7 @@ attribute-sets:
name: nf-call-iptables
type: u8
-
- name: nf-call-ip6-tables
+ name: nf-call-ip6tables
type: u8
-
name: nf-call-arptables
@@ -1843,6 +1756,7 @@ attribute-sets:
-
name: linkinfo-vti-attrs
name-prefix: ifla-vti-
+ header: linux/if_tunnel.h
attributes:
-
name: link
@@ -2077,7 +1991,7 @@ attribute-sets:
name: id
type: u16
-
- name: flag
+ name: flags
type: binary
struct: ifla-vlan-flags
-
@@ -2095,7 +2009,7 @@ attribute-sets:
byte-order: big-endian
-
name: ifla-vlan-qos
- name-prefix: ifla-vlan-qos
+ name-prefix: ifla-vlan-qos-
attributes:
-
name: mapping
@@ -2111,6 +2025,7 @@ attribute-sets:
type: u32
-
name: xdp-attrs
+ name-prefix: ifla-xdp-
attributes:
-
name: fd
@@ -2138,13 +2053,16 @@ attribute-sets:
type: s32
-
name: ifla-attrs
+ name-prefix: ifla-inet-
attributes:
-
name: conf
type: binary
- struct: ipv4-devconf
+ sub-type: u32
+ doc: u32 indexed by ipv4-devconf - 1 on output, on input it's a nest
-
name: ifla6-attrs
+ name-prefix: ifla-inet6-
attributes:
-
name: flags
@@ -2152,11 +2070,12 @@ attribute-sets:
-
name: conf
type: binary
- struct: ipv6-devconf
+ sub-type: u32
+ doc: u32 indexed by ipv6-devconf - 1 on output, on input it's a nest
-
name: stats
type: binary
- struct: ifla-inet6-stats
+ sub-type: u64
-
name: mcast
type: binary
@@ -2165,9 +2084,9 @@ attribute-sets:
type: binary
struct: ifla-cacheinfo
-
- name: icmp6-stats
+ name: icmp6stats
type: binary
- struct: ifla-icmp6-stats
+ sub-type: u64
-
name: token
type: binary
@@ -2179,9 +2098,10 @@ attribute-sets:
type: u32
-
name: mctp-attrs
+ name-prefix: ifla-mctp-
attributes:
-
- name: mctp-net
+ name: net
type: u32
-
name: phys-binding
@@ -2209,6 +2129,7 @@ attribute-sets:
type: binary
-
name: link-offload-xstats
+ name-prefix: ifla-offload-xstats-
attributes:
-
name: cpu-hit
@@ -2223,6 +2144,7 @@ attribute-sets:
type: binary
-
name: hw-s-info-one
+ name-prefix: ifla-offload-xstats-hw-s-info-
attributes:
-
name: request
@@ -2232,6 +2154,8 @@ attribute-sets:
type: u8
-
name: link-dpll-pin-attrs
+ name-prefix: dpll-a-
+ header: linux/dpll.h
attributes:
-
name: id
@@ -2272,6 +2196,14 @@ attribute-sets:
-
name: tailroom
type: u16
+ -
+ name: linkinfo-ovpn-attrs
+ name-prefix: ifla-ovpn-
+ attributes:
+ -
+ name: mode
+ type: u8
+ enum: ovpn-mode
sub-messages:
-
@@ -2322,6 +2254,9 @@ sub-messages:
-
value: netkit
attribute-set: linkinfo-netkit-attrs
+ -
+ value: ovpn
+ attribute-set: linkinfo-ovpn-attrs
-
name: linkinfo-member-data-msg
formats:
@@ -2334,6 +2269,7 @@ sub-messages:
operations:
enum-model: directional
+ name-prefix: rtm-
list:
-
name: newlink
@@ -2344,7 +2280,6 @@ operations:
request:
value: 16
attributes: &link-new-attrs
- - ifi-index
- ifname
- net-ns-pid
- net-ns-fd
@@ -2360,7 +2295,6 @@ operations:
- txqlen
- operstate
- linkmode
- - group
- gso-max-size
- gso-max-segs
- gro-max-size
@@ -2368,6 +2302,12 @@ operations:
- gro-ipv4-max-size
- af-spec
-
+ name: newlink-ntf
+ doc: Notify that a link has been created
+ value: 16
+ notify: getlink
+ fixed-header: ifinfomsg
+ -
name: dellink
doc: Delete an existing link.
attribute-set: link-attrs
@@ -2376,7 +2316,6 @@ operations:
request:
value: 17
attributes:
- - ifi-index
- ifname
-
name: getlink
@@ -2387,7 +2326,6 @@ operations:
request:
value: 18
attributes:
- - ifi-index
- ifname
- alt-ifname
- ext-mask
@@ -2395,11 +2333,6 @@ operations:
reply:
value: 16
attributes: &link-all-attrs
- - ifi-family
- - ifi-type
- - ifi-index
- - ifi-flags
- - ifi-change
- address
- broadcast
- ifname
@@ -2445,7 +2378,6 @@ operations:
- xdp
- event
- new-netnsid
- - if-netnsid
- target-netnsid
- carrier-up-count
- carrier-down-count
@@ -2453,7 +2385,6 @@ operations:
- min-mtu
- max-mtu
- prop-list
- - alt-ifname
- perm-address
- proto-down-reason
- parent-dev-name
@@ -2493,14 +2424,9 @@ operations:
do:
request:
value: 94
- attributes:
- - ifindex
reply:
value: 92
attributes: &link-stats-attrs
- - family
- - ifindex
- - filter-mask
- link-64
- link-xstats
- link-xstats-slave
diff --git a/Documentation/netlink/specs/rt_neigh.yaml b/Documentation/netlink/specs/rt-neigh.yaml
index e670b6dc07be..e9cba164e3d1 100644
--- a/Documentation/netlink/specs/rt_neigh.yaml
+++ b/Documentation/netlink/specs/rt-neigh.yaml
@@ -2,6 +2,7 @@
name: rt-neigh
protocol: netlink-raw
+uapi-header: linux/rtnetlink.h
protonum: 0
doc:
@@ -13,25 +14,25 @@ definitions:
type: struct
members:
-
- name: family
+ name: ndm-family
type: u8
-
- name: pad
+ name: ndm-pad
type: pad
len: 3
-
- name: ifindex
+ name: ndm-ifindex
type: s32
-
- name: state
+ name: ndm-state
type: u16
enum: nud-state
-
- name: flags
+ name: ndm-flags
type: u8
enum: ntf-flags
-
- name: type
+ name: ndm-type
type: u8
enum: rtm-type
-
@@ -48,6 +49,7 @@ definitions:
-
name: nud-state
type: flags
+ enum-name:
entries:
- incomplete
- reachable
@@ -60,6 +62,7 @@ definitions:
-
name: ntf-flags
type: flags
+ enum-name:
entries:
- use
- self
@@ -72,12 +75,14 @@ definitions:
-
name: ntf-ext-flags
type: flags
+ enum-name:
entries:
- managed
- locked
-
name: rtm-type
type: enum
+ enum-name:
entries:
- unspec
- unicast
@@ -179,6 +184,7 @@ definitions:
attribute-sets:
-
name: neighbour-attrs
+ name-prefix: nda-
attributes:
-
name: unspec
@@ -189,7 +195,7 @@ attribute-sets:
type: binary
display-hint: ipv4
-
- name: lladr
+ name: lladdr
type: binary
display-hint: mac
-
@@ -241,6 +247,7 @@ attribute-sets:
type: u8
-
name: ndt-attrs
+ name-prefix: ndta-
attributes:
-
name: name
@@ -274,6 +281,7 @@ attribute-sets:
type: pad
-
name: ndtpa-attrs
+ name-prefix: ndtpa-
attributes:
-
name: ifindex
@@ -335,6 +343,7 @@ attribute-sets:
operations:
enum-model: directional
+ name-prefix: rtm-
list:
-
name: newneigh
@@ -372,7 +381,7 @@ operations:
name: delneigh-ntf
doc: Notify a neighbour deletion
value: 29
- notify: delneigh
+ notify: getneigh
fixed-header: ndmsg
-
name: getneigh
@@ -393,6 +402,7 @@ operations:
- ifindex
- master
reply:
+ value: 28
attributes: *neighbour-all
-
name: newneigh-ntf
diff --git a/Documentation/netlink/specs/rt_route.yaml b/Documentation/netlink/specs/rt-route.yaml
index 292469c7d4b9..800f3a823d47 100644
--- a/Documentation/netlink/specs/rt_route.yaml
+++ b/Documentation/netlink/specs/rt-route.yaml
@@ -2,6 +2,7 @@
name: rt-route
protocol: netlink-raw
+uapi-header: linux/rtnetlink.h
protonum: 0
doc:
@@ -11,6 +12,7 @@ definitions:
-
name: rtm-type
name-prefix: rtn-
+ enum-name:
type: enum
entries:
- unspec
@@ -245,21 +247,19 @@ attribute-sets:
operations:
enum-model: directional
+ fixed-header: rtmsg
+ name-prefix: rtm-
list:
-
name: getroute
doc: Dump route information.
attribute-set: route-attrs
- fixed-header: rtmsg
do:
request:
value: 26
attributes:
- - rtm-family
- src
- - rtm-src-len
- dst
- - rtm-dst-len
- iif
- oif
- ip-proto
@@ -271,15 +271,6 @@ operations:
reply:
value: 24
attributes: &all-route-attrs
- - rtm-family
- - rtm-dst-len
- - rtm-src-len
- - rtm-tos
- - rtm-table
- - rtm-protocol
- - rtm-scope
- - rtm-type
- - rtm-flags
- dst
- src
- iif
@@ -311,8 +302,7 @@ operations:
dump:
request:
value: 26
- attributes:
- - rtm-family
+ attributes: []
reply:
value: 24
attributes: *all-route-attrs
@@ -320,7 +310,6 @@ operations:
name: newroute
doc: Create a new route
attribute-set: route-attrs
- fixed-header: rtmsg
do:
request:
value: 24
@@ -329,7 +318,6 @@ operations:
name: delroute
doc: Delete an existing route
attribute-set: route-attrs
- fixed-header: rtmsg
do:
request:
value: 25
diff --git a/Documentation/netlink/specs/rt_rule.yaml b/Documentation/netlink/specs/rt-rule.yaml
index de0938d36541..003707ca4a3e 100644
--- a/Documentation/netlink/specs/rt_rule.yaml
+++ b/Documentation/netlink/specs/rt-rule.yaml
@@ -2,6 +2,7 @@
name: rt-rule
protocol: netlink-raw
+uapi-header: linux/fib_rules.h
protonum: 0
doc:
@@ -56,6 +57,7 @@ definitions:
-
name: fr-act
type: enum
+ enum-name:
entries:
- unspec
- to-tbl
@@ -90,6 +92,7 @@ definitions:
attribute-sets:
-
name: fib-rule-attrs
+ name-prefix: fra-
attributes:
-
name: dst
@@ -198,6 +201,7 @@ attribute-sets:
operations:
enum-model: directional
fixed-header: fib-rule-hdr
+ name-prefix: rtm-
list:
-
name: newrule
@@ -234,7 +238,7 @@ operations:
name: newrule-ntf
doc: Notify a rule creation
value: 32
- notify: newrule
+ notify: getrule
-
name: delrule
doc: Remove an existing FIB rule
@@ -247,7 +251,7 @@ operations:
name: delrule-ntf
doc: Notify a rule deletion
value: 33
- notify: delrule
+ notify: getrule
-
name: getrule
doc: Dump all FIB rules
diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml
index aacccea5dfe4..cb7ea7d62e56 100644
--- a/Documentation/netlink/specs/tc.yaml
+++ b/Documentation/netlink/specs/tc.yaml
@@ -2,6 +2,7 @@
name: tc
protocol: netlink-raw
+uapi-header: linux/pkt_cls.h
protonum: 0
doc:
@@ -12,6 +13,7 @@ definitions:
-
name: tcmsg
type: struct
+ header: linux/rtnetlink.h
members:
-
name: family
@@ -33,7 +35,8 @@ definitions:
name: info
type: u32
-
- name: tc-cls-flags
+ name: cls-flags
+ enum-name:
type: flags
entries:
- skip-hw
@@ -42,7 +45,9 @@ definitions:
- not-in-nw
- verbose
-
- name: tc-flower-key-ctrl-flags
+ name: flower-key-ctrl-flags
+ name-prefix: tca-flower-key-flags-
+ enum-name:
type: flags
entries:
- frag
@@ -630,6 +635,7 @@ definitions:
-
name: tc-ratespec
type: struct
+ header: linux/pkt_sched.h
members:
-
name: cell-log
@@ -1186,7 +1192,7 @@ definitions:
name: firstuse
type: u64
-
- name: tc-gen
+ name: tc-gact
type: struct
members:
-
@@ -1377,7 +1383,8 @@ definitions:
type: s32
attribute-sets:
-
- name: tc-attrs
+ name: attrs
+ name-prefix: tca-
attributes:
-
name: kind
@@ -1385,7 +1392,7 @@ attribute-sets:
-
name: options
type: sub-message
- sub-message: tc-options-msg
+ sub-message: options-msg
selector: kind
-
name: stats
@@ -1436,7 +1443,8 @@ attribute-sets:
name: ext-warn-msg
type: string
-
- name: tc-act-attrs
+ name: act-attrs
+ name-prefix: tca-act-
attributes:
-
name: kind
@@ -1444,7 +1452,7 @@ attribute-sets:
-
name: options
type: sub-message
- sub-message: tc-act-options-msg
+ sub-message: act-options-msg
selector: kind
-
name: index
@@ -1452,7 +1460,7 @@ attribute-sets:
-
name: stats
type: nest
- nested-attributes: tc-act-stats-attrs
+ nested-attributes: tca-stats-attrs
-
name: pad
type: pad
@@ -1472,39 +1480,9 @@ attribute-sets:
name: in-hw-count
type: u32
-
- name: tc-act-stats-attrs
- attributes:
- -
- name: basic
- type: binary
- struct: gnet-stats-basic
- -
- name: rate-est
- type: binary
- struct: gnet-stats-rate-est
- -
- name: queue
- type: binary
- struct: gnet-stats-queue
- -
- name: app
- type: binary
- -
- name: rate-est64
- type: binary
- struct: gnet-stats-rate-est64
- -
- name: pad
- type: pad
- -
- name: basic-hw
- type: binary
- struct: gnet-stats-basic
- -
- name: pkt64
- type: u64
- -
- name: tc-act-bpf-attrs
+ name: act-bpf-attrs
+ name-prefix: tca-act-bpf-
+ header: linux/tc_act/tc_bpf.h
attributes:
-
name: tm
@@ -1535,7 +1513,9 @@ attribute-sets:
name: id
type: binary
-
- name: tc-act-connmark-attrs
+ name: act-connmark-attrs
+ name-prefix: tca-connmark-
+ header: linux/tc_act/tc_connmark.h
attributes:
-
name: parms
@@ -1548,7 +1528,9 @@ attribute-sets:
name: pad
type: pad
-
- name: tc-act-csum-attrs
+ name: act-csum-attrs
+ name-prefix: tca-csum-
+ header: linux/tc_act/tc_csum.h
attributes:
-
name: parms
@@ -1561,7 +1543,9 @@ attribute-sets:
name: pad
type: pad
-
- name: tc-act-ct-attrs
+ name: act-ct-attrs
+ name-prefix: tca-ct-
+ header: linux/tc_act/tc_ct.h
attributes:
-
name: parms
@@ -1623,7 +1607,9 @@ attribute-sets:
name: helper-proto
type: u8
-
- name: tc-act-ctinfo-attrs
+ name: act-ctinfo-attrs
+ name-prefix: tca-ctinfo-
+ header: linux/tc_act/tc_ctinfo.h
attributes:
-
name: pad
@@ -1657,7 +1643,9 @@ attribute-sets:
name: stats-cpmark-set
type: u64
-
- name: tc-act-gate-attrs
+ name: act-gate-attrs
+ name-prefix: tca-gate-
+ header: linux/tc_act/tc_gate.h
attributes:
-
name: tm
@@ -1691,7 +1679,9 @@ attribute-sets:
name: clockid
type: s32
-
- name: tc-act-ife-attrs
+ name: act-ife-attrs
+ name-prefix: tca-ife-
+ header: linux/tc_act/tc_ife.h
attributes:
-
name: parms
@@ -1716,7 +1706,9 @@ attribute-sets:
name: pad
type: pad
-
- name: tc-act-mirred-attrs
+ name: act-mirred-attrs
+ name-prefix: tca-mirred-
+ header: linux/tc_act/tc_mirred.h
attributes:
-
name: tm
@@ -1732,7 +1724,9 @@ attribute-sets:
name: blockid
type: binary
-
- name: tc-act-mpls-attrs
+ name: act-mpls-attrs
+ name-prefix: tca-mpls-
+ header: linux/tc_act/tc_mpls.h
attributes:
-
name: tm
@@ -1762,7 +1756,9 @@ attribute-sets:
name: bos
type: u8
-
- name: tc-act-nat-attrs
+ name: act-nat-attrs
+ name-prefix: tca-nat-
+ header: linux/tc_act/tc_nat.h
attributes:
-
name: parms
@@ -1775,7 +1771,9 @@ attribute-sets:
name: pad
type: pad
-
- name: tc-act-pedit-attrs
+ name: act-pedit-attrs
+ name-prefix: tca-pedit-
+ header: linux/tc_act/tc_pedit.h
attributes:
-
name: tm
@@ -1798,45 +1796,9 @@ attribute-sets:
name: key-ex
type: binary
-
- name: tc-act-police-attrs
- attributes:
- -
- name: tbf
- type: binary
- struct: tc-police
- -
- name: rate
- type: binary # TODO
- -
- name: peakrate
- type: binary # TODO
- -
- name: avrate
- type: u32
- -
- name: result
- type: u32
- -
- name: tm
- type: binary
- struct: tcf-t
- -
- name: pad
- type: pad
- -
- name: rate64
- type: u64
- -
- name: peakrate64
- type: u64
- -
- name: pktrate64
- type: u64
- -
- name: pktburst64
- type: u64
- -
- name: tc-act-simple-attrs
+ name: act-simple-attrs
+ name-prefix: tca-def-
+ header: linux/tc_act/tc_defact.h
attributes:
-
name: tm
@@ -1852,7 +1814,9 @@ attribute-sets:
name: pad
type: pad
-
- name: tc-act-skbedit-attrs
+ name: act-skbedit-attrs
+ name-prefix: tca-skbedit-
+ header: linux/tc_act/tc_skbedit.h
attributes:
-
name: tm
@@ -1886,7 +1850,9 @@ attribute-sets:
name: queue-mapping-max
type: u16
-
- name: tc-act-skbmod-attrs
+ name: act-skbmod-attrs
+ name-prefix: tca-skbmod-
+ header: linux/tc_act/tc_skbmod.h
attributes:
-
name: tm
@@ -1908,7 +1874,9 @@ attribute-sets:
name: pad
type: pad
-
- name: tc-act-tunnel-key-attrs
+ name: act-tunnel-key-attrs
+ name-prefix: tca-tunnel-key-
+ header: linux/tc_act/tc_tunnel_key.h
attributes:
-
name: tm
@@ -1958,7 +1926,9 @@ attribute-sets:
name: no-frag
type: flag
-
- name: tc-act-vlan-attrs
+ name: act-vlan-attrs
+ name-prefix: tca-vlan-
+ header: linux/tc_act/tc_vlan.h
attributes:
-
name: tm
@@ -1987,7 +1957,8 @@ attribute-sets:
name: push-eth-src
type: binary
-
- name: tc-basic-attrs
+ name: basic-attrs
+ name-prefix: tca-basic-
attributes:
-
name: classid
@@ -1995,16 +1966,16 @@ attribute-sets:
-
name: ematches
type: nest
- nested-attributes: tc-ematch-attrs
+ nested-attributes: ematch-attrs
-
name: act
type: indexed-array
sub-type: nest
- nested-attributes: tc-act-attrs
+ nested-attributes: act-attrs
-
name: police
type: nest
- nested-attributes: tc-police-attrs
+ nested-attributes: police-attrs
-
name: pcnt
type: binary
@@ -2013,16 +1984,18 @@ attribute-sets:
name: pad
type: pad
-
- name: tc-bpf-attrs
+ name: bpf-attrs
+ name-prefix: tca-bpf-
attributes:
-
name: act
- type: nest
- nested-attributes: tc-act-attrs
+ type: indexed-array
+ sub-type: nest
+ nested-attributes: act-attrs
-
name: police
type: nest
- nested-attributes: tc-police-attrs
+ nested-attributes: police-attrs
-
name: classid
type: u32
@@ -2051,7 +2024,8 @@ attribute-sets:
name: id
type: u32
-
- name: tc-cake-attrs
+ name: cake-attrs
+ name-prefix: tca-cake-
attributes:
-
name: pad
@@ -2108,7 +2082,8 @@ attribute-sets:
name: fwmark
type: u32
-
- name: tc-cake-stats-attrs
+ name: cake-stats-attrs
+ name-prefix: tca-cake-stats-
attributes:
-
name: pad
@@ -2141,7 +2116,7 @@ attribute-sets:
name: tin-stats
type: indexed-array
sub-type: nest
- nested-attributes: tc-cake-tin-stats-attrs
+ nested-attributes: cake-tin-stats-attrs
-
name: deficit
type: s32
@@ -2161,7 +2136,8 @@ attribute-sets:
name: blue-timer-us
type: s32
-
- name: tc-cake-tin-stats-attrs
+ name: cake-tin-stats-attrs
+ name-prefix: tca-cake-tin-stats-
attributes:
-
name: pad
@@ -2239,28 +2215,32 @@ attribute-sets:
name: flow-quantum
type: u32
-
- name: tc-cbs-attrs
+ name: cbs-attrs
+ name-prefix: tca-cbs-
attributes:
-
name: parms
type: binary
struct: tc-cbs-qopt
-
- name: tc-cgroup-attrs
+ name: cgroup-attrs
+ name-prefix: tca-cgroup-
attributes:
-
name: act
- type: nest
- nested-attributes: tc-act-attrs
+ type: indexed-array
+ sub-type: nest
+ nested-attributes: act-attrs
-
name: police
type: nest
- nested-attributes: tc-police-attrs
+ nested-attributes: police-attrs
-
name: ematches
type: binary
-
- name: tc-choke-attrs
+ name: choke-attrs
+ name-prefix: tca-choke-
attributes:
-
name: parms
@@ -2276,7 +2256,8 @@ attribute-sets:
name: max-p
type: u32
-
- name: tc-codel-attrs
+ name: codel-attrs
+ name-prefix: tca-codel-
attributes:
-
name: target
@@ -2294,13 +2275,16 @@ attribute-sets:
name: ce-threshold
type: u32
-
- name: tc-drr-attrs
+ name: drr-attrs
+ name-prefix: tca-drr-
attributes:
-
name: quantum
type: u32
-
- name: tc-ematch-attrs
+ name: ematch-attrs
+ name-prefix: tca-ematch-
+ attr-max-name: tca-ematch-tree-max
attributes:
-
name: tree-hdr
@@ -2310,7 +2294,8 @@ attribute-sets:
name: tree-list
type: binary
-
- name: tc-flow-attrs
+ name: flow-attrs
+ name-prefix: tca-flow-
attributes:
-
name: keys
@@ -2342,7 +2327,7 @@ attribute-sets:
-
name: police
type: nest
- nested-attributes: tc-police-attrs
+ nested-attributes: police-attrs
-
name: ematches
type: binary
@@ -2350,7 +2335,8 @@ attribute-sets:
name: perturb
type: u32
-
- name: tc-flower-attrs
+ name: flower-attrs
+ name-prefix: tca-flower-
attributes:
-
name: classid
@@ -2362,7 +2348,7 @@ attribute-sets:
name: act
type: indexed-array
sub-type: nest
- nested-attributes: tc-act-attrs
+ nested-attributes: act-attrs
-
name: key-eth-dst
type: binary
@@ -2441,7 +2427,7 @@ attribute-sets:
-
name: flags
type: u32
- enum: tc-cls-flags
+ enum: cls-flags
enum-as-flags: true
-
name: key-vlan-id
@@ -2546,13 +2532,13 @@ attribute-sets:
name: key-flags
type: u32
byte-order: big-endian
- enum: tc-flower-key-ctrl-flags
+ enum: flower-key-ctrl-flags
enum-as-flags: true
-
name: key-flags-mask
type: u32
byte-order: big-endian
- enum: tc-flower-key-ctrl-flags
+ enum: flower-key-ctrl-flags
enum-as-flags: true
-
name: key-icmpv4-code
@@ -2675,11 +2661,11 @@ attribute-sets:
-
name: key-enc-opts
type: nest
- nested-attributes: tc-flower-key-enc-opts-attrs
+ nested-attributes: flower-key-enc-opts-attrs
-
name: key-enc-opts-mask
type: nest
- nested-attributes: tc-flower-key-enc-opts-attrs
+ nested-attributes: flower-key-enc-opts-attrs
-
name: in-hw-count
type: u32
@@ -2726,7 +2712,7 @@ attribute-sets:
-
name: key-mpls-opts
type: nest
- nested-attributes: tc-flower-key-mpls-opt-attrs
+ nested-attributes: flower-key-mpls-opt-attrs
-
name: key-hash
type: u32
@@ -2745,7 +2731,7 @@ attribute-sets:
type: u16
byte-order: big-endian
-
- name: key-l2-tpv3-sid
+ name: key-l2tpv3-sid
type: u32
byte-order: big-endian
-
@@ -2754,7 +2740,7 @@ attribute-sets:
-
name: key-cfm
type: nest
- nested-attributes: tc-flower-key-cfm-attrs
+ nested-attributes: flower-key-cfm-attrs
-
name: key-spi
type: u32
@@ -2767,35 +2753,37 @@ attribute-sets:
name: key-enc-flags
type: u32
byte-order: big-endian
- enum: tc-flower-key-ctrl-flags
+ enum: flower-key-ctrl-flags
enum-as-flags: true
-
name: key-enc-flags-mask
type: u32
byte-order: big-endian
- enum: tc-flower-key-ctrl-flags
+ enum: flower-key-ctrl-flags
enum-as-flags: true
-
- name: tc-flower-key-enc-opts-attrs
+ name: flower-key-enc-opts-attrs
+ name-prefix: tca-flower-key-enc-opts-
attributes:
-
name: geneve
type: nest
- nested-attributes: tc-flower-key-enc-opt-geneve-attrs
+ nested-attributes: flower-key-enc-opt-geneve-attrs
-
name: vxlan
type: nest
- nested-attributes: tc-flower-key-enc-opt-vxlan-attrs
+ nested-attributes: flower-key-enc-opt-vxlan-attrs
-
name: erspan
type: nest
- nested-attributes: tc-flower-key-enc-opt-erspan-attrs
+ nested-attributes: flower-key-enc-opt-erspan-attrs
-
name: gtp
type: nest
- nested-attributes: tc-flower-key-enc-opt-gtp-attrs
+ nested-attributes: flower-key-enc-opt-gtp-attrs
-
- name: tc-flower-key-enc-opt-geneve-attrs
+ name: flower-key-enc-opt-geneve-attrs
+ name-prefix: tca-flower-key-enc-opt-geneve-
attributes:
-
name: class
@@ -2807,13 +2795,15 @@ attribute-sets:
name: data
type: binary
-
- name: tc-flower-key-enc-opt-vxlan-attrs
+ name: flower-key-enc-opt-vxlan-attrs
+ name-prefix: tca-flower-key-enc-opt-vxlan-
attributes:
-
name: gbp
type: u32
-
- name: tc-flower-key-enc-opt-erspan-attrs
+ name: flower-key-enc-opt-erspan-attrs
+ name-prefix: tca-flower-key-enc-opt-erspan-
attributes:
-
name: ver
@@ -2828,7 +2818,8 @@ attribute-sets:
name: hwid
type: u8
-
- name: tc-flower-key-enc-opt-gtp-attrs
+ name: flower-key-enc-opt-gtp-attrs
+ name-prefix: tca-flower-key-enc-opt-gtp-
attributes:
-
name: pdu-type
@@ -2837,7 +2828,9 @@ attribute-sets:
name: qfi
type: u8
-
- name: tc-flower-key-mpls-opt-attrs
+ name: flower-key-mpls-opt-attrs
+ name-prefix: tca-flower-key-mpls-opt-
+ attr-max-name: tca-flower-key-mpls-opt-lse-max
attributes:
-
name: lse-depth
@@ -2855,7 +2848,8 @@ attribute-sets:
name: lse-label
type: u32
-
- name: tc-flower-key-cfm-attrs
+ name: flower-key-cfm-attrs
+ name-prefix: tca-flower-key-cfm-
attributes:
-
name: md-level
@@ -2864,7 +2858,8 @@ attribute-sets:
name: opcode
type: u8
-
- name: tc-fw-attrs
+ name: fw-attrs
+ name-prefix: tca-fw-
attributes:
-
name: classid
@@ -2872,7 +2867,7 @@ attribute-sets:
-
name: police
type: nest
- nested-attributes: tc-police-attrs
+ nested-attributes: police-attrs
-
name: indev
type: string
@@ -2880,12 +2875,13 @@ attribute-sets:
name: act
type: indexed-array
sub-type: nest
- nested-attributes: tc-act-attrs
+ nested-attributes: act-attrs
-
name: mask
type: u32
-
- name: tc-gred-attrs
+ name: gred-attrs
+ name-prefix: tca-gred-
attributes:
-
name: parms
@@ -2911,6 +2907,7 @@ attribute-sets:
nested-attributes: tca-gred-vq-list-attrs
-
name: tca-gred-vq-list-attrs
+ name-prefix: tca-gred-vq-
attributes:
-
name: entry
@@ -2919,6 +2916,7 @@ attribute-sets:
multi-attr: true
-
name: tca-gred-vq-entry-attrs
+ name-prefix: tca-gred-vq-
attributes:
-
name: pad
@@ -2957,7 +2955,7 @@ attribute-sets:
name: flags
type: u32
-
- name: tc-hfsc-attrs
+ name: hfsc-attrs
attributes:
-
name: rsc
@@ -2969,7 +2967,8 @@ attribute-sets:
name: usc
type: binary
-
- name: tc-hhf-attrs
+ name: hhf-attrs
+ name-prefix: tca-hhf-
attributes:
-
name: backlog-limit
@@ -2993,7 +2992,8 @@ attribute-sets:
name: non-hh-weight
type: u32
-
- name: tc-htb-attrs
+ name: htb-attrs
+ name-prefix: tca-htb-
attributes:
-
name: parms
@@ -3025,7 +3025,8 @@ attribute-sets:
name: offload
type: flag
-
- name: tc-matchall-attrs
+ name: matchall-attrs
+ name-prefix: tca-matchall-
attributes:
-
name: classid
@@ -3034,7 +3035,7 @@ attribute-sets:
name: act
type: indexed-array
sub-type: nest
- nested-attributes: tc-act-attrs
+ nested-attributes: act-attrs
-
name: flags
type: u32
@@ -3046,14 +3047,16 @@ attribute-sets:
name: pad
type: pad
-
- name: tc-etf-attrs
+ name: etf-attrs
+ name-prefix: tca-etf-
attributes:
-
name: parms
type: binary
struct: tc-etf-qopt
-
- name: tc-ets-attrs
+ name: ets-attrs
+ name-prefix: tca-ets-
attributes:
-
name: nbands
@@ -3064,7 +3067,7 @@ attribute-sets:
-
name: quanta
type: nest
- nested-attributes: tc-ets-attrs
+ nested-attributes: ets-attrs
-
name: quanta-band
type: u32
@@ -3072,13 +3075,14 @@ attribute-sets:
-
name: priomap
type: nest
- nested-attributes: tc-ets-attrs
+ nested-attributes: ets-attrs
-
name: priomap-band
type: u8
multi-attr: true
-
- name: tc-fq-attrs
+ name: fq-attrs
+ name-prefix: tca-fq-
attributes:
-
name: plimit
@@ -3149,7 +3153,8 @@ attribute-sets:
sub-type: s32
doc: Weights for each band
-
- name: tc-fq-codel-attrs
+ name: fq-codel-attrs
+ name-prefix: tca-fq-codel-
attributes:
-
name: target
@@ -3185,7 +3190,8 @@ attribute-sets:
name: ce-threshold-mask
type: u8
-
- name: tc-fq-pie-attrs
+ name: fq-pie-attrs
+ name-prefix: tca-fq-pie-
attributes:
-
name: limit
@@ -3224,7 +3230,8 @@ attribute-sets:
name: dq-rate-estimator
type: u32
-
- name: tc-netem-attrs
+ name: netem-attrs
+ name-prefix: tca-netem-
attributes:
-
name: corr
@@ -3245,7 +3252,7 @@ attribute-sets:
-
name: loss
type: nest
- nested-attributes: tc-netem-loss-attrs
+ nested-attributes: netem-loss-attrs
-
name: rate
type: binary
@@ -3277,7 +3284,8 @@ attribute-sets:
name: prng-seed
type: u64
-
- name: tc-netem-loss-attrs
+ name: netem-loss-attrs
+ name-prefix: netem-loss-
attributes:
-
name: gi
@@ -3290,7 +3298,8 @@ attribute-sets:
doc: Gilbert Elliot models
struct: tc-netem-gemodel
-
- name: tc-pie-attrs
+ name: pie-attrs
+ name-prefix: tca-pie-
attributes:
-
name: target
@@ -3317,7 +3326,8 @@ attribute-sets:
name: dq-rate-estimator
type: u32
-
- name: tc-police-attrs
+ name: police-attrs
+ name-prefix: tca-police-
attributes:
-
name: tbf
@@ -3325,10 +3335,10 @@ attribute-sets:
struct: tc-police
-
name: rate
- type: binary
+ type: binary # TODO
-
name: peakrate
- type: binary
+ type: binary # TODO
-
name: avrate
type: u32
@@ -3355,7 +3365,8 @@ attribute-sets:
name: pktburst64
type: u64
-
- name: tc-qfq-attrs
+ name: qfq-attrs
+ name-prefix: tca-qfq-
attributes:
-
name: weight
@@ -3364,7 +3375,8 @@ attribute-sets:
name: lmax
type: u32
-
- name: tc-red-attrs
+ name: red-attrs
+ name-prefix: tca-red-
attributes:
-
name: parms
@@ -3386,7 +3398,8 @@ attribute-sets:
name: mark-block
type: u32
-
- name: tc-route-attrs
+ name: route-attrs
+ name-prefix: tca-route4-
attributes:
-
name: classid
@@ -3403,14 +3416,15 @@ attribute-sets:
-
name: police
type: nest
- nested-attributes: tc-police-attrs
+ nested-attributes: police-attrs
-
name: act
type: indexed-array
sub-type: nest
- nested-attributes: tc-act-attrs
+ nested-attributes: act-attrs
-
- name: tc-taprio-attrs
+ name: taprio-attrs
+ name-prefix: tca-taprio-attr-
attributes:
-
name: priomap
@@ -3419,14 +3433,14 @@ attribute-sets:
-
name: sched-entry-list
type: nest
- nested-attributes: tc-taprio-sched-entry-list
+ nested-attributes: taprio-sched-entry-list
-
name: sched-base-time
type: s64
-
name: sched-single-entry
type: nest
- nested-attributes: tc-taprio-sched-entry
+ nested-attributes: taprio-sched-entry
-
name: sched-clockid
type: s32
@@ -3451,17 +3465,19 @@ attribute-sets:
-
name: tc-entry
type: nest
- nested-attributes: tc-taprio-tc-entry-attrs
+ nested-attributes: taprio-tc-entry-attrs
-
- name: tc-taprio-sched-entry-list
+ name: taprio-sched-entry-list
+ name-prefix: tca-taprio-sched-
attributes:
-
name: entry
type: nest
- nested-attributes: tc-taprio-sched-entry
+ nested-attributes: taprio-sched-entry
multi-attr: true
-
- name: tc-taprio-sched-entry
+ name: taprio-sched-entry
+ name-prefix: tca-taprio-sched-entry-
attributes:
-
name: index
@@ -3476,7 +3492,8 @@ attribute-sets:
name: interval
type: u32
-
- name: tc-taprio-tc-entry-attrs
+ name: taprio-tc-entry-attrs
+ name-prefix: tca-taprio-tc-entry-
attributes:
-
name: index
@@ -3488,7 +3505,8 @@ attribute-sets:
name: fp
type: u32
-
- name: tc-tbf-attrs
+ name: tbf-attrs
+ name-prefix: tca-tbf-
attributes:
-
name: parms
@@ -3504,7 +3522,7 @@ attribute-sets:
name: rate64
type: u64
-
- name: prate4
+ name: prate64
type: u64
-
name: burst
@@ -3516,7 +3534,9 @@ attribute-sets:
name: pad
type: pad
-
- name: tc-act-sample-attrs
+ name: act-sample-attrs
+ name-prefix: tca-sample-
+ header: linux/tc_act/tc_sample.h
attributes:
-
name: tm
@@ -3525,7 +3545,7 @@ attribute-sets:
-
name: parms
type: binary
- struct: tc-gen
+ struct: tc-gact
-
name: rate
type: u32
@@ -3539,7 +3559,9 @@ attribute-sets:
name: pad
type: pad
-
- name: tc-act-gact-attrs
+ name: act-gact-attrs
+ name-prefix: tca-gact-
+ header: linux/tc_act/tc_gact.h
attributes:
-
name: tm
@@ -3548,7 +3570,7 @@ attribute-sets:
-
name: parms
type: binary
- struct: tc-gen
+ struct: tc-gact
-
name: prob
type: binary
@@ -3558,6 +3580,7 @@ attribute-sets:
type: pad
-
name: tca-stab-attrs
+ name-prefix: tca-stab-
attributes:
-
name: base
@@ -3568,6 +3591,8 @@ attribute-sets:
type: binary
-
name: tca-stats-attrs
+ name-prefix: tca-stats-
+ header: linux/gen_stats.h
attributes:
-
name: basic
@@ -3601,7 +3626,8 @@ attribute-sets:
name: pkt64
type: u64
-
- name: tc-u32-attrs
+ name: u32-attrs
+ name-prefix: tca-u32-
attributes:
-
name: classid
@@ -3622,12 +3648,12 @@ attribute-sets:
-
name: police
type: nest
- nested-attributes: tc-police-attrs
+ nested-attributes: police-attrs
-
name: act
type: indexed-array
sub-type: nest
- nested-attributes: tc-act-attrs
+ nested-attributes: act-attrs
-
name: indev
type: string
@@ -3648,78 +3674,78 @@ attribute-sets:
sub-messages:
-
- name: tc-options-msg
+ name: options-msg
formats:
-
value: basic
- attribute-set: tc-basic-attrs
+ attribute-set: basic-attrs
-
value: bpf
- attribute-set: tc-bpf-attrs
+ attribute-set: bpf-attrs
-
value: bfifo
fixed-header: tc-fifo-qopt
-
value: cake
- attribute-set: tc-cake-attrs
+ attribute-set: cake-attrs
-
value: cbs
- attribute-set: tc-cbs-attrs
+ attribute-set: cbs-attrs
-
value: cgroup
- attribute-set: tc-cgroup-attrs
+ attribute-set: cgroup-attrs
-
value: choke
- attribute-set: tc-choke-attrs
+ attribute-set: choke-attrs
-
value: clsact # no content
-
value: codel
- attribute-set: tc-codel-attrs
+ attribute-set: codel-attrs
-
value: drr
- attribute-set: tc-drr-attrs
+ attribute-set: drr-attrs
-
value: etf
- attribute-set: tc-etf-attrs
+ attribute-set: etf-attrs
-
value: ets
- attribute-set: tc-ets-attrs
+ attribute-set: ets-attrs
-
value: flow
- attribute-set: tc-flow-attrs
+ attribute-set: flow-attrs
-
value: flower
- attribute-set: tc-flower-attrs
+ attribute-set: flower-attrs
-
value: fq
- attribute-set: tc-fq-attrs
+ attribute-set: fq-attrs
-
value: fq_codel
- attribute-set: tc-fq-codel-attrs
+ attribute-set: fq-codel-attrs
-
value: fq_pie
- attribute-set: tc-fq-pie-attrs
+ attribute-set: fq-pie-attrs
-
value: fw
- attribute-set: tc-fw-attrs
+ attribute-set: fw-attrs
-
value: gred
- attribute-set: tc-gred-attrs
+ attribute-set: gred-attrs
-
value: hfsc
fixed-header: tc-hfsc-qopt
-
value: hhf
- attribute-set: tc-hhf-attrs
+ attribute-set: hhf-attrs
-
value: htb
- attribute-set: tc-htb-attrs
+ attribute-set: htb-attrs
-
value: ingress # no content
-
value: matchall
- attribute-set: tc-matchall-attrs
+ attribute-set: matchall-attrs
-
value: mq # no content
-
@@ -3731,7 +3757,7 @@ sub-messages:
-
value: netem
fixed-header: tc-netem-qopt
- attribute-set: tc-netem-attrs
+ attribute-set: netem-attrs
-
value: pfifo
fixed-header: tc-fifo-qopt
@@ -3743,7 +3769,7 @@ sub-messages:
fixed-header: tc-fifo-qopt
-
value: pie
- attribute-set: tc-pie-attrs
+ attribute-set: pie-attrs
-
value: plug
fixed-header: tc-plug-qopt
@@ -3752,13 +3778,13 @@ sub-messages:
fixed-header: tc-prio-qopt
-
value: qfq
- attribute-set: tc-qfq-attrs
+ attribute-set: qfq-attrs
-
value: red
- attribute-set: tc-red-attrs
+ attribute-set: red-attrs
-
value: route
- attribute-set: tc-route-attrs
+ attribute-set: route-attrs
-
value: sfb
fixed-header: tc-sfb-qopt
@@ -3767,79 +3793,79 @@ sub-messages:
fixed-header: tc-sfq-qopt-v1
-
value: taprio
- attribute-set: tc-taprio-attrs
+ attribute-set: taprio-attrs
-
value: tbf
- attribute-set: tc-tbf-attrs
+ attribute-set: tbf-attrs
-
value: u32
- attribute-set: tc-u32-attrs
+ attribute-set: u32-attrs
-
- name: tc-act-options-msg
+ name: act-options-msg
formats:
-
value: bpf
- attribute-set: tc-act-bpf-attrs
+ attribute-set: act-bpf-attrs
-
value: connmark
- attribute-set: tc-act-connmark-attrs
+ attribute-set: act-connmark-attrs
-
value: csum
- attribute-set: tc-act-csum-attrs
+ attribute-set: act-csum-attrs
-
value: ct
- attribute-set: tc-act-ct-attrs
+ attribute-set: act-ct-attrs
-
value: ctinfo
- attribute-set: tc-act-ctinfo-attrs
+ attribute-set: act-ctinfo-attrs
-
value: gact
- attribute-set: tc-act-gact-attrs
+ attribute-set: act-gact-attrs
-
value: gate
- attribute-set: tc-act-gate-attrs
+ attribute-set: act-gate-attrs
-
value: ife
- attribute-set: tc-act-ife-attrs
+ attribute-set: act-ife-attrs
-
value: mirred
- attribute-set: tc-act-mirred-attrs
+ attribute-set: act-mirred-attrs
-
value: mpls
- attribute-set: tc-act-mpls-attrs
+ attribute-set: act-mpls-attrs
-
value: nat
- attribute-set: tc-act-nat-attrs
+ attribute-set: act-nat-attrs
-
value: pedit
- attribute-set: tc-act-pedit-attrs
+ attribute-set: act-pedit-attrs
-
value: police
- attribute-set: tc-act-police-attrs
+ attribute-set: police-attrs
-
value: sample
- attribute-set: tc-act-sample-attrs
+ attribute-set: act-sample-attrs
-
value: simple
- attribute-set: tc-act-simple-attrs
+ attribute-set: act-simple-attrs
-
value: skbedit
- attribute-set: tc-act-skbedit-attrs
+ attribute-set: act-skbedit-attrs
-
value: skbmod
- attribute-set: tc-act-skbmod-attrs
+ attribute-set: act-skbmod-attrs
-
value: tunnel_key
- attribute-set: tc-act-tunnel-key-attrs
+ attribute-set: act-tunnel-key-attrs
-
value: vlan
- attribute-set: tc-act-vlan-attrs
+ attribute-set: act-vlan-attrs
-
name: tca-stats-app-msg
formats:
-
value: cake
- attribute-set: tc-cake-stats-attrs
+ attribute-set: cake-stats-attrs
-
value: choke
fixed-header: tc-choke-xstats
@@ -3873,11 +3899,12 @@ sub-messages:
operations:
enum-model: directional
+ name-prefix: rtm-
list:
-
name: newqdisc
doc: Create new tc qdisc.
- attribute-set: tc-attrs
+ attribute-set: attrs
fixed-header: tcmsg
do:
request:
@@ -3892,7 +3919,7 @@ operations:
-
name: delqdisc
doc: Delete existing tc qdisc.
- attribute-set: tc-attrs
+ attribute-set: attrs
fixed-header: tcmsg
do:
request:
@@ -3900,9 +3927,9 @@ operations:
-
name: getqdisc
doc: Get / dump tc qdisc information.
- attribute-set: tc-attrs
+ attribute-set: attrs
fixed-header: tcmsg
- do:
+ do: &getqdisc-do
request:
value: 38
attributes:
@@ -3921,10 +3948,11 @@ operations:
- chain
- ingress-block
- egress-block
+ dump: *getqdisc-do
-
name: newtclass
doc: Get / dump tc traffic class information.
- attribute-set: tc-attrs
+ attribute-set: attrs
fixed-header: tcmsg
do:
request:
@@ -3933,7 +3961,7 @@ operations:
-
name: deltclass
doc: Get / dump tc traffic class information.
- attribute-set: tc-attrs
+ attribute-set: attrs
fixed-header: tcmsg
do:
request:
@@ -3941,7 +3969,7 @@ operations:
-
name: gettclass
doc: Get / dump tc traffic class information.
- attribute-set: tc-attrs
+ attribute-set: attrs
fixed-header: tcmsg
do:
request:
@@ -3952,7 +3980,7 @@ operations:
-
name: newtfilter
doc: Get / dump tc filter information.
- attribute-set: tc-attrs
+ attribute-set: attrs
fixed-header: tcmsg
do:
request:
@@ -3961,7 +3989,7 @@ operations:
-
name: deltfilter
doc: Get / dump tc filter information.
- attribute-set: tc-attrs
+ attribute-set: attrs
fixed-header: tcmsg
do:
request:
@@ -3972,7 +4000,7 @@ operations:
-
name: gettfilter
doc: Get / dump tc filter information.
- attribute-set: tc-attrs
+ attribute-set: attrs
fixed-header: tcmsg
do:
request:
@@ -3995,7 +4023,7 @@ operations:
-
name: newchain
doc: Get / dump tc chain information.
- attribute-set: tc-attrs
+ attribute-set: attrs
fixed-header: tcmsg
do:
request:
@@ -4004,7 +4032,7 @@ operations:
-
name: delchain
doc: Get / dump tc chain information.
- attribute-set: tc-attrs
+ attribute-set: attrs
fixed-header: tcmsg
do:
request:
@@ -4014,7 +4042,7 @@ operations:
-
name: getchain
doc: Get / dump tc chain information.
- attribute-set: tc-attrs
+ attribute-set: attrs
fixed-header: tcmsg
do:
request:
diff --git a/Documentation/networking/arcnet-hardware.rst b/Documentation/networking/arcnet-hardware.rst
index 982215723582..3bf7f99cd7bb 100644
--- a/Documentation/networking/arcnet-hardware.rst
+++ b/Documentation/networking/arcnet-hardware.rst
@@ -3152,7 +3152,7 @@ Tiara
(model unknown)
---------------
- - from Christoph Lameter <christoph@lameter.com>
+ - from Christoph Lameter <cl@gentwo.org>
Here is information about my card as far as I could figure it out::
diff --git a/Documentation/networking/dccp.rst b/Documentation/networking/dccp.rst
deleted file mode 100644
index 91e5c33ba3ff..000000000000
--- a/Documentation/networking/dccp.rst
+++ /dev/null
@@ -1,219 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-=============
-DCCP protocol
-=============
-
-
-.. Contents
- - Introduction
- - Missing features
- - Socket options
- - Sysctl variables
- - IOCTLs
- - Other tunables
- - Notes
-
-
-Introduction
-============
-Datagram Congestion Control Protocol (DCCP) is an unreliable, connection
-oriented protocol designed to solve issues present in UDP and TCP, particularly
-for real-time and multimedia (streaming) traffic.
-It divides into a base protocol (RFC 4340) and pluggable congestion control
-modules called CCIDs. Like pluggable TCP congestion control, at least one CCID
-needs to be enabled in order for the protocol to function properly. In the Linux
-implementation, this is the TCP-like CCID2 (RFC 4341). Additional CCIDs, such as
-the TCP-friendly CCID3 (RFC 4342), are optional.
-For a brief introduction to CCIDs and suggestions for choosing a CCID to match
-given applications, see section 10 of RFC 4340.
-
-It has a base protocol and pluggable congestion control IDs (CCIDs).
-
-DCCP is a Proposed Standard (RFC 2026), and the homepage for DCCP as a protocol
-is at http://www.ietf.org/html.charters/dccp-charter.html
-
-
-Missing features
-================
-The Linux DCCP implementation does not currently support all the features that are
-specified in RFCs 4340...42.
-
-The known bugs are at:
-
- http://www.linuxfoundation.org/collaborate/workgroups/networking/todo#DCCP
-
-For more up-to-date versions of the DCCP implementation, please consider using
-the experimental DCCP test tree; instructions for checking this out are on:
-http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp_testing#Experimental_DCCP_source_tree
-
-
-Socket options
-==============
-DCCP_SOCKOPT_QPOLICY_ID sets the dequeuing policy for outgoing packets. It takes
-a policy ID as argument and can only be set before the connection (i.e. changes
-during an established connection are not supported). Currently, two policies are
-defined: the "simple" policy (DCCPQ_POLICY_SIMPLE), which does nothing special,
-and a priority-based variant (DCCPQ_POLICY_PRIO). The latter allows to pass an
-u32 priority value as ancillary data to sendmsg(), where higher numbers indicate
-a higher packet priority (similar to SO_PRIORITY). This ancillary data needs to
-be formatted using a cmsg(3) message header filled in as follows::
-
- cmsg->cmsg_level = SOL_DCCP;
- cmsg->cmsg_type = DCCP_SCM_PRIORITY;
- cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t)); /* or CMSG_LEN(4) */
-
-DCCP_SOCKOPT_QPOLICY_TXQLEN sets the maximum length of the output queue. A zero
-value is always interpreted as unbounded queue length. If different from zero,
-the interpretation of this parameter depends on the current dequeuing policy
-(see above): the "simple" policy will enforce a fixed queue size by returning
-EAGAIN, whereas the "prio" policy enforces a fixed queue length by dropping the
-lowest-priority packet first. The default value for this parameter is
-initialised from /proc/sys/net/dccp/default/tx_qlen.
-
-DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of
-service codes (RFC 4340, sec. 8.1.2); if this socket option is not set,
-the socket will fall back to 0 (which means that no meaningful service code
-is present). On active sockets this is set before connect(); specifying more
-than one code has no effect (all subsequent service codes are ignored). The
-case is different for passive sockets, where multiple service codes (up to 32)
-can be set before calling bind().
-
-DCCP_SOCKOPT_GET_CUR_MPS is read-only and retrieves the current maximum packet
-size (application payload size) in bytes, see RFC 4340, section 14.
-
-DCCP_SOCKOPT_AVAILABLE_CCIDS is also read-only and returns the list of CCIDs
-supported by the endpoint. The option value is an array of type uint8_t whose
-size is passed as option length. The minimum array size is 4 elements, the
-value returned in the optlen argument always reflects the true number of
-built-in CCIDs.
-
-DCCP_SOCKOPT_CCID is write-only and sets both the TX and RX CCIDs at the same
-time, combining the operation of the next two socket options. This option is
-preferable over the latter two, since often applications will use the same
-type of CCID for both directions; and mixed use of CCIDs is not currently well
-understood. This socket option takes as argument at least one uint8_t value, or
-an array of uint8_t values, which must match available CCIDS (see above). CCIDs
-must be registered on the socket before calling connect() or listen().
-
-DCCP_SOCKOPT_TX_CCID is read/write. It returns the current CCID (if set) or sets
-the preference list for the TX CCID, using the same format as DCCP_SOCKOPT_CCID.
-Please note that the getsockopt argument type here is ``int``, not uint8_t.
-
-DCCP_SOCKOPT_RX_CCID is analogous to DCCP_SOCKOPT_TX_CCID, but for the RX CCID.
-
-DCCP_SOCKOPT_SERVER_TIMEWAIT enables the server (listening socket) to hold
-timewait state when closing the connection (RFC 4340, 8.3). The usual case is
-that the closing server sends a CloseReq, whereupon the client holds timewait
-state. When this boolean socket option is on, the server sends a Close instead
-and will enter TIMEWAIT. This option must be set after accept() returns.
-
-DCCP_SOCKOPT_SEND_CSCOV and DCCP_SOCKOPT_RECV_CSCOV are used for setting the
-partial checksum coverage (RFC 4340, sec. 9.2). The default is that checksums
-always cover the entire packet and that only fully covered application data is
-accepted by the receiver. Hence, when using this feature on the sender, it must
-be enabled at the receiver, too with suitable choice of CsCov.
-
-DCCP_SOCKOPT_SEND_CSCOV sets the sender checksum coverage. Values in the
- range 0..15 are acceptable. The default setting is 0 (full coverage),
- values between 1..15 indicate partial coverage.
-
-DCCP_SOCKOPT_RECV_CSCOV is for the receiver and has a different meaning: it
- sets a threshold, where again values 0..15 are acceptable. The default
- of 0 means that all packets with a partial coverage will be discarded.
- Values in the range 1..15 indicate that packets with minimally such a
- coverage value are also acceptable. The higher the number, the more
- restrictive this setting (see [RFC 4340, sec. 9.2.1]). Partial coverage
- settings are inherited to the child socket after accept().
-
-The following two options apply to CCID 3 exclusively and are getsockopt()-only.
-In either case, a TFRC info struct (defined in <linux/tfrc.h>) is returned.
-
-DCCP_SOCKOPT_CCID_RX_INFO
- Returns a ``struct tfrc_rx_info`` in optval; the buffer for optval and
- optlen must be set to at least sizeof(struct tfrc_rx_info).
-
-DCCP_SOCKOPT_CCID_TX_INFO
- Returns a ``struct tfrc_tx_info`` in optval; the buffer for optval and
- optlen must be set to at least sizeof(struct tfrc_tx_info).
-
-On unidirectional connections it is useful to close the unused half-connection
-via shutdown (SHUT_WR or SHUT_RD): this will reduce per-packet processing costs.
-
-
-Sysctl variables
-================
-Several DCCP default parameters can be managed by the following sysctls
-(sysctl net.dccp.default or /proc/sys/net/dccp/default):
-
-request_retries
- The number of active connection initiation retries (the number of
- Requests minus one) before timing out. In addition, it also governs
- the behaviour of the other, passive side: this variable also sets
- the number of times DCCP repeats sending a Response when the initial
- handshake does not progress from RESPOND to OPEN (i.e. when no Ack
- is received after the initial Request). This value should be greater
- than 0, suggested is less than 10. Analogue of tcp_syn_retries.
-
-retries1
- How often a DCCP Response is retransmitted until the listening DCCP
- side considers its connecting peer dead. Analogue of tcp_retries1.
-
-retries2
- The number of times a general DCCP packet is retransmitted. This has
- importance for retransmitted acknowledgments and feature negotiation,
- data packets are never retransmitted. Analogue of tcp_retries2.
-
-tx_ccid = 2
- Default CCID for the sender-receiver half-connection. Depending on the
- choice of CCID, the Send Ack Vector feature is enabled automatically.
-
-rx_ccid = 2
- Default CCID for the receiver-sender half-connection; see tx_ccid.
-
-seq_window = 100
- The initial sequence window (sec. 7.5.2) of the sender. This influences
- the local ackno validity and the remote seqno validity windows (7.5.1).
- Values in the range Wmin = 32 (RFC 4340, 7.5.2) up to 2^32-1 can be set.
-
-tx_qlen = 5
- The size of the transmit buffer in packets. A value of 0 corresponds
- to an unbounded transmit buffer.
-
-sync_ratelimit = 125 ms
- The timeout between subsequent DCCP-Sync packets sent in response to
- sequence-invalid packets on the same socket (RFC 4340, 7.5.4). The unit
- of this parameter is milliseconds; a value of 0 disables rate-limiting.
-
-
-IOCTLS
-======
-FIONREAD
- Works as in udp(7): returns in the ``int`` argument pointer the size of
- the next pending datagram in bytes, or 0 when no datagram is pending.
-
-SIOCOUTQ
- Returns the number of unsent data bytes in the socket send queue as ``int``
- into the buffer specified by the argument pointer.
-
-Other tunables
-==============
-Per-route rto_min support
- CCID-2 supports the RTAX_RTO_MIN per-route setting for the minimum value
- of the RTO timer. This setting can be modified via the 'rto_min' option
- of iproute2; for example::
-
- > ip route change 10.0.0.0/24 rto_min 250j dev wlan0
- > ip route add 10.0.0.254/32 rto_min 800j dev wlan0
- > ip route show dev wlan0
-
- CCID-3 also supports the rto_min setting: it is used to define the lower
- bound for the expiry of the nofeedback timer. This can be useful on LANs
- with very low RTTs (e.g., loopback, Gbit ethernet).
-
-
-Notes
-=====
-DCCP does not travel through NAT successfully at present on many boxes. This is
-because the checksum covers the pseudo-header as per TCP and UDP. Linux NAT
-support for DCCP has been added.
diff --git a/Documentation/networking/device_drivers/ethernet/huawei/hinic3.rst b/Documentation/networking/device_drivers/ethernet/huawei/hinic3.rst
new file mode 100644
index 000000000000..e3dfd083fa52
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/huawei/hinic3.rst
@@ -0,0 +1,137 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================================================
+Linux kernel driver for Huawei Ethernet Device Driver (hinic3) family
+=====================================================================
+
+Overview
+========
+
+The hinic3 is a network interface card (NIC) for data centers. It supports
+a range of link-speed devices (10GE, 25GE, 100GE, etc.). The hinic3
+devices can have multiple physical forms: LOM (LAN on Motherboard) NIC,
+PCIe standard NIC, OCP (Open Compute Project) NIC, etc.
+
+The hinic3 driver supports the following features:
+- IPv4/IPv6 TCP/UDP checksum offload
+- TSO (TCP Segmentation Offload), LRO (Large Receive Offload)
+- RSS (Receive Side Scaling)
+- MSI-X interrupt aggregation configuration and interrupt adaptation
+- SR-IOV (Single Root I/O Virtualization)
+
+Content
+=======
+
+- Supported PCI vendor ID/device IDs
+- Source Code Structure of Hinic3 Driver
+- Management Interface
+
+Supported PCI vendor ID/device IDs
+==================================
+
+19e5:0222 - hinic3 PF/PPF
+19e5:375F - hinic3 VF
+
+Prime Physical Function (PPF) is responsible for the management of the
+whole NIC card. For example, clock synchronization between the NIC and
+the host. Any PF may serve as a PPF. The PPF is selected dynamically.
+
+Source Code Structure of Hinic3 Driver
+======================================
+
+======================== ================================================
+hinic3_pci_id_tbl.h Supported device IDs
+hinic3_hw_intf.h Interface between HW and driver
+hinic3_queue_common.[ch] Common structures and methods for NIC queues
+hinic3_common.[ch] Encapsulation of memory operations in Linux
+hinic3_csr.h Register definitions in the BAR
+hinic3_hwif.[ch] Interface for BAR
+hinic3_eqs.[ch] Interface for AEQs and CEQs
+hinic3_mbox.[ch] Interface for mailbox
+hinic3_mgmt.[ch] Management interface based on mailbox and AEQ
+hinic3_wq.[ch] Work queue data structures and interface
+hinic3_cmdq.[ch] Command queue is used to post command to HW
+hinic3_hwdev.[ch] HW structures and methods abstractions
+hinic3_lld.[ch] Auxiliary driver adaptation layer
+hinic3_hw_comm.[ch] Interface for common HW operations
+hinic3_mgmt_interface.h Interface between firmware and driver
+hinic3_hw_cfg.[ch] Interface for HW configuration
+hinic3_irq.c Interrupt request
+hinic3_netdev_ops.c Operations registered to Linux kernel stack
+hinic3_nic_dev.h NIC structures and methods abstractions
+hinic3_main.c Main Linux kernel driver
+hinic3_nic_cfg.[ch] NIC service configuration
+hinic3_nic_io.[ch] Management plane interface for TX and RX
+hinic3_rss.[ch] Interface for Receive Side Scaling (RSS)
+hinic3_rx.[ch] Interface for receive
+hinic3_tx.[ch] Interface for transmit
+hinic3_ethtool.c Interface for ethtool operations (ops)
+hinic3_filter.c Interface for MAC address
+======================== ================================================
+
+Management Interface
+====================
+
+Asynchronous Event Queue (AEQ)
+------------------------------
+
+AEQ receives high priority events from the HW over a descriptor queue.
+Every descriptor is a fixed size of 64 bytes. AEQ can receive solicited or
+unsolicited events. Every device, VF or PF, can have up to 4 AEQs.
+Every AEQ is associated with a dedicated IRQ. AEQ can receive multiple types
+of events, but in practice the hinic3 driver ignores all events except for
+2 mailbox related events.
+
+Mailbox
+-------
+
+The mailbox is a communication mechanism between the hinic3 driver and the HW.
+Each device has an independent mailbox. The driver can use the mailbox to send
+requests to management. The driver receives mailbox messages, such as responses
+to requests, over the AEQ (using event HINIC3_AEQ_FOR_MBOX). Due to the
+limited size of the mailbox data register, mailbox messages are sent
+segment-by-segment.
+
+Every device can use its mailbox to post requests to firmware. The mailbox
+can also be used to post requests and responses between the PF and its VFs.
+
+Completion Event Queue (CEQ)
+----------------------------
+
+The implementation of CEQ is the same as AEQ. It receives completion events
+from HW over a fixed size descriptor of 32 bits. Every device can have up
+to 32 CEQs. Every CEQ has a dedicated IRQ. CEQ only receives solicited
+events that are responses to requests from the driver. CEQ can receive
+multiple types of events, but in practice the hinic3 driver ignores all
+events except for HINIC3_CMDQ that represents completion of previously
+posted commands on a cmdq.
+
+Command Queue (cmdq)
+--------------------
+
+Every cmdq has a dedicated work queue on which commands are posted.
+Commands on the work queue are fixed-size descriptors of 64 bytes.
+Completion of a command will be indicated using ctrl bits in the
+descriptor that carried the command. Notification of command completions
+will also be provided via event on CEQ. Every device has 4 command queues
+that are initialized as a set (called cmdqs), each with its own type.
+Hinic3 driver only uses type HINIC3_CMDQ_SYNC.
+
+Work Queues (WQ)
+----------------
+
+Work queues are logical arrays of fixed-size WQEs. The array may be spread
+over multiple non-contiguous pages using an indirection table. Work queues are
+used by I/O queues and command queues.
+
+Global function ID
+------------------
+
+Every function, PF or VF, has a unique ordinal identification within the device.
+Many management commands (mbox or cmdq) contain this ID so HW can apply the
+command effect to the right function.
+
+PF is allowed to post management commands to a subordinate VF by specifying the
+VF's ID. A VF must provide its own ID. Anti-spoofing in the HW will cause a
+command from a VF to fail if it contains the wrong ID.
+
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 05d822b904b4..139b4c75a191 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -28,6 +28,7 @@ Contents:
freescale/gianfar
google/gve
huawei/hinic
+ huawei/hinic3
intel/e100
intel/e1000
intel/e1000e
@@ -55,6 +56,7 @@ Contents:
ti/cpsw_switchdev
ti/am65_nuss_cpsw_switchdev
ti/tlan
+ ti/icssg_prueth
wangxun/txgbe
wangxun/ngbe
diff --git a/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst b/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst
index 04e0595bb0a7..f8592dec8851 100644
--- a/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst
+++ b/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst
@@ -28,9 +28,60 @@ devlink dev info provides version information for all three components. In
addition to the version the hg commit hash of the build is included as a
separate entry.
+Upgrading Firmware
+------------------
+
+fbnic supports updating firmware using signed PLDM images with devlink dev
+flash. PLDM images are written into the flash. Flashing does not interrupt
+the operation of the device.
+
+On host boot the latest UEFI driver is always used; no explicit activation
+is required. Firmware activation is required to run new control firmware. cmrt
+firmware can only be activated by power cycling the NIC.
+
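+For example, a signed PLDM image could be written to the device flash with a
+command along these lines (the device address and image file name are
+placeholders)::
+
+    $ devlink dev flash pci/0000:01:00.0 file fbnic_fw.pldm
+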
Statistics
----------
+TX MAC Interface
+~~~~~~~~~~~~~~~~
+
+ - ``ptp_illegal_req``: packets sent to the NIC with PTP request bit set but routed to BMC/FW
+ - ``ptp_good_ts``: packets successfully routed to MAC with PTP request bit set
+ - ``ptp_bad_ts``: packets destined for MAC with PTP request bit set but aborted because of some error (e.g., DMA read error)
+
+TX Extension (TEI) Interface (TTI)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ - ``tti_cm_drop``: control messages dropped at the TX Extension (TEI) Interface because of credit starvation
+ - ``tti_frame_drop``: packets dropped at the TX Extension (TEI) Interface because of credit starvation
+ - ``tti_tbi_drop``: packets dropped at the TX BMC Interface (TBI) because of credit starvation
+
+RXB (RX Buffer) Enqueue
+~~~~~~~~~~~~~~~~~~~~~~~
+
+ - ``rxb_integrity_err[i]``: frames enqueued with integrity errors (e.g., multi-bit ECC errors) on RXB input i
+ - ``rxb_mac_err[i]``: frames enqueued with MAC end-of-frame errors (e.g., bad FCS) on RXB input i
+ - ``rxb_parser_err[i]``: frames experienced RPC parser errors
+ - ``rxb_frm_err[i]``: frames experienced signaling errors (e.g., missing end-of-packet/start-of-packet) on RXB input i
+ - ``rxb_drbo[i]_frames``: frames received at RXB input i
+ - ``rxb_drbo[i]_bytes``: bytes received at RXB input i
+
+RXB (RX Buffer) FIFO
+~~~~~~~~~~~~~~~~~~~~
+
+ - ``rxb_fifo[i]_drop``: transitions into the drop state on RXB pool i
+ - ``rxb_fifo[i]_dropped_frames``: frames dropped on RXB pool i
+ - ``rxb_fifo[i]_ecn``: transitions into the ECN mark state on RXB pool i
+ - ``rxb_fifo[i]_level``: current occupancy of RXB pool i
+
+RXB (RX Buffer) Dequeue
+~~~~~~~~~~~~~~~~~~~~~~~
+
+ - ``rxb_intf[i]_frames``: frames sent to the output i
+ - ``rxb_intf[i]_bytes``: bytes sent to the output i
+ - ``rxb_pbuf[i]_frames``: frames sent to output i from the perspective of internal packet buffer
+ - ``rxb_pbuf[i]_bytes``: bytes sent to output i from the perspective of internal packet buffer
+
RPC (Rx parser)
~~~~~~~~~~~~~~~
@@ -44,6 +95,15 @@ RPC (Rx parser)
- ``rpc_out_of_hdr_err``: frames where header was larger than parsable region
- ``ovr_size_err``: oversized frames
+Hardware Queues
+~~~~~~~~~~~~~~~
+
+1. RX DMA Engine:
+
+ - ``rde_[i]_pkt_err``: packets with MAC EOP, RPC parser, RXB truncation, or RDE frame truncation errors. These errors are flagged in the packet metadata because of cut-through support but the actual drop happens once PCIe/RDE is reached.
+ - ``rde_[i]_pkt_cq_drop``: packets dropped because RCQ is full
+ - ``rde_[i]_pkt_bdq_drop``: packets dropped because HPQ or PPQ ran out of host buffer
+
PCIe
~~~~
diff --git a/Documentation/networking/device_drivers/ethernet/ti/icssg_prueth.rst b/Documentation/networking/device_drivers/ethernet/ti/icssg_prueth.rst
new file mode 100644
index 000000000000..da21ddf431bb
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/ti/icssg_prueth.rst
@@ -0,0 +1,56 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================================
+Texas Instruments ICSSG PRUETH ethernet driver
+==============================================
+
+:Version: 1.0
+
+ICSSG Firmware
+==============
+
+Every ICSSG core has two Programmable Real-Time Units (PRUs), two auxiliary
+Real-Time Transfer Units (RTUs), and two Transmit Real-Time Transfer Units
+(TX_PRUs). Each of these runs its own firmware. The firmwares combined are
+referred to as the ICSSG firmware.
+
+Firmware Statistics
+===================
+
+The ICSSG firmware maintains certain statistics which are dumped by the driver
+via ``ethtool -S <interface>``.
+
+These statistics are as follows (a sample query is shown after the list):
+
+ - ``FW_RTU_PKT_DROP``: Diagnostic error counter which increments when RTU drops a locally injected packet due to port being disabled or rule violation.
+ - ``FW_Q0_OVERFLOW``: TX overflow counter for queue0
+ - ``FW_Q1_OVERFLOW``: TX overflow counter for queue1
+ - ``FW_Q2_OVERFLOW``: TX overflow counter for queue2
+ - ``FW_Q3_OVERFLOW``: TX overflow counter for queue3
+ - ``FW_Q4_OVERFLOW``: TX overflow counter for queue4
+ - ``FW_Q5_OVERFLOW``: TX overflow counter for queue5
+ - ``FW_Q6_OVERFLOW``: TX overflow counter for queue6
+ - ``FW_Q7_OVERFLOW``: TX overflow counter for queue7
+ - ``FW_DROPPED_PKT``: This counter is incremented when a packet is dropped at PRU because of rule violation.
+ - ``FW_RX_ERROR``: Incremented if there was a CRC error or Min/Max frame error at PRU
+ - ``FW_RX_DS_INVALID``: Incremented when RTU detects Data Status invalid condition
+ - ``FW_TX_DROPPED_PACKET``: Counter for packets dropped via TX Port
+ - ``FW_TX_TS_DROPPED_PACKET``: Counter for packets with TS flag dropped via TX Port
+ - ``FW_INF_PORT_DISABLED``: Incremented when RX frame is dropped due to port being disabled
+ - ``FW_INF_SAV``: Incremented when RX frame is dropped due to Source Address violation
+ - ``FW_INF_SA_DL``: Incremented when RX frame is dropped due to Source Address being in the denylist
+ - ``FW_INF_PORT_BLOCKED``: Incremented when RX frame is dropped due to port being blocked and frame being a special frame
+ - ``FW_INF_DROP_TAGGED`` : Incremented when RX frame is dropped for being tagged
+ - ``FW_INF_DROP_PRIOTAGGED``: Incremented when RX frame is dropped for being priority tagged
+ - ``FW_INF_DROP_NOTAG``: Incremented when RX frame is dropped for being untagged
+ - ``FW_INF_DROP_NOTMEMBER``: Incremented when RX frame is dropped for port not being member of VLAN
+ - ``FW_RX_EOF_SHORT_FRMERR``: Incremented if End Of Frame (EOF) task is scheduled without seeing RX_B1
+ - ``FW_RX_B0_DROP_EARLY_EOF``: Incremented when frame is dropped due to Early EOF
+ - ``FW_TX_JUMBO_FRM_CUTOFF``: Incremented when frame is cut off to prevent packet size > 2000 Bytes
+ - ``FW_RX_EXP_FRAG_Q_DROP``: Incremented when express frame is received in the same queue as the previous fragment
+ - ``FW_RX_FIFO_OVERRUN``: RX fifo overrun counter
+ - ``FW_CUT_THR_PKT``: Incremented when a packet is forwarded using Cut-Through forwarding method
+ - ``FW_HOST_RX_PKT_CNT``: Number of valid packets sent by Rx PRU to Host on PSI
+ - ``FW_HOST_TX_PKT_CNT``: Number of valid packets copied by RTU0 to Tx queues
+ - ``FW_HOST_EGRESS_Q_PRE_OVERFLOW``: Host Egress Q (Pre-emptible) Overflow Counter
+ - ``FW_HOST_EGRESS_Q_EXP_OVERFLOW``: Host Egress Q (Express) Overflow Counter
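+
+For instance, these counters could be inspected with ``ethtool -S`` piped
+through a filter (the interface name is only an example)::
+
+    $ ethtool -S eth0 | grep FW_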
diff --git a/Documentation/networking/devlink/devlink-info.rst b/Documentation/networking/devlink/devlink-info.rst
index 23073bc219d8..dd6adc4d0559 100644
--- a/Documentation/networking/devlink/devlink-info.rst
+++ b/Documentation/networking/devlink/devlink-info.rst
@@ -86,6 +86,10 @@ In case software/firmware components are loaded from the disk (e.g.
``/lib/firmware``) only the running version should be reported via
the kernel API.
+Please note that any security versions reported via devlink are purely
+informational. Devlink does not use a secure channel to communicate with
+the device.
+
Generic Versions
================
diff --git a/Documentation/networking/devlink/devlink-trap.rst b/Documentation/networking/devlink/devlink-trap.rst
index 2c14dfe69b3a..5885e21e2212 100644
--- a/Documentation/networking/devlink/devlink-trap.rst
+++ b/Documentation/networking/devlink/devlink-trap.rst
@@ -451,7 +451,7 @@ be added to the following table:
* - ``udp_parsing``
- ``drop``
- Traps packets dropped due to an error in the UDP header parsing.
- This packet trap could include checksum errorrs, an improper UDP
+ This packet trap could include checksum errors, an improper UDP
length detected (smaller than 8 bytes) or detection of header
truncation.
* - ``tcp_parsing``
diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst
index 948c8c44e233..8319f43b5933 100644
--- a/Documentation/networking/devlink/index.rst
+++ b/Documentation/networking/devlink/index.rst
@@ -84,6 +84,7 @@ parameters, info versions, and other features it supports.
i40e
ionic
ice
+ ixgbe
mlx4
mlx5
mlxsw
diff --git a/Documentation/networking/devlink/ixgbe.rst b/Documentation/networking/devlink/ixgbe.rst
new file mode 100644
index 000000000000..c27d1436c70e
--- /dev/null
+++ b/Documentation/networking/devlink/ixgbe.rst
@@ -0,0 +1,171 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+ixgbe devlink support
+=====================
+
+This document describes the devlink features implemented by the ``ixgbe``
+device driver.
+
+Info versions
+=============
+
+The security-related versions presented by ``devlink-info`` are purely
+informational. Devlink does not use a secure channel to communicate
+with the device.
+
+The ``ixgbe`` driver reports the following versions
+
+.. list-table:: devlink info versions implemented
+ :widths: 5 5 5 90
+
+ * - Name
+ - Type
+ - Example
+ - Description
+ * - ``board.id``
+ - fixed
+ - H49289-000
+ - The Product Board Assembly (PBA) identifier of the board.
+ * - ``fw.undi``
+ - running
+ - 1.1937.0
+ - Version of the Option ROM containing the UEFI driver. The version is
+ reported in ``major.minor.patch`` format. The major version is
+ incremented whenever a major breaking change occurs, or when the
+ minor version would overflow. The minor version is incremented for
+ non-breaking changes and reset to 1 when the major version is
+ incremented. The patch version is normally 0 but is incremented when
+ a fix is delivered as a patch against an older base Option ROM.
+ * - ``fw.undi.srev``
+ - running
+ - 4
+ - Number indicating the security revision of the Option ROM.
+ * - ``fw.bundle_id``
+ - running
+ - 0x80000d0d
+ - Unique identifier of the firmware image file that was loaded onto
+ the device. Also referred to as the EETRACK identifier of the NVM.
+ * - ``fw.mgmt.api``
+ - running
+ - 1.5.1
+ - 3-digit version number (major.minor.patch) of the API exported over
+ the AdminQ by the management firmware. Used by the driver to
+ identify what commands are supported. Historical versions of the
+ kernel only displayed a 2-digit version number (major.minor).
+ * - ``fw.mgmt.build``
+ - running
+ - 0x305d955f
+ - Unique identifier of the source for the management firmware.
+ * - ``fw.mgmt.srev``
+ - running
+ - 3
+ - Number indicating the security revision of the firmware.
+ * - ``fw.psid.api``
+ - running
+ - 0.80
+ - Version defining the format of the flash contents.
+ * - ``fw.netlist``
+ - running
+ - 1.1.2000-6.7.0
+ - The version of the netlist module. This module defines the device's
+ Ethernet capabilities and default settings, and is used by the
+ management firmware as part of managing link and device
+ connectivity.
+ * - ``fw.netlist.build``
+ - running
+ - 0xee16ced7
+ - The first 4 bytes of the hash of the netlist module contents.
+
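+The running versions can be queried with ``devlink dev info``; the device
+address below is only a placeholder, and the reported fields correspond to the
+table above::
+
+    $ devlink dev info pci/0000:01:00.0
+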
+Flash Update
+============
+
+The ``ixgbe`` driver implements support for flash update using the
+``devlink-flash`` interface. It supports updating the device flash using a
+combined flash image that contains the ``fw.mgmt``, ``fw.undi``, and
+``fw.netlist`` components.
+
+.. list-table:: List of supported overwrite modes
+ :widths: 5 95
+
+ * - Bits
+ - Behavior
+ * - ``DEVLINK_FLASH_OVERWRITE_SETTINGS``
+ - Do not preserve settings stored in the flash components being
+ updated. This includes overwriting the port configuration that
+ determines the number of physical functions the device will
+ initialize with.
+ * - ``DEVLINK_FLASH_OVERWRITE_SETTINGS`` and ``DEVLINK_FLASH_OVERWRITE_IDENTIFIERS``
+ - Do not preserve either settings or identifiers. Overwrite everything
+ in the flash with the contents from the provided image, without
+ performing any preservation. This includes overwriting device
+ identifying fields such as the MAC address, Vital product Data (VPD) area,
+ and device serial number. It is expected that this combination be used with an
+ image customized for the specific device.
+
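+As a sketch, a flash update that does not preserve settings could be requested
+with the ``overwrite`` option of ``devlink-flash`` (the device address and
+image file name are placeholders)::
+
+    $ devlink dev flash pci/0000:01:00.0 file ixgbe_fw.bin overwrite settings
+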
+Reload
+======
+
+The ``ixgbe`` driver supports activating new firmware after a flash update
+using ``DEVLINK_CMD_RELOAD`` with the ``DEVLINK_RELOAD_ACTION_FW_ACTIVATE``
+action.
+
+.. code:: shell
+
+ $ devlink dev reload pci/0000:01:00.0 reload action fw_activate
+
+The new firmware is activated by issuing a device specific Embedded
+Management Processor reset which requests the device to reset and reload the
+EMP firmware image.
+
+The driver does not currently support reloading the driver via
+``DEVLINK_RELOAD_ACTION_DRIVER_REINIT``.
+
+Regions
+=======
+
+The ``ixgbe`` driver implements the following regions for accessing internal
+device data.
+
+.. list-table:: regions implemented
+ :widths: 15 85
+
+ * - Name
+ - Description
+ * - ``nvm-flash``
+ - The contents of the entire flash chip, sometimes referred to as
+ the device's Non Volatile Memory.
+ * - ``shadow-ram``
+ - The contents of the Shadow RAM, which is loaded from the beginning
+ of the flash. Although the contents are primarily from the flash,
+ this area also contains data generated during device boot which is
+ not stored in flash.
+ * - ``device-caps``
+ - The contents of the device firmware's capabilities buffer. Useful to
+ determine the current state and configuration of the device.
+
+Both the ``nvm-flash`` and ``shadow-ram`` regions can be accessed without a
+snapshot. The ``device-caps`` region requires a snapshot as the contents are
+sent by firmware and can't be split into separate reads.
+
+Users can request an immediate capture of a snapshot for all three regions
+via the ``DEVLINK_CMD_REGION_NEW`` command.
+
+.. code:: shell
+
+ $ devlink region show
+ pci/0000:01:00.0/nvm-flash: size 10485760 snapshot [] max 1
+ pci/0000:01:00.0/device-caps: size 4096 snapshot [] max 10
+
+ $ devlink region new pci/0000:01:00.0/nvm-flash snapshot 1
+
+ $ devlink region dump pci/0000:01:00.0/nvm-flash snapshot 1
+ 0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30
+ 0000000000000010 0000 0000 ffff ff04 0029 8c00 0028 8cc8
+ 0000000000000020 0016 0bb8 0016 1720 0000 0000 c00f 3ffc
+ 0000000000000030 bada cce5 bada cce5 bada cce5 bada cce5
+
+ $ devlink region read pci/0000:01:00.0/nvm-flash snapshot 1 address 0 length 16
+ 0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30
+
+ $ devlink region delete pci/0000:01:00.0/device-caps snapshot 1
diff --git a/Documentation/networking/devmem.rst b/Documentation/networking/devmem.rst
index eb678ca45496..a6cd7236bfbd 100644
--- a/Documentation/networking/devmem.rst
+++ b/Documentation/networking/devmem.rst
@@ -62,15 +62,15 @@ More Info
https://lore.kernel.org/netdev/20240831004313.3713467-1-almasrymina@google.com/
-Interface
-=========
+RX Interface
+============
Example
-------
-tools/testing/selftests/net/ncdevmem.c:do_server shows an example of setting up
-the RX path of this API.
+./tools/testing/selftests/drivers/net/hw/ncdevmem:do_server shows an example of
+setting up the RX path of this API.
NIC Setup
@@ -235,6 +235,148 @@ can be less than the tokens provided by the user in case of:
(a) an internal kernel leak bug.
(b) the user passed more than 1024 frags.
+TX Interface
+============
+
+
+Example
+-------
+
+./tools/testing/selftests/drivers/net/hw/ncdevmem:do_client shows an example of
+setting up the TX path of this API.
+
+
+NIC Setup
+---------
+
+The user must bind a TX dmabuf to a given NIC using the netlink API::
+
+ struct netdev_bind_tx_req *req = NULL;
+ struct netdev_bind_tx_rsp *rsp = NULL;
+ struct ynl_error yerr;
+
+ *ys = ynl_sock_create(&ynl_netdev_family, &yerr);
+
+ req = netdev_bind_tx_req_alloc();
+ netdev_bind_tx_req_set_ifindex(req, ifindex);
+ netdev_bind_tx_req_set_fd(req, dmabuf_fd);
+
+ rsp = netdev_bind_tx(*ys, req);
+
+ tx_dmabuf_id = rsp->id;
+
+
+The netlink API returns a dmabuf_id: a unique ID that refers to this dmabuf
+that has been bound.
+
+The user can unbind the dmabuf from the netdevice by closing the netlink socket
+that established the binding. We do this so that the binding is automatically
+unbound even if the userspace process crashes.
+
+Note that any reasonably well-behaved dmabuf from any exporter should work with
+devmem TCP, even if the dmabuf is not actually backed by devmem. An example of
+this is udmabuf, which wraps user memory (non-devmem) in a dmabuf.
+
+Socket Setup
+------------
+
+The user application must use the MSG_ZEROCOPY flag when sending devmem TCP. Devmem
+cannot be copied by the kernel, so the semantics of the devmem TX are similar
+to the semantics of MSG_ZEROCOPY::
+
+ setsockopt(socket_fd, SOL_SOCKET, SO_ZEROCOPY, &opt, sizeof(opt));
+
+It is also recommended that the user binds the TX socket to the same interface
+the dma-buf has been bound to via SO_BINDTODEVICE::
+
+ setsockopt(socket_fd, SOL_SOCKET, SO_BINDTODEVICE, ifname, strlen(ifname) + 1);
+
+
+Sending data
+------------
+
+Devmem data is sent using the SCM_DEVMEM_DMABUF cmsg.
+
+The user should create a msghdr where,
+
+* iov_base is set to the offset into the dmabuf to start sending from
+* iov_len is set to the number of bytes to be sent from the dmabuf
+
+The user passes the dma-buf id to send from via the dmabuf_tx_cmsg.dmabuf_id.
+
+The example below sends 1024 bytes from offset 100 into the dmabuf, and 2048
+from offset 2000 into the dmabuf. The dmabuf to send from is tx_dmabuf_id::
+
+ char ctrl_data[CMSG_SPACE(sizeof(struct dmabuf_tx_cmsg))];
+ struct dmabuf_tx_cmsg ddmabuf;
+ struct msghdr msg = {};
+ struct cmsghdr *cmsg;
+ struct iovec iov[2];
+
+ iov[0].iov_base = (void*)100;
+ iov[0].iov_len = 1024;
+ iov[1].iov_base = (void*)2000;
+ iov[1].iov_len = 2048;
+
+ msg.msg_iov = iov;
+ msg.msg_iovlen = 2;
+
+ msg.msg_control = ctrl_data;
+ msg.msg_controllen = sizeof(ctrl_data);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_DEVMEM_DMABUF;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(struct dmabuf_tx_cmsg));
+
+ ddmabuf.dmabuf_id = tx_dmabuf_id;
+
+ *((struct dmabuf_tx_cmsg *)CMSG_DATA(cmsg)) = ddmabuf;
+
+ sendmsg(socket_fd, &msg, MSG_ZEROCOPY);
+
+
+Reusing TX dmabufs
+------------------
+
+Similar to MSG_ZEROCOPY with regular memory, the user should not modify the
+contents of the dma-buf while a send operation is in progress. This is because
+the kernel does not keep a copy of the dmabuf contents. Instead, the kernel
+will pin and send data from the buffer available to the userspace.
+
+Just as in MSG_ZEROCOPY, the kernel notifies the userspace of send completions
+using MSG_ERRQUEUE::
+
+ int64_t tstop = gettimeofday_ms() + waittime_ms;
+ char control[CMSG_SPACE(100)] = {};
+ struct sock_extended_err *serr;
+ struct msghdr msg = {};
+ struct cmsghdr *cm;
+ int retries = 10;
+ __u32 hi, lo;
+
+ msg.msg_control = control;
+ msg.msg_controllen = sizeof(control);
+
+ while (gettimeofday_ms() < tstop) {
+ if (!do_poll(fd)) continue;
+
+ ret = recvmsg(fd, &msg, MSG_ERRQUEUE);
+
+ for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
+ serr = (void *)CMSG_DATA(cm);
+
+ hi = serr->ee_data;
+ lo = serr->ee_info;
+
+ fprintf(stdout, "tx complete [%d,%d]\n", lo, hi);
+ }
+ }
+
+After the associated sendmsg has been completed, the dmabuf can be reused by
+the userspace.
+
+
Implementation & Caveats
========================
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index c64133d309bf..ac90b82f3ce9 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -48,7 +48,6 @@ Contents:
ax25
bonding
cdc_mbim
- dccp
dctcp
devmem
dns_resolver
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 5c63ab928b97..0f1251cce314 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -37,8 +37,8 @@ ip_no_pmtu_disc - INTEGER
Mode 3 is a hardened pmtu discover mode. The kernel will only
accept fragmentation-needed errors if the underlying protocol
can verify them besides a plain socket lookup. Current
- protocols for which pmtu events will be honored are TCP, SCTP
- and DCCP as they verify e.g. the sequence number or the
+ protocols for which pmtu events will be honored are TCP and
+ SCTP as they verify e.g. the sequence number or the
association. This mode should not be enabled globally but is
only intended to secure e.g. name servers in namespaces where
TCP path mtu must still work but path MTU information of other
@@ -735,7 +735,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
net.core.rmem_max. Calling setsockopt() with SO_RCVBUF disables
automatic tuning of that socket's receive buffer size, in which
case this value is ignored.
- Default: between 131072 and 6MB, depending on RAM size.
+ Default: between 131072 and 32MB, depending on RAM size.
tcp_sack - BOOLEAN
Enable select acknowledgments (SACKS).
@@ -1099,7 +1099,7 @@ tcp_limit_output_bytes - INTEGER
limits the number of bytes on qdisc or device to reduce artificial
RTT/cwnd and reduce bufferbloat.
- Default: 1048576 (16 * 65536)
+ Default: 4194304 (4 MB)
tcp_challenge_ack_limit - INTEGER
Limits number of Challenge ACK sent per second, as recommended
diff --git a/Documentation/networking/net_cachelines/net_device.rst b/Documentation/networking/net_cachelines/net_device.rst
index 6327e689e8a8..c69cc89c958e 100644
--- a/Documentation/networking/net_cachelines/net_device.rst
+++ b/Documentation/networking/net_cachelines/net_device.rst
@@ -10,6 +10,7 @@ Type Name fastpath_tx_acce
=================================== =========================== =================== =================== ===================================================================================
unsigned_long:32 priv_flags read_mostly __dev_queue_xmit(tx)
unsigned_long:1 lltx read_mostly HARD_TX_LOCK,HARD_TX_TRYLOCK,HARD_TX_UNLOCK(tx)
+unsigned long:1 netmem_tx:1; read_mostly
char name[16]
struct netdev_name_node* name_node
struct dev_ifalias* ifalias
@@ -131,7 +132,7 @@ struct ref_tracker_dir refcnt_tracker
struct list_head link_watch_list
enum:8 reg_state
bool dismantle
-enum:16 rtnl_link_state
+bool rtnl_link_initilizing
bool needs_free_netdev
void*priv_destructor struct net_device
struct netpoll_info* npinfo read_mostly napi_poll/napi_poll_lock
diff --git a/Documentation/networking/net_cachelines/snmp.rst b/Documentation/networking/net_cachelines/snmp.rst
index bc96efc92cf5..bd44b3eebbef 100644
--- a/Documentation/networking/net_cachelines/snmp.rst
+++ b/Documentation/networking/net_cachelines/snmp.rst
@@ -37,6 +37,8 @@ unsigned_long LINUX_MIB_TIMEWAITKILLED
unsigned_long LINUX_MIB_PAWSACTIVEREJECTED
unsigned_long LINUX_MIB_PAWSESTABREJECTED
unsigned_long LINUX_MIB_TSECR_REJECTED
+unsigned_long LINUX_MIB_PAWS_OLD_ACK
+unsigned_long LINUX_MIB_PAWS_TW_REJECTED
unsigned_long LINUX_MIB_DELAYEDACKLOST
unsigned_long LINUX_MIB_LISTENOVERFLOWS
unsigned_long LINUX_MIB_LISTENDROPS
diff --git a/Documentation/networking/netdev-features.rst b/Documentation/networking/netdev-features.rst
index 5014f7cc1398..02bd7536fc0c 100644
--- a/Documentation/networking/netdev-features.rst
+++ b/Documentation/networking/netdev-features.rst
@@ -188,3 +188,8 @@ Redundancy) frames from one port to another in hardware.
This should be set for devices which duplicate outgoing HSR (High-availability
Seamless Redundancy) or PRP (Parallel Redundancy Protocol) tags automatically
frames in hardware.
+
+* netmem-tx
+
+This should be set for devices which support netmem TX. See
+Documentation/networking/netmem.rst
diff --git a/Documentation/networking/netdevices.rst b/Documentation/networking/netdevices.rst
index 6c2d8945f597..7ebb6c36482d 100644
--- a/Documentation/networking/netdevices.rst
+++ b/Documentation/networking/netdevices.rst
@@ -8,7 +8,7 @@ Network Devices, the Kernel, and You!
Introduction
============
The following is a random collection of documentation regarding
-network devices.
+network devices. It is intended for driver developers.
struct net_device lifetime rules
================================
@@ -314,13 +314,8 @@ napi->poll:
softirq
will be called with interrupts disabled by netconsole.
-struct netdev_queue_mgmt_ops synchronization rules
-==================================================
-
-All queue management ndo callbacks are holding netdev instance lock.
-
-RTNL and netdev instance lock
-=============================
+netdev instance lock
+====================
Historically, all networking control operations were protected by a single
global lock known as ``rtnl_lock``. There is an ongoing effort to replace this
@@ -328,30 +323,76 @@ global lock with separate locks for each network namespace. Additionally,
properties of individual netdev are increasingly protected by per-netdev locks.
For device drivers that implement shaping or queue management APIs, all control
-operations will be performed under the netdev instance lock. Currently, this
-instance lock is acquired within the context of ``rtnl_lock``. The drivers
-can also explicitly request instance lock to be acquired via
-``request_ops_lock``. In the future, there will be an option for individual
+operations will be performed under the netdev instance lock.
+Drivers can also explicitly request instance lock to be held during ops
+by setting ``request_ops_lock`` to true. Code comments and docs refer
+to drivers which have ops called under the instance lock as "ops locked".
+See also the documentation of the ``lock`` member of struct net_device.
+
+In the future, there will be an option for individual
drivers to opt out of using ``rtnl_lock`` and instead perform their control
operations directly under the netdev instance lock.
Devices drivers are encouraged to rely on the instance lock where possible.
For the (mostly software) drivers that need to interact with the core stack,
-there are two sets of interfaces: ``dev_xxx`` and ``netif_xxx`` (e.g.,
-``dev_set_mtu`` and ``netif_set_mtu``). The ``dev_xxx`` functions handle
-acquiring the instance lock themselves, while the ``netif_xxx`` functions
-assume that the driver has already acquired the instance lock.
+there are two sets of interfaces: ``dev_xxx``/``netdev_xxx`` and ``netif_xxx``
+(e.g., ``dev_set_mtu`` and ``netif_set_mtu``). The ``dev_xxx``/``netdev_xxx``
+functions handle acquiring the instance lock themselves, while the
+``netif_xxx`` functions assume that the driver has already acquired
+the instance lock.
+
+struct net_device_ops
+---------------------
+
+``ndos`` are called without holding the instance lock for most drivers.
+
+"Ops locked" drivers will have most of the ``ndos`` invoked under
+the instance lock.
+
+struct ethtool_ops
+------------------
+
+Similarly to ``ndos``, the instance lock is only held for select drivers.
+For "ops locked" drivers, all ethtool ops, without exception, should
+be called under the instance lock.
+
+struct netdev_stat_ops
+----------------------
+
+"qstat" ops are invoked under the instance lock for "ops locked" drivers,
+and under rtnl_lock for all other drivers.
+
+struct net_shaper_ops
+---------------------
+
+All net shaper callbacks are invoked while holding the netdev instance
+lock. ``rtnl_lock`` may or may not be held.
+
+Note that supporting net shapers automatically enables "ops locking".
+
+struct netdev_queue_mgmt_ops
+----------------------------
+
+All queue management callbacks are invoked while holding the netdev instance
+lock. ``rtnl_lock`` may or may not be held.
+
+Note that supporting struct netdev_queue_mgmt_ops automatically enables
+"ops locking".
Notifiers and netdev instance lock
-==================================
+----------------------------------
For device drivers that implement shaping or queue management APIs,
some of the notifiers (``enum netdev_cmd``) are running under the netdev
instance lock.
+The following netdev notifiers are always run under the instance lock:
+* ``NETDEV_XDP_FEAT_CHANGE``
+
For devices with locked ops, currently only the following notifiers are
running under the lock:
+* ``NETDEV_CHANGE``
* ``NETDEV_REGISTER``
* ``NETDEV_UP``
diff --git a/Documentation/networking/netmem.rst b/Documentation/networking/netmem.rst
index 7de21ddb5412..b63aded46337 100644
--- a/Documentation/networking/netmem.rst
+++ b/Documentation/networking/netmem.rst
@@ -19,8 +19,8 @@ Benefits of Netmem :
* Simplified Development: Drivers interact with a consistent API,
regardless of the underlying memory implementation.
-Driver Requirements
-===================
+Driver RX Requirements
+======================
1. The driver must support page_pool.
@@ -77,3 +77,22 @@ Driver Requirements
that purpose, but be mindful that some netmem types might have longer
circulation times, such as when userspace holds a reference in zerocopy
scenarios.
+
+Driver TX Requirements
+======================
+
+1. The Driver must not pass the netmem dma_addr to any of the dma-mapping APIs
+ directly. This is because netmem dma_addrs may come from a source like
+ dma-buf that is not compatible with the dma-mapping APIs.
+
+ Helpers like netmem_dma_unmap_page_attrs() & netmem_dma_unmap_addr_set()
+ should be used in lieu of dma_unmap_page[_attrs](), dma_unmap_addr_set().
+ The netmem variants will handle netmem dma_addrs correctly regardless of the
+ source, delegating to the dma-mapping APIs when appropriate.
+
+ Not all dma-mapping APIs have netmem equivalents at the moment. If your
+ driver relies on a missing netmem API, feel free to add and propose to
+ netdev@, or reach out to the maintainers and/or almasrymina@google.com for
+ help adding the netmem API.
+
+2. The driver should declare support by setting ``netdev->netmem_tx = true``.
diff --git a/Documentation/networking/rds.rst b/Documentation/networking/rds.rst
index 498395f5fbcb..41b0a6182fe4 100644
--- a/Documentation/networking/rds.rst
+++ b/Documentation/networking/rds.rst
@@ -265,7 +265,7 @@ RDS Protocol
The bitmaps are allocated as connections are brought up. This
avoids allocation in the interrupt handling path which queues
- sages on sockets. The dense bitmaps let transports send the
+ messages on sockets. The dense bitmaps let transports send the
entire bitmap on any bitmap change reasonably efficiently. This
is much easier to implement than some finer-grained
communication of per-port congestion. The sender does a very
@@ -373,7 +373,7 @@ The recv path
- validate header checksum
- copy header to rds_ib_incoming struct if start of a new datagram
- add to ibinc's fraglist
- - if competed datagram:
+ - if completed datagram:
- update cong map if datagram was cong update
- call rds_recv_incoming() otherwise
- note if ack is required
@@ -415,7 +415,7 @@ Multipath RDS (mprds)
I/O workqs and reconnect threads are driven from the rds_conn_path.
Transports such as TCP that are multipath capable may then set up a
TCP socket per rds_conn_path, and this is managed by the transport via
- the transport privatee cp_transport_data pointer.
+ the transport private cp_transport_data pointer.
Transports announce themselves as multipath capable by setting the
t_mp_capable bit during registration with the rds core module. When the
@@ -430,7 +430,7 @@ Multipath RDS (mprds)
This is done by sending out a control packet exchange before the
first data packet. The control packet exchange must have completed
prior to outgoing hash completion in rds_sendmsg() when the transport
- is mutlipath capable.
+ is multipath capable.
The control packet is an RDS ping packet (i.e., packet to rds dest
port 0) with the ping packet having a rds extension header option of
diff --git a/Documentation/networking/rxrpc.rst b/Documentation/networking/rxrpc.rst
index e807e18ba32a..d63e3e27dd06 100644
--- a/Documentation/networking/rxrpc.rst
+++ b/Documentation/networking/rxrpc.rst
@@ -1062,30 +1062,6 @@ The kernel interface functions are as follows:
first function to change. Note that this must be called in TASK_RUNNING
state.
- (#) Get remote client epoch::
-
- u32 rxrpc_kernel_get_epoch(struct socket *sock,
- struct rxrpc_call *call)
-
- This allows the epoch that's contained in packets of an incoming client
- call to be queried. This value is returned. The function always
- successful if the call is still in progress. It shouldn't be called once
- the call has expired. Note that calling this on a local client call only
- returns the local epoch.
-
- This value can be used to determine if the remote client has been
- restarted as it shouldn't change otherwise.
-
- (#) Set the maximum lifespan on a call::
-
- void rxrpc_kernel_set_max_life(struct socket *sock,
- struct rxrpc_call *call,
- unsigned long hard_timeout)
-
- This sets the maximum lifespan on a call to hard_timeout (which is in
- jiffies). In the event of the timeout occurring, the call will be
- aborted and -ETIME or -ETIMEDOUT will be returned.
-
(#) Apply the RXRPC_MIN_SECURITY_LEVEL sockopt to a socket from within in the
kernel::
@@ -1172,3 +1148,18 @@ adjusted through sysctls in /proc/net/rxrpc/:
header plus exactly 1412 bytes of data. The terminal packet must contain
a four byte header plus any amount of data. In any event, a jumbo packet
may not exceed rxrpc_rx_mtu in size.
+
+
+API Function Reference
+======================
+
+.. kernel-doc:: net/rxrpc/af_rxrpc.c
+.. kernel-doc:: net/rxrpc/call_object.c
+.. kernel-doc:: net/rxrpc/key.c
+.. kernel-doc:: net/rxrpc/oob.c
+.. kernel-doc:: net/rxrpc/peer_object.c
+.. kernel-doc:: net/rxrpc/recvmsg.c
+.. kernel-doc:: net/rxrpc/rxgk.c
+.. kernel-doc:: net/rxrpc/rxkad.c
+.. kernel-doc:: net/rxrpc/sendmsg.c
+.. kernel-doc:: net/rxrpc/server_key.c
diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst
index b8fef8101176..7aabead90648 100644
--- a/Documentation/networking/timestamping.rst
+++ b/Documentation/networking/timestamping.rst
@@ -811,11 +811,9 @@ Documentation/devicetree/bindings/ptp/timestamper.txt for more details.
3.2.4 Other caveats for MAC drivers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Stacked PHCs, especially DSA (but not only) - since that doesn't require any
-modification to MAC drivers, so it is more difficult to ensure correctness of
-all possible code paths - is that they uncover bugs which were impossible to
-trigger before the existence of stacked PTP clocks. One example has to do with
-this line of code, already presented earlier::
+The use of stacked PHCs may uncover MAC driver bugs which were impossible to
+trigger without them. One example has to do with this line of code, already
+presented earlier::
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
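As a hedged illustration of the kind of guard this caveat calls for (``mynic`` and ``hwts_tx_en`` are invented names, not an existing driver API), a MAC xmit path might only set the flag when it will really produce the timestamp itself::

  static netdev_tx_t mynic_xmit(struct sk_buff *skb, struct net_device *dev)
  {
          struct mynic_priv *priv = netdev_priv(dev);

          /* Only claim the TX timestamp when this MAC will deliver it;
           * with stacked PHCs a PHY or DSA switch may be the device
           * expected to timestamp the packet instead. */
          if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
              priv->hwts_tx_en)
                  skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

          /* ... hand the skb to the hardware ... */
          return NETDEV_TX_OK;
  }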
diff --git a/Documentation/networking/tproxy.rst b/Documentation/networking/tproxy.rst
index 7f7c1ff6f159..75e4990cc3db 100644
--- a/Documentation/networking/tproxy.rst
+++ b/Documentation/networking/tproxy.rst
@@ -69,9 +69,9 @@ add rules like this to the iptables ruleset above::
# iptables -t mangle -A PREROUTING -p tcp --dport 80 -j TPROXY \
--tproxy-mark 0x1/0x1 --on-port 50080
-Or the following rule to nft:
+Or the following rule to nft::
-# nft add rule filter divert tcp dport 80 tproxy to :50080 meta mark set 1 accept
+ # nft add rule filter divert tcp dport 80 tproxy to :50080 meta mark set 1 accept
Note that for this to work you'll have to modify the proxy to enable (SOL_IP,
IP_TRANSPARENT) for the listening socket.
diff --git a/Documentation/networking/xfrm_device.rst b/Documentation/networking/xfrm_device.rst
index 7f24c09f2694..122204da0fff 100644
--- a/Documentation/networking/xfrm_device.rst
+++ b/Documentation/networking/xfrm_device.rst
@@ -65,9 +65,13 @@ Callbacks to implement
/* from include/linux/netdevice.h */
struct xfrmdev_ops {
/* Crypto and Packet offload callbacks */
- int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack);
- void (*xdo_dev_state_delete) (struct xfrm_state *x);
- void (*xdo_dev_state_free) (struct xfrm_state *x);
+ int (*xdo_dev_state_add)(struct net_device *dev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack);
+ void (*xdo_dev_state_delete)(struct net_device *dev,
+ struct xfrm_state *x);
+ void (*xdo_dev_state_free)(struct net_device *dev,
+ struct xfrm_state *x);
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
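For illustration only (the ``mynic_*`` handlers are hypothetical), a driver typically wires these callbacks up through a static ops table referenced from its netdev setup path::

  static const struct xfrmdev_ops mynic_xfrmdev_ops = {
          .xdo_dev_state_add      = mynic_xfrm_add_state,
          .xdo_dev_state_delete   = mynic_xfrm_del_state,
          .xdo_dev_state_free     = mynic_xfrm_free_state,
          .xdo_dev_offload_ok     = mynic_xfrm_offload_ok,
  };

  /* in the probe/setup path, guarded by CONFIG_XFRM_OFFLOAD */
  netdev->xfrmdev_ops = &mynic_xfrmdev_ops;
  netdev->features   |= NETIF_F_HW_ESP;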
diff --git a/Documentation/power/energy-model.rst b/Documentation/power/energy-model.rst
index ada4938c37e5..cbdf7520aaa6 100644
--- a/Documentation/power/energy-model.rst
+++ b/Documentation/power/energy-model.rst
@@ -230,7 +230,7 @@ Drivers must provide a pointer to the allocated and initialized new EM
and will be visible to other sub-systems in the kernel (thermal, powercap).
The main design goal for this API is to be fast and avoid extra calculations
or memory allocations at runtime. When pre-computed EMs are available in the
-device driver, than it should be possible to simply re-use them with low
+device driver, then it should be possible to simply reuse them with low
performance overhead.
In order to free the EM, provided earlier by the driver (e.g. when the module
@@ -381,17 +381,17 @@ up periodically to check the temperature and modify the EM data::
26 rcu_read_unlock();
27
28 /* Calculate 'cost' values for EAS */
- 29 ret = em_dev_compute_costs(dev, table, pd->nr_perf_states);
+ 29 ret = em_dev_compute_costs(dev, new_table, pd->nr_perf_states);
30 if (ret) {
31 dev_warn(dev, "EM: compute costs failed %d\n", ret);
- 32 em_free_table(em_table);
+ 32 em_table_free(em_table);
33 return;
34 }
35
36 ret = em_dev_update_perf_domain(dev, em_table);
37 if (ret) {
38 dev_warn(dev, "EM: update failed %d\n", ret);
- 39 em_free_table(em_table);
+ 39 em_table_free(em_table);
40 return;
41 }
42
diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst
index 12f429359a82..63344bea8393 100644
--- a/Documentation/power/runtime_pm.rst
+++ b/Documentation/power/runtime_pm.rst
@@ -154,7 +154,7 @@ suspending the device are satisfied) and to queue up a suspend request for the
device in that case. If there is no idle callback, or if the callback returns
0, then the PM core will attempt to carry out a runtime suspend of the device,
also respecting devices configured for autosuspend. In essence this means a
-call to __pm_runtime_autosuspend() (do note that drivers needs to update the
+call to pm_runtime_autosuspend() (do note that drivers need to update the
device last busy mark, pm_runtime_mark_last_busy(), to control the delay under
this circumstance). To prevent this (for example, if the callback routine has
started a delayed suspend), the routine must return a non-zero value. Negative
diff --git a/Documentation/process/1.Intro.rst b/Documentation/process/1.Intro.rst
index c3d0270bbfb3..25ca49f7ae4d 100644
--- a/Documentation/process/1.Intro.rst
+++ b/Documentation/process/1.Intro.rst
@@ -251,12 +251,12 @@ there is no prospect of a migration to version 3 of the GPL in the
foreseeable future.
It is imperative that all code contributed to the kernel be legitimately
-free software. For that reason, code from anonymous (or pseudonymous)
-contributors will not be accepted. All contributors are required to "sign
-off" on their code, stating that the code can be distributed with the
-kernel under the GPL. Code which has not been licensed as free software by
-its owner, or which risks creating copyright-related problems for the
-kernel (such as code which derives from reverse-engineering efforts lacking
+free software. For that reason, code from contributors without a known
+identity or anonymous contributors will not be accepted. All contributors are
+required to "sign off" on their code, stating that the code can be distributed
+with the kernel under the GPL. Code which has not been licensed as free
+software by its owner, or which risks creating copyright-related problems for
+the kernel (such as code which derives from reverse-engineering efforts lacking
proper safeguards) cannot be contributed.
Questions about copyright-related issues are common on Linux development
diff --git a/Documentation/process/adding-syscalls.rst b/Documentation/process/adding-syscalls.rst
index 906c47f1a9e5..fc0b0bbcd34d 100644
--- a/Documentation/process/adding-syscalls.rst
+++ b/Documentation/process/adding-syscalls.rst
@@ -248,6 +248,52 @@ To summarize, you need a commit that includes:
- fallback stub in ``kernel/sys_ni.c``
+.. _syscall_generic_6_11:
+
+Since 6.11
+~~~~~~~~~~
+
+Starting with kernel version 6.11, the generic system call implementation for
+the following architectures no longer requires modifications to
+``include/uapi/asm-generic/unistd.h``:
+
+ - arc
+ - arm64
+ - csky
+ - hexagon
+ - loongarch
+ - nios2
+ - openrisc
+ - riscv
+
+Instead, you need to update ``scripts/syscall.tbl`` and, if applicable, adjust
+``arch/*/kernel/Makefile.syscalls``.
+
+As ``scripts/syscall.tbl`` serves as a common syscall table across multiple
+architectures, a new entry is required in this table::
+
+ 468 common xyzzy sys_xyzzy
+
+Note that adding an entry to ``scripts/syscall.tbl`` with the "common" ABI
+also affects all architectures that share this table. For more limited or
+architecture-specific changes, consider using an architecture-specific ABI or
+defining a new one.
+
+If a new ABI, say ``xyz``, is introduced, the corresponding updates should be
+made to ``arch/*/kernel/Makefile.syscalls`` as well::
+
+ syscall_abis_{32,64} += xyz (...)
+
+To summarize, you need a commit that includes:
+
+ - ``CONFIG`` option for the new function, normally in ``init/Kconfig``
+ - ``SYSCALL_DEFINEn(xyzzy, ...)`` for the entry point
+ - corresponding prototype in ``include/linux/syscalls.h``
+ - new entry in ``scripts/syscall.tbl``
+ - (if needed) Makefile updates in ``arch/*/kernel/Makefile.syscalls``
+ - fallback stub in ``kernel/sys_ni.c``
+
+
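As a purely illustrative sketch of the remaining pieces (the argument list and the ``do_xyzzy()`` helper are invented for this example), the entry point and the fallback stub could look like::

  /* e.g. kernel/xyzzy.c */
  SYSCALL_DEFINE2(xyzzy, unsigned int, magic, unsigned long, flags)
  {
          if (flags)
                  return -EINVAL;
          return do_xyzzy(magic);         /* hypothetical helper */
  }

  /* kernel/sys_ni.c -- fallback when the CONFIG option is disabled */
  COND_SYSCALL(xyzzy);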
x86 System Call Implementation
------------------------------
@@ -353,6 +399,41 @@ To summarize, you need:
``include/uapi/asm-generic/unistd.h``
+Since 6.11
+~~~~~~~~~~
+
+This applies to all the architectures listed in :ref:`Since 6.11<syscall_generic_6_11>`
+under "Generic System Call Implementation", except arm64. See
+:ref:`Compatibility System Calls (arm64)<compat_arm64>` for more information.
+
+You need to extend the entry in ``scripts/syscall.tbl`` with an extra column
+to indicate that a 32-bit userspace program running on a 64-bit kernel should
+hit the compat entry point::
+
+ 468 common xyzzy sys_xyzzy compat_sys_xyzzy
+
+To summarize, you need:
+
+ - ``COMPAT_SYSCALL_DEFINEn(xyzzy, ...)`` for the compat entry point
+ - corresponding prototype in ``include/linux/compat.h``
+ - modification of the entry in ``scripts/syscall.tbl`` to include an extra
+ "compat" column
+ - (if needed) 32-bit mapping struct in ``include/linux/compat.h``
+
+
+.. _compat_arm64:
+
+Compatibility System Calls (arm64)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+On arm64, there is a dedicated syscall table for compatibility system calls
+targeting 32-bit (AArch32) userspace: ``arch/arm64/tools/syscall_32.tbl``.
+You need to add an additional line to this table specifying the compat
+entry point::
+
+ 468 common xyzzy sys_xyzzy compat_sys_xyzzy
+
+
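Again as a hedged sketch (the parameters and the ``do_xyzzy()`` helper are invented), the compat entry point named in the extra column might look like::

  COMPAT_SYSCALL_DEFINE2(xyzzy, compat_uptr_t, buf, compat_size_t, len)
  {
          /* translate the 32-bit user pointer before calling the
           * common helper */
          return do_xyzzy(compat_ptr(buf), len);
  }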
Compatibility System Calls (x86)
--------------------------------
@@ -575,3 +656,6 @@ References and Sources
- Recommendation from Linus Torvalds that x32 system calls should prefer
compatibility with 64-bit versions rather than 32-bit versions:
https://lore.kernel.org/r/CA+55aFxfmwfB7jbbrXxa=K7VBYPfAvmu3XOkGrLbB1UFjX1+Ew@mail.gmail.com
+ - Patch series revising system call table infrastructure to use
+ scripts/syscall.tbl across multiple architectures:
+ https://lore.kernel.org/lkml/20240704143611.2979589-1-arnd@kernel.org
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index d564362773b5..b14bd5b7cbc9 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -29,13 +29,13 @@ you probably needn't concern yourself with pcmciautils.
====================== =============== ========================================
Program Minimal version Command to check the version
====================== =============== ========================================
-GNU C 5.1 gcc --version
+GNU C 8.1 gcc --version
Clang/LLVM (optional) 13.0.1 clang --version
Rust (optional) 1.78.0 rustc --version
bindgen (optional) 0.65.1 bindgen --version
GNU make 4.0 make --version
bash 4.2 bash --version
-binutils 2.25 ld -v
+binutils 2.30 ld -v
flex 2.5.35 flex --version
bison 2.0 bison --version
pahole 1.16 pahole --version
@@ -116,7 +116,7 @@ Bash 4.2 or newer is needed.
Binutils
--------
-Binutils 2.25 or newer is needed to build the kernel.
+Binutils 2.30 or newer is needed to build the kernel.
pkg-config
----------
diff --git a/Documentation/process/debugging/driver_development_debugging_guide.rst b/Documentation/process/debugging/driver_development_debugging_guide.rst
index 46becda8764b..aca08f457793 100644
--- a/Documentation/process/debugging/driver_development_debugging_guide.rst
+++ b/Documentation/process/debugging/driver_development_debugging_guide.rst
@@ -155,7 +155,7 @@ The general idea is:
``my_variable``
- Clean up the directory when removing the device
- (``debugfs_remove_recursive(parent);``)
+ (``debugfs_remove(parent);``)
For the full documentation see :doc:`/filesystems/debugfs`.
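A minimal editorial sketch of that create/remove pattern, with invented ``mydrv`` names::

  static struct dentry *mydrv_dir;
  static u32 my_variable;

  static void mydrv_debugfs_init(void)
  {
          mydrv_dir = debugfs_create_dir("mydrv", NULL);
          debugfs_create_u32("my_variable", 0644, mydrv_dir, &my_variable);
  }

  static void mydrv_debugfs_exit(void)
  {
          /* removes the directory and everything below it */
          debugfs_remove(mydrv_dir);
  }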
diff --git a/Documentation/process/debugging/gdb-kernel-debugging.rst b/Documentation/process/debugging/gdb-kernel-debugging.rst
index 895285c037c7..9475c759c722 100644
--- a/Documentation/process/debugging/gdb-kernel-debugging.rst
+++ b/Documentation/process/debugging/gdb-kernel-debugging.rst
@@ -127,35 +127,31 @@ Examples of using the Linux-provided gdb helpers
- Make use of the per-cpu function for the current or a specified CPU::
- (gdb) p $lx_per_cpu("runqueues").nr_running
+ (gdb) p $lx_per_cpu(runqueues).nr_running
$3 = 1
- (gdb) p $lx_per_cpu("runqueues", 2).nr_running
+ (gdb) p $lx_per_cpu(runqueues, 2).nr_running
$4 = 0
- Dig into hrtimers using the container_of helper::
- (gdb) set $next = $lx_per_cpu("hrtimer_bases").clock_base[0].active.next
- (gdb) p *$container_of($next, "struct hrtimer", "node")
+ (gdb) set $leftmost = $lx_per_cpu(hrtimer_bases).clock_base[0].active.rb_root.rb_leftmost
+ (gdb) p *$container_of($leftmost, "struct hrtimer", "node")
$5 = {
node = {
node = {
- __rb_parent_color = 18446612133355256072,
- rb_right = 0x0 <irq_stack_union>,
- rb_left = 0x0 <irq_stack_union>
+ __rb_parent_color = 18446612686384860673,
+ rb_right = 0xffff888231da8b00,
+ rb_left = 0x0
},
- expires = {
- tv64 = 1835268000000
- }
+ expires = 1228461000000
},
- _softexpires = {
- tv64 = 1835268000000
- },
- function = 0xffffffff81078232 <tick_sched_timer>,
- base = 0xffff88003fd0d6f0,
- state = 1,
- start_pid = 0,
- start_site = 0xffffffff81055c1f <hrtimer_start_range_ns+20>,
- start_comm = "swapper/2\000\000\000\000\000\000"
+ _softexpires = 1228461000000,
+ function = 0xffffffff8137ab20 <tick_nohz_handler>,
+ base = 0xffff888231d9b4c0,
+ state = 1 '\001',
+ is_rel = 0 '\000',
+ is_soft = 0 '\000',
+ is_hard = 1 '\001'
}
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
index 0e19d2f0d6bb..da6bf0f6d01e 100644
--- a/Documentation/process/embargoed-hardware-issues.rst
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -290,7 +290,6 @@ an involved disclosed party. The current ambassadors list:
AMD Tom Lendacky <thomas.lendacky@amd.com>
Ampere Darren Hart <darren@os.amperecomputing.com>
ARM Catalin Marinas <catalin.marinas@arm.com>
- IBM Power Michael Ellerman <ellerman@au.ibm.com>
IBM Z Christian Borntraeger <borntraeger@de.ibm.com>
Intel Tony Luck <tony.luck@intel.com>
Qualcomm Trilok Soni <quic_tsoni@quicinc.com>
diff --git a/Documentation/rust/coding-guidelines.rst b/Documentation/rust/coding-guidelines.rst
index 27f2a7bb5a4a..6ff9e754755d 100644
--- a/Documentation/rust/coding-guidelines.rst
+++ b/Documentation/rust/coding-guidelines.rst
@@ -85,6 +85,18 @@ written after the documentation, e.g.:
// ...
}
+This applies to both public and private items. This increases consistency with
+public items, allows visibility changes with fewer edits involved, and will
+allow us to potentially generate the documentation for private items as well.
+In other words, if documentation is written for a private item, then ``///``
+should still be used. For instance:
+
+.. code-block:: rust
+
+ /// My private function.
+ // TODO: ...
+ fn f() {}
+
One special kind of comments are the ``// SAFETY:`` comments. These must appear
before every ``unsafe`` block, and they explain why the code inside the block is
correct/sound, i.e. why it cannot trigger undefined behavior in any case, e.g.:
@@ -191,6 +203,23 @@ or:
/// [`struct mutex`]: srctree/include/linux/mutex.h
+C FFI types
+-----------
+
+Rust kernel code refers to C types, such as ``int``, using type aliases such as
+``c_int``, which are readily available from the ``kernel`` prelude. Please do
+not use the aliases from ``core::ffi`` -- they may not map to the correct types.
+
+These aliases should generally be referred to directly by their identifier,
+i.e. as a single segment path. For instance:
+
+.. code-block:: rust
+
+ fn f(p: *const c_char) -> c_int {
+ // ...
+ }
+
+
Naming
------
diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst
index 6d2607870ba4..155f7107329a 100644
--- a/Documentation/rust/quick-start.rst
+++ b/Documentation/rust/quick-start.rst
@@ -90,15 +90,53 @@ they should generally work out of the box, e.g.::
Ubuntu
******
-Ubuntu LTS and non-LTS (interim) releases provide recent Rust releases and thus
-they should generally work out of the box, e.g.::
+25.04
+~~~~~
+
+The latest Ubuntu releases provide recent Rust releases and thus they should
+generally work out of the box, e.g.::
+
+ apt install rustc rust-src bindgen rustfmt rust-clippy
+
+In addition, ``RUST_LIB_SRC`` needs to be set, e.g.::
+
+ RUST_LIB_SRC=/usr/src/rustc-$(rustc --version | cut -d' ' -f2)/library
+
+For convenience, ``RUST_LIB_SRC`` can be exported to the global environment.
- apt install rustc-1.80 rust-1.80-src bindgen-0.65 rustfmt-1.80 rust-1.80-clippy
+
+24.04 LTS and older
+~~~~~~~~~~~~~~~~~~~
+
+Though Ubuntu 24.04 LTS and older versions still provide recent Rust
+releases, they require some additional configuration to be set, using
+the versioned packages, e.g.::
+
+ apt install rustc-1.80 rust-1.80-src bindgen-0.65 rustfmt-1.80 \
+ rust-1.80-clippy
+ ln -s /usr/lib/rust-1.80/bin/rustfmt /usr/bin/rustfmt-1.80
+ ln -s /usr/lib/rust-1.80/bin/clippy-driver /usr/bin/clippy-driver-1.80
+
+None of these packages set their tools as defaults; therefore they should be
+specified explicitly, e.g.::
+
+ make LLVM=1 RUSTC=rustc-1.80 RUSTDOC=rustdoc-1.80 RUSTFMT=rustfmt-1.80 \
+ CLIPPY_DRIVER=clippy-driver-1.80 BINDGEN=bindgen-0.65
+
+Alternatively, modify the ``PATH`` variable to place the Rust 1.80 binaries
+first and set ``bindgen`` as the default, e.g.::
+
+ PATH=/usr/lib/rust-1.80/bin:$PATH
+ update-alternatives --install /usr/bin/bindgen bindgen \
+ /usr/bin/bindgen-0.65 100
+ update-alternatives --set bindgen /usr/bin/bindgen-0.65
``RUST_LIB_SRC`` needs to be set when using the versioned packages, e.g.::
RUST_LIB_SRC=/usr/src/rustc-$(rustc-1.80 --version | cut -d' ' -f2)/library
+For convenience, ``RUST_LIB_SRC`` can be exported to the global environment.
+
In addition, ``bindgen-0.65`` is available in newer releases (24.04 LTS and
24.10), but it may not be available in older ones (20.04 LTS and 22.04 LTS),
thus ``bindgen`` may need to be built manually (please see below).
diff --git a/Documentation/rust/testing.rst b/Documentation/rust/testing.rst
index f692494f7b74..f43cb77bcc69 100644
--- a/Documentation/rust/testing.rst
+++ b/Documentation/rust/testing.rst
@@ -133,13 +133,85 @@ please see:
The ``#[test]`` tests
---------------------
-Additionally, there are the ``#[test]`` tests. These can be run using the
-``rusttest`` Make target::
+Additionally, there are the ``#[test]`` tests. Like documentation tests, these
+are fairly similar to what you would expect from userspace, and they are also
+mapped to KUnit.
+
+These tests are introduced by the ``kunit_tests`` procedural macro, which takes
+the name of the test suite as an argument.
+
+For instance, assume we want to test the function ``f`` from the documentation
+tests section. We could write, in the same file where we have our function:
+
+.. code-block:: rust
+
+ #[kunit_tests(rust_kernel_mymod)]
+ mod tests {
+ use super::*;
+
+ #[test]
+ fn test_f() {
+ assert_eq!(f(10, 20), 30);
+ }
+ }
+
+And if we run it, the kernel log would look like::
+
+ KTAP version 1
+ # Subtest: rust_kernel_mymod
+ # speed: normal
+ 1..1
+ # test_f.speed: normal
+ ok 1 test_f
+ ok 1 rust_kernel_mymod
+
+Like documentation tests, the ``assert!`` and ``assert_eq!`` macros are mapped
+back to KUnit and do not panic. Similarly, the
+`? <https://doc.rust-lang.org/reference/expressions/operator-expr.html#the-question-mark-operator>`_
+operator is supported, i.e. the test functions may return either nothing (i.e.
+the unit type ``()``) or ``Result`` (i.e. any ``Result<T, E>``). For instance:
+
+.. code-block:: rust
+
+ #[kunit_tests(rust_kernel_mymod)]
+ mod tests {
+ use super::*;
+
+ #[test]
+ fn test_g() -> Result {
+ let x = g()?;
+ assert_eq!(x, 30);
+ Ok(())
+ }
+ }
+
+If we run the test and the call to ``g`` fails, then the kernel log would show::
+
+ KTAP version 1
+ # Subtest: rust_kernel_mymod
+ # speed: normal
+ 1..1
+ # test_g: ASSERTION FAILED at rust/kernel/lib.rs:335
+ Expected is_test_result_ok(test_g()) to be true, but is false
+ # test_g.speed: normal
+ not ok 1 test_g
+ not ok 1 rust_kernel_mymod
+
+If a ``#[test]`` test could be useful as an example for the user, then please
+use a documentation test instead. Even edge cases of an API, e.g. error or
+boundary cases, can be interesting to show in examples.
+
+The ``rusttest`` host tests
+---------------------------
+
+These are userspace tests that can be built and run on the host (i.e. the
+machine that performs the kernel build) using the ``rusttest`` Make target::
make LLVM=1 rusttest
-This requires the kernel ``.config``. It runs the ``#[test]`` tests on the host
-(currently) and thus is fairly limited in what these tests can test.
+This requires the kernel ``.config``.
+
+Currently, they are mostly used for testing the ``macros`` crate's examples.
The Kselftests
--------------
diff --git a/Documentation/scheduler/sched-ext.rst b/Documentation/scheduler/sched-ext.rst
index 0b2654e2164b..a1869c38046e 100644
--- a/Documentation/scheduler/sched-ext.rst
+++ b/Documentation/scheduler/sched-ext.rst
@@ -1,3 +1,5 @@
+.. _sched-ext:
+
==========================
Extensible Scheduler Class
==========================
@@ -47,8 +49,8 @@ options should be enabled to use sched_ext:
sched_ext is used only when the BPF scheduler is loaded and running.
If a task explicitly sets its scheduling policy to ``SCHED_EXT``, it will be
-treated as ``SCHED_NORMAL`` and scheduled by CFS until the BPF scheduler is
-loaded.
+treated as ``SCHED_NORMAL`` and scheduled by the fair-class scheduler until the
+BPF scheduler is loaded.
When the BPF scheduler is loaded and ``SCX_OPS_SWITCH_PARTIAL`` is not set
in ``ops->flags``, all ``SCHED_NORMAL``, ``SCHED_BATCH``, ``SCHED_IDLE``, and
@@ -57,11 +59,11 @@ in ``ops->flags``, all ``SCHED_NORMAL``, ``SCHED_BATCH``, ``SCHED_IDLE``, and
However, when the BPF scheduler is loaded and ``SCX_OPS_SWITCH_PARTIAL`` is
set in ``ops->flags``, only tasks with the ``SCHED_EXT`` policy are scheduled
by sched_ext, while tasks with ``SCHED_NORMAL``, ``SCHED_BATCH`` and
-``SCHED_IDLE`` policies are scheduled by CFS.
+``SCHED_IDLE`` policies are scheduled by the fair-class scheduler.
Terminating the sched_ext scheduler program, triggering `SysRq-S`, or
detection of any internal error including stalled runnable tasks aborts the
-BPF scheduler and reverts all tasks back to CFS.
+BPF scheduler and reverts all tasks back to the fair-class scheduler.
.. code-block:: none
@@ -197,8 +199,8 @@ Dispatch Queues
To match the impedance between the scheduler core and the BPF scheduler,
sched_ext uses DSQs (dispatch queues) which can operate as both a FIFO and a
priority queue. By default, there is one global FIFO (``SCX_DSQ_GLOBAL``),
-and one local dsq per CPU (``SCX_DSQ_LOCAL``). The BPF scheduler can manage
-an arbitrary number of dsq's using ``scx_bpf_create_dsq()`` and
+and one local DSQ per CPU (``SCX_DSQ_LOCAL``). The BPF scheduler can manage
+an arbitrary number of DSQs using ``scx_bpf_create_dsq()`` and
``scx_bpf_destroy_dsq()``.
A CPU always executes a task from its local DSQ. A task is "inserted" into a
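As a hedged sketch, assuming the helper macros shipped with the in-tree example schedulers under tools/sched_ext (``MY_DSQ_ID`` and the ``mysched`` names are invented), a BPF scheduler could create and destroy a custom DSQ like this::

  #define MY_DSQ_ID 0     /* any ID without the built-in DSQ flag bit */

  s32 BPF_STRUCT_OPS_SLEEPABLE(mysched_init)
  {
          return scx_bpf_create_dsq(MY_DSQ_ID, -1);   /* -1: any NUMA node */
  }

  void BPF_STRUCT_OPS(mysched_exit, struct scx_exit_info *ei)
  {
          scx_bpf_destroy_dsq(MY_DSQ_ID);
  }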
diff --git a/Documentation/scheduler/sched-stats.rst b/Documentation/scheduler/sched-stats.rst
index 08b6bc9a315c..d82e7d2b54f0 100644
--- a/Documentation/scheduler/sched-stats.rst
+++ b/Documentation/scheduler/sched-stats.rst
@@ -135,7 +135,7 @@ of idleness (busy, idle and newly idle):
cpu was idle but no busier group was found
23) # of times in this domain sched_balance_rq() was called when the
- was just becoming idle
+ cpu was just becoming idle
24) # of times in this domain sched_balance_rq() checked but found the
load did not require balancing when the cpu was just becoming idle
25) # of times in this domain sched_balance_rq() tried to move one or more
diff --git a/Documentation/scsi/scsi_mid_low_api.rst b/Documentation/scsi/scsi_mid_low_api.rst
index 3cd6dce98e74..3ac4c7fafb55 100644
--- a/Documentation/scsi/scsi_mid_low_api.rst
+++ b/Documentation/scsi/scsi_mid_low_api.rst
@@ -37,7 +37,7 @@ ISA adapters).]
The SCSI mid level isolates an LLD from other layers such as the SCSI
upper layer drivers and the block layer.
-This version of the document roughly matches linux kernel version 2.6.8 .
+This version of the document roughly matches Linux kernel version 2.6.8 .
Documentation
=============
@@ -48,7 +48,7 @@ found in that directory. A more recent copy of this document may be found
at https://docs.kernel.org/scsi/scsi_mid_low_api.html. Many LLDs are
documented in Documentation/scsi (e.g. aic7xxx.rst). The SCSI mid-level is
briefly described in scsi.rst which contains a URL to a document describing
-the SCSI subsystem in the Linux Kernel 2.4 series. Two upper level
+the SCSI subsystem in the Linux kernel 2.4 series. Two upper level
drivers have documents in that directory: st.rst (SCSI tape driver) and
scsi-generic.rst (for the sg driver).
@@ -75,7 +75,7 @@ It is probably best to study how existing LLDs are organized.
As the 2.5 series development kernels evolve into the 2.6 series
production series, changes are being introduced into this interface. An
example of this is driver initialization code where there are now 2 models
-available. The older one, similar to what was found in the lk 2.4 series,
+available. The older one, similar to what was found in the Linux 2.4 series,
is based on hosts that are detected at HBA driver load time. This will be
referred to the "passive" initialization model. The newer model allows HBAs
to be hot plugged (and unplugged) during the lifetime of the LLD and will
@@ -1026,7 +1026,7 @@ initialized from the driver's struct scsi_host_template instance. Members
of interest:
host_no
- - system wide unique number that is used for identifying
+ - system-wide unique number that is used for identifying
this host. Issued in ascending order from 0.
can_queue
- must be greater than 0; do not send more than can_queue
@@ -1053,7 +1053,7 @@ of interest:
- pointer to driver's struct scsi_host_template from which
this struct Scsi_Host instance was spawned
hostt->proc_name
- - name of LLD. This is the driver name that sysfs uses
+ - name of LLD. This is the driver name that sysfs uses.
transportt
- pointer to driver's struct scsi_transport_template instance
(if any). FC and SPI transports currently supported.
@@ -1067,7 +1067,7 @@ The scsi_host structure is defined in include/scsi/scsi_host.h
struct scsi_device
------------------
Generally, there is one instance of this structure for each SCSI logical unit
-on a host. Scsi devices connected to a host are uniquely identified by a
+on a host. SCSI devices connected to a host are uniquely identified by a
channel number, target id and logical unit number (lun).
The structure is defined in include/scsi/scsi_device.h
@@ -1091,7 +1091,7 @@ Members of interest:
- should be set by LLD prior to calling 'done'. A value
of 0 implies a successfully completed command (and all
data (if any) has been transferred to or from the SCSI
- target device). 'result' is a 32 bit unsigned integer that
+ target device). 'result' is a 32-bit unsigned integer that
can be viewed as 2 related bytes. The SCSI status value is
in the LSB. See include/scsi/scsi.h status_byte() and
host_byte() macros and related constants.
@@ -1180,8 +1180,8 @@ may get out of synchronization. This is why it is best for the LLD
to perform autosense.
-Changes since lk 2.4 series
-===========================
+Changes since Linux kernel 2.4 series
+=====================================
io_request_lock has been replaced by several finer grained locks. The lock
relevant to LLDs is struct Scsi_Host::host_lock and there is
one per SCSI host.
diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py
index ecf54d22e9dc..fd633f7a0bc3 100644
--- a/Documentation/sphinx/automarkup.py
+++ b/Documentation/sphinx/automarkup.py
@@ -128,13 +128,8 @@ def note_failure(target):
# own C role, but both match the same regex, so we try both.
#
def markup_func_ref_sphinx3(docname, app, match):
- cdom = app.env.domains['c']
- #
- # Go through the dance of getting an xref out of the C domain
- #
base_target = match.group(2)
target_text = nodes.Text(match.group(0))
- xref = None
possible_targets = [base_target]
# Check if this document has a namespace, and if so, try
# cross-referencing inside it first.
@@ -146,22 +141,8 @@ def markup_func_ref_sphinx3(docname, app, match):
if (target not in Skipfuncs) and not failure_seen(target):
lit_text = nodes.literal(classes=['xref', 'c', 'c-func'])
lit_text += target_text
- pxref = addnodes.pending_xref('', refdomain = 'c',
- reftype = 'function',
- reftarget = target,
- modname = None,
- classname = None)
- #
- # XXX The Latex builder will throw NoUri exceptions here,
- # work around that by ignoring them.
- #
- try:
- xref = cdom.resolve_xref(app.env, docname, app.builder,
- 'function', target, pxref,
- lit_text)
- except NoUri:
- xref = None
-
+ xref = add_and_resolve_xref(app, docname, 'c', 'function',
+ target, contnode=lit_text)
if xref:
return xref
note_failure(target)
@@ -188,13 +169,8 @@ def markup_c_ref(docname, app, match):
RE_typedef: 'type',
}
- cdom = app.env.domains['c']
- #
- # Go through the dance of getting an xref out of the C domain
- #
base_target = match.group(2)
target_text = nodes.Text(match.group(0))
- xref = None
possible_targets = [base_target]
# Check if this document has a namespace, and if so, try
# cross-referencing inside it first.
@@ -206,21 +182,9 @@ def markup_c_ref(docname, app, match):
if not (match.re == RE_function and target in Skipfuncs):
lit_text = nodes.literal(classes=['xref', 'c', class_str[match.re]])
lit_text += target_text
- pxref = addnodes.pending_xref('', refdomain = 'c',
- reftype = reftype_str[match.re],
- reftarget = target, modname = None,
- classname = None)
- #
- # XXX The Latex builder will throw NoUri exceptions here,
- # work around that by ignoring them.
- #
- try:
- xref = cdom.resolve_xref(app.env, docname, app.builder,
- reftype_str[match.re], target, pxref,
- lit_text)
- except NoUri:
- xref = None
-
+ xref = add_and_resolve_xref(app, docname, 'c',
+ reftype_str[match.re], target,
+ contnode=lit_text)
if xref:
return xref
@@ -231,30 +195,12 @@ def markup_c_ref(docname, app, match):
# cross reference to that page
#
def markup_doc_ref(docname, app, match):
- stddom = app.env.domains['std']
- #
- # Go through the dance of getting an xref out of the std domain
- #
absolute = match.group(1)
target = match.group(2)
if absolute:
target = "/" + target
- xref = None
- pxref = addnodes.pending_xref('', refdomain = 'std', reftype = 'doc',
- reftarget = target, modname = None,
- classname = None, refexplicit = False)
- #
- # XXX The Latex builder will throw NoUri exceptions here,
- # work around that by ignoring them.
- #
- try:
- xref = stddom.resolve_xref(app.env, docname, app.builder, 'doc',
- target, pxref, None)
- except NoUri:
- xref = None
- #
- # Return the xref if we got it; otherwise just return the plain text.
- #
+
+ xref = add_and_resolve_xref(app, docname, 'std', 'doc', target)
if xref:
return xref
else:
@@ -265,10 +211,6 @@ def markup_doc_ref(docname, app, match):
# with a cross reference to that page
#
def markup_abi_ref(docname, app, match, warning=False):
- stddom = app.env.domains['std']
- #
- # Go through the dance of getting an xref out of the std domain
- #
kernel_abi = get_kernel_abi()
fname = match.group(1)
@@ -280,7 +222,18 @@ def markup_abi_ref(docname, app, match, warning=False):
kernel_abi.log.warning("%s not found", fname)
return nodes.Text(match.group(0))
- pxref = addnodes.pending_xref('', refdomain = 'std', reftype = 'ref',
+ xref = add_and_resolve_xref(app, docname, 'std', 'ref', target)
+ if xref:
+ return xref
+ else:
+ return nodes.Text(match.group(0))
+
+def add_and_resolve_xref(app, docname, domain, reftype, target, contnode=None):
+ #
+ # Go through the dance of getting an xref out of the corresponding domain
+ #
+ dom_obj = app.env.domains[domain]
+ pxref = addnodes.pending_xref('', refdomain = domain, reftype = reftype,
reftarget = target, modname = None,
classname = None, refexplicit = False)
@@ -289,17 +242,15 @@ def markup_abi_ref(docname, app, match, warning=False):
# work around that by ignoring them.
#
try:
- xref = stddom.resolve_xref(app.env, docname, app.builder, 'ref',
- target, pxref, None)
+ xref = dom_obj.resolve_xref(app.env, docname, app.builder, reftype,
+ target, pxref, contnode)
except NoUri:
xref = None
- #
- # Return the xref if we got it; otherwise just return the plain text.
- #
+
if xref:
return xref
- else:
- return nodes.Text(match.group(0))
+
+ return None
#
# Variant of markup_abi_ref() that warns whan a reference is not found
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
index 39ddae6ae7dd..b818d4c77924 100644
--- a/Documentation/sphinx/kerneldoc.py
+++ b/Documentation/sphinx/kerneldoc.py
@@ -40,8 +40,40 @@ from docutils.parsers.rst import directives, Directive
import sphinx
from sphinx.util.docutils import switch_source_input
from sphinx.util import logging
+from pprint import pformat
+
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "scripts/lib/kdoc"))
+
+from kdoc_files import KernelFiles
+from kdoc_output import RestFormat
__version__ = '1.0'
+kfiles = None
+logger = logging.getLogger(__name__)
+
+def cmd_str(cmd):
+ """
+ Helper function to output a command line that can be used to produce
+ the same records via command line. Helpful to debug troubles at the
+ script.
+ """
+
+ cmd_line = ""
+
+ for w in cmd:
+ if w == "" or " " in w:
+ esc_cmd = "'" + w + "'"
+ else:
+ esc_cmd = w
+
+ if cmd_line:
+ cmd_line += " " + esc_cmd
+ continue
+ else:
+ cmd_line = esc_cmd
+
+ return cmd_line
class KernelDocDirective(Directive):
"""Extract kernel-doc comments from the specified file"""
@@ -56,19 +88,48 @@ class KernelDocDirective(Directive):
'functions': directives.unchanged,
}
has_content = False
- logger = logging.getLogger('kerneldoc')
+ verbose = 0
+
+ parse_args = {}
+ msg_args = {}
+
+ def handle_args(self):
- def run(self):
env = self.state.document.settings.env
cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']
filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
+
+ # Arguments used by KernelFiles.parse() function
+ self.parse_args = {
+ "file_list": [filename],
+ "export_file": []
+ }
+
+ # Arguments used by KernelFiles.msg() function
+ self.msg_args = {
+ "enable_lineno": True,
+ "export": False,
+ "internal": False,
+ "symbol": [],
+ "nosymbol": [],
+ "no_doc_sections": False
+ }
+
export_file_patterns = []
+ verbose = os.environ.get("V")
+ if verbose:
+ try:
+ self.verbose = int(verbose)
+ except ValueError:
+ pass
+
# Tell sphinx of the dependency
env.note_dependency(os.path.abspath(filename))
- tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
+ self.tab_width = self.options.get('tab-width',
+ self.state.document.settings.tab_width)
# 'function' is an alias of 'identifiers'
if 'functions' in self.options:
@@ -77,80 +138,166 @@ class KernelDocDirective(Directive):
# FIXME: make this nicer and more robust against errors
if 'export' in self.options:
cmd += ['-export']
+ self.msg_args["export"] = True
export_file_patterns = str(self.options.get('export')).split()
elif 'internal' in self.options:
cmd += ['-internal']
+ self.msg_args["internal"] = True
export_file_patterns = str(self.options.get('internal')).split()
elif 'doc' in self.options:
- cmd += ['-function', str(self.options.get('doc'))]
+ func = str(self.options.get('doc'))
+ cmd += ['-function', func]
+ self.msg_args["symbol"].append(func)
elif 'identifiers' in self.options:
identifiers = self.options.get('identifiers').split()
if identifiers:
for i in identifiers:
+ i = i.rstrip("\\").strip()
+ if not i:
+ continue
+
cmd += ['-function', i]
+ self.msg_args["symbol"].append(i)
else:
cmd += ['-no-doc-sections']
+ self.msg_args["no_doc_sections"] = True
if 'no-identifiers' in self.options:
no_identifiers = self.options.get('no-identifiers').split()
if no_identifiers:
for i in no_identifiers:
+ i = i.rstrip("\\").strip()
+ if not i:
+ continue
+
cmd += ['-nosymbol', i]
+ self.msg_args["nosymbol"].append(i)
for pattern in export_file_patterns:
+ pattern = pattern.rstrip("\\").strip()
+ if not pattern:
+ continue
+
for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
env.note_dependency(os.path.abspath(f))
cmd += ['-export-file', f]
+ self.parse_args["export_file"].append(f)
+
+ # Export file is needed by both parse and msg, as kernel-doc
+ # cache exports.
+ self.msg_args["export_file"] = self.parse_args["export_file"]
cmd += [filename]
- try:
- self.logger.verbose("calling kernel-doc '%s'" % (" ".join(cmd)))
+ return cmd
+
+ def run_cmd(self, cmd):
+ """
+ Execute an external kernel-doc command.
+ """
+
+ env = self.state.document.settings.env
+ node = nodes.section()
+
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = p.communicate()
+ out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
- out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
+ if p.returncode != 0:
+ sys.stderr.write(err)
- if p.returncode != 0:
- sys.stderr.write(err)
+ logger.warning("kernel-doc '%s' failed with return code %d"
+ % (" ".join(cmd), p.returncode))
+ return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
+ elif env.config.kerneldoc_verbosity > 0:
+ sys.stderr.write(err)
+
+ filenames = self.parse_args["file_list"]
+ for filename in filenames:
+ self.parse_msg(filename, node, out, cmd)
+
+ return node.children
+
+ def parse_msg(self, filename, node, out, cmd):
+ """
+ Handles a kernel-doc output for a given file
+ """
+
+ env = self.state.document.settings.env
- self.logger.warning("kernel-doc '%s' failed with return code %d"
- % (" ".join(cmd), p.returncode))
- return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
- elif env.config.kerneldoc_verbosity > 0:
- sys.stderr.write(err)
+ lines = statemachine.string2lines(out, self.tab_width,
+ convert_whitespace=True)
+ result = ViewList()
+
+ lineoffset = 0;
+ line_regex = re.compile(r"^\.\. LINENO ([0-9]+)$")
+ for line in lines:
+ match = line_regex.search(line)
+ if match:
+ # sphinx counts lines from 0
+ lineoffset = int(match.group(1)) - 1
+ # we must eat our comments since they upset the markup
+ else:
+ doc = str(env.srcdir) + "/" + env.docname + ":" + str(self.lineno)
+ result.append(line, doc + ": " + filename, lineoffset)
+ lineoffset += 1
+
+ self.do_parse(result, node)
+
+ def run_kdoc(self, cmd, kfiles):
+ """
+ Execute kernel-doc classes directly instead of running as a separate
+ command.
+ """
+
+ env = self.state.document.settings.env
- lines = statemachine.string2lines(out, tab_width, convert_whitespace=True)
- result = ViewList()
+ node = nodes.section()
- lineoffset = 0;
- line_regex = re.compile(r"^\.\. LINENO ([0-9]+)$")
- for line in lines:
- match = line_regex.search(line)
- if match:
- # sphinx counts lines from 0
- lineoffset = int(match.group(1)) - 1
- # we must eat our comments since the upset the markup
- else:
- doc = str(env.srcdir) + "/" + env.docname + ":" + str(self.lineno)
- result.append(line, doc + ": " + filename, lineoffset)
- lineoffset += 1
+ kfiles.parse(**self.parse_args)
+ filenames = self.parse_args["file_list"]
- node = nodes.section()
- self.do_parse(result, node)
+ for filename, out in kfiles.msg(**self.msg_args, filenames=filenames):
+ self.parse_msg(filename, node, out, cmd)
- return node.children
+ return node.children
+
+ def run(self):
+ global kfiles
+
+ cmd = self.handle_args()
+ if self.verbose >= 1:
+ logger.info(cmd_str(cmd))
+
+ try:
+ if kfiles:
+ return self.run_kdoc(cmd, kfiles)
+ else:
+ return self.run_cmd(cmd)
except Exception as e: # pylint: disable=W0703
- self.logger.warning("kernel-doc '%s' processing failed with: %s" %
- (" ".join(cmd), str(e)))
+ logger.warning("kernel-doc '%s' processing failed with: %s" %
+ (cmd_str(cmd), pformat(e)))
return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
def do_parse(self, result, node):
with switch_source_input(self.state, result):
self.state.nested_parse(result, 0, node, match_titles=1)
+def setup_kfiles(app):
+ global kfiles
+
+ kerneldoc_bin = app.env.config.kerneldoc_bin
+
+ if kerneldoc_bin and kerneldoc_bin.endswith("kernel-doc.py"):
+ print("Using Python kernel-doc")
+ out_style = RestFormat()
+ kfiles = KernelFiles(out_style=out_style, logger=logger)
+ else:
+ print(f"Using {kerneldoc_bin}")
+
+
def setup(app):
app.add_config_value('kerneldoc_bin', None, 'env')
app.add_config_value('kerneldoc_srctree', None, 'env')
@@ -158,6 +305,8 @@ def setup(app):
app.add_directive('kernel-doc', KernelDocDirective)
+ app.connect('builder-inited', setup_kfiles)
+
return dict(
version = __version__,
parallel_read_safe = True,
diff --git a/Documentation/staging/rpmsg.rst b/Documentation/staging/rpmsg.rst
index 3713adaa1608..40282cca86ca 100644
--- a/Documentation/staging/rpmsg.rst
+++ b/Documentation/staging/rpmsg.rst
@@ -112,31 +112,6 @@ Returns 0 on success and an appropriate error value on failure.
::
- int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
-
-
-sends a message across to the remote processor, using the src and dst
-addresses provided by the user.
-
-The caller should specify the endpoint, the data it wants to send,
-its length (in bytes), and explicit source and destination addresses.
-The message will then be sent to the remote processor to which the
-endpoint's channel belongs, but the endpoint's src and channel dst
-addresses will be ignored (and the user-provided addresses will
-be used instead).
-
-In case there are no TX buffers available, the function will block until
-one becomes available (i.e. until the remote processor consumes
-a tx buffer and puts it back on virtio's used descriptor ring),
-or a timeout of 15 seconds elapses. When the latter happens,
--ERESTARTSYS is returned.
-
-The function can only be called from a process context (for now).
-Returns 0 on success and an appropriate error value on failure.
-
-::
-
int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
sends a message across to the remote processor from a given endpoint.
@@ -175,27 +150,6 @@ Returns 0 on success and an appropriate error value on failure.
::
- int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
-
-
-sends a message across to the remote processor, using source and
-destination addresses provided by the user.
-
-The user should specify the channel, the data it wants to send,
-its length (in bytes), and explicit source and destination addresses.
-The message will then be sent to the remote processor to which the
-channel belongs, but the channel's src and dst addresses will be
-ignored (and the user-provided addresses will be used instead).
-
-In case there are no TX buffers available, the function will immediately
-return -ENOMEM without waiting until one becomes available.
-
-The function can only be called from a process context (for now).
-Returns 0 on success and an appropriate error value on failure.
-
-::
-
struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev,
rpmsg_rx_cb_t cb, void *priv,
struct rpmsg_channel_info chinfo);
diff --git a/Documentation/staging/speculation.rst b/Documentation/staging/speculation.rst
index 8045d99bcf12..0d526ba55d14 100644
--- a/Documentation/staging/speculation.rst
+++ b/Documentation/staging/speculation.rst
@@ -63,7 +63,6 @@ of an out-of-bounds address, while the second call will influence
microarchitectural state dependent on this value. This may provide an
arbitrary read primitive.
-====================================
Mitigating speculation side-channels
====================================
diff --git a/Documentation/tools/rtla/common_timerlat_description.rst b/Documentation/tools/rtla/common_timerlat_description.rst
index 321201cb8597..49fcae3ffdec 100644
--- a/Documentation/tools/rtla/common_timerlat_description.rst
+++ b/Documentation/tools/rtla/common_timerlat_description.rst
@@ -6,5 +6,13 @@ debugging of operating system timer latency.
The *timerlat* tracer outputs information in two ways. It periodically
prints the timer latency at the timer *IRQ* handler and the *Thread*
-handler. It also enable the trace of the most relevant information via
+handler. It also enables the trace of the most relevant information via
**osnoise:** tracepoints.
+
+The **rtla timerlat** tool sets the options of the *timerlat* tracer
+and collects and displays a summary of the results. By default,
+the collection is done synchronously in kernel space using a dedicated
+BPF program attached to the *timerlat* tracer. If either BPF or
+the **osnoise:timerlat_sample** tracepoint it attaches to is
+unavailable, the **rtla timerlat** tool falls back to using tracefs to
+process the data asynchronously in user space.
diff --git a/Documentation/tools/rtla/rtla-timerlat.rst b/Documentation/tools/rtla/rtla-timerlat.rst
index 44a49e6f302b..20e2d259467f 100644
--- a/Documentation/tools/rtla/rtla-timerlat.rst
+++ b/Documentation/tools/rtla/rtla-timerlat.rst
@@ -16,13 +16,10 @@ DESCRIPTION
.. include:: common_timerlat_description.rst
-The *timerlat* tracer outputs information in two ways. It periodically
-prints the timer latency at the timer *IRQ* handler and the *Thread* handler.
-It also provides information for each noise via the **osnoise:** tracepoints.
The **rtla timerlat top** mode displays a summary of the periodic output
-from the *timerlat* tracer. The **rtla hist hist** mode displays a histogram
-of each tracer event occurrence. For further details, please refer to the
-respective man page.
+from the *timerlat* tracer. The **rtla timerlat hist** mode displays
+a histogram of each tracer event occurrence. For further details, please
+refer to the respective man page.
MODES
=====
diff --git a/Documentation/trace/coresight/panic.rst b/Documentation/trace/coresight/panic.rst
index a58aa914c241..6e4bde953cae 100644
--- a/Documentation/trace/coresight/panic.rst
+++ b/Documentation/trace/coresight/panic.rst
@@ -67,8 +67,8 @@ Trace data captured at the time of panic, can be read from rebooted kernel
or from crashdump kernel using a special device file /dev/crash_tmc_xxx.
This device file is created only when there is a valid crashdata available.
-General flow of trace capture and decode incase of kernel panic
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+General flow of trace capture and decode in case of kernel panic
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1. Enable source and sink on all the cores using the sysfs interface.
ETR sinks should have trace buffers allocated from reserved memory,
by selecting "resrv" buffer mode from sysfs.
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index c9e88bf65709..af66a05e18cc 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -1205,6 +1205,19 @@ Here are the available options:
default instance. The only way the top level instance has this flag
cleared, is by it being set in another instance.
+ copy_trace_marker
+ If there are applications that hard code writing into the top level
+ trace_marker file (/sys/kernel/tracing/trace_marker or trace_marker_raw),
+ and the tooling would like it to go into an instance, this option can
+ be used. Create an instance and set this option, and then all writes
+ into the top level trace_marker file will also be redirected into this
+ instance.
+
+ Note, by default this option is set for the top level instance. If it
+ is disabled, then writes to the trace_marker or trace_marker_raw files
+ will not be written into the top level file. If no instance has this
+ option set, then a write will error with the errno of ENODEV.
+
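For context, a hedged sketch of the kind of application-side write this option is meant to redirect (error handling trimmed for brevity)::

  #include <fcntl.h>
  #include <string.h>
  #include <unistd.h>

  static void emit_marker(const char *msg)
  {
          int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);

          if (fd >= 0) {
                  write(fd, msg, strlen(msg));
                  close(fd);
          }
  }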
annotate
It is sometimes confusing when the CPU buffers are full
and one CPU buffer had a lot of events recently, thus
diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst
index 2c991dc96ace..cc1dc5a087e8 100644
--- a/Documentation/trace/index.rst
+++ b/Documentation/trace/index.rst
@@ -1,39 +1,103 @@
-==========================
-Linux Tracing Technologies
-==========================
+================================
+Linux Tracing Technologies Guide
+================================
+
+Tracing in the Linux kernel is a powerful mechanism that allows
+developers and system administrators to analyze and debug system
+behavior. This guide provides documentation on various tracing
+frameworks and tools available in the Linux kernel.
+
+Introduction to Tracing
+-----------------------
+
+This section provides an overview of Linux tracing mechanisms
+and debugging approaches.
.. toctree::
- :maxdepth: 2
+ :maxdepth: 1
- ftrace-design
+ debugging
+ tracepoints
tracepoint-analysis
+ ring-buffer-map
+
+Core Tracing Frameworks
+-----------------------
+
+The following are the primary tracing frameworks integrated into
+the Linux kernel.
+
+.. toctree::
+ :maxdepth: 1
+
ftrace
+ ftrace-design
ftrace-uses
- fprobe
kprobes
kprobetrace
- uprobetracer
fprobetrace
- tracepoints
+ fprobe
+ ring-buffer-design
+
+Event Tracing and Analysis
+--------------------------
+
+A detailed explanation of event tracing mechanisms and their
+applications.
+
+.. toctree::
+ :maxdepth: 1
+
events
events-kmem
events-power
events-nmi
events-msr
- mmiotrace
+ boottime-trace
histogram
histogram-design
- boottime-trace
- debugging
- hwlat_detector
- osnoise-tracer
- timerlat-tracer
+
+Hardware and Performance Tracing
+--------------------------------
+
+This section covers tracing features that monitor hardware
+interactions and system performance.
+
+.. toctree::
+ :maxdepth: 1
+
intel_th
- ring-buffer-design
- ring-buffer-map
stm
sys-t
coresight/index
- user_events
rv/index
hisi-ptt
+ mmiotrace
+ hwlat_detector
+ osnoise-tracer
+ timerlat-tracer
+
+User-Space Tracing
+------------------
+
+These tools allow tracing user-space applications and
+interactions.
+
+.. toctree::
+ :maxdepth: 1
+
+ user_events
+ uprobetracer
+
+Additional Resources
+--------------------
+
+For more details, refer to the respective documentation of each
+tracing tool and framework.
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/trace/tracepoints.rst b/Documentation/trace/tracepoints.rst
index decabcc77b56..b35c40e3abbe 100644
--- a/Documentation/trace/tracepoints.rst
+++ b/Documentation/trace/tracepoints.rst
@@ -71,7 +71,7 @@ In subsys/file.c (where the tracing statement must be added)::
void somefct(void)
{
...
- trace_subsys_eventname(arg, task);
+ trace_subsys_eventname_tp(arg, task);
...
}
@@ -129,12 +129,12 @@ within an if statement with the following::
for (i = 0; i < count; i++)
tot += calculate_nuggets();
- trace_foo_bar(tot);
+ trace_foo_bar_tp(tot);
}
-All trace_<tracepoint>() calls have a matching trace_<tracepoint>_enabled()
+All trace_<tracepoint>_tp() calls have a matching trace_<tracepoint>_enabled()
function defined that returns true if the tracepoint is enabled and
-false otherwise. The trace_<tracepoint>() should always be within the
+false otherwise. The trace_<tracepoint>_tp() should always be within the
block of the if (trace_<tracepoint>_enabled()) to prevent races between
the tracepoint being enabled and the check being seen.
@@ -143,7 +143,10 @@ the static_key of the tracepoint to allow the if statement to be implemented
with jump labels and avoid conditional branches.
.. note:: The convenience macro TRACE_EVENT provides an alternative way to
- define tracepoints. Check http://lwn.net/Articles/379903,
+ define tracepoints. Note, DECLARE_TRACE(foo) creates a function
+ "trace_foo_tp()" whereas TRACE_EVENT(foo) creates a function
+ "trace_foo()", and also exposes the tracepoint as a trace event in
+ /sys/kernel/tracing/events directory. Check http://lwn.net/Articles/379903,
http://lwn.net/Articles/381064 and http://lwn.net/Articles/383362
for a series of articles with more details.
@@ -159,7 +162,9 @@ In a C file::
void do_trace_foo_bar_wrapper(args)
{
- trace_foo_bar(args);
+ trace_foo_bar_tp(args); // for tracepoints created via DECLARE_TRACE
+ // or
+ trace_foo_bar(args); // for tracepoints created via TRACE_EVENT
}
In the header file::
diff --git a/Documentation/translations/it_IT/process/changes.rst b/Documentation/translations/it_IT/process/changes.rst
index c7d05e2fff15..77db13c4022b 100644
--- a/Documentation/translations/it_IT/process/changes.rst
+++ b/Documentation/translations/it_IT/process/changes.rst
@@ -32,13 +32,13 @@ PC Card, per esempio, probabilmente non dovreste preoccuparvi di pcmciautils.
====================== ================= ========================================
Programma Versione minima Comando per verificare la versione
====================== ================= ========================================
-GNU C 5.1 gcc --version
+GNU C 8.1 gcc --version
Clang/LLVM (optional) 13.0.0 clang --version
Rust (opzionale) 1.78.0 rustc --version
bindgen (opzionale) 0.65.1 bindgen --version
GNU make 4.0 make --version
bash 4.2 bash --version
-binutils 2.25 ld -v
+binutils 2.30 ld -v
flex 2.5.35 flex --version
bison 2.0 bison --version
pahole 1.16 pahole --version
@@ -118,7 +118,7 @@ Questo richiede bash 4.2 o successivo.
Binutils
--------
-Per generare il kernel è necessario avere Binutils 2.25 o superiore.
+Per generare il kernel è necessario avere Binutils 2.30 o superiore.
pkg-config
----------
diff --git a/Documentation/translations/sp_SP/process/2.Process.rst b/Documentation/translations/sp_SP/process/2.Process.rst
index 5993eed71563..c21b0134cfa1 100644
--- a/Documentation/translations/sp_SP/process/2.Process.rst
+++ b/Documentation/translations/sp_SP/process/2.Process.rst
@@ -428,13 +428,14 @@ los desarrolladores, que corren el riesgo de quedar enterrados bajo una
carga de correo electrónico, incumplir las convenciones utilizadas en las
listas de Linux, o ambas cosas.
-La mayoría de las listas de correo del kernel se ejecutan en
-vger.kernel.org; la lista principal se puede encontrar en:
+La mayoría de las listas de correo del kernel se alojan en kernel.org; la
+lista principal se puede encontrar en:
- http://vger.kernel.org/vger-lists.html
+ https://subspace.kernel.org
-Sim embargo, hay listas alojadas en otros lugares; varios de ellos se
-encuentran en redhat.com/mailman/listinfo.
+Sin embargo, hay listas alojadas en otros lugares; consulte el archivo
+MAINTAINERS para obtener la lista relevante para cualquier subsistema en
+particular.
La lista de correo principal para el desarrollo del kernel es, por
supuesto, linux-kernel. Esta lista es un lugar intimidante; el volumen
diff --git a/Documentation/translations/sp_SP/process/howto.rst b/Documentation/translations/sp_SP/process/howto.rst
index 72ea855ac9dc..e1a6e6a52ae4 100644
--- a/Documentation/translations/sp_SP/process/howto.rst
+++ b/Documentation/translations/sp_SP/process/howto.rst
@@ -334,7 +334,7 @@ con el árbol principal, necesitan probar su integración. Para ello, existe
un repositorio especial de pruebas en el que se encuentran casi todos los
árboles de subsistema, actualizado casi a diario:
- https://git.kernel.org/?p=linux/kernel/git/next/linux-next.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
De esta manera, linux-next ofrece una perspectiva resumida de lo que se
espera que entre en el kernel principal en el próximo período de "merge"
@@ -378,13 +378,13 @@ desarrolladores del kernel participan en la lista de correo del kernel de
Linux. Detalles sobre cómo para suscribirse y darse de baja de la lista se
pueden encontrar en:
- http://vger.kernel.org/vger-lists.html#linux-kernel
+ https://subspace.kernel.org/subscribing.html
Existen archivos de la lista de correo en la web en muchos lugares
distintos. Utilice un motor de búsqueda para encontrar estos archivos. Por
ejemplo:
- http://dir.gmane.org/gmane.linux.kernel
+ https://lore.kernel.org/linux-kernel/
Es muy recomendable que busque en los archivos sobre el tema que desea
tratar, antes de publicarlo en la lista. Un montón de cosas ya discutidas
@@ -398,13 +398,13 @@ los diferentes grupos.
Muchas de las listas están alojadas en kernel.org. La información sobre
estas puede ser encontrada en:
- http://vger.kernel.org/vger-lists.html
+ https://subspace.kernel.org
Recuerde mantener buenos hábitos de comportamiento al usar las listas.
Aunque un poco cursi, la siguiente URL tiene algunas pautas simples para
interactuar con la lista (o cualquier lista):
- http://www.albion.com/netiquette/
+ https://subspace.kernel.org/etiquette.html
Si varias personas responden a su correo, el CC (lista de destinatarios)
puede hacerse bastante grande. No elimine a nadie de la lista CC: sin una
diff --git a/Documentation/translations/sp_SP/process/kernel-docs.rst b/Documentation/translations/sp_SP/process/kernel-docs.rst
index a62c6854f59b..b9e0ca4be324 100644
--- a/Documentation/translations/sp_SP/process/kernel-docs.rst
+++ b/Documentation/translations/sp_SP/process/kernel-docs.rst
@@ -170,9 +170,8 @@ Recursos varios
* Título: **linux-kernel mailing list archives and search engines**
- :URL: http://vger.kernel.org/vger-lists.html
- :URL: http://www.uwsg.indiana.edu/hypermail/linux/kernel/index.html
- :URL: http://groups.google.com/group/mlist.linux.kernel
+ :URL: https://subspace.kernel.org
+ :URL: https://lore.kernel.org
:Palabras Clave: linux-kernel, archives, buscar, search, archivos.
:Descripción: Algunos de los archivadores de listas de correo del
kernel de Linux. Si usted tiene uno mejor/otro, por favor hágamelo
diff --git a/Documentation/translations/sp_SP/process/submitting-patches.rst b/Documentation/translations/sp_SP/process/submitting-patches.rst
index 328ec80bd61d..ecb08b14c2c0 100644
--- a/Documentation/translations/sp_SP/process/submitting-patches.rst
+++ b/Documentation/translations/sp_SP/process/submitting-patches.rst
@@ -136,11 +136,11 @@ algo documentado en la web, referencie esto.
Cuando se vincule a archivos de listas de correo, preferiblemente use el
servicio de archivador de mensajes lore.kernel.org. Para crear la URL del
-enlace, utilice el contenido del encabezado ("header") ``Message-Id`` del
+enlace, utilice el contenido del encabezado ("header") ``Message-ID`` del
mensaje sin los corchetes angulares que lo rodean.
Por ejemplo::
- Link: https://lore.kernel.org/r/30th.anniversary.repost@klaava.Helsinki.FI/
+ Link: https://lore.kernel.org/30th.anniversary.repost@klaava.Helsinki.FI
Verifique el enlace para asegurarse de que realmente funciona y apunta al
mensaje correspondiente.
@@ -257,10 +257,10 @@ archivo MAINTAINERS una lista específica de los subsistemas; su parche
probablemente recibirá más atención allí. Sin embargo, no envíe spam a
listas no relacionadas.
-Muchas listas relacionadas con el kernel están alojadas en vger.kernel.org;
+Muchas listas relacionadas con el kernel están alojadas en kernel.org;
puedes encontrar un listado de estas en
-http://vger.kernel.org/vger-lists.html. Existen listas relacionadas con el
-kernel alojadas en otros lugares, no obstante.
+https://subspace.kernel.org. Existen listas relacionadas con el kernel
+alojadas en otros lugares, no obstante.
¡No envíe más de 15 parches a la vez a las listas de correo de vger!
@@ -907,9 +907,6 @@ Referencias
<http://www.kroah.com/log/linux/maintainer-06.html>
-NO!!!! Gente, no mas bombas enormes de parches a linux-kernel@vger.kernel.org!
- <https://lore.kernel.org/r/20050711.125305.08322243.davem@davemloft.net>
-
Kernel Documentation/process/coding-style.rst
Email de Linus Torvalds sobre la forma canónica de los parches:
diff --git a/Documentation/translations/zh_CN/admin-guide/README.rst b/Documentation/translations/zh_CN/admin-guide/README.rst
index 1bdafdc4c8e2..82e628b77efd 100644
--- a/Documentation/translations/zh_CN/admin-guide/README.rst
+++ b/Documentation/translations/zh_CN/admin-guide/README.rst
@@ -224,7 +224,7 @@ Linux内核6.x版本 <http://kernel.org/>
编译内核
---------
- - ç¡®ä¿æ‚¨è‡³å°‘有gcc 5.1å¯ç”¨ã€‚
+ - ç¡®ä¿æ‚¨è‡³å°‘有gcc 8.1å¯ç”¨ã€‚
有关更多信æ¯ï¼Œè¯·å‚阅 :ref:`Documentation/process/changes.rst <changes>` 。
- 执行 ``make`` æ¥åˆ›å»ºåŽ‹ç¼©å†…æ ¸æ˜ åƒã€‚如果您安装了lilo以适é…内核makefile,
diff --git a/Documentation/translations/zh_CN/admin-guide/bug-hunting.rst b/Documentation/translations/zh_CN/admin-guide/bug-hunting.rst
index c3f6a83294dc..4b3432753eb9 100644
--- a/Documentation/translations/zh_CN/admin-guide/bug-hunting.rst
+++ b/Documentation/translations/zh_CN/admin-guide/bug-hunting.rst
@@ -188,7 +188,7 @@ objdump
编行。如果没有调试符å·ï¼Œæ‚¨å°†çœ‹åˆ°æ‰€ç¤ºä¾‹ç¨‹çš„æ±‡ç¼–程åºä»£ç ï¼Œä½†æ˜¯å¦‚果内核有调试
符å·ï¼ŒC代ç ä¹Ÿå°†å¯è§ï¼ˆè°ƒè¯•符å·å¯ä»¥åœ¨å†…æ ¸é…ç½®èœå•çš„hacking项中å¯ç”¨ï¼‰ã€‚例如::
- $ objdump -r -S -l --disassemble net/dccp/ipv4.o
+ $ objdump -r -S -l --disassemble net/ipv4/tcp.o
.. note::
diff --git a/Documentation/translations/zh_CN/arch/openrisc/openrisc_port.rst b/Documentation/translations/zh_CN/arch/openrisc/openrisc_port.rst
index cadc580fa23b..d728e4db0b85 100644
--- a/Documentation/translations/zh_CN/arch/openrisc/openrisc_port.rst
+++ b/Documentation/translations/zh_CN/arch/openrisc/openrisc_port.rst
@@ -17,10 +17,10 @@ OpenRISC 1000系列(或1k)。
关于OpenRISC处ç†å™¨å’Œæ­£åœ¨è¿›è¡Œä¸­çš„å¼€å‘的信æ¯:
- ======= =============================
+ ======= ==============================
网站 https://openrisc.io
- 邮箱 openrisc@lists.librecores.org
- ======= =============================
+ 邮箱 linux-openrisc@vger.kernel.org
+ ======= ==============================
---------------------------------------------------------------------
@@ -36,11 +36,11 @@ OpenRISC工具链和Linux的构建指å—
工具链的构建指å—å¯ä»¥åœ¨openrisc.io或Stafford的工具链构建和å‘布脚本
中找到。
- ====== =================================================
- 二进制 https://github.com/openrisc/or1k-gcc/releases
+ ====== ==========================================================
+ 二进制 https://github.com/stffrdhrn/or1k-toolchain-build/releases
工具链 https://openrisc.io/software
构建 https://github.com/stffrdhrn/or1k-toolchain-build
- ====== =================================================
+ ====== ==========================================================
2) 构建
diff --git a/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst b/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst
index 9174fce12c1b..4a2d3b27aa4d 100644
--- a/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst
+++ b/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst
@@ -60,8 +60,6 @@ irq_domain和一个hwirqå·ä½œä¸ºå‚数。 如果hwirq的映射还ä¸å­˜åœ¨ï¼Œé‚
- irq_find_mapping()返回给定域和hwirqçš„Linux IRQå·ï¼Œå¦‚果没有映射则返回0。
-- irq_linear_revmap()现与irq_find_mapping()相åŒï¼Œå·²è¢«åºŸå¼ƒã€‚
-
- generic_handle_domain_irq()处ç†ä¸€ä¸ªç”±åŸŸå’Œhwirqå·æè¿°çš„ä¸­æ–­ã€‚
请注æ„,irq域的查找必须å‘生在与RCU读临界区兼容的上下文中。
@@ -83,7 +81,6 @@ irq_domain映射的类型
::
- irq_domain_add_linear()
irq_domain_create_linear()
线性å呿˜ å°„维护了一个固定大å°çš„表,该表以hwirqå·ä¸ºç´¢å¼•。 当一个hwirq被映射
@@ -104,7 +101,6 @@ irq_domain_add_linear()和irq_domain_create_linear()在功能上是等价的,
::
- irq_domain_add_tree()
irq_domain_create_tree()
irq_domain维护ç€ä»Žhwirqå·åˆ°Linux IRQçš„radix的树状映射。 当一个hwirq被映射时,
@@ -124,7 +120,7 @@ irq_domain_add_tree()å’Œirq_domain_create_tree()在功能上是等价的,除äº
::
- irq_domain_add_nomap()
+ irq_domain_create_nomap()
当硬件中的hwirqå·æ˜¯å¯ç¼–程的时候,就å¯ä»¥é‡‡ç”¨æ— æ˜ å°„类型。 åœ¨è¿™ç§æƒ…况下,最好将
Linux IRQå·ç¼–入硬件本身,这样就ä¸éœ€è¦æ˜ å°„了。 调用irq_create_direct_mapping()
@@ -138,8 +134,6 @@ Linux IRQå·ç¼–入硬件本身,这样就ä¸éœ€è¦æ˜ å°„了。 调用irq_create
::
- irq_domain_add_simple()
- irq_domain_add_legacy()
irq_domain_create_simple()
irq_domain_create_legacy()
diff --git a/Documentation/translations/zh_CN/core-api/printk-formats.rst b/Documentation/translations/zh_CN/core-api/printk-formats.rst
index bd36d35eba4e..96a917ecc93f 100644
--- a/Documentation/translations/zh_CN/core-api/printk-formats.rst
+++ b/Documentation/translations/zh_CN/core-api/printk-formats.rst
@@ -523,9 +523,8 @@ clk结构体
::
%pC pll1
- %pCn pll1
-用于打å°clk结构。%pC å’Œ %pCn æ‰“å°æ—¶é’Ÿçš„å称(通用时钟框架)或唯一的32ä½
+用于打å°clk结构。%pC æ‰“å°æ—¶é’Ÿçš„å称(通用时钟框架)或唯一的32ä½
ID(传统时钟框架)。
通过引用传递。
diff --git a/Documentation/translations/zh_CN/dev-tools/gdb-kernel-debugging.rst b/Documentation/translations/zh_CN/dev-tools/gdb-kernel-debugging.rst
index 3c133a918f30..282aacd33442 100644
--- a/Documentation/translations/zh_CN/dev-tools/gdb-kernel-debugging.rst
+++ b/Documentation/translations/zh_CN/dev-tools/gdb-kernel-debugging.rst
@@ -120,35 +120,31 @@ Kgdb内核调试器ã€QEMU等虚拟机管ç†ç¨‹åºæˆ–基于JTAG的硬件接å£ï¼
- 坹当剿ˆ–指定的CPU使用per-cpu函数::
- (gdb) p $lx_per_cpu("runqueues").nr_running
+ (gdb) p $lx_per_cpu(runqueues).nr_running
$3 = 1
- (gdb) p $lx_per_cpu("runqueues", 2).nr_running
+ (gdb) p $lx_per_cpu(runqueues, 2).nr_running
$4 = 0
- 使用container_of查看更多hrtimersä¿¡æ¯::
- (gdb) set $next = $lx_per_cpu("hrtimer_bases").clock_base[0].active.next
- (gdb) p *$container_of($next, "struct hrtimer", "node")
+ (gdb) set $leftmost = $lx_per_cpu(hrtimer_bases).clock_base[0].active.rb_root.rb_leftmost
+ (gdb) p *$container_of($leftmost, "struct hrtimer", "node")
$5 = {
node = {
node = {
- __rb_parent_color = 18446612133355256072,
- rb_right = 0x0 <irq_stack_union>,
- rb_left = 0x0 <irq_stack_union>
+ __rb_parent_color = 18446612686384860673,
+ rb_right = 0xffff888231da8b00,
+ rb_left = 0x0
},
- expires = {
- tv64 = 1835268000000
- }
+ expires = 1228461000000
},
- _softexpires = {
- tv64 = 1835268000000
- },
- function = 0xffffffff81078232 <tick_sched_timer>,
- base = 0xffff88003fd0d6f0,
- state = 1,
- start_pid = 0,
- start_site = 0xffffffff81055c1f <hrtimer_start_range_ns+20>,
- start_comm = "swapper/2\000\000\000\000\000\000"
+ _softexpires = 1228461000000,
+ function = 0xffffffff8137ab20 <tick_nohz_handler>,
+ base = 0xffff888231d9b4c0,
+ state = 1 '\001',
+ is_rel = 0 '\000',
+ is_soft = 0 '\000',
+ is_hard = 1 '\001'
}
diff --git a/Documentation/translations/zh_CN/devicetree/overlay-notes.rst b/Documentation/translations/zh_CN/devicetree/overlay-notes.rst
index 43e3c0bc5a9f..ba5edd05dc1e 100644
--- a/Documentation/translations/zh_CN/devicetree/overlay-notes.rst
+++ b/Documentation/translations/zh_CN/devicetree/overlay-notes.rst
@@ -43,10 +43,10 @@ Documentation/devicetree/dynamic-resolution-notes.rst[1]çš„é…套文档。
};
---- foo.dts ---------------------------------------------------------------
-覆盖bar.dts,
+覆盖bar.dtso,
::
- ---- bar.dts - 按标签覆盖目标ä½ç½® ----------------------------
+ ---- bar.dtso - 按标签覆盖目标ä½ç½® ---------------------------
/dts-v1/;
/æ’ä»¶/;
&ocp {
@@ -56,7 +56,7 @@ Documentation/devicetree/dynamic-resolution-notes.rst[1]çš„é…套文档。
... /* å„ç§å±žæ€§å’Œå­èŠ‚ç‚¹ */
};
};
- ---- bar.dts ---------------------------------------------------------------
+ ---- bar.dtso --------------------------------------------------------------
当加载(并按照[1]中æè¿°çš„æ–¹å¼è§£å†³ï¼‰æ—¶ï¼Œåº”该产生foo+bar.dts::
@@ -90,9 +90,9 @@ Documentation/devicetree/dynamic-resolution-notes.rst[1]çš„é…套文档。
DT中的适当ä½ç½®ã€‚åœ¨è¿™ç§æƒ…况下,å¯ä»¥æä¾›ç›®æ ‡è·¯å¾„。通过标签的目标ä½ç½®çš„语法是比
较好的,因为ä¸ç®¡æ ‡ç­¾åœ¨DT中出现在哪里,覆盖都å¯ä»¥è¢«åº”ç”¨åˆ°ä»»ä½•åŒ…å«æ ‡ç­¾çš„基础DT上。
-上é¢çš„bar.dts例å­è¢«ä¿®æ”¹ä¸ºä½¿ç”¨ç›®æ ‡è·¯å¾„语法,å³ä¸º::
+上é¢çš„bar.dtso例å­è¢«ä¿®æ”¹ä¸ºä½¿ç”¨ç›®æ ‡è·¯å¾„语法,å³ä¸º::
- ---- bar.dts - 通过明确的路径覆盖目标ä½ç½® --------------------
+ ---- bar.dtso - 通过明确的路径覆盖目标ä½ç½® -------------------
/dts-v1/;
/æ’ä»¶/;
&{/ocp} {
@@ -102,7 +102,7 @@ DT中的适当ä½ç½®ã€‚åœ¨è¿™ç§æƒ…况下,å¯ä»¥æä¾›ç›®æ ‡è·¯å¾„。通过标
... /* å„ç§å¤–围设备和å­èŠ‚ç‚¹ */
}
};
- ---- bar.dts ---------------------------------------------------------------
+ ---- bar.dtso --------------------------------------------------------------
内核中关于覆盖的API
diff --git a/Documentation/translations/zh_CN/driver-api/gpio/index.rst b/Documentation/translations/zh_CN/driver-api/gpio/index.rst
index e4d54724a1b5..f64a69f771ca 100644
--- a/Documentation/translations/zh_CN/driver-api/gpio/index.rst
+++ b/Documentation/translations/zh_CN/driver-api/gpio/index.rst
@@ -42,7 +42,7 @@ ACPI支æŒ
该API在以下内核代ç ä¸­:
-drivers/gpio/gpiolib-acpi.c
+drivers/gpio/gpiolib-acpi-core.c
设备树支æŒ
==========
diff --git a/Documentation/translations/zh_CN/how-to.rst b/Documentation/translations/zh_CN/how-to.rst
new file mode 100644
index 000000000000..569b0209385a
--- /dev/null
+++ b/Documentation/translations/zh_CN/how-to.rst
@@ -0,0 +1,459 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
+Linux内核中文文档翻译规范
+=========================
+
+修订记录:
+ - v1.0 2025å¹´3月28日,å¸å»¶è…¾ã€æ…•冬亮共åŒç¼–写了该规范。
+
+制定规范的背景
+==============
+
+过去几年,在广大社区爱好者的å‹å¥½åˆä½œä¸‹ï¼ŒLinux 内核中文文档迎æ¥äº†è“¬å‹ƒçš„å‘
+å±•ã€‚åœ¨ç¿»è¯‘çš„æ—©æœŸï¼Œä¸€åˆ‡éƒ½æ˜¯æ··ä¹±çš„ï¼Œç¤¾åŒºå¯¹è¯‘ç¨¿åªæœ‰ä¸€ä¸ªå‡†ç¡®ç¿»è¯‘çš„è¦æ±‚,以鼓
+励更多的开å‘者å‚与进æ¥ï¼Œè¿™æ˜¯ä»Ž0到1的必然过程,所以早期的中文文档目录更加
+具有多样性,ä¸è¿‡å¥½åœ¨æ–‡æ¡£ä¸å¤šï¼Œç»´æŠ¤ä¸Šå¹¶æ²¡æœ‰è¿‡å¤§çš„压力。
+
+然而,世事å˜å¹»ï¼Œä¸è§‰æœ‰å¹´ï¼ŒçŽ°åœ¨å†…æ ¸ä¸­æ–‡æ–‡æ¡£åœ¨å‰è¿›çš„é“路上越走越远,很多潜
+åœ¨çš„é—®é¢˜é€æ¸æµ®å‡ºæ°´é¢ï¼Œè€Œä¸”éšç€ä¸­æ–‡æ–‡æ¡£æ•°é‡çš„增加,翻译更多的文档与æé«˜ä¸­
+文文档å¯ç»´æŠ¤æ€§ä¹‹é—´çš„矛盾愈å‘å°–é”。由于文档翻译的特殊性,很多开å‘者并ä¸ä¼š
+一直更新文档,如果中文文档è½åŽè‹±æ–‡æ–‡æ¡£å¤ªå¤šï¼Œæ–‡æ¡£æ›´æ–°çš„工作é‡ä¼šè¿œå¤§äºŽé‡æ–°
+翻译。而且邮件列表中陆续有新的é¢å­”出现,他们那股热情,就åƒç‡ƒçƒ§çš„ç«ç„°ï¼Œèƒ½
+çž¬é—´ç‚¹ç‡ƒæ•´ä¸ªç©ºé—´ï¼Œå¯æ˜¯ä»–们的补ä¸å¾€å¾€å…·æœ‰ä¸ªæ€§ï¼Œè¿™ä¼šç»™å®¡é˜…带æ¥äº†å¾ˆå¤§çš„困难,
+reviewer 们åªèƒ½è€å¿ƒåœ°æŒ‡å¯¼ä»–们如何与社区更好地åˆä½œï¼Œä½†æ˜¯è¿™é¡¹å·¥ä½œå…·æœ‰é‡å¤
+æ€§ï¼Œé•¿æ­¤ä»¥å¾€ï¼Œä¼šæ¸æ¸æµ‡ç­ reviewer 审阅的热情。
+
+è™½ç„¶å†…æ ¸æ–‡æ¡£ä¸­å·²ç»æœ‰äº†ç±»ä¼¼çš„贡献指å—,但是缺ä¹ä¸“门针对于中文翻译的,尤其
+是对于新手æ¥è¯´ï¼Œæµè§ˆå¤§é‡çš„æ–‡æ¡£å而更加迷惑,该文档就是为了缓解这一问题而
+编写,目的是为æä¾›ç»™æ–°æ‰‹ä¸€ä¸ªå¿«é€Ÿç¿»è¯‘指å—。
+
+详细的贡献指å—:Documentation/translations/zh_CN/process/index.rst。
+
+环境æ­å»º
+========
+
+工欲善其事必先利其器,如果您目å‰å¯¹å†…核文档翻译满怀热情,并且会独立地安装
+linux å‘行版和简å•地使用 linux 命令行,那么å¯ä»¥è¿…速开始了。若您尚ä¸å…·å¤‡è¯¥
+能力,很多网站上会有详细的手把手教程,最多一个上åˆï¼Œæ‚¨åº”该就能掌æ¡å¯¹åº”技
+èƒ½ã€‚æ‚¨éœ€è¦æ³¨æ„的一点是,请ä¸è¦ä½¿ç”¨ root 用户进行åŽç»­æ­¥éª¤å’Œæ–‡æ¡£ç¿»è¯‘。
+
+拉å–开呿 ‘
+----------
+
+中文文档翻译工作目å‰ç‹¬ç«‹äºŽ linux-doc 开呿 ‘å¼€å±•ï¼Œæ‰€ä»¥æ‚¨éœ€è¦æ‹‰å–è¯¥å¼€å‘æ ‘,
+打开终端命令行执行::
+
+ git clone git://git.kernel.org/pub/scm/linux/kernel/git/alexs/linux.git
+
+如果您é‡åˆ°ç½‘络连接问题,也å¯ä»¥æ‰§è¡Œä»¥ä¸‹å‘½ä»¤::
+
+ git clone https://mirrors.hust.edu.cn/git/kernel-doc-zh.git linux
+
+这是 Alex 开呿 ‘的镜åƒåº“,æ¯ä¸¤ä¸ªå°æ—¶åŒæ­¥ä¸€æ¬¡ä¸Šæ¸¸ã€‚如果您了解到更快的 mirror,
+è¯·éšæ—¶ **添加** 。
+
+命令执行完毕åŽï¼Œæ‚¨ä¼šåœ¨å½“å‰ç›®å½•下得到一个 linux 目录,该目录就是您之åŽçš„工作
+仓库,请把它放在一个稳妥的ä½ç½®ã€‚
+
+安装文档构建环境
+----------------
+
+å†…æ ¸ä»“åº“é‡Œé¢æä¾›äº†ä¸€ä¸ªåŠè‡ªåŠ¨åŒ–è„šæœ¬ï¼Œæ‰§è¡Œè¯¥è„šæœ¬ï¼Œä¼šæ£€æµ‹æ‚¨çš„å‘行版中需è¦å®‰
+装哪些软件包,请按照命令行æç¤ºè¿›è¡Œå®‰è£…,通常您åªéœ€è¦å¤åˆ¶å‘½ä»¤å¹¶æ‰§è¡Œå°±è¡Œã€‚
+::
+
+ cd linux
+ ./scripts/sphinx-pre-install
+
+以Fedora为例,它的输出是这样的::
+
+ You should run:
+
+ sudo dnf install -y dejavu-sans-fonts dejavu-sans-mono-fonts dejavu-serif-fonts google-noto-sans-cjk-fonts graphviz-gd latexmk librsvg2-tools texlive-anyfontsize texlive-capt-of texlive-collection-fontsrecommended texlive-ctex texlive-eqparbox texlive-fncychap texlive-framed texlive-luatex85 texlive-multirow texlive-needspace texlive-tabulary texlive-threeparttable texlive-upquote texlive-wrapfig texlive-xecjk
+
+ Sphinx needs to be installed either:
+ 1) via pip/pypi with:
+
+ /usr/bin/python3 -m venv sphinx_latest
+ . sphinx_latest/bin/activate
+ pip install -r ./Documentation/sphinx/requirements.txt
+
+ If you want to exit the virtualenv, you can use:
+ deactivate
+
+ 2) As a package with:
+
+ sudo dnf install -y python3-sphinx
+
+ Please note that Sphinx >= 3.0 will currently produce false-positive
+ warning when the same name is used for more than one type (functions,
+ structs, enums,...). This is known Sphinx bug. For more details, see:
+ https://github.com/sphinx-doc/sphinx/pull/8313
+
+请您按照æç¤ºå¤åˆ¶æ‰“å°çš„命令到命令行执行,您必须具备 root æƒé™æ‰èƒ½æ‰§è¡Œ sudo
+开头的命令。
+
+如果您处于一个多用户环境中,为了é¿å…对其他人造æˆå½±å“,建议您é…ç½®å•用户
+sphinx 虚拟环境,å³åªéœ€è¦æ‰§è¡Œ::
+
+ /usr/bin/python3 -m venv sphinx_latest
+ . sphinx_latest/bin/activate
+ pip install -r ./Documentation/sphinx/requirements.txt
+
+æœ€åŽæ‰§è¡Œä»¥ä¸‹å‘½ä»¤é€€å‡ºè™šæ‹ŸçŽ¯å¢ƒ::
+
+ deactivate
+
+您å¯ä»¥åœ¨ä»»ä½•需è¦çš„æ—¶å€™å†æ¬¡æ‰§è¡Œä»¥ä¸‹å‘½ä»¤è¿›å…¥è™šæ‹ŸçŽ¯å¢ƒ::
+
+ . sphinx_latest/bin/activate
+
+进行第一次文档编译
+------------------
+
+è¿›å…¥å¼€å‘æ ‘目录::
+
+ cd linux
+
+这是一个标准的编译和调试æµç¨‹ï¼Œè¯·æ¯æ¬¡æž„建时都严格执行::
+
+ . sphinx_latest/bin/activate
+ make cleandocs
+ make htmldocs
+ deactivate
+
+检查编译结果
+------------
+
+编译输出在Documentation/output/目录下,请用æµè§ˆå™¨æ‰“开该目录下对应
+的文件进行检查。
+
+git和邮箱é…ç½®
+-------------
+
+打开命令行执行::
+
+ sudo dnf install git-email
+ vim ~/.gitconfig
+
+这里是我的一个é…ç½®æ–‡ä»¶ç¤ºèŒƒï¼Œè¯·æ ¹æ®æ‚¨çš„é‚®ç®±åŸŸåæœåŠ¡å•†æä¾›çš„æ‰‹å†Œæ›¿æ¢åˆ°å¯¹
+应的字段。
+::
+
+ [user]
+ name = Yanteng Si # 这会出现在您的补ä¸å¤´éƒ¨ç­¾åæ 
+ email = si.yanteng@linux.dev # 这会出现在您的补ä¸å¤´éƒ¨ç­¾åæ 
+
+ [sendemail]
+ from = Yanteng Si <si.yanteng@linux.dev> # 这会出现在您的补ä¸å¤´éƒ¨
+ smtpencryption = ssl
+ smtpserver = smtp.migadu.com
+ smtpuser = si.yanteng@linux.dev
+ smtppass = <passwd> # 建议使用第三方客户端专用密ç 
+ chainreplyto = false
+ smtpserverport = 465
+
+关于邮件客户端的é…置,请查阅Documentation/translations/zh_CN/process/email-clients.rst。
+
+开始翻译文档
+============
+
+文档索引结构
+------------
+
+ç›®å‰ä¸­æ–‡æ–‡æ¡£æ˜¯åœ¨Documentation/translations/zh_CN/目录下进行,该
+目录结构最终会与Documentation/结构一致,所以您åªéœ€è¦å°†æ‚¨æ„Ÿå…´è¶£çš„英文
+文档文件和对应的 index.rst å¤åˆ¶åˆ° zh_CN 目录下对应的ä½ç½®ï¼Œç„¶åŽä¿®æ”¹æ›´
+上一级的 index å³å¯å¼€å§‹æ‚¨çš„翻译。
+
+为了ä¿è¯ç¿»è¯‘的文档补ä¸è¢«é¡ºåˆ©åˆå¹¶ï¼Œä¸å»ºè®®å¤šäººåŒæ—¶ç¿»è¯‘一个目录,因为这会
+造æˆè¡¥ä¸ä¹‹é—´äº’相ä¾èµ–,往往会导致一部分补ä¸è¢«åˆå¹¶ï¼Œå¦ä¸€éƒ¨åˆ†äº§ç”Ÿå†²çªã€‚
+
+如果实在无法é¿å…ä¸¤ä¸ªäººåŒæ—¶å¯¹ä¸€ä¸ªç›®å½•进行翻译的情况,请将补ä¸åˆ¶ä½œè¿›ä¸€ä¸ªè¡¥
+ä¸é›†ã€‚ä½†æ˜¯ä¸æŽ¨è刚开始就这么åšï¼Œå› ä¸ºç»è¿‡å®žè·µï¼Œåœ¨æ²¡æœ‰æŒ‡å¯¼çš„æƒ…况下,新手很
+难一次处ç†å¥½è¿™ä¸ªè¡¥ä¸é›†ã€‚
+
+请执行以下命令,新建开å‘分支::
+
+ git checkout docs-next
+ git branch my-trans
+ git checkout my-trans
+
+译文格å¼è¦æ±‚
+------------
+
+ - æ¯è¡Œé•¿åº¦æœ€å¤šä¸è¶…过40个字符
+ - æ¯è¡Œé•¿åº¦è¯·ä¿æŒä¸€è‡´
+ - 标题的下划线长度请按照一个英文一个字符ã€ä¸€ä¸ªä¸­æ–‡ä¸¤ä¸ªå­—符与标题对é½
+ - å…¶å®ƒçš„ä¿®é¥°ç¬¦è¯·ä¸Žè‹±æ–‡æ–‡æ¡£ä¿æŒä¸€è‡´
+
+æ­¤å¤–åœ¨è¯‘æ–‡çš„å¤´éƒ¨ï¼Œæ‚¨éœ€è¦æ’入以下内容::
+
+ .. SPDX-License-Identifier: GPL-2.0
+ .. include:: ../disclaimer-zh_CN.rst #您需è¦äº†è§£è¯¥æ–‡ä»¶çš„路径,根
+ æ®æ‚¨å®žé™…ç¿»è¯‘çš„æ–‡æ¡£çµæ´»è°ƒæ•´
+
+ :Original: Documentation/xxx/xxx.rst #替æ¢ä¸ºæ‚¨ç¿»è¯‘的英文文档路径
+
+ :翻译:
+
+ å¸å»¶è…¾ Yanteng Si <si.yanteng@linux.dev> #替æ¢ä¸ºæ‚¨è‡ªå·±çš„è”系方å¼
+
+翻译技巧
+--------
+
+中文文档有æ¯è¡Œ40字符é™åˆ¶ï¼Œå› ä¸ºä¸€ä¸ªä¸­æ–‡å­—符等于2个英文字符。但是社区并没有
+é‚£ä¹ˆä¸¥æ ¼ï¼Œä¸€ä¸ªè¯€çªæ˜¯å°†æ‚¨çš„翻译的内容与英文原文的æ¯è¡Œé•¿åº¦å¯¹é½å³å¯ï¼Œè¿™æ ·ï¼Œ
+您也ä¸å¿…总是检查有没有超é™ã€‚
+
+如果您的英文阅读能力有é™ï¼Œå¯ä»¥è€ƒè™‘使用辅助翻译工具,例如 deepseek 。但是您
+必须仔细地打磨,使译文达到“信达雅â€çš„æ ‡å‡†ã€‚
+
+**请注æ„** ç¤¾åŒºä¸æŽ¥å—纯机器翻译的文档,社区工作建立在信任的基础上,请认真对待。
+
+编译和检查
+----------
+
+请执行::
+
+ . sphinx_latest/bin/activate
+ make cleandocs
+ make htmldocs
+
+解决与您翻译的文档相关的 warning å’Œ errorï¼Œç„¶åŽæ‰§è¡Œ::
+
+ make cleandocs #该步骤ä¸èƒ½çœç•¥ï¼Œå¦åˆ™å¯èƒ½ä¸ä¼šå†æ¬¡è¾“出真实存在的警告
+ make htmldocs
+ deactivate
+
+进入 output 目录用æµè§ˆå™¨æ‰“å¼€æ‚¨ç¿»è¯‘çš„æ–‡æ¡£ï¼Œæ£€æŸ¥æ¸²æŸ“çš„é¡µé¢æ˜¯å¦æ­£å¸¸ï¼Œå¦‚果正常,
+继续进行åŽç»­æ­¥éª¤ï¼Œå¦åˆ™è¯·å°è¯•解决。
+
+制作补ä¸
+========
+
+æäº¤æ”¹åЍ
+--------
+
+执行以下命令,在弹出的交互å¼é¡µé¢ä¸­å¡«å†™å¿…è¦çš„ä¿¡æ¯ã€‚
+::
+
+ git add .
+ git commit -s -v
+
+请å‚考以下信æ¯è¿›è¡Œè¾“å…¥::
+
+ docs/zh_CN: Add self-protection index Chinese translation
+
+ Translate .../security/self-protection.rst into Chinese.
+
+ Update the translation through commit b080e52110ea #请执行git log <您翻译的英文文档路径> å¤åˆ¶æœ€é¡¶éƒ¨ç¬¬ä¸€ä¸ªè¡¥ä¸çš„sha值的å‰12ä½ï¼Œæ›¿æ¢æŽ‰12ä½sha值。
+ ("docs: update self-protection __ro_after_init status")
+
+ Signed-off-by: Yanteng Si <si.yanteng@linux.dev> #如果您å‰é¢çš„æ­¥éª¤æ­£ç¡®æ‰§è¡Œï¼Œè¯¥è¡Œä¼šè‡ªåŠ¨æ˜¾ç¤ºï¼Œå¦åˆ™è¯·æ£€æŸ¥gitconfig文件。
+
+ä¿å­˜å¹¶é€€å‡ºã€‚
+
+**请注æ„** 以上四行,缺少任何一行,您都将会在第一轮审阅åŽè¿”工,如果您需è¦ä¸€ä¸ªæ›´åŠ æ˜Žç¡®çš„ç¤ºä¾‹ï¼Œè¯·å¯¹ zh_CN 目录执行 git log。
+
+导出补ä¸å’Œåˆ¶ä½œå°é¢
+------------------
+
+这个时候,å¯ä»¥å¯¼å‡ºè¡¥ä¸ï¼Œåšå‘é€é‚®ä»¶åˆ—表最åŽçš„准备了。命令行执行::
+
+ git format-patch -N
+
+ç„¶åŽå‘½ä»¤è¡Œä¼šè¾“出类似下é¢çš„内容::
+
+ 0001-docs-zh_CN-add-xxxxxxxx.patch
+ 0002-docs-zh_CN-add-xxxxxxxx.patch
+ ……
+
+测试补ä¸
+--------
+
+内核æä¾›äº†ä¸€ä¸ªè¡¥ä¸æ£€æµ‹è„šæœ¬ï¼Œè¯·æ‰§è¡Œ::
+
+ ./scripts/checkpatch.pl *.patch
+
+å‚考脚本输出,解决掉所有的 error å’Œ warningï¼Œé€šå¸¸æƒ…å†µä¸‹ï¼Œåªæœ‰ä¸‹é¢è¿™ä¸ª
+warning ä¸éœ€è¦è§£å†³::
+
+ WARNING: added, moved or deleted file(s), does MAINTAINERS need updating?
+
+一个简å•çš„è§£å†³æ–¹æ³•æ˜¯ä¸€æ¬¡åªæ£€æŸ¥ä¸€ä¸ªè¡¥ä¸ï¼Œç„¶åŽæ‰“上该补ä¸ï¼Œç›´æŽ¥å¯¹è¯‘文进行修改,
+ç„¶åŽæ‰§è¡Œä»¥ä¸‹å‘½ä»¤ä¸ºè¡¥ä¸è¿½åŠ æ›´æ”¹::
+
+ git checkout docs-next
+ git branch test-trans
+ git am 0001-xxxxx.patch
+ ./scripts/checkpatch.pl 0001-xxxxx.patch
+ 直接修改您的翻译
+ git add .
+ git am --amend
+ ä¿å­˜é€€å‡º
+ git am 0002-xxxxx.patch
+ ……
+
+釿–°å¯¼å‡ºå†æ¬¡æ£€æµ‹ï¼Œé‡å¤è¿™ä¸ªè¿‡ç¨‹ï¼Œç›´åˆ°å¤„ç†å®Œæ‰€æœ‰çš„è¡¥ä¸ã€‚
+
+最åŽï¼Œå¦‚果检测时没有 warning å’Œ error 需è¦è¢«å¤„ç†æˆ–è€…æ‚¨åªæœ‰ä¸€ä¸ªè¡¥ä¸ï¼Œè¯·è·³
+过下é¢è¿™ä¸ªæ­¥éª¤ï¼Œå¦åˆ™è¯·é‡æ–°å¯¼å‡ºè¡¥ä¸åˆ¶ä½œå°é¢::
+
+ git format-patch -N --cover-letter --thread=shallow #Nä¸ºæ‚¨çš„è¡¥ä¸æ•°é‡,N一般è¦å¤§äºŽ1。
+
+ç„¶åŽå‘½ä»¤è¡Œä¼šè¾“出类似下é¢çš„内容::
+
+ 0000-cover-letter.patch
+ 0001-docs-zh_CN-add-xxxxxxxx.patch
+ 0002-docs-zh_CN-add-xxxxxxxx.patch
+
+您需è¦ç”¨ç¼–辑器打开0å·è¡¥ä¸ï¼Œä¿®æ”¹ä¸¤å¤„内容::
+
+ vim 0000-cover-letter.patch
+
+ ...
+ Subject: [PATCH 0/1] *** SUBJECT HERE *** #修改该字段,概括您的补ä¸é›†éƒ½åšäº†å“ªäº›äº‹æƒ…
+
+ *** BLURB HERE *** #修改该字段,详细æè¿°æ‚¨çš„è¡¥ä¸é›†åšäº†å“ªäº›äº‹æƒ…
+
+ Yanteng Si (1):
+ docs/zh_CN: add xxxxx
+ ...
+
+å¦‚æžœæ‚¨åªæœ‰ä¸€ä¸ªè¡¥ä¸ï¼Œåˆ™å¯ä»¥ä¸åˆ¶ä½œå°é¢ï¼Œå³0å·è¡¥ä¸ï¼Œåªéœ€è¦æ‰§è¡Œ::
+
+ git format-patch -1
+
+æŠŠè¡¥ä¸æäº¤åˆ°é‚®ä»¶åˆ—è¡¨
+====================
+
+æ­å–œæ‚¨ï¼Œæ‚¨çš„æ–‡æ¡£ç¿»è¯‘现在å¯ä»¥æäº¤åˆ°é‚®ä»¶åˆ—表了。
+
+获å–维护者和审阅者邮箱以åŠé‚®ä»¶åˆ—表地å€
+--------------------------------------
+
+内核æä¾›äº†ä¸€ä¸ªè‡ªåŠ¨åŒ–è„šæœ¬å·¥å…·ï¼Œè¯·æ‰§è¡Œ::
+
+ ./scripts/get_maintainer.pl *.patch
+
+将输出的邮箱地å€ä¿å­˜ä¸‹æ¥ã€‚
+
+å°†è¡¥ä¸æäº¤åˆ°é‚®ä»¶åˆ—è¡¨
+--------------------
+
+æ‰“å¼€ä¸Šé¢æ‚¨ä¿å­˜çš„邮件地å€ï¼Œæ‰§è¡Œ::
+
+ git send-email *.patch --to <maintainer email addr> --cc <others addr> #一个to对应一个地å€ï¼Œä¸€ä¸ªcc对应一个地å€ï¼Œæœ‰å‡ ä¸ªå°±å†™å‡ ä¸ªã€‚
+
+执行该命令时,请确ä¿ç½‘络通常,邮件å‘逿ˆåŠŸä¸€èˆ¬ä¼šè¿”å›ž250。
+
+您å¯ä»¥å…ˆå‘é€ç»™è‡ªå·±ï¼Œå°è¯•å‘出的 patch 是å¦å¯ä»¥ç”¨ 'git am' 工具正常打上。
+如果检查正常, 您就å¯ä»¥æ”¾å¿ƒçš„å‘é€åˆ°ç¤¾åŒºè¯„审了。
+
+如果该步骤被中断,您å¯ä»¥æ£€æŸ¥ä¸€ä¸‹ï¼Œç»§ç»­ç”¨ä¸Šæ¡å‘½ä»¤å‘é€å¤±è´¥çš„è¡¥ä¸ï¼Œä¸€å®šä¸è¦å†
+次å‘é€å·²ç»å‘逿ˆåŠŸçš„è¡¥ä¸ã€‚
+
+积æžå‚与审阅过程并迭代补ä¸
+==========================
+
+è¡¥ä¸æäº¤åˆ°é‚®ä»¶åˆ—è¡¨å¹¶ä¸ä»£è¡¨ä¸‡äº‹å¤§å‰ï¼Œæ‚¨è¿˜éœ€è¦ç§¯æžå›žå¤ maintainer å’Œ
+reviewer 的评论,åšåˆ°æ¯æ¡éƒ½æœ‰å›žå¤ï¼Œæ¯ä¸ªå›žå¤éƒ½è½å®žåˆ°ä½ã€‚
+
+如何回å¤è¯„论
+------------
+
+ - 请先将您的邮箱客户端信件回å¤ä¿®æ”¹ä¸º **纯文本** æ ¼å¼ï¼Œå¹¶åŽ»é™¤æ‰€æœ‰ç­¾å,尤其是
+ ä¼ä¸šé‚®ç®±ã€‚
+ - ç„¶åŽç‚¹å‡»å›žå¤æŒ‰é’®ï¼Œå¹¶å°†è¦å›žå¤çš„邮件带入,
+ - 在第一æ¡è¯„论行尾æ¢è¡Œï¼Œè¾“入您的回å¤
+ - 在第二æ¡è¯„论行尾æ¢è¡Œï¼Œè¾“入您的回å¤
+ - 直到处ç†å®Œæœ€åŽä¸€æ¡è¯„论,æ¢è¡Œç©ºä¸¤è¡Œè¾“入问候语和署å
+
+注æ„,信件回å¤è¯·å°½é‡ä½¿ç”¨è‹±æ–‡ã€‚
+
+迭代补ä¸
+--------
+
+建议您æ¯å›žå¤ä¸€æ¡è¯„论,就修改一处翻译。然åŽé‡æ–°ç”Ÿæˆè¡¥ä¸ï¼Œç›¸ä¿¡æ‚¨çŽ°åœ¨å·²ç»å…·
+å¤‡äº†çµæ´»ä½¿ç”¨ git am --amend 的能力。
+
+æ¯æ¬¡è¿­ä»£ä¸€ä¸ªè¡¥ä¸ï¼Œä¸è¦ä¸€æ¬¡å¤šä¸ª::
+
+ git am <您è¦ä¿®æ”¹çš„è¡¥ä¸>
+ 直接对文件进行您的修改
+ git add .
+ git commit --amend
+
+当您将所有的评论è½å®žåˆ°ä½åŽï¼Œå¯¼å‡ºç¬¬äºŒç‰ˆè¡¥ä¸ï¼Œå¹¶ä¿®æ”¹å°é¢::
+
+ git format-patch -N -v 2 --cover-letter --thread=shallow
+
+打开0å·è¡¥ä¸ï¼Œåœ¨ BLURB HERE 处编写相较于上个版本,您åšäº†å“ªäº›æ”¹åŠ¨ã€‚
+
+ç„¶åŽæ‰§è¡Œ::
+
+ git send-email v2* --to <maintainer email addr> --cc <others addr>
+
+这样,新的一版补ä¸å°±åˆå‘é€åˆ°é‚®ä»¶åˆ—表等待审阅,之åŽå°±æ˜¯é‡å¤è¿™ä¸ªè¿‡ç¨‹ã€‚
+
+审阅周期
+--------
+
+因为有时邮件列表比较ç¹å¿™ï¼Œæ‚¨çš„邮件å¯èƒ½ä¼šè¢«æ·¹æ²¡ï¼Œå¦‚果超过两周没有得到任何
+回å¤ï¼Œè¯·è‡ªå·±å›žå¤è‡ªå·±ï¼Œå›žå¤çš„内容为 Ping.
+
+最终,如果您è½å®žå¥½äº†æ‰€æœ‰çš„è¯„è®ºï¼Œå¹¶ä¸”ä¸€æ®µæ—¶é—´åŽæ²¡æœ‰æœ€æ–°çš„评论,您的补ä¸å°†
+会先进入 Alex çš„å¼€å‘æ ‘,然åŽè¿›å…¥ linux-doc 开呿 ‘ï¼Œæœ€ç»ˆåœ¨ä¸‹ä¸ªçª—å£æ‰“å¼€
+æ—¶åˆå¹¶è¿› mainline 仓库。
+
+紧急处ç†
+--------
+
+如果您å‘é€åˆ°é‚®ä»¶åˆ—表之åŽã€‚å‘现å‘错了补ä¸é›†ï¼Œå°¤å…¶æ˜¯åœ¨å¤šä¸ªç‰ˆæœ¬è¿­ä»£çš„过程中;
+自己å‘现了一些ä¸å¦¥çš„翻译;å‘é€é”™äº†é‚®ä»¶åˆ—表……
+
+git email默认会抄é€ç»™æ‚¨ä¸€ä»½ï¼Œæ‰€ä»¥æ‚¨å¯ä»¥åˆ‡æ¢ä¸ºå®¡é˜…者的角色审查自己的补ä¸ï¼Œ
+并留下评论,æè¿°æœ‰ä½•ä¸å¦¥ï¼Œå°†åœ¨ä¸‹ä¸ªç‰ˆæœ¬æ€Žä¹ˆæ”¹ï¼Œå¹¶ä»˜è¯¸è¡ŒåŠ¨ï¼Œé‡æ–°æäº¤ï¼Œä½†æ˜¯
+注æ„频率,æ¯å¤©æäº¤çš„æ¬¡æ•°ä¸è¦è¶…过两次。
+
+新手任务
+--------
+对于首次å‚与 Linux 内核中文文档翻译的新手,建议您在 linux 目录中è¿è¡Œä»¥ä¸‹å‘½ä»¤ï¼š
+::
+
+ ./scripts/checktransupdate.py -l zh_CN
+
+该命令会列出需è¦ç¿»è¯‘或更新的英文文档。
+
+关于详细æ“作说明,请å‚考: Documentation/translations/zh_CN/doc-guide/checktransupdate.rst\
+
+进阶
+----
+
+希望您ä¸åªæ˜¯å•纯的翻译内核文档,在熟悉了一起与社区工作之åŽï¼Œæ‚¨å¯ä»¥å®¡é˜…å…¶ä»–
+å¼€å‘者的翻译,或者æå‡ºå…·æœ‰å»ºè®¾æ€§çš„ä¸»å¼ ã€‚ä¸Žæ­¤åŒæ—¶ï¼Œä¸Žæ–‡æ¡£å¯¹åº”çš„ä»£ç æ›´åŠ æœ‰è¶£ï¼Œ
+而且需è¦å®Œå–„çš„åœ°æ–¹è¿˜æœ‰å¾ˆå¤šï¼Œå‹‡æ•¢åœ°åŽ»æŽ¢ç´¢ï¼Œç„¶åŽæäº¤ä½ çš„æƒ³æ³•å§ã€‚
+
+常è§çš„问题
+==========
+
+Maintainer回å¤è¡¥ä¸ä¸èƒ½æ­£å¸¸apply
+-------------------------------
+
+这通常是因为您的补ä¸ä¸Žé‚®ä»¶åˆ—表其他人的补ä¸äº§ç”Ÿäº†å†²çªï¼Œåˆ«äººçš„è¡¥ä¸å…ˆè¢« apply 了,
+您的补ä¸é›†å°±æ— æ³•æˆåŠŸ apply äº†ï¼Œè¿™éœ€è¦æ‚¨æ›´æ–°æœ¬åœ°åˆ†æ”¯ï¼Œåœ¨æœ¬åœ°è§£å†³å®Œå†²çªåŽå†æ¬¡æäº¤ã€‚
+
+请尽é‡é¿å…冲çªï¼Œä¸è¦å¤šä¸ªäººåŒæ—¶ç¿»è¯‘一个目录。翻译之å‰å¯ä»¥é€šè¿‡ git log 查看您感
+兴趣的目录近期有没有其他人翻译,如果有,请æå‰ç§ä¿¡è”系对方,请求其代为å‘逿‚¨
+的补ä¸ã€‚如果对方未æ¥ä¸€ä¸ªæœˆå†…没有æäº¤æ–°è¡¥ä¸çš„æ‰“算,您å¯ä»¥ç‹¬è‡ªå‘é€ã€‚
+
+回信被邮件列表拒收
+------------------
+
+大部分情况下,是由于您å‘é€äº†éžçº¯æ–‡æœ¬æ ¼å¼çš„信件,请尽é‡é¿å…使用 webmail,推è
+使用邮件客户端,比如 thunderbird,记得在设置中的回信é…置那改为纯文本å‘é€ã€‚
+
+如果超过了24å°æ—¶ï¼Œæ‚¨ä¾æ—§æ²¡æœ‰åœ¨<https://lore.kernel.org/linux-doc/>å‘现您的邮
+件,请è”系您的网络管ç†å‘˜å¸®å¿™è§£å†³ã€‚
diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst
index cc512ca54172..b08c09d8e96e 100644
--- a/Documentation/translations/zh_CN/index.rst
+++ b/Documentation/translations/zh_CN/index.rst
@@ -21,18 +21,18 @@
这是中文内核文档树的顶级目录。内核文档,就åƒå†…核本身一样,在很大程度上是一
é¡¹æ­£åœ¨è¿›è¡Œçš„å·¥ä½œï¼›å½“æˆ‘ä»¬åŠªåŠ›å°†è®¸å¤šåˆ†æ•£çš„æ–‡ä»¶æ•´åˆæˆä¸€ä¸ªè¿žè´¯çš„æ•´ä½“时尤其如此。
å¦å¤–ï¼Œéšæ—¶æ¬¢è¿Žæ‚¨å¯¹å†…核文档进行改进;如果您想æä¾›å¸®åŠ©ï¼Œè¯·åŠ å…¥vger.kernel.org
-上的linux-doc邮件列表。
-
-顺便说下,中文文档也需è¦éµå®ˆå†…核编ç é£Žæ ¼ï¼Œé£Žæ ¼ä¸­ä¸­æ–‡å’Œè‹±æ–‡çš„主è¦ä¸åŒå°±æ˜¯ä¸­æ–‡
-的字符标点å ç”¨ä¸¤ä¸ªè‹±æ–‡å­—ç¬¦å®½åº¦ï¼Œæ‰€ä»¥ï¼Œå½“è‹±æ–‡è¦æ±‚ä¸è¦è¶…过æ¯è¡Œ100个字符时,
-中文就ä¸è¦è¶…过50个字符。å¦å¤–ï¼Œä¹Ÿè¦æ³¨æ„'-','='等符å·ä¸Žç›¸å…³æ ‡é¢˜çš„对é½ã€‚在将
-è¡¥ä¸æäº¤åˆ°ç¤¾åŒºä¹‹å‰ï¼Œä¸€å®šè¦è¿›è¡Œå¿…è¦çš„ ``checkpatch.pl`` 检查和编译测试,确ä¿
-在 ``make htmldocs/pdfdocs`` 中ä¸å¢žåŠ æ–°çš„å‘Šè­¦ï¼Œæœ€åŽï¼Œå®‰è£…检查你生æˆçš„
-html/pdf æ–‡ä»¶ï¼Œç¡®è®¤å®ƒä»¬çœ‹èµ·æ¥æ˜¯æ­£å¸¸çš„。
-
-æäº¤ä¹‹å‰è¯·ç¡®è®¤ä½ çš„è¡¥ä¸å¯ä»¥æ­£å¸¸æäº¤åˆ°ä¸­æ–‡æ–‡æ¡£ç»´æŠ¤åº“:
-https://git.kernel.org/pub/scm/linux/kernel/git/alexs/linux.git/
-如果你的补ä¸ä¾èµ–于其他人的补ä¸, å¯ä»¥ä¸Žå…¶ä»–人商é‡åŽç”±æŸä¸€ä¸ªäººåˆå¹¶æäº¤ã€‚
+上的linux-doc邮件列表,并按照Documentation/translations/zh_CN/how-to.rst的
+指引æäº¤è¡¥ä¸ã€‚æäº¤è¡¥ä¸ä¹‹å‰è¯·ç¡®ä¿æ‰§è¡Œ"make htmldocsâ€åŽæ— ä¸Žç¿»è¯‘有关的异常输出。
+
+如何翻译内核文档
+----------------
+
+翻译文档本身是一件很简å•的事情,但是æäº¤è¡¥ä¸éœ€è¦æ³¨æ„一些细节,为了ä¿è¯å†…核中文文档的高质é‡å¯æŒç»­å‘展,æä¾›äº†ä¸€ä»½ç¿»è¯‘指å—。
+
+.. toctree::
+ :maxdepth: 1
+
+ how-to.rst
与Linux 内核社区一起工作
------------------------
diff --git a/Documentation/translations/zh_CN/networking/index.rst b/Documentation/translations/zh_CN/networking/index.rst
new file mode 100644
index 000000000000..d07dd69f980b
--- /dev/null
+++ b/Documentation/translations/zh_CN/networking/index.rst
@@ -0,0 +1,160 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/networking/index.rst
+
+:翻译:
+
+ 王亚鑫 Wang Yaxin <wang.yaxin@zte.com.cn>
+
+:校译:
+
+网络
+====
+
+有关网络设备(netdev)开å‘过程的详细指å—,请å‚考::ref:`netdev-FAQ`
+
+目录:
+
+.. toctree::
+ :maxdepth: 1
+
+ msg_zerocopy
+
+Todolist:
+
+* af_xdp
+* bareudp
+* batman-adv
+* can
+* can_ucan_protocol
+* device_drivers/index
+* diagnostic/index
+* dsa/index
+* devlink/index
+* caif/index
+* ethtool-netlink
+* ieee802154
+* iso15765-2
+* j1939
+* kapi
+* failover
+* net_dim
+* net_failover
+* page_pool
+* phy
+* sfp-phylink
+* alias
+* bridge
+* snmp_counter
+* checksum-offloads
+* segmentation-offloads
+* scaling
+* tls
+* tls-offload
+* tls-handshake
+* nfc
+* 6lowpan
+* 6pack
+* arcnet-hardware
+* arcnet
+* atm
+* ax25
+* bonding
+* cdc_mbim
+* dccp
+* dctcp
+* devmem
+* dns_resolver
+* driver
+* eql
+* fib_trie
+* filter
+* generic-hdlc
+* generic_netlink
+* netlink_spec/index
+* gen_stats
+* gtp
+* ila
+* ioam6-sysctl
+* ip_dynaddr
+* ipsec
+* ip-sysctl
+* ipv6
+* ipvlan
+* ipvs-sysctl
+* kcm
+* l2tp
+* lapb-module
+* mac80211-injection
+* mctp
+* mpls-sysctl
+* mptcp
+* mptcp-sysctl
+* multiqueue
+* multi-pf-netdev
+* napi
+* net_cachelines/index
+* netconsole
+* netdev-features
+* netdevices
+* netfilter-sysctl
+* netif-msg
+* netmem
+* nexthop-group-resilient
+* nf_conntrack-sysctl
+* nf_flowtable
+* oa-tc6-framework
+* openvswitch
+* operstates
+* packet_mmap
+* phonet
+* phy-link-topology
+* pktgen
+* plip
+* ppp_generic
+* proc_net_tcp
+* pse-pd/index
+* radiotap-headers
+* rds
+* regulatory
+* representors
+* rxrpc
+* sctp
+* secid
+* seg6-sysctl
+* skbuff
+* smc-sysctl
+* sriov
+* statistics
+* strparser
+* switchdev
+* sysfs-tagging
+* tc-actions-env-rules
+* tc-queue-filters
+* tcp_ao
+* tcp-thin
+* team
+* timestamping
+* tipc
+* tproxy
+* tuntap
+* udplite
+* vrf
+* vxlan
+* x25
+* x25-iface
+* xfrm_device
+* xfrm_proc
+* xfrm_sync
+* xfrm_sysctl
+* xdp-rx-metadata
+* xsk-tx-metadata
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/translations/zh_CN/networking/msg_zerocopy.rst b/Documentation/translations/zh_CN/networking/msg_zerocopy.rst
new file mode 100644
index 000000000000..821b32c4d1bf
--- /dev/null
+++ b/Documentation/translations/zh_CN/networking/msg_zerocopy.rst
@@ -0,0 +1,223 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/networking/msg_zerocopy.rst
+
+:翻译:
+
+ 王亚鑫 Wang Yaxin <wang.yaxin@zte.com.cn>
+
+:校译:
+
+ - å¾é‘« xu xin <xu.xin16@zte.com.cn>
+ - 何酿ž— He Peilin <he.peilin@zte.com.cn>
+
+============
+MSG_ZEROCOPY
+============
+
+简介
+====
+
+MSG_ZEROCOPY 标志用于å¯ç”¨å¥—接字å‘é€è°ƒç”¨çš„å…æ‹·è´åŠŸèƒ½ã€‚è¯¥åŠŸèƒ½ç›®å‰é€‚用于 TCPã€UDP å’Œ VSOCK
+(使用 virtio 传输)套接字。
+
+机é‡ä¸Žæ³¨æ„事项
+--------------
+
+在用户进程与内核之间拷è´å¤§åž‹ç¼“冲区å¯èƒ½ä¼šæ¶ˆè€—大é‡èµ„æºã€‚Linux 支æŒå¤šç§å…æ‹·è´çš„æŽ¥å£ï¼Œå¦‚sendfile
+å’Œ splice。MSG_ZEROCOPY 标志将底层的拷è´é¿å…机制扩展到了常è§çš„套接字å‘é€è°ƒç”¨ä¸­ã€‚
+
+å…æ‹·è´å¹¶éžæ¯«æ— ä»£ä»·ã€‚在实现上,它通过页é¢å›ºå®šï¼ˆpage pinning)将按字节拷è´çš„æˆæœ¬æ›¿æ¢ä¸ºé¡µé¢ç»Ÿè®¡
+(page accounting)和完æˆé€šçŸ¥çš„开销。因此,MSG_ZEROCOPY 通常仅在写入é‡è¶…过大约 10 KB æ—¶
+æ‰æœ‰æ•ˆã€‚
+
+页é¢å›ºå®šè¿˜ä¼šæ”¹å˜ç³»ç»Ÿè°ƒç”¨çš„语义。它会暂时在进程和网络堆栈之间共享缓冲区。与拷è´ä¸åŒï¼Œè¿›ç¨‹åœ¨ç³»ç»Ÿ
+调用返回åŽä¸èƒ½ç«‹å³è¦†ç›–缓冲区,å¦åˆ™å¯èƒ½ä¼šä¿®æ”¹æ­£åœ¨ä¼ è¾“中的数æ®ã€‚内核的完整性ä¸ä¼šå—到影å“,但有缺
+陷的程åºå¯èƒ½ä¼šç ´åè‡ªå·±çš„æ•°æ®æµã€‚
+
+当内核返回数æ®å¯ä»¥å®‰å…¨ä¿®æ”¹çš„通知时,进程æ‰å¯ä»¥ä¿®æ”¹æ•°æ®ã€‚因此,将现有应用程åºè½¬æ¢ä¸ºä½¿ç”¨
+MSG_ZEROCOPY å¹¶éžæ€»æ˜¯åƒç®€å•地传递该标志那样容易。
+
+更多信æ¯
+--------
+
+本文档的大部分内容是æ¥è‡ªäºŽ netdev 2.1 上å‘表的一篇长篇论文。如需更深入的信æ¯ï¼Œè¯·å‚阅该论文和
+演讲,或者æµè§ˆ LWN.net 上的精彩报é“,也å¯ä»¥ç›´æŽ¥é˜…读æºç ã€‚
+
+ 论文ã€å¹»ç¯ç‰‡ã€è§†é¢‘:
+ https://netdevconf.org/2.1/session.html?debruijn
+
+ LWN 文章:
+ https://lwn.net/Articles/726917/
+
+ è¡¥ä¸é›†ï¼š
+ [PATCH net-next v4 0/9] socket sendmsg MSG_ZEROCOPY
+ https://lore.kernel.org/netdev/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
+
+接å£
+====
+
+传递 MSG_ZEROCOPY 标志是å¯ç”¨å…æ‹·è´åŠŸèƒ½çš„æœ€æ˜Žæ˜¾æ­¥éª¤ï¼Œä½†å¹¶éžå”¯ä¸€çš„æ­¥éª¤ã€‚
+
+套接字设置
+----------
+
+当应用程åºå‘ send 系统调用传递未定义的标志时,内核通常会宽容对待。默认情况下,它会简å•地忽略
+这些标志。为了é¿å…为那些å¶ç„¶ä¼ é€’此标志的é—留进程å¯ç”¨å…æ‹·è´æ¨¡å¼ï¼Œè¿›ç¨‹å¿…须首先通过设置套接字选项
+æ¥è¡¨æ˜Žæ„图:
+
+::
+
+ if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
+ error(1, errno, "setsockopt zerocopy");
+
+传输
+----
+
+对 send(或 sendtoã€sendmsgã€sendmmsg)本身的改动éžå¸¸ç®€å•。åªéœ€ä¼ é€’新的标志å³å¯ã€‚
+
+::
+
+ ret = send(fd, buf, sizeof(buf), MSG_ZEROCOPY);
+
+å¦‚æžœé›¶æ‹·è´æ“作失败,将返回 -1,并设置 errno 为 ENOBUFSã€‚è¿™ç§æƒ…况å¯èƒ½å‘生在套接字超出其
+optmem é™åˆ¶ï¼Œæˆ–者用户超出其é”定页é¢çš„ ulimit 时。
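
The ENOBUFS failure mode described above lends itself to a simple fallback: retry the same send without the flag. A minimal sketch, not part of this patch, assuming fd, buf and len are set up as in the surrounding examples::

    #include <errno.h>
    #include <sys/types.h>
    #include <sys/socket.h>

    /* Try zerocopy first; fall back to a plain copying send on ENOBUFS. */
    static ssize_t send_maybe_zerocopy(int fd, const void *buf, size_t len)
    {
            ssize_t ret = send(fd, buf, len, MSG_ZEROCOPY);

            /* a failed flagged call does not bump the notification counter */
            if (ret == -1 && errno == ENOBUFS)
                    ret = send(fd, buf, len, 0);
            return ret;
    }
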
+
+æ··åˆä½¿ç”¨å…æ‹·è´å’Œæ‹·è´
+~~~~~~~~~~~~~~~~~~~~
+
+è®¸å¤šå·¥ä½œè´Ÿè½½åŒæ—¶åŒ…å«å¤§åž‹å’Œå°åž‹ç¼“å†²åŒºã€‚ç”±äºŽå¯¹äºŽå°æ•°æ®åŒ…æ¥è¯´ï¼Œå…æ‹·è´çš„æˆæœ¬é«˜äºŽæ‹·è´ï¼Œå› æ­¤è¯¥
+功能是通过标志实现的。带有标志的调用和没有标志的调用å¯ä»¥å®‰å…¨åœ°æ··åˆä½¿ç”¨ã€‚
+
+通知
+----
+
+当内核认为å¯ä»¥å®‰å…¨åœ°é‡ç”¨ä¹‹å‰ä¼ é€’的缓冲区时,它必须通知进程。完æˆé€šçŸ¥åœ¨å¥—接字的错误队列上
+排队,类似于传输时间戳接å£ã€‚
+
+通知本身是一个简å•的标é‡å€¼ã€‚æ¯ä¸ªå¥—æŽ¥å­—éƒ½ç»´æŠ¤ä¸€ä¸ªå†…éƒ¨çš„æ— ç¬¦å· 32 ä½è®¡æ•°å™¨ã€‚æ¯æ¬¡å¸¦æœ‰
+MSG_ZEROCOPY 标志的 send 调用æˆåŠŸå‘逿•°æ®æ—¶ï¼Œè®¡æ•°å™¨éƒ½ä¼šå¢žåŠ ã€‚å¦‚æžœè°ƒç”¨å¤±è´¥æˆ–é•¿åº¦ä¸ºé›¶ï¼Œ
+则计数器ä¸ä¼šå¢žåŠ ã€‚è¯¥è®¡æ•°å™¨ç»Ÿè®¡ç³»ç»Ÿè°ƒç”¨çš„è°ƒç”¨æ¬¡æ•°ï¼Œè€Œä¸æ˜¯å­—节数。在 UINT_MAX 次调用åŽï¼Œ
+计数器会循环。
+
+通知接收
+~~~~~~~~
+
+下é¢çš„代ç ç‰‡æ®µå±•示了 API 的使用。在最简å•çš„æƒ…å†µä¸‹ï¼Œæ¯æ¬¡ send 系统调用åŽï¼Œéƒ½ä¼šå¯¹é”™è¯¯é˜Ÿåˆ—
+进行轮询和 recvmsg 调用。
+
+从错误队列读å–始终是一个éžé˜»å¡žæ“作。poll 调用用于阻塞,直到出现错误。它会在其输出标志中
+设置 POLLERR。该标志ä¸éœ€è¦åœ¨ events 字段中设置。错误会无æ¡ä»¶åœ°å‘出信å·ã€‚
+
+::
+
+ pfd.fd = fd;
+ pfd.events = 0;
+ if (poll(&pfd, 1, -1) != 1 || (pfd.revents & POLLERR) == 0)
+ error(1, errno, "poll");
+
+ ret = recvmsg(fd, &msg, MSG_ERRQUEUE);
+ if (ret == -1)
+ error(1, errno, "recvmsg");
+
+read_notification(msg);
+
+
+这个示例仅用于演示目的。在实际应用中,ä¸ç­‰å¾…通知,而是æ¯éš”几次 send 调用就进行一次éžé˜»å¡ž
+读å–会更高效。
+
+é›¶æ‹·è´é€šçŸ¥å¯ä»¥ä¸Žå…¶ä»–套接字æ“作乱åºå¤„ç†ã€‚通常,拥有错误队列套接字会阻塞其他æ“作,直到错误
+被读å–。然而,零拷è´é€šçŸ¥å…·æœ‰é›¶é”™è¯¯ä»£ç ï¼Œå› æ­¤ä¸ä¼šé˜»å¡ž send å’Œ recv 调用。
+
+通知批处ç†
+~~~~~~~~~~~~
+
+å¯ä»¥ä½¿ç”¨ recvmmsg 调用æ¥ä¸€æ¬¡æ€§è¯»å–多个未决的数æ®åŒ…ã€‚è¿™é€šå¸¸ä¸æ˜¯å¿…éœ€çš„ã€‚åœ¨æ¯æ¡æ¶ˆæ¯ä¸­ï¼Œå†…æ ¸
+è¿”å›žçš„ä¸æ˜¯ä¸€ä¸ªå•一的值,而是一个范围。当错误队列上有一个通知正在等待接收时,它会将连续的通
+知åˆå¹¶èµ·æ¥ã€‚
+
+当一个新的通知å³å°†è¢«æŽ’队时,它会检查队列尾部的通知的范围是å¦å¯ä»¥æ‰©å±•ä»¥åŒ…å«æ–°çš„值。如果是这
+样,它会丢弃新的通知数æ®åŒ…,并增大未处ç†é€šçŸ¥çš„范围上é™å€¼ã€‚
+
+对于按顺åºç¡®è®¤æ•°æ®çš„å议(如 TCP),æ¯ä¸ªé€šçŸ¥éƒ½å¯ä»¥åˆå¹¶åˆ°å‰ä¸€ä¸ªé€šçŸ¥ä¸­ï¼Œå› æ­¤åœ¨ä»»ä½•时候在等待
+的通知都ä¸ä¼šè¶…过一个。
+
+有åºäº¤ä»˜æ˜¯å¸¸è§çš„æƒ…况,但ä¸èƒ½ä¿è¯ã€‚在é‡ä¼ å’Œå¥—接字拆除时,通知å¯èƒ½ä¼šä¹±åºåˆ°è¾¾ã€‚
+
+通知解æž
+~~~~~~~~
+
+下é¢çš„代ç ç‰‡æ®µæ¼”ç¤ºäº†å¦‚ä½•è§£æžæŽ§åˆ¶æ¶ˆæ¯ï¼šå‰é¢ä»£ç ç‰‡æ®µä¸­çš„ read_notification() 调用。通知
+ä»¥æ ‡å‡†é”™è¯¯æ ¼å¼ sock_extended_err ç¼–ç ã€‚
+
+控制数æ®ä¸­çš„级别和类型字段是åè®®æ—特定的,对于 TCP 或 UDP 套接字,分别为 IP_RECVERR 或
+IPV6_RECVERR。对于 VSOCK 套接字,cmsg_level 为 SOL_VSOCK,cmsg_type 为 VSOCK_RECVERR。
+
+é”™è¯¯æ¥æºæ˜¯æ–°çš„类型 SO_EE_ORIGIN_ZEROCOPYã€‚å¦‚å‰æ‰€è¿°ï¼Œee_errno 为零,以é¿å…在套接字上
+阻塞地读å–和写入系统调用。
+
+32 ä½é€šçŸ¥èŒƒå›´ç¼–ç ä¸º [ee_info, ee_data]。这个范围是包å«è¾¹ç•Œå€¼çš„。除了下é¢è®¨è®ºçš„ ee_code
+字段外,结构中的其他字段应被视为未定义的。
+
+::
+
+ struct sock_extended_err *serr;
+ struct cmsghdr *cm;
+
+ cm = CMSG_FIRSTHDR(msg);
+ if (cm->cmsg_level != SOL_IP &&
+ cm->cmsg_type != IP_RECVERR)
+ error(1, 0, "cmsg");
+
+ serr = (void *) CMSG_DATA(cm);
+ if (serr->ee_errno != 0 ||
+ serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY)
+ error(1, 0, "serr");
+
+printf("completed: %u..%u\n", serr->ee_info, serr->ee_data);
+
+
+延迟拷è´
+~~~~~~~~
+
+传递标志 MSG_ZEROCOPY 是å‘内核å‘出的一个æç¤ºï¼Œè®©å†…æ ¸é‡‡ç”¨å…æ‹·è´çš„ç­–ç•¥ï¼ŒåŒæ—¶ä¹Ÿæ˜¯ä¸€ç§çº¦
+定,å³å†…核会对完æˆé€šçŸ¥è¿›è¡ŒæŽ’队处ç†ã€‚但这并ä¸ä¿è¯æ‹·è´æ“作一定会被çœç•¥ã€‚
+
+æ‹·è´é¿å…䏿€»æ˜¯é€‚ç”¨çš„ã€‚ä¸æ”¯æŒåˆ†æ•£/èšé›† I/O 的设备无法å‘é€ç”±å†…核生æˆçš„å议头加上零拷è´ç”¨æˆ·
+æ•°æ®ç»„æˆçš„æ•°æ®åŒ…。数æ®åŒ…å¯èƒ½éœ€è¦åœ¨å议栈底层转æ¢ä¸ºä¸€ä»½ç§æœ‰æ•°æ®å‰¯æœ¬ï¼Œä¾‹å¦‚用于计算校验和。
+
+在所有这些情况下,当内核释放对共享页é¢çš„æŒæœ‰æƒæ—¶ï¼Œå®ƒä¼šè¿”回一个完æˆé€šçŸ¥ã€‚该通知å¯èƒ½åœ¨ï¼ˆå·²
+æ‹·è´ï¼‰æ•°æ®å®Œå…¨ä¼ è¾“之å‰åˆ°è¾¾ã€‚因此。零拷è´å®Œæˆé€šçŸ¥å¹¶ä¸æ˜¯ä¼ è¾“完æˆé€šçŸ¥ã€‚
+
+如果数æ®ä¸åœ¨ç¼“存中,延迟拷è´å¯èƒ½ä¼šæ¯”ç«‹å³åœ¨ç³»ç»Ÿè°ƒç”¨ä¸­æ‹·è´å¼€é”€æ›´å¤§ã€‚进程还会因通知处ç†è€Œäº§
+ç”Ÿæˆæœ¬ï¼Œä½†å´æ²¡æœ‰å¸¦æ¥ä»»ä½•好处。因此,内核会在返回时通过在 ee_code 字段中设置标志
+SO_EE_CODE_ZEROCOPY_COPIED æ¥æŒ‡ç¤ºæ•°æ®æ˜¯å¦ä»¥æ‹·è´çš„æ–¹å¼å®Œæˆã€‚进程å¯ä»¥åˆ©ç”¨è¿™ä¸ªä¿¡å·ï¼Œåœ¨
+åŒä¸€å¥—接字上åŽç»­çš„è¯·æ±‚ä¸­åœæ­¢ä¼ é€’ MSG_ZEROCOPY 标志。
+
+实现
+====
+
+环回
+----
+
+对于 TCP 和 UDP:
+如果接收进程ä¸è¯»å–其套接字,å‘é€åˆ°æœ¬åœ°å¥—接字的数æ®å¯èƒ½ä¼šæ— é™æœŸæŽ’é˜Ÿã€‚æ— é™æœŸçš„通知延迟是ä¸
+坿ޥå—的。因此,所有使用 MSG_ZEROCOPY 生æˆå¹¶çŽ¯å›žåˆ°æœ¬åœ°å¥—æŽ¥å­—çš„æ•°æ®åŒ…都将产生延迟拷è´ã€‚
+这包括环回到数æ®åŒ…套接字(例如,tcpdump)和 tun 设备。
+
+对于 VSOCK:
+å‘é€åˆ°æœ¬åœ°å¥—接字的数æ®è·¯å¾„ä¸Žéžæœ¬åœ°å¥—接字相åŒã€‚
+
+测试
+====
+
+更具体的示例代ç å¯ä»¥åœ¨å†…æ ¸æºç çš„ tools/testing/selftests/net/msg_zerocopy.c 中找到。
+
+è¦ç•™æ„环回约æŸé—®é¢˜ã€‚该测试å¯ä»¥åœ¨ä¸€å¯¹ä¸»æœºä¹‹é—´è¿›è¡Œã€‚但如果是在本地的一对进程之间è¿è¡Œï¼Œä¾‹å¦‚当使用
+msg_zerocopy.sh 脚本在跨命å空间的虚拟以太网(veth)对之间è¿è¡Œæ—¶ï¼Œæµ‹è¯•å°†ä¸ä¼šæ˜¾ç¤ºå‡ºä»»ä½•性能
+æå‡ã€‚为了便于测试,å¯ä»¥é€šè¿‡è®© skb_orphan_frags_rx 与 skb_orphan_frags 相åŒï¼Œæ¥æš‚时放宽
+环回é™åˆ¶ã€‚
+
+对于 VSOCK 类型套接字的示例å¯ä»¥åœ¨ tools/testing/vsock/vsock_test_zerocopy.c 中找到。
diff --git a/Documentation/translations/zh_TW/admin-guide/bug-hunting.rst b/Documentation/translations/zh_TW/admin-guide/bug-hunting.rst
index b25ecc44d735..80ea5677ee52 100644
--- a/Documentation/translations/zh_TW/admin-guide/bug-hunting.rst
+++ b/Documentation/translations/zh_TW/admin-guide/bug-hunting.rst
@@ -191,7 +191,7 @@ objdump
編行。如果沒有調試符號,您將看到所示例程的彙編程åºä»£ç¢¼ï¼Œä½†æ˜¯å¦‚果內核有調試
符號,C代碼也將å¯è¦‹ï¼ˆèª¿è©¦ç¬¦è™Ÿå¯ä»¥åœ¨å…§æ ¸é…ç½®èœå–®çš„hacking項中啓用)。例如::
- $ objdump -r -S -l --disassemble net/dccp/ipv4.o
+ $ objdump -r -S -l --disassemble net/ipv4/tcp.o
.. note::
diff --git a/Documentation/translations/zh_TW/arch/openrisc/openrisc_port.rst b/Documentation/translations/zh_TW/arch/openrisc/openrisc_port.rst
index 422fe9f7a3f2..a1e4517dc601 100644
--- a/Documentation/translations/zh_TW/arch/openrisc/openrisc_port.rst
+++ b/Documentation/translations/zh_TW/arch/openrisc/openrisc_port.rst
@@ -17,10 +17,10 @@ OpenRISC 1000系列(或1k)。
關於OpenRISC處ç†å™¨å’Œæ­£åœ¨é€²è¡Œä¸­çš„開發的信æ¯:
- ======= =============================
+ ======= ==============================
網站 https://openrisc.io
- 郵箱 openrisc@lists.librecores.org
- ======= =============================
+ 郵箱 linux-openrisc@vger.kernel.org
+ ======= ==============================
---------------------------------------------------------------------
@@ -36,11 +36,11 @@ OpenRISC工具éˆå’ŒLinux的構建指å—
工具éˆçš„æ§‹å»ºæŒ‡å—å¯ä»¥åœ¨openrisc.io或Staffordçš„å·¥å…·éˆæ§‹å»ºå’Œç™¼ä½ˆè…³æœ¬
中找到。
- ====== =================================================
- 二進制 https://github.com/openrisc/or1k-gcc/releases
+ ====== ==========================================================
+ 二進制 https://github.com/stffrdhrn/or1k-toolchain-build/releases
å·¥å…·éˆ https://openrisc.io/software
構建 https://github.com/stffrdhrn/or1k-toolchain-build
- ====== =================================================
+ ====== ==========================================================
2) 構建
diff --git a/Documentation/translations/zh_TW/dev-tools/gdb-kernel-debugging.rst b/Documentation/translations/zh_TW/dev-tools/gdb-kernel-debugging.rst
index c881e8872b19..b595af59ba78 100644
--- a/Documentation/translations/zh_TW/dev-tools/gdb-kernel-debugging.rst
+++ b/Documentation/translations/zh_TW/dev-tools/gdb-kernel-debugging.rst
@@ -116,35 +116,31 @@ Kgdb內核調試器ã€QEMU等虛擬機管ç†ç¨‹åºæˆ–基於JTAG的硬件接å£ï¼
- å°ç•¶å‰æˆ–指定的CPU使用per-cpu函數::
- (gdb) p $lx_per_cpu("runqueues").nr_running
+ (gdb) p $lx_per_cpu(runqueues).nr_running
$3 = 1
- (gdb) p $lx_per_cpu("runqueues", 2).nr_running
+ (gdb) p $lx_per_cpu(runqueues, 2).nr_running
$4 = 0
- 使用container_of查看更多hrtimersä¿¡æ¯::
- (gdb) set $next = $lx_per_cpu("hrtimer_bases").clock_base[0].active.next
- (gdb) p *$container_of($next, "struct hrtimer", "node")
+ (gdb) set $leftmost = $lx_per_cpu(hrtimer_bases).clock_base[0].active.rb_root.rb_leftmost
+ (gdb) p *$container_of($leftmost, "struct hrtimer", "node")
$5 = {
node = {
node = {
- __rb_parent_color = 18446612133355256072,
- rb_right = 0x0 <irq_stack_union>,
- rb_left = 0x0 <irq_stack_union>
+ __rb_parent_color = 18446612686384860673,
+ rb_right = 0xffff888231da8b00,
+ rb_left = 0x0
},
- expires = {
- tv64 = 1835268000000
- }
+ expires = 1228461000000
},
- _softexpires = {
- tv64 = 1835268000000
- },
- function = 0xffffffff81078232 <tick_sched_timer>,
- base = 0xffff88003fd0d6f0,
- state = 1,
- start_pid = 0,
- start_site = 0xffffffff81055c1f <hrtimer_start_range_ns+20>,
- start_comm = "swapper/2\000\000\000\000\000\000"
+ _softexpires = 1228461000000,
+ function = 0xffffffff8137ab20 <tick_nohz_handler>,
+ base = 0xffff888231d9b4c0,
+ state = 1 '\001',
+ is_rel = 0 '\000',
+ is_soft = 0 '\000',
+ is_hard = 1 '\001'
}
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 7a1409ecc238..3d1171fd96c1 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -28,10 +28,10 @@ or number from the table below. Because of the large number of drivers,
many drivers share a partial letter with other drivers.
If you are writing a driver for a new device and need a letter, pick an
-unused block with enough room for expansion: 32 to 256 ioctl commands.
-You can register the block by patching this file and submitting the
-patch to Linus Torvalds. Or you can e-mail me at <mec@shout.net> and
-I'll register one for you.
+unused block with enough room for expansion: 32 to 256 ioctl commands
+should suffice. You can register the block by patching this file and
+submitting the patch through the :doc:`usual patch submission process
+</process/submitting-patches>`.
The second argument to _IO, _IOW, _IOR, or _IOWR is a sequence number
to distinguish ioctls from each other. The third argument to _IOW,
@@ -62,9 +62,8 @@ Following this convention is good because:
(5) When following the convention, the driver code can use generic
code to copy the parameters between user and kernel space.
-This table lists ioctls visible from user land for Linux/x86. It contains
-most drivers up to 2.6.31, but I know I am missing some. There has been
-no attempt to list non-X86 architectures or ioctls from drivers/staging/.
+This table lists ioctls visible from userland, excluding ones from
+drivers/staging/.
==== ===== ======================================================= ================================================================
Code Seq# Include File Comments
@@ -366,6 +365,12 @@ Code Seq# Include File Comments
<mailto:linuxppc-dev>
0xB2 01-02 arch/powerpc/include/uapi/asm/papr-sysparm.h powerpc/pseries system parameter API
<mailto:linuxppc-dev>
+0xB2 03-05 arch/powerpc/include/uapi/asm/papr-indices.h powerpc/pseries indices API
+ <mailto:linuxppc-dev>
+0xB2 06-07 arch/powerpc/include/uapi/asm/papr-platform-dump.h powerpc/pseries Platform Dump API
+ <mailto:linuxppc-dev>
+0xB2 08 powerpc/include/uapi/asm/papr-physical-attestation.h powerpc/pseries Physical Attestation API
+ <mailto:linuxppc-dev>
0xB3 00 linux/mmc/ioctl.h
0xB4 00-0F linux/gpio.h <mailto:linux-gpio@vger.kernel.org>
0xB5 00-0F uapi/linux/rpmsg.h <mailto:linux-remoteproc@vger.kernel.org>
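
For readers unfamiliar with the _IO* helpers referenced in the hunk above, a driver typically defines its commands like this; the 'X' code and the foo_* names are invented purely for illustration and are not registered in the table::

    #include <linux/ioctl.h>
    #include <linux/types.h>

    struct foo_info {
            __u32 flags;
            __u64 size;
    };

    #define FOO_IOC_MAGIC   'X'    /* the letter/block registered in this file */
    #define FOO_IOC_RESET   _IO(FOO_IOC_MAGIC, 0x00)
    #define FOO_IOC_GET     _IOR(FOO_IOC_MAGIC, 0x01, struct foo_info)
    #define FOO_IOC_SET     _IOW(FOO_IOC_MAGIC, 0x02, struct foo_info)
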
diff --git a/Documentation/userspace-api/media/v4l/meta-formats.rst b/Documentation/userspace-api/media/v4l/meta-formats.rst
index 86ffb3bc8ade..bb6876cfc271 100644
--- a/Documentation/userspace-api/media/v4l/meta-formats.rst
+++ b/Documentation/userspace-api/media/v4l/meta-formats.rst
@@ -12,6 +12,7 @@ These formats are used for the :ref:`metadata` interface only.
.. toctree::
:maxdepth: 1
+ metafmt-c3-isp
metafmt-d4xx
metafmt-generic
metafmt-intel-ipu3
diff --git a/Documentation/userspace-api/media/v4l/metafmt-c3-isp.rst b/Documentation/userspace-api/media/v4l/metafmt-c3-isp.rst
new file mode 100644
index 000000000000..449b45c2ec24
--- /dev/null
+++ b/Documentation/userspace-api/media/v4l/metafmt-c3-isp.rst
@@ -0,0 +1,86 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+
+.. _v4l2-meta-fmt-c3isp-stats:
+.. _v4l2-meta-fmt-c3isp-params:
+
+***********************************************************************
+V4L2_META_FMT_C3ISP_STATS ('C3ST'), V4L2_META_FMT_C3ISP_PARAMS ('C3PM')
+***********************************************************************
+
+.. c3_isp_stats_info
+
+3A Statistics
+=============
+
+The C3 ISP can collect different statistics over an input Bayer frame.
+Those statistics are obtained from the "c3-isp-stats" metadata capture video nodes,
+using the :c:type:`v4l2_meta_format` interface.
+They are formatted as described by the :c:type:`c3_isp_stats_info` structure.
+
+The statistics collected are Auto-white balance,
+Auto-exposure and Auto-focus information.
+
+.. c3_isp_params_cfg
+
+Configuration Parameters
+========================
+
+The configuration parameters are passed to the c3-isp-params metadata output video node,
+using the :c:type:`v4l2_meta_format` interface. Rather than a single struct containing
+sub-structs for each configurable area of the ISP, parameters for the C3-ISP
+are defined as distinct structs or "blocks" which may be added to the data
+member of :c:type:`c3_isp_params_cfg`. Userspace is responsible for
+populating the data member with the blocks that need to be configured by the driver, but
+need not populate it with **all** the blocks, or indeed with any at all if there
+are no configuration changes to make. Populated blocks **must** be consecutive
+in the buffer. To assist both userspace and the driver in identifying the
+blocks, each block-specific struct embeds
+:c:type:`c3_isp_params_block_header` as its first member and userspace
+must populate the type member with a value from
+:c:type:`c3_isp_params_block_type`. Once the blocks have been populated
+into the data buffer, the combined size of all populated blocks shall be set in
+the data_size member of :c:type:`c3_isp_params_cfg`. For example:
+
+.. code-block:: c
+
+ struct c3_isp_params_cfg *params =
+ (struct c3_isp_params_cfg *)buffer;
+
+ params->version = C3_ISP_PARAM_BUFFER_V0;
+ params->data_size = 0;
+
+ void *data = (void *)params->data;
+
+ struct c3_isp_params_awb_gains *gains =
+ (struct c3_isp_params_awb_gains *)data;
+
+ gains->header.type = C3_ISP_PARAMS_BLOCK_AWB_GAINS;
+ gains->header.flags = C3_ISP_PARAMS_BLOCK_FL_ENABLE;
+ gains->header.size = sizeof(struct c3_isp_params_awb_gains);
+
+ gains->gr_gain = 256;
+ gains->r_gain = 256;
+ gains->b_gain = 256;
+ gains->gb_gain = 256;
+
+ data += sizeof(struct c3_isp_params_awb_gains);
+ params->data_size += sizeof(struct c3_isp_params_awb_gains);
+
+ struct c3_isp_params_awb_config *awb_cfg =
+ (struct c3_isp_params_awb_config *)data;
+
+ awb_cfg->header.type = C3_ISP_PARAMS_BLOCK_AWB_CONFIG;
+ awb_cfg->header.flags = C3_ISP_PARAMS_BLOCK_FL_ENABLE;
+ awb_cfg->header.size = sizeof(struct c3_isp_params_awb_config);
+
+ awb_cfg->tap_point = C3_ISP_AWB_STATS_TAP_BEFORE_WB;
+ awb_cfg->satur = 1;
+ awb_cfg->horiz_zones_num = 32;
+ awb_cfg->vert_zones_num = 24;
+
+ params->data_size += sizeof(struct c3_isp_params_awb_config);
+
+Amlogic C3 ISP uAPI data types
+===============================
+
+.. kernel-doc:: include/uapi/linux/media/amlogic/c3-isp-config.h
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
index b788f6933855..6e4f399f1f88 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
@@ -137,6 +137,13 @@ All components are stored with the same number of bits per component.
- Cb, Cr
- No
- Linear
+ * - V4L2_PIX_FMT_NV15
+ - 'NV15'
+ - 10
+ - 4:2:0
+ - Cb, Cr
+ - Yes
+ - Linear
* - V4L2_PIX_FMT_NV15_4L4
- 'VT15'
- 15
@@ -186,6 +193,13 @@ All components are stored with the same number of bits per component.
- Cr, Cb
- No
- Linear
+ * - V4L2_PIX_FMT_NV20
+ - 'NV20'
+ - 10
+ - 4:2:2
+ - Cb, Cr
+ - Yes
+ - Linear
* - V4L2_PIX_FMT_NV24
- 'NV24'
- 8
@@ -302,6 +316,57 @@ of the luma plane.
- Cr\ :sub:`11`
+.. _V4L2-PIX-FMT-NV15:
+
+NV15
+----
+
+Semi-planar 10-bit YUV 4:2:0 format similar to NV12, using 10-bit components
+with no padding between each component. A group of 4 components are stored over
+5 bytes in little endian order.
+
+.. flat-table:: Sample 4x4 NV15 Image (1 byte per cell)
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - start + 0:
+ - Y'\ :sub:`00[7:0]`
+ - Y'\ :sub:`01[5:0]`\ Y'\ :sub:`00[9:8]`
+ - Y'\ :sub:`02[3:0]`\ Y'\ :sub:`01[9:6]`
+ - Y'\ :sub:`03[1:0]`\ Y'\ :sub:`02[9:4]`
+ - Y'\ :sub:`03[9:2]`
+ * - start + 5:
+ - Y'\ :sub:`10[7:0]`
+ - Y'\ :sub:`11[5:0]`\ Y'\ :sub:`10[9:8]`
+ - Y'\ :sub:`12[3:0]`\ Y'\ :sub:`11[9:6]`
+ - Y'\ :sub:`13[1:0]`\ Y'\ :sub:`12[9:4]`
+ - Y'\ :sub:`13[9:2]`
+ * - start + 10:
+ - Y'\ :sub:`20[7:0]`
+ - Y'\ :sub:`21[5:0]`\ Y'\ :sub:`20[9:8]`
+ - Y'\ :sub:`22[3:0]`\ Y'\ :sub:`21[9:6]`
+ - Y'\ :sub:`23[1:0]`\ Y'\ :sub:`22[9:4]`
+ - Y'\ :sub:`23[9:2]`
+ * - start + 15:
+ - Y'\ :sub:`30[7:0]`
+ - Y'\ :sub:`31[5:0]`\ Y'\ :sub:`30[9:8]`
+ - Y'\ :sub:`32[3:0]`\ Y'\ :sub:`31[9:6]`
+ - Y'\ :sub:`33[1:0]`\ Y'\ :sub:`32[9:4]`
+ - Y'\ :sub:`33[9:2]`
+ * - start + 20:
+ - Cb\ :sub:`00[7:0]`
+ - Cr\ :sub:`00[5:0]`\ Cb\ :sub:`00[9:8]`
+ - Cb\ :sub:`01[3:0]`\ Cr\ :sub:`00[9:6]`
+ - Cr\ :sub:`01[1:0]`\ Cb\ :sub:`01[9:4]`
+ - Cr\ :sub:`01[9:2]`
+ * - start + 25:
+ - Cb\ :sub:`10[7:0]`
+ - Cr\ :sub:`10[5:0]`\ Cb\ :sub:`10[9:8]`
+ - Cb\ :sub:`11[3:0]`\ Cr\ :sub:`10[9:6]`
+ - Cr\ :sub:`11[1:0]`\ Cb\ :sub:`11[9:4]`
+ - Cr\ :sub:`11[9:2]`
+
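The 4-components-in-5-bytes packing described above (shared by NV20 below) is easiest to see as a 40-bit little-endian word holding four 10-bit samples. A small sketch, not taken from any kernel helper::

    #include <stdint.h>

    /* Unpack one NV15/NV20 group: 5 bytes -> four 10-bit components */
    static void unpack10(const uint8_t g[5], uint16_t out[4])
    {
            uint64_t v = (uint64_t)g[0] | (uint64_t)g[1] << 8 |
                         (uint64_t)g[2] << 16 | (uint64_t)g[3] << 24 |
                         (uint64_t)g[4] << 32;

            for (int i = 0; i < 4; i++)
                    out[i] = (v >> (10 * i)) & 0x3ff;
    }
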
+
.. _V4L2-PIX-FMT-NV12MT:
.. _V4L2-PIX-FMT-NV12MT-16X16:
.. _V4L2-PIX-FMT-NV12-4L4:
@@ -631,6 +696,69 @@ number of lines as the luma plane.
- Cr\ :sub:`32`
+.. _V4L2-PIX-FMT-NV20:
+
+NV20
+----
+
+Semi-planar 10-bit YUV 4:2:2 format similar to NV16, using 10-bit components
+with no padding between each component. A group of 4 components are stored over
+5 bytes in little endian order.
+
+.. flat-table:: Sample 4x4 NV20 Image (1 byte per cell)
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - start + 0:
+ - Y'\ :sub:`00[7:0]`
+ - Y'\ :sub:`01[5:0]`\ Y'\ :sub:`00[9:8]`
+ - Y'\ :sub:`02[3:0]`\ Y'\ :sub:`01[9:6]`
+ - Y'\ :sub:`03[1:0]`\ Y'\ :sub:`02[9:4]`
+ - Y'\ :sub:`03[9:2]`
+ * - start + 5:
+ - Y'\ :sub:`10[7:0]`
+ - Y'\ :sub:`11[5:0]`\ Y'\ :sub:`10[9:8]`
+ - Y'\ :sub:`12[3:0]`\ Y'\ :sub:`11[9:6]`
+ - Y'\ :sub:`13[1:0]`\ Y'\ :sub:`12[9:4]`
+ - Y'\ :sub:`13[9:2]`
+ * - start + 10:
+ - Y'\ :sub:`20[7:0]`
+ - Y'\ :sub:`21[5:0]`\ Y'\ :sub:`20[9:8]`
+ - Y'\ :sub:`22[3:0]`\ Y'\ :sub:`21[9:6]`
+ - Y'\ :sub:`23[1:0]`\ Y'\ :sub:`22[9:4]`
+ - Y'\ :sub:`23[9:2]`
+ * - start + 15:
+ - Y'\ :sub:`30[7:0]`
+ - Y'\ :sub:`31[5:0]`\ Y'\ :sub:`30[9:8]`
+ - Y'\ :sub:`32[3:0]`\ Y'\ :sub:`31[9:6]`
+ - Y'\ :sub:`33[1:0]`\ Y'\ :sub:`32[9:4]`
+ - Y'\ :sub:`33[9:2]`
+ * - start + 20:
+ - Cb\ :sub:`00[7:0]`
+ - Cr\ :sub:`00[5:0]`\ Cb\ :sub:`00[9:8]`
+ - Cb\ :sub:`01[3:0]`\ Cr\ :sub:`00[9:6]`
+ - Cr\ :sub:`01[1:0]`\ Cb\ :sub:`01[9:4]`
+ - Cr\ :sub:`01[9:2]`
+ * - start + 25:
+ - Cb\ :sub:`10[7:0]`
+ - Cr\ :sub:`10[5:0]`\ Cb\ :sub:`10[9:8]`
+ - Cb\ :sub:`11[3:0]`\ Cr\ :sub:`10[9:6]`
+ - Cr\ :sub:`11[1:0]`\ Cb\ :sub:`11[9:4]`
+ - Cr\ :sub:`11[9:2]`
+ * - start + 30:
+ - Cb\ :sub:`20[7:0]`
+ - Cr\ :sub:`20[5:0]`\ Cb\ :sub:`20[9:8]`
+ - Cb\ :sub:`21[3:0]`\ Cr\ :sub:`20[9:6]`
+ - Cr\ :sub:`21[1:0]`\ Cb\ :sub:`21[9:4]`
+ - Cr\ :sub:`21[9:2]`
+ * - start + 35:
+ - Cb\ :sub:`30[7:0]`
+ - Cr\ :sub:`30[5:0]`\ Cb\ :sub:`30[9:8]`
+ - Cb\ :sub:`31[3:0]`\ Cr\ :sub:`30[9:6]`
+ - Cr\ :sub:`31[1:0]`\ Cb\ :sub:`31[9:4]`
+ - Cr\ :sub:`31[9:2]`
+
+
.. _V4L2-PIX-FMT-NV24:
.. _V4L2-PIX-FMT-NV42:
diff --git a/Documentation/userspace-api/mseal.rst b/Documentation/userspace-api/mseal.rst
index 1dabfc29be0d..7195a7f91107 100644
--- a/Documentation/userspace-api/mseal.rst
+++ b/Documentation/userspace-api/mseal.rst
@@ -27,7 +27,7 @@ SYSCALL
=======
mseal syscall signature
-----------------------
- ``int mseal(void \* addr, size_t len, unsigned long flags)``
+ ``int mseal(void *addr, size_t len, unsigned long flags)``
**addr**/**len**: virtual memory address range.
The address range set by **addr**/**len** must meet:
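
As a rough illustration of invoking the syscall whose signature is shown above: many libcs do not yet ship an mseal() wrapper, so the raw syscall is used here, and __NR_mseal requires recent kernel headers; this is a sketch, not a reference implementation::

    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;

            /* flags is currently reserved and must be 0 */
            if (syscall(__NR_mseal, p, (size_t)page, 0UL))
                    perror("mseal");

            /* once sealed, unmapping or changing protections is refused */
            if (munmap(p, page) == -1)
                    perror("munmap on a sealed mapping (EPERM expected)");
            return 0;
    }
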
diff --git a/Documentation/userspace-api/netlink/netlink-raw.rst b/Documentation/userspace-api/netlink/netlink-raw.rst
index 1990eea772d0..31fc91020eb3 100644
--- a/Documentation/userspace-api/netlink/netlink-raw.rst
+++ b/Documentation/userspace-api/netlink/netlink-raw.rst
@@ -62,7 +62,7 @@ Sub-messages
------------
Several raw netlink families such as
-:doc:`rt_link<../../networking/netlink_spec/rt_link>` and
+:doc:`rt-link<../../networking/netlink_spec/rt-link>` and
:doc:`tc<../../networking/netlink_spec/tc>` use attribute nesting as an
abstraction to carry module specific information.
diff --git a/Documentation/virt/hyperv/vmbus.rst b/Documentation/virt/hyperv/vmbus.rst
index 1dcef6a7fda3..654bb4849972 100644
--- a/Documentation/virt/hyperv/vmbus.rst
+++ b/Documentation/virt/hyperv/vmbus.rst
@@ -250,10 +250,18 @@ interrupts are not Linux IRQs, there are no entries in /proc/interrupts
or /proc/irq corresponding to individual VMBus channel interrupts.
An online CPU in a Linux guest may not be taken offline if it has
-VMBus channel interrupts assigned to it. Any such channel
-interrupts must first be manually reassigned to another CPU as
-described above. When no channel interrupts are assigned to the
-CPU, it can be taken offline.
+VMBus channel interrupts assigned to it. Starting in kernel v6.15,
+any such interrupts are automatically reassigned to some other CPU
+at the time of offlining. The "other" CPU is chosen by the
+implementation and is not load balanced or otherwise intelligently
+determined. If the CPU is onlined again, channel interrupts previously
+assigned to it are not moved back. As a result, after multiple CPUs
+have been offlined, and perhaps onlined again, the interrupt-to-CPU
+mapping may be scrambled and non-optimal. In such a case, optimal
+assignments must be re-established manually. For kernels v6.14 and
+earlier, any conflicting channel interrupts must first be manually
+reassigned to another CPU as described above. Then when no channel
+interrupts are assigned to the CPU, it can be taken offline.
The VMBus channel interrupt handling code is designed to work
correctly even if an interrupt is received on a CPU other than the
@@ -324,3 +332,15 @@ rescinded, neither Hyper-V nor Linux retains any state about
its previous existence. Such a device might be re-added later,
in which case it is treated as an entirely new device. See
vmbus_onoffer_rescind().
+
+For some devices, such as the KVP device, Hyper-V automatically
+sends a rescind message when the primary channel is closed,
+likely as a result of unbinding the device from its driver.
+The rescind causes Linux to remove the device. But then Hyper-V
+immediately reoffers the device to the guest, causing a new
+instance of the device to be created in Linux. For other
+devices, such as the synthetic SCSI and NIC devices, closing the
+primary channel does *not* result in Hyper-V sending a rescind
+message. The device continues to exist in Linux on the VMBus,
+but with no driver bound to it. The same driver or a new driver
+can subsequently be bound to the existing instance of the device.
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 1f8625b7646a..1bd2d42e6424 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -1411,6 +1411,9 @@ the memory region are automatically reflected into the guest. For example, an
mmap() that affects the region will be made visible immediately. Another
example is madvise(MADV_DROP).
+For a TDX guest, deleting or moving a memory region loses the guest memory
+contents. Read-only regions aren't supported. Only as-id 0 is supported.
+
Note: On arm64, a write generated by the page-table walker (to update
the Access and Dirty flags, for example) never results in a
KVM_EXIT_MMIO exit when the slot has the KVM_MEM_READONLY flag. This
@@ -3460,7 +3463,8 @@ The initial values are defined as:
- FPSIMD/NEON registers: set to 0
- SVE registers: set to 0
- System registers: Reset to their architecturally defined
- values as for a warm reset to EL1 (resp. SVC)
+ values as for a warm reset to EL1 (resp. SVC) or EL2 (in the
+ case of EL2 being enabled).
Note that because some registers reflect machine topology, all vcpus
should be created before this ioctl is invoked.
@@ -3527,6 +3531,17 @@ Possible features:
- the KVM_REG_ARM64_SVE_VLS pseudo-register is immutable, and can
no longer be written using KVM_SET_ONE_REG.
+ - KVM_ARM_VCPU_HAS_EL2: Enable Nested Virtualisation support,
+ booting the guest from EL2 instead of EL1.
+ Depends on KVM_CAP_ARM_EL2.
+ The VM is running with HCR_EL2.E2H being RES1 (VHE) unless
+ KVM_ARM_VCPU_HAS_EL2_E2H0 is also set.
+
+ - KVM_ARM_VCPU_HAS_EL2_E2H0: Restrict Nested Virtualisation
+ support to HCR_EL2.E2H being RES0 (non-VHE).
+ Depends on KVM_CAP_ARM_EL2_E2H0.
+ KVM_ARM_VCPU_HAS_EL2 must also be set.
+
4.83 KVM_ARM_PREFERRED_TARGET
-----------------------------
@@ -4768,7 +4783,7 @@ H_GET_CPU_CHARACTERISTICS hypercall.
:Capability: basic
:Architectures: x86
-:Type: vm
+:Type: vm ioctl, vcpu ioctl
:Parameters: an opaque platform specific structure (in/out)
:Returns: 0 on success; -1 on error
@@ -4776,9 +4791,11 @@ If the platform supports creating encrypted VMs then this ioctl can be used
for issuing platform-specific memory encryption commands to manage those
encrypted VMs.
-Currently, this ioctl is used for issuing Secure Encrypted Virtualization
-(SEV) commands on AMD Processors. The SEV commands are defined in
-Documentation/virt/kvm/x86/amd-memory-encryption.rst.
+Currently, this ioctl is used for issuing both Secure Encrypted Virtualization
+(SEV) commands on AMD Processors and Trusted Domain Extensions (TDX) commands
+on Intel Processors. The detailed commands are defined in
+Documentation/virt/kvm/x86/amd-memory-encryption.rst and
+Documentation/virt/kvm/x86/intel-tdx.rst.
4.111 KVM_MEMORY_ENCRYPT_REG_REGION
-----------------------------------
@@ -6827,6 +6844,7 @@ should put the acknowledged interrupt vector into the 'epr' field.
#define KVM_SYSTEM_EVENT_WAKEUP 4
#define KVM_SYSTEM_EVENT_SUSPEND 5
#define KVM_SYSTEM_EVENT_SEV_TERM 6
+ #define KVM_SYSTEM_EVENT_TDX_FATAL 7
__u32 type;
__u32 ndata;
__u64 data[16];
@@ -6853,6 +6871,11 @@ Valid values for 'type' are:
reset/shutdown of the VM.
- KVM_SYSTEM_EVENT_SEV_TERM -- an AMD SEV guest requested termination.
The guest physical address of the guest's GHCB is stored in `data[0]`.
+ - KVM_SYSTEM_EVENT_TDX_FATAL -- a TDX guest reported a fatal error state.
+ KVM doesn't do any parsing or conversion, it just dumps 16 general-purpose
+ registers to userspace, in ascending order of the 4-bit indices for x86-64
+ general-purpose registers in instruction encoding, as defined in the Intel
+ SDM.
- KVM_SYSTEM_EVENT_WAKEUP -- the exiting vCPU is in a suspended state and
KVM has recognized a wakeup event. Userspace may honor this event by
marking the exiting vCPU as runnable, or deny it and call KVM_RUN again.
@@ -7447,6 +7470,75 @@ Unused bitfields in the bitarrays must be set to zero.
This capability connects the vcpu to an in-kernel XIVE device.
+6.76 KVM_CAP_HYPERV_SYNIC
+-------------------------
+
+:Architectures: x86
+:Target: vcpu
+
+This capability, if KVM_CHECK_EXTENSION indicates that it is
+available, means that the kernel has an implementation of the
+Hyper-V Synthetic interrupt controller (SynIC). Hyper-V SynIC is
+used to support Windows Hyper-V based guest paravirt drivers (VMBus).
+
+In order to use SynIC, it has to be activated by setting this
+capability via KVM_ENABLE_CAP ioctl on the vcpu fd. Note that this
+will disable the use of APIC hardware virtualization even if supported
+by the CPU, as it's incompatible with SynIC auto-EOI behavior.
+
+6.77 KVM_CAP_HYPERV_SYNIC2
+--------------------------
+
+:Architectures: x86
+:Target: vcpu
+
+This capability enables a newer version of Hyper-V Synthetic interrupt
+controller (SynIC). The only difference with KVM_CAP_HYPERV_SYNIC is that KVM
+doesn't clear SynIC message and event flags pages when they are enabled by
+writing to the respective MSRs.
+
+6.78 KVM_CAP_HYPERV_DIRECT_TLBFLUSH
+-----------------------------------
+
+:Architectures: x86
+:Target: vcpu
+
+This capability indicates that KVM, when running on top of the Hyper-V
+hypervisor, enables Direct TLB flush for its guests, meaning that TLB
+flush hypercalls are handled by the Level 0 hypervisor (Hyper-V),
+bypassing KVM. Due to the different ABI for hypercall parameters
+between Hyper-V and KVM, enabling this capability effectively disables
+all hypercall handling by KVM (as some KVM hypercalls may be mistakenly
+treated as TLB flush hypercalls by Hyper-V), so userspace should
+disable KVM identification in CPUID and only expose Hyper-V
+identification. In this case, the guest thinks it's running on Hyper-V
+and uses only Hyper-V hypercalls.
+
+6.79 KVM_CAP_HYPERV_ENFORCE_CPUID
+---------------------------------
+
+:Architectures: x86
+:Target: vcpu
+
+When enabled, KVM will disable emulated Hyper-V features provided to the
+guest according to the bits in the Hyper-V CPUID feature leaves. Otherwise, all
+currently implemented Hyper-V features are provided unconditionally when
+Hyper-V identification is set in the HYPERV_CPUID_INTERFACE (0x40000001)
+leaf.
+
+6.80 KVM_CAP_ENFORCE_PV_FEATURE_CPUID
+-------------------------------------
+
+:Architectures: x86
+:Target: vcpu
+
+When enabled, KVM will disable paravirtual features provided to the
+guest according to the bits in the KVM_CPUID_FEATURES CPUID leaf
+(0x40000001). Otherwise, a guest may use the paravirtual features
+regardless of what has actually been exposed through the CPUID leaf.
+
+.. _KVM_CAP_DIRTY_LOG_RING:
+
+
.. _cap_enable_vm:
7. Capabilities that can be enabled on VMs
@@ -7909,6 +8001,11 @@ apply some other policy-based mitigation. When exiting to userspace, KVM sets
KVM_RUN_X86_BUS_LOCK in vcpu-run->flags, and conditionally sets the exit_reason
to KVM_EXIT_X86_BUS_LOCK.
+Due to differences in the underlying hardware implementation, the vCPU's RIP at
+the time of exit diverges between Intel and AMD. On Intel hosts, RIP points at
+the next instruction, i.e. the exit is trap-like. On AMD hosts, RIP points at
+the offending instruction, i.e. the exit is fault-like.
+
Note! Detected bus locks may be coincident with other exits to userspace, i.e.
KVM_RUN_X86_BUS_LOCK should be checked regardless of the primary exit reason if
userspace wants to take action on all detected bus locks.
@@ -7927,10 +8024,10 @@ by POWER10 processor.
7.24 KVM_CAP_VM_COPY_ENC_CONTEXT_FROM
-------------------------------------
-Architectures: x86 SEV enabled
-Type: vm
-Parameters: args[0] is the fd of the source vm
-Returns: 0 on success; ENOTTY on error
+:Architectures: x86 SEV enabled
+:Type: vm
+:Parameters: args[0] is the fd of the source vm
+:Returns: 0 on success; ENOTTY on error
This capability enables userspace to copy encryption context from the vm
indicated by the fd to the vm this is called on.
@@ -7963,24 +8060,6 @@ default.
See Documentation/arch/x86/sgx.rst for more details.
-7.26 KVM_CAP_PPC_RPT_INVALIDATE
--------------------------------
-
-:Capability: KVM_CAP_PPC_RPT_INVALIDATE
-:Architectures: ppc
-:Type: vm
-
-This capability indicates that the kernel is capable of handling
-H_RPT_INVALIDATE hcall.
-
-In order to enable the use of H_RPT_INVALIDATE in the guest,
-user space might have to advertise it for the guest. For example,
-IBM pSeries (sPAPR) guest starts using it if "hcall-rpt-invalidate" is
-present in the "ibm,hypertas-functions" device-tree property.
-
-This capability is enabled for hypervisors on platforms like POWER9
-that support radix MMU.
-
7.27 KVM_CAP_EXIT_ON_EMULATION_FAILURE
--------------------------------------
@@ -8038,24 +8117,9 @@ indicated by the fd to the VM this is called on.
This is intended to support intra-host migration of VMs between userspace VMMs,
upgrading the VMM process without interrupting the guest.
-7.30 KVM_CAP_PPC_AIL_MODE_3
--------------------------------
-
-:Capability: KVM_CAP_PPC_AIL_MODE_3
-:Architectures: ppc
-:Type: vm
-
-This capability indicates that the kernel supports the mode 3 setting for the
-"Address Translation Mode on Interrupt" aka "Alternate Interrupt Location"
-resource that is controlled with the H_SET_MODE hypercall.
-
-This capability allows a guest kernel to use a better-performance mode for
-handling interrupts and system calls.
-
7.31 KVM_CAP_DISABLE_QUIRKS2
----------------------------
-:Capability: KVM_CAP_DISABLE_QUIRKS2
:Parameters: args[0] - set of KVM quirks to disable
:Architectures: x86
:Type: vm
@@ -8158,6 +8222,28 @@ KVM_X86_QUIRK_STUFF_FEATURE_MSRS By default, at vCPU creation, KVM sets the
and 0x489), as KVM does now allow them to
be set by userspace (KVM sets them based on
guest CPUID, for safety purposes).
+
+KVM_X86_QUIRK_IGNORE_GUEST_PAT By default, on Intel platforms, KVM ignores
+ guest PAT and forces the effective memory
+ type to WB in EPT. The quirk is not available
+ on Intel platforms which are incapable of
+ safely honoring guest PAT (i.e., without CPU
+ self-snoop, KVM always ignores guest PAT and
+ forces effective memory type to WB). It is
+ also ignored on AMD platforms or, on Intel,
+ when a VM has non-coherent DMA devices
+ assigned; KVM always honors guest PAT in
+ such case. The quirk is needed to avoid
+ slowdowns on certain Intel Xeon platforms
+ (e.g. ICX, SPR) where self-snoop feature is
+ supported but UC is slow enough to cause
+ issues with some older guests that use
+ UC instead of WC to map the video RAM.
+ Userspace can disable the quirk to honor
+ guest PAT if it knows that there is no such
+ guest software, for example if it does not
+ expose a bochs graphics device (which is
+ known to have had a buggy driver).
=================================== ============================================
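+
+For example, a VMM that never exposes a display device whose guest driver maps
+video RAM as UC could disable KVM_X86_QUIRK_IGNORE_GUEST_PAT; a hedged sketch
+(``vm_fd`` is an assumed VM file descriptor, not part of the ABI)::
+
+  struct kvm_enable_cap cap = {
+      .cap = KVM_CAP_DISABLE_QUIRKS2,
+      /* args[0] must be a subset of KVM_CHECK_EXTENSION(KVM_CAP_DISABLE_QUIRKS2) */
+      .args[0] = KVM_X86_QUIRK_IGNORE_GUEST_PAT,
+  };
+
+  ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+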
7.32 KVM_CAP_MAX_VCPU_ID
@@ -8210,27 +8296,6 @@ This capability is aimed to mitigate the threat that malicious VMs can
cause CPU stuck (due to event windows don't open up) and make the CPU
unavailable to host or other VMs.
-7.34 KVM_CAP_MEMORY_FAULT_INFO
-------------------------------
-
-:Architectures: x86
-:Returns: Informational only, -EINVAL on direct KVM_ENABLE_CAP.
-
-The presence of this capability indicates that KVM_RUN will fill
-kvm_run.memory_fault if KVM cannot resolve a guest page fault VM-Exit, e.g. if
-there is a valid memslot but no backing VMA for the corresponding host virtual
-address.
-
-The information in kvm_run.memory_fault is valid if and only if KVM_RUN returns
-an error with errno=EFAULT or errno=EHWPOISON *and* kvm_run.exit_reason is set
-to KVM_EXIT_MEMORY_FAULT.
-
-Note: Userspaces which attempt to resolve memory faults so that they can retry
-KVM_RUN are encouraged to guard against repeatedly receiving the same
-error/annotated fault.
-
-See KVM_EXIT_MEMORY_FAULT for more information.
-
7.35 KVM_CAP_X86_APIC_BUS_CYCLES_NS
-----------------------------------
@@ -8248,19 +8313,220 @@ by KVM_CHECK_EXTENSION.
Note: Userspace is responsible for correctly configuring CPUID 0x15, a.k.a. the
core crystal clock frequency, if a non-zero CPUID 0x15 is exposed to the guest.
-7.36 KVM_CAP_X86_GUEST_MODE
-------------------------------
+7.36 KVM_CAP_DIRTY_LOG_RING/KVM_CAP_DIRTY_LOG_RING_ACQ_REL
+----------------------------------------------------------
+
+:Architectures: x86, arm64
+:Type: vm
+:Parameters: args[0] - size of the dirty log ring
+
+KVM is capable of tracking dirty memory using ring buffers that are
+mmapped into userspace; there is one dirty ring per vcpu.
+
+The dirty ring is available to userspace as an array of
+``struct kvm_dirty_gfn``. Each dirty entry is defined as::
+
+ struct kvm_dirty_gfn {
+ __u32 flags;
+ __u32 slot; /* as_id | slot_id */
+ __u64 offset;
+ };
+
+The following values are defined for the flags field to define the
+current state of the entry::
+
+ #define KVM_DIRTY_GFN_F_DIRTY BIT(0)
+ #define KVM_DIRTY_GFN_F_RESET BIT(1)
+ #define KVM_DIRTY_GFN_F_MASK 0x3
+
+Userspace should call KVM_ENABLE_CAP ioctl right after KVM_CREATE_VM
+ioctl to enable this capability for the new guest and set the size of
+the rings. Enabling the capability is only allowed before creating any
+vCPU, and the size of the ring must be a power of two. The larger the
+ring buffer, the less likely the ring is full and the VM is forced to
+exit to userspace. The optimal size depends on the workload, but it is
+recommended that it be at least 64 KiB (4096 entries).
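+
+For illustration only, a minimal sketch of enabling the ring right after VM
+creation (``vm_fd`` and the 64 KiB size are assumptions, not part of the ABI)::
+
+  struct kvm_enable_cap cap = {
+      .cap = KVM_CAP_DIRTY_LOG_RING_ACQ_REL,
+      .args[0] = 65536,   /* ring size in bytes, must be a power of two */
+  };
+
+  /* After KVM_CREATE_VM and before any KVM_CREATE_VCPU. */
+  if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
+      perror("KVM_ENABLE_CAP(KVM_CAP_DIRTY_LOG_RING_ACQ_REL)");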
+
+Just like for dirty page bitmaps, the buffer tracks writes to
+all user memory regions for which the KVM_MEM_LOG_DIRTY_PAGES flag was
+set in KVM_SET_USER_MEMORY_REGION. Once a memory region is registered
+with the flag set, userspace can start harvesting dirty pages from the
+ring buffer.
+
+An entry in the ring buffer can be unused (flag bits ``00``),
+dirty (flag bits ``01``) or harvested (flag bits ``1X``). The
+state machine for the entry is as follows::
+
+ dirtied harvested reset
+ 00 -----------> 01 -------------> 1X -------+
+ ^ |
+ | |
+ +------------------------------------------+
+
+To harvest the dirty pages, userspace accesses the mmapped ring buffer
+to read the dirty GFNs. If the flags field has the DIRTY bit set (at this
+stage the RESET bit must be cleared), then this GFN is a dirty GFN.
+Userspace should harvest this GFN and change the flags from state
+``01b`` to ``1Xb`` (bit 0 will be ignored by KVM, but bit 1 must be set
+to show that this GFN is harvested and waiting for a reset), and move
+on to the next GFN. Userspace should continue to do this until the
+flags of a GFN have the DIRTY bit cleared, meaning that it has harvested
+all the dirty GFNs that were available.
+
+Note that on weakly ordered architectures, userspace accesses to the
+ring buffer (and more specifically the 'flags' field) must be ordered,
+using load-acquire/store-release accessors when available, or any
+other memory barrier that will ensure this ordering.
+
+It's not necessary for userspace to harvest all the dirty GFNs at once.
+However, it must collect the dirty GFNs in sequence, i.e., the userspace
+program cannot skip one dirty GFN in order to collect the one next to it.
+
+After processing one or more entries in the ring buffer, userspace
+calls the VM ioctl KVM_RESET_DIRTY_RINGS to notify the kernel about
+it, so that the kernel will reprotect those collected GFNs.
+Therefore, the ioctl must be called *before* reading the content of
+the dirty pages.
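+
+As a hedged sketch of this collection loop (``ring``, ``ring_size``,
+``next_index``, ``vm_fd`` and ``collect_dirty_gfn()`` are hypothetical
+userspace state and helpers; the acquire/release accessors provide the
+ordering required on weakly ordered architectures, as noted above)::
+
+  /* 'ring' is the mmapped array of struct kvm_dirty_gfn for this vcpu. */
+  for (;;) {
+      struct kvm_dirty_gfn *e = &ring[next_index % ring_size];
+      __u32 flags = __atomic_load_n(&e->flags, __ATOMIC_ACQUIRE);
+
+      if (!(flags & KVM_DIRTY_GFN_F_DIRTY))
+          break;                               /* nothing left to harvest */
+
+      collect_dirty_gfn(e->slot, e->offset);   /* hypothetical helper */
+
+      /* Mark the entry harvested: 01b -> 1Xb. */
+      __atomic_store_n(&e->flags, KVM_DIRTY_GFN_F_RESET, __ATOMIC_RELEASE);
+      next_index++;
+  }
+
+  /* Let the kernel reprotect and recycle the harvested entries. */
+  ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);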
+
+The dirty ring can get full. When it happens, the KVM_RUN of the
+vcpu will return with exit reason KVM_EXIT_DIRTY_LOG_FULL.
+
+The dirty ring interface has a major difference compared to the
+KVM_GET_DIRTY_LOG interface in that, when reading the dirty ring from
+userspace, it's still possible that the kernel has not yet flushed the
+processor's dirty page buffers into the kernel buffer (with dirty bitmaps, the
+flushing is done by the KVM_GET_DIRTY_LOG ioctl). To achieve that flushing, one
+needs to kick the vcpu out of KVM_RUN using a signal. The resulting
+vmexit ensures that all dirty GFNs are flushed to the dirty rings.
+
+NOTE: KVM_CAP_DIRTY_LOG_RING_ACQ_REL is the only capability that
+should be exposed by weakly ordered architectures, in order to indicate
+the additional memory ordering requirements imposed on userspace when
+reading the state of an entry and mutating it from DIRTY to HARVESTED.
+Architectures with TSO-like ordering (such as x86) are allowed to
+expose both KVM_CAP_DIRTY_LOG_RING and KVM_CAP_DIRTY_LOG_RING_ACQ_REL
+to userspace.
+
+After enabling the dirty rings, userspace needs to check for the
+KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP capability to see whether the
+ring structures can be backed by per-slot bitmaps. When this capability
+is advertised, it means the architecture can dirty guest pages without
+vcpu/ring context, so that some of the dirty information will still be
+maintained in the bitmap structure. KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP
+can't be enabled if KVM_CAP_DIRTY_LOG_RING_ACQ_REL hasn't been enabled,
+or if any memslot already exists.
+
+Note that the bitmap here is only a backup of the ring structure. The
+use of the ring and bitmap combination is only beneficial if there is
+only a very small amount of memory that is dirtied out of vcpu/ring
+context. Otherwise, the stand-alone per-slot bitmap mechanism needs to
+be considered.
+
+To collect dirty bits in the backup bitmap, userspace can use the same
+KVM_GET_DIRTY_LOG ioctl. KVM_CLEAR_DIRTY_LOG isn't needed as long as all
+the generation of the dirty bits is done in a single pass. Collecting
+the dirty bitmap should be the very last thing that the VMM does before
+considering the state as complete. The VMM needs to ensure that the dirty
+state is final and avoid missing dirty pages from another ioctl ordered
+after the bitmap collection.
+
+NOTE: Multiple examples of using the backup bitmap: (1) save vgic/its
+tables through command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} on
+KVM device "kvm-arm-vgic-its". (2) restore vgic/its tables through
+command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_RESTORE_TABLES} on KVM device
+"kvm-arm-vgic-its". VGICv3 LPI pending status is restored. (3) save
+vgic3 pending table through KVM_DEV_ARM_VGIC_{GRP_CTRL, SAVE_PENDING_TABLES}
+command on KVM device "kvm-arm-vgic-v3".
+
+7.37 KVM_CAP_PMU_CAPABILITY
+---------------------------
:Architectures: x86
-:Returns: Informational only, -EINVAL on direct KVM_ENABLE_CAP.
+:Type: vm
+:Parameters: arg[0] is bitmask of PMU virtualization capabilities.
+:Returns: 0 on success, -EINVAL when arg[0] contains invalid bits
-The presence of this capability indicates that KVM_RUN will update the
-KVM_RUN_X86_GUEST_MODE bit in kvm_run.flags to indicate whether the
-vCPU was executing nested guest code when it exited.
+This capability alters PMU virtualization in KVM.
-KVM exits with the register state of either the L1 or L2 guest
-depending on which executed at the time of an exit. Userspace must
-take care to differentiate between these cases.
+Calling KVM_CHECK_EXTENSION for this capability returns a bitmask of
+PMU virtualization capabilities that can be adjusted on a VM.
+
+The argument to KVM_ENABLE_CAP is also a bitmask and selects specific
+PMU virtualization capabilities to be applied to the VM. This can
+only be invoked on a VM prior to the creation of VCPUs.
+
+At this time, KVM_PMU_CAP_DISABLE is the only capability. Setting
+this capability will disable PMU virtualization for that VM. Usermode
+should adjust CPUID leaf 0xA to reflect that the PMU is disabled.
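+
+A hedged sketch of disabling PMU virtualization on a freshly created VM
+(``vm_fd`` is assumed to be the VM file descriptor)::
+
+  struct kvm_enable_cap cap = {
+      .cap = KVM_CAP_PMU_CAPABILITY,
+      .args[0] = KVM_PMU_CAP_DISABLE,
+  };
+
+  if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PMU_CAPABILITY) &
+      KVM_PMU_CAP_DISABLE)
+      ioctl(vm_fd, KVM_ENABLE_CAP, &cap);   /* before any KVM_CREATE_VCPU */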
+
+7.38 KVM_CAP_VM_DISABLE_NX_HUGE_PAGES
+-------------------------------------
+
+:Architectures: x86
+:Type: vm
+:Parameters: arg[0] must be 0.
+:Returns: 0 on success, -EPERM if the userspace process does not
+ have CAP_SYS_BOOT, -EINVAL if args[0] is not 0 or any vCPUs have been
+ created.
+
+This capability disables the NX huge pages mitigation for iTLB MULTIHIT.
+
+The capability has no effect if the nx_huge_pages module parameter is not set.
+
+This capability may only be set before any vCPUs are created.
+
+7.39 KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
+---------------------------------------
+
+:Architectures: arm64
+:Type: vm
+:Parameters: arg[0] is the new split chunk size.
+:Returns: 0 on success, -EINVAL if any memslot was already created.
+
+This capability sets the chunk size used in Eager Page Splitting.
+
+Eager Page Splitting improves the performance of dirty-logging (used
+in live migrations) when guest memory is backed by huge-pages. It
+avoids splitting huge-pages (into PAGE_SIZE pages) on fault, by doing
+it eagerly when enabling dirty logging (with the
+KVM_MEM_LOG_DIRTY_PAGES flag for a memory region), or when using
+KVM_CLEAR_DIRTY_LOG.
+
+The chunk size specifies how many pages to break at a time, using a
+single allocation for each chunk. The bigger the chunk size, the more
+pages need to be allocated ahead of time.
+
+The chunk size needs to be a valid block size. The list of acceptable
+block sizes is exposed in KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES as a
+64-bit bitmap (each bit describing a block size). The default value is
+0, which disables eager page splitting.
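+
+As an illustrative sketch (``vm_fd`` is assumed, and the interpretation of
+the KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES bitmap as block sizes in pages is an
+assumption to be checked against the arm64 KVM sources)::
+
+  long sizes = ioctl(vm_fd, KVM_CHECK_EXTENSION,
+                     KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES);
+  struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE };
+
+  if (sizes > 0) {
+      /* Pick the smallest advertised block size as the chunk size. */
+      cap.args[0] = 1UL << __builtin_ctzl(sizes);
+      ioctl(vm_fd, KVM_ENABLE_CAP, &cap);   /* before any memslot is created */
+  }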
+
+7.40 KVM_CAP_EXIT_HYPERCALL
+---------------------------
+
+:Architectures: x86
+:Type: vm
+
+This capability, if enabled, will cause KVM to exit to userspace
+with KVM_EXIT_HYPERCALL exit reason to process some hypercalls.
+
+Calling KVM_CHECK_EXTENSION for this capability will return a bitmask
+of hypercalls that can be configured to exit to userspace.
+Right now, the only such hypercall is KVM_HC_MAP_GPA_RANGE.
+
+The argument to KVM_ENABLE_CAP is also a bitmask, and must be a subset
+of the result of KVM_CHECK_EXTENSION. KVM will forward to userspace
+the hypercalls whose corresponding bit is in the argument, and return
+ENOSYS for the others.
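+
+A sketch of opting in to KVM_HC_MAP_GPA_RANGE exits (``vm_fd`` is assumed;
+each bit in the mask is taken to correspond to a hypercall number)::
+
+  long mask = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_EXIT_HYPERCALL);
+  struct kvm_enable_cap cap = { .cap = KVM_CAP_EXIT_HYPERCALL };
+
+  if (mask & (1UL << KVM_HC_MAP_GPA_RANGE)) {
+      cap.args[0] = 1UL << KVM_HC_MAP_GPA_RANGE;
+      ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+  }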
+
+7.41 KVM_CAP_ARM_SYSTEM_SUSPEND
+-------------------------------
+
+:Architectures: arm64
+:Type: vm
+
+When enabled, KVM will exit to userspace with KVM_EXIT_SYSTEM_EVENT of
+type KVM_SYSTEM_EVENT_SUSPEND to process the guest suspend request.
7.42 KVM_CAP_ARM_WRITABLE_IMP_ID_REGS
-------------------------------------
@@ -8280,6 +8546,17 @@ aforementioned registers before the first KVM_RUN. These registers are VM
scoped, meaning that the same set of values are presented on all vCPUs in a
given VM.
+7.43 KVM_CAP_RISCV_MP_STATE_RESET
+---------------------------------
+
+:Architectures: riscv
+:Type: vm
+:Parameters: None
+:Returns: 0 on success, -EINVAL if arg[0] is not zero
+
+When this capability is enabled, KVM resets the VCPU when userspace sets
+MP_STATE_INIT_RECEIVED via the KVM_SET_MP_STATE ioctl. The original MP_STATE
+is preserved.
+
8. Other capabilities.
======================
@@ -8297,21 +8574,6 @@ H_RANDOM hypercall backed by a hardware random-number generator.
If present, the kernel H_RANDOM handler can be enabled for guest use
with the KVM_CAP_PPC_ENABLE_HCALL capability.
-8.2 KVM_CAP_HYPERV_SYNIC
-------------------------
-
-:Architectures: x86
-
-This capability, if KVM_CHECK_EXTENSION indicates that it is
-available, means that the kernel has an implementation of the
-Hyper-V Synthetic interrupt controller(SynIC). Hyper-V SynIC is
-used to support Windows Hyper-V based guest paravirt drivers(VMBus).
-
-In order to use SynIC, it has to be activated by setting this
-capability via KVM_ENABLE_CAP ioctl on the vcpu fd. Note that this
-will disable the use of APIC hardware virtualization even if supported
-by the CPU, as it's incompatible with SynIC auto-EOI behavior.
-
8.3 KVM_CAP_PPC_MMU_RADIX
-------------------------
@@ -8362,20 +8624,6 @@ may be incompatible with the MIPS VZ ASE.
virtualization, including standard guest virtual memory segments.
== ==========================================================================
-8.6 KVM_CAP_MIPS_TE
--------------------
-
-:Architectures: mips
-
-This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
-it is available, means that the trap & emulate implementation is available to
-run guest code in user mode, even if KVM_CAP_MIPS_VZ indicates that hardware
-assisted virtualisation is also available. KVM_VM_MIPS_TE (0) must be passed
-to KVM_CREATE_VM to create a VM which utilises it.
-
-If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
-available, it means that the VM is using trap & emulate.
-
8.7 KVM_CAP_MIPS_64BIT
----------------------
@@ -8457,16 +8705,6 @@ virtual SMT modes that can be set using KVM_CAP_PPC_SMT. If bit N
(counting from the right) is set, then a virtual SMT mode of 2^N is
available.
-8.11 KVM_CAP_HYPERV_SYNIC2
---------------------------
-
-:Architectures: x86
-
-This capability enables a newer version of Hyper-V Synthetic interrupt
-controller (SynIC). The only difference with KVM_CAP_HYPERV_SYNIC is that KVM
-doesn't clear SynIC message and event flags pages when they are enabled by
-writing to the respective MSRs.
-
8.12 KVM_CAP_HYPERV_VP_INDEX
----------------------------
@@ -8481,7 +8719,6 @@ capability is absent, userspace can still query this msr's value.
-------------------------------
:Architectures: s390
-:Parameters: none
This capability indicates if the flic device will be able to get/set the
AIS states for migration via the KVM_DEV_FLIC_AISM_ALL attribute and allows
@@ -8555,21 +8792,6 @@ This capability indicates that KVM supports paravirtualized Hyper-V IPI send
hypercalls:
HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx.
-8.21 KVM_CAP_HYPERV_DIRECT_TLBFLUSH
------------------------------------
-
-:Architectures: x86
-
-This capability indicates that KVM running on top of Hyper-V hypervisor
-enables Direct TLB flush for its guests meaning that TLB flush
-hypercalls are handled by Level 0 hypervisor (Hyper-V) bypassing KVM.
-Due to the different ABI for hypercall parameters between Hyper-V and
-KVM, enabling this capability effectively disables all hypercall
-handling by KVM (as some KVM hypercall may be mistakenly treated as TLB
-flush hypercalls by Hyper-V) so userspace should disable KVM identification
-in CPUID and only exposes Hyper-V identification. In this case, guest
-thinks it's running on Hyper-V and only use Hyper-V hypercalls.
-
8.22 KVM_CAP_S390_VCPU_RESETS
-----------------------------
@@ -8647,142 +8869,6 @@ In combination with KVM_CAP_X86_USER_SPACE_MSR, this allows user space to
trap and emulate MSRs that are outside of the scope of KVM as well as
limit the attack surface on KVM's MSR emulation code.
-8.28 KVM_CAP_ENFORCE_PV_FEATURE_CPUID
--------------------------------------
-
-Architectures: x86
-
-When enabled, KVM will disable paravirtual features provided to the
-guest according to the bits in the KVM_CPUID_FEATURES CPUID leaf
-(0x40000001). Otherwise, a guest may use the paravirtual features
-regardless of what has actually been exposed through the CPUID leaf.
-
-.. _KVM_CAP_DIRTY_LOG_RING:
-
-8.29 KVM_CAP_DIRTY_LOG_RING/KVM_CAP_DIRTY_LOG_RING_ACQ_REL
-----------------------------------------------------------
-
-:Architectures: x86, arm64
-:Parameters: args[0] - size of the dirty log ring
-
-KVM is capable of tracking dirty memory using ring buffers that are
-mmapped into userspace; there is one dirty ring per vcpu.
-
-The dirty ring is available to userspace as an array of
-``struct kvm_dirty_gfn``. Each dirty entry is defined as::
-
- struct kvm_dirty_gfn {
- __u32 flags;
- __u32 slot; /* as_id | slot_id */
- __u64 offset;
- };
-
-The following values are defined for the flags field to define the
-current state of the entry::
-
- #define KVM_DIRTY_GFN_F_DIRTY BIT(0)
- #define KVM_DIRTY_GFN_F_RESET BIT(1)
- #define KVM_DIRTY_GFN_F_MASK 0x3
-
-Userspace should call KVM_ENABLE_CAP ioctl right after KVM_CREATE_VM
-ioctl to enable this capability for the new guest and set the size of
-the rings. Enabling the capability is only allowed before creating any
-vCPU, and the size of the ring must be a power of two. The larger the
-ring buffer, the less likely the ring is full and the VM is forced to
-exit to userspace. The optimal size depends on the workload, but it is
-recommended that it be at least 64 KiB (4096 entries).
-
-Just like for dirty page bitmaps, the buffer tracks writes to
-all user memory regions for which the KVM_MEM_LOG_DIRTY_PAGES flag was
-set in KVM_SET_USER_MEMORY_REGION. Once a memory region is registered
-with the flag set, userspace can start harvesting dirty pages from the
-ring buffer.
-
-An entry in the ring buffer can be unused (flag bits ``00``),
-dirty (flag bits ``01``) or harvested (flag bits ``1X``). The
-state machine for the entry is as follows::
-
- dirtied harvested reset
- 00 -----------> 01 -------------> 1X -------+
- ^ |
- | |
- +------------------------------------------+
-
-To harvest the dirty pages, userspace accesses the mmapped ring buffer
-to read the dirty GFNs. If the flags has the DIRTY bit set (at this stage
-the RESET bit must be cleared), then it means this GFN is a dirty GFN.
-The userspace should harvest this GFN and mark the flags from state
-``01b`` to ``1Xb`` (bit 0 will be ignored by KVM, but bit 1 must be set
-to show that this GFN is harvested and waiting for a reset), and move
-on to the next GFN. The userspace should continue to do this until the
-flags of a GFN have the DIRTY bit cleared, meaning that it has harvested
-all the dirty GFNs that were available.
-
-Note that on weakly ordered architectures, userspace accesses to the
-ring buffer (and more specifically the 'flags' field) must be ordered,
-using load-acquire/store-release accessors when available, or any
-other memory barrier that will ensure this ordering.
-
-It's not necessary for userspace to harvest the all dirty GFNs at once.
-However it must collect the dirty GFNs in sequence, i.e., the userspace
-program cannot skip one dirty GFN to collect the one next to it.
-
-After processing one or more entries in the ring buffer, userspace
-calls the VM ioctl KVM_RESET_DIRTY_RINGS to notify the kernel about
-it, so that the kernel will reprotect those collected GFNs.
-Therefore, the ioctl must be called *before* reading the content of
-the dirty pages.
-
-The dirty ring can get full. When it happens, the KVM_RUN of the
-vcpu will return with exit reason KVM_EXIT_DIRTY_LOG_FULL.
-
-The dirty ring interface has a major difference comparing to the
-KVM_GET_DIRTY_LOG interface in that, when reading the dirty ring from
-userspace, it's still possible that the kernel has not yet flushed the
-processor's dirty page buffers into the kernel buffer (with dirty bitmaps, the
-flushing is done by the KVM_GET_DIRTY_LOG ioctl). To achieve that, one
-needs to kick the vcpu out of KVM_RUN using a signal. The resulting
-vmexit ensures that all dirty GFNs are flushed to the dirty rings.
-
-NOTE: KVM_CAP_DIRTY_LOG_RING_ACQ_REL is the only capability that
-should be exposed by weakly ordered architecture, in order to indicate
-the additional memory ordering requirements imposed on userspace when
-reading the state of an entry and mutating it from DIRTY to HARVESTED.
-Architecture with TSO-like ordering (such as x86) are allowed to
-expose both KVM_CAP_DIRTY_LOG_RING and KVM_CAP_DIRTY_LOG_RING_ACQ_REL
-to userspace.
-
-After enabling the dirty rings, the userspace needs to detect the
-capability of KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP to see whether the
-ring structures can be backed by per-slot bitmaps. With this capability
-advertised, it means the architecture can dirty guest pages without
-vcpu/ring context, so that some of the dirty information will still be
-maintained in the bitmap structure. KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP
-can't be enabled if the capability of KVM_CAP_DIRTY_LOG_RING_ACQ_REL
-hasn't been enabled, or any memslot has been existing.
-
-Note that the bitmap here is only a backup of the ring structure. The
-use of the ring and bitmap combination is only beneficial if there is
-only a very small amount of memory that is dirtied out of vcpu/ring
-context. Otherwise, the stand-alone per-slot bitmap mechanism needs to
-be considered.
-
-To collect dirty bits in the backup bitmap, userspace can use the same
-KVM_GET_DIRTY_LOG ioctl. KVM_CLEAR_DIRTY_LOG isn't needed as long as all
-the generation of the dirty bits is done in a single pass. Collecting
-the dirty bitmap should be the very last thing that the VMM does before
-considering the state as complete. VMM needs to ensure that the dirty
-state is final and avoid missing dirty pages from another ioctl ordered
-after the bitmap collection.
-
-NOTE: Multiple examples of using the backup bitmap: (1) save vgic/its
-tables through command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} on
-KVM device "kvm-arm-vgic-its". (2) restore vgic/its tables through
-command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_RESTORE_TABLES} on KVM device
-"kvm-arm-vgic-its". VGICv3 LPI pending status is restored. (3) save
-vgic3 pending table through KVM_DEV_ARM_VGIC_{GRP_CTRL, SAVE_PENDING_TABLES}
-command on KVM device "kvm-arm-vgic-v3".
-
8.30 KVM_CAP_XEN_HVM
--------------------
@@ -8847,10 +8933,9 @@ clearing the PVCLOCK_TSC_STABLE_BIT flag in Xen pvclock sources. This will be
done when the KVM_CAP_XEN_HVM ioctl sets the
KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE flag.
-8.31 KVM_CAP_PPC_MULTITCE
--------------------------
+8.31 KVM_CAP_SPAPR_MULTITCE
+---------------------------
-:Capability: KVM_CAP_PPC_MULTITCE
:Architectures: ppc
:Type: vm
@@ -8882,72 +8967,9 @@ This capability indicates that the KVM virtual PTP service is
supported in the host. A VMM can check whether the service is
available to the guest on migration.
-8.33 KVM_CAP_HYPERV_ENFORCE_CPUID
----------------------------------
-
-Architectures: x86
-
-When enabled, KVM will disable emulated Hyper-V features provided to the
-guest according to the bits Hyper-V CPUID feature leaves. Otherwise, all
-currently implemented Hyper-V features are provided unconditionally when
-Hyper-V identification is set in the HYPERV_CPUID_INTERFACE (0x40000001)
-leaf.
-
-8.34 KVM_CAP_EXIT_HYPERCALL
----------------------------
-
-:Capability: KVM_CAP_EXIT_HYPERCALL
-:Architectures: x86
-:Type: vm
-
-This capability, if enabled, will cause KVM to exit to userspace
-with KVM_EXIT_HYPERCALL exit reason to process some hypercalls.
-
-Calling KVM_CHECK_EXTENSION for this capability will return a bitmask
-of hypercalls that can be configured to exit to userspace.
-Right now, the only such hypercall is KVM_HC_MAP_GPA_RANGE.
-
-The argument to KVM_ENABLE_CAP is also a bitmask, and must be a subset
-of the result of KVM_CHECK_EXTENSION. KVM will forward to userspace
-the hypercalls whose corresponding bit is in the argument, and return
-ENOSYS for the others.
-
-8.35 KVM_CAP_PMU_CAPABILITY
----------------------------
-
-:Capability: KVM_CAP_PMU_CAPABILITY
-:Architectures: x86
-:Type: vm
-:Parameters: arg[0] is bitmask of PMU virtualization capabilities.
-:Returns: 0 on success, -EINVAL when arg[0] contains invalid bits
-
-This capability alters PMU virtualization in KVM.
-
-Calling KVM_CHECK_EXTENSION for this capability returns a bitmask of
-PMU virtualization capabilities that can be adjusted on a VM.
-
-The argument to KVM_ENABLE_CAP is also a bitmask and selects specific
-PMU virtualization capabilities to be applied to the VM. This can
-only be invoked on a VM prior to the creation of VCPUs.
-
-At this time, KVM_PMU_CAP_DISABLE is the only capability. Setting
-this capability will disable PMU virtualization for that VM. Usermode
-should adjust CPUID leaf 0xA to reflect that the PMU is disabled.
-
-8.36 KVM_CAP_ARM_SYSTEM_SUSPEND
--------------------------------
-
-:Capability: KVM_CAP_ARM_SYSTEM_SUSPEND
-:Architectures: arm64
-:Type: vm
-
-When enabled, KVM will exit to userspace with KVM_EXIT_SYSTEM_EVENT of
-type KVM_SYSTEM_EVENT_SUSPEND to process the guest suspend request.
-
8.37 KVM_CAP_S390_PROTECTED_DUMP
--------------------------------
-:Capability: KVM_CAP_S390_PROTECTED_DUMP
:Architectures: s390
:Type: vm
@@ -8957,27 +8979,9 @@ PV guests. The `KVM_PV_DUMP` command is available for the
dump related UV data. Also the vcpu ioctl `KVM_S390_PV_CPU_COMMAND` is
available and supports the `KVM_PV_DUMP_CPU` subcommand.
-8.38 KVM_CAP_VM_DISABLE_NX_HUGE_PAGES
--------------------------------------
-
-:Capability: KVM_CAP_VM_DISABLE_NX_HUGE_PAGES
-:Architectures: x86
-:Type: vm
-:Parameters: arg[0] must be 0.
-:Returns: 0 on success, -EPERM if the userspace process does not
- have CAP_SYS_BOOT, -EINVAL if args[0] is not 0 or any vCPUs have been
- created.
-
-This capability disables the NX huge pages mitigation for iTLB MULTIHIT.
-
-The capability has no effect if the nx_huge_pages module parameter is not set.
-
-This capability may only be set before any vCPUs are created.
-
8.39 KVM_CAP_S390_CPU_TOPOLOGY
------------------------------
-:Capability: KVM_CAP_S390_CPU_TOPOLOGY
:Architectures: s390
:Type: vm
@@ -8999,37 +9003,9 @@ structure.
When getting the Modified Change Topology Report value, the attr->addr
must point to a byte where the value will be stored or retrieved from.
-8.40 KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
----------------------------------------
-
-:Capability: KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
-:Architectures: arm64
-:Type: vm
-:Parameters: arg[0] is the new split chunk size.
-:Returns: 0 on success, -EINVAL if any memslot was already created.
-
-This capability sets the chunk size used in Eager Page Splitting.
-
-Eager Page Splitting improves the performance of dirty-logging (used
-in live migrations) when guest memory is backed by huge-pages. It
-avoids splitting huge-pages (into PAGE_SIZE pages) on fault, by doing
-it eagerly when enabling dirty logging (with the
-KVM_MEM_LOG_DIRTY_PAGES flag for a memory region), or when using
-KVM_CLEAR_DIRTY_LOG.
-
-The chunk size specifies how many pages to break at a time, using a
-single allocation for each chunk. Bigger the chunk size, more pages
-need to be allocated ahead of time.
-
-The chunk size needs to be a valid block size. The list of acceptable
-block sizes is exposed in KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES as a
-64-bit bitmap (each bit describing a block size). The default value is
-0, to disable the eager page splitting.
-
8.41 KVM_CAP_VM_TYPES
---------------------
-:Capability: KVM_CAP_MEMORY_ATTRIBUTES
:Architectures: x86
:Type: system ioctl
@@ -9046,6 +9022,67 @@ Do not use KVM_X86_SW_PROTECTED_VM for "real" VMs, and especially not in
production. The behavior and effective ABI for software-protected VMs is
unstable.
+8.42 KVM_CAP_PPC_RPT_INVALIDATE
+-------------------------------
+
+:Architectures: ppc
+
+This capability indicates that the kernel is capable of handling the
+H_RPT_INVALIDATE hcall.
+
+In order to enable the use of H_RPT_INVALIDATE in the guest,
+user space might have to advertise it for the guest. For example,
+an IBM pSeries (sPAPR) guest starts using it if "hcall-rpt-invalidate" is
+present in the "ibm,hypertas-functions" device-tree property.
+
+This capability is enabled for hypervisors on platforms like POWER9
+that support radix MMU.
+
+8.43 KVM_CAP_PPC_AIL_MODE_3
+---------------------------
+
+:Architectures: ppc
+
+This capability indicates that the kernel supports the mode 3 setting for the
+"Address Translation Mode on Interrupt" aka "Alternate Interrupt Location"
+resource that is controlled with the H_SET_MODE hypercall.
+
+This capability allows a guest kernel to use a better-performance mode for
+handling interrupts and system calls.
+
+8.44 KVM_CAP_MEMORY_FAULT_INFO
+------------------------------
+
+:Architectures: x86
+
+The presence of this capability indicates that KVM_RUN will fill
+kvm_run.memory_fault if KVM cannot resolve a guest page fault VM-Exit, e.g. if
+there is a valid memslot but no backing VMA for the corresponding host virtual
+address.
+
+The information in kvm_run.memory_fault is valid if and only if KVM_RUN returns
+an error with errno=EFAULT or errno=EHWPOISON *and* kvm_run.exit_reason is set
+to KVM_EXIT_MEMORY_FAULT.
+
+Note: Userspaces which attempt to resolve memory faults so that they can retry
+KVM_RUN are encouraged to guard against repeatedly receiving the same
+error/annotated fault.
+
+See KVM_EXIT_MEMORY_FAULT for more information.
+
+8.45 KVM_CAP_X86_GUEST_MODE
+---------------------------
+
+:Architectures: x86
+
+The presence of this capability indicates that KVM_RUN will update the
+KVM_RUN_X86_GUEST_MODE bit in kvm_run.flags to indicate whether the
+vCPU was executing nested guest code when it exited.
+
+KVM exits with the register state of either the L1 or L2 guest
+depending on which executed at the time of an exit. Userspace must
+take care to differentiate between these cases.
+
9. Known KVM API problems
=========================
@@ -9076,9 +9113,10 @@ the local APIC.
The same is true for the ``KVM_FEATURE_PV_UNHALT`` paravirtualized feature.
-CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``.
-It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel
-has enabled in-kernel emulation of the local APIC.
+On older versions of Linux, CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by
+``KVM_GET_SUPPORTED_CPUID``, but it can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER``
+is present and the kernel has enabled in-kernel emulation of the local APIC.
+On newer versions, ``KVM_GET_SUPPORTED_CPUID`` does report the bit as available.
CPU topology
~~~~~~~~~~~~
diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst
index 31a9576c07af..60bf205cb373 100644
--- a/Documentation/virt/kvm/devices/vcpu.rst
+++ b/Documentation/virt/kvm/devices/vcpu.rst
@@ -137,6 +137,30 @@ exit_reason = KVM_EXIT_FAIL_ENTRY and populate the fail_entry struct by setting
hardware_entry_failure_reason field to KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED and
the cpu field to the processor id.
+1.5 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS
+--------------------------------------------------
+
+:Parameters: in kvm_device_attr.addr the address to an unsigned int
+ representing the maximum value taken by PMCR_EL0.N
+
+:Returns:
+
+ ======= ====================================================
+ -EBUSY PMUv3 already initialized, a VCPU has already run or
+ an event filter has already been set
+ -EFAULT Error accessing the value pointed to by addr
+ -ENODEV PMUv3 not supported or GIC not initialized
+ -EINVAL No PMUv3 explicitly selected, or value of N out of
+ range
+ ======= ====================================================
+
+Set the number of implemented event counters in the virtual PMU. This
+mandates that a PMU has explicitly been selected via
+KVM_ARM_VCPU_PMU_V3_SET_PMU, and will fail when no PMU has been
+explicitly selected, or the number of counters is out of range for the
+selected PMU. Selecting a new PMU cancels the effect of setting this
+attribute.
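+
+For illustration (not part of the attribute definition), setting the limit to
+six counters on an already configured PMU might be sketched as follows
+(``vcpu_fd`` is an assumed vCPU file descriptor)::
+
+  unsigned int nr_counters = 6;
+  struct kvm_device_attr attr = {
+      .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+      .attr  = KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS,
+      .addr  = (__u64)(unsigned long)&nr_counters,
+  };
+
+  /* After KVM_ARM_VCPU_PMU_V3_SET_PMU and before the vCPU first runs. */
+  if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
+      perror("KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS");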
+
2. GROUP: KVM_ARM_VCPU_TIMER_CTRL
=================================
diff --git a/Documentation/virt/kvm/x86/index.rst b/Documentation/virt/kvm/x86/index.rst
index 9ece6b8dc817..851e99174762 100644
--- a/Documentation/virt/kvm/x86/index.rst
+++ b/Documentation/virt/kvm/x86/index.rst
@@ -11,6 +11,7 @@ KVM for x86 systems
cpuid
errata
hypercalls
+ intel-tdx
mmu
msr
nested-vmx
diff --git a/Documentation/virt/kvm/x86/intel-tdx.rst b/Documentation/virt/kvm/x86/intel-tdx.rst
new file mode 100644
index 000000000000..76bdd95334d6
--- /dev/null
+++ b/Documentation/virt/kvm/x86/intel-tdx.rst
@@ -0,0 +1,255 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================
+Intel Trust Domain Extensions (TDX)
+===================================
+
+Overview
+========
+Intel's Trust Domain Extensions (TDX) protect confidential guest VMs from the
+host and from physical attacks. A CPU-attested software module called 'the TDX
+module' runs inside a new CPU isolated range to provide the functionality to
+manage and run protected VMs, a.k.a. TDX guests or TDs.
+
+Please refer to [1] for the whitepaper, specifications and other resources.
+
+This documentation describes TDX-specific KVM ABIs. The TDX module needs to be
+initialized before it can be used by KVM to run any TDX guests. The host
+core kernel provides support for initializing the TDX module, which is
+described in Documentation/arch/x86/tdx.rst.
+
+API description
+===============
+
+KVM_MEMORY_ENCRYPT_OP
+---------------------
+:Type: vm ioctl, vcpu ioctl
+
+For TDX operations, KVM_MEMORY_ENCRYPT_OP is re-purposed to be a generic
+ioctl with TDX-specific sub-ioctl() commands.
+
+::
+
+ /* Trust Domain Extensions sub-ioctl() commands. */
+ enum kvm_tdx_cmd_id {
+ KVM_TDX_CAPABILITIES = 0,
+ KVM_TDX_INIT_VM,
+ KVM_TDX_INIT_VCPU,
+ KVM_TDX_INIT_MEM_REGION,
+ KVM_TDX_FINALIZE_VM,
+ KVM_TDX_GET_CPUID,
+
+ KVM_TDX_CMD_NR_MAX,
+ };
+
+ struct kvm_tdx_cmd {
+ /* enum kvm_tdx_cmd_id */
+ __u32 id;
+ /* flags for sub-command. If sub-command doesn't use this, set zero. */
+ __u32 flags;
+ /*
+ * data for each sub-command. An immediate or a pointer to the actual
+ * data in process virtual address. If sub-command doesn't use it,
+ * set zero.
+ */
+ __u64 data;
+ /*
+ * Auxiliary error code. The sub-command may return TDX SEAMCALL
+ * status code in addition to -Exxx.
+ */
+ __u64 hw_error;
+ };
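+
+A hedged sketch of how these sub-commands are issued through
+KVM_MEMORY_ENCRYPT_OP (the ``tdx_ioctl`` helper is illustrative only; ``fd``
+is a VM or vCPU file descriptor depending on the sub-command)::
+
+  static int tdx_ioctl(int fd, __u32 id, __u32 flags, void *data)
+  {
+      struct kvm_tdx_cmd cmd = {
+          .id    = id,
+          .flags = flags,
+          .data  = (__u64)(unsigned long)data,
+      };
+      int r = ioctl(fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
+
+      if (r)
+          fprintf(stderr, "TDX cmd %u: %d (hw_error 0x%llx)\n",
+                  id, r, (unsigned long long)cmd.hw_error);
+      return r;
+  }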
+
+KVM_TDX_CAPABILITIES
+--------------------
+:Type: vm ioctl
+:Returns: 0 on success, <0 on error
+
+Return the TDX capabilities that the current KVM supports with the specific TDX
+module loaded in the system. It reports what features/capabilities are allowed
+to be configured for the TDX guest.
+
+- id: KVM_TDX_CAPABILITIES
+- flags: must be 0
+- data: pointer to struct kvm_tdx_capabilities
+- hw_error: must be 0
+
+::
+
+ struct kvm_tdx_capabilities {
+ __u64 supported_attrs;
+ __u64 supported_xfam;
+ __u64 reserved[254];
+
+ /* Configurable CPUID bits for userspace */
+ struct kvm_cpuid2 cpuid;
+ };
+
+
+KVM_TDX_INIT_VM
+---------------
+:Type: vm ioctl
+:Returns: 0 on success, <0 on error
+
+Perform TDX specific VM initialization. This needs to be called after
+KVM_CREATE_VM and before creating any VCPUs.
+
+- id: KVM_TDX_INIT_VM
+- flags: must be 0
+- data: pointer to struct kvm_tdx_init_vm
+- hw_error: must be 0
+
+::
+
+ struct kvm_tdx_init_vm {
+ __u64 attributes;
+ __u64 xfam;
+ __u64 mrconfigid[6]; /* sha384 digest */
+ __u64 mrowner[6]; /* sha384 digest */
+ __u64 mrownerconfig[6]; /* sha384 digest */
+
+ /* The total space for TD_PARAMS before the CPUIDs is 256 bytes */
+ __u64 reserved[12];
+
+ /*
+ * Call KVM_TDX_INIT_VM before vcpu creation, thus before
+ * KVM_SET_CPUID2.
+ * This configuration supersedes KVM_SET_CPUID2s for VCPUs because the
+ * TDX module directly virtualizes those CPUIDs without VMM. The user
+ * space VMM, e.g. qemu, should make KVM_SET_CPUID2 consistent with
+ * those values. If it doesn't, KVM may have wrong idea of vCPUIDs of
+ * the guest, and KVM may wrongly emulate CPUIDs or MSRs that the TDX
+ * module doesn't virtualize.
+ */
+ struct kvm_cpuid2 cpuid;
+ };
+
+
+KVM_TDX_INIT_VCPU
+-----------------
+:Type: vcpu ioctl
+:Returns: 0 on success, <0 on error
+
+Perform TDX specific VCPU initialization.
+
+- id: KVM_TDX_INIT_VCPU
+- flags: must be 0
+- data: initial value of the guest TD VCPU RCX
+- hw_error: must be 0
+
+KVM_TDX_INIT_MEM_REGION
+-----------------------
+:Type: vcpu ioctl
+:Returns: 0 on success, <0 on error
+
+Initialize @nr_pages pages of TDX guest private memory starting from @gpa with
+userspace-provided data from @source_addr.
+
+Note, before calling this sub-command, the memory attribute of the range
+[gpa, gpa + nr_pages] needs to be private. Userspace can use
+KVM_SET_MEMORY_ATTRIBUTES to set the attribute.
+
+If the KVM_TDX_MEASURE_MEMORY_REGION flag is specified, the measurement is also
+extended.
+
+- id: KVM_TDX_INIT_MEM_REGION
+- flags: currently only KVM_TDX_MEASURE_MEMORY_REGION is defined
+- data: pointer to struct kvm_tdx_init_mem_region
+- hw_error: must be 0
+
+::
+
+ #define KVM_TDX_MEASURE_MEMORY_REGION (1UL << 0)
+
+ struct kvm_tdx_init_mem_region {
+ __u64 source_addr;
+ __u64 gpa;
+ __u64 nr_pages;
+ };
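+
+A sketch of populating one private range, assuming the illustrative
+``tdx_ioctl`` helper from the KVM_MEMORY_ENCRYPT_OP section and that a
+guest_memfd-backed memslot covering ``gpa`` already exists (``vm_fd``,
+``vcpu_fd``, ``gpa``, ``nr_pages`` and ``src_buf`` are assumptions)::
+
+  struct kvm_memory_attributes attrs = {
+      .address    = gpa,
+      .size       = nr_pages * 4096,
+      .attributes = KVM_MEMORY_ATTRIBUTE_PRIVATE,
+  };
+  struct kvm_tdx_init_mem_region region = {
+      .source_addr = (__u64)(unsigned long)src_buf,
+      .gpa         = gpa,
+      .nr_pages    = nr_pages,
+  };
+
+  ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
+  /* This is a vcpu ioctl; measurement extension is optional. */
+  tdx_ioctl(vcpu_fd, KVM_TDX_INIT_MEM_REGION,
+            KVM_TDX_MEASURE_MEMORY_REGION, &region);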
+
+
+KVM_TDX_FINALIZE_VM
+-------------------
+:Type: vm ioctl
+:Returns: 0 on success, <0 on error
+
+Complete measurement of the initial TD contents and mark it ready to run.
+
+- id: KVM_TDX_FINALIZE_VM
+- flags: must be 0
+- data: must be 0
+- hw_error: must be 0
+
+
+KVM_TDX_GET_CPUID
+-----------------
+:Type: vcpu ioctl
+:Returns: 0 on success, <0 on error
+
+Get the CPUID values that the TDX module virtualizes for the TD guest.
+When it returns -E2BIG, userspace should allocate a larger buffer and
+retry. The minimum buffer size is updated in the nent field of the
+struct kvm_cpuid2.
+
+- id: KVM_TDX_GET_CPUID
+- flags: must be 0
+- data: pointer to struct kvm_cpuid2 (in/out)
+- hw_error: must be 0 (out)
+
+::
+
+ struct kvm_cpuid2 {
+ __u32 nent;
+ __u32 padding;
+ struct kvm_cpuid_entry2 entries[0];
+ };
+
+ struct kvm_cpuid_entry2 {
+ __u32 function;
+ __u32 index;
+ __u32 flags;
+ __u32 eax;
+ __u32 ebx;
+ __u32 ecx;
+ __u32 edx;
+ __u32 padding[3];
+ };
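+
+A sketch of the -E2BIG retry described above (``vcpu_fd`` and the initial
+``nent`` guess are assumptions)::
+
+  struct kvm_tdx_cmd cmd = { .id = KVM_TDX_GET_CPUID };
+  struct kvm_cpuid2 *cpuid = NULL;
+  int nent = 64;
+
+  for (;;) {
+      free(cpuid);
+      cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(cpuid->entries[0]));
+      cpuid->nent = nent;
+      cmd.data = (__u64)(unsigned long)cpuid;
+      if (!ioctl(vcpu_fd, KVM_MEMORY_ENCRYPT_OP, &cmd))
+          break;                   /* success, cpuid->entries[] is valid */
+      if (errno != E2BIG)
+          exit(1);                 /* unexpected error */
+      nent = cpuid->nent;          /* minimum size reported by KVM */
+  }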
+
+KVM TDX creation flow
+=====================
+In addition to the standard KVM flow, new TDX ioctls need to be called. The
+control flow is as follows:
+
+#. Check system wide capability
+
+ * KVM_CAP_VM_TYPES: Check if VM type is supported and if KVM_X86_TDX_VM
+ is supported.
+
+#. Create VM
+
+ * KVM_CREATE_VM
+ * KVM_TDX_CAPABILITIES: Query TDX capabilities for creating TDX guests.
+ * KVM_CHECK_EXTENSION(KVM_CAP_MAX_VCPUS): Query maximum VCPUs the TD can
+ support at VM level (TDX has its own limitation on this).
+ * KVM_SET_TSC_KHZ: Configure TD's TSC frequency if a different TSC frequency
+    than the host is desired. This is optional.
+ * KVM_TDX_INIT_VM: Pass TDX specific VM parameters.
+
+#. Create VCPU
+
+ * KVM_CREATE_VCPU
+ * KVM_TDX_INIT_VCPU: Pass TDX specific VCPU parameters.
+ * KVM_SET_CPUID2: Configure TD's CPUIDs.
+ * KVM_SET_MSRS: Configure TD's MSRs.
+
+#. Initialize initial guest memory
+
+ * Prepare content of initial guest memory.
+ * KVM_TDX_INIT_MEM_REGION: Add initial guest memory.
+ * KVM_TDX_FINALIZE_VM: Finalize the measurement of the TDX guest.
+
+#. Run VCPU
+
+References
+==========
+
+https://www.intel.com/content/www/us/en/developer/tools/trust-domain-extensions/documentation.html
diff --git a/Documentation/wmi/devices/alienware-wmi.rst b/Documentation/wmi/devices/alienware-wmi.rst
index ddc5e561960e..1d9d43e2e314 100644
--- a/Documentation/wmi/devices/alienware-wmi.rst
+++ b/Documentation/wmi/devices/alienware-wmi.rst
@@ -11,7 +11,7 @@ The WMI device WMAX has been implemented for many Alienware and Dell's G-Series
models. Throughout these models, two implementations have been identified. The
first one, used by older systems, deals with HDMI, brightness, RGB, amplifier
and deep sleep control. The second one used by newer systems deals primarily
-with thermal, overclocking, and GPIO control.
+with thermal control and overclocking.
It is suspected that the latter is used by Alienware Command Center (AWCC) to
manage manufacturer predefined thermal profiles. The alienware-wmi driver
@@ -69,9 +69,6 @@ data using the `bmfdec <https://github.com/pali/bmfdec>`_ utility:
[WmiMethodId(164), Implemented, read, write, Description("Tobii Camera Power Off.")] void TobiiCameraPowerOff([out] uint32 argr);
};
-Some of these methods get quite intricate so we will describe them using
-pseudo-code that vaguely resembles the original ASL code.
-
Methods not described in the following document have unknown behavior.
Argument Structure
@@ -87,175 +84,133 @@ ID 0xA0, the argument you would pass to the method is 0xA001.
Thermal Methods
===============
-WMI method Thermal_Information([in] uint32 arg2, [out] uint32 argr)
--------------------------------------------------------------------
-
-::
-
- if BYTE_0(arg2) == 0x01:
- argr = 1
-
- if BYTE_0(arg2) == 0x02:
- argr = SYSTEM_DESCRIPTION
-
- if BYTE_0(arg2) == 0x03:
- if BYTE_1(arg2) == 0x00:
- argr = FAN_ID_0
-
- if BYTE_1(arg2) == 0x01:
- argr = FAN_ID_1
-
- if BYTE_1(arg2) == 0x02:
- argr = FAN_ID_2
-
- if BYTE_1(arg2) == 0x03:
- argr = FAN_ID_3
-
- if BYTE_1(arg2) == 0x04:
- argr = SENSOR_ID_CPU | 0x0100
-
- if BYTE_1(arg2) == 0x05:
- argr = SENSOR_ID_GPU | 0x0100
-
- if BYTE_1(arg2) == 0x06:
- argr = THERMAL_MODE_QUIET_ID
-
- if BYTE_1(arg2) == 0x07:
- argr = THERMAL_MODE_BALANCED_ID
-
- if BYTE_1(arg2) == 0x08:
- argr = THERMAL_MODE_BALANCED_PERFORMANCE_ID
-
- if BYTE_1(arg2) == 0x09:
- argr = THERMAL_MODE_PERFORMANCE_ID
-
- if BYTE_1(arg2) == 0x0A:
- argr = THERMAL_MODE_LOW_POWER_ID
-
- if BYTE_1(arg2) == 0x0B:
- argr = THERMAL_MODE_GMODE_ID
-
- else:
- argr = 0xFFFFFFFF
-
- if BYTE_0(arg2) == 0x04:
- if is_valid_sensor(BYTE_1(arg2)):
- argr = SENSOR_TEMP_C
- else:
- argr = 0xFFFFFFFF
-
- if BYTE_0(arg2) == 0x05:
- if is_valid_fan(BYTE_1(arg2)):
- argr = FAN_RPM()
-
- if BYTE_0(arg2) == 0x06:
- skip
-
- if BYTE_0(arg2) == 0x07:
- argr = 0
-
- If BYTE_0(arg2) == 0x08:
- if is_valid_fan(BYTE_1(arg2)):
- argr = 0
- else:
- argr = 0xFFFFFFFF
-
- if BYTE_0(arg2) == 0x09:
- if is_valid_fan(BYTE_1(arg2)):
- argr = FAN_UNKNOWN_STAT_0()
-
- else:
- argr = 0xFFFFFFFF
-
- if BYTE_0(arg2) == 0x0A:
- argr = THERMAL_MODE_BALANCED_ID
-
- if BYTE_0(arg2) == 0x0B:
- argr = CURRENT_THERMAL_MODE()
-
- if BYTE_0(arg2) == 0x0C:
- if is_valid_fan(BYTE_1(arg2)):
- argr = FAN_UNKNOWN_STAT_1()
- else:
- argr = 0xFFFFFFFF
-
-Operation 0x02 returns a *system description* buffer with the following
-structure:
-
-::
-
- out[0] -> Number of fans
- out[1] -> Number of sensors
- out[2] -> 0x00
- out[3] -> Number of thermal modes
+WMI method GetFanSensors([in] uint32 arg2, [out] uint32 argr)
+-------------------------------------------------------------
-Operation 0x03 list all available fan IDs, sensor IDs and thermal profile
-codes in order, but different models may have different number of fans and
-thermal profiles. These are the known ranges:
++--------------------+------------------------------------+--------------------+
+| Operation (Byte 0) | Description | Arguments |
++====================+====================================+====================+
+| 0x01 | Get the number of temperature | - Byte 1: Fan ID |
+| | sensors related with a fan ID | |
++--------------------+------------------------------------+--------------------+
+| 0x02 | Get the temperature sensor IDs | - Byte 1: Fan ID |
+| | related to a fan sensor ID | - Byte 2: Index |
++--------------------+------------------------------------+--------------------+
-* Fan IDs: from 2 up to 4
-* Sensor IDs: 2
-* Thermal profile codes: from 1 up to 7
+WMI method Thermal_Information([in] uint32 arg2, [out] uint32 argr)
+-------------------------------------------------------------------
-In total BYTE_1(ARG2) may range from 0x5 up to 0xD depending on the model.
++--------------------+------------------------------------+--------------------+
+| Operation (Byte 0) | Description | Arguments |
++====================+====================================+====================+
+| 0x01 | Unknown. | - None |
++--------------------+------------------------------------+--------------------+
+| 0x02 | Get system description number with | - None |
+| | the following structure: | |
+| | | |
+| | - Byte 0: Number of fans | |
+| | - Byte 1: Number of temperature | |
+| | sensors | |
+| | - Byte 2: Unknown | |
+| | - Byte 3: Number of thermal | |
+| | profiles | |
++--------------------+------------------------------------+--------------------+
+| 0x03 | List an ID or resource at a given | - Byte 1: Index |
+| | index. Fan IDs, temperature IDs, | |
+| | unknown IDs and thermal profile | |
+| | IDs are listed in that exact | |
+| | order. | |
+| | | |
+| | Operation 0x02 is used to know | |
+| | which indexes map to which | |
+| | resources. | |
+| | | |
+| | **Returns:** ID at a given index | |
++--------------------+------------------------------------+--------------------+
+| 0x04 | Get the current temperature for a | - Byte 1: Sensor |
+| | given temperature sensor. | ID |
++--------------------+------------------------------------+--------------------+
+| 0x05 | Get the current RPM for a given | - Byte 1: Fan ID |
+| | fan. | |
++--------------------+------------------------------------+--------------------+
+| 0x06 | Get fan speed percentage. (not | - Byte 1: Fan ID |
+| | implemented in every model) | |
++--------------------+------------------------------------+--------------------+
+| 0x07 | Unknown. | - Unknown |
++--------------------+------------------------------------+--------------------+
+| 0x08 | Get minimum RPM for a given FAN | - Byte 1: Fan ID |
+| | ID. | |
++--------------------+------------------------------------+--------------------+
+| 0x09 | Get maximum RPM for a given FAN | - Byte 1: Fan ID |
+| | ID. | |
++--------------------+------------------------------------+--------------------+
+| 0x0A | Get balanced thermal profile ID. | - None |
++--------------------+------------------------------------+--------------------+
+| 0x0B | Get current thermal profile ID. | - None |
++--------------------+------------------------------------+--------------------+
+| 0x0C | Get current `boost` value for a | - Byte 1: Fan ID |
+| | given fan ID. | |
++--------------------+------------------------------------+--------------------+
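+
+Combining the argument structure described earlier with the table above,
+reading a temperature sensor can be sketched as follows (``sensor_id`` comes
+from operations 0x02/0x03; the snippet is illustrative, not driver code)::
+
+  /* Byte 0: operation 0x04, byte 1: sensor ID. */
+  u32 arg = 0x04 | (sensor_id << 8);
+
+  /* e.g. sensor_id == 0x01  ->  arg == 0x0104 */
+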
WMI method Thermal_Control([in] uint32 arg2, [out] uint32 argr)
---------------------------------------------------------------
-::
-
- if BYTE_0(arg2) == 0x01:
- if is_valid_thermal_profile(BYTE_1(arg2)):
- SET_THERMAL_PROFILE(BYTE_1(arg2))
- argr = 0
-
- if BYTE_0(arg2) == 0x02:
- if is_valid_fan(BYTE_1(arg2)):
- SET_FAN_SPEED_MULTIPLIER(BYTE_2(arg2))
- argr = 0
- else:
- argr = 0xFFFFFFFF
-
-.. note::
- While you can manually change the fan speed multiplier with this method,
- Dell's BIOS tends to overwrite this changes anyway.
++--------------------+------------------------------------+--------------------+
+| Operation (Byte 0) | Description | Arguments |
++====================+====================================+====================+
+| 0x01 | Activate a given thermal profile. | - Byte 1: Thermal |
+| | | profile ID |
++--------------------+------------------------------------+--------------------+
+| 0x02 | Set a `boost` value for a given | - Byte 1: Fan ID |
+| | fan ID. | - Byte 2: Boost |
++--------------------+------------------------------------+--------------------+
These are the known thermal profile codes:
-::
-
- CUSTOM 0x00
-
- BALANCED_USTT 0xA0
- BALANCED_PERFORMANCE_USTT 0xA1
- COOL_USTT 0xA2
- QUIET_USTT 0xA3
- PERFORMANCE_USTT 0xA4
- LOW_POWER_USTT 0xA5
-
- QUIET 0x96
- BALANCED 0x97
- BALANCED_PERFORMANCE 0x98
- PERFORMANCE 0x99
-
- GMODE 0xAB
-
-Usually if a model doesn't support the first four profiles they will support
-the User Selectable Thermal Tables (USTT) profiles and vice-versa.
-
-GMODE replaces PERFORMANCE in G-Series laptops.
++------------------------------+----------+------+
+| Thermal Profile | Type | ID |
++==============================+==========+======+
+| Custom | Special | 0x00 |
++------------------------------+----------+------+
+| G-Mode | Special | 0xAB |
++------------------------------+----------+------+
+| Quiet | Legacy | 0x96 |
++------------------------------+----------+------+
+| Balanced | Legacy | 0x97 |
++------------------------------+----------+------+
+| Balanced Performance | Legacy | 0x98 |
++------------------------------+----------+------+
+| Performance | Legacy | 0x99 |
++------------------------------+----------+------+
+| Balanced | USTT | 0xA0 |
++------------------------------+----------+------+
+| Balanced Performance | USTT | 0xA1 |
++------------------------------+----------+------+
+| Cool | USTT | 0xA2 |
++------------------------------+----------+------+
+| Quiet | USTT | 0xA3 |
++------------------------------+----------+------+
+| Performance | USTT | 0xA4 |
++------------------------------+----------+------+
+| Low Power | USTT | 0xA5 |
++------------------------------+----------+------+
+
+If a model supports the User Selectable Thermal Tables (USTT) profiles, it will
+not support the Legacy profiles and vice-versa.
+
+Every model supports the CUSTOM (0x00) thermal profile. GMODE replaces
+PERFORMANCE in G-Series laptops.
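+
+As an illustration (not part of the driver, and assuming the same byte packing
+as above), activating the USTT Performance profile and setting a fan ``boost``
+value could be encoded as follows; the fan ID and boost value are hypothetical
+and model specific::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            /* Operation 0x01: Byte 1 carries the thermal profile ID. */
+            unsigned int activate = 0x01 | (0xA4 << 8);
+
+            /* Operation 0x02: Byte 1 is the fan ID, Byte 2 the boost value. */
+            unsigned int fan_id = 0x32;
+            unsigned int boost = 0x64;
+            unsigned int set_boost = 0x02 | (fan_id << 8) | (boost << 16);
+
+            printf("activate profile: 0x%08x\n", activate);
+            printf("set boost:        0x%08x\n", set_boost);
+            return 0;
+    }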
WMI method GameShiftStatus([in] uint32 arg2, [out] uint32 argr)
---------------------------------------------------------------
-::
-
- if BYTE_0(arg2) == 0x1:
- TOGGLE_GAME_SHIFT()
- argr = GET_GAME_SHIFT_STATUS()
-
- if BYTE_0(arg2) == 0x2:
- argr = GET_GAME_SHIFT_STATUS()
++--------------------+------------------------------------+--------------------+
+| Operation (Byte 0) | Description | Arguments |
++====================+====================================+====================+
+| 0x01 | Toggle *Game Shift*. | - None |
++--------------------+------------------------------------+--------------------+
+| 0x02 | Get *Game Shift* status. | - None |
++--------------------+------------------------------------+--------------------+
Game Shift Status does not change the fan speed profile but it could be some
sort of CPU/GPU power profile. Benchmarks have not been done.
@@ -267,118 +222,82 @@ Thermal_Information does not list it.
G-key on Dell's G-Series laptops also changes Game Shift status, so both are
directly related.
-WMI method GetFanSensors([in] uint32 arg2, [out] uint32 argr)
--------------------------------------------------------------
-
-::
-
- if BYTE_0(arg2) == 0x1:
- if is_valid_fan(BYTE_1(arg2)):
- argr = 1
- else:
- argr = 0
-
- if BYTE_0(arg2) == 0x2:
- if is_valid_fan(BYTE_1(arg2)):
- if BYTE_2(arg2) == 0:
- argr == SENSOR_ID
- else
- argr == 0xFFFFFFFF
- else:
- argr = 0
-
Overclocking Methods
====================
-.. warning::
- These methods have not been tested and are only partially reverse
- engineered.
-
-WMI method Return_OverclockingReport([out] uint32 argr)
--------------------------------------------------------
-
-::
-
- CSMI (0xE3, 0x99)
- argr = 0
-
-CSMI is an unknown operation.
-
-WMI method Set_OCUIBIOSControl([in] uint32 arg2, [out] uint32 argr)
--------------------------------------------------------------------
-
-::
-
- CSMI (0xE3, 0x99)
- argr = 0
-
-CSMI is an unknown operation.
-
-WMI method Clear_OCFailSafeFlag([out] uint32 argr)
---------------------------------------------------
-
-::
-
- CSMI (0xE3, 0x99)
- argr = 0
-
-CSMI is an unknown operation.
-
-
WMI method MemoryOCControl([in] uint32 arg2, [out] uint32 argr)
---------------------------------------------------------------
AWCC supports memory overclocking, but this method is very intricate and has
not been deciphered yet.
-GPIO methods
-============
+GPIO control Methods
+====================
-These methods are probably related to some kind of firmware update system,
-through a GPIO device.
+Alienware and Dell G Series devices with the AWCC interface usually have an
+embedded STM32 RGB lighting controller with USB/HID capabilities. Its vendor
+ID is ``187c``, while its product ID may vary from model to model.
+
+The control of two GPIO pins of this MCU is exposed as WMI methods for debugging
+purposes.
+
++--------------+--------------------------------------------------------------+
+| Pin | Description |
++==============+===============================+==============================+
+| 0 | Device Firmware Update (DFU) | **HIGH**: Enables DFU mode |
+| | mode pin. | on next MCU boot. |
+| | +------------------------------+
+| | | **LOW**: Disables DFU mode |
+| | | on next MCU boot. |
++--------------+-------------------------------+------------------------------+
+| 1 | Negative Reset (NRST) pin. | **HIGH**: MCU is ON. |
+| | | |
+| | +------------------------------+
+| | | **LOW**: MCU is OFF. |
+| | | |
++--------------+-------------------------------+------------------------------+
+
+See :ref:`acknowledgements` for more information on this MCU.
-.. warning::
- These methods have not been tested and are only partially reverse
- engineered.
+.. note::
+   Some GPIO control methods break the usual argument structure and take a
+   **Pin number** in the first byte instead of an operation.
WMI method FWUpdateGPIOtoggle([in] uint32 arg2, [out] uint32 argr)
------------------------------------------------------------------
-::
-
- if BYTE_0(arg2) == 0:
- if BYTE_1(arg2) == 1:
- SET_PIN_A_HIGH()
- else:
- SET_PIN_A_LOW()
-
- if BYTE_0(arg2) == 1:
- if BYTE_1(arg2) == 1:
- SET_PIN_B_HIGH()
-
- else:
- SET_PIN_B_LOW()
-
- else:
- argr = 1
++--------------------+------------------------------------+--------------------+
+| Operation (Byte 0) | Description | Arguments |
++====================+====================================+====================+
+| Pin number | Set the pin status | - Byte 1: Pin |
+| | | status |
++--------------------+------------------------------------+--------------------+
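+
+For example (illustrative only, not taken from the driver), requesting DFU
+mode on the next MCU boot would mean driving pin 0 high, with the pin number
+in Byte 0 and the requested status in Byte 1::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            unsigned int pin = 0;           /* DFU mode pin */
+            unsigned int status = 1;        /* HIGH */
+            unsigned int arg2 = pin | (status << 8);
+
+            printf("FWUpdateGPIOtoggle arg2 = 0x%08x\n", arg2);
+            return 0;
+    }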
WMI method ReadTotalofGPIOs([out] uint32 argr)
----------------------------------------------
-::
++--------------------+------------------------------------+--------------------+
+| Operation (Byte 0) | Description | Arguments |
++====================+====================================+====================+
+| N/A | Get the total number of GPIOs | - None |
++--------------------+------------------------------------+--------------------+
- argr = 0x02
+.. note::
+ Due to how WMI methods are implemented on the firmware level, this method
+ requires a dummy uint32 input argument when invoked.
WMI method ReadGPIOpPinStatus([in] uint32 arg2, [out] uint32 argr)
------------------------------------------------------------------
-::
++--------------------+------------------------------------+--------------------+
+| Operation (Byte 0) | Description | Arguments |
++====================+====================================+====================+
+| Pin number | Get the pin status | - None |
++--------------------+------------------------------------+--------------------+
- if BYTE_0(arg2) == 0:
- argr = PIN_A_STATUS
-
- if BYTE_0(arg2) == 1:
- argr = PIN_B_STATUS
+.. note::
+   There is a known firmware bug in some laptops where reading the status of
+   a pin also flips it.
Other information Methods
=========================
@@ -386,12 +305,18 @@ Other information Methods
WMI method ReadChassisColor([out] uint32 argr)
----------------------------------------------
-::
+Returns the chassis color internal ID.
- argr = CHASSIS_COLOR_ID
+.. _acknowledgements:
Acknowledgements
================
-Kudos to `AlexIII <https://github.com/AlexIII/tcc-g15>`_ for documenting
-and testing available thermal profile codes.
+Kudos to
+
+* `AlexIII <https://github.com/AlexIII/tcc-g15>`_
+* `T-Troll <https://github.com/T-Troll/alienfx-tools/>`_
+* `Gabriel Marcano <https://gabriel.marcanobrady.family/blog/2024/12/16/dell-g5-5505-se-acpi-or-figuring-out-how-to-reset-the-rgb-controller/>`_
+
+for documenting and testing some of this device's functionality, making it
+possible to generalize this driver.
diff --git a/Documentation/wmi/devices/dell-wmi-ddv.rst b/Documentation/wmi/devices/dell-wmi-ddv.rst
index e0c20af30948..109d4c5c922e 100644
--- a/Documentation/wmi/devices/dell-wmi-ddv.rst
+++ b/Documentation/wmi/devices/dell-wmi-ddv.rst
@@ -118,9 +118,6 @@ The date is encoded in the following manner:
- bits 5 to 8 contain the manufacture month.
- bits 9 to 15 contain the manufacture year biased by 1980.
-.. note::
- The data format needs to be verified on more machines.
-
WMI method BatterySerialNumber()
--------------------------------
@@ -153,7 +150,40 @@ Returns the voltage flow of the battery in mV as an u16.
WMI method BatteryManufactureAccess()
-------------------------------------
-Returns a manufacture-defined value as an u16.
+Returns the health status of the battery as a u16.
+The health status is encoded in the following manner:
+
+ - the third nibble contains the general failure mode
+ - the fourth nibble contains the specific failure code
+
+Valid failure modes are:
+
+ - permanent failure (``0x9``)
+ - overheat failure (``0xa``)
+ - overcurrent failure (``0xb``)
+
+All other failure modes are to be considered normal.
+
+The following failure codes are valid for a permanent failure:
+
+ - fuse blown (``0x0``)
+ - cell imbalance (``0x1``)
+ - overvoltage (``0x2``)
+ - fet failure (``0x3``)
+
+The last two bits of the failure code are to be ignored when the battery
+signals a permanent failure.
+
+The following failure codes are valid for an overheat failure:
+
+ - overheat at start of charging (``0x5``)
+ - overheat during charging (``0x7``)
+ - overheat during discharging (``0x8``)
+
+The following failure codes are valid for an overcurrent failure:
+
+ - overcurrent during charging (``0x6``)
+ - overcurrent during discharging (``0xb``)
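+
+A hedged decoding sketch (not part of the driver) follows. It assumes the
+nibbles are counted from the least significant end, i.e. the failure mode sits
+in bits 8-11 and the failure code in bits 12-15 of the returned u16, and that
+"ignore the last two bits" means masking off the upper two bits of the code
+nibble::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            unsigned int health = 0x3900;           /* hypothetical sample */
+            unsigned int mode = (health >> 8) & 0xf;
+            unsigned int code = (health >> 12) & 0xf;
+
+            if (mode == 0x9)
+                    code &= 0x3;    /* permanent failure: keep two LSBs only */
+
+            printf("failure mode 0x%x, failure code 0x%x\n", mode, code);
+            return 0;
+    }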
WMI method BatteryRelativeStateOfCharge()
-----------------------------------------
@@ -260,14 +290,6 @@ Some machines like the Dell Inspiron 3505 only support a single battery and thus
ignore the battery index. Because of this the driver depends on the ACPI battery
hook mechanism to discover batteries.
-.. note::
- The ACPI battery matching algorithm currently used inside the driver is
- outdated and does not match the algorithm described above. The reasons for
- this are differences in the handling of the ToHexString() ACPI opcode between
- Linux and Windows, which distorts the serial number of ACPI batteries on many
- machines. Until this issue is resolved, the driver cannot use the above
- algorithm.
-
Reverse-Engineering the DDV WMI interface
=========================================
diff --git a/Documentation/wmi/devices/msi-wmi-platform.rst b/Documentation/wmi/devices/msi-wmi-platform.rst
index 31a136942892..73197b31926a 100644
--- a/Documentation/wmi/devices/msi-wmi-platform.rst
+++ b/Documentation/wmi/devices/msi-wmi-platform.rst
@@ -138,6 +138,10 @@ input data, the meaning of which depends on the subfeature being accessed.
The output buffer contains a single byte which signals success or failure (``0x00`` on failure)
and 31 bytes of output data, the meaning if which depends on the subfeature being accessed.
+.. note::
+ The ACPI control method responsible for handling the WMI method calls is not thread-safe.
+ This is a firmware bug that needs to be handled inside the driver itself.
+
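+A hedged sketch of what such serialization might look like (the lock and the
+helper are hypothetical, not the actual driver code)::
+
+    #include <linux/mutex.h>
+    #include <linux/wmi.h>
+
+    /* Hypothetical driver-private lock guarding every WMI call. */
+    static DEFINE_MUTEX(msi_wmi_lock);
+
+    static acpi_status msi_wmi_call(struct wmi_device *wdev, u32 method_id,
+                                    struct acpi_buffer *in,
+                                    struct acpi_buffer *out)
+    {
+            acpi_status status;
+
+            mutex_lock(&msi_wmi_lock);
+            status = wmidev_evaluate_method(wdev, 0x0, method_id, in, out);
+            mutex_unlock(&msi_wmi_lock);
+
+            return status;
+    }
+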
WMI method Get_EC()
-------------------
diff --git a/LICENSES/deprecated/CC0-1.0 b/LICENSES/deprecated/CC0-1.0
new file mode 100644
index 000000000000..d6054d574d50
--- /dev/null
+++ b/LICENSES/deprecated/CC0-1.0
@@ -0,0 +1,129 @@
+Valid-License-Identifier: CC0-1.0
+SPDX-URL: https://spdx.org/licenses/CC0-1.0.html
+Usage-Guide:
+ To use the Creative Commons Zero v1.0 Universal License put the
+ following SPDX tag/value pair into a comment according to the
+ placement guidelines in the licensing rules documentation:
+ SPDX-License-Identifier: CC0-1.0
+License-Text:
+Creative Commons Legal Code
+
+CC0 1.0 Universal
+
+ CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
+ LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
+ ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
+ INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
+ REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
+ PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
+ THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
+ HEREUNDER.
+
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer
+exclusive Copyright and Related Rights (defined below) upon the creator
+and subsequent owner(s) (each and all, an "owner") of an original work of
+authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for
+the purpose of contributing to a commons of creative, cultural and
+scientific works ("Commons") that the public can reliably and without fear
+of later claims of infringement build upon, modify, incorporate in other
+works, reuse and redistribute as freely as possible in any form whatsoever
+and for any purposes, including without limitation commercial purposes.
+These owners may contribute to the Commons to promote the ideal of a free
+culture and the further production of creative, cultural and scientific
+works, or to gain reputation or greater distribution for their Work in
+part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any
+expectation of additional consideration or compensation, the person
+associating CC0 with a Work (the "Affirmer"), to the extent that he or she
+is an owner of Copyright and Related Rights in the Work, voluntarily
+elects to apply CC0 to the Work and publicly distribute the Work under its
+terms, with knowledge of his or her Copyright and Related Rights in the
+Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be
+protected by copyright and related or neighboring rights ("Copyright and
+Related Rights"). Copyright and Related Rights include, but are not
+limited to, the following:
+
+ i. the right to reproduce, adapt, distribute, perform, display,
+ communicate, and translate a Work;
+ ii. moral rights retained by the original author(s) and/or performer(s);
+iii. publicity and privacy rights pertaining to a person's image or
+ likeness depicted in a Work;
+ iv. rights protecting against unfair competition in regards to a Work,
+ subject to the limitations in paragraph 4(a), below;
+ v. rights protecting the extraction, dissemination, use and reuse of data
+ in a Work;
+ vi. database rights (such as those arising under Directive 96/9/EC of the
+ European Parliament and of the Council of 11 March 1996 on the legal
+ protection of databases, and under any national implementation
+ thereof, including any amended or successor version of such
+ directive); and
+vii. other similar, equivalent or corresponding rights throughout the
+ world based on applicable law or treaty, and any national
+ implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention
+of, applicable law, Affirmer hereby overtly, fully, permanently,
+irrevocably and unconditionally waives, abandons, and surrenders all of
+Affirmer's Copyright and Related Rights and associated claims and causes
+of action, whether now known or unknown (including existing as well as
+future claims and causes of action), in the Work (i) in all territories
+worldwide, (ii) for the maximum duration provided by applicable law or
+treaty (including future time extensions), (iii) in any current or future
+medium and for any number of copies, and (iv) for any purpose whatsoever,
+including without limitation commercial, advertising or promotional
+purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
+member of the public at large and to the detriment of Affirmer's heirs and
+successors, fully intending that such Waiver shall not be subject to
+revocation, rescission, cancellation, termination, or any other legal or
+equitable action to disrupt the quiet enjoyment of the Work by the public
+as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason
+be judged legally invalid or ineffective under applicable law, then the
+Waiver shall be preserved to the maximum extent permitted taking into
+account Affirmer's express Statement of Purpose. In addition, to the
+extent the Waiver is so judged Affirmer hereby grants to each affected
+person a royalty-free, non transferable, non sublicensable, non exclusive,
+irrevocable and unconditional license to exercise Affirmer's Copyright and
+Related Rights in the Work (i) in all territories worldwide, (ii) for the
+maximum duration provided by applicable law or treaty (including future
+time extensions), (iii) in any current or future medium and for any number
+of copies, and (iv) for any purpose whatsoever, including without
+limitation commercial, advertising or promotional purposes (the
+"License"). The License shall be deemed effective as of the date CC0 was
+applied by Affirmer to the Work. Should any part of the License for any
+reason be judged legally invalid or ineffective under applicable law, such
+partial invalidity or ineffectiveness shall not invalidate the remainder
+of the License, and in such case Affirmer hereby affirms that he or she
+will not (i) exercise any of his or her remaining Copyright and Related
+Rights in the Work or (ii) assert any associated claims and causes of
+action with respect to the Work, in either case contrary to Affirmer's
+express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ a. No trademark or patent rights held by Affirmer are waived, abandoned,
+ surrendered, licensed or otherwise affected by this document.
+ b. Affirmer offers the Work as-is and makes no representations or
+ warranties of any kind concerning the Work, express, implied,
+ statutory or otherwise, including without limitation warranties of
+ title, merchantability, fitness for a particular purpose, non
+ infringement, or the absence of latent or other defects, accuracy, or
+ the present or absence of errors, whether or not discoverable, all to
+ the greatest extent permissible under applicable law.
+ c. Affirmer disclaims responsibility for clearing rights of other persons
+ that may apply to the Work or any use thereof, including without
+ limitation any person's Copyright and Related Rights in the Work.
+ Further, Affirmer disclaims responsibility for obtaining any necessary
+ consents, permissions or other rights required for any use of the
+ Work.
+ d. Affirmer understands and acknowledges that Creative Commons is not a
+ party to this document and has no duty or obligation with respect to
+ this CC0 or use of the Work.
diff --git a/MAINTAINERS b/MAINTAINERS
index 96b827049501..eecc41c39a9c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -528,6 +528,7 @@ ADP1653 FLASH CONTROLLER DRIVER
M: Sakari Ailus <sakari.ailus@iki.fi>
L: linux-media@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/adi,adp1653.txt
F: drivers/media/i2c/adp1653.c
F: include/media/i2c/adp1653.h
@@ -646,6 +647,13 @@ F: drivers/iio/accel/adxl380.h
F: drivers/iio/accel/adxl380_i2c.c
F: drivers/iio/accel/adxl380_spi.c
+AEONSEMI PHY DRIVER
+M: Christian Marangi <ansuelsmth@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/aeonsemi,as21xxx.yaml
+F: drivers/net/phy/as21xxx.c
+
AF8133J THREE-AXIS MAGNETOMETER DRIVER
M: Ondřej Jirman <megi@xff.cz>
S: Maintained
@@ -797,6 +805,9 @@ M: Kurt Borja <kuurtb@gmail.com>
L: platform-driver-x86@vger.kernel.org
L: Dell.Client.Kernel@dell.com
S: Maintained
+F: Documentation/ABI/testing/debugfs-alienware-wmi
+F: Documentation/ABI/testing/sysfs-platform-alienware-wmi
+F: Documentation/admin-guide/laptops/alienware-wmi.rst
F: Documentation/wmi/devices/alienware-wmi.rst
F: drivers/platform/x86/dell/alienware-wmi*
@@ -940,7 +951,7 @@ F: include/linux/altera_uart.h
AMAZON ANNAPURNA LABS FIC DRIVER
M: Talel Shenhar <talel@amazon.com>
S: Maintained
-F: Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt
+F: Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.yaml
F: drivers/irqchip/irq-al-fic.c
AMAZON ANNAPURNA LABS MEMORY CONTROLLER EDAC
@@ -1097,7 +1108,7 @@ R: Carlos Bilbao <carlos.bilbao@kernel.org>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: Documentation/arch/x86/amd_hsmp.rst
-F: arch/x86/include/asm/amd_hsmp.h
+F: arch/x86/include/asm/amd/hsmp.h
F: arch/x86/include/uapi/asm/amd_hsmp.h
F: drivers/platform/x86/amd/hsmp/
@@ -1142,7 +1153,7 @@ M: Mario Limonciello <mario.limonciello@amd.com>
M: Yazen Ghannam <yazen.ghannam@amd.com>
L: linux-kernel@vger.kernel.org
S: Supported
-F: arch/x86/include/asm/amd_node.h
+F: arch/x86/include/asm/amd/node.h
F: arch/x86/kernel/amd_node.c
AMD PDS CORE DRIVER
@@ -1216,7 +1227,9 @@ AMD SPI DRIVER
M: Raju Rangoju <Raju.Rangoju@amd.com>
L: linux-spi@vger.kernel.org
S: Supported
+F: drivers/spi/spi-amd-pci.c
F: drivers/spi/spi-amd.c
+F: drivers/spi/spi-amd.h
AMD XDNA DRIVER
M: Min Ma <min.ma@amd.com>
@@ -1253,6 +1266,31 @@ F: Documentation/devicetree/bindings/perf/amlogic,g12-ddr-pmu.yaml
F: drivers/perf/amlogic/
F: include/soc/amlogic/
+AMLOGIC ISP DRIVER
+M: Keke Li <keke.li@amlogic.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: Documentation/admin-guide/media/c3-isp.dot
+F: Documentation/admin-guide/media/c3-isp.rst
+F: Documentation/devicetree/bindings/media/amlogic,c3-isp.yaml
+F: Documentation/userspace-api/media/v4l/metafmt-c3-isp.rst
+F: drivers/media/platform/amlogic/c3/isp/
+F: include/uapi/linux/media/amlogic/
+
+AMLOGIC MIPI ADAPTER DRIVER
+M: Keke Li <keke.li@amlogic.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/media/amlogic,c3-mipi-adapter.yaml
+F: drivers/media/platform/amlogic/c3/mipi-adapter/
+
+AMLOGIC MIPI CSI2 DRIVER
+M: Keke Li <keke.li@amlogic.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/media/amlogic,c3-mipi-csi2.yaml
+F: drivers/media/platform/amlogic/c3/mipi-csi2/
+
AMLOGIC PINCTRL DRIVER
M: Xianwei Zhao <xianwei.zhao@amlogic.com>
L: linux-amlogic@lists.infradead.org
@@ -1567,6 +1605,13 @@ W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/iio/filter/adi,admv8818.yaml
F: drivers/iio/filter/admv8818.c
+ANALOG DEVICES INC ADP5055 DRIVER
+M: Alexis Czezar Torreno <alexisczezar.torreno@analog.com>
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/regulator/adi,adp5055-regulator.yaml
+F: drivers/regulator/adp5055-regulator.c
+
ANALOG DEVICES INC ADP5061 DRIVER
M: Michael Hennerich <Michael.Hennerich@analog.com>
L: linux-pm@vger.kernel.org
@@ -1587,14 +1632,14 @@ M: Lars-Peter Clausen <lars@metafoo.de>
L: linux-media@vger.kernel.org
S: Supported
W: https://ez.analog.com/linux-software-drivers
-F: Documentation/devicetree/bindings/media/i2c/adv7180.yaml
+F: Documentation/devicetree/bindings/media/i2c/adi,adv7180.yaml
F: drivers/media/i2c/adv7180.c
ANALOG DEVICES INC ADV748X DRIVER
M: Kieran Bingham <kieran.bingham@ideasonboard.com>
L: linux-media@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/media/i2c/adv748x.yaml
+F: Documentation/devicetree/bindings/media/i2c/adi,adv748x.yaml
F: drivers/media/i2c/adv748x/*
ANALOG DEVICES INC ADV7511 DRIVER
@@ -1607,7 +1652,7 @@ ANALOG DEVICES INC ADV7604 DRIVER
M: Hans Verkuil <hverkuil-cisco@xs4all.nl>
L: linux-media@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/media/i2c/adv7604.yaml
+F: Documentation/devicetree/bindings/media/i2c/adi,adv7604.yaml
F: drivers/media/i2c/adv7604*
ANALOG DEVICES INC ADV7842 DRIVER
@@ -1682,7 +1727,7 @@ M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Arve Hjønnevåg <arve@android.com>
M: Todd Kjos <tkjos@android.com>
M: Martijn Coenen <maco@android.com>
-M: Joel Fernandes <joel@joelfernandes.org>
+M: Joel Fernandes <joelagnelf@nvidia.com>
M: Christian Brauner <christian@brauner.io>
M: Carlos Llamas <cmllamas@google.com>
M: Suren Baghdasaryan <surenb@google.com>
@@ -1694,7 +1739,7 @@ F: drivers/android/
ANDROID GOLDFISH PIC DRIVER
M: Miodrag Dinic <miodrag.dinic@mips.com>
S: Supported
-F: Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt
+F: Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.yaml
F: drivers/irqchip/irq-goldfish-pic.c
ANDROID GOLDFISH RTC DRIVER
@@ -1918,7 +1963,7 @@ F: Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml
F: Documentation/devicetree/bindings/auxdisplay/arm,versatile-lcd.yaml
F: Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml
F: Documentation/devicetree/bindings/i2c/arm,i2c-versatile.yaml
-F: Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt
+F: Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.yaml
F: Documentation/devicetree/bindings/mtd/mtd-physmap.yaml
F: arch/arm/boot/dts/arm/arm-realview-*
F: arch/arm/boot/dts/arm/integrator*
@@ -2102,7 +2147,7 @@ F: arch/arm/plat-*/
ARM/ACTIONS SEMI ARCHITECTURE
M: Andreas Färber <afaerber@suse.de>
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-actions@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -2251,7 +2296,7 @@ F: sound/soc/codecs/cs42l84.*
F: sound/soc/codecs/ssm3515.c
ARM/APPLE MACHINE SUPPORT
-M: Sven Peter <sven@svenpeter.dev>
+M: Sven Peter <sven@kernel.org>
M: Janne Grunau <j@jannau.net>
R: Alyssa Rosenzweig <alyssa@rosenzweig.io>
R: Neal Gompa <neal@gompa.dev>
@@ -2306,6 +2351,7 @@ F: drivers/watchdog/apple_wdt.c
F: include/dt-bindings/interrupt-controller/apple-aic.h
F: include/dt-bindings/pinctrl/apple.h
F: include/linux/soc/apple/*
+F: include/uapi/drm/asahi_drm.h
ARM/ARTPEC MACHINE SUPPORT
M: Jesper Nilsson <jesper.nilsson@axis.com>
@@ -2328,7 +2374,7 @@ L: linux-i2c@vger.kernel.org
L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
-F: Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.txt
+F: Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.yaml
F: drivers/i2c/busses/i2c-aspeed.c
F: drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -2354,7 +2400,7 @@ F: arch/arm/boot/dts/intel/axm/
F: arch/arm/mach-axxia/
ARM/BITMAIN ARCHITECTURE
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/arm/bitmain.yaml
@@ -2491,6 +2537,7 @@ F: include/dt-bindings/bus/moxtet.h
F: include/linux/armada-37xx-rwtm-mailbox.h
F: include/linux/moxtet.h
F: include/linux/turris-omnia-mcu-interface.h
+F: include/linux/turris-signing-key.h
ARM/FARADAY FA526 PORT
M: Hans Ulli Kroll <ulli.kroll@googlemail.com>
@@ -2519,6 +2566,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F: arch/arm/boot/dts/nxp/imx/
F: arch/arm/boot/dts/nxp/mxs/
F: arch/arm64/boot/dts/freescale/
+X: Documentation/devicetree/bindings/media/i2c/
X: arch/arm64/boot/dts/freescale/fsl-*
X: arch/arm64/boot/dts/freescale/qoriq-*
X: drivers/media/i2c/
@@ -3021,7 +3069,7 @@ F: include/linux/soc/qcom/
F: include/soc/qcom/
ARM/RDA MICRO ARCHITECTURE
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-unisoc@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -3063,10 +3111,10 @@ F: arch/arm/include/debug/renesas-scif.S
F: arch/arm/mach-shmobile/
F: arch/arm64/boot/dts/renesas/
F: arch/riscv/boot/dts/renesas/
-F: drivers/nvmem/rcar-efuse.c
F: drivers/pmdomain/renesas/
F: drivers/soc/renesas/
F: include/linux/soc/renesas/
+N: rcar
K: \brenesas,
ARM/RISCPC ARCHITECTURE
@@ -3191,6 +3239,12 @@ M: Dinh Nguyen <dinguyen@kernel.org>
S: Maintained
F: drivers/clk/socfpga/
+ARM/SOCFPGA DWMAC GLUE LAYER
+M: Maxime Chevallier <maxime.chevallier@bootlin.com>
+S: Maintained
+F: Documentation/devicetree/bindings/net/socfpga-dwmac.txt
+F: drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+
ARM/SOCFPGA EDAC BINDINGS
M: Matthew Gerlach <matthew.gerlach@altera.com>
S: Maintained
@@ -3427,7 +3481,10 @@ M: Alexey Charkov <alchark@gmail.com>
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Odd Fixes
-F: Documentation/devicetree/bindings/i2c/i2c-wmt.txt
+F: Documentation/devicetree/bindings/hwinfo/via,vt8500-scc-id.yaml
+F: Documentation/devicetree/bindings/i2c/wm,wm8505-i2c.yaml
+F: Documentation/devicetree/bindings/interrupt-controller/via,vt8500-intc.yaml
+F: Documentation/devicetree/bindings/pwm/via,vt8500-pwm.yaml
F: arch/arm/boot/dts/vt8500/
F: arch/arm/mach-vt8500/
F: drivers/clocksource/timer-vt8500.c
@@ -3435,6 +3492,7 @@ F: drivers/i2c/busses/i2c-viai2c-wmt.c
F: drivers/mmc/host/wmt-sdmmc.c
F: drivers/pwm/pwm-vt8500.c
F: drivers/rtc/rtc-vt8500.c
+F: drivers/soc/vt8500/
F: drivers/tty/serial/vt8500_serial.c
F: drivers/video/fbdev/vt8500lcdfb.*
F: drivers/video/fbdev/wm8505fb*
@@ -3577,7 +3635,7 @@ ASPEED SCU INTERRUPT CONTROLLER DRIVER
M: Eddie James <eajames@linux.ibm.com>
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2xxx-scu-ic.txt
+F: Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2500-scu-ic.yaml
F: drivers/irqchip/irq-aspeed-scu-ic.c
F: include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
@@ -3718,7 +3776,7 @@ F: Documentation/admin-guide/aoe/
F: drivers/block/aoe/
ATC260X PMIC MFD DRIVER
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
M: Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
L: linux-actions@lists.infradead.org
S: Maintained
@@ -3867,11 +3925,15 @@ AUXILIARY BUS DRIVER
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
R: Dave Ertman <david.m.ertman@intel.com>
R: Ira Weiny <ira.weiny@intel.com>
+R: Leon Romanovsky <leon@kernel.org>
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core.git
F: Documentation/driver-api/auxiliary_bus.rst
F: drivers/base/auxiliary.c
F: include/linux/auxiliary_bus.h
+F: rust/helpers/auxiliary.c
+F: rust/kernel/auxiliary.rs
+F: samples/rust/rust_driver_auxiliary.rs
AUXILIARY DISPLAY DRIVERS
M: Andy Shevchenko <andy@kernel.org>
@@ -4195,6 +4257,16 @@ F: Documentation/ABI/stable/sysfs-class-bluetooth
F: include/net/bluetooth/
F: net/bluetooth/
+BLZP1600 GPIO DRIVER
+M: James Cowgill <james.cowgill@blaize.com>
+M: Matt Redfearn <matt.redfearn@blaize.com>
+M: Neil Jones <neil.jones@blaize.com>
+M: Nikolaos Pasaloukos <nikolaos.pasaloukos@blaize.com>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/gpio/blaize,blzp1600-gpio.yaml
+F: drivers/gpio/gpio-blzp1600.c
+
BONDING DRIVER
M: Jay Vosburgh <jv@jvosburgh.net>
L: netdev@vger.kernel.org
@@ -4562,6 +4634,7 @@ F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
F: drivers/pci/controller/pcie-brcmstb.c
F: drivers/staging/vc04_services
N: bcm2711
+N: bcm2712
N: bcm283*
N: raspberrypi
@@ -5259,6 +5332,7 @@ F: include/uapi/linux/can/isotp.h
F: include/uapi/linux/can/raw.h
F: net/can/
F: net/sched/em_canid.c
+F: tools/testing/selftests/net/can/
CAN-J1939 NETWORK LAYER
M: Robin van der Gracht <robin@protonic.nl>
@@ -5502,7 +5576,7 @@ F: Documentation/dev-tools/checkpatch.rst
CHINESE DOCUMENTATION
M: Alex Shi <alexs@kernel.org>
-M: Yanteng Si <siyanteng@loongson.cn>
+M: Yanteng Si <si.yanteng@linux.dev>
R: Dongliang Mu <dzm91@hust.edu.cn>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/alexs/linux.git
S: Maintained
@@ -5647,7 +5721,6 @@ F: include/sound/cs*
F: sound/pci/hda/cirrus*
F: sound/pci/hda/cs*
F: sound/pci/hda/hda_component*
-F: sound/pci/hda/hda_cs_dsp_ctl.*
F: sound/soc/codecs/cs*
CIRRUS LOGIC HAPTIC DRIVERS
@@ -5883,6 +5956,8 @@ F: include/dt-bindings/clock/
F: include/linux/clk-pr*
F: include/linux/clk/
F: include/linux/of_clk.h
+F: rust/helpers/clk.c
+F: rust/kernel/clk.rs
X: drivers/clk/clkdev.c
COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3)
@@ -5975,7 +6050,9 @@ S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/a.hindborg/linux.git configfs-next
F: fs/configfs/
F: include/linux/configfs.h
+F: rust/kernel/configfs.rs
F: samples/configfs/
+F: samples/rust/rust_configfs.rs
CONGATEC BOARD CONTROLLER MFD DRIVER
M: Thomas Richard <thomas.richard@bootlin.com>
@@ -6140,6 +6217,7 @@ F: drivers/cpufreq/
F: include/linux/cpufreq.h
F: include/linux/sched/cpufreq.h
F: kernel/sched/cpufreq*.c
+F: rust/kernel/cpufreq.rs
F: tools/testing/selftests/cpufreq/
CPU HOTPLUG
@@ -6153,6 +6231,7 @@ F: include/linux/cpuhotplug.h
F: include/linux/smpboot.h
F: kernel/cpu.c
F: kernel/smpboot.*
+F: rust/kernel/cpu.rs
CPU IDLE TIME MANAGEMENT FRAMEWORK
M: "Rafael J. Wysocki" <rafael@kernel.org>
@@ -6237,6 +6316,12 @@ L: linux-riscv@lists.infradead.org
S: Maintained
F: drivers/cpuidle/cpuidle-riscv-sbi.c
+CPUMASK API [RUST]
+M: Viresh Kumar <viresh.kumar@linaro.org>
+R: Yury Norov <yury.norov@gmail.com>
+S: Maintained
+F: rust/kernel/cpumask.rs
+
CRAMFS FILESYSTEM
M: Nicolas Pitre <nico@fluxnic.net>
S: Maintained
@@ -6253,6 +6338,7 @@ F: Documentation/staging/crc*
F: arch/*/lib/crc*
F: include/linux/crc*
F: lib/crc*
+F: lib/tests/crc_kunit.c
F: scripts/gen-crc-consts.py
CREATIVE SB0540
@@ -6288,6 +6374,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6.git
F: Documentation/crypto/
F: Documentation/devicetree/bindings/crypto/
F: arch/*/crypto/
+F: arch/*/lib/crypto/
F: crypto/
F: drivers/crypto/
F: include/crypto/
@@ -6335,6 +6422,7 @@ F: Documentation/process/cve.rst
CW1200 WLAN driver
S: Orphan
+L: linux-wireless@vger.kernel.org
F: drivers/net/wireless/st/
F: include/linux/platform_data/net-cw1200.h
@@ -6507,6 +6595,12 @@ F: net/ax25/ax25_out.c
F: net/ax25/ax25_timer.c
F: net/ax25/sysctl_net_ax25.c
+DASHARO ACPI PLATFORM DRIVER
+M: Michał Kopeć <michal.kopec@3mdeb.com>
+S: Maintained
+W: https://docs.dasharo.com/
+F: drivers/platform/x86/dasharo-acpi.c
+
DATA ACCESS MONITOR
M: SeongJae Park <sj@kernel.org>
L: damon@lists.linux.dev
@@ -6546,15 +6640,6 @@ S: Maintained
F: Documentation/scsi/dc395x.rst
F: drivers/scsi/dc395x.*
-DCCP PROTOCOL
-L: dccp@vger.kernel.org
-S: Orphan
-W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
-F: include/linux/dccp.h
-F: include/linux/tfrc.h
-F: include/uapi/linux/dccp.h
-F: net/dccp/
-
DEBUGOBJECTS:
M: Thomas Gleixner <tglx@linutronix.de>
L: linux-kernel@vger.kernel.org
@@ -6721,7 +6806,7 @@ S: Orphan
F: drivers/mtd/nand/raw/denali*
DESIGNWARE EDMA CORE IP DRIVER
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: dmaengine@vger.kernel.org
S: Maintained
F: drivers/dma/dw-edma/
@@ -6857,7 +6942,7 @@ DIALOG SEMICONDUCTOR DRIVERS
M: Support Opensource <support.opensource@diasemi.com>
S: Supported
W: http://www.dialog-semiconductor.com/products
-F: Documentation/devicetree/bindings/input/dlg,da72??.txt
+F: Documentation/devicetree/bindings/input/dlg,da72??.yaml
F: Documentation/devicetree/bindings/input/dlg,da9062-onkey.yaml
F: Documentation/devicetree/bindings/mfd/da90*.txt
F: Documentation/devicetree/bindings/mfd/dlg,da90*.yaml
@@ -7020,6 +7105,7 @@ L: rust-for-linux@vger.kernel.org
S: Supported
W: https://rust-for-linux.com
T: git https://github.com/Rust-for-Linux/linux.git alloc-next
+F: rust/helpers/dma.c
F: rust/kernel/dma.rs
F: samples/rust/rust_dma.rs
@@ -7074,7 +7160,10 @@ T: git git://git.lwn.net/linux.git docs-next
F: Documentation/
F: scripts/check-variable-fonts.sh
F: scripts/documentation-file-ref-check
-F: scripts/kernel-doc
+F: scripts/get_abi.py
+F: scripts/kernel-doc*
+F: scripts/lib/abi/*
+F: scripts/lib/kdoc/*
F: scripts/sphinx-pre-install
X: Documentation/ABI/
X: Documentation/admin-guide/media/
@@ -7225,7 +7314,7 @@ M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Danilo Krummrich <dakr@kernel.org>
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core.git
F: Documentation/core-api/kobject.rst
F: drivers/base/
F: fs/debugfs/
@@ -7370,8 +7459,7 @@ M: Javier Martinez Canillas <javierm@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
-F: drivers/gpu/drm/tiny/ofdrm.c
-F: drivers/gpu/drm/tiny/simpledrm.c
+F: drivers/gpu/drm/sysfb/
F: drivers/video/aperture.c
F: drivers/video/nomodeset.c
F: include/linux/aperture.h
@@ -7450,7 +7538,7 @@ F: drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
DRM DRIVER FOR LG SW43408 PANELS
M: Sumit Semwal <sumit.semwal@linaro.org>
-M: Caleb Connolly <caleb.connolly@linaro.org>
+M: Casey Connolly <casey.connolly@linaro.org>
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
@@ -7512,6 +7600,7 @@ S: Maintained
B: https://gitlab.freedesktop.org/drm/msm/-/issues
T: git https://gitlab.freedesktop.org/drm/msm.git
F: Documentation/devicetree/bindings/display/msm/gpu.yaml
+F: Documentation/devicetree/bindings/opp/opp-v2-qcom-adreno.yaml
F: drivers/gpu/drm/msm/adreno/
F: drivers/gpu/drm/msm/msm_gpu.*
F: drivers/gpu/drm/msm/msm_gpu_devfreq.*
@@ -7565,6 +7654,12 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+DRM DRIVER FOR NOVATEK NT37801 PANELS
+M: Krzysztof Kozlowski <krzk@kernel.org>
+S: Maintained
+F: Documentation/devicetree/bindings/display/panel/novatek,nt37801.yaml
+F: drivers/gpu/drm/panel/panel-novatek-nt37801.c
+
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Lyude Paul <lyude@redhat.com>
M: Danilo Krummrich <dakr@kernel.org>
@@ -7591,6 +7686,18 @@ T: git https://gitlab.freedesktop.org/drm/nova.git nova-next
F: Documentation/gpu/nova/
F: drivers/gpu/nova-core/
+DRM DRIVER FOR NVIDIA GPUS [RUST]
+M: Danilo Krummrich <dakr@kernel.org>
+L: nouveau@lists.freedesktop.org
+S: Supported
+Q: https://patchwork.freedesktop.org/project/nouveau/
+B: https://gitlab.freedesktop.org/drm/nova/-/issues
+C: irc://irc.oftc.net/nouveau
+T: git https://gitlab.freedesktop.org/drm/nova.git nova-next
+F: Documentation/gpu/nova/
+F: drivers/gpu/drm/nova/
+F: include/uapi/drm/nova_drm.h
+
DRM DRIVER FOR OLIMEX LCD-OLINUXINO PANELS
M: Stefan Mavrodiev <stefan@olimex.com>
S: Maintained
@@ -7680,7 +7787,13 @@ M: David Lechner <david@lechnology.com>
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/sitronix,st7586.txt
-F: drivers/gpu/drm/tiny/st7586.c
+F: drivers/gpu/drm/sitronix/st7586.c
+
+DRM DRIVER FOR SITRONIX ST7571 PANELS
+M: Marcus Folkesson <marcus.folkesson@gmail.com>
+S: Maintained
+F: Documentation/devicetree/bindings/display/sitronix,st7571.yaml
+F: drivers/gpu/drm/sitronix/st7571-i2c.c
DRM DRIVER FOR SITRONIX ST7701 PANELS
M: Jagan Teki <jagan@amarulasolutions.com>
@@ -7701,7 +7814,7 @@ M: David Lechner <david@lechnology.com>
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
-F: drivers/gpu/drm/tiny/st7735r.c
+F: drivers/gpu/drm/sitronix/st7735r.c
DRM DRIVER FOR SOLOMON SSD130X OLED DISPLAYS
M: Javier Martinez Canillas <javierm@redhat.com>
@@ -7798,6 +7911,7 @@ F: Documentation/devicetree/bindings/display/
F: Documentation/devicetree/bindings/gpu/
F: Documentation/gpu/
F: drivers/gpu/
+F: rust/kernel/drm/
F: include/drm/
F: include/linux/vga*
F: include/uapi/drm/
@@ -7814,6 +7928,7 @@ F: Documentation/devicetree/bindings/gpu/
F: Documentation/gpu/
F: drivers/gpu/drm/
F: drivers/gpu/vga/
+F: rust/kernel/drm/
F: include/drm/drm
F: include/linux/vga*
F: include/uapi/drm/
@@ -7853,8 +7968,8 @@ F: drivers/gpu/drm/ci/xfails/meson*
F: drivers/gpu/drm/meson/
DRM DRIVERS FOR ATMEL HLCDC
-M: Sam Ravnborg <sam@ravnborg.org>
-M: Boris Brezillon <bbrezillon@kernel.org>
+M: Manikandan Muralidharan <manikandan.m@microchip.com>
+M: Dharma Balasubiramani <dharma.b@microchip.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
@@ -7895,7 +8010,7 @@ L: dri-devel@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/fsl,dcu.txt
-F: Documentation/devicetree/bindings/display/fsl,tcon.txt
+F: Documentation/devicetree/bindings/display/fsl,vf610-tcon.yaml
F: drivers/gpu/drm/fsl-dcu/
DRM DRIVERS FOR FREESCALE IMX 5/6
@@ -8182,7 +8297,8 @@ F: drivers/gpu/drm/ttm/
F: include/drm/ttm/
DRM AUTOMATED TESTING
-M: Helen Koike <helen.koike@collabora.com>
+M: Helen Koike <helen.fornazier@gmail.com>
+M: Vignesh Raman <vignesh.raman@collabora.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
@@ -8536,7 +8652,7 @@ S: Maintained
F: drivers/edac/pnd2_edac.[ch]
EDAC-QCOM
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: linux-arm-msm@vger.kernel.org
L: linux-edac@vger.kernel.org
S: Maintained
@@ -8717,6 +8833,7 @@ M: Chao Yu <chao@kernel.org>
R: Yue Hu <zbestahu@gmail.com>
R: Jeffle Xu <jefflexu@linux.alibaba.com>
R: Sandeep Dhavale <dhavale@google.com>
+R: Hongbo Li <lihongbo22@huawei.com>
L: linux-erofs@lists.ozlabs.org
S: Maintained
W: https://erofs.docs.kernel.org
@@ -8829,6 +8946,8 @@ F: include/linux/elf.h
F: include/uapi/linux/auxvec.h
F: include/uapi/linux/binfmts.h
F: include/uapi/linux/elf.h
+F: kernel/fork.c
+F: mm/vma_exec.c
F: tools/testing/selftests/exec/
N: asm/elf.h
N: binfmt
@@ -9203,7 +9322,7 @@ FLEXTIMER FTM-QUADDEC DRIVER
M: Patrick Havelange <patrick.havelange@essensium.com>
L: linux-iio@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/counter/ftm-quaddec.txt
+F: Documentation/devicetree/bindings/counter/fsl,ftm-quaddec.yaml
F: drivers/counter/ftm-quaddec.c
FLOPPY DRIVER
@@ -9376,6 +9495,7 @@ F: Documentation/devicetree/bindings/net/nxp,netc-blk-ctrl.yaml
F: drivers/net/ethernet/freescale/enetc/
F: include/linux/fsl/enetc_mdio.h
F: include/linux/fsl/netc_global.h
+F: include/linux/fsl/ntmp.h
FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
M: Claudiu Manoil <claudiu.manoil@nxp.com>
@@ -9726,7 +9846,7 @@ L: linux-fsdevel@vger.kernel.org
S: Maintained
W: https://github.com/libfuse/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git
-F: Documentation/filesystems/fuse.rst
+F: Documentation/filesystems/fuse*
F: fs/fuse/
F: include/uapi/linux/fuse.h
@@ -10080,6 +10200,7 @@ L: linux-samsung-soc@vger.kernel.org
S: Maintained
C: irc://irc.oftc.net/pixel6-kernel-dev
F: Documentation/devicetree/bindings/clock/google,gs101-clock.yaml
+F: Documentation/devicetree/bindings/soc/google/google,gs101-pmu-intr-gen.yaml
F: arch/arm64/boot/dts/exynos/google/
F: drivers/clk/samsung/clk-gs101.c
F: drivers/phy/samsung/phy-gs101-ufs.c
@@ -10105,7 +10226,7 @@ L: linux-acpi@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
F: Documentation/firmware-guide/acpi/gpio-properties.rst
-F: drivers/gpio/gpiolib-acpi.c
+F: drivers/gpio/gpiolib-acpi-*.c
F: drivers/gpio/gpiolib-acpi.h
GPIO AGGREGATOR
@@ -10136,6 +10257,13 @@ F: drivers/gpio/gpio-regmap.c
F: include/linux/gpio/regmap.h
K: (devm_)?gpio_regmap_(un)?register
+GPIO SLOPPY LOGIC ANALYZER
+M: Wolfram Sang <wsa+renesas@sang-engineering.com>
+S: Supported
+F: Documentation/dev-tools/gpio-sloppy-logic-analyzer.rst
+F: drivers/gpio/gpio-sloppy-logic-analyzer.c
+F: tools/gpio/gpio-sloppy-logic-analyzer.sh
+
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
M: Bartosz Golaszewski <brgl@bgdev.pl>
@@ -10151,6 +10279,8 @@ F: include/linux/gpio.h
F: include/linux/gpio/
F: include/linux/of_gpio.h
K: (devm_)?gpio_(request|free|direction|get|set)
+K: GPIOD_FLAGS_BIT_NONEXCLUSIVE
+K: devm_gpiod_unhinge
GPIO UAPI
M: Bartosz Golaszewski <brgl@bgdev.pl>
@@ -10453,14 +10583,20 @@ S: Supported
F: drivers/infiniband/hw/hfi1
HFS FILESYSTEM
+M: Viacheslav Dubeyko <slava@dubeyko.com>
+M: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+M: Yangtao Li <frank.li@vivo.com>
L: linux-fsdevel@vger.kernel.org
-S: Orphan
+S: Maintained
F: Documentation/filesystems/hfs.rst
F: fs/hfs/
HFSPLUS FILESYSTEM
+M: Viacheslav Dubeyko <slava@dubeyko.com>
+M: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+M: Yangtao Li <frank.li@vivo.com>
L: linux-fsdevel@vger.kernel.org
-S: Orphan
+S: Maintained
F: Documentation/filesystems/hfsplus.rst
F: fs/hfsplus/
@@ -10583,20 +10719,23 @@ F: kernel/time/timer_list.c
F: kernel/time/timer_migration.*
F: tools/testing/selftests/timers/
-HIGH-RESOLUTION TIMERS [RUST]
+DELAY, SLEEP, TIMEKEEPING, TIMERS [RUST]
M: Andreas Hindborg <a.hindborg@kernel.org>
R: Boqun Feng <boqun.feng@gmail.com>
+R: FUJITA Tomonori <fujita.tomonori@gmail.com>
R: Frederic Weisbecker <frederic@kernel.org>
R: Lyude Paul <lyude@redhat.com>
R: Thomas Gleixner <tglx@linutronix.de>
R: Anna-Maria Behnsen <anna-maria@linutronix.de>
+R: John Stultz <jstultz@google.com>
+R: Stephen Boyd <sboyd@kernel.org>
L: rust-for-linux@vger.kernel.org
S: Supported
W: https://rust-for-linux.com
B: https://github.com/Rust-for-Linux/linux/issues
-T: git https://github.com/Rust-for-Linux/linux.git hrtimer-next
-F: rust/kernel/time/hrtimer.rs
-F: rust/kernel/time/hrtimer/
+T: git https://github.com/Rust-for-Linux/linux.git timekeeping-next
+F: rust/kernel/time.rs
+F: rust/kernel/time/
HIGH-SPEED SCC DRIVER FOR AX.25
L: linux-hams@vger.kernel.org
@@ -10724,6 +10863,7 @@ W: http://www.hisilicon.com
F: Documentation/admin-guide/perf/hisi-pcie-pmu.rst
F: Documentation/admin-guide/perf/hisi-pmu.rst
F: drivers/perf/hisilicon
+F: tools/perf/pmu-events/arch/arm64/hisilicon/
HISILICON PTT DRIVER
M: Yicong Yang <yangyicong@hisilicon.com>
@@ -10945,15 +11085,24 @@ S: Maintained
F: Documentation/networking/device_drivers/ethernet/huawei/hinic.rst
F: drivers/net/ethernet/huawei/hinic/
+HUAWEI 3RD GEN ETHERNET DRIVER
+M: Fan Gong <gongfan1@huawei.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: Documentation/networking/device_drivers/ethernet/huawei/hinic3.rst
+F: drivers/net/ethernet/huawei/hinic3/
+
HUAWEI MATEBOOK E GO EMBEDDED CONTROLLER DRIVER
M: Pengyu Luo <mitltlatltl@gmail.com>
S: Maintained
F: Documentation/devicetree/bindings/platform/huawei,gaokun-ec.yaml
F: drivers/platform/arm64/huawei-gaokun-ec.c
+F: drivers/power/supply/huawei-gaokun-battery.c
F: include/linux/platform_data/huawei-gaokun-ec.h
HUGETLB SUBSYSTEM
M: Muchun Song <muchun.song@linux.dev>
+R: Oscar Salvador <osalvador@suse.de>
L: linux-mm@kvack.org
S: Maintained
F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages
@@ -11079,6 +11228,14 @@ L: linuxppc-dev@lists.ozlabs.org
S: Odd Fixes
F: drivers/tty/hvc/
+HUNG TASK DETECTOR
+M: Andrew Morton <akpm@linux-foundation.org>
+R: Lance Yang <lance.yang@linux.dev>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: include/linux/hung_task.h
+F: kernel/hung_task.c
+
I2C ACPI SUPPORT
M: Mika Westerberg <westeri@kernel.org>
L: linux-i2c@vger.kernel.org
@@ -11217,7 +11374,6 @@ S: Maintained
F: drivers/i2c/busses/i2c-cht-wc.c
I2C/SMBUS ISMT DRIVER
-M: Seth Heasley <seth.heasley@intel.com>
M: Neil Horman <nhorman@tuxdriver.com>
L: linux-i2c@vger.kernel.org
F: Documentation/i2c/busses/i2c-ismt.rst
@@ -11890,7 +12046,7 @@ F: Documentation/networking/device_drivers/ethernet/intel/
F: drivers/net/ethernet/intel/
F: drivers/net/ethernet/intel/*/
F: include/linux/avf/virtchnl.h
-F: include/linux/net/intel/iidc.h
+F: include/linux/net/intel/*/
INTEL ETHERNET PROTOCOL DRIVER FOR RDMA
M: Mustafa Ismail <mustafa.ismail@intel.com>
@@ -11917,13 +12073,10 @@ F: drivers/gpio/gpio-tangier.c
F: drivers/gpio/gpio-tangier.h
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
-M: Zhenyu Wang <zhenyuw.linux@gmail.com>
-M: Zhi Wang <zhi.wang.linux@gmail.com>
-L: intel-gvt-dev@lists.freedesktop.org
-L: intel-gfx@lists.freedesktop.org
-S: Supported
+R: Zhenyu Wang <zhenyuw.linux@gmail.com>
+R: Zhi Wang <zhi.wang.linux@gmail.com>
+S: Odd Fixes
W: https://github.com/intel/gvt-linux/wiki
-T: git https://github.com/intel/gvt-linux.git
F: drivers/gpu/drm/i915/gvt/
INTEL HID EVENT DRIVER
@@ -12120,7 +12273,7 @@ M: Andy Shevchenko <andy@kernel.org>
L: linux-kernel@vger.kernel.org
S: Supported
F: arch/x86/include/asm/intel-mid.h
-F: arch/x86/pci/intel_mid_pci.c
+F: arch/x86/pci/intel_mid.c
F: arch/x86/platform/intel-mid/
F: drivers/dma/hsu/
F: drivers/extcon/extcon-intel-mrfld.c
@@ -12232,6 +12385,7 @@ INTEL SKYLAKE INT3472 ACPI DEVICE DRIVER
M: Daniel Scally <djrscally@gmail.com>
S: Maintained
F: drivers/platform/x86/intel/int3472/
+F: include/linux/platform_data/x86/int3472.h
INTEL SPEED SELECT TECHNOLOGY
M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
@@ -12810,6 +12964,7 @@ F: lib/Kconfig.kcsan
F: scripts/Makefile.kcsan
KDUMP
+M: Andrew Morton <akpm@linux-foundation.org>
M: Baoquan He <bhe@redhat.com>
R: Vivek Goyal <vgoyal@redhat.com>
R: Dave Young <dyoung@redhat.com>
@@ -12873,6 +13028,7 @@ F: include/linux/overflow.h
F: include/linux/randomize_kstack.h
F: include/linux/ucopysize.h
F: kernel/configs/hardening.config
+F: lib/tests/randstruct_kunit.c
F: lib/tests/usercopy_kunit.c
F: mm/usercopy.c
F: security/Kconfig.hardening
@@ -12888,7 +13044,7 @@ W: http://kernelnewbies.org/KernelJanitors
KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
M: Chuck Lever <chuck.lever@oracle.com>
M: Jeff Layton <jlayton@kernel.org>
-R: Neil Brown <neilb@suse.de>
+R: NeilBrown <neil@brown.name>
R: Olga Kornievskaia <okorniev@redhat.com>
R: Dai Ngo <Dai.Ngo@oracle.com>
R: Tom Talpey <tom@talpey.com>
@@ -13024,6 +13180,8 @@ F: Documentation/virt/kvm/loongarch/
F: arch/loongarch/include/asm/kvm*
F: arch/loongarch/include/uapi/asm/kvm*
F: arch/loongarch/kvm/
+F: tools/testing/selftests/kvm/*/loongarch/
+F: tools/testing/selftests/kvm/lib/loongarch/
KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
M: Huacai Chen <chenhuacai@kernel.org>
@@ -13073,12 +13231,14 @@ S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
F: Documentation/virt/kvm/s390*
F: arch/s390/include/asm/gmap.h
+F: arch/s390/include/asm/gmap_helpers.h
F: arch/s390/include/asm/kvm*
F: arch/s390/include/uapi/asm/kvm*
F: arch/s390/include/uapi/asm/uvdevice.h
F: arch/s390/kernel/uv.c
F: arch/s390/kvm/
F: arch/s390/mm/gmap.c
+F: arch/s390/mm/gmap_helpers.c
F: drivers/s390/char/uvdevice.c
F: tools/testing/selftests/drivers/s390x/uvdevice/
F: tools/testing/selftests/kvm/*/s390/
@@ -13106,17 +13266,30 @@ KERNFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Tejun Heo <tj@kernel.org>
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core.git
F: fs/kernfs/
F: include/linux/kernfs.h
KEXEC
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Baoquan He <bhe@redhat.com>
L: kexec@lists.infradead.org
W: http://kernel.org/pub/linux/utils/kernel/kexec/
F: include/linux/kexec.h
F: include/uapi/linux/kexec.h
F: kernel/kexec*
+KEXEC HANDOVER (KHO)
+M: Alexander Graf <graf@amazon.com>
+M: Mike Rapoport <rppt@kernel.org>
+M: Changyuan Lyu <changyuanl@google.com>
+L: kexec@lists.infradead.org
+S: Maintained
+F: Documentation/admin-guide/mm/kho.rst
+F: Documentation/core-api/kho/*
+F: include/linux/kexec_handover.h
+F: kernel/kexec_handover.c
+
KEYS-ENCRYPTED
M: Mimi Zohar <zohar@linux.ibm.com>
L: linux-integrity@vger.kernel.org
@@ -13624,7 +13797,6 @@ M: Madhavan Srinivasan <maddy@linux.ibm.com>
M: Michael Ellerman <mpe@ellerman.id.au>
R: Nicholas Piggin <npiggin@gmail.com>
R: Christophe Leroy <christophe.leroy@csgroup.eu>
-R: Naveen N Rao <naveen@kernel.org>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
W: https://github.com/linuxppc/wiki/wiki
@@ -13705,7 +13877,7 @@ M: Luc Maranget <luc.maranget@inria.fr>
M: "Paul E. McKenney" <paulmck@kernel.org>
R: Akira Yokosawa <akiyks@gmail.com>
R: Daniel Lustig <dlustig@nvidia.com>
-R: Joel Fernandes <joel@joelfernandes.org>
+R: Joel Fernandes <joelagnelf@nvidia.com>
L: linux-kernel@vger.kernel.org
L: linux-arch@vger.kernel.org
L: lkmm@lists.linux.dev
@@ -13910,6 +14082,13 @@ S: Maintained
F: Documentation/devicetree/bindings/i2c/loongson,ls2x-i2c.yaml
F: drivers/i2c/busses/i2c-ls2x.c
+LOONGSON PWM DRIVER
+M: Binbin Zhou <zhoubinbin@loongson.cn>
+L: linux-pwm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pwm/loongson,ls7a-pwm.yaml
+F: drivers/pwm/pwm-loongson.c
+
LOONGSON-2 SOC SERIES CLOCK DRIVER
M: Yinbo Zhu <zhuyinbo@loongson.cn>
L: linux-clk@vger.kernel.org
@@ -13981,6 +14160,15 @@ L: linux-scsi@vger.kernel.org
S: Maintained
F: drivers/scsi/sym53c8xx_2/
+LT3074 HARDWARE MONITOR DRIVER
+M: Cedric Encarnacion <cedricjustine.encarnacion@analog.com>
+L: linux-hwmon@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/hwmon/pmbus/adi,lt3074.yaml
+F: Documentation/hwmon/lt3074.rst
+F: drivers/hwmon/pmbus/lt3074.c
+
LTC1660 DAC DRIVER
M: Marcus Folkesson <marcus.folkesson@gmail.com>
L: linux-iio@vger.kernel.org
@@ -14267,9 +14455,8 @@ F: drivers/gpu/drm/armada/
F: include/uapi/drm/armada_drm.h
MARVELL CRYPTO DRIVER
-M: Boris Brezillon <bbrezillon@kernel.org>
-M: Arnaud Ebalard <arno@natisbad.org>
M: Srujana Challa <schalla@marvell.com>
+M: Bharat Bhushan <bbhushan2@marvell.com>
L: linux-crypto@vger.kernel.org
S: Maintained
F: drivers/crypto/marvell/
@@ -14283,6 +14470,7 @@ S: Odd fixes
F: drivers/net/ethernet/marvell/sk*
MARVELL LIBERTAS WIRELESS DRIVER
+L: linux-wireless@vger.kernel.org
L: libertas-dev@lists.infradead.org
S: Orphan
F: drivers/net/wireless/marvell/libertas/
@@ -14448,7 +14636,7 @@ M: Ramesh Shanmugasundaram <rashanmu@gmail.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media.git
-F: Documentation/devicetree/bindings/media/i2c/max2175.txt
+F: Documentation/devicetree/bindings/media/i2c/maxim,max2175.txt
F: Documentation/userspace-api/media/drivers/max2175.rst
F: drivers/media/i2c/max2175*
F: include/uapi/linux/max2175.h
@@ -14575,6 +14763,16 @@ F: Documentation/devicetree/bindings/mfd/maxim,max77714.yaml
F: drivers/mfd/max77714.c
F: include/linux/mfd/max77714.h
+MAXIM MAX77759 PMIC MFD DRIVER
+M: André Draszik <andre.draszik@linaro.org>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/*/maxim,max77759*.yaml
+F: drivers/gpio/gpio-max77759.c
+F: drivers/mfd/max77759.c
+F: drivers/nvmem/max77759-nvmem.c
+F: include/linux/mfd/max77759.h
+
MAXIM MAX77802 PMIC REGULATOR DEVICE DRIVER
M: Javier Martinez Canillas <javier@dowhile0.org>
L: linux-kernel@vger.kernel.org
@@ -14610,7 +14808,6 @@ F: Documentation/devicetree/bindings/*/maxim,max77686.yaml
F: Documentation/devicetree/bindings/*/maxim,max77693.yaml
F: Documentation/devicetree/bindings/*/maxim,max77705*.yaml
F: Documentation/devicetree/bindings/*/maxim,max77843.yaml
-F: Documentation/devicetree/bindings/clock/maxim,max77686.txt
F: drivers/leds/leds-max77705.c
F: drivers/*/*max77843.c
F: drivers/*/max14577*.c
@@ -14638,6 +14835,7 @@ MAXLINEAR ETHERNET PHY DRIVER
M: Xu Liang <lxu@maxlinear.com>
L: netdev@vger.kernel.org
S: Supported
+F: drivers/net/phy/mxl-86110.c
F: drivers/net/phy/mxl-gpy.c
MCAN MMIO DEVICE DRIVER
@@ -14664,7 +14862,7 @@ F: drivers/hid/hid-mcp2221.c
MCP251XFD SPI-CAN NETWORK DRIVER
M: Marc Kleine-Budde <mkl@pengutronix.de>
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
R: Thomas Kopp <thomas.kopp@microchip.com>
L: linux-can@vger.kernel.org
S: Maintained
@@ -14775,7 +14973,7 @@ L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media.git
F: Documentation/admin-guide/media/imx.rst
-F: Documentation/devicetree/bindings/media/imx.txt
+F: Documentation/devicetree/bindings/media/fsl,imx6-mipi-csi2.yaml
F: drivers/staging/media/imx/
F: include/linux/imx-media.h
F: include/media/imx.h
@@ -14896,7 +15094,7 @@ F: Documentation/devicetree/bindings/media/renesas,csi2.yaml
F: Documentation/devicetree/bindings/media/renesas,isp.yaml
F: Documentation/devicetree/bindings/media/renesas,vin.yaml
F: drivers/media/platform/renesas/rcar-csi2.c
-F: drivers/media/platform/renesas/rcar-isp.c
+F: drivers/media/platform/renesas/rcar-isp/
F: drivers/media/platform/renesas/rcar-vin/
MEDIA DRIVERS FOR RENESAS - VSP1
@@ -15019,6 +15217,7 @@ M: Qingfang Deng <dqfext@gmail.com>
M: SkyLake Huang <SkyLake.Huang@mediatek.com>
L: netdev@vger.kernel.org
S: Maintained
+F: drivers/net/phy/mediatek/mtk-2p5ge.c
F: drivers/net/phy/mediatek/mtk-ge-soc.c
F: drivers/net/phy/mediatek/mtk-phy-lib.c
F: drivers/net/phy/mediatek/mtk-ge.c
@@ -15049,7 +15248,7 @@ F: Documentation/devicetree/bindings/media/mediatek-jpeg-*.yaml
F: drivers/media/platform/mediatek/jpeg/
MEDIATEK KEYPAD DRIVER
-M: Mattijs Korpershoek <mkorpershoek@baylibre.com>
+M: Mattijs Korpershoek <mkorpershoek@kernel.org>
S: Supported
F: Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
F: drivers/input/keyboard/mt6779-keypad.c
@@ -15414,6 +15613,7 @@ M: Mike Rapoport <rppt@kernel.org>
L: linux-mm@kvack.org
S: Maintained
F: Documentation/core-api/boot-time-mm.rst
+F: Documentation/core-api/kho/bindings/memblock/*
F: include/linux/memblock.h
F: mm/memblock.c
F: mm/mm_init.c
@@ -15472,24 +15672,46 @@ F: Documentation/mm/
F: include/linux/gfp.h
F: include/linux/gfp_types.h
F: include/linux/memfd.h
-F: include/linux/memory.h
F: include/linux/memory_hotplug.h
F: include/linux/memory-tiers.h
F: include/linux/mempolicy.h
F: include/linux/mempool.h
F: include/linux/memremap.h
-F: include/linux/mm.h
-F: include/linux/mm_*.h
F: include/linux/mmzone.h
F: include/linux/mmu_notifier.h
F: include/linux/pagewalk.h
-F: include/linux/rmap.h
F: include/trace/events/ksm.h
F: mm/
F: tools/mm/
F: tools/testing/selftests/mm/
N: include/linux/page[-_]*
+MEMORY MANAGEMENT - CORE
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R: Liam R. Howlett <Liam.Howlett@oracle.com>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Mike Rapoport <rppt@kernel.org>
+R: Suren Baghdasaryan <surenb@google.com>
+R: Michal Hocko <mhocko@suse.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: include/linux/memory.h
+F: include/linux/mm.h
+F: include/linux/mm_*.h
+F: include/linux/mmdebug.h
+F: include/linux/pagewalk.h
+F: kernel/fork.c
+F: mm/Kconfig
+F: mm/debug.c
+F: mm/init-mm.c
+F: mm/memory.c
+F: mm/pagewalk.c
+F: mm/util.c
+
MEMORY MANAGEMENT - EXECMEM
M: Andrew Morton <akpm@linux-foundation.org>
M: Mike Rapoport <rppt@kernel.org>
@@ -15498,6 +15720,53 @@ S: Maintained
F: include/linux/execmem.h
F: mm/execmem.c
+MEMORY MANAGEMENT - GUP (GET USER PAGES)
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+R: Jason Gunthorpe <jgg@nvidia.com>
+R: John Hubbard <jhubbard@nvidia.com>
+R: Peter Xu <peterx@redhat.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: mm/gup.c
+
+MEMORY MANAGEMENT - KSM (Kernel Samepage Merging)
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+R: Xu Xin <xu.xin16@zte.com.cn>
+R: Chengming Zhou <chengming.zhou@linux.dev>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: Documentation/admin-guide/mm/ksm.rst
+F: Documentation/mm/ksm.rst
+F: include/linux/ksm.h
+F: include/trace/events/ksm.h
+F: mm/ksm.c
+
+MEMORY MANAGEMENT - MEMORY POLICY AND MIGRATION
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+R: Zi Yan <ziy@nvidia.com>
+R: Matthew Brost <matthew.brost@intel.com>
+R: Joshua Hahn <joshua.hahnjy@gmail.com>
+R: Rakie Kim <rakie.kim@sk.com>
+R: Byungchul Park <byungchul@sk.com>
+R: Gregory Price <gourry@gourry.net>
+R: Ying Huang <ying.huang@linux.alibaba.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: include/linux/mempolicy.h
+F: include/linux/migrate.h
+F: mm/mempolicy.c
+F: mm/migrate.c
+F: mm/migrate_device.c
+
MEMORY MANAGEMENT - NUMA MEMBLOCKS AND NUMA EMULATION
M: Andrew Morton <akpm@linux-foundation.org>
M: Mike Rapoport <rppt@kernel.org>
@@ -15508,6 +15777,49 @@ F: mm/numa.c
F: mm/numa_emulation.c
F: mm/numa_memblks.c
+MEMORY MANAGEMENT - PAGE ALLOCATOR
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Vlastimil Babka <vbabka@suse.cz>
+R: Suren Baghdasaryan <surenb@google.com>
+R: Michal Hocko <mhocko@suse.com>
+R: Brendan Jackman <jackmanb@google.com>
+R: Johannes Weiner <hannes@cmpxchg.org>
+R: Zi Yan <ziy@nvidia.com>
+L: linux-mm@kvack.org
+S: Maintained
+F: include/linux/compaction.h
+F: include/linux/gfp.h
+F: include/linux/page-isolation.h
+F: mm/compaction.c
+F: mm/page_alloc.c
+F: mm/page_isolation.c
+
+MEMORY MANAGEMENT - RECLAIM
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Johannes Weiner <hannes@cmpxchg.org>
+R: David Hildenbrand <david@redhat.com>
+R: Michal Hocko <mhocko@kernel.org>
+R: Qi Zheng <zhengqi.arch@bytedance.com>
+R: Shakeel Butt <shakeel.butt@linux.dev>
+R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+L: linux-mm@kvack.org
+S: Maintained
+F: mm/pt_reclaim.c
+F: mm/vmscan.c
+
+MEMORY MANAGEMENT - RMAP (REVERSE MAPPING)
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R: Rik van Riel <riel@surriel.com>
+R: Liam R. Howlett <Liam.Howlett@oracle.com>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Harry Yoo <harry.yoo@oracle.com>
+L: linux-mm@kvack.org
+S: Maintained
+F: include/linux/rmap.h
+F: mm/rmap.c
+
MEMORY MANAGEMENT - SECRETMEM
M: Andrew Morton <akpm@linux-foundation.org>
M: Mike Rapoport <rppt@kernel.org>
@@ -15516,6 +15828,30 @@ S: Maintained
F: include/linux/secretmem.h
F: mm/secretmem.c
+MEMORY MANAGEMENT - THP (TRANSPARENT HUGE PAGE)
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+R: Zi Yan <ziy@nvidia.com>
+R: Baolin Wang <baolin.wang@linux.alibaba.com>
+R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R: Liam R. Howlett <Liam.Howlett@oracle.com>
+R: Nico Pache <npache@redhat.com>
+R: Ryan Roberts <ryan.roberts@arm.com>
+R: Dev Jain <dev.jain@arm.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: Documentation/admin-guide/mm/transhuge.rst
+F: include/linux/huge_mm.h
+F: include/linux/khugepaged.h
+F: include/trace/events/huge_memory.h
+F: mm/huge_memory.c
+F: mm/khugepaged.c
+F: tools/testing/selftests/mm/khugepaged.c
+F: tools/testing/selftests/mm/split_huge_page_test.c
+F: tools/testing/selftests/mm/transhuge-stress.c
+
MEMORY MANAGEMENT - USERFAULTFD
M: Andrew Morton <akpm@linux-foundation.org>
R: Peter Xu <peterx@redhat.com>
@@ -15529,16 +15865,31 @@ F: include/uapi/linux/userfaultfd.h
F: mm/userfaultfd.c
F: tools/testing/selftests/mm/uffd-*.[ch]
+MEMORY MANAGEMENT - RUST
+M: Alice Ryhl <aliceryhl@google.com>
+R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R: Liam R. Howlett <Liam.Howlett@oracle.com>
+L: linux-mm@kvack.org
+L: rust-for-linux@vger.kernel.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: rust/helpers/mm.c
+F: rust/kernel/mm.rs
+F: rust/kernel/mm/
+
MEMORY MAPPING
M: Andrew Morton <akpm@linux-foundation.org>
M: Liam R. Howlett <Liam.Howlett@oracle.com>
M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
R: Vlastimil Babka <vbabka@suse.cz>
R: Jann Horn <jannh@google.com>
+R: Pedro Falcato <pfalcato@suse.de>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: include/trace/events/mmap.h
F: mm/mlock.c
F: mm/mmap.c
F: mm/mprotect.c
@@ -15546,9 +15897,42 @@ F: mm/mremap.c
F: mm/mseal.c
F: mm/vma.c
F: mm/vma.h
+F: mm/vma_exec.c
+F: mm/vma_init.c
F: mm/vma_internal.h
+F: tools/testing/selftests/mm/merge.c
F: tools/testing/vma/
+MEMORY MAPPING - LOCKING
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Suren Baghdasaryan <surenb@google.com>
+M: Liam R. Howlett <Liam.Howlett@oracle.com>
+M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Shakeel Butt <shakeel.butt@linux.dev>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: Documentation/mm/process_addrs.rst
+F: include/linux/mmap_lock.h
+F: include/trace/events/mmap_lock.h
+F: mm/mmap_lock.c
+
+MEMORY MAPPING - MADVISE (MEMORY ADVICE)
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Liam R. Howlett <Liam.Howlett@oracle.com>
+M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+M: David Hildenbrand <david@redhat.com>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Jann Horn <jannh@google.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: include/uapi/asm-generic/mman-common.h
+F: mm/madvise.c
+
MEMORY TECHNOLOGY DEVICES (MTD)
M: Miquel Raynal <miquel.raynal@bootlin.com>
M: Richard Weinberger <richard@nod.at>
@@ -15644,7 +16028,7 @@ F: arch/arm64/boot/dts/marvell/armada-3720-eDPU.dts
F: arch/arm64/boot/dts/marvell/armada-3720-uDPU.*
MHI BUS
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: mhi@lists.linux.dev
L: linux-arm-msm@vger.kernel.org
S: Maintained
@@ -16220,7 +16604,9 @@ F: Documentation/devicetree/bindings/*/loongson,ls1*.yaml
F: arch/mips/include/asm/mach-loongson32/
F: arch/mips/loongson32/
F: drivers/*/*loongson1*
+F: drivers/mtd/nand/raw/loongson1-nand-controller.c
F: drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
+F: sound/soc/loongson/loongson1_ac97.c
MIPS/LOONGSON2EF ARCHITECTURE
M: Jiaxun Yang <jiaxun.yang@flygoat.com>
@@ -16491,7 +16877,7 @@ M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media.git
-F: Documentation/devicetree/bindings/media/i2c/mt9v032.txt
+F: Documentation/devicetree/bindings/media/i2c/aptina,mt9v032.txt
F: drivers/media/i2c/mt9v032.c
F: include/media/i2c/mt9v032.h
@@ -16756,6 +17142,7 @@ F: Documentation/networking/net_cachelines/net_device.rst
F: drivers/connector/
F: drivers/net/
F: drivers/ptp/
+F: drivers/s390/net/
F: include/dt-bindings/net/
F: include/linux/cn_proc.h
F: include/linux/etherdevice.h
@@ -16765,6 +17152,7 @@ F: include/linux/fddidevice.h
F: include/linux/hippidevice.h
F: include/linux/if_*
F: include/linux/inetdevice.h
+F: include/linux/ism.h
F: include/linux/netdev*
F: include/linux/platform_data/wiznet.h
F: include/uapi/linux/cn_proc.h
@@ -16920,6 +17308,7 @@ X: net/ceph/
X: net/mac80211/
X: net/rfkill/
X: net/wireless/
+X: tools/testing/selftests/net/can/
NETWORKING [IPSEC]
M: Steffen Klassert <steffen.klassert@secunet.com>
@@ -17212,6 +17601,7 @@ M: Pavel Machek <pavel@kernel.org>
M: Sakari Ailus <sakari.ailus@iki.fi>
L: linux-media@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/adi,ad5820.yaml
F: drivers/media/i2c/ad5820.c
F: drivers/media/i2c/et8ek8
@@ -17368,7 +17758,7 @@ T: git git://git.infradead.org/nvme.git
F: drivers/nvme/target/
NVMEM FRAMEWORK
-M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M: Srinivas Kandagatla <srini@kernel.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/srini/nvmem.git
F: Documentation/ABI/stable/sysfs-bus-nvmem
@@ -17807,6 +18197,23 @@ T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml
F: drivers/media/i2c/ov02a10.c
+OMNIVISION OV02C10 SENSOR DRIVER
+M: Hans de Goede <hansg@kernel.org>
+R: Bryan O'Donoghue <bod@kernel.org>
+L: linux-media@vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media.git
+F: drivers/media/i2c/ov02c10.c
+
+OMNIVISION OV02E10 SENSOR DRIVER
+M: Bryan O'Donoghue <bod@kernel.org>
+M: Hans de Goede <hansg@kernel.org>
+L: linux-media@vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media.git
+F: Documentation/devicetree/bindings/media/i2c/ovti,ov02e10.yaml
+F: drivers/media/i2c/ov02e10.c
+
OMNIVISION OV08D10 SENSOR DRIVER
M: Jimmy Su <jimmy.su@intel.com>
L: linux-media@vger.kernel.org
@@ -17929,7 +18336,7 @@ OMNIVISION OV7670 SENSOR DRIVER
L: linux-media@vger.kernel.org
S: Orphan
T: git git://linuxtv.org/media.git
-F: Documentation/devicetree/bindings/media/i2c/ov7670.txt
+F: Documentation/devicetree/bindings/media/i2c/ovti,ov7670.txt
F: drivers/media/i2c/ov7670.c
OMNIVISION OV772x SENSOR DRIVER
@@ -17945,7 +18352,7 @@ OMNIVISION OV7740 SENSOR DRIVER
L: linux-media@vger.kernel.org
S: Orphan
T: git git://linuxtv.org/media.git
-F: Documentation/devicetree/bindings/media/i2c/ov7740.txt
+F: Documentation/devicetree/bindings/media/i2c/ovti,ov7740.txt
F: drivers/media/i2c/ov7740.c
OMNIVISION OV8856 SENSOR DRIVER
@@ -17986,7 +18393,7 @@ R: Sylwester Nawrocki <s.nawrocki@samsung.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media.git
-F: Documentation/devicetree/bindings/media/i2c/ov9650.txt
+F: Documentation/devicetree/bindings/media/i2c/ovti,ov9650.txt
F: drivers/media/i2c/ov9650.c
OMNIVISION OV9734 SENSOR DRIVER
@@ -18011,12 +18418,13 @@ S: Maintained
F: drivers/mtd/nand/onenand/
F: include/linux/mtd/onenand*.h
-ONEXPLAYER FAN DRIVER
+ONEXPLAYER PLATFORM EC DRIVER
+M: Antheas Kapenekakis <lkml@antheas.dev>
M: Derek John Clark <derekjohn.clark@gmail.com>
M: Joaquín Ignacio Aramendía <samsagax@gmail.com>
-L: linux-hwmon@vger.kernel.org
+L: platform-driver-x86@vger.kernel.org
S: Maintained
-F: drivers/hwmon/oxp-sensors.c
+F: drivers/platform/x86/oxpec.c
ONIE TLV NVMEM LAYOUT DRIVER
M: Miquel Raynal <miquel.raynal@bootlin.com>
@@ -18132,6 +18540,18 @@ F: arch/openrisc/
F: drivers/irqchip/irq-ompic.c
F: drivers/irqchip/irq-or1k-*
+OPENVPN DATA CHANNEL OFFLOAD
+M: Antonio Quartulli <antonio@openvpn.net>
+R: Sabrina Dubroca <sd@queasysnail.net>
+L: openvpn-devel@lists.sourceforge.net (subscribers-only)
+L: netdev@vger.kernel.org
+S: Supported
+T: git https://github.com/OpenVPN/ovpn-net-next.git
+F: Documentation/netlink/specs/ovpn.yaml
+F: drivers/net/ovpn/
+F: include/uapi/linux/ovpn.h
+F: tools/testing/selftests/net/ovpn/
+
OPENVSWITCH
M: Aaron Conole <aconole@redhat.com>
M: Eelco Chaudron <echaudro@redhat.com>
@@ -18156,6 +18576,7 @@ F: Documentation/devicetree/bindings/opp/
F: Documentation/power/opp.rst
F: drivers/opp/
F: include/linux/pm_opp.h
+F: rust/kernel/opp.rs
OPL4 DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
@@ -18191,6 +18612,7 @@ S: Maintained
W: https://linuxtv.org
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
+F: Documentation/devicetree/bindings/media/i2c/ovti,ov2659.txt
F: drivers/media/i2c/ov2659.c
F: include/media/i2c/ov2659.h
@@ -18302,7 +18724,7 @@ F: include/uapi/linux/ppdev.h
PARAVIRT_OPS INTERFACE
M: Juergen Gross <jgross@suse.com>
R: Ajay Kaher <ajay.kaher@broadcom.com>
-R: Alexey Makhalov <alexey.amakhalov@broadcom.com>
+R: Alexey Makhalov <alexey.makhalov@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: virtualization@lists.linux.dev
L: x86@kernel.org
@@ -18368,6 +18790,13 @@ S: Maintained
F: Documentation/hwmon/pc87427.rst
F: drivers/hwmon/pc87427.c
+MAX77705 HARDWARE MONITORING DRIVER
+M: Dzmitry Sankouski <dsankouski@gmail.com>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: Documentation/hwmon/max77705.rst
+F: drivers/hwmon/max77705-hwmon.c
+
PCA9532 LED DRIVER
M: Riku Voipio <riku.voipio@iki.fi>
S: Maintained
@@ -18411,7 +18840,7 @@ M: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/pci/pci-armada8k.txt
+F: Documentation/devicetree/bindings/pci/marvell,armada8k-pcie.yaml
F: drivers/pci/controller/dwc/pcie-armada8k.c
PCI DRIVER FOR CADENCE PCIE IP
@@ -18531,6 +18960,7 @@ M: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
L: linux-pci@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
S: Maintained
+F: Documentation/PCI/controller/rcar-pcie-firmware.rst
F: Documentation/devicetree/bindings/pci/*rcar*
F: drivers/pci/controller/*rcar*
F: drivers/pci/controller/dwc/*rcar*
@@ -18545,7 +18975,7 @@ F: drivers/pci/controller/dwc/pci-exynos.c
PCI DRIVER FOR SYNOPSYS DESIGNWARE
M: Jingoo Han <jingoohan1@gmail.com>
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml
@@ -18568,7 +18998,7 @@ PCI DRIVER FOR V3 SEMICONDUCTOR V360EPC
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-pci@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt
+F: Documentation/devicetree/bindings/pci/v3,v360epc-pci.yaml
F: drivers/pci/controller/pci-v3-semi.c
PCI DRIVER FOR XILINX VERSAL CPM
@@ -18580,8 +19010,8 @@ F: Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
F: drivers/pci/controller/pcie-xilinx-cpm.c
PCI ENDPOINT SUBSYSTEM
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-M: Krzysztof Wilczyński <kw@linux.com>
+M: Manivannan Sadhasivam <mani@kernel.org>
+M: Krzysztof Wilczyński <kwilczynski@kernel.org>
R: Kishon Vijay Abraham I <kishon@kernel.org>
L: linux-pci@vger.kernel.org
S: Supported
@@ -18619,7 +19049,7 @@ PCI MSI DRIVER FOR ALTERA MSI IP
M: Joyce Ooi <joyce.ooi@intel.com>
L: linux-pci@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/pci/altr,msi-controller.yaml
+F: Documentation/devicetree/bindings/interrupt-controller/altr,msi-controller.yaml
F: drivers/pci/controller/pcie-altera-msi.c
PCI MSI DRIVER FOR APPLIEDMICRO XGENE
@@ -18632,8 +19062,8 @@ F: drivers/pci/controller/pci-xgene-msi.c
PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
-M: Krzysztof Wilczyński <kw@linux.com>
-R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Krzysztof Wilczyński <kwilczynski@kernel.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
R: Rob Herring <robh@kernel.org>
L: linux-pci@vger.kernel.org
S: Supported
@@ -18641,6 +19071,7 @@ Q: https://patchwork.kernel.org/project/linux-pci/list/
B: https://bugzilla.kernel.org
C: irc://irc.oftc.net/linux-pci
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
+F: Documentation/ABI/testing/debugfs-pcie-ptm
F: Documentation/devicetree/bindings/pci/
F: drivers/pci/controller/
F: drivers/pci/pci-bridge-emul.c
@@ -18686,6 +19117,16 @@ F: include/asm-generic/pci*
F: include/linux/of_pci.h
F: include/linux/pci*
F: include/uapi/linux/pci*
+
+PCI SUBSYSTEM [RUST]
+M: Danilo Krummrich <dakr@kernel.org>
+R: Bjorn Helgaas <bhelgaas@google.com>
+R: Krzysztof Wilczyński <kwilczynski@kernel.org>
+L: linux-pci@vger.kernel.org
+S: Maintained
+C: irc://irc.oftc.net/linux-pci
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
+F: rust/helpers/pci.c
F: rust/kernel/pci.rs
F: samples/rust/rust_driver_pci.rs
@@ -18779,7 +19220,7 @@ F: Documentation/devicetree/bindings/pci/microchip*
F: drivers/pci/controller/plda/*microchip*
PCIE DRIVER FOR QUALCOMM MSM
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: linux-pci@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
@@ -18815,7 +19256,7 @@ F: Documentation/devicetree/bindings/pci/starfive,jh7110-pcie.yaml
F: drivers/pci/controller/plda/pcie-starfive.c
PCIE ENDPOINT DRIVER FOR QUALCOMM
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: linux-pci@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
@@ -18878,7 +19319,7 @@ F: drivers/net/ethernet/pensando/
PER-CPU MEMORY ALLOCATOR
M: Dennis Zhou <dennis@kernel.org>
M: Tejun Heo <tj@kernel.org>
-M: Christoph Lameter <cl@linux.com>
+M: Christoph Lameter <cl@gentwo.org>
L: linux-mm@kvack.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
@@ -19205,6 +19646,12 @@ S: Maintained
F: drivers/pnp/
F: include/linux/pnp.h
+PORTWELL EC DRIVER
+M: Yen-Chi Huang <jesse.huang@portwell.com.tw>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/portwell-ec.c
+
POSIX CLOCKS and TIMERS
M: Anna-Maria Behnsen <anna-maria@linutronix.de>
M: Frederic Weisbecker <frederic@kernel.org>
@@ -19250,6 +19697,7 @@ M: Mark Rutland <mark.rutland@arm.com>
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/arm/psci.yaml
F: drivers/firmware/psci/
F: include/linux/psci.h
F: include/uapi/linux/psci.h
@@ -19573,7 +20021,7 @@ S: Supported
F: drivers/crypto/intel/qat/
QCOM AUDIO (ASoC) DRIVERS
-M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M: Srinivas Kandagatla <srini@kernel.org>
L: linux-sound@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Supported
@@ -19746,6 +20194,7 @@ F: drivers/media/tuners/qt1010*
QUALCOMM ATH12K WIRELESS DRIVER
M: Jeff Johnson <jjohnson@kernel.org>
+L: linux-wireless@vger.kernel.org
L: ath12k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath12k
@@ -19755,6 +20204,7 @@ N: ath12k
QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
M: Jeff Johnson <jjohnson@kernel.org>
+L: linux-wireless@vger.kernel.org
L: ath10k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
@@ -19764,6 +20214,7 @@ N: ath10k
QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
M: Jeff Johnson <jjohnson@kernel.org>
+L: linux-wireless@vger.kernel.org
L: ath11k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath11k
@@ -19873,7 +20324,7 @@ F: Documentation/devicetree/bindings/net/qcom,ethqos.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
QUALCOMM FASTRPC DRIVER
-M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M: Srinivas Kandagatla <srini@kernel.org>
M: Amol Maheshwari <amahesh@qti.qualcomm.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
@@ -19933,7 +20384,7 @@ F: drivers/iommu/arm/arm-smmu/arm-smmu-qcom*
F: drivers/iommu/msm_iommu*
QUALCOMM IPC ROUTER (QRTR) DRIVER
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: include/trace/events/qrtr.h
@@ -19941,7 +20392,7 @@ F: include/uapi/linux/qrtr.h
F: net/qrtr/
QUALCOMM IPCC MAILBOX DRIVER
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: linux-arm-msm@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
@@ -19968,6 +20419,7 @@ QUALCOMM IRIS VIDEO ACCELERATOR DRIVER
M: Vikash Garodia <quic_vgarodia@quicinc.com>
M: Dikshita Agarwal <quic_dikshita@quicinc.com>
R: Abhinav Kumar <quic_abhinavk@quicinc.com>
+R: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
L: linux-media@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
@@ -19975,7 +20427,7 @@ F: Documentation/devicetree/bindings/media/qcom,*-iris.yaml
F: drivers/media/platform/qcom/iris/
QUALCOMM NAND CONTROLLER DRIVER
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: linux-mtd@lists.infradead.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
@@ -20029,8 +20481,8 @@ F: Documentation/devicetree/bindings/usb/qcom,pmic-*.yaml
F: drivers/usb/typec/tcpm/qcom/
QUALCOMM VENUS VIDEO ACCELERATOR DRIVER
-M: Stanimir Varbanov <stanimir.k.varbanov@gmail.com>
M: Vikash Garodia <quic_vgarodia@quicinc.com>
+M: Dikshita Agarwal <quic_dikshita@quicinc.com>
R: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
L: linux-media@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
@@ -20279,11 +20731,14 @@ F: tools/testing/selftests/net/rds/
RDT - RESOURCE ALLOCATION
M: Tony Luck <tony.luck@intel.com>
M: Reinette Chatre <reinette.chatre@intel.com>
+R: Dave Martin <Dave.Martin@arm.com>
+R: James Morse <james.morse@arm.com>
L: linux-kernel@vger.kernel.org
S: Supported
-F: Documentation/arch/x86/resctrl*
+F: Documentation/filesystems/resctrl.rst
F: arch/x86/include/asm/resctrl.h
F: arch/x86/kernel/cpu/resctrl/
+F: fs/resctrl/
F: include/linux/resctrl*.h
F: tools/testing/selftests/resctrl/
@@ -20291,14 +20746,14 @@ READ-COPY UPDATE (RCU)
M: "Paul E. McKenney" <paulmck@kernel.org>
M: Frederic Weisbecker <frederic@kernel.org> (kernel/rcu/tree_nocb.h)
M: Neeraj Upadhyay <neeraj.upadhyay@kernel.org> (kernel/rcu/tasks.h)
-M: Joel Fernandes <joel@joelfernandes.org>
+M: Joel Fernandes <joelagnelf@nvidia.com>
M: Josh Triplett <josh@joshtriplett.org>
M: Boqun Feng <boqun.feng@gmail.com>
M: Uladzislau Rezki <urezki@gmail.com>
R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
R: Lai Jiangshan <jiangshanlai@gmail.com>
-R: Zqiang <qiang.zhang1211@gmail.com>
+R: Zqiang <qiang.zhang@linux.dev>
L: rcu@vger.kernel.org
S: Supported
W: http://www.rdrop.com/users/paulmck/RCU/
@@ -20451,8 +20906,8 @@ F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
F: drivers/i2c/busses/i2c-emev2.c
RENESAS ETHERNET AVB DRIVER
-M: Paul Barker <paul.barker.ct@bp.renesas.com>
M: Niklas Söderlund <niklas.soderlund@ragnatech.se>
+R: Paul Barker <paul@pbarker.dev>
L: netdev@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
S: Maintained
@@ -20613,6 +21068,22 @@ S: Maintained
F: Documentation/devicetree/bindings/usb/renesas,rzn1-usbf.yaml
F: drivers/usb/gadget/udc/renesas_usbf.c
+RENESAS RZ/V2H(P) DWMAC GBETH GLUE LAYER DRIVER
+M: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+L: netdev@vger.kernel.org
+L: linux-renesas-soc@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml
+F: drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
+
+RENESAS RZ/V2H(P) USB2PHY PORT RESET DRIVER
+M: Fabrizio Castro <fabrizio.castro.jz@renesas.com>
+M: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+L: linux-renesas-soc@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/reset/renesas,rzv2h-usb2phy-reset.yaml
+F: drivers/reset/reset-rzv2h-usb2phy.c
+
RENESAS RZ/V2M I2C DRIVER
M: Fabrizio Castro <fabrizio.castro.jz@renesas.com>
L: linux-i2c@vger.kernel.org
@@ -20825,6 +21296,7 @@ F: Documentation/devicetree/bindings/firmware/thead,th1520-aon.yaml
F: Documentation/devicetree/bindings/mailbox/thead,th1520-mbox.yaml
F: Documentation/devicetree/bindings/net/thead,th1520-gmac.yaml
F: Documentation/devicetree/bindings/pinctrl/thead,th1520-pinctrl.yaml
+F: Documentation/devicetree/bindings/reset/thead,th1520-reset.yaml
F: arch/riscv/boot/dts/thead/
F: drivers/clk/thead/clk-th1520-ap.c
F: drivers/firmware/thead,th1520-aon.c
@@ -20832,8 +21304,10 @@ F: drivers/mailbox/mailbox-th1520.c
F: drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
F: drivers/pinctrl/pinctrl-th1520.c
F: drivers/pmdomain/thead/
+F: drivers/reset/reset-th1520.c
F: include/dt-bindings/clock/thead,th1520-clk-ap.h
F: include/dt-bindings/power/thead,th1520-power.h
+F: include/dt-bindings/reset/thead,th1520-reset.h
F: include/linux/firmware/thead/thead,th1520-aon.h
RNBD BLOCK DRIVERS
@@ -20910,6 +21384,13 @@ F: Documentation/devicetree/bindings/sound/rockchip,rk3308-codec.yaml
F: sound/soc/codecs/rk3308_codec.c
F: sound/soc/codecs/rk3308_codec.h
+ROCKCHIP SAI DRIVER
+M: Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+L: linux-rockchip@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/sound/rockchip,rk3576-sai.yaml
+F: sound/soc/rockchip/rockchip_sai.*
+
ROCKCHIP VIDEO DECODER DRIVER
M: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
L: linux-media@vger.kernel.org
@@ -21000,6 +21481,7 @@ F: include/linux/mfd/rohm-bd71828.h
F: include/linux/mfd/rohm-bd718x7.h
F: include/linux/mfd/rohm-bd957x.h
F: include/linux/mfd/rohm-bd96801.h
+F: include/linux/mfd/rohm-bd96802.h
F: include/linux/mfd/rohm-generic.h
F: include/linux/mfd/rohm-shared.h
@@ -21109,7 +21591,7 @@ M: Alex Gaynor <alex.gaynor@gmail.com>
R: Boqun Feng <boqun.feng@gmail.com>
R: Gary Guo <gary@garyguo.net>
R: Björn Roy Baron <bjorn3_gh@protonmail.com>
-R: Benno Lossin <benno.lossin@proton.me>
+R: Benno Lossin <lossin@kernel.org>
R: Andreas Hindborg <a.hindborg@kernel.org>
R: Alice Ryhl <aliceryhl@google.com>
R: Trevor Gross <tmgross@umich.edu>
@@ -21139,7 +21621,7 @@ F: rust/kernel/alloc.rs
F: rust/kernel/alloc/
RUST [PIN-INIT]
-M: Benno Lossin <benno.lossin@proton.me>
+M: Benno Lossin <lossin@kernel.org>
L: rust-for-linux@vger.kernel.org
S: Maintained
W: https://rust-for-linux.com/pin-init
@@ -21253,6 +21735,7 @@ L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
F: drivers/s390/net/
+F: include/linux/ism.h
S390 PCI SUBSYSTEM
M: Niklas Schnelle <schnelle@linux.ibm.com>
@@ -21436,6 +21919,7 @@ F: drivers/platform/x86/samsung-laptop.c
SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
M: Krzysztof Kozlowski <krzk@kernel.org>
+R: André Draszik <andre.draszik@linaro.org>
L: linux-kernel@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -21446,7 +21930,7 @@ F: Documentation/devicetree/bindings/mfd/samsung,s5m*.yaml
F: Documentation/devicetree/bindings/regulator/samsung,s2m*.yaml
F: Documentation/devicetree/bindings/regulator/samsung,s5m*.yaml
F: drivers/clk/clk-s2mps11.c
-F: drivers/mfd/sec*.c
+F: drivers/mfd/sec*.[ch]
F: drivers/regulator/s2m*.c
F: drivers/regulator/s5m*.c
F: drivers/rtc/rtc-s5m.c
@@ -21585,6 +22069,7 @@ F: include/linux/preempt.h
F: include/linux/sched.h
F: include/linux/wait.h
F: include/uapi/linux/sched.h
+F: kernel/fork.c
F: kernel/sched/
SCHEDULER - SCHED_EXT
@@ -21742,6 +22227,7 @@ F: include/linux/seccomp.h
F: include/uapi/linux/seccomp.h
F: kernel/seccomp.c
F: tools/testing/selftests/kselftest_harness.h
+F: tools/testing/selftests/kselftest_harness/
F: tools/testing/selftests/seccomp/*
K: \bsecure_computing
K: \bTIF_SECCOMP\b
@@ -21921,7 +22407,7 @@ S: Maintained
F: drivers/media/rc/serial_ir.c
SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus)
-M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M: Srinivas Kandagatla <srini@kernel.org>
L: linux-sound@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/slimbus/
@@ -22097,7 +22583,7 @@ M: Benedikt Niedermayr <benedikt.niedermayr@siemens.com>
M: Tobias Schaffner <tobias.schaffner@siemens.com>
L: linux-leds@vger.kernel.org
S: Maintained
-F: drivers/leds/simple/
+F: drivers/leds/simatic/
SIEMENS IPC PLATFORM DRIVERS
M: Bao Cheng Su <baocheng.su@siemens.com>
@@ -22137,6 +22623,7 @@ F: drivers/platform/x86/touchscreen_dmi.c
SILICON LABS WIRELESS DRIVERS (for WFxxx series)
M: Jérôme Pouiller <jerome.pouiller@silabs.com>
+L: linux-wireless@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
F: drivers/net/wireless/silabs/
@@ -22236,10 +22723,8 @@ F: Documentation/devicetree/bindings/nvmem/layouts/kontron,sl28-vpd.yaml
F: drivers/nvmem/layouts/sl28vpd.c
SLAB ALLOCATOR
-M: Christoph Lameter <cl@linux.com>
-M: Pekka Enberg <penberg@kernel.org>
+M: Christoph Lameter <cl@gentwo.org>
M: David Rientjes <rientjes@google.com>
-M: Joonsoo Kim <iamjoonsoo.kim@lge.com>
M: Andrew Morton <akpm@linux-foundation.org>
M: Vlastimil Babka <vbabka@suse.cz>
R: Roman Gushchin <roman.gushchin@linux.dev>
@@ -22481,7 +22966,7 @@ M: Dave Stevenson <dave.stevenson@raspberrypi.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media.git
-F: Documentation/devicetree/bindings/media/i2c/imx219.yaml
+F: Documentation/devicetree/bindings/media/i2c/sony,imx219.yaml
F: drivers/media/i2c/imx219.c
SONY IMX258 SENSOR DRIVER
@@ -22510,7 +22995,7 @@ F: Documentation/devicetree/bindings/media/i2c/sony,imx283.yaml
F: drivers/media/i2c/imx283.c
SONY IMX290 SENSOR DRIVER
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media.git
@@ -22519,7 +23004,7 @@ F: drivers/media/i2c/imx290.c
SONY IMX296 SENSOR DRIVER
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media.git
@@ -22541,8 +23026,9 @@ F: Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml
F: drivers/media/i2c/imx334.c
SONY IMX335 SENSOR DRIVER
+M: Kieran Bingham <kieran.bingham@ideasonboard.com>
L: linux-media@vger.kernel.org
-S: Orphan
+S: Maintained
T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
F: drivers/media/i2c/imx335.c
@@ -22562,7 +23048,7 @@ F: Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml
F: drivers/media/i2c/imx412.c
SONY IMX415 SENSOR DRIVER
-M: Michael Riesch <michael.riesch@wolfvision.net>
+M: Michael Riesch <michael.riesch@collabora.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media.git
@@ -22587,7 +23073,6 @@ W: http://www.linux.it/~malattia/wiki/index.php/Sony_drivers
F: Documentation/admin-guide/laptops/sony-laptop.rst
F: drivers/char/sonypi.c
F: drivers/platform/x86/sony-laptop.c
-F: include/linux/sony-laptop.h
SOPHGO DEVICETREES and DRIVERS
M: Chen Wang <unicorn_wang@outlook.com>
@@ -22654,9 +23139,15 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
F: Documentation/devicetree/bindings/sound/
F: Documentation/sound/soc/
F: include/dt-bindings/sound/
+F: include/sound/cs*
+X: include/sound/cs4231-regs.h
+X: include/sound/cs8403.h
+X: include/sound/cs8427.h
+F: include/sound/madera-pdata.h
F: include/sound/soc*
F: include/sound/sof.h
F: include/sound/sof/
+F: include/sound/wm*.h
F: include/trace/events/sof*.h
F: include/uapi/sound/asoc.h
F: sound/soc/
@@ -22767,7 +23258,6 @@ F: drivers/accessibility/speakup/
SPEAR PLATFORM/CLOCK/PINCTRL SUPPORT
M: Viresh Kumar <vireshk@kernel.org>
-M: Shiraz Hashim <shiraz.linux.kernel@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: soc@lists.linux.dev
S: Maintained
@@ -22881,6 +23371,12 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-stm32*
+ST STM32 OCTO MEMORY MANAGER
+M: Patrice Chotard <patrice.chotard@foss.st.com>
+S: Maintained
+F: Documentation/devicetree/bindings/memory-controllers/st,stm32mp25-omm.yaml
+F: drivers/memory/stm32_omm.c
+
ST STM32 SPI DRIVER
M: Alain Volmat <alain.volmat@foss.st.com>
L: linux-spi@vger.kernel.org
@@ -22894,6 +23390,22 @@ S: Maintained
F: Documentation/hwmon/stpddc60.rst
F: drivers/hwmon/pmbus/stpddc60.c
+ST VD55G1 DRIVER
+M: Benjamin Mugnier <benjamin.mugnier@foss.st.com>
+M: Sylvain Petinot <sylvain.petinot@foss.st.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/st,vd55g1.yaml
+F: drivers/media/i2c/vd55g1.c
+
+ST VD56G3 IMAGE SENSOR DRIVER
+M: Benjamin Mugnier <benjamin.mugnier@foss.st.com>
+M: Sylvain Petinot <sylvain.petinot@foss.st.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/st,vd56g3.yaml
+F: drivers/media/i2c/vd56g3.c
+
ST VGXY61 DRIVER
M: Benjamin Mugnier <benjamin.mugnier@foss.st.com>
M: Sylvain Petinot <sylvain.petinot@foss.st.com>
@@ -23452,6 +23964,13 @@ L: linux-i2c@vger.kernel.org
S: Supported
F: drivers/i2c/busses/i2c-designware-*
+SYNOPSYS DESIGNWARE I2C DRIVER - AMDISP
+M: Nirujogi Pratap <pratap.nirujogi@amd.com>
+M: Bin Du <bin.du@amd.com>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: drivers/i2c/busses/i2c-designware-amdisp.c
+
SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
M: Jaehoon Chung <jh80.chung@samsung.com>
L: linux-mmc@vger.kernel.org
@@ -23507,6 +24026,15 @@ F: include/linux/sc[mp]i_protocol.h
F: include/trace/events/scmi.h
F: include/uapi/linux/virtio_scmi.h
+SYSTEM CONTROL MANAGEMENT INTERFACE (SCMI) i.MX Extension Message Protocol drivers
+M: Peng Fan <peng.fan@nxp.com>
+L: arm-scmi@vger.kernel.org
+L: imx@lists.linux.dev
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/firmware/nxp,*scmi.yaml
+F: drivers/firmware/arm_scmi/vendors/imx/
+
SYSTEM RESET/SHUTDOWN DRIVERS
M: Sebastian Reichel <sre@kernel.org>
L: linux-pm@vger.kernel.org
@@ -23613,6 +24141,7 @@ L: linux-media@vger.kernel.org
S: Maintained
W: https://linuxtv.org
Q: http://patchwork.linuxtv.org/project/linux-media/list/
+F: Documentation/devicetree/bindings/media/i2c/nxp,tda1997x.txt
F: drivers/media/i2c/tda1997x.*
TDA827x MEDIA DRIVER
@@ -23880,7 +24409,6 @@ F: Documentation/devicetree/bindings/sound/ti,tlv320*.yaml
F: Documentation/devicetree/bindings/sound/ti,tlv320adcx140.yaml
F: include/sound/tas2*.h
F: include/sound/tlv320*.h
-F: include/sound/tpa6130a2-plat.h
F: sound/pci/hda/tas2781_hda_i2c.c
F: sound/soc/codecs/pcm1681.c
F: sound/soc/codecs/pcm1789*.*
@@ -23925,6 +24453,13 @@ F: Documentation/devicetree/bindings/hwmon/ti,tps23861.yaml
F: Documentation/hwmon/tps23861.rst
F: drivers/hwmon/tps23861.c
+TEXAS INSTRUMENTS TPS6131X FLASH LED DRIVER
+M: Matthias Fend <matthias.fend@emfend.at>
+L: linux-leds@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/leds/ti,tps6131x.yaml
+F: drivers/leds/flash/leds-tps6131x.c
+
TEXAS INSTRUMENTS' DAC7612 DAC DRIVER
M: Ricardo Ribalda <ribalda@kernel.org>
L: linux-iio@vger.kernel.org
@@ -24204,6 +24739,13 @@ F: drivers/misc/tifm*
F: drivers/mmc/host/tifm_sd.c
F: include/linux/tifm.h
+TI FPC202 DUAL PORT CONTROLLER
+M: Romain Gantois <romain.gantois@bootlin.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/misc/ti,fpc202.yaml
+F: drivers/misc/ti_fpc202.c
+
TI FPD-LINK DRIVERS
M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: linux-media@vger.kernel.org
@@ -24406,6 +24948,13 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/topstar-laptop.c
+TORADEX EMBEDDED CONTROLLER DRIVER
+M: Emanuele Ghidoli <ghidoliemanuele@gmail.com>
+M: Francesco Dolcini <francesco@dolcini.it>
+S: Maintained
+F: Documentation/devicetree/bindings/power/reset/toradex,smarc-ec.yaml
+F: drivers/power/reset/tdx-ec-poweroff.c
+
TORTURE-TEST MODULES
M: Davidlohr Bueso <dave@stgolabs.net>
M: "Paul E. McKenney" <paulmck@kernel.org>
@@ -24450,7 +24999,7 @@ TOSHIBA TC358743 DRIVER
M: Hans Verkuil <hverkuil-cisco@xs4all.nl>
L: linux-media@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/media/i2c/tc358743.txt
+F: Documentation/devicetree/bindings/media/i2c/toshiba,tc358743.txt
F: drivers/media/i2c/tc358743*
F: include/media/i2c/tc358743.h
@@ -24492,6 +25041,7 @@ F: arch/arm64/boot/dts/freescale/fsl-*tqml*.dts*
F: arch/arm64/boot/dts/freescale/imx*mba*.dts*
F: arch/arm64/boot/dts/freescale/imx*tqma*.dts*
F: arch/arm64/boot/dts/freescale/mba*.dtsi
+F: arch/arm64/boot/dts/freescale/tqma8*.dtsi
F: arch/arm64/boot/dts/freescale/tqml*.dts*
F: drivers/gpio/gpio-tqmx86.c
F: drivers/mfd/tqmx86.c
@@ -24553,13 +25103,15 @@ M: David Lechner <dlechner@baylibre.com>
S: Maintained
F: Documentation/devicetree/bindings/trigger-source/pwm-trigger.yaml
-TRUSTED SECURITY MODULE (TSM) ATTESTATION REPORTS
+TRUSTED SECURITY MODULE (TSM) INFRASTRUCTURE
M: Dan Williams <dan.j.williams@intel.com>
L: linux-coco@lists.linux.dev
S: Maintained
-F: Documentation/ABI/testing/configfs-tsm
-F: drivers/virt/coco/tsm.c
-F: include/linux/tsm.h
+F: Documentation/ABI/testing/configfs-tsm-report
+F: Documentation/driver-api/coco/
+F: drivers/virt/coco/guest/
+F: include/linux/tsm*.h
+F: samples/tsm-mr/
TRUSTED SERVICES TEE DRIVER
M: Balint Dobszay <balint.dobszay@arm.com>
@@ -24632,6 +25184,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git turbostat
F: tools/power/x86/turbostat/
F: tools/testing/selftests/turbostat/
+TUXEDO DRIVERS
+M: Werner Sembach <wse@tuxedocomputers.com>
+L: platform-driver-x86@vger.kernel.org
+S: Supported
+F: drivers/platform/x86/tuxedo/
+
TW5864 VIDEO4LINUX DRIVER
M: Bluecherry Maintainers <maintainers@bluecherrydvr.com>
M: Andrey Utkin <andrey.utkin@corp.bluecherry.net>
@@ -24752,6 +25310,12 @@ S: Maintained
F: drivers/usb/common/ulpi.c
F: include/linux/ulpi/
+ULTRATRONIK BOARD SUPPORT
+M: Goran Rađenović <goran.radni@gmail.com>
+M: Börge Strümpfel <boerge.struempfel@gmail.com>
+S: Maintained
+F: arch/arm/boot/dts/st/stm32mp157c-ultra-fly-sbc.dts
+
UNICODE SUBSYSTEM
M: Gabriel Krisman Bertazi <krisman@kernel.org>
L: linux-fsdevel@vger.kernel.org
@@ -24815,7 +25379,7 @@ S: Maintained
F: drivers/ufs/host/ufs-mediatek*
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER QUALCOMM HOOKS
-M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <mani@kernel.org>
L: linux-arm-msm@vger.kernel.org
L: linux-scsi@vger.kernel.org
S: Maintained
@@ -25126,9 +25690,13 @@ S: Maintained
F: drivers/usb/typec/mux/pi3usb30532.c
USB TYPEC PORT CONTROLLER DRIVERS
+M: Badhri Jagan Sridharan <badhri@google.com>
L: linux-usb@vger.kernel.org
-S: Orphan
-F: drivers/usb/typec/tcpm/
+S: Maintained
+F: drivers/usb/typec/tcpm/tcpci.c
+F: drivers/usb/typec/tcpm/tcpm.c
+F: include/linux/usb/tcpci.h
+F: include/linux/usb/tcpm.h
USB TYPEC TUSB1046 MUX DRIVER
M: Romain Gantois <romain.gantois@bootlin.com>
@@ -25288,6 +25856,8 @@ F: drivers/media/i2c/mt*
F: drivers/media/i2c/og*
F: drivers/media/i2c/ov*
F: drivers/media/i2c/s5*
+F: drivers/media/i2c/vd55g1.c
+F: drivers/media/i2c/vd56g3.c
F: drivers/media/i2c/vgxy61.c
VF610 NAND DRIVER
@@ -25584,6 +26154,7 @@ F: include/uapi/linux/virtio_gpio.h
VIRTIO GPU DRIVER
M: David Airlie <airlied@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
+M: Dmitry Osipenko <dmitry.osipenko@collabora.com>
R: Gurchetan Singh <gurchetansingh@chromium.org>
R: Chia-I Wu <olvaffe@gmail.com>
L: dri-devel@lists.freedesktop.org
@@ -25656,6 +26227,13 @@ S: Maintained
F: drivers/nvdimm/nd_virtio.c
F: drivers/nvdimm/virtio_pmem.c
+VIRTIO RTC DRIVER
+M: Peter Hilber <quic_philber@quicinc.com>
+L: virtualization@lists.linux.dev
+S: Maintained
+F: drivers/virtio/virtio_rtc_*
+F: include/uapi/linux/virtio_rtc.h
+
VIRTIO SOUND DRIVER
M: Anton Yakovlev <anton.yakovlev@opensynergy.com>
M: "Michael S. Tsirkin" <mst@redhat.com>
@@ -25741,13 +26319,14 @@ F: tools/testing/vsock/
VMALLOC
M: Andrew Morton <akpm@linux-foundation.org>
-R: Uladzislau Rezki <urezki@gmail.com>
+M: Uladzislau Rezki <urezki@gmail.com>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
F: include/linux/vmalloc.h
F: mm/vmalloc.c
+F: lib/test_vmalloc.c
VME SUBSYSTEM
L: linux-kernel@vger.kernel.org
@@ -25765,7 +26344,7 @@ F: drivers/misc/vmw_balloon.c
VMWARE HYPERVISOR INTERFACE
M: Ajay Kaher <ajay.kaher@broadcom.com>
-M: Alexey Makhalov <alexey.amakhalov@broadcom.com>
+M: Alexey Makhalov <alexey.makhalov@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: virtualization@lists.linux.dev
L: x86@kernel.org
@@ -25793,7 +26372,7 @@ F: drivers/scsi/vmw_pvscsi.h
VMWARE VIRTUAL PTP CLOCK DRIVER
M: Nick Shi <nick.shi@broadcom.com>
R: Ajay Kaher <ajay.kaher@broadcom.com>
-R: Alexey Makhalov <alexey.amakhalov@broadcom.com>
+R: Alexey Makhalov <alexey.makhalov@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
@@ -26137,7 +26716,7 @@ R: Ahmed S. Darwish <darwi@linutronix.de>
L: x86-cpuid@lists.linux.dev
S: Maintained
W: https://x86-cpuid.org
-F: tools/arch/x86/kcpuid/cpuid.csv
+F: tools/arch/x86/kcpuid/
X86 ENTRY CODE
M: Andy Lutomirski <luto@kernel.org>
@@ -26225,6 +26804,7 @@ L: x86@kernel.org
L: linux-coco@lists.linux.dev
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/tdx
+F: Documentation/ABI/testing/sysfs-devices-virtual-misc-tdx_guest
F: arch/x86/boot/compressed/tdx*
F: arch/x86/coco/tdx/
F: arch/x86/include/asm/shared/tdx.h
@@ -26252,6 +26832,17 @@ F: lib/test_xarray.c
F: lib/xarray.c
F: tools/testing/radix-tree
+XARRAY API [RUST]
+M: Tamir Duberstein <tamird@gmail.com>
+M: Andreas Hindborg <a.hindborg@kernel.org>
+L: rust-for-linux@vger.kernel.org
+S: Supported
+W: https://rust-for-linux.com
+B: https://github.com/Rust-for-Linux/linux/issues
+C: https://rust-for-linux.zulipchat.com
+T: git https://github.com/Rust-for-Linux/linux.git xarray-next
+F: rust/kernel/xarray.rs
+
XBOX DVD IR REMOTE
M: Benjamin Valentin <benpicco@googlemail.com>
S: Maintained
@@ -26665,6 +27256,14 @@ L: linux-kernel@vger.kernel.org
S: Maintained
F: arch/x86/kernel/cpu/zhaoxin.c
+ZONED LOOP DEVICE
+M: Damien Le Moal <dlemoal@kernel.org>
+R: Christoph Hellwig <hch@lst.de>
+L: linux-block@vger.kernel.org
+S: Maintained
+F: Documentation/admin-guide/blockdev/zoned_loop.rst
+F: drivers/block/zloop.c
+
ZONEFS FILESYSTEM
M: Damien Le Moal <dlemoal@kernel.org>
M: Naohiro Aota <naohiro.aota@wdc.com>
diff --git a/Makefile b/Makefile
index 38689a0c3605..3244c2a519e2 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION =
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -458,6 +458,11 @@ endif
HOSTRUSTC = rustc
HOSTPKG_CONFIG = pkg-config
+# the KERNELDOC macro needs to be exported, as scripts/Makefile.build
+# has logic to call it
+KERNELDOC = $(srctree)/scripts/kernel-doc.py
+export KERNELDOC
+
KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
-O2 -fomit-frame-pointer -std=gnu11
KBUILD_USERCFLAGS := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
@@ -477,7 +482,6 @@ export rust_common_flags := --edition=2021 \
-Wclippy::ignored_unit_patterns \
-Wclippy::mut_mut \
-Wclippy::needless_bitwise_bool \
- -Wclippy::needless_continue \
-Aclippy::needless_lifetimes \
-Wclippy::no_mangle_with_rust_abi \
-Wclippy::undocumented_unsafe_blocks \
@@ -1053,10 +1057,6 @@ NOSTDINC_FLAGS += -nostdinc
# perform bounds checking.
KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
-#Currently, disable -Wstringop-overflow for GCC 11, globally.
-KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-option, -Wno-stringop-overflow)
-KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
-
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += -fno-strict-overflow
@@ -1068,10 +1068,12 @@ ifdef CONFIG_CC_IS_GCC
KBUILD_CFLAGS += -fconserve-stack
endif
+# Ensure compilers do not transform certain loops into calls to wcslen()
+KBUILD_CFLAGS += -fno-builtin-wcslen
+
# change __FILE__ to the relative path to the source directory
ifdef building_out_of_srctree
-KBUILD_CPPFLAGS += $(call cc-option,-ffile-prefix-map=$(srcroot)/=)
-KBUILD_RUSTFLAGS += --remap-path-prefix=$(srcroot)/=
+KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srcroot)/=)
endif
# include additional Makefiles when needed
diff --git a/arch/Kconfig b/arch/Kconfig
index b0adb665041f..a3308a220f86 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1518,6 +1518,14 @@ config STRICT_MODULE_RWX
config ARCH_HAS_PHYS_TO_DMA
bool
+config ARCH_HAS_CPU_RESCTRL
+ bool
+ help
+ An architecture selects this option to indicate that the necessary
+ hooks are provided to support the common memory system usage
+ monitoring and control interfaces provided by the 'resctrl'
+ filesystem (see RESCTRL_FS).
+
config HAVE_ARCH_COMPILER_H
bool
help
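For context, the ARCH_HAS_CPU_RESCTRL symbol added in the arch/Kconfig hunk above is an opt-in marker: an architecture that implements the resctrl monitoring/control hooks selects it from its own Kconfig, and the filesystem side can then be gated on it. A minimal sketch of how an architecture might opt in (the architecture name below is hypothetical and this snippet is not part of the merge):

config MYARCH
	bool "Hypothetical architecture"
	# Selected only because this arch provides the resctrl hooks
	# described in the ARCH_HAS_CPU_RESCTRL help text.
	select ARCH_HAS_CPU_RESCTRL

With the symbol selected, a resctrl filesystem option such as RESCTRL_FS can use "depends on ARCH_HAS_CPU_RESCTRL" so it stays hidden on architectures without the hooks.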
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 02e8817a8921..2676017f42f1 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -192,13 +192,6 @@ extern unsigned long __zero_page(void);
#define pte_pfn(pte) (pte_val(pte) >> PFN_PTE_SHIFT)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
-#define mk_pte(page, pgprot) \
-({ \
- pte_t pte; \
- \
- pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \
- pte; \
-})
extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 3df5f2dd4c0f..8f1f18adcdb5 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -150,6 +150,8 @@
#define SO_RCVPRIORITY 82
+#define SO_PASSRIGHTS 83
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 1f0eb4f25c0f..a3eaab094ece 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -852,14 +852,9 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
perf_sample_data_init(&data, 0, hwc->last_period);
- if (alpha_perf_event_set_period(event, hwc, idx)) {
- if (perf_event_overflow(event, &data, regs)) {
- /* Interrupts coming too quickly; "throttle" the
- * counter, i.e., disable it for a little while.
- */
- alpha_pmu_stop(event, 0);
- }
- }
+ if (alpha_perf_event_set_period(event, hwc, idx))
+ perf_event_overflow(event, &data, regs);
+
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index 8a2441670a8f..7765dc105d54 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -40,8 +40,6 @@ static inline pmd_t pte_pmd(pte_t pte)
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
-#define mk_pmd(page, prot) pte_pmd(mk_pte(page, prot))
-
#define pmd_trans_huge(pmd) (pmd_val(pmd) & _PAGE_HW_SZ)
#define pfn_pmd(pfn, prot) (__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
diff --git a/arch/arc/include/asm/pgtable-levels.h b/arch/arc/include/asm/pgtable-levels.h
index 86e148226463..d1ce4b0f1071 100644
--- a/arch/arc/include/asm/pgtable-levels.h
+++ b/arch/arc/include/asm/pgtable-levels.h
@@ -142,7 +142,6 @@
#define pmd_pfn(pmd) ((pmd_val(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
#endif
@@ -177,7 +176,6 @@
#define set_pte(ptep, pte) ((*(ptep)) = (pte))
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
-#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
#ifdef CONFIG_ISA_ARCV2
#define pmd_leaf(x) (pmd_val(x) & _PAGE_HW_SZ)
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
index 9709256e31c8..728d625a10f1 100644
--- a/arch/arc/include/asm/syscall.h
+++ b/arch/arc/include/asm/syscall.h
@@ -24,6 +24,17 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
}
static inline void
+syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr)
+{
+ /*
+ * Unlike syscall_get_nr(), syscall_set_nr() can be called only when
+ * the target task is stopped for tracing on entering syscall, so
+ * there is no need to have the same check syscall_get_nr() has.
+ */
+ regs->r8 = nr;
+}
+
+static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{
regs->r0 = regs->orig_r0;
@@ -67,6 +78,20 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
}
}
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *args)
+{
+ unsigned long *inside_ptregs = &regs->r0;
+ unsigned int n = 6;
+ unsigned int i = 0;
+
+ while (n--) {
+ *inside_ptregs = args[i++];
+ inside_ptregs--;
+ }
+}
+
static inline int
syscall_get_arch(struct task_struct *task)
{
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index fea29d9d18d6..809edc59af25 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -170,7 +170,7 @@ init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
if (parent)
panic("DeviceTree incore intc not a root irq controller\n");
- root_domain = irq_domain_add_linear(intc, nr_cpu_irqs, &arcv2_irq_ops, NULL);
+ root_domain = irq_domain_create_linear(of_fwnode_handle(intc), nr_cpu_irqs, &arcv2_irq_ops, NULL);
if (!root_domain)
panic("root irq domain not avail\n");
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 1d2ff1c6a61b..1b159e9e0234 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -112,8 +112,9 @@ init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
if (parent)
panic("DeviceTree incore intc not a root irq controller\n");
- root_domain = irq_domain_add_linear(intc, NR_CPU_IRQS,
- &arc_intc_domain_ops, NULL);
+ root_domain = irq_domain_create_linear(of_fwnode_handle(intc),
+ NR_CPU_IRQS,
+ &arc_intc_domain_ops, NULL);
if (!root_domain)
panic("root irq domain not avail\n");
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index cdd370ec9280..02b28a9324f4 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -391,7 +391,8 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);
- domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(intc), nr_irqs,
+ &idu_irq_ops, NULL);
/* Parent interrupts (core-intc) are already mapped */
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 6e5a651cd75c..ed6d4f0cd621 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -599,10 +599,8 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
arc_perf_event_update(event, &event->hw, event->hw.idx);
perf_sample_data_init(&data, 0, hwc->last_period);
- if (arc_pmu_event_set_period(event)) {
- if (perf_event_overflow(event, &data, regs))
- arc_pmu_stop(event, 0);
- }
+ if (arc_pmu_event_set_period(event))
+ perf_event_overflow(event, &data, regs);
active_ints &= ~BIT(idx);
} while (active_ints);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 25ed6f1a7c7a..3072731fe09c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1380,8 +1380,7 @@ config CC_HAVE_STACKPROTECTOR_TLS
config STACKPROTECTOR_PER_TASK
bool "Use a unique stack canary value for each task"
depends on STACKPROTECTOR && CURRENT_POINTER_IN_TPIDRURO && !XIP_DEFLATED_DATA
- depends on GCC_PLUGINS || CC_HAVE_STACKPROTECTOR_TLS
- select GCC_PLUGIN_ARM_SSP_PER_TASK if !CC_HAVE_STACKPROTECTOR_TLS
+ depends on CC_HAVE_STACKPROTECTOR_TLS
default y
help
Due to the fact that GCC uses an ordinary symbol reference from
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 945b5975fce2..d61369b1eabe 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -96,7 +96,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \
-I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
- -I$(obj) $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
+ -I$(obj)
ccflags-remove-$(CONFIG_FUNCTION_TRACER) += -pg
asflags-y := -DZIMAGE
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
index 230030c13085..65a3025c0e13 100644
--- a/arch/arm/boot/compressed/efi-header.S
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -20,7 +20,7 @@
@ is accepted as an EFI binary. Booting via the UEFI stub
@ will not execute those instructions, but the ARM/Linux
@ boot protocol does, so we need some NOPs here.
- .inst MZ_MAGIC | (0xe225 << 16) @ eor r5, r5, 0x4d000
+ .inst IMAGE_DOS_SIGNATURE | (0xe225 << 16) @ eor r5, r5, 0x4d000
eor r5, r5, 0x4d000 @ undo previous insn
#else
__nop
@@ -43,7 +43,7 @@
.long pe_header - start @ Offset to the PE header.
pe_header:
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
coff_header:
.short IMAGE_FILE_MACHINE_THUMB @ Machine
@@ -60,7 +60,7 @@ coff_header:
#define __pecoff_code_size (__pecoff_data_start - __efi_start)
optional_header:
- .short PE_OPT_MAGIC_PE32 @ PE32 format
+ .short IMAGE_NT_OPTIONAL_HDR32_MAGIC @ PE32 format
.byte 0x02 @ MajorLinkerVersion
.byte 0x14 @ MinorLinkerVersion
.long __pecoff_code_size @ SizeOfCode
diff --git a/arch/arm/boot/dts/allwinner/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/allwinner/sun7i-a20-bananapi.dts
index 46ecf9db2324..d8b362c9661a 100644
--- a/arch/arm/boot/dts/allwinner/sun7i-a20-bananapi.dts
+++ b/arch/arm/boot/dts/allwinner/sun7i-a20-bananapi.dts
@@ -48,6 +48,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
/ {
model = "LeMaker Banana Pi";
@@ -169,6 +170,32 @@
&gmac_mdio {
phy1: ethernet-phy@1 {
reg = <1>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ linux,default-trigger = "netdev";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_AMBER>;
+ function = LED_FUNCTION_LAN;
+ linux,default-trigger = "netdev";
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_LAN;
+ linux,default-trigger = "netdev";
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-a83t.dtsi b/arch/arm/boot/dts/allwinner/sun8i-a83t.dtsi
index addf0cb0f465..6f88d8764e6a 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-a83t.dtsi
+++ b/arch/arm/boot/dts/allwinner/sun8i-a83t.dtsi
@@ -1225,7 +1225,7 @@
};
cooling-maps {
- cpu-hot-limit {
+ map0 {
trip = <&cpu0_hot>;
cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
@@ -1255,7 +1255,7 @@
};
cooling-maps {
- cpu-hot-limit {
+ map0 {
trip = <&cpu1_hot>;
cooling-device = <&cpu100 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu101 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-neo-air.dts b/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-neo-air.dts
index 6d85370e04f1..9a2742363cd0 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-neo-air.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-neo-air.dts
@@ -94,7 +94,7 @@
non-removable;
status = "okay";
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
interrupt-parent = <&pio>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h3.dtsi b/arch/arm/boot/dts/allwinner/sun8i-h3.dtsi
index eac2349a2380..cfd039840b43 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h3.dtsi
+++ b/arch/arm/boot/dts/allwinner/sun8i-h3.dtsi
@@ -262,7 +262,7 @@
};
cooling-maps {
- cpu-hot-limit {
+ map0 {
trip = <&cpu_hot_trip>;
cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
diff --git a/arch/arm/boot/dts/allwinner/sun8i-r40.dtsi b/arch/arm/boot/dts/allwinner/sun8i-r40.dtsi
index a5b1f1e3900d..fa162f7fa9f0 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-r40.dtsi
+++ b/arch/arm/boot/dts/allwinner/sun8i-r40.dtsi
@@ -146,7 +146,7 @@
};
cooling-maps {
- cpu-hot-limit {
+ map0 {
trip = <&cpu_hot_trip>;
cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
diff --git a/arch/arm/boot/dts/amlogic/Makefile b/arch/arm/boot/dts/amlogic/Makefile
index 504c533b1173..3c8a1e88b386 100644
--- a/arch/arm/boot/dts/amlogic/Makefile
+++ b/arch/arm/boot/dts/amlogic/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_MACH_MESON8) += \
meson8-minix-neo-x8.dtb \
+ meson8-fernsehfee3.dtb \
meson8b-ec100.dtb \
meson8b-mxq.dtb \
meson8b-odroidc1.dtb \
diff --git a/arch/arm/boot/dts/amlogic/meson8-fernsehfee3.dts b/arch/arm/boot/dts/amlogic/meson8-fernsehfee3.dts
new file mode 100644
index 000000000000..4e52447d51bd
--- /dev/null
+++ b/arch/arm/boot/dts/amlogic/meson8-fernsehfee3.dts
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+// Copyright (C) 2025 J. Neuschäfer <j.ne@posteo.net>
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/leds/common.h>
+
+#include "meson8.dtsi"
+
+/ {
+ model = "Fernsehfee 3.0";
+ compatible = "tcu,fernsehfee3", "amlogic,meson8";
+
+ aliases {
+ serial0 = &uart_AO;
+ gpiochip0 = &gpio;
+ gpiochip1 = &gpio_ao;
+ i2c0 = &i2c_AO;
+ i2c1 = &i2c_B;
+ mmc0 = &sdhc;
+ mmc1 = &sdio;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x40000000>; /* 1 GiB */
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys-polled";
+ poll-interval = <100>;
+
+ power-button {
+ label = "Power button";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ /*
+ * The power LED can be turned red, otherwise it is green.
+ */
+ gpios = <&gpio_ao GPIO_TEST_N GPIO_ACTIVE_LOW>;
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_RED>;
+ };
+ };
+
+ vcc_5v: regulator-5v {
+ /* 5V rail, always on as long as the system is running */
+ compatible = "regulator-fixed";
+ regulator-name = "5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ vcc_3v3: regulator-3v3 {
+ /* Chipown AP2420 step-down converter */
+ compatible = "regulator-fixed";
+ regulator-name = "3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_5v>;
+ };
+
+ wifi_3v3: regulator-wifi {
+ compatible = "regulator-fixed";
+ regulator-name = "3.3V-WIFI";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3>;
+ gpio = <&gpio GPIOX_11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&vcck>;
+};
+
+&ethmac {
+ status = "okay";
+ pinctrl-0 = <&eth_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&eth_phy0>;
+ phy-mode = "rmii";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eth_phy0: ethernet-phy@0 {
+ /* IC Plus IP101A (0x02430c54) */
+ reg = <0>;
+
+ reset-assert-us = <10000>;
+ reset-deassert-us = <10000>;
+ reset-gpios = <&gpio GPIOH_4 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&i2c_AO {
+ status = "okay";
+ pinctrl-0 = <&i2c_ao_pins>;
+ pinctrl-names = "default";
+
+ pmic@32 {
+ compatible = "ricoh,rn5t618";
+ reg = <0x32>;
+ system-power-controller;
+
+ regulators {
+ vcck: DCDC1 {
+ regulator-name = "VCCK";
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vddee: DCDC2 {
+ /* the output is also used as VDDAO */
+ regulator-name = "VDD_EE";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ DCDC3 {
+ regulator-name = "VDD_DDR";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDO1 {
+ regulator-name = "VDDIO_AO28";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDO2 {
+ regulator-name = "VDDIO_AO18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vcc1v8_usb: LDO3 {
+ regulator-name = "VCC1V8_USB";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ };
+
+ LDO4 {
+ /* This one appears to be unused */
+ regulator-name = "VCC2V8";
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ LDO5 {
+ regulator-name = "AVDD1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDORTC1 {
+ regulator-name = "VDD_LDO";
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDORTC2 {
+ regulator-name = "RTC_0V9";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+
+ eeprom@50 {
+ /* Fairchild FM24C08A */
+ compatible = "atmel,24c08";
+ reg = <0x50>;
+ pagesize = <16>;
+ wp-gpios = <&gpio GPIOH_3 GPIO_ACTIVE_HIGH>;
+ num-addresses = <4>;
+ };
+};
+
+&i2c_B {
+ status = "okay";
+ pinctrl-0 = <&i2c_b_pins>;
+ pinctrl-names = "default";
+
+ /* TODO: SiI9293 HDMI receiver @ 0x39 */
+};
+
+&mali {
+ mali-supply = <&vddee>;
+};
+
+&sdhc {
+ status = "okay";
+ pinctrl-0 = <&sdxc_c_pins>;
+ pinctrl-names = "default";
+
+ /* eMMC */
+ bus-width = <8>;
+ max-frequency = <100000000>;
+
+ disable-wp;
+ cap-mmc-highspeed;
+ mmc-hs200-1_8v;
+ no-sdio;
+
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_3v3>;
+};
+
+&sdio {
+ status = "okay";
+ pinctrl-0 = <&sd_b_pins>;
+
+ /* SD card */
+ slot@1 {
+ compatible = "mmc-slot";
+ reg = <1>;
+ status = "okay";
+
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
+
+ vmmc-supply = <&vcc_3v3>;
+ };
+};
+
+&uart_AO {
+ status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
+};
+
+&usb0 {
+ status = "okay";
+};
+
+&usb0_phy {
+ status = "okay";
+ phy-supply = <&vcc1v8_usb>;
+};
+
+&usb1 {
+ status = "okay";
+ dr_mode = "host";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wifi: wifi@1 {
+ /* Realtek RTL8188 2.4GHz WiFi module */
+ compatible = "usbbda,179";
+ reg = <1>;
+ vdd-supply = <&wifi_3v3>;
+ };
+};
+
+&usb1_phy {
+ status = "okay";
+ phy-supply = <&vcc1v8_usb>;
+};
+
+&ir_receiver {
+ status = "okay";
+ pinctrl-0 = <&ir_recv_pins>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm/boot/dts/amlogic/meson8.dtsi b/arch/arm/boot/dts/amlogic/meson8.dtsi
index 847f7b1f1e96..a609b5a0fda4 100644
--- a/arch/arm/boot/dts/amlogic/meson8.dtsi
+++ b/arch/arm/boot/dts/amlogic/meson8.dtsi
@@ -398,7 +398,7 @@
mux {
groups = "uart_tx_ao_a", "uart_rx_ao_a";
function = "uart_ao";
- bias-disable;
+ bias-pull-up;
};
};
@@ -451,7 +451,7 @@
pwm_ef: pwm@86c0 {
compatible = "amlogic,meson8-pwm-v2";
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+ <0>, /* unknown/untested, the datasheet calls it "Video PLL" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
reg = <0x86c0 0x10>;
@@ -481,6 +481,14 @@
gpio-ranges = <&pinctrl_cbus 0 0 120>;
};
+ i2c_b_pins: i2c-b {
+ mux {
+ groups = "i2c_sda_b", "i2c_sck_b";
+ function = "i2c_b";
+ bias-disable;
+ };
+ };
+
sd_a_pins: sd-a {
mux {
groups = "sd_d0_a", "sd_d1_a", "sd_d2_a",
@@ -526,6 +534,16 @@
};
};
+ sdxc_c_pins: sdxc-c {
+ mux {
+ groups = "sdxc_d0_c", "sdxc_d13_c",
+ "sdxc_clk_c", "sdxc_cmd_c",
+ "sdxc_d47_c";
+ function = "sdxc_c";
+ bias-pull-up;
+ };
+ };
+
spdif_out_pins: spdif-out {
mux {
groups = "spdif_out";
@@ -567,7 +585,7 @@
groups = "uart_tx_a1",
"uart_rx_a1";
function = "uart_a";
- bias-disable;
+ bias-pull-up;
};
};
@@ -705,7 +723,7 @@
&pwm_ab {
compatible = "amlogic,meson8-pwm-v2";
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+ <0>, /* unknown/untested, the datasheet calls it "Video PLL" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
};
@@ -713,7 +731,7 @@
&pwm_cd {
compatible = "amlogic,meson8-pwm-v2";
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+ <0>, /* unknown/untested, the datasheet calls it "Video PLL" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
};
diff --git a/arch/arm/boot/dts/amlogic/meson8b.dtsi b/arch/arm/boot/dts/amlogic/meson8b.dtsi
index 0876611ce26a..2d77b9876bf4 100644
--- a/arch/arm/boot/dts/amlogic/meson8b.dtsi
+++ b/arch/arm/boot/dts/amlogic/meson8b.dtsi
@@ -368,7 +368,7 @@
mux {
groups = "uart_tx_ao_a", "uart_rx_ao_a";
function = "uart_ao";
- bias-disable;
+ bias-pull-up;
};
};
@@ -406,7 +406,7 @@
compatible = "amlogic,meson8b-pwm-v2", "amlogic,meson8-pwm-v2";
reg = <0x86c0 0x10>;
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+ <0>, /* unknown/untested, the datasheet calls it "Video PLL" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
#pwm-cells = <3>;
@@ -521,7 +521,7 @@
groups = "uart_tx_b0",
"uart_rx_b0";
function = "uart_b";
- bias-disable;
+ bias-pull-up;
};
};
@@ -680,7 +680,7 @@
&pwm_ab {
compatible = "amlogic,meson8b-pwm-v2", "amlogic,meson8-pwm-v2";
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+ <0>, /* unknown/untested, the datasheet calls it "Video PLL" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
};
@@ -688,7 +688,7 @@
&pwm_cd {
compatible = "amlogic,meson8b-pwm-v2", "amlogic,meson8-pwm-v2";
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+ <0>, /* unknown/untested, the datasheet calls it "Video PLL" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
};
diff --git a/arch/arm/boot/dts/broadcom/Makefile b/arch/arm/boot/dts/broadcom/Makefile
index d23cf466127b..71062ff9adbe 100644
--- a/arch/arm/boot/dts/broadcom/Makefile
+++ b/arch/arm/boot/dts/broadcom/Makefile
@@ -7,6 +7,7 @@ DTC_FLAGS_bcm2835-rpi-b-plus := -@
DTC_FLAGS_bcm2835-rpi-a-plus := -@
DTC_FLAGS_bcm2835-rpi-cm1-io1 := -@
DTC_FLAGS_bcm2836-rpi-2-b := -@
+DTC_FLAGS_bcm2837-rpi-2-b := -@
DTC_FLAGS_bcm2837-rpi-3-a-plus := -@
DTC_FLAGS_bcm2837-rpi-3-b := -@
DTC_FLAGS_bcm2837-rpi-3-b-plus := -@
@@ -25,6 +26,7 @@ dtb-$(CONFIG_ARCH_BCM2835) += \
bcm2835-rpi-a-plus.dtb \
bcm2835-rpi-cm1-io1.dtb \
bcm2836-rpi-2-b.dtb \
+ bcm2837-rpi-2-b.dtb \
bcm2837-rpi-3-a-plus.dtb \
bcm2837-rpi-3-b.dtb \
bcm2837-rpi-3-b-plus.dtb \
diff --git a/arch/arm/boot/dts/broadcom/bcm2166x-common.dtsi b/arch/arm/boot/dts/broadcom/bcm2166x-common.dtsi
index 87180b7fd695..f535212cb52f 100644
--- a/arch/arm/boot/dts/broadcom/bcm2166x-common.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2166x-common.dtsi
@@ -46,6 +46,11 @@
interrupt-controller;
};
+ pinctrl: pinctrl@1004800 {
+ compatible = "brcm,bcm21664-pinctrl";
+ reg = <0x01004800 0x7f4>;
+ };
+
timer@1006000 {
compatible = "brcm,kona-timer";
reg = <0x01006000 0x1c>;
@@ -332,3 +337,5 @@
};
};
};
+
+#include "bcm2166x-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/broadcom/bcm2166x-pinctrl.dtsi b/arch/arm/boot/dts/broadcom/bcm2166x-pinctrl.dtsi
new file mode 100644
index 000000000000..51b8730c8fee
--- /dev/null
+++ b/arch/arm/boot/dts/broadcom/bcm2166x-pinctrl.dtsi
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Common pinmux configurations for BCM2166x (BCM21664/BCM23550).
+ *
+ * Copyright (C) 2025 Artur Weber <aweber.kernel@gmail.com>
+ */
+
+&pinctrl {
+ /* BSC1 */
+ bsc1_pins: bsc1-pins {
+ bsc1clk-grp0 {
+ pins = "bsc1clk";
+ function = "alt1"; /* BSC1CLK */
+ };
+
+ bsc1dat-grp0 {
+ pins = "bsc1dat";
+ function = "alt1"; /* BSC1DAT */
+ };
+ };
+
+ /* BSC2 */
+ bsc2_pins: bsc2-pins {
+ bsc2clk-grp0 {
+ pins = "gpio16";
+ function = "alt2"; /* BSC2CLK */
+ };
+
+ bsc2dat-grp0 {
+ pins = "gpio17";
+ function = "alt2"; /* BSC2DAT */
+ };
+ };
+
+ /* BSC3 */
+ bsc3_pins: bsc3-pins {
+ bsc3clk-grp0 {
+ pins = "lcdscl";
+ function = "alt1"; /* BSC3_CLK */
+ };
+
+ bsc3dat-grp0 {
+ pins = "lcdsda";
+ function = "alt1"; /* BSC3_SDA */
+ };
+ };
+
+ /* BSC4 */
+ bsc4_pins: bsc4-pins {
+ bsc4clk-grp0 {
+ pins = "lcdres";
+ function = "alt1"; /* BSC4_CLK */
+ };
+
+ bsc4dat-grp0 {
+ pins = "lcdte";
+ function = "alt1"; /* BSC4_SDA */
+ };
+ };
+
+ /* PMBSC */
+ pmbsc_pins: pmbsc-pins {
+ pmbscclk-grp0 {
+ pins = "pmbscclk";
+ function = "alt1"; /* PMBSCCLK */
+ };
+
+ pmbscdat-grp0 {
+ pins = "pmbscdat";
+ function = "alt1"; /* PMBSCDAT */
+ };
+ };
+
+ /* SD */
+ sd_width1_pins: sd-width1-pins {
+ sdck-grp0 {
+ pins = "sdck";
+ function = "alt1"; /* SDCK */
+ bias-disable;
+ };
+
+ sdcmd-grp0 {
+ pins = "sdcmd";
+ function = "alt1"; /* SDCMD */
+ bias-pull-up;
+ };
+
+ sddat-grp0 {
+ pins = "sddat0";
+ function = "alt1"; /* SDDATx */
+ bias-pull-up;
+ };
+ };
+
+ sd_width4_pins: sd-width4-pins {
+ sdck-grp0 {
+ pins = "sdck";
+ function = "alt1"; /* SDCK */
+ bias-disable;
+ };
+
+ sdcmd-grp0 {
+ pins = "sdcmd";
+ function = "alt1"; /* SDCMD */
+ bias-pull-up;
+ };
+
+ sddat-grp0 {
+ pins = "sddat0", "sddat1", "sddat2", "sddat3";
+ function = "alt1"; /* SDDATx */
+ bias-pull-up;
+ };
+ };
+
+ /* SD1 */
+ sd1_width1_pins: sd1-width1-pins {
+ sd1ck-grp0 {
+ pins = "mmc1dat7";
+ function = "alt6"; /* SD1CK */
+ bias-disable;
+ };
+
+ sd1cmd-grp0 {
+ pins = "spi0txd";
+ function = "alt2"; /* SD1CMD */
+ bias-pull-up;
+ };
+
+ sd1dat0-grp0 {
+ pins = "mmc1dat5";
+ function = "alt6"; /* SD1DAT0 */
+ bias-pull-up;
+ };
+ };
+
+ sd1_width4_pins: sd1-width4-pins {
+ sd1ck-grp0 {
+ pins = "mmc1dat7";
+ function = "alt6"; /* SD1CK */
+ bias-disable;
+ };
+
+ sd1cmd-grp0 {
+ pins = "spi0txd";
+ function = "alt2"; /* SD1CMD */
+ bias-pull-up;
+ };
+
+ sd1dat0-grp0 {
+ pins = "mmc1dat5";
+ function = "alt6"; /* SD1DAT0 */
+ bias-pull-up;
+ };
+
+ sd1dat1-grp0 {
+ pins = "gpio93";
+ function = "alt1"; /* SD1DAT1 */
+ bias-pull-up;
+ };
+
+ sd1dat2-grp0 {
+ pins = "gpio94";
+ function = "alt1"; /* SD1DAT2 */
+ bias-pull-up;
+ };
+
+ sd1dat3-grp0 {
+ pins = "mmc1dat3";
+ function = "alt6"; /* SD1DAT3 */
+ bias-pull-up;
+ };
+ };
+
+ /* MMC0 */
+ mmc0_width1_pins: mmc0-width1-pins {
+ mmc0ck-grp0 {
+ pins = "mmc0ck";
+ function = "alt1"; /* MMC0CK */
+ bias-disable;
+ };
+
+ mmc0cmd-grp0 {
+ pins = "mmc0cmd";
+ function = "alt1"; /* MMC0CMD */
+ bias-pull-up;
+ };
+
+ mmc0dat-grp0 {
+ pins = "mmc0dat0";
+ function = "alt1"; /* MMC0DATx */
+ bias-pull-up;
+ };
+ };
+
+ mmc0_width4_pins: mmc0-width4-pins {
+ mmc0ck-grp0 {
+ pins = "mmc0ck";
+ function = "alt1"; /* MMC0CK */
+ bias-disable;
+ };
+
+ mmc0cmd-grp0 {
+ pins = "mmc0cmd";
+ function = "alt1"; /* MMC0CMD */
+ bias-pull-up;
+ };
+
+ mmc0dat-grp0 {
+ pins = "mmc0dat0", "mmc0dat1", "mmc0dat2", "mmc0dat3";
+ function = "alt1"; /* MMC0DATx */
+ bias-pull-up;
+ };
+ };
+
+ mmc0_width8_pins: mmc0-width8-pins {
+ mmc0ck-grp0 {
+ pins = "mmc0ck";
+ function = "alt1"; /* MMC0CK */
+ bias-disable;
+ };
+
+ mmc0cmd-grp0 {
+ pins = "mmc0cmd";
+ function = "alt1"; /* MMC0CMD */
+ bias-pull-up;
+ };
+
+ mmc0dat-grp0 {
+ pins = "mmc0dat0", "mmc0dat1", "mmc0dat2", "mmc0dat3",
+ "mmc0dat4", "mmc0dat5", "mmc0dat6", "mmc0dat7";
+ function = "alt1"; /* MMC0DATx */
+ bias-pull-up;
+ };
+ };
+
+ /* MMC1 */
+ mmc1_width1_pins: mmc1-width1-pins {
+ mmc1ck-grp0 {
+ pins = "mmc1ck";
+ function = "alt1"; /* MMC1CK */
+ bias-disable;
+ };
+
+ mmc1cmd-grp0 {
+ pins = "mmc1cmd";
+ function = "alt1"; /* MMC1CMD */
+ bias-pull-up;
+ };
+
+ mmc1dat-grp0 {
+ pins = "mmc1dat0";
+ function = "alt1"; /* MMC1DATx */
+ bias-pull-up;
+ };
+ };
+
+ mmc1_width4_pins: mmc1-width4-pins {
+ mmc1ck-grp0 {
+ pins = "mmc1ck";
+ function = "alt1"; /* MMC1CK */
+ bias-disable;
+ };
+
+ mmc1cmd-grp0 {
+ pins = "mmc1cmd";
+ function = "alt1"; /* MMC1CMD */
+ bias-pull-up;
+ };
+
+ mmc1dat-grp0 {
+ pins = "mmc1dat0", "mmc1dat1", "mmc1dat2", "mmc1dat3";
+ function = "alt1"; /* MMC1DATx */
+ bias-pull-up;
+ };
+ };
+
+ mmc1_width8_pins: mmc1-width8-pins {
+ mmc1ck-grp0 {
+ pins = "mmc1ck";
+ function = "alt1"; /* MMC1CK */
+ bias-disable;
+ };
+
+ mmc1cmd-grp0 {
+ pins = "mmc1cmd";
+ function = "alt1"; /* MMC1CMD */
+ bias-pull-up;
+ };
+
+ mmc1dat-grp0 {
+ pins = "mmc1dat0", "mmc1dat1", "mmc1dat2", "mmc1dat3",
+ "mmc1dat4", "mmc1dat5", "mmc1dat6", "mmc1dat7";
+ function = "alt1"; /* MMC1DATx */
+ bias-pull-up;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/broadcom/bcm28155-ap.dts b/arch/arm/boot/dts/broadcom/bcm28155-ap.dts
index 2f3634545e64..cefaa9a3c45c 100644
--- a/arch/arm/boot/dts/broadcom/bcm28155-ap.dts
+++ b/arch/arm/boot/dts/broadcom/bcm28155-ap.dts
@@ -37,7 +37,39 @@
status = "okay";
pmu: pmu@8 {
+ compatible = "brcm,bcm59056";
+ interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x08>;
+
+ regulators {
+ camldo1_reg: camldo1 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ sdldo_reg: sdldo {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ sdxldo_reg: sdxldo {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ usbldo_reg: usbldo {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ iosr1_reg: iosr1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ };
};
};
@@ -74,39 +106,3 @@
&usbphy {
status = "okay";
};
-
-#include "bcm59056.dtsi"
-
-&pmu {
- compatible = "brcm,bcm59056";
- interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
- regulators {
- camldo1_reg: camldo1 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- sdldo_reg: sdldo {
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- };
-
- sdxldo_reg: sdxldo {
- regulator-min-microvolt = <2700000>;
- regulator-max-microvolt = <3300000>;
- };
-
- usbldo_reg: usbldo {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- iosr1_reg: iosr1 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- };
- };
-};
diff --git a/arch/arm/boot/dts/broadcom/bcm2837-rpi-2-b.dts b/arch/arm/boot/dts/broadcom/bcm2837-rpi-2-b.dts
new file mode 100644
index 000000000000..1868cee05853
--- /dev/null
+++ b/arch/arm/boot/dts/broadcom/bcm2837-rpi-2-b.dts
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+#include "bcm2837.dtsi"
+#include "bcm2836-rpi.dtsi"
+#include "bcm283x-rpi-led-deprecated.dtsi"
+#include "bcm283x-rpi-smsc9514.dtsi"
+#include "bcm283x-rpi-usb-host.dtsi"
+
+/ {
+ compatible = "raspberrypi,2-model-b-rev2", "brcm,bcm2837";
+ model = "Raspberry Pi 2 Model B rev 1.2";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0 0x40000000>;
+ };
+};
+
+&gpio {
+ /*
+ * Taken from rpi_SCH_2b_1p2_reduced.pdf and
+ * the official GPU firmware DT blob.
+ *
+ * Legend:
+ * "FOO" = GPIO line named "FOO" on the schematic
+ * "FOO_N" = GPIO line named "FOO" on schematic, active low
+ */
+ gpio-line-names = "ID_SDA",
+ "ID_SCL",
+ "GPIO2",
+ "GPIO3",
+ "GPIO4",
+ "GPIO5",
+ "GPIO6",
+ "GPIO7",
+ "GPIO8",
+ "GPIO9",
+ "GPIO10",
+ "GPIO11",
+ "GPIO12",
+ "GPIO13",
+ "GPIO14",
+ "GPIO15",
+ "GPIO16",
+ "GPIO17",
+ "GPIO18",
+ "GPIO19",
+ "GPIO20",
+ "GPIO21",
+ "GPIO22",
+ "GPIO23",
+ "GPIO24",
+ "GPIO25",
+ "GPIO26",
+ "GPIO27",
+ "SDA0",
+ "SCL0",
+ "", /* GPIO30 */
+ "LAN_RUN",
+ "CAM_GPIO1",
+ "", /* GPIO33 */
+ "", /* GPIO34 */
+ "PWR_LOW_N",
+ "", /* GPIO36 */
+ "", /* GPIO37 */
+ "USB_LIMIT",
+ "", /* GPIO39 */
+ "PWM0_OUT",
+ "CAM_GPIO0",
+ "SMPS_SCL",
+ "SMPS_SDA",
+ "ETH_CLK",
+ "PWM1_OUT",
+ "HDMI_HPD_N",
+ "STATUS_LED",
+ /* Used by SD Card */
+ "SD_CLK_R",
+ "SD_CMD_R",
+ "SD_DATA0_R",
+ "SD_DATA1_R",
+ "SD_DATA2_R",
+ "SD_DATA3_R";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpioout &alt0 &i2s_alt0>;
+
+ /* I2S interface */
+ i2s_alt0: i2s_alt0 {
+ brcm,pins = <18 19 20 21>;
+ brcm,function = <BCM2835_FSEL_ALT0>;
+ };
+};
+
+&hdmi {
+ hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+ power-domains = <&power RPI_POWER_DOMAIN_HDMI>;
+ status = "okay";
+};
+
+&led_act {
+ gpios = <&gpio 47 GPIO_ACTIVE_HIGH>;
+};
+
+&leds {
+ led-pwr {
+ label = "PWR";
+ gpios = <&gpio 35 GPIO_ACTIVE_HIGH>;
+ default-state = "keep";
+ linux,default-trigger = "default-on";
+ };
+};
+
+&pwm {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_gpio40 &pwm1_gpio45>;
+ status = "okay";
+};
+
+&sdhost {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdhost_gpio48>;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_gpio14>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/broadcom/bcm59056.dtsi b/arch/arm/boot/dts/broadcom/bcm59056.dtsi
deleted file mode 100644
index a9bb7ad81378..000000000000
--- a/arch/arm/boot/dts/broadcom/bcm59056.dtsi
+++ /dev/null
@@ -1,91 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
-* Copyright 2014 Linaro Limited
-* Author: Matt Porter <mporter@linaro.org>
-*/
-
-&pmu {
- compatible = "brcm,bcm59056";
- regulators {
- rfldo_reg: rfldo {
- };
-
- camldo1_reg: camldo1 {
- };
-
- camldo2_reg: camldo2 {
- };
-
- simldo1_reg: simldo1 {
- };
-
- simldo2_reg: simldo2 {
- };
-
- sdldo_reg: sdldo {
- };
-
- sdxldo_reg: sdxldo {
- };
-
- mmcldo1_reg: mmcldo1 {
- };
-
- mmcldo2_reg: mmcldo2 {
- };
-
- audldo_reg: audldo {
- };
-
- micldo_reg: micldo {
- };
-
- usbldo_reg: usbldo {
- };
-
- vibldo_reg: vibldo {
- };
-
- csr_reg: csr {
- };
-
- iosr1_reg: iosr1 {
- };
-
- iosr2_reg: iosr2 {
- };
-
- msr_reg: msr {
- };
-
- sdsr1_reg: sdsr1 {
- };
-
- sdsr2_reg: sdsr2 {
- };
-
- vsr_reg: vsr {
- };
-
- gpldo1_reg: gpldo1 {
- };
-
- gpldo2_reg: gpldo2 {
- };
-
- gpldo3_reg: gpldo3 {
- };
-
- gpldo4_reg: gpldo4 {
- };
-
- gpldo5_reg: gpldo5 {
- };
-
- gpldo6_reg: gpldo6 {
- };
-
- vbus_reg: vbus {
- };
- };
-};
diff --git a/arch/arm/boot/dts/intel/socfpga/Makefile b/arch/arm/boot/dts/intel/socfpga/Makefile
index c467828aeb4b..7f69a0355ea5 100644
--- a/arch/arm/boot/dts/intel/socfpga/Makefile
+++ b/arch/arm/boot/dts/intel/socfpga/Makefile
@@ -10,6 +10,7 @@ dtb-$(CONFIG_ARCH_INTEL_SOCFPGA) += \
socfpga_cyclone5_mcvevk.dtb \
socfpga_cyclone5_socdk.dtb \
socfpga_cyclone5_de0_nano_soc.dtb \
+ socfpga_cyclone5_de10nano.dtb \
socfpga_cyclone5_sockit.dtb \
socfpga_cyclone5_socrates.dtb \
socfpga_cyclone5_sodia.dtb \
diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_de10nano.dts b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_de10nano.dts
new file mode 100644
index 000000000000..ec25106caacf
--- /dev/null
+++ b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_de10nano.dts
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017, Intel Corporation
+ *
+ * based on socfpga_cyclone5_de0_nano_soc.dts
+ */
+/dts-v1/;
+
+#include "socfpga_cyclone5.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Terasic DE10-Nano";
+ compatible = "terasic,de10-nano", "altr,socfpga-cyclone5", "altr,socfpga";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ /* 1 GiB */
+ device_type = "memory";
+ reg = <0x0 0x40000000>;
+ };
+
+ soc {
+ fpga: bus@ff200000 {
+ compatible = "simple-bus";
+ reg = <0xff200000 0x00200000>;
+ ranges = <0x00000000 0xff200000 0x00200000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /*
+ * Here the devices will appear if an FPGA image is
+ * loaded. Their description is expected to be added
+ * using a device tree overlay that matches the image.
+ */
+ };
+ };
+};
+
+&gmac1 {
+ /* Uses a KSZ9031RNX phy */
+ phy-mode = "rgmii-id";
+ rxd0-skew-ps = <420>;
+ rxd1-skew-ps = <420>;
+ rxd2-skew-ps = <420>;
+ rxd3-skew-ps = <420>;
+ txen-skew-ps = <0>;
+ rxdv-skew-ps = <420>;
+ status = "okay";
+};
+
+&gpio0 {
+ status = "okay";
+};
+
+&gpio1 {
+ status = "okay";
+};
+
+&gpio2 {
+ status = "okay";
+};
+
+&i2c0 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ accelerometer@53 {
+ compatible = "adi,adxl345";
+ reg = <0x53>;
+ /* HPS_GSENSOR_INT is routed to UART0_RX/CAN0_RX/SPIM0_SS1/HPS_GPIO61 */
+ interrupt-parent = <&portc>;
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "INT1";
+ };
+};
+
+&mmc0 {
+ /* micro SD card socket J11 */
+ status = "okay";
+};
+
+&uart0 {
+ /*
+ * Accessible via USB (FT232R) on Mini-USB plug J4
+ * RX = TRACE_D0/SPIS0_CLK/UART0_RX/HPS_GPIO49
+ * TX = TRACE_D1/SPIS0_MOSI/UART0_TX/HPS_GPIO50
+ * no handshaking lines
+ */
+ clock-frequency = <100000000>;
+};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-db.dtsi b/arch/arm/boot/dts/marvell/kirkwood-db.dtsi
index 6fe2e31534af..8bacaeb4f4bd 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-db.dtsi
+++ b/arch/arm/boot/dts/marvell/kirkwood-db.dtsi
@@ -39,7 +39,7 @@
status = "okay";
};
- ehci@50000 {
+ usb@50000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-dir665.dts b/arch/arm/boot/dts/marvell/kirkwood-dir665.dts
index 2f6793f794cd..36394d1ab3e2 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-dir665.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-dir665.dts
@@ -129,7 +129,7 @@
status = "okay";
};
- ehci@50000 {
+ usb@50000 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-mv88f6281gtw-ge.dts b/arch/arm/boot/dts/marvell/kirkwood-mv88f6281gtw-ge.dts
index e3b41784c876..051579fc36b8 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-mv88f6281gtw-ge.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-mv88f6281gtw-ge.dts
@@ -63,7 +63,7 @@
status = "okay";
};
- ehci@50000 {
+ usb@50000 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood.dtsi b/arch/arm/boot/dts/marvell/kirkwood.dtsi
index 815ef7719d13..8a1338e672b3 100644
--- a/arch/arm/boot/dts/marvell/kirkwood.dtsi
+++ b/arch/arm/boot/dts/marvell/kirkwood.dtsi
@@ -263,7 +263,7 @@
status = "okay";
};
- usb0: ehci@50000 {
+ usb0: usb@50000 {
compatible = "marvell,orion-ehci";
reg = <0x50000 0x1000>;
interrupts = <19>;
diff --git a/arch/arm/boot/dts/marvell/orion5x.dtsi b/arch/arm/boot/dts/marvell/orion5x.dtsi
index 2d41f5c166ee..939259c57e05 100644
--- a/arch/arm/boot/dts/marvell/orion5x.dtsi
+++ b/arch/arm/boot/dts/marvell/orion5x.dtsi
@@ -146,7 +146,7 @@
status = "okay";
};
- ehci0: ehci@50000 {
+ ehci0: usb@50000 {
compatible = "marvell,orion-ehci";
reg = <0x50000 0x1000>;
interrupts = <17>;
@@ -218,7 +218,7 @@
status = "okay";
};
- ehci1: ehci@a0000 {
+ ehci1: usb@a0000 {
compatible = "marvell,orion-ehci";
reg = <0xa0000 0x1000>;
interrupts = <12>;
diff --git a/arch/arm/boot/dts/mediatek/mt2701-evb.dts b/arch/arm/boot/dts/mediatek/mt2701-evb.dts
index 4c76366aa938..e97dc37f716c 100644
--- a/arch/arm/boot/dts/mediatek/mt2701-evb.dts
+++ b/arch/arm/boot/dts/mediatek/mt2701-evb.dts
@@ -50,6 +50,7 @@
bt_sco_codec:bt_sco_codec {
compatible = "linux,bt-sco";
+ #sound-dai-cells = <0>;
};
backlight_lcd: backlight_lcd {
diff --git a/arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts
index 30fdc4f55a3b..53a657cf4efb 100644
--- a/arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts
@@ -30,6 +30,15 @@
device_type = "memory";
reg = <0x60000000 0x40000000>;
};
+
+ reg_5v: regulator-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "5V_MAIN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
};
&dma0 {
@@ -60,6 +69,26 @@
status = "okay";
};
+&gmac0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gmac0_default
+ &pinctrl_gmac0_mdio_default
+ &pinctrl_gmac0_txck_default
+ &pinctrl_gmac0_phy_irq>;
+ phy-mode = "rgmii-id";
+ nvmem-cells = <&eeprom0_eui48>;
+ nvmem-cell-names = "mac-address";
+ status = "okay";
+
+ ethernet-phy@7 {
+ reg = <0x7>;
+ interrupt-parent = <&pioa>;
+ interrupts = <PIN_PC1 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
&i2c10 {
dmas = <0>, <0>;
i2c-analog-filter;
@@ -99,6 +128,149 @@
label = "VDDCPU";
};
};
+
+ pmic@5b {
+ compatible = "microchip,mcp16502";
+ reg = <0x5b>;
+ lvin-supply = <&reg_5v>;
+ pvin1-supply = <&reg_5v>;
+ pvin2-supply = <&reg_5v>;
+ pvin3-supply = <&reg_5v>;
+ pvin4-supply = <&reg_5v>;
+ status = "okay";
+
+ regulators {
+ vdd_3v3: VDD_IO {
+ regulator-name = "VDD_IO";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ regulator-mode = <4>;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-mode = <4>;
+ };
+ };
+
+ vddioddr: VDD_DDR {
+ regulator-name = "VDD_DDR";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1350000>;
+ regulator-mode = <4>;
+ };
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1350000>;
+ regulator-mode = <4>;
+ };
+ };
+
+ vddcore: VDD_CORE {
+ regulator-name = "VDD_CORE";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1050000>;
+ regulator-mode = <4>;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-mode = <4>;
+ };
+ };
+
+ vddcpu: VDD_OTHER {
+ regulator-name = "VDD_OTHER";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-ramp-delay = <3125>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1050000>;
+ regulator-mode = <4>;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-mode = <4>;
+ };
+ };
+
+ vldo1: LDO1 {
+ regulator-name = "LDO1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-suspend-microvolt = <1800000>;
+ regulator-on-in-suspend;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vldo2: LDO2 {
+ regulator-name = "LDO2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+
+ eeprom0: eeprom@51 {
+ compatible = "microchip,24aa025e48";
+ reg = <0x51>;
+ size = <256>;
+ pagesize = <16>;
+ vcc-supply = <&vdd_3v3>;
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eeprom0_eui48: eui48@fa {
+ reg = <0xfa 0x6>;
+ };
+ };
+ };
};
&main_xtal {
@@ -106,6 +278,39 @@
};
&pioa {
+ pinctrl_gmac0_default: gmac0-default {
+ pinmux = <PIN_PA26__G0_TX0>,
+ <PIN_PA27__G0_TX1>,
+ <PIN_PB4__G0_TX2>,
+ <PIN_PB5__G0_TX3>,
+ <PIN_PA29__G0_RX0>,
+ <PIN_PA30__G0_RX1>,
+ <PIN_PB2__G0_RX2>,
+ <PIN_PB6__G0_RX3>,
+ <PIN_PA25__G0_TXCTL>,
+ <PIN_PB3__G0_RXCK>,
+ <PIN_PA28__G0_RXCTL>;
+ slew-rate = <0>;
+ bias-disable;
+ };
+
+ pinctrl_gmac0_mdio_default: gmac0-mdio-default {
+ pinmux = <PIN_PA31__G0_MDC>,
+ <PIN_PB0__G0_MDIO>;
+ bias-disable;
+ };
+
+ pinctrl_gmac0_phy_irq: gmac0-phy-irq {
+ pinmux = <PIN_PC1__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_gmac0_txck_default: gmac0-txck-default {
+ pinmux = <PIN_PB1__G0_REFCK>;
+ slew-rate = <0>;
+ bias-pull-up;
+ };
+
pinctrl_i2c10_default: i2c10-default{
pinmux = <PIN_PB19__FLEXCOM10_IO1>,
<PIN_PB20__FLEXCOM10_IO0>;
@@ -141,6 +346,10 @@
};
};
+&rtt {
+ atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+};
+
&sdmmc1 {
bus-width = <4>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
index 2dec2218f32c..eb5f27ce1942 100644
--- a/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
@@ -369,6 +369,38 @@
spi-tx-bus-width = <4>;
spi-rx-bus-width = <4>;
m25p,fast-read;
+ label = "at91-qspi";
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ at91bootstrap@0 {
+ label = "qspi1: at91bootstrap";
+ reg = <0x0 0x40000>;
+ };
+
+ bootloader@40000 {
+ label = "qspi1: u-boot";
+ reg = <0x40000 0x100000>;
+ };
+
+ bootloaderenv@140000 {
+ label = "qspi1: u-boot env";
+ reg = <0x140000 0x40000>;
+ };
+
+ dtb@180000 {
+ label = "qspi1: device tree";
+ reg = <0x180000 0x80000>;
+ };
+
+ kernel@200000 {
+ label = "qspi1: kernel";
+ reg = <0x200000 0x600000>;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/microchip/at91sam9263ek.dts b/arch/arm/boot/dts/microchip/at91sam9263ek.dts
index 471ea25296aa..93c5268a0845 100644
--- a/arch/arm/boot/dts/microchip/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/microchip/at91sam9263ek.dts
@@ -152,7 +152,7 @@
nand@3 {
reg = <0x3 0x0 0x800000>;
rb-gpios = <&pioA 22 GPIO_ACTIVE_HIGH>;
- cs-gpios = <&pioA 15 GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&pioD 15 GPIO_ACTIVE_HIGH>;
nand-bus-width = <8>;
nand-ecc-mode = "soft";
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/microchip/sama7d65.dtsi b/arch/arm/boot/dts/microchip/sama7d65.dtsi
index b6710ccd4c36..d08d773b1cc5 100644
--- a/arch/arm/boot/dts/microchip/sama7d65.dtsi
+++ b/arch/arm/boot/dts/microchip/sama7d65.dtsi
@@ -47,12 +47,37 @@
};
};
+ ns_sram: sram@100000 {
+ compatible = "mmio-sram";
+ reg = <0x100000 0x20000>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
soc {
compatible = "simple-bus";
ranges;
#address-cells = <1>;
#size-cells = <1>;
+ securam: sram@e0000800 {
+ compatible = "microchip,sama7d65-securam", "atmel,sama5d2-securam", "mmio-sram";
+ reg = <0xe0000800 0x4000>;
+ ranges = <0 0xe0000800 0x4000>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 17>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ no-memory-wc;
+ };
+
+ secumod: security-module@e0004000 {
+ compatible = "microchip,sama7d65-secumod", "atmel,sama5d2-secumod", "syscon";
+ reg = <0xe0004000 0x4000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
sfrbu: sfr@e0008000 {
compatible ="microchip,sama7d65-sfrbu", "atmel,sama5d2-sfrbu", "syscon";
reg = <0xe0008000 0x20>;
@@ -107,6 +132,13 @@
status = "disabled";
};
+ rtt: rtc@e001d300 {
+ compatible = "microchip,sama7d65-rtt", "atmel,at91sam9260-rtt";
+ reg = <0xe001d300 0x30>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk32k 0>;
+ };
+
clk32k: clock-controller@e001d500 {
compatible = "microchip,sama7d65-sckc", "microchip,sam9x60-sckc";
reg = <0xe001d500 0x4>;
@@ -114,6 +146,11 @@
#clock-cells = <1>;
};
+ gpbr: syscon@e001d700 {
+ compatible = "microchip,sama7d65-gpbr", "syscon";
+ reg = <0xe001d700 0x48>;
+ };
+
rtc: rtc@e001d800 {
compatible = "microchip,sama7d65-rtc", "microchip,sam9x60-rtc";
reg = <0xe001d800 0x30>;
@@ -169,6 +206,38 @@
status = "disabled";
};
+ gmac0: ethernet@e1618000 {
+ compatible = "microchip,sama7d65-gem", "microchip,sama7g5-gem";
+ reg = <0xe1618000 0x2000>;
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 46>, <&pmc PMC_TYPE_PERIPHERAL 46>, <&pmc PMC_TYPE_GCK 46>, <&pmc PMC_TYPE_GCK 49>;
+ clock-names = "pclk", "hclk", "tx_clk", "tsu_clk";
+ assigned-clocks = <&pmc PMC_TYPE_GCK 46>, <&pmc PMC_TYPE_GCK 49>;
+ assigned-clock-rates = <125000000>, <200000000>;
+ status = "disabled";
+ };
+
+ gmac1: ethernet@e161c000 {
+ compatible = "microchip,sama7d65-gem", "microchip,sama7g5-gem";
+ reg = <0xe161c000 0x2000>;
+ interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 47>, <&pmc PMC_TYPE_PERIPHERAL 47>,<&pmc PMC_TYPE_GCK 47>, <&pmc PMC_TYPE_GCK 50>;
+ clock-names = "pclk", "hclk", "tx_clk", "tsu_clk";
+ assigned-clocks = <&pmc PMC_TYPE_GCK 47>, <&pmc PMC_TYPE_GCK 50>;
+ assigned-clock-rates = <125000000>, <200000000>;
+ status = "disabled";
+ };
+
pit64b0: timer@e1800000 {
compatible = "microchip,sama7d65-pit64b", "microchip,sam9x60-pit64b";
reg = <0xe1800000 0x100>;
@@ -185,6 +254,199 @@
clock-names = "pclk", "gclk";
};
+ flx0: flexcom@e1820000 {
+ compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
+ reg = <0xe1820000 0x200>;
+ ranges = <0x0 0xe1820000 0x800>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 34>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+
+ uart0: serial@200 {
+ compatible = "microchip,sama7d65-usart", "atmel,at91sam9260-usart";
+ reg = <0x200 0x200>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 34>;
+ clock-names = "usart";
+ dmas = <&dma1 AT91_XDMAC_DT_PERID(6)>,
+ <&dma1 AT91_XDMAC_DT_PERID(5)>;
+ dma-names = "tx", "rx";
+ atmel,use-dma-rx;
+ atmel,use-dma-tx;
+ atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@600 {
+ compatible = "microchip,sama7d65-i2c", "microchip,sam9x60-i2c";
+ reg = <0x600 0x200>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 34>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ atmel,fifo-size = <32>;
+ dmas = <&dma0 AT91_XDMAC_DT_PERID(6)>,
+ <&dma0 AT91_XDMAC_DT_PERID(5)>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+ };
+
+ flx1: flexcom@e1824000 {
+ compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
+ reg = <0xe1824000 0x200>;
+ ranges = <0x0 0xe1824000 0x800>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 35>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+
+ spi1: spi@400 {
+ compatible = "microchip,sama7d65-spi", "atmel,at91rm9200-spi";
+ reg = <0x400 0x200>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 35>;
+ clock-names = "spi_clk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dmas = <&dma0 AT91_XDMAC_DT_PERID(8)>,
+ <&dma0 AT91_XDMAC_DT_PERID(7)>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <32>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@600 {
+ compatible = "microchip,sama7d65-i2c", "microchip,sam9x60-i2c";
+ reg = <0x600 0x200>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 35>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dmas = <&dma0 AT91_XDMAC_DT_PERID(8)>,
+ <&dma0 AT91_XDMAC_DT_PERID(7)>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <32>;
+ status = "disabled";
+ };
+ };
+
+ flx2: flexcom@e1828000 {
+ compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
+ reg = <0xe1828000 0x200>;
+ ranges = <0x0 0xe1828000 0x800>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 36>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+
+ uart2: serial@200 {
+ compatible = "microchip,sama7d65-usart", "atmel,at91sam9260-usart";
+ reg = <0x200 0x200>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 36>;
+ clock-names = "usart";
+ dmas = <&dma1 AT91_XDMAC_DT_PERID(10)>,
+ <&dma1 AT91_XDMAC_DT_PERID(9)>;
+ dma-names = "tx", "rx";
+ atmel,use-dma-rx;
+ atmel,use-dma-tx;
+ atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ status = "disabled";
+ };
+ };
+
+ flx3: flexcom@e182c000 {
+ compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
+ reg = <0xe182c000 0x200>;
+ ranges = <0x0 0xe182c000 0x800>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 37>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+
+ i2c3: i2c@600 {
+ compatible = "microchip,sama7d65-i2c", "microchip,sam9x60-i2c";
+ reg = <0x600 0x200>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 37>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ dmas = <&dma0 AT91_XDMAC_DT_PERID(12)>,
+ <&dma0 AT91_XDMAC_DT_PERID(11)>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <32>;
+ status = "disabled";
+ };
+
+ };
+
+ flx4: flexcom@e2018000 {
+ compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
+ reg = <0xe2018000 0x200>;
+ ranges = <0x0 0xe2018000 0x800>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 38>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+
+ uart4: serial@200 {
+ compatible = "microchip,sama7d65-usart", "atmel,at91sam9260-usart";
+ reg = <0x200 0x200>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 38>;
+ clock-names = "usart";
+ dmas = <&dma1 AT91_XDMAC_DT_PERID(14)>,
+ <&dma1 AT91_XDMAC_DT_PERID(13)>;
+ dma-names = "tx", "rx";
+ atmel,use-dma-rx;
+ atmel,use-dma-tx;
+ atmel,fifo-size = <16>;
+ atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ status = "disabled";
+ };
+
+ spi4: spi@400 {
+ compatible = "microchip,sama7d65-spi", "atmel,at91rm9200-spi";
+ reg = <0x400 0x200>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 38>;
+ clock-names = "spi_clk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dmas = <&dma0 AT91_XDMAC_DT_PERID(14)>,
+ <&dma0 AT91_XDMAC_DT_PERID(13)>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <32>;
+ status = "disabled";
+ };
+ };
+
+ flx5: flexcom@e201c000 {
+ compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
+ reg = <0xe201c000 0x200>;
+ ranges = <0x0 0xe201c000 0x800>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 39>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+
+ i2c5: i2c@600 {
+ compatible = "microchip,sama7d65-i2c", "microchip,sam9x60-i2c";
+ reg = <0x600 0x200>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 39>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dmas = <&dma0 AT91_XDMAC_DT_PERID(16)>,
+ <&dma0 AT91_XDMAC_DT_PERID(15)>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <32>;
+ status = "disabled";
+ };
+ };
+
flx6: flexcom@e2020000 {
compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
reg = <0xe2020000 0x200>;
@@ -206,6 +468,80 @@
};
};
+ flx7: flexcom@e2024000 {
+ compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
+ reg = <0xe2024000 0x200>;
+ ranges = <0x0 0xe2024000 0x800>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 41>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+
+ uart7: serial@200 {
+ compatible = "microchip,sama7d65-usart", "atmel,at91sam9260-usart";
+ reg = <0x200 0x200>;
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 41>;
+ clock-names = "usart";
+ dmas = <&dma1 AT91_XDMAC_DT_PERID(20)>,
+ <&dma1 AT91_XDMAC_DT_PERID(19)>;
+ dma-names = "tx", "rx";
+ atmel,use-dma-rx;
+ atmel,use-dma-tx;
+ atmel,fifo-size = <16>;
+ atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ status = "disabled";
+ };
+ };
+
+ flx8: flexcom@e281c000 {
+ compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
+ reg = <0xe281c000 0x200>;
+ ranges = <0x0 0xe281c000 0x800>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 42>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+
+ i2c8: i2c@600 {
+ compatible = "microchip,sama7d65-i2c", "microchip,sam9x60-i2c";
+ reg = <0x600 0x200>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 42>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dmas = <&dma0 AT91_XDMAC_DT_PERID(22)>,
+ <&dma0 AT91_XDMAC_DT_PERID(21)>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <32>;
+ status = "disabled";
+ };
+ };
+
+ flx9: flexcom@e2820000 {
+ compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
+ reg = <0xe2820000 0x200>;
+ ranges = <0x0 0xe281c000 0x800>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 43>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+
+ i2c9: i2c@600 {
+ compatible = "microchip,sama7d65-i2c", "microchip,sam9x60-i2c";
+ reg = <0x600 0x200>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 43>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dmas = <&dma0 AT91_XDMAC_DT_PERID(24)>,
+ <&dma0 AT91_XDMAC_DT_PERID(23)>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <32>;
+ status = "disabled";
+ };
+ };
+
flx10: flexcom@e2824000 {
compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
reg = <0xe2824000 0x200>;
@@ -227,6 +563,16 @@
};
};
+ uddrc: uddrc@e3800000 {
+ compatible = "microchip,sama7d65-uddrc", "microchip,sama7g5-uddrc";
+ reg = <0xe3800000 0x4000>;
+ };
+
+ ddr3phy: ddr3phy@e3804000 {
+ compatible = "microchip,sama7d65-ddr3phy", "microchip,sama7g5-ddr3phy";
+ reg = <0xe3804000 0x1000>;
+ };
+
gic: interrupt-controller@e8c11000 {
compatible = "arm,cortex-a7-gic";
reg = <0xe8c11000 0x1000>,
diff --git a/arch/arm/boot/dts/microchip/tny_a9263.dts b/arch/arm/boot/dts/microchip/tny_a9263.dts
index 3dd48b3e06da..fd8244b56e05 100644
--- a/arch/arm/boot/dts/microchip/tny_a9263.dts
+++ b/arch/arm/boot/dts/microchip/tny_a9263.dts
@@ -64,7 +64,7 @@
nand@3 {
reg = <0x3 0x0 0x800000>;
rb-gpios = <&pioA 22 GPIO_ACTIVE_HIGH>;
- cs-gpios = <&pioA 15 GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&pioD 15 GPIO_ACTIVE_HIGH>;
nand-bus-width = <8>;
nand-ecc-mode = "soft";
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/microchip/usb_a9260.dts b/arch/arm/boot/dts/microchip/usb_a9260.dts
index e7f7b259ccf3..3b61e7145060 100644
--- a/arch/arm/boot/dts/microchip/usb_a9260.dts
+++ b/arch/arm/boot/dts/microchip/usb_a9260.dts
@@ -12,14 +12,6 @@
model = "Calao USB A9260";
compatible = "calao,usb-a9260", "atmel,at91sam9260", "atmel,at91sam9";
- chosen {
- bootargs = "mem=64M console=ttyS0,115200 root=/dev/mtdblock5 rw rootfstype=ubifs";
- };
-
- memory@20000000 {
- reg = <0x20000000 0x4000000>;
- };
-
ahb {
apb {
shdwc: poweroff@fffffd10 {
diff --git a/arch/arm/boot/dts/microchip/usb_a9260_common.dtsi b/arch/arm/boot/dts/microchip/usb_a9260_common.dtsi
index 8c3530638c6d..da32c5fdcc47 100644
--- a/arch/arm/boot/dts/microchip/usb_a9260_common.dtsi
+++ b/arch/arm/boot/dts/microchip/usb_a9260_common.dtsi
@@ -6,6 +6,11 @@
*/
/ {
+ chosen {
+ bootargs = "mem=64M root=/dev/mtdblock5 rw rootfstype=ubifs";
+ stdout-path = "serial0:115200n8";
+ };
+
clocks {
slow_xtal {
clock-frequency = <32768>;
@@ -16,6 +21,10 @@
};
};
+ memory@20000000 {
+ reg = <0x20000000 0x4000000>;
+ };
+
ahb {
apb {
dbgu: serial@fffff200 {
diff --git a/arch/arm/boot/dts/microchip/usb_a9263.dts b/arch/arm/boot/dts/microchip/usb_a9263.dts
index 60d7936dc562..8e1a3fb61087 100644
--- a/arch/arm/boot/dts/microchip/usb_a9263.dts
+++ b/arch/arm/boot/dts/microchip/usb_a9263.dts
@@ -58,7 +58,7 @@
};
spi0: spi@fffa4000 {
- cs-gpios = <&pioB 15 GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&pioA 5 GPIO_ACTIVE_LOW>;
status = "okay";
flash@0 {
compatible = "atmel,at45", "atmel,dataflash";
@@ -84,7 +84,7 @@
nand@3 {
reg = <0x3 0x0 0x800000>;
rb-gpios = <&pioA 22 GPIO_ACTIVE_HIGH>;
- cs-gpios = <&pioA 15 GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&pioD 15 GPIO_ACTIVE_HIGH>;
nand-bus-width = <8>;
nand-ecc-mode = "soft";
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/microchip/usb_a9g20.dts b/arch/arm/boot/dts/microchip/usb_a9g20.dts
index a2f748141d4b..555291cd30b3 100644
--- a/arch/arm/boot/dts/microchip/usb_a9g20.dts
+++ b/arch/arm/boot/dts/microchip/usb_a9g20.dts
@@ -5,9 +5,24 @@
* Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
/dts-v1/;
-#include "usb_a9g20_common.dtsi"
+#include "at91sam9g20.dtsi"
+#include "usb_a9260_common.dtsi"
/ {
model = "Calao USB A9G20";
compatible = "calao,usb-a9g20", "atmel,at91sam9g20", "atmel,at91sam9";
};
+
+&spi0 {
+ cs-gpios = <&pioC 11 GPIO_ACTIVE_LOW>;
+ status = "okay";
+ /* TODO: Some revisions might have a dataflash here instead of an EEPROM */
+ eeprom@0 {
+ compatible = "st,m95640", "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <2000000>;
+ size = <8192>;
+ pagesize = <32>;
+ address-width = <16>;
+ };
+};
diff --git a/arch/arm/boot/dts/microchip/usb_a9g20_common.dtsi b/arch/arm/boot/dts/microchip/usb_a9g20_common.dtsi
deleted file mode 100644
index f1946e0996b7..000000000000
--- a/arch/arm/boot/dts/microchip/usb_a9g20_common.dtsi
+++ /dev/null
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * usb_a9g20.dts - Device Tree file for Calao USB A9G20 board
- *
- * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
- */
-
-#include "at91sam9g20.dtsi"
-#include "usb_a9260_common.dtsi"
-
-/ {
- chosen {
- bootargs = "mem=64M root=/dev/mtdblock5 rw rootfstype=ubifs";
- stdout-path = "serial0:115200n8";
- };
-
- memory@20000000 {
- reg = <0x20000000 0x4000000>;
- };
-
- i2c-gpio-0 {
- rtc@56 {
- compatible = "microcrystal,rv3029";
- reg = <0x56>;
- };
- };
-};
diff --git a/arch/arm/boot/dts/microchip/usb_a9g20_lpw.dts b/arch/arm/boot/dts/microchip/usb_a9g20_lpw.dts
index 4d104797176c..2eda00477bc5 100644
--- a/arch/arm/boot/dts/microchip/usb_a9g20_lpw.dts
+++ b/arch/arm/boot/dts/microchip/usb_a9g20_lpw.dts
@@ -5,7 +5,8 @@
* Copyright (C) 2013 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
/dts-v1/;
-#include "usb_a9g20_common.dtsi"
+#include "at91sam9g20.dtsi"
+#include "usb_a9260_common.dtsi"
/ {
model = "Calao USB A9G20 Low Power";
@@ -27,4 +28,11 @@
};
};
};
+
+ i2c-gpio-0 {
+ rtc@56 {
+ compatible = "microcrystal,rv3029";
+ reg = <0x56>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-common-npcm7xx.dtsi b/arch/arm/boot/dts/nuvoton/nuvoton-common-npcm7xx.dtsi
index 868454ae6bde..791090f54d8b 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-common-npcm7xx.dtsi
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-common-npcm7xx.dtsi
@@ -99,6 +99,11 @@
};
};
+ udc0_phy: usb-phy {
+ compatible = "usb-nop-xceiv";
+ #phy-cells = <0>;
+ };
+
ahb {
#address-cells = <1>;
#size-cells = <1>;
@@ -122,6 +127,13 @@
clocks = <&clk_refclk>, <&clk_sysbypck>, <&clk_mcbypck>;
};
+ mc: memory-controller@f0824000 {
+ compatible = "nuvoton,npcm750-memory-controller";
+ reg = <0xf0824000 0x1000>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
gmac0: eth@f0802000 {
device_type = "network";
compatible = "snps,dwmac";
@@ -137,6 +149,29 @@
status = "disabled";
};
+ sdmmc: mmc@f0842000 {
+ compatible = "nuvoton,npcm750-sdhci";
+ status = "disabled";
+ reg = <0xf0842000 0x200>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_AHB>;
+ clock-names = "clk_mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc8_pins
+ &mmc_pins>;
+ };
+
+ sdhci: mmc@f0840000 {
+ compatible = "nuvoton,npcm750-sdhci";
+ status = "disabled";
+ reg = <0xf0840000 0x200>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_AHB>;
+ clock-names = "clk_sdhc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd1_pins>;
+ };
+
ehci1: usb@f0806000 {
compatible = "nuvoton,npcm750-ehci";
reg = <0xf0806000 0x1000>;
@@ -144,6 +179,13 @@
status = "disabled";
};
+ ohci1: usb@f0807000 {
+ compatible = "generic-ohci";
+ reg = <0xf0807000 0x1000>;
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
fiu0: spi@fb000000 {
compatible = "nuvoton,npcm750-fiu";
#address-cells = <1>;
@@ -179,6 +221,72 @@
status = "disabled";
};
+ udc5: usb@f0835000 {
+ compatible = "nuvoton,npcm750-udc";
+ reg = <0xf0835000 0x1000
+ 0xfffd2800 0x800>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_SU>;
+ clock-names = "clk_usb_bridge";
+ phys = <&udc0_phy>;
+ phy_type = "utmi_wide";
+ dr_mode = "peripheral";
+ status = "disabled";
+ };
+
+ udc6: usb@f0836000 {
+ compatible = "nuvoton,npcm750-udc";
+ reg = <0xf0836000 0x1000
+ 0xfffd3000 0x800>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_SU>;
+ clock-names = "clk_usb_bridge";
+ phys = <&udc0_phy>;
+ phy_type = "utmi_wide";
+ dr_mode = "peripheral";
+ status = "disabled";
+ };
+
+ udc7: usb@f0837000 {
+ compatible = "nuvoton,npcm750-udc";
+ reg = <0xf0837000 0x1000
+ 0xfffd3800 0x800>;
+ interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_SU>;
+ clock-names = "clk_usb_bridge";
+ phys = <&udc0_phy>;
+ phy_type = "utmi_wide";
+ dr_mode = "peripheral";
+ status = "disabled";
+ };
+
+ udc8: usb@f0838000 {
+ compatible = "nuvoton,npcm750-udc";
+ reg = <0xf0838000 0x1000
+ 0xfffd4000 0x800>;
+ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_SU>;
+ clock-names = "clk_usb_bridge";
+ phys = <&udc0_phy>;
+ phy_type = "utmi_wide";
+ dr_mode = "peripheral";
+ status = "disabled";
+ };
+
+ udc9: usb@f0839000 {
+ compatible = "nuvoton,npcm750-udc";
+ reg = <0xf0839000 0x1000
+ 0xfffd4800 0x800>;
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_SU>;
+ clock-names = "clk_usb_bridge";
+ nuvoton,sysgcr = <&gcr>;
+ phys = <&udc0_phy>;
+ phy_type = "utmi_wide";
+ dr_mode = "peripheral";
+ status = "disabled";
+ };
+
apb {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-gbs.dts b/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-gbs.dts
index c3501786d600..231228842e63 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-gbs.dts
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-gbs.dts
@@ -1050,19 +1050,19 @@
"","","","SIO_POWER_GOOD","","","","";
};
gpio2: gpio@f0012000 {
- bmc_usb_mux_oe_n {
+ bmc-usb-mux-oe-n-hog {
gpio-hog;
gpios = <25 GPIO_ACTIVE_HIGH>;
output-low;
line-name = "bmc-usb-mux-oe-n";
};
- bmc_usb_mux_sel {
+ bmc-usb-mux-sel-hog {
gpio-hog;
gpios = <26 GPIO_ACTIVE_HIGH>;
output-low;
line-name = "bmc-usb-mux-sel";
};
- bmc_usb2517_reset_n {
+ bmc-usb2517-reset-n-hog {
gpio-hog;
gpios = <27 GPIO_ACTIVE_LOW>;
output-low;
@@ -1070,19 +1070,19 @@
};
};
gpio3: gpio@f0013000 {
- assert_cpu0_reset {
+ assert-cpu0-reset-hog {
gpio-hog;
gpios = <14 GPIO_ACTIVE_HIGH>;
output-low;
line-name = "assert-cpu0-reset";
};
- assert_pwrok_cpu0_n {
+ assert-pwrok-cpu0-n-hog {
gpio-hog;
gpios = <15 GPIO_ACTIVE_HIGH>;
output-low;
line-name = "assert-pwrok-cpu0-n";
};
- assert_cpu0_prochot {
+ assert-cpu0-prochot-hog {
gpio-hog;
gpios = <16 GPIO_ACTIVE_HIGH>;
output-low;
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts b/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts
index f67ede148209..0c94e14d40e8 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts
@@ -427,91 +427,91 @@
gpio-controller;
#gpio-cells = <2>;
reset-gpios = <&gpio7 4 GPIO_ACTIVE_LOW>;
- G1A_P0_0 {
+ g1a-p0-0-hog {
gpio-hog;
gpios = <0 0>;
output-high;
line-name = "TPM_BMC_ALERT_N";
};
- G1A_P0_1 {
+ g1a-p0-1-hog {
gpio-hog;
gpios = <1 0>;
input;
line-name = "FM_BIOS_TOP_SWAP";
};
- G1A_P0_2 {
+ g1a-p0-2-hog {
gpio-hog;
gpios = <2 0>;
input;
line-name = "FM_BIOS_PREFRB2_GOOD";
};
- G1A_P0_3 {
+ g1a-p0-3-hog {
gpio-hog;
gpios = <3 0>;
input;
line-name = "BMC_SATAXPCIE_0TO3_SEL";
};
- G1A_P0_4 {
+ g1a-p0-4-hog {
gpio-hog;
gpios = <4 0>;
input;
line-name = "BMC_SATAXPCIE_4TO7_SEL";
};
- G1A_P0_5 {
+ g1a-p0-5-hog {
gpio-hog;
gpios = <5 0>;
output-low;
line-name = "FM_UV_ADR_TRIGGER_EN_N";
};
- G1A_P0_6 {
+ g1a-p0-6-hog {
gpio-hog;
gpios = <6 0>;
input;
line-name = "RM_THROTTLE_EN_N";
};
- G1A_P1_0 {
+ g1a-p1-0-hog {
gpio-hog;
gpios = <8 0>;
input;
line-name = "FM_BMC_TPM_PRES_N";
};
- G1A_P1_1 {
+ g1a-p1-1-hog {
gpio-hog;
gpios = <9 0>;
input;
line-name = "FM_CPU0_SKTOCC_LVT3_N";
};
- G1A_P1_2 {
+ g1a-p1-2-hog {
gpio-hog;
gpios = <10 0>;
input;
line-name = "FM_CPU1_SKTOCC_LVT3_N";
};
- G1A_P1_3 {
+ g1a-p1-3-hog {
gpio-hog;
gpios = <11 0>;
input;
line-name = "PSU1_ALERT_N";
};
- G1A_P1_4 {
+ g1a-p1-4-hog {
gpio-hog;
gpios = <12 0>;
input;
line-name = "PSU2_ALERT_N";
};
- G1A_P1_5 {
+ g1a-p1-5-hog {
gpio-hog;
gpios = <13 0>;
input;
line-name = "H_CPU0_FAST_WAKE_LVT3_N";
};
- G1A_P1_6 {
+ g1a-p1-6-hog {
gpio-hog;
gpios = <14 0>;
output-high;
line-name = "I2C_MUX1_RESET_N";
};
- G1A_P1_7 {
+ g1a-p1-7-hog {
gpio-hog;
gpios = <15 0>;
input;
@@ -524,91 +524,91 @@
reg = <0x75>;
gpio-controller;
#gpio-cells = <2>;
- G1B_P0_0 {
+ g1b-p0-0-hog {
gpio-hog;
gpios = <0 0>;
input;
line-name = "PVDDQ_ABC_PINALERT_N";
};
- G1B_P0_1 {
+ g1b-p0-1-hog {
gpio-hog;
gpios = <1 0>;
input;
line-name = "PVDDQ_DEF_PINALERT_N";
};
- G1B_P0_2 {
+ g1b-p0-2-hog {
gpio-hog;
gpios = <2 0>;
input;
line-name = "PVDDQ_GHJ_PINALERT_N";
};
- G1B_P0_3 {
+ g1b-p0-3-hog {
gpio-hog;
gpios = <3 0>;
input;
line-name = "PVDDQ_KLM_PINALERT_N";
};
- G1B_P0_5 {
+ g1b-p0-5-hog {
gpio-hog;
gpios = <5 0>;
input;
line-name = "FM_BOARD_REV_ID0";
};
- G1B_P0_6 {
+ g1b-p0-6-hog {
gpio-hog;
gpios = <6 0>;
input;
line-name = "FM_BOARD_REV_ID1";
};
- G1B_P0_7 {
+ g1b-p0-7-hog {
gpio-hog;
gpios = <7 0>;
input;
line-name = "FM_BOARD_REV_ID2";
};
- G1B_P1_0 {
+ g1b-p1-0-hog {
gpio-hog;
gpios = <8 0>;
input;
line-name = "FM_OC_DETECT_EN_N";
};
- G1B_P1_1 {
+ g1b-p1-1-hog {
gpio-hog;
gpios = <9 0>;
input;
line-name = "FM_FLASH_DESC_OVERRIDE";
};
- G1B_P1_2 {
+ g1b-p1-2-hog {
gpio-hog;
gpios = <10 0>;
output-low;
line-name = "FP_PWR_ID_LED_N";
};
- G1B_P1_3 {
+ g1b-p1-3-hog {
gpio-hog;
gpios = <11 0>;
output-low;
line-name = "BMC_LED_PWR_GRN";
};
- G1B_P1_4 {
+ g1b-p1-4-hog {
gpio-hog;
gpios = <12 0>;
output-low;
line-name = "BMC_LED_PWR_AMBER";
};
- G1B_P1_5 {
+ g1b-p1-5-hog {
gpio-hog;
gpios = <13 0>;
output-high;
line-name = "FM_BMC_FAULT_LED_N";
};
- G1B_P1_6 {
+ g1b-p1-6-hog {
gpio-hog;
gpios = <14 0>;
output-high;
line-name = "FM_CPLD_BMC_PWRDN_N";
};
- G1B_P1_7 {
+ g1b-p1-7-hog {
gpio-hog;
gpios = <15 0>;
output-high;
@@ -626,91 +626,91 @@
gpio-controller;
#gpio-cells = <2>;
reset-gpios = <&gpio5 28 GPIO_ACTIVE_LOW>;
- G2A_P0_0 {
+ g2a-p0-0-hog {
gpio-hog;
gpios = <0 0>;
output-high;
line-name = "BMC_PON_RST_REQ_N";
};
- G2A_P0_1 {
+ g2a-p0-1-hog {
gpio-hog;
gpios = <1 0>;
output-high;
line-name = "BMC_RST_IND_REQ_N";
};
- G2A_P0_2 {
+ g2a-p0-2-hog {
gpio-hog;
gpios = <2 0>;
input;
line-name = "RST_BMC_RTCRST";
};
- G2A_P0_3 {
+ g2a-p0-3-hog {
gpio-hog;
gpios = <3 0>;
output-high;
line-name = "FM_BMC_PWRBTN_OUT_N";
};
- G2A_P0_4 {
+ g2a-p0-4-hog {
gpio-hog;
gpios = <4 0>;
output-high;
line-name = "RST_BMC_SYSRST_BTN_OUT_N";
};
- G2A_P0_5 {
+ g2a-p0-5-hog {
gpio-hog;
gpios = <5 0>;
output-high;
line-name = "FM_BATTERY_SENSE_EN_N";
};
- G2A_P0_6 {
+ g2a-p0-6-hog {
gpio-hog;
gpios = <6 0>;
output-high;
line-name = "FM_BMC_READY_N";
};
- G2A_P0_7 {
+ g2a-p0-7-hog {
gpio-hog;
gpios = <7 0>;
input;
line-name = "IRQ_BMC_PCH_SMI_LPC_N";
};
- G2A_P1_0 {
+ g2a-p1-0-hog {
gpio-hog;
gpios = <8 0>;
input;
line-name = "FM_SLOT4_CFG0";
};
- G2A_P1_1 {
+ g2a-p1-1-hog {
gpio-hog;
gpios = <9 0>;
input;
line-name = "FM_SLOT4_CFG1";
};
- G2A_P1_2 {
+ g2a-p1-2-hog {
gpio-hog;
gpios = <10 0>;
input;
line-name = "FM_NVDIMM_EVENT_N";
};
- G2A_P1_3 {
+ g2a-p1-3-hog {
gpio-hog;
gpios = <11 0>;
input;
line-name = "PSU1_BLADE_EN_N";
};
- G2A_P1_4 {
+ g2a-p1-4-hog {
gpio-hog;
gpios = <12 0>;
input;
line-name = "BMC_PCH_FNM";
};
- G2A_P1_5 {
+ g2a-p1-5-hog {
gpio-hog;
gpios = <13 0>;
input;
line-name = "FM_SOL_UART_CH_SEL";
};
- G2A_P1_6 {
+ g2a-p1-6-hog {
gpio-hog;
gpios = <14 0>;
input;
@@ -723,91 +723,91 @@
reg = <0x75>;
gpio-controller;
#gpio-cells = <2>;
- G2B_P0_0 {
+ g2b-p0-0-hog {
gpio-hog;
gpios = <0 0>;
input;
line-name = "FM_CPU_MSMI_LVT3_N";
};
- G2B_P0_1 {
+ g2b-p0-1-hog {
gpio-hog;
gpios = <1 0>;
input;
line-name = "FM_BIOS_MRC_DEBUG_MSG_DIS";
};
- G2B_P0_2 {
+ g2b-p0-2-hog {
gpio-hog;
gpios = <2 0>;
input;
line-name = "FM_CPU1_DISABLE_BMC_N";
};
- G2B_P0_3 {
+ g2b-p0-3-hog {
gpio-hog;
gpios = <3 0>;
output-low;
line-name = "BMC_JTAG_SELECT";
};
- G2B_P0_4 {
+ g2b-p0-4-hog {
gpio-hog;
gpios = <4 0>;
output-high;
line-name = "PECI_MUX_SELECT";
};
- G2B_P0_5 {
+ g2b-p0-5-hog {
gpio-hog;
gpios = <5 0>;
output-high;
line-name = "I2C_MUX2_RESET_N";
};
- G2B_P0_6 {
+ g2b-p0-6-hog {
gpio-hog;
gpios = <6 0>;
input;
line-name = "FM_BMC_CPLD_PSU2_ON";
};
- G2B_P0_7 {
+ g2b-p0-7-hog {
gpio-hog;
gpios = <7 0>;
output-high;
line-name = "PSU2_ALERT_EN_N";
};
- G2B_P1_0 {
+ g2b-p1-0-hog {
gpio-hog;
gpios = <8 0>;
output-high;
line-name = "FM_CPU_BMC_INIT";
};
- G2B_P1_1 {
+ g2b-p1-1-hog {
gpio-hog;
gpios = <9 0>;
output-high;
line-name = "IRQ_BMC_PCH_SCI_LPC_N";
};
- G2B_P1_2 {
+ g2b-p1-2-hog {
gpio-hog;
gpios = <10 0>;
output-low;
line-name = "PMB_ALERT_EN_N";
};
- G2B_P1_3 {
+ g2b-p1-3-hog {
gpio-hog;
gpios = <11 0>;
output-high;
line-name = "FM_FAST_PROCHOT_EN_N";
};
- G2B_P1_4 {
+ g2b-p1-4-hog {
gpio-hog;
gpios = <12 0>;
output-high;
line-name = "BMC_NVDIMM_PRSNT_N";
};
- G2B_P1_5 {
+ g2b-p1-5-hog {
gpio-hog;
gpios = <13 0>;
output-low;
line-name = "FM_BACKUP_BIOS_SEL_H_BMC";
};
- G2B_P1_6 {
+ g2b-p1-6-hog {
gpio-hog;
gpios = <14 0>;
output-high;
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-npcm750.dtsi b/arch/arm/boot/dts/nuvoton/nuvoton-npcm750.dtsi
index 30eed40b89b5..f42ad259636c 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-npcm750.dtsi
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-npcm750.dtsi
@@ -58,5 +58,70 @@
&rg2mdio_pins>;
status = "disabled";
};
+
+ udc0: usb@f0830000 {
+ compatible = "nuvoton,npcm750-udc";
+ reg = <0xf0830000 0x1000
+ 0xfffd0000 0x800>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_SU>;
+ clock-names = "clk_usb_bridge";
+ phys = <&udc0_phy>;
+ phy_type = "utmi_wide";
+ dr_mode = "peripheral";
+ status = "disabled";
+ };
+
+ udc1: usb@f0831000 {
+ compatible = "nuvoton,npcm750-udc";
+ reg = <0xf0831000 0x1000
+ 0xfffd0800 0x800>;
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_SU>;
+ clock-names = "clk_usb_bridge";
+ phys = <&udc0_phy>;
+ phy_type = "utmi_wide";
+ dr_mode = "peripheral";
+ status = "disabled";
+ };
+
+ udc2: usb@f0832000 {
+ compatible = "nuvoton,npcm750-udc";
+ reg = <0xf0832000 0x1000
+ 0xfffd1000 0x800>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_SU>;
+ clock-names = "clk_usb_bridge";
+ phys = <&udc0_phy>;
+ phy_type = "utmi_wide";
+ dr_mode = "peripheral";
+ status = "disabled";
+ };
+
+ udc3: usb@f0833000 {
+ compatible = "nuvoton,npcm750-udc";
+ reg = <0xf0833000 0x1000
+ 0xfffd1800 0x800>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_SU>;
+ clock-names = "clk_usb_bridge";
+ phys = <&udc0_phy>;
+ phy_type = "utmi_wide";
+ dr_mode = "peripheral";
+ status = "disabled";
+ };
+
+ udc4: usb@f0834000 {
+ compatible = "nuvoton,npcm750-udc";
+ reg = <0xf0834000 0x1000
+ 0xfffd2000 0x800>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_SU>;
+ clock-names = "clk_usb_bridge";
+ phys = <&udc0_phy>;
+ phy_type = "utmi_wide";
+ dr_mode = "peripheral";
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/nvidia/Makefile b/arch/arm/boot/dts/nvidia/Makefile
index 96972559253c..ff2c5bfd8efa 100644
--- a/arch/arm/boot/dts/nvidia/Makefile
+++ b/arch/arm/boot/dts/nvidia/Makefile
@@ -34,6 +34,7 @@ dtb-$(CONFIG_ARCH_TEGRA_3x_SOC) += \
tegra30-asus-tf201.dtb \
tegra30-asus-tf300t.dtb \
tegra30-asus-tf300tg.dtb \
+ tegra30-asus-tf300tl.dtb \
tegra30-asus-tf700t.dtb \
tegra30-beaver.dtb \
tegra30-cardhu-a02.dtb \
diff --git a/arch/arm/boot/dts/nvidia/tegra124-apalis-eval.dts b/arch/arm/boot/dts/nvidia/tegra124-apalis-eval.dts
index 0f3debeb294b..1aa7265554d9 100644
--- a/arch/arm/boot/dts/nvidia/tegra124-apalis-eval.dts
+++ b/arch/arm/boot/dts/nvidia/tegra124-apalis-eval.dts
@@ -84,11 +84,6 @@
status = "okay";
clock-frequency = <400000>;
- pcie-switch@58 {
- compatible = "plx,pex8605";
- reg = <0x58>;
- };
-
/* M41T0M6 real time clock on carrier board */
rtc@68 {
compatible = "st,m41t0";
diff --git a/arch/arm/boot/dts/nvidia/tegra124-apalis-v1.2-eval.dts b/arch/arm/boot/dts/nvidia/tegra124-apalis-v1.2-eval.dts
index d13b8d25ca6a..23158bb82173 100644
--- a/arch/arm/boot/dts/nvidia/tegra124-apalis-v1.2-eval.dts
+++ b/arch/arm/boot/dts/nvidia/tegra124-apalis-v1.2-eval.dts
@@ -85,11 +85,6 @@
status = "okay";
clock-frequency = <400000>;
- pcie-switch@58 {
- compatible = "plx,pex8605";
- reg = <0x58>;
- };
-
/* M41T0M6 real time clock on carrier board */
rtc@68 {
compatible = "st,m41t0";
diff --git a/arch/arm/boot/dts/nvidia/tegra20.dtsi b/arch/arm/boot/dts/nvidia/tegra20.dtsi
index 8da75ccc4402..882adb7f2f26 100644
--- a/arch/arm/boot/dts/nvidia/tegra20.dtsi
+++ b/arch/arm/boot/dts/nvidia/tegra20.dtsi
@@ -284,7 +284,7 @@
reg = <0x60007000 0x1000>;
};
- apbdma: dma@6000a000 {
+ apbdma: dma-controller@6000a000 {
compatible = "nvidia,tegra20-apbdma";
reg = <0x6000a000 0x1200>;
interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/nvidia/tegra30-apalis-eval.dts b/arch/arm/boot/dts/nvidia/tegra30-apalis-eval.dts
index fc284155cd76..ccb9f29c5de3 100644
--- a/arch/arm/boot/dts/nvidia/tegra30-apalis-eval.dts
+++ b/arch/arm/boot/dts/nvidia/tegra30-apalis-eval.dts
@@ -91,11 +91,6 @@
status = "okay";
clock-frequency = <400000>;
- pcie-switch@58 {
- compatible = "plx,pex8605";
- reg = <0x58>;
- };
-
/* M41T0M6 real time clock on carrier board */
rtc@68 {
compatible = "st,m41t0";
diff --git a/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1-eval.dts b/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1-eval.dts
index 9d08e2b094b4..bc353324df43 100644
--- a/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1-eval.dts
+++ b/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1-eval.dts
@@ -92,11 +92,6 @@
status = "okay";
clock-frequency = <400000>;
- pcie-switch@58 {
- compatible = "plx,pex8605";
- reg = <0x58>;
- };
-
/* M41T0M6 real time clock on carrier board */
rtc@68 {
compatible = "st,m41t0";
diff --git a/arch/arm/boot/dts/nvidia/tegra30-asus-tf300tl.dts b/arch/arm/boot/dts/nvidia/tegra30-asus-tf300tl.dts
new file mode 100644
index 000000000000..2ef9d8737901
--- /dev/null
+++ b/arch/arm/boot/dts/nvidia/tegra30-asus-tf300tl.dts
@@ -0,0 +1,857 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "tegra30-asus-transformer-common.dtsi"
+#include "tegra30-asus-lvds-display.dtsi"
+
+/ {
+ model = "Asus Transformer Pad LTE TF300TL";
+ compatible = "asus,tf300tl", "nvidia,tegra30";
+
+ gpio@6000d000 {
+ tf300tl-init-hog {
+ gpio-hog;
+ gpios = <TEGRA_GPIO(C, 6) GPIO_ACTIVE_HIGH>;
+ output-low;
+ };
+ };
+
+ pinmux@70000868 {
+ state_default: pinmux {
+ lcd_pwr2_pc6 {
+ nvidia,pins = "lcd_pwr2_pc6",
+ "lcd_dc1_pd2";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ pbb3 {
+ nvidia,pins = "pbb3";
+ nvidia,function = "vgp3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ pbb7 {
+ nvidia,pins = "pbb7";
+ nvidia,function = "i2s4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ kb_row7_pr7 {
+ nvidia,pins = "kb_row7_pr7";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ gmi_cs4_n_pk2 {
+ nvidia,pins = "gmi_cs4_n_pk2";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ ulpi_data5_po6 {
+ nvidia,pins = "ulpi_data5_po6";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ dap3_din_pp1 {
+ nvidia,pins = "dap3_din_pp1";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ crt_hsync_pv6 {
+ nvidia,pins = "crt_hsync_pv6";
+ nvidia,function = "crt";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ crt_vsync_pv7 {
+ nvidia,pins = "crt_vsync_pv7";
+ nvidia,function = "crt";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ pu5 {
+ nvidia,pins = "pu5";
+ nvidia,function = "pwm2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ clk3_out_pee0 {
+ nvidia,pins = "clk3_out_pee0";
+ nvidia,function = "extperiph3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ clk3_req_pee1 {
+ nvidia,pins = "clk3_req_pee1";
+ nvidia,function = "dev3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ dap1_fs_pn0 {
+ nvidia,pins = "dap1_fs_pn0",
+ "dap1_sclk_pn3";
+ nvidia,function = "i2s0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ dap1_din_pn1 {
+ nvidia,pins = "dap1_din_pn1";
+ nvidia,function = "i2s0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ dap1_dout_pn2 {
+ nvidia,pins = "dap1_dout_pn2";
+ nvidia,function = "i2s0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ clk1_req_pee2 {
+ nvidia,pins = "clk1_req_pee2";
+ nvidia,function = "dap";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ spi2_mosi_px0 {
+ nvidia,pins = "spi2_mosi_px0";
+ nvidia,function = "spi2";
+ };
+
+ spi1_sck_px5 {
+ nvidia,pins = "spi1_sck_px5";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ spi1_miso_px7 {
+ nvidia,pins = "spi1_miso_px7";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ spi2_cs2_n_pw3 {
+ nvidia,pins = "spi2_cs2_n_pw3";
+ nvidia,function = "spi2";
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+ };
+ };
+
+ serial@70006200 {
+ /* Azurewave AW-NH615 BCM4329B1 */
+ bluetooth {
+ compatible = "brcm,bcm4329-bt";
+ };
+ };
+
+ i2c@7000c400 {
+ /* Elantech EKTH1036 touchscreen */
+ touchscreen@10 {
+ compatible = "elan,ektf3624";
+ reg = <0x10>;
+
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(H, 4) IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio TEGRA_GPIO(H, 6) GPIO_ACTIVE_LOW>;
+
+ vcc33-supply = <&vdd_3v3_sys>;
+ vccio-supply = <&vdd_3v3_sys>;
+
+ touchscreen-size-x = <2240>;
+ touchscreen-size-y = <1408>;
+ touchscreen-inverted-y;
+ };
+ };
+
+ i2c@7000c500 {
+ clock-frequency = <400000>;
+
+ magnetometer@e {
+ mount-matrix = "-1", "0", "0",
+ "0", "-1", "0",
+ "0", "0", "1";
+ };
+
+ gyroscope@68 {
+ mount-matrix = "-1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "-1";
+
+ /* External I2C interface */
+ i2c-gate {
+ accelerometer@f {
+ mount-matrix = "0", "-1", "0",
+ "-1", "0", "0",
+ "0", "0", "1";
+ };
+ };
+ };
+ };
+
+ i2c@7000d000 {
+ /* Realtek ALC5631 audio codec */
+ rt5631: audio-codec@1a {
+ compatible = "realtek,rt5631";
+ reg = <0x1a>;
+ };
+ };
+
+ memory-controller@7000f000 {
+ emc-timings-0 {
+ /* Elpida 1GB 667MHZ */
+ nvidia,ram-code = <0>;
+
+ timing-25500000 {
+ clock-frequency = <25500000>;
+
+ nvidia,emem-configuration = < 0x00020001 0xc0000020
+ 0x00000001 0x00000001 0x00000002 0x00000000
+ 0x00000001 0x00000001 0x00000003 0x00000008
+ 0x00000002 0x00000001 0x00000002 0x00000006
+ 0x06020102 0x000a0502 0x74830303 0x001f0000 >;
+ };
+
+ timing-51000000 {
+ clock-frequency = <51000000>;
+
+ nvidia,emem-configuration = < 0x00010001 0xc0000020
+ 0x00000001 0x00000001 0x00000002 0x00000000
+ 0x00000001 0x00000001 0x00000003 0x00000008
+ 0x00000002 0x00000001 0x00000002 0x00000006
+ 0x06020102 0x000a0502 0x73430303 0x001f0000 >;
+ };
+
+ timing-102000000 {
+ clock-frequency = <102000000>;
+
+ nvidia,emem-configuration = < 0x00000001 0xc0000030
+ 0x00000001 0x00000001 0x00000003 0x00000000
+ 0x00000001 0x00000001 0x00000003 0x00000008
+ 0x00000002 0x00000001 0x00000002 0x00000006
+ 0x06020102 0x000a0503 0x72830504 0x001f0000 >;
+ };
+
+ timing-204000000 {
+ clock-frequency = <204000000>;
+
+ nvidia,emem-configuration = < 0x00000003 0xc0000025
+ 0x00000001 0x00000001 0x00000005 0x00000002
+ 0x00000003 0x00000001 0x00000003 0x00000008
+ 0x00000002 0x00000001 0x00000002 0x00000006
+ 0x06020102 0x000a0505 0x72440a06 0x001f0000 >;
+ };
+
+ timing-333500000 {
+ clock-frequency = <333500000>;
+
+ nvidia,emem-configuration = < 0x00000005 0xc000003d
+ 0x00000001 0x00000002 0x00000008 0x00000004
+ 0x00000004 0x00000001 0x00000002 0x00000007
+ 0x00000002 0x00000002 0x00000003 0x00000006
+ 0x06030202 0x000b0608 0x70850f09 0x001f0000 >;
+ };
+
+ timing-667000000 {
+ clock-frequency = <667000000>;
+
+ nvidia,emem-configuration = < 0x0000000a 0xc0000079
+ 0x00000003 0x00000004 0x00000010 0x0000000b
+ 0x0000000a 0x00000001 0x00000003 0x0000000b
+ 0x00000002 0x00000002 0x00000004 0x00000008
+ 0x08040202 0x00130b10 0x70ea1f11 0x001f0000 >;
+ };
+ };
+
+ emc-timings-1 {
+ /* Hynix 1GB 667MHZ */
+ nvidia,ram-code = <1>;
+
+ timing-25500000 {
+ clock-frequency = <25500000>;
+
+ nvidia,emem-configuration = < 0x00020001 0xc0000020
+ 0x00000001 0x00000001 0x00000002 0x00000000
+ 0x00000001 0x00000001 0x00000003 0x00000008
+ 0x00000002 0x00000001 0x00000002 0x00000006
+ 0x06020102 0x000a0502 0x74830303 0x001f0000 >;
+ };
+
+ timing-51000000 {
+ clock-frequency = <51000000>;
+
+ nvidia,emem-configuration = < 0x00010001 0xc0000020
+ 0x00000001 0x00000001 0x00000002 0x00000000
+ 0x00000001 0x00000001 0x00000003 0x00000008
+ 0x00000002 0x00000001 0x00000002 0x00000006
+ 0x06020102 0x000a0502 0x73430303 0x001f0000 >;
+ };
+
+ timing-102000000 {
+ clock-frequency = <102000000>;
+
+ nvidia,emem-configuration = < 0x00000001 0xc0000030
+ 0x00000001 0x00000001 0x00000003 0x00000000
+ 0x00000001 0x00000001 0x00000003 0x00000008
+ 0x00000002 0x00000001 0x00000002 0x00000006
+ 0x06020102 0x000a0503 0x72830504 0x001f0000 >;
+ };
+
+ timing-204000000 {
+ clock-frequency = <204000000>;
+
+ nvidia,emem-configuration = < 0x00000003 0xc0000025
+ 0x00000001 0x00000001 0x00000005 0x00000002
+ 0x00000003 0x00000001 0x00000003 0x00000008
+ 0x00000002 0x00000001 0x00000002 0x00000006
+ 0x06020102 0x000a0505 0x72440a06 0x001f0000 >;
+ };
+
+ timing-333500000 {
+ clock-frequency = <333500000>;
+
+ nvidia,emem-configuration = < 0x00000005 0xc000003d
+ 0x00000001 0x00000002 0x00000008 0x00000004
+ 0x00000004 0x00000001 0x00000002 0x00000007
+ 0x00000002 0x00000002 0x00000003 0x00000006
+ 0x06030202 0x000b0608 0x70850f09 0x001f0000 >;
+ };
+
+ timing-667000000 {
+ clock-frequency = <667000000>;
+
+ nvidia,emem-configuration = < 0x0000000a 0xc0000079
+ 0x00000003 0x00000004 0x00000010 0x0000000b
+ 0x0000000a 0x00000001 0x00000003 0x0000000b
+ 0x00000002 0x00000002 0x00000004 0x00000008
+ 0x08040202 0x00130b10 0x70ea1f11 0x001f0000 >;
+ };
+ };
+ };
+
+ memory-controller@7000f400 {
+ emc-timings-0 {
+ /* Elpida 1GB 667MHZ */
+ nvidia,ram-code = <0>;
+
+ timing-25500000 {
+ clock-frequency = <25500000>;
+
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200048>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-zcal-cnt-long = <0x00000040>;
+ nvidia,emc-cfg-dyn-self-ref;
+ nvidia,emc-cfg-periodic-qrst;
+
+ nvidia,emc-configuration = < 0x00000001
+ 0x00000004 0x00000000 0x00000000 0x00000002
+ 0x0000000a 0x00000005 0x0000000b 0x00000000
+ 0x00000000 0x00000003 0x00000001 0x00000000
+ 0x00000005 0x00000005 0x00000004 0x0000000a
+ 0x0000000b 0x000000c0 0x00000000 0x00000030
+ 0x00000002 0x00000002 0x00000001 0x00000000
+ 0x00000007 0x0000000f 0x00000005 0x00000005
+ 0x00000004 0x00000001 0x00000000 0x00000004
+ 0x00000005 0x000000c7 0x00000006 0x00000004
+ 0x00000000 0x00000000 0x00004288 0x007800a4
+ 0x00008000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x000002a0 0x0800211c 0x00000000
+ 0x77fff884 0x01f1f108 0x05057404 0x54000007
+ 0x08000168 0x08000000 0x00000802 0x00000000
+ 0x00000040 0x000c000c 0xa0f10000 0x00000000
+ 0x00000000 0x80000287 0xe8000000 0xff00ff00 >;
+ };
+
+ timing-51000000 {
+ clock-frequency = <51000000>;
+
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200048>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-zcal-cnt-long = <0x00000040>;
+ nvidia,emc-cfg-dyn-self-ref;
+ nvidia,emc-cfg-periodic-qrst;
+
+ nvidia,emc-configuration = < 0x00000002
+ 0x00000008 0x00000001 0x00000000 0x00000002
+ 0x0000000a 0x00000005 0x0000000b 0x00000000
+ 0x00000000 0x00000003 0x00000001 0x00000000
+ 0x00000005 0x00000005 0x00000004 0x0000000a
+ 0x0000000b 0x00000181 0x00000000 0x00000060
+ 0x00000002 0x00000002 0x00000001 0x00000000
+ 0x00000007 0x0000000f 0x00000009 0x00000009
+ 0x00000004 0x00000002 0x00000000 0x00000004
+ 0x00000005 0x0000018e 0x00000006 0x00000004
+ 0x00000000 0x00000000 0x00004288 0x007800a4
+ 0x00008000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x000002a0 0x0800211c 0x00000000
+ 0x77fff884 0x01f1f108 0x05057404 0x54000007
+ 0x08000168 0x08000000 0x00000802 0x00000000
+ 0x00000040 0x000c000c 0xa0f10000 0x00000000
+ 0x00000000 0x8000040b 0xe8000000 0xff00ff00 >;
+ };
+
+ timing-102000000 {
+ clock-frequency = <102000000>;
+
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200048>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-zcal-cnt-long = <0x00000040>;
+ nvidia,emc-cfg-dyn-self-ref;
+ nvidia,emc-cfg-periodic-qrst;
+
+ nvidia,emc-configuration = < 0x00000005
+ 0x00000010 0x00000003 0x00000001 0x00000002
+ 0x0000000a 0x00000005 0x0000000b 0x00000001
+ 0x00000001 0x00000003 0x00000001 0x00000000
+ 0x00000005 0x00000005 0x00000004 0x0000000a
+ 0x0000000b 0x00000303 0x00000000 0x000000c0
+ 0x00000002 0x00000002 0x00000001 0x00000000
+ 0x00000007 0x0000000f 0x00000012 0x00000012
+ 0x00000004 0x00000004 0x00000000 0x00000004
+ 0x00000005 0x0000031c 0x00000006 0x00000004
+ 0x00000000 0x00000000 0x00004288 0x007800a4
+ 0x00008000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x000002a0 0x0800211c 0x00000000
+ 0x77fff884 0x01f1f108 0x05057404 0x54000007
+ 0x08000168 0x08000000 0x00000802 0x00000000
+ 0x00000040 0x000c000c 0xa0f10000 0x00000000
+ 0x00000000 0x80000713 0xe8000000 0xff00ff00 >;
+ };
+
+ timing-204000000 {
+ clock-frequency = <204000000>;
+
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200048>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-zcal-cnt-long = <0x00000040>;
+ nvidia,emc-cfg-dyn-self-ref;
+ nvidia,emc-cfg-periodic-qrst;
+
+ nvidia,emc-configuration = < 0x0000000a
+ 0x00000020 0x00000007 0x00000002 0x00000002
+ 0x0000000a 0x00000005 0x0000000b 0x00000002
+ 0x00000002 0x00000003 0x00000001 0x00000000
+ 0x00000005 0x00000006 0x00000004 0x0000000a
+ 0x0000000b 0x00000607 0x00000000 0x00000181
+ 0x00000002 0x00000002 0x00000001 0x00000000
+ 0x00000007 0x0000000f 0x00000023 0x00000023
+ 0x00000004 0x00000007 0x00000000 0x00000004
+ 0x00000005 0x00000638 0x00000007 0x00000004
+ 0x00000000 0x00000000 0x00004288 0x004400a4
+ 0x00008000 0x00080000 0x00080000 0x00080000
+ 0x00080000 0x00080000 0x00080000 0x00080000
+ 0x00080000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00080000 0x00080000 0x00080000
+ 0x00080000 0x000002a0 0x0800211c 0x00000000
+ 0x77fff884 0x01f1f108 0x05057404 0x54000007
+ 0x08000168 0x08000000 0x00000802 0x00020000
+ 0x00000100 0x000c000c 0xa0f10000 0x00000000
+ 0x00000000 0x80000d22 0xe8000000 0xff00ff00 >;
+ };
+
+ timing-333500000 {
+ clock-frequency = <333500000>;
+
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200040>;
+ nvidia,emc-mode-reset = <0x80000321>;
+ nvidia,emc-zcal-cnt-long = <0x00000040>;
+
+ nvidia,emc-configuration = < 0x0000000f
+ 0x00000034 0x0000000a 0x00000003 0x00000003
+ 0x00000008 0x00000002 0x00000009 0x00000003
+ 0x00000003 0x00000002 0x00000001 0x00000000
+ 0x00000004 0x00000006 0x00000004 0x0000000a
+ 0x0000000c 0x000009e9 0x00000000 0x0000027a
+ 0x00000001 0x00000008 0x00000001 0x00000000
+ 0x00000007 0x0000000e 0x00000039 0x00000200
+ 0x00000004 0x0000000a 0x00000000 0x00000004
+ 0x00000005 0x00000a2a 0x00000000 0x00000004
+ 0x00000000 0x00000000 0x00007088 0x002600a4
+ 0x00008000 0x0003c000 0x0003c000 0x0003c000
+ 0x0003c000 0x00014000 0x00014000 0x00014000
+ 0x00014000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00050000 0x00050000 0x00050000
+ 0x00050000 0x000002a0 0x0800013d 0x00000000
+ 0x77fff884 0x01f1f508 0x05057404 0x54000007
+ 0x080001e8 0x08000021 0x00000802 0x00020000
+ 0x00000100 0x018b000c 0xa0f10000 0x00000000
+ 0x00000000 0x800014d4 0xe8000000 0xff00ff89 >;
+ };
+
+ timing-667000000 {
+ clock-frequency = <667000000>;
+
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200058>;
+ nvidia,emc-mode-reset = <0x80000b71>;
+ nvidia,emc-zcal-cnt-long = <0x00000040>;
+ nvidia,emc-cfg-periodic-qrst;
+
+ nvidia,emc-configuration = < 0x0000001f
+ 0x00000069 0x00000017 0x00000007 0x00000005
+ 0x0000000c 0x00000003 0x00000011 0x00000007
+ 0x00000007 0x00000002 0x00000001 0x00000000
+ 0x00000007 0x0000000b 0x00000009 0x0000000b
+ 0x00000011 0x00001412 0x00000000 0x00000504
+ 0x00000002 0x0000000e 0x00000001 0x00000000
+ 0x0000000c 0x00000016 0x00000072 0x00000200
+ 0x00000005 0x00000015 0x00000000 0x00000006
+ 0x00000007 0x00001453 0x0000000c 0x00000004
+ 0x00000000 0x00000000 0x00005088 0xf00b0191
+ 0x00008000 0x0000000a 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a 0x0000000a 0x0000000a
+ 0x0000000a 0x00018000 0x00018000 0x00018000
+ 0x00018000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x0000000c 0x0000000c 0x0000000c
+ 0x0000000c 0x000002a0 0x0800013d 0x22220000
+ 0x77fff884 0x01f1f501 0x07077404 0x54000000
+ 0x080001e8 0x0a000021 0x00000802 0x00020000
+ 0x00000100 0x0156000c 0xa0f10000 0x00000000
+ 0x00000000 0x800028a5 0xe8000000 0xff00ff49 >;
+ };
+ };
+
+ emc-timings-1 {
+ /* Hynix 1GB 667MHZ */
+ nvidia,ram-code = <1>;
+
+ timing-25500000 {
+ clock-frequency = <25500000>;
+
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200048>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-zcal-cnt-long = <0x00000040>;
+ nvidia,emc-cfg-dyn-self-ref;
+ nvidia,emc-cfg-periodic-qrst;
+
+ nvidia,emc-configuration = < 0x00000001
+ 0x00000004 0x00000000 0x00000000 0x00000002
+ 0x0000000a 0x00000005 0x0000000b 0x00000000
+ 0x00000000 0x00000003 0x00000001 0x00000000
+ 0x00000005 0x00000005 0x00000004 0x0000000a
+ 0x0000000b 0x000000c0 0x00000000 0x00000030
+ 0x00000002 0x00000002 0x00000001 0x00000000
+ 0x00000007 0x0000000f 0x00000005 0x00000005
+ 0x00000004 0x00000001 0x00000000 0x00000004
+ 0x00000005 0x000000c7 0x00000006 0x00000004
+ 0x00000000 0x00000000 0x00004288 0x007800a4
+ 0x00008000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x000002a0 0x0800211c 0x00000000
+ 0x77fff884 0x01f1f108 0x05057404 0x54000007
+ 0x08000168 0x08000000 0x00000802 0x00000000
+ 0x00000040 0x000c000c 0xa0f10000 0x00000000
+ 0x00000000 0x80000287 0xe8000000 0xff00ff00 >;
+ };
+
+ timing-51000000 {
+ clock-frequency = <51000000>;
+
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200048>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-zcal-cnt-long = <0x00000040>;
+ nvidia,emc-cfg-dyn-self-ref;
+ nvidia,emc-cfg-periodic-qrst;
+
+ nvidia,emc-configuration = < 0x00000002
+ 0x00000008 0x00000001 0x00000000 0x00000002
+ 0x0000000a 0x00000005 0x0000000b 0x00000000
+ 0x00000000 0x00000003 0x00000001 0x00000000
+ 0x00000005 0x00000005 0x00000004 0x0000000a
+ 0x0000000b 0x00000181 0x00000000 0x00000060
+ 0x00000002 0x00000002 0x00000001 0x00000000
+ 0x00000007 0x0000000f 0x00000009 0x00000009
+ 0x00000004 0x00000002 0x00000000 0x00000004
+ 0x00000005 0x0000018e 0x00000006 0x00000004
+ 0x00000000 0x00000000 0x00004288 0x007800a4
+ 0x00008000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x000002a0 0x0800211c 0x00000000
+ 0x77fff884 0x01f1f108 0x05057404 0x54000007
+ 0x08000168 0x08000000 0x00000802 0x00000000
+ 0x00000040 0x000c000c 0xa0f10000 0x00000000
+ 0x00000000 0x8000040b 0xe8000000 0xff00ff00 >;
+ };
+
+ timing-102000000 {
+ clock-frequency = <102000000>;
+
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200048>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-zcal-cnt-long = <0x00000040>;
+ nvidia,emc-cfg-dyn-self-ref;
+ nvidia,emc-cfg-periodic-qrst;
+
+ nvidia,emc-configuration = < 0x00000005
+ 0x00000010 0x00000003 0x00000001 0x00000002
+ 0x0000000a 0x00000005 0x0000000b 0x00000001
+ 0x00000001 0x00000003 0x00000001 0x00000000
+ 0x00000005 0x00000005 0x00000004 0x0000000a
+ 0x0000000b 0x00000303 0x00000000 0x000000c0
+ 0x00000002 0x00000002 0x00000001 0x00000000
+ 0x00000007 0x0000000f 0x00000012 0x00000012
+ 0x00000004 0x00000004 0x00000000 0x00000004
+ 0x00000005 0x0000031c 0x00000006 0x00000004
+ 0x00000000 0x00000000 0x00004288 0x007800a4
+ 0x00008000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x000fc000 0x000fc000 0x000fc000
+ 0x000fc000 0x000002a0 0x0800211c 0x00000000
+ 0x77fff884 0x01f1f108 0x05057404 0x54000007
+ 0x08000168 0x08000000 0x00000802 0x00000000
+ 0x00000040 0x000c000c 0xa0f10000 0x00000000
+ 0x00000000 0x80000713 0xe8000000 0xff00ff00 >;
+ };
+
+ timing-204000000 {
+ clock-frequency = <204000000>;
+
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200048>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-zcal-cnt-long = <0x00000040>;
+ nvidia,emc-cfg-dyn-self-ref;
+ nvidia,emc-cfg-periodic-qrst;
+
+ nvidia,emc-configuration = < 0x0000000a
+ 0x00000020 0x00000007 0x00000002 0x00000002
+ 0x0000000a 0x00000005 0x0000000b 0x00000002
+ 0x00000002 0x00000003 0x00000001 0x00000000
+ 0x00000005 0x00000006 0x00000004 0x0000000a
+ 0x0000000b 0x00000607 0x00000000 0x00000181
+ 0x00000002 0x00000002 0x00000001 0x00000000
+ 0x00000007 0x0000000f 0x00000023 0x00000023
+ 0x00000004 0x00000007 0x00000000 0x00000004
+ 0x00000005 0x00000638 0x00000007 0x00000004
+ 0x00000000 0x00000000 0x00004288 0x004400a4
+ 0x00008000 0x00080000 0x00080000 0x00080000
+ 0x00080000 0x00080000 0x00080000 0x00080000
+ 0x00080000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00080000 0x00080000 0x00080000
+ 0x00080000 0x000002a0 0x0800211c 0x00000000
+ 0x77fff884 0x01f1f108 0x05057404 0x54000007
+ 0x08000168 0x08000000 0x00000802 0x00020000
+ 0x00000100 0x000c000c 0xa0f10000 0x00000000
+ 0x00000000 0x80000d22 0xe8000000 0xff00ff00 >;
+ };
+
+ timing-333500000 {
+ clock-frequency = <333500000>;
+
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200040>;
+ nvidia,emc-mode-reset = <0x80000321>;
+ nvidia,emc-zcal-cnt-long = <0x00000040>;
+
+ nvidia,emc-configuration = < 0x0000000f
+ 0x00000034 0x0000000a 0x00000003 0x00000003
+ 0x00000008 0x00000002 0x00000009 0x00000003
+ 0x00000003 0x00000002 0x00000001 0x00000000
+ 0x00000004 0x00000006 0x00000004 0x0000000a
+ 0x0000000c 0x000009e9 0x00000000 0x0000027a
+ 0x00000001 0x00000008 0x00000001 0x00000000
+ 0x00000007 0x0000000e 0x00000039 0x00000200
+ 0x00000004 0x0000000a 0x00000000 0x00000004
+ 0x00000005 0x00000a2a 0x00000000 0x00000004
+ 0x00000000 0x00000000 0x00007088 0x002600a4
+ 0x00008000 0x0003c000 0x0003c000 0x0003c000
+ 0x0003c000 0x00014000 0x00014000 0x00014000
+ 0x00014000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00048000 0x00048000 0x00048000
+ 0x00048000 0x000002a0 0x0800013d 0x00000000
+ 0x77fff884 0x01f1f508 0x05057404 0x54000007
+ 0x080001e8 0x08000021 0x00000802 0x00020000
+ 0x00000100 0x018b000c 0xa0f10000 0x00000000
+ 0x00000000 0x800014d4 0xe8000000 0xff00ff89 >;
+ };
+
+ timing-667000000 {
+ clock-frequency = <667000000>;
+
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200058>;
+ nvidia,emc-mode-reset = <0x80000b71>;
+ nvidia,emc-zcal-cnt-long = <0x00000040>;
+ nvidia,emc-cfg-periodic-qrst;
+
+ nvidia,emc-configuration = < 0x00000020
+ 0x00000069 0x00000017 0x00000007 0x00000005
+ 0x0000000c 0x00000003 0x00000011 0x00000007
+ 0x00000007 0x00000002 0x00000001 0x00000000
+ 0x00000007 0x0000000b 0x00000009 0x0000000b
+ 0x00000011 0x00001412 0x00000000 0x00000504
+ 0x00000002 0x0000000e 0x00000001 0x00000000
+ 0x0000000c 0x00000016 0x00000072 0x00000200
+ 0x00000005 0x00000015 0x00000000 0x00000006
+ 0x00000007 0x00001453 0x0000000c 0x00000004
+ 0x00000000 0x00000000 0x00005088 0xf00b0191
+ 0x00008000 0x0000000a 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a 0x0000000a 0x0000000a
+ 0x0000000a 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x0000000c 0x0000000c 0x0000000c
+ 0x0000000c 0x000002a0 0x0600013d 0x22220000
+ 0x77fff884 0x01f1f501 0x07077404 0x54000000
+ 0x080001e8 0x08000021 0x00000802 0x00020000
+ 0x00000100 0x0156000c 0xa0f10000 0x00000000
+ 0x00000000 0x800028a5 0xf8000000 0xff00ff49 >;
+ };
+ };
+ };
+
+ pad_battery: battery-pad {
+ compatible = "simple-battery";
+ device-chemistry = "lithium-ion-polymer";
+ charge-full-design-microamp-hours = <2940000>;
+ energy-full-design-microwatt-hours = <22000000>;
+ operating-range-celsius = <0 45>;
+ };
+
+ dock_battery: battery-dock {
+ compatible = "simple-battery";
+ device-chemistry = "lithium-ion-polymer";
+ charge-full-design-microamp-hours = <2260000>;
+ energy-full-design-microwatt-hours = <16000000>;
+ operating-range-celsius = <0 45>;
+ };
+
+ display-panel {
+ compatible = "innolux,g101ice-l01";
+ };
+
+ opp-table-emc {
+ /delete-node/ opp-750000000-1300;
+ /delete-node/ opp-800000000-1300;
+ /delete-node/ opp-900000000-1350;
+ };
+
+ opp-table-actmon {
+ /delete-node/ opp-750000000;
+ /delete-node/ opp-800000000;
+ /delete-node/ opp-900000000;
+ };
+
+ sound {
+ compatible = "asus,tegra-audio-rt5631-tf300tl",
+ "nvidia,tegra-audio-rt5631";
+ nvidia,model = "Asus Transformer Pad TF300TL RT5631";
+
+ nvidia,audio-routing =
+ "Headphone Jack", "HPOL",
+ "Headphone Jack", "HPOR",
+ "Int Spk", "SPOL",
+ "Int Spk", "SPOR",
+ "MIC1", "MIC Bias1",
+ "MIC Bias1", "Mic Jack",
+ "DMIC", "Int Mic";
+
+ nvidia,audio-codec = <&rt5631>;
+ };
+};
diff --git a/arch/arm/boot/dts/nvidia/tegra30.dtsi b/arch/arm/boot/dts/nvidia/tegra30.dtsi
index f866fa7b55a5..2a4d93db8134 100644
--- a/arch/arm/boot/dts/nvidia/tegra30.dtsi
+++ b/arch/arm/boot/dts/nvidia/tegra30.dtsi
@@ -431,7 +431,7 @@
reg = <0x60007000 0x1000>;
};
- apbdma: dma@6000a000 {
+ apbdma: dma-controller@6000a000 {
compatible = "nvidia,tegra30-apbdma", "nvidia,tegra20-apbdma";
reg = <0x6000a000 0x1400>;
interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/nxp/imx/imx25.dtsi b/arch/arm/boot/dts/nxp/imx/imx25.dtsi
index 9cfff2151b7e..82601a4b7b4b 100644
--- a/arch/arm/boot/dts/nxp/imx/imx25.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx25.dtsi
@@ -611,7 +611,7 @@
reg = <0x80000000 0x3b002000>;
ranges;
- nfc: nand@bb000000 {
+ nfc: nand-controller@bb000000 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx31-lite.dts b/arch/arm/boot/dts/nxp/imx/imx31-lite.dts
index d17abdfb6330..630f8fa69ba8 100644
--- a/arch/arm/boot/dts/nxp/imx/imx31-lite.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx31-lite.dts
@@ -157,7 +157,7 @@
&weim {
status = "okay";
- nor@0,0 {
+ flash@0,0 {
compatible = "cfi-flash";
reg = <0 0x0 0x200000>;
bank-width = <2>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx31.dtsi b/arch/arm/boot/dts/nxp/imx/imx31.dtsi
index 813a81558c40..8541a666747a 100644
--- a/arch/arm/boot/dts/nxp/imx/imx31.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx31.dtsi
@@ -218,7 +218,7 @@
};
iim: efuse@5001c000 {
- compatible = "fsl,imx31-iim", "fsl,imx27-iim";
+ compatible = "fsl,imx31-iim";
reg = <0x5001c000 0x1000>;
interrupts = <19>;
clocks = <&clks 25>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx35.dtsi b/arch/arm/boot/dts/nxp/imx/imx35.dtsi
index 30beb39e0162..111d7c0331f5 100644
--- a/arch/arm/boot/dts/nxp/imx/imx35.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx35.dtsi
@@ -363,7 +363,7 @@
reg = <0x80000000 0x40000000>;
ranges;
- nfc: nand@bb000000 {
+ nfc: nand-controller@bb000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,imx35-nand", "fsl,imx25-nand";
diff --git a/arch/arm/boot/dts/nxp/imx/imx51-digi-connectcore-som.dtsi b/arch/arm/boot/dts/nxp/imx/imx51-digi-connectcore-som.dtsi
index dc72a2d14960..1980f751f161 100644
--- a/arch/arm/boot/dts/nxp/imx/imx51-digi-connectcore-som.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx51-digi-connectcore-som.dtsi
@@ -165,7 +165,7 @@
mma7455l@1d {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mma7455l>;
- compatible = "fsl,mma7455l";
+ compatible = "fsl,mma7455";
reg = <0x1d>;
interrupt-parent = <&gpio1>;
interrupts = <7 IRQ_TYPE_LEVEL_HIGH>, <6 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx51.dtsi b/arch/arm/boot/dts/nxp/imx/imx51.dtsi
index 8323e3a56a1f..c8698a9af1a7 100644
--- a/arch/arm/boot/dts/nxp/imx/imx51.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx51.dtsi
@@ -476,7 +476,7 @@
};
iim: efuse@83f98000 {
- compatible = "fsl,imx51-iim", "fsl,imx27-iim", "syscon";
+ compatible = "fsl,imx51-iim";
reg = <0x83f98000 0x4000>;
interrupts = <69>;
clocks = <&clks IMX5_CLK_IIM_GATE>;
@@ -595,7 +595,7 @@
status = "disabled";
};
- nfc: nand@83fdb000 {
+ nfc: nand-controller@83fdb000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,imx51-nand";
diff --git a/arch/arm/boot/dts/nxp/imx/imx53.dtsi b/arch/arm/boot/dts/nxp/imx/imx53.dtsi
index faac7cc249d0..93225a56896f 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx53.dtsi
@@ -668,7 +668,7 @@
};
iim: efuse@63f98000 {
- compatible = "fsl,imx53-iim", "fsl,imx27-iim", "syscon";
+ compatible = "fsl,imx53-iim";
reg = <0x63f98000 0x4000>;
interrupts = <69>;
clocks = <&clks IMX5_CLK_IIM_GATE>;
@@ -775,7 +775,7 @@
status = "disabled";
};
- nfc: nand@63fdb000 {
+ nfc: nand-controller@63fdb000 {
compatible = "fsl,imx53-nand";
reg = <0x63fdb000 0x1000 0xf7ff0000 0x10000>;
interrupts = <8>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-apalis-eval.dts b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-eval.dts
index e1077e2da5f4..1f2200f50059 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-apalis-eval.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-eval.dts
@@ -36,15 +36,6 @@
status = "okay";
};
-/* I2C1_SDA/SCL on MXM3 209/211 (e.g. RTC on carrier board) */
-&i2c1 {
- /* PCIe Switch */
- pcie-switch@58 {
- compatible = "plx,pex8605";
- reg = <0x58>;
- };
-};
-
&pcie {
vpcie-supply = <&reg_pcie_switch>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-mccmon6.dts b/arch/arm/boot/dts/nxp/imx/imx6q-mccmon6.dts
index f08b37010291..bba82126aaaa 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-mccmon6.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-mccmon6.dts
@@ -279,7 +279,7 @@
ranges = <0 0 0x08000000 0x08000000>;
status = "okay";
- nor@0,0 {
+ flash@0,0 {
compatible = "cfi-flash";
reg = <0 0 0x02000000>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi
index a381cb224c1e..2587d17c5918 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi
@@ -854,7 +854,7 @@
ranges = <0 0 0x08000000 0x08000000>;
status = "disabled"; /* pin conflict with SPI NOR */
- nor@0,0 {
+ flash@0,0 {
compatible = "cfi-flash";
reg = <0 0 0x02000000>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qp-prtwd3.dts b/arch/arm/boot/dts/nxp/imx/imx6qp-prtwd3.dts
index fbe260c9872e..cad985e341a1 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qp-prtwd3.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6qp-prtwd3.dts
@@ -384,7 +384,7 @@
#address-cells = <1>;
#size-cells = <0>;
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi
index f2386dcb9ff2..dda4fa91b2f2 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi
@@ -40,6 +40,9 @@
reg = <1>;
interrupt-parent = <&gpio4>;
interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+ micrel,led-mode = <1>;
+ clocks = <&clks IMX6UL_CLK_ENET_REF>;
+ clock-names = "rmii-ref";
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-remarkable2.dts b/arch/arm/boot/dts/nxp/imx/imx7d-remarkable2.dts
index eec526a96311..ff9d50942884 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-remarkable2.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-remarkable2.dts
@@ -374,7 +374,7 @@
cap-power-off-card;
status = "okay";
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d.dtsi b/arch/arm/boot/dts/nxp/imx/imx7d.dtsi
index 0484e349e064..d961c61a93af 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7d.dtsi
@@ -48,7 +48,7 @@
opp-792000000 {
opp-hz = /bits/ 64 <792000000>;
- opp-microvolt = <1000000>;
+ opp-microvolt = <1000000 950000 1250000>;
clock-latency-ns = <150000>;
opp-supported-hw = <0xd>, <0x7>;
opp-suspend;
@@ -56,7 +56,7 @@
opp-996000000 {
opp-hz = /bits/ 64 <996000000>;
- opp-microvolt = <1100000>;
+ opp-microvolt = <1100000 1045000 1250000>;
clock-latency-ns = <150000>;
opp-supported-hw = <0xc>, <0x7>;
opp-suspend;
@@ -64,7 +64,7 @@
opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <1225000>;
+ opp-microvolt = <1225000 1200000 1250000>;
clock-latency-ns = <150000>;
opp-supported-hw = <0x8>, <0x3>;
opp-suspend;
diff --git a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
index 2629968001a7..9235dd7e93bb 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
@@ -73,7 +73,6 @@
device_type = "cpu";
reg = <0>;
clock-frequency = <792000000>;
- clock-latency = <61036>; /* two CLK32 periods */
clocks = <&clks IMX7D_CLK_ARM>;
cpu-idle-states = <&cpu_sleep_wait>;
operating-points-v2 = <&cpu0_opp_table>;
diff --git a/arch/arm/boot/dts/nxp/lpc/lpc32xx.dtsi b/arch/arm/boot/dts/nxp/lpc/lpc32xx.dtsi
index 974410918f35..41f41a786f9d 100644
--- a/arch/arm/boot/dts/nxp/lpc/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/nxp/lpc/lpc32xx.dtsi
@@ -94,7 +94,7 @@
/*
* Enable either ohci or usbd (gadget)!
*/
- ohci: ohci@0 {
+ ohci: usb@0 {
compatible = "nxp,ohci-nxp", "usb-ohci";
reg = <0x0 0x300>;
interrupt-parent = <&sic1>;
diff --git a/arch/arm/boot/dts/nxp/ls/Makefile b/arch/arm/boot/dts/nxp/ls/Makefile
index 14759331dba2..53240b04c968 100644
--- a/arch/arm/boot/dts/nxp/ls/Makefile
+++ b/arch/arm/boot/dts/nxp/ls/Makefile
@@ -6,3 +6,12 @@ dtb-$(CONFIG_SOC_LS1021A) += \
ls1021a-tqmls1021a-mbls1021a.dtb \
ls1021a-tsn.dtb \
ls1021a-twr.dtb
+
+ls1021a-tqmls1021a-mbls1021a-hdmi-dtbs += ls1021a-tqmls1021a-mbls1021a.dtb ls1021a-tqmls1021a-mbls1021a-hdmi.dtbo
+ls1021a-tqmls1021a-mbls1021a-lvds-tm070jvhg33-dtbs += ls1021a-tqmls1021a-mbls1021a.dtb ls1021a-tqmls1021a-mbls1021a-lvds-tm070jvhg33.dtbo
+ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-dc44-dtbs += ls1021a-tqmls1021a-mbls1021a.dtb ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-dc44.dtbo
+ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-fc21-dtbs += ls1021a-tqmls1021a-mbls1021a.dtb ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-fc21.dtbo
+dtb-$(CONFIG_SOC_LS1021A) += ls1021a-tqmls1021a-mbls1021a-hdmi.dtb
+dtb-$(CONFIG_SOC_LS1021A) += ls1021a-tqmls1021a-mbls1021a-lvds-tm070jvhg33.dtb
+dtb-$(CONFIG_SOC_LS1021A) += ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-dc44.dtb
+dtb-$(CONFIG_SOC_LS1021A) += ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-fc21.dtb
diff --git a/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-hdmi.dtso b/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-hdmi.dtso
new file mode 100644
index 000000000000..e713a2ecbfc2
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-hdmi.dtso
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright 2013-2014 Freescale Semiconductor, Inc.
+ * Copyright 2018-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
+ */
+
+/dts-v1/;
+/plugin/;
+
+&dcu {
+ status = "okay";
+
+ port {
+ dcu_out: endpoint {
+ remote-endpoint = <&sii9022a_in>;
+ };
+ };
+};
+
+&hdmi_out {
+ status = "okay";
+};
+
+&sii9022a {
+ status = "okay";
+};
+
+&sii9022a_in {
+ remote-endpoint = <&dcu_out>;
+};
diff --git a/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-lvds-tm070jvhg33.dtso b/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-lvds-tm070jvhg33.dtso
new file mode 100644
index 000000000000..e9708f3c6740
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-lvds-tm070jvhg33.dtso
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright 2013-2014 Freescale Semiconductor, Inc.
+ * Copyright 2018-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/dts-v1/;
+/plugin/;
+
+&backlight_dcu {
+ status = "okay";
+};
+
+&dcu {
+ status = "okay";
+
+ port {
+ dcu_out: endpoint {
+ remote-endpoint = <&lvds_encoder_in>;
+ };
+ };
+};
+
+&display {
+ compatible = "tianma,tm070jvhg33";
+ status = "okay";
+};
+
+&lvds_encoder {
+ status = "okay";
+};
+
+&lvds_encoder_in {
+ remote-endpoint = <&dcu_out>;
+};
+
+&lvds_encoder_out {
+ remote-endpoint = <&panel_in>;
+};
+
+&panel_in {
+ remote-endpoint = <&lvds_encoder_out>;
+};
diff --git a/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-dc44.dtso b/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-dc44.dtso
new file mode 100644
index 000000000000..146d45601f69
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-dc44.dtso
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright 2013-2014 Freescale Semiconductor, Inc.
+ * Copyright 2018-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/dts-v1/;
+/plugin/;
+
+&backlight_dcu {
+ status = "okay";
+};
+
+&dcu {
+ status = "okay";
+
+ port {
+ dcu_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+};
+
+&display {
+ compatible = "cdtech,s070swv29hg-dc44";
+ status = "okay";
+};
+
+&i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ polytouch: touchscreen@38 {
+ compatible = "edt,edt-ft5406", "edt,edt-ft5x06";
+ reg = <0x38>;
+ interrupt-parent = <&pca9554_0>;
+ interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
+ /* LCD_PWR_EN -> TSC_WAKE */
+ wake-gpios = <&pca9554_1 4 GPIO_ACTIVE_HIGH>;
+ iovcc-supply = <&reg_3p3v>;
+ vcc-supply = <&reg_3p3v>;
+ gain = <20>;
+ touchscreen-size-x = <800>;
+ touchscreen-size-y = <480>;
+ };
+};
+
+&panel_in {
+ remote-endpoint = <&dcu_out>;
+};
diff --git a/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-fc21.dtso b/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-fc21.dtso
new file mode 100644
index 000000000000..db66831f31af
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a-rgb-cdtech-fc21.dtso
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright 2013-2014 Freescale Semiconductor, Inc.
+ * Copyright 2018-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/dts-v1/;
+/plugin/;
+
+&backlight_dcu {
+ status = "okay";
+};
+
+&dcu {
+ status = "okay";
+
+ port {
+ dcu_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+};
+
+&display {
+ compatible = "cdtech,s070pws19hp-fc21";
+ status = "okay";
+};
+
+&i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ polytouch: touchscreen@38 {
+ compatible = "edt,edt-ft5406", "edt,edt-ft5x06";
+ reg = <0x38>;
+ interrupt-parent = <&pca9554_0>;
+ interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
+ /* LCD_PWR_EN -> TSC_WAKE */
+ wake-gpios = <&pca9554_1 4 GPIO_ACTIVE_HIGH>;
+ iovcc-supply = <&reg_3p3v>;
+ vcc-supply = <&reg_3p3v>;
+ gain = <20>;
+ touchscreen-size-x = <800>;
+ touchscreen-size-y = <480>;
+ };
+};
+
+&panel_in {
+ remote-endpoint = <&dcu_out>;
+};
diff --git a/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a.dts b/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a.dts
index 34636fcdfd6a..5606585dd560 100644
--- a/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a.dts
+++ b/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a-mbls1021a.dts
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0-or-later OR X11)
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
/*
* Copyright 2013-2014 Freescale Semiconductor, Inc.
* Copyright 2018-2023 TQ-Systems GmbH <linux@ew.tq-group.com>,
@@ -147,6 +147,7 @@
display: panel {
backlight = <&backlight_dcu>;
enable-gpios = <&pca9554_1 3 GPIO_ACTIVE_HIGH>;
+ power-supply = <&reg_3p3v>;
status = "disabled";
port {
@@ -156,7 +157,7 @@
sound {
compatible = "fsl,imx-audio-tlv320aic32x4";
- model = "ls1021a-mbls1021a-tlv320aic32";
+ model = "tqm-tlv320aic32";
ssi-controller = <&sai1>;
audio-codec = <&tlv320aic32x4>;
};
diff --git a/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a.dtsi b/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a.dtsi
index 1b13851ad997..271001eb5ad7 100644
--- a/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a.dtsi
+++ b/arch/arm/boot/dts/nxp/ls/ls1021a-tqmls1021a.dtsi
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0-or-later OR X11)
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
/*
* Copyright 2013-2014 Freescale Semiconductor, Inc.
* Copyright 2018-2023 TQ-Systems GmbH <linux@ew.tq-group.com>,
@@ -72,6 +72,7 @@
spi-rx-bus-width = <4>;
spi-tx-bus-width = <4>;
reg = <0>;
+ vcc-supply = <&reg_3p3v_som>;
partitions {
compatible = "fixed-partitions";
diff --git a/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts b/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts
index cb661bf2d157..613f13b6c8a8 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts
@@ -93,9 +93,9 @@
MX23_PAD_LCD_HSYNC__GPIO_1_24
MX23_PAD_PWM3__GPIO_1_29
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
};
diff --git a/arch/arm/boot/dts/nxp/mxs/imx23-xfi3.dts b/arch/arm/boot/dts/nxp/mxs/imx23-xfi3.dts
index 0b088c8ab6b6..fad08f6c008f 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx23-xfi3.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx23-xfi3.dts
@@ -83,9 +83,9 @@
fsl,pinmux-ids = <
MX23_PAD_GPMI_D07__GPIO_0_7
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
key_pins_a: keys@0 {
@@ -94,9 +94,9 @@
MX23_PAD_ROTARYA__GPIO_2_7
MX23_PAD_ROTARYB__GPIO_2_8
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
};
};
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-btt3.dtsi b/arch/arm/boot/dts/nxp/mxs/imx28-btt3.dtsi
index 2c52e67e5c14..a6903ef2b093 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx28-btt3.dtsi
+++ b/arch/arm/boot/dts/nxp/mxs/imx28-btt3.dtsi
@@ -299,7 +299,7 @@
keep-power-in-suspend;
status = "okay";
- wlan@1 {
+ wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
};
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-cfa10036.dts b/arch/arm/boot/dts/nxp/mxs/imx28-cfa10036.dts
index d004b1cbb4ae..f170df37b3f8 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx28-cfa10036.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx28-cfa10036.dts
@@ -102,9 +102,9 @@
0x31c3 /*
MX28_PAD_PWM3__GPIO_3_28 */
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index f06c6d425e91..0c1d116f6e84 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -12,6 +12,7 @@ dtb-$(CONFIG_ARCH_QCOM) += \
qcom-apq8064-ifc6410.dtb \
qcom-apq8064-sony-xperia-lagan-yuga.dtb \
qcom-apq8064-asus-nexus7-flo.dtb \
+ qcom-apq8064-lg-nexus4-mako.dtb \
qcom-apq8074-dragonboard.dtb \
qcom-apq8084-ifc6540.dtb \
qcom-apq8084-mtp.dtb \
diff --git a/arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts b/arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts
index 5dbca83f2230..e6392f7d14c7 100644
--- a/arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts
+++ b/arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts
@@ -31,6 +31,15 @@
vsp-supply = <&reg_lcd_pos>;
vsn-supply = <&reg_lcd_neg>;
vddio-supply = <&vddio_disp_vreg>;
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_AXI_CLK>,
+ <&mmcc MDSS_BYTE0_CLK>,
+ <&mmcc MDSS_ESC0_CLK>,
+ <&mmcc MDSS_MDP_CLK>,
+ <&mmcc MMSS_MISC_AHB_CLK>,
+ <&mmcc MDSS_PCLK0_CLK>,
+ <&mmcc MDSS_VSYNC_CLK>;
+ power-domains = <&mmcc MDSS_GDSC>;
};
};
@@ -53,9 +62,12 @@
};
};
+ /* TI TPS22902 */
vddio_disp_vreg: regulator-vddio-disp {
compatible = "regulator-fixed";
regulator-name = "vddio_disp";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
gpio = <&tlmm 34 GPIO_ACTIVE_HIGH>;
vin-supply = <&pm8226_l8>;
startup-delay-us = <300>;
@@ -97,6 +109,7 @@
};
&blsp1_i2c2 {
+ clock-frequency = <100000>;
status = "okay";
magnetometer@c {
@@ -126,6 +139,7 @@
};
&blsp1_i2c3 {
+ clock-frequency = <400000>;
status = "okay";
regulator@3e {
@@ -136,8 +150,8 @@
reg_lcd_pos: outp {
regulator-name = "outp";
- regulator-min-microvolt = <4000000>;
- regulator-max-microvolt = <6000000>;
+ regulator-min-microvolt = <5400000>;
+ regulator-max-microvolt = <5400000>;
regulator-active-discharge = <1>;
regulator-boot-on;
enable-gpios = <&tlmm 31 GPIO_ACTIVE_HIGH>;
@@ -145,8 +159,8 @@
reg_lcd_neg: outn {
regulator-name = "outn";
- regulator-min-microvolt = <4000000>;
- regulator-max-microvolt = <6000000>;
+ regulator-min-microvolt = <5400000>;
+ regulator-max-microvolt = <5400000>;
regulator-active-discharge = <1>;
regulator-boot-on;
enable-gpios = <&tlmm 33 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/qcom/msm8926.dtsi b/arch/arm/boot/dts/qcom/msm8926.dtsi
new file mode 100644
index 000000000000..629654c525b4
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8926.dtsi
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2025, Luca Weiss <luca@lucaweiss.eu>
+ */
+
+#include "qcom-msm8226.dtsi"
+
+&modem {
+ compatible = "qcom,msm8926-mss-pil";
+ /delete-property/ qcom,ext-bhs-reg;
+};
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
index da3be658e822..4546fa8beba4 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
+++ b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
@@ -5,6 +5,7 @@
/dts-v1/;
+#include "qcom-msm8226.dtsi"
#include "qcom-msm8226-samsung-matisse-common.dtsi"
/ {
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-milletwifi.dts b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-milletwifi.dts
index 7d519156d91d..a8543ca7b556 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-milletwifi.dts
+++ b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-milletwifi.dts
@@ -12,6 +12,8 @@
#include "pm8226.dtsi"
/delete-node/ &adsp_region;
+/delete-node/ &mba_region;
+/delete-node/ &mpss_region;
/delete-node/ &smem_region;
/ {
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064-ifc6410.dts b/arch/arm/boot/dts/qcom/qcom-apq8064-ifc6410.dts
index b3ff8010b149..717bfd74edb7 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8064-ifc6410.dts
+++ b/arch/arm/boot/dts/qcom/qcom-apq8064-ifc6410.dts
@@ -138,7 +138,6 @@
&hdmi {
core-vdda-supply = <&pm8921_hdmi_switch>;
- hpd-gpios = <&tlmm_pinmux 72 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064-lg-nexus4-mako.dts b/arch/arm/boot/dts/qcom/qcom-apq8064-lg-nexus4-mako.dts
new file mode 100644
index 000000000000..c187c6875bc6
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/qcom-apq8064-lg-nexus4-mako.dts
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/mfd/qcom-rpm.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+#include "qcom-apq8064-v2.0.dtsi"
+#include "pm8821.dtsi"
+#include "pm8921.dtsi"
+
+/ {
+ model = "LG Nexus 4 (mako)";
+ compatible = "lg,nexus4-mako", "qcom,apq8064";
+ chassis-type = "handset";
+
+ aliases {
+ serial0 = &gsbi7_serial;
+ serial1 = &gsbi6_serial;
+ serial2 = &gsbi4_serial;
+ };
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ battery_cell: battery-cell {
+ compatible = "simple-battery";
+ constant-charge-current-max-microamp = <900000>;
+ operating-range-celsius = <0 45>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+		ramoops@88d00000 {
+ compatible = "ramoops";
+ reg = <0x88d00000 0x100000>;
+ record-size = <0x20000>;
+ console-size = <0x20000>;
+ ftrace-size = <0x20000>;
+ };
+ };
+};
+
+&gsbi1 {
+ qcom,mode = <GSBI_PROT_I2C>;
+
+ status = "okay";
+};
+
+&gsbi1_i2c {
+ clock-frequency = <200000>;
+
+ status = "okay";
+};
+
+&gsbi4 {
+ qcom,mode = <GSBI_PROT_I2C_UART>;
+
+ status = "okay";
+};
+
+&gsbi4_serial {
+ status = "okay";
+};
+
+&pm8821 {
+ interrupts-extended = <&tlmm_pinmux 76 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&pm8921 {
+ interrupts-extended = <&tlmm_pinmux 74 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&pm8921_keypad {
+ linux,keymap = <
+ MATRIX_KEY(0, 0, KEY_VOLUMEDOWN)
+ MATRIX_KEY(0, 1, KEY_VOLUMEUP)
+ >;
+
+ keypad,num-rows = <1>;
+ keypad,num-columns = <5>;
+
+ status = "okay";
+};
+
+&riva {
+ pinctrl-names = "default";
+ pinctrl-0 = <&riva_wlan_pin_a>, <&riva_bt_pin_a>, <&riva_fm_pin_a>;
+
+ vddcx-supply = <&pm8921_s3>;
+ vddmx-supply = <&pm8921_l24>;
+ vddpx-supply = <&pm8921_s4>;
+
+ status = "okay";
+
+ iris {
+ vddxo-supply = <&pm8921_l4>;
+ vddrfa-supply = <&pm8921_s2>;
+ vddpa-supply = <&pm8921_l10>;
+ vdddig-supply = <&pm8921_lvs2>;
+ };
+};
+
+&rpm {
+ regulators {
+ compatible = "qcom,rpm-pm8921-regulators";
+
+ vdd_l1_l2_l12_l18-supply = <&pm8921_s4>;
+ vdd_l24-supply = <&pm8921_s1>;
+ vdd_l25-supply = <&pm8921_s1>;
+ vdd_l26-supply = <&pm8921_s7>;
+ vdd_l27-supply = <&pm8921_s7>;
+ vdd_l28-supply = <&pm8921_s7>;
+ vin_lvs1_3_6-supply = <&pm8921_s4>;
+ vin_lvs2-supply = <&pm8921_s1>;
+ vin_lvs4_5_7-supply = <&pm8921_s4>;
+
+ pm8921_l1: l1 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ bias-pull-down;
+ };
+
+ /* mipi_dsi.1-dsi1_pll_vdda */
+ pm8921_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ bias-pull-down;
+ };
+
+ /* msm_otg-HSUSB_3p3 */
+ pm8921_l3: l3 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3500000>;
+ bias-pull-down;
+ };
+
+ /* msm_otg-HSUSB_1p8 */
+ pm8921_l4: l4 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ /* msm_sdcc.1-sdc_vdd */
+ pm8921_l5: l5 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ bias-pull-down;
+ };
+
+ /* earjack_debug */
+ pm8921_l6: l6 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ bias-pull-down;
+ };
+
+ /* mipi_dsi.1-dsi_vci */
+ pm8921_l8: l8 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3000000>;
+ bias-pull-down;
+ };
+
+ /* wcnss_wlan.0-iris_vddpa */
+ pm8921_l10: l10 {
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ bias-pull-down;
+ };
+
+ /* mipi_dsi.1-dsi1_avdd */
+ pm8921_l11: l11 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ bias-pull-down;
+ };
+
+ /* touch_vdd */
+ pm8921_l15: l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ bias-pull-down;
+ };
+
+ /* slimport_dvdd */
+ pm8921_l18: l18 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ bias-pull-down;
+ };
+
+ /* touch_io */
+ pm8921_l22: l22 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ bias-pull-down;
+ };
+
+ /*
+ * mipi_dsi.1-dsi_vddio
+ * pil_qdsp6v4.1-pll_vdd
+ * pil_qdsp6v4.2-pll_vdd
+ * msm_ehci_host.0-HSUSB_1p8
+ * msm_ehci_host.1-HSUSB_1p8
+ */
+ pm8921_l23: l23 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ bias-pull-down;
+ };
+
+ /*
+ * tabla2x-slim-CDC_VDDA_A_1P2V
+ * tabla2x-slim-VDDD_CDC_D
+ */
+ pm8921_l24: l24 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1150000>;
+ bias-pull-down;
+ };
+
+ pm8921_l25: l25 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-always-on;
+ bias-pull-down;
+ };
+
+ pm8921_l26: l26 {
+ regulator-min-microvolt = <375000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-always-on;
+ bias-pull-down;
+ };
+
+ pm8921_l27: l27 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ pm8921_l28: l28 {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ bias-pull-down;
+ };
+
+ /* wcnss_wlan.0-iris_vddio */
+ pm8921_lvs1: lvs1 {
+ bias-pull-down;
+ };
+
+ /* wcnss_wlan.0-iris_vdddig */
+ pm8921_lvs2: lvs2 {
+ bias-pull-down;
+ };
+
+ pm8921_lvs3: lvs3 {
+ bias-pull-down;
+ };
+
+ pm8921_lvs4: lvs4 {
+ bias-pull-down;
+ };
+
+ pm8921_lvs5: lvs5 {
+ bias-pull-down;
+ };
+
+ /* mipi_dsi.1-dsi_iovcc */
+ pm8921_lvs6: lvs6 {
+ bias-pull-down;
+ };
+
+ /*
+ * pil_riva-pll_vdd
+ * lvds.0-lvds_vdda
+ * mipi_dsi.1-dsi1_vddio
+ * hdmi_msm.0-hdmi_vdda
+ */
+ pm8921_lvs7: lvs7 {
+ bias-pull-down;
+ };
+
+ pm8921_ncp: ncp {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,switch-mode-frequency = <1600000>;
+ };
+
+ /* Buck SMPS */
+ pm8921_s1: s1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ qcom,switch-mode-frequency = <3200000>;
+ bias-pull-down;
+ };
+
+ pm8921_s2: s2 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ qcom,switch-mode-frequency = <1600000>;
+ bias-pull-down;
+ };
+
+ /* msm otg HSUSB_VDDCX */
+ pm8921_s3: s3 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1150000>;
+ qcom,switch-mode-frequency = <4800000>;
+ bias-pull-down;
+ };
+
+ /*
+ * msm_sdcc.1-sdc-vdd_io
+ * tabla2x-slim-CDC_VDDA_RX
+ * tabla2x-slim-CDC_VDDA_TX
+ * tabla2x-slim-CDC_VDD_CP
+ * tabla2x-slim-VDDIO_CDC
+ */
+ pm8921_s4: s4 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,switch-mode-frequency = <1600000>;
+ bias-pull-down;
+ qcom,force-mode = <QCOM_RPM_FORCE_MODE_AUTO>;
+ };
+
+ /*
+ * supply vdd_l26, vdd_l27, vdd_l28
+ */
+ pm8921_s7: s7 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ qcom,switch-mode-frequency = <3200000>;
+ };
+
+ pm8921_s8: s8 {
+ regulator-min-microvolt = <2200000>;
+ regulator-max-microvolt = <2200000>;
+ qcom,switch-mode-frequency = <1600000>;
+ };
+ };
+};
+
+/* eMMC */
+&sdcc1 {
+ vmmc-supply = <&pm8921_l5>;
+ vqmmc-supply = <&pm8921_s4>;
+
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
index 5f1a6b4b7644..17e506ca2438 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
@@ -213,12 +213,6 @@
};
};
- sfpb_mutex: hwmutex {
- compatible = "qcom,sfpb-mutex";
- syscon = <&sfpb_wrapper_mutex 0x604 0x4>;
- #hwlock-cells = <1>;
- };
-
smem {
compatible = "qcom,smem";
memory-region = <&smem_region>;
@@ -284,6 +278,40 @@
};
};
+ replicator {
+ compatible = "arm,coresight-static-replicator";
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ replicator_in: endpoint {
+ remote-endpoint = <&funnel_out>;
+ };
+ };
+ };
+
+ out-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ replicator_out0: endpoint {
+ remote-endpoint = <&etb_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ replicator_out1: endpoint {
+ remote-endpoint = <&tpiu_in>;
+ };
+ };
+ };
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -305,9 +333,10 @@
pinctrl-0 = <&ps_hold_default_state>;
};
- sfpb_wrapper_mutex: syscon@1200000 {
- compatible = "syscon";
- reg = <0x01200000 0x8000>;
+ sfpb_mutex: hwmutex@1200600 {
+ compatible = "qcom,sfpb-mutex";
+ reg = <0x01200600 0x100>;
+ #hwlock-cells = <1>;
};
intc: interrupt-controller@2000000 {
@@ -326,6 +355,8 @@
<GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
reg = <0x0200a000 0x100>;
clock-frequency = <27000000>;
+ clocks = <&sleep_clk>;
+ clock-names = "sleep";
cpu-offset = <0x80000>;
};
@@ -405,8 +436,8 @@
};
};
- sps_sic_non_secure: sps-sic-non-secure@12100000 {
- compatible = "syscon";
+ sps_sic_non_secure: interrupt-controller@12100000 {
+ compatible = "qcom,apq8064-sps-sic", "syscon";
reg = <0x12100000 0x10000>;
};
@@ -737,7 +768,8 @@
<&dsi0_phy 0>,
<&dsi1_phy 1>,
<&dsi1_phy 0>,
- <&hdmi_phy>;
+ <&hdmi_phy>,
+ <&mdp>;
clock-names = "pxo",
"pll3",
"pll8_vote",
@@ -745,7 +777,8 @@
"dsi1pllbyte",
"dsi2pll",
"dsi2pllbyte",
- "hdmipll";
+ "hdmipll",
+ "lvdspll";
};
l2cc: clock-controller@2011000 {
@@ -1089,7 +1122,7 @@
};
mmss_sfpb: syscon@5700000 {
- compatible = "syscon";
+ compatible = "qcom,apq8064-mmss-sfpb", "syscon";
reg = <0x5700000 0x70>;
};
@@ -1404,13 +1437,19 @@
<&mmcc MDP_AXI_CLK>,
<&mmcc MDP_LUT_CLK>,
<&mmcc HDMI_TV_CLK>,
- <&mmcc MDP_TV_CLK>;
+ <&mmcc MDP_TV_CLK>,
+ <&mmcc LVDS_CLK>,
+ <&rpmcc RPM_PXO_CLK>;
clock-names = "core_clk",
"iface_clk",
"bus_clk",
"lut_clk",
"hdmi_clk",
- "tv_clk";
+ "tv_clk",
+ "lcdc_clk",
+ "pxo";
+
+ #clock-cells = <0>;
iommus = <&mdp_port0 0
&mdp_port0 2
@@ -1532,39 +1571,6 @@
};
};
- replicator {
- compatible = "arm,coresight-static-replicator";
-
- clocks = <&rpmcc RPM_QDSS_CLK>;
- clock-names = "apb_pclk";
-
- out-ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- replicator_out0: endpoint {
- remote-endpoint = <&etb_in>;
- };
- };
- port@1 {
- reg = <1>;
- replicator_out1: endpoint {
- remote-endpoint = <&tpiu_in>;
- };
- };
- };
-
- in-ports {
- port {
- replicator_in: endpoint {
- remote-endpoint = <&funnel_out>;
- };
- };
- };
- };
-
funnel@1a04000 {
compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
reg = <0x1a04000 0x1000>;
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8074-dragonboard.dts b/arch/arm/boot/dts/qcom/qcom-apq8074-dragonboard.dts
index 6fce0112361f..34b0cf35fdac 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8074-dragonboard.dts
+++ b/arch/arm/boot/dts/qcom/qcom-apq8074-dragonboard.dts
@@ -149,7 +149,7 @@
};
&pm8941_gpios {
- msm_keys_default: pm8941-gpio-keys-state {
+ msm_keys_default: pm8941-gpio-keys-state {
pins = "gpio5", "gpio23";
function = "normal";
input-enable;
@@ -157,7 +157,7 @@
bias-pull-up;
qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
power-source = <PM8941_GPIO_S3>; /* 1.8V */
- };
+ };
};
&pm8941_lpg {
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4018-ap120c-ac.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4018-ap120c-ac.dtsi
index a6d4390efa7c..be76bc39ac27 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq4018-ap120c-ac.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-ipq4018-ap120c-ac.dtsi
@@ -251,7 +251,7 @@
status = "okay";
nvmem-cell-names = "pre-calibration";
nvmem-cells = <&precal_art_5000>;
- qcom,ath10k-calibration-variant = "ALFA-Network-AP120C-AC";
+ qcom,calibration-variant = "ALFA-Network-AP120C-AC";
};
&usb3_hs_phy {
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4018-jalapeno.dts b/arch/arm/boot/dts/qcom/qcom-ipq4018-jalapeno.dts
index 6640ea7b6acb..15baaf0d1529 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq4018-jalapeno.dts
+++ b/arch/arm/boot/dts/qcom/qcom-ipq4018-jalapeno.dts
@@ -179,13 +179,13 @@
&wifi0 {
status = "okay";
- qcom,ath10k-calibration-variant = "8devices-Jalapeno";
+ qcom,calibration-variant = "8devices-Jalapeno";
};
&wifi1 {
status = "okay";
- qcom,ath10k-calibration-variant = "8devices-Jalapeno";
+ qcom,calibration-variant = "8devices-Jalapeno";
};
&usb3_ss_phy {
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1.dtsi
index cc88cf5f0d9b..5a95a2d03c42 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1.dtsi
@@ -43,7 +43,7 @@
"gpio64", "gpio65", "gpio66",
"gpio67", "gpio68", "gpio69";
function = "qpic";
- };
+ };
};
serial@78af000 {
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi
index 06b20c196faf..f77542fb3d4f 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi
@@ -53,7 +53,6 @@
reg = <0x0>;
clocks = <&gcc GCC_APPS_CLK_SRC>;
clock-frequency = <0>;
- clock-latency = <256000>;
operating-points-v2 = <&cpu0_opp_table>;
};
@@ -67,7 +66,6 @@
reg = <0x1>;
clocks = <&gcc GCC_APPS_CLK_SRC>;
clock-frequency = <0>;
- clock-latency = <256000>;
operating-points-v2 = <&cpu0_opp_table>;
};
@@ -81,7 +79,6 @@
reg = <0x2>;
clocks = <&gcc GCC_APPS_CLK_SRC>;
clock-frequency = <0>;
- clock-latency = <256000>;
operating-points-v2 = <&cpu0_opp_table>;
};
@@ -95,7 +92,6 @@
reg = <0x3>;
clocks = <&gcc GCC_APPS_CLK_SRC>;
clock-frequency = <0>;
- clock-latency = <256000>;
operating-points-v2 = <&cpu0_opp_table>;
};
@@ -126,7 +122,7 @@
opp-716000000 {
opp-hz = /bits/ 64 <716000000>;
clock-latency-ns = <256000>;
- };
+ };
};
memory {
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi
index ca76bf8af75e..d4a32af0ef8f 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi
@@ -8,7 +8,11 @@
* Copyright (c) 2023, Rayyan Ansari <rayyan@ansari.sh>
*/
-#include "qcom-msm8226.dtsi"
+/*
+ * The .dts should first include qcom-msm8226.dtsi or msm8926.dtsi depending on
+ * the SoC on the given device.
+ */
+
#include "pm8226.dtsi"
#include <dt-bindings/input/input.h>
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-dempsey.dts b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-dempsey.dts
index 2c664b5934ec..f448c9088416 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-dempsey.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-dempsey.dts
@@ -8,6 +8,7 @@
/dts-v1/;
+#include "qcom-msm8226.dtsi"
#include "qcom-msm8226-microsoft-common.dtsi"
/ {
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-makepeace.dts b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-makepeace.dts
index 731c5c375678..94bf3b1ad1bd 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-makepeace.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-makepeace.dts
@@ -8,6 +8,7 @@
/dts-v1/;
+#include "qcom-msm8226.dtsi"
#include "qcom-msm8226-microsoft-common.dtsi"
/ {
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-moneypenny.dts b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-moneypenny.dts
index a28a83cb5340..d8cdb75dfbb8 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-moneypenny.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-moneypenny.dts
@@ -8,6 +8,7 @@
/dts-v1/;
+#include "qcom-msm8226.dtsi"
#include "qcom-msm8226-microsoft-common.dtsi"
/* This device has no magnetometer */
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226-samsung-matisse-common.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8226-samsung-matisse-common.dtsi
index a15a44fc0181..f1544a7e8369 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8226-samsung-matisse-common.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226-samsung-matisse-common.dtsi
@@ -3,11 +3,17 @@
* Copyright (c) 2022, Matti Lehtimäki <matti.lehtimaki@gmail.com>
*/
+/*
+ * The .dts should first include qcom-msm8226.dtsi or msm8926.dtsi depending on
+ * the SoC on the given device.
+ */
+
#include <dt-bindings/input/input.h>
-#include "qcom-msm8226.dtsi"
#include "pm8226.dtsi"
/delete-node/ &adsp_region;
+/delete-node/ &mba_region;
+/delete-node/ &mpss_region;
/delete-node/ &smem_region;
/ {
@@ -145,12 +151,12 @@
no-map;
};
- mpss@8400000 {
+ mpss_region: mpss@8400000 {
reg = <0x08400000 0x1f00000>;
no-map;
};
- mba@a300000 {
+ mba_region: mba@a300000 {
reg = <0x0a300000 0x100000>;
no-map;
};
@@ -223,6 +229,13 @@
status = "okay";
};
+&modem {
+ mx-supply = <&pm8226_l3>;
+ pll-supply = <&pm8226_l8>;
+
+ status = "okay";
+};
+
&rpm_requests {
regulators {
compatible = "qcom,rpm-pm8226-regulators";
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
index 64c8ac94f352..51a7a3fb36d8 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
@@ -6,6 +6,7 @@
/dts-v1/;
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-msm8974.h>
#include <dt-bindings/clock/qcom,mmcc-msm8974.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
@@ -213,6 +214,18 @@
no-map;
};
+ mpss_region: mpss@8000000 {
+ reg = <0x08000000 0x5100000>;
+ no-map;
+ status = "disabled";
+ };
+
+ mba_region: mba@d100000 {
+ reg = <0x0d100000 0x100000>;
+ no-map;
+ status = "disabled";
+ };
+
adsp_region: adsp@dc00000 {
reg = <0x0dc00000 0x1900000>;
no-map;
@@ -253,6 +266,65 @@
};
};
+ smp2p-modem {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&apcs 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ modem_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smsm {
+ compatible = "qcom,smsm";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mboxes = <0>, <&apcs 13>, <&apcs 9>, <&apcs 19>;
+
+ apps_smsm: apps@0 {
+ reg = <0>;
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smsm: modem@1 {
+ reg = <1>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ adsp_smsm: adsp@2 {
+ reg = <2>;
+ interrupts = <GIC_SPI 157 IRQ_TYPE_EDGE_RISING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ wcnss_smsm: wcnss@7 {
+ reg = <7>;
+ interrupts = <GIC_SPI 144 IRQ_TYPE_EDGE_RISING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
soc: soc {
compatible = "simple-bus";
#address-cells = <1>;
@@ -845,12 +917,96 @@
#interrupt-cells = <4>;
};
+ bam_dmux_dma: dma-controller@fc834000 {
+ compatible = "qcom,bam-v1.4.0";
+ reg = <0xfc834000 0x7000>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+
+ num-channels = <6>;
+ qcom,num-ees = <1>;
+ qcom,powered-remotely;
+ };
+
+ modem: remoteproc@fc880000 {
+ compatible = "qcom,msm8226-mss-pil";
+ reg = <0xfc880000 0x4040>,
+ <0xfc820000 0x10000>;
+ reg-names = "qdsp6",
+ "rmb";
+
+ interrupts-extended = <&intc GIC_SPI 24 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack";
+
+ clocks = <&gcc GCC_MSS_Q6_BIMC_AXI_CLK>,
+ <&gcc GCC_MSS_CFG_AHB_CLK>,
+ <&gcc GCC_BOOT_ROM_AHB_CLK>,
+ <&rpmcc RPM_SMD_XO_CLK_SRC>;
+ clock-names = "iface",
+ "bus",
+ "mem",
+ "xo";
+
+ resets = <&gcc GCC_MSS_RESTART>;
+ reset-names = "mss_restart";
+
+ power-domains = <&rpmpd MSM8226_VDDCX>;
+ power-domain-names = "cx";
+
+ qcom,ext-bhs-reg = <&tcsr_regs_1 0x194>;
+ qcom,halt-regs = <&tcsr_regs_1 0x180 0x200 0x280>;
+
+ qcom,smem-states = <&modem_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ memory-region = <&mba_region>, <&mpss_region>;
+
+ status = "disabled";
+
+ bam_dmux: bam-dmux {
+ compatible = "qcom,bam-dmux";
+
+ interrupt-parent = <&modem_smsm>;
+ interrupts = <1 IRQ_TYPE_EDGE_BOTH>, <11 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "pc", "pc-ack";
+
+ qcom,smem-states = <&apps_smsm 1>, <&apps_smsm 11>;
+ qcom,smem-state-names = "pc", "pc-ack";
+
+ dmas = <&bam_dmux_dma 4>, <&bam_dmux_dma 5>;
+ dma-names = "tx", "rx";
+ };
+
+ smd-edge {
+ interrupts = <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&apcs 12>;
+ qcom,smd-edge = <0>;
+
+ label = "modem";
+ };
+ };
+
tcsr_mutex: hwlock@fd484000 {
compatible = "qcom,msm8226-tcsr-mutex", "qcom,tcsr-mutex";
reg = <0xfd484000 0x1000>;
#hwlock-cells = <1>;
};
+ tcsr_regs_1: syscon@fd485000 {
+ compatible = "qcom,tcsr-msm8226", "syscon";
+ reg = <0xfd485000 0x1000>;
+ };
+
tlmm: pinctrl@fd510000 {
compatible = "qcom,msm8226-pinctrl";
reg = <0xfd510000 0x4000>;
@@ -983,8 +1139,8 @@
<&gcc GPLL0_VOTE>,
<&gcc GPLL1_VOTE>,
<&rpmcc RPM_SMD_GFX3D_CLK_SRC>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi0_phy 0>;
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>;
clock-names = "xo",
"mmss_gpll0_vote",
"gpll0_vote",
@@ -1060,8 +1216,8 @@
assigned-clocks = <&mmcc BYTE0_CLK_SRC>,
<&mmcc PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
clocks = <&mmcc MDSS_MDP_CLK>,
<&mmcc MDSS_AHB_CLK>,
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8926-htc-memul.dts b/arch/arm/boot/dts/qcom/qcom-msm8926-htc-memul.dts
index 3037344eb240..cb571aa13c11 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8926-htc-memul.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8926-htc-memul.dts
@@ -5,10 +5,12 @@
/dts-v1/;
-#include "qcom-msm8226.dtsi"
+#include "msm8926.dtsi"
#include "pm8226.dtsi"
/delete-node/ &adsp_region;
+/delete-node/ &mba_region;
+/delete-node/ &mpss_region;
/delete-node/ &smem_region;
/ {
@@ -193,6 +195,16 @@
/* TPS61310 Flash/Torch @ 33 */
};
+&modem {
+ mx-supply = <&pm8226_l3>;
+ pll-supply = <&pm8226_l8>;
+ mss-supply = <&pm8226_s5>;
+
+ firmware-name = "qcom/msm8926/memul/mba.b00", "qcom/msm8926/memul/modem.mdt";
+
+ status = "okay";
+};
+
&pm8226_vib {
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-superman-lte.dts b/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-superman-lte.dts
index 9b48661d69c5..eea4fd8cd972 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-superman-lte.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-superman-lte.dts
@@ -8,6 +8,7 @@
/dts-v1/;
+#include "msm8926.dtsi"
#include "qcom-msm8226-microsoft-common.dtsi"
/* This device has touchscreen on i2c3 instead */
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-tesla.dts b/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-tesla.dts
index 55077a5f2e34..f23bbb94cc5e 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-tesla.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-tesla.dts
@@ -8,6 +8,7 @@
/dts-v1/;
+#include "msm8926.dtsi"
#include "qcom-msm8226-microsoft-common.dtsi"
/* This device has touchscreen on i2c1 instead */
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8926-motorola-peregrine.dts b/arch/arm/boot/dts/qcom/qcom-msm8926-motorola-peregrine.dts
index 376a33125941..db3273c755c2 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8926-motorola-peregrine.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8926-motorola-peregrine.dts
@@ -2,7 +2,7 @@
/dts-v1/;
-#include "qcom-msm8226.dtsi"
+#include "msm8926.dtsi"
#include "pm8226.dtsi"
/delete-node/ &smem_region;
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8926-samsung-matisselte.dts b/arch/arm/boot/dts/qcom/qcom-msm8926-samsung-matisselte.dts
index d0e1bc39f8ef..73e19176eb97 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8926-samsung-matisselte.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8926-samsung-matisselte.dts
@@ -6,6 +6,7 @@
/dts-v1/;
+#include "msm8926.dtsi"
#include "qcom-msm8226-samsung-matisse-common.dtsi"
/ {
@@ -27,6 +28,10 @@
};
};
+&modem {
+ mss-supply = <&pm8226_s5>;
+};
+
&tlmm {
tsp_en1_default_state: tsp-en1-default-state {
pins = "gpio32";
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8960.dtsi
index 865fe7cc3951..4babd0bbe5d6 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8960.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8960.dtsi
@@ -52,6 +52,48 @@
reg = <0x80000000 0>;
};
+ thermal-zones {
+ cpu0-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsens 0>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <60000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <95000>;
+ hysteresis = <10000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu1-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsens 1>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <60000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+
+ cpu_crit1: trip1 {
+ temperature = <95000>;
+ hysteresis = <10000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
cpu-pmu {
compatible = "qcom,krait-pmu";
interrupts = <GIC_PPI 10 0x304>;
@@ -112,9 +154,26 @@
<GIC_PPI 3 0x301>;
reg = <0x0200a000 0x100>;
clock-frequency = <27000000>;
+ clocks = <&sleep_clk>;
+ clock-names = "sleep";
cpu-offset = <0x80000>;
};
+ qfprom: efuse@700000 {
+ compatible = "qcom,msm8960-qfprom", "qcom,qfprom";
+ reg = <0x00700000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ tsens_calib: calib@404 {
+ reg = <0x404 0x10>;
+ };
+
+ tsens_backup: backup-calib@414 {
+ reg = <0x414 0x10>;
+ };
+ };
+
msmgpio: pinctrl@800000 {
compatible = "qcom,msm8960-pinctrl";
gpio-controller;
@@ -127,7 +186,7 @@
};
gcc: clock-controller@900000 {
- compatible = "qcom,gcc-msm8960";
+ compatible = "qcom,gcc-msm8960", "syscon";
#clock-cells = <1>;
#reset-cells = <1>;
reg = <0x900000 0x4000>;
@@ -135,6 +194,18 @@
<&pxo_board>,
<&lcc PLL4>;
clock-names = "cxo", "pxo", "pll4";
+
+ tsens: thermal-sensor {
+ compatible = "qcom,msm8960-tsens";
+
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow";
+
+ #qcom,sensors = <5>;
+ #thermal-sensor-cells = <1>;
+ };
};
lcc: clock-controller@28000000 {
@@ -279,7 +350,7 @@
compatible = "arm,pl18x", "arm,primecell";
arm,primecell-periphid = <0x00051180>;
status = "disabled";
- reg = <0x12180000 0x8000>;
+ reg = <0x12180000 0x2000>;
interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc SDC3_CLK>, <&gcc SDC3_H_CLK>;
clock-names = "mclk", "apb_pclk";
@@ -289,13 +360,25 @@
max-frequency = <192000000>;
no-1-8-v;
vmmc-supply = <&vsdcc_fixed>;
+ dmas = <&sdcc3bam 2>, <&sdcc3bam 1>;
+ dma-names = "tx", "rx";
+ };
+
+ sdcc3bam: dma-controller@12182000 {
+ compatible = "qcom,bam-v1.3.0";
+ reg = <0x12182000 0x4000>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc SDC3_H_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
};
sdcc1: mmc@12400000 {
status = "disabled";
compatible = "arm,pl18x", "arm,primecell";
arm,primecell-periphid = <0x00051180>;
- reg = <0x12400000 0x8000>;
+ reg = <0x12400000 0x2000>;
interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc SDC1_CLK>, <&gcc SDC1_H_CLK>;
clock-names = "mclk", "apb_pclk";
@@ -305,6 +388,18 @@
cap-sd-highspeed;
cap-mmc-highspeed;
vmmc-supply = <&vsdcc_fixed>;
+ dmas = <&sdcc1bam 2>, <&sdcc1bam 1>;
+ dma-names = "tx", "rx";
+ };
+
+ sdcc1bam: dma-controller@12402000 {
+ compatible = "qcom,bam-v1.3.0";
+ reg = <0x12402000 0x4000>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc SDC1_H_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
};
tcsr: syscon@1a400000 {
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
index e3f9c56a778c..7e119370f337 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
@@ -3,6 +3,7 @@
#include <dt-bindings/interconnect/qcom,msm8974.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-msm8974.h>
#include <dt-bindings/clock/qcom,mmcc-msm8974.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
@@ -1871,10 +1872,10 @@
<&gcc GPLL0_VOTE>,
<&gcc GPLL1_VOTE>,
<&rpmcc RPM_SMD_GFX3D_CLK_SRC>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi1_phy 1>,
- <&mdss_dsi1_phy 0>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
<0>,
<0>,
<0>;
@@ -1961,8 +1962,10 @@
interrupt-parent = <&mdss>;
interrupts = <4>;
- assigned-clocks = <&mmcc BYTE0_CLK_SRC>, <&mmcc PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+ assigned-clocks = <&mmcc BYTE0_CLK_SRC>,
+ <&mmcc PCLK0_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
clocks = <&mmcc MDSS_MDP_CLK>,
<&mmcc MDSS_AHB_CLK>,
@@ -2032,8 +2035,10 @@
interrupt-parent = <&mdss>;
interrupts = <4>;
- assigned-clocks = <&mmcc BYTE1_CLK_SRC>, <&mmcc PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>, <&mdss_dsi1_phy 1>;
+ assigned-clocks = <&mmcc BYTE1_CLK_SRC>,
+ <&mmcc PCLK1_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
clocks = <&mmcc MDSS_MDP_CLK>,
<&mmcc MDSS_AHB_CLK>,
diff --git a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
index 39530eb580ea..20fdae9825e0 100644
--- a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
@@ -57,7 +57,7 @@
enable-method = "psci";
clocks = <&apcs>;
power-domains = <&rpmhpd SDX55_CX>;
- power-domain-names = "rpmhpd";
+ power-domain-names = "perf";
operating-points-v2 = <&cpu_opp_table>;
};
};
diff --git a/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi b/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi
index 6b23ee676c9e..c8e312dcd26b 100644
--- a/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi
@@ -58,7 +58,7 @@
enable-method = "psci";
clocks = <&apcs>;
power-domains = <&rpmhpd SDX65_CX_AO>;
- power-domain-names = "rpmhpd";
+ power-domain-names = "perf";
operating-points-v2 = <&cpu_opp_table>;
};
};
diff --git a/arch/arm/boot/dts/renesas/Makefile b/arch/arm/boot/dts/renesas/Makefile
index 833a02447ecf..947c7fe02803 100644
--- a/arch/arm/boot/dts/renesas/Makefile
+++ b/arch/arm/boot/dts/renesas/Makefile
@@ -30,4 +30,5 @@ dtb-$(CONFIG_ARCH_RENESAS) += \
r8a7794-alt.dtb \
r8a7794-silk.dtb \
r9a06g032-rzn1d400-db.dtb \
+ r9a06g032-rzn1d400-eb.dtb \
sh73a0-kzm9g.dtb
diff --git a/arch/arm/boot/dts/renesas/r9a06g032-rzn1d400-db.dts b/arch/arm/boot/dts/renesas/r9a06g032-rzn1d400-db.dts
index 31cdca3e623c..2de047393652 100644
--- a/arch/arm/boot/dts/renesas/r9a06g032-rzn1d400-db.dts
+++ b/arch/arm/boot/dts/renesas/r9a06g032-rzn1d400-db.dts
@@ -8,8 +8,10 @@
/dts-v1/;
-#include <dt-bindings/pinctrl/rzn1-pinctrl.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
#include <dt-bindings/net/pcs-rzn1-miic.h>
+#include <dt-bindings/pinctrl/rzn1-pinctrl.h>
#include "r9a06g032.dtsi"
@@ -24,6 +26,68 @@
aliases {
serial0 = &uart0;
};
+
+ keyboard {
+ compatible = "gpio-keys-polled";
+ poll-interval = <100>;
+
+ switch-1 {
+ linux,code = <KEY_1>;
+ label = "SW1-1";
+ debounce-interval = <20>;
+ gpios = <&pca9698 8 GPIO_ACTIVE_LOW>;
+ };
+
+ switch-2 {
+ linux,code = <KEY_2>;
+ label = "SW1-2";
+ debounce-interval = <20>;
+ gpios = <&pca9698 9 GPIO_ACTIVE_LOW>;
+ };
+
+ switch-3 {
+ linux,code = <KEY_3>;
+ label = "SW1-3";
+ debounce-interval = <20>;
+ gpios = <&pca9698 10 GPIO_ACTIVE_LOW>;
+ };
+
+ switch-4 {
+ linux,code = <KEY_4>;
+ label = "SW1-4";
+ debounce-interval = <20>;
+ gpios = <&pca9698 11 GPIO_ACTIVE_LOW>;
+ };
+
+ switch-5 {
+ linux,code = <KEY_5>;
+ label = "SW1-5";
+ debounce-interval = <20>;
+ gpios = <&pca9698 12 GPIO_ACTIVE_LOW>;
+ };
+
+ switch-6 {
+ linux,code = <KEY_6>;
+ label = "SW1-6";
+ debounce-interval = <20>;
+ gpios = <&pca9698 13 GPIO_ACTIVE_LOW>;
+ };
+
+ switch-7 {
+ linux,code = <KEY_7>;
+ label = "SW1-7";
+ debounce-interval = <20>;
+ gpios = <&pca9698 14 GPIO_ACTIVE_LOW>;
+ };
+
+ switch-8 {
+ linux,code = <KEY_8>;
+ label = "SW1-8";
+ debounce-interval = <20>;
+ gpios = <&pca9698 15 GPIO_ACTIVE_LOW>;
+ };
+
+ };
};
&can0 {
@@ -57,6 +121,44 @@
};
};
+&i2c2 {
+ pinctrl-0 = <&pins_i2c2>;
+ pinctrl-names = "default";
+ status = "okay";
+ clock-frequency = <400000>;
+
+ pca9698: gpio@20 {
+ compatible = "nxp,pca9698";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ /* configure the analog switch to let i2c2 access the eeprom */
+ max4662-in1-hog {
+ gpio-hog;
+ gpios = <16 0>;
+ output-high;
+ };
+ max4662-in2-hog {
+ gpio-hog;
+ gpios = <17 0>;
+ output-low;
+ };
+ max4662-in3-hog {
+ gpio-hog;
+ gpios = <18 0>;
+ output-low;
+ };
+ };
+
+ /* Some revisions may have a 24cs64 at address 0x58 */
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ pagesize = <32>;
+ reg = <0x50>;
+ };
+};
+
&mii_conv4 {
renesas,miic-input = <MIIC_SWITCH_PORTB>;
status = "okay";
@@ -68,6 +170,9 @@
};
&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_cpld>;
+
pins_can0: pins_can0 {
pinmux = <RZN1_PINMUX(162, RZN1_FUNC_CAN)>, /* CAN0_TXD */
<RZN1_PINMUX(163, RZN1_FUNC_CAN)>; /* CAN0_RXD */
@@ -80,6 +185,13 @@
drive-strength = <6>;
};
+ pins_cpld: pins-cpld {
+ pinmux = <RZN1_PINMUX(119, RZN1_FUNC_USB)>,
+ <RZN1_PINMUX(120, RZN1_FUNC_USB)>,
+ <RZN1_PINMUX(121, RZN1_FUNC_USB)>,
+ <RZN1_PINMUX(122, RZN1_FUNC_USB)>;
+ };
+
pins_eth3: pins_eth3 {
pinmux = <RZN1_PINMUX(36, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
<RZN1_PINMUX(37, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
@@ -114,6 +226,12 @@
bias-disable;
};
+ pins_i2c2: pins_i2c2 {
+ pinmux = <RZN1_PINMUX(115, RZN1_FUNC_I2C)>,
+ <RZN1_PINMUX(116, RZN1_FUNC_I2C)>;
+ drive-strength = <12>;
+ };
+
pins_mdio1: pins_mdio1 {
pinmux = <RZN1_PINMUX(152, RZN1_FUNC_MDIO1_SWITCH)>,
<RZN1_PINMUX(153, RZN1_FUNC_MDIO1_SWITCH)>;
@@ -174,6 +292,10 @@
status = "okay";
};
+&udc {
+ status = "okay";
+};
+
&wdt0 {
timeout-sec = <60>;
status = "okay";
diff --git a/arch/arm/boot/dts/renesas/r9a06g032-rzn1d400-eb.dts b/arch/arm/boot/dts/renesas/r9a06g032-rzn1d400-eb.dts
new file mode 100644
index 000000000000..97a339b30d76
--- /dev/null
+++ b/arch/arm/boot/dts/renesas/r9a06g032-rzn1d400-eb.dts
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the RZN1D-EB Board
+ *
+ * Copyright (C) 2023 Schneider-Electric
+ *
+ */
+
+#include <dt-bindings/leds/common.h>
+#include "r9a06g032-rzn1d400-db.dts"
+
+/ {
+ model = "RZN1D-EB Board";
+ compatible = "renesas,rzn1d400-eb", "renesas,rzn1d400-db",
+ "renesas,r9a06g032";
+};
+
+&gmac1 {
+ pinctrl-0 = <&pins_eth0>, <&pins_mdio0>;
+ pinctrl-names = "default";
+
+ status = "okay";
+ phy-mode = "rgmii-id";
+ phy-handle = <&phy_mii0>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ phy_mii0: ethernet-phy@8 {
+ reg = <8>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_ORANGE>;
+ function = LED_FUNCTION_ACTIVITY;
+ default-state = "keep";
+ };
+ };
+ };
+ };
+};
+
+&i2c2 {
+ /* Sensors are different across revisions. All are LM75B compatible */
+ sensor@49 {
+ compatible = "national,lm75b";
+ reg = <0x49>;
+ };
+};
+
+&mii_conv1 {
+ renesas,miic-input = <MIIC_GMAC1_PORT>;
+ status = "okay";
+};
+
+&mii_conv2 {
+ renesas,miic-input = <MIIC_SWITCH_PORTD>;
+ status = "okay";
+};
+
+&mii_conv3 {
+ renesas,miic-input = <MIIC_SWITCH_PORTC>;
+ status = "okay";
+};
+
+&pci_usb {
+ status = "okay";
+};
+
+&pinctrl {
+ pins_eth0: pins-eth0 {
+ pinmux = <RZN1_PINMUX(0, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(1, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(2, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(3, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(4, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(5, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(6, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(7, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(8, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(9, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(10, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(11, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>;
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ pins_eth1: pins-eth1 {
+ pinmux = <RZN1_PINMUX(12, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(13, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(14, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(15, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(16, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(17, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(18, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(19, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(20, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(21, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(22, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(23, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>;
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ pins_eth2: pins-eth2 {
+ pinmux = <RZN1_PINMUX(24, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(25, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(26, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(27, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(28, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(29, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(30, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(31, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(32, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(33, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(34, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>,
+ <RZN1_PINMUX(35, RZN1_FUNC_CLK_ETH_MII_RGMII_RMII)>;
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ pins_mdio0: pins-mdio0 {
+ pinmux = <RZN1_PINMUX(150, RZN1_FUNC_MDIO0_GMAC0)>,
+ <RZN1_PINMUX(151, RZN1_FUNC_MDIO0_GMAC0)>;
+ };
+
+ pins_sdio1: pins-sdio1 {
+ pinmux = <RZN1_PINMUX(95, RZN1_FUNC_SDIO)>,
+ <RZN1_PINMUX(97, RZN1_FUNC_SDIO)>,
+ <RZN1_PINMUX(98, RZN1_FUNC_SDIO)>,
+ <RZN1_PINMUX(99, RZN1_FUNC_SDIO)>,
+ <RZN1_PINMUX(100, RZN1_FUNC_SDIO)>,
+ <RZN1_PINMUX(101, RZN1_FUNC_SDIO_E)>,
+ <RZN1_PINMUX(102, RZN1_FUNC_SDIO_E)>;
+ };
+
+ pins_sdio1_clk: pins-sdio1-clk {
+ pinmux = <RZN1_PINMUX(96, RZN1_FUNC_SDIO)>;
+ drive-strength = <12>;
+ };
+
+ pins_uart2: pins-uart2 {
+ pinmux = <RZN1_PINMUX(105, RZN1_FUNC_UART2)>,
+ <RZN1_PINMUX(106, RZN1_FUNC_UART2)>,
+ <RZN1_PINMUX(107, RZN1_FUNC_UART2)>,
+ <RZN1_PINMUX(108, RZN1_FUNC_UART2)>;
+ bias-disable;
+ };
+};
+
+&sdio1 {
+ pinctrl-0 = <&pins_sdio1>, <&pins_sdio1_clk>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&switch {
+ pinctrl-0 = <&pins_eth1>, <&pins_eth2>, <&pins_eth3>, <&pins_eth4>,
+ <&pins_mdio1>;
+
+ mdio {
+ /* CN15 and CN16 switches must be configured in MDIO2 mode */
+ switch0phy1: ethernet-phy@1 {
+ reg = <1>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_ORANGE>;
+ function = LED_FUNCTION_ACTIVITY;
+ default-state = "keep";
+ };
+ };
+ };
+
+ switch0phy10: ethernet-phy@10 {
+ reg = <10>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_ORANGE>;
+ function = LED_FUNCTION_ACTIVITY;
+ default-state = "keep";
+ };
+ };
+ };
+ };
+};
+
+&switch_port2 {
+ label = "lan2";
+ phy-mode = "rgmii-id";
+ phy-handle = <&switch0phy10>;
+ status = "okay";
+};
+
+&switch_port3 {
+ label = "lan3";
+ phy-mode = "rgmii-id";
+ phy-handle = <&switch0phy1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&pins_uart2>;
+ pinctrl-names = "default";
+ status = "okay";
+ uart-has-rtscts;
+};
diff --git a/arch/arm/boot/dts/renesas/r9a06g032.dtsi b/arch/arm/boot/dts/renesas/r9a06g032.dtsi
index 87e03446fb4d..80ad1fdc77a0 100644
--- a/arch/arm/boot/dts/renesas/r9a06g032.dtsi
+++ b/arch/arm/boot/dts/renesas/r9a06g032.dtsi
@@ -268,6 +268,28 @@
status = "disabled";
};
+ i2c1: i2c@40063000 {
+ compatible = "renesas,r9a06g032-i2c", "renesas,rzn1-i2c", "snps,designware-i2c";
+ reg = <0x40063000 0x100>;
+ interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sysctrl R9A06G032_HCLK_I2C0>, <&sysctrl R9A06G032_CLK_I2C0>;
+ clock-names = "ref", "pclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@40064000 {
+ compatible = "renesas,r9a06g032-i2c", "renesas,rzn1-i2c", "snps,designware-i2c";
+ reg = <0x40064000 0x100>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sysctrl R9A06G032_HCLK_I2C1>, <&sysctrl R9A06G032_CLK_I2C1>;
+ clock-names = "ref", "pclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
pinctrl: pinctrl@40067000 {
compatible = "renesas,r9a06g032-pinctrl", "renesas,rzn1-pinctrl";
reg = <0x40067000 0x1000>, <0x51000000 0x480>;
@@ -276,6 +298,30 @@
status = "okay";
};
+ sdio1: mmc@40100000 {
+ compatible = "renesas,r9a06g032-sdhci", "renesas,rzn1-sdhci", "arasan,sdhci-8.9a";
+ reg = <0x40100000 0x1000>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "int", "wakeup";
+ clocks = <&sysctrl R9A06G032_CLK_SDIO0>, <&sysctrl R9A06G032_HCLK_SDIO0>;
+ clock-names = "clk_xin", "clk_ahb";
+ no-1-8-v;
+ status = "disabled";
+ };
+
+ sdio2: mmc@40101000 {
+ compatible = "renesas,r9a06g032-sdhci", "renesas,rzn1-sdhci", "arasan,sdhci-8.9a";
+ reg = <0x40101000 0x1000>;
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "int", "wakeup";
+ clocks = <&sysctrl R9A06G032_CLK_SDIO1>, <&sysctrl R9A06G032_HCLK_SDIO1>;
+ clock-names = "clk_xin", "clk_ahb";
+ no-1-8-v;
+ status = "disabled";
+ };
+
nand_controller: nand-controller@40102000 {
compatible = "renesas,r9a06g032-nandc", "renesas,rzn1-nandc";
reg = <0x40102000 0x2000>;
diff --git a/arch/arm/boot/dts/rockchip/rk3036-kylin.dts b/arch/arm/boot/dts/rockchip/rk3036-kylin.dts
index 4f928c7898e9..ae2f84a4e922 100644
--- a/arch/arm/boot/dts/rockchip/rk3036-kylin.dts
+++ b/arch/arm/boot/dts/rockchip/rk3036-kylin.dts
@@ -8,6 +8,12 @@
model = "Rockchip RK3036 KylinBoard";
compatible = "rockchip,rk3036-kylin", "rockchip,rk3036";
+ aliases {
+ mmc0 = &emmc;
+ mmc1 = &sdmmc;
+ mmc2 = &sdio;
+ };
+
chosen {
stdout-path = "serial2:115200n8";
};
@@ -382,6 +388,18 @@
status = "okay";
};
+&usb2phy {
+ status = "okay";
+};
+
+&usb2phy_host {
+ status = "okay";
+};
+
+&usb2phy_otg {
+ status = "okay";
+};
+
&vop {
status = "okay";
};
diff --git a/arch/arm/boot/dts/rockchip/rk3036.dtsi b/arch/arm/boot/dts/rockchip/rk3036.dtsi
index 63b9912be06a..fca21ebb224b 100644
--- a/arch/arm/boot/dts/rockchip/rk3036.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk3036.dtsi
@@ -213,6 +213,8 @@
g-np-tx-fifo-size = <16>;
g-rx-fifo-size = <275>;
g-tx-fifo-size = <256 128 128 64 64 32>;
+ phys = <&usb2phy_otg>;
+ phy-names = "usb2-phy";
status = "disabled";
};
@@ -224,6 +226,8 @@
clocks = <&cru HCLK_OTG1>;
clock-names = "otg";
dr_mode = "host";
+ phys = <&usb2phy_host>;
+ phy-names = "usb2-phy";
status = "disabled";
};
@@ -342,6 +346,37 @@
grf: syscon@20008000 {
compatible = "rockchip,rk3036-grf", "syscon", "simple-mfd";
reg = <0x20008000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ usb2phy: usb2phy@17c {
+ compatible = "rockchip,rk3036-usb2phy";
+ reg = <0x017c 0x20>;
+ clocks = <&cru SCLK_OTGPHY0>;
+ clock-names = "phyclk";
+ clock-output-names = "usb480m_phy";
+ assigned-clocks = <&cru SCLK_USB480M>;
+ assigned-clock-parents = <&usb2phy>;
+ #clock-cells = <0>;
+ status = "disabled";
+
+ usb2phy_host: host-port {
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "linestate";
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ usb2phy_otg: otg-port {
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "otg-bvalid", "otg-id",
+ "linestate";
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+ };
power: power-controller {
compatible = "rockchip,rk3036-power-controller";
@@ -398,8 +433,9 @@
compatible = "rockchip,rk3036-inno-hdmi";
reg = <0x20034000 0x4000>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_HDMI>;
- clock-names = "pclk";
+ clocks = <&cru PCLK_HDMI>, <&cru SCLK_LCDC>;
+ clock-names = "pclk", "ref";
+ rockchip,grf = <&grf>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_ctl>;
#sound-dai-cells = <0>;
diff --git a/arch/arm/boot/dts/rockchip/rk3066a-marsboard.dts b/arch/arm/boot/dts/rockchip/rk3066a-marsboard.dts
index ada7dbfc06a5..de42d1855121 100644
--- a/arch/arm/boot/dts/rockchip/rk3066a-marsboard.dts
+++ b/arch/arm/boot/dts/rockchip/rk3066a-marsboard.dts
@@ -19,6 +19,17 @@
reg = <0x60000000 0x40000000>;
};
+ hdmi_con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
vdd_log: regulator-vdd-log {
compatible = "pwm-regulator";
pwms = <&pwm3 0 1000>;
@@ -58,6 +69,28 @@
cpu-supply = <&vdd_arm>;
};
+&gpu {
+ status = "okay";
+};
+
+&hdmi {
+ status = "okay";
+};
+
+&hdmi_in_vop1 {
+ status = "disabled";
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdmi_sound {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
clock-frequency = <400000>;
@@ -216,6 +249,10 @@
status = "okay";
};
+&vop0 {
+ status = "okay";
+};
+
&wdt {
status = "okay";
};
diff --git a/arch/arm/boot/dts/rockchip/rk3128.dtsi b/arch/arm/boot/dts/rockchip/rk3128.dtsi
index d4572146d135..c49099954c28 100644
--- a/arch/arm/boot/dts/rockchip/rk3128.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk3128.dtsi
@@ -48,7 +48,6 @@
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0xf00>;
- clock-latency = <40000>;
clocks = <&cru ARMCLK>;
resets = <&cru SRST_CORE0>;
operating-points-v2 = <&cpu_opp_table>;
@@ -87,31 +86,38 @@
opp-216000000 {
opp-hz = /bits/ 64 <216000000>;
opp-microvolt = <950000 950000 1325000>;
+ clock-latency-ns = <40000>;
};
opp-408000000 {
opp-hz = /bits/ 64 <408000000>;
opp-microvolt = <950000 950000 1325000>;
+ clock-latency-ns = <40000>;
};
opp-600000000 {
opp-hz = /bits/ 64 <600000000>;
opp-microvolt = <950000 950000 1325000>;
+ clock-latency-ns = <40000>;
};
opp-696000000 {
opp-hz = /bits/ 64 <696000000>;
opp-microvolt = <975000 975000 1325000>;
+ clock-latency-ns = <40000>;
};
opp-816000000 {
opp-hz = /bits/ 64 <816000000>;
opp-microvolt = <1075000 1075000 1325000>;
opp-suspend;
+ clock-latency-ns = <40000>;
};
opp-1008000000 {
opp-hz = /bits/ 64 <1008000000>;
opp-microvolt = <1200000 1200000 1325000>;
+ clock-latency-ns = <40000>;
};
opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <1325000 1325000 1325000>;
+ clock-latency-ns = <40000>;
};
};
diff --git a/arch/arm/boot/dts/rockchip/rk3188.dtsi b/arch/arm/boot/dts/rockchip/rk3188.dtsi
index 44b54af0bbf9..850bd6e67895 100644
--- a/arch/arm/boot/dts/rockchip/rk3188.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk3188.dtsi
@@ -23,7 +23,6 @@
compatible = "arm,cortex-a9";
next-level-cache = <&L2>;
reg = <0x0>;
- clock-latency = <40000>;
clocks = <&cru ARMCLK>;
operating-points-v2 = <&cpu0_opp_table>;
resets = <&cru SRST_CORE0>;
diff --git a/arch/arm/boot/dts/rockchip/rk322x.dtsi b/arch/arm/boot/dts/rockchip/rk322x.dtsi
index 96421355c274..cd11a018105b 100644
--- a/arch/arm/boot/dts/rockchip/rk322x.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk322x.dtsi
@@ -36,7 +36,6 @@
resets = <&cru SRST_CORE0>;
operating-points-v2 = <&cpu0_opp_table>;
#cooling-cells = <2>; /* min followed by max */
- clock-latency = <40000>;
clocks = <&cru ARMCLK>;
enable-method = "psci";
};
diff --git a/arch/arm/boot/dts/rockchip/rk3288.dtsi b/arch/arm/boot/dts/rockchip/rk3288.dtsi
index 3f1d640afafa..42d705b544ec 100644
--- a/arch/arm/boot/dts/rockchip/rk3288.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk3288.dtsi
@@ -70,7 +70,6 @@
resets = <&cru SRST_CORE0>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>; /* min followed by max */
- clock-latency = <40000>;
clocks = <&cru ARMCLK>;
dynamic-power-coefficient = <370>;
};
@@ -81,7 +80,6 @@
resets = <&cru SRST_CORE1>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>; /* min followed by max */
- clock-latency = <40000>;
clocks = <&cru ARMCLK>;
dynamic-power-coefficient = <370>;
};
@@ -92,7 +90,6 @@
resets = <&cru SRST_CORE2>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>; /* min followed by max */
- clock-latency = <40000>;
clocks = <&cru ARMCLK>;
dynamic-power-coefficient = <370>;
};
@@ -103,7 +100,6 @@
resets = <&cru SRST_CORE3>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>; /* min followed by max */
- clock-latency = <40000>;
clocks = <&cru ARMCLK>;
dynamic-power-coefficient = <370>;
};
@@ -116,6 +112,7 @@
opp-126000000 {
opp-hz = /bits/ 64 <126000000>;
opp-microvolt = <900000>;
+ clock-latency-ns = <40000>;
};
opp-216000000 {
opp-hz = /bits/ 64 <216000000>;
diff --git a/arch/arm/boot/dts/rockchip/rv1108.dtsi b/arch/arm/boot/dts/rockchip/rv1108.dtsi
index f3291f3bbc6f..42a4d72597a5 100644
--- a/arch/arm/boot/dts/rockchip/rv1108.dtsi
+++ b/arch/arm/boot/dts/rockchip/rv1108.dtsi
@@ -32,7 +32,6 @@
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0xf00>;
- clock-latency = <40000>;
clocks = <&cru ARMCLK>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <75>;
diff --git a/arch/arm/boot/dts/rockchip/rv1126-sonoff-ihost.dtsi b/arch/arm/boot/dts/rockchip/rv1126-sonoff-ihost.dtsi
index 9a87dc0d5f66..1aedcd3a2167 100644
--- a/arch/arm/boot/dts/rockchip/rv1126-sonoff-ihost.dtsi
+++ b/arch/arm/boot/dts/rockchip/rv1126-sonoff-ihost.dtsi
@@ -323,15 +323,15 @@
};
&pmu_io_domains {
- pmuio0-supply = <&vcc1v8_pmu>;
+ pmuio0-supply = <&vcc3v3_sys>;
pmuio1-supply = <&vcc3v3_sys>;
vccio1-supply = <&vcc_1v8>;
vccio2-supply = <&vccio_sd>;
vccio3-supply = <&vcc3v3_sd>;
- vccio4-supply = <&vcc_dovdd>;
- vccio5-supply = <&vcc_1v8>;
- vccio6-supply = <&vcc_1v8>;
- vccio7-supply = <&vcc_dovdd>;
+ vccio4-supply = <&vcc_3v3>;
+ vccio5-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_3v3>;
+ vccio7-supply = <&vcc_1v8>;
status = "okay";
};
@@ -342,16 +342,14 @@
&sdio {
bus-width = <4>;
- cap-sd-highspeed;
cap-sdio-irq;
keep-power-in-suspend;
- max-frequency = <50000000>;
+ max-frequency = <25000000>;
mmc-pwrseq = <&sdio_pwrseq>;
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc1_clk &sdmmc1_cmd &sdmmc1_bus4>;
rockchip,default-sample-phase = <90>;
- sd-uhs-sdr50;
vmmc-supply = <&vcc3v3_sd>;
vqmmc-supply = <&vcc_1v8>;
status = "okay";
diff --git a/arch/arm/boot/dts/samsung/s5pv210-aries.dtsi b/arch/arm/boot/dts/samsung/s5pv210-aries.dtsi
index f628d3660493..153514e80c9a 100644
--- a/arch/arm/boot/dts/samsung/s5pv210-aries.dtsi
+++ b/arch/arm/boot/dts/samsung/s5pv210-aries.dtsi
@@ -855,7 +855,7 @@
assigned-clock-rates = <0>, <50000000>;
assigned-clock-parents = <&clocks MOUT_MPLL>;
- wlan@1 {
+ wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
interrupt-parent = <&gph2>;
diff --git a/arch/arm/boot/dts/st/Makefile b/arch/arm/boot/dts/st/Makefile
index 60d55516f723..cc9948b9870f 100644
--- a/arch/arm/boot/dts/st/Makefile
+++ b/arch/arm/boot/dts/st/Makefile
@@ -28,6 +28,7 @@ dtb-$(CONFIG_ARCH_STM32) += \
stm32746g-eval.dtb \
stm32h743i-eval.dtb \
stm32h743i-disco.dtb \
+ stm32h747i-disco.dtb \
stm32h750i-art-pi.dtb \
stm32mp133c-prihmb.dtb \
stm32mp135f-dhcor-dhsbc.dtb \
@@ -70,7 +71,8 @@ dtb-$(CONFIG_ARCH_STM32) += \
stm32mp157c-lxa-tac-gen2.dtb \
stm32mp157c-odyssey.dtb \
stm32mp157c-osd32mp1-red.dtb \
- stm32mp157c-phycore-stm32mp1-3.dtb
+ stm32mp157c-phycore-stm32mp1-3.dtb \
+ stm32mp157c-ultra-fly-sbc.dtb
dtb-$(CONFIG_ARCH_U8500) += \
ste-snowball.dtb \
ste-hrefprev60-stuib.dtb \
diff --git a/arch/arm/boot/dts/st/spear1310-evb.dts b/arch/arm/boot/dts/st/spear1310-evb.dts
index ad216571ba57..089bd7db55c7 100644
--- a/arch/arm/boot/dts/st/spear1310-evb.dts
+++ b/arch/arm/boot/dts/st/spear1310-evb.dts
@@ -205,19 +205,19 @@
};
};
- ehci@e4800000 {
+ usb@e4800000 {
status = "okay";
};
- ehci@e5800000 {
+ usb@e5800000 {
status = "okay";
};
- ohci@e4000000 {
+ usb@e4000000 {
status = "okay";
};
- ohci@e5000000 {
+ usb@e5000000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/st/spear1340-evb.dts b/arch/arm/boot/dts/st/spear1340-evb.dts
index 9b515b21a633..d24146c3c9e8 100644
--- a/arch/arm/boot/dts/st/spear1340-evb.dts
+++ b/arch/arm/boot/dts/st/spear1340-evb.dts
@@ -203,7 +203,7 @@
};
};
- ehci@e4800000 {
+ usb@e4800000 {
status = "okay";
};
@@ -221,7 +221,7 @@
};
};
- ehci@e5800000 {
+ usb@e5800000 {
status = "okay";
};
@@ -238,11 +238,11 @@
status = "okay";
};
- ohci@e4000000 {
+ usb@e4000000 {
status = "okay";
};
- ohci@e5000000 {
+ usb@e5000000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/st/spear13xx.dtsi b/arch/arm/boot/dts/st/spear13xx.dtsi
index 3b6897084e26..76749992394d 100644
--- a/arch/arm/boot/dts/st/spear13xx.dtsi
+++ b/arch/arm/boot/dts/st/spear13xx.dtsi
@@ -174,7 +174,7 @@
status = "disabled";
};
- ehci@e4800000 {
+ usb@e4800000 {
compatible = "st,spear600-ehci", "usb-ehci";
reg = <0xe4800000 0x1000>;
interrupts = <0 64 0x4>;
@@ -182,7 +182,7 @@
status = "disabled";
};
- ehci@e5800000 {
+ usb@e5800000 {
compatible = "st,spear600-ehci", "usb-ehci";
reg = <0xe5800000 0x1000>;
interrupts = <0 66 0x4>;
@@ -190,7 +190,7 @@
status = "disabled";
};
- ohci@e4000000 {
+ usb@e4000000 {
compatible = "st,spear600-ohci", "usb-ohci";
reg = <0xe4000000 0x1000>;
interrupts = <0 65 0x4>;
@@ -198,7 +198,7 @@
status = "disabled";
};
- ohci@e5000000 {
+ usb@e5000000 {
compatible = "st,spear600-ohci", "usb-ohci";
reg = <0xe5000000 0x1000>;
interrupts = <0 67 0x4>;
diff --git a/arch/arm/boot/dts/st/spear300-evb.dts b/arch/arm/boot/dts/st/spear300-evb.dts
index 303ef29fb805..7d4e6412d558 100644
--- a/arch/arm/boot/dts/st/spear300-evb.dts
+++ b/arch/arm/boot/dts/st/spear300-evb.dts
@@ -119,15 +119,15 @@
status = "okay";
};
- ehci@e1800000 {
+ usb@e1800000 {
status = "okay";
};
- ohci@e1900000 {
+ usb@e1900000 {
status = "okay";
};
- ohci@e2100000 {
+ usb@e2100000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/st/spear310-evb.dts b/arch/arm/boot/dts/st/spear310-evb.dts
index ea0b53036f7b..459182210825 100644
--- a/arch/arm/boot/dts/st/spear310-evb.dts
+++ b/arch/arm/boot/dts/st/spear310-evb.dts
@@ -133,15 +133,15 @@
status = "okay";
};
- ehci@e1800000 {
+ usb@e1800000 {
status = "okay";
};
- ohci@e1900000 {
+ usb@e1900000 {
status = "okay";
};
- ohci@e2100000 {
+ usb@e2100000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/st/spear320-evb.dts b/arch/arm/boot/dts/st/spear320-evb.dts
index 3c026d021c92..6ac53d993cf3 100644
--- a/arch/arm/boot/dts/st/spear320-evb.dts
+++ b/arch/arm/boot/dts/st/spear320-evb.dts
@@ -142,15 +142,15 @@
status = "okay";
};
- ehci@e1800000 {
+ usb@e1800000 {
status = "okay";
};
- ohci@e1900000 {
+ usb@e1900000 {
status = "okay";
};
- ohci@e2100000 {
+ usb@e2100000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/st/spear320-hmi.dts b/arch/arm/boot/dts/st/spear320-hmi.dts
index 721e5ee7b680..8010918e5257 100644
--- a/arch/arm/boot/dts/st/spear320-hmi.dts
+++ b/arch/arm/boot/dts/st/spear320-hmi.dts
@@ -92,7 +92,7 @@
status = "okay";
};
- ehci@e1800000 {
+ usb@e1800000 {
status = "okay";
};
@@ -147,11 +147,11 @@
};
};
- ohci@e1900000 {
+ usb@e1900000 {
status = "okay";
};
- ohci@e2100000 {
+ usb@e2100000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/st/spear3xx.dtsi b/arch/arm/boot/dts/st/spear3xx.dtsi
index cc88ebe7a60c..f54bb80ba28a 100644
--- a/arch/arm/boot/dts/st/spear3xx.dtsi
+++ b/arch/arm/boot/dts/st/spear3xx.dtsi
@@ -73,21 +73,21 @@
status = "disabled";
};
- ehci@e1800000 {
+ usb@e1800000 {
compatible = "st,spear600-ehci", "usb-ehci";
reg = <0xe1800000 0x1000>;
interrupts = <26>;
status = "disabled";
};
- ohci@e1900000 {
+ usb@e1900000 {
compatible = "st,spear600-ohci", "usb-ohci";
reg = <0xe1900000 0x1000>;
interrupts = <25>;
status = "disabled";
};
- ohci@e2100000 {
+ usb@e2100000 {
compatible = "st,spear600-ohci", "usb-ohci";
reg = <0xe2100000 0x1000>;
interrupts = <27>;
diff --git a/arch/arm/boot/dts/st/spear600.dtsi b/arch/arm/boot/dts/st/spear600.dtsi
index 6b67c0ceaed9..9a93367445ca 100644
--- a/arch/arm/boot/dts/st/spear600.dtsi
+++ b/arch/arm/boot/dts/st/spear600.dtsi
@@ -91,7 +91,7 @@
status = "disabled";
};
- ehci_usb0: ehci@e1800000 {
+ ehci_usb0: usb@e1800000 {
compatible = "st,spear600-ehci", "usb-ehci";
reg = <0xe1800000 0x1000>;
interrupt-parent = <&vic1>;
@@ -99,7 +99,7 @@
status = "disabled";
};
- ehci_usb1: ehci@e2000000 {
+ ehci_usb1: usb@e2000000 {
compatible = "st,spear600-ehci", "usb-ehci";
reg = <0xe2000000 0x1000>;
interrupt-parent = <&vic1>;
@@ -107,7 +107,7 @@
status = "disabled";
};
- ohci_usb0: ohci@e1900000 {
+ ohci_usb0: usb@e1900000 {
compatible = "st,spear600-ohci", "usb-ohci";
reg = <0xe1900000 0x1000>;
interrupt-parent = <&vic1>;
@@ -115,7 +115,7 @@
status = "disabled";
};
- ohci_usb1: ohci@e2100000 {
+ ohci_usb1: usb@e2100000 {
compatible = "st,spear600-ohci", "usb-ohci";
reg = <0xe2100000 0x1000>;
interrupt-parent = <&vic1>;
diff --git a/arch/arm/boot/dts/st/stm32f746.dtsi b/arch/arm/boot/dts/st/stm32f746.dtsi
index 2537b3d47e6f..208f8c6dfc9d 100644
--- a/arch/arm/boot/dts/st/stm32f746.dtsi
+++ b/arch/arm/boot/dts/st/stm32f746.dtsi
@@ -43,6 +43,7 @@
#include "../armv7-m.dtsi"
#include <dt-bindings/clock/stm32fx-clock.h>
#include <dt-bindings/mfd/stm32f7-rcc.h>
+#include <dt-bindings/interrupt-controller/irq.h>
/ {
#address-cells = <1>;
@@ -245,6 +246,39 @@
};
};
+ lptimer1: timer@40002400 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-lptimer";
+ reg = <0x40002400 0x400>;
+ interrupts-extended = <&exti 23 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&rcc 1 CLK_LPTIMER>;
+ clock-names = "mux";
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ trigger@0 {
+ compatible = "st,stm32-lptimer-trigger";
+ reg = <0>;
+ status = "disabled";
+ };
+
+ counter {
+ compatible = "st,stm32-lptimer-counter";
+ status = "disabled";
+ };
+
+ timer {
+ compatible = "st,stm32-lptimer-timer";
+ status = "disabled";
+ };
+ };
+
rtc: rtc@40002800 {
compatible = "st,stm32-rtc";
reg = <0x40002800 0x400>;
diff --git a/arch/arm/boot/dts/st/stm32h7-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32h7-pinctrl.dtsi
index 7f1d234e1024..8a6db484383d 100644
--- a/arch/arm/boot/dts/st/stm32h7-pinctrl.dtsi
+++ b/arch/arm/boot/dts/st/stm32h7-pinctrl.dtsi
@@ -198,7 +198,7 @@
};
};
- uart4_pins: uart4-0 {
+ uart4_pins_a: uart4-0 {
pins1 {
pinmux = <STM32_PINMUX('A', 0, AF8)>; /* UART4_TX */
bias-disable;
@@ -211,7 +211,20 @@
};
};
- usart1_pins: usart1-0 {
+ uart8_pins_a: uart8-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('J', 8, AF8)>; /* UART8_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('J', 9, AF8)>; /* UART8_RX */
+ bias-disable;
+ };
+ };
+
+ usart1_pins_a: usart1-0 {
pins1 {
pinmux = <STM32_PINMUX('B', 14, AF4)>; /* USART1_TX */
bias-disable;
@@ -224,7 +237,20 @@
};
};
- usart2_pins: usart2-0 {
+ usart1_pins_b: usart1-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('A', 9, AF7)>; /* USART1_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('A', 10, AF7)>; /* USART1_RX */
+ bias-disable;
+ };
+ };
+
+ usart2_pins_a: usart2-0 {
pins1 {
pinmux = <STM32_PINMUX('D', 5, AF7)>; /* USART2_TX */
bias-disable;
@@ -237,7 +263,7 @@
};
};
- usart3_pins: usart3-0 {
+ usart3_pins_a: usart3-0 {
pins1 {
pinmux = <STM32_PINMUX('B', 10, AF7)>, /* USART3_TX */
<STM32_PINMUX('D', 12, AF7)>; /* USART3_RTS_DE */
diff --git a/arch/arm/boot/dts/st/stm32h743.dtsi b/arch/arm/boot/dts/st/stm32h743.dtsi
index b8d4c44c8a82..2f19cfbc57ad 100644
--- a/arch/arm/boot/dts/st/stm32h743.dtsi
+++ b/arch/arm/boot/dts/st/stm32h743.dtsi
@@ -211,6 +211,14 @@
};
};
+ uart8: serial@40007c00 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40007c00 0x400>;
+ interrupts = <83>;
+ status = "disabled";
+ clocks = <&rcc UART8_CK>;
+ };
+
usart1: serial@40011000 {
compatible = "st,stm32h7-uart";
reg = <0x40011000 0x400>;
diff --git a/arch/arm/boot/dts/st/stm32h743i-disco.dts b/arch/arm/boot/dts/st/stm32h743i-disco.dts
index 2b452883a708..8451a54a9a08 100644
--- a/arch/arm/boot/dts/st/stm32h743i-disco.dts
+++ b/arch/arm/boot/dts/st/stm32h743i-disco.dts
@@ -105,7 +105,7 @@
};
&usart2 {
- pinctrl-0 = <&usart2_pins>;
+ pinctrl-0 = <&usart2_pins_a>;
pinctrl-names = "default";
status = "okay";
};
diff --git a/arch/arm/boot/dts/st/stm32h743i-eval.dts b/arch/arm/boot/dts/st/stm32h743i-eval.dts
index 5c5d8059bdc7..4b0ced27b80e 100644
--- a/arch/arm/boot/dts/st/stm32h743i-eval.dts
+++ b/arch/arm/boot/dts/st/stm32h743i-eval.dts
@@ -145,7 +145,7 @@
};
&usart1 {
- pinctrl-0 = <&usart1_pins>;
+ pinctrl-0 = <&usart1_pins_a>;
pinctrl-names = "default";
status = "okay";
};
diff --git a/arch/arm/boot/dts/st/stm32h747i-disco.dts b/arch/arm/boot/dts/st/stm32h747i-disco.dts
new file mode 100644
index 000000000000..99f0255dae8e
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32h747i-disco.dts
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ */
+
+/dts-v1/;
+#include "stm32h743.dtsi"
+#include "stm32h7-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "STMicroelectronics STM32H747i-Discovery board";
+ compatible = "st,stm32h747i-disco", "st,stm32h747";
+
+ chosen {
+ bootargs = "root=/dev/ram";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@d0000000 {
+ device_type = "memory";
+ reg = <0xd0000000 0x2000000>;
+ };
+
+ aliases {
+ serial0 = &usart1;
+ serial1 = &uart8;
+ };
+
+ v3v3: regulator-v3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led-green {
+ gpios = <&gpioi 12 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+ led-orange {
+ gpios = <&gpioi 13 GPIO_ACTIVE_LOW>;
+ };
+ led-red {
+ gpios = <&gpioi 14 GPIO_ACTIVE_LOW>;
+ };
+ led-blue {
+ gpios = <&gpioi 15 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+ button-0 {
+ label = "User";
+ linux,code = <KEY_WAKEUP>;
+ gpios = <&gpioc 13 GPIO_ACTIVE_HIGH>;
+ };
+ button-1 {
+ label = "JoySel";
+ linux,code = <KEY_ENTER>;
+ gpios = <&gpiok 2 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ };
+ button-2 {
+ label = "JoyDown";
+ linux,code = <KEY_DOWN>;
+ gpios = <&gpiok 3 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ };
+ button-3 {
+ label = "JoyUp";
+ linux,code = <KEY_UP>;
+ gpios = <&gpiok 6 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ };
+ button-4 {
+ label = "JoyLeft";
+ linux,code = <KEY_LEFT>;
+ gpios = <&gpiok 4 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ };
+ button-5 {
+ label = "JoyRight";
+ linux,code = <KEY_RIGHT>;
+ gpios = <&gpiok 5 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ };
+ };
+};
+
+&clk_hse {
+ clock-frequency = <25000000>;
+};
+
+&mac {
+ status = "disabled";
+ pinctrl-0 = <&ethernet_rmii>;
+ pinctrl-names = "default";
+ phy-mode = "rmii";
+ phy-handle = <&phy0>;
+
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+};
+
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ cd-gpios = <&gpioi 8 GPIO_ACTIVE_LOW>;
+ broken-cd;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&v3v3>;
+ status = "okay";
+};
+
+&usart1 {
+ pinctrl-0 = <&usart1_pins_b>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&uart8 {
+ pinctrl-0 = <&uart8_pins_a>;
+ pinctrl-names = "default";
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/st/stm32h750i-art-pi.dts b/arch/arm/boot/dts/st/stm32h750i-art-pi.dts
index 44c307f8b09c..56c53e262da7 100644
--- a/arch/arm/boot/dts/st/stm32h750i-art-pi.dts
+++ b/arch/arm/boot/dts/st/stm32h750i-art-pi.dts
@@ -167,7 +167,7 @@
#address-cells = <1>;
#size-cells = <0>;
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
};
@@ -197,14 +197,14 @@
};
&usart2 {
- pinctrl-0 = <&usart2_pins>;
+ pinctrl-0 = <&usart2_pins_a>;
pinctrl-names = "default";
status = "disabled";
};
&usart3 {
pinctrl-names = "default";
- pinctrl-0 = <&usart3_pins>;
+ pinctrl-0 = <&usart3_pins_a>;
dmas = <&dmamux1 45 0x400 0x05>,
<&dmamux1 46 0x400 0x05>;
dma-names = "rx", "tx";
@@ -221,7 +221,7 @@
};
&uart4 {
- pinctrl-0 = <&uart4_pins>;
+ pinctrl-0 = <&uart4_pins_a>;
pinctrl-names = "default";
status = "okay";
};
diff --git a/arch/arm/boot/dts/st/stm32mp131.dtsi b/arch/arm/boot/dts/st/stm32mp131.dtsi
index 8512a6e46b33..492bcf586361 100644
--- a/arch/arm/boot/dts/st/stm32mp131.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp131.dtsi
@@ -1018,6 +1018,9 @@
reg = <0x4 0x2>;
bits = <0 12>;
};
+ vrefint: vrefin-cal@52 {
+ reg = <0x52 0x2>;
+ };
ts_cal1: calib@5c {
reg = <0x5c 0x2>;
};
@@ -1063,6 +1066,8 @@
interrupts = <0>;
dmas = <&dmamux1 10 0x400 0x80000001>;
dma-names = "rx";
+ nvmem-cells = <&vrefint>;
+ nvmem-cell-names = "vrefint";
status = "disabled";
channel@13 {
diff --git a/arch/arm/boot/dts/st/stm32mp133.dtsi b/arch/arm/boot/dts/st/stm32mp133.dtsi
index 73e470019ce4..e48838374f0d 100644
--- a/arch/arm/boot/dts/st/stm32mp133.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp133.dtsi
@@ -60,6 +60,8 @@
interrupts = <0>;
dmas = <&dmamux1 9 0x400 0x80000001>;
dma-names = "rx";
+ nvmem-cells = <&vrefint>;
+ nvmem-cell-names = "vrefint";
status = "disabled";
channel@18 {
diff --git a/arch/arm/boot/dts/st/stm32mp135f-dk.dts b/arch/arm/boot/dts/st/stm32mp135f-dk.dts
index 19a32f7d4d7d..9764a6bfa5b4 100644
--- a/arch/arm/boot/dts/st/stm32mp135f-dk.dts
+++ b/arch/arm/boot/dts/st/stm32mp135f-dk.dts
@@ -421,7 +421,7 @@
#size-cells = <0>;
status = "okay";
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi b/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
index 6236ce2a6968..c18156807027 100644
--- a/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
@@ -287,7 +287,7 @@
#address-cells = <1>;
#size-cells = <0>;
- brcmf: bcrmf@1 { /* muRata 1YN */
+ brcmf: wifi@1 { /* muRata 1YN */
reg = <1>;
compatible = "infineon,cyw43439-fmac", "brcm,bcm4329-fmac";
interrupt-parent = <&gpioe>;
diff --git a/arch/arm/boot/dts/st/stm32mp157a-iot-box.dts b/arch/arm/boot/dts/st/stm32mp157a-iot-box.dts
index 6a5a4af25bd9..84497026a106 100644
--- a/arch/arm/boot/dts/st/stm32mp157a-iot-box.dts
+++ b/arch/arm/boot/dts/st/stm32mp157a-iot-box.dts
@@ -46,7 +46,7 @@
#address-cells = <1>;
#size-cells = <0>;
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
};
diff --git a/arch/arm/boot/dts/st/stm32mp157c-dk2.dts b/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
index 324f7bb988d1..1b34fbe10b4f 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
@@ -115,7 +115,7 @@
#size-cells = <0>;
status = "okay";
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/st/stm32mp157c-ultra-fly-sbc.dts b/arch/arm/boot/dts/st/stm32mp157c-ultra-fly-sbc.dts
new file mode 100644
index 000000000000..ac42d462d449
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32mp157c-ultra-fly-sbc.dts
@@ -0,0 +1,1152 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) Ultratronik GmbH 2024-2025 - All Rights Reserved
+ */
+
+/dts-v1/;
+#include "stm32mp157.dtsi"
+#include "stm32mp15xc.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxac-pinctrl.dtsi"
+#include <dt-bindings/pinctrl/stm32-pinfunc.h>
+#include <dt-bindings/mfd/st,stpmic1.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "STM STM32MP15x Ultratronik MMI_A7 board";
+ compatible = "ultratronik,stm32mp157c-ultra-fly-sbc", "st,stm32mp157";
+
+ aliases {
+ ethernet0 = &ethernet0;
+ serial0 = &uart4;
+ serial1 = &uart5;
+ serial2 = &uart7;
+ serial3 = &usart1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@c0000000 {
+ device_type = "memory";
+ reg = <0xC0000000 0x40000000>;
+ };
+
+ usb_otg_vbus: regulator-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpioh 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ retram: retram@38000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x38000000 0x10000>;
+ no-map;
+ };
+
+ mcuram: mcuram@30000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x30000000 0x40000>;
+ no-map;
+ };
+
+ mcuram2: mcuram2@10000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10000000 0x40000>;
+ no-map;
+ };
+
+ vdev0vring0: vdev0vring0@10040000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10040000 0x2000>;
+ no-map;
+ };
+
+ vdev0vring1: vdev0vring1@10042000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10042000 0x2000>;
+ no-map;
+ };
+
+ vdev0buffer: vdev0buffer@10044000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10044000 0x4000>;
+ no-map;
+ };
+
+ gpu_reserved: gpu@f8000000 {
+ reg = <0xf8000000 0x8000000>;
+ no-map;
+ };
+ };
+
+ leds: leds {
+ compatible = "gpio-leds";
+
+ led0 {
+ label = "buzzer";
+ gpios = <&gpiof 2 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ linux,default-trigger = "none";
+ };
+
+ led1 {
+ label = "led1";
+ gpios = <&gpioa 12 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led2 {
+ label = "led2";
+ gpios = <&gpioa 13 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led3 {
+ label = "led3";
+ gpios = <&gpioa 14 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ gpio_keys: gpio-keys {
+ compatible = "gpio-keys";
+
+ key-1 {
+ label = "KEY1";
+ gpios = <&gpiod 1 GPIO_ACTIVE_HIGH>;
+ wakeup-source;
+ linux,code = <2>;
+ };
+
+ key-2 {
+ label = "KEY2";
+ gpios = <&gpiod 7 GPIO_ACTIVE_HIGH>;
+ wakeup-source;
+ linux,code = <3>;
+ };
+ };
+};
+
+&adc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&adc1_ux_ain_pins_a>;
+ vdd-supply = <&vdd>;
+ vdda-supply = <&vdd>;
+ vref-supply = <&vrefbuf>;
+ status = "okay";
+
+ adc1: adc@0 {
+ st,min-sample-time-nsecs = <5000>;
+ st,adc-channels = <0 1 6 13>; /* ANA0 ANA1 PF12 PC3 */
+ status = "okay";
+ };
+
+ adc2: adc@100 {
+ st,adc-channels = <0 1 12>; /* ANA0 ANA1 INT_TEMP */
+ st,min-sample-time-nsecs = <10000>;
+ status = "okay";
+
+ channel@12 {
+ reg = <12>; /* Channel 12 = internal temperature sensor */
+ label = "internal_temp";
+ };
+ };
+};
+
+&dac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&dac_ux_ch1_pins_a &dac_ux_ch2_pins_a>;
+ vref-supply = <&vrefbuf>;
+ status = "okay";
+
+ dac1: dac@1 {
+ status = "okay";
+ };
+
+ dac2: dac@2 {
+ status = "okay";
+ };
+};
+
+&dts {
+ compatible = "st,stm32-thermal";
+ status = "okay";
+};
+
+&ethernet0 {
+ status = "okay";
+ pinctrl-0 = <&ethernet0_ux_rgmii_pins_a>;
+ pinctrl-1 = <&ethernet0_ux_rgmii_pins_sleep_a>;
+ pinctrl-names = "default", "sleep";
+ phy-mode = "rgmii-id";
+ phy-handle = <&phy1>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+};
+
+&gpioa {
+ gpio-line-names =
+ "#PMIC_IRQ", "", "", "", "DAC1", "DAC2", "", "",
+ "", "", "OTG_ID", "TIM1_4", "#LED1", "#LED2", "#LED3", "";
+};
+
+&gpiob {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpioc {
+ gpio-line-names =
+ "#AMP_SD", "", "", "ANA5", "", "", "", "",
+ "", "", "", "", "", "PMIC_WAKEUP", "", "";
+};
+
+&gpiod {
+ gpio-line-names =
+ "#G_INT", "#TASTER1", "", "", "GPIO1", "GPIO2", "", "#TASTER2",
+ "", "", "", "", "", "", "TIM4_3", "TIM4_4";
+};
+
+&gpioe {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "PWM2", "", "", "", "", "";
+};
+
+&gpiof {
+ gpio-line-names =
+ "#SD1_CD", "SD1_WP", "BUZZER", "#DISP_POW", "BKL_POW", "#CAM_RES", "", "",
+ "", "TIM17_1N", "", "CAM_PWDN", "ANA6", "ENA_USB", "", "";
+};
+
+&gpiog {
+ gpio-line-names =
+ "#ESP_RES", "#ESP_BOOT", "GPIO3", "GPIO4", "", "", "", "",
+ "", "#TOUCH_IRQ", "", "", "", "", "", "#PCAP_RES";
+};
+
+&gpioh {
+ gpio-line-names =
+ "", "CAM_LED", "", "USB_OTG_PWR", "", "USB_OTG_OC", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpioi {
+ gpio-line-names =
+ "BKL_PWM", "", "", "", "", "", "", "",
+ "#SPI_CS0", "", "", "#SPI_CS1", "", "", "", "";
+};
+
+&gpioj {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpiok {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpioz {
+ gpio-line-names =
+ "", "", "", "#SPI_CS2", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpu {
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c1_ux_pins_a>;
+ pinctrl-1 = <&i2c1_ux_pins_sleep_a>;
+ i2c-scl-rising-time-ns = <100>;
+ i2c-scl-falling-time-ns = <7>;
+ status = "okay";
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ rtc@32 {
+ compatible = "epson,rx8900";
+ reg = <0x32>;
+ epson,vdet-disable;
+ trickle-diode-disable;
+ };
+};
+
+&i2c4 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c4_ux_pins_a>;
+ pinctrl-1 = <&i2c4_ux_pins_sleep_a>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ pmic: pmic@33 {
+ compatible = "st,stpmic1";
+ reg = <0x33>;
+ interrupts-extended = <&exti 0 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ regulators {
+ compatible = "st,stpmic1-regulators";
+
+ ldo1-supply = <&v3v3>;
+ ldo3-supply = <&vdd_ddr>;
+ ldo6-supply = <&v3v3>;
+ pwr_sw1-supply = <&bst_out>;
+ pwr_sw2-supply = <&bst_out>;
+
+ vddcore: buck1 {
+ regulator-name = "vddcore";
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd_ddr: buck2 {
+ regulator-name = "vdd_ddr";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd: buck3 {
+ regulator-name = "vdd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ st,mask-reset;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ v3v3: buck4 {
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ regulator-initial-mode = <0>;
+ };
+
+ vtt_ddr: ldo3 {
+ regulator-name = "vtt_ddr";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <750000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ vdd_usb: ldo4 {
+ regulator-name = "vdd_usb";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ interrupts = <IT_CURLIM_LDO4 0>;
+ };
+
+ v1v8: ldo6 {
+ regulator-name = "v1v8";
+ regulator-min-microvolt = <1600000>; /* offset +200 mV ??? */
+ regulator-max-microvolt = <1600000>; /* real 1800000 */
+ regulator-always-on;
+ interrupts = <IT_CURLIM_LDO6 0>;
+ };
+
+ vref_ddr: vref_ddr {
+ regulator-name = "vref_ddr";
+ regulator-always-on;
+ };
+
+ bst_out: boost {
+ regulator-name = "bst_out";
+ interrupts = <IT_OCP_BOOST 0>;
+ };
+
+ vbus_otg: pwr_sw1 {
+ regulator-name = "vbus_otg";
+ interrupts = <IT_OCP_OTG 0>;
+ regulator-active-discharge = <1>;
+ };
+
+ vbus_sw: pwr_sw2 {
+ regulator-name = "vbus_sw";
+ interrupts = <IT_OCP_SWOUT 0>;
+ regulator-active-discharge = <1>;
+ };
+ };
+ };
+};
+
+&iwdg2 {
+ timeout-sec = <32>;
+ status = "okay";
+};
+
+&m_can2 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&m_can2_ux_pins_a>;
+ pinctrl-1 = <&m_can2_ux_sleep_pins_a>;
+ status = "okay";
+};
+
+&pinctrl {
+
+ adc1_ux_ain_pins_a: adc1-ux-ain-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F',12, ANALOG)>, /* ADC1 in6 */
+ <STM32_PINMUX('C', 3, ANALOG)>; /* ADC2 in13 */
+ };
+ };
+
+ dac_ux_ch1_pins_a: dac-ux-ch1-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 4, ANALOG)>;
+ };
+ };
+
+ dac_ux_ch2_pins_a: dac-ux-ch2-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 5, ANALOG)>;
+ };
+ };
+
+ ethernet0_ux_rgmii_pins_a: rgmii-ux-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 5, AF11)>, /* ETH_RGMII_CLK125 */
+ <STM32_PINMUX('G', 4, AF11)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('G', 13, AF11)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('G', 14, AF11)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('C', 2, AF11)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('E', 2, AF11)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('B', 11, AF11)>; /* ETH_RGMII_TX_CTL */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('C', 5, AF11)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('B', 0, AF11)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('H', 7, AF11)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('A', 1, AF11)>, /* ETH_RGMII_RX_CLK */
+ <STM32_PINMUX('A', 7, AF11)>; /* ETH_RGMII_RX_CTL */
+ bias-disable;
+ };
+ pins3 {
+ pinmux = <STM32_PINMUX('C', 1, AF11)>; /* ETH_MDC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins4 {
+ pinmux = <STM32_PINMUX('A', 2, AF11)>; /* ETH_MDIO */
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+ };
+ };
+
+ ethernet0_ux_rgmii_pins_sleep_a: rgmii-ux-sleep-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 5, ANALOG)>, /* ETH_RGMII_CLK125 */
+ <STM32_PINMUX('G', 4, ANALOG)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('G', 13, ANALOG)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('G', 14, ANALOG)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('C', 2, ANALOG)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('E', 2, ANALOG)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('B', 11, ANALOG)>, /* ETH_RGMII_TX_CTL */
+ <STM32_PINMUX('A', 2, ANALOG)>, /* ETH_MDIO */
+ <STM32_PINMUX('C', 1, ANALOG)>, /* ETH_MDC */
+ <STM32_PINMUX('C', 4, ANALOG)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('C', 5, ANALOG)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('B', 0, ANALOG)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('B', 1, ANALOG)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('A', 1, ANALOG)>, /* ETH_RGMII_RX_CLK */
+ <STM32_PINMUX('A', 7, ANALOG)>; /* ETH_RGMII_RX_CTL */
+ };
+ };
+
+ i2c1_ux_pins_a: i2c1-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 14, AF5)>, /* I2C1_SCL */
+ <STM32_PINMUX('F', 15, AF5)>; /* I2C1_SDA */
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+ };
+ };
+
+ i2c1_ux_pins_sleep_a: i2c1-1 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 14, ANALOG)>, /* I2C1_SCL */
+ <STM32_PINMUX('F', 15, ANALOG)>; /* I2C1_SDA */
+ };
+ };
+
+ m_can2_ux_pins_a: m-can2-ux-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 6, AF9)>; /* CAN1_TX */
+ slew-rate = <0>;
+ drive-push-pull;
+ bias-disable;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 5, AF9)>; /* CAN1_RX */
+ bias-disable;
+ };
+ };
+
+ m_can2_ux_sleep_pins_a: m-can2-ux-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 6, ANALOG)>, /* CAN1_TX */
+ <STM32_PINMUX('B', 5, ANALOG)>; /* CAN1_RX */
+ };
+ };
+ pwm1_ux_pins_a: pwm1-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A',11, AF1)>, /* TIM1_CH4 */
+ <STM32_PINMUX('E',10, AF1)>; /* TIM1_CH2N */
+ bias-pull-down;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ pwm1_ux_sleep_pins_a: pwm1-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A',11, ANALOG)>, /* TIM1_CH4 */
+ <STM32_PINMUX('E',10, ANALOG)>; /* TIM1_CH2N */
+ };
+ };
+
+ pwm4_ux_pins_a: pwm4-0 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 14, AF2)>, /* TIM4_CH3 */
+ <STM32_PINMUX('D', 15, AF2)>; /* TIM4_CH4 */
+ bias-disable;
+ };
+ };
+
+ pwm4_ux_sleep_pins_a: pwm4-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 14, ANALOG)>, /* TIM4_CH3 */
+ <STM32_PINMUX('D', 15, ANALOG)>; /* TIM4_CH4 */
+ };
+ };
+
+ pwm5_ux_pins_a: pwm5-0 {
+ pins {
+ pinmux = <STM32_PINMUX('I', 0, AF2)>; /* TIM5_CH4 */
+ bias-pull-down;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ pwm5_ux_sleep_pins_a: pwm5-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('I', 0, ANALOG)>; /* TIM5_CH4 */
+ };
+ };
+
+ pwm17_ux_pins_a: pwm17-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 9, AF1)>; /* TIM17_CH1N */
+ bias-pull-down;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ pwm17_ux_sleep_pins_a: pwm17-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 9, ANALOG)>; /* TIM17_CH1N */
+ };
+ };
+
+ qspi_bk1_ux_pins_a: qspi-bk1-ux-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 8, AF10)>, /* QSPI_BK1_IO0 */
+ <STM32_PINMUX('D',12, AF9)>, /* QSPI_BK1_IO1 */
+ <STM32_PINMUX('F', 7, AF9)>, /* QSPI_BK1_IO2 */
+ <STM32_PINMUX('F', 6, AF9)>; /* QSPI_BK1_IO3 */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('B',10, AF9)>; /* QSPI_BK1_NCS */
+ bias-pull-up;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+ };
+
+ qspi_bk1_ux_sleep_pins_a: qspi-bk1-ux-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 8, ANALOG)>, /* QSPI_BK1_IO0 */
+ <STM32_PINMUX('D',12, ANALOG)>, /* QSPI_BK1_IO1 */
+ <STM32_PINMUX('F', 7, ANALOG)>, /* QSPI_BK1_IO2 */
+ <STM32_PINMUX('F', 6, ANALOG)>, /* QSPI_BK1_IO3 */
+ <STM32_PINMUX('B',10, ANALOG)>; /* QSPI_BK1_NCS */
+ };
+ };
+
+ qspi_clk_ux_pins_a: qspi-clk_ux-0 {
+ pins {
+ pinmux = <STM32_PINMUX('G', 7, AF9)>; /* QSPI_CLK */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <3>;
+ };
+ };
+
+ qspi_clk_ux_sleep_pins_a: qspi-clk-ux-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('G', 7, ANALOG)>; /* QSPI_CLK */
+ };
+ };
+
+ sai2a_ux_pins_a: sai2a-0 {
+ pins {
+ pinmux = <STM32_PINMUX('I', 5, AF10)>, /* SAI2_SCK_A */
+ <STM32_PINMUX('D',11, AF10)>, /* SAI2_SD_A */
+ <STM32_PINMUX('I', 7, AF10)>, /* SAI2_FS_A */
+ <STM32_PINMUX('E', 0, AF10)>; /* SAI2_MCLK_A */
+ slew-rate = <0>;
+ drive-push-pull;
+ bias-disable;
+ };
+ };
+
+ sai2a_ux_sleep_pins_a: sai2a-1 {
+ pins {
+ pinmux = <STM32_PINMUX('I', 5, ANALOG)>, /* SAI2_SCK_A */
+ <STM32_PINMUX('D',11, ANALOG)>, /* SAI2_SD_A */
+ <STM32_PINMUX('I', 7, ANALOG)>, /* SAI2_FS_A */
+ <STM32_PINMUX('E', 0, ANALOG)>; /* SAI2_MCLK_A */
+ };
+ };
+
+ sdmmc1_ux_b4_pins_a: sdmmc1-ux-b4-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
+ <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
+ <STM32_PINMUX('C',10, AF12)>, /* SDMMC1_D2 */
+ <STM32_PINMUX('C',11, AF12)>, /* SDMMC1_D3 */
+ <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-disable;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('C', 12, AF12)>; /* SDMMC1_CK */
+ slew-rate = <2>;
+ drive-push-pull;
+ bias-disable;
+ };
+ };
+
+ sdmmc1_ux_b4_od_pins_a: sdmmc1-b4-od-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
+ <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
+ <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
+ <STM32_PINMUX('C', 11, AF12)>; /* SDMMC1_D3 */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-disable;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('C', 12, AF12)>; /* SDMMC1_CK */
+ slew-rate = <2>;
+ drive-push-pull;
+ bias-disable;
+ };
+ pins3 {
+ pinmux = <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
+ slew-rate = <1>;
+ drive-open-drain;
+ bias-disable;
+ };
+ };
+
+ sdmmc1_ux_b4_sleep_pins_a: sdmmc1-b4-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('C', 8, ANALOG)>, /* SDMMC1_D0 */
+ <STM32_PINMUX('C', 9, ANALOG)>, /* SDMMC1_D1 */
+ <STM32_PINMUX('C', 10, ANALOG)>, /* SDMMC1_D2 */
+ <STM32_PINMUX('C', 11, ANALOG)>, /* SDMMC1_D3 */
+ <STM32_PINMUX('C', 12, ANALOG)>, /* SDMMC1_CK */
+ <STM32_PINMUX('D', 2, ANALOG)>; /* SDMMC1_CMD */
+ };
+ };
+
+ sdmmc2_ux_b4_pins_a: sdmmc2-ux-b4-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
+ <STM32_PINMUX('B', 15, AF9)>, /* SDMMC2_D1 */
+ <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
+ <STM32_PINMUX('B', 4, AF9)>, /* SDMMC2_D3 */
+ <STM32_PINMUX('G', 6, AF10)>; /* SDMMC2_CMD */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 3, AF9)>; /* SDMMC2_CK */
+ slew-rate = <2>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ };
+
+ sdmmc2_ux_b4_od_pins_a: sdmmc2-ux-b4-od-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
+ <STM32_PINMUX('B', 15, AF9)>, /* SDMMC2_D1 */
+ <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
+ <STM32_PINMUX('B', 4, AF9)>; /* SDMMC2_D3 */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 3, AF9)>; /* SDMMC2_CK */
+ slew-rate = <2>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ pins3 {
+ pinmux = <STM32_PINMUX('G', 6, AF10)>; /* SDMMC2_CMD */
+ slew-rate = <1>;
+ drive-open-drain;
+ bias-pull-up;
+ };
+ };
+
+ sdmmc2_ux_b4_sleep_pins_a: sdmmc2-ux-b4-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 14, ANALOG)>, /* SDMMC2_D0 */
+ <STM32_PINMUX('B', 15, ANALOG)>, /* SDMMC2_D1 */
+ <STM32_PINMUX('B', 3, ANALOG)>, /* SDMMC2_D2 */
+ <STM32_PINMUX('B', 4, ANALOG)>, /* SDMMC2_D3 */
+ <STM32_PINMUX('E', 3, ANALOG)>, /* SDMMC2_CK */
+ <STM32_PINMUX('G', 6, ANALOG)>; /* SDMMC2_CMD */
+ };
+ };
+
+ sdmmc2_ux_d47_pins_a: sdmmc2-ux-d47-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 8, AF9)>, /* SDMMC2_D4 */
+ <STM32_PINMUX('A', 9, AF10)>, /* SDMMC2_D5 */
+ <STM32_PINMUX('E', 5, AF9)>, /* SDMMC2_D6 */
+ <STM32_PINMUX('D', 3, AF9)>; /* SDMMC2_D7 */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ };
+
+ sdmmc2_ux_d47_sleep_pins_a: sdmmc2-ux-d47-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 8, ANALOG)>, /* SDMMC2_D4 */
+ <STM32_PINMUX('A', 9, ANALOG)>, /* SDMMC2_D5 */
+ <STM32_PINMUX('E', 5, ANALOG)>, /* SDMMC2_D6 */
+ <STM32_PINMUX('D', 3, ANALOG)>; /* SDMMC2_D7 */
+ };
+ };
+
+ uart4_ux_pins_a: uart4-ux-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 11, AF6)>; /* UART4_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 2, AF8)>; /* UART4_RX */
+ bias-disable;
+ };
+ };
+
+ uart4_ux_idle_pins_a: uart4-ux-idle-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 11, ANALOG)>; /* UART4_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 2, AF8)>; /* UART4_RX */
+ bias-disable;
+ };
+ };
+
+ uart4_ux_sleep_pins_a: uart4-ux-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('G', 11, ANALOG)>, /* UART4_TX */
+ <STM32_PINMUX('B', 2, ANALOG)>; /* UART4_RX */
+ };
+ };
+
+ uart5_ux_pins_a: uart5-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 13, AF14)>; /* UART5_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 12, AF14)>; /* UART5_RX */
+ bias-disable;
+ };
+ };
+
+ uart5_ux_idle_pins_a: uart5-idle-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 13, ANALOG)>; /* UART5_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 12, AF14)>; /* UART5_RX */
+ bias-disable;
+ };
+ };
+
+ uart5_ux_sleep_pins_a: uart5-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 13, ANALOG)>, /* UART5_TX */
+ <STM32_PINMUX('B', 12, ANALOG)>; /* UART5_RX */
+ };
+ };
+
+ uart7_ux_pins_a: uart7-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('E', 8, AF7)>; /* USART7_TX */
+ bias-pull-up;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 7, AF7)>; /* USART7_RX */
+ bias-pull-up;
+ };
+ pins3 {
+ pinmux = <STM32_PINMUX('E', 9, AF7)>; /* USART7_RTS/DE */
+ };
+ };
+
+ uart7_ux_idle_pins_a: uart7-idle-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('E', 8, ANALOG)>, /* USART7_TX */
+ <STM32_PINMUX('E', 9, AF7)>; /* USART7_RTS/DE */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 7, AF7)>; /* USART7_RX */
+ bias-disable;
+ };
+ };
+
+ uart7_ux_sleep_pins_a: uart7-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('E', 8, ANALOG)>, /* USART7_TX */
+ <STM32_PINMUX('E', 9, AF7)>, /* USART7_RTS/DE */
+ <STM32_PINMUX('E', 7, ANALOG)>; /* USART7_RX */
+ };
+ };
+};
+
+&pinctrl_z {
+
+ i2c4_ux_pins_a: i2c4-ux-0 {
+ pins {
+ pinmux = <STM32_PINMUX('Z', 4, AF6)>, /* I2C4_SCL */
+ <STM32_PINMUX('Z', 5, AF6)>; /* I2C4_SDA */
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+ };
+ };
+
+ i2c4_ux_pins_sleep_a: i2c4-1 {
+ pins {
+ pinmux = <STM32_PINMUX('Z', 4, ANALOG)>, /* I2C4_SCL */
+ <STM32_PINMUX('Z', 5, ANALOG)>; /* I2C4_SDA */
+ };
+ };
+
+ spi1_ux_pins_a: spi1-ux-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('Z', 0, AF5)>, /* SPI1_SCK */
+ <STM32_PINMUX('Z', 2, AF5)>; /* SPI1_MOSI */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('Z', 1, AF5)>; /* SPI1_MISO */
+ bias-disable;
+ };
+ };
+
+ spi1_ux_sleep_pins_a: spi1-ux-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('Z', 0, ANALOG)>, /* SPI1_SCK */
+ <STM32_PINMUX('Z', 1, ANALOG)>, /* SPI1_MISO */
+ <STM32_PINMUX('Z', 2, ANALOG)>; /* SPI1_MOSI */
+ };
+ };
+};
+
+&pwr_regulators {
+ vdd-supply = <&vdd>;
+ vdd_3v3_usbfs-supply = <&vdd_usb>;
+};
+
+&qspi {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qspi_clk_ux_pins_a &qspi_bk1_ux_pins_a>;
+ pinctrl-1 = <&qspi_clk_ux_sleep_pins_a &qspi_bk1_ux_sleep_pins_a>;
+ reg = <0x58003000 0x1000>, <0x70000000 0x1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ flash0: flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <133000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_ux_b4_pins_a>;
+ pinctrl-1 = <&sdmmc1_ux_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc1_ux_b4_sleep_pins_a>;
+ broken-cd;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&v3v3>;
+ no-1-8-v;
+ status = "okay";
+};
+
+&sdmmc2 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc2_ux_b4_pins_a &sdmmc2_ux_d47_pins_a>;
+ pinctrl-1 = <&sdmmc2_ux_b4_od_pins_a &sdmmc2_ux_d47_pins_a>;
+ pinctrl-2 = <&sdmmc2_ux_b4_sleep_pins_a &sdmmc2_ux_d47_sleep_pins_a>;
+ non-removable;
+ no-sd;
+ no-sdio;
+ st,neg-edge;
+ bus-width = <8>;
+ vmmc-supply = <&v3v3>;
+ vqmmc-supply = <&v3v3>;
+ mmc-ddr-3_3v;
+ status = "okay";
+};
+
+&spi1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&spi1_ux_pins_a>;
+ pinctrl-1 = <&spi1_ux_sleep_pins_a>;
+ status = "okay";
+ cs-gpios = <&gpioi 8 0>, <&gpioi 11 0>, <&gpioz 3 0>;
+
+ flash: flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
+};
+
+&timers1 {
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+
+ pwm {
+ pinctrl-0 = <&pwm1_ux_pins_a>;
+ pinctrl-1 = <&pwm1_ux_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ status = "okay";
+ };
+
+ timer@0 {
+ status = "okay";
+ };
+};
+
+&timers4 {
+ dmas = <&dmamux1 31 0x400 0x5>;
+ dma-names = "ch3";
+ status = "okay";
+
+ pwm4_4: pwm {
+ pinctrl-0 = <&pwm4_ux_pins_a>;
+ pinctrl-1 = <&pwm4_ux_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ status = "okay";
+ };
+};
+
+&timers5 {
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+
+ pwm5_4: pwm {
+ pinctrl-0 = <&pwm5_ux_pins_a>;
+ pinctrl-1 = <&pwm5_ux_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ status = "okay";
+ };
+
+ timer@4 {
+ status = "okay";
+ };
+};
+
+&timers17 {
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+
+ pwm17_4: pwm {
+ pinctrl-0 = <&pwm17_ux_pins_a>;
+ pinctrl-1 = <&pwm17_ux_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ status = "okay";
+ };
+
+ timer@16 {
+ status = "okay";
+ };
+};
+
+&uart4 {
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ pinctrl-names = "default", "sleep", "idle", "no_console_suspend";
+ pinctrl-0 = <&uart4_ux_pins_a>;
+ pinctrl-1 = <&uart4_ux_sleep_pins_a>;
+ pinctrl-2 = <&uart4_ux_idle_pins_a>;
+ pinctrl-3 = <&uart4_ux_pins_a>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&uart5_ux_pins_a>;
+ pinctrl-1 = <&uart5_ux_sleep_pins_a>;
+ pinctrl-2 = <&uart5_ux_idle_pins_a>;
+ status = "okay";
+};
+
+&uart7 {
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&uart7_ux_pins_a>;
+ pinctrl-1 = <&uart7_ux_sleep_pins_a>;
+ pinctrl-2 = <&uart7_ux_idle_pins_a>;
+ status = "okay";
+};
+
+&usart1 {
+ /* Muxing happens in U-Boot */
+ status = "okay";
+};
+
+&usbh_ehci {
+ phys = <&usbphyc_port0>;
+ phy-names = "usb";
+ status = "okay";
+};
+
+&usbh_ohci {
+ phys = <&usbphyc_port0>;
+ phy-names = "usb";
+ status = "okay";
+};
+
+&usbotg_hs {
+ phys = <&usbphyc_port1 0>;
+ phy-names = "usb2-phy";
+ vbus-supply = <&usb_otg_vbus>;
+ status = "okay";
+};
+
+&usbphyc {
+ status = "okay";
+};
+
+&usbphyc_port0 {
+ phy-supply = <&vdd_usb>;
+ st,tune-hs-dc-level = <2>;
+ st,enable-fs-rftime-tuning;
+ st,enable-hs-rftime-reduction;
+ st,trim-hs-current = <15>;
+ st,trim-hs-impedance = <1>;
+ st,tune-squelch-level = <3>;
+ st,tune-hs-rx-offset = <2>;
+ st,no-lsfs-sc;
+};
+
+&usbphyc_port1 {
+ phy-supply = <&vdd_usb>;
+ st,tune-hs-dc-level = <2>;
+ st,enable-fs-rftime-tuning;
+ st,enable-hs-rftime-reduction;
+ st,trim-hs-current = <15>;
+ st,trim-hs-impedance = <1>;
+ st,tune-squelch-level = <3>;
+ st,tune-hs-rx-offset = <2>;
+ st,no-lsfs-sc;
+};
+
+&vrefbuf {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ vdda-supply = <&vdd>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcor-avenger96.dtsi
index 343a4613dfca..aceeff6c38ba 100644
--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcor-avenger96.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcor-avenger96.dtsi
@@ -435,7 +435,7 @@
#address-cells = <1>;
#size-cells = <0>;
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
};
diff --git a/arch/arm/boot/dts/ti/davinci/da850-evm.dts b/arch/arm/boot/dts/ti/davinci/da850-evm.dts
index 1f5cd35f8b74..38a191fb0414 100644
--- a/arch/arm/boot/dts/ti/davinci/da850-evm.dts
+++ b/arch/arm/boot/dts/ti/davinci/da850-evm.dts
@@ -60,7 +60,7 @@
sync-edge = <0>;
sync-ctrl = <1>;
raster-order = <0>;
- fifo-th = <0>;
+ fifo-th = <1>;
};
display-timings {
diff --git a/arch/arm/boot/dts/ti/omap/am335x-evm.dts b/arch/arm/boot/dts/ti/omap/am335x-evm.dts
index 61bf8bcd4c4e..20222f82f21b 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-evm.dts
+++ b/arch/arm/boot/dts/ti/omap/am335x-evm.dts
@@ -388,7 +388,7 @@
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pins>;
-
+ wakeup-source;
status = "okay";
};
diff --git a/arch/arm/boot/dts/ti/omap/omap3-n900.dts b/arch/arm/boot/dts/ti/omap/omap3-n900.dts
index 4bde3342bb95..c50ca572d1b9 100644
--- a/arch/arm/boot/dts/ti/omap/omap3-n900.dts
+++ b/arch/arm/boot/dts/ti/omap/omap3-n900.dts
@@ -816,8 +816,6 @@
reg = <0x0c>;
VANA-supply = <&vaux4>;
-
- #io-channel-cells = <0>;
};
};
diff --git a/arch/arm/boot/dts/ti/omap/omap3.dtsi b/arch/arm/boot/dts/ti/omap/omap3.dtsi
index 92cd4c99dae7..817474ee2d13 100644
--- a/arch/arm/boot/dts/ti/omap/omap3.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap3.dtsi
@@ -862,14 +862,14 @@
#size-cells = <1>;
ranges;
- usbhsohci: ohci@48064400 {
+ usbhsohci: usb@48064400 {
compatible = "ti,ohci-omap3";
reg = <0x48064400 0x400>;
interrupts = <76>;
remote-wakeup-connected;
};
- usbhsehci: ehci@48064800 {
+ usbhsehci: usb@48064800 {
compatible = "ti,ehci-omap";
reg = <0x48064800 0x400>;
interrupts = <77>;
diff --git a/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi b/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
index 150dd84c9e0f..4ee53dfb71b4 100644
--- a/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
@@ -302,14 +302,14 @@
"refclk_60m_ext_p1",
"refclk_60m_ext_p2";
- usbhsohci: ohci@800 {
+ usbhsohci: usb@800 {
compatible = "ti,ohci-omap3";
reg = <0x800 0x400>;
interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
remote-wakeup-connected;
};
- usbhsehci: ehci@c00 {
+ usbhsehci: usb@c00 {
compatible = "ti,ehci-omap";
reg = <0xc00 0x400>;
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/ti/omap/omap4-panda-common.dtsi b/arch/arm/boot/dts/ti/omap/omap4-panda-common.dtsi
index 97706d6296a6..05c871d31d7b 100644
--- a/arch/arm/boot/dts/ti/omap/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap4-panda-common.dtsi
@@ -130,6 +130,12 @@
clock-frequency = <19200000>;
};
+ wl12xx_pwrseq: wl12xx-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&twl 0>;
+ clock-names = "ext_clock";
+ };
+
/* regulator for wl12xx on sdio5 */
wl12xx_vmmc: wl12xx_vmmc {
pinctrl-names = "default";
@@ -361,10 +367,8 @@
*/
wl12xx_gpio: wl12xx-gpio-pins {
pinctrl-single,pins = <
- OMAP4_IOPAD(0x066, PIN_OUTPUT | MUX_MODE3) /* gpmc_a19.gpio_43 */
- OMAP4_IOPAD(0x06c, PIN_OUTPUT | MUX_MODE3) /* gpmc_a22.gpio_46 */
+ OMAP4_IOPAD(0x066, PIN_OUTPUT | MUX_MODE3) /* gpmc_a19.gpio_43 - WLAN_EN */
OMAP4_IOPAD(0x070, PIN_OUTPUT_PULLUP | MUX_MODE3) /* gpmc_a24.gpio_48 */
- OMAP4_IOPAD(0x072, PIN_OUTPUT_PULLUP | MUX_MODE3) /* gpmc_a25.gpio_49 */
>;
};
@@ -387,6 +391,22 @@
OMAP4_IOPAD(0x114, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_121 */
>;
};
+
+ bt_pins: bt-pins {
+ pinctrl-single,pins = <
+ OMAP4_IOPAD(0x06c, PIN_OUTPUT | MUX_MODE3) /* gpmc_a22.gpio_46 - BTEN */
+ OMAP4_IOPAD(0x072, PIN_OUTPUT_PULLUP | MUX_MODE3) /* gpmc_a25.gpio_49 - BTWAKEUP */
+ >;
+ };
+
+ uart2_pins: uart2-pins {
+ pinctrl-single,pins = <
+ OMAP4_IOPAD(0x118, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts - HCI */
+ OMAP4_IOPAD(0x11a, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */
+ OMAP4_IOPAD(0x11c, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_rx.uart2_rx */
+ OMAP4_IOPAD(0x11e, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */
+ >;
+ };
};
&omap4_pmx_wkup {
@@ -408,6 +428,7 @@
reg = <0x48>;
/* IRQ# = 7 */
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; /* IRQ_SYS_1N cascaded to gic */
+ #clock-cells = <1>;
system-power-controller;
};
@@ -488,6 +509,7 @@
non-removable;
bus-width = <4>;
cap-power-off-card;
+ mmc-pwrseq = <&wl12xx_pwrseq>;
#address-cells = <1>;
#size-cells = <0>;
@@ -523,8 +545,19 @@
};
&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
interrupts-extended = <&wakeupgen GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH
&omap4_pmx_core OMAP4_UART2_RX>;
+
+ bluetooth {
+ compatible = "ti,wl1271-st";
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_pins>;
+ enable-gpios = <&gpio2 14 GPIO_ACTIVE_HIGH>; /* GPIO_46 */
+ clocks = <&twl 0>;
+ clock-names = "ext_clock";
+ };
};
&uart3 {
diff --git a/arch/arm/boot/dts/ti/omap/omap4-panda-es.dts b/arch/arm/boot/dts/ti/omap/omap4-panda-es.dts
index fe7b156d10ed..a933fe560834 100644
--- a/arch/arm/boot/dts/ti/omap/omap4-panda-es.dts
+++ b/arch/arm/boot/dts/ti/omap/omap4-panda-es.dts
@@ -49,22 +49,6 @@
OMAP4_IOPAD(0x0fc, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */
>;
};
-
- bt_pins: bt-pins {
- pinctrl-single,pins = <
- OMAP4_IOPAD(0x06c, PIN_OUTPUT | MUX_MODE3) /* gpmc_a22.gpio_46 - BTEN */
- OMAP4_IOPAD(0x072, PIN_OUTPUT_PULLUP | MUX_MODE3) /* gpmc_a25.gpio_49 - BTWAKEUP */
- >;
- };
-
- uart2_pins: uart2-pins {
- pinctrl-single,pins = <
- OMAP4_IOPAD(0x118, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts - HCI */
- OMAP4_IOPAD(0x11a, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */
- OMAP4_IOPAD(0x11c, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_rx.uart2_rx */
- OMAP4_IOPAD(0x11e, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */
- >;
- };
};
&led_wkgpio_pins {
@@ -96,19 +80,3 @@
&gpio1_target {
ti,no-reset-on-init;
};
-
-&wl12xx_gpio {
- pinctrl-single,pins = <
- OMAP4_IOPAD(0x066, PIN_OUTPUT | MUX_MODE3) /* gpmc_a19.gpio_43 */
- OMAP4_IOPAD(0x070, PIN_OUTPUT_PULLUP | MUX_MODE3) /* gpmc_a24.gpio_48 */
- >;
-};
-
-&uart2 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart2_pins &bt_pins>;
- bluetooth: tiwi {
- compatible = "ti,wl1271-st";
- enable-gpios = <&gpio2 14 GPIO_ACTIVE_HIGH>; /* GPIO_46 */
- };
-};
diff --git a/arch/arm/boot/dts/ti/omap/omap5-l4.dtsi b/arch/arm/boot/dts/ti/omap/omap5-l4.dtsi
index 3b505fe415ed..9f6100c7c34d 100644
--- a/arch/arm/boot/dts/ti/omap/omap5-l4.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap5-l4.dtsi
@@ -331,14 +331,14 @@
"refclk_60m_ext_p1",
"refclk_60m_ext_p2";
- usbhsohci: ohci@800 {
+ usbhsohci: usb@800 {
compatible = "ti,ohci-omap3";
reg = <0x800 0x400>;
interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
remote-wakeup-connected;
};
- usbhsehci: ehci@c00 {
+ usbhsehci: usb@c00 {
compatible = "ti,ehci-omap";
reg = <0xc00 0x400>;
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/vt8500/Makefile b/arch/arm/boot/dts/vt8500/Makefile
index 255f4403af91..6fd29c41f366 100644
--- a/arch/arm/boot/dts/vt8500/Makefile
+++ b/arch/arm/boot/dts/vt8500/Makefile
@@ -4,4 +4,5 @@ dtb-$(CONFIG_ARCH_VT8500) += \
wm8505-ref.dtb \
wm8650-mid.dtb \
wm8750-apc8750.dtb \
- wm8850-w70v2.dtb
+ wm8850-w70v2.dtb \
+ wm8950-apc-rock.dtb
diff --git a/arch/arm/boot/dts/vt8500/vt8500.dtsi b/arch/arm/boot/dts/vt8500/vt8500.dtsi
index f23cb5ee11ae..d1dd37220d41 100644
--- a/arch/arm/boot/dts/vt8500/vt8500.dtsi
+++ b/arch/arm/boot/dts/vt8500/vt8500.dtsi
@@ -55,6 +55,11 @@
#gpio-cells = <2>;
};
+ chipid@d8120000 {
+ compatible = "via,vt8500-scc-id";
+ reg = <0xd8120000 0x4>;
+ };
+
pmc@d8130000 {
compatible = "via,vt8500-pmc";
reg = <0xd8130000 0x1000>;
@@ -106,10 +111,10 @@
timer@d8130100 {
compatible = "via,vt8500-timer";
reg = <0xd8130100 0x28>;
- interrupts = <36>;
+ interrupts = <36>, <37>, <38>, <39>;
};
- ehci@d8007900 {
+ usb@d8007900 {
compatible = "via,vt8500-ehci";
reg = <0xd8007900 0x200>;
interrupts = <43>;
diff --git a/arch/arm/boot/dts/vt8500/wm8505.dtsi b/arch/arm/boot/dts/vt8500/wm8505.dtsi
index d9e1280372c5..2b1819f0c541 100644
--- a/arch/arm/boot/dts/vt8500/wm8505.dtsi
+++ b/arch/arm/boot/dts/vt8500/wm8505.dtsi
@@ -66,6 +66,11 @@
#gpio-cells = <2>;
};
+ chipid@d8120000 {
+ compatible = "via,vt8500-scc-id";
+ reg = <0xd8120000 0x4>;
+ };
+
pmc@d8130000 {
compatible = "via,vt8500-pmc";
reg = <0xd8130000 0x1000>;
@@ -204,10 +209,10 @@
timer@d8130100 {
compatible = "via,vt8500-timer";
reg = <0xd8130100 0x28>;
- interrupts = <36>;
+ interrupts = <36>, <37>, <38>, <39>;
};
- ehci@d8007100 {
+ usb@d8007100 {
compatible = "via,vt8500-ehci";
reg = <0xd8007100 0x200>;
interrupts = <1>;
diff --git a/arch/arm/boot/dts/vt8500/wm8650.dtsi b/arch/arm/boot/dts/vt8500/wm8650.dtsi
index 35d12d77efc0..042eec78c085 100644
--- a/arch/arm/boot/dts/vt8500/wm8650.dtsi
+++ b/arch/arm/boot/dts/vt8500/wm8650.dtsi
@@ -62,6 +62,11 @@
#gpio-cells = <2>;
};
+ chipid@d8120000 {
+ compatible = "via,vt8500-scc-id";
+ reg = <0xd8120000 0x4>;
+ };
+
pmc@d8130000 {
compatible = "via,vt8500-pmc";
reg = <0xd8130000 0x1000>;
@@ -176,10 +181,10 @@
timer@d8130100 {
compatible = "via,vt8500-timer";
reg = <0xd8130100 0x28>;
- interrupts = <36>;
+ interrupts = <36>, <37>, <38>, <39>;
};
- ehci@d8007900 {
+ usb@d8007900 {
compatible = "via,vt8500-ehci";
reg = <0xd8007900 0x200>;
interrupts = <43>;
diff --git a/arch/arm/boot/dts/vt8500/wm8750.dtsi b/arch/arm/boot/dts/vt8500/wm8750.dtsi
index b292f85d4e69..56342aa1d993 100644
--- a/arch/arm/boot/dts/vt8500/wm8750.dtsi
+++ b/arch/arm/boot/dts/vt8500/wm8750.dtsi
@@ -68,6 +68,11 @@
#gpio-cells = <2>;
};
+ chipid@d8120000 {
+ compatible = "via,vt8500-scc-id";
+ reg = <0xd8120000 0x4>;
+ };
+
pmc@d8130000 {
compatible = "via,vt8500-pmc";
reg = <0xd8130000 0x1000>;
@@ -248,10 +253,10 @@
timer@d8130100 {
compatible = "via,vt8500-timer";
reg = <0xd8130100 0x28>;
- interrupts = <36>;
+ interrupts = <36>, <37>, <38>, <39>;
};
- ehci@d8007900 {
+ usb@d8007900 {
compatible = "via,vt8500-ehci";
reg = <0xd8007900 0x200>;
interrupts = <26>;
diff --git a/arch/arm/boot/dts/vt8500/wm8850.dtsi b/arch/arm/boot/dts/vt8500/wm8850.dtsi
index c61717ebb4f1..03e72f28d31b 100644
--- a/arch/arm/boot/dts/vt8500/wm8850.dtsi
+++ b/arch/arm/boot/dts/vt8500/wm8850.dtsi
@@ -65,6 +65,11 @@
#gpio-cells = <2>;
};
+ chipid@d8120000 {
+ compatible = "via,vt8500-scc-id";
+ reg = <0xd8120000 0x4>;
+ };
+
pmc@d8130000 {
compatible = "via,vt8500-pmc";
reg = <0xd8130000 0x1000>;
@@ -235,10 +240,10 @@
timer@d8130100 {
compatible = "via,vt8500-timer";
reg = <0xd8130100 0x28>;
- interrupts = <36>;
+ interrupts = <36>, <37>, <38>, <39>;
};
- ehci@d8007900 {
+ usb@d8007900 {
compatible = "via,vt8500-ehci";
reg = <0xd8007900 0x200>;
interrupts = <26>;
diff --git a/arch/arm/boot/dts/vt8500/wm8950-apc-rock.dts b/arch/arm/boot/dts/vt8500/wm8950-apc-rock.dts
new file mode 100644
index 000000000000..58b3c8deb4f2
--- /dev/null
+++ b/arch/arm/boot/dts/vt8500/wm8950-apc-rock.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2025 Alexey Charkov <alchark@gmail.com>
+ */
+
+/dts-v1/;
+/include/ "wm8950.dtsi"
+
+/ {
+ model = "VIA APC Rock";
+ compatible = "via,apc-rock", "wm,wm8950";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x20000000>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/vt8500/wm8950.dtsi b/arch/arm/boot/dts/vt8500/wm8950.dtsi
new file mode 100644
index 000000000000..31fba05d3c3e
--- /dev/null
+++ b/arch/arm/boot/dts/vt8500/wm8950.dtsi
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2025 Alexey Charkov <alchark@gmail.com>
+ */
+
+/* No differences have been discovered vs. WM8850, but chip markings differ */
+/include/ "wm8850.dtsi"
+
+/ {
+ compatible = "wm,wm8950";
+};
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 9846f30990f7..86b271cc29e1 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -416,9 +416,9 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
writel_relaxed(~0, irqbase + SA1111_INTSTATCLR0);
writel_relaxed(~0, irqbase + SA1111_INTSTATCLR1);
- sachip->irqdomain = irq_domain_add_linear(NULL, SA1111_IRQ_NR,
- &sa1111_irqdomain_ops,
- sachip);
+ sachip->irqdomain = irq_domain_create_linear(NULL, SA1111_IRQ_NR,
+ &sa1111_irqdomain_ops,
+ sachip);
if (!sachip->irqdomain) {
irq_free_descs(sachip->irq_base, SA1111_IRQ_NR);
return -ENOMEM;
@@ -563,7 +563,7 @@ static int sa1111_gpio_get(struct gpio_chip *gc, unsigned offset)
return !!(readl_relaxed(reg + SA1111_GPIO_PXDRR) & mask);
}
-static void sa1111_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+static int sa1111_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct sa1111 *sachip = gc_to_sa1111(gc);
unsigned long flags;
@@ -574,6 +574,8 @@ static void sa1111_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
sa1111_gpio_modify(reg + SA1111_GPIO_PXDWR, mask, value ? mask : 0);
sa1111_gpio_modify(reg + SA1111_GPIO_PXSSR, mask, value ? mask : 0);
spin_unlock_irqrestore(&sachip->lock, flags);
+
+ return 0;
}
static void sa1111_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
@@ -613,7 +615,7 @@ static int sa1111_setup_gpios(struct sa1111 *sachip)
sachip->gc.direction_input = sa1111_gpio_direction_input;
sachip->gc.direction_output = sa1111_gpio_direction_output;
sachip->gc.get = sa1111_gpio_get;
- sachip->gc.set = sa1111_gpio_set;
+ sachip->gc.set_rv = sa1111_gpio_set;
sachip->gc.set_multiple = sa1111_gpio_set_multiple;
sachip->gc.to_irq = sa1111_gpio_to_irq;
sachip->gc.base = -1;
diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c
index 0b08b6621878..2d3ee76c8e17 100644
--- a/arch/arm/common/scoop.c
+++ b/arch/arm/common/scoop.c
@@ -63,7 +63,8 @@ static void __scoop_gpio_set(struct scoop_dev *sdev,
iowrite16(gpwr, sdev->base + SCOOP_GPWR);
}
-static void scoop_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int scoop_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct scoop_dev *sdev = gpiochip_get_data(chip);
unsigned long flags;
@@ -73,6 +74,8 @@ static void scoop_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
__scoop_gpio_set(sdev, offset, value);
spin_unlock_irqrestore(&sdev->scoop_lock, flags);
+
+ return 0;
}
static int scoop_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -215,7 +218,7 @@ static int scoop_probe(struct platform_device *pdev)
devptr->gpio.label = dev_name(&pdev->dev);
devptr->gpio.base = inf->gpio_base;
devptr->gpio.ngpio = 12; /* PA11 = 0, PA12 = 1, etc. up to PA22 = 11 */
- devptr->gpio.set = scoop_gpio_set;
+ devptr->gpio.set_rv = scoop_gpio_set;
devptr->gpio.get = scoop_gpio_get;
devptr->gpio.direction_input = scoop_gpio_direction_input;
devptr->gpio.direction_output = scoop_gpio_direction_output;
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index f2596a1b2f7d..ff13e1ecf4bb 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -232,7 +232,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_DEV_ATMEL_AES=y
CONFIG_CRYPTO_DEV_ATMEL_TDES=y
CONFIG_CRYPTO_DEV_ATMEL_SHA=y
-CONFIG_CRC_CCITT=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_ACORN_8x8=y
diff --git a/arch/arm/configs/collie_defconfig b/arch/arm/configs/collie_defconfig
index 42cb1c854118..578c6a4af620 100644
--- a/arch/arm/configs/collie_defconfig
+++ b/arch/arm/configs/collie_defconfig
@@ -78,7 +78,6 @@ CONFIG_ROMFS_FS=y
CONFIG_NLS_DEFAULT="cp437"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_CCITT=y
CONFIG_FONTS=y
CONFIG_FONT_MINI_4x6=y
# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 3474e475373a..e2ddaca0f89d 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -14,7 +14,6 @@ CONFIG_ARCH_MULTIPLATFORM=y
CONFIG_ARCH_MULTI_V5=y
# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_DAVINCI=y
-CONFIG_ARCH_DAVINCI_DA830=y
CONFIG_ARCH_DAVINCI_DA850=y
CONFIG_DAVINCI_MUX_DEBUG=y
CONFIG_DAVINCI_MUX_WARNINGS=y
@@ -249,7 +248,6 @@ CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=m
CONFIG_DMA_CMA=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_RT_MUTEXES=y
diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig
index b382a2e175fb..d76eb12d29a7 100644
--- a/arch/arm/configs/dove_defconfig
+++ b/arch/arm/configs/dove_defconfig
@@ -128,7 +128,6 @@ CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_MARVELL_CESA=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 7ad48fdda1da..f71af368674c 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -167,7 +167,7 @@ CONFIG_MFD_MAX77686=y
CONFIG_MFD_MAX77693=y
CONFIG_MFD_MAX8997=y
CONFIG_MFD_MAX8998=y
-CONFIG_MFD_SEC_CORE=y
+CONFIG_MFD_SEC_I2C=y
CONFIG_MFD_STMPE=y
CONFIG_STMPE_I2C=y
CONFIG_MFD_TPS65090=y
@@ -349,7 +349,7 @@ CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_XTS=m
@@ -364,13 +364,11 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_SHA1_ARM_NEON=m
-CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_CHACHA20_NEON=m
CONFIG_CRYPTO_DEV_EXYNOS_RNG=y
CONFIG_CRYPTO_DEV_S5P=y
-CONFIG_CRC_CCITT=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=96
CONFIG_FONTS=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 297c6a7b978a..062c1eb8dd60 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -481,8 +481,6 @@ CONFIG_SECURITYFS=y
CONFIG_CRYPTO_DEV_FSL_CAAM=y
CONFIG_CRYPTO_DEV_SAHARA=y
CONFIG_CRYPTO_DEV_MXS_DCP=y
-CONFIG_CRC_CCITT=m
-CONFIG_CRC_T10DIF=y
CONFIG_CMA_SIZE_MBYTES=64
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
diff --git a/arch/arm/configs/lpc18xx_defconfig b/arch/arm/configs/lpc18xx_defconfig
index 2aa2ac8c6507..2d489186e945 100644
--- a/arch/arm/configs/lpc18xx_defconfig
+++ b/arch/arm/configs/lpc18xx_defconfig
@@ -147,7 +147,6 @@ CONFIG_EXT2_FS=y
# CONFIG_INOTIFY_USER is not set
CONFIG_JFFS2_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_CRC_ITU_T=y
CONFIG_PRINTK_TIME=y
# CONFIG_ENABLE_MUST_CHECK is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index 98e267213b21..9afccd76446b 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -179,7 +179,6 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_ANSI_CPRNG=y
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/milbeaut_m10v_defconfig b/arch/arm/configs/milbeaut_m10v_defconfig
index acd16204f8d7..242e7d5a3f68 100644
--- a/arch/arm/configs/milbeaut_m10v_defconfig
+++ b/arch/arm/configs/milbeaut_m10v_defconfig
@@ -93,23 +93,19 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_KEYS=y
-CONFIG_CRYPTO_MANAGER=y
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_SELFTESTS=y
# CONFIG_CRYPTO_ECHAINIV is not set
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_SEQIV=m
CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_SHA1_ARM_NEON=m
CONFIG_CRYPTO_SHA1_ARM_CE=m
-CONFIG_CRYPTO_SHA2_ARM_CE=m
CONFIG_CRYPTO_SHA512_ARM=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_AES_ARM_CE=m
CONFIG_CRYPTO_CHACHA20_NEON=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=m
-CONFIG_CRC_ITU_T=m
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=64
CONFIG_PRINTK_TIME=y
diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig
index f6f9e135353e..842a989baa27 100644
--- a/arch/arm/configs/mmp2_defconfig
+++ b/arch/arm/configs/mmp2_defconfig
@@ -67,7 +67,6 @@ CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/multi_v4t_defconfig b/arch/arm/configs/multi_v4t_defconfig
index 27d650635d9b..1a86dc305523 100644
--- a/arch/arm/configs/multi_v4t_defconfig
+++ b/arch/arm/configs/multi_v4t_defconfig
@@ -91,6 +91,5 @@ CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_CRAMFS=y
CONFIG_MINIX_FS=y
-CONFIG_CRC_CCITT=y
# CONFIG_FTRACE is not set
CONFIG_DEBUG_USER=y
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index db81862bdb93..b523bc246c09 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -12,7 +12,6 @@ CONFIG_MACH_ASPEED_G4=y
CONFIG_ARCH_AT91=y
CONFIG_SOC_AT91SAM9=y
CONFIG_ARCH_DAVINCI=y
-CONFIG_ARCH_DAVINCI_DA830=y
CONFIG_ARCH_DAVINCI_DA850=y
CONFIG_ARCH_MXC=y
CONFIG_SOC_IMX25=y
@@ -289,7 +288,6 @@ CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_DEV_MARVELL_CESA=y
-CONFIG_CRC_CCITT=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index ad037c175fdb..50c170b4619f 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -612,7 +612,7 @@ CONFIG_MFD_QCOM_RPM=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_RK8XX_I2C=y
CONFIG_MFD_RN5T618=y
-CONFIG_MFD_SEC_CORE=y
+CONFIG_MFD_SEC_I2C=y
CONFIG_MFD_STMPE=y
CONFIG_MFD_PALMAS=y
CONFIG_MFD_TPS65090=y
@@ -1121,25 +1121,6 @@ CONFIG_QCOM_SMSM=y
CONFIG_QCOM_SOCINFO=m
CONFIG_QCOM_STATS=m
CONFIG_QCOM_WCNSS_CTRL=m
-CONFIG_ARCH_EMEV2=y
-CONFIG_ARCH_R8A7794=y
-CONFIG_ARCH_R8A7779=y
-CONFIG_ARCH_R8A7790=y
-CONFIG_ARCH_R8A7778=y
-CONFIG_ARCH_R8A7793=y
-CONFIG_ARCH_R8A7791=y
-CONFIG_ARCH_R8A7792=y
-CONFIG_ARCH_R8A7740=y
-CONFIG_ARCH_R8A73A4=y
-CONFIG_ARCH_R7S72100=y
-CONFIG_ARCH_R7S9210=y
-CONFIG_ARCH_R8A77470=y
-CONFIG_ARCH_R8A7745=y
-CONFIG_ARCH_R8A7742=y
-CONFIG_ARCH_R8A7743=y
-CONFIG_ARCH_R8A7744=y
-CONFIG_ARCH_R9A06G032=y
-CONFIG_ARCH_SH73A0=y
CONFIG_ROCKCHIP_IODOMAIN=y
CONFIG_ARCH_TEGRA_2x_SOC=y
CONFIG_ARCH_TEGRA_3x_SOC=y
@@ -1203,7 +1184,7 @@ CONFIG_PWM_BCM2835=y
CONFIG_PWM_BRCMSTB=m
CONFIG_PWM_FSL_FTM=m
CONFIG_PWM_MESON=m
-CONFIG_PWM_RCAR=m
+CONFIG_PWM_RENESAS_RCAR=m
CONFIG_PWM_RENESAS_TPU=y
CONFIG_PWM_ROCKCHIP=m
CONFIG_PWM_SAMSUNG=m
@@ -1301,7 +1282,6 @@ CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_SHA1_ARM_NEON=m
CONFIG_CRYPTO_SHA1_ARM_CE=m
-CONFIG_CRYPTO_SHA2_ARM_CE=m
CONFIG_CRYPTO_SHA512_ARM=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig
index a518d4a2581e..23dbb80fcc2e 100644
--- a/arch/arm/configs/mvebu_v5_defconfig
+++ b/arch/arm/configs/mvebu_v5_defconfig
@@ -187,7 +187,6 @@ CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_DEV_MARVELL_CESA=y
-CONFIG_CRC_CCITT=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index d8a6e43c401e..c76d66135abb 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -160,7 +160,6 @@ CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
CONFIG_CRYPTO_DEV_MXS_DCP=y
-CONFIG_CRC_ITU_T=m
CONFIG_FONTS=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 113d6dfe5243..9f9780c8e62a 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -608,6 +608,7 @@ CONFIG_LEDS_LP5523=m
CONFIG_LEDS_PCA963X=m
CONFIG_LEDS_PWM=m
CONFIG_LEDS_BD2606MVV=m
+CONFIG_LEDS_TCA6507=m
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=m
CONFIG_LEDS_TRIGGER_ONESHOT=m
@@ -642,6 +643,8 @@ CONFIG_TI_EMIF_SRAM=m
CONFIG_IIO=m
CONFIG_IIO_SW_DEVICE=m
CONFIG_IIO_SW_TRIGGER=m
+CONFIG_BMA180=m
+CONFIG_BMC150_ACCEL=m
CONFIG_IIO_ST_ACCEL_3AXIS=m
CONFIG_KXCJK1013=m
CONFIG_CPCAP_ADC=m
@@ -649,10 +652,15 @@ CONFIG_INA2XX_ADC=m
CONFIG_TI_AM335X_ADC=m
CONFIG_TWL4030_MADC=m
CONFIG_TWL6030_GPADC=m
+CONFIG_BMG160=m
CONFIG_MPU3050_I2C=m
+CONFIG_ITG3200=m
+CONFIG_BOSCH_BNO055_I2C=m
CONFIG_INV_MPU6050_I2C=m
CONFIG_SENSORS_ISL29028=m
CONFIG_AK8975=m
+CONFIG_BMC150_MAGN_I2C=m
+CONFIG_SENSORS_HMC5843_I2C=m
CONFIG_BMP280=m
CONFIG_PWM=y
CONFIG_PWM_OMAP_DMTIMER=m
@@ -697,7 +705,6 @@ CONFIG_SECURITY=y
CONFIG_CRYPTO_MICHAEL_MIC=y
CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_SHA1_ARM_NEON=m
-CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
@@ -706,9 +713,6 @@ CONFIG_CRYPTO_DEV_OMAP=m
CONFIG_CRYPTO_DEV_OMAP_SHAM=m
CONFIG_CRYPTO_DEV_OMAP_AES=m
CONFIG_CRYPTO_DEV_OMAP_DES=m
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_T10DIF=y
-CONFIG_CRC_ITU_T=y
CONFIG_DMA_CMA=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index 0629b088a584..62b9c6102789 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -136,7 +136,6 @@ CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_DEV_MARVELL_CESA=y
-CONFIG_CRC_T10DIF=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/pxa168_defconfig b/arch/arm/configs/pxa168_defconfig
index ce10fe2104bf..4748c7d33cb8 100644
--- a/arch/arm/configs/pxa168_defconfig
+++ b/arch/arm/configs/pxa168_defconfig
@@ -41,7 +41,6 @@ CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/pxa910_defconfig b/arch/arm/configs/pxa910_defconfig
index 1f28aea86014..49b59c600ae1 100644
--- a/arch/arm/configs/pxa910_defconfig
+++ b/arch/arm/configs/pxa910_defconfig
@@ -50,7 +50,6 @@ CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index de0ac8f521d7..ff29c5b0e9c9 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -335,7 +335,7 @@ CONFIG_MFD_MAX77693=y
CONFIG_MFD_MAX8907=m
CONFIG_EZX_PCAP=y
CONFIG_UCB1400_CORE=m
-CONFIG_MFD_SEC_CORE=y
+CONFIG_MFD_SEC_I2C=y
CONFIG_MFD_PALMAS=y
CONFIG_MFD_TPS65090=y
CONFIG_MFD_TPS6586X=y
@@ -636,10 +636,9 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
CONFIG_TIMER_STATS=y
CONFIG_SECURITY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
@@ -660,11 +659,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_SHA1_ARM=m
-CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
CONFIG_CRYPTO_AES_ARM=m
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_T10DIF=m
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
diff --git a/arch/arm/configs/s5pv210_defconfig b/arch/arm/configs/s5pv210_defconfig
index 5dbe85c263de..02121eec3658 100644
--- a/arch/arm/configs/s5pv210_defconfig
+++ b/arch/arm/configs/s5pv210_defconfig
@@ -113,7 +113,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_CRC_CCITT=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/sama7_defconfig b/arch/arm/configs/sama7_defconfig
index ea7ddf640ba7..e14720a9a5ac 100644
--- a/arch/arm/configs/sama7_defconfig
+++ b/arch/arm/configs/sama7_defconfig
@@ -227,8 +227,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_DEV_ATMEL_AES=y
CONFIG_CRYPTO_DEV_ATMEL_TDES=y
CONFIG_CRYPTO_DEV_ATMEL_SHA=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_ITU_T=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=32
CONFIG_CMA_ALIGNMENT=9
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 8c30ed14e52c..7c3d6a8f0038 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -63,6 +63,7 @@ CONFIG_SMSC_PHY=y
CONFIG_CAN_RCAR=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_EDT_FT5X06=y
@@ -84,6 +85,7 @@ CONFIG_SERIAL_8250_EM=y
CONFIG_SERIAL_SH_SCI=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_DEMUX_PINCTRL=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_EMEV2=y
CONFIG_I2C_GPIO=y
CONFIG_I2C_RIIC=y
@@ -104,7 +106,7 @@ CONFIG_GPIO_PCF857X=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_RMOBILE=y
CONFIG_POWER_SUPPLY=y
-# CONFIG_HWMON is not set
+CONFIG_SENSORS_LM75=y
CONFIG_THERMAL=y
CONFIG_CPU_THERMAL=y
CONFIG_RCAR_THERMAL=y
@@ -174,6 +176,9 @@ CONFIG_USB_RENESAS_USBHS_UDC=y
CONFIG_USB_RENESAS_USBF=y
CONFIG_USB_ETH=y
CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
CONFIG_MMC_SDHI=y
CONFIG_MMC_SH_MMCIF=y
CONFIG_NEW_LEDS=y
@@ -195,29 +200,10 @@ CONFIG_RCAR_DMAC=y
CONFIG_RENESAS_USB_DMAC=y
CONFIG_RZ_DMAC=y
# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_ARCH_EMEV2=y
-CONFIG_ARCH_R8A7794=y
-CONFIG_ARCH_R8A7779=y
-CONFIG_ARCH_R8A7790=y
-CONFIG_ARCH_R8A7778=y
-CONFIG_ARCH_R8A7793=y
-CONFIG_ARCH_R8A7791=y
-CONFIG_ARCH_R8A7792=y
-CONFIG_ARCH_R8A7740=y
-CONFIG_ARCH_R8A73A4=y
-CONFIG_ARCH_R7S72100=y
-CONFIG_ARCH_R7S9210=y
-CONFIG_ARCH_R8A77470=y
-CONFIG_ARCH_R8A7745=y
-CONFIG_ARCH_R8A7742=y
-CONFIG_ARCH_R8A7743=y
-CONFIG_ARCH_R8A7744=y
-CONFIG_ARCH_R9A06G032=y
-CONFIG_ARCH_SH73A0=y
CONFIG_IIO=y
CONFIG_AK8975=y
CONFIG_PWM=y
-CONFIG_PWM_RCAR=y
+CONFIG_PWM_RENESAS_RCAR=y
CONFIG_PWM_RENESAS_TPU=y
CONFIG_PHY_RCAR_GEN2=y
CONFIG_PHY_RCAR_GEN3_USB2=y
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index ac5b7a5aaff6..ac2a0f998c73 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -215,7 +215,7 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_DEBUG_KERNEL=y
CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_AES=m
@@ -234,7 +234,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRC_CCITT=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig
index 423bb41c4225..dcd9c316072e 100644
--- a/arch/arm/configs/stm32_defconfig
+++ b/arch/arm/configs/stm32_defconfig
@@ -74,7 +74,6 @@ CONFIG_EXT3_FS=y
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
CONFIG_NLS=y
-CONFIG_CRC_ITU_T=y
CONFIG_PRINTK_TIME=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/wpcm450_defconfig b/arch/arm/configs/wpcm450_defconfig
index 5e4397f7f828..cd4b3e70ff68 100644
--- a/arch/arm/configs/wpcm450_defconfig
+++ b/arch/arm/configs/wpcm450_defconfig
@@ -191,8 +191,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
CONFIG_X509_CERTIFICATE_PARSER=y
CONFIG_PKCS7_MESSAGE_PARSER=y
CONFIG_SYSTEM_TRUSTED_KEYRING=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_ITU_T=m
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 23e4ea067ddb..7efb9a8596e4 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -46,30 +46,6 @@ config CRYPTO_NHPOLY1305_NEON
Architecture: arm using:
- NEON (Advanced SIMD) extensions
-config CRYPTO_POLY1305_ARM
- tristate
- select CRYPTO_HASH
- select CRYPTO_ARCH_HAVE_LIB_POLY1305
- default CRYPTO_LIB_POLY1305_INTERNAL
- help
- Poly1305 authenticator algorithm (RFC7539)
-
- Architecture: arm optionally using
- - NEON (Advanced SIMD) extensions
-
-config CRYPTO_BLAKE2S_ARM
- bool "Hash functions: BLAKE2s"
- select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
- help
- BLAKE2s cryptographic hash function (RFC 7693)
-
- Architecture: arm
-
- This is faster than the generic implementations of BLAKE2s and
- BLAKE2b, but slower than the NEON implementation of BLAKE2b.
- There is no NEON implementation of BLAKE2s, since NEON doesn't
- really help with it.
-
config CRYPTO_BLAKE2B_NEON
tristate "Hash functions: BLAKE2b (NEON)"
depends on KERNEL_MODE_NEON
@@ -117,27 +93,6 @@ config CRYPTO_SHA1_ARM_CE
Architecture: arm using ARMv8 Crypto Extensions
-config CRYPTO_SHA2_ARM_CE
- tristate "Hash functions: SHA-224 and SHA-256 (ARMv8 Crypto Extensions)"
- depends on KERNEL_MODE_NEON
- select CRYPTO_SHA256_ARM
- select CRYPTO_HASH
- help
- SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
- Architecture: arm using
- - ARMv8 Crypto Extensions
-
-config CRYPTO_SHA256_ARM
- tristate "Hash functions: SHA-224 and SHA-256 (NEON)"
- select CRYPTO_HASH
- depends on !CPU_V7M
- help
- SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
- Architecture: arm using
- - NEON (Advanced SIMD) extensions
-
config CRYPTO_SHA512_ARM
tristate "Hash functions: SHA-384 and SHA-512 (NEON)"
select CRYPTO_HASH
@@ -172,7 +127,6 @@ config CRYPTO_AES_ARM_BS
select CRYPTO_AES_ARM
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
- select CRYPTO_SIMD
help
Length-preserving ciphers: AES cipher algorithms (FIPS-197)
with block cipher modes:
@@ -200,7 +154,6 @@ config CRYPTO_AES_ARM_CE
depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
- select CRYPTO_SIMD
help
Length-preserving ciphers: AES cipher algorithms (FIPS-197)
with block cipher modes:
@@ -214,17 +167,5 @@ config CRYPTO_AES_ARM_CE
Architecture: arm using:
- ARMv8 Crypto Extensions
-config CRYPTO_CHACHA20_NEON
- tristate
- select CRYPTO_SKCIPHER
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
- default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
- stream cipher algorithms
-
- Architecture: arm using:
- - NEON (Advanced SIMD) extensions
-
endmenu
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 3d0e23ff9e74..8479137c6e80 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -7,37 +7,25 @@ obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
-obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
-obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += libblake2s-arm.o
obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o
-obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
-obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
obj-$(CONFIG_CRYPTO_CURVE25519_NEON) += curve25519-neon.o
obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
-obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o
obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
aes-arm-y := aes-cipher-core.o aes-cipher-glue.o
aes-arm-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
sha1-arm-y := sha1-armv4-large.o sha1_glue.o
sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o
-sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o
-sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y)
sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o
sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y)
-libblake2s-arm-y:= blake2s-core.o blake2s-glue.o
blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o
sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
-sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
-chacha-neon-y := chacha-scalar-core.o chacha-glue.o
-chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o
-poly1305-arm-y := poly1305-core.o poly1305-glue.o
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
curve25519-neon-y := curve25519-core.o curve25519-glue.o
@@ -47,14 +35,8 @@ quiet_cmd_perl = PERL $@
$(obj)/%-core.S: $(src)/%-armv4.pl
$(call cmd,perl)
-clean-files += poly1305-core.S sha256-core.S sha512-core.S
+clean-files += sha512-core.S
aflags-thumb2-$(CONFIG_THUMB2_KERNEL) := -U__thumb2__ -D__thumb2__=1
-AFLAGS_sha256-core.o += $(aflags-thumb2-y)
AFLAGS_sha512-core.o += $(aflags-thumb2-y)
-
-# massage the perlasm code a bit so we only get the NEON routine if we need it
-poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5
-poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7
-AFLAGS_poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y)
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index 1cf61f51e766..00591895d540 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -10,8 +10,6 @@
#include <asm/simd.h>
#include <linux/unaligned.h>
#include <crypto/aes.h>
-#include <crypto/ctr.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
@@ -418,29 +416,6 @@ static int ctr_encrypt(struct skcipher_request *req)
return err;
}
-static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
-{
- struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- unsigned long flags;
-
- /*
- * Temporarily disable interrupts to avoid races where
- * cachelines are evicted when the CPU is interrupted
- * to do something else.
- */
- local_irq_save(flags);
- aes_encrypt(ctx, dst, src);
- local_irq_restore(flags);
-}
-
-static int ctr_encrypt_sync(struct skcipher_request *req)
-{
- if (!crypto_simd_usable())
- return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
-
- return ctr_encrypt(req);
-}
-
static int xts_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -586,10 +561,9 @@ static int xts_decrypt(struct skcipher_request *req)
}
static struct skcipher_alg aes_algs[] = { {
- .base.cra_name = "__ecb(aes)",
- .base.cra_driver_name = "__ecb-aes-ce",
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -600,10 +574,9 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(aes)",
- .base.cra_driver_name = "__cbc-aes-ce",
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -615,10 +588,9 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}, {
- .base.cra_name = "__cts(cbc(aes))",
- .base.cra_driver_name = "__cts-cbc-aes-ce",
+ .base.cra_name = "cts(cbc(aes))",
+ .base.cra_driver_name = "cts-cbc-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -631,10 +603,9 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = cts_cbc_encrypt,
.decrypt = cts_cbc_decrypt,
}, {
- .base.cra_name = "__ctr(aes)",
- .base.cra_driver_name = "__ctr-aes-ce",
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -647,25 +618,9 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = ctr_encrypt,
.decrypt = ctr_encrypt,
}, {
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "ctr-aes-ce-sync",
- .base.cra_priority = 300 - 1,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .chunksize = AES_BLOCK_SIZE,
- .setkey = ce_aes_setkey,
- .encrypt = ctr_encrypt_sync,
- .decrypt = ctr_encrypt_sync,
-}, {
- .base.cra_name = "__xts(aes)",
- .base.cra_driver_name = "__xts-aes-ce",
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
.base.cra_module = THIS_MODULE,
@@ -679,51 +634,14 @@ static struct skcipher_alg aes_algs[] = { {
.decrypt = xts_decrypt,
} };
-static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
-
static void aes_exit(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++)
- simd_skcipher_free(aes_simd_algs[i]);
-
crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
static int __init aes_init(void)
{
- struct simd_skcipher_alg *simd;
- const char *basename;
- const char *algname;
- const char *drvname;
- int err;
- int i;
-
- err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
- if (err)
- return err;
-
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
- continue;
-
- algname = aes_algs[i].base.cra_name + 2;
- drvname = aes_algs[i].base.cra_driver_name + 2;
- basename = aes_algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
- err = PTR_ERR(simd);
- if (IS_ERR(simd))
- goto unregister_simds;
-
- aes_simd_algs[i] = simd;
- }
-
- return 0;
-
-unregister_simds:
- aes_exit();
- return err;
+ return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
module_cpu_feature_match(AES, aes_init);
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index f6be80b5938b..c60104dc1585 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -8,8 +8,6 @@
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/aes.h>
-#include <crypto/ctr.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
@@ -59,11 +57,6 @@ struct aesbs_xts_ctx {
struct crypto_aes_ctx tweak_key;
};
-struct aesbs_ctr_ctx {
- struct aesbs_ctx key; /* must be first member */
- struct crypto_aes_ctx fallback;
-};
-
static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -200,25 +193,6 @@ static int cbc_decrypt(struct skcipher_request *req)
return err;
}
-static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
- unsigned int key_len)
-{
- struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = aes_expandkey(&ctx->fallback, in_key, key_len);
- if (err)
- return err;
-
- ctx->key.rounds = 6 + key_len / 4;
-
- kernel_neon_begin();
- aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
- kernel_neon_end();
-
- return 0;
-}
-
static int ctr_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -254,21 +228,6 @@ static int ctr_encrypt(struct skcipher_request *req)
return err;
}
-static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
-{
- struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- __aes_arm_encrypt(ctx->fallback.key_enc, ctx->key.rounds, src, dst);
-}
-
-static int ctr_encrypt_sync(struct skcipher_request *req)
-{
- if (!crypto_simd_usable())
- return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
-
- return ctr_encrypt(req);
-}
-
static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -374,13 +333,12 @@ static int xts_decrypt(struct skcipher_request *req)
}
static struct skcipher_alg aes_algs[] = { {
- .base.cra_name = "__ecb(aes)",
- .base.cra_driver_name = "__ecb-aes-neonbs",
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -389,13 +347,12 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(aes)",
- .base.cra_driver_name = "__cbc-aes-neonbs",
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -405,13 +362,12 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}, {
- .base.cra_name = "__ctr(aes)",
- .base.cra_driver_name = "__ctr-aes-neonbs",
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aesbs_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -422,29 +378,12 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = ctr_encrypt,
.decrypt = ctr_encrypt,
}, {
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "ctr-aes-neonbs-sync",
- .base.cra_priority = 250 - 1,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct aesbs_ctr_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .chunksize = AES_BLOCK_SIZE,
- .walksize = 8 * AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aesbs_ctr_setkey_sync,
- .encrypt = ctr_encrypt_sync,
- .decrypt = ctr_encrypt_sync,
-}, {
- .base.cra_name = "__xts(aes)",
- .base.cra_driver_name = "__xts-aes-neonbs",
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_xts_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -455,55 +394,18 @@ static struct skcipher_alg aes_algs[] = { {
.decrypt = xts_decrypt,
} };
-static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
-
static void aes_exit(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++)
- if (aes_simd_algs[i])
- simd_skcipher_free(aes_simd_algs[i]);
-
crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
static int __init aes_init(void)
{
- struct simd_skcipher_alg *simd;
- const char *basename;
- const char *algname;
- const char *drvname;
- int err;
- int i;
-
if (!(elf_hwcap & HWCAP_NEON))
return -ENODEV;
- err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
- if (err)
- return err;
-
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
- continue;
-
- algname = aes_algs[i].base.cra_name + 2;
- drvname = aes_algs[i].base.cra_driver_name + 2;
- basename = aes_algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
- err = PTR_ERR(simd);
- if (IS_ERR(simd))
- goto unregister_simds;
-
- aes_simd_algs[i] = simd;
- }
- return 0;
-
-unregister_simds:
- aes_exit();
- return err;
+ return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
-late_initcall(aes_init);
+module_init(aes_init);
module_exit(aes_exit);
diff --git a/arch/arm/crypto/blake2b-neon-glue.c b/arch/arm/crypto/blake2b-neon-glue.c
index 4b59d027ba4a..2ff443a91724 100644
--- a/arch/arm/crypto/blake2b-neon-glue.c
+++ b/arch/arm/crypto/blake2b-neon-glue.c
@@ -7,7 +7,6 @@
#include <crypto/internal/blake2b.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <linux/module.h>
#include <linux/sizes.h>
@@ -21,11 +20,6 @@ asmlinkage void blake2b_compress_neon(struct blake2b_state *state,
static void blake2b_compress_arch(struct blake2b_state *state,
const u8 *block, size_t nblocks, u32 inc)
{
- if (!crypto_simd_usable()) {
- blake2b_compress_generic(state, block, nblocks, inc);
- return;
- }
-
do {
const size_t blocks = min_t(size_t, nblocks,
SZ_4K / BLAKE2B_BLOCK_SIZE);
@@ -42,12 +36,14 @@ static void blake2b_compress_arch(struct blake2b_state *state,
static int crypto_blake2b_update_neon(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2b_update(desc, in, inlen, blake2b_compress_arch);
+ return crypto_blake2b_update_bo(desc, in, inlen, blake2b_compress_arch);
}
-static int crypto_blake2b_final_neon(struct shash_desc *desc, u8 *out)
+static int crypto_blake2b_finup_neon(struct shash_desc *desc, const u8 *in,
+ unsigned int inlen, u8 *out)
{
- return crypto_blake2b_final(desc, out, blake2b_compress_arch);
+ return crypto_blake2b_finup(desc, in, inlen, out,
+ blake2b_compress_arch);
}
#define BLAKE2B_ALG(name, driver_name, digest_size) \
@@ -55,7 +51,9 @@ static int crypto_blake2b_final_neon(struct shash_desc *desc, u8 *out)
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 200, \
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY | \
+ CRYPTO_AHASH_ALG_BLOCK_ONLY | \
+ CRYPTO_AHASH_ALG_FINAL_NONZERO, \
.base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
@@ -63,8 +61,9 @@ static int crypto_blake2b_final_neon(struct shash_desc *desc, u8 *out)
.setkey = crypto_blake2b_setkey, \
.init = crypto_blake2b_init, \
.update = crypto_blake2b_update_neon, \
- .final = crypto_blake2b_final_neon, \
+ .finup = crypto_blake2b_finup_neon, \
.descsize = sizeof(struct blake2b_state), \
+ .statesize = BLAKE2B_STATE_SIZE, \
}
static struct shash_alg blake2b_neon_algs[] = {
diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c
deleted file mode 100644
index 50e635512046..000000000000
--- a/arch/arm/crypto/chacha-glue.c
+++ /dev/null
@@ -1,352 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ARM NEON accelerated ChaCha and XChaCha stream ciphers,
- * including ChaCha20 (RFC7539)
- *
- * Copyright (C) 2016-2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
- * Copyright (C) 2015 Martin Willi
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/jump_label.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/cputype.h>
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-
-asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
- int nrounds);
-asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
- int nrounds, unsigned int nbytes);
-asmlinkage void hchacha_block_arm(const u32 *state, u32 *out, int nrounds);
-asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
-
-asmlinkage void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes,
- const u32 *state, int nrounds);
-
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_neon);
-
-static inline bool neon_usable(void)
-{
- return static_branch_likely(&use_neon) && crypto_simd_usable();
-}
-
-static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds)
-{
- u8 buf[CHACHA_BLOCK_SIZE];
-
- while (bytes > CHACHA_BLOCK_SIZE) {
- unsigned int l = min(bytes, CHACHA_BLOCK_SIZE * 4U);
-
- chacha_4block_xor_neon(state, dst, src, nrounds, l);
- bytes -= l;
- src += l;
- dst += l;
- state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE);
- }
- if (bytes) {
- const u8 *s = src;
- u8 *d = dst;
-
- if (bytes != CHACHA_BLOCK_SIZE)
- s = d = memcpy(buf, src, bytes);
- chacha_block_xor_neon(state, d, s, nrounds);
- if (d != dst)
- memcpy(dst, buf, bytes);
- state[12]++;
- }
-}
-
-void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
-{
- if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) {
- hchacha_block_arm(state, stream, nrounds);
- } else {
- kernel_neon_begin();
- hchacha_block_neon(state, stream, nrounds);
- kernel_neon_end();
- }
-}
-EXPORT_SYMBOL(hchacha_block_arch);
-
-void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
- int nrounds)
-{
- if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable() ||
- bytes <= CHACHA_BLOCK_SIZE) {
- chacha_doarm(dst, src, bytes, state, nrounds);
- state[12] += DIV_ROUND_UP(bytes, CHACHA_BLOCK_SIZE);
- return;
- }
-
- do {
- unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
-
- kernel_neon_begin();
- chacha_doneon(state, dst, src, todo, nrounds);
- kernel_neon_end();
-
- bytes -= todo;
- src += todo;
- dst += todo;
- } while (bytes);
-}
-EXPORT_SYMBOL(chacha_crypt_arch);
-
-static int chacha_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv,
- bool neon)
-{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
- chacha_doarm(walk.dst.virt.addr, walk.src.virt.addr,
- nbytes, state, ctx->nrounds);
- state[12] += DIV_ROUND_UP(nbytes, CHACHA_BLOCK_SIZE);
- } else {
- kernel_neon_begin();
- chacha_doneon(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes, ctx->nrounds);
- kernel_neon_end();
- }
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int do_chacha(struct skcipher_request *req, bool neon)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_stream_xor(req, ctx, req->iv, neon);
-}
-
-static int chacha_arm(struct skcipher_request *req)
-{
- return do_chacha(req, false);
-}
-
-static int chacha_neon(struct skcipher_request *req)
-{
- return do_chacha(req, neon_usable());
-}
-
-static int do_xchacha(struct skcipher_request *req, bool neon)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- chacha_init(state, ctx->key, req->iv);
-
- if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
- hchacha_block_arm(state, subctx.key, ctx->nrounds);
- } else {
- kernel_neon_begin();
- hchacha_block_neon(state, subctx.key, ctx->nrounds);
- kernel_neon_end();
- }
- subctx.nrounds = ctx->nrounds;
-
- memcpy(&real_iv[0], req->iv + 24, 8);
- memcpy(&real_iv[8], req->iv + 16, 8);
- return chacha_stream_xor(req, &subctx, real_iv, neon);
-}
-
-static int xchacha_arm(struct skcipher_request *req)
-{
- return do_xchacha(req, false);
-}
-
-static int xchacha_neon(struct skcipher_request *req)
-{
- return do_xchacha(req, neon_usable());
-}
-
-static struct skcipher_alg arm_algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-arm",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_arm,
- .decrypt = chacha_arm,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-arm",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_arm,
- .decrypt = xchacha_arm,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-arm",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_arm,
- .decrypt = xchacha_arm,
- },
-};
-
-static struct skcipher_alg neon_algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_neon,
- .decrypt = chacha_neon,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_neon,
- .decrypt = xchacha_neon,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_neon,
- .decrypt = xchacha_neon,
- }
-};
-
-static int __init chacha_simd_mod_init(void)
-{
- int err = 0;
-
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) {
- err = crypto_register_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
- if (err)
- return err;
- }
-
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) {
- int i;
-
- switch (read_cpuid_part()) {
- case ARM_CPU_PART_CORTEX_A7:
- case ARM_CPU_PART_CORTEX_A5:
- /*
- * The Cortex-A7 and Cortex-A5 do not perform well with
- * the NEON implementation but do incredibly with the
- * scalar one and use less power.
- */
- for (i = 0; i < ARRAY_SIZE(neon_algs); i++)
- neon_algs[i].base.cra_priority = 0;
- break;
- default:
- static_branch_enable(&use_neon);
- }
-
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) {
- err = crypto_register_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
- if (err)
- crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
- }
- }
- return err;
-}
-
-static void __exit chacha_simd_mod_fini(void)
-{
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) {
- crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON))
- crypto_unregister_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
- }
-}
-
-module_init(chacha_simd_mod_init);
-module_exit(chacha_simd_mod_fini);
-
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (scalar and NEON accelerated)");
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-arm");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-arm");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-arm");
-#ifdef CONFIG_KERNEL_MODE_NEON
-MODULE_ALIAS_CRYPTO("chacha20-neon");
-MODULE_ALIAS_CRYPTO("xchacha20-neon");
-MODULE_ALIAS_CRYPTO("xchacha12-neon");
-#endif
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index aabfcf522a2c..a52dcc8c1e33 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -8,22 +8,22 @@
#include <asm/hwcap.h>
#include <asm/neon.h>
-#include <asm/simd.h>
-#include <linux/unaligned.h>
#include <crypto/aes.h>
-#include <crypto/gcm.h>
#include <crypto/b128ops.h>
-#include <crypto/cryptd.h>
+#include <crypto/gcm.h>
+#include <crypto/gf128mul.h>
+#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/gf128mul.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
-#include <linux/crypto.h>
+#include <linux/errno.h>
#include <linux/jump_label.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
MODULE_DESCRIPTION("GHASH hash function using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
@@ -32,9 +32,6 @@ MODULE_ALIAS_CRYPTO("ghash");
MODULE_ALIAS_CRYPTO("gcm(aes)");
MODULE_ALIAS_CRYPTO("rfc4106(gcm(aes))");
-#define GHASH_BLOCK_SIZE 16
-#define GHASH_DIGEST_SIZE 16
-
#define RFC4106_NONCE_SIZE 4
struct ghash_key {
@@ -49,10 +46,8 @@ struct gcm_key {
u8 nonce[]; // for RFC4106 nonce
};
-struct ghash_desc_ctx {
+struct arm_ghash_desc_ctx {
u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
- u8 buf[GHASH_BLOCK_SIZE];
- u32 count;
};
asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
@@ -65,9 +60,9 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_p64);
static int ghash_init(struct shash_desc *desc)
{
- struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
- *ctx = (struct ghash_desc_ctx){};
+ *ctx = (struct arm_ghash_desc_ctx){};
return 0;
}
@@ -85,52 +80,49 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
static int ghash_update(struct shash_desc *desc, const u8 *src,
unsigned int len)
{
- struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
- unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
-
- ctx->count += len;
+ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ int blocks;
- if ((partial + len) >= GHASH_BLOCK_SIZE) {
- struct ghash_key *key = crypto_shash_ctx(desc->tfm);
- int blocks;
+ blocks = len / GHASH_BLOCK_SIZE;
+ ghash_do_update(blocks, ctx->digest, src, key, NULL);
+ return len - blocks * GHASH_BLOCK_SIZE;
+}
- if (partial) {
- int p = GHASH_BLOCK_SIZE - partial;
+static int ghash_export(struct shash_desc *desc, void *out)
+{
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ u8 *dst = out;
- memcpy(ctx->buf + partial, src, p);
- src += p;
- len -= p;
- }
+ put_unaligned_be64(ctx->digest[1], dst);
+ put_unaligned_be64(ctx->digest[0], dst + 8);
+ return 0;
+}
- blocks = len / GHASH_BLOCK_SIZE;
- len %= GHASH_BLOCK_SIZE;
+static int ghash_import(struct shash_desc *desc, const void *in)
+{
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ const u8 *src = in;
- ghash_do_update(blocks, ctx->digest, src, key,
- partial ? ctx->buf : NULL);
- src += blocks * GHASH_BLOCK_SIZE;
- partial = 0;
- }
- if (len)
- memcpy(ctx->buf + partial, src, len);
+ ctx->digest[1] = get_unaligned_be64(src);
+ ctx->digest[0] = get_unaligned_be64(src + 8);
return 0;
}
-static int ghash_final(struct shash_desc *desc, u8 *dst)
+static int ghash_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
{
- struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
- unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
+ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
- if (partial) {
- struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+ if (len) {
+ u8 buf[GHASH_BLOCK_SIZE] = {};
- memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
- ghash_do_update(1, ctx->digest, ctx->buf, key, NULL);
+ memcpy(buf, src, len);
+ ghash_do_update(1, ctx->digest, buf, key, NULL);
+ memzero_explicit(buf, sizeof(buf));
}
- put_unaligned_be64(ctx->digest[1], dst);
- put_unaligned_be64(ctx->digest[0], dst + 8);
-
- *ctx = (struct ghash_desc_ctx){};
- return 0;
+ return ghash_export(desc, dst);
}
static void ghash_reflect(u64 h[], const be128 *k)
@@ -175,13 +167,17 @@ static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
- .final = ghash_final,
+ .finup = ghash_finup,
.setkey = ghash_setkey,
- .descsize = sizeof(struct ghash_desc_ctx),
+ .export = ghash_export,
+ .import = ghash_import,
+ .descsize = sizeof(struct arm_ghash_desc_ctx),
+ .statesize = sizeof(struct ghash_desc_ctx),
.base.cra_name = "ghash",
.base.cra_driver_name = "ghash-ce",
.base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = GHASH_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]),
.base.cra_module = THIS_MODULE,
@@ -317,9 +313,6 @@ static int gcm_encrypt(struct aead_request *req, const u8 *iv, u32 assoclen)
u8 *tag, *dst;
int tail, err;
- if (WARN_ON_ONCE(!may_use_simd()))
- return -EBUSY;
-
err = skcipher_walk_aead_encrypt(&walk, req, false);
kernel_neon_begin();
@@ -409,9 +402,6 @@ static int gcm_decrypt(struct aead_request *req, const u8 *iv, u32 assoclen)
u8 *tag, *dst;
int tail, err, ret;
- if (WARN_ON_ONCE(!may_use_simd()))
- return -EBUSY;
-
scatterwalk_map_and_copy(otag, req->src,
req->assoclen + req->cryptlen - authsize,
authsize, 0);
diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
deleted file mode 100644
index 4464ffbf8fd1..000000000000
--- a/arch/arm/crypto/poly1305-glue.c
+++ /dev/null
@@ -1,274 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * OpenSSL/Cryptogams accelerated Poly1305 transform for ARM
- *
- * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
- */
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <linux/unaligned.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/poly1305.h>
-#include <crypto/internal/simd.h>
-#include <linux/cpufeature.h>
-#include <linux/crypto.h>
-#include <linux/jump_label.h>
-#include <linux/module.h>
-
-void poly1305_init_arm(void *state, const u8 *key);
-void poly1305_blocks_arm(void *state, const u8 *src, u32 len, u32 hibit);
-void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit);
-void poly1305_emit_arm(void *state, u8 *digest, const u32 *nonce);
-
-void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
-{
-}
-
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
-
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
-{
- poly1305_init_arm(&dctx->h, key);
- dctx->s[0] = get_unaligned_le32(key + 16);
- dctx->s[1] = get_unaligned_le32(key + 20);
- dctx->s[2] = get_unaligned_le32(key + 24);
- dctx->s[3] = get_unaligned_le32(key + 28);
- dctx->buflen = 0;
-}
-EXPORT_SYMBOL(poly1305_init_arch);
-
-static int arm_poly1305_init(struct shash_desc *desc)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- dctx->buflen = 0;
- dctx->rset = 0;
- dctx->sset = false;
-
- return 0;
-}
-
-static void arm_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
- u32 len, u32 hibit, bool do_neon)
-{
- if (unlikely(!dctx->sset)) {
- if (!dctx->rset) {
- poly1305_init_arm(&dctx->h, src);
- src += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- dctx->rset = 1;
- }
- if (len >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(src + 0);
- dctx->s[1] = get_unaligned_le32(src + 4);
- dctx->s[2] = get_unaligned_le32(src + 8);
- dctx->s[3] = get_unaligned_le32(src + 12);
- src += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- if (len < POLY1305_BLOCK_SIZE)
- return;
- }
-
- len &= ~(POLY1305_BLOCK_SIZE - 1);
-
- if (static_branch_likely(&have_neon) && likely(do_neon))
- poly1305_blocks_neon(&dctx->h, src, len, hibit);
- else
- poly1305_blocks_arm(&dctx->h, src, len, hibit);
-}
-
-static void arm_poly1305_do_update(struct poly1305_desc_ctx *dctx,
- const u8 *src, u32 len, bool do_neon)
-{
- if (unlikely(dctx->buflen)) {
- u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen);
-
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- len -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- arm_poly1305_blocks(dctx, dctx->buf,
- POLY1305_BLOCK_SIZE, 1, false);
- dctx->buflen = 0;
- }
- }
-
- if (likely(len >= POLY1305_BLOCK_SIZE)) {
- arm_poly1305_blocks(dctx, src, len, 1, do_neon);
- src += round_down(len, POLY1305_BLOCK_SIZE);
- len %= POLY1305_BLOCK_SIZE;
- }
-
- if (unlikely(len)) {
- dctx->buflen = len;
- memcpy(dctx->buf, src, len);
- }
-}
-
-static int arm_poly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- arm_poly1305_do_update(dctx, src, srclen, false);
- return 0;
-}
-
-static int __maybe_unused arm_poly1305_update_neon(struct shash_desc *desc,
- const u8 *src,
- unsigned int srclen)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- bool do_neon = crypto_simd_usable() && srclen > 128;
-
- if (static_branch_likely(&have_neon) && do_neon)
- kernel_neon_begin();
- arm_poly1305_do_update(dctx, src, srclen, do_neon);
- if (static_branch_likely(&have_neon) && do_neon)
- kernel_neon_end();
- return 0;
-}
-
-void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
- unsigned int nbytes)
-{
- bool do_neon = IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
- crypto_simd_usable();
-
- if (unlikely(dctx->buflen)) {
- u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen);
-
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- nbytes -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- poly1305_blocks_arm(&dctx->h, dctx->buf,
- POLY1305_BLOCK_SIZE, 1);
- dctx->buflen = 0;
- }
- }
-
- if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
- unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
-
- if (static_branch_likely(&have_neon) && do_neon) {
- do {
- unsigned int todo = min_t(unsigned int, len, SZ_4K);
-
- kernel_neon_begin();
- poly1305_blocks_neon(&dctx->h, src, todo, 1);
- kernel_neon_end();
-
- len -= todo;
- src += todo;
- } while (len);
- } else {
- poly1305_blocks_arm(&dctx->h, src, len, 1);
- src += len;
- }
- nbytes %= POLY1305_BLOCK_SIZE;
- }
-
- if (unlikely(nbytes)) {
- dctx->buflen = nbytes;
- memcpy(dctx->buf, src, nbytes);
- }
-}
-EXPORT_SYMBOL(poly1305_update_arch);
-
-void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
-{
- if (unlikely(dctx->buflen)) {
- dctx->buf[dctx->buflen++] = 1;
- memset(dctx->buf + dctx->buflen, 0,
- POLY1305_BLOCK_SIZE - dctx->buflen);
- poly1305_blocks_arm(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
- }
-
- poly1305_emit_arm(&dctx->h, dst, dctx->s);
- *dctx = (struct poly1305_desc_ctx){};
-}
-EXPORT_SYMBOL(poly1305_final_arch);
-
-static int arm_poly1305_final(struct shash_desc *desc, u8 *dst)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (unlikely(!dctx->sset))
- return -ENOKEY;
-
- poly1305_final_arch(dctx, dst);
- return 0;
-}
-
-static struct shash_alg arm_poly1305_algs[] = {{
- .init = arm_poly1305_init,
- .update = arm_poly1305_update,
- .final = arm_poly1305_final,
- .digestsize = POLY1305_DIGEST_SIZE,
- .descsize = sizeof(struct poly1305_desc_ctx),
-
- .base.cra_name = "poly1305",
- .base.cra_driver_name = "poly1305-arm",
- .base.cra_priority = 150,
- .base.cra_blocksize = POLY1305_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-#ifdef CONFIG_KERNEL_MODE_NEON
-}, {
- .init = arm_poly1305_init,
- .update = arm_poly1305_update_neon,
- .final = arm_poly1305_final,
- .digestsize = POLY1305_DIGEST_SIZE,
- .descsize = sizeof(struct poly1305_desc_ctx),
-
- .base.cra_name = "poly1305",
- .base.cra_driver_name = "poly1305-neon",
- .base.cra_priority = 200,
- .base.cra_blocksize = POLY1305_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-#endif
-}};
-
-static int __init arm_poly1305_mod_init(void)
-{
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
- (elf_hwcap & HWCAP_NEON))
- static_branch_enable(&have_neon);
- else if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
- /* register only the first entry */
- return crypto_register_shash(&arm_poly1305_algs[0]);
-
- return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
- crypto_register_shashes(arm_poly1305_algs,
- ARRAY_SIZE(arm_poly1305_algs)) : 0;
-}
-
-static void __exit arm_poly1305_mod_exit(void)
-{
- if (!IS_REACHABLE(CONFIG_CRYPTO_HASH))
- return;
- if (!static_branch_likely(&have_neon)) {
- crypto_unregister_shash(&arm_poly1305_algs[0]);
- return;
- }
- crypto_unregister_shashes(arm_poly1305_algs,
- ARRAY_SIZE(arm_poly1305_algs));
-}
-
-module_init(arm_poly1305_mod_init);
-module_exit(arm_poly1305_mod_exit);
-
-MODULE_DESCRIPTION("Accelerated Poly1305 transform for ARM");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("poly1305");
-MODULE_ALIAS_CRYPTO("poly1305-arm");
-MODULE_ALIAS_CRYPTO("poly1305-neon");
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
index de9100c67b37..fac07a4799de 100644
--- a/arch/arm/crypto/sha1-ce-glue.c
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -5,20 +5,14 @@
* Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
+#include <asm/neon.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
-#include <linux/crypto.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-
-#include "sha1.h"
-
MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
@@ -29,50 +23,36 @@ asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
- return sha1_update_arm(desc, data, len);
+ int remain;
kernel_neon_begin();
- sha1_base_do_update(desc, data, len, sha1_ce_transform);
+ remain = sha1_base_do_update_blocks(desc, data, len, sha1_ce_transform);
kernel_neon_end();
- return 0;
+ return remain;
}
static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!crypto_simd_usable())
- return sha1_finup_arm(desc, data, len, out);
-
kernel_neon_begin();
- if (len)
- sha1_base_do_update(desc, data, len, sha1_ce_transform);
- sha1_base_do_finalize(desc, sha1_ce_transform);
+ sha1_base_do_finup(desc, data, len, sha1_ce_transform);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
-static int sha1_ce_final(struct shash_desc *desc, u8 *out)
-{
- return sha1_ce_finup(desc, NULL, 0, out);
-}
-
static struct shash_alg alg = {
.init = sha1_base_init,
.update = sha1_ce_update,
- .final = sha1_ce_final,
.finup = sha1_ce_finup,
- .descsize = sizeof(struct sha1_state),
+ .descsize = SHA1_STATE_SIZE,
.digestsize = SHA1_DIGEST_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ce",
.cra_priority = 200,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha1.h b/arch/arm/crypto/sha1.h
deleted file mode 100644
index b1b7e21da2c3..000000000000
--- a/arch/arm/crypto/sha1.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_ARM_CRYPTO_SHA1_H
-#define ASM_ARM_CRYPTO_SHA1_H
-
-#include <linux/crypto.h>
-#include <crypto/sha1.h>
-
-extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
-
-#endif
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index 95a727bcd664..255da00c7d98 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -12,53 +12,42 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
-#include <asm/byteorder.h>
-
-#include "sha1.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
asmlinkage void sha1_block_data_order(struct sha1_state *digest,
const u8 *data, int rounds);
-int sha1_update_arm(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int sha1_update_arm(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
/* make sure signature matches sha1_block_fn() */
BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
- return sha1_base_do_update(desc, data, len, sha1_block_data_order);
+ return sha1_base_do_update_blocks(desc, data, len,
+ sha1_block_data_order);
}
-EXPORT_SYMBOL_GPL(sha1_update_arm);
-static int sha1_final(struct shash_desc *desc, u8 *out)
+static int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- sha1_base_do_finalize(desc, sha1_block_data_order);
+ sha1_base_do_finup(desc, data, len, sha1_block_data_order);
return sha1_base_finish(desc, out);
}
-int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- sha1_base_do_update(desc, data, len, sha1_block_data_order);
- return sha1_final(desc, out);
-}
-EXPORT_SYMBOL_GPL(sha1_finup_arm);
-
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = sha1_update_arm,
- .final = sha1_final,
.finup = sha1_finup_arm,
- .descsize = sizeof(struct sha1_state),
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-asm",
.cra_priority = 150,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index 9c70b87e69f7..d321850f22a6 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -13,18 +13,12 @@
* Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com>
*/
+#include <asm/neon.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-
-#include "sha1.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
asmlinkage void sha1_transform_neon(struct sha1_state *state_h,
const u8 *data, int rounds);
@@ -32,50 +26,37 @@ asmlinkage void sha1_transform_neon(struct sha1_state *state_h,
static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
- return sha1_update_arm(desc, data, len);
+ int remain;
kernel_neon_begin();
- sha1_base_do_update(desc, data, len, sha1_transform_neon);
+ remain = sha1_base_do_update_blocks(desc, data, len,
+ sha1_transform_neon);
kernel_neon_end();
- return 0;
+ return remain;
}
static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!crypto_simd_usable())
- return sha1_finup_arm(desc, data, len, out);
-
kernel_neon_begin();
- if (len)
- sha1_base_do_update(desc, data, len, sha1_transform_neon);
- sha1_base_do_finalize(desc, sha1_transform_neon);
+ sha1_base_do_finup(desc, data, len, sha1_transform_neon);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
-static int sha1_neon_final(struct shash_desc *desc, u8 *out)
-{
- return sha1_neon_finup(desc, NULL, 0, out);
-}
-
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = sha1_neon_update,
- .final = sha1_neon_final,
.finup = sha1_neon_finup,
- .descsize = sizeof(struct sha1_state),
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-neon",
.cra_priority = 250,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
deleted file mode 100644
index aeac45bfbf9f..000000000000
--- a/arch/arm/crypto/sha2-ce-glue.c
+++ /dev/null
@@ -1,109 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
- *
- * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <linux/cpufeature.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-
-#include <asm/hwcap.h>
-#include <asm/simd.h>
-#include <asm/neon.h>
-#include <linux/unaligned.h>
-
-#include "sha256_glue.h"
-
-MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-
-asmlinkage void sha2_ce_transform(struct sha256_state *sst, u8 const *src,
- int blocks);
-
-static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
- return crypto_sha256_arm_update(desc, data, len);
-
- kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
- kernel_neon_end();
-
- return 0;
-}
-
-static int sha2_ce_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- if (!crypto_simd_usable())
- return crypto_sha256_arm_finup(desc, data, len, out);
-
- kernel_neon_begin();
- if (len)
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
- sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
- kernel_neon_end();
-
- return sha256_base_finish(desc, out);
-}
-
-static int sha2_ce_final(struct shash_desc *desc, u8 *out)
-{
- return sha2_ce_finup(desc, NULL, 0, out);
-}
-
-static struct shash_alg algs[] = { {
- .init = sha224_base_init,
- .update = sha2_ce_update,
- .final = sha2_ce_final,
- .finup = sha2_ce_finup,
- .descsize = sizeof(struct sha256_state),
- .digestsize = SHA224_DIGEST_SIZE,
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "sha224-ce",
- .cra_priority = 300,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .init = sha256_base_init,
- .update = sha2_ce_update,
- .final = sha2_ce_final,
- .finup = sha2_ce_finup,
- .descsize = sizeof(struct sha256_state),
- .digestsize = SHA256_DIGEST_SIZE,
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-ce",
- .cra_priority = 300,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init sha2_ce_mod_init(void)
-{
- return crypto_register_shashes(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit sha2_ce_mod_fini(void)
-{
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-}
-
-module_cpu_feature_match(SHA2, sha2_ce_mod_init);
-module_exit(sha2_ce_mod_fini);
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
deleted file mode 100644
index f85933fdec75..000000000000
--- a/arch/arm/crypto/sha256_glue.c
+++ /dev/null
@@ -1,117 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Glue code for the SHA256 Secure Hash Algorithm assembly implementation
- * using optimized ARM assembler and NEON instructions.
- *
- * Copyright © 2015 Google Inc.
- *
- * This file is based on sha256_ssse3_glue.c:
- * Copyright (C) 2013 Intel Corporation
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- */
-
-#include <crypto/internal/hash.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <asm/simd.h>
-#include <asm/neon.h>
-
-#include "sha256_glue.h"
-
-asmlinkage void sha256_block_data_order(struct sha256_state *state,
- const u8 *data, int num_blks);
-
-int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- /* make sure casting to sha256_block_fn() is safe */
- BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
-
- return sha256_base_do_update(desc, data, len, sha256_block_data_order);
-}
-EXPORT_SYMBOL(crypto_sha256_arm_update);
-
-static int crypto_sha256_arm_final(struct shash_desc *desc, u8 *out)
-{
- sha256_base_do_finalize(desc, sha256_block_data_order);
- return sha256_base_finish(desc, out);
-}
-
-int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- sha256_base_do_update(desc, data, len, sha256_block_data_order);
- return crypto_sha256_arm_final(desc, out);
-}
-EXPORT_SYMBOL(crypto_sha256_arm_finup);
-
-static struct shash_alg algs[] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = crypto_sha256_arm_update,
- .final = crypto_sha256_arm_final,
- .finup = crypto_sha256_arm_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-asm",
- .cra_priority = 150,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = crypto_sha256_arm_update,
- .final = crypto_sha256_arm_final,
- .finup = crypto_sha256_arm_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "sha224-asm",
- .cra_priority = 150,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init sha256_mod_init(void)
-{
- int res = crypto_register_shashes(algs, ARRAY_SIZE(algs));
-
- if (res < 0)
- return res;
-
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) {
- res = crypto_register_shashes(sha256_neon_algs,
- ARRAY_SIZE(sha256_neon_algs));
-
- if (res < 0)
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
- }
-
- return res;
-}
-
-static void __exit sha256_mod_fini(void)
-{
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon())
- crypto_unregister_shashes(sha256_neon_algs,
- ARRAY_SIZE(sha256_neon_algs));
-}
-
-module_init(sha256_mod_init);
-module_exit(sha256_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm (ARM), including NEON");
-
-MODULE_ALIAS_CRYPTO("sha256");
diff --git a/arch/arm/crypto/sha256_glue.h b/arch/arm/crypto/sha256_glue.h
deleted file mode 100644
index 9f0d578bab5f..000000000000
--- a/arch/arm/crypto/sha256_glue.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CRYPTO_SHA256_GLUE_H
-#define _CRYPTO_SHA256_GLUE_H
-
-#include <linux/crypto.h>
-
-extern struct shash_alg sha256_neon_algs[2];
-
-int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
-#endif /* _CRYPTO_SHA256_GLUE_H */
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
deleted file mode 100644
index ccdcfff71910..000000000000
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ /dev/null
@@ -1,92 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Glue code for the SHA256 Secure Hash Algorithm assembly implementation
- * using NEON instructions.
- *
- * Copyright © 2015 Google Inc.
- *
- * This file is based on sha512_neon_glue.c:
- * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
- */
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <asm/byteorder.h>
-#include <asm/simd.h>
-#include <asm/neon.h>
-
-#include "sha256_glue.h"
-
-asmlinkage void sha256_block_data_order_neon(struct sha256_state *digest,
- const u8 *data, int num_blks);
-
-static int crypto_sha256_neon_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
- return crypto_sha256_arm_update(desc, data, len);
-
- kernel_neon_begin();
- sha256_base_do_update(desc, data, len, sha256_block_data_order_neon);
- kernel_neon_end();
-
- return 0;
-}
-
-static int crypto_sha256_neon_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- if (!crypto_simd_usable())
- return crypto_sha256_arm_finup(desc, data, len, out);
-
- kernel_neon_begin();
- if (len)
- sha256_base_do_update(desc, data, len,
- sha256_block_data_order_neon);
- sha256_base_do_finalize(desc, sha256_block_data_order_neon);
- kernel_neon_end();
-
- return sha256_base_finish(desc, out);
-}
-
-static int crypto_sha256_neon_final(struct shash_desc *desc, u8 *out)
-{
- return crypto_sha256_neon_finup(desc, NULL, 0, out);
-}
-
-struct shash_alg sha256_neon_algs[] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = crypto_sha256_neon_update,
- .final = crypto_sha256_neon_final,
- .finup = crypto_sha256_neon_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-neon",
- .cra_priority = 250,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = crypto_sha256_neon_update,
- .final = crypto_sha256_neon_final,
- .finup = crypto_sha256_neon_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "sha224-neon",
- .cra_priority = 250,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
index 1be5bd498af3..f8a6480889b1 100644
--- a/arch/arm/crypto/sha512-glue.c
+++ b/arch/arm/crypto/sha512-glue.c
@@ -5,15 +5,14 @@
* Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
+#include <asm/hwcap.h>
+#include <asm/neon.h>
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
-#include <linux/crypto.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-
#include "sha512.h"
MODULE_DESCRIPTION("Accelerated SHA-384/SHA-512 secure hash for ARM");
@@ -28,50 +27,47 @@ MODULE_ALIAS_CRYPTO("sha512-arm");
asmlinkage void sha512_block_data_order(struct sha512_state *state,
u8 const *src, int blocks);
-int sha512_arm_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int sha512_arm_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- return sha512_base_do_update(desc, data, len, sha512_block_data_order);
+ return sha512_base_do_update_blocks(desc, data, len,
+ sha512_block_data_order);
}
-static int sha512_arm_final(struct shash_desc *desc, u8 *out)
+static int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- sha512_base_do_finalize(desc, sha512_block_data_order);
+ sha512_base_do_finup(desc, data, len, sha512_block_data_order);
return sha512_base_finish(desc, out);
}
-int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- sha512_base_do_update(desc, data, len, sha512_block_data_order);
- return sha512_arm_final(desc, out);
-}
-
static struct shash_alg sha512_arm_algs[] = { {
.init = sha384_base_init,
.update = sha512_arm_update,
- .final = sha512_arm_final,
.finup = sha512_arm_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA384_DIGEST_SIZE,
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-arm",
.cra_priority = 250,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.init = sha512_base_init,
.update = sha512_arm_update,
- .final = sha512_arm_final,
.finup = sha512_arm_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA512_DIGEST_SIZE,
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-arm",
.cra_priority = 250,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
index c6e58fe475ac..bd528077fefb 100644
--- a/arch/arm/crypto/sha512-neon-glue.c
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -5,16 +5,13 @@
* Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
+#include <asm/neon.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
-#include <linux/crypto.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/simd.h>
-#include <asm/neon.h>
-
#include "sha512.h"
MODULE_ALIAS_CRYPTO("sha384-neon");
@@ -26,51 +23,36 @@ asmlinkage void sha512_block_data_order_neon(struct sha512_state *state,
static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
- return sha512_arm_update(desc, data, len);
+ int remain;
kernel_neon_begin();
- sha512_base_do_update(desc, data, len, sha512_block_data_order_neon);
+ remain = sha512_base_do_update_blocks(desc, data, len,
+ sha512_block_data_order_neon);
kernel_neon_end();
-
- return 0;
+ return remain;
}
static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!crypto_simd_usable())
- return sha512_arm_finup(desc, data, len, out);
-
kernel_neon_begin();
- if (len)
- sha512_base_do_update(desc, data, len,
- sha512_block_data_order_neon);
- sha512_base_do_finalize(desc, sha512_block_data_order_neon);
+ sha512_base_do_finup(desc, data, len, sha512_block_data_order_neon);
kernel_neon_end();
-
return sha512_base_finish(desc, out);
}
-static int sha512_neon_final(struct shash_desc *desc, u8 *out)
-{
- return sha512_neon_finup(desc, NULL, 0, out);
-}
-
struct shash_alg sha512_neon_algs[] = { {
.init = sha384_base_init,
.update = sha512_neon_update,
- .final = sha512_neon_final,
.finup = sha512_neon_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA384_DIGEST_SIZE,
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-neon",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@@ -78,14 +60,15 @@ struct shash_alg sha512_neon_algs[] = { {
}, {
.init = sha512_base_init,
.update = sha512_neon_update,
- .final = sha512_neon_final,
.finup = sha512_neon_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA512_DIGEST_SIZE,
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-neon",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha512.h b/arch/arm/crypto/sha512.h
index e14572be76d1..eeaee52cda69 100644
--- a/arch/arm/crypto/sha512.h
+++ b/arch/arm/crypto/sha512.h
@@ -1,9 +1,3 @@
/* SPDX-License-Identifier: GPL-2.0 */
-int sha512_arm_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
-
extern struct shash_alg sha512_neon_algs[2];
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index fa5939eb9864..7b71a3d414b7 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -209,7 +209,6 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
-#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
/* No hardware dirty/accessed bits -- generic_pmdp_establish() fits */
#define pmdp_establish generic_pmdp_establish
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 6b986ef6042f..7f1c3b4e3e04 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -168,7 +168,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
-#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
diff --git a/arch/arm/include/asm/simd.h b/arch/arm/include/asm/simd.h
index 82191dbd7e78..d37559762180 100644
--- a/arch/arm/include/asm/simd.h
+++ b/arch/arm/include/asm/simd.h
@@ -1,8 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SIMD_H
+#define _ASM_SIMD_H
-#include <linux/hardirq.h>
+#include <linux/compiler_attributes.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
static __must_check inline bool may_use_simd(void)
{
return IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && !in_hardirq();
}
+
+#endif /* _ASM_SIMD_H */
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index fe4326d938c1..18b102a30741 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -68,6 +68,30 @@ static inline void syscall_set_return_value(struct task_struct *task,
regs->ARM_r0 = (long) error ? error : val;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ if (nr == -1) {
+ task_thread_info(task)->abi_syscall = -1;
+ /*
+ * When the syscall number is set to -1, the syscall will be
+ * skipped. In this case the syscall return value has to be
+ * set explicitly, otherwise the first syscall argument is
+ * returned as the syscall return value.
+ */
+ syscall_set_return_value(task, regs, -ENOSYS, 0);
+ return;
+ }
+ if ((IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT))) {
+ task_thread_info(task)->abi_syscall = nr;
+ return;
+ }
+ task_thread_info(task)->abi_syscall =
+ (task_thread_info(task)->abi_syscall & ~__NR_SYSCALL_MASK) |
+ (nr & __NR_SYSCALL_MASK);
+}
+
#define SYSCALL_MAX_ARGS 7
static inline void syscall_get_arguments(struct task_struct *task,
@@ -80,6 +104,19 @@ static inline void syscall_get_arguments(struct task_struct *task,
memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ memcpy(&regs->ARM_r0, args, 6 * sizeof(args[0]));
+ /*
+ * Also copy the first argument into ARM_ORIG_r0
+ * so that syscall_get_arguments() would return it
+ * instead of the previous value.
+ */
+ regs->ARM_ORIG_r0 = regs->ARM_r0;
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
/* ARM tasks don't change audit architectures on the fly. */
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 007874320937..91ea0e29107a 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -5,6 +5,8 @@
# Copyright (C) 1995-2000 Russell King
#
+obj-y += crypto/
+
lib-y := changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
delay.o delay-loop.o findbit.o memchr.o memcpy.o \
@@ -47,7 +49,7 @@ endif
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
obj-$(CONFIG_CRC32_ARCH) += crc32-arm.o
-crc32-arm-y := crc32-glue.o crc32-core.o
+crc32-arm-y := crc32.o crc32-core.o
obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-arm.o
-crc-t10dif-arm-y := crc-t10dif-glue.o crc-t10dif-core.o
+crc-t10dif-arm-y := crc-t10dif.o crc-t10dif-core.o
diff --git a/arch/arm/lib/crc-t10dif-glue.c b/arch/arm/lib/crc-t10dif.c
index 6efad3d78284..1093f8ec13b0 100644
--- a/arch/arm/lib/crc-t10dif-glue.c
+++ b/arch/arm/lib/crc-t10dif.c
@@ -16,8 +16,8 @@
#include <asm/neon.h>
#include <asm/simd.h>
-static DEFINE_STATIC_KEY_FALSE(have_neon);
-static DEFINE_STATIC_KEY_FALSE(have_pmull);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pmull);
#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
@@ -60,7 +60,7 @@ static int __init crc_t10dif_arm_init(void)
}
return 0;
}
-arch_initcall(crc_t10dif_arm_init);
+subsys_initcall(crc_t10dif_arm_init);
static void __exit crc_t10dif_arm_exit(void)
{
diff --git a/arch/arm/lib/crc32-glue.c b/arch/arm/lib/crc32.c
index 4340351dbde8..f2bef8849c7c 100644
--- a/arch/arm/lib/crc32-glue.c
+++ b/arch/arm/lib/crc32.c
@@ -18,8 +18,8 @@
#include <asm/neon.h>
#include <asm/simd.h>
-static DEFINE_STATIC_KEY_FALSE(have_crc32);
-static DEFINE_STATIC_KEY_FALSE(have_pmull);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pmull);
#define PMULL_MIN_LEN 64 /* min size of buffer for pmull functions */
@@ -103,7 +103,7 @@ static int __init crc32_arm_init(void)
static_branch_enable(&have_pmull);
return 0;
}
-arch_initcall(crc32_arm_init);
+subsys_initcall(crc32_arm_init);
static void __exit crc32_arm_exit(void)
{
diff --git a/arch/arm/lib/crypto/.gitignore b/arch/arm/lib/crypto/.gitignore
new file mode 100644
index 000000000000..12d74d8b03d0
--- /dev/null
+++ b/arch/arm/lib/crypto/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+poly1305-core.S
+sha256-core.S
diff --git a/arch/arm/lib/crypto/Kconfig b/arch/arm/lib/crypto/Kconfig
new file mode 100644
index 000000000000..d1ad664f0c67
--- /dev/null
+++ b/arch/arm/lib/crypto/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CRYPTO_BLAKE2S_ARM
+ bool "Hash functions: BLAKE2s"
+ select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+ help
+ BLAKE2s cryptographic hash function (RFC 7693)
+
+ Architecture: arm
+
+ This is faster than the generic implementations of BLAKE2s and
+ BLAKE2b, but slower than the NEON implementation of BLAKE2b.
+ There is no NEON implementation of BLAKE2s, since NEON doesn't
+ really help with it.
+
+config CRYPTO_CHACHA20_NEON
+ tristate
+ default CRYPTO_LIB_CHACHA
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+
+config CRYPTO_POLY1305_ARM
+ tristate
+ default CRYPTO_LIB_POLY1305
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
+
+config CRYPTO_SHA256_ARM
+ tristate
+ depends on !CPU_V7M
+ default CRYPTO_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
diff --git a/arch/arm/lib/crypto/Makefile b/arch/arm/lib/crypto/Makefile
new file mode 100644
index 000000000000..431f77c3ff6f
--- /dev/null
+++ b/arch/arm/lib/crypto/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += libblake2s-arm.o
+libblake2s-arm-y := blake2s-core.o blake2s-glue.o
+
+obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
+chacha-neon-y := chacha-scalar-core.o chacha-glue.o
+chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o
+
+obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o
+poly1305-arm-y := poly1305-core.o poly1305-glue.o
+
+obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
+sha256-arm-y := sha256.o sha256-core.o
+sha256-arm-$(CONFIG_KERNEL_MODE_NEON) += sha256-ce.o
+
+quiet_cmd_perl = PERL $@
+ cmd_perl = $(PERL) $(<) > $(@)
+
+$(obj)/%-core.S: $(src)/%-armv4.pl
+ $(call cmd,perl)
+
+clean-files += poly1305-core.S sha256-core.S
+
+aflags-thumb2-$(CONFIG_THUMB2_KERNEL) := -U__thumb2__ -D__thumb2__=1
+
+# massage the perlasm code a bit so we only get the NEON routine if we need it
+poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5
+poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7
+AFLAGS_poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y)
+
+AFLAGS_sha256-core.o += $(aflags-thumb2-y)
diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/lib/crypto/blake2s-core.S
index df40e46601f1..df40e46601f1 100644
--- a/arch/arm/crypto/blake2s-core.S
+++ b/arch/arm/lib/crypto/blake2s-core.S
diff --git a/arch/arm/crypto/blake2s-glue.c b/arch/arm/lib/crypto/blake2s-glue.c
index 0238a70d9581..0238a70d9581 100644
--- a/arch/arm/crypto/blake2s-glue.c
+++ b/arch/arm/lib/crypto/blake2s-glue.c
diff --git a/arch/arm/lib/crypto/chacha-glue.c b/arch/arm/lib/crypto/chacha-glue.c
new file mode 100644
index 000000000000..88ec96415283
--- /dev/null
+++ b/arch/arm/lib/crypto/chacha-glue.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ChaCha and HChaCha functions (ARM optimized)
+ *
+ * Copyright (C) 2016-2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2015 Martin Willi
+ */
+
+#include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/cputype.h>
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+asmlinkage void chacha_block_xor_neon(const struct chacha_state *state,
+ u8 *dst, const u8 *src, int nrounds);
+asmlinkage void chacha_4block_xor_neon(const struct chacha_state *state,
+ u8 *dst, const u8 *src,
+ int nrounds, unsigned int nbytes);
+asmlinkage void hchacha_block_arm(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds);
+asmlinkage void hchacha_block_neon(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds);
+
+asmlinkage void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes,
+ const struct chacha_state *state, int nrounds);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_neon);
+
+static inline bool neon_usable(void)
+{
+ return static_branch_likely(&use_neon) && crypto_simd_usable();
+}
+
+static void chacha_doneon(struct chacha_state *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ u8 buf[CHACHA_BLOCK_SIZE];
+
+ while (bytes > CHACHA_BLOCK_SIZE) {
+ unsigned int l = min(bytes, CHACHA_BLOCK_SIZE * 4U);
+
+ chacha_4block_xor_neon(state, dst, src, nrounds, l);
+ bytes -= l;
+ src += l;
+ dst += l;
+ state->x[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE);
+ }
+ if (bytes) {
+ const u8 *s = src;
+ u8 *d = dst;
+
+ if (bytes != CHACHA_BLOCK_SIZE)
+ s = d = memcpy(buf, src, bytes);
+ chacha_block_xor_neon(state, d, s, nrounds);
+ if (d != dst)
+ memcpy(dst, buf, bytes);
+ state->x[12]++;
+ }
+}
+
+void hchacha_block_arch(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds)
+{
+ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) {
+ hchacha_block_arm(state, out, nrounds);
+ } else {
+ kernel_neon_begin();
+ hchacha_block_neon(state, out, nrounds);
+ kernel_neon_end();
+ }
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable() ||
+ bytes <= CHACHA_BLOCK_SIZE) {
+ chacha_doarm(dst, src, bytes, state, nrounds);
+ state->x[12] += DIV_ROUND_UP(bytes, CHACHA_BLOCK_SIZE);
+ return;
+ }
+
+ do {
+ unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+ kernel_neon_begin();
+ chacha_doneon(state, dst, src, todo, nrounds);
+ kernel_neon_end();
+
+ bytes -= todo;
+ src += todo;
+ dst += todo;
+ } while (bytes);
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+bool chacha_is_arch_optimized(void)
+{
+ /* We always can use at least the ARM scalar implementation. */
+ return true;
+}
+EXPORT_SYMBOL(chacha_is_arch_optimized);
+
+static int __init chacha_arm_mod_init(void)
+{
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) {
+ switch (read_cpuid_part()) {
+ case ARM_CPU_PART_CORTEX_A7:
+ case ARM_CPU_PART_CORTEX_A5:
+ /*
+ * The Cortex-A7 and Cortex-A5 do not perform well with
+ * the NEON implementation but do incredibly with the
+ * scalar one and use less power.
+ */
+ break;
+ default:
+ static_branch_enable(&use_neon);
+ }
+ }
+ return 0;
+}
+subsys_initcall(chacha_arm_mod_init);
+
+static void __exit chacha_arm_mod_exit(void)
+{
+}
+module_exit(chacha_arm_mod_exit);
+
+MODULE_DESCRIPTION("ChaCha and HChaCha functions (ARM optimized)");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
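
As an illustrative sketch (not taken from this diff): the new chacha-glue.c no longer registers skcipher algorithms; it only exports chacha_crypt_arch() and hchacha_block_arch() for the ChaCha library interface, selected via CRYPTO_ARCH_HAVE_LIB_CHACHA in the new Kconfig. The wrapper below is an assumption about how the generic library header is expected to route calls to these hooks — the chacha_crypt()/chacha_crypt_generic() names and the IS_ENABLED() guard are not part of this patch.

/*
 * Sketch (assumption): how a <crypto/chacha.h>-style wrapper could dispatch
 * library calls to the arch hook added in chacha-glue.c above.
 */
static inline void chacha_crypt(struct chacha_state *state, u8 *dst,
				const u8 *src, unsigned int bytes, int nrounds)
{
	if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
		chacha_crypt_arch(state, dst, src, bytes, nrounds);	/* ARM scalar or NEON */
	else
		chacha_crypt_generic(state, dst, src, bytes, nrounds);	/* portable C fallback */
}
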
diff --git a/arch/arm/crypto/chacha-neon-core.S b/arch/arm/lib/crypto/chacha-neon-core.S
index 13d12f672656..ddd62b6294a5 100644
--- a/arch/arm/crypto/chacha-neon-core.S
+++ b/arch/arm/lib/crypto/chacha-neon-core.S
@@ -1,5 +1,5 @@
/*
- * ChaCha/XChaCha NEON helper functions
+ * ChaCha/HChaCha NEON helper functions
*
* Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
diff --git a/arch/arm/crypto/chacha-scalar-core.S b/arch/arm/lib/crypto/chacha-scalar-core.S
index 083fe1ab96d0..4951df05c158 100644
--- a/arch/arm/crypto/chacha-scalar-core.S
+++ b/arch/arm/lib/crypto/chacha-scalar-core.S
@@ -367,7 +367,7 @@
/*
* void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes,
- * const u32 *state, int nrounds);
+ * const struct chacha_state *state, int nrounds);
*/
ENTRY(chacha_doarm)
cmp r2, #0 // len == 0?
@@ -407,7 +407,8 @@ ENTRY(chacha_doarm)
ENDPROC(chacha_doarm)
/*
- * void hchacha_block_arm(const u32 state[16], u32 out[8], int nrounds);
+ * void hchacha_block_arm(const struct chacha_state *state,
+ * u32 out[HCHACHA_OUT_WORDS], int nrounds);
*/
ENTRY(hchacha_block_arm)
push {r1,r4-r11,lr}
diff --git a/arch/arm/crypto/poly1305-armv4.pl b/arch/arm/lib/crypto/poly1305-armv4.pl
index 6d79498d3115..d57c6e2fc84a 100644
--- a/arch/arm/crypto/poly1305-armv4.pl
+++ b/arch/arm/lib/crypto/poly1305-armv4.pl
@@ -43,9 +43,9 @@ $code.=<<___;
#else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__
-# define poly1305_init poly1305_init_arm
+# define poly1305_init poly1305_block_init_arch
# define poly1305_blocks poly1305_blocks_arm
-# define poly1305_emit poly1305_emit_arm
+# define poly1305_emit poly1305_emit_arch
.globl poly1305_blocks_neon
#endif
diff --git a/arch/arm/lib/crypto/poly1305-glue.c b/arch/arm/lib/crypto/poly1305-glue.c
new file mode 100644
index 000000000000..2603b0771f2c
--- /dev/null
+++ b/arch/arm/lib/crypto/poly1305-glue.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OpenSSL/Cryptogams accelerated Poly1305 transform for ARM
+ *
+ * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <crypto/internal/poly1305.h>
+#include <linux/cpufeature.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/unaligned.h>
+
+asmlinkage void poly1305_block_init_arch(
+ struct poly1305_block_state *state,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+EXPORT_SYMBOL_GPL(poly1305_block_init_arch);
+asmlinkage void poly1305_blocks_arm(struct poly1305_block_state *state,
+ const u8 *src, u32 len, u32 hibit);
+asmlinkage void poly1305_blocks_neon(struct poly1305_block_state *state,
+ const u8 *src, u32 len, u32 hibit);
+asmlinkage void poly1305_emit_arch(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4]);
+EXPORT_SYMBOL_GPL(poly1305_emit_arch);
+
+void __weak poly1305_blocks_neon(struct poly1305_block_state *state,
+ const u8 *src, u32 len, u32 hibit)
+{
+}
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+
+void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
+ unsigned int len, u32 padbit)
+{
+ len = round_down(len, POLY1305_BLOCK_SIZE);
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
+ static_branch_likely(&have_neon)) {
+ do {
+ unsigned int todo = min_t(unsigned int, len, SZ_4K);
+
+ kernel_neon_begin();
+ poly1305_blocks_neon(state, src, todo, padbit);
+ kernel_neon_end();
+
+ len -= todo;
+ src += todo;
+ } while (len);
+ } else
+ poly1305_blocks_arm(state, src, len, padbit);
+}
+EXPORT_SYMBOL_GPL(poly1305_blocks_arch);
+
+bool poly1305_is_arch_optimized(void)
+{
+ /* We always can use at least the ARM scalar implementation. */
+ return true;
+}
+EXPORT_SYMBOL(poly1305_is_arch_optimized);
+
+static int __init arm_poly1305_mod_init(void)
+{
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
+ (elf_hwcap & HWCAP_NEON))
+ static_branch_enable(&have_neon);
+ return 0;
+}
+subsys_initcall(arm_poly1305_mod_init);
+
+static void __exit arm_poly1305_mod_exit(void)
+{
+}
+module_exit(arm_poly1305_mod_exit);
+
+MODULE_DESCRIPTION("Accelerated Poly1305 transform for ARM");
+MODULE_LICENSE("GPL v2");
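
As an illustrative sketch (not taken from this diff): poly1305-glue.c now exposes only the block-level hooks poly1305_block_init_arch(), poly1305_blocks_arch() and poly1305_emit_arch(), consumed by the generic Poly1305 library code selected through CRYPTO_ARCH_HAVE_LIB_POLY1305. A minimal caller using the library descriptor API from <crypto/poly1305.h> might look as follows; poly1305_init/update/final are the generic library entry points, not functions introduced by this patch.

/*
 * Sketch (assumption): a kernel user of the Poly1305 library API, which on
 * ARM is backed by the arch hooks exported in poly1305-glue.c above.
 */
#include <crypto/poly1305.h>

static void demo_poly1305(const u8 key[POLY1305_KEY_SIZE],
			  const u8 *msg, unsigned int len,
			  u8 mac[POLY1305_DIGEST_SIZE])
{
	struct poly1305_desc_ctx desc;

	poly1305_init(&desc, key);		/* load the one-time key */
	poly1305_update(&desc, msg, len);	/* absorb the message */
	poly1305_final(&desc, mac);		/* emit the 16-byte tag */
}
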
diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/lib/crypto/sha256-armv4.pl
index f3a2b54efd4e..8122db7fd599 100644
--- a/arch/arm/crypto/sha256-armv4.pl
+++ b/arch/arm/lib/crypto/sha256-armv4.pl
@@ -204,18 +204,18 @@ K256:
.word 0 @ terminator
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.LOPENSSL_armcap:
-.word OPENSSL_armcap_P-sha256_block_data_order
+.word OPENSSL_armcap_P-sha256_blocks_arch
#endif
.align 5
-.global sha256_block_data_order
-.type sha256_block_data_order,%function
-sha256_block_data_order:
-.Lsha256_block_data_order:
+.global sha256_blocks_arch
+.type sha256_blocks_arch,%function
+sha256_blocks_arch:
+.Lsha256_blocks_arch:
#if __ARM_ARCH__<7
- sub r3,pc,#8 @ sha256_block_data_order
+ sub r3,pc,#8 @ sha256_blocks_arch
#else
- adr r3,.Lsha256_block_data_order
+ adr r3,.Lsha256_blocks_arch
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
@@ -282,7 +282,7 @@ $code.=<<___;
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
#endif
-.size sha256_block_data_order,.-sha256_block_data_order
+.size sha256_blocks_arch,.-sha256_blocks_arch
___
######################################################################
# NEON stuff
@@ -470,8 +470,8 @@ sha256_block_data_order_neon:
stmdb sp!,{r4-r12,lr}
sub $H,sp,#16*4+16
- adr $Ktbl,.Lsha256_block_data_order
- sub $Ktbl,$Ktbl,#.Lsha256_block_data_order-K256
+ adr $Ktbl,.Lsha256_blocks_arch
+ sub $Ktbl,$Ktbl,#.Lsha256_blocks_arch-K256
bic $H,$H,#15 @ align for 128-bit stores
mov $t2,sp
mov sp,$H @ alloca
diff --git a/arch/arm/crypto/sha2-ce-core.S b/arch/arm/lib/crypto/sha256-ce.S
index b6369d2440a1..ac2c9b01b22d 100644
--- a/arch/arm/crypto/sha2-ce-core.S
+++ b/arch/arm/lib/crypto/sha256-ce.S
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * sha2-ce-core.S - SHA-224/256 secure hash using ARMv8 Crypto Extensions
+ * sha256-ce.S - SHA-224/256 secure hash using ARMv8 Crypto Extensions
*
* Copyright (C) 2015 Linaro Ltd.
* Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
@@ -67,10 +67,10 @@
.word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
/*
- * void sha2_ce_transform(struct sha256_state *sst, u8 const *src,
- int blocks);
+ * void sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
+ * const u8 *data, size_t nblocks);
*/
-ENTRY(sha2_ce_transform)
+ENTRY(sha256_ce_transform)
/* load state */
vld1.32 {dga-dgb}, [r0]
@@ -120,4 +120,4 @@ ENTRY(sha2_ce_transform)
/* store new state */
vst1.32 {dga-dgb}, [r0]
bx lr
-ENDPROC(sha2_ce_transform)
+ENDPROC(sha256_ce_transform)
diff --git a/arch/arm/lib/crypto/sha256.c b/arch/arm/lib/crypto/sha256.c
new file mode 100644
index 000000000000..109192e54b0f
--- /dev/null
+++ b/arch/arm/lib/crypto/sha256.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-256 optimized for ARM
+ *
+ * Copyright 2025 Google LLC
+ */
+#include <asm/neon.h>
+#include <crypto/internal/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+asmlinkage void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+asmlinkage void sha256_block_data_order_neon(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+asmlinkage void sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
+
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks)
+{
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
+ static_branch_likely(&have_neon)) {
+ kernel_neon_begin();
+ if (static_branch_likely(&have_ce))
+ sha256_ce_transform(state, data, nblocks);
+ else
+ sha256_block_data_order_neon(state, data, nblocks);
+ kernel_neon_end();
+ } else {
+ sha256_blocks_arch(state, data, nblocks);
+ }
+}
+EXPORT_SYMBOL_GPL(sha256_blocks_simd);
+
+bool sha256_is_arch_optimized(void)
+{
+ /* We always can use at least the ARM scalar implementation. */
+ return true;
+}
+EXPORT_SYMBOL_GPL(sha256_is_arch_optimized);
+
+static int __init sha256_arm_mod_init(void)
+{
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) {
+ static_branch_enable(&have_neon);
+ if (elf_hwcap2 & HWCAP2_SHA2)
+ static_branch_enable(&have_ce);
+ }
+ return 0;
+}
+subsys_initcall(sha256_arm_mod_init);
+
+static void __exit sha256_arm_mod_exit(void)
+{
+}
+module_exit(sha256_arm_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-256 optimized for ARM");
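
As an illustrative sketch (not taken from this diff): the new lib/crypto/sha256.c keeps only block-level dispatch — sha256_blocks_arch() is the scalar assembly entry point and sha256_blocks_simd() selects the CE or NEON transform inside kernel_neon_begin()/kernel_neon_end(). A caller would normally go through the SHA-256 library helpers in <crypto/sha2.h>; the one-shot sha256() used below is the generic library entry point and is only an assumption here, not something added by this patch.

/*
 * Sketch (assumption): computing a digest through the SHA-256 library,
 * which the generic code backs with the block functions exported above.
 */
#include <crypto/sha2.h>

static void demo_sha256(const u8 *data, unsigned int len)
{
	u8 digest[SHA256_DIGEST_SIZE];

	sha256(data, len, digest);	/* one-shot hash of the whole buffer */
}
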
diff --git a/arch/arm/mach-aspeed/Kconfig b/arch/arm/mach-aspeed/Kconfig
index 080019aa6fcd..fcf287edd0e5 100644
--- a/arch/arm/mach-aspeed/Kconfig
+++ b/arch/arm/mach-aspeed/Kconfig
@@ -2,7 +2,6 @@
menuconfig ARCH_ASPEED
bool "Aspeed BMC architectures"
depends on (CPU_LITTLE_ENDIAN && ARCH_MULTI_V5) || ARCH_MULTI_V6 || ARCH_MULTI_V7
- select SRAM
select WATCHDOG
select ASPEED_WATCHDOG
select MFD_SYSCON
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 8f66de0405d9..6cd6d29a2c9d 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -19,13 +19,6 @@ if ARCH_DAVINCI
comment "DaVinci Core Type"
-config ARCH_DAVINCI_DA830
- bool "DA830/OMAP-L137/AM17x based system"
- select ARCH_DAVINCI_DA8XX
- # needed on silicon revs 1.0, 1.1:
- select CPU_DCACHE_WRITETHROUGH if !CPU_DCACHE_DISABLE
- select DAVINCI_CP_INTC
-
config ARCH_DAVINCI_DA850
bool "DA850/OMAP-L138/AM18x based system"
select ARCH_DAVINCI_DA8XX
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index 31d22a5d8e1e..7a210db669f4 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -10,7 +10,6 @@ obj-y := common.o sram.o devices-da8xx.o
obj-$(CONFIG_DAVINCI_MUX) += mux.o
# Chip specific
-obj-$(CONFIG_ARCH_DAVINCI_DA830) += da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += da850.o pdata-quirks.o
obj-y += da8xx-dt.o
diff --git a/arch/arm/mach-davinci/cputype.h b/arch/arm/mach-davinci/cputype.h
index 148a738391dc..a8f5330aaad1 100644
--- a/arch/arm/mach-davinci/cputype.h
+++ b/arch/arm/mach-davinci/cputype.h
@@ -25,7 +25,6 @@ struct davinci_id {
};
/* Can use lower 16 bits of cpu id for a variant when required */
-#define DAVINCI_CPU_ID_DA830 0x08300000
#define DAVINCI_CPU_ID_DA850 0x08500000
#endif
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
deleted file mode 100644
index a044ea5cb4f1..000000000000
--- a/arch/arm/mach-davinci/da830.c
+++ /dev/null
@@ -1,506 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * TI DA830/OMAP L137 chip specific setup
- *
- * Author: Mark A. Greer <mgreer@mvista.com>
- *
- * 2009 (c) MontaVista Software, Inc.
- */
-#include <linux/clk-provider.h>
-#include <linux/clk/davinci.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/io.h>
-
-#include <clocksource/timer-davinci.h>
-
-#include <asm/mach/map.h>
-
-#include "common.h"
-#include "cputype.h"
-#include "da8xx.h"
-#include "irqs.h"
-#include "mux.h"
-
-/* Offsets of the 8 compare registers on the da830 */
-#define DA830_CMP12_0 0x60
-#define DA830_CMP12_1 0x64
-#define DA830_CMP12_2 0x68
-#define DA830_CMP12_3 0x6c
-#define DA830_CMP12_4 0x70
-#define DA830_CMP12_5 0x74
-#define DA830_CMP12_6 0x78
-#define DA830_CMP12_7 0x7c
-
-#define DA830_REF_FREQ 24000000
-
-/*
- * Device specific mux setup
- *
- * soc description mux mode mode mux dbg
- * reg offset mask mode
- */
-static const struct mux_config da830_pins[] = {
-#ifdef CONFIG_DAVINCI_MUX
- MUX_CFG(DA830, GPIO7_14, 0, 0, 0xf, 1, false)
- MUX_CFG(DA830, RTCK, 0, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO7_15, 0, 4, 0xf, 1, false)
- MUX_CFG(DA830, EMU_0, 0, 4, 0xf, 8, false)
- MUX_CFG(DA830, EMB_SDCKE, 0, 8, 0xf, 1, false)
- MUX_CFG(DA830, EMB_CLK_GLUE, 0, 12, 0xf, 1, false)
- MUX_CFG(DA830, EMB_CLK, 0, 12, 0xf, 2, false)
- MUX_CFG(DA830, NEMB_CS_0, 0, 16, 0xf, 1, false)
- MUX_CFG(DA830, NEMB_CAS, 0, 20, 0xf, 1, false)
- MUX_CFG(DA830, NEMB_RAS, 0, 24, 0xf, 1, false)
- MUX_CFG(DA830, NEMB_WE, 0, 28, 0xf, 1, false)
- MUX_CFG(DA830, EMB_BA_1, 1, 0, 0xf, 1, false)
- MUX_CFG(DA830, EMB_BA_0, 1, 4, 0xf, 1, false)
- MUX_CFG(DA830, EMB_A_0, 1, 8, 0xf, 1, false)
- MUX_CFG(DA830, EMB_A_1, 1, 12, 0xf, 1, false)
- MUX_CFG(DA830, EMB_A_2, 1, 16, 0xf, 1, false)
- MUX_CFG(DA830, EMB_A_3, 1, 20, 0xf, 1, false)
- MUX_CFG(DA830, EMB_A_4, 1, 24, 0xf, 1, false)
- MUX_CFG(DA830, EMB_A_5, 1, 28, 0xf, 1, false)
- MUX_CFG(DA830, GPIO7_0, 1, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO7_1, 1, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO7_2, 1, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO7_3, 1, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO7_4, 1, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO7_5, 1, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO7_6, 1, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO7_7, 1, 28, 0xf, 8, false)
- MUX_CFG(DA830, EMB_A_6, 2, 0, 0xf, 1, false)
- MUX_CFG(DA830, EMB_A_7, 2, 4, 0xf, 1, false)
- MUX_CFG(DA830, EMB_A_8, 2, 8, 0xf, 1, false)
- MUX_CFG(DA830, EMB_A_9, 2, 12, 0xf, 1, false)
- MUX_CFG(DA830, EMB_A_10, 2, 16, 0xf, 1, false)
- MUX_CFG(DA830, EMB_A_11, 2, 20, 0xf, 1, false)
- MUX_CFG(DA830, EMB_A_12, 2, 24, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_31, 2, 28, 0xf, 1, false)
- MUX_CFG(DA830, GPIO7_8, 2, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO7_9, 2, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO7_10, 2, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO7_11, 2, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO7_12, 2, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO7_13, 2, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_13, 2, 24, 0xf, 8, false)
- MUX_CFG(DA830, EMB_D_30, 3, 0, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_29, 3, 4, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_28, 3, 8, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_27, 3, 12, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_26, 3, 16, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_25, 3, 20, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_24, 3, 24, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_23, 3, 28, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_22, 4, 0, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_21, 4, 4, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_20, 4, 8, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_19, 4, 12, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_18, 4, 16, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_17, 4, 20, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_16, 4, 24, 0xf, 1, false)
- MUX_CFG(DA830, NEMB_WE_DQM_3, 4, 28, 0xf, 1, false)
- MUX_CFG(DA830, NEMB_WE_DQM_2, 5, 0, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_0, 5, 4, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_1, 5, 8, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_2, 5, 12, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_3, 5, 16, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_4, 5, 20, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_5, 5, 24, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_6, 5, 28, 0xf, 1, false)
- MUX_CFG(DA830, GPIO6_0, 5, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO6_1, 5, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO6_2, 5, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO6_3, 5, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO6_4, 5, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO6_5, 5, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO6_6, 5, 28, 0xf, 8, false)
- MUX_CFG(DA830, EMB_D_7, 6, 0, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_8, 6, 4, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_9, 6, 8, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_10, 6, 12, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_11, 6, 16, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_12, 6, 20, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_13, 6, 24, 0xf, 1, false)
- MUX_CFG(DA830, EMB_D_14, 6, 28, 0xf, 1, false)
- MUX_CFG(DA830, GPIO6_7, 6, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO6_8, 6, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO6_9, 6, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO6_10, 6, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO6_11, 6, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO6_12, 6, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO6_13, 6, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO6_14, 6, 28, 0xf, 8, false)
- MUX_CFG(DA830, EMB_D_15, 7, 0, 0xf, 1, false)
- MUX_CFG(DA830, NEMB_WE_DQM_1, 7, 4, 0xf, 1, false)
- MUX_CFG(DA830, NEMB_WE_DQM_0, 7, 8, 0xf, 1, false)
- MUX_CFG(DA830, SPI0_SOMI_0, 7, 12, 0xf, 1, false)
- MUX_CFG(DA830, SPI0_SIMO_0, 7, 16, 0xf, 1, false)
- MUX_CFG(DA830, SPI0_CLK, 7, 20, 0xf, 1, false)
- MUX_CFG(DA830, NSPI0_ENA, 7, 24, 0xf, 1, false)
- MUX_CFG(DA830, NSPI0_SCS_0, 7, 28, 0xf, 1, false)
- MUX_CFG(DA830, EQEP0I, 7, 12, 0xf, 2, false)
- MUX_CFG(DA830, EQEP0S, 7, 16, 0xf, 2, false)
- MUX_CFG(DA830, EQEP1I, 7, 20, 0xf, 2, false)
- MUX_CFG(DA830, NUART0_CTS, 7, 24, 0xf, 2, false)
- MUX_CFG(DA830, NUART0_RTS, 7, 28, 0xf, 2, false)
- MUX_CFG(DA830, EQEP0A, 7, 24, 0xf, 4, false)
- MUX_CFG(DA830, EQEP0B, 7, 28, 0xf, 4, false)
- MUX_CFG(DA830, GPIO6_15, 7, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_14, 7, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_15, 7, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_0, 7, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_1, 7, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_2, 7, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_3, 7, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_4, 7, 28, 0xf, 8, false)
- MUX_CFG(DA830, SPI1_SOMI_0, 8, 0, 0xf, 1, false)
- MUX_CFG(DA830, SPI1_SIMO_0, 8, 4, 0xf, 1, false)
- MUX_CFG(DA830, SPI1_CLK, 8, 8, 0xf, 1, false)
- MUX_CFG(DA830, UART0_RXD, 8, 12, 0xf, 1, false)
- MUX_CFG(DA830, UART0_TXD, 8, 16, 0xf, 1, false)
- MUX_CFG(DA830, AXR1_10, 8, 20, 0xf, 1, false)
- MUX_CFG(DA830, AXR1_11, 8, 24, 0xf, 1, false)
- MUX_CFG(DA830, NSPI1_ENA, 8, 28, 0xf, 1, false)
- MUX_CFG(DA830, I2C1_SCL, 8, 0, 0xf, 2, false)
- MUX_CFG(DA830, I2C1_SDA, 8, 4, 0xf, 2, false)
- MUX_CFG(DA830, EQEP1S, 8, 8, 0xf, 2, false)
- MUX_CFG(DA830, I2C0_SDA, 8, 12, 0xf, 2, false)
- MUX_CFG(DA830, I2C0_SCL, 8, 16, 0xf, 2, false)
- MUX_CFG(DA830, UART2_RXD, 8, 28, 0xf, 2, false)
- MUX_CFG(DA830, TM64P0_IN12, 8, 12, 0xf, 4, false)
- MUX_CFG(DA830, TM64P0_OUT12, 8, 16, 0xf, 4, false)
- MUX_CFG(DA830, GPIO5_5, 8, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_6, 8, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_7, 8, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_8, 8, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_9, 8, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_10, 8, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_11, 8, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO5_12, 8, 28, 0xf, 8, false)
- MUX_CFG(DA830, NSPI1_SCS_0, 9, 0, 0xf, 1, false)
- MUX_CFG(DA830, USB0_DRVVBUS, 9, 4, 0xf, 1, false)
- MUX_CFG(DA830, AHCLKX0, 9, 8, 0xf, 1, false)
- MUX_CFG(DA830, ACLKX0, 9, 12, 0xf, 1, false)
- MUX_CFG(DA830, AFSX0, 9, 16, 0xf, 1, false)
- MUX_CFG(DA830, AHCLKR0, 9, 20, 0xf, 1, false)
- MUX_CFG(DA830, ACLKR0, 9, 24, 0xf, 1, false)
- MUX_CFG(DA830, AFSR0, 9, 28, 0xf, 1, false)
- MUX_CFG(DA830, UART2_TXD, 9, 0, 0xf, 2, false)
- MUX_CFG(DA830, AHCLKX2, 9, 8, 0xf, 2, false)
- MUX_CFG(DA830, ECAP0_APWM0, 9, 12, 0xf, 2, false)
- MUX_CFG(DA830, RMII_MHZ_50_CLK, 9, 20, 0xf, 2, false)
- MUX_CFG(DA830, ECAP1_APWM1, 9, 24, 0xf, 2, false)
- MUX_CFG(DA830, USB_REFCLKIN, 9, 8, 0xf, 4, false)
- MUX_CFG(DA830, GPIO5_13, 9, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_15, 9, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_11, 9, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_12, 9, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_13, 9, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_14, 9, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_15, 9, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_12, 9, 28, 0xf, 8, false)
- MUX_CFG(DA830, AMUTE0, 10, 0, 0xf, 1, false)
- MUX_CFG(DA830, AXR0_0, 10, 4, 0xf, 1, false)
- MUX_CFG(DA830, AXR0_1, 10, 8, 0xf, 1, false)
- MUX_CFG(DA830, AXR0_2, 10, 12, 0xf, 1, false)
- MUX_CFG(DA830, AXR0_3, 10, 16, 0xf, 1, false)
- MUX_CFG(DA830, AXR0_4, 10, 20, 0xf, 1, false)
- MUX_CFG(DA830, AXR0_5, 10, 24, 0xf, 1, false)
- MUX_CFG(DA830, AXR0_6, 10, 28, 0xf, 1, false)
- MUX_CFG(DA830, RMII_TXD_0, 10, 4, 0xf, 2, false)
- MUX_CFG(DA830, RMII_TXD_1, 10, 8, 0xf, 2, false)
- MUX_CFG(DA830, RMII_TXEN, 10, 12, 0xf, 2, false)
- MUX_CFG(DA830, RMII_CRS_DV, 10, 16, 0xf, 2, false)
- MUX_CFG(DA830, RMII_RXD_0, 10, 20, 0xf, 2, false)
- MUX_CFG(DA830, RMII_RXD_1, 10, 24, 0xf, 2, false)
- MUX_CFG(DA830, RMII_RXER, 10, 28, 0xf, 2, false)
- MUX_CFG(DA830, AFSR2, 10, 4, 0xf, 4, false)
- MUX_CFG(DA830, ACLKX2, 10, 8, 0xf, 4, false)
- MUX_CFG(DA830, AXR2_3, 10, 12, 0xf, 4, false)
- MUX_CFG(DA830, AXR2_2, 10, 16, 0xf, 4, false)
- MUX_CFG(DA830, AXR2_1, 10, 20, 0xf, 4, false)
- MUX_CFG(DA830, AFSX2, 10, 24, 0xf, 4, false)
- MUX_CFG(DA830, ACLKR2, 10, 28, 0xf, 4, false)
- MUX_CFG(DA830, NRESETOUT, 10, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_0, 10, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_1, 10, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_2, 10, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_3, 10, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_4, 10, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_5, 10, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_6, 10, 28, 0xf, 8, false)
- MUX_CFG(DA830, AXR0_7, 11, 0, 0xf, 1, false)
- MUX_CFG(DA830, AXR0_8, 11, 4, 0xf, 1, false)
- MUX_CFG(DA830, UART1_RXD, 11, 8, 0xf, 1, false)
- MUX_CFG(DA830, UART1_TXD, 11, 12, 0xf, 1, false)
- MUX_CFG(DA830, AXR0_11, 11, 16, 0xf, 1, false)
- MUX_CFG(DA830, AHCLKX1, 11, 20, 0xf, 1, false)
- MUX_CFG(DA830, ACLKX1, 11, 24, 0xf, 1, false)
- MUX_CFG(DA830, AFSX1, 11, 28, 0xf, 1, false)
- MUX_CFG(DA830, MDIO_CLK, 11, 0, 0xf, 2, false)
- MUX_CFG(DA830, MDIO_D, 11, 4, 0xf, 2, false)
- MUX_CFG(DA830, AXR0_9, 11, 8, 0xf, 2, false)
- MUX_CFG(DA830, AXR0_10, 11, 12, 0xf, 2, false)
- MUX_CFG(DA830, EPWM0B, 11, 20, 0xf, 2, false)
- MUX_CFG(DA830, EPWM0A, 11, 24, 0xf, 2, false)
- MUX_CFG(DA830, EPWMSYNCI, 11, 28, 0xf, 2, false)
- MUX_CFG(DA830, AXR2_0, 11, 16, 0xf, 4, false)
- MUX_CFG(DA830, EPWMSYNC0, 11, 28, 0xf, 4, false)
- MUX_CFG(DA830, GPIO3_7, 11, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_8, 11, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_9, 11, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_10, 11, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_11, 11, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_14, 11, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO3_15, 11, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_10, 11, 28, 0xf, 8, false)
- MUX_CFG(DA830, AHCLKR1, 12, 0, 0xf, 1, false)
- MUX_CFG(DA830, ACLKR1, 12, 4, 0xf, 1, false)
- MUX_CFG(DA830, AFSR1, 12, 8, 0xf, 1, false)
- MUX_CFG(DA830, AMUTE1, 12, 12, 0xf, 1, false)
- MUX_CFG(DA830, AXR1_0, 12, 16, 0xf, 1, false)
- MUX_CFG(DA830, AXR1_1, 12, 20, 0xf, 1, false)
- MUX_CFG(DA830, AXR1_2, 12, 24, 0xf, 1, false)
- MUX_CFG(DA830, AXR1_3, 12, 28, 0xf, 1, false)
- MUX_CFG(DA830, ECAP2_APWM2, 12, 4, 0xf, 2, false)
- MUX_CFG(DA830, EHRPWMGLUETZ, 12, 12, 0xf, 2, false)
- MUX_CFG(DA830, EQEP1A, 12, 28, 0xf, 2, false)
- MUX_CFG(DA830, GPIO4_11, 12, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_12, 12, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_13, 12, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_14, 12, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_0, 12, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_1, 12, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_2, 12, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_3, 12, 28, 0xf, 8, false)
- MUX_CFG(DA830, AXR1_4, 13, 0, 0xf, 1, false)
- MUX_CFG(DA830, AXR1_5, 13, 4, 0xf, 1, false)
- MUX_CFG(DA830, AXR1_6, 13, 8, 0xf, 1, false)
- MUX_CFG(DA830, AXR1_7, 13, 12, 0xf, 1, false)
- MUX_CFG(DA830, AXR1_8, 13, 16, 0xf, 1, false)
- MUX_CFG(DA830, AXR1_9, 13, 20, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_0, 13, 24, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_1, 13, 28, 0xf, 1, false)
- MUX_CFG(DA830, EQEP1B, 13, 0, 0xf, 2, false)
- MUX_CFG(DA830, EPWM2B, 13, 4, 0xf, 2, false)
- MUX_CFG(DA830, EPWM2A, 13, 8, 0xf, 2, false)
- MUX_CFG(DA830, EPWM1B, 13, 12, 0xf, 2, false)
- MUX_CFG(DA830, EPWM1A, 13, 16, 0xf, 2, false)
- MUX_CFG(DA830, MMCSD_DAT_0, 13, 24, 0xf, 2, false)
- MUX_CFG(DA830, MMCSD_DAT_1, 13, 28, 0xf, 2, false)
- MUX_CFG(DA830, UHPI_HD_0, 13, 24, 0xf, 4, false)
- MUX_CFG(DA830, UHPI_HD_1, 13, 28, 0xf, 4, false)
- MUX_CFG(DA830, GPIO4_4, 13, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_5, 13, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_6, 13, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_7, 13, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_8, 13, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO4_9, 13, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_0, 13, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_1, 13, 28, 0xf, 8, false)
- MUX_CFG(DA830, EMA_D_2, 14, 0, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_3, 14, 4, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_4, 14, 8, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_5, 14, 12, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_6, 14, 16, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_7, 14, 20, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_8, 14, 24, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_9, 14, 28, 0xf, 1, false)
- MUX_CFG(DA830, MMCSD_DAT_2, 14, 0, 0xf, 2, false)
- MUX_CFG(DA830, MMCSD_DAT_3, 14, 4, 0xf, 2, false)
- MUX_CFG(DA830, MMCSD_DAT_4, 14, 8, 0xf, 2, false)
- MUX_CFG(DA830, MMCSD_DAT_5, 14, 12, 0xf, 2, false)
- MUX_CFG(DA830, MMCSD_DAT_6, 14, 16, 0xf, 2, false)
- MUX_CFG(DA830, MMCSD_DAT_7, 14, 20, 0xf, 2, false)
- MUX_CFG(DA830, UHPI_HD_8, 14, 24, 0xf, 2, false)
- MUX_CFG(DA830, UHPI_HD_9, 14, 28, 0xf, 2, false)
- MUX_CFG(DA830, UHPI_HD_2, 14, 0, 0xf, 4, false)
- MUX_CFG(DA830, UHPI_HD_3, 14, 4, 0xf, 4, false)
- MUX_CFG(DA830, UHPI_HD_4, 14, 8, 0xf, 4, false)
- MUX_CFG(DA830, UHPI_HD_5, 14, 12, 0xf, 4, false)
- MUX_CFG(DA830, UHPI_HD_6, 14, 16, 0xf, 4, false)
- MUX_CFG(DA830, UHPI_HD_7, 14, 20, 0xf, 4, false)
- MUX_CFG(DA830, LCD_D_8, 14, 24, 0xf, 4, false)
- MUX_CFG(DA830, LCD_D_9, 14, 28, 0xf, 4, false)
- MUX_CFG(DA830, GPIO0_2, 14, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_3, 14, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_4, 14, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_5, 14, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_6, 14, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_7, 14, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_8, 14, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_9, 14, 28, 0xf, 8, false)
- MUX_CFG(DA830, EMA_D_10, 15, 0, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_11, 15, 4, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_12, 15, 8, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_13, 15, 12, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_14, 15, 16, 0xf, 1, false)
- MUX_CFG(DA830, EMA_D_15, 15, 20, 0xf, 1, false)
- MUX_CFG(DA830, EMA_A_0, 15, 24, 0xf, 1, false)
- MUX_CFG(DA830, EMA_A_1, 15, 28, 0xf, 1, false)
- MUX_CFG(DA830, UHPI_HD_10, 15, 0, 0xf, 2, false)
- MUX_CFG(DA830, UHPI_HD_11, 15, 4, 0xf, 2, false)
- MUX_CFG(DA830, UHPI_HD_12, 15, 8, 0xf, 2, false)
- MUX_CFG(DA830, UHPI_HD_13, 15, 12, 0xf, 2, false)
- MUX_CFG(DA830, UHPI_HD_14, 15, 16, 0xf, 2, false)
- MUX_CFG(DA830, UHPI_HD_15, 15, 20, 0xf, 2, false)
- MUX_CFG(DA830, LCD_D_7, 15, 24, 0xf, 2, false)
- MUX_CFG(DA830, MMCSD_CLK, 15, 28, 0xf, 2, false)
- MUX_CFG(DA830, LCD_D_10, 15, 0, 0xf, 4, false)
- MUX_CFG(DA830, LCD_D_11, 15, 4, 0xf, 4, false)
- MUX_CFG(DA830, LCD_D_12, 15, 8, 0xf, 4, false)
- MUX_CFG(DA830, LCD_D_13, 15, 12, 0xf, 4, false)
- MUX_CFG(DA830, LCD_D_14, 15, 16, 0xf, 4, false)
- MUX_CFG(DA830, LCD_D_15, 15, 20, 0xf, 4, false)
- MUX_CFG(DA830, UHPI_HCNTL0, 15, 28, 0xf, 4, false)
- MUX_CFG(DA830, GPIO0_10, 15, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_11, 15, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_12, 15, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_13, 15, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_14, 15, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO0_15, 15, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_0, 15, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_1, 15, 28, 0xf, 8, false)
- MUX_CFG(DA830, EMA_A_2, 16, 0, 0xf, 1, false)
- MUX_CFG(DA830, EMA_A_3, 16, 4, 0xf, 1, false)
- MUX_CFG(DA830, EMA_A_4, 16, 8, 0xf, 1, false)
- MUX_CFG(DA830, EMA_A_5, 16, 12, 0xf, 1, false)
- MUX_CFG(DA830, EMA_A_6, 16, 16, 0xf, 1, false)
- MUX_CFG(DA830, EMA_A_7, 16, 20, 0xf, 1, false)
- MUX_CFG(DA830, EMA_A_8, 16, 24, 0xf, 1, false)
- MUX_CFG(DA830, EMA_A_9, 16, 28, 0xf, 1, false)
- MUX_CFG(DA830, MMCSD_CMD, 16, 0, 0xf, 2, false)
- MUX_CFG(DA830, LCD_D_6, 16, 4, 0xf, 2, false)
- MUX_CFG(DA830, LCD_D_3, 16, 8, 0xf, 2, false)
- MUX_CFG(DA830, LCD_D_2, 16, 12, 0xf, 2, false)
- MUX_CFG(DA830, LCD_D_1, 16, 16, 0xf, 2, false)
- MUX_CFG(DA830, LCD_D_0, 16, 20, 0xf, 2, false)
- MUX_CFG(DA830, LCD_PCLK, 16, 24, 0xf, 2, false)
- MUX_CFG(DA830, LCD_HSYNC, 16, 28, 0xf, 2, false)
- MUX_CFG(DA830, UHPI_HCNTL1, 16, 0, 0xf, 4, false)
- MUX_CFG(DA830, GPIO1_2, 16, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_3, 16, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_4, 16, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_5, 16, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_6, 16, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_7, 16, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_8, 16, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_9, 16, 28, 0xf, 8, false)
- MUX_CFG(DA830, EMA_A_10, 17, 0, 0xf, 1, false)
- MUX_CFG(DA830, EMA_A_11, 17, 4, 0xf, 1, false)
- MUX_CFG(DA830, EMA_A_12, 17, 8, 0xf, 1, false)
- MUX_CFG(DA830, EMA_BA_1, 17, 12, 0xf, 1, false)
- MUX_CFG(DA830, EMA_BA_0, 17, 16, 0xf, 1, false)
- MUX_CFG(DA830, EMA_CLK, 17, 20, 0xf, 1, false)
- MUX_CFG(DA830, EMA_SDCKE, 17, 24, 0xf, 1, false)
- MUX_CFG(DA830, NEMA_CAS, 17, 28, 0xf, 1, false)
- MUX_CFG(DA830, LCD_VSYNC, 17, 0, 0xf, 2, false)
- MUX_CFG(DA830, NLCD_AC_ENB_CS, 17, 4, 0xf, 2, false)
- MUX_CFG(DA830, LCD_MCLK, 17, 8, 0xf, 2, false)
- MUX_CFG(DA830, LCD_D_5, 17, 12, 0xf, 2, false)
- MUX_CFG(DA830, LCD_D_4, 17, 16, 0xf, 2, false)
- MUX_CFG(DA830, OBSCLK, 17, 20, 0xf, 2, false)
- MUX_CFG(DA830, NEMA_CS_4, 17, 28, 0xf, 2, false)
- MUX_CFG(DA830, UHPI_HHWIL, 17, 12, 0xf, 4, false)
- MUX_CFG(DA830, AHCLKR2, 17, 20, 0xf, 4, false)
- MUX_CFG(DA830, GPIO1_10, 17, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_11, 17, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_12, 17, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_13, 17, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_14, 17, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO1_15, 17, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_0, 17, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_1, 17, 28, 0xf, 8, false)
- MUX_CFG(DA830, NEMA_RAS, 18, 0, 0xf, 1, false)
- MUX_CFG(DA830, NEMA_WE, 18, 4, 0xf, 1, false)
- MUX_CFG(DA830, NEMA_CS_0, 18, 8, 0xf, 1, false)
- MUX_CFG(DA830, NEMA_CS_2, 18, 12, 0xf, 1, false)
- MUX_CFG(DA830, NEMA_CS_3, 18, 16, 0xf, 1, false)
- MUX_CFG(DA830, NEMA_OE, 18, 20, 0xf, 1, false)
- MUX_CFG(DA830, NEMA_WE_DQM_1, 18, 24, 0xf, 1, false)
- MUX_CFG(DA830, NEMA_WE_DQM_0, 18, 28, 0xf, 1, false)
- MUX_CFG(DA830, NEMA_CS_5, 18, 0, 0xf, 2, false)
- MUX_CFG(DA830, UHPI_HRNW, 18, 4, 0xf, 2, false)
- MUX_CFG(DA830, NUHPI_HAS, 18, 8, 0xf, 2, false)
- MUX_CFG(DA830, NUHPI_HCS, 18, 12, 0xf, 2, false)
- MUX_CFG(DA830, NUHPI_HDS1, 18, 20, 0xf, 2, false)
- MUX_CFG(DA830, NUHPI_HDS2, 18, 24, 0xf, 2, false)
- MUX_CFG(DA830, NUHPI_HINT, 18, 28, 0xf, 2, false)
- MUX_CFG(DA830, AXR0_12, 18, 4, 0xf, 4, false)
- MUX_CFG(DA830, AMUTE2, 18, 16, 0xf, 4, false)
- MUX_CFG(DA830, AXR0_13, 18, 20, 0xf, 4, false)
- MUX_CFG(DA830, AXR0_14, 18, 24, 0xf, 4, false)
- MUX_CFG(DA830, AXR0_15, 18, 28, 0xf, 4, false)
- MUX_CFG(DA830, GPIO2_2, 18, 0, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_3, 18, 4, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_4, 18, 8, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_5, 18, 12, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_6, 18, 16, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_7, 18, 20, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_8, 18, 24, 0xf, 8, false)
- MUX_CFG(DA830, GPIO2_9, 18, 28, 0xf, 8, false)
- MUX_CFG(DA830, EMA_WAIT_0, 19, 0, 0xf, 1, false)
- MUX_CFG(DA830, NUHPI_HRDY, 19, 0, 0xf, 2, false)
- MUX_CFG(DA830, GPIO2_10, 19, 0, 0xf, 8, false)
-#endif
-};
-
-static struct map_desc da830_io_desc[] = {
- {
- .virtual = IO_VIRT,
- .pfn = __phys_to_pfn(IO_PHYS),
- .length = IO_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = DA8XX_CP_INTC_VIRT,
- .pfn = __phys_to_pfn(DA8XX_CP_INTC_BASE),
- .length = DA8XX_CP_INTC_SIZE,
- .type = MT_DEVICE
- },
-};
-
-/* Contents of JTAG ID register used to identify exact cpu type */
-static struct davinci_id da830_ids[] = {
- {
- .variant = 0x0,
- .part_no = 0xb7df,
- .manufacturer = 0x017, /* 0x02f >> 1 */
- .cpu_id = DAVINCI_CPU_ID_DA830,
- .name = "da830/omap-l137 rev1.0",
- },
- {
- .variant = 0x8,
- .part_no = 0xb7df,
- .manufacturer = 0x017,
- .cpu_id = DAVINCI_CPU_ID_DA830,
- .name = "da830/omap-l137 rev1.1",
- },
- {
- .variant = 0x9,
- .part_no = 0xb7df,
- .manufacturer = 0x017,
- .cpu_id = DAVINCI_CPU_ID_DA830,
- .name = "da830/omap-l137 rev2.0",
- },
-};
-
-static const struct davinci_soc_info davinci_soc_info_da830 = {
- .io_desc = da830_io_desc,
- .io_desc_num = ARRAY_SIZE(da830_io_desc),
- .jtag_id_reg = DA8XX_SYSCFG0_BASE + DA8XX_JTAG_ID_REG,
- .ids = da830_ids,
- .ids_num = ARRAY_SIZE(da830_ids),
- .pinmux_base = DA8XX_SYSCFG0_BASE + 0x120,
- .pinmux_pins = da830_pins,
- .pinmux_pins_num = ARRAY_SIZE(da830_pins),
-};
-
-void __init da830_init(void)
-{
- davinci_common_init(&davinci_soc_info_da830);
-
- da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K);
- WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module");
-}
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 287dd987908e..706f8241b5e7 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -4,7 +4,6 @@
*
* Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/
*
- * Derived from: arch/arm/mach-davinci/da830.c
* Original Copyrights follow:
*
* 2009 (c) MontaVista Software, Inc.
diff --git a/arch/arm/mach-davinci/da8xx.h b/arch/arm/mach-davinci/da8xx.h
index 54a255b8d8d8..70d14f7f3520 100644
--- a/arch/arm/mach-davinci/da8xx.h
+++ b/arch/arm/mach-davinci/da8xx.h
@@ -68,8 +68,6 @@ extern void __iomem *da8xx_syscfg1_base;
#define DA8XX_SHARED_RAM_BASE 0x80000000
#define DA8XX_ARM_RAM_BASE 0xffff0000
-void da830_init(void);
-
void da850_init(void);
int da850_register_vpif_display
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 5e73a725d5da..4e9ac55ae92d 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -33,7 +33,6 @@
#define DA8XX_PRUSS_MEM_BASE 0x01c30000
#define DA8XX_MMCSD0_BASE 0x01c40000
#define DA8XX_SPI0_BASE 0x01c41000
-#define DA830_SPI1_BASE 0x01e12000
#define DA8XX_LCD_CNTRL_BASE 0x01e13000
#define DA850_SATA_BASE 0x01e18000
#define DA850_MMCSD1_BASE 0x01e1b000
diff --git a/arch/arm/mach-davinci/irqs.h b/arch/arm/mach-davinci/irqs.h
index b1ceed81e9fa..23e8da5025ab 100644
--- a/arch/arm/mach-davinci/irqs.h
+++ b/arch/arm/mach-davinci/irqs.h
@@ -101,33 +101,6 @@
#define IRQ_DA8XX_ECAP2 71
#define IRQ_DA8XX_ARMCLKSTOPREQ 90
-/* DA830 specific interrupts */
-#define IRQ_DA830_MPUERR 27
-#define IRQ_DA830_IOPUERR 27
-#define IRQ_DA830_BOOTCFGERR 27
-#define IRQ_DA830_EHRPWM2 67
-#define IRQ_DA830_EHRPWM2TZ 68
-#define IRQ_DA830_EQEP0 72
-#define IRQ_DA830_EQEP1 73
-#define IRQ_DA830_T12CMPINT0_0 74
-#define IRQ_DA830_T12CMPINT1_0 75
-#define IRQ_DA830_T12CMPINT2_0 76
-#define IRQ_DA830_T12CMPINT3_0 77
-#define IRQ_DA830_T12CMPINT4_0 78
-#define IRQ_DA830_T12CMPINT5_0 79
-#define IRQ_DA830_T12CMPINT6_0 80
-#define IRQ_DA830_T12CMPINT7_0 81
-#define IRQ_DA830_T12CMPINT0_1 82
-#define IRQ_DA830_T12CMPINT1_1 83
-#define IRQ_DA830_T12CMPINT2_1 84
-#define IRQ_DA830_T12CMPINT3_1 85
-#define IRQ_DA830_T12CMPINT4_1 86
-#define IRQ_DA830_T12CMPINT5_1 87
-#define IRQ_DA830_T12CMPINT6_1 88
-#define IRQ_DA830_T12CMPINT7_1 89
-
-#define DA830_N_CP_INTC_IRQ 96
-
 /* DA850 specific interrupts */
#define IRQ_DA850_MPUADDRERR0 27
#define IRQ_DA850_MPUPROTERR0 27
diff --git a/arch/arm/mach-davinci/mux.h b/arch/arm/mach-davinci/mux.h
index 05fd3902df65..6325ea5a2730 100644
--- a/arch/arm/mach-davinci/mux.h
+++ b/arch/arm/mach-davinci/mux.h
@@ -21,410 +21,6 @@ struct mux_config {
bool debug;
};
-enum da830_index {
- DA830_GPIO7_14,
- DA830_RTCK,
- DA830_GPIO7_15,
- DA830_EMU_0,
- DA830_EMB_SDCKE,
- DA830_EMB_CLK_GLUE,
- DA830_EMB_CLK,
- DA830_NEMB_CS_0,
- DA830_NEMB_CAS,
- DA830_NEMB_RAS,
- DA830_NEMB_WE,
- DA830_EMB_BA_1,
- DA830_EMB_BA_0,
- DA830_EMB_A_0,
- DA830_EMB_A_1,
- DA830_EMB_A_2,
- DA830_EMB_A_3,
- DA830_EMB_A_4,
- DA830_EMB_A_5,
- DA830_GPIO7_0,
- DA830_GPIO7_1,
- DA830_GPIO7_2,
- DA830_GPIO7_3,
- DA830_GPIO7_4,
- DA830_GPIO7_5,
- DA830_GPIO7_6,
- DA830_GPIO7_7,
- DA830_EMB_A_6,
- DA830_EMB_A_7,
- DA830_EMB_A_8,
- DA830_EMB_A_9,
- DA830_EMB_A_10,
- DA830_EMB_A_11,
- DA830_EMB_A_12,
- DA830_EMB_D_31,
- DA830_GPIO7_8,
- DA830_GPIO7_9,
- DA830_GPIO7_10,
- DA830_GPIO7_11,
- DA830_GPIO7_12,
- DA830_GPIO7_13,
- DA830_GPIO3_13,
- DA830_EMB_D_30,
- DA830_EMB_D_29,
- DA830_EMB_D_28,
- DA830_EMB_D_27,
- DA830_EMB_D_26,
- DA830_EMB_D_25,
- DA830_EMB_D_24,
- DA830_EMB_D_23,
- DA830_EMB_D_22,
- DA830_EMB_D_21,
- DA830_EMB_D_20,
- DA830_EMB_D_19,
- DA830_EMB_D_18,
- DA830_EMB_D_17,
- DA830_EMB_D_16,
- DA830_NEMB_WE_DQM_3,
- DA830_NEMB_WE_DQM_2,
- DA830_EMB_D_0,
- DA830_EMB_D_1,
- DA830_EMB_D_2,
- DA830_EMB_D_3,
- DA830_EMB_D_4,
- DA830_EMB_D_5,
- DA830_EMB_D_6,
- DA830_GPIO6_0,
- DA830_GPIO6_1,
- DA830_GPIO6_2,
- DA830_GPIO6_3,
- DA830_GPIO6_4,
- DA830_GPIO6_5,
- DA830_GPIO6_6,
- DA830_EMB_D_7,
- DA830_EMB_D_8,
- DA830_EMB_D_9,
- DA830_EMB_D_10,
- DA830_EMB_D_11,
- DA830_EMB_D_12,
- DA830_EMB_D_13,
- DA830_EMB_D_14,
- DA830_GPIO6_7,
- DA830_GPIO6_8,
- DA830_GPIO6_9,
- DA830_GPIO6_10,
- DA830_GPIO6_11,
- DA830_GPIO6_12,
- DA830_GPIO6_13,
- DA830_GPIO6_14,
- DA830_EMB_D_15,
- DA830_NEMB_WE_DQM_1,
- DA830_NEMB_WE_DQM_0,
- DA830_SPI0_SOMI_0,
- DA830_SPI0_SIMO_0,
- DA830_SPI0_CLK,
- DA830_NSPI0_ENA,
- DA830_NSPI0_SCS_0,
- DA830_EQEP0I,
- DA830_EQEP0S,
- DA830_EQEP1I,
- DA830_NUART0_CTS,
- DA830_NUART0_RTS,
- DA830_EQEP0A,
- DA830_EQEP0B,
- DA830_GPIO6_15,
- DA830_GPIO5_14,
- DA830_GPIO5_15,
- DA830_GPIO5_0,
- DA830_GPIO5_1,
- DA830_GPIO5_2,
- DA830_GPIO5_3,
- DA830_GPIO5_4,
- DA830_SPI1_SOMI_0,
- DA830_SPI1_SIMO_0,
- DA830_SPI1_CLK,
- DA830_UART0_RXD,
- DA830_UART0_TXD,
- DA830_AXR1_10,
- DA830_AXR1_11,
- DA830_NSPI1_ENA,
- DA830_I2C1_SCL,
- DA830_I2C1_SDA,
- DA830_EQEP1S,
- DA830_I2C0_SDA,
- DA830_I2C0_SCL,
- DA830_UART2_RXD,
- DA830_TM64P0_IN12,
- DA830_TM64P0_OUT12,
- DA830_GPIO5_5,
- DA830_GPIO5_6,
- DA830_GPIO5_7,
- DA830_GPIO5_8,
- DA830_GPIO5_9,
- DA830_GPIO5_10,
- DA830_GPIO5_11,
- DA830_GPIO5_12,
- DA830_NSPI1_SCS_0,
- DA830_USB0_DRVVBUS,
- DA830_AHCLKX0,
- DA830_ACLKX0,
- DA830_AFSX0,
- DA830_AHCLKR0,
- DA830_ACLKR0,
- DA830_AFSR0,
- DA830_UART2_TXD,
- DA830_AHCLKX2,
- DA830_ECAP0_APWM0,
- DA830_RMII_MHZ_50_CLK,
- DA830_ECAP1_APWM1,
- DA830_USB_REFCLKIN,
- DA830_GPIO5_13,
- DA830_GPIO4_15,
- DA830_GPIO2_11,
- DA830_GPIO2_12,
- DA830_GPIO2_13,
- DA830_GPIO2_14,
- DA830_GPIO2_15,
- DA830_GPIO3_12,
- DA830_AMUTE0,
- DA830_AXR0_0,
- DA830_AXR0_1,
- DA830_AXR0_2,
- DA830_AXR0_3,
- DA830_AXR0_4,
- DA830_AXR0_5,
- DA830_AXR0_6,
- DA830_RMII_TXD_0,
- DA830_RMII_TXD_1,
- DA830_RMII_TXEN,
- DA830_RMII_CRS_DV,
- DA830_RMII_RXD_0,
- DA830_RMII_RXD_1,
- DA830_RMII_RXER,
- DA830_AFSR2,
- DA830_ACLKX2,
- DA830_AXR2_3,
- DA830_AXR2_2,
- DA830_AXR2_1,
- DA830_AFSX2,
- DA830_ACLKR2,
- DA830_NRESETOUT,
- DA830_GPIO3_0,
- DA830_GPIO3_1,
- DA830_GPIO3_2,
- DA830_GPIO3_3,
- DA830_GPIO3_4,
- DA830_GPIO3_5,
- DA830_GPIO3_6,
- DA830_AXR0_7,
- DA830_AXR0_8,
- DA830_UART1_RXD,
- DA830_UART1_TXD,
- DA830_AXR0_11,
- DA830_AHCLKX1,
- DA830_ACLKX1,
- DA830_AFSX1,
- DA830_MDIO_CLK,
- DA830_MDIO_D,
- DA830_AXR0_9,
- DA830_AXR0_10,
- DA830_EPWM0B,
- DA830_EPWM0A,
- DA830_EPWMSYNCI,
- DA830_AXR2_0,
- DA830_EPWMSYNC0,
- DA830_GPIO3_7,
- DA830_GPIO3_8,
- DA830_GPIO3_9,
- DA830_GPIO3_10,
- DA830_GPIO3_11,
- DA830_GPIO3_14,
- DA830_GPIO3_15,
- DA830_GPIO4_10,
- DA830_AHCLKR1,
- DA830_ACLKR1,
- DA830_AFSR1,
- DA830_AMUTE1,
- DA830_AXR1_0,
- DA830_AXR1_1,
- DA830_AXR1_2,
- DA830_AXR1_3,
- DA830_ECAP2_APWM2,
- DA830_EHRPWMGLUETZ,
- DA830_EQEP1A,
- DA830_GPIO4_11,
- DA830_GPIO4_12,
- DA830_GPIO4_13,
- DA830_GPIO4_14,
- DA830_GPIO4_0,
- DA830_GPIO4_1,
- DA830_GPIO4_2,
- DA830_GPIO4_3,
- DA830_AXR1_4,
- DA830_AXR1_5,
- DA830_AXR1_6,
- DA830_AXR1_7,
- DA830_AXR1_8,
- DA830_AXR1_9,
- DA830_EMA_D_0,
- DA830_EMA_D_1,
- DA830_EQEP1B,
- DA830_EPWM2B,
- DA830_EPWM2A,
- DA830_EPWM1B,
- DA830_EPWM1A,
- DA830_MMCSD_DAT_0,
- DA830_MMCSD_DAT_1,
- DA830_UHPI_HD_0,
- DA830_UHPI_HD_1,
- DA830_GPIO4_4,
- DA830_GPIO4_5,
- DA830_GPIO4_6,
- DA830_GPIO4_7,
- DA830_GPIO4_8,
- DA830_GPIO4_9,
- DA830_GPIO0_0,
- DA830_GPIO0_1,
- DA830_EMA_D_2,
- DA830_EMA_D_3,
- DA830_EMA_D_4,
- DA830_EMA_D_5,
- DA830_EMA_D_6,
- DA830_EMA_D_7,
- DA830_EMA_D_8,
- DA830_EMA_D_9,
- DA830_MMCSD_DAT_2,
- DA830_MMCSD_DAT_3,
- DA830_MMCSD_DAT_4,
- DA830_MMCSD_DAT_5,
- DA830_MMCSD_DAT_6,
- DA830_MMCSD_DAT_7,
- DA830_UHPI_HD_8,
- DA830_UHPI_HD_9,
- DA830_UHPI_HD_2,
- DA830_UHPI_HD_3,
- DA830_UHPI_HD_4,
- DA830_UHPI_HD_5,
- DA830_UHPI_HD_6,
- DA830_UHPI_HD_7,
- DA830_LCD_D_8,
- DA830_LCD_D_9,
- DA830_GPIO0_2,
- DA830_GPIO0_3,
- DA830_GPIO0_4,
- DA830_GPIO0_5,
- DA830_GPIO0_6,
- DA830_GPIO0_7,
- DA830_GPIO0_8,
- DA830_GPIO0_9,
- DA830_EMA_D_10,
- DA830_EMA_D_11,
- DA830_EMA_D_12,
- DA830_EMA_D_13,
- DA830_EMA_D_14,
- DA830_EMA_D_15,
- DA830_EMA_A_0,
- DA830_EMA_A_1,
- DA830_UHPI_HD_10,
- DA830_UHPI_HD_11,
- DA830_UHPI_HD_12,
- DA830_UHPI_HD_13,
- DA830_UHPI_HD_14,
- DA830_UHPI_HD_15,
- DA830_LCD_D_7,
- DA830_MMCSD_CLK,
- DA830_LCD_D_10,
- DA830_LCD_D_11,
- DA830_LCD_D_12,
- DA830_LCD_D_13,
- DA830_LCD_D_14,
- DA830_LCD_D_15,
- DA830_UHPI_HCNTL0,
- DA830_GPIO0_10,
- DA830_GPIO0_11,
- DA830_GPIO0_12,
- DA830_GPIO0_13,
- DA830_GPIO0_14,
- DA830_GPIO0_15,
- DA830_GPIO1_0,
- DA830_GPIO1_1,
- DA830_EMA_A_2,
- DA830_EMA_A_3,
- DA830_EMA_A_4,
- DA830_EMA_A_5,
- DA830_EMA_A_6,
- DA830_EMA_A_7,
- DA830_EMA_A_8,
- DA830_EMA_A_9,
- DA830_MMCSD_CMD,
- DA830_LCD_D_6,
- DA830_LCD_D_3,
- DA830_LCD_D_2,
- DA830_LCD_D_1,
- DA830_LCD_D_0,
- DA830_LCD_PCLK,
- DA830_LCD_HSYNC,
- DA830_UHPI_HCNTL1,
- DA830_GPIO1_2,
- DA830_GPIO1_3,
- DA830_GPIO1_4,
- DA830_GPIO1_5,
- DA830_GPIO1_6,
- DA830_GPIO1_7,
- DA830_GPIO1_8,
- DA830_GPIO1_9,
- DA830_EMA_A_10,
- DA830_EMA_A_11,
- DA830_EMA_A_12,
- DA830_EMA_BA_1,
- DA830_EMA_BA_0,
- DA830_EMA_CLK,
- DA830_EMA_SDCKE,
- DA830_NEMA_CAS,
- DA830_LCD_VSYNC,
- DA830_NLCD_AC_ENB_CS,
- DA830_LCD_MCLK,
- DA830_LCD_D_5,
- DA830_LCD_D_4,
- DA830_OBSCLK,
- DA830_NEMA_CS_4,
- DA830_UHPI_HHWIL,
- DA830_AHCLKR2,
- DA830_GPIO1_10,
- DA830_GPIO1_11,
- DA830_GPIO1_12,
- DA830_GPIO1_13,
- DA830_GPIO1_14,
- DA830_GPIO1_15,
- DA830_GPIO2_0,
- DA830_GPIO2_1,
- DA830_NEMA_RAS,
- DA830_NEMA_WE,
- DA830_NEMA_CS_0,
- DA830_NEMA_CS_2,
- DA830_NEMA_CS_3,
- DA830_NEMA_OE,
- DA830_NEMA_WE_DQM_1,
- DA830_NEMA_WE_DQM_0,
- DA830_NEMA_CS_5,
- DA830_UHPI_HRNW,
- DA830_NUHPI_HAS,
- DA830_NUHPI_HCS,
- DA830_NUHPI_HDS1,
- DA830_NUHPI_HDS2,
- DA830_NUHPI_HINT,
- DA830_AXR0_12,
- DA830_AMUTE2,
- DA830_AXR0_13,
- DA830_AXR0_14,
- DA830_AXR0_15,
- DA830_GPIO2_2,
- DA830_GPIO2_3,
- DA830_GPIO2_4,
- DA830_GPIO2_5,
- DA830_GPIO2_6,
- DA830_GPIO2_7,
- DA830_GPIO2_8,
- DA830_GPIO2_9,
- DA830_EMA_WAIT_0,
- DA830_NUHPI_HRDY,
- DA830_GPIO2_10,
-};
-
enum davinci_da850_index {
/* UART0 function */
DA850_NUART0_CTS,
diff --git a/arch/arm/mach-davinci/psc.h b/arch/arm/mach-davinci/psc.h
index acfef063295f..6c365a2e87fe 100644
--- a/arch/arm/mach-davinci/psc.h
+++ b/arch/arm/mach-davinci/psc.h
@@ -97,9 +97,7 @@
#define DA8XX_LPSC1_CPGMAC 5
#define DA8XX_LPSC1_EMIF3C 6
#define DA8XX_LPSC1_McASP0 7
-#define DA830_LPSC1_McASP1 8
#define DA850_LPSC1_SATA 8
-#define DA830_LPSC1_McASP2 9
#define DA850_LPSC1_VPIF 9
#define DA8XX_LPSC1_SPI1 10
#define DA8XX_LPSC1_I2C 11
@@ -111,7 +109,6 @@
#define DA8XX_LPSC1_PWM 17
#define DA850_LPSC1_MMC_SD1 18
#define DA8XX_LPSC1_ECAP 20
-#define DA830_LPSC1_EQEP 21
#define DA850_LPSC1_TPTC2 21
#define DA8XX_LPSC1_SCR_P0_SS 24
#define DA8XX_LPSC1_SCR_P1_SS 25
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index cac4e82f6c82..150a1e56dcae 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -209,9 +209,8 @@ static int __init exynos_pmu_irq_init(struct device_node *node,
return -ENOMEM;
}
- domain = irq_domain_add_hierarchy(parent_domain, 0, 0,
- node, &exynos_pmu_domain_ops,
- NULL);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, 0, of_fwnode_handle(node),
+ &exynos_pmu_domain_ops, NULL);
if (!domain) {
iounmap(pmu_base_addr);
pmu_base_addr = NULL;
diff --git a/arch/arm/mach-imx/avic.c b/arch/arm/mach-imx/avic.c
index cf6546ddc7a3..3067c06b4b8e 100644
--- a/arch/arm/mach-imx/avic.c
+++ b/arch/arm/mach-imx/avic.c
@@ -201,8 +201,8 @@ static void __init mxc_init_irq(void __iomem *irqbase)
WARN_ON(irq_base < 0);
np = of_find_compatible_node(NULL, NULL, "fsl,avic");
- domain = irq_domain_add_legacy(np, AVIC_NUM_IRQS, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ domain = irq_domain_create_legacy(of_fwnode_handle(np), AVIC_NUM_IRQS, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
WARN_ON(!domain);
for (i = 0; i < AVIC_NUM_IRQS / 32; i++, irq_base += 32)
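The avic.c hunk above (and the similar gpc.c, tzic.c, omap1, pxa and wakeupgen hunks that follow) replaces the of_node-based irq_domain_add_*() helpers with the fwnode-based irq_domain_create_*() API. A minimal sketch of the converted legacy-domain registration, with placeholder IRQ counts and the generic simple ops (not taken from any one driver), is:

/*
 * Hedged sketch of the fwnode-based legacy domain registration these hunks
 * convert to; nr_irqs, irq_base and the ops are placeholders.
 */
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *example_register_legacy_domain(struct device_node *np,
							  unsigned int nr_irqs,
							  unsigned int irq_base)
{
	/* of_fwnode_handle() bridges the DT node to the generic fwnode API */
	return irq_domain_create_legacy(of_fwnode_handle(np), nr_irqs,
					irq_base, 0, &irq_domain_simple_ops,
					NULL);
}

The only functional difference from the removed calls is the first argument: the DT node is wrapped with of_fwnode_handle() instead of being passed directly.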
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 5909088d5482..2e633569d2f8 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -245,9 +245,8 @@ static int __init imx_gpc_init(struct device_node *node,
if (WARN_ON(!gpc_base))
return -ENOMEM;
- domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
- node, &imx_gpc_domain_ops,
- NULL);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, GPC_MAX_IRQS, of_fwnode_handle(node),
+ &imx_gpc_domain_ops, NULL);
if (!domain) {
iounmap(gpc_base);
return -ENOMEM;
diff --git a/arch/arm/mach-imx/tzic.c b/arch/arm/mach-imx/tzic.c
index 8b3d98d288d9..50a5668e65d2 100644
--- a/arch/arm/mach-imx/tzic.c
+++ b/arch/arm/mach-imx/tzic.c
@@ -175,8 +175,8 @@ static int __init tzic_init_dt(struct device_node *np, struct device_node *p)
irq_base = irq_alloc_descs(-1, 0, TZIC_NUM_IRQS, numa_node_id());
WARN_ON(irq_base < 0);
- domain = irq_domain_add_legacy(np, TZIC_NUM_IRQS, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ domain = irq_domain_create_legacy(of_fwnode_handle(np), TZIC_NUM_IRQS, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
WARN_ON(!domain);
for (i = 0; i < 4; i++, irq_base += 32)
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index 9b587ecebb1c..bb1bc060ecd8 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -220,8 +220,7 @@ void __init omap1_init_irq(void)
omap_l2_irq = irq_base;
omap_l2_irq -= NR_IRQS_LEGACY;
- domain = irq_domain_add_legacy(NULL, nr_irqs, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ domain = irq_domain_create_legacy(NULL, nr_irqs, irq_base, 0, &irq_domain_simple_ops, NULL);
pr_info("Total of %lu interrupts in %i interrupt banks\n",
nr_irqs, irq_bank_count);
diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
index c36fb2721261..86a2f9e5d0ef 100644
--- a/arch/arm/mach-omap2/clockdomain.h
+++ b/arch/arm/mach-omap2/clockdomain.h
@@ -48,6 +48,7 @@
#define CLKDM_NO_AUTODEPS (1 << 4)
#define CLKDM_ACTIVE_WITH_MPU (1 << 5)
#define CLKDM_MISSING_IDLE_REPORTING (1 << 6)
+#define CLKDM_STANDBY_FORCE_WAKEUP BIT(7)
#define CLKDM_CAN_HWSUP (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)
#define CLKDM_CAN_SWSUP (CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP)
diff --git a/arch/arm/mach-omap2/clockdomains33xx_data.c b/arch/arm/mach-omap2/clockdomains33xx_data.c
index 87f4e927eb18..c05a3c07d448 100644
--- a/arch/arm/mach-omap2/clockdomains33xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains33xx_data.c
@@ -19,7 +19,7 @@ static struct clockdomain l4ls_am33xx_clkdm = {
.pwrdm = { .name = "per_pwrdm" },
.cm_inst = AM33XX_CM_PER_MOD,
.clkdm_offs = AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET,
- .flags = CLKDM_CAN_SWSUP,
+ .flags = CLKDM_CAN_SWSUP | CLKDM_STANDBY_FORCE_WAKEUP,
};
static struct clockdomain l3s_am33xx_clkdm = {
diff --git a/arch/arm/mach-omap2/cm33xx.c b/arch/arm/mach-omap2/cm33xx.c
index acdf72a541c0..a4dd42abda89 100644
--- a/arch/arm/mach-omap2/cm33xx.c
+++ b/arch/arm/mach-omap2/cm33xx.c
@@ -20,6 +20,9 @@
#include "cm-regbits-34xx.h"
#include "cm-regbits-33xx.h"
#include "prm33xx.h"
+#if IS_ENABLED(CONFIG_SUSPEND)
+#include <linux/suspend.h>
+#endif
/*
* CLKCTRL_IDLEST_*: possible values for the CM_*_CLKCTRL.IDLEST bitfield:
@@ -328,8 +331,17 @@ static int am33xx_clkdm_clk_disable(struct clockdomain *clkdm)
{
bool hwsup = false;
+#if IS_ENABLED(CONFIG_SUSPEND)
+ /*
+	 * In case of standby, don't put the l4ls clk domain to sleep:
+	 * since the CM3 PM firmware doesn't wake up/enable the l4ls clk
+	 * domain on resume, it would then fail to wake up the MPU.
+ */
+ if (pm_suspend_target_state == PM_SUSPEND_STANDBY &&
+ (clkdm->flags & CLKDM_STANDBY_FORCE_WAKEUP))
+ return 0;
+#endif
hwsup = am33xx_cm_is_clkdm_in_hwsup(clkdm->cm_inst, clkdm->clkdm_offs);
-
if (!hwsup && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP))
am33xx_clkdm_sleep(clkdm);
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 6f0d6120c174..a66b1dc61571 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -585,9 +585,8 @@ static int __init wakeupgen_init(struct device_node *node,
wakeupgen_ops = &am43xx_wakeupgen_ops;
}
- domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
- node, &wakeupgen_domain_ops,
- NULL);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, max_irqs, of_fwnode_handle(node),
+ &wakeupgen_domain_ops, NULL);
if (!domain) {
iounmap(wakeupgen_base);
return -ENOMEM;
diff --git a/arch/arm/mach-omap2/pmic-cpcap.c b/arch/arm/mach-omap2/pmic-cpcap.c
index 4f31e61c0c90..9f9a20274db8 100644
--- a/arch/arm/mach-omap2/pmic-cpcap.c
+++ b/arch/arm/mach-omap2/pmic-cpcap.c
@@ -264,7 +264,11 @@ int __init omap4_cpcap_init(void)
static int __init cpcap_late_init(void)
{
- omap4_vc_set_pmic_signaling(PWRDM_POWER_RET);
+ if (!of_find_compatible_node(NULL, NULL, "motorola,cpcap"))
+ return 0;
+
+ if (soc_is_omap443x() || soc_is_omap446x() || soc_is_omap447x())
+ omap4_vc_set_pmic_signaling(PWRDM_POWER_RET);
return 0;
}
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index d9cadd97748a..5bfce8aa4102 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -147,9 +147,8 @@ pxa_init_irq_common(struct device_node *node, int irq_nr,
int n;
pxa_internal_irq_nr = irq_nr;
- pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
- PXA_IRQ(0), 0,
- &pxa_irq_ops, NULL);
+ pxa_irq_domain = irq_domain_create_legacy(of_fwnode_handle(node), irq_nr, PXA_IRQ(0), 0,
+ &pxa_irq_ops, NULL);
if (!pxa_irq_domain)
panic("Unable to add PXA IRQ domain\n");
irq_set_default_domain(pxa_irq_domain);
diff --git a/arch/arm/mach-s3c/gpio-samsung.c b/arch/arm/mach-s3c/gpio-samsung.c
index 87daaa09e2c3..206a492fbaf5 100644
--- a/arch/arm/mach-s3c/gpio-samsung.c
+++ b/arch/arm/mach-s3c/gpio-samsung.c
@@ -11,9 +11,9 @@
// Samsung - GPIOlib support
#include <linux/kernel.h>
+#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/module.h>
@@ -430,8 +430,8 @@ static int samsung_gpiolib_4bit2_output(struct gpio_chip *chip,
return 0;
}
-static void samsung_gpiolib_set(struct gpio_chip *chip,
- unsigned offset, int value)
+static int samsung_gpiolib_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
void __iomem *base = ourchip->base;
@@ -447,6 +447,8 @@ static void samsung_gpiolib_set(struct gpio_chip *chip,
__raw_writel(dat, base + 0x04);
samsung_gpio_unlock(ourchip, flags);
+
+ return 0;
}
static int samsung_gpiolib_get(struct gpio_chip *chip, unsigned offset)
@@ -515,7 +517,7 @@ static void __init samsung_gpiolib_add(struct samsung_gpio_chip *chip)
if (!gc->direction_output)
gc->direction_output = samsung_gpiolib_2bit_output;
if (!gc->set)
- gc->set = samsung_gpiolib_set;
+ gc->set_rv = samsung_gpiolib_set;
if (!gc->get)
gc->get = samsung_gpiolib_get;
diff --git a/arch/arm/mach-stm32/board-dt.c b/arch/arm/mach-stm32/board-dt.c
index 5dcc4ddd1a56..e6233c8725ae 100644
--- a/arch/arm/mach-stm32/board-dt.c
+++ b/arch/arm/mach-stm32/board-dt.c
@@ -17,6 +17,7 @@ static const char *const stm32_compat[] __initconst = {
"st,stm32f746",
"st,stm32f769",
"st,stm32h743",
+ "st,stm32h747",
"st,stm32h750",
"st,stm32mp131",
"st,stm32mp133",
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 0749cf8a6637..5219158d54cf 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -227,9 +227,9 @@ void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
}
/*
- * If this is a page cache page, and we have an aliasing VIPT cache,
+ * If this is a page cache folio, and we have an aliasing VIPT cache,
* we only need to do one flush - which would be at the relevant
- * userspace colour, which is congruent with page->index.
+ * userspace colour, which is congruent with folio->index.
*/
if (mapping && cache_is_vipt_aliasing())
flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index f02f872ea8a9..edb7f56b7c91 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -735,7 +735,7 @@ static void *__init late_alloc(unsigned long sz)
void *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_HIGHMEM,
get_order(sz));
- if (!ptdesc || !pagetable_pte_ctor(ptdesc))
+ if (!ptdesc || !pagetable_pte_ctor(NULL, ptdesc))
BUG();
return ptdesc_to_virt(ptdesc);
}
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 595e9cb33c1d..ca1bd764cfa5 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -211,7 +211,7 @@ orion_gpio_direction_output(struct gpio_chip *chip, unsigned pin, int value)
return 0;
}
-static void orion_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
+static int orion_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct orion_gpio_chip *ochip = gpiochip_get_data(chip);
unsigned long flags;
@@ -219,6 +219,8 @@ static void orion_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
spin_lock_irqsave(&ochip->lock, flags);
__set_level(ochip, pin, value);
spin_unlock_irqrestore(&ochip->lock, flags);
+
+ return 0;
}
static int orion_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
@@ -496,11 +498,10 @@ static void orion_gpio_unmask_irq(struct irq_data *d)
u32 reg_val;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
reg_val = irq_reg_readl(gc, ct->regs.mask);
reg_val |= mask;
irq_reg_writel(gc, reg_val, ct->regs.mask);
- irq_gc_unlock(gc);
}
static void orion_gpio_mask_irq(struct irq_data *d)
@@ -510,11 +511,10 @@ static void orion_gpio_mask_irq(struct irq_data *d)
u32 mask = d->mask;
u32 reg_val;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
reg_val = irq_reg_readl(gc, ct->regs.mask);
reg_val &= ~mask;
irq_reg_writel(gc, reg_val, ct->regs.mask);
- irq_gc_unlock(gc);
}
void __init orion_gpio_init(int gpio_base, int ngpio,
@@ -540,7 +540,7 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
ochip->chip.direction_input = orion_gpio_direction_input;
ochip->chip.get = orion_gpio_get;
ochip->chip.direction_output = orion_gpio_direction_output;
- ochip->chip.set = orion_gpio_set;
+ ochip->chip.set_rv = orion_gpio_set;
ochip->chip.to_irq = orion_gpio_to_irq;
ochip->chip.base = gpio_base;
ochip->chip.ngpio = ngpio;
@@ -602,12 +602,12 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
/* Setup irq domain on top of the generic chip. */
- ochip->domain = irq_domain_add_legacy(NULL,
- ochip->chip.ngpio,
- ochip->secondary_irq_base,
- ochip->secondary_irq_base,
- &irq_domain_simple_ops,
- ochip);
+ ochip->domain = irq_domain_create_legacy(NULL,
+ ochip->chip.ngpio,
+ ochip->secondary_irq_base,
+ ochip->secondary_irq_base,
+ &irq_domain_simple_ops,
+ ochip);
if (!ochip->domain)
panic("%s: couldn't allocate irq domain (DT).\n",
ochip->chip.label);
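The Samsung and Orion GPIO hunks above move the drivers from the old void .set callback to the int-returning .set_rv variant of struct gpio_chip, so write failures can be reported back to gpiolib. A stripped-down sketch of that pattern (driver and register names are placeholders, not from either driver) is:

#include <linux/gpio/driver.h>

/* returns 0 on success so gpiolib can propagate write errors */
static int example_gpio_set(struct gpio_chip *chip, unsigned int offset,
			    int value)
{
	/* write the output data register for 'offset' here */
	return 0;
}

static void example_gpio_setup(struct gpio_chip *gc)
{
	gc->set_rv = example_gpio_set;	/* instead of the old gc->set */
}

The Orion hunk additionally replaces the irq_gc_lock()/irq_gc_unlock() pair with guard(raw_spinlock)(&gc->lock), which drops the lock automatically at the end of the scope.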
diff --git a/arch/arm/probes/uprobes/core.c b/arch/arm/probes/uprobes/core.c
index f5f790c6e5f8..885e0c5e8c20 100644
--- a/arch/arm/probes/uprobes/core.c
+++ b/arch/arm/probes/uprobes/core.c
@@ -26,10 +26,10 @@ bool is_swbp_insn(uprobe_opcode_t *insn)
(UPROBE_SWBP_ARM_INSN & 0x0fffffff);
}
-int set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
+int set_swbp(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
unsigned long vaddr)
{
- return uprobe_write_opcode(auprobe, mm, vaddr,
+ return uprobe_write_opcode(auprobe, vma, vaddr,
__opcode_to_mem_arm(auprobe->bpinsn));
}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a182295e6f08..55fc331af337 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -42,6 +42,7 @@ config ARM64
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT
+ select ARCH_HAS_PREEMPT_LAZY
select ARCH_HAS_PTDUMP
select ARCH_HAS_PTE_DEVMAP
select ARCH_HAS_PTE_SPECIAL
@@ -134,7 +135,6 @@ config ARM64
select COMMON_CLK
select CPU_PM if (SUSPEND || CPU_IDLE)
select CPUMASK_OFFSTACK if NR_CPUS > 256
- select CRC32
select DCACHE_WORD_ACCESS
select DYNAMIC_FTRACE if FUNCTION_TRACER
select DMA_BOUNCE_UNALIGNED_KMALLOC
@@ -333,9 +333,9 @@ config ARCH_MMAP_RND_BITS_MAX
default 24 if ARM64_VA_BITS=39
default 27 if ARM64_VA_BITS=42
default 30 if ARM64_VA_BITS=47
- default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES
- default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES
- default 33 if ARM64_VA_BITS=48
+ default 29 if (ARM64_VA_BITS=48 || ARM64_VA_BITS=52) && ARM64_64K_PAGES
+ default 31 if (ARM64_VA_BITS=48 || ARM64_VA_BITS=52) && ARM64_16K_PAGES
+ default 33 if (ARM64_VA_BITS=48 || ARM64_VA_BITS=52)
default 14 if ARM64_64K_PAGES
default 16 if ARM64_16K_PAGES
default 18
@@ -464,6 +464,23 @@ config AMPERE_ERRATUM_AC03_CPU_38
If unsure, say Y.
+config AMPERE_ERRATUM_AC04_CPU_23
+ bool "AmpereOne: AC04_CPU_23: Failure to synchronize writes to HCR_EL2 may corrupt address translations."
+ default y
+ help
+ This option adds an alternative code sequence to work around Ampere
+ errata AC04_CPU_23 on AmpereOne.
+
+ Updates to HCR_EL2 can rarely corrupt simultaneous translations for
+ data addresses initiated by load/store instructions. Only
+ instruction initiated translations are vulnerable, not translations
+ from prefetches for example. A DSB before the store to HCR_EL2 is
+ sufficient to prevent older instructions from hitting the window
+ for corruption, and an ISB after is sufficient to prevent younger
+ instructions from hitting the window for corruption.
+
+ If unsure, say Y.
+
config ARM64_WORKAROUND_CLEAN_CACHE
bool
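The new AMPERE_ERRATUM_AC04_CPU_23 help text above describes the required ordering: a DSB before the HCR_EL2 write and an ISB after it. The in-kernel workaround is applied as an alternative code sequence, but a hand-written equivalent of that ordering might look like the sketch below; the helper name and the DSB domain shown are assumptions, not taken from the patch.

#include <linux/types.h>

/*
 * Illustrative only: the DSB-before / ISB-after ordering from the help
 * text, written as a plain helper rather than an alternatives-patched
 * sequence.
 */
static inline void example_write_hcr_el2(u64 val)
{
	asm volatile(
	"	dsb	nsh\n"		/* older instructions leave the window */
	"	msr	hcr_el2, %0\n"	/* update HCR_EL2 */
	"	isb\n"			/* younger instructions see the new value */
	: : "r" (val) : "memory");
}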
@@ -642,9 +659,6 @@ config ARM64_ERRATUM_843419
If unsure, say Y.
-config ARM64_LD_HAS_FIX_ERRATUM_843419
- def_bool $(ld-option,--fix-cortex-a53-843419)
-
config ARM64_ERRATUM_1024718
bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
default y
@@ -1602,6 +1616,9 @@ config ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG
config ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG
def_bool y
+config ARCH_SUPPORTS_KEXEC_HANDOVER
+ def_bool y
+
config ARCH_SUPPORTS_CRASH_DUMP
def_bool y
@@ -1890,13 +1907,9 @@ config ARM64_PAN
The feature is detected at runtime, and will remain as a 'nop'
instruction if the cpu does not implement the feature.
-config AS_HAS_LSE_ATOMICS
- def_bool $(as-instr,.arch_extension lse)
-
config ARM64_LSE_ATOMICS
bool
default ARM64_USE_LSE_ATOMICS
- depends on AS_HAS_LSE_ATOMICS
config ARM64_USE_LSE_ATOMICS
bool "Atomic instructions"
@@ -1908,20 +1921,12 @@ config ARM64_USE_LSE_ATOMICS
Say Y here to make use of these instructions for the in-kernel
atomic routines. This incurs a small overhead on CPUs that do
- not support these instructions and requires the kernel to be
- built with binutils >= 2.25 in order for the new instructions
- to be used.
+ not support these instructions.
endmenu # "ARMv8.1 architectural features"
menu "ARMv8.2 architectural features"
-config AS_HAS_ARMV8_2
- def_bool $(cc-option,-Wa$(comma)-march=armv8.2-a)
-
-config AS_HAS_SHA3
- def_bool $(as-instr,.arch armv8.2-a+sha3)
-
config ARM64_PMEM
bool "Enable support for persistent memory"
select ARCH_HAS_PMEM_API
@@ -1995,7 +2000,6 @@ config ARM64_PTR_AUTH_KERNEL
bool "Use pointer authentication for kernel"
default y
depends on ARM64_PTR_AUTH
- depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_ARMV8_3
# Modern compilers insert a .note.gnu.property section note for PAC
# which is only understood by binutils starting with version 2.33.1.
depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
@@ -2016,19 +2020,10 @@ config CC_HAS_BRANCH_PROT_PAC_RET
# GCC 9 or later, clang 8 or later
def_bool $(cc-option,-mbranch-protection=pac-ret+leaf)
-config CC_HAS_SIGN_RETURN_ADDRESS
- # GCC 7, 8
- def_bool $(cc-option,-msign-return-address=all)
-
-config AS_HAS_ARMV8_3
- def_bool $(cc-option,-Wa$(comma)-march=armv8.3-a)
-
config AS_HAS_CFI_NEGATE_RA_STATE
+ # binutils 2.34+
def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
-config AS_HAS_LDAPR
- def_bool $(as-instr,.arch_extension rcpc)
-
endmenu # "ARMv8.3 architectural features"
menu "ARMv8.4 architectural features"
@@ -2056,20 +2051,13 @@ config ARM64_AMU_EXTN
correctly reflect reality. Most commonly, the value read will be 0,
indicating that the counter is not enabled.
-config AS_HAS_ARMV8_4
- def_bool $(cc-option,-Wa$(comma)-march=armv8.4-a)
-
config ARM64_TLB_RANGE
bool "Enable support for tlbi range feature"
default y
- depends on AS_HAS_ARMV8_4
help
ARMv8.4-TLBI provides TLBI invalidation instruction that apply to a
range of input addresses.
- The feature introduces new assembly instructions, and they were
- support when binutils >= 2.30.
-
endmenu # "ARMv8.4 architectural features"
menu "ARMv8.5 architectural features"
@@ -2145,7 +2133,6 @@ config ARM64_MTE
default y
depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
depends on AS_HAS_ARMV8_5
- depends on AS_HAS_LSE_ATOMICS
# Required for tag checking in the uaccess routines
select ARM64_PAN
select ARCH_HAS_SUBPAGE_FAULTS
@@ -2285,7 +2272,6 @@ config ARM64_SME
bool "ARM Scalable Matrix Extension support"
default y
depends on ARM64_SVE
- depends on BROKEN
help
The Scalable Matrix Extension (SME) is an extension to the AArch64
execution state which utilises a substantial subset of the SVE
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 8b76821f190f..a541bb029aa4 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -269,7 +269,7 @@ config ARCH_QCOM
bool "Qualcomm Platforms"
select GPIOLIB
select PINCTRL
- select HAVE_PWRCTL if PCI
+ select HAVE_PWRCTRL if PCI
help
This enables support for the ARMv8 based Qualcomm chipsets.
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 1d5dfcd1c13e..73a10f65ce8b 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -16,14 +16,11 @@ ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
# for relative relocs, since this leads to better Image compression
# with the relocation offsets always being zero.
-LDFLAGS_vmlinux += -shared -Bsymbolic -z notext \
- $(call ld-option, --no-apply-dynamic-relocs)
+LDFLAGS_vmlinux += -shared -Bsymbolic -z notext --no-apply-dynamic-relocs
endif
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
- ifeq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
LDFLAGS_vmlinux += --fix-cortex-a53-843419
- endif
endif
cc_has_k_constraint := $(call try-run,echo \
@@ -105,12 +102,8 @@ endif
# hardware.
ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
asm-arch := armv8.5-a
-else ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
+else
asm-arch := armv8.4-a
-else ifeq ($(CONFIG_AS_HAS_ARMV8_3), y)
- asm-arch := armv8.3-a
-else ifeq ($(CONFIG_AS_HAS_ARMV8_2), y)
- asm-arch := armv8.2-a
endif
ifdef asm-arch
@@ -201,16 +194,6 @@ install zinstall:
archprepare:
$(Q)$(MAKE) $(build)=arch/arm64/tools kapi
-ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
- ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
- @echo "warning: ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum" >&2
- endif
-endif
-ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS),y)
- ifneq ($(CONFIG_ARM64_LSE_ATOMICS),y)
- @echo "warning: LSE atomics not supported by binutils" >&2
- endif
-endif
ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
diff --git a/arch/arm64/boot/dts/airoha/en7581-evb.dts b/arch/arm64/boot/dts/airoha/en7581-evb.dts
index d53b72d18242..99d2c4f1fc5a 100644
--- a/arch/arm64/boot/dts/airoha/en7581-evb.dts
+++ b/arch/arm64/boot/dts/airoha/en7581-evb.dts
@@ -65,6 +65,36 @@
};
};
+&en7581_pinctrl {
+ gpio-ranges = <&en7581_pinctrl 0 13 47>;
+
+ pcie0_rst_pins: pcie0-rst-pins {
+ conf {
+ pins = "pcie_reset0";
+ drive-open-drain = <1>;
+ };
+ };
+
+ pcie1_rst_pins: pcie1-rst-pins {
+ conf {
+ pins = "pcie_reset1";
+ drive-open-drain = <1>;
+ };
+ };
+};
+
+&pcie0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_rst_pins>;
+ status = "okay";
+};
+
+&pcie1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie1_rst_pins>;
+ status = "okay";
+};
+
&i2c0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/airoha/en7581.dtsi b/arch/arm64/boot/dts/airoha/en7581.dtsi
index 26b136940917..536ece69b935 100644
--- a/arch/arm64/boot/dts/airoha/en7581.dtsi
+++ b/arch/arm64/boot/dts/airoha/en7581.dtsi
@@ -180,6 +180,111 @@
#reset-cells = <1>;
};
+ pbus_csr: syscon@1fbe3400 {
+ compatible = "airoha,en7581-pbus-csr", "syscon";
+ reg = <0x0 0x1fbe3400 0x0 0xff>;
+ };
+
+ pciephy: phy@1fa5a000 {
+ compatible = "airoha,en7581-pcie-phy";
+ reg = <0x0 0x1fa5a000 0x0 0xfff>,
+ <0x0 0x1fa5b000 0x0 0xfff>,
+ <0x0 0x1fa5c000 0x0 0xfff>,
+ <0x0 0x1fc10044 0x0 0x4>,
+ <0x0 0x1fc30044 0x0 0x4>,
+ <0x0 0x1fc15030 0x0 0x104>;
+ reg-names = "csr-2l", "pma0", "pma1",
+ "p0-xr-dtime", "p1-xr-dtime",
+ "rx-aeq";
+ #phy-cells = <0>;
+ };
+
+ pcie0: pcie@1fc00000 {
+ compatible = "airoha,en7581-pcie";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ reg = <0x0 0x1fc00000 0x0 0x1670>;
+ reg-names = "pcie-mac";
+
+ clocks = <&scuclk EN7523_CLK_PCIE>;
+ clock-names = "sys-ck";
+
+ phys = <&pciephy>;
+ phy-names = "pcie-phy";
+
+ ranges = <0x02000000 0 0x20000000 0x0 0x20000000 0 0x4000000>;
+
+ resets = <&scuclk EN7581_PCIE0_RST>,
+ <&scuclk EN7581_PCIE1_RST>,
+ <&scuclk EN7581_PCIE2_RST>;
+ reset-names = "phy-lane0", "phy-lane1", "phy-lane2";
+
+ mediatek,pbus-csr = <&pbus_csr 0x0 0x4>;
+
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ bus-range = <0x00 0xff>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+ <0 0 0 2 &pcie_intc0 1>,
+ <0 0 0 3 &pcie_intc0 2>,
+ <0 0 0 4 &pcie_intc0 3>;
+
+ status = "disabled";
+
+ pcie_intc0: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ pcie1: pcie@1fc20000 {
+ compatible = "airoha,en7581-pcie";
+ device_type = "pci";
+ linux,pci-domain = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ reg = <0x0 0x1fc20000 0x0 0x1670>;
+ reg-names = "pcie-mac";
+
+ clocks = <&scuclk EN7523_CLK_PCIE>;
+ clock-names = "sys-ck";
+
+ phys = <&pciephy>;
+ phy-names = "pcie-phy";
+
+ ranges = <0x02000000 0 0x24000000 0x0 0x24000000 0 0x4000000>;
+
+ resets = <&scuclk EN7581_PCIE0_RST>,
+ <&scuclk EN7581_PCIE1_RST>,
+ <&scuclk EN7581_PCIE2_RST>;
+ reset-names = "phy-lane0", "phy-lane1", "phy-lane2";
+
+ mediatek,pbus-csr = <&pbus_csr 0x8 0xc>;
+
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ bus-range = <0x00 0xff>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc1 0>,
+ <0 0 0 2 &pcie_intc1 1>,
+ <0 0 0 3 &pcie_intc1 2>,
+ <0 0 0 4 &pcie_intc1 3>;
+
+ status = "disabled";
+
+ pcie_intc1: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+
uart1: serial@1fbf0000 {
compatible = "ns16550";
reg = <0x0 0x1fbf0000 0x0 0x30>;
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
index 00bed412ee31..773cc02a13d0 100644
--- a/arch/arm64/boot/dts/allwinner/Makefile
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -18,6 +18,7 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-sopine-baseboard.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-teres-i.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h64-remix-mini-pc.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a100-allwinner-perf1.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a133-liontron-h-a133l.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-bananapi-m2-plus.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-bananapi-m2-plus-v1.2.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-emlid-neutis-n5-devboard.dtb
@@ -48,7 +49,11 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-longanpi-3h.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero2w.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero3.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-transpeed-8k618-t.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-yuzukihd-chameleon.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-2024.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-h.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-plus.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-sp.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun55i-a527-cubie-a5e.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun55i-h728-x96qpro+.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun55i-t527-avaota-a1.dtb
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi
index f9f6fea03b74..bd366389b238 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi
@@ -252,6 +252,7 @@
interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
+ max-frequency = <150000000>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -267,6 +268,7 @@
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
+ max-frequency = <150000000>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -282,6 +284,7 @@
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;
+ max-frequency = <150000000>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a133-liontron-h-a133l.dts b/arch/arm64/boot/dts/allwinner/sun50i-a133-liontron-h-a133l.dts
new file mode 100644
index 000000000000..fe77178d3e33
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a133-liontron-h-a133l.dts
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Arm Ltd.
+ */
+
+/dts-v1/;
+
+#include "sun50i-a100.dtsi"
+#include "sun50i-a100-cpu-opp.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+
+/{
+ model = "Liontron H-A133L";
+ compatible = "liontron,h-a133l", "allwinner,sun50i-a100";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led {
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&pio 7 16 GPIO_ACTIVE_LOW>; /* PH16 */
+ };
+ };
+
+ reg_vcc5v: vcc5v {
+ /* board wide 5V supply from a 12V->5V regulator */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_usb1_vbus: regulator-usb1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb1-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&reg_vcc5v>;
+ enable-active-high;
+ gpio = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&mmc0 {
+ vmmc-supply = <&reg_dcdc1>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+ bus-width = <4>;
+ status = "okay";
+};
+
+&mmc2 {
+ vmmc-supply = <&reg_dcdc1>;
+ vqmmc-supply = <&reg_eldo1>;
+ cap-mmc-hw-reset;
+ non-removable;
+ bus-width = <8>;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&pio {
+ vcc-pb-supply = <&reg_dcdc1>;
+ vcc-pc-supply = <&reg_eldo1>;
+ vcc-pf-supply = <&reg_dcdc1>;
+ vcc-ph-supply = <&reg_dcdc1>;
+};
+
+&r_i2c0 {
+ status = "okay";
+
+ axp803: pmic@34 {
+ compatible = "x-powers,axp803";
+ reg = <0x34>;
+ interrupt-parent = <&r_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+#include "axp803.dtsi"
+
+&ac_power_supply {
+ status = "okay";
+};
+
+&reg_aldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-codec-avcc";
+};
+
+&reg_aldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-dram-1";
+};
+
+&reg_aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-usb-pl";
+};
+
+&reg_dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-io-usb-pd-emmc";
+};
+
+&reg_dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <810000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vdd-cpux";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdd-usb-cpus";
+};
+
+&reg_dcdc4 {
+ regulator-always-on;
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <950000>;
+ regulator-name = "vdd-sys";
+};
+
+&reg_dcdc5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vcc-dram";
+};
+
+/* DCDC6 unused */
+/* DLDO3 unused */
+/* DLDO4 unused */
+
+&reg_eldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pc-emmc";
+};
+
+/* ELDO2 unused */
+/* ELDO3 unused */
+
+&reg_fldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdd-cpus-usb";
+};
+
+/* reg_drivevbus unused */
+/* dc1sw unused */
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pb_pins>;
+ status = "okay";
+};
+
+&usb_otg {
+ dr_mode = "host"; /* USB A type receptacle, always powered */
+ status = "okay";
+};
+
+&usbphy {
+ usb1_vbus-supply = <&reg_usb1_vbus>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index 09e71fd60785..3256acec1ff9 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -124,6 +124,17 @@
status = "okay";
};
+/* On Wifi/BT connector */
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ vmmc-supply = <&reg_dldo4>;
+ vqmmc-supply = <&reg_eldo1>;
+ bus-width = <4>;
+ non-removable;
+ status = "disabled";
+};
+
&ohci0 {
status = "okay";
};
@@ -286,6 +297,7 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
+ uart-has-rtscts;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
index be2347c8f267..231e652cab67 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
@@ -103,6 +103,17 @@
};
};
+/* On Wifi/BT connector */
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ vmmc-supply = <&reg_dldo4>;
+ vqmmc-supply = <&reg_eldo1>;
+ bus-width = <4>;
+ non-removable;
+ status = "disabled";
+};
+
&mmc2 {
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;
@@ -175,6 +186,14 @@
status = "okay";
};
+/* On Wifi/BT connector, with RTS/CTS */
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
+ uart-has-rtscts;
+ status = "disabled";
+};
+
/* On Pi-2 connector */
&uart2 {
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts
index 17e6aef67aaf..7906b79c0389 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts
@@ -79,6 +79,11 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&reg_dcdc1>;
+ status = "okay";
+};
+
&ir {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
index d3caf27b6a55..01a29c1988a6 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
@@ -16,7 +16,6 @@
reg = <0>;
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
- clock-latency-ns = <244144>; /* 8 32k periods */
#cooling-cells = <2>;
};
@@ -26,7 +25,6 @@
reg = <1>;
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
- clock-latency-ns = <244144>; /* 8 32k periods */
#cooling-cells = <2>;
};
@@ -36,7 +34,6 @@
reg = <2>;
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
- clock-latency-ns = <244144>; /* 8 32k periods */
#cooling-cells = <2>;
};
@@ -46,7 +43,6 @@
reg = <3>;
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
- clock-latency-ns = <244144>; /* 8 32k periods */
#cooling-cells = <2>;
};
};
@@ -207,7 +203,7 @@
};
cooling-maps {
- cpu-hot-limit {
+ map0 {
trip = <&cpu_hot_trip>;
cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
index 13a0e63afeaf..2c64d834a2c4 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -152,28 +152,12 @@
vcc-pg-supply = <&reg_aldo1>;
};
-&r_ir {
- linux,rc-map-name = "rc-beelink-gs1";
- status = "okay";
-};
-
-&r_pio {
- /*
- * FIXME: We can't add that supply for now since it would
- * create a circular dependency between pinctrl, the regulator
- * and the RSB Bus.
- *
- * vcc-pl-supply = <&reg_aldo1>;
- */
- vcc-pm-supply = <&reg_aldo1>;
-};
-
-&r_rsb {
+&r_i2c {
status = "okay";
- axp805: pmic@745 {
+ axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806";
- reg = <0x745>;
+ reg = <0x36>;
interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
@@ -291,6 +275,22 @@
};
};
+&r_ir {
+ linux,rc-map-name = "rc-beelink-gs1";
+ status = "okay";
+};
+
+&r_pio {
+ /*
+ * PL0 and PL1 are used for PMIC I2C
+ * don't enable the pl-supply else
+ * it will fail at boot
+ *
+ * vcc-pl-supply = <&reg_aldo1>;
+ */
+ vcc-pm-supply = <&reg_aldo1>;
+};
+
&spdif {
pinctrl-names = "default";
pinctrl-0 = <&spdif_tx_pin>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
index ab87c3447cd7..dc7381c944c9 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
@@ -144,7 +144,7 @@
non-removable;
status = "okay";
- brcm: sdio-wifi@1 {
+ brcm: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
interrupt-parent = <&r_pio>;
@@ -176,16 +176,12 @@
vcc-pg-supply = <&reg_vcc_wifi_io>;
};
-&r_ir {
- status = "okay";
-};
-
-&r_rsb {
+&r_i2c {
status = "okay";
- axp805: pmic@745 {
+ axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806";
- reg = <0x745>;
+ reg = <0x36>;
interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
@@ -296,6 +292,10 @@
};
};
+&r_ir {
+ status = "okay";
+};
+
&rtc {
clocks = <&ext_osc32k>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts
index a3f65a45bd26..0911c537cc6b 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts
@@ -28,7 +28,7 @@
non-removable;
status = "okay";
- brcm: sdio-wifi@1 {
+ brcm: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
interrupt-parent = <&r_pio>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
index d05dc5d6e6b9..e34dbb992021 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
@@ -113,20 +113,12 @@
vcc-pg-supply = <&reg_aldo1>;
};
-&r_ir {
- status = "okay";
-};
-
-&r_pio {
- vcc-pm-supply = <&reg_bldo3>;
-};
-
-&r_rsb {
+&r_i2c {
status = "okay";
- axp805: pmic@745 {
+ axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806";
- reg = <0x745>;
+ reg = <0x36>;
interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
@@ -241,6 +233,14 @@
};
};
+&r_ir {
+ status = "okay";
+};
+
+&r_pio {
+ vcc-pm-supply = <&reg_bldo3>;
+};
+
&rtc {
clocks = <&ext_osc32k>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index 2301c59b41b1..73e8604315c5 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -27,7 +27,6 @@
reg = <0>;
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
- clock-latency-ns = <244144>; /* 8 32k periods */
#cooling-cells = <2>;
i-cache-size = <0x8000>;
i-cache-line-size = <64>;
@@ -44,7 +43,6 @@
reg = <1>;
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
- clock-latency-ns = <244144>; /* 8 32k periods */
#cooling-cells = <2>;
i-cache-size = <0x8000>;
i-cache-line-size = <64>;
@@ -61,7 +59,6 @@
reg = <2>;
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
- clock-latency-ns = <244144>; /* 8 32k periods */
#cooling-cells = <2>;
i-cache-size = <0x8000>;
i-cache-line-size = <64>;
@@ -78,7 +75,6 @@
reg = <3>;
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
- clock-latency-ns = <244144>; /* 8 32k periods */
#cooling-cells = <2>;
i-cache-size = <0x8000>;
i-cache-line-size = <64>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616-bigtreetech-cb1.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h616-bigtreetech-cb1.dtsi
index d12b01c5f41b..bebfeb2a337a 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616-bigtreetech-cb1.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616-bigtreetech-cb1.dtsi
@@ -67,6 +67,11 @@
cpu-supply = <&reg_dcdc2>;
};
+&gpu {
+ mali-supply = <&reg_dcdc1>;
+ status = "okay";
+};
+
&mmc0 {
vmmc-supply = <&reg_dldo1>;
/* Card detection pin is not connected */
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi
index 908fa3b847a6..a8644fb52b04 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi
@@ -77,6 +77,10 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+};
+
&mdio0 {
ext_rgmii_phy: ethernet-phy@1 {
compatible = "ethernet-phy-ieee802.3-c22";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts
index a360d8567f95..f2e3300e078a 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts
@@ -24,6 +24,10 @@
phy-supply = <&reg_dcdce>;
};
+&gpu {
+ mali-supply = <&reg_dcdcc>;
+};
+
&mmc0 {
vmmc-supply = <&reg_dcdce>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616-x96-mate.dts b/arch/arm64/boot/dts/allwinner/sun50i-h616-x96-mate.dts
index 968960ebf1d1..085f3e4e8eaa 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616-x96-mate.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616-x96-mate.dts
@@ -50,6 +50,11 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&reg_dcdcc>;
+ status = "okay";
+};
+
&ir {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi
index cdce3dcb8ec0..ceedae9e399b 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi
@@ -150,6 +150,21 @@
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0x40000000>;
+ gpu: gpu@1800000 {
+ compatible = "allwinner,sun50i-h616-mali",
+ "arm,mali-bifrost";
+ reg = <0x1800000 0x40000>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "job", "mmu", "gpu";
+ clocks = <&ccu CLK_GPU0>, <&ccu CLK_BUS_GPU>;
+ clock-names = "core", "bus";
+ power-domains = <&prcm_ppu 2>;
+ resets = <&ccu RST_BUS_GPU>;
+ status = "disabled";
+ };
+
crypto: crypto@1904000 {
compatible = "allwinner,sun50i-h616-crypto";
reg = <0x01904000 0x800>;
@@ -874,6 +889,12 @@
#reset-cells = <1>;
};
+ prcm_ppu: power-controller@7010250 {
+ compatible = "allwinner,sun50i-h616-prcm-ppu";
+ reg = <0x07010250 0x10>;
+ #power-domain-cells = <1>;
+ };
+
nmi_intc: interrupt-controller@7010320 {
compatible = "allwinner,sun50i-h616-nmi",
"allwinner,sun9i-a80-nmi";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h618-longan-module-3h.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h618-longan-module-3h.dtsi
index e92d150aaf1c..3f416d129b72 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h618-longan-module-3h.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h618-longan-module-3h.dtsi
@@ -10,6 +10,11 @@
cpu-supply = <&reg_dcdc2>;
};
+&gpu {
+ mali-supply = <&reg_dcdc1>;
+ status = "okay";
+};
+
&mmc2 {
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero2w.dts b/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero2w.dts
index a0fe7a9afb77..b340bbcb710d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero2w.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero2w.dts
@@ -69,6 +69,11 @@
/* USB 2 & 3 are on the FPC connector (or the expansion board) */
+&gpu {
+ mali-supply = <&reg_dcdc1>;
+ status = "okay";
+};
+
&mmc0 {
cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
bus-width = <4>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts
index e1cd7572a14c..c51d4d9120de 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts
@@ -27,6 +27,10 @@
motorcomm,clk-out-frequency-hz = <125000000>;
};
+&gpu {
+ mali-supply = <&reg_dcdc1>;
+};
+
&mmc0 {
/*
* The schematic shows the card detect pin wired up to PF6, via an
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h618-transpeed-8k618-t.dts b/arch/arm64/boot/dts/allwinner/sun50i-h618-transpeed-8k618-t.dts
index f828ca1ce51e..efe0faa252f5 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h618-transpeed-8k618-t.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h618-transpeed-8k618-t.dts
@@ -69,6 +69,11 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&reg_dcdc1>;
+ status = "okay";
+};
+
&ir {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h618-yuzukihd-chameleon.dts b/arch/arm64/boot/dts/allwinner/sun50i-h618-yuzukihd-chameleon.dts
new file mode 100644
index 000000000000..eae56908b9b4
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h618-yuzukihd-chameleon.dts
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ */
+
+/dts-v1/;
+
+#include "sun50i-h616.dtsi"
+#include "sun50i-h616-cpu-opp.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ model = "Yuzuki Chameleon";
+ compatible = "yuzukihd,chameleon", "allwinner,sun50i-h618";
+
+ aliases {
+ ethernet1 = &sdio_wifi;
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ reg_vcc5v: vcc5v {
+ /* board wide 5V supply directly from the USB-C socket */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ wifi_pwrseq: pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rtc CLK_OSC32K_FANOUT>;
+ clock-names = "ext_clock";
+ pinctrl-0 = <&x32clk_fanout_pin>;
+ pinctrl-names = "default";
+ reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>; /* PG11 */
+ };
+};
+
+&codec {
+ allwinner,audio-routing = "Line Out", "LINEOUT";
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ehci2 {
+ status = "okay";
+};
+
+&ehci3 {
+ status = "okay";
+};
+
+&mmc0 {
+ bus-width = <4>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+ disable-wp;
+ vmmc-supply = <&reg_dldo1>;
+ status = "okay";
+};
+
+&mmc1 {
+ bus-width = <4>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ non-removable;
+ vmmc-supply = <&reg_dldo1>;
+ vqmmc-supply = <&reg_dldo1>;
+ status = "okay";
+
+ sdio_wifi: wifi@1 {
+ reg = <1>;
+ interrupt-parent = <&pio>;
+ interrupts = <6 12 IRQ_TYPE_LEVEL_LOW>; /* PG12 */
+ interrupt-names = "host-wake";
+ };
+};
+
+&mmc2 {
+ bus-width = <8>;
+ cap-mmc-hw-reset;
+ mmc-ddr-3_3v;
+ non-removable;
+ vmmc-supply = <&reg_dldo1>;
+ vqmmc-supply = <&reg_dldo1>;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&ohci2 {
+ status = "okay";
+};
+
+&ohci3 {
+ status = "okay";
+};
+
+&pio {
+ vcc-pc-supply = <&reg_dldo1>;
+ vcc-pf-supply = <&reg_dldo1>; /* via VCC_IO */
+ vcc-pg-supply = <&reg_dldo1>;
+ vcc-ph-supply = <&reg_dldo1>; /* via VCC_IO */
+ vcc-pi-supply = <&reg_dldo1>;
+};
+
+&r_i2c {
+ status = "okay";
+
+ axp313: pmic@36 {
+ compatible = "x-powers,axp313a";
+ reg = <0x36>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ interrupt-parent = <&pio>;
+ interrupts = <2 2 IRQ_TYPE_LEVEL_LOW>; /* PC2 */
+
+ vin1-supply = <&reg_vcc5v>;
+ vin2-supply = <&reg_vcc5v>;
+ vin3-supply = <&reg_vcc5v>;
+
+ regulators {
+ /* Supplies VCC-PLL, so needs to be always on. */
+ reg_aldo1: aldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc1v8";
+ };
+
+ /* Supplies VCC-IO, so needs to be always on. */
+ reg_dldo1: dldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc3v3";
+ };
+
+ reg_dcdc1: dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <810000>;
+ regulator-max-microvolt = <990000>;
+ regulator-name = "vdd-gpu-sys";
+ };
+
+ reg_dcdc2: dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <810000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-cpu";
+ };
+
+ reg_dcdc3: dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "vdd-dram";
+ };
+ };
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_ph_pins>;
+ status = "okay";
+};
+
+/* Connected to the Bluetooth UART pins of the XR829 Wifi/BT chip. */
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&usbotg {
+ /*
+ * PHY0 pins are connected to a USB-C socket, but a role switch
+ * is not implemented: both CC pins are pulled to GND.
+ * The VBUS pins power the device, so a fixed peripheral mode
+ * is the best choice.
+ * The board can be powered via GPIOs; in this case port0 *can*
+ * act as a host (with a cable/adapter ignoring CC), as VBUS is
+ * then provided by the GPIOs. Any user of this setup would
+ * need to adjust the DT accordingly: dr_mode set to "host",
+ * enabling OHCI0 and EHCI0.
+ */
+ dr_mode = "peripheral";
+ status = "okay";
+};
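/*
 * Editorial sketch, not part of this patch: the note above says that users
 * powering the board through the GPIO pins can run port 0 as a host.
 * EHCI0 and OHCI0 are already enabled earlier in this file, so the
 * remaining device tree change would look roughly like:
 *
 *	&usbotg {
 *		dr_mode = "host";
 *	};
 *
 * This only illustrates the scenario the comment describes; it is not a
 * configuration validated by this patch.
 */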
+
+&usbphy {
+ usb0_id_det-gpios = <&pio 6 18 GPIO_ACTIVE_HIGH>; /* PG18 */
+ usb0_vbus-supply = <&reg_vcc5v>;
+ usb1_vbus-supply = <&reg_vcc5v>;
+ usb2_vbus-supply = <&reg_vcc5v>;
+ usb3_vbus-supply = <&reg_vcc5v>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
index 7e17ca07892d..1a750c5f6fac 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
@@ -184,8 +184,11 @@
};
&codec {
- allwinner,audio-routing = "Line Out", "LINEOUT";
+ /* Both speakers and headphone jack connected to 74HC4052D analog mux */
+ allwinner,audio-routing = "Speaker", "LINEOUT",
+ "Headphone", "LINEOUT";
allwinner,pa-gpios = <&pio 8 5 GPIO_ACTIVE_HIGH>; // PI5
+ hp-det-gpios = <&pio 8 3 GPIO_ACTIVE_HIGH>; // PI3
status = "okay";
};
@@ -197,6 +200,11 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&reg_dcdc2>;
+ status = "okay";
+};
+
&mmc0 {
vmmc-supply = <&reg_cldo3>;
disable-wp;
diff --git a/arch/arm64/boot/dts/allwinner/sun55i-a523.dtsi b/arch/arm64/boot/dts/allwinner/sun55i-a523.dtsi
new file mode 100644
index 000000000000..8b7cbc2e78f5
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun55i-a523.dtsi
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+// Copyright (C) 2023-2024 Arm Ltd.
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/sun6i-rtc.h>
+#include <dt-bindings/clock/sun55i-a523-ccu.h>
+#include <dt-bindings/clock/sun55i-a523-r-ccu.h>
+#include <dt-bindings/reset/sun55i-a523-ccu.h>
+#include <dt-bindings/reset/sun55i-a523-r-ccu.h>
+
+/ {
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a55";
+ device_type = "cpu";
+ reg = <0x000>;
+ enable-method = "psci";
+ };
+
+ cpu1: cpu@100 {
+ compatible = "arm,cortex-a55";
+ device_type = "cpu";
+ reg = <0x100>;
+ enable-method = "psci";
+ };
+
+ cpu2: cpu@200 {
+ compatible = "arm,cortex-a55";
+ device_type = "cpu";
+ reg = <0x200>;
+ enable-method = "psci";
+ };
+
+ cpu3: cpu@300 {
+ compatible = "arm,cortex-a55";
+ device_type = "cpu";
+ reg = <0x300>;
+ enable-method = "psci";
+ };
+
+ cpu4: cpu@400 {
+ compatible = "arm,cortex-a55";
+ device_type = "cpu";
+ reg = <0x400>;
+ enable-method = "psci";
+ };
+
+ cpu5: cpu@500 {
+ compatible = "arm,cortex-a55";
+ device_type = "cpu";
+ reg = <0x500>;
+ enable-method = "psci";
+ };
+
+ cpu6: cpu@600 {
+ compatible = "arm,cortex-a55";
+ device_type = "cpu";
+ reg = <0x600>;
+ enable-method = "psci";
+ };
+
+ cpu7: cpu@700 {
+ compatible = "arm,cortex-a55";
+ device_type = "cpu";
+ reg = <0x700>;
+ enable-method = "psci";
+ };
+ };
+
+ osc24M: osc24M-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "osc24M";
+ };
+
+ pmu {
+ compatible = "arm,cortex-a55-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ arm,no-tick-in-suspend;
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x0 0x40000000>;
+
+ pio: pinctrl@2000000 {
+ compatible = "allwinner,sun55i-a523-pinctrl";
+ reg = <0x2000000 0x800>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_APB1>, <&osc24M>, <&rtc CLK_OSC32K>;
+ clock-names = "apb", "hosc", "losc";
+ gpio-controller;
+ #gpio-cells = <3>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ rgmii0_pins: rgmii0-pins {
+ pins = "PH0", "PH1", "PH2", "PH3", "PH4",
+ "PH5", "PH6", "PH7", "PH9", "PH10",
+ "PH14", "PH15", "PH16", "PH17", "PH18";
+ allwinner,pinmux = <5>;
+ function = "emac0";
+ drive-strength = <40>;
+ bias-disable;
+ };
+
+ mmc0_pins: mmc0-pins {
+ pins = "PF0" ,"PF1", "PF2", "PF3", "PF4", "PF5";
+ allwinner,pinmux = <2>;
+ function = "mmc0";
+ drive-strength = <30>;
+ bias-pull-up;
+ };
+
+ /omit-if-no-ref/
+ mmc1_pins: mmc1-pins {
+ pins = "PG0" ,"PG1", "PG2", "PG3", "PG4", "PG5";
+ allwinner,pinmux = <2>;
+ function = "mmc1";
+ drive-strength = <30>;
+ bias-pull-up;
+ };
+
+ mmc2_pins: mmc2-pins {
+ pins = "PC0", "PC1" ,"PC5", "PC6", "PC8",
+ "PC9", "PC10", "PC11", "PC13", "PC14",
+ "PC15", "PC16";
+ allwinner,pinmux = <3>;
+ function = "mmc2";
+ drive-strength = <30>;
+ bias-pull-up;
+ };
+
+ uart0_pb_pins: uart0-pb-pins {
+ pins = "PB9", "PB10";
+ allwinner,pinmux = <2>;
+ function = "uart0";
+ };
+ };
+
+ ccu: clock-controller@2001000 {
+ compatible = "allwinner,sun55i-a523-ccu";
+ reg = <0x02001000 0x1000>;
+ clocks = <&osc24M>, <&rtc CLK_OSC32K>,
+ <&rtc CLK_IOSC>, <&rtc CLK_OSC32K_FANOUT>;
+ clock-names = "hosc", "losc",
+ "iosc", "losc-fanout";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ mmc0: mmc@4020000 {
+ compatible = "allwinner,sun55i-a523-mmc",
+ "allwinner,sun20i-d1-mmc";
+ reg = <0x04020000 0x1000>;
+ clocks = <&ccu CLK_BUS_MMC0>, <&ccu CLK_MMC0>;
+ clock-names = "ahb", "mmc";
+ resets = <&ccu RST_BUS_MMC0>;
+ reset-names = "ahb";
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins>;
+ status = "disabled";
+
+ max-frequency = <150000000>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mmc1: mmc@4021000 {
+ compatible = "allwinner,sun55i-a523-mmc",
+ "allwinner,sun20i-d1-mmc";
+ reg = <0x04021000 0x1000>;
+ clocks = <&ccu CLK_BUS_MMC1>, <&ccu CLK_MMC1>;
+ clock-names = "ahb", "mmc";
+ resets = <&ccu RST_BUS_MMC1>;
+ reset-names = "ahb";
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ status = "disabled";
+
+ max-frequency = <150000000>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mmc2: mmc@4022000 {
+ compatible = "allwinner,sun55i-a523-mmc",
+ "allwinner,sun20i-d1-mmc";
+ reg = <0x04022000 0x1000>;
+ clocks = <&ccu CLK_BUS_MMC2>, <&ccu CLK_MMC2>;
+ clock-names = "ahb", "mmc";
+ resets = <&ccu RST_BUS_MMC2>;
+ reset-names = "ahb";
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins>;
+ status = "disabled";
+
+ max-frequency = <150000000>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ wdt: watchdog@2050000 {
+ compatible = "allwinner,sun55i-a523-wdt";
+ reg = <0x2050000 0x20>;
+ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&osc24M>, <&rtc CLK_OSC32K>;
+ clock-names = "hosc", "losc";
+ status = "okay";
+ };
+
+ uart0: serial@2500000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x02500000 0x400>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART0>;
+ resets = <&ccu RST_BUS_UART0>;
+ status = "disabled";
+ };
+
+ uart1: serial@2500400 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x02500400 0x400>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART1>;
+ resets = <&ccu RST_BUS_UART1>;
+ status = "disabled";
+ };
+
+ uart2: serial@2500800 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x02500800 0x400>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART2>;
+ resets = <&ccu RST_BUS_UART2>;
+ status = "disabled";
+ };
+
+ uart3: serial@2500c00 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x02500c00 0x400>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART3>;
+ resets = <&ccu RST_BUS_UART3>;
+ status = "disabled";
+ };
+
+ uart4: serial@2501000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x02501000 0x400>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART4>;
+ resets = <&ccu RST_BUS_UART4>;
+ status = "disabled";
+ };
+
+ uart5: serial@2501400 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x02501400 0x400>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART5>;
+ resets = <&ccu RST_BUS_UART5>;
+ status = "disabled";
+ };
+
+ uart6: serial@2501800 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x02501800 0x400>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART6>;
+ resets = <&ccu RST_BUS_UART6>;
+ status = "disabled";
+ };
+
+ uart7: serial@2501c00 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x02501c00 0x400>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART7>;
+ resets = <&ccu RST_BUS_UART7>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@2502000 {
+ compatible = "allwinner,sun55i-a523-i2c",
+ "allwinner,sun8i-v536-i2c",
+ "allwinner,sun6i-a31-i2c";
+ reg = <0x2502000 0x400>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_I2C0>;
+ resets = <&ccu RST_BUS_I2C0>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c1: i2c@2502400 {
+ compatible = "allwinner,sun55i-a523-i2c",
+ "allwinner,sun8i-v536-i2c",
+ "allwinner,sun6i-a31-i2c";
+ reg = <0x2502400 0x400>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_I2C1>;
+ resets = <&ccu RST_BUS_I2C1>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c2: i2c@2502800 {
+ compatible = "allwinner,sun55i-a523-i2c",
+ "allwinner,sun8i-v536-i2c",
+ "allwinner,sun6i-a31-i2c";
+ reg = <0x2502800 0x400>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_I2C2>;
+ resets = <&ccu RST_BUS_I2C2>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c3: i2c@2502c00 {
+ compatible = "allwinner,sun55i-a523-i2c",
+ "allwinner,sun8i-v536-i2c",
+ "allwinner,sun6i-a31-i2c";
+ reg = <0x2502c00 0x400>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_I2C3>;
+ resets = <&ccu RST_BUS_I2C3>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c4: i2c@2503000 {
+ compatible = "allwinner,sun55i-a523-i2c",
+ "allwinner,sun8i-v536-i2c",
+ "allwinner,sun6i-a31-i2c";
+ reg = <0x2503000 0x400>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_I2C4>;
+ resets = <&ccu RST_BUS_I2C4>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c5: i2c@2503400 {
+ compatible = "allwinner,sun55i-a523-i2c",
+ "allwinner,sun8i-v536-i2c",
+ "allwinner,sun6i-a31-i2c";
+ reg = <0x2503400 0x400>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_I2C5>;
+ resets = <&ccu RST_BUS_I2C5>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ syscon: syscon@3000000 {
+ compatible = "allwinner,sun55i-a523-system-control",
+ "allwinner,sun50i-a64-system-control";
+ reg = <0x03000000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ };
+
+ gic: interrupt-controller@3400000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <1>;
+ #interrupt-cells = <3>;
+ #size-cells = <1>;
+ ranges;
+ interrupt-controller;
+ reg = <0x3400000 0x10000>,
+ <0x3460000 0x100000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ dma-noncoherent;
+
+ its: msi-controller@3440000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x3440000 0x20000>;
+ msi-controller;
+ #msi-cells = <1>;
+ dma-noncoherent;
+ };
+ };
+
+ usb_otg: usb@4100000 {
+ compatible = "allwinner,sun55i-a523-musb",
+ "allwinner,sun8i-a33-musb";
+ reg = <0x4100000 0x400>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mc";
+ clocks = <&ccu CLK_BUS_OTG>;
+ resets = <&ccu RST_BUS_OTG>;
+ extcon = <&usbphy 0>;
+ phys = <&usbphy 0>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ usbphy: phy@4100400 {
+ compatible = "allwinner,sun55i-a523-usb-phy",
+ "allwinner,sun20i-d1-usb-phy";
+ reg = <0x4100400 0x100>,
+ <0x4101800 0x100>,
+ <0x4200800 0x100>;
+ reg-names = "phy_ctrl",
+ "pmu0",
+ "pmu1";
+ clocks = <&osc24M>,
+ <&osc24M>;
+ clock-names = "usb0_phy",
+ "usb1_phy";
+ resets = <&ccu RST_USB_PHY0>,
+ <&ccu RST_USB_PHY1>;
+ reset-names = "usb0_reset",
+ "usb1_reset";
+ status = "disabled";
+ #phy-cells = <1>;
+ };
+
+ ehci0: usb@4101000 {
+ compatible = "allwinner,sun55i-a523-ehci",
+ "generic-ehci";
+ reg = <0x4101000 0x100>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_OHCI0>,
+ <&ccu CLK_BUS_EHCI0>,
+ <&ccu CLK_USB_OHCI0>;
+ resets = <&ccu RST_BUS_OHCI0>,
+ <&ccu RST_BUS_EHCI0>;
+ phys = <&usbphy 0>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ ohci0: usb@4101400 {
+ compatible = "allwinner,sun55i-a523-ohci",
+ "generic-ohci";
+ reg = <0x4101400 0x100>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_OHCI0>,
+ <&ccu CLK_USB_OHCI0>;
+ resets = <&ccu RST_BUS_OHCI0>;
+ phys = <&usbphy 0>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ ehci1: usb@4200000 {
+ compatible = "allwinner,sun55i-a523-ehci",
+ "generic-ehci";
+ reg = <0x4200000 0x100>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_OHCI1>,
+ <&ccu CLK_BUS_EHCI1>,
+ <&ccu CLK_USB_OHCI1>;
+ resets = <&ccu RST_BUS_OHCI1>,
+ <&ccu RST_BUS_EHCI1>;
+ phys = <&usbphy 1>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ ohci1: usb@4200400 {
+ compatible = "allwinner,sun55i-a523-ohci",
+ "generic-ohci";
+ reg = <0x4200400 0x100>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_OHCI1>,
+ <&ccu CLK_USB_OHCI1>;
+ resets = <&ccu RST_BUS_OHCI1>;
+ phys = <&usbphy 1>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ emac0: ethernet@4500000 {
+ compatible = "allwinner,sun55i-a523-emac0",
+ "allwinner,sun50i-a64-emac";
+ reg = <0x04500000 0x10000>;
+ clocks = <&ccu CLK_BUS_EMAC0>;
+ clock-names = "stmmaceth";
+ resets = <&ccu RST_BUS_EMAC0>;
+ reset-names = "stmmaceth";
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii0_pins>;
+ syscon = <&syscon>;
+ status = "disabled";
+
+ mdio0: mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ r_ccu: clock-controller@7010000 {
+ compatible = "allwinner,sun55i-a523-r-ccu";
+ reg = <0x7010000 0x250>;
+ clocks = <&osc24M>,
+ <&rtc CLK_OSC32K>,
+ <&rtc CLK_IOSC>,
+ <&ccu CLK_PLL_PERIPH0_200M>,
+ <&ccu CLK_PLL_AUDIO0_4X>;
+ clock-names = "hosc",
+ "losc",
+ "iosc",
+ "pll-periph",
+ "pll-audio";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ nmi_intc: interrupt-controller@7010320 {
+ compatible = "allwinner,sun55i-a523-nmi";
+ reg = <0x07010320 0xc>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ r_pio: pinctrl@7022000 {
+ compatible = "allwinner,sun55i-a523-r-pinctrl";
+ reg = <0x7022000 0x800>;
+ interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&r_ccu CLK_R_APB0>,
+ <&osc24M>,
+ <&rtc CLK_OSC32K>;
+ clock-names = "apb", "hosc", "losc";
+ gpio-controller;
+ #gpio-cells = <3>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ r_i2c_pins: r-i2c-pins {
+ pins = "PL0" ,"PL1";
+ allwinner,pinmux = <2>;
+ function = "r_i2c0";
+ };
+ };
+
+ r_i2c0: i2c@7081400 {
+ compatible = "allwinner,sun55i-a523-i2c",
+ "allwinner,sun8i-v536-i2c",
+ "allwinner,sun6i-a31-i2c";
+ reg = <0x07081400 0x400>;
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&r_ccu CLK_BUS_R_I2C0>;
+ resets = <&r_ccu RST_BUS_R_I2C0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&r_i2c_pins>;
+ status = "disabled";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ rtc: rtc@7090000 {
+ compatible = "allwinner,sun55i-a523-rtc",
+ "allwinner,sun50i-r329-rtc";
+ reg = <0x7090000 0x400>;
+ interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&r_ccu CLK_BUS_R_RTC>,
+ <&osc24M>,
+ <&r_ccu CLK_R_AHB>;
+ clock-names = "bus", "hosc", "ahb";
+ #clock-cells = <1>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts b/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts
new file mode 100644
index 000000000000..0f58d92a6adc
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+// Copyright (C) 2025 Arm Ltd.
+
+/dts-v1/;
+
+#include "sun55i-a523.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Radxa Cubie A5E";
+ compatible = "radxa,cubie-a5e", "allwinner,sun55i-a527";
+
+ aliases {
+ ethernet0 = &emac0;
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ ext_osc32k: ext-osc32k-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "ext_osc32k";
+ };
+
+ reg_vcc5v: vcc5v {
+ /* board wide 5V supply from the USB-C connector */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_usb_vbus: vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&reg_vcc5v>;
+ gpio = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */
+ enable-active-high;
+ };
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&emac0 {
+ phy-mode = "rgmii-id";
+ phy-handle = <&ext_rgmii_phy>;
+ phy-supply = <&reg_cldo3>;
+
+ allwinner,tx-delay-ps = <300>;
+ allwinner,rx-delay-ps = <400>;
+
+ status = "okay";
+};
+
+&mdio0 {
+ ext_rgmii_phy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+};
+
+&mmc0 {
+ vmmc-supply = <&reg_cldo3>;
+ cd-gpios = <&pio 5 6 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PF6 */
+ bus-width = <4>;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&pio {
+ vcc-pb-supply = <&reg_cldo3>; /* via VCC-IO */
+ vcc-pc-supply = <&reg_cldo1>;
+ vcc-pd-supply = <&reg_cldo3>;
+ vcc-pe-supply = <&reg_aldo2>;
+ vcc-pf-supply = <&reg_cldo3>; /* actually switchable */
+ vcc-pg-supply = <&reg_bldo1>;
+ vcc-ph-supply = <&reg_cldo3>; /* via VCC-IO */
+ vcc-pi-supply = <&reg_cldo3>;
+ vcc-pj-supply = <&reg_cldo4>;
+ vcc-pk-supply = <&reg_cldo1>;
+};
+
+&r_i2c0 {
+ status = "okay";
+
+ axp717: pmic@34 {
+ compatible = "x-powers,axp717";
+ reg = <0x34>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+
+ vin1-supply = <&reg_vcc5v>;
+ vin2-supply = <&reg_vcc5v>;
+ vin3-supply = <&reg_vcc5v>;
+ vin4-supply = <&reg_vcc5v>;
+ aldoin-supply = <&reg_vcc5v>;
+ bldoin-supply = <&reg_vcc5v>;
+ cldoin-supply = <&reg_vcc5v>;
+
+ regulators {
+ /* Supplies the "little" cluster (1.4 GHz cores) */
+ reg_dcdc1: dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1160000>;
+ regulator-name = "vdd-cpul";
+ };
+
+ reg_dcdc2: dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <920000>;
+ regulator-max-microvolt = <920000>;
+ regulator-name = "vdd-gpu-sys";
+ };
+
+ reg_dcdc3: dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-dram";
+ };
+
+ reg_aldo1: aldo1 {
+ /* not connected */
+ };
+
+ reg_aldo2: aldo2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pe";
+ };
+
+ reg_aldo3: aldo3 {
+ /* supplies the I2C pins for this PMIC */
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-pl-usb";
+ };
+
+ reg_aldo4: aldo4 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pll-dxco-avcc";
+ };
+
+ reg_bldo1: bldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pg-iowifi";
+ };
+
+ reg_bldo2: bldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pm-lpddr4";
+ };
+
+ reg_bldo3: bldo3 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-mipi-cam";
+ };
+
+ reg_bldo4: bldo4 {
+ /* not connected */
+ };
+
+ reg_cldo1: cldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pc-and-their-dog";
+ };
+
+ reg_cldo2: cldo2 {
+ /* not connected */
+ };
+
+ reg_cldo3: cldo3 {
+ /* IO, USB-2, 3V3, card, NAND, sensor, PI */
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-io-mmc-spi-ana";
+ };
+
+ reg_cldo4: cldo4 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-pj-phy";
+ };
+
+ reg_cpusldo: cpusldo {
+ /* supplies the management core */
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdd-cpus";
+ };
+ };
+ };
+
+ axp323: pmic@36 {
+ compatible = "x-powers,axp323";
+ reg = <0x36>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ status = "okay";
+
+ vin1-supply = <&reg_vcc5v>;
+ vin2-supply = <&reg_vcc5v>;
+ vin3-supply = <&reg_vcc5v>;
+
+ regulators {
+ aldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-mipi-dsi";
+ };
+
+ dldo1 {
+ /* not connected */
+ };
+
+ /* Supplies the "big" cluster (1.8 GHz cores) */
+ reg_dcdc1_323: dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1160000>;
+ regulator-name = "vdd-cpub";
+ };
+
+ /* DCDC2 is polyphased with DCDC1 */
+
+ /* RISC-V management core supply */
+ reg_dcdc3_323: dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdd-dnr";
+ };
+ };
+ };
+};
+
+&r_pio {
+/*
+ * Specifying the supply would create a circular dependency.
+ *
+ * vcc-pl-supply = <&reg_aldo3>;
+ */
+ vcc-pm-supply = <&reg_aldo3>;
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pb_pins>;
+ status = "okay";
+};
+
+&usb_otg {
+ /*
+ * The USB-C port is the primary power supply, so in this configuration
+ * the board relies on the other end of the USB cable to supply the
+ * VBUS power.
+ * So use this port in peripheral mode.
+ * It is possible to supply the board with the 5V pins on the GPIO
+ * header, and since the DCIN_5V line is hardwired to the USB-C VBUS
+ * pins, the port turns into a host port, unconditionally supplying
+ * power. The dr_mode property should be changed to "host" here, if
+ * users choose this setup.
+ */
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+/*
+ * The schematic describes USB0_ID (PL10), measuring VBUS_5V, which looks to
+ * be always on. Also there is USB-VBUSDET (PL2), which is measuring the same
+ * VBUS_5V. There is also DCIN_DET, which measures DCIN_5V, so the power
+ * input rail.
+ * None of them seem to make any sense in relation to detecting USB devices
+ * or whether there is power provided via any USB pins: they would always
+ * report high, otherwise the system wouldn't be running.
+ * The AXP717C provides proper USB-C CC pin functionality, but the PMIC is
+ * not connected to those pins of the USB-C connector.
+ */
+&usbphy {
+ usb0_vbus-supply = <&reg_vcc5v>;
+ usb1_vbus-supply = <&reg_usb_vbus>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun55i-h728-x96qpro+.dts b/arch/arm64/boot/dts/allwinner/sun55i-h728-x96qpro+.dts
new file mode 100644
index 000000000000..59db103546f6
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun55i-h728-x96qpro+.dts
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+// Copyright (C) 2024 Arm Ltd.
+
+/dts-v1/;
+
+#include "sun55i-a523.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "X96Q Pro+";
+ compatible = "amediatech,x96q-pro-plus", "allwinner,sun55i-h728";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ ext_osc32k: ext-osc32k-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "ext_osc32k";
+ };
+
+ reg_vcc5v: vcc5v {
+ /* board wide 5V supply from the barrel plug */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_vcc3v3: vcc3v3 {
+ /* 3.3V dummy supply for the SD card */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&reg_vcc5v>;
+ regulator-always-on;
+ };
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&mmc0 {
+ vmmc-supply = <&reg_vcc3v3>;
+ cd-gpios = <&pio 5 6 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PF6 */
+ bus-width = <4>;
+ disable-wp;
+ status = "okay";
+};
+
+&mmc2 {
+ vmmc-supply = <&reg_cldo3>;
+ vqmmc-supply = <&reg_cldo1>;
+ bus-width = <8>;
+ non-removable;
+ cap-mmc-hw-reset;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&pio {
+ vcc-pb-supply = <&reg_cldo3>; /* via VCC-IO */
+ vcc-pc-supply = <&reg_cldo1>;
+ vcc-pd-supply = <&reg_dcdc4>;
+ vcc-pe-supply = <&reg_dcdc4>;
+ vcc-pf-supply = <&reg_cldo3>; /* actually switchable */
+ vcc-pg-supply = <&reg_bldo1>;
+ vcc-ph-supply = <&reg_cldo3>; /* via VCC-IO */
+ vcc-pi-supply = <&reg_dcdc4>;
+ vcc-pj-supply = <&reg_dcdc4>;
+ vcc-pk-supply = <&reg_bldo3>;
+};
+
+&r_i2c0 {
+ status = "okay";
+
+ axp717: pmic@34 {
+ compatible = "x-powers,axp717";
+ reg = <0x34>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+
+ vin1-supply = <&reg_vcc5v>;
+ vin2-supply = <&reg_vcc5v>;
+ vin3-supply = <&reg_vcc5v>;
+ vin4-supply = <&reg_vcc5v>;
+ aldoin-supply = <&reg_vcc5v>;
+ bldoin-supply = <&reg_vcc5v>;
+ cldoin-supply = <&reg_vcc5v>;
+
+ regulators {
+ /* Supplies the "little" cluster (1.0(?) GHz cores) */
+ reg_dcdc1: dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1160000>;
+ regulator-name = "vdd-cpul";
+ };
+
+ reg_dcdc2: dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <920000>;
+ regulator-max-microvolt = <920000>;
+ regulator-name = "vdd-gpu-sys";
+ };
+
+ reg_dcdc3: dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1360000>;
+ regulator-max-microvolt = <1360000>;
+ regulator-name = "vdd-dram";
+ };
+
+ reg_dcdc4: dcdc4 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-name = "vdd-dcdc4";
+ };
+
+ reg_aldo1: aldo1 {
+ /* not connected */
+ };
+
+ reg_aldo2: aldo2 {
+ /* not connected */
+ };
+
+ reg_aldo3: aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-aldo3";
+ };
+
+ reg_aldo4: aldo4 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pll-dxco-avcc";
+ };
+
+ reg_bldo1: bldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pg-wifi-lvds";
+ };
+
+ reg_bldo2: bldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-dram-1v8";
+ };
+
+ reg_bldo3: bldo3 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-name = "vcc-bldo3";
+ };
+
+ reg_bldo4: bldo4 {
+ /* not connected */
+ };
+
+ reg_cldo1: cldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-codec-sd";
+ };
+
+ reg_cldo2: cldo2 {
+ };
+
+ reg_cldo3: cldo3 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-codec-eth-sd";
+ };
+
+ reg_cldo4: cldo4 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-eth-phy";
+ };
+
+ reg_cpusldo: cpusldo {
+ /* supplies the management core */
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdd-cpus";
+ };
+ };
+ };
+
+ axp323: pmic@36 {
+ compatible = "x-powers,axp323";
+ reg = <0x36>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ status = "okay";
+
+ vin1-supply = <&reg_vcc5v>;
+ vin2-supply = <&reg_vcc5v>;
+ vin3-supply = <&reg_vcc5v>;
+
+ regulators {
+ aldo1 {
+ /* not connected */
+ };
+
+ dldo1 {
+ /* not connected */
+ };
+
+ /* Supplies the "big" cluster (1.8 GHz cores) */
+ reg_dcdc1_323: dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1160000>;
+ regulator-name = "vdd-cpub";
+ };
+
+ /* DCDC2 is polyphased with DCDC1 */
+
+ reg_dcdc3_323: dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-name = "vdd-dcdc3";
+ };
+ };
+ };
+};
+
+&r_pio {
+/*
+ * Specifying the supply would create a circular dependency.
+ *
+ * vcc-pl-supply = <&reg_aldo3>;
+ */
+ vcc-pm-supply = <&reg_aldo3>;
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pb_pins>;
+ status = "okay";
+};
+
+&usb_otg {
+ /* USB0 is a USB-A receptacle, always powered, so force host mode. */
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usbphy {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun55i-t527-avaota-a1.dts b/arch/arm64/boot/dts/allwinner/sun55i-t527-avaota-a1.dts
new file mode 100644
index 000000000000..08127f0cdd35
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun55i-t527-avaota-a1.dts
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+// Copyright (C) 2024 Arm Ltd.
+
+/dts-v1/;
+
+#include "sun55i-a523.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Avaota A1";
+ compatible = "yuzukihd,avaota-a1", "allwinner,sun55i-t527";
+
+ aliases {
+ ethernet0 = &emac0;
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ ext_osc32k: ext-osc32k-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "ext_osc32k";
+ };
+
+ reg_vcc12v: vcc12v {
+ /* DC input jack */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-12v";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ };
+
+ reg_vcc5v: vcc5v {
+ /* board wide 5V supply from the 12V->5V regulator */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&reg_vcc12v>;
+ regulator-always-on;
+ };
+
+ reg_usb_vbus: vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&reg_vcc5v>;
+ gpio = <&pio 8 12 GPIO_ACTIVE_HIGH>; /* PI12 */
+ enable-active-high;
+ };
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&emac0 {
+ phy-mode = "rgmii-id";
+ phy-handle = <&ext_rgmii_phy>;
+ phy-supply = <&reg_dcdc4>;
+
+ allwinner,tx-delay-ps = <100>;
+ allwinner,rx-delay-ps = <300>;
+
+ status = "okay";
+};
+
+&mdio0 {
+ ext_rgmii_phy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+};
+
+&mmc0 {
+ vmmc-supply = <&reg_cldo3>;
+ cd-gpios = <&pio 5 6 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PF6 */
+ bus-width = <4>;
+ status = "okay";
+};
+
+&mmc2 {
+ bus-width = <8>;
+ cap-mmc-hw-reset;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ non-removable;
+ vmmc-supply = <&reg_cldo3>;
+ vqmmc-supply = <&reg_cldo1>;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&pio {
+ vcc-pb-supply = <&reg_cldo3>; /* via VCC-IO */
+ vcc-pc-supply = <&reg_cldo1>;
+ vcc-pd-supply = <&reg_dcdc4>;
+ vcc-pe-supply = <&reg_dcdc4>;
+ vcc-pf-supply = <&reg_cldo3>; /* actually switchable */
+ vcc-pg-supply = <&reg_bldo1>;
+ vcc-ph-supply = <&reg_cldo3>; /* via VCC-IO */
+ vcc-pi-supply = <&reg_dcdc4>;
+ vcc-pj-supply = <&reg_dcdc4>;
+ vcc-pk-supply = <&reg_bldo3>;
+};
+
+&r_i2c0 {
+ status = "okay";
+
+ axp717: pmic@35 {
+ compatible = "x-powers,axp717";
+ reg = <0x35>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+
+ vin1-supply = <&reg_vcc5v>;
+ vin2-supply = <&reg_vcc5v>;
+ vin3-supply = <&reg_vcc5v>;
+ vin4-supply = <&reg_vcc5v>;
+ aldoin-supply = <&reg_vcc5v>;
+ bldoin-supply = <&reg_vcc5v>;
+ cldoin-supply = <&reg_vcc5v>;
+
+ regulators {
+ /* Supplies the "little" cluster (1.4 GHz cores) */
+ reg_dcdc1: dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1160000>;
+ regulator-name = "vdd-cpul";
+ };
+
+ reg_dcdc2: dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <920000>;
+ regulator-max-microvolt = <920000>;
+ regulator-name = "vdd-gpu-sys";
+ };
+
+ reg_dcdc3: dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1160000>;
+ regulator-max-microvolt = <1160000>;
+ regulator-name = "vdd-dram";
+ };
+
+ reg_dcdc4: dcdc4 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vdd-io";
+ };
+
+ reg_aldo1: aldo1 {
+ /* not connected */
+ };
+
+ reg_aldo2: aldo2 {
+ /* not connected */
+ };
+
+ reg_aldo3: aldo3 {
+ /* supplies the I2C pins for this PMIC */
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-pl-pm";
+ };
+
+ reg_aldo4: aldo4 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pll-dxco-avcc";
+ };
+
+ reg_bldo1: bldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pg-wifi-lvds";
+ };
+
+ reg_bldo2: bldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-dram-1v8";
+ };
+
+ reg_bldo3: bldo3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-cvp-pk-vid1v8";
+ };
+
+ reg_bldo4: bldo4 {
+ /* not connected */
+ };
+
+ reg_cldo1: cldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pc";
+ };
+
+ reg_cldo2: cldo2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-efuse";
+ };
+
+ reg_cldo3: cldo3 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-io-mmc-spi-ana";
+ };
+
+ reg_cldo4: cldo4 {
+ /* not connected */
+ };
+
+ reg_cpusldo: cpusldo {
+ /* supplies the management core */
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdd-cpus";
+ };
+ };
+ };
+
+ axp323: pmic@36 {
+ compatible = "x-powers,axp323";
+ reg = <0x36>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ status = "okay";
+
+ vin1-supply = <&reg_vcc5v>;
+ vin2-supply = <&reg_vcc5v>;
+ vin3-supply = <&reg_vcc5v>;
+
+ regulators {
+ aldo1 {
+ /* not connected */
+ };
+
+ dldo1 {
+ /* not connected */
+ };
+
+ /* Supplies the "big" cluster (1.8 GHz cores) */
+ reg_dcdc1_323: dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1160000>;
+ regulator-name = "vdd-cpub";
+ };
+
+ /* DCDC2 is polyphased with DCDC1 */
+
+ /* Some RISC-V management core related voltage */
+ reg_dcdc3_323: dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdd-dnr";
+ };
+ };
+ };
+};
+
+&r_pio {
+/*
+ * Specifying the supply would create a circular dependency.
+ *
+ * vcc-pl-supply = <&reg_aldo3>;
+ */
+ vcc-pm-supply = <&reg_aldo3>;
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pb_pins>;
+ status = "okay";
+};
+
+&usb_otg {
+ /*
+ * The CC pins of the USB-C port have two pull-down resistors
+ * connected to GND, which fixes this port to a peripheral role.
+ * There is a regulator, controlled by a GPIO, to provide VBUS power
+ * to the port, and a VBUSDET GPIO, to detect externally provided
+ * power, but without the CC pins there is no real way to do a
+ * runtime role detection.
+ */
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usbphy {
+ usb0_vbus-supply = <&reg_usb_vbus>;
+ usb0_vbus_det-gpios = <&pio 8 13 GPIO_ACTIVE_HIGH>; /* PI13 */
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
index da9de4986660..5a72f0b64247 100644
--- a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
+++ b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
@@ -151,7 +151,7 @@
al,msi-num-spis = <160>;
};
- io-fabric@fc000000 {
+ io-bus@fc000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
index 8b6156b5af65..dea60d136c2e 100644
--- a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
+++ b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
@@ -361,7 +361,7 @@
interrupt-parent = <&gic>;
};
- io-fabric@fc000000 {
+ io-bus@fc000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index 2fbda8419c65..15e7901c1268 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -3,6 +3,9 @@ dtb-$(CONFIG_ARCH_MESON) += amlogic-a4-a113l2-ba400.dtb
dtb-$(CONFIG_ARCH_MESON) += amlogic-a5-a113x2-av400.dtb
dtb-$(CONFIG_ARCH_MESON) += amlogic-c3-c302x-aw409.dtb
dtb-$(CONFIG_ARCH_MESON) += amlogic-c3-c308l-aw419.dtb
+dtb-$(CONFIG_ARCH_MESON) += amlogic-s6-s905x5-bl209.dtb
+dtb-$(CONFIG_ARCH_MESON) += amlogic-s7-s805x3-bp201.dtb
+dtb-$(CONFIG_ARCH_MESON) += amlogic-s7d-s905x5m-bm202.dtb
dtb-$(CONFIG_ARCH_MESON) += amlogic-t7-a311d2-an400.dtb
dtb-$(CONFIG_ARCH_MESON) += amlogic-t7-a311d2-khadas-vim4.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-a1-ad401.dtb
@@ -49,6 +52,7 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-wetek-hub.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-wetek-play2.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s805x-libretech-ac.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s805x-p241.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s805y-xiaomi-aquaman.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-libretech-pc.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-mecool-kii-pro.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p230.dtb
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a4-reset.h b/arch/arm64/boot/dts/amlogic/amlogic-a4-reset.h
new file mode 100644
index 000000000000..f6a4c90bab3c
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a4-reset.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2024 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef __DTS_AMLOGIC_A4_RESET_H
+#define __DTS_AMLOGIC_A4_RESET_H
+
+/* RESET0 */
+/* 0-3 */
+#define RESET_USB 4
+/* 5-6*/
+#define RESET_U2PHY22 7
+#define RESET_USBPHY20 8
+#define RESET_U2PHY21 9
+#define RESET_USB2DRD 10
+#define RESET_U2H 11
+#define RESET_LED_CTRL 12
+/* 13-31 */
+
+/* RESET1 */
+#define RESET_AUDIO 32
+#define RESET_AUDIO_VAD 33
+/* 34*/
+#define RESET_DDR_APB 35
+#define RESET_DDR 36
+#define RESET_VOUT_VENC 37
+#define RESET_VOUT 38
+/* 39-47 */
+#define RESET_ETHERNET 48
+/* 49-63 */
+
+/* RESET2 */
+#define RESET_DEVICE_MMC_ARB 64
+#define RESET_IRCTRL 65
+/* 66*/
+#define RESET_TS_PLL 67
+/* 68-72*/
+#define RESET_SPICC_0 73
+#define RESET_SPICC_1 74
+/* 75-79*/
+#define RESET_MSR_CLK 80
+/* 81*/
+#define RESET_SAR_ADC 82
+/* 83-87*/
+#define RESET_ACODEC 88
+/* 89-90*/
+#define RESET_WATCHDOG 91
+/* 92-95*/
+
+/* RESET3 */
+/* 96-127 */
+
+/* RESET4 */
+/* 128-131 */
+#define RESET_PWM_AB 132
+#define RESET_PWM_CD 133
+#define RESET_PWM_EF 134
+#define RESET_PWM_GH 135
+/* 136-137*/
+#define RESET_UART_A 138
+#define RESET_UART_B 139
+/* 140*/
+#define RESET_UART_D 141
+#define RESET_UART_E 142
+/* 143-144*/
+#define RESET_I2C_M_A 145
+#define RESET_I2C_M_B 146
+#define RESET_I2C_M_C 147
+#define RESET_I2C_M_D 148
+/* 149-151*/
+#define RESET_SDEMMC_A 152
+/* 153*/
+#define RESET_SDEMMC_C 154
+/* 155-159*/
+
+/* RESET5 */
+/* 160-175*/
+#define RESET_BRG_AO_NIC_SYS 176
+/* 177*/
+#define RESET_BRG_AO_NIC_MAIN 178
+#define RESET_BRG_AO_NIC_AUDIO 179
+/* 180-183*/
+#define RESET_BRG_AO_NIC_ALL 184
+/* 185*/
+#define RESET_BRG_NIC_SDIO 186
+#define RESET_BRG_NIC_EMMC 187
+#define RESET_BRG_NIC_DSU 188
+#define RESET_BRG_NIC_CLK81 189
+#define RESET_BRG_NIC_MAIN 190
+#define RESET_BRG_NIC_ALL 191
+
+#endif
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi
index a06838552f21..563bc2e662fa 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi
@@ -4,7 +4,9 @@
*/
#include "amlogic-a4-common.dtsi"
+#include "amlogic-a4-reset.h"
#include <dt-bindings/power/amlogic,a4-pwrc.h>
+#include <dt-bindings/pinctrl/amlogic,pinctrl.h>
/ {
cpus {
#address-cells = <2>;
@@ -50,6 +52,114 @@
};
&apb {
+ reset: reset-controller@2000 {
+ compatible = "amlogic,a4-reset",
+ "amlogic,meson-s4-reset";
+ reg = <0x0 0x2000 0x0 0x98>;
+ #reset-cells = <1>;
+ };
+
+ periphs_pinctrl: pinctrl@4000 {
+ compatible = "amlogic,pinctrl-a4";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0x4000 0x0 0x280>;
+
+ gpiox: gpio@100 {
+ reg = <0 0x100 0 0x40>, <0 0xc 0 0xc>;
+ reg-names = "gpio", "mux";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&periphs_pinctrl 0 (AMLOGIC_GPIO_X<<8) 18>;
+ };
+
+ gpiot: gpio@140 {
+ reg = <0 0x140 0 0x40>, <0 0x2c 0 0xc>;
+ reg-names = "gpio", "mux";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&periphs_pinctrl 0 (AMLOGIC_GPIO_T<<8) 23>;
+ };
+
+ gpiod: gpio@180 {
+ reg = <0 0x180 0 0x40>, <0 0x40 0 0x8>;
+ reg-names = "gpio", "mux";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&periphs_pinctrl 0 (AMLOGIC_GPIO_D<<8) 16>;
+ };
+
+ gpioe: gpio@1c0 {
+ reg = <0 0x1c0 0 0x40>, <0 0x48 0 0x4>;
+ reg-names = "gpio", "mux";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&periphs_pinctrl 0 (AMLOGIC_GPIO_E<<8) 2>;
+ };
+
+ gpiob: gpio@240 {
+ reg = <0 0x240 0 0x40>, <0 0 0 0x8>;
+ reg-names = "gpio", "mux";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&periphs_pinctrl 0 (AMLOGIC_GPIO_B<<8) 14>;
+ };
+
+ func-uart-a {
+ uart_a_default: group-uart-a-pins1 {
+ pinmux = <AML_PINMUX(AMLOGIC_GPIO_X, 11, 1)>,
+ <AML_PINMUX(AMLOGIC_GPIO_X, 12, 1)>,
+ <AML_PINMUX(AMLOGIC_GPIO_X, 13, 1)>,
+ <AML_PINMUX(AMLOGIC_GPIO_X, 14, 1)>;
+ };
+
+ group-uart-a-pins2 {
+ pinmux = <AML_PINMUX(AMLOGIC_GPIO_D, 2, 3)>,
+ <AML_PINMUX(AMLOGIC_GPIO_D, 3, 3)>;
+ bias-pull-up;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ func-uart-b {
+ uart_b_default: group-uart-b-pins {
+ pinmux = <AML_PINMUX(AMLOGIC_GPIO_E, 0, 3)>,
+ <AML_PINMUX(AMLOGIC_GPIO_E, 1, 3)>;
+ bias-pull-up;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ func-uart-d {
+ uart_d_default: group-uart-d-pins1 {
+ pinmux = <AML_PINMUX(AMLOGIC_GPIO_T, 18, 4)>,
+ <AML_PINMUX(AMLOGIC_GPIO_T, 19, 4)>;
+ bias-pull-up;
+ drive-strength-microamp = <4000>;
+ };
+
+ group-uart-d-pins2 {
+ pinmux = <AML_PINMUX(AMLOGIC_GPIO_T, 7, 2)>,
+ <AML_PINMUX(AMLOGIC_GPIO_T, 8, 2)>,
+ <AML_PINMUX(AMLOGIC_GPIO_T, 9, 2)>,
+ <AML_PINMUX(AMLOGIC_GPIO_T, 10, 2)>;
+ bias-pull-up;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ func-uart-e {
+ uart_e_default: group-uart-e-pins {
+ pinmux = <AML_PINMUX(AMLOGIC_GPIO_T, 14, 3)>,
+ <AML_PINMUX(AMLOGIC_GPIO_T, 15, 3)>,
+ <AML_PINMUX(AMLOGIC_GPIO_T, 16, 3)>,
+ <AML_PINMUX(AMLOGIC_GPIO_T, 17, 3)>;
+ bias-pull-up;
+ drive-strength-microamp = <4000>;
+ };
+ };
+ };
+
gpio_intc: interrupt-controller@4080 {
compatible = "amlogic,a4-gpio-intc",
"amlogic,meson-gpio-intc";
@@ -60,6 +170,29 @@
<10 11 12 13 14 15 16 17 18 19 20 21>;
};
+ ao_pinctrl: pinctrl@8e700 {
+ compatible = "amlogic,pinctrl-a4";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0x8e700 0x0 0x80>;
+
+ gpioao: gpio@4 {
+ reg = <0 0x4 0 0x16>, <0 0 0 0x4>;
+ reg-names = "gpio", "mux";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&periphs_pinctrl 0 (AMLOGIC_GPIO_AO<<8) 7>;
+ };
+
+ test_n: gpio@44 {
+ reg = <0 0x44 0 0x20>;
+ reg-names = "gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&periphs_pinctrl 0 (AMLOGIC_GPIO_TEST_N<<8) 1>;
+ };
+ };
+
gpio_ao_intc: interrupt-controller@8e72c {
compatible = "amlogic,a4-gpio-ao-intc",
"amlogic,meson-gpio-intc";
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a5-reset.h b/arch/arm64/boot/dts/amlogic/amlogic-a5-reset.h
new file mode 100644
index 000000000000..cdf0f5159620
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a5-reset.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2024 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef __DTS_AMLOGIC_A5_RESET_H
+#define __DTS_AMLOGIC_A5_RESET_H
+
+/* RESET0 */
+/* 0-3 */
+#define RESET_USB 4
+/* 5-7 */
+#define RESET_USBPHY20 8
+/* 9 */
+#define RESET_USB2DRD 10
+/* 11-31 */
+
+/* RESET1 */
+#define RESET_AUDIO 32
+#define RESET_AUDIO_VAD 33
+/* 34 */
+#define RESET_DDR_APB 35
+#define RESET_DDR 36
+/* 37-40 */
+#define RESET_DSPA_DEBUG 41
+/* 42 */
+#define RESET_DSPA 43
+/* 44-46 */
+#define RESET_NNA 47
+#define RESET_ETHERNET 48
+/* 49-63 */
+
+/* RESET2 */
+#define RESET_ABUS_ARB 64
+#define RESET_IRCTRL 65
+/* 66 */
+#define RESET_TS_PLL 67
+/* 68-72 */
+#define RESET_SPICC_0 73
+#define RESET_SPICC_1 74
+#define RESET_RSA 75
+
+/* 76-79 */
+#define RESET_MSR_CLK 80
+#define RESET_SPIFC 81
+#define RESET_SAR_ADC 82
+/* 83-90 */
+#define RESET_WATCHDOG 91
+/* 92-95 */
+
+/* RESET3 */
+/* 96-127 */
+
+/* RESET4 */
+#define RESET_RTC 128
+/* 129-131 */
+#define RESET_PWM_AB 132
+#define RESET_PWM_CD 133
+#define RESET_PWM_EF 134
+#define RESET_PWM_GH 135
+/* 136-137 */
+#define RESET_UART_A 138
+#define RESET_UART_B 139
+#define RESET_UART_C 140
+#define RESET_UART_D 141
+#define RESET_UART_E 142
+/* 143 */
+#define RESET_I2C_S_A 144
+#define RESET_I2C_M_A 145
+#define RESET_I2C_M_B 146
+#define RESET_I2C_M_C 147
+#define RESET_I2C_M_D 148
+/* 149-151 */
+#define RESET_SDEMMC_A 152
+/* 153 */
+#define RESET_SDEMMC_C 154
+/* 155-159 */
+
+/* RESET5 */
+/* 160-175 */
+#define RESET_BRG_AO_NIC_SYS 176
+#define RESET_BRG_AO_NIC_DSPA 177
+#define RESET_BRG_AO_NIC_MAIN 178
+#define RESET_BRG_AO_NIC_AUDIO 179
+/* 180-183 */
+#define RESET_BRG_AO_NIC_ALL 184
+#define RESET_BRG_NIC_NNA 185
+#define RESET_BRG_NIC_SDIO 186
+#define RESET_BRG_NIC_EMMC 187
+#define RESET_BRG_NIC_DSU 188
+#define RESET_BRG_NIC_SYSCLK 189
+#define RESET_BRG_NIC_MAIN 190
+#define RESET_BRG_NIC_ALL 191
+
+#endif
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi
index 32ed1776891b..b1da8cbaa25a 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi
@@ -4,6 +4,7 @@
*/
#include "amlogic-a4-common.dtsi"
+#include "amlogic-a5-reset.h"
#include <dt-bindings/power/amlogic,a5-pwrc.h>
/ {
cpus {
@@ -50,6 +51,13 @@
};
&apb {
+ reset: reset-controller@2000 {
+ compatible = "amlogic,a5-reset",
+ "amlogic,meson-s4-reset";
+ reg = <0x0 0x2000 0x0 0x98>;
+ #reset-cells = <1>;
+ };
+
gpio_intc: interrupt-controller@4080 {
compatible = "amlogic,a5-gpio-intc",
"amlogic,meson-gpio-intc";
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi
index fd0e557eba06..cb9ea3ca6ee0 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi
@@ -760,6 +760,11 @@
};
};
+ clk_msr: clock-measure@48000 {
+ compatible = "amlogic,c3-clk-measure";
+ reg = <0x0 0x48000 0x0 0x1c>;
+ };
+
spicc0: spi@50000 {
compatible = "amlogic,meson-g12a-spicc";
reg = <0x0 0x50000 0x0 0x44>;
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-s6-s905x5-bl209.dts b/arch/arm64/boot/dts/amlogic/amlogic-s6-s905x5-bl209.dts
new file mode 100644
index 000000000000..c45b22651798
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-s6-s905x5-bl209.dts
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Amlogic, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "amlogic-s6.dtsi"
+/ {
+ model = "Amlogic S905X5 BL209 Development Board";
+ compatible = "amlogic,bl209", "amlogic,s6";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ serial0 = &uart_b;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x000000 0x0 0xe0000000>,
+ <0x1 0x000000 0x0 0x20000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* 27 MiB reserved for ARM Trusted Firmware */
+ secmon_reserved: secmon@5000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x05000000 0x0 0x1b00000>;
+ no-map;
+ };
+ };
+};
+
+&uart_b {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-s6.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-s6.dtsi
new file mode 100644
index 000000000000..a8c90245c42a
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-s6.dtsi
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Amlogic, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/gpio/gpio.h>
+/ {
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a510";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ };
+
+ cpu1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a510";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ };
+
+ cpu2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a510";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ };
+
+ cpu3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a510";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ xtal: xtal-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "xtal";
+ #clock-cells = <0>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gic: interrupt-controller@ff200000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0xff200000 0 0x10000>,
+ <0x0 0xff240000 0 0x80000>;
+ interrupts = <GIC_PPI 9 0xf04>;
+ };
+
+ apb: bus@fe000000 {
+ compatible = "simple-bus";
+ reg = <0x0 0xfe000000 0x0 0x480000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0xfe000000 0x0 0x480000>;
+
+ uart_b: serial@7a000 {
+ compatible = "amlogic,s6-uart",
+ "amlogic,meson-s4-uart";
+ reg = <0x0 0x7a000 0x0 0x18>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&xtal>, <&xtal>, <&xtal>;
+ clock-names = "xtal", "pclk", "baud";
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-s7-s805x3-bp201.dts b/arch/arm64/boot/dts/amlogic/amlogic-s7-s805x3-bp201.dts
new file mode 100644
index 000000000000..7fd4ac9321a6
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-s7-s805x3-bp201.dts
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Amlogic, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "amlogic-s7.dtsi"
+/ {
+ model = "Amlogic S805X3 BP201 Development Board";
+ compatible = "amlogic,bp201", "amlogic,s7";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ serial0 = &uart_b;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x40000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* 35 MiB reserved for ARM Trusted Firmware */
+ secmon_reserved: secmon@5000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x05000000 0x0 0x2300000>;
+ no-map;
+ };
+ };
+};
+
+&uart_b {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-s7.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-s7.dtsi
new file mode 100644
index 000000000000..f0c172681bd1
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-s7.dtsi
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Amlogic, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ };
+
+ cpu1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ };
+
+ cpu2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ };
+
+ cpu3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ };
+
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ xtal: xtal-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "xtal";
+ #clock-cells = <0>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gic: interrupt-controller@fff01000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0xfff01000 0 0x1000>,
+ <0x0 0xfff02000 0 0x0100>;
+ interrupts = <GIC_PPI 9 0xf04>;
+ };
+
+ apb: bus@fe000000 {
+ compatible = "simple-bus";
+ reg = <0x0 0xfe000000 0x0 0x480000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0xfe000000 0x0 0x480000>;
+
+ uart_b: serial@7a000 {
+ compatible = "amlogic,s7-uart",
+ "amlogic,meson-s4-uart";
+ reg = <0x0 0x7a000 0x0 0x18>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&xtal>, <&xtal>, <&xtal>;
+ clock-names = "xtal", "pclk", "baud";
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-s7d-s905x5m-bm202.dts b/arch/arm64/boot/dts/amlogic/amlogic-s7d-s905x5m-bm202.dts
new file mode 100644
index 000000000000..2933fcdbc8ef
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-s7d-s905x5m-bm202.dts
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Amlogic, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "amlogic-s7d.dtsi"
+/ {
+ model = "Amlogic S905X5M BM202 Development Board";
+ compatible = "amlogic,bm202", "amlogic,s7d";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ serial0 = &uart_b;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* 36 MiB reserved for ARM Trusted Firmware */
+ secmon_reserved: secmon@5000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x05000000 0x0 0x2400000>;
+ no-map;
+ };
+ };
+};
+
+&uart_b {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-s7d.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-s7d.dtsi
new file mode 100644
index 000000000000..e1099bc1535d
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-s7d.dtsi
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Amlogic, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ };
+
+ cpu1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ };
+
+ cpu2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ };
+
+ cpu3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ };
+
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ xtal: xtal-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "xtal";
+ #clock-cells = <0>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gic: interrupt-controller@fff01000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0xfff01000 0 0x1000>,
+ <0x0 0xfff02000 0 0x0100>;
+ interrupts = <GIC_PPI 9 0xf04>;
+ };
+
+ apb: bus@fe000000 {
+ compatible = "simple-bus";
+ reg = <0x0 0xfe000000 0x0 0x480000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0xfe000000 0x0 0x480000>;
+
+ uart_b: serial@7a000 {
+ compatible = "amlogic,s7d-uart",
+ "amlogic,meson-s4-uart";
+ reg = <0x0 0x7a000 0x0 0x18>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&xtal>, <&xtal>, <&xtal>;
+ clock-names = "xtal", "pclk", "baud";
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
index 1eba0afb3fd9..f7f25a10f409 100644
--- a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
@@ -233,6 +233,7 @@
groups = "uart_a_tx",
"uart_a_rx";
function = "uart_a";
+ bias-pull-up;
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index a6924d246bb1..2df143aa77ce 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -1164,7 +1164,7 @@
groups = "uart_tx_a",
"uart_rx_a";
function = "uart_a";
- bias-disable;
+ bias-pull-up;
};
};
@@ -1182,7 +1182,7 @@
groups = "uart_tx_b_x",
"uart_rx_b_x";
function = "uart_b";
- bias-disable;
+ bias-pull-up;
};
};
@@ -1200,7 +1200,7 @@
groups = "uart_tx_b_z",
"uart_rx_b_z";
function = "uart_b";
- bias-disable;
+ bias-pull-up;
};
};
@@ -1218,7 +1218,7 @@
groups = "uart_ao_tx_b_z",
"uart_ao_rx_b_z";
function = "uart_ao_b_z";
- bias-disable;
+ bias-pull-up;
};
};
@@ -1654,7 +1654,7 @@
groups = "uart_ao_tx_a",
"uart_ao_rx_a";
function = "uart_ao_a";
- bias-disable;
+ bias-pull-up;
};
};
@@ -1672,7 +1672,7 @@
groups = "uart_ao_tx_b",
"uart_ao_rx_b";
function = "uart_ao_b";
- bias-disable;
+ bias-pull-up;
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index ab2b3f15ef19..dcc927a9da80 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -1503,7 +1503,7 @@
groups = "uart_a_tx",
"uart_a_rx";
function = "uart_a";
- bias-disable;
+ bias-pull-up;
};
};
@@ -1521,7 +1521,7 @@
groups = "uart_b_tx",
"uart_b_rx";
function = "uart_b";
- bias-disable;
+ bias-pull-up;
};
};
@@ -1918,7 +1918,7 @@
groups = "uart_ao_a_tx",
"uart_ao_a_rx";
function = "uart_ao_a";
- bias-disable;
+ bias-pull-up;
};
};
@@ -1936,7 +1936,7 @@
groups = "uart_ao_b_tx_2",
"uart_ao_b_rx_3";
function = "uart_ao_b";
- bias-disable;
+ bias-pull-up;
};
};
@@ -1945,7 +1945,7 @@
groups = "uart_ao_b_tx_8",
"uart_ao_b_rx_9";
function = "uart_ao_b";
- bias-disable;
+ bias-pull-up;
};
};
@@ -2313,7 +2313,7 @@
"amlogic,meson8-pwm-v2";
reg = <0x0 0x19000 0x0 0x20>;
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
#pwm-cells = <3>;
@@ -2325,7 +2325,7 @@
"amlogic,meson8-pwm-v2";
reg = <0x0 0x1a000 0x0 0x20>;
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
#pwm-cells = <3>;
@@ -2337,7 +2337,7 @@
"amlogic,meson8-pwm-v2";
reg = <0x0 0x1b000 0x0 0x20>;
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
#pwm-cells = <3>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts
index 9aa36f17ffa2..d0a3b4b9229c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts
@@ -267,28 +267,24 @@
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu2 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu3 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&ethmac {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
index 952b8d02e5c2..4353485c6f26 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
@@ -220,28 +220,24 @@
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu2 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu3 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cvbs_vdac_port {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
index 52fbc5103e45..f39fcabc763f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
@@ -314,28 +314,24 @@
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu2 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu3 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cvbs_vdac_port {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
index 5407049d2647..b5bf8ecc91e6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
@@ -407,28 +407,24 @@
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu2 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu3 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&clkc_audio {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
index 01da83658ae3..5ab460a3e637 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
@@ -263,28 +263,24 @@
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu2 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu3 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cvbs_vdac_port {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
index 543e70669df5..deee61dbe074 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
@@ -62,6 +62,7 @@
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <731000>;
+ clock-latency-ns = <50000>;
};
opp-1200000000 {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-libretech-cc.dts
index adedc1340c78..415248931ab1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-libretech-cc.dts
@@ -76,42 +76,36 @@
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu100 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu101 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu102 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu103 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&pwm_ab {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi
index 8e9ad1e51d66..8ecb5bd125c1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi
@@ -14,6 +14,7 @@
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <761000>;
+ clock-latency-ns = <50000>;
};
opp-1200000000 {
@@ -54,6 +55,7 @@
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <731000>;
+ clock-latency-ns = <50000>;
};
opp-1200000000 {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4.dtsi
index 92e8b26ecccc..39011b645128 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4.dtsi
@@ -155,42 +155,36 @@
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu100 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu101 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu102 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu103 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&ext_mdio {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi
index 54663c55a20e..1b08303c4282 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi
@@ -263,42 +263,36 @@
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu100 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu101 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu102 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu103 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&ethmac {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi
index de35fa2d7a6d..8e3e3354ed67 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi
@@ -116,6 +116,10 @@
status = "okay";
};
+&clkc_audio {
+ status = "okay";
+};
+
&frddr_a {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
index 48650bad230d..fc737499f207 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
@@ -51,42 +51,36 @@
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu100 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu101 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu102 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu103 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&pwm_ab {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts
index e21831dfceee..d5938a4a6da3 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts
@@ -281,42 +281,36 @@
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu100 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu101 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu102 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu103 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
/* RK817 only supports 12.5mV steps, round up the values */
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid.dtsi
index 7e8964bacfce..3298d59833b6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid.dtsi
@@ -227,42 +227,36 @@
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu100 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu101 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu102 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu103 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu_thermal {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts
index fc05ecf90714..1e5c6f984945 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts
@@ -259,42 +259,36 @@
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu100 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu101 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu102 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu103 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu_thermal {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
index 44c23c984034..19cad93a6889 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
@@ -14,6 +14,7 @@
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <731000>;
+ clock-latency-ns = <50000>;
};
opp-1200000000 {
@@ -59,6 +60,7 @@
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <771000>;
+ clock-latency-ns = <50000>;
};
opp-1200000000 {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi
index a7a0fc264cdc..9b6d780eada7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi
@@ -213,42 +213,36 @@
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu100 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu101 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu102 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cpu103 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
};
&cvbs_vdac_port {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 8ebce7114a60..f69923da07fe 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -105,7 +105,7 @@
mux {
groups = "uart_tx_ao_a", "uart_rx_ao_a";
function = "uart_ao";
- bias-disable;
+ bias-pull-up;
};
};
@@ -122,7 +122,7 @@
mux {
groups = "uart_tx_ao_b", "uart_rx_ao_b";
function = "uart_ao_b";
- bias-disable;
+ bias-pull-up;
};
};
@@ -520,7 +520,7 @@
groups = "uart_tx_a",
"uart_rx_a";
function = "uart_a";
- bias-disable;
+ bias-pull-up;
};
};
@@ -538,7 +538,7 @@
groups = "uart_tx_b",
"uart_rx_b";
function = "uart_b";
- bias-disable;
+ bias-pull-up;
};
};
@@ -556,7 +556,7 @@
groups = "uart_tx_c",
"uart_rx_c";
function = "uart_c";
- bias-disable;
+ bias-pull-up;
};
};
@@ -741,7 +741,7 @@
&pwm_ab {
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
};
@@ -752,14 +752,14 @@
&pwm_cd {
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
};
&pwm_ef {
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805y-xiaomi-aquaman.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805y-xiaomi-aquaman.dts
new file mode 100644
index 000000000000..cac15b89c573
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805y-xiaomi-aquaman.dts
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Ferass El Hafidi <funderscore@postmarketos.org>
+ * Heavily based on meson-gxl-s805x-p241.dts:
+ * - Copyright (c) 2018 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/sound/meson-aiu.h>
+
+#include "meson-gxl-s805y.dtsi"
+
+/ {
+ compatible = "xiaomi,aquaman", "amlogic,s805y", "amlogic,meson-gxl";
+ model = "Xiaomi Mi TV Stick (aquaman)";
+
+ aliases {
+ serial0 = &uart_AO;
+ serial1 = &uart_A;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ emmc_pwrseq: emmc-pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+ };
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-white {
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio GPIODV_24 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ panic-indicator;
+ };
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x40000000>;
+ };
+
+ vddio_boot: regulator-vddio-boot {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDIO_BOOT";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vddao_3v3: regulator-vddao-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vddio_ao18: regulator-vddio-ao18 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDIO_AO18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vcc_3v3: regulator-vcc-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vcc_5v: regulator-vcc-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ wifi32k: wifi32k {
+ compatible = "pwm-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768KHz */
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+ clocks = <&wifi32k>;
+ clock-names = "ext_clock";
+ };
+
+ sound {
+ compatible = "amlogic,gx-sound-card";
+ model = "XIAOMI-AQUAMAN";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
+ assigned-clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+ assigned-clock-parents = <0>, <0>, <0>;
+ assigned-clock-rates = <294912000>,
+ <270950400>,
+ <393216000>;
+
+ dai-link-0 {
+ sound-dai = <&aiu AIU_CPU CPU_I2S_FIFO>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&aiu AIU_CPU CPU_I2S_ENCODER>;
+ dai-format = "i2s";
+ mclk-fs = <256>;
+
+ codec-0 {
+ sound-dai = <&aiu AIU_HDMI CTRL_I2S>;
+ };
+ };
+
+ dai-link-2 {
+ sound-dai = <&aiu AIU_HDMI CTRL_OUT>;
+
+ codec-0 {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+ };
+};
+
+&aiu {
+ status = "okay";
+};
+
+&cec_AO {
+ status = "okay";
+ pinctrl-0 = <&ao_cec_pins>;
+ pinctrl-names = "default";
+ hdmi-phandle = <&hdmi_tx>;
+};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+ pinctrl-names = "default";
+ hdmi-supply = <&vcc_5v>;
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
+
+&saradc {
+ status = "okay";
+ vref-supply = <&vddio_ao18>;
+};
+
+/* Wireless SDIO Module (Amlogic W155S1 / Realtek RTL8821CS) */
+&sd_emmc_b {
+ status = "okay";
+ pinctrl-0 = <&sdio_pins>;
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bus-width = <4>;
+ cap-sd-highspeed;
+ max-frequency = <50000000>;
+
+ non-removable;
+ disable-wp;
+
+ /* WiFi firmware requires power to be kept while in suspend */
+ keep-power-in-suspend;
+
+ mmc-pwrseq = <&sdio_pwrseq>;
+
+ vmmc-supply = <&vddao_3v3>;
+ vqmmc-supply = <&vddio_boot>;
+
+ sdio: wifi@1 {
+ reg = <1>;
+ };
+};
+
+/* eMMC */
+&sd_emmc_c {
+ status = "okay";
+ pinctrl-0 = <&emmc_pins>, <&emmc_ds_pins>;
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
+
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ max-frequency = <200000000>;
+ non-removable;
+ disable-wp;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+
+ mmc-pwrseq = <&emmc_pwrseq>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vddio_boot>;
+};
+
+&pwm_ef {
+ status = "okay";
+ pinctrl-0 = <&pwm_e_pins>;
+ pinctrl-names = "default";
+};
+
+/*
+ * This is connected to the Bluetooth module
+ * Note: There's no driver for the Bluetooth module of some variants yet.
+ */
+&uart_A {
+ status = "okay";
+ pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+ pinctrl-names = "default";
+ uart-has-rtscts;
+};
+
+&uart_AO {
+ status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
+};
+
+&usb {
+ status = "okay";
+ dr_mode = "otg";
+ vbus-supply = <&vcc_5v>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805y.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s805y.dtsi
new file mode 100644
index 000000000000..49b29b71f732
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805y.dtsi
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Ferass El Hafidi <funderscore@postmarketos.org>
+ */
+
+#include "meson-gxl-s805x.dtsi"
+
+/ {
+ compatible = "amlogic,s805y", "amlogic,meson-gxl";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index 2dc2fdaecf9f..ba535010a3c9 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -163,7 +163,7 @@
mux {
groups = "uart_tx_ao_a", "uart_rx_ao_a";
function = "uart_ao";
- bias-disable;
+ bias-pull-up;
};
};
@@ -180,7 +180,7 @@
mux {
groups = "uart_tx_ao_b", "uart_rx_ao_b";
function = "uart_ao_b";
- bias-disable;
+ bias-pull-up;
};
};
@@ -188,7 +188,7 @@
mux {
groups = "uart_tx_ao_b_0", "uart_rx_ao_b_1";
function = "uart_ao_b";
- bias-disable;
+ bias-pull-up;
};
};
@@ -214,7 +214,7 @@
groups = "i2c_sck_ao",
"i2c_sda_ao";
function = "i2c_ao";
- bias-disable;
+ bias-pull-up;
};
};
@@ -522,7 +522,7 @@
groups = "uart_tx_a",
"uart_rx_a";
function = "uart_a";
- bias-disable;
+ bias-pull-up;
};
};
@@ -540,7 +540,7 @@
groups = "uart_tx_b",
"uart_rx_b";
function = "uart_b";
- bias-disable;
+ bias-pull-up;
};
};
@@ -558,7 +558,7 @@
groups = "uart_tx_c",
"uart_rx_c";
function = "uart_c";
- bias-disable;
+ bias-pull-up;
};
};
@@ -576,7 +576,7 @@
groups = "i2c_sck_a",
"i2c_sda_a";
function = "i2c_a";
- bias-disable;
+ bias-pull-up;
};
};
@@ -585,7 +585,7 @@
groups = "i2c_sck_b",
"i2c_sda_b";
function = "i2c_b";
- bias-disable;
+ bias-pull-up;
};
};
@@ -594,7 +594,7 @@
groups = "i2c_sck_c",
"i2c_sda_c";
function = "i2c_c";
- bias-disable;
+ bias-pull-up;
};
};
@@ -603,7 +603,7 @@
groups = "i2c_sck_c_dv19",
"i2c_sda_c_dv18";
function = "i2c_c";
- bias-disable;
+ bias-pull-up;
};
};
@@ -811,7 +811,7 @@
&pwm_ab {
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
};
@@ -822,14 +822,14 @@
&pwm_cd {
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
};
&pwm_ef {
clocks = <&xtal>,
- <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
<&clkc CLKID_FCLK_DIV4>,
<&clkc CLKID_FCLK_DIV3>;
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts b/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts
index 942df754a0ed..1221f4545130 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts
@@ -38,6 +38,10 @@
};
};
+&saradc {
+ compatible = "amlogic,meson-gxlx-saradc", "amlogic,meson-saradc";
+};
+
&usb {
dr_mode = "host";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
index 957577d986c0..9d99ed2994df 100644
--- a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
@@ -629,6 +629,11 @@
};
};
+ clk_msr: clock-measure@48000 {
+ compatible = "amlogic,s4-clk-measure";
+ reg = <0x0 0x48000 0x0 0x1c>;
+ };
+
spicc0: spi@50000 {
compatible = "amlogic,meson-g12a-spicc";
reg = <0x0 0x50000 0x0 0x44>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-ac2xx.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1-ac2xx.dtsi
index a3463149db3d..9be3084b090d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-ac2xx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-ac2xx.dtsi
@@ -147,28 +147,24 @@
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU1_CLK>;
- clock-latency = <50000>;
};
&cpu2 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU2_CLK>;
- clock-latency = <50000>;
};
&cpu3 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU3_CLK>;
- clock-latency = <50000>;
};
&cvbs_vdac_port {
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi.dtsi
index 40db95f64636..538b35036954 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi.dtsi
@@ -185,28 +185,24 @@
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU1_CLK>;
- clock-latency = <50000>;
};
&cpu2 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU2_CLK>;
- clock-latency = <50000>;
};
&cpu3 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU3_CLK>;
- clock-latency = <50000>;
};
&ext_mdio {
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
index 5d75ad3f3e46..a3d9b66b6878 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
@@ -51,28 +51,24 @@
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU1_CLK>;
- clock-latency = <50000>;
};
&cpu2 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU2_CLK>;
- clock-latency = <50000>;
};
&cpu3 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU3_CLK>;
- clock-latency = <50000>;
};
&pwm_AO_cd {
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
index ad8d07883760..c4524eb4f099 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
@@ -250,28 +250,24 @@
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU1_CLK>;
- clock-latency = <50000>;
};
&cpu2 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU2_CLK>;
- clock-latency = <50000>;
};
&cpu3 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU3_CLK>;
- clock-latency = <50000>;
};
&ext_mdio {
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-s905d3-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-s905d3-libretech-cc.dts
index 537370db360f..5daadfb170b4 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-s905d3-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-s905d3-libretech-cc.dts
@@ -64,26 +64,22 @@
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU1_CLK>;
- clock-latency = <50000>;
};
&cpu2 {
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU2_CLK>;
- clock-latency = <50000>;
};
&cpu3 {
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU3_CLK>;
- clock-latency = <50000>;
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
index 37d7f64b6d5d..024d2eb8e6ee 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
@@ -359,28 +359,24 @@
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU1_CLK>;
- clock-latency = <50000>;
};
&cpu2 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU2_CLK>;
- clock-latency = <50000>;
};
&cpu3 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU3_CLK>;
- clock-latency = <50000>;
};
&ethmac {
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
index 97e4b52066dc..966ebb19cc55 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
@@ -100,6 +100,7 @@
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <770000>;
+ clock-latency-ns = <50000>;
};
opp-1200000000 {
diff --git a/arch/arm64/boot/dts/apple/s5l8960x.dtsi b/arch/arm64/boot/dts/apple/s5l8960x.dtsi
index d820b0e43050..5b5175d6978c 100644
--- a/arch/arm64/boot/dts/apple/s5l8960x.dtsi
+++ b/arch/arm64/boot/dts/apple/s5l8960x.dtsi
@@ -37,6 +37,9 @@
performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>;
+ d-cache-size = <0x10000>;
};
cpu1: cpu@1 {
@@ -47,6 +50,16 @@
performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>;
+ d-cache-size = <0x10000>;
+ };
+
+ l2_cache: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x100000>;
};
};
diff --git a/arch/arm64/boot/dts/apple/s800-0-3.dtsi b/arch/arm64/boot/dts/apple/s800-0-3.dtsi
index c0e9ae45627c..09db4ed64054 100644
--- a/arch/arm64/boot/dts/apple/s800-0-3.dtsi
+++ b/arch/arm64/boot/dts/apple/s800-0-3.dtsi
@@ -36,6 +36,9 @@
performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>;
+ d-cache-size = <0x10000>;
};
cpu1: cpu@1 {
@@ -46,6 +49,16 @@
performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>;
+ d-cache-size = <0x10000>;
+ };
+
+ l2_cache: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x300000>;
};
};
diff --git a/arch/arm64/boot/dts/apple/s8001.dtsi b/arch/arm64/boot/dts/apple/s8001.dtsi
index d56d49c048bb..fee350765894 100644
--- a/arch/arm64/boot/dts/apple/s8001.dtsi
+++ b/arch/arm64/boot/dts/apple/s8001.dtsi
@@ -36,6 +36,9 @@
performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>;
+ d-cache-size = <0x10000>;
};
cpu1: cpu@1 {
@@ -46,6 +49,16 @@
performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>;
+ d-cache-size = <0x10000>;
+ };
+
+ l2_cache: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x300000>;
};
};
diff --git a/arch/arm64/boot/dts/apple/t6001.dtsi b/arch/arm64/boot/dts/apple/t6001.dtsi
index 620b17e4031f..d2cf81926f28 100644
--- a/arch/arm64/boot/dts/apple/t6001.dtsi
+++ b/arch/arm64/boot/dts/apple/t6001.dtsi
@@ -11,6 +11,7 @@
#include <dt-bindings/interrupt-controller/apple-aic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/apple.h>
+#include <dt-bindings/spmi/spmi.h>
#include "multi-die-cpp.h"
diff --git a/arch/arm64/boot/dts/apple/t6002.dtsi b/arch/arm64/boot/dts/apple/t6002.dtsi
index a963a5011799..e36f422d257d 100644
--- a/arch/arm64/boot/dts/apple/t6002.dtsi
+++ b/arch/arm64/boot/dts/apple/t6002.dtsi
@@ -11,6 +11,7 @@
#include <dt-bindings/interrupt-controller/apple-aic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/apple.h>
+#include <dt-bindings/spmi/spmi.h>
#include "multi-die-cpp.h"
diff --git a/arch/arm64/boot/dts/apple/t600x-die0.dtsi b/arch/arm64/boot/dts/apple/t600x-die0.dtsi
index e9b3140ba1a9..110bc6719512 100644
--- a/arch/arm64/boot/dts/apple/t600x-die0.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-die0.dtsi
@@ -45,6 +45,63 @@
<AIC_IRQ 0 749 IRQ_TYPE_LEVEL_HIGH>;
};
+ nub_spmi0: spmi@2920a1300 {
+ compatible = "apple,t6000-spmi", "apple,spmi";
+ reg = <0x2 0x920a1300 0x0 0x100>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ pmic1: pmic@f {
+ compatible = "apple,maverick-pmic", "apple,spmi-nvmem";
+ reg = <0xf SPMI_USID>;
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pm_setting: pm-setting@1405 {
+ reg = <0x1405 0x1>;
+ };
+
+ rtc_offset: rtc-offset@1411 {
+ reg = <0x1411 0x6>;
+ };
+
+ boot_stage: boot-stage@6001 {
+ reg = <0x6001 0x1>;
+ };
+
+ boot_error_count: boot-error-count@6002 {
+ reg = <0x6002 0x1>;
+ bits = <0 4>;
+ };
+
+ panic_count: panic-count@6002 {
+ reg = <0x6002 0x1>;
+ bits = <4 4>;
+ };
+
+ boot_error_stage: boot-error-stage@6003 {
+ reg = <0x6003 0x1>;
+ };
+
+ shutdown_flag: shutdown-flag@600f {
+ reg = <0x600f 0x1>;
+ bits = <3 1>;
+ };
+
+ fault_shadow: fault-shadow@867b {
+ reg = <0x867b 0x10>;
+ };
+
+ socd: socd@8b00 {
+ reg = <0x8b00 0x400>;
+ };
+ };
+ };
+ };
+
wdt: watchdog@2922b0000 {
compatible = "apple,t6000-wdt", "apple,wdt";
reg = <0x2 0x922b0000 0x0 0x4000>;
diff --git a/arch/arm64/boot/dts/apple/t7000.dtsi b/arch/arm64/boot/dts/apple/t7000.dtsi
index 85a34dc7bc01..52edc8d776a9 100644
--- a/arch/arm64/boot/dts/apple/t7000.dtsi
+++ b/arch/arm64/boot/dts/apple/t7000.dtsi
@@ -37,6 +37,9 @@
operating-points-v2 = <&typhoon_opp>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>;
+ d-cache-size = <0x10000>;
};
cpu1: cpu@1 {
@@ -47,6 +50,16 @@
operating-points-v2 = <&typhoon_opp>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>;
+ d-cache-size = <0x10000>;
+ };
+
+ l2_cache: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x100000>;
};
};
diff --git a/arch/arm64/boot/dts/apple/t7001.dtsi b/arch/arm64/boot/dts/apple/t7001.dtsi
index 8e2c67e19c41..a2efa81305df 100644
--- a/arch/arm64/boot/dts/apple/t7001.dtsi
+++ b/arch/arm64/boot/dts/apple/t7001.dtsi
@@ -39,6 +39,9 @@
operating-points-v2 = <&typhoon_opp>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>;
+ d-cache-size = <0x10000>;
};
cpu1: cpu@1 {
@@ -49,6 +52,9 @@
operating-points-v2 = <&typhoon_opp>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>;
+ d-cache-size = <0x10000>;
};
cpu2: cpu@2 {
@@ -59,6 +65,16 @@
operating-points-v2 = <&typhoon_opp>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>;
+ d-cache-size = <0x10000>;
+ };
+
+ l2_cache: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x200000>;
};
};
diff --git a/arch/arm64/boot/dts/apple/t8010.dtsi b/arch/arm64/boot/dts/apple/t8010.dtsi
index 17e294bd7c44..b961d4f65bc3 100644
--- a/arch/arm64/boot/dts/apple/t8010.dtsi
+++ b/arch/arm64/boot/dts/apple/t8010.dtsi
@@ -36,6 +36,9 @@
performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>; /* P-core */
+ d-cache-size = <0x10000>; /* P-core */
};
cpu1: cpu@1 {
@@ -46,6 +49,16 @@
performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>; /* P-core */
+ d-cache-size = <0x10000>; /* P-core */
+ };
+
+ l2_cache: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x300000>; /* P-cluster */
};
};
diff --git a/arch/arm64/boot/dts/apple/t8011.dtsi b/arch/arm64/boot/dts/apple/t8011.dtsi
index 5b280c896b76..974f78cc77cf 100644
--- a/arch/arm64/boot/dts/apple/t8011.dtsi
+++ b/arch/arm64/boot/dts/apple/t8011.dtsi
@@ -36,6 +36,9 @@
performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>; /* P-core */
+ d-cache-size = <0x10000>; /* P-core */
};
cpu1: cpu@1 {
@@ -46,6 +49,9 @@
performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>; /* P-core */
+ d-cache-size = <0x10000>; /* P-core */
};
cpu2: cpu@2 {
@@ -56,6 +62,16 @@
performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>; /* P-core */
+ d-cache-size = <0x10000>; /* P-core */
+ };
+
+ l2_cache: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x800000>; /* P-cluster */
};
};
diff --git a/arch/arm64/boot/dts/apple/t8012.dtsi b/arch/arm64/boot/dts/apple/t8012.dtsi
index 42df2f51ad7b..a259e5735d93 100644
--- a/arch/arm64/boot/dts/apple/t8012.dtsi
+++ b/arch/arm64/boot/dts/apple/t8012.dtsi
@@ -36,6 +36,9 @@
performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>; /* P-core */
+ d-cache-size = <0x10000>; /* P-core */
};
cpu1: cpu@10001 {
@@ -46,6 +49,16 @@
performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache>;
+ i-cache-size = <0x10000>; /* P-core */
+ d-cache-size = <0x10000>; /* P-core */
+ };
+
+ l2_cache: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x300000>; /* P-cluster */
};
};
diff --git a/arch/arm64/boot/dts/apple/t8015.dtsi b/arch/arm64/boot/dts/apple/t8015.dtsi
index 4d54afcecd50..12acf8fc8bc6 100644
--- a/arch/arm64/boot/dts/apple/t8015.dtsi
+++ b/arch/arm64/boot/dts/apple/t8015.dtsi
@@ -63,6 +63,9 @@
capacity-dmips-mhz = <633>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache_0>;
+ i-cache-size = <0x8000>;
+ d-cache-size = <0x8000>;
};
cpu_e1: cpu@1 {
@@ -74,6 +77,9 @@
capacity-dmips-mhz = <633>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache_0>;
+ i-cache-size = <0x8000>;
+ d-cache-size = <0x8000>;
};
cpu_e2: cpu@2 {
@@ -85,6 +91,9 @@
capacity-dmips-mhz = <633>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache_0>;
+ i-cache-size = <0x8000>;
+ d-cache-size = <0x8000>;
};
cpu_e3: cpu@3 {
@@ -96,6 +105,9 @@
capacity-dmips-mhz = <633>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache_0>;
+ i-cache-size = <0x8000>;
+ d-cache-size = <0x8000>;
};
cpu_p0: cpu@10004 {
@@ -107,6 +119,9 @@
capacity-dmips-mhz = <1024>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache_1>;
+ i-cache-size = <0x10000>;
+ d-cache-size = <0x10000>;
};
cpu_p1: cpu@10005 {
@@ -118,6 +133,23 @@
capacity-dmips-mhz = <1024>;
enable-method = "spin-table";
device_type = "cpu";
+ next-level-cache = <&l2_cache_1>;
+ i-cache-size = <0x10000>;
+ d-cache-size = <0x10000>;
+ };
+
+ l2_cache_0: l2-cache-0 {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x100000>;
+ };
+
+ l2_cache_1: l2-cache-1 {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x800000>;
};
};
diff --git a/arch/arm64/boot/dts/apple/t8103-j293.dts b/arch/arm64/boot/dts/apple/t8103-j293.dts
index 2dfe7b895b2b..e2d9439397f7 100644
--- a/arch/arm64/boot/dts/apple/t8103-j293.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j293.dts
@@ -77,6 +77,16 @@
};
};
+/*
+ * The driver depends on bootloader-initialized state, which resets when this
+ * power domain is powered off. This happens on suspend or when the driver is
+ * missing during boot. Mark the domain as always-on until the driver can
+ * handle this.
+ */
+&ps_dispdfr_be {
+ apple,always-on;
+};
+
&display_dfr {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi
index 97b6a067394e..20faf0c0d809 100644
--- a/arch/arm64/boot/dts/apple/t8103.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103.dtsi
@@ -11,6 +11,7 @@
#include <dt-bindings/interrupt-controller/apple-aic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/apple.h>
+#include <dt-bindings/spmi/spmi.h>
/ {
compatible = "apple,t8103", "apple,arm-platform";
@@ -741,6 +742,63 @@
};
};
+ nub_spmi: spmi@23d0d9300 {
+ compatible = "apple,t8103-spmi", "apple,spmi";
+ reg = <0x2 0x3d0d9300 0x0 0x100>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ pmic1: pmic@f {
+ compatible = "apple,sera-pmic", "apple,spmi-nvmem";
+ reg = <0xf SPMI_USID>;
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ boot_stage: boot-stage@9f01 {
+ reg = <0x9f01 0x1>;
+ };
+
+ boot_error_count: boot-error-count@9f02 {
+ reg = <0x9f02 0x1>;
+ bits = <0 4>;
+ };
+
+ panic_count: panic-count@9f02 {
+ reg = <0x9f02 0x1>;
+ bits = <4 4>;
+ };
+
+ boot_error_stage: boot-error-stage@9f03 {
+ reg = <0x9f03 0x1>;
+ };
+
+ shutdown_flag: shutdown-flag@9f0f {
+ reg = <0x9f0f 0x1>;
+ bits = <3 1>;
+ };
+
+ fault_shadow: fault-shadow@a67b {
+ reg = <0xa67b 0x10>;
+ };
+
+ socd: socd@ab00 {
+ reg = <0xab00 0x400>;
+ };
+
+ pm_setting: pm-setting@d001 {
+ reg = <0xd001 0x1>;
+ };
+
+ rtc_offset: rtc-offset@d100 {
+ reg = <0xd100 0x6>;
+ };
+ };
+ };
+ };
+
pinctrl_nub: pinctrl@23d1f0000 {
compatible = "apple,t8103-pinctrl", "apple,pinctrl";
reg = <0x2 0x3d1f0000 0x0 0x4000>;
diff --git a/arch/arm64/boot/dts/apple/t8112-j493.dts b/arch/arm64/boot/dts/apple/t8112-j493.dts
index 3d73f9ee2f46..be86d34c6696 100644
--- a/arch/arm64/boot/dts/apple/t8112-j493.dts
+++ b/arch/arm64/boot/dts/apple/t8112-j493.dts
@@ -40,6 +40,16 @@
};
};
+/*
+ * The driver depends on bootloader-initialized state, which resets when this
+ * power domain is powered off. This happens on suspend or when the driver is
+ * missing during boot. Mark the domain as always-on until the driver can
+ * handle this.
+ */
+&ps_dispdfr_be {
+ apple,always-on;
+};
+
&display_dfr {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/apple/t8112.dtsi b/arch/arm64/boot/dts/apple/t8112.dtsi
index d9b966d68e4f..e95711d8337f 100644
--- a/arch/arm64/boot/dts/apple/t8112.dtsi
+++ b/arch/arm64/boot/dts/apple/t8112.dtsi
@@ -782,6 +782,63 @@
interrupts = <AIC_IRQ 379 IRQ_TYPE_LEVEL_HIGH>;
};
+ nub_spmi: spmi@23d714000 {
+ compatible = "apple,t8112-spmi", "apple,spmi";
+ reg = <0x2 0x3d714000 0x0 0x100>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ pmic1: pmic@e {
+ compatible = "apple,stowe-pmic", "apple,spmi-nvmem";
+ reg = <0xe SPMI_USID>;
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ fault_shadow: fault-shadow@867b {
+ reg = <0x867b 0x10>;
+ };
+
+ socd: socd@8b00 {
+ reg = <0x8b00 0x400>;
+ };
+
+ boot_stage: boot-stage@f701 {
+ reg = <0xf701 0x1>;
+ };
+
+ boot_error_count: boot-error-count@f702 {
+ reg = <0xf702 0x1>;
+ bits = <0 4>;
+ };
+
+ panic_count: panic-count@f702 {
+ reg = <0xf702 0x1>;
+ bits = <4 4>;
+ };
+
+ boot_error_stage: boot-error-stage@f703 {
+ reg = <0xf703 0x1>;
+ };
+
+ shutdown_flag: shutdown-flag@f70f {
+ reg = <0xf70f 0x1>;
+ bits = <3 1>;
+ };
+
+ pm_setting: pm-setting@f801 {
+ reg = <0xf801 0x1>;
+ };
+
+ rtc_offset: rtc-offset@f900 {
+ reg = <0xf900 0x6>;
+ };
+ };
+ };
+ };
+
pinctrl_smc: pinctrl@23e820000 {
compatible = "apple,t8112-pinctrl", "apple,pinctrl";
reg = <0x2 0x3e820000 0x0 0x4000>;
diff --git a/arch/arm64/boot/dts/arm/corstone1000.dtsi b/arch/arm64/boot/dts/arm/corstone1000.dtsi
index 56ada8728b60..f35a5c96f3da 100644
--- a/arch/arm64/boot/dts/arm/corstone1000.dtsi
+++ b/arch/arm64/boot/dts/arm/corstone1000.dtsi
@@ -109,7 +109,6 @@
reg = <0x1a220000 0x1000>;
#address-cells = <1>;
#size-cells = <1>;
- clock-frequency = <50000000>;
ranges;
frame@1a230000 {
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
index 083be35495b3..a4b2b78d4df3 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
@@ -77,7 +77,6 @@
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
- clock-frequency = <100000000>;
};
pmu {
diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
index 9e10d7a6b5a2..68a69f17e93d 100644
--- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
@@ -44,6 +44,30 @@
#address-cells = <2>;
#size-cells = <0>;
+ idle-states {
+ entry-method = "psci";
+
+ CPU_SLEEP_0: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x0010000>;
+ entry-latency-us = <40>;
+ exit-latency-us = <100>;
+ min-residency-us = <150>;
+ status = "disabled";
+ };
+
+ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x1010000>;
+ entry-latency-us = <500>;
+ exit-latency-us = <1000>;
+ min-residency-us = <2500>;
+ status = "disabled";
+ };
+ };
+
cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,armv8";
@@ -56,6 +80,7 @@
d-cache-line-size = <64>;
d-cache-sets = <256>;
next-level-cache = <&C0_L2>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu1: cpu@100 {
device_type = "cpu";
@@ -69,6 +94,7 @@
d-cache-line-size = <64>;
d-cache-sets = <256>;
next-level-cache = <&C0_L2>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu2: cpu@200 {
device_type = "cpu";
@@ -82,6 +108,7 @@
d-cache-line-size = <64>;
d-cache-sets = <256>;
next-level-cache = <&C0_L2>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu3: cpu@300 {
device_type = "cpu";
@@ -95,6 +122,7 @@
d-cache-line-size = <64>;
d-cache-sets = <256>;
next-level-cache = <&C0_L2>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu4: cpu@10000 {
device_type = "cpu";
@@ -108,6 +136,7 @@
d-cache-line-size = <64>;
d-cache-sets = <256>;
next-level-cache = <&C1_L2>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu5: cpu@10100 {
device_type = "cpu";
@@ -121,6 +150,7 @@
d-cache-line-size = <64>;
d-cache-sets = <256>;
next-level-cache = <&C1_L2>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu6: cpu@10200 {
device_type = "cpu";
@@ -134,6 +164,7 @@
d-cache-line-size = <64>;
d-cache-sets = <256>;
next-level-cache = <&C1_L2>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu7: cpu@10300 {
device_type = "cpu";
@@ -147,6 +178,7 @@
d-cache-line-size = <64>;
d-cache-sets = <256>;
next-level-cache = <&C1_L2>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
C0_L2: l2-cache0 {
compatible = "cache";
@@ -169,7 +201,7 @@
memory@80000000 {
device_type = "memory";
- reg = <0x00000000 0x80000000 0 0x80000000>,
+ reg = <0x00000000 0x80000000 0 0x7c000000>,
<0x00000008 0x80000000 0 0x80000000>;
};
@@ -217,6 +249,19 @@
<GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
};
+ timer@2a810000 {
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x0 0x2a810000 0x0 0x10000>;
+ ranges = <0 0x0 0x2a820000 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ frame@2a830000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x10000 0x10000>;
+ };
+ };
+
pmu {
compatible = "arm,armv8-pmuv3";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
@@ -227,6 +272,60 @@
interrupts = <GIC_PPI 5 IRQ_TYPE_LEVEL_HIGH>;
};
+ ete-0 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu0>;
+ status = "disabled";
+ };
+
+ ete-1 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu1>;
+ status = "disabled";
+ };
+
+ ete-2 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu2>;
+ status = "disabled";
+ };
+
+ ete-3 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu3>;
+ status = "disabled";
+ };
+
+ ete-4 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu4>;
+ status = "disabled";
+ };
+
+ ete-5 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu5>;
+ status = "disabled";
+ };
+
+ ete-6 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu6>;
+ status = "disabled";
+ };
+
+ ete-7 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu7>;
+ status = "disabled";
+ };
+
+ trbe {
+ compatible = "arm,trace-buffer-extension";
+ interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_LOW>;
+ status = "disabled";
+ };
+
pci: pci@40000000 {
#address-cells = <0x3>;
#size-cells = <0x2>;
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 055764d0b9e5..9ccb80821bdb 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -10,7 +10,6 @@
memtimer: timer@2a810000 {
compatible = "arm,armv7-timer-mem";
reg = <0x0 0x2a810000 0x0 0x10000>;
- clock-frequency = <50000000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x0 0x2a820000 0x20000>;
diff --git a/arch/arm64/boot/dts/arm/morello.dtsi b/arch/arm64/boot/dts/arm/morello.dtsi
index 0bab0b3ea969..5bc1c725dc86 100644
--- a/arch/arm64/boot/dts/arm/morello.dtsi
+++ b/arch/arm64/boot/dts/arm/morello.dtsi
@@ -44,7 +44,7 @@
next-level-cache = <&l2_0>;
clocks = <&scmi_dvfs 0>;
- l2_0: l2-cache-0 {
+ l2_0: l2-cache {
compatible = "cache";
cache-level = <2>;
/* 8 ways set associative */
@@ -53,13 +53,6 @@
cache-sets = <2048>;
cache-unified;
next-level-cache = <&l3_0>;
-
- l3_0: l3-cache {
- compatible = "cache";
- cache-level = <3>;
- cache-size = <0x100000>;
- cache-unified;
- };
};
};
@@ -78,7 +71,7 @@
next-level-cache = <&l2_1>;
clocks = <&scmi_dvfs 0>;
- l2_1: l2-cache-1 {
+ l2_1: l2-cache {
compatible = "cache";
cache-level = <2>;
/* 8 ways set associative */
@@ -105,7 +98,7 @@
next-level-cache = <&l2_2>;
clocks = <&scmi_dvfs 1>;
- l2_2: l2-cache-2 {
+ l2_2: l2-cache {
compatible = "cache";
cache-level = <2>;
/* 8 ways set associative */
@@ -132,7 +125,7 @@
next-level-cache = <&l2_3>;
clocks = <&scmi_dvfs 1>;
- l2_3: l2-cache-3 {
+ l2_3: l2-cache {
compatible = "cache";
cache-level = <2>;
/* 8 ways set associative */
@@ -143,6 +136,13 @@
next-level-cache = <&l3_0>;
};
};
+
+ l3_0: l3-cache {
+ compatible = "cache";
+ cache-level = <3>;
+ cache-size = <0x100000>;
+ cache-unified;
+ };
};
firmware {
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
index 7f7226711d4b..a4a29193d4eb 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
@@ -116,7 +116,6 @@
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
- clock-frequency = <100000000>;
};
pmu {
diff --git a/arch/arm64/boot/dts/blaize/blaize-blzp1600-cb2.dts b/arch/arm64/boot/dts/blaize/blaize-blzp1600-cb2.dts
index 7e3cef2ed352..fb5415eb347a 100644
--- a/arch/arm64/boot/dts/blaize/blaize-blzp1600-cb2.dts
+++ b/arch/arm64/boot/dts/blaize/blaize-blzp1600-cb2.dts
@@ -81,3 +81,39 @@
"UART1_TO_RSP"; /* GPIO_15 */
};
};
+
+&gpio0 {
+ status = "okay";
+ gpio-line-names = "PERST_N", /* GPIO_0 */
+ "LM96063_ALERT_N", /* GPIO_1 */
+ "INA3221_PV", /* GPIO_2 */
+ "INA3221_CRIT", /* GPIO_3 */
+ "INA3221_WARN", /* GPIO_4 */
+ "INA3221_TC", /* GPIO_5 */
+ "QSPI0_RST_N", /* GPIO_6 */
+ "LM96063_TCRIT_N", /* GPIO_7 */
+ "DSI_TCH_INT", /* GPIO_8 */
+ "DSI_RST", /* GPIO_9 */
+ "DSI_BL", /* GPIO_10 */
+ "DSI_INT", /* GPIO_11 */
+ "ETH_RST", /* GPIO_12 */
+ "CSI0_RST", /* GPIO_13 */
+ "CSI0_PWDN", /* GPIO_14 */
+ "CSI1_RST", /* GPIO_15 */
+ "CSI1_PWDN", /* GPIO_16 */
+ "CSI2_RST", /* GPIO_17 */
+ "CSI2_PWDN", /* GPIO_18 */
+ "CSI3_RST", /* GPIO_19 */
+ "CSI3_PWDN", /* GPIO_20 */
+ "ADAC_RST", /* GPIO_21 */
+ "SD_SW_VDD", /* GPIO_22 */
+ "SD_PON_VDD", /* GPIO_23 */
+ "GPIO_EXP_INT", /* GPIO_24 */
+ "BOARD_ID_0", /* GPIO_25 */
+ "SDIO1_SW_VDD", /* GPIO_26 */
+ "SDIO1_PON_VDD", /* GPIO_27 */
+ "SDIO2_SW_VDD", /* GPIO_28 */
+ "SDIO2_PON_VDD", /* GPIO_29 */
+ "BOARD_ID_1", /* GPIO_30 */
+ "BOARD_ID_2"; /* GPIO_31 */
+};
diff --git a/arch/arm64/boot/dts/blaize/blaize-blzp1600.dtsi b/arch/arm64/boot/dts/blaize/blaize-blzp1600.dtsi
index 7d399e6a532f..5a6c882b2f57 100644
--- a/arch/arm64/boot/dts/blaize/blaize-blzp1600.dtsi
+++ b/arch/arm64/boot/dts/blaize/blaize-blzp1600.dtsi
@@ -120,6 +120,18 @@
IRQ_TYPE_LEVEL_LOW)>;
};
+ gpio0: gpio@4c0000 {
+ compatible = "blaize,blzp1600-gpio";
+ reg = <0x4c0000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <32>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ status = "disabled";
+ };
+
uart0: serial@4d0000 {
compatible = "ns16550a";
reg = <0x4d0000 0x1000>;
diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile
index 3d0efb93b06d..01ecfa304184 100644
--- a/arch/arm64/boot/dts/broadcom/Makefile
+++ b/arch/arm64/boot/dts/broadcom/Makefile
@@ -8,6 +8,7 @@ dtb-$(CONFIG_ARCH_BCM2835) += bcm2711-rpi-400.dtb \
bcm2711-rpi-cm4-io.dtb \
bcm2712-rpi-5-b.dtb \
bcm2712-d-rpi-5-b.dtb \
+ bcm2837-rpi-2-b.dtb \
bcm2837-rpi-3-a-plus.dtb \
bcm2837-rpi-3-b.dtb \
bcm2837-rpi-3-b-plus.dtb \
diff --git a/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts b/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts
index fbc56309660f..34470e3d7171 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts
+++ b/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts
@@ -104,3 +104,11 @@
clocks = <&firmware_clocks 13>, <&firmware_clocks 14>, <&dvp 1>, <&clk_27MHz>;
clock-names = "hdmi", "bvb", "audio", "cec";
};
+
+&pcie1 {
+ status = "okay";
+};
+
+&pcie2 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
index 9e610a89a337..0a9212d3106f 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
@@ -64,7 +64,7 @@
i-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set
next-level-cache = <&l2_cache_l0>;
- l2_cache_l0: l2-cache-l0 {
+ l2_cache_l0: l2-cache {
compatible = "cache";
cache-size = <0x80000>;
cache-line-size = <64>;
@@ -88,7 +88,7 @@
i-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set
next-level-cache = <&l2_cache_l1>;
- l2_cache_l1: l2-cache-l1 {
+ l2_cache_l1: l2-cache {
compatible = "cache";
cache-size = <0x80000>;
cache-line-size = <64>;
@@ -112,7 +112,7 @@
i-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set
next-level-cache = <&l2_cache_l2>;
- l2_cache_l2: l2-cache-l2 {
+ l2_cache_l2: l2-cache {
compatible = "cache";
cache-size = <0x80000>;
cache-line-size = <64>;
@@ -136,7 +136,7 @@
i-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set
next-level-cache = <&l2_cache_l3>;
- l2_cache_l3: l2-cache-l3 {
+ l2_cache_l3: l2-cache {
compatible = "cache";
cache-size = <0x80000>;
cache-line-size = <64>;
@@ -192,6 +192,12 @@
#address-cells = <1>;
#size-cells = <1>;
+ pcie_rescal: reset-controller@119500 {
+ compatible = "brcm,bcm7216-pcie-sata-rescal";
+ reg = <0x00119500 0x10>;
+ #reset-cells = <0>;
+ };
+
sdio1: mmc@fff000 {
compatible = "brcm,bcm2712-sdhci",
"brcm,sdhci-brcmstb";
@@ -204,6 +210,12 @@
mmc-ddr-3_3v;
};
+ bcm_reset: reset-controller@1504318 {
+ compatible = "brcm,brcmstb-reset";
+ reg = <0x01504318 0x30>;
+ #reset-cells = <1>;
+ };
+
system_timer: timer@7c003000 {
compatible = "brcm,bcm2835-system-timer";
reg = <0x7c003000 0x1000>;
@@ -426,6 +438,141 @@
vc4: gpu {
compatible = "brcm,bcm2712-vc6";
};
+
+ pcie0: pcie@1000100000 {
+ compatible = "brcm,bcm2712-pcie";
+ reg = <0x10 0x00100000 0x00 0x9310>;
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ max-link-speed = <2>;
+ num-lanes = <1>;
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ interrupt-parent = <&gicv2>;
+ interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pcie", "msi";
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gicv2 GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gicv2 GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gicv2 GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&pcie_rescal>, <&bcm_reset 42>;
+ reset-names = "rescal", "bridge";
+ msi-controller;
+ msi-parent = <&pcie0>;
+
+ ranges =
+ /* ~4GiB, 32-bit, non-prefetchable at PCIe 00_0000_0000 */
+ <0x02000000 0x00 0x00000000 0x17 0x00000000 0x00 0xfffffffc>,
+ /* 12GiB, 64-bit, prefetchable at PCIe 04_0000_0000 */
+ <0x43000000 0x04 0x00000000 0x14 0x00000000 0x03 0x00000000>;
+
+ dma-ranges =
+ /* 64GiB, 64-bit, prefetchable at PCIe 10_0000_0000 */
+ <0x43000000 0x10 0x00000000 0x00 0x00000000 0x10 0x00000000>;
+
+ status = "disabled";
+ };
+
+ pcie1: pcie@1000110000 {
+ compatible = "brcm,bcm2712-pcie";
+ reg = <0x10 0x00110000 0x00 0x9310>;
+ device_type = "pci";
+ linux,pci-domain = <1>;
+ max-link-speed = <2>;
+ num-lanes = <1>;
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ interrupt-parent = <&gicv2>;
+ interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pcie", "msi";
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gicv2 GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gicv2 GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gicv2 GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&pcie_rescal>, <&bcm_reset 43>;
+ reset-names = "rescal", "bridge";
+ msi-controller;
+ msi-parent = <&mip1>;
+
+ ranges =
+ /* ~4GiB, 32-bit, non-prefetchable at PCIe 00_0000_0000 */
+ <0x02000000 0x00 0x00000000 0x1b 0x00000000 0x00 0xfffffffc>,
+ /* 12GiB, 64-bit, prefetchable at PCIe 04_0000_0000 */
+ <0x43000000 0x04 0x00000000 0x18 0x00000000 0x03 0x00000000>;
+
+ dma-ranges =
+ /* 64GiB, 64-bit, non-prefetchable at PCIe 10_0000_0000 */
+ <0x03000000 0x10 0x00000000 0x00 0x00000000 0x10 0x00000000>,
+ /* 4KiB, 64-bit, non-prefetchable at PCIe ff_ffff_f000 MIP1 */
+ <0x03000000 0xff 0xfffff000 0x10 0x00131000 0x00 0x00001000>;
+
+ status = "disabled";
+ };
+
+ pcie2: pcie@1000120000 {
+ compatible = "brcm,bcm2712-pcie";
+ reg = <0x10 0x00120000 0x00 0x9310>;
+ device_type = "pci";
+ linux,pci-domain = <2>;
+ max-link-speed = <2>;
+ num-lanes = <4>;
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ interrupt-parent = <&gicv2>;
+ interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pcie", "msi";
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gicv2 GIC_SPI 230 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gicv2 GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gicv2 GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&pcie_rescal>, <&bcm_reset 44>;
+ reset-names = "rescal", "bridge";
+ msi-controller;
+ msi-parent = <&mip0>;
+
+ ranges =
+ /* ~4GiB, 32-bit, non-prefetchable at PCIe 00_0000_0000 */
+ <0x02000000 0x00 0x00000000 0x1f 0x00000000 0x00 0xfffffffc>,
+ /* 12GiB, 64-bit, prefetchable at PCIe 04_0000_0000 */
+ <0x43000000 0x04 0x00000000 0x1c 0x00000000 0x03 0x00000000>;
+
+ dma-ranges =
+ /* 4MiB, 32-bit, non-prefetchable at PCIe 00_0000_0000 */
+ <0x02000000 0x00 0x00000000 0x1f 0x00000000 0x00 0x00400000>,
+ /* 64GiB, 64-bit, prefetchable at PCIe 10_0000_0000 */
+ <0x43000000 0x10 0x00000000 0x00 0x00000000 0x10 0x00000000>,
+ /* 4KiB, 64-bit, non-prefetchable at PCIe ff_ffff_f000 MIP0 */
+ <0x03000000 0xff 0xfffff000 0x10 0x00130000 0x00 0x00001000>;
+
+ status = "disabled";
+ };
+
+ mip0: msi-controller@1000130000 {
+ compatible = "brcm,bcm2712-mip";
+ reg = <0x10 0x00130000 0x00 0xc0>,
+ <0xff 0xfffff000 0x00 0x1000>;
+ msi-controller;
+ msi-ranges = <&gicv2 GIC_SPI 128 IRQ_TYPE_EDGE_RISING 64>;
+ brcm,msi-offset = <0>;
+ };
+
+ mip1: msi-controller@1000131000 {
+ compatible = "brcm,bcm2712-mip";
+ reg = <0x10 0x00131000 0x00 0xc0>,
+ <0xff 0xfffff000 0x00 0x1000>;
+ msi-controller;
+ msi-ranges = <&gicv2 GIC_SPI 247 IRQ_TYPE_EDGE_RISING 8>;
+ brcm,msi-offset = <8>;
+ };
};
timer {
diff --git a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-2-b.dts b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-2-b.dts
new file mode 100644
index 000000000000..57742ed40049
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-2-b.dts
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "arm/broadcom/bcm2837-rpi-2-b.dts"
diff --git a/arch/arm64/boot/dts/exynos/Makefile b/arch/arm64/boot/dts/exynos/Makefile
index f6f4bc650a94..89c90564c3d8 100644
--- a/arch/arm64/boot/dts/exynos/Makefile
+++ b/arch/arm64/boot/dts/exynos/Makefile
@@ -5,6 +5,9 @@ dtb-$(CONFIG_ARCH_EXYNOS) += \
exynos5433-tm2.dtb \
exynos5433-tm2e.dtb \
exynos7-espresso.dtb \
+ exynos7870-a2corelte.dtb \
+ exynos7870-j6lte.dtb \
+ exynos7870-on7xelte.dtb \
exynos7885-jackpotlte.dtb \
exynos850-e850-96.dtb \
exynos8895-dreamlte.dtb \
diff --git a/arch/arm64/boot/dts/exynos/exynos7870-a2corelte.dts b/arch/arm64/boot/dts/exynos/exynos7870-a2corelte.dts
new file mode 100644
index 000000000000..eb7b48593187
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos7870-a2corelte.dts
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Samsung Galaxy A2 Core (a2corelte) device tree source
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2025 Kaustabh Chakraborty <kauschluss@disroot.org>
+ */
+
+/dts-v1/;
+#include "exynos7870.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ model = "Samsung Galaxy A2 Core";
+ compatible = "samsung,a2corelte", "samsung,exynos7870";
+ chassis-type = "handset";
+
+ aliases {
+ mmc0 = &mmc0;
+ mmc1 = &mmc1;
+ mmc2 = &mmc2;
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ };
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+
+ stdout-path = &serial2;
+
+ framebuffer@67000000 {
+ compatible = "simple-framebuffer";
+ reg = <0x0 0x67000000 (540 * 960 * 4)>;
+ width = <540>;
+ height = <960>;
+ stride = <(540 * 4)>;
+ format = "a8r8g8b8";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ label = "GPIO Keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&key_power &key_voldown &key_volup>;
+
+ key-power {
+ label = "Power Key";
+ gpios = <&gpa0 0 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ };
+
+ key-voldown {
+ label = "Volume Down Key";
+ gpios = <&gpa2 1 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+
+ key-volup {
+ label = "Volume Up Key";
+ gpios = <&gpa2 0 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0x3f200000>;
+ };
+
+ pwrseq_mmc1: pwrseq-mmc1 {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpd3 6 GPIO_ACTIVE_LOW>;
+ };
+
+ /* mmc2: vmmc */
+ vdd_fixed_mmc2: regulator-fixed-mmc2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_fixed_mmc2";
+ regulator-max-microvolt = <2800000>;
+ regulator-min-microvolt = <2800000>;
+ gpio = <&gpc0 0 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vdd_fixed_proxled: regulator-fixed-proxled {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_fixed_proxled";
+ regulator-boot-on;
+ regulator-always-on;
+ gpio = <&gpd4 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+
+ ramoops@46800000 {
+ compatible = "ramoops";
+ reg = <0x0 0x46800000 0x8000>;
+ console-size = <0x4000>;
+ pmsg-size = <0x4000>;
+ };
+
+ framebuffer@67000000 {
+ reg = <0x0 0x67000000 (540 * 960 * 4)>;
+ no-map;
+ };
+ };
+
+ vibrator {
+ compatible = "regulator-haptic";
+ haptic-supply = <&vdd_ldo32>;
+ min-microvolt = <3300000>;
+ max-microvolt = <3300000>;
+ };
+};
+
+&gpu {
+ status = "okay";
+};
+
+&hsi2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "okay";
+
+ pmic@66 {
+ compatible = "samsung,s2mpu05-pmic";
+ reg = <0x66>;
+ interrupt-parent = <&gpa0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_irq>;
+
+ regulators {
+ vdd_buck1: buck1 {
+ regulator-name = "vdd_buck1";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_buck2: buck2 {
+ regulator-name = "vdd_buck2";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_buck3: buck3 {
+ regulator-name = "vdd_buck3";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_buck4: buck4 {
+ regulator-name = "vdd_buck4";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_buck5: buck5 {
+ regulator-name = "vdd_buck5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2100000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo1: ldo1 {
+ regulator-name = "vdd_ldo1";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* mmc2: vqmmc */
+ vdd_ldo2: ldo2 {
+ regulator-name = "vdd_ldo2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-ramp-delay = <12000>;
+ };
+
+ vdd_ldo3: ldo3 {
+ regulator-name = "vdd_ldo3";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2375000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo4: ldo4 {
+ regulator-name = "vdd_ldo4";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo5: ldo5 {
+ regulator-name = "vdd_ldo5";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo6: ldo6 {
+ regulator-name = "vdd_ldo6";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo7: ldo7 {
+ regulator-name = "vdd_ldo7";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2375000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* usbdrd: vdd33 */
+ vdd_ldo8: ldo8 {
+ regulator-name = "vdd_ldo8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3375000>;
+ regulator-ramp-delay = <12000>;
+ };
+
+ vdd_ldo9: ldo9 {
+ regulator-name = "vdd_ldo9";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo10: ldo10 {
+ regulator-name = "vdd_ldo10";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo25: ldo25 {
+ regulator-name = "vdd_ldo25";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2375000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* mmc0: vmmc */
+ vdd_ldo26: ldo26 {
+ regulator-name = "vdd_ldo26";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3375000>;
+ regulator-ramp-delay = <12000>;
+ };
+
+ /* mmc0: vqmmc */
+ vdd_ldo27: ldo27 {
+ regulator-name = "vdd_ldo27";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2375000>;
+ regulator-ramp-delay = <12000>;
+ };
+
+ vdd_ldo29: ldo29 {
+ regulator-name = "vdd_ldo29";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo30: ldo30 {
+ regulator-name = "vdd_ldo30";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo31: ldo31 {
+ regulator-name = "vdd_ldo31";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* vibrator: haptic */
+ vdd_ldo32: ldo32 {
+ regulator-name = "vdd_ldo32";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12000>;
+ };
+
+ vdd_ldo33: ldo33 {
+ regulator-name = "vdd_ldo33";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo34: ldo34 {
+ regulator-name = "vdd_ldo34";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* touchscreen: vdd */
+ vdd_ldo35: ldo35 {
+ regulator-name = "vdd_ldo35";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-ramp-delay = <12000>;
+ };
+ };
+ };
+};
+
+&i2c5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <400000>;
+
+ status = "okay";
+
+ accelerometer@1d {
+ compatible = "st,lis2ds12";
+ reg = <0x1d>;
+ interrupt-parent = <&gpa2>;
+ interrupts = <3 IRQ_TYPE_EDGE_RISING>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&accel_irq>;
+
+ st,drdy-int-pin = <1>;
+ };
+
+ proximity@48 {
+ compatible = "sensortek,stk3013", "sensortek,stk3310";
+ reg = <0x48>;
+ interrupt-parent = <&gpa0>;
+ interrupts = <5 IRQ_TYPE_EDGE_BOTH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&proxm_irq>;
+
+ proximity-near-level = <25>;
+ };
+};
+
+&i2c6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <400000>;
+
+ status = "okay";
+
+ touchscreen@4b {
+ compatible = "syna,rmi4-i2c";
+ reg = <0x4b>;
+ interrupt-parent = <&gpa0>;
+ interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&touch_irq>;
+
+ vdd-supply = <&vdd_ldo35>;
+
+ syna,reset-delay-ms = <200>;
+ syna,startup-delay-ms = <200>;
+
+ rmi4-f01@1 {
+ reg = <0x01>;
+ syna,nosleep-mode = <1>;
+ };
+
+ rmi4-f12@12 {
+ reg = <0x12>;
+ syna,sensor-type = <1>;
+ syna,rezero-wait-ms = <200>;
+ syna,clip-x-high = <539>;
+ syna,clip-y-high = <959>;
+ touchscreen-x-mm = <62>;
+ touchscreen-y-mm = <110>;
+ };
+ };
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_rdqs &sd0_bus1 &sd0_bus4 &sd0_bus8>;
+
+ vmmc-supply = <&vdd_ldo26>;
+ vqmmc-supply = <&vdd_ldo27>;
+
+ fifo-depth = <64>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 4>;
+ samsung,dw-mshc-ddr-timing = <2 4>;
+ non-removable;
+
+ status = "okay";
+};
+
+&mmc1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_bus1 &sd1_bus4>;
+
+ mmc-pwrseq = <&pwrseq_mmc1>;
+
+ bus-width = <4>;
+ fifo-depth = <64>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 3>;
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ non-removable;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+
+ status = "okay";
+
+ wifi@1 {
+ compatible = "brcm,bcm43430a1-fmac", "brcm,bcm4329-fmac";
+ reg = <0x1>;
+ interrupt-names = "host-wake";
+ interrupt-parent = <&gpa2>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&gpd3 6 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_bus1 &sd2_bus4 &dwmmc2_irq>;
+
+ vmmc-supply = <&vdd_fixed_mmc2>;
+ vqmmc-supply = <&vdd_ldo2>;
+
+ bus-width = <4>;
+ card-detect-delay = <200>;
+ fifo-depth = <64>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 3>;
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ broken-cd;
+ disable-wp;
+
+ status = "okay";
+};
+
+&oscclk {
+ clock-frequency = <26000000>;
+};
+
+&pinctrl_alive {
+ accel_irq: accel-irq-pins {
+ samsung,pins = "gpa2-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ dwmmc2_irq: dwmmc2-irq-pins {
+ samsung,pins = "gpa0-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ fuel_irq: fuel-irq-pins {
+ samsung,pins = "gpa0-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ key_power: key-power-pins {
+ samsung,pins = "gpa0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ key_voldown: key-voldown-pins {
+ samsung,pins = "gpa2-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ key_volup: key-volup-pins {
+ samsung,pins = "gpa2-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ pmic_irq: pmic-irq-pins {
+ samsung,pins = "gpa0-2";
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR4>;
+ };
+
+ proxm_irq: proxm-irq-pins {
+ samsung,pins = "gpa0-5";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ touch_irq: touch-irq-pins {
+ samsung,pins = "gpa0-6";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ wlan_hostwake: wlan-hostwake-pins {
+ samsung,pins = "gpa2-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ };
+};
+
+&pinctrl_top {
+ bt_enable: bt-enable-pins {
+ samsung,pins = "gpd4-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_PREV>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ };
+
+ wlan_enable: wlan-enable-pins {
+ samsung,pins = "gpd3-6";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_PREV>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR4>;
+ samsung,pin-val = <0>;
+ };
+};
+
+&serial1 {
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43430a1-bt";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_btwake &bt_hostwake &bt_enable>;
+
+ device-wakeup-gpios = <&gpa1 2 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpa1 6 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpd4 0 GPIO_ACTIVE_HIGH>;
+
+ max-speed = <3000000>;
+ };
+};
+
+&serial2 {
+ status = "okay";
+};
+
+&usbdrd {
+ vdd33-supply = <&vdd_ldo8>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos7870-j6lte.dts b/arch/arm64/boot/dts/exynos/exynos7870-j6lte.dts
new file mode 100644
index 000000000000..61eec1aff32e
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos7870-j6lte.dts
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Samsung Galaxy J6 (j6lte) device tree source
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2025 Kaustabh Chakraborty <kauschluss@disroot.org>
+ */
+
+/dts-v1/;
+#include "exynos7870.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ model = "Samsung Galaxy J6";
+ compatible = "samsung,j6lte", "samsung,exynos7870";
+ chassis-type = "handset";
+
+ aliases {
+ mmc0 = &mmc0;
+ mmc1 = &mmc1;
+ mmc2 = &mmc2;
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ };
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+
+ stdout-path = &serial2;
+
+ framebuffer@67000000 {
+ compatible = "simple-framebuffer";
+ reg = <0x0 0x67000000 (720 * 1480 * 4)>;
+ width = <720>;
+ height = <1480>;
+ stride = <(720 * 4)>;
+ format = "a8r8g8b8";
+ };
+ };
+
+ gpio-hall-effect-sensor {
+ compatible = "gpio-keys";
+ label = "GPIO Hall Effect Sensor";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&hall_irq>;
+
+ event-hall-effect-sensor {
+ label = "Hall Effect Sensor";
+ gpios = <&gpa1 3 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ linux,can-disable;
+ wakeup-source;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ label = "GPIO Keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&key_power &key_voldown &key_volup>;
+
+ key-power {
+ label = "Power Key";
+ gpios = <&gpa0 0 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ };
+
+ key-voldown {
+ label = "Volume Down Key";
+ gpios = <&gpa2 1 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+
+ key-volup {
+ label = "Volume Up Key";
+ gpios = <&gpa2 0 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0x3d800000>,
+ <0x0 0x80000000 0x7d800000>;
+ };
+
+ pwrseq_mmc1: pwrseq-mmc1 {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpd3 6 GPIO_ACTIVE_LOW>;
+ };
+
+ /* mmc2: vmmc */
+ vdd_fixed_mmc2: regulator-fixed-mmc2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_fixed_mmc2";
+ regulator-max-microvolt = <2800000>;
+ regulator-min-microvolt = <2800000>;
+ gpio = <&gpc0 0 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+
+ ramoops@46e00000 {
+ compatible = "ramoops";
+ reg = <0x0 0x46e00000 0x8000>;
+ console-size = <0x4000>;
+ pmsg-size = <0x4000>;
+ };
+
+ framebuffer@67000000 {
+ reg = <0x0 0x67000000 (720 * 1480 * 4)>;
+ no-map;
+ };
+ };
+
+ vibrator {
+ compatible = "regulator-haptic";
+ haptic-supply = <&vdd_ldo32>;
+ min-microvolt = <3300000>;
+ max-microvolt = <3300000>;
+ };
+};
+
+&gpu {
+ status = "okay";
+};
+
+&hsi2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "okay";
+
+ pmic@66 {
+ compatible = "samsung,s2mpu05-pmic";
+ reg = <0x66>;
+ interrupt-parent = <&gpa0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_irq>;
+
+ regulators {
+ vdd_buck1: buck1 {
+ regulator-name = "vdd_buck1";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_buck2: buck2 {
+ regulator-name = "vdd_buck2";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_buck3: buck3 {
+ regulator-name = "vdd_buck3";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_buck4: buck4 {
+ regulator-name = "vdd_buck4";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_buck5: buck5 {
+ regulator-name = "vdd_buck5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2100000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo1: ldo1 {
+ regulator-name = "vdd_ldo1";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* mmc2: vqmmc */
+ vdd_ldo2: ldo2 {
+ regulator-name = "vdd_ldo2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-ramp-delay = <12000>;
+ };
+
+ vdd_ldo3: ldo3 {
+ regulator-name = "vdd_ldo3";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2375000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo4: ldo4 {
+ regulator-name = "vdd_ldo4";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo5: ldo5 {
+ regulator-name = "vdd_ldo5";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo6: ldo6 {
+ regulator-name = "vdd_ldo6";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo7: ldo7 {
+ regulator-name = "vdd_ldo7";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2375000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* usbdrd: vdd33 */
+ vdd_ldo8: ldo8 {
+ regulator-name = "vdd_ldo8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3375000>;
+ regulator-ramp-delay = <12000>;
+ };
+
+ vdd_ldo9: ldo9 {
+ regulator-name = "vdd_ldo9";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo10: ldo10 {
+ regulator-name = "vdd_ldo10";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo25: ldo25 {
+ regulator-name = "vdd_ldo25";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2375000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* mmc0: vmmc */
+ vdd_ldo26: ldo26 {
+ regulator-name = "vdd_ldo26";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3375000>;
+ regulator-ramp-delay = <12000>;
+ };
+
+ /* mmc0: vqmmc */
+ vdd_ldo27: ldo27 {
+ regulator-name = "vdd_ldo27";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2375000>;
+ regulator-ramp-delay = <12000>;
+ };
+
+ vdd_ldo29: ldo29 {
+ regulator-name = "vdd_ldo29";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo30: ldo30 {
+ regulator-name = "vdd_ldo30";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo31: ldo31 {
+ regulator-name = "vdd_ldo31";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* vibrator: haptic */
+ vdd_ldo32: ldo32 {
+ regulator-name = "vdd_ldo32";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12000>;
+ };
+
+ vdd_ldo33: ldo33 {
+ regulator-name = "vdd_ldo33";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* touchscreen: vdd */
+ vdd_ldo34: ldo34 {
+ regulator-name = "vdd_ldo34";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vdd_ldo35: ldo35 {
+ regulator-name = "vdd_ldo35";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <400000>;
+
+ status = "okay";
+
+ accelerometer@1d {
+ compatible = "st,lis2ds12";
+ reg = <0x1d>;
+ interrupt-parent = <&gpa2>;
+ interrupts = <3 IRQ_TYPE_EDGE_RISING>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&accel_irq>;
+
+ mount-matrix = "-1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "-1";
+
+ st,drdy-int-pin = <1>;
+ };
+};
+
+&i2c6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <400000>;
+
+ status = "okay";
+
+ touchscreen@20 {
+ compatible = "zinitix,bt532";
+ reg = <0x20>;
+ interrupt-parent = <&gpa0>;
+ interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&touch_irq>;
+
+ touchscreen-size-x = <720>;
+ touchscreen-size-y = <1480>;
+
+ vdd-supply = <&vdd_ldo34>;
+ };
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_rdqs &sd0_bus1 &sd0_bus4 &sd0_bus8>;
+
+ vmmc-supply = <&vdd_ldo26>;
+ vqmmc-supply = <&vdd_ldo27>;
+
+ fifo-depth = <64>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 4>;
+ samsung,dw-mshc-ddr-timing = <2 4>;
+ non-removable;
+
+ status = "okay";
+};
+
+&mmc1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_bus1 &sd1_bus4>;
+
+ mmc-pwrseq = <&pwrseq_mmc1>;
+
+ bus-width = <4>;
+ fifo-depth = <64>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 3>;
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ non-removable;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+
+ status = "okay";
+
+ wifi@1 {
+ compatible = "brcm,bcm43430a1-fmac", "brcm,bcm4329-fmac";
+ reg = <0x1>;
+ interrupt-names = "host-wake";
+ interrupt-parent = <&gpa2>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&gpd3 6 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_bus1 &sd2_bus4 &dwmmc2_irq>;
+
+ vmmc-supply = <&vdd_fixed_mmc2>;
+ vqmmc-supply = <&vdd_ldo2>;
+
+ bus-width = <4>;
+ card-detect-delay = <200>;
+ fifo-depth = <64>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 3>;
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ broken-cd;
+ disable-wp;
+
+ status = "okay";
+};
+
+&oscclk {
+ clock-frequency = <26000000>;
+};
+
+&pinctrl_alive {
+ accel_irq: accel-irq-pins {
+ samsung,pins = "gpa2-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ dwmmc2_irq: dwmmc2-irq-pins {
+ samsung,pins = "gpa0-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ fuel_irq: fuel-irq-pins {
+ samsung,pins = "gpa0-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ hall_irq: hall-irq-pins {
+ samsung,pins = "gpa1-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ key_power: key-power-pins {
+ samsung,pins = "gpa0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ key_voldown: key-voldown-pins {
+ samsung,pins = "gpa2-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ key_volup: key-volup-pins {
+ samsung,pins = "gpa2-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ pmic_irq: pmic-irq-pins {
+ samsung,pins = "gpa0-2";
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR4>;
+ };
+
+ touch_irq: touch-irq-pins {
+ samsung,pins = "gpa0-6";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ wlan_hostwake: wlan-hostwake-pins {
+ samsung,pins = "gpa2-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ };
+};
+
+&pinctrl_top {
+ bt_enable: bt-enable-pins {
+ samsung,pins = "gpd4-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_PREV>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ };
+
+ wlan_enable: wlan-enable-pins {
+ samsung,pins = "gpd3-6";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_PREV>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR4>;
+ samsung,pin-val = <0>;
+ };
+};
+
+&serial1 {
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43430a1-bt";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_btwake &bt_hostwake &bt_enable>;
+
+ device-wakeup-gpios = <&gpa1 2 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpa1 6 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpd4 0 GPIO_ACTIVE_HIGH>;
+
+ max-speed = <3000000>;
+ };
+};
+
+&serial2 {
+ status = "okay";
+};
+
+&usbdrd {
+ vdd33-supply = <&vdd_ldo8>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos7870-on7xelte.dts b/arch/arm64/boot/dts/exynos/exynos7870-on7xelte.dts
new file mode 100644
index 000000000000..eb97dcc41542
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos7870-on7xelte.dts
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Samsung Galaxy J7 Prime (on7xelte) device tree source
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2025 Kaustabh Chakraborty <kauschluss@disroot.org>
+ */
+
+/dts-v1/;
+#include "exynos7870.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ model = "Samsung Galaxy J7 Prime";
+ compatible = "samsung,on7xelte", "samsung,exynos7870";
+ chassis-type = "handset";
+
+ aliases {
+ mmc0 = &mmc0;
+ mmc1 = &mmc1;
+ mmc2 = &mmc2;
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ };
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+
+ stdout-path = &serial2;
+
+ framebuffer@67000000 {
+ compatible = "simple-framebuffer";
+ reg = <0x0 0x67000000 (1080 * 1920 * 4)>;
+ width = <1080>;
+ height = <1920>;
+ stride = <(1080 * 4)>;
+ format = "a8r8g8b8";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ label = "GPIO Keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&key_home &key_power &key_voldown &key_volup>;
+
+ key-home {
+ label = "Home Key";
+ gpios = <&gpa1 7 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_HOMEPAGE>;
+ };
+
+ key-power {
+ label = "Power Key";
+ gpios = <&gpa0 0 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ };
+
+ key-voldown {
+ label = "Volume Down Key";
+ gpios = <&gpa2 1 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+
+ key-volup {
+ label = "Volume Up Key";
+ gpios = <&gpa2 0 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0x3e400000>,
+ <0x0 0x80000000 0xbe400000>;
+ };
+
+ pwrseq_mmc1: pwrseq-mmc1 {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpd3 6 GPIO_ACTIVE_LOW>;
+ };
+
+ /* mmc2: vmmc */
+ vdd_fixed_mmc2: regulator-fixed-mmc2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_fixed_mmc2";
+ regulator-max-microvolt = <2800000>;
+ regulator-min-microvolt = <2800000>;
+ gpio = <&gpc0 0 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+
+ ramoops@46e00000 {
+ compatible = "ramoops";
+ reg = <0x0 0x46e00000 0x8000>;
+ console-size = <0x4000>;
+ pmsg-size = <0x4000>;
+ };
+
+ framebuffer@67000000 {
+ reg = <0x0 0x67000000 (1080 * 1920 * 4)>;
+ no-map;
+ };
+ };
+
+ vibrator {
+ compatible = "regulator-haptic";
+ haptic-supply = <&vdd_ldo32>;
+ min-microvolt = <3300000>;
+ max-microvolt = <3300000>;
+ };
+};
+
+&gpu {
+ status = "okay";
+};
+
+&hsi2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "okay";
+
+ pmic@66 {
+ compatible = "samsung,s2mpu05-pmic";
+ reg = <0x66>;
+
+ interrupt-parent = <&gpa0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_irq>;
+
+ regulators {
+ vdd_buck1: buck1 {
+ regulator-name = "vdd_buck1";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_buck2: buck2 {
+ regulator-name = "vdd_buck2";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_buck3: buck3 {
+ regulator-name = "vdd_buck3";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_buck4: buck4 {
+ regulator-name = "vdd_buck4";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_buck5: buck5 {
+ regulator-name = "vdd_buck5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2100000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo1: ldo1 {
+ regulator-name = "vdd_ldo1";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* mmc2: vqmmc */
+ vdd_ldo2: ldo2 {
+ regulator-name = "vdd_ldo2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-ramp-delay = <12000>;
+ };
+
+ vdd_ldo3: ldo3 {
+ regulator-name = "vdd_ldo3";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2375000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo4: ldo4 {
+ regulator-name = "vdd_ldo4";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo5: ldo5 {
+ regulator-name = "vdd_ldo5";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo6: ldo6 {
+ regulator-name = "vdd_ldo6";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo7: ldo7 {
+ regulator-name = "vdd_ldo7";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2375000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* usbdrd: vdd33 */
+ vdd_ldo8: ldo8 {
+ regulator-name = "vdd_ldo8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3375000>;
+ regulator-ramp-delay = <12000>;
+ };
+
+ vdd_ldo9: ldo9 {
+ regulator-name = "vdd_ldo9";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo10: ldo10 {
+ regulator-name = "vdd_ldo10";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo25: ldo25 {
+ regulator-name = "vdd_ldo25";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2375000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* mmc0: vmmc */
+ vdd_ldo26: ldo26 {
+ regulator-name = "vdd_ldo26";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3375000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* mmc0: vqmmc */
+ vdd_ldo27: ldo27 {
+ regulator-name = "vdd_ldo27";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2375000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo29: ldo29 {
+ regulator-name = "vdd_ldo29";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo30: ldo30 {
+ regulator-name = "vdd_ldo30";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo31: ldo31 {
+ regulator-name = "vdd_ldo31";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* vibrator: haptic */
+ vdd_ldo32: ldo32 {
+ regulator-name = "vdd_ldo32";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12000>;
+ };
+
+ vdd_ldo33: ldo33 {
+ regulator-name = "vdd_ldo33";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo34: ldo34 {
+ regulator-name = "vdd_ldo34";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-ramp-delay = <12000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_ldo35: ldo35 {
+ regulator-name = "vdd_ldo35";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <400000>;
+
+ status = "okay";
+
+ touchscreen@70 {
+ compatible = "syna,rmi4-i2c";
+ reg = <0x70>;
+ interrupt-parent = <&gpc3>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&touch_irq>;
+
+ syna,reset-delay-ms = <200>;
+ syna,startup-delay-ms = <200>;
+
+ rmi4-f01@1 {
+ reg = <0x01>;
+ syna,nosleep-mode = <1>;
+ };
+
+ rmi4-f12@12 {
+ reg = <0x12>;
+ syna,sensor-type = <1>;
+ syna,rezero-wait-ms = <200>;
+ syna,clip-x-high = <1079>;
+ syna,clip-y-high = <1919>;
+ touchscreen-x-mm = <68>;
+ touchscreen-y-mm = <121>;
+ };
+ };
+};
+
+&i2c7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <400000>;
+
+ status = "okay";
+
+ accelerometer@1d {
+ compatible = "st,lis2hh12";
+ reg = <0x1d>;
+ interrupt-parent = <&gpa2>;
+ interrupts = <3 IRQ_TYPE_EDGE_RISING>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&accel_irq>;
+
+ mount-matrix = "1", "0", "0",
+ "0", "-1", "0",
+ "0", "0", "-1";
+
+ st,drdy-int-pin = <1>;
+ };
+};
+
+&i2c8 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <400000>;
+
+ status = "okay";
+
+ proximity@48 {
+ compatible = "sensortek,stk3013", "sensortek,stk3310";
+ reg = <0x48>;
+ interrupt-parent = <&gpa0>;
+ interrupts = <5 IRQ_TYPE_EDGE_BOTH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&proxm_irq>;
+
+ proximity-near-level = <25>;
+ };
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_rdqs &sd0_bus1 &sd0_bus4 &sd0_bus8>;
+
+ vmmc-supply = <&vdd_ldo26>;
+ vqmmc-supply = <&vdd_ldo27>;
+
+ fifo-depth = <64>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 4>;
+ samsung,dw-mshc-ddr-timing = <2 4>;
+ non-removable;
+
+ status = "okay";
+};
+
+&mmc1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_bus1 &sd1_bus4>;
+
+ mmc-pwrseq = <&pwrseq_mmc1>;
+
+ bus-width = <4>;
+ fifo-depth = <64>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 3>;
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ non-removable;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+
+ status = "okay";
+
+ wifi@1 {
+ compatible = "brcm,bcm43430a1-fmac", "brcm,bcm4329-fmac";
+ reg = <0x1>;
+ interrupt-names = "host-wake";
+ interrupt-parent = <&gpa2>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&gpd3 6 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_bus1 &sd2_bus4 &dwmmc2_irq>;
+
+ vmmc-supply = <&vdd_fixed_mmc2>;
+ vqmmc-supply = <&vdd_ldo2>;
+
+ bus-width = <4>;
+ card-detect-delay = <200>;
+ fifo-depth = <64>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 3>;
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ broken-cd;
+ disable-wp;
+
+ status = "okay";
+};
+
+&oscclk {
+ clock-frequency = <26000000>;
+};
+
+&pinctrl_alive {
+ accel_irq: accel-irq-pins {
+ samsung,pins = "gpa2-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ dwmmc2_irq: dwmmc2-irq-pins {
+ samsung,pins = "gpa0-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ fuel_irq: fuel-irq-pins {
+ samsung,pins = "gpa0-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ key_home: key-home-pins {
+ samsung,pins = "gpa1-7";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ key_power: key-power-pins {
+ samsung,pins = "gpa0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ key_voldown: key-voldown-pins {
+ samsung,pins = "gpa2-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ key_volup: key-volup-pins {
+ samsung,pins = "gpa2-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ pmic_irq: pmic-irq-pins {
+ samsung,pins = "gpa0-2";
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR4>;
+ };
+
+ proxm_irq: proxm-irq-pins {
+ samsung,pins = "gpa0-5";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ tkey_irq: tkey-irq-pins {
+ samsung,pins = "gpa1-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ wlan_hostwake: wlan-hostwake-pins {
+ samsung,pins = "gpa2-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ };
+};
+
+&pinctrl_top {
+ bt_enable: bt-enable-pins {
+ samsung,pins = "gpd4-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_PREV>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ };
+
+ wlan_enable: wlan-enable-pins {
+ samsung,pins = "gpd3-6";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_PREV>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR4>;
+ samsung,pin-val = <0>;
+ };
+};
+
+&pinctrl_touch {
+ touch_irq: touch-irq-pins {
+ samsung,pins = "gpc3-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+};
+
+&serial1 {
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43430a1-bt";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_btwake &bt_hostwake &bt_enable>;
+
+ device-wakeup-gpios = <&gpa1 2 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpa1 6 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpd4 1 GPIO_ACTIVE_HIGH>;
+
+ max-speed = <3000000>;
+ };
+};
+
+&serial2 {
+ status = "okay";
+};
+
+&usbdrd {
+ vdd33-supply = <&vdd_ldo8>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos7870-pinctrl.dtsi b/arch/arm64/boot/dts/exynos/exynos7870-pinctrl.dtsi
new file mode 100644
index 000000000000..99a28d06aee7
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos7870-pinctrl.dtsi
@@ -0,0 +1,1021 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Samsung Exynos7870 SoC pin-mux and pin-config device tree source
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2025 Kaustabh Chakraborty <kauschluss@disroot.org>
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "exynos-pinctrl.h"
+
+&pinctrl_alive {
+ etc0: etc0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ etc1: etc1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpa0: gpa0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpa1: gpa1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpa2: gpa2-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpq0: gpq0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ bt_btwake: bt-btwake-pins {
+ samsung,pins = "gpa1-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_PREV>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ };
+
+ bt_hostwake: bt-hostwake-pins {
+ samsung,pins = "gpa1-6";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_INPUT>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ };
+
+ gnss_sensor_i2c: gnss-sensor-i2c-pins {
+ samsung,pins = "gpa2-5", "gpa2-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_6>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ };
+
+ gnss_sensor_irq: gnss-sensor-irq-pins {
+ samsung,pins = "gpa2-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_6>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ };
+
+ nfc_int: nfc-int-pins {
+ samsung,pins = "gpa2-6";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ };
+
+ uart2_bus: uart2-bus-pins {
+ samsung,pins = "gpa1-1", "gpa1-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ };
+
+ uart2_sleep: uart2-sleep-pins {
+ samsung,pins = "gpa1-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ };
+};
+
+&pinctrl_dispaud {
+ gpz0: gpz0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpz1: gpz1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpz2: gpz2-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ i2s_amp_bus: i2s-amp-bus-pins {
+ samsung,pins = "gpz1-5";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ i2s_amp_bus_idle: i2s-amp-bus-idle-pins {
+ samsung,pins = "gpz1-5";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ i2s_bt_bus: i2s-bt-bus-pins {
+ samsung,pins = "gpz0-0", "gpz0-1", "gpz0-2", "gpz0-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ i2s_bt_bus_idle: i2s-bt-bus-idle-pins {
+ samsung,pins = "gpz0-0", "gpz0-1", "gpz0-2", "gpz0-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ i2s_pmic_amp_bus: i2s-pmic-amp-bus-pins {
+ samsung,pins = "gpz1-0", "gpz1-1", "gpz1-2", "gpz1-3", "gpz1-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR6>;
+ };
+
+ i2s_pmic_amp_bus_idle: i2s-pmic-amp-bus-idle-pins {
+ samsung,pins = "gpz1-0", "gpz1-1", "gpz1-2", "gpz1-3", "gpz1-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR6>;
+ };
+
+ i2s_pmic_bus: i2s-pmic-bus-pins {
+ samsung,pins = "gpz1-0", "gpz1-2", "gpz1-3", "gpz1-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR6>;
+ };
+
+ i2s_pmic_bus_idle: i2s-pmic-bus-idle-pins {
+ samsung,pins = "gpz1-0", "gpz1-2", "gpz1-3", "gpz1-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR6>;
+ };
+
+ i2s_fm_bus: i2s-fm-bus-pins {
+ samsung,pins = "gpz2-0", "gpz2-1", "gpz2-2", "gpz2-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ i2s_fm_bus_idle: i2s-fm-bus-idle-pins {
+ samsung,pins = "gpz2-0", "gpz2-1", "gpz2-2", "gpz2-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+};
+
+&pinctrl_ese {
+ gpc7: gpc7-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ spi0_bus: spi0-bus-pins {
+ samsung,pins = "gpc7-3", "gpc7-2", "gpc7-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ spi0_cs0: spi0-cs0-pins {
+ samsung,pins = "gpc7-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ spi0_cs1: spi0-cs1-pins {
+ samsung,pins = "gpc7-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+};
+
+&pinctrl_fsys {
+ gpr0: gpr0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpr1: gpr1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpr2: gpr2-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpr3: gpr3-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpr4: gpr4-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sd0_bus1: sd0-bus-width1-pins {
+ samsung,pins = "gpr1-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd0_bus4: sd0-bus-width4-pins {
+ samsung,pins = "gpr1-1", "gpr1-2", "gpr1-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd0_bus8: sd0-bus-width8-pins {
+ samsung,pins = "gpr1-4", "gpr1-5", "gpr1-6", "gpr1-7";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd0_clk: sd0-clk-pins {
+ samsung,pins = "gpr0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd0_clk_fast_slew_rate_1x: sd0-clk-fast-slew-rate-1x-pins {
+ samsung,pins = "gpr0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ sd0_clk_fast_slew_rate_2x: sd0-clk-fast-slew-rate-2x-pins {
+ samsung,pins = "gpr0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR2>;
+ };
+
+ sd0_clk_fast_slew_rate_3x: sd0-clk-fast-slew-rate-3x-pins {
+ samsung,pins = "gpr0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd0_clk_fast_slew_rate_4x: sd0-clk-fast-slew-rate-4x-pins {
+ samsung,pins = "gpr0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR4>;
+ };
+
+ sd0_clk_fast_slew_rate_5x: sd0-clk-fast-slew-rate-5x-pins {
+ samsung,pins = "gpr0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR5>;
+ };
+
+ sd0_clk_fast_slew_rate_6x: sd0-clk-fast-slew-rate-6x-pins {
+ samsung,pins = "gpr0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR6>;
+ };
+
+ sd0_cmd: sd0-cmd-pins {
+ samsung,pins = "gpr0-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd0_rdqs: sd0-rdqs-pins {
+ samsung,pins = "gpr0-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd1_bus1: sd1-bus-width1-pins {
+ samsung,pins = "gpr3-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_INPUT>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd1_bus4: sd1-bus-width4-pins {
+ samsung,pins = "gpr3-1", "gpr3-2", "gpr3-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_INPUT>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd1_clk: sd1-clk-pins {
+ samsung,pins = "gpr2-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd1_cmd: sd1-cmd-pins {
+ samsung,pins = "gpr2-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd2_bus1: sd2-bus-width1-pins {
+ samsung,pins = "gpr4-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd2_bus4: sd2-bus-width4-pins {
+ samsung,pins = "gpr4-3", "gpr4-4", "gpr4-5";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd2_clk: sd2-clk-pins {
+ samsung,pins = "gpr4-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd2_clk_fast_slew_rate_1x: sd2-clk-fast-slew-rate-1x-pins {
+ samsung,pins = "gpr4-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ sd2_clk_fast_slew_rate_2x: sd2-clk-fast-slew-rate-2x-pins {
+ samsung,pins = "gpr4-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR2>;
+ };
+
+ sd2_clk_fast_slew_rate_3x: sd2-clk-fast-slew-rate-3x-pins {
+ samsung,pins = "gpr4-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ sd2_clk_fast_slew_rate_4x: sd2-clk-fast-slew-rate-4x-pins {
+ samsung,pins = "gpr4-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR4>;
+ };
+
+ sd2_cmd: sd2-cmd-pins {
+ samsung,pins = "gpr4-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+};
+
+&pinctrl_mif {
+ gpm0: gpm0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ hsi2c0_bus: hsi2c0-bus-pins {
+ samsung,pins = "gpm0-1", "gpm0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_PREV>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+};
+
+&pinctrl_nfc {
+ gpc2: gpc2-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ i2c6_bus: i2c6-bus-pins {
+ samsung,pins = "gpc2-1", "gpc2-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ nfc_en: nfc-en-pins {
+ samsung,pins = "gpc2-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_PREV>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ samsung,pin-val = <1>;
+ };
+
+ nfc_n5_clk_req: nfc-n5-clk-req-pins {
+ samsung,pins = "gpc2-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ nfc_pd: nfc-pd-pins {
+ samsung,pins = "gpc2-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+};
+
+&pinctrl_top {
+ gpb0: gpb0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc0: gpc0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc1: gpc1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc4: gpc4-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc5: gpc5-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc6: gpc6-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc8: gpc8-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc9: gpc9-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd1: gpd1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd2: gpd2-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd3: gpd3-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd4: gpd4-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpe0: gpe0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpf0: gpf0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpf1: gpf1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpf2: gpf2-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpf3: gpf3-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpf4: gpf4-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ decon_te_off: decon-te-off-pins {
+ samsung,pins = "gpe0-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ };
+
+ decon_te_on: decon-te-on-pins {
+ samsung,pins = "gpe0-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ };
+
+ ese_pvdd_en: ese-pvdd-en-pins {
+ samsung,pins = "gpf4-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR4>;
+ };
+
+ fimc_is_flash: fimc-is-flash-pins {
+ samsung,pins = "gpd3-2", "gpd3-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ fimc_is_mclk0_fn: fimc-is-mclk0-fn-pins {
+ samsung,pins = "gpe0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR2>;
+ };
+
+ fimc_is_mclk0_in: fimc-is-mclk0-in-pins {
+ samsung,pins = "gpe0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ fimc_is_mclk0_out: fimc-is-mclk0-out-pins {
+ samsung,pins = "gpe0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ fimc_is_mclk1_fn: fimc-is-mclk1-fn-pins {
+ samsung,pins = "gpe0-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR2>;
+ };
+
+ fimc_is_mclk1_in: fimc-is-mclk1-in-pins {
+ samsung,pins = "gpe0-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ fimc_is_mclk1_out: fimc-is-mclk1-out-pins {
+ samsung,pins = "gpe0-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ fimc_is_mclk2_fn: fimc-is-mclk2-fn-pins {
+ samsung,pins = "gpf4-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ };
+
+ fimc_is_mclk2_out: fimc-is-mclk2-out-pins {
+ samsung,pins = "gpf4-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR3>;
+ samsung,pin-val = <0>;
+ };
+
+ hsi2c1_bus: hsi2c1-bus-pins {
+ samsung,pins = "gpf0-1", "gpf0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ hsi2c2_bus: hsi2c2-bus-pins {
+ samsung,pins = "gpf1-1", "gpf1-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ hsi2c3_bus: hsi2c3-bus-pins {
+ samsung,pins = "gpf0-3", "gpf0-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ hsi2c4_bus: hsi2c4-bus-pins {
+ samsung,pins = "gpf2-1", "gpf2-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ hsi2c5_bus: hsi2c5-bus-pins {
+ samsung,pins = "gpf3-0", "gpf3-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ hsi2c6_bus: hsi2c6-bus-pins {
+ samsung,pins = "gpf3-2", "gpf3-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ i2c0_bus: i2c0-bus-pins {
+ samsung,pins = "gpc4-1", "gpc4-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ i2c2_bus: i2c2-bus-pins {
+ samsung,pins = "gpc8-1", "gpc8-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ i2c3_bus: i2c3-bus-pins {
+ samsung,pins = "gpc9-1", "gpc9-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ i2c4_bus: i2c4-bus-pins {
+ samsung,pins = "gpc1-1", "gpc1-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_PREV>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ i2c5_bus: i2c5-bus-pins {
+ samsung,pins = "gpc1-3", "gpc1-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ i2c7_bus: i2c7-bus-pins {
+ samsung,pins = "gpc4-3", "gpc4-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ i2c8_bus: i2c8-bus-pins {
+ samsung,pins = "gpc5-1", "gpc5-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ nfc_n5_firm: nfc-n5-firm-pins {
+ samsung,pins = "gpd4-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-val = <1>;
+ };
+
+ nfc_pvdd_en: nfc-pvdd-en-pins {
+ samsung,pins = "gpd2-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR4>;
+ };
+
+ pwm_tout0: pwm-tout0-pins {
+ samsung,pins = "gpc0-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ pwm_tout1: pwm-tout1-pins {
+ samsung,pins = "gpc0-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ spi1_bus: spi1-bus-pins {
+ samsung,pins = "gpf3-3", "gpf3-2", "gpf3-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ spi1_cs0: spi1-cs0-pins {
+ samsung,pins = "gpf3-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ spi1_cs1: spi1-cs1-pins {
+ samsung,pins = "gpd1-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ spi2_bus: spi2-bus-pins {
+ samsung,pins = "gpf4-3", "gpf4-2", "gpf4-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ spi2_cs0: spi2-cs0-pins {
+ samsung,pins = "gpf4-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ spi2_cs1: spi2-cs1-pins {
+ samsung,pins = "gpd1-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ spi3_bus: spi3-bus-pins {
+ samsung,pins = "gpc6-0", "gpc6-2", "gpc6-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ spi3_bus_suspend: spi3-bus-suspend-pins {
+ samsung,pins = "gpc6-0", "gpc6-2", "gpc6-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ spi3_cs: spi3-cs-pins {
+ samsung,pins = "gpc6-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ spi4_bus: spi4-bus-pins {
+ samsung,pins = "gpc4-2", "gpc5-0", "gpc5-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ spi4_cs: spi4-cs-pins {
+ samsung,pins = "gpc4-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ spi4_cs_func: spi4-cs-func-pins {
+ samsung,pins = "gpc4-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ spi4_miso: spi4-miso-pins {
+ samsung,pins = "gpc5-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR4>;
+ };
+
+ spi4_mosi_sck_ssn: spi4-mosi-sck-ssn-pins {
+ samsung,pins = "gpc5-1", "gpc4-2", "gpc4-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR4>;
+ };
+
+ uart0_bus: uart0-bus-pins {
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pins = "gpd2-3", "gpd2-2", "gpd2-1", "gpd2-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ };
+
+ uart1_bus: uart1-bus-pins {
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pins = "gpb0-3", "gpb0-2", "gpb0-1", "gpb0-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ };
+};
+
+&pinctrl_touch {
+ gpc3: gpc3-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ i2c1_bus: i2c1-bus-pins {
+ samsung,pins = "gpc3-1", "gpc3-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+
+ ese_cs_func: ese-cs-func-pins {
+ samsung,pins = "gpc3-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ samsung,pin-val = <1>;
+ };
+
+ ese_cs_func_suspend: ese-cs-func-suspend-pins {
+ samsung,pins = "gpc3-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5433_PIN_DRV_FAST_SR1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos7870.dtsi b/arch/arm64/boot/dts/exynos/exynos7870.dtsi
new file mode 100644
index 000000000000..5cba8c9bb403
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos7870.dtsi
@@ -0,0 +1,712 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Samsung Exynos7870 SoC device tree source
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2025 Kaustabh Chakraborty <kauschluss@disroot.org>
+ */
+
+#include <dt-bindings/clock/samsung,exynos7870-cmu.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "samsung,exynos7870";
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ interrupt-parent = <&gic>;
+
+ aliases {
+ pinctrl0 = &pinctrl_alive;
+ pinctrl1 = &pinctrl_dispaud;
+ pinctrl2 = &pinctrl_ese;
+ pinctrl3 = &pinctrl_fsys;
+ pinctrl4 = &pinctrl_mif;
+ pinctrl5 = &pinctrl_nfc;
+ pinctrl6 = &pinctrl_top;
+ pinctrl7 = &pinctrl_touch;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+ core1 {
+ cpu = <&cpu1>;
+ };
+ core2 {
+ cpu = <&cpu2>;
+ };
+ core3 {
+ cpu = <&cpu3>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&cpu4>;
+ };
+ core1 {
+ cpu = <&cpu5>;
+ };
+ core2 {
+ cpu = <&cpu6>;
+ };
+ core3 {
+ cpu = <&cpu7>;
+ };
+ };
+ };
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0>;
+ enable-method = "psci";
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x1>;
+ enable-method = "psci";
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x2>;
+ enable-method = "psci";
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x3>;
+ enable-method = "psci";
+ };
+
+ cpu4: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x100>;
+ enable-method = "psci";
+ };
+
+ cpu5: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x101>;
+ enable-method = "psci";
+ };
+
+ cpu6: cpu@102 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x102>;
+ enable-method = "psci";
+ };
+
+ cpu7: cpu@103 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x103>;
+ enable-method = "psci";
+ };
+ };
+
+ oscclk: oscclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ cpu_suspend = <0xc4000001>;
+ cpu_off = <0x84000002>;
+ cpu_on = <0xc4000003>;
+ };
+
+ soc: soc@0 {
+ compatible = "simple-bus";
+ ranges = <0x0 0x0 0x0 0x20000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ chipid@10100000 {
+ compatible = "samsung,exynos7870-chipid",
+ "samsung,exynos4210-chipid";
+ reg = <0x10100000 0x100>;
+ };
+
+ cmu_peri: clock-controller@101f0000 {
+ compatible = "samsung,exynos7870-cmu-peri";
+ reg = <0x101f0000 0x1000>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk", "bus", "spi0", "spi1", "spi2",
+ "spi3", "spi4", "uart0", "uart1", "uart2";
+ clocks = <&oscclk>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_PERI_BUS>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_PERI_SPI0>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_PERI_SPI1>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_PERI_SPI2>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_PERI_SPI3>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_PERI_SPI4>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_PERI_UART0>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_PERI_UART1>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_PERI_UART2>;
+ };
+
+ cmu_mif: clock-controller@10460000 {
+ compatible = "samsung,exynos7870-cmu-mif";
+ reg = <0x10460000 0x1000>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk";
+ clocks = <&oscclk>;
+ };
+
+ pmu_system_controller: system-controller@10480000 {
+ compatible = "samsung,exynos7870-pmu",
+ "samsung,exynos7-pmu", "syscon";
+ reg = <0x10480000 0x10000>;
+
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x080c>;
+ mode-bootloader = <0x1234567d>;
+ mode-download = <0x12345671>;
+ mode-recovery = <0x12345674>;
+ };
+ };
+
+ gic: interrupt-controller@104e1000 {
+ compatible = "arm,cortex-a15-gic";
+ reg = <0x104e1000 0x1000>,
+ <0x104e2000 0x1000>,
+ <0x104e4000 0x2000>,
+ <0x104e6000 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(8) |
+ IRQ_TYPE_LEVEL_HIGH)>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <3>;
+ };
+
+ hsi2c0: i2c@10510000 {
+ compatible = "samsung,exynos7870-hsi2c",
+ "samsung,exynos7-hsi2c";
+ reg = <0x10510000 0x2000>;
+ interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&hsi2c0_bus>;
+
+ clock-names = "hsi2c";
+ clocks = <&cmu_mif CLK_GOUT_MIF_HSI2C_IPCLK>;
+
+ status = "disabled";
+ };
+
+ pinctrl_mif: pinctrl@10530000 {
+ compatible = "samsung,exynos7870-pinctrl";
+ reg = <0x10530000 0x1000>;
+ interrupts = <GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpu: gpu@11400000 {
+ compatible = "samsung,exynos7870-mali", "arm,mali-t830";
+ reg = <0x11400000 0x5000>;
+ interrupt-names = "job", "mmu", "gpu";
+ interrupts = <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>;
+
+ clock-names = "core", "bus";
+ clocks = <&cmu_g3d CLK_GOUT_G3D_CLK>,
+ <&cmu_g3d CLK_GOUT_G3D_ASYNCS_D0_CLK>;
+
+ status = "disabled";
+ };
+
+ cmu_g3d: clock-controller@11460000 {
+ compatible = "samsung,exynos7870-cmu-g3d";
+ reg = <0x11460000 0x1000>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk", "switch";
+ clocks = <&oscclk>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_G3D_SWITCH>;
+ };
+
+ cmu_mfcmscl: clock-controller@12cb0000 {
+ compatible = "samsung,exynos7870-cmu-mfcmscl";
+ reg = <0x12cb0000 0x1000>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk", "mfc", "mscl";
+ clocks = <&oscclk>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_MFCMSCL_MFC>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_MFCMSCL_MSCL>;
+ };
+
+ mmc0: mmc@13540000 {
+ compatible = "samsung,exynos7870-dw-mshc-smu";
+ reg = <0x13540000 0x2000>;
+ interrupts = <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
+
+ clock-names = "biu", "ciu";
+ clocks = <&cmu_fsys CLK_GOUT_FSYS_MMC0_ACLK>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_FSYS_MMC0>;
+
+ status = "disabled";
+ };
+
+ mmc1: mmc@13550000 {
+ compatible = "samsung,exynos7870-dw-mshc-smu";
+ reg = <0x13550000 0x2000>;
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+
+ clock-names = "biu", "ciu";
+ clocks = <&cmu_fsys CLK_GOUT_FSYS_MMC1_ACLK>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_FSYS_MMC1>;
+
+ status = "disabled";
+ };
+
+ mmc2: mmc@13560000 {
+ compatible = "samsung,exynos7870-dw-mshc-smu";
+ reg = <0x13560000 0x2000>;
+ interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
+
+ clock-names = "biu", "ciu";
+ clocks = <&cmu_fsys CLK_GOUT_FSYS_MMC2_ACLK>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_FSYS_MMC2>;
+
+ status = "disabled";
+ };
+
+ usbdrd_phy: phy@135c0000 {
+ compatible = "samsung,exynos7870-usbdrd-phy";
+ reg = <0x135c0000 0x100>;
+ #phy-cells = <1>;
+
+ clock-names = "phy", "ref";
+ clocks = <&cmu_fsys CLK_GOUT_FSYS_MUX_USB20DRD_PHYCLOCK_USER>,
+ <&cmu_fsys CLK_GOUT_FSYS_MUX_USB_PLL>;
+
+ samsung,pmu-syscon = <&pmu_system_controller>;
+ };
+
+ usbdrd: usb@13600000 {
+ compatible = "samsung,exynos7870-dwusb3";
+ ranges = <0x0 0x13600000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ clock-names = "bus_early", "ref", "ctrl";
+ clocks = <&cmu_fsys CLK_GOUT_FSYS_USB20DRD_ACLK_HSDRD>,
+ <&cmu_fsys CLK_GOUT_FSYS_USB20DRD_HSDRD_REF_CLK>,
+ <&cmu_fsys CLK_GOUT_FSYS_USB20DRD_HCLK_USB20_CTRL>;
+
+ status = "disabled";
+
+ usb@0 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0x10000>;
+ interrupts = <GIC_SPI 230 IRQ_TYPE_LEVEL_HIGH>;
+
+ phy-names = "usb2-phy";
+ phys = <&usbdrd_phy 0>;
+
+ usb-role-switch;
+ };
+ };
+
+ cmu_fsys: clock-controller@13730000 {
+ compatible = "samsung,exynos7870-cmu-fsys";
+ reg = <0x13730000 0x1000>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk", "bus", "usb20drd";
+ clocks = <&oscclk>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_FSYS_BUS>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_FSYS_USB20DRD_REFCLK>;
+ };
+
+ pinctrl_fsys: pinctrl@13750000 {
+ compatible = "samsung,exynos7870-pinctrl";
+ reg = <0x13750000 0x1000>;
+ interrupts = <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ serial0: serial@13800000 {
+ compatible = "samsung,exynos7870-uart",
+ "samsung,exynos8895-uart";
+ reg = <0x13800000 0x100>;
+ interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_bus>;
+
+ clock-names = "uart", "clk_uart_baud0";
+ clocks = <&cmu_peri CLK_GOUT_PERI_UART0_PCLK>,
+ <&cmu_peri CLK_GOUT_PERI_UART0_EXT_UCLK>;
+
+ samsung,uart-fifosize = <16>;
+
+ status = "disabled";
+ };
+
+ serial1: serial@13810000 {
+ compatible = "samsung,exynos7870-uart",
+ "samsung,exynos8895-uart";
+ reg = <0x13810000 0x100>;
+ interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_bus>;
+
+ clock-names = "uart", "clk_uart_baud0";
+ clocks = <&cmu_peri CLK_GOUT_PERI_UART1_PCLK>,
+ <&cmu_peri CLK_GOUT_PERI_UART1_EXT_UCLK>;
+
+ samsung,uart-fifosize = <256>;
+
+ status = "disabled";
+ };
+
+ serial2: serial@13820000 {
+ compatible = "samsung,exynos7870-uart",
+ "samsung,exynos8895-uart";
+ reg = <0x13820000 0x100>;
+ interrupts = <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_bus>;
+
+ clock-names = "uart", "clk_uart_baud0";
+ clocks = <&cmu_peri CLK_GOUT_PERI_UART2_PCLK>,
+ <&cmu_peri CLK_GOUT_PERI_UART2_EXT_UCLK>;
+
+ samsung,uart-fifosize = <256>;
+
+ status = "disabled";
+ };
+
+ i2c0: i2c@13830000 {
+ compatible = "samsung,exynos7870-i2c",
+ "samsung,s3c2440-i2c";
+ reg = <0x13830000 0x100>;
+ interrupts = <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_bus>;
+
+ clock-names = "i2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_I2C0_PCLK>;
+
+ status = "disabled";
+ };
+
+ i2c1: i2c@13840000 {
+ compatible = "samsung,exynos7870-i2c",
+ "samsung,s3c2440-i2c";
+ reg = <0x13840000 0x100>;
+ interrupts = <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_bus>;
+
+ clock-names = "i2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_I2C1_PCLK>;
+
+ status = "disabled";
+ };
+
+ i2c2: i2c@13850000 {
+ compatible = "samsung,exynos7870-i2c",
+ "samsung,s3c2440-i2c";
+ reg = <0x13850000 0x100>;
+ interrupts = <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_bus>;
+
+ clock-names = "i2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_I2C2_PCLK>;
+
+ status = "disabled";
+ };
+
+ i2c3: i2c@13860000 {
+ compatible = "samsung,exynos7870-i2c",
+ "samsung,s3c2440-i2c";
+ reg = <0x13860000 0x100>;
+ interrupts = <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_bus>;
+
+ clock-names = "i2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_I2C3_PCLK>;
+
+ status = "disabled";
+ };
+
+ i2c4: i2c@13870000 {
+ compatible = "samsung,exynos7870-i2c",
+ "samsung,s3c2440-i2c";
+ reg = <0x13870000 0x100>;
+ interrupts = <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_bus>;
+
+ clock-names = "i2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_I2C4_PCLK>;
+
+ status = "disabled";
+ };
+
+ i2c5: i2c@13880000 {
+ compatible = "samsung,exynos7870-i2c",
+ "samsung,s3c2440-i2c";
+ reg = <0x13880000 0x100>;
+ interrupts = <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_bus>;
+
+ clock-names = "i2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_I2C5_PCLK>;
+
+ status = "disabled";
+ };
+
+ i2c6: i2c@13890000 {
+ compatible = "samsung,exynos7870-i2c",
+ "samsung,s3c2440-i2c";
+ reg = <0x13890000 0x100>;
+ interrupts = <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_bus>;
+
+ clock-names = "i2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_I2C6_PCLK>;
+
+ status = "disabled";
+ };
+
+ hsi2c1: i2c@138a0000 {
+ compatible = "samsung,exynos7870-hsi2c",
+ "samsung,exynos7-hsi2c";
+ reg = <0x138a0000 0x1000>;
+ interrupts = <GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&hsi2c1_bus>;
+
+ clock-names = "hsi2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_HSI2C1_IPCLK>;
+
+ status = "disabled";
+ };
+
+ hsi2c2: i2c@138b0000 {
+ compatible = "samsung,exynos7870-hsi2c",
+ "samsung,exynos7-hsi2c";
+ reg = <0x138b0000 0x1000>;
+ interrupts = <GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&hsi2c2_bus>;
+
+ clock-names = "hsi2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_HSI2C2_IPCLK>;
+
+ status = "disabled";
+ };
+
+ hsi2c3: i2c@138c0000 {
+ compatible = "samsung,exynos7870-hsi2c",
+ "samsung,exynos7-hsi2c";
+ reg = <0x138c0000 0x1000>;
+ interrupts = <GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&hsi2c3_bus>;
+
+ clock-names = "hsi2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_HSI2C3_IPCLK>;
+
+ status = "disabled";
+ };
+
+ i2c7: i2c@138d0000 {
+ compatible = "samsung,exynos7870-i2c",
+ "samsung,s3c2440-i2c";
+ reg = <0x138d0000 0x100>;
+ interrupts = <GIC_SPI 453 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c7_bus>;
+
+ clock-names = "i2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_I2C7_PCLK>;
+
+ status = "disabled";
+ };
+
+ i2c8: i2c@138e0000 {
+ compatible = "samsung,exynos7870-i2c",
+ "samsung,s3c2440-i2c";
+ reg = <0x138e0000 0x100>;
+ interrupts = <GIC_SPI 454 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c8_bus>;
+
+ clock-names = "i2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_I2C8_PCLK>;
+
+ status = "disabled";
+ };
+
+ hsi2c4: i2c@138f0000 {
+ compatible = "samsung,exynos7870-hsi2c",
+ "samsung,exynos7-hsi2c";
+ reg = <0x138f0000 0x1000>;
+ interrupts = <GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&hsi2c4_bus>;
+
+ clock-names = "hsi2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_HSI2C4_IPCLK>;
+
+ status = "disabled";
+ };
+
+ hsi2c5: i2c@13950000 {
+ compatible = "samsung,exynos7870-hsi2c",
+ "samsung,exynos7-hsi2c";
+ reg = <0x13950000 0x1000>;
+ interrupts = <GIC_SPI 457 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&hsi2c5_bus>;
+
+ clock-names = "hsi2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_HSI2C5_IPCLK>;
+
+ status = "disabled";
+ };
+
+ hsi2c6: i2c@13960000 {
+ compatible = "samsung,exynos7870-hsi2c",
+ "samsung,exynos7-hsi2c";
+ reg = <0x13960000 0x1000>;
+ interrupts = <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&hsi2c6_bus>;
+
+ clock-names = "hsi2c";
+ clocks = <&cmu_peri CLK_GOUT_PERI_HSI2C6_IPCLK>;
+
+ status = "disabled";
+ };
+
+ pinctrl_top: pinctrl@139b0000 {
+ compatible = "samsung,exynos7870-pinctrl";
+ reg = <0x139b0000 0x1000>;
+ interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pinctrl_nfc: pinctrl@139c0000 {
+ compatible = "samsung,exynos7870-pinctrl";
+ reg = <0x139c0000 0x1000>;
+ interrupts = <GIC_SPI 439 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pinctrl_touch: pinctrl@139d0000 {
+ compatible = "samsung,exynos7870-pinctrl";
+ reg = <0x139d0000 0x1000>;
+ interrupts = <GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pinctrl_ese: pinctrl@139e0000 {
+ compatible = "samsung,exynos7870-pinctrl";
+ reg = <0x139e0000 0x1000>;
+ interrupts = <GIC_SPI 441 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pinctrl_alive: pinctrl@139f0000 {
+ compatible = "samsung,exynos7870-pinctrl";
+ reg = <0x139f0000 0x1000>;
+
+ wakeup-interrupt-controller {
+ compatible = "samsung,exynos7870-wakeup-eint",
+ "samsung,exynos7-wakeup-eint";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ cmu_isp: clock-controller@144d0000 {
+ compatible = "samsung,exynos7870-cmu-isp";
+ reg = <0x144d0000 0x1000>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk", "cam", "isp", "vra";
+ clocks = <&oscclk>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_ISP_CAM>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_ISP_ISP>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_ISP_VRA>;
+ };
+
+ pinctrl_dispaud: pinctrl@148c0000 {
+ compatible = "samsung,exynos7870-pinctrl";
+ reg = <0x148c0000 0x1000>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ cmu_dispaud: clock-controller@148d0000 {
+ compatible = "samsung,exynos7870-cmu-dispaud";
+ reg = <0x148d0000 0x1000>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk", "bus", "decon_eclk", "decon_vclk";
+ clocks = <&oscclk>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_DISPAUD_BUS>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_DISPAUD_DECON_ECLK>,
+ <&cmu_mif CLK_GOUT_MIF_CMU_DISPAUD_DECON_VCLK>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+
+ /*
+ * Non-updatable, broken stock Samsung bootloader does not
+ * configure CNTFRQ_EL0
+ */
+ clock-frequency = <26000000>;
+ };
+};
+
+#include "exynos7870-pinctrl.dtsi"
+#include "arm/samsung/exynos-syscon-restart.dtsi"
diff --git a/arch/arm64/boot/dts/exynos/exynos850.dtsi b/arch/arm64/boot/dts/exynos/exynos850.dtsi
index f1c8b4613cbc..cb55015c8dce 100644
--- a/arch/arm64/boot/dts/exynos/exynos850.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos850.dtsi
@@ -651,7 +651,7 @@
compatible = "samsung,exynos850-usi";
reg = <0x138200c0 0x20>;
samsung,sysreg = <&sysreg_peri 0x1010>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -677,7 +677,7 @@
compatible = "samsung,exynos850-usi";
reg = <0x138a00c0 0x20>;
samsung,sysreg = <&sysreg_peri 0x1020>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -706,7 +706,7 @@
compatible = "samsung,exynos850-usi";
reg = <0x138b00c0 0x20>;
samsung,sysreg = <&sysreg_peri 0x1030>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -735,7 +735,7 @@
compatible = "samsung,exynos850-usi";
reg = <0x138c00c0 0x20>;
samsung,sysreg = <&sysreg_peri 0x1040>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -764,7 +764,7 @@
compatible = "samsung,exynos850-usi";
reg = <0x139400c0 0x20>;
samsung,sysreg = <&sysreg_peri 0x1050>;
- samsung,mode = <USI_V2_SPI>;
+ samsung,mode = <USI_MODE_SPI>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -796,7 +796,7 @@
compatible = "samsung,exynos850-usi";
reg = <0x11d000c0 0x20>;
samsung,sysreg = <&sysreg_cmgp 0x2000>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -855,7 +855,7 @@
compatible = "samsung,exynos850-usi";
reg = <0x11d200c0 0x20>;
samsung,sysreg = <&sysreg_cmgp 0x2010>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
index b36292a7db64..66628cb32776 100644
--- a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
@@ -442,7 +442,7 @@
"samsung,exynos850-usi";
reg = <0x103000c0 0x20>;
samsung,sysreg = <&syscon_peric0 0x1000>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -505,7 +505,7 @@
"samsung,exynos850-usi";
reg = <0x103100c0 0x20>;
samsung,sysreg = <&syscon_peric0 0x1004>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -534,7 +534,7 @@
"samsung,exynos850-usi";
reg = <0x103200c0 0x20>;
samsung,sysreg = <&syscon_peric0 0x1008>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -597,7 +597,7 @@
"samsung,exynos850-usi";
reg = <0x103300c0 0x20>;
samsung,sysreg = <&syscon_peric0 0x100c>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -626,7 +626,7 @@
"samsung,exynos850-usi";
reg = <0x103400c0 0x20>;
samsung,sysreg = <&syscon_peric0 0x1010>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -689,7 +689,7 @@
"samsung,exynos850-usi";
reg = <0x103500c0 0x20>;
samsung,sysreg = <&syscon_peric0 0x1014>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -718,7 +718,7 @@
"samsung,exynos850-usi";
reg = <0x103600c0 0x20>;
samsung,sysreg = <&syscon_peric0 0x1018>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -781,7 +781,7 @@
"samsung,exynos850-usi";
reg = <0x103700c0 0x20>;
samsung,sysreg = <&syscon_peric0 0x101c>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -810,7 +810,7 @@
"samsung,exynos850-usi";
reg = <0x103800c0 0x20>;
samsung,sysreg = <&syscon_peric0 0x1020>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -873,7 +873,7 @@
"samsung,exynos850-usi";
reg = <0x103900c0 0x20>;
samsung,sysreg = <&syscon_peric0 0x1024>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -902,7 +902,7 @@
"samsung,exynos850-usi";
reg = <0x103a00c0 0x20>;
samsung,sysreg = <&syscon_peric0 0x1028>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -965,7 +965,7 @@
"samsung,exynos850-usi";
reg = <0x103b00c0 0x20>;
samsung,sysreg = <&syscon_peric0 0x102c>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -994,7 +994,7 @@
"samsung,exynos850-usi";
reg = <0x109000c0 0x20>;
samsung,sysreg = <&syscon_peric1 0x1000>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -1057,7 +1057,7 @@
"samsung,exynos850-usi";
reg = <0x109100c0 0x20>;
samsung,sysreg = <&syscon_peric1 0x1004>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -1086,7 +1086,7 @@
"samsung,exynos850-usi";
reg = <0x109200c0 0x20>;
samsung,sysreg = <&syscon_peric1 0x1008>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -1149,7 +1149,7 @@
"samsung,exynos850-usi";
reg = <0x109300c0 0x20>;
samsung,sysreg = <&syscon_peric1 0x100c>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -1178,7 +1178,7 @@
"samsung,exynos850-usi";
reg = <0x109400c0 0x20>;
samsung,sysreg = <&syscon_peric1 0x1010>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -1241,7 +1241,7 @@
"samsung,exynos850-usi";
reg = <0x109500c0 0x20>;
samsung,sysreg = <&syscon_peric1 0x1014>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -1270,7 +1270,7 @@
"samsung,exynos850-usi";
reg = <0x109600c0 0x20>;
samsung,sysreg = <&syscon_peric1 0x1018>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -1333,7 +1333,7 @@
"samsung,exynos850-usi";
reg = <0x109700c0 0x20>;
samsung,sysreg = <&syscon_peric1 0x101c>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -1362,7 +1362,7 @@
"samsung,exynos850-usi";
reg = <0x109800c0 0x20>;
samsung,sysreg = <&syscon_peric1 0x1020>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -1425,7 +1425,7 @@
"samsung,exynos850-usi";
reg = <0x109900c0 0x20>;
samsung,sysreg = <&syscon_peric1 0x1024>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -1454,7 +1454,7 @@
"samsung,exynos850-usi";
reg = <0x109a00c0 0x20>;
samsung,sysreg = <&syscon_peric1 0x1028>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -1515,7 +1515,7 @@
"samsung,exynos850-usi";
reg = <0x109b00c0 0x20>;
samsung,sysreg = <&syscon_peric1 0x102c>;
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm64/boot/dts/exynos/exynosautov920.dtsi b/arch/arm64/boot/dts/exynos/exynosautov920.dtsi
index fc6ac531d597..2cb8041c8a9f 100644
--- a/arch/arm64/boot/dts/exynos/exynosautov920.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynosautov920.dtsi
@@ -433,7 +433,7 @@
"samsung,exynos850-usi";
reg = <0x108800c0 0x20>;
samsung,sysreg = <&syscon_peric0 0x1000>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -457,6 +457,238 @@
};
};
+ usi_1: usi@108a00c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x108a00c0 0x20>;
+ samsung,sysreg = <&syscon_peric0 0x1008>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI01_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_1: serial@108a0000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x108a0000 0xc0>;
+ interrupts = <GIC_SPI 766 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_bus>;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI01_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <256>;
+ status = "disabled";
+ };
+ };
+
+ usi_2: usi@108c00c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x108c00c0 0x20>;
+ samsung,sysreg = <&syscon_peric0 0x1010>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI02_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_2: serial@108c0000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x108c0000 0xc0>;
+ interrupts = <GIC_SPI 768 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_bus>;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI02_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
+ usi_3: usi@108e00c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x108e00c0 0x20>;
+ samsung,sysreg = <&syscon_peric0 0x1018>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI03_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_3: serial@108e0000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x108e0000 0xc0>;
+ interrupts = <GIC_SPI 770 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_bus>;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI03_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
+ usi_4: usi@109000c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x109000c0 0x20>;
+ samsung,sysreg = <&syscon_peric0 0x1020>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI04_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_4: serial@10900000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10900000 0xc0>;
+ interrupts = <GIC_SPI 772 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_bus>;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI04_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
+ usi_5: usi@109200c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x109200c0 0x20>;
+ samsung,sysreg = <&syscon_peric0 0x1028>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI05_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_5: serial@10920000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10920000 0xc0>;
+ interrupts = <GIC_SPI 774 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart5_bus>;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI05_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
+ usi_6: usi@109400c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x109400c0 0x20>;
+ samsung,sysreg = <&syscon_peric0 0x1030>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI06_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_6: serial@10940000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10940000 0xc0>;
+ interrupts = <GIC_SPI 776 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart6_bus>;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI06_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
+ usi_7: usi@109600c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x109600c0 0x20>;
+ samsung,sysreg = <&syscon_peric0 0x1038>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI07_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_7: serial@10960000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10960000 0xc0>;
+ interrupts = <GIC_SPI 778 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart7_bus>;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI07_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
+ usi_8: usi@109800c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x109800c0 0x20>;
+ samsung,sysreg = <&syscon_peric0 0x1040>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI08_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_8: serial@10980000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10980000 0xc0>;
+ interrupts = <GIC_SPI 780 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart8_bus>;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI08_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
pwm: pwm@109b0000 {
compatible = "samsung,exynosautov920-pwm",
"samsung,exynos4210-pwm";
@@ -493,6 +725,267 @@
interrupts = <GIC_SPI 781 IRQ_TYPE_LEVEL_HIGH>;
};
+ usi_9: usi@10c800c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x10c800c0 0x20>;
+ samsung,sysreg = <&syscon_peric1 0x1000>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI09_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_9: serial@10c80000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10c80000 0xc0>;
+ interrupts = <GIC_SPI 787 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart9_bus>;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI09_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <256>;
+ status = "disabled";
+ };
+ };
+
+ usi_10: usi@10ca00c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x10ca00c0 0x20>;
+ samsung,sysreg = <&syscon_peric1 0x1008>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI10_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_10: serial@10ca0000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10ca0000 0xc0>;
+ interrupts = <GIC_SPI 789 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart10_bus>;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI10_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
+ usi_11: usi@10cc00c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x10cc00c0 0x20>;
+ samsung,sysreg = <&syscon_peric1 0x1010>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI11_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_11: serial@10cc0000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10cc0000 0xc0>;
+ interrupts = <GIC_SPI 791 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart11_bus>;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI11_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
+ usi_12: usi@10ce00c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x10ce00c0 0x20>;
+ samsung,sysreg = <&syscon_peric1 0x1018>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI12_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_12: serial@10ce0000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10ce0000 0xc0>;
+ interrupts = <GIC_SPI 793 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart12_bus>;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI12_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
+ usi_13: usi@10d000c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x10d000c0 0x20>;
+ samsung,sysreg = <&syscon_peric1 0x1020>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI13_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_13: serial@10d00000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10d00000 0xc0>;
+ interrupts = <GIC_SPI 795 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart13_bus>;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI13_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
+ usi_14: usi@10d200c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x10d200c0 0x20>;
+ samsung,sysreg = <&syscon_peric1 0x1028>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI14_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_14: serial@10d20000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10d20000 0xc0>;
+ interrupts = <GIC_SPI 797 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart14_bus>;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI14_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
+ usi_15: usi@10d400c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x10d400c0 0x20>;
+ samsung,sysreg = <&syscon_peric1 0x1030>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI15_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_15: serial@10d40000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10d40000 0xc0>;
+ interrupts = <GIC_SPI 799 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart15_bus>;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI15_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
+ usi_16: usi@10d600c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x10d600c0 0x20>;
+ samsung,sysreg = <&syscon_peric1 0x1038>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI16_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_16: serial@10d60000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10d60000 0xc0>;
+ interrupts = <GIC_SPI 801 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart16_bus>;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI16_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
+ usi_17: usi@10d800c0 {
+ compatible = "samsung,exynosautov920-usi",
+ "samsung,exynos850-usi";
+ reg = <0x10d800c0 0x20>;
+ samsung,sysreg = <&syscon_peric1 0x1040>;
+ samsung,mode = <USI_V2_UART>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI17_USI>;
+ clock-names = "pclk", "ipclk";
+ status = "disabled";
+
+ serial_17: serial@10d80000 {
+ compatible = "samsung,exynosautov920-uart",
+ "samsung,exynos850-uart";
+ reg = <0x10d80000 0xc0>;
+ interrupts = <GIC_SPI 803 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart17_bus>;
+ clocks = <&cmu_peric1 CLK_MOUT_PERIC1_NOC_USER>,
+ <&cmu_peric1 CLK_DOUT_PERIC1_USI17_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+ };
+
cmu_top: clock-controller@11000000 {
compatible = "samsung,exynosautov920-cmu-top";
reg = <0x11000000 0x8000>;
@@ -582,6 +1075,47 @@
compatible = "samsung,exynosautov920-pinctrl";
reg = <0x1a460000 0x10000>;
};
+
+ cmu_cpucl0: clock-controller@1ec00000 {
+ compatible = "samsung,exynosautov920-cmu-cpucl0";
+ reg = <0x1ec00000 0x8000>;
+ #clock-cells = <1>;
+
+ clocks = <&xtcxo>,
+ <&cmu_top DOUT_CLKCMU_CPUCL0_SWITCH>,
+ <&cmu_top DOUT_CLKCMU_CPUCL0_CLUSTER>,
+ <&cmu_top DOUT_CLKCMU_CPUCL0_DBG>;
+ clock-names = "oscclk",
+ "switch",
+ "cluster",
+ "dbg";
+ };
+
+ cmu_cpucl1: clock-controller@1ed00000 {
+ compatible = "samsung,exynosautov920-cmu-cpucl1";
+ reg = <0x1ed00000 0x8000>;
+ #clock-cells = <1>;
+
+ clocks = <&xtcxo>,
+ <&cmu_top DOUT_CLKCMU_CPUCL1_SWITCH>,
+ <&cmu_top DOUT_CLKCMU_CPUCL1_CLUSTER>;
+ clock-names = "oscclk",
+ "switch",
+ "cluster";
+ };
+
+ cmu_cpucl2: clock-controller@1ee00000 {
+ compatible = "samsung,exynosautov920-cmu-cpucl2";
+ reg = <0x1ee00000 0x8000>;
+ #clock-cells = <1>;
+
+ clocks = <&xtcxo>,
+ <&cmu_top DOUT_CLKCMU_CPUCL2_SWITCH>,
+ <&cmu_top DOUT_CLKCMU_CPUCL2_CLUSTER>;
+ clock-names = "oscclk",
+ "switch",
+ "cluster";
+ };
};
timer {
diff --git a/arch/arm64/boot/dts/exynos/google/gs101-pixel-common.dtsi b/arch/arm64/boot/dts/exynos/google/gs101-pixel-common.dtsi
index b25230495c64..d6ddcc13f7b2 100644
--- a/arch/arm64/boot/dts/exynos/google/gs101-pixel-common.dtsi
+++ b/arch/arm64/boot/dts/exynos/google/gs101-pixel-common.dtsi
@@ -279,12 +279,12 @@
};
&usi8 {
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
status = "okay";
};
&usi12 {
- samsung,mode = <USI_V2_I2C>;
+ samsung,mode = <USI_MODE_I2C>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/exynos/google/gs101.dtsi b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
index 3de3a758f113..48c691fd0a3a 100644
--- a/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+++ b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
@@ -833,7 +833,7 @@
<&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_0>;
clock-names = "pclk", "ipclk";
samsung,sysreg = <&sysreg_peric0 0x1020>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
status = "disabled";
serial_0: serial@10a00000 {
@@ -1405,6 +1405,7 @@
pmu_system_controller: system-controller@17460000 {
compatible = "google,gs101-pmu", "syscon";
reg = <0x17460000 0x10000>;
+ google,pmu-intr-gen-syscon = <&pmu_intr_gen>;
poweroff: syscon-poweroff {
compatible = "syscon-poweroff";
@@ -1434,6 +1435,11 @@
};
};
+ pmu_intr_gen: syscon@17470000 {
+ compatible = "google,gs101-pmu-intr-gen", "syscon";
+ reg = <0x17470000 0x10000>;
+ };
+
pinctrl_gpio_alive: pinctrl@174d0000 {
compatible = "google,gs101-pinctrl";
reg = <0x174d0000 0x00001000>;
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index b6d3fe26d621..0b473a23d120 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -104,7 +104,12 @@ dtb-$(CONFIG_ARCH_MXC) += imx8dx-colibri-eval-v3.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8dx-colibri-iris-v2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8dx-colibri-iris.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8dxl-evk.dtb
+
+imx8dxl-evk-pcie-ep-dtbs += imx8dxl-evk.dtb imx-pcie0-ep.dtbo
+dtb-$(CONFIG_ARCH_MXC) += imx8dxl-evk-pcie-ep.dtb
+
dtb-$(CONFIG_ARCH_MXC) += imx8dxp-tqma8xdp-mba8xx.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8dxp-tqma8xdps-mb-smarc-2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-beacon-kit.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-data-modul-edm-sbc.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-ddr4-evk.dtb
@@ -112,6 +117,11 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mm-emcon-avari.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-emtop-baseboard.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-evkb.dtb
+
+imx8mm-evk-pcie-ep-dtbs += imx8mm-evk.dtb imx-pcie0-ep.dtbo
+imx8mm-evkb-pcie-ep-dtbs += imx8mm-evkb.dtb imx-pcie0-ep.dtbo
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk-pcie-ep.dtb imx8mm-evkb-pcie-ep.dtb
+
dtb-$(CONFIG_ARCH_MXC) += imx8mm-icore-mx8mm-ctouch2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-icore-mx8mm-edimm2.2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-iot-gateway.dtb
@@ -200,8 +210,12 @@ imx8mp-kontron-dl-dtbs += imx8mp-kontron-bl-osm-s.dtb imx8mp-kontron-dl.dtbo
dtb-$(CONFIG_ARCH_MXC) += imx8mp-kontron-dl.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-kontron-smarc-eval-carrier.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-libra-rdk-fpsc.dtb
+imx8mp-libra-rdk-fpsc-lvds-dtbs += imx8mp-libra-rdk-fpsc.dtb imx8mp-libra-rdk-fpsc-lvds-etml1010g3dra.dtbo
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-libra-rdk-fpsc-lvds.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-msc-sm2s-ep1.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-navqp.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-nitrogen-enc-carrier-board.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-nitrogen-smarc-universal-board.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-phyboard-pollux-rdk.dtb
imx8mp-phyboard-pollux-rdk-no-eth-dtbs += imx8mp-phyboard-pollux-rdk.dtb imx8mp-phycore-no-eth.dtbo
@@ -212,6 +226,7 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-lt6.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-mi1010ait-1cp1.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revc-bd500.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revc-tian-g07017.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-toradex-smarc-dev.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mpxl.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mp-ras314.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-var-som-symphony.dtb
@@ -237,7 +252,7 @@ imx8mp-evk-lvds0-imx-lvds-hdmi-dtbs += imx8mp-evk.dtb imx8mp-evk-lvds0-imx-lvds-
imx8mp-evk-lvds1-imx-dlvds-hdmi-channel0-dtbs += imx8mp-evk.dtb imx8mp-evk-lvds1-imx-dlvds-hdmi-channel0.dtbo
imx8mp-evk-lvds1-imx-lvds-hdmi-dtbs += imx8mp-evk.dtb imx8mp-evk-lvds1-imx-lvds-hdmi.dtbo
imx8mp-evk-mx8-dlvds-lcd1-dtbs += imx8mp-evk.dtb imx8mp-evk-mx8-dlvds-lcd1.dtbo
-imx8mp-evk-pcie-ep-dtbs += imx8mp-evk.dtb imx8mp-evk-pcie-ep.dtbo
+imx8mp-evk-pcie-ep-dtbs += imx8mp-evk.dtb imx-pcie0-ep.dtbo
dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk-lvds0-imx-dlvds-hdmi-channel0.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk-lvds0-imx-lvds-hdmi.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk-lvds1-imx-dlvds-hdmi-channel0.dtb
@@ -247,10 +262,19 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk-pcie-ep.dtb
imx8mp-tqma8mpql-mba8mpxl-lvds-dtbs += imx8mp-tqma8mpql-mba8mpxl.dtb imx8mp-tqma8mpql-mba8mpxl-lvds.dtbo
imx8mp-tqma8mpql-mba8mpxl-lvds-g133han01-dtbs += imx8mp-tqma8mpql-mba8mpxl.dtb imx8mp-tqma8mpql-mba8mpxl-lvds-g133han01.dtbo
+imx8mp-tqma8mpql-mba8mp-ras314-imx219-dtbs += imx8mp-tqma8mpql-mba8mp-ras314.dtb imx8mp-tqma8mpql-mba8mp-ras314-imx219.dtbo
+imx8mp-tqma8mpql-mba8mp-ras314-lvds-dtbs += imx8mp-tqma8mpql-mba8mp-ras314.dtb imx8mp-tqma8mpql-mba8mpxl-lvds.dtbo
+imx8mp-tqma8mpql-mba8mp-ras314-lvds-imx219-dtbs += imx8mp-tqma8mpql-mba8mp-ras314.dtb imx8mp-tqma8mpql-mba8mpxl-lvds.dtbo imx8mp-tqma8mpql-mba8mp-ras314-imx219.dtbo
dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mpxl-lvds.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mpxl-lvds-g133han01.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mp-ras314-imx219.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mp-ras314-lvds.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mp-ras314-lvds-imx219.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-evk.dtb
+imx8mq-evk-pcie1-ep-dtbs += imx8mq-evk.dtb imx-pcie1-ep.dtbo
+dtb-$(CONFIG_ARCH_MXC) += imx8mq-evk-pcie1-ep.dtb
+
dtb-$(CONFIG_ARCH_MXC) += imx8mq-hummingboard-pulse.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-kontron-pitx-imx8m.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-librem5-devkit.dtb
@@ -284,10 +308,11 @@ dtb-$(CONFIG_ARCH_MXC) += imx8qxp-colibri-iris.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-colibri-iris-v2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-mek.dtb
-imx8qxp-mek-pcie-ep-dtbs += imx8qxp-mek.dtb imx8qxp-mek-pcie-ep.dtbo
+imx8qxp-mek-pcie-ep-dtbs += imx8qxp-mek.dtb imx-pcie0-ep.dtbo
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-mek-pcie-ep.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-tqma8xqp-mba8xx.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8qxp-tqma8xqps-mb-smarc-2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8ulp-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-9x9-qsb.dtb
@@ -297,12 +322,23 @@ dtb-$(CONFIG_ARCH_MXC) += imx93-9x9-qsb-i3c.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-11x11-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-14x14-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-kontron-bl-osm-s.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx93-phyboard-nash.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-phyboard-segin.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx93-tqma9352-mba91xxca.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-tqma9352-mba93xxca.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-tqma9352-mba93xxla.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-var-som-symphony.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx943-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx95-15x15-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx95-19x19-evk.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx95-19x19-evk-sof.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx95-tqma9596sa-mb-smarc-2.dtb
+
+imx95-15x15-evk-pcie0-ep-dtbs = imx95-15x15-evk.dtb imx-pcie0-ep.dtbo
+dtb-$(CONFIG_ARCH_MXC) += imx95-15x15-evk-pcie0-ep.dtb
+imx95-19x19-evk-pcie0-ep-dtbs += imx95-19x19-evk.dtb imx-pcie0-ep.dtbo
+imx95-19x19-evk-pcie1-ep-dtbs += imx95-19x19-evk.dtb imx-pcie1-ep.dtbo
+dtb-$(CONFIG_ARCH_MXC) += imx95-19x19-evk-pcie0-ep.dtb imx95-19x19-evk-pcie1-ep.dtb
imx8mm-kontron-dl-dtbs := imx8mm-kontron-bl.dtb imx8mm-kontron-dl.dtbo
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk-pcie-ep.dtso b/arch/arm64/boot/dts/freescale/imx-pcie0-ep.dtso
index 244e820699b5..ed73284d9bb6 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-evk-pcie-ep.dtso
+++ b/arch/arm64/boot/dts/freescale/imx-pcie0-ep.dtso
@@ -6,12 +6,10 @@
/dts-v1/;
/plugin/;
-&pcie {
+&pcie0 {
status = "disabled";
};
-&pcie_ep {
- pinctrl-0 = <&pinctrl_pcie0>;
- pinctrl-names = "default";
+&pcie0_ep {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx-pcie1-ep.dtso b/arch/arm64/boot/dts/freescale/imx-pcie1-ep.dtso
new file mode 100644
index 000000000000..0e7ef7ef8560
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx-pcie1-ep.dtso
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2025 NXP
+ */
+
+/dts-v1/;
+/plugin/;
+
+&pcie1 {
+ status = "disabled";
+};
+
+&pcie1_ep {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8-apalis-eval.dtsi b/arch/arm64/boot/dts/freescale/imx8-apalis-eval.dtsi
index dc127298715b..311d4950793c 100644
--- a/arch/arm64/boot/dts/freescale/imx8-apalis-eval.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-apalis-eval.dtsi
@@ -104,7 +104,10 @@
status = "okay";
};
-/* TODO: Apalis PCIE1 */
+/* Apalis PCIE1 */
+&pciea {
+ status = "okay";
+};
/* TODO: Apalis BKL1_PWM */
@@ -121,7 +124,10 @@
status = "okay";
};
-/* TODO: Apalis SATA1 */
+/* Apalis SATA1 */
+&sata {
+ status = "okay";
+};
/* Apalis SPDIF1 */
&spdif0 {
diff --git a/arch/arm64/boot/dts/freescale/imx8-apalis-ixora-v1.1.dtsi b/arch/arm64/boot/dts/freescale/imx8-apalis-ixora-v1.1.dtsi
index d4a1ad528f65..3d8731504ce1 100644
--- a/arch/arm64/boot/dts/freescale/imx8-apalis-ixora-v1.1.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-apalis-ixora-v1.1.dtsi
@@ -191,7 +191,10 @@
status = "okay";
};
-/* TODO: Apalis PCIE1 */
+/* Apalis PCIE1 */
+&pciea {
+ status = "okay";
+};
/* TODO: Apalis BKL1_PWM */
@@ -208,7 +211,10 @@
status = "okay";
};
-/* TODO: Apalis SATA1 */
+/* Apalis SATA1 */
+&sata {
+ status = "okay";
+};
/* Apalis SPDIF1 */
&spdif0 {
diff --git a/arch/arm64/boot/dts/freescale/imx8-apalis-ixora-v1.2.dtsi b/arch/arm64/boot/dts/freescale/imx8-apalis-ixora-v1.2.dtsi
index 5e132c83e1b2..106e802a68ba 100644
--- a/arch/arm64/boot/dts/freescale/imx8-apalis-ixora-v1.2.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-apalis-ixora-v1.2.dtsi
@@ -240,7 +240,10 @@
status = "okay";
};
-/* TODO: Apalis PCIE1 */
+/* Apalis PCIE1 */
+&pciea {
+ status = "okay";
+};
/* TODO: Apalis BKL1_PWM */
@@ -257,7 +260,10 @@
status = "okay";
};
-/* TODO: Apalis SATA1 */
+/* Apalis SATA1 */
+&sata {
+ status = "okay";
+};
/* Apalis SPDIF1 */
&spdif0 {
diff --git a/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi b/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi
index dbea1eefdeec..6f27a9cc2494 100644
--- a/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi
@@ -339,6 +339,25 @@
pinctrl-0 = <&pinctrl_flexcan3>;
};
+&hsio_phy {
+ fsl,hsio-cfg = "pciea-pcieb-sata";
+ fsl,refclk-pad-mode = "input";
+ status = "okay";
+};
+
+&hsio_refa_clk {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie_sata_refclk>;
+ enable-gpios = <&lsio_gpio4 11 GPIO_ACTIVE_HIGH>;
+};
+
+&hsio_refb_clk {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie_wifi_refclk>;
+ clocks = <&hsio_refa_clk>;
+ enable-gpios = <&lsio_gpio2 11 GPIO_ACTIVE_HIGH>;
+};
+
/* TODO: Apalis HDMI1 */
&gpu_alert0 {
@@ -514,7 +533,10 @@
"MXM3_112",
"MXM3_118",
"MXM3_114",
- "MXM3_116";
+ "MXM3_116",
+ "",
+ "",
+ "MXM3_26";
};
&lsio_gpio1 {
@@ -586,15 +608,6 @@
"MXM3_183",
"MXM3_185",
"MXM3_187";
-
- pcie-wifi-hog {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_pcie_wifi_refclk>;
- gpio-hog;
- gpios = <11 GPIO_ACTIVE_HIGH>;
- line-name = "PCIE_WIFI_CLK";
- output-high;
- };
};
&lsio_gpio3 {
@@ -660,16 +673,6 @@
"MXM3_291",
"MXM3_289",
"MXM3_287";
-
- /* Enable pcie root / sata ref clock unconditionally */
- pcie-sata-hog {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_pcie_sata_refclk>;
- gpio-hog;
- gpios = <11 GPIO_ACTIVE_HIGH>;
- line-name = "PCIE_SATA_CLK";
- output-high;
- };
};
&lsio_gpio5 {
@@ -771,9 +774,30 @@
status = "okay";
};
-/* TODO: Apalis PCIE1 */
+/* Apalis PCIE1 */
+&pciea {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reset_moci>;
+ phys = <&hsio_phy 0 PHY_TYPE_PCIE 0>;
+ phy-names = "pcie-phy";
+ reset-gpio = <&lsio_gpio0 30 GPIO_ACTIVE_LOW>;
+ vpcie-supply = <&reg_pcie_switch>;
+};
+
+/* On-module Wi-Fi */
+&pcieb {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcieb>, <&pinctrl_wifi>;
+ phys = <&hsio_phy 1 PHY_TYPE_PCIE 1>;
+ phy-names = "pcie-phy";
+ reset-gpio = <&lsio_gpio5 0 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
-/* TODO: On-module Wi-Fi */
+&phyx2_lpcg {
+ clocks = <&hsio_refa_clk>, <&hsio_refb_clk>,
+ <&hsio_refa_clk>, <&hsio_per_clk>;
+};
/* TODO: Apalis BKL1_PWM */
@@ -806,8 +830,6 @@
<722534400>, <45158400>, <11289600>, <49152000>;
};
-/* TODO: Apalis SATA1 */
-
/* Apalis SPDIF1 */
&spdif0 {
assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
diff --git a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
index 5f3b4014e152..b6d64d3906ea 100644
--- a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
@@ -642,7 +642,7 @@
status = "okay";
};
-&pcieb {
+&pcie0 {
phys = <&hsio_phy 0 PHY_TYPE_PCIE 0>;
phy-names = "pcie-phy";
pinctrl-0 = <&pinctrl_pcieb>;
@@ -652,6 +652,16 @@
status = "okay";
};
+&pcie0_ep {
+ phys = <&hsio_phy 0 PHY_TYPE_PCIE 0>;
+ phy-names = "pcie-phy";
+ pinctrl-0 = <&pinctrl_pcieb>;
+ pinctrl-names = "default";
+ reset-gpio = <&lsio_gpio4 0 GPIO_ACTIVE_LOW>;
+ vpcie-supply = <&reg_pcieb>;
+ status = "disabled";
+};
+
&sai0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai0>;
diff --git a/arch/arm64/boot/dts/freescale/imx8dxl-ss-hsio.dtsi b/arch/arm64/boot/dts/freescale/imx8dxl-ss-hsio.dtsi
index afbe962d78ce..bbc6abb0fdf2 100644
--- a/arch/arm64/boot/dts/freescale/imx8dxl-ss-hsio.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8dxl-ss-hsio.dtsi
@@ -37,15 +37,20 @@
power-domains = <&pd IMX_SC_R_SERDES_1>;
status = "disabled";
};
-};
-&pcieb {
- #interrupt-cells = <1>;
- interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "msi";
- interrupt-map = <0 0 0 1 &gic 0 47 4>,
- <0 0 0 2 &gic 0 48 4>,
- <0 0 0 3 &gic 0 49 4>,
- <0 0 0 4 &gic 0 50 4>;
- interrupt-map-mask = <0 0 0 0x7>;
+ pcie0: pcie@5f010000 {
+ #interrupt-cells = <1>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ interrupt-map = <0 0 0 1 &gic 0 47 4>,
+ <0 0 0 2 &gic 0 48 4>,
+ <0 0 0 3 &gic 0 49 4>,
+ <0 0 0 4 &gic 0 50 4>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ };
+
+ pcie0_ep: pcie-ep@5f010000 {
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dma";
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/imx8dxp-tqma8xdps-mb-smarc-2.dts b/arch/arm64/boot/dts/freescale/imx8dxp-tqma8xdps-mb-smarc-2.dts
new file mode 100644
index 000000000000..331787df2fe4
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8dxp-tqma8xdps-mb-smarc-2.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright (c) 2021-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
+ */
+
+/dts-v1/;
+
+#include "imx8dxp-tqma8xdps.dtsi"
+#include "tqma8xxs-mb-smarc-2.dtsi"
+
+/ {
+ model = "TQ-Systems i.MX8DXP TQMa8XDPS on MB-SMARC-2";
+ compatible = "tq,imx8dxp-tqma8xdps-mb-smarc-2", "tq,imx8dxp-tqma8xdps", "fsl,imx8dxp";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8dxp-tqma8xdps.dtsi b/arch/arm64/boot/dts/freescale/imx8dxp-tqma8xdps.dtsi
new file mode 100644
index 000000000000..a97286fe7e0d
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8dxp-tqma8xdps.dtsi
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright (c) 2021-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
+ */
+
+#include "imx8dxp.dtsi"
+#include "tqma8xxs.dtsi"
+
+/ {
+ model = "TQ-Systems i.MX8DXP TQMa8XDPS";
+ compatible = "tq,imx8dxp-tqma8xdps", "fsl,imx8dxp";
+};
+
+&pmic0_thermal {
+ cooling-maps {
+ map0 {
+ cooling-device =
+ <&A35_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts
index 97ff1ddd6318..734a75198f06 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts
@@ -124,6 +124,7 @@
assigned-clock-parents = <&clk IMX8MM_AUDIO_PLL1_OUT>;
assigned-clock-rates = <24576000>;
#sound-dai-cells = <0>;
+ fsl,sai-mclk-direction-output;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
index 62ed64663f49..21bcd82fd092 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
@@ -78,6 +78,9 @@
ethphy0: ethernet-phy@0 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0>;
+ reset-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
};
};
};
@@ -233,6 +236,12 @@
rtc: rtc@51 {
compatible = "nxp,pcf85263";
reg = <0x51>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rtc>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <13 IRQ_TYPE_LEVEL_LOW>;
+ quartz-load-femtofarads = <12500>;
+ wakeup-source;
};
};
@@ -269,7 +278,7 @@
mmc-pwrseq = <&usdhc1_pwrseq>;
status = "okay";
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
pinctrl-names = "default";
@@ -314,6 +323,7 @@
MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x146
MX8MM_IOMUXC_SAI2_RXC_GPIO4_IO22 0x19
>;
};
@@ -349,6 +359,12 @@
>;
};
+ pinctrl_rtc: rtcgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x146
+ >;
+ };
+
pinctrl_uart1: uart1grp {
fsl,pins = <
MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
index 5f8336217bb8..622caaa78eaf 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
@@ -544,6 +544,19 @@
status = "okay";
};
+&pcie0_ep {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie0>;
+ clocks = <&clk IMX8MM_CLK_PCIE1_ROOT>, <&pcie0_refclk>,
+ <&clk IMX8MM_CLK_PCIE1_AUX>;
+ assigned-clocks = <&clk IMX8MM_CLK_PCIE1_AUX>,
+ <&clk IMX8MM_CLK_PCIE1_CTRL>;
+ assigned-clock-rates = <10000000>, <250000000>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_50M>,
+ <&clk IMX8MM_SYS_PLL2_250M>;
+ status = "disabled";
+};
+
&sai2 {
#sound-dai-cells = <0>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-peb-av-10.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-peb-av-10.dtso
index 840f83293452..e5ca5a664b61 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-peb-av-10.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-peb-av-10.dtso
@@ -186,6 +186,8 @@
reg = <2>;
bridge_out: endpoint {
remote-endpoint = <&panel_in>;
+ ti,lvds-vod-swing-clock-microvolt = <200000 600000>;
+ ti,lvds-vod-swing-data-microvolt = <200000 600000>;
};
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
index cdfacbc35db5..190bde4edcd7 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
@@ -306,7 +306,7 @@
keep-power-in-suspend;
status = "okay";
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
index 7251ad3a0017..d29710772569 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
@@ -144,6 +144,19 @@
startup-delay-us = <20000>;
};
+ reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
+ compatible = "regulator-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2_vsel>;
+ gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
+ states = <1800000 0x1>,
+ <3300000 0x0>;
+ regulator-name = "PMIC_USDHC_VSELECT";
+ vin-supply = <&reg_nvcc_sd>;
+ };
+
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
@@ -269,7 +282,7 @@
"SODIMM_19",
"",
"",
- "",
+ "PMIC_USDHC_VSELECT",
"",
"",
"",
@@ -528,7 +541,7 @@
};
eeprom@50 {
- compatible = "st,24c02";
+ compatible = "st,24c02", "atmel,24c02";
pagesize = <16>;
reg = <0x50>;
};
@@ -633,7 +646,7 @@
/* EEPROM on display adapter (MIPI DSI Display Adapter) */
eeprom_display_adapter: eeprom@50 {
- compatible = "st,24c02";
+ compatible = "st,24c02", "atmel,24c02";
pagesize = <16>;
reg = <0x50>;
status = "disabled";
@@ -641,7 +654,7 @@
/* EEPROM on carrier board */
eeprom_carrier_board: eeprom@57 {
- compatible = "st,24c02";
+ compatible = "st,24c02", "atmel,24c02";
pagesize = <16>;
reg = <0x57>;
status = "disabled";
@@ -785,6 +798,7 @@
pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_cd>;
pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_cd_sleep>;
vmmc-supply = <&reg_usdhc2_vmmc>;
+ vqmmc-supply = <&reg_usdhc2_vqmmc>;
};
&wdog1 {
@@ -1206,13 +1220,17 @@
<MX8MM_IOMUXC_NAND_CLE_GPIO3_IO5 0x6>; /* SODIMM 76 */
};
+ pinctrl_usdhc2_vsel: usdhc2vselgrp {
+ fsl,pins =
+ <MX8MM_IOMUXC_GPIO1_IO04_GPIO1_IO4 0x10>; /* PMIC_USDHC_VSELECT */
+ };
+
/*
* Note: Due to ERR050080 we use discrete external on-module resistors pulling-up to the
* on-module +V3.3_1.8_SD (LDO5) rail and explicitly disable the internal pull-ups here.
*/
pinctrl_usdhc2: usdhc2grp {
fsl,pins =
- <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90>, /* SODIMM 78 */
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x90>, /* SODIMM 74 */
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x90>, /* SODIMM 80 */
@@ -1223,7 +1241,6 @@
pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
fsl,pins =
- <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x94>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x94>,
@@ -1234,7 +1251,6 @@
pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
fsl,pins =
- <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x96>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x96>,
@@ -1246,7 +1262,6 @@
/* Avoid backfeeding with removed card power */
pinctrl_usdhc2_sleep: usdhc2slpgrp {
fsl,pins =
- <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x0>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x0>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x0>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x0>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index 4de3bf22902b..cfebaa01217e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -62,7 +62,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0>;
- clock-latency = <61036>; /* two CLK32 periods */
clocks = <&clk IMX8MM_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -83,7 +82,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1>;
- clock-latency = <61036>; /* two CLK32 periods */
clocks = <&clk IMX8MM_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -102,7 +100,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x2>;
- clock-latency = <61036>; /* two CLK32 periods */
clocks = <&clk IMX8MM_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -121,7 +118,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x3>;
- clock-latency = <61036>; /* two CLK32 periods */
clocks = <&clk IMX8MM_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts
index 1df5ceb11387..37fc5ed98d7f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts
@@ -124,6 +124,7 @@
assigned-clock-parents = <&clk IMX8MN_AUDIO_PLL1_OUT>;
assigned-clock-rates = <24576000>;
#sound-dai-cells = <0>;
+ fsl,sai-mclk-direction-output;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
index 2a64115eebf1..67a99383a632 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
@@ -88,6 +88,9 @@
ethphy0: ethernet-phy@0 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0>;
+ reset-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
};
};
};
@@ -242,6 +245,12 @@
rtc: rtc@51 {
compatible = "nxp,pcf85263";
reg = <0x51>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rtc>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <13 IRQ_TYPE_LEVEL_LOW>;
+ quartz-load-femtofarads = <12500>;
+ wakeup-source;
};
};
@@ -280,7 +289,7 @@
mmc-pwrseq = <&usdhc1_pwrseq>;
status = "okay";
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
pinctrl-names = "default";
@@ -325,6 +334,7 @@
MX8MN_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
MX8MN_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
MX8MN_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ MX8MN_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x146
MX8MN_IOMUXC_SAI2_RXC_GPIO4_IO22 0x19
>;
};
@@ -360,6 +370,12 @@
>;
};
+ pinctrl_rtc: rtcgrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x146
+ >;
+ };
+
pinctrl_uart1: uart1grp {
fsl,pins = <
MX8MN_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2-common.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2-common.dtsi
index bbb07c650da9..d20393c2d901 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2-common.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2-common.dtsi
@@ -265,7 +265,7 @@
non-removable;
status = "okay";
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
compatible = "brcm,bcm4329-fmac";
reg = <1>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
index b364307868f2..38ef9e4fdf07 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
@@ -300,7 +300,7 @@
keep-power-in-suspend;
status = "okay";
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index a5f9cfb46e5d..848ba5e46ee6 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -62,7 +62,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0>;
- clock-latency = <61036>;
clocks = <&clk IMX8MN_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -83,7 +82,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1>;
- clock-latency = <61036>;
clocks = <&clk IMX8MN_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -102,7 +100,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x2>;
- clock-latency = <61036>;
clocks = <&clk IMX8MN_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -121,7 +118,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x3>;
- clock-latency = <61036>;
clocks = <&clk IMX8MN_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi
index 15f7ab58db36..6a62cb32e22e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi
@@ -257,6 +257,12 @@
rtc: rtc@51 {
compatible = "nxp,pcf85263";
reg = <0x51>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rtc>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <13 IRQ_TYPE_LEVEL_LOW>;
+ quartz-load-femtofarads = <12500>;
+ wakeup-source;
};
};
@@ -381,6 +387,12 @@
>;
};
+ pinctrl_rtc: rtcgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO13__GPIO1_IO13 0x1d0
+ >;
+ };
+
pinctrl_uart1: uart1grp {
fsl,pins = <
MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX 0x140
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
index a90e28c07e3f..7f754e0a5d69 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
@@ -590,7 +590,7 @@
#address-cells = <1>;
#size-cells = <0>;
- brcmf: bcrmf@1 { /* muRata 2AE */
+ brcmf: wifi@1 { /* muRata 2AE */
reg = <1>;
compatible = "cypress,cyw4373-fmac", "brcm,bcm4329-fmac";
/*
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
index c26954e5a605..1ba3018c621e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
@@ -309,6 +309,16 @@
status = "okay";
};
+&dsp_reserved {
+ status = "okay";
+};
+
+&dsp {
+ memory-region = <&dsp_vdev0buffer>, <&dsp_vdev0vring0>,
+ <&dsp_vdev0vring1>, <&dsp_reserved>;
+ status = "okay";
+};
+
&eqos {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_eqos>;
@@ -690,6 +700,10 @@
};
};
+&mu2 {
+ status = "okay";
+};
+
&pcie_phy {
fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_INPUT>;
clocks = <&pcie0_refclk>;
@@ -697,7 +711,7 @@
status = "okay";
};
-&pcie {
+&pcie0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pcie0>;
reset-gpio = <&gpio2 7 GPIO_ACTIVE_LOW>;
@@ -705,6 +719,12 @@
status = "okay";
};
+&pcie0_ep {
+ pinctrl-0 = <&pinctrl_pcie0>;
+ pinctrl-names = "default";
+ status = "disabled";
+};
+
&pwm1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-libra-rdk-fpsc-lvds-etml1010g3dra.dtso b/arch/arm64/boot/dts/freescale/imx8mp-libra-rdk-fpsc-lvds-etml1010g3dra.dtso
new file mode 100644
index 000000000000..1dcf249ca90d
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-libra-rdk-fpsc-lvds-etml1010g3dra.dtso
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright (C) 2025 PHYTEC Messtechnik GmbH
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/imx8mp-clock.h>
+
+/dts-v1/;
+/plugin/;
+
+&backlight_lvds0 {
+ brightness-levels = <0 8 16 32 64 128 255>;
+ default-brightness-level = <8>;
+ enable-gpios = <&gpio5 23 GPIO_ACTIVE_LOW>;
+ num-interpolated-steps = <2>;
+ pwms = <&pwm1 0 66667 0>;
+ status = "okay";
+};
+
+&lcdif2 {
+ status = "okay";
+};
+
+&lvds_bridge {
+ assigned-clocks = <&clk IMX8MP_CLK_MEDIA_LDB>, <&clk IMX8MP_VIDEO_PLL1>;
+ assigned-clock-parents = <&clk IMX8MP_VIDEO_PLL1_OUT>;
+ /*
+ * The LVDS panel uses 72.4 MHz pixel clock, set IMX8MP_VIDEO_PLL1 to
+ * 72.4 * 7 = 506.8 MHz so the LDB serializer and LCDIFv3 scanout
+ * engine can reach accurate pixel clock of exactly 72.4 MHz.
+ */
+ assigned-clock-rates = <0>, <506800000>;
+ status = "okay";
+};
+
+&panel0_lvds {
+ compatible = "edt,etml1010g3dra";
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-libra-rdk-fpsc.dts b/arch/arm64/boot/dts/freescale/imx8mp-libra-rdk-fpsc.dts
new file mode 100644
index 000000000000..6f3a7b863dca
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-libra-rdk-fpsc.dts
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright (C) 2025 PHYTEC Messtechnik GmbH
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/leds/leds-pca9532.h>
+#include <dt-bindings/phy/phy-imx8-pcie.h>
+#include <dt-bindings/pwm/pwm.h>
+#include "imx8mp-phycore-fpsc.dtsi"
+
+/ {
+ compatible = "phytec,imx8mp-libra-rdk-fpsc",
+ "phytec,imx8mp-phycore-fpsc", "fsl,imx8mp";
+ model = "PHYTEC i.MX8MP Libra RDK FPSC";
+
+ backlight_lvds0: backlight0 {
+ compatible = "pwm-backlight";
+ pinctrl-0 = <&pinctrl_lvds0>;
+ pinctrl-names = "default";
+ power-supply = <&reg_vdd_12v0>;
+ status = "disabled";
+ };
+
+ chosen {
+ stdout-path = &uart4;
+ };
+
+ panel0_lvds: panel-lvds {
+ /* compatible panel in overlay */
+ backlight = <&backlight_lvds0>;
+ power-supply = <&reg_vdd_3v3>;
+ status = "disabled";
+
+ port {
+ panel0_in: endpoint {
+ remote-endpoint = <&ldb_lvds_ch0>;
+ };
+ };
+ };
+
+ reg_can1_stby: regulator-can1-stby {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "can1-stby";
+ gpio = <&gpio_expander 10 GPIO_ACTIVE_LOW>;
+ };
+
+ reg_can2_stby: regulator-can2-stby {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "can2-stby";
+ gpio = <&gpio_expander 9 GPIO_ACTIVE_LOW>;
+ };
+
+ reg_vdd_12v0: regulator-vdd-12v0 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <12000000>;
+ regulator-min-microvolt = <12000000>;
+ regulator-name = "VDD_12V0";
+ };
+
+ reg_vdd_1v8: regulator-vdd-1v8 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "VDD_1V8";
+ };
+
+ reg_vdd_3v3: regulator-vdd-3v3 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "VDD_3V3";
+ };
+
+ reg_vdd_5v0: regulator-vdd-5v0 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <5000000>;
+ regulator-name = "VDD_5V0";
+ };
+};
+
+&eqos {
+ phy-handle = <&ethphy1>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x1>;
+ enet-phy-lane-no-swap;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_1_50_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_1_50_NS>;
+ };
+ };
+};
+
+/* CAN FD */
+&flexcan1 {
+ xceiver-supply = <&reg_can1_stby>;
+ status = "okay";
+};
+
+&flexcan2 {
+ xceiver-supply = <&reg_can2_stby>;
+ status = "okay";
+};
+
+&flexspi {
+ status = "okay";
+
+ spi_nor: flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <80000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ vcc-supply = <&reg_vdd_1v8>;
+ };
+};
+
+&gpio5 {
+ gpio-line-names = "", "", "", "", "I2C5_SDA",
+ "GPIO1", "", "", "", "SPI1_CS",
+ "", "", "", "SPI2_CS", "I2C1_SCL",
+ "I2C1_SDA", "I2C2_SCL", "I2C2_SDA", "I2C3_SCL", "I2C3_SDA",
+ "", "GPIO2", "", "LVDS1_BL_EN", "SPI3_CS",
+ "", "GPIO3";
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ eeprom@51 {
+ compatible = "atmel,24c02";
+ reg = <0x51>;
+ pagesize = <16>;
+ vcc-supply = <&reg_vdd_1v8>;
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ leds@62 {
+ compatible = "nxp,pca9533";
+ reg = <0x62>;
+
+ led-1 {
+ type = <PCA9532_TYPE_LED>;
+ };
+
+ led-2 {
+ type = <PCA9532_TYPE_LED>;
+ };
+
+ led-3 {
+ type = <PCA9532_TYPE_LED>;
+ };
+ };
+};
+
+&i2c5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ gpio_expander: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-line-names = "CSI1_CTRL1", "CSI1_CTRL2", "CSI1_CTRL3",
+ "CSI1_CTRL4", "CSI2_CTRL1", "CSI2_CTRL2",
+ "CSI2_CTRL3", "CSI2_CTRL4", "CLK_EN_AV",
+ "nCAN2_EN", "nCAN1_EN", "PCIE1_nWAKE",
+ "PCIE2_nWAKE", "PCIE2_nALERT_3V3",
+ "UART1_BT_RS_SEL", "UART1_RS232_485_SEL";
+ vcc-supply = <&reg_vdd_1v8>;
+
+ uart1_bt_rs_sel: bt-rs-hog {
+ gpios = <14 GPIO_ACTIVE_HIGH>;
+ gpio-hog;
+ line-name = "UART1_BT_RS_SEL";
+ output-low; /* default RS232/RS485 */
+ };
+
+ uart1_rs232_485_sel: rs232-485-hog {
+ gpios = <15 GPIO_ACTIVE_HIGH>;
+ gpio-hog;
+ line-name = "UART1_RS232_485_SEL";
+ output-high; /* default RS232 */
+ };
+ };
+};
+
+&iomuxc {
+ pinctrl_lvds0: lvds0grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART1_TXD__GPIO5_IO23 0x12
+ >;
+ };
+ pinctrl_rtc: rtcgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART2_TXD__GPIO5_IO25 0x1C0
+ >;
+ };
+};
+
+&lvds_bridge {
+ ports {
+ port@1 {
+ ldb_lvds_ch0: endpoint {
+ remote-endpoint = <&panel0_in>;
+ };
+ };
+ };
+};
+
+/* Mini PCIe */
+&pcie {
+ reset-gpio = <&gpio1 8 GPIO_ACTIVE_LOW>;
+ vpcie-supply = <&reg_vdd_3v3>;
+ status = "okay";
+};
+
+&pcie_phy {
+ clocks = <&hsio_blk_ctrl>;
+ clock-names = "ref";
+ fsl,clkreq-unsupported;
+ fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_OUTPUT>;
+ status = "okay";
+};
+
+&reg_vdd_io {
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+};
+
+&rv3028 {
+ interrupt-parent = <&gpio5>;
+ interrupts = <25 IRQ_TYPE_LEVEL_LOW>;
+ aux-voltage-chargeable = <1>;
+ pinctrl-0 = <&pinctrl_rtc>;
+ pinctrl-names = "default";
+ trickle-resistor-ohms = <3000>;
+ wakeup-source;
+};
+
+&snvs_pwrkey {
+ status = "okay";
+};
+
+/* debug console */
+&uart4 {
+ status = "okay";
+};
+
+/* SD-Card */
+&usdhc2 {
+ assigned-clocks = <&clk IMX8MP_CLK_USDHC2>;
+ assigned-clock-rates = <200000000>;
+ bus-width = <4>;
+ disable-wp;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-nitrogen-enc-carrier-board.dts b/arch/arm64/boot/dts/freescale/imx8mp-nitrogen-enc-carrier-board.dts
new file mode 100644
index 000000000000..1df9488aaeb2
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-nitrogen-enc-carrier-board.dts
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2020 Boundary Devices
+ * Copyright 2025 Collabora Ltd.
+ */
+
+/dts-v1/;
+
+#include "imx8mp-nitrogen-som.dtsi"
+
+/ {
+ model = "Boundary Devices Nitrogen8M Plus ENC Carrier Board";
+ compatible = "boundary,imx8mp-nitrogen-enc-carrier-board",
+ "boundary,imx8mp-nitrogen-som", "fsl,imx8mp";
+
+ chosen {
+ stdout-path = &uart2;
+ };
+
+ connector {
+ compatible = "usb-c-connector";
+ data-role = "dual";
+ label = "USB-C";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ hs_ep: endpoint {
+ remote-endpoint = <&usb3_hs_ep>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ ss_ep: endpoint {
+ remote-endpoint = <&hd3ss3220_in_ep>;
+ };
+ };
+ };
+ };
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "hdmi";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
+ reg_usb_vbus: regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 12 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usb_vbus>;
+ regulator-name = "usb_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+};
+
+&ecspi2 {
+ cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&pinctrl_ecspi2>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&gpio1 {
+ usb-hub-reset-hog {
+ gpio-hog;
+ gpios = <6 GPIO_ACTIVE_LOW>;
+ line-name = "usb-hub-reset";
+ output-low;
+ };
+};
+
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ pinctrl-0 = <&pinctrl_hdmi>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ ports {
+ port@1 {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
+&i2c2 {
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&pinctrl_i2c2_pca9546>;
+ pinctrl-names = "default";
+ reset-gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+ };
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+ };
+
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+
+ rtc@52 {
+ compatible = "microcrystal,rv3028";
+ reg = <0x52>;
+ interrupts-extended = <&gpio1 4 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rv3028>;
+ wakeup-source;
+ };
+ };
+ };
+};
+
+&i2c4 {
+ usb-mux@47 {
+ compatible = "ti,hd3ss3220";
+ reg = <0x47>;
+ interrupts-extended = <&gpio1 8 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4_hd3ss3220>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ hd3ss3220_in_ep: endpoint {
+ remote-endpoint = <&ss_ep>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ hd3ss3220_out_ep: endpoint {
+ remote-endpoint = <&usb3_role_switch>;
+ };
+ };
+ };
+ };
+};
+
+&isp_0 {
+ status = "okay";
+};
+
+&lcdif3 {
+ status = "okay";
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "okay";
+};
+
+&pwm2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm2>;
+ status = "okay";
+};
+
+&pwm4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm4>;
+ status = "okay";
+};
+
+&snvs_pwrkey {
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&uart3 {
+ assigned-clocks = <&clk IMX8MP_CLK_UART3>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_80M>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+&uart4 {
+ assigned-clocks = <&clk IMX8MP_CLK_UART4>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_80M>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&usb3_0 {
+ fsl,over-current-active-low;
+ status = "okay";
+};
+
+&usb3_1 {
+ status = "okay";
+};
+
+&usb3_phy0 {
+ vbus-supply = <&reg_usb_vbus>;
+ status = "okay";
+};
+
+&usb3_phy1 {
+ vbus-supply = <&reg_usb_vbus>;
+ status = "okay";
+};
+
+&usb_dwc3_0 {
+ dr_mode = "otg";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb3_0>;
+ usb-role-switch;
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb3_hs_ep: endpoint {
+ remote-endpoint = <&hs_ep>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb3_role_switch: endpoint {
+ remote-endpoint = <&hd3ss3220_out_ep>;
+ };
+ };
+ };
+};
+
+&usb_dwc3_1 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usdhc1 {
+ bus-width = <4>;
+ cd-gpios = <&gpio2 11 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>, <&pinctrl_usdhc1_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>, <&pinctrl_usdhc1_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>, <&pinctrl_usdhc1_gpio>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_ecspi2: ecspi2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_ECSPI2_MISO__ECSPI2_MISO 0x82
+ MX8MP_IOMUXC_ECSPI2_MOSI__ECSPI2_MOSI 0x82
+ MX8MP_IOMUXC_ECSPI2_SCLK__ECSPI2_SCLK 0x82
+ MX8MP_IOMUXC_ECSPI2_SS0__GPIO5_IO13 0x143
+ >;
+ };
+
+ pinctrl_hdmi: hdmigrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x40000019
+ MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x400001c3
+ MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x400001c3
+ MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x40000019
+ >;
+ };
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO06__GPIO1_IO06 0x100
+ MX8MP_IOMUXC_NAND_DATA01__GPIO3_IO07 0x119
+ MX8MP_IOMUXC_SAI1_MCLK__GPIO4_IO20 0x16
+ MX8MP_IOMUXC_SAI1_TXD7__GPIO4_IO19 0x1c4
+ MX8MP_IOMUXC_SD1_DATA4__GPIO2_IO06 0x41
+ MX8MP_IOMUXC_SD1_DATA5__GPIO2_IO07 0x41
+ MX8MP_IOMUXC_SD1_DATA6__GPIO2_IO08 0x41
+ MX8MP_IOMUXC_SD1_RESET_B__GPIO2_IO10 0x41
+ MX8MP_IOMUXC_SPDIF_EXT_CLK__GPIO5_IO05 0x41
+ MX8MP_IOMUXC_SPDIF_RX__GPIO5_IO04 0x41
+ MX8MP_IOMUXC_SPDIF_TX__GPIO5_IO03 0x41
+ >;
+ };
+
+ pinctrl_i2c2_pca9546: i2c2-pca9546grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO05__GPIO1_IO05 0x100
+ >;
+ };
+
+ pinctrl_i2c4_hd3ss3220: i2c4-hd3ss3220grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO08__GPIO1_IO08 0x16
+ MX8MP_IOMUXC_SAI1_RXD0__GPIO4_IO02 0x03
+ >;
+ };
+
+ pinctrl_pwm1: pwm1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO01__PWM1_OUT 0x100
+ >;
+ };
+
+ pinctrl_pwm2: pwm2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO09__PWM2_OUT 0xd6
+ MX8MP_IOMUXC_SAI5_RXD0__PWM2_OUT 0xd6
+ >;
+ };
+
+ pinctrl_pwm4: pwm4grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI5_RXFS__PWM4_OUT 0x116
+ >;
+ };
+
+ pinctrl_reg_usb_vbus: reg-usb-vbusgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO12__GPIO1_IO12 0x100
+ >;
+ };
+
+ pinctrl_rv3028: rv3028grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO04__GPIO1_IO04 0x1c0
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x140
+ MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_ECSPI1_MOSI__UART3_DCE_TX 0x140
+ MX8MP_IOMUXC_ECSPI1_SCLK__UART3_DCE_RX 0x140
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART4_RXD__UART4_DCE_RX 0x140
+ MX8MP_IOMUXC_UART4_TXD__UART4_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_usb3_0: usb3-0grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO13__USB1_OTG_OC 0x1c0
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO03__USDHC1_VSELECT 0x116
+ MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x190
+ MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x1d0
+ MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x1d0
+ MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x1d0
+ MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x1d0
+ MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x194
+ MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x1d4
+ MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x1d4
+ MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x1d4
+ MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x1d4
+ MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x1d4
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x196
+ MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x1d6
+ MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x1d6
+ MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x1d6
+ MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x1d6
+ MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x1d6
+ >;
+ };
+
+ pinctrl_usdhc1_gpio: usdhc1-gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD1_STROBE__GPIO2_IO11 0x1c4
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-nitrogen-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-nitrogen-som.dtsi
new file mode 100644
index 000000000000..f658309612ef
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-nitrogen-som.dtsi
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2020 Boundary Devices
+ * Copyright 2025 Collabora Ltd.
+ */
+
+#include "imx8mp.dtsi"
+
+/ {
+ model = "Boundary Devices Nitrogen8M Plus Som";
+ compatible = "boundary,imx8mp-nitrogen-som", "fsl,imx8mp";
+
+ rfkill-bt {
+ compatible = "rfkill-gpio";
+ label = "rfkill-bluetooth";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rfkill_bt>;
+ radio-type = "bluetooth";
+ shutdown-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
+ };
+
+ rfkill-wlan {
+ compatible = "rfkill-gpio";
+ label = "rfkill-wlan";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rfkill_wlan>;
+ radio-type = "wlan";
+ shutdown-gpios = <&gpio2 19 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&A53_0 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_1 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_2 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_3 {
+ cpu-supply = <&buck2>;
+};
+
+&eqos {
+ phy-handle = <&ethphy0>;
+ phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@4 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <4>;
+ eee-broken-1000t;
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio5 14 GPIO_OPEN_DRAIN>;
+ sda-gpios = <&gpio5 15 GPIO_OPEN_DRAIN>;
+ status = "okay";
+
+ pmic: pmic@25 {
+ compatible = "nxp,pca9450c";
+ reg = <0x25>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&pinctrl_pmic>;
+
+ regulators {
+
+ buck1: BUCK1 {
+ regulator-name = "VDD_SOC (BUCK1)";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <2187500>;
+ regulator-min-microvolt = <600000>;
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck2: BUCK2 {
+ regulator-name = "VDD_ARM (BUCK2)";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <2187500>;
+ regulator-min-microvolt = <600000>;
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck4: BUCK4 {
+ regulator-name = "VDD_3P3V (BUCK4)";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3400000>;
+ regulator-min-microvolt = <600000>;
+ };
+
+ buck5: BUCK5 {
+ regulator-name = "VDD_1P8V (BUCK5)";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3400000>;
+ regulator-min-microvolt = <600000>;
+ };
+
+ buck6: BUCK6 {
+ regulator-name = "NVCC_DRAM_1P1V (BUCK6)";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3400000>;
+ regulator-min-microvolt = <600000>;
+ };
+
+ ldo1: LDO1 {
+ regulator-name = "NVCC_SNVS_1V8 (LDO1)";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1600000>;
+ };
+
+ ldo3: LDO3 {
+ regulator-name = "VDDA_1V8 (LDO3)";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <800000>;
+ };
+
+ ldo5: LDO5 {
+ regulator-name = "NVCC_SD1 (LDO5)";
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ scl-gpios = <&gpio5 16 GPIO_OPEN_DRAIN>;
+ sda-gpios = <&gpio5 17 GPIO_OPEN_DRAIN>;
+ status = "okay";
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ pinctrl-1 = <&pinctrl_i2c3_gpio>;
+ scl-gpios = <&gpio5 18 GPIO_OPEN_DRAIN>;
+ sda-gpios = <&gpio5 19 GPIO_OPEN_DRAIN>;
+ status = "okay";
+};
+
+&i2c4 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ pinctrl-1 = <&pinctrl_i2c4_gpio>;
+ scl-gpios = <&gpio5 20 GPIO_OPEN_DRAIN>;
+ sda-gpios = <&gpio5 21 GPIO_OPEN_DRAIN>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&usdhc2 {
+ bus-width = <4>;
+ keep-power-in-suspend;
+ non-removable;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ status = "okay";
+};
+
+&usdhc3 {
+ bus-width = <8>;
+ non-removable;
+ no-mmc-hs400;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_eqos: eqosgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x20
+ MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0xa0
+ MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x91
+ MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x91
+ MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x91
+ MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x91
+ MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x91
+ MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x91
+ MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x1f
+ MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x1f
+ MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x1f
+ MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x1f
+ MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x1f
+ MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x1f
+
+ MX8MP_IOMUXC_NAND_CE1_B__GPIO3_IO02 0x10
+ MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16 0x100
+ >;
+ };
+
+ pinctrl_i2c1_gpio: i2c1gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C1_SCL__GPIO5_IO14 0x1c3
+ MX8MP_IOMUXC_I2C1_SDA__GPIO5_IO15 0x1c3
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c2_gpio: i2c2gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C2_SCL__GPIO5_IO16 0x1c3
+ MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17 0x1c3
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c3_gpio: i2c3gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C3_SCL__GPIO5_IO18 0x1c3
+ MX8MP_IOMUXC_I2C3_SDA__GPIO5_IO19 0x1c3
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c4_gpio: i2c4gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C4_SCL__GPIO5_IO20 0x1c3
+ MX8MP_IOMUXC_I2C4_SDA__GPIO5_IO21 0x1c3
+ >;
+ };
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C4_SCL__I2C4_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C4_SDA__I2C4_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_pmic: pmicirqgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_ALE__GPIO3_IO00 0x41
+ >;
+ };
+
+ pinctrl_rfkill_bt: rfkill-btgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_DATA03__GPIO3_IO09 0x119
+ >;
+ };
+
+ pinctrl_rfkill_wlan: rfkill-wlangrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x16
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX 0x140
+ MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX 0x140
+ MX8MP_IOMUXC_UART3_RXD__UART1_DCE_CTS 0x140
+ MX8MP_IOMUXC_UART3_TXD__UART1_DCE_RTS 0x140
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d0
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d0
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x196
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d6
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d6
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x10
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x150
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x150
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x150
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x150
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x150
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x150
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x150
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x150
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x150
+ MX8MP_IOMUXC_NAND_CE0_B__GPIO3_IO01 0x140
+
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x14
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x154
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x154
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x154
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x154
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x154
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x154
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x154
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x154
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x154
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x12
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x152
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x152
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x152
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x152
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x152
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x152
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x152
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x152
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x152
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO02__WDOG1_WDOG_B 0xc6
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi
index a1b75c9068b2..2ce1860b244d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi
@@ -24,6 +24,20 @@
fsl,operating-mode = "nominal";
};
+&gpu2d {
+ assigned-clocks = <&clk IMX8MP_CLK_GPU2D_CORE>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>;
+ assigned-clock-rates = <800000000>;
+};
+
+&gpu3d {
+ assigned-clocks = <&clk IMX8MP_CLK_GPU3D_CORE>,
+ <&clk IMX8MP_CLK_GPU3D_SHADER_CORE>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL1_800M>;
+ assigned-clock-rates = <800000000>, <800000000>;
+};
+
&pgc_hdmimix {
assigned-clocks = <&clk IMX8MP_CLK_HDMI_AXI>,
<&clk IMX8MP_CLK_HDMI_APB>;
@@ -46,6 +60,18 @@
assigned-clock-rates = <600000000>, <300000000>;
};
+&pgc_mlmix {
+ assigned-clocks = <&clk IMX8MP_CLK_ML_CORE>,
+ <&clk IMX8MP_CLK_ML_AXI>,
+ <&clk IMX8MP_CLK_ML_AHB>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL1_800M>;
+ assigned-clock-rates = <800000000>,
+ <800000000>,
+ <300000000>;
+};
+
&media_blk_ctrl {
assigned-clocks = <&clk IMX8MP_CLK_MEDIA_AXI>,
<&clk IMX8MP_CLK_MEDIA_APB>,
@@ -62,3 +88,5 @@
<0>, <0>, <400000000>,
<1039500000>;
};
+
+/delete-node/ &{noc_opp_table/opp-1000000000};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-fpsc.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-fpsc.dtsi
new file mode 100644
index 000000000000..8b0e8cf86cad
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-fpsc.dtsi
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright (C) 2025 PHYTEC Messtechnik GmbH
+ */
+
+#include <dt-bindings/net/ti-dp83867.h>
+#include "imx8mp.dtsi"
+
+/ {
+ compatible = "phytec,imx8mp-phycore-fpsc", "fsl,imx8mp";
+ model = "PHYTEC phyCORE-i.MX8MP FPSC";
+
+ aliases {
+ rtc0 = &rv3028;
+ rtc1 = &snvs_rtc;
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0x0 0x80000000>;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ off-on-delay-us = <12000>;
+ pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
+ pinctrl-names = "default";
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "VDDSW_SD2";
+ startup-delay-us = <100>;
+ gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_vdd_io: regulator-vdd-io {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "VDD_IO";
+ };
+};
+
+&A53_0 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_1 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_2 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_3 {
+ cpu-supply = <&buck2>;
+};
+
+&ecspi1 { /* FPSC SPI1 */
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ pinctrl-names = "default";
+};
+
+&ecspi2 { /* FPSC SPI2 */
+ pinctrl-0 = <&pinctrl_ecspi2>;
+ pinctrl-names = "default";
+};
+
+&ecspi3 { /* FPSC SPI3 */
+ pinctrl-0 = <&pinctrl_ecspi3>;
+ pinctrl-names = "default";
+};
+
+&eqos { /* FPSC RGMII2 */
+ phy-mode = "rgmii-id";
+ pinctrl-0 = <&pinctrl_eqos>;
+ pinctrl-names = "default";
+};
+
+&fec { /* FPSC GB_ETH1 */
+ phy-handle = <&ethphy0>;
+ phy-mode = "rgmii-id";
+ pinctrl-0 = <&pinctrl_fec>;
+ pinctrl-names = "default";
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
+ enet-phy-lane-no-swap;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,min-output-impedance;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_1_50_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_1_50_NS>;
+ };
+ };
+};
+
+&flexcan1 { /* FPSC CAN1 */
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ pinctrl-names = "default";
+};
+
+&flexcan2 { /* FPSC CAN2 */
+ pinctrl-0 = <&pinctrl_flexcan2>;
+ pinctrl-names = "default";
+};
+
+&flexspi { /* FPSC QSPI */
+ pinctrl-0 = <&pinctrl_flexspi>;
+ pinctrl-names = "default";
+};
+
+&gpio1 {
+ gpio-line-names = "", "", "", "", "",
+ "", "", "", "PCIE1_nPERST";
+};
+
+&gpio2 {
+ gpio-line-names = "", "", "", "", "",
+ "", "", "", "", "",
+ "", "", "", "", "",
+ "", "", "", "", "SD2_RESET_B";
+};
+
+&gpio3 {
+ gpio-line-names = "", "", "", "", "",
+ "", "", "", "", "",
+ "", "", "", "", "",
+ "", "", "", "", "I2C6_SCL",
+ "I2C6_SDA", "I2C5_SCL";
+};
+
+&gpio4 { /* FPSC GPIO */
+ gpio-line-names = "GPIO6", "RGMII2_nINT", "GPIO7", "GPIO4", "",
+ "", "", "", "", "",
+ "", "", "", "", "",
+ "", "", "", "X_PMIC_IRQ_B", "",
+ "", "GPIO5", "", "", "RGMII2_EVENT_OUT",
+ "", "", "RGMII2_EVENT_IN";
+ pinctrl-0 = <&pinctrl_gpio4>;
+ pinctrl-names = "default";
+};
+
+&gpio5 { /* FPSC GPIO */
+ gpio-line-names = "", "", "", "", "I2C5_SDA",
+ "GPIO1", "", "", "", "SPI1_CS",
+ "", "", "", "SPI2_CS", "I2C1_SCL",
+ "I2C1_SDA", "I2C2_SCL", "I2C2_SDA", "I2C3_SCL", "I2C3_SDA",
+ "", "GPIO2", "", "", "SPI3_CS",
+ "", "GPIO3";
+ pinctrl-0 = <&pinctrl_gpio5>;
+ pinctrl-names = "default";
+};
+
+&i2c1 { /* FPSC I2C1 */
+ clock-frequency = <400000>;
+ pinctrl-0 = <&pinctrl_i2c1>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ pinctrl-names = "default", "gpio";
+ scl-gpios = <&gpio5 14 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "okay";
+
+ pmic: pmic@25 {
+ compatible = "nxp,pca9450c";
+ reg = <0x25>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&pinctrl_pmic>;
+ pinctrl-names = "default";
+
+ regulators {
+ buck1: BUCK1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <950000>;
+ regulator-min-microvolt = <850000>;
+ regulator-name = "VDD_SOC (BUCK1)";
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck2: BUCK2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1000000>;
+ regulator-min-microvolt = <850000>;
+ regulator-name = "VDD_ARM (BUCK2)";
+ regulator-ramp-delay = <3125>;
+ nxp,dvs-run-voltage = <950000>;
+ nxp,dvs-standby-voltage = <850000>;
+ };
+
+ buck4: BUCK4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "VDD_3V3 (BUCK4)";
+ };
+
+ buck5: BUCK5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "VDD_1V8 (BUCK5)";
+ };
+
+ buck6: BUCK6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1155000>;
+ regulator-min-microvolt = <1045000>;
+ regulator-name = "NVCC_DRAM_1V1 (BUCK6)";
+ };
+
+ ldo1: LDO1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "NVCC_SNVS_1V8 (LDO1)";
+ };
+
+ ldo3: LDO3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "VDDA_1V8 (LDO3)";
+ };
+
+ ldo5: LDO5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "NVCC_SD2 (LDO5)";
+ };
+ };
+ };
+
+ /* User EEPROM */
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ pagesize = <32>;
+ vcc-supply = <&reg_vdd_io>;
+ };
+
+ /* Factory EEPROM */
+ eeprom@51 {
+ compatible = "atmel,24c32";
+ reg = <0x51>;
+ pagesize = <32>;
+ read-only;
+ vcc-supply = <&reg_vdd_io>;
+ };
+
+ rv3028: rtc@52 {
+ compatible = "microcrystal,rv3028";
+ reg = <0x52>;
+ };
+};
+
+&i2c2 { /* FPSC I2C2 */
+ pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ pinctrl-names = "default", "gpio";
+ scl-gpios = <&gpio5 16 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+};
+
+&i2c3 { /* FPSC I2C3 */
+ pinctrl-0 = <&pinctrl_i2c3>;
+ pinctrl-1 = <&pinctrl_i2c3_gpio>;
+ pinctrl-names = "default", "gpio";
+ scl-gpios = <&gpio5 18 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 19 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+};
+
+&i2c5 { /* FPSC I2C4 */
+ pinctrl-0 = <&pinctrl_i2c5>;
+ pinctrl-1 = <&pinctrl_i2c5_gpio>;
+ pinctrl-names = "default", "gpio";
+ scl-gpios = <&gpio3 21 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 4 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+};
+
+&i2c6 { /* FPSC I2C5 */
+ pinctrl-0 = <&pinctrl_i2c6>;
+ pinctrl-1 = <&pinctrl_i2c6_gpio>;
+ pinctrl-names = "default", "gpio";
+ scl-gpios = <&gpio3 19 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio3 20 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+};
+
+&iomuxc {
+ pinctrl_flexcan1: can1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI2_RXC__CAN1_TX 0x154 /* CAN1_TX */
+ MX8MP_IOMUXC_SAI2_TXC__CAN1_RX 0x154 /* CAN1_RX */
+ >;
+ };
+
+ pinctrl_flexcan2: can2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI2_TXD0__CAN2_TX 0x154 /* CAN2_TX */
+ MX8MP_IOMUXC_UART3_TXD__CAN2_RX 0x154 /* CAN2_RX */
+ >;
+ };
+
+ pinctrl_eqos: eqosgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI1_RXC__GPIO4_IO01 0x10 /* RGMII2_nINT */
+ MX8MP_IOMUXC_SAI2_MCLK__GPIO4_IO27 0x10 /* RGMII2_EVENT_IN */
+ MX8MP_IOMUXC_SAI2_TXFS__GPIO4_IO24 0x10 /* RGMII2_EVENT_OUT */
+ MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x2 /* RGMII2_MDIO */
+ MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x2 /* RGMII2_MDC */
+ MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x12 /* RGMII2_TX_D3 */
+ MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x12 /* RGMII2_TX_D2 */
+ MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x12 /* RGMII2_TX_D1 */
+ MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x12 /* RGMII2_TX_D0 */
+ MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x12 /* RGMII2_TX_CTL */
+ MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x12 /* RGMII2_TXC */
+ MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x90 /* RGMII2_RX_D3 */
+ MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x90 /* RGMII2_RX_D2 */
+ MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x90 /* RGMII2_RX_D1 */
+ MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x90 /* RGMII2_RX_D0 */
+ MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x90 /* RGMII2_RX_CTL */
+ MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x90 /* RGMII2_RXC */
+ >;
+ };
+
+ pinctrl_fec: fecgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI1_RXD2__ENET1_MDC 0x2
+ MX8MP_IOMUXC_SAI1_RXD3__ENET1_MDIO 0x2
+ MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0 0x90
+ MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1 0x90
+ MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2 0x90
+ MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3 0x90
+ MX8MP_IOMUXC_SAI1_TXD7__GPIO4_IO19 0x140
+ MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC 0x90
+ MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0 0x12
+ MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1 0x12
+ MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2 0x14
+ MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3 0x14
+ MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL 0x14
+ MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC 0x14
+ MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL 0x90
+ >;
+ };
+
+ pinctrl_flexspi: flexspigrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_CE0_B__FLEXSPI_A_SS0_B 0x82 /* QSPI_CE */
+ MX8MP_IOMUXC_NAND_ALE__FLEXSPI_A_SCLK 0x1c2 /* QSPI_CLK */
+ MX8MP_IOMUXC_NAND_DATA00__FLEXSPI_A_DATA00 0x82 /* QSPI_DATA_0 */
+ MX8MP_IOMUXC_NAND_DATA01__FLEXSPI_A_DATA01 0x82 /* QSPI_DATA_1 */
+ MX8MP_IOMUXC_NAND_DATA02__FLEXSPI_A_DATA02 0x82 /* QSPI_DATA_2 */
+ MX8MP_IOMUXC_NAND_DATA03__FLEXSPI_A_DATA03 0x82 /* QSPI_DATA_3 */
+ MX8MP_IOMUXC_NAND_DQS__FLEXSPI_A_DQS 0x82 /* QSPI_DQS */
+ >;
+ };
+
+ pinctrl_gpio4: gpio4grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI1_RXD1__GPIO4_IO03 0x40 /* GPIO4 */
+ MX8MP_IOMUXC_SAI2_RXFS__GPIO4_IO21 0x106 /* GPIO5 */
+ MX8MP_IOMUXC_SAI1_RXFS__GPIO4_IO00 0x106 /* GPIO6 */
+ MX8MP_IOMUXC_SAI1_RXD0__GPIO4_IO02 0x106 /* GPIO7 */
+ >;
+ };
+
+ pinctrl_gpio5: gpio5grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SPDIF_EXT_CLK__GPIO5_IO05 0x106 /* GPIO1 */
+ MX8MP_IOMUXC_I2C4_SDA__GPIO5_IO21 0x106 /* GPIO2 */
+ MX8MP_IOMUXC_UART3_RXD__GPIO5_IO26 0x106 /* GPIO3 */
+ >;
+ };
+
+ pinctrl_hdmi: hdmigrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x106 /* HDMI_CEC */
+ MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x106 /* HDMI_SCL */
+ MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x106 /* HDMI_SDA */
+ MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x106 /* HDMI_HPD */
+ >;
+ };
+
+ pinctrl_i2c1_gpio: i2c1gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C1_SDA__GPIO5_IO15 0x1e2
+ MX8MP_IOMUXC_I2C1_SCL__GPIO5_IO14 0x1e2
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA 0x400001c2 /* I2C1_SDA_DNU */
+ MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001c2 /* I2C1_SCL_DNU */
+ >;
+ };
+
+ pinctrl_i2c2_gpio: i2c2gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17 0x1e2
+ MX8MP_IOMUXC_I2C2_SCL__GPIO5_IO16 0x1e2
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c2 /* I2C2_SDA */
+ MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c2 /* I2C2_SCL */
+ >;
+ };
+
+ pinctrl_i2c3_gpio: i2c3gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C3_SDA__GPIO5_IO19 0x1e2
+ MX8MP_IOMUXC_I2C3_SCL__GPIO5_IO18 0x1e2
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c2 /* I2C3_SDA */
+ MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c2 /* I2C3_SCL */
+ >;
+ };
+
+ pinctrl_i2c5_gpio: i2c5gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SPDIF_RX__GPIO5_IO04 0x1e2
+ MX8MP_IOMUXC_SAI5_RXD0__GPIO3_IO21 0x1e2
+ >;
+ };
+
+ pinctrl_i2c5: i2c5grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SPDIF_RX__I2C5_SDA 0x400001c2 /* I2C4_SDA */
+ MX8MP_IOMUXC_SAI5_RXD0__I2C5_SCL 0x400001c2 /* I2C4_SCL */
+ >;
+ };
+
+ pinctrl_i2c6_gpio: i2c6gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI5_RXC__GPIO3_IO20 0x1e2
+ MX8MP_IOMUXC_SAI5_RXFS__GPIO3_IO19 0x1e2
+ >;
+ };
+
+ pinctrl_i2c6: i2c6grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI5_RXC__I2C6_SDA 0x400001c2 /* I2C5_SDA */
+ MX8MP_IOMUXC_SAI5_RXFS__I2C6_SCL 0x400001c2 /* I2C5_SCL */
+ >;
+ };
+
+ pinctrl_pcie0: pcie0grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C4_SCL__PCIE_CLKREQ_B 0x10 /* PCIE1_nCLKREQ */
+ MX8MP_IOMUXC_GPIO1_IO08__GPIO1_IO08 0x40 /* PCIE1_nPERST */
+ >;
+ };
+
+ pinctrl_pmic: pmicirqgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI1_TXD6__GPIO4_IO18 0x140
+ >;
+ };
+
+ pinctrl_pwm1: pwm1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO01__PWM1_OUT 0x106 /* PWM1 */
+ >;
+ };
+
+ pinctrl_pwm2: pwm2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO09__PWM2_OUT 0x106 /* PWM2 */
+ >;
+ };
+
+ pinctrl_pwm3: pwm3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SPDIF_TX__PWM3_OUT 0x106 /* PWM3 */
+ >;
+ };
+
+ pinctrl_pwm4: pwm4grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI3_MCLK__PWM4_OUT 0x106 /* PWM4 */
+ >;
+ };
+
+ pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x40
+ >;
+ };
+
+ pinctrl_sai5: sai5grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI5_MCLK__AUDIOMIX_SAI5_MCLK 0x106 /* SAI1_MCLK */
+ MX8MP_IOMUXC_SAI3_RXFS__AUDIOMIX_SAI5_RX_SYNC 0x106 /* SAI1_RX_SYNC */
+ MX8MP_IOMUXC_SAI3_RXC__AUDIOMIX_SAI5_RX_BCLK 0x106 /* SAI1_RX_BCLK */
+ MX8MP_IOMUXC_SAI3_RXD__AUDIOMIX_SAI5_RX_DATA00 0x106 /* SAI1_RX_DATA */
+ MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_SAI5_TX_SYNC 0x106 /* SAI1_TX_SYNC */
+ MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_SAI5_TX_BCLK 0x106 /* SAI1_TX_BCLK */
+ MX8MP_IOMUXC_SAI2_RXD0__AUDIOMIX_SAI5_TX_DATA00 0x106 /* SAI1_TX_DATA */
+ >;
+ };
+
+ pinctrl_ecspi1: spi1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_ECSPI1_SCLK__ECSPI1_SCLK 0x82 /* SPI1_SCLK */
+ MX8MP_IOMUXC_ECSPI1_MOSI__ECSPI1_MOSI 0x82 /* SPI1_MOSI */
+ MX8MP_IOMUXC_ECSPI1_MISO__ECSPI1_MISO 0x82 /* SPI1_MISO */
+ MX8MP_IOMUXC_ECSPI1_SS0__GPIO5_IO09 0x106 /* SPI1_CS */
+ >;
+ };
+
+ pinctrl_ecspi2: spi2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_ECSPI2_SCLK__ECSPI2_SCLK 0x82 /* SPI2_SCLK */
+ MX8MP_IOMUXC_ECSPI2_MOSI__ECSPI2_MOSI 0x82 /* SPI2_MOSI */
+ MX8MP_IOMUXC_ECSPI2_MISO__ECSPI2_MISO 0x82 /* SPI2_MISO */
+ MX8MP_IOMUXC_ECSPI2_SS0__GPIO5_IO13 0x106 /* SPI2_CS */
+ >;
+ };
+
+ pinctrl_ecspi3: spi3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART1_RXD__ECSPI3_SCLK 0x82 /* SPI3_SCLK */
+ MX8MP_IOMUXC_UART1_TXD__ECSPI3_MOSI 0x82 /* SPI3_MOSI */
+ MX8MP_IOMUXC_UART2_RXD__ECSPI3_MISO 0x82 /* SPI3_MISO */
+ MX8MP_IOMUXC_UART2_RXD__GPIO5_IO24 0x106 /* SPI3_CS */
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI3_TXC__UART2_DTE_RX 0x140 /* UART2_RXD */
+ MX8MP_IOMUXC_SAI3_TXFS__UART2_DTE_TX 0x140 /* UART2_TXD */
+ MX8MP_IOMUXC_SD1_DATA5__UART2_DTE_RTS 0x140 /* UART2_RTS */
+ MX8MP_IOMUXC_SD1_DATA4__UART2_DTE_CTS 0x140 /* UART2_CTS */
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD1_DATA6__UART3_DTE_RX 0x140 /* UART1_RXD */
+ MX8MP_IOMUXC_SD1_DATA7__UART3_DTE_TX 0x140 /* UART1_TXD */
+ MX8MP_IOMUXC_SD1_STROBE__UART3_DTE_RTS 0x140 /* UART1_RTS */
+ MX8MP_IOMUXC_SD1_RESET_B__UART3_DTE_CTS 0x140 /* UART1_CTS */
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART4_RXD__UART4_DCE_RX 0x140 /* UART3_RXD */
+ MX8MP_IOMUXC_UART4_TXD__UART4_DCE_TX 0x140 /* UART3_TXD */
+ >;
+ };
+
+ pinctrl_usb0: usb0grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO12__USB1_OTG_PWR 0x106 /* USB1_PWR_EN */
+ MX8MP_IOMUXC_GPIO1_IO13__USB1_OTG_OC 0x106 /* USB1_OC */
+ MX8MP_IOMUXC_GPIO1_IO10__USB1_OTG_ID 0x106 /* USB1_ID */
+ >;
+ };
+
+ pinctrl_usb1: usb1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO14__USB2_OTG_PWR 0x106 /* USB2_PWR_EN */
+ MX8MP_IOMUXC_GPIO1_IO15__USB2_OTG_OC 0x106 /* USB2_OC */
+ MX8MP_IOMUXC_GPIO1_IO11__USB2_OTG_ID 0x106 /* USB2_ID */
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO07__USDHC1_WP 0x106 /* SDIO_WP */
+ MX8MP_IOMUXC_GPIO1_IO06__USDHC1_CD_B 0x106 /* SDIO_CD */
+ MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x106 /* SDIO_CLK */
+ MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x106 /* SDIO_CMD */
+ MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x106 /* SDIO_DATA0 */
+ MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x106 /* SDIO_DATA1 */
+ MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x106 /* SDIO_DATA2 */
+ MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x106 /* SDIO_DATA3 */
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CD_B__USDHC2_CD_B 0x40 /* SDCARD_CD */
+ MX8MP_IOMUXC_SD2_WP__USDHC2_WP 0x40 /* SDCARD_WP */
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190 /* SDCARD_CLK */
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d0 /* SDCARD_CMD */
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d0 /* SDCARD_DATA0 */
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0 /* SDCARD_DATA1 */
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0 /* SDCARD_DATA2 */
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0 /* SDCARD_DATA3 */
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CD_B__USDHC2_CD_B 0x40 /* SDCARD_CD */
+ MX8MP_IOMUXC_SD2_WP__USDHC2_WP 0x40 /* SDCARD_WP */
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194 /* SDCARD_CLK */
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4 /* SDCARD_CMD */
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4 /* SDCARD_DATA0 */
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4 /* SDCARD_DATA1 */
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4 /* SDCARD_DATA2 */
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4 /* SDCARD_DATA3 */
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CD_B__USDHC2_CD_B 0x40 /* SDCARD_CD */
+ MX8MP_IOMUXC_SD2_WP__USDHC2_WP 0x40 /* SDCARD_WP */
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x196 /* SDCARD_CLK */
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d6 /* SDCARD_CMD */
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d6 /* SDCARD_DATA0 */
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6 /* SDCARD_DATA1 */
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6 /* SDCARD_DATA2 */
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6 /* SDCARD_DATA3 */
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x190
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d0
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d0
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d0
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d0
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d0
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d0
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d0
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d0
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x190
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x194
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d4
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d4
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d4
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d4
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d4
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d4
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d4
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d4
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x194
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d4
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x196
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d2
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d2
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d2
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d2
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d2
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d2
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d2
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d2
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x196
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d6
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO02__WDOG1_WDOG_B 0xe6
+ >;
+ };
+};
+
+&pcie { /* FPSC PCIE1 */
+ pinctrl-0 = <&pinctrl_pcie0>;
+ pinctrl-names = "default";
+};
+
+&pwm1 { /* FPSC PWM1 */
+ pinctrl-0 = <&pinctrl_pwm1>;
+ pinctrl-names = "default";
+};
+
+&pwm2 { /* FPSC PWM2 */
+ pinctrl-0 = <&pinctrl_pwm2>;
+ pinctrl-names = "default";
+};
+
+&pwm3 { /* FPSC PWM3 */
+ pinctrl-0 = <&pinctrl_pwm3>;
+ pinctrl-names = "default";
+};
+
+&pwm4 { /* FPSC PWM4 */
+ pinctrl-0 = <&pinctrl_pwm4>;
+ pinctrl-names = "default";
+};
+
+&sai5 { /* FPSC SAI1 */
+ pinctrl-0 = <&pinctrl_sai5>;
+ pinctrl-names = "default";
+};
+
+&uart2 { /* FPSC UART2 */
+ pinctrl-0 = <&pinctrl_uart2>;
+ pinctrl-names = "default";
+ fsl,dte-mode;
+};
+
+&uart3 { /* FPSC UART1 */
+ pinctrl-0 = <&pinctrl_uart3>;
+ pinctrl-names = "default";
+ fsl,dte-mode;
+};
+
+&uart4 { /* FPSC UART3 */
+ pinctrl-0 = <&pinctrl_uart4>;
+ pinctrl-names = "default";
+};
+
+&usb3_0 { /* FPSC USB1 */
+ pinctrl-0 = <&pinctrl_usb0>;
+ pinctrl-names = "default";
+};
+
+&usb3_1 { /* FPSC USB2 */
+ pinctrl-0 = <&pinctrl_usb1>;
+ pinctrl-names = "default";
+};
+
+&usdhc1 { /* FPSC SDIO */
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-names = "default";
+};
+
+&usdhc2 { /* FPSC SDCARD */
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ sd-uhs-sdr104;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ vqmmc-supply = <&ldo5>;
+};
+
+/* eMMC */
+&usdhc3 {
+ assigned-clocks = <&clk IMX8MP_CLK_USDHC3_ROOT>;
+ assigned-clock-rates = <400000000>;
+ bus-width = <8>;
+ non-removable;
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-0 = <&pinctrl_wdog>;
+ pinctrl-names = "default";
+ fsl,ext-reset-output;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-toradex-smarc-dev.dts b/arch/arm64/boot/dts/freescale/imx8mp-toradex-smarc-dev.dts
new file mode 100644
index 000000000000..55b8c5c14fb4
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-toradex-smarc-dev.dts
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/* Copyright (C) 2025 Toradex */
+
+/dts-v1/;
+
+#include <dt-bindings/pwm/pwm.h>
+
+#include "imx8mp-toradex-smarc.dtsi"
+
+/ {
+ model = "Toradex SMARC iMX8M Plus on Toradex SMARC Development Board";
+ compatible = "toradex,smarc-imx8mp-dev",
+ "toradex,smarc-imx8mp",
+ "fsl,imx8mp";
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "J64";
+ type = "a";
+
+ port {
+ native_hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
+ reg_carrier_1p8v: regulator-carrier-1p8v {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "On-carrier 1V8";
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,bitclock-master = <&codec_dai>;
+ simple-audio-card,format = "i2s";
+ simple-audio-card,frame-master = <&codec_dai>;
+ simple-audio-card,mclk-fs = <256>;
+ simple-audio-card,name = "tdx-smarc-wm8904";
+ simple-audio-card,routing =
+ "Headphone Jack", "HPOUTL",
+ "Headphone Jack", "HPOUTR",
+ "IN2L", "Line In Jack",
+ "IN2R", "Line In Jack",
+ "Microphone Jack", "MICBIAS",
+ "IN1L", "Microphone Jack";
+ simple-audio-card,widgets =
+ "Microphone", "Microphone Jack",
+ "Headphone", "Headphone Jack",
+ "Line", "Line In Jack";
+
+ codec_dai: simple-audio-card,codec {
+ clocks = <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1>;
+ sound-dai = <&wm8904_1a>;
+ };
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai1>;
+ };
+ };
+};
+
+&aud2htx {
+ status = "okay";
+};
+
+/* SMARC SPI0 */
+&ecspi1 {
+ status = "okay";
+};
+
+/* SMARC GBE0 */
+&eqos {
+ status = "okay";
+};
+
+/* SMARC GBE1 */
+&fec {
+ status = "okay";
+};
+
+/* SMARC CAN1 */
+&flexcan1 {
+ status = "okay";
+};
+
+/* SMARC CAN0 */
+&flexcan2 {
+ status = "okay";
+};
+
+&gpio1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio7>,
+ <&pinctrl_gpio8>,
+ <&pinctrl_gpio9>,
+ <&pinctrl_gpio10>,
+ <&pinctrl_gpio11>,
+ <&pinctrl_gpio12>,
+ <&pinctrl_gpio13>;
+};
+
+&gpio3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lvds_dsi_sel>;
+};
+
+&gpio4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio4>, <&pinctrl_gpio6>;
+};
+
+&hdmi_pvi {
+ status = "okay";
+};
+
+/* SMARC HDMI */
+&hdmi_tx {
+ status = "okay";
+
+ ports {
+ port@1 {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&native_hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
+/* SMARC I2C_LCD */
+&i2c2 {
+ status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9543";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* I2C on DSI Connector Pins 4/6 */
+ i2c_dsi_0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ /* I2C on DSI Connector Pins 52/54 */
+ i2c_dsi_1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+/* SMARC I2C_CAM0 */
+&i2c3 {
+ status = "okay";
+};
+
+/* SMARC I2C_GP */
+&i2c4 {
+ /* Audio Codec */
+ wm8904_1a: audio-codec@1a {
+ compatible = "wlf,wm8904";
+ reg = <0x1a>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai1>, <&pinctrl_sai1_mclk>;
+ #sound-dai-cells = <0>;
+ clocks = <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1>;
+ clock-names = "mclk";
+ AVDD-supply = <&reg_carrier_1p8v>;
+ CPVDD-supply = <&reg_carrier_1p8v>;
+ DBVDD-supply = <&reg_carrier_1p8v>;
+ DCVDD-supply = <&reg_carrier_1p8v>;
+ MICVDD-supply = <&reg_carrier_1p8v>;
+ };
+
+ /* On-Carrier Temperature Sensor */
+ temperature-sensor@4f {
+ compatible = "ti,tmp1075";
+ reg = <0x4f>;
+ };
+
+ /* On-Carrier EEPROM */
+ eeprom@57 {
+ compatible = "st,24c02", "atmel,24c02";
+ reg = <0x57>;
+ pagesize = <16>;
+ };
+};
+
+/* SMARC I2C_CAM1 */
+&i2c5 {
+ status = "okay";
+};
+
+/* SMARC I2C_PM */
+&i2c6 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ /* Fan controller */
+ fan_controller: fan@18 {
+ compatible = "ti,amc6821";
+ reg = <0x18>;
+ #pwm-cells = <2>;
+
+ fan {
+ pwms = <&fan_controller 40000 PWM_POLARITY_INVERTED>;
+ };
+ };
+
+ /* Current measurement into module VDD */
+ hwmon@40 {
+ compatible = "ti,ina226";
+ reg = <0x40>;
+ shunt-resistor = <5000>;
+ };
+};
+
+&lcdif3 {
+ status = "okay";
+};
+
+/* SMARC PCIE_A, M.2 Key B */
+&pcie {
+ status = "okay";
+};
+
+&pcie_phy {
+ status = "okay";
+};
+
+/* SMARC LCD1_BKLT_PWM */
+&pwm1 {
+ status = "okay";
+};
+
+/* SMARC LCD0_BKLT_PWM */
+&pwm2 {
+ status = "okay";
+};
+
+/* SMARC I2S0 */
+&sai1 {
+ assigned-clocks = <&clk IMX8MP_CLK_SAI1>;
+ assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <24576000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
+/* SMARC HDMI Audio */
+&sound_hdmi {
+ status = "okay";
+};
+
+/* SMARC SER0, RS485. Optional M.2 Key E */
+&uart1 {
+ linux,rs485-enabled-at-boot-time;
+ rs485-rts-active-low;
+ rs485-rx-during-tx;
+ status = "okay";
+};
+
+/* SMARC SER2 */
+&uart2 {
+ status = "okay";
+};
+
+/* SMARC SER1, used as the Linux Console */
+&uart4 {
+ status = "okay";
+};
+
+/* SMARC USB0 */
+&usb3_0 {
+ status = "okay";
+};
+
+/* SMARC USB1..4 */
+&usb3_1 {
+ status = "okay";
+};
+
+&usb3_phy0 {
+ status = "okay";
+};
+
+&usb3_phy1 {
+ status = "okay";
+};
+
+/* SMARC SDIO */
+&usdhc2 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-toradex-smarc.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-toradex-smarc.dtsi
new file mode 100644
index 000000000000..22f6daabdb90
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-toradex-smarc.dtsi
@@ -0,0 +1,1314 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/* Copyright (C) 2025 Toradex */
+
+#include <dt-bindings/phy/phy-imx8-pcie.h>
+#include <dt-bindings/net/ti-dp83867.h>
+#include "imx8mp.dtsi"
+
+/ {
+ aliases {
+ can0 = &flexcan2;
+ can1 = &flexcan1;
+ ethernet0 = &eqos;
+ ethernet1 = &fec;
+ mmc0 = &usdhc3;
+ mmc1 = &usdhc2;
+ mmc2 = &usdhc1;
+ rtc0 = &rtc_i2c;
+ rtc1 = &snvs_rtc;
+ serial0 = &uart1;
+ serial1 = &uart4;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ };
+
+ chosen {
+ stdout-path = &uart4;
+ };
+
+ connector {
+ compatible = "gpio-usb-b-connector", "usb-b-connector";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_id>;
+ id-gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+ label = "USB0";
+ self-powered;
+ type = "micro";
+ vbus-supply = <&reg_usb0_vbus>;
+
+ port {
+ usb_dr_connector: endpoint {
+ remote-endpoint = <&usb3_0_dwc>;
+ };
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sleep>;
+
+ smarc_key_sleep: key-sleep {
+ gpios = <&gpio3 1 GPIO_ACTIVE_LOW>;
+ label = "SMARC_SLEEP#";
+ wakeup-source;
+ linux,code = <KEY_SLEEP>;
+ };
+
+ smarc_switch_lid: switch-lid {
+ gpios = <&som_ec_gpio_expander 2 GPIO_ACTIVE_LOW>;
+ label = "SMARC_LID#";
+ linux,code = <SW_LID>;
+ linux,input-type = <EV_SW>;
+ };
+ };
+
+ reg_usb0_vbus: regulator-usb0-vbus {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_en_oc>;
+ gpios = <&gpio1 12 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-name = "USB0_EN_OC#";
+ };
+
+ reg_usb1_vbus: regulator-usb1-vbus {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1_en_oc>;
+ gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-name = "USB2_EN_OC#";
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2-vmmc {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2_pwr_en>;
+ gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ off-on-delay-us = <100000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "3V3_SD";
+ startup-delay-us = <20000>;
+ };
+
+ reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
+ compatible = "regulator-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2_vsel>;
+ gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
+ states = <1800000 0x1>,
+ <3300000 0x0>;
+ regulator-name = "PMIC_USDHC_VSELECT";
+ vin-supply = <&reg_sd_3v3_1v8>;
+ };
+
+ reg_wifi_en: regulator-wifi-en {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wifi_pwr_en>;
+ gpios = <&gpio3 14 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "CTRL_EN_WIFI";
+ startup-delay-us = <2000>;
+ };
+
+ reserved-memory {
+ linux,cma {
+ size = <0 0x20000000>;
+ alloc-ranges = <0 0x40000000 0 0x80000000>;
+ };
+ };
+
+ sound_hdmi: sound-hdmi {
+ compatible = "fsl,imx-audio-hdmi";
+ model = "audio-hdmi";
+ audio-cpu = <&aud2htx>;
+ hdmi-out;
+ status = "disabled";
+ };
+};
+
+&A53_0 {
+ cpu-supply = <&reg_vdd_arm>;
+};
+
+&A53_1 {
+ cpu-supply = <&reg_vdd_arm>;
+};
+
+&A53_2 {
+ cpu-supply = <&reg_vdd_arm>;
+};
+
+&A53_3 {
+ cpu-supply = <&reg_vdd_arm>;
+};
+
+/* SMARC SPI0 */
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>, <&gpio4 28 GPIO_ACTIVE_LOW>;
+};
+
+/* SMARC SPI1 */
+&ecspi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi2>, <&pinctrl_tpm_cs>;
+ cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>,
+ <&gpio4 3 GPIO_ACTIVE_LOW>,
+ <&gpio3 6 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ tpm@2 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ reg = <2>;
+ spi-max-frequency = <18500000>;
+ };
+};
+
+/* SMARC GBE0 */
+&eqos {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos>,
+ <&pinctrl_eth_mdio>,
+ <&pinctrl_eqos_1588_event>;
+ phy-handle = <&eqos_phy>;
+ phy-mode = "rgmii-id";
+ snps,force_thresh_dma_mode;
+ snps,mtl-rx-config = <&mtl_rx_setup>;
+ snps,mtl-tx-config = <&mtl_tx_setup>;
+
+ mdio: mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <5>;
+
+ queue0 {
+ snps,dcb-algorithm;
+ snps,priority = <0x1>;
+ snps,map-to-dma-channel = <0>;
+ };
+
+ queue1 {
+ snps,dcb-algorithm;
+ snps,priority = <0x2>;
+ snps,map-to-dma-channel = <1>;
+ };
+
+ queue2 {
+ snps,dcb-algorithm;
+ snps,priority = <0x4>;
+ snps,map-to-dma-channel = <2>;
+ };
+
+ queue3 {
+ snps,dcb-algorithm;
+ snps,priority = <0x8>;
+ snps,map-to-dma-channel = <3>;
+ };
+
+ queue4 {
+ snps,dcb-algorithm;
+ snps,priority = <0xf0>;
+ snps,map-to-dma-channel = <4>;
+ };
+ };
+
+ mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <5>;
+
+ queue0 {
+ snps,dcb-algorithm;
+ snps,priority = <0x1>;
+ };
+
+ queue1 {
+ snps,dcb-algorithm;
+ snps,priority = <0x2>;
+ };
+
+ queue2 {
+ snps,dcb-algorithm;
+ snps,priority = <0x4>;
+ };
+
+ queue3 {
+ snps,dcb-algorithm;
+ snps,priority = <0x8>;
+ };
+
+ queue4 {
+ snps,dcb-algorithm;
+ snps,priority = <0xf0>;
+ };
+ };
+};
+
+/* SMARC GBE1 */
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec>, <&pinctrl_fec_1588_event>;
+ phy-handle = <&fec_phy>;
+ phy-mode = "rgmii-id";
+ fsl,magic-packet;
+};
+
+/* SMARC CAN1 */
+&flexcan1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+};
+
+/* SMARC CAN0 */
+&flexcan2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan2>;
+};
+
+&gpio1 {
+ gpio-line-names = "SMARC_GPIO7", /* 0 */
+ "SMARC_GPIO8",
+ "",
+ "PMIC_INT#",
+ "PMIC_USDHC_VSELECT",
+ "SMARC_GPIO9",
+ "SMARC_GPIO10",
+ "SMARC_GPIO11",
+ "SMARC_GPIO12",
+ "",
+ "SMARC_GPIO5", /* 10 */
+ "",
+ "SMARC_USB0_EN_OC#",
+ "SMARC_GPIO13",
+ "SMARC_USB2_EN_OC#";
+};
+
+&gpio2 {
+ gpio-line-names = "", /* 0 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 10 */
+ "",
+ "SMARC_SDIO_CD#",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "SMARC_SDIO_PWR_EN",
+ "SMARC_SDIO_WP"; /* 20 */
+};
+
+&gpio3 {
+ gpio-line-names = "ETH_0_INT#", /* 0 */
+ "SLEEP#",
+ "",
+ "",
+ "",
+ "",
+ "TPM_CS#",
+ "LVDS_DSI_SEL",
+ "MCU_INT#",
+ "GPIO_EX_INT#",
+ "", /* 10 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "SMARC_SMB_ALERT#",
+ "",
+ "",
+ "",
+ "SMARC_I2C_PM_DAT", /* 20 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "SMARC_I2C_PM_CK";
+
+ lvds_dsi_mux_hog: lvds-dsi-mux-hog {
+ gpio-hog;
+ gpios = <7 GPIO_ACTIVE_HIGH>;
+ line-name = "LVDS_DSI_SEL";
+ /* LVDS_DSI_SEL as DSI */
+ output-low;
+ };
+};
+
+&gpio4 {
+ gpio-line-names = "SMARC_PCIE_WAKE#", /* 0 */
+ "",
+ "",
+ "SMARC_SPI1_CS1#",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 10 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "SMARC_GPIO4",
+ "SMARC_PCIE_A_RST#",
+ "", /* 20 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "SMARC_SPI0_CS1#",
+ "SMARC_GPIO6";
+};
+
+&gpio5 {
+ gpio-line-names = "", /* 0 */
+ "",
+ "SMARC_USB0_OTG_ID",
+ "SMARC_I2C_CAM1_CK",
+ "SMARC_I2C_CAM1_DAT",
+ "",
+ "",
+ "",
+ "",
+ "SMARC_SPI0_CS0#",
+ "", /* 10 */
+ "",
+ "",
+ "SMARC_SPI1_CS0#",
+ "CTRL_I2C_SCL",
+ "CTRL_I2C_SDA",
+ "SMARC_I2C_LCD_CK",
+ "SMARC_I2C_LCD_DAT",
+ "SMARC_I2C_CAM0_CK",
+ "SMARC_I2C_CAM0_DAT",
+ "SMARC_I2C_GP_CK", /* 20 */
+ "SMARC_I2C_GP_DAT";
+};
+
+/* SMARC HDMI */
+&hdmi_tx {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+};
+
+/* On-module I2C */
+&i2c1 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ clock-frequency = <400000>;
+ scl-gpios = <&gpio5 14 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ single-master;
+ status = "okay";
+
+ som_gpio_expander: gpio@21 {
+ compatible = "nxp,pcal6408";
+ reg = <0x21>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcal6408>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ interrupt-parent = <&gpio3>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-line-names =
+ "SMARC_GPIO0",
+ "SMARC_GPIO1",
+ "SMARC_GPIO2",
+ "SMARC_GPIO3",
+ "SMARC_LCD0_VDD_EN",
+ "SMARC_LCD0_BKLT_EN",
+ "SMARC_LCD1_VDD_EN",
+ "SMARC_LCD1_BKLT_EN";
+ };
+
+ pca9450: pmic@25 {
+ compatible = "nxp,pca9450c";
+ reg = <0x25>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ BUCK1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1000000>;
+ regulator-min-microvolt = <805000>;
+ regulator-name = "+VDD_SOC (PMIC BUCK1)";
+ regulator-ramp-delay = <3125>;
+ };
+
+ reg_vdd_arm: BUCK2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1000000>;
+ regulator-min-microvolt = <805000>;
+ regulator-name = "+VDD_ARM (PMIC BUCK2)";
+ regulator-ramp-delay = <3125>;
+ nxp,dvs-run-voltage = <950000>;
+ nxp,dvs-standby-voltage = <850000>;
+ };
+
+ reg_3v3: BUCK4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "+V3.3 (PMIC BUCK4)";
+ };
+
+ reg_1v8: BUCK5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "+V1.8 (PMIC BUCK5)";
+ };
+
+ BUCK6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1155000>;
+ regulator-min-microvolt = <1045000>;
+ regulator-name = "+VDD_DDR (PMIC BUCK6)";
+ };
+
+ LDO1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1950000>;
+ regulator-min-microvolt = <1710000>;
+ regulator-name = "+V1.8_SNVS (PMIC LDO1)";
+ };
+
+ LDO3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "+V1.8A (PMIC LDO3)";
+ };
+
+ LDO4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "+V3.3_ADC (PMIC LDO4)";
+ };
+
+ reg_sd_3v3_1v8: LDO5 {
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "+V3.3_1.8_SD (PMIC LDO5)";
+ };
+ };
+ };
+
+ embedded-controller@28 {
+ compatible = "toradex,smarc-imx8mp-ec", "toradex,smarc-ec";
+ reg = <0x28>;
+ };
+
+ som_ec_gpio_expander: gpio@29 {
+ compatible = "toradex,ecgpiol16", "nxp,pcal6416";
+ reg = <0x29>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mcu_int>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ interrupt-parent = <&gpio3>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-line-names =
+ "SMARC_CHARGER_PRSNT#",
+ "SMARC_CHARGING#",
+ "SMARC_LID#",
+ "SMARC_BATLOW#";
+ };
+
+ rtc_i2c: rtc@32 {
+ compatible = "epson,rx8130";
+ reg = <0x32>;
+ };
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp1075";
+ reg = <0x48>;
+ };
+
+ eeprom@50 {
+ compatible = "st,24c02", "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+};
+
+/* SMARC I2C_LCD */
+&i2c2 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ clock-frequency = <100000>;
+ scl-gpios = <&gpio5 16 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ single-master;
+};
+
+/* SMARC I2C_CAM0 */
+&i2c3 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ pinctrl-1 = <&pinctrl_i2c3_gpio>;
+ clock-frequency = <400000>;
+ scl-gpios = <&gpio5 18 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 19 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ single-master;
+};
+
+/* SMARC I2C_GP */
+&i2c4 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ pinctrl-1 = <&pinctrl_i2c4_gpio>;
+ clock-frequency = <400000>;
+ scl-gpios = <&gpio5 20 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 21 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ single-master;
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "st,24c32", "atmel,24c32";
+ reg = <0x50>;
+ pagesize = <32>;
+ };
+};
+
+/* SMARC I2C_CAM1 */
+&i2c5 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c5>;
+ pinctrl-1 = <&pinctrl_i2c5_gpio>;
+ clock-frequency = <400000>;
+ scl-gpios = <&gpio5 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 4 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ single-master;
+};
+
+/* SMARC I2C_PM */
+&i2c6 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c6>;
+ pinctrl-1 = <&pinctrl_i2c6_gpio>;
+ clock-frequency = <400000>;
+ scl-gpios = <&gpio3 28 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio3 20 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ single-master;
+};
+
+&mdio {
+ eqos_phy: ethernet-phy@1 {
+ reg = <1>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ };
+
+ fec_phy: ethernet-phy@2 {
+ reg = <2>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ };
+};
+
+/* SMARC PCIE_A */
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+ reset-gpios = <&gpio4 19 GPIO_ACTIVE_LOW>;
+};
+
+&pcie_phy {
+ clocks = <&hsio_blk_ctrl>;
+ clock-names = "ref";
+ fsl,clkreq-unsupported;
+ fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_OUTPUT>;
+};
+
+/* SMARC LCD1_BKLT_PWM */
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd1_bklt_pwm1>;
+};
+
+/* SMARC LCD0_BKLT_PWM */
+&pwm2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd0_bklt_pwm2>;
+};
+
+/* SMARC GPIO5 as PWM */
+&pwm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio5_pwm>;
+};
+
+&snvs_pwrkey {
+ status = "okay";
+};
+
+/* SMARC SER0 */
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ uart-has-rtscts;
+};
+
+/* SMARC SER2 */
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ uart-has-rtscts;
+};
+
+/* On-module Bluetooth, optional SMARC SER3 */
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_bt_uart>;
+ uart-has-rtscts;
+ status = "okay";
+
+ som_bt: bluetooth {
+ compatible = "mrvl,88w8997";
+ max-speed = <921600>;
+ };
+};
+
+/* SMARC SER1, used as the Linux Console */
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+};
+
+/* SMARC USB0 */
+&usb3_0 {
+ fsl,disable-port-power-control;
+};
+
+/* SMARC USB1..4 */
+&usb3_1 {
+ fsl,disable-port-power-control;
+};
+
+&usb3_phy1 {
+ vbus-supply = <&reg_usb1_vbus>;
+};
+
+&usb_dwc3_0 {
+ adp-disable;
+ dr_mode = "otg";
+ hnp-disable;
+ maximum-speed = "high-speed";
+ srp-disable;
+ usb-role-switch;
+
+ port {
+ usb3_0_dwc: endpoint {
+ remote-endpoint = <&usb_dr_connector>;
+ };
+ };
+};
+
+&usb_dwc3_1 {
+ dr_mode = "host";
+};
+
+/* On-module Wi-Fi */
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ keep-power-in-suspend;
+ non-removable;
+ vmmc-supply = <&reg_wifi_en>;
+ status = "okay";
+};
+
+/* SMARC SDIO */
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
+ pinctrl-0 = <&pinctrl_usdhc2>,
+ <&pinctrl_usdhc2_cd>,
+ <&pinctrl_usdhc2_wp>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>,
+ <&pinctrl_usdhc2_cd>,
+ <&pinctrl_usdhc2_wp>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>,
+ <&pinctrl_usdhc2_cd>,
+ <&pinctrl_usdhc2_wp>;
+ pinctrl-3 = <&pinctrl_usdhc2_sleep>,
+ <&pinctrl_usdhc2_cd_sleep>,
+ <&pinctrl_usdhc2_wp>;
+ assigned-clocks = <&clk IMX8MP_CLK_USDHC2>;
+ assigned-clock-rates = <400000000>;
+ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ vqmmc-supply = <&reg_usdhc2_vqmmc>;
+ wp-gpios = <&gpio2 20 GPIO_ACTIVE_HIGH>;
+};
+
+/* On-module eMMC */
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ assigned-clocks = <&clk IMX8MP_CLK_USDHC3_ROOT>;
+ assigned-clock-rates = <400000000>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ /* On-module Bluetooth */
+ pinctrl_bt_uart: btuartgrp {
+ fsl,pins = <MX8MP_IOMUXC_SD1_DATA6__UART3_DCE_TX 0x1c4>, /* WiFi_UART_TXD */
+ <MX8MP_IOMUXC_SD1_DATA7__UART3_DCE_RX 0x1c4>, /* WiFi_UART_RXD */
+ <MX8MP_IOMUXC_SD1_STROBE__UART3_DCE_CTS 0x1c4>, /* WiFi_UART_RTS */
+ <MX8MP_IOMUXC_SD1_RESET_B__UART3_DCE_RTS 0x1c4>; /* WiFi_UART_CTS */
+ };
+
+ /* SMARC CAM_MCK */
+ pinctrl_csi_mclk: csimclkgrp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO15__CCM_CLKO2 0x16>; /* SMARC S6 - CAM_MCK */
+ };
+
+ /* SMARC SPI0 */
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <MX8MP_IOMUXC_ECSPI1_MISO__ECSPI1_MISO 0x1c4>, /* SMARC P45 - SPI0_DIN */
+ <MX8MP_IOMUXC_ECSPI1_MOSI__ECSPI1_MOSI 0x4>, /* SMARC P46 - SPI0_DO */
+ <MX8MP_IOMUXC_ECSPI1_SCLK__ECSPI1_SCLK 0x4>, /* SMARC P44 - SPI0_CK */
+ <MX8MP_IOMUXC_ECSPI1_SS0__GPIO5_IO09 0x1c4>, /* SMARC P43 - SPI0_CS0# */
+ <MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28 0x1c4>; /* SMARC P31 - SPI0_CS1# */
+ };
+
+ /* SMARC SPI1 */
+ pinctrl_ecspi2: ecspi2grp {
+ fsl,pins = <MX8MP_IOMUXC_ECSPI2_MISO__ECSPI2_MISO 0x1c4>, /* SMARC P56 - SPI1_DIN */
+ <MX8MP_IOMUXC_ECSPI2_MOSI__ECSPI2_MOSI 0x4>, /* SMARC P57 - SPI1_DO */
+ <MX8MP_IOMUXC_ECSPI2_SCLK__ECSPI2_SCLK 0x4>, /* SMARC P58 - SPI1_CK */
+ <MX8MP_IOMUXC_ECSPI2_SS0__GPIO5_IO13 0x1c4>, /* SMARC P54 - SPI1_CS0# */
+ <MX8MP_IOMUXC_SAI1_RXD1__GPIO4_IO03 0x1c4>; /* SMARC P55 - SPI1_CS1# */
+ };
+
+ /* ETH_0 RGMII (On-module PHY) */
+ pinctrl_eqos: eqosgrp {
+ fsl,pins = <MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x90>, /* ETH0_RGMII_RXD0 */
+ <MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x90>, /* ETH0_RGMII_RXD1 */
+ <MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x90>, /* ETH0_RGMII_RXD2 */
+ <MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x90>, /* ETH0_RGMII_RXD3 */
+ <MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x90>, /* ETH0_RGMII_RXC */
+ <MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x90>, /* ETH0_RGMII_RX_CTL */
+ <MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x16>, /* ETH0_RGMII_TXD0 */
+ <MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x16>, /* ETH0_RGMII_TXD1 */
+ <MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x16>, /* ETH0_RGMII_TXD2 */
+ <MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x16>, /* ETH0_RGMII_TXD3 */
+ <MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x16>, /* ETH0_RGMII_TX_CTL */
+ <MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x16>; /* ETH0_RGMII_TXC */
+ };
+
+ /* SMARC GBE0_SDP */
+ pinctrl_eqos_1588_event: eqos1588eventgrp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO09__ENET_QOS_1588_EVENT0_OUT 0x4>; /* SMARC P6 - GBE0_SDP */
+ };
+
+ /* ETH_0_MDIO and ETH_0_INT# shared between ETH_PHY0 and ETH_PHY1 */
+ pinctrl_eth_mdio: ethmdiogrp {
+ fsl,pins = <MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x2>, /* ETH_0_MDC */
+ <MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x2>, /* ETH_0_MDIO */
+ <MX8MP_IOMUXC_NAND_ALE__GPIO3_IO00 0x80>; /* ETH_0_INT# */
+ };
+
+ /* ETH_1 RGMII (On-module PHY) */
+ pinctrl_fec: fecgrp {
+ fsl,pins = <MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0 0x90>, /* ETH1_RGMII_RXD0 */
+ <MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1 0x90>, /* ETH1_RGMII_RXD1 */
+ <MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2 0x90>, /* ETH1_RGMII_RXD2 */
+ <MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3 0x90>, /* ETH1_RGMII_RXD3 */
+ <MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC 0x90>, /* ETH1_RGMII_RXC */
+ <MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL 0x90>, /* ETH1_RGMII_RX_CTL */
+ <MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0 0x16>, /* ETH1_RGMII_TXD0 */
+ <MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1 0x16>, /* ETH1_RGMII_TXD1 */
+ <MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2 0x16>, /* ETH1_RGMII_TXD2 */
+ <MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3 0x16>, /* ETH1_RGMII_TXD3 */
+ <MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL 0x16>, /* ETH1_RGMII_TX_CTL */
+ <MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC 0x16>; /* ETH1_RGMII_TXC */
+ };
+
+ /* SMARC GBE1_SDP */
+ pinctrl_fec_1588_event: fec1588eventgrp {
+ fsl,pins = <MX8MP_IOMUXC_SAI1_RXC__ENET1_1588_EVENT0_OUT 0x4>; /* SMARC P5 - GBE1_SDP */
+ };
+
+ /* SMARC CAN1 */
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI2_TXC__CAN1_RX 0x154>, /* SMARC P146 - CAN1_RX */
+ <MX8MP_IOMUXC_SAI2_RXC__CAN1_TX 0x154>; /* SMARC P145 - CAN1_TX */
+ };
+
+ /* SMARC CAN0 */
+ pinctrl_flexcan2: flexcan2grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI2_MCLK__CAN2_RX 0x154>, /* SMARC P144 - CAN0_RX */
+ <MX8MP_IOMUXC_SAI2_TXD0__CAN2_TX 0x154>; /* SMARC P143 - CAN0_TX */
+ };
+
+ /* SMARC GPIO4 */
+ pinctrl_gpio4: gpio4grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI1_TXD6__GPIO4_IO18 0x144>; /* SMARC P112 - GPIO4 */
+ };
+
+ /* SMARC GPIO5 */
+ pinctrl_gpio5: gpio5grp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO10__GPIO1_IO10 0x144>; /* SMARC P113 - GPIO5 */
+ };
+
+ /* SMARC GPIO5 as PWM */
+ pinctrl_gpio5_pwm: gpio5pwmgrp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO10__PWM3_OUT 0x12>; /* SMARC P113 - PWM_OUT */
+ };
+
+ /* SMARC GPIO6 */
+ pinctrl_gpio6: gpio6grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI3_RXC__GPIO4_IO29 0x144>; /* SMARC P114 - GPIO6 */
+ };
+
+ /* SMARC GPIO7 */
+ pinctrl_gpio7: gpio7grp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO00__GPIO1_IO00 0x144>; /* SMARC P115 - GPIO7 */
+ };
+
+ /* SMARC GPIO8 */
+ pinctrl_gpio8: gpio8grp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO01__GPIO1_IO01 0x144>; /* SMARC P116 - GPIO8 */
+ };
+
+ /* SMARC GPIO9 */
+ pinctrl_gpio9: gpio9grp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO05__GPIO1_IO05 0x144>; /* SMARC P117 - GPIO9 */
+ };
+
+ /* SMARC GPIO10 */
+ pinctrl_gpio10: gpio10grp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO06__GPIO1_IO06 0x144>; /* SMARC P118 - GPIO10 */
+ };
+
+ /* SMARC GPIO11 */
+ pinctrl_gpio11: gpio11grp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO07__GPIO1_IO07 0x144>; /* SMARC P119 - GPIO11 */
+ };
+
+ /* SMARC GPIO12 */
+ pinctrl_gpio12: gpio12grp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO08__GPIO1_IO08 0x144>; /* SMARC S142 - GPIO12 */
+ };
+
+ /* SMARC GPIO13 */
+ pinctrl_gpio13: gpio13grp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO13__GPIO1_IO13 0x144>; /* SMARC S123 - GPIO13 */
+ };
+
+ /* SMARC HDMI */
+ pinctrl_hdmi: hdmigrp {
+ fsl,pins = <MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x400001c6>, /* SMARC P105 - HDMI_CTRL_CK */
+ <MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x400001c6>, /* SMARC P106 - HDMI_CTRL_DAT */
+ <MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x180>; /* SMARC P104 - HDMI_HPD */
+ };
+
+ /* On-module I2C */
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001c6>, /* CTRL_I2C_SCL */
+ <MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA 0x400001c6>; /* CTRL_I2C_SDA */
+ };
+
+ /* On-module I2C as GPIOs */
+ pinctrl_i2c1_gpio: i2c1gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_I2C1_SCL__GPIO5_IO14 0x400001c6>, /* CTRL_I2C_SCL */
+ <MX8MP_IOMUXC_I2C1_SDA__GPIO5_IO15 0x400001c6>; /* CTRL_I2C_SDA */
+ };
+
+ /* SMARC I2C_LCD */
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c6>, /* SMARC S139 - I2C_LCD_CK */
+ <MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c6>; /* SMARC S140 - I2C_LCD_DAT */
+ };
+
+ /* SMARC I2C_LCD as GPIOs */
+ pinctrl_i2c2_gpio: i2c2gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_I2C2_SCL__GPIO5_IO16 0x400001c6>, /* SMARC S139 - I2C_LCD_CK */
+ <MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17 0x400001c6>; /* SMARC S140 - I2C_LCD_DAT */
+ };
+
+ /* SMARC I2C_CAM0 */
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c6>, /* SMARC S5 - I2C_CAM0_CK */
+ <MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c6>; /* SMARC S7 - I2C_CAM0_DAT */
+ };
+
+ /* SMARC I2C_CAM0 as GPIOs */
+ pinctrl_i2c3_gpio: i2c3gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_I2C3_SCL__GPIO5_IO18 0x400001c6>, /* SMARC S5 - I2C_CAM0_CK */
+ <MX8MP_IOMUXC_I2C3_SDA__GPIO5_IO19 0x400001c6>; /* SMARC S7 - I2C_CAM0_DAT */
+ };
+
+ /* SMARC I2C_GP */
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <MX8MP_IOMUXC_I2C4_SCL__I2C4_SCL 0x400001c6>, /* SMARC S48 - I2C_GP_CK */
+ <MX8MP_IOMUXC_I2C4_SDA__I2C4_SDA 0x400001c6>; /* SMARC S49 - I2C_GP_DAT */
+ };
+
+ /* SMARC I2C_GP as GPIOs */
+ pinctrl_i2c4_gpio: i2c4gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_I2C4_SCL__GPIO5_IO20 0x400001c6>, /* SMARC S48 - I2C_GP_CK */
+ <MX8MP_IOMUXC_I2C4_SDA__GPIO5_IO21 0x400001c6>; /* SMARC S49 - I2C_GP_DAT */
+ };
+
+ /* SMARC I2C_CAM1 */
+ pinctrl_i2c5: i2c5grp {
+ fsl,pins = <MX8MP_IOMUXC_SPDIF_RX__I2C5_SDA 0x400001c6>, /* SMARC S2 - I2C_CAM1_DAT */
+ <MX8MP_IOMUXC_SPDIF_TX__I2C5_SCL 0x400001c6>; /* SMARC S1 - I2C_CAM1_CK */
+ };
+
+ /* SMARC I2C_CAM1 as GPIOs */
+ pinctrl_i2c5_gpio: i2c5gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_SPDIF_RX__GPIO5_IO04 0x400001c6>, /* SMARC S2 - I2C_CAM1_DAT */
+ <MX8MP_IOMUXC_SPDIF_TX__GPIO5_IO03 0x400001c6>; /* SMARC S1 - I2C_CAM1_CK */
+ };
+
+ /* SMARC I2C_PM */
+ pinctrl_i2c6: i2c6grp {
+ fsl,pins = <MX8MP_IOMUXC_HDMI_CEC__I2C6_SCL 0x400001c6>, /* SMARC P121 - I2C_PM_CK */
+ <MX8MP_IOMUXC_SAI5_RXC__I2C6_SDA 0x400001c6>; /* SMARC P122 - I2C_PM_DAT */
+ };
+
+ /* SMARC I2C_PM as GPIOs */
+ pinctrl_i2c6_gpio: i2c6gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_HDMI_CEC__GPIO3_IO28 0x400001c6>, /* SMARC P121 - I2C_PM_CK */
+ <MX8MP_IOMUXC_SAI5_RXC__GPIO3_IO20 0x400001c6>; /* SMARC P122 - I2C_PM_DAT */
+ };
+
+ pinctrl_lvds_dsi_sel: lvdsdsiselgrp {
+ fsl,pins = <MX8MP_IOMUXC_NAND_DATA01__GPIO3_IO07 0x104>; /* LVDS_DSI_SEL */
+ };
+
+ pinctrl_mcu_int: mcuintgrp {
+		fsl,pins = <MX8MP_IOMUXC_NAND_DATA02__GPIO3_IO08		0x1c0>; /* MCU_INT# */
+ };
+
+ /* SMARC LCD1_BKLT_PWM */
+ pinctrl_lcd1_bklt_pwm1: pwm1grp {
+ fsl,pins = <MX8MP_IOMUXC_SPDIF_EXT_CLK__PWM1_OUT 0x12>; /* SMARC S122 - LCD1_BKLT_PWM */
+ };
+
+ /* SMARC LCD0_BKLT_PWM */
+ pinctrl_lcd0_bklt_pwm2: pwm2grp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO11__PWM2_OUT 0x12>; /* SMARC S141 - LCD0_BKLT_PWM */
+ };
+
+ /* PCAL6408 Interrupt */
+ pinctrl_pcal6408: pcal6408intgrp {
+ fsl,pins = <MX8MP_IOMUXC_NAND_DATA03__GPIO3_IO09 0x1c4>; /* GPIO_EX_INT# */
+ };
+
+ /* SMARC PCIE_A */
+ pinctrl_pcie: pciegrp {
+ fsl,pins = <MX8MP_IOMUXC_SAI1_RXFS__GPIO4_IO00 0x1c0>, /* SMARC S146 - PCIE_WAKE# */
+ <MX8MP_IOMUXC_SAI1_TXD7__GPIO4_IO19 0x04>; /* SMARC P75 - PCIE_A_RST# */
+ };
+
+ /* PMIC Interrupt */
+ pinctrl_pmic: pmicintgrp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO03__GPIO1_IO03 0x1c4>; /* PMIC_INT# */
+ };
+
+ /* SMARC I2S0 */
+ pinctrl_sai1: sai1grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI5_MCLK__AUDIOMIX_SAI1_TX_BCLK 0x94>, /* SMARC S42 - I2S0_CK */
+ <MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_SAI1_TX_SYNC 0x94>, /* SMARC S39 - I2S0_LRCLK */
+ <MX8MP_IOMUXC_SAI1_RXD0__AUDIOMIX_SAI1_RX_DATA00 0x94>, /* SMARC S41 - I2S0_SDIN */
+ <MX8MP_IOMUXC_SAI5_RXFS__AUDIOMIX_SAI1_TX_DATA00 0x94>; /* SMARC S40 - I2S0_SDOUT */
+ };
+
+ /* SMARC AUDIO_MCK */
+ pinctrl_sai1_mclk: sai1mclkgrp {
+ fsl,pins = <MX8MP_IOMUXC_SAI1_MCLK__AUDIOMIX_SAI1_MCLK 0x96>; /* SMARC S38 - AUDIO_MCK */
+ };
+
+ /* SMARC I2S2 */
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI3_RXD__AUDIOMIX_SAI3_RX_DATA00 0x94>, /* SMARC S52 - I2S2_SDIN */
+ <MX8MP_IOMUXC_SAI3_TXC__AUDIOMIX_SAI3_TX_BCLK 0x94>, /* SMARC S53 - I2S2_CK */
+ <MX8MP_IOMUXC_SAI3_TXD__AUDIOMIX_SAI3_TX_DATA00 0x94>, /* SMARC S51 - I2S2_SDOUT */
+ <MX8MP_IOMUXC_SAI3_TXFS__AUDIOMIX_SAI3_TX_SYNC 0x94>; /* SMARC S50 - I2S2_LRCLK */
+ };
+
+ /* SMARC SLEEP# */
+ pinctrl_sleep: sleepgrp {
+		fsl,pins = <MX8MP_IOMUXC_NAND_CE0_B__GPIO3_IO01			0x1c0>; /* SMARC S149 - SLEEP# */
+ };
+
+ /* SMARC SMB_ALERT# */
+ pinctrl_smb_alert: smbalertgrp {
+		fsl,pins = <MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16		0x1c0>; /* SMARC P1 - SMB_ALERT# */
+ };
+
+ /* TPM_CS# */
+ pinctrl_tpm_cs: tpmcsgrp {
+ fsl,pins = <MX8MP_IOMUXC_NAND_DATA00__GPIO3_IO06 0x82>; /* TPM_CS# */
+ };
+
+ /* WIFI_BT_WKUP_HOST/TPM_INT# */
+ pinctrl_tpm_irq_wifi_bt_wkup: tpmirq-wifibtwkupgrp {
+ fsl,pins = <MX8MP_IOMUXC_SAI1_RXD2__GPIO4_IO04 0x16>; /* WIFI_BT_WKUP_HOST/TPM_INT# */
+ };
+
+ /* SMARC SER0 */
+ pinctrl_uart1: uart1grp {
+		fsl,pins = <MX8MP_IOMUXC_SAI2_RXD0__UART1_DCE_RTS		0x1c4>, /* SMARC P132 - SER0_CTS */
+			   <MX8MP_IOMUXC_SAI2_TXFS__UART1_DCE_CTS		0x1c4>, /* SMARC P131 - SER0_RTS */
+			   <MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX		0x1c4>, /* SMARC P130 - SER0_RX */
+			   <MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX		0x1c4>; /* SMARC P129 - SER0_TX */
+ };
+
+ /* SMARC SER2 */
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <MX8MP_IOMUXC_SD1_DATA4__UART2_DCE_RTS 0x1c4>, /* SMARC P139 - SER2_CTS */
+ <MX8MP_IOMUXC_SD1_DATA5__UART2_DCE_CTS 0x1c4>, /* SMARC P138 - SER2_RTS */
+ <MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x1c4>, /* SMARC P137 - SER2_RX */
+ <MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x1c4>; /* SMARC P136 - SER2_TX */
+ };
+
+ /* SMARC SER3 */
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <MX8MP_IOMUXC_UART3_RXD__UART3_DCE_RX 0x1c4>, /* SMARC P141 - SER3_RX */
+ <MX8MP_IOMUXC_UART3_TXD__UART3_DCE_TX 0x1c4>; /* SMARC P140 - SER3_TX */
+ };
+
+ /* SMARC SER1 */
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <MX8MP_IOMUXC_UART4_RXD__UART4_DCE_RX 0x1c4>, /* SMARC P135 - SER1_RX */
+ <MX8MP_IOMUXC_UART4_TXD__UART4_DCE_TX 0x1c4>; /* SMARC P134 - SER1_TX */
+ };
+
+ /* SMARC USB0_OTG_ID */
+ pinctrl_usb0_id: usb0idgrp {
+ fsl,pins = <MX8MP_IOMUXC_SAI3_MCLK__GPIO5_IO02 0x1c4>; /* SMARC P64 - USB0_OTG_ID */
+ };
+
+ /* SMARC USB0_EN_OC# */
+ pinctrl_usb0_en_oc: usb0enocgrp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO12__GPIO1_IO12 0x04>; /* SMARC P62 - USB0_EN_OC# */
+ };
+
+	/* On-module USB hub VBUS or SMARC USB2_EN_OC#, depending on the assembly variant */
+ pinctrl_usb1_en_oc: usb1enocgrp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO14__GPIO1_IO14 0x04>; /* SMARC P71 - USB2_EN_OC# */
+ };
+
+ /* On-module Wi-Fi */
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x190>, /* WiFi_SDIO_CLK */
+ <MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x1d0>, /* WiFi_SDIO_CMD */
+ <MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x1d0>, /* WiFi_SDIO_DATA0 */
+ <MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x1d0>, /* WiFi_SDIO_DATA1 */
+ <MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x1d0>, /* WiFi_SDIO_DATA2 */
+ <MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x1d0>; /* WiFi_SDIO_DATA3 */
+ };
+
+ /* On-module Wi-Fi */
+ pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+ fsl,pins = <MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x194>, /* WiFi_SDIO_CLK */
+ <MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x1d4>, /* WiFi_SDIO_CMD */
+ <MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x1d4>, /* WiFi_SDIO_DATA0 */
+ <MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x1d4>, /* WiFi_SDIO_DATA1 */
+ <MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x1d4>, /* WiFi_SDIO_DATA2 */
+ <MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x1d4>; /* WiFi_SDIO_DATA3 */
+ };
+
+ /* On-module Wi-Fi */
+ pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+ fsl,pins = <MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x196>, /* WiFi_SDIO_CLK */
+ <MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x1d6>, /* WiFi_SDIO_CMD */
+ <MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x1d6>, /* WiFi_SDIO_DATA0 */
+ <MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x1d6>, /* WiFi_SDIO_DATA1 */
+ <MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x1d6>, /* WiFi_SDIO_DATA2 */
+ <MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x1d6>; /* WiFi_SDIO_DATA3 */
+ };
+
+ /* SMARC SDIO */
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190>, /* SMARC P36 - SDIO_CK */
+ <MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d0>, /* SMARC P34 - SDIO_CMD */
+ <MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d0>, /* SMARC P39 - SDIO_DO */
+ <MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0>, /* SMARC P40 - SDIO_D1 */
+ <MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0>, /* SMARC P41 - SDIO_D2 */
+ <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0>; /* SMARC P42 - SDIO_D3 */
+ };
+
+ /* SMARC SDIO 100MHz */
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194>, /* SMARC P36 - SDIO_CK */
+ <MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4>, /* SMARC P34 - SDIO_CMD */
+ <MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4>, /* SMARC P39 - SDIO_DO */
+ <MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4>, /* SMARC P40 - SDIO_D1 */
+ <MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4>, /* SMARC P41 - SDIO_D2 */
+ <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4>; /* SMARC P42 - SDIO_D3 */
+ };
+
+ /* SMARC SDIO 200MHz */
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x196>, /* SMARC P36 - SDIO_CK */
+ <MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d6>, /* SMARC P34 - SDIO_CMD */
+ <MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d6>, /* SMARC P39 - SDIO_DO */
+ <MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6>, /* SMARC P40 - SDIO_D1 */
+ <MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6>, /* SMARC P41 - SDIO_D2 */
+ <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6>; /* SMARC P42 - SDIO_D3 */
+ };
+
+ /* SMARC SDIO_CD# */
+ pinctrl_usdhc2_cd: usdhc2cdgrp {
+ fsl,pins = <MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0x1c4>; /* SMARC P35 - SDIO_CD# */
+ };
+
+ /* SMARC SDIO_CD# */
+ pinctrl_usdhc2_cd_sleep: usdhc2cdslpgrp {
+ fsl,pins = <MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0x0>; /* SMARC P35 - SDIO_CD# */
+ };
+
+ /* SMARC SDIO_PWR_EN */
+ pinctrl_usdhc2_pwr_en: usdhc2pwrengrp {
+ fsl,pins = <MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x1c4>; /* SMARC P37 - SDIO_PWR_EN */
+ };
+
+ /* SMARC SDIO Sleep - Avoid backfeeding with removed card power */
+ pinctrl_usdhc2_sleep: usdhc2slpgrp {
+ fsl,pins = <MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x100>, /* SMARC P36 - SDIO_CK */
+ <MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x100>, /* SMARC P34 - SDIO_CMD */
+ <MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x100>, /* SMARC P39 - SDIO_DO */
+			   <MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1		0x100>, /* SMARC P40 - SDIO_D1 */
+			   <MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2		0x100>, /* SMARC P41 - SDIO_D2 */
+			   <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3		0x100>; /* SMARC P42 - SDIO_D3 */
+ };
+
+ pinctrl_usdhc2_vsel: usdhc2vselgrp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO04__GPIO1_IO04 0x4>; /* PMIC_USDHC_VSELECT */
+ };
+
+ /* SMARC SDIO_WP */
+ pinctrl_usdhc2_wp: usdhc2wpgrp {
+ fsl,pins = <MX8MP_IOMUXC_SD2_WP__GPIO2_IO20 0x144>; /* SMARC P33 - SDIO_WP */
+ };
+
+ /* On-module eMMC */
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x190>, /* eMMC_STROBE */
+ <MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d0>, /* eMMC_DATA5 */
+ <MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d0>, /* eMMC_DATA6 */
+ <MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d0>, /* eMMC_DATA7 */
+ <MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d0>, /* eMMC_DATA0 */
+ <MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d0>, /* eMMC_DATA1 */
+ <MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d0>, /* eMMC_DATA2 */
+ <MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d0>, /* eMMC_DATA3 */
+ <MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d0>, /* eMMC_DATA4 */
+ <MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x190>, /* eMMC_CLK */
+ <MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d0>; /* eMMC_CMD */
+ };
+
+ /* On-module eMMC */
+ pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp {
+ fsl,pins = <MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x194>, /* eMMC_STROBE */
+ <MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d4>, /* eMMC_DATA5 */
+ <MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d4>, /* eMMC_DATA6 */
+ <MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d4>, /* eMMC_DATA7 */
+ <MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d4>, /* eMMC_DATA0 */
+ <MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d4>, /* eMMC_DATA1 */
+ <MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d4>, /* eMMC_DATA2 */
+ <MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d4>, /* eMMC_DATA3 */
+ <MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d4>, /* eMMC_DATA4 */
+ <MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x194>, /* eMMC_CLK */
+ <MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d4>; /* eMMC_CMD */
+ };
+
+ /* On-module eMMC */
+ pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp {
+ fsl,pins = <MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x196>, /* eMMC_STROBE */
+ <MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d2>, /* eMMC_DATA5 */
+ <MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d2>, /* eMMC_DATA6 */
+ <MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d2>, /* eMMC_DATA7 */
+ <MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d2>, /* eMMC_DATA0 */
+ <MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d2>, /* eMMC_DATA1 */
+ <MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d2>, /* eMMC_DATA2 */
+ <MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d2>, /* eMMC_DATA3 */
+ <MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d2>, /* eMMC_DATA4 */
+ <MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x196>, /* eMMC_CLK */
+ <MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d6>; /* eMMC_CMD */
+ };
+
+ /* SoC Watchdog */
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO02__WDOG1_WDOG_B 0x4>; /* CTRL_SOC_WDOG */
+ };
+
+ /* On-module Wi-Fi power enable */
+ pinctrl_wifi_pwr_en: wifipwrengrp {
+ fsl,pins = <MX8MP_IOMUXC_NAND_DQS__GPIO3_IO14 0x104>; /* CTRL_EN_WIFI */
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mp-ras314-imx219.dtso b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mp-ras314-imx219.dtso
new file mode 100644
index 000000000000..e5a2b3780215
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mp-ras314-imx219.dtso
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2022-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
+ */
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/media/video-interfaces.h>
+
+#include "imx8mp-pinfunc.h"
+
+&{/} {
+ /*
+ * The three camera regulators are controlled by a single GPIO. Declare
+ * a single regulator for the three supplies.
+ */
+ reg_cam: regulator-cam {
+ compatible = "regulator-fixed";
+ regulator-name = "reg_cam";
+ /* pad muxing already done in gpio2grp */
+ gpio = <&gpio2 6 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_vcc_3v3>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ cam24m: clock-cam24m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "cam24m";
+ };
+};
+
+&i2c2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ camera@10 {
+ compatible = "sony,imx219";
+ reg = <0x10>;
+ clocks = <&cam24m>;
+ VANA-supply = <&reg_cam>;
+ VDIG-supply = <&reg_cam>;
+ VDDL-supply = <&reg_cam>;
+ orientation = <2>;
+ rotation = <0>;
+
+ port {
+ sony_imx219: endpoint {
+ remote-endpoint = <&imx8mp_mipi_csi_in>;
+ clock-lanes = <0>;
+ clock-noncontinuous;
+ data-lanes = <1 2>;
+ link-frequencies = /bits/ 64 <456000000>;
+ };
+ };
+ };
+};
+
+&isi_0 {
+ status = "disabled";
+
+ ports {
+ port@0 {
+ /delete-node/ endpoint;
+ };
+ };
+};
+
+&isp_0 {
+ status = "okay";
+
+ ports {
+ port@1 {
+ isp0_in: endpoint {
+ bus-type = <MEDIA_BUS_TYPE_PARALLEL>;
+ remote-endpoint = <&mipi_csi_0_out>;
+ };
+ };
+ };
+};
+
+&mipi_csi_0 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ imx8mp_mipi_csi_in: endpoint {
+ remote-endpoint = <&sony_imx219>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+};
+
+&mipi_csi_0_out {
+ remote-endpoint = <&isp0_in>;
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi
index b2ac2583a592..b59da91fdd04 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi
@@ -35,7 +35,6 @@
<0x1 0x00000000 0 0xc0000000>;
};
-
reg_usdhc2_vmmc: regulator-usdhc2-vmmc {
compatible = "regulator-fixed";
regulator-name = "VSD_3V3";
@@ -46,6 +45,16 @@
startup-delay-us = <100>;
off-on-delay-us = <12000>;
};
+
+ reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
+ compatible = "regulator-gpio";
+ regulator-name = "VSD_VSEL";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ gpios = <&gpio2 12 GPIO_ACTIVE_HIGH>;
+ states = <3300000 0x0 1800000 0x1>;
+ vin-supply = <&ldo5>;
+ };
};
&A53_0 {
@@ -205,6 +214,7 @@
pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
cd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_usdhc2_vmmc>;
+ vqmmc-supply = <&reg_usdhc2_vqmmc>;
bus-width = <4>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
index e3869efe4fd0..d43ba0087126 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
@@ -657,7 +657,7 @@
};
eeprom@50 {
- compatible = "st,24c02";
+ compatible = "st,24c02", "atmel,24c02";
pagesize = <16>;
reg = <0x50>;
};
@@ -770,7 +770,7 @@
/* EEPROM on display adapter (MIPI DSI Display Adapter) */
eeprom_display_adapter: eeprom@50 {
- compatible = "st,24c02";
+ compatible = "st,24c02", "atmel,24c02";
pagesize = <16>;
reg = <0x50>;
status = "disabled";
@@ -778,7 +778,7 @@
/* EEPROM on carrier board */
eeprom_carrier_board: eeprom@57 {
- compatible = "st,24c02";
+ compatible = "st,24c02", "atmel,24c02";
pagesize = <16>;
reg = <0x57>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index ce6793b2d57e..948b88cf5e9d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/clock/imx8mp-clock.h>
#include <dt-bindings/power/imx8mp-power.h>
#include <dt-bindings/reset/imx8mp-reset.h>
+#include <dt-bindings/reset/imx8mp-reset-audiomix.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interconnect/fsl,imx8mp.h>
@@ -65,7 +66,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0>;
- clock-latency = <61036>;
clocks = <&clk IMX8MP_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -86,7 +86,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1>;
- clock-latency = <61036>;
clocks = <&clk IMX8MP_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -105,7 +104,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x2>;
- clock-latency = <61036>;
clocks = <&clk IMX8MP_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -124,7 +122,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x3>;
- clock-latency = <61036>;
clocks = <&clk IMX8MP_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -280,7 +277,7 @@
ranges;
dsp_reserved: dsp@92400000 {
- reg = <0 0x92400000 0 0x2000000>;
+ reg = <0 0x92400000 0 0x1000000>;
no-map;
status = "disabled";
};
@@ -1252,6 +1249,7 @@
reg = <0x30e60000 0x10000>;
interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <2>;
+ clocks = <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_MU2_ROOT>;
status = "disabled";
};
@@ -1645,6 +1643,12 @@
opp-hz = /bits/ 64 <200000000>;
};
+ /* Nominal drive mode maximum */
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ };
+
+ /* Overdrive mode maximum */
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
};
@@ -2155,7 +2159,7 @@
};
};
- pcie: pcie@33800000 {
+ pcie0: pcie: pcie@33800000 {
compatible = "fsl,imx8mp-pcie";
reg = <0x33800000 0x400000>, <0x1ff00000 0x80000>;
reg-names = "dbi", "config";
@@ -2193,7 +2197,7 @@
status = "disabled";
};
- pcie_ep: pcie-ep@33800000 {
+ pcie0_ep: pcie_ep: pcie-ep@33800000 {
compatible = "fsl,imx8mp-pcie-ep";
reg = <0x33800000 0x100000>,
<0x18000000 0x8000000>,
@@ -2415,13 +2419,19 @@
};
dsp: dsp@3b6e8000 {
- compatible = "fsl,imx8mp-dsp";
+ compatible = "fsl,imx8mp-hifi4";
reg = <0x3b6e8000 0x88000>;
- mbox-names = "txdb0", "txdb1",
- "rxdb0", "rxdb1";
- mboxes = <&mu2 2 0>, <&mu2 2 1>,
- <&mu2 3 0>, <&mu2 3 1>;
- memory-region = <&dsp_reserved>;
+ clocks = <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_DSP_ROOT>,
+ <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_OCRAMA_IPG>,
+ <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_DSP_ROOT>,
+ <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_DSPDBG_ROOT>;
+ clock-names = "ipg", "ocram", "core", "debug";
+ power-domains = <&pgc_audio>;
+ mbox-names = "tx", "rx", "rxdb";
+ mboxes = <&mu2 0 0>, <&mu2 1 0>, <&mu2 3 0>;
+ firmware-name = "imx/dsp/hifi4.bin";
+ resets = <&audio_blk_ctrl IMX8MP_AUDIOMIX_DSP_RUNSTALL>;
+ reset-names = "runstall";
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
index a87d0692c3bb..43e45b0bd0d1 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
@@ -377,6 +377,16 @@
status = "okay";
};
+&pcie0_ep {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie0>;
+ clocks = <&clk IMX8MQ_CLK_PCIE1_ROOT>,
+ <&pcie0_refclk>,
+ <&clk IMX8MQ_CLK_PCIE1_PHY>,
+ <&clk IMX8MQ_CLK_PCIE1_AUX>;
+ status = "disabled";
+};
+
&pcie1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pcie1>;
@@ -390,6 +400,16 @@
status = "okay";
};
+&pcie1_ep {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie1>;
+ clocks = <&clk IMX8MQ_CLK_PCIE2_ROOT>,
+ <&pcie0_refclk>,
+ <&clk IMX8MQ_CLK_PCIE2_PHY>,
+ <&clk IMX8MQ_CLK_PCIE2_AUX>;
+ status = "disabled";
+};
+
&pgc_gpu {
power-supply = <&sw1a_reg>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index d51de8d899b2..c9040d1131a8 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -106,7 +106,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0>;
- clock-latency = <61036>; /* two CLK32 periods */
clocks = <&clk IMX8MQ_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -126,7 +125,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1>;
- clock-latency = <61036>; /* two CLK32 periods */
clocks = <&clk IMX8MQ_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -144,7 +142,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x2>;
- clock-latency = <61036>; /* two CLK32 periods */
clocks = <&clk IMX8MQ_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -162,7 +159,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x3>;
- clock-latency = <61036>; /* two CLK32 periods */
clocks = <&clk IMX8MQ_CLK_ARM>;
enable-method = "psci";
i-cache-size = <0x8000>;
@@ -1774,6 +1770,41 @@
status = "disabled";
};
+ pcie0_ep: pcie-ep@33800000 {
+ compatible = "fsl,imx8mq-pcie-ep";
+ reg = <0x33800000 0x100000>,
+ <0x18000000 0x8000000>,
+ <0x33900000 0x100000>,
+ <0x33b00000 0x100000>;
+ reg-names = "dbi", "addr_space", "dbi2", "atu";
+ num-lanes = <1>;
+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dma";
+ linux,pci-domain = <0>;
+ clocks = <&clk IMX8MQ_CLK_PCIE2_ROOT>,
+ <&clk IMX8MQ_CLK_PCIE2_PHY>,
+ <&clk IMX8MQ_CLK_PCIE2_PHY>,
+ <&clk IMX8MQ_CLK_PCIE2_AUX>;
+ clock-names = "pcie", "pcie_bus", "pcie_phy", "pcie_aux";
+ power-domains = <&pgc_pcie>;
+ resets = <&src IMX8MQ_RESET_PCIEPHY2>,
+ <&src IMX8MQ_RESET_PCIE2_CTRL_APPS_EN>,
+ <&src IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF>;
+ reset-names = "pciephy", "apps", "turnoff";
+ assigned-clocks = <&clk IMX8MQ_CLK_PCIE2_CTRL>,
+ <&clk IMX8MQ_CLK_PCIE2_PHY>,
+ <&clk IMX8MQ_CLK_PCIE2_AUX>;
+ assigned-clock-parents = <&clk IMX8MQ_SYS2_PLL_250M>,
+ <&clk IMX8MQ_SYS2_PLL_100M>,
+ <&clk IMX8MQ_SYS1_PLL_80M>;
+ assigned-clock-rates = <250000000>, <100000000>,
+ <10000000>;
+ num-ib-windows = <4>;
+ num-ob-windows = <4>;
+ fsl,max-link-speed = <2>;
+ status = "disabled";
+ };
+
pcie1: pcie@33c00000 {
compatible = "fsl,imx8mq-pcie";
reg = <0x33c00000 0x400000>,
@@ -1828,6 +1859,7 @@
interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "dma";
fsl,max-link-speed = <2>;
+ linux,pci-domain = <1>;
clocks = <&clk IMX8MQ_CLK_PCIE2_ROOT>,
<&clk IMX8MQ_CLK_PCIE2_PHY>,
<&clk IMX8MQ_CLK_PCIE2_PHY>,
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-apalis.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-apalis.dtsi
index c18f57039f6e..f97feee52c81 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-apalis.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm-apalis.dtsi
@@ -22,6 +22,10 @@
phy-mode = "rgmii-rxid";
};
+&hsio_refa_clk {
+ enable-gpios = <&lsio_gpio4 27 GPIO_ACTIVE_HIGH>;
+};
+
/* TODO: Apalis HDMI1 */
/* Apalis I2C2 (DDC) */
@@ -188,12 +192,6 @@
"MXM3_291",
"MXM3_289",
"MXM3_287";
-
- /* Enable pcie root / sata ref clock unconditionally */
- pcie-sata-hog {
- gpios = <27 GPIO_ACTIVE_HIGH>;
- };
-
};
&lsio_gpio5 {
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-hsio.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-hsio.dtsi
index e80f722dbe65..50c0f6b0f0bd 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-hsio.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-hsio.dtsi
@@ -12,7 +12,7 @@
#address-cells = <1>;
#size-cells = <1>;
- pciea: pcie@5f000000 {
+ pcie0: pciea: pcie@5f000000 {
compatible = "fsl,imx8q-pcie";
reg = <0x5f000000 0x10000>,
<0x4ff00000 0x80000>;
@@ -42,7 +42,7 @@
status = "disabled";
};
- pciea_ep: pcie-ep@5f000000 {
+ pcie0_ep: pciea_ep: pcie-ep@5f000000 {
compatible = "fsl,imx8q-pcie-ep";
reg = <0x5f000000 0x00010000>,
<0x40000000 0x10000000>;
@@ -61,7 +61,7 @@
status = "disabled";
};
- pcieb: pcie@5f010000 {
+ pcie1: pcieb: pcie@5f010000 {
compatible = "fsl,imx8q-pcie";
reg = <0x5f010000 0x10000>,
<0x8ff00000 0x80000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek-pcie-ep.dtso b/arch/arm64/boot/dts/freescale/imx8qxp-mek-pcie-ep.dtso
deleted file mode 100644
index 4f562eb5c5b1..000000000000
--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek-pcie-ep.dtso
+++ /dev/null
@@ -1,22 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-/*
- * Copyright 2025 NXP
- */
-
-#include <dt-bindings/phy/phy.h>
-
-/dts-v1/;
-/plugin/;
-
-&pcieb {
- status = "disabled";
-};
-
-&pcieb_ep {
- phys = <&hsio_phy 0 PHY_TYPE_PCIE 0>;
- phy-names = "pcie-phy";
- pinctrl-0 = <&pinctrl_pcieb>;
- pinctrl-names = "default";
- vpcie-supply = <&reg_pcieb>;
- status = "okay";
-};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
index a669a5d500d3..c93d123670bd 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
@@ -40,24 +40,6 @@
reg = <0x00000000 0x80000000 0 0x40000000>;
};
- reserved-memory {
- dsp_vdev0vring0: memory@942f0000 {
- reg = <0 0x942f0000 0 0x8000>;
- no-map;
- };
-
- dsp_vdev0vring1: memory@942f8000 {
- reg = <0 0x942f8000 0 0x8000>;
- no-map;
- };
-
- dsp_vdev0buffer: memory@94300000 {
- compatible = "shared-dma-pool";
- reg = <0 0x94300000 0 0x100000>;
- no-map;
- };
- };
-
reg_usdhc2_vmmc: usdhc2-vmmc {
compatible = "regulator-fixed";
regulator-name = "SD1_SPWR";
@@ -189,6 +171,22 @@
no-map;
};
+ dsp_vdev0vring0: memory@942f0000 {
+ reg = <0 0x942f0000 0 0x8000>;
+ no-map;
+ };
+
+ dsp_vdev0vring1: memory@942f8000 {
+ reg = <0 0x942f8000 0 0x8000>;
+ no-map;
+ };
+
+ dsp_vdev0buffer: memory@94300000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x94300000 0 0x100000>;
+ no-map;
+ };
+
gpu_reserved: memory@880000000 {
no-map;
reg = <0x8 0x80000000 0 0x10000000>;
@@ -539,7 +537,7 @@
status = "okay";
};
-&pcieb {
+&pcie0 {
phys = <&hsio_phy 0 PHY_TYPE_PCIE 0>;
phy-names = "pcie-phy";
pinctrl-0 = <&pinctrl_pcieb>;
@@ -549,6 +547,15 @@
status = "okay";
};
+&pcie0_ep {
+ phys = <&hsio_phy 0 PHY_TYPE_PCIE 0>;
+ phy-names = "pcie-phy";
+ pinctrl-0 = <&pinctrl_pcieb>;
+ pinctrl-names = "default";
+ vpcie-supply = <&reg_pcieb>;
+ status = "disabled";
+};
+
&scu_key {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-ss-hsio.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp-ss-hsio.dtsi
index 47fc6e0cff4a..255b8c91c88c 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-ss-hsio.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-ss-hsio.dtsi
@@ -38,4 +38,10 @@
power-domains = <&pd IMX_SC_R_SERDES_1>;
status = "disabled";
};
+
+ pcie0: pcie@5f010000 {
+ };
+
+ pcie0_ep: pcie-ep@5f010000 {
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-tqma8xqps-mb-smarc-2.dts b/arch/arm64/boot/dts/freescale/imx8qxp-tqma8xqps-mb-smarc-2.dts
new file mode 100644
index 000000000000..3fa9b5aee2c3
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-tqma8xqps-mb-smarc-2.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright (c) 2018-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
+ */
+
+/dts-v1/;
+
+#include "imx8qxp-tqma8xqps.dtsi"
+#include "tqma8xxs-mb-smarc-2.dtsi"
+
+/ {
+ model = "TQ-Systems i.MX8QXP TQMa8XQPS on MB-SMARC-2";
+ compatible = "tq,imx8qxp-tqma8xqps-mb-smarc-2", "tq,imx8qxp-tqma8xqps", "fsl,imx8qxp";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-tqma8xqps.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp-tqma8xqps.dtsi
new file mode 100644
index 000000000000..f008b7a34505
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-tqma8xqps.dtsi
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright (c) 2018-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
+ */
+
+#include "imx8qxp.dtsi"
+#include "tqma8xxs.dtsi"
+
+/ {
+ model = "TQ-Systems i.MX8QXP TQMa8XQPS";
+ compatible = "tq,imx8qxp-tqma8xqps", "fsl,imx8qxp";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi b/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi
index ecb35c6b67f5..e602d147e39b 100644
--- a/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi
@@ -52,6 +52,15 @@
regulator-name = "vref-1v8";
};
+ reg_module_wifi: regulator-module-wifi {
+ compatible = "regulator-fixed";
+ gpio = <&gpio_expander_43 6 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ regulator-name = "Wi-Fi_PDn";
+ startup-delay-us = <2000>;
+ };
+
reg_usbh_vbus: regulator-usbh-vbus {
compatible = "regulator-fixed";
pinctrl-names = "default";
@@ -261,6 +270,16 @@
};
};
+&hsio_phy {
+ fsl,hsio-cfg = "pciea-x2-pcieb";
+ fsl,refclk-pad-mode = "input";
+ status = "okay";
+};
+
+&hsio_refb_clk {
+ enable-gpios = <&gpio_expander_43 3 GPIO_ACTIVE_HIGH>;
+};
+
/* Colibri SPI */
&lpspi2 {
pinctrl-names = "default";
@@ -454,7 +473,15 @@
/* TODO MIPI DSI with DSI-to-HDMI bridge lt8912 */
-/* TODO on-module PCIe for Wi-Fi */
+/* On-module PCIe for Wi-Fi */
+&pcieb {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcieb>;
+ phys = <&hsio_phy 0 PHY_TYPE_PCIE 0>;
+ phy-names = "pcie-phy";
+ reset-gpios = <&lsio_gpio4 0 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
/* On-module I2S */
&sai0 {
diff --git a/arch/arm64/boot/dts/freescale/imx93-phyboard-nash.dts b/arch/arm64/boot/dts/freescale/imx93-phyboard-nash.dts
new file mode 100644
index 000000000000..7e9d031a2f0e
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx93-phyboard-nash.dts
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2025 PHYTEC Messtechnik GmbH
+ * Author: Primoz Fiser <primoz.fiser@norik.com>
+ *
+ * Product homepage:
+ * https://www.phytec.eu/en/produkte/development-kits/phyboard-nash/
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/net/ti-dp83867.h>
+#include "imx93-phycore-som.dtsi"
+
+/ {
+ model = "PHYTEC phyBOARD-Nash-i.MX93";
+ compatible = "phytec,imx93-phyboard-nash", "phytec,imx93-phycore-som",
+ "fsl,imx93";
+
+ aliases {
+ ethernet0 = &fec;
+ ethernet1 = &eqos;
+ rtc0 = &i2c_rtc;
+ rtc1 = &bbnsm_rtc;
+ };
+
+ chosen {
+ stdout-path = &lpuart1;
+ };
+
+ flexcan1_tc: can-phy0 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <8000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1_tc>;
+ standby-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ gpio = <&gpio3 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
+ regulator-name = "VCC_SD";
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ };
+
+ reg_vcc_1v8: regulator-vcc-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC1V8";
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ };
+
+ reg_vref_1v8: regulator-adc-vref {
+ compatible = "regulator-fixed";
+ regulator-name = "VREF_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+};
+
+/* ADC */
+&adc1 {
+ vref-supply = <&reg_vref_1v8>;
+ status = "okay";
+};
+
+/* Ethernet */
+&eqos {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy2>;
+ status = "okay";
+};
+
+&mdio {
+ ethphy2: ethernet-phy@2 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <2>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_1_75_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ };
+};
+
+/* CAN */
+&flexcan1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ phys = <&flexcan1_tc>;
+ status = "okay";
+};
+
+/* I2C2 */
+&lpi2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c2>;
+ status = "okay";
+
+ /* RTC */
+ i2c_rtc: rtc@52 {
+ compatible = "microcrystal,rv3028";
+ reg = <0x52>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rtc>;
+ trickle-resistor-ohms = <3000>;
+ wakeup-source;
+ };
+
+ /* EEPROM */
+ eeprom@54 {
+ compatible = "atmel,24c32";
+ reg = <0x54>;
+ pagesize = <32>;
+ vcc-supply = <&reg_vcc_1v8>;
+ };
+};
+
+/* SPI6 */
+&lpspi6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpspi6>;
+ cs-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ /* TPM */
+ tpm@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ reg = <0>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpm>;
+ spi-max-frequency = <10000000>;
+ };
+};
+
+/* Console */
+&lpuart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+/* RS-232/RS-485 */
+&lpuart7 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart7>;
+ status = "okay";
+};
+
+/* USB */
+&usbotg1 {
+ disable-over-current;
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usbotg2 {
+ disable-over-current;
+ dr_mode = "host";
+ status = "okay";
+};
+
+/* SD-Card */
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2_default>, <&pinctrl_usdhc2_cd>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_cd>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_cd>;
+ cd-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ disable-wp;
+ no-mmc;
+ no-sdio;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_eqos: eqosgrp {
+ fsl,pins = <
+ MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x57e
+ MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x57e
+ MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x57e
+ MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x57e
+ MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x5fe
+ MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x57e
+ MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x51e
+ MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x51e
+ MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x50e
+ MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x50e
+ MX93_PAD_ENET1_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x58e
+ MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x50e
+ MX93_PAD_CCM_CLKO1__GPIO3_IO26 0x1002
+ >;
+ };
+
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = <
+ MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x139e
+ MX93_PAD_PDM_CLK__CAN1_TX 0x1382
+ >;
+ };
+
+ pinctrl_flexcan1_tc: flexcan1tcgrp {
+ fsl,pins = <
+ MX93_PAD_ENET2_TD3__GPIO4_IO16 0x31e
+ >;
+ };
+
+ pinctrl_lpi2c2: lpi2c2grp {
+ fsl,pins = <
+ MX93_PAD_I2C2_SCL__LPI2C2_SCL 0x40000b9e
+ MX93_PAD_I2C2_SDA__LPI2C2_SDA 0x40000b9e
+ >;
+ };
+
+ pinctrl_lpspi6: lpspi6grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO00__GPIO2_IO00 0x386
+ MX93_PAD_GPIO_IO01__LPSPI6_SIN 0x3fe
+ MX93_PAD_GPIO_IO02__LPSPI6_SOUT 0x386
+ MX93_PAD_GPIO_IO03__LPSPI6_SCK 0x386
+ >;
+ };
+
+ pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_RESET_B__GPIO3_IO07 0x31e
+ >;
+ };
+
+ pinctrl_rtc: rtcgrp {
+ fsl,pins = <
+ MX93_PAD_ENET2_RD2__GPIO4_IO26 0x31e
+ >;
+ };
+
+ pinctrl_tpm: tpmgrp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO17__GPIO2_IO17 0x31e
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX93_PAD_UART1_RXD__LPUART1_RX 0x31e
+ MX93_PAD_UART1_TXD__LPUART1_TX 0x30e
+ >;
+ };
+
+ pinctrl_uart7: uart7grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO08__LPUART7_TX 0x30e
+ MX93_PAD_GPIO_IO09__LPUART7_RX 0x31e
+ MX93_PAD_GPIO_IO10__LPUART7_CTS_B 0x31e
+ MX93_PAD_GPIO_IO11__LPUART7_RTS_B 0x31e
+ >;
+ };
+
+ pinctrl_usdhc2_cd: usdhc2cdgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CD_B__GPIO3_IO00 0x31e
+ >;
+ };
+
+	/* Need to configure SION for the data and cmd pads, refer to ERR052021 */
+ pinctrl_usdhc2_default: usdhc2grp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x159e
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000178e
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x40001386
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x40001386
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x40001386
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000138e
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+	/* enable SION for data and cmd pad due to ERR052021 */
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x159e
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000139e
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000139e
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000139e
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x400013be
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+	/* enable SION for data and cmd pad due to ERR052021 */
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x159e
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000139e
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000139e
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000139e
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000139e
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx93-phyboard-segin.dts b/arch/arm64/boot/dts/freescale/imx93-phyboard-segin.dts
index 85fb188b057f..0c55b749c834 100644
--- a/arch/arm64/boot/dts/freescale/imx93-phyboard-segin.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-phyboard-segin.dts
@@ -17,10 +17,38 @@
compatible = "phytec,imx93-phyboard-segin", "phytec,imx93-phycore-som",
"fsl,imx93";
+ aliases {
+ rtc0 = &i2c_rtc;
+ rtc1 = &bbnsm_rtc;
+ };
+
chosen {
stdout-path = &lpuart1;
};
+ flexcan1_tc: can-phy0 {
+ compatible = "ti,tcan1043";
+ #phy-cells = <0>;
+ max-bitrate = <1000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1_tc>;
+ enable-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
+ };
+
+ reg_sound_1v8: regulator-sound-1v8 {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "VCC1V8_AUDIO";
+ };
+
+ reg_sound_3v3: regulator-sound-3v3 {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "VCC3V3_ANALOG";
+ };
+
reg_usdhc2_vmmc: regulator-usdhc2 {
compatible = "regulator-fixed";
enable-active-high;
@@ -31,6 +59,93 @@
regulator-max-microvolt = <3300000>;
regulator-name = "VCC_SD";
};
+
+ sound: sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "phyBOARD-Segin-TLV320AIC3007";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&dailink_master>;
+ simple-audio-card,frame-master = <&dailink_master>;
+ simple-audio-card,widgets =
+ "Line", "Line In",
+ "Line", "Line Out",
+ "Speaker", "Speaker";
+ simple-audio-card,routing =
+ "Line Out", "LLOUT",
+ "Line Out", "RLOUT",
+ "Speaker", "SPOP",
+ "Speaker", "SPOM",
+ "LINE1L", "Line In",
+ "LINE1R", "Line In";
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai1>;
+ };
+
+ dailink_master: simple-audio-card,codec {
+ sound-dai = <&audio_codec>;
+ clocks = <&clk IMX93_CLK_SAI1>;
+ };
+ };
+};
+
+/* Ethernet */
+&eqos {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos>;
+ phy-mode = "rmii";
+ phy-handle = <&ethphy2>;
+ assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>,
+ <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
+ assigned-clock-rates = <100000000>, <50000000>;
+ status = "okay";
+};
+
+&mdio {
+ ethphy2: ethernet-phy@2 {
+ compatible = "ethernet-phy-id0022.1561";
+ reg = <2>;
+ clocks = <&clk IMX93_CLK_ENET_REF_PHY>;
+ clock-names = "rmii-ref";
+ micrel,led-mode = <1>;
+ };
+};
+
+/* CAN */
+&flexcan1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ phys = <&flexcan1_tc>;
+ status = "okay";
+};
+
+/* I2C2 */
+&lpi2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c2>;
+ status = "okay";
+
+ /* Codec */
+ audio_codec: audio-codec@18 {
+ compatible = "ti,tlv320aic3007";
+ reg = <0x18>;
+ #sound-dai-cells = <0>;
+ AVDD-supply = <&reg_sound_3v3>;
+ IOVDD-supply = <&reg_sound_3v3>;
+ DRVDD-supply = <&reg_sound_3v3>;
+ DVDD-supply = <&reg_sound_1v8>;
+ };
+
+ /* RTC */
+ i2c_rtc: rtc@68 {
+ compatible = "microcrystal,rv4162";
+ reg = <0x68>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rtc>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+ };
};
/* Console */
@@ -40,9 +155,28 @@
status = "okay";
};
-/* eMMC */
-&usdhc1 {
- no-1-8-v;
+/* Audio */
+&sai1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai1>;
+ assigned-clocks = <&clk IMX93_CLK_SAI1>;
+ assigned-clock-parents = <&clk IMX93_CLK_AUDIO_PLL>;
+ assigned-clock-rates = <19200000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
+/* USB */
+&usbotg1 {
+ disable-over-current;
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usbotg2 {
+ disable-over-current;
+ dr_mode = "host";
+ status = "okay";
};
/* SD-Card */
@@ -53,6 +187,7 @@
pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_cd>;
bus-width = <4>;
cd-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ disable-wp;
no-mmc;
no-sdio;
vmmc-supply = <&reg_usdhc2_vmmc>;
@@ -60,10 +195,36 @@
};
&iomuxc {
- pinctrl_uart1: uart1grp {
+ pinctrl_eqos: eqosgrp {
fsl,pins = <
- MX93_PAD_UART1_RXD__LPUART1_RX 0x31e
- MX93_PAD_UART1_TXD__LPUART1_TX 0x30e
+ MX93_PAD_ENET1_TD2__CCM_ENET_QOS_CLOCK_GENERATE_REF_CLK 0x4000050e
+ MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x57e
+ MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x57e
+ MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x50e
+ MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x50e
+ MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x57e
+ MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x50e
+ MX93_PAD_ENET1_RXC__ENET_QOS_RX_ER 0x57e
+ >;
+ };
+
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = <
+ MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x139e
+ MX93_PAD_PDM_CLK__CAN1_TX 0x139e
+ >;
+ };
+
+ pinctrl_flexcan1_tc: flexcan1tcgrp {
+ fsl,pins = <
+ MX93_PAD_ENET2_TD3__GPIO4_IO16 0x31e
+ >;
+ };
+
+ pinctrl_lpi2c2: lpi2c2grp {
+ fsl,pins = <
+ MX93_PAD_I2C2_SCL__LPI2C2_SCL 0x40000b9e
+ MX93_PAD_I2C2_SDA__LPI2C2_SDA 0x40000b9e
>;
};
@@ -73,45 +234,71 @@
>;
};
+ pinctrl_rtc: rtcgrp {
+ fsl,pins = <
+ MX93_PAD_ENET2_RD2__GPIO4_IO26 0x31e
+ >;
+ };
+
+ pinctrl_sai1: sai1grp {
+ fsl,pins = <
+ MX93_PAD_UART2_RXD__SAI1_MCLK 0x1202
+ MX93_PAD_SAI1_TXFS__SAI1_TX_SYNC 0x1202
+ MX93_PAD_SAI1_TXC__SAI1_TX_BCLK 0x1202
+ MX93_PAD_SAI1_TXD0__SAI1_TX_DATA00 0x1402
+ MX93_PAD_SAI1_RXD0__SAI1_RX_DATA00 0x1402
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX93_PAD_UART1_RXD__LPUART1_RX 0x31e
+ MX93_PAD_UART1_TXD__LPUART1_TX 0x30e
+ >;
+ };
+
pinctrl_usdhc2_cd: usdhc2cdgrp {
fsl,pins = <
MX93_PAD_SD2_CD_B__GPIO3_IO00 0x31e
>;
};
+	/* enable SION for data and cmd pad due to ERR052021 */
pinctrl_usdhc2_default: usdhc2grp {
fsl,pins = <
- MX93_PAD_SD2_CLK__USDHC2_CLK 0x179e
- MX93_PAD_SD2_CMD__USDHC2_CMD 0x139e
- MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x138e
- MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x138e
- MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x138e
- MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x139e
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x159e
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000138e
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000138e
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000139e
MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
>;
};
- pinctrl_usdhc2_100mhz: usdhc2grp {
+	/* enable SION for data and cmd pad due to ERR052021 */
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
fsl,pins = <
- MX93_PAD_SD2_CLK__USDHC2_CLK 0x179e
- MX93_PAD_SD2_CMD__USDHC2_CMD 0x139e
- MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x138e
- MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x138e
- MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x139e
- MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x139e
- MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x159e
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000138e
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000139e
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000139e
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
>;
};
- pinctrl_usdhc2_200mhz: usdhc2grp {
+	/* enable SION for data and cmd pad due to ERR052021 */
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
fsl,pins = <
- MX93_PAD_SD2_CLK__USDHC2_CLK 0x178e
- MX93_PAD_SD2_CMD__USDHC2_CMD 0x139e
- MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x139e
- MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x139e
- MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x139e
- MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x139e
- MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x158e
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000139e
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000139e
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000139e
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000139e
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx93-phycore-som.dtsi
index 88c2657b50e6..22dbcc89e311 100644
--- a/arch/arm64/boot/dts/freescale/imx93-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93-phycore-som.dtsi
@@ -72,12 +72,107 @@
};
};
+/* I2C3 */
+&lpi2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c3>;
+ status = "okay";
+
+ pmic@25 {
+ compatible = "nxp,pca9451a";
+ reg = <0x25>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ buck1: BUCK1 {
+ regulator-name = "VDD_SOC";
+ regulator-min-microvolt = <610000>;
+ regulator-max-microvolt = <950000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck2: BUCK2 {
+ regulator-name = "VDDQ_0V6";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <600000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck4: BUCK4 {
+ regulator-name = "VDD_3V3_BUCK";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck5: BUCK5 {
+ regulator-name = "VDD_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck6: BUCK6 {
+ regulator-name = "VDD_1V1";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1: LDO1 {
+ regulator-name = "PMIC_SNVS_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4: LDO4 {
+ regulator-name = "VDD_0V8";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo5: LDO5 {
+ regulator-name = "NVCC_SD2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+
+ /* EEPROM */
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ pagesize = <32>;
+ vcc-supply = <&buck4>;
+ };
+};
+
/* eMMC */
&usdhc1 {
- pinctrl-names = "default";
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
bus-width = <8>;
non-removable;
+ no-1-8-v;
status = "okay";
};
@@ -104,22 +199,70 @@
pinctrl_leds: ledsgrp {
fsl,pins = <
- MX93_PAD_I2C1_SDA__GPIO1_IO01 0x31e
+ MX93_PAD_I2C1_SDA__GPIO1_IO01 0x11e
>;
};
+ pinctrl_lpi2c3: lpi2c3grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x40000b9e
+ MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x40000b9e
+ >;
+ };
+
+ pinctrl_pmic: pmicgrp {
+ fsl,pins = <
+ MX93_PAD_ENET2_RD3__GPIO4_IO27 0x31e
+ >;
+ };
+
+	/* enable SION for data and cmd pad due to ERR052021 */
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
MX93_PAD_SD1_CLK__USDHC1_CLK 0x179e
- MX93_PAD_SD1_CMD__USDHC1_CMD 0x1386
- MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x138e
- MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x1386
- MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x138e
- MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x1386
- MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x1386
- MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x1386
- MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x1386
- MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x1386
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x40001386
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x4000138e
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x40001386
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x4000138e
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x40001386
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x40001386
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x40001386
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x40001386
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x40001386
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x179e
+ >;
+ };
+
+	/* enable SION for data and cmd pad due to ERR052021 */
+ pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x17be
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x4000139e
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x4000138e
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x4000139e
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x400013be
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x4000139e
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x4000139e
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x4000139e
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x4000139e
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x4000139e
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x179e
+ >;
+ };
+
+	/* enable SION for data and cmd pad due to ERR052021 */
+ pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x17be
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x4000139e
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x4000139e
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x400013be
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x400013be
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x400013be
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x400013be
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x400013be
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x400013be
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x400013be
MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x179e
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba91xxca.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba91xxca.dts
new file mode 100644
index 000000000000..9dbf41cf394b
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba91xxca.dts
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright (c) 2022-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Markus Niebel
+ * Author: Alexander Stein
+ */
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/net/ti-dp83867.h>
+#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/usb/pd.h>
+#include "imx93-tqma9352.dtsi"
+
+/{
+ model = "TQ-Systems i.MX93 TQMa93xxLA/TQMa93xxCA on MBa91xxCA starter kit";
+ compatible = "tq,imx93-tqma9352-mba91xxca", "tq,imx93-tqma9352", "fsl,imx93";
+ chassis-type = "embedded";
+
+ chosen {
+ stdout-path = &lpuart1;
+ };
+
+ aliases {
+ eeprom0 = &eeprom0;
+ ethernet0 = &eqos;
+ ethernet1 = &fec;
+ rtc0 = &pcf85063;
+ rtc1 = &bbnsm_rtc;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&tpm2 2 5000000 0>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <7>;
+ power-supply = <&reg_12v0>;
+ enable-gpios = <&expander2 2 GPIO_ACTIVE_HIGH>;
+ status = "disabled";
+ };
+
+ display: display {
+ /*
+		 * The display is not fixed, so the compatible has to be
+		 * added from a DT overlay
+ */
+ power-supply = <&reg_3v3>;
+ enable-gpios = <&expander2 1 GPIO_ACTIVE_HIGH>;
+ backlight = <&backlight>;
+ status = "disabled";
+
+ port {
+ panel_in: endpoint {
+ };
+ };
+ };
+
+ fan0: gpio-fan {
+ compatible = "gpio-fan";
+ gpios = <&expander2 4 GPIO_ACTIVE_HIGH>;
+ gpio-fan,speed-map = <0 0>, <10000 1>;
+ fan-supply = <&reg_12v0>;
+ #cooling-cells = <2>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+
+ switch-a {
+ label = "switcha";
+ linux,code = <BTN_0>;
+ gpios = <&expander0 6 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+
+ switch-b {
+ label = "switchb";
+ linux,code = <BTN_1>;
+ gpios = <&expander0 7 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&expander2 6 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+
+ led-2 {
+ color = <LED_COLOR_ID_AMBER>;
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&expander2 7 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc1 0>, <&adc1 1>, <&adc1 2>, <&adc1 3>;
+ };
+
+ lvds_encoder: lvds-encoder {
+ compatible = "ti,sn75lvds83", "lvds-encoder";
+ powerdown-gpios = <&expander2 3 GPIO_ACTIVE_LOW>;
+ power-supply = <&reg_3v3>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ lvds_encoder_input: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ lvds_encoder_output: endpoint {
+ };
+ };
+ };
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "V_3V3_MB";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_5v0: regulator-5v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "V_5V0_MB";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_12v0: regulator-12v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "V_12V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ gpio = <&expander1 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_mpcie_1v5: regulator-mpcie-1v5 {
+ compatible = "regulator-fixed";
+ regulator-name = "V_1V5_MPCIE";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ gpio = <&expander0 2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ reg_mpcie_3v3: regulator-mpcie-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "V_3V3_MPCIE";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&expander0 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ thermal-zones {
+ cpu-thermal {
+ trips {
+ cpu_active: trip-active0 {
+ temperature = <40000>;
+ hysteresis = <5000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map1 {
+ trip = <&cpu_active>;
+ cooling-device = <&fan0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+};
+
+&adc1 {
+ status = "okay";
+};
+
+&eqos {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy_eqos>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy_eqos: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos_phy>;
+ reset-gpios = <&expander1 0 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <500000>;
+ reset-deassert-us = <50000>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
+ enet-phy-lane-no-swap;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,dp83867-rxctrl-strap-quirk;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ };
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy_fec>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <5000000>;
+
+ ethphy_fec: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec_phy>;
+ reset-gpios = <&expander1 1 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <500000>;
+ reset-deassert-us = <50000>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <27 IRQ_TYPE_EDGE_FALLING>;
+ enet-phy-lane-no-swap;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,dp83867-rxctrl-strap-quirk;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ };
+ };
+};
+
+&flexcan1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ xceiver-supply = <&reg_3v3>;
+ status = "okay";
+};
+
+&gpio1 {
+ gpio-line-names =
+ /* 00 */ "", "", "", "PMIC_IRQ#",
+ /* 04 */ "", "", "", "",
+ /* 08 */ "", "", "USB_C_ALERT#", "BM2_LCD_INT#",
+ /* 12 */ "PEX_INT#", "", "RTC_EVENT#", "",
+ /* 16 */ "", "", "", "",
+ /* 20 */ "", "", "", "",
+ /* 24 */ "", "", "", "",
+ /* 28 */ "", "", "", "";
+};
+
+&gpio2 {
+ gpio-line-names =
+ /* 00 */ "", "", "", "",
+ /* 04 */ "", "", "", "",
+ /* 08 */ "", "", "", "",
+ /* 12 */ "", "", "", "",
+ /* 16 */ "", "", "", "",
+ /* 20 */ "", "", "", "",
+ /* 24 */ "", "", "", "",
+ /* 28 */ "", "", "", "";
+};
+
+&gpio3 {
+ gpio-line-names =
+ /* 00 */ "SD2_CD#", "", "", "",
+ /* 04 */ "", "", "", "SD2_RST#",
+ /* 08 */ "", "", "", "",
+ /* 12 */ "", "", "", "",
+ /* 16 */ "", "", "", "",
+ /* 20 */ "", "", "", "",
+ /* 24 */ "", "", "ENET1_INT#", "ENET2_INT#",
+ /* 28 */ "", "", "", "";
+};
+
+&gpio4 {
+ gpio-line-names =
+ /* 00 */ "", "", "", "",
+ /* 04 */ "", "", "", "",
+ /* 08 */ "", "", "", "",
+ /* 12 */ "", "", "", "",
+ /* 16 */ "", "", "", "",
+ /* 20 */ "", "", "", "",
+ /* 24 */ "", "", "", "",
+ /* 28 */ "", "", "", "";
+};
+
+&lpi2c3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pinctrl_lpi2c3>;
+ pinctrl-1 = <&pinctrl_lpi2c3>;
+ status = "okay";
+
+ temperature-sensor@1c {
+ compatible = "nxp,se97b", "jedec,jc-42.4-temp";
+ reg = <0x1c>;
+ };
+
+ ptn5110: usb-typec@50 {
+ compatible = "nxp,ptn5110", "tcpci";
+ reg = <0x50>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_typec>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <10 IRQ_TYPE_EDGE_FALLING>;
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "X17";
+ power-role = "dual";
+ data-role = "dual";
+ try-power-role = "sink";
+ typec-power-opmode = "default";
+ pd-disable;
+ self-powered;
+
+ port {
+ typec_con_hs: endpoint {
+ remote-endpoint = <&typec_hs>;
+ };
+ };
+ };
+ };
+
+ eeprom2: eeprom@54 {
+ compatible = "nxp,se97b", "atmel,24c02";
+ reg = <0x54>;
+ pagesize = <16>;
+ vcc-supply = <&reg_3v3>;
+ };
+
+ expander0: gpio@70 {
+ compatible = "nxp,pca9538";
+ reg = <0x70>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pexp_irq>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+ vcc-supply = <&reg_3v3>;
+ gpio-line-names = "TEMP_EVENT_MOD#", "MPCIE_WAKE#",
+ "MPCIE_1V5_EN", "MPCIE_3V3_EN",
+ "MPCIE_PERST#", "MPCIE_WDISABLE#",
+ "BUTTON_A#", "BUTTON_B#";
+
+ temp-event-mod-hog {
+ gpio-hog;
+ gpios = <0 GPIO_ACTIVE_LOW>;
+ input;
+ line-name = "TEMP_EVENT_MOD#";
+ };
+
+ mpcie-wake-hog {
+ gpio-hog;
+ gpios = <1 GPIO_ACTIVE_LOW>;
+ input;
+ line-name = "MPCIE_WAKE#";
+ };
+
+ /*
+		 * Controls the mPCIE slot reset, which is an active-low
+		 * reset signal. The output-low state means the signal is
+		 * inactive, i.e. not in reset
+ */
+ mpcie_rst_hog: mpcie-rst-hog {
+ gpio-hog;
+ gpios = <4 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "MPCIE_PERST#";
+ };
+
+ /*
+		 * Controls the mPCIE slot WDISABLE pin, which is an
+		 * active-low disable signal. The output-low state means the
+		 * signal is inactive, i.e. not disabled
+ */
+ mpcie_wdisable_hog: mpcie-wdisable-hog {
+ gpio-hog;
+ gpios = <5 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "MPCIE_WDISABLE#";
+ };
+ };
+
+ expander1: gpio@71 {
+ compatible = "nxp,pca9538";
+ reg = <0x71>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ vcc-supply = <&reg_3v3>;
+ gpio-line-names = "ENET1_RESET#", "ENET2_RESET#",
+ "USB_RESET#", "",
+ "WLAN_PD#", "WLAN_W_DISABLE#",
+ "WLAN_PERST#", "12V_EN";
+
+ /*
+		 * Controls the WiFi card PD pin, which is an active-low
+		 * power-down signal. The output-low state means the signal
+		 * is inactive, i.e. not powered down
+ */
+ wlan-pd-hog {
+ gpio-hog;
+ gpios = <4 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "WLAN_PD#";
+ };
+
+ /*
+		 * Controls the WiFi card disable pin, which is an active-low
+		 * disable signal. The output-low state means the signal
+		 * is inactive, i.e. not disabled
+ */
+ wlan-wdisable-hog {
+ gpio-hog;
+ gpios = <5 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "WLAN_W_DISABLE#";
+ };
+
+ /*
+		 * Controls the WiFi card reset pin, which is an active-low
+		 * reset signal. The output-low state means the signal
+		 * is inactive, i.e. not in reset
+ */
+ wlan-perst-hog {
+ gpio-hog;
+ gpios = <6 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "WLAN_PERST#";
+ };
+ };
+
+ expander2: gpio@72 {
+ compatible = "nxp,pca9538";
+ reg = <0x72>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ vcc-supply = <&reg_3v3>;
+ gpio-line-names = "LCD_RESET#", "LCD_PWR_EN",
+ "LCD_BLT_EN", "LVDS_SHDN#",
+ "FAN_PWR_EN", "",
+ "USER_LED1", "USER_LED2";
+ };
+};
+
+&lpuart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&lpuart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ linux,rs485-enabled-at-boot-time;
+ status = "okay";
+};
+
+&pcf85063 {
+ /* RTC_EVENT# from SoM is connected on mainboard */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcf85063>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <14 IRQ_TYPE_EDGE_FALLING>;
+};
+
+&se97_som {
+ /* TEMP_EVENT# from SoM is connected on mainboard */
+ interrupt-parent = <&expander0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&tpm2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpm2>;
+ status = "okay";
+};
+
+&usbotg1 {
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ usb-role-switch;
+ disable-over-current;
+ samsung,picophy-pre-emp-curr-control = <3>;
+ samsung,picophy-dc-vol-level-adjust = <7>;
+ status = "okay";
+
+ port {
+ typec_hs: endpoint {
+ remote-endpoint = <&typec_con_hs>;
+ };
+ };
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ disable-over-current;
+ samsung,picophy-pre-emp-curr-control = <3>;
+ samsung,picophy-dc-vol-level-adjust = <7>;
+ status = "okay";
+
+ hub_2_0: hub@1 {
+ compatible = "usb424,2517";
+ reg = <1>;
+ reset-gpios = <&expander1 2 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_3v3>;
+ };
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2_hs>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_uhs>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_uhs>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ no-sdio;
+ no-mmc;
+ disable-wp;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_eqos: eqosgrp {
+ fsl,pins = /* PD | FSEL_2 | DSE X4 */
+ <MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x51e>,
+ /* SION | HYS | ODE | FSEL_2 | DSE X4 */
+ <MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000111e>,
+ /* HYS | FSEL_0 | DSE no drive */
+ <MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x1000>,
+ <MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x1000>,
+ <MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x1000>,
+ <MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x1000>,
+ <MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x1000>,
+ /* HYS | PD | FSEL_0 | DSE no drive */
+ <MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x1400>,
+ /* PD | FSEL_2 | DSE X4 */
+ <MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x51e>,
+ <MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x51e>,
+ <MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x51e>,
+ <MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x51e>,
+ <MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x51e>,
+ /* PD | FSEL_3 | DSE X3 */
+ <MX93_PAD_ENET1_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x58e>;
+ };
+
+ pinctrl_eqos_phy: eqosphygrp {
+ fsl,pins = /* HYS | FSEL_0 | DSE no drive */
+ <MX93_PAD_CCM_CLKO1__GPIO3_IO26 0x1000>;
+ };
+
+ pinctrl_fec: fecgrp {
+ fsl,pins = /* PD | FSEL_2 | DSE X4 */
+ <MX93_PAD_ENET2_MDC__ENET1_MDC 0x51e>,
+ /* SION | HYS | ODE | FSEL_2 | DSE X4 */
+ <MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000111e>,
+ /* HYS | FSEL_0 | DSE no drive */
+ <MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x1000>,
+ <MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x1000>,
+ <MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x1000>,
+ <MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x1000>,
+ <MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x1000>,
+ /* HYS | PD | FSEL_0 | DSE no drive */
+ <MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x1400>,
+ /* PD | FSEL_2 | DSE X4 */
+ <MX93_PAD_ENET2_TD0__ENET1_RGMII_TD0 0x51e>,
+ <MX93_PAD_ENET2_TD1__ENET1_RGMII_TD1 0x51e>,
+ <MX93_PAD_ENET2_TD2__ENET1_RGMII_TD2 0x51e>,
+ <MX93_PAD_ENET2_TD3__ENET1_RGMII_TD3 0x51e>,
+ <MX93_PAD_ENET2_TX_CTL__ENET1_RGMII_TX_CTL 0x51e>,
+ /* PD | FSEL_3 | DSE X3 */
+ <MX93_PAD_ENET2_TXC__ENET1_RGMII_TXC 0x58e>;
+ };
+
+ pinctrl_fec_phy: fecphygrp {
+ fsl,pins = /* HYS | FSEL_0 | DSE no drive */
+ <MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x1000>;
+ };
+
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = /* HYS | PU | FSEL_0 | DSE no drive */
+ <MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x1200>,
+ /* PU | FSEL_3 | DSE X4 */
+ <MX93_PAD_PDM_CLK__CAN1_TX 0x039e>;
+ };
+
+ pinctrl_jtag: jtaggrp {
+ fsl,pins = <MX93_PAD_DAP_TCLK_SWCLK__JTAG_MUX_TCK 0x051e>,
+ <MX93_PAD_DAP_TDI__JTAG_MUX_TDI 0x1200>,
+ <MX93_PAD_DAP_TDO_TRACESWO__JTAG_MUX_TDO 0x031e>,
+ <MX93_PAD_DAP_TMS_SWDIO__JTAG_MUX_TMS 0x1200>;
+ };
+
+ pinctrl_lpi2c3: lpi2c3grp {
+ fsl,pins = /* SION | HYS | OD | FSEL_3 | DSE X4 */
+ <MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x4000199e>,
+ <MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x4000199e>;
+ };
+
+ pinctrl_pcf85063: pcf85063grp {
+ fsl,pins = <MX93_PAD_SAI1_RXD0__GPIO1_IO14 0x1000>;
+ };
+
+ pinctrl_pexp_irq: pexpirqgrp {
+ fsl,pins = /* HYS | FSEL_0 | No DSE */
+ <MX93_PAD_SAI1_TXC__GPIO1_IO12 0x1000>;
+ };
+
+ pinctrl_rgbdisp: rgbdispgrp {
+ fsl,pins = <MX93_PAD_GPIO_IO00__MEDIAMIX_DISP_CLK 0x31e>,
+ <MX93_PAD_GPIO_IO01__MEDIAMIX_DISP_DE 0x31e>,
+ <MX93_PAD_GPIO_IO02__MEDIAMIX_DISP_VSYNC 0x31e>,
+ <MX93_PAD_GPIO_IO03__MEDIAMIX_DISP_HSYNC 0x31e>,
+ <MX93_PAD_GPIO_IO04__MEDIAMIX_DISP_DATA00 0x31e>,
+ <MX93_PAD_GPIO_IO05__MEDIAMIX_DISP_DATA01 0x31e>,
+ <MX93_PAD_GPIO_IO06__MEDIAMIX_DISP_DATA02 0x31e>,
+ <MX93_PAD_GPIO_IO07__MEDIAMIX_DISP_DATA03 0x31e>,
+ <MX93_PAD_GPIO_IO08__MEDIAMIX_DISP_DATA04 0x31e>,
+ <MX93_PAD_GPIO_IO09__MEDIAMIX_DISP_DATA05 0x31e>,
+ <MX93_PAD_GPIO_IO10__MEDIAMIX_DISP_DATA06 0x31e>,
+ <MX93_PAD_GPIO_IO11__MEDIAMIX_DISP_DATA07 0x31e>,
+ <MX93_PAD_GPIO_IO12__MEDIAMIX_DISP_DATA08 0x31e>,
+ <MX93_PAD_GPIO_IO13__MEDIAMIX_DISP_DATA09 0x31e>,
+ <MX93_PAD_GPIO_IO14__MEDIAMIX_DISP_DATA10 0x31e>,
+ <MX93_PAD_GPIO_IO15__MEDIAMIX_DISP_DATA11 0x31e>,
+ <MX93_PAD_GPIO_IO16__MEDIAMIX_DISP_DATA12 0x31e>,
+ <MX93_PAD_GPIO_IO17__MEDIAMIX_DISP_DATA13 0x31e>,
+ <MX93_PAD_GPIO_IO18__MEDIAMIX_DISP_DATA14 0x31e>,
+ <MX93_PAD_GPIO_IO19__MEDIAMIX_DISP_DATA15 0x31e>,
+ <MX93_PAD_GPIO_IO20__MEDIAMIX_DISP_DATA16 0x31e>,
+ <MX93_PAD_GPIO_IO21__MEDIAMIX_DISP_DATA17 0x31e>,
+ <MX93_PAD_GPIO_IO22__MEDIAMIX_DISP_DATA18 0x31e>,
+ <MX93_PAD_GPIO_IO23__MEDIAMIX_DISP_DATA19 0x31e>,
+ <MX93_PAD_GPIO_IO24__MEDIAMIX_DISP_DATA20 0x31e>,
+ <MX93_PAD_GPIO_IO25__MEDIAMIX_DISP_DATA21 0x31e>,
+ <MX93_PAD_GPIO_IO26__MEDIAMIX_DISP_DATA22 0x31e>,
+ <MX93_PAD_GPIO_IO27__MEDIAMIX_DISP_DATA23 0x31e>;
+ };
+
+ pinctrl_touch: touchgrp {
+ fsl,pins = /* HYS | FSEL_0 | No DSE */
+ <MX93_PAD_SAI1_TXFS__GPIO1_IO11 0x1000>;
+ };
+
+ pinctrl_tpm2: tpm2grp {
+ fsl,pins = <MX93_PAD_I2C2_SCL__TPM2_CH2 0x57e>;
+ };
+
+ pinctrl_typec: typecgrp {
+ fsl,pins = /* HYS | FSEL_0 | No DSE */
+ <MX93_PAD_PDM_BIT_STREAM1__GPIO1_IO10 0x1000>;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = /* HYS | FSEL_0 | No DSE */
+ <MX93_PAD_UART1_RXD__LPUART1_RX 0x1000>,
+ /* FSEL_2 | DSE X4 */
+ <MX93_PAD_UART1_TXD__LPUART1_TX 0x011e>;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = /* HYS | FSEL_0 | No DSE */
+ <MX93_PAD_UART2_RXD__LPUART2_RX 0x1000>,
+ /* FSEL_2 | DSE X4 */
+ <MX93_PAD_UART2_TXD__LPUART2_TX 0x011e>,
+ /* FSEL_2 | DSE X4 */
+ <MX93_PAD_SAI1_TXD0__LPUART2_RTS_B 0x011e>;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+ fsl,pins = /* HYS | FSEL_0 | No DSE */
+ <MX93_PAD_SD2_CD_B__GPIO3_IO00 0x1000>;
+ };
+
+ /* enable SION for data and cmd pad due to ERR052021 */
+ pinctrl_usdhc2_hs: usdhc2hsgrp {
+ fsl,pins = /* PD | FSEL_3 | DSE X5 */
+ <MX93_PAD_SD2_CLK__USDHC2_CLK 0x05be>,
+ /* HYS | PU | FSEL_3 | DSE X4 */
+ <MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e>,
+ /* HYS | PU | FSEL_3 | DSE X3 */
+ <MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000138e>,
+ <MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e>,
+ <MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000138e>,
+ <MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000138e>,
+ /* FSEL_2 | DSE X3 */
+ <MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x010e>;
+ };
+
+ /* enable SION for data and cmd pad due to ERR052021 */
+ pinctrl_usdhc2_uhs: usdhc2uhsgrp {
+ fsl,pins = /* PD | FSEL_3 | DSE X6 */
+ <MX93_PAD_SD2_CLK__USDHC2_CLK 0x05fe>,
+ /* HYS | PU | FSEL_3 | DSE X4 */
+ <MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e>,
+ <MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000139e>,
+ <MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000139e>,
+ <MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000139e>,
+ <MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000139e>,
+ /* FSEL_2 | DSE X3 */
+ <MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x010e>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
index ebbac5f8d2b2..137b8ed242a2 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
@@ -627,8 +627,8 @@
fsl,pins = <
/* PD | FSEL_2 | DSE X4 */
MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x51e
- /* SION | HYS | ODE | FSEL_2 | DSE X4 */
- MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000191e
+ /* SION | HYS | FSEL_2 | DSE X4 */
+ MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000111e
/* HYS | FSEL_0 | DSE no drive */
MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x1000
MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x1000
@@ -659,8 +659,8 @@
fsl,pins = <
/* PD | FSEL_2 | DSE X4 */
MX93_PAD_ENET2_MDC__ENET1_MDC 0x51e
- /* SION | HYS | ODE | FSEL_2 | DSE X4 */
- MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000191e
+ /* SION | HYS | FSEL_2 | DSE X4 */
+ MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000111e
/* HYS | FSEL_0 | DSE no drive */
MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x1000
MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x1000
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
index 9e88c42c3d17..219f49a4f87f 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
@@ -597,8 +597,8 @@
fsl,pins = <
/* PD | FSEL_2 | DSE X4 */
MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x51e
- /* SION | HYS | ODE | FSEL_2 | DSE X4 */
- MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000191e
+ /* SION | HYS | FSEL_2 | DSE X4 */
+ MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000111e
/* HYS | FSEL_0 | DSE no drive */
MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x1000
MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x1000
@@ -629,8 +629,8 @@
fsl,pins = <
/* PD | FSEL_2 | DSE X4 */
MX93_PAD_ENET2_MDC__ENET1_MDC 0x51e
- /* SION | HYS | ODE | FSEL_2 | DSE X4 */
- MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000191e
+ /* SION | HYS | FSEL_2 | DSE X4 */
+ MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000111e
/* HYS | FSEL_0 | DSE no drive */
MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x1000
MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x1000
diff --git a/arch/arm64/boot/dts/freescale/imx94-clock.h b/arch/arm64/boot/dts/freescale/imx94-clock.h
new file mode 100644
index 000000000000..27e8c0839722
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx94-clock.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Copyright 2024-2025 NXP
+ */
+
+#ifndef __IMX94_CLOCK_H
+#define __IMX94_CLOCK_H
+
+#define IMX94_CLK_EXT 0
+#define IMX94_CLK_32K 1
+#define IMX94_CLK_24M 2
+#define IMX94_CLK_FRO 3
+#define IMX94_CLK_SYSPLL1_VCO 4
+#define IMX94_CLK_SYSPLL1_PFD0_UNGATED 5
+#define IMX94_CLK_SYSPLL1_PFD0 6
+#define IMX94_CLK_SYSPLL1_PFD0_DIV2 7
+#define IMX94_CLK_SYSPLL1_PFD1_UNGATED 8
+#define IMX94_CLK_SYSPLL1_PFD1 9
+#define IMX94_CLK_SYSPLL1_PFD1_DIV2 10
+#define IMX94_CLK_SYSPLL1_PFD2_UNGATED 11
+#define IMX94_CLK_SYSPLL1_PFD2 12
+#define IMX94_CLK_SYSPLL1_PFD2_DIV2 13
+#define IMX94_CLK_AUDIOPLL1_VCO 14
+#define IMX94_CLK_AUDIOPLL1 15
+#define IMX94_CLK_AUDIOPLL2_VCO 16
+#define IMX94_CLK_AUDIOPLL2 17
+#define IMX94_CLK_RESERVED18 18
+#define IMX94_CLK_RESERVED19 19
+#define IMX94_CLK_RESERVED20 20
+#define IMX94_CLK_RESERVED21 21
+#define IMX94_CLK_RESERVED22 22
+#define IMX94_CLK_RESERVED23 23
+#define IMX94_CLK_ENCPLL_VCO 24
+#define IMX94_CLK_ENCPLL_PFD0_UNGATED 25
+#define IMX94_CLK_ENCPLL_PFD0 26
+#define IMX94_CLK_ENCPLL_PFD1_UNGATED 27
+#define IMX94_CLK_ENCPLL_PFD1 28
+#define IMX94_CLK_ARMPLL_VCO 29
+#define IMX94_CLK_ARMPLL_PFD0_UNGATED 30
+#define IMX94_CLK_ARMPLL_PFD0 31
+#define IMX94_CLK_ARMPLL_PFD1_UNGATED 32
+#define IMX94_CLK_ARMPLL_PFD1 33
+#define IMX94_CLK_ARMPLL_PFD2_UNGATED 34
+#define IMX94_CLK_ARMPLL_PFD2 35
+#define IMX94_CLK_ARMPLL_PFD3_UNGATED 36
+#define IMX94_CLK_ARMPLL_PFD3 37
+#define IMX94_CLK_DRAMPLL_VCO 38
+#define IMX94_CLK_DRAMPLL 39
+#define IMX94_CLK_HSIOPLL_VCO 40
+#define IMX94_CLK_HSIOPLL 41
+#define IMX94_CLK_LDBPLL_VCO 42
+#define IMX94_CLK_LDBPLL 43
+#define IMX94_CLK_EXT1 44
+#define IMX94_CLK_EXT2 45
+#define IMX94_CLK_ADC 46
+#define IMX94_CLK_BUSAON 47
+#define IMX94_CLK_CAN1 48
+#define IMX94_CLK_GLITCHFILTER 49
+#define IMX94_CLK_GPT1 50
+#define IMX94_CLK_I3C1SLOW 51
+#define IMX94_CLK_LPI2C1 52
+#define IMX94_CLK_LPI2C2 53
+#define IMX94_CLK_LPSPI1 54
+#define IMX94_CLK_LPSPI2 55
+#define IMX94_CLK_LPTMR1 56
+#define IMX94_CLK_LPUART1 57
+#define IMX94_CLK_LPUART2 58
+#define IMX94_CLK_M33 59
+#define IMX94_CLK_M33SYSTICK 60
+#define IMX94_CLK_PDM 61
+#define IMX94_CLK_SAI1 62
+#define IMX94_CLK_TPM2 63
+#define IMX94_CLK_A55 64
+#define IMX94_CLK_A55MTRBUS 65
+#define IMX94_CLK_A55PERIPH 66
+#define IMX94_CLK_DRAMALT 67
+#define IMX94_CLK_DRAMAPB 68
+#define IMX94_CLK_DISPAPB 69
+#define IMX94_CLK_DISPAXI 70
+#define IMX94_CLK_DISPPIX 71
+#define IMX94_CLK_HSIOACSCAN480M 72
+#define IMX94_CLK_HSIOACSCAN80M 73
+#define IMX94_CLK_HSIO 74
+#define IMX94_CLK_HSIOPCIEAUX 75
+#define IMX94_CLK_HSIOPCIETEST160M 76
+#define IMX94_CLK_HSIOPCIETEST400M 77
+#define IMX94_CLK_HSIOPCIETEST500M 78
+#define IMX94_CLK_HSIOPCIETEST50M 79
+#define IMX94_CLK_HSIOUSBTEST60M 80
+#define IMX94_CLK_BUSM70 81
+#define IMX94_CLK_M70 82
+#define IMX94_CLK_M70SYSTICK 83
+#define IMX94_CLK_BUSM71 84
+#define IMX94_CLK_M71 85
+#define IMX94_CLK_M71SYSTICK 86
+#define IMX94_CLK_BUSNETCMIX 87
+#define IMX94_CLK_ECAT 88
+#define IMX94_CLK_ENET 89
+#define IMX94_CLK_ENETPHYTEST200M 90
+#define IMX94_CLK_ENETPHYTEST500M 91
+#define IMX94_CLK_ENETPHYTEST667M 92
+#define IMX94_CLK_ENETREF 93
+#define IMX94_CLK_ENETTIMER1 94
+#define IMX94_CLK_ENETTIMER2 95
+#define IMX94_CLK_ENETTIMER3 96
+#define IMX94_CLK_FLEXIO3 97
+#define IMX94_CLK_FLEXIO4 98
+#define IMX94_CLK_M33SYNC 99
+#define IMX94_CLK_M33SYNCSYSTICK 100
+#define IMX94_CLK_MAC0 101
+#define IMX94_CLK_MAC1 102
+#define IMX94_CLK_MAC2 103
+#define IMX94_CLK_MAC3 104
+#define IMX94_CLK_MAC4 105
+#define IMX94_CLK_MAC5 106
+#define IMX94_CLK_NOCAPB 107
+#define IMX94_CLK_NOC 108
+#define IMX94_CLK_NPUAPB 109
+#define IMX94_CLK_NPU 110
+#define IMX94_CLK_CCMCKO1 111
+#define IMX94_CLK_CCMCKO2 112
+#define IMX94_CLK_CCMCKO3 113
+#define IMX94_CLK_CCMCKO4 114
+#define IMX94_CLK_BISS 115
+#define IMX94_CLK_BUSWAKEUP 116
+#define IMX94_CLK_CAN2 117
+#define IMX94_CLK_CAN3 118
+#define IMX94_CLK_CAN4 119
+#define IMX94_CLK_CAN5 120
+#define IMX94_CLK_ENDAT21 121
+#define IMX94_CLK_ENDAT22 122
+#define IMX94_CLK_ENDAT31FAST 123
+#define IMX94_CLK_ENDAT31SLOW 124
+#define IMX94_CLK_FLEXIO1 125
+#define IMX94_CLK_FLEXIO2 126
+#define IMX94_CLK_GPT2 127
+#define IMX94_CLK_GPT3 128
+#define IMX94_CLK_GPT4 129
+#define IMX94_CLK_HIPERFACE1 130
+#define IMX94_CLK_HIPERFACE1SYNC 131
+#define IMX94_CLK_HIPERFACE2 132
+#define IMX94_CLK_HIPERFACE2SYNC 133
+#define IMX94_CLK_I3C2SLOW 134
+#define IMX94_CLK_LPI2C3 135
+#define IMX94_CLK_LPI2C4 136
+#define IMX94_CLK_LPI2C5 137
+#define IMX94_CLK_LPI2C6 138
+#define IMX94_CLK_LPI2C7 139
+#define IMX94_CLK_LPI2C8 140
+#define IMX94_CLK_LPSPI3 141
+#define IMX94_CLK_LPSPI4 142
+#define IMX94_CLK_LPSPI5 143
+#define IMX94_CLK_LPSPI6 144
+#define IMX94_CLK_LPSPI7 145
+#define IMX94_CLK_LPSPI8 146
+#define IMX94_CLK_LPTMR2 147
+#define IMX94_CLK_LPUART10 148
+#define IMX94_CLK_LPUART11 149
+#define IMX94_CLK_LPUART12 150
+#define IMX94_CLK_LPUART3 151
+#define IMX94_CLK_LPUART4 152
+#define IMX94_CLK_LPUART5 153
+#define IMX94_CLK_LPUART6 154
+#define IMX94_CLK_LPUART7 155
+#define IMX94_CLK_LPUART8 156
+#define IMX94_CLK_LPUART9 157
+#define IMX94_CLK_SAI2 158
+#define IMX94_CLK_SAI3 159
+#define IMX94_CLK_SAI4 160
+#define IMX94_CLK_SWOTRACE 161
+#define IMX94_CLK_TPM4 162
+#define IMX94_CLK_TPM5 163
+#define IMX94_CLK_TPM6 164
+#define IMX94_CLK_USBPHYBURUNIN 165
+#define IMX94_CLK_USDHC1 166
+#define IMX94_CLK_USDHC2 167
+#define IMX94_CLK_USDHC3 168
+#define IMX94_CLK_V2XPK 169
+#define IMX94_CLK_WAKEUPAXI 170
+#define IMX94_CLK_XSPISLVROOT 171
+#define IMX94_CLK_XSPI1 172
+#define IMX94_CLK_XSPI2 173
+#define IMX94_CLK_SEL_EXT 174
+#define IMX94_CLK_SEL_A55C0 175
+#define IMX94_CLK_SEL_A55C1 176
+#define IMX94_CLK_SEL_A55C2 177
+#define IMX94_CLK_SEL_A55C3 178
+#define IMX94_CLK_SEL_A55P 179
+#define IMX94_CLK_SEL_DRAM 180
+#define IMX94_CLK_SEL_TEMPSENSE 181
+#define IMX94_CLK_NPU_CGC 182
+
+#endif /* __IMX94_CLOCK_H */
diff --git a/arch/arm64/boot/dts/freescale/imx94-pinfunc.h b/arch/arm64/boot/dts/freescale/imx94-pinfunc.h
new file mode 100644
index 000000000000..00255db89185
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx94-pinfunc.h
@@ -0,0 +1,1570 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright 2024-2025 NXP
+ */
+
+#ifndef __DTS_IMX94_PINFUNC_H
+#define __DTS_IMX94_PINFUNC_H
+
+/* Drive Strength */
+#define IMX94_DSE_X1 0x2
+#define IMX94_DSE_X2 0x6
+#define IMX94_DSE_X3 0xe
+#define IMX94_DSE_X4 0x1e
+#define IMX94_DSE_X5 0x3e
+#define IMX94_DSE_X6 0x7e
+
+/* Slew Rate */
+#define IMX94_FSEL_FAST 0x180
+#define IMX94_FSEL_SLOW 0x100
+
+/* Pull Up */
+#define IMX94_PU_ENABLE 0x200
+#define IMX94_PU_DISABLE 0x0
+
+/* Pull Down */
+#define IMX94_PD_ENABLE 0x400
+#define IMX94_PD_DISABLE 0x0
+
+/* Open Drain */
+#define IMX94_OD_ENABLE 0x800
+#define IMX94_OD_DISABLE 0x0
+
+/* Schmitt trigger */
+#define IMX94_HYS_SCHMITT 0x1000
+#define IMX94_HYS_NO_SCHMITT 0x0
+
+/*
+ * The pin function ID is a tuple of <mux_reg conf_reg input_reg mux_mode input_val>
+ */
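+
+/*
+ * Usage sketch (hypothetical board snippet): in a board .dts each pad
+ * define above provides the first five cells of an fsl,pins entry and
+ * the sixth cell is the pad configuration, which can be composed from
+ * the macros above, e.g. IMX94_PD_ENABLE | IMX94_FSEL_SLOW | IMX94_DSE_X4
+ * evaluates to 0x51e:
+ *
+ *	pinctrl_example: examplegrp {
+ *		fsl,pins = <
+ *			IMX94_PAD_GPIO_IO00__GPIO2_IO0		0x51e
+ *		>;
+ *	};
+ */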
+#define IMX94_PAD_DAP_TDI__JTAG_MUX_TDI 0x0000 0x0304 0x092c 0x00 0x00
+#define IMX94_PAD_DAP_TDI__MQS2_LEFT 0x0000 0x0304 0x0000 0x01 0x00
+#define IMX94_PAD_DAP_TDI__ECAT_LED_ERR 0x0000 0x0304 0x0000 0x02 0x00
+#define IMX94_PAD_DAP_TDI__CAN2_TX 0x0000 0x0304 0x0000 0x03 0x00
+#define IMX94_PAD_DAP_TDI__SINC_FILTER_GLUE3_BREAK 0x0000 0x0304 0x0000 0x04 0x00
+#define IMX94_PAD_DAP_TDI__GPIO4_IO4 0x0000 0x0304 0x0000 0x05 0x00
+#define IMX94_PAD_DAP_TDI__LPUART5_RX 0x0000 0x0304 0x07bc 0x06 0x00
+#define IMX94_PAD_DAP_TDI__XBAR1_XBAR_INOUT26 0x0000 0x0304 0x0000 0x07 0x00
+
+#define IMX94_PAD_DAP_TMS_SWDIO__JTAG_MUX_TMS 0x0004 0x0308 0x0930 0x00 0x00
+#define IMX94_PAD_DAP_TMS_SWDIO__CAN5_TX 0x0004 0x0308 0x0000 0x01 0x00
+#define IMX94_PAD_DAP_TMS_SWDIO__GPT_MUX_INOUT10 0x0004 0x0308 0x0000 0x02 0x00
+#define IMX94_PAD_DAP_TMS_SWDIO__LPUART8_TX 0x0004 0x0308 0x07dc 0x03 0x00
+#define IMX94_PAD_DAP_TMS_SWDIO__SINC3_MOD_CLK1 0x0004 0x0308 0x0000 0x04 0x00
+#define IMX94_PAD_DAP_TMS_SWDIO__GPIO4_IO5 0x0004 0x0308 0x0000 0x05 0x00
+#define IMX94_PAD_DAP_TMS_SWDIO__LPUART5_RTS_B 0x0004 0x0308 0x0000 0x06 0x00
+#define IMX94_PAD_DAP_TMS_SWDIO__XBAR1_XBAR_INOUT27 0x0004 0x0308 0x0000 0x07 0x00
+
+#define IMX94_PAD_DAP_TCLK_SWCLK__JTAG_MUX_TCK 0x0008 0x030c 0x0928 0x00 0x00
+#define IMX94_PAD_DAP_TCLK_SWCLK__CAN5_RX 0x0008 0x030c 0x0688 0x01 0x00
+#define IMX94_PAD_DAP_TCLK_SWCLK__GPT_MUX_INOUT11 0x0008 0x030c 0x0000 0x02 0x00
+#define IMX94_PAD_DAP_TCLK_SWCLK__LPUART8_RX 0x0008 0x030c 0x07d8 0x03 0x00
+#define IMX94_PAD_DAP_TCLK_SWCLK__SINC3_MOD_CLK0 0x0008 0x030c 0x0000 0x04 0x00
+#define IMX94_PAD_DAP_TCLK_SWCLK__GPIO4_IO6 0x0008 0x030c 0x0000 0x05 0x00
+#define IMX94_PAD_DAP_TCLK_SWCLK__LPUART5_CTS_B 0x0008 0x030c 0x07b8 0x06 0x00
+#define IMX94_PAD_DAP_TCLK_SWCLK__XBAR1_XBAR_INOUT28 0x0008 0x030c 0x0000 0x07 0x00
+
+#define IMX94_PAD_DAP_TDO_TRACESWO__JTAG_MUX_TDO 0x000c 0x0310 0x0000 0x00 0x00
+#define IMX94_PAD_DAP_TDO_TRACESWO__MQS2_RIGHT 0x000c 0x0310 0x0000 0x01 0x00
+#define IMX94_PAD_DAP_TDO_TRACESWO__ECAT_RESET_OUT 0x000c 0x0310 0x0000 0x02 0x00
+#define IMX94_PAD_DAP_TDO_TRACESWO__CAN2_RX 0x000c 0x0310 0x067c 0x03 0x00
+#define IMX94_PAD_DAP_TDO_TRACESWO__SINC3_MOD_CLK2 0x000c 0x0310 0x0000 0x04 0x00
+#define IMX94_PAD_DAP_TDO_TRACESWO__GPIO4_IO7 0x000c 0x0310 0x0000 0x05 0x00
+#define IMX94_PAD_DAP_TDO_TRACESWO__LPUART5_TX 0x000c 0x0310 0x07c0 0x06 0x00
+#define IMX94_PAD_DAP_TDO_TRACESWO__XBAR1_XBAR_INOUT29 0x000c 0x0310 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO00__GPIO2_IO0 0x0010 0x0314 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO00__I3C2_PUR 0x0010 0x0314 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO00__XBAR1_XBAR_INOUT39 0x0010 0x0314 0x08d4 0x02 0x00
+#define IMX94_PAD_GPIO_IO00__I3C2_PUR_B 0x0010 0x0314 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO00__LPSPI6_PCS0 0x0010 0x0314 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO00__LPUART5_TX 0x0010 0x0314 0x07c0 0x05 0x01
+#define IMX94_PAD_GPIO_IO00__LPI2C5_SDA 0x0010 0x0314 0x0740 0x06 0x00
+#define IMX94_PAD_GPIO_IO00__FLEXIO1_FLEXIO0 0x0010 0x0314 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO01__GPIO2_IO1 0x0014 0x0318 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO01__I3C2_SCL 0x0014 0x0318 0x0720 0x01 0x00
+#define IMX94_PAD_GPIO_IO01__XBAR1_XBAR_INOUT40 0x0014 0x0318 0x08d8 0x02 0x00
+#define IMX94_PAD_GPIO_IO01__EWM_OUT_B 0x0014 0x0318 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO01__LPSPI6_SIN 0x0014 0x0318 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO01__LPUART5_RX 0x0014 0x0318 0x07bc 0x05 0x01
+#define IMX94_PAD_GPIO_IO01__LPI2C5_SCL 0x0014 0x0318 0x073c 0x06 0x00
+#define IMX94_PAD_GPIO_IO01__FLEXIO1_FLEXIO1 0x0014 0x0318 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO02__GPIO2_IO2 0x0018 0x031c 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO02__I3C2_SDA 0x0018 0x031c 0x0724 0x01 0x00
+#define IMX94_PAD_GPIO_IO02__XBAR1_XBAR_INOUT41 0x0018 0x031c 0x08dc 0x02 0x00
+#define IMX94_PAD_GPIO_IO02__GPT_MUX_INOUT1 0x0018 0x031c 0x0700 0x03 0x00
+#define IMX94_PAD_GPIO_IO02__LPSPI6_SOUT 0x0018 0x031c 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO02__LPUART5_CTS_B 0x0018 0x031c 0x07b8 0x05 0x01
+#define IMX94_PAD_GPIO_IO02__LPI2C6_SDA 0x0018 0x031c 0x074c 0x06 0x00
+#define IMX94_PAD_GPIO_IO02__FLEXIO1_FLEXIO2 0x0018 0x031c 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO03__GPIO2_IO3 0x001c 0x0320 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO03__EWM_OUT_B 0x001c 0x0320 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO03__XBAR1_XBAR_INOUT42 0x001c 0x0320 0x08e0 0x02 0x00
+#define IMX94_PAD_GPIO_IO03__GPT_MUX_INOUT4 0x001c 0x0320 0x0708 0x03 0x00
+#define IMX94_PAD_GPIO_IO03__LPSPI6_SCK 0x001c 0x0320 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO03__LPUART5_RTS_B 0x001c 0x0320 0x0000 0x05 0x00
+#define IMX94_PAD_GPIO_IO03__LPI2C6_SCL 0x001c 0x0320 0x0748 0x06 0x00
+#define IMX94_PAD_GPIO_IO03__FLEXIO1_FLEXIO3 0x001c 0x0320 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO04__GPIO2_IO4 0x0020 0x0324 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO04__TPM3_CH0 0x0020 0x0324 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO04__PDM_CLK 0x0020 0x0324 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO04__GPT_MUX_INOUT5 0x0020 0x0324 0x070c 0x03 0x00
+#define IMX94_PAD_GPIO_IO04__LPSPI7_PCS0 0x0020 0x0324 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO04__LPUART6_TX 0x0020 0x0324 0x07cc 0x05 0x00
+#define IMX94_PAD_GPIO_IO04__LPI2C6_SDA 0x0020 0x0324 0x074c 0x06 0x01
+#define IMX94_PAD_GPIO_IO04__FLEXIO1_FLEXIO4 0x0020 0x0324 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO05__GPIO2_IO5 0x0024 0x0328 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO05__TPM4_CH0 0x0024 0x0328 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO05__PDM_BIT_STREAM0 0x0024 0x0328 0x0610 0x02 0x00
+#define IMX94_PAD_GPIO_IO05__GPT_MUX_INOUT7 0x0024 0x0328 0x0714 0x03 0x00
+#define IMX94_PAD_GPIO_IO05__LPSPI7_SIN 0x0024 0x0328 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO05__LPUART6_RX 0x0024 0x0328 0x07c8 0x05 0x00
+#define IMX94_PAD_GPIO_IO05__LPI2C6_SCL 0x0024 0x0328 0x0748 0x06 0x01
+#define IMX94_PAD_GPIO_IO05__FLEXIO1_FLEXIO5 0x0024 0x0328 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO06__GPIO2_IO6 0x0028 0x032c 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO06__TPM5_CH0 0x0028 0x032c 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO06__PDM_BIT_STREAM1 0x0028 0x032c 0x0614 0x02 0x00
+#define IMX94_PAD_GPIO_IO06__GPT_MUX_INOUT8 0x0028 0x032c 0x0718 0x03 0x00
+#define IMX94_PAD_GPIO_IO06__LPSPI7_SOUT 0x0028 0x032c 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO06__LPUART6_CTS_B 0x0028 0x032c 0x07c4 0x05 0x00
+#define IMX94_PAD_GPIO_IO06__LPI2C7_SDA 0x0028 0x032c 0x0754 0x06 0x00
+#define IMX94_PAD_GPIO_IO06__FLEXIO1_FLEXIO6 0x0028 0x032c 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO07__GPIO2_IO7 0x002c 0x0330 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO07__LPSPI3_PCS1 0x002c 0x0330 0x0768 0x01 0x00
+#define IMX94_PAD_GPIO_IO07__XBAR1_XBAR_INOUT43 0x002c 0x0330 0x08e4 0x02 0x00
+#define IMX94_PAD_GPIO_IO07__GPT_MUX_INOUT3 0x002c 0x0330 0x0704 0x03 0x00
+#define IMX94_PAD_GPIO_IO07__LPSPI7_SCK 0x002c 0x0330 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO07__LPUART6_RTS_B 0x002c 0x0330 0x0000 0x05 0x00
+#define IMX94_PAD_GPIO_IO07__LPI2C7_SCL 0x002c 0x0330 0x0750 0x06 0x00
+#define IMX94_PAD_GPIO_IO07__FLEXIO1_FLEXIO7 0x002c 0x0330 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO08__GPIO2_IO8 0x0030 0x0334 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO08__LPSPI3_PCS0 0x0030 0x0334 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO08__USDHC2_WP 0x0030 0x0334 0x0854 0x02 0x00
+#define IMX94_PAD_GPIO_IO08__GPT_MUX_INOUT2 0x0030 0x0334 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO08__TPM6_CH0 0x0030 0x0334 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO08__LPUART7_TX 0x0030 0x0334 0x07d4 0x05 0x00
+#define IMX94_PAD_GPIO_IO08__LPI2C7_SDA 0x0030 0x0334 0x0754 0x06 0x01
+#define IMX94_PAD_GPIO_IO08__FLEXIO1_FLEXIO8 0x0030 0x0334 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO09__GPIO2_IO9 0x0034 0x0338 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO09__LPSPI3_SIN 0x0034 0x0338 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO09__XBAR1_XBAR_INOUT44 0x0034 0x0338 0x08e8 0x02 0x00
+#define IMX94_PAD_GPIO_IO09__GPT_MUX_INOUT0 0x0034 0x0338 0x06fc 0x03 0x00
+#define IMX94_PAD_GPIO_IO09__TPM3_EXTCLK 0x0034 0x0338 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO09__LPUART7_RX 0x0034 0x0338 0x07d0 0x05 0x00
+#define IMX94_PAD_GPIO_IO09__LPI2C7_SCL 0x0034 0x0338 0x0750 0x06 0x01
+#define IMX94_PAD_GPIO_IO09__FLEXIO1_FLEXIO9 0x0034 0x0338 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO10__GPIO2_IO10 0x0038 0x033c 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO10__LPSPI3_SOUT 0x0038 0x033c 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO10__XBAR1_XBAR_INOUT45 0x0038 0x033c 0x08ec 0x02 0x00
+#define IMX94_PAD_GPIO_IO10__GPT_MUX_INOUT6 0x0038 0x033c 0x0710 0x03 0x00
+#define IMX94_PAD_GPIO_IO10__TPM4_EXTCLK 0x0038 0x033c 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO10__LPUART7_CTS_B 0x0038 0x033c 0x0000 0x05 0x00
+#define IMX94_PAD_GPIO_IO10__LPI2C8_SDA 0x0038 0x033c 0x075c 0x06 0x00
+#define IMX94_PAD_GPIO_IO10__FLEXIO1_FLEXIO10 0x0038 0x033c 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO11__GPIO2_IO11 0x003c 0x0340 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO11__LPSPI3_SCK 0x003c 0x0340 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO11__XBAR1_XBAR_INOUT46 0x003c 0x0340 0x08f0 0x02 0x00
+#define IMX94_PAD_GPIO_IO11__GPT_MUX_INOUT9 0x003c 0x0340 0x071c 0x03 0x00
+#define IMX94_PAD_GPIO_IO11__TPM5_EXTCLK 0x003c 0x0340 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO11__LPUART7_RTS_B 0x003c 0x0340 0x0000 0x05 0x00
+#define IMX94_PAD_GPIO_IO11__LPI2C8_SCL 0x003c 0x0340 0x0758 0x06 0x00
+#define IMX94_PAD_GPIO_IO11__FLEXIO1_FLEXIO11 0x003c 0x0340 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO12__GPIO2_IO12 0x0040 0x0344 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO12__TPM3_CH2 0x0040 0x0344 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO12__PDM_BIT_STREAM2 0x0040 0x0344 0x0618 0x02 0x00
+#define IMX94_PAD_GPIO_IO12__FLEXIO1_FLEXIO12 0x0040 0x0344 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO12__LPSPI8_PCS0 0x0040 0x0344 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO12__LPUART8_TX 0x0040 0x0344 0x07dc 0x05 0x01
+#define IMX94_PAD_GPIO_IO12__LPI2C8_SDA 0x0040 0x0344 0x075c 0x06 0x01
+
+#define IMX94_PAD_GPIO_IO13__GPIO2_IO13 0x0044 0x0348 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO13__TPM4_CH2 0x0044 0x0348 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO13__PDM_BIT_STREAM3 0x0044 0x0348 0x061c 0x02 0x00
+#define IMX94_PAD_GPIO_IO13__XBAR1_XBAR_INOUT47 0x0044 0x0348 0x08f4 0x03 0x00
+#define IMX94_PAD_GPIO_IO13__LPSPI8_SIN 0x0044 0x0348 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO13__LPUART8_RX 0x0044 0x0348 0x07d8 0x05 0x01
+#define IMX94_PAD_GPIO_IO13__LPI2C8_SCL 0x0044 0x0348 0x0758 0x06 0x01
+#define IMX94_PAD_GPIO_IO13__FLEXIO1_FLEXIO13 0x0044 0x0348 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO14__GPIO2_IO14 0x0048 0x034c 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO14__LPUART10_CTS_B 0x0048 0x034c 0x078c 0x01 0x00
+#define IMX94_PAD_GPIO_IO14__ECAT_SDA 0x0048 0x034c 0x062c 0x02 0x00
+#define IMX94_PAD_GPIO_IO14__XBAR1_XBAR_INOUT48 0x0048 0x034c 0x08f8 0x03 0x00
+#define IMX94_PAD_GPIO_IO14__LPSPI8_SOUT 0x0048 0x034c 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO14__LPUART8_CTS_B 0x0048 0x034c 0x0000 0x05 0x00
+#define IMX94_PAD_GPIO_IO14__LPUART4_TX 0x0048 0x034c 0x07b4 0x06 0x00
+#define IMX94_PAD_GPIO_IO14__FLEXIO1_FLEXIO14 0x0048 0x034c 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO15__GPIO2_IO15 0x004c 0x0350 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO15__LPUART10_RTS_B 0x004c 0x0350 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO15__ECAT_SCL 0x004c 0x0350 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO15__XBAR1_XBAR_INOUT8 0x004c 0x0350 0x087c 0x03 0x00
+#define IMX94_PAD_GPIO_IO15__LPSPI8_SCK 0x004c 0x0350 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO15__LPUART8_RTS_B 0x004c 0x0350 0x0000 0x05 0x00
+#define IMX94_PAD_GPIO_IO15__LPUART4_RX 0x004c 0x0350 0x07b0 0x06 0x00
+#define IMX94_PAD_GPIO_IO15__FLEXIO1_FLEXIO15 0x004c 0x0350 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO16__GPIO2_IO16 0x0050 0x0354 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO16__LPI2C3_SDA 0x0050 0x0354 0x0730 0x01 0x00
+#define IMX94_PAD_GPIO_IO16__CAN3_TX 0x0050 0x0354 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO16__EWM_OUT_B 0x0050 0x0354 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO16__LPUART11_TX 0x0050 0x0354 0x079c 0x04 0x00
+#define IMX94_PAD_GPIO_IO16__GPT_MUX_INOUT0 0x0050 0x0354 0x06fc 0x05 0x01
+#define IMX94_PAD_GPIO_IO16__FLEXPWM4_PWMA0 0x0050 0x0354 0x06d4 0x06 0x00
+#define IMX94_PAD_GPIO_IO16__XBAR1_XBAR_INOUT30 0x0050 0x0354 0x08b0 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO17__GPIO2_IO17 0x0054 0x0358 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO17__LPI2C3_SCL 0x0054 0x0358 0x072c 0x01 0x00
+#define IMX94_PAD_GPIO_IO17__CAN3_RX 0x0054 0x0358 0x0680 0x02 0x00
+#define IMX94_PAD_GPIO_IO17__LPI2C6_HREQ 0x0054 0x0358 0x0744 0x03 0x00
+#define IMX94_PAD_GPIO_IO17__LPUART11_RX 0x0054 0x0358 0x0798 0x04 0x00
+#define IMX94_PAD_GPIO_IO17__GPT_MUX_INOUT3 0x0054 0x0358 0x0704 0x05 0x01
+#define IMX94_PAD_GPIO_IO17__FLEXPWM4_PWMB0 0x0054 0x0358 0x06e4 0x06 0x00
+#define IMX94_PAD_GPIO_IO17__XBAR1_XBAR_INOUT31 0x0054 0x0358 0x08b4 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO18__GPIO2_IO18 0x0058 0x035c 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO18__LPI2C4_SDA 0x0058 0x035c 0x0738 0x01 0x00
+#define IMX94_PAD_GPIO_IO18__LPUART10_TX 0x0058 0x035c 0x0794 0x02 0x00
+#define IMX94_PAD_GPIO_IO18__LPI2C7_HREQ 0x0058 0x035c 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO18__LPUART11_CTS_B 0x0058 0x035c 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO18__GPT_MUX_INOUT6 0x0058 0x035c 0x0710 0x05 0x01
+#define IMX94_PAD_GPIO_IO18__FLEXPWM4_PWMA1 0x0058 0x035c 0x06d8 0x06 0x00
+#define IMX94_PAD_GPIO_IO18__XBAR1_XBAR_INOUT32 0x0058 0x035c 0x08b8 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO19__GPIO2_IO19 0x005c 0x0360 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO19__LPI2C4_SCL 0x005c 0x0360 0x0734 0x01 0x00
+#define IMX94_PAD_GPIO_IO19__LPUART10_RX 0x005c 0x0360 0x0790 0x02 0x00
+#define IMX94_PAD_GPIO_IO19__LPI2C8_HREQ 0x005c 0x0360 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO19__LPUART11_RTS_B 0x005c 0x0360 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO19__GPT_MUX_INOUT9 0x005c 0x0360 0x071c 0x05 0x01
+#define IMX94_PAD_GPIO_IO19__FLEXPWM4_PWMB1 0x005c 0x0360 0x06e8 0x06 0x00
+#define IMX94_PAD_GPIO_IO19__XBAR1_XBAR_INOUT33 0x005c 0x0360 0x08bc 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO20__GPIO2_IO20 0x0060 0x0364 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO20__PCIE1_CLKREQ_B 0x0060 0x0364 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO20__LPUART6_TX 0x0060 0x0364 0x07cc 0x02 0x01
+#define IMX94_PAD_GPIO_IO20__LPI2C8_SDA 0x0060 0x0364 0x075c 0x03 0x02
+#define IMX94_PAD_GPIO_IO20__LPSPI4_PCS2 0x0060 0x0364 0x076c 0x04 0x00
+#define IMX94_PAD_GPIO_IO20__LPSPI3_PCS1 0x0060 0x0364 0x0768 0x05 0x01
+#define IMX94_PAD_GPIO_IO20__FLEXPWM4_PWMA2 0x0060 0x0364 0x06dc 0x06 0x00
+#define IMX94_PAD_GPIO_IO20__XBAR1_XBAR_INOUT34 0x0060 0x0364 0x08c0 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO21__GPIO2_IO21 0x0064 0x0368 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO21__SAI2_TX_BCLK 0x0064 0x0368 0x07f8 0x01 0x00
+#define IMX94_PAD_GPIO_IO21__LPUART6_RX 0x0064 0x0368 0x07c8 0x02 0x01
+#define IMX94_PAD_GPIO_IO21__LPI2C8_SCL 0x0064 0x0368 0x0758 0x03 0x02
+#define IMX94_PAD_GPIO_IO21__LPSPI4_PCS1 0x0064 0x0368 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO21__LPI2C3_HREQ 0x0064 0x0368 0x0728 0x05 0x00
+#define IMX94_PAD_GPIO_IO21__FLEXPWM4_PWMB2 0x0064 0x0368 0x06ec 0x06 0x00
+#define IMX94_PAD_GPIO_IO21__XBAR1_XBAR_INOUT35 0x0064 0x0368 0x08c4 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO22__GPIO2_IO22 0x0068 0x036c 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO22__SAI2_MCLK 0x0068 0x036c 0x07e8 0x01 0x00
+#define IMX94_PAD_GPIO_IO22__LPUART6_CTS_B 0x0068 0x036c 0x07c4 0x02 0x01
+#define IMX94_PAD_GPIO_IO22__XBAR1_XBAR_INOUT9 0x0068 0x036c 0x0880 0x03 0x00
+#define IMX94_PAD_GPIO_IO22__LPSPI4_PCS0 0x0068 0x036c 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO22__FLEXPWM3_PWMA3 0x0068 0x036c 0x06b4 0x05 0x00
+#define IMX94_PAD_GPIO_IO22__FLEXPWM4_PWMA3 0x0068 0x036c 0x06e0 0x06 0x00
+#define IMX94_PAD_GPIO_IO22__SINC4_EMCLK0 0x0068 0x036c 0x082c 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO23__GPIO2_IO23 0x006c 0x0370 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO23__PCIE2_CLKREQ_B 0x006c 0x0370 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO23__LPUART6_RTS_B 0x006c 0x0370 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO23__XBAR1_XBAR_INOUT10 0x006c 0x0370 0x0884 0x03 0x00
+#define IMX94_PAD_GPIO_IO23__LPSPI4_SIN 0x006c 0x0370 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO23__FLEXPWM3_PWMB3 0x006c 0x0370 0x06c4 0x05 0x00
+#define IMX94_PAD_GPIO_IO23__FLEXPWM4_PWMB3 0x006c 0x0370 0x06f0 0x06 0x00
+#define IMX94_PAD_GPIO_IO23__SINC4_EMBIT0 0x006c 0x0370 0x0820 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO24__GPIO2_IO24 0x0070 0x0374 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO24__SAI2_RX_BCLK 0x0070 0x0374 0x07ec 0x01 0x00
+#define IMX94_PAD_GPIO_IO24__LPUART11_TX 0x0070 0x0374 0x079c 0x02 0x01
+#define IMX94_PAD_GPIO_IO24__LPI2C3_HREQ 0x0070 0x0374 0x0728 0x03 0x01
+#define IMX94_PAD_GPIO_IO24__LPSPI4_SOUT 0x0070 0x0374 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO24__SINC_FILTER_GLUE2_BREAK 0x0070 0x0374 0x0000 0x05 0x00
+#define IMX94_PAD_GPIO_IO24__FLEXPWM4_PWMX0 0x0070 0x0374 0x06f4 0x06 0x00
+#define IMX94_PAD_GPIO_IO24__XBAR1_XBAR_INOUT36 0x0070 0x0374 0x08c8 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO25__GPIO2_IO25 0x0074 0x0378 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO25__SAI2_RX_SYNC 0x0074 0x0378 0x07f4 0x01 0x00
+#define IMX94_PAD_GPIO_IO25__LPUART11_RX 0x0074 0x0378 0x0798 0x02 0x01
+#define IMX94_PAD_GPIO_IO25__LPI2C4_HREQ 0x0074 0x0378 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO25__LPSPI4_SCK 0x0074 0x0378 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO25__SINC_FILTER_GLUE1_BREAK 0x0074 0x0378 0x0000 0x05 0x00
+#define IMX94_PAD_GPIO_IO25__FLEXPWM4_PWMX1 0x0074 0x0378 0x06f8 0x06 0x00
+#define IMX94_PAD_GPIO_IO25__XBAR1_XBAR_INOUT37 0x0074 0x0378 0x08cc 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO26__GPIO2_IO26 0x0078 0x037c 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO26__LPI2C5_SCL 0x0078 0x037c 0x073c 0x01 0x01
+#define IMX94_PAD_GPIO_IO26__LPUART12_TX 0x0078 0x037c 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO26__GPT_MUX_INOUT4 0x0078 0x037c 0x0708 0x03 0x01
+#define IMX94_PAD_GPIO_IO26__FLEXIO1_3_1_FLEXIO0 0x0078 0x037c 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO26__SAI2_RX_DATA0 0x0078 0x037c 0x07f0 0x05 0x00
+#define IMX94_PAD_GPIO_IO26__FLEXPWM4_PWMX2 0x0078 0x037c 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO26__XBAR1_XBAR_INOUT38 0x0078 0x037c 0x08d0 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO27__GPIO2_IO27 0x007c 0x0380 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO27__LPI2C5_SDA 0x007c 0x0380 0x0740 0x01 0x01
+#define IMX94_PAD_GPIO_IO27__LPUART12_RX 0x007c 0x0380 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO27__GPT_MUX_INOUT5 0x007c 0x0380 0x070c 0x03 0x01
+#define IMX94_PAD_GPIO_IO27__FLEXIO1_3_1_FLEXIO1 0x007c 0x0380 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO27__SAI2_TX_DATA0 0x007c 0x0380 0x0000 0x05 0x00
+#define IMX94_PAD_GPIO_IO27__FLEXPWM4_PWMX3 0x007c 0x0380 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO27__SINC4_MOD_CLK0 0x007c 0x0380 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO28__GPIO2_IO28 0x0080 0x0384 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO28__LPI2C6_SCL 0x0080 0x0384 0x0748 0x01 0x02
+#define IMX94_PAD_GPIO_IO28__LPUART12_CTS_B 0x0080 0x0384 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO28__GPT_MUX_INOUT7 0x0080 0x0384 0x0714 0x03 0x01
+#define IMX94_PAD_GPIO_IO28__FLEXIO1_3_1_FLEXIO2 0x0080 0x0384 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO28__SAI2_TX_SYNC 0x0080 0x0384 0x07fc 0x05 0x00
+#define IMX94_PAD_GPIO_IO28__FLEXPWM1_PWMX2 0x0080 0x0384 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO28__XBAR1_XBAR_INOUT4 0x0080 0x0384 0x086c 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO29__GPIO2_IO29 0x0084 0x0388 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO29__LPI2C6_SDA 0x0084 0x0388 0x074c 0x01 0x02
+#define IMX94_PAD_GPIO_IO29__LPUART12_RTS_B 0x0084 0x0388 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO29__I3C2_SDA 0x0084 0x0388 0x0724 0x03 0x01
+#define IMX94_PAD_GPIO_IO29__FLEXIO1_3_1_FLEXIO3 0x0084 0x0388 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO29__FLEXPWM3_PWMX0 0x0084 0x0388 0x06c8 0x05 0x00
+#define IMX94_PAD_GPIO_IO29__FLEXPWM1_PWMX3 0x0084 0x0388 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO29__XBAR1_XBAR_INOUT5 0x0084 0x0388 0x0870 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO30__GPIO2_IO30 0x0088 0x038c 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO30__LPIT2_TRIGGER0 0x0088 0x038c 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO30__LPUART3_TX 0x0088 0x038c 0x07a8 0x02 0x00
+#define IMX94_PAD_GPIO_IO30__I3C2_PUR 0x0088 0x038c 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO30__FLEXIO1_3_1_FLEXIO4 0x0088 0x038c 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO30__I3C2_PUR_B 0x0088 0x038c 0x0000 0x05 0x00
+#define IMX94_PAD_GPIO_IO30__FLEXPWM2_PWMX2 0x0088 0x038c 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO30__XBAR1_XBAR_INOUT6 0x0088 0x038c 0x0874 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO31__GPIO2_IO31 0x008c 0x0390 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO31__LPIT2_TRIGGER1 0x008c 0x0390 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO31__LPUART3_RX 0x008c 0x0390 0x07a4 0x02 0x00
+#define IMX94_PAD_GPIO_IO31__I3C2_SCL 0x008c 0x0390 0x0720 0x03 0x01
+#define IMX94_PAD_GPIO_IO31__FLEXIO1_3_1_FLEXIO5 0x008c 0x0390 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO31__FLEXPWM3_PWMX1 0x008c 0x0390 0x06cc 0x05 0x00
+#define IMX94_PAD_GPIO_IO31__FLEXPWM2_PWMX3 0x008c 0x0390 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO31__XBAR1_XBAR_INOUT7 0x008c 0x0390 0x0878 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO32__GPIO3_IO0 0x0090 0x0394 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO32__LPIT3_TRIGGER0 0x0090 0x0394 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO32__LPUART7_TX 0x0090 0x0394 0x07d4 0x02 0x01
+#define IMX94_PAD_GPIO_IO32__GPT_MUX_INOUT8 0x0090 0x0394 0x0718 0x03 0x01
+#define IMX94_PAD_GPIO_IO32__FLEXIO1_3_1_FLEXIO6 0x0090 0x0394 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO32__FLEXPWM3_PWMA0 0x0090 0x0394 0x06a8 0x05 0x00
+#define IMX94_PAD_GPIO_IO32__SINC_FILTER_GLUE2_BREAK 0x0090 0x0394 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO32__XBAR1_XBAR_INOUT8 0x0090 0x0394 0x087c 0x07 0x01
+
+#define IMX94_PAD_GPIO_IO33__GPIO3_IO1 0x0094 0x0398 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO33__LPIT3_TRIGGER1 0x0094 0x0398 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO33__LPUART7_RX 0x0094 0x0398 0x07d0 0x02 0x01
+#define IMX94_PAD_GPIO_IO33__GPT_MUX_INOUT1 0x0094 0x0398 0x0700 0x03 0x01
+#define IMX94_PAD_GPIO_IO33__FLEXIO1_3_1_FLEXIO7 0x0094 0x0398 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO33__FLEXPWM3_PWMB0 0x0094 0x0398 0x06b8 0x05 0x00
+#define IMX94_PAD_GPIO_IO33__SINC_FILTER_GLUE1_BREAK 0x0094 0x0398 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO33__XBAR1_XBAR_INOUT9 0x0094 0x0398 0x0880 0x07 0x01
+
+#define IMX94_PAD_GPIO_IO34__GPIO3_IO2 0x0098 0x039c 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO34__LPI2C7_SDA 0x0098 0x039c 0x0754 0x01 0x02
+#define IMX94_PAD_GPIO_IO34__CAN2_TX 0x0098 0x039c 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO34__ECAT_SDA 0x0098 0x039c 0x062c 0x03 0x01
+#define IMX94_PAD_GPIO_IO34__FLEXIO1_3_1_FLEXIO8 0x0098 0x039c 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO34__FLEXPWM3_PWMA1 0x0098 0x039c 0x06ac 0x05 0x00
+#define IMX94_PAD_GPIO_IO34__FLEXPWM1_PWMX0 0x0098 0x039c 0x0698 0x06 0x00
+#define IMX94_PAD_GPIO_IO34__XBAR1_XBAR_INOUT10 0x0098 0x039c 0x0884 0x07 0x01
+
+#define IMX94_PAD_GPIO_IO35__GPIO3_IO3 0x009c 0x03a0 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO35__LPI2C7_SCL 0x009c 0x03a0 0x0750 0x01 0x02
+#define IMX94_PAD_GPIO_IO35__CAN2_RX 0x009c 0x03a0 0x067c 0x02 0x01
+#define IMX94_PAD_GPIO_IO35__ECAT_SCL 0x009c 0x03a0 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO35__FLEXIO1_3_1_FLEXIO9 0x009c 0x03a0 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO35__FLEXPWM3_PWMB1 0x009c 0x03a0 0x06bc 0x05 0x00
+#define IMX94_PAD_GPIO_IO35__FLEXPWM1_PWMX1 0x009c 0x03a0 0x069c 0x06 0x00
+#define IMX94_PAD_GPIO_IO35__XBAR1_XBAR_INOUT11 0x009c 0x03a0 0x0888 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO36__GPIO3_IO4 0x00a0 0x03a4 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO36__LPI2C8_SDA 0x00a0 0x03a4 0x075c 0x01 0x03
+#define IMX94_PAD_GPIO_IO36__CAN4_TX 0x00a0 0x03a4 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO36__USDHC2_WP 0x00a0 0x03a4 0x0854 0x03 0x01
+#define IMX94_PAD_GPIO_IO36__FLEXIO1_3_1_FLEXIO10 0x00a0 0x03a4 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO36__FLEXPWM3_PWMA2 0x00a0 0x03a4 0x06b0 0x05 0x00
+#define IMX94_PAD_GPIO_IO36__FLEXPWM2_PWMX0 0x00a0 0x03a4 0x06a0 0x06 0x00
+#define IMX94_PAD_GPIO_IO36__XBAR1_XBAR_INOUT12 0x00a0 0x03a4 0x088c 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO37__GPIO3_IO5 0x00a4 0x03a8 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO37__LPI2C8_SCL 0x00a4 0x03a8 0x0758 0x01 0x03
+#define IMX94_PAD_GPIO_IO37__CAN4_RX 0x00a4 0x03a8 0x0684 0x02 0x00
+#define IMX94_PAD_GPIO_IO37__LPI2C5_HREQ 0x00a4 0x03a8 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO37__FLEXIO1_3_1_FLEXIO11 0x00a4 0x03a8 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO37__FLEXPWM3_PWMB2 0x00a4 0x03a8 0x06c0 0x05 0x00
+#define IMX94_PAD_GPIO_IO37__FLEXPWM2_PWMX1 0x00a4 0x03a8 0x06a4 0x06 0x00
+#define IMX94_PAD_GPIO_IO37__XBAR1_XBAR_INOUT13 0x00a4 0x03a8 0x0890 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO38__GPIO3_IO6 0x00a8 0x03ac 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO38__NETC_1588MUX_INOUT0 0x00a8 0x03ac 0x064c 0x01 0x00
+#define IMX94_PAD_GPIO_IO38__LPI2C3_SDA 0x00a8 0x03ac 0x0730 0x02 0x01
+#define IMX94_PAD_GPIO_IO38__LPIT3_TRIGGER2 0x00a8 0x03ac 0x0764 0x03 0x00
+#define IMX94_PAD_GPIO_IO38__FLEXIO1_3_1_FLEXIO12 0x00a8 0x03ac 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO38__LPUART3_CTS_B 0x00a8 0x03ac 0x07a0 0x05 0x00
+#define IMX94_PAD_GPIO_IO38__FLEXPWM3_PWMX0 0x00a8 0x03ac 0x06c8 0x06 0x01
+#define IMX94_PAD_GPIO_IO38__XBAR1_XBAR_INOUT14 0x00a8 0x03ac 0x0894 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO39__GPIO3_IO7 0x00ac 0x03b0 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO39__NETC_1588MUX_INOUT1 0x00ac 0x03b0 0x0650 0x01 0x00
+#define IMX94_PAD_GPIO_IO39__LPI2C3_SCL 0x00ac 0x03b0 0x072c 0x02 0x01
+#define IMX94_PAD_GPIO_IO39__LPIT2_TRIGGER2 0x00ac 0x03b0 0x0760 0x03 0x00
+#define IMX94_PAD_GPIO_IO39__FLEXIO1_3_1_FLEXIO13 0x00ac 0x03b0 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO39__LPUART3_RTS_B 0x00ac 0x03b0 0x0000 0x05 0x00
+#define IMX94_PAD_GPIO_IO39__FLEXPWM3_PWMX1 0x00ac 0x03b0 0x06cc 0x06 0x01
+#define IMX94_PAD_GPIO_IO39__XBAR1_XBAR_INOUT15 0x00ac 0x03b0 0x0898 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO40__GPIO3_IO8 0x00b0 0x03b4 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO40__NETC_1588MUX_INOUT2 0x00b0 0x03b4 0x0654 0x01 0x00
+#define IMX94_PAD_GPIO_IO40__LPI2C7_SDA 0x00b0 0x03b4 0x0754 0x02 0x03
+#define IMX94_PAD_GPIO_IO40__LPUART4_TX 0x00b0 0x03b4 0x07b4 0x03 0x01
+#define IMX94_PAD_GPIO_IO40__FLEXIO1_3_1_FLEXIO14 0x00b0 0x03b4 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO40__FLEXPWM3_PWMX2 0x00b0 0x03b4 0x06d0 0x05 0x00
+#define IMX94_PAD_GPIO_IO40__FLEXPWM4_PWMX0 0x00b0 0x03b4 0x06f4 0x06 0x01
+#define IMX94_PAD_GPIO_IO40__XBAR1_XBAR_INOUT16 0x00b0 0x03b4 0x089c 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO41__GPIO3_IO9 0x00b4 0x03b8 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO41__NETC_1588MUX_INOUT3 0x00b4 0x03b8 0x0658 0x01 0x00
+#define IMX94_PAD_GPIO_IO41__LPI2C7_SCL 0x00b4 0x03b8 0x0750 0x02 0x03
+#define IMX94_PAD_GPIO_IO41__LPUART4_RX 0x00b4 0x03b8 0x07b0 0x03 0x01
+#define IMX94_PAD_GPIO_IO41__FLEXIO1_3_1_FLEXIO15 0x00b4 0x03b8 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO41__LPI2C6_HREQ 0x00b4 0x03b8 0x0744 0x05 0x01
+#define IMX94_PAD_GPIO_IO41__FLEXPWM4_PWMX1 0x00b4 0x03b8 0x06f8 0x06 0x01
+#define IMX94_PAD_GPIO_IO41__XBAR1_XBAR_INOUT17 0x00b4 0x03b8 0x08a0 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO42__GPIO3_IO10 0x00b8 0x03bc 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO42__SAI3_TX_BCLK 0x00b8 0x03bc 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO42__PDM_BIT_STREAM2 0x00b8 0x03bc 0x0618 0x02 0x01
+#define IMX94_PAD_GPIO_IO42__XBAR1_XBAR_INOUT11 0x00b8 0x03bc 0x0888 0x03 0x01
+#define IMX94_PAD_GPIO_IO42__LPUART3_TX 0x00b8 0x03bc 0x07a8 0x04 0x01
+#define IMX94_PAD_GPIO_IO42__LPSPI4_PCS2 0x00b8 0x03bc 0x076c 0x05 0x01
+#define IMX94_PAD_GPIO_IO42__LPUART4_CTS_B 0x00b8 0x03bc 0x07ac 0x06 0x00
+#define IMX94_PAD_GPIO_IO42__SINC4_EMCLK1 0x00b8 0x03bc 0x0830 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO43__GPIO3_IO11 0x00bc 0x03c0 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO43__SAI3_MCLK 0x00bc 0x03c0 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO43__XBAR1_XBAR_INOUT12 0x00bc 0x03c0 0x088c 0x03 0x01
+#define IMX94_PAD_GPIO_IO43__LPUART3_RX 0x00bc 0x03c0 0x07a4 0x04 0x01
+#define IMX94_PAD_GPIO_IO43__LPSPI3_PCS1 0x00bc 0x03c0 0x0768 0x05 0x02
+#define IMX94_PAD_GPIO_IO43__LPUART4_RTS_B 0x00bc 0x03c0 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO43__SINC4_EMBIT1 0x00bc 0x03c0 0x0824 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO44__GPIO3_IO12 0x00c0 0x03c4 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO44__SAI3_RX_BCLK 0x00c0 0x03c4 0x0800 0x01 0x00
+#define IMX94_PAD_GPIO_IO44__PDM_BIT_STREAM1 0x00c0 0x03c4 0x0614 0x02 0x01
+#define IMX94_PAD_GPIO_IO44__LPUART9_TX 0x00c0 0x03c4 0x07e4 0x03 0x00
+#define IMX94_PAD_GPIO_IO44__LPSPI5_PCS0 0x00c0 0x03c4 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO44__LPI2C3_SDA 0x00c0 0x03c4 0x0730 0x05 0x02
+#define IMX94_PAD_GPIO_IO44__TPM5_CH2 0x00c0 0x03c4 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO44__SINC_FILTER_GLUE4_BREAK 0x00c0 0x03c4 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO45__GPIO3_IO13 0x00c4 0x03c8 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO45__SAI3_RX_SYNC 0x00c4 0x03c8 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO45__PDM_BIT_STREAM3 0x00c4 0x03c8 0x061c 0x02 0x01
+#define IMX94_PAD_GPIO_IO45__LPUART9_RX 0x00c4 0x03c8 0x07e0 0x03 0x00
+#define IMX94_PAD_GPIO_IO45__LPSPI5_SIN 0x00c4 0x03c8 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO45__LPI2C3_SCL 0x00c4 0x03c8 0x072c 0x05 0x02
+#define IMX94_PAD_GPIO_IO45__TPM6_CH2 0x00c4 0x03c8 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO45__SAI3_TX_DATA0 0x00c4 0x03c8 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO46__GPIO3_IO14 0x00c8 0x03cc 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO46__SAI3_RX_DATA0 0x00c8 0x03cc 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO46__PDM_BIT_STREAM0 0x00c8 0x03cc 0x0610 0x02 0x01
+#define IMX94_PAD_GPIO_IO46__LPUART9_CTS_B 0x00c8 0x03cc 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO46__LPSPI5_SOUT 0x00c8 0x03cc 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO46__LPI2C4_SDA 0x00c8 0x03cc 0x0738 0x05 0x01
+#define IMX94_PAD_GPIO_IO46__TPM3_CH1 0x00c8 0x03cc 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO46__EWM_OUT_B 0x00c8 0x03cc 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO47__GPIO3_IO15 0x00cc 0x03d0 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO47__SAI3_TX_DATA0 0x00cc 0x03d0 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO47__PDM_CLK 0x00cc 0x03d0 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO47__LPUART9_RTS_B 0x00cc 0x03d0 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO47__LPSPI5_SCK 0x00cc 0x03d0 0x0000 0x04 0x00
+#define IMX94_PAD_GPIO_IO47__LPI2C4_SCL 0x00cc 0x03d0 0x0734 0x05 0x01
+#define IMX94_PAD_GPIO_IO47__TPM4_CH1 0x00cc 0x03d0 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO47__SAI3_RX_BCLK 0x00cc 0x03d0 0x0800 0x07 0x01
+
+#define IMX94_PAD_GPIO_IO48__GPIO3_IO16 0x00d0 0x03d4 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO48__USDHC3_CLK 0x00d0 0x03d4 0x0000 0x01 0x00
+#define IMX94_PAD_GPIO_IO48__CAN5_TX 0x00d0 0x03d4 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO48__LPUART10_TX 0x00d0 0x03d4 0x0794 0x03 0x01
+#define IMX94_PAD_GPIO_IO48__TPM5_CH1 0x00d0 0x03d4 0x0840 0x04 0x00
+#define IMX94_PAD_GPIO_IO48__TPM6_EXTCLK 0x00d0 0x03d4 0x0850 0x05 0x00
+#define IMX94_PAD_GPIO_IO48__LPI2C5_SDA 0x00d0 0x03d4 0x0740 0x06 0x02
+#define IMX94_PAD_GPIO_IO48__SINC4_EMCLK2 0x00d0 0x03d4 0x0834 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO49__GPIO3_IO17 0x00d4 0x03d8 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO49__USDHC3_CMD 0x00d4 0x03d8 0x0858 0x01 0x00
+#define IMX94_PAD_GPIO_IO49__CAN5_RX 0x00d4 0x03d8 0x0688 0x02 0x01
+#define IMX94_PAD_GPIO_IO49__LPUART10_RX 0x00d4 0x03d8 0x0790 0x03 0x01
+#define IMX94_PAD_GPIO_IO49__TPM6_CH1 0x00d4 0x03d8 0x0848 0x04 0x00
+#define IMX94_PAD_GPIO_IO49__XBAR1_XBAR_INOUT13 0x00d4 0x03d8 0x0890 0x05 0x01
+#define IMX94_PAD_GPIO_IO49__LPI2C5_SCL 0x00d4 0x03d8 0x073c 0x06 0x02
+#define IMX94_PAD_GPIO_IO49__SINC4_EMBIT2 0x00d4 0x03d8 0x0828 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO50__GPIO3_IO18 0x00d8 0x03dc 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO50__USDHC3_DATA0 0x00d8 0x03dc 0x085c 0x01 0x00
+#define IMX94_PAD_GPIO_IO50__XBAR1_XBAR_INOUT14 0x00d8 0x03dc 0x0894 0x02 0x01
+#define IMX94_PAD_GPIO_IO50__LPUART10_CTS_B 0x00d8 0x03dc 0x078c 0x03 0x01
+#define IMX94_PAD_GPIO_IO50__TPM3_CH3 0x00d8 0x03dc 0x0838 0x04 0x00
+#define IMX94_PAD_GPIO_IO50__JTAG_MUX_TDO 0x00d8 0x03dc 0x0000 0x05 0x00
+#define IMX94_PAD_GPIO_IO50__LPSPI6_PCS1 0x00d8 0x03dc 0x0774 0x06 0x00
+#define IMX94_PAD_GPIO_IO50__SINC4_EMCLK3 0x00d8 0x03dc 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO51__GPIO3_IO19 0x00dc 0x03e0 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO51__USDHC3_DATA1 0x00dc 0x03e0 0x0860 0x01 0x00
+#define IMX94_PAD_GPIO_IO51__CAN2_TX 0x00dc 0x03e0 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO51__LPUART10_RTS_B 0x00dc 0x03e0 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO51__TPM4_CH3 0x00dc 0x03e0 0x083c 0x04 0x00
+#define IMX94_PAD_GPIO_IO51__JTAG_MUX_TCK 0x00dc 0x03e0 0x0928 0x05 0x01
+#define IMX94_PAD_GPIO_IO51__LPSPI7_PCS1 0x00dc 0x03e0 0x0778 0x06 0x00
+#define IMX94_PAD_GPIO_IO51__SINC4_EMBIT3 0x00dc 0x03e0 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO52__GPIO3_IO20 0x00e0 0x03e4 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO52__USDHC3_DATA2 0x00e0 0x03e4 0x0864 0x01 0x00
+#define IMX94_PAD_GPIO_IO52__PDM_BIT_STREAM1 0x00e0 0x03e4 0x0614 0x02 0x02
+#define IMX94_PAD_GPIO_IO52__LPSPI4_PCS2 0x00e0 0x03e4 0x076c 0x03 0x02
+#define IMX94_PAD_GPIO_IO52__TPM5_CH3 0x00e0 0x03e4 0x0844 0x04 0x00
+#define IMX94_PAD_GPIO_IO52__JTAG_MUX_TDI 0x00e0 0x03e4 0x092c 0x05 0x01
+#define IMX94_PAD_GPIO_IO52__LPSPI8_PCS1 0x00e0 0x03e4 0x077c 0x06 0x00
+#define IMX94_PAD_GPIO_IO52__SAI3_TX_SYNC 0x00e0 0x03e4 0x0804 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO53__GPIO3_IO21 0x00e4 0x03e8 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO53__USDHC3_DATA3 0x00e4 0x03e8 0x0868 0x01 0x00
+#define IMX94_PAD_GPIO_IO53__CAN2_RX 0x00e4 0x03e8 0x067c 0x02 0x02
+#define IMX94_PAD_GPIO_IO53__LPSPI3_PCS1 0x00e4 0x03e8 0x0768 0x03 0x03
+#define IMX94_PAD_GPIO_IO53__TPM6_CH3 0x00e4 0x03e8 0x084c 0x04 0x00
+#define IMX94_PAD_GPIO_IO53__JTAG_MUX_TMS 0x00e4 0x03e8 0x0930 0x05 0x01
+#define IMX94_PAD_GPIO_IO53__LPSPI5_PCS1 0x00e4 0x03e8 0x0770 0x06 0x00
+#define IMX94_PAD_GPIO_IO53__SINC4_MOD_CLK1 0x00e4 0x03e8 0x0000 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO54__GPIO3_IO22 0x00e8 0x03ec 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO54__NETC_1588MUX_INOUT4 0x00e8 0x03ec 0x065c 0x01 0x00
+#define IMX94_PAD_GPIO_IO54__CAN4_TX 0x00e8 0x03ec 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO54__LPIT3_TRIGGER2 0x00e8 0x03ec 0x0764 0x03 0x01
+#define IMX94_PAD_GPIO_IO54__LPSPI6_PCS1 0x00e8 0x03ec 0x0774 0x04 0x01
+#define IMX94_PAD_GPIO_IO54__TPM3_CH3 0x00e8 0x03ec 0x0838 0x05 0x01
+#define IMX94_PAD_GPIO_IO54__SINC3_EMCLK0 0x00e8 0x03ec 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO54__XBAR1_XBAR_INOUT18 0x00e8 0x03ec 0x08a4 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO55__GPIO3_IO23 0x00ec 0x03f0 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO55__NETC_1588MUX_INOUT5 0x00ec 0x03f0 0x0660 0x01 0x00
+#define IMX94_PAD_GPIO_IO55__CAN4_RX 0x00ec 0x03f0 0x0684 0x02 0x01
+#define IMX94_PAD_GPIO_IO55__LPIT2_TRIGGER2 0x00ec 0x03f0 0x0760 0x03 0x01
+#define IMX94_PAD_GPIO_IO55__LPSPI7_PCS1 0x00ec 0x03f0 0x0778 0x04 0x01
+#define IMX94_PAD_GPIO_IO55__TPM4_CH3 0x00ec 0x03f0 0x083c 0x05 0x01
+#define IMX94_PAD_GPIO_IO55__SINC3_EMBIT0 0x00ec 0x03f0 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO55__XBAR1_XBAR_INOUT19 0x00ec 0x03f0 0x08a8 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO56__GPIO3_IO24 0x00f0 0x03f4 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO56__NETC_1588MUX_INOUT6 0x00f0 0x03f4 0x0664 0x01 0x00
+#define IMX94_PAD_GPIO_IO56__CAN5_TX 0x00f0 0x03f4 0x0000 0x02 0x00
+#define IMX94_PAD_GPIO_IO56__LPIT3_TRIGGER3 0x00f0 0x03f4 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO56__LPSPI8_PCS1 0x00f0 0x03f4 0x077c 0x04 0x01
+#define IMX94_PAD_GPIO_IO56__SAI3_TX_SYNC 0x00f0 0x03f4 0x0804 0x05 0x01
+#define IMX94_PAD_GPIO_IO56__SINC3_EMCLK1 0x00f0 0x03f4 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO56__XBAR1_XBAR_INOUT20 0x00f0 0x03f4 0x08ac 0x07 0x00
+
+#define IMX94_PAD_GPIO_IO57__GPIO3_IO25 0x00f4 0x03f8 0x0000 0x00 0x00
+#define IMX94_PAD_GPIO_IO57__NETC_1588MUX_INOUT7 0x00f4 0x03f8 0x0668 0x01 0x00
+#define IMX94_PAD_GPIO_IO57__CAN5_RX 0x00f4 0x03f8 0x0688 0x02 0x02
+#define IMX94_PAD_GPIO_IO57__LPIT2_TRIGGER3 0x00f4 0x03f8 0x0000 0x03 0x00
+#define IMX94_PAD_GPIO_IO57__LPSPI5_PCS1 0x00f4 0x03f8 0x0770 0x04 0x01
+#define IMX94_PAD_GPIO_IO57__TPM6_CH3 0x00f4 0x03f8 0x084c 0x05 0x01
+#define IMX94_PAD_GPIO_IO57__SINC3_EMBIT1 0x00f4 0x03f8 0x0000 0x06 0x00
+#define IMX94_PAD_GPIO_IO57__ENET_REF_CLK_ROOT 0x00f4 0x03f8 0x0000 0x07 0x00
+
+#define IMX94_PAD_CCM_CLKO1__CLKO_1 0x00f8 0x03fc 0x0000 0x00 0x00
+#define IMX94_PAD_CCM_CLKO1__NETC_1588MUX_INOUT8 0x00f8 0x03fc 0x066c 0x01 0x00
+#define IMX94_PAD_CCM_CLKO1__LPUART9_TX 0x00f8 0x03fc 0x07e4 0x02 0x01
+#define IMX94_PAD_CCM_CLKO1__ECAT_LED_RUN 0x00f8 0x03fc 0x0000 0x03 0x00
+#define IMX94_PAD_CCM_CLKO1__TPM6_EXTCLK 0x00f8 0x03fc 0x0850 0x04 0x01
+#define IMX94_PAD_CCM_CLKO1__GPIO4_IO0 0x00f8 0x03fc 0x0000 0x05 0x00
+#define IMX94_PAD_CCM_CLKO1__SINC3_EMCLK2 0x00f8 0x03fc 0x0000 0x06 0x00
+#define IMX94_PAD_CCM_CLKO1__XBAR1_XBAR_INOUT22 0x00f8 0x03fc 0x0000 0x07 0x00
+
+#define IMX94_PAD_CCM_CLKO2__CLKO_2 0x00fc 0x0400 0x0000 0x00 0x00
+#define IMX94_PAD_CCM_CLKO2__NETC_1588MUX_INOUT9 0x00fc 0x0400 0x0670 0x01 0x00
+#define IMX94_PAD_CCM_CLKO2__LPUART9_RX 0x00fc 0x0400 0x07e0 0x02 0x01
+#define IMX94_PAD_CCM_CLKO2__ECAT_LED_ERR 0x00fc 0x0400 0x0000 0x03 0x00
+#define IMX94_PAD_CCM_CLKO2__TPM5_CH1 0x00fc 0x0400 0x0840 0x04 0x01
+#define IMX94_PAD_CCM_CLKO2__GPIO4_IO1 0x00fc 0x0400 0x0000 0x05 0x00
+#define IMX94_PAD_CCM_CLKO2__SINC3_EMBIT2 0x00fc 0x0400 0x0000 0x06 0x00
+#define IMX94_PAD_CCM_CLKO2__XBAR1_XBAR_INOUT23 0x00fc 0x0400 0x0000 0x07 0x00
+
+#define IMX94_PAD_CCM_CLKO3__CLKO_3 0x0100 0x0404 0x0000 0x00 0x00
+#define IMX94_PAD_CCM_CLKO3__NETC_1588MUX_INOUT10 0x0100 0x0404 0x0674 0x01 0x00
+#define IMX94_PAD_CCM_CLKO3__CAN3_TX 0x0100 0x0404 0x0000 0x02 0x00
+#define IMX94_PAD_CCM_CLKO3__ECAT_LED_STATE_RUN 0x0100 0x0404 0x0000 0x03 0x00
+#define IMX94_PAD_CCM_CLKO3__TPM6_CH1 0x0100 0x0404 0x0848 0x04 0x01
+#define IMX94_PAD_CCM_CLKO3__GPIO4_IO2 0x0100 0x0404 0x0000 0x05 0x00
+#define IMX94_PAD_CCM_CLKO3__SINC3_EMCLK3 0x0100 0x0404 0x0000 0x06 0x00
+#define IMX94_PAD_CCM_CLKO3__ENET_REF_CLK_ROOT 0x0100 0x0404 0x0000 0x07 0x00
+
+#define IMX94_PAD_CCM_CLKO4__CLKO_4 0x0104 0x0408 0x0000 0x00 0x00
+#define IMX94_PAD_CCM_CLKO4__NETC_1588MUX_INOUT11 0x0104 0x0408 0x0000 0x01 0x00
+#define IMX94_PAD_CCM_CLKO4__CAN3_RX 0x0104 0x0408 0x0680 0x02 0x01
+#define IMX94_PAD_CCM_CLKO4__ECAT_RESET_OUT 0x0104 0x0408 0x0000 0x03 0x00
+#define IMX94_PAD_CCM_CLKO4__TPM5_CH3 0x0104 0x0408 0x0844 0x04 0x01
+#define IMX94_PAD_CCM_CLKO4__GPIO4_IO3 0x0104 0x0408 0x0000 0x05 0x00
+#define IMX94_PAD_CCM_CLKO4__SINC3_EMBIT3 0x0104 0x0408 0x0000 0x06 0x00
+#define IMX94_PAD_CCM_CLKO4__XBAR1_XBAR_INOUT25 0x0104 0x0408 0x0000 0x07 0x00
+
+#define IMX94_PAD_ETH2_MDC_GPIO1__NETC_EMDC 0x0108 0x040c 0x0000 0x00 0x00
+#define IMX94_PAD_ETH2_MDC_GPIO1__NETC_ETH2_SLV_MDC 0x0108 0x040c 0x0000 0x01 0x00
+#define IMX94_PAD_ETH2_MDC_GPIO1__I3C2_SCL 0x0108 0x040c 0x0720 0x02 0x02
+#define IMX94_PAD_ETH2_MDC_GPIO1__USB1_OTG_ID 0x0108 0x040c 0x0000 0x03 0x00
+#define IMX94_PAD_ETH2_MDC_GPIO1__FLEXIO2_FLEXIO0 0x0108 0x040c 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_MDC_GPIO1__GPIO6_IO0 0x0108 0x040c 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_MDC_GPIO1__FLEXPWM2_PWMX0 0x0108 0x040c 0x06a0 0x06 0x01
+#define IMX94_PAD_ETH2_MDC_GPIO1__XBAR1_XBAR_INOUT30 0x0108 0x040c 0x08b0 0x07 0x01
+
+#define IMX94_PAD_ETH2_MDIO_GPIO2__NETC_EMDIO 0x010c 0x0410 0x0678 0x00 0x00
+#define IMX94_PAD_ETH2_MDIO_GPIO2__NETC_ETH2_SLV_MDIO 0x010c 0x0410 0x0000 0x01 0x00
+#define IMX94_PAD_ETH2_MDIO_GPIO2__I3C2_SDA 0x010c 0x0410 0x0724 0x02 0x02
+#define IMX94_PAD_ETH2_MDIO_GPIO2__USB1_OTG_PWR 0x010c 0x0410 0x0000 0x03 0x00
+#define IMX94_PAD_ETH2_MDIO_GPIO2__FLEXIO2_FLEXIO1 0x010c 0x0410 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_MDIO_GPIO2__GPIO6_IO1 0x010c 0x0410 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_MDIO_GPIO2__FLEXPWM2_PWMX1 0x010c 0x0410 0x06a4 0x06 0x01
+#define IMX94_PAD_ETH2_MDIO_GPIO2__XBAR1_XBAR_INOUT31 0x010c 0x0410 0x08b4 0x07 0x01
+
+#define IMX94_PAD_ETH2_TXD3__NETC_PINMUX_ETH2_TXD3 0x0110 0x0414 0x0000 0x00 0x00
+#define IMX94_PAD_ETH2_TXD3__LPUART3_DCD_B 0x0110 0x0414 0x0000 0x01 0x00
+#define IMX94_PAD_ETH2_TXD3__CAN2_TX 0x0110 0x0414 0x0000 0x02 0x00
+#define IMX94_PAD_ETH2_TXD3__USB2_OTG_ID 0x0110 0x0414 0x0000 0x03 0x00
+#define IMX94_PAD_ETH2_TXD3__FLEXIO2_FLEXIO2 0x0110 0x0414 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_TXD3__GPIO6_IO2 0x0110 0x0414 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_TXD3__FLEXPWM2_PWMA0 0x0110 0x0414 0x0000 0x06 0x00
+#define IMX94_PAD_ETH2_TXD3__XBAR1_XBAR_INOUT32 0x0110 0x0414 0x08b8 0x07 0x01
+
+#define IMX94_PAD_ETH2_TXD2__NETC_PINMUX_ETH2_TXD2 0x0114 0x0418 0x0000 0x00 0x00
+#define IMX94_PAD_ETH2_TXD2__ETH2_RMII_REF50_CLK 0x0114 0x0418 0x0000 0x01 0x00
+#define IMX94_PAD_ETH2_TXD2__CAN2_RX 0x0114 0x0418 0x067c 0x02 0x03
+#define IMX94_PAD_ETH2_TXD2__USB2_OTG_OC 0x0114 0x0418 0x0000 0x03 0x00
+#define IMX94_PAD_ETH2_TXD2__FLEXIO2_FLEXIO3 0x0114 0x0418 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_TXD2__GPIO6_IO3 0x0114 0x0418 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_TXD2__FLEXPWM2_PWMB0 0x0114 0x0418 0x0000 0x06 0x00
+#define IMX94_PAD_ETH2_TXD2__XBAR1_XBAR_INOUT33 0x0114 0x0418 0x08bc 0x07 0x01
+
+#define IMX94_PAD_ETH2_TXD1__NETC_PINMUX_ETH2_TXD1 0x0118 0x041c 0x0000 0x00 0x00
+#define IMX94_PAD_ETH2_TXD1__LPUART3_RTS_B 0x0118 0x041c 0x0000 0x01 0x00
+#define IMX94_PAD_ETH2_TXD1__ECAT_CLK25 0x0118 0x041c 0x0000 0x02 0x00
+#define IMX94_PAD_ETH2_TXD1__USB1_OTG_OC 0x0118 0x041c 0x0000 0x03 0x00
+#define IMX94_PAD_ETH2_TXD1__FLEXIO2_FLEXIO4 0x0118 0x041c 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_TXD1__GPIO6_IO4 0x0118 0x041c 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_TXD1__FLEXPWM2_PWMA1 0x0118 0x041c 0x0000 0x06 0x00
+#define IMX94_PAD_ETH2_TXD1__XBAR1_XBAR_INOUT34 0x0118 0x041c 0x08c0 0x07 0x01
+
+#define IMX94_PAD_ETH2_TXD0__NETC_PINMUX_ETH2_TXD0 0x011c 0x0420 0x0000 0x00 0x00
+#define IMX94_PAD_ETH2_TXD0__LPUART3_TX 0x011c 0x0420 0x07a8 0x01 0x02
+#define IMX94_PAD_ETH2_TXD0__I3C2_PUR 0x011c 0x0420 0x0000 0x02 0x00
+#define IMX94_PAD_ETH2_TXD0__I3C2_PUR_B 0x011c 0x0420 0x0000 0x03 0x00
+#define IMX94_PAD_ETH2_TXD0__FLEXIO2_FLEXIO5 0x011c 0x0420 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_TXD0__GPIO6_IO5 0x011c 0x0420 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_TXD0__FLEXPWM2_PWMB1 0x011c 0x0420 0x0000 0x06 0x00
+#define IMX94_PAD_ETH2_TXD0__XBAR1_XBAR_INOUT35 0x011c 0x0420 0x08c4 0x07 0x01
+
+#define IMX94_PAD_ETH2_TX_CTL__NETC_PINMUX_ETH2_TX_CTL 0x0120 0x0424 0x0000 0x00 0x00
+#define IMX94_PAD_ETH2_TX_CTL__LPUART3_DTR_B 0x0120 0x0424 0x0000 0x01 0x00
+#define IMX94_PAD_ETH2_TX_CTL__ECAT_LED_RUN 0x0120 0x0424 0x0000 0x02 0x00
+#define IMX94_PAD_ETH2_TX_CTL__FLEXIO2_FLEXIO6 0x0120 0x0424 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_TX_CTL__GPIO6_IO6 0x0120 0x0424 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_TX_CTL__FLEXPWM2_PWMA2 0x0120 0x0424 0x0000 0x06 0x00
+#define IMX94_PAD_ETH2_TX_CTL__XBAR1_XBAR_INOUT36 0x0120 0x0424 0x08c8 0x07 0x01
+
+#define IMX94_PAD_ETH2_TX_CLK__NETC_PINMUX_ETH2_TX_CLK 0x0124 0x0428 0x0000 0x00 0x00
+#define IMX94_PAD_ETH2_TX_CLK__ECAT_LED_ERR 0x0124 0x0428 0x0000 0x02 0x00
+#define IMX94_PAD_ETH2_TX_CLK__FLEXIO2_FLEXIO7 0x0124 0x0428 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_TX_CLK__GPIO6_IO7 0x0124 0x0428 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_TX_CLK__FLEXPWM2_PWMB2 0x0124 0x0428 0x0000 0x06 0x00
+#define IMX94_PAD_ETH2_TX_CLK__XBAR1_XBAR_INOUT37 0x0124 0x0428 0x08cc 0x07 0x01
+
+#define IMX94_PAD_ETH2_RX_CTL__NETC_PINMUX_ETH2_RX_CTL 0x0128 0x042c 0x0000 0x00 0x00
+#define IMX94_PAD_ETH2_RX_CTL__LPUART3_DSR_B 0x0128 0x042c 0x0000 0x01 0x00
+#define IMX94_PAD_ETH2_RX_CTL__ECAT_LED_STATE_RUN 0x0128 0x042c 0x0000 0x02 0x00
+#define IMX94_PAD_ETH2_RX_CTL__USB2_OTG_PWR 0x0128 0x042c 0x0000 0x03 0x00
+#define IMX94_PAD_ETH2_RX_CTL__FLEXIO2_FLEXIO8 0x0128 0x042c 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_RX_CTL__GPIO6_IO8 0x0128 0x042c 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_RX_CTL__FLEXPWM2_PWMA3 0x0128 0x042c 0x0000 0x06 0x00
+#define IMX94_PAD_ETH2_RX_CTL__SINC4_EMCLK0 0x0128 0x042c 0x082c 0x07 0x01
+
+#define IMX94_PAD_ETH2_RX_CLK__NETC_PINMUX_ETH2_RX_CLK 0x012c 0x0430 0x0000 0x00 0x00
+#define IMX94_PAD_ETH2_RX_CLK__LPUART3_RIN_B 0x012c 0x0430 0x0000 0x01 0x00
+#define IMX94_PAD_ETH2_RX_CLK__ECAT_RESET_OUT 0x012c 0x0430 0x0000 0x02 0x00
+#define IMX94_PAD_ETH2_RX_CLK__XBAR1_XBAR_INOUT38 0x012c 0x0430 0x08d0 0x03 0x01
+#define IMX94_PAD_ETH2_RX_CLK__FLEXIO2_FLEXIO9 0x012c 0x0430 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_RX_CLK__GPIO6_IO9 0x012c 0x0430 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_RX_CLK__FLEXPWM2_PWMB3 0x012c 0x0430 0x0000 0x06 0x00
+#define IMX94_PAD_ETH2_RX_CLK__SINC4_EMBIT0 0x012c 0x0430 0x0820 0x07 0x01
+
+#define IMX94_PAD_ETH2_RXD0__NETC_PINMUX_ETH2_RXD0 0x0130 0x0434 0x0000 0x00 0x00
+#define IMX94_PAD_ETH2_RXD0__LPUART3_RX 0x0130 0x0434 0x07a4 0x01 0x02
+#define IMX94_PAD_ETH2_RXD0__FLEXIO2_FLEXIO10 0x0130 0x0434 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_RXD0__GPIO6_IO10 0x0130 0x0434 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_RXD0__DIG_ENCODER2_DATA_EN 0x0130 0x0434 0x0000 0x06 0x00
+#define IMX94_PAD_ETH2_RXD0__XBAR1_XBAR_INOUT39 0x0130 0x0434 0x08d4 0x07 0x01
+
+#define IMX94_PAD_ETH2_RXD1__NETC_PINMUX_ETH2_RXD1 0x0134 0x0438 0x0000 0x00 0x00
+#define IMX94_PAD_ETH2_RXD1__LPUART3_CTS_B 0x0134 0x0438 0x07a0 0x01 0x01
+#define IMX94_PAD_ETH2_RXD1__LPTMR2_ALT0 0x0134 0x0438 0x0780 0x03 0x00
+#define IMX94_PAD_ETH2_RXD1__FLEXIO2_FLEXIO11 0x0134 0x0438 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_RXD1__GPIO6_IO11 0x0134 0x0438 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_RXD1__DIG_ENCODER2_DATA_CLK 0x0134 0x0438 0x068c 0x06 0x00
+#define IMX94_PAD_ETH2_RXD1__XBAR1_XBAR_INOUT40 0x0134 0x0438 0x08d8 0x07 0x01
+
+#define IMX94_PAD_ETH2_RXD2__NETC_PINMUX_ETH2_RXD2 0x0138 0x043c 0x0000 0x00 0x00
+#define IMX94_PAD_ETH2_RXD2__LPTMR2_ALT1 0x0138 0x043c 0x0784 0x03 0x00
+#define IMX94_PAD_ETH2_RXD2__FLEXIO2_FLEXIO12 0x0138 0x043c 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_RXD2__GPIO6_IO12 0x0138 0x043c 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_RXD2__DIG_ENCODER2_DATA_OUT 0x0138 0x043c 0x0000 0x06 0x00
+#define IMX94_PAD_ETH2_RXD2__XBAR1_XBAR_INOUT41 0x0138 0x043c 0x08dc 0x07 0x01
+
+#define IMX94_PAD_ETH2_RXD3__NETC_PINMUX_ETH2_RXD3 0x013c 0x0440 0x0000 0x00 0x00
+#define IMX94_PAD_ETH2_RXD3__LPTMR2_ALT2 0x013c 0x0440 0x0788 0x03 0x00
+#define IMX94_PAD_ETH2_RXD3__FLEXIO2_FLEXIO13 0x013c 0x0440 0x0000 0x04 0x00
+#define IMX94_PAD_ETH2_RXD3__GPIO6_IO13 0x013c 0x0440 0x0000 0x05 0x00
+#define IMX94_PAD_ETH2_RXD3__DIG_ENCODER2_DATA_IN 0x013c 0x0440 0x0690 0x06 0x00
+#define IMX94_PAD_ETH2_RXD3__XBAR1_XBAR_INOUT42 0x013c 0x0440 0x08e0 0x07 0x01
+
+#define IMX94_PAD_ETH3_MDC_GPIO1__NETC_EMDC 0x0140 0x0444 0x0000 0x00 0x00
+#define IMX94_PAD_ETH3_MDC_GPIO1__LPUART4_DCD_B 0x0140 0x0444 0x0000 0x01 0x00
+#define IMX94_PAD_ETH3_MDC_GPIO1__NETC_ETH3_SLV_MDC 0x0140 0x0444 0x0000 0x02 0x00
+#define IMX94_PAD_ETH3_MDC_GPIO1__SAI4_TX_SYNC 0x0140 0x0444 0x081c 0x03 0x00
+#define IMX94_PAD_ETH3_MDC_GPIO1__FLEXIO2_FLEXIO14 0x0140 0x0444 0x0000 0x04 0x00
+#define IMX94_PAD_ETH3_MDC_GPIO1__GPIO6_IO14 0x0140 0x0444 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_MDC_GPIO1__FLEXPWM1_PWMX0 0x0140 0x0444 0x0698 0x06 0x01
+#define IMX94_PAD_ETH3_MDC_GPIO1__SINC4_MOD_CLK0 0x0140 0x0444 0x0000 0x07 0x00
+
+#define IMX94_PAD_ETH3_MDIO_GPIO2__NETC_EMDIO 0x0144 0x0448 0x0678 0x00 0x01
+#define IMX94_PAD_ETH3_MDIO_GPIO2__LPUART4_RIN_B 0x0144 0x0448 0x0000 0x01 0x00
+#define IMX94_PAD_ETH3_MDIO_GPIO2__NETC_ETH3_SLV_MDIO 0x0144 0x0448 0x0000 0x02 0x00
+#define IMX94_PAD_ETH3_MDIO_GPIO2__SAI4_TX_BCLK 0x0144 0x0448 0x0818 0x03 0x00
+#define IMX94_PAD_ETH3_MDIO_GPIO2__FLEXIO2_FLEXIO15 0x0144 0x0448 0x0000 0x04 0x00
+#define IMX94_PAD_ETH3_MDIO_GPIO2__GPIO6_IO15 0x0144 0x0448 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_MDIO_GPIO2__FLEXPWM1_PWMX1 0x0144 0x0448 0x069c 0x06 0x01
+#define IMX94_PAD_ETH3_MDIO_GPIO2__SINC4_MOD_CLK1 0x0144 0x0448 0x0000 0x07 0x00
+
+#define IMX94_PAD_ETH3_TXD3__NETC_PINMUX_ETH3_TXD3 0x0148 0x044c 0x0000 0x00 0x00
+#define IMX94_PAD_ETH3_TXD3__XSPI_SLV_DATA7 0x0148 0x044c 0x0924 0x02 0x00
+#define IMX94_PAD_ETH3_TXD3__SAI4_TX_DATA0 0x0148 0x044c 0x0000 0x03 0x00
+#define IMX94_PAD_ETH3_TXD3__LPUART3_TX 0x0148 0x044c 0x07a8 0x04 0x03
+#define IMX94_PAD_ETH3_TXD3__GPIO6_IO16 0x0148 0x044c 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_TXD3__FLEXPWM1_PWMA0 0x0148 0x044c 0x0000 0x06 0x00
+
+#define IMX94_PAD_ETH3_TXD2__NETC_PINMUX_ETH3_TXD2 0x014c 0x0450 0x0000 0x00 0x00
+#define IMX94_PAD_ETH3_TXD2__ETH3_RMII_REF50_CLK 0x014c 0x0450 0x0000 0x01 0x00
+#define IMX94_PAD_ETH3_TXD2__XSPI_SLV_DATA6 0x014c 0x0450 0x0920 0x02 0x00
+#define IMX94_PAD_ETH3_TXD2__SAI4_RX_SYNC 0x014c 0x0450 0x0814 0x03 0x00
+#define IMX94_PAD_ETH3_TXD2__GPIO6_IO17 0x014c 0x0450 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_TXD2__FLEXPWM1_PWMB0 0x014c 0x0450 0x0000 0x06 0x00
+
+#define IMX94_PAD_ETH3_TXD1__NETC_PINMUX_ETH3_TXD1 0x0150 0x0454 0x0000 0x00 0x00
+#define IMX94_PAD_ETH3_TXD1__LPUART4_RTS_B 0x0150 0x0454 0x0000 0x01 0x00
+#define IMX94_PAD_ETH3_TXD1__XSPI_SLV_DATA5 0x0150 0x0454 0x091c 0x02 0x00
+#define IMX94_PAD_ETH3_TXD1__SAI4_RX_BCLK 0x0150 0x0454 0x080c 0x03 0x00
+#define IMX94_PAD_ETH3_TXD1__GPIO6_IO18 0x0150 0x0454 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_TXD1__FLEXPWM1_PWMA1 0x0150 0x0454 0x0000 0x06 0x00
+
+#define IMX94_PAD_ETH3_TXD0__NETC_PINMUX_ETH3_TXD0 0x0154 0x0458 0x0000 0x00 0x00
+#define IMX94_PAD_ETH3_TXD0__LPUART4_TX 0x0154 0x0458 0x07b4 0x01 0x02
+#define IMX94_PAD_ETH3_TXD0__XSPI_SLV_DATA4 0x0154 0x0458 0x0918 0x02 0x00
+#define IMX94_PAD_ETH3_TXD0__SAI4_RX_DATA0 0x0154 0x0458 0x0810 0x03 0x00
+#define IMX94_PAD_ETH3_TXD0__GPIO6_IO19 0x0154 0x0458 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_TXD0__FLEXPWM1_PWMB1 0x0154 0x0458 0x0000 0x06 0x00
+
+#define IMX94_PAD_ETH3_TX_CTL__NETC_PINMUX_ETH3_TX_CTL 0x0158 0x045c 0x0000 0x00 0x00
+#define IMX94_PAD_ETH3_TX_CTL__LPUART4_DTR_B 0x0158 0x045c 0x0000 0x01 0x00
+#define IMX94_PAD_ETH3_TX_CTL__XSPI_SLV_DQS 0x0158 0x045c 0x0900 0x02 0x00
+#define IMX94_PAD_ETH3_TX_CTL__SAI4_MCLK 0x0158 0x045c 0x0808 0x03 0x00
+#define IMX94_PAD_ETH3_TX_CTL__LPUART3_RX 0x0158 0x045c 0x07a4 0x04 0x03
+#define IMX94_PAD_ETH3_TX_CTL__GPIO6_IO20 0x0158 0x045c 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_TX_CTL__FLEXPWM1_PWMA2 0x0158 0x045c 0x0000 0x06 0x00
+
+#define IMX94_PAD_ETH3_TX_CLK__NETC_PINMUX_ETH3_TX_CLK 0x015c 0x0460 0x0000 0x00 0x00
+#define IMX94_PAD_ETH3_TX_CLK__XSPI_SLV_CLK 0x015c 0x0460 0x0904 0x02 0x00
+#define IMX94_PAD_ETH3_TX_CLK__SAI2_TX_SYNC 0x015c 0x0460 0x07fc 0x03 0x01
+#define IMX94_PAD_ETH3_TX_CLK__LPUART3_CTS_B 0x015c 0x0460 0x07a0 0x04 0x02
+#define IMX94_PAD_ETH3_TX_CLK__GPIO6_IO21 0x015c 0x0460 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_TX_CLK__FLEXPWM1_PWMB2 0x015c 0x0460 0x0000 0x06 0x00
+
+#define IMX94_PAD_ETH3_RX_CTL__NETC_PINMUX_ETH3_RX_CTL 0x0160 0x0464 0x0000 0x00 0x00
+#define IMX94_PAD_ETH3_RX_CTL__LPUART4_DSR_B 0x0160 0x0464 0x0000 0x01 0x00
+#define IMX94_PAD_ETH3_RX_CTL__XSPI_SLV_CS 0x0160 0x0464 0x08fc 0x02 0x00
+#define IMX94_PAD_ETH3_RX_CTL__SAI2_TX_BCLK 0x0160 0x0464 0x07f8 0x03 0x01
+#define IMX94_PAD_ETH3_RX_CTL__XBAR1_XBAR_INOUT43 0x0160 0x0464 0x08e4 0x04 0x01
+#define IMX94_PAD_ETH3_RX_CTL__GPIO6_IO22 0x0160 0x0464 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_RX_CTL__FLEXPWM1_PWMA3 0x0160 0x0464 0x0000 0x06 0x00
+#define IMX94_PAD_ETH3_RX_CTL__SINC4_EMCLK1 0x0160 0x0464 0x0830 0x07 0x01
+
+#define IMX94_PAD_ETH3_RX_CLK__NETC_PINMUX_ETH3_RX_CLK 0x0164 0x0468 0x0000 0x00 0x00
+#define IMX94_PAD_ETH3_RX_CLK__LPUART4_CTS_B 0x0164 0x0468 0x07ac 0x01 0x01
+#define IMX94_PAD_ETH3_RX_CLK__XSPI_SLV_DATA3 0x0164 0x0468 0x0914 0x02 0x00
+#define IMX94_PAD_ETH3_RX_CLK__SAI2_TX_DATA0 0x0164 0x0468 0x0000 0x03 0x00
+#define IMX94_PAD_ETH3_RX_CLK__XBAR1_XBAR_INOUT44 0x0164 0x0468 0x08e8 0x04 0x01
+#define IMX94_PAD_ETH3_RX_CLK__GPIO6_IO23 0x0164 0x0468 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_RX_CLK__FLEXPWM1_PWMB3 0x0164 0x0468 0x0000 0x06 0x00
+#define IMX94_PAD_ETH3_RX_CLK__SINC4_EMBIT1 0x0164 0x0468 0x0824 0x07 0x01
+
+#define IMX94_PAD_ETH3_RXD0__NETC_PINMUX_ETH3_RXD0 0x0168 0x046c 0x0000 0x00 0x00
+#define IMX94_PAD_ETH3_RXD0__LPUART4_RX 0x0168 0x046c 0x07b0 0x01 0x02
+#define IMX94_PAD_ETH3_RXD0__XSPI_SLV_DATA2 0x0168 0x046c 0x0910 0x02 0x00
+#define IMX94_PAD_ETH3_RXD0__SAI2_RX_SYNC 0x0168 0x046c 0x07f4 0x03 0x01
+#define IMX94_PAD_ETH3_RXD0__GPIO6_IO24 0x0168 0x046c 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_RXD0__DIG_ENCODER1_DATA_EN 0x0168 0x046c 0x0000 0x06 0x00
+#define IMX94_PAD_ETH3_RXD0__XBAR1_XBAR_INOUT45 0x0168 0x046c 0x08ec 0x07 0x01
+
+#define IMX94_PAD_ETH3_RXD1__NETC_PINMUX_ETH3_RXD1 0x016c 0x0470 0x0000 0x00 0x00
+#define IMX94_PAD_ETH3_RXD1__XSPI_SLV_DATA1 0x016c 0x0470 0x090c 0x02 0x00
+#define IMX94_PAD_ETH3_RXD1__SAI2_RX_BCLK 0x016c 0x0470 0x07ec 0x03 0x01
+#define IMX94_PAD_ETH3_RXD1__LPUART3_RTS_B 0x016c 0x0470 0x0000 0x04 0x00
+#define IMX94_PAD_ETH3_RXD1__GPIO6_IO25 0x016c 0x0470 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_RXD1__DIG_ENCODER1_DATA_CLK 0x016c 0x0470 0x0000 0x06 0x00
+#define IMX94_PAD_ETH3_RXD1__XBAR1_XBAR_INOUT46 0x016c 0x0470 0x08f0 0x07 0x01
+
+#define IMX94_PAD_ETH3_RXD2__NETC_PINMUX_ETH3_RXD2 0x0170 0x0474 0x0000 0x00 0x00
+#define IMX94_PAD_ETH3_RXD2__MQS2_RIGHT 0x0170 0x0474 0x0000 0x01 0x00
+#define IMX94_PAD_ETH3_RXD2__XSPI_SLV_DATA0 0x0170 0x0474 0x0908 0x02 0x00
+#define IMX94_PAD_ETH3_RXD2__SAI2_RX_DATA0 0x0170 0x0474 0x07f0 0x03 0x01
+#define IMX94_PAD_ETH3_RXD2__GPIO6_IO26 0x0170 0x0474 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_RXD2__DIG_ENCODER1_DATA_OUT 0x0170 0x0474 0x0000 0x06 0x00
+#define IMX94_PAD_ETH3_RXD2__XBAR1_XBAR_INOUT47 0x0170 0x0474 0x08f4 0x07 0x01
+
+#define IMX94_PAD_ETH3_RXD3__NETC_PINMUX_ETH3_RXD3 0x0174 0x0478 0x0000 0x00 0x00
+#define IMX94_PAD_ETH3_RXD3__MQS2_LEFT 0x0174 0x0478 0x0000 0x01 0x00
+#define IMX94_PAD_ETH3_RXD3__SAI2_MCLK 0x0174 0x0478 0x07e8 0x03 0x01
+#define IMX94_PAD_ETH3_RXD3__GPIO6_IO27 0x0174 0x0478 0x0000 0x05 0x00
+#define IMX94_PAD_ETH3_RXD3__DIG_ENCODER1_DATA_IN 0x0174 0x0478 0x0000 0x06 0x00
+#define IMX94_PAD_ETH3_RXD3__XBAR1_XBAR_INOUT48 0x0174 0x0478 0x08f8 0x07 0x01
+
+#define IMX94_PAD_ETH4_MDC_GPIO1__NETC_EMDC 0x0178 0x047c 0x0000 0x00 0x00
+#define IMX94_PAD_ETH4_MDC_GPIO1__ECAT_MDC 0x0178 0x047c 0x0000 0x01 0x00
+#define IMX94_PAD_ETH4_MDC_GPIO1__ECAT_CLK25 0x0178 0x047c 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_MDC_GPIO1__NETC_ETH4_SLV_MDC 0x0178 0x047c 0x0000 0x03 0x00
+#define IMX94_PAD_ETH4_MDC_GPIO1__FLEXIO1_3_2_FLEXIO12 0x0178 0x047c 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_MDC_GPIO1__GPIO6_IO28 0x0178 0x047c 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_MDC_GPIO1__FLEXPWM4_PWMX0 0x0178 0x047c 0x06f4 0x06 0x02
+#define IMX94_PAD_ETH4_MDC_GPIO1__SINC4_MOD_CLK2 0x0178 0x047c 0x0000 0x07 0x00
+
+#define IMX94_PAD_ETH4_MDIO_GPIO2__NETC_EMDIO 0x017c 0x0480 0x0678 0x00 0x02
+#define IMX94_PAD_ETH4_MDIO_GPIO2__ECAT_MDIO 0x017c 0x0480 0x0628 0x01 0x00
+#define IMX94_PAD_ETH4_MDIO_GPIO2__ENET_REF_CLK_ROOT 0x017c 0x0480 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_MDIO_GPIO2__NETC_ETH4_SLV_MDIO 0x017c 0x0480 0x0000 0x03 0x00
+#define IMX94_PAD_ETH4_MDIO_GPIO2__FLEXIO1_3_2_FLEXIO13 0x017c 0x0480 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_MDIO_GPIO2__GPIO6_IO29 0x017c 0x0480 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_MDIO_GPIO2__FLEXPWM4_PWMX1 0x017c 0x0480 0x06f8 0x06 0x02
+#define IMX94_PAD_ETH4_MDIO_GPIO2__SINC_FILTER_GLUE4_BREAK 0x017c 0x0480 0x0000 0x07 0x00
+
+#define IMX94_PAD_ETH4_TX_CLK__NETC_PINMUX_ETH4_TX_CLK 0x0180 0x0484 0x0648 0x00 0x00
+#define IMX94_PAD_ETH4_TX_CLK__USDHC3_CLK 0x0180 0x0484 0x0000 0x01 0x00
+#define IMX94_PAD_ETH4_TX_CLK__XSPI2_A_SCLK 0x0180 0x0484 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_TX_CLK__ECAT_LED_ERR 0x0180 0x0484 0x0000 0x03 0x00
+#define IMX94_PAD_ETH4_TX_CLK__FLEXIO1_3_2_FLEXIO0 0x0180 0x0484 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_TX_CLK__GPIO6_IO30 0x0180 0x0484 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_TX_CLK__FLEXPWM4_PWMA0 0x0180 0x0484 0x06d4 0x06 0x01
+#define IMX94_PAD_ETH4_TX_CLK__XBAR1_XBAR_INOUT30 0x0180 0x0484 0x08b0 0x07 0x02
+
+#define IMX94_PAD_ETH4_TX_CTL__NETC_PINMUX_ETH4_TX_CTL 0x0184 0x0488 0x0000 0x00 0x00
+#define IMX94_PAD_ETH4_TX_CTL__USDHC3_CMD 0x0184 0x0488 0x0858 0x01 0x01
+#define IMX94_PAD_ETH4_TX_CTL__XSPI2_A_SS0_B 0x0184 0x0488 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_TX_CTL__ECAT_RESET_OUT 0x0184 0x0488 0x0000 0x03 0x00
+#define IMX94_PAD_ETH4_TX_CTL__FLEXIO1_3_2_FLEXIO1 0x0184 0x0488 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_TX_CTL__GPIO6_IO31 0x0184 0x0488 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_TX_CTL__FLEXPWM4_PWMB0 0x0184 0x0488 0x06e4 0x06 0x01
+#define IMX94_PAD_ETH4_TX_CTL__XBAR1_XBAR_INOUT31 0x0184 0x0488 0x08b4 0x07 0x02
+
+#define IMX94_PAD_ETH4_TXD0__NETC_PINMUX_ETH4_TXD0 0x0188 0x048c 0x0000 0x00 0x00
+#define IMX94_PAD_ETH4_TXD0__USDHC3_DATA0 0x0188 0x048c 0x085c 0x01 0x01
+#define IMX94_PAD_ETH4_TXD0__XSPI2_A_DATA0 0x0188 0x048c 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_TXD0__ECAT_LED_RUN 0x0188 0x048c 0x0000 0x03 0x00
+#define IMX94_PAD_ETH4_TXD0__FLEXIO1_3_2_FLEXIO2 0x0188 0x048c 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_TXD0__GPIO7_IO0 0x0188 0x048c 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_TXD0__FLEXPWM4_PWMA1 0x0188 0x048c 0x06d8 0x06 0x01
+#define IMX94_PAD_ETH4_TXD0__XBAR1_XBAR_INOUT32 0x0188 0x048c 0x08b8 0x07 0x02
+
+#define IMX94_PAD_ETH4_TXD1__NETC_PINMUX_ETH4_TXD1 0x018c 0x0490 0x0000 0x00 0x00
+#define IMX94_PAD_ETH4_TXD1__USDHC3_DATA1 0x018c 0x0490 0x0860 0x01 0x01
+#define IMX94_PAD_ETH4_TXD1__XSPI2_A_DATA1 0x018c 0x0490 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_TXD1__ECAT_LED_STATE_RUN 0x018c 0x0490 0x0000 0x03 0x00
+#define IMX94_PAD_ETH4_TXD1__FLEXIO1_3_2_FLEXIO3 0x018c 0x0490 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_TXD1__GPIO7_IO1 0x018c 0x0490 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_TXD1__FLEXPWM4_PWMB1 0x018c 0x0490 0x06e8 0x06 0x01
+#define IMX94_PAD_ETH4_TXD1__XBAR1_XBAR_INOUT33 0x018c 0x0490 0x08bc 0x07 0x02
+
+#define IMX94_PAD_ETH4_TXD2__NETC_PINMUX_ETH4_TXD2 0x0190 0x0494 0x0000 0x00 0x00
+#define IMX94_PAD_ETH4_TXD2__USDHC3_DATA2 0x0190 0x0494 0x0864 0x01 0x01
+#define IMX94_PAD_ETH4_TXD2__XSPI2_A_DATA2 0x0190 0x0494 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_TXD2__ECAT_CLK25 0x0190 0x0494 0x0000 0x03 0x00
+#define IMX94_PAD_ETH4_TXD2__FLEXIO1_3_2_FLEXIO4 0x0190 0x0494 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_TXD2__GPIO7_IO2 0x0190 0x0494 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_TXD2__FLEXPWM4_PWMA2 0x0190 0x0494 0x06dc 0x06 0x01
+#define IMX94_PAD_ETH4_TXD2__ETH4_RMII_REF50_CLK 0x0190 0x0494 0x0000 0x07 0x00
+
+#define IMX94_PAD_ETH4_TXD3__NETC_PINMUX_ETH4_TXD3 0x0194 0x0498 0x0000 0x00 0x00
+#define IMX94_PAD_ETH4_TXD3__USDHC3_DATA3 0x0194 0x0498 0x0868 0x01 0x01
+#define IMX94_PAD_ETH4_TXD3__XSPI2_A_DATA3 0x0194 0x0498 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_TXD3__FLEXIO1_3_2_FLEXIO5 0x0194 0x0498 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_TXD3__GPIO7_IO3 0x0194 0x0498 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_TXD3__FLEXPWM4_PWMB2 0x0194 0x0498 0x06ec 0x06 0x01
+#define IMX94_PAD_ETH4_TXD3__XBAR1_XBAR_INOUT35 0x0194 0x0498 0x08c4 0x07 0x02
+
+#define IMX94_PAD_ETH4_RXD0__NETC_PINMUX_ETH4_RXD0 0x0198 0x049c 0x0638 0x00 0x00
+#define IMX94_PAD_ETH4_RXD0__XSPI2_A_DATA4 0x0198 0x049c 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_RXD0__FLEXIO1_3_2_FLEXIO6 0x0198 0x049c 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_RXD0__GPIO7_IO4 0x0198 0x049c 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_RXD0__FLEXPWM4_PWMA3 0x0198 0x049c 0x06e0 0x06 0x01
+#define IMX94_PAD_ETH4_RXD0__SINC4_EMCLK2 0x0198 0x049c 0x0834 0x07 0x01
+
+#define IMX94_PAD_ETH4_RXD1__NETC_PINMUX_ETH4_RXD1 0x019c 0x04a0 0x063c 0x00 0x00
+#define IMX94_PAD_ETH4_RXD1__XSPI2_A_DATA5 0x019c 0x04a0 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_RXD1__FLEXIO2_4_1_FLEXIO11 0x019c 0x04a0 0x0694 0x03 0x00
+#define IMX94_PAD_ETH4_RXD1__FLEXIO1_3_2_FLEXIO7 0x019c 0x04a0 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_RXD1__GPIO7_IO5 0x019c 0x04a0 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_RXD1__FLEXPWM4_PWMB3 0x019c 0x04a0 0x06f0 0x06 0x01
+#define IMX94_PAD_ETH4_RXD1__SINC4_EMBIT2 0x019c 0x04a0 0x0828 0x07 0x01
+
+#define IMX94_PAD_ETH4_RXD2__NETC_PINMUX_ETH4_RXD2 0x01a0 0x04a4 0x0640 0x00 0x00
+#define IMX94_PAD_ETH4_RXD2__XSPI2_A_DATA6 0x01a0 0x04a4 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_RXD2__FLEXIO2_4_1_FLEXIO12 0x01a0 0x04a4 0x0000 0x03 0x00
+#define IMX94_PAD_ETH4_RXD2__FLEXIO1_3_2_FLEXIO8 0x01a0 0x04a4 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_RXD2__GPIO7_IO6 0x01a0 0x04a4 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_RXD2__DIG_ENCODER2_DATA_EN 0x01a0 0x04a4 0x0000 0x06 0x00
+#define IMX94_PAD_ETH4_RXD2__XBAR1_XBAR_INOUT4 0x01a0 0x04a4 0x086c 0x07 0x01
+
+#define IMX94_PAD_ETH4_RXD3__NETC_PINMUX_ETH4_RXD3 0x01a4 0x04a8 0x0644 0x00 0x00
+#define IMX94_PAD_ETH4_RXD3__ENET_REF_CLK_ROOT 0x01a4 0x04a8 0x0000 0x01 0x00
+#define IMX94_PAD_ETH4_RXD3__XSPI2_A_DATA7 0x01a4 0x04a8 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_RXD3__FLEXIO2_4_1_FLEXIO13 0x01a4 0x04a8 0x0000 0x03 0x00
+#define IMX94_PAD_ETH4_RXD3__FLEXIO1_3_2_FLEXIO9 0x01a4 0x04a8 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_RXD3__GPIO7_IO7 0x01a4 0x04a8 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_RXD3__DIG_ENCODER2_DATA_CLK 0x01a4 0x04a8 0x068c 0x06 0x01
+#define IMX94_PAD_ETH4_RXD3__XBAR1_XBAR_INOUT5 0x01a4 0x04a8 0x0870 0x07 0x01
+
+#define IMX94_PAD_ETH4_RX_CTL__NETC_PINMUX_ETH4_RX_CTL 0x01a8 0x04ac 0x0634 0x00 0x00
+#define IMX94_PAD_ETH4_RX_CTL__XSPI2_A_SS1_B 0x01a8 0x04ac 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_RX_CTL__FLEXIO2_4_1_FLEXIO14 0x01a8 0x04ac 0x0000 0x03 0x00
+#define IMX94_PAD_ETH4_RX_CTL__FLEXIO1_3_2_FLEXIO10 0x01a8 0x04ac 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_RX_CTL__GPIO7_IO8 0x01a8 0x04ac 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_RX_CTL__DIG_ENCODER2_DATA_OUT 0x01a8 0x04ac 0x0000 0x06 0x00
+#define IMX94_PAD_ETH4_RX_CTL__XBAR1_XBAR_INOUT6 0x01a8 0x04ac 0x0874 0x07 0x01
+
+#define IMX94_PAD_ETH4_RX_CLK__NETC_PINMUX_ETH4_RX_CLK 0x01ac 0x04b0 0x0630 0x00 0x00
+#define IMX94_PAD_ETH4_RX_CLK__XSPI2_A_DQS 0x01ac 0x04b0 0x0000 0x02 0x00
+#define IMX94_PAD_ETH4_RX_CLK__FLEXIO2_4_1_FLEXIO15 0x01ac 0x04b0 0x0000 0x03 0x00
+#define IMX94_PAD_ETH4_RX_CLK__FLEXIO1_3_2_FLEXIO11 0x01ac 0x04b0 0x0000 0x04 0x00
+#define IMX94_PAD_ETH4_RX_CLK__GPIO7_IO9 0x01ac 0x04b0 0x0000 0x05 0x00
+#define IMX94_PAD_ETH4_RX_CLK__DIG_ENCODER2_DATA_IN 0x01ac 0x04b0 0x0690 0x06 0x01
+#define IMX94_PAD_ETH4_RX_CLK__XBAR1_XBAR_INOUT7 0x01ac 0x04b0 0x0878 0x07 0x01
+
+#define IMX94_PAD_ETH0_TXD0__NETC_PINMUX_ETH0_TXD0 0x01b0 0x04b4 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_TXD0__ECAT_PT0_TXD0 0x01b0 0x04b4 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_TXD0__FLEXIO4_FLEXIO0 0x01b0 0x04b4 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_TXD0__GPIO5_IO0 0x01b0 0x04b4 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_TXD1__NETC_PINMUX_ETH0_TXD1 0x01b4 0x04b8 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_TXD1__ECAT_PT0_TXD1 0x01b4 0x04b8 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_TXD1__FLEXIO4_FLEXIO1 0x01b4 0x04b8 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_TXD1__GPIO5_IO1 0x01b4 0x04b8 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_TX_EN__NETC_PINMUX_ETH0_TX_EN 0x01b8 0x04bc 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_TX_EN__ECAT_PT0_TX_EN 0x01b8 0x04bc 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_TX_EN__FLEXIO4_FLEXIO2 0x01b8 0x04bc 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_TX_EN__GPIO5_IO2 0x01b8 0x04bc 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_TX_CLK__NETC_PINMUX_ETH0_TX_CLK 0x01bc 0x04c0 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_TX_CLK__ECAT_PT0_TX_CLK 0x01bc 0x04c0 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_TX_CLK__FLEXIO4_FLEXIO3 0x01bc 0x04c0 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_TX_CLK__GPIO5_IO3 0x01bc 0x04c0 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_RXD0__NETC_PINMUX_ETH0_RXD0 0x01c0 0x04c4 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_RXD0__ECAT_PT0_RXD0 0x01c0 0x04c4 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_RXD0__FLEXIO4_FLEXIO4 0x01c0 0x04c4 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_RXD0__GPIO5_IO4 0x01c0 0x04c4 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_RXD1__NETC_PINMUX_ETH0_RXD1 0x01c4 0x04c8 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_RXD1__ECAT_PT0_RXD1 0x01c4 0x04c8 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_RXD1__FLEXIO4_FLEXIO5 0x01c4 0x04c8 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_RXD1__GPIO5_IO5 0x01c4 0x04c8 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_RX_DV__NETC_PINMUX_ETH0_RX_DV 0x01c8 0x04cc 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_RX_DV__ECAT_PT0_RX_DV 0x01c8 0x04cc 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_RX_DV__FLEXIO4_FLEXIO6 0x01c8 0x04cc 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_RX_DV__GPIO5_IO6 0x01c8 0x04cc 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_TXD2__NETC_PINMUX_ETH0_TXD2 0x01cc 0x04d0 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_TXD2__ECAT_PT0_TXD2 0x01cc 0x04d0 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_TXD2__ETH0_RMII_REF50_CLK 0x01cc 0x04d0 0x0000 0x02 0x00
+#define IMX94_PAD_ETH0_TXD2__FLEXIO4_FLEXIO7 0x01cc 0x04d0 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_TXD2__GPIO5_IO7 0x01cc 0x04d0 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_TXD3__NETC_PINMUX_ETH0_TXD3 0x01d0 0x04d4 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_TXD3__ECAT_PT0_TXD3 0x01d0 0x04d4 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_TXD3__FLEXIO4_FLEXIO8 0x01d0 0x04d4 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_TXD3__GPIO5_IO8 0x01d0 0x04d4 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_RXD2__NETC_PINMUX_ETH0_RXD2 0x01d4 0x04d8 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_RXD2__ECAT_PT0_RXD2 0x01d4 0x04d8 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_RXD2__FLEXIO4_FLEXIO9 0x01d4 0x04d8 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_RXD2__GPIO5_IO9 0x01d4 0x04d8 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_RXD3__NETC_PINMUX_ETH0_RXD3 0x01d8 0x04dc 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_RXD3__ECAT_PT0_RXD3 0x01d8 0x04dc 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_RXD3__FLEXIO4_FLEXIO10 0x01d8 0x04dc 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_RXD3__GPIO5_IO10 0x01d8 0x04dc 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_RX_CLK__NETC_PINMUX_ETH0_RX_CLK 0x01dc 0x04e0 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_RX_CLK__ECAT_PT0_RX_CLK 0x01dc 0x04e0 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_RX_CLK__FLEXIO4_FLEXIO11 0x01dc 0x04e0 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_RX_CLK__GPIO5_IO11 0x01dc 0x04e0 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_RX_ER__NETC_PINMUX_ETH0_RX_ER 0x01e0 0x04e4 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_RX_ER__ECAT_PT0_RX_ER 0x01e0 0x04e4 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_RX_ER__FLEXIO4_FLEXIO12 0x01e0 0x04e4 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_RX_ER__GPIO5_IO12 0x01e0 0x04e4 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_TX_ER__NETC_PINMUX_ETH0_TX_ER 0x01e4 0x04e8 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_TX_ER__ECAT_LINK_ACT0 0x01e4 0x04e8 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_TX_ER__FLEXIO4_FLEXIO13 0x01e4 0x04e8 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_TX_ER__GPIO5_IO13 0x01e4 0x04e8 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH0_CRS__NETC_PINMUX_ETH0_CRS 0x01e8 0x04ec 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_CRS__ECAT_LINK0 0x01e8 0x04ec 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_CRS__NETC_EMDC 0x01e8 0x04ec 0x0000 0x02 0x00
+#define IMX94_PAD_ETH0_CRS__FLEXIO4_FLEXIO14 0x01e8 0x04ec 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_CRS__GPIO5_IO14 0x01e8 0x04ec 0x0000 0x05 0x00
+#define IMX94_PAD_ETH0_CRS__XBAR1_XBAR_INOUT8 0x01e8 0x04ec 0x087c 0x06 0x02
+#define IMX94_PAD_ETH0_CRS__SINC_FILTER_GLUE2_BREAK 0x01e8 0x04ec 0x0000 0x07 0x00
+
+#define IMX94_PAD_ETH0_COL__NETC_PINMUX_ETH0_COL 0x01ec 0x04f0 0x0000 0x00 0x00
+#define IMX94_PAD_ETH0_COL__ECAT_LINK1 0x01ec 0x04f0 0x0000 0x01 0x00
+#define IMX94_PAD_ETH0_COL__NETC_EMDIO 0x01ec 0x04f0 0x0678 0x02 0x03
+#define IMX94_PAD_ETH0_COL__FLEXIO4_FLEXIO15 0x01ec 0x04f0 0x0000 0x04 0x00
+#define IMX94_PAD_ETH0_COL__GPIO5_IO15 0x01ec 0x04f0 0x0000 0x05 0x00
+#define IMX94_PAD_ETH0_COL__XBAR1_XBAR_INOUT9 0x01ec 0x04f0 0x0880 0x06 0x02
+#define IMX94_PAD_ETH0_COL__SINC_FILTER_GLUE1_BREAK 0x01ec 0x04f0 0x0000 0x07 0x00
+
+#define IMX94_PAD_ETH1_TXD0__NETC_PINMUX_ETH1_TXD0 0x01f0 0x04f4 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_TXD0__ECAT_PT1_TXD0 0x01f0 0x04f4 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_TXD0__ENCODER_DIAG0 0x01f0 0x04f4 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_TXD0__FLEXIO3_FLEXIO0 0x01f0 0x04f4 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_TXD0__GPIO5_IO16 0x01f0 0x04f4 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_TXD1__NETC_PINMUX_ETH1_TXD1 0x01f4 0x04f8 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_TXD1__ECAT_PT1_TXD1 0x01f4 0x04f8 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_TXD1__ENCODER_DIAG1 0x01f4 0x04f8 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_TXD1__FLEXIO3_FLEXIO1 0x01f4 0x04f8 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_TXD1__GPIO5_IO17 0x01f4 0x04f8 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_TX_EN__NETC_PINMUX_ETH1_TX_EN 0x01f8 0x04fc 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_TX_EN__ECAT_PT1_TX_EN 0x01f8 0x04fc 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_TX_EN__ENCODER_DIAG2 0x01f8 0x04fc 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_TX_EN__FLEXIO3_FLEXIO2 0x01f8 0x04fc 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_TX_EN__GPIO5_IO18 0x01f8 0x04fc 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_TX_CLK__NETC_PINMUX_ETH1_TX_CLK 0x01fc 0x0500 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_TX_CLK__ECAT_PT1_TX_CLK 0x01fc 0x0500 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_TX_CLK__ENCODER_DIAG3 0x01fc 0x0500 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_TX_CLK__FLEXIO3_FLEXIO3 0x01fc 0x0500 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_TX_CLK__GPIO5_IO19 0x01fc 0x0500 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_RXD0__NETC_PINMUX_ETH1_RXD0 0x0200 0x0504 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_RXD0__ECAT_PT1_RXD0 0x0200 0x0504 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_RXD0__ENCODER_DIAG4 0x0200 0x0504 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_RXD0__FLEXIO3_FLEXIO4 0x0200 0x0504 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_RXD0__GPIO5_IO20 0x0200 0x0504 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_RXD1__NETC_PINMUX_ETH1_RXD1 0x0204 0x0508 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_RXD1__ECAT_PT1_RXD1 0x0204 0x0508 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_RXD1__ENCODER_DIAG5 0x0204 0x0508 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_RXD1__FLEXIO3_FLEXIO5 0x0204 0x0508 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_RXD1__GPIO5_IO21 0x0204 0x0508 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_RX_DV__NETC_PINMUX_ETH1_RX_DV 0x0208 0x050c 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_RX_DV__ECAT_PT1_RX_DV 0x0208 0x050c 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_RX_DV__ENCODER_DIAG6 0x0208 0x050c 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_RX_DV__FLEXIO3_FLEXIO6 0x0208 0x050c 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_RX_DV__GPIO5_IO22 0x0208 0x050c 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_TXD2__NETC_PINMUX_ETH1_TXD2 0x020c 0x0510 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_TXD2__ECAT_PT1_TXD2 0x020c 0x0510 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_TXD2__ETH1_RMII_REF50_CLK 0x020c 0x0510 0x0000 0x02 0x00
+#define IMX94_PAD_ETH1_TXD2__ENCODER_DIAG7 0x020c 0x0510 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_TXD2__FLEXIO3_FLEXIO7 0x020c 0x0510 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_TXD2__GPIO5_IO23 0x020c 0x0510 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_TXD3__NETC_PINMUX_ETH1_TXD3 0x0210 0x0514 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_TXD3__ECAT_PT1_TXD3 0x0210 0x0514 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_TXD3__ENCODER_DIAG8 0x0210 0x0514 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_TXD3__FLEXIO3_FLEXIO8 0x0210 0x0514 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_TXD3__GPIO5_IO24 0x0210 0x0514 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_RXD2__NETC_PINMUX_ETH1_RXD2 0x0214 0x0518 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_RXD2__ECAT_PT1_RXD2 0x0214 0x0518 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_RXD2__ENCODER_DIAG9 0x0214 0x0518 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_RXD2__FLEXIO3_FLEXIO9 0x0214 0x0518 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_RXD2__GPIO5_IO25 0x0214 0x0518 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_RXD3__NETC_PINMUX_ETH1_RXD3 0x0218 0x051c 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_RXD3__ECAT_PT1_RXD3 0x0218 0x051c 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_RXD3__ENCODER_DIAG10 0x0218 0x051c 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_RXD3__FLEXIO3_FLEXIO10 0x0218 0x051c 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_RXD3__GPIO5_IO26 0x0218 0x051c 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_RX_CLK__NETC_PINMUX_ETH1_RX_CLK 0x021c 0x0520 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_RX_CLK__ECAT_PT1_RX_CLK 0x021c 0x0520 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_RX_CLK__ENCODER_DIAG11 0x021c 0x0520 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_RX_CLK__FLEXIO3_FLEXIO11 0x021c 0x0520 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_RX_CLK__GPIO5_IO27 0x021c 0x0520 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_RX_ER__NETC_PINMUX_ETH1_RX_ER 0x0220 0x0524 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_RX_ER__ECAT_PT1_RX_ER 0x0220 0x0524 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_RX_ER__ENCODER_DIAG12 0x0220 0x0524 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_RX_ER__FLEXIO3_FLEXIO12 0x0220 0x0524 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_RX_ER__GPIO5_IO28 0x0220 0x0524 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_TX_ER__NETC_PINMUX_ETH1_TX_ER 0x0224 0x0528 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_TX_ER__ECAT_LINK_ACT1 0x0224 0x0528 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_TX_ER__ENCODER_DIAG13 0x0224 0x0528 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_TX_ER__FLEXIO3_FLEXIO13 0x0224 0x0528 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_TX_ER__GPIO5_IO29 0x0224 0x0528 0x0000 0x05 0x00
+
+#define IMX94_PAD_ETH1_CRS__NETC_PINMUX_ETH1_CRS 0x0228 0x052c 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_CRS__ECAT_MDC 0x0228 0x052c 0x0000 0x01 0x00
+#define IMX94_PAD_ETH1_CRS__NETC_EMDC 0x0228 0x052c 0x0000 0x02 0x00
+#define IMX94_PAD_ETH1_CRS__ENCODER_DIAG14 0x0228 0x052c 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_CRS__FLEXIO3_FLEXIO14 0x0228 0x052c 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_CRS__GPIO5_IO30 0x0228 0x052c 0x0000 0x05 0x00
+#define IMX94_PAD_ETH1_CRS__XBAR1_XBAR_INOUT10 0x0228 0x052c 0x0884 0x06 0x02
+#define IMX94_PAD_ETH1_CRS__SINC_FILTER_GLUE1_BREAK 0x0228 0x052c 0x0000 0x07 0x00
+
+#define IMX94_PAD_ETH1_COL__NETC_PINMUX_ETH1_COL 0x022c 0x0530 0x0000 0x00 0x00
+#define IMX94_PAD_ETH1_COL__ECAT_MDIO 0x022c 0x0530 0x0628 0x01 0x01
+#define IMX94_PAD_ETH1_COL__NETC_EMDIO 0x022c 0x0530 0x0678 0x02 0x04
+#define IMX94_PAD_ETH1_COL__ENCODER_DIAG15 0x022c 0x0530 0x0000 0x03 0x00
+#define IMX94_PAD_ETH1_COL__FLEXIO3_FLEXIO15 0x022c 0x0530 0x0000 0x04 0x00
+#define IMX94_PAD_ETH1_COL__GPIO5_IO31 0x022c 0x0530 0x0000 0x05 0x00
+#define IMX94_PAD_ETH1_COL__XBAR1_XBAR_INOUT11 0x022c 0x0530 0x0888 0x06 0x02
+#define IMX94_PAD_ETH1_COL__SINC_FILTER_GLUE2_BREAK 0x022c 0x0530 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD1_CLK__USDHC1_CLK 0x0230 0x0534 0x0000 0x00 0x00
+#define IMX94_PAD_SD1_CLK__SAI4_TX_BCLK 0x0230 0x0534 0x0818 0x01 0x01
+#define IMX94_PAD_SD1_CLK__CAN4_TX 0x0230 0x0534 0x0000 0x02 0x00
+#define IMX94_PAD_SD1_CLK__NETC_1588MUX_INOUT0 0x0230 0x0534 0x064c 0x03 0x01
+#define IMX94_PAD_SD1_CLK__FLEXIO2_4_1_FLEXIO0 0x0230 0x0534 0x0000 0x04 0x00
+#define IMX94_PAD_SD1_CLK__GPIO4_IO8 0x0230 0x0534 0x0000 0x05 0x00
+#define IMX94_PAD_SD1_CLK__FLEXPWM3_PWMX0 0x0230 0x0534 0x06c8 0x06 0x02
+#define IMX94_PAD_SD1_CLK__SINC1_EMCLK0 0x0230 0x0534 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD1_CMD__USDHC1_CMD 0x0234 0x0538 0x0000 0x00 0x00
+#define IMX94_PAD_SD1_CMD__SAI4_RX_BCLK 0x0234 0x0538 0x080c 0x01 0x01
+#define IMX94_PAD_SD1_CMD__CAN4_RX 0x0234 0x0538 0x0684 0x02 0x02
+#define IMX94_PAD_SD1_CMD__NETC_1588MUX_INOUT1 0x0234 0x0538 0x0650 0x03 0x01
+#define IMX94_PAD_SD1_CMD__FLEXIO2_4_1_FLEXIO1 0x0234 0x0538 0x0000 0x04 0x00
+#define IMX94_PAD_SD1_CMD__GPIO4_IO9 0x0234 0x0538 0x0000 0x05 0x00
+#define IMX94_PAD_SD1_CMD__FLEXPWM3_PWMX1 0x0234 0x0538 0x06cc 0x06 0x02
+#define IMX94_PAD_SD1_CMD__SINC1_EMBIT0 0x0234 0x0538 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD1_DATA0__USDHC1_DATA0 0x0238 0x053c 0x0000 0x00 0x00
+#define IMX94_PAD_SD1_DATA0__SAI4_RX_SYNC 0x0238 0x053c 0x0814 0x01 0x01
+#define IMX94_PAD_SD1_DATA0__CAN5_TX 0x0238 0x053c 0x0000 0x02 0x00
+#define IMX94_PAD_SD1_DATA0__NETC_1588MUX_INOUT2 0x0238 0x053c 0x0654 0x03 0x01
+#define IMX94_PAD_SD1_DATA0__FLEXIO2_4_1_FLEXIO2 0x0238 0x053c 0x0000 0x04 0x00
+#define IMX94_PAD_SD1_DATA0__GPIO4_IO10 0x0238 0x053c 0x0000 0x05 0x00
+#define IMX94_PAD_SD1_DATA0__FLEXPWM3_PWMX2 0x0238 0x053c 0x06d0 0x06 0x01
+#define IMX94_PAD_SD1_DATA0__SINC1_EMCLK1 0x0238 0x053c 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD1_DATA1__USDHC1_DATA1 0x023c 0x0540 0x0000 0x00 0x00
+#define IMX94_PAD_SD1_DATA1__SAI4_TX_SYNC 0x023c 0x0540 0x081c 0x01 0x01
+#define IMX94_PAD_SD1_DATA1__CAN5_RX 0x023c 0x0540 0x0688 0x02 0x03
+#define IMX94_PAD_SD1_DATA1__NETC_1588MUX_INOUT3 0x023c 0x0540 0x0658 0x03 0x01
+#define IMX94_PAD_SD1_DATA1__FLEXIO2_4_1_FLEXIO3 0x023c 0x0540 0x0000 0x04 0x00
+#define IMX94_PAD_SD1_DATA1__GPIO4_IO11 0x023c 0x0540 0x0000 0x05 0x00
+#define IMX94_PAD_SD1_DATA1__FLEXPWM3_PWMA3 0x023c 0x0540 0x06b4 0x06 0x01
+#define IMX94_PAD_SD1_DATA1__SINC1_EMBIT1 0x023c 0x0540 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD1_DATA2__USDHC1_DATA2 0x0240 0x0544 0x0000 0x00 0x00
+#define IMX94_PAD_SD1_DATA2__SAI4_TX_DATA0 0x0240 0x0544 0x0000 0x01 0x00
+#define IMX94_PAD_SD1_DATA2__PMIC_READY 0x0240 0x0544 0x0000 0x02 0x00
+#define IMX94_PAD_SD1_DATA2__NETC_1588MUX_INOUT4 0x0240 0x0544 0x065c 0x03 0x01
+#define IMX94_PAD_SD1_DATA2__FLEXIO2_4_1_FLEXIO4 0x0240 0x0544 0x0000 0x04 0x00
+#define IMX94_PAD_SD1_DATA2__GPIO4_IO12 0x0240 0x0544 0x0000 0x05 0x00
+#define IMX94_PAD_SD1_DATA2__FLEXPWM3_PWMB3 0x0240 0x0544 0x06c4 0x06 0x01
+#define IMX94_PAD_SD1_DATA2__SINC1_EMCLK2 0x0240 0x0544 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD1_DATA3__USDHC1_DATA3 0x0244 0x0548 0x0000 0x00 0x00
+#define IMX94_PAD_SD1_DATA3__SAI4_RX_DATA0 0x0244 0x0548 0x0810 0x01 0x01
+#define IMX94_PAD_SD1_DATA3__NETC_1588MUX_INOUT5 0x0244 0x0548 0x0660 0x03 0x01
+#define IMX94_PAD_SD1_DATA3__FLEXIO2_4_1_FLEXIO5 0x0244 0x0548 0x0000 0x04 0x00
+#define IMX94_PAD_SD1_DATA3__GPIO4_IO13 0x0244 0x0548 0x0000 0x05 0x00
+#define IMX94_PAD_SD1_DATA3__FLEXPWM3_PWMA2 0x0244 0x0548 0x06b0 0x06 0x01
+#define IMX94_PAD_SD1_DATA3__SINC1_EMBIT2 0x0244 0x0548 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD1_DATA4__USDHC1_DATA4 0x0248 0x054c 0x0000 0x00 0x00
+#define IMX94_PAD_SD1_DATA4__SAI2_RX_DATA0 0x0248 0x054c 0x07f0 0x01 0x02
+#define IMX94_PAD_SD1_DATA4__NETC_1588MUX_INOUT6 0x0248 0x054c 0x0664 0x03 0x01
+#define IMX94_PAD_SD1_DATA4__FLEXIO2_4_1_FLEXIO6 0x0248 0x054c 0x0000 0x04 0x00
+#define IMX94_PAD_SD1_DATA4__GPIO4_IO14 0x0248 0x054c 0x0000 0x05 0x00
+#define IMX94_PAD_SD1_DATA4__FLEXPWM3_PWMB2 0x0248 0x054c 0x06c0 0x06 0x01
+#define IMX94_PAD_SD1_DATA4__SINC1_EMCLK3 0x0248 0x054c 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD1_DATA5__USDHC1_DATA5 0x024c 0x0550 0x0000 0x00 0x00
+#define IMX94_PAD_SD1_DATA5__SAI2_TX_DATA0 0x024c 0x0550 0x0000 0x01 0x00
+#define IMX94_PAD_SD1_DATA5__USDHC1_RESET_B 0x024c 0x0550 0x0000 0x02 0x00
+#define IMX94_PAD_SD1_DATA5__NETC_1588MUX_INOUT7 0x024c 0x0550 0x0668 0x03 0x01
+#define IMX94_PAD_SD1_DATA5__FLEXIO2_4_1_FLEXIO7 0x024c 0x0550 0x0000 0x04 0x00
+#define IMX94_PAD_SD1_DATA5__GPIO4_IO15 0x024c 0x0550 0x0000 0x05 0x00
+#define IMX94_PAD_SD1_DATA5__FLEXPWM3_PWMA1 0x024c 0x0550 0x06ac 0x06 0x01
+#define IMX94_PAD_SD1_DATA5__SINC1_EMBIT3 0x024c 0x0550 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD1_DATA6__USDHC1_DATA6 0x0250 0x0554 0x0000 0x00 0x00
+#define IMX94_PAD_SD1_DATA6__SAI2_TX_BCLK 0x0250 0x0554 0x07f8 0x01 0x02
+#define IMX94_PAD_SD1_DATA6__USDHC1_CD_B 0x0250 0x0554 0x0000 0x02 0x00
+#define IMX94_PAD_SD1_DATA6__NETC_1588MUX_INOUT8 0x0250 0x0554 0x066c 0x03 0x01
+#define IMX94_PAD_SD1_DATA6__FLEXIO2_4_1_FLEXIO8 0x0250 0x0554 0x0000 0x04 0x00
+#define IMX94_PAD_SD1_DATA6__GPIO4_IO16 0x0250 0x0554 0x0000 0x05 0x00
+#define IMX94_PAD_SD1_DATA6__FLEXPWM3_PWMB1 0x0250 0x0554 0x06bc 0x06 0x01
+#define IMX94_PAD_SD1_DATA6__SINC1_MOD_CLK0 0x0250 0x0554 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD1_DATA7__USDHC1_DATA7 0x0254 0x0558 0x0000 0x00 0x00
+#define IMX94_PAD_SD1_DATA7__SAI2_RX_SYNC 0x0254 0x0558 0x07f4 0x01 0x02
+#define IMX94_PAD_SD1_DATA7__USDHC1_WP 0x0254 0x0558 0x0000 0x02 0x00
+#define IMX94_PAD_SD1_DATA7__NETC_1588MUX_INOUT9 0x0254 0x0558 0x0670 0x03 0x01
+#define IMX94_PAD_SD1_DATA7__FLEXIO2_4_1_FLEXIO9 0x0254 0x0558 0x0000 0x04 0x00
+#define IMX94_PAD_SD1_DATA7__GPIO4_IO17 0x0254 0x0558 0x0000 0x05 0x00
+#define IMX94_PAD_SD1_DATA7__FLEXPWM3_PWMA0 0x0254 0x0558 0x06a8 0x06 0x01
+#define IMX94_PAD_SD1_DATA7__SINC1_MOD_CLK1 0x0254 0x0558 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD1_STROBE__USDHC1_STROBE 0x0258 0x055c 0x0000 0x00 0x00
+#define IMX94_PAD_SD1_STROBE__SAI2_TX_SYNC 0x0258 0x055c 0x07fc 0x01 0x02
+#define IMX94_PAD_SD1_STROBE__NETC_1588MUX_INOUT10 0x0258 0x055c 0x0674 0x03 0x01
+#define IMX94_PAD_SD1_STROBE__FLEXIO2_4_1_FLEXIO10 0x0258 0x055c 0x0000 0x04 0x00
+#define IMX94_PAD_SD1_STROBE__GPIO4_IO18 0x0258 0x055c 0x0000 0x05 0x00
+#define IMX94_PAD_SD1_STROBE__FLEXPWM3_PWMB0 0x0258 0x055c 0x06b8 0x06 0x01
+#define IMX94_PAD_SD1_STROBE__SINC1_MOD_CLK2 0x0258 0x055c 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD2_VSELECT__USDHC2_VSELECT 0x025c 0x0560 0x0000 0x00 0x00
+#define IMX94_PAD_SD2_VSELECT__SAI4_MCLK 0x025c 0x0560 0x0808 0x01 0x01
+#define IMX94_PAD_SD2_VSELECT__USDHC2_WP 0x025c 0x0560 0x0854 0x02 0x02
+#define IMX94_PAD_SD2_VSELECT__NETC_1588MUX_INOUT10 0x025c 0x0560 0x0674 0x03 0x02
+#define IMX94_PAD_SD2_VSELECT__FLEXIO2_4_1_FLEXIO11 0x025c 0x0560 0x0694 0x04 0x01
+#define IMX94_PAD_SD2_VSELECT__GPIO4_IO19 0x025c 0x0560 0x0000 0x05 0x00
+#define IMX94_PAD_SD2_VSELECT__EXT_CLK1 0x025c 0x0560 0x0624 0x06 0x01
+#define IMX94_PAD_SD2_VSELECT__XBAR1_XBAR_INOUT12 0x025c 0x0560 0x088c 0x07 0x02
+
+#define IMX94_PAD_XSPI1_DATA0__XSPI1_A_DATA0 0x0260 0x0564 0x0000 0x00 0x00
+#define IMX94_PAD_XSPI1_DATA0__SAI2_RX_SYNC 0x0260 0x0564 0x07f4 0x01 0x03
+#define IMX94_PAD_XSPI1_DATA0__XSPI_SLV_DATA0 0x0260 0x0564 0x0908 0x03 0x01
+#define IMX94_PAD_XSPI1_DATA0__FLEXIO1_3_3_FLEXIO0 0x0260 0x0564 0x0000 0x04 0x00
+#define IMX94_PAD_XSPI1_DATA0__GPIO7_IO16 0x0260 0x0564 0x0000 0x05 0x00
+
+#define IMX94_PAD_XSPI1_DATA1__XSPI1_A_DATA1 0x0264 0x0568 0x0000 0x00 0x00
+#define IMX94_PAD_XSPI1_DATA1__SAI2_TX_SYNC 0x0264 0x0568 0x07fc 0x01 0x03
+#define IMX94_PAD_XSPI1_DATA1__XSPI_SLV_DATA1 0x0264 0x0568 0x090c 0x03 0x01
+#define IMX94_PAD_XSPI1_DATA1__FLEXIO1_3_3_FLEXIO1 0x0264 0x0568 0x0000 0x04 0x00
+#define IMX94_PAD_XSPI1_DATA1__GPIO7_IO17 0x0264 0x0568 0x0000 0x05 0x00
+
+#define IMX94_PAD_XSPI1_DATA2__XSPI1_A_DATA2 0x0268 0x056c 0x0000 0x00 0x00
+#define IMX94_PAD_XSPI1_DATA2__SAI2_TX_DATA0 0x0268 0x056c 0x0000 0x01 0x00
+#define IMX94_PAD_XSPI1_DATA2__XSPI_SLV_DATA2 0x0268 0x056c 0x0910 0x03 0x01
+#define IMX94_PAD_XSPI1_DATA2__FLEXIO1_3_3_FLEXIO2 0x0268 0x056c 0x0000 0x04 0x00
+#define IMX94_PAD_XSPI1_DATA2__GPIO7_IO18 0x0268 0x056c 0x0000 0x05 0x00
+
+#define IMX94_PAD_XSPI1_DATA3__XSPI1_A_DATA3 0x026c 0x0570 0x0000 0x00 0x00
+#define IMX94_PAD_XSPI1_DATA3__SAI2_RX_DATA0 0x026c 0x0570 0x07f0 0x01 0x03
+#define IMX94_PAD_XSPI1_DATA3__SAI2_MCLK 0x026c 0x0570 0x07e8 0x02 0x02
+#define IMX94_PAD_XSPI1_DATA3__XSPI_SLV_DATA3 0x026c 0x0570 0x0914 0x03 0x01
+#define IMX94_PAD_XSPI1_DATA3__FLEXIO1_3_3_FLEXIO3 0x026c 0x0570 0x0000 0x04 0x00
+#define IMX94_PAD_XSPI1_DATA3__GPIO7_IO19 0x026c 0x0570 0x0000 0x05 0x00
+
+#define IMX94_PAD_XSPI1_DATA4__XSPI1_A_DATA4 0x0270 0x0574 0x0000 0x00 0x00
+#define IMX94_PAD_XSPI1_DATA4__SAI4_RX_SYNC 0x0270 0x0574 0x0814 0x01 0x02
+#define IMX94_PAD_XSPI1_DATA4__XSPI_SLV_DATA4 0x0270 0x0574 0x0918 0x03 0x01
+#define IMX94_PAD_XSPI1_DATA4__FLEXIO1_3_3_FLEXIO4 0x0270 0x0574 0x0000 0x04 0x00
+#define IMX94_PAD_XSPI1_DATA4__GPIO7_IO20 0x0270 0x0574 0x0000 0x05 0x00
+
+#define IMX94_PAD_XSPI1_DATA5__XSPI1_A_DATA5 0x0274 0x0578 0x0000 0x00 0x00
+#define IMX94_PAD_XSPI1_DATA5__SAI4_TX_SYNC 0x0274 0x0578 0x081c 0x01 0x02
+#define IMX94_PAD_XSPI1_DATA5__XSPI_SLV_DATA5 0x0274 0x0578 0x091c 0x03 0x01
+#define IMX94_PAD_XSPI1_DATA5__FLEXIO1_3_3_FLEXIO5 0x0274 0x0578 0x0000 0x04 0x00
+#define IMX94_PAD_XSPI1_DATA5__GPIO7_IO21 0x0274 0x0578 0x0000 0x05 0x00
+
+#define IMX94_PAD_XSPI1_DATA6__XSPI1_A_DATA6 0x0278 0x057c 0x0000 0x00 0x00
+#define IMX94_PAD_XSPI1_DATA6__SAI4_TX_DATA0 0x0278 0x057c 0x0000 0x01 0x00
+#define IMX94_PAD_XSPI1_DATA6__XSPI_SLV_DATA6 0x0278 0x057c 0x0920 0x03 0x01
+#define IMX94_PAD_XSPI1_DATA6__FLEXIO1_3_3_FLEXIO6 0x0278 0x057c 0x0000 0x04 0x00
+#define IMX94_PAD_XSPI1_DATA6__GPIO7_IO22 0x0278 0x057c 0x0000 0x05 0x00
+
+#define IMX94_PAD_XSPI1_DATA7__XSPI1_A_DATA7 0x027c 0x0580 0x0000 0x00 0x00
+#define IMX94_PAD_XSPI1_DATA7__SAI4_RX_DATA0 0x027c 0x0580 0x0810 0x01 0x02
+#define IMX94_PAD_XSPI1_DATA7__SAI4_MCLK 0x027c 0x0580 0x0808 0x02 0x02
+#define IMX94_PAD_XSPI1_DATA7__XSPI_SLV_DATA7 0x027c 0x0580 0x0924 0x03 0x01
+#define IMX94_PAD_XSPI1_DATA7__FLEXIO1_3_3_FLEXIO7 0x027c 0x0580 0x0000 0x04 0x00
+#define IMX94_PAD_XSPI1_DATA7__GPIO7_IO23 0x027c 0x0580 0x0000 0x05 0x00
+
+#define IMX94_PAD_XSPI1_DQS__XSPI1_A_DQS 0x0280 0x0584 0x0000 0x00 0x00
+#define IMX94_PAD_XSPI1_DQS__SAI2_TX_BCLK 0x0280 0x0584 0x07f8 0x01 0x03
+#define IMX94_PAD_XSPI1_DQS__XSPI_SLV_DQS 0x0280 0x0584 0x0900 0x03 0x01
+#define IMX94_PAD_XSPI1_DQS__FLEXIO1_3_3_FLEXIO8 0x0280 0x0584 0x0000 0x04 0x00
+#define IMX94_PAD_XSPI1_DQS__GPIO7_IO24 0x0280 0x0584 0x0000 0x05 0x00
+
+#define IMX94_PAD_XSPI1_SCLK__XSPI1_A_SCLK 0x0284 0x0588 0x0000 0x00 0x00
+#define IMX94_PAD_XSPI1_SCLK__SAI4_TX_BCLK 0x0284 0x0588 0x0818 0x01 0x02
+#define IMX94_PAD_XSPI1_SCLK__XSPI_SLV_CLK 0x0284 0x0588 0x0904 0x03 0x01
+#define IMX94_PAD_XSPI1_SCLK__FLEXIO1_3_3_FLEXIO9 0x0284 0x0588 0x0000 0x04 0x00
+#define IMX94_PAD_XSPI1_SCLK__GPIO7_IO25 0x0284 0x0588 0x0000 0x05 0x00
+
+#define IMX94_PAD_XSPI1_SS0_B__XSPI1_A_SS0_B 0x0288 0x058c 0x0000 0x00 0x00
+#define IMX94_PAD_XSPI1_SS0_B__SAI4_RX_BCLK 0x0288 0x058c 0x080c 0x01 0x02
+#define IMX94_PAD_XSPI1_SS0_B__XSPI_SLV_CS 0x0288 0x058c 0x08fc 0x03 0x01
+#define IMX94_PAD_XSPI1_SS0_B__FLEXIO1_3_3_FLEXIO10 0x0288 0x058c 0x0000 0x04 0x00
+#define IMX94_PAD_XSPI1_SS0_B__GPIO7_IO26 0x0288 0x058c 0x0000 0x05 0x00
+
+#define IMX94_PAD_XSPI1_SS1_B__XSPI1_A_SS1_B 0x028c 0x0590 0x0000 0x00 0x00
+#define IMX94_PAD_XSPI1_SS1_B__SAI2_RX_BCLK 0x028c 0x0590 0x07ec 0x01 0x02
+#define IMX94_PAD_XSPI1_SS1_B__FLEXPWM3_PWMX3 0x028c 0x0590 0x0000 0x03 0x00
+#define IMX94_PAD_XSPI1_SS1_B__FLEXIO1_3_3_FLEXIO11 0x028c 0x0590 0x0000 0x04 0x00
+#define IMX94_PAD_XSPI1_SS1_B__GPIO7_IO27 0x028c 0x0590 0x0000 0x05 0x00
+#define IMX94_PAD_XSPI1_SS1_B__SINC1_MOD_CLK0 0x028c 0x0590 0x0000 0x06 0x00
+#define IMX94_PAD_XSPI1_SS1_B__SINC_FILTER_GLUE1_BREAK 0x028c 0x0590 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD2_CD_B__USDHC2_CD_B 0x0290 0x0594 0x0000 0x00 0x00
+#define IMX94_PAD_SD2_CD_B__NETC_PINMUX_ETH4_RX_CTL 0x0290 0x0594 0x0634 0x01 0x01
+#define IMX94_PAD_SD2_CD_B__I3C2_SCL 0x0290 0x0594 0x0720 0x02 0x03
+#define IMX94_PAD_SD2_CD_B__NETC_1588MUX_INOUT9 0x0290 0x0594 0x0670 0x03 0x02
+#define IMX94_PAD_SD2_CD_B__FLEXIO2_4_2_FLEXIO0 0x0290 0x0594 0x0000 0x04 0x00
+#define IMX94_PAD_SD2_CD_B__GPIO4_IO20 0x0290 0x0594 0x0000 0x05 0x00
+#define IMX94_PAD_SD2_CD_B__XBAR1_XBAR_INOUT13 0x0290 0x0594 0x0890 0x06 0x02
+#define IMX94_PAD_SD2_CD_B__SINC2_EMCLK0 0x0290 0x0594 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD2_CLK__USDHC2_CLK 0x0294 0x0598 0x0000 0x00 0x00
+#define IMX94_PAD_SD2_CLK__NETC_PINMUX_ETH4_TX_CLK 0x0294 0x0598 0x0648 0x01 0x01
+#define IMX94_PAD_SD2_CLK__I3C2_SDA 0x0294 0x0598 0x0724 0x02 0x03
+#define IMX94_PAD_SD2_CLK__NETC_1588MUX_INOUT8 0x0294 0x0598 0x066c 0x03 0x02
+#define IMX94_PAD_SD2_CLK__FLEXIO2_4_2_FLEXIO1 0x0294 0x0598 0x0000 0x04 0x00
+#define IMX94_PAD_SD2_CLK__GPIO4_IO21 0x0294 0x0598 0x0000 0x05 0x00
+#define IMX94_PAD_SD2_CLK__OBSERVE0 0x0294 0x0598 0x0000 0x06 0x00
+#define IMX94_PAD_SD2_CLK__SINC2_EMBIT0 0x0294 0x0598 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD2_CMD__USDHC2_CMD 0x0298 0x059c 0x0000 0x00 0x00
+#define IMX94_PAD_SD2_CMD__NETC_PINMUX_ETH4_TX_CTL 0x0298 0x059c 0x0000 0x01 0x00
+#define IMX94_PAD_SD2_CMD__I3C2_PUR 0x0298 0x059c 0x0000 0x02 0x00
+#define IMX94_PAD_SD2_CMD__I3C2_PUR_B 0x0298 0x059c 0x0000 0x03 0x00
+#define IMX94_PAD_SD2_CMD__FLEXIO2_4_2_FLEXIO2 0x0298 0x059c 0x0000 0x04 0x00
+#define IMX94_PAD_SD2_CMD__GPIO4_IO22 0x0298 0x059c 0x0000 0x05 0x00
+#define IMX94_PAD_SD2_CMD__OBSERVE1 0x0298 0x059c 0x0000 0x06 0x00
+#define IMX94_PAD_SD2_CMD__SINC2_EMCLK1 0x0298 0x059c 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD2_DATA0__USDHC2_DATA0 0x029c 0x05a0 0x0000 0x00 0x00
+#define IMX94_PAD_SD2_DATA0__NETC_PINMUX_ETH4_TXD0 0x029c 0x05a0 0x0000 0x01 0x00
+#define IMX94_PAD_SD2_DATA0__CAN2_TX 0x029c 0x05a0 0x0000 0x02 0x00
+#define IMX94_PAD_SD2_DATA0__NETC_1588MUX_INOUT7 0x029c 0x05a0 0x0668 0x03 0x02
+#define IMX94_PAD_SD2_DATA0__FLEXIO2_4_2_FLEXIO3 0x029c 0x05a0 0x0000 0x04 0x00
+#define IMX94_PAD_SD2_DATA0__GPIO4_IO23 0x029c 0x05a0 0x0000 0x05 0x00
+#define IMX94_PAD_SD2_DATA0__OBSERVE2 0x029c 0x05a0 0x0000 0x06 0x00
+#define IMX94_PAD_SD2_DATA0__SINC2_EMBIT1 0x029c 0x05a0 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD2_DATA1__USDHC2_DATA1 0x02a0 0x05a4 0x0000 0x00 0x00
+#define IMX94_PAD_SD2_DATA1__NETC_PINMUX_ETH4_TXD1 0x02a0 0x05a4 0x0000 0x01 0x00
+#define IMX94_PAD_SD2_DATA1__CAN2_RX 0x02a0 0x05a4 0x067c 0x02 0x04
+#define IMX94_PAD_SD2_DATA1__NETC_1588MUX_INOUT6 0x02a0 0x05a4 0x0664 0x03 0x02
+#define IMX94_PAD_SD2_DATA1__FLEXIO2_4_2_FLEXIO4 0x02a0 0x05a4 0x0000 0x04 0x00
+#define IMX94_PAD_SD2_DATA1__GPIO4_IO24 0x02a0 0x05a4 0x0000 0x05 0x00
+#define IMX94_PAD_SD2_DATA1__XBAR1_XBAR_INOUT14 0x02a0 0x05a4 0x0894 0x06 0x02
+#define IMX94_PAD_SD2_DATA1__SINC2_EMCLK2 0x02a0 0x05a4 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD2_DATA2__USDHC2_DATA2 0x02a4 0x05a8 0x0000 0x00 0x00
+#define IMX94_PAD_SD2_DATA2__NETC_PINMUX_ETH4_TXD2 0x02a4 0x05a8 0x0000 0x01 0x00
+#define IMX94_PAD_SD2_DATA2__MQS2_RIGHT 0x02a4 0x05a8 0x0000 0x02 0x00
+#define IMX94_PAD_SD2_DATA2__NETC_1588MUX_INOUT5 0x02a4 0x05a8 0x0660 0x03 0x02
+#define IMX94_PAD_SD2_DATA2__FLEXIO2_4_2_FLEXIO5 0x02a4 0x05a8 0x0000 0x04 0x00
+#define IMX94_PAD_SD2_DATA2__GPIO4_IO25 0x02a4 0x05a8 0x0000 0x05 0x00
+#define IMX94_PAD_SD2_DATA2__XBAR1_XBAR_INOUT15 0x02a4 0x05a8 0x0898 0x06 0x01
+#define IMX94_PAD_SD2_DATA2__SINC2_EMBIT2 0x02a4 0x05a8 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD2_DATA3__USDHC2_DATA3 0x02a8 0x05ac 0x0000 0x00 0x00
+#define IMX94_PAD_SD2_DATA3__NETC_PINMUX_ETH4_TXD3 0x02a8 0x05ac 0x0000 0x01 0x00
+#define IMX94_PAD_SD2_DATA3__MQS2_LEFT 0x02a8 0x05ac 0x0000 0x02 0x00
+#define IMX94_PAD_SD2_DATA3__LPTMR2_ALT0 0x02a8 0x05ac 0x0780 0x03 0x01
+#define IMX94_PAD_SD2_DATA3__FLEXIO2_4_2_FLEXIO6 0x02a8 0x05ac 0x0000 0x04 0x00
+#define IMX94_PAD_SD2_DATA3__GPIO4_IO26 0x02a8 0x05ac 0x0000 0x05 0x00
+#define IMX94_PAD_SD2_DATA3__XBAR1_XBAR_INOUT16 0x02a8 0x05ac 0x089c 0x06 0x01
+#define IMX94_PAD_SD2_DATA3__SINC2_EMCLK3 0x02a8 0x05ac 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD2_RESET_B__USDHC2_RESET_B 0x02ac 0x05b0 0x0000 0x00 0x00
+#define IMX94_PAD_SD2_RESET_B__NETC_PINMUX_ETH4_RXD0 0x02ac 0x05b0 0x0638 0x01 0x01
+#define IMX94_PAD_SD2_RESET_B__NETC_1588MUX_INOUT4 0x02ac 0x05b0 0x065c 0x02 0x02
+#define IMX94_PAD_SD2_RESET_B__LPTMR2_ALT1 0x02ac 0x05b0 0x0784 0x03 0x01
+#define IMX94_PAD_SD2_RESET_B__FLEXIO2_4_2_FLEXIO7 0x02ac 0x05b0 0x0000 0x04 0x00
+#define IMX94_PAD_SD2_RESET_B__GPIO4_IO27 0x02ac 0x05b0 0x0000 0x05 0x00
+#define IMX94_PAD_SD2_RESET_B__XBAR1_XBAR_INOUT17 0x02ac 0x05b0 0x08a0 0x06 0x01
+#define IMX94_PAD_SD2_RESET_B__SINC2_EMBIT3 0x02ac 0x05b0 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD2_GPIO0__USDHC2_WP 0x02b0 0x05b4 0x0854 0x00 0x03
+#define IMX94_PAD_SD2_GPIO0__NETC_PINMUX_ETH4_RXD1 0x02b0 0x05b4 0x063c 0x01 0x01
+#define IMX94_PAD_SD2_GPIO0__NETC_1588MUX_INOUT3 0x02b0 0x05b4 0x0658 0x03 0x02
+#define IMX94_PAD_SD2_GPIO0__FLEXIO2_4_2_FLEXIO8 0x02b0 0x05b4 0x0000 0x04 0x00
+#define IMX94_PAD_SD2_GPIO0__GPIO4_IO28 0x02b0 0x05b4 0x0000 0x05 0x00
+#define IMX94_PAD_SD2_GPIO0__XBAR1_XBAR_INOUT18 0x02b0 0x05b4 0x08a4 0x06 0x01
+#define IMX94_PAD_SD2_GPIO0__SINC2_MOD_CLK1 0x02b0 0x05b4 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD2_GPIO1__LPTMR2_ALT2 0x02b4 0x05b8 0x0788 0x00 0x01
+#define IMX94_PAD_SD2_GPIO1__NETC_PINMUX_ETH4_RXD2 0x02b4 0x05b8 0x0640 0x01 0x01
+#define IMX94_PAD_SD2_GPIO1__ECAT_CLK25 0x02b4 0x05b8 0x0000 0x02 0x00
+#define IMX94_PAD_SD2_GPIO1__NETC_1588MUX_INOUT2 0x02b4 0x05b8 0x0654 0x03 0x02
+#define IMX94_PAD_SD2_GPIO1__FLEXIO2_4_2_FLEXIO9 0x02b4 0x05b8 0x0000 0x04 0x00
+#define IMX94_PAD_SD2_GPIO1__GPIO4_IO29 0x02b4 0x05b8 0x0000 0x05 0x00
+#define IMX94_PAD_SD2_GPIO1__XBAR1_XBAR_INOUT19 0x02b4 0x05b8 0x08a8 0x06 0x01
+#define IMX94_PAD_SD2_GPIO1__SINC2_MOD_CLK0 0x02b4 0x05b8 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD2_GPIO2__NETC_PINMUX_ETH4_RXD3 0x02b8 0x05bc 0x0644 0x01 0x01
+#define IMX94_PAD_SD2_GPIO2__CAN5_TX 0x02b8 0x05bc 0x0000 0x02 0x00
+#define IMX94_PAD_SD2_GPIO2__NETC_1588MUX_INOUT1 0x02b8 0x05bc 0x0650 0x03 0x02
+#define IMX94_PAD_SD2_GPIO2__FLEXIO2_4_2_FLEXIO10 0x02b8 0x05bc 0x0000 0x04 0x00
+#define IMX94_PAD_SD2_GPIO2__GPIO4_IO30 0x02b8 0x05bc 0x0000 0x05 0x00
+#define IMX94_PAD_SD2_GPIO2__XBAR1_XBAR_INOUT20 0x02b8 0x05bc 0x08ac 0x06 0x01
+#define IMX94_PAD_SD2_GPIO2__SINC2_MOD_CLK2 0x02b8 0x05bc 0x0000 0x07 0x00
+
+#define IMX94_PAD_SD2_GPIO3__NETC_PINMUX_ETH4_RX_CLK 0x02bc 0x05c0 0x0630 0x01 0x01
+#define IMX94_PAD_SD2_GPIO3__CAN5_RX 0x02bc 0x05c0 0x0688 0x02 0x04
+#define IMX94_PAD_SD2_GPIO3__NETC_1588MUX_INOUT0 0x02bc 0x05c0 0x064c 0x03 0x02
+#define IMX94_PAD_SD2_GPIO3__FLEXIO2_4_2_FLEXIO11 0x02bc 0x05c0 0x0000 0x04 0x00
+#define IMX94_PAD_SD2_GPIO3__GPIO4_IO31 0x02bc 0x05c0 0x0000 0x05 0x00
+#define IMX94_PAD_SD2_GPIO3__XBAR1_XBAR_INOUT21 0x02bc 0x05c0 0x0000 0x06 0x00
+#define IMX94_PAD_SD2_GPIO3__SINC_FILTER_GLUE2_BREAK 0x02bc 0x05c0 0x0000 0x07 0x00
+
+#define IMX94_PAD_I2C1_SCL__LPI2C1_SCL 0x02c0 0x05c4 0x0000 0x00 0x00
+#define IMX94_PAD_I2C1_SCL__I3C1_SCL 0x02c0 0x05c4 0x0000 0x01 0x00
+#define IMX94_PAD_I2C1_SCL__LPUART1_DCD_B 0x02c0 0x05c4 0x0000 0x02 0x00
+#define IMX94_PAD_I2C1_SCL__TPM2_CH0 0x02c0 0x05c4 0x0000 0x03 0x00
+#define IMX94_PAD_I2C1_SCL__SAI1_RX_SYNC 0x02c0 0x05c4 0x0000 0x04 0x00
+#define IMX94_PAD_I2C1_SCL__GPIO1_IO0 0x02c0 0x05c4 0x0000 0x05 0x00
+
+#define IMX94_PAD_I2C1_SDA__LPI2C1_SDA 0x02c4 0x05c8 0x0000 0x00 0x00
+#define IMX94_PAD_I2C1_SDA__I3C1_SDA 0x02c4 0x05c8 0x0000 0x01 0x00
+#define IMX94_PAD_I2C1_SDA__LPUART1_RIN_B 0x02c4 0x05c8 0x0000 0x02 0x00
+#define IMX94_PAD_I2C1_SDA__TPM2_CH1 0x02c4 0x05c8 0x0000 0x03 0x00
+#define IMX94_PAD_I2C1_SDA__SAI1_RX_BCLK 0x02c4 0x05c8 0x0000 0x04 0x00
+#define IMX94_PAD_I2C1_SDA__GPIO1_IO1 0x02c4 0x05c8 0x0000 0x05 0x00
+
+#define IMX94_PAD_I2C2_SCL__LPI2C2_SCL 0x02c8 0x05cc 0x0000 0x00 0x00
+#define IMX94_PAD_I2C2_SCL__I3C1_PUR 0x02c8 0x05cc 0x0000 0x01 0x00
+#define IMX94_PAD_I2C2_SCL__LPUART2_DCD_B 0x02c8 0x05cc 0x0000 0x02 0x00
+#define IMX94_PAD_I2C2_SCL__TPM2_CH2 0x02c8 0x05cc 0x0000 0x03 0x00
+#define IMX94_PAD_I2C2_SCL__GPT1_CLK 0x02c8 0x05cc 0x060c 0x04 0x00
+#define IMX94_PAD_I2C2_SCL__GPIO1_IO2 0x02c8 0x05cc 0x0000 0x05 0x00
+#define IMX94_PAD_I2C2_SCL__I3C1_PUR_B 0x02c8 0x05cc 0x0000 0x06 0x00
+#define IMX94_PAD_I2C2_SCL__LPIT1_TRIGGER0 0x02c8 0x05cc 0x0000 0x07 0x00
+
+#define IMX94_PAD_I2C2_SDA__LPI2C2_SDA 0x02cc 0x05d0 0x0000 0x00 0x00
+#define IMX94_PAD_I2C2_SDA__LPI2C1_HREQ 0x02cc 0x05d0 0x0000 0x01 0x00
+#define IMX94_PAD_I2C2_SDA__LPUART2_RIN_B 0x02cc 0x05d0 0x0000 0x02 0x00
+#define IMX94_PAD_I2C2_SDA__TPM2_CH3 0x02cc 0x05d0 0x0000 0x03 0x00
+#define IMX94_PAD_I2C2_SDA__SAI1_MCLK 0x02cc 0x05d0 0x0620 0x04 0x00
+#define IMX94_PAD_I2C2_SDA__GPIO1_IO3 0x02cc 0x05d0 0x0000 0x05 0x00
+#define IMX94_PAD_I2C2_SDA__EWM_OUT_B 0x02cc 0x05d0 0x0000 0x06 0x00
+#define IMX94_PAD_I2C2_SDA__LPIT1_TRIGGER1 0x02cc 0x05d0 0x0000 0x07 0x00
+
+#define IMX94_PAD_UART1_RXD__LPUART1_RX 0x02d0 0x05d4 0x0000 0x00 0x00
+#define IMX94_PAD_UART1_RXD__S400_UART_RX 0x02d0 0x05d4 0x0000 0x01 0x00
+#define IMX94_PAD_UART1_RXD__LPSPI2_SIN 0x02d0 0x05d4 0x0000 0x02 0x00
+#define IMX94_PAD_UART1_RXD__TPM1_CH0 0x02d0 0x05d4 0x0000 0x03 0x00
+#define IMX94_PAD_UART1_RXD__GPT1_CAPTURE1 0x02d0 0x05d4 0x0000 0x04 0x00
+#define IMX94_PAD_UART1_RXD__GPIO1_IO4 0x02d0 0x05d4 0x0000 0x05 0x00
+
+#define IMX94_PAD_UART1_TXD__LPUART1_TX 0x02d4 0x05d8 0x0000 0x00 0x00
+#define IMX94_PAD_UART1_TXD__S400_UART_TX 0x02d4 0x05d8 0x0000 0x01 0x00
+#define IMX94_PAD_UART1_TXD__LPSPI2_PCS0 0x02d4 0x05d8 0x0000 0x02 0x00
+#define IMX94_PAD_UART1_TXD__TPM1_CH1 0x02d4 0x05d8 0x0000 0x03 0x00
+#define IMX94_PAD_UART1_TXD__GPT1_COMPARE1 0x02d4 0x05d8 0x0000 0x04 0x00
+#define IMX94_PAD_UART1_TXD__GPIO1_IO5 0x02d4 0x05d8 0x0000 0x05 0x00
+
+#define IMX94_PAD_UART2_RXD__LPUART2_RX 0x02d8 0x05dc 0x0000 0x00 0x00
+#define IMX94_PAD_UART2_RXD__LPUART1_CTS_B 0x02d8 0x05dc 0x0000 0x01 0x00
+#define IMX94_PAD_UART2_RXD__LPSPI2_SOUT 0x02d8 0x05dc 0x0000 0x02 0x00
+#define IMX94_PAD_UART2_RXD__TPM1_CH2 0x02d8 0x05dc 0x0000 0x03 0x00
+#define IMX94_PAD_UART2_RXD__SAI1_MCLK 0x02d8 0x05dc 0x0620 0x04 0x01
+#define IMX94_PAD_UART2_RXD__GPIO1_IO6 0x02d8 0x05dc 0x0000 0x05 0x00
+#define IMX94_PAD_UART2_RXD__GPT1_CLK 0x02d8 0x05dc 0x060c 0x06 0x01
+#define IMX94_PAD_UART2_RXD__LPIT1_TRIGGER2 0x02d8 0x05dc 0x0000 0x07 0x00
+
+#define IMX94_PAD_UART2_TXD__LPUART2_TX 0x02dc 0x05e0 0x0000 0x00 0x00
+#define IMX94_PAD_UART2_TXD__LPUART1_RTS_B 0x02dc 0x05e0 0x0000 0x01 0x00
+#define IMX94_PAD_UART2_TXD__LPSPI2_SCK 0x02dc 0x05e0 0x0000 0x02 0x00
+#define IMX94_PAD_UART2_TXD__TPM1_CH3 0x02dc 0x05e0 0x0000 0x03 0x00
+#define IMX94_PAD_UART2_TXD__GPIO1_IO7 0x02dc 0x05e0 0x0000 0x05 0x00
+
+#define IMX94_PAD_PDM_CLK__PDM_CLK 0x02e0 0x05e4 0x0000 0x00 0x00
+#define IMX94_PAD_PDM_CLK__MQS1_LEFT 0x02e0 0x05e4 0x0000 0x01 0x00
+#define IMX94_PAD_PDM_CLK__LPTMR1_ALT0 0x02e0 0x05e4 0x0000 0x04 0x00
+#define IMX94_PAD_PDM_CLK__GPIO1_IO8 0x02e0 0x05e4 0x0000 0x05 0x00
+#define IMX94_PAD_PDM_CLK__CAN1_TX 0x02e0 0x05e4 0x0000 0x06 0x00
+#define IMX94_PAD_PDM_CLK__EWM_OUT_B 0x02e0 0x05e4 0x0000 0x07 0x00
+
+#define IMX94_PAD_PDM_BIT_STREAM0__PDM_BIT_STREAM0 0x02e4 0x05e8 0x0610 0x00 0x02
+#define IMX94_PAD_PDM_BIT_STREAM0__MQS1_RIGHT 0x02e4 0x05e8 0x0000 0x01 0x00
+#define IMX94_PAD_PDM_BIT_STREAM0__LPSPI1_PCS1 0x02e4 0x05e8 0x0000 0x02 0x00
+#define IMX94_PAD_PDM_BIT_STREAM0__TPM1_EXTCLK 0x02e4 0x05e8 0x0000 0x03 0x00
+#define IMX94_PAD_PDM_BIT_STREAM0__LPTMR1_ALT1 0x02e4 0x05e8 0x0000 0x04 0x00
+#define IMX94_PAD_PDM_BIT_STREAM0__GPIO1_IO9 0x02e4 0x05e8 0x0000 0x05 0x00
+#define IMX94_PAD_PDM_BIT_STREAM0__CAN1_RX 0x02e4 0x05e8 0x0608 0x06 0x00
+
+#define IMX94_PAD_PDM_BIT_STREAM1__PDM_BIT_STREAM1 0x02e8 0x05ec 0x0614 0x00 0x03
+#define IMX94_PAD_PDM_BIT_STREAM1__NMI_GLUE_NMI 0x02e8 0x05ec 0x0000 0x01 0x00
+#define IMX94_PAD_PDM_BIT_STREAM1__LPSPI2_PCS1 0x02e8 0x05ec 0x0000 0x02 0x00
+#define IMX94_PAD_PDM_BIT_STREAM1__TPM2_EXTCLK 0x02e8 0x05ec 0x0000 0x03 0x00
+#define IMX94_PAD_PDM_BIT_STREAM1__LPTMR1_ALT2 0x02e8 0x05ec 0x0000 0x04 0x00
+#define IMX94_PAD_PDM_BIT_STREAM1__GPIO1_IO10 0x02e8 0x05ec 0x0000 0x05 0x00
+#define IMX94_PAD_PDM_BIT_STREAM1__EXT_CLK1 0x02e8 0x05ec 0x0624 0x06 0x00
+
+#define IMX94_PAD_SAI1_TXFS__SAI1_TX_SYNC 0x02ec 0x05f0 0x0000 0x00 0x00
+#define IMX94_PAD_SAI1_TXFS__SAI1_TX_DATA1 0x02ec 0x05f0 0x0000 0x01 0x00
+#define IMX94_PAD_SAI1_TXFS__LPSPI1_PCS0 0x02ec 0x05f0 0x0000 0x02 0x00
+#define IMX94_PAD_SAI1_TXFS__LPUART2_DTR_B 0x02ec 0x05f0 0x0000 0x03 0x00
+#define IMX94_PAD_SAI1_TXFS__MQS1_LEFT 0x02ec 0x05f0 0x0000 0x04 0x00
+#define IMX94_PAD_SAI1_TXFS__GPIO1_IO11 0x02ec 0x05f0 0x0000 0x05 0x00
+#define IMX94_PAD_SAI1_TXFS__EWM_OUT_B 0x02ec 0x05f0 0x0000 0x06 0x00
+
+#define IMX94_PAD_SAI1_TXC__SAI1_TX_BCLK 0x02f0 0x05f4 0x0000 0x00 0x00
+#define IMX94_PAD_SAI1_TXC__LPUART2_CTS_B 0x02f0 0x05f4 0x0000 0x01 0x00
+#define IMX94_PAD_SAI1_TXC__LPSPI1_SIN 0x02f0 0x05f4 0x0000 0x02 0x00
+#define IMX94_PAD_SAI1_TXC__LPUART1_DSR_B 0x02f0 0x05f4 0x0000 0x03 0x00
+#define IMX94_PAD_SAI1_TXC__CAN1_RX 0x02f0 0x05f4 0x0608 0x04 0x01
+#define IMX94_PAD_SAI1_TXC__GPIO1_IO12 0x02f0 0x05f4 0x0000 0x05 0x00
+
+#define IMX94_PAD_SAI1_TXD0__SAI1_TX_DATA0 0x02f4 0x05f8 0x0000 0x00 0x00
+#define IMX94_PAD_SAI1_TXD0__LPUART2_RTS_B 0x02f4 0x05f8 0x0000 0x01 0x00
+#define IMX94_PAD_SAI1_TXD0__LPSPI1_SCK 0x02f4 0x05f8 0x0000 0x02 0x00
+#define IMX94_PAD_SAI1_TXD0__LPUART1_DTR_B 0x02f4 0x05f8 0x0000 0x03 0x00
+#define IMX94_PAD_SAI1_TXD0__CAN1_TX 0x02f4 0x05f8 0x0000 0x04 0x00
+#define IMX94_PAD_SAI1_TXD0__GPIO1_IO13 0x02f4 0x05f8 0x0000 0x05 0x00
+
+#define IMX94_PAD_SAI1_RXD0__SAI1_RX_DATA0 0x02f8 0x05fc 0x0000 0x00 0x00
+#define IMX94_PAD_SAI1_RXD0__SAI1_MCLK 0x02f8 0x05fc 0x0620 0x01 0x02
+#define IMX94_PAD_SAI1_RXD0__LPSPI1_SOUT 0x02f8 0x05fc 0x0000 0x02 0x00
+#define IMX94_PAD_SAI1_RXD0__LPUART2_DSR_B 0x02f8 0x05fc 0x0000 0x03 0x00
+#define IMX94_PAD_SAI1_RXD0__MQS1_RIGHT 0x02f8 0x05fc 0x0000 0x04 0x00
+#define IMX94_PAD_SAI1_RXD0__GPIO1_IO14 0x02f8 0x05fc 0x0000 0x05 0x00
+#define IMX94_PAD_SAI1_RXD0__LPIT1_TRIGGER3 0x02f8 0x05fc 0x0000 0x07 0x00
+
+#define IMX94_PAD_WDOG_ANY__WDOG_ANY 0x02fc 0x0600 0x0000 0x00 0x00
+#define IMX94_PAD_WDOG_ANY__FCCU_EOUT1 0x02fc 0x0600 0x0000 0x01 0x00
+#define IMX94_PAD_WDOG_ANY__GPIO1_IO15 0x02fc 0x0600 0x0000 0x05 0x00
+#endif /* __DTS_IMX94_PINFUNC_H */
diff --git a/arch/arm64/boot/dts/freescale/imx94-power.h b/arch/arm64/boot/dts/freescale/imx94-power.h
new file mode 100644
index 000000000000..5209afed60ed
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx94-power.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright 2024-2025 NXP
+ */
+
+#ifndef __IMX94_POWER_H
+#define __IMX94_POWER_H
+
+#define IMX94_PD_ANA 0
+#define IMX94_PD_AON 1
+#define IMX94_PD_BBSM 2
+#define IMX94_PD_M71 3
+#define IMX94_PD_CCMSRCGPC 4
+#define IMX94_PD_A55C0 5
+#define IMX94_PD_A55C1 6
+#define IMX94_PD_A55C2 7
+#define IMX94_PD_A55C3 8
+#define IMX94_PD_A55P 9
+#define IMX94_PD_DDR 10
+#define IMX94_PD_DISPLAY 11
+#define IMX94_PD_M70 12
+#define IMX94_PD_HSIO_TOP 13
+#define IMX94_PD_HSIO_WAON 14
+#define IMX94_PD_NETC 15
+#define IMX94_PD_NOC 16
+#define IMX94_PD_NPU 17
+#define IMX94_PD_WAKEUP 18
+
+#define IMX94_PERF_M33 0
+#define IMX94_PERF_M33S 1
+#define IMX94_PERF_WAKEUP 2
+#define IMX94_PERF_M70 3
+#define IMX94_PERF_M71 4
+#define IMX94_PERF_DRAM 5
+#define IMX94_PERF_HSIO 6
+#define IMX94_PERF_NPU 7
+#define IMX94_PERF_NOC 8
+#define IMX94_PERF_A55 9
+#define IMX94_PERF_DISP 10
+
+#endif /* __IMX94_POWER_H */
diff --git a/arch/arm64/boot/dts/freescale/imx94.dtsi b/arch/arm64/boot/dts/freescale/imx94.dtsi
new file mode 100644
index 000000000000..3661ea48d7d2
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx94.dtsi
@@ -0,0 +1,1148 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024-2025 NXP
+ */
+
+#include <dt-bindings/dma/fsl-edma.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "imx94-clock.h"
+#include "imx94-pinfunc.h"
+#include "imx94-power.h"
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&gic>;
+
+ osc_24m: clock-24m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "osc_24m";
+ };
+
+ dummy: clock-dummy {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "dummy";
+ };
+
+ clk_ext1: clock-ext1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <133000000>;
+ clock-output-names = "clk_ext1";
+ };
+
+ sai1_mclk: clock-sai1-mclk1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "sai1_mclk";
+ };
+
+ sai2_mclk: clock-sai2-mclk1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "sai2_mclk";
+ };
+
+ sai3_mclk: clock-sai3-mclk1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "sai3_mclk";
+ };
+
+ sai4_mclk: clock-sai4-mclk1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "sai4_mclk";
+ };
+
+ firmware {
+ scmi {
+ compatible = "arm,scmi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ mboxes = <&mu2 5 0>, <&mu2 3 0>, <&mu2 3 1>, <&mu2 5 1>;
+ shmem = <&scmi_buf0>, <&scmi_buf1>;
+ arm,max-rx-timeout-ms = <5000>;
+
+ scmi_devpd: protocol@11 {
+ reg = <0x11>;
+ #power-domain-cells = <1>;
+ };
+
+ scmi_sys_power: protocol@12 {
+ reg = <0x12>;
+ };
+
+ scmi_perf: protocol@13 {
+ reg = <0x13>;
+ #power-domain-cells = <1>;
+ };
+
+ scmi_clk: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
+
+ scmi_iomuxc: protocol@19 {
+ reg = <0x19>;
+ };
+
+ scmi_bbm: protocol@81 {
+ reg = <0x81>;
+ };
+
+ scmi_misc: protocol@84 {
+ reg = <0x84>;
+ };
+ };
+ };
+
+ pmu {
+ compatible = "arm,cortex-a55-pmu";
+ interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>;
+ clock-frequency = <24000000>;
+ interrupt-parent = <&gic>;
+ arm,no-tick-in-suspend;
+ };
+
+ gic: interrupt-controller@48000000 {
+ compatible = "arm,gic-v3";
+ reg = <0 0x48000000 0 0x10000>,
+ <0 0x48060000 0 0xc0000>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dma-noncoherent;
+ interrupt-parent = <&gic>;
+
+ its: msi-controller@48040000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0 0x48040000 0 0x20000>;
+ #msi-cells = <1>;
+ dma-noncoherent;
+ msi-controller;
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aips2: bus@42000000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x0 0x42000000 0x0 0x800000>;
+ ranges = <0x42000000 0x0 0x42000000 0x8000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ edma2: dma-controller@42000000 {
+ compatible = "fsl,imx94-edma5", "fsl,imx95-edma5";
+ reg = <0x42000000 0x210000>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ clock-names = "dma";
+ #dma-cells = <3>;
+ dma-channels = <64>;
+ interrupts-extended = <&a55_irqsteer 0>, <&a55_irqsteer 1>,
+ <&a55_irqsteer 2>, <&a55_irqsteer 3>,
+ <&a55_irqsteer 4>, <&a55_irqsteer 5>,
+ <&a55_irqsteer 6>, <&a55_irqsteer 7>,
+ <&a55_irqsteer 8>, <&a55_irqsteer 9>,
+ <&a55_irqsteer 10>, <&a55_irqsteer 11>,
+ <&a55_irqsteer 12>, <&a55_irqsteer 13>,
+ <&a55_irqsteer 14>, <&a55_irqsteer 15>,
+ <&a55_irqsteer 16>, <&a55_irqsteer 17>,
+ <&a55_irqsteer 18>, <&a55_irqsteer 19>,
+ <&a55_irqsteer 20>, <&a55_irqsteer 21>,
+ <&a55_irqsteer 22>, <&a55_irqsteer 23>,
+ <&a55_irqsteer 24>, <&a55_irqsteer 25>,
+ <&a55_irqsteer 26>, <&a55_irqsteer 27>,
+ <&a55_irqsteer 28>, <&a55_irqsteer 29>,
+ <&a55_irqsteer 30>, <&a55_irqsteer 31>,
+ <&a55_irqsteer 64>, <&a55_irqsteer 65>,
+ <&a55_irqsteer 66>, <&a55_irqsteer 67>,
+ <&a55_irqsteer 68>, <&a55_irqsteer 69>,
+ <&a55_irqsteer 70>, <&a55_irqsteer 71>,
+ <&a55_irqsteer 72>, <&a55_irqsteer 73>,
+ <&a55_irqsteer 74>, <&a55_irqsteer 75>,
+ <&a55_irqsteer 76>, <&a55_irqsteer 77>,
+ <&a55_irqsteer 78>, <&a55_irqsteer 79>,
+ <&a55_irqsteer 80>, <&a55_irqsteer 81>,
+ <&a55_irqsteer 82>, <&a55_irqsteer 83>,
+ <&a55_irqsteer 84>, <&a55_irqsteer 85>,
+ <&a55_irqsteer 86>, <&a55_irqsteer 87>,
+ <&a55_irqsteer 88>, <&a55_irqsteer 89>,
+ <&a55_irqsteer 90>, <&a55_irqsteer 91>,
+ <&a55_irqsteer 92>, <&a55_irqsteer 93>,
+ <&a55_irqsteer 94>, <&a55_irqsteer 95>;
+ };
+
+ mu10: mailbox@42430000 {
+ compatible = "fsl,imx94-mu", "fsl,imx95-mu";
+ reg = <0x42430000 0x10000>;
+ interrupts = <GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ i3c2: i3c@42520000 {
+ compatible = "silvaco,i3c-master-v1";
+ reg = <0x42520000 0x10000>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX94_CLK_I3C2SLOW>,
+ <&dummy>;
+ clock-names = "pclk", "fast_clk", "slow_clk";
+ status = "disabled";
+ };
+
+ lpi2c3: i2c@42530000 {
+ compatible = "fsl,imx94-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x42530000 0x10000>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPI2C3>,
+ <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ dmas = <&edma2 5 0 0>, <&edma2 6 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpi2c4: i2c@42540000 {
+ compatible = "fsl,imx94-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x42540000 0x10000>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPI2C4>,
+ <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ dmas = <&edma4 4 0 0>, <&edma4 5 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpspi3: spi@42550000 {
+ compatible = "fsl,imx94-spi", "fsl,imx7ulp-spi";
+ reg = <0x42550000 0x10000>;
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPSPI3>,
+ <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ dmas = <&edma2 7 0 0>, <&edma2 8 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpspi4: spi@42560000 {
+ compatible = "fsl,imx94-spi", "fsl,imx7ulp-spi";
+ reg = <0x42560000 0x10000>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPSPI4>,
+ <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ dmas = <&edma4 6 0 0>, <&edma4 7 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpuart3: serial@42570000 {
+ compatible = "fsl,imx94-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x42570000 0x1000>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_LPUART3>;
+ clock-names = "ipg";
+ dmas = <&edma2 10 0 FSL_EDMA_RX>, <&edma2 9 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ lpuart4: serial@42580000 {
+ compatible = "fsl,imx94-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x42580000 0x1000>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_LPUART4>;
+ clock-names = "ipg";
+ dmas = <&edma4 10 0 FSL_EDMA_RX>, <&edma4 9 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ lpuart5: serial@42590000 {
+ compatible = "fsl,imx94-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x42590000 0x1000>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_LPUART5>;
+ clock-names = "ipg";
+ dmas = <&edma2 12 0 FSL_EDMA_RX>, <&edma2 11 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ lpuart6: serial@425a0000 {
+ compatible = "fsl,imx94-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x425a0000 0x1000>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_LPUART6>;
+ clock-names = "ipg";
+ dmas = <&edma4 12 0 FSL_EDMA_RX>, <&edma4 11 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ flexcan2: can@425b0000 {
+ compatible = "fsl,imx94-flexcan", "fsl,imx95-flexcan";
+ reg = <0x425b0000 0x10000>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX94_CLK_CAN2>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&scmi_clk IMX94_CLK_CAN2>;
+ assigned-clock-parents = <&scmi_clk IMX94_CLK_SYSPLL1_PFD1_DIV2>;
+ assigned-clock-rates = <80000000>;
+ fsl,clk-source = /bits/ 8 <0>;
+ status = "disabled";
+ };
+
+ flexcan3: can@425e0000 {
+ compatible = "fsl,imx94-flexcan", "fsl,imx95-flexcan";
+ reg = <0x425e0000 0x10000>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX94_CLK_CAN3>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&scmi_clk IMX94_CLK_CAN3>;
+ assigned-clock-parents = <&scmi_clk IMX94_CLK_SYSPLL1_PFD1_DIV2>;
+ assigned-clock-rates = <80000000>;
+ fsl,clk-source = /bits/ 8 <0>;
+ status = "disabled";
+ };
+
+ flexcan4: can@425f0000 {
+ compatible = "fsl,imx94-flexcan", "fsl,imx95-flexcan";
+ reg = <0x425f0000 0x10000>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX94_CLK_CAN4>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&scmi_clk IMX94_CLK_CAN4>;
+ assigned-clock-parents = <&scmi_clk IMX94_CLK_SYSPLL1_PFD1_DIV2>;
+ assigned-clock-rates = <80000000>;
+ fsl,clk-source = /bits/ 8 <0>;
+ status = "disabled";
+ };
+
+ flexcan5: can@42600000 {
+ compatible = "fsl,imx94-flexcan", "fsl,imx95-flexcan";
+ reg = <0x42600000 0x10000>;
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX94_CLK_CAN5>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&scmi_clk IMX94_CLK_CAN5>;
+ assigned-clock-parents = <&scmi_clk IMX94_CLK_SYSPLL1_PFD1_DIV2>;
+ assigned-clock-rates = <80000000>;
+ fsl,clk-source = /bits/ 8 <0>;
+ status = "disabled";
+ };
+
+ sai2: sai@42650000 {
+ compatible = "fsl,imx94-sai", "fsl,imx95-sai";
+ reg = <0x42650000 0x10000>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>, <&dummy>,
+ <&scmi_clk IMX94_CLK_SAI2>, <&dummy>, <&dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dmas = <&edma2 30 0 FSL_EDMA_RX>, <&edma2 29 0 0>;
+ dma-names = "rx", "tx";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ sai3: sai@42660000 {
+ compatible = "fsl,imx94-sai", "fsl,imx95-sai";
+ reg = <0x42660000 0x10000>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>, <&dummy>,
+ <&scmi_clk IMX94_CLK_SAI3>, <&dummy>, <&dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dmas = <&edma2 32 0 FSL_EDMA_RX>, <&edma2 31 0 0>;
+ dma-names = "rx", "tx";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ sai4: sai@42670000 {
+ compatible = "fsl,imx94-sai", "fsl,imx95-sai";
+ reg = <0x42670000 0x10000>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>, <&dummy>,
+ <&scmi_clk IMX94_CLK_SAI4>, <&dummy>, <&dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dmas = <&edma2 36 0 FSL_EDMA_RX>, <&edma2 35 0 0>;
+ dma-names = "rx", "tx";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ lpuart7: serial@42690000 {
+ compatible = "fsl,imx94-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x42690000 0x1000>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_LPUART7>;
+ clock-names = "ipg";
+ dmas = <&edma2 46 0 FSL_EDMA_RX>, <&edma2 45 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ lpuart8: serial@426a0000 {
+ compatible = "fsl,imx94-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x426a0000 0x1000>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_LPUART8>;
+ clock-names = "ipg";
+ dmas = <&edma4 39 0 FSL_EDMA_RX>, <&edma4 38 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ lpi2c5: i2c@426b0000 {
+ compatible = "fsl,imx94-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x426b0000 0x10000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPI2C5>,
+ <&scmi_clk IMX94_CLK_BUSAON>;
+ clock-names = "per", "ipg";
+ dmas = <&edma2 37 0 0>, <&edma2 38 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpi2c6: i2c@426c0000 {
+ compatible = "fsl,imx94-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x426c0000 0x10000>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPI2C6>,
+ <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ dmas = <&edma4 30 0 0>, <&edma4 31 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpi2c7: i2c@426d0000 {
+ compatible = "fsl,imx94-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x426d0000 0x10000>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPI2C7>,
+ <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ dmas = <&edma2 39 0 0>, <&edma2 40 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpi2c8: i2c@426e0000 {
+ compatible = "fsl,imx94-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x426e0000 0x10000>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPI2C8>,
+ <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ dmas = <&edma4 32 0 0>, <&edma4 33 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpspi5: spi@426f0000 {
+ compatible = "fsl,imx94-spi", "fsl,imx7ulp-spi";
+ reg = <0x426f0000 0x10000>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPSPI5>,
+ <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ dmas = <&edma2 41 0 0>, <&edma2 42 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpspi6: spi@42700000 {
+ compatible = "fsl,imx94-spi", "fsl,imx7ulp-spi";
+ reg = <0x42700000 0x10000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPSPI6>,
+ <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ dmas = <&edma4 34 0 0>, <&edma4 35 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpspi7: spi@42710000 {
+ compatible = "fsl,imx94-spi", "fsl,imx7ulp-spi";
+ reg = <0x42710000 0x10000>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPSPI7>,
+ <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ dmas = <&edma2 43 0 0>, <&edma2 44 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpspi8: spi@42720000 {
+ compatible = "fsl,imx94-spi", "fsl,imx7ulp-spi";
+ reg = <0x42720000 0x10000>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPSPI8>,
+ <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ dmas = <&edma4 36 0 0>, <&edma4 37 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ mu11: mailbox@42730000 {
+ compatible = "fsl,imx94-mu", "fsl,imx95-mu";
+ reg = <0x42730000 0x10000>;
+ interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ edma4: dma-controller@42df0000 {
+ compatible = "fsl,imx94-edma5", "fsl,imx95-edma5";
+ reg = <0x42df0000 0x210000>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ clock-names = "dma";
+ #dma-cells = <3>;
+ dma-channels = <64>;
+ interrupts-extended = <&a55_irqsteer 128>, <&a55_irqsteer 129>,
+ <&a55_irqsteer 130>, <&a55_irqsteer 131>,
+ <&a55_irqsteer 132>, <&a55_irqsteer 133>,
+ <&a55_irqsteer 134>, <&a55_irqsteer 135>,
+ <&a55_irqsteer 136>, <&a55_irqsteer 137>,
+ <&a55_irqsteer 138>, <&a55_irqsteer 139>,
+ <&a55_irqsteer 140>, <&a55_irqsteer 141>,
+ <&a55_irqsteer 142>, <&a55_irqsteer 143>,
+ <&a55_irqsteer 144>, <&a55_irqsteer 145>,
+ <&a55_irqsteer 146>, <&a55_irqsteer 147>,
+ <&a55_irqsteer 148>, <&a55_irqsteer 149>,
+ <&a55_irqsteer 150>, <&a55_irqsteer 151>,
+ <&a55_irqsteer 152>, <&a55_irqsteer 153>,
+ <&a55_irqsteer 154>, <&a55_irqsteer 155>,
+ <&a55_irqsteer 156>, <&a55_irqsteer 157>,
+ <&a55_irqsteer 158>, <&a55_irqsteer 159>,
+ <&a55_irqsteer 192>, <&a55_irqsteer 193>,
+ <&a55_irqsteer 194>, <&a55_irqsteer 195>,
+ <&a55_irqsteer 196>, <&a55_irqsteer 197>,
+ <&a55_irqsteer 198>, <&a55_irqsteer 199>,
+ <&a55_irqsteer 200>, <&a55_irqsteer 201>,
+ <&a55_irqsteer 202>, <&a55_irqsteer 203>,
+ <&a55_irqsteer 204>, <&a55_irqsteer 205>,
+ <&a55_irqsteer 206>, <&a55_irqsteer 207>,
+ <&a55_irqsteer 208>, <&a55_irqsteer 209>,
+ <&a55_irqsteer 210>, <&a55_irqsteer 211>,
+ <&a55_irqsteer 212>, <&a55_irqsteer 213>,
+ <&a55_irqsteer 214>, <&a55_irqsteer 215>,
+ <&a55_irqsteer 216>, <&a55_irqsteer 217>,
+ <&a55_irqsteer 218>, <&a55_irqsteer 219>,
+ <&a55_irqsteer 220>, <&a55_irqsteer 221>,
+ <&a55_irqsteer 222>, <&a55_irqsteer 223>;
+ };
+ };
+
+ aips3: bus@42800000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0 0x42800000 0 0x800000>;
+ ranges = <0x42800000 0x0 0x42800000 0x800000>,
+ <0x28000000 0x0 0x28000000 0x1000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ usdhc1: mmc@42850000 {
+ compatible = "fsl,imx94-usdhc", "fsl,imx8mm-usdhc";
+ reg = <0x42850000 0x10000>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX94_CLK_WAKEUPAXI>,
+ <&scmi_clk IMX94_CLK_USDHC1>;
+ clock-names = "ipg", "ahb", "per";
+ assigned-clocks = <&scmi_clk IMX94_CLK_USDHC1>;
+ assigned-clock-parents = <&scmi_clk IMX94_CLK_SYSPLL1_PFD1>;
+ assigned-clock-rates = <400000000>;
+ bus-width = <8>;
+ fsl,tuning-start-tap = <1>;
+ fsl,tuning-step = <2>;
+ status = "disabled";
+ };
+
+ usdhc2: mmc@42860000 {
+ compatible = "fsl,imx94-usdhc", "fsl,imx8mm-usdhc";
+ reg = <0x42860000 0x10000>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX94_CLK_WAKEUPAXI>,
+ <&scmi_clk IMX94_CLK_USDHC2>;
+ clock-names = "ipg", "ahb", "per";
+ assigned-clocks = <&scmi_clk IMX94_CLK_USDHC2>;
+ assigned-clock-parents = <&scmi_clk IMX94_CLK_SYSPLL1_PFD1>;
+ assigned-clock-rates = <200000000>;
+ bus-width = <4>;
+ fsl,tuning-start-tap = <1>;
+ fsl,tuning-step = <2>;
+ status = "disabled";
+ };
+
+ usdhc3: mmc@42880000 {
+ compatible = "fsl,imx94-usdhc", "fsl,imx8mm-usdhc";
+ reg = <0x42880000 0x10000>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX94_CLK_WAKEUPAXI>,
+ <&scmi_clk IMX94_CLK_USDHC3>;
+ clock-names = "ipg", "ahb", "per";
+ assigned-clocks = <&scmi_clk IMX94_CLK_USDHC3>;
+ assigned-clock-parents = <&scmi_clk IMX94_CLK_SYSPLL1_PFD1>;
+ assigned-clock-rates = <200000000>;
+ bus-width = <4>;
+ fsl,tuning-start-tap = <1>;
+ fsl,tuning-step = <2>;
+ status = "disabled";
+ };
+
+ lpuart9: serial@42a50000 {
+ compatible = "fsl,imx94-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x42a50000 0x1000>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_LPUART9>;
+ clock-names = "ipg";
+ dmas = <&edma2 51 0 FSL_EDMA_RX>, <&edma2 50 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ lpuart10: serial@42a60000 {
+ compatible = "fsl,imx94-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x42a60000 0x1000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_LPUART10>;
+ clock-names = "ipg";
+ dmas = <&edma4 47 0 FSL_EDMA_RX>, <&edma4 46 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ lpuart11: serial@42a70000 {
+ compatible = "fsl,imx94-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x42a70000 0x1000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_LPUART11>;
+ clock-names = "ipg";
+ dmas = <&edma2 53 0 FSL_EDMA_RX>, <&edma2 52 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ lpuart12: serial@42a80000 {
+ compatible = "fsl,imx94-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x42a80000 0x1000>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_LPUART12>;
+ clock-names = "ipg";
+ dmas = <&edma4 49 0 FSL_EDMA_RX>, <&edma4 48 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ mu12: mailbox@42ac0000 {
+ compatible = "fsl,imx94-mu", "fsl,imx95-mu";
+ reg = <0x42ac0000 0x10000>;
+ interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ mu13: mailbox@42ae0000 {
+ compatible = "fsl,imx94-mu", "fsl,imx95-mu";
+ reg = <0x42ae0000 0x10000>;
+ interrupts = <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ mu14: mailbox@42b00000 {
+ compatible = "fsl,imx94-mu", "fsl,imx95-mu";
+ reg = <0x42b00000 0x10000>;
+ interrupts = <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ mu15: mailbox@42b20000 {
+ compatible = "fsl,imx94-mu", "fsl,imx95-mu";
+ reg = <0x42b20000 0x10000>;
+ interrupts = <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ mu16: mailbox@42b40000 {
+ compatible = "fsl,imx94-mu", "fsl,imx95-mu";
+ reg = <0x42b40000 0x10000>;
+ interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ mu17: mailbox@42b60000 {
+ compatible = "fsl,imx94-mu", "fsl,imx95-mu";
+ reg = <0x42b60000 0x10000>;
+ interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+ };
+
+ gpio2: gpio@43810000 {
+ compatible = "fsl,imx94-gpio", "fsl,imx8ulp-gpio";
+ reg = <0x0 0x43810000 0x0 0x1000>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&scmi_iomuxc 0 4 32>;
+ };
+
+ gpio3: gpio@43820000 {
+ compatible = "fsl,imx94-gpio", "fsl,imx8ulp-gpio";
+ reg = <0x0 0x43820000 0x0 0x1000>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&scmi_iomuxc 0 36 26>;
+ };
+
+ gpio4: gpio@43840000 {
+ compatible = "fsl,imx94-gpio", "fsl,imx8ulp-gpio";
+ reg = <0x0 0x43840000 0x0 0x1000>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&scmi_iomuxc 0 62 4>, <&scmi_iomuxc 4 0 4>,
+ <&scmi_iomuxc 8 140 12>, <&scmi_iomuxc 20 164 12>;
+ };
+
+ gpio5: gpio@43850000 {
+ compatible = "fsl,imx94-gpio", "fsl,imx8ulp-gpio";
+ reg = <0x0 0x43850000 0x0 0x1000>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&scmi_iomuxc 0 108 32>;
+ };
+
+ gpio6: gpio@43860000 {
+ compatible = "fsl,imx94-gpio", "fsl,imx8ulp-gpio";
+ reg = <0x0 0x43860000 0x0 0x1000>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&scmi_iomuxc 0 66 32>;
+ };
+
+ gpio7: gpio@43870000 {
+ compatible = "fsl,imx94-gpio", "fsl,imx8ulp-gpio";
+ reg = <0x0 0x43870000 0x0 0x1000>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&scmi_iomuxc 0 98 10>, <&scmi_iomuxc 16 152 12>;
+ };
+
+ aips1: bus@44000000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x0 0x44000000 0x0 0x800000>;
+ ranges = <0x44000000 0x0 0x44000000 0x800000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ edma1: dma-controller@44000000 {
+ compatible = "fsl,imx94-edma3", "fsl,imx93-edma3";
+ reg = <0x44000000 0x210000>;
+ interrupts = <GIC_SPI 230 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 236 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 237 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSAON>;
+ clock-names = "dma";
+ #dma-cells = <3>;
+ dma-channels = <32>;
+ };
+
+ mu1: mailbox@44220000 {
+ compatible = "fsl,imx94-mu", "fsl,imx95-mu";
+ reg = <0x44220000 0x10000>;
+ interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSAON>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ system_counter: timer@44290000 {
+ compatible = "nxp,imx94-sysctr-timer", "nxp,imx95-sysctr-timer";
+ reg = <0x44290000 0x30000>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&osc_24m>;
+ clock-names = "per";
+ nxp,no-divider;
+ };
+
+ tpm1: pwm@44310000 {
+ compatible = "fsl,imx94-pwm", "fsl,imx7ulp-pwm";
+ reg = <0x44310000 0x1000>;
+ clocks = <&scmi_clk IMX94_CLK_BUSAON>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ tpm2: pwm@44320000 {
+ compatible = "fsl,imx94-pwm", "fsl,imx7ulp-pwm";
+ reg = <0x44320000 0x1000>;
+ clocks = <&scmi_clk IMX94_CLK_TPM2>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ i3c1: i3c@44330000 {
+ compatible = "silvaco,i3c-master-v1";
+ reg = <0x44330000 0x10000>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_BUSAON>,
+ <&scmi_clk IMX94_CLK_I3C1SLOW>,
+ <&dummy>;
+ clock-names = "pclk", "fast_clk", "slow_clk";
+ status = "disabled";
+ };
+
+ lpi2c1: i2c@44340000 {
+ compatible = "fsl,imx94-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x44340000 0x10000>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPI2C1>,
+ <&scmi_clk IMX94_CLK_BUSAON>;
+ clock-names = "per", "ipg";
+ dmas = <&edma1 12 0 0>, <&edma1 13 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpi2c2: i2c@44350000 {
+ compatible = "fsl,imx94-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x44350000 0x10000>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPI2C2>,
+ <&scmi_clk IMX94_CLK_BUSAON>;
+ clock-names = "per", "ipg";
+ dmas = <&edma1 14 0 0>, <&edma1 15 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpspi1: spi@44360000 {
+ compatible = "fsl,imx94-spi", "fsl,imx7ulp-spi";
+ reg = <0x44360000 0x10000>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPSPI1>,
+ <&scmi_clk IMX94_CLK_BUSAON>;
+ clock-names = "per", "ipg";
+ dmas = <&edma1 16 0 0>, <&edma1 17 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpspi2: spi@44370000 {
+ compatible = "fsl,imx94-spi", "fsl,imx7ulp-spi";
+ reg = <0x44370000 0x10000>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX94_CLK_LPSPI2>,
+ <&scmi_clk IMX94_CLK_BUSAON>;
+ clock-names = "per", "ipg";
+ dmas = <&edma1 18 0 0>, <&edma1 19 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ lpuart1: serial@44380000 {
+ compatible = "fsl,imx94-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x44380000 0x1000>;
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_LPUART1>;
+ clock-names = "ipg";
+ dmas = <&edma1 21 0 FSL_EDMA_RX>, <&edma1 20 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ lpuart2: serial@44390000 {
+ compatible = "fsl,imx94-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x44390000 0x1000>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_LPUART2>;
+ clock-names = "ipg";
+ dmas = <&edma1 23 0 FSL_EDMA_RX>, <&edma1 22 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ flexcan1: can@443a0000 {
+ compatible = "fsl,imx94-flexcan", "fsl,imx95-flexcan";
+ reg = <0x443a0000 0x10000>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ sai1: sai@443b0000 {
+ compatible = "fsl,imx94-sai", "fsl,imx95-sai";
+ reg = <0x443b0000 0x10000>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSAON>, <&dummy>,
+ <&scmi_clk IMX94_CLK_SAI1>, <&dummy>,
+ <&dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dmas = <&edma1 25 0 FSL_EDMA_RX>, <&edma1 24 0 0>;
+ dma-names = "rx", "tx";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ adc1: adc@44530000 {
+ compatible = "nxp,imx94-adc", "nxp,imx93-adc";
+ reg = <0x44530000 0x10000>;
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_ADC>;
+ clock-names = "ipg";
+ #io-channel-cells = <1>;
+ status = "disabled";
+ };
+
+ mu2: mailbox@445b0000 {
+ compatible = "fsl,imx94-mu", "fsl,imx95-mu";
+ reg = <0x445b0000 0x1000>;
+ ranges;
+ interrupts = <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #mbox-cells = <2>;
+
+ sram0: sram@445b1000 {
+ compatible = "mmio-sram";
+ reg = <0x445b1000 0x400>;
+ ranges = <0x0 0x445b1000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ scmi_buf0: scmi-sram-section@0 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0x80>;
+ };
+
+ scmi_buf1: scmi-sram-section@80 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x80 0x80>;
+ };
+ };
+ };
+
+ mu3: mailbox@445d0000 {
+ compatible = "fsl,imx94-mu", "fsl,imx95-mu";
+ reg = <0x445d0000 0x10000>;
+ interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ mu4: mailbox@445f0000 {
+ compatible = "fsl,imx94-mu", "fsl,imx95-mu";
+ reg = <0x445f0000 0x10000>;
+ interrupts = <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ mu6: mailbox@44630000 {
+ compatible = "fsl,imx94-mu", "fsl,imx95-mu";
+ reg = <0x44630000 0x10000>;
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ a55_irqsteer: interrupt-controller@446a0000 {
+ compatible = "fsl,imx94-irqsteer", "fsl,imx-irqsteer";
+ reg = <0x446a0000 0x1000>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSAON>;
+ clock-names = "ipg";
+ fsl,channel = <0>;
+ fsl,num-irqs = <960>;
+ };
+ };
+
+ aips4: bus@49000000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x0 0x49000000 0x0 0x800000>;
+ ranges = <0x49000000 0x0 0x49000000 0x800000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ wdog3: watchdog@49220000 {
+ compatible = "fsl,imx94-wdt", "fsl,imx93-wdt";
+ reg = <0x49220000 0x10000>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX94_CLK_BUSWAKEUP>;
+ timeout-sec = <40>;
+ fsl,ext-reset-output;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx943-evk.dts b/arch/arm64/boot/dts/freescale/imx943-evk.dts
new file mode 100644
index 000000000000..cc8f3e6a1789
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx943-evk.dts
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024-2025 NXP
+ */
+
+/dts-v1/;
+
+#include "imx943.dtsi"
+
+/ {
+ compatible = "fsl,imx943-evk", "fsl,imx94";
+ model = "NXP i.MX943 EVK board";
+
+ aliases {
+ mmc0 = &usdhc1;
+ mmc1 = &usdhc2;
+ serial0 = &lpuart1;
+ };
+
+ chosen {
+ stdout-path = &lpuart1;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ off-on-delay-us = <12000>;
+ pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
+ pinctrl-names = "default";
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "VDD_SD2_3V3";
+ gpio = <&gpio4 27 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reserved-memory {
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ linux,cma {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x80000000 0 0x7f000000>;
+ reusable;
+ size = <0 0x10000000>;
+ linux,cma-default;
+ };
+ };
+
+ memory@80000000 {
+ reg = <0x0 0x80000000 0x0 0x80000000>;
+ device_type = "memory";
+ };
+};
+
+&lpuart1 {
+ pinctrl-0 = <&pinctrl_uart1>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&scmi_iomuxc {
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ IMX94_PAD_UART1_TXD__LPUART1_TX 0x31e
+ IMX94_PAD_UART1_RXD__LPUART1_RX 0x31e
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+ fsl,pins = <
+ IMX94_PAD_SD1_CLK__USDHC1_CLK 0x158e
+ IMX94_PAD_SD1_CMD__USDHC1_CMD 0x138e
+ IMX94_PAD_SD1_DATA0__USDHC1_DATA0 0x138e
+ IMX94_PAD_SD1_DATA1__USDHC1_DATA1 0x138e
+ IMX94_PAD_SD1_DATA2__USDHC1_DATA2 0x138e
+ IMX94_PAD_SD1_DATA3__USDHC1_DATA3 0x138e
+ IMX94_PAD_SD1_DATA4__USDHC1_DATA4 0x138e
+ IMX94_PAD_SD1_DATA5__USDHC1_DATA5 0x138e
+ IMX94_PAD_SD1_DATA6__USDHC1_DATA6 0x138e
+ IMX94_PAD_SD1_DATA7__USDHC1_DATA7 0x138e
+ IMX94_PAD_SD1_STROBE__USDHC1_STROBE 0x158e
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+ fsl,pins = <
+ IMX94_PAD_SD1_CLK__USDHC1_CLK 0x15fe
+ IMX94_PAD_SD1_CMD__USDHC1_CMD 0x13fe
+ IMX94_PAD_SD1_DATA0__USDHC1_DATA0 0x13fe
+ IMX94_PAD_SD1_DATA1__USDHC1_DATA1 0x13fe
+ IMX94_PAD_SD1_DATA2__USDHC1_DATA2 0x13fe
+ IMX94_PAD_SD1_DATA3__USDHC1_DATA3 0x13fe
+ IMX94_PAD_SD1_DATA4__USDHC1_DATA4 0x13fe
+ IMX94_PAD_SD1_DATA5__USDHC1_DATA5 0x13fe
+ IMX94_PAD_SD1_DATA6__USDHC1_DATA6 0x13fe
+ IMX94_PAD_SD1_DATA7__USDHC1_DATA7 0x13fe
+ IMX94_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ IMX94_PAD_SD1_CLK__USDHC1_CLK 0x158e
+ IMX94_PAD_SD1_CMD__USDHC1_CMD 0x138e
+ IMX94_PAD_SD1_DATA0__USDHC1_DATA0 0x138e
+ IMX94_PAD_SD1_DATA1__USDHC1_DATA1 0x138e
+ IMX94_PAD_SD1_DATA2__USDHC1_DATA2 0x138e
+ IMX94_PAD_SD1_DATA3__USDHC1_DATA3 0x138e
+ IMX94_PAD_SD1_DATA4__USDHC1_DATA4 0x138e
+ IMX94_PAD_SD1_DATA5__USDHC1_DATA5 0x138e
+ IMX94_PAD_SD1_DATA6__USDHC1_DATA6 0x138e
+ IMX94_PAD_SD1_DATA7__USDHC1_DATA7 0x138e
+ IMX94_PAD_SD1_STROBE__USDHC1_STROBE 0x158e
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ IMX94_PAD_SD2_CLK__USDHC2_CLK 0x158e
+ IMX94_PAD_SD2_CMD__USDHC2_CMD 0x138e
+ IMX94_PAD_SD2_DATA0__USDHC2_DATA0 0x138e
+ IMX94_PAD_SD2_DATA1__USDHC2_DATA1 0x138e
+ IMX94_PAD_SD2_DATA2__USDHC2_DATA2 0x138e
+ IMX94_PAD_SD2_DATA3__USDHC2_DATA3 0x138e
+ IMX94_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ IMX94_PAD_SD2_CLK__USDHC2_CLK 0x15fe
+ IMX94_PAD_SD2_CMD__USDHC2_CMD 0x13fe
+ IMX94_PAD_SD2_DATA0__USDHC2_DATA0 0x13fe
+ IMX94_PAD_SD2_DATA1__USDHC2_DATA1 0x13fe
+ IMX94_PAD_SD2_DATA2__USDHC2_DATA2 0x13fe
+ IMX94_PAD_SD2_DATA3__USDHC2_DATA3 0x13fe
+ IMX94_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+ fsl,pins = <
+ IMX94_PAD_SD2_CD_B__GPIO4_IO20 0x31e
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ IMX94_PAD_SD2_CLK__USDHC2_CLK 0x158e
+ IMX94_PAD_SD2_CMD__USDHC2_CMD 0x138e
+ IMX94_PAD_SD2_DATA0__USDHC2_DATA0 0x138e
+ IMX94_PAD_SD2_DATA1__USDHC2_DATA1 0x138e
+ IMX94_PAD_SD2_DATA2__USDHC2_DATA2 0x138e
+ IMX94_PAD_SD2_DATA3__USDHC2_DATA3 0x138e
+ IMX94_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+ pinctrl_reg_usdhc2_vmmc: usdhc2regvmmcgrp {
+ fsl,pins = <
+ IMX94_PAD_SD2_RESET_B__GPIO4_IO27 0x31e
+ >;
+ };
+};
+
+&usdhc1 {
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ bus-width = <8>;
+ non-removable;
+ no-sdio;
+ no-sd;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ bus-width = <4>;
+ no-mmc;
+ no-sdio;
+ cd-gpios = <&gpio4 20 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ status = "okay";
+};
+
+&wdog3 {
+ fsl,ext-reset-output;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx943.dtsi b/arch/arm64/boot/dts/freescale/imx943.dtsi
new file mode 100644
index 000000000000..45b8da758e87
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx943.dtsi
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2025 NXP
+ */
+
+#include "imx94.dtsi"
+
+/ {
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ idle-states {
+ entry-method = "psci";
+
+ cpu_pd_wait: cpu-pd-wait {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x0010033>;
+ local-timer-stop;
+ entry-latency-us = <1000>;
+ exit-latency-us = <700>;
+ min-residency-us = <2700>;
+ wakeup-latency-us = <1500>;
+ };
+ };
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a55";
+ device_type = "cpu";
+ reg = <0x0>;
+ enable-method = "psci";
+ #cooling-cells = <2>;
+ cpu-idle-states = <&cpu_pd_wait>;
+ power-domains = <&scmi_perf IMX94_PERF_A55>;
+ power-domain-names = "perf";
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l0>;
+ };
+
+ cpu1: cpu@100 {
+ compatible = "arm,cortex-a55";
+ device_type = "cpu";
+ reg = <0x100>;
+ enable-method = "psci";
+ #cooling-cells = <2>;
+ cpu-idle-states = <&cpu_pd_wait>;
+ power-domains = <&scmi_perf IMX94_PERF_A55>;
+ power-domain-names = "perf";
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l1>;
+ };
+
+ cpu2: cpu@200 {
+ compatible = "arm,cortex-a55";
+ device_type = "cpu";
+ reg = <0x200>;
+ enable-method = "psci";
+ #cooling-cells = <2>;
+ cpu-idle-states = <&cpu_pd_wait>;
+ power-domains = <&scmi_perf IMX94_PERF_A55>;
+ power-domain-names = "perf";
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l2>;
+ };
+
+ cpu3: cpu@300 {
+ compatible = "arm,cortex-a55";
+ device_type = "cpu";
+ reg = <0x300>;
+ enable-method = "psci";
+ #cooling-cells = <2>;
+ cpu-idle-states = <&cpu_pd_wait>;
+ power-domains = <&scmi_perf IMX94_PERF_A55>;
+ power-domain-names = "perf";
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l3>;
+ };
+
+ l2_cache_l0: l2-cache-l0 {
+ compatible = "cache";
+ cache-size = <65536>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_l1: l2-cache-l1 {
+ compatible = "cache";
+ cache-size = <65536>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_l2: l2-cache-l2 {
+ compatible = "cache";
+ cache-size = <65536>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_l3: l2-cache-l3 {
+ compatible = "cache";
+ cache-size = <65536>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l3_cache: l3-cache {
+ compatible = "cache";
+ cache-size = <1048576>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
+ cache-level = <3>;
+ cache-unified;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx95-15x15-evk.dts b/arch/arm64/boot/dts/freescale/imx95-15x15-evk.dts
index 514f2429dcbc..6c47f4b47356 100644
--- a/arch/arm64/boot/dts/freescale/imx95-15x15-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx95-15x15-evk.dts
@@ -136,6 +136,15 @@
startup-delay-us = <20000>;
};
+ reg_usb_vbus: regulator-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "USB_VBUS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pcal6524 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
reg_vcc_12v: regulator-vcc-12v {
compatible = "regulator-fixed";
regulator-max-microvolt = <12000000>;
@@ -525,6 +534,13 @@
status = "okay";
};
+&pcie0_ep {
+ pinctrl-0 = <&pinctrl_pcie0>;
+ pinctrl-names = "default";
+ vpcie-supply = <&reg_m2_pwr>;
+ status = "disabled";
+};
+
&sai1 {
assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>,
<&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>,
@@ -1023,6 +1039,13 @@
status = "okay";
};
+&usb2 {
+ dr_mode = "host";
+ vbus-supply = <&reg_usb_vbus>;
+ disable-over-current;
+ status = "okay";
+};
+
&usb3 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx95-19x19-evk-sof.dts b/arch/arm64/boot/dts/freescale/imx95-19x19-evk-sof.dts
new file mode 100644
index 000000000000..808a9fe3ebb2
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx95-19x19-evk-sof.dts
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2025 NXP
+ */
+
+/dts-v1/;
+
+#include "imx95-19x19-evk.dts"
+
+/ {
+ sof_cpu: cm7-cpu@80000000 {
+ compatible = "fsl,imx95-cm7-sof";
+ reg = <0x0 0x80000000 0x0 0x6100000>;
+ reg-names = "sram";
+ memory-region = <&adma_res>;
+ memory-region-names = "dma";
+ mboxes = <&mu7 2 0>, <&mu7 2 1>, <&mu7 3 0>, <&mu7 3 1>;
+ mbox-names = "txdb0", "txdb1", "rxdb0", "rxdb1";
+
+ sai3_cpu: port {
+ sai3_cpu_ep: endpoint {
+ remote-endpoint = <&wm8962_ep>;
+ };
+ };
+ };
+
+ reserved-memory {
+ adma_res: memory@86100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x86100000 0x0 0x100000>;
+ no-map;
+ };
+ };
+
+ sof-sound {
+ compatible = "audio-graph-card2";
+ links = <&sai3_cpu>;
+ label = "audio";
+ hp-det-gpios = <&gpio2 11 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hp>;
+ widgets = "Headphone", "Headphones",
+ "Microphone", "Headset Mic";
+ routing = "Headphones", "HPOUTL",
+ "Headphones", "HPOUTR",
+ "Headset Mic", "MICBIAS",
+ "IN3R", "Headset Mic",
+ "IN1R", "Headset Mic";
+ };
+
+ sound-wm8962 {
+ status = "disabled";
+ };
+
+};
+
+&edma2 {
+ /* channels 30 and 31 reserved for FW usage */
+ dma-channel-mask = <0xc0000000>, <0x0>;
+};
+
+&sai3 {
+ status = "disabled";
+};
+
+&wm8962 {
+ assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2>,
+ <&scmi_clk IMX95_CLK_SAI3>;
+ assigned-clock-parents = <0>, <0>, <0>, <0>, <&scmi_clk IMX95_CLK_AUDIOPLL1>;
+ assigned-clock-rates = <3932160000>, <3612672000>,
+ <393216000>, <361267200>,
+ <12288000>;
+
+ port {
+ wm8962_ep: endpoint {
+ bitclock-master;
+ frame-master;
+ remote-endpoint = <&sai3_cpu_ep>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts b/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
index 25ac331f0318..6886ea766655 100644
--- a/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
@@ -145,6 +145,15 @@
off-on-delay-us = <12000>;
};
+ reg_usb_vbus: regulator-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "USB_VBUS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&i2c7_pcal6524 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
sound-bt-sco {
compatible = "simple-audio-card";
simple-audio-card,name = "bt-sco-audio";
@@ -417,6 +426,13 @@
status = "okay";
};
+&pcie0_ep {
+ pinctrl-0 = <&pinctrl_pcie0>;
+ pinctrl-names = "default";
+ vpcie-supply = <&reg_pcie0>;
+ status = "disabled";
+};
+
&pcie1 {
pinctrl-0 = <&pinctrl_pcie1>;
pinctrl-names = "default";
@@ -425,6 +441,13 @@
status = "okay";
};
+&pcie1_ep {
+ pinctrl-0 = <&pinctrl_pcie1>;
+ pinctrl-names = "default";
+ vpcie-supply = <&reg_slot_pwr>;
+ status = "disabled";
+};
+
&sai1 {
#sound-dai-cells = <0>;
pinctrl-names = "default";
@@ -461,6 +484,13 @@
status = "okay";
};
+&usb2 {
+ dr_mode = "host";
+ disable-over-current;
+ vbus-supply = <&reg_usb_vbus>;
+ status = "okay";
+};
+
&usb3 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx95-tqma9596sa-mb-smarc-2.dts b/arch/arm64/boot/dts/freescale/imx95-tqma9596sa-mb-smarc-2.dts
new file mode 100644
index 000000000000..5b6b2bb80b28
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx95-tqma9596sa-mb-smarc-2.dts
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright (c) 2024 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/phy/phy-imx8-pcie.h>
+#include "imx95-tqma9596sa.dtsi"
+
+/ {
+ model = "TQ-Systems i.MX95 TQMa95xxSA on MB-SMARC-2";
+ compatible = "tq,imx95-tqma9596sa-mb-smarc-2", "tq,imx95-tqma9596sa", "fsl,imx95";
+
+ aliases {
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ i2c0 = &lpi2c1;
+ i2c1 = &lpi2c2;
+ i2c2 = &lpi2c3;
+ i2c3 = &lpi2c4;
+ i2c4 = &lpi2c5;
+ i2c5 = &lpi2c6;
+ i2c6 = &lpi2c7;
+ i2c7 = &lpi2c8;
+ mmc0 = &usdhc1;
+ mmc1 = &usdhc2;
+ rtc0 = &pcf85063;
+ rtc1 = &scmi_bbm;
+ serial0 = &lpuart1;
+ serial1 = &lpuart2;
+ serial2 = &lpuart3;
+ serial3 = &lpuart4;
+ serial4 = &lpuart5;
+ serial5 = &lpuart6;
+ serial6 = &lpuart7;
+ serial7 = &lpuart8;
+ };
+
+ chosen {
+ stdout-path = &lpuart7;
+ };
+
+ backlight_lvds0: backlight-lvds0 {
+ compatible = "pwm-backlight";
+ pwms = <&tpm3 0 100000 0>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <7>;
+ enable-gpios = <&expander2 1 GPIO_ACTIVE_HIGH>;
+ power-supply = <&reg_12v0>;
+ status = "disabled";
+ };
+
+ backlight_lvds1: backlight-lvds1 {
+ compatible = "pwm-backlight";
+ pwms = <&tpm4 0 100000 0>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <7>;
+ enable-gpios = <&expander2 3 GPIO_ACTIVE_HIGH>;
+ power-supply = <&reg_12v0>;
+ status = "disabled";
+ };
+
+ panel_lvds0: panel-lvds0 {
+ /*
+ * Display is not fixed, so compatible has to be added from
+ * DT overlay
+ */
+ backlight = <&backlight_lvds0>;
+ power-supply = <&reg_lvds0>;
+ status = "disabled";
+
+ port {
+ panel_in_lvds0: endpoint {
+ /* TODO: LVDS0 out */
+ };
+ };
+ };
+
+ panel_lvds1: panel-lvds1 {
+ /*
+ * Display is not fixed, so compatible has to be added from
+ * DT overlay
+ */
+ backlight = <&backlight_lvds1>;
+ power-supply = <&reg_lvds1>;
+ status = "disabled";
+
+ port {
+ panel_in_lvds1: endpoint {
+ /* TODO: LVDS1 out */
+ };
+ };
+ };
+
+ reg_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_12v0: regulator-12v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "12V0";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ };
+
+ reg_lvds0: regulator-lvds0 {
+ compatible = "regulator-fixed";
+ regulator-name = "LCD0_VDD_EN";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&expander2 2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_lvds1: regulator-lvds1 {
+ compatible = "regulator-fixed";
+ regulator-name = "LCD1_VDD_EN";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&expander2 4 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ sound {
+ compatible = "fsl,imx-audio-tlv320aic32x4";
+ model = "tqm-tlv320aic32";
+ audio-codec = <&tlv320aic3x04>;
+ audio-cpu = <&sai3>;
+ };
+};
+
+&enetc_port0 {
+ status = "okay";
+};
+
+&enetc_port1 {
+ status = "okay";
+};
+
+&expander2 {
+ pcie1-clk-en-hog {
+ gpio-hog;
+ gpios = <14 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "PCIE1_CLK_EN";
+ };
+
+ pcie2-clk-en-hog {
+ gpio-hog;
+ gpios = <15 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "PCIE2_CLK_EN";
+ };
+};
+
+&flexcan1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ xceiver-supply = <&reg_3v3>;
+ status = "okay";
+};
+
+&flexcan3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan3>;
+ xceiver-supply = <&reg_3v3>;
+ status = "okay";
+};
+
+&lpi2c1 {
+ tlv320aic3x04: audio-codec@18 {
+ compatible = "ti,tlv320aic32x4";
+ reg = <0x18>;
+ clocks = <&scmi_clk IMX95_CLK_SAI3>;
+ clock-names = "mclk";
+ iov-supply = <&reg_1v8>;
+ ldoin-supply = <&reg_3v3>;
+ };
+
+ eeprom2: eeprom@57 {
+ compatible = "atmel,24c32";
+ reg = <0x57>;
+ pagesize = <32>;
+ vcc-supply = <&reg_3v3>;
+ };
+};
+
+&lpspi3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpspi3>;
+ cs-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>, <&gpio2 7 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+/* SER0 */
+&lpuart1 {
+ status = "disabled";
+};
+
+/* SER3 */
+&lpuart5 {
+ status = "okay";
+};
+
+/* SER1 */
+&lpuart7 {
+ status = "okay";
+};
+
+/* SER2 */
+&lpuart8 {
+ status = "okay";
+};
+
+/* X44 mPCIe */
+&pcie0 {
+ pinctrl-0 = <&pinctrl_pcie0>;
+ pinctrl-names = "default";
+ clocks = <&scmi_clk IMX95_CLK_HSIO>,
+ <&pcieclk 1>,
+ <&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
+ <&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
+ clock-names = "pcie", "pcie_bus", "pcie_phy", "pcie_aux";
+ reset-gpio = <&expander2 9 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+/* X22 PCIe x1 socket */
+&pcie1 {
+ pinctrl-0 = <&pinctrl_pcie1>;
+ pinctrl-names = "default";
+ clocks = <&scmi_clk IMX95_CLK_HSIO>,
+ <&pcieclk 0>,
+ <&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
+ <&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
+ clock-names = "pcie", "pcie_bus", "pcie_phy", "pcie_aux";
+ reset-gpio = <&expander2 10 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&reg_sdvmmc {
+ status = "okay";
+};
+
+&sai3 {
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai3>;
+ assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2>,
+ <&scmi_clk IMX95_CLK_SAI3>;
+ assigned-clock-parents = <0>, <0>, <0>, <0>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>;
+ assigned-clock-rates = <3932160000>,
+ <3612672000>, <393216000>,
+ <361267200>, <12288000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
+&sai5 {
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai5>;
+ assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2>,
+ <&scmi_clk IMX95_CLK_SAI5>;
+ assigned-clock-parents = <0>, <0>, <0>, <0>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>;
+ assigned-clock-rates = <3932160000>,
+ <3612672000>, <393216000>,
+ <361267200>, <12288000>;
+};
+
+/* X4 */
+&usb2 {
+ srp-disable;
+ hnp-disable;
+ adp-disable;
+ /* DR not yet supported */
+ dr_mode = "peripheral";
+ disable-over-current;
+ status = "okay";
+};
+
+/* X16 */
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ pinctrl-3 = <&pinctrl_usdhc2>;
+ vmmc-supply = <&reg_sdvmmc>;
+ cd-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ no-1-8-v;
+ no-mmc;
+ no-sdio;
+ disable-wp;
+ bus-width = <4>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx95-tqma9596sa.dtsi b/arch/arm64/boot/dts/freescale/imx95-tqma9596sa.dtsi
new file mode 100644
index 000000000000..180124cc5bce
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx95-tqma9596sa.dtsi
@@ -0,0 +1,698 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright (c) 2024 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/net/ti-dp83867.h>
+#include <dt-bindings/phy/phy-imx8-pcie.h>
+#include <dt-bindings/usb/pd.h>
+#include "imx95.dtsi"
+
+/ {
+ aliases {
+ ethernet0 = &enetc_port0;
+ ethernet1 = &enetc_port1;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /*
+ * DRAM base addr, size : 2048 MiB DRAM
+ * should be corrected by bootloader
+ */
+ reg = <0 0x80000000 0 0x80000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ linux_cma: linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0 0x28000000>;
+ alloc-ranges = <0 0x80000000 0 0x80000000>;
+ linux,cma-default;
+ };
+
+ vpu_boot: vpu_boot@a0000000 {
+ reg = <0 0xa0000000 0 0x100000>;
+ no-map;
+ };
+ };
+
+ clk_dp: clk-dp {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ };
+
+ clk_xtal25: clk-xtal25 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ };
+
+ reg_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "V_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "V_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ /* Controlled by system manager */
+ reg_sdvmmc: regulator-sdvmmc {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdvmmc>;
+ regulator-name = "SDIO_PWR_EN";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ status = "disabled";
+ };
+};
+
+&enetc_port0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enetc0>;
+ phy-handle = <&ethphy0>;
+ phy-mode = "rgmii-id";
+};
+
+&enetc_port1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enetc1>;
+ phy-handle = <&ethphy3>;
+ phy-mode = "rgmii-id";
+};
+
+&netc_timer {
+ status = "okay";
+};
+
+&flexspi1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pinctrl_flexspi1>;
+ pinctrl-1 = <&pinctrl_flexspi1>;
+ status = "okay";
+
+ flash0: flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <80000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ vcc-supply = <&reg_1v8>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+};
+
+&gpio1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio1>;
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "", "", "GPIO7", "GPIO8",
+ "", "GPIO9", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&gpio2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio2>;
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "SLEEP", "GPIO5",
+ "", "", "GPIO6", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&lpi2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pinctrl_lpi2c1>;
+ pinctrl-1 = <&pinctrl_lpi2c1>;
+ status = "okay";
+
+ tmp1075: temperature-sensor@4a {
+ compatible = "ti,tmp1075";
+ reg = <0x4a>;
+ vs-supply = <&reg_1v8>;
+ };
+
+ eeprom_smarc: eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ pagesize = <32>;
+ vcc-supply = <&reg_1v8>;
+ };
+
+ pcf85063: rtc@51 {
+ compatible = "nxp,pcf85063a";
+ reg = <0x51>;
+ quartz-load-femtofarads = <7000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcf85063>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <27 IRQ_TYPE_EDGE_FALLING>;
+ };
+
+ m24c64: eeprom@54 {
+ compatible = "atmel,24c64";
+ reg = <0x54>;
+ pagesize = <32>;
+ vcc-supply = <&reg_1v8>;
+ };
+
+ /* protectable identification memory (part of M24C64-D @50) */
+ eeprom@58 {
+ compatible = "atmel,24c64d-wl";
+ reg = <0x58>;
+ vcc-supply = <&reg_1v8>;
+ };
+
+ /* protectable identification memory (part of M24C64-D @54) */
+ eeprom@5c {
+ compatible = "atmel,24c64d-wl";
+ reg = <0x5c>;
+ vcc-supply = <&reg_1v8>;
+ };
+
+ pcieclk: clock-generator@6a {
+ compatible = "renesas,9fgv0441";
+ reg = <0x6a>;
+ clocks = <&clk_xtal25>;
+ #clock-cells = <1>;
+ };
+
+ imu@6b {
+ compatible = "st,ism330dhcx";
+ reg = <0x6b>;
+ vdd-supply = <&reg_3v3>;
+ vddio-supply = <&reg_3v3>;
+ };
+
+ /* D23 */
+ expander2: gpio@74 {
+ compatible = "ti,tca9539";
+ reg = <0x74>;
+ vcc-supply = <&reg_1v8>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "GPIO4", "LCD0_BLKT_EN", "LCD0_VDD_EN", "LCD1_BLKT_EN",
+ "LCD1_VDD_EN", "ENET1_RESET#", "ENET2_RESET#", "GBE0_SDP_DIR",
+ "GBE1_SDP_DIR", "PCIE1_RST#", "PCIE2_RST#", "DP_BRIDGE_EN",
+ "HUB_RST#", "QSPI_RESET#", "PCIE1_CLK_EN", "PCIE2_CLK_EN";
+ };
+
+ /* D21 */
+ expander1: gpio@75 {
+ compatible = "ti,tca9539";
+ reg = <0x75>;
+ vcc-supply = <&reg_1v8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_expander1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <27 IRQ_TYPE_EDGE_FALLING>;
+ gpio-line-names = "GPIO10", "GPIO11", "GPIO12", "GPIO13",
+ "CHG_PRSNT#", "CHARGING", "LID", "BATLOW#",
+ "TEMP_EVENT#", "PGOOD_ARM", "PGOOD_SOC", "PCIE_WAKE#_1V8",
+ "GPIO0", "GPIO1", "GPIO2", "GPIO3";
+ };
+};
+
+/* I2C_CAM0 */
+&lpi2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pinctrl_lpi2c3>;
+ pinctrl-1 = <&pinctrl_lpi2c3>;
+ status = "okay";
+
+ dp_bridge: dp-bridge@f {
+ compatible = "toshiba,tc9595", "toshiba,tc358767";
+ reg = <0x0f>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tc9595>;
+ clock-names = "ref";
+ clocks = <&clk_dp>;
+ reset-gpios = <&expander2 11 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <25 IRQ_TYPE_EDGE_RISING>;
+ toshiba,hpd-pin = <0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dp_dsi_in: endpoint {
+ /* TODO: DSI out */
+ data-lanes = <1 2 3 4>;
+ };
+ };
+ };
+ };
+};
+
+/* I2C_CAM1 */
+&lpi2c4 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pinctrl_lpi2c4>;
+ pinctrl-1 = <&pinctrl_lpi2c4>;
+ status = "okay";
+};
+
+/* I2C_LCD */
+&lpi2c6 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pinctrl_lpi2c6>;
+ pinctrl-1 = <&pinctrl_lpi2c6>;
+ status = "okay";
+};
+
+/* SER0 */
+&lpuart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart1>;
+};
+
+/* SER3 */
+&lpuart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart5>;
+};
+
+/* SER1 */
+&lpuart7 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart7>;
+};
+
+/* SER2 */
+&lpuart8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart8>;
+};
+
+&netc_blk_ctrl {
+ status = "okay";
+};
+
+&netc_emdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mdio>;
+ status = "okay";
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ethphy0>;
+ reset-gpios = <&expander2 5 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <500000>;
+ reset-deassert-us = <50000>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <13 IRQ_TYPE_LEVEL_LOW>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_50_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_50_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,dp83867-rxctrl-strap-quirk;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ };
+
+ ethphy3: ethernet-phy@3 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ethphy3>;
+ reset-gpios = <&expander2 6 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <500000>;
+ reset-deassert-us = <50000>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_50_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_50_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,dp83867-rxctrl-strap-quirk;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ };
+};
+
+&scmi_bbm {
+ linux,code = <KEY_POWER>;
+};
+
+&tpm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpm3>;
+};
+
+&tpm4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpm4>;
+};
+
+&tpm5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpm5>;
+};
+
+&usb3 {
+ status = "okay";
+};
+
+&usb3_dwc3 {
+ dr_mode = "host";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hub_2_0: hub@1 {
+ compatible = "usb451,8142";
+ reg = <1>;
+ peer-hub = <&hub_3_0>;
+ reset-gpios = <&expander2 12 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_3v3>;
+ };
+
+ hub_3_0: hub@2 {
+ compatible = "usb451,8140";
+ reg = <2>;
+ peer-hub = <&hub_2_0>;
+ reset-gpios = <&expander2 12 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_3v3>;
+ };
+};
+
+&usb3_phy {
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ pinctrl-3 = <&pinctrl_usdhc1>;
+ bus-width = <8>;
+ non-removable;
+ no-sdio;
+ no-sd;
+ status = "okay";
+};
+
+&wdog3 {
+ status = "okay";
+};
+
+&scmi_iomuxc {
+ pinctrl_ethphy0: ethphy0grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO33__GPIO5_IO_BIT13 0x1100>;
+ };
+
+ pinctrl_ethphy3: ethphy3grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO34__GPIO5_IO_BIT14 0x1100>;
+ };
+
+ pinctrl_enetc0: enetc0grp {
+ fsl,pins = <IMX95_PAD_ENET1_RD0__NETCMIX_TOP_ETH0_RGMII_RD0 0x1100>,
+ <IMX95_PAD_ENET1_RD1__NETCMIX_TOP_ETH0_RGMII_RD1 0x1100>,
+ <IMX95_PAD_ENET1_RD2__NETCMIX_TOP_ETH0_RGMII_RD2 0x1100>,
+ <IMX95_PAD_ENET1_RD3__NETCMIX_TOP_ETH0_RGMII_RD3 0x1100>,
+ <IMX95_PAD_ENET1_RXC__NETCMIX_TOP_ETH0_RGMII_RX_CLK 0x1100>,
+ <IMX95_PAD_ENET1_RX_CTL__NETCMIX_TOP_ETH0_RGMII_RX_CTL 0x1100>,
+ <IMX95_PAD_ENET1_TD0__NETCMIX_TOP_ETH0_RGMII_TD0 0x11e>,
+ <IMX95_PAD_ENET1_TD1__NETCMIX_TOP_ETH0_RGMII_TD1 0x11e>,
+ <IMX95_PAD_ENET1_TD2__NETCMIX_TOP_ETH0_RGMII_TD2 0x11e>,
+ <IMX95_PAD_ENET1_TD3__NETCMIX_TOP_ETH0_RGMII_TD3 0x11e>,
+ <IMX95_PAD_ENET1_TXC__NETCMIX_TOP_ETH0_RGMII_TX_CLK 0x11e>,
+ <IMX95_PAD_ENET1_TX_CTL__NETCMIX_TOP_ETH0_RGMII_TX_CTL 0x11e>,
+ <IMX95_PAD_GPIO_IO23__GPIO2_IO_BIT23 0x51e>;
+ };
+
+ pinctrl_enetc1: enetc1grp {
+ fsl,pins = <IMX95_PAD_ENET2_RD0__NETCMIX_TOP_ETH1_RGMII_RD0 0x1100>,
+ <IMX95_PAD_ENET2_RD1__NETCMIX_TOP_ETH1_RGMII_RD1 0x1100>,
+ <IMX95_PAD_ENET2_RD2__NETCMIX_TOP_ETH1_RGMII_RD2 0x1100>,
+ <IMX95_PAD_ENET2_RD3__NETCMIX_TOP_ETH1_RGMII_RD3 0x1100>,
+ <IMX95_PAD_ENET2_RXC__NETCMIX_TOP_ETH1_RGMII_RX_CLK 0x1100>,
+ <IMX95_PAD_ENET2_RX_CTL__NETCMIX_TOP_ETH1_RGMII_RX_CTL 0x1100>,
+ <IMX95_PAD_ENET2_TD0__NETCMIX_TOP_ETH1_RGMII_TD0 0x11e>,
+ <IMX95_PAD_ENET2_TD1__NETCMIX_TOP_ETH1_RGMII_TD1 0x11e>,
+ <IMX95_PAD_ENET2_TD2__NETCMIX_TOP_ETH1_RGMII_TD2 0x11e>,
+ <IMX95_PAD_ENET2_TD3__NETCMIX_TOP_ETH1_RGMII_TD3 0x11e>,
+ <IMX95_PAD_ENET2_TXC__NETCMIX_TOP_ETH1_RGMII_TX_CLK 0x11e>,
+ <IMX95_PAD_ENET2_TX_CTL__NETCMIX_TOP_ETH1_RGMII_TX_CTL 0x11e>,
+ <IMX95_PAD_GPIO_IO24__GPIO2_IO_BIT24 0x51e>;
+ };
+
+ pinctrl_expander1: expander1grp {
+ fsl,pins = <IMX95_PAD_CCM_CLKO2__GPIO3_IO_BIT27 0x1100>;
+ };
+
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = <IMX95_PAD_PDM_BIT_STREAM0__AONMIX_TOP_CAN1_RX 0x1300>,
+ <IMX95_PAD_PDM_CLK__AONMIX_TOP_CAN1_TX 0x31e>;
+ };
+
+ pinctrl_flexcan3: flexcan3grp {
+ fsl,pins = <IMX95_PAD_CCM_CLKO3__CAN3_TX 0x31e>,
+ <IMX95_PAD_CCM_CLKO4__CAN3_RX 0x1300>;
+ };
+
+ pinctrl_flexspi1: flexspi1grp {
+ fsl,pins = <IMX95_PAD_SD3_CLK__FLEXSPI1_A_SCLK 0x11e>,
+ <IMX95_PAD_SD3_CMD__FLEXSPI1_A_SS0_B 0x11e>,
+ <IMX95_PAD_SD3_DATA0__FLEXSPI1_A_DATA_BIT0 0x11e>,
+ <IMX95_PAD_SD3_DATA1__FLEXSPI1_A_DATA_BIT1 0x11e>,
+ <IMX95_PAD_SD3_DATA2__FLEXSPI1_A_DATA_BIT2 0x11e>,
+ <IMX95_PAD_SD3_DATA3__FLEXSPI1_A_DATA_BIT3 0x11e>;
+ };
+
+ pinctrl_gpio1: gpio1grp {
+ fsl,pins = <IMX95_PAD_PDM_BIT_STREAM1__AONMIX_TOP_GPIO1_IO_BIT10 0x111e>,
+ <IMX95_PAD_SAI1_TXD0__AONMIX_TOP_GPIO1_IO_BIT13 0x111e>,
+ <IMX95_PAD_SAI1_TXFS__AONMIX_TOP_GPIO1_IO_BIT11 0x111e>;
+ };
+
+ pinctrl_gpio2: gpio2grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO18__GPIO2_IO_BIT18 0x1100>,
+ <IMX95_PAD_GPIO_IO19__GPIO2_IO_BIT19 0x111e>,
+ <IMX95_PAD_GPIO_IO22__GPIO2_IO_BIT22 0x111e>;
+ };
+
+ pinctrl_lpi2c1: lpi2c1grp {
+ fsl,pins = <IMX95_PAD_I2C1_SCL__AONMIX_TOP_LPI2C1_SCL 0x4000191e>,
+ <IMX95_PAD_I2C1_SDA__AONMIX_TOP_LPI2C1_SDA 0x4000191e>;
+ };
+
+ pinctrl_lpi2c3: lpi2c3grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO28__LPI2C3_SDA 0x4000191e>,
+ <IMX95_PAD_GPIO_IO29__LPI2C3_SCL 0x4000191e>;
+ };
+
+ pinctrl_lpi2c4: lpi2c4grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO30__LPI2C4_SDA 0x4000191e>,
+ <IMX95_PAD_GPIO_IO31__LPI2C4_SCL 0x4000191e>;
+ };
+
+ pinctrl_lpi2c6: lpi2c6grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO02__LPI2C6_SDA 0x4000191e>,
+ <IMX95_PAD_GPIO_IO03__LPI2C6_SCL 0x4000191e>;
+ };
+
+ pinctrl_lpspi3: lpspi3grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO07__GPIO2_IO_BIT7 0x51e>,
+ <IMX95_PAD_GPIO_IO08__GPIO2_IO_BIT8 0x51e>,
+ <IMX95_PAD_GPIO_IO09__LPSPI3_SIN 0x51e>,
+ <IMX95_PAD_GPIO_IO10__LPSPI3_SOUT 0x51e>,
+ <IMX95_PAD_GPIO_IO11__LPSPI3_SCK 0x51e>;
+ };
+
+ pinctrl_lpuart1: lpuart1grp {
+ fsl,pins = <IMX95_PAD_UART1_RXD__AONMIX_TOP_LPUART1_RX 0x1300>,
+ <IMX95_PAD_UART1_TXD__AONMIX_TOP_LPUART1_TX 0x31e>,
+ <IMX95_PAD_UART2_TXD__AONMIX_TOP_LPUART1_RTS_B 0x1300>,
+ <IMX95_PAD_UART2_RXD__AONMIX_TOP_LPUART1_CTS_B 0x31e>;
+ };
+
+ pinctrl_lpuart5: lpuart5grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO00__LPUART5_TX 0x31e>,
+ <IMX95_PAD_GPIO_IO01__LPUART5_RX 0x1300>;
+ };
+
+ pinctrl_lpuart7: lpuart7grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO36__LPUART7_TX 0x31e>,
+ <IMX95_PAD_GPIO_IO37__LPUART7_RX 0x1300>;
+ };
+
+ pinctrl_lpuart8: lpuart8grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO12__LPUART8_TX 0x31e>,
+ <IMX95_PAD_GPIO_IO13__LPUART8_RX 0x1300>,
+ <IMX95_PAD_GPIO_IO14__LPUART8_CTS_B 0x31e>,
+ <IMX95_PAD_GPIO_IO15__LPUART8_RTS_B 0x1300>;
+ };
+
+ pinctrl_mdio: mdiogrp {
+ fsl,pins = <IMX95_PAD_ENET1_MDC__NETCMIX_TOP_NETC_MDC 0x51e>,
+ <IMX95_PAD_ENET1_MDIO__NETCMIX_TOP_NETC_MDIO 0x51e>;
+ };
+
+ pinctrl_pcf85063: pcf85063grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO27__GPIO2_IO_BIT27 0x1100>;
+ };
+
+ pinctrl_pcie0: pcie0grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO32__HSIOMIX_TOP_PCIE1_CLKREQ_B 0x111e>;
+ };
+
+ pinctrl_pcie1: pcie1grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO35__HSIOMIX_TOP_PCIE2_CLKREQ_B 0x111e>;
+ };
+
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO16__SAI3_TX_BCLK 0x51e>,
+ <IMX95_PAD_GPIO_IO17__SAI3_MCLK 0x51e>,
+ <IMX95_PAD_GPIO_IO20__SAI3_RX_DATA_BIT0 0x1300>,
+ <IMX95_PAD_GPIO_IO21__SAI3_TX_DATA_BIT0 0x51e>,
+ <IMX95_PAD_GPIO_IO26__SAI3_TX_SYNC 0x51e>;
+ };
+
+ pinctrl_sai5: sai5grp {
+ fsl,pins = <IMX95_PAD_XSPI1_DATA4__SAI5_TX_DATA_BIT0 0x51e>,
+ <IMX95_PAD_XSPI1_DATA5__SAI5_TX_SYNC 0x51e>,
+ <IMX95_PAD_XSPI1_DATA6__SAI5_TX_BCLK 0x51e>,
+ <IMX95_PAD_XSPI1_DATA7__SAI5_RX_DATA_BIT0 0x1300>;
+ };
+
+ pinctrl_sdvmmc: sdvmmcgrp {
+ fsl,pins = <IMX95_PAD_SD2_RESET_B__GPIO3_IO_BIT7 0x11e>;
+ };
+
+ pinctrl_tc9595: tc9595grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO25__GPIO2_IO_BIT25 0x1500>;
+ };
+
+ pinctrl_tpm3: tpm3grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO04__TPM3_CH0 0x51e>;
+ };
+
+ pinctrl_tpm4: tpm4grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO05__TPM4_CH0 0x51e>;
+ };
+
+ pinctrl_tpm5: tpm5grp {
+ fsl,pins = <IMX95_PAD_GPIO_IO06__TPM5_CH0 0x51e>;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <IMX95_PAD_SD1_CLK__USDHC1_CLK 0x158e>,
+ <IMX95_PAD_SD1_CMD__USDHC1_CMD 0x138e>,
+ <IMX95_PAD_SD1_DATA0__USDHC1_DATA0 0x138e>,
+ <IMX95_PAD_SD1_DATA1__USDHC1_DATA1 0x138e>,
+ <IMX95_PAD_SD1_DATA2__USDHC1_DATA2 0x138e>,
+ <IMX95_PAD_SD1_DATA3__USDHC1_DATA3 0x138e>,
+ <IMX95_PAD_SD1_DATA4__USDHC1_DATA4 0x138e>,
+ <IMX95_PAD_SD1_DATA5__USDHC1_DATA5 0x138e>,
+ <IMX95_PAD_SD1_DATA6__USDHC1_DATA6 0x138e>,
+ <IMX95_PAD_SD1_DATA7__USDHC1_DATA7 0x138e>,
+ <IMX95_PAD_SD1_STROBE__USDHC1_STROBE 0x158e>;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+ fsl,pins = <IMX95_PAD_SD1_CLK__USDHC1_CLK 0x158e>,
+ <IMX95_PAD_SD1_CMD__USDHC1_CMD 0x138e>,
+ <IMX95_PAD_SD1_DATA0__USDHC1_DATA0 0x138e>,
+ <IMX95_PAD_SD1_DATA1__USDHC1_DATA1 0x138e>,
+ <IMX95_PAD_SD1_DATA2__USDHC1_DATA2 0x138e>,
+ <IMX95_PAD_SD1_DATA3__USDHC1_DATA3 0x138e>,
+ <IMX95_PAD_SD1_DATA4__USDHC1_DATA4 0x138e>,
+ <IMX95_PAD_SD1_DATA5__USDHC1_DATA5 0x138e>,
+ <IMX95_PAD_SD1_DATA6__USDHC1_DATA6 0x138e>,
+ <IMX95_PAD_SD1_DATA7__USDHC1_DATA7 0x138e>,
+ <IMX95_PAD_SD1_STROBE__USDHC1_STROBE 0x158e>;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+ fsl,pins = <IMX95_PAD_SD1_CLK__USDHC1_CLK 0x15fe>,
+ <IMX95_PAD_SD1_CMD__USDHC1_CMD 0x13fe>,
+ <IMX95_PAD_SD1_DATA0__USDHC1_DATA0 0x13fe>,
+ <IMX95_PAD_SD1_DATA1__USDHC1_DATA1 0x13fe>,
+ <IMX95_PAD_SD1_DATA2__USDHC1_DATA2 0x13fe>,
+ <IMX95_PAD_SD1_DATA3__USDHC1_DATA3 0x13fe>,
+ <IMX95_PAD_SD1_DATA4__USDHC1_DATA4 0x13fe>,
+ <IMX95_PAD_SD1_DATA5__USDHC1_DATA5 0x13fe>,
+ <IMX95_PAD_SD1_DATA6__USDHC1_DATA6 0x13fe>,
+ <IMX95_PAD_SD1_DATA7__USDHC1_DATA7 0x13fe>,
+ <IMX95_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe>;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <IMX95_PAD_SD2_CD_B__GPIO3_IO_BIT0 0x1100>,
+ <IMX95_PAD_SD2_CLK__USDHC2_CLK 0x51e>,
+ <IMX95_PAD_SD2_CMD__USDHC2_CMD 0x31e>,
+ <IMX95_PAD_SD2_DATA0__USDHC2_DATA0 0x131e>,
+ <IMX95_PAD_SD2_DATA1__USDHC2_DATA1 0x131e>,
+ <IMX95_PAD_SD2_DATA2__USDHC2_DATA2 0x131e>,
+ <IMX95_PAD_SD2_DATA3__USDHC2_DATA3 0x131e>,
+ <IMX95_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e>;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <IMX95_PAD_SD2_CD_B__GPIO3_IO_BIT0 0x1100>,
+ <IMX95_PAD_SD2_CLK__USDHC2_CLK 0x158e>,
+ <IMX95_PAD_SD2_CMD__USDHC2_CMD 0x138e>,
+ <IMX95_PAD_SD2_DATA0__USDHC2_DATA0 0x138e>,
+ <IMX95_PAD_SD2_DATA1__USDHC2_DATA1 0x138e>,
+ <IMX95_PAD_SD2_DATA2__USDHC2_DATA2 0x138e>,
+ <IMX95_PAD_SD2_DATA3__USDHC2_DATA3 0x138e>,
+ <IMX95_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e>;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <IMX95_PAD_SD2_CD_B__GPIO3_IO_BIT0 0x1100>,
+ <IMX95_PAD_SD2_CLK__USDHC2_CLK 0x15fe>,
+ <IMX95_PAD_SD2_CMD__USDHC2_CMD 0x13fe>,
+ <IMX95_PAD_SD2_DATA0__USDHC2_DATA0 0x13fe>,
+ <IMX95_PAD_SD2_DATA1__USDHC2_DATA1 0x13fe>,
+ <IMX95_PAD_SD2_DATA2__USDHC2_DATA2 0x13fe>,
+ <IMX95_PAD_SD2_DATA3__USDHC2_DATA3 0x13fe>,
+ <IMX95_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
index 9bb26b466a06..632631a29112 100644
--- a/arch/arm64/boot/dts/freescale/imx95.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
@@ -470,6 +470,13 @@
};
};
+ usbphynop: usbphynop {
+ compatible = "usb-nop-xceiv";
+ clocks = <&scmi_clk IMX95_CLK_HSIO>;
+ clock-names = "main_clk";
+ #phy-cells = <0>;
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <2>;
@@ -1621,12 +1628,35 @@
status = "disabled";
};
+ usb2: usb@4c200000 {
+ compatible = "fsl,imx95-usb", "fsl,imx7d-usb", "fsl,imx27-usb";
+ reg = <0x0 0x4c200000 0x0 0x200>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_HSIO>,
+ <&scmi_clk IMX95_CLK_32K>;
+ clock-names = "usb_ctrl_root", "usb_wakeup";
+ iommus = <&smmu 0xf>;
+ phys = <&usbphynop>;
+ power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
+ fsl,usbmisc = <&usbmisc 0>;
+ status = "disabled";
+ };
+
+ usbmisc: usbmisc@4c200200 {
+ compatible = "fsl,imx95-usbmisc", "fsl,imx7d-usbmisc",
+ "fsl,imx6q-usbmisc";
+ reg = <0x0 0x4c200200 0x0 0x200>,
+ <0x0 0x4c010014 0x0 0x04>;
+ #index-cells = <1>;
+ };
+
pcie0: pcie@4c300000 {
compatible = "fsl,imx95-pcie";
reg = <0 0x4c300000 0 0x10000>,
<0 0x60100000 0 0xfe00000>,
<0 0x4c360000 0 0x10000>,
- <0 0x4c340000 0 0x2000>;
+ <0 0x4c340000 0 0x4000>;
reg-names = "dbi", "config", "atu", "app";
ranges = <0x81000000 0x0 0x00000000 0x0 0x6ff00000 0 0x00100000>,
<0x82000000 0x0 0x10000000 0x9 0x10000000 0 0x10000000>;
@@ -1673,7 +1703,7 @@
reg = <0 0x4c300000 0 0x10000>,
<0 0x4c360000 0 0x1000>,
<0 0x4c320000 0 0x1000>,
- <0 0x4c340000 0 0x2000>,
+ <0 0x4c340000 0 0x4000>,
<0 0x4c370000 0 0x10000>,
<0x9 0 1 0>;
reg-names = "dbi","atu", "dbi2", "app", "dma", "addr_space";
@@ -1700,7 +1730,7 @@
reg = <0 0x4c380000 0 0x10000>,
<8 0x80100000 0 0xfe00000>,
<0 0x4c3e0000 0 0x10000>,
- <0 0x4c3c0000 0 0x2000>;
+ <0 0x4c3c0000 0 0x4000>;
reg-names = "dbi", "config", "atu", "app";
ranges = <0x81000000 0 0x00000000 0x8 0x8ff00000 0 0x00100000>,
<0x82000000 0 0x10000000 0xa 0x10000000 0 0x10000000>;
@@ -1749,7 +1779,7 @@
reg = <0 0x4c380000 0 0x10000>,
<0 0x4c3e0000 0 0x1000>,
<0 0x4c3a0000 0 0x1000>,
- <0 0x4c3c0000 0 0x2000>,
+ <0 0x4c3c0000 0 0x4000>,
<0 0x4c3f0000 0 0x10000>,
<0xa 0 1 0>;
reg-names = "dbi", "atu", "dbi2", "app", "dma", "addr_space";
diff --git a/arch/arm64/boot/dts/freescale/s32gxxxa-rdb.dtsi b/arch/arm64/boot/dts/freescale/s32gxxxa-rdb.dtsi
index ba53ec622f0b..4587e1cb8835 100644
--- a/arch/arm64/boot/dts/freescale/s32gxxxa-rdb.dtsi
+++ b/arch/arm64/boot/dts/freescale/s32gxxxa-rdb.dtsi
@@ -153,6 +153,11 @@
gpio-controller;
#gpio-cells = <2>;
};
+
+ pca85073a: rtc@51 {
+ compatible = "nxp,pca85073a";
+ reg = <0x51>;
+ };
};
&i2c2 {
diff --git a/arch/arm64/boot/dts/freescale/tqma8xxs-mb-smarc-2.dtsi b/arch/arm64/boot/dts/freescale/tqma8xxs-mb-smarc-2.dtsi
new file mode 100644
index 000000000000..478cc8ede05e
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/tqma8xxs-mb-smarc-2.dtsi
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright (c) 2018-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
+ */
+
+/ {
+ aliases {
+ rtc0 = &rtc1;
+ rtc1 = &rtc;
+ };
+
+ backlight_lvds0: backlight-lvds0 {
+ compatible = "pwm-backlight";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_backlight_lvds0>;
+ /* PWM support still missing */
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <7>;
+ power-supply = <&reg_12v0>;
+ enable-gpios = <&lsio_gpio1 2 GPIO_ACTIVE_HIGH>;
+ status = "disabled";
+ };
+
+ backlight_lvds1: backlight-lvds1 {
+ compatible = "pwm-backlight";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_backlight_lvds1>;
+ /* PWM support still missing */
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <7>;
+ power-supply = <&reg_12v0>;
+ enable-gpios = <&lsio_gpio1 0 GPIO_ACTIVE_HIGH>;
+ status = "disabled";
+ };
+
+ chosen {
+ stdout-path = &lpuart0;
+ };
+
+ panel_lvds0: panel-lvds0 {
+ /*
+ * Display is not fixed, so compatible has to be added from
+ * DT
+ */
+ backlight = <&backlight_lvds0>;
+ power-supply = <&reg_lvds0>;
+ status = "disabled";
+
+ port {
+ panel_in_lvds0: endpoint {
+ };
+ };
+ };
+
+ panel_lvds1: panel-lvds1 {
+ /*
+ * Display is not fixed, so compatible has to be added from
+ * DT
+ */
+ backlight = <&backlight_lvds1>;
+ power-supply = <&reg_lvds1>;
+ status = "disabled";
+
+ port {
+ panel_in_lvds1: endpoint {
+ };
+ };
+ };
+
+ reg_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_12v0: regulator-12v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "12V0";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ };
+
+ sound {
+ compatible = "fsl,imx-audio-tlv320aic32x4";
+ model = "tqm-tlv320aic32";
+ ssi-controller = <&sai1>;
+ audio-codec = <&tlv320aic3x04>;
+ };
+};
+
+&fec1 {
+ status = "okay";
+};
+
+&fec2 {
+ status = "okay";
+};
+
+&flexcan2 {
+ xceiver-supply = <&reg_3v3>;
+ status = "okay";
+};
+
+&flexcan3 {
+ xceiver-supply = <&reg_3v3>;
+ status = "okay";
+};
+
+&i2c0 {
+ tlv320aic3x04: audio-codec@18 {
+ compatible = "ti,tlv320aic32x4";
+ reg = <0x18>;
+ clocks = <&mclkout0_lpcg 0>;
+ clock-names = "mclk";
+ iov-supply = <&reg_1v8>;
+ ldoin-supply = <&reg_3v3>;
+ };
+
+ eeprom2: eeprom@57 {
+ compatible = "atmel,24c32";
+ reg = <0x57>;
+ pagesize = <32>;
+ vcc-supply = <&reg_3v3>;
+ };
+};
+
+&lpspi1 {
+ status = "okay";
+};
+
+&lpuart0 {
+ status = "okay";
+};
+
+&lpuart3 {
+ status = "okay";
+};
+
+&reg_sdvmmc {
+ off-on-delay-us = <200000>;
+ status = "okay";
+};
+
+&usbotg1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg1>;
+ srp-disable;
+ hnp-disable;
+ adp-disable;
+ power-active-high;
+ over-current-active-low;
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usbotg3 {
+ status = "okay";
+};
+
+&usbotg3_cdns3 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb3_phy {
+ status = "okay";
+};
+
+&usbphy1 {
+ status = "okay";
+};
+
+&usdhc2 {
+ cd-gpios = <&lsio_gpio4 22 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&lsio_gpio4 21 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&reg_sdvmmc>;
+ no-1-8-v;
+ no-mmc;
+ no-sdio;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/tqma8xxs.dtsi b/arch/arm64/boot/dts/freescale/tqma8xxs.dtsi
new file mode 100644
index 000000000000..2d0a329c2fa5
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/tqma8xxs.dtsi
@@ -0,0 +1,768 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright (c) 2018-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
+ */
+
+#include <dt-bindings/net/ti-dp83867.h>
+
+/delete-node/ &encoder_rpc;
+
+/ {
+ memory@80000000 {
+ device_type = "memory";
+ /*
+ * DRAM base addr, minimal size : 1024 MiB DRAM
+ * should be corrected by bootloader
+ */
+ reg = <0x00000000 0x80000000 0 0x40000000>;
+ };
+
+ clk_xtal25: clk-xtal25 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ };
+
+ reg_tqma8xxs_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_lvds0: regulator-lvds0 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lvds0>;
+ regulator-name = "LCD0_VDD_EN";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&lsio_gpio1 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_lvds1: regulator-lvds1 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lvds1>;
+ regulator-name = "LCD1_VDD_EN";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&lsio_gpio1 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_sdvmmc: regulator-sdvmmc {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdvmmc>;
+ regulator-name = "SD1_VMMC";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ status = "disabled";
+ };
+
+ reg_vmmc: regulator-vmmc {
+ compatible = "regulator-fixed";
+ regulator-name = "MMC0_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_vqmmc: regulator-vqmmc {
+ compatible = "regulator-fixed";
+ regulator-name = "MMC0_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /*
+ * global autoconfigured region for contiguous allocations
+ * must not exceed memory size and region
+ */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0 0x20000000>;
+ alloc-ranges = <0 0x96000000 0 0x30000000>;
+ linux,cma-default;
+ };
+
+ decoder_boot: decoder-boot@84000000 {
+ reg = <0 0x84000000 0 0x2000000>;
+ no-map;
+ };
+
+ encoder_boot: encoder-boot@86000000 {
+ reg = <0 0x86000000 0 0x200000>;
+ no-map;
+ };
+
+ m4_reserved: m4@88000000 {
+ no-map;
+ reg = <0 0x88000000 0 0x8000000>;
+ status = "disabled";
+ };
+
+ vdev0vring0: vdev0vring0@90000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x90000000 0 0x8000>;
+ no-map;
+ status = "disabled";
+ };
+
+ vdev0vring1: vdev0vring1@90008000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x90008000 0 0x8000>;
+ no-map;
+ status = "disabled";
+ };
+
+ vdev1vring0: vdev1vring0@90010000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x90010000 0 0x8000>;
+ no-map;
+ status = "disabled";
+ };
+
+ vdev1vring1: vdev1vring1@90018000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x90018000 0 0x8000>;
+ no-map;
+ status = "disabled";
+ };
+
+ rsc_table: rsc-table@900ff000 {
+ reg = <0 0x900ff000 0 0x1000>;
+ no-map;
+ status = "disabled";
+ };
+
+ vdevbuffer: vdevbuffer@90400000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x90400000 0 0x100000>;
+ no-map;
+ status = "disabled";
+ };
+
+ decoder_rpc: decoder-rpc@92000000 {
+ reg = <0 0x92000000 0 0x100000>;
+ no-map;
+ };
+
+ encoder_rpc: encoder-rpc@92100000 {
+ reg = <0 0x92100000 0 0x700000>;
+ no-map;
+ };
+ };
+
+};
+
+/* TQMa8XxS only uses industrial grade, reduce trip points accordingly */
+&cpu_alert0 {
+ temperature = <95000>;
+};
+
+&cpu_crit0 {
+ temperature = <100000>;
+};
+/* end of temperature grade adjustments */
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ mac-address = [ 00 00 00 00 00 00 ];
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ethphy0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_50_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_50_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,dp83867-rxctrl-strap-quirk;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ reset-gpios = <&lsio_gpio3 22 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <500000>;
+ reset-deassert-us = <50000>;
+ enet-phy-lane-no-swap;
+ interrupt-parent = <&lsio_gpio1>;
+ interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ ethphy3: ethernet-phy@3 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ethphy1>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_50_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_50_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,dp83867-rxctrl-strap-quirk;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ reset-gpios = <&lsio_gpio0 24 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <500000>;
+ reset-deassert-us = <50000>;
+ enet-phy-lane-no-swap;
+ interrupt-parent = <&lsio_gpio1>;
+ interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+};
+
+&fec2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec2>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy3>;
+ fsl,magic-packet;
+ mac-address = [ 00 00 00 00 00 00 ];
+};
+
+&flexcan2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1>;
+};
+
+&flexcan3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can2>;
+};
+
+&flexspi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexspi0>;
+ status = "okay";
+
+ flash0: flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <66000000>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+};
+
+&lsio_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_smarc_fangpio>, <&pinctrl_smarc_mngtpio>;
+
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "LID", "SLEEP", "CHARGING#", "CHGPRSNT#",
+ "BATLOW#", "", "", "",
+ "", "SMARC_GPIO6", "SMARC_GPIO5", "",
+ "PHY3 RST#", "", "", "SPI0_CS0",
+ "", "SPI0_CS1", "", "";
+};
+
+&lsio_gpio1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_smarc_gpio>;
+
+ gpio-line-names = "LCD1_BLKT_EN", "LCD1_VDD_EN", "LCD0_BLKT_EN", "LCD0_VDD_EN",
+ "SMARC_GPIO0", "SMARC_GPIO1", "SMARC_GPIO2", "",
+ "SMARC_GPIO3", "SMARC_GPIO8", "SMARC_GPIO7", "SMARC_GPIO10",
+ "SMARC_GPIO9", "SMARC_GPIO4", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&lsio_gpio2 {
+ gpio-line-names = "RTC_INT#", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&lsio_gpio3 {
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "PHY0_RST#", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&lsio_gpio4 {
+ gpio-line-names = "PCIE_PERST#", "", "PCIE_WAKE#", "USB_OTG1_PWR",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "SDIO_PWR_EN",
+ "", "SDIO_WP", "SDIO_CD#", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&i2c0 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_lpi2c0>;
+ pinctrl-1 = <&pinctrl_lpi2c0_gpio>;
+ scl-gpios = <&lsio_gpio3 8 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&lsio_gpio3 7 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "okay";
+
+ /* NXP SE97BTP with temperature sensor + eeprom */
+ sensor0: temperature-sensor@1b {
+ compatible = "nxp,se97b", "jedec,jc-42.4-temp";
+ reg = <0x1b>;
+ };
+
+ eeprom0: eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ pagesize = <32>;
+ vcc-supply = <&reg_tqma8xxs_3v3>;
+ };
+
+ rtc1: rtc@51 {
+ compatible = "nxp,pcf85063a";
+ reg = <0x51>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rtc>;
+ quartz-load-femtofarads = <7000>;
+ interrupt-parent = <&lsio_gpio2>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ eeprom1: eeprom@53 {
+ compatible = "nxp,se97b", "atmel,24c02";
+ reg = <0x53>;
+ pagesize = <16>;
+ read-only;
+ vcc-supply = <&reg_tqma8xxs_3v3>;
+ };
+
+ pcieclk: clock-generator@6a {
+ compatible = "renesas,9fgv0241";
+ reg = <0x6a>;
+ clocks = <&clk_xtal25>;
+ #clock-cells = <1>;
+ };
+};
+
+&lpspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1>;
+ cs-gpios = <&lsio_gpio0 27 GPIO_ACTIVE_LOW>, <&lsio_gpio0 29 GPIO_ACTIVE_LOW>;
+};
+
+&lpuart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart0>;
+};
+
+&lpuart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart3>;
+};
+
+&mu_m0 {
+ status = "okay";
+};
+
+&mu1_m0 {
+ status = "okay";
+};
+
+&sai1 {
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai1_lpcg 0>;
+ assigned-clock-rates = <786432000>, <49152000>, <12288000>, <49152000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai1>;
+ status = "okay";
+};
+
+&thermal_zones {
+ pmic0_thermal: pmic0-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <2000>;
+ thermal-sensors = <&tsens IMX_SC_R_PMIC_0>;
+
+ trips {
+ pmic_alert0: trip0 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ pmic_crit0: trip1 {
+ temperature = <125000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&pmic_alert0>;
+ cooling-device =
+ <&A35_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ vmmc-supply = <&reg_vmmc>;
+ vqmmc-supply = <&reg_vqmmc>;
+ bus-width = <8>;
+ non-removable;
+ no-sd;
+ no-sdio;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ bus-width = <4>;
+ /* NOTE: CD / WP and VMMC support depends on mainboard */
+};
+
+&vpu {
+ compatible = "nxp,imx8qxp-vpu";
+ status = "okay";
+};
+
+&vpu_core0 {
+ memory-region = <&decoder_boot>, <&decoder_rpc>;
+ status = "okay";
+};
+
+&vpu_core1 {
+ memory-region = <&encoder_boot>, <&encoder_rpc>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_backlight_lvds0: backlight-lvds0grp {
+ fsl,pins = <IMX8QXP_SPI2_SDI_LSIO_GPIO1_IO02 0x00000021>;
+ };
+
+ pinctrl_backlight_lvds1: backlight-lvds1grp {
+ fsl,pins = <IMX8QXP_SPI2_CS0_LSIO_GPIO1_IO00 0x00000021>;
+ };
+
+ pinctrl_can1: can1grp {
+ fsl,pins = <IMX8QXP_UART2_TX_ADMA_FLEXCAN1_TX 0x00000021>,
+ <IMX8QXP_UART2_RX_ADMA_FLEXCAN1_RX 0x00000021>;
+ };
+
+ pinctrl_can2: can2grp {
+ fsl,pins = <IMX8QXP_FLEXCAN2_TX_ADMA_FLEXCAN2_TX 0x00000021>,
+ <IMX8QXP_FLEXCAN2_RX_ADMA_FLEXCAN2_RX 0x00000021>;
+ };
+
+ pinctrl_ethphy0: ethphy0grp {
+ fsl,pins = <IMX8QXP_MIPI_DSI1_I2C0_SDA_LSIO_GPIO1_IO30 0x00000040>,
+ <IMX8QXP_QSPI0B_DQS_LSIO_GPIO3_IO22 0x00000040>;
+ };
+
+ pinctrl_ethphy1: ethphy1grp {
+ fsl,pins = <IMX8QXP_ADC_IN4_LSIO_GPIO1_IO14 0x00000040>,
+ <IMX8QXP_UART1_CTS_B_LSIO_GPIO0_IO24 0x00000040>;
+ };
+
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <IMX8QXP_ENET0_MDC_CONN_ENET0_MDC 0x06000041>,
+ <IMX8QXP_ENET0_MDIO_CONN_ENET0_MDIO 0x06000041>,
+ <IMX8QXP_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL 0x00000040>,
+ <IMX8QXP_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC 0x00000040>,
+ <IMX8QXP_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0 0x00000040>,
+ <IMX8QXP_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1 0x00000040>,
+ <IMX8QXP_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2 0x00000040>,
+ <IMX8QXP_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3 0x00000040>,
+ <IMX8QXP_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC 0x00000040>,
+ <IMX8QXP_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL 0x00000040>,
+ <IMX8QXP_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0 0x00000040>,
+ <IMX8QXP_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1 0x00000040>,
+ <IMX8QXP_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2 0x00000040>,
+ <IMX8QXP_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3 0x00000040>;
+ };
+
+ pinctrl_fec2: fec2grp {
+ fsl,pins = <IMX8QXP_ESAI0_SCKR_CONN_ENET1_RGMII_TX_CTL 0x00000040>,
+ <IMX8QXP_ESAI0_FSR_CONN_ENET1_RGMII_TXC 0x00000040>,
+ <IMX8QXP_ESAI0_TX4_RX1_CONN_ENET1_RGMII_TXD0 0x00000040>,
+ <IMX8QXP_ESAI0_TX5_RX0_CONN_ENET1_RGMII_TXD1 0x00000040>,
+ <IMX8QXP_ESAI0_FST_CONN_ENET1_RGMII_TXD2 0x00000040>,
+ <IMX8QXP_ESAI0_SCKT_CONN_ENET1_RGMII_TXD3 0x00000040>,
+ <IMX8QXP_ESAI0_TX0_CONN_ENET1_RGMII_RXC 0x00000040>,
+ <IMX8QXP_SPDIF0_TX_CONN_ENET1_RGMII_RX_CTL 0x00000040>,
+ <IMX8QXP_SPDIF0_RX_CONN_ENET1_RGMII_RXD0 0x00000040>,
+ <IMX8QXP_ESAI0_TX3_RX2_CONN_ENET1_RGMII_RXD1 0x00000040>,
+ <IMX8QXP_ESAI0_TX2_RX3_CONN_ENET1_RGMII_RXD2 0x00000040>,
+ <IMX8QXP_ESAI0_TX1_CONN_ENET1_RGMII_RXD3 0x00000040>;
+ };
+
+ pinctrl_flexspi0: flexspi0grp {
+ fsl,pins = <IMX8QXP_QSPI0A_DATA0_LSIO_QSPI0A_DATA0 0x0000004d>,
+ <IMX8QXP_QSPI0A_DATA1_LSIO_QSPI0A_DATA1 0x0000004d>,
+ <IMX8QXP_QSPI0A_DATA2_LSIO_QSPI0A_DATA2 0x0000004d>,
+ <IMX8QXP_QSPI0A_DATA3_LSIO_QSPI0A_DATA3 0x0000004d>,
+ <IMX8QXP_QSPI0A_DQS_LSIO_QSPI0A_DQS 0x0000004d>,
+ <IMX8QXP_QSPI0A_SS0_B_LSIO_QSPI0A_SS0_B 0x0000004d>,
+ <IMX8QXP_QSPI0A_SCLK_LSIO_QSPI0A_SCLK 0x0000004d>,
+ <IMX8QXP_QSPI0B_SCLK_LSIO_QSPI0B_SCLK 0x0000004d>,
+ <IMX8QXP_QSPI0B_DATA0_LSIO_QSPI0B_DATA0 0x0000004d>,
+ <IMX8QXP_QSPI0B_DATA1_LSIO_QSPI0B_DATA1 0x0000004d>,
+ <IMX8QXP_QSPI0B_DATA2_LSIO_QSPI0B_DATA2 0x0000004d>,
+ <IMX8QXP_QSPI0B_DATA3_LSIO_QSPI0B_DATA3 0x0000004d>,
+ <IMX8QXP_QSPI0B_SS0_B_LSIO_QSPI0B_SS0_B 0x0000004d>,
+ <IMX8QXP_QSPI0B_SS1_B_LSIO_QSPI0B_SS1_B 0x0000004d>;
+ };
+
+ pinctrl_smarc_gpio: smarcgpiogrp {
+ fsl,pins = /* SMARC_GPIO0 / CAM0_PWR# */
+ <IMX8QXP_SPI0_SCK_LSIO_GPIO1_IO04 0x00000021>,
+ /* SMARC_GPIO1 / CAM1_PWR# */
+ <IMX8QXP_SPI0_SDI_LSIO_GPIO1_IO05 0x00000021>,
+ /* SMARC_GPIO2 / CAM0_RST# */
+ <IMX8QXP_SPI0_SDO_LSIO_GPIO1_IO06 0x00000021>,
+ /* SMARC_GPIO3 / CAM1_RST# */
+ <IMX8QXP_SPI0_CS0_LSIO_GPIO1_IO08 0x00000021>,
+ /* SMARC_GPIO4 / HDA_RST# */
+ <IMX8QXP_ADC_IN5_LSIO_GPIO1_IO13 0x00000021>,
+ /* SMARC_GPIO7 */
+ <IMX8QXP_ADC_IN0_LSIO_GPIO1_IO10 0x00000021>,
+ /* SMARC_GPIO8 */
+ <IMX8QXP_ADC_IN1_LSIO_GPIO1_IO09 0x00000021>,
+ /* SMARC_GPIO9 */
+ <IMX8QXP_ADC_IN2_LSIO_GPIO1_IO12 0x00000021>,
+ /* SMARC_GPIO10 */
+ <IMX8QXP_ADC_IN3_LSIO_GPIO1_IO11 0x00000021>;
+ };
+
+ pinctrl_smarc_fangpio: smarcfangpiogrp {
+ fsl,pins = /* SMARC_GPIO5 */
+ <IMX8QXP_UART1_RX_LSIO_GPIO0_IO22 0x00000021>,
+ /* SMARC_GPIO6 */
+ <IMX8QXP_UART1_TX_LSIO_GPIO0_IO21 0x00000021>;
+ };
+
+ pinctrl_smarc_mngtpio: smarcmngtgpiogrp {
+ fsl,pins = /* SMARC BATLOW# */
+ <IMX8QXP_SPI3_CS0_LSIO_GPIO0_IO16 0x00000021>,
+ /* SMARC SLEEP */
+ <IMX8QXP_SPI3_SCK_LSIO_GPIO0_IO13 0x00000021>,
+ /* SMARC CHGPRSNT# */
+ <IMX8QXP_SPI3_SDI_LSIO_GPIO0_IO15 0x00000021>,
+ /* SMARC CHARGING# */
+ <IMX8QXP_SPI3_SDO_LSIO_GPIO0_IO14 0x00000021>,
+ /* SMARC LID */
+ <IMX8QXP_SPDIF0_EXT_CLK_LSIO_GPIO0_IO12 0x00000021>;
+ };
+
+ pinctrl_lvds0: lbdpanel0grp {
+ fsl,pins = /* LCD PWR */
+ <IMX8QXP_SPI2_SCK_LSIO_GPIO1_IO03 0x00000021>;
+ };
+
+ pinctrl_lvds1: lbdpanel1grp {
+ fsl,pins = /* LCD PWR */
+ <IMX8QXP_SPI2_SDO_LSIO_GPIO1_IO01 0x00000021>;
+ };
+
+ pinctrl_lpi2c0: lpi2c0grp {
+ fsl,pins = <IMX8QXP_MIPI_CSI0_GPIO0_00_ADMA_I2C0_SCL 0x06000021>,
+ <IMX8QXP_MIPI_CSI0_GPIO0_01_ADMA_I2C0_SDA 0x06000021>;
+ };
+
+ pinctrl_lpi2c0_gpio: lpi2c0gpiogrp {
+ fsl,pins = <IMX8QXP_MIPI_CSI0_GPIO0_00_LSIO_GPIO3_IO08 0x00000021>,
+ <IMX8QXP_MIPI_CSI0_GPIO0_01_LSIO_GPIO3_IO07 0x00000021>;
+ };
+
+ pinctrl_lpuart0: lpuart0grp {
+ fsl,pins = <IMX8QXP_UART0_RX_ADMA_UART0_RX 0x06000020>,
+ <IMX8QXP_UART0_TX_ADMA_UART0_TX 0x06000020>,
+ <IMX8QXP_FLEXCAN0_RX_ADMA_UART0_RTS_B 0x06000020>,
+ <IMX8QXP_FLEXCAN0_TX_ADMA_UART0_CTS_B 0x06000020>;
+ };
+
+ pinctrl_lpuart3: lpuart3grp {
+ fsl,pins = <IMX8QXP_SCU_GPIO0_00_ADMA_UART3_RX 0x06000020>,
+ <IMX8QXP_SCU_GPIO0_01_ADMA_UART3_TX 0x06000020>;
+ };
+
+ pinctrl_i2c0_mipi_lvds0: mipi-lvds0-i2c0grp {
+ fsl,pins = <IMX8QXP_MIPI_DSI0_I2C0_SCL_MIPI_DSI0_I2C0_SCL 0x06000021>,
+ <IMX8QXP_MIPI_DSI0_I2C0_SDA_MIPI_DSI0_I2C0_SDA 0x06000021>;
+ };
+
+ pinctrl_i2c0_gpio_mipi_lvds0: mipi-lvds0-i2c0-gpiogrp {
+ fsl,pins = <IMX8QXP_MIPI_DSI0_I2C0_SCL_LSIO_GPIO1_IO25 0x0000021>,
+ <IMX8QXP_MIPI_DSI0_I2C0_SDA_LSIO_GPIO1_IO26 0x0000021>;
+ };
+
+ pinctrl_pcieb: pcieagrp {
+ fsl,pins = <IMX8QXP_PCIE_CTRL0_PERST_B_LSIO_GPIO4_IO00 0x06000041>,
+ <IMX8QXP_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO4_IO01 0x06000041>,
+ <IMX8QXP_PCIE_CTRL0_WAKE_B_LSIO_GPIO4_IO02 0x04000041>;
+ };
+
+ pinctrl_pwm_mipi_lvds0: mipi-lvds0-pwmgrp {
+ fsl,pins = <IMX8QXP_MIPI_DSI0_GPIO0_00_MIPI_DSI0_PWM0_OUT 0x00000021>;
+ };
+
+ pinctrl_pwm_mipi_lvds1: mipi-lvds1-pwmgrp {
+ fsl,pins = <IMX8QXP_MIPI_DSI1_GPIO0_00_MIPI_DSI1_PWM0_OUT 0x00000021>;
+ };
+
+ pinctrl_rtc: rtcgrp {
+ fsl,pins = <IMX8QXP_MIPI_DSI1_GPIO0_01_LSIO_GPIO2_IO00 0x00000021>;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <IMX8QXP_EMMC0_CLK_CONN_EMMC0_CLK 0x06000040>,
+ <IMX8QXP_EMMC0_CMD_CONN_EMMC0_CMD 0x00000020>,
+ <IMX8QXP_EMMC0_DATA0_CONN_EMMC0_DATA0 0x00000020>,
+ <IMX8QXP_EMMC0_DATA1_CONN_EMMC0_DATA1 0x00000020>,
+ <IMX8QXP_EMMC0_DATA2_CONN_EMMC0_DATA2 0x00000020>,
+ <IMX8QXP_EMMC0_DATA3_CONN_EMMC0_DATA3 0x00000020>,
+ <IMX8QXP_EMMC0_DATA4_CONN_EMMC0_DATA4 0x00000020>,
+ <IMX8QXP_EMMC0_DATA5_CONN_EMMC0_DATA5 0x00000020>,
+ <IMX8QXP_EMMC0_DATA6_CONN_EMMC0_DATA6 0x00000020>,
+ <IMX8QXP_EMMC0_DATA7_CONN_EMMC0_DATA7 0x00000020>,
+ <IMX8QXP_EMMC0_STROBE_CONN_EMMC0_STROBE 0x00000040>;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1100mhzgrp {
+ fsl,pins = <IMX8QXP_EMMC0_CLK_CONN_EMMC0_CLK 0x06000041>,
+ <IMX8QXP_EMMC0_CMD_CONN_EMMC0_CMD 0x00000021>,
+ <IMX8QXP_EMMC0_DATA0_CONN_EMMC0_DATA0 0x00000021>,
+ <IMX8QXP_EMMC0_DATA1_CONN_EMMC0_DATA1 0x00000021>,
+ <IMX8QXP_EMMC0_DATA2_CONN_EMMC0_DATA2 0x00000021>,
+ <IMX8QXP_EMMC0_DATA3_CONN_EMMC0_DATA3 0x00000021>,
+ <IMX8QXP_EMMC0_DATA4_CONN_EMMC0_DATA4 0x00000021>,
+ <IMX8QXP_EMMC0_DATA5_CONN_EMMC0_DATA5 0x00000021>,
+ <IMX8QXP_EMMC0_DATA6_CONN_EMMC0_DATA6 0x00000021>,
+ <IMX8QXP_EMMC0_DATA7_CONN_EMMC0_DATA7 0x00000021>,
+ <IMX8QXP_EMMC0_STROBE_CONN_EMMC0_STROBE 0x00000041>;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1200mhzgrp {
+ fsl,pins = <IMX8QXP_EMMC0_CLK_CONN_EMMC0_CLK 0x06000041>,
+ <IMX8QXP_EMMC0_CMD_CONN_EMMC0_CMD 0x00000021>,
+ <IMX8QXP_EMMC0_DATA0_CONN_EMMC0_DATA0 0x00000021>,
+ <IMX8QXP_EMMC0_DATA1_CONN_EMMC0_DATA1 0x00000021>,
+ <IMX8QXP_EMMC0_DATA2_CONN_EMMC0_DATA2 0x00000021>,
+ <IMX8QXP_EMMC0_DATA3_CONN_EMMC0_DATA3 0x00000021>,
+ <IMX8QXP_EMMC0_DATA4_CONN_EMMC0_DATA4 0x00000021>,
+ <IMX8QXP_EMMC0_DATA5_CONN_EMMC0_DATA5 0x00000021>,
+ <IMX8QXP_EMMC0_DATA6_CONN_EMMC0_DATA6 0x00000021>,
+ <IMX8QXP_EMMC0_DATA7_CONN_EMMC0_DATA7 0x00000021>,
+ <IMX8QXP_EMMC0_STROBE_CONN_EMMC0_STROBE 0x00000041>;
+ };
+
+ pinctrl_sdvmmc: sdvmmcgrp {
+ fsl,pins = <IMX8QXP_USDHC1_RESET_B_LSIO_GPIO4_IO19 0x00000021>;
+ };
+
+ pinctrl_spi1: spi1grp {
+ fsl,pins = /* PD + PDRV Low + INOUT - MEK has 0x0600004c */
+ <IMX8QXP_SAI0_TXC_ADMA_SPI1_SDI 0x06000041>,
+ <IMX8QXP_SAI0_TXD_ADMA_SPI1_SDO 0x06000041>,
+ <IMX8QXP_SAI0_TXFS_ADMA_SPI1_SCK 0x06000041>,
+ <IMX8QXP_SAI0_RXD_LSIO_GPIO0_IO27 0x00000021>,
+ <IMX8QXP_SAI1_RXD_LSIO_GPIO0_IO29 0x00000021>;
+ };
+
+ pinctrl_sai1: sai1grp {
+ fsl,pins = <IMX8QXP_MCLK_OUT0_ADMA_ACM_MCLK_OUT0 0x06000040>,
+ <IMX8QXP_SAI1_RXC_ADMA_SAI1_TXC 0x06000040>,
+ <IMX8QXP_SAI1_RXFS_ADMA_SAI1_TXFS 0x06000040>,
+ <IMX8QXP_FLEXCAN1_RX_ADMA_SAI1_TXD 0x06000040>,
+ <IMX8QXP_FLEXCAN1_TX_ADMA_SAI1_RXD 0x06000040>;
+ };
+
+ pinctrl_usbotg1: usbotg1grp {
+ fsl,pins = <IMX8QXP_USB_SS3_TC0_CONN_USB_OTG1_PWR 0x00000021>,
+ <IMX8QXP_USB_SS3_TC2_CONN_USB_OTG1_OC 0x00000021>;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+ fsl,pins = <IMX8QXP_USDHC1_WP_LSIO_GPIO4_IO21 0x00000021>,
+ <IMX8QXP_USDHC1_CD_B_LSIO_GPIO4_IO22 0x00000021>;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <IMX8QXP_USDHC1_CLK_CONN_USDHC1_CLK 0x06000041>,
+ <IMX8QXP_USDHC1_CMD_CONN_USDHC1_CMD 0x00000021>,
+ <IMX8QXP_USDHC1_DATA0_CONN_USDHC1_DATA0 0x00000021>,
+ <IMX8QXP_USDHC1_DATA1_CONN_USDHC1_DATA1 0x00000021>,
+ <IMX8QXP_USDHC1_DATA2_CONN_USDHC1_DATA2 0x00000021>,
+ <IMX8QXP_USDHC1_DATA3_CONN_USDHC1_DATA3 0x00000021>,
+ <IMX8QXP_USDHC1_VSELECT_CONN_USDHC1_VSELECT 0x00000021>;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2100mhzgrp {
+ fsl,pins = <IMX8QXP_USDHC1_CLK_CONN_USDHC1_CLK 0x06000040>,
+ <IMX8QXP_USDHC1_CMD_CONN_USDHC1_CMD 0x00000020>,
+ <IMX8QXP_USDHC1_DATA0_CONN_USDHC1_DATA0 0x00000020>,
+ <IMX8QXP_USDHC1_DATA1_CONN_USDHC1_DATA1 0x00000020>,
+ <IMX8QXP_USDHC1_DATA2_CONN_USDHC1_DATA2 0x00000020>,
+ <IMX8QXP_USDHC1_DATA3_CONN_USDHC1_DATA3 0x00000020>,
+ <IMX8QXP_USDHC1_VSELECT_CONN_USDHC1_VSELECT 0x00000020>;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2200mhzgrp {
+ fsl,pins = <IMX8QXP_USDHC1_CLK_CONN_USDHC1_CLK 0x06000040>,
+ <IMX8QXP_USDHC1_CMD_CONN_USDHC1_CMD 0x00000020>,
+ <IMX8QXP_USDHC1_DATA0_CONN_USDHC1_DATA0 0x00000020>,
+ <IMX8QXP_USDHC1_DATA1_CONN_USDHC1_DATA1 0x00000020>,
+ <IMX8QXP_USDHC1_DATA2_CONN_USDHC1_DATA2 0x00000020>,
+ <IMX8QXP_USDHC1_DATA3_CONN_USDHC1_DATA3 0x00000020>,
+ <IMX8QXP_USDHC1_VSELECT_CONN_USDHC1_VSELECT 0x00000020>;
+ };
+};
diff --git a/arch/arm64/boot/dts/intel/Makefile b/arch/arm64/boot/dts/intel/Makefile
index d39cfb723f5b..33f6d01266b1 100644
--- a/arch/arm64/boot/dts/intel/Makefile
+++ b/arch/arm64/boot/dts/intel/Makefile
@@ -3,5 +3,6 @@ dtb-$(CONFIG_ARCH_INTEL_SOCFPGA) += socfpga_agilex_n6000.dtb \
socfpga_agilex_socdk.dtb \
socfpga_agilex_socdk_nand.dtb \
socfpga_agilex5_socdk.dtb \
+ socfpga_agilex5_socdk_nand.dtb \
socfpga_n5x_socdk.dtb
dtb-$(CONFIG_ARCH_KEEMBAY) += keembay-evm.dtb
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
index 1235ba5a9865..a77a504effea 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
@@ -114,11 +114,13 @@
cb_intosc_hs_div2_clk: cb-intosc-hs-div2-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
+ clock-frequency = <200000000>;
};
cb_intosc_ls_clk: cb-intosc-ls-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
+ clock-frequency = <400000000>;
};
f2s_free_clk: f2s-free-clk {
@@ -457,6 +459,8 @@
reg-io-width = <4>;
num-cs = <4>;
clocks = <&clkmgr AGILEX_L4_MAIN_CLK>;
+ dmas = <&pdma 16>, <&pdma 17>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -471,6 +475,8 @@
reg-io-width = <4>;
num-cs = <4>;
clocks = <&clkmgr AGILEX_L4_MAIN_CLK>;
+ dmas = <&pdma 20>, <&pdma 21>;
+ dma-names = "tx", "rx";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi
index 51c6e19e40b8..7d9394a04302 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi
@@ -222,9 +222,9 @@
status = "disabled";
};
- gpio0: gpio@ffc03200 {
+ gpio0: gpio@10c03200 {
compatible = "snps,dw-apb-gpio";
- reg = <0xffc03200 0x100>;
+ reg = <0x10c03200 0x100>;
#address-cells = <1>;
#size-cells = <0>;
resets = <&rst GPIO0_RESET>;
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk.dts
index c533e5a3a610..d3b913b7902c 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk.dts
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk.dts
@@ -15,6 +15,26 @@
chosen {
stdout-path = "serial0:115200n8";
};
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ label = "hps_led0";
+ gpios = <&porta 11 GPIO_ACTIVE_HIGH>;
+ };
+
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0x0 0x80000000 0x0 0x0>;
+ };
+};
+
+&gpio0 {
+ status = "okay";
};
&gpio1 {
@@ -25,6 +45,37 @@
clock-frequency = <25000000>;
};
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "micron,mt25qu02g", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <100000000>;
+ m25p,fast-read;
+ cdns,read-delay = <2>;
+ cdns,tshsl-ns = <50>;
+ cdns,tsd2d-ns = <50>;
+ cdns,tchsh-ns = <4>;
+ cdns,tslch-ns = <4>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qspi_boot: partition@0 {
+ label = "u-boot";
+ reg = <0x0 0x04200000>;
+ };
+
+ root: partition@4200000 {
+ label = "root";
+ reg = <0x04200000 0x0be00000>;
+ };
+ };
+ };
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_nand.dts b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_nand.dts
new file mode 100644
index 000000000000..38a582ef86b4
--- /dev/null
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_nand.dts
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025, Altera Corporation
+ */
+#include "socfpga_agilex5.dtsi"
+
+/ {
+ model = "SoCFPGA Agilex5 SoCDK NAND daughter board";
+ compatible = "intel,socfpga-agilex5-socdk-nand", "intel,socfpga-agilex5";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led0 {
+ label = "hps_led0";
+ gpios = <&porta 6 GPIO_ACTIVE_HIGH>;
+ };
+
+ led1 {
+ label = "hps_led1";
+ gpios = <&porta 7 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0x0 0x80000000 0x0 0x0>;
+ };
+};
+
+&gpio0 {
+ status = "okay";
+};
+
+&gpio1 {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&i3c0 {
+ status = "okay";
+};
+
+&i3c1 {
+ status = "okay";
+};
+
+&nand {
+ status = "okay";
+
+ nand@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0>;
+ nand-bus-width = <8>;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0 0x200000>;
+ };
+ partition@200000 {
+ label = "root";
+ reg = <0x200000 0xffe00000>;
+ };
+ };
+};
+
+&osc1 {
+ clock-frequency = <25000000>;
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&watchdog0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi
index 3a9b6907185d..242820845707 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi
@@ -26,6 +26,8 @@
leds {
compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi_quad_pins>;
led-power1 {
label = "udpu:green:power";
@@ -82,8 +84,6 @@
&spi0 {
status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&spi_quad_pins>;
flash@0 {
compatible = "jedec,spi-nor";
@@ -108,6 +108,10 @@
};
};
+&spi_quad_pins {
+ function = "gpio";
+};
+
&pinctrl_nb {
i2c2_recovery_pins: i2c2-recovery-pins {
groups = "i2c2";
diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
index 2b5e45d2c5a6..3add6506ff20 100644
--- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
@@ -124,7 +124,6 @@
expander0: pca9555@21 {
compatible = "nxp,pca9555";
- pinctrl-names = "default";
gpio-controller;
#gpio-cells = <2>;
reg = <0x21>;
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
index 9d45e881a97d..21ecb9c12505 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
@@ -122,7 +122,6 @@
/* U31 */
expander0: pca9555@21 {
compatible = "nxp,pca9555";
- pinctrl-names = "default";
gpio-controller;
#gpio-cells = <2>;
reg = <0x21>;
@@ -131,7 +130,6 @@
/* U25 */
expander1: pca9555@25 {
compatible = "nxp,pca9555";
- pinctrl-names = "default";
gpio-controller;
#gpio-cells = <2>;
reg = <0x25>;
diff --git a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
index be56a2336265..50e9e0724828 100644
--- a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
+++ b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
@@ -209,7 +209,6 @@
/* U12 */
cp0_module_expander1: pca9555@21 {
compatible = "nxp,pca9555";
- pinctrl-names = "default";
gpio-controller;
#gpio-cells = <2>;
reg = <0x21>;
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index 58484e830063..f68865d06edd 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -22,6 +22,7 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-bananapi-bpi-r3-sd.dtbo
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-rfb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986b-rfb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7988a-bananapi-bpi-r4.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt7988a-bananapi-bpi-r4-2g5.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7988a-bananapi-bpi-r4-emmc.dtbo
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7988a-bananapi-bpi-r4-sd.dtbo
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8167-pumpkin.dtb
@@ -64,6 +65,8 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-chinchou-sku16.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-magneton-sku393216.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-magneton-sku393217.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-magneton-sku393218.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-ponyta-sku0.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-ponyta-sku1.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-rusty-sku196608.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-starmie-sku0.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-starmie-sku1.dtb
@@ -107,4 +110,5 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8516-pumpkin.dtb
DTC_FLAGS_mt7986a-bananapi-bpi-r3 := -@
DTC_FLAGS_mt7986a-bananapi-bpi-r3-mini := -@
DTC_FLAGS_mt7988a-bananapi-bpi-r4 := -@
+DTC_FLAGS_mt7988a-bananapi-bpi-r4-2g5 := -@
DTC_FLAGS_mt8395-radxa-nio-12l := -@
diff --git a/arch/arm64/boot/dts/mediatek/mt6357.dtsi b/arch/arm64/boot/dts/mediatek/mt6357.dtsi
index 5fafa842d312..dca4e5c3d8e2 100644
--- a/arch/arm64/boot/dts/mediatek/mt6357.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt6357.dtsi
@@ -60,7 +60,6 @@
};
mt6357_vfe28_reg: ldo-vfe28 {
- compatible = "regulator-fixed";
regulator-name = "vfe28";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
@@ -75,7 +74,6 @@
};
mt6357_vrf18_reg: ldo-vrf18 {
- compatible = "regulator-fixed";
regulator-name = "vrf18";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -83,7 +81,6 @@
};
mt6357_vrf12_reg: ldo-vrf12 {
- compatible = "regulator-fixed";
regulator-name = "vrf12";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
@@ -112,7 +109,6 @@
};
mt6357_vcn28_reg: ldo-vcn28 {
- compatible = "regulator-fixed";
regulator-name = "vcn28";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
@@ -120,7 +116,6 @@
};
mt6357_vcn18_reg: ldo-vcn18 {
- compatible = "regulator-fixed";
regulator-name = "vcn18";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -142,7 +137,6 @@
};
mt6357_vcamio_reg: ldo-vcamio18 {
- compatible = "regulator-fixed";
regulator-name = "vcamio";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -175,7 +169,6 @@
};
mt6357_vaux18_reg: ldo-vaux18 {
- compatible = "regulator-fixed";
regulator-name = "vaux18";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -183,7 +176,6 @@
};
mt6357_vaud28_reg: ldo-vaud28 {
- compatible = "regulator-fixed";
regulator-name = "vaud28";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
@@ -191,7 +183,6 @@
};
mt6357_vio28_reg: ldo-vio28 {
- compatible = "regulator-fixed";
regulator-name = "vio28";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
@@ -199,7 +190,6 @@
};
mt6357_vio18_reg: ldo-vio18 {
- compatible = "regulator-fixed";
regulator-name = "vio18";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt6359.dtsi b/arch/arm64/boot/dts/mediatek/mt6359.dtsi
index 7b10f9c59819..467d8a4c2aa7 100644
--- a/arch/arm64/boot/dts/mediatek/mt6359.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt6359.dtsi
@@ -20,6 +20,8 @@
};
regulators {
+ compatible = "mediatek,mt6359-regulator";
+
mt6359_vs1_buck_reg: buck_vs1 {
regulator-name = "vs1";
regulator-min-microvolt = <800000>;
@@ -298,7 +300,7 @@
};
};
- mt6359rtc: mt6359rtc {
+ mt6359rtc: rtc {
compatible = "mediatek,mt6358-rtc";
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt6893-pinfunc.h b/arch/arm64/boot/dts/mediatek/mt6893-pinfunc.h
new file mode 100644
index 000000000000..982bc95c471c
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt6893-pinfunc.h
@@ -0,0 +1,1356 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Copyright (c) 2025 Collabora Ltd
+ */
+
+#ifndef __MT6893_PINFUNC_H
+#define __MT6893_PINFUNC_H
+
+#include "mt65xx.h"
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_SPI6_CLK (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_I2S5_MCK (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_PWM_0 (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_MD_INT0 (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 5)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_SPI6_CSB (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_I2S5_BCK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_PWM_1 (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 5)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_SPI6_MI (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_I2S5_LRCK (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_PWM_2 (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 5)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_SPI6_MO (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_I2S5_DO (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_PWM_3 (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_CLKM0 (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 5)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_SPI7_A_CLK (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S2_MCK (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_DMIC1_CLK (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_PCM1_DI (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 5)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_SPI7_A_CSB (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S2_BCK (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_DMIC1_DAT (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_PCM1_CLK (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 5)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_SPI7_A_MI (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S2_LRCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_DMIC_CLK (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_PCM1_SYNC (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(6) | 6)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_SPI7_A_MO (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S2_DI (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_DMIC_DAT (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_PCM1_DO0 (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_WIFI_TXD (MTK_PIN_NO(7) | 6)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_SRCLKENAI1 (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_I2S2_DI2 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_KPCOL2 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_PCM1_DO1 (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_CLKM1 (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_CONN_BT_TXD (MTK_PIN_NO(8) | 6)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_SRCLKENAI0 (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_KPROW2 (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_PCM1_DO2 (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_CLKM3 (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_CMMCLK4 (MTK_PIN_NO(9) | 6)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_MSDC1_CLK_A (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_SPI4_B_CLK (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_I2S8_MCK (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_DSI1_TE (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_MD_INT0 (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_TP_GPIO0_AO (MTK_PIN_NO(10) | 6)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_MSDC1_CMD_A (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_SPI4_B_CSB (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_I2S8_BCK (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_LCM1_RST (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_TP_GPIO1_AO (MTK_PIN_NO(11) | 6)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_MSDC1_DAT3_A (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_SPI4_B_MI (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_I2S8_LRCK (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_DMIC1_CLK (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_TP_GPIO2_AO (MTK_PIN_NO(12) | 6)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_MSDC1_DAT0_A (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_SPI4_B_MO (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I2S8_DI (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_DMIC1_DAT (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_ANT_SEL10 (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_TP_GPIO3_AO (MTK_PIN_NO(13) | 6)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_MSDC1_DAT2_A (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_SPI5_C_CLK (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_I2S9_MCK (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_IDDIG (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_ANT_SEL11 (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_TP_GPIO4_AO (MTK_PIN_NO(14) | 6)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_MSDC1_DAT1_A (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_SPI5_C_CSB (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_I2S9_BCK (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_USB_DRVVBUS (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_ANT_SEL12 (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_TP_GPIO5_AO (MTK_PIN_NO(15) | 6)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_SRCLKENAI1 (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_SPI5_C_MI (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_I2S9_LRCK (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_KPCOL2 (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_TP_GPIO6_AO (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_DBG_MON_A30 (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_SRCLKENAI0 (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_SPI5_C_MO (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_I2S9_DO (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_KPROW2 (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_GPS_L5_ELNA_EN (MTK_PIN_NO(17) | 5)
+#define PINMUX_GPIO17__FUNC_TP_GPIO7_AO (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_DBG_MON_A31 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_DP_TX_HPD (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SPI4_C_MI (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_SPI1_B_MI (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_ANT_SEL10 (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_MD_INT0 (MTK_PIN_NO(18) | 6)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_SRCLKENAI1 (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SPI4_C_MO (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_SPI1_B_MO (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_GPS_L5_ELNA_EN (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_ANT_SEL11 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(19) | 6)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_SRCLKENAI0 (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SPI4_C_CLK (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_SPI1_B_CLK (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_PWM_3 (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_ANT_SEL12 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(20) | 6)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_DP_TX_HPD (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_SPI4_C_CSB (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_SPI1_B_CSB (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_I2S7_MCK (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_I2S9_MCK (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_IDDIG (MTK_PIN_NO(21) | 6)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_LCM1_RST (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_SPI0_C_CLK (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_SPI7_B_CLK (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_I2S7_BCK (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_I2S9_BCK (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_SCL13 (MTK_PIN_NO(22) | 6)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_DSI1_TE (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_SPI0_C_CSB (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_SPI7_B_CSB (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_I2S7_LRCK (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_I2S9_LRCK (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_SDA13 (MTK_PIN_NO(23) | 6)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_SRCLKENAI1 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_SPI0_C_MI (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_SPI7_B_MI (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_I2S6_DI (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_I2S8_DI (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_SCL_6306 (MTK_PIN_NO(24) | 6)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_SRCLKENAI0 (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_SPI0_C_MO (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SPI7_B_MO (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_I2S7_DO (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_I2S9_DO (MTK_PIN_NO(25) | 5)
+#define PINMUX_GPIO25__FUNC_SDA_6306 (MTK_PIN_NO(25) | 6)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_PWM_2 (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_CLKM0 (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_USB_DRVVBUS (MTK_PIN_NO(26) | 3)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_PWM_3 (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_CLKM1 (MTK_PIN_NO(27) | 2)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_PWM_0 (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_CLKM2 (MTK_PIN_NO(28) | 2)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_PWM_1 (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_CLKM3 (MTK_PIN_NO(29) | 2)
+#define PINMUX_GPIO29__FUNC_DSI1_TE (MTK_PIN_NO(29) | 3)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_PWM_2 (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_CLKM0 (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_LCM1_RST (MTK_PIN_NO(30) | 3)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_I2S3_MCK (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_I2S1_MCK (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_I2S5_MCK (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_SRCLKENAI0 (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_I2S0_MCK (MTK_PIN_NO(31) | 5)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_I2S3_BCK (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_I2S1_BCK (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_I2S5_BCK (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_PCM0_CLK (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_I2S0_BCK (MTK_PIN_NO(32) | 5)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_I2S3_LRCK (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_I2S1_LRCK (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_I2S5_LRCK (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_PCM0_SYNC (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_I2S0_LRCK (MTK_PIN_NO(33) | 5)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_I2S0_DI (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_I2S2_DI (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_I2S2_DI2 (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_PCM0_DI (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_I2S0_DI_A (MTK_PIN_NO(34) | 5)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_I2S3_DO (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_I2S1_DO (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_I2S5_DO (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_PCM0_DO (MTK_PIN_NO(35) | 4)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_SPI5_A_CLK (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_DMIC1_CLK (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_IDDIG (MTK_PIN_NO(36) | 3)
+#define PINMUX_GPIO36__FUNC_MD_URXD0 (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_UCTS0 (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_URXD1 (MTK_PIN_NO(36) | 6)
+#define PINMUX_GPIO36__FUNC_DBG_MON_A0 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_SPI5_A_CSB (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_DMIC1_DAT (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_USB_DRVVBUS (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_MD_UTXD0 (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_URTS0 (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_UTXD1 (MTK_PIN_NO(37) | 6)
+#define PINMUX_GPIO37__FUNC_DBG_MON_A1 (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_SPI5_A_MI (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_DMIC_CLK (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_DSI1_TE (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_MD_URXD1 (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_URXD0 (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_UCTS1 (MTK_PIN_NO(38) | 6)
+#define PINMUX_GPIO38__FUNC_DBG_MON_A2 (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_SPI5_A_MO (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_DMIC_DAT (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_LCM1_RST (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_MD_UTXD1 (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_UTXD0 (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_URTS1 (MTK_PIN_NO(39) | 6)
+#define PINMUX_GPIO39__FUNC_DBG_MON_A3 (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_DISP_PWM (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_DBG_MON_A6 (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_DSI_TE (MTK_PIN_NO(41) | 1)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_LCM_RST (MTK_PIN_NO(42) | 1)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(43) | 1)
+#define PINMUX_GPIO43__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(43) | 2)
+#define PINMUX_GPIO43__FUNC_SCL_6306 (MTK_PIN_NO(43) | 3)
+#define PINMUX_GPIO43__FUNC_ADSP_URXD0 (MTK_PIN_NO(43) | 4)
+#define PINMUX_GPIO43__FUNC_PTA_RXD (MTK_PIN_NO(43) | 5)
+#define PINMUX_GPIO43__FUNC_SSPM_URXD_AO (MTK_PIN_NO(43) | 6)
+#define PINMUX_GPIO43__FUNC_DBG_MON_A4 (MTK_PIN_NO(43) | 7)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(44) | 1)
+#define PINMUX_GPIO44__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(44) | 2)
+#define PINMUX_GPIO44__FUNC_SDA_6306 (MTK_PIN_NO(44) | 3)
+#define PINMUX_GPIO44__FUNC_ADSP_UTXD0 (MTK_PIN_NO(44) | 4)
+#define PINMUX_GPIO44__FUNC_PTA_TXD (MTK_PIN_NO(44) | 5)
+#define PINMUX_GPIO44__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(44) | 6)
+#define PINMUX_GPIO44__FUNC_DBG_MON_A5 (MTK_PIN_NO(44) | 7)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_MCUPM_JTAG_TDI (MTK_PIN_NO(45) | 3)
+#define PINMUX_GPIO45__FUNC_APU_JTAG_TDI (MTK_PIN_NO(45) | 4)
+#define PINMUX_GPIO45__FUNC_CCU_JTAG_TDI (MTK_PIN_NO(45) | 5)
+#define PINMUX_GPIO45__FUNC_LVTS_SCK (MTK_PIN_NO(45) | 6)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_MCUPM_JTAG_TMS (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_APU_JTAG_TMS (MTK_PIN_NO(46) | 4)
+#define PINMUX_GPIO46__FUNC_CCU_JTAG_TMS (MTK_PIN_NO(46) | 5)
+#define PINMUX_GPIO46__FUNC_LVTS_SDI (MTK_PIN_NO(46) | 6)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_MCUPM_JTAG_TDO (MTK_PIN_NO(47) | 3)
+#define PINMUX_GPIO47__FUNC_APU_JTAG_TDO (MTK_PIN_NO(47) | 4)
+#define PINMUX_GPIO47__FUNC_CCU_JTAG_TDO (MTK_PIN_NO(47) | 5)
+#define PINMUX_GPIO47__FUNC_LVTS_SCF (MTK_PIN_NO(47) | 6)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(48) | 2)
+#define PINMUX_GPIO48__FUNC_MCUPM_JTAG_TRSTN (MTK_PIN_NO(48) | 3)
+#define PINMUX_GPIO48__FUNC_APU_JTAG_TRST (MTK_PIN_NO(48) | 4)
+#define PINMUX_GPIO48__FUNC_CCU_JTAG_TRST (MTK_PIN_NO(48) | 5)
+#define PINMUX_GPIO48__FUNC_LVTS_FOUT (MTK_PIN_NO(48) | 6)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(49) | 2)
+#define PINMUX_GPIO49__FUNC_MCUPM_JTAG_TCK (MTK_PIN_NO(49) | 3)
+#define PINMUX_GPIO49__FUNC_APU_JTAG_TCK (MTK_PIN_NO(49) | 4)
+#define PINMUX_GPIO49__FUNC_CCU_JTAG_TCK (MTK_PIN_NO(49) | 5)
+#define PINMUX_GPIO49__FUNC_LVTS_SDO (MTK_PIN_NO(49) | 6)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(50) | 2)
+#define PINMUX_GPIO50__FUNC_LVTS_26M (MTK_PIN_NO(50) | 6)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_MSDC1_CLK (MTK_PIN_NO(51) | 1)
+#define PINMUX_GPIO51__FUNC_PCM1_CLK (MTK_PIN_NO(51) | 2)
+#define PINMUX_GPIO51__FUNC_VPU_UDI_TCK (MTK_PIN_NO(51) | 3)
+#define PINMUX_GPIO51__FUNC_UDI_TCK (MTK_PIN_NO(51) | 4)
+#define PINMUX_GPIO51__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(51) | 5)
+#define PINMUX_GPIO51__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(51) | 6)
+#define PINMUX_GPIO51__FUNC_JTCK_SEL3 (MTK_PIN_NO(51) | 7)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_MSDC1_CMD (MTK_PIN_NO(52) | 1)
+#define PINMUX_GPIO52__FUNC_PCM1_SYNC (MTK_PIN_NO(52) | 2)
+#define PINMUX_GPIO52__FUNC_VPU_UDI_TMS (MTK_PIN_NO(52) | 3)
+#define PINMUX_GPIO52__FUNC_UDI_TMS (MTK_PIN_NO(52) | 4)
+#define PINMUX_GPIO52__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(52) | 5)
+#define PINMUX_GPIO52__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(52) | 6)
+#define PINMUX_GPIO52__FUNC_JTMS_SEL3 (MTK_PIN_NO(52) | 7)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_MSDC1_DAT3 (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_PCM1_DI (MTK_PIN_NO(53) | 2)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_MSDC1_DAT0 (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_PCM1_DO0 (MTK_PIN_NO(54) | 2)
+#define PINMUX_GPIO54__FUNC_VPU_UDI_TDI (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_UDI_TDI (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(54) | 5)
+#define PINMUX_GPIO54__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(54) | 6)
+#define PINMUX_GPIO54__FUNC_JTDI_SEL3 (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_MSDC1_DAT2 (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_PCM1_DO2 (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_VPU_UDI_NTRST (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_UDI_NTRST (MTK_PIN_NO(55) | 4)
+#define PINMUX_GPIO55__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(55) | 5)
+#define PINMUX_GPIO55__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(55) | 6)
+#define PINMUX_GPIO55__FUNC_JTRSTN_SEL3 (MTK_PIN_NO(55) | 7)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_MSDC1_DAT1 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_PCM1_DO1 (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_VPU_UDI_TDO (MTK_PIN_NO(56) | 3)
+#define PINMUX_GPIO56__FUNC_UDI_TDO (MTK_PIN_NO(56) | 4)
+#define PINMUX_GPIO56__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(56) | 5)
+#define PINMUX_GPIO56__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(56) | 6)
+#define PINMUX_GPIO56__FUNC_JTDO_SEL3 (MTK_PIN_NO(56) | 7)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_MIPI2_D_SCLK (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_DBG_MON_A14 (MTK_PIN_NO(57) | 7)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_MIPI2_D_SDATA (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_DBG_MON_A15 (MTK_PIN_NO(58) | 7)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_MIPI_M_SCLK (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_DBG_MON_A17 (MTK_PIN_NO(59) | 7)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_MIPI_M_SDATA (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_DBG_MON_A18 (MTK_PIN_NO(60) | 7)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_MD_UCNT_A_TGL (MTK_PIN_NO(61) | 1)
+#define PINMUX_GPIO61__FUNC_DBG_MON_A16 (MTK_PIN_NO(61) | 7)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_DIGRF_IRQ (MTK_PIN_NO(62) | 1)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_BPI_BUS0 (MTK_PIN_NO(63) | 1)
+#define PINMUX_GPIO63__FUNC_DBG_MON_A19 (MTK_PIN_NO(63) | 7)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_BPI_BUS1 (MTK_PIN_NO(64) | 1)
+#define PINMUX_GPIO64__FUNC_DBG_MON_A20 (MTK_PIN_NO(64) | 7)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_BPI_BUS2 (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_DBG_MON_A21 (MTK_PIN_NO(65) | 7)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_BPI_BUS3 (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_DBG_MON_A22 (MTK_PIN_NO(66) | 7)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_BPI_BUS4 (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_BPI_BUS5 (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_BPI_BUS6 (MTK_PIN_NO(69) | 1)
+#define PINMUX_GPIO69__FUNC_CONN_BPI_BUS6 (MTK_PIN_NO(69) | 2)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_BPI_BUS7 (MTK_PIN_NO(70) | 1)
+#define PINMUX_GPIO70__FUNC_CONN_BPI_BUS7 (MTK_PIN_NO(70) | 2)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_BPI_BUS8 (MTK_PIN_NO(71) | 1)
+#define PINMUX_GPIO71__FUNC_CONN_BPI_BUS8 (MTK_PIN_NO(71) | 2)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_BPI_BUS9 (MTK_PIN_NO(72) | 1)
+#define PINMUX_GPIO72__FUNC_CONN_BPI_BUS9 (MTK_PIN_NO(72) | 2)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_BPI_BUS10 (MTK_PIN_NO(73) | 1)
+#define PINMUX_GPIO73__FUNC_CONN_BPI_BUS10 (MTK_PIN_NO(73) | 2)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_BPI_BUS11_OLAT0 (MTK_PIN_NO(74) | 1)
+#define PINMUX_GPIO74__FUNC_CONN_BPI_BUS11_OLAT0 (MTK_PIN_NO(74) | 2)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_BPI_BUS12_OLAT1 (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_CONN_BPI_BUS12_OLAT1 (MTK_PIN_NO(75) | 2)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_BPI_BUS13_OLAT2 (MTK_PIN_NO(76) | 1)
+#define PINMUX_GPIO76__FUNC_CONN_BPI_BUS13_OLAT2 (MTK_PIN_NO(76) | 2)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_BPI_BUS14_OLAT3 (MTK_PIN_NO(77) | 1)
+#define PINMUX_GPIO77__FUNC_CONN_BPI_BUS14_OLAT3 (MTK_PIN_NO(77) | 2)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_BPI_BUS15_OLAT4 (MTK_PIN_NO(78) | 1)
+#define PINMUX_GPIO78__FUNC_CONN_BPI_BUS15_OLAT4 (MTK_PIN_NO(78) | 2)
+#define PINMUX_GPIO78__FUNC_DBG_MON_A7 (MTK_PIN_NO(78) | 7)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_BUS16_OLAT5 (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_CONN_BPI_BUS16_OLAT5 (MTK_PIN_NO(79) | 2)
+#define PINMUX_GPIO79__FUNC_DBG_MON_A8 (MTK_PIN_NO(79) | 7)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_BUS17_ANT0 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_CONN_BPI_BUS17_ANT0 (MTK_PIN_NO(80) | 2)
+#define PINMUX_GPIO80__FUNC_DBG_MON_A9 (MTK_PIN_NO(80) | 7)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_BPI_BUS18_ANT1 (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_CONN_BPI_BUS18_ANT1 (MTK_PIN_NO(81) | 2)
+#define PINMUX_GPIO81__FUNC_DBG_MON_A10 (MTK_PIN_NO(81) | 7)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_BPI_BUS19_ANT2 (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_CONN_BPI_BUS19_ANT2 (MTK_PIN_NO(82) | 2)
+#define PINMUX_GPIO82__FUNC_DBG_MON_A11 (MTK_PIN_NO(82) | 7)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_BPI_BUS20_ANT3 (MTK_PIN_NO(83) | 1)
+#define PINMUX_GPIO83__FUNC_CONN_BPI_BUS20_ANT3 (MTK_PIN_NO(83) | 2)
+#define PINMUX_GPIO83__FUNC_DBG_MON_A12 (MTK_PIN_NO(83) | 7)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_BPI_BUS21_ANT4 (MTK_PIN_NO(84) | 1)
+#define PINMUX_GPIO84__FUNC_CONN_BPI_BUS21_ANT4 (MTK_PIN_NO(84) | 2)
+#define PINMUX_GPIO84__FUNC_DBG_MON_A13 (MTK_PIN_NO(84) | 7)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_MIPI1_D_SCLK (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_CONN_MIPI1_SCLK (MTK_PIN_NO(85) | 2)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_MIPI1_D_SDATA (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_CONN_MIPI1_SDATA (MTK_PIN_NO(86) | 2)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_MIPI0_D_SCLK (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_CONN_MIPI0_SCLK (MTK_PIN_NO(87) | 2)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_MIPI0_D_SDATA (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_CONN_MIPI0_SDATA (MTK_PIN_NO(88) | 2)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_SPMI_SCL (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_SCL10 (MTK_PIN_NO(89) | 2)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_SPMI_SDA (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_SDA10 (MTK_PIN_NO(90) | 2)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_AP_GOOD (MTK_PIN_NO(91) | 1)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_URXD0 (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_MD_URXD0 (MTK_PIN_NO(92) | 2)
+#define PINMUX_GPIO92__FUNC_MD_URXD1 (MTK_PIN_NO(92) | 3)
+#define PINMUX_GPIO92__FUNC_SSPM_URXD_AO (MTK_PIN_NO(92) | 4)
+#define PINMUX_GPIO92__FUNC_CONN_BGF_UART0_RXD (MTK_PIN_NO(92) | 5)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_UTXD0 (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_MD_UTXD0 (MTK_PIN_NO(93) | 2)
+#define PINMUX_GPIO93__FUNC_MD_UTXD1 (MTK_PIN_NO(93) | 3)
+#define PINMUX_GPIO93__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(93) | 4)
+#define PINMUX_GPIO93__FUNC_CONN_BGF_UART0_TXD (MTK_PIN_NO(93) | 5)
+#define PINMUX_GPIO93__FUNC_WIFI_TXD (MTK_PIN_NO(93) | 6)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_URXD1 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_ADSP_URXD0 (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_MD32_0_RXD (MTK_PIN_NO(94) | 3)
+#define PINMUX_GPIO94__FUNC_SSPM_URXD_AO (MTK_PIN_NO(94) | 4)
+#define PINMUX_GPIO94__FUNC_TP_URXD1_AO (MTK_PIN_NO(94) | 5)
+#define PINMUX_GPIO94__FUNC_TP_URXD2_AO (MTK_PIN_NO(94) | 6)
+#define PINMUX_GPIO94__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(94) | 7)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_UTXD1 (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_ADSP_UTXD0 (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_MD32_0_TXD (MTK_PIN_NO(95) | 3)
+#define PINMUX_GPIO95__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(95) | 4)
+#define PINMUX_GPIO95__FUNC_TP_UTXD1_AO (MTK_PIN_NO(95) | 5)
+#define PINMUX_GPIO95__FUNC_TP_UTXD2_AO (MTK_PIN_NO(95) | 6)
+#define PINMUX_GPIO95__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(95) | 7)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_TDM_LRCK (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_I2S7_LRCK (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_I2S9_LRCK (MTK_PIN_NO(96) | 3)
+#define PINMUX_GPIO96__FUNC_SPI4_A_CLK (MTK_PIN_NO(96) | 4)
+#define PINMUX_GPIO96__FUNC_ADSP_JTAG0_TDI (MTK_PIN_NO(96) | 5)
+#define PINMUX_GPIO96__FUNC_CONN_BGF_DSP_L1_JDI (MTK_PIN_NO(96) | 6)
+#define PINMUX_GPIO96__FUNC_IO_JTAG_TDI (MTK_PIN_NO(96) | 7)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_TDM_BCK (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_I2S7_BCK (MTK_PIN_NO(97) | 2)
+#define PINMUX_GPIO97__FUNC_I2S9_BCK (MTK_PIN_NO(97) | 3)
+#define PINMUX_GPIO97__FUNC_SPI4_A_CSB (MTK_PIN_NO(97) | 4)
+#define PINMUX_GPIO97__FUNC_ADSP_JTAG0_TRSTN (MTK_PIN_NO(97) | 5)
+#define PINMUX_GPIO97__FUNC_CONN_BGF_DSP_L1_JINTP (MTK_PIN_NO(97) | 6)
+#define PINMUX_GPIO97__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(97) | 7)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_TDM_MCK (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_I2S7_MCK (MTK_PIN_NO(98) | 2)
+#define PINMUX_GPIO98__FUNC_I2S9_MCK (MTK_PIN_NO(98) | 3)
+#define PINMUX_GPIO98__FUNC_SPI4_A_MI (MTK_PIN_NO(98) | 4)
+#define PINMUX_GPIO98__FUNC_ADSP_JTAG0_TCK (MTK_PIN_NO(98) | 5)
+#define PINMUX_GPIO98__FUNC_CONN_BGF_DSP_L1_JCK (MTK_PIN_NO(98) | 6)
+#define PINMUX_GPIO98__FUNC_IO_JTAG_TCK (MTK_PIN_NO(98) | 7)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_TDM_DATA0 (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_I2S6_DI (MTK_PIN_NO(99) | 2)
+#define PINMUX_GPIO99__FUNC_I2S8_DI (MTK_PIN_NO(99) | 3)
+#define PINMUX_GPIO99__FUNC_SPI4_A_MO (MTK_PIN_NO(99) | 4)
+#define PINMUX_GPIO99__FUNC_ADSP_JTAG0_TDO (MTK_PIN_NO(99) | 5)
+#define PINMUX_GPIO99__FUNC_CONN_BGF_DSP_L1_JDO (MTK_PIN_NO(99) | 6)
+#define PINMUX_GPIO99__FUNC_IO_JTAG_TDO (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_TDM_DATA1 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_I2S7_DO (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_I2S9_DO (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_DP_TX_HPD (MTK_PIN_NO(100) | 4)
+#define PINMUX_GPIO100__FUNC_ADSP_JTAG0_TMS (MTK_PIN_NO(100) | 5)
+#define PINMUX_GPIO100__FUNC_CONN_BGF_DSP_L1_JMS (MTK_PIN_NO(100) | 6)
+#define PINMUX_GPIO100__FUNC_IO_JTAG_TMS (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_TDM_DATA2 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_DMIC1_CLK (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_SRCLKENAI0 (MTK_PIN_NO(101) | 3)
+#define PINMUX_GPIO101__FUNC_SPI5_B_CLK (MTK_PIN_NO(101) | 4)
+#define PINMUX_GPIO101__FUNC_CLKM0 (MTK_PIN_NO(101) | 5)
+#define PINMUX_GPIO101__FUNC_DAP_MD32_SWD (MTK_PIN_NO(101) | 7)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_TDM_DATA3 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_DMIC1_DAT (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_SRCLKENAI1 (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_SPI5_B_CSB (MTK_PIN_NO(102) | 4)
+#define PINMUX_GPIO102__FUNC_DP_TX_HPD (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(102) | 6)
+#define PINMUX_GPIO102__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_SPI0_A_MI (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_SCP_SPI0_MI (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_DFD_TDO (MTK_PIN_NO(103) | 5)
+#define PINMUX_GPIO103__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(103) | 6)
+#define PINMUX_GPIO103__FUNC_JTDO_SEL1 (MTK_PIN_NO(103) | 7)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_SPI0_A_CSB (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_SCP_SPI0_CS (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_DFD_TMS (MTK_PIN_NO(104) | 5)
+#define PINMUX_GPIO104__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(104) | 6)
+#define PINMUX_GPIO104__FUNC_JTMS_SEL1 (MTK_PIN_NO(104) | 7)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_SPI0_A_MO (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_SCP_SPI0_MO (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_SCP_SDA0 (MTK_PIN_NO(105) | 3)
+#define PINMUX_GPIO105__FUNC_DFD_TDI (MTK_PIN_NO(105) | 5)
+#define PINMUX_GPIO105__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(105) | 6)
+#define PINMUX_GPIO105__FUNC_JTDI_SEL1 (MTK_PIN_NO(105) | 7)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_SPI0_A_CLK (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_SCP_SPI0_CK (MTK_PIN_NO(106) | 2)
+#define PINMUX_GPIO106__FUNC_SCP_SCL0 (MTK_PIN_NO(106) | 3)
+#define PINMUX_GPIO106__FUNC_DFD_TCK_XI (MTK_PIN_NO(106) | 5)
+#define PINMUX_GPIO106__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(106) | 6)
+#define PINMUX_GPIO106__FUNC_JTCK_SEL1 (MTK_PIN_NO(106) | 7)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DMIC_CLK (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_PWM_0 (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_CLKM2 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_SPI5_B_MI (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(107) | 6)
+#define PINMUX_GPIO107__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(107) | 7)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_DMIC_DAT (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_PWM_1 (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_CLKM3 (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_SPI5_B_MO (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_DAP_SONIC_SWD (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_I2S1_MCK (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_I2S3_MCK (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_I2S2_MCK (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_DP_TX_HPD (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_I2S2_MCK_A (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_SRCLKENAI0 (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_DAP_SONIC_SWCK (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_I2S1_BCK (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_I2S3_BCK (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_I2S2_BCK (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_PCM0_CLK (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_I2S2_BCK_A (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_CONN_BGF_MCU_TDO (MTK_PIN_NO(110) | 6)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_I2S1_LRCK (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_I2S3_LRCK (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_I2S2_LRCK (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_PCM0_SYNC (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_I2S2_LRCK_A (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_CONN_BGF_MCU_TDI (MTK_PIN_NO(111) | 6)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_I2S2_DI (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_I2S0_DI (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_I2S2_DI2 (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_PCM0_DI (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_I2S2_DI_A (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_CONN_BGF_MCU_TMS (MTK_PIN_NO(112) | 6)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_I2S1_DO (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_I2S3_DO (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_I2S5_DO (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_PCM0_DO (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_I2S2_DI2 (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_CONN_BGF_MCU_TCK (MTK_PIN_NO(113) | 6)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_SPI2_MI (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_SCP_SPI2_MI (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_CONN_BGF_MCU_TRST_B (MTK_PIN_NO(114) | 6)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_SPI2_CSB (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_SCP_SPI2_CS (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_CONN_BGF_MCU_DBGI_N (MTK_PIN_NO(115) | 6)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_SPI2_MO (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_SCP_SPI2_MO (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_SCP_SDA1 (MTK_PIN_NO(116) | 3)
+#define PINMUX_GPIO116__FUNC_CONN_BGF_MCU_DBGACK_N (MTK_PIN_NO(116) | 6)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_SPI2_CLK (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_SCP_SPI2_CK (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_SCP_SCL1 (MTK_PIN_NO(117) | 3)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_SCL1 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_SCP_SCL0 (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_SCP_SCL1 (MTK_PIN_NO(118) | 3)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_SDA1 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_SCP_SDA0 (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_SCP_SDA1 (MTK_PIN_NO(119) | 3)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_SCL9 (MTK_PIN_NO(120) | 1)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_SDA9 (MTK_PIN_NO(121) | 1)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_SCL8 (MTK_PIN_NO(122) | 1)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_SDA8 (MTK_PIN_NO(123) | 1)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_SCL7 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_DMIC1_CLK (MTK_PIN_NO(124) | 2)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_SDA7 (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_DMIC1_DAT (MTK_PIN_NO(125) | 2)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_CMFLASH0 (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_PWM_2 (MTK_PIN_NO(126) | 2)
+#define PINMUX_GPIO126__FUNC_TP_UCTS1_AO (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_UCTS0 (MTK_PIN_NO(126) | 4)
+#define PINMUX_GPIO126__FUNC_SCL11 (MTK_PIN_NO(126) | 5)
+#define PINMUX_GPIO126__FUNC_MD32_1_GPIO0 (MTK_PIN_NO(126) | 6)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_CMFLASH1 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_PWM_3 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_TP_URTS1_AO (MTK_PIN_NO(127) | 3)
+#define PINMUX_GPIO127__FUNC_URTS0 (MTK_PIN_NO(127) | 4)
+#define PINMUX_GPIO127__FUNC_SDA11 (MTK_PIN_NO(127) | 5)
+#define PINMUX_GPIO127__FUNC_MD32_1_GPIO1 (MTK_PIN_NO(127) | 6)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_CMFLASH2 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_PWM_0 (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_TP_UCTS2_AO (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_UCTS1 (MTK_PIN_NO(128) | 4)
+#define PINMUX_GPIO128__FUNC_SCL12 (MTK_PIN_NO(128) | 5)
+#define PINMUX_GPIO128__FUNC_MD32_1_GPIO2 (MTK_PIN_NO(128) | 6)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_CMFLASH3 (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_PWM_1 (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_TP_URTS2_AO (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_URTS1 (MTK_PIN_NO(129) | 4)
+#define PINMUX_GPIO129__FUNC_SDA12 (MTK_PIN_NO(129) | 5)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_CMVREF0 (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_ANT_SEL10 (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_SCP_JTAG0_TDO (MTK_PIN_NO(130) | 3)
+#define PINMUX_GPIO130__FUNC_MD32_0_JTAG_TDO (MTK_PIN_NO(130) | 4)
+#define PINMUX_GPIO130__FUNC_SCL11 (MTK_PIN_NO(130) | 5)
+#define PINMUX_GPIO130__FUNC_CONN_WF_MCU_TDO (MTK_PIN_NO(130) | 6)
+#define PINMUX_GPIO130__FUNC_DBG_MON_A23 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_CMVREF1 (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_ANT_SEL11 (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_SCP_JTAG0_TDI (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_MD32_0_JTAG_TDI (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_SDA11 (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_CONN_WF_MCU_TDI (MTK_PIN_NO(131) | 6)
+#define PINMUX_GPIO131__FUNC_DBG_MON_A26 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_CMVREF2 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_ANT_SEL12 (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_SCP_JTAG0_TMS (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_MD32_0_JTAG_TMS (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_CONN_WF_MCU_TMS (MTK_PIN_NO(132) | 6)
+#define PINMUX_GPIO132__FUNC_DBG_MON_A28 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_CMVREF3 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_SCP_JTAG0_TCK (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_MD32_0_JTAG_TCK (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_CONN_WF_MCU_TCK (MTK_PIN_NO(133) | 6)
+#define PINMUX_GPIO133__FUNC_DBG_MON_A24 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_CMVREF4 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_GPS_L5_ELNA_EN (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_SCP_JTAG0_TRSTN (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_MD32_0_JTAG_TRST (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_CONN_WF_MCU_TRST_B (MTK_PIN_NO(134) | 6)
+#define PINMUX_GPIO134__FUNC_DBG_MON_A27 (MTK_PIN_NO(134) | 7)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_PWM_0 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_SRCLKENAI1 (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_MD_URXD0 (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_MD32_0_RXD (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_CONN_WF_MCU_DBGI_N (MTK_PIN_NO(135) | 6)
+#define PINMUX_GPIO135__FUNC_DBG_MON_A29 (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_CMMCLK3 (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_CLKM1 (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_MD_UTXD0 (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_MD32_0_TXD (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_CONN_BT_TXD (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_CONN_WF_MCU_DBGACK_N (MTK_PIN_NO(136) | 6)
+#define PINMUX_GPIO136__FUNC_DBG_MON_A25 (MTK_PIN_NO(136) | 7)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_CMMCLK4 (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_CLKM2 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_MD_URXD1 (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_MD32_1_RXD (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_ILDO_DOUT0 (MTK_PIN_NO(137) | 5)
+#define PINMUX_GPIO137__FUNC_CONN_BGF_UART0_RXD (MTK_PIN_NO(137) | 6)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_CMMCLK5 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_CLKM3 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_MD_UTXD1 (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_MD32_1_TXD (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_ILDO_DOUT1 (MTK_PIN_NO(138) | 5)
+#define PINMUX_GPIO138__FUNC_CONN_BGF_UART0_TXD (MTK_PIN_NO(138) | 6)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_SCL4 (MTK_PIN_NO(139) | 1)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_SDA4 (MTK_PIN_NO(140) | 1)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_SCL2 (MTK_PIN_NO(141) | 1)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_SDA2 (MTK_PIN_NO(142) | 1)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_CMVREF0 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_SPI3_CLK (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_ADSP_JTAG1_TDO (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_SCP_JTAG1_TDO (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_MD32_1_JTAG_TDO (MTK_PIN_NO(143) | 5)
+#define PINMUX_GPIO143__FUNC_CONN_BGF_DSP_L5_JDO (MTK_PIN_NO(143) | 6)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_CMVREF1 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_SPI3_CSB (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_ADSP_JTAG1_TDI (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_SCP_JTAG1_TDI (MTK_PIN_NO(144) | 4)
+#define PINMUX_GPIO144__FUNC_MD32_1_JTAG_TDI (MTK_PIN_NO(144) | 5)
+#define PINMUX_GPIO144__FUNC_CONN_BGF_DSP_L5_JDI (MTK_PIN_NO(144) | 6)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_CMVREF2 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_SPI3_MI (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_ADSP_JTAG1_TMS (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_SCP_JTAG1_TMS (MTK_PIN_NO(145) | 4)
+#define PINMUX_GPIO145__FUNC_MD32_1_JTAG_TMS (MTK_PIN_NO(145) | 5)
+#define PINMUX_GPIO145__FUNC_CONN_BGF_DSP_L5_JMS (MTK_PIN_NO(145) | 6)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_CMVREF3 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_SPI3_MO (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_ADSP_JTAG1_TCK (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_SCP_JTAG1_TCK (MTK_PIN_NO(146) | 4)
+#define PINMUX_GPIO146__FUNC_MD32_1_JTAG_TCK (MTK_PIN_NO(146) | 5)
+#define PINMUX_GPIO146__FUNC_CONN_BGF_DSP_L5_JCK (MTK_PIN_NO(146) | 6)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_CMVREF4 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_ADSP_JTAG1_TRSTN (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_SCP_JTAG1_TRSTN (MTK_PIN_NO(147) | 4)
+#define PINMUX_GPIO147__FUNC_MD32_1_JTAG_TRST (MTK_PIN_NO(147) | 5)
+#define PINMUX_GPIO147__FUNC_CONN_BGF_DSP_L5_JINTP (MTK_PIN_NO(147) | 6)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_PWM_1 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_AGPS_SYNC (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_CMMCLK5 (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_CONN_WF_MCU_AICE_TMSC (MTK_PIN_NO(148) | 6)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_CMMCLK0 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_CLKM0 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_MD32_0_GPIO0 (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_CONN_WF_MCU_AICE_TCKC (MTK_PIN_NO(149) | 6)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_CMMCLK1 (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_CLKM1 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_MD32_0_GPIO1 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_CONN_BGF_MCU_AICE_TMSC (MTK_PIN_NO(150) | 6)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_CMMCLK2 (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_CLKM2 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_MD32_0_GPIO2 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_CONN_BGF_MCU_AICE_TCKC (MTK_PIN_NO(151) | 6)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_KPROW1 (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_PWM_2 (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_IDDIG (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_DP_TX_HPD (MTK_PIN_NO(152) | 4)
+#define PINMUX_GPIO152__FUNC_DSI1_TE (MTK_PIN_NO(152) | 5)
+#define PINMUX_GPIO152__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(152) | 6)
+#define PINMUX_GPIO152__FUNC_DBG_MON_B2 (MTK_PIN_NO(152) | 7)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_KPROW0 (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_DBG_MON_B1 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_KPCOL0 (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_DBG_MON_A32 (MTK_PIN_NO(154) | 7)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_KPCOL1 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_PWM_3 (MTK_PIN_NO(155) | 2)
+#define PINMUX_GPIO155__FUNC_USB_DRVVBUS (MTK_PIN_NO(155) | 3)
+#define PINMUX_GPIO155__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(155) | 4)
+#define PINMUX_GPIO155__FUNC_LCM1_RST (MTK_PIN_NO(155) | 5)
+#define PINMUX_GPIO155__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(155) | 6)
+#define PINMUX_GPIO155__FUNC_DBG_MON_B0 (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_SPI1_A_CLK (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_SCP_SPI1_A_CK (MTK_PIN_NO(156) | 2)
+#define PINMUX_GPIO156__FUNC_MRG_CLK (MTK_PIN_NO(156) | 3)
+#define PINMUX_GPIO156__FUNC_AGPS_SYNC (MTK_PIN_NO(156) | 4)
+#define PINMUX_GPIO156__FUNC_SCL12 (MTK_PIN_NO(156) | 5)
+#define PINMUX_GPIO156__FUNC_DBG_MON_B3 (MTK_PIN_NO(156) | 7)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_SPI1_A_CSB (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_SCP_SPI1_A_CS (MTK_PIN_NO(157) | 2)
+#define PINMUX_GPIO157__FUNC_MRG_SYNC (MTK_PIN_NO(157) | 3)
+#define PINMUX_GPIO157__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(157) | 4)
+#define PINMUX_GPIO157__FUNC_SDA12 (MTK_PIN_NO(157) | 5)
+#define PINMUX_GPIO157__FUNC_DBG_MON_B4 (MTK_PIN_NO(157) | 7)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_SPI1_A_MI (MTK_PIN_NO(158) | 1)
+#define PINMUX_GPIO158__FUNC_SCP_SPI1_A_MI (MTK_PIN_NO(158) | 2)
+#define PINMUX_GPIO158__FUNC_MRG_DI (MTK_PIN_NO(158) | 3)
+#define PINMUX_GPIO158__FUNC_PTA_RXD (MTK_PIN_NO(158) | 4)
+#define PINMUX_GPIO158__FUNC_SCL13 (MTK_PIN_NO(158) | 5)
+#define PINMUX_GPIO158__FUNC_DBG_MON_B5 (MTK_PIN_NO(158) | 7)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_SPI1_A_MO (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_SCP_SPI1_A_MO (MTK_PIN_NO(159) | 2)
+#define PINMUX_GPIO159__FUNC_MRG_DO (MTK_PIN_NO(159) | 3)
+#define PINMUX_GPIO159__FUNC_PTA_TXD (MTK_PIN_NO(159) | 4)
+#define PINMUX_GPIO159__FUNC_SDA13 (MTK_PIN_NO(159) | 5)
+#define PINMUX_GPIO159__FUNC_DBG_MON_B6 (MTK_PIN_NO(159) | 7)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_SCL3 (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_SCP_SCL0 (MTK_PIN_NO(160) | 2)
+#define PINMUX_GPIO160__FUNC_SCP_SCL1 (MTK_PIN_NO(160) | 3)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_SDA3 (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_SCP_SDA0 (MTK_PIN_NO(161) | 2)
+#define PINMUX_GPIO161__FUNC_SCP_SDA1 (MTK_PIN_NO(161) | 3)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_ANT_SEL0 (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(162) | 2)
+#define PINMUX_GPIO162__FUNC_DBG_MON_B7 (MTK_PIN_NO(162) | 7)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_ANT_SEL1 (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_GPS_L5_ELNA_EN (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_DBG_MON_B8 (MTK_PIN_NO(163) | 7)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_ANT_SEL2 (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_SCP_SPI1_B_CK (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_TP_URXD1_AO (MTK_PIN_NO(164) | 3)
+#define PINMUX_GPIO164__FUNC_UCTS0 (MTK_PIN_NO(164) | 5)
+#define PINMUX_GPIO164__FUNC_DBG_MON_B9 (MTK_PIN_NO(164) | 7)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_ANT_SEL3 (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_SCP_SPI1_B_CS (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_TP_UTXD1_AO (MTK_PIN_NO(165) | 3)
+#define PINMUX_GPIO165__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(165) | 4)
+#define PINMUX_GPIO165__FUNC_URTS0 (MTK_PIN_NO(165) | 5)
+#define PINMUX_GPIO165__FUNC_DBG_MON_B10 (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_ANT_SEL4 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_SCP_SPI1_B_MI (MTK_PIN_NO(166) | 2)
+#define PINMUX_GPIO166__FUNC_TP_URXD2_AO (MTK_PIN_NO(166) | 3)
+#define PINMUX_GPIO166__FUNC_SRCLKENAI1 (MTK_PIN_NO(166) | 4)
+#define PINMUX_GPIO166__FUNC_UCTS1 (MTK_PIN_NO(166) | 5)
+#define PINMUX_GPIO166__FUNC_DBG_MON_B11 (MTK_PIN_NO(166) | 7)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_ANT_SEL5 (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_SCP_SPI1_B_MO (MTK_PIN_NO(167) | 2)
+#define PINMUX_GPIO167__FUNC_TP_UTXD2_AO (MTK_PIN_NO(167) | 3)
+#define PINMUX_GPIO167__FUNC_SRCLKENAI0 (MTK_PIN_NO(167) | 4)
+#define PINMUX_GPIO167__FUNC_URTS1 (MTK_PIN_NO(167) | 5)
+#define PINMUX_GPIO167__FUNC_DBG_MON_B12 (MTK_PIN_NO(167) | 7)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_ANT_SEL6 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_SPI0_B_CLK (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_TP_UCTS1_AO (MTK_PIN_NO(168) | 3)
+#define PINMUX_GPIO168__FUNC_KPCOL2 (MTK_PIN_NO(168) | 4)
+#define PINMUX_GPIO168__FUNC_MD_UCTS0 (MTK_PIN_NO(168) | 5)
+#define PINMUX_GPIO168__FUNC_SCL12 (MTK_PIN_NO(168) | 6)
+#define PINMUX_GPIO168__FUNC_DBG_MON_B13 (MTK_PIN_NO(168) | 7)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_ANT_SEL7 (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_SPI0_B_CSB (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_TP_URTS1_AO (MTK_PIN_NO(169) | 3)
+#define PINMUX_GPIO169__FUNC_KPROW2 (MTK_PIN_NO(169) | 4)
+#define PINMUX_GPIO169__FUNC_MD_URTS0 (MTK_PIN_NO(169) | 5)
+#define PINMUX_GPIO169__FUNC_SDA12 (MTK_PIN_NO(169) | 6)
+#define PINMUX_GPIO169__FUNC_DBG_MON_B14 (MTK_PIN_NO(169) | 7)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_ANT_SEL8 (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_SPI0_B_MI (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_TP_UCTS2_AO (MTK_PIN_NO(170) | 3)
+#define PINMUX_GPIO170__FUNC_SRCLKENAI1 (MTK_PIN_NO(170) | 4)
+#define PINMUX_GPIO170__FUNC_MD_UCTS1 (MTK_PIN_NO(170) | 5)
+#define PINMUX_GPIO170__FUNC_SCL13 (MTK_PIN_NO(170) | 6)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_ANT_SEL9 (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_SPI0_B_MO (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_TP_URTS2_AO (MTK_PIN_NO(171) | 3)
+#define PINMUX_GPIO171__FUNC_SRCLKENAI0 (MTK_PIN_NO(171) | 4)
+#define PINMUX_GPIO171__FUNC_MD_URTS1 (MTK_PIN_NO(171) | 5)
+#define PINMUX_GPIO171__FUNC_SDA13 (MTK_PIN_NO(171) | 6)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_CONN_TOP_CLK (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_AUXIF_CLK0 (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_DBG_MON_B18 (MTK_PIN_NO(172) | 7)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_CONN_TOP_DATA (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_AUXIF_ST0 (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_DBG_MON_B19 (MTK_PIN_NO(173) | 7)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_CONN_HRST_B (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_DBG_MON_B17 (MTK_PIN_NO(174) | 7)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_CONN_WB_PTA (MTK_PIN_NO(175) | 1)
+#define PINMUX_GPIO175__FUNC_DBG_MON_B20 (MTK_PIN_NO(175) | 7)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_CONN_BT_CLK (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_AUXIF_CLK1 (MTK_PIN_NO(176) | 2)
+#define PINMUX_GPIO176__FUNC_DBG_MON_B15 (MTK_PIN_NO(176) | 7)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_CONN_BT_DATA (MTK_PIN_NO(177) | 1)
+#define PINMUX_GPIO177__FUNC_AUXIF_ST1 (MTK_PIN_NO(177) | 2)
+#define PINMUX_GPIO177__FUNC_DBG_MON_B16 (MTK_PIN_NO(177) | 7)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(178) | 1)
+#define PINMUX_GPIO178__FUNC_DBG_MON_B21 (MTK_PIN_NO(178) | 7)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(179) | 1)
+#define PINMUX_GPIO179__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(179) | 2)
+#define PINMUX_GPIO179__FUNC_DBG_MON_B22 (MTK_PIN_NO(179) | 7)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(180) | 1)
+#define PINMUX_GPIO180__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(180) | 2)
+#define PINMUX_GPIO180__FUNC_DBG_MON_B23 (MTK_PIN_NO(180) | 7)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_CONN_WF_CTRL3 (MTK_PIN_NO(181) | 1)
+#define PINMUX_GPIO181__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(181) | 2)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_CONN_WF_CTRL4 (MTK_PIN_NO(182) | 1)
+#define PINMUX_GPIO182__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(182) | 2)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_MSDC0_CMD (MTK_PIN_NO(183) | 1)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_MSDC0_DAT0 (MTK_PIN_NO(184) | 1)
+
+#define PINMUX_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define PINMUX_GPIO185__FUNC_MSDC0_DAT2 (MTK_PIN_NO(185) | 1)
+
+#define PINMUX_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define PINMUX_GPIO186__FUNC_MSDC0_DAT4 (MTK_PIN_NO(186) | 1)
+
+#define PINMUX_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define PINMUX_GPIO187__FUNC_MSDC0_DAT6 (MTK_PIN_NO(187) | 1)
+
+#define PINMUX_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define PINMUX_GPIO188__FUNC_MSDC0_DAT1 (MTK_PIN_NO(188) | 1)
+
+#define PINMUX_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define PINMUX_GPIO189__FUNC_MSDC0_DAT5 (MTK_PIN_NO(189) | 1)
+
+#define PINMUX_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define PINMUX_GPIO190__FUNC_MSDC0_DAT7 (MTK_PIN_NO(190) | 1)
+
+#define PINMUX_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define PINMUX_GPIO191__FUNC_MSDC0_DSL (MTK_PIN_NO(191) | 1)
+#define PINMUX_GPIO191__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(191) | 2)
+#define PINMUX_GPIO191__FUNC_IDDIG (MTK_PIN_NO(191) | 3)
+#define PINMUX_GPIO191__FUNC_DMIC_CLK (MTK_PIN_NO(191) | 4)
+#define PINMUX_GPIO191__FUNC_DSI1_TE (MTK_PIN_NO(191) | 5)
+
+#define PINMUX_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define PINMUX_GPIO192__FUNC_MSDC0_CLK (MTK_PIN_NO(192) | 1)
+#define PINMUX_GPIO192__FUNC_GPS_L5_ELNA_EN (MTK_PIN_NO(192) | 2)
+#define PINMUX_GPIO192__FUNC_USB_DRVVBUS (MTK_PIN_NO(192) | 3)
+#define PINMUX_GPIO192__FUNC_DMIC_DAT (MTK_PIN_NO(192) | 4)
+#define PINMUX_GPIO192__FUNC_LCM1_RST (MTK_PIN_NO(192) | 5)
+
+#define PINMUX_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define PINMUX_GPIO193__FUNC_MSDC0_DAT3 (MTK_PIN_NO(193) | 1)
+
+#define PINMUX_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define PINMUX_GPIO194__FUNC_MSDC0_RSTB (MTK_PIN_NO(194) | 1)
+
+#define PINMUX_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define PINMUX_GPIO195__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(195) | 1)
+#define PINMUX_GPIO195__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(195) | 2)
+
+#define PINMUX_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define PINMUX_GPIO196__FUNC_AUD_DAT_MOSI2 (MTK_PIN_NO(196) | 1)
+#define PINMUX_GPIO196__FUNC_DBG_MON_B27 (MTK_PIN_NO(196) | 7)
+
+#define PINMUX_GPIO197__FUNC_GPIO197 (MTK_PIN_NO(197) | 0)
+#define PINMUX_GPIO197__FUNC_AUD_NLE_MOSI1 (MTK_PIN_NO(197) | 1)
+#define PINMUX_GPIO197__FUNC_AUD_CLK_MISO (MTK_PIN_NO(197) | 2)
+#define PINMUX_GPIO197__FUNC_I2S2_MCK (MTK_PIN_NO(197) | 3)
+#define PINMUX_GPIO197__FUNC_I2S6_MCK (MTK_PIN_NO(197) | 4)
+#define PINMUX_GPIO197__FUNC_I2S8_MCK (MTK_PIN_NO(197) | 5)
+#define PINMUX_GPIO197__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(197) | 6)
+#define PINMUX_GPIO197__FUNC_DBG_MON_B28 (MTK_PIN_NO(197) | 7)
+
+#define PINMUX_GPIO198__FUNC_GPIO198 (MTK_PIN_NO(198) | 0)
+#define PINMUX_GPIO198__FUNC_AUD_NLE_MOSI0 (MTK_PIN_NO(198) | 1)
+#define PINMUX_GPIO198__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(198) | 2)
+#define PINMUX_GPIO198__FUNC_I2S2_BCK (MTK_PIN_NO(198) | 3)
+#define PINMUX_GPIO198__FUNC_I2S6_BCK (MTK_PIN_NO(198) | 4)
+#define PINMUX_GPIO198__FUNC_I2S8_BCK (MTK_PIN_NO(198) | 5)
+#define PINMUX_GPIO198__FUNC_DBG_MON_B29 (MTK_PIN_NO(198) | 7)
+
+#define PINMUX_GPIO199__FUNC_GPIO199 (MTK_PIN_NO(199) | 0)
+#define PINMUX_GPIO199__FUNC_AUD_DAT_MISO2 (MTK_PIN_NO(199) | 1)
+#define PINMUX_GPIO199__FUNC_I2S2_DI2 (MTK_PIN_NO(199) | 3)
+#define PINMUX_GPIO199__FUNC_DBG_MON_B32 (MTK_PIN_NO(199) | 7)
+
+#define PINMUX_GPIO200__FUNC_GPIO200 (MTK_PIN_NO(200) | 0)
+#define PINMUX_GPIO200__FUNC_SCL6 (MTK_PIN_NO(200) | 1)
+#define PINMUX_GPIO200__FUNC_SCP_SCL0 (MTK_PIN_NO(200) | 2)
+#define PINMUX_GPIO200__FUNC_SCP_SCL1 (MTK_PIN_NO(200) | 3)
+#define PINMUX_GPIO200__FUNC_SCL_6306 (MTK_PIN_NO(200) | 4)
+
+#define PINMUX_GPIO201__FUNC_GPIO201 (MTK_PIN_NO(201) | 0)
+#define PINMUX_GPIO201__FUNC_SDA6 (MTK_PIN_NO(201) | 1)
+#define PINMUX_GPIO201__FUNC_SCP_SDA0 (MTK_PIN_NO(201) | 2)
+#define PINMUX_GPIO201__FUNC_SCP_SDA1 (MTK_PIN_NO(201) | 3)
+#define PINMUX_GPIO201__FUNC_SDA_6306 (MTK_PIN_NO(201) | 4)
+
+#define PINMUX_GPIO202__FUNC_GPIO202 (MTK_PIN_NO(202) | 0)
+#define PINMUX_GPIO202__FUNC_SCL5 (MTK_PIN_NO(202) | 1)
+
+#define PINMUX_GPIO203__FUNC_GPIO203 (MTK_PIN_NO(203) | 0)
+#define PINMUX_GPIO203__FUNC_SDA5 (MTK_PIN_NO(203) | 1)
+
+#define PINMUX_GPIO204__FUNC_GPIO204 (MTK_PIN_NO(204) | 0)
+#define PINMUX_GPIO204__FUNC_SCL0 (MTK_PIN_NO(204) | 1)
+#define PINMUX_GPIO204__FUNC_SPI4_C_CLK (MTK_PIN_NO(204) | 2)
+#define PINMUX_GPIO204__FUNC_SPI7_B_CLK (MTK_PIN_NO(204) | 3)
+
+#define PINMUX_GPIO205__FUNC_GPIO205 (MTK_PIN_NO(205) | 0)
+#define PINMUX_GPIO205__FUNC_SDA0 (MTK_PIN_NO(205) | 1)
+#define PINMUX_GPIO205__FUNC_SPI4_C_CSB (MTK_PIN_NO(205) | 2)
+#define PINMUX_GPIO205__FUNC_SPI7_B_CSB (MTK_PIN_NO(205) | 3)
+
+#define PINMUX_GPIO206__FUNC_GPIO206 (MTK_PIN_NO(206) | 0)
+#define PINMUX_GPIO206__FUNC_SRCLKENA0 (MTK_PIN_NO(206) | 1)
+
+#define PINMUX_GPIO207__FUNC_GPIO207 (MTK_PIN_NO(207) | 0)
+#define PINMUX_GPIO207__FUNC_SRCLKENA1 (MTK_PIN_NO(207) | 1)
+
+#define PINMUX_GPIO208__FUNC_GPIO208 (MTK_PIN_NO(208) | 0)
+#define PINMUX_GPIO208__FUNC_WATCHDOG (MTK_PIN_NO(208) | 1)
+
+#define PINMUX_GPIO209__FUNC_GPIO209 (MTK_PIN_NO(209) | 0)
+#define PINMUX_GPIO209__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(209) | 1)
+#define PINMUX_GPIO209__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(209) | 2)
+
+#define PINMUX_GPIO210__FUNC_GPIO210 (MTK_PIN_NO(210) | 0)
+#define PINMUX_GPIO210__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(210) | 1)
+
+#define PINMUX_GPIO211__FUNC_GPIO211 (MTK_PIN_NO(211) | 0)
+#define PINMUX_GPIO211__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(211) | 1)
+#define PINMUX_GPIO211__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(211) | 2)
+
+#define PINMUX_GPIO212__FUNC_GPIO212 (MTK_PIN_NO(212) | 0)
+#define PINMUX_GPIO212__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(212) | 1)
+
+#define PINMUX_GPIO213__FUNC_GPIO213 (MTK_PIN_NO(213) | 0)
+#define PINMUX_GPIO213__FUNC_RTC32K_CK (MTK_PIN_NO(213) | 1)
+
+#define PINMUX_GPIO214__FUNC_GPIO214 (MTK_PIN_NO(214) | 0)
+#define PINMUX_GPIO214__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(214) | 1)
+#define PINMUX_GPIO214__FUNC_I2S1_MCK (MTK_PIN_NO(214) | 3)
+#define PINMUX_GPIO214__FUNC_I2S7_MCK (MTK_PIN_NO(214) | 4)
+#define PINMUX_GPIO214__FUNC_I2S9_MCK (MTK_PIN_NO(214) | 5)
+#define PINMUX_GPIO214__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(214) | 6)
+
+#define PINMUX_GPIO215__FUNC_GPIO215 (MTK_PIN_NO(215) | 0)
+#define PINMUX_GPIO215__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(215) | 1)
+#define PINMUX_GPIO215__FUNC_I2S1_BCK (MTK_PIN_NO(215) | 3)
+#define PINMUX_GPIO215__FUNC_I2S7_BCK (MTK_PIN_NO(215) | 4)
+#define PINMUX_GPIO215__FUNC_I2S9_BCK (MTK_PIN_NO(215) | 5)
+#define PINMUX_GPIO215__FUNC_DBG_MON_B24 (MTK_PIN_NO(215) | 7)
+
+#define PINMUX_GPIO216__FUNC_GPIO216 (MTK_PIN_NO(216) | 0)
+#define PINMUX_GPIO216__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(216) | 1)
+#define PINMUX_GPIO216__FUNC_I2S1_LRCK (MTK_PIN_NO(216) | 3)
+#define PINMUX_GPIO216__FUNC_I2S7_LRCK (MTK_PIN_NO(216) | 4)
+#define PINMUX_GPIO216__FUNC_I2S9_LRCK (MTK_PIN_NO(216) | 5)
+#define PINMUX_GPIO216__FUNC_DBG_MON_B25 (MTK_PIN_NO(216) | 7)
+
+#define PINMUX_GPIO217__FUNC_GPIO217 (MTK_PIN_NO(217) | 0)
+#define PINMUX_GPIO217__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(217) | 1)
+#define PINMUX_GPIO217__FUNC_I2S1_DO (MTK_PIN_NO(217) | 3)
+#define PINMUX_GPIO217__FUNC_I2S7_DO (MTK_PIN_NO(217) | 4)
+#define PINMUX_GPIO217__FUNC_I2S9_DO (MTK_PIN_NO(217) | 5)
+#define PINMUX_GPIO217__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(217) | 6)
+#define PINMUX_GPIO217__FUNC_DBG_MON_B26 (MTK_PIN_NO(217) | 7)
+
+#define PINMUX_GPIO218__FUNC_GPIO218 (MTK_PIN_NO(218) | 0)
+#define PINMUX_GPIO218__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(218) | 1)
+#define PINMUX_GPIO218__FUNC_VOW_DAT_MISO (MTK_PIN_NO(218) | 2)
+#define PINMUX_GPIO218__FUNC_I2S2_LRCK (MTK_PIN_NO(218) | 3)
+#define PINMUX_GPIO218__FUNC_I2S6_LRCK (MTK_PIN_NO(218) | 4)
+#define PINMUX_GPIO218__FUNC_I2S8_LRCK (MTK_PIN_NO(218) | 5)
+#define PINMUX_GPIO218__FUNC_DBG_MON_B30 (MTK_PIN_NO(218) | 7)
+
+#define PINMUX_GPIO219__FUNC_GPIO219 (MTK_PIN_NO(219) | 0)
+#define PINMUX_GPIO219__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(219) | 1)
+#define PINMUX_GPIO219__FUNC_VOW_CLK_MISO (MTK_PIN_NO(219) | 2)
+#define PINMUX_GPIO219__FUNC_I2S2_DI (MTK_PIN_NO(219) | 3)
+#define PINMUX_GPIO219__FUNC_I2S6_DI (MTK_PIN_NO(219) | 4)
+#define PINMUX_GPIO219__FUNC_I2S8_DI (MTK_PIN_NO(219) | 5)
+#define PINMUX_GPIO219__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(219) | 6)
+#define PINMUX_GPIO219__FUNC_DBG_MON_B31 (MTK_PIN_NO(219) | 7)
+
+#endif /* __MT6893_PINFUNC_H */
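
For reference, a board devicetree consumes these macros in pinctrl child nodes of the pin controller; each define packs the pin number (via MTK_PIN_NO()) together with the mux mode in the low bits, as visible in the table above. A minimal sketch, assuming the usual MediaTek pinmux binding; the node names and pin choices below are illustrative only, not part of this patch:

&pio {
	uart0_pins: uart0-pins {
		pins-rx {
			/* GPIO92 in mode 1 = URXD0, per the table above */
			pinmux = <PINMUX_GPIO92__FUNC_URXD0>;
			bias-pull-up;
		};
		pins-tx {
			/* GPIO93 in mode 1 = UTXD0 */
			pinmux = <PINMUX_GPIO93__FUNC_UTXD0>;
		};
	};
};
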
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
index d12eac9b3eeb..9f100b18a676 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
@@ -320,7 +320,7 @@
/* Attention: GPIO 90 is used to switch between PCIe@1,0 and
* SATA functions. i.e. output-high: PCIe, output-low: SATA
*/
- asm_sel {
+ asm-sel-hog {
gpio-hog;
gpios = <90 GPIO_ACTIVE_HIGH>;
output-high;
diff --git a/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4-2g5.dts b/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4-2g5.dts
new file mode 100644
index 000000000000..53de9c113f60
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4-2g5.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+/dts-v1/;
+
+#include "mt7988a-bananapi-bpi-r4.dtsi"
+
+/ {
+ compatible = "bananapi,bpi-r4-2g5", "bananapi,bpi-r4", "mediatek,mt7988a";
+ model = "Banana Pi BPI-R4 (1x SFP+, 1x 2.5GbE)";
+ chassis-type = "embedded";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dts b/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dts
index 6623112c24c7..36bd1ef2efab 100644
--- a/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dts
@@ -2,408 +2,18 @@
/dts-v1/;
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/regulator/richtek,rt5190a-regulator.h>
-
-#include "mt7988a.dtsi"
+#include "mt7988a-bananapi-bpi-r4.dtsi"
/ {
compatible = "bananapi,bpi-r4", "mediatek,mt7988a";
- model = "Banana Pi BPI-R4";
+ model = "Banana Pi BPI-R4 (2x SFP+)";
chassis-type = "embedded";
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
- reg_1p8v: regulator-1p8v {
- compatible = "regulator-fixed";
- regulator-name = "fixed-1.8V";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- reg_3p3v: regulator-3p3v {
- compatible = "regulator-fixed";
- regulator-name = "fixed-3.3V";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-boot-on;
- regulator-always-on;
- };
};
-&cpu0 {
- proc-supply = <&rt5190_buck3>;
-};
-
-&cpu1 {
- proc-supply = <&rt5190_buck3>;
-};
-
-&cpu2 {
- proc-supply = <&rt5190_buck3>;
-};
-
-&cpu3 {
- proc-supply = <&rt5190_buck3>;
-};
-
-&cpu_thermal {
- trips {
- cpu_trip_hot: hot {
- temperature = <120000>;
- hysteresis = <2000>;
- type = "hot";
- };
-
- cpu_trip_active_high: active-high {
- temperature = <115000>;
- hysteresis = <2000>;
- type = "active";
- };
-
- cpu_trip_active_med: active-med {
- temperature = <85000>;
- hysteresis = <2000>;
- type = "active";
- };
-
- cpu_trip_active_low: active-low {
- temperature = <40000>;
- hysteresis = <2000>;
- type = "active";
- };
- };
-};
-
-&i2c0 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins>;
- status = "okay";
-
- rt5190a_64: rt5190a@64 {
- compatible = "richtek,rt5190a";
- reg = <0x64>;
- vin2-supply = <&rt5190_buck1>;
- vin3-supply = <&rt5190_buck1>;
- vin4-supply = <&rt5190_buck1>;
-
- regulators {
- rt5190_buck1: buck1 {
- regulator-name = "rt5190a-buck1";
- regulator-min-microvolt = <5090000>;
- regulator-max-microvolt = <5090000>;
- regulator-allowed-modes =
- <RT5190A_OPMODE_AUTO>, <RT5190A_OPMODE_FPWM>;
- regulator-boot-on;
- regulator-always-on;
- };
- buck2 {
- regulator-name = "vcore";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1400000>;
- regulator-boot-on;
- regulator-always-on;
- };
- rt5190_buck3: buck3 {
- regulator-name = "vproc";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1400000>;
- regulator-boot-on;
- };
- buck4 {
- regulator-name = "rt5190a-buck4";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-allowed-modes =
- <RT5190A_OPMODE_AUTO>, <RT5190A_OPMODE_FPWM>;
- regulator-boot-on;
- regulator-always-on;
- };
- ldo {
- regulator-name = "rt5190a-ldo";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-boot-on;
- regulator-always-on;
- };
- };
- };
-};
-
-&i2c2 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c2_1_pins>;
- status = "okay";
-
- pca9545: i2c-mux@70 {
- compatible = "nxp,pca9545";
- reg = <0x70>;
- reset-gpios = <&pio 5 GPIO_ACTIVE_LOW>;
+&pca9545 {
+ i2c_sfp2: i2c@2 {
#address-cells = <1>;
#size-cells = <0>;
-
- i2c@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- pcf8563: rtc@51 {
- compatible = "nxp,pcf8563";
- reg = <0x51>;
- #clock-cells = <0>;
- };
-
- eeprom@57 {
- compatible = "atmel,24c02";
- reg = <0x57>;
- size = <256>;
- };
-
- };
-
- i2c_sfp1: i2c@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- };
-
- i2c_sfp2: i2c@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
- };
- };
-};
-
-/* mPCIe SIM2 */
-&pcie0 {
- status = "okay";
-};
-
-/* mPCIe SIM3 */
-&pcie1 {
- status = "okay";
-};
-
-/* M.2 key-B SIM1 */
-&pcie2 {
- status = "okay";
-};
-
-/* M.2 key-M SSD */
-&pcie3 {
- status = "okay";
-};
-
-&pio {
- mdio0_pins: mdio0-pins {
- mux {
- function = "eth";
- groups = "mdc_mdio0";
- };
-
- conf {
- pins = "SMI_0_MDC", "SMI_0_MDIO";
- drive-strength = <8>;
- };
- };
-
- i2c0_pins: i2c0-g0-pins {
- mux {
- function = "i2c";
- groups = "i2c0_1";
- };
- };
-
- i2c1_pins: i2c1-g0-pins {
- mux {
- function = "i2c";
- groups = "i2c1_0";
- };
- };
-
- i2c1_sfp_pins: i2c1-sfp-g0-pins {
- mux {
- function = "i2c";
- groups = "i2c1_sfp";
- };
- };
-
- i2c2_0_pins: i2c2-g0-pins {
- mux {
- function = "i2c";
- groups = "i2c2_0";
- };
+ reg = <2>;
};
-
- i2c2_1_pins: i2c2-g1-pins {
- mux {
- function = "i2c";
- groups = "i2c2_1";
- };
- };
-
- gbe0_led0_pins: gbe0-led0-pins {
- mux {
- function = "led";
- groups = "gbe0_led0";
- };
- };
-
- gbe1_led0_pins: gbe1-led0-pins {
- mux {
- function = "led";
- groups = "gbe1_led0";
- };
- };
-
- gbe2_led0_pins: gbe2-led0-pins {
- mux {
- function = "led";
- groups = "gbe2_led0";
- };
- };
-
- gbe3_led0_pins: gbe3-led0-pins {
- mux {
- function = "led";
- groups = "gbe3_led0";
- };
- };
-
- gbe0_led1_pins: gbe0-led1-pins {
- mux {
- function = "led";
- groups = "gbe0_led1";
- };
- };
-
- gbe1_led1_pins: gbe1-led1-pins {
- mux {
- function = "led";
- groups = "gbe1_led1";
- };
- };
-
- gbe2_led1_pins: gbe2-led1-pins {
- mux {
- function = "led";
- groups = "gbe2_led1";
- };
- };
-
- gbe3_led1_pins: gbe3-led1-pins {
- mux {
- function = "led";
- groups = "gbe3_led1";
- };
- };
-
- i2p5gbe_led0_pins: 2p5gbe-led0-pins {
- mux {
- function = "led";
- groups = "2p5gbe_led0";
- };
- };
-
- i2p5gbe_led1_pins: 2p5gbe-led1-pins {
- mux {
- function = "led";
- groups = "2p5gbe_led1";
- };
- };
-
- mmc0_pins_emmc_45: mmc0-emmc-45-pins {
- mux {
- function = "flash";
- groups = "emmc_45";
- };
- };
-
- mmc0_pins_emmc_51: mmc0-emmc-51-pins {
- mux {
- function = "flash";
- groups = "emmc_51";
- };
- };
-
- mmc0_pins_sdcard: mmc0-sdcard-pins {
- mux {
- function = "flash";
- groups = "sdcard";
- };
- };
-
- uart0_pins: uart0-pins {
- mux {
- function = "uart";
- groups = "uart0";
- };
- };
-
- snfi_pins: snfi-pins {
- mux {
- function = "flash";
- groups = "snfi";
- };
- };
-
- spi0_pins: spi0-pins {
- mux {
- function = "spi";
- groups = "spi0";
- };
- };
-
- spi0_flash_pins: spi0-flash-pins {
- mux {
- function = "spi";
- groups = "spi0", "spi0_wp_hold";
- };
- };
-
- spi1_pins: spi1-pins {
- mux {
- function = "spi";
- groups = "spi1";
- };
- };
-
- spi2_pins: spi2-pins {
- mux {
- function = "spi";
- groups = "spi2";
- };
- };
-
- spi2_flash_pins: spi2-flash-pins {
- mux {
- function = "spi";
- groups = "spi2", "spi2_wp_hold";
- };
- };
-};
-
-&pwm {
- status = "okay";
-};
-
-&serial0 {
- status = "okay";
-};
-
-&ssusb1 {
- status = "okay";
-};
-
-&tphy {
- status = "okay";
-};
-
-&watchdog {
- status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dtsi b/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dtsi
new file mode 100644
index 000000000000..81ba045e0e0e
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dtsi
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/richtek,rt5190a-regulator.h>
+
+#include "mt7988a.dtsi"
+
+/ {
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ /* cooling level (0, 1, 2, 3) : (0% duty, 30% duty, 50% duty, 100% duty) */
+ cooling-levels = <0 80 128 255>;
+ #cooling-cells = <2>;
+ pwms = <&pwm 0 50000>;
+ status = "okay";
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+};
+
+&cpu0 {
+ proc-supply = <&rt5190_buck3>;
+};
+
+&cpu1 {
+ proc-supply = <&rt5190_buck3>;
+};
+
+&cpu2 {
+ proc-supply = <&rt5190_buck3>;
+};
+
+&cpu3 {
+ proc-supply = <&rt5190_buck3>;
+};
+
+&cpu_thermal {
+ trips {
+ cpu_trip_hot: hot {
+ temperature = <120000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_trip_active_high: active-high {
+ temperature = <115000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu_trip_active_med: active-med {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu_trip_active_low: active-low {
+ temperature = <40000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map-cpu-active-high {
+			/* active: set fan to cooling level 3 (100% duty) */
+ cooling-device = <&fan 3 3>;
+ trip = <&cpu_trip_active_high>;
+ };
+
+ map-cpu-active-med {
+			/* active: set fan to cooling level 2 (50% duty) */
+ cooling-device = <&fan 2 2>;
+ trip = <&cpu_trip_active_med>;
+ };
+
+ map-cpu-active-low {
+			/* active: set fan to cooling level 1 (30% duty) */
+ cooling-device = <&fan 1 1>;
+ trip = <&cpu_trip_active_low>;
+ };
+ };
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ status = "okay";
+
+ rt5190a_64: rt5190a@64 {
+ compatible = "richtek,rt5190a";
+ reg = <0x64>;
+ vin2-supply = <&rt5190_buck1>;
+ vin3-supply = <&rt5190_buck1>;
+ vin4-supply = <&rt5190_buck1>;
+
+ regulators {
+ rt5190_buck1: buck1 {
+ regulator-name = "rt5190a-buck1";
+ regulator-min-microvolt = <5090000>;
+ regulator-max-microvolt = <5090000>;
+ regulator-allowed-modes =
+ <RT5190A_OPMODE_AUTO>, <RT5190A_OPMODE_FPWM>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ buck2 {
+ regulator-name = "vcore";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ rt5190_buck3: buck3 {
+ regulator-name = "vproc";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-boot-on;
+ };
+ buck4 {
+ regulator-name = "rt5190a-buck4";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-allowed-modes =
+ <RT5190A_OPMODE_AUTO>, <RT5190A_OPMODE_FPWM>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ ldo {
+ regulator-name = "rt5190a-ldo";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_1_pins>;
+ status = "okay";
+
+ pca9545: i2c-mux@70 {
+ compatible = "nxp,pca9545";
+ reg = <0x70>;
+ reset-gpios = <&pio 5 GPIO_ACTIVE_LOW>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ pcf8563: rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ };
+
+ eeprom@57 {
+ compatible = "atmel,24c02";
+ reg = <0x57>;
+ size = <256>;
+ };
+
+ };
+
+ i2c_sfp1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ };
+};
+
+/* mPCIe SIM2 */
+&pcie0 {
+ status = "okay";
+};
+
+/* mPCIe SIM3 */
+&pcie1 {
+ status = "okay";
+};
+
+/* M.2 key-B SIM1 */
+&pcie2 {
+ status = "okay";
+};
+
+/* M.2 key-M SSD */
+&pcie3 {
+ status = "okay";
+};
+
+&pio {
+ mdio0_pins: mdio0-pins {
+ mux {
+ function = "eth";
+ groups = "mdc_mdio0";
+ };
+
+ conf {
+ pins = "SMI_0_MDC", "SMI_0_MDIO";
+ drive-strength = <8>;
+ };
+ };
+
+ i2c0_pins: i2c0-g0-pins {
+ mux {
+ function = "i2c";
+ groups = "i2c0_1";
+ };
+ };
+
+ i2c1_pins: i2c1-g0-pins {
+ mux {
+ function = "i2c";
+ groups = "i2c1_0";
+ };
+ };
+
+ i2c1_sfp_pins: i2c1-sfp-g0-pins {
+ mux {
+ function = "i2c";
+ groups = "i2c1_sfp";
+ };
+ };
+
+ i2c2_0_pins: i2c2-g0-pins {
+ mux {
+ function = "i2c";
+ groups = "i2c2_0";
+ };
+ };
+
+ i2c2_1_pins: i2c2-g1-pins {
+ mux {
+ function = "i2c";
+ groups = "i2c2_1";
+ };
+ };
+
+ gbe0_led0_pins: gbe0-led0-pins {
+ mux {
+ function = "led";
+ groups = "gbe0_led0";
+ };
+ };
+
+ gbe1_led0_pins: gbe1-led0-pins {
+ mux {
+ function = "led";
+ groups = "gbe1_led0";
+ };
+ };
+
+ gbe2_led0_pins: gbe2-led0-pins {
+ mux {
+ function = "led";
+ groups = "gbe2_led0";
+ };
+ };
+
+ gbe3_led0_pins: gbe3-led0-pins {
+ mux {
+ function = "led";
+ groups = "gbe3_led0";
+ };
+ };
+
+ gbe0_led1_pins: gbe0-led1-pins {
+ mux {
+ function = "led";
+ groups = "gbe0_led1";
+ };
+ };
+
+ gbe1_led1_pins: gbe1-led1-pins {
+ mux {
+ function = "led";
+ groups = "gbe1_led1";
+ };
+ };
+
+ gbe2_led1_pins: gbe2-led1-pins {
+ mux {
+ function = "led";
+ groups = "gbe2_led1";
+ };
+ };
+
+ gbe3_led1_pins: gbe3-led1-pins {
+ mux {
+ function = "led";
+ groups = "gbe3_led1";
+ };
+ };
+
+ i2p5gbe_led0_pins: 2p5gbe-led0-pins {
+ mux {
+ function = "led";
+ groups = "2p5gbe_led0";
+ };
+ };
+
+ i2p5gbe_led1_pins: 2p5gbe-led1-pins {
+ mux {
+ function = "led";
+ groups = "2p5gbe_led1";
+ };
+ };
+
+ mmc0_pins_emmc_45: mmc0-emmc-45-pins {
+ mux {
+ function = "flash";
+ groups = "emmc_45";
+ };
+ };
+
+ mmc0_pins_emmc_51: mmc0-emmc-51-pins {
+ mux {
+ function = "flash";
+ groups = "emmc_51";
+ };
+ };
+
+ mmc0_pins_sdcard: mmc0-sdcard-pins {
+ mux {
+ function = "flash";
+ groups = "sdcard";
+ };
+ };
+
+ snfi_pins: snfi-pins {
+ mux {
+ function = "flash";
+ groups = "snfi";
+ };
+ };
+
+ spi0_pins: spi0-pins {
+ mux {
+ function = "spi";
+ groups = "spi0";
+ };
+ };
+
+ spi0_flash_pins: spi0-flash-pins {
+ mux {
+ function = "spi";
+ groups = "spi0", "spi0_wp_hold";
+ };
+ };
+
+ spi2_pins: spi2-pins {
+ mux {
+ function = "spi";
+ groups = "spi2";
+ };
+ };
+
+ spi2_flash_pins: spi2-flash-pins {
+ mux {
+ function = "spi";
+ groups = "spi2", "spi2_wp_hold";
+ };
+ };
+};
+
+&pwm {
+ status = "okay";
+};
+
+&serial0 {
+ status = "okay";
+};
+
+&spi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_flash_pins>;
+ status = "okay";
+
+ spi_nand: flash@0 {
+ compatible = "spi-nand";
+ reg = <0>;
+ spi-max-frequency = <52000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ };
+};
+
+&spi1 {
+ status = "okay";
+};
+
+&spi_nand {
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "bl2";
+ reg = <0x0 0x200000>;
+ read-only;
+ };
+ };
+};
+
+&ssusb1 {
+ status = "okay";
+};
+
+&tphy {
+ status = "okay";
+};
+
+&watchdog {
+ status = "okay";
+};
+
+&xsphy {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt7988a.dtsi b/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
index 88b56a24efca..c46b31f8d653 100644
--- a/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
@@ -209,6 +209,20 @@
"pcie_wake_n3_0";
};
};
+
+ spi1_pins: spi1-pins {
+ mux {
+ function = "spi";
+ groups = "spi1";
+ };
+ };
+
+ uart0_pins: uart0-pins {
+ mux {
+ function = "uart";
+ groups = "uart0";
+ };
+ };
};
pwm: pwm@10048000 {
@@ -244,6 +258,8 @@
clocks = <&topckgen CLK_TOP_UART_SEL>,
<&infracfg CLK_INFRA_52M_UART0_CK>;
clock-names = "baud", "bus";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
status = "disabled";
};
@@ -311,6 +327,53 @@
status = "disabled";
};
+ spi0: spi@11007000 {
+ compatible = "mediatek,mt7988-spi-quad", "mediatek,spi-ipm";
+ reg = <0 0x11007000 0 0x100>;
+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topckgen CLK_TOP_MPLL_D2>,
+ <&topckgen CLK_TOP_SPI_SEL>,
+ <&infracfg CLK_INFRA_104M_SPI0>,
+ <&infracfg CLK_INFRA_66M_SPI0_HCK>;
+ clock-names = "parent-clk", "sel-clk", "spi-clk",
+ "hclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi1: spi@11008000 {
+ compatible = "mediatek,mt7988-spi-single", "mediatek,spi-ipm";
+ reg = <0 0x11008000 0 0x100>;
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topckgen CLK_TOP_MPLL_D2>,
+ <&topckgen CLK_TOP_SPIM_MST_SEL>,
+ <&infracfg CLK_INFRA_104M_SPI1>,
+ <&infracfg CLK_INFRA_66M_SPI1_HCK>;
+ clock-names = "parent-clk", "sel-clk", "spi-clk",
+ "hclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_pins>;
+ status = "disabled";
+ };
+
+ spi2: spi@11009000 {
+ compatible = "mediatek,mt7988-spi-quad", "mediatek,spi-ipm";
+ reg = <0 0x11009000 0 0x100>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topckgen CLK_TOP_MPLL_D2>,
+ <&topckgen CLK_TOP_SPI_SEL>,
+ <&infracfg CLK_INFRA_104M_SPI2_BCK>,
+ <&infracfg CLK_INFRA_66M_SPI2_HCK>;
+ clock-names = "parent-clk", "sel-clk", "spi-clk",
+ "hclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
lvts: lvts@1100a000 {
compatible = "mediatek,mt7988-lvts-ap";
#thermal-sensor-cells = <1>;
@@ -334,6 +397,8 @@
<&infracfg CLK_INFRA_133M_USB_HCK>,
<&infracfg CLK_INFRA_USB_XHCI>;
clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck", "xhci_ck";
+ phys = <&xphyu2port0 PHY_TYPE_USB2>,
+ <&xphyu3port0 PHY_TYPE_USB3>;
status = "disabled";
};
@@ -398,6 +463,9 @@
pinctrl-0 = <&pcie2_pins>;
status = "disabled";
+ phys = <&xphyu3port0 PHY_TYPE_PCIE>;
+ phy-names = "pcie-phy";
+
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &pcie_intc2 0>,
@@ -548,6 +616,37 @@
};
};
+
+ topmisc: system-controller@11d10084 {
+ compatible = "mediatek,mt7988-topmisc",
+ "syscon";
+ reg = <0 0x11d10084 0 0xff80>;
+ };
+
+ xsphy: xs-phy@11e10000 {
+ compatible = "mediatek,mt7988-xsphy",
+ "mediatek,xsphy";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ xphyu2port0: usb-phy@11e10000 {
+ reg = <0 0x11e10000 0 0x400>;
+ clocks = <&infracfg CLK_INFRA_USB_UTMI>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ };
+
+ xphyu3port0: usb-phy@11e13400 {
+ reg = <0 0x11e13400 0 0x500>;
+ clocks = <&infracfg CLK_INFRA_USB_PIPE>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ mediatek,syscon-type = <&topmisc 0x194 0>;
+ };
+ };
+
clock-controller@11f40000 {
compatible = "mediatek,mt7988-xfi-pll";
reg = <0 0x11f40000 0 0x1000>;
@@ -564,6 +663,22 @@
lvts_calibration: calib@918 {
reg = <0x918 0x28>;
};
+
+ phy_calibration_p0: calib@940 {
+ reg = <0x940 0x10>;
+ };
+
+ phy_calibration_p1: calib@954 {
+ reg = <0x954 0x10>;
+ };
+
+ phy_calibration_p2: calib@968 {
+ reg = <0x968 0x10>;
+ };
+
+ phy_calibration_p3: calib@97c {
+ reg = <0x97c 0x10>;
+ };
};
clock-controller@15000000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index e1495f1900a7..ecc6c4d6f1cd 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -105,6 +105,7 @@
btsco: bt-sco {
compatible = "linux,bt-sco";
+ #sound-dai-cells = <0>;
};
wifi_pwrseq: wifi-pwrseq {
@@ -259,14 +260,10 @@
};
};
};
+};
- ports {
- port {
- dsi_out: endpoint {
- remote-endpoint = <&panel_in>;
- };
- };
- };
+&dsi_out {
+ remote-endpoint = <&panel_in>;
};
&gic {
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 0aa34e5bbaaa..3c1fe80e64b9 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -1836,6 +1836,10 @@
phys = <&mipi_tx0>;
phy-names = "dphy";
status = "disabled";
+
+ port {
+ dsi_out: endpoint { };
+ };
};
dpi0: dpi@14015000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-ponyta-sku0.dts b/arch/arm64/boot/dts/mediatek/mt8186-corsola-ponyta-sku0.dts
new file mode 100644
index 000000000000..986498af4c70
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-ponyta-sku0.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2023 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8186-corsola-ponyta.dtsi"
+
+/ {
+ model = "Google Ponyta sku0 board";
+ compatible = "google,ponyta-sku0", "google,ponyta", "mediatek,mt8186";
+};
+
+&i2c2 {
+ trackpad@15 {
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-ponyta-sku1.dts b/arch/arm64/boot/dts/mediatek/mt8186-corsola-ponyta-sku1.dts
new file mode 100644
index 000000000000..ff5eea0ddeb4
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-ponyta-sku1.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2023 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8186-corsola-ponyta.dtsi"
+
+/ {
+ model = "Google Ponyta sku1 board";
+ compatible = "google,ponyta-sku1", "google,ponyta", "mediatek,mt8186";
+};
+
+&i2c2 {
+ trackpad@2c {
+ status = "disabled";
+ };
+};
+
+&usb_c1 {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-ponyta.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola-ponyta.dtsi
new file mode 100644
index 000000000000..0abf69077089
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-ponyta.dtsi
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2023 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8186-corsola-steelix.dtsi"
+
+&keyboard_controller {
+ function-row-physmap = <
+ MATRIX_KEY(0x00, 0x02, 0) /* T1 */
+ MATRIX_KEY(0x03, 0x02, 0) /* T2 */
+ MATRIX_KEY(0x02, 0x02, 0) /* T3 */
+ MATRIX_KEY(0x01, 0x02, 0) /* T4 */
+ MATRIX_KEY(0x03, 0x04, 0) /* T5 */
+ MATRIX_KEY(0x02, 0x04, 0) /* T6 */
+ MATRIX_KEY(0x01, 0x04, 0) /* T7 */
+ MATRIX_KEY(0x00, 0x04, 0) /* T8 */
+ MATRIX_KEY(0x00, 0x01, 0) /* T9 */
+ MATRIX_KEY(0x02, 0x09, 0) /* T10 */
+ MATRIX_KEY(0x01, 0x09, 0) /* T11 */
+ MATRIX_KEY(0x01, 0x05, 0) /* T12 */
+ >;
+
+ linux,keymap = <
+ CROS_STD_MAIN_KEYMAP
+ MATRIX_KEY(0x00, 0x02, KEY_BACK)
+ MATRIX_KEY(0x03, 0x02, KEY_REFRESH)
+ MATRIX_KEY(0x02, 0x02, KEY_ZOOM)
+ MATRIX_KEY(0x01, 0x02, KEY_SCALE)
+ MATRIX_KEY(0x03, 0x04, KEY_SYSRQ)
+ MATRIX_KEY(0x02, 0x04, KEY_BRIGHTNESSDOWN)
+ MATRIX_KEY(0x01, 0x04, KEY_BRIGHTNESSUP)
+ MATRIX_KEY(0x00, 0x04, KEY_PLAYPAUSE)
+ MATRIX_KEY(0x00, 0x01, KEY_MICMUTE)
+ MATRIX_KEY(0x02, 0x09, KEY_MUTE)
+ MATRIX_KEY(0x01, 0x09, KEY_VOLUMEDOWN)
+ MATRIX_KEY(0x01, 0x05, KEY_VOLUMEUP)
+ >;
+};
+
+&mt6366codec {
+ mediatek,dmic-mode = <1>; /* one-wire */
+};
+
+&sound {
+ model = "mt8186_rt1019_rt5682s";
+};
+
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie.dtsi
index 5ea8bdc00e81..a8e79c2791ba 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie.dtsi
@@ -375,51 +375,6 @@
"TP",
"TP";
- dpi_default_pins: dpi-default-pins {
- pins-cmd-dat {
- pinmux = <PINMUX_GPIO103__FUNC_GPIO103>,
- <PINMUX_GPIO104__FUNC_GPIO104>,
- <PINMUX_GPIO105__FUNC_GPIO105>,
- <PINMUX_GPIO106__FUNC_GPIO106>,
- <PINMUX_GPIO107__FUNC_GPIO107>,
- <PINMUX_GPIO108__FUNC_GPIO108>,
- <PINMUX_GPIO109__FUNC_GPIO109>,
- <PINMUX_GPIO110__FUNC_GPIO110>,
- <PINMUX_GPIO111__FUNC_GPIO111>,
- <PINMUX_GPIO112__FUNC_GPIO112>,
- <PINMUX_GPIO113__FUNC_GPIO113>,
- <PINMUX_GPIO114__FUNC_GPIO114>,
- <PINMUX_GPIO101__FUNC_GPIO101>,
- <PINMUX_GPIO100__FUNC_GPIO100>,
- <PINMUX_GPIO102__FUNC_GPIO102>,
- <PINMUX_GPIO99__FUNC_GPIO99>;
- drive-strength = <10>;
- output-low;
- };
- };
-
- dpi_func_pins: dpi-func-pins {
- pins-cmd-dat {
- pinmux = <PINMUX_GPIO103__FUNC_DPI_DATA0>,
- <PINMUX_GPIO104__FUNC_DPI_DATA1>,
- <PINMUX_GPIO105__FUNC_DPI_DATA2>,
- <PINMUX_GPIO106__FUNC_DPI_DATA3>,
- <PINMUX_GPIO107__FUNC_DPI_DATA4>,
- <PINMUX_GPIO108__FUNC_DPI_DATA5>,
- <PINMUX_GPIO109__FUNC_DPI_DATA6>,
- <PINMUX_GPIO110__FUNC_DPI_DATA7>,
- <PINMUX_GPIO111__FUNC_DPI_DATA8>,
- <PINMUX_GPIO112__FUNC_DPI_DATA9>,
- <PINMUX_GPIO113__FUNC_DPI_DATA10>,
- <PINMUX_GPIO114__FUNC_DPI_DATA11>,
- <PINMUX_GPIO101__FUNC_DPI_HSYNC>,
- <PINMUX_GPIO100__FUNC_DPI_VSYNC>,
- <PINMUX_GPIO102__FUNC_DPI_DE>,
- <PINMUX_GPIO99__FUNC_DPI_PCLK>;
- drive-strength = <10>;
- };
- };
-
en_pp6000_mipi_disp_150ma_fixed_pins: en_pp6000-mipi-disp-150ma-fixed-pins {
pins-en {
pinmux = <PINMUX_GPIO154__FUNC_GPIO154>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
index cebb134331fb..fc78a79d96e9 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
@@ -518,7 +518,6 @@
cap-sdio-irq;
no-mmc;
no-sd;
- non-removable;
vmmc-supply = <&pp3300_s3>;
vqmmc-supply = <&mt6366_vio18_reg>;
mmc-pwrseq = <&wifi_pwrseq>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8188-evb.dts b/arch/arm64/boot/dts/mediatek/mt8188-evb.dts
index f89835ac36f3..f4c207d65b87 100644
--- a/arch/arm64/boot/dts/mediatek/mt8188-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8188-evb.dts
@@ -331,7 +331,11 @@
interrupts-extended = <&pio 222 IRQ_TYPE_LEVEL_HIGH>;
};
-&scp {
+&scp_cluster {
+ status = "okay";
+};
+
+&scp_c0 {
memory-region = <&scp_mem_reserved>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188.dtsi b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
index 69a8423d3858..202478407727 100644
--- a/arch/arm64/boot/dts/mediatek/mt8188.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
@@ -1382,12 +1382,30 @@
clocks = <&infracfg_ao CLK_INFRA_AO_GCE2>;
};
- scp: scp@10500000 {
- compatible = "mediatek,mt8188-scp";
- reg = <0 0x10500000 0 0x100000>,
- <0 0x10720000 0 0xe0000>;
- reg-names = "sram", "cfg";
- interrupts = <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH 0>;
+ scp_cluster: scp@10720000 {
+ compatible = "mediatek,mt8188-scp-dual";
+ reg = <0 0x10720000 0 0xe0000>;
+ reg-names = "cfg";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x10500000 0x100000>;
+ status = "disabled";
+
+ scp_c0: scp@0 {
+ compatible = "mediatek,scp-core";
+ reg = <0x0 0xd0000>;
+ reg-names = "sram";
+ interrupts = <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH 0>;
+ status = "disabled";
+ };
+
+ scp_c1: scp@d0000 {
+ compatible = "mediatek,scp-core";
+ reg = <0xd0000 0x2f000>;
+ reg-names = "sram";
+ interrupts = <GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH 0>;
+ status = "disabled";
+ };
};
afe: audio-controller@10b10000 {
@@ -2224,6 +2242,118 @@
#clock-cells = <1>;
};
+ dma-controller@14001000 {
+ compatible = "mediatek,mt8188-mdp3-rdma";
+ reg = <0 0x14001000 0 0x1000>;
+ #dma-cells = <1>;
+ clocks = <&vppsys0 CLK_VPP0_MDP_RDMA>;
+ mboxes = <&gce0 13 CMDQ_THR_PRIO_1>,
+ <&gce0 14 CMDQ_THR_PRIO_1>,
+ <&gce0 16 CMDQ_THR_PRIO_1>,
+ <&gce0 21 CMDQ_THR_PRIO_1>,
+ <&gce0 22 CMDQ_THR_PRIO_1>;
+ iommus = <&vpp_iommu M4U_PORT_L4_MDP_RDMA>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS0>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0x1000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_VPP0_MDP_RDMA_SOF>,
+ <CMDQ_EVENT_VPP0_MDP_RDMA_FRAME_DONE>;
+ mediatek,scp = <&scp_c0>;
+ };
+
+ display@14002000 {
+ compatible = "mediatek,mt8188-mdp3-fg", "mediatek,mt8195-mdp3-fg";
+ reg = <0 0x14002000 0 0x1000>;
+ clocks = <&vppsys0 CLK_VPP0_MDP_FG>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0x2000 0x1000>;
+ };
+
+ display@14004000 {
+ compatible = "mediatek,mt8188-mdp3-hdr", "mediatek,mt8195-mdp3-hdr";
+ reg = <0 0x14004000 0 0x1000>;
+ clocks = <&vppsys0 CLK_VPP0_MDP_HDR>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0x4000 0x1000>;
+ };
+
+ display@14005000 {
+ compatible = "mediatek,mt8188-mdp3-aal", "mediatek,mt8195-mdp3-aal";
+ reg = <0 0x14005000 0 0x1000>;
+ interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&vppsys0 CLK_VPP0_MDP_AAL>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS0>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0x5000 0x1000>;
+ };
+
+ display@14006000 {
+ compatible = "mediatek,mt8188-mdp3-rsz", "mediatek,mt8183-mdp3-rsz";
+ reg = <0 0x14006000 0 0x1000>;
+ clocks = <&vppsys0 CLK_VPP0_MDP_RSZ>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0x6000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_VPP0_MDP_RSZ_IN_RSZ_SOF>,
+ <CMDQ_EVENT_VPP0_MDP_RSZ_FRAME_DONE>;
+ };
+
+ display@14007000 {
+ compatible = "mediatek,mt8188-mdp3-tdshp", "mediatek,mt8195-mdp3-tdshp";
+ reg = <0 0x14007000 0 0x1000>;
+ clocks = <&vppsys0 CLK_VPP0_MDP_TDSHP>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0x7000 0x1000>;
+ };
+
+ display@14008000 {
+ compatible = "mediatek,mt8188-mdp3-color", "mediatek,mt8195-mdp3-color";
+ reg = <0 0x14008000 0 0x1000>;
+ interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&vppsys0 CLK_VPP0_MDP_COLOR>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS0>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0x8000 0x1000>;
+ };
+
+ display@14009000 {
+ compatible = "mediatek,mt8188-mdp3-ovl", "mediatek,mt8195-mdp3-ovl";
+ reg = <0 0x14009000 0 0x1000>;
+ interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&vppsys0 CLK_VPP0_MDP_OVL>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS0>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0x9000 0x1000>;
+ iommus = <&vpp_iommu M4U_PORT_L4_MDP_OVL>;
+ };
+
+ display@1400a000 {
+ compatible = "mediatek,mt8188-mdp3-padding", "mediatek,mt8195-mdp3-padding";
+ reg = <0 0x1400a000 0 0x1000>;
+ clocks = <&vppsys0 CLK_VPP0_PADDING>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS0>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0xa000 0x1000>;
+ };
+
+ display@1400b000 {
+ compatible = "mediatek,mt8188-mdp3-tcc", "mediatek,mt8195-mdp3-tcc";
+ reg = <0 0x1400b000 0 0x1000>;
+ clocks = <&vppsys0 CLK_VPP0_MDP_TCC>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0xb000 0x1000>;
+ };
+
+ display@1400c000 {
+ compatible = "mediatek,mt8188-mdp3-wrot", "mediatek,mt8183-mdp3-wrot";
+ reg = <0 0x1400c000 0 0x1000>;
+ #dma-cells = <1>;
+ clocks = <&vppsys0 CLK_VPP0_MDP_WROT>;
+ iommus = <&vpp_iommu M4U_PORT_L4_MDP_WROT>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS0>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0xc000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_VPP0_MDP_WROT_SOF>,
+ <CMDQ_EVENT_VPP0_MDP_WROT_VIDO_WDONE>;
+ };
+
+ mutex@1400f000 {
+ compatible = "mediatek,mt8188-vpp-mutex";
+ reg = <0 0x1400f000 0 0x1000>;
+ interrupts = <GIC_SPI 592 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&vppsys0 CLK_VPP0_MUTEX>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS0>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0xf000 0x1000>;
+ };
+
vpp_smi_common: smi@14012000 {
compatible = "mediatek,mt8188-smi-common-vpp";
reg = <0 0x14012000 0 0x1000>;
@@ -2255,6 +2385,184 @@
mediatek,larbs = <&larb1 &larb3 &larb4 &larb6 &larb7 &larb23>;
};
+ dma-controller@14f09000 {
+ compatible = "mediatek,mt8188-mdp3-rdma";
+ reg = <0 0x14f09000 0 0x1000>;
+ #dma-cells = <1>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP2_MDP_RDMA>;
+ iommus = <&vdo_iommu M4U_PORT_L5_SVPP2_MDP_RDMA>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f0XXXX 0x9000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_SOF>,
+ <CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_FRAME_DONE>;
+ };
+
+ dma-controller@14f0a000 {
+ compatible = "mediatek,mt8188-mdp3-rdma";
+ reg = <0 0x14f0a000 0 0x1000>;
+ #dma-cells = <1>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP3_MDP_RDMA>;
+ iommus = <&vpp_iommu M4U_PORT_L6_SVPP3_MDP_RDMA>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f0XXXX 0xa000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_SOF>,
+ <CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_FRAME_DONE>;
+ };
+
+ display@14f0c000 {
+ compatible = "mediatek,mt8188-mdp3-fg", "mediatek,mt8195-mdp3-fg";
+ reg = <0 0x14f0c000 0 0x1000>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP2_MDP_FG>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f0XXXX 0xc000 0x1000>;
+ };
+
+ display@14f0d000 {
+ compatible = "mediatek,mt8188-mdp3-fg", "mediatek,mt8195-mdp3-fg";
+ reg = <0 0x14f0d000 0 0x1000>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP3_MDP_FG>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f0XXXX 0xd000 0x1000>;
+ };
+
+ display@14f0f000 {
+ compatible = "mediatek,mt8188-mdp3-hdr", "mediatek,mt8195-mdp3-hdr";
+ reg = <0 0x14f0f000 0 0x1000>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP2_MDP_HDR>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f0XXXX 0xf000 0x1000>;
+ };
+
+ display@14f10000 {
+ compatible = "mediatek,mt8188-mdp3-hdr", "mediatek,mt8195-mdp3-hdr";
+ reg = <0 0x14f10000 0 0x1000>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP3_MDP_HDR>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f1XXXX 0 0x1000>;
+ };
+
+ display@14f12000 {
+ compatible = "mediatek,mt8188-mdp3-aal", "mediatek,mt8195-mdp3-aal";
+ reg = <0 0x14f12000 0 0x1000>;
+ interrupts = <GIC_SPI 618 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP2_MDP_AAL>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f1XXXX 0x2000 0x1000>;
+ };
+
+ display@14f13000 {
+ compatible = "mediatek,mt8188-mdp3-aal", "mediatek,mt8195-mdp3-aal";
+ reg = <0 0x14f13000 0 0x1000>;
+ interrupts = <GIC_SPI 619 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP3_MDP_AAL>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f1XXXX 0x3000 0x1000>;
+ };
+
+ display@14f15000 {
+ compatible = "mediatek,mt8188-mdp3-rsz", "mediatek,mt8183-mdp3-rsz";
+ reg = <0 0x14f15000 0 0x1000>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP2_MDP_RSZ>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f1XXXX 0x5000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_SOF>,
+ <CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_FRAME_DONE>;
+ };
+
+ display@14f16000 {
+ compatible = "mediatek,mt8188-mdp3-rsz", "mediatek,mt8183-mdp3-rsz";
+ reg = <0 0x14f16000 0 0x1000>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP3_MDP_RSZ>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f1XXXX 0x6000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_SOF>,
+ <CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_FRAME_DONE>;
+ };
+
+ display@14f18000 {
+ compatible = "mediatek,mt8188-mdp3-tdshp", "mediatek,mt8195-mdp3-tdshp";
+ reg = <0 0x14f18000 0 0x1000>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP2_MDP_TDSHP>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f1XXXX 0x8000 0x1000>;
+ };
+
+ display@14f19000 {
+ compatible = "mediatek,mt8188-mdp3-tdshp", "mediatek,mt8195-mdp3-tdshp";
+ reg = <0 0x14f19000 0 0x1000>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP3_MDP_TDSHP>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f1XXXX 0x9000 0x1000>;
+ };
+
+ display@14f1a000 {
+ compatible = "mediatek,mt8188-mdp3-merge", "mediatek,mt8195-mdp3-merge";
+ reg = <0 0x14f1a000 0 0x1000>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP2_VPP_MERGE>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f1XXXX 0xa000 0x1000>;
+ };
+
+ display@14f1b000 {
+ compatible = "mediatek,mt8188-mdp3-merge", "mediatek,mt8195-mdp3-merge";
+ reg = <0 0x14f1b000 0 0x1000>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP3_VPP_MERGE>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f1XXXX 0xb000 0x1000>;
+ };
+
+ display@14f1d000 {
+ compatible = "mediatek,mt8188-mdp3-color", "mediatek,mt8195-mdp3-color";
+ reg = <0 0x14f1d000 0 0x1000>;
+ interrupts = <GIC_SPI 629 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP2_MDP_COLOR>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f1XXXX 0xd000 0x1000>;
+ };
+
+ display@14f1e000 {
+ compatible = "mediatek,mt8188-mdp3-color", "mediatek,mt8195-mdp3-color";
+ reg = <0 0x14f1e000 0 0x1000>;
+ interrupts = <GIC_SPI 630 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP3_MDP_COLOR>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f1XXXX 0xe000 0x1000>;
+ };
+
+ display@14f21000 {
+ compatible = "mediatek,mt8188-mdp3-padding",
+ "mediatek,mt8195-mdp3-padding";
+ reg = <0 0x14f21000 0 0x1000>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP2_VPP_PAD>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f2XXXX 0x1000 0x1000>;
+ };
+
+ display@14f22000 {
+ compatible = "mediatek,mt8188-mdp3-padding",
+ "mediatek,mt8195-mdp3-padding";
+ reg = <0 0x14f22000 0 0x1000>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP3_VPP_PAD>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f2XXXX 0x2000 0x1000>;
+ };
+
+ display@14f24000 {
+ compatible = "mediatek,mt8188-mdp3-wrot", "mediatek,mt8183-mdp3-wrot";
+ reg = <0 0x14f24000 0 0x1000>;
+ #dma-cells = <1>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP2_MDP_WROT>;
+ iommus = <&vdo_iommu M4U_PORT_L5_SVPP2_MDP_WROT>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f2XXXX 0x4000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SOF>,
+ <CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_FRAME_DONE>;
+ };
+
+ display@14f25000 {
+ compatible = "mediatek,mt8188-mdp3-wrot", "mediatek,mt8183-mdp3-wrot";
+ reg = <0 0x14f25000 0 0x1000>;
+ #dma-cells = <1>;
+ clocks = <&vppsys1 CLK_VPP1_SVPP3_MDP_WROT>;
+ iommus = <&vpp_iommu M4U_PORT_L6_SVPP3_MDP_WROT>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f2XXXX 0x5000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SOF>,
+ <CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_FRAME_DONE>;
+ };
+
wpesys: clock-controller@14e00000 {
compatible = "mediatek,mt8188-wpesys";
reg = <0 0x14e00000 0 0x1000>;
@@ -2284,6 +2592,15 @@
#clock-cells = <1>;
};
+ mutex@14f01000 {
+ compatible = "mediatek,mt8188-vpp-mutex";
+ reg = <0 0x14f01000 0 0x1000>;
+ interrupts = <GIC_SPI 635 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&vppsys1 CLK_VPP1_DISP_MUTEX>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VPPSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f0XXXX 0x1000 0x1000>;
+ };
+
larb5: smi@14f02000 {
compatible = "mediatek,mt8188-smi-larb";
reg = <0 0x14f02000 0 0x1000>;
@@ -2316,36 +2633,42 @@
compatible = "mediatek,mt8188-imgsys1-dip-top";
reg = <0 0x15110000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
imgsys1_dip_nr: clock-controller@15130000 {
compatible = "mediatek,mt8188-imgsys1-dip-nr";
reg = <0 0x15130000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
imgsys_wpe1: clock-controller@15220000 {
compatible = "mediatek,mt8188-imgsys-wpe1";
reg = <0 0x15220000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
ipesys: clock-controller@15330000 {
compatible = "mediatek,mt8188-ipesys";
reg = <0 0x15330000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
imgsys_wpe2: clock-controller@15520000 {
compatible = "mediatek,mt8188-imgsys-wpe2";
reg = <0 0x15520000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
imgsys_wpe3: clock-controller@15620000 {
compatible = "mediatek,mt8188-imgsys-wpe3";
reg = <0 0x15620000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
camsys: clock-controller@16000000 {
@@ -2358,24 +2681,28 @@
compatible = "mediatek,mt8188-camsys-rawa";
reg = <0 0x1604f000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
camsys_yuva: clock-controller@1606f000 {
compatible = "mediatek,mt8188-camsys-yuva";
reg = <0 0x1606f000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
camsys_rawb: clock-controller@1608f000 {
compatible = "mediatek,mt8188-camsys-rawb";
reg = <0 0x1608f000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
camsys_yuvb: clock-controller@160af000 {
compatible = "mediatek,mt8188-camsys-yuvb";
reg = <0 0x160af000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
ccusys: clock-controller@17200000 {
@@ -2391,7 +2718,7 @@
iommus = <&vpp_iommu M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT>;
#address-cells = <2>;
#size-cells = <2>;
- mediatek,scp = <&scp>;
+ mediatek,scp = <&scp_c0>;
video-codec@10000 {
compatible = "mediatek,mtk-vcodec-lat";
@@ -2515,7 +2842,7 @@
<&vdo_iommu M4U_PORT_L19_VENC_SUB_W_LUMA>,
<&vdo_iommu M4U_PORT_L19_VENC_SUB_R_LUMA>;
power-domains = <&spm MT8188_POWER_DOMAIN_VENC>;
- mediatek,scp = <&scp>;
+ mediatek,scp = <&scp_c0>;
};
jpeg_encoder: jpeg-encoder@1a030000 {
@@ -2579,7 +2906,7 @@
reg = <0 0x1c002000 0 0x1000>;
clocks = <&vdosys0 CLK_VDO0_DISP_RDMA0>;
interrupts = <GIC_SPI 638 IRQ_TYPE_LEVEL_HIGH 0>;
- iommus = <&vdo_iommu M4U_PORT_L1_DISP_RDMA0>;
+ iommus = <&vpp_iommu M4U_PORT_L1_DISP_RDMA0>;
power-domains = <&spm MT8188_POWER_DOMAIN_VDOSYS0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x2000 0x1000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index 4f2dc0a75566..dd065b1bf94a 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -617,22 +617,6 @@
#size-cells = <0>;
#power-domain-cells = <1>;
- power-domain@MT8195_POWER_DOMAIN_VDEC1 {
- reg = <MT8195_POWER_DOMAIN_VDEC1>;
- clocks = <&vdecsys CLK_VDEC_LARB1>;
- clock-names = "vdec1-0";
- mediatek,infracfg = <&infracfg_ao>;
- #power-domain-cells = <0>;
- };
-
- power-domain@MT8195_POWER_DOMAIN_VENC_CORE1 {
- reg = <MT8195_POWER_DOMAIN_VENC_CORE1>;
- clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>;
- clock-names = "venc1-larb";
- mediatek,infracfg = <&infracfg_ao>;
- #power-domain-cells = <0>;
- };
-
power-domain@MT8195_POWER_DOMAIN_VDOSYS0 {
reg = <MT8195_POWER_DOMAIN_VDOSYS0>;
clocks = <&topckgen CLK_TOP_CFG_VDO0>,
@@ -678,15 +662,25 @@
clocks = <&vdecsys_soc CLK_VDEC_SOC_LARB1>;
clock-names = "vdec0-0";
mediatek,infracfg = <&infracfg_ao>;
+ #address-cells = <1>;
+ #size-cells = <0>;
#power-domain-cells = <0>;
- };
- power-domain@MT8195_POWER_DOMAIN_VDEC2 {
- reg = <MT8195_POWER_DOMAIN_VDEC2>;
- clocks = <&vdecsys_core1 CLK_VDEC_CORE1_LARB1>;
- clock-names = "vdec2-0";
- mediatek,infracfg = <&infracfg_ao>;
- #power-domain-cells = <0>;
+ power-domain@MT8195_POWER_DOMAIN_VDEC1 {
+ reg = <MT8195_POWER_DOMAIN_VDEC1>;
+ clocks = <&vdecsys CLK_VDEC_LARB1>;
+ clock-names = "vdec1-0";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8195_POWER_DOMAIN_VDEC2 {
+ reg = <MT8195_POWER_DOMAIN_VDEC2>;
+ clocks = <&vdecsys_core1 CLK_VDEC_CORE1_LARB1>;
+ clock-names = "vdec2-0";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
};
power-domain@MT8195_POWER_DOMAIN_VENC {
@@ -694,7 +688,17 @@
clocks = <&vencsys CLK_VENC_LARB>;
clock-names = "venc0-larb";
mediatek,infracfg = <&infracfg_ao>;
+ #address-cells = <1>;
+ #size-cells = <0>;
#power-domain-cells = <0>;
+
+ power-domain@MT8195_POWER_DOMAIN_VENC_CORE1 {
+ reg = <MT8195_POWER_DOMAIN_VENC_CORE1>;
+ clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>;
+ clock-names = "venc1-larb";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
};
power-domain@MT8195_POWER_DOMAIN_VDOSYS1 {
@@ -3378,6 +3382,7 @@
compatible = "mediatek,mt8195-dp-intf";
reg = <0 0x1c015000 0 0x1000>;
interrupts = <GIC_SPI 657 IRQ_TYPE_LEVEL_HIGH 0>;
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
clocks = <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>,
<&vdosys0 CLK_VDO0_DP_INTF0>,
<&apmixedsys CLK_APMIXED_TVDPLL1>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8196-pinfunc.h b/arch/arm64/boot/dts/mediatek/mt8196-pinfunc.h
new file mode 100644
index 000000000000..99535a6d5cba
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8196-pinfunc.h
@@ -0,0 +1,1574 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2025 MediaTek Inc.
+ * Author: Guodong Liu <guodong.liu@mediatek.com>
+ * Lei Xue <lei.xue@mediatek.com>
+ * Cathy Xu <ot_cathy.xu@mediatek.com>
+ */
+
+#ifndef __MT8196_PINFUNC_H
+#define __MT8196_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_DMIC1_CLK (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_SPI3_A_MO (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_FMI2S_B_LRCK (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_SCP_DMIC1_CLK (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_TP_GPIO14_AO (MTK_PIN_NO(0) | 6)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_DMIC1_DAT (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_SRCLKENAI1 (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_SPI3_A_MI (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_FMI2S_B_DI (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_SCP_DMIC1_DAT (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_TP_GPIO15_AO (MTK_PIN_NO(1) | 6)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_PWM_VLP (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_DSI_HSYNC (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_RG_TSFDC_LDO_EN (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_TP_GPIO8_AO (MTK_PIN_NO(2) | 6)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_MD_INT0 (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_DSI1_HSYNC (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_DA_TSFDC_LDO_MODE (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_TP_GPIO9_AO (MTK_PIN_NO(3) | 6)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_DISP_PWM1 (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_MD32_0_GPIO0 (MTK_PIN_NO(4) | 2)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_LCM1_RST (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_SPI7_A_CLK (MTK_PIN_NO(5) | 2)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_DSI1_TE (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_SPI7_A_CSB (MTK_PIN_NO(6) | 2)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_SPI7_A_MO (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_GPS_PPS0 (MTK_PIN_NO(7) | 3)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_SPI7_A_MI (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_EDP_TX_HPD (MTK_PIN_NO(8) | 3)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_I2SIN1_LRCK (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_RG_TSFDC_LDO_REFSEL0 (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_I2SOUT1_DO (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_RG_TSFDC_LDO_REFSEL1 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_FMI2S_B_BCK (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_DBG_MON_A30 (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_I2SIN1_DI_B (MTK_PIN_NO(12) | 3)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_EDP_TX_HPD (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_GPS_PPS1 (MTK_PIN_NO(13) | 2)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_SRCLKENA2 (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_DSI2_TE (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_SPMI_P_TRIG_FLAG (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_MD_INT3 (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_TP_GPIO8_AO (MTK_PIN_NO(14) | 6)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_SRCLKENAI0 (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_SPMI_M_TRIG_FLAG (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_UCTS0 (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_MD_INT4 (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_I2SOUT2_DO (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_TP_GPIO9_AO (MTK_PIN_NO(15) | 6)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_SRCLKENAI1 (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_DP_TX_HPD (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_URTS0 (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_GPS_L5_ELNA_EN (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_KPROW2 (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_TP_GPIO10_AO (MTK_PIN_NO(16) | 6)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_MD_INT0 (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_DP_OC_EN (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_UCTS1 (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_MD_NTN_URXD1 (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_KPCOL2 (MTK_PIN_NO(17) | 5)
+#define PINMUX_GPIO17__FUNC_TP_GPIO11_AO (MTK_PIN_NO(17) | 6)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_DMIC1_CLK (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_DP_RAUX_SBU1 (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_URTS1 (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_MD_NTN_UTXD1 (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_I2SIN2_DI (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_TP_UTXD_GNSS_VLP (MTK_PIN_NO(18) | 6)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_DMIC1_DAT (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_DP_RAUX_SBU2 (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_CLKM3_A (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_I2SIN2_BCK (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_TP_URXD_GNSS_VLP (MTK_PIN_NO(19) | 6)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_IDDIG (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_LCM2_RST (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_GPS_PPS1 (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_CLKM2_A (MTK_PIN_NO(20) | 4)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_BPI_BUS11 (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_PCIE_PERSTN_1P (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_DSI1_TE (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_DMIC_CLK (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_SCP_DMIC_CLK (MTK_PIN_NO(21) | 5)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_BPI_BUS12 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_PCIE_CLKREQN_1P (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_DSI2_TE (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_DMIC_DAT (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_SCP_DMIC_DAT (MTK_PIN_NO(22) | 5)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_BPI_BUS13 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_PCIE_WAKEN_1P (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_DSI3_TE (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_DMIC1_CLK (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_SCP_DMIC1_CLK (MTK_PIN_NO(23) | 5)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_BPI_BUS14 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_LCM1_RST (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_AGPS_SYNC (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_DMIC1_DAT (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_SCP_DMIC1_DAT (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_DISP_PWM1 (MTK_PIN_NO(24) | 6)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_BPI_BUS15 (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_LCM2_RST (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SRCLKENAI1 (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_DMIC2_CLK (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_DISP_PWM2 (MTK_PIN_NO(25) | 6)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_BPI_BUS16 (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_LCM3_RST (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_DMIC2_DAT (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_DISP_PWM3 (MTK_PIN_NO(26) | 6)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_BPI_BUS17 (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_UTXD4 (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_DISP_PWM4 (MTK_PIN_NO(27) | 6)
+#define PINMUX_GPIO27__FUNC_DBG_MON_A20 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_BPI_BUS18 (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_URXD4 (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_SPI2_A_MI (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_CLKM0_A (MTK_PIN_NO(28) | 4)
+#define PINMUX_GPIO28__FUNC_DBG_MON_A21 (MTK_PIN_NO(28) | 7)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_BPI_BUS19 (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_MD_NTN_UTXD1 (MTK_PIN_NO(29) | 2)
+#define PINMUX_GPIO29__FUNC_SPI2_A_MO (MTK_PIN_NO(29) | 3)
+#define PINMUX_GPIO29__FUNC_CLKM1_A (MTK_PIN_NO(29) | 4)
+#define PINMUX_GPIO29__FUNC_UCTS4 (MTK_PIN_NO(29) | 6)
+#define PINMUX_GPIO29__FUNC_DBG_MON_A17 (MTK_PIN_NO(29) | 7)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_BPI_BUS20 (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_MD_NTN_URXD1 (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_SPI2_A_CLK (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_CLKM2_A (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_DSI3_HSYNC (MTK_PIN_NO(30) | 5)
+#define PINMUX_GPIO30__FUNC_URTS4 (MTK_PIN_NO(30) | 6)
+#define PINMUX_GPIO30__FUNC_DBG_MON_A18 (MTK_PIN_NO(30) | 7)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_BPI_BUS21 (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_SPI2_A_CSB (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_CLKM3_A (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_EDP_TX_HPD (MTK_PIN_NO(31) | 6)
+#define PINMUX_GPIO31__FUNC_DBG_MON_A19 (MTK_PIN_NO(31) | 7)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_LCM4_RST (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_DP_TX_HPD (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_SSPM_JTAG_TCK_VLP (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_ADSP_JTAG0_TCK (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_SCP_JTAG0_TCK_VLP (MTK_PIN_NO(32) | 5)
+#define PINMUX_GPIO32__FUNC_SPU0_TCK (MTK_PIN_NO(32) | 6)
+#define PINMUX_GPIO32__FUNC_IO_JTAG_TCK (MTK_PIN_NO(32) | 7)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_DSI4_TE (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_DP_OC_EN (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_SSPM_JTAG_TRSTN_VLP (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_ADSP_JTAG0_TRSTN (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_SCP_JTAG0_TRSTN_VLP (MTK_PIN_NO(33) | 5)
+#define PINMUX_GPIO33__FUNC_SPU0_NTRST (MTK_PIN_NO(33) | 6)
+#define PINMUX_GPIO33__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(33) | 7)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_UCTS5 (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_DP_RAUX_SBU1 (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_SSPM_JTAG_TDI_VLP (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_ADSP_JTAG0_TDI (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_SCP_JTAG0_TDI_VLP (MTK_PIN_NO(34) | 5)
+#define PINMUX_GPIO34__FUNC_SPU0_TDI (MTK_PIN_NO(34) | 6)
+#define PINMUX_GPIO34__FUNC_IO_JTAG_TDI (MTK_PIN_NO(34) | 7)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_URTS5 (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_DP_RAUX_SBU2 (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_SSPM_JTAG_TDO_VLP (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_ADSP_JTAG0_TDO (MTK_PIN_NO(35) | 4)
+#define PINMUX_GPIO35__FUNC_SCP_JTAG0_TDO_VLP (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_SPU0_TDO (MTK_PIN_NO(35) | 6)
+#define PINMUX_GPIO35__FUNC_IO_JTAG_TDO (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_UTXD5 (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_SSPM_JTAG_TMS_VLP (MTK_PIN_NO(36) | 3)
+#define PINMUX_GPIO36__FUNC_ADSP_JTAG0_TMS (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_SCP_JTAG0_TMS_VLP (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_SPU0_TMS (MTK_PIN_NO(36) | 6)
+#define PINMUX_GPIO36__FUNC_IO_JTAG_TMS (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_URXD5 (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_MD_INT3 (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_CLKM0_B (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_TP_GPIO5_AO (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_SPU0_UTX (MTK_PIN_NO(37) | 6)
+#define PINMUX_GPIO37__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_SPMI_P_TRIG_FLAG (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_MD_INT4 (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_CLKM1_B (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_TP_GPIO6_AO (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_SPU0_URX (MTK_PIN_NO(38) | 6)
+#define PINMUX_GPIO38__FUNC_DAP_MD32_SWD (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_I2S_MCK0 (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_GPS_PPS0 (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_DBG_MON_B12 (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_I2SIN6_0_BCK (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_SPI4_B_CLK (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_UCTS2 (MTK_PIN_NO(40) | 4)
+#define PINMUX_GPIO40__FUNC_CCU1_UTXD (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_DBG_MON_B13 (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_I2SIN6_0_LRCK (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_SPI4_B_CSB (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_URTS2 (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_CCU1_URXD (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_DBG_MON_B14 (MTK_PIN_NO(41) | 7)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_I2SIN6_0_DI (MTK_PIN_NO(42) | 1)
+#define PINMUX_GPIO42__FUNC_SPI4_B_MI (MTK_PIN_NO(42) | 3)
+#define PINMUX_GPIO42__FUNC_URXD2 (MTK_PIN_NO(42) | 4)
+#define PINMUX_GPIO42__FUNC_CCU1_URTS (MTK_PIN_NO(42) | 5)
+#define PINMUX_GPIO42__FUNC_MD32_0_RXD (MTK_PIN_NO(42) | 6)
+#define PINMUX_GPIO42__FUNC_DBG_MON_B15 (MTK_PIN_NO(42) | 7)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_I2SOUT6_0_DO (MTK_PIN_NO(43) | 1)
+#define PINMUX_GPIO43__FUNC_SPI4_B_MO (MTK_PIN_NO(43) | 3)
+#define PINMUX_GPIO43__FUNC_UTXD2 (MTK_PIN_NO(43) | 4)
+#define PINMUX_GPIO43__FUNC_CCU1_UCTS (MTK_PIN_NO(43) | 5)
+#define PINMUX_GPIO43__FUNC_MD32_0_TXD (MTK_PIN_NO(43) | 6)
+#define PINMUX_GPIO43__FUNC_DBG_MON_B16 (MTK_PIN_NO(43) | 7)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(44) | 1)
+#define PINMUX_GPIO44__FUNC_SPI3_A_CLK (MTK_PIN_NO(44) | 3)
+#define PINMUX_GPIO44__FUNC_TP_GPIO10_AO (MTK_PIN_NO(44) | 6)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_DSI2_HSYNC (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_SPI3_A_CSB (MTK_PIN_NO(45) | 3)
+#define PINMUX_GPIO45__FUNC_PWM_VLP (MTK_PIN_NO(45) | 4)
+#define PINMUX_GPIO45__FUNC_TP_GPIO11_AO (MTK_PIN_NO(45) | 6)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_SCP_SCL4 (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_PWM_VLP (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_SCP_ILDO_DTEST1_VLP (MTK_PIN_NO(46) | 4)
+#define PINMUX_GPIO46__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(46) | 5)
+#define PINMUX_GPIO46__FUNC_TP_GPIO0_AO (MTK_PIN_NO(46) | 6)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_SCP_SDA4 (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_SCP_ILDO_DTEST2_VLP (MTK_PIN_NO(47) | 4)
+#define PINMUX_GPIO47__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(47) | 5)
+#define PINMUX_GPIO47__FUNC_TP_GPIO1_AO (MTK_PIN_NO(47) | 6)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_SCP_SCL5 (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_PWM_VLP (MTK_PIN_NO(48) | 2)
+#define PINMUX_GPIO48__FUNC_CCU0_UTXD (MTK_PIN_NO(48) | 3)
+#define PINMUX_GPIO48__FUNC_SCP_ILDO_DTEST3_VLP (MTK_PIN_NO(48) | 4)
+#define PINMUX_GPIO48__FUNC_TP_GPIO2_AO (MTK_PIN_NO(48) | 6)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_SCP_SDA5 (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_CCU0_URXD (MTK_PIN_NO(49) | 3)
+#define PINMUX_GPIO49__FUNC_SCP_ILDO_DTEST4_VLP (MTK_PIN_NO(49) | 4)
+#define PINMUX_GPIO49__FUNC_TP_GPIO3_AO (MTK_PIN_NO(49) | 6)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_SCP_SCL6 (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_PWM_VLP (MTK_PIN_NO(50) | 2)
+#define PINMUX_GPIO50__FUNC_CCU0_URTS (MTK_PIN_NO(50) | 3)
+#define PINMUX_GPIO50__FUNC_DSI_HSYNC (MTK_PIN_NO(50) | 4)
+#define PINMUX_GPIO50__FUNC_TP_GPIO4_AO (MTK_PIN_NO(50) | 6)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_SCP_SDA6 (MTK_PIN_NO(51) | 1)
+#define PINMUX_GPIO51__FUNC_CCU0_UCTS (MTK_PIN_NO(51) | 3)
+#define PINMUX_GPIO51__FUNC_DSI1_HSYNC (MTK_PIN_NO(51) | 4)
+#define PINMUX_GPIO51__FUNC_TP_GPIO5_AO (MTK_PIN_NO(51) | 6)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_SCP_SCL1 (MTK_PIN_NO(52) | 1)
+#define PINMUX_GPIO52__FUNC_TDM_DATA2 (MTK_PIN_NO(52) | 3)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_SCP_SDA1 (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_TDM_DATA3 (MTK_PIN_NO(53) | 3)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_TDM_MCK (MTK_PIN_NO(54) | 3)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_AUD_CLK_MISO (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_I2SOUT2_BCK (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_TDM_BCK (MTK_PIN_NO(55) | 3)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_I2SOUT2_LRCK (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_TDM_LRCK (MTK_PIN_NO(56) | 3)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_I2SOUT2_DO (MTK_PIN_NO(57) | 2)
+#define PINMUX_GPIO57__FUNC_TDM_DATA0 (MTK_PIN_NO(57) | 3)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_TDM_DATA1 (MTK_PIN_NO(58) | 3)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_I2SIN1_BCK (MTK_PIN_NO(59) | 3)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_KPCOL0 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_TP_GPIO13_AO (MTK_PIN_NO(60) | 6)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_MCU_M_PMIC_POC_I (MTK_PIN_NO(61) | 1)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_MCU_B_PMIC_POC_I (MTK_PIN_NO(62) | 1)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_MFG_PMIC_POC_I (MTK_PIN_NO(63) | 1)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_PRE_UVLO (MTK_PIN_NO(64) | 1)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_DPM2PMIC (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_SRCLKENA1 (MTK_PIN_NO(65) | 2)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_WATCHDOG (MTK_PIN_NO(66) | 1)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_SRCLKENA0 (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_RTC32K_CK (MTK_PIN_NO(69) | 1)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_CMFLASH0 (MTK_PIN_NO(70) | 1)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_DCXO_FPM_LPM (MTK_PIN_NO(74) | 1)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_SPMI_M_SCL (MTK_PIN_NO(75) | 1)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_SPMI_M_SDA (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_SPMI_P_SCL (MTK_PIN_NO(77) | 1)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_SPMI_P_SDA (MTK_PIN_NO(78) | 1)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_CMMCLK0 (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_MD_INT4 (MTK_PIN_NO(79) | 2)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_CMMCLK1 (MTK_PIN_NO(80) | 1)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_SCP_SPI0_CK (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_SPI6_B_CLK (MTK_PIN_NO(81) | 2)
+#define PINMUX_GPIO81__FUNC_PWM_VLP (MTK_PIN_NO(81) | 3)
+#define PINMUX_GPIO81__FUNC_I2SOUT5_BCK (MTK_PIN_NO(81) | 4)
+#define PINMUX_GPIO81__FUNC_TP_GPIO0_AO (MTK_PIN_NO(81) | 6)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_SCP_SPI0_CS (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_SPI6_B_CSB (MTK_PIN_NO(82) | 2)
+#define PINMUX_GPIO82__FUNC_I2SOUT5_LRCK (MTK_PIN_NO(82) | 4)
+#define PINMUX_GPIO82__FUNC_TP_GPIO1_AO (MTK_PIN_NO(82) | 6)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_SCP_SPI0_MO (MTK_PIN_NO(83) | 1)
+#define PINMUX_GPIO83__FUNC_SPI6_B_MO (MTK_PIN_NO(83) | 2)
+#define PINMUX_GPIO83__FUNC_I2SOUT5_DATA0 (MTK_PIN_NO(83) | 4)
+#define PINMUX_GPIO83__FUNC_TP_GPIO2_AO (MTK_PIN_NO(83) | 6)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_SCP_SPI0_MI (MTK_PIN_NO(84) | 1)
+#define PINMUX_GPIO84__FUNC_SPI6_B_MI (MTK_PIN_NO(84) | 2)
+#define PINMUX_GPIO84__FUNC_I2SOUT5_DATA1 (MTK_PIN_NO(84) | 4)
+#define PINMUX_GPIO84__FUNC_TP_GPIO3_AO (MTK_PIN_NO(84) | 6)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_SCP_SPI1_CK (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_SPI7_B_CLK (MTK_PIN_NO(85) | 2)
+#define PINMUX_GPIO85__FUNC_I2SIN5_DATA0 (MTK_PIN_NO(85) | 4)
+#define PINMUX_GPIO85__FUNC_PWM_VLP (MTK_PIN_NO(85) | 5)
+#define PINMUX_GPIO85__FUNC_TP_GPIO4_AO (MTK_PIN_NO(85) | 6)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_SCP_SPI1_CS (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_SPI7_B_CSB (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_I2SIN5_DATA1 (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_TP_GPIO5_AO (MTK_PIN_NO(86) | 6)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_SCP_SPI1_MO (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_SPI7_B_MO (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_I2SIN5_BCK (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_TP_GPIO6_AO (MTK_PIN_NO(87) | 6)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_SCP_SPI1_MI (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_SPI7_B_MI (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_I2SIN5_LRCK (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_TP_GPIO7_AO (MTK_PIN_NO(88) | 6)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_DSI_TE (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_DSI1_TE (MTK_PIN_NO(89) | 2)
+#define PINMUX_GPIO89__FUNC_DBG_MON_B30 (MTK_PIN_NO(89) | 7)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_LCM_RST (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_LCM1_RST (MTK_PIN_NO(90) | 2)
+#define PINMUX_GPIO90__FUNC_DBG_MON_B31 (MTK_PIN_NO(90) | 7)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_CMFLASH2 (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_SF_D0 (MTK_PIN_NO(91) | 2)
+#define PINMUX_GPIO91__FUNC_SRCLKENAI1 (MTK_PIN_NO(91) | 3)
+#define PINMUX_GPIO91__FUNC_KPCOL2 (MTK_PIN_NO(91) | 5)
+#define PINMUX_GPIO91__FUNC_TP_GPIO11_AO (MTK_PIN_NO(91) | 6)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_CMFLASH3 (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_SF_D1 (MTK_PIN_NO(92) | 2)
+#define PINMUX_GPIO92__FUNC_DISP_PWM1 (MTK_PIN_NO(92) | 4)
+#define PINMUX_GPIO92__FUNC_TP_GPIO12_AO (MTK_PIN_NO(92) | 6)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_CMFLASH1 (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_SF_D2 (MTK_PIN_NO(93) | 2)
+#define PINMUX_GPIO93__FUNC_SRCLKENAI0 (MTK_PIN_NO(93) | 3)
+#define PINMUX_GPIO93__FUNC_KPROW2 (MTK_PIN_NO(93) | 5)
+#define PINMUX_GPIO93__FUNC_TP_GPIO13_AO (MTK_PIN_NO(93) | 6)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_I2S_MCK1 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_SF_D3 (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_MD32_0_GPIO0 (MTK_PIN_NO(94) | 4)
+#define PINMUX_GPIO94__FUNC_CLKM0_A (MTK_PIN_NO(94) | 5)
+#define PINMUX_GPIO94__FUNC_TP_GPIO14_AO (MTK_PIN_NO(94) | 6)
+#define PINMUX_GPIO94__FUNC_DBG_MON_B18 (MTK_PIN_NO(94) | 7)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_I2SIN1_BCK (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_I2SIN4_BCK (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_SPI6_A_CLK (MTK_PIN_NO(95) | 3)
+#define PINMUX_GPIO95__FUNC_MD32_1_GPIO0 (MTK_PIN_NO(95) | 4)
+#define PINMUX_GPIO95__FUNC_CLKM1_A (MTK_PIN_NO(95) | 5)
+#define PINMUX_GPIO95__FUNC_TP_GPIO15_AO (MTK_PIN_NO(95) | 6)
+#define PINMUX_GPIO95__FUNC_DBG_MON_B19 (MTK_PIN_NO(95) | 7)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_I2SIN1_LRCK (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_I2SIN4_LRCK (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_SPI6_A_CSB (MTK_PIN_NO(96) | 3)
+#define PINMUX_GPIO96__FUNC_MD32_2_GPIO0 (MTK_PIN_NO(96) | 4)
+#define PINMUX_GPIO96__FUNC_CLKM2_A (MTK_PIN_NO(96) | 5)
+#define PINMUX_GPIO96__FUNC_DBG_MON_B20 (MTK_PIN_NO(96) | 7)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_I2SIN1_DI_A (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_I2SIN4_DATA0 (MTK_PIN_NO(97) | 2)
+#define PINMUX_GPIO97__FUNC_SPI6_A_MO (MTK_PIN_NO(97) | 3)
+#define PINMUX_GPIO97__FUNC_MD32_3_GPIO0 (MTK_PIN_NO(97) | 4)
+#define PINMUX_GPIO97__FUNC_CLKM3_A (MTK_PIN_NO(97) | 5)
+#define PINMUX_GPIO97__FUNC_DBG_MON_B21 (MTK_PIN_NO(97) | 7)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_I2SOUT1_DO (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_I2SOUT4_DATA0 (MTK_PIN_NO(98) | 2)
+#define PINMUX_GPIO98__FUNC_SPI6_A_MI (MTK_PIN_NO(98) | 3)
+#define PINMUX_GPIO98__FUNC_DBG_MON_B22 (MTK_PIN_NO(98) | 7)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_SCL0 (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_LCM2_RST (MTK_PIN_NO(99) | 2)
+#define PINMUX_GPIO99__FUNC_AUD_DAC_26M_CLK (MTK_PIN_NO(99) | 3)
+#define PINMUX_GPIO99__FUNC_SPU0_SCL (MTK_PIN_NO(99) | 4)
+#define PINMUX_GPIO99__FUNC_DBG_MON_B24 (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_SDA0 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_DSI2_TE (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_SPU0_SDA (MTK_PIN_NO(100) | 4)
+#define PINMUX_GPIO100__FUNC_DBG_MON_B25 (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_SCL10 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_SF_CS (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_SCP_DMIC1_CLK (MTK_PIN_NO(101) | 3)
+#define PINMUX_GPIO101__FUNC_I2SIN5_DATA2 (MTK_PIN_NO(101) | 4)
+#define PINMUX_GPIO101__FUNC_SCP_SCL_OIS (MTK_PIN_NO(101) | 5)
+#define PINMUX_GPIO101__FUNC_TP_GPIO10_AO (MTK_PIN_NO(101) | 6)
+#define PINMUX_GPIO101__FUNC_DBG_MON_B28 (MTK_PIN_NO(101) | 7)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_SDA10 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_SF_CK (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_SCP_DMIC1_DAT (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_I2SIN5_DATA3 (MTK_PIN_NO(102) | 4)
+#define PINMUX_GPIO102__FUNC_SCP_SDA_OIS (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_TP_GPIO11_AO (MTK_PIN_NO(102) | 6)
+#define PINMUX_GPIO102__FUNC_DBG_MON_B29 (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_DISP_PWM (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_DSI1_TE (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_I2S_MCK0 (MTK_PIN_NO(103) | 5)
+#define PINMUX_GPIO103__FUNC_DBG_MON_B23 (MTK_PIN_NO(103) | 7)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_SCL6 (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_SPU1_SCL (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_AUD_DAC_26M_CLK (MTK_PIN_NO(104) | 3)
+#define PINMUX_GPIO104__FUNC_USB_DRVVBUS_2P (MTK_PIN_NO(104) | 4)
+#define PINMUX_GPIO104__FUNC_I2S_MCK1 (MTK_PIN_NO(104) | 5)
+#define PINMUX_GPIO104__FUNC_IDDIG_2P (MTK_PIN_NO(104) | 6)
+#define PINMUX_GPIO104__FUNC_DBG_MON_B26 (MTK_PIN_NO(104) | 7)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_SDA6 (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_SPU1_SDA (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_DISP_PWM2 (MTK_PIN_NO(105) | 3)
+#define PINMUX_GPIO105__FUNC_VBUSVALID_2P (MTK_PIN_NO(105) | 4)
+#define PINMUX_GPIO105__FUNC_I2S_MCK2 (MTK_PIN_NO(105) | 5)
+#define PINMUX_GPIO105__FUNC_VBUSVALID_3P (MTK_PIN_NO(105) | 6)
+#define PINMUX_GPIO105__FUNC_DBG_MON_B27 (MTK_PIN_NO(105) | 7)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_SCP_SPI3_CK (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_SPI3_B_CLK (MTK_PIN_NO(106) | 2)
+#define PINMUX_GPIO106__FUNC_MD_UTXD0 (MTK_PIN_NO(106) | 3)
+#define PINMUX_GPIO106__FUNC_TP_UTXD1_VLP (MTK_PIN_NO(106) | 4)
+#define PINMUX_GPIO106__FUNC_CONN_BG_GPS_MCU_UART0_TXD (MTK_PIN_NO(106) | 5)
+#define PINMUX_GPIO106__FUNC_TP_GPIO6_AO (MTK_PIN_NO(106) | 6)
+#define PINMUX_GPIO106__FUNC_DBG_MON_B0 (MTK_PIN_NO(106) | 7)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_SCP_SPI3_CS (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_SPI3_B_CSB (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_MD_URXD0 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_TP_URXD1_VLP (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_CONN_BG_GPS_MCU_UART0_RXD (MTK_PIN_NO(107) | 5)
+#define PINMUX_GPIO107__FUNC_TP_GPIO7_AO (MTK_PIN_NO(107) | 6)
+#define PINMUX_GPIO107__FUNC_DBG_MON_B1 (MTK_PIN_NO(107) | 7)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_SCP_SPI3_MO (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_SPI3_B_MO (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_MD_UTXD1 (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_MD32PCM_UTXD_AO_VLP (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_CONN_BG_GPS_MCU_UART1_TXD (MTK_PIN_NO(108) | 5)
+#define PINMUX_GPIO108__FUNC_TP_GPIO8_AO (MTK_PIN_NO(108) | 6)
+#define PINMUX_GPIO108__FUNC_DBG_MON_B2 (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_SCP_SPI3_MI (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_SPI3_B_MI (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_MD_URXD1 (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_MD32PCM_URXD_AO_VLP (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_CONN_BG_GPS_MCU_UART1_RXD (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_TP_GPIO9_AO (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_DBG_MON_B3 (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_SPI1_CLK (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_PWM_0 (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_MD_UCTS0 (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_TP_UCTS1_VLP (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_SPU0_GPIO_O (MTK_PIN_NO(110) | 6)
+#define PINMUX_GPIO110__FUNC_DBG_MON_B4 (MTK_PIN_NO(110) | 7)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_SPI1_CSB (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_PWM_1 (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_MD_URTS0 (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_TP_URTS1_VLP (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_SPU0_GPIO_I (MTK_PIN_NO(111) | 6)
+#define PINMUX_GPIO111__FUNC_DBG_MON_B5 (MTK_PIN_NO(111) | 7)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_SPI1_MO (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_PWM_2 (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_MD_UCTS1 (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_SPU1_GPIO_O (MTK_PIN_NO(112) | 6)
+#define PINMUX_GPIO112__FUNC_DBG_MON_B6 (MTK_PIN_NO(112) | 7)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_SPI1_MI (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_PWM_3 (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_MD_URTS1 (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_SPU1_GPIO_I (MTK_PIN_NO(113) | 6)
+#define PINMUX_GPIO113__FUNC_DBG_MON_B7 (MTK_PIN_NO(113) | 7)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_SPI0_SPU_CLK (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_SPI4_A_CLK (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_CONN_BG_GPS_MCU_DBG_UART_TXD (MTK_PIN_NO(114) | 5)
+#define PINMUX_GPIO114__FUNC_DBG_MON_B8 (MTK_PIN_NO(114) | 7)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_SPI0_SPU_CSB (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_SPI4_A_CSB (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_DBG_MON_B9 (MTK_PIN_NO(115) | 7)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_SPI0_SPU_MO (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_SPI4_A_MO (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_LCM1_RST (MTK_PIN_NO(116) | 3)
+#define PINMUX_GPIO116__FUNC_DBG_MON_B10 (MTK_PIN_NO(116) | 7)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_SPI0_SPU_MI (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_SPI4_A_MI (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_DSI1_TE (MTK_PIN_NO(117) | 3)
+#define PINMUX_GPIO117__FUNC_DBG_MON_B11 (MTK_PIN_NO(117) | 7)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_SPI5_CLK (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_USB_DRVVBUS (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_DP_TX_HPD (MTK_PIN_NO(118) | 3)
+#define PINMUX_GPIO118__FUNC_AD_ILDO_DTEST0 (MTK_PIN_NO(118) | 4)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_SPI5_CSB (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_VBUSVALID (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_DP_OC_EN (MTK_PIN_NO(119) | 3)
+#define PINMUX_GPIO119__FUNC_AD_ILDO_DTEST1 (MTK_PIN_NO(119) | 4)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_SPI5_MO (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_LCM2_RST (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_DP_RAUX_SBU1 (MTK_PIN_NO(120) | 3)
+#define PINMUX_GPIO120__FUNC_AD_ILDO_DTEST2 (MTK_PIN_NO(120) | 4)
+#define PINMUX_GPIO120__FUNC_IDDIG_3P (MTK_PIN_NO(120) | 6)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_SPI5_MI (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_DSI2_TE (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_DP_RAUX_SBU2 (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_AD_ILDO_DTEST3 (MTK_PIN_NO(121) | 4)
+#define PINMUX_GPIO121__FUNC_USB_DRVVBUS_3P (MTK_PIN_NO(121) | 6)
+#define PINMUX_GPIO121__FUNC_DBG_MON_B17 (MTK_PIN_NO(121) | 7)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_AP_GOOD (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(122) | 2)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_SCL3 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_I2SIN2_LRCK (MTK_PIN_NO(123) | 5)
+#define PINMUX_GPIO123__FUNC_TP_UTXD_MD_VCORE (MTK_PIN_NO(123) | 6)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_SDA3 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_TP_URXD_MD_VCORE (MTK_PIN_NO(124) | 6)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_MSDC1_CLK (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(125) | 2)
+#define PINMUX_GPIO125__FUNC_HFRP_JTAG0_TCK (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_UDI_TCK (MTK_PIN_NO(125) | 4)
+#define PINMUX_GPIO125__FUNC_CONN_BGF_DSP_L1_JCK (MTK_PIN_NO(125) | 5)
+#define PINMUX_GPIO125__FUNC_SCP_JTAG_LITTLE_TCK_VLP (MTK_PIN_NO(125) | 6)
+#define PINMUX_GPIO125__FUNC_JTCK2_SEL1 (MTK_PIN_NO(125) | 7)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_MSDC1_CMD (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_HFRP_JTAG0_TMS (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_UDI_TMS (MTK_PIN_NO(126) | 4)
+#define PINMUX_GPIO126__FUNC_CONN_BGF_DSP_L1_JMS (MTK_PIN_NO(126) | 5)
+#define PINMUX_GPIO126__FUNC_SCP_JTAG_LITTLE_TMS_VLP (MTK_PIN_NO(126) | 6)
+#define PINMUX_GPIO126__FUNC_JTMS2_SEL1 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_MSDC1_DAT0 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_HFRP_JTAG0_TDI (MTK_PIN_NO(127) | 3)
+#define PINMUX_GPIO127__FUNC_UDI_TDI_0 (MTK_PIN_NO(127) | 4)
+#define PINMUX_GPIO127__FUNC_CONN_BGF_DSP_L1_JDI (MTK_PIN_NO(127) | 5)
+#define PINMUX_GPIO127__FUNC_SCP_JTAG_LITTLE_TDI_VLP (MTK_PIN_NO(127) | 6)
+#define PINMUX_GPIO127__FUNC_JTDI2_SEL1 (MTK_PIN_NO(127) | 7)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_MSDC1_DAT1 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_HFRP_JTAG0_TDO (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_UDI_TDO_0 (MTK_PIN_NO(128) | 4)
+#define PINMUX_GPIO128__FUNC_CONN_BGF_DSP_L1_JDO (MTK_PIN_NO(128) | 5)
+#define PINMUX_GPIO128__FUNC_SCP_JTAG_LITTLE_TDO_VLP (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_JTDO2_SEL1 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_MSDC1_DAT2 (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_DSI2_HSYNC (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_HFRP_JTAG0_TRSTN (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_UDI_NTRST (MTK_PIN_NO(129) | 4)
+#define PINMUX_GPIO129__FUNC_SCP_JTAG_LITTLE_TRSTN_VLP (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_JTRSTN2_SEL1 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_MSDC1_DAT3 (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_DSI3_HSYNC (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_CONN_BGF_DSP_L1_JINTP (MTK_PIN_NO(130) | 5)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_MCUPM_JTAG_TDI (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_CLKM0_A (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_CONN_BGF_DSP_L5_JDI (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_TSFDC_SCK (MTK_PIN_NO(131) | 6)
+#define PINMUX_GPIO131__FUNC_SCP_JTAG0_TDI_VCORE (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_MCUPM_JTAG_TMS (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_CLKM1_B (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_CONN_BGF_DSP_L5_JMS (MTK_PIN_NO(132) | 5)
+#define PINMUX_GPIO132__FUNC_TSFDC_SDI (MTK_PIN_NO(132) | 6)
+#define PINMUX_GPIO132__FUNC_SCP_JTAG0_TMS_VCORE (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_MCUPM_JTAG_TDO (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_CONN_BGF_DSP_L5_JDO (MTK_PIN_NO(133) | 5)
+#define PINMUX_GPIO133__FUNC_TSFDC_SCF (MTK_PIN_NO(133) | 6)
+#define PINMUX_GPIO133__FUNC_SCP_JTAG0_TDO_VCORE (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_TSFDC_26M (MTK_PIN_NO(134) | 6)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_MCUPM_JTAG_TCK (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_CONN_BGF_DSP_L5_JCK (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_TSFDC_SDO (MTK_PIN_NO(135) | 6)
+#define PINMUX_GPIO135__FUNC_SCP_JTAG0_TCK_VCORE (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_MCUPM_JTAG_TRSTN (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_CONN_BGF_DSP_L5_JINTP (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_TSFDC_FOUT (MTK_PIN_NO(136) | 6)
+#define PINMUX_GPIO136__FUNC_SCP_JTAG0_TRSTN_VCORE (MTK_PIN_NO(136) | 7)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_MIPI0_D_SCLK (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_BPI_BUS16 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_SPM_JTAG_TRSTN_VCORE (MTK_PIN_NO(137) | 6)
+#define PINMUX_GPIO137__FUNC_DBG_MON_A0 (MTK_PIN_NO(137) | 7)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_MIPI0_D_SDATA (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_BPI_BUS17 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_PCM0_LRCK (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_SPM_JTAG_TCK_VCORE (MTK_PIN_NO(138) | 6)
+#define PINMUX_GPIO138__FUNC_DBG_MON_A1 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_MIPI1_D_SCLK (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_BPI_BUS18 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_MD_GPS_BLANK (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_SPM_JTAG_TMS_VCORE (MTK_PIN_NO(139) | 6)
+#define PINMUX_GPIO139__FUNC_DBG_MON_A2 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_MIPI1_D_SDATA (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_BPI_BUS19 (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_MD_URXD1_CONN (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_SPM_JTAG_TDO_VCORE (MTK_PIN_NO(140) | 6)
+#define PINMUX_GPIO140__FUNC_DBG_MON_A3 (MTK_PIN_NO(140) | 7)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_MIPI2_D_SCLK (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_BPI_BUS20 (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_MD_UTXD1_CONN (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_SPM_JTAG_TDI_VCORE (MTK_PIN_NO(141) | 6)
+#define PINMUX_GPIO141__FUNC_DBG_MON_A4 (MTK_PIN_NO(141) | 7)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_MIPI2_D_SDATA (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_BPI_BUS21 (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_SSPM_JTAG_TRSTN_VCORE (MTK_PIN_NO(142) | 6)
+#define PINMUX_GPIO142__FUNC_DBG_MON_A5 (MTK_PIN_NO(142) | 7)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_MIPI3_D_SCLK (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_BPI_BUS22 (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_TP_UTXD_GNSS_VLP (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_MD_UTXD1_CONN (MTK_PIN_NO(143) | 5)
+#define PINMUX_GPIO143__FUNC_SSPM_JTAG_TCK_VCORE (MTK_PIN_NO(143) | 6)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_MIPI3_D_SDATA (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_BPI_BUS23 (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_TP_URXD_GNSS_VLP (MTK_PIN_NO(144) | 4)
+#define PINMUX_GPIO144__FUNC_MD_URXD1_CONN (MTK_PIN_NO(144) | 5)
+#define PINMUX_GPIO144__FUNC_SSPM_JTAG_TMS_VCORE (MTK_PIN_NO(144) | 6)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_BPI_BUS0 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_PCIE_WAKEN_1P (MTK_PIN_NO(145) | 4)
+#define PINMUX_GPIO145__FUNC_SSPM_JTAG_TDO_VCORE (MTK_PIN_NO(145) | 6)
+#define PINMUX_GPIO145__FUNC_DBG_MON_A10 (MTK_PIN_NO(145) | 7)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_BPI_BUS1 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_PCIE_PERSTN_1P (MTK_PIN_NO(146) | 4)
+#define PINMUX_GPIO146__FUNC_SSPM_JTAG_TDI_VCORE (MTK_PIN_NO(146) | 6)
+#define PINMUX_GPIO146__FUNC_DBG_MON_A11 (MTK_PIN_NO(146) | 7)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_BPI_BUS2 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_AUD_DAC_26M_CLK (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_PCIE_CLKREQN_1P (MTK_PIN_NO(147) | 4)
+#define PINMUX_GPIO147__FUNC_SCP_JTAG_LITTLE_TRSTN_VCORE (MTK_PIN_NO(147) | 6)
+#define PINMUX_GPIO147__FUNC_DBG_MON_A12 (MTK_PIN_NO(147) | 7)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_BPI_BUS3 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_AUD_DAC_26M_CLK (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_TP_UTXD_MD_VLP (MTK_PIN_NO(148) | 4)
+#define PINMUX_GPIO148__FUNC_TP_GPIO0_AO (MTK_PIN_NO(148) | 5)
+#define PINMUX_GPIO148__FUNC_SCP_JTAG_LITTLE_TCK_VCORE (MTK_PIN_NO(148) | 6)
+#define PINMUX_GPIO148__FUNC_DBG_MON_A13 (MTK_PIN_NO(148) | 7)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_BPI_BUS4 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_TP_URXD_MD_VLP (MTK_PIN_NO(149) | 4)
+#define PINMUX_GPIO149__FUNC_TP_GPIO1_AO (MTK_PIN_NO(149) | 5)
+#define PINMUX_GPIO149__FUNC_SCP_JTAG_LITTLE_TMS_VCORE (MTK_PIN_NO(149) | 6)
+#define PINMUX_GPIO149__FUNC_DBG_MON_A14 (MTK_PIN_NO(149) | 7)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_BPI_BUS5 (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_GPS_PPS0 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_TP_GPIO2_AO (MTK_PIN_NO(150) | 5)
+#define PINMUX_GPIO150__FUNC_SCP_JTAG_LITTLE_TDO_VCORE (MTK_PIN_NO(150) | 6)
+#define PINMUX_GPIO150__FUNC_DBG_MON_A15 (MTK_PIN_NO(150) | 7)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_BPI_BUS6 (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_GPS_PPS1 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_TP_GPIO3_AO (MTK_PIN_NO(151) | 5)
+#define PINMUX_GPIO151__FUNC_SCP_JTAG_LITTLE_TDI_VCORE (MTK_PIN_NO(151) | 6)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_BPI_BUS7 (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_EDP_TX_HPD (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_AGPS_SYNC (MTK_PIN_NO(152) | 5)
+#define PINMUX_GPIO152__FUNC_SSPM_UTXD_AO_VCORE (MTK_PIN_NO(152) | 6)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_MD_UCNT_A_TGL (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_TP_URTS1_VCORE (MTK_PIN_NO(153) | 6)
+#define PINMUX_GPIO153__FUNC_DBG_MON_A8 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_DIGRF_IRQ (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_TP_UCTS1_VCORE (MTK_PIN_NO(154) | 6)
+#define PINMUX_GPIO154__FUNC_DBG_MON_A9 (MTK_PIN_NO(154) | 7)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_MIPI_M_SCLK (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_UCTS2 (MTK_PIN_NO(155) | 4)
+#define PINMUX_GPIO155__FUNC_TP_UTXD_CONSYS_VCORE (MTK_PIN_NO(155) | 6)
+#define PINMUX_GPIO155__FUNC_DBG_MON_A6 (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_MIPI_M_SDATA (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_URTS2 (MTK_PIN_NO(156) | 4)
+#define PINMUX_GPIO156__FUNC_TP_URXD_CONSYS_VCORE (MTK_PIN_NO(156) | 6)
+#define PINMUX_GPIO156__FUNC_DBG_MON_A7 (MTK_PIN_NO(156) | 7)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_BPI_BUS8 (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_UTXD2 (MTK_PIN_NO(157) | 4)
+#define PINMUX_GPIO157__FUNC_CLKM0_A (MTK_PIN_NO(157) | 5)
+#define PINMUX_GPIO157__FUNC_SSPM_URXD_AO_VCORE (MTK_PIN_NO(157) | 6)
+#define PINMUX_GPIO157__FUNC_DBG_MON_A16 (MTK_PIN_NO(157) | 7)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_BPI_BUS9 (MTK_PIN_NO(158) | 1)
+#define PINMUX_GPIO158__FUNC_URXD2 (MTK_PIN_NO(158) | 4)
+#define PINMUX_GPIO158__FUNC_CLKM1_A (MTK_PIN_NO(158) | 5)
+#define PINMUX_GPIO158__FUNC_TP_UTXD1_VCORE (MTK_PIN_NO(158) | 6)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_BPI_BUS10 (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_MD_INT0 (MTK_PIN_NO(159) | 2)
+#define PINMUX_GPIO159__FUNC_SRCLKENAI1 (MTK_PIN_NO(159) | 3)
+#define PINMUX_GPIO159__FUNC_CLKM2_A (MTK_PIN_NO(159) | 5)
+#define PINMUX_GPIO159__FUNC_TP_URXD1_VCORE (MTK_PIN_NO(159) | 6)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_UTXD0 (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_MD_UTXD1 (MTK_PIN_NO(160) | 2)
+#define PINMUX_GPIO160__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(160) | 5)
+#define PINMUX_GPIO160__FUNC_CONN_BG_GPS_MCU_DBG_UART_TXD (MTK_PIN_NO(160) | 6)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_URXD0 (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_MD_URXD1 (MTK_PIN_NO(161) | 2)
+#define PINMUX_GPIO161__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(161) | 5)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_UTXD1 (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_MD_UTXD0 (MTK_PIN_NO(162) | 2)
+#define PINMUX_GPIO162__FUNC_TP_UTXD1_VLP (MTK_PIN_NO(162) | 3)
+#define PINMUX_GPIO162__FUNC_ADSP_UTXD0 (MTK_PIN_NO(162) | 4)
+#define PINMUX_GPIO162__FUNC_SSPM_UTXD_AO_VLP (MTK_PIN_NO(162) | 5)
+#define PINMUX_GPIO162__FUNC_HFRP_UTXD1 (MTK_PIN_NO(162) | 6)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_URXD1 (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_MD_URXD0 (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_TP_URXD1_VLP (MTK_PIN_NO(163) | 3)
+#define PINMUX_GPIO163__FUNC_ADSP_URXD0 (MTK_PIN_NO(163) | 4)
+#define PINMUX_GPIO163__FUNC_SSPM_URXD_AO_VLP (MTK_PIN_NO(163) | 5)
+#define PINMUX_GPIO163__FUNC_HFRP_URXD1 (MTK_PIN_NO(163) | 6)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_SCP_SCL0 (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_TP_GPIO0_AO (MTK_PIN_NO(164) | 6)
+#define PINMUX_GPIO164__FUNC_DBG_MON_A22 (MTK_PIN_NO(164) | 7)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_SCP_SDA0 (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_TP_GPIO1_AO (MTK_PIN_NO(165) | 6)
+#define PINMUX_GPIO165__FUNC_DBG_MON_A23 (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_SCP_SCL2 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_TP_GPIO2_AO (MTK_PIN_NO(166) | 6)
+#define PINMUX_GPIO166__FUNC_DBG_MON_A24 (MTK_PIN_NO(166) | 7)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_SCP_SDA2 (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_TP_GPIO3_AO (MTK_PIN_NO(167) | 6)
+#define PINMUX_GPIO167__FUNC_DBG_MON_A25 (MTK_PIN_NO(167) | 7)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_SCP_SPI2_CK (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_SPI2_B_CLK (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_PWM_VLP (MTK_PIN_NO(168) | 3)
+#define PINMUX_GPIO168__FUNC_SCP_SCL2 (MTK_PIN_NO(168) | 4)
+#define PINMUX_GPIO168__FUNC_DBG_MON_A26 (MTK_PIN_NO(168) | 7)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_SCP_SPI2_CS (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_SPI2_B_CSB (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_DBG_MON_A27 (MTK_PIN_NO(169) | 7)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_SCP_SPI2_MO (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_SPI2_B_MO (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_SCP_SDA2 (MTK_PIN_NO(170) | 4)
+#define PINMUX_GPIO170__FUNC_DBG_MON_A28 (MTK_PIN_NO(170) | 7)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_SCP_SPI2_MI (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_SPI2_B_MI (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_DBG_MON_A29 (MTK_PIN_NO(171) | 7)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(172) | 1)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_CMFLASH3 (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_PWM_3 (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_MD_GPS_L5_BLANK (MTK_PIN_NO(173) | 3)
+#define PINMUX_GPIO173__FUNC_CLKM1_A (MTK_PIN_NO(173) | 4)
+#define PINMUX_GPIO173__FUNC_DBG_MON_A31 (MTK_PIN_NO(173) | 7)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_CMFLASH0 (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_PWM_0 (MTK_PIN_NO(174) | 2)
+#define PINMUX_GPIO174__FUNC_VBUSVALID_1P (MTK_PIN_NO(174) | 3)
+#define PINMUX_GPIO174__FUNC_MD32_2_RXD (MTK_PIN_NO(174) | 4)
+#define PINMUX_GPIO174__FUNC_DISP_PWM3 (MTK_PIN_NO(174) | 5)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_CMFLASH1 (MTK_PIN_NO(175) | 1)
+#define PINMUX_GPIO175__FUNC_PWM_1 (MTK_PIN_NO(175) | 2)
+#define PINMUX_GPIO175__FUNC_EDP_TX_HPD (MTK_PIN_NO(175) | 3)
+#define PINMUX_GPIO175__FUNC_MD32_2_TXD (MTK_PIN_NO(175) | 4)
+#define PINMUX_GPIO175__FUNC_DISP_PWM4 (MTK_PIN_NO(175) | 5)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_SCL5 (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_LCM3_RST (MTK_PIN_NO(176) | 2)
+#define PINMUX_GPIO176__FUNC_MD_URXD1_CONN (MTK_PIN_NO(176) | 4)
+#define PINMUX_GPIO176__FUNC_TP_UTXD_GNSS_VCORE (MTK_PIN_NO(176) | 6)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_SDA5 (MTK_PIN_NO(177) | 1)
+#define PINMUX_GPIO177__FUNC_DSI3_TE (MTK_PIN_NO(177) | 2)
+#define PINMUX_GPIO177__FUNC_MD_UTXD1_CONN (MTK_PIN_NO(177) | 4)
+#define PINMUX_GPIO177__FUNC_TP_URXD_GNSS_VCORE (MTK_PIN_NO(177) | 6)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_DMIC_CLK (MTK_PIN_NO(178) | 1)
+#define PINMUX_GPIO178__FUNC_SCP_DMIC_CLK (MTK_PIN_NO(178) | 2)
+#define PINMUX_GPIO178__FUNC_SRCLKENAI0 (MTK_PIN_NO(178) | 3)
+#define PINMUX_GPIO178__FUNC_CLKM2_B (MTK_PIN_NO(178) | 4)
+#define PINMUX_GPIO178__FUNC_TP_GPIO7_AO (MTK_PIN_NO(178) | 5)
+#define PINMUX_GPIO178__FUNC_SPU1_UTX (MTK_PIN_NO(178) | 6)
+#define PINMUX_GPIO178__FUNC_DAP_SONIC_SWCK (MTK_PIN_NO(178) | 7)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_DMIC_DAT (MTK_PIN_NO(179) | 1)
+#define PINMUX_GPIO179__FUNC_SCP_DMIC_DAT (MTK_PIN_NO(179) | 2)
+#define PINMUX_GPIO179__FUNC_SRCLKENAI1 (MTK_PIN_NO(179) | 3)
+#define PINMUX_GPIO179__FUNC_CLKM3_B (MTK_PIN_NO(179) | 4)
+#define PINMUX_GPIO179__FUNC_TP_GPIO8_AO (MTK_PIN_NO(179) | 5)
+#define PINMUX_GPIO179__FUNC_SPU1_URX (MTK_PIN_NO(179) | 6)
+#define PINMUX_GPIO179__FUNC_DAP_SONIC_SWD (MTK_PIN_NO(179) | 7)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_IDDIG_1P (MTK_PIN_NO(180) | 1)
+#define PINMUX_GPIO180__FUNC_CMVREF0 (MTK_PIN_NO(180) | 2)
+#define PINMUX_GPIO180__FUNC_GPS_PPS1 (MTK_PIN_NO(180) | 3)
+#define PINMUX_GPIO180__FUNC_GPS_L5_ELNA_EN (MTK_PIN_NO(180) | 4)
+#define PINMUX_GPIO180__FUNC_DISP_PWM1 (MTK_PIN_NO(180) | 5)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_USB_DRVVBUS_1P (MTK_PIN_NO(181) | 1)
+#define PINMUX_GPIO181__FUNC_CMVREF1 (MTK_PIN_NO(181) | 2)
+#define PINMUX_GPIO181__FUNC_MFG_EB_JTAG_TRSTN (MTK_PIN_NO(181) | 3)
+#define PINMUX_GPIO181__FUNC_ADSP_JTAG1_TRSTN (MTK_PIN_NO(181) | 4)
+#define PINMUX_GPIO181__FUNC_HFRP_JTAG1_TRSTN (MTK_PIN_NO(181) | 5)
+#define PINMUX_GPIO181__FUNC_SPU1_NTRST (MTK_PIN_NO(181) | 6)
+#define PINMUX_GPIO181__FUNC_CONN_BG_GPS_MCU_TRST_B (MTK_PIN_NO(181) | 7)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_SCL11 (MTK_PIN_NO(182) | 1)
+#define PINMUX_GPIO182__FUNC_CMVREF2 (MTK_PIN_NO(182) | 2)
+#define PINMUX_GPIO182__FUNC_MFG_EB_JTAG_TCK (MTK_PIN_NO(182) | 3)
+#define PINMUX_GPIO182__FUNC_ADSP_JTAG1_TCK (MTK_PIN_NO(182) | 4)
+#define PINMUX_GPIO182__FUNC_HFRP_JTAG1_TCK (MTK_PIN_NO(182) | 5)
+#define PINMUX_GPIO182__FUNC_SPU1_TCK (MTK_PIN_NO(182) | 6)
+#define PINMUX_GPIO182__FUNC_CONN_BG_GPS_MCU_TCK (MTK_PIN_NO(182) | 7)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_SDA11 (MTK_PIN_NO(183) | 1)
+#define PINMUX_GPIO183__FUNC_CMVREF3 (MTK_PIN_NO(183) | 2)
+#define PINMUX_GPIO183__FUNC_MFG_EB_JTAG_TMS (MTK_PIN_NO(183) | 3)
+#define PINMUX_GPIO183__FUNC_ADSP_JTAG1_TMS (MTK_PIN_NO(183) | 4)
+#define PINMUX_GPIO183__FUNC_HFRP_JTAG1_TMS (MTK_PIN_NO(183) | 5)
+#define PINMUX_GPIO183__FUNC_SPU1_TMS (MTK_PIN_NO(183) | 6)
+#define PINMUX_GPIO183__FUNC_CONN_BG_GPS_MCU_TMS (MTK_PIN_NO(183) | 7)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_SCL12 (MTK_PIN_NO(184) | 1)
+#define PINMUX_GPIO184__FUNC_CMVREF4 (MTK_PIN_NO(184) | 2)
+#define PINMUX_GPIO184__FUNC_MFG_EB_JTAG_TDO (MTK_PIN_NO(184) | 3)
+#define PINMUX_GPIO184__FUNC_ADSP_JTAG1_TDO (MTK_PIN_NO(184) | 4)
+#define PINMUX_GPIO184__FUNC_HFRP_JTAG1_TDO (MTK_PIN_NO(184) | 5)
+#define PINMUX_GPIO184__FUNC_SPU1_TDO (MTK_PIN_NO(184) | 6)
+#define PINMUX_GPIO184__FUNC_CONN_BG_GPS_MCU_TDO (MTK_PIN_NO(184) | 7)
+
+#define PINMUX_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define PINMUX_GPIO185__FUNC_SDA12 (MTK_PIN_NO(185) | 1)
+#define PINMUX_GPIO185__FUNC_CMVREF5 (MTK_PIN_NO(185) | 2)
+#define PINMUX_GPIO185__FUNC_MFG_EB_JTAG_TDI (MTK_PIN_NO(185) | 3)
+#define PINMUX_GPIO185__FUNC_ADSP_JTAG1_TDI (MTK_PIN_NO(185) | 4)
+#define PINMUX_GPIO185__FUNC_HFRP_JTAG1_TDI (MTK_PIN_NO(185) | 5)
+#define PINMUX_GPIO185__FUNC_SPU1_TDI (MTK_PIN_NO(185) | 6)
+#define PINMUX_GPIO185__FUNC_CONN_BG_GPS_MCU_TDI (MTK_PIN_NO(185) | 7)
+
+#define PINMUX_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define PINMUX_GPIO186__FUNC_MD_GPS_L1_BLANK (MTK_PIN_NO(186) | 1)
+#define PINMUX_GPIO186__FUNC_PMSR_SMAP (MTK_PIN_NO(186) | 2)
+#define PINMUX_GPIO186__FUNC_TP_GPIO2_AO (MTK_PIN_NO(186) | 3)
+
+#define PINMUX_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define PINMUX_GPIO187__FUNC_MD_GPS_L5_BLANK (MTK_PIN_NO(187) | 1)
+#define PINMUX_GPIO187__FUNC_TP_GPIO4_AO (MTK_PIN_NO(187) | 3)
+
+#define PINMUX_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define PINMUX_GPIO188__FUNC_SCL2 (MTK_PIN_NO(188) | 1)
+#define PINMUX_GPIO188__FUNC_SCP_SCL8 (MTK_PIN_NO(188) | 2)
+
+#define PINMUX_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define PINMUX_GPIO189__FUNC_SDA2 (MTK_PIN_NO(189) | 1)
+#define PINMUX_GPIO189__FUNC_SCP_SDA8 (MTK_PIN_NO(189) | 2)
+
+#define PINMUX_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define PINMUX_GPIO190__FUNC_SCL4 (MTK_PIN_NO(190) | 1)
+#define PINMUX_GPIO190__FUNC_SCP_SCL9 (MTK_PIN_NO(190) | 2)
+#define PINMUX_GPIO190__FUNC_UDI_TDI_6 (MTK_PIN_NO(190) | 6)
+
+#define PINMUX_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define PINMUX_GPIO191__FUNC_SDA4 (MTK_PIN_NO(191) | 1)
+#define PINMUX_GPIO191__FUNC_SCP_SDA9 (MTK_PIN_NO(191) | 2)
+#define PINMUX_GPIO191__FUNC_UDI_TDI_7 (MTK_PIN_NO(191) | 6)
+
+#define PINMUX_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define PINMUX_GPIO192__FUNC_CMMCLK2 (MTK_PIN_NO(192) | 1)
+#define PINMUX_GPIO192__FUNC_MD32_3_RXD (MTK_PIN_NO(192) | 4)
+
+#define PINMUX_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define PINMUX_GPIO193__FUNC_CLKM0_B (MTK_PIN_NO(193) | 3)
+#define PINMUX_GPIO193__FUNC_MD32_3_TXD (MTK_PIN_NO(193) | 4)
+#define PINMUX_GPIO193__FUNC_UDI_TDO_7 (MTK_PIN_NO(193) | 6)
+
+#define PINMUX_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define PINMUX_GPIO194__FUNC_SCL7 (MTK_PIN_NO(194) | 1)
+#define PINMUX_GPIO194__FUNC_MD32_3_GPIO0 (MTK_PIN_NO(194) | 2)
+#define PINMUX_GPIO194__FUNC_CLKM2_B (MTK_PIN_NO(194) | 3)
+#define PINMUX_GPIO194__FUNC_UDI_TDI_2 (MTK_PIN_NO(194) | 6)
+
+#define PINMUX_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define PINMUX_GPIO195__FUNC_SDA7 (MTK_PIN_NO(195) | 1)
+#define PINMUX_GPIO195__FUNC_CLKM3_B (MTK_PIN_NO(195) | 3)
+#define PINMUX_GPIO195__FUNC_UDI_TDI_3 (MTK_PIN_NO(195) | 6)
+
+#define PINMUX_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define PINMUX_GPIO196__FUNC_CMMCLK3 (MTK_PIN_NO(196) | 1)
+
+#define PINMUX_GPIO197__FUNC_GPIO197 (MTK_PIN_NO(197) | 0)
+#define PINMUX_GPIO197__FUNC_CLKM1_B (MTK_PIN_NO(197) | 3)
+#define PINMUX_GPIO197__FUNC_UDI_TDI_1 (MTK_PIN_NO(197) | 6)
+
+#define PINMUX_GPIO198__FUNC_GPIO198 (MTK_PIN_NO(198) | 0)
+#define PINMUX_GPIO198__FUNC_SCL8 (MTK_PIN_NO(198) | 1)
+#define PINMUX_GPIO198__FUNC_UDI_TDI_4 (MTK_PIN_NO(198) | 6)
+
+#define PINMUX_GPIO199__FUNC_GPIO199 (MTK_PIN_NO(199) | 0)
+#define PINMUX_GPIO199__FUNC_SDA8 (MTK_PIN_NO(199) | 1)
+#define PINMUX_GPIO199__FUNC_UDI_TDI_5 (MTK_PIN_NO(199) | 6)
+
+#define PINMUX_GPIO200__FUNC_GPIO200 (MTK_PIN_NO(200) | 0)
+#define PINMUX_GPIO200__FUNC_SCL1 (MTK_PIN_NO(200) | 1)
+
+#define PINMUX_GPIO201__FUNC_GPIO201 (MTK_PIN_NO(201) | 0)
+#define PINMUX_GPIO201__FUNC_SDA1 (MTK_PIN_NO(201) | 1)
+#define PINMUX_GPIO201__FUNC_TSFDC_BG_COMP (MTK_PIN_NO(201) | 7)
+
+#define PINMUX_GPIO202__FUNC_GPIO202 (MTK_PIN_NO(202) | 0)
+#define PINMUX_GPIO202__FUNC_SCL9 (MTK_PIN_NO(202) | 1)
+#define PINMUX_GPIO202__FUNC_SCP_SCL7 (MTK_PIN_NO(202) | 2)
+#define PINMUX_GPIO202__FUNC_TP_GPIO15_AO (MTK_PIN_NO(202) | 6)
+
+#define PINMUX_GPIO203__FUNC_GPIO203 (MTK_PIN_NO(203) | 0)
+#define PINMUX_GPIO203__FUNC_SDA9 (MTK_PIN_NO(203) | 1)
+#define PINMUX_GPIO203__FUNC_SCP_SDA7 (MTK_PIN_NO(203) | 2)
+#define PINMUX_GPIO203__FUNC_TP_GPIO9_AO (MTK_PIN_NO(203) | 6)
+
+#define PINMUX_GPIO204__FUNC_GPIO204 (MTK_PIN_NO(204) | 0)
+#define PINMUX_GPIO204__FUNC_SCL13 (MTK_PIN_NO(204) | 1)
+#define PINMUX_GPIO204__FUNC_CMVREF6 (MTK_PIN_NO(204) | 2)
+#define PINMUX_GPIO204__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(204) | 3)
+#define PINMUX_GPIO204__FUNC_CLKM2_B (MTK_PIN_NO(204) | 5)
+#define PINMUX_GPIO204__FUNC_TP_GPIO12_AO (MTK_PIN_NO(204) | 6)
+
+#define PINMUX_GPIO205__FUNC_GPIO205 (MTK_PIN_NO(205) | 0)
+#define PINMUX_GPIO205__FUNC_SDA13 (MTK_PIN_NO(205) | 1)
+#define PINMUX_GPIO205__FUNC_CMVREF7 (MTK_PIN_NO(205) | 2)
+#define PINMUX_GPIO205__FUNC_GPS_L5_ELNA_EN (MTK_PIN_NO(205) | 3)
+#define PINMUX_GPIO205__FUNC_CLKM3_B (MTK_PIN_NO(205) | 5)
+#define PINMUX_GPIO205__FUNC_TP_GPIO13_AO (MTK_PIN_NO(205) | 6)
+
+#define PINMUX_GPIO206__FUNC_GPIO206 (MTK_PIN_NO(206) | 0)
+#define PINMUX_GPIO206__FUNC_MD32_2_GPIO0 (MTK_PIN_NO(206) | 2)
+#define PINMUX_GPIO206__FUNC_VBUSVALID (MTK_PIN_NO(206) | 5)
+#define PINMUX_GPIO206__FUNC_UDI_TDO_3 (MTK_PIN_NO(206) | 6)
+
+#define PINMUX_GPIO207__FUNC_GPIO207 (MTK_PIN_NO(207) | 0)
+#define PINMUX_GPIO207__FUNC_PCIE_WAKEN_2P (MTK_PIN_NO(207) | 1)
+#define PINMUX_GPIO207__FUNC_PMSR_SMAP_MAX (MTK_PIN_NO(207) | 2)
+#define PINMUX_GPIO207__FUNC_FMI2S_A_BCK (MTK_PIN_NO(207) | 4)
+#define PINMUX_GPIO207__FUNC_UDI_TDO_4 (MTK_PIN_NO(207) | 6)
+
+#define PINMUX_GPIO208__FUNC_GPIO208 (MTK_PIN_NO(208) | 0)
+#define PINMUX_GPIO208__FUNC_PCIE_CLKREQN_2P (MTK_PIN_NO(208) | 1)
+#define PINMUX_GPIO208__FUNC_PMSR_SMAP_MAX_W (MTK_PIN_NO(208) | 2)
+#define PINMUX_GPIO208__FUNC_FMI2S_A_LRCK (MTK_PIN_NO(208) | 4)
+#define PINMUX_GPIO208__FUNC_CLKM0_B (MTK_PIN_NO(208) | 5)
+#define PINMUX_GPIO208__FUNC_UDI_TDO_5 (MTK_PIN_NO(208) | 6)
+
+#define PINMUX_GPIO209__FUNC_GPIO209 (MTK_PIN_NO(209) | 0)
+#define PINMUX_GPIO209__FUNC_PCIE_PERSTN_2P (MTK_PIN_NO(209) | 1)
+#define PINMUX_GPIO209__FUNC_PMSR_SMAP (MTK_PIN_NO(209) | 2)
+#define PINMUX_GPIO209__FUNC_FMI2S_A_DI (MTK_PIN_NO(209) | 4)
+#define PINMUX_GPIO209__FUNC_CLKM1_B (MTK_PIN_NO(209) | 5)
+#define PINMUX_GPIO209__FUNC_UDI_TDO_6 (MTK_PIN_NO(209) | 6)
+
+#define PINMUX_GPIO210__FUNC_GPIO210 (MTK_PIN_NO(210) | 0)
+#define PINMUX_GPIO210__FUNC_CMMCLK4 (MTK_PIN_NO(210) | 1)
+
+#define PINMUX_GPIO211__FUNC_GPIO211 (MTK_PIN_NO(211) | 0)
+#define PINMUX_GPIO211__FUNC_CMMCLK5 (MTK_PIN_NO(211) | 1)
+#define PINMUX_GPIO211__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(211) | 2)
+
+#define PINMUX_GPIO212__FUNC_GPIO212 (MTK_PIN_NO(212) | 0)
+#define PINMUX_GPIO212__FUNC_CMMCLK6 (MTK_PIN_NO(212) | 1)
+#define PINMUX_GPIO212__FUNC_TP_GPIO10_AO (MTK_PIN_NO(212) | 2)
+#define PINMUX_GPIO212__FUNC_IDDIG (MTK_PIN_NO(212) | 5)
+#define PINMUX_GPIO212__FUNC_UDI_TDO_1 (MTK_PIN_NO(212) | 6)
+
+#define PINMUX_GPIO213__FUNC_GPIO213 (MTK_PIN_NO(213) | 0)
+#define PINMUX_GPIO213__FUNC_CMMCLK7 (MTK_PIN_NO(213) | 1)
+#define PINMUX_GPIO213__FUNC_TP_GPIO11_AO (MTK_PIN_NO(213) | 2)
+#define PINMUX_GPIO213__FUNC_USB_DRVVBUS (MTK_PIN_NO(213) | 5)
+#define PINMUX_GPIO213__FUNC_UDI_TDO_2 (MTK_PIN_NO(213) | 6)
+
+#define PINMUX_GPIO214__FUNC_GPIO214 (MTK_PIN_NO(214) | 0)
+#define PINMUX_GPIO214__FUNC_SCP_SCL3 (MTK_PIN_NO(214) | 1)
+#define PINMUX_GPIO214__FUNC_SDA14_E1 (MTK_PIN_NO(214) | 2)
+#define PINMUX_GPIO214__FUNC_SCL14_E2 (MTK_PIN_NO(214) | 2)
+#define PINMUX_GPIO214__FUNC_GBE1_MDC (MTK_PIN_NO(214) | 6)
+#define PINMUX_GPIO214__FUNC_GBE0_MDC (MTK_PIN_NO(214) | 7)
+
+#define PINMUX_GPIO215__FUNC_GPIO215 (MTK_PIN_NO(215) | 0)
+#define PINMUX_GPIO215__FUNC_SCP_SDA3 (MTK_PIN_NO(215) | 1)
+#define PINMUX_GPIO215__FUNC_SCL14_E1 (MTK_PIN_NO(215) | 2)
+#define PINMUX_GPIO215__FUNC_SDA14_E2 (MTK_PIN_NO(215) | 2)
+#define PINMUX_GPIO215__FUNC_GBE1_MDIO (MTK_PIN_NO(215) | 6)
+#define PINMUX_GPIO215__FUNC_GBE0_MDIO (MTK_PIN_NO(215) | 7)
+
+#define PINMUX_GPIO216__FUNC_GPIO216 (MTK_PIN_NO(216) | 0)
+#define PINMUX_GPIO216__FUNC_GPS_PPS0 (MTK_PIN_NO(216) | 1)
+
+#define PINMUX_GPIO217__FUNC_GPIO217 (MTK_PIN_NO(217) | 0)
+#define PINMUX_GPIO217__FUNC_KPROW0 (MTK_PIN_NO(217) | 1)
+#define PINMUX_GPIO217__FUNC_TP_GPIO12_AO (MTK_PIN_NO(217) | 6)
+
+#define PINMUX_GPIO218__FUNC_GPIO218 (MTK_PIN_NO(218) | 0)
+#define PINMUX_GPIO218__FUNC_KPROW1 (MTK_PIN_NO(218) | 1)
+#define PINMUX_GPIO218__FUNC_SPI0_WP (MTK_PIN_NO(218) | 2)
+#define PINMUX_GPIO218__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(218) | 3)
+#define PINMUX_GPIO218__FUNC_GPS_L5_ELNA_EN (MTK_PIN_NO(218) | 5)
+#define PINMUX_GPIO218__FUNC_TP_GPIO14_AO (MTK_PIN_NO(218) | 6)
+
+#define PINMUX_GPIO219__FUNC_GPIO219 (MTK_PIN_NO(219) | 0)
+#define PINMUX_GPIO219__FUNC_KPCOL1 (MTK_PIN_NO(219) | 1)
+#define PINMUX_GPIO219__FUNC_SPI0_HOLD (MTK_PIN_NO(219) | 2)
+#define PINMUX_GPIO219__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(219) | 3)
+#define PINMUX_GPIO219__FUNC_SPMI_M_TRIG_FLAG (MTK_PIN_NO(219) | 4)
+#define PINMUX_GPIO219__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(219) | 5)
+#define PINMUX_GPIO219__FUNC_SPM_JTAG_TRSTN_VLP (MTK_PIN_NO(219) | 6)
+#define PINMUX_GPIO219__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(219) | 7)
+
+#define PINMUX_GPIO220__FUNC_GPIO220 (MTK_PIN_NO(220) | 0)
+#define PINMUX_GPIO220__FUNC_SPI0_CLK (MTK_PIN_NO(220) | 1)
+#define PINMUX_GPIO220__FUNC_SPM_JTAG_TCK_VLP (MTK_PIN_NO(220) | 6)
+#define PINMUX_GPIO220__FUNC_JTCK_SEL1 (MTK_PIN_NO(220) | 7)
+
+#define PINMUX_GPIO221__FUNC_GPIO221 (MTK_PIN_NO(221) | 0)
+#define PINMUX_GPIO221__FUNC_SPI0_CSB (MTK_PIN_NO(221) | 1)
+#define PINMUX_GPIO221__FUNC_SPM_JTAG_TMS_VLP (MTK_PIN_NO(221) | 6)
+#define PINMUX_GPIO221__FUNC_JTMS_SEL1 (MTK_PIN_NO(221) | 7)
+
+#define PINMUX_GPIO222__FUNC_GPIO222 (MTK_PIN_NO(222) | 0)
+#define PINMUX_GPIO222__FUNC_SPI0_MO (MTK_PIN_NO(222) | 1)
+#define PINMUX_GPIO222__FUNC_SCP_SCL7 (MTK_PIN_NO(222) | 2)
+#define PINMUX_GPIO222__FUNC_SPM_JTAG_TDO_VLP (MTK_PIN_NO(222) | 6)
+#define PINMUX_GPIO222__FUNC_JTDO_SEL1 (MTK_PIN_NO(222) | 7)
+
+#define PINMUX_GPIO223__FUNC_GPIO223 (MTK_PIN_NO(223) | 0)
+#define PINMUX_GPIO223__FUNC_SPI0_MI (MTK_PIN_NO(223) | 1)
+#define PINMUX_GPIO223__FUNC_SCP_SDA7 (MTK_PIN_NO(223) | 2)
+#define PINMUX_GPIO223__FUNC_SPM_JTAG_TDI_VLP (MTK_PIN_NO(223) | 6)
+#define PINMUX_GPIO223__FUNC_JTDI_SEL1 (MTK_PIN_NO(223) | 7)
+
+#define PINMUX_GPIO224__FUNC_GPIO224 (MTK_PIN_NO(224) | 0)
+#define PINMUX_GPIO224__FUNC_MSDC2_CLK (MTK_PIN_NO(224) | 1)
+#define PINMUX_GPIO224__FUNC_DMIC2_CLK (MTK_PIN_NO(224) | 2)
+#define PINMUX_GPIO224__FUNC_GBE0_AUX_PPS0 (MTK_PIN_NO(224) | 3)
+#define PINMUX_GPIO224__FUNC_GBE0_TXER (MTK_PIN_NO(224) | 4)
+#define PINMUX_GPIO224__FUNC_GBE1_TXER (MTK_PIN_NO(224) | 5)
+#define PINMUX_GPIO224__FUNC_GBE1_AUX_PPS0 (MTK_PIN_NO(224) | 6)
+#define PINMUX_GPIO224__FUNC_MD32_1_TXD (MTK_PIN_NO(224) | 7)
+
+#define PINMUX_GPIO225__FUNC_GPIO225 (MTK_PIN_NO(225) | 0)
+#define PINMUX_GPIO225__FUNC_MSDC2_CMD (MTK_PIN_NO(225) | 1)
+#define PINMUX_GPIO225__FUNC_DMIC2_DAT (MTK_PIN_NO(225) | 2)
+#define PINMUX_GPIO225__FUNC_GBE0_AUX_PPS1 (MTK_PIN_NO(225) | 3)
+#define PINMUX_GPIO225__FUNC_GBE0_RXER (MTK_PIN_NO(225) | 4)
+#define PINMUX_GPIO225__FUNC_GBE1_RXER (MTK_PIN_NO(225) | 5)
+#define PINMUX_GPIO225__FUNC_GBE1_AUX_PPS1 (MTK_PIN_NO(225) | 6)
+#define PINMUX_GPIO225__FUNC_MD32_1_RXD (MTK_PIN_NO(225) | 7)
+
+#define PINMUX_GPIO226__FUNC_GPIO226 (MTK_PIN_NO(226) | 0)
+#define PINMUX_GPIO226__FUNC_MSDC2_DAT0 (MTK_PIN_NO(226) | 1)
+#define PINMUX_GPIO226__FUNC_I2SIN3_BCK (MTK_PIN_NO(226) | 2)
+#define PINMUX_GPIO226__FUNC_GBE0_AUX_PPS2 (MTK_PIN_NO(226) | 3)
+#define PINMUX_GPIO226__FUNC_GBE0_COL (MTK_PIN_NO(226) | 4)
+#define PINMUX_GPIO226__FUNC_GBE1_COL (MTK_PIN_NO(226) | 5)
+#define PINMUX_GPIO226__FUNC_GBE1_AUX_PPS2 (MTK_PIN_NO(226) | 6)
+#define PINMUX_GPIO226__FUNC_GBE1_MDC (MTK_PIN_NO(226) | 7)
+
+#define PINMUX_GPIO227__FUNC_GPIO227 (MTK_PIN_NO(227) | 0)
+#define PINMUX_GPIO227__FUNC_MSDC2_DAT1 (MTK_PIN_NO(227) | 1)
+#define PINMUX_GPIO227__FUNC_I2SIN3_LRCK (MTK_PIN_NO(227) | 2)
+#define PINMUX_GPIO227__FUNC_GBE0_AUX_PPS3 (MTK_PIN_NO(227) | 3)
+#define PINMUX_GPIO227__FUNC_GBE0_INTR (MTK_PIN_NO(227) | 4)
+#define PINMUX_GPIO227__FUNC_GBE1_INTR (MTK_PIN_NO(227) | 5)
+#define PINMUX_GPIO227__FUNC_GBE1_AUX_PPS3 (MTK_PIN_NO(227) | 6)
+#define PINMUX_GPIO227__FUNC_GBE1_MDIO (MTK_PIN_NO(227) | 7)
+
+#define PINMUX_GPIO228__FUNC_GPIO228 (MTK_PIN_NO(228) | 0)
+#define PINMUX_GPIO228__FUNC_MSDC2_DAT2 (MTK_PIN_NO(228) | 1)
+#define PINMUX_GPIO228__FUNC_I2SIN3_DI (MTK_PIN_NO(228) | 2)
+#define PINMUX_GPIO228__FUNC_GBE0_MDC (MTK_PIN_NO(228) | 3)
+#define PINMUX_GPIO228__FUNC_GBE1_MDC (MTK_PIN_NO(228) | 4)
+#define PINMUX_GPIO228__FUNC_CONN_BG_GPS_MCU_AICE_TCKC (MTK_PIN_NO(228) | 5)
+
+#define PINMUX_GPIO229__FUNC_GPIO229 (MTK_PIN_NO(229) | 0)
+#define PINMUX_GPIO229__FUNC_MSDC2_DAT3 (MTK_PIN_NO(229) | 1)
+#define PINMUX_GPIO229__FUNC_I2SOUT3_DO (MTK_PIN_NO(229) | 2)
+#define PINMUX_GPIO229__FUNC_GBE0_MDIO (MTK_PIN_NO(229) | 3)
+#define PINMUX_GPIO229__FUNC_GBE1_MDIO (MTK_PIN_NO(229) | 4)
+#define PINMUX_GPIO229__FUNC_CONN_BG_GPS_MCU_AICE_TMSC (MTK_PIN_NO(229) | 5)
+#define PINMUX_GPIO229__FUNC_AVB_CLK2 (MTK_PIN_NO(229) | 7)
+
+#define PINMUX_GPIO230__FUNC_GPIO230 (MTK_PIN_NO(230) | 0)
+#define PINMUX_GPIO230__FUNC_CONN_TOP_CLK (MTK_PIN_NO(230) | 1)
+
+#define PINMUX_GPIO231__FUNC_GPIO231 (MTK_PIN_NO(231) | 0)
+#define PINMUX_GPIO231__FUNC_CONN_TOP_DATA (MTK_PIN_NO(231) | 1)
+
+#define PINMUX_GPIO232__FUNC_GPIO232 (MTK_PIN_NO(232) | 0)
+#define PINMUX_GPIO232__FUNC_CONN_HRST_B (MTK_PIN_NO(232) | 1)
+
+#define PINMUX_GPIO233__FUNC_GPIO233 (MTK_PIN_NO(233) | 0)
+#define PINMUX_GPIO233__FUNC_I2SIN0_BCK (MTK_PIN_NO(233) | 1)
+
+#define PINMUX_GPIO234__FUNC_GPIO234 (MTK_PIN_NO(234) | 0)
+#define PINMUX_GPIO234__FUNC_I2SIN0_LRCK (MTK_PIN_NO(234) | 1)
+
+#define PINMUX_GPIO235__FUNC_GPIO235 (MTK_PIN_NO(235) | 0)
+#define PINMUX_GPIO235__FUNC_I2SIN0_DI (MTK_PIN_NO(235) | 1)
+
+#define PINMUX_GPIO236__FUNC_GPIO236 (MTK_PIN_NO(236) | 0)
+#define PINMUX_GPIO236__FUNC_I2SOUT0_DO (MTK_PIN_NO(236) | 1)
+
+#define PINMUX_GPIO237__FUNC_GPIO237 (MTK_PIN_NO(237) | 0)
+#define PINMUX_GPIO237__FUNC_CONN_UARTHUB_UART_TX (MTK_PIN_NO(237) | 1)
+#define PINMUX_GPIO237__FUNC_UTXD3 (MTK_PIN_NO(237) | 3)
+
+#define PINMUX_GPIO238__FUNC_GPIO238 (MTK_PIN_NO(238) | 0)
+#define PINMUX_GPIO238__FUNC_CONN_UARTHUB_UART_RX (MTK_PIN_NO(238) | 1)
+#define PINMUX_GPIO238__FUNC_URXD3 (MTK_PIN_NO(238) | 3)
+
+#define PINMUX_GPIO239__FUNC_GPIO239 (MTK_PIN_NO(239) | 0)
+#define PINMUX_GPIO239__FUNC_TP_UTXD_CONSYS_VLP (MTK_PIN_NO(239) | 1)
+#define PINMUX_GPIO239__FUNC_TP_URXD_CONSYS_VLP (MTK_PIN_NO(239) | 2)
+
+#define PINMUX_GPIO240__FUNC_GPIO240 (MTK_PIN_NO(240) | 0)
+#define PINMUX_GPIO240__FUNC_TP_URXD_CONSYS_VLP (MTK_PIN_NO(240) | 1)
+#define PINMUX_GPIO240__FUNC_TP_UTXD_CONSYS_VLP (MTK_PIN_NO(240) | 2)
+
+#define PINMUX_GPIO241__FUNC_GPIO241 (MTK_PIN_NO(241) | 0)
+#define PINMUX_GPIO241__FUNC_PCIE_PERSTN (MTK_PIN_NO(241) | 1)
+
+#define PINMUX_GPIO242__FUNC_GPIO242 (MTK_PIN_NO(242) | 0)
+#define PINMUX_GPIO242__FUNC_PCIE_WAKEN (MTK_PIN_NO(242) | 1)
+
+#define PINMUX_GPIO243__FUNC_GPIO243 (MTK_PIN_NO(243) | 0)
+#define PINMUX_GPIO243__FUNC_PCIE_CLKREQN (MTK_PIN_NO(243) | 1)
+
+#define PINMUX_GPIO244__FUNC_GPIO244 (MTK_PIN_NO(244) | 0)
+#define PINMUX_GPIO244__FUNC_CONN_RST (MTK_PIN_NO(244) | 1)
+
+#define PINMUX_GPIO245__FUNC_GPIO245 (MTK_PIN_NO(245) | 0)
+
+#define PINMUX_GPIO246__FUNC_GPIO246 (MTK_PIN_NO(246) | 0)
+#define PINMUX_GPIO246__FUNC_CONN_PTA_TXD0 (MTK_PIN_NO(246) | 1)
+
+#define PINMUX_GPIO247__FUNC_GPIO247 (MTK_PIN_NO(247) | 0)
+#define PINMUX_GPIO247__FUNC_CONN_PTA_RXD0 (MTK_PIN_NO(247) | 1)
+
+#define PINMUX_GPIO248__FUNC_GPIO248 (MTK_PIN_NO(248) | 0)
+#define PINMUX_GPIO248__FUNC_UCTS3 (MTK_PIN_NO(248) | 3)
+
+#define PINMUX_GPIO249__FUNC_GPIO249 (MTK_PIN_NO(249) | 0)
+#define PINMUX_GPIO249__FUNC_URTS3 (MTK_PIN_NO(249) | 3)
+
+#define PINMUX_GPIO250__FUNC_GPIO250 (MTK_PIN_NO(250) | 0)
+
+#define PINMUX_GPIO251__FUNC_GPIO251 (MTK_PIN_NO(251) | 0)
+#define PINMUX_GPIO251__FUNC_IDDIG_1P (MTK_PIN_NO(251) | 1)
+
+#define PINMUX_GPIO252__FUNC_GPIO252 (MTK_PIN_NO(252) | 0)
+#define PINMUX_GPIO252__FUNC_USB_DRVVBUS_1P (MTK_PIN_NO(252) | 1)
+
+#define PINMUX_GPIO253__FUNC_GPIO253 (MTK_PIN_NO(253) | 0)
+#define PINMUX_GPIO253__FUNC_VBUSVALID_1P (MTK_PIN_NO(253) | 1)
+
+#define PINMUX_GPIO254__FUNC_GPIO254 (MTK_PIN_NO(254) | 0)
+#define PINMUX_GPIO254__FUNC_IDDIG_2P (MTK_PIN_NO(254) | 1)
+
+#define PINMUX_GPIO255__FUNC_GPIO255 (MTK_PIN_NO(255) | 0)
+#define PINMUX_GPIO255__FUNC_USB_DRVVBUS_2P (MTK_PIN_NO(255) | 1)
+
+#define PINMUX_GPIO256__FUNC_GPIO256 (MTK_PIN_NO(256) | 0)
+#define PINMUX_GPIO256__FUNC_VBUSVALID_2P (MTK_PIN_NO(256) | 1)
+
+#define PINMUX_GPIO257__FUNC_GPIO257 (MTK_PIN_NO(257) | 0)
+#define PINMUX_GPIO257__FUNC_VBUSVALID_3P (MTK_PIN_NO(257) | 1)
+
+#define PINMUX_GPIO258__FUNC_GPIO258 (MTK_PIN_NO(258) | 0)
+#define PINMUX_GPIO258__FUNC_AVB_CLK1 (MTK_PIN_NO(258) | 7)
+
+#define PINMUX_GPIO259__FUNC_GPIO259 (MTK_PIN_NO(259) | 0)
+#define PINMUX_GPIO259__FUNC_GBE0_TXD0 (MTK_PIN_NO(259) | 1)
+#define PINMUX_GPIO259__FUNC_GBE1_TXD0 (MTK_PIN_NO(259) | 2)
+
+#define PINMUX_GPIO260__FUNC_GPIO260 (MTK_PIN_NO(260) | 0)
+#define PINMUX_GPIO260__FUNC_GBE0_TXD1 (MTK_PIN_NO(260) | 1)
+#define PINMUX_GPIO260__FUNC_GBE1_TXD1 (MTK_PIN_NO(260) | 2)
+
+#define PINMUX_GPIO261__FUNC_GPIO261 (MTK_PIN_NO(261) | 0)
+#define PINMUX_GPIO261__FUNC_GBE0_TXC (MTK_PIN_NO(261) | 1)
+#define PINMUX_GPIO261__FUNC_GBE1_TXC (MTK_PIN_NO(261) | 2)
+
+#define PINMUX_GPIO262__FUNC_GPIO262 (MTK_PIN_NO(262) | 0)
+#define PINMUX_GPIO262__FUNC_GBE0_TXEN (MTK_PIN_NO(262) | 1)
+#define PINMUX_GPIO262__FUNC_GBE1_TXEN (MTK_PIN_NO(262) | 2)
+
+#define PINMUX_GPIO263__FUNC_GPIO263 (MTK_PIN_NO(263) | 0)
+#define PINMUX_GPIO263__FUNC_GBE0_RXD0 (MTK_PIN_NO(263) | 1)
+#define PINMUX_GPIO263__FUNC_GBE1_RXD0 (MTK_PIN_NO(263) | 2)
+#define PINMUX_GPIO263__FUNC_GBE0_AUX_PPS0 (MTK_PIN_NO(263) | 3)
+
+#define PINMUX_GPIO264__FUNC_GPIO264 (MTK_PIN_NO(264) | 0)
+#define PINMUX_GPIO264__FUNC_GBE0_RXD1 (MTK_PIN_NO(264) | 1)
+#define PINMUX_GPIO264__FUNC_GBE1_RXD1 (MTK_PIN_NO(264) | 2)
+#define PINMUX_GPIO264__FUNC_GBE0_AUX_PPS1 (MTK_PIN_NO(264) | 3)
+
+#define PINMUX_GPIO265__FUNC_GPIO265 (MTK_PIN_NO(265) | 0)
+#define PINMUX_GPIO265__FUNC_GBE0_RXC (MTK_PIN_NO(265) | 1)
+#define PINMUX_GPIO265__FUNC_GBE1_RXC (MTK_PIN_NO(265) | 2)
+#define PINMUX_GPIO265__FUNC_GBE0_AUX_PPS2 (MTK_PIN_NO(265) | 3)
+
+#define PINMUX_GPIO266__FUNC_GPIO266 (MTK_PIN_NO(266) | 0)
+#define PINMUX_GPIO266__FUNC_GBE0_RXDV (MTK_PIN_NO(266) | 1)
+#define PINMUX_GPIO266__FUNC_GBE1_RXDV (MTK_PIN_NO(266) | 2)
+#define PINMUX_GPIO266__FUNC_GBE0_AUX_PPS3 (MTK_PIN_NO(266) | 3)
+
+#define PINMUX_GPIO267__FUNC_GPIO267 (MTK_PIN_NO(267) | 0)
+#define PINMUX_GPIO267__FUNC_GBE0_TXD2 (MTK_PIN_NO(267) | 1)
+#define PINMUX_GPIO267__FUNC_GBE1_TXD2 (MTK_PIN_NO(267) | 2)
+#define PINMUX_GPIO267__FUNC_GBE0_RXER (MTK_PIN_NO(267) | 3)
+#define PINMUX_GPIO267__FUNC_GBE1_RXER (MTK_PIN_NO(267) | 4)
+
+#define PINMUX_GPIO268__FUNC_GPIO268 (MTK_PIN_NO(268) | 0)
+#define PINMUX_GPIO268__FUNC_GBE0_TXD3 (MTK_PIN_NO(268) | 1)
+#define PINMUX_GPIO268__FUNC_GBE1_TXD3 (MTK_PIN_NO(268) | 2)
+
+#define PINMUX_GPIO269__FUNC_GPIO269 (MTK_PIN_NO(269) | 0)
+#define PINMUX_GPIO269__FUNC_GBE0_RXD2 (MTK_PIN_NO(269) | 1)
+#define PINMUX_GPIO269__FUNC_GBE1_RXD2 (MTK_PIN_NO(269) | 2)
+#define PINMUX_GPIO269__FUNC_GBE0_MDC (MTK_PIN_NO(269) | 3)
+
+#define PINMUX_GPIO270__FUNC_GPIO270 (MTK_PIN_NO(270) | 0)
+#define PINMUX_GPIO270__FUNC_GBE0_RXD3 (MTK_PIN_NO(270) | 1)
+#define PINMUX_GPIO270__FUNC_GBE1_RXD3 (MTK_PIN_NO(270) | 2)
+#define PINMUX_GPIO270__FUNC_GBE0_MDIO (MTK_PIN_NO(270) | 3)
+
+#endif /* __MT8196_PINFUNC_H */
diff --git a/arch/arm64/boot/dts/mediatek/mt8365-evk.dts b/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
index 1f8584bd66c3..c8418888268d 100644
--- a/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
@@ -78,6 +78,21 @@
enable-active-high;
};
+ reg_vsys: regulator-vsys {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ touch0_fixed_3v3: regulator-vio33tp {
+ compatible = "regulator-fixed";
+ regulator-name = "vio33_tp";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&reg_vsys>;
+ };
+
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
@@ -324,6 +339,18 @@
};
};
};
+
+ touchscreen@5d {
+ compatible = "goodix,gt9271";
+ reg = <0x5d>;
+ interrupts-extended = <&pio 78 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touch_pins>;
+ irq-gpios = <&pio 78 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 79 GPIO_ACTIVE_LOW>;
+ AVDD28-supply = <&touch0_fixed_3v3>;
+ VDDIO-supply = <&mt6357_vrf12_reg>;
+ };
};
&mmc0 {
@@ -650,6 +677,19 @@
};
};
+ touch_pins: touch-pins {
+ ctp-int1-pins {
+ pinmux = <MT8365_PIN_78_CMHSYNC__FUNC_GPIO78>;
+ input-enable;
+ bias-disable;
+ };
+
+ rst-pins {
+ pinmux = <MT8365_PIN_79_CMVSYNC__FUNC_GPIO79>;
+ output-low;
+ };
+ };
+
uart0_pins: uart0-pins {
pins {
pinmux = <MT8365_PIN_35_URXD0__FUNC_URXD0>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8390-genio-common.dtsi b/arch/arm64/boot/dts/mediatek/mt8390-genio-common.dtsi
index 60139e6dffd8..eaf45d42cd34 100644
--- a/arch/arm64/boot/dts/mediatek/mt8390-genio-common.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8390-genio-common.dtsi
@@ -21,6 +21,7 @@
/ {
aliases {
+ dsi0 = &disp_dsi0;
ethernet0 = &eth;
i2c0 = &i2c0;
i2c1 = &i2c1;
@@ -34,6 +35,15 @@
serial0 = &uart0;
};
+ backlight_lcm1: backlight-lcm1 {
+ compatible = "pwm-backlight";
+ brightness-levels = <0 1023>;
+ default-brightness-level = <576>;
+ num-interpolated-steps = <1023>;
+ power-supply = <&reg_vsys>;
+ pwms = <&disp_pwm1 0 500000>;
+ };
+
chosen {
stdout-path = "serial0:921600n8";
};
@@ -227,6 +237,28 @@
regulator-max-microvolt = <5000000>;
enable-active-high;
};
+
+ lcm1_iovcc: regulator-vio18-lcm1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vio18_lcm1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ enable-active-high;
+ gpio = <&pio 111 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dsi0_vreg_en_pins>;
+ vin-supply = <&reg_vsys>;
+ };
+
+ lcm1_vddp: regulator-vsys-lcm1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_lcm1";
+ regulator-min-microvolt = <4200000>;
+ regulator-max-microvolt = <4200000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&reg_vsys>;
+ };
};
&adsp {
@@ -239,6 +271,67 @@
status = "okay";
};
+&disp_dsi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ panel@0 {
+ compatible = "startek,kd070fhfid078", "himax,hx8279";
+ reg = <0>;
+ backlight = <&backlight_lcm1>;
+ enable-gpios = <&pio 45 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 25 GPIO_ACTIVE_HIGH>;
+ iovcc-supply = <&lcm1_iovcc>;
+ vdd-supply = <&lcm1_vddp>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_default_pins>;
+
+ port {
+ dsi_panel_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&dither0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ remote-endpoint = <&dsi_panel_in>;
+ };
+ };
+ };
+};
+
+&disp_pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&disp_pwm1_pins>;
+ status = "okay";
+};
+
+&dither0_in {
+ remote-endpoint = <&postmask0_out>;
+};
+
+&dither0_out {
+ remote-endpoint = <&dsi0_in>;
+};
+
+&gamma0_out {
+ remote-endpoint = <&postmask0_in>;
+};
+
&gpu {
mali-supply = <&mt6359_vproc2_buck_reg>;
status = "okay";
@@ -390,6 +483,10 @@
domain-supply = <&mt6359_vsram_others_ldo_reg>;
};
+&mipi_tx_config0 {
+ status = "okay";
+};
+
&mmc0 {
status = "okay";
pinctrl-names = "default", "state_uhs";
@@ -499,9 +596,13 @@
mediatek,mic-type-1 = <3>; /* DCC */
};
+&ovl0_in {
+ remote-endpoint = <&vdosys0_ep_main>;
+};
+
&pcie {
pinctrl-names = "default";
- pinctrl-0 = <&pcie_pins_default>;
+ pinctrl-0 = <&pcie_default_pins>;
status = "okay";
};
@@ -537,6 +638,12 @@
};
};
+ disp_pwm1_pins: disp-pwm1-pins {
+ pins-pwm {
+ pinmux = <PINMUX_GPIO30__FUNC_O_DISP_PWM1>;
+ };
+ };
+
dptx_pins: dptx-pins {
pins-cmd-dat {
pinmux = <PINMUX_GPIO46__FUNC_I0_DP_TX_HPD>;
@@ -857,25 +964,27 @@
};
};
- panel_default_pins: panel-default-pins {
- pins-dcdc {
- pinmux = <PINMUX_GPIO45__FUNC_B_GPIO45>;
- output-low;
- };
-
- pins-en {
+ dsi0_vreg_en_pins: dsi0-vreg-en-pins {
+ pins-pwr-en {
pinmux = <PINMUX_GPIO111__FUNC_B_GPIO111>;
output-low;
};
+ };
+ panel_default_pins: panel-default-pins {
pins-rst {
pinmux = <PINMUX_GPIO25__FUNC_B_GPIO25>;
- output-high;
+ output-low;
+ };
+
+ pins-en {
+ pinmux = <PINMUX_GPIO45__FUNC_B_GPIO45>;
+ output-low;
};
};
- pcie_pins_default: pcie-default {
- mux {
+ pcie_default_pins: pcie-default-pins {
+ pins {
pinmux = <PINMUX_GPIO47__FUNC_I1_WAKEN>,
<PINMUX_GPIO48__FUNC_O_PERSTN>,
<PINMUX_GPIO49__FUNC_B1_CLKREQN>;
@@ -1055,7 +1164,19 @@
};
};
-&scp {
+&postmask0_in {
+ remote-endpoint = <&gamma0_out>;
+};
+
+&postmask0_out {
+ remote-endpoint = <&dither0_in>;
+};
+
+&scp_cluster {
+ status = "okay";
+};
+
+&scp_c0 {
memory-region = <&scp_mem>;
status = "okay";
};
@@ -1119,6 +1240,18 @@
status = "okay";
};
+&vdosys0 {
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdosys0_ep_main: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&ovl0_in>;
+ };
+ };
+};
+
&u3phy0 {
status = "okay";
};
@@ -1199,8 +1332,18 @@
};
&ssusb2 {
+ /*
+	 * There is a single ssusb2 controller, but it serves two ports: one is
+	 * routed to the M.2 slot, the other to the RPi header, which supports
+	 * full OTG.
+	 * As the controller is shared between them, the role switch default
+	 * mode is set to host so that a peripheral inserted in the M.2 slot
+	 * (i.e. a BT/WIFI module) is detected while the other port is
+	 * unused.
+ */
dr_mode = "otg";
maximum-speed = "high-speed";
+ role-switch-default-mode = "host";
usb-role-switch;
vusb33-supply = <&mt6359_vusb_ldo_reg>;
wakeup-source;
@@ -1211,7 +1354,7 @@
connector {
compatible = "gpio-usb-b-connector", "usb-b-connector";
type = "micro";
- id-gpios = <&pio 89 GPIO_ACTIVE_HIGH>;
+ id-gpios = <&pio 89 GPIO_ACTIVE_LOW>;
vbus-supply = <&usb_p2_vbus>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
index f02c32def593..be5e5f339e81 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
@@ -91,13 +91,12 @@
};
};
- backlight_lcd0: backlight-lcd0 {
+ backlight_lcm0: backlight-lcm0 {
compatible = "pwm-backlight";
- pwms = <&disp_pwm0 0 500000>;
- enable-gpios = <&pio 47 GPIO_ACTIVE_HIGH>;
brightness-levels = <0 1023>;
- num-interpolated-steps = <1023>;
default-brightness-level = <576>;
+ num-interpolated-steps = <1023>;
+ pwms = <&disp_pwm0 0 500000>;
};
backlight_lcd1: backlight-lcd1 {
@@ -107,6 +106,7 @@
brightness-levels = <0 1023>;
num-interpolated-steps = <1023>;
default-brightness-level = <576>;
+ status = "disabled";
};
can_clk: can-clk {
@@ -150,6 +150,24 @@
};
};
+ lcm0_iovcc: regulator-vio18-lcm0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vio18_lcm0";
+ enable-active-high;
+ gpio = <&pio 47 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dsi0_vreg_en_pins>;
+ vin-supply = <&mt6360_ldo2>;
+ };
+
+ lcm0_vddp: regulator-vsys-lcm0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_lcm0";
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&mt6360_ldo1>;
+ };
+
wifi_fixed_3v3: regulator-2 {
compatible = "regulator-fixed";
regulator-name = "wifi_3v3";
@@ -163,14 +181,65 @@
&disp_pwm0 {
pinctrl-names = "default";
- pinctrl-0 = <&pwm0_default_pins>;
+ pinctrl-0 = <&disp_pwm0_pins>;
status = "okay";
};
+&dither0_in {
+ remote-endpoint = <&gamma0_out>;
+};
+
+&dither0_out {
+ remote-endpoint = <&dsi0_in>;
+};
+
&dmic_codec {
wakeup-delay-ms = <200>;
};
+&dsi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ panel@0 {
+ compatible = "startek,kd070fhfid078", "himax,hx8279";
+ reg = <0>;
+ backlight = <&backlight_lcm0>;
+ enable-gpios = <&pio 48 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 108 GPIO_ACTIVE_HIGH>;
+ iovcc-supply = <&lcm0_iovcc>;
+ vdd-supply = <&lcm0_vddp>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_default_pins>;
+
+ port {
+ dsi_panel_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&dither0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ remote-endpoint = <&dsi_panel_in>;
+ };
+ };
+ };
+};
+
&eth {
phy-mode ="rgmii-rxid";
phy-handle = <&eth_phy0>;
@@ -194,6 +263,10 @@
};
};
+&gamma0_out {
+ remote-endpoint = <&dither0_in>;
+};
+
&gpu {
mali-supply = <&mt6315_7_vbuck1>;
status = "okay";
@@ -418,6 +491,10 @@
domain-supply = <&mt6359_vsram_others_ldo_reg>;
};
+&mipi_tx0 {
+ status = "okay";
+};
+
&mmc0 {
status = "okay";
pinctrl-names = "default", "state_uhs";
@@ -500,6 +577,10 @@
mediatek,mic-type-2 = <1>; /* ACC */
};
+&ovl0_in {
+ remote-endpoint = <&vdosys0_ep_main>;
+};
+
&pcie0 {
pinctrl-names = "default", "idle";
pinctrl-0 = <&pcie0_default_pins>;
@@ -777,6 +858,25 @@
};
};
+ dsi0_vreg_en_pins: dsi0-vreg-en-pins {
+ pins-pwr-en {
+ pinmux = <PINMUX_GPIO47__FUNC_GPIO47>;
+ output-low;
+ };
+ };
+
+ panel_default_pins: panel-default-pins {
+ pins-rst {
+ pinmux = <PINMUX_GPIO108__FUNC_GPIO108>;
+ output-high;
+ };
+
+ pins-en {
+ pinmux = <PINMUX_GPIO48__FUNC_GPIO48>;
+ output-low;
+ };
+ };
+
pcie0_default_pins: pcie0-default-pins {
pins {
pinmux = <PINMUX_GPIO19__FUNC_WAKEN>,
@@ -803,8 +903,8 @@
};
};
- pwm0_default_pins: pwm0-default-pins {
- pins-cmd-dat {
+ disp_pwm0_pins: disp-pwm0-pins {
+ pins-disp-pwm {
pinmux = <PINMUX_GPIO97__FUNC_DISP_PWM0>;
};
};
@@ -872,6 +972,7 @@
&scp {
memory-region = <&scp_mem>;
+ firmware-name = "mediatek/mt8195/scp.img";
status = "okay";
};
@@ -1014,6 +1115,18 @@
status = "okay";
};
+&vdosys0 {
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdosys0_ep_main: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&ovl0_in>;
+ };
+ };
+};
+
&xhci0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
index 1c922e98441a..329c60cc6a6b 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
@@ -139,9 +139,21 @@
no-map;
};
- afe_mem: memory@60000000 {
+ adsp_mem: memory@60000000 {
compatible = "shared-dma-pool";
- reg = <0 0x60000000 0 0x1100000>;
+ reg = <0 0x60000000 0 0xf00000>;
+ no-map;
+ };
+
+ afe_dma_mem: memory@60f00000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x60f00000 0 0x100000>;
+ no-map;
+ };
+
+ adsp_dma_mem: memory@61000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x61000000 0 0x100000>;
no-map;
};
@@ -152,6 +164,16 @@
};
};
+&adsp {
+ memory-region = <&adsp_dma_mem>, <&adsp_mem>;
+ status = "okay";
+};
+
+&afe {
+ memory-region = <&afe_dma_mem>;
+ status = "okay";
+};
+
&cpu0 {
cpu-supply = <&mt6359_vcore_buck_reg>;
};
@@ -514,6 +536,18 @@
&pio {
mediatek,rsel-resistance-in-si-unit;
+ audio_default_pins: audio-default-pins {
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO70__FUNC_AUD_SYNC_MOSI>,
+ <PINMUX_GPIO69__FUNC_AUD_CLK_MOSI>,
+ <PINMUX_GPIO71__FUNC_AUD_DAT_MOSI0>,
+ <PINMUX_GPIO72__FUNC_AUD_DAT_MOSI1>,
+ <PINMUX_GPIO73__FUNC_AUD_DAT_MISO0>,
+ <PINMUX_GPIO74__FUNC_AUD_DAT_MISO1>,
+ <PINMUX_GPIO75__FUNC_AUD_DAT_MISO2>;
+ };
+ };
+
dsi0_backlight_pins: dsi0-backlight-pins {
pins-backlight-en {
pinmux = <PINMUX_GPIO107__FUNC_GPIO107>;
@@ -850,9 +884,30 @@
&scp {
memory-region = <&scp_mem>;
+ firmware-name = "mediatek/mt8195/scp.img";
status = "okay";
};
+&sound {
+ compatible = "mediatek,mt8195_mt6359";
+ model = "mt8395-evk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&audio_default_pins>;
+ audio-routing =
+ "Headphone", "Headphone L",
+ "Headphone", "Headphone R";
+ mediatek,adsp = <&adsp>;
+ status = "okay";
+
+ headphone-dai-link {
+ link-name = "DL_SRC_BE";
+
+ codec {
+ sound-dai = <&pmic 0>;
+ };
+ };
+};
+
&spi1 {
/* Exposed at 40 pin connector */
pinctrl-0 = <&spi1_pins>;
diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi
index 32bb76b3202a..83bf5c81b5f7 100644
--- a/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi
@@ -12,10 +12,12 @@
&cpu0 {
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
};
&cpu1 {
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
};
&uart0 {
diff --git a/arch/arm64/boot/dts/nuvoton/nuvoton-common-npcm8xx.dtsi b/arch/arm64/boot/dts/nuvoton/nuvoton-common-npcm8xx.dtsi
index ecd171b2feba..fead4dde590d 100644
--- a/arch/arm64/boot/dts/nuvoton/nuvoton-common-npcm8xx.dtsi
+++ b/arch/arm64/boot/dts/nuvoton/nuvoton-common-npcm8xx.dtsi
@@ -176,4 +176,69 @@
};
};
};
+
+ pinctrl: pinctrl@f0010000 {
+ compatible = "nuvoton,npcm845-pinctrl";
+ ranges = <0x0 0x0 0xf0010000 0x8000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ nuvoton,sysgcr = <&gcr>;
+ status = "okay";
+ gpio0: gpio@f0010000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x0 0xB0>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ };
+ gpio1: gpio@f0011000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x1000 0xB0>;
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ };
+ gpio2: gpio@f0012000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x2000 0xB0>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ };
+ gpio3: gpio@f0013000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x3000 0xB0>;
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ };
+ gpio4: gpio@f0014000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x4000 0xB0>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 128 32>;
+ };
+ gpio5: gpio@f0015000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x5000 0xB0>;
+ interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 160 32>;
+ };
+ gpio6: gpio@f0016000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x6000 0xB0>;
+ interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 192 32>;
+ };
+ gpio7: gpio@f0017000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x7000 0xB0>;
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 224 32>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
index 15aa49fc4503..8b3736cee323 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
@@ -2394,6 +2394,12 @@
phy-names = "usb2-0";
};
+ cec@3960000 {
+ status = "okay";
+
+ hdmi-phandle = <&sor1>;
+ };
+
i2c@c250000 {
/* carrier board ID EEPROM */
eeprom@57 {
@@ -2409,6 +2415,10 @@
};
};
+ pwm@c340000 {
+ status = "okay";
+ };
+
pcie@10003000 {
status = "okay";
@@ -2508,6 +2518,16 @@
};
};
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ pwms = <&pwm4 0 45334>;
+ fan-supply = <&vdd_fan>;
+
+ /* cooling level (0, 1, 2, 3) - pwm inverted */
+ cooling-levels = <255 128 64 0>;
+ #cooling-cells = <2>;
+ };
+
vdd_sd: regulator-vdd-sd {
compatible = "regulator-fixed";
regulator-name = "SD_CARD_SW_PWR";
@@ -2556,6 +2576,17 @@
vin-supply = <&vdd_5v0_sys>;
};
+ vdd_fan: regulator-vdd-fan {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_FAN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ gpio = <&exp1 4 GPIO_ACTIVE_LOW>;
+
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
sound {
compatible = "nvidia,tegra186-audio-graph-card";
status = "okay";
@@ -2621,4 +2652,88 @@
label = "NVIDIA Jetson TX2 APE";
};
+
+ thermal-zones {
+ cpu-thermal {
+ polling-delay = <0>;
+ polling-delay-passive = <500>;
+ status = "okay";
+
+ trips {
+ cpu_trip_critical: critical {
+ temperature = <96500>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+
+ cpu_trip_hot: hot {
+ temperature = <79000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_trip_active: active {
+ temperature = <62000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu_trip_passive: passive {
+ temperature = <45000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ cooling-device = <&fan 3 3>;
+ trip = <&cpu_trip_critical>;
+ };
+
+ map1 {
+ cooling-device = <&fan 2 2>;
+ trip = <&cpu_trip_hot>;
+ };
+
+ map2 {
+ cooling-device = <&fan 1 1>;
+ trip = <&cpu_trip_active>;
+ };
+
+ map3 {
+ cooling-device = <&fan 0 0>;
+ trip = <&cpu_trip_passive>;
+ };
+ };
+ };
+
+ aux-thermal {
+ polling-delay = <0>;
+ polling-delay-passive = <500>;
+ status = "okay";
+
+ trips {
+ aux_alert0: critical {
+ temperature = <90000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpu-thermal {
+ polling-delay = <0>;
+ polling-delay-passive = <500>;
+ status = "okay";
+
+ trips {
+ gpu_alert0: critical {
+ temperature = <99000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
index e2d6857a3709..970ce5a03540 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
@@ -61,6 +61,8 @@
};
serial@3100000 {
+ /delete-property/ dmas;
+ /delete-property/ dma-names;
status = "okay";
};
@@ -191,6 +193,10 @@
nvidia,invert-interrupt;
};
+ gpu@17000000 {
+ status = "okay";
+ };
+
bpmp {
i2c {
status = "okay";
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3509-0000+p3636-0001.dts b/arch/arm64/boot/dts/nvidia/tegra186-p3509-0000+p3636-0001.dts
index 26f71651933d..5f3f572ecea9 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p3509-0000+p3636-0001.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p3509-0000+p3636-0001.dts
@@ -549,6 +549,8 @@
};
serial@3100000 {
+ /delete-property/ dmas;
+ /delete-property/ dma-names;
status = "okay";
};
@@ -712,6 +714,12 @@
phy-names = "usb2-0";
};
+ cec@3960000 {
+ status = "okay";
+
+ hdmi-phandle = <&sor1>;
+ };
+
hsp@3c00000 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 2b3bb5d0af17..5778c93af3e6 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -124,28 +124,28 @@
<&bpmp TEGRA186_CLK_APB2APE>;
clock-names = "ape", "apb2ape";
power-domains = <&bpmp TEGRA186_POWER_DOMAIN_AUD>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x02900000 0x0 0x02900000 0x200000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x02900000 0x0 0x02900000 0x0 0x200000>;
status = "disabled";
tegra_ahub: ahub@2900800 {
compatible = "nvidia,tegra186-ahub";
- reg = <0x02900800 0x800>;
+ reg = <0x0 0x02900800 0x0 0x800>;
clocks = <&bpmp TEGRA186_CLK_AHUB>;
clock-names = "ahub";
assigned-clocks = <&bpmp TEGRA186_CLK_AHUB>;
assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLP_OUT0>;
assigned-clock-rates = <81600000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x02900800 0x02900800 0x11800>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x02900800 0x0 0x02900800 0x0 0x11800>;
status = "disabled";
tegra_i2s1: i2s@2901000 {
compatible = "nvidia,tegra186-i2s",
"nvidia,tegra210-i2s";
- reg = <0x2901000 0x100>;
+ reg = <0x0 0x2901000 0x0 0x100>;
clocks = <&bpmp TEGRA186_CLK_I2S1>,
<&bpmp TEGRA186_CLK_I2S1_SYNC_INPUT>;
clock-names = "i2s", "sync_input";
@@ -159,7 +159,7 @@
tegra_i2s2: i2s@2901100 {
compatible = "nvidia,tegra186-i2s",
"nvidia,tegra210-i2s";
- reg = <0x2901100 0x100>;
+ reg = <0x0 0x2901100 0x0 0x100>;
clocks = <&bpmp TEGRA186_CLK_I2S2>,
<&bpmp TEGRA186_CLK_I2S2_SYNC_INPUT>;
clock-names = "i2s", "sync_input";
@@ -173,7 +173,7 @@
tegra_i2s3: i2s@2901200 {
compatible = "nvidia,tegra186-i2s",
"nvidia,tegra210-i2s";
- reg = <0x2901200 0x100>;
+ reg = <0x0 0x2901200 0x0 0x100>;
clocks = <&bpmp TEGRA186_CLK_I2S3>,
<&bpmp TEGRA186_CLK_I2S3_SYNC_INPUT>;
clock-names = "i2s", "sync_input";
@@ -187,7 +187,7 @@
tegra_i2s4: i2s@2901300 {
compatible = "nvidia,tegra186-i2s",
"nvidia,tegra210-i2s";
- reg = <0x2901300 0x100>;
+ reg = <0x0 0x2901300 0x0 0x100>;
clocks = <&bpmp TEGRA186_CLK_I2S4>,
<&bpmp TEGRA186_CLK_I2S4_SYNC_INPUT>;
clock-names = "i2s", "sync_input";
@@ -201,7 +201,7 @@
tegra_i2s5: i2s@2901400 {
compatible = "nvidia,tegra186-i2s",
"nvidia,tegra210-i2s";
- reg = <0x2901400 0x100>;
+ reg = <0x0 0x2901400 0x0 0x100>;
clocks = <&bpmp TEGRA186_CLK_I2S5>,
<&bpmp TEGRA186_CLK_I2S5_SYNC_INPUT>;
clock-names = "i2s", "sync_input";
@@ -215,7 +215,7 @@
tegra_i2s6: i2s@2901500 {
compatible = "nvidia,tegra186-i2s",
"nvidia,tegra210-i2s";
- reg = <0x2901500 0x100>;
+ reg = <0x0 0x2901500 0x0 0x100>;
clocks = <&bpmp TEGRA186_CLK_I2S6>,
<&bpmp TEGRA186_CLK_I2S6_SYNC_INPUT>;
clock-names = "i2s", "sync_input";
@@ -229,7 +229,7 @@
tegra_sfc1: sfc@2902000 {
compatible = "nvidia,tegra186-sfc",
"nvidia,tegra210-sfc";
- reg = <0x2902000 0x200>;
+ reg = <0x0 0x2902000 0x0 0x200>;
sound-name-prefix = "SFC1";
status = "disabled";
};
@@ -237,7 +237,7 @@
tegra_sfc2: sfc@2902200 {
compatible = "nvidia,tegra186-sfc",
"nvidia,tegra210-sfc";
- reg = <0x2902200 0x200>;
+ reg = <0x0 0x2902200 0x0 0x200>;
sound-name-prefix = "SFC2";
status = "disabled";
};
@@ -245,7 +245,7 @@
tegra_sfc3: sfc@2902400 {
compatible = "nvidia,tegra186-sfc",
"nvidia,tegra210-sfc";
- reg = <0x2902400 0x200>;
+ reg = <0x0 0x2902400 0x0 0x200>;
sound-name-prefix = "SFC3";
status = "disabled";
};
@@ -253,7 +253,7 @@
tegra_sfc4: sfc@2902600 {
compatible = "nvidia,tegra186-sfc",
"nvidia,tegra210-sfc";
- reg = <0x2902600 0x200>;
+ reg = <0x0 0x2902600 0x0 0x200>;
sound-name-prefix = "SFC4";
status = "disabled";
};
@@ -261,7 +261,7 @@
tegra_amx1: amx@2903000 {
compatible = "nvidia,tegra186-amx",
"nvidia,tegra210-amx";
- reg = <0x2903000 0x100>;
+ reg = <0x0 0x2903000 0x0 0x100>;
sound-name-prefix = "AMX1";
status = "disabled";
};
@@ -269,7 +269,7 @@
tegra_amx2: amx@2903100 {
compatible = "nvidia,tegra186-amx",
"nvidia,tegra210-amx";
- reg = <0x2903100 0x100>;
+ reg = <0x0 0x2903100 0x0 0x100>;
sound-name-prefix = "AMX2";
status = "disabled";
};
@@ -277,7 +277,7 @@
tegra_amx3: amx@2903200 {
compatible = "nvidia,tegra186-amx",
"nvidia,tegra210-amx";
- reg = <0x2903200 0x100>;
+ reg = <0x0 0x2903200 0x0 0x100>;
sound-name-prefix = "AMX3";
status = "disabled";
};
@@ -285,7 +285,7 @@
tegra_amx4: amx@2903300 {
compatible = "nvidia,tegra186-amx",
"nvidia,tegra210-amx";
- reg = <0x2903300 0x100>;
+ reg = <0x0 0x2903300 0x0 0x100>;
sound-name-prefix = "AMX4";
status = "disabled";
};
@@ -293,7 +293,7 @@
tegra_adx1: adx@2903800 {
compatible = "nvidia,tegra186-adx",
"nvidia,tegra210-adx";
- reg = <0x2903800 0x100>;
+ reg = <0x0 0x2903800 0x0 0x100>;
sound-name-prefix = "ADX1";
status = "disabled";
};
@@ -301,7 +301,7 @@
tegra_adx2: adx@2903900 {
compatible = "nvidia,tegra186-adx",
"nvidia,tegra210-adx";
- reg = <0x2903900 0x100>;
+ reg = <0x0 0x2903900 0x0 0x100>;
sound-name-prefix = "ADX2";
status = "disabled";
};
@@ -309,7 +309,7 @@
tegra_adx3: adx@2903a00 {
compatible = "nvidia,tegra186-adx",
"nvidia,tegra210-adx";
- reg = <0x2903a00 0x100>;
+ reg = <0x0 0x2903a00 0x0 0x100>;
sound-name-prefix = "ADX3";
status = "disabled";
};
@@ -317,14 +317,14 @@
tegra_adx4: adx@2903b00 {
compatible = "nvidia,tegra186-adx",
"nvidia,tegra210-adx";
- reg = <0x2903b00 0x100>;
+ reg = <0x0 0x2903b00 0x0 0x100>;
sound-name-prefix = "ADX4";
status = "disabled";
};
tegra_dmic1: dmic@2904000 {
compatible = "nvidia,tegra210-dmic";
- reg = <0x2904000 0x100>;
+ reg = <0x0 0x2904000 0x0 0x100>;
clocks = <&bpmp TEGRA186_CLK_DMIC1>;
clock-names = "dmic";
assigned-clocks = <&bpmp TEGRA186_CLK_DMIC1>;
@@ -336,7 +336,7 @@
tegra_dmic2: dmic@2904100 {
compatible = "nvidia,tegra210-dmic";
- reg = <0x2904100 0x100>;
+ reg = <0x0 0x2904100 0x0 0x100>;
clocks = <&bpmp TEGRA186_CLK_DMIC2>;
clock-names = "dmic";
assigned-clocks = <&bpmp TEGRA186_CLK_DMIC2>;
@@ -348,7 +348,7 @@
tegra_dmic3: dmic@2904200 {
compatible = "nvidia,tegra210-dmic";
- reg = <0x2904200 0x100>;
+ reg = <0x0 0x2904200 0x0 0x100>;
clocks = <&bpmp TEGRA186_CLK_DMIC3>;
clock-names = "dmic";
assigned-clocks = <&bpmp TEGRA186_CLK_DMIC3>;
@@ -360,7 +360,7 @@
tegra_dmic4: dmic@2904300 {
compatible = "nvidia,tegra210-dmic";
- reg = <0x2904300 0x100>;
+ reg = <0x0 0x2904300 0x0 0x100>;
clocks = <&bpmp TEGRA186_CLK_DMIC4>;
clock-names = "dmic";
assigned-clocks = <&bpmp TEGRA186_CLK_DMIC4>;
@@ -372,7 +372,7 @@
tegra_dspk1: dspk@2905000 {
compatible = "nvidia,tegra186-dspk";
- reg = <0x2905000 0x100>;
+ reg = <0x0 0x2905000 0x0 0x100>;
clocks = <&bpmp TEGRA186_CLK_DSPK1>;
clock-names = "dspk";
assigned-clocks = <&bpmp TEGRA186_CLK_DSPK1>;
@@ -384,7 +384,7 @@
tegra_dspk2: dspk@2905100 {
compatible = "nvidia,tegra186-dspk";
- reg = <0x2905100 0x100>;
+ reg = <0x0 0x2905100 0x0 0x100>;
clocks = <&bpmp TEGRA186_CLK_DSPK2>;
clock-names = "dspk";
assigned-clocks = <&bpmp TEGRA186_CLK_DSPK2>;
@@ -397,9 +397,9 @@
tegra_ope1: processing-engine@2908000 {
compatible = "nvidia,tegra186-ope",
"nvidia,tegra210-ope";
- reg = <0x2908000 0x100>;
- #address-cells = <1>;
- #size-cells = <1>;
+ reg = <0x0 0x2908000 0x0 0x100>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
sound-name-prefix = "OPE1";
status = "disabled";
@@ -407,20 +407,20 @@
equalizer@2908100 {
compatible = "nvidia,tegra186-peq",
"nvidia,tegra210-peq";
- reg = <0x2908100 0x100>;
+ reg = <0x0 0x2908100 0x0 0x100>;
};
dynamic-range-compressor@2908200 {
compatible = "nvidia,tegra186-mbdrc",
"nvidia,tegra210-mbdrc";
- reg = <0x2908200 0x200>;
+ reg = <0x0 0x2908200 0x0 0x200>;
};
};
tegra_mvc1: mvc@290a000 {
compatible = "nvidia,tegra186-mvc",
"nvidia,tegra210-mvc";
- reg = <0x290a000 0x200>;
+ reg = <0x0 0x290a000 0x0 0x200>;
sound-name-prefix = "MVC1";
status = "disabled";
};
@@ -428,7 +428,7 @@
tegra_mvc2: mvc@290a200 {
compatible = "nvidia,tegra186-mvc",
"nvidia,tegra210-mvc";
- reg = <0x290a200 0x200>;
+ reg = <0x0 0x290a200 0x0 0x200>;
sound-name-prefix = "MVC2";
status = "disabled";
};
@@ -436,14 +436,14 @@
tegra_amixer: amixer@290bb00 {
compatible = "nvidia,tegra186-amixer",
"nvidia,tegra210-amixer";
- reg = <0x290bb00 0x800>;
+ reg = <0x0 0x290bb00 0x0 0x800>;
sound-name-prefix = "MIXER1";
status = "disabled";
};
tegra_admaif: admaif@290f000 {
compatible = "nvidia,tegra186-admaif";
- reg = <0x0290f000 0x1000>;
+ reg = <0x0 0x0290f000 0x0 0x1000>;
dmas = <&adma 1>, <&adma 1>,
<&adma 2>, <&adma 2>,
<&adma 3>, <&adma 3>,
@@ -489,7 +489,7 @@
tegra_asrc: asrc@2910000 {
compatible = "nvidia,tegra186-asrc";
- reg = <0x2910000 0x2000>;
+ reg = <0x0 0x2910000 0x0 0x2000>;
sound-name-prefix = "ASRC1";
status = "disabled";
};
@@ -497,7 +497,7 @@
adma: dma-controller@2930000 {
compatible = "nvidia,tegra186-adma";
- reg = <0x02930000 0x20000>;
+ reg = <0x0 0x02930000 0x0 0x20000>;
interrupt-parent = <&agic>;
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
@@ -542,8 +542,8 @@
"nvidia,tegra210-agic";
#interrupt-cells = <3>;
interrupt-controller;
- reg = <0x02a41000 0x1000>,
- <0x02a42000 0x2000>;
+ reg = <0x0 0x02a41000 0x0 0x1000>,
+ <0x0 0x02a42000 0x0 0x2000>;
interrupts = <GIC_SPI 145
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
clocks = <&bpmp TEGRA186_CLK_APE>;
@@ -612,6 +612,8 @@
interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_UARTA>;
resets = <&bpmp TEGRA186_RESET_UARTA>;
+ dmas = <&gpcdma 8>, <&gpcdma 8>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -621,9 +623,9 @@
reg-shift = <2>;
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_UARTB>;
- clock-names = "serial";
resets = <&bpmp TEGRA186_RESET_UARTB>;
- reset-names = "serial";
+ dmas = <&gpcdma 9>, <&gpcdma 9>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -633,9 +635,9 @@
reg-shift = <2>;
interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_UARTD>;
- clock-names = "serial";
resets = <&bpmp TEGRA186_RESET_UARTD>;
- reset-names = "serial";
+ dmas = <&gpcdma 19>, <&gpcdma 19>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -645,9 +647,9 @@
reg-shift = <2>;
interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_UARTE>;
- clock-names = "serial";
resets = <&bpmp TEGRA186_RESET_UARTE>;
- reset-names = "serial";
+ dmas = <&gpcdma 20>, <&gpcdma 20>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -657,9 +659,9 @@
reg-shift = <2>;
interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_UARTF>;
- clock-names = "serial";
resets = <&bpmp TEGRA186_RESET_UARTF>;
- reset-names = "serial";
+ dmas = <&gpcdma 12>, <&gpcdma 12>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -1183,7 +1185,7 @@
};
cec@3960000 {
- compatible = "nvidia,tegra186-cec";
+ compatible = "nvidia,tegra186-cec", "nvidia,tegra210-cec";
reg = <0x0 0x03960000 0x0 0x10000>;
interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_CEC>;
@@ -1236,9 +1238,9 @@
reg-shift = <2>;
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_UARTC>;
- clock-names = "serial";
resets = <&bpmp TEGRA186_RESET_UARTC>;
- reset-names = "serial";
+ dmas = <&gpcdma 3>, <&gpcdma 3>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -1248,9 +1250,9 @@
reg-shift = <2>;
interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_UARTG>;
- clock-names = "serial";
resets = <&bpmp TEGRA186_RESET_UARTG>;
- reset-names = "serial";
+ dmas = <&gpcdma 2>, <&gpcdma 2>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -1511,10 +1513,10 @@
resets = <&bpmp TEGRA186_RESET_HOST1X>;
reset-names = "host1x";
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
- ranges = <0x15000000 0x0 0x15000000 0x01000000>;
+ ranges = <0x0 0x15000000 0x0 0x15000000 0x0 0x01000000>;
interconnects = <&mc TEGRA186_MEMORY_CLIENT_HOST1XDMAR &emc>;
interconnect-names = "dma-mem";
@@ -1533,7 +1535,7 @@
dpaux1: dpaux@15040000 {
compatible = "nvidia,tegra186-dpaux";
- reg = <0x15040000 0x10000>;
+ reg = <0x0 0x15040000 0x0 0x10000>;
interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_DPAUX1>,
<&bpmp TEGRA186_CLK_PLLDP>;
@@ -1567,7 +1569,7 @@
display-hub@15200000 {
compatible = "nvidia,tegra186-display";
- reg = <0x15200000 0x00040000>;
+ reg = <0x0 0x15200000 0x0 0x00040000>;
resets = <&bpmp TEGRA186_RESET_NVDISPLAY0_MISC>,
<&bpmp TEGRA186_RESET_NVDISPLAY0_WGRP0>,
<&bpmp TEGRA186_RESET_NVDISPLAY0_WGRP1>,
@@ -1585,14 +1587,14 @@
power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>;
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
- ranges = <0x15200000 0x15200000 0x40000>;
+ ranges = <0x0 0x15200000 0x0 0x15200000 0x0 0x40000>;
display@15200000 {
compatible = "nvidia,tegra186-dc";
- reg = <0x15200000 0x10000>;
+ reg = <0x0 0x15200000 0x0 0x10000>;
interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_NVDISPLAY_P0>;
clock-names = "dc";
@@ -1611,7 +1613,7 @@
display@15210000 {
compatible = "nvidia,tegra186-dc";
- reg = <0x15210000 0x10000>;
+ reg = <0x0 0x15210000 0x0 0x10000>;
interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_NVDISPLAY_P1>;
clock-names = "dc";
@@ -1630,7 +1632,7 @@
display@15220000 {
compatible = "nvidia,tegra186-dc";
- reg = <0x15220000 0x10000>;
+ reg = <0x0 0x15220000 0x0 0x10000>;
interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_NVDISPLAY_P2>;
clock-names = "dc";
@@ -1650,7 +1652,7 @@
dsia: dsi@15300000 {
compatible = "nvidia,tegra186-dsi";
- reg = <0x15300000 0x10000>;
+ reg = <0x0 0x15300000 0x0 0x10000>;
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_DSI>,
<&bpmp TEGRA186_CLK_DSIA_LP>,
@@ -1665,7 +1667,7 @@
vic@15340000 {
compatible = "nvidia,tegra186-vic";
- reg = <0x15340000 0x40000>;
+ reg = <0x0 0x15340000 0x0 0x40000>;
interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_VIC>;
clock-names = "vic";
@@ -1681,7 +1683,7 @@
nvjpg@15380000 {
compatible = "nvidia,tegra186-nvjpg";
- reg = <0x15380000 0x40000>;
+ reg = <0x0 0x15380000 0x0 0x40000>;
clocks = <&bpmp TEGRA186_CLK_NVJPG>;
clock-names = "nvjpg";
resets = <&bpmp TEGRA186_RESET_NVJPG>;
@@ -1696,7 +1698,7 @@
dsib: dsi@15400000 {
compatible = "nvidia,tegra186-dsi";
- reg = <0x15400000 0x10000>;
+ reg = <0x0 0x15400000 0x0 0x10000>;
interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_DSIB>,
<&bpmp TEGRA186_CLK_DSIB_LP>,
@@ -1711,7 +1713,7 @@
nvdec@15480000 {
compatible = "nvidia,tegra186-nvdec";
- reg = <0x15480000 0x40000>;
+ reg = <0x0 0x15480000 0x0 0x40000>;
clocks = <&bpmp TEGRA186_CLK_NVDEC>;
clock-names = "nvdec";
resets = <&bpmp TEGRA186_RESET_NVDEC>;
@@ -1727,7 +1729,7 @@
nvenc@154c0000 {
compatible = "nvidia,tegra186-nvenc";
- reg = <0x154c0000 0x40000>;
+ reg = <0x0 0x154c0000 0x0 0x40000>;
clocks = <&bpmp TEGRA186_CLK_NVENC>;
clock-names = "nvenc";
resets = <&bpmp TEGRA186_RESET_NVENC>;
@@ -1742,7 +1744,7 @@
sor0: sor@15540000 {
compatible = "nvidia,tegra186-sor";
- reg = <0x15540000 0x10000>;
+ reg = <0x0 0x15540000 0x0 0x10000>;
interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_SOR0>,
<&bpmp TEGRA186_CLK_SOR0_OUT>,
@@ -1766,7 +1768,7 @@
sor1: sor@15580000 {
compatible = "nvidia,tegra186-sor";
- reg = <0x15580000 0x10000>;
+ reg = <0x0 0x15580000 0x0 0x10000>;
interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_SOR1>,
<&bpmp TEGRA186_CLK_SOR1_OUT>,
@@ -1790,7 +1792,7 @@
dpaux: dpaux@155c0000 {
compatible = "nvidia,tegra186-dpaux";
- reg = <0x155c0000 0x10000>;
+ reg = <0x0 0x155c0000 0x0 0x10000>;
interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_DPAUX>,
<&bpmp TEGRA186_CLK_PLLDP>;
@@ -1824,7 +1826,7 @@
padctl@15880000 {
compatible = "nvidia,tegra186-dsi-padctl";
- reg = <0x15880000 0x10000>;
+ reg = <0x0 0x15880000 0x0 0x10000>;
resets = <&bpmp TEGRA186_RESET_DSI>;
reset-names = "dsi";
status = "disabled";
@@ -1832,7 +1834,7 @@
dsic: dsi@15900000 {
compatible = "nvidia,tegra186-dsi";
- reg = <0x15900000 0x10000>;
+ reg = <0x0 0x15900000 0x0 0x10000>;
interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_DSIC>,
<&bpmp TEGRA186_CLK_DSIC_LP>,
@@ -1847,7 +1849,7 @@
dsid: dsi@15940000 {
compatible = "nvidia,tegra186-dsi";
- reg = <0x15940000 0x10000>;
+ reg = <0x0 0x15940000 0x0 0x10000>;
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_DSID>,
<&bpmp TEGRA186_CLK_DSID_LP>,
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
index e8b296d9e0d3..43942db6eac9 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
@@ -104,6 +104,8 @@
};
serial@3110000 {
+ /delete-property/ dmas;
+ /delete-property/ dma-names;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
index c32876699a43..ea6f397a2792 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
@@ -2121,6 +2121,12 @@
phy-names = "usb2-0", "usb2-1", "usb2-3", "usb3-0", "usb3-2", "usb3-3";
};
+ cec@3960000 {
+ status = "okay";
+
+ hdmi-phandle = <&sor2>;
+ };
+
i2c@c240000 {
typec@8 {
compatible = "cypress,cypd4226";
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi
index 4a17ea5e40fd..16cf4414de59 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi
@@ -2174,6 +2174,12 @@
phy-names = "usb2-1", "usb2-2", "usb3-2";
};
+ cec@3960000 {
+ status = "okay";
+
+ hdmi-phandle = <&sor1>;
+ };
+
host1x@13e00000 {
display-hub@15200000 {
status = "okay";
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
index 59860d19f0f6..a410fc335fa3 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
@@ -78,6 +78,8 @@
};
serial@3100000 {
+ /delete-property/ dmas;
+ /delete-property/ dma-names;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index 33f92b77cd9d..1399342f23e1 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -747,6 +747,8 @@
interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA194_CLK_UARTA>;
resets = <&bpmp TEGRA194_RESET_UARTA>;
+ dmas = <&gpcdma 8>, <&gpcdma 8>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -757,6 +759,8 @@
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA194_CLK_UARTB>;
resets = <&bpmp TEGRA194_RESET_UARTB>;
+ dmas = <&gpcdma 9>, <&gpcdma 9>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -766,9 +770,9 @@
reg-shift = <2>;
interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA194_CLK_UARTD>;
- clock-names = "serial";
resets = <&bpmp TEGRA194_RESET_UARTD>;
- reset-names = "serial";
+ dmas = <&gpcdma 19>, <&gpcdma 19>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -778,9 +782,9 @@
reg-shift = <2>;
interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA194_CLK_UARTE>;
- clock-names = "serial";
resets = <&bpmp TEGRA194_RESET_UARTE>;
- reset-names = "serial";
+ dmas = <&gpcdma 20>, <&gpcdma 20>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -790,9 +794,9 @@
reg-shift = <2>;
interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA194_CLK_UARTF>;
- clock-names = "serial";
resets = <&bpmp TEGRA194_RESET_UARTF>;
- reset-names = "serial";
+ dmas = <&gpcdma 12>, <&gpcdma 12>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -817,9 +821,9 @@
reg-shift = <2>;
interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA194_CLK_UARTH>;
- clock-names = "serial";
resets = <&bpmp TEGRA194_RESET_UARTH>;
- reset-names = "serial";
+ dmas = <&gpcdma 13>, <&gpcdma 13>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -1339,7 +1343,7 @@
};
cec@3960000 {
- compatible = "nvidia,tegra194-cec";
+ compatible = "nvidia,tegra194-cec", "nvidia,tegra210-cec";
reg = <0x0 0x03960000 0x0 0x10000>;
interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA194_CLK_CEC>;
@@ -1616,9 +1620,9 @@
reg-shift = <2>;
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA194_CLK_UARTC>;
- clock-names = "serial";
resets = <&bpmp TEGRA194_RESET_UARTC>;
- reset-names = "serial";
+ dmas = <&gpcdma 3>, <&gpcdma 3>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -1628,9 +1632,9 @@
reg-shift = <2>;
interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA194_CLK_UARTG>;
- clock-names = "serial";
resets = <&bpmp TEGRA194_RESET_UARTG>;
- reset-names = "serial";
+ dmas = <&gpcdma 2>, <&gpcdma 2>;
+ dma-names = "rx", "tx";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
index 9b9d1d15b0c7..e07aeeee3586 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
@@ -11,6 +11,7 @@
rtc0 = "/i2c@7000d000/pmic@3c";
rtc1 = "/rtc@7000e000";
serial0 = &uarta;
+ serial3 = &uartd;
};
chosen {
@@ -24,6 +25,7 @@
gpu@57000000 {
vdd-supply = <&vdd_gpu>;
+ status = "okay";
};
/* debug port */
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
index a6a58e51822d..627abf51a5a4 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
@@ -90,6 +90,12 @@
};
};
+ cec@70015000 {
+ status = "okay";
+
+ hdmi-phandle = <&sor1>;
+ };
+
clock@70110000 {
status = "okay";
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index 83ed6ac2a8d8..584461f3a619 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -1623,6 +1623,18 @@
};
};
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ pwms = <&pwm 3 45334>;
+ fan-supply = <&vdd_fan>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(K, 7) IRQ_TYPE_EDGE_RISING>;
+
+ /* cooling level (0, 1, 2, 3) - pwm inverted */
+ cooling-levels = <255 128 64 0>;
+ #cooling-cells = <2>;
+ };
+
vdd_sys_mux: regulator-vdd-sys-mux {
compatible = "regulator-fixed";
regulator-name = "VDD_SYS_MUX";
@@ -1778,4 +1790,67 @@
enable-active-high;
vin-supply = <&vdd_5v0_sys>;
};
+
+ vdd_fan: regulator-vdd-fan {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_FAN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&exp1 4 GPIO_ACTIVE_LOW>;
+ vin-supply = <&vdd_5v0_sys>;
+
+ regulator-enable-ramp-delay = <284>;
+ };
+
+ thermal-zones {
+ cpu-thermal {
+ trips {
+ cpu_trip_critical: critical {
+ temperature = <96500>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+
+ cpu_trip_hot: hot {
+ temperature = <70000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_trip_active: active {
+ temperature = <50000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu_trip_passive: passive {
+ temperature = <30000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ cooling-device = <&fan 3 3>;
+ trip = <&cpu_trip_critical>;
+ };
+
+ map1 {
+ cooling-device = <&fan 2 2>;
+ trip = <&cpu_trip_hot>;
+ };
+
+ map2 {
+ cooling-device = <&fan 1 1>;
+ trip = <&cpu_trip_active>;
+ };
+
+ map3 {
+ cooling-device = <&fan 0 0>;
+ trip = <&cpu_trip_passive>;
+ };
+ };
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi
index bbd6ff0564da..b84a8e39b404 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi
@@ -1365,7 +1365,7 @@
};
};
- hog-0 {
+ max77620-hog {
gpio-hog;
output-high;
gpios = <2 GPIO_ACTIVE_HIGH>,
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
index 0ecdd7243b2e..ec0e84cb83ef 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
@@ -419,6 +419,12 @@
nvidia,sys-clock-req-active-high;
};
+ cec@70015000 {
+ status = "okay";
+
+ hdmi-phandle = <&sor1>;
+ };
+
hda@70030000 {
nvidia,model = "NVIDIA Jetson Nano HDA";
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index b6c84d195c0e..402b0ede1472 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -959,6 +959,15 @@
reset-names = "fuse";
};
+ cec@70015000 {
+ compatible = "nvidia,tegra210-cec";
+	reg = <0x0 0x70015000 0x0 0x1000>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA210_CLK_CEC>;
+ clock-names = "cec";
+ status = "disabled";
+ };
+
mc: memory-controller@70019000 {
compatible = "nvidia,tegra210-mc";
reg = <0x0 0x70019000 0x0 0x1000>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
index 2601b43b2d8c..df034dbb8285 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
@@ -16,6 +16,18 @@
#address-cells = <2>;
#size-cells = <2>;
+ aliases {
+ i2c0 = &gen1_i2c;
+ i2c1 = &gen2_i2c;
+ i2c2 = &cam_i2c;
+ i2c3 = &dp_aux_ch1_i2c;
+ i2c4 = &bpmp_i2c;
+ i2c5 = &dp_aux_ch0_i2c;
+ i2c6 = &dp_aux_ch2_i2c;
+ i2c7 = &gen8_i2c;
+ i2c8 = &dp_aux_ch3_i2c;
+ };
+
bus@0 {
compatible = "simple-bus";
@@ -2948,6 +2960,11 @@
<&bpmp TEGRA234_CLK_QSPI0_PM>;
clock-names = "qspi", "qspi_out";
resets = <&bpmp TEGRA234_RESET_QSPI0>;
+ iommus = <&smmu_niso1 TEGRA234_SID_QSPI0>;
+ assigned-clocks = <&bpmp TEGRA234_CLK_QSPI0_2X_PM>,
+ <&bpmp TEGRA234_CLK_QSPI0_PM>;
+ assigned-clock-rates = <199999999 99999999>;
+ assigned-clock-parents = <&bpmp TEGRA234_CLK_PLLC>;
status = "disabled";
};
@@ -3031,6 +3048,11 @@
<&bpmp TEGRA234_CLK_QSPI1_PM>;
clock-names = "qspi", "qspi_out";
resets = <&bpmp TEGRA234_RESET_QSPI1>;
+ iommus = <&smmu_niso1 TEGRA234_SID_QSPI1>;
+ assigned-clocks = <&bpmp TEGRA234_CLK_QSPI1_2X_PM>,
+ <&bpmp TEGRA234_CLK_QSPI1_PM>;
+ assigned-clock-rates = <199999999 99999999>;
+ assigned-clock-parents = <&bpmp TEGRA234_CLK_PLLC>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 140b0b2abfb5..669b888b27a1 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -116,6 +116,12 @@ dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-1000.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-4000.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs615-ride.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs6490-rb3gen2.dtb
+
+qcs6490-rb3gen2-vision-mezzanine-dtbs := qcs6490-rb3gen2.dtb qcs6490-rb3gen2-vision-mezzanine.dtbo
+qcs6490-rb3gen2-industrial-mezzanine-dtbs := qcs6490-rb3gen2.dtb qcs6490-rb3gen2-industrial-mezzanine.dtbo
+
+dtb-$(CONFIG_ARCH_QCOM) += qcs6490-rb3gen2-industrial-mezzanine.dtb
+dtb-$(CONFIG_ARCH_QCOM) += qcs6490-rb3gen2-vision-mezzanine.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs8300-ride.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs8550-aim300-aiot.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs9100-ride.dtb
@@ -134,7 +140,8 @@ dtb-$(CONFIG_ARCH_QCOM) += sa8295p-adp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sa8540p-ride.dtb
dtb-$(CONFIG_ARCH_QCOM) += sa8775p-ride.dtb
dtb-$(CONFIG_ARCH_QCOM) += sa8775p-ride-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM) += sc7180-acer-aspire1.dtb
+sc7180-acer-aspire1-el2-dtbs := sc7180-acer-aspire1.dtb sc7180-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-acer-aspire1.dtb sc7180-acer-aspire1-el2.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-idp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r1.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r1-lte.dtb
@@ -200,11 +207,16 @@ dtb-$(CONFIG_ARCH_QCOM) += sc7280-idp2.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7280-crd-r3.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc8180x-lenovo-flex-5g.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc8180x-primus.dtb
-dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-crd.dtb
-dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-huawei-gaokun3.dtb
-dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-lenovo-thinkpad-x13s.dtb
-dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-microsoft-arcata.dtb
-dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-microsoft-blackrock.dtb
+sc8280xp-crd-el2-dtbs := sc8280xp-crd.dtb sc8280xp-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-crd.dtb sc8280xp-crd-el2.dtb
+sc8280xp-huawei-gaokun3-el2-dtbs := sc8280xp-huawei-gaokun3.dtb sc8280xp-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-huawei-gaokun3.dtb sc8280xp-huawei-gaokun3-el2.dtb
+sc8280xp-lenovo-thinkpad-x13s-el2-dtbs := sc8280xp-lenovo-thinkpad-x13s.dtb sc8280xp-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-lenovo-thinkpad-x13s.dtb sc8280xp-lenovo-thinkpad-x13s-el2.dtb
+sc8280xp-microsoft-arcata-el2-dtbs := sc8280xp-microsoft-arcata.dtb sc8280xp-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-microsoft-arcata.dtb sc8280xp-microsoft-arcata-el2.dtb
+sc8280xp-microsoft-blackrock-el2-dtbs := sc8280xp-microsoft-blackrock.dtb sc8280xp-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-microsoft-blackrock.dtb sc8280xp-microsoft-blackrock-el2.dtb
dtb-$(CONFIG_ARCH_QCOM) += sda660-inforce-ifc6560.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm450-lenovo-tbx605f.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm450-motorola-ali.dtb
@@ -246,6 +258,7 @@ dtb-$(CONFIG_ARCH_QCOM) += sm4450-qrd.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm6115-fxtec-pro1x.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm6115p-lenovo-j606f.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm6125-sony-xperia-seine-pdx201.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm6125-xiaomi-ginkgo.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm6125-xiaomi-laurel-sprout.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm6350-sony-xperia-lena-pdx213.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm6375-sony-xperia-murray-pdx225.dtb
@@ -288,13 +301,29 @@ dtb-$(CONFIG_ARCH_QCOM) += sm8650-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8650-qrd.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8750-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8750-qrd.dtb
-dtb-$(CONFIG_ARCH_QCOM) += x1e001de-devkit.dtb
-dtb-$(CONFIG_ARCH_QCOM) += x1e78100-lenovo-thinkpad-t14s.dtb
-dtb-$(CONFIG_ARCH_QCOM) += x1e80100-asus-vivobook-s15.dtb
-dtb-$(CONFIG_ARCH_QCOM) += x1e80100-crd.dtb
-dtb-$(CONFIG_ARCH_QCOM) += x1e80100-dell-xps13-9345.dtb
-dtb-$(CONFIG_ARCH_QCOM) += x1e80100-hp-omnibook-x14.dtb
-dtb-$(CONFIG_ARCH_QCOM) += x1e80100-lenovo-yoga-slim7x.dtb
-dtb-$(CONFIG_ARCH_QCOM) += x1e80100-microsoft-romulus13.dtb
-dtb-$(CONFIG_ARCH_QCOM) += x1e80100-microsoft-romulus15.dtb
-dtb-$(CONFIG_ARCH_QCOM) += x1e80100-qcp.dtb
+x1e001de-devkit-el2-dtbs := x1e001de-devkit.dtb x1-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += x1e001de-devkit.dtb x1e001de-devkit-el2.dtb
+x1e78100-lenovo-thinkpad-t14s-el2-dtbs := x1e78100-lenovo-thinkpad-t14s.dtb x1-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += x1e78100-lenovo-thinkpad-t14s.dtb x1e78100-lenovo-thinkpad-t14s-el2.dtb
+x1e78100-lenovo-thinkpad-t14s-oled-el2-dtbs := x1e78100-lenovo-thinkpad-t14s-oled.dtb x1-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += x1e78100-lenovo-thinkpad-t14s-oled.dtb x1e78100-lenovo-thinkpad-t14s-oled-el2.dtb
+x1e80100-asus-vivobook-s15-el2-dtbs := x1e80100-asus-vivobook-s15.dtb x1-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-asus-vivobook-s15.dtb x1e80100-asus-vivobook-s15-el2.dtb
+x1e80100-crd-el2-dtbs := x1e80100-crd.dtb x1-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-crd.dtb x1e80100-crd-el2.dtb
+x1e80100-dell-xps13-9345-el2-dtbs := x1e80100-dell-xps13-9345.dtb x1-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-dell-xps13-9345.dtb x1e80100-dell-xps13-9345-el2.dtb
+x1e80100-hp-elitebook-ultra-g1q-el2-dtbs := x1e80100-hp-elitebook-ultra-g1q.dtb x1-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-hp-elitebook-ultra-g1q.dtb x1e80100-hp-elitebook-ultra-g1q-el2.dtb
+x1e80100-hp-omnibook-x14-el2-dtbs := x1e80100-hp-omnibook-x14.dtb x1-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-hp-omnibook-x14.dtb x1e80100-hp-omnibook-x14-el2.dtb
+x1e80100-lenovo-yoga-slim7x-el2-dtbs := x1e80100-lenovo-yoga-slim7x.dtb x1-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-lenovo-yoga-slim7x.dtb x1e80100-lenovo-yoga-slim7x-el2.dtb
+x1e80100-microsoft-romulus13-el2-dtbs := x1e80100-microsoft-romulus13.dtb x1-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-microsoft-romulus13.dtb x1e80100-microsoft-romulus13-el2.dtb
+x1e80100-microsoft-romulus15-el2-dtbs := x1e80100-microsoft-romulus15.dtb x1-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-microsoft-romulus15.dtb x1e80100-microsoft-romulus15-el2.dtb
+x1e80100-qcp-el2-dtbs := x1e80100-qcp.dtb x1-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-qcp.dtb x1e80100-qcp-el2.dtb
+x1p42100-crd-el2-dtbs := x1p42100-crd.dtb x1-el2.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += x1p42100-crd.dtb x1p42100-crd-el2.dtb
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
index aba08424aa38..b0c594c5f236 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
@@ -222,11 +222,17 @@
&blsp_uart1 {
status = "okay";
label = "LS-UART0";
+ pinctrl-0 = <&blsp_uart1_default>;
+ pinctrl-1 = <&blsp_uart1_sleep>;
+ pinctrl-names = "default", "sleep";
};
&blsp_uart2 {
status = "okay";
label = "LS-UART1";
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
};
&camss {
@@ -591,6 +597,21 @@
"USR_LED_2_CTRL", /* GPIO 120 */
"SB_HS_ID";
+ blsp_uart1_default: blsp-uart1-default-state {
+ /* TX, RX, CTS_N, RTS_N */
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ function = "blsp_uart1";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ blsp_uart1_sleep: blsp-uart1-sleep-state {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
sdc2_cd_default: sdc2-cd-default-state {
pins = "gpio38";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/apq8016-schneider-hmibsc.dts b/arch/arm64/boot/dts/qcom/apq8016-schneider-hmibsc.dts
index 75c6137e5a11..ce75046ffdac 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-schneider-hmibsc.dts
+++ b/arch/arm64/boot/dts/qcom/apq8016-schneider-hmibsc.dts
@@ -190,11 +190,17 @@
};
&blsp_uart1 {
+ pinctrl-0 = <&blsp_uart1_default>;
+ pinctrl-1 = <&blsp_uart1_sleep>;
+ pinctrl-names = "default", "sleep";
label = "UART0";
status = "okay";
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_default>;
+ pinctrl-1 = <&blsp_uart2_sleep>;
+ pinctrl-names = "default", "sleep";
label = "UART1";
status = "okay";
};
@@ -367,6 +373,37 @@
bias-disable;
};
+ blsp_uart1_default: blsp-uart1-default-state {
+ /* TX, RX, CTS_N, RTS_N */
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ function = "blsp_uart1";
+ drive-strength = <16>;
+ bias-disable;
+ bootph-all;
+ };
+
+ blsp_uart1_sleep: blsp-uart1-sleep-state {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ blsp_uart2_default: blsp-uart2-default-state {
+ /* TX, RX */
+ pins = "gpio4", "gpio5";
+ function = "blsp_uart2";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ blsp_uart2_sleep: blsp-uart2-sleep-state {
+ pins = "gpio4", "gpio5";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
msm_key_volp_n_default: msm-key-volp-n-default-state {
pins = "gpio107";
function = "gpio";
@@ -463,10 +500,6 @@
drive-strength = <16>;
};
-&blsp_uart1_default {
- bootph-all;
-};
-
/* Enable CoreSight */
&cti0 { status = "okay"; };
&cti1 { status = "okay"; };
diff --git a/arch/arm64/boot/dts/qcom/apq8039-t2.dts b/arch/arm64/boot/dts/qcom/apq8039-t2.dts
index 4f82bb668616..38c281f0fe65 100644
--- a/arch/arm64/boot/dts/qcom/apq8039-t2.dts
+++ b/arch/arm64/boot/dts/qcom/apq8039-t2.dts
@@ -116,18 +116,16 @@
};
&blsp_uart1 {
+ pinctrl-0 = <&blsp_uart1_default>;
+ pinctrl-1 = <&blsp_uart1_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
-&blsp_uart1_default {
- pins = "gpio0", "gpio1";
-};
-
-&blsp_uart1_sleep {
- pins = "gpio0", "gpio1";
-};
-
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
@@ -323,6 +321,20 @@
"USBC_GPIO7_1V8", /* GPIO_120 */
"NC";
+ blsp_uart1_default: blsp-uart1-default-state {
+ pins = "gpio0", "gpio1";
+ function = "blsp_uart1";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ blsp_uart1_sleep: blsp-uart1-sleep-state {
+ pins = "gpio0", "gpio1";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
pinctrl_backlight: backlight-state {
pins = "gpio98";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
index e8148b3d6c50..5b2e88915c2f 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
@@ -953,15 +953,15 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
};
@@ -1012,10 +1012,7 @@
&sound {
compatible = "qcom,apq8096-sndcard";
model = "DB820c";
- audio-routing = "RX_BIAS", "MCLK",
- "MM_DL1", "MultiMedia1 Playback",
- "MM_DL2", "MultiMedia2 Playback",
- "MultiMedia3 Capture", "MM_UL3";
+ audio-routing = "RX_BIAS", "MCLK";
mm1-dai-link {
link-name = "MultiMedia1";
diff --git a/arch/arm64/boot/dts/qcom/ipq5018-rdp432-c2.dts b/arch/arm64/boot/dts/qcom/ipq5018-rdp432-c2.dts
index 8460b538eb6a..43def95e9275 100644
--- a/arch/arm64/boot/dts/qcom/ipq5018-rdp432-c2.dts
+++ b/arch/arm64/boot/dts/qcom/ipq5018-rdp432-c2.dts
@@ -9,6 +9,8 @@
#include "ipq5018.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
/ {
model = "Qualcomm Technologies, Inc. IPQ5018/AP-RDP432.1-C2";
compatible = "qcom,ipq5018-rdp432-c2", "qcom,ipq5018";
@@ -28,6 +30,20 @@
status = "okay";
};
+&pcie0 {
+ pinctrl-0 = <&pcie0_default>;
+ pinctrl-names = "default";
+
+ perst-gpios = <&tlmm 15 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 16 GPIO_ACTIVE_LOW>;
+
+ status = "okay";
+};
+
+&pcie0_phy {
+ status = "okay";
+};
+
&sdhc_1 {
pinctrl-0 = <&sdc_default_state>;
pinctrl-names = "default";
@@ -43,6 +59,30 @@
};
&tlmm {
+ pcie0_default: pcie0-default-state {
+ clkreq-n-pins {
+ pins = "gpio14";
+ function = "pcie0_clk";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio15";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ output-low;
+ };
+
+ wake-n-pins {
+ pins = "gpio16";
+ function = "pcie0_wake";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+
sdc_default_state: sdc-default-state {
clk-pins {
pins = "gpio9";
diff --git a/arch/arm64/boot/dts/qcom/ipq5018.dtsi b/arch/arm64/boot/dts/qcom/ipq5018.dtsi
index 8914f2ef0bc4..130360014c5e 100644
--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi
@@ -79,6 +79,7 @@
firmware {
scm {
compatible = "qcom,scm-ipq5018", "qcom,scm";
+ qcom,dload-mode = <&tcsr 0x6100>;
qcom,sdi-enabled;
};
};
@@ -147,6 +148,40 @@
status = "disabled";
};
+ pcie1_phy: phy@7e000 {
+ compatible = "qcom,ipq5018-uniphy-pcie-phy";
+ reg = <0x0007e000 0x800>;
+
+ clocks = <&gcc GCC_PCIE1_PIPE_CLK>;
+
+ resets = <&gcc GCC_PCIE1_PHY_BCR>,
+ <&gcc GCC_PCIE1PHY_PHY_BCR>;
+
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+
+ num-lanes = <1>;
+
+ status = "disabled";
+ };
+
+ pcie0_phy: phy@86000 {
+ compatible = "qcom,ipq5018-uniphy-pcie-phy";
+ reg = <0x00086000 0x1000>;
+
+ clocks = <&gcc GCC_PCIE0_PIPE_CLK>;
+
+ resets = <&gcc GCC_PCIE0_PHY_BCR>,
+ <&gcc GCC_PCIE0PHY_PHY_BCR>;
+
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+
+ num-lanes = <2>;
+
+ status = "disabled";
+ };
+
tlmm: pinctrl@1000000 {
compatible = "qcom,ipq5018-tlmm";
reg = <0x01000000 0x300000>;
@@ -170,8 +205,8 @@
reg = <0x01800000 0x80000>;
clocks = <&xo_board_clk>,
<&sleep_clk>,
- <0>,
- <0>,
+ <&pcie0_phy>,
+ <&pcie1_phy>,
<0>,
<0>,
<0>,
@@ -187,6 +222,11 @@
#hwlock-cells = <1>;
};
+ tcsr: syscon@1937000 {
+ compatible = "qcom,tcsr-ipq5018", "syscon";
+ reg = <0x01937000 0x21000>;
+ };
+
sdhc_1: mmc@7804000 {
compatible = "qcom,ipq5018-sdhci", "qcom,sdhci-msm-v5";
reg = <0x7804000 0x1000>;
@@ -387,6 +427,208 @@
status = "disabled";
};
};
+
+ pcie1: pcie@80000000 {
+ compatible = "qcom,pcie-ipq5018";
+ reg = <0x80000000 0xf1d>,
+ <0x80000f20 0xa8>,
+ <0x80001000 0x1000>,
+ <0x00078000 0x3000>,
+ <0x80100000 0x1000>,
+ <0x0007b000 0x1000>;
+ reg-names = "dbi",
+ "elbi",
+ "atu",
+ "parf",
+ "config",
+ "mhi";
+ device_type = "pci";
+ linux,pci-domain = <1>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ /* The controller supports Gen3, but the connected PHY is Gen2-capable */
+ max-link-speed = <2>;
+
+ phys = <&pcie1_phy>;
+ phy-names ="pciephy";
+
+ ranges = <0x01000000 0 0x00000000 0x80200000 0 0x00100000>,
+ <0x02000000 0 0x80300000 0x80300000 0 0x10000000>;
+
+ msi-map = <0x0 &v2m0 0x0 0xff8>;
+
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7",
+ "global";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 142 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 0 143 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 0 144 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 0 145 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_SYS_NOC_PCIE1_AXI_CLK>,
+ <&gcc GCC_PCIE1_AXI_M_CLK>,
+ <&gcc GCC_PCIE1_AXI_S_CLK>,
+ <&gcc GCC_PCIE1_AHB_CLK>,
+ <&gcc GCC_PCIE1_AUX_CLK>,
+ <&gcc GCC_PCIE1_AXI_S_BRIDGE_CLK>;
+ clock-names = "iface",
+ "axi_m",
+ "axi_s",
+ "ahb",
+ "aux",
+ "axi_bridge";
+
+ resets = <&gcc GCC_PCIE1_PIPE_ARES>,
+ <&gcc GCC_PCIE1_SLEEP_ARES>,
+ <&gcc GCC_PCIE1_CORE_STICKY_ARES>,
+ <&gcc GCC_PCIE1_AXI_MASTER_ARES>,
+ <&gcc GCC_PCIE1_AXI_SLAVE_ARES>,
+ <&gcc GCC_PCIE1_AHB_ARES>,
+ <&gcc GCC_PCIE1_AXI_MASTER_STICKY_ARES>,
+ <&gcc GCC_PCIE1_AXI_SLAVE_STICKY_ARES>;
+ reset-names = "pipe",
+ "sleep",
+ "sticky",
+ "axi_m",
+ "axi_s",
+ "ahb",
+ "axi_m_sticky",
+ "axi_s_sticky";
+
+ status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
+ };
+
+ pcie0: pcie@a0000000 {
+ compatible = "qcom,pcie-ipq5018";
+ reg = <0xa0000000 0xf1d>,
+ <0xa0000f20 0xa8>,
+ <0xa0001000 0x1000>,
+ <0x00080000 0x3000>,
+ <0xa0100000 0x1000>,
+ <0x00083000 0x1000>;
+ reg-names = "dbi",
+ "elbi",
+ "atu",
+ "parf",
+ "config",
+ "mhi";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <2>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ /* The controller supports Gen3, but the connected PHY is Gen2-capable */
+ max-link-speed = <2>;
+
+ phys = <&pcie0_phy>;
+ phy-names ="pciephy";
+
+ ranges = <0x01000000 0 0x00000000 0xa0200000 0 0x00100000>,
+ <0x02000000 0 0xa0300000 0xa0300000 0 0x10000000>;
+
+ msi-map = <0x0 &v2m0 0x0 0xff8>;
+
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7",
+ "global";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 75 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 0 78 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 0 79 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 0 83 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
+ <&gcc GCC_PCIE0_AXI_M_CLK>,
+ <&gcc GCC_PCIE0_AXI_S_CLK>,
+ <&gcc GCC_PCIE0_AHB_CLK>,
+ <&gcc GCC_PCIE0_AUX_CLK>,
+ <&gcc GCC_PCIE0_AXI_S_BRIDGE_CLK>;
+ clock-names = "iface",
+ "axi_m",
+ "axi_s",
+ "ahb",
+ "aux",
+ "axi_bridge";
+
+ resets = <&gcc GCC_PCIE0_PIPE_ARES>,
+ <&gcc GCC_PCIE0_SLEEP_ARES>,
+ <&gcc GCC_PCIE0_CORE_STICKY_ARES>,
+ <&gcc GCC_PCIE0_AXI_MASTER_ARES>,
+ <&gcc GCC_PCIE0_AXI_SLAVE_ARES>,
+ <&gcc GCC_PCIE0_AHB_ARES>,
+ <&gcc GCC_PCIE0_AXI_MASTER_STICKY_ARES>,
+ <&gcc GCC_PCIE0_AXI_SLAVE_STICKY_ARES>;
+ reset-names = "pipe",
+ "sleep",
+ "sticky",
+ "axi_m",
+ "axi_s",
+ "ahb",
+ "axi_m_sticky",
+ "axi_s_sticky";
+
+ status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
+ };
};
timer {
diff --git a/arch/arm64/boot/dts/qcom/ipq5332-rdp441.dts b/arch/arm64/boot/dts/qcom/ipq5332-rdp441.dts
index 846413817e9a..79ec77cfe552 100644
--- a/arch/arm64/boot/dts/qcom/ipq5332-rdp441.dts
+++ b/arch/arm64/boot/dts/qcom/ipq5332-rdp441.dts
@@ -32,6 +32,34 @@
status = "okay";
};
+&pcie0 {
+ pinctrl-0 = <&pcie0_default>;
+ pinctrl-names = "default";
+
+ perst-gpios = <&tlmm 38 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 39 GPIO_ACTIVE_LOW>;
+
+ status = "okay";
+};
+
+&pcie0_phy {
+ status = "okay";
+};
+
+&pcie1 {
+ pinctrl-0 = <&pcie1_default>;
+ pinctrl-names = "default";
+
+ perst-gpios = <&tlmm 47 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 48 GPIO_ACTIVE_LOW>;
+
+ status = "okay";
+};
+
+&pcie1_phy {
+ status = "okay";
+};
+
&tlmm {
i2c_1_pins: i2c-1-state {
pins = "gpio29", "gpio30";
@@ -40,6 +68,54 @@
bias-pull-up;
};
+ pcie0_default: pcie0-default-state {
+ clkreq-n-pins {
+ pins = "gpio37";
+ function = "pcie0_clk";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio38";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ output-low;
+ };
+
+ wake-n-pins {
+ pins = "gpio39";
+ function = "pcie0_wake";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+
+ pcie1_default: pcie1-default-state {
+ clkreq-n-pins {
+ pins = "gpio46";
+ function = "pcie1_clk";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio47";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ output-low;
+ };
+
+ wake-n-pins {
+ pins = "gpio48";
+ function = "pcie1_wake";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+
sdc_default_state: sdc-default-state {
clk-pins {
pins = "gpio13";
diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
index ca3da95730bd..bd28c490415f 100644
--- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
@@ -177,6 +177,46 @@
reg = <0x1d 0x2>;
bits = <7 2>;
};
+
+ tsens_sens11_off: s11@3a5 {
+ reg = <0x3a5 0x1>;
+ bits = <4 4>;
+ };
+
+ tsens_sens12_off: s12@3a6 {
+ reg = <0x3a6 0x1>;
+ bits = <0 4>;
+ };
+
+ tsens_sens13_off: s13@3a6 {
+ reg = <0x3a6 0x1>;
+ bits = <4 4>;
+ };
+
+ tsens_sens14_off: s14@3ad {
+ reg = <0x3ad 0x2>;
+ bits = <7 4>;
+ };
+
+ tsens_sens15_off: s15@3ae {
+ reg = <0x3ae 0x1>;
+ bits = <3 4>;
+ };
+
+ tsens_mode: mode@3e1 {
+ reg = <0x3e1 0x1>;
+ bits = <0 3>;
+ };
+
+ tsens_base0: base0@3e1 {
+ reg = <0x3e1 0x2>;
+ bits = <3 10>;
+ };
+
+ tsens_base1: base1@3e2 {
+ reg = <0x3e2 0x2>;
+ bits = <5 10>;
+ };
};
rng: rng@e3000 {
@@ -186,6 +226,72 @@
clock-names = "core";
};
+ tsens: thermal-sensor@4a9000 {
+ compatible = "qcom,ipq5332-tsens";
+ reg = <0x004a9000 0x1000>,
+ <0x004a8000 0x1000>;
+ interrupts = <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "combined";
+ nvmem-cells = <&tsens_mode>,
+ <&tsens_base0>,
+ <&tsens_base1>,
+ <&tsens_sens11_off>,
+ <&tsens_sens12_off>,
+ <&tsens_sens13_off>,
+ <&tsens_sens14_off>,
+ <&tsens_sens15_off>;
+ nvmem-cell-names = "mode",
+ "base0",
+ "base1",
+ "tsens_sens11_off",
+ "tsens_sens12_off",
+ "tsens_sens13_off",
+ "tsens_sens14_off",
+ "tsens_sens15_off";
+ #qcom,sensors = <5>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ pcie0_phy: phy@4b0000 {
+ compatible = "qcom,ipq5332-uniphy-pcie-phy";
+ reg = <0x004b0000 0x800>;
+
+ clocks = <&gcc GCC_PCIE3X1_0_PIPE_CLK>,
+ <&gcc GCC_PCIE3X1_PHY_AHB_CLK>;
+
+ resets = <&gcc GCC_PCIE3X1_0_PHY_BCR>,
+ <&gcc GCC_PCIE3X1_PHY_AHB_CLK_ARES>,
+ <&gcc GCC_PCIE3X1_0_PHY_PHY_BCR>;
+
+ #clock-cells = <0>;
+
+ #phy-cells = <0>;
+
+ num-lanes = <1>;
+
+ status = "disabled";
+ };
+
+ pcie1_phy: phy@4b1000 {
+ compatible = "qcom,ipq5332-uniphy-pcie-phy";
+ reg = <0x004b1000 0x1000>;
+
+ clocks = <&gcc GCC_PCIE3X2_PIPE_CLK>,
+ <&gcc GCC_PCIE3X2_PHY_AHB_CLK>;
+
+ resets = <&gcc GCC_PCIE3X2_PHY_BCR>,
+ <&gcc GCC_PCIE3X2_PHY_AHB_CLK_ARES>,
+ <&gcc GCC_PCIE3X2PHY_PHY_BCR>;
+
+ #clock-cells = <0>;
+
+ #phy-cells = <0>;
+
+ num-lanes = <2>;
+
+ status = "disabled";
+ };
+
tlmm: pinctrl@1000000 {
compatible = "qcom,ipq5332-tlmm";
reg = <0x01000000 0x300000>;
@@ -212,8 +318,8 @@
#interconnect-cells = <1>;
clocks = <&xo_board>,
<&sleep_clk>,
- <0>,
- <0>,
+ <&pcie1_phy>,
+ <&pcie0_phy>,
<0>;
};
@@ -479,6 +585,283 @@
status = "disabled";
};
};
+
+ pcie1: pcie@18000000 {
+ compatible = "qcom,pcie-ipq5332", "qcom,pcie-ipq9574";
+ reg = <0x18000000 0xf1c>,
+ <0x18000f20 0xa8>,
+ <0x18001000 0x1000>,
+ <0x00088000 0x3000>,
+ <0x18100000 0x1000>,
+ <0x0008b000 0x1000>;
+ reg-names = "dbi",
+ "elbi",
+ "atu",
+ "parf",
+ "config",
+ "mhi";
+ device_type = "pci";
+ linux,pci-domain = <1>;
+ num-lanes = <2>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x00000000 0x18200000 0x0 0x00100000>,
+ <0x02000000 0x0 0x18300000 0x18300000 0x0 0x07d00000>;
+
+ msi-map = <0x0 &v2m0 0x0 0xffd>;
+
+ interrupts = <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 410 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 411 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7",
+ "global";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 412 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 0 413 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 0 414 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 0 415 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_PCIE3X2_AXI_M_CLK>,
+ <&gcc GCC_PCIE3X2_AXI_S_CLK>,
+ <&gcc GCC_PCIE3X2_AXI_S_BRIDGE_CLK>,
+ <&gcc GCC_PCIE3X2_RCHG_CLK>,
+ <&gcc GCC_PCIE3X2_AHB_CLK>,
+ <&gcc GCC_PCIE3X2_AUX_CLK>;
+ clock-names = "axi_m",
+ "axi_s",
+ "axi_bridge",
+ "rchng",
+ "ahb",
+ "aux";
+
+ assigned-clocks = <&gcc GCC_PCIE3X2_AUX_CLK>;
+
+ assigned-clock-rates = <2000000>;
+
+ resets = <&gcc GCC_PCIE3X2_PIPE_ARES>,
+ <&gcc GCC_PCIE3X2_CORE_STICKY_ARES>,
+ <&gcc GCC_PCIE3X2_AXI_S_STICKY_ARES>,
+ <&gcc GCC_PCIE3X2_AXI_S_CLK_ARES>,
+ <&gcc GCC_PCIE3X2_AXI_M_STICKY_ARES>,
+ <&gcc GCC_PCIE3X2_AXI_M_CLK_ARES>,
+ <&gcc GCC_PCIE3X2_AUX_CLK_ARES>,
+ <&gcc GCC_PCIE3X2_AHB_CLK_ARES>;
+ reset-names = "pipe",
+ "sticky",
+ "axi_s_sticky",
+ "axi_s",
+ "axi_m_sticky",
+ "axi_m",
+ "aux",
+ "ahb";
+
+ phys = <&pcie1_phy>;
+ phy-names = "pciephy";
+
+ interconnects = <&gcc MASTER_SNOC_PCIE3_2_M &gcc SLAVE_SNOC_PCIE3_2_M>,
+ <&gcc MASTER_ANOC_PCIE3_2_S &gcc SLAVE_ANOC_PCIE3_2_S>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+ status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
+ };
+
+ pcie0: pcie@20000000 {
+ compatible = "qcom,pcie-ipq5332", "qcom,pcie-ipq9574";
+ reg = <0x20000000 0xf1c>,
+ <0x20000f20 0xa8>,
+ <0x20001000 0x1000>,
+ <0x00080000 0x3000>,
+ <0x20100000 0x1000>,
+ <0x00083000 0x1000>;
+ reg-names = "dbi",
+ "elbi",
+ "atu",
+ "parf",
+ "config",
+ "mhi";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ num-lanes = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x00000000 0x20200000 0x0 0x00100000>,
+ <0x02000000 0x0 0x20300000 0x20300000 0x0 0x0fd00000>;
+
+ msi-map = <0x0 &v2m0 0x0 0xffd>;
+
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7",
+ "global";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 35 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 0 36 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 0 37 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 0 38 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_PCIE3X1_0_AXI_M_CLK>,
+ <&gcc GCC_PCIE3X1_0_AXI_S_CLK>,
+ <&gcc GCC_PCIE3X1_0_AXI_S_BRIDGE_CLK>,
+ <&gcc GCC_PCIE3X1_0_RCHG_CLK>,
+ <&gcc GCC_PCIE3X1_0_AHB_CLK>,
+ <&gcc GCC_PCIE3X1_0_AUX_CLK>;
+ clock-names = "axi_m",
+ "axi_s",
+ "axi_bridge",
+ "rchng",
+ "ahb",
+ "aux";
+
+ assigned-clocks = <&gcc GCC_PCIE3X1_0_AUX_CLK>;
+
+ assigned-clock-rates = <2000000>;
+
+ resets = <&gcc GCC_PCIE3X1_0_PIPE_ARES>,
+ <&gcc GCC_PCIE3X1_0_CORE_STICKY_ARES>,
+ <&gcc GCC_PCIE3X1_0_AXI_S_STICKY_ARES>,
+ <&gcc GCC_PCIE3X1_0_AXI_S_CLK_ARES>,
+ <&gcc GCC_PCIE3X1_0_AXI_M_STICKY_ARES>,
+ <&gcc GCC_PCIE3X1_0_AXI_M_CLK_ARES>,
+ <&gcc GCC_PCIE3X1_0_AUX_CLK_ARES>,
+ <&gcc GCC_PCIE3X1_0_AHB_CLK_ARES>;
+ reset-names = "pipe",
+ "sticky",
+ "axi_s_sticky",
+ "axi_s",
+ "axi_m_sticky",
+ "axi_m",
+ "aux",
+ "ahb";
+
+ phys = <&pcie0_phy>;
+ phy-names = "pciephy";
+
+ interconnects = <&gcc MASTER_SNOC_PCIE3_1_M &gcc SLAVE_SNOC_PCIE3_1_M>,
+ <&gcc MASTER_ANOC_PCIE3_1_S &gcc SLAVE_ANOC_PCIE3_1_S>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+ status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
+ };
+ };
+
+ thermal-zones {
+ rfa-0-thermal {
+ thermal-sensors = <&tsens 11>;
+
+ trips {
+ rfa-0-critical {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ rfa-1-thermal {
+ thermal-sensors = <&tsens 12>;
+
+ trips {
+ rfa-1-critical {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ misc-thermal {
+ thermal-sensors = <&tsens 13>;
+
+ trips {
+ misc-critical {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-top-thermal {
+ polling-delay-passive = <100>;
+ thermal-sensors = <&tsens 14>;
+
+ trips {
+ cpu-top-critical {
+ temperature = <115000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+
+ cpu-passive {
+ temperature = <105000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ top-glue-thermal {
+ thermal-sensors = <&tsens 15>;
+
+ trips {
+ top-glue-critical {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
};
timer {
diff --git a/arch/arm64/boot/dts/qcom/ipq5424-rdp466.dts b/arch/arm64/boot/dts/qcom/ipq5424-rdp466.dts
index b6e4bb3328b3..1f89530cb035 100644
--- a/arch/arm64/boot/dts/qcom/ipq5424-rdp466.dts
+++ b/arch/arm64/boot/dts/qcom/ipq5424-rdp466.dts
@@ -7,6 +7,8 @@
/dts-v1/;
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
#include "ipq5424.dtsi"
/ {
@@ -17,6 +19,33 @@
serial0 = &uart1;
};
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&gpio_keys_default>;
+ pinctrl-names = "default";
+
+ button-wps {
+ label = "wps";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&tlmm 19 GPIO_ACTIVE_LOW>;
+ debounce-interval = <60>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-0 = <&gpio_leds_default>;
+ pinctrl-names = "default";
+
+ led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_WLAN;
+ gpios = <&tlmm 42 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "phy0tx";
+ default-state = "off";
+ };
+ };
+
vreg_misc_3p3: regulator-usb-3p3 {
compatible = "regulator-fixed";
regulator-min-microvolt = <3300000>;
@@ -53,6 +82,32 @@
dr_mode = "host";
};
+&pcie2 {
+ pinctrl-0 = <&pcie2_default_state>;
+ pinctrl-names = "default";
+
+ perst-gpios = <&tlmm 31 GPIO_ACTIVE_LOW>;
+
+ status = "okay";
+};
+
+&pcie2_phy {
+ status = "okay";
+};
+
+&pcie3 {
+ pinctrl-0 = <&pcie3_default_state>;
+ pinctrl-names = "default";
+
+ perst-gpios = <&tlmm 34 GPIO_ACTIVE_LOW>;
+
+ status = "okay";
+};
+
+&pcie3_phy {
+ status = "okay";
+};
+
&qusb_phy_0 {
vdd-supply = <&vreg_misc_0p925>;
vdda-pll-supply = <&vreg_misc_1p8>;
@@ -69,6 +124,13 @@
status = "okay";
};
+&sdhc {
+ pinctrl-0 = <&sdc_default_state>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
&sleep_clk {
clock-frequency = <32000>;
};
@@ -95,6 +157,20 @@
};
&tlmm {
+ gpio_keys_default: gpio-keys-default-state {
+ pins = "gpio19";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ gpio_leds_default: gpio-leds-default-state {
+ pins = "gpio42";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-down;
+ };
+
spi0_default_state: spi0-default-state {
clk-pins {
pins = "gpio6";
@@ -147,6 +223,20 @@
bias-pull-up;
};
};
+
+ pcie2_default_state: pcie2-default-state {
+ pins = "gpio31";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ pcie3_default_state: pcie3-default-state {
+ pins = "gpio34";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
};
&uart1 {
@@ -166,4 +256,3 @@
&xo_board {
clock-frequency = <24000000>;
};
-
diff --git a/arch/arm64/boot/dts/qcom/ipq5424.dtsi b/arch/arm64/boot/dts/qcom/ipq5424.dtsi
index 7034d378b1ef..66bd2261eb25 100644
--- a/arch/arm64/boot/dts/qcom/ipq5424.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq5424.dtsi
@@ -9,6 +9,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,ipq5424-gcc.h>
#include <dt-bindings/reset/qcom,ipq5424-gcc.h>
+#include <dt-bindings/interconnect/qcom,ipq5424.h>
#include <dt-bindings/gpio/gpio.h>
/ {
@@ -132,6 +133,11 @@
#size-cells = <2>;
ranges;
+ bootloader@8a200000 {
+ reg = <0x0 0x8a200000 0x0 0x400000>;
+ no-map;
+ };
+
tz@8a600000 {
reg = <0x0 0x8a600000 0x0 0x200000>;
no-map;
@@ -152,6 +158,197 @@
#size-cells = <2>;
ranges = <0 0 0 0 0x10 0>;
+ pcie0_phy: phy@84000 {
+ compatible = "qcom,ipq5424-qmp-gen3x1-pcie-phy",
+ "qcom,ipq9574-qmp-gen3x1-pcie-phy";
+ reg = <0x0 0x00084000 0x0 0x1000>;
+ clocks = <&gcc GCC_PCIE0_AUX_CLK>,
+ <&gcc GCC_PCIE0_AHB_CLK>,
+ <&gcc GCC_PCIE0_PIPE_CLK>;
+ clock-names = "aux",
+ "cfg_ahb",
+ "pipe";
+
+ assigned-clocks = <&gcc GCC_PCIE0_AUX_CLK>;
+ assigned-clock-rates = <20000000>;
+
+ resets = <&gcc GCC_PCIE0_PHY_BCR>,
+ <&gcc GCC_PCIE0PHY_PHY_BCR>;
+ reset-names = "phy",
+ "common";
+
+ #clock-cells = <0>;
+ clock-output-names = "gcc_pcie0_pipe_clk_src";
+
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ pcie1_phy: phy@8c000 {
+ compatible = "qcom,ipq5424-qmp-gen3x1-pcie-phy",
+ "qcom,ipq9574-qmp-gen3x1-pcie-phy";
+ reg = <0x0 0x0008c000 0x0 0x1000>;
+ clocks = <&gcc GCC_PCIE1_AUX_CLK>,
+ <&gcc GCC_PCIE1_AHB_CLK>,
+ <&gcc GCC_PCIE1_PIPE_CLK>;
+ clock-names = "aux",
+ "cfg_ahb",
+ "pipe";
+
+ assigned-clocks = <&gcc GCC_PCIE1_AUX_CLK>;
+ assigned-clock-rates = <20000000>;
+
+ resets = <&gcc GCC_PCIE1_PHY_BCR>,
+ <&gcc GCC_PCIE1PHY_PHY_BCR>;
+ reset-names = "phy",
+ "common";
+
+ #clock-cells = <0>;
+ clock-output-names = "gcc_pcie1_pipe_clk_src";
+
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ efuse@a4000 {
+ compatible = "qcom,ipq5424-qfprom", "qcom,qfprom";
+ reg = <0 0x000a4000 0 0x741>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ tsens_sens9_off: s9@3dc {
+ reg = <0x3dc 0x1>;
+ bits = <4 4>;
+ };
+
+ tsens_sens10_off: s10@3dd {
+ reg = <0x3dd 0x1>;
+ bits = <0 4>;
+ };
+
+ tsens_sens11_off: s11@3dd {
+ reg = <0x3dd 0x1>;
+ bits = <4 4>;
+ };
+
+ tsens_sens12_off: s12@3de {
+ reg = <0x3de 0x1>;
+ bits = <0 4>;
+ };
+
+ tsens_sens13_off: s13@3de {
+ reg = <0x3de 0x1>;
+ bits = <4 4>;
+ };
+
+ tsens_sens14_off: s14@3e5 {
+ reg = <0x3e5 0x2>;
+ bits = <7 4>;
+ };
+
+ tsens_sens15_off: s15@3e6 {
+ reg = <0x3e6 0x1>;
+ bits = <3 4>;
+ };
+
+ tsens_mode: mode@419 {
+ reg = <0x419 0x1>;
+ bits = <0 3>;
+ };
+
+ tsens_base0: base0@419 {
+ reg = <0x419 0x2>;
+ bits = <3 10>;
+ };
+
+ tsens_base1: base1@41a {
+ reg = <0x41a 0x2>;
+ bits = <5 10>;
+ };
+ };
+
+ pcie2_phy: phy@f4000 {
+ compatible = "qcom,ipq5424-qmp-gen3x2-pcie-phy",
+ "qcom,ipq9574-qmp-gen3x2-pcie-phy";
+ reg = <0x0 0x000f4000 0x0 0x2000>;
+ clocks = <&gcc GCC_PCIE2_AUX_CLK>,
+ <&gcc GCC_PCIE2_AHB_CLK>,
+ <&gcc GCC_PCIE2_PIPE_CLK>;
+ clock-names = "aux",
+ "cfg_ahb",
+ "pipe";
+
+ assigned-clocks = <&gcc GCC_PCIE2_AUX_CLK>;
+ assigned-clock-rates = <20000000>;
+
+ resets = <&gcc GCC_PCIE2_PHY_BCR>,
+ <&gcc GCC_PCIE2PHY_PHY_BCR>;
+ reset-names = "phy",
+ "common";
+
+ #clock-cells = <0>;
+ clock-output-names = "gcc_pcie2_pipe_clk_src";
+
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ pcie3_phy: phy@fc000 {
+ compatible = "qcom,ipq5424-qmp-gen3x2-pcie-phy",
+ "qcom,ipq9574-qmp-gen3x2-pcie-phy";
+ reg = <0x0 0x000fc000 0x0 0x2000>;
+ clocks = <&gcc GCC_PCIE3_AUX_CLK>,
+ <&gcc GCC_PCIE3_AHB_CLK>,
+ <&gcc GCC_PCIE3_PIPE_CLK>;
+ clock-names = "aux",
+ "cfg_ahb",
+ "pipe";
+
+ assigned-clocks = <&gcc GCC_PCIE3_AUX_CLK>;
+ assigned-clock-rates = <20000000>;
+
+ resets = <&gcc GCC_PCIE3_PHY_BCR>,
+ <&gcc GCC_PCIE3PHY_PHY_BCR>;
+ reset-names = "phy",
+ "common";
+
+ #clock-cells = <0>;
+ clock-output-names = "gcc_pcie3_pipe_clk_src";
+
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ tsens: thermal-sensor@4a9000 {
+ compatible = "qcom,ipq5424-tsens";
+ reg = <0 0x004a9000 0 0x1000>,
+ <0 0x004a8000 0 0x1000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "combined";
+ nvmem-cells = <&tsens_mode>,
+ <&tsens_base0>,
+ <&tsens_base1>,
+ <&tsens_sens9_off>,
+ <&tsens_sens10_off>,
+ <&tsens_sens11_off>,
+ <&tsens_sens12_off>,
+ <&tsens_sens13_off>,
+ <&tsens_sens14_off>,
+ <&tsens_sens15_off>;
+ nvmem-cell-names = "mode",
+ "base0",
+ "base1",
+ "tsens_sens9_off",
+ "tsens_sens10_off",
+ "tsens_sens11_off",
+ "tsens_sens12_off",
+ "tsens_sens13_off",
+ "tsens_sens14_off",
+ "tsens_sens15_off";
+ #qcom,sensors = <7>;
+ #thermal-sensor-cells = <1>;
+ };
+
rng: rng@4c3000 {
compatible = "qcom,ipq5424-trng", "qcom,trng";
reg = <0 0x004c3000 0 0x1000>;
@@ -189,10 +386,10 @@
reg = <0 0x01800000 0 0x40000>;
clocks = <&xo_board>,
<&sleep_clk>,
- <0>,
- <0>,
- <0>,
- <0>,
+ <&pcie0_phy>,
+ <&pcie1_phy>,
+ <&pcie2_phy>,
+ <&pcie3_phy>,
<0>;
#clock-cells = <1>;
#reset-cells = <1>;
@@ -265,6 +462,8 @@
<&xo_board>;
clock-names = "iface", "core", "xo";
+ supports-cqe;
+
status = "disabled";
};
@@ -506,6 +705,532 @@
};
};
+ pcie3: pcie@40000000 {
+ compatible = "qcom,pcie-ipq5424", "qcom,pcie-ipq9574";
+ reg = <0x0 0x40000000 0x0 0xf1c>,
+ <0x0 0x40000f20 0x0 0xa8>,
+ <0x0 0x40001000 0x0 0x1000>,
+ <0x0 0x000f8000 0x0 0x3000>,
+ <0x0 0x40100000 0x0 0x1000>,
+ <0x0 0x000fe000 0x0 0x1000>;
+ reg-names = "dbi",
+ "elbi",
+ "atu",
+ "parf",
+ "config",
+ "mhi";
+ device_type = "pci";
+ linux,pci-domain = <3>;
+ num-lanes = <2>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x00100000>,
+ <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x0fd00000>;
+
+ msi-map = <0x0 &intc 0x0 0x1000>;
+
+ interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7",
+ "global";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 479 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 480 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 481 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 482 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_PCIE3_AXI_M_CLK>,
+ <&gcc GCC_PCIE3_AXI_S_CLK>,
+ <&gcc GCC_PCIE3_AXI_S_BRIDGE_CLK>,
+ <&gcc GCC_PCIE3_RCHNG_CLK>,
+ <&gcc GCC_PCIE3_AHB_CLK>,
+ <&gcc GCC_PCIE3_AUX_CLK>;
+ clock-names = "axi_m",
+ "axi_s",
+ "axi_bridge",
+ "rchng",
+ "ahb",
+ "aux";
+
+ assigned-clocks = <&gcc GCC_PCIE3_RCHNG_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ resets = <&gcc GCC_PCIE3_PIPE_ARES>,
+ <&gcc GCC_PCIE3_CORE_STICKY_RESET>,
+ <&gcc GCC_PCIE3_AXI_S_STICKY_RESET>,
+ <&gcc GCC_PCIE3_AXI_S_ARES>,
+ <&gcc GCC_PCIE3_AXI_M_STICKY_RESET>,
+ <&gcc GCC_PCIE3_AXI_M_ARES>,
+ <&gcc GCC_PCIE3_AUX_ARES>,
+ <&gcc GCC_PCIE3_AHB_ARES>;
+ reset-names = "pipe",
+ "sticky",
+ "axi_s_sticky",
+ "axi_s",
+ "axi_m_sticky",
+ "axi_m",
+ "aux",
+ "ahb";
+
+ phys = <&pcie3_phy>;
+ phy-names = "pciephy";
+ interconnects = <&gcc MASTER_ANOC_PCIE3 &gcc SLAVE_ANOC_PCIE3>,
+ <&gcc MASTER_CNOC_PCIE3 &gcc SLAVE_CNOC_PCIE3>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+ status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
+ };
+
+ pcie2: pcie@50000000 {
+ compatible = "qcom,pcie-ipq5424", "qcom,pcie-ipq9574";
+ reg = <0x0 0x50000000 0x0 0xf1c>,
+ <0x0 0x50000f20 0x0 0xa8>,
+ <0x0 0x50001000 0x0 0x1000>,
+ <0x0 0x000f0000 0x0 0x3000>,
+ <0x0 0x50100000 0x0 0x1000>,
+ <0x0 0x000f6000 0x0 0x1000>;
+ reg-names = "dbi",
+ "elbi",
+ "atu",
+ "parf",
+ "config",
+ "mhi";
+ device_type = "pci";
+ linux,pci-domain = <2>;
+ num-lanes = <2>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x50200000 0x0 0x00100000>,
+ <0x02000000 0x0 0x50300000 0x0 0x50300000 0x0 0x0fd00000>;
+
+ msi-map = <0x0 &intc 0x0 0x1000>;
+
+ interrupts = <GIC_SPI 455 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 457 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 459 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 460 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7",
+ "global";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 464 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 465 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 466 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 467 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_PCIE2_AXI_M_CLK>,
+ <&gcc GCC_PCIE2_AXI_S_CLK>,
+ <&gcc GCC_PCIE2_AXI_S_BRIDGE_CLK>,
+ <&gcc GCC_PCIE2_RCHNG_CLK>,
+ <&gcc GCC_PCIE2_AHB_CLK>,
+ <&gcc GCC_PCIE2_AUX_CLK>;
+ clock-names = "axi_m",
+ "axi_s",
+ "axi_bridge",
+ "rchng",
+ "ahb",
+ "aux";
+
+ assigned-clocks = <&gcc GCC_PCIE2_RCHNG_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ resets = <&gcc GCC_PCIE2_PIPE_ARES>,
+ <&gcc GCC_PCIE2_CORE_STICKY_RESET>,
+ <&gcc GCC_PCIE2_AXI_S_STICKY_RESET>,
+ <&gcc GCC_PCIE2_AXI_S_ARES>,
+ <&gcc GCC_PCIE2_AXI_M_STICKY_RESET>,
+ <&gcc GCC_PCIE2_AXI_M_ARES>,
+ <&gcc GCC_PCIE2_AUX_ARES>,
+ <&gcc GCC_PCIE2_AHB_ARES>;
+ reset-names = "pipe",
+ "sticky",
+ "axi_s_sticky",
+ "axi_s",
+ "axi_m_sticky",
+ "axi_m",
+ "aux",
+ "ahb";
+
+ phys = <&pcie2_phy>;
+ phy-names = "pciephy";
+ interconnects = <&gcc MASTER_ANOC_PCIE2 &gcc SLAVE_ANOC_PCIE2>,
+ <&gcc MASTER_CNOC_PCIE2 &gcc SLAVE_CNOC_PCIE2>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+ status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
+ };
+
+ pcie1: pcie@60000000 {
+ compatible = "qcom,pcie-ipq5424", "qcom,pcie-ipq9574";
+ reg = <0x0 0x60000000 0x0 0xf1c>,
+ <0x0 0x60000f20 0x0 0xa8>,
+ <0x0 0x60001000 0x0 0x1000>,
+ <0x0 0x00088000 0x0 0x3000>,
+ <0x0 0x60100000 0x0 0x1000>,
+ <0x0 0x0008e000 0x0 0x1000>;
+ reg-names = "dbi",
+ "elbi",
+ "atu",
+ "parf",
+ "config",
+ "mhi";
+ device_type = "pci";
+ linux,pci-domain = <1>;
+ num-lanes = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x00100000>,
+ <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x0fd00000>;
+
+ msi-map = <0x0 &intc 0x0 0x1000>;
+
+ interrupts = <GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 441 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 442 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 443 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7",
+ "global";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 449 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 450 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 451 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 452 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_PCIE1_AXI_M_CLK>,
+ <&gcc GCC_PCIE1_AXI_S_CLK>,
+ <&gcc GCC_PCIE1_AXI_S_BRIDGE_CLK>,
+ <&gcc GCC_PCIE1_RCHNG_CLK>,
+ <&gcc GCC_PCIE1_AHB_CLK>,
+ <&gcc GCC_PCIE1_AUX_CLK>;
+ clock-names = "axi_m",
+ "axi_s",
+ "axi_bridge",
+ "rchng",
+ "ahb",
+ "aux";
+
+ assigned-clocks = <&gcc GCC_PCIE1_RCHNG_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ resets = <&gcc GCC_PCIE1_PIPE_ARES>,
+ <&gcc GCC_PCIE1_CORE_STICKY_RESET>,
+ <&gcc GCC_PCIE1_AXI_S_STICKY_RESET>,
+ <&gcc GCC_PCIE1_AXI_S_ARES>,
+ <&gcc GCC_PCIE1_AXI_M_STICKY_RESET>,
+ <&gcc GCC_PCIE1_AXI_M_ARES>,
+ <&gcc GCC_PCIE1_AUX_ARES>,
+ <&gcc GCC_PCIE1_AHB_ARES>;
+ reset-names = "pipe",
+ "sticky",
+ "axi_s_sticky",
+ "axi_s",
+ "axi_m_sticky",
+ "axi_m",
+ "aux",
+ "ahb";
+
+ phys = <&pcie1_phy>;
+ phy-names = "pciephy";
+ interconnects = <&gcc MASTER_ANOC_PCIE1 &gcc SLAVE_ANOC_PCIE1>,
+ <&gcc MASTER_CNOC_PCIE1 &gcc SLAVE_CNOC_PCIE1>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+ status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
+ };
+
+ pcie0: pcie@70000000 {
+ compatible = "qcom,pcie-ipq5424", "qcom,pcie-ipq9574";
+ reg = <0x0 0x70000000 0x0 0xf1c>,
+ <0x0 0x70000f20 0x0 0xa8>,
+ <0x0 0x70001000 0x0 0x1000>,
+ <0x0 0x00080000 0x0 0x3000>,
+ <0x0 0x70100000 0x0 0x1000>,
+ <0x0 0x00086000 0x0 0x1000>;
+ reg-names = "dbi",
+ "elbi",
+ "atu",
+ "parf",
+ "config",
+ "mhi";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ num-lanes = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x70200000 0x0 0x00100000>,
+ <0x02000000 0x0 0x70300000 0x0 0x70300000 0x0 0x0fd00000>;
+
+ msi-map = <0x0 &intc 0x0 0x1000>;
+
+ interrupts = <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 433 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7",
+ "global";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 434 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 435 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 436 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 437 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_PCIE0_AXI_M_CLK>,
+ <&gcc GCC_PCIE0_AXI_S_CLK>,
+ <&gcc GCC_PCIE0_AXI_S_BRIDGE_CLK>,
+ <&gcc GCC_PCIE0_RCHNG_CLK>,
+ <&gcc GCC_PCIE0_AHB_CLK>,
+ <&gcc GCC_PCIE0_AUX_CLK>;
+ clock-names = "axi_m",
+ "axi_s",
+ "axi_bridge",
+ "rchng",
+ "ahb",
+ "aux";
+
+ assigned-clocks = <&gcc GCC_PCIE0_RCHNG_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ resets = <&gcc GCC_PCIE0_PIPE_ARES>,
+ <&gcc GCC_PCIE0_CORE_STICKY_RESET>,
+ <&gcc GCC_PCIE0_AXI_S_STICKY_RESET>,
+ <&gcc GCC_PCIE0_AXI_S_ARES>,
+ <&gcc GCC_PCIE0_AXI_M_STICKY_RESET>,
+ <&gcc GCC_PCIE0_AXI_M_ARES>,
+ <&gcc GCC_PCIE0_AUX_ARES>,
+ <&gcc GCC_PCIE0_AHB_ARES>;
+ reset-names = "pipe",
+ "sticky",
+ "axi_s_sticky",
+ "axi_s",
+ "axi_m_sticky",
+ "axi_m",
+ "aux",
+ "ahb";
+
+ phys = <&pcie0_phy>;
+ phy-names = "pciephy";
+ interconnects = <&gcc MASTER_ANOC_PCIE0 &gcc SLAVE_ANOC_PCIE0>,
+ <&gcc MASTER_CNOC_PCIE0 &gcc SLAVE_CNOC_PCIE0>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+ status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
+ };
+ };
+
+ thermal_zones: thermal-zones {
+ cpu0-thermal {
+ polling-delay-passive = <100>;
+ thermal-sensors = <&tsens 14>;
+
+ trips {
+ cpu-critical {
+ temperature = <120000>;
+ hysteresis = <9000>;
+ type = "critical";
+ };
+
+ cpu-passive {
+ temperature = <110000>;
+ hysteresis = <9000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu1-thermal {
+ polling-delay-passive = <100>;
+ thermal-sensors = <&tsens 12>;
+
+ trips {
+ cpu-critical {
+ temperature = <120000>;
+ hysteresis = <9000>;
+ type = "critical";
+ };
+
+ cpu-passive {
+ temperature = <110000>;
+ hysteresis = <9000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu2-thermal {
+ polling-delay-passive = <100>;
+ thermal-sensors = <&tsens 11>;
+
+ trips {
+ cpu-critical {
+ temperature = <120000>;
+ hysteresis = <9000>;
+ type = "critical";
+ };
+
+ cpu-passive {
+ temperature = <110000>;
+ hysteresis = <9000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu3-thermal {
+ polling-delay-passive = <100>;
+ thermal-sensors = <&tsens 13>;
+
+ trips {
+ cpu-critical {
+ temperature = <120000>;
+ hysteresis = <9000>;
+ type = "critical";
+ };
+
+ cpu-passive {
+ temperature = <110000>;
+ hysteresis = <9000>;
+ type = "passive";
+ };
+ };
+ };
+
+ wcss-tile2-thermal {
+ thermal-sensors = <&tsens 9>;
+
+ trips {
+ wcss-tile2-critical {
+ temperature = <125000>;
+ hysteresis = <9000>;
+ type = "critical";
+ };
+ };
+ };
+
+ wcss-tile3-thermal {
+ thermal-sensors = <&tsens 10>;
+
+ trips {
+ wcss-tile3-critical {
+ temperature = <125000>;
+ hysteresis = <9000>;
+ type = "critical";
+ };
+ };
+ };
+
+ top-glue-thermal {
+ thermal-sensors = <&tsens 15>;
+
+ trips {
+ top-glue-critical {
+ temperature = <125000>;
+ hysteresis = <9000>;
+ type = "critical";
+ };
+ };
+ };
};
timer {
diff --git a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts
index f5f4827c0e17..9c69d3027b43 100644
--- a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts
+++ b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts
@@ -7,7 +7,7 @@
/dts-v1/;
-#include "ipq6018.dtsi"
+#include "ipq6018-mp5496.dtsi"
/ {
model = "Qualcomm Technologies, Inc. IPQ6018/AP-CP01-C1";
diff --git a/arch/arm64/boot/dts/qcom/ipq6018-mp5496.dtsi b/arch/arm64/boot/dts/qcom/ipq6018-mp5496.dtsi
new file mode 100644
index 000000000000..d6b111a77f79
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/ipq6018-mp5496.dtsi
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * ipq6018-mp5496.dtsi describes common properties (e.g. regulators) that
+ * apply to most devices that make use of the IPQ6018 SoC and MP5496 PMIC.
+ */
+
+#include "ipq6018.dtsi"
+
+&cpu0 {
+ cpu-supply = <&mp5496_s2>;
+};
+
+&cpu1 {
+ cpu-supply = <&mp5496_s2>;
+};
+
+&cpu2 {
+ cpu-supply = <&mp5496_s2>;
+};
+
+&cpu3 {
+ cpu-supply = <&mp5496_s2>;
+};
+
+&rpm_requests {
+ regulators {
+ compatible = "qcom,rpm-mp5496-regulators";
+
+ mp5496_s2: s2 {
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1062500>;
+ regulator-always-on;
+ };
+
+ mp5496_l2: l2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ };
+};
+
+&sdhc {
+ vqmmc-supply = <&mp5496_l2>;
+};
diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
index dbf6716bcb59..7f0faf26b707 100644
--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
@@ -43,7 +43,6 @@
clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
clock-names = "cpu";
operating-points-v2 = <&cpu_opp_table>;
- cpu-supply = <&ipq6018_s2>;
#cooling-cells = <2>;
};
@@ -56,7 +55,6 @@
clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
clock-names = "cpu";
operating-points-v2 = <&cpu_opp_table>;
- cpu-supply = <&ipq6018_s2>;
#cooling-cells = <2>;
};
@@ -69,7 +67,6 @@
clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
clock-names = "cpu";
operating-points-v2 = <&cpu_opp_table>;
- cpu-supply = <&ipq6018_s2>;
#cooling-cells = <2>;
};
@@ -82,7 +79,6 @@
clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
clock-names = "cpu";
operating-points-v2 = <&cpu_opp_table>;
- cpu-supply = <&ipq6018_s2>;
#cooling-cells = <2>;
};
@@ -119,6 +115,13 @@
clock-latency-ns = <200000>;
};
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <850000>;
+ opp-supported-hw = <0x4>;
+ clock-latency-ns = <200000>;
+ };
+
opp-1320000000 {
opp-hz = /bits/ 64 <1320000000>;
opp-microvolt = <862500>;
@@ -133,6 +136,13 @@
clock-latency-ns = <200000>;
};
+ opp-1512000000 {
+ opp-hz = /bits/ 64 <1512000000>;
+ opp-microvolt = <937500>;
+ opp-supported-hw = <0x2>;
+ clock-latency-ns = <200000>;
+ };
+
opp-1608000000 {
opp-hz = /bits/ 64 <1608000000>;
opp-microvolt = <987500>;
@@ -170,16 +180,6 @@
rpm_requests: rpm-requests {
compatible = "qcom,rpm-ipq6018", "qcom,glink-smd-rpm";
qcom,glink-channels = "rpm_requests";
-
- regulators {
- compatible = "qcom,rpm-mp5496-regulators";
-
- ipq6018_s2: s2 {
- regulator-min-microvolt = <725000>;
- regulator-max-microvolt = <1062500>;
- regulator-always-on;
- };
- };
};
};
};
@@ -210,8 +210,11 @@
};
smem_region: memory@4aa00000 {
+ compatible = "qcom,smem";
reg = <0x0 0x4aa00000 0x0 0x100000>;
no-map;
+
+ hwlocks = <&tcsr_mutex 3>;
};
q6_region: memory@4ab00000 {
@@ -220,12 +223,6 @@
};
};
- smem {
- compatible = "qcom,smem";
- memory-region = <&smem_region>;
- hwlocks = <&tcsr_mutex 3>;
- };
-
soc: soc@0 {
#address-cells = <2>;
#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
index ae12f069f26f..bdb396afb992 100644
--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
@@ -111,6 +111,13 @@
regulator-always-on;
regulator-boot-on;
};
+
+ mp5496_l5: l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
};
};
@@ -139,6 +146,50 @@
drive-strength = <8>;
bias-pull-up;
};
+
+ qpic_snand_default_state: qpic-snand-default-state {
+ clock-pins {
+ pins = "gpio5";
+ function = "qspi_clk";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ cs-pins {
+ pins = "gpio4";
+ function = "qspi_cs";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ data-pins {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ function = "qspi_data";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ };
+};
+
+&qpic_bam {
+ status = "okay";
+};
+
+&qpic_nand {
+ pinctrl-0 = <&qpic_snand_default_state>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ flash@0 {
+ compatible = "spi-nand";
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ nand-ecc-engine = <&qpic_nand>;
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+ };
};
&usb_0_dwc3 {
@@ -146,7 +197,7 @@
};
&usb_0_qmpphy {
- vdda-pll-supply = <&mp5496_l2>;
+ vdda-pll-supply = <&mp5496_l5>;
vdda-phy-supply = <&regulator_fixed_0p925>;
status = "okay";
@@ -154,7 +205,7 @@
&usb_0_qusbphy {
vdd-supply = <&regulator_fixed_0p925>;
- vdda-pll-supply = <&mp5496_l2>;
+ vdda-pll-supply = <&mp5496_l5>;
vdda-phy-dpdm-supply = <&regulator_fixed_3p3>;
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
index 165ebbb59511..fa7bb521e786 100644
--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
@@ -55,18 +55,6 @@
status = "okay";
};
-&sdhc_1 {
- pinctrl-0 = <&sdc_default_state>;
- pinctrl-names = "default";
- mmc-ddr-1_8v;
- mmc-hs200-1_8v;
- mmc-hs400-1_8v;
- mmc-hs400-enhanced-strobe;
- max-frequency = <384000000>;
- bus-width = <8>;
- status = "okay";
-};
-
&tlmm {
pcie1_default: pcie1-default-state {
diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
index 942290028972..815b5f9540b8 100644
--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
@@ -378,6 +378,8 @@
interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
qcom,ee = <1>;
+ qcom,num-ees = <4>;
+ num-channels = <16>;
qcom,controlled-remotely;
};
@@ -673,6 +675,33 @@
status = "disabled";
};
+ qpic_bam: dma-controller@7984000 {
+ compatible = "qcom,bam-v1.7.4", "qcom,bam-v1.7.0";
+ reg = <0x07984000 0x1c000>;
+ interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_QPIC_AHB_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ status = "disabled";
+ };
+
+ qpic_nand: spi@79b0000 {
+ compatible = "qcom,ipq9574-snand";
+ reg = <0x079b0000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&gcc GCC_QPIC_CLK>,
+ <&gcc GCC_QPIC_AHB_CLK>,
+ <&gcc GCC_QPIC_IO_MACRO_CLK>;
+ clock-names = "core", "aon", "iom";
+ dmas = <&qpic_bam 0>,
+ <&qpic_bam 1>,
+ <&qpic_bam 2>;
+ dma-names = "tx", "rx", "cmd";
+ status = "disabled";
+ };
+
usb_0_qusbphy: phy@7b000 {
compatible = "qcom,ipq9574-qusb2-phy";
reg = <0x0007b000 0x180>;
@@ -876,12 +905,18 @@
pcie1: pcie@10000000 {
compatible = "qcom,pcie-ipq9574";
- reg = <0x10000000 0xf1d>,
- <0x10000f20 0xa8>,
- <0x10001000 0x1000>,
- <0x000f8000 0x4000>,
- <0x10100000 0x1000>;
- reg-names = "dbi", "elbi", "atu", "parf", "config";
+ reg = <0x10000000 0xf1d>,
+ <0x10000f20 0xa8>,
+ <0x10001000 0x1000>,
+ <0x000f8000 0x4000>,
+ <0x10100000 0x1000>,
+ <0x000fe000 0x1000>;
+ reg-names = "dbi",
+ "elbi",
+ "atu",
+ "parf",
+ "config",
+ "mhi";
device_type = "pci";
linux,pci-domain = <1>;
bus-range = <0x00 0xff>;
@@ -956,12 +991,18 @@
pcie3: pcie@18000000 {
compatible = "qcom,pcie-ipq9574";
- reg = <0x18000000 0xf1d>,
- <0x18000f20 0xa8>,
- <0x18001000 0x1000>,
- <0x000f0000 0x4000>,
- <0x18100000 0x1000>;
- reg-names = "dbi", "elbi", "atu", "parf", "config";
+ reg = <0x18000000 0xf1d>,
+ <0x18000f20 0xa8>,
+ <0x18001000 0x1000>,
+ <0x000f0000 0x4000>,
+ <0x18100000 0x1000>,
+ <0x000f6000 0x1000>;
+ reg-names = "dbi",
+ "elbi",
+ "atu",
+ "parf",
+ "config",
+ "mhi";
device_type = "pci";
linux,pci-domain = <3>;
bus-range = <0x00 0xff>;
@@ -972,14 +1013,14 @@
ranges = <0x01000000 0x0 0x00000000 0x18200000 0x0 0x100000>,
<0x02000000 0x0 0x18300000 0x18300000 0x0 0x7d00000>;
- interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 415 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 494 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 495 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0",
"msi1",
"msi2",
@@ -1036,12 +1077,18 @@
pcie2: pcie@20000000 {
compatible = "qcom,pcie-ipq9574";
- reg = <0x20000000 0xf1d>,
- <0x20000f20 0xa8>,
- <0x20001000 0x1000>,
- <0x00088000 0x4000>,
- <0x20100000 0x1000>;
- reg-names = "dbi", "elbi", "atu", "parf", "config";
+ reg = <0x20000000 0xf1d>,
+ <0x20000f20 0xa8>,
+ <0x20001000 0x1000>,
+ <0x00088000 0x4000>,
+ <0x20100000 0x1000>,
+ <0x0008e000 0x1000>;
+ reg-names = "dbi",
+ "elbi",
+ "atu",
+ "parf",
+ "config",
+ "mhi";
device_type = "pci";
linux,pci-domain = <2>;
bus-range = <0x00 0xff>;
@@ -1116,12 +1163,18 @@
pcie0: pci@28000000 {
compatible = "qcom,pcie-ipq9574";
- reg = <0x28000000 0xf1d>,
- <0x28000f20 0xa8>,
- <0x28001000 0x1000>,
- <0x00080000 0x4000>,
- <0x28100000 0x1000>;
- reg-names = "dbi", "elbi", "atu", "parf", "config";
+ reg = <0x28000000 0xf1d>,
+ <0x28000f20 0xa8>,
+ <0x28001000 0x1000>,
+ <0x00080000 0x4000>,
+ <0x28100000 0x1000>,
+ <0x00086000 0x1000>;
+ reg-names = "dbi",
+ "elbi",
+ "atu",
+ "parf",
+ "config",
+ "mhi";
device_type = "pci";
linux,pci-domain = <0>;
bus-range = <0x00 0xff>;
@@ -1193,6 +1246,35 @@
status = "disabled";
};
+ nsscc: clock-controller@39b00000 {
+ compatible = "qcom,ipq9574-nsscc";
+ reg = <0x39b00000 0x80000>;
+ clocks = <&xo_board_clk>,
+ <&cmn_pll NSS_1200MHZ_CLK>,
+ <&cmn_pll PPE_353MHZ_CLK>,
+ <&gcc GPLL0_OUT_AUX>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <&gcc GCC_NSSCC_CLK>;
+ clock-names = "xo",
+ "nss_1200",
+ "ppe_353",
+ "gpll0_out",
+ "uniphy0_rx",
+ "uniphy0_tx",
+ "uniphy1_rx",
+ "uniphy1_tx",
+ "uniphy2_rx",
+ "uniphy2_tx",
+ "bus";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #interconnect-cells = <1>;
+ };
};
thermal-zones {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts b/arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts
index b4ce14a79370..3a6eba904641 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts
@@ -133,6 +133,9 @@
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts b/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts
index 3459145516a1..2de8b6f9531b 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts
@@ -214,6 +214,9 @@
&blsp_uart2 {
status = "okay";
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
};
&mpss_mem {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-asus-z00l.dts b/arch/arm64/boot/dts/qcom/msm8916-asus-z00l.dts
index 77618c7374df..29d61f8d5dc9 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-asus-z00l.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-asus-z00l.dts
@@ -130,6 +130,9 @@
&blsp_uart2 {
status = "okay";
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
};
&mpss_mem {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts b/arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts
index f7be7e371820..742a325245c5 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts
@@ -131,6 +131,9 @@
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-huawei-g7.dts b/arch/arm64/boot/dts/qcom/msm8916-huawei-g7.dts
index bf7fc89dd106..aa414b5d7ee4 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-huawei-g7.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-huawei-g7.dts
@@ -214,6 +214,9 @@
&blsp_uart2 {
status = "okay";
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
};
&lpass {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-lg-c50.dts b/arch/arm64/boot/dts/qcom/msm8916-lg-c50.dts
index a823a1c40208..22bc73b94344 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-lg-c50.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-lg-c50.dts
@@ -59,6 +59,9 @@
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-lg-m216.dts b/arch/arm64/boot/dts/qcom/msm8916-lg-m216.dts
index 07345e694f6f..c50374979939 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-lg-m216.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-lg-m216.dts
@@ -112,6 +112,9 @@
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
index 7f0c2c1b8a94..eb449112a226 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
@@ -254,6 +254,9 @@
&blsp_uart2 {
status = "okay";
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
};
&pm8916_bms {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts
index 2cc54eaf7202..887764dc55b2 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts
@@ -178,6 +178,9 @@
&blsp_uart2 {
status = "okay";
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
};
&mpss_mem {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-modem-qdsp6.dtsi b/arch/arm64/boot/dts/qcom/msm8916-modem-qdsp6.dtsi
index 039961622633..75103168c1fc 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-modem-qdsp6.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-modem-qdsp6.dtsi
@@ -58,19 +58,19 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
direction = <Q6ASM_DAI_RX>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
direction = <Q6ASM_DAI_TX>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
direction = <Q6ASM_DAI_RX>;
};
dai@3 {
- reg = <3>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA4>;
direction = <Q6ASM_DAI_RX>;
is-compress-dai;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-motorola-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-motorola-common.dtsi
index 6a27d0ecd2ad..4e202e7ed7db 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-motorola-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-motorola-common.dtsi
@@ -69,6 +69,9 @@
};
&blsp_uart1 {
+ pinctrl-0 = <&blsp_uart1_console_default>;
+ pinctrl-1 = <&blsp_uart1_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
@@ -129,14 +132,6 @@
status = "okay";
};
-/* CTS/RTX are not used */
-&blsp_uart1_default {
- pins = "gpio0", "gpio1";
-};
-&blsp_uart1_sleep {
- pins = "gpio0", "gpio1";
-};
-
&tlmm {
gpio_keys_default: gpio-keys-default-state {
pins = "gpio107";
diff --git a/arch/arm64/boot/dts/qcom/msm8916-mtp.dts b/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
index c11a845e91bb..63d476523544 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
@@ -23,5 +23,8 @@
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
index e6355e5e2177..6f75707b6f9b 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
@@ -302,6 +302,9 @@
&blsp_uart2 {
status = "okay";
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
};
&gpu {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi
index 7a7e99b015d9..fb790b02736a 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi
@@ -304,6 +304,9 @@
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-gt5-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-gt5-common.dtsi
index fbd2caf405d5..ff9679d3f664 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-gt5-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-gt5-common.dtsi
@@ -116,6 +116,9 @@
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-j5-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-j5-common.dtsi
index 5ca2ada266f4..697f25d51d9d 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-j5-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-j5-common.dtsi
@@ -135,6 +135,9 @@
&blsp_uart2 {
status = "okay";
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
};
&mpss_mem {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts
index caad1dead2e0..71b5c98458ff 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts
@@ -319,6 +319,9 @@
&blsp_uart2 {
status = "okay";
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
};
&gpu {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi b/arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi
index c77ed04bb6c3..5719933fa8e0 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi
@@ -72,6 +72,9 @@
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt865x8.dtsi b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt865x8.dtsi
index 1a7c347dc3f0..ebe85cd85ddf 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt865x8.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt865x8.dtsi
@@ -93,6 +93,9 @@
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
index 510b3b3c4e3c..68c8856d4c2e 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
@@ -169,6 +169,9 @@
&blsp_uart2 {
status = "okay";
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
};
&mpss_mem {
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 8f35c9af1878..de9fdc0dfc5f 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -4,6 +4,7 @@
*/
#include <dt-bindings/arm/coresight-cti-dt.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-msm8916.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/interconnect/qcom,msm8916.h>
@@ -1231,29 +1232,50 @@
bias-pull-down;
};
- blsp_uart1_default: blsp-uart1-default-state {
- /* TX, RX, CTS_N, RTS_N */
- pins = "gpio0", "gpio1", "gpio2", "gpio3";
- function = "blsp_uart1";
- drive-strength = <16>;
- bias-disable;
+ blsp_uart1_console_default: blsp-uart1-console-default-state {
+ tx-pins {
+ pins = "gpio0";
+ function = "blsp_uart1";
+ drive-strength = <16>;
+ bias-disable;
+ bootph-all;
+ };
+
+ rx-pins {
+ pins = "gpio1";
+ function = "blsp_uart1";
+ drive-strength = <16>;
+ bias-pull-up;
+ bootph-all;
+ };
};
- blsp_uart1_sleep: blsp-uart1-sleep-state {
- pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ blsp_uart1_console_sleep: blsp-uart1-console-sleep-state {
+ pins = "gpio0", "gpio1";
function = "gpio";
drive-strength = <2>;
bias-pull-down;
};
- blsp_uart2_default: blsp-uart2-default-state {
- pins = "gpio4", "gpio5";
- function = "blsp_uart2";
- drive-strength = <16>;
- bias-disable;
+ blsp_uart2_console_default: blsp-uart2-console-default-state {
+ tx-pins {
+ pins = "gpio4";
+ function = "blsp_uart2";
+ drive-strength = <16>;
+ bias-disable;
+ bootph-all;
+ };
+
+ rx-pins {
+ pins = "gpio5";
+ function = "blsp_uart2";
+ drive-strength = <16>;
+ bias-pull-up;
+ bootph-all;
+ };
};
- blsp_uart2_sleep: blsp-uart2-sleep-state {
+ blsp_uart2_console_sleep: blsp-uart2-console-sleep-state {
pins = "gpio4", "gpio5";
function = "gpio";
drive-strength = <2>;
@@ -1497,8 +1519,8 @@
reg = <0x01800000 0x80000>;
clocks = <&xo_board>,
<&sleep_clk>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi0_phy 0>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
<0>,
<0>,
<0>;
@@ -1590,8 +1612,8 @@
assigned-clocks = <&gcc BYTE0_CLK_SRC>,
<&gcc PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
clocks = <&gcc GCC_MDSS_MDP_CLK>,
<&gcc GCC_MDSS_AHB_CLK>,
@@ -2158,9 +2180,6 @@
clock-names = "core", "iface";
dmas = <&blsp_dma 0>, <&blsp_dma 1>;
dma-names = "tx", "rx";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp_uart1_default>;
- pinctrl-1 = <&blsp_uart1_sleep>;
status = "disabled";
};
@@ -2172,9 +2191,6 @@
clock-names = "core", "iface";
dmas = <&blsp_dma 2>, <&blsp_dma 3>;
dma-names = "tx", "rx";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp_uart2_default>;
- pinctrl-1 = <&blsp_uart2_sleep>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-xiaomi-riva.dts b/arch/arm64/boot/dts/qcom/msm8917-xiaomi-riva.dts
index f1d22535fedd..9db503e21888 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-xiaomi-riva.dts
+++ b/arch/arm64/boot/dts/qcom/msm8917-xiaomi-riva.dts
@@ -20,6 +20,14 @@
qcom,msm-id = <QCOM_ID_MSM8917 0>;
qcom,board-id = <0x1000b 2>, <0x2000b 2>;
+ pwm_backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pm8937_pwm 0 100000>;
+ brightness-levels = <0 255>;
+ num-interpolated-steps = <255>;
+ default-brightness-level = <128>;
+ };
+
battery: battery {
compatible = "simple-battery";
charge-full-design-microamp-hours = <3000000>;
@@ -119,7 +127,7 @@
monitored-battery = <&battery>;
};
- bq25601@6b{
+ bq25601@6b {
compatible = "ti,bq25601";
reg = <0x6b>;
interrupts-extended = <&tlmm 61 IRQ_TYPE_EDGE_FALLING>;
@@ -131,6 +139,23 @@
};
};
+&pm8937_gpios {
+ pwm_enable_default: pwm-enable-default-state {
+ pins = "gpio8";
+ function = "dtest2";
+ output-low;
+ bias-disable;
+ qcom,drive-strength = <2>;
+ };
+};
+
+&pm8937_pwm {
+ pinctrl-0 = <&pwm_enable_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
&pm8937_resin {
linux,code = <KEY_VOLUMEDOWN>;
diff --git a/arch/arm64/boot/dts/qcom/msm8917.dtsi b/arch/arm64/boot/dts/qcom/msm8917.dtsi
index 7bf58dd0146e..8a642fce2e40 100644
--- a/arch/arm64/boot/dts/qcom/msm8917.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917.dtsi
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-msm8917.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -587,7 +588,7 @@
bits = <1 6>;
};
- tsens_s9_p1: s9-p1@230{
+ tsens_s9_p1: s9-p1@230 {
reg = <0x230 1>;
bits = <0 6>;
};
@@ -961,8 +962,8 @@
#power-domain-cells = <1>;
clocks = <&xo_board>,
<&sleep_clk>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi0_phy 0>;
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>;
clock-names = "xo",
"sleep_clk",
"dsi0pll",
@@ -1051,8 +1052,8 @@
assigned-clocks = <&gcc BYTE0_CLK_SRC>,
<&gcc PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
clocks = <&gcc GCC_MDSS_MDP_CLK>,
<&gcc GCC_MDSS_AHB_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/msm8939-huawei-kiwi.dts b/arch/arm64/boot/dts/qcom/msm8939-huawei-kiwi.dts
index 3cec51891aed..18381a66daef 100644
--- a/arch/arm64/boot/dts/qcom/msm8939-huawei-kiwi.dts
+++ b/arch/arm64/boot/dts/qcom/msm8939-huawei-kiwi.dts
@@ -126,6 +126,9 @@
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts b/arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts
index b845da4fa23e..13422a19c26a 100644
--- a/arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts
+++ b/arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts
@@ -243,6 +243,9 @@
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts b/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts
index ceba6e73b211..07613080e79e 100644
--- a/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts
+++ b/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts
@@ -373,6 +373,9 @@
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dtsi b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dtsi
index 800e0747a2f7..a5187355f9fa 100644
--- a/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dtsi
@@ -126,6 +126,9 @@
};
&blsp_uart2 {
+ pinctrl-0 = <&blsp_uart2_console_default>;
+ pinctrl-1 = <&blsp_uart2_console_sleep>;
+ pinctrl-names = "default", "sleep";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
index 7cd5660de1b3..68b92fdb996c 100644
--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
@@ -4,6 +4,7 @@
* Copyright (c) 2020-2023, Linaro Limited
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-msm8939.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/interconnect/qcom,msm8939.h>
@@ -46,6 +47,7 @@
compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
reg = <0x100>;
next-level-cache = <&l2_1>;
qcom,acc = <&acc0>;
@@ -64,6 +66,7 @@
compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
reg = <0x101>;
next-level-cache = <&l2_1>;
qcom,acc = <&acc1>;
@@ -77,6 +80,7 @@
compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
reg = <0x102>;
next-level-cache = <&l2_1>;
qcom,acc = <&acc2>;
@@ -90,6 +94,7 @@
compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
reg = <0x103>;
next-level-cache = <&l2_1>;
qcom,acc = <&acc3>;
@@ -103,6 +108,7 @@
compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
reg = <0x0>;
qcom,acc = <&acc4>;
qcom,saw = <&saw4>;
@@ -121,6 +127,7 @@
compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
reg = <0x1>;
next-level-cache = <&l2_0>;
qcom,acc = <&acc5>;
@@ -134,6 +141,7 @@
compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
reg = <0x2>;
next-level-cache = <&l2_0>;
qcom,acc = <&acc6>;
@@ -147,6 +155,7 @@
compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
reg = <0x3>;
next-level-cache = <&l2_0>;
qcom,acc = <&acc7>;
@@ -896,28 +905,50 @@
bias-pull-down;
};
- blsp_uart1_default: blsp-uart1-default-state {
- pins = "gpio0", "gpio1", "gpio2", "gpio3";
- function = "blsp_uart1";
- drive-strength = <16>;
- bias-disable;
+ blsp_uart1_console_default: blsp-uart1-console-default-state {
+ tx-pins {
+ pins = "gpio0";
+ function = "blsp_uart1";
+ drive-strength = <16>;
+ bias-disable;
+ bootph-all;
+ };
+
+ rx-pins {
+ pins = "gpio1";
+ function = "blsp_uart1";
+ drive-strength = <16>;
+ bias-pull-up;
+ bootph-all;
+ };
};
- blsp_uart1_sleep: blsp-uart1-sleep-state {
- pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ blsp_uart1_console_sleep: blsp-uart1-console-sleep-state {
+ pins = "gpio0", "gpio1";
function = "gpio";
drive-strength = <2>;
bias-pull-down;
};
- blsp_uart2_default: blsp-uart2-default-state {
- pins = "gpio4", "gpio5";
- function = "blsp_uart2";
- drive-strength = <16>;
- bias-disable;
+ blsp_uart2_console_default: blsp-uart2-console-default-state {
+ tx-pins {
+ pins = "gpio4";
+ function = "blsp_uart2";
+ drive-strength = <16>;
+ bias-disable;
+ bootph-all;
+ };
+
+ rx-pins {
+ pins = "gpio5";
+ function = "blsp_uart2";
+ drive-strength = <16>;
+ bias-pull-up;
+ bootph-all;
+ };
};
- blsp_uart2_sleep: blsp-uart2-sleep-state {
+ blsp_uart2_console_sleep: blsp-uart2-console-sleep-state {
pins = "gpio4", "gpio5";
function = "gpio";
drive-strength = <2>;
@@ -1172,8 +1203,8 @@
reg = <0x01800000 0x80000>;
clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
<&sleep_clk>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi0_phy 0>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
<0>,
<0>,
<0>;
@@ -1291,8 +1322,8 @@
"core";
assigned-clocks = <&gcc BYTE0_CLK_SRC>,
<&gcc PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
phys = <&mdss_dsi0_phy>;
status = "disabled";
@@ -1360,8 +1391,8 @@
"core";
assigned-clocks = <&gcc BYTE1_CLK_SRC>,
<&gcc PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
phys = <&mdss_dsi1_phy>;
status = "disabled";
@@ -1761,9 +1792,6 @@
clock-names = "core", "iface";
dmas = <&blsp_dma 0>, <&blsp_dma 1>;
dma-names = "tx", "rx";
- pinctrl-0 = <&blsp_uart1_default>;
- pinctrl-1 = <&blsp_uart1_sleep>;
- pinctrl-names = "default", "sleep";
status = "disabled";
};
@@ -1775,9 +1803,6 @@
clock-names = "core", "iface";
dmas = <&blsp_dma 2>, <&blsp_dma 3>;
dma-names = "tx", "rx";
- pinctrl-0 = <&blsp_uart2_default>;
- pinctrl-1 = <&blsp_uart2_sleep>;
- pinctrl-names = "default", "sleep";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index af4c341e2533..273e79fb7569 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -1,9 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright (c) 2022, The Linux Foundation. All rights reserved. */
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-msm8953.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interconnect/qcom,msm8953.h>
+#include <dt-bindings/interconnect/qcom,rpm-icc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/soc/qcom,apr.h>
@@ -44,6 +47,8 @@
reg = <0x0>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ interconnects = <&bimc MAS_APPS_PROC RPM_ACTIVE_TAG
+ &bimc SLV_EBI RPM_ACTIVE_TAG>;
next-level-cache = <&l2_0>;
#cooling-cells = <2>;
};
@@ -54,6 +59,8 @@
reg = <0x1>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ interconnects = <&bimc MAS_APPS_PROC RPM_ACTIVE_TAG
+ &bimc SLV_EBI RPM_ACTIVE_TAG>;
next-level-cache = <&l2_0>;
#cooling-cells = <2>;
};
@@ -64,6 +71,8 @@
reg = <0x2>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ interconnects = <&bimc MAS_APPS_PROC RPM_ACTIVE_TAG
+ &bimc SLV_EBI RPM_ACTIVE_TAG>;
next-level-cache = <&l2_0>;
#cooling-cells = <2>;
};
@@ -74,6 +83,8 @@
reg = <0x3>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ interconnects = <&bimc MAS_APPS_PROC RPM_ACTIVE_TAG
+ &bimc SLV_EBI RPM_ACTIVE_TAG>;
next-level-cache = <&l2_0>;
#cooling-cells = <2>;
};
@@ -84,6 +95,8 @@
reg = <0x100>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ interconnects = <&bimc MAS_APPS_PROC RPM_ACTIVE_TAG
+ &bimc SLV_EBI RPM_ACTIVE_TAG>;
next-level-cache = <&l2_1>;
#cooling-cells = <2>;
};
@@ -94,6 +107,8 @@
reg = <0x101>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ interconnects = <&bimc MAS_APPS_PROC RPM_ACTIVE_TAG
+ &bimc SLV_EBI RPM_ACTIVE_TAG>;
next-level-cache = <&l2_1>;
#cooling-cells = <2>;
};
@@ -104,6 +119,8 @@
reg = <0x102>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ interconnects = <&bimc MAS_APPS_PROC RPM_ACTIVE_TAG
+ &bimc SLV_EBI RPM_ACTIVE_TAG>;
next-level-cache = <&l2_1>;
#cooling-cells = <2>;
};
@@ -114,6 +131,8 @@
reg = <0x103>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ interconnects = <&bimc MAS_APPS_PROC RPM_ACTIVE_TAG
+ &bimc SLV_EBI RPM_ACTIVE_TAG>;
next-level-cache = <&l2_1>;
#cooling-cells = <2>;
};
@@ -470,6 +489,13 @@
clock-names = "core";
};
+ bimc: interconnect@400000 {
+ compatible = "qcom,msm8953-bimc";
+ reg = <0x00400000 0x5a000>;
+
+ #interconnect-cells = <2>;
+ };
+
tsens0: thermal-sensor@4a9000 {
compatible = "qcom,msm8953-tsens", "qcom,tsens-v2";
reg = <0x004a9000 0x1000>, /* TM */
@@ -486,6 +512,29 @@
reg = <0x004ab000 0x4>;
};
+ pcnoc: interconnect@500000 {
+ compatible = "qcom,msm8953-pcnoc";
+ reg = <0x00500000 0x12080>;
+
+ clocks = <&gcc GCC_PCNOC_USB3_AXI_CLK>;
+ clock-names = "pcnoc_usb3_axi";
+
+ #interconnect-cells = <2>;
+ };
+
+ snoc: interconnect@580000 {
+ compatible = "qcom,msm8953-snoc";
+ reg = <0x00580000 0x16080>;
+
+ #interconnect-cells = <2>;
+
+ snoc_mm: interconnect-snoc {
+ compatible = "qcom,msm8953-snoc-mm";
+
+ #interconnect-cells = <2>;
+ };
+ };
+
tlmm: pinctrl@1000000 {
compatible = "qcom,msm8953-pinctrl";
reg = <0x01000000 0x300000>;
@@ -767,6 +816,20 @@
bias-disable;
};
+ uart_5_default: uart-5-default-state {
+ pins = "gpio16", "gpio17", "gpio18", "gpio19";
+ function = "blsp_uart5";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ uart_5_sleep: uart-5-sleep-state {
+ pins = "gpio16", "gpio17", "gpio18", "gpio19";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
wcnss_pin_a: wcnss-active-state {
wcss-wlan2-pins {
@@ -807,10 +870,10 @@
#power-domain-cells = <1>;
clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
<&sleep_clk>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi1_phy 1>,
- <&mdss_dsi1_phy 0>;
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>;
clock-names = "xo",
"sleep",
"dsi0pll",
@@ -849,6 +912,13 @@
interrupt-controller;
#interrupt-cells = <1>;
+ interconnects = <&snoc_mm MAS_MDP RPM_ALWAYS_TAG
+ &bimc SLV_EBI RPM_ALWAYS_TAG>,
+ <&bimc MAS_APPS_PROC RPM_ACTIVE_TAG
+ &pcnoc SLV_DISP_SS_CFG RPM_ACTIVE_TAG>;
+ interconnect-names = "mdp0-mem",
+ "cpu-cfg";
+
clocks = <&gcc GCC_MDSS_AHB_CLK>,
<&gcc GCC_MDSS_AXI_CLK>,
<&gcc GCC_MDSS_VSYNC_CLK>,
@@ -917,8 +987,8 @@
assigned-clocks = <&gcc BYTE0_CLK_SRC>,
<&gcc PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
clocks = <&gcc GCC_MDSS_MDP_CLK>,
<&gcc GCC_MDSS_AHB_CLK>,
@@ -987,8 +1057,8 @@
assigned-clocks = <&gcc BYTE1_CLK_SRC>,
<&gcc PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
clocks = <&gcc GCC_MDSS_MDP_CLK>,
<&gcc GCC_MDSS_AHB_CLK>,
@@ -1065,6 +1135,11 @@
"alwayson";
power-domains = <&gcc OXILI_GX_GDSC>;
+ interconnects = <&bimc MAS_OXILI RPM_ALWAYS_TAG
+ &bimc SLV_EBI RPM_ALWAYS_TAG>,
+ <&bimc MAS_APPS_PROC RPM_ACTIVE_TAG
+ &pcnoc SLV_GPU_CFG RPM_ACTIVE_TAG>;
+
iommus = <&gpu_iommu 0>;
operating-points-v2 = <&gpu_opp_table>;
@@ -1302,6 +1377,13 @@
<&gcc GCC_USB30_MASTER_CLK>;
assigned-clock-rates = <19200000>, <133330000>;
+ interconnects = <&pcnoc MAS_USB3 RPM_ALWAYS_TAG
+ &bimc SLV_EBI RPM_ALWAYS_TAG>,
+ <&bimc MAS_APPS_PROC RPM_ACTIVE_TAG
+ &pcnoc SLV_USB3 RPM_ACTIVE_TAG>;
+ interconnect-names = "usb-ddr",
+ "apps-usb";
+
power-domains = <&gcc USB30_GDSC>;
qcom,select-utmi-as-pipe-clk;
@@ -1354,6 +1436,13 @@
<&rpmcc RPM_SMD_XO_CLK_SRC>;
clock-names = "iface", "core", "xo";
+ interconnects = <&pcnoc MAS_SDCC_1 RPM_ALWAYS_TAG
+ &bimc SLV_EBI RPM_ALWAYS_TAG>,
+ <&bimc MAS_APPS_PROC RPM_ACTIVE_TAG
+ &pcnoc SLV_SDCC_1 RPM_ACTIVE_TAG>;
+ interconnect-names = "sdhc-ddr",
+ "cpu-sdhc";
+
power-domains = <&rpmpd MSM8953_VDDCX>;
operating-points-v2 = <&sdhc1_opp_table>;
@@ -1374,26 +1463,36 @@
opp-25000000 {
opp-hz = /bits/ 64 <25000000>;
+ opp-peak-kBps = <200000>, <100000>;
+ opp-avg-kBps = <65360>, <32768>;
required-opps = <&rpmpd_opp_low_svs>;
};
opp-50000000 {
opp-hz = /bits/ 64 <50000000>;
+ opp-peak-kBps = <400000>, <200000>;
+ opp-avg-kBps = <130718>, <65360>;
required-opps = <&rpmpd_opp_svs>;
};
opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
+ opp-peak-kBps = <400000>, <400000>;
+ opp-avg-kBps = <130718>, <65360>;
required-opps = <&rpmpd_opp_svs>;
};
opp-192000000 {
opp-hz = /bits/ 64 <192000000>;
+ opp-peak-kBps = <800000>, <600000>;
+ opp-avg-kBps = <261438>, <130718>;
required-opps = <&rpmpd_opp_nom>;
};
opp-384000000 {
opp-hz = /bits/ 64 <384000000>;
+ opp-peak-kBps = <800000>, <800000>;
+ opp-avg-kBps = <261438>, <300000>;
required-opps = <&rpmpd_opp_nom>;
};
};
@@ -1414,6 +1513,13 @@
<&rpmcc RPM_SMD_XO_CLK_SRC>;
clock-names = "iface", "core", "xo";
+ interconnects = <&pcnoc MAS_SDCC_2 RPM_ALWAYS_TAG
+ &bimc SLV_EBI RPM_ALWAYS_TAG>,
+ <&bimc MAS_APPS_PROC RPM_ACTIVE_TAG
+ &pcnoc SLV_SDCC_2 RPM_ACTIVE_TAG>;
+ interconnect-names = "sdhc-ddr",
+ "cpu-sdhc";
+
power-domains = <&rpmpd MSM8953_VDDCX>;
operating-points-v2 = <&sdhc2_opp_table>;
@@ -1430,26 +1536,36 @@
opp-25000000 {
opp-hz = /bits/ 64 <25000000>;
+ opp-peak-kBps = <200000>, <100000>;
+ opp-avg-kBps = <65360>, <32768>;
required-opps = <&rpmpd_opp_low_svs>;
};
opp-50000000 {
opp-hz = /bits/ 64 <50000000>;
+ opp-peak-kBps = <400000>, <400000>;
+ opp-avg-kBps = <130718>, <65360>;
required-opps = <&rpmpd_opp_svs>;
};
opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
+ opp-peak-kBps = <800000>, <400000>;
+ opp-avg-kBps = <130718>, <130718>;
required-opps = <&rpmpd_opp_svs>;
};
opp-177770000 {
opp-hz = /bits/ 64 <177770000>;
+ opp-peak-kBps = <600000>, <600000>;
+ opp-avg-kBps = <261438>, <130718>;
required-opps = <&rpmpd_opp_nom>;
};
opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
+ opp-peak-kBps = <800000>, <800000>;
+ opp-avg-kBps = <261438>, <130718>;
required-opps = <&rpmpd_opp_nom>;
};
};
@@ -1592,6 +1708,24 @@
qcom,controlled-remotely;
};
+ uart_5: serial@7aef000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x07aef000 0x200>;
+ interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART1_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core",
+ "iface";
+ dmas = <&blsp2_dma 0>, <&blsp2_dma 1>;
+ dma-names = "tx", "rx";
+
+ pinctrl-0 = <&uart_5_default>;
+ pinctrl-1 = <&uart_5_sleep>;
+ pinctrl-names = "default", "sleep";
+
+ status = "disabled";
+ };
+
i2c_5: i2c@7af5000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0x07af5000 0x600>;
@@ -1932,19 +2066,19 @@
#sound-dai-cells = <1>;
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
direction = <Q6ASM_DAI_RX>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
direction = <Q6ASM_DAI_TX>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
direction = <Q6ASM_DAI_RX>;
};
dai@3 {
- reg = <3>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA4>;
direction = <Q6ASM_DAI_RX>;
is-compress-dai;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi
index d036f31dfdca..e2ac2fd6882f 100644
--- a/arch/arm64/boot/dts/qcom/msm8976.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi
@@ -6,6 +6,7 @@
* Copyright (c) 2022, Marijn Suijten <marijn.suijten@somainline.org>
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-msm8976.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/gpio/gpio.h>
@@ -824,10 +825,10 @@
clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
<&rpmcc RPM_SMD_XO_A_CLK_SRC>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi1_phy 1>,
- <&mdss_dsi1_phy 0>;
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>;
clock-names = "xo",
"xo_a",
"dsi0pll",
@@ -970,8 +971,8 @@
assigned-clocks = <&gcc GCC_MDSS_BYTE0_CLK_SRC>,
<&gcc GCC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
phys = <&mdss_dsi0_phy>;
@@ -1046,8 +1047,8 @@
assigned-clocks = <&gcc GCC_MDSS_BYTE1_CLK_SRC>,
<&gcc GCC_MDSS_PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
phys = <&mdss_dsi1_phy>;
diff --git a/arch/arm64/boot/dts/qcom/msm8992-lg-h815.dts b/arch/arm64/boot/dts/qcom/msm8992-lg-h815.dts
index 4520d5d51a29..6a231afad85d 100644
--- a/arch/arm64/boot/dts/qcom/msm8992-lg-h815.dts
+++ b/arch/arm64/boot/dts/qcom/msm8992-lg-h815.dts
@@ -93,26 +93,32 @@
&cpu0 {
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
};
&cpu1 {
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
};
&cpu2 {
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
};
&cpu3 {
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
};
&cpu4 {
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
};
&cpu5 {
enable-method = "spin-table";
+ cpu-release-addr = /bits/ 64 <0>;
};
&pm8994_resin {
diff --git a/arch/arm64/boot/dts/qcom/msm8996-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/msm8996-oneplus-common.dtsi
index 38035e0db80b..63ab564655bc 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-oneplus-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996-oneplus-common.dtsi
@@ -288,15 +288,15 @@
#size-cells = <0>;
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
};
@@ -492,6 +492,11 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
+
+ vreg_lvs2a: lvs2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8996-oneplus3.dts b/arch/arm64/boot/dts/qcom/msm8996-oneplus3.dts
index dfe75119b8d2..220eeb31fdc7 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-oneplus3.dts
+++ b/arch/arm64/boot/dts/qcom/msm8996-oneplus3.dts
@@ -41,6 +41,8 @@
&slpi_pil {
firmware-name = "qcom/msm8996/oneplus3/slpi.mbn";
+ px-supply = <&vreg_lvs2a>;
+
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8996-oneplus3t.dts b/arch/arm64/boot/dts/qcom/msm8996-oneplus3t.dts
index 51fce65e89f1..f772618e80c7 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-oneplus3t.dts
+++ b/arch/arm64/boot/dts/qcom/msm8996-oneplus3t.dts
@@ -42,6 +42,8 @@
&slpi_pil {
firmware-name = "qcom/msm8996/oneplus3t/slpi.mbn";
+ px-supply = <&vreg_lvs2a>;
+
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
index dbad8f57f2fa..bd3f39e1b98f 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
@@ -137,15 +137,15 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
};
@@ -156,10 +156,7 @@
&sound {
compatible = "qcom,apq8096-sndcard";
model = "gemini";
- audio-routing = "RX_BIAS", "MCLK",
- "MM_DL1", "MultiMedia1 Playback",
- "MM_DL2", "MultiMedia2 Playback",
- "MultiMedia3 Capture", "MM_UL3";
+ audio-routing = "RX_BIAS", "MCLK";
mm1-dai-link {
link-name = "MultiMedia1";
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 4719e1fc70d2..ede851fbf628 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -4,6 +4,7 @@
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-msm8996.h>
#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
@@ -937,10 +938,10 @@
clocks = <&xo_board>,
<&gcc GPLL0>,
<&gcc GCC_MMSS_NOC_CFG_AHB_CLK>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi1_phy 1>,
- <&mdss_dsi1_phy 0>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
<&mdss_hdmi_phy>;
clock-names = "xo",
"gpll0",
@@ -1071,8 +1072,10 @@
"core_mmss",
"pixel",
"core";
- assigned-clocks = <&mmcc BYTE0_CLK_SRC>, <&mmcc PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+ assigned-clocks = <&mmcc BYTE0_CLK_SRC>,
+ <&mmcc PCLK0_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
phys = <&mdss_dsi0_phy>;
status = "disabled";
@@ -1139,8 +1142,10 @@
"core_mmss",
"pixel",
"core";
- assigned-clocks = <&mmcc BYTE1_CLK_SRC>, <&mmcc PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>, <&mdss_dsi1_phy 1>;
+ assigned-clocks = <&mmcc BYTE1_CLK_SRC>,
+ <&mmcc PCLK1_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
phys = <&mdss_dsi1_phy>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-natrium.dts b/arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-natrium.dts
index 5e3fd1637f44..443599a5a5dd 100644
--- a/arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-natrium.dts
+++ b/arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-natrium.dts
@@ -87,15 +87,15 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-scorpio.dts b/arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-scorpio.dts
index 5e3b9130e9c2..33d84ac541e1 100644
--- a/arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-scorpio.dts
+++ b/arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-scorpio.dts
@@ -139,15 +139,15 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8998-fxtec-pro1.dts b/arch/arm64/boot/dts/qcom/msm8998-fxtec-pro1.dts
index f1ceaedd9520..f5558495cb02 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-fxtec-pro1.dts
+++ b/arch/arm64/boot/dts/qcom/msm8998-fxtec-pro1.dts
@@ -372,6 +372,7 @@
&qusb2phy {
status = "okay";
+ vdd-supply = <&vreg_l1a_0p875>;
vdda-pll-supply = <&vreg_l12a_1p8>;
vdda-phy-dpdm-supply = <&vreg_l24a_3p075>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8998-lenovo-miix-630.dts b/arch/arm64/boot/dts/qcom/msm8998-lenovo-miix-630.dts
index 901f6ac0084d..c11b972771c3 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-lenovo-miix-630.dts
+++ b/arch/arm64/boot/dts/qcom/msm8998-lenovo-miix-630.dts
@@ -100,6 +100,12 @@
};
};
+&venus {
+ firmware-name = "qcom/msm8998/LENOVO/81F1/qcvss8998.mbn";
+
+ status = "okay";
+};
+
&wifi {
- qcom,ath10k-calibration-variant = "Lenovo_Miix630";
+ qcom,calibration-variant = "Lenovo_Miix630";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dts b/arch/arm64/boot/dts/qcom/msm8998-mtp.dts
index 7c77612fb990..ad425267e902 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dts
@@ -156,6 +156,7 @@
&qusb2phy {
status = "okay";
+ vdd-supply = <&vreg_l1a_0p875>;
vdda-pll-supply = <&vreg_l12a_1p8>;
vdda-phy-dpdm-supply = <&vreg_l24a_3p075>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi b/arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi
index d8cc0d729e99..3650f2501886 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi
@@ -450,6 +450,7 @@
&qusb2phy {
status = "okay";
+ vdd-supply = <&vreg_l1a_0p875>;
vdda-pll-supply = <&vreg_l12a_1p8>;
vdda-phy-dpdm-supply = <&vreg_l24a_3p075>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index c2caad85c668..58cee37cb8ee 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -2,6 +2,7 @@
/* Copyright (c) 2016, The Linux Foundation. All rights reserved. */
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-msm8998.h>
#include <dt-bindings/clock/qcom,gpucc-msm8998.h>
#include <dt-bindings/clock/qcom,mmcc-msm8998.h>
@@ -2790,11 +2791,11 @@
"gpll0_div";
clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
<&gcc GCC_MMSS_GPLL0_CLK>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi1_phy 1>,
- <&mdss_dsi1_phy 0>,
- <&mdss_hdmi_phy 0>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_hdmi_phy>,
<0>,
<0>,
<&gcc GCC_MMSS_GPLL0_DIV_CLK>;
@@ -2829,8 +2830,8 @@
compatible = "qcom,msm8998-dpu";
reg = <0x0c901000 0x8f000>,
<0x0c9a8e00 0xf0>,
- <0x0c9b0000 0x2008>,
- <0x0c9b8000 0x1040>;
+ <0x0c9b0000 0x3000>,
+ <0x0c9b8000 0x3000>;
reg-names = "mdp",
"regdma",
"vbif",
@@ -2932,8 +2933,8 @@
"bus";
assigned-clocks = <&mmcc BYTE0_CLK_SRC>,
<&mmcc PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmpd MSM8998_VDDCX>;
@@ -3008,8 +3009,8 @@
"bus";
assigned-clocks = <&mmcc BYTE1_CLK_SRC>,
<&mmcc PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmpd MSM8998_VDDCX>;
diff --git a/arch/arm64/boot/dts/qcom/pm8937.dtsi b/arch/arm64/boot/dts/qcom/pm8937.dtsi
index 42b3575b36ff..77809c3534a7 100644
--- a/arch/arm64/boot/dts/qcom/pm8937.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8937.dtsi
@@ -143,6 +143,14 @@
#address-cells = <1>;
#size-cells = <0>;
+ pm8937_pwm: pwm {
+ compatible = "qcom,pm8937-pwm", "qcom,pm8916-pwm";
+
+ #pwm-cells = <2>;
+
+ status = "disabled";
+ };
+
pm8937_spmi_regulators: regulators {
compatible = "qcom,pm8937-regulators";
};
diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
index f0746123e594..f49ac1c1f8a3 100644
--- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
@@ -6,6 +6,7 @@
*/
#include <dt-bindings/clock/qcom,dispcc-qcm2290.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-qcm2290.h>
#include <dt-bindings/clock/qcom,qcm2290-gpucc.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
@@ -550,6 +551,13 @@
bias-disable;
};
+ qup_uart3_default: qup-uart3-default-state {
+ pins = "gpio8", "gpio9", "gpio10", "gpio11";
+ function = "qup3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
qup_uart4_default: qup-uart4-default-state {
pins = "gpio12", "gpio13";
function = "qup4";
@@ -749,6 +757,30 @@
#interconnect-cells = <2>;
};
+ cryptobam: dma-controller@1b04000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0x0 0x01b04000 0x0 0x24000>;
+ interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rpmcc RPM_SMD_CE1_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ qcom,controlled-remotely;
+ iommus = <&apps_smmu 0x0084 0x11>,
+ <&apps_smmu 0x0086 0x11>;
+ };
+
+ crypto: crypto@1b3a000 {
+ compatible = "qcom,qcm2290-qce", "qcom,ipq4019-qce", "qcom,qce";
+ reg = <0x0 0x01b3a000 0x0 0x6000>;
+ clocks = <&rpmcc RPM_SMD_CE1_CLK>;
+ clock-names = "core";
+ dmas = <&cryptobam 6>, <&cryptobam 7>;
+ dma-names = "rx", "tx";
+ iommus = <&apps_smmu 0x0084 0x11>,
+ <&apps_smmu 0x0086 0x11>;
+ };
+
qfprom@1b44000 {
compatible = "qcom,qcm2290-qfprom", "qcom,qfprom";
reg = <0x0 0x01b44000 0x0 0x3000>;
@@ -1073,7 +1105,7 @@
interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
&qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
- &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
+ &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
interconnect-names = "qup-core",
"qup-config";
#address-cells = <1>;
@@ -1092,7 +1124,7 @@
interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
&qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
- &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
+ &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
interconnect-names = "qup-core",
"qup-config";
status = "disabled";
@@ -1137,7 +1169,7 @@
interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
&qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
- &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
+ &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
interconnect-names = "qup-core",
"qup-config";
#address-cells = <1>;
@@ -1184,7 +1216,7 @@
interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
&qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
- &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
+ &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
interconnect-names = "qup-core",
"qup-config";
#address-cells = <1>;
@@ -1231,7 +1263,7 @@
interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
&qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
- &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
+ &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
interconnect-names = "qup-core",
"qup-config";
#address-cells = <1>;
@@ -1239,6 +1271,23 @@
status = "disabled";
};
+ uart3: serial@4a8c000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0x04a8c000 0x0 0x4000>;
+ interrupts = <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart3_default>;
+ pinctrl-names = "default";
+ interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
+ &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
+ <&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
+ &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ status = "disabled";
+ };
+
i2c4: i2c@4a90000 {
compatible = "qcom,geni-i2c";
reg = <0x0 0x04a90000 0x0 0x4000>;
@@ -1278,7 +1327,7 @@
interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
&qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
- &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
+ &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
interconnect-names = "qup-core",
"qup-config";
#address-cells = <1>;
@@ -1297,7 +1346,7 @@
interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
&qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
- &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
+ &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
interconnect-names = "qup-core",
"qup-config";
status = "disabled";
@@ -1342,7 +1391,7 @@
interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
&qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
- &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
+ &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
interconnect-names = "qup-core",
"qup-config";
#address-cells = <1>;
@@ -1616,7 +1665,7 @@
mdp: display-controller@5e01000 {
compatible = "qcom,qcm2290-dpu";
reg = <0x0 0x05e01000 0x0 0x8f000>,
- <0x0 0x05eb0000 0x0 0x2008>;
+ <0x0 0x05eb0000 0x0 0x3000>;
reg-names = "mdp",
"vbif";
@@ -1702,8 +1751,8 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmpd QCM2290_VDDCX>;
@@ -1785,8 +1834,8 @@
<&rpmcc RPM_SMD_XO_A_CLK_SRC>,
<&gcc GCC_DISP_GPLL0_CLK_SRC>,
<&gcc GCC_DISP_GPLL0_DIV_CLK_SRC>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
clock-names = "bi_tcxo",
"bi_tcxo_ao",
"gcc_disp_gpll0_clk_src",
diff --git a/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts b/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts
index 769c66cb5d19..e115b6a52b29 100644
--- a/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts
+++ b/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts
@@ -14,6 +14,8 @@
#include <dt-bindings/leds/common.h>
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
+#include <dt-bindings/sound/qcom,q6dsp-lpass-ports.h>
#include "sc7280.dtsi"
#include "pm7250b.dtsi"
#include "pm7325.dtsi"
@@ -101,7 +103,15 @@
reg = <1>;
pmic_glink_ss_in: endpoint {
- remote-endpoint = <&usb_1_dwc3_ss>;
+ remote-endpoint = <&redriver_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_sbu: endpoint {
+ remote-endpoint = <&ocp96011_sbu_mux>;
};
};
};
@@ -138,6 +148,51 @@
vin-supply = <&vreg_s8b>;
};
+ vreg_oled_dvdd: regulator-oled-dvdd {
+ compatible = "regulator-fixed";
+ regulator-name = "oled_dvdd";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+
+ gpio = <&tlmm 51 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vreg_s1b>;
+
+ regulator-boot-on;
+ };
+
+ vreg_oled_vci: regulator-oled-vci {
+ compatible = "regulator-fixed";
+ regulator-name = "oled_vci";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ gpio = <&pm8350c_gpios 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vreg_l13c>;
+
+ regulator-boot-on;
+ };
+
+ vreg_usb_redrive_1v8: regulator-usb-redrive-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "USB_REDRIVE_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 61 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vreg_bob>;
+
+ regulator-boot-on;
+
+ pinctrl-0 = <&usb_redrive_1v8_en_default>;
+ pinctrl-names = "default";
+ };
+
reserved-memory {
cont_splash_mem: cont-splash@e1000000 {
reg = <0x0 0xe1000000 0x0 0x2300000>;
@@ -597,11 +652,6 @@
};
};
-&dispcc {
- /* Disable for now so simple-framebuffer continues working */
- status = "disabled";
-};
-
&gcc {
protected-clocks = <GCC_CFG_NOC_LPASS_CLK>,
<GCC_EDP_CLKREF_EN>,
@@ -628,6 +678,14 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+};
+
+&gpu_zap_shader {
+ firmware-name = "qcom/qcm6490/fairphone5/a660_zap.mbn";
+};
+
&i2c1 {
status = "okay";
@@ -702,7 +760,26 @@
};
/* Pixelworks @ 26 */
- /* FSA4480 USB audio switch @ 42 */
+
+ typec-mux@42 {
+ compatible = "ocs,ocp96011", "fcs,fsa4480";
+ reg = <0x42>;
+
+ interrupts-extended = <&tlmm 7 IRQ_TYPE_LEVEL_LOW>;
+
+ vcc-supply = <&vreg_bob>;
+
+ mode-switch;
+ orientation-switch;
+
+ port {
+ ocp96011_sbu_mux: endpoint {
+ remote-endpoint = <&pmic_glink_sbu>;
+ data-lanes = <1 0>;
+ };
+ };
+ };
+
/* AW86927FCR haptics @ 5a */
};
@@ -716,7 +793,36 @@
&i2c4 {
status = "okay";
- /* PTN36502 USB redriver @ 1a */
+ typec-mux@1a {
+ compatible = "nxp,ptn36502";
+ reg = <0x1a>;
+
+ vdd18-supply = <&vreg_usb_redrive_1v8>;
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ redriver_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ redriver_ss_in: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_out>;
+ };
+ };
+ };
+ };
};
&i2c9 {
@@ -733,6 +839,54 @@
status = "okay";
};
+&mdss {
+ status = "okay";
+};
+
+&mdss_dp {
+ status = "okay";
+};
+
+&mdss_dp_out {
+ data-lanes = <0 1>;
+};
+
+&mdss_dsi {
+ vdda-supply = <&vreg_l6b>;
+ status = "okay";
+
+ panel@0 {
+ compatible = "fairphone,fp5-rm692e5-boe", "raydium,rm692e5";
+ reg = <0>;
+
+ reset-gpios = <&tlmm 44 GPIO_ACTIVE_LOW>;
+
+ vci-supply = <&vreg_oled_vci>;
+ vddio-supply = <&vreg_l12c>;
+ dvdd-supply = <&vreg_oled_dvdd>;
+
+ pinctrl-0 = <&disp_reset_n_active>, <&mdp_vsync>;
+ pinctrl-1 = <&disp_reset_n_suspend>, <&mdp_vsync>;
+ pinctrl-names = "default", "sleep";
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+};
+
+&mdss_dsi0_out {
+ data-lanes = <0 1 2 3>;
+ remote-endpoint = <&panel_in>;
+};
+
+&mdss_dsi_phy {
+ vdds-supply = <&vreg_l10c>;
+ status = "okay";
+};
+
&pm7250b_adc {
pinctrl-0 = <&pm7250b_adc_default>;
pinctrl-names = "default";
@@ -995,10 +1149,49 @@
status = "okay";
};
+&sound {
+ compatible = "fairphone,fp5-sndcard";
+ model = "Fairphone 5";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ displayport-rx-dai-link {
+ link-name = "DisplayPort Playback";
+
+ codec {
+ sound-dai = <&mdss_dp>;
+ };
+
+ cpu {
+ sound-dai = <&q6afedai DISPLAY_PORT_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+};
+
&spi13 {
status = "okay";
- /* Goodix touchscreen @ 0 */
+ touchscreen@0 {
+ compatible = "goodix,gt9897";
+ reg = <0>;
+ interrupts-extended = <&tlmm 81 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&tlmm 105 GPIO_ACTIVE_LOW>;
+ avdd-supply = <&vreg_l3c>;
+ vddio-supply = <&vreg_l2c>;
+ spi-max-frequency = <1000000>;
+ touchscreen-size-x = <1224>;
+ touchscreen-size-y = <2700>;
+ };
};
&tlmm {
@@ -1015,6 +1208,20 @@
bias-disable;
};
+ disp_reset_n_active: disp-reset-n-active-state {
+ pins = "gpio44";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ disp_reset_n_suspend: disp-reset-n-suspend-state {
+ pins = "gpio44";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
hall_sensor_default: hall-sensor-default-state {
pins = "gpio155";
function = "gpio";
@@ -1022,6 +1229,13 @@
bias-pull-up;
};
+ mdp_vsync: mdp-vsync-state {
+ pins = "gpio80";
+ function = "mdp_vsync";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
pm8008_int_default: pm8008-int-default-state {
pins = "gpio25";
function = "gpio";
@@ -1080,6 +1294,14 @@
function = "gpio";
bias-pull-down;
};
+
+ usb_redrive_1v8_en_default: usb-redrive-1v8-en-default-state {
+ pins = "gpio61";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
};
&uart5 {
@@ -1154,10 +1376,6 @@
remote-endpoint = <&pmic_glink_hs_in>;
};
-&usb_1_dwc3_ss {
- remote-endpoint = <&pmic_glink_ss_in>;
-};
-
&usb_1_hsphy {
vdda-pll-supply = <&vreg_l10c>;
vdda18-supply = <&vreg_l1c>;
@@ -1184,12 +1402,16 @@
status = "okay";
};
+&usb_dp_qmpphy_out {
+ remote-endpoint = <&redriver_ss_in>;
+};
+
&venus {
firmware-name = "qcom/qcm6490/fairphone5/venus.mbn";
status = "okay";
};
&wifi {
- qcom,ath11k-calibration-variant = "Fairphone_5";
+ qcom,calibration-variant = "Fairphone_5";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
index 9209efcc49b5..7a155ef6492e 100644
--- a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
+++ b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
@@ -507,6 +507,27 @@
};
};
+&gcc {
+ protected-clocks = <GCC_AGGRE_NOC_PCIE_1_AXI_CLK> ,<GCC_PCIE_1_AUX_CLK>,
+ <GCC_PCIE_1_AUX_CLK_SRC>, <GCC_PCIE_1_CFG_AHB_CLK>,
+ <GCC_PCIE_1_MSTR_AXI_CLK>, <GCC_PCIE_1_PHY_RCHNG_CLK_SRC>,
+ <GCC_PCIE_1_PIPE_CLK>, <GCC_PCIE_1_PIPE_CLK_SRC>,
+ <GCC_PCIE_1_SLV_AXI_CLK>, <GCC_PCIE_1_SLV_Q2A_AXI_CLK>,
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>, <GCC_QSPI_CORE_CLK>,
+ <GCC_QSPI_CORE_CLK_SRC>,<GCC_USB30_SEC_MASTER_CLK>,
+ <GCC_USB30_SEC_MASTER_CLK_SRC>, <GCC_USB30_SEC_MOCK_UTMI_CLK>,
+ <GCC_USB30_SEC_MOCK_UTMI_CLK_SRC>,
+ <GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC>, <GCC_USB30_SEC_SLEEP_CLK>,
+ <GCC_USB3_SEC_PHY_AUX_CLK>, <GCC_USB3_SEC_PHY_AUX_CLK_SRC>,
+ <GCC_USB3_SEC_PHY_COM_AUX_CLK>, <GCC_USB3_SEC_PHY_PIPE_CLK>,
+ <GCC_USB3_SEC_PHY_PIPE_CLK_SRC>, <GCC_CFG_NOC_LPASS_CLK>,
+ <GCC_MSS_GPLL0_MAIN_DIV_CLK_SRC>, <GCC_MSS_CFG_AHB_CLK>,
+ <GCC_MSS_OFFLINE_AXI_CLK>, <GCC_MSS_SNOC_AXI_CLK>,
+ <GCC_MSS_Q6_MEMNOC_AXI_CLK>, <GCC_MSS_Q6SS_BOOT_CLK_SRC>,
+ <GCC_SEC_CTRL_CLK_SRC>, <GCC_WPSS_AHB_CLK>,
+ <GCC_WPSS_AHB_BDG_MST_CLK>, <GCC_WPSS_RSCP_CLK>;
+};
+
&gpu {
status = "okay";
};
@@ -755,7 +776,12 @@
&wifi {
memory-region = <&wlan_fw_mem>;
- qcom,ath11k-calibration-variant = "Qualcomm_qcm6490idp";
+ qcom,calibration-variant = "Qualcomm_qcm6490idp";
status = "okay";
};
+
+&lpass_audiocc {
+ compatible = "qcom,qcm6490-lpassaudiocc";
+ /delete-property/ power-domains;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts b/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
index 75930f957696..b9a0f7ac4d9c 100644
--- a/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
+++ b/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause
/*
* Copyright (c) 2023, Luca Weiss <luca.weiss@fairphone.com>
- * Copyright (c) 2024, Caleb Connolly <caleb@postmarketos.org>
+ * Copyright (c) 2024, Casey Connolly <casey.connolly@linaro.org>
*/
/dts-v1/;
@@ -953,7 +953,7 @@
};
&wifi {
- qcom,ath11k-calibration-variant = "SHIFTphone_8";
+ qcom,calibration-variant = "SHIFTphone_8";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/qcs615.dtsi b/arch/arm64/boot/dts/qcom/qcs615.dtsi
index f4abfad474ea..bb8b6c3ebd03 100644
--- a/arch/arm64/boot/dts/qcom/qcs615.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs615.dtsi
@@ -417,6 +417,12 @@
#size-cells = <2>;
ranges;
+ aop_cmd_db_mem: aop-cmd-db@85f20000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x85f20000 0x0 0x20000>;
+ no-map;
+ };
+
smem_region: smem@86000000 {
compatible = "qcom,smem";
reg = <0x0 0x86000000 0x0 0x200000>;
@@ -453,6 +459,11 @@
};
};
+ rng@793000 {
+ compatible = "qcom,qcs615-trng", "qcom,trng";
+ reg = <0x0 0x00793000 0x0 0x1000>;
+ };
+
sdhc_1: mmc@7c4000 {
compatible = "qcom,qcs615-sdhci", "qcom,sdhci-msm-v5";
reg = <0x0 0x007c4000 0x0 0x1000>,
@@ -1022,10 +1033,10 @@
"bus_aggr_clk",
"iface_clk",
"core_clk_unipro",
- "core_clk_ice",
"ref_clk",
"tx_lane0_sync_clk",
- "rx_lane0_sync_clk";
+ "rx_lane0_sync_clk",
+ "ice_core_clk";
resets = <&gcc GCC_UFS_PHY_BCR>;
reset-names = "rst";
@@ -1060,10 +1071,10 @@
/bits/ 64 <0>,
/bits/ 64 <0>,
/bits/ 64 <37500000>,
- /bits/ 64 <75000000>,
/bits/ 64 <0>,
/bits/ 64 <0>,
- /bits/ 64 <0>;
+ /bits/ 64 <0>,
+ /bits/ 64 <75000000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
@@ -1072,10 +1083,10 @@
/bits/ 64 <0>,
/bits/ 64 <0>,
/bits/ 64 <75000000>,
- /bits/ 64 <150000000>,
/bits/ 64 <0>,
/bits/ 64 <0>,
- /bits/ 64 <0>;
+ /bits/ 64 <0>,
+ /bits/ 64 <150000000>;
required-opps = <&rpmhpd_opp_svs>;
};
@@ -1084,10 +1095,10 @@
/bits/ 64 <0>,
/bits/ 64 <0>,
/bits/ 64 <150000000>,
- /bits/ 64 <300000000>,
/bits/ 64 <0>,
/bits/ 64 <0>,
- /bits/ 64 <0>;
+ /bits/ 64 <0>,
+ /bits/ 64 <300000000>;
required-opps = <&rpmhpd_opp_nom>;
};
};
@@ -1114,6 +1125,29 @@
status = "disabled";
};
+ cryptobam: dma-controller@1dc4000 {
+ compatible = "qcom,bam-v1.7.4", "qcom,bam-v1.7.0";
+ reg = <0x0 0x01dc4000 0x0 0x24000>;
+ interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ qcom,controlled-remotely;
+ num-channels = <16>;
+ qcom,num-ees = <4>;
+ iommus = <&apps_smmu 0x0104 0x0011>;
+ };
+
+ crypto: crypto@1dfa000 {
+ compatible = "qcom,qcs615-qce", "qcom,sm8150-qce", "qcom,qce";
+ reg = <0x0 0x01dfa000 0x0 0x6000>;
+ dmas = <&cryptobam 4>, <&cryptobam 5>;
+ dma-names = "rx", "tx";
+ iommus = <&apps_smmu 0x0104 0x0011>;
+ interconnects = <&aggre1_noc MASTER_CRYPTO QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "memory";
+ };
+
tcsr_mutex: hwlock@1f40000 {
compatible = "qcom,tcsr-mutex";
reg = <0x0 0x01f40000 0x0 0x20000>;
@@ -1819,7 +1853,7 @@
in-ports {
port {
replicator0_in: endpoint {
- remote-endpoint= <&tmc_etf_out>;
+ remote-endpoint = <&tmc_etf_out>;
};
};
};
@@ -1832,7 +1866,7 @@
reg = <1>;
replicator0_out1: endpoint {
- remote-endpoint= <&replicator1_in>;
+ remote-endpoint = <&replicator1_in>;
};
};
};
@@ -1872,7 +1906,7 @@
in-ports {
port {
replicator1_in: endpoint {
- remote-endpoint= <&replicator0_out1>;
+ remote-endpoint = <&replicator0_out1>;
};
};
};
@@ -1880,7 +1914,7 @@
out-ports {
port {
replicator1_out: endpoint {
- remote-endpoint= <&funnel_swao_in6>;
+ remote-endpoint = <&funnel_swao_in6>;
};
};
};
@@ -2311,7 +2345,7 @@
reg = <6>;
funnel_swao_in6: endpoint {
- remote-endpoint= <&replicator1_out>;
+ remote-endpoint = <&replicator1_out>;
};
};
@@ -2319,7 +2353,7 @@
reg = <7>;
funnel_swao_in7: endpoint {
- remote-endpoint= <&tpda_swao_out>;
+ remote-endpoint = <&tpda_swao_out>;
};
};
};
@@ -2343,7 +2377,7 @@
in-ports {
port {
tmc_etf_swao_in: endpoint {
- remote-endpoint= <&funnel_swao_out>;
+ remote-endpoint = <&funnel_swao_out>;
};
};
};
@@ -2351,7 +2385,7 @@
out-ports {
port {
tmc_etf_swao_out: endpoint {
- remote-endpoint= <&replicator_swao_in>;
+ remote-endpoint = <&replicator_swao_in>;
};
};
};
@@ -3197,14 +3231,13 @@
interrupt-controller;
};
- aoss_qmp: power-controller@c300000 {
+ aoss_qmp: power-management@c300000 {
compatible = "qcom,qcs615-aoss-qmp", "qcom,aoss-qmp";
reg = <0x0 0x0c300000 0x0 0x400>;
interrupts = <GIC_SPI 389 IRQ_TYPE_EDGE_RISING>;
mboxes = <&apss_shared 0>;
#clock-cells = <0>;
- #power-domain-cells = <1>;
};
sram@c3f0000 {
@@ -3304,7 +3337,6 @@
#interrupt-cells = <4>;
#address-cells = <2>;
#size-cells = <0>;
- cell-index = <0>;
qcom,channel = <0>;
qcom,ee = <0>;
};
@@ -3590,6 +3622,7 @@
snps,dis-u1-entry-quirk;
snps,dis-u2-entry-quirk;
snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,has-lpm-erratum;
snps,hird-threshold = /bits/ 8 <0x10>;
@@ -3651,6 +3684,7 @@
phy-names = "usb2-phy";
snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,has-lpm-erratum;
snps,hird-threshold = /bits/ 8 <0x10>;
diff --git a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2-industrial-mezzanine.dtso b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2-industrial-mezzanine.dtso
new file mode 100644
index 000000000000..619a42b5ef48
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2-industrial-mezzanine.dtso
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+*/
+
+/dts-v1/;
+/plugin/;
+#include <dt-bindings/clock/qcom,gcc-sc7280.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+&spi11 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ st33htpm0: tpm@0 {
+ compatible = "st,st33htpm-spi", "tcg,tpm_tis-spi";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2-vision-mezzanine.dtso b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2-vision-mezzanine.dtso
new file mode 100644
index 000000000000..b9e4a5214f70
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2-vision-mezzanine.dtso
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/*
+ * Camera Sensor overlay on top of rb3gen2 core kit.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,camcc-sc7280.h>
+#include <dt-bindings/gpio/gpio.h>
+
+&camss {
+ vdda-phy-supply = <&vreg_l10c_0p88>;
+ vdda-pll-supply = <&vreg_l6b_1p2>;
+
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* The port index denotes CSIPHY id i.e. csiphy3 */
+ port@3 {
+ reg = <3>;
+
+ csiphy3_ep: endpoint {
+ clock-lanes = <7>;
+ data-lanes = <0 1 2 3>;
+ remote-endpoint = <&imx577_ep>;
+ };
+ };
+ };
+};
+
+&cci1 {
+ status = "okay";
+};
+
+&cci1_i2c1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ camera@1a {
+ compatible = "sony,imx577";
+
+ reg = <0x1a>;
+
+ reset-gpios = <&tlmm 78 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default", "suspend";
+ pinctrl-0 = <&cam2_default>;
+ pinctrl-1 = <&cam2_suspend>;
+
+ clocks = <&camcc CAM_CC_MCLK3_CLK>;
+ assigned-clocks = <&camcc CAM_CC_MCLK3_CLK>;
+ assigned-clock-rates = <24000000>;
+
+ dovdd-supply = <&vreg_l18b_1p8>;
+ avdd-supply = <&vph_pwr>;
+ dvdd-supply = <&vph_pwr>;
+
+ port {
+ imx577_ep: endpoint {
+ link-frequencies = /bits/ 64 <600000000>;
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&csiphy3_ep>;
+ };
+ };
+ };
+};
+
+&tlmm {
+ cam2_default: cam2-default-state {
+ pins = "gpio67";
+ function = "cam_mclk";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ cam2_suspend: cam2-suspend-state {
+ pins = "gpio67";
+ function = "cam_mclk";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
index 7a36c90ad4ec..5fbcd48f2e2d 100644
--- a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
+++ b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause
/*
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/dts-v1/;
@@ -9,6 +9,8 @@
#define PM7250B_SID 8
#define PM7250B_SID1 9
+#include <dt-bindings/iio/qcom,spmi-adc7-pmk8350.h>
+#include <dt-bindings/iio/qcom,spmi-adc7-pm7325.h>
#include <dt-bindings/leds/common.h>
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
@@ -34,6 +36,7 @@
aliases {
serial0 = &uart5;
+ serial1 = &uart7;
};
chosen {
@@ -174,6 +177,7 @@
#address-cells = <1>;
#size-cells = <0>;
+ orientation-gpios = <&tlmm 140 GPIO_ACTIVE_HIGH>;
connector@0 {
compatible = "usb-c-connector";
@@ -212,12 +216,107 @@
};
};
+ thermal-zones {
+ sdm-skin-thermal {
+ thermal-sensors = <&pmk8350_adc_tm 3>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ quiet-thermal {
+ thermal-sensors = <&pmk8350_adc_tm 1>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ xo-thermal {
+ thermal-sensors = <&pmk8350_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+ };
+
vph_pwr: vph-pwr-regulator {
compatible = "regulator-fixed";
regulator-name = "vph_pwr";
regulator-min-microvolt = <3700000>;
regulator-max-microvolt = <3700000>;
};
+
+ wcn6750-pmu {
+ compatible = "qcom,wcn6750-pmu";
+ pinctrl-0 = <&bt_en>;
+ pinctrl-names = "default";
+ vddaon-supply = <&vreg_s7b_0p972>;
+ vddasd-supply = <&vreg_l11c_2p8>;
+ vddpmu-supply = <&vreg_s7b_0p972>;
+ vddrfa0p8-supply = <&vreg_s7b_0p972>;
+ vddrfa1p2-supply = <&vreg_s8b_1p272>;
+ vddrfa1p7-supply = <&vreg_s1b_1p872>;
+ vddrfa2p2-supply = <&vreg_s1c_2p19>;
+
+ bt-enable-gpios = <&tlmm 85 GPIO_ACTIVE_HIGH>;
+
+ regulators {
+ vreg_pmu_rfa_cmn: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn";
+ };
+
+ vreg_pmu_aon_0p59: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p59";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p85: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p85";
+ };
+
+ vreg_pmu_btcmx_0p85: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p85";
+ };
+
+ vreg_pmu_rfa_0p8: ldo5 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo6 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p7: ldo7 {
+ regulator-name = "vreg_pmu_rfa_1p7";
+ };
+
+ vreg_pmu_pcie_0p9: ldo8 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_pcie_1p8: ldo9 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+ };
+ };
};
&apps_rsc {
@@ -745,6 +844,36 @@
};
};
+&pm7325_temp_alarm {
+ io-channels = <&pmk8350_vadc PM7325_ADC7_DIE_TEMP>;
+ io-channel-names = "thermal";
+};
+
+&pmk8350_adc_tm {
+ status = "okay";
+
+ xo-therm@0 {
+ reg = <0>;
+ io-channels = <&pmk8350_vadc PMK8350_ADC7_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ quiet-therm@1 {
+ reg = <1>;
+ io-channels = <&pmk8350_vadc PM7325_ADC7_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ sdm-skin-therm@3 {
+ reg = <3>;
+ io-channels = <&pmk8350_vadc PM7325_ADC7_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
&pm8350c_pwm {
nvmem = <&pmk8350_sdam_21>,
<&pmk8350_sdam_22>;
@@ -789,6 +918,44 @@
status = "okay";
};
+&pmk8350_vadc {
+ channel@3 {
+ reg = <PMK8350_ADC7_DIE_TEMP>;
+ label = "pmk8350_die_temp";
+ qcom,pre-scaling = <1 1>;
+ };
+
+ channel@44 {
+ reg = <PMK8350_ADC7_AMUX_THM1_100K_PU>;
+ label = "xo_therm";
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ qcom,ratiometric;
+ };
+
+ channel@103 {
+ reg = <PM7325_ADC7_DIE_TEMP>;
+ label = "pm7325_die_temp";
+ qcom,pre-scaling = <1 1>;
+ };
+
+ channel@144 {
+ reg = <PM7325_ADC7_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ label = "pm7325_quiet_therm";
+ };
+
+ channel@146 {
+ reg = <PM7325_ADC7_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ label = "pm7325_sdm_skin_therm";
+ };
+};
+
&pon_pwrkey {
status = "okay";
};
@@ -799,6 +966,39 @@
status = "okay";
};
+&qup_uart7_cts {
+ /*
+ * Configure a bias-bus-hold on CTS to lower power
+ * usage when Bluetooth is turned off. Bus hold will
+ * maintain a low power state regardless of whether
+ * the Bluetooth module drives the pin in either
+ * direction or leaves the pin fully unpowered.
+ */
+ bias-bus-hold;
+};
+
+&qup_uart7_rts {
+ /* We'll drive RTS, so no pull */
+ drive-strength = <2>;
+ bias-disable;
+};
+
+&qup_uart7_rx {
+ /*
+ * Configure a pull-up on RX. This is needed to avoid
+ * garbage data when the TX pin of the Bluetooth module is
+ * in tri-state (module powered off or not driving the
+ * signal yet).
+ */
+ bias-pull-up;
+};
+
+&qup_uart7_tx {
+ /* We'll drive TX, so no pull */
+ drive-strength = <2>;
+ bias-disable;
+};
+
&qupv3_id_0 {
status = "okay";
};
@@ -842,12 +1042,90 @@
&tlmm {
gpio-reserved-ranges = <32 2>, /* ADSP */
<48 4>; /* NFC */
+
+ bt_en: bt-en-state {
+ pins = "gpio85";
+ function = "gpio";
+ output-low;
+ bias-disable;
+ };
+
+ qup_uart7_sleep_cts: qup-uart7-sleep-cts-state {
+ pins = "gpio28";
+ function = "gpio";
+ /*
+ * Configure a bias-bus-hold on CTS to lower power
+ * usage when Bluetooth is turned off. Bus hold will
+ * maintain a low power state regardless of whether
+ * the Bluetooth module drives the pin in either
+ * direction or leaves the pin fully unpowered.
+ */
+ bias-bus-hold;
+ };
+
+ qup_uart7_sleep_rts: qup-uart7-sleep-rts-state {
+ pins = "gpio29";
+ function = "gpio";
+ /*
+			 * Configure pull-down on RTS. As RTS is an active-low
+			 * signal, pull it low to indicate to the BT SoC that it
+			 * can wake up the system anytime from suspend by
+			 * pulling RX low (by sending wakeup bytes).
+ */
+ bias-pull-down;
+ };
+
+ qup_uart7_sleep_rx: qup-uart7-sleep-rx-state {
+ pins = "gpio31";
+ function = "gpio";
+ /*
+ * Configure a pull-up on RX. This is needed to avoid
+ * garbage data when the TX pin of the Bluetooth module
+			 * is floating, which may cause spurious wakeups.
+ */
+ bias-pull-up;
+ };
+
+ qup_uart7_sleep_tx: qup-uart7-sleep-tx-state {
+ pins = "gpio30";
+ function = "gpio";
+ /*
+ * Configure pull-up on TX when it isn't actively driven
+			 * to prevent the BT SoC from receiving garbage during sleep.
+ */
+ bias-pull-up;
+ };
};
&uart5 {
status = "okay";
};
+&uart7 {
+ /delete-property/ interrupts;
+ interrupts-extended = <&intc GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>,
+ <&tlmm 31 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-1 = <&qup_uart7_sleep_cts>,
+ <&qup_uart7_sleep_rts>,
+ <&qup_uart7_sleep_tx>,
+ <&qup_uart7_sleep_rx>;
+ pinctrl-names = "default",
+ "sleep";
+
+ status = "okay";
+
+ bluetooth: bluetooth {
+ compatible = "qcom,wcn6750-bt";
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddbtcmx-supply = <&vreg_pmu_btcmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p7-supply = <&vreg_pmu_rfa_1p7>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ max-speed = <3200000>;
+ };
+};
+
&usb_1 {
status = "okay";
};
@@ -919,7 +1197,7 @@
&wifi {
memory-region = <&wlan_fw_mem>;
- qcom,ath11k-calibration-variant = "Qualcomm_rb3gen2";
+ qcom,calibration-variant = "Qualcomm_rb3gen2";
status = "okay";
};
@@ -986,3 +1264,8 @@
bias-pull-up;
};
};
+
+&lpass_audiocc {
+ compatible = "qcom,qcm6490-lpassaudiocc";
+ /delete-property/ power-domains;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs8300-pmics.dtsi b/arch/arm64/boot/dts/qcom/qcs8300-pmics.dtsi
new file mode 100644
index 000000000000..a94b0bfa98dc
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs8300-pmics.dtsi
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+ pmm8620au_0: pmic@0 {
+ compatible = "qcom,pmm8654au", "qcom,spmi-pmic";
+ reg = <0x0 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmm8620au_0_rtc: rtc@6100 {
+ compatible = "qcom,pmk8350-rtc";
+ reg = <0x6100>, <0x6200>;
+ reg-names = "rtc", "alarm";
+ interrupts = <0x0 0x62 0x1 IRQ_TYPE_EDGE_RISING>;
+ allow-set-time;
+ };
+
+ pmm8620au_0_gpios: gpio@8800 {
+ compatible = "qcom,pmm8654au-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pmm8620au_0_gpios 0 0 12>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ pmm8650au_1: pmic@2 {
+ compatible = "qcom,pmm8654au", "qcom,spmi-pmic";
+ reg = <0x2 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmm8650au_1_gpios: gpio@8800 {
+ compatible = "qcom,pmm8654au-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pmm8650au_1_gpios 0 0 12>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs8300-ride.dts b/arch/arm64/boot/dts/qcom/qcs8300-ride.dts
index b5c9f89b3435..3ff8f398cad3 100644
--- a/arch/arm64/boot/dts/qcom/qcs8300-ride.dts
+++ b/arch/arm64/boot/dts/qcom/qcs8300-ride.dts
@@ -9,6 +9,7 @@
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include "qcs8300.dtsi"
+#include "qcs8300-pmics.dtsi"
/ {
model = "Qualcomm Technologies, Inc. QCS8300 Ride";
compatible = "qcom,qcs8300-ride", "qcom,qcs8300";
@@ -21,6 +22,16 @@
chosen {
stdout-path = "serial0:115200n8";
};
+
+ regulator-usb2-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "USB2_VBUS";
+ gpio = <&pmm8650au_1_gpios 7 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&usb2_en>;
+ pinctrl-names = "default";
+ enable-active-high;
+ regulator-always-on;
+ };
};
&apps_rsc {
@@ -257,7 +268,6 @@
mtl_tx_setup: tx-queues-config {
snps,tx-queues-to-use = <4>;
- snps,tx-sched-sp;
queue0 {
snps,dcb-algorithm;
@@ -285,6 +295,15 @@
};
};
+&pmm8650au_1_gpios {
+ usb2_en: usb2-en-state {
+ pins = "gpio7";
+ function = "normal";
+ output-enable;
+ power-source = <0>;
+ };
+};
+
&qupv3_id_0 {
status = "okay";
};
@@ -354,6 +373,14 @@
status = "okay";
};
+&usb_2_hsphy {
+ vdda-pll-supply = <&vreg_l7a>;
+ vdda18-supply = <&vreg_l7c>;
+ vdda33-supply = <&vreg_l9a>;
+
+ status = "okay";
+};
+
&usb_qmpphy {
vdda-phy-supply = <&vreg_l7a>;
vdda-pll-supply = <&vreg_l5a>;
@@ -368,3 +395,11 @@
&usb_1_dwc3 {
dr_mode = "peripheral";
};
+
+&usb_2 {
+ status = "okay";
+};
+
+&usb_2_dwc3 {
+ dr_mode = "host";
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs8300.dtsi b/arch/arm64/boot/dts/qcom/qcs8300.dtsi
index 4a057f7c0d9f..009f9658a4fa 100644
--- a/arch/arm64/boot/dts/qcom/qcs8300.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs8300.dtsi
@@ -9,6 +9,7 @@
#include <dt-bindings/clock/qcom,sa8775p-dispcc.h>
#include <dt-bindings/clock/qcom,sa8775p-gpucc.h>
#include <dt-bindings/clock/qcom,sa8775p-videocc.h>
+#include <dt-bindings/dma/qcom-gpi.h>
#include <dt-bindings/firmware/qcom,scm.h>
#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interconnect/qcom,qcs8300-rpmh.h>
@@ -51,6 +52,7 @@
power-domain-names = "psci";
capacity-dmips-mhz = <1946>;
dynamic-power-coefficient = <472>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
l2_0: l2-cache {
compatible = "cache";
@@ -70,6 +72,7 @@
power-domain-names = "psci";
capacity-dmips-mhz = <1946>;
dynamic-power-coefficient = <472>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
l2_1: l2-cache {
compatible = "cache";
@@ -89,6 +92,7 @@
power-domain-names = "psci";
capacity-dmips-mhz = <1946>;
dynamic-power-coefficient = <507>;
+ qcom,freq-domain = <&cpufreq_hw 2>;
l2_2: l2-cache {
compatible = "cache";
@@ -108,6 +112,7 @@
power-domain-names = "psci";
capacity-dmips-mhz = <1946>;
dynamic-power-coefficient = <507>;
+ qcom,freq-domain = <&cpufreq_hw 2>;
l2_3: l2-cache {
compatible = "cache";
@@ -127,6 +132,7 @@
power-domain-names = "psci";
capacity-dmips-mhz = <1024>;
dynamic-power-coefficient = <100>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
l2_4: l2-cache {
compatible = "cache";
@@ -146,6 +152,7 @@
power-domain-names = "psci";
capacity-dmips-mhz = <1024>;
dynamic-power-coefficient = <100>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
l2_5: l2-cache {
compatible = "cache";
@@ -165,6 +172,7 @@
power-domain-names = "psci";
capacity-dmips-mhz = <1024>;
dynamic-power-coefficient = <100>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
l2_6: l2-cache {
compatible = "cache";
@@ -184,6 +192,7 @@
power-domain-names = "psci";
capacity-dmips-mhz = <1024>;
dynamic-power-coefficient = <100>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
l2_7: l2-cache {
compatible = "cache";
@@ -351,6 +360,15 @@
qcom,bcm-voters = <&apps_bcm_voter>;
};
+ qup_opp_table: opp-table-qup {
+ compatible = "operating-points-v2";
+
+ opp-120000000 {
+ opp-hz = /bits/ 64 <120000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+ };
+
pmu-a55 {
compatible = "arm,cortex-a55-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
@@ -627,6 +645,29 @@
#size-cells = <1>;
};
+ gpi_dma0: dma-controller@900000 {
+ compatible = "qcom,qcs8300-gpi-dma", "qcom,sm6350-gpi-dma";
+ reg = <0x0 0x900000 0x0 0x60000>;
+ #dma-cells = <3>;
+ interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x416 0x0>;
+ dma-channels = <12>;
+ dma-channel-mask = <0xfff>;
+ dma-coherent;
+ status = "disabled";
+ };
+
qupv3_id_0: geniqup@9c0000 {
compatible = "qcom,geni-se-qup";
reg = <0x0 0x9c0000 0x0 0x2000>;
@@ -637,14 +678,527 @@
"s-ahb";
#address-cells = <2>;
#size-cells = <2>;
+ iommus = <&apps_smmu 0x403 0x0>;
+ dma-coherent;
status = "disabled";
+ i2c0: i2c@980000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x980000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c0_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 550 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma0 0 0 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 0 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi0: spi@980000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x980000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi0_data_clk>, <&qup_spi0_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 550 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma0 0 0 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 0 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart0: serial@980000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0x980000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart0_cts>, <&qup_uart0_rts>,
+ <&qup_uart0_tx>, <&qup_uart0_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 550 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@984000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x984000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c1_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 551 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma0 0 1 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 1 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi1: spi@984000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x984000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi1_data_clk>, <&qup_spi1_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 551 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma0 0 1 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 1 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart1: serial@984000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0x984000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart1_cts>, <&qup_uart1_rts>,
+ <&qup_uart1_tx>, <&qup_uart1_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 551 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@988000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x988000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c2_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 529 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma0 0 2 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 2 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi2: spi@988000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x988000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi2_data_clk>, <&qup_spi2_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 529 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma0 0 2 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 2 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart2: serial@988000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0x988000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart2_cts>, <&qup_uart2_rts>,
+ <&qup_uart2_tx>, <&qup_uart2_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 529 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@98c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x98c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c3_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 530 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma0 0 3 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 3 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi3: spi@98c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x98c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi3_data_clk>, <&qup_spi3_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 530 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma0 0 3 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 3 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart3: serial@98c000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0x98c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart3_cts>, <&qup_uart3_rts>,
+ <&qup_uart3_tx>, <&qup_uart3_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 530 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@990000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x990000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c4_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 531 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma0 0 4 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 4 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi4: spi@990000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x990000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi4_data_clk>, <&qup_spi4_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 531 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma0 0 4 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 4 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart4: serial@990000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0x990000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart4_cts>, <&qup_uart4_rts>,
+ <&qup_uart4_tx>, <&qup_uart4_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 531 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@994000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x994000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c5_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma0 0 5 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 5 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi5: spi@994000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x994000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi5_data_clk>, <&qup_spi5_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma0 0 5 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 5 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart5: serial@994000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0x994000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart5_cts>, <&qup_uart5_rts>,
+ <&qup_uart5_tx>, <&qup_uart5_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@998000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x998000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c6_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 536 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma0 0 6 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 6 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi6: spi@998000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x998000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi6_data_clk>, <&qup_spi6_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 536 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma0 0 6 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 6 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart6: serial@998000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0x998000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart6_cts>, <&qup_uart6_rts>,
+ <&qup_uart6_tx>, <&qup_uart6_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 536 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
uart7: serial@99c000 {
compatible = "qcom,geni-debug-uart";
reg = <0x0 0x0099c000 0x0 0x4000>;
clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
clock-names = "se";
- pinctrl-0 = <&qup_uart7_default>;
+ pinctrl-0 = <&qup_uart7_tx>, <&qup_uart7_rx>;
pinctrl-names = "default";
interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
@@ -653,6 +1207,707 @@
&config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+ };
+
+ gpi_dma1: dma-controller@a00000 {
+ compatible = "qcom,qcs8300-gpi-dma", "qcom,sm6350-gpi-dma";
+ reg = <0x0 0xa00000 0x0 0x60000>;
+ #dma-cells = <3>;
+ interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 295 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 296 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x456 0x0>;
+ dma-channels = <12>;
+ dma-channel-mask = <0xfff>;
+ dma-coherent;
+ status = "disabled";
+ };
+
+ qupv3_id_1: geniqup@ac0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x0 0xac0000 0x0 0x2000>;
+ ranges;
+ clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ clock-names = "m-ahb",
+ "s-ahb";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ iommus = <&apps_smmu 0x443 0x0>;
+ dma-coherent;
+ status = "disabled";
+
+ i2c8: i2c@a80000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0xa80000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c8_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma1 0 0 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 0 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi8: spi@a80000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0xa80000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi8_data_clk>, <&qup_spi8_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma1 0 0 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 0 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart8: serial@a80000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0xa80000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart8_cts>, <&qup_uart8_rts>,
+ <&qup_uart8_tx>, <&qup_uart8_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c9: i2c@a84000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0xa84000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c9_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma1 0 1 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 1 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi9: spi@a84000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0xa84000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi9_data_clk>, <&qup_spi9_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma1 0 1 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 1 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart9: serial@a84000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0xa84000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart9_cts>, <&qup_uart9_rts>,
+ <&qup_uart9_tx>, <&qup_uart9_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c10: i2c@a88000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0xa88000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c10_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma1 0 2 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 2 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi10: spi@a88000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0xa88000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi10_data_clk>, <&qup_spi10_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma1 0 2 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 2 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart10: serial@a88000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0xa88000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart10_cts>, <&qup_uart10_rts>,
+ <&qup_uart10_tx>, <&qup_uart10_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c11: i2c@a8c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0xa8c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c11_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma1 0 3 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 3 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart11: serial@a8c000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0xa8c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart11_tx>, <&qup_uart11_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c12: i2c@a90000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0xa90000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c12_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma1 0 4 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 4 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi12: spi@a90000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0xa90000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi12_data_clk>, <&qup_spi12_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma1 0 4 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 4 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart12: serial@a90000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0xa90000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart12_cts>, <&qup_uart12_rts>,
+ <&qup_uart12_tx>, <&qup_uart12_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c13: i2c@a94000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0xa94000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c13_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma1 0 5 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 5 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi13: spi@a94000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0xa94000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi13_data_clk>, <&qup_spi13_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma1 0 5 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 5 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart13: serial@a94000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0xa94000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart13_cts>, <&qup_uart13_rts>,
+ <&qup_uart13_tx>, <&qup_uart13_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c14: i2c@a98000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0xa98000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c14_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 835 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma1 0 6 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 6 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi14: spi@a98000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0xa98000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi14_data_clk>, <&qup_spi14_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 835 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma1 0 6 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 6 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart14: serial@a98000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0xa98000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart14_cts>, <&qup_uart14_rts>,
+ <&qup_uart14_tx>, <&qup_uart14_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 835 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c15: i2c@a9c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0xa9c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c15_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma1 0 7 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 7 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi15: spi@a9c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0xa9c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi15_data_clk>, <&qup_spi15_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma1 0 7 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 7 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart15: serial@a9c000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0xa9c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart15_cts>, <&qup_uart15_rts>,
+ <&qup_uart15_tx>, <&qup_uart15_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+ };
+
+ gpi_dma3: dma-controller@b00000 {
+ compatible = "qcom,qcs8300-gpi-dma", "qcom,sm6350-gpi-dma";
+ reg = <0x0 0xb00000 0x0 0x60000>;
+ #dma-cells = <3>;
+ interrupts = <GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 527 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 528 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x56 0x0>;
+ dma-channels = <4>;
+ dma-channel-mask = <0xf>;
+ dma-coherent;
+ status = "disabled";
+ };
+
+ qupv3_id_3: geniqup@bc0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x0 0xbc0000 0x0 0x2000>;
+ ranges;
+ clocks = <&gcc GCC_QUPV3_WRAP_3_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_3_S_AHB_CLK>;
+ clock-names = "m-ahb",
+ "s-ahb";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ iommus = <&apps_smmu 0x43 0x0>;
+ dma-coherent;
+ status = "disabled";
+
+ i2c16: i2c@b80000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0xb80000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP3_S0_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c16_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 830 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_3 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_3 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_3 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_3 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma3 0 0 QCOM_GPI_I2C>,
+ <&gpi_dma3 1 0 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi16: spi@b80000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0xb80000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP3_S0_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi16_data_clk>, <&qup_spi16_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 830 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_3 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_3 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_3 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma3 0 0 QCOM_GPI_SPI>,
+ <&gpi_dma3 1 0 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart16: serial@b80000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0xb80000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP3_S0_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart16_cts>, <&qup_uart16_rts>,
+ <&qup_uart16_tx>, <&qup_uart16_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 830 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_3 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_3 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_3 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
status = "disabled";
};
};
@@ -798,18 +2053,6 @@
<&apps_smmu 0x481 0x00>;
};
- crypto: crypto@1dfa000 {
- compatible = "qcom,qcs8300-qce", "qcom,qce";
- reg = <0x0 0x01dfa000 0x0 0x6000>;
- dmas = <&cryptobam 4>, <&cryptobam 5>;
- dma-names = "rx", "tx";
- iommus = <&apps_smmu 0x480 0x00>,
- <&apps_smmu 0x481 0x00>;
- interconnects = <&aggre2_noc MASTER_CRYPTO_CORE0 QCOM_ICC_TAG_ALWAYS
- &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
- interconnect-names = "memory";
- };
-
ice: crypto@1d88000 {
compatible = "qcom,qcs8300-inline-crypto-engine",
"qcom,inline-crypto-engine";
@@ -2674,6 +3917,45 @@
#power-domain-cells = <1>;
};
+ adreno_smmu: iommu@3da0000 {
+ compatible = "qcom,qcs8300-smmu-500", "qcom,adreno-smmu",
+ "qcom,smmu-500", "arm,mmu-500";
+ reg = <0x0 0x3da0000 0x0 0x20000>;
+ #iommu-cells = <2>;
+ #global-interrupts = <2>;
+
+ interrupts = <GIC_SPI 672 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 678 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 679 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 680 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 681 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 682 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 683 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 684 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 686 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 687 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&gcc GCC_GPU_SNOC_DVM_GFX_CLK>,
+ <&gpucc GPU_CC_AHB_CLK>,
+ <&gpucc GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK>,
+ <&gpucc GPU_CC_CX_GMU_CLK>,
+ <&gpucc GPU_CC_HUB_CX_INT_CLK>,
+ <&gpucc GPU_CC_HUB_AON_CLK>;
+
+ clock-names = "gcc_gpu_memnoc_gfx_clk",
+ "gcc_gpu_snoc_dvm_gfx_clk",
+ "gpu_cc_ahb_clk",
+ "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ "gpu_cc_cx_gmu_clk",
+ "gpu_cc_hub_cx_int_clk",
+ "gpu_cc_hub_aon_clk";
+ power-domains = <&gpucc GPU_CC_CX_GDSC>;
+ dma-coherent;
+ };
+
pmu@9091000 {
compatible = "qcom,qcs8300-llcc-bwmon", "qcom,sc7280-llcc-bwmon";
reg = <0x0 0x9091000 0x0 0x1000>;
@@ -3027,6 +4309,33 @@
#clock-cells = <0>;
};
+ sram@c3f0000 {
+ compatible = "qcom,rpmh-stats";
+ reg = <0x0 0x0c3f0000 0x0 0x400>;
+ };
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x0 0x0c440000 0x0 0x1100>,
+ <0x0 0x0c600000 0x0 0x2000000>,
+ <0x0 0x0e600000 0x0 0x100000>,
+ <0x0 0x0e700000 0x0 0xa0000>,
+ <0x0 0x0c40a000 0x0 0x26000>;
+ reg-names = "core",
+ "chnls",
+ "obsrvr",
+ "intr",
+ "cnfg";
+ qcom,channel = <0>;
+ qcom,ee = <0>;
+ interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "periph_irq";
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+
tlmm: pinctrl@f100000 {
compatible = "qcom,qcs8300-tlmm";
reg = <0x0 0x0f100000 0x0 0x300000>;
@@ -3038,11 +4347,630 @@
#interrupt-cells = <2>;
wakeup-parent = <&pdc>;
- qup_uart7_default: qup-uart7-state {
- /* TX, RX */
- pins = "gpio43", "gpio44";
+ qup_i2c0_data_clk: qup-i2c0-data-clk-state {
+ pins = "gpio17", "gpio18";
+ function = "qup0_se0";
+ };
+
+ qup_i2c1_data_clk: qup-i2c1-data-clk-state {
+ pins = "gpio19", "gpio20";
+ function = "qup0_se1";
+ };
+
+ qup_i2c2_data_clk: qup-i2c2-data-clk-state {
+ pins = "gpio33", "gpio34";
+ function = "qup0_se2";
+ };
+
+ qup_i2c3_data_clk: qup-i2c3-data-clk-state {
+ pins = "gpio25", "gpio26";
+ function = "qup0_se3";
+ };
+
+ qup_i2c4_data_clk: qup-i2c4-data-clk-state {
+ pins = "gpio29", "gpio30";
+ function = "qup0_se4";
+ };
+
+ qup_i2c5_data_clk: qup-i2c5-data-clk-state {
+ pins = "gpio21", "gpio22";
+ function = "qup0_se5";
+ };
+
+ qup_i2c6_data_clk: qup-i2c6-data-clk-state {
+ pins = "gpio80", "gpio81";
+ function = "qup0_se6";
+ };
+
+ qup_i2c8_data_clk: qup-i2c8-data-clk-state {
+ pins = "gpio37", "gpio38";
+ function = "qup1_se0";
+ };
+
+ qup_i2c9_data_clk: qup-i2c9-data-clk-state {
+ pins = "gpio39", "gpio40";
+ function = "qup1_se1";
+ };
+
+ qup_i2c10_data_clk: qup-i2c10-data-clk-state {
+ pins = "gpio84", "gpio85";
+ function = "qup1_se2";
+ };
+
+ qup_i2c11_data_clk: qup-i2c11-data-clk-state {
+ pins = "gpio41", "gpio42";
+ function = "qup1_se3";
+ };
+
+ qup_i2c12_data_clk: qup-i2c12-data-clk-state {
+ pins = "gpio45", "gpio46";
+ function = "qup1_se4";
+ };
+
+ qup_i2c13_data_clk: qup-i2c13-data-clk-state {
+ pins = "gpio49", "gpio50";
+ function = "qup1_se5";
+ };
+
+ qup_i2c14_data_clk: qup-i2c14-data-clk-state {
+ pins = "gpio89", "gpio90";
+ function = "qup1_se6";
+ };
+
+ qup_i2c15_data_clk: qup-i2c15-data-clk-state {
+ pins = "gpio91", "gpio92";
+ function = "qup1_se7";
+ };
+
+ qup_i2c16_data_clk: qup-i2c16-data-clk-state {
+ pins = "gpio10", "gpio11";
+ function = "qup2_se0";
+ };
+
+ qup_spi0_data_clk: qup-spi0-data-clk-state {
+ pins = "gpio17", "gpio18", "gpio19";
+ function = "qup0_se0";
+ };
+
+ qup_spi0_cs: qup-spi0-cs-state {
+ pins = "gpio20";
+ function = "qup0_se0";
+ };
+
+ qup_spi0_cs_gpio: qup-spi0-cs-gpio-state {
+ pins = "gpio20";
+ function = "gpio";
+ };
+
+ qup_spi1_data_clk: qup-spi1-data-clk-state {
+ pins = "gpio19", "gpio20", "gpio17";
+ function = "qup0_se1";
+ };
+
+ qup_spi1_cs: qup-spi1-cs-state {
+ pins = "gpio18";
+ function = "qup0_se1";
+ };
+
+ qup_spi1_cs_gpio: qup-spi1-cs-gpio-state {
+ pins = "gpio18";
+ function = "gpio";
+ };
+
+ qup_spi2_data_clk: qup-spi2-data-clk-state {
+ pins = "gpio33", "gpio34", "gpio35";
+ function = "qup0_se2";
+ };
+
+ qup_spi2_cs: qup-spi2-cs-state {
+ pins = "gpio36";
+ function = "qup0_se2";
+ };
+
+ qup_spi2_cs_gpio: qup-spi2-cs-gpio-state {
+ pins = "gpio36";
+ function = "gpio";
+ };
+
+ qup_spi3_data_clk: qup-spi3-data-clk-state {
+ pins = "gpio25", "gpio26", "gpio27";
+ function = "qup0_se3";
+ };
+
+ qup_spi3_cs: qup-spi3-cs-state {
+ pins = "gpio28";
+ function = "qup0_se3";
+ };
+
+ qup_spi3_cs_gpio: qup-spi3-cs-gpio-state {
+ pins = "gpio28";
+ function = "gpio";
+ };
+
+ qup_spi4_data_clk: qup-spi4-data-clk-state {
+ pins = "gpio29", "gpio30", "gpio31";
+ function = "qup0_se4";
+ };
+
+ qup_spi4_cs: qup-spi4-cs-state {
+ pins = "gpio32";
+ function = "qup0_se4";
+ };
+
+ qup_spi4_cs_gpio: qup-spi4-cs-gpio-state {
+ pins = "gpio32";
+ function = "gpio";
+ };
+
+ qup_spi5_data_clk: qup-spi5-data-clk-state {
+ pins = "gpio21", "gpio22", "gpio23";
+ function = "qup0_se5";
+ };
+
+ qup_spi5_cs: qup-spi5-cs-state {
+ pins = "gpio24";
+ function = "qup0_se5";
+ };
+
+ qup_spi5_cs_gpio: qup-spi5-cs-gpio-state {
+ pins = "gpio24";
+ function = "gpio";
+ };
+
+ qup_spi6_data_clk: qup-spi6-data-clk-state {
+ pins = "gpio80", "gpio81", "gpio82";
+ function = "qup0_se6";
+ };
+
+ qup_spi6_cs: qup-spi6-cs-state {
+ pins = "gpio83";
+ function = "qup0_se6";
+ };
+
+ qup_spi6_cs_gpio: qup-spi6-cs-gpio-state {
+ pins = "gpio83";
+ function = "gpio";
+ };
+
+ qup_spi8_data_clk: qup-spi8-data-clk-state {
+ pins = "gpio37", "gpio38", "gpio39";
+ function = "qup1_se0";
+ };
+
+ qup_spi8_cs: qup-spi8-cs-state {
+ pins = "gpio40";
+ function = "qup1_se0";
+ };
+
+ qup_spi8_cs_gpio: qup-spi8-cs-gpio-state {
+ pins = "gpio40";
+ function = "gpio";
+ };
+
+ qup_spi9_data_clk: qup-spi9-data-clk-state {
+ pins = "gpio39", "gpio40", "gpio37";
+ function = "qup1_se1";
+ };
+
+ qup_spi9_cs: qup-spi9-cs-state {
+ pins = "gpio38";
+ function = "qup1_se1";
+ };
+
+ qup_spi9_cs_gpio: qup-spi9-cs-gpio-state {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ qup_spi10_data_clk: qup-spi10-data-clk-state {
+ pins = "gpio84", "gpio85", "gpio86";
+ function = "qup1_se2";
+ };
+
+ qup_spi10_cs: qup-spi10-cs-state {
+ pins = "gpio87";
+ function = "qup1_se2";
+ };
+
+ qup_spi10_cs_gpio: qup-spi10-cs-gpio-state {
+ pins = "gpio87";
+ function = "gpio";
+ };
+
+ qup_spi12_data_clk: qup-spi12-data-clk-state {
+ pins = "gpio45", "gpio46", "gpio47";
+ function = "qup1_se4";
+ };
+
+ qup_spi12_cs: qup-spi12-cs-state {
+ pins = "gpio48";
+ function = "qup1_se4";
+ };
+
+ qup_spi12_cs_gpio: qup-spi12-cs-gpio-state {
+ pins = "gpio48";
+ function = "gpio";
+ };
+
+ qup_spi13_data_clk: qup-spi13-data-clk-state {
+ pins = "gpio49", "gpio50", "gpio51";
+ function = "qup1_se5";
+ };
+
+ qup_spi13_cs: qup-spi13-cs-state {
+ pins = "gpio52";
+ function = "qup1_se5";
+ };
+
+ qup_spi13_cs_gpio: qup-spi13-cs-gpio-state {
+ pins = "gpio52";
+ function = "gpio";
+ };
+
+ qup_spi14_data_clk: qup-spi14-data-clk-state {
+ pins = "gpio89", "gpio90", "gpio91";
+ function = "qup1_se6";
+ };
+
+ qup_spi14_cs: qup-spi14-cs-state {
+ pins = "gpio92";
+ function = "qup1_se6";
+ };
+
+ qup_spi14_cs_gpio: qup-spi14-cs-gpio-state {
+ pins = "gpio92";
+ function = "gpio";
+ };
+
+ qup_spi15_data_clk: qup-spi15-data-clk-state {
+ pins = "gpio91", "gpio92", "gpio89";
+ function = "qup1_se7";
+ };
+
+ qup_spi15_cs: qup-spi15-cs-state {
+ pins = "gpio90";
+ function = "qup1_se7";
+ };
+
+ qup_spi15_cs_gpio: qup-spi15-cs-gpio-state {
+ pins = "gpio90";
+ function = "gpio";
+ };
+
+ qup_spi16_data_clk: qup-spi16-data-clk-state {
+ pins = "gpio10", "gpio11", "gpio12";
+ function = "qup2_se0";
+ };
+
+ qup_spi16_cs: qup-spi16-cs-state {
+ pins = "gpio13";
+ function = "qup2_se0";
+ };
+
+ qup_spi16_cs_gpio: qup-spi16-cs-gpio-state {
+ pins = "gpio13";
+ function = "gpio";
+ };
+
+ qup_uart0_cts: qup-uart0-cts-state {
+ pins = "gpio17";
+ function = "qup0_se0";
+ };
+
+ qup_uart0_rts: qup-uart0-rts-state {
+ pins = "gpio18";
+ function = "qup0_se0";
+ };
+
+ qup_uart0_tx: qup-uart0-tx-state {
+ pins = "gpio19";
+ function = "qup0_se0";
+ };
+
+ qup_uart0_rx: qup-uart0-rx-state {
+ pins = "gpio20";
+ function = "qup0_se0";
+ };
+
+ qup_uart1_cts: qup-uart1-cts-state {
+ pins = "gpio19";
+ function = "qup0_se1";
+ };
+
+ qup_uart1_rts: qup-uart1-rts-state {
+ pins = "gpio20";
+ function = "qup0_se1";
+ };
+
+ qup_uart1_tx: qup-uart1-tx-state {
+ pins = "gpio17";
+ function = "qup0_se1";
+ };
+
+ qup_uart1_rx: qup-uart1-rx-state {
+ pins = "gpio18";
+ function = "qup0_se1";
+ };
+
+ qup_uart2_cts: qup-uart2-cts-state {
+ pins = "gpio33";
+ function = "qup0_se2";
+ };
+
+ qup_uart2_rts: qup-uart2-rts-state {
+ pins = "gpio34";
+ function = "qup0_se2";
+ };
+
+ qup_uart2_tx: qup-uart2-tx-state {
+ pins = "gpio35";
+ function = "qup0_se2";
+ };
+
+ qup_uart2_rx: qup-uart2-rx-state {
+ pins = "gpio36";
+ function = "qup0_se2";
+ };
+
+ qup_uart3_cts: qup-uart3-cts-state {
+ pins = "gpio25";
+ function = "qup0_se3";
+ };
+
+ qup_uart3_rts: qup-uart3-rts-state {
+ pins = "gpio26";
+ function = "qup0_se3";
+ };
+
+ qup_uart3_tx: qup-uart3-tx-state {
+ pins = "gpio27";
+ function = "qup0_se3";
+ };
+
+ qup_uart3_rx: qup-uart3-rx-state {
+ pins = "gpio28";
+ function = "qup0_se3";
+ };
+
+ qup_uart4_cts: qup-uart4-cts-state {
+ pins = "gpio29";
+ function = "qup0_se4";
+ };
+
+ qup_uart4_rts: qup-uart4-rts-state {
+ pins = "gpio30";
+ function = "qup0_se4";
+ };
+
+ qup_uart4_tx: qup-uart4-tx-state {
+ pins = "gpio31";
+ function = "qup0_se4";
+ };
+
+ qup_uart4_rx: qup-uart4-rx-state {
+ pins = "gpio32";
+ function = "qup0_se4";
+ };
+
+ qup_uart5_cts: qup-uart5-cts-state {
+ pins = "gpio21";
+ function = "qup0_se5";
+ };
+
+ qup_uart5_rts: qup-uart5-rts-state {
+ pins = "gpio22";
+ function = "qup0_se5";
+ };
+
+ qup_uart5_tx: qup-uart5-tx-state {
+ pins = "gpio23";
+ function = "qup0_se5";
+ };
+
+ qup_uart5_rx: qup-uart5-rx-state {
+ pins = "gpio23";
+ function = "qup0_se5";
+ };
+
+ qup_uart6_cts: qup-uart6-cts-state {
+ pins = "gpio80";
+ function = "qup0_se6";
+ };
+
+ qup_uart6_rts: qup-uart6-rts-state {
+ pins = "gpio81";
+ function = "qup0_se6";
+ };
+
+ qup_uart6_tx: qup-uart6-tx-state {
+ pins = "gpio82";
+ function = "qup0_se6";
+ };
+
+ qup_uart6_rx: qup-uart6-rx-state {
+ pins = "gpio83";
+ function = "qup0_se6";
+ };
+
+ qup_uart7_tx: qup-uart7-tx-state {
+ pins = "gpio43";
+ function = "qup0_se7";
+ };
+
+ qup_uart7_rx: qup-uart7-rx-state {
+ pins = "gpio44";
function = "qup0_se7";
};
+
+ qup_uart8_cts: qup-uart8-cts-state {
+ pins = "gpio37";
+ function = "qup1_se0";
+ };
+
+ qup_uart8_rts: qup-uart8-rts-state {
+ pins = "gpio38";
+ function = "qup1_se0";
+ };
+
+ qup_uart8_tx: qup-uart8-tx-state {
+ pins = "gpio39";
+ function = "qup1_se0";
+ };
+
+ qup_uart8_rx: qup-uart8-rx-state {
+ pins = "gpio40";
+ function = "qup1_se0";
+ };
+
+ qup_uart9_cts: qup-uart9-cts-state {
+ pins = "gpio39";
+ function = "qup1_se1";
+ };
+
+ qup_uart9_rts: qup-uart9-rts-state {
+ pins = "gpio40";
+ function = "qup1_se1";
+ };
+
+ qup_uart9_tx: qup-uart9-tx-state {
+ pins = "gpio37";
+ function = "qup1_se1";
+ };
+
+ qup_uart9_rx: qup-uart9-rx-state {
+ pins = "gpio38";
+ function = "qup1_se1";
+ };
+
+ qup_uart10_cts: qup-uart10-cts-state {
+ pins = "gpio84";
+ function = "qup1_se2";
+ };
+
+ qup_uart10_rts: qup-uart10-rts-state {
+ pins = "gpio84";
+ function = "qup1_se2";
+ };
+
+ qup_uart10_tx: qup-uart10-tx-state {
+ pins = "gpio85";
+ function = "qup1_se2";
+ };
+
+ qup_uart10_rx: qup-uart10-rx-state {
+ pins = "gpio87";
+ function = "qup1_se2";
+ };
+
+ qup_uart11_tx: qup-uart11-tx-state {
+ pins = "gpio41";
+ function = "qup1_se3";
+ };
+
+ qup_uart11_rx: qup-uart11-rx-state {
+ pins = "gpio42";
+ function = "qup1_se3";
+ };
+
+ qup_uart12_cts: qup-uart12-cts-state {
+ pins = "gpio45";
+ function = "qup1_se4";
+ };
+
+ qup_uart12_rts: qup-uart12-rts-state {
+ pins = "gpio46";
+ function = "qup1_se4";
+ };
+
+ qup_uart12_tx: qup-uart12-tx-state {
+ pins = "gpio47";
+ function = "qup1_se4";
+ };
+
+ qup_uart12_rx: qup-uart12-rx-state {
+ pins = "gpio48";
+ function = "qup1_se4";
+ };
+
+ qup_uart13_cts: qup-uart13-cts-state {
+ pins = "gpio49";
+ function = "qup1_se5";
+ };
+
+ qup_uart13_rts: qup-uart13-rts-state {
+ pins = "gpio50";
+ function = "qup1_se5";
+ };
+
+ qup_uart13_tx: qup-uart13-tx-state {
+ pins = "gpio51";
+ function = "qup1_se5";
+ };
+
+ qup_uart13_rx: qup-uart13-rx-state {
+ pins = "gpio52";
+ function = "qup1_se5";
+ };
+
+ qup_uart14_cts: qup-uart14-cts-state {
+ pins = "gpio89";
+ function = "qup1_se6";
+ };
+
+ qup_uart14_rts: qup-uart14-rts-state {
+ pins = "gpio90";
+ function = "qup1_se6";
+ };
+
+ qup_uart14_tx: qup-uart14-tx-state {
+ pins = "gpio91";
+ function = "qup1_se6";
+ };
+
+ qup_uart14_rx: qup-uart14-rx-state {
+ pins = "gpio92";
+ function = "qup1_se6";
+ };
+
+ qup_uart15_cts: qup-uart15-cts-state {
+ pins = "gpio91";
+ function = "qup1_se7";
+ };
+
+ qup_uart15_rts: qup-uart15-rts-state {
+ pins = "gpio92";
+ function = "qup1_se7";
+ };
+
+ qup_uart15_tx: qup-uart15-tx-state {
+ pins = "gpio89";
+ function = "qup1_se7";
+ };
+
+ qup_uart15_rx: qup-uart15-rx-state {
+ pins = "gpio90";
+ function = "qup1_se7";
+ };
+
+ qup_uart16_cts: qup-uart16-cts-state {
+ pins = "gpio10";
+ function = "qup2_se0";
+ };
+
+ qup_uart16_rts: qup-uart16-rts-state {
+ pins = "gpio11";
+ function = "qup2_se0";
+ };
+
+ qup_uart16_tx: qup-uart16-tx-state {
+ pins = "gpio12";
+ function = "qup2_se0";
+ };
+
+ qup_uart16_rx: qup-uart16-rx-state {
+ pins = "gpio13";
+ function = "qup2_se0";
+ };
};
sram: sram@146d8000 {
@@ -3199,6 +5127,81 @@
<GIC_SPI 895 IRQ_TYPE_LEVEL_HIGH>;
};
+ pcie_smmu: iommu@15200000 {
+ compatible = "qcom,qcs8300-smmu-500", "qcom,smmu-500", "arm,mmu-500";
+ reg = <0x0 0x15200000 0x0 0x80000>;
+ #iommu-cells = <2>;
+ #global-interrupts = <2>;
+ dma-coherent;
+
+ interrupts = <GIC_SPI 920 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 921 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 925 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 926 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 927 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 928 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 950 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 951 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 952 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 953 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 954 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 955 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 956 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 957 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 958 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 885 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 886 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 887 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 888 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 820 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 822 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 823 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 452 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 840 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 841 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 842 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 843 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 844 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 845 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 846 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 847 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 848 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 849 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 802 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 803 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 804 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 805 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 806 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 807 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 808 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 809 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 810 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 811 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 812 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 813 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 814 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 836 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 837 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 838 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 839 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 854 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 855 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 856 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 790 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 791 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 792 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 793 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 794 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 795 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 796 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 639 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 640 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
intc: interrupt-controller@17a00000 {
compatible = "arm,gic-v3";
reg = <0x0 0x17a00000 0x0 0x10000>,
@@ -3359,6 +5362,28 @@
};
};
+ cpufreq_hw: cpufreq@18591000 {
+ compatible = "qcom,qcs8300-cpufreq-epss", "qcom,cpufreq-epss";
+ reg = <0x0 0x18591000 0x0 0x1000>,
+ <0x0 0x18593000 0x0 0x1000>,
+ <0x0 0x18594000 0x0 0x1000>;
+ reg-names = "freq-domain0",
+ "freq-domain1",
+ "freq-domain2";
+
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dcvsh-irq-0",
+ "dcvsh-irq-1",
+ "dcvsh-irq-2";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
+ clock-names = "xo", "alternate";
+
+ #freq-domain-cells = <1>;
+ };
+
remoteproc_gpdsp: remoteproc@20c00000 {
compatible = "qcom,qcs8300-gpdsp-pas", "qcom,sa8775p-gpdsp0-pas";
reg = <0x0 0x20c00000 0x0 0x10000>;
diff --git a/arch/arm64/boot/dts/qcom/qdu1000.dtsi b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
index f973aa8f7477..846e5e5899aa 100644
--- a/arch/arm64/boot/dts/qcom/qdu1000.dtsi
+++ b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
@@ -47,7 +47,7 @@
enable-method = "psci";
power-domains = <&cpu_pd0>;
power-domain-names = "psci";
- qcom,freq-domains = <&cpufreq_hw 0>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
next-level-cache = <&l2_0>;
l2_0: l2-cache {
compatible = "cache";
@@ -70,7 +70,7 @@
enable-method = "psci";
power-domains = <&cpu_pd1>;
power-domain-names = "psci";
- qcom,freq-domains = <&cpufreq_hw 0>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
next-level-cache = <&l2_100>;
l2_100: l2-cache {
compatible = "cache";
@@ -88,7 +88,7 @@
enable-method = "psci";
power-domains = <&cpu_pd2>;
power-domain-names = "psci";
- qcom,freq-domains = <&cpufreq_hw 0>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
next-level-cache = <&l2_200>;
l2_200: l2-cache {
compatible = "cache";
@@ -106,7 +106,7 @@
enable-method = "psci";
power-domains = <&cpu_pd3>;
power-domain-names = "psci";
- qcom,freq-domains = <&cpufreq_hw 0>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
next-level-cache = <&l2_300>;
l2_300: l2-cache {
compatible = "cache";
@@ -1022,6 +1022,7 @@
iommus = <&apps_smmu 0xc0 0x0>;
snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,dis-u1-entry-quirk;
snps,dis-u2-entry-quirk;
diff --git a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
index 7a789b41c2f1..b2e0fc5501c1 100644
--- a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
+++ b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
@@ -15,6 +15,7 @@
aliases {
serial0 = &uart4;
+ serial1 = &uart3;
sdhc1 = &sdhc_1;
sdhc2 = &sdhc_2;
};
@@ -549,6 +550,66 @@
};
&tlmm {
+ uart3_default: uart3-default-state {
+ cts-pins {
+ pins = "gpio8";
+ function = "qup3";
+ drive-strength = <2>;
+ bias-bus-hold;
+ };
+
+ rts-pins {
+ pins = "gpio9";
+ function = "qup3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tx-pins {
+ pins = "gpio10";
+ function = "qup3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rx-pins {
+ pins = "gpio11";
+ function = "qup3";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ uart3_sleep: uart3-sleep-state {
+ cts-pins {
+ pins = "gpio8";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-bus-hold;
+ };
+
+ rts-pins {
+ pins = "gpio9";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ tx-pins {
+ pins = "gpio10";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ rx-pins {
+ pins = "gpio11";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
lt9611_rst_pin: lt9611-rst-state {
pins = "gpio41";
function = "gpio";
@@ -584,6 +645,28 @@
};
};
+&uart3 {
+ /delete-property/ interrupts;
+ interrupts-extended = <&intc GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ <&tlmm 11 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart3_default>;
+ pinctrl-1 = <&uart3_sleep>;
+ pinctrl-names = "default", "sleep";
+
+ status = "okay";
+
+ bluetooth {
+ compatible = "qcom,wcn3950-bt";
+
+ vddio-supply = <&pm4125_l15>;
+ vddxo-supply = <&pm4125_l13>;
+ vddrf-supply = <&pm4125_l10>;
+ vddch0-supply = <&pm4125_l22>;
+ enable-gpios = <&tlmm 87 GPIO_ACTIVE_HIGH>;
+ max-speed = <3200000>;
+ };
+};
+
/* UART connected to the Micro-USB port via a FTDI chip */
&uart4 {
compatible = "qcom,geni-debug-uart";
@@ -620,7 +703,7 @@
vdd-1.8-xo-supply = <&pm4125_l13>;
vdd-1.3-rfa-supply = <&pm4125_l10>;
vdd-3.3-ch0-supply = <&pm4125_l22>;
- qcom,ath10k-calibration-variant = "Thundercomm_RB1";
+ qcom,calibration-variant = "Thundercomm_RB1";
firmware-name = "qcm2290";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
index 52db18847803..a37860175d27 100644
--- a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
+++ b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
@@ -110,8 +110,6 @@
pinctrl-0 = <&lpi_i2s2_active>;
pinctrl-names = "default";
model = "Qualcomm-RB2-WSA8815-Speakers-DMIC0";
- audio-routing = "MM_DL1", "MultiMedia1 Playback",
- "MM_DL2", "MultiMedia2 Playback";
mm1-dai-link {
link-name = "MultiMedia1";
@@ -749,7 +747,7 @@
vdd-1.8-xo-supply = <&vreg_l16a_1p3>;
vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
vdd-3.3-ch0-supply = <&vreg_l23a_3p3>;
- qcom,ath10k-calibration-variant = "Thundercomm_RB2";
+ qcom,calibration-variant = "Thundercomm_RB2";
firmware-name = "qrb4210";
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5-vision-mezzanine.dtso b/arch/arm64/boot/dts/qcom/qrb5165-rb5-vision-mezzanine.dtso
index ae256c713a36..5fe331923dd3 100644
--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5-vision-mezzanine.dtso
+++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5-vision-mezzanine.dtso
@@ -9,17 +9,6 @@
#include <dt-bindings/clock/qcom,camcc-sm8250.h>
#include <dt-bindings/gpio/gpio.h>
-/ {
- reserved-memory {
- linux,cma {
- compatible = "shared-dma-pool";
- size = <0x0 0x8000000>;
- reusable;
- linux,cma-default;
- };
- };
-};
-
&camcc {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
index 7afa5acac3fc..33ecbc81997c 100644
--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
@@ -1008,15 +1008,21 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
+ };
+
+ dai@3 {
+ direction = <Q6ASM_DAI_RX>;
+ is-compress-dai;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA4>;
};
};
@@ -1032,6 +1038,12 @@
no-mmc;
};
+&slpi {
+ firmware-name = "qcom/sm8250/Thundercomm/RB5/slpi.mbn";
+
+ status = "okay";
+};
+
&sound {
compatible = "qcom,qrb5165-rb5-sndcard";
pinctrl-0 = <&tert_mi2s_active>;
@@ -1041,10 +1053,7 @@
"SpkrLeft IN", "WSA_SPK1 OUT",
"SpkrRight IN", "WSA_SPK2 OUT",
"VA DMIC0", "vdd-micb",
- "VA DMIC1", "vdd-micb",
- "MM_DL1", "MultiMedia1 Playback",
- "MM_DL2", "MultiMedia2 Playback",
- "MultiMedia3 Capture", "MM_UL3";
+ "VA DMIC1", "vdd-micb";
mm1-dai-link {
link-name = "MultiMedia1";
@@ -1067,6 +1076,14 @@
};
};
+ mm4-dai-link {
+ link-name = "MultiMedia4";
+
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA4>;
+ };
+ };
+
hdmi-dai-link {
link-name = "HDMI Playback";
cpu {
diff --git a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
index 9e9c7f81096b..388d5ecee949 100644
--- a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
+++ b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
@@ -326,7 +326,6 @@
phy-handle = <&rgmii_phy>;
phy-mode = "rgmii";
- max-speed = <1000>;
mdio {
compatible = "snps,dwmac-mdio";
@@ -383,12 +382,12 @@
&remoteproc_adsp {
status = "okay";
- firmware-name = "qcom/sa8155p/adsp.mdt";
+ firmware-name = "qcom/sa8155p/adsp.mbn";
};
&remoteproc_cdsp {
status = "okay";
- firmware-name = "qcom/sa8155p/cdsp.mdt";
+ firmware-name = "qcom/sa8155p/cdsp.mbn";
};
&sdhc_2 {
diff --git a/arch/arm64/boot/dts/qcom/sa8540p-ride.dts b/arch/arm64/boot/dts/qcom/sa8540p-ride.dts
index 177b9dad6ff7..44177e9b64b5 100644
--- a/arch/arm64/boot/dts/qcom/sa8540p-ride.dts
+++ b/arch/arm64/boot/dts/qcom/sa8540p-ride.dts
@@ -155,7 +155,6 @@
snps,mtl-rx-config = <&ethernet0_mtl_rx_setup>;
snps,mtl-tx-config = <&ethernet0_mtl_tx_setup>;
- max-speed = <1000>;
phy-handle = <&rgmii_phy>;
phy-mode = "rgmii-txid";
@@ -225,7 +224,6 @@
ethernet0_mtl_tx_setup: tx-queues-config {
snps,tx-queues-to-use = <1>;
- snps,tx-sched-sp;
queue0 {
snps,dcb-algorithm;
@@ -257,7 +255,6 @@
snps,mtl-rx-config = <&ethernet1_mtl_rx_setup>;
snps,mtl-tx-config = <&ethernet1_mtl_tx_setup>;
- max-speed = <1000>;
phy-mode = "rgmii-txid";
pinctrl-names = "default";
@@ -302,7 +299,6 @@
ethernet1_mtl_tx_setup: tx-queues-config {
snps,tx-queues-to-use = <1>;
- snps,tx-sched-sp;
queue0 {
snps,dcb-algorithm;
diff --git a/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
index 175f8b1e3b2d..3ae416ab66e8 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
@@ -411,7 +411,6 @@
mtl_tx_setup: tx-queues-config {
snps,tx-queues-to-use = <4>;
- snps,tx-sched-sp;
queue0 {
snps,dcb-algorithm;
@@ -480,7 +479,6 @@
mtl_tx_setup1: tx-queues-config {
snps,tx-queues-to-use = <4>;
- snps,tx-sched-sp;
queue0 {
snps,dcb-algorithm;
@@ -510,15 +508,11 @@
&i2c11 {
clock-frequency = <400000>;
- pinctrl-0 = <&qup_i2c11_default>;
- pinctrl-names = "default";
status = "okay";
};
&i2c18 {
clock-frequency = <400000>;
- pinctrl-0 = <&qup_i2c18_default>;
- pinctrl-names = "default";
status = "okay";
};
@@ -665,6 +659,53 @@
status = "okay";
};
+&qup_spi16_default {
+ drive-strength = <6>;
+ bias-disable;
+};
+
+&qup_i2c11_default {
+ drive-strength = <2>;
+ bias-pull-up;
+};
+
+&qup_i2c18_default {
+ drive-strength = <2>;
+ bias-pull-up;
+};
+
+&qup_uart12_cts {
+ bias-disable;
+};
+
+&qup_uart12_rts {
+ bias-pull-down;
+};
+
+&qup_uart12_tx {
+ bias-pull-up;
+};
+
+&qup_uart12_rx {
+ bias-pull-down;
+};
+
+&qup_uart17_cts {
+ bias-disable;
+};
+
+&qup_uart17_rts {
+ bias-pull-down;
+};
+
+&qup_uart17_tx {
+ bias-pull-up;
+};
+
+&qup_uart17_rx {
+ bias-pull-down;
+};
+
&serdes0 {
phy-supply = <&vreg_l5a>;
status = "okay";
@@ -680,8 +721,6 @@
};
&spi16 {
- pinctrl-0 = <&qup_spi16_default>;
- pinctrl-names = "default";
status = "okay";
};
@@ -714,84 +753,6 @@
};
};
- qup_uart10_default: qup-uart10-state {
- pins = "gpio46", "gpio47";
- function = "qup1_se3";
- };
-
- qup_spi16_default: qup-spi16-state {
- pins = "gpio86", "gpio87", "gpio88", "gpio89";
- function = "qup2_se2";
- drive-strength = <6>;
- bias-disable;
- };
-
- qup_i2c11_default: qup-i2c11-state {
- pins = "gpio48", "gpio49";
- function = "qup1_se4";
- drive-strength = <2>;
- bias-pull-up;
- };
-
- qup_i2c18_default: qup-i2c18-state {
- pins = "gpio95", "gpio96";
- function = "qup2_se4";
- drive-strength = <2>;
- bias-pull-up;
- };
-
- qup_uart12_default: qup-uart12-state {
- qup_uart12_cts: qup-uart12-cts-pins {
- pins = "gpio52";
- function = "qup1_se5";
- bias-disable;
- };
-
- qup_uart12_rts: qup-uart12-rts-pins {
- pins = "gpio53";
- function = "qup1_se5";
- bias-pull-down;
- };
-
- qup_uart12_tx: qup-uart12-tx-pins {
- pins = "gpio54";
- function = "qup1_se5";
- bias-pull-up;
- };
-
- qup_uart12_rx: qup-uart12-rx-pins {
- pins = "gpio55";
- function = "qup1_se5";
- bias-pull-down;
- };
- };
-
- qup_uart17_default: qup-uart17-state {
- qup_uart17_cts: qup-uart17-cts-pins {
- pins = "gpio91";
- function = "qup2_se3";
- bias-disable;
- };
-
- qup_uart17_rts: qup0-uart17-rts-pins {
- pins = "gpio92";
- function = "qup2_se3";
- bias-pull-down;
- };
-
- qup_uart17_tx: qup0-uart17-tx-pins {
- pins = "gpio93";
- function = "qup2_se3";
- bias-pull-up;
- };
-
- qup_uart17_rx: qup0-uart17-rx-pins {
- pins = "gpio94";
- function = "qup2_se3";
- bias-pull-down;
- };
- };
-
pcie0_default_state: pcie0-default-state {
perst-pins {
pins = "gpio2";
@@ -878,7 +839,7 @@
compatible = "pci17cb,1101";
reg = <0x10000 0x0 0x0 0x0 0x0>;
- qcom,ath11k-calibration-variant = "QC_SA8775P_Ride";
+ qcom,calibration-variant = "QC_SA8775P_Ride";
vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
vddaon-supply = <&vreg_pmu_aon_0p59>;
@@ -919,8 +880,6 @@
&uart10 {
compatible = "qcom,geni-debug-uart";
- pinctrl-0 = <&qup_uart10_default>;
- pinctrl-names = "default";
status = "okay";
};
@@ -937,6 +896,7 @@
bluetooth {
compatible = "qcom,wcn6855-bt";
+ firmware-name = "QCA6698/hpnv21", "QCA6698/hpbtfw21.tlv";
vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
vddaon-supply = <&vreg_pmu_aon_0p59>;
diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
index 3394ae2d1300..45f536633f64 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
@@ -471,17 +471,17 @@
cluster_0_pd: power-domain-cluster0 {
#power-domain-cells = <0>;
- power-domains = <&cluster_2_pd>;
domain-idle-states = <&cluster_sleep_gold>;
+ power-domains = <&system_pd>;
};
cluster_1_pd: power-domain-cluster1 {
#power-domain-cells = <0>;
- power-domains = <&cluster_2_pd>;
domain-idle-states = <&cluster_sleep_gold>;
+ power-domains = <&system_pd>;
};
- cluster_2_pd: power-domain-cluster2 {
+ system_pd: power-domain-system {
#power-domain-cells = <0>;
domain-idle-states = <&cluster_sleep_apss_rsc_pc>;
};
@@ -913,6 +913,8 @@
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S0_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c14_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -938,6 +940,8 @@
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S0_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi14_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -961,6 +965,8 @@
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S0_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart14_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -978,6 +984,8 @@
interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S1_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c15_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1003,6 +1011,8 @@
interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S1_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi15_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1026,6 +1036,8 @@
interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S1_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart15_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1043,6 +1055,8 @@
interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S2_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c16_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1066,6 +1080,8 @@
interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S2_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi16_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1091,6 +1107,8 @@
interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S2_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart16_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1108,6 +1126,8 @@
interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S3_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c17_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1133,6 +1153,8 @@
interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S3_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi17_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1156,6 +1178,8 @@
interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S3_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart17_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1171,6 +1195,8 @@
interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S4_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c18_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1198,6 +1224,8 @@
interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S4_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi18_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1221,6 +1249,8 @@
interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S4_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart18_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1238,6 +1268,8 @@
interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S5_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c19_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1263,6 +1295,8 @@
interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S5_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi19_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1286,6 +1320,8 @@
interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S5_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart19_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1303,6 +1339,8 @@
interrupts = <GIC_SPI 834 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S6_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c20_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1328,6 +1366,8 @@
interrupts = <GIC_SPI 834 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S6_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi20_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1351,6 +1391,8 @@
interrupts = <GIC_SPI 834 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP2_S6_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart20_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1404,6 +1446,8 @@
interrupts = <GIC_SPI 550 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c0_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1429,6 +1473,8 @@
interrupts = <GIC_SPI 550 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi0_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1452,6 +1498,8 @@
interrupts = <GIC_SPI 550 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart0_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1469,6 +1517,8 @@
interrupts = <GIC_SPI 551 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c1_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1494,6 +1544,8 @@
interrupts = <GIC_SPI 551 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi1_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1517,6 +1569,8 @@
interrupts = <GIC_SPI 551 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart1_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1534,6 +1588,8 @@
interrupts = <GIC_SPI 529 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c2_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1559,6 +1615,8 @@
interrupts = <GIC_SPI 529 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi2_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1582,6 +1640,8 @@
interrupts = <GIC_SPI 529 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart2_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1599,6 +1659,8 @@
interrupts = <GIC_SPI 530 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c3_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1624,6 +1686,8 @@
interrupts = <GIC_SPI 530 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi3_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1647,6 +1711,8 @@
interrupts = <GIC_SPI 530 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart3_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1664,6 +1730,8 @@
interrupts = <GIC_SPI 531 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c4_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1689,6 +1757,8 @@
interrupts = <GIC_SPI 531 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi4_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1712,6 +1782,8 @@
interrupts = <GIC_SPI 531 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart4_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1729,6 +1801,8 @@
interrupts = <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c5_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1754,6 +1828,8 @@
interrupts = <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi5_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1777,6 +1853,8 @@
interrupts = <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart5_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1829,6 +1907,8 @@
interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c7_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1854,6 +1934,8 @@
interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi7_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1877,6 +1959,8 @@
interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ pinctrl-0 = <&qup_uart7_default>;
+ pinctrl-names = "default";
interconnect-names = "qup-core", "qup-config";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
@@ -1895,6 +1979,8 @@
interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c8_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1920,6 +2006,8 @@
interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi8_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1943,6 +2031,8 @@
interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ pinctrl-0 = <&qup_uart8_default>;
+ pinctrl-names = "default";
interconnect-names = "qup-core", "qup-config";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
@@ -1961,6 +2051,8 @@
interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c9_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -1986,6 +2078,8 @@
interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi9_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -2009,6 +2103,8 @@
interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart9_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -2026,6 +2122,8 @@
interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c10_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -2051,6 +2149,8 @@
interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi10_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -2074,6 +2174,8 @@
interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ pinctrl-0 = <&qup_uart10_default>;
+ pinctrl-names = "default";
interconnect-names = "qup-core", "qup-config";
interconnects = <&clk_virt MASTER_QUP_CORE_1 0
&clk_virt SLAVE_QUP_CORE_1 0>,
@@ -2092,6 +2194,8 @@
interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c11_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -2117,6 +2221,8 @@
interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi11_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -2140,6 +2246,8 @@
interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+ pinctrl-0 = <&qup_uart11_default>;
+ pinctrl-names = "default";
interconnect-names = "qup-core", "qup-config";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
@@ -2158,6 +2266,8 @@
interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c12_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -2183,6 +2293,8 @@
interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi12_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -2206,6 +2318,8 @@
interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_uart12_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -2223,6 +2337,8 @@
interrupts = <GIC_SPI 836 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c13_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -2276,6 +2392,8 @@
interrupts = <GIC_SPI 831 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP3_S0_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_i2c21_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_3 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_3 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -2301,6 +2419,8 @@
interrupts = <GIC_SPI 831 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QUPV3_WRAP3_S0_CLK>;
clock-names = "se";
+ pinctrl-0 = <&qup_spi21_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_3 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_3 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -2325,6 +2445,8 @@
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP3_S0_CLK>;
interconnect-names = "qup-core", "qup-config";
+ pinctrl-0 = <&qup_uart21_default>;
+ pinctrl-names = "default";
interconnects = <&clk_virt MASTER_QUP_CORE_3 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_3 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
@@ -2413,20 +2535,40 @@
interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
qcom,ee = <0>;
+ qcom,num-ees = <4>;
+ num-channels = <20>;
qcom,controlled-remotely;
iommus = <&apps_smmu 0x480 0x00>,
<&apps_smmu 0x481 0x00>;
};
- crypto: crypto@1dfa000 {
- compatible = "qcom,sa8775p-qce", "qcom,qce";
- reg = <0x0 0x01dfa000 0x0 0x6000>;
- dmas = <&cryptobam 4>, <&cryptobam 5>;
- dma-names = "rx", "tx";
- iommus = <&apps_smmu 0x480 0x00>,
- <&apps_smmu 0x481 0x00>;
- interconnects = <&aggre2_noc MASTER_CRYPTO_CORE0 0 &mc_virt SLAVE_EBI1 0>;
- interconnect-names = "memory";
+ ctcu@4001000 {
+ compatible = "qcom,sa8775p-ctcu";
+ reg = <0x0 0x04001000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ ctcu_in0: endpoint {
+ remote-endpoint = <&etr0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ ctcu_in1: endpoint {
+ remote-endpoint = <&etr1_out>;
+ };
+ };
+ };
};
stm: stm@4002000 {
@@ -2633,6 +2775,122 @@
};
};
+ replicator@4046000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0x0 0x04046000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ qdss_rep_in: endpoint {
+ remote-endpoint = <&swao_rep_out0>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ qdss_rep_out0: endpoint {
+ remote-endpoint = <&etr_rep_in>;
+ };
+ };
+ };
+ };
+
+ tmc_etr: tmc@4048000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x0 0x04048000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ iommus = <&apps_smmu 0x04c0 0x00>;
+
+ arm,scatter-gather;
+
+ in-ports {
+ port {
+ etr0_in: endpoint {
+ remote-endpoint = <&etr_rep_out0>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ etr0_out: endpoint {
+ remote-endpoint = <&ctcu_in0>;
+ };
+ };
+ };
+ };
+
+ replicator@404e000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0x0 0x0404e000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ etr_rep_in: endpoint {
+ remote-endpoint = <&qdss_rep_out0>;
+ };
+ };
+ };
+
+ out-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ etr_rep_out0: endpoint {
+ remote-endpoint = <&etr0_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ etr_rep_out1: endpoint {
+ remote-endpoint = <&etr1_in>;
+ };
+ };
+ };
+ };
+
+ tmc_etr1: tmc@404f000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x0 0x0404f000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ iommus = <&apps_smmu 0x04a0 0x40>;
+
+ arm,scatter-gather;
+ arm,buffer-size = <0x400000>;
+
+ in-ports {
+ port {
+ etr1_in: endpoint {
+ remote-endpoint = <&etr_rep_out1>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ etr1_out: endpoint {
+ remote-endpoint = <&ctcu_in1>;
+ };
+ };
+ };
+ };
+
funnel@4b04000 {
compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
reg = <0x0 0x4b04000 0x0 0x1000>;
@@ -2708,6 +2966,14 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+
+ swao_rep_out0: endpoint {
+ remote-endpoint = <&qdss_rep_in>;
+ };
+ };
+
port@1 {
reg = <1>;
swao_rep_out1: endpoint {
@@ -3815,10 +4081,10 @@
reg-names = "mdss";
/* same path used twice */
- interconnects = <&mmss_noc MASTER_MDP0 QCOM_ICC_TAG_ACTIVE_ONLY
- &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>,
- <&mmss_noc MASTER_MDP1 QCOM_ICC_TAG_ACTIVE_ONLY
- &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ interconnects = <&mmss_noc MASTER_MDP0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&mmss_noc MASTER_MDP1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
&config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "mdp0-mem",
@@ -3848,7 +4114,7 @@
mdss0_mdp: display-controller@ae01000 {
compatible = "qcom,sa8775p-dpu";
reg = <0x0 0x0ae01000 0x0 0x8f000>,
- <0x0 0x0aeb0000 0x0 0x2008>;
+ <0x0 0x0aeb0000 0x0 0x3000>;
reg-names = "mdp", "vbif";
clocks = <&gcc GCC_DISP_HF_AXI_CLK>,
@@ -4268,6 +4534,634 @@
#interrupt-cells = <2>;
gpio-ranges = <&tlmm 0 0 149>;
wakeup-parent = <&pdc>;
+
+ qup_i2c0_default: qup-i2c0-state {
+ pins = "gpio20", "gpio21";
+ function = "qup0_se0";
+ };
+
+ qup_i2c1_default: qup-i2c1-state {
+ pins = "gpio24", "gpio25";
+ function = "qup0_se1";
+ };
+
+ qup_i2c2_default: qup-i2c2-state {
+ pins = "gpio36", "gpio37";
+ function = "qup0_se2";
+ };
+
+ qup_i2c3_default: qup-i2c3-state {
+ pins = "gpio28", "gpio29";
+ function = "qup0_se3";
+ };
+
+ qup_i2c4_default: qup-i2c4-state {
+ pins = "gpio32", "gpio33";
+ function = "qup0_se4";
+ };
+
+ qup_i2c5_default: qup-i2c5-state {
+ pins = "gpio36", "gpio37";
+ function = "qup0_se5";
+ };
+
+ qup_i2c7_default: qup-i2c7-state {
+ pins = "gpio40", "gpio41";
+ function = "qup1_se0";
+ };
+
+ qup_i2c8_default: qup-i2c8-state {
+ pins = "gpio42", "gpio43";
+ function = "qup1_se1";
+ };
+
+ qup_i2c9_default: qup-i2c9-state {
+ pins = "gpio46", "gpio47";
+ function = "qup1_se2";
+ };
+
+ qup_i2c10_default: qup-i2c10-state {
+ pins = "gpio44", "gpio45";
+ function = "qup1_se3";
+ };
+
+ qup_i2c11_default: qup-i2c11-state {
+ pins = "gpio48", "gpio49";
+ function = "qup1_se4";
+ };
+
+ qup_i2c12_default: qup-i2c12-state {
+ pins = "gpio52", "gpio53";
+ function = "qup1_se5";
+ };
+
+ qup_i2c13_default: qup-i2c13-state {
+ pins = "gpio56", "gpio57";
+ function = "qup1_se6";
+ };
+
+ qup_i2c14_default: qup-i2c14-state {
+ pins = "gpio80", "gpio81";
+ function = "qup2_se0";
+ };
+
+ qup_i2c15_default: qup-i2c15-state {
+ pins = "gpio84", "gpio85";
+ function = "qup2_se1";
+ };
+
+ qup_i2c16_default: qup-i2c16-state {
+ pins = "gpio86", "gpio87";
+ function = "qup2_se2";
+ };
+
+ qup_i2c17_default: qup-i2c17-state {
+ pins = "gpio91", "gpio92";
+ function = "qup2_se3";
+ };
+
+ qup_i2c18_default: qup-i2c18-state {
+ pins = "gpio95", "gpio96";
+ function = "qup2_se4";
+ };
+
+ qup_i2c19_default: qup-i2c19-state {
+ pins = "gpio99", "gpio100";
+ function = "qup2_se5";
+ };
+
+ qup_i2c20_default: qup-i2c20-state {
+ pins = "gpio97", "gpio98";
+ function = "qup2_se6";
+ };
+
+ qup_i2c21_default: qup-i2c21-state {
+ pins = "gpio13", "gpio14";
+ function = "qup3_se0";
+ };
+
+ qup_spi0_default: qup-spi0-state {
+ pins = "gpio20", "gpio21", "gpio22", "gpio23";
+ function = "qup0_se0";
+ };
+
+ qup_spi1_default: qup-spi1-state {
+ pins = "gpio24", "gpio25", "gpio26", "gpio27";
+ function = "qup0_se1";
+ };
+
+ qup_spi2_default: qup-spi2-state {
+ pins = "gpio36", "gpio37", "gpio38", "gpio39";
+ function = "qup0_se2";
+ };
+
+ qup_spi3_default: qup-spi3-state {
+ pins = "gpio28", "gpio29", "gpio30", "gpio31";
+ function = "qup0_se3";
+ };
+
+ qup_spi4_default: qup-spi4-state {
+ pins = "gpio32", "gpio33", "gpio34", "gpio35";
+ function = "qup0_se4";
+ };
+
+ qup_spi5_default: qup-spi5-state {
+ pins = "gpio36", "gpio37", "gpio38", "gpio39";
+ function = "qup0_se5";
+ };
+
+ qup_spi7_default: qup-spi7-state {
+ pins = "gpio40", "gpio41", "gpio42", "gpio43";
+ function = "qup1_se0";
+ };
+
+ qup_spi8_default: qup-spi8-state {
+ pins = "gpio42", "gpio43", "gpio40", "gpio41";
+ function = "qup1_se1";
+ };
+
+ qup_spi9_default: qup-spi9-state {
+ pins = "gpio46", "gpio47", "gpio44", "gpio45";
+ function = "qup1_se2";
+ };
+
+ qup_spi10_default: qup-spi10-state {
+ pins = "gpio44", "gpio45", "gpio46", "gpio47";
+ function = "qup1_se3";
+ };
+
+ qup_spi11_default: qup-spi11-state {
+ pins = "gpio48", "gpio49", "gpio50", "gpio51";
+ function = "qup1_se4";
+ };
+
+ qup_spi12_default: qup-spi12-state {
+ pins = "gpio52", "gpio53", "gpio54", "gpio55";
+ function = "qup1_se5";
+ };
+
+ qup_spi14_default: qup-spi14-state {
+ pins = "gpio80", "gpio81", "gpio82", "gpio83";
+ function = "qup2_se0";
+ };
+
+ qup_spi15_default: qup-spi15-state {
+ pins = "gpio84", "gpio85", "gpio99", "gpio100";
+ function = "qup2_se1";
+ };
+
+ qup_spi16_default: qup-spi16-state {
+ pins = "gpio86", "gpio87", "gpio88", "gpio89";
+ function = "qup2_se2";
+ };
+
+ qup_spi17_default: qup-spi17-state {
+ pins = "gpio91", "gpio92", "gpio93", "gpio94";
+ function = "qup2_se3";
+ };
+
+ qup_spi18_default: qup-spi18-state {
+ pins = "gpio95", "gpio96", "gpio97", "gpio98";
+ function = "qup2_se4";
+ };
+
+ qup_spi19_default: qup-spi19-state {
+ pins = "gpio99", "gpio100", "gpio84", "gpio85";
+ function = "qup2_se5";
+ };
+
+ qup_spi20_default: qup-spi20-state {
+ pins = "gpio97", "gpio98", "gpio95", "gpio96";
+ function = "qup2_se6";
+ };
+
+ qup_spi21_default: qup-spi21-state {
+ pins = "gpio13", "gpio14", "gpio15", "gpio16";
+ function = "qup3_se0";
+ };
+
+ qup_uart0_default: qup-uart0-state {
+ qup_uart0_cts: qup-uart0-cts-pins {
+ pins = "gpio20";
+ function = "qup0_se0";
+ };
+
+ qup_uart0_rts: qup-uart0-rts-pins {
+ pins = "gpio21";
+ function = "qup0_se0";
+ };
+
+ qup_uart0_tx: qup-uart0-tx-pins {
+ pins = "gpio22";
+ function = "qup0_se0";
+ };
+
+ qup_uart0_rx: qup-uart0-rx-pins {
+ pins = "gpio23";
+ function = "qup0_se0";
+ };
+ };
+
+ qup_uart1_default: qup-uart1-state {
+ qup_uart1_cts: qup-uart1-cts-pins {
+ pins = "gpio24";
+ function = "qup0_se1";
+ };
+
+ qup_uart1_rts: qup-uart1-rts-pins {
+ pins = "gpio25";
+ function = "qup0_se1";
+ };
+
+ qup_uart1_tx: qup-uart1-tx-pins {
+ pins = "gpio26";
+ function = "qup0_se1";
+ };
+
+ qup_uart1_rx: qup-uart1-rx-pins {
+ pins = "gpio27";
+ function = "qup0_se1";
+ };
+ };
+
+ qup_uart2_default: qup-uart2-state {
+ qup_uart2_cts: qup-uart2-cts-pins {
+ pins = "gpio36";
+ function = "qup0_se2";
+ };
+
+ qup_uart2_rts: qup-uart2-rts-pins {
+ pins = "gpio37";
+ function = "qup0_se2";
+ };
+
+ qup_uart2_tx: qup-uart2-tx-pins {
+ pins = "gpio38";
+ function = "qup0_se2";
+ };
+
+ qup_uart2_rx: qup-uart2-rx-pins {
+ pins = "gpio39";
+ function = "qup0_se2";
+ };
+ };
+
+ qup_uart3_default: qup-uart3-state {
+ qup_uart3_cts: qup-uart3-cts-pins {
+ pins = "gpio28";
+ function = "qup0_se3";
+ };
+
+ qup_uart3_rts: qup-uart3-rts-pins {
+ pins = "gpio29";
+ function = "qup0_se3";
+ };
+
+ qup_uart3_tx: qup-uart3-tx-pins {
+ pins = "gpio30";
+ function = "qup0_se3";
+ };
+
+ qup_uart3_rx: qup-uart3-rx-pins {
+ pins = "gpio31";
+ function = "qup0_se3";
+ };
+ };
+
+ qup_uart4_default: qup-uart4-state {
+ qup_uart4_cts: qup-uart4-cts-pins {
+ pins = "gpio32";
+ function = "qup0_se4";
+ };
+
+ qup_uart4_rts: qup-uart4-rts-pins {
+ pins = "gpio33";
+ function = "qup0_se4";
+ };
+
+ qup_uart4_tx: qup-uart4-tx-pins {
+ pins = "gpio34";
+ function = "qup0_se4";
+ };
+
+ qup_uart4_rx: qup-uart4-rx-pins {
+ pins = "gpio35";
+ function = "qup0_se4";
+ };
+ };
+
+ qup_uart5_default: qup-uart5-state {
+ qup_uart5_cts: qup-uart5-cts-pins {
+ pins = "gpio36";
+ function = "qup0_se5";
+ };
+
+ qup_uart5_rts: qup-uart5-rts-pins {
+ pins = "gpio37";
+ function = "qup0_se5";
+ };
+
+ qup_uart5_tx: qup-uart5-tx-pins {
+ pins = "gpio38";
+ function = "qup0_se5";
+ };
+
+ qup_uart5_rx: qup-uart5-rx-pins {
+ pins = "gpio39";
+ function = "qup0_se5";
+ };
+ };
+
+ qup_uart7_default: qup-uart7-state {
+ qup_uart7_cts: qup-uart7-cts-pins {
+ pins = "gpio40";
+ function = "qup1_se0";
+ };
+
+ qup_uart7_rts: qup-uart7-rts-pins {
+ pins = "gpio41";
+ function = "qup1_se0";
+ };
+
+ qup_uart7_tx: qup-uart7-tx-pins {
+ pins = "gpio42";
+ function = "qup1_se0";
+ };
+
+ qup_uart7_rx: qup-uart7-rx-pins {
+ pins = "gpio43";
+ function = "qup1_se0";
+ };
+ };
+
+ qup_uart8_default: qup-uart8-state {
+ qup_uart8_cts: qup-uart8-cts-pins {
+ pins = "gpio42";
+ function = "qup1_se1";
+ };
+
+ qup_uart8_rts: qup-uart8-rts-pins {
+ pins = "gpio43";
+ function = "qup1_se1";
+ };
+
+ qup_uart8_tx: qup-uart8-tx-pins {
+ pins = "gpio40";
+ function = "qup1_se1";
+ };
+
+ qup_uart8_rx: qup-uart8-rx-pins {
+ pins = "gpio41";
+ function = "qup1_se1";
+ };
+ };
+
+ qup_uart9_default: qup-uart9-state {
+ qup_uart9_cts: qup-uart9-cts-pins {
+ pins = "gpio46";
+ function = "qup1_se2";
+ };
+
+ qup_uart9_rts: qup-uart9-rts-pins {
+ pins = "gpio47";
+ function = "qup1_se2";
+ };
+
+ qup_uart9_tx: qup-uart9-tx-pins {
+ pins = "gpio44";
+ function = "qup1_se2";
+ };
+
+ qup_uart9_rx: qup-uart9-rx-pins {
+ pins = "gpio45";
+ function = "qup1_se2";
+ };
+ };
+
+ qup_uart10_default: qup-uart10-state {
+ pins = "gpio46", "gpio47";
+ function = "qup1_se3";
+ };
+
+ qup_uart11_default: qup-uart11-state {
+ qup_uart11_cts: qup-uart11-cts-pins {
+ pins = "gpio48";
+ function = "qup1_se4";
+ };
+
+ qup_uart11_rts: qup-uart11-rts-pins {
+ pins = "gpio49";
+ function = "qup1_se4";
+ };
+
+ qup_uart11_tx: qup-uart11-tx-pins {
+ pins = "gpio50";
+ function = "qup1_se4";
+ };
+
+ qup_uart11_rx: qup-uart11-rx-pins {
+ pins = "gpio51";
+ function = "qup1_se4";
+ };
+ };
+
+ qup_uart12_default: qup-uart12-state {
+ qup_uart12_cts: qup-uart12-cts-pins {
+ pins = "gpio52";
+ function = "qup1_se5";
+ };
+
+ qup_uart12_rts: qup-uart12-rts-pins {
+ pins = "gpio53";
+ function = "qup1_se5";
+ };
+
+ qup_uart12_tx: qup-uart12-tx-pins {
+ pins = "gpio54";
+ function = "qup1_se5";
+ };
+
+ qup_uart12_rx: qup-uart12-rx-pins {
+ pins = "gpio55";
+ function = "qup1_se5";
+ };
+ };
+
+ qup_uart14_default: qup-uart14-state {
+ qup_uart14_cts: qup-uart14-cts-pins {
+ pins = "gpio80";
+ function = "qup2_se0";
+ };
+
+ qup_uart14_rts: qup-uart14-rts-pins {
+ pins = "gpio81";
+ function = "qup2_se0";
+ };
+
+ qup_uart14_tx: qup-uart14-tx-pins {
+ pins = "gpio82";
+ function = "qup2_se0";
+ };
+
+ qup_uart14_rx: qup-uart14-rx-pins {
+ pins = "gpio83";
+ function = "qup2_se0";
+ };
+ };
+
+ qup_uart15_default: qup-uart15-state {
+ qup_uart15_cts: qup-uart15-cts-pins {
+ pins = "gpio84";
+ function = "qup2_se1";
+ };
+
+ qup_uart15_rts: qup-uart15-rts-pins {
+ pins = "gpio85";
+ function = "qup2_se1";
+ };
+
+ qup_uart15_tx: qup-uart15-tx-pins {
+ pins = "gpio99";
+ function = "qup2_se1";
+ };
+
+ qup_uart15_rx: qup-uart15-rx-pins {
+ pins = "gpio100";
+ function = "qup2_se1";
+ };
+ };
+
+ qup_uart16_default: qup-uart16-state {
+ qup_uart16_cts: qup-uart16-cts-pins {
+ pins = "gpio86";
+ function = "qup2_se2";
+ };
+
+ qup_uart16_rts: qup-uart16-rts-pins {
+ pins = "gpio87";
+ function = "qup2_se2";
+ };
+
+ qup_uart16_tx: qup-uart16-tx-pins {
+ pins = "gpio88";
+ function = "qup2_se2";
+ };
+
+ qup_uart16_rx: qup-uart16-rx-pins {
+ pins = "gpio89";
+ function = "qup2_se2";
+ };
+ };
+
+ qup_uart17_default: qup-uart17-state {
+ qup_uart17_cts: qup-uart17-cts-pins {
+ pins = "gpio91";
+ function = "qup2_se3";
+ };
+
+			qup_uart17_rts: qup-uart17-rts-pins {
+ pins = "gpio92";
+ function = "qup2_se3";
+ };
+
+			qup_uart17_tx: qup-uart17-tx-pins {
+ pins = "gpio93";
+ function = "qup2_se3";
+ };
+
+			qup_uart17_rx: qup-uart17-rx-pins {
+ pins = "gpio94";
+ function = "qup2_se3";
+ };
+ };
+
+ qup_uart18_default: qup-uart18-state {
+ qup_uart18_cts: qup-uart18-cts-pins {
+ pins = "gpio95";
+ function = "qup2_se4";
+ };
+
+ qup_uart18_rts: qup-uart18-rts-pins {
+ pins = "gpio96";
+ function = "qup2_se4";
+ };
+
+ qup_uart18_tx: qup-uart18-tx-pins {
+ pins = "gpio97";
+ function = "qup2_se4";
+ };
+
+ qup_uart18_rx: qup-uart18-rx-pins {
+ pins = "gpio98";
+ function = "qup2_se4";
+ };
+ };
+
+ qup_uart19_default: qup-uart19-state {
+ qup_uart19_cts: qup-uart19-cts-pins {
+ pins = "gpio99";
+ function = "qup2_se5";
+ };
+
+ qup_uart19_rts: qup-uart19-rts-pins {
+ pins = "gpio100";
+ function = "qup2_se5";
+ };
+
+ qup_uart19_tx: qup-uart19-tx-pins {
+ pins = "gpio84";
+ function = "qup2_se5";
+ };
+
+ qup_uart19_rx: qup-uart19-rx-pins {
+ pins = "gpio85";
+ function = "qup2_se5";
+ };
+ };
+
+ qup_uart20_default: qup-uart20-state {
+ qup_uart20_cts: qup-uart20-cts-pins {
+ pins = "gpio97";
+ function = "qup2_se6";
+ };
+
+ qup_uart20_rts: qup-uart20-rts-pins {
+ pins = "gpio98";
+ function = "qup2_se6";
+ };
+
+ qup_uart20_tx: qup-uart20-tx-pins {
+ pins = "gpio95";
+ function = "qup2_se6";
+ };
+
+ qup_uart20_rx: qup-uart20-rx-pins {
+ pins = "gpio96";
+ function = "qup2_se6";
+ };
+ };
+
+ qup_uart21_default: qup-uart21-state {
+ qup_uart21_cts: qup-uart21-cts-pins {
+ pins = "gpio13";
+ function = "qup3_se0";
+ };
+
+ qup_uart21_rts: qup-uart21-rts-pins {
+ pins = "gpio14";
+ function = "qup3_se0";
+ };
+
+ qup_uart21_tx: qup-uart21-tx-pins {
+ pins = "gpio15";
+ function = "qup3_se0";
+ };
+
+ qup_uart21_rx: qup-uart21-rx-pins {
+ pins = "gpio16";
+ function = "qup3_se0";
+ };
+ };
};
sram: sram@146d8000 {
@@ -4590,6 +5484,7 @@
<WAKE_TCS 3>,
<CONTROL_TCS 0>;
label = "apps_rsc";
+ power-domains = <&system_pd>;
apps_bcm_voter: bcm-voter {
compatible = "qcom,bcm-voter";
@@ -4660,6 +5555,10 @@
<0x0 0x18593000 0x0 0x1000>;
reg-names = "freq-domain0", "freq-domain1";
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dcvsh-irq-0", "dcvsh-irq-1";
+
clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
clock-names = "xo", "alternate";
@@ -4903,15 +5802,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <1>;
iommus = <&apps_smmu 0x2141 0x04a0>,
- <&apps_smmu 0x2161 0x04a0>,
- <&apps_smmu 0x2181 0x0400>,
- <&apps_smmu 0x21c1 0x04a0>,
- <&apps_smmu 0x21e1 0x04a0>,
- <&apps_smmu 0x2541 0x04a0>,
- <&apps_smmu 0x2561 0x04a0>,
- <&apps_smmu 0x2581 0x0400>,
- <&apps_smmu 0x25c1 0x04a0>,
- <&apps_smmu 0x25e1 0x04a0>;
+ <&apps_smmu 0x2181 0x0400>;
dma-coherent;
};
@@ -4919,15 +5810,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <2>;
iommus = <&apps_smmu 0x2142 0x04a0>,
- <&apps_smmu 0x2162 0x04a0>,
- <&apps_smmu 0x2182 0x0400>,
- <&apps_smmu 0x21c2 0x04a0>,
- <&apps_smmu 0x21e2 0x04a0>,
- <&apps_smmu 0x2542 0x04a0>,
- <&apps_smmu 0x2562 0x04a0>,
- <&apps_smmu 0x2582 0x0400>,
- <&apps_smmu 0x25c2 0x04a0>,
- <&apps_smmu 0x25e2 0x04a0>;
+ <&apps_smmu 0x2182 0x0400>;
dma-coherent;
};
@@ -4935,15 +5818,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <3>;
iommus = <&apps_smmu 0x2143 0x04a0>,
- <&apps_smmu 0x2163 0x04a0>,
- <&apps_smmu 0x2183 0x0400>,
- <&apps_smmu 0x21c3 0x04a0>,
- <&apps_smmu 0x21e3 0x04a0>,
- <&apps_smmu 0x2543 0x04a0>,
- <&apps_smmu 0x2563 0x04a0>,
- <&apps_smmu 0x2583 0x0400>,
- <&apps_smmu 0x25c3 0x04a0>,
- <&apps_smmu 0x25e3 0x04a0>;
+ <&apps_smmu 0x2183 0x0400>;
dma-coherent;
};
@@ -4951,15 +5826,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <4>;
iommus = <&apps_smmu 0x2144 0x04a0>,
- <&apps_smmu 0x2164 0x04a0>,
- <&apps_smmu 0x2184 0x0400>,
- <&apps_smmu 0x21c4 0x04a0>,
- <&apps_smmu 0x21e4 0x04a0>,
- <&apps_smmu 0x2544 0x04a0>,
- <&apps_smmu 0x2564 0x04a0>,
- <&apps_smmu 0x2584 0x0400>,
- <&apps_smmu 0x25c4 0x04a0>,
- <&apps_smmu 0x25e4 0x04a0>;
+ <&apps_smmu 0x2184 0x0400>;
dma-coherent;
};
@@ -4967,15 +5834,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <5>;
iommus = <&apps_smmu 0x2145 0x04a0>,
- <&apps_smmu 0x2165 0x04a0>,
- <&apps_smmu 0x2185 0x0400>,
- <&apps_smmu 0x21c5 0x04a0>,
- <&apps_smmu 0x21e5 0x04a0>,
- <&apps_smmu 0x2545 0x04a0>,
- <&apps_smmu 0x2565 0x04a0>,
- <&apps_smmu 0x2585 0x0400>,
- <&apps_smmu 0x25c5 0x04a0>,
- <&apps_smmu 0x25e5 0x04a0>;
+ <&apps_smmu 0x2185 0x0400>;
dma-coherent;
};
@@ -4983,15 +5842,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <6>;
iommus = <&apps_smmu 0x2146 0x04a0>,
- <&apps_smmu 0x2166 0x04a0>,
- <&apps_smmu 0x2186 0x0400>,
- <&apps_smmu 0x21c6 0x04a0>,
- <&apps_smmu 0x21e6 0x04a0>,
- <&apps_smmu 0x2546 0x04a0>,
- <&apps_smmu 0x2566 0x04a0>,
- <&apps_smmu 0x2586 0x0400>,
- <&apps_smmu 0x25c6 0x04a0>,
- <&apps_smmu 0x25e6 0x04a0>;
+ <&apps_smmu 0x2186 0x0400>;
dma-coherent;
};
@@ -4999,15 +5850,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <7>;
iommus = <&apps_smmu 0x2147 0x04a0>,
- <&apps_smmu 0x2167 0x04a0>,
- <&apps_smmu 0x2187 0x0400>,
- <&apps_smmu 0x21c7 0x04a0>,
- <&apps_smmu 0x21e7 0x04a0>,
- <&apps_smmu 0x2547 0x04a0>,
- <&apps_smmu 0x2567 0x04a0>,
- <&apps_smmu 0x2587 0x0400>,
- <&apps_smmu 0x25c7 0x04a0>,
- <&apps_smmu 0x25e7 0x04a0>;
+ <&apps_smmu 0x2187 0x0400>;
dma-coherent;
};
@@ -5015,15 +5858,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <8>;
iommus = <&apps_smmu 0x2148 0x04a0>,
- <&apps_smmu 0x2168 0x04a0>,
- <&apps_smmu 0x2188 0x0400>,
- <&apps_smmu 0x21c8 0x04a0>,
- <&apps_smmu 0x21e8 0x04a0>,
- <&apps_smmu 0x2548 0x04a0>,
- <&apps_smmu 0x2568 0x04a0>,
- <&apps_smmu 0x2588 0x0400>,
- <&apps_smmu 0x25c8 0x04a0>,
- <&apps_smmu 0x25e8 0x04a0>;
+ <&apps_smmu 0x2188 0x0400>;
dma-coherent;
};
@@ -5031,31 +5866,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <9>;
iommus = <&apps_smmu 0x2149 0x04a0>,
- <&apps_smmu 0x2169 0x04a0>,
- <&apps_smmu 0x2189 0x0400>,
- <&apps_smmu 0x21c9 0x04a0>,
- <&apps_smmu 0x21e9 0x04a0>,
- <&apps_smmu 0x2549 0x04a0>,
- <&apps_smmu 0x2569 0x04a0>,
- <&apps_smmu 0x2589 0x0400>,
- <&apps_smmu 0x25c9 0x04a0>,
- <&apps_smmu 0x25e9 0x04a0>;
- dma-coherent;
- };
-
- compute-cb@10 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <10>;
- iommus = <&apps_smmu 0x214a 0x04a0>,
- <&apps_smmu 0x216a 0x04a0>,
- <&apps_smmu 0x218a 0x0400>,
- <&apps_smmu 0x21ca 0x04a0>,
- <&apps_smmu 0x21ea 0x04a0>,
- <&apps_smmu 0x254a 0x04a0>,
- <&apps_smmu 0x256a 0x04a0>,
- <&apps_smmu 0x258a 0x0400>,
- <&apps_smmu 0x25ca 0x04a0>,
- <&apps_smmu 0x25ea 0x04a0>;
+ <&apps_smmu 0x2189 0x0400>;
dma-coherent;
};
@@ -5063,15 +5874,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <11>;
iommus = <&apps_smmu 0x214b 0x04a0>,
- <&apps_smmu 0x216b 0x04a0>,
- <&apps_smmu 0x218b 0x0400>,
- <&apps_smmu 0x21cb 0x04a0>,
- <&apps_smmu 0x21eb 0x04a0>,
- <&apps_smmu 0x254b 0x04a0>,
- <&apps_smmu 0x256b 0x04a0>,
- <&apps_smmu 0x258b 0x0400>,
- <&apps_smmu 0x25cb 0x04a0>,
- <&apps_smmu 0x25eb 0x04a0>;
+ <&apps_smmu 0x218b 0x0400>;
dma-coherent;
};
};
@@ -5131,15 +5934,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <1>;
iommus = <&apps_smmu 0x2941 0x04a0>,
- <&apps_smmu 0x2961 0x04a0>,
- <&apps_smmu 0x2981 0x0400>,
- <&apps_smmu 0x29c1 0x04a0>,
- <&apps_smmu 0x29e1 0x04a0>,
- <&apps_smmu 0x2d41 0x04a0>,
- <&apps_smmu 0x2d61 0x04a0>,
- <&apps_smmu 0x2d81 0x0400>,
- <&apps_smmu 0x2dc1 0x04a0>,
- <&apps_smmu 0x2de1 0x04a0>;
+ <&apps_smmu 0x2981 0x0400>;
dma-coherent;
};
@@ -5147,15 +5942,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <2>;
iommus = <&apps_smmu 0x2942 0x04a0>,
- <&apps_smmu 0x2962 0x04a0>,
- <&apps_smmu 0x2982 0x0400>,
- <&apps_smmu 0x29c2 0x04a0>,
- <&apps_smmu 0x29e2 0x04a0>,
- <&apps_smmu 0x2d42 0x04a0>,
- <&apps_smmu 0x2d62 0x04a0>,
- <&apps_smmu 0x2d82 0x0400>,
- <&apps_smmu 0x2dc2 0x04a0>,
- <&apps_smmu 0x2de2 0x04a0>;
+ <&apps_smmu 0x2982 0x0400>;
dma-coherent;
};
@@ -5163,15 +5950,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <3>;
iommus = <&apps_smmu 0x2943 0x04a0>,
- <&apps_smmu 0x2963 0x04a0>,
- <&apps_smmu 0x2983 0x0400>,
- <&apps_smmu 0x29c3 0x04a0>,
- <&apps_smmu 0x29e3 0x04a0>,
- <&apps_smmu 0x2d43 0x04a0>,
- <&apps_smmu 0x2d63 0x04a0>,
- <&apps_smmu 0x2d83 0x0400>,
- <&apps_smmu 0x2dc3 0x04a0>,
- <&apps_smmu 0x2de3 0x04a0>;
+ <&apps_smmu 0x2983 0x0400>;
dma-coherent;
};
@@ -5179,15 +5958,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <4>;
iommus = <&apps_smmu 0x2944 0x04a0>,
- <&apps_smmu 0x2964 0x04a0>,
- <&apps_smmu 0x2984 0x0400>,
- <&apps_smmu 0x29c4 0x04a0>,
- <&apps_smmu 0x29e4 0x04a0>,
- <&apps_smmu 0x2d44 0x04a0>,
- <&apps_smmu 0x2d64 0x04a0>,
- <&apps_smmu 0x2d84 0x0400>,
- <&apps_smmu 0x2dc4 0x04a0>,
- <&apps_smmu 0x2de4 0x04a0>;
+ <&apps_smmu 0x2984 0x0400>;
dma-coherent;
};
@@ -5195,15 +5966,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <5>;
iommus = <&apps_smmu 0x2945 0x04a0>,
- <&apps_smmu 0x2965 0x04a0>,
- <&apps_smmu 0x2985 0x0400>,
- <&apps_smmu 0x29c5 0x04a0>,
- <&apps_smmu 0x29e5 0x04a0>,
- <&apps_smmu 0x2d45 0x04a0>,
- <&apps_smmu 0x2d65 0x04a0>,
- <&apps_smmu 0x2d85 0x0400>,
- <&apps_smmu 0x2dc5 0x04a0>,
- <&apps_smmu 0x2de5 0x04a0>;
+ <&apps_smmu 0x2985 0x0400>;
dma-coherent;
};
@@ -5211,15 +5974,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <6>;
iommus = <&apps_smmu 0x2946 0x04a0>,
- <&apps_smmu 0x2966 0x04a0>,
- <&apps_smmu 0x2986 0x0400>,
- <&apps_smmu 0x29c6 0x04a0>,
- <&apps_smmu 0x29e6 0x04a0>,
- <&apps_smmu 0x2d46 0x04a0>,
- <&apps_smmu 0x2d66 0x04a0>,
- <&apps_smmu 0x2d86 0x0400>,
- <&apps_smmu 0x2dc6 0x04a0>,
- <&apps_smmu 0x2de6 0x04a0>;
+ <&apps_smmu 0x2986 0x0400>;
dma-coherent;
};
@@ -5227,15 +5982,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <7>;
iommus = <&apps_smmu 0x2947 0x04a0>,
- <&apps_smmu 0x2967 0x04a0>,
- <&apps_smmu 0x2987 0x0400>,
- <&apps_smmu 0x29c7 0x04a0>,
- <&apps_smmu 0x29e7 0x04a0>,
- <&apps_smmu 0x2d47 0x04a0>,
- <&apps_smmu 0x2d67 0x04a0>,
- <&apps_smmu 0x2d87 0x0400>,
- <&apps_smmu 0x2dc7 0x04a0>,
- <&apps_smmu 0x2de7 0x04a0>;
+ <&apps_smmu 0x2987 0x0400>;
dma-coherent;
};
@@ -5243,15 +5990,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <8>;
iommus = <&apps_smmu 0x2948 0x04a0>,
- <&apps_smmu 0x2968 0x04a0>,
- <&apps_smmu 0x2988 0x0400>,
- <&apps_smmu 0x29c8 0x04a0>,
- <&apps_smmu 0x29e8 0x04a0>,
- <&apps_smmu 0x2d48 0x04a0>,
- <&apps_smmu 0x2d68 0x04a0>,
- <&apps_smmu 0x2d88 0x0400>,
- <&apps_smmu 0x2dc8 0x04a0>,
- <&apps_smmu 0x2de8 0x04a0>;
+ <&apps_smmu 0x2988 0x0400>;
dma-coherent;
};
@@ -5259,15 +5998,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <9>;
iommus = <&apps_smmu 0x2949 0x04a0>,
- <&apps_smmu 0x2969 0x04a0>,
- <&apps_smmu 0x2989 0x0400>,
- <&apps_smmu 0x29c9 0x04a0>,
- <&apps_smmu 0x29e9 0x04a0>,
- <&apps_smmu 0x2d49 0x04a0>,
- <&apps_smmu 0x2d69 0x04a0>,
- <&apps_smmu 0x2d89 0x0400>,
- <&apps_smmu 0x2dc9 0x04a0>,
- <&apps_smmu 0x2de9 0x04a0>;
+ <&apps_smmu 0x2989 0x0400>;
dma-coherent;
};
@@ -5275,15 +6006,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <10>;
iommus = <&apps_smmu 0x294a 0x04a0>,
- <&apps_smmu 0x296a 0x04a0>,
- <&apps_smmu 0x298a 0x0400>,
- <&apps_smmu 0x29ca 0x04a0>,
- <&apps_smmu 0x29ea 0x04a0>,
- <&apps_smmu 0x2d4a 0x04a0>,
- <&apps_smmu 0x2d6a 0x04a0>,
- <&apps_smmu 0x2d8a 0x0400>,
- <&apps_smmu 0x2dca 0x04a0>,
- <&apps_smmu 0x2dea 0x04a0>;
+ <&apps_smmu 0x298a 0x0400>;
dma-coherent;
};
@@ -5291,15 +6014,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <11>;
iommus = <&apps_smmu 0x294b 0x04a0>,
- <&apps_smmu 0x296b 0x04a0>,
- <&apps_smmu 0x298b 0x0400>,
- <&apps_smmu 0x29cb 0x04a0>,
- <&apps_smmu 0x29eb 0x04a0>,
- <&apps_smmu 0x2d4b 0x04a0>,
- <&apps_smmu 0x2d6b 0x04a0>,
- <&apps_smmu 0x2d8b 0x0400>,
- <&apps_smmu 0x2dcb 0x04a0>,
- <&apps_smmu 0x2deb 0x04a0>;
+ <&apps_smmu 0x298b 0x0400>;
dma-coherent;
};
@@ -5307,15 +6022,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <12>;
iommus = <&apps_smmu 0x294c 0x04a0>,
- <&apps_smmu 0x296c 0x04a0>,
- <&apps_smmu 0x298c 0x0400>,
- <&apps_smmu 0x29cc 0x04a0>,
- <&apps_smmu 0x29ec 0x04a0>,
- <&apps_smmu 0x2d4c 0x04a0>,
- <&apps_smmu 0x2d6c 0x04a0>,
- <&apps_smmu 0x2d8c 0x0400>,
- <&apps_smmu 0x2dcc 0x04a0>,
- <&apps_smmu 0x2dec 0x04a0>;
+ <&apps_smmu 0x298c 0x0400>;
dma-coherent;
};
@@ -5323,15 +6030,7 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <13>;
iommus = <&apps_smmu 0x294d 0x04a0>,
- <&apps_smmu 0x296d 0x04a0>,
- <&apps_smmu 0x298d 0x0400>,
- <&apps_smmu 0x29Cd 0x04a0>,
- <&apps_smmu 0x29ed 0x04a0>,
- <&apps_smmu 0x2d4d 0x04a0>,
- <&apps_smmu 0x2d6d 0x04a0>,
- <&apps_smmu 0x2d8d 0x0400>,
- <&apps_smmu 0x2dcd 0x04a0>,
- <&apps_smmu 0x2ded 0x04a0>;
+ <&apps_smmu 0x298d 0x0400>;
dma-coherent;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sar2130p.dtsi b/arch/arm64/boot/dts/qcom/sar2130p.dtsi
index dd832e6816be..b0e342810ae7 100644
--- a/arch/arm64/boot/dts/qcom/sar2130p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sar2130p.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,sar2130p-gcc.h>
#include <dt-bindings/clock/qcom,sar2130p-gpucc.h>
+#include <dt-bindings/clock/qcom,sm8550-dispcc.h>
#include <dt-bindings/clock/qcom,sm8550-tcsr.h>
#include <dt-bindings/dma/qcom-gpi.h>
#include <dt-bindings/interconnect/qcom,icc.h>
@@ -1474,6 +1475,67 @@
};
};
+ pcie1_ep: pcie-ep@1c08000 {
+ compatible = "qcom,sar2130p-pcie-ep";
+ reg = <0x0 0x01c08000 0x0 0x3000>,
+ <0x0 0x40000000 0x0 0xf1d>,
+ <0x0 0x40000f20 0x0 0xa8>,
+ <0x0 0x40001000 0x0 0x1000>,
+ <0x0 0x40200000 0x0 0x1000000>,
+ <0x0 0x01c0b000 0x0 0x1000>,
+ <0x0 0x40002000 0x0 0x2000>;
+ reg-names = "parf",
+ "dbi",
+ "elbi",
+ "atu",
+ "addr_space",
+ "mmio",
+ "dma";
+
+ clocks = <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_Q2A_AXI_CLK>,
+ <&gcc GCC_DDRSS_PCIE_SF_CLK>,
+ <&gcc GCC_AGGRE_NOC_PCIE_1_AXI_CLK>,
+ <&gcc GCC_CFG_NOC_PCIE_ANOC_AHB_CLK>,
+ <&gcc GCC_QMIP_PCIE_AHB_CLK>;
+ clock-names = "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave",
+ "slave_q2a",
+ "ddrss_sf_tbu",
+ "aggre_noc_axi",
+ "cnoc_sf_axi",
+ "qmip_pcie_ahb";
+
+ interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global",
+ "doorbell",
+ "dma";
+
+ interconnects = <&pcie_noc MASTER_PCIE_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_PCIE_1 QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "pcie-mem",
+ "cpu-pcie";
+ iommus = <&apps_smmu 0x1e00 0x1>;
+ resets = <&gcc GCC_PCIE_1_BCR>;
+ reset-names = "core";
+ power-domains = <&gcc PCIE_1_GDSC>;
+ phys = <&pcie1_phy>;
+ phy-names = "pciephy";
+
+ num-lanes = <2>;
+
+ status = "disabled";
+ };
+
pcie1_phy: phy@1c0e000 {
compatible = "qcom,sar2130p-qmp-gen3x2-pcie-phy";
reg = <0x0 0x01c0e000 0x0 0x2000>;
@@ -1854,6 +1916,7 @@
reg = <2>;
usb_dp_qmpphy_dp_in: endpoint {
+ remote-endpoint = <&mdss_dp0_out>;
};
};
};
@@ -1951,6 +2014,398 @@
};
};
+ mdss: display-subsystem@ae00000 {
+ compatible = "qcom,sar2130p-mdss";
+ reg = <0x0 0x0ae00000 0x0 0x1000>;
+ reg-names = "mdss";
+
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&gcc GCC_DISP_AHB_CLK>,
+ <&gcc GCC_DISP_HF_AXI_CLK>,
+ <&dispcc DISP_CC_MDSS_MDP_CLK>;
+
+ resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
+
+ power-domains = <&dispcc MDSS_GDSC>;
+
+ interconnects = <&mmss_noc MASTER_MDP QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "mdp0-mem", "cpu-cfg";
+
+ iommus = <&apps_smmu 0x2000 0x402>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled";
+
+ mdss_mdp: display-controller@ae01000 {
+ compatible = "qcom,sar2130p-dpu";
+ reg = <0x0 0x0ae01000 0x0 0x8f000>,
+ <0x0 0x0aeb0000 0x0 0x2008>;
+ reg-names = "mdp",
+ "vbif";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0>;
+
+ clocks = <&gcc GCC_DISP_AHB_CLK>,
+ <&gcc GCC_DISP_HF_AXI_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc DISP_CC_MDSS_MDP_LUT_CLK>,
+ <&dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ clock-names = "bus",
+ "nrt_bus",
+ "iface",
+ "lut",
+ "core",
+ "vsync";
+
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ assigned-clock-rates = <19200000>;
+
+ operating-points-v2 = <&mdp_opp_table>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dpu_intf1_out: endpoint {
+ remote-endpoint = <&mdss_dsi0_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ dpu_intf2_out: endpoint {
+ remote-endpoint = <&mdss_dsi1_in>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ dpu_intf0_out: endpoint {
+ remote-endpoint = <&mdss_dp0_in>;
+ };
+ };
+ };
+
+ mdp_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-325000000 {
+ opp-hz = /bits/ 64 <325000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-514000000 {
+ opp-hz = /bits/ 64 <514000000>;
+ required-opps = <&rpmhpd_opp_turbo>;
+ };
+ };
+ };
+
+ mdss_dp0: displayport-controller@ae90000 {
+ compatible = "qcom,sar2130p-dp",
+ "qcom,sm8350-dp";
+ reg = <0x0 0xae90000 0x0 0x200>,
+ <0x0 0xae90200 0x0 0x200>,
+ <0x0 0xae90400 0x0 0xc00>,
+ <0x0 0xae91000 0x0 0x400>,
+ <0x0 0xae91400 0x0 0x400>;
+ interrupt-parent = <&mdss>;
+ interrupts = <12>;
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc DISP_CC_MDSS_DPTX0_AUX_CLK>,
+ <&dispcc DISP_CC_MDSS_DPTX0_LINK_CLK>,
+ <&dispcc DISP_CC_MDSS_DPTX0_LINK_INTF_CLK>,
+ <&dispcc DISP_CC_MDSS_DPTX0_PIXEL0_CLK>;
+ clock-names = "core_iface",
+ "core_aux",
+ "ctrl_link",
+ "ctrl_link_iface",
+ "stream_pixel";
+
+ assigned-clocks = <&dispcc DISP_CC_MDSS_DPTX0_LINK_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC>;
+ assigned-clock-parents = <&usb_dp_qmpphy QMP_USB43DP_DP_LINK_CLK>,
+ <&usb_dp_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
+
+ phys = <&usb_dp_qmpphy QMP_USB43DP_DP_PHY>;
+ phy-names = "dp";
+
+ #sound-dai-cells = <0>;
+
+ operating-points-v2 = <&dp_opp_table>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdss_dp0_in: endpoint {
+ remote-endpoint = <&dpu_intf0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mdss_dp0_out: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_dp_in>;
+ };
+ };
+ };
+
+ dp_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-162000000 {
+ opp-hz = /bits/ 64 <162000000>;
+ required-opps = <&rpmhpd_opp_low_svs_d1>;
+ };
+
+ opp-270000000 {
+ opp-hz = /bits/ 64 <270000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-540000000 {
+ opp-hz = /bits/ 64 <540000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-810000000 {
+ opp-hz = /bits/ 64 <810000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+
+ mdss_dsi0: dsi@ae94000 {
+ compatible = "qcom,sar2130p-dsi-ctrl",
+ "qcom,mdss-dsi-ctrl";
+ reg = <0x0 0x0ae94000 0x0 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4>;
+
+ clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
+ <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK>,
+ <&dispcc DISP_CC_MDSS_ESC0_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&gcc GCC_DISP_HF_AXI_CLK>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi0_phy 0>,
+ <&mdss_dsi0_phy 1>;
+
+ operating-points-v2 = <&mdss_dsi_opp_table>;
+
+ phys = <&mdss_dsi0_phy>;
+ phy-names = "dsi";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdss_dsi0_in: endpoint {
+ remote-endpoint = <&dpu_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ mdss_dsi0_out: endpoint {
+ };
+ };
+ };
+
+ mdss_dsi_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-187500000 {
+ opp-hz = /bits/ 64 <187500000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-358000000 {
+ opp-hz = /bits/ 64 <358000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+
+ mdss_dsi0_phy: phy@ae95000 {
+ compatible = "qcom,sar2130p-dsi-phy-5nm";
+ reg = <0x0 0x0ae95000 0x0 0x200>,
+ <0x0 0x0ae95200 0x0 0x280>,
+ <0x0 0x0ae95500 0x0 0x400>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ mdss_dsi1: dsi@ae96000 {
+ compatible = "qcom,sar2130p-dsi-ctrl",
+ "qcom,mdss-dsi-ctrl";
+ reg = <0x0 0x0ae96000 0x0 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <5>;
+
+ clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK>,
+ <&dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>,
+ <&dispcc DISP_CC_MDSS_PCLK1_CLK>,
+ <&dispcc DISP_CC_MDSS_ESC1_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&gcc GCC_DISP_HF_AXI_CLK>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi1_phy 0>,
+ <&mdss_dsi1_phy 1>;
+
+ operating-points-v2 = <&mdss_dsi_opp_table>;
+
+ phys = <&mdss_dsi1_phy>;
+ phy-names = "dsi";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdss_dsi1_in: endpoint {
+ remote-endpoint = <&dpu_intf2_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ mdss_dsi1_out: endpoint {
+ };
+ };
+ };
+ };
+
+ mdss_dsi1_phy: phy@ae97000 {
+ compatible = "qcom,sar2130p-dsi-phy-5nm";
+ reg = <0x0 0x0ae97000 0x0 0x200>,
+ <0x0 0x0ae97200 0x0 0x280>,
+ <0x0 0x0ae97500 0x0 0x400>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+ };
+
+ dispcc: clock-controller@af00000 {
+ compatible = "qcom,sar2130p-dispcc";
+ reg = <0x0 0x0af00000 0x0 0x20000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&gcc GCC_DISP_AHB_CLK>,
+ <&sleep_clk>,
+ <&mdss_dsi0_phy 0>,
+ <&mdss_dsi0_phy 1>,
+ <&mdss_dsi1_phy 0>,
+ <&mdss_dsi1_phy 1>,
+ <&usb_dp_qmpphy QMP_USB43DP_DP_LINK_CLK>,
+ <&usb_dp_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>,
+ <0>, /* dp1 */
+ <0>,
+ <0>, /* dp2 */
+ <0>,
+ <0>, /* dp3 */
+ <0>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
pdc: interrupt-controller@b220000 {
compatible = "qcom,sar2130p-pdc", "qcom,pdc";
reg = <0x0 0x0b220000 0x0 0x30000>, <0x0 0x174000f0 0x0 0x64>;
diff --git a/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts b/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts
index 3f0d3e33894a..672ac4c3afa3 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts
@@ -530,19 +530,19 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
dai@3 {
- reg = <3>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA4>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-el2.dtso b/arch/arm64/boot/dts/qcom/sc7180-el2.dtso
new file mode 100644
index 000000000000..49a98676ca4d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-el2.dtso
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+/*
+ * sc7180 specific modifications required to boot in EL2.
+ */
+
+/dts-v1/;
+/plugin/;
+
+/* We can't and don't need to use the zap shader in EL2, as Linux can zap the GPU on its own. */
+&gpu {
+ zap-shader {
+ status = "disabled";
+ };
+};
+
+/* Venus can be used in EL2 if booted similarly to ChromeOS devices. */
+&venus {
+ video-firmware {
+ iommus = <&apps_smmu 0x0c42 0x0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
index f57976906d63..8fee8d7a7d4c 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
@@ -188,7 +188,7 @@ ap_ts_pen_1v8: &i2c4 {
};
&wifi {
- qcom,ath10k-calibration-variant = "GO_HOMESTAR";
+ qcom,calibration-variant = "GO_HOMESTAR";
};
/* PINCTRL - modifications to sc7180-trogdor.dtsi */
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dts
index 655bea928e52..26514640a1ae 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dts
@@ -79,7 +79,7 @@ ap_ts_pen_1v8: &i2c4 {
};
&wifi {
- qcom,ath10k-calibration-variant = "GO_KINGOFTOWN";
+ qcom,calibration-variant = "GO_KINGOFTOWN";
};
/* PINCTRL - modifications to sc7180-trogdor.dtsi */
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi
index c3fd6760de7a..eb9c9e713a89 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi
@@ -69,7 +69,7 @@ ap_ts_pen_1v8: &i2c4 {
};
&wifi {
- qcom,ath10k-calibration-variant = "GO_LAZOR";
+ qcom,calibration-variant = "GO_LAZOR";
};
/* PINCTRL - modifications to sc7180-trogdor.dtsi */
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel360.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel360.dtsi
index 89034b6702f4..a2224de841b1 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel360.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel360.dtsi
@@ -59,5 +59,5 @@ ap_ts_pen_1v8: &i2c4 {
};
&wifi {
- qcom,ath10k-calibration-variant = "GO_PAZQUEL360";
+ qcom,calibration-variant = "GO_PAZQUEL360";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
index f7300ffbb451..4f5ab378cf8e 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
@@ -181,7 +181,7 @@ ap_ts_pen_1v8: &i2c4 {
};
&wifi {
- qcom,ath10k-calibration-variant = "GO_POMPOM";
+ qcom,calibration-variant = "GO_POMPOM";
};
/* PINCTRL - board-specific pinctrl */
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi
index d4925be3b1fc..17908c936520 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi
@@ -196,7 +196,7 @@
};
&wifi {
- qcom,ath10k-calibration-variant = "GO_WORMDINGLER";
+ qcom,calibration-variant = "GO_WORMDINGLER";
};
/*
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index 87c432c12a24..01e727b021ec 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -6,6 +6,7 @@
*/
#include <dt-bindings/clock/qcom,dispcc-sc7180.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-sc7180.h>
#include <dt-bindings/clock/qcom,gpucc-sc7180.h>
#include <dt-bindings/clock/qcom,lpasscorecc-sc7180.h>
@@ -3195,7 +3196,7 @@
mdp: display-controller@ae01000 {
compatible = "qcom,sc7180-dpu";
reg = <0 0x0ae01000 0 0x8f000>,
- <0 0x0aeb0000 0 0x2008>;
+ <0 0x0aeb0000 0 0x3000>;
reg-names = "mdp", "vbif";
clocks = <&gcc GCC_DISP_HF_AXI_CLK>,
@@ -3284,8 +3285,10 @@
"iface",
"bus";
- assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+ assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd SC7180_CX>;
@@ -3433,8 +3436,8 @@
reg = <0 0x0af00000 0 0x200000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_DISP_GPLL0_CLK_SRC>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
clock-names = "bi_tcxo",
@@ -3720,7 +3723,7 @@
};
apps_rsc: rsc@18200000 {
- compatible = "qcom,rpmh-rsc";
+ compatible = "qcom,sc7180-rpmh-apps-rsc", "qcom,rpmh-rsc";
reg = <0 0x18200000 0 0x10000>,
<0 0x18210000 0 0x10000>,
<0 0x18220000 0 0x10000>;
diff --git a/arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-rt5682-3mic.dtsi b/arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-rt5682-3mic.dtsi
index a90c70b1b73e..0e07429982bd 100644
--- a/arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-rt5682-3mic.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-rt5682-3mic.dtsi
@@ -139,6 +139,7 @@ hp_i2c: &i2c2 {
vdd-micb-supply = <&pp1800_l2c>;
pinctrl-0 = <&lpass_dmic01_clk>, <&lpass_dmic01_data>, <&lpass_dmic23_clk>,
<&lpass_dmic23_data>;
+ pinctrl-names = "default";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-wcd9385.dtsi b/arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-wcd9385.dtsi
index 020ef666e35f..ce48e4cda170 100644
--- a/arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-wcd9385.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-wcd9385.dtsi
@@ -141,6 +141,9 @@
};
&lpass_va_macro {
+ pinctrl-0 = <&lpass_dmic01_clk>, <&lpass_dmic01_data>;
+ pinctrl-names = "default";
+
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
index 7370aa0dbf0e..90e5b9ab5b84 100644
--- a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
@@ -412,6 +412,8 @@
&lpass_va_macro {
status = "okay";
vdd-micb-supply = <&vreg_bob>;
+ pinctrl-0 = <&lpass_dmic01_clk>, <&lpass_dmic01_data>;
+ pinctrl-names = "default";
};
&pcie1 {
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 0f2caf36910b..b1cc3bc1aec8 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -6,6 +6,7 @@
*/
#include <dt-bindings/clock/qcom,camcc-sc7280.h>
#include <dt-bindings/clock/qcom,dispcc-sc7280.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-sc7280.h>
#include <dt-bindings/clock/qcom,gpucc-sc7280.h>
#include <dt-bindings/clock/qcom,lpassaudiocc-sc7280.h>
@@ -27,6 +28,7 @@
#include <dt-bindings/soc/qcom,apr.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include <dt-bindings/sound/qcom,lpass.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
#include <dt-bindings/thermal/thermal.h>
/ {
@@ -2363,18 +2365,52 @@
"tx_lane0_sync_clk",
"rx_lane0_sync_clk",
"rx_lane1_sync_clk";
- freq-table-hz =
- <75000000 300000000>,
- <0 0>,
- <0 0>,
- <75000000 300000000>,
- <0 0>,
- <0 0>,
- <0 0>,
- <0 0>;
+
+ operating-points-v2 = <&ufs_opp_table>;
+
qcom,ice = <&ice>;
status = "disabled";
+
+ ufs_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-75000000 {
+ opp-hz = /bits/ 64 <75000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <75000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-150000000 {
+ opp-hz = /bits/ 64 <150000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <150000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <300000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
};
ufs_mem_phy: phy@1d87000 {
@@ -2617,9 +2653,6 @@
compatible = "qcom,sc7280-lpass-va-macro";
reg = <0 0x03370000 0 0x1000>;
- pinctrl-names = "default";
- pinctrl-0 = <&lpass_dmic01_clk>, <&lpass_dmic01_data>;
-
clocks = <&lpass_aon LPASS_AON_CC_TX_MCLK_CLK>;
clock-names = "mclk";
@@ -3639,6 +3672,8 @@
#clock-cells = <1>;
#phy-cells = <1>;
+ orientation-switch;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -3654,6 +3689,7 @@
reg = <1>;
usb_dp_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_ss>;
};
};
@@ -3661,6 +3697,7 @@
reg = <2>;
usb_dp_qmpphy_dp_in: endpoint {
+ remote-endpoint = <&mdss_dp_out>;
};
};
};
@@ -3832,15 +3869,15 @@
iommus = <&apps_smmu 0x1801 0x0>;
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
};
};
@@ -3869,18 +3906,21 @@
compatible = "qcom,fastrpc-compute-cb";
reg = <3>;
iommus = <&apps_smmu 0x1803 0x0>;
+ dma-coherent;
};
compute-cb@4 {
compatible = "qcom,fastrpc-compute-cb";
reg = <4>;
iommus = <&apps_smmu 0x1804 0x0>;
+ dma-coherent;
};
compute-cb@5 {
compatible = "qcom,fastrpc-compute-cb";
reg = <5>;
iommus = <&apps_smmu 0x1805 0x0>;
+ dma-coherent;
};
};
};
@@ -4108,6 +4148,7 @@
reg = <1>;
iommus = <&apps_smmu 0x11a1 0x0420>,
<&apps_smmu 0x1181 0x0420>;
+ dma-coherent;
};
compute-cb@2 {
@@ -4115,6 +4156,7 @@
reg = <2>;
iommus = <&apps_smmu 0x11a2 0x0420>,
<&apps_smmu 0x1182 0x0420>;
+ dma-coherent;
};
compute-cb@3 {
@@ -4122,6 +4164,7 @@
reg = <3>;
iommus = <&apps_smmu 0x11a3 0x0420>,
<&apps_smmu 0x1183 0x0420>;
+ dma-coherent;
};
compute-cb@4 {
@@ -4129,6 +4172,7 @@
reg = <4>;
iommus = <&apps_smmu 0x11a4 0x0420>,
<&apps_smmu 0x1184 0x0420>;
+ dma-coherent;
};
compute-cb@5 {
@@ -4136,6 +4180,7 @@
reg = <5>;
iommus = <&apps_smmu 0x11a5 0x0420>,
<&apps_smmu 0x1185 0x0420>;
+ dma-coherent;
};
compute-cb@6 {
@@ -4143,6 +4188,7 @@
reg = <6>;
iommus = <&apps_smmu 0x11a6 0x0420>,
<&apps_smmu 0x1186 0x0420>;
+ dma-coherent;
};
compute-cb@7 {
@@ -4150,6 +4196,7 @@
reg = <7>;
iommus = <&apps_smmu 0x11a7 0x0420>,
<&apps_smmu 0x1187 0x0420>;
+ dma-coherent;
};
compute-cb@8 {
@@ -4157,6 +4204,7 @@
reg = <8>;
iommus = <&apps_smmu 0x11a8 0x0420>,
<&apps_smmu 0x1188 0x0420>;
+ dma-coherent;
};
/* note: secure cb9 in downstream */
@@ -4166,6 +4214,7 @@
reg = <11>;
iommus = <&apps_smmu 0x11ab 0x0420>,
<&apps_smmu 0x118b 0x0420>;
+ dma-coherent;
};
compute-cb@12 {
@@ -4173,6 +4222,7 @@
reg = <12>;
iommus = <&apps_smmu 0x11ac 0x0420>,
<&apps_smmu 0x118c 0x0420>;
+ dma-coherent;
};
compute-cb@13 {
@@ -4180,6 +4230,7 @@
reg = <13>;
iommus = <&apps_smmu 0x11ad 0x0420>,
<&apps_smmu 0x118d 0x0420>;
+ dma-coherent;
};
compute-cb@14 {
@@ -4187,6 +4238,7 @@
reg = <14>;
iommus = <&apps_smmu 0x11ae 0x0420>,
<&apps_smmu 0x118e 0x0420>;
+ dma-coherent;
};
};
};
@@ -4267,6 +4319,7 @@
reg = <1>;
usb_1_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_usb_ss_in>;
};
};
};
@@ -4301,14 +4354,6 @@
status = "disabled";
- video-decoder {
- compatible = "venus-decoder";
- };
-
- video-encoder {
- compatible = "venus-encoder";
- };
-
venus_opp_table: opp-table {
compatible = "operating-points-v2";
@@ -4430,6 +4475,184 @@
};
};
+ camss: isp@acb3000 {
+ compatible = "qcom,sc7280-camss";
+
+ reg = <0x0 0x0acb3000 0x0 0x1000>,
+ <0x0 0x0acba000 0x0 0x1000>,
+ <0x0 0x0acc1000 0x0 0x1000>,
+ <0x0 0x0acc8000 0x0 0x1000>,
+ <0x0 0x0accf000 0x0 0x1000>,
+ <0x0 0x0ace0000 0x0 0x2000>,
+ <0x0 0x0ace2000 0x0 0x2000>,
+ <0x0 0x0ace4000 0x0 0x2000>,
+ <0x0 0x0ace6000 0x0 0x2000>,
+ <0x0 0x0ace8000 0x0 0x2000>,
+ <0x0 0x0acaf000 0x0 0x4000>,
+ <0x0 0x0acb6000 0x0 0x4000>,
+ <0x0 0x0acbd000 0x0 0x4000>,
+ <0x0 0x0acc4000 0x0 0x4000>,
+ <0x0 0x0accb000 0x0 0x4000>;
+ reg-names = "csid0",
+ "csid1",
+ "csid2",
+ "csid_lite0",
+ "csid_lite1",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "csiphy3",
+ "csiphy4",
+ "vfe0",
+ "vfe1",
+ "vfe2",
+ "vfe_lite0",
+ "vfe_lite1";
+
+ clocks = <&camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&camcc CAM_CC_CPAS_AHB_CLK>,
+ <&camcc CAM_CC_CSIPHY0_CLK>,
+ <&camcc CAM_CC_CSI0PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY1_CLK>,
+ <&camcc CAM_CC_CSI1PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY2_CLK>,
+ <&camcc CAM_CC_CSI2PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY3_CLK>,
+ <&camcc CAM_CC_CSI3PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY4_CLK>,
+ <&camcc CAM_CC_CSI4PHYTIMER_CLK>,
+ <&gcc GCC_CAMERA_HF_AXI_CLK>,
+ <&gcc GCC_CAMERA_SF_AXI_CLK>,
+ <&camcc CAM_CC_ICP_AHB_CLK>,
+ <&camcc CAM_CC_IFE_0_CLK>,
+ <&camcc CAM_CC_IFE_0_AXI_CLK>,
+ <&camcc CAM_CC_IFE_0_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_0_CSID_CLK>,
+ <&camcc CAM_CC_IFE_1_CLK>,
+ <&camcc CAM_CC_IFE_1_AXI_CLK>,
+ <&camcc CAM_CC_IFE_1_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_1_CSID_CLK>,
+ <&camcc CAM_CC_IFE_2_CLK>,
+ <&camcc CAM_CC_IFE_2_AXI_CLK>,
+ <&camcc CAM_CC_IFE_2_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_2_CSID_CLK>,
+ <&camcc CAM_CC_IFE_LITE_0_CLK>,
+ <&camcc CAM_CC_IFE_LITE_0_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_LITE_0_CSID_CLK>,
+ <&camcc CAM_CC_IFE_LITE_1_CLK>,
+ <&camcc CAM_CC_IFE_LITE_1_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_LITE_1_CSID_CLK>;
+ clock-names = "camnoc_axi",
+ "cpas_ahb",
+ "csiphy0",
+ "csiphy0_timer",
+ "csiphy1",
+ "csiphy1_timer",
+ "csiphy2",
+ "csiphy2_timer",
+ "csiphy3",
+ "csiphy3_timer",
+ "csiphy4",
+ "csiphy4_timer",
+ "gcc_axi_hf",
+ "gcc_axi_sf",
+ "icp_ahb",
+ "vfe0",
+ "vfe0_axi",
+ "vfe0_cphy_rx",
+ "vfe0_csid",
+ "vfe1",
+ "vfe1_axi",
+ "vfe1_cphy_rx",
+ "vfe1_csid",
+ "vfe2",
+ "vfe2_axi",
+ "vfe2_cphy_rx",
+ "vfe2_csid",
+ "vfe_lite0",
+ "vfe_lite0_cphy_rx",
+ "vfe_lite0_csid",
+ "vfe_lite1",
+ "vfe_lite1_cphy_rx",
+ "vfe_lite1_csid";
+
+ interrupts = <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 640 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 359 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 122 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 641 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 360 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "csid0",
+ "csid1",
+ "csid2",
+ "csid_lite0",
+ "csid_lite1",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "csiphy3",
+ "csiphy4",
+ "vfe0",
+ "vfe1",
+ "vfe2",
+ "vfe_lite0",
+ "vfe_lite1";
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &cnoc2 SLAVE_CAMERA_CFG QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mmss_noc MASTER_CAMNOC_HF QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "ahb",
+ "hf_0";
+
+ iommus = <&apps_smmu 0x800 0x4e0>;
+
+ power-domains = <&camcc CAM_CC_IFE_0_GDSC>,
+ <&camcc CAM_CC_IFE_1_GDSC>,
+ <&camcc CAM_CC_IFE_2_GDSC>,
+ <&camcc CAM_CC_TITAN_TOP_GDSC>;
+ power-domain-names = "ife0",
+ "ife1",
+ "ife2",
+ "top";
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ };
+
+ port@1 {
+ reg = <1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ };
+
+ port@3 {
+ reg = <3>;
+ };
+
+ port@4 {
+ reg = <4>;
+ };
+ };
+ };
+
camcc: clock-controller@ad00000 {
compatible = "qcom,sc7280-camcc";
reg = <0 0x0ad00000 0 0x10000>;
@@ -4447,8 +4670,8 @@
reg = <0 0x0af00000 0 0x20000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_DISP_GPLL0_CLK_SRC>,
- <&mdss_dsi_phy 0>,
- <&mdss_dsi_phy 1>,
+ <&mdss_dsi_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi_phy DSI_PIXEL_PLL_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>,
<&mdss_edp_phy 0>,
@@ -4502,7 +4725,7 @@
mdss_mdp: display-controller@ae01000 {
compatible = "qcom,sc7280-dpu";
reg = <0 0x0ae01000 0 0x8f030>,
- <0 0x0aeb0000 0 0x2008>;
+ <0 0x0aeb0000 0 0x3000>;
reg-names = "mdp", "vbif";
clocks = <&gcc GCC_DISP_HF_AXI_CLK>,
@@ -4605,8 +4828,10 @@
"iface",
"bus";
- assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi_phy 0>, <&mdss_dsi_phy 1>;
+ assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd SC7280_CX>;
@@ -4820,7 +5045,9 @@
port@1 {
reg = <1>;
- mdss_dp_out: endpoint { };
+ mdss_dp_out: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_dp_in>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
index 28693a3bfc7f..b84e47a461a0 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
@@ -5,6 +5,7 @@
*/
#include <dt-bindings/clock/qcom,dispcc-sm8250.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-sc8180x.h>
#include <dt-bindings/clock/qcom,gpucc-sm8150.h>
#include <dt-bindings/clock/qcom,rpmh.h>
@@ -2976,7 +2977,7 @@
mdss_mdp: mdp@ae01000 {
compatible = "qcom,sc8180x-dpu";
reg = <0 0x0ae01000 0 0x8f000>,
- <0 0x0aeb0000 0 0x2008>;
+ <0 0x0aeb0000 0 0x3000>;
reg-names = "mdp", "vbif";
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
@@ -3465,10 +3466,10 @@
compatible = "qcom,sc8180x-dispcc";
reg = <0 0x0af00000 0 0x20000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
<&usb_prim_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_prim_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>,
<&edp_phy 0>,
@@ -3524,7 +3525,7 @@
#thermal-sensor-cells = <1>;
};
- aoss_qmp: power-controller@c300000 {
+ aoss_qmp: power-management@c300000 {
compatible = "qcom,sc8180x-aoss-qmp", "qcom,aoss-qmp";
reg = <0x0 0x0c300000 0x0 0x400>;
interrupts = <GIC_SPI 389 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
index 75adaa19d1c3..8e2c02497c05 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
@@ -37,6 +37,20 @@
stdout-path = "serial0:115200n8";
};
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&kypd_vol_up_n>;
+ pinctrl-names = "default";
+
+ key-vol-up {
+ label = "volume_up";
+ gpios = <&pmc8280_1_gpios 6 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ wakeup-source;
+ };
+ };
+
pmic-glink {
compatible = "qcom,sc8280xp-pmic-glink", "qcom,pmic-glink";
@@ -686,7 +700,7 @@
vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>;
- qcom,ath11k-calibration-variant = "QC_8280XP_CRD";
+ qcom,calibration-variant = "QC_8280XP_CRD";
};
};
@@ -737,6 +751,12 @@
status = "okay";
};
+&remoteproc_slpi {
+ firmware-name = "qcom/sc8280xp/qcslpi8280.mbn";
+
+ status = "okay";
+};
+
&sdc2 {
pinctrl-0 = <&sdc2_default_state>;
pinctrl-1 = <&sdc2_sleep_state>;
@@ -885,6 +905,14 @@
function = "normal";
};
+ kypd_vol_up_n: kypd-vol-up-n-state {
+ pins = "gpio6";
+ function = "normal";
+ power-source = <0>; /* 3.3 V */
+ bias-pull-up;
+ input-enable;
+ };
+
misc_3p3_reg_en: misc-3p3-reg-en-state {
pins = "gpio2";
function = "normal";
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-el2.dtso b/arch/arm64/boot/dts/qcom/sc8280xp-el2.dtso
new file mode 100644
index 000000000000..25d1fa4bc205
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-el2.dtso
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+/*
+ * sc8280xp specific modifications required to boot in EL2.
+ */
+
+/dts-v1/;
+/plugin/;
+
+/* We can't and don't need to use the zap shader in EL2, as Linux can zap the GPU on its own. */
+&gpu {
+ zap-shader {
+ status = "disabled";
+ };
+};
+
+/*
+ * When running under QHEE, this IOMMU is controlled by the firmware;
+ * however, when we take ownership of it in EL2, we need to configure
+ * it properly to use PCIe.
+ */
+&pcie2a {
+ iommu-map = <0 &pcie_smmu 0x20000 0x10000>;
+};
+
+&pcie2b {
+ iommu-map = <0 &pcie_smmu 0x30000 0x10000>;
+};
+
+&pcie3a {
+ iommu-map = <0 &pcie_smmu 0x40000 0x10000>;
+};
+
+&pcie3b {
+ iommu-map = <0 &pcie_smmu 0x50000 0x10000>;
+};
+
+&pcie4 {
+ iommu-map = <0 &pcie_smmu 0x60000 0x10000>;
+};
+
+&pcie_smmu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
index f3190f408f4b..ae7a275fd223 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
@@ -998,7 +998,7 @@
vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>;
- qcom,ath11k-calibration-variant = "LE_X13S";
+ qcom,calibration-variant = "LE_X13S";
};
};
@@ -1090,20 +1090,9 @@
};
&pmk8280_rtc {
- nvmem-cells = <&rtc_offset>;
- nvmem-cell-names = "offset";
-
status = "okay";
};
-&pmk8280_sdam_6 {
- status = "okay";
-
- rtc_offset: rtc-offset@bc {
- reg = <0xbc 0x4>;
- };
-};
-
&pmk8280_vadc {
channel@144 {
reg = <PM8350_ADC7_AMUX_THM1_100K_PU(1)>;
@@ -1186,6 +1175,12 @@
status = "okay";
};
+&remoteproc_slpi {
+ firmware-name = "qcom/sc8280xp/LENOVO/21BX/qcslpi8280.mbn";
+
+ status = "okay";
+};
+
&rxmacro {
status = "okay";
};
@@ -1202,9 +1197,6 @@
"VA DMIC0", "MIC BIAS1",
"VA DMIC1", "MIC BIAS1",
"VA DMIC2", "MIC BIAS3",
- "VA DMIC0", "VA MIC BIAS1",
- "VA DMIC1", "VA MIC BIAS1",
- "VA DMIC2", "VA MIC BIAS3",
"TX SWR_ADC1", "ADC2_OUTPUT";
wcd-playback-dai-link {
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-microsoft-arcata.dts b/arch/arm64/boot/dts/qcom/sc8280xp-microsoft-arcata.dts
index ae5daeac8fe2..d00889fa6f0b 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-microsoft-arcata.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-microsoft-arcata.dts
@@ -536,7 +536,7 @@
compatible = "pci17cb,1103";
reg = <0x10000 0x0 0x0 0x0 0x0>;
- qcom,ath11k-calibration-variant = "MS_SP9_5G";
+ qcom,calibration-variant = "MS_SP9_5G";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-microsoft-blackrock.dts b/arch/arm64/boot/dts/qcom/sc8280xp-microsoft-blackrock.dts
index fa9d94105052..812251324002 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-microsoft-blackrock.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-microsoft-blackrock.dts
@@ -670,7 +670,7 @@
vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>;
- qcom,ath11k-calibration-variant = "MS_Volterra";
+ qcom,calibration-variant = "MS_Volterra";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
index 1e3babf2e40d..307df1d3dcd2 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
@@ -32,6 +32,26 @@
};
};
+ pmc8280c_thermal: pmc8280c-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pmc8280c_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
pm8280_2_thermal: pm8280-2-thermal {
polling-delay-passive = <100>;
@@ -51,6 +71,26 @@
};
};
};
+
+ pmr735a_thermal: pmr735a-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pmr735a_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
};
};
@@ -181,6 +221,13 @@
#address-cells = <1>;
#size-cells = <0>;
+ pmc8280c_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts-extended = <&spmi_bus 0x2 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
pmc8280c_gpios: gpio@8800 {
compatible = "qcom,pm8350c-gpio", "qcom,spmi-gpio";
reg = <0x8800>;
@@ -212,7 +259,7 @@
pm8280_2_temp_alarm: temp-alarm@a00 {
compatible = "qcom,spmi-temp-alarm";
reg = <0xa00>;
- interrupts-extended = <&spmi_bus 0x2 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ interrupts-extended = <&spmi_bus 0x3 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
io-channels = <&pmk8280_vadc PM8350_ADC7_DIE_TEMP(3)>;
io-channel-names = "thermal";
#thermal-sensor-cells = <0>;
@@ -235,6 +282,15 @@
#address-cells = <1>;
#size-cells = <0>;
+ pmr735a_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts-extended = <&spmi_bus 0x4 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ io-channels = <&pmk8280_vadc PMR735A_ADC7_DIE_TEMP>;
+ io-channel-names = "thermal";
+ #thermal-sensor-cells = <0>;
+ };
+
pmr735a_gpios: gpio@8800 {
compatible = "qcom,pmr735a-gpio", "qcom,spmi-gpio";
reg = <0x8800>;
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
index 01501acb1790..87555a119d94 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
@@ -695,6 +695,11 @@
no-map;
};
+ pil_slpi_mem: slpi-region@88c00000 {
+ reg = <0 0x88c00000 0 0x1500000>;
+ no-map;
+ };
+
pil_nsp0_mem: cdsp0-region@8a100000 {
reg = <0 0x8a100000 0 0x1e00000>;
no-map;
@@ -783,6 +788,30 @@
};
};
+ smp2p-slpi {
+ compatible = "qcom,smp2p";
+ qcom,smem = <481>, <430>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_SLPI
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_SLPI
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <3>;
+
+ smp2p_slpi_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_slpi_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
soc: soc@0 {
compatible = "simple-bus";
#address-cells = <2>;
@@ -1221,7 +1250,7 @@
reg = <0 0x00980000 0 0x4000>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&gcc GCC_QUPV3_WRAP2_S0_CLK>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
clock-names = "se";
interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&rpmhpd SC8280XP_CX>;
@@ -1253,7 +1282,7 @@
reg = <0 0x00984000 0 0x4000>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&gcc GCC_QUPV3_WRAP2_S1_CLK>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
clock-names = "se";
interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&rpmhpd SC8280XP_CX>;
@@ -1285,7 +1314,7 @@
reg = <0 0x00988000 0 0x4000>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&gcc GCC_QUPV3_WRAP2_S2_CLK>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
clock-names = "se";
interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&rpmhpd SC8280XP_CX>;
@@ -1331,7 +1360,7 @@
reg = <0 0x0098c000 0 0x4000>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&gcc GCC_QUPV3_WRAP2_S3_CLK>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
clock-names = "se";
interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&rpmhpd SC8280XP_CX>;
@@ -1363,7 +1392,7 @@
reg = <0 0x00990000 0 0x4000>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&gcc GCC_QUPV3_WRAP2_S4_CLK>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
clock-names = "se";
interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&rpmhpd SC8280XP_CX>;
@@ -1395,7 +1424,7 @@
reg = <0 0x00994000 0 0x4000>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&gcc GCC_QUPV3_WRAP2_S5_CLK>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
clock-names = "se";
interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&rpmhpd SC8280XP_CX>;
@@ -1427,7 +1456,7 @@
reg = <0 0x00998000 0 0x4000>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&gcc GCC_QUPV3_WRAP2_S6_CLK>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
clock-names = "se";
interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&rpmhpd SC8280XP_CX>;
@@ -1459,7 +1488,7 @@
reg = <0 0x0099c000 0 0x4000>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&gcc GCC_QUPV3_WRAP2_S7_CLK>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
clock-names = "se";
interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&rpmhpd SC8280XP_CX>;
@@ -2454,291 +2483,74 @@
reg = <0x0 0x01fc0000 0x0 0x30000>;
};
- gpu: gpu@3d00000 {
- compatible = "qcom,adreno-690.0", "qcom,adreno";
+ remoteproc_slpi: remoteproc@2400000 {
+ compatible = "qcom,sc8280xp-slpi-pas", "qcom,sm8350-slpi-pas";
+ reg = <0 0x02400000 0 0x10000>;
- reg = <0 0x03d00000 0 0x40000>,
- <0 0x03d9e000 0 0x1000>,
- <0 0x03d61000 0 0x800>;
- reg-names = "kgsl_3d0_reg_memory",
- "cx_mem",
- "cx_dbgc";
- interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
- iommus = <&gpu_smmu 0 0xc00>, <&gpu_smmu 1 0xc00>;
- operating-points-v2 = <&gpu_opp_table>;
-
- qcom,gmu = <&gmu>;
- interconnects = <&gem_noc MASTER_GFX3D 0 &mc_virt SLAVE_EBI1 0>;
- interconnect-names = "gfx-mem";
- #cooling-cells = <2>;
-
- status = "disabled";
-
- gpu_opp_table: opp-table {
- compatible = "operating-points-v2";
+ interrupts-extended = <&pdc 9 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_slpi_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_slpi_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_slpi_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_slpi_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack";
- opp-270000000 {
- opp-hz = /bits/ 64 <270000000>;
- opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
- opp-peak-kBps = <451000>;
- };
-
- opp-410000000 {
- opp-hz = /bits/ 64 <410000000>;
- opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
- opp-peak-kBps = <1555000>;
- };
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
- opp-peak-kBps = <1555000>;
- };
-
- opp-547000000 {
- opp-hz = /bits/ 64 <547000000>;
- opp-level = <RPMH_REGULATOR_LEVEL_SVS_L2>;
- opp-peak-kBps = <1555000>;
- };
-
- opp-606000000 {
- opp-hz = /bits/ 64 <606000000>;
- opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
- opp-peak-kBps = <2736000>;
- };
-
- opp-640000000 {
- opp-hz = /bits/ 64 <640000000>;
- opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
- opp-peak-kBps = <2736000>;
- };
-
- opp-655000000 {
- opp-hz = /bits/ 64 <655000000>;
- opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
- opp-peak-kBps = <2736000>;
- };
-
- opp-690000000 {
- opp-hz = /bits/ 64 <690000000>;
- opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
- opp-peak-kBps = <2736000>;
- };
- };
- };
-
- gmu: gmu@3d6a000 {
- compatible = "qcom,adreno-gmu-690.0", "qcom,adreno-gmu";
- reg = <0 0x03d6a000 0 0x34000>,
- <0 0x03de0000 0 0x10000>,
- <0 0x0b290000 0 0x10000>;
- reg-names = "gmu", "rscc", "gmu_pdc";
- interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "hfi", "gmu";
- clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
- <&gpucc GPU_CC_CXO_CLK>,
- <&gcc GCC_DDRSS_GPU_AXI_CLK>,
- <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
- <&gpucc GPU_CC_AHB_CLK>,
- <&gpucc GPU_CC_HUB_CX_INT_CLK>,
- <&gpucc GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK>;
- clock-names = "gmu",
- "cxo",
- "axi",
- "memnoc",
- "ahb",
- "hub",
- "smmu_vote";
- power-domains = <&gpucc GPU_CC_CX_GDSC>,
- <&gpucc GPU_CC_GX_GDSC>;
- power-domain-names = "cx",
- "gx";
- iommus = <&gpu_smmu 5 0xc00>;
- operating-points-v2 = <&gmu_opp_table>;
-
- gmu_opp_table: opp-table {
- compatible = "operating-points-v2";
-
- opp-200000000 {
- opp-hz = /bits/ 64 <200000000>;
- opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
- };
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
- };
- };
- };
-
- gpucc: clock-controller@3d90000 {
- compatible = "qcom,sc8280xp-gpucc";
- reg = <0 0x03d90000 0 0x9000>;
- clocks = <&rpmhcc RPMH_CXO_CLK>,
- <&gcc GCC_GPU_GPLL0_CLK_SRC>,
- <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
- clock-names = "bi_tcxo",
- "gcc_gpu_gpll0_clk_src",
- "gcc_gpu_gpll0_div_clk_src";
-
- power-domains = <&rpmhpd SC8280XP_GFX>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- #power-domain-cells = <1>;
- };
-
- gpu_smmu: iommu@3da0000 {
- compatible = "qcom,sc8280xp-smmu-500", "qcom,adreno-smmu",
- "qcom,smmu-500", "arm,mmu-500";
- reg = <0 0x03da0000 0 0x20000>;
- #iommu-cells = <2>;
- #global-interrupts = <2>;
- interrupts = <GIC_SPI 672 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 678 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 679 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 680 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 681 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 682 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 683 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 684 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 686 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 687 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 688 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 689 IRQ_TYPE_LEVEL_HIGH>;
-
- clocks = <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
- <&gcc GCC_GPU_SNOC_DVM_GFX_CLK>,
- <&gpucc GPU_CC_AHB_CLK>,
- <&gpucc GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK>,
- <&gpucc GPU_CC_CX_GMU_CLK>,
- <&gpucc GPU_CC_HUB_CX_INT_CLK>,
- <&gpucc GPU_CC_HUB_AON_CLK>;
- clock-names = "gcc_gpu_memnoc_gfx_clk",
- "gcc_gpu_snoc_dvm_gfx_clk",
- "gpu_cc_ahb_clk",
- "gpu_cc_hlos1_vote_gpu_smmu_clk",
- "gpu_cc_cx_gmu_clk",
- "gpu_cc_hub_cx_int_clk",
- "gpu_cc_hub_aon_clk";
-
- power-domains = <&gpucc GPU_CC_CX_GDSC>;
- dma-coherent;
- };
-
- usb_0_hsphy: phy@88e5000 {
- compatible = "qcom,sc8280xp-usb-hs-phy",
- "qcom,usb-snps-hs-5nm-phy";
- reg = <0 0x088e5000 0 0x400>;
clocks = <&rpmhcc RPMH_CXO_CLK>;
- clock-names = "ref";
- resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
-
- #phy-cells = <0>;
-
- status = "disabled";
- };
-
- usb_2_hsphy0: phy@88e7000 {
- compatible = "qcom,sc8280xp-usb-hs-phy",
- "qcom,usb-snps-hs-5nm-phy";
- reg = <0 0x088e7000 0 0x400>;
- clocks = <&gcc GCC_USB2_HS0_CLKREF_CLK>;
- clock-names = "ref";
- resets = <&gcc GCC_QUSB2PHY_HS0_MP_BCR>;
-
- #phy-cells = <0>;
-
- status = "disabled";
- };
-
- usb_2_hsphy1: phy@88e8000 {
- compatible = "qcom,sc8280xp-usb-hs-phy",
- "qcom,usb-snps-hs-5nm-phy";
- reg = <0 0x088e8000 0 0x400>;
- clocks = <&gcc GCC_USB2_HS1_CLKREF_CLK>;
- clock-names = "ref";
- resets = <&gcc GCC_QUSB2PHY_HS1_MP_BCR>;
-
- #phy-cells = <0>;
-
- status = "disabled";
- };
-
- usb_2_hsphy2: phy@88e9000 {
- compatible = "qcom,sc8280xp-usb-hs-phy",
- "qcom,usb-snps-hs-5nm-phy";
- reg = <0 0x088e9000 0 0x400>;
- clocks = <&gcc GCC_USB2_HS2_CLKREF_CLK>;
- clock-names = "ref";
- resets = <&gcc GCC_QUSB2PHY_HS2_MP_BCR>;
-
- #phy-cells = <0>;
-
- status = "disabled";
- };
-
- usb_2_hsphy3: phy@88ea000 {
- compatible = "qcom,sc8280xp-usb-hs-phy",
- "qcom,usb-snps-hs-5nm-phy";
- reg = <0 0x088ea000 0 0x400>;
- clocks = <&gcc GCC_USB2_HS3_CLKREF_CLK>;
- clock-names = "ref";
- resets = <&gcc GCC_QUSB2PHY_HS3_MP_BCR>;
-
- #phy-cells = <0>;
-
- status = "disabled";
- };
-
- usb_2_qmpphy0: phy@88ef000 {
- compatible = "qcom,sc8280xp-qmp-usb3-uni-phy";
- reg = <0 0x088ef000 0 0x2000>;
-
- clocks = <&gcc GCC_USB3_MP_PHY_AUX_CLK>,
- <&gcc GCC_USB3_MP0_CLKREF_CLK>,
- <&gcc GCC_USB3_MP_PHY_COM_AUX_CLK>,
- <&gcc GCC_USB3_MP_PHY_PIPE_0_CLK>;
- clock-names = "aux", "ref", "com_aux", "pipe";
+ clock-names = "xo";
- resets = <&gcc GCC_USB3_UNIPHY_MP0_BCR>,
- <&gcc GCC_USB3UNIPHY_PHY_MP0_BCR>;
- reset-names = "phy", "phy_phy";
+ power-domains = <&rpmhpd SC8280XP_LCX>,
+ <&rpmhpd SC8280XP_LMX>;
+ power-domain-names = "lcx", "lmx";
- power-domains = <&gcc USB30_MP_GDSC>;
+ memory-region = <&pil_slpi_mem>;
- #clock-cells = <0>;
- clock-output-names = "usb2_phy0_pipe_clk";
+ qcom,qmp = <&aoss_qmp>;
- #phy-cells = <0>;
+ qcom,smem-states = <&smp2p_slpi_out 0>;
+ qcom,smem-state-names = "stop";
status = "disabled";
- };
-
- usb_2_qmpphy1: phy@88f1000 {
- compatible = "qcom,sc8280xp-qmp-usb3-uni-phy";
- reg = <0 0x088f1000 0 0x2000>;
- clocks = <&gcc GCC_USB3_MP_PHY_AUX_CLK>,
- <&gcc GCC_USB3_MP1_CLKREF_CLK>,
- <&gcc GCC_USB3_MP_PHY_COM_AUX_CLK>,
- <&gcc GCC_USB3_MP_PHY_PIPE_1_CLK>;
- clock-names = "aux", "ref", "com_aux", "pipe";
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_SLPI
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_SLPI
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
- resets = <&gcc GCC_USB3_UNIPHY_MP1_BCR>,
- <&gcc GCC_USB3UNIPHY_PHY_MP1_BCR>;
- reset-names = "phy", "phy_phy";
+ label = "slpi";
+ qcom,remote-pid = <3>;
- power-domains = <&gcc USB30_MP_GDSC>;
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "sdsp";
+ qcom,non-secure-domain;
+ #address-cells = <1>;
+ #size-cells = <0>;
- #clock-cells = <0>;
- clock-output-names = "usb2_phy1_pipe_clk";
+ compute-cb@1 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <1>;
+ iommus = <&apps_smmu 0x0521 0x0>;
+ };
- #phy-cells = <0>;
+ compute-cb@2 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <2>;
+ iommus = <&apps_smmu 0x0522 0x0>;
+ };
- status = "disabled";
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x0523 0x0>;
+ };
+ };
+ };
};
remoteproc_adsp: remoteproc@3000000 {
@@ -3166,6 +2978,180 @@
#reset-cells = <1>;
};
+ gpu: gpu@3d00000 {
+ compatible = "qcom,adreno-690.0", "qcom,adreno";
+
+ reg = <0 0x03d00000 0 0x40000>,
+ <0 0x03d9e000 0 0x1000>,
+ <0 0x03d61000 0 0x800>;
+ reg-names = "kgsl_3d0_reg_memory",
+ "cx_mem",
+ "cx_dbgc";
+ interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&gpu_smmu 0 0xc00>, <&gpu_smmu 1 0xc00>;
+ operating-points-v2 = <&gpu_opp_table>;
+
+ qcom,gmu = <&gmu>;
+ interconnects = <&gem_noc MASTER_GFX3D 0 &mc_virt SLAVE_EBI1 0>;
+ interconnect-names = "gfx-mem";
+ #cooling-cells = <2>;
+
+ status = "disabled";
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-270000000 {
+ opp-hz = /bits/ 64 <270000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ opp-peak-kBps = <451000>;
+ };
+
+ opp-410000000 {
+ opp-hz = /bits/ 64 <410000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ opp-peak-kBps = <1555000>;
+ };
+
+ opp-500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ opp-peak-kBps = <1555000>;
+ };
+
+ opp-547000000 {
+ opp-hz = /bits/ 64 <547000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L2>;
+ opp-peak-kBps = <1555000>;
+ };
+
+ opp-606000000 {
+ opp-hz = /bits/ 64 <606000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ opp-peak-kBps = <2736000>;
+ };
+
+ opp-640000000 {
+ opp-hz = /bits/ 64 <640000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ opp-peak-kBps = <2736000>;
+ };
+
+ opp-655000000 {
+ opp-hz = /bits/ 64 <655000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ opp-peak-kBps = <2736000>;
+ };
+
+ opp-690000000 {
+ opp-hz = /bits/ 64 <690000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ opp-peak-kBps = <2736000>;
+ };
+ };
+ };
+
+ gmu: gmu@3d6a000 {
+ compatible = "qcom,adreno-gmu-690.0", "qcom,adreno-gmu";
+ reg = <0 0x03d6a000 0 0x34000>,
+ <0 0x03de0000 0 0x10000>,
+ <0 0x0b290000 0 0x10000>;
+ reg-names = "gmu", "rscc", "gmu_pdc";
+ interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hfi", "gmu";
+ clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
+ <&gpucc GPU_CC_CXO_CLK>,
+ <&gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&gpucc GPU_CC_AHB_CLK>,
+ <&gpucc GPU_CC_HUB_CX_INT_CLK>,
+ <&gpucc GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK>;
+ clock-names = "gmu",
+ "cxo",
+ "axi",
+ "memnoc",
+ "ahb",
+ "hub",
+ "smmu_vote";
+ power-domains = <&gpucc GPU_CC_CX_GDSC>,
+ <&gpucc GPU_CC_GX_GDSC>;
+ power-domain-names = "cx",
+ "gx";
+ iommus = <&gpu_smmu 5 0xc00>;
+ operating-points-v2 = <&gmu_opp_table>;
+
+ gmu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+
+ opp-500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+ };
+ };
+
+ gpucc: clock-controller@3d90000 {
+ compatible = "qcom,sc8280xp-gpucc";
+ reg = <0 0x03d90000 0 0x9000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_GPU_GPLL0_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
+ clock-names = "bi_tcxo",
+ "gcc_gpu_gpll0_clk_src",
+ "gcc_gpu_gpll0_div_clk_src";
+
+ power-domains = <&rpmhpd SC8280XP_GFX>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ gpu_smmu: iommu@3da0000 {
+ compatible = "qcom,sc8280xp-smmu-500", "qcom,adreno-smmu",
+ "qcom,smmu-500", "arm,mmu-500";
+ reg = <0 0x03da0000 0 0x20000>;
+ #iommu-cells = <2>;
+ #global-interrupts = <2>;
+ interrupts = <GIC_SPI 672 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 678 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 679 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 680 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 681 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 682 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 683 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 684 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 686 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 687 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 688 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 689 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&gcc GCC_GPU_SNOC_DVM_GFX_CLK>,
+ <&gpucc GPU_CC_AHB_CLK>,
+ <&gpucc GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK>,
+ <&gpucc GPU_CC_CX_GMU_CLK>,
+ <&gpucc GPU_CC_HUB_CX_INT_CLK>,
+ <&gpucc GPU_CC_HUB_AON_CLK>;
+ clock-names = "gcc_gpu_memnoc_gfx_clk",
+ "gcc_gpu_snoc_dvm_gfx_clk",
+ "gpu_cc_ahb_clk",
+ "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ "gpu_cc_cx_gmu_clk",
+ "gpu_cc_hub_cx_int_clk",
+ "gpu_cc_hub_aon_clk";
+
+ power-domains = <&gpucc GPU_CC_CX_GDSC>;
+ dma-coherent;
+ };
+
sdc2: mmc@8804000 {
compatible = "qcom,sc8280xp-sdhci", "qcom,sdhci-msm-v5";
reg = <0 0x08804000 0 0x1000>;
@@ -3209,6 +3195,71 @@
};
};
+ usb_0_hsphy: phy@88e5000 {
+ compatible = "qcom,sc8280xp-usb-hs-phy",
+ "qcom,usb-snps-hs-5nm-phy";
+ reg = <0 0x088e5000 0 0x400>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "ref";
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb_2_hsphy0: phy@88e7000 {
+ compatible = "qcom,sc8280xp-usb-hs-phy",
+ "qcom,usb-snps-hs-5nm-phy";
+ reg = <0 0x088e7000 0 0x400>;
+ clocks = <&gcc GCC_USB2_HS0_CLKREF_CLK>;
+ clock-names = "ref";
+ resets = <&gcc GCC_QUSB2PHY_HS0_MP_BCR>;
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb_2_hsphy1: phy@88e8000 {
+ compatible = "qcom,sc8280xp-usb-hs-phy",
+ "qcom,usb-snps-hs-5nm-phy";
+ reg = <0 0x088e8000 0 0x400>;
+ clocks = <&gcc GCC_USB2_HS1_CLKREF_CLK>;
+ clock-names = "ref";
+ resets = <&gcc GCC_QUSB2PHY_HS1_MP_BCR>;
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb_2_hsphy2: phy@88e9000 {
+ compatible = "qcom,sc8280xp-usb-hs-phy",
+ "qcom,usb-snps-hs-5nm-phy";
+ reg = <0 0x088e9000 0 0x400>;
+ clocks = <&gcc GCC_USB2_HS2_CLKREF_CLK>;
+ clock-names = "ref";
+ resets = <&gcc GCC_QUSB2PHY_HS2_MP_BCR>;
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb_2_hsphy3: phy@88ea000 {
+ compatible = "qcom,sc8280xp-usb-hs-phy",
+ "qcom,usb-snps-hs-5nm-phy";
+ reg = <0 0x088ea000 0 0x400>;
+ clocks = <&gcc GCC_USB2_HS3_CLKREF_CLK>;
+ clock-names = "ref";
+ resets = <&gcc GCC_QUSB2PHY_HS3_MP_BCR>;
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
usb_0_qmpphy: phy@88eb000 {
compatible = "qcom,sc8280xp-qmp-usb43dp-phy";
reg = <0 0x088eb000 0 0x4000>;
@@ -3256,6 +3307,54 @@
};
};
+ usb_2_qmpphy0: phy@88ef000 {
+ compatible = "qcom,sc8280xp-qmp-usb3-uni-phy";
+ reg = <0 0x088ef000 0 0x2000>;
+
+ clocks = <&gcc GCC_USB3_MP_PHY_AUX_CLK>,
+ <&gcc GCC_USB3_MP0_CLKREF_CLK>,
+ <&gcc GCC_USB3_MP_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_MP_PHY_PIPE_0_CLK>;
+ clock-names = "aux", "ref", "com_aux", "pipe";
+
+ resets = <&gcc GCC_USB3_UNIPHY_MP0_BCR>,
+ <&gcc GCC_USB3UNIPHY_PHY_MP0_BCR>;
+ reset-names = "phy", "phy_phy";
+
+ power-domains = <&gcc USB30_MP_GDSC>;
+
+ #clock-cells = <0>;
+ clock-output-names = "usb2_phy0_pipe_clk";
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb_2_qmpphy1: phy@88f1000 {
+ compatible = "qcom,sc8280xp-qmp-usb3-uni-phy";
+ reg = <0 0x088f1000 0 0x2000>;
+
+ clocks = <&gcc GCC_USB3_MP_PHY_AUX_CLK>,
+ <&gcc GCC_USB3_MP1_CLKREF_CLK>,
+ <&gcc GCC_USB3_MP_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_MP_PHY_PIPE_1_CLK>;
+ clock-names = "aux", "ref", "com_aux", "pipe";
+
+ resets = <&gcc GCC_USB3_UNIPHY_MP1_BCR>,
+ <&gcc GCC_USB3UNIPHY_PHY_MP1_BCR>;
+ reset-names = "phy", "phy_phy";
+
+ power-domains = <&gcc USB30_MP_GDSC>;
+
+ #clock-cells = <0>;
+ clock-output-names = "usb2_phy1_pipe_clk";
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
usb_1_hsphy: phy@8902000 {
compatible = "qcom,sc8280xp-usb-hs-phy",
"qcom,usb-snps-hs-5nm-phy";
@@ -4141,7 +4240,7 @@
mdss0_mdp: display-controller@ae01000 {
compatible = "qcom,sc8280xp-dpu";
reg = <0 0x0ae01000 0 0x8f000>,
- <0 0x0aeb0000 0 0x2008>;
+ <0 0x0aeb0000 0 0x3000>;
reg-names = "mdp", "vbif";
clocks = <&gcc GCC_DISP_HF_AXI_CLK>,
@@ -4927,6 +5026,20 @@
};
};
+ pcie_smmu: iommu@14f80000 {
+ compatible = "arm,smmu-v3";
+ reg = <0 0x14f80000 0 0x80000>;
+ #iommu-cells = <1>;
+ interrupts = <GIC_SPI 951 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 955 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 953 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eventq",
+ "gerror",
+ "cmdq-sync";
+ dma-coherent;
+ status = "reserved"; /* Controlled by QHEE. */
+ };
+
apps_smmu: iommu@15000000 {
compatible = "qcom,sc8280xp-smmu-500", "arm,mmu-500";
reg = <0 0x15000000 0 0x100000>;
@@ -5459,7 +5572,7 @@
mdss1_mdp: display-controller@22001000 {
compatible = "qcom,sc8280xp-dpu";
reg = <0 0x22001000 0 0x8f000>,
- <0 0x220b0000 0 0x2008>;
+ <0 0x220b0000 0 0x3000>;
reg-names = "mdp", "vbif";
clocks = <&gcc GCC_DISP_HF_AXI_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts b/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts
index d402f4c85b11..74cb29cb7f1a 100644
--- a/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts
+++ b/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts
@@ -175,6 +175,7 @@
* BAM DMA interconnects support is in place.
*/
/delete-property/ clocks;
+ /delete-property/ clock-names;
};
&blsp1_uart2 {
@@ -187,6 +188,7 @@
* BAM DMA interconnects support is in place.
*/
/delete-property/ clocks;
+ /delete-property/ clock-names;
};
&blsp2_uart1 {
@@ -507,7 +509,7 @@
vdd-3.3-ch0-supply = <&vreg_l19a_3p3>;
vdd-3.3-ch1-supply = <&vreg_l8b_3p3>;
- qcom,ath10k-calibration-variant = "Inforce_IFC6560";
+ qcom,calibration-variant = "Inforce_IFC6560";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm630-sony-xperia-nile.dtsi b/arch/arm64/boot/dts/qcom/sdm630-sony-xperia-nile.dtsi
index a4b722e0fc1e..40522e237eac 100644
--- a/arch/arm64/boot/dts/qcom/sdm630-sony-xperia-nile.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm630-sony-xperia-nile.dtsi
@@ -157,7 +157,7 @@
};
&adsp_pil {
- firmware-name = "qcom/sdm630/Sony/nile/adsp.mdt";
+ firmware-name = "qcom/sdm630/Sony/nile/adsp.mbn";
};
&blsp_i2c1 {
diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
index a2c079bac1a7..8b1a45a4e56e 100644
--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
@@ -4,6 +4,7 @@
* Copyright (c) 2020, AngeloGioacchino Del Regno <kholk11@gmail.com>
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-sdm660.h>
#include <dt-bindings/clock/qcom,gpucc-sdm660.h>
#include <dt-bindings/clock/qcom,mmcc-sdm660.h>
@@ -508,6 +509,12 @@
reg = <0x0 0xfed00000 0x0 0xa00000>;
no-map;
};
+
+ mdata_mem: mpss-metadata {
+ alloc-ranges = <0x0 0xa0000000 0x0 0x20000000>;
+ size = <0x0 0x4000>;
+ no-map;
+ };
};
smem: smem {
@@ -1055,7 +1062,7 @@
<&rpmpd SDM660_VDDMX>;
power-domain-names = "cx", "mx";
- memory-region = <&mba_region>, <&mpss_region>;
+ memory-region = <&mba_region>, <&mpss_region>, <&mdata_mem>;
status = "disabled";
@@ -1379,6 +1386,7 @@
<&xo_board>;
clock-names = "iface", "core", "xo";
+ resets = <&gcc GCC_SDCC2_BCR>;
interconnects = <&a2noc 3 &a2noc 10>,
<&gnoc 0 &cnoc 28>;
@@ -1433,6 +1441,8 @@
<&gcc GCC_SDCC1_ICE_CORE_CLK>;
clock-names = "iface", "core", "xo", "ice";
+ resets = <&gcc GCC_SDCC1_BCR>;
+
interconnects = <&a2noc 2 &a2noc 10>,
<&gnoc 0 &cnoc 27>;
interconnect-names = "sdhc-ddr", "cpu-sdhc";
@@ -1538,8 +1548,8 @@
<&sleep_clk>,
<&gcc GCC_MMSS_GPLL0_CLK>,
<&gcc GCC_MMSS_GPLL0_DIV_CLK>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi0_phy 0>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
<0>,
<0>,
<0>,
@@ -1661,8 +1671,8 @@
assigned-clocks = <&mmcc BYTE0_CLK_SRC>,
<&mmcc PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
clocks = <&mmcc MDSS_MDP_CLK>,
<&mmcc MDSS_BYTE0_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts b/arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts
index 2c1172aa97e4..31ed26c31e6e 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts
@@ -45,10 +45,11 @@
};
&hsusb_phy {
- status = "okay";
vdd-supply = <&pm8953_l3>;
vdda-pll-supply = <&pm8953_l7>;
vdda-phy-dpdm-supply = <&pm8953_l13>;
+
+ status = "okay";
};
&i2c_3 {
@@ -81,12 +82,22 @@
};
&lpass {
+ firmware-name = "qcom/msm8953/fairphone/fp3/adsp.mbn";
+
status = "okay";
};
-&pm8953_resin {
+&mpss {
+ firmware-name = "qcom/msm8953/fairphone/fp3/mba.mbn",
+ "qcom/msm8953/fairphone/fp3/modem.mbn";
+ pll-supply = <&pm8953_l7>;
+
status = "okay";
+};
+
+&pm8953_resin {
linux,code = <KEY_VOLUMEDOWN>;
+ status = "okay";
};
&pmi632_lpg {
@@ -148,17 +159,19 @@
};
&sdhc_1 {
- status = "okay";
vmmc-supply = <&pm8953_l8>;
vqmmc-supply = <&pm8953_l5>;
+
+ status = "okay";
};
&sdhc_2 {
- status = "okay";
vmmc-supply = <&pm8953_l11>;
vqmmc-supply = <&pm8953_l12>;
cd-gpios = <&tlmm 133 GPIO_ACTIVE_LOW>;
+
+ status = "okay";
};
&rpm_requests {
@@ -175,10 +188,12 @@
regulator-min-microvolt = <984000>;
regulator-max-microvolt = <1240000>;
};
+
pm8953_s4: s4 {
regulator-min-microvolt = <1036000>;
regulator-max-microvolt = <2040000>;
};
+
pm8953_s5: s5 {
regulator-min-microvolt = <1036000>;
regulator-max-microvolt = <2040000>;
@@ -188,66 +203,82 @@
regulator-min-microvolt = <975000>;
regulator-max-microvolt = <1050000>;
};
+
pm8953_l2: l2 {
regulator-min-microvolt = <975000>;
regulator-max-microvolt = <1175000>;
};
+
pm8953_l3: l3 {
regulator-min-microvolt = <925000>;
regulator-max-microvolt = <925000>;
};
+
pm8953_l5: l5 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
+
pm8953_l6: l6 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
+
pm8953_l7: l7 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1900000>;
};
+
pm8953_l8: l8 {
regulator-min-microvolt = <2900000>;
regulator-max-microvolt = <2900000>;
};
+
pm8953_l9: l9 {
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3300000>;
};
+
pm8953_l10: l10 {
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <3000000>;
};
+
pm8953_l11: l11 {
regulator-min-microvolt = <2950000>;
regulator-max-microvolt = <2950000>;
};
+
pm8953_l12: l12 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2950000>;
};
+
pm8953_l13: l13 {
regulator-min-microvolt = <3125000>;
regulator-max-microvolt = <3125000>;
};
+
pm8953_l16: l16 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
+
pm8953_l17: l17 {
regulator-min-microvolt = <2850000>;
regulator-max-microvolt = <2850000>;
};
+
pm8953_l19: l19 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1350000>;
};
+
pm8953_l22: l22 {
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
};
+
pm8953_l23: l23 {
regulator-min-microvolt = <975000>;
regulator-max-microvolt = <1225000>;
@@ -276,9 +307,14 @@
};
&wcnss {
+ firmware-name = "qcom/msm8953/fairphone/fp3/wcnss.mbn";
+ vddpx-supply = <&pm8953_l5>;
+
status = "okay";
+};
- vddpx-supply = <&pm8953_l5>;
+&wcnss_ctrl {
+ firmware-name = "qcom/msm8953/fairphone/fp3/WCNSS_qcom_wlan_nv.bin";
};
&wcnss_iris {
diff --git a/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts b/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts
index 7167f75bced3..a9926ad6c6f9 100644
--- a/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts
+++ b/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts
@@ -107,6 +107,7 @@
status = "okay";
vdd-supply = <&vreg_l1b_0p925>;
+ vdda-pll-supply = <&vreg_l10a_1p8>;
vdda-phy-dpdm-supply = <&vreg_l7b_3p125>;
};
@@ -404,6 +405,8 @@
&sdhc_2 {
status = "okay";
+ cd-gpios = <&tlmm 54 GPIO_ACTIVE_HIGH>;
+
vmmc-supply = <&vreg_l5b_2p95>;
vqmmc-supply = <&vreg_l2b_2p95>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm660.dtsi b/arch/arm64/boot/dts/qcom/sdm660.dtsi
index 3164a4817e32..ef4a563c0feb 100644
--- a/arch/arm64/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm660.dtsi
@@ -170,8 +170,8 @@
assigned-clocks = <&mmcc BYTE1_CLK_SRC>,
<&mmcc PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
clocks = <&mmcc MDSS_MDP_CLK>,
<&mmcc MDSS_BYTE1_CLK>,
@@ -239,10 +239,10 @@
<&sleep_clk>,
<&gcc GCC_MMSS_GPLL0_CLK>,
<&gcc GCC_MMSS_GPLL0_DIV_CLK>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi1_phy 1>,
- <&mdss_dsi1_phy 0>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
<0>,
<0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 279e62ec5433..c33f3de779f6 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -6,7 +6,9 @@
* Copyright (c) 2022, Richard Acayan. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
#include <dt-bindings/clock/qcom,gpucc-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
@@ -1188,6 +1190,34 @@
gpio-ranges = <&tlmm 0 0 151>;
wakeup-parent = <&pdc>;
+ cci0_default: cci0-default-state {
+ pins = "gpio17", "gpio18";
+ function = "cci_i2c";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ cci0_sleep: cci0-sleep-state {
+ pins = "gpio17", "gpio18";
+ function = "cci_i2c";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ cci1_default: cci1-default-state {
+ pins = "gpio19", "gpio20";
+ function = "cci_i2c";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ cci1_sleep: cci1-sleep-state {
+ pins = "gpio19", "gpio20";
+ function = "cci_i2c";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
qup_i2c0_default: qup-i2c0-default-state {
pins = "gpio0", "gpio1";
function = "qup0";
@@ -1594,6 +1624,174 @@
#interrupt-cells = <4>;
};
+ cci: cci@ac4a000 {
+ compatible = "qcom,sdm670-cci", "qcom,msm8996-cci";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0 0x0ac4a000 0 0x4000>;
+ interrupts = <GIC_SPI 460 IRQ_TYPE_EDGE_RISING>;
+ power-domains = <&camcc TITAN_TOP_GDSC>;
+
+ clocks = <&camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&camcc CAM_CC_SOC_AHB_CLK>,
+ <&camcc CAM_CC_CPAS_AHB_CLK>,
+ <&camcc CAM_CC_CCI_CLK>;
+ clock-names = "camnoc_axi",
+ "soc_ahb",
+ "cpas_ahb",
+ "cci";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&cci0_default &cci1_default>;
+ pinctrl-1 = <&cci0_sleep &cci1_sleep>;
+
+ status = "disabled";
+
+ cci_i2c0: i2c-bus@0 {
+ reg = <0>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ cci_i2c1: i2c-bus@1 {
+ reg = <1>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ camss: isp@acb3000 {
+ compatible = "qcom,sdm670-camss";
+ reg = <0 0x0acb3000 0 0x1000>,
+ <0 0x0acba000 0 0x1000>,
+ <0 0x0acc8000 0 0x1000>,
+ <0 0x0ac65000 0 0x1000>,
+ <0 0x0ac66000 0 0x1000>,
+ <0 0x0ac67000 0 0x1000>,
+ <0 0x0acaf000 0 0x4000>,
+ <0 0x0acb6000 0 0x4000>,
+ <0 0x0acc4000 0 0x4000>;
+ reg-names = "csid0",
+ "csid1",
+ "csid2",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "vfe0",
+ "vfe1",
+ "vfe_lite";
+
+ interrupts = <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "csid0",
+ "csid1",
+ "csid2",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "vfe0",
+ "vfe1",
+ "vfe_lite";
+
+ clocks = <&camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&camcc CAM_CC_CPAS_AHB_CLK>,
+ <&camcc CAM_CC_IFE_0_CSID_CLK>,
+ <&camcc CAM_CC_IFE_1_CSID_CLK>,
+ <&camcc CAM_CC_IFE_LITE_CSID_CLK>,
+ <&camcc CAM_CC_CSIPHY0_CLK>,
+ <&camcc CAM_CC_CSI0PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY1_CLK>,
+ <&camcc CAM_CC_CSI1PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY2_CLK>,
+ <&camcc CAM_CC_CSI2PHYTIMER_CLK>,
+ <&gcc GCC_CAMERA_AHB_CLK>,
+ <&gcc GCC_CAMERA_AXI_CLK>,
+ <&camcc CAM_CC_SOC_AHB_CLK>,
+ <&camcc CAM_CC_IFE_0_CLK>,
+ <&camcc CAM_CC_IFE_0_AXI_CLK>,
+ <&camcc CAM_CC_IFE_0_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_1_CLK>,
+ <&camcc CAM_CC_IFE_1_AXI_CLK>,
+ <&camcc CAM_CC_IFE_1_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_LITE_CLK>,
+ <&camcc CAM_CC_IFE_LITE_CPHY_RX_CLK>;
+ clock-names = "camnoc_axi",
+ "cpas_ahb",
+ "csi0",
+ "csi1",
+ "csi2",
+ "csiphy0",
+ "csiphy0_timer",
+ "csiphy1",
+ "csiphy1_timer",
+ "csiphy2",
+ "csiphy2_timer",
+ "gcc_camera_ahb",
+ "gcc_camera_axi",
+ "soc_ahb",
+ "vfe0",
+ "vfe0_axi",
+ "vfe0_cphy_rx",
+ "vfe1",
+ "vfe1_axi",
+ "vfe1_cphy_rx",
+ "vfe_lite",
+ "vfe_lite_cphy_rx";
+
+ iommus = <&apps_smmu 0x808 0x0>,
+ <&apps_smmu 0x810 0x8>,
+ <&apps_smmu 0xc08 0x0>,
+ <&apps_smmu 0xc10 0x8>;
+
+ power-domains = <&camcc IFE_0_GDSC>,
+ <&camcc IFE_1_GDSC>,
+ <&camcc TITAN_TOP_GDSC>;
+ power-domain-names = "ife0",
+ "ife1",
+ "top";
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ camss_endpoint0: endpoint {
+ status = "disabled";
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ camss_endpoint1: endpoint {
+ status = "disabled";
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ camss_endpoint2: endpoint {
+ status = "disabled";
+ };
+ };
+ };
+ };
+
camcc: clock-controller@ad00000 {
compatible = "qcom,sdm670-camcc", "qcom,sdm845-camcc";
reg = <0 0x0ad00000 0 0x10000>;
@@ -1635,7 +1833,7 @@
mdss_mdp: display-controller@ae01000 {
compatible = "qcom,sdm670-dpu";
reg = <0 0x0ae01000 0 0x8f000>,
- <0 0x0aeb0000 0 0x2008>;
+ <0 0x0aeb0000 0 0x3000>;
reg-names = "mdp", "vbif";
clocks = <&gcc GCC_DISP_AXI_CLK>,
@@ -1720,8 +1918,8 @@
"bus";
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd SDM670_CX>;
@@ -1794,7 +1992,8 @@
"bus";
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>, <&mdss_dsi1_phy 1>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd SDM670_CX>;
@@ -1851,10 +2050,10 @@
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_DISP_GPLL0_CLK_SRC>,
<&gcc GCC_DISP_GPLL0_DIV_CLK_SRC>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
<0>,
<0>;
clock-names = "bi_tcxo",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
index 743c339ba108..b7e514f81f92 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
@@ -741,10 +741,6 @@ ap_ts_i2c: &i2c14 {
};
};
-&gmu {
- status = "okay";
-};
-
&gpu {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso b/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso
index 59970082da45..51f1a4883ab8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso
@@ -9,17 +9,6 @@
#include <dt-bindings/clock/qcom,camcc-sdm845.h>
#include <dt-bindings/gpio/gpio.h>
-/ {
- reserved-memory {
- linux,cma {
- compatible = "shared-dma-pool";
- size = <0x0 0x8000000>;
- reusable;
- linux,cma-default;
- };
- };
-};
-
&camss {
vdda-phy-supply = <&vreg_l1a_0p875>;
vdda-pll-supply = <&vreg_l26a_1p2>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index 1cc0f571e1f7..b5c63fa0365d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -444,10 +444,6 @@
<GCC_LPASS_SWAY_CLK>;
};
-&gmu {
- status = "okay";
-};
-
&gpi_dma0 {
status = "okay";
};
@@ -559,7 +555,8 @@
qcom,dual-dsi-mode;
/* DSI1 is slave, so use DSI0 clocks */
- assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
status = "okay";
@@ -717,19 +714,19 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
dai@3 {
- reg = <3>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA4>;
direction = <2>;
is-compress-dai;
};
@@ -756,6 +753,12 @@
cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
};
+&slpi_pas {
+ firmware-name = "qcom/sdm845/Thundercomm/db845c/slpi.mbn";
+
+ status = "okay";
+};
+
&sound {
compatible = "qcom,db845c-sndcard", "qcom,sdm845-sndcard";
pinctrl-0 = <&quat_mi2s_active
@@ -774,11 +777,7 @@
"DMIC2", "MIC BIAS3",
"DMIC3", "MIC BIAS3",
"SpkrLeft IN", "SPK1 OUT",
- "SpkrRight IN", "SPK2 OUT",
- "MM_DL1", "MultiMedia1 Playback",
- "MM_DL2", "MultiMedia2 Playback",
- "MM_DL4", "MultiMedia4 Playback",
- "MultiMedia3 Capture", "MM_UL3";
+ "SpkrRight IN", "SPK2 OUT";
mm1-dai-link {
link-name = "MultiMedia1";
@@ -1166,7 +1165,7 @@
vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
qcom,snoc-host-cap-8bit-quirk;
- qcom,ath10k-calibration-variant = "Thundercomm_DB845C";
+ qcom,calibration-variant = "Thundercomm_DB845C";
};
/* PINCTRL - additions to nodes defined in sdm845.dtsi */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index 2391f842c903..a98756e8b965 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -414,10 +414,6 @@
<GCC_LPASS_SWAY_CLK>;
};
-&gmu {
- status = "okay";
-};
-
&gpu {
status = "okay";
@@ -499,7 +495,8 @@
qcom,dual-dsi-mode;
/* DSI1 is slave, so use DSI0 clocks */
- assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
ports {
port@1 {
@@ -789,7 +786,7 @@
vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
qcom,snoc-host-cap-8bit-quirk;
- qcom,ath10k-calibration-variant = "Qualcomm_sdm845mtp";
+ qcom,calibration-variant = "Qualcomm_sdm845mtp";
};
/* PINCTRL - additions to nodes defined in sdm845.dtsi */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
index 46e25c53829a..b118d666e535 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
@@ -345,10 +345,6 @@
<GCC_LPASS_SWAY_CLK>;
};
-&gmu {
- status = "okay";
-};
-
&gpu {
status = "okay";
@@ -521,27 +517,27 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
dai@3 {
- reg = <3>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA4>;
};
dai@4 {
- reg = <4>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA5>;
};
dai@5 {
- reg = <5>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA6>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-samsung-starqltechn.dts b/arch/arm64/boot/dts/qcom/sdm845-samsung-starqltechn.dts
index d37a433130b9..d686531bf4ea 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-samsung-starqltechn.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-samsung-starqltechn.dts
@@ -7,15 +7,38 @@
/dts-v1/;
+#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
+#include <dt-bindings/sound/qcom,wcd934x.h>
+
#include "sdm845.dtsi"
+#include "pm8998.dtsi"
+#include "sdm845-wcd9340.dtsi"
+
+/delete-node/ &rmtfs_mem;
+/delete-node/ &spss_mem;
+/delete-node/ &adsp_mem;
+/delete-node/ &slpi_mem;
/ {
chassis-type = "handset";
model = "Samsung Galaxy S9 SM-G9600";
compatible = "samsung,starqltechn", "qcom,sdm845";
+ battery: battery {
+ compatible = "simple-battery";
+ constant-charge-current-max-microamp = <2150000>;
+ charge-full-design-microamp-hours = <3000000>;
+
+ over-voltage-threshold-microvolt = <4500000>;
+ voltage-min-design-microvolt = <3400000>;
+ voltage-max-design-microvolt = <4350000>;
+ };
+
chosen {
#address-cells = <2>;
#size-cells = <2>;
@@ -27,9 +50,25 @@
height = <2960>;
stride = <(1440 * 4)>;
format = "a8r8g8b8";
+ vci-supply = <&s2dos05_ldo4>;
+ vddr-supply = <&s2dos05_buck>;
+ vdd3-supply = <&s2dos05_ldo1>;
};
};
+ vib_regulator: gpio-regulator {
+ compatible = "regulator-fixed";
+
+ regulator-name = "haptic";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8998_gpios 18 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ regulator-boot-on;
+ };
+
vph_pwr: vph-pwr-regulator {
compatible = "regulator-fixed";
regulator-name = "vph_pwr";
@@ -68,9 +107,179 @@
ftrace-size = <0x40000>;
pmsg-size = <0x40000>;
};
+
+ /*
+ * It seems that reserving the old rmtfs_mem region is also needed to prevent
+ * random crashes, which are most likely modem-related; more testing is needed.
+ */
+ removed_region: removed-region@88f00000 {
+ reg = <0 0x88f00000 0 0x1c00000>;
+ no-map;
+ };
+
+ slpi_mem: slpi@96700000 {
+ reg = <0 0x96700000 0 0xf00000>;
+ no-map;
+ };
+
+ spss_mem: spss@97700000 {
+ reg = <0 0x97700000 0 0x100000>;
+ no-map;
+ };
+
+ adsp_mem: memory@97800000 {
+ reg = <0 0x97800000 0 0x2000000>;
+ no-map;
+ };
+
+ rmtfs_mem: rmtfs-mem@fde00000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0 0xfde00000 0 0x202000>;
+ qcom,use-guard-pages;
+ no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <QCOM_SCM_VMID_MSS_MSA>;
+ };
+ };
+
+ i2c21 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&tlmm 127 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&tlmm 128 GPIO_ACTIVE_HIGH>;
+ i2c-gpio,delay-us = <2>;
+ pinctrl-0 = <&i2c21_sda_state &i2c21_scl_state>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@60 {
+ compatible = "samsung,s2dos05";
+ reg = <0x60>;
+
+ regulators {
+ s2dos05_ldo1: ldo1 {
+ regulator-active-discharge = <1>;
+ regulator-enable-ramp-delay = <12000>;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-name = "ldo1";
+ };
+
+ s2dos05_ldo2: ldo2 {
+ regulator-active-discharge = <1>;
+ regulator-boot-on;
+ regulator-enable-ramp-delay = <12000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "ldo2";
+ };
+
+ s2dos05_ldo3: ldo3 {
+ regulator-active-discharge = <1>;
+ regulator-boot-on;
+ regulator-enable-ramp-delay = <12000>;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "ldo3";
+ };
+
+ s2dos05_ldo4: ldo4 {
+ regulator-active-discharge = <1>;
+ regulator-enable-ramp-delay = <12000>;
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3775000>;
+ regulator-name = "ldo4";
+ };
+
+ s2dos05_buck: buck {
+ regulator-active-discharge = <1>;
+ regulator-enable-ramp-delay = <12000>;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <2100000>;
+ regulator-name = "buck";
+ };
+ };
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+
+ key-vol-up {
+ label = "Volume Up";
+ gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <15>;
+ };
+
+ key-wink {
+ label = "Bixby";
+ gpios = <&pm8998_gpios 19 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_ENTER>;
+ debounce-interval = <15>;
+ };
+ };
+
+ vib_pwm: pwm {
+ compatible = "clk-pwm";
+ #pwm-cells = <2>;
+ assigned-clock-parents = <&rpmhcc RPMH_CXO_CLK>;
+ assigned-clocks = <&gcc GCC_GP1_CLK_SRC>;
+ clocks = <&gcc GCC_GP1_CLK>;
+ pinctrl-0 = <&motor_pwm_default_state>;
+ pinctrl-1 = <&motor_pwm_suspend_state>;
+ pinctrl-names = "default", "suspend";
+ };
+};
+
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ memory-region = <&gpu_mem>;
+ firmware-name = "qcom/sdm845/starqltechn/a630_zap.mbn";
+ };
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dsi0 {
+ vdda-supply = <&vreg_l26a_1p2>;
+ status = "okay";
+
+ panel@0 {
+ compatible = "samsung,s6e3ha8";
+ reg = <0>;
+ vci-supply = <&s2dos05_ldo4>;
+ vddr-supply = <&s2dos05_buck>;
+ vdd3-supply = <&s2dos05_ldo1>;
+ te-gpios = <&tlmm 10 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&dsi_default &dsi_te>;
+ pinctrl-1 = <&dsi_suspend &dsi_te>;
+ pinctrl-names = "default", "suspend";
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
};
};
+&mdss_dsi0_out {
+ data-lanes = <0 1 2 3>;
+ remote-endpoint = <&panel_in>;
+};
+
+&mdss_dsi0_phy {
+ vdds-supply = <&vdda_mipi_dsi0_pll>;
+ status = "okay";
+};
&apps_rsc {
regulators-0 {
@@ -135,8 +344,6 @@
vdda_sp_sensor:
vdda_ufs1_core:
vdda_ufs2_core:
- vdda_usb1_ss_core:
- vdda_usb2_ss_core:
vreg_l1a_0p875: ldo1 {
regulator-min-microvolt = <880000>;
regulator-max-microvolt = <880000>;
@@ -157,6 +364,7 @@
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
+ vdda_usb1_ss_core:
vdd_wcss_cx:
vdd_wcss_mx:
vdda_wcss_pll:
@@ -365,10 +573,79 @@
status = "okay";
};
+&gpi_dma1 {
+ status = "okay";
+};
+
&uart9 {
status = "okay";
};
+&i2c14 {
+ status = "okay";
+
+ pmic@66 {
+ compatible = "maxim,max77705";
+ reg = <0x66>;
+ interrupt-parent = <&pm8998_gpios>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&pmic_int_default>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ leds {
+ compatible = "maxim,max77705-rgb";
+
+ multi-led {
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_STATUS;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@3 {
+ reg = <3>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+ };
+
+ haptic {
+ compatible = "maxim,max77705-haptic";
+ haptic-supply = <&vib_regulator>;
+ pwms = <&vib_pwm 0 52084>;
+ };
+ };
+
+ max77705_charger: charger@69 {
+ reg = <0x69>;
+ compatible = "maxim,max77705-charger";
+ monitored-battery = <&battery>;
+ interrupt-parent = <&pm8998_gpios>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+
+ };
+
+ fuel-gauge@36 {
+ reg = <0x36>;
+ compatible = "maxim,max77705-battery";
+ power-supplies = <&max77705_charger>;
+ maxim,rsns-microohm = <5000>;
+ interrupt-parent = <&pm8998_gpios>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
&ufs_mem_hc {
reset-gpios = <&tlmm 150 GPIO_ACTIVE_LOW>;
vcc-supply = <&vreg_l20a_2p95>;
@@ -383,14 +660,249 @@
};
&sdhc_2 {
- pinctrl-names = "default";
pinctrl-0 = <&sdc2_clk_state &sdc2_cmd_state &sdc2_data_state &sd_card_det_n_state>;
+ pinctrl-names = "default";
cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vreg_l21a_2p95>;
vqmmc-supply = <&vddpx_2>;
status = "okay";
};
+&i2c11 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ touchscreen@48 {
+ compatible = "samsung,s6sy761";
+ reg = <0x48>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <120 IRQ_TYPE_LEVEL_HIGH>;
+ vdd-supply = <&s2dos05_ldo2>;
+ avdd-supply = <&s2dos05_ldo3>;
+
+ pinctrl-0 = <&touch_irq_state>;
+ pinctrl-names = "default";
+ };
+};
+
+&adsp_pas {
+ firmware-name = "qcom/sdm845/starqltechn/adsp.mbn";
+ status = "okay";
+};
+
+&lpasscc {
+ status = "okay";
+};
+
+&sound {
+ compatible = "qcom,sdm845-sndcard";
+ model = "Samsung Galaxy S9";
+ pinctrl-0 = <&quat_mi2s_active &quat_mi2s_sd0_active &quat_mi2s_sd1_active>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ audio-routing = "RX_BIAS", "MCLK",
+ "AMIC2", "MIC BIAS2", /* Headset Mic */
+ "AMIC3", "MIC BIAS2", /* FM radio left Tx */
+ "AMIC4", "MIC BIAS2", /* FM radio right Tx */
+ "DMIC0", "MCLK", /* Bottom Mic */
+ "DMIC0", "MIC BIAS1",
+ "DMIC2", "MCLK", /* Top Mic */
+ "DMIC2", "MIC BIAS3";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ mm2-dai-link {
+ link-name = "MultiMedia2";
+
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA2>;
+ };
+ };
+
+ mm3-dai-link {
+ link-name = "MultiMedia3";
+
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>;
+ };
+ };
+
+ mm4-dai-link {
+ link-name = "MultiMedia4";
+
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA4>;
+ };
+ };
+
+ mm5-dai-link {
+ link-name = "MultiMedia5";
+
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA5>;
+ };
+ };
+
+ mm6-dai-link {
+ link-name = "MultiMedia6";
+
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA6>;
+ };
+ };
+
+ slim-dai-link {
+ link-name = "SLIM Playback 1";
+
+ codec {
+ sound-dai = <&wcd9340 AIF1_PB>;
+ };
+
+ cpu {
+ sound-dai = <&q6afedai SLIMBUS_0_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+
+ slimcap-dai-link {
+ link-name = "SLIM Capture 1";
+
+ codec {
+ sound-dai = <&wcd9340 AIF1_CAP>;
+ };
+
+ cpu {
+ sound-dai = <&q6afedai SLIMBUS_0_TX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+
+ slim2-dai-link {
+ link-name = "SLIM Playback 2";
+
+ codec {
+ sound-dai = <&wcd9340 AIF2_PB>;
+ };
+
+ cpu {
+ sound-dai = <&q6afedai SLIMBUS_1_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+
+ slimcap2-dai-link {
+ link-name = "SLIM Capture 2";
+
+ codec {
+ sound-dai = <&wcd9340 AIF2_CAP>;
+ };
+
+ cpu {
+ sound-dai = <&q6afedai SLIMBUS_1_TX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+
+ slimcap3-dai-link {
+ link-name = "SLIM Capture 3";
+
+ codec {
+ sound-dai = <&wcd9340 AIF3_CAP>;
+ };
+
+ cpu {
+ sound-dai = <&q6afedai SLIMBUS_2_TX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+};
+
+&q6afedai {
+ dai@22 {
+ reg = <22>;
+ qcom,sd-lines = <1>;
+ };
+
+ dai@23 {
+ reg = <23>;
+ qcom,sd-lines = <0>;
+ };
+};
+
+&q6asmdai {
+ dai@0 {
+ reg = <0>;
+ };
+
+ dai@1 {
+ reg = <1>;
+ };
+
+ dai@2 {
+ reg = <2>;
+ };
+
+ dai@3 {
+ reg = <3>;
+ };
+
+ dai@4 {
+ reg = <4>;
+ };
+
+ dai@5 {
+ reg = <5>;
+ };
+};
+
+&wcd9340 {
+ reset-gpios = <&tlmm 64 GPIO_ACTIVE_HIGH>;
+ vdd-buck-supply = <&vreg_s4a_1p8>;
+ vdd-buck-sido-supply = <&vreg_s4a_1p8>;
+ vdd-tx-supply = <&vreg_s4a_1p8>;
+ vdd-rx-supply = <&vreg_s4a_1p8>;
+ vdd-io-supply = <&vreg_s4a_1p8>;
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <2700000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+};
+
+&mss_pil {
+ firmware-name = "qcom/sdm845/starqltechn/mba.mbn",
+ "qcom/sdm845/starqltechn/modem.mbn";
+ status = "okay";
+};
+
+&ipa {
+ qcom,gsi-loader = "self";
+ memory-region = <&ipa_fw_mem>;
+ firmware-name = "qcom/sdm845/starqltechn/ipa_fws.mbn";
+ status = "okay";
+};
+
&usb_1 {
status = "okay";
};
@@ -418,16 +930,75 @@
status = "okay";
};
-&wifi {
- vdd-0.8-cx-mx-supply = <&vreg_l5a_0p8>;
- vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
- vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
- vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
+&pm8998_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
status = "okay";
};
+&pm8998_gpios {
+ pmic_int_default: pmic-int-default-state {
+ pins = "gpio11";
+ function = "normal";
+ input-enable;
+ bias-disable;
+ power-source = <0>;
+ };
+};
+
&tlmm {
- gpio-reserved-ranges = <0 4>, <27 4>, <81 4>, <85 4>;
+ gpio-reserved-ranges = <27 4>, /* SPI (eSE - embedded Secure Element) */
+ <85 4>; /* SPI (fingerprint reader) */
+
+ dsi_default: dsi-default-state {
+ pins = "gpio6";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ dsi_suspend: dsi-suspend-state {
+ pins = "gpio6";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ dsi_te: dsi-te-state {
+ pins = "gpio10";
+ function = "mdp_vsync";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ i2c21_sda_state: i2c21-sda-state {
+ pins = "gpio127";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ i2c21_scl_state: i2c21-scl-state {
+ pins = "gpio128";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ motor_pwm_default_state: motor-pwm-active-state {
+ pins = "gpio57";
+ function = "gcc_gp1";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+
+ motor_pwm_suspend_state: motor-pwm-suspend-state {
+ pins = "gpio57";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
sdc2_clk_state: sdc2-clk-state {
pins = "sdc2_clk";
@@ -457,4 +1028,15 @@
function = "gpio";
bias-pull-up;
};
+
+ touch_irq_state: touch-irq-state {
+ pins = "gpio120";
+ function = "gpio";
+ bias-disable;
+ };
+};
+
+&qup_i2c11_default {
+ drive-strength = <2>;
+ bias-disable;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
index ddb82ecb0a92..2cf7b5e1243c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022, Alexander Martinz <amartinz@shiftphones.com>
- * Copyright (c) 2022, Caleb Connolly <caleb@connolly.tech>
+ * Copyright (c) 2022, Casey Connolly <casey.connolly@linaro.org>
* Copyright (c) 2022, Dylan Van Assche <me@dylanvanassche.be>
*/
@@ -419,10 +419,6 @@
<GCC_LPASS_SWAY_CLK>;
};
-&gmu {
- status = "okay";
-};
-
&gpu {
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi
index b02a1dc5fecd..a3a304e1ac87 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi
@@ -415,10 +415,6 @@
<GCC_LPASS_SWAY_CLK>;
};
-&gmu {
- status = "okay";
-};
-
&gpi_dma0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-common.dtsi
index 617b17b2d7d9..7810b0ce7591 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-common.dtsi
@@ -239,7 +239,7 @@
<GCC_LPASS_SWAY_CLK>;
};
-&gmu {
+&gpi_dma1 {
status = "okay";
};
@@ -392,15 +392,15 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
};
@@ -408,6 +408,10 @@
status = "okay";
};
+&qupv3_id_1 {
+ status = "okay";
+};
+
&sdhc_2 {
status = "okay";
@@ -513,6 +517,37 @@
function = "gpio";
bias-pull-up;
};
+
+ ts_int_default: ts-int-default-state {
+ pins = "gpio31";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-pull-down;
+ output-disable;
+ };
+
+ ts_reset_default: ts-reset-default-state {
+ pins = "gpio32";
+ function = "gpio";
+ drive-strength = <16>;
+ output-high;
+ };
+
+ ts_int_sleep: ts-int-sleep-state {
+ pins = "gpio31";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-disable;
+ };
+
+ ts_reset_sleep: ts-reset-sleep-state {
+ pins = "gpio32";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
};
&uart6 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-ebbg.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-ebbg.dts
index 76931ebad065..2d6f0e382a6c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-ebbg.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-ebbg.dts
@@ -13,3 +13,26 @@
compatible = "ebbg,ft8719";
status = "okay";
};
+
+&i2c14 {
+ status = "okay";
+
+ touchscreen@38 {
+ compatible = "focaltech,ft8719";
+ reg = <0x38>;
+
+ interrupts-extended = <&tlmm 31 IRQ_TYPE_EDGE_RISING>;
+ reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
+ panel = <&display_panel>;
+
+ iovcc-supply = <&vreg_l14a_1p8>;
+ vcc-supply = <&lab>;
+
+ pinctrl-0 = <&ts_int_default &ts_reset_default>;
+ pinctrl-1 = <&ts_int_sleep &ts_reset_sleep>;
+ pinctrl-names = "default", "sleep";
+
+ touchscreen-size-x = <1080>;
+ touchscreen-size-y = <2246>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-tianma.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-tianma.dts
index e9427851ebaa..b58964cde834 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-tianma.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-tianma.dts
@@ -13,3 +13,26 @@
compatible = "tianma,fhd-video", "novatek,nt36672a";
status = "okay";
};
+
+&i2c14 {
+ status = "okay";
+
+ touchscreen@1 {
+ compatible = "novatek,nt36672a-ts";
+ reg = <0x01>;
+
+ interrupts-extended = <&tlmm 31 IRQ_TYPE_EDGE_RISING>;
+ reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
+ panel = <&display_panel>;
+
+ iovcc-supply = <&vreg_l14a_1p8>;
+ vcc-supply = <&lab>;
+
+ pinctrl-0 = <&ts_int_default &ts_reset_default>;
+ pinctrl-1 = <&ts_int_sleep &ts_reset_sleep>;
+ pinctrl-names = "default", "sleep";
+
+ touchscreen-size-x = <1080>;
+ touchscreen-size-y = <2246>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
index e386b504e978..63cf879a7a29 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
@@ -381,10 +381,6 @@
<GCC_LPASS_SWAY_CLK>;
};
-&gmu {
- status = "okay";
-};
-
&gpi_dma0 {
status = "okay";
};
@@ -547,15 +543,15 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index d0314cdf0b92..3bc8471c658b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -7,6 +7,7 @@
#include <dt-bindings/clock/qcom,camcc-sdm845.h>
#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
#include <dt-bindings/clock/qcom,gpucc-sdm845.h>
#include <dt-bindings/clock/qcom,lpass-sdm845.h>
@@ -4545,7 +4546,7 @@
mdss_mdp: display-controller@ae01000 {
compatible = "qcom,sdm845-dpu";
reg = <0 0x0ae01000 0 0x8f000>,
- <0 0x0aeb0000 0 0x2008>;
+ <0 0x0aeb0000 0 0x3000>;
reg-names = "mdp", "vbif";
clocks = <&gcc GCC_DISP_AXI_CLK>,
@@ -4708,8 +4709,10 @@
"core",
"iface",
"bus";
- assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+ assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd SDM845_CX>;
@@ -4780,8 +4783,10 @@
"core",
"iface",
"bus";
- assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>, <&mdss_dsi1_phy 1>;
+ assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd SDM845_CX>;
@@ -4952,8 +4957,6 @@
operating-points-v2 = <&gmu_opp_table>;
- status = "disabled";
-
gmu_opp_table: opp-table {
compatible = "operating-points-v2";
@@ -4975,10 +4978,10 @@
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_DISP_GPLL0_CLK_SRC>,
<&gcc GCC_DISP_GPLL0_DIV_CLK_SRC>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
clock-names = "bi_tcxo",
@@ -5266,8 +5269,8 @@
};
apps_rsc: rsc@179c0000 {
+ compatible = "qcom,sdm845-rpmh-apps-rsc", "qcom,rpmh-rsc";
label = "apps_rsc";
- compatible = "qcom,rpmh-rsc";
reg = <0 0x179c0000 0 0x10000>,
<0 0x179d0000 0 0x10000>,
<0 0x179e0000 0 0x10000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
index f18050848cd8..3b28c543fd96 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
@@ -355,10 +355,6 @@
<GCC_LPASS_SWAY_CLK>;
};
-&gmu {
- status = "okay";
-};
-
&gpu {
status = "okay";
zap-shader {
@@ -616,15 +612,15 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
};
@@ -636,10 +632,7 @@
"RX_BIAS", "MCLK",
"AMIC2", "MIC BIAS2",
"SpkrLeft IN", "SPK1 OUT",
- "SpkrRight IN", "SPK2 OUT",
- "MM_DL1", "MultiMedia1 Playback",
- "MM_DL3", "MultiMedia3 Playback",
- "MultiMedia2 Capture", "MM_UL2";
+ "SpkrRight IN", "SPK2 OUT";
mm1-dai-link {
link-name = "MultiMedia1";
@@ -910,7 +903,7 @@
vdd-3.3-ch1-supply = <&vreg_l23a_3p3>;
qcom,snoc-host-cap-8bit-quirk;
- qcom,ath10k-calibration-variant = "Lenovo_C630";
+ qcom,calibration-variant = "Lenovo_C630";
};
&crypto {
diff --git a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
index 26217836c270..a676d3ea01b9 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
@@ -425,15 +425,15 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
};
@@ -445,10 +445,7 @@
"RX_BIAS", "MCLK",
"AMIC2", "MIC BIAS2",
"SpkrLeft IN", "SPK1 OUT",
- "SpkrRight IN", "SPK2 OUT",
- "MM_DL1", "MultiMedia1 Playback",
- "MM_DL3", "MultiMedia3 Playback",
- "MultiMedia2 Capture", "MM_UL2";
+ "SpkrRight IN", "SPK2 OUT";
mm1-dai-link {
link-name = "MultiMedia1";
diff --git a/arch/arm64/boot/dts/qcom/sdx75-idp.dts b/arch/arm64/boot/dts/qcom/sdx75-idp.dts
index f1bbe7ab01ab..06cacec3461f 100644
--- a/arch/arm64/boot/dts/qcom/sdx75-idp.dts
+++ b/arch/arm64/boot/dts/qcom/sdx75-idp.dts
@@ -278,6 +278,24 @@
vdd3-supply = <&vreg_l10b_3p08>;
};
+&qpic_bam {
+ status = "okay";
+};
+
+&qpic_nand {
+ status = "okay";
+
+ nand@0 {
+ reg = <0>;
+
+ nand-ecc-strength = <8>;
+ nand-ecc-step-size = <512>;
+ nand-bus-width = <8>;
+ /* efs2 partition is secured */
+ secure-regions = /bits/ 64 <0x680000 0xb00000>;
+ };
+};
+
&qupv3_id_0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sdx75.dtsi b/arch/arm64/boot/dts/qcom/sdx75.dtsi
index b0a8a0fe5f39..75bfc19f412c 100644
--- a/arch/arm64/boot/dts/qcom/sdx75.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdx75.dtsi
@@ -880,6 +880,39 @@
qcom,bcm-voters = <&apps_bcm_voter>;
};
+ qpic_bam: dma-controller@1c9c000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0x0 0x01c9c000 0x0 0x1c000>;
+ interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rpmhcc RPMH_QPIC_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ qcom,controlled-remotely;
+ iommus = <&apps_smmu 0x100 0x3>;
+ dma-coherent;
+ status = "disabled";
+ };
+
+ qpic_nand: nand-controller@1cc8000 {
+ compatible = "qcom,sdx75-nand", "qcom,sdx55-nand";
+ reg = <0x0 0x01cc8000 0x0 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&rpmhcc RPMH_QPIC_CLK>,
+ <&sleep_clk>;
+ clock-names = "core",
+ "aon";
+ dmas = <&qpic_bam 0>,
+ <&qpic_bam 1>,
+ <&qpic_bam 2>;
+ dma-names = "tx",
+ "rx",
+ "cmd";
+ iommus = <&apps_smmu 0x100 0x3>;
+ status = "disabled";
+ };
+
tcsr_mutex: hwlock@1f40000 {
compatible = "qcom,tcsr-mutex";
reg = <0x0 0x01f40000 0x0 0x40000>;
@@ -1008,14 +1041,16 @@
<&gcc GCC_USB30_MASTER_CLK>;
assigned-clock-rates = <19200000>, <200000000>;
- interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 17 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 9 IRQ_TYPE_EDGE_RISING>,
- <&pdc 10 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "hs_phy_irq",
- "ss_phy_irq",
+ <&pdc 10 IRQ_TYPE_EDGE_RISING>,
+ <&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event",
+ "hs_phy_irq",
"dm_hs_phy_irq",
- "dp_hs_phy_irq";
+ "dp_hs_phy_irq",
+ "ss_phy_irq";
power-domains = <&gcc GCC_USB30_GDSC>;
@@ -1077,7 +1112,7 @@
interrupt-controller;
};
- aoss_qmp: power-controller@c310000 {
+ aoss_qmp: power-management@c310000 {
compatible = "qcom,sdx75-aoss-qmp", "qcom,aoss-qmp";
reg = <0 0x0c310000 0 0x1000>;
interrupt-parent = <&ipcc>;
diff --git a/arch/arm64/boot/dts/qcom/sm4450.dtsi b/arch/arm64/boot/dts/qcom/sm4450.dtsi
index 27453771aa68..d217d922811e 100644
--- a/arch/arm64/boot/dts/qcom/sm4450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm4450.dtsi
@@ -10,6 +10,8 @@
#include <dt-bindings/clock/qcom,sm4450-gpucc.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/qcom,rpmhpd.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
/ {
@@ -591,6 +593,72 @@
clocks = <&xo_board>;
clock-names = "xo";
};
+
+ rpmhpd: power-controller {
+ compatible = "qcom,sm4450-rpmhpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmhpd_opp_table>;
+
+ rpmhpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmhpd_opp_ret: opp-16 {
+ opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+ };
+
+ rpmhpd_opp_min_svs: opp-48 {
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+
+ rpmhpd_opp_low_svs_d1: opp-56 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D1>;
+ };
+
+ rpmhpd_opp_low_svs: opp-64 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ rpmhpd_opp_low_svs_l1: opp-80 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_L1>;
+ };
+
+ rpmhpd_opp_low_svs_l2: opp-96 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_L2>;
+ };
+
+ rpmhpd_opp_svs: opp-128 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+
+ rpmhpd_opp_svs_l1: opp-192 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ };
+
+ rpmhpd_opp_svs_l2: opp-224 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L2>;
+ };
+
+ rpmhpd_opp_nom: opp-256 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ };
+
+ rpmhpd_opp_nom_l1: opp-320 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ };
+
+ rpmhpd_opp_nom_l2: opp-336 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
+ };
+
+ rpmhpd_opp_turbo: opp-384 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ };
+
+ rpmhpd_opp_turbo_l1: opp-416 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ };
+ };
+ };
};
cpufreq_hw: cpufreq@17d91000 {
diff --git a/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts b/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts
index f60d36c03b9b..ad347ccd1975 100644
--- a/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts
+++ b/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts
@@ -566,7 +566,7 @@
vdd-1.3-rfa-supply = <&pm6125_l17a>;
vdd-3.3-ch0-supply = <&pm6125_l23a>;
- qcom,ath10k-calibration-variant = "Fxtec_QX1050";
+ qcom,calibration-variant = "Fxtec_QX1050";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
index 94c081bf7a89..c8865779173e 100644
--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
@@ -3,6 +3,7 @@
* Copyright (c) 2021, Iskren Chernev <iskren.chernev@gmail.com>
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-sm6115.h>
#include <dt-bindings/clock/qcom,sm6115-dispcc.h>
#include <dt-bindings/clock/qcom,sm6115-gpucc.h>
@@ -1873,7 +1874,7 @@
mdp: display-controller@5e01000 {
compatible = "qcom,sm6115-dpu";
reg = <0x0 0x05e01000 0x0 0x8f000>,
- <0x0 0x05eb0000 0x0 0x2008>;
+ <0x0 0x05eb0000 0x0 0x3000>;
reg-names = "mdp", "vbif";
clocks = <&gcc GCC_DISP_HF_AXI_CLK>,
@@ -1960,7 +1961,8 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmpd SM6115_VDDCX>;
@@ -2034,8 +2036,8 @@
reg = <0x0 0x05f00000 0 0x20000>;
clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
<&sleep_clk>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
<&gcc GCC_DISP_GPLL0_DIV_CLK_SRC>;
#clock-cells = <1>;
#reset-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sm6115p-lenovo-j606f.dts b/arch/arm64/boot/dts/qcom/sm6115p-lenovo-j606f.dts
index 9d78bb3f7190..c17545111f49 100644
--- a/arch/arm64/boot/dts/qcom/sm6115p-lenovo-j606f.dts
+++ b/arch/arm64/boot/dts/qcom/sm6115p-lenovo-j606f.dts
@@ -379,7 +379,7 @@
vdd-1.8-xo-supply = <&pm6125_l16>;
vdd-1.3-rfa-supply = <&pm6125_l17>;
vdd-3.3-ch0-supply = <&pm6125_l23>;
- qcom,ath10k-calibration-variant = "Lenovo_P11";
+ qcom,calibration-variant = "Lenovo_P11";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm6125-xiaomi-ginkgo.dts b/arch/arm64/boot/dts/qcom/sm6125-xiaomi-ginkgo.dts
new file mode 100644
index 000000000000..68a237215bd1
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm6125-xiaomi-ginkgo.dts
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2025, Gabriel Gonzales <semfault@disroot.org>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/arm/qcom,ids.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include "sm6125.dtsi"
+#include "pm6125.dtsi"
+
+/ {
+ model = "Xiaomi Redmi Note 8";
+ compatible = "xiaomi,ginkgo", "qcom,sm6125";
+ chassis-type = "handset";
+
+ /* required for bootloader to select correct board */
+ qcom,msm-id = <QCOM_ID_SM6125>;
+ qcom,board-id = <22 0>;
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ framebuffer0: framebuffer@5c000000 {
+ compatible = "simple-framebuffer";
+ reg = <0 0x5c000000 0 (2340 * 1080 * 4)>;
+ width = <1080>;
+ height = <2340>;
+ stride = <(1080 * 4)>;
+ format = "a8r8g8b8";
+ };
+ };
+
+ reserved-memory {
+ debug_mem: debug@ffb00000 {
+ reg = <0x0 0xffb00000 0x0 0xc0000>;
+ no-map;
+ };
+
+ last_log_mem: lastlog@ffbc0000 {
+ reg = <0x0 0xffbc0000 0x0 0x80000>;
+ no-map;
+ };
+
+ pstore_mem: ramoops@ffc00000 {
+ compatible = "ramoops";
+ reg = <0x0 0xffc40000 0x0 0xc0000>;
+ record-size = <0x1000>;
+ console-size = <0x40000>;
+ pmsg-size = <0x20000>;
+ };
+
+ cmdline_mem: memory@ffd00000 {
+ reg = <0x0 0xffd40000 0x0 0x1000>;
+ no-map;
+ };
+ };
+
+ extcon_usb: extcon-usb {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpios = <&tlmm 102 GPIO_ACTIVE_HIGH>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&vol_up_n>;
+ pinctrl-names = "default";
+
+ key-volume-up {
+ label = "Volume Up";
+ gpios = <&pm6125_gpios 6 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <15>;
+ linux,can-disable;
+ wakeup-source;
+ };
+ };
+};
+
+&pm6125_gpios {
+ vol_up_n: vol-up-n-state {
+ pins = "gpio6";
+ function = "normal";
+ power-source = <1>;
+ bias-pull-up;
+ input-enable;
+ };
+};
+
+&hsusb_phy1 {
+ vdd-supply = <&vreg_l7a>;
+ vdda-pll-supply = <&vreg_l10a>;
+ vdda-phy-dpdm-supply = <&vreg_l15a>;
+ status = "okay";
+};
+
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+ status = "okay";
+};
+
+&rpm_requests {
+ regulators-0 {
+ compatible = "qcom,rpm-pm6125-regulators";
+
+ vreg_s6a: s6 {
+ regulator-min-microvolt = <936000>;
+ regulator-max-microvolt = <1422000>;
+ };
+
+ vreg_l1a: l1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1256000>;
+ };
+
+ vreg_l2a: l2 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1056000>;
+ };
+
+ vreg_l3a: l3 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1064000>;
+ };
+
+ vreg_l4a: l4 {
+ regulator-min-microvolt = <872000>;
+ regulator-max-microvolt = <976000>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l5a: l5 {
+ regulator-min-microvolt = <1648000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l6a: l6 {
+ regulator-min-microvolt = <576000>;
+ regulator-max-microvolt = <656000>;
+ };
+
+ vreg_l7a: l7 {
+ regulator-min-microvolt = <872000>;
+ regulator-max-microvolt = <976000>;
+ };
+
+ vreg_l8a: l8 {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <728000>;
+ };
+
+ vreg_l9a: l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1896000>;
+ };
+
+ vreg_l10a: l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1896000>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l11a: l11 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1952000>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l12a: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1996000>;
+ };
+
+ vreg_l13a: l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1832000>;
+ };
+
+ vreg_l14a: l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1904000>;
+ };
+
+ vreg_l15a: l15 {
+ regulator-min-microvolt = <3104000>;
+ regulator-max-microvolt = <3232000>;
+ };
+
+ vreg_l16a: l16 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1904000>;
+ };
+
+ vreg_l17a: l17 {
+ regulator-min-microvolt = <1248000>;
+ regulator-max-microvolt = <1304000>;
+ };
+
+ vreg_l18a: l18 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1264000>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l19a: l19 {
+ regulator-min-microvolt = <1648000>;
+ regulator-max-microvolt = <2952000>;
+ };
+
+ vreg_l20a: l20 {
+ regulator-min-microvolt = <1648000>;
+ regulator-max-microvolt = <2952000>;
+ };
+
+ vreg_l21a: l21 {
+ regulator-min-microvolt = <2600000>;
+ regulator-max-microvolt = <2856000>;
+ };
+
+ vreg_l22a: l22 {
+ regulator-min-microvolt = <2944000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l23a: l23 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3400000>;
+ };
+
+ vreg_l24a: l24 {
+ regulator-min-microvolt = <2944000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+
+ };
+};
+
+&sdc2_off_state {
+ sd-cd-pins {
+ pins = "gpio98";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
+
+&sdc2_on_state {
+ sd-cd-pins {
+ pins = "gpio98";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+};
+
+&sdhc_1 {
+ vmmc-supply = <&vreg_l24a>;
+ vqmmc-supply = <&vreg_l11a>;
+ status = "okay";
+};
+
+&sdhc_2 {
+ cd-gpios = <&tlmm 98 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&vreg_l22a>;
+ vqmmc-supply = <&vreg_l5a>;
+ no-sdio;
+ no-mmc;
+ status = "okay";
+};
+
+&tlmm {
+ gpio-reserved-ranges = <22 2>, <28 6>;
+};
+
+&usb3 {
+ status = "okay";
+};
+
+&usb3_dwc3 {
+ extcon = <&extcon_usb>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
index 350d807a622f..8f2d65543373 100644
--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
@@ -4,6 +4,7 @@
*/
#include <dt-bindings/clock/qcom,dispcc-sm6125.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-sm6125.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/dma/qcom-gpi.h>
@@ -1250,7 +1251,7 @@
mdss_mdp: display-controller@5e01000 {
compatible = "qcom,sm6125-dpu";
reg = <0x05e01000 0x83208>,
- <0x05eb0000 0x2008>;
+ <0x05eb0000 0x3000>;
reg-names = "mdp", "vbif";
interrupt-parent = <&mdss>;
@@ -1340,7 +1341,8 @@
"bus";
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmpd SM6125_VDDCX>;
@@ -1415,8 +1417,8 @@
reg = <0x05f00000 0x20000>;
clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
<0>,
<0>,
<0>,
diff --git a/arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts b/arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts
index bf23033a294e..8848043f95f2 100644
--- a/arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts
+++ b/arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts
@@ -381,9 +381,16 @@
};
&usb_1_hsphy {
+ vdd-supply = <&pm6350_l18>;
+ vdda-phy-dpdm-supply = <&pm6350_l3>;
+ vdda-pll-supply = <&pm6350_l2>;
+
status = "okay";
};
&usb_1_qmpphy {
+ vdda-phy-supply = <&pm6350_l16>;
+ vdda-pll-supply = <&pm6350_l22>;
+
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
index 00ad1d09a195..f80b21d28a92 100644
--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
@@ -5,6 +5,7 @@
*/
#include <dt-bindings/clock/qcom,dispcc-sm6350.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-sm6350.h>
#include <dt-bindings/clock/qcom,gpucc-sm6350.h>
#include <dt-bindings/clock/qcom,rpmh.h>
@@ -566,114 +567,114 @@
ranges;
hyp_mem: memory@80000000 {
- reg = <0 0x80000000 0 0x600000>;
+ reg = <0x0 0x80000000 0x0 0x600000>;
no-map;
};
xbl_aop_mem: memory@80700000 {
- reg = <0 0x80700000 0 0x160000>;
+ reg = <0x0 0x80700000 0x0 0x160000>;
no-map;
};
cmd_db: memory@80860000 {
compatible = "qcom,cmd-db";
- reg = <0 0x80860000 0 0x20000>;
+ reg = <0x0 0x80860000 0x0 0x20000>;
no-map;
};
sec_apps_mem: memory@808ff000 {
- reg = <0 0x808ff000 0 0x1000>;
+ reg = <0x0 0x808ff000 0x0 0x1000>;
no-map;
};
smem_mem: memory@80900000 {
- reg = <0 0x80900000 0 0x200000>;
+ reg = <0x0 0x80900000 0x0 0x200000>;
no-map;
};
cdsp_sec_mem: memory@80b00000 {
- reg = <0 0x80b00000 0 0x1e00000>;
+ reg = <0x0 0x80b00000 0x0 0x1e00000>;
no-map;
};
pil_camera_mem: memory@86000000 {
- reg = <0 0x86000000 0 0x500000>;
+ reg = <0x0 0x86000000 0x0 0x500000>;
no-map;
};
pil_npu_mem: memory@86500000 {
- reg = <0 0x86500000 0 0x500000>;
+ reg = <0x0 0x86500000 0x0 0x500000>;
no-map;
};
pil_video_mem: memory@86a00000 {
- reg = <0 0x86a00000 0 0x500000>;
+ reg = <0x0 0x86a00000 0x0 0x500000>;
no-map;
};
pil_cdsp_mem: memory@86f00000 {
- reg = <0 0x86f00000 0 0x1e00000>;
+ reg = <0x0 0x86f00000 0x0 0x1e00000>;
no-map;
};
pil_adsp_mem: memory@88d00000 {
- reg = <0 0x88d00000 0 0x2800000>;
+ reg = <0x0 0x88d00000 0x0 0x2800000>;
no-map;
};
wlan_fw_mem: memory@8b500000 {
- reg = <0 0x8b500000 0 0x200000>;
+ reg = <0x0 0x8b500000 0x0 0x200000>;
no-map;
};
pil_ipa_fw_mem: memory@8b700000 {
- reg = <0 0x8b700000 0 0x10000>;
+ reg = <0x0 0x8b700000 0x0 0x10000>;
no-map;
};
pil_ipa_gsi_mem: memory@8b710000 {
- reg = <0 0x8b710000 0 0x5400>;
+ reg = <0x0 0x8b710000 0x0 0x5400>;
no-map;
};
pil_modem_mem: memory@8b800000 {
- reg = <0 0x8b800000 0 0xf800000>;
+ reg = <0x0 0x8b800000 0x0 0xf800000>;
no-map;
};
cont_splash_memory: memory@a0000000 {
- reg = <0 0xa0000000 0 0x2300000>;
+ reg = <0x0 0xa0000000 0x0 0x2300000>;
no-map;
};
dfps_data_memory: memory@a2300000 {
- reg = <0 0xa2300000 0 0x100000>;
+ reg = <0x0 0xa2300000 0x0 0x100000>;
no-map;
};
removed_region: memory@c0000000 {
- reg = <0 0xc0000000 0 0x3900000>;
+ reg = <0x0 0xc0000000 0x0 0x3900000>;
no-map;
};
pil_gpu_mem: memory@f0d00000 {
- reg = <0 0xf0d00000 0 0x1000>;
+ reg = <0x0 0xf0d00000 0x0 0x1000>;
no-map;
};
debug_region: memory@ffb00000 {
- reg = <0 0xffb00000 0 0xc0000>;
+ reg = <0x0 0xffb00000 0x0 0xc0000>;
no-map;
};
last_log_region: memory@ffbc0000 {
- reg = <0 0xffbc0000 0 0x40000>;
+ reg = <0x0 0xffbc0000 0x0 0x40000>;
no-map;
};
ramoops: ramoops@ffc00000 {
compatible = "ramoops";
- reg = <0 0xffc00000 0 0x100000>;
+ reg = <0x0 0xffc00000 0x0 0x100000>;
record-size = <0x1000>;
console-size = <0x40000>;
pmsg-size = <0x20000>;
@@ -682,7 +683,7 @@
};
cmdline_region: memory@ffd00000 {
- reg = <0 0xffd00000 0 0x1000>;
+ reg = <0x0 0xffd00000 0x0 0x1000>;
no-map;
};
};
@@ -786,7 +787,7 @@
gcc: clock-controller@100000 {
compatible = "qcom,gcc-sm6350";
- reg = <0 0x00100000 0 0x1f0000>;
+ reg = <0x0 0x00100000 0x0 0x1f0000>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
@@ -800,7 +801,7 @@
ipcc: mailbox@408000 {
compatible = "qcom,sm6350-ipcc", "qcom,ipcc";
- reg = <0 0x00408000 0 0x1000>;
+ reg = <0x0 0x00408000 0x0 0x1000>;
interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <3>;
@@ -809,7 +810,7 @@
qfprom: qfprom@784000 {
compatible = "qcom,sm6350-qfprom", "qcom,qfprom";
- reg = <0 0x00784000 0 0x3000>;
+ reg = <0x0 0x00784000 0x0 0x3000>;
#address-cells = <1>;
#size-cells = <1>;
@@ -821,16 +822,16 @@
rng: rng@793000 {
compatible = "qcom,prng-ee";
- reg = <0 0x00793000 0 0x1000>;
+ reg = <0x0 0x00793000 0x0 0x1000>;
clocks = <&gcc GCC_PRNG_AHB_CLK>;
clock-names = "core";
};
sdhc_1: mmc@7c4000 {
compatible = "qcom,sm6350-sdhci", "qcom,sdhci-msm-v5";
- reg = <0 0x007c4000 0 0x1000>,
- <0 0x007c5000 0 0x1000>,
- <0 0x007c8000 0 0x8000>;
+ reg = <0x0 0x007c4000 0x0 0x1000>,
+ <0x0 0x007c5000 0x0 0x1000>,
+ <0x0 0x007c8000 0x0 0x8000>;
reg-names = "hc", "cqhci", "ice";
interrupts = <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH>,
@@ -875,7 +876,7 @@
gpi_dma0: dma-controller@800000 {
compatible = "qcom,sm6350-gpi-dma";
- reg = <0 0x00800000 0 0x60000>;
+ reg = <0x0 0x00800000 0x0 0x60000>;
interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
@@ -907,7 +908,7 @@
i2c0: i2c@880000 {
compatible = "qcom,geni-i2c";
- reg = <0 0x00880000 0 0x4000>;
+ reg = <0x0 0x00880000 0x0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
pinctrl-names = "default";
@@ -927,7 +928,7 @@
uart1: serial@884000 {
compatible = "qcom,geni-uart";
- reg = <0 0x00884000 0 0x4000>;
+ reg = <0x0 0x00884000 0x0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
pinctrl-names = "default";
@@ -943,7 +944,7 @@
i2c2: i2c@888000 {
compatible = "qcom,geni-i2c";
- reg = <0 0x00888000 0 0x4000>;
+ reg = <0x0 0x00888000 0x0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
pinctrl-names = "default";
@@ -964,7 +965,7 @@
gpi_dma1: dma-controller@900000 {
compatible = "qcom,sm6350-gpi-dma";
- reg = <0 0x00900000 0 0x60000>;
+ reg = <0x0 0x00900000 0x0 0x60000>;
interrupts = <GIC_SPI 645 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 646 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 647 IRQ_TYPE_LEVEL_HIGH>,
@@ -996,7 +997,7 @@
i2c6: i2c@980000 {
compatible = "qcom,geni-i2c";
- reg = <0 0x00980000 0 0x4000>;
+ reg = <0x0 0x00980000 0x0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
pinctrl-names = "default";
@@ -1016,7 +1017,7 @@
i2c7: i2c@984000 {
compatible = "qcom,geni-i2c";
- reg = <0 0x00984000 0 0x4000>;
+ reg = <0x0 0x00984000 0x0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
pinctrl-names = "default";
@@ -1036,7 +1037,7 @@
i2c8: i2c@988000 {
compatible = "qcom,geni-i2c";
- reg = <0 0x00988000 0 0x4000>;
+ reg = <0x0 0x00988000 0x0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
pinctrl-names = "default";
@@ -1056,7 +1057,7 @@
uart9: serial@98c000 {
compatible = "qcom,geni-debug-uart";
- reg = <0 0x0098c000 0 0x4000>;
+ reg = <0x0 0x0098c000 0x0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
pinctrl-names = "default";
@@ -1070,7 +1071,7 @@
i2c10: i2c@990000 {
compatible = "qcom,geni-i2c";
- reg = <0 0x00990000 0 0x4000>;
+ reg = <0x0 0x00990000 0x0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
pinctrl-names = "default";
@@ -1091,14 +1092,14 @@
config_noc: interconnect@1500000 {
compatible = "qcom,sm6350-config-noc";
- reg = <0 0x01500000 0 0x28000>;
+ reg = <0x0 0x01500000 0x0 0x28000>;
#interconnect-cells = <2>;
qcom,bcm-voters = <&apps_bcm_voter>;
};
system_noc: interconnect@1620000 {
compatible = "qcom,sm6350-system-noc";
- reg = <0 0x01620000 0 0x17080>;
+ reg = <0x0 0x01620000 0x0 0x17080>;
#interconnect-cells = <2>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -1111,14 +1112,14 @@
aggre1_noc: interconnect@16e0000 {
compatible = "qcom,sm6350-aggre1-noc";
- reg = <0 0x016e0000 0 0x15080>;
+ reg = <0x0 0x016e0000 0x0 0x15080>;
#interconnect-cells = <2>;
qcom,bcm-voters = <&apps_bcm_voter>;
};
aggre2_noc: interconnect@1700000 {
compatible = "qcom,sm6350-aggre2-noc";
- reg = <0 0x01700000 0 0x1f880>;
+ reg = <0x0 0x01700000 0x0 0x1f880>;
#interconnect-cells = <2>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -1131,7 +1132,7 @@
mmss_noc: interconnect@1740000 {
compatible = "qcom,sm6350-mmss-noc";
- reg = <0 0x01740000 0 0x1c100>;
+ reg = <0x0 0x01740000 0x0 0x1c100>;
#interconnect-cells = <2>;
qcom,bcm-voters = <&apps_bcm_voter>;
};
@@ -1139,8 +1140,8 @@
ufs_mem_hc: ufshc@1d84000 {
compatible = "qcom,sm6350-ufshc", "qcom,ufshc",
"jedec,ufs-2.0";
- reg = <0 0x01d84000 0 0x3000>,
- <0 0x01d90000 0 0x8000>;
+ reg = <0x0 0x01d84000 0x0 0x3000>,
+ <0x0 0x01d90000 0x0 0x8000>;
reg-names = "std", "ice";
interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
phys = <&ufs_mem_phy>;
@@ -1188,7 +1189,7 @@
ufs_mem_phy: phy@1d87000 {
compatible = "qcom,sm6350-qmp-ufs-phy";
- reg = <0 0x01d87000 0 0x1000>;
+ reg = <0x0 0x01d87000 0x0 0x1000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_UFS_PHY_PHY_AUX_CLK>,
@@ -1209,7 +1210,7 @@
cryptobam: dma-controller@1dc4000 {
compatible = "qcom,bam-v1.7.4", "qcom,bam-v1.7.0";
- reg = <0 0x01dc4000 0 0x24000>;
+ reg = <0x0 0x01dc4000 0x0 0x24000>;
interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
qcom,ee = <0>;
@@ -1225,7 +1226,7 @@
crypto: crypto@1dfa000 {
compatible = "qcom,sm6350-qce", "qcom,sm8150-qce", "qcom,qce";
- reg = <0 0x01dfa000 0 0x6000>;
+ reg = <0x0 0x01dfa000 0x0 0x6000>;
dmas = <&cryptobam 4>, <&cryptobam 5>;
dma-names = "rx", "tx";
iommus = <&apps_smmu 0x426 0x11>,
@@ -1243,9 +1244,9 @@
iommus = <&apps_smmu 0x440 0x0>,
<&apps_smmu 0x442 0x0>;
- reg = <0 0x01e40000 0 0x8000>,
- <0 0x01e50000 0 0x3000>,
- <0 0x01e04000 0 0x23000>;
+ reg = <0x0 0x01e40000 0x0 0x8000>,
+ <0x0 0x01e50000 0x0 0x3000>,
+ <0x0 0x01e04000 0x0 0x23000>;
reg-names = "ipa-reg",
"ipa-shared",
"gsi";
@@ -1351,8 +1352,8 @@
gpu: gpu@3d00000 {
compatible = "qcom,adreno-619.0", "qcom,adreno";
- reg = <0 0x03d00000 0 0x40000>,
- <0 0x03d9e000 0 0x1000>;
+ reg = <0x0 0x03d00000 0x0 0x40000>,
+ <0x0 0x03d9e000 0x0 0x1000>;
reg-names = "kgsl_3d0_reg_memory",
"cx_mem";
interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
@@ -1419,7 +1420,7 @@
adreno_smmu: iommu@3d40000 {
compatible = "qcom,sm6350-smmu-v2", "qcom,adreno-smmu", "qcom,smmu-v2";
- reg = <0 0x03d40000 0 0x10000>;
+ reg = <0x0 0x03d40000 0x0 0x10000>;
#iommu-cells = <1>;
#global-interrupts = <2>;
interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
@@ -1445,9 +1446,9 @@
gmu: gmu@3d6a000 {
compatible = "qcom,adreno-gmu-619.0", "qcom,adreno-gmu";
- reg = <0 0x03d6a000 0 0x31000>,
- <0 0x0b290000 0 0x10000>,
- <0 0x0b490000 0 0x10000>;
+ reg = <0x0 0x03d6a000 0x0 0x31000>,
+ <0x0 0x0b290000 0x0 0x10000>,
+ <0x0 0x0b490000 0x0 0x10000>;
reg-names = "gmu",
"gmu_pdc",
"gmu_pdc_seq";
@@ -1489,7 +1490,7 @@
gpucc: clock-controller@3d90000 {
compatible = "qcom,sm6350-gpucc";
- reg = <0 0x03d90000 0 0x9000>;
+ reg = <0x0 0x03d90000 0x0 0x9000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_GPU_GPLL0_CLK>,
<&gcc GCC_GPU_GPLL0_DIV_CLK>;
@@ -1543,7 +1544,7 @@
cdsp: remoteproc@8300000 {
compatible = "qcom,sm6350-cdsp-pas";
- reg = <0 0x08300000 0 0x10000>;
+ reg = <0x0 0x08300000 0x0 0x10000>;
interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -1642,7 +1643,7 @@
sdhc_2: mmc@8804000 {
compatible = "qcom,sm6350-sdhci", "qcom,sdhci-msm-v5";
- reg = <0 0x08804000 0 0x1000>;
+ reg = <0x0 0x08804000 0x0 0x1000>;
interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
@@ -1691,7 +1692,7 @@
usb_1_hsphy: phy@88e3000 {
compatible = "qcom,sm6350-qusb2-phy", "qcom,qusb2-v2-phy";
- reg = <0 0x088e3000 0 0x400>;
+ reg = <0x0 0x088e3000 0x0 0x400>;
status = "disabled";
#phy-cells = <0>;
@@ -1703,7 +1704,7 @@
usb_1_qmpphy: phy@88e8000 {
compatible = "qcom,sm6350-qmp-usb3-dp-phy";
- reg = <0 0x088e8000 0 0x3000>;
+ reg = <0x0 0x088e8000 0x0 0x3000>;
clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
<&gcc GCC_USB3_PRIM_CLKREF_CLK>,
@@ -1754,27 +1755,27 @@
dc_noc: interconnect@9160000 {
compatible = "qcom,sm6350-dc-noc";
- reg = <0 0x09160000 0 0x3200>;
+ reg = <0x0 0x09160000 0x0 0x3200>;
#interconnect-cells = <2>;
qcom,bcm-voters = <&apps_bcm_voter>;
};
system-cache-controller@9200000 {
compatible = "qcom,sm6350-llcc";
- reg = <0 0x09200000 0 0x50000>, <0 0x09600000 0 0x50000>;
+ reg = <0x0 0x09200000 0x0 0x50000>, <0x0 0x09600000 0x0 0x50000>;
reg-names = "llcc0_base", "llcc_broadcast_base";
};
gem_noc: interconnect@9680000 {
compatible = "qcom,sm6350-gem-noc";
- reg = <0 0x09680000 0 0x3e200>;
+ reg = <0x0 0x09680000 0x0 0x3e200>;
#interconnect-cells = <2>;
qcom,bcm-voters = <&apps_bcm_voter>;
};
npu_noc: interconnect@9990000 {
compatible = "qcom,sm6350-npu-noc";
- reg = <0 0x09990000 0 0x1600>;
+ reg = <0x0 0x09990000 0x0 0x1600>;
#interconnect-cells = <2>;
qcom,bcm-voters = <&apps_bcm_voter>;
};
@@ -1878,7 +1879,7 @@
usb_1: usb@a6f8800 {
compatible = "qcom,sm6350-dwc3", "qcom,dwc3";
- reg = <0 0x0a6f8800 0 0x400>;
+ reg = <0x0 0x0a6f8800 0x0 0x400>;
status = "disabled";
#address-cells = <2>;
#size-cells = <2>;
@@ -1916,7 +1917,7 @@
usb_1_dwc3: usb@a600000 {
compatible = "snps,dwc3";
- reg = <0 0x0a600000 0 0xcd00>;
+ reg = <0x0 0x0a600000 0x0 0xcd00>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
iommus = <&apps_smmu 0x540 0x0>;
snps,dis_u2_susphy_quirk;
@@ -1954,7 +1955,7 @@
cci0: cci@ac4a000 {
compatible = "qcom,sm6350-cci", "qcom,msm8996-cci";
- reg = <0 0x0ac4a000 0 0x1000>;
+ reg = <0x0 0x0ac4a000 0x0 0x1000>;
interrupts = <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>;
power-domains = <&camcc TITAN_TOP_GDSC>;
@@ -2001,7 +2002,7 @@
cci1: cci@ac4b000 {
compatible = "qcom,sm6350-cci", "qcom,msm8996-cci";
- reg = <0 0x0ac4b000 0 0x1000>;
+ reg = <0x0 0x0ac4b000 0x0 0x1000>;
interrupts = <GIC_SPI 462 IRQ_TYPE_EDGE_RISING>;
power-domains = <&camcc TITAN_TOP_GDSC>;
@@ -2043,7 +2044,7 @@
camcc: clock-controller@ad00000 {
compatible = "qcom,sm6350-camcc";
- reg = <0 0x0ad00000 0 0x16000>;
+ reg = <0x0 0x0ad00000 0x0 0x16000>;
clocks = <&rpmhcc RPMH_CXO_CLK>;
#clock-cells = <1>;
#reset-cells = <1>;
@@ -2052,7 +2053,7 @@
mdss: display-subsystem@ae00000 {
compatible = "qcom,sm6350-mdss";
- reg = <0 0x0ae00000 0 0x1000>;
+ reg = <0x0 0x0ae00000 0x0 0x1000>;
reg-names = "mdss";
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
@@ -2084,8 +2085,8 @@
mdss_mdp: display-controller@ae01000 {
compatible = "qcom,sm6350-dpu";
- reg = <0 0x0ae01000 0 0x8f000>,
- <0 0x0aeb0000 0 0x2008>;
+ reg = <0x0 0x0ae01000 0x0 0x8f000>,
+ <0x0 0x0aeb0000 0x0 0x2008>;
reg-names = "mdp", "vbif";
interrupt-parent = <&mdss>;
@@ -2168,11 +2169,11 @@
mdss_dp: displayport-controller@ae90000 {
compatible = "qcom,sm6350-dp", "qcom,sm8350-dp";
- reg = <0 0xae90000 0 0x200>,
- <0 0xae90200 0 0x200>,
- <0 0xae90400 0 0x600>,
- <0 0xae91000 0 0x400>,
- <0 0xae91400 0 0x400>;
+ reg = <0x0 0xae90000 0x0 0x200>,
+ <0x0 0xae90200 0x0 0x200>,
+ <0x0 0xae90400 0x0 0x600>,
+ <0x0 0xae91000 0x0 0x400>,
+ <0x0 0xae91400 0x0 0x400>;
interrupt-parent = <&mdss>;
interrupts = <12>;
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
@@ -2248,7 +2249,7 @@
mdss_dsi0: dsi@ae94000 {
compatible = "qcom,sm6350-dsi-ctrl", "qcom,mdss-dsi-ctrl";
- reg = <0 0x0ae94000 0 0x400>;
+ reg = <0x0 0x0ae94000 0x0 0x400>;
reg-names = "dsi_ctrl";
interrupt-parent = <&mdss>;
@@ -2269,7 +2270,8 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&mdss_dsi_opp_table>;
power-domains = <&rpmhpd SM6350_MX>;
@@ -2324,9 +2326,9 @@
mdss_dsi0_phy: phy@ae94400 {
compatible = "qcom,dsi-phy-10nm";
- reg = <0 0x0ae94400 0 0x200>,
- <0 0x0ae94600 0 0x280>,
- <0 0x0ae94a00 0 0x1e0>;
+ reg = <0x0 0x0ae94400 0x0 0x200>,
+ <0x0 0x0ae94600 0x0 0x280>,
+ <0x0 0x0ae94a00 0x0 0x1e0>;
reg-names = "dsi_phy",
"dsi_phy_lane",
"dsi_pll";
@@ -2344,11 +2346,11 @@
dispcc: clock-controller@af00000 {
compatible = "qcom,sm6350-dispcc";
- reg = <0 0x0af00000 0 0x20000>;
+ reg = <0x0 0x0af00000 0x0 0x20000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_DISP_GPLL0_CLK>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
clock-names = "bi_tcxo",
@@ -2364,7 +2366,7 @@
pdc: interrupt-controller@b220000 {
compatible = "qcom,sm6350-pdc", "qcom,pdc";
- reg = <0 0x0b220000 0 0x30000>, <0 0x17c000f0 0 0x64>;
+ reg = <0x0 0x0b220000 0x0 0x30000>, <0x0 0x17c000f0 0x0 0x64>;
qcom,pdc-ranges = <0 480 94>, <94 609 31>,
<125 63 1>, <126 655 12>, <138 139 15>;
#interrupt-cells = <2>;
@@ -2374,8 +2376,8 @@
tsens0: thermal-sensor@c263000 {
compatible = "qcom,sm6350-tsens", "qcom,tsens-v2";
- reg = <0 0x0c263000 0 0x1ff>, /* TM */
- <0 0x0c222000 0 0x8>; /* SROT */
+ reg = <0x0 0x0c263000 0x0 0x1ff>, /* TM */
+ <0x0 0x0c222000 0x0 0x8>; /* SROT */
#qcom,sensors = <16>;
interrupts-extended = <&pdc 26 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 28 IRQ_TYPE_LEVEL_HIGH>;
@@ -2385,8 +2387,8 @@
tsens1: thermal-sensor@c265000 {
compatible = "qcom,sm6350-tsens", "qcom,tsens-v2";
- reg = <0 0x0c265000 0 0x1ff>, /* TM */
- <0 0x0c223000 0 0x8>; /* SROT */
+ reg = <0x0 0x0c265000 0x0 0x1ff>, /* TM */
+ <0x0 0x0c223000 0x0 0x8>; /* SROT */
#qcom,sensors = <16>;
interrupts-extended = <&pdc 27 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 29 IRQ_TYPE_LEVEL_HIGH>;
@@ -2396,7 +2398,7 @@
aoss_qmp: power-management@c300000 {
compatible = "qcom,sm6350-aoss-qmp", "qcom,aoss-qmp";
- reg = <0 0x0c300000 0 0x1000>;
+ reg = <0x0 0x0c300000 0x0 0x1000>;
interrupts-extended = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP
IRQ_TYPE_EDGE_RISING>;
mboxes = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP>;
@@ -2406,11 +2408,11 @@
spmi_bus: spmi@c440000 {
compatible = "qcom,spmi-pmic-arb";
- reg = <0 0x0c440000 0 0x1100>,
- <0 0x0c600000 0 0x2000000>,
- <0 0x0e600000 0 0x100000>,
- <0 0x0e700000 0 0xa0000>,
- <0 0x0c40a000 0 0x26000>;
+ reg = <0x0 0x0c440000 0x0 0x1100>,
+ <0x0 0x0c600000 0x0 0x2000000>,
+ <0x0 0x0e600000 0x0 0x100000>,
+ <0x0 0x0e700000 0x0 0xa0000>,
+ <0x0 0x0c40a000 0x0 0x26000>;
reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
interrupt-names = "periph_irq";
interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
@@ -2424,7 +2426,7 @@
tlmm: pinctrl@f100000 {
compatible = "qcom,sm6350-tlmm";
- reg = <0 0x0f100000 0 0x300000>;
+ reg = <0x0 0x0f100000 0x0 0x300000>;
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
@@ -2603,7 +2605,7 @@
apps_smmu: iommu@15000000 {
compatible = "qcom,sm6350-smmu-500", "arm,mmu-500";
- reg = <0 0x15000000 0 0x100000>;
+ reg = <0x0 0x15000000 0x0 0x100000>;
#iommu-cells = <2>;
#global-interrupts = <1>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
@@ -2701,7 +2703,7 @@
watchdog@17c10000 {
compatible = "qcom,apss-wdt-sm6350", "qcom,kpss-wdt";
- reg = <0 0x17c10000 0 0x1000>;
+ reg = <0x0 0x17c10000 0x0 0x1000>;
clocks = <&sleep_clk>;
interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
};
@@ -2855,7 +2857,7 @@
cpufreq_hw: cpufreq@18323000 {
compatible = "qcom,sm6350-cpufreq-hw", "qcom,cpufreq-hw";
- reg = <0 0x18323000 0 0x1000>, <0 0x18325800 0 0x1000>;
+ reg = <0x0 0x18323000 0x0 0x1000>, <0x0 0x18325800 0x0 0x1000>;
reg-names = "freq-domain0", "freq-domain1";
clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>;
clock-names = "xo", "alternate";
@@ -2866,7 +2868,7 @@
wifi: wifi@18800000 {
compatible = "qcom,wcn3990-wifi";
- reg = <0 0x18800000 0 0x800000>;
+ reg = <0x0 0x18800000 0x0 0x800000>;
reg-names = "membase";
memory-region = <&wlan_fw_mem>;
interrupts = <GIC_SPI 414 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/qcom/sm7325-nothing-spacewar.dts b/arch/arm64/boot/dts/qcom/sm7325-nothing-spacewar.dts
index a5cda478bd78..befbb40228b5 100644
--- a/arch/arm64/boot/dts/qcom/sm7325-nothing-spacewar.dts
+++ b/arch/arm64/boot/dts/qcom/sm7325-nothing-spacewar.dts
@@ -253,6 +253,124 @@
regulator-max-microvolt = <3700000>;
};
+ vreg_cam_vio_1p8: regulator-cam-vio {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_cam_vio_1p8";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 49 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ /* Always-on prevents CCI bus timeouts */
+ regulator-always-on;
+
+ vin-supply = <&vreg_bob>;
+ };
+
+ vreg_camf_vana_2p8: regulator-camf-vana {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_camf_vana_2p8";
+
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+
+ gpio = <&tlmm 43 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vreg_bob>;
+ };
+
+ vreg_camf_vdig_1p1: regulator-camf-vdig {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_camf_vdig_1p1";
+
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+
+ gpio = <&tlmm 35 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vreg_s8b_1p256>;
+ };
+
+ vreg_camu_vaf_1p8: regulator-camu-vaf {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_camu_vaf_1p8";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 71 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vreg_bob>;
+ };
+
+ vreg_camu_vana_2p8: regulator-camu-vana {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_camu_vana_2p8";
+
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ gpio = <&tlmm 68 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vreg_bob>;
+ };
+
+ vreg_camu_vdig_1p1: regulator-camu-vdig {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_camu_vdig_1p1";
+
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+
+ gpio = <&tlmm 50 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vreg_s8b_1p256>;
+ };
+
+ vreg_camw_vaf_1p8: regulator-camw-vaf {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_camw_vaf_1p8";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 96 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vreg_bob>;
+ };
+
+ vreg_camw_vana_2p8: regulator-camw-vana {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_camw_vana_2p8";
+
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+
+ gpio = <&tlmm 79 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vreg_bob>;
+ };
+
+ vreg_camw_vdig_1p1: regulator-camw-vdig {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_camw_vdig_1p1";
+
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+
+ gpio = <&tlmm 108 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vreg_s8b_1p256>;
+ };
+
// S2B is really ebi.lvl but it's there for supply map completeness sake.
vreg_s2b_0p7: smpa3-regulator {
compatible = "regulator-fixed";
@@ -714,11 +832,26 @@
};
&cci0 {
+ /*
+ * cci0_i2c1 bus is unused and GPIO 71&72 are repurposed.
+ * So set only cci0_i2c0 pinctrl here.
+ */
+ pinctrl-0 = <&cci0_default>;
+ pinctrl-1 = <&cci0_sleep>;
+
status = "okay";
};
&cci0_i2c0 {
- /* sony,imx471 (Front) */
+ /* D-PHY sony,imx471 (Front) @ 0x1a */
+
+ camf_p24c64f: eeprom@52 {
+ compatible = "puya,p24c64f",
+ "atmel,24c64";
+ reg = <0x52>;
+ vcc-supply = <&vreg_cam_vio_1p8>;
+ read-only;
+ };
};
&cci1 {
@@ -726,11 +859,29 @@
};
&cci1_i2c0 {
- /* samsung,s5kjn1 (Rear-aux UW) */
+ /* actuator (For Ultra Wide sensor) @ 0xc */
+ /* D-PHY samsung,s5kjn1 (Ultra Wide) @ 0x2d */
+
+ camu_gt24p128e: eeprom@51 {
+ compatible = "giantec,gt24p128e",
+ "atmel,24c128";
+ reg = <0x51>;
+ vcc-supply = <&vreg_cam_vio_1p8>;
+ read-only;
+ };
};
&cci1_i2c1 {
- /* sony,imx766 (Rear Wide) */
+ /* actuator (For Wide sensor) @ 0xc */
+ /* C-PHY sony,imx766 (Wide) @ 0x10 */
+
+ camw_gt24p128e: eeprom@50 {
+ compatible = "giantec,gt24p128e",
+ "atmel,24c128";
+ reg = <0x50>;
+ vcc-supply = <&vreg_cam_vio_1p8>;
+ read-only;
+ };
};
&gcc {
@@ -757,6 +908,10 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+};
+
&gpu_zap_shader {
firmware-name = "qcom/sm7325/nothing/spacewar/a660_zap.mbn";
};
@@ -823,15 +978,44 @@
status = "okay";
};
-/* MDSS remains disabled until the panel driver is present. */
+&mdss {
+ status = "okay";
+};
+
&mdss_dsi {
vdda-supply = <&vdd_a_dsi_0_1p2>;
+ status = "okay";
- /* Visionox RM692E5 panel */
+ panel: panel@0 {
+ compatible = "nothing,rm692e5-spacewar",
+ "visionox,rm692e5";
+ reg = <0>;
+
+ reset-gpios = <&tlmm 44 GPIO_ACTIVE_LOW>;
+
+ vdd-supply = <&vdd_oled>;
+ vddio-supply = <&vdd_io_oled>;
+
+ pinctrl-0 = <&lcd_reset_n>,
+ <&mdp_vsync_p>;
+ pinctrl-names = "default";
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+};
+
+&mdss_dsi0_out {
+ data-lanes = <0 1 2 3>;
+ remote-endpoint = <&panel_in>;
};
&mdss_dsi_phy {
vdds-supply = <&vdd_a_dsi_0_0p9>;
+ status = "okay";
};
&pm7325_gpios {
@@ -1039,7 +1223,7 @@
&q6asmdai {
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
};
@@ -1147,6 +1331,20 @@
bias-pull-down;
};
+ lcd_reset_n: lcd-reset-n-state {
+ pins = "gpio44";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ mdp_vsync_p: mdp-vsync-p-state {
+ pins = "gpio80";
+ function = "mdp_vsync";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
hst_bt_en: hst-bt-en-state {
pins = "gpio85";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
index 6ea883b1edfa..e1e294f0f462 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
@@ -500,7 +500,8 @@
qcom,dual-dsi-mode;
/* DSI1 is slave, so use DSI0 clocks */
- assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
status = "okay";
@@ -719,5 +720,5 @@
vdd-1.3-rfa-supply = <&vreg_l2c_1p3>;
vdd-3.3-ch0-supply = <&vreg_l11c_3p3>;
- qcom,ath10k-calibration-variant = "Qualcomm_sm8150hdk";
+ qcom,calibration-variant = "Qualcomm_sm8150hdk";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
index 9a3d0ac6c423..835ef929ff2d 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
+++ b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
@@ -453,22 +453,22 @@
&remoteproc_adsp {
status = "okay";
- firmware-name = "qcom/sm8150/microsoft/adsp.mdt";
+ firmware-name = "qcom/sm8150/microsoft/adsp.mbn";
};
&remoteproc_cdsp {
status = "okay";
- firmware-name = "qcom/sm8150/microsoft/cdsp.mdt";
+ firmware-name = "qcom/sm8150/microsoft/cdsp.mbn";
};
&remoteproc_mpss {
status = "okay";
- firmware-name = "qcom/sm8150/microsoft/modem.mdt";
+ firmware-name = "qcom/sm8150/microsoft/modem.mbn";
};
&remoteproc_slpi {
status = "okay";
- firmware-name = "qcom/sm8150/microsoft/slpi.mdt";
+ firmware-name = "qcom/sm8150/microsoft/slpi.mbn";
};
&pon_resin {
diff --git a/arch/arm64/boot/dts/qcom/sm8150-mtp.dts b/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
index 2e1c7afe0aa7..12e8e1ada6d8 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
@@ -379,22 +379,22 @@
&remoteproc_adsp {
status = "okay";
- firmware-name = "qcom/sm8150/adsp.mdt";
+ firmware-name = "qcom/sm8150/adsp.mbn";
};
&remoteproc_cdsp {
status = "okay";
- firmware-name = "qcom/sm8150/cdsp.mdt";
+ firmware-name = "qcom/sm8150/cdsp.mbn";
};
&remoteproc_mpss {
status = "okay";
- firmware-name = "qcom/sm8150/modem.mdt";
+ firmware-name = "qcom/sm8150/modem.mbn";
};
&remoteproc_slpi {
status = "okay";
- firmware-name = "qcom/sm8150/slpi.mdt";
+ firmware-name = "qcom/sm8150/slpi.mbn";
};
&tlmm {
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index 4dbda54b47a5..cdb47359c4c8 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -12,6 +12,7 @@
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,dispcc-sm8150.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-sm8150.h>
#include <dt-bindings/clock/qcom,gpucc-sm8150.h>
#include <dt-bindings/clock/qcom,videocc-sm8150.h>
@@ -3657,6 +3658,7 @@
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
iommus = <&apps_smmu 0x140 0>;
snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,dis-u1-entry-quirk;
snps,dis-u2-entry-quirk;
@@ -3736,6 +3738,7 @@
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
iommus = <&apps_smmu 0x160 0>;
snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,dis-u1-entry-quirk;
snps,dis-u2-entry-quirk;
@@ -3808,7 +3811,7 @@
mdss_mdp: display-controller@ae01000 {
compatible = "qcom,sm8150-dpu";
reg = <0 0x0ae01000 0 0x8f000>,
- <0 0x0aeb0000 0 0x2008>;
+ <0 0x0aeb0000 0 0x3000>;
reg-names = "mdp", "vbif";
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
@@ -3981,8 +3984,8 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd SM8150_MMCX>;
@@ -4074,8 +4077,8 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd SM8150_MMCX>;
@@ -4130,10 +4133,10 @@
compatible = "qcom,sm8150-dispcc";
reg = <0 0x0af00000 0 0x10000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
clock-names = "bi_tcxo",
diff --git a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi
index 813b009b7bd6..465fd6e954a3 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi
@@ -659,7 +659,8 @@
qcom,dual-dsi-mode;
qcom,sync-dual-dsi;
/* DSI1 is slave, so use DSI0 clocks */
- assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
status = "okay";
};
@@ -699,7 +700,7 @@
vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
- qcom,ath11k-calibration-variant = "Xiaomi_Pad_5Pro";
+ qcom,calibration-variant = "Xiaomi_Pad_5Pro";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index c2937b4d9f18..f0d18fd37aaf 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -5,6 +5,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,dispcc-sm8250.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-sm8250.h>
#include <dt-bindings/clock/qcom,gpucc-sm8250.h>
#include <dt-bindings/clock/qcom,rpmh.h>
@@ -606,7 +607,7 @@
};
cpu7_opp9: opp-1747200000 {
- opp-hz = /bits/ 64 <1708800000>;
+ opp-hz = /bits/ 64 <1747200000>;
opp-peak-kBps = <5412000 42393600>;
};
@@ -4690,7 +4691,7 @@
mdss_mdp: display-controller@ae01000 {
compatible = "qcom,sm8250-dpu";
reg = <0 0x0ae01000 0 0x8f000>,
- <0 0x0aeb0000 0 0x2008>;
+ <0 0x0aeb0000 0 0x3000>;
reg-names = "mdp", "vbif";
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
@@ -4861,8 +4862,10 @@
"iface",
"bus";
- assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+ assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd RPMHPD_MMCX>;
@@ -4953,8 +4956,10 @@
"iface",
"bus";
- assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>, <&mdss_dsi1_phy 1>;
+ assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd RPMHPD_MMCX>;
@@ -5011,10 +5016,10 @@
power-domains = <&rpmhpd RPMHPD_MMCX>;
required-opps = <&rpmhpd_opp_low_svs>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
clock-names = "bi_tcxo",
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
index 69da30f35baa..971c828a7555 100644
--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/interconnect/qcom,sm8350.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,dispcc-sm8350.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-sm8350.h>
#include <dt-bindings/clock/qcom,gpucc-sm8350.h>
#include <dt-bindings/clock/qcom,rpmh.h>
@@ -21,6 +22,7 @@
#include <dt-bindings/soc/qcom,apr.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
#include <dt-bindings/thermal/thermal.h>
#include <dt-bindings/interconnect/qcom,sm8350.h>
@@ -455,7 +457,7 @@
no-map;
};
- pil_camera_mem: mmeory@85200000 {
+ pil_camera_mem: memory@85200000 {
reg = <0x0 0x85200000 0x0 0x500000>;
no-map;
};
@@ -1806,11 +1808,11 @@
interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
qcom,ee = <0>;
+ qcom,num-ees = <4>;
+ num-channels = <16>;
qcom,controlled-remotely;
iommus = <&apps_smmu 0x594 0x0011>,
<&apps_smmu 0x596 0x0011>;
- /* FIXME: Probing BAM DMA causes some abort and system hang */
- status = "fail";
};
crypto: crypto@1dfa000 {
@@ -1822,8 +1824,6 @@
<&apps_smmu 0x596 0x0011>;
interconnects = <&aggre2_noc MASTER_CRYPTO 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "memory";
- /* FIXME: dependency BAM DMA is disabled */
- status = "disabled";
};
ipa: ipa@1e40000 {
@@ -1958,15 +1958,15 @@
iommus = <&apps_smmu 0x1801 0x0>;
dai@0 {
- reg = <0>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
};
dai@1 {
- reg = <1>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
};
dai@2 {
- reg = <2>;
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
};
};
};
@@ -2653,6 +2653,7 @@
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
iommus = <&apps_smmu 0x0 0x0>;
snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,dis-u1-entry-quirk;
snps,dis-u2-entry-quirk;
@@ -2731,6 +2732,7 @@
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
iommus = <&apps_smmu 0x20 0x0>;
snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,dis-u1-entry-quirk;
snps,dis-u2-entry-quirk;
@@ -2776,7 +2778,7 @@
mdss_mdp: display-controller@ae01000 {
compatible = "qcom,sm8350-dpu";
reg = <0 0x0ae01000 0 0x8f000>,
- <0 0x0aeb0000 0 0x2008>;
+ <0 0x0aeb0000 0 0x3000>;
reg-names = "mdp", "vbif";
clocks = <&gcc GCC_DISP_HF_AXI_CLK>,
@@ -2960,8 +2962,8 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi0_opp_table>;
power-domains = <&rpmhpd RPMHPD_MMCX>;
@@ -3059,8 +3061,8 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&dsi1_opp_table>;
power-domains = <&rpmhpd RPMHPD_MMCX>;
@@ -3140,8 +3142,10 @@
compatible = "qcom,sm8350-dispcc";
reg = <0 0x0af00000 0 0x10000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
- <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>,
- <&mdss_dsi1_phy 0>, <&mdss_dsi1_phy 1>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
clock-names = "bi_tcxo",
diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
index 9c809fc5fa45..54c6d0fdb2af 100644
--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
@@ -4,6 +4,7 @@
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,gcc-sm8450.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,sm8450-camcc.h>
@@ -2262,6 +2263,68 @@
};
};
+ pcie1_ep: pcie-ep@1c08000 {
+ compatible = "qcom,sm8450-pcie-ep";
+ reg = <0x0 0x01c08000 0x0 0x3000>,
+ <0x0 0x40000000 0x0 0xf1d>,
+ <0x0 0x40000f20 0x0 0xa8>,
+ <0x0 0x40001000 0x0 0x1000>,
+ <0x0 0x40200000 0x0 0x1000000>,
+ <0x0 0x01c0b000 0x0 0x1000>,
+ <0x0 0x40002000 0x0 0x1000>;
+ reg-names = "parf",
+ "dbi",
+ "elbi",
+ "atu",
+ "addr_space",
+ "mmio",
+ "dma";
+
+ clocks = <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_Q2A_AXI_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_DDRSS_PCIE_SF_TBU_CLK>,
+ <&gcc GCC_AGGRE_NOC_PCIE_1_AXI_CLK>;
+ clock-names = "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave",
+ "slave_q2a",
+ "ref",
+ "ddrss_sf_tbu",
+ "aggre_noc_axi";
+
+ interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global",
+ "doorbell",
+ "dma";
+
+ interconnects = <&pcie_noc MASTER_PCIE_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_PCIE_1 QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "pcie-mem",
+ "cpu-pcie";
+
+ iommus = <&apps_smmu 0x1c80 0x7f>;
+ resets = <&gcc GCC_PCIE_1_BCR>;
+ reset-names = "core";
+ power-domains = <&gcc PCIE_1_GDSC>;
+ phys = <&pcie1_phy>;
+ phy-names = "pciephy";
+ num-lanes = <2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie1_default_state>;
+
+ status = "disabled";
+ };
+
pcie1_phy: phy@1c0e000 {
compatible = "qcom,sm8450-qmp-gen4x2-pcie-phy";
reg = <0 0x01c0e000 0 0x2000>;
@@ -3274,7 +3337,7 @@
mdss_mdp: display-controller@ae01000 {
compatible = "qcom,sm8450-dpu";
reg = <0 0x0ae01000 0 0x8f000>,
- <0 0x0aeb0000 0 0x2008>;
+ <0 0x0aeb0000 0 0x3000>;
reg-names = "mdp", "vbif";
clocks = <&gcc GCC_DISP_HF_AXI_CLK>,
@@ -3456,8 +3519,10 @@
"iface",
"bus";
- assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+ assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&mdss_dsi_opp_table>;
power-domains = <&rpmhpd RPMHPD_MMCX>;
@@ -3548,8 +3613,10 @@
"iface",
"bus";
- assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>, <&mdss_dsi1_phy 1>;
+ assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&mdss_dsi_opp_table>;
power-domains = <&rpmhpd RPMHPD_MMCX>;
@@ -3608,10 +3675,10 @@
<&rpmhcc RPMH_CXO_CLK_A>,
<&gcc GCC_DISP_AHB_CLK>,
<&sleep_clk>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>,
<0>, /* dp1 */
@@ -5283,6 +5350,8 @@
interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
qcom,ee = <0>;
+ qcom,num-ees = <4>;
+ num-channels = <16>;
qcom,controlled-remotely;
iommus = <&apps_smmu 0x584 0x11>,
<&apps_smmu 0x588 0x0>,
@@ -5397,6 +5466,7 @@
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
iommus = <&apps_smmu 0x0 0x0>;
snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,dis-u1-entry-quirk;
snps,dis-u2-entry-quirk;
diff --git a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
index 29bc1ddfc7b2..9dfb248f9ab5 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
@@ -945,6 +945,10 @@
status = "okay";
};
+&iris {
+ status = "okay";
+};
+
&gpi_dma1 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
index 5648ab60ba4c..fdcecd41297d 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
@@ -672,6 +672,10 @@
};
};
+&iris {
+ status = "okay";
+};
+
&lpass_tlmm {
spkr_1_sd_n_active: spkr-1-sd-n-active-state {
pins = "gpio17";
diff --git a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
index 3a6cb2791304..49438a7e77ce 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
@@ -779,6 +779,10 @@
status = "okay";
};
+&iris {
+ status = "okay";
+};
+
&gpi_dma1 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts b/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
index e8383faac576..7d29a57a2b54 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
@@ -547,20 +547,20 @@
};
&remoteproc_adsp {
- firmware-name = "qcom/sm8550/adsp.mdt",
- "qcom/sm8550/adsp_dtb.mdt";
+ firmware-name = "qcom/sm8550/adsp.mbn",
+ "qcom/sm8550/adsp_dtb.mbn";
status = "okay";
};
&remoteproc_cdsp {
- firmware-name = "qcom/sm8550/cdsp.mdt",
- "qcom/sm8550/cdsp_dtb.mdt";
+ firmware-name = "qcom/sm8550/cdsp.mbn",
+ "qcom/sm8550/cdsp_dtb.mbn";
status = "okay";
};
&remoteproc_mpss {
- firmware-name = "qcom/sm8550/modem.mdt",
- "qcom/sm8550/modem_dtb.mdt";
+ firmware-name = "qcom/sm8550/modem.mbn",
+ "qcom/sm8550/modem_dtb.mbn";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
index eac8de4005d8..71a7e3b57ece 100644
--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
@@ -3,6 +3,7 @@
* Copyright (c) 2022, Linaro Limited
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,sm8450-videocc.h>
#include <dt-bindings/clock/qcom,sm8550-camcc.h>
@@ -331,7 +332,8 @@
scm: scm {
compatible = "qcom,scm-sm8550", "qcom,scm";
qcom,dload-mode = <&tcsr 0x19000>;
- interconnects = <&aggre2_noc MASTER_CRYPTO 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&aggre2_noc MASTER_CRYPTO QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
};
};
@@ -347,6 +349,48 @@
qcom,bcm-voters = <&apps_bcm_voter>;
};
+ qup_opp_table_100mhz: opp-table-qup100mhz {
+ compatible = "operating-points-v2";
+
+ opp-75000000 {
+ opp-hz = /bits/ 64 <75000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-100000000 {
+ opp-hz = /bits/ 64 <100000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+ };
+
+ qup_opp_table_120mhz: opp-table-qup120mhz {
+ compatible = "operating-points-v2";
+
+ opp-75000000 {
+ opp-hz = /bits/ 64 <75000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-120000000 {
+ opp-hz = /bits/ 64 <120000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+ };
+
+ qup_opp_table_125mhz: opp-table-qup125mhz {
+ compatible = "operating-points-v2";
+
+ opp-75000000 {
+ opp-hz = /bits/ 64 <75000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-125000000 {
+ opp-hz = /bits/ 64 <125000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+ };
+
memory@a0000000 {
device_type = "memory";
/* We expect the bootloader to fill in the size */
@@ -850,13 +894,18 @@
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 0 QCOM_GPI_I2C>,
<&gpi_dma2 1 0 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
status = "disabled";
};
@@ -868,13 +917,18 @@
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi8_data_clk>, <&qup_spi8_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 0 QCOM_GPI_SPI>,
<&gpi_dma2 1 0 QCOM_GPI_SPI>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -890,13 +944,18 @@
interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 1 QCOM_GPI_I2C>,
<&gpi_dma2 1 1 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
status = "disabled";
};
@@ -908,13 +967,18 @@
interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi9_data_clk>, <&qup_spi9_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 1 QCOM_GPI_SPI>,
<&gpi_dma2 1 1 QCOM_GPI_SPI>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -930,13 +994,18 @@
interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 2 QCOM_GPI_I2C>,
<&gpi_dma2 1 2 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
status = "disabled";
};
@@ -948,13 +1017,18 @@
interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi10_data_clk>, <&qup_spi10_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 2 QCOM_GPI_SPI>,
<&gpi_dma2 1 2 QCOM_GPI_SPI>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -970,13 +1044,18 @@
interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 3 QCOM_GPI_I2C>,
<&gpi_dma2 1 3 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
status = "disabled";
};
@@ -988,13 +1067,18 @@
interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi11_data_clk>, <&qup_spi11_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 3 QCOM_GPI_I2C>,
<&gpi_dma2 1 3 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1010,13 +1094,18 @@
interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 4 QCOM_GPI_I2C>,
<&gpi_dma2 1 4 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
status = "disabled";
};
@@ -1028,13 +1117,18 @@
interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi12_data_clk>, <&qup_spi12_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 4 QCOM_GPI_I2C>,
<&gpi_dma2 1 4 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1050,13 +1144,18 @@
interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 5 QCOM_GPI_I2C>,
<&gpi_dma2 1 5 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
status = "disabled";
};
@@ -1068,13 +1167,18 @@
interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi13_data_clk>, <&qup_spi13_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 5 QCOM_GPI_SPI>,
<&gpi_dma2 1 5 QCOM_GPI_SPI>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1088,9 +1192,13 @@
pinctrl-names = "default";
pinctrl-0 = <&qup_uart14_default>, <&qup_uart14_cts_rts>;
interrupts = <GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core", "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_125mhz>;
status = "disabled";
};
@@ -1104,13 +1212,18 @@
interrupts = <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 7 QCOM_GPI_I2C>,
<&gpi_dma2 1 7 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
status = "disabled";
};
@@ -1122,13 +1235,18 @@
interrupts = <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi15_data_clk>, <&qup_spi15_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>,
- <&aggre2_noc MASTER_QUP_2 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma2 0 7 QCOM_GPI_SPI>,
<&gpi_dma2 1 7 QCOM_GPI_SPI>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1156,9 +1274,13 @@
interrupts = <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_0 0 &clk_virt SLAVE_QUP_CORE_0 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_I2C 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core", "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1173,9 +1295,13 @@
interrupts = <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_0 0 &clk_virt SLAVE_QUP_CORE_0 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_I2C 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core", "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1190,9 +1316,13 @@
interrupts = <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_0 0 &clk_virt SLAVE_QUP_CORE_0 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_I2C 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core", "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1207,9 +1337,13 @@
interrupts = <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_0 0 &clk_virt SLAVE_QUP_CORE_0 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_I2C 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core", "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1224,9 +1358,13 @@
interrupts = <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_0 0 &clk_virt SLAVE_QUP_CORE_0 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_I2C 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core", "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1241,9 +1379,13 @@
interrupts = <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_0 0 &clk_virt SLAVE_QUP_CORE_0 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_I2C 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core", "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1258,9 +1400,13 @@
interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_0 0 &clk_virt SLAVE_QUP_CORE_0 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_I2C 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core", "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1275,9 +1421,13 @@
interrupts = <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_0 0 &clk_virt SLAVE_QUP_CORE_0 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_I2C 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core", "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1292,9 +1442,13 @@
interrupts = <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_0 0 &clk_virt SLAVE_QUP_CORE_0 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_I2C 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core", "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1309,9 +1463,13 @@
interrupts = <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_0 0 &clk_virt SLAVE_QUP_CORE_0 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_I2C 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core", "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
};
@@ -1347,7 +1505,8 @@
clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
<&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
iommus = <&apps_smmu 0xa3 0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core";
dma-coherent;
#address-cells = <2>;
@@ -1364,13 +1523,18 @@
interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 0 QCOM_GPI_I2C>,
<&gpi_dma1 1 0 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
status = "disabled";
};
@@ -1382,13 +1546,18 @@
interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi0_data_clk>, <&qup_spi0_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 0 QCOM_GPI_SPI>,
<&gpi_dma1 1 0 QCOM_GPI_SPI>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1404,13 +1573,18 @@
interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 1 QCOM_GPI_I2C>,
<&gpi_dma1 1 1 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
status = "disabled";
};
@@ -1422,13 +1596,18 @@
interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi1_data_clk>, <&qup_spi1_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 1 QCOM_GPI_SPI>,
<&gpi_dma1 1 1 QCOM_GPI_SPI>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1444,13 +1623,18 @@
interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 2 QCOM_GPI_I2C>,
<&gpi_dma1 1 2 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
status = "disabled";
};
@@ -1462,13 +1646,18 @@
interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi2_data_clk>, <&qup_spi2_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 2 QCOM_GPI_SPI>,
<&gpi_dma1 1 2 QCOM_GPI_SPI>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1484,13 +1673,18 @@
interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 3 QCOM_GPI_I2C>,
<&gpi_dma1 1 3 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
status = "disabled";
};
@@ -1502,13 +1696,18 @@
interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi3_data_clk>, <&qup_spi3_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 3 QCOM_GPI_SPI>,
<&gpi_dma1 1 3 QCOM_GPI_SPI>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1524,13 +1723,18 @@
interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 4 QCOM_GPI_I2C>,
<&gpi_dma1 1 4 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
status = "disabled";
};
@@ -1542,13 +1746,18 @@
interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi4_data_clk>, <&qup_spi4_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 4 QCOM_GPI_SPI>,
<&gpi_dma1 1 4 QCOM_GPI_SPI>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1562,13 +1771,18 @@
pinctrl-names = "default";
pinctrl-0 = <&qup_i2c5_data_clk>;
interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 5 QCOM_GPI_I2C>,
<&gpi_dma1 1 5 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1582,13 +1796,18 @@
interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi5_data_clk>, <&qup_spi5_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 5 QCOM_GPI_SPI>,
<&gpi_dma1 1 5 QCOM_GPI_SPI>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1602,13 +1821,18 @@
pinctrl-names = "default";
pinctrl-0 = <&qup_i2c6_data_clk>;
interrupts = <GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 6 QCOM_GPI_I2C>,
<&gpi_dma1 1 6 QCOM_GPI_I2C>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1622,13 +1846,18 @@
interrupts = <GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&qup_spi6_data_clk>, <&qup_spi6_cs>;
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>,
- <&aggre1_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core", "qup-config", "qup-memory";
dmas = <&gpi_dma1 0 6 QCOM_GPI_SPI>,
<&gpi_dma1 1 6 QCOM_GPI_SPI>;
dma-names = "tx", "rx";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1643,8 +1872,12 @@
pinctrl-0 = <&qup_uart7_default>;
interrupts = <GIC_SPI 579 IRQ_TYPE_LEVEL_HIGH>;
interconnect-names = "qup-core", "qup-config";
- interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
status = "disabled";
};
};
@@ -1768,8 +2001,10 @@
"ddrss_sf_tbu",
"noc_aggr";
- interconnects = <&pcie_noc MASTER_PCIE_0 0 &mc_virt SLAVE_EBI1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &cnoc_main SLAVE_PCIE_0 0>;
+ interconnects = <&pcie_noc MASTER_PCIE_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &cnoc_main SLAVE_PCIE_0 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "pcie-mem", "cpu-pcie";
msi-map = <0x0 &gic_its 0x1400 0x1>,
@@ -1785,8 +2020,49 @@
phys = <&pcie0_phy>;
phy-names = "pciephy";
+ operating-points-v2 = <&pcie0_opp_table>;
+
status = "disabled";
+ pcie0_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ /* GEN 1 x1 */
+ opp-2500000 {
+ opp-hz = /bits/ 64 <2500000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <250000 1>;
+ };
+
+ /* GEN 1 x2 and GEN 2 x1 */
+ opp-5000000 {
+ opp-hz = /bits/ 64 <5000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <500000 1>;
+ };
+
+ /* GEN 2 x2 */
+ opp-10000000 {
+ opp-hz = /bits/ 64 <10000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <1000000 1>;
+ };
+
+ /* GEN 3 x1 */
+ opp-8000000 {
+ opp-hz = /bits/ 64 <8000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <984500 1>;
+ };
+
+ /* GEN 3 x2 */
+ opp-16000000 {
+ opp-hz = /bits/ 64 <16000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <1969000 1>;
+ };
+ };
+
pcieport0: pcie@0 {
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
@@ -1891,8 +2167,10 @@
assigned-clocks = <&gcc GCC_PCIE_1_AUX_CLK>;
assigned-clock-rates = <19200000>;
- interconnects = <&pcie_noc MASTER_PCIE_1 0 &mc_virt SLAVE_EBI1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &cnoc_main SLAVE_PCIE_1 0>;
+ interconnects = <&pcie_noc MASTER_PCIE_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &cnoc_main SLAVE_PCIE_1 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "pcie-mem", "cpu-pcie";
msi-map = <0x0 &gic_its 0x1480 0x1>,
@@ -1909,8 +2187,56 @@
phys = <&pcie1_phy>;
phy-names = "pciephy";
+ operating-points-v2 = <&pcie1_opp_table>;
+
status = "disabled";
+ pcie1_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ /* GEN 1 x1 */
+ opp-2500000 {
+ opp-hz = /bits/ 64 <2500000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <250000 1>;
+ };
+
+ /* GEN 1 x2 and GEN 2 x1 */
+ opp-5000000 {
+ opp-hz = /bits/ 64 <5000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <500000 1>;
+ };
+
+ /* GEN 2 x2 */
+ opp-10000000 {
+ opp-hz = /bits/ 64 <10000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <1000000 1>;
+ };
+
+ /* GEN 3 x1 */
+ opp-8000000 {
+ opp-hz = /bits/ 64 <8000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <984500 1>;
+ };
+
+ /* GEN 3 x2 and GEN 4 x1 */
+ opp-16000000 {
+ opp-hz = /bits/ 64 <16000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <1969000 1>;
+ };
+
+ /* GEN 4 x2 */
+ opp-32000000 {
+ opp-hz = /bits/ 64 <32000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <3938000 1>;
+ };
+ };
+
pcie@0 {
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
@@ -1957,6 +2283,8 @@
interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
qcom,ee = <0>;
+ qcom,num-ees = <4>;
+ num-channels = <20>;
qcom,controlled-remotely;
iommus = <&apps_smmu 0x480 0x0>,
<&apps_smmu 0x481 0x0>;
@@ -1969,7 +2297,8 @@
dma-names = "rx", "tx";
iommus = <&apps_smmu 0x480 0x0>,
<&apps_smmu 0x481 0x0>;
- interconnects = <&aggre2_noc MASTER_CRYPTO 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&aggre2_noc MASTER_CRYPTO QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "memory";
};
@@ -2013,8 +2342,10 @@
dma-coherent;
operating-points-v2 = <&ufs_opp_table>;
- interconnects = <&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_UFS_MEM_CFG 0>;
+ interconnects = <&aggre1_noc MASTER_UFS_MEM QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_UFS_MEM_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "ufs-ddr", "cpu-ufs";
clock-names = "core_clk",
@@ -2314,8 +2645,10 @@
clocks = <&rpmhcc RPMH_IPA_CLK>;
clock-names = "core";
- interconnects = <&aggre2_noc MASTER_IPA 0 &mc_virt SLAVE_EBI1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_IPA_CFG 0>;
+ interconnects = <&aggre2_noc MASTER_IPA QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_IPA_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "memory",
"config";
@@ -2349,7 +2682,8 @@
<&rpmhpd RPMHPD_MSS>;
power-domain-names = "cx", "mss";
- interconnects = <&mc_virt MASTER_LLCC 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
memory-region = <&mpss_mem>, <&q6_mpss_dtb_mem>, <&mpss_dsm_mem>;
@@ -2390,7 +2724,8 @@
<&rpmhpd RPMHPD_LMX>;
power-domain-names = "lcx", "lmx";
- interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
@@ -2848,8 +3183,10 @@
power-domains = <&rpmhpd RPMHPD_CX>;
operating-points-v2 = <&sdhc2_opp_table>;
- interconnects = <&aggre2_noc MASTER_SDCC_2 0 &mc_virt SLAVE_EBI1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_SDCC_2 0>;
+ interconnects = <&aggre2_noc MASTER_SDCC_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_SDCC_2 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "sdhc-ddr", "cpu-sdhc";
bus-width = <4>;
dma-coherent;
@@ -2884,6 +3221,87 @@
};
};
+ iris: video-codec@aa00000 {
+ compatible = "qcom,sm8550-iris";
+
+ reg = <0 0x0aa00000 0 0xf0000>;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+
+ power-domains = <&videocc VIDEO_CC_MVS0C_GDSC>,
+ <&videocc VIDEO_CC_MVS0_GDSC>,
+ <&rpmhpd RPMHPD_MXC>,
+ <&rpmhpd RPMHPD_MMCX>;
+ power-domain-names = "venus",
+ "vcodec0",
+ "mxc",
+ "mmcx";
+ operating-points-v2 = <&iris_opp_table>;
+
+ clocks = <&gcc GCC_VIDEO_AXI0_CLK>,
+ <&videocc VIDEO_CC_MVS0C_CLK>,
+ <&videocc VIDEO_CC_MVS0_CLK>;
+ clock-names = "iface",
+ "core",
+ "vcodec0_core";
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_VENUS_CFG QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mmss_noc MASTER_VIDEO QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "cpu-cfg",
+ "video-mem";
+
+ memory-region = <&video_mem>;
+
+ resets = <&gcc GCC_VIDEO_AXI0_CLK_ARES>;
+ reset-names = "bus";
+
+ iommus = <&apps_smmu 0x1940 0>,
+ <&apps_smmu 0x1947 0>;
+ dma-coherent;
+
+ /*
+			 * IRIS firmware is signed by vendors; only
+			 * enable on boards where the proper signed firmware
+			 * is available.
+ */
+ status = "disabled";
+
+ iris_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-240000000 {
+ opp-hz = /bits/ 64 <240000000>;
+ required-opps = <&rpmhpd_opp_svs>,
+ <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-338000000 {
+ opp-hz = /bits/ 64 <338000000>;
+ required-opps = <&rpmhpd_opp_svs>,
+ <&rpmhpd_opp_svs>;
+ };
+
+ opp-366000000 {
+ opp-hz = /bits/ 64 <366000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>,
+ <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-444000000 {
+ opp-hz = /bits/ 64 <444000000>;
+ required-opps = <&rpmhpd_opp_nom>,
+ <&rpmhpd_opp_nom>;
+ };
+
+ opp-533333334 {
+ opp-hz = /bits/ 64 <533333334>;
+ required-opps = <&rpmhpd_opp_turbo>,
+ <&rpmhpd_opp_turbo>;
+ };
+ };
+ };
+
videocc: clock-controller@aaf0000 {
compatible = "qcom,sm8550-videocc";
reg = <0 0x0aaf0000 0 0x10000>;
@@ -3020,8 +3438,11 @@
power-domains = <&dispcc MDSS_GDSC>;
- interconnects = <&mmss_noc MASTER_MDP 0 &mc_virt SLAVE_EBI1 0>;
- interconnect-names = "mdp0-mem";
+ interconnects = <&mmss_noc MASTER_MDP QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "mdp0-mem", "cpu-cfg";
iommus = <&apps_smmu 0x1c00 0x2>;
@@ -3034,7 +3455,7 @@
mdss_mdp: display-controller@ae01000 {
compatible = "qcom,sm8550-dpu";
reg = <0 0x0ae01000 0 0x8f000>,
- <0 0x0aeb0000 0 0x2008>;
+ <0 0x0aeb0000 0 0x3000>;
reg-names = "mdp", "vbif";
interrupt-parent = <&mdss>;
@@ -3215,8 +3636,8 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&mdss_dsi_opp_table>;
@@ -3310,8 +3731,8 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&mdss_dsi_opp_table>;
@@ -3369,10 +3790,10 @@
<&bi_tcxo_ao_div2>,
<&gcc GCC_DISP_AHB_CLK>,
<&sleep_clk>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
<&usb_dp_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_dp_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>,
<0>, /* dp1 */
@@ -3493,8 +3914,10 @@
resets = <&gcc GCC_USB30_PRIM_BCR>;
- interconnects = <&aggre1_noc MASTER_USB3_0 0 &mc_virt SLAVE_EBI1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_0 0>;
+ interconnects = <&aggre1_noc MASTER_USB3_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_USB3_0 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "usb-ddr", "apps-usb";
status = "disabled";
@@ -4617,7 +5040,8 @@
compatible = "qcom,sm8550-llcc-bwmon", "qcom,sc7280-llcc-bwmon";
reg = <0 0x24091000 0 0x1000>;
interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
- interconnects = <&mc_virt MASTER_LLCC 3 &mc_virt SLAVE_EBI1 3>;
+ interconnects = <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>;
operating-points-v2 = <&llcc_bwmon_opp_table>;
@@ -4666,7 +5090,8 @@
compatible = "qcom,sm8550-cpu-bwmon", "qcom,sdm845-bwmon";
reg = <0 0x240b6400 0 0x600>;
interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
- interconnects = <&gem_noc MASTER_APPSS_PROC 3 &gem_noc SLAVE_LLCC 3>;
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>;
operating-points-v2 = <&cpu_bwmon_opp_table>;
@@ -4750,7 +5175,8 @@
<&rpmhpd RPMHPD_NSP>;
power-domain-names = "cx", "mxc", "nsp";
- interconnects = <&nsp_noc MASTER_CDSP_PROC 0 &mc_virt SLAVE_EBI1 0>;
+ interconnects = <&nsp_noc MASTER_CDSP_PROC QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
memory-region = <&cdsp_mem>, <&q6_cdsp_dtb_mem>;
diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
index 86684cb9a932..495ea9bfd008 100644
--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
@@ -3,6 +3,7 @@
* Copyright (c) 2023, Linaro Limited
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,sm8650-camcc.h>
#include <dt-bindings/clock/qcom,sm8650-dispcc.h>
@@ -15,6 +16,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interconnect/qcom,sm8650-rpmh.h>
+#include <dt-bindings/interconnect/qcom,osm-l3.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/mailbox/qcom-ipcc.h>
#include <dt-bindings/phy/phy-qcom-qmp.h>
@@ -85,6 +87,15 @@
qcom,freq-domain = <&cpufreq_hw 0>;
+ operating-points-v2 = <&cpu0_opp_table>;
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&epss_l3 MASTER_EPSS_L3_APPS
+ &epss_l3 SLAVE_EPSS_L3_SHARED>;
+
#cooling-cells = <2>;
l2_0: l2-cache {
@@ -118,6 +129,15 @@
qcom,freq-domain = <&cpufreq_hw 0>;
+ operating-points-v2 = <&cpu0_opp_table>;
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&epss_l3 MASTER_EPSS_L3_APPS
+ &epss_l3 SLAVE_EPSS_L3_SHARED>;
+
#cooling-cells = <2>;
};
@@ -138,6 +158,15 @@
qcom,freq-domain = <&cpufreq_hw 3>;
+ operating-points-v2 = <&cpu2_opp_table>;
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&epss_l3 MASTER_EPSS_L3_APPS
+ &epss_l3 SLAVE_EPSS_L3_SHARED>;
+
#cooling-cells = <2>;
l2_200: l2-cache {
@@ -159,13 +188,29 @@
power-domain-names = "psci";
enable-method = "psci";
- next-level-cache = <&l2_200>;
+ next-level-cache = <&l2_300>;
capacity-dmips-mhz = <1792>;
dynamic-power-coefficient = <238>;
qcom,freq-domain = <&cpufreq_hw 3>;
+ operating-points-v2 = <&cpu2_opp_table>;
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&epss_l3 MASTER_EPSS_L3_APPS
+ &epss_l3 SLAVE_EPSS_L3_SHARED>;
+
#cooling-cells = <2>;
+
+ l2_300: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
};
cpu4: cpu@400 {
@@ -185,6 +230,15 @@
qcom,freq-domain = <&cpufreq_hw 3>;
+ operating-points-v2 = <&cpu2_opp_table>;
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&epss_l3 MASTER_EPSS_L3_APPS
+ &epss_l3 SLAVE_EPSS_L3_SHARED>;
+
#cooling-cells = <2>;
l2_400: l2-cache {
@@ -212,6 +266,15 @@
qcom,freq-domain = <&cpufreq_hw 1>;
+ operating-points-v2 = <&cpu5_opp_table>;
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&epss_l3 MASTER_EPSS_L3_APPS
+ &epss_l3 SLAVE_EPSS_L3_SHARED>;
+
#cooling-cells = <2>;
l2_500: l2-cache {
@@ -239,6 +302,15 @@
qcom,freq-domain = <&cpufreq_hw 1>;
+ operating-points-v2 = <&cpu5_opp_table>;
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&epss_l3 MASTER_EPSS_L3_APPS
+ &epss_l3 SLAVE_EPSS_L3_SHARED>;
+
#cooling-cells = <2>;
l2_600: l2-cache {
@@ -266,6 +338,15 @@
qcom,freq-domain = <&cpufreq_hw 2>;
+ operating-points-v2 = <&cpu7_opp_table>;
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&epss_l3 MASTER_EPSS_L3_APPS
+ &epss_l3 SLAVE_EPSS_L3_SHARED>;
+
#cooling-cells = <2>;
l2_700: l2-cache {
@@ -365,7 +446,7 @@
};
};
- ete0 {
+ ete-0 {
compatible = "arm,embedded-trace-extension";
cpu = <&cpu0>;
@@ -379,15 +460,174 @@
};
};
+ ete-1 {
+ compatible = "arm,embedded-trace-extension";
+
+ cpu = <&cpu1>;
+
+ out-ports {
+ port {
+ ete1_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete1>;
+ };
+ };
+ };
+ };
+
+ ete-2 {
+ compatible = "arm,embedded-trace-extension";
+
+ cpu = <&cpu2>;
+
+ out-ports {
+ port {
+ ete2_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete2>;
+ };
+ };
+ };
+ };
+
+ ete-3 {
+ compatible = "arm,embedded-trace-extension";
+
+ cpu = <&cpu3>;
+
+ out-ports {
+ port {
+ ete3_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete3>;
+ };
+ };
+ };
+ };
+
+ ete-4 {
+ compatible = "arm,embedded-trace-extension";
+
+ cpu = <&cpu4>;
+
+ out-ports {
+ port {
+ ete4_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete4>;
+ };
+ };
+ };
+ };
+
+ ete-5 {
+ compatible = "arm,embedded-trace-extension";
+
+ cpu = <&cpu5>;
+
+ out-ports {
+ port {
+ ete5_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete5>;
+ };
+ };
+ };
+ };
+
+ ete-6 {
+ compatible = "arm,embedded-trace-extension";
+
+ cpu = <&cpu6>;
+
+ out-ports {
+ port {
+ ete6_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete6>;
+ };
+ };
+ };
+ };
+
+ ete-7 {
+ compatible = "arm,embedded-trace-extension";
+
+ cpu = <&cpu7>;
+
+ out-ports {
+ port {
+ ete7_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete7>;
+ };
+ };
+ };
+ };
+
funnel-ete {
compatible = "arm,coresight-static-funnel";
in-ports {
- port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
funnel_ete_in_ete0: endpoint {
remote-endpoint = <&ete0_out_funnel_ete>;
};
};
+
+ port@1 {
+ reg = <1>;
+
+ funnel_ete_in_ete1: endpoint {
+ remote-endpoint = <&ete1_out_funnel_ete>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ funnel_ete_in_ete2: endpoint {
+ remote-endpoint = <&ete2_out_funnel_ete>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+
+ funnel_ete_in_ete3: endpoint {
+ remote-endpoint = <&ete3_out_funnel_ete>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ funnel_ete_in_ete4: endpoint {
+ remote-endpoint = <&ete4_out_funnel_ete>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+
+ funnel_ete_in_ete5: endpoint {
+ remote-endpoint = <&ete5_out_funnel_ete>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+
+ funnel_ete_in_ete6: endpoint {
+ remote-endpoint = <&ete6_out_funnel_ete>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+
+ funnel_ete_in_ete7: endpoint {
+ remote-endpoint = <&ete7_out_funnel_ete>;
+ };
+ };
};
out-ports {
@@ -420,25 +660,941 @@
qcom,bcm-voters = <&apps_bcm_voter>;
};
+ qup_opp_table_100mhz: opp-table-qup100mhz {
+ compatible = "operating-points-v2";
+
+ opp-75000000 {
+ opp-hz = /bits/ 64 <75000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-100000000 {
+ opp-hz = /bits/ 64 <100000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+ };
+
+ qup_opp_table_120mhz: opp-table-qup120mhz {
+ compatible = "operating-points-v2";
+
+ opp-75000000 {
+ opp-hz = /bits/ 64 <75000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-120000000 {
+ opp-hz = /bits/ 64 <120000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+ };
+
+ qup_opp_table_128mhz: opp-table-qup128mhz {
+ compatible = "operating-points-v2";
+
+ opp-75000000 {
+ opp-hz = /bits/ 64 <75000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-128000000 {
+ opp-hz = /bits/ 64 <128000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+ };
+
+ qup_opp_table_240mhz: opp-table-qup240mhz {
+ compatible = "operating-points-v2";
+
+ opp-150000000 {
+ opp-hz = /bits/ 64 <150000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-240000000 {
+ opp-hz = /bits/ 64 <240000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+ };
+
memory@a0000000 {
device_type = "memory";
/* We expect the bootloader to fill in the size */
reg = <0 0xa0000000 0 0>;
};
+ cpu0_opp_table: opp-table-cpu0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-307200000 {
+ opp-hz = /bits/ 64 <307200000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (307200 * 32)>;
+ };
+
+ opp-364800000 {
+ opp-hz = /bits/ 64 <364800000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (307200 * 32)>;
+ };
+
+ opp-460800000 {
+ opp-hz = /bits/ 64 <460800000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (384000 * 32)>;
+ };
+
+ opp-556800000 {
+ opp-hz = /bits/ 64 <556800000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (499200 * 32)>;
+ };
+
+ opp-672000000 {
+ opp-hz = /bits/ 64 <672000000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (499200 * 32)>;
+ };
+
+ opp-787200000 {
+ opp-hz = /bits/ 64 <787200000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (729600 * 32)>;
+ };
+
+ opp-902400000 {
+ opp-hz = /bits/ 64 <902400000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (844800 * 32)>;
+ };
+
+ opp-1017600000 {
+ opp-hz = /bits/ 64 <1017600000>;
+ opp-peak-kBps = <(466000 * 16) (547000 * 4) (940800 * 32)>;
+ };
+
+ opp-1132800000 {
+ opp-hz = /bits/ 64 <1132800000>;
+ opp-peak-kBps = <(466000 * 16) (547000 * 4) (1036800 * 32)>;
+ };
+
+ opp-1248000000 {
+ opp-hz = /bits/ 64 <1248000000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (1132800 * 32)>;
+ };
+
+ opp-1344000000 {
+ opp-hz = /bits/ 64 <1344000000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1440000000 {
+ opp-hz = /bits/ 64 <1440000000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1459200000 {
+ opp-hz = /bits/ 64 <1459200000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1536000000 {
+ opp-hz = /bits/ 64 <1536000000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (1440000 * 32)>;
+ };
+
+ opp-1574400000 {
+ opp-hz = /bits/ 64 <1574400000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (1440000 * 32)>;
+ };
+
+ opp-1651200000 {
+ opp-hz = /bits/ 64 <1651200000>;
+ opp-peak-kBps = <(600000 * 16) (1555000 * 4) (1440000 * 32)>;
+ };
+
+ opp-1689600000 {
+ opp-hz = /bits/ 64 <1689600000>;
+ opp-peak-kBps = <(600000 * 16) (1555000 * 4) (1440000 * 32)>;
+ };
+
+ opp-1747200000 {
+ opp-hz = /bits/ 64 <1747200000>;
+ opp-peak-kBps = <(600000 * 16) (1555000 * 4) (1440000 * 32)>;
+ };
+
+ opp-1804800000 {
+ opp-hz = /bits/ 64 <1804800000>;
+ opp-peak-kBps = <(600000 * 16) (1555000 * 4) (1555200 * 32)>;
+ };
+
+ opp-1843200000 {
+ opp-hz = /bits/ 64 <1843200000>;
+ opp-peak-kBps = <(600000 * 16) (1555000 * 4) (1555200 * 32)>;
+ };
+
+ opp-1920000000 {
+ opp-hz = /bits/ 64 <1920000000>;
+ opp-peak-kBps = <(600000 * 16) (1555000 * 4) (1651200 * 32)>;
+ };
+
+ opp-1939200000 {
+ opp-hz = /bits/ 64 <1939200000>;
+ opp-peak-kBps = <(600000 * 16) (1555000 * 4) (1651200 * 32)>;
+ };
+
+ opp-2035200000 {
+ opp-hz = /bits/ 64 <2035200000>;
+ opp-peak-kBps = <(600000 * 16) (1555000 * 4) (1843200 * 32)>;
+ };
+
+ opp-2150400000 {
+ opp-hz = /bits/ 64 <2150400000>;
+ opp-peak-kBps = <(600000 * 16) (1555000 * 4) (1843200 * 32)>;
+ };
+
+ opp-2265600000 {
+ opp-hz = /bits/ 64 <2265600000>;
+ opp-peak-kBps = <(600000 * 16) (1555000 * 4) (2035200 * 32)>;
+ };
+ };
+
+ cpu2_opp_table: opp-table-cpu2 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-460800000 {
+ opp-hz = /bits/ 64 <460800000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (307200 * 32)>;
+ };
+
+ opp-499200000 {
+ opp-hz = /bits/ 64 <499200000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (307200 * 32)>;
+ };
+
+ opp-576000000 {
+ opp-hz = /bits/ 64 <576000000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (307200 * 32)>;
+ };
+
+ opp-614400000 {
+ opp-hz = /bits/ 64 <614400000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (499200 * 32)>;
+ };
+
+ opp-691200000 {
+ opp-hz = /bits/ 64 <691200000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-729600000 {
+ opp-hz = /bits/ 64 <729600000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-806400000 {
+ opp-hz = /bits/ 64 <806400000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-844800000 {
+ opp-hz = /bits/ 64 <844800000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-902400000 {
+ opp-hz = /bits/ 64 <902400000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-960000000 {
+ opp-hz = /bits/ 64 <960000000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (844800 * 32)>;
+ };
+
+ opp-1036800000 {
+ opp-hz = /bits/ 64 <1036800000>;
+ opp-peak-kBps = <(466000 * 16) (1555000 * 4) (844800 * 32)>;
+ };
+
+ opp-1075200000 {
+ opp-hz = /bits/ 64 <1075200000>;
+ opp-peak-kBps = <(466000 * 16) (1555000 * 4) (844800 * 32)>;
+ };
+
+ opp-1152000000 {
+ opp-hz = /bits/ 64 <1152000000>;
+ opp-peak-kBps = <(466000 * 16) (1555000 * 4) (844800 * 32)>;
+ };
+
+ opp-1190400000 {
+ opp-hz = /bits/ 64 <1190400000>;
+ opp-peak-kBps = <(466000 * 16) (1555000 * 4) (1036800 * 32)>;
+ };
+
+ opp-1267200000 {
+ opp-hz = /bits/ 64 <1267200000>;
+ opp-peak-kBps = <(600000 * 16) (2092000 * 4) (1036800 * 32)>;
+ };
+
+ opp-1286400000 {
+ opp-hz = /bits/ 64 <1286400000>;
+ opp-peak-kBps = <(600000 * 16) (2092000 * 4) (1036800 * 32)>;
+ };
+
+ opp-1382400000 {
+ opp-hz = /bits/ 64 <1382400000>;
+ opp-peak-kBps = <(600000 * 16) (2092000 * 4) (1036800 * 32)>;
+ };
+
+ opp-1401600000 {
+ opp-hz = /bits/ 64 <1401600000>;
+ opp-peak-kBps = <(600000 * 16) (2092000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1497600000 {
+ opp-hz = /bits/ 64 <1497600000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1612800000 {
+ opp-hz = /bits/ 64 <1612800000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1708800000 {
+ opp-hz = /bits/ 64 <1708800000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1728000000 {
+ opp-hz = /bits/ 64 <1728000000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1824000000 {
+ opp-hz = /bits/ 64 <1824000000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1843200000 {
+ opp-hz = /bits/ 64 <1843200000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1920000000 {
+ opp-hz = /bits/ 64 <1920000000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1440000 * 32)>;
+ };
+
+ opp-1958400000 {
+ opp-hz = /bits/ 64 <1958400000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2035200000 {
+ opp-hz = /bits/ 64 <2035200000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2073600000 {
+ opp-hz = /bits/ 64 <2073600000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2131200000 {
+ opp-hz = /bits/ 64 <2131200000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2188800000 {
+ opp-hz = /bits/ 64 <2188800000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2246400000 {
+ opp-hz = /bits/ 64 <2246400000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2304000000 {
+ opp-hz = /bits/ 64 <2304000000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2323200000 {
+ opp-hz = /bits/ 64 <2323200000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2380800000 {
+ opp-hz = /bits/ 64 <2380800000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2400000000 {
+ opp-hz = /bits/ 64 <2400000000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2438400000 {
+ opp-hz = /bits/ 64 <2438400000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2515200000 {
+ opp-hz = /bits/ 64 <2515200000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2572800000 {
+ opp-hz = /bits/ 64 <2572800000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1651200 * 32)>;
+ };
+
+ opp-2630400000 {
+ opp-hz = /bits/ 64 <2630400000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1651200 * 32)>;
+ };
+
+ opp-2707200000 {
+ opp-hz = /bits/ 64 <2707200000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1651200 * 32)>;
+ };
+
+ opp-2764800000 {
+ opp-hz = /bits/ 64 <2764800000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1843200 * 32)>;
+ };
+
+ opp-2841600000 {
+ opp-hz = /bits/ 64 <2841600000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-2899200000 {
+ opp-hz = /bits/ 64 <2899200000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-2956800000 {
+ opp-hz = /bits/ 64 <2956800000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-3014400000 {
+ opp-hz = /bits/ 64 <3014400000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-3072000000 {
+ opp-hz = /bits/ 64 <3072000000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-3148800000 {
+ opp-hz = /bits/ 64 <3148800000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (2035200 * 32)>;
+ };
+ };
+
+ cpu5_opp_table: opp-table-cpu5 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-460800000 {
+ opp-hz = /bits/ 64 <460800000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (307200 * 32)>;
+ };
+
+ opp-499200000 {
+ opp-hz = /bits/ 64 <499200000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (307200 * 32)>;
+ };
+
+ opp-576000000 {
+ opp-hz = /bits/ 64 <576000000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (307200 * 32)>;
+ };
+
+ opp-614400000 {
+ opp-hz = /bits/ 64 <614400000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (499200 * 32)>;
+ };
+
+ opp-691200000 {
+ opp-hz = /bits/ 64 <691200000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-729600000 {
+ opp-hz = /bits/ 64 <729600000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-806400000 {
+ opp-hz = /bits/ 64 <806400000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-844800000 {
+ opp-hz = /bits/ 64 <844800000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-902400000 {
+ opp-hz = /bits/ 64 <902400000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-960000000 {
+ opp-hz = /bits/ 64 <960000000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (844800 * 32)>;
+ };
+
+ opp-1036800000 {
+ opp-hz = /bits/ 64 <1036800000>;
+ opp-peak-kBps = <(466000 * 16) (1555000 * 4) (844800 * 32)>;
+ };
+
+ opp-1075200000 {
+ opp-hz = /bits/ 64 <1075200000>;
+ opp-peak-kBps = <(466000 * 16) (1555000 * 4) (844800 * 32)>;
+ };
+
+ opp-1152000000 {
+ opp-hz = /bits/ 64 <1152000000>;
+ opp-peak-kBps = <(466000 * 16) (1555000 * 4) (844800 * 32)>;
+ };
+
+ opp-1190400000 {
+ opp-hz = /bits/ 64 <1190400000>;
+ opp-peak-kBps = <(466000 * 16) (1555000 * 4) (1036800 * 32)>;
+ };
+
+ opp-1267200000 {
+ opp-hz = /bits/ 64 <1267200000>;
+ opp-peak-kBps = <(600000 * 16) (2092000 * 4) (1036800 * 32)>;
+ };
+
+ opp-1286400000 {
+ opp-hz = /bits/ 64 <1286400000>;
+ opp-peak-kBps = <(600000 * 16) (2092000 * 4) (1036800 * 32)>;
+ };
+
+ opp-1382400000 {
+ opp-hz = /bits/ 64 <1382400000>;
+ opp-peak-kBps = <(600000 * 16) (2092000 * 4) (1036800 * 32)>;
+ };
+
+ opp-1401600000 {
+ opp-hz = /bits/ 64 <1401600000>;
+ opp-peak-kBps = <(600000 * 16) (2092000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1497600000 {
+ opp-hz = /bits/ 64 <1497600000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1612800000 {
+ opp-hz = /bits/ 64 <1612800000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1708800000 {
+ opp-hz = /bits/ 64 <1708800000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1728000000 {
+ opp-hz = /bits/ 64 <1728000000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1824000000 {
+ opp-hz = /bits/ 64 <1824000000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1843200000 {
+ opp-hz = /bits/ 64 <1843200000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1920000000 {
+ opp-hz = /bits/ 64 <1920000000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1440000 * 32)>;
+ };
+
+ opp-1958400000 {
+ opp-hz = /bits/ 64 <1958400000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2035200000 {
+ opp-hz = /bits/ 64 <2035200000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2073600000 {
+ opp-hz = /bits/ 64 <2073600000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2131200000 {
+ opp-hz = /bits/ 64 <2131200000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2188800000 {
+ opp-hz = /bits/ 64 <2188800000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2246400000 {
+ opp-hz = /bits/ 64 <2246400000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2304000000 {
+ opp-hz = /bits/ 64 <2304000000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2323200000 {
+ opp-hz = /bits/ 64 <2323200000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2380800000 {
+ opp-hz = /bits/ 64 <2380800000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2400000000 {
+ opp-hz = /bits/ 64 <2400000000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2438400000 {
+ opp-hz = /bits/ 64 <2438400000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2515200000 {
+ opp-hz = /bits/ 64 <2515200000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2572800000 {
+ opp-hz = /bits/ 64 <2572800000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1651200 * 32)>;
+ };
+
+ opp-2630400000 {
+ opp-hz = /bits/ 64 <2630400000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1651200 * 32)>;
+ };
+
+ opp-2707200000 {
+ opp-hz = /bits/ 64 <2707200000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1651200 * 32)>;
+ };
+
+ opp-2764800000 {
+ opp-hz = /bits/ 64 <2764800000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1843200 * 32)>;
+ };
+
+ opp-2841600000 {
+ opp-hz = /bits/ 64 <2841600000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-2899200000 {
+ opp-hz = /bits/ 64 <2899200000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-2956800000 {
+ opp-hz = /bits/ 64 <2956800000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-3014400000 {
+ opp-hz = /bits/ 64 <3014400000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-3072000000 {
+ opp-hz = /bits/ 64 <3072000000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-3148800000 {
+ opp-hz = /bits/ 64 <3148800000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (2035200 * 32)>;
+ };
+ };
+
+ cpu7_opp_table: opp-table-cpu7 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-480000000 {
+ opp-hz = /bits/ 64 <480000000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (307200 * 32)>;
+ };
+
+ opp-499200000 {
+ opp-hz = /bits/ 64 <499200000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (307200 * 32)>;
+ };
+
+ opp-576000000 {
+ opp-hz = /bits/ 64 <576000000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (307200 * 32)>;
+ };
+
+ opp-614400000 {
+ opp-hz = /bits/ 64 <614400000>;
+ opp-peak-kBps = <(300000 * 16) (547000 * 4) (499200 * 32)>;
+ };
+
+ opp-672000000 {
+ opp-hz = /bits/ 64 <672000000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-729600000 {
+ opp-hz = /bits/ 64 <729600000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-787200000 {
+ opp-hz = /bits/ 64 <787200000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-844800000 {
+ opp-hz = /bits/ 64 <844800000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-902400000 {
+ opp-hz = /bits/ 64 <902400000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-940800000 {
+ opp-hz = /bits/ 64 <940800000>;
+ opp-peak-kBps = <(466000 * 16) (768000 * 4) (499200 * 32)>;
+ };
+
+ opp-1017600000 {
+ opp-hz = /bits/ 64 <1017600000>;
+ opp-peak-kBps = <(466000 * 16) (1555000 * 4) (844800 * 32)>;
+ };
+
+ opp-1075200000 {
+ opp-hz = /bits/ 64 <1075200000>;
+ opp-peak-kBps = <(466000 * 16) (1555000 * 4) (844800 * 32)>;
+ };
+
+ opp-1132800000 {
+ opp-hz = /bits/ 64 <1132800000>;
+ opp-peak-kBps = <(466000 * 16) (1555000 * 4) (844800 * 32)>;
+ };
+
+ opp-1190400000 {
+ opp-hz = /bits/ 64 <1190400000>;
+ opp-peak-kBps = <(466000 * 16) (1555000 * 4) (1036800 * 32)>;
+ };
+
+ opp-1248000000 {
+ opp-hz = /bits/ 64 <1248000000>;
+ opp-peak-kBps = <(600000 * 16) (2092000 * 4) (1036800 * 32)>;
+ };
+
+ opp-1305600000 {
+ opp-hz = /bits/ 64 <1305600000>;
+ opp-peak-kBps = <(600000 * 16) (2092000 * 4) (1036800 * 32)>;
+ };
+
+ opp-1363200000 {
+ opp-hz = /bits/ 64 <1363200000>;
+ opp-peak-kBps = <(600000 * 16) (2092000 * 4) (1036800 * 32)>;
+ };
+
+ opp-1420800000 {
+ opp-hz = /bits/ 64 <1420800000>;
+ opp-peak-kBps = <(600000 * 16) (2092000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1478400000 {
+ opp-hz = /bits/ 64 <1478400000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1555200000 {
+ opp-hz = /bits/ 64 <1555200000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1593600000 {
+ opp-hz = /bits/ 64 <1593600000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1670400000 {
+ opp-hz = /bits/ 64 <1670400000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1708800000 {
+ opp-hz = /bits/ 64 <1708800000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1804800000 {
+ opp-hz = /bits/ 64 <1804800000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1824000000 {
+ opp-hz = /bits/ 64 <1824000000>;
+ opp-peak-kBps = <(806000 * 16) (2736000 * 4) (1248000 * 32)>;
+ };
+
+ opp-1939200000 {
+ opp-hz = /bits/ 64 <1939200000>;
+ opp-peak-kBps = <(806000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2035200000 {
+ opp-hz = /bits/ 64 <2035200000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2073600000 {
+ opp-hz = /bits/ 64 <2073600000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2112000000 {
+ opp-hz = /bits/ 64 <2112000000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2169600000 {
+ opp-hz = /bits/ 64 <2169600000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2208000000 {
+ opp-hz = /bits/ 64 <2208000000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2246400000 {
+ opp-hz = /bits/ 64 <2246400000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2304000000 {
+ opp-hz = /bits/ 64 <2304000000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2342400000 {
+ opp-hz = /bits/ 64 <2342400000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2380800000 {
+ opp-hz = /bits/ 64 <2380800000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2438400000 {
+ opp-hz = /bits/ 64 <2438400000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2457600000 {
+ opp-hz = /bits/ 64 <2457600000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2496000000 {
+ opp-hz = /bits/ 64 <2496000000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2553600000 {
+ opp-hz = /bits/ 64 <2553600000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1440000 * 32)>;
+ };
+
+ opp-2630400000 {
+ opp-hz = /bits/ 64 <2630400000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1651200 * 32)>;
+ };
+
+ opp-2688000000 {
+ opp-hz = /bits/ 64 <2688000000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1651200 * 32)>;
+ };
+
+ opp-2745600000 {
+ opp-hz = /bits/ 64 <2745600000>;
+ opp-peak-kBps = <(933000 * 16) (3686000 * 4) (1843200 * 32)>;
+ };
+
+ opp-2803200000 {
+ opp-hz = /bits/ 64 <2803200000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-2880000000 {
+ opp-hz = /bits/ 64 <2880000000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-2937600000 {
+ opp-hz = /bits/ 64 <2937600000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-2995200000 {
+ opp-hz = /bits/ 64 <2995200000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-3052800000 {
+ opp-hz = /bits/ 64 <3052800000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (1843200 * 32)>;
+ };
+
+ opp-3187200000 {
+ opp-hz = /bits/ 64 <3187200000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (2035200 * 32)>;
+ };
+
+ opp-3302400000 {
+ opp-hz = /bits/ 64 <3302400000>;
+ opp-peak-kBps = <(1066000 * 16) (4224000 * 4) (2035200 * 32)>;
+ };
+ };
+
pmu-a520 {
compatible = "arm,cortex-a520-pmu";
- interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW &ppi_cluster0>;
};
pmu-a720 {
compatible = "arm,cortex-a720-pmu";
- interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW &ppi_cluster1>;
};
pmu-x4 {
compatible = "arm,cortex-x4-pmu";
- interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW &ppi_cluster2>;
};
psci {
@@ -460,7 +1616,7 @@
cpu_pd2: power-domain-cpu2 {
#power-domain-cells = <0>;
power-domains = <&cluster_pd>;
- domain-idle-states = <&silver_cpu_sleep_0>;
+ domain-idle-states = <&gold_cpu_sleep_0>;
};
cpu_pd3: power-domain-cpu3 {
@@ -816,7 +1972,7 @@
compatible = "qcom,sm8650-ipcc", "qcom,ipcc";
reg = <0 0x00406000 0 0x1000>;
- interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-controller;
#interrupt-cells = <3>;
@@ -827,18 +1983,18 @@
compatible = "qcom,sm8650-gpi-dma", "qcom,sm6350-gpi-dma";
reg = <0 0x00800000 0 0x60000>;
- interrupts = <GIC_SPI 588 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 589 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 590 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 591 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 592 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 593 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 594 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 595 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 596 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 597 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 598 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 599 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 588 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 589 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 590 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 591 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 592 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 593 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 594 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 595 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 596 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 597 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 598 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 599 IRQ_TYPE_LEVEL_HIGH 0>;
dma-channels = <12>;
dma-channel-mask = <0x3f>;
@@ -874,21 +2030,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x00880000 0 0x4000>;
- interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S0_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma2 0 0 QCOM_GPI_I2C>,
<&gpi_dma2 1 0 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -907,21 +2067,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x00880000 0 0x4000>;
- interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S0_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma2 0 0 QCOM_GPI_SPI>,
<&gpi_dma2 1 0 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -940,21 +2104,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x00884000 0 0x4000>;
- interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S1_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma2 0 1 QCOM_GPI_I2C>,
<&gpi_dma2 1 1 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -973,21 +2141,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x00884000 0 0x4000>;
- interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S1_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma2 0 1 QCOM_GPI_SPI>,
<&gpi_dma2 1 1 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1006,21 +2178,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x00888000 0 0x4000>;
- interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S2_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma2 0 2 QCOM_GPI_I2C>,
<&gpi_dma2 1 2 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1039,21 +2215,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x00888000 0 0x4000>;
- interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S2_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma2 0 2 QCOM_GPI_SPI>,
<&gpi_dma2 1 2 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1072,21 +2252,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x0088c000 0 0x4000>;
- interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S3_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma2 0 3 QCOM_GPI_I2C>,
<&gpi_dma2 1 3 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1105,21 +2289,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x0088c000 0 0x4000>;
- interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S3_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma2 0 3 QCOM_GPI_SPI>,
<&gpi_dma2 1 3 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1138,21 +2326,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x00890000 0 0x4000>;
- interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S4_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma2 0 4 QCOM_GPI_I2C>,
<&gpi_dma2 1 4 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1171,21 +2363,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x00890000 0 0x4000>;
- interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S4_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma2 0 4 QCOM_GPI_SPI>,
<&gpi_dma2 1 4 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1204,21 +2400,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x00894000 0 0x4000>;
- interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S5_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma2 0 5 QCOM_GPI_I2C>,
<&gpi_dma2 1 5 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1237,21 +2437,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x00894000 0 0x4000>;
- interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S5_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma2 0 5 QCOM_GPI_SPI>,
<&gpi_dma2 1 5 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1270,18 +2474,22 @@
compatible = "qcom,geni-uart";
reg = <0 0x00898000 0 0x4000>;
- interrupts = <GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S6_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_128mhz>;
+
pinctrl-0 = <&qup_uart14_default>, <&qup_uart14_cts_rts>;
pinctrl-names = "default";
@@ -1292,18 +2500,22 @@
compatible = "qcom,geni-debug-uart";
reg = <0 0x0089c000 0 0x4000>;
- interrupts = <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP2_S7_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
pinctrl-0 = <&qup_uart15_default>;
pinctrl-names = "default";
@@ -1328,7 +2540,7 @@
compatible = "qcom,geni-i2c-master-hub";
reg = <0 0x00980000 0 0x4000>;
- interrupts = <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_I2C_S0_CLK>,
<&gcc GCC_QUPV3_I2C_CORE_CLK>;
@@ -1337,11 +2549,15 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ required-opps = <&rpmhpd_opp_low_svs>;
+
pinctrl-0 = <&hub_i2c0_data_clk>;
pinctrl-names = "default";
@@ -1355,7 +2571,7 @@
compatible = "qcom,geni-i2c-master-hub";
reg = <0 0x00984000 0 0x4000>;
- interrupts = <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_I2C_S1_CLK>,
<&gcc GCC_QUPV3_I2C_CORE_CLK>;
@@ -1364,11 +2580,15 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ required-opps = <&rpmhpd_opp_low_svs>;
+
pinctrl-0 = <&hub_i2c1_data_clk>;
pinctrl-names = "default";
@@ -1382,7 +2602,7 @@
compatible = "qcom,geni-i2c-master-hub";
reg = <0 0x00988000 0 0x4000>;
- interrupts = <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_I2C_S2_CLK>,
<&gcc GCC_QUPV3_I2C_CORE_CLK>;
@@ -1391,11 +2611,15 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ required-opps = <&rpmhpd_opp_low_svs>;
+
pinctrl-0 = <&hub_i2c2_data_clk>;
pinctrl-names = "default";
@@ -1409,7 +2633,7 @@
compatible = "qcom,geni-i2c-master-hub";
reg = <0 0x0098c000 0 0x4000>;
- interrupts = <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_I2C_S3_CLK>,
<&gcc GCC_QUPV3_I2C_CORE_CLK>;
@@ -1418,11 +2642,15 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ required-opps = <&rpmhpd_opp_low_svs>;
+
pinctrl-0 = <&hub_i2c3_data_clk>;
pinctrl-names = "default";
@@ -1436,7 +2664,7 @@
compatible = "qcom,geni-i2c-master-hub";
reg = <0 0x00990000 0 0x4000>;
- interrupts = <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_I2C_S4_CLK>,
<&gcc GCC_QUPV3_I2C_CORE_CLK>;
@@ -1445,11 +2673,15 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ required-opps = <&rpmhpd_opp_low_svs>;
+
pinctrl-0 = <&hub_i2c4_data_clk>;
pinctrl-names = "default";
@@ -1463,7 +2695,7 @@
compatible = "qcom,geni-i2c-master-hub";
reg = <0 0x00994000 0 0x4000>;
- interrupts = <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_I2C_S5_CLK>,
<&gcc GCC_QUPV3_I2C_CORE_CLK>;
@@ -1472,11 +2704,15 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ required-opps = <&rpmhpd_opp_low_svs>;
+
pinctrl-0 = <&hub_i2c5_data_clk>;
pinctrl-names = "default";
@@ -1490,7 +2726,7 @@
compatible = "qcom,geni-i2c-master-hub";
reg = <0 0x00998000 0 0x4000>;
- interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_I2C_S6_CLK>,
<&gcc GCC_QUPV3_I2C_CORE_CLK>;
@@ -1499,11 +2735,15 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ required-opps = <&rpmhpd_opp_low_svs>;
+
pinctrl-0 = <&hub_i2c6_data_clk>;
pinctrl-names = "default";
@@ -1517,7 +2757,7 @@
compatible = "qcom,geni-i2c-master-hub";
reg = <0 0x0099c000 0 0x4000>;
- interrupts = <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_I2C_S7_CLK>,
<&gcc GCC_QUPV3_I2C_CORE_CLK>;
@@ -1526,11 +2766,15 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ required-opps = <&rpmhpd_opp_low_svs>;
+
pinctrl-0 = <&hub_i2c7_data_clk>;
pinctrl-names = "default";
@@ -1544,7 +2788,7 @@
compatible = "qcom,geni-i2c-master-hub";
reg = <0 0x009a0000 0 0x4000>;
- interrupts = <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_I2C_S8_CLK>,
<&gcc GCC_QUPV3_I2C_CORE_CLK>;
@@ -1553,11 +2797,15 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ required-opps = <&rpmhpd_opp_low_svs>;
+
pinctrl-0 = <&hub_i2c8_data_clk>;
pinctrl-names = "default";
@@ -1571,7 +2819,7 @@
compatible = "qcom,geni-i2c-master-hub";
reg = <0 0x009a4000 0 0x4000>;
- interrupts = <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_I2C_S9_CLK>,
<&gcc GCC_QUPV3_I2C_CORE_CLK>;
@@ -1580,11 +2828,15 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ required-opps = <&rpmhpd_opp_low_svs>;
+
pinctrl-0 = <&hub_i2c9_data_clk>;
pinctrl-names = "default";
@@ -1599,18 +2851,18 @@
compatible = "qcom,sm8650-gpi-dma", "qcom,sm6350-gpi-dma";
reg = <0 0x00a00000 0 0x60000>;
- interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 295 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 296 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 295 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 296 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH 0>;
dma-channels = <12>;
dma-channel-mask = <0xc>;
@@ -1649,21 +2901,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x00a80000 0 0x4000>;
- interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma1 0 0 QCOM_GPI_I2C>,
<&gpi_dma1 1 0 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1682,21 +2938,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x00a80000 0 0x4000>;
- interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma1 0 0 QCOM_GPI_SPI>,
<&gpi_dma1 1 0 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1715,21 +2975,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x00a84000 0 0x4000>;
- interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma1 0 1 QCOM_GPI_I2C>,
<&gpi_dma1 1 1 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1748,21 +3012,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x00a84000 0 0x4000>;
- interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma1 0 1 QCOM_GPI_SPI>,
<&gpi_dma1 1 1 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1781,21 +3049,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x00a88000 0 0x4000>;
- interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_240mhz>;
+
dmas = <&gpi_dma1 0 2 QCOM_GPI_I2C>,
<&gpi_dma1 1 2 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1814,21 +3086,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x00a88000 0 0x4000>;
- interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_240mhz>;
+
dmas = <&gpi_dma1 0 2 QCOM_GPI_SPI>,
<&gpi_dma1 1 2 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1847,21 +3123,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x00a8c000 0 0x4000>;
- interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma1 0 3 QCOM_GPI_I2C>,
<&gpi_dma1 1 3 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1880,21 +3160,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x00a8c000 0 0x4000>;
- interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma1 0 3 QCOM_GPI_SPI>,
<&gpi_dma1 1 3 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1913,21 +3197,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x00a90000 0 0x4000>;
- interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma1 0 4 QCOM_GPI_I2C>,
<&gpi_dma1 1 4 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1946,21 +3234,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x00a90000 0 0x4000>;
- interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma1 0 4 QCOM_GPI_SPI>,
<&gpi_dma1 1 4 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1979,21 +3271,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x00a94000 0 0x4000>;
- interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma1 0 5 QCOM_GPI_I2C>,
<&gpi_dma1 1 5 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -2012,21 +3308,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x00a94000 0 0x4000>;
- interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma1 0 5 QCOM_GPI_SPI>,
<&gpi_dma1 1 5 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -2045,21 +3345,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x00a98000 0 0x4000>;
- interrupts = <GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma1 0 6 QCOM_GPI_I2C>,
<&gpi_dma1 1 6 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -2078,21 +3382,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x00a98000 0 0x4000>;
- interrupts = <GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma1 0 6 QCOM_GPI_SPI>,
<&gpi_dma1 1 6 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -2111,21 +3419,25 @@
compatible = "qcom,geni-i2c";
reg = <0 0x00a9c000 0 0x4000>;
- interrupts = <GIC_SPI 579 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 579 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma1 0 7 QCOM_GPI_I2C>,
<&gpi_dma1 1 7 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -2144,21 +3456,25 @@
compatible = "qcom,geni-spi";
reg = <0 0x00a9c000 0 0x4000>;
- interrupts = <GIC_SPI 579 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 579 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>;
clock-names = "se";
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma1 0 7 QCOM_GPI_SPI>,
<&gpi_dma1 1 7 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -2260,15 +3576,15 @@
<0 0x60100000 0 0x100000>;
reg-names = "parf", "dbi", "elbi", "atu", "config";
- interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "msi0",
"msi1",
"msi2",
@@ -2301,20 +3617,22 @@
interconnects = <&pcie_noc MASTER_PCIE_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &cnoc_main SLAVE_PCIE_0 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &cnoc_main SLAVE_PCIE_0 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "pcie-mem",
"cpu-pcie";
power-domains = <&gcc PCIE_0_GDSC>;
+ operating-points-v2 = <&pcie0_opp_table>;
+
iommu-map = <0 &apps_smmu 0x1400 0x1>,
<0x100 &apps_smmu 0x1401 0x1>;
- interrupt-map = <0 0 0 1 &intc 0 0 0 149 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &intc 0 0 0 150 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &intc 0 0 0 151 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &intc 0 0 0 152 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 1 &intc 0 0 0 149 IRQ_TYPE_LEVEL_HIGH 0>,
+ <0 0 0 2 &intc 0 0 0 150 IRQ_TYPE_LEVEL_HIGH 0>,
+ <0 0 0 3 &intc 0 0 0 151 IRQ_TYPE_LEVEL_HIGH 0>,
+ <0 0 0 4 &intc 0 0 0 152 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-map-mask = <0 0 0 0x7>;
#interrupt-cells = <1>;
@@ -2338,6 +3656,45 @@
status = "disabled";
+ pcie0_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ /* GEN 1 x1 */
+ opp-2500000 {
+ opp-hz = /bits/ 64 <2500000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <250000 1>;
+ };
+
+ /* GEN 1 x2 and GEN 2 x1 */
+ opp-5000000 {
+ opp-hz = /bits/ 64 <5000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <500000 1>;
+ };
+
+ /* GEN 2 x2 */
+ opp-10000000 {
+ opp-hz = /bits/ 64 <10000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <1000000 1>;
+ };
+
+ /* GEN 3 x1 */
+ opp-8000000 {
+ opp-hz = /bits/ 64 <8000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <984500 1>;
+ };
+
+ /* GEN 3 x2 */
+ opp-16000000 {
+ opp-hz = /bits/ 64 <16000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <1969000 1>;
+ };
+ };
+
pcieport0: pcie@0 {
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
@@ -2394,15 +3751,15 @@
"atu",
"config";
- interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "msi0",
"msi1",
"msi2",
@@ -2440,20 +3797,22 @@
interconnects = <&pcie_noc MASTER_PCIE_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &cnoc_main SLAVE_PCIE_1 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &cnoc_main SLAVE_PCIE_1 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "pcie-mem",
"cpu-pcie";
power-domains = <&gcc PCIE_1_GDSC>;
+ operating-points-v2 = <&pcie1_opp_table>;
+
iommu-map = <0 &apps_smmu 0x1480 0x1>,
<0x100 &apps_smmu 0x1481 0x1>;
- interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &intc 0 0 0 435 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &intc 0 0 0 438 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &intc 0 0 0 439 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH 0>,
+ <0 0 0 2 &intc 0 0 0 435 IRQ_TYPE_LEVEL_HIGH 0>,
+ <0 0 0 3 &intc 0 0 0 438 IRQ_TYPE_LEVEL_HIGH 0>,
+ <0 0 0 4 &intc 0 0 0 439 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-map-mask = <0 0 0 0x7>;
#interrupt-cells = <1>;
@@ -2477,6 +3836,52 @@
status = "disabled";
+ pcie1_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ /* GEN 1 x1 */
+ opp-2500000 {
+ opp-hz = /bits/ 64 <2500000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <250000 1>;
+ };
+
+ /* GEN 1 x2 and GEN 2 x1 */
+ opp-5000000 {
+ opp-hz = /bits/ 64 <5000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <500000 1>;
+ };
+
+ /* GEN 2 x2 */
+ opp-10000000 {
+ opp-hz = /bits/ 64 <10000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <1000000 1>;
+ };
+
+ /* GEN 3 x1 */
+ opp-8000000 {
+ opp-hz = /bits/ 64 <8000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <984500 1>;
+ };
+
+ /* GEN 3 x2 and GEN 4 x1 */
+ opp-16000000 {
+ opp-hz = /bits/ 64 <16000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <1969000 1>;
+ };
+
+ /* GEN 4 x2 */
+ opp-32000000 {
+ opp-hz = /bits/ 64 <32000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <3938000 1>;
+ };
+ };
+
pcie@0 {
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
@@ -2525,7 +3930,7 @@
compatible = "qcom,bam-v1.7.0";
reg = <0 0x01dc4000 0 0x28000>;
- interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH 0>;
#dma-cells = <1>;
@@ -2533,6 +3938,8 @@
<&apps_smmu 0x481 0>;
qcom,ee = <0>;
+ qcom,num-ees = <4>;
+ num-channels = <20>;
qcom,controlled-remotely;
};
@@ -2577,7 +3984,7 @@
compatible = "qcom,sm8650-ufshc", "qcom,ufshc", "jedec,ufs-2.0";
reg = <0 0x01d84000 0 0x3000>;
- interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gcc GCC_UFS_PHY_AXI_CLK>,
<&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
@@ -2595,28 +4002,22 @@
"tx_lane0_sync_clk",
"rx_lane0_sync_clk",
"rx_lane1_sync_clk";
- freq-table-hz = <100000000 403000000>,
- <0 0>,
- <0 0>,
- <100000000 403000000>,
- <100000000 403000000>,
- <0 0>,
- <0 0>,
- <0 0>;
resets = <&gcc GCC_UFS_PHY_BCR>;
reset-names = "rst";
interconnects = <&aggre1_noc MASTER_UFS_MEM QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_UFS_MEM_CFG QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_UFS_MEM_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "ufs-ddr",
"cpu-ufs";
power-domains = <&gcc UFS_PHY_GDSC>;
required-opps = <&rpmhpd_opp_nom>;
+ operating-points-v2 = <&ufs_opp_table>;
+
iommus = <&apps_smmu 0x60 0>;
lanes-per-direction = <2>;
@@ -2628,6 +4029,46 @@
#reset-cells = <1>;
status = "disabled";
+
+ ufs_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-100000000 {
+ opp-hz = /bits/ 64 <100000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <100000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-201500000 {
+ opp-hz = /bits/ 64 <201500000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <201500000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-403000000 {
+ opp-hz = /bits/ 64 <403000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <403000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
};
ice: crypto@1d88000 {
@@ -2664,7 +4105,7 @@
"cx_mem",
"cx_dbgc";
- interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH 0>;
iommus = <&adreno_smmu 0 0x0>,
<&adreno_smmu 1 0x0>;
@@ -2763,8 +4204,8 @@
<0x0 0x0b280000 0x0 0x10000>;
reg-names = "gmu", "rscc", "gmu_pdc";
- interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "hfi", "gmu";
clocks = <&gpucc GPU_CC_AHB_CLK>,
@@ -2827,32 +4268,32 @@
reg = <0x0 0x03da0000 0x0 0x40000>;
#iommu-cells = <2>;
#global-interrupts = <1>;
- interrupts = <GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 677 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 678 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 679 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 680 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 681 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 682 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 683 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 684 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 686 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 687 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 574 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 575 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 576 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 577 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 659 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 661 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 664 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 665 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 666 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 668 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 669 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 699 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 677 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 678 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 679 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 680 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 681 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 682 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 683 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 684 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 686 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 687 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 574 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 575 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 576 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 577 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 659 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 661 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 664 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 665 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 666 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 668 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 669 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 699 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&gpucc GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK>,
<&gcc GCC_GPU_MEMNOC_GFX_CLK>,
<&gcc GCC_GPU_SNOC_DVM_GFX_CLK>,
@@ -2877,8 +4318,8 @@
"ipa-shared",
"gsi";
- interrupts-extended = <&intc GIC_SPI 654 IRQ_TYPE_EDGE_RISING>,
- <&intc GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 654 IRQ_TYPE_EDGE_RISING 0>,
+ <&intc GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH 0>,
<&ipa_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
<&ipa_smp2p_in 1 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "ipa",
@@ -2889,8 +4330,10 @@
clocks = <&rpmhcc RPMH_IPA_CLK>;
clock-names = "core";
- interconnects = <&aggre2_noc MASTER_IPA 0 &mc_virt SLAVE_EBI1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_IPA_CFG 0>;
+ interconnects = <&aggre2_noc MASTER_IPA QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_IPA_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "memory",
"config";
@@ -2908,7 +4351,7 @@
compatible = "qcom,sm8650-mpss-pas";
reg = <0x0 0x04080000 0x0 0x10000>;
- interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING 0>,
<&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_modem_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_modem_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -3125,7 +4568,7 @@
swr3: soundwire@6ab0000 {
compatible = "qcom,soundwire-v2.0.0";
reg = <0 0x06ab0000 0 0x10000>;
- interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&lpass_wsa2macro>;
clock-names = "iface";
label = "WSA2";
@@ -3172,7 +4615,7 @@
swr1: soundwire@6ad0000 {
compatible = "qcom,soundwire-v2.0.0";
reg = <0 0x06ad0000 0 0x10000>;
- interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&lpass_rxmacro>;
clock-names = "iface";
label = "RX";
@@ -3236,7 +4679,7 @@
swr0: soundwire@6b10000 {
compatible = "qcom,soundwire-v2.0.0";
reg = <0 0x06b10000 0 0x10000>;
- interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&lpass_wsamacro>;
clock-names = "iface";
label = "WSA";
@@ -3266,8 +4709,8 @@
swr2: soundwire@6d30000 {
compatible = "qcom,soundwire-v2.0.0";
reg = <0 0x06d30000 0 0x10000>;
- interrupts = <GIC_SPI 496 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 520 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 496 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 520 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "core", "wakeup";
clocks = <&lpass_txmacro>;
clock-names = "iface";
@@ -3458,8 +4901,8 @@
compatible = "qcom,sm8650-sdhci", "qcom,sdhci-msm-v5";
reg = <0 0x08804000 0 0x1000>;
- interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "hc_irq",
"pwr_irq";
@@ -3472,8 +4915,8 @@
interconnects = <&aggre2_noc MASTER_SDCC_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_SDCC_2 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_SDCC_2 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "sdhc-ddr",
"cpu-sdhc";
@@ -3533,7 +4976,7 @@
cci0: cci@ac15000 {
compatible = "qcom,sm8650-cci", "qcom,msm8996-cci";
reg = <0 0x0ac15000 0 0x1000>;
- interrupts = <GIC_SPI 426 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 426 IRQ_TYPE_EDGE_RISING 0>;
power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>;
clocks = <&camcc CAM_CC_CAMNOC_AXI_NRT_CLK>,
<&camcc CAM_CC_CPAS_AHB_CLK>,
@@ -3566,7 +5009,7 @@
cci1: cci@ac16000 {
compatible = "qcom,sm8650-cci", "qcom,msm8996-cci";
reg = <0 0x0ac16000 0 0x1000>;
- interrupts = <GIC_SPI 427 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 427 IRQ_TYPE_EDGE_RISING 0>;
power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>;
clocks = <&camcc CAM_CC_CAMNOC_AXI_NRT_CLK>,
<&camcc CAM_CC_CPAS_AHB_CLK>,
@@ -3599,7 +5042,7 @@
cci2: cci@ac17000 {
compatible = "qcom,sm8650-cci", "qcom,msm8996-cci";
reg = <0 0x0ac17000 0 0x1000>;
- interrupts = <GIC_SPI 428 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 428 IRQ_TYPE_EDGE_RISING 0>;
power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>;
clocks = <&camcc CAM_CC_CAMNOC_AXI_NRT_CLK>,
<&camcc CAM_CC_CPAS_AHB_CLK>,
@@ -3647,7 +5090,7 @@
reg = <0 0x0ae00000 0 0x1000>;
reg-names = "mdss";
- interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
<&gcc GCC_DISP_HF_AXI_CLK>,
@@ -3656,8 +5099,11 @@
resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
interconnects = <&mmss_noc MASTER_MDP QCOM_ICC_TAG_ALWAYS
- &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
- interconnect-names = "mdp0-mem";
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "mdp0-mem",
+ "cpu-cfg";
power-domains = <&dispcc MDSS_GDSC>;
@@ -3675,7 +5121,7 @@
mdss_mdp: display-controller@ae01000 {
compatible = "qcom,sm8650-dpu";
reg = <0 0x0ae01000 0 0x8f000>,
- <0 0x0aeb0000 0 0x2008>;
+ <0 0x0aeb0000 0 0x3000>;
reg-names = "mdp",
"vbif";
@@ -3775,8 +5221,8 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>;
+ assigned-clock-parents = <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&mdss_dsi_opp_table>;
@@ -3872,8 +5318,8 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
- assigned-clock-parents = <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>;
+ assigned-clock-parents = <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>;
operating-points-v2 = <&mdss_dsi_opp_table>;
@@ -4020,10 +5466,10 @@
<&bi_tcxo_ao_div2>,
<&gcc GCC_DISP_AHB_CLK>,
<&sleep_clk>,
- <&mdss_dsi0_phy 0>,
- <&mdss_dsi0_phy 1>,
- <&mdss_dsi1_phy 0>,
- <&mdss_dsi1_phy 1>,
+ <&mdss_dsi0_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi0_phy DSI_PIXEL_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_BYTE_PLL_CLK>,
+ <&mdss_dsi1_phy DSI_PIXEL_PLL_CLK>,
<&usb_dp_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_dp_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>,
<0>, /* dp1 */
@@ -4116,8 +5562,8 @@
compatible = "qcom,sm8650-dwc3", "qcom,dwc3";
reg = <0 0x0a6f8800 0 0x400>;
- interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
- <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH 0>,
+ <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH 0>,
<&pdc 14 IRQ_TYPE_EDGE_RISING>,
<&pdc 15 IRQ_TYPE_EDGE_RISING>,
<&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
@@ -4146,6 +5592,13 @@
resets = <&gcc GCC_USB30_PRIM_BCR>;
+ interconnects = <&aggre1_noc MASTER_USB3_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_USB3_0 QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "usb-ddr",
+ "apps-usb";
+
power-domains = <&gcc USB30_PRIM_GDSC>;
required-opps = <&rpmhpd_opp_nom>;
@@ -4159,7 +5612,7 @@
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xcd00>;
- interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH 0>;
iommus = <&apps_smmu 0x40 0>;
@@ -4223,8 +5676,8 @@
reg = <0 0x0c228000 0 0x1000>, /* TM */
<0 0x0c222000 0 0x1000>; /* SROT */
- interrupts = <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 640 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 640 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "uplow",
"critical";
@@ -4238,8 +5691,8 @@
reg = <0 0x0c229000 0 0x1000>, /* TM */
<0 0x0c223000 0 0x1000>; /* SROT */
- interrupts = <GIC_SPI 507 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 507 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "uplow",
"critical";
@@ -4253,8 +5706,8 @@
reg = <0 0x0c22a000 0 0x1000>, /* TM */
<0 0x0c224000 0 0x1000>; /* SROT */
- interrupts = <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 642 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 642 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "uplow",
"critical";
@@ -4312,7 +5765,7 @@
compatible = "qcom,sm8650-tlmm";
reg = <0 0x0f100000 0 0x300000>;
- interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH 0>;
gpio-controller;
#gpio-cells = <2>;
@@ -5188,103 +6641,103 @@
compatible = "qcom,sm8650-smmu-500", "qcom,smmu-500", "arm,mmu-500";
reg = <0 0x15000000 0 0x100000>;
- interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 706 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 689 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 706 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 689 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH 0>;
#iommu-cells = <2>;
#global-interrupts = <1>;
@@ -5297,9 +6750,9 @@
reg = <0 0x17100000 0 0x10000>, /* GICD */
<0 0x17180000 0 0x200000>; /* GICR * 8 */
- interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW 0>;
- #interrupt-cells = <3>;
+ #interrupt-cells = <4>;
interrupt-controller;
#redistributor-regions = <1>;
@@ -5309,6 +6762,20 @@
#size-cells = <2>;
ranges;
+ ppi-partitions {
+ ppi_cluster0: interrupt-partition-0 {
+ affinity = <&cpu0 &cpu1>;
+ };
+
+ ppi_cluster1: interrupt-partition-1 {
+ affinity = <&cpu2 &cpu3 &cpu4 &cpu5 &cpu6>;
+ };
+
+ ppi_cluster2: interrupt-partition-2 {
+ affinity = <&cpu7>;
+ };
+ };
+
gic_its: msi-controller@17140000 {
compatible = "arm,gic-v3-its";
reg = <0 0x17140000 0 0x20000>;
@@ -5330,8 +6797,8 @@
reg = <0x17421000 0x1000>,
<0x17422000 0x1000>;
- interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH 0>;
frame-number = <0>;
};
@@ -5339,7 +6806,7 @@
frame@17423000 {
reg = <0x17423000 0x1000>;
- interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH 0>;
frame-number = <1>;
@@ -5349,7 +6816,7 @@
frame@17425000 {
reg = <0x17425000 0x1000>;
- interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH 0>;
frame-number = <2>;
@@ -5359,7 +6826,7 @@
frame@17427000 {
reg = <0x17427000 0x1000>;
- interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH 0>;
frame-number = <3>;
@@ -5369,7 +6836,7 @@
frame@17429000 {
reg = <0x17429000 0x1000>;
- interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH 0>;
frame-number = <4>;
@@ -5379,7 +6846,7 @@
frame@1742b000 {
reg = <0x1742b000 0x1000>;
- interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH 0>;
frame-number = <5>;
@@ -5389,7 +6856,7 @@
frame@1742d000 {
reg = <0x1742d000 0x1000>;
- interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH 0>;
frame-number = <6>;
@@ -5407,9 +6874,9 @@
"drv-1",
"drv-2";
- interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&cluster_pd>;
@@ -5506,6 +6973,16 @@
};
};
+ epss_l3: interconnect@17d90000 {
+ compatible = "qcom,sm8650-epss-l3", "qcom,epss-l3";
+ reg = <0 0x17d90000 0 0x1000>;
+
+ clocks = <&bi_tcxo_div2>, <&gcc GCC_GPLL0>;
+ clock-names = "xo", "alternate";
+
+ #interconnect-cells = <1>;
+ };
+
cpufreq_hw: cpufreq@17d91000 {
compatible = "qcom,sm8650-cpufreq-epss", "qcom,cpufreq-epss";
reg = <0 0x17d91000 0 0x1000>,
@@ -5517,10 +6994,10 @@
"freq-domain2",
"freq-domain3";
- interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 738 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 738 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "dcvsh-irq-0",
"dcvsh-irq-1",
"dcvsh-irq-2",
@@ -5537,7 +7014,7 @@
compatible = "qcom,sm8650-llcc-bwmon", "qcom,sc7280-llcc-bwmon";
reg = <0 0x24091000 0 0x1000>;
- interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH 0>;
interconnects = <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>;
@@ -5589,7 +7066,7 @@
compatible = "qcom,sm8650-cpu-bwmon", "qcom,sdm845-bwmon";
reg = <0 0x240b7400 0 0x600>;
- interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH 0>;
interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
&gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>;
@@ -5649,7 +7126,7 @@
"llcc_broadcast_base",
"llcc_broadcast_and_base";
- interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH 0>;
};
nsp_noc: interconnect@320c0000 {
@@ -5665,7 +7142,7 @@
compatible = "qcom,sm8650-cdsp-pas";
reg = <0x0 0x32300000 0x0 0x10000>;
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING 0>,
<&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -5843,14 +7320,14 @@
thermal-sensors = <&tsens0 0>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ aoss0-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
aoss0-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -5861,14 +7338,14 @@
thermal-sensors = <&tsens0 1>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ cpuss0-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
cpuss0-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -5879,14 +7356,14 @@
thermal-sensors = <&tsens0 2>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ cpuss1-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
cpuss1-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -5897,14 +7374,14 @@
thermal-sensors = <&tsens0 3>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ cpuss2-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
cpuss2-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -5915,14 +7392,14 @@
thermal-sensors = <&tsens0 4>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ cpuss3-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
cpuss3-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -5933,18 +7410,6 @@
thermal-sensors = <&tsens0 5>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu2-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -5957,18 +7422,6 @@
thermal-sensors = <&tsens0 6>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu2-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -5981,18 +7434,6 @@
thermal-sensors = <&tsens0 7>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu3-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -6005,18 +7446,6 @@
thermal-sensors = <&tsens0 8>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu3-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -6029,18 +7458,6 @@
thermal-sensors = <&tsens0 9>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu4-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -6053,18 +7470,6 @@
thermal-sensors = <&tsens0 10>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu4-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -6077,18 +7482,6 @@
thermal-sensors = <&tsens0 11>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu5-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -6101,18 +7494,6 @@
thermal-sensors = <&tsens0 12>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu5-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -6125,18 +7506,6 @@
thermal-sensors = <&tsens0 13>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu6-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -6149,18 +7518,6 @@
thermal-sensors = <&tsens0 14>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu6-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -6173,14 +7530,14 @@
thermal-sensors = <&tsens1 0>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ aoss1-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
aoss1-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6191,18 +7548,6 @@
thermal-sensors = <&tsens1 1>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu7-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -6215,18 +7560,6 @@
thermal-sensors = <&tsens1 2>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu7-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -6239,18 +7572,6 @@
thermal-sensors = <&tsens1 3>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu7-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -6263,18 +7584,6 @@
thermal-sensors = <&tsens1 4>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu0-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -6287,18 +7596,6 @@
thermal-sensors = <&tsens1 5>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu1-critical {
temperature = <110000>;
hysteresis = <1000>;
@@ -6308,19 +7605,17 @@
};
nsphvx0-thermal {
- polling-delay-passive = <10>;
-
thermal-sensors = <&tsens2 6>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ nsphvx0-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
- nsphvx1-critical {
- temperature = <110000>;
+ nsphvx0-critical {
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6328,19 +7623,17 @@
};
nsphvx1-thermal {
- polling-delay-passive = <10>;
-
thermal-sensors = <&tsens2 7>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ nsphvx1-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
nsphvx1-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6348,19 +7641,17 @@
};
nsphmx0-thermal {
- polling-delay-passive = <10>;
-
thermal-sensors = <&tsens2 8>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ nsphmx0-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
nsphmx0-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6368,19 +7659,17 @@
};
nsphmx1-thermal {
- polling-delay-passive = <10>;
-
thermal-sensors = <&tsens2 9>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ nsphmx1-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
nsphmx1-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6388,19 +7677,17 @@
};
nsphmx2-thermal {
- polling-delay-passive = <10>;
-
thermal-sensors = <&tsens2 10>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ nsphmx2-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
nsphmx2-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6408,19 +7695,17 @@
};
nsphmx3-thermal {
- polling-delay-passive = <10>;
-
thermal-sensors = <&tsens2 11>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ nsphmx3-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
nsphmx3-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6428,19 +7713,17 @@
};
video-thermal {
- polling-delay-passive = <10>;
-
thermal-sensors = <&tsens1 12>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ video-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
video-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6448,19 +7731,17 @@
};
ddr-thermal {
- polling-delay-passive = <10>;
-
thermal-sensors = <&tsens1 13>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ ddr-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
ddr-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6471,14 +7752,14 @@
thermal-sensors = <&tsens1 14>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ camera0-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
camera0-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6489,14 +7770,14 @@
thermal-sensors = <&tsens1 15>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ camera1-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
camera1-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6507,14 +7788,14 @@
thermal-sensors = <&tsens2 0>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ aoss2-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
aoss2-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6535,20 +7816,20 @@
trips {
gpu0_alert0: trip-point0 {
- temperature = <85000>;
+ temperature = <95000>;
hysteresis = <1000>;
type = "passive";
};
trip-point1 {
- temperature = <90000>;
+ temperature = <110000>;
hysteresis = <1000>;
type = "hot";
};
trip-point2 {
- temperature = <110000>;
- hysteresis = <1000>;
+ temperature = <115000>;
+ hysteresis = <0>;
type = "critical";
};
};
@@ -6568,20 +7849,20 @@
trips {
gpu1_alert0: trip-point0 {
- temperature = <85000>;
+ temperature = <95000>;
hysteresis = <1000>;
type = "passive";
};
trip-point1 {
- temperature = <90000>;
+ temperature = <110000>;
hysteresis = <1000>;
type = "hot";
};
trip-point2 {
- temperature = <110000>;
- hysteresis = <1000>;
+ temperature = <115000>;
+ hysteresis = <0>;
type = "critical";
};
};
@@ -6601,20 +7882,20 @@
trips {
gpu2_alert0: trip-point0 {
- temperature = <85000>;
+ temperature = <95000>;
hysteresis = <1000>;
type = "passive";
};
trip-point1 {
- temperature = <90000>;
+ temperature = <110000>;
hysteresis = <1000>;
type = "hot";
};
trip-point2 {
- temperature = <110000>;
- hysteresis = <1000>;
+ temperature = <115000>;
+ hysteresis = <0>;
type = "critical";
};
};
@@ -6634,20 +7915,20 @@
trips {
gpu3_alert0: trip-point0 {
- temperature = <85000>;
+ temperature = <95000>;
hysteresis = <1000>;
type = "passive";
};
trip-point1 {
- temperature = <90000>;
+ temperature = <110000>;
hysteresis = <1000>;
type = "hot";
};
trip-point2 {
- temperature = <110000>;
- hysteresis = <1000>;
+ temperature = <115000>;
+ hysteresis = <0>;
type = "critical";
};
};
@@ -6667,20 +7948,20 @@
trips {
gpu4_alert0: trip-point0 {
- temperature = <85000>;
+ temperature = <95000>;
hysteresis = <1000>;
type = "passive";
};
trip-point1 {
- temperature = <90000>;
+ temperature = <110000>;
hysteresis = <1000>;
type = "hot";
};
trip-point2 {
- temperature = <110000>;
- hysteresis = <1000>;
+ temperature = <115000>;
+ hysteresis = <0>;
type = "critical";
};
};
@@ -6700,20 +7981,20 @@
trips {
gpu5_alert0: trip-point0 {
- temperature = <85000>;
+ temperature = <95000>;
hysteresis = <1000>;
type = "passive";
};
trip-point1 {
- temperature = <90000>;
+ temperature = <110000>;
hysteresis = <1000>;
type = "hot";
};
trip-point2 {
- temperature = <110000>;
- hysteresis = <1000>;
+ temperature = <115000>;
+ hysteresis = <0>;
type = "critical";
};
};
@@ -6733,20 +8014,20 @@
trips {
gpu6_alert0: trip-point0 {
- temperature = <85000>;
+ temperature = <95000>;
hysteresis = <1000>;
type = "passive";
};
trip-point1 {
- temperature = <90000>;
+ temperature = <110000>;
hysteresis = <1000>;
type = "hot";
};
trip-point2 {
- temperature = <110000>;
- hysteresis = <1000>;
+ temperature = <115000>;
+ hysteresis = <0>;
type = "critical";
};
};
@@ -6766,20 +8047,20 @@
trips {
gpu7_alert0: trip-point0 {
- temperature = <85000>;
+ temperature = <95000>;
hysteresis = <1000>;
type = "passive";
};
trip-point1 {
- temperature = <90000>;
+ temperature = <110000>;
hysteresis = <1000>;
type = "hot";
};
trip-point2 {
- temperature = <110000>;
- hysteresis = <1000>;
+ temperature = <115000>;
+ hysteresis = <0>;
type = "critical";
};
};
@@ -6789,14 +8070,14 @@
thermal-sensors = <&tsens2 9>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ modem0-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
modem0-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6807,14 +8088,14 @@
thermal-sensors = <&tsens2 10>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ modem1-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
modem1-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6825,14 +8106,14 @@
thermal-sensors = <&tsens2 11>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ modem2-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
modem2-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6843,14 +8124,14 @@
thermal-sensors = <&tsens2 12>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
+ modem3-hot {
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "hot";
};
modem3-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -6861,9 +8142,9 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW 0>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW 0>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW 0>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW 0>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm8750-mtp.dts b/arch/arm64/boot/dts/qcom/sm8750-mtp.dts
index 9e3aacad7bda..72f081a890df 100644
--- a/arch/arm64/boot/dts/qcom/sm8750-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8750-mtp.dts
@@ -784,6 +784,28 @@
status = "okay";
};
+&remoteproc_adsp {
+ firmware-name = "qcom/sm8750/adsp.mbn",
+ "qcom/sm8750/adsp_dtb.mbn";
+
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/sm8750/cdsp.mbn",
+ "qcom/sm8750/cdsp_dtb.mbn";
+
+ status = "okay";
+};
+
+&remoteproc_mpss {
+ firmware-name = "qcom/sm8750/modem.mbn",
+ "qcom/sm8750/modem_dtb.mbn";
+
+ /* Modem crashes after some time with "DOG detects stalled initialization" */
+ status = "fail";
+};
+
&tlmm {
/* reserved for secure world */
gpio-reserved-ranges = <36 4>, <74 1>;
diff --git a/arch/arm64/boot/dts/qcom/sm8750-qrd.dts b/arch/arm64/boot/dts/qcom/sm8750-qrd.dts
index f77efab0aef9..840a6d8f8a24 100644
--- a/arch/arm64/boot/dts/qcom/sm8750-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8750-qrd.dts
@@ -782,6 +782,27 @@
status = "okay";
};
+&remoteproc_adsp {
+ firmware-name = "qcom/sm8750/adsp.mbn",
+ "qcom/sm8750/adsp_dtb.mbn";
+
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/sm8750/cdsp.mbn",
+ "qcom/sm8750/cdsp_dtb.mbn";
+
+ status = "okay";
+};
+
+&remoteproc_mpss {
+ firmware-name = "qcom/sm8750/modem.mbn",
+ "qcom/sm8750/modem_dtb.mbn";
+
+ status = "okay";
+};
+
&tlmm {
/* reserved for secure world */
gpio-reserved-ranges = <36 4>, <74 1>;
diff --git a/arch/arm64/boot/dts/qcom/sm8750.dtsi b/arch/arm64/boot/dts/qcom/sm8750.dtsi
index 3bbd7d18598e..980ba1ca23c4 100644
--- a/arch/arm64/boot/dts/qcom/sm8750.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8750.dtsi
@@ -10,9 +10,12 @@
#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interconnect/qcom,sm8750-rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/mailbox/qcom-ipcc.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
#include <dt-bindings/power/qcom-rpmpd.h>
+#include <dt-bindings/soc/qcom,gpr.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+#include <dt-bindings/sound/qcom,q6dsp-lpass-ports.h>
/ {
interrupt-parent = <&intc>;
@@ -95,11 +98,11 @@
compatible = "qcom,oryon";
reg = <0x0 0x10000>;
enable-method = "psci";
- next-level-cache = <&L2_1>;
+ next-level-cache = <&l2_1>;
power-domains = <&cpu_pd6>;
power-domain-names = "psci";
- L2_1: l2-cache {
+ l2_1: l2-cache {
compatible = "cache";
cache-level = <2>;
cache-unified;
@@ -111,7 +114,7 @@
compatible = "qcom,oryon";
reg = <0x0 0x10100>;
enable-method = "psci";
- next-level-cache = <&L2_1>;
+ next-level-cache = <&l2_1>;
power-domains = <&cpu_pd7>;
power-domain-names = "psci";
};
@@ -233,53 +236,59 @@
cpu_pd0: power-domain-cpu0 {
#power-domain-cells = <0>;
- power-domains = <&cluster_pd>;
+ power-domains = <&cluster0_pd>;
domain-idle-states = <&cluster0_c4>;
};
cpu_pd1: power-domain-cpu1 {
#power-domain-cells = <0>;
- power-domains = <&cluster_pd>;
+ power-domains = <&cluster0_pd>;
domain-idle-states = <&cluster0_c4>;
};
cpu_pd2: power-domain-cpu2 {
#power-domain-cells = <0>;
- power-domains = <&cluster_pd>;
+ power-domains = <&cluster0_pd>;
domain-idle-states = <&cluster0_c4>;
};
cpu_pd3: power-domain-cpu3 {
#power-domain-cells = <0>;
- power-domains = <&cluster_pd>;
+ power-domains = <&cluster0_pd>;
domain-idle-states = <&cluster0_c4>;
};
cpu_pd4: power-domain-cpu4 {
#power-domain-cells = <0>;
- power-domains = <&cluster_pd>;
+ power-domains = <&cluster0_pd>;
domain-idle-states = <&cluster0_c4>;
};
cpu_pd5: power-domain-cpu5 {
#power-domain-cells = <0>;
- power-domains = <&cluster_pd>;
+ power-domains = <&cluster0_pd>;
domain-idle-states = <&cluster0_c4>;
};
cpu_pd6: power-domain-cpu6 {
#power-domain-cells = <0>;
- power-domains = <&cluster_pd>;
+ power-domains = <&cluster1_pd>;
domain-idle-states = <&cluster1_c4>;
};
cpu_pd7: power-domain-cpu7 {
#power-domain-cells = <0>;
- power-domains = <&cluster_pd>;
+ power-domains = <&cluster1_pd>;
domain-idle-states = <&cluster1_c4>;
};
- cluster_pd: power-domain-cluster {
+ cluster0_pd: power-domain-cluster0 {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&cluster_cl5>;
+ power-domains = <&system_pd>;
+ };
+
+ cluster1_pd: power-domain-cluster1 {
#power-domain-cells = <0>;
domain-idle-states = <&cluster_cl5>;
power-domains = <&system_pd>;
@@ -516,6 +525,97 @@
};
};
+ smp2p-adsp {
+ compatible = "qcom,smp2p";
+
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,smem = <443>, <429>;
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ smp2p_adsp_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_adsp_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-cdsp {
+ compatible = "qcom,smp2p";
+
+ interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,smem = <94>, <432>;
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <5>;
+
+ smp2p_cdsp_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_cdsp_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-modem {
+ compatible = "qcom,smp2p";
+
+ interrupts-extended = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,smem = <435>, <428>;
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ smp2p_modem_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_modem_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ ipa_smp2p_out: ipa-ap-to-modem {
+ qcom,entry-name = "ipa";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ ipa_smp2p_in: ipa-modem-to-ap {
+ qcom,entry-name = "ipa";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* TODO: smem mailbox in and out */
+ };
+
soc: soc@0 {
compatible = "simple-bus";
@@ -542,6 +642,17 @@
#power-domain-cells = <1>;
};
+ ipcc: mailbox@406000 {
+ compatible = "qcom,sm8750-ipcc", "qcom,ipcc";
+ reg = <0x0 0x00406000 0x0 0x1000>;
+
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ #mbox-cells = <2>;
+ };
+
gpi_dma2: dma-controller@800000 {
compatible = "qcom,sm8750-gpi-dma", "qcom,sm6350-gpi-dma";
reg = <0x0 0x00800000 0x0 0x60000>;
@@ -987,10 +1098,10 @@
interrupts = <GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_QUPV3_WRAP2_S5_CLK>;
+ clocks = <&gcc GCC_QUPV3_WRAP2_S6_CLK>;
clock-names = "se";
- interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
&config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>;
@@ -1883,6 +1994,11 @@
};
};
+ rng: rng@10c3000 {
+ compatible = "qcom,sm8750-trng", "qcom,trng";
+ reg = <0x0 0x010c3000 0x0 0x1000>;
+ };
+
cnoc_main: interconnect@1500000 {
compatible = "qcom,sm8750-cnoc-main";
reg = <0x0 0x01500000 0x0 0x16080>;
@@ -1939,12 +2055,259 @@
#interconnect-cells = <2>;
};
+ ice: crypto@1d88000 {
+ compatible = "qcom,sm8750-inline-crypto-engine",
+ "qcom,inline-crypto-engine";
+ reg = <0x0 0x01d88000 0x0 0x18000>;
+
+ clocks = <&gcc GCC_UFS_PHY_ICE_CORE_CLK>;
+ };
+
+ cryptobam: dma-controller@1dc4000 {
+ compatible = "qcom,bam-v1.7.4", "qcom,bam-v1.7.0";
+ reg = <0x0 0x01dc4000 0x0 0x28000>;
+
+ interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+
+ #dma-cells = <1>;
+
+ iommus = <&apps_smmu 0x480 0>,
+ <&apps_smmu 0x481 0>;
+
+ qcom,ee = <0>;
+ qcom,controlled-remotely;
+ };
+
+ crypto: crypto@1dfa000 {
+ compatible = "qcom,sm8750-qce", "qcom,sm8150-qce", "qcom,qce";
+ reg = <0x0 0x01dfa000 0x0 0x6000>;
+
+ interconnects = <&aggre2_noc MASTER_CRYPTO QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "memory";
+
+ dmas = <&cryptobam 4>, <&cryptobam 5>;
+ dma-names = "rx", "tx";
+
+ iommus = <&apps_smmu 0x480 0>,
+ <&apps_smmu 0x481 0>;
+ };
+
tcsr_mutex: hwlock@1f40000 {
compatible = "qcom,tcsr-mutex";
reg = <0x0 0x01f40000 0x0 0x20000>;
#hwlock-cells = <1>;
};
+ remoteproc_mpss: remoteproc@4080000 {
+ compatible = "qcom,sm8750-mpss-pas";
+ reg = <0x0 0x04080000 0x0 0x10000>;
+
+ interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 3 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 7 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack",
+ "shutdown-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ interconnects = <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+
+ power-domains = <&rpmhpd RPMHPD_CX>,
+ <&rpmhpd RPMHPD_MSS>;
+ power-domain-names = "cx",
+ "mss";
+
+ memory-region = <&mpss_mem>, <&q6_mpss_dtb_mem>,
+ <&dsm_partition_1_mem>,
+ <&dsm_partition_2_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_modem_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ qcom,remote-pid = <1>;
+
+ label = "mpss";
+ };
+ };
+
+ remoteproc_adsp: remoteproc@6800000 {
+ compatible = "qcom,sm8750-adsp-pas", "qcom,sm8550-adsp-pas";
+ reg = <0x0 0x06800000 0x0 0x10000>;
+
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 7 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack",
+ "shutdown-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+
+ power-domains = <&rpmhpd RPMHPD_LCX>,
+ <&rpmhpd RPMHPD_LMX>;
+ power-domain-names = "lcx",
+ "lmx";
+
+ memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_adsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ remoteproc_adsp_glink: glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+ qcom,remote-pid = <2>;
+ label = "lpass";
+
+ gpr {
+ compatible = "qcom,gpr";
+ qcom,glink-channels = "adsp_apps";
+ qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+ qcom,intents = <512 20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ q6apm: service@1 {
+ compatible = "qcom,q6apm";
+ reg = <GPR_APM_MODULE_IID>;
+ #sound-dai-cells = <0>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+
+ q6apmbedai: bedais {
+ compatible = "qcom,q6apm-lpass-dais";
+ #sound-dai-cells = <1>;
+ };
+
+ q6apmdai: dais {
+ compatible = "qcom,q6apm-dais";
+ iommus = <&apps_smmu 0x1001 0x80>,
+ <&apps_smmu 0x1041 0x20>;
+ };
+ };
+
+ q6prm: service@2 {
+ compatible = "qcom,q6prm";
+ reg = <GPR_PRM_MODULE_IID>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+
+ q6prmcc: clock-controller {
+ compatible = "qcom,q6prm-lpass-clocks";
+ #clock-cells = <2>;
+ };
+ };
+ };
+ };
+ };
+
+ lpass_wsa2macro: codec@6aa0000 {
+ compatible = "qcom,sm8750-lpass-wsa-macro", "qcom,sm8550-lpass-wsa-macro";
+ reg = <0x0 0x06aa0000 0x0 0x1000>;
+ clocks = <&q6prmcc LPASS_CLK_ID_WSA2_CORE_TX_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6prmcc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6prmcc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&lpass_vamacro>;
+ clock-names = "mclk",
+ "macro",
+ "dcodec",
+ "fsgen";
+
+ #clock-cells = <0>;
+ clock-output-names = "wsa2-mclk";
+ #sound-dai-cells = <1>;
+ };
+
+ lpass_rxmacro: codec@6ac0000 {
+ compatible = "qcom,sm8750-lpass-rx-macro", "qcom,sm8550-lpass-rx-macro";
+ reg = <0x0 0x06ac0000 0x0 0x1000>;
+ clocks = <&q6prmcc LPASS_CLK_ID_RX_CORE_TX_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6prmcc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6prmcc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&lpass_vamacro>;
+ clock-names = "mclk",
+ "macro",
+ "dcodec",
+ "fsgen";
+
+ #clock-cells = <0>;
+ clock-output-names = "mclk";
+ #sound-dai-cells = <1>;
+ };
+
+ lpass_txmacro: codec@6ae0000 {
+ compatible = "qcom,sm8750-lpass-tx-macro", "qcom,sm8550-lpass-tx-macro";
+ reg = <0x0 0x06ae0000 0x0 0x1000>;
+ clocks = <&q6prmcc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6prmcc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6prmcc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&lpass_vamacro>;
+ clock-names = "mclk",
+ "macro",
+ "dcodec",
+ "fsgen";
+
+ #clock-cells = <0>;
+ clock-output-names = "mclk";
+ #sound-dai-cells = <1>;
+ };
+
+ lpass_wsamacro: codec@6b00000 {
+ compatible = "qcom,sm8750-lpass-wsa-macro", "qcom,sm8550-lpass-wsa-macro";
+ reg = <0x0 0x06b00000 0x0 0x1000>;
+ clocks = <&q6prmcc LPASS_CLK_ID_WSA_CORE_TX_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6prmcc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6prmcc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&lpass_vamacro>;
+ clock-names = "mclk",
+ "macro",
+ "dcodec",
+ "fsgen";
+
+ #clock-cells = <0>;
+ clock-output-names = "mclk";
+ #sound-dai-cells = <1>;
+ };
+
lpass_ag_noc: interconnect@7e40000 {
compatible = "qcom,sm8750-lpass-ag-noc";
reg = <0x0 0x07e40000 0x0 0xe080>;
@@ -1966,6 +2329,139 @@
#interconnect-cells = <2>;
};
+ lpass_vamacro: codec@7660000 {
+ compatible = "qcom,sm8750-lpass-va-macro", "qcom,sm8550-lpass-va-macro";
+ reg = <0x0 0x07660000 0x0 0x2000>;
+ clocks = <&q6prmcc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6prmcc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6prmcc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
+ clock-names = "mclk",
+ "macro",
+ "dcodec";
+
+ #clock-cells = <0>;
+ clock-output-names = "fsgen";
+ #sound-dai-cells = <1>;
+ };
+
+ lpass_tlmm: pinctrl@7760000 {
+ compatible = "qcom,sm8750-lpass-lpi-pinctrl",
+ "qcom,sm8650-lpass-lpi-pinctrl";
+ reg = <0x0 0x07760000 0x0 0x20000>;
+
+ clocks = <&q6prmcc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6prmcc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
+ clock-names = "core", "audio";
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&lpass_tlmm 0 0 23>;
+
+ tx_swr_active: tx-swr-active-state {
+ clk-pins {
+ pins = "gpio0";
+ function = "swr_tx_clk";
+ drive-strength = <2>;
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ data-pins {
+ pins = "gpio1", "gpio2", "gpio14";
+ function = "swr_tx_data";
+ drive-strength = <2>;
+ slew-rate = <1>;
+ bias-bus-hold;
+ };
+ };
+
+ rx_swr_active: rx-swr-active-state {
+ clk-pins {
+ pins = "gpio3";
+ function = "swr_rx_clk";
+ drive-strength = <2>;
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ data-pins {
+ pins = "gpio4", "gpio5";
+ function = "swr_rx_data";
+ drive-strength = <2>;
+ slew-rate = <1>;
+ bias-bus-hold;
+ };
+ };
+
+ dmic01_default: dmic01-default-state {
+ clk-pins {
+ pins = "gpio6";
+ function = "dmic1_clk";
+ drive-strength = <8>;
+ output-high;
+ };
+
+ data-pins {
+ pins = "gpio7";
+ function = "dmic1_data";
+ drive-strength = <8>;
+ input-enable;
+ };
+ };
+
+ dmic23_default: dmic23-default-state {
+ clk-pins {
+ pins = "gpio8";
+ function = "dmic2_clk";
+ drive-strength = <8>;
+ output-high;
+ };
+
+ data-pins {
+ pins = "gpio9";
+ function = "dmic2_data";
+ drive-strength = <8>;
+ input-enable;
+ };
+ };
+
+ wsa_swr_active: wsa-swr-active-state {
+ clk-pins {
+ pins = "gpio10";
+ function = "wsa_swr_clk";
+ drive-strength = <2>;
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ data-pins {
+ pins = "gpio11";
+ function = "wsa_swr_data";
+ drive-strength = <2>;
+ slew-rate = <1>;
+ bias-bus-hold;
+ };
+ };
+
+ wsa2_swr_active: wsa2-swr-active-state {
+ clk-pins {
+ pins = "gpio15";
+ function = "wsa2_swr_clk";
+ drive-strength = <2>;
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ data-pins {
+ pins = "gpio16";
+ function = "wsa2_swr_data";
+ drive-strength = <2>;
+ slew-rate = <1>;
+ bias-bus-hold;
+ };
+ };
+ };
+
pdc: interrupt-controller@b220000 {
compatible = "qcom,sm8750-pdc", "qcom,pdc";
reg = <0x0 0x0b220000 0x0 0x10000>, <0x0 0x164400f0 0x0 0x64>;
@@ -1978,6 +2474,24 @@
interrupt-controller;
};
+ aoss_qmp: power-management@c300000 {
+ compatible = "qcom,sm8750-aoss-qmp", "qcom,aoss-qmp";
+ reg = <0x0 0x0c300000 0x0 0x400>;
+
+ interrupt-parent = <&ipcc>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ #clock-cells = <0>;
+ };
+
+ sram@c3f0000 {
+ compatible = "qcom,rpmh-stats";
+ reg = <0x0 0x0c3f0000 0x0 0x400>;
+ };
+
spmi_bus: spmi@c400000 {
compatible = "qcom,spmi-pmic-arb";
reg = <0x0 0x0c400000 0x0 0x3000>,
@@ -2888,12 +3402,198 @@
#interconnect-cells = <2>;
};
+ system-cache-controller@24800000 {
+ compatible = "qcom,sm8750-llcc";
+ reg = <0x0 0x24800000 0x0 0x200000>,
+ <0x0 0x25800000 0x0 0x200000>,
+ <0x0 0x24c00000 0x0 0x200000>,
+ <0x0 0x25c00000 0x0 0x200000>,
+ <0x0 0x26800000 0x0 0x200000>,
+ <0x0 0x26c00000 0x0 0x200000>;
+ reg-names = "llcc0_base",
+ "llcc1_base",
+ "llcc2_base",
+ "llcc3_base",
+ "llcc_broadcast_base",
+ "llcc_broadcast_and_base";
+
+ interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
nsp_noc: interconnect@320c0000 {
compatible = "qcom,sm8750-nsp-noc";
reg = <0x0 0x320c0000 0x0 0x13080>;
qcom,bcm-voters = <&apps_bcm_voter>;
#interconnect-cells = <2>;
};
+
+ remoteproc_cdsp: remoteproc@32300000 {
+ compatible = "qcom,sm8750-cdsp-pas", "qcom,sm8650-cdsp-pas";
+ reg = <0x0 0x32300000 0x0 0x10000>;
+
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 3 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 7 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack",
+ "shutdown-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ interconnects = <&nsp_noc MASTER_CDSP_PROC QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+
+ power-domains = <&rpmhpd RPMHPD_CX>,
+ <&rpmhpd RPMHPD_MXC>,
+ <&rpmhpd RPMHPD_NSP>;
+ power-domain-names = "cx",
+ "mxc",
+ "nsp";
+
+ memory-region = <&cdsp_mem>, <&q6_cdsp_dtb_mem>, <&global_sync_mem>;
+ qcom,qmp = <&aoss_qmp>;
+ qcom,smem-states = <&smp2p_cdsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+ qcom,remote-pid = <5>;
+ label = "cdsp";
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "cdsp";
+ qcom,non-secure-domain;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@1 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <1>;
+ iommus = <&apps_smmu 0x19c1 0x0>,
+ <&apps_smmu 0x0c21 0x0>,
+ <&apps_smmu 0x0c01 0x40>;
+ dma-coherent;
+ };
+
+ compute-cb@2 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <2>;
+ iommus = <&apps_smmu 0x1962 0x0>,
+ <&apps_smmu 0x0c02 0x20>,
+ <&apps_smmu 0x0c42 0x0>,
+ <&apps_smmu 0x19c2 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x1963 0x0>,
+ <&apps_smmu 0x0c23 0x0>,
+ <&apps_smmu 0x0c03 0x40>,
+ <&apps_smmu 0x19c3 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x1964 0x0>,
+ <&apps_smmu 0x0c24 0x0>,
+ <&apps_smmu 0x0c04 0x40>,
+ <&apps_smmu 0x19c4 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&apps_smmu 0x1965 0x0>,
+ <&apps_smmu 0x0c25 0x0>,
+ <&apps_smmu 0x0c05 0x40>,
+ <&apps_smmu 0x19c5 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@6 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <6>;
+ iommus = <&apps_smmu 0x1966 0x0>,
+ <&apps_smmu 0x0c06 0x20>,
+ <&apps_smmu 0x0c46 0x0>,
+ <&apps_smmu 0x19c6 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@7 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <7>;
+ iommus = <&apps_smmu 0x1967 0x0>,
+ <&apps_smmu 0x0c27 0x0>,
+ <&apps_smmu 0x0c07 0x40>,
+ <&apps_smmu 0x19c7 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@8 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <8>;
+ iommus = <&apps_smmu 0x1968 0x0>,
+ <&apps_smmu 0x0c08 0x20>,
+ <&apps_smmu 0x0c48 0x0>,
+ <&apps_smmu 0x19c8 0x0>;
+ dma-coherent;
+ };
+
+ /* note: secure cb9 in downstream */
+
+ compute-cb@12 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <12>;
+ iommus = <&apps_smmu 0x196c 0x0>,
+ <&apps_smmu 0x0c2c 0x20>,
+ <&apps_smmu 0x0c0c 0x40>,
+ <&apps_smmu 0x19cc 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@13 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <13>;
+ iommus = <&apps_smmu 0x196d 0x0>,
+ <&apps_smmu 0x0c0d 0x20>,
+ <&apps_smmu 0x0c2e 0x0>,
+ <&apps_smmu 0x0c4d 0x0>,
+ <&apps_smmu 0x19cd 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@14 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <14>;
+ iommus = <&apps_smmu 0x196e 0x0>,
+ <&apps_smmu 0x0c0e 0x20>,
+ <&apps_smmu 0x19ce 0x0>;
+ dma-coherent;
+ };
+ };
+ };
+ };
};
timer {
diff --git a/arch/arm64/boot/dts/qcom/x1-crd.dtsi b/arch/arm64/boot/dts/qcom/x1-crd.dtsi
new file mode 100644
index 000000000000..c9f0d5052670
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1-crd.dtsi
@@ -0,0 +1,1749 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+#include "x1e80100-pmics.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. X1E80100 CRD";
+ compatible = "qcom,x1e80100-crd", "qcom,x1e80100";
+
+ aliases {
+ serial0 = &uart21;
+ };
+
+ wcd938x: audio-codec {
+ compatible = "qcom,wcd9385-codec";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wcd_default>;
+
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
+ qcom,rx-device = <&wcd_rx>;
+ qcom,tx-device = <&wcd_tx>;
+
+ reset-gpios = <&tlmm 191 GPIO_ACTIVE_LOW>;
+
+ vdd-buck-supply = <&vreg_l15b_1p8>;
+ vdd-rxtx-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l15b_1p8>;
+ vdd-mic-bias-supply = <&vreg_bob1>;
+
+ #sound-dai-cells = <1>;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&hall_int_n_default>, <&kypd_vol_up_n>;
+ pinctrl-names = "default";
+
+ key-vol-up {
+ label = "volume_up";
+ gpios = <&pm8550_gpios 6 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ wakeup-source;
+ };
+
+ switch-lid {
+ label = "lid";
+ gpios = <&tlmm 92 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ wakeup-source;
+ wakeup-event-action = <EV_ACT_DEASSERTED>;
+ };
+ };
+
+ pmic-glink {
+ compatible = "qcom,x1e80100-pmic-glink",
+ "qcom,sm8550-pmic-glink",
+ "qcom,pmic-glink";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>,
+ <&tlmm 123 GPIO_ACTIVE_HIGH>,
+ <&tlmm 125 GPIO_ACTIVE_HIGH>;
+
+ /* Left-side rear port */
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss0_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss0_ss_in: endpoint {
+ remote-endpoint = <&retimer_ss0_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss0_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss0_con_sbu_out>;
+ };
+ };
+ };
+ };
+
+ /* Left-side front port */
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss1_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss1_ss_in: endpoint {
+ remote-endpoint = <&retimer_ss1_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss1_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss1_con_sbu_out>;
+ };
+ };
+ };
+ };
+
+ /* Right-side port */
+ connector@2 {
+ compatible = "usb-c-connector";
+ reg = <2>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss2_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss2_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss2_ss_in: endpoint {
+ remote-endpoint = <&retimer_ss2_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss2_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss2_con_sbu_out>;
+ };
+ };
+ };
+ };
+ };
+
+ reserved-memory {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ size = <0x0 0x8000000>;
+ reusable;
+ linux,cma-default;
+ };
+ };
+
+ sound {
+ compatible = "qcom,x1e80100-sndcard";
+ model = "X1E80100-CRD";
+ audio-routing = "WooferLeft IN", "WSA WSA_SPK1 OUT",
+ "TweeterLeft IN", "WSA WSA_SPK2 OUT",
+ "WooferRight IN", "WSA2 WSA_SPK2 OUT",
+ "TweeterRight IN", "WSA2 WSA_SPK2 OUT",
+ "IN1_HPHL", "HPHL_OUT",
+ "IN2_HPHR", "HPHR_OUT",
+ "AMIC2", "MIC BIAS2",
+ "VA DMIC0", "MIC BIAS3",
+ "VA DMIC1", "MIC BIAS3",
+ "VA DMIC2", "MIC BIAS1",
+ "VA DMIC3", "MIC BIAS1",
+ "TX SWR_INPUT1", "ADC2_OUTPUT";
+
+ wcd-playback-dai-link {
+ link-name = "WCD Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai RX_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 0>, <&swr1 0>, <&lpass_rxmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wcd-capture-dai-link {
+ link-name = "WCD Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 1>, <&swr2 1>, <&lpass_txmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wsa-dai-link {
+ link-name = "WSA Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&left_woofer>, <&left_tweeter>,
+ <&swr0 0>, <&lpass_wsamacro 0>,
+ <&right_woofer>, <&right_tweeter>,
+ <&swr3 0>, <&lpass_wsa2macro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ va-dai-link {
+ link-name = "VA Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai VA_CODEC_DMA_TX_0>;
+ };
+
+ codec {
+ sound-dai = <&lpass_vamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+ };
+
+ vreg_edp_3p3: regulator-edp-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_EDP_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 70 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&edp_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_misc_3p3: regulator-misc-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_MISC_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pm8550ve_8_gpios 6 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&misc_3p3_reg_en>;
+
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vreg_nvme: regulator-nvme {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_NVME_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&nvme_reg_en>;
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_1p15: regulator-rtmr0-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&pmc8380_5_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_pwr_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_1p8: regulator-rtmr0-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8550ve_9_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_3p3: regulator-rtmr0-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pm8550_gpios 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p15: regulator-rtmr1-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&tlmm 188 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb1_pwr_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p8: regulator-rtmr1-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 175 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb1_pwr_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_3p3: regulator-rtmr1-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 186 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb1_pwr_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr2_1p15: regulator-rtmr2-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR2_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&tlmm 189 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb2_pwr_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr2_1p8: regulator-rtmr2-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR2_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 126 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb2_pwr_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr2_3p3: regulator-rtmr2-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR2_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 187 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb2_pwr_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vph_pwr: regulator-vph-pwr {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vreg_wwan: regulator-wwan {
+ compatible = "regulator-fixed";
+
+ regulator-name = "SDX_VPH_PWR";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 221 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&wwan_sw_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-bob1-supply = <&vph_pwr>;
+ vdd-bob2-supply = <&vph_pwr>;
+ vdd-l1-l4-l10-supply = <&vreg_s4c_1p8>;
+ vdd-l2-l13-l14-supply = <&vreg_bob1>;
+ vdd-l5-l16-supply = <&vreg_bob1>;
+ vdd-l6-l7-supply = <&vreg_bob2>;
+ vdd-l8-l9-supply = <&vreg_bob1>;
+ vdd-l12-supply = <&vreg_s5j_1p2>;
+ vdd-l15-supply = <&vreg_s4c_1p8>;
+ vdd-l17-supply = <&vreg_bob2>;
+
+ vreg_bob1: bob1 {
+ regulator-name = "vreg_bob1";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob2: bob2 {
+ regulator-name = "vreg_bob2";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1b_1p8: ldo1 {
+ regulator-name = "vreg_l1b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p0: ldo2 {
+ regulator-name = "vreg_l2b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4b_1p8: ldo4 {
+ regulator-name = "vreg_l4b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5b_3p0: ldo5 {
+ regulator-name = "vreg_l5b_3p0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b_1p8: ldo6 {
+ regulator-name = "vreg_l6b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7b_2p8: ldo7 {
+ regulator-name = "vreg_l7b_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8b_3p0: ldo8 {
+ regulator-name = "vreg_l8b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9b_2p9: ldo9 {
+ regulator-name = "vreg_l9b_2p9";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10b_1p8: ldo10 {
+ regulator-name = "vreg_l10b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12b_1p2: ldo12 {
+ regulator-name = "vreg_l12b_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
+ };
+
+ vreg_l13b_3p0: ldo13 {
+ regulator-name = "vreg_l13b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b_3p0: ldo14 {
+ regulator-name = "vreg_l14b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15b_1p8: ldo15 {
+ regulator-name = "vreg_l15b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
+ };
+
+ vreg_l16b_2p9: ldo16 {
+ regulator-name = "vreg_l16b_2p9";
+ regulator-min-microvolt = <2912000>;
+ regulator-max-microvolt = <2912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17b_2p5: ldo17 {
+ regulator-name = "vreg_l17b_2p5";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s4-supply = <&vph_pwr>;
+
+ vreg_s4c_1p8: smps4 {
+ regulator-name = "vreg_s4c_1p8";
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c_1p2: ldo1 {
+ regulator-name = "vreg_l1c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c_0p8: ldo2 {
+ regulator-name = "vreg_l2c_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c_0p8: ldo3 {
+ regulator-name = "vreg_l3c_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "d";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s4c_1p8>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_l1d_0p8: ldo1 {
+ regulator-name = "vreg_l1d_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2d_0p9: ldo2 {
+ regulator-name = "vreg_l2d_0p9";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3d_1p8: ldo3 {
+ regulator-name = "vreg_l3d_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-3 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+
+ vreg_l2e_0p8: ldo2 {
+ regulator-name = "vreg_l2e_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3e_1p2: ldo3 {
+ regulator-name = "vreg_l3e_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_s1f_0p7: smps1 {
+ regulator-name = "vreg_s1f_0p7";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1f_1p0: ldo1 {
+ regulator-name = "vreg_l1f_1p0";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2f_1p0: ldo2 {
+ regulator-name = "vreg_l2f_1p0";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3f_1p0: ldo3 {
+ regulator-name = "vreg_l3f_1p0";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-6 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "i";
+
+ vdd-l1-supply = <&vreg_s4c_1p8>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+
+ vreg_s1i_0p9: smps1 {
+ regulator-name = "vreg_s1i_0p9";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s2i_1p0: smps2 {
+ regulator-name = "vreg_s2i_1p0";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1i_1p8: ldo1 {
+ regulator-name = "vreg_l1i_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2i_1p2: ldo2 {
+ regulator-name = "vreg_l2i_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3i_0p8: ldo3 {
+ regulator-name = "vreg_l3i_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-7 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "j";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s5-supply = <&vph_pwr>;
+
+ vreg_s5j_1p2: smps5 {
+ regulator-name = "vreg_s5j_1p2";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1j_0p8: ldo1 {
+ regulator-name = "vreg_l1j_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2j_1p2: ldo2 {
+ regulator-name = "vreg_l2j_1p2";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1256000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3j_0p8: ldo3 {
+ regulator-name = "vreg_l3j_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&gpu {
+ status = "okay";
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ touchpad@15 {
+ compatible = "hid-over-i2c";
+ reg = <0x15>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 3 IRQ_TYPE_LEVEL_LOW>;
+
+ vdd-supply = <&vreg_misc_3p3>;
+ vddl-supply = <&vreg_l12b_1p2>;
+
+ pinctrl-0 = <&tpad_default>;
+ pinctrl-names = "default";
+
+ wakeup-source;
+ };
+
+ keyboard@3a {
+ compatible = "hid-over-i2c";
+ reg = <0x3a>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 67 IRQ_TYPE_LEVEL_LOW>;
+
+ vdd-supply = <&vreg_misc_3p3>;
+ vddl-supply = <&vreg_l12b_1p2>;
+
+ pinctrl-0 = <&kybd_default>;
+ pinctrl-names = "default";
+
+ wakeup-source;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x08>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK5>;
+
+ vdd-supply = <&vreg_rtmr2_1p15>;
+ vdd33-supply = <&vreg_rtmr2_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr2_3p3>;
+ vddar-supply = <&vreg_rtmr2_1p15>;
+ vddat-supply = <&vreg_rtmr2_1p15>;
+ vddio-supply = <&vreg_rtmr2_1p8>;
+
+ reset-gpios = <&tlmm 185 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr2_default>;
+ pinctrl-names = "default";
+
+ orientation-switch;
+ retimer-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss2_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss2_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss2_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss2_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss2_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss2_con_sbu_in>;
+ };
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x08>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK3>;
+
+ vdd-supply = <&vreg_rtmr0_1p15>;
+ vdd33-supply = <&vreg_rtmr0_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr0_3p3>;
+ vddar-supply = <&vreg_rtmr0_1p15>;
+ vddat-supply = <&vreg_rtmr0_1p15>;
+ vddio-supply = <&vreg_rtmr0_1p8>;
+
+ reset-gpios = <&pm8550_gpios 10 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr0_default>;
+ pinctrl-names = "default";
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss0_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss0_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_con_sbu_in>;
+ };
+ };
+ };
+ };
+};
+
+&i2c7 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x8>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK4>;
+
+ vdd-supply = <&vreg_rtmr1_1p15>;
+ vdd33-supply = <&vreg_rtmr1_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr1_3p3>;
+ vddar-supply = <&vreg_rtmr1_1p15>;
+ vddat-supply = <&vreg_rtmr1_1p15>;
+ vddio-supply = <&vreg_rtmr1_1p8>;
+
+ reset-gpios = <&tlmm 176 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr1_default>;
+ pinctrl-names = "default";
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss1_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss1_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_con_sbu_in>;
+ };
+ };
+ };
+ };
+};
+
+&i2c8 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ touchscreen@10 {
+ compatible = "hid-over-i2c";
+ reg = <0x10>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 51 IRQ_TYPE_LEVEL_LOW>;
+
+ vdd-supply = <&vreg_misc_3p3>;
+ vddl-supply = <&vreg_l15b_1p8>;
+
+ pinctrl-0 = <&ts0_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&lpass_tlmm {
+ spkr_01_sd_n_active: spkr-01-sd-n-active-state {
+ pins = "gpio12";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+
+ spkr_23_sd_n_active: spkr-23-sd-n-active-state {
+ pins = "gpio13";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+};
+
+&lpass_vamacro {
+ pinctrl-0 = <&dmic01_default>, <&dmic23_default>;
+ pinctrl-names = "default";
+
+ vdd-micb-supply = <&vreg_l1b_1p8>;
+ qcom,dmic-sample-rate = <4800000>;
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dp0 {
+ status = "okay";
+};
+
+&mdss_dp0_out {
+ data-lanes = <0 1>;
+};
+
+&mdss_dp1 {
+ status = "okay";
+};
+
+&mdss_dp1_out {
+ data-lanes = <0 1>;
+};
+
+&mdss_dp2 {
+ status = "okay";
+};
+
+&mdss_dp2_out {
+ data-lanes = <0 1>;
+};
+
+&mdss_dp3 {
+ /delete-property/ #sound-dai-cells;
+
+ status = "okay";
+
+ aux-bus {
+ panel {
+ compatible = "samsung,atna45af01", "samsung,atna33xc20";
+ enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>;
+ power-supply = <&vreg_edp_3p3>;
+
+ pinctrl-0 = <&edp_bl_en>;
+ pinctrl-names = "default";
+
+ port {
+ edp_panel_in: endpoint {
+ remote-endpoint = <&mdss_dp3_out>;
+ };
+ };
+ };
+ };
+
+ ports {
+ port@1 {
+ reg = <1>;
+ mdss_dp3_out: endpoint {
+ data-lanes = <0 1 2 3>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+
+ remote-endpoint = <&edp_panel_in>;
+ };
+ };
+ };
+};
+
+&mdss_dp3_phy {
+ vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-pll-supply = <&vreg_l2j_1p2>;
+
+ status = "okay";
+};
+
+&pcie4 {
+ perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&pcie4_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie4_phy {
+ vdda-phy-supply = <&vreg_l3i_0p8>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+
+ status = "okay";
+};
+
+&pcie5 {
+ perst-gpios = <&tlmm 149 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 151 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_wwan>;
+
+ pinctrl-0 = <&pcie5_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie5_phy {
+ vdda-phy-supply = <&vreg_l3i_0p8>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+
+ status = "okay";
+};
+
+&pcie6a {
+ perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_nvme>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie6a_default>;
+
+ status = "okay";
+};
+
+&pcie6a_phy {
+ vdda-phy-supply = <&vreg_l1d_0p8>;
+ vdda-pll-supply = <&vreg_l2j_1p2>;
+
+ status = "okay";
+};
+
+&pm8550_gpios {
+ kypd_vol_up_n: kypd-vol-up-n-state {
+ pins = "gpio6";
+ function = "normal";
+ power-source = <1>; /* 1.8 V */
+ bias-pull-up;
+ input-enable;
+ };
+
+ rtmr0_default: rtmr0-reset-n-active-state {
+ pins = "gpio10";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+
+ usb0_3p3_reg_en: usb0-3p3-reg-en-state {
+ pins = "gpio11";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+};
+
+&pm8550ve_8_gpios {
+ misc_3p3_reg_en: misc-3p3-reg-en-state {
+ pins = "gpio6";
+ function = "normal";
+ bias-disable;
+ input-disable;
+ output-enable;
+ drive-push-pull;
+ power-source = <1>; /* 1.8 V */
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ };
+};
+
+&pm8550ve_9_gpios {
+ usb0_1p8_reg_en: usb0-1p8-reg-en-state {
+ pins = "gpio8";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+};
+
+&pmc8380_3_gpios {
+ edp_bl_en: edp-bl-en-state {
+ pins = "gpio4";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ input-disable;
+ output-enable;
+ };
+};
+
+&pmc8380_5_gpios {
+ usb0_pwr_1p15_reg_en: usb0-pwr-1p15-reg-en-state {
+ pins = "gpio8";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+};
+
+&qupv3_0 {
+ status = "okay";
+};
+
+&qupv3_1 {
+ status = "okay";
+};
+
+&qupv3_2 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/x1e80100/adsp.mbn",
+ "qcom/x1e80100/adsp_dtb.mbn";
+
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/x1e80100/cdsp.mbn",
+ "qcom/x1e80100/cdsp_dtb.mbn";
+
+ status = "okay";
+};
+
+&smb2360_0 {
+ status = "okay";
+};
+
+&smb2360_0_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l2b_3p0>;
+};
+
+&smb2360_1 {
+ status = "okay";
+};
+
+&smb2360_1_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l14b_3p0>;
+};
+
+&smb2360_2 {
+ status = "okay";
+};
+
+&smb2360_2_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l8b_3p0>;
+};
+
+&swr0 {
+ status = "okay";
+
+ pinctrl-0 = <&wsa_swr_active>, <&spkr_01_sd_n_active>;
+ pinctrl-names = "default";
+
+ /* WSA8845, Left Woofer */
+ left_woofer: speaker@0,0 {
+ compatible = "sdw20217020400";
+ reg = <0 0>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "WooferLeft";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <1 2 3 7 10 13>;
+ };
+
+ /* WSA8845, Left Tweeter */
+ left_tweeter: speaker@0,1 {
+ compatible = "sdw20217020400";
+ reg = <0 1>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "TweeterLeft";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <4 5 6 7 11 13>;
+ };
+};
+
+&swr1 {
+ status = "okay";
+
+ /* WCD9385 RX */
+ wcd_rx: codec@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+ qcom,rx-port-mapping = <1 2 3 4 5>;
+ };
+};
+
+&swr2 {
+ status = "okay";
+
+ /* WCD9385 TX */
+ wcd_tx: codec@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+ qcom,tx-port-mapping = <2 2 3 4>;
+ };
+};
+
+&swr3 {
+ status = "okay";
+
+ pinctrl-0 = <&wsa2_swr_active>, <&spkr_23_sd_n_active>;
+ pinctrl-names = "default";
+
+ /* WSA8845, Right Woofer */
+ right_woofer: speaker@0,0 {
+ compatible = "sdw20217020400";
+ reg = <0 0>;
+ reset-gpios = <&lpass_tlmm 13 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "WooferRight";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <1 2 3 7 10 13>;
+ };
+
+ /* WSA8845, Right Tweeter */
+ right_tweeter: speaker@0,1 {
+ compatible = "sdw20217020400";
+ reg = <0 1>;
+ reset-gpios = <&lpass_tlmm 13 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "TweeterRight";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <4 5 6 7 11 13>;
+ };
+};
+
+&tlmm {
+ gpio-reserved-ranges = <34 2>, /* Unused */
+ <44 4>, /* SPI (TPM) */
+ <238 1>; /* UFS Reset */
+
+ edp_reg_en: edp-reg-en-state {
+ pins = "gpio70";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ hall_int_n_default: hall-int-n-state {
+ pins = "gpio92";
+ function = "gpio";
+ bias-disable;
+ };
+
+ kybd_default: kybd-default-state {
+ pins = "gpio67";
+ function = "gpio";
+ bias-disable;
+ };
+
+ nvme_reg_en: nvme-reg-en-state {
+ pins = "gpio18";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pcie4_default: pcie4-default-state {
+ clkreq-n-pins {
+ pins = "gpio147";
+ function = "pcie4_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio146";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio148";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie5_default: pcie5-default-state {
+ clkreq-n-pins {
+ pins = "gpio150";
+ function = "pcie5_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio149";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio151";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie6a_default: pcie6a-default-state {
+ clkreq-n-pins {
+ pins = "gpio153";
+ function = "pcie6a_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio152";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio154";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ rtmr1_default: rtmr1-reset-n-active-state {
+ pins = "gpio176";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rtmr2_default: rtmr2-reset-n-active-state {
+ pins = "gpio185";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tpad_default: tpad-default-state {
+ pins = "gpio3";
+ function = "gpio";
+ bias-disable;
+ };
+
+ ts0_default: ts0-default-state {
+ int-n-pins {
+ pins = "gpio51";
+ function = "gpio";
+ bias-disable;
+ };
+
+ reset-n-pins {
+ pins = "gpio48";
+ function = "gpio";
+ output-high;
+ drive-strength = <16>;
+ };
+ };
+
+ usb1_pwr_1p15_reg_en: usb1-pwr-1p15-reg-en-state {
+ pins = "gpio188";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb1_pwr_1p8_reg_en: usb1-pwr-1p8-reg-en-state {
+ pins = "gpio175";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb1_pwr_3p3_reg_en: usb1-pwr-3p3-reg-en-state {
+ pins = "gpio186";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb2_pwr_1p15_reg_en: usb2-pwr-1p15-reg-en-state {
+ pins = "gpio189";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb2_pwr_1p8_reg_en: usb2-pwr-1p8-reg-en-state {
+ pins = "gpio126";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb2_pwr_3p3_reg_en: usb2-pwr-3p3-reg-en-state {
+ pins = "gpio187";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wcd_default: wcd-reset-n-active-state {
+ pins = "gpio191";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+
+ wwan_sw_en: wwan-sw-en-state {
+ pins = "gpio221";
+ function = "gpio";
+ drive-strength = <4>;
+ bias-disable;
+ };
+};
+
+&uart21 {
+ compatible = "qcom,geni-debug-uart";
+ status = "okay";
+};
+
+&usb_1_ss0_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_0_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss0_qmpphy {
+ vdda-phy-supply = <&vreg_l2j_1p2>;
+ vdda-pll-supply = <&vreg_l1j_0p8>;
+
+ status = "okay";
+};
+
+&usb_1_ss0 {
+ status = "okay";
+};
+
+&usb_1_ss0_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss0_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss0_hs_in>;
+};
+
+&usb_1_ss0_qmpphy_out {
+ remote-endpoint = <&retimer_ss0_ss_in>;
+};
+
+&usb_1_ss1_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_1_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss1_qmpphy {
+ vdda-phy-supply = <&vreg_l2j_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
+ status = "okay";
+};
+
+&usb_1_ss1 {
+ status = "okay";
+};
+
+&usb_1_ss1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss1_hs_in>;
+};
+
+&usb_1_ss1_qmpphy_out {
+ remote-endpoint = <&retimer_ss1_ss_in>;
+};
+
+&usb_1_ss2_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_2_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss2_qmpphy {
+ vdda-phy-supply = <&vreg_l2j_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
+ status = "okay";
+};
+
+&usb_1_ss2 {
+ status = "okay";
+};
+
+&usb_1_ss2_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss2_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss2_hs_in>;
+};
+
+&usb_1_ss2_qmpphy_out {
+ remote-endpoint = <&retimer_ss2_ss_in>;
+};
diff --git a/arch/arm64/boot/dts/qcom/x1-el2.dtso b/arch/arm64/boot/dts/qcom/x1-el2.dtso
new file mode 100644
index 000000000000..380441deca65
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1-el2.dtso
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+/*
+ * x1-specific modifications required to boot in EL2.
+ */
+
+/dts-v1/;
+/plugin/;
+
+/* We can't and don't need to use the zap shader in EL2, as Linux can zap the GPU on its own. */
+&gpu_zap_shader {
+ status = "disabled";
+};
+
+/*
+ * When running under Gunyah, this IOMMU is controlled by the firmware;
+ * however, when we take ownership of it in EL2, we need to configure
+ * it properly to use PCIe.
+ *
+ * Additionally, ITS emulation in Gunyah appears to be broken, so we
+ * can't use MSI on some PCIe controllers in EL1, but we can add them
+ * here for EL2.
+ */
+&pcie3 {
+ iommu-map = <0 &pcie_smmu 0x30000 0x10000>;
+ msi-map = <0 &gic_its 0xb0000 0x10000>;
+};
+
+&pcie4 {
+ iommu-map = <0 &pcie_smmu 0x40000 0x10000>;
+};
+
+&pcie5 {
+ iommu-map = <0 &pcie_smmu 0x50000 0x10000>;
+ msi-map = <0 &gic_its 0xd0000 0x10000>;
+};
+
+&pcie6a {
+ iommu-map = <0 &pcie_smmu 0x60000 0x10000>;
+};
+
+&pcie_smmu {
+ status = "okay";
+};
+
+/*
+ * The "SBSA watchdog" is implemented in software in Gunyah
+ * and can't be used when running in EL2.
+ */
+&sbsa_watchdog {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e001de-devkit.dts b/arch/arm64/boot/dts/qcom/x1e001de-devkit.dts
index 5e3970b26e2f..2d9627e6c798 100644
--- a/arch/arm64/boot/dts/qcom/x1e001de-devkit.dts
+++ b/arch/arm64/boot/dts/qcom/x1e001de-devkit.dts
@@ -507,6 +507,7 @@
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_l13b_3p0: ldo13 {
@@ -528,6 +529,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_l16b_2p9: ldo16 {
@@ -745,8 +747,8 @@
vreg_l2j_1p2: ldo2 {
regulator-name = "vreg_l2j_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1256000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
@@ -777,7 +779,6 @@
reg = <0x08>;
clocks = <&rpmhcc RPMH_RF_CLK5>;
- clock-names = "xo";
vdd-supply = <&vreg_rtmr2_1p15>;
vdd33-supply = <&vreg_rtmr2_3p3>;
@@ -786,7 +787,10 @@
vddat-supply = <&vreg_rtmr2_1p15>;
vddio-supply = <&vreg_rtmr2_1p8>;
- reset-gpios = <&tlmm 185 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&tlmm 185 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr2_default>;
+ pinctrl-names = "default";
orientation-switch;
retimer-switch;
@@ -832,7 +836,6 @@
reg = <0x08>;
clocks = <&rpmhcc RPMH_RF_CLK3>;
- clock-names = "xo";
vdd-supply = <&vreg_rtmr0_1p15>;
vdd33-supply = <&vreg_rtmr0_3p3>;
@@ -841,7 +844,10 @@
vddat-supply = <&vreg_rtmr0_1p15>;
vddio-supply = <&vreg_rtmr0_1p8>;
- reset-gpios = <&pm8550_gpios 10 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pm8550_gpios 10 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr0_default>;
+ pinctrl-names = "default";
retimer-switch;
orientation-switch;
@@ -877,6 +883,40 @@
};
};
+&i2c5 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ eusb3_repeater: redriver@47 {
+ compatible = "nxp,ptn3222";
+ reg = <0x47>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb3_reset_n>;
+ pinctrl-names = "default";
+ };
+
+ eusb6_repeater: redriver@4f {
+ compatible = "nxp,ptn3222";
+ reg = <0x4f>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 184 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb6_reset_n>;
+ pinctrl-names = "default";
+ };
+};
+
&i2c7 {
clock-frequency = <400000>;
@@ -887,7 +927,6 @@
reg = <0x8>;
clocks = <&rpmhcc RPMH_RF_CLK4>;
- clock-names = "xo";
vdd-supply = <&vreg_rtmr1_1p15>;
vdd33-supply = <&vreg_rtmr1_3p3>;
@@ -896,7 +935,10 @@
vddat-supply = <&vreg_rtmr1_1p15>;
vddio-supply = <&vreg_rtmr1_1p8>;
- reset-gpios = <&tlmm 176 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&tlmm 176 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr1_default>;
+ pinctrl-names = "default";
retimer-switch;
orientation-switch;
@@ -942,6 +984,7 @@
&mdss_dp0_out {
data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
};
&mdss_dp1 {
@@ -950,6 +993,7 @@
&mdss_dp1_out {
data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
};
&mdss_dp2 {
@@ -958,6 +1002,7 @@
&mdss_dp2_out {
data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
};
&pcie4 {
@@ -1016,9 +1061,22 @@
};
&pm8550_gpios {
+ rtmr0_default: rtmr0-reset-n-active-state {
+ pins = "gpio10";
+ function = "normal";
+ power-source = <1>; /* 1.8 V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+
usb0_3p3_reg_en: usb0-3p3-reg-en-state {
pins = "gpio11";
function = "normal";
+ power-source = <1>; /* 1.8 V */
+ bias-disable;
+ input-disable;
+ output-enable;
};
};
@@ -1026,6 +1084,10 @@
usb0_pwr_1p15_en: usb0-pwr-1p15-en-state {
pins = "gpio8";
function = "normal";
+ power-source = <1>; /* 1.8 V */
+ bias-disable;
+ input-disable;
+ output-enable;
};
};
@@ -1033,6 +1095,10 @@
usb0_1p8_reg_en: usb0-1p8-reg-en-state {
pins = "gpio8";
function = "normal";
+ power-source = <1>; /* 1.8 V */
+ bias-disable;
+ input-disable;
+ output-enable;
};
};
@@ -1127,6 +1193,22 @@
&tlmm {
gpio-reserved-ranges = <44 4>; /* SPI (TPM) */
+ eusb3_reset_n: eusb3-reset-n-state {
+ pins = "gpio6";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
+ eusb6_reset_n: eusb6-reset-n-state {
+ pins = "gpio184";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
nvme_reg_en: nvme-reg-en-state {
pins = "gpio18";
function = "gpio";
@@ -1203,6 +1285,20 @@
};
};
+ rtmr1_default: rtmr1-reset-n-active-state {
+ pins = "gpio176";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rtmr2_default: rtmr2-reset-n-active-state {
+ pins = "gpio185";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
rtmr1_1p15_reg_en: rtmr1-1p15-reg-en-state {
pins = "gpio188";
function = "gpio";
@@ -1369,3 +1465,39 @@
&usb_1_ss2_qmpphy_out {
remote-endpoint = <&retimer_ss2_ss_in>;
};
+
+&usb_mp {
+ status = "okay";
+};
+
+&usb_mp_hsphy0 {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb6_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp_hsphy1 {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb3_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy0 {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l3c_0p8>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy1 {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l3c_0p8>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s-oled.dts b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s-oled.dts
new file mode 100644
index 000000000000..be65fafafa73
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s-oled.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2025, Linaro Limited
+ */
+
+#include "x1e78100-lenovo-thinkpad-t14s.dtsi"
+
+/ {
+ model = "Lenovo ThinkPad T14s Gen 6 (OLED)";
+ compatible = "lenovo,thinkpad-t14s-oled", "lenovo,thinkpad-t14s",
+ "qcom,x1e78100", "qcom,x1e80100";
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
index b2c2347f54fa..5cc6a63d1ef6 100644
--- a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
+++ b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
@@ -1,1134 +1,60 @@
// SPDX-License-Identifier: BSD-3-Clause
/*
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
- * Copyright (c) 2024, Linaro Limited
+ * Copyright (c) 2025, Linaro Limited
*/
-/dts-v1/;
-
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/gpio-keys.h>
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
-
-#include "x1e80100.dtsi"
-#include "x1e80100-pmics.dtsi"
+#include "x1e78100-lenovo-thinkpad-t14s.dtsi"
/ {
- model = "Lenovo ThinkPad T14s Gen 6";
- compatible = "lenovo,thinkpad-t14s", "qcom,x1e78100", "qcom,x1e80100";
- chassis-type = "laptop";
-
- wcd938x: audio-codec {
- compatible = "qcom,wcd9385-codec";
-
- pinctrl-0 = <&wcd_default>;
- pinctrl-names = "default";
-
- qcom,micbias1-microvolt = <1800000>;
- qcom,micbias2-microvolt = <1800000>;
- qcom,micbias3-microvolt = <1800000>;
- qcom,micbias4-microvolt = <1800000>;
- qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
- qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
- qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
- qcom,rx-device = <&wcd_rx>;
- qcom,tx-device = <&wcd_tx>;
-
- reset-gpios = <&tlmm 191 GPIO_ACTIVE_LOW>;
-
- vdd-buck-supply = <&vreg_l15b_1p8>;
- vdd-rxtx-supply = <&vreg_l15b_1p8>;
- vdd-io-supply = <&vreg_l15b_1p8>;
- vdd-mic-bias-supply = <&vreg_bob1>;
-
- #sound-dai-cells = <1>;
- };
-
- gpio-keys {
- compatible = "gpio-keys";
-
- pinctrl-0 = <&hall_int_n_default>;
- pinctrl-names = "default";
-
- switch-lid {
- gpios = <&tlmm 92 GPIO_ACTIVE_LOW>;
- linux,input-type = <EV_SW>;
- linux,code = <SW_LID>;
- wakeup-source;
- wakeup-event-action = <EV_ACT_DEASSERTED>;
- };
- };
-
- pmic-glink {
- compatible = "qcom,x1e80100-pmic-glink",
- "qcom,sm8550-pmic-glink",
- "qcom,pmic-glink";
- orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>,
- <&tlmm 123 GPIO_ACTIVE_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* Display-adjacent port */
- connector@0 {
- compatible = "usb-c-connector";
- reg = <0>;
- power-role = "dual";
- data-role = "dual";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- pmic_glink_ss0_hs_in: endpoint {
- remote-endpoint = <&usb_1_ss0_dwc3_hs>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- pmic_glink_ss0_ss_in: endpoint {
- remote-endpoint = <&usb_1_ss0_qmpphy_out>;
- };
- };
- };
- };
+ model = "Lenovo ThinkPad T14s Gen 6 (LCD)";
+ compatible = "lenovo,thinkpad-t14s-lcd", "lenovo,thinkpad-t14s",
+ "qcom,x1e78100", "qcom,x1e80100";
- /* User-adjacent port */
- connector@1 {
- compatible = "usb-c-connector";
- reg = <1>;
- power-role = "dual";
- data-role = "dual";
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pmk8550_pwm 0 4266537>;
+ enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>;
+ power-supply = <&vreg_edp_bl>;
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- pmic_glink_ss1_hs_in: endpoint {
- remote-endpoint = <&usb_1_ss1_dwc3_hs>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- pmic_glink_ss1_ss_in: endpoint {
- remote-endpoint = <&usb_1_ss1_qmpphy_out>;
- };
- };
- };
- };
- };
-
- reserved-memory {
- linux,cma {
- compatible = "shared-dma-pool";
- size = <0x0 0x8000000>;
- reusable;
- linux,cma-default;
- };
- };
-
- vreg_edp_3p3: regulator-edp-3p3 {
- compatible = "regulator-fixed";
-
- regulator-name = "VREG_EDP_3P3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
-
- gpio = <&tlmm 70 GPIO_ACTIVE_HIGH>;
- enable-active-high;
-
- pinctrl-0 = <&edp_reg_en>;
+ pinctrl-0 = <&edp_bl_en>, <&edp_bl_pwm>;
pinctrl-names = "default";
-
- regulator-boot-on;
};
- vreg_nvme: regulator-nvme {
+ vreg_edp_bl: regulator-edp-bl {
compatible = "regulator-fixed";
- regulator-name = "VREG_NVME_3P3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
+ regulator-name = "VBL9";
+ regulator-min-microvolt = <3600000>;
+ regulator-max-microvolt = <3600000>;
- gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>;
+ gpio = <&pmc8380_3_gpios 10 GPIO_ACTIVE_HIGH>;
enable-active-high;
- pinctrl-0 = <&nvme_reg_en>;
pinctrl-names = "default";
+ pinctrl-0 = <&edp_bl_reg_en>;
regulator-boot-on;
};
-
- vph_pwr: regulator-vph-pwr {
- compatible = "regulator-fixed";
-
- regulator-name = "vph_pwr";
- regulator-min-microvolt = <3700000>;
- regulator-max-microvolt = <3700000>;
-
- regulator-always-on;
- regulator-boot-on;
- };
-
- sound {
- compatible = "qcom,x1e80100-sndcard";
- model = "X1E80100-LENOVO-Thinkpad-T14s";
- audio-routing = "SpkrLeft IN", "WSA WSA_SPK1 OUT",
- "SpkrRight IN", "WSA WSA_SPK2 OUT",
- "IN1_HPHL", "HPHL_OUT",
- "IN2_HPHR", "HPHR_OUT",
- "AMIC2", "MIC BIAS2",
- "VA DMIC0", "MIC BIAS1",
- "VA DMIC1", "MIC BIAS1",
- "VA DMIC0", "VA MIC BIAS1",
- "VA DMIC1", "VA MIC BIAS1",
- "TX SWR_INPUT1", "ADC2_OUTPUT";
-
- wcd-playback-dai-link {
- link-name = "WCD Playback";
-
- cpu {
- sound-dai = <&q6apmbedai RX_CODEC_DMA_RX_0>;
- };
-
- codec {
- sound-dai = <&wcd938x 0>, <&swr1 0>, <&lpass_rxmacro 0>;
- };
-
- platform {
- sound-dai = <&q6apm>;
- };
- };
-
- wcd-capture-dai-link {
- link-name = "WCD Capture";
-
- cpu {
- sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
- };
-
- codec {
- sound-dai = <&wcd938x 1>, <&swr2 1>, <&lpass_txmacro 0>;
- };
-
- platform {
- sound-dai = <&q6apm>;
- };
- };
-
- wsa-dai-link {
- link-name = "WSA Playback";
-
- cpu {
- sound-dai = <&q6apmbedai WSA_CODEC_DMA_RX_0>;
- };
-
- codec {
- sound-dai = <&left_spkr>, <&right_spkr>, <&swr0 0>, <&lpass_wsamacro 0>;
- };
-
- platform {
- sound-dai = <&q6apm>;
- };
- };
-
- va-dai-link {
- link-name = "VA Capture";
-
- cpu {
- sound-dai = <&q6apmbedai VA_CODEC_DMA_TX_0>;
- };
-
- codec {
- sound-dai = <&lpass_vamacro 0>;
- };
-
- platform {
- sound-dai = <&q6apm>;
- };
- };
- };
};
-&apps_rsc {
- regulators-0 {
- compatible = "qcom,pm8550-rpmh-regulators";
- qcom,pmic-id = "b";
-
- vdd-bob1-supply = <&vph_pwr>;
- vdd-bob2-supply = <&vph_pwr>;
- vdd-l1-l4-l10-supply = <&vreg_s4c_1p8>;
- vdd-l2-l13-l14-supply = <&vreg_bob1>;
- vdd-l5-l16-supply = <&vreg_bob1>;
- vdd-l6-l7-supply = <&vreg_bob2>;
- vdd-l8-l9-supply = <&vreg_bob1>;
- vdd-l12-supply = <&vreg_s5j_1p2>;
- vdd-l15-supply = <&vreg_s4c_1p8>;
- vdd-l17-supply = <&vreg_bob2>;
-
- vreg_bob1: bob1 {
- regulator-name = "vreg_bob1";
- regulator-min-microvolt = <3008000>;
- regulator-max-microvolt = <3960000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_bob2: bob2 {
- regulator-name = "vreg_bob2";
- regulator-min-microvolt = <2504000>;
- regulator-max-microvolt = <3008000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l1b_1p8: ldo1 {
- regulator-name = "vreg_l1b_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l2b_3p0: ldo2 {
- regulator-name = "vreg_l2b_3p0";
- regulator-min-microvolt = <3072000>;
- regulator-max-microvolt = <3072000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l4b_1p8: ldo4 {
- regulator-name = "vreg_l4b_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l6b_1p8: ldo6 {
- regulator-name = "vreg_l6b_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2960000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l8b_3p0: ldo8 {
- regulator-name = "vreg_l8b_3p0";
- regulator-min-microvolt = <3072000>;
- regulator-max-microvolt = <3072000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l9b_2p9: ldo9 {
- regulator-name = "vreg_l9b_2p9";
- regulator-min-microvolt = <2960000>;
- regulator-max-microvolt = <2960000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l10b_1p8: ldo10 {
- regulator-name = "vreg_l10b_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l12b_1p2: ldo12 {
- regulator-name = "vreg_l12b_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l13b_3p0: ldo13 {
- regulator-name = "vreg_l13b_3p0";
- regulator-min-microvolt = <3072000>;
- regulator-max-microvolt = <3072000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l14b_3p0: ldo14 {
- regulator-name = "vreg_l14b_3p0";
- regulator-min-microvolt = <3072000>;
- regulator-max-microvolt = <3072000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l15b_1p8: ldo15 {
- regulator-name = "vreg_l15b_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l17b_2p5: ldo17 {
- regulator-name = "vreg_l17b_2p5";
- regulator-min-microvolt = <2504000>;
- regulator-max-microvolt = <2504000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-1 {
- compatible = "qcom,pm8550ve-rpmh-regulators";
- qcom,pmic-id = "c";
-
- vdd-l1-supply = <&vreg_s5j_1p2>;
- vdd-l2-supply = <&vreg_s1f_0p7>;
- vdd-l3-supply = <&vreg_s1f_0p7>;
- vdd-s4-supply = <&vph_pwr>;
-
- vreg_s4c_1p8: smps4 {
- regulator-name = "vreg_s4c_1p8";
- regulator-min-microvolt = <1856000>;
- regulator-max-microvolt = <2000000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l1c_1p2: ldo1 {
- regulator-name = "vreg_l1c_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l2c_0p8: ldo2 {
- regulator-name = "vreg_l2c_0p8";
- regulator-min-microvolt = <880000>;
- regulator-max-microvolt = <880000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l3c_0p8: ldo3 {
- regulator-name = "vreg_l3c_0p8";
- regulator-min-microvolt = <912000>;
- regulator-max-microvolt = <912000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-2 {
- compatible = "qcom,pmc8380-rpmh-regulators";
- qcom,pmic-id = "d";
-
- vdd-l1-supply = <&vreg_s1f_0p7>;
- vdd-l2-supply = <&vreg_s1f_0p7>;
- vdd-l3-supply = <&vreg_s4c_1p8>;
- vdd-s1-supply = <&vph_pwr>;
-
- vreg_l1d_0p8: ldo1 {
- regulator-name = "vreg_l1d_0p8";
- regulator-min-microvolt = <880000>;
- regulator-max-microvolt = <880000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l2d_0p9: ldo2 {
- regulator-name = "vreg_l2d_0p9";
- regulator-min-microvolt = <912000>;
- regulator-max-microvolt = <912000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l3d_1p8: ldo3 {
- regulator-name = "vreg_l3d_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-3 {
- compatible = "qcom,pmc8380-rpmh-regulators";
- qcom,pmic-id = "e";
-
- vdd-l2-supply = <&vreg_s1f_0p7>;
- vdd-l3-supply = <&vreg_s5j_1p2>;
-
- vreg_l2e_0p8: ldo2 {
- regulator-name = "vreg_l2e_0p8";
- regulator-min-microvolt = <880000>;
- regulator-max-microvolt = <880000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l3e_1p2: ldo3 {
- regulator-name = "vreg_l3e_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-4 {
- compatible = "qcom,pmc8380-rpmh-regulators";
- qcom,pmic-id = "f";
-
- vdd-l1-supply = <&vreg_s5j_1p2>;
- vdd-l2-supply = <&vreg_s5j_1p2>;
- vdd-l3-supply = <&vreg_s5j_1p2>;
- vdd-s1-supply = <&vph_pwr>;
-
- vreg_s1f_0p7: smps1 {
- regulator-name = "vreg_s1f_0p7";
- regulator-min-microvolt = <700000>;
- regulator-max-microvolt = <1100000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-6 {
- compatible = "qcom,pm8550ve-rpmh-regulators";
- qcom,pmic-id = "i";
-
- vdd-l1-supply = <&vreg_s4c_1p8>;
- vdd-l2-supply = <&vreg_s5j_1p2>;
- vdd-l3-supply = <&vreg_s1f_0p7>;
- vdd-s1-supply = <&vph_pwr>;
- vdd-s2-supply = <&vph_pwr>;
-
- vreg_l1i_1p8: ldo1 {
- regulator-name = "vreg_l1i_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l2i_1p2: ldo2 {
- regulator-name = "vreg_l2i_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l3i_0p8: ldo3 {
- regulator-name = "vreg_l3i_0p8";
- regulator-min-microvolt = <880000>;
- regulator-max-microvolt = <880000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-7 {
- compatible = "qcom,pm8550ve-rpmh-regulators";
- qcom,pmic-id = "j";
-
- vdd-l1-supply = <&vreg_s1f_0p7>;
- vdd-l2-supply = <&vreg_s5j_1p2>;
- vdd-l3-supply = <&vreg_s1f_0p7>;
- vdd-s5-supply = <&vph_pwr>;
-
- vreg_s5j_1p2: smps5 {
- regulator-name = "vreg_s5j_1p2";
- regulator-min-microvolt = <1256000>;
- regulator-max-microvolt = <1304000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l1j_0p8: ldo1 {
- regulator-name = "vreg_l1j_0p8";
- regulator-min-microvolt = <912000>;
- regulator-max-microvolt = <912000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l2j_1p2: ldo2 {
- regulator-name = "vreg_l2j_1p2";
- regulator-min-microvolt = <1256000>;
- regulator-max-microvolt = <1256000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l3j_0p8: ldo3 {
- regulator-name = "vreg_l3j_0p8";
- regulator-min-microvolt = <880000>;
- regulator-max-microvolt = <880000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-};
-
-&gpu {
- status = "okay";
-
- zap-shader {
- firmware-name = "qcom/x1e80100/LENOVO/21N1/qcdxkmsuc8380.mbn";
- };
-};
-
-&i2c0 {
- clock-frequency = <400000>;
-
- pinctrl-0 = <&qup_i2c0_data_clk>, <&tpad_default>;
- pinctrl-names = "default";
-
- status = "okay";
-
- /* ELAN06E2 or ELAN06E3 */
- touchpad@15 {
- compatible = "hid-over-i2c";
- reg = <0x15>;
-
- hid-descr-addr = <0x1>;
- interrupts-extended = <&tlmm 3 IRQ_TYPE_LEVEL_LOW>;
-
- wakeup-source;
- };
-
- /* SYNA8022 or SYNA8024 */
- touchpad@2c {
- compatible = "hid-over-i2c";
- reg = <0x2c>;
-
- hid-descr-addr = <0x20>;
- interrupts-extended = <&tlmm 3 IRQ_TYPE_LEVEL_LOW>;
-
- wakeup-source;
- };
-
- /* ELAN06F1 or SYNA06F2 */
- keyboard@3a {
- compatible = "hid-over-i2c";
- reg = <0x3a>;
-
- hid-descr-addr = <0x1>;
- interrupts-extended = <&tlmm 67 IRQ_TYPE_LEVEL_LOW>;
-
- pinctrl-0 = <&kybd_default>;
- pinctrl-names = "default";
-
- wakeup-source;
- };
-};
-
-&i2c5 {
- clock-frequency = <400000>;
-
- status = "okay";
-
- eusb5_repeater: redriver@43 {
- compatible = "nxp,ptn3222";
- reg = <0x43>;
- #phy-cells = <0>;
-
- vdd3v3-supply = <&vreg_l13b_3p0>;
- vdd1v8-supply = <&vreg_l4b_1p8>;
-
- reset-gpios = <&tlmm 7 GPIO_ACTIVE_LOW>;
-
- pinctrl-0 = <&eusb5_reset_n>;
- pinctrl-names = "default";
- };
-
- eusb3_repeater: redriver@47 {
- compatible = "nxp,ptn3222";
- reg = <0x47>;
- #phy-cells = <0>;
-
- vdd3v3-supply = <&vreg_l13b_3p0>;
- vdd1v8-supply = <&vreg_l4b_1p8>;
-
- reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
-
- pinctrl-0 = <&eusb3_reset_n>;
- pinctrl-names = "default";
- };
-
- eusb6_repeater: redriver@4f {
- compatible = "nxp,ptn3222";
- reg = <0x4f>;
- #phy-cells = <0>;
-
- vdd3v3-supply = <&vreg_l13b_3p0>;
- vdd1v8-supply = <&vreg_l4b_1p8>;
-
- reset-gpios = <&tlmm 184 GPIO_ACTIVE_LOW>;
-
- pinctrl-0 = <&eusb6_reset_n>;
- pinctrl-names = "default";
- };
-};
-
-&i2c8 {
- clock-frequency = <400000>;
-
- status = "okay";
-
- /* ILIT2911 or GTCH1563 */
- touchscreen@10 {
- compatible = "hid-over-i2c";
- reg = <0x10>;
-
- hid-descr-addr = <0x1>;
- interrupts-extended = <&tlmm 51 IRQ_TYPE_LEVEL_LOW>;
-
- pinctrl-0 = <&ts0_default>;
- pinctrl-names = "default";
- };
-
- /* TODO: second-sourced touchscreen @ 0x41 */
-};
-
-&lpass_tlmm {
- spkr_01_sd_n_active: spkr-01-sd-n-active-state {
- pins = "gpio12";
- function = "gpio";
- drive-strength = <16>;
- bias-disable;
- output-low;
- };
-};
-
-&lpass_vamacro {
- pinctrl-0 = <&dmic01_default>;
- pinctrl-names = "default";
-
- vdd-micb-supply = <&vreg_l1b_1p8>;
- qcom,dmic-sample-rate = <4800000>;
-};
-
-&mdss {
- status = "okay";
-};
-
-&mdss_dp3 {
- compatible = "qcom,x1e80100-dp";
- /delete-property/ #sound-dai-cells;
-
- status = "okay";
-
- aux-bus {
- panel {
- compatible = "edp-panel";
- enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>;
- power-supply = <&vreg_edp_3p3>;
-
- pinctrl-0 = <&edp_bl_en>;
- pinctrl-names = "default";
-
- port {
- edp_panel_in: endpoint {
- remote-endpoint = <&mdss_dp3_out>;
- };
- };
- };
- };
-
- ports {
- port@1 {
- reg = <1>;
-
- mdss_dp3_out: endpoint {
- data-lanes = <0 1 2 3>;
- link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
-
- remote-endpoint = <&edp_panel_in>;
- };
- };
- };
-};
-
-&mdss_dp3_phy {
- vdda-phy-supply = <&vreg_l3j_0p8>;
- vdda-pll-supply = <&vreg_l2j_1p2>;
-
- status = "okay";
-};
-
-&pcie4 {
- perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
- wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
-
- pinctrl-0 = <&pcie4_default>;
- pinctrl-names = "default";
-
- status = "okay";
-};
-
-&pcie4_phy {
- vdda-phy-supply = <&vreg_l3i_0p8>;
- vdda-pll-supply = <&vreg_l3e_1p2>;
-
- status = "okay";
-};
-
-&pcie6a {
- perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
- wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
-
- vddpe-3v3-supply = <&vreg_nvme>;
-
- pinctrl-0 = <&pcie6a_default>;
- pinctrl-names = "default";
-
- status = "okay";
-};
-
-&pcie6a_phy {
- vdda-phy-supply = <&vreg_l1d_0p8>;
- vdda-pll-supply = <&vreg_l2j_1p2>;
-
- status = "okay";
+&panel {
+ backlight = <&backlight>;
};
&pmc8380_3_gpios {
- edp_bl_en: edp-bl-en-state {
- pins = "gpio4";
+ edp_bl_reg_en: edp-bl-reg-en-state {
+ pins = "gpio10";
function = "normal";
- power-source = <1>;
- input-disable;
- output-enable;
};
};
-&qupv3_0 {
- status = "okay";
-};
-
-&qupv3_1 {
- status = "okay";
-};
-
-&qupv3_2 {
- status = "okay";
-};
-
-&remoteproc_adsp {
- firmware-name = "qcom/x1e80100/LENOVO/21N1/qcadsp8380.mbn",
- "qcom/x1e80100/LENOVO/21N1/adsp_dtbs.elf";
-
- status = "okay";
-};
-
-&remoteproc_cdsp {
- firmware-name = "qcom/x1e80100/LENOVO/21N1/qccdsp8380.mbn",
- "qcom/x1e80100/LENOVO/21N1/cdsp_dtbs.elf";
-
- status = "okay";
-};
-
-&smb2360_0 {
- status = "okay";
-};
-
-&smb2360_0_eusb2_repeater {
- vdd18-supply = <&vreg_l3d_1p8>;
- vdd3-supply = <&vreg_l2b_3p0>;
-};
-
-&smb2360_1 {
- status = "okay";
-};
-
-&smb2360_1_eusb2_repeater {
- vdd18-supply = <&vreg_l3d_1p8>;
- vdd3-supply = <&vreg_l14b_3p0>;
-};
-
-&swr0 {
- status = "okay";
-
- pinctrl-0 = <&wsa_swr_active>, <&spkr_01_sd_n_active>;
- pinctrl-names = "default";
-
- /* WSA8845, Left Speaker */
- left_spkr: speaker@0,0 {
- compatible = "sdw20217020400";
- reg = <0 0>;
- reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
- #sound-dai-cells = <0>;
- sound-name-prefix = "SpkrLeft";
- vdd-1p8-supply = <&vreg_l15b_1p8>;
- vdd-io-supply = <&vreg_l12b_1p2>;
- qcom,port-mapping = <1 2 3 7 10 13>;
- };
-
- /* WSA8845, Right Speaker */
- right_spkr: speaker@0,1 {
- compatible = "sdw20217020400";
- reg = <0 1>;
- reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
- #sound-dai-cells = <0>;
- sound-name-prefix = "SpkrRight";
- vdd-1p8-supply = <&vreg_l15b_1p8>;
- vdd-io-supply = <&vreg_l12b_1p2>;
- qcom,port-mapping = <4 5 6 7 11 13>;
- };
-};
-
-&swr1 {
- status = "okay";
-
- /* WCD9385 RX */
- wcd_rx: codec@0,4 {
- compatible = "sdw20217010d00";
- reg = <0 4>;
- qcom,rx-port-mapping = <1 2 3 4 5>;
- };
-};
-
-&swr2 {
- status = "okay";
-
- /* WCD9385 TX */
- wcd_tx: codec@0,3 {
- compatible = "sdw20217010d00";
- reg = <0 3>;
- qcom,tx-port-mapping = <2 2 3 4>;
- };
-};
-
-&tlmm {
- gpio-reserved-ranges = <34 2>, /* Unused */
- <44 4>, /* SPI (TPM) */
- <72 2>, /* Secure EC I2C connection (?) */
- <238 1>; /* UFS Reset */
-
- eusb3_reset_n: eusb3-reset-n-state {
- pins = "gpio6";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- output-low;
- };
-
- eusb5_reset_n: eusb5-reset-n-state {
- pins = "gpio7";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- output-low;
- };
-
- eusb6_reset_n: eusb6-reset-n-state {
- pins = "gpio184";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- output-low;
+&pmk8550_gpios {
+ edp_bl_pwm: edp-bl-pwm-state {
+ pins = "gpio5";
+ function = "func3";
};
-
- tpad_default: tpad-default-state {
- pins = "gpio3";
- function = "gpio";
- bias-pull-up;
- };
-
- nvme_reg_en: nvme-reg-en-state {
- pins = "gpio18";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- };
-
- ts0_default: ts0-default-state {
- reset-n-pins {
- pins = "gpio48";
- function = "gpio";
- output-high;
- drive-strength = <16>;
- };
-
- int-n-pins {
- pins = "gpio51";
- function = "gpio";
- bias-disable;
- };
- };
-
- kybd_default: kybd-default-state {
- pins = "gpio67";
- function = "gpio";
- bias-disable;
- };
-
- edp_reg_en: edp-reg-en-state {
- pins = "gpio70";
- function = "gpio";
- drive-strength = <16>;
- bias-disable;
- };
-
- hall_int_n_default: hall-int-n-state {
- pins = "gpio92";
- function = "gpio";
- bias-disable;
- };
-
- pcie4_default: pcie4-default-state {
- clkreq-n-pins {
- pins = "gpio147";
- function = "pcie4_clk";
- drive-strength = <2>;
- bias-pull-up;
- };
-
- perst-n-pins {
- pins = "gpio146";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- };
-
- wake-n-pins {
- pins = "gpio148";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
-
- pcie6a_default: pcie6a-default-state {
- clkreq-n-pins {
- pins = "gpio153";
- function = "pcie6a_clk";
- drive-strength = <2>;
- bias-pull-up;
- };
-
- perst-n-pins {
- pins = "gpio152";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- };
-
- wake-n-pins {
- pins = "gpio154";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
-
- wcd_default: wcd-reset-n-active-state {
- pins = "gpio191";
- function = "gpio";
- drive-strength = <16>;
- bias-disable;
- output-low;
- };
-};
-
-&usb_1_ss0_hsphy {
- vdd-supply = <&vreg_l3j_0p8>;
- vdda12-supply = <&vreg_l2j_1p2>;
-
- phys = <&smb2360_0_eusb2_repeater>;
-
- status = "okay";
-};
-
-&usb_1_ss0_qmpphy {
- vdda-phy-supply = <&vreg_l2j_1p2>;
- vdda-pll-supply = <&vreg_l1j_0p8>;
-
- status = "okay";
-};
-
-&usb_1_ss0 {
- status = "okay";
-};
-
-&usb_1_ss0_dwc3 {
- dr_mode = "host";
-};
-
-&usb_1_ss0_dwc3_hs {
- remote-endpoint = <&pmic_glink_ss0_hs_in>;
};
-&usb_1_ss0_qmpphy_out {
- remote-endpoint = <&pmic_glink_ss0_ss_in>;
-};
-
-&usb_1_ss1_hsphy {
- vdd-supply = <&vreg_l3j_0p8>;
- vdda12-supply = <&vreg_l2j_1p2>;
-
- phys = <&smb2360_1_eusb2_repeater>;
-
- status = "okay";
-};
-
-&usb_1_ss1_qmpphy {
- vdda-phy-supply = <&vreg_l2j_1p2>;
- vdda-pll-supply = <&vreg_l2d_0p9>;
-
- status = "okay";
-};
-
-&usb_1_ss1 {
- status = "okay";
-};
-
-&usb_1_ss1_dwc3 {
- dr_mode = "host";
-};
-
-&usb_1_ss1_dwc3_hs {
- remote-endpoint = <&pmic_glink_ss1_hs_in>;
-};
-
-&usb_1_ss1_qmpphy_out {
- remote-endpoint = <&pmic_glink_ss1_ss_in>;
-};
-
-&usb_2 {
- status = "okay";
-};
-
-&usb_2_dwc3 {
- dr_mode = "host";
-};
-
-&usb_2_hsphy {
- vdd-supply = <&vreg_l2e_0p8>;
- vdda12-supply = <&vreg_l3e_1p2>;
-
- phys = <&eusb5_repeater>;
-
- status = "okay";
-};
-
-&usb_mp {
- status = "okay";
-};
-
-&usb_mp_hsphy0 {
- vdd-supply = <&vreg_l2e_0p8>;
- vdda12-supply = <&vreg_l3e_1p2>;
-
- phys = <&eusb6_repeater>;
-
- status = "okay";
-};
-
-&usb_mp_hsphy1 {
- vdd-supply = <&vreg_l2e_0p8>;
- vdda12-supply = <&vreg_l3e_1p2>;
-
- phys = <&eusb3_repeater>;
-
- status = "okay";
-};
-
-&usb_mp_qmpphy0 {
- vdda-phy-supply = <&vreg_l3e_1p2>;
- vdda-pll-supply = <&vreg_l3c_0p8>;
-
- status = "okay";
-};
-
-&usb_mp_qmpphy1 {
- vdda-phy-supply = <&vreg_l3e_1p2>;
- vdda-pll-supply = <&vreg_l3c_0p8>;
-
+&pmk8550_pwm {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dtsi b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dtsi
new file mode 100644
index 000000000000..ac1dddf27da3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dtsi
@@ -0,0 +1,1576 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+#include "x1e80100.dtsi"
+#include "x1e80100-pmics.dtsi"
+
+/ {
+ model = "Lenovo ThinkPad T14s Gen 6";
+ compatible = "lenovo,thinkpad-t14s", "qcom,x1e78100", "qcom,x1e80100";
+ chassis-type = "laptop";
+
+ wcd938x: audio-codec {
+ compatible = "qcom,wcd9385-codec";
+
+ pinctrl-0 = <&wcd_default>;
+ pinctrl-names = "default";
+
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
+ qcom,rx-device = <&wcd_rx>;
+ qcom,tx-device = <&wcd_tx>;
+
+ reset-gpios = <&tlmm 191 GPIO_ACTIVE_LOW>;
+ mux-controls = <&us_euro_mux_ctrl>;
+
+ vdd-buck-supply = <&vreg_l15b_1p8>;
+ vdd-rxtx-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l15b_1p8>;
+ vdd-mic-bias-supply = <&vreg_bob1>;
+
+ #sound-dai-cells = <1>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&hall_int_n_default>;
+ pinctrl-names = "default";
+
+ switch-lid {
+ gpios = <&tlmm 92 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ wakeup-source;
+ wakeup-event-action = <EV_ACT_DEASSERTED>;
+ };
+ };
+
+ pmic-glink {
+ compatible = "qcom,x1e80100-pmic-glink",
+ "qcom,sm8550-pmic-glink",
+ "qcom,pmic-glink";
+ orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>,
+ <&tlmm 123 GPIO_ACTIVE_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Display-adjacent port */
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss0_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss0_ss_in: endpoint {
+ remote-endpoint = <&retimer_ss0_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss0_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss0_con_sbu_out>;
+ };
+ };
+ };
+ };
+
+ /* User-adjacent port */
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss1_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss1_ss_in: endpoint {
+ remote-endpoint = <&retimer_ss1_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss1_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss1_con_sbu_out>;
+ };
+ };
+ };
+ };
+ };
+
+ /* two muxes together support CTIA and OMTP switching */
+ us_euro_mux_ctrl: mux-controller {
+ compatible = "gpio-mux";
+ pinctrl-0 = <&us_euro_hs_sel>;
+ pinctrl-names = "default";
+ mux-supply = <&vreg_l16b_2p5>;
+ #mux-control-cells = <0>;
+ mux-gpios = <&tlmm 68 GPIO_ACTIVE_HIGH>;
+ };
+
+ reserved-memory {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ size = <0x0 0x8000000>;
+ reusable;
+ linux,cma-default;
+ };
+ };
+
+ vreg_edp_3p3: regulator-edp-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_EDP_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 70 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&edp_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_misc_3p3: regulator-misc-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VCC3B";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pm8550ve_8_gpios 6 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&misc_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vreg_nvme: regulator-nvme {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_NVME_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&nvme_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_1p15: regulator-rtmr0-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&pmc8380_5_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_pwr_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_1p8: regulator-rtmr0-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8550ve_9_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_3p3: regulator-rtmr0-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pm8550_gpios 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p15: regulator-rtmr1-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&tlmm 188 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb1_pwr_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p8: regulator-rtmr1-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 175 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb1_pwr_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_3p3: regulator-rtmr1-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 186 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb1_pwr_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vph_pwr: regulator-vph-pwr {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vreg_wwan: regulator-wwan {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VCC3B_WAN_RCM";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 221 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&wwan_sw_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ sound {
+ compatible = "qcom,x1e80100-sndcard";
+ model = "X1E80100-LENOVO-Thinkpad-T14s";
+ audio-routing = "SpkrLeft IN", "WSA WSA_SPK1 OUT",
+ "SpkrRight IN", "WSA WSA_SPK2 OUT",
+ "IN1_HPHL", "HPHL_OUT",
+ "IN2_HPHR", "HPHR_OUT",
+ "AMIC2", "MIC BIAS2",
+ "VA DMIC0", "MIC BIAS1",
+ "VA DMIC1", "MIC BIAS1",
+ "VA DMIC0", "VA MIC BIAS1",
+ "VA DMIC1", "VA MIC BIAS1",
+ "TX SWR_INPUT1", "ADC2_OUTPUT";
+
+ wcd-playback-dai-link {
+ link-name = "WCD Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai RX_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 0>, <&swr1 0>, <&lpass_rxmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wcd-capture-dai-link {
+ link-name = "WCD Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 1>, <&swr2 1>, <&lpass_txmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wsa-dai-link {
+ link-name = "WSA Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&left_spkr>, <&right_spkr>, <&swr0 0>, <&lpass_wsamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ va-dai-link {
+ link-name = "VA Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai VA_CODEC_DMA_TX_0>;
+ };
+
+ codec {
+ sound-dai = <&lpass_vamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-bob1-supply = <&vph_pwr>;
+ vdd-bob2-supply = <&vph_pwr>;
+ vdd-l1-l4-l10-supply = <&vreg_s4c_1p8>;
+ vdd-l2-l13-l14-supply = <&vreg_bob1>;
+ vdd-l5-l16-supply = <&vreg_bob1>;
+ vdd-l6-l7-supply = <&vreg_bob2>;
+ vdd-l8-l9-supply = <&vreg_bob1>;
+ vdd-l12-supply = <&vreg_s5j_1p2>;
+ vdd-l15-supply = <&vreg_s4c_1p8>;
+ vdd-l17-supply = <&vreg_bob2>;
+
+ vreg_bob1: bob1 {
+ regulator-name = "vreg_bob1";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob2: bob2 {
+ regulator-name = "vreg_bob2";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1b_1p8: ldo1 {
+ regulator-name = "vreg_l1b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p0: ldo2 {
+ regulator-name = "vreg_l2b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4b_1p8: ldo4 {
+ regulator-name = "vreg_l4b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b_1p8: ldo6 {
+ regulator-name = "vreg_l6b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8b_3p0: ldo8 {
+ regulator-name = "vreg_l8b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9b_2p9: ldo9 {
+ regulator-name = "vreg_l9b_2p9";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10b_1p8: ldo10 {
+ regulator-name = "vreg_l10b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12b_1p2: ldo12 {
+ regulator-name = "vreg_l12b_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
+ };
+
+ vreg_l13b_3p0: ldo13 {
+ regulator-name = "vreg_l13b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b_3p0: ldo14 {
+ regulator-name = "vreg_l14b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15b_1p8: ldo15 {
+ regulator-name = "vreg_l15b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
+ };
+
+ vreg_l16b_2p5: ldo16 {
+ regulator-name = "vreg_l16b_2p5";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17b_2p5: ldo17 {
+ regulator-name = "vreg_l17b_2p5";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s4-supply = <&vph_pwr>;
+
+ vreg_s4c_1p8: smps4 {
+ regulator-name = "vreg_s4c_1p8";
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c_1p2: ldo1 {
+ regulator-name = "vreg_l1c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c_0p8: ldo2 {
+ regulator-name = "vreg_l2c_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c_0p8: ldo3 {
+ regulator-name = "vreg_l3c_0p8";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "d";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s4c_1p8>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_l1d_0p8: ldo1 {
+ regulator-name = "vreg_l1d_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2d_0p9: ldo2 {
+ regulator-name = "vreg_l2d_0p9";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3d_1p8: ldo3 {
+ regulator-name = "vreg_l3d_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-3 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+
+ vreg_l2e_0p8: ldo2 {
+ regulator-name = "vreg_l2e_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3e_1p2: ldo3 {
+ regulator-name = "vreg_l3e_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_s1f_0p7: smps1 {
+ regulator-name = "vreg_s1f_0p7";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-6 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "i";
+
+ vdd-l1-supply = <&vreg_s4c_1p8>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+
+ vreg_l1i_1p8: ldo1 {
+ regulator-name = "vreg_l1i_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2i_1p2: ldo2 {
+ regulator-name = "vreg_l2i_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3i_0p8: ldo3 {
+ regulator-name = "vreg_l3i_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-7 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "j";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s5-supply = <&vph_pwr>;
+
+ vreg_s5j_1p2: smps5 {
+ regulator-name = "vreg_s5j_1p2";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1j_0p8: ldo1 {
+ regulator-name = "vreg_l1j_0p8";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2j_1p2: ldo2 {
+ regulator-name = "vreg_l2j_1p2";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1256000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3j_0p8: ldo3 {
+ regulator-name = "vreg_l3j_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ firmware-name = "qcom/x1e80100/LENOVO/21N1/qcdxkmsuc8380.mbn";
+ };
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+
+ pinctrl-0 = <&qup_i2c0_data_clk>, <&tpad_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ /* ELAN06E2 or ELAN06E3 */
+ touchpad@15 {
+ compatible = "hid-over-i2c";
+ reg = <0x15>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 3 IRQ_TYPE_LEVEL_LOW>;
+
+ vdd-supply = <&vreg_misc_3p3>;
+ vddl-supply = <&vreg_l12b_1p2>;
+
+ wakeup-source;
+ };
+
+ /* SYNA8022 or SYNA8024 */
+ touchpad@2c {
+ compatible = "hid-over-i2c";
+ reg = <0x2c>;
+
+ hid-descr-addr = <0x20>;
+ interrupts-extended = <&tlmm 3 IRQ_TYPE_LEVEL_LOW>;
+
+ vdd-supply = <&vreg_misc_3p3>;
+ vddl-supply = <&vreg_l12b_1p2>;
+
+ wakeup-source;
+ };
+
+ /* ELAN06F1 or SYNA06F2 */
+ keyboard@3a {
+ compatible = "hid-over-i2c";
+ reg = <0x3a>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 67 IRQ_TYPE_LEVEL_LOW>;
+
+ vdd-supply = <&vreg_misc_3p3>;
+ vddl-supply = <&vreg_l15b_1p8>;
+
+ pinctrl-0 = <&kybd_default>;
+ pinctrl-names = "default";
+
+ wakeup-source;
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x08>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK3>;
+
+ vdd-supply = <&vreg_rtmr0_1p15>;
+ vdd33-supply = <&vreg_rtmr0_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr0_3p3>;
+ vddar-supply = <&vreg_rtmr0_1p15>;
+ vddat-supply = <&vreg_rtmr0_1p15>;
+ vddio-supply = <&vreg_rtmr0_1p8>;
+
+ reset-gpios = <&pm8550_gpios 10 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr0_default>;
+ pinctrl-names = "default";
+
+ orientation-switch;
+ retimer-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss0_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss0_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_con_sbu_in>;
+ };
+ };
+ };
+ };
+};
+
+&i2c5 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ eusb5_repeater: redriver@43 {
+ compatible = "nxp,ptn3222";
+ reg = <0x43>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 7 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb5_reset_n>;
+ pinctrl-names = "default";
+ };
+
+ eusb3_repeater: redriver@47 {
+ compatible = "nxp,ptn3222";
+ reg = <0x47>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb3_reset_n>;
+ pinctrl-names = "default";
+ };
+
+ eusb6_repeater: redriver@4f {
+ compatible = "nxp,ptn3222";
+ reg = <0x4f>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 184 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb6_reset_n>;
+ pinctrl-names = "default";
+ };
+};
+
+&i2c7 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x8>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK4>;
+
+ vdd-supply = <&vreg_rtmr1_1p15>;
+ vdd33-supply = <&vreg_rtmr1_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr1_3p3>;
+ vddar-supply = <&vreg_rtmr1_1p15>;
+ vddat-supply = <&vreg_rtmr1_1p15>;
+ vddio-supply = <&vreg_rtmr1_1p8>;
+
+ reset-gpios = <&tlmm 176 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr1_default>;
+ pinctrl-names = "default";
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss1_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss1_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_con_sbu_in>;
+ };
+ };
+ };
+ };
+};
+
+&i2c8 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ /* ILIT2911 or GTCH1563 */
+ touchscreen@10 {
+ compatible = "hid-over-i2c";
+ reg = <0x10>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 51 IRQ_TYPE_LEVEL_LOW>;
+
+ vdd-supply = <&vreg_misc_3p3>;
+ vddl-supply = <&vreg_l15b_1p8>;
+
+ pinctrl-0 = <&ts0_default>;
+ pinctrl-names = "default";
+ };
+
+ /* TODO: second-sourced touchscreen @ 0x41 */
+};
+
+&lpass_tlmm {
+ spkr_01_sd_n_active: spkr-01-sd-n-active-state {
+ pins = "gpio12";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+};
+
+&lpass_vamacro {
+ pinctrl-0 = <&dmic01_default>;
+ pinctrl-names = "default";
+
+ vdd-micb-supply = <&vreg_l1b_1p8>;
+ qcom,dmic-sample-rate = <4800000>;
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dp0 {
+ status = "okay";
+};
+
+&mdss_dp0_out {
+ data-lanes = <0 1>;
+};
+
+&mdss_dp1 {
+ status = "okay";
+};
+
+&mdss_dp1_out {
+ data-lanes = <0 1>;
+};
+
+&mdss_dp3 {
+ /delete-property/ #sound-dai-cells;
+
+ status = "okay";
+
+ aux-bus {
+ panel: panel {
+ compatible = "edp-panel";
+ power-supply = <&vreg_edp_3p3>;
+
+ port {
+ edp_panel_in: endpoint {
+ remote-endpoint = <&mdss_dp3_out>;
+ };
+ };
+ };
+ };
+
+ ports {
+ port@1 {
+ reg = <1>;
+
+ mdss_dp3_out: endpoint {
+ data-lanes = <0 1 2 3>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+
+ remote-endpoint = <&edp_panel_in>;
+ };
+ };
+ };
+};
+
+&mdss_dp3_phy {
+ vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-pll-supply = <&vreg_l2j_1p2>;
+
+ status = "okay";
+};
+
+&pcie4 {
+ perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&pcie4_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie4_phy {
+ vdda-phy-supply = <&vreg_l3i_0p8>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+
+ status = "okay";
+};
+
+&pcie5 {
+ perst-gpios = <&tlmm 149 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 151 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_wwan>;
+
+ pinctrl-0 = <&pcie5_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie5_phy {
+ vdda-phy-supply = <&vreg_l3i_0p8>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+
+ status = "okay";
+};
+
+&pcie6a {
+ perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_nvme>;
+
+ pinctrl-0 = <&pcie6a_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie6a_phy {
+ vdda-phy-supply = <&vreg_l1d_0p8>;
+ vdda-pll-supply = <&vreg_l2j_1p2>;
+
+ status = "okay";
+};
+
+&pm8550_gpios {
+ rtmr0_default: rtmr0-reset-n-active-state {
+ pins = "gpio10";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+
+ usb0_3p3_reg_en: usb0-3p3-reg-en-state {
+ pins = "gpio11";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+};
+
+&pm8550ve_8_gpios {
+ misc_3p3_reg_en: misc-3p3-reg-en-state {
+ pins = "gpio6";
+ function = "normal";
+ bias-disable;
+ drive-push-pull;
+ input-disable;
+ output-enable;
+ power-source = <1>; /* 1.8 V */
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ };
+};
+
+&pm8550ve_9_gpios {
+ usb0_1p8_reg_en: usb0-1p8-reg-en-state {
+ pins = "gpio8";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+};
+
+&pmc8380_3_gpios {
+ edp_bl_en: edp-bl-en-state {
+ pins = "gpio4";
+ function = "normal";
+ power-source = <1>;
+ input-disable;
+ output-enable;
+ };
+};
+
+&pmc8380_5_gpios {
+ usb0_pwr_1p15_reg_en: usb0-pwr-1p15-reg-en-state {
+ pins = "gpio8";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+};
+
+&qupv3_0 {
+ status = "okay";
+};
+
+&qupv3_1 {
+ status = "okay";
+};
+
+&qupv3_2 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/x1e80100/LENOVO/21N1/qcadsp8380.mbn",
+ "qcom/x1e80100/LENOVO/21N1/adsp_dtbs.elf";
+
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/x1e80100/LENOVO/21N1/qccdsp8380.mbn",
+ "qcom/x1e80100/LENOVO/21N1/cdsp_dtbs.elf";
+
+ status = "okay";
+};
+
+&smb2360_0 {
+ status = "okay";
+};
+
+&smb2360_0_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l2b_3p0>;
+};
+
+&smb2360_1 {
+ status = "okay";
+};
+
+&smb2360_1_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l14b_3p0>;
+};
+
+&swr0 {
+ status = "okay";
+
+ pinctrl-0 = <&wsa_swr_active>, <&spkr_01_sd_n_active>;
+ pinctrl-names = "default";
+
+ /* WSA8845, Left Speaker */
+ left_spkr: speaker@0,0 {
+ compatible = "sdw20217020400";
+ reg = <0 0>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SpkrLeft";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <1 2 3 7 10 13>;
+ };
+
+ /* WSA8845, Right Speaker */
+ right_spkr: speaker@0,1 {
+ compatible = "sdw20217020400";
+ reg = <0 1>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SpkrRight";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <4 5 6 7 11 13>;
+ };
+};
+
+&swr1 {
+ status = "okay";
+
+ /* WCD9385 RX */
+ wcd_rx: codec@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+ qcom,rx-port-mapping = <1 2 3 4 5>;
+ };
+};
+
+&swr2 {
+ status = "okay";
+
+ /* WCD9385 TX */
+ wcd_tx: codec@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+ qcom,tx-port-mapping = <2 2 3 4>;
+ };
+};
+
+&tlmm {
+ gpio-reserved-ranges = <34 2>, /* Unused */
+ <44 4>, /* SPI (TPM) */
+ <72 2>, /* Secure EC I2C connection (?) */
+ <238 1>; /* UFS Reset */
+
+ eusb3_reset_n: eusb3-reset-n-state {
+ pins = "gpio6";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
+ eusb5_reset_n: eusb5-reset-n-state {
+ pins = "gpio7";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
+ eusb6_reset_n: eusb6-reset-n-state {
+ pins = "gpio184";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
+ tpad_default: tpad-default-state {
+ pins = "gpio3";
+ function = "gpio";
+ bias-pull-up;
+ };
+
+ nvme_reg_en: nvme-reg-en-state {
+ pins = "gpio18";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ ts0_default: ts0-default-state {
+ reset-n-pins {
+ pins = "gpio48";
+ function = "gpio";
+ output-high;
+ drive-strength = <16>;
+ };
+
+ int-n-pins {
+ pins = "gpio51";
+ function = "gpio";
+ bias-disable;
+ };
+ };
+
+ kybd_default: kybd-default-state {
+ pins = "gpio67";
+ function = "gpio";
+ bias-disable;
+ };
+
+ edp_reg_en: edp-reg-en-state {
+ pins = "gpio70";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ hall_int_n_default: hall-int-n-state {
+ pins = "gpio92";
+ function = "gpio";
+ bias-disable;
+ };
+
+ pcie4_default: pcie4-default-state {
+ clkreq-n-pins {
+ pins = "gpio147";
+ function = "pcie4_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio146";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio148";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie5_default: pcie5-default-state {
+ clkreq-n-pins {
+ pins = "gpio150";
+ function = "pcie5_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio149";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio151";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie6a_default: pcie6a-default-state {
+ clkreq-n-pins {
+ pins = "gpio153";
+ function = "pcie6a_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio152";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio154";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ rtmr1_default: rtmr1-reset-n-active-state {
+ pins = "gpio176";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ us_euro_hs_sel: us-euro-hs-sel-state {
+ pins = "gpio68";
+ function = "gpio";
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+
+ usb1_pwr_1p15_reg_en: usb1-pwr-1p15-reg-en-state {
+ pins = "gpio188";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb1_pwr_1p8_reg_en: usb1-pwr-1p8-reg-en-state {
+ pins = "gpio175";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb1_pwr_3p3_reg_en: usb1-pwr-3p3-reg-en-state {
+ pins = "gpio186";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wcd_default: wcd-reset-n-active-state {
+ pins = "gpio191";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+
+ wwan_sw_en: wwan-sw-en-state {
+ pins = "gpio221";
+ function = "gpio";
+ drive-strength = <4>;
+ bias-disable;
+ };
+};
+
+&usb_1_ss0_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_0_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss0_qmpphy {
+ vdda-phy-supply = <&vreg_l2j_1p2>;
+ vdda-pll-supply = <&vreg_l1j_0p8>;
+
+ status = "okay";
+};
+
+&usb_1_ss0 {
+ status = "okay";
+};
+
+&usb_1_ss0_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss0_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss0_hs_in>;
+};
+
+&usb_1_ss0_qmpphy_out {
+ remote-endpoint = <&retimer_ss0_ss_in>;
+};
+
+&usb_1_ss1_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_1_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss1_qmpphy {
+ vdda-phy-supply = <&vreg_l2j_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
+ status = "okay";
+};
+
+&usb_1_ss1 {
+ status = "okay";
+};
+
+&usb_1_ss1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss1_hs_in>;
+};
+
+&usb_1_ss1_qmpphy_out {
+ remote-endpoint = <&retimer_ss1_ss_in>;
+};
+
+&usb_2 {
+ status = "okay";
+};
+
+&usb_2_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_2_hsphy {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb5_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp {
+ status = "okay";
+};
+
+&usb_mp_hsphy0 {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb6_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp_hsphy1 {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb3_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy0 {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l3c_0p8>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy1 {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l3c_0p8>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
index 53781f9b13af..71b2cc6c392f 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
@@ -19,6 +19,10 @@
compatible = "asus,vivobook-s15", "qcom,x1e80100";
chassis-type = "laptop";
+ aliases {
+ serial1 = &uart14;
+ };
+
gpio-keys {
compatible = "gpio-keys";
pinctrl-0 = <&hall_int_n_default>;
@@ -153,6 +157,107 @@
regulator-always-on;
regulator-boot-on;
};
+
+ /*
+ * TODO: These two regulators are actually part of the removable M.2
+ * card and not the CRD mainboard. Need to describe this differently.
+ * Functionally it works correctly, because all we need to do is to
+ * turn on the actual 3.3V supply above.
+ */
+ vreg_wcn_0p95: regulator-wcn-0p95 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_WCN_0P95";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <950000>;
+
+ vin-supply = <&vreg_wcn_3p3>;
+ };
+
+ vreg_wcn_1p9: regulator-wcn-1p9 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_WCN_1P9";
+ regulator-min-microvolt = <1900000>;
+ regulator-max-microvolt = <1900000>;
+
+ vin-supply = <&vreg_wcn_3p3>;
+ };
+
+ vreg_wcn_3p3: regulator-wcn-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_WCN_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 214 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&wcn_sw_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ wcn7850-pmu {
+ compatible = "qcom,wcn7850-pmu";
+
+ vdd-supply = <&vreg_wcn_0p95>;
+ vddio-supply = <&vreg_l15b_1p8>;
+ vddaon-supply = <&vreg_wcn_0p95>;
+ vdddig-supply = <&vreg_wcn_0p95>;
+ vddrfa1p2-supply = <&vreg_wcn_1p9>;
+ vddrfa1p8-supply = <&vreg_wcn_1p9>;
+
+ wlan-enable-gpios = <&tlmm 117 GPIO_ACTIVE_HIGH>;
+ bt-enable-gpios = <&tlmm 116 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&wcn_wlan_en>, <&wcn_bt_en>;
+ pinctrl-names = "default";
+
+ regulators {
+ vreg_pmu_rfa_cmn: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn";
+ };
+
+ vreg_pmu_aon_0p59: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p59";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p85: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p85";
+ };
+
+ vreg_pmu_btcmx_0p85: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p85";
+ };
+
+ vreg_pmu_rfa_0p8: ldo5 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo6 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p8: ldo7 {
+ regulator-name = "vreg_pmu_rfa_1p8";
+ };
+
+ vreg_pmu_pcie_0p9: ldo8 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_pcie_1p8: ldo9 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+ };
+ };
};
&apps_rsc {
@@ -192,12 +297,33 @@
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
+ vreg_l4b_1p8: ldo4 {
+ regulator-name = "vreg_l4b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13b_3p0: ldo13 {
+ regulator-name = "vreg_l13b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
vreg_l14b_3p0: ldo14 {
regulator-name = "vreg_l14b_3p0";
regulator-min-microvolt = <3072000>;
regulator-max-microvolt = <3072000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
+
+ vreg_l15b_1p8: ldo15 {
+ regulator-name = "vreg_l15b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
};
regulators-1 {
@@ -209,6 +335,13 @@
vdd-l3-supply = <&vreg_s1f_0p7>;
vdd-s4-supply = <&vph_pwr>;
+ vreg_l3c_0p8: ldo3 {
+ regulator-name = "vreg_l3c_0p8";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
vreg_s4c_1p8: smps4 {
regulator-name = "vreg_s4c_1p8";
regulator-min-microvolt = <1856000>;
@@ -330,8 +463,8 @@
vreg_l2j_1p2: ldo2 {
regulator-name = "vreg_l2j_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1256000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
@@ -401,7 +534,49 @@
wakeup-source;
};
- /* EC? @ 0x5b, 0x76 */
+ eusb5_repeater: redriver@43 {
+ compatible = "nxp,ptn3222";
+ reg = <0x43>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 7 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb5_reset_n>;
+ pinctrl-names = "default";
+ };
+
+ eusb3_repeater: redriver@47 {
+ compatible = "nxp,ptn3222";
+ reg = <0x47>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb3_reset_n>;
+ pinctrl-names = "default";
+ };
+
+ eusb6_repeater: redriver@4f {
+ compatible = "nxp,ptn3222";
+ reg = <0x4f>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 184 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb6_reset_n>;
+ pinctrl-names = "default";
+ };
+
+ /* EC @ 0x76 */
};
&i2c7 {
@@ -416,7 +591,6 @@
};
&mdss_dp3 {
- compatible = "qcom,x1e80100-dp";
/delete-property/ #sound-dai-cells;
status = "okay";
@@ -476,6 +650,23 @@
status = "okay";
};
+&pcie4_port0 {
+ wifi@0 {
+ compatible = "pci17cb,1107";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
+ vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+ vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+ };
+};
+
&pcie6a {
perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
@@ -563,6 +754,30 @@
bias-disable;
};
+ eusb3_reset_n: eusb3-reset-n-state {
+ pins = "gpio6";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ output-low;
+ };
+
+ eusb5_reset_n: eusb5-reset-n-state {
+ pins = "gpio7";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ output-low;
+ };
+
+ eusb6_reset_n: eusb6-reset-n-state {
+ pins = "gpio184";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ output-low;
+ };
+
hall_int_n_default: hall-int-n-state {
pins = "gpio92";
function = "gpio";
@@ -633,6 +848,44 @@
function = "gpio";
bias-disable;
};
+
+ wcn_bt_en: wcn-bt-en-state {
+ pins = "gpio116";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-pull-down;
+ };
+
+ wcn_sw_en: wcn-sw-en-state {
+ pins = "gpio214";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ wcn_wlan_en: wcn-wlan-en-state {
+ pins = "gpio117";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+};
+
+&uart14 {
+ status = "okay";
+
+ bluetooth {
+ compatible = "qcom,wcn7850-bt";
+ max-speed = <3200000>;
+
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
+ };
};
&usb_1_ss0_hsphy {
@@ -698,3 +951,56 @@
&usb_1_ss1_qmpphy_out {
remote-endpoint = <&pmic_glink_ss1_ss_in>;
};
+
+&usb_2 {
+ status = "okay";
+};
+
+&usb_2_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_2_hsphy {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb5_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp {
+ status = "okay";
+};
+
+&usb_mp_hsphy0 {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb6_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp_hsphy1 {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb3_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy0 {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l3c_0p8>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy1 {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l3c_0p8>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
index ff5b3472fafd..976b8e44b576 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
@@ -5,1278 +5,14 @@
/dts-v1/;
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/gpio-keys.h>
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
-#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
-
#include "x1e80100.dtsi"
-#include "x1e80100-pmics.dtsi"
+#include "x1-crd.dtsi"
/ {
model = "Qualcomm Technologies, Inc. X1E80100 CRD";
compatible = "qcom,x1e80100-crd", "qcom,x1e80100";
-
- aliases {
- serial0 = &uart21;
- };
-
- wcd938x: audio-codec {
- compatible = "qcom,wcd9385-codec";
-
- pinctrl-names = "default";
- pinctrl-0 = <&wcd_default>;
-
- qcom,micbias1-microvolt = <1800000>;
- qcom,micbias2-microvolt = <1800000>;
- qcom,micbias3-microvolt = <1800000>;
- qcom,micbias4-microvolt = <1800000>;
- qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
- qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
- qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
- qcom,rx-device = <&wcd_rx>;
- qcom,tx-device = <&wcd_tx>;
-
- reset-gpios = <&tlmm 191 GPIO_ACTIVE_LOW>;
-
- vdd-buck-supply = <&vreg_l15b_1p8>;
- vdd-rxtx-supply = <&vreg_l15b_1p8>;
- vdd-io-supply = <&vreg_l15b_1p8>;
- vdd-mic-bias-supply = <&vreg_bob1>;
-
- #sound-dai-cells = <1>;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
- gpio-keys {
- compatible = "gpio-keys";
-
- pinctrl-0 = <&hall_int_n_default>;
- pinctrl-names = "default";
-
- switch-lid {
- gpios = <&tlmm 92 GPIO_ACTIVE_LOW>;
- linux,input-type = <EV_SW>;
- linux,code = <SW_LID>;
- wakeup-source;
- wakeup-event-action = <EV_ACT_DEASSERTED>;
- };
- };
-
- pmic-glink {
- compatible = "qcom,x1e80100-pmic-glink",
- "qcom,sm8550-pmic-glink",
- "qcom,pmic-glink";
- #address-cells = <1>;
- #size-cells = <0>;
- orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>,
- <&tlmm 123 GPIO_ACTIVE_HIGH>,
- <&tlmm 125 GPIO_ACTIVE_HIGH>;
-
- /* Left-side rear port */
- connector@0 {
- compatible = "usb-c-connector";
- reg = <0>;
- power-role = "dual";
- data-role = "dual";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- pmic_glink_ss0_hs_in: endpoint {
- remote-endpoint = <&usb_1_ss0_dwc3_hs>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- pmic_glink_ss0_ss_in: endpoint {
- remote-endpoint = <&usb_1_ss0_qmpphy_out>;
- };
- };
- };
- };
-
- /* Left-side front port */
- connector@1 {
- compatible = "usb-c-connector";
- reg = <1>;
- power-role = "dual";
- data-role = "dual";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- pmic_glink_ss1_hs_in: endpoint {
- remote-endpoint = <&usb_1_ss1_dwc3_hs>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- pmic_glink_ss1_ss_in: endpoint {
- remote-endpoint = <&usb_1_ss1_qmpphy_out>;
- };
- };
- };
- };
-
- /* Right-side port */
- connector@2 {
- compatible = "usb-c-connector";
- reg = <2>;
- power-role = "dual";
- data-role = "dual";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- pmic_glink_ss2_hs_in: endpoint {
- remote-endpoint = <&usb_1_ss2_dwc3_hs>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- pmic_glink_ss2_ss_in: endpoint {
- remote-endpoint = <&usb_1_ss2_qmpphy_out>;
- };
- };
- };
- };
- };
-
- reserved-memory {
- linux,cma {
- compatible = "shared-dma-pool";
- size = <0x0 0x8000000>;
- reusable;
- linux,cma-default;
- };
- };
-
- sound {
- compatible = "qcom,x1e80100-sndcard";
- model = "X1E80100-CRD";
- audio-routing = "WooferLeft IN", "WSA WSA_SPK1 OUT",
- "TweeterLeft IN", "WSA WSA_SPK2 OUT",
- "WooferRight IN", "WSA2 WSA_SPK2 OUT",
- "TweeterRight IN", "WSA2 WSA_SPK2 OUT",
- "IN1_HPHL", "HPHL_OUT",
- "IN2_HPHR", "HPHR_OUT",
- "AMIC2", "MIC BIAS2",
- "VA DMIC0", "MIC BIAS3",
- "VA DMIC1", "MIC BIAS3",
- "VA DMIC2", "MIC BIAS1",
- "VA DMIC3", "MIC BIAS1",
- "VA DMIC0", "VA MIC BIAS3",
- "VA DMIC1", "VA MIC BIAS3",
- "VA DMIC2", "VA MIC BIAS1",
- "VA DMIC3", "VA MIC BIAS1",
- "TX SWR_INPUT1", "ADC2_OUTPUT";
-
- wcd-playback-dai-link {
- link-name = "WCD Playback";
-
- cpu {
- sound-dai = <&q6apmbedai RX_CODEC_DMA_RX_0>;
- };
-
- codec {
- sound-dai = <&wcd938x 0>, <&swr1 0>, <&lpass_rxmacro 0>;
- };
-
- platform {
- sound-dai = <&q6apm>;
- };
- };
-
- wcd-capture-dai-link {
- link-name = "WCD Capture";
-
- cpu {
- sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
- };
-
- codec {
- sound-dai = <&wcd938x 1>, <&swr2 1>, <&lpass_txmacro 0>;
- };
-
- platform {
- sound-dai = <&q6apm>;
- };
- };
-
- wsa-dai-link {
- link-name = "WSA Playback";
-
- cpu {
- sound-dai = <&q6apmbedai WSA_CODEC_DMA_RX_0>;
- };
-
- codec {
- sound-dai = <&left_woofer>, <&left_tweeter>,
- <&swr0 0>, <&lpass_wsamacro 0>,
- <&right_woofer>, <&right_tweeter>,
- <&swr3 0>, <&lpass_wsa2macro 0>;
- };
-
- platform {
- sound-dai = <&q6apm>;
- };
- };
-
- va-dai-link {
- link-name = "VA Capture";
-
- cpu {
- sound-dai = <&q6apmbedai VA_CODEC_DMA_TX_0>;
- };
-
- codec {
- sound-dai = <&lpass_vamacro 0>;
- };
-
- platform {
- sound-dai = <&q6apm>;
- };
- };
- };
-
- vreg_edp_3p3: regulator-edp-3p3 {
- compatible = "regulator-fixed";
-
- regulator-name = "VREG_EDP_3P3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
-
- gpio = <&tlmm 70 GPIO_ACTIVE_HIGH>;
- enable-active-high;
-
- pinctrl-0 = <&edp_reg_en>;
- pinctrl-names = "default";
-
- regulator-boot-on;
- };
-
- vreg_misc_3p3: regulator-misc-3p3 {
- compatible = "regulator-fixed";
-
- regulator-name = "VREG_MISC_3P3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
-
- gpio = <&pm8550ve_8_gpios 6 GPIO_ACTIVE_HIGH>;
- enable-active-high;
-
- pinctrl-names = "default";
- pinctrl-0 = <&misc_3p3_reg_en>;
-
- regulator-boot-on;
- regulator-always-on;
- };
-
- vreg_nvme: regulator-nvme {
- compatible = "regulator-fixed";
-
- regulator-name = "VREG_NVME_3P3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
-
- gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>;
- enable-active-high;
-
- pinctrl-names = "default";
- pinctrl-0 = <&nvme_reg_en>;
-
- regulator-boot-on;
- };
-
- vph_pwr: regulator-vph-pwr {
- compatible = "regulator-fixed";
-
- regulator-name = "vph_pwr";
- regulator-min-microvolt = <3700000>;
- regulator-max-microvolt = <3700000>;
-
- regulator-always-on;
- regulator-boot-on;
- };
-
- vreg_wwan: regulator-wwan {
- compatible = "regulator-fixed";
-
- regulator-name = "SDX_VPH_PWR";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
-
- gpio = <&tlmm 221 GPIO_ACTIVE_HIGH>;
- enable-active-high;
-
- pinctrl-0 = <&wwan_sw_en>;
- pinctrl-names = "default";
-
- regulator-boot-on;
- };
-};
-
-&apps_rsc {
- regulators-0 {
- compatible = "qcom,pm8550-rpmh-regulators";
- qcom,pmic-id = "b";
-
- vdd-bob1-supply = <&vph_pwr>;
- vdd-bob2-supply = <&vph_pwr>;
- vdd-l1-l4-l10-supply = <&vreg_s4c_1p8>;
- vdd-l2-l13-l14-supply = <&vreg_bob1>;
- vdd-l5-l16-supply = <&vreg_bob1>;
- vdd-l6-l7-supply = <&vreg_bob2>;
- vdd-l8-l9-supply = <&vreg_bob1>;
- vdd-l12-supply = <&vreg_s5j_1p2>;
- vdd-l15-supply = <&vreg_s4c_1p8>;
- vdd-l17-supply = <&vreg_bob2>;
-
- vreg_bob1: bob1 {
- regulator-name = "vreg_bob1";
- regulator-min-microvolt = <3008000>;
- regulator-max-microvolt = <3960000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_bob2: bob2 {
- regulator-name = "vreg_bob2";
- regulator-min-microvolt = <2504000>;
- regulator-max-microvolt = <3008000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l1b_1p8: ldo1 {
- regulator-name = "vreg_l1b_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l2b_3p0: ldo2 {
- regulator-name = "vreg_l2b_3p0";
- regulator-min-microvolt = <3072000>;
- regulator-max-microvolt = <3100000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l4b_1p8: ldo4 {
- regulator-name = "vreg_l4b_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l5b_3p0: ldo5 {
- regulator-name = "vreg_l5b_3p0";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l6b_1p8: ldo6 {
- regulator-name = "vreg_l6b_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2960000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l7b_2p8: ldo7 {
- regulator-name = "vreg_l7b_2p8";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l8b_3p0: ldo8 {
- regulator-name = "vreg_l8b_3p0";
- regulator-min-microvolt = <3072000>;
- regulator-max-microvolt = <3072000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l9b_2p9: ldo9 {
- regulator-name = "vreg_l9b_2p9";
- regulator-min-microvolt = <2960000>;
- regulator-max-microvolt = <2960000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l10b_1p8: ldo10 {
- regulator-name = "vreg_l10b_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l12b_1p2: ldo12 {
- regulator-name = "vreg_l12b_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l13b_3p0: ldo13 {
- regulator-name = "vreg_l13b_3p0";
- regulator-min-microvolt = <3072000>;
- regulator-max-microvolt = <3100000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l14b_3p0: ldo14 {
- regulator-name = "vreg_l14b_3p0";
- regulator-min-microvolt = <3072000>;
- regulator-max-microvolt = <3072000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l15b_1p8: ldo15 {
- regulator-name = "vreg_l15b_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l16b_2p9: ldo16 {
- regulator-name = "vreg_l16b_2p9";
- regulator-min-microvolt = <2912000>;
- regulator-max-microvolt = <2912000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l17b_2p5: ldo17 {
- regulator-name = "vreg_l17b_2p5";
- regulator-min-microvolt = <2504000>;
- regulator-max-microvolt = <2504000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-1 {
- compatible = "qcom,pm8550ve-rpmh-regulators";
- qcom,pmic-id = "c";
-
- vdd-l1-supply = <&vreg_s5j_1p2>;
- vdd-l2-supply = <&vreg_s1f_0p7>;
- vdd-l3-supply = <&vreg_s1f_0p7>;
- vdd-s4-supply = <&vph_pwr>;
-
- vreg_s4c_1p8: smps4 {
- regulator-name = "vreg_s4c_1p8";
- regulator-min-microvolt = <1856000>;
- regulator-max-microvolt = <2000000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l1c_1p2: ldo1 {
- regulator-name = "vreg_l1c_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l2c_0p8: ldo2 {
- regulator-name = "vreg_l2c_0p8";
- regulator-min-microvolt = <880000>;
- regulator-max-microvolt = <920000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l3c_0p8: ldo3 {
- regulator-name = "vreg_l3c_0p8";
- regulator-min-microvolt = <880000>;
- regulator-max-microvolt = <920000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-2 {
- compatible = "qcom,pmc8380-rpmh-regulators";
- qcom,pmic-id = "d";
-
- vdd-l1-supply = <&vreg_s1f_0p7>;
- vdd-l2-supply = <&vreg_s1f_0p7>;
- vdd-l3-supply = <&vreg_s4c_1p8>;
- vdd-s1-supply = <&vph_pwr>;
-
- vreg_l1d_0p8: ldo1 {
- regulator-name = "vreg_l1d_0p8";
- regulator-min-microvolt = <880000>;
- regulator-max-microvolt = <920000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l2d_0p9: ldo2 {
- regulator-name = "vreg_l2d_0p9";
- regulator-min-microvolt = <912000>;
- regulator-max-microvolt = <920000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l3d_1p8: ldo3 {
- regulator-name = "vreg_l3d_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-3 {
- compatible = "qcom,pmc8380-rpmh-regulators";
- qcom,pmic-id = "e";
-
- vdd-l2-supply = <&vreg_s1f_0p7>;
- vdd-l3-supply = <&vreg_s5j_1p2>;
-
- vreg_l2e_0p8: ldo2 {
- regulator-name = "vreg_l2e_0p8";
- regulator-min-microvolt = <880000>;
- regulator-max-microvolt = <920000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l3e_1p2: ldo3 {
- regulator-name = "vreg_l3e_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-4 {
- compatible = "qcom,pmc8380-rpmh-regulators";
- qcom,pmic-id = "f";
-
- vdd-l1-supply = <&vreg_s5j_1p2>;
- vdd-l2-supply = <&vreg_s5j_1p2>;
- vdd-l3-supply = <&vreg_s5j_1p2>;
- vdd-s1-supply = <&vph_pwr>;
-
- vreg_s1f_0p7: smps1 {
- regulator-name = "vreg_s1f_0p7";
- regulator-min-microvolt = <700000>;
- regulator-max-microvolt = <1100000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l1f_1p0: ldo1 {
- regulator-name = "vreg_l1f_1p0";
- regulator-min-microvolt = <1024000>;
- regulator-max-microvolt = <1024000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l2f_1p0: ldo2 {
- regulator-name = "vreg_l2f_1p0";
- regulator-min-microvolt = <1024000>;
- regulator-max-microvolt = <1024000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l3f_1p0: ldo3 {
- regulator-name = "vreg_l3f_1p0";
- regulator-min-microvolt = <1024000>;
- regulator-max-microvolt = <1024000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-6 {
- compatible = "qcom,pm8550ve-rpmh-regulators";
- qcom,pmic-id = "i";
-
- vdd-l1-supply = <&vreg_s4c_1p8>;
- vdd-l2-supply = <&vreg_s5j_1p2>;
- vdd-l3-supply = <&vreg_s1f_0p7>;
- vdd-s1-supply = <&vph_pwr>;
- vdd-s2-supply = <&vph_pwr>;
-
- vreg_s1i_0p9: smps1 {
- regulator-name = "vreg_s1i_0p9";
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <920000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_s2i_1p0: smps2 {
- regulator-name = "vreg_s2i_1p0";
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1100000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l1i_1p8: ldo1 {
- regulator-name = "vreg_l1i_1p8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l2i_1p2: ldo2 {
- regulator-name = "vreg_l2i_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l3i_0p8: ldo3 {
- regulator-name = "vreg_l3i_0p8";
- regulator-min-microvolt = <880000>;
- regulator-max-microvolt = <920000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-7 {
- compatible = "qcom,pm8550ve-rpmh-regulators";
- qcom,pmic-id = "j";
-
- vdd-l1-supply = <&vreg_s1f_0p7>;
- vdd-l2-supply = <&vreg_s5j_1p2>;
- vdd-l3-supply = <&vreg_s1f_0p7>;
- vdd-s5-supply = <&vph_pwr>;
-
- vreg_s5j_1p2: smps5 {
- regulator-name = "vreg_s5j_1p2";
- regulator-min-microvolt = <1256000>;
- regulator-max-microvolt = <1304000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l1j_0p8: ldo1 {
- regulator-name = "vreg_l1j_0p8";
- regulator-min-microvolt = <880000>;
- regulator-max-microvolt = <920000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l2j_1p2: ldo2 {
- regulator-name = "vreg_l2j_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l3j_0p8: ldo3 {
- regulator-name = "vreg_l3j_0p8";
- regulator-min-microvolt = <880000>;
- regulator-max-microvolt = <920000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
- };
-};
-
-&gpu {
- status = "okay";
-
- zap-shader {
- firmware-name = "qcom/x1e80100/gen70500_zap.mbn";
- };
-};
-
-&i2c0 {
- clock-frequency = <400000>;
-
- status = "okay";
-
- touchpad@15 {
- compatible = "hid-over-i2c";
- reg = <0x15>;
-
- hid-descr-addr = <0x1>;
- interrupts-extended = <&tlmm 3 IRQ_TYPE_LEVEL_LOW>;
-
- vdd-supply = <&vreg_misc_3p3>;
- vddl-supply = <&vreg_l12b_1p2>;
-
- pinctrl-0 = <&tpad_default>;
- pinctrl-names = "default";
-
- wakeup-source;
- };
-
- keyboard@3a {
- compatible = "hid-over-i2c";
- reg = <0x3a>;
-
- hid-descr-addr = <0x1>;
- interrupts-extended = <&tlmm 67 IRQ_TYPE_LEVEL_LOW>;
-
- vdd-supply = <&vreg_misc_3p3>;
- vddl-supply = <&vreg_l12b_1p2>;
-
- pinctrl-0 = <&kybd_default>;
- pinctrl-names = "default";
-
- wakeup-source;
- };
-};
-
-&i2c8 {
- clock-frequency = <400000>;
-
- status = "okay";
-
- touchscreen@10 {
- compatible = "hid-over-i2c";
- reg = <0x10>;
-
- hid-descr-addr = <0x1>;
- interrupts-extended = <&tlmm 51 IRQ_TYPE_LEVEL_LOW>;
-
- vdd-supply = <&vreg_misc_3p3>;
- vddl-supply = <&vreg_l15b_1p8>;
-
- pinctrl-0 = <&ts0_default>;
- pinctrl-names = "default";
- };
-};
-
-&lpass_tlmm {
- spkr_01_sd_n_active: spkr-01-sd-n-active-state {
- pins = "gpio12";
- function = "gpio";
- drive-strength = <16>;
- bias-disable;
- output-low;
- };
-
- spkr_23_sd_n_active: spkr-23-sd-n-active-state {
- pins = "gpio13";
- function = "gpio";
- drive-strength = <16>;
- bias-disable;
- output-low;
- };
-};
-
-&lpass_vamacro {
- pinctrl-0 = <&dmic01_default>, <&dmic23_default>;
- pinctrl-names = "default";
-
- vdd-micb-supply = <&vreg_l1b_1p8>;
- qcom,dmic-sample-rate = <4800000>;
-};
-
-&mdss {
- status = "okay";
-};
-
-&mdss_dp3 {
- compatible = "qcom,x1e80100-dp";
- /delete-property/ #sound-dai-cells;
-
- status = "okay";
-
- aux-bus {
- panel {
- compatible = "samsung,atna45af01", "samsung,atna33xc20";
- enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>;
- power-supply = <&vreg_edp_3p3>;
-
- pinctrl-0 = <&edp_bl_en>;
- pinctrl-names = "default";
-
- port {
- edp_panel_in: endpoint {
- remote-endpoint = <&mdss_dp3_out>;
- };
- };
- };
- };
-
- ports {
- port@1 {
- reg = <1>;
- mdss_dp3_out: endpoint {
- data-lanes = <0 1 2 3>;
- link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
-
- remote-endpoint = <&edp_panel_in>;
- };
- };
- };
-};
-
-&mdss_dp3_phy {
- vdda-phy-supply = <&vreg_l3j_0p8>;
- vdda-pll-supply = <&vreg_l2j_1p2>;
-
- status = "okay";
-};
-
-&pcie4 {
- perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
- wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
-
- pinctrl-0 = <&pcie4_default>;
- pinctrl-names = "default";
-
- status = "okay";
-};
-
-&pcie4_phy {
- vdda-phy-supply = <&vreg_l3i_0p8>;
- vdda-pll-supply = <&vreg_l3e_1p2>;
-
- status = "okay";
-};
-
-&pcie5 {
- perst-gpios = <&tlmm 149 GPIO_ACTIVE_LOW>;
- wake-gpios = <&tlmm 151 GPIO_ACTIVE_LOW>;
-
- vddpe-3v3-supply = <&vreg_wwan>;
-
- pinctrl-0 = <&pcie5_default>;
- pinctrl-names = "default";
-
- status = "okay";
-};
-
-&pcie5_phy {
- vdda-phy-supply = <&vreg_l3i_0p8>;
- vdda-pll-supply = <&vreg_l3e_1p2>;
-
- status = "okay";
-};
-
-&pcie6a {
- perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
- wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
-
- vddpe-3v3-supply = <&vreg_nvme>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&pcie6a_default>;
-
- status = "okay";
-};
-
-&pcie6a_phy {
- vdda-phy-supply = <&vreg_l1d_0p8>;
- vdda-pll-supply = <&vreg_l2j_1p2>;
-
- status = "okay";
-};
-
-&pm8550ve_8_gpios {
- misc_3p3_reg_en: misc-3p3-reg-en-state {
- pins = "gpio6";
- function = "normal";
- bias-disable;
- input-disable;
- output-enable;
- drive-push-pull;
- power-source = <1>; /* 1.8 V */
- qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
- };
-};
-
-&pmc8380_3_gpios {
- edp_bl_en: edp-bl-en-state {
- pins = "gpio4";
- function = "normal";
- power-source = <1>; /* 1.8V */
- input-disable;
- output-enable;
- };
-};
-
-&qupv3_0 {
- status = "okay";
-};
-
-&qupv3_1 {
- status = "okay";
-};
-
-&qupv3_2 {
- status = "okay";
-};
-
-&remoteproc_adsp {
- firmware-name = "qcom/x1e80100/adsp.mbn",
- "qcom/x1e80100/adsp_dtb.mbn";
-
- status = "okay";
-};
-
-&remoteproc_cdsp {
- firmware-name = "qcom/x1e80100/cdsp.mbn",
- "qcom/x1e80100/cdsp_dtb.mbn";
-
- status = "okay";
-};
-
-&smb2360_0 {
- status = "okay";
-};
-
-&smb2360_0_eusb2_repeater {
- vdd18-supply = <&vreg_l3d_1p8>;
- vdd3-supply = <&vreg_l2b_3p0>;
-};
-
-&smb2360_1 {
- status = "okay";
-};
-
-&smb2360_1_eusb2_repeater {
- vdd18-supply = <&vreg_l3d_1p8>;
- vdd3-supply = <&vreg_l14b_3p0>;
-};
-
-&smb2360_2 {
- status = "okay";
-};
-
-&smb2360_2_eusb2_repeater {
- vdd18-supply = <&vreg_l3d_1p8>;
- vdd3-supply = <&vreg_l8b_3p0>;
-};
-
-&swr0 {
- status = "okay";
-
- pinctrl-0 = <&wsa_swr_active>, <&spkr_01_sd_n_active>;
- pinctrl-names = "default";
-
- /* WSA8845, Left Woofer */
- left_woofer: speaker@0,0 {
- compatible = "sdw20217020400";
- reg = <0 0>;
- reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
- #sound-dai-cells = <0>;
- sound-name-prefix = "WooferLeft";
- vdd-1p8-supply = <&vreg_l15b_1p8>;
- vdd-io-supply = <&vreg_l12b_1p2>;
- qcom,port-mapping = <1 2 3 7 10 13>;
- };
-
- /* WSA8845, Left Tweeter */
- left_tweeter: speaker@0,1 {
- compatible = "sdw20217020400";
- reg = <0 1>;
- reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
- #sound-dai-cells = <0>;
- sound-name-prefix = "TweeterLeft";
- vdd-1p8-supply = <&vreg_l15b_1p8>;
- vdd-io-supply = <&vreg_l12b_1p2>;
- qcom,port-mapping = <4 5 6 7 11 13>;
- };
-};
-
-&swr1 {
- status = "okay";
-
- /* WCD9385 RX */
- wcd_rx: codec@0,4 {
- compatible = "sdw20217010d00";
- reg = <0 4>;
- qcom,rx-port-mapping = <1 2 3 4 5>;
- };
-};
-
-&swr2 {
- status = "okay";
-
- /* WCD9385 TX */
- wcd_tx: codec@0,3 {
- compatible = "sdw20217010d00";
- reg = <0 3>;
- qcom,tx-port-mapping = <2 2 3 4>;
- };
-};
-
-&swr3 {
- status = "okay";
-
- pinctrl-0 = <&wsa2_swr_active>, <&spkr_23_sd_n_active>;
- pinctrl-names = "default";
-
- /* WSA8845, Right Woofer */
- right_woofer: speaker@0,0 {
- compatible = "sdw20217020400";
- reg = <0 0>;
- reset-gpios = <&lpass_tlmm 13 GPIO_ACTIVE_LOW>;
- #sound-dai-cells = <0>;
- sound-name-prefix = "WooferRight";
- vdd-1p8-supply = <&vreg_l15b_1p8>;
- vdd-io-supply = <&vreg_l12b_1p2>;
- qcom,port-mapping = <1 2 3 7 10 13>;
- };
-
- /* WSA8845, Right Tweeter */
- right_tweeter: speaker@0,1 {
- compatible = "sdw20217020400";
- reg = <0 1>;
- reset-gpios = <&lpass_tlmm 13 GPIO_ACTIVE_LOW>;
- #sound-dai-cells = <0>;
- sound-name-prefix = "TweeterRight";
- vdd-1p8-supply = <&vreg_l15b_1p8>;
- vdd-io-supply = <&vreg_l12b_1p2>;
- qcom,port-mapping = <4 5 6 7 11 13>;
- };
-};
-
-&tlmm {
- gpio-reserved-ranges = <34 2>, /* Unused */
- <44 4>, /* SPI (TPM) */
- <238 1>; /* UFS Reset */
-
- edp_reg_en: edp-reg-en-state {
- pins = "gpio70";
- function = "gpio";
- drive-strength = <16>;
- bias-disable;
- };
-
- hall_int_n_default: hall-int-n-state {
- pins = "gpio92";
- function = "gpio";
- bias-disable;
- };
-
- kybd_default: kybd-default-state {
- pins = "gpio67";
- function = "gpio";
- bias-disable;
- };
-
- nvme_reg_en: nvme-reg-en-state {
- pins = "gpio18";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- };
-
- pcie4_default: pcie4-default-state {
- clkreq-n-pins {
- pins = "gpio147";
- function = "pcie4_clk";
- drive-strength = <2>;
- bias-pull-up;
- };
-
- perst-n-pins {
- pins = "gpio146";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- };
-
- wake-n-pins {
- pins = "gpio148";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
-
- pcie5_default: pcie5-default-state {
- clkreq-n-pins {
- pins = "gpio150";
- function = "pcie5_clk";
- drive-strength = <2>;
- bias-pull-up;
- };
-
- perst-n-pins {
- pins = "gpio149";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- };
-
- wake-n-pins {
- pins = "gpio151";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
-
- pcie6a_default: pcie6a-default-state {
- clkreq-n-pins {
- pins = "gpio153";
- function = "pcie6a_clk";
- drive-strength = <2>;
- bias-pull-up;
- };
-
- perst-n-pins {
- pins = "gpio152";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- };
-
- wake-n-pins {
- pins = "gpio154";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
-
- tpad_default: tpad-default-state {
- pins = "gpio3";
- function = "gpio";
- bias-disable;
- };
-
- ts0_default: ts0-default-state {
- int-n-pins {
- pins = "gpio51";
- function = "gpio";
- bias-disable;
- };
-
- reset-n-pins {
- pins = "gpio48";
- function = "gpio";
- output-high;
- drive-strength = <16>;
- };
- };
-
- wcd_default: wcd-reset-n-active-state {
- pins = "gpio191";
- function = "gpio";
- drive-strength = <16>;
- bias-disable;
- output-low;
- };
-
- wwan_sw_en: wwan-sw-en-state {
- pins = "gpio221";
- function = "gpio";
- drive-strength = <4>;
- bias-disable;
- };
-};
-
-&uart21 {
- compatible = "qcom,geni-debug-uart";
- status = "okay";
-};
-
-&usb_1_ss0_hsphy {
- vdd-supply = <&vreg_l3j_0p8>;
- vdda12-supply = <&vreg_l2j_1p2>;
-
- phys = <&smb2360_0_eusb2_repeater>;
-
- status = "okay";
-};
-
-&usb_1_ss0_qmpphy {
- vdda-phy-supply = <&vreg_l2j_1p2>;
- vdda-pll-supply = <&vreg_l1j_0p8>;
-
- status = "okay";
-};
-
-&usb_1_ss0 {
- status = "okay";
-};
-
-&usb_1_ss0_dwc3 {
- dr_mode = "host";
-};
-
-&usb_1_ss0_dwc3_hs {
- remote-endpoint = <&pmic_glink_ss0_hs_in>;
-};
-
-&usb_1_ss0_qmpphy_out {
- remote-endpoint = <&pmic_glink_ss0_ss_in>;
-};
-
-&usb_1_ss1_hsphy {
- vdd-supply = <&vreg_l3j_0p8>;
- vdda12-supply = <&vreg_l2j_1p2>;
-
- phys = <&smb2360_1_eusb2_repeater>;
-
- status = "okay";
-};
-
-&usb_1_ss1_qmpphy {
- vdda-phy-supply = <&vreg_l2j_1p2>;
- vdda-pll-supply = <&vreg_l2d_0p9>;
-
- status = "okay";
-};
-
-&usb_1_ss1 {
- status = "okay";
-};
-
-&usb_1_ss1_dwc3 {
- dr_mode = "host";
-};
-
-&usb_1_ss1_dwc3_hs {
- remote-endpoint = <&pmic_glink_ss1_hs_in>;
-};
-
-&usb_1_ss1_qmpphy_out {
- remote-endpoint = <&pmic_glink_ss1_ss_in>;
-};
-
-&usb_1_ss2_hsphy {
- vdd-supply = <&vreg_l3j_0p8>;
- vdda12-supply = <&vreg_l2j_1p2>;
-
- phys = <&smb2360_2_eusb2_repeater>;
-
- status = "okay";
-};
-
-&usb_1_ss2_qmpphy {
- vdda-phy-supply = <&vreg_l2j_1p2>;
- vdda-pll-supply = <&vreg_l2d_0p9>;
-
- status = "okay";
-};
-
-&usb_1_ss2 {
- status = "okay";
-};
-
-&usb_1_ss2_dwc3 {
- dr_mode = "host";
-};
-
-&usb_1_ss2_dwc3_hs {
- remote-endpoint = <&pmic_glink_ss2_hs_in>;
};
-&usb_1_ss2_qmpphy_out {
- remote-endpoint = <&pmic_glink_ss2_ss_in>;
+&gpu_zap_shader {
+ firmware-name = "qcom/x1e80100/gen70500_zap.mbn";
};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts b/arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts
index 86e87f03b0ec..967f6dba0878 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts
@@ -359,6 +359,7 @@
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_l13b_3p0: ldo13 {
@@ -380,6 +381,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_l17b_2p5: ldo17 {
@@ -612,7 +614,6 @@
reg = <0x08>;
clocks = <&rpmhcc RPMH_RF_CLK3>;
- clock-names = "xo";
vdd-supply = <&vreg_rtmr0_1p15>;
vdd33-supply = <&vreg_rtmr0_3p3>;
@@ -676,7 +677,6 @@
reg = <0x8>;
clocks = <&rpmhcc RPMH_RF_CLK4>;
- clock-names = "xo";
vdd-supply = <&vreg_rtmr1_1p15>;
vdd33-supply = <&vreg_rtmr1_3p3>;
@@ -770,6 +770,24 @@
status = "okay";
};
+&mdss_dp0 {
+ status = "okay";
+};
+
+&mdss_dp0_out {
+ data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+};
+
+&mdss_dp1 {
+ status = "okay";
+};
+
+&mdss_dp1_out {
+ data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+};
+
&mdss_dp3 {
/delete-property/ #sound-dai-cells;
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-hp-elitebook-ultra-g1q.dts b/arch/arm64/boot/dts/qcom/x1e80100-hp-elitebook-ultra-g1q.dts
new file mode 100644
index 000000000000..4ea00d823693
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1e80100-hp-elitebook-ultra-g1q.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+/dts-v1/;
+
+#include "x1e80100-hp-omnibook-x14.dts"
+
+/ {
+ model = "HP EliteBook Ultra G1q";
+ compatible = "hp,elitebook-ultra-g1q", "qcom,x1e80100";
+};
+
+&gpu {
+ zap-shader {
+ firmware-name = "qcom/x1e80100/hp/elitebook-ultra-g1q/qcdxkmsuc8380.mbn";
+ };
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/x1e80100/hp/elitebook-ultra-g1q/qcadsp8380.mbn",
+ "qcom/x1e80100/hp/elitebook-ultra-g1q/adsp_dtbs.elf";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/x1e80100/hp/elitebook-ultra-g1q/qccdsp8380.mbn",
+ "qcom/x1e80100/hp/elitebook-ultra-g1q/cdsp_dtbs.elf";
+};
+
+&sound {
+ model = "X1E80100-HP-ELITEBOOK-ULTRA-G1Q";
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-hp-omnibook-x14.dts b/arch/arm64/boot/dts/qcom/x1e80100-hp-omnibook-x14.dts
index cd860a246c45..10b3af5e79fb 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-hp-omnibook-x14.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-hp-omnibook-x14.dts
@@ -150,15 +150,7 @@
reg = <1>;
pmic_glink_ss1_ss_in: endpoint {
- remote-endpoint = <&retimer_ss1_ss_out>;
- };
- };
-
- port@2 {
- reg = <2>;
-
- pmic_glink_ss1_con_sbu_in: endpoint {
- remote-endpoint = <&retimer_ss1_con_sbu_out>;
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
};
};
};
@@ -174,7 +166,7 @@
};
};
- sound {
+ sound: sound {
compatible = "qcom,x1e80100-sndcard";
model = "X1E80100-HP-OMNIBOOK-X14";
audio-routing = "SpkrLeft IN", "WSA WSA_SPK1 OUT",
@@ -370,54 +362,6 @@
regulator-boot-on;
};
- vreg_rtmr1_1p15: regulator-rtmr1-1p15 {
- compatible = "regulator-fixed";
-
- regulator-name = "VREG_RTMR1_1P15";
- regulator-min-microvolt = <1150000>;
- regulator-max-microvolt = <1150000>;
-
- gpio = <&tlmm 188 GPIO_ACTIVE_HIGH>;
- enable-active-high;
-
- pinctrl-0 = <&usb1_pwr_1p15_reg_en>;
- pinctrl-names = "default";
-
- regulator-boot-on;
- };
-
- vreg_rtmr1_1p8: regulator-rtmr1-1p8 {
- compatible = "regulator-fixed";
-
- regulator-name = "VREG_RTMR1_1P8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
-
- gpio = <&tlmm 175 GPIO_ACTIVE_HIGH>;
- enable-active-high;
-
- pinctrl-0 = <&usb1_pwr_1p8_reg_en>;
- pinctrl-names = "default";
-
- regulator-boot-on;
- };
-
- vreg_rtmr1_3p3: regulator-rtmr1-3p3 {
- compatible = "regulator-fixed";
-
- regulator-name = "VREG_RTMR1_3P3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
-
- gpio = <&tlmm 186 GPIO_ACTIVE_HIGH>;
- enable-active-high;
-
- pinctrl-0 = <&usb1_pwr_3p3_reg_en>;
- pinctrl-names = "default";
-
- regulator-boot-on;
- };
-
vreg_vph_pwr: regulator-vph-pwr {
compatible = "regulator-fixed";
@@ -633,6 +577,7 @@
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_l13b_3p0: ldo13 {
@@ -654,6 +599,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_l16b_2p9: ldo16 {
@@ -871,8 +817,8 @@
vreg_l2j_1p2: ldo2 {
regulator-name = "vreg_l2j_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1256000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
@@ -1025,64 +971,6 @@
};
};
-&i2c7 {
- clock-frequency = <400000>;
-
- status = "okay";
-
- typec-mux@8 {
- compatible = "parade,ps8830";
- reg = <0x8>;
-
- clocks = <&rpmhcc RPMH_RF_CLK4>;
-
- vdd-supply = <&vreg_rtmr1_1p15>;
- vdd33-supply = <&vreg_rtmr1_3p3>;
- vdd33-cap-supply = <&vreg_rtmr1_3p3>;
- vddar-supply = <&vreg_rtmr1_1p15>;
- vddat-supply = <&vreg_rtmr1_1p15>;
- vddio-supply = <&vreg_rtmr1_1p8>;
-
- reset-gpios = <&tlmm 176 GPIO_ACTIVE_LOW>;
-
- pinctrl-0 = <&rtmr1_default>;
- pinctrl-names = "default";
-
- orientation-switch;
- retimer-switch;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- retimer_ss1_ss_out: endpoint {
- remote-endpoint = <&pmic_glink_ss1_ss_in>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- retimer_ss1_ss_in: endpoint {
- remote-endpoint = <&usb_1_ss1_qmpphy_out>;
- };
- };
-
- port@2 {
- reg = <2>;
-
- retimer_ss1_con_sbu_out: endpoint {
- remote-endpoint = <&pmic_glink_ss1_con_sbu_in>;
- };
- };
-
- };
- };
-};
-
&i2c8 {
clock-frequency = <400000>;
@@ -1139,6 +1027,7 @@
&mdss_dp0_out {
data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
};
&mdss_dp1 {
@@ -1147,10 +1036,10 @@
&mdss_dp1_out {
data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
};
&mdss_dp3 {
- compatible = "qcom,x1e80100-dp";
/delete-property/ #sound-dai-cells;
status = "okay";
@@ -1352,18 +1241,22 @@
status = "okay";
};
+&smb2360_0 {
+ status = "okay";
+};
+
&smb2360_0_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l2b_3p0>;
+};
+&smb2360_1 {
status = "okay";
};
&smb2360_1_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l14b_3p0>;
-
- status = "okay";
};
&swr0 {
@@ -1425,14 +1318,6 @@
<72 2>, /* Secure EC I2C connection (?) */
<238 1>; /* UFS Reset */
- bt_en_default: bt-en-sleep {
- pins = "gpio116";
- function = "gpio";
- output-low;
- bias-disable;
- drive-strength = <16>;
- };
-
edp_reg_en: edp-reg-en-state {
pins = "gpio70";
function = "gpio";
@@ -1513,13 +1398,6 @@
};
};
- rtmr1_default: rtmr1-reset-n-active-state {
- pins = "gpio176";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- };
-
tpad_default: tpad-default-state {
pins = "gpio3";
function = "gpio";
@@ -1541,27 +1419,6 @@
};
};
- usb1_pwr_1p15_reg_en: usb1-pwr-1p15-reg-en-state {
- pins = "gpio188";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- };
-
- usb1_pwr_1p8_reg_en: usb1-pwr-1p8-reg-en-state {
- pins = "gpio175";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- };
-
- usb1_pwr_3p3_reg_en: usb1-pwr-3p3-reg-en-state {
- pins = "gpio186";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- };
-
wcd_default: wcd-reset-n-active-state {
pins = "gpio191";
function = "gpio";
@@ -1664,7 +1521,7 @@
};
&usb_1_ss1_qmpphy_out {
- remote-endpoint = <&retimer_ss1_ss_in>;
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
};
&usb_mp {
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
index a3d53f2ba2c3..dad0f11e8e85 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
@@ -72,7 +72,15 @@
reg = <1>;
pmic_glink_ss0_ss_in: endpoint {
- remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ remote-endpoint = <&retimer_ss0_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss0_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss0_con_sbu_out>;
};
};
};
@@ -101,7 +109,15 @@
reg = <1>;
pmic_glink_ss1_ss_in: endpoint {
- remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ remote-endpoint = <&retimer_ss1_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss1_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss1_con_sbu_out>;
};
};
};
@@ -130,7 +146,15 @@
reg = <1>;
pmic_glink_ss2_ss_in: endpoint {
- remote-endpoint = <&usb_1_ss2_qmpphy_out>;
+ remote-endpoint = <&retimer_ss2_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss2_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss2_con_sbu_out>;
};
};
};
@@ -152,7 +176,11 @@
audio-routing = "WooferLeft IN", "WSA WSA_SPK1 OUT",
"TweeterLeft IN", "WSA WSA_SPK2 OUT",
"WooferRight IN", "WSA2 WSA_SPK2 OUT",
- "TweeterRight IN", "WSA2 WSA_SPK2 OUT";
+ "TweeterRight IN", "WSA2 WSA_SPK2 OUT",
+ "VA DMIC0", "vdd-micb",
+ "VA DMIC1", "vdd-micb",
+ "VA DMIC2", "vdd-micb",
+ "VA DMIC3", "vdd-micb";
wsa-dai-link {
link-name = "WSA Playback";
@@ -222,6 +250,150 @@
regulator-boot-on;
};
+ vreg_rtmr0_1p15: regulator-rtmr0-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&pmc8380_5_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_pwr_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_1p8: regulator-rtmr0-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8550ve_9_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_3p3: regulator-rtmr0-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pm8550_gpios 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p15: regulator-rtmr1-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&tlmm 188 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb1_pwr_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p8: regulator-rtmr1-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 175 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb1_pwr_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_3p3: regulator-rtmr1-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 186 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb1_pwr_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr2_1p15: regulator-rtmr2-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR2_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&tlmm 189 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb2_pwr_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr2_1p8: regulator-rtmr2-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR2_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 126 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb2_pwr_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr2_3p3: regulator-rtmr2-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR2_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 187 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb2_pwr_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
vph_pwr: regulator-vph-pwr {
compatible = "regulator-fixed";
@@ -290,6 +462,7 @@
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_l14b_3p0: ldo14 {
@@ -304,8 +477,8 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
-
};
regulators-1 {
@@ -508,8 +681,8 @@
vreg_l2j_1p2: ldo2 {
regulator-name = "vreg_l2j_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1256000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
@@ -562,6 +735,178 @@
};
};
+&i2c1 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x08>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK5>;
+
+ vdd-supply = <&vreg_rtmr2_1p15>;
+ vdd33-supply = <&vreg_rtmr2_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr2_3p3>;
+ vddar-supply = <&vreg_rtmr2_1p15>;
+ vddat-supply = <&vreg_rtmr2_1p15>;
+ vddio-supply = <&vreg_rtmr2_1p8>;
+
+ reset-gpios = <&tlmm 185 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr2_default>;
+ pinctrl-names = "default";
+
+ orientation-switch;
+ retimer-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss2_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss2_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss2_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss2_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss2_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss2_con_sbu_in>;
+ };
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x08>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK3>;
+
+ vdd-supply = <&vreg_rtmr0_1p15>;
+ vdd33-supply = <&vreg_rtmr0_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr0_3p3>;
+ vddar-supply = <&vreg_rtmr0_1p15>;
+ vddat-supply = <&vreg_rtmr0_1p15>;
+ vddio-supply = <&vreg_rtmr0_1p8>;
+
+ reset-gpios = <&pm8550_gpios 10 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr0_default>;
+ pinctrl-names = "default";
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss0_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss0_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_con_sbu_in>;
+ };
+ };
+ };
+ };
+};
+
+&i2c7 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x8>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK4>;
+
+ vdd-supply = <&vreg_rtmr1_1p15>;
+ vdd33-supply = <&vreg_rtmr1_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr1_3p3>;
+ vddar-supply = <&vreg_rtmr1_1p15>;
+ vddat-supply = <&vreg_rtmr1_1p15>;
+ vddio-supply = <&vreg_rtmr1_1p8>;
+
+ reset-gpios = <&tlmm 176 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr1_default>;
+ pinctrl-names = "default";
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss1_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss1_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_con_sbu_in>;
+ };
+ };
+
+ };
+ };
+};
+
&i2c8 {
clock-frequency = <400000>;
@@ -609,8 +954,34 @@
status = "okay";
};
+&mdss_dp0 {
+ status = "okay";
+};
+
+&mdss_dp0_out {
+ data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+};
+
+&mdss_dp1 {
+ status = "okay";
+};
+
+&mdss_dp1_out {
+ data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+};
+
+&mdss_dp2 {
+ status = "okay";
+};
+
+&mdss_dp2_out {
+ data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+};
+
&mdss_dp3 {
- compatible = "qcom,x1e80100-dp";
/delete-property/ #sound-dai-cells;
status = "okay";
@@ -674,8 +1045,6 @@
wifi@0 {
compatible = "pci17cb,1107";
reg = <0x10000 0x0 0x0 0x0 0x0>;
-
- qcom,ath12k-calibration-variant = "LES790";
};
};
@@ -698,6 +1067,37 @@
status = "okay";
};
+&pm8550_gpios {
+ rtmr0_default: rtmr0-reset-n-active-state {
+ pins = "gpio10";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+
+ usb0_3p3_reg_en: usb0-3p3-reg-en-state {
+ pins = "gpio11";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+};
+
+&pm8550ve_9_gpios {
+ usb0_1p8_reg_en: usb0-1p8-reg-en-state {
+ pins = "gpio8";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+};
+
&pmc8380_3_gpios {
edp_bl_en: edp-bl-en-state {
pins = "gpio4";
@@ -708,6 +1108,17 @@
};
};
+&pmc8380_5_gpios {
+ usb0_pwr_1p15_reg_en: usb0-pwr-1p15-reg-en-state {
+ pins = "gpio8";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+};
+
&qupv3_0 {
status = "okay";
};
@@ -916,6 +1327,20 @@
};
};
+ rtmr1_default: rtmr1-reset-n-active-state {
+ pins = "gpio176";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rtmr2_default: rtmr2-reset-n-active-state {
+ pins = "gpio185";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
tpad_default: tpad-default-state {
pins = "gpio3";
function = "gpio";
@@ -937,6 +1362,47 @@
};
};
+ usb1_pwr_1p15_reg_en: usb1-pwr-1p15-reg-en-state {
+ pins = "gpio188";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb1_pwr_1p8_reg_en: usb1-pwr-1p8-reg-en-state {
+ pins = "gpio175";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb1_pwr_3p3_reg_en: usb1-pwr-3p3-reg-en-state {
+ pins = "gpio186";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb2_pwr_1p15_reg_en: usb2-pwr-1p15-reg-en-state {
+ pins = "gpio189";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb2_pwr_1p8_reg_en: usb2-pwr-1p8-reg-en-state {
+ pins = "gpio126";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb2_pwr_3p3_reg_en: usb2-pwr-3p3-reg-en-state {
+ pins = "gpio187";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
};
&uart21 {
@@ -973,7 +1439,7 @@
};
&usb_1_ss0_qmpphy_out {
- remote-endpoint = <&pmic_glink_ss0_ss_in>;
+ remote-endpoint = <&retimer_ss0_ss_in>;
};
&usb_1_ss1_hsphy {
@@ -1005,7 +1471,7 @@
};
&usb_1_ss1_qmpphy_out {
- remote-endpoint = <&pmic_glink_ss1_ss_in>;
+ remote-endpoint = <&retimer_ss1_ss_in>;
};
&usb_1_ss2_hsphy {
@@ -1037,5 +1503,5 @@
};
&usb_1_ss2_qmpphy_out {
- remote-endpoint = <&pmic_glink_ss2_ss_in>;
+ remote-endpoint = <&retimer_ss2_ss_in>;
};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
index 5867953c7356..0fd8516580b2 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
@@ -510,6 +510,7 @@
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_l13b: ldo13 {
@@ -531,6 +532,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_l16b: ldo16 {
@@ -792,7 +794,6 @@
reset-gpios = <&pm8550_gpios 10 GPIO_ACTIVE_LOW>;
clocks = <&rpmhcc RPMH_RF_CLK3>;
- clock-names = "xo";
vdd-supply = <&vreg_rtmr0_1p15>;
vdd33-supply = <&vreg_rtmr0_3p3>;
@@ -878,7 +879,6 @@
reset-gpios = <&tlmm 176 GPIO_ACTIVE_LOW>;
clocks = <&rpmhcc RPMH_RF_CLK4>;
- clock-names = "xo";
vdd-supply = <&vreg_rtmr1_1p15>;
vdd33-supply = <&vreg_rtmr1_3p3>;
@@ -944,8 +944,25 @@
status = "okay";
};
+&mdss_dp0 {
+ status = "okay";
+};
+
+&mdss_dp0_out {
+ data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+};
+
+&mdss_dp1 {
+ status = "okay";
+};
+
+&mdss_dp1_out {
+ data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+};
+
&mdss_dp3 {
- compatible = "qcom,x1e80100-dp";
/delete-property/ #sound-dai-cells;
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
index d7a2a2b8fc6c..c02fd4d15c96 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
@@ -110,7 +110,7 @@
};
};
- pmc8380-6-thermal {
+ pmc8380_6_thermal: pmc8380-6-thermal {
polling-delay-passive = <100>;
thermal-sensors = <&pmc8380_6_temp_alarm>;
@@ -223,8 +223,7 @@
reg = <0x6100>, <0x6200>;
reg-names = "rtc", "alarm";
interrupts = <0x0 0x62 0x1 IRQ_TYPE_EDGE_RISING>;
- /* Not yet sure what blocks access */
- status = "reserved";
+ qcom,no-alarm; /* alarm owned by ADSP */
};
pmk8550_sdam_2: nvram@7100 {
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
index ec594628304a..4dfba835af6a 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
@@ -17,6 +17,7 @@
aliases {
serial0 = &uart21;
+ serial1 = &uart14;
};
wcd938x: audio-codec {
@@ -281,6 +282,42 @@
regulator-boot-on;
};
+ vreg_wcn_0p95: regulator-wcn-0p95 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_WCN_0P95";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <950000>;
+
+ vin-supply = <&vreg_wcn_3p3>;
+ };
+
+ vreg_wcn_1p9: regulator-wcn-1p9 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_WCN_1P9";
+ regulator-min-microvolt = <1900000>;
+ regulator-max-microvolt = <1900000>;
+
+ vin-supply = <&vreg_wcn_3p3>;
+ };
+
+ vreg_wcn_3p3: regulator-wcn-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_WCN_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 214 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&wcn_sw_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
usb-1-ss0-sbu-mux {
compatible = "onnn,fsusb42", "gpio-sbu-mux";
@@ -337,6 +374,65 @@
};
};
};
+
+ wcn7850-pmu {
+ compatible = "qcom,wcn7850-pmu";
+
+ vdd-supply = <&vreg_wcn_0p95>;
+ vddio-supply = <&vreg_l15b_1p8>;
+ vddaon-supply = <&vreg_wcn_0p95>;
+ vdddig-supply = <&vreg_wcn_0p95>;
+ vddrfa1p2-supply = <&vreg_wcn_1p9>;
+ vddrfa1p8-supply = <&vreg_wcn_1p9>;
+
+ wlan-enable-gpios = <&tlmm 117 GPIO_ACTIVE_HIGH>;
+ bt-enable-gpios = <&tlmm 116 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&wcn_wlan_bt_en>;
+ pinctrl-names = "default";
+
+ regulators {
+ vreg_pmu_rfa_cmn: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn";
+ };
+
+ vreg_pmu_aon_0p59: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p59";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p85: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p85";
+ };
+
+ vreg_pmu_btcmx_0p85: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p85";
+ };
+
+ vreg_pmu_rfa_0p8: ldo5 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo6 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p8: ldo7 {
+ regulator-name = "vreg_pmu_rfa_1p8";
+ };
+
+ vreg_pmu_pcie_0p9: ldo8 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_pcie_1p8: ldo9 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+ };
+ };
};
&apps_rsc {
@@ -437,6 +533,7 @@
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_l13b_3p0: ldo13 {
@@ -458,6 +555,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_l16b_2p9: ldo16 {
@@ -675,8 +773,8 @@
vreg_l2j_1p2: ldo2 {
regulator-name = "vreg_l2j_1p2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1256000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
@@ -751,6 +849,7 @@
&mdss_dp0_out {
data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
};
&mdss_dp1 {
@@ -759,6 +858,7 @@
&mdss_dp1_out {
data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
};
&mdss_dp2 {
@@ -767,10 +867,10 @@
&mdss_dp2_out {
data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
};
&mdss_dp3 {
- compatible = "qcom,x1e80100-dp";
/delete-property/ #sound-dai-cells;
status = "okay";
@@ -825,6 +925,23 @@
status = "okay";
};
+&pcie4_port0 {
+ wifi@0 {
+ compatible = "pci17cb,1107";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
+ vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+ vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+ };
+};
+
&pcie6a {
perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
@@ -1135,6 +1252,37 @@
bias-disable;
output-low;
};
+
+ wcn_wlan_bt_en: wcn-wlan-bt-en-state {
+ pins = "gpio116", "gpio117";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wcn_sw_en: wcn-sw-en-state {
+ pins = "gpio214";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
+
+&uart14 {
+ status = "okay";
+
+ bluetooth {
+ compatible = "qcom,wcn7850-bt";
+ max-speed = <3200000>;
+
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
+ };
};
&uart21 {
diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
index 4936fa5b98ff..a8eb4c5fe99f 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
@@ -20,6 +20,7 @@
#include <dt-bindings/soc/qcom,gpr.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include <dt-bindings/sound/qcom,q6dsp-lpass-ports.h>
+#include <dt-bindings/thermal/thermal.h>
/ {
interrupt-parent = <&intc>;
@@ -71,8 +72,8 @@
reg = <0x0 0x0>;
enable-method = "psci";
next-level-cache = <&l2_0>;
- power-domains = <&cpu_pd0>;
- power-domain-names = "psci";
+ power-domains = <&cpu_pd0>, <&scmi_dvfs 0>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&cluster_c4>;
l2_0: l2-cache {
@@ -88,8 +89,8 @@
reg = <0x0 0x100>;
enable-method = "psci";
next-level-cache = <&l2_0>;
- power-domains = <&cpu_pd1>;
- power-domain-names = "psci";
+ power-domains = <&cpu_pd1>, <&scmi_dvfs 0>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&cluster_c4>;
};
@@ -99,8 +100,8 @@
reg = <0x0 0x200>;
enable-method = "psci";
next-level-cache = <&l2_0>;
- power-domains = <&cpu_pd2>;
- power-domain-names = "psci";
+ power-domains = <&cpu_pd2>, <&scmi_dvfs 0>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&cluster_c4>;
};
@@ -110,8 +111,8 @@
reg = <0x0 0x300>;
enable-method = "psci";
next-level-cache = <&l2_0>;
- power-domains = <&cpu_pd3>;
- power-domain-names = "psci";
+ power-domains = <&cpu_pd3>, <&scmi_dvfs 0>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&cluster_c4>;
};
@@ -121,8 +122,8 @@
reg = <0x0 0x10000>;
enable-method = "psci";
next-level-cache = <&l2_1>;
- power-domains = <&cpu_pd4>;
- power-domain-names = "psci";
+ power-domains = <&cpu_pd4>, <&scmi_dvfs 1>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&cluster_c4>;
l2_1: l2-cache {
@@ -138,8 +139,8 @@
reg = <0x0 0x10100>;
enable-method = "psci";
next-level-cache = <&l2_1>;
- power-domains = <&cpu_pd5>;
- power-domain-names = "psci";
+ power-domains = <&cpu_pd5>, <&scmi_dvfs 1>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&cluster_c4>;
};
@@ -149,8 +150,8 @@
reg = <0x0 0x10200>;
enable-method = "psci";
next-level-cache = <&l2_1>;
- power-domains = <&cpu_pd6>;
- power-domain-names = "psci";
+ power-domains = <&cpu_pd6>, <&scmi_dvfs 1>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&cluster_c4>;
};
@@ -160,8 +161,8 @@
reg = <0x0 0x10300>;
enable-method = "psci";
next-level-cache = <&l2_1>;
- power-domains = <&cpu_pd7>;
- power-domain-names = "psci";
+ power-domains = <&cpu_pd7>, <&scmi_dvfs 1>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&cluster_c4>;
};
@@ -171,8 +172,8 @@
reg = <0x0 0x20000>;
enable-method = "psci";
next-level-cache = <&l2_2>;
- power-domains = <&cpu_pd8>;
- power-domain-names = "psci";
+ power-domains = <&cpu_pd8>, <&scmi_dvfs 2>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&cluster_c4>;
l2_2: l2-cache {
@@ -188,8 +189,8 @@
reg = <0x0 0x20100>;
enable-method = "psci";
next-level-cache = <&l2_2>;
- power-domains = <&cpu_pd9>;
- power-domain-names = "psci";
+ power-domains = <&cpu_pd9>, <&scmi_dvfs 2>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&cluster_c4>;
};
@@ -199,8 +200,8 @@
reg = <0x0 0x20200>;
enable-method = "psci";
next-level-cache = <&l2_2>;
- power-domains = <&cpu_pd10>;
- power-domain-names = "psci";
+ power-domains = <&cpu_pd10>, <&scmi_dvfs 2>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&cluster_c4>;
};
@@ -210,8 +211,8 @@
reg = <0x0 0x20300>;
enable-method = "psci";
next-level-cache = <&l2_2>;
- power-domains = <&cpu_pd11>;
- power-domain-names = "psci";
+ power-domains = <&cpu_pd11>, <&scmi_dvfs 2>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&cluster_c4>;
};
@@ -252,7 +253,7 @@
};
};
- cluster2 {
+ cpu_map_cluster2: cluster2 {
core0 {
cpu = <&cpu8>;
};
@@ -322,6 +323,21 @@
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
qcom,dload-mode = <&tcsr 0x19000>;
};
+
+ scmi {
+ compatible = "arm,scmi";
+ mboxes = <&cpucp_mbox 0>, <&cpucp_mbox 2>;
+ mbox-names = "tx", "rx";
+ shmem = <&cpu_scp_lpri0>, <&cpu_scp_lpri1>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_dvfs: protocol@13 {
+ reg = <0x13>;
+ #power-domain-cells = <1>;
+ };
+ };
};
clk_virt: interconnect-0 {
@@ -863,8 +879,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -899,8 +915,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -935,8 +951,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -971,8 +987,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1007,8 +1023,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1043,8 +1059,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1079,8 +1095,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1115,8 +1131,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1151,8 +1167,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1187,8 +1203,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1223,8 +1239,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1259,8 +1275,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1295,8 +1311,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
@@ -1320,8 +1336,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1356,8 +1372,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1392,8 +1408,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1428,8 +1444,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1508,8 +1524,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1544,8 +1560,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1580,8 +1596,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1616,8 +1632,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1652,8 +1668,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1688,8 +1704,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1724,8 +1740,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1760,8 +1776,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1796,8 +1812,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1832,8 +1848,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1868,8 +1884,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1904,8 +1920,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1940,8 +1956,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -1976,8 +1992,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2012,8 +2028,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
@@ -2037,8 +2053,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2073,8 +2089,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2152,8 +2168,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2188,8 +2204,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2224,8 +2240,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2260,8 +2276,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2296,8 +2312,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2332,8 +2348,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "qup-core",
"qup-config";
@@ -2357,8 +2373,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2393,8 +2409,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2429,8 +2445,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2465,8 +2481,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2501,8 +2517,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2537,8 +2553,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2573,8 +2589,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2609,8 +2625,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2645,8 +2661,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2681,8 +2697,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -2717,8 +2733,8 @@
interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
&clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ACTIVE_ONLY>,
<&aggre2_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
interconnect-names = "qup-core",
@@ -3125,7 +3141,7 @@
device_type = "pci";
compatible = "qcom,pcie-x1e80100";
reg = <0x0 0x01bd0000 0x0 0x3000>,
- <0x0 0x78000000 0x0 0xf1d>,
+ <0x0 0x78000000 0x0 0xf20>,
<0x0 0x78000f40 0x0 0xa8>,
<0x0 0x78001000 0x0 0x1000>,
<0x0 0x78100000 0x0 0x100000>,
@@ -3194,8 +3210,8 @@
interconnects = <&pcie_north_anoc MASTER_PCIE_3 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &cnoc_main SLAVE_PCIE_3 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &cnoc_main SLAVE_PCIE_3 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "pcie-mem",
"cpu-pcie";
@@ -3209,6 +3225,10 @@
phys = <&pcie3_phy>;
phy-names = "pciephy";
+ eq-presets-8gts = /bits/ 16 <0x5555 0x5555 0x5555 0x5555
+ 0x5555 0x5555 0x5555 0x5555>;
+ eq-presets-16gts = /bits/ 8 <0x55 0x55 0x55 0x55 0x55 0x55 0x55 0x55>;
+
operating-points-v2 = <&pcie3_opp_table>;
status = "disabled";
@@ -3395,8 +3415,8 @@
interconnects = <&pcie_south_anoc MASTER_PCIE_6A QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &cnoc_main SLAVE_PCIE_6A QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &cnoc_main SLAVE_PCIE_6A QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "pcie-mem",
"cpu-pcie";
@@ -3411,6 +3431,9 @@
phys = <&pcie6a_phy>;
phy-names = "pciephy";
+ eq-presets-8gts = /bits/ 16 <0x5555 0x5555 0x5555 0x5555>;
+ eq-presets-16gts = /bits/ 8 <0x55 0x55 0x55 0x55>;
+
status = "disabled";
};
@@ -3522,8 +3545,8 @@
interconnects = <&pcie_north_anoc MASTER_PCIE_5 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &cnoc_main SLAVE_PCIE_5 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &cnoc_main SLAVE_PCIE_5 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "pcie-mem",
"cpu-pcie";
@@ -3538,6 +3561,8 @@
phys = <&pcie5_phy>;
phy-names = "pciephy";
+ eq-presets-8gts = /bits/ 16 <0x5555 0x5555>;
+
status = "disabled";
};
@@ -3558,8 +3583,10 @@
"pipe",
"pipediv2";
- resets = <&gcc GCC_PCIE_5_PHY_BCR>;
- reset-names = "phy";
+ resets = <&gcc GCC_PCIE_5_PHY_BCR>,
+ <&gcc GCC_PCIE_5_NOCSR_COM_PHY_BCR>;
+ reset-names = "phy",
+ "phy_nocsr";
assigned-clocks = <&gcc GCC_PCIE_5_PHY_RCHNG_CLK>;
assigned-clock-rates = <100000000>;
@@ -3646,8 +3673,8 @@
interconnects = <&pcie_north_anoc MASTER_PCIE_4 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &cnoc_main SLAVE_PCIE_4 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &cnoc_main SLAVE_PCIE_4 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "pcie-mem",
"cpu-pcie";
@@ -3662,6 +3689,8 @@
phys = <&pcie4_phy>;
phy-names = "pciephy";
+ eq-presets-8gts = /bits/ 16 <0x5555 0x5555>;
+
status = "disabled";
pcie4_port0: pcie@0 {
@@ -3692,8 +3721,10 @@
"pipe",
"pipediv2";
- resets = <&gcc GCC_PCIE_4_PHY_BCR>;
- reset-names = "phy";
+ resets = <&gcc GCC_PCIE_4_PHY_BCR>,
+ <&gcc GCC_PCIE_4_NOCSR_COM_PHY_BCR>;
+ reset-names = "phy",
+ "phy_nocsr";
assigned-clocks = <&gcc GCC_PCIE_4_PHY_RCHNG_CLK>;
assigned-clock-rates = <100000000>;
@@ -3747,65 +3778,88 @@
status = "disabled";
- zap-shader {
+ gpu_zap_shader: zap-shader {
memory-region = <&gpu_microcode_mem>;
};
gpu_opp_table: opp-table {
- compatible = "operating-points-v2";
+ compatible = "operating-points-v2-adreno", "operating-points-v2";
+
+ opp-1250000000 {
+ opp-hz = /bits/ 64 <1250000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L3>;
+ opp-peak-kBps = <16500000>;
+ qcom,opp-acd-level = <0xa82a5ffd>;
+ };
+
+ opp-1175000000 {
+ opp-hz = /bits/ 64 <1175000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L2>;
+ opp-peak-kBps = <14398438>;
+ qcom,opp-acd-level = <0xa82a5ffd>;
+ };
opp-1100000000 {
opp-hz = /bits/ 64 <1100000000>;
opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
- opp-peak-kBps = <16500000>;
+ opp-peak-kBps = <14398438>;
+ qcom,opp-acd-level = <0xa82a5ffd>;
};
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
opp-peak-kBps = <14398438>;
+ qcom,opp-acd-level = <0xa82b5ffd>;
};
opp-925000000 {
opp-hz = /bits/ 64 <925000000>;
opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
opp-peak-kBps = <14398438>;
+ qcom,opp-acd-level = <0xa82b5ffd>;
};
opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
opp-peak-kBps = <12449219>;
+ qcom,opp-acd-level = <0xa82c5ffd>;
};
opp-744000000 {
opp-hz = /bits/ 64 <744000000>;
opp-level = <RPMH_REGULATOR_LEVEL_SVS_L2>;
opp-peak-kBps = <10687500>;
+ qcom,opp-acd-level = <0x882e5ffd>;
};
opp-687000000 {
opp-hz = /bits/ 64 <687000000>;
opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
opp-peak-kBps = <8171875>;
+ qcom,opp-acd-level = <0x882e5ffd>;
};
opp-550000000 {
opp-hz = /bits/ 64 <550000000>;
opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
opp-peak-kBps = <6074219>;
+ qcom,opp-acd-level = <0xc0285ffd>;
};
opp-390000000 {
opp-hz = /bits/ 64 <390000000>;
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
opp-peak-kBps = <3000000>;
+ qcom,opp-acd-level = <0xc0285ffd>;
};
opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D1>;
opp-peak-kBps = <2136719>;
+ qcom,opp-acd-level = <0xc02b5ffd>;
};
};
};
@@ -4694,8 +4748,8 @@
interconnects = <&usb_south_anoc MASTER_USB3_2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_USB3_2 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_USB3_2 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "usb-ddr",
"apps-usb";
@@ -4794,8 +4848,8 @@
interconnects = <&usb_north_anoc MASTER_USB2 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_USB2 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_USB2 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "usb-ddr",
"apps-usb";
@@ -4814,6 +4868,8 @@
snps,dis-u1-entry-quirk;
snps,dis-u2-entry-quirk;
+ dma-coherent;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -4879,8 +4935,8 @@
interconnects = <&usb_north_anoc MASTER_USB3_MP QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_USB3_MP QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_USB3_MP QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "usb-ddr",
"apps-usb";
@@ -5053,8 +5109,8 @@
interconnects = <&usb_south_anoc MASTER_USB3_1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
- <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
- &config_noc SLAVE_USB3_1 QCOM_ICC_TAG_ALWAYS>;
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_USB3_1 QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "usb-ddr",
"apps-usb";
@@ -7933,6 +7989,20 @@
dma-coherent;
};
+ pcie_smmu: iommu@15400000 {
+ compatible = "arm,smmu-v3";
+ reg = <0 0x15400000 0 0x80000>;
+ #iommu-cells = <1>;
+ interrupts = <GIC_SPI 138 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 134 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 136 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eventq",
+ "gerror",
+ "cmdq-sync";
+ dma-coherent;
+ status = "reserved"; /* Controlled by Gunyah. */
+ };
+
intc: interrupt-controller@17000000 {
compatible = "arm,gic-v3";
reg = <0 0x17000000 0 0x10000>, /* GICD */
@@ -7959,6 +8029,13 @@
};
};
+ cpucp_mbox: mailbox@17430000 {
+ compatible = "qcom,x1e80100-cpucp-mbox";
+ reg = <0 0x17430000 0 0x10000>, <0 0x18830000 0 0x10000>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ };
+
apps_rsc: rsc@17500000 {
compatible = "qcom,rpmh-rsc";
reg = <0 0x17500000 0 0x10000>,
@@ -8142,6 +8219,32 @@
};
};
+ sram: sram@18b4e000 {
+ compatible = "mmio-sram";
+ reg = <0x0 0x18b4e000 0x0 0x400>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x18b4e000 0x400>;
+
+ cpu_scp_lpri0: scp-sram-section@0 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0x200>;
+ };
+
+ cpu_scp_lpri1: scp-sram-section@200 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x200 0x200>;
+ };
+ };
+
+ sbsa_watchdog: watchdog@1c840000 {
+ compatible = "arm,sbsa-gwdt";
+ reg = <0 0x1c840000 0 0x1000>,
+ <0 0x1c850000 0 0x1000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
pmu@24091000 {
compatible = "qcom,x1e80100-llcc-bwmon", "qcom,sc7280-llcc-bwmon";
reg = <0 0x24091000 0 0x1000>;
@@ -8199,7 +8302,7 @@
};
/* cluster0 */
- pmu@240b3400 {
+ bwmon_cluster0: pmu@240b3400 {
compatible = "qcom,x1e80100-cpu-bwmon", "qcom,sdm845-bwmon";
reg = <0 0x240b3400 0 0x600>;
@@ -8209,6 +8312,19 @@
&gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>;
operating-points-v2 = <&cpu_bwmon_opp_table>;
+ };
+
+ /* cluster2 */
+ bwmon_cluster2: pmu@240b5400 {
+ compatible = "qcom,x1e80100-cpu-bwmon", "qcom,sdm845-bwmon";
+ reg = <0 0x240b5400 0 0x600>;
+
+ interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>;
+
+ operating-points-v2 = <&cpu_bwmon_opp_table>;
cpu_bwmon_opp_table: opp-table {
compatible = "operating-points-v2";
@@ -8239,19 +8355,6 @@
};
};
- /* cluster2 */
- pmu@240b5400 {
- compatible = "qcom,x1e80100-cpu-bwmon", "qcom,sdm845-bwmon";
- reg = <0 0x240b5400 0 0x600>;
-
- interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
-
- interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
- &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>;
-
- operating-points-v2 = <&cpu_bwmon_opp_table>;
- };
-
/* cluster1 */
pmu@240b6400 {
compatible = "qcom,x1e80100-cpu-bwmon", "qcom,sdm845-bwmon";
@@ -8457,33 +8560,19 @@
};
aoss0-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
};
cpu0-0-top-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens0 1>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8491,25 +8580,11 @@
};
cpu0-0-btm-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens0 2>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8517,25 +8592,11 @@
};
cpu0-1-top-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens0 3>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8543,25 +8604,11 @@
};
cpu0-1-btm-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens0 4>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8569,25 +8616,11 @@
};
cpu0-2-top-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens0 5>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8595,25 +8628,11 @@
};
cpu0-2-btm-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens0 6>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8621,25 +8640,11 @@
};
cpu0-3-top-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens0 7>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8647,25 +8652,11 @@
};
cpu0-3-btm-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens0 8>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8676,15 +8667,9 @@
thermal-sensors = <&tsens0 9>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "hot";
- };
-
cpuss2-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -8694,15 +8679,9 @@
thermal-sensors = <&tsens0 10>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "hot";
- };
-
cpuss2-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -8719,7 +8698,7 @@
};
mem-critical {
- temperature = <125000>;
+ temperature = <115000>;
hysteresis = <0>;
type = "critical";
};
@@ -8727,15 +8706,19 @@
};
video-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens0 12>;
trips {
trip-point0 {
- temperature = <125000>;
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ video-critical {
+ temperature = <115000>;
hysteresis = <1000>;
- type = "passive";
+ type = "critical";
};
};
};
@@ -8751,33 +8734,19 @@
};
aoss0-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
};
cpu1-0-top-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens1 1>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8785,25 +8754,11 @@
};
cpu1-0-btm-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens1 2>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8811,25 +8766,11 @@
};
cpu1-1-top-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens1 3>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8837,25 +8778,11 @@
};
cpu1-1-btm-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens1 4>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8863,25 +8790,11 @@
};
cpu1-2-top-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens1 5>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8889,25 +8802,11 @@
};
cpu1-2-btm-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens1 6>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8915,25 +8814,11 @@
};
cpu1-3-top-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens1 7>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8941,25 +8826,11 @@
};
cpu1-3-btm-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens1 8>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -8970,15 +8841,9 @@
thermal-sensors = <&tsens1 9>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "hot";
- };
-
cpuss2-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -8988,15 +8853,9 @@
thermal-sensors = <&tsens1 10>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "hot";
- };
-
cpuss2-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -9013,33 +8872,19 @@
};
aoss0-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
};
cpu2-0-top-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens2 1>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9047,25 +8892,11 @@
};
cpu2-0-btm-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens2 2>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9073,25 +8904,11 @@
};
cpu2-1-top-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens2 3>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9099,25 +8916,11 @@
};
cpu2-1-btm-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens2 4>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9125,25 +8928,11 @@
};
cpu2-2-top-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens2 5>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9151,25 +8940,11 @@
};
cpu2-2-btm-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens2 6>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9177,25 +8952,11 @@
};
cpu2-3-top-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens2 7>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9203,25 +8964,11 @@
};
cpu2-3-btm-thermal {
- polling-delay-passive = <250>;
-
thermal-sensors = <&tsens2 8>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip-point1 {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
cpu-critical {
- temperature = <110000>;
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9232,15 +8979,9 @@
thermal-sensors = <&tsens2 9>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "hot";
- };
-
cpuss2-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -9250,15 +8991,9 @@
thermal-sensors = <&tsens2 10>;
trips {
- trip-point0 {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "hot";
- };
-
cpuss2-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -9275,8 +9010,8 @@
};
aoss0-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -9293,8 +9028,8 @@
};
nsp0-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -9311,8 +9046,8 @@
};
nsp1-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -9329,8 +9064,8 @@
};
nsp2-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -9347,33 +9082,34 @@
};
nsp3-critical {
- temperature = <125000>;
- hysteresis = <0>;
+ temperature = <115000>;
+ hysteresis = <1000>;
type = "critical";
};
};
};
gpuss-0-thermal {
- polling-delay-passive = <10>;
+ polling-delay-passive = <200>;
thermal-sensors = <&tsens3 5>;
- trips {
- trip-point0 {
- temperature = <85000>;
- hysteresis = <1000>;
- type = "passive";
+ cooling-maps {
+ map0 {
+ trip = <&gpuss0_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
+ };
- trip-point1 {
- temperature = <90000>;
+ trips {
+ gpuss0_alert0: trip-point0 {
+ temperature = <95000>;
hysteresis = <1000>;
- type = "hot";
+ type = "passive";
};
- trip-point2 {
- temperature = <125000>;
+ gpu-critical {
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9381,25 +9117,26 @@
};
gpuss-1-thermal {
- polling-delay-passive = <10>;
+ polling-delay-passive = <200>;
thermal-sensors = <&tsens3 6>;
- trips {
- trip-point0 {
- temperature = <85000>;
- hysteresis = <1000>;
- type = "passive";
+ cooling-maps {
+ map0 {
+ trip = <&gpuss1_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
+ };
- trip-point1 {
- temperature = <90000>;
+ trips {
+ gpuss1_alert0: trip-point0 {
+ temperature = <95000>;
hysteresis = <1000>;
- type = "hot";
+ type = "passive";
};
- trip-point2 {
- temperature = <125000>;
+ gpu-critical {
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9407,25 +9144,26 @@
};
gpuss-2-thermal {
- polling-delay-passive = <10>;
+ polling-delay-passive = <200>;
thermal-sensors = <&tsens3 7>;
- trips {
- trip-point0 {
- temperature = <85000>;
- hysteresis = <1000>;
- type = "passive";
+ cooling-maps {
+ map0 {
+ trip = <&gpuss2_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
+ };
- trip-point1 {
- temperature = <90000>;
+ trips {
+ gpuss2_alert0: trip-point0 {
+ temperature = <95000>;
hysteresis = <1000>;
- type = "hot";
+ type = "passive";
};
- trip-point2 {
- temperature = <125000>;
+ gpu-critical {
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9433,25 +9171,26 @@
};
gpuss-3-thermal {
- polling-delay-passive = <10>;
+ polling-delay-passive = <200>;
thermal-sensors = <&tsens3 8>;
- trips {
- trip-point0 {
- temperature = <85000>;
- hysteresis = <1000>;
- type = "passive";
+ cooling-maps {
+ map0 {
+ trip = <&gpuss3_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
+ };
- trip-point1 {
- temperature = <90000>;
+ trips {
+ gpuss3_alert0: trip-point0 {
+ temperature = <95000>;
hysteresis = <1000>;
- type = "hot";
+ type = "passive";
};
- trip-point2 {
- temperature = <125000>;
+ gpu-critical {
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9459,25 +9198,26 @@
};
gpuss-4-thermal {
- polling-delay-passive = <10>;
+ polling-delay-passive = <200>;
thermal-sensors = <&tsens3 9>;
- trips {
- trip-point0 {
- temperature = <85000>;
- hysteresis = <1000>;
- type = "passive";
+ cooling-maps {
+ map0 {
+ trip = <&gpuss4_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
+ };
- trip-point1 {
- temperature = <90000>;
+ trips {
+ gpuss4_alert0: trip-point0 {
+ temperature = <95000>;
hysteresis = <1000>;
- type = "hot";
+ type = "passive";
};
- trip-point2 {
- temperature = <125000>;
+ gpu-critical {
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9485,25 +9225,26 @@
};
gpuss-5-thermal {
- polling-delay-passive = <10>;
+ polling-delay-passive = <200>;
thermal-sensors = <&tsens3 10>;
- trips {
- trip-point0 {
- temperature = <85000>;
- hysteresis = <1000>;
- type = "passive";
+ cooling-maps {
+ map0 {
+ trip = <&gpuss5_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
+ };
- trip-point1 {
- temperature = <90000>;
+ trips {
+ gpuss5_alert0: trip-point0 {
+ temperature = <95000>;
hysteresis = <1000>;
- type = "hot";
+ type = "passive";
};
- trip-point2 {
- temperature = <125000>;
+ gpu-critical {
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9511,25 +9252,26 @@
};
gpuss-6-thermal {
- polling-delay-passive = <10>;
+ polling-delay-passive = <200>;
thermal-sensors = <&tsens3 11>;
- trips {
- trip-point0 {
- temperature = <85000>;
- hysteresis = <1000>;
- type = "passive";
+ cooling-maps {
+ map0 {
+ trip = <&gpuss6_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
+ };
- trip-point1 {
- temperature = <90000>;
+ trips {
+ gpuss6_alert0: trip-point0 {
+ temperature = <95000>;
hysteresis = <1000>;
- type = "hot";
+ type = "passive";
};
- trip-point2 {
- temperature = <125000>;
+ gpu-critical {
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9537,25 +9279,26 @@
};
gpuss-7-thermal {
- polling-delay-passive = <10>;
+ polling-delay-passive = <200>;
thermal-sensors = <&tsens3 12>;
- trips {
- trip-point0 {
- temperature = <85000>;
- hysteresis = <1000>;
- type = "passive";
+ cooling-maps {
+ map0 {
+ trip = <&gpuss7_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
+ };
- trip-point1 {
- temperature = <90000>;
+ trips {
+ gpuss7_alert0: trip-point0 {
+ temperature = <95000>;
hysteresis = <1000>;
- type = "hot";
+ type = "passive";
};
- trip-point2 {
- temperature = <125000>;
+ gpu-critical {
+ temperature = <115000>;
hysteresis = <1000>;
type = "critical";
};
@@ -9574,7 +9317,7 @@
camera0-critical {
temperature = <115000>;
- hysteresis = <0>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -9592,7 +9335,7 @@
camera0-critical {
temperature = <115000>;
- hysteresis = <0>;
+ hysteresis = <1000>;
type = "critical";
};
};
diff --git a/arch/arm64/boot/dts/qcom/x1p42100-crd.dts b/arch/arm64/boot/dts/qcom/x1p42100-crd.dts
new file mode 100644
index 000000000000..cf07860a63e9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1p42100-crd.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "x1p42100.dtsi"
+#include "x1-crd.dtsi"
+
+/delete-node/ &pmc8380_6;
+/delete-node/ &pmc8380_6_thermal;
+
+/ {
+ model = "Qualcomm Technologies, Inc. X1P42100 CRD";
+ compatible = "qcom,x1p42100-crd", "qcom,x1p42100";
+};
diff --git a/arch/arm64/boot/dts/qcom/x1p42100.dtsi b/arch/arm64/boot/dts/qcom/x1p42100.dtsi
new file mode 100644
index 000000000000..27f479010bc3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1p42100.dtsi
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* X1P42100 is heavily based on X1E80100, with some meaningful differences */
+#include "x1e80100.dtsi"
+
+/delete-node/ &bwmon_cluster0;
+/delete-node/ &cluster_pd2;
+/delete-node/ &cpu_map_cluster2;
+/delete-node/ &cpu8;
+/delete-node/ &cpu9;
+/delete-node/ &cpu10;
+/delete-node/ &cpu11;
+/delete-node/ &cpu_pd8;
+/delete-node/ &cpu_pd9;
+/delete-node/ &cpu_pd10;
+/delete-node/ &cpu_pd11;
+/delete-node/ &pcie3_phy;
+
+&gcc {
+ compatible = "qcom,x1p42100-gcc", "qcom,x1e80100-gcc";
+};
+
+/* The GPU is physically different and will be brought up later */
+&gpu {
+ /delete-property/ compatible;
+};
+
+&gpucc {
+ compatible = "qcom,x1p42100-gpucc";
+};
+
+/* PCIe3 has half the lanes compared to X1E80100 */
+&pcie3 {
+ num-lanes = <4>;
+};
+
+&pcie6a_phy {
+ compatible = "qcom,x1p42100-qmp-gen4x4-pcie-phy";
+};
+
+&soc {
+ /* The PCIe3 PHY on X1P42100 uses a different IP block */
+ pcie3_phy: phy@1bd4000 {
+ compatible = "qcom,x1p42100-qmp-gen4x4-pcie-phy";
+ reg = <0x0 0x01bd4000 0x0 0x2000>,
+ <0x0 0x01bd6000 0x0 0x2000>;
+
+ clocks = <&gcc GCC_PCIE_3_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_3_CFG_AHB_CLK>,
+ <&tcsr TCSR_PCIE_8L_CLKREF_EN>,
+ <&gcc GCC_PCIE_3_PHY_RCHNG_CLK>,
+ <&gcc GCC_PCIE_3_PIPE_CLK>,
+ <&gcc GCC_PCIE_3_PIPEDIV2_CLK>;
+ clock-names = "aux",
+ "cfg_ahb",
+ "ref",
+ "rchng",
+ "pipe",
+ "pipediv2";
+
+ resets = <&gcc GCC_PCIE_3_PHY_BCR>,
+ <&gcc GCC_PCIE_3_NOCSR_COM_PHY_BCR>;
+ reset-names = "phy",
+ "phy_nocsr";
+
+ assigned-clocks = <&gcc GCC_PCIE_3_PHY_RCHNG_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ power-domains = <&gcc GCC_PCIE_3_PHY_GDSC>;
+
+ #clock-cells = <0>;
+ clock-output-names = "pcie3_pipe_clk";
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index d25e665ee4bf..aa7f996c0546 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -94,6 +94,11 @@ dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g2-white-hawk-single.dtb
r8a779g2-white-hawk-single-ard-audio-da7212-dtbs := r8a779g2-white-hawk-single.dtb white-hawk-ard-audio-da7212.dtbo
dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g2-white-hawk-single-ard-audio-da7212.dtb
+DTC_FLAGS_r8a779g3-sparrow-hawk += -Wno-spi_bus_bridge
+dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g3-sparrow-hawk.dtb
+r8a779g3-sparrow-hawk-fan-pwm-dtbs := r8a779g3-sparrow-hawk.dtb r8a779g3-sparrow-hawk-fan-pwm.dtbo
+dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g3-sparrow-hawk-fan-pwm.dtb
+
dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g3-white-hawk-single.dtb
r8a779g3-white-hawk-single-ard-audio-da7212-dtbs := r8a779g3-white-hawk-single.dtb white-hawk-ard-audio-da7212.dtbo
dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g3-white-hawk-single-ard-audio-da7212.dtb
@@ -152,6 +157,8 @@ dtb-$(CONFIG_ARCH_R9A09G011) += r9a09g011-v2mevk2.dtb
dtb-$(CONFIG_ARCH_R9A09G047) += r9a09g047e57-smarc.dtb
+dtb-$(CONFIG_ARCH_R9A09G056) += r9a09g056n48-rzv2n-evk.dtb
+
dtb-$(CONFIG_ARCH_R9A09G057) += r9a09g057h44-rzv2h-evk.dtb
dtb-$(CONFIG_ARCH_R9A09G057) += r9a09g057h48-kakip.dtb
diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
index 1489bc8d2f4e..d40a7224f9c3 100644
--- a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
@@ -300,7 +300,7 @@
#address-cells = <1>;
#size-cells = <0>;
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
interrupts-extended = <&gpio1 27 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
index f1613bfd1632..95ff69339991 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
@@ -2588,13 +2588,20 @@
isp0: isp@fed00000 {
compatible = "renesas,r8a779a0-isp",
"renesas,rcar-gen4-isp";
- reg = <0 0xfed00000 0 0x10000>;
- interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 612>;
+ reg = <0 0xfed00000 0 0x10000>, <0 0xfec00000 0 0x100000>;
+ reg-names = "cs", "core";
+ interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cs", "core";
+ clocks = <&cpg CPG_MOD 612>, <&cpg CPG_MOD 16>;
+ clock-names = "cs", "core";
power-domains = <&sysc R8A779A0_PD_A3ISP01>;
- resets = <&cpg 612>;
+ resets = <&cpg 612>, <&cpg 16>;
+ reset-names = "cs", "core";
status = "disabled";
+ renesas,vspx = <&vspx0>;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -2672,13 +2679,20 @@
isp1: isp@fed20000 {
compatible = "renesas,r8a779a0-isp",
"renesas,rcar-gen4-isp";
- reg = <0 0xfed20000 0 0x10000>;
- interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 613>;
+ reg = <0 0xfed20000 0 0x10000>, <0 0xfee00000 0 0x100000>;
+ reg-names = "cs", "core";
+ interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cs", "core";
+ clocks = <&cpg CPG_MOD 613>, <&cpg CPG_MOD 17>;
+ clock-names = "cs", "core";
power-domains = <&sysc R8A779A0_PD_A3ISP01>;
- resets = <&cpg 613>;
+ resets = <&cpg 613>, <&cpg 17>;
+ reset-names = "cs", "core";
status = "disabled";
+ renesas,vspx = <&vspx1>;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -2756,13 +2770,20 @@
isp2: isp@fed30000 {
compatible = "renesas,r8a779a0-isp",
"renesas,rcar-gen4-isp";
- reg = <0 0xfed30000 0 0x10000>;
- interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 614>;
+ reg = <0 0xfed30000 0 0x10000>, <0 0xfef00000 0 0x100000>;
+ reg-names = "cs", "core";
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cs", "core";
+ clocks = <&cpg CPG_MOD 614>, <&cpg CPG_MOD 18>;
+ clock-names = "cs", "core";
power-domains = <&sysc R8A779A0_PD_A3ISP23>;
- resets = <&cpg 614>;
+ resets = <&cpg 614>, <&cpg 18>;
+ reset-names = "cs", "core";
status = "disabled";
+ renesas,vspx = <&vspx2>;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -2840,13 +2861,20 @@
isp3: isp@fed40000 {
compatible = "renesas,r8a779a0-isp",
"renesas,rcar-gen4-isp";
- reg = <0 0xfed40000 0 0x10000>;
- interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 615>;
+ reg = <0 0xfed40000 0 0x10000>, <0 0xfe400000 0 0x100000>;
+ reg-names = "cs", "core";
+ interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cs", "core";
+ clocks = <&cpg CPG_MOD 615>, <&cpg CPG_MOD 19>;
+ clock-names = "cs", "core";
power-domains = <&sysc R8A779A0_PD_A3ISP23>;
- resets = <&cpg 615>;
+ resets = <&cpg 615>, <&cpg 19>;
+ reset-names = "cs", "core";
status = "disabled";
+ renesas,vspx = <&vspx3>;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a779f4.dtsi b/arch/arm64/boot/dts/renesas/r8a779f4.dtsi
index ebed41892df3..b216d605c321 100644
--- a/arch/arm64/boot/dts/renesas/r8a779f4.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779f4.dtsi
@@ -10,3 +10,20 @@
/ {
compatible = "renesas,r8a779f4", "renesas,r8a779f0";
};
+
+&fuse {
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ufs_tune: calib@144 {
+ reg = <0x144 0x08>;
+ };
+ };
+};
+
+&ufs {
+ nvmem-cells = <&ufs_tune>;
+ nvmem-cell-names = "calibration";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
index 1760720b7128..6dbf05a55935 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
@@ -2277,13 +2277,20 @@
isp0: isp@fed00000 {
compatible = "renesas,r8a779g0-isp",
"renesas,rcar-gen4-isp";
- reg = <0 0xfed00000 0 0x10000>;
- interrupts = <GIC_SPI 473 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&cpg CPG_MOD 612>;
+ reg = <0 0xfed00000 0 0x10000>, <0 0xfec00000 0 0x100000>;
+ reg-names = "cs", "core";
+ interrupts = <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cs", "core";
+ clocks = <&cpg CPG_MOD 612>, <&cpg CPG_MOD 16>;
+ clock-names = "cs", "core";
power-domains = <&sysc R8A779G0_PD_A3ISP0>;
- resets = <&cpg 612>;
+ resets = <&cpg 612>, <&cpg 16>;
+ reset-names = "cs", "core";
status = "disabled";
+ renesas,vspx = <&vspx0>;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -2361,13 +2368,20 @@
isp1: isp@fed20000 {
compatible = "renesas,r8a779g0-isp",
"renesas,rcar-gen4-isp";
- reg = <0 0xfed20000 0 0x10000>;
- interrupts = <GIC_SPI 474 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&cpg CPG_MOD 613>;
+ reg = <0 0xfed20000 0 0x10000>, <0 0xfee00000 0 0x100000>;
+ reg-names = "cs", "core";
+ interrupts = <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cs", "core";
+ clocks = <&cpg CPG_MOD 613>, <&cpg CPG_MOD 17>;
+ clock-names = "cs", "core";
power-domains = <&sysc R8A779G0_PD_A3ISP1>;
- resets = <&cpg 613>;
+ resets = <&cpg 613>, <&cpg 17>;
+ reset-names = "cs", "core";
status = "disabled";
+ renesas,vspx = <&vspx1>;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a779g3-sparrow-hawk-fan-pwm.dtso b/arch/arm64/boot/dts/renesas/r8a779g3-sparrow-hawk-fan-pwm.dtso
new file mode 100644
index 000000000000..50d53c8d76c5
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a779g3-sparrow-hawk-fan-pwm.dtso
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Overlay for the PWM controlled blower fan in connector J3:FAN
+ * on R-Car V4H ES3.0 Sparrow Hawk board
+ *
+ * Copyright (C) 2025 Marek Vasut <marek.vasut+renesas@mailbox.org>
+ *
+ * Example usage:
+ *
+ * # Locate the hwmon sysfs directory that matches the PWM fan,
+ * # enable the PWM fan, and configure the fan speed manually.
+ * r8a779g3-sparrow-hawk$ grep -H . /sys/class/hwmon/hwmon?/name
+ * /sys/class/hwmon/hwmon0/name:sensor1_thermal
+ * /sys/class/hwmon/hwmon1/name:sensor2_thermal
+ * /sys/class/hwmon/hwmon2/name:sensor3_thermal
+ * /sys/class/hwmon/hwmon3/name:sensor4_thermal
+ * /sys/class/hwmon/hwmon4/name:pwmfan
+ * ^ ^^^^^^
+ *
+ * # Select mode 2, enable the fan PWM and regulator, and keep them enabled.
+ * # For details, see Linux Documentation/hwmon/pwm-fan.rst
+ * r8a779g3-sparrow-hawk$ echo 2 > /sys/class/hwmon/hwmon4/pwm1_enable
+ *
+ * # Configure the PWM fan speed in the range 0..255; 0 is stopped, 255 is full speed.
+ * # Fan speed 101 is roughly 2/5 of full speed:
+ * r8a779g3-sparrow-hawk$ echo 101 > /sys/class/hwmon/hwmon4/pwm1
+ */
+
+/dts-v1/;
+/plugin/;
+
+/*
+ * Override the default PWM fan settings. For a list of available properties,
+ * see the schema Documentation/devicetree/bindings/hwmon/pwm-fan.yaml.
+ */
+&fan {
+ /* Available cooling levels */
+ cooling-levels = <0 50 100 150 200 255>;
+ /* Four pulses of the tacho signal per revolution */
+ pulses-per-revolution = <4>;
+ /* PWM period: 100us ~= 10 kHz */
+ pwms = <&pwm0 0 100000>;
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779g3-sparrow-hawk.dts b/arch/arm64/boot/dts/renesas/r8a779g3-sparrow-hawk.dts
new file mode 100644
index 000000000000..6955eafd8d6a
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a779g3-sparrow-hawk.dts
@@ -0,0 +1,772 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the R-Car V4H ES3.0 Sparrow Hawk board
+ *
+ * Copyright (C) 2025 Marek Vasut <marek.vasut+renesas@mailbox.org>
+ */
+/*
+ * DA7212 Codec settings
+ *
+ * for Playback
+ * > amixer set "Headphone" 40%
+ * > amixer set "Headphone" on
+ * > amixer set "Mixout Left DAC Left" on
+ * > amixer set "Mixout Right DAC Right" on
+ * > aplay xxx.wav
+ *
+ * for Capture (Aux/Mic)
+ *
+ * on/off (B)
+ * CONN3 (HeadSet) ---+----> MSIOF1
+ * |
+ * CONN4 AUX ---------+ on/off (A)
+ *
+ * > amixer set "Mixin PGA" on
+ * > amixer set "Mixin PGA" 50%
+ * > amixer set "ADC" on
+ * > amixer set "ADC" 80%
+ * > amixer set "Aux" on ^
+ * > amixer set "Aux" 80% | (A)
+ * > amixer set "Mixin Left Aux Left" on |
+ * > amixer set "Mixin Right Aux Right" on v
+ * > amixer set "Mic 1" on ^
+ * > amixer set "Mic 1" 80% | (B)
+ * > amixer set "Mixin Left Mic 1" on |
+ * > amixer set "Mixin Right Mic 1" on v
+ * > arecord -f cd xxx.wav
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+
+#include "r8a779g3.dtsi"
+
+/ {
+ model = "Retronix Sparrow Hawk board based on r8a779g3";
+ compatible = "retronix,sparrow-hawk", "renesas,r8a779g3",
+ "renesas,r8a779g0";
+
+ aliases {
+ ethernet0 = &avb0;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ serial0 = &hscif0;
+ serial1 = &hscif1;
+ serial2 = &hscif3;
+ spi0 = &rpc;
+ };
+
+ chosen {
+ bootargs = "ignore_loglevel rw root=/dev/nfs ip=on";
+ stdout-path = "serial0:921600n8";
+ };
+
+ /* Page 31 / FAN */
+ fan: pwm-fan {
+ pinctrl-0 = <&irq4_pins>;
+ pinctrl-names = "default";
+ compatible = "pwm-fan";
+ #cooling-cells = <2>;
+ interrupts-extended = <&intc_ex 4 IRQ_TYPE_EDGE_FALLING>;
+ /*
+ * The fan model connected to this device can be selected by
+ * the user. Setting the "cooling-levels" DT property to a
+ * single 255 entry forces the fan PWM constantly HIGH, which
+ * makes the fan spin at maximum RPM, providing maximum cooling
+ * to this device and protection against a misconfigured PWM
+ * duty cycle.
+ *
+ * The user has to configure the "pwms" and
+ * "pulses-per-revolution" DT properties according to the fan
+ * datasheet first, and then extend the "cooling-levels =
+ * <0 m n ... 255>" property to achieve proper fan control for
+ * the installed fan model (see the sketch after this node).
+ */
+ cooling-levels = <255>;
+ pulses-per-revolution = <2>;
+ pwms = <&pwm0 0 50000>;
+ };
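[Editor's sketch, not part of this patch: assuming a hypothetical 4-pulse fan on J3:FAN, a board integrator could override the node roughly as the r8a779g3-sparrow-hawk-fan-pwm.dtso overlay above does; the values below are illustrative only and must come from the installed fan's datasheet.]

	&fan {
		/* Hypothetical values; consult the fan datasheet */
		cooling-levels = <0 64 128 192 255>;
		pulses-per-revolution = <4>;
		pwms = <&pwm0 0 100000>;	/* 100 us period, i.e. 10 kHz */
	};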
+
+ /*
+ * Page 15 / LPDDR5
+ *
+ * The configuration listed below is for the 8 GiB board variant
+ * with the MT62F1G64D8EK-023 WT:C LPDDR5 part populated on the board.
+ *
+ * A variant with the 16 GiB MT62F2G64D8EK-023 WT:C part populated on
+ * the board is handled automatically by the bootloader, which
+ * adjusts the memory nodes below to the correct DRAM size.
+ */
+ memory@48000000 {
+ device_type = "memory";
+ /* The first 128 MiB is reserved for the secure area. */
+ reg = <0x0 0x48000000 0x0 0x78000000>;
+ };
+
+ memory@480000000 {
+ device_type = "memory";
+ reg = <0x4 0x80000000 0x0 0x80000000>;
+ };
+
+ memory@600000000 {
+ device_type = "memory";
+ reg = <0x6 0x00000000 0x1 0x00000000>;
+ };
+
+ /* Page 27 / DSI to Display */
+ mini-dp-con {
+ compatible = "dp-connector";
+ label = "CN6";
+ type = "full-size";
+
+ port {
+ mini_dp_con_in: endpoint {
+ remote-endpoint = <&sn65dsi86_out>;
+ };
+ };
+ };
+
+ reg_1p2v: regulator-1p2v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.2V";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* Page 27 / DSI to Display */
+ sn65dsi86_refclk: clk-x9 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ };
+
+ /* Page 30 / Audio_Codec */
+ sound_card: sound {
+ compatible = "audio-graph-card2";
+ links = <&msiof1_snd>;
+ };
+
+ /* Page 17 uSD-Slot */
+ vcc_sdhi: regulator-vcc-sdhi {
+ compatible = "regulator-gpio";
+ regulator-name = "SDHI VccQ";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ gpios = <&gpio8 13 GPIO_ACTIVE_HIGH>;
+ gpios-states = <1>;
+ states = <3300000 0>, <1800000 1>;
+ };
+};
+
+&audio_clkin {
+ clock-frequency = <24576000>;
+};
+
+/* Page 22 / Ether_AVB0 */
+&avb0 {
+ pinctrl-0 = <&avb0_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&avb0_phy>;
+ tx-internal-delay-ps = <2000>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ avb0_phy: ethernet-phy@0 { /* KSZ9031RNXVB */
+ compatible = "ethernet-phy-id0022.1622",
+ "ethernet-phy-ieee802.3-c22";
+ rxc-skew-ps = <1500>;
+ reg = <0>;
+ /* AVB0_PHY_INT_V */
+ interrupts-extended = <&gpio7 5 IRQ_TYPE_LEVEL_LOW>;
+ /* GP7_10/AVB0_RESETN_V */
+ reset-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <300>;
+ };
+ };
+};
+
+/* Page 28 / CANFD_IF */
+&can_clk {
+ clock-frequency = <40000000>;
+};
+
+/* Page 28 / CANFD_IF */
+&canfd {
+ pinctrl-0 = <&canfd3_pins>, <&canfd4_pins>, <&can_clk_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ channel3 {
+ status = "okay";
+ };
+
+ channel4 {
+ status = "okay";
+ };
+};
+
+/* Page 27 / DSI to Display */
+&dsi1 {
+ status = "okay";
+
+ ports {
+ port@1 {
+ dsi1_out: endpoint {
+ remote-endpoint = <&sn65dsi86_in>;
+ data-lanes = <1 2 3 4>;
+ };
+ };
+ };
+};
+
+/* Page 27 / DSI to Display */
+&du {
+ status = "okay";
+};
+
+/* Page 5 / R-Car V4H_INT_I2C */
+&extal_clk { /* X3 */
+ clock-frequency = <16666666>;
+};
+
+/* Page 5 / R-Car V4H_INT_I2C */
+&extalr_clk { /* X2 */
+ clock-frequency = <32768>;
+};
+
+/* Page 26 / 2230 Key M M.2 */
+&gpio4 {
+ /* 9FGV0441 nOE inputs 0 and 1 */
+ pcie-m2-oe-hog {
+ gpio-hog;
+ gpios = <21 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "PCIe-CLK-nOE-M2";
+ };
+
+ /* 9FGV0441 nOE inputs 2 and 3 */
+ pcie-usb-oe-hog {
+ gpio-hog;
+ gpios = <22 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "PCIe-CLK-nOE-USB";
+ };
+};
+
+/* Page 23 / DEBUG */
+&hscif0 { /* FTDI ADBUS[3:0] */
+ pinctrl-0 = <&hscif0_pins>;
+ pinctrl-names = "default";
+ uart-has-rtscts;
+ bootph-all;
+
+ status = "okay";
+};
+
+/* Page 23 / DEBUG */
+&hscif1 { /* FTDI BDBUS[3:0] */
+ pinctrl-0 = <&hscif1_pins>;
+ pinctrl-names = "default";
+ uart-has-rtscts;
+
+ status = "okay";
+};
+
+/* Page 24 / UART */
+&hscif3 { /* CN7 pins 8 (TX) and 10 (RX) */
+ pinctrl-0 = <&hscif3_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+/* Page 24 / I2C SWITCH */
+&i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&i2c0_pins>;
+ pinctrl-names = "default";
+ clock-frequency = <400000>;
+ status = "okay";
+
+ mux@71 {
+ compatible = "nxp,pca9544"; /* TCA9544 */
+ reg = <0x71>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ vdd-supply = <&reg_3p3v>;
+
+ i2c0_mux0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Page 27 / DSI to Display */
+ bridge@2c {
+ pinctrl-0 = <&irq0_pins>;
+ pinctrl-names = "default";
+
+ compatible = "ti,sn65dsi86";
+ reg = <0x2c>;
+
+ clocks = <&sn65dsi86_refclk>;
+ clock-names = "refclk";
+
+ interrupts-extended = <&intc_ex 0 IRQ_TYPE_LEVEL_HIGH>;
+
+ enable-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+
+ vccio-supply = <&reg_1p8v>;
+ vpll-supply = <&reg_1p8v>;
+ vcca-supply = <&reg_1p2v>;
+ vcc-supply = <&reg_1p2v>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ sn65dsi86_in: endpoint {
+ remote-endpoint = <&dsi1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ sn65dsi86_out: endpoint {
+ remote-endpoint = <&mini_dp_con_in>;
+ };
+ };
+ };
+ };
+ };
+
+ i2c0_mux1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Page 30 / Audio_Codec */
+ codec@1a {
+ compatible = "dlg,da7212";
+
+ #sound-dai-cells = <0>;
+ reg = <0x1a>;
+
+ clocks = <&rcar_sound>;
+ clock-names = "mclk";
+
+ VDDA-supply = <&reg_1p8v>;
+ VDDMIC-supply = <&reg_3p3v>;
+ VDDIO-supply = <&reg_3p3v>;
+
+ port {
+ da7212_endpoint: endpoint {
+ bitclock-master;
+ frame-master;
+ remote-endpoint = <&msiof1_snd_endpoint>;
+ };
+ };
+ };
+ };
+
+ i2c0_mux2: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c0_mux3: i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+/* Page 29 / CSI_IF_CN / CAM_CN0 */
+&i2c1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&i2c1_pins>;
+ pinctrl-names = "default";
+};
+
+/* Page 29 / CSI_IF_CN / CAM_CN1 */
+&i2c2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&i2c2_pins>;
+ pinctrl-names = "default";
+};
+
+/* Page 31 / IO_CN */
+&i2c3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&i2c3_pins>;
+ pinctrl-names = "default";
+};
+
+/* Page 31 / IO_CN */
+&i2c4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&i2c4_pins>;
+ pinctrl-names = "default";
+};
+
+/* Page 18 / POWER_CORE and Page 19 / POWER_PMIC */
+&i2c5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&i2c5_pins>;
+ pinctrl-names = "default";
+};
+
+/* Page 17 uSD-Slot */
+&mmc0 {
+ pinctrl-0 = <&sd_pins>;
+ pinctrl-1 = <&sd_uhs_pins>;
+ pinctrl-names = "default", "state_uhs";
+ bus-width = <4>;
+ cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>; /* SD_CD */
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&vcc_sdhi>;
+ status = "okay";
+};
+
+&msiof1 {
+ pinctrl-0 = <&msiof1_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ /* ignore DT warning */
+ /delete-property/#address-cells;
+ /delete-property/#size-cells;
+
+ msiof1_snd: port {
+ msiof1_snd_endpoint: endpoint {
+ remote-endpoint = <&da7212_endpoint>;
+ };
+ };
+};
+
+/* Page 26 / 2230 Key M M.2 */
+&pcie0_clkref {
+ clock-frequency = <100000000>;
+};
+
+&pciec0 {
+ reset-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+/* Page 25 / PCIe to USB */
+&pcie1_clkref {
+ clock-frequency = <100000000>;
+};
+
+&pciec1 {
+ /* uPD720201 is a PCIe Gen2 x1 device */
+ num-lanes = <1>;
+ reset-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&pfc {
+ pinctrl-0 = <&scif_clk_pins>;
+ pinctrl-names = "default";
+
+ /* Page 22 / Ether_AVB0 */
+ avb0_pins: avb0 {
+ mux {
+ groups = "avb0_link", "avb0_mdio", "avb0_rgmii",
+ "avb0_txcrefclk";
+ function = "avb0";
+ };
+
+ pins-mdio {
+ groups = "avb0_mdio";
+ drive-strength = <21>;
+ };
+
+ pins-mii {
+ groups = "avb0_rgmii";
+ drive-strength = <21>;
+ };
+
+ };
+
+ /* Page 28 / CANFD_IF */
+ can_clk_pins: can-clk {
+ groups = "can_clk";
+ function = "can_clk";
+ };
+
+ /* Page 28 / CANFD_IF */
+ canfd3_pins: canfd3 {
+ groups = "canfd3_data";
+ function = "canfd3";
+ };
+
+ /* Page 28 / CANFD_IF */
+ canfd4_pins: canfd4 {
+ groups = "canfd4_data";
+ function = "canfd4";
+ };
+
+ /* Page 23 / DEBUG */
+ hscif0_pins: hscif0 {
+ groups = "hscif0_data", "hscif0_ctrl";
+ function = "hscif0";
+ };
+
+ /* Page 23 / DEBUG */
+ hscif1_pins: hscif1 {
+ groups = "hscif1_data_a", "hscif1_ctrl_a";
+ function = "hscif1";
+ };
+
+ /* Page 24 / UART */
+ hscif3_pins: hscif3 {
+ groups = "hscif3_data_a";
+ function = "hscif3";
+ };
+
+ /* Page 24 / I2C SWITCH */
+ i2c0_pins: i2c0 {
+ groups = "i2c0";
+ function = "i2c0";
+ };
+
+ /* Page 29 / CSI_IF_CN / CAM_CN0 */
+ i2c1_pins: i2c1 {
+ groups = "i2c1";
+ function = "i2c1";
+ };
+
+ /* Page 29 / CSI_IF_CN / CAM_CN1 */
+ i2c2_pins: i2c2 {
+ groups = "i2c2";
+ function = "i2c2";
+ };
+
+ /* Page 31 / IO_CN */
+ i2c3_pins: i2c3 {
+ groups = "i2c3";
+ function = "i2c3";
+ };
+
+ /* Page 31 / IO_CN */
+ i2c4_pins: i2c4 {
+ groups = "i2c4";
+ function = "i2c4";
+ };
+
+ /* Page 18 / POWER_CORE */
+ i2c5_pins: i2c5 {
+ groups = "i2c5";
+ function = "i2c5";
+ };
+
+ /* Page 27 / DSI to Display */
+ irq0_pins: irq0 {
+ groups = "intc_ex_irq0_a";
+ function = "intc_ex";
+ };
+
+ /* Page 31 / FAN */
+ irq4_pins: irq4 {
+ groups = "intc_ex_irq4_b";
+ function = "intc_ex";
+ };
+
+ /* Page 31 / FAN */
+ pwm0_pins: pwm0 {
+ groups = "pwm0";
+ function = "pwm0";
+ };
+
+ /* Page 31 / CN7 pin 12 */
+ pwm1_pins: pwm1 {
+ groups = "pwm1_b";
+ function = "pwm1";
+ };
+
+ /* Page 31 / CN7 pin 32 */
+ pwm6_pins: pwm6 {
+ groups = "pwm6";
+ function = "pwm6";
+ };
+
+ /* Page 31 / CN7 pin 33 */
+ pwm7_pins: pwm7 {
+ groups = "pwm7";
+ function = "pwm7";
+ };
+
+ /* Page 16 / QSPI_FLASH */
+ qspi0_pins: qspi0 {
+ groups = "qspi0_ctrl", "qspi0_data4";
+ function = "qspi0";
+ bootph-all;
+ };
+
+ /* Page 6 / SCIF_CLK_SOC_V */
+ scif_clk_pins: scif-clk {
+ groups = "scif_clk";
+ function = "scif_clk";
+ };
+
+ /* Page 17 uSD-Slot */
+ sd_pins: sd {
+ groups = "mmc_data4", "mmc_ctrl";
+ function = "mmc";
+ power-source = <3300>;
+ };
+
+ /* Page 17 uSD-Slot */
+ sd_uhs_pins: sd-uhs {
+ groups = "mmc_data4", "mmc_ctrl";
+ function = "mmc";
+ power-source = <1800>;
+ };
+
+ /* Page 30 / Audio_Codec */
+ msiof1_pins: sound {
+ groups = "msiof1_clk", "msiof1_sync", "msiof1_txd", "msiof1_rxd";
+ function = "msiof1";
+ };
+
+ /* Page 30 / Audio_Codec */
+ sound_clk_pins: sound-clk {
+ groups = "audio_clkin", "audio_clkout";
+ function = "audio_clk";
+ };
+};
+
+/* Page 30 / Audio_Codec */
+&rcar_sound {
+ pinctrl-0 = <&sound_clk_pins>;
+ pinctrl-names = "default";
+
+ /* The ADG clock output is used as DA7212_MCLK */
+
+ /* audio_clkout */
+ clock-frequency = <12288000>; /* 48 kHz groups */
+
+ status = "okay";
+};
+
+/* Page 31 / FAN */
+&pwm0 {
+ pinctrl-0 = <&pwm0_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+/* Page 31 / CN7 pin 12 */
+&pwm1 {
+ pinctrl-0 = <&pwm1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+/* Page 31 / CN7 pin 32 */
+&pwm6 {
+ pinctrl-0 = <&pwm6_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+/* Page 31 / CN7 pin 33 */
+&pwm7 {
+ pinctrl-0 = <&pwm7_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+/* Page 16 / QSPI_FLASH */
+&rpc {
+ pinctrl-0 = <&qspi0_pins>;
+ pinctrl-names = "default";
+ bootph-all;
+
+ status = "okay";
+
+ flash@0 {
+ compatible = "spansion,s25fs512s", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <4>;
+ bootph-all;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ boot@0 {
+ reg = <0x0 0x1000000>;
+ read-only;
+ };
+
+ user@1000000 {
+ reg = <0x1000000 0x2f80000>;
+ };
+
+ env1@3f80000 {
+ reg = <0x3f80000 0x40000>;
+ };
+
+ env2@3fc0000 {
+ reg = <0x3fc0000 0x40000>;
+ };
+ };
+ };
+};
+
+&rwdt {
+ timeout-sec = <60>;
+ status = "okay";
+};
+
+/* Page 6 / SCIF_CLK_SOC_V */
+&scif_clk { /* X12 */
+ clock-frequency = <24000000>;
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
index 8524a1e7205e..ed1eefa3515d 100644
--- a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
@@ -1968,13 +1968,20 @@
isp0: isp@fed00000 {
compatible = "renesas,r8a779h0-isp",
"renesas,rcar-gen4-isp";
- reg = <0 0xfed00000 0 0x10000>;
- interrupts = <GIC_SPI 473 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&cpg CPG_MOD 612>;
+ reg = <0 0xfed00000 0 0x10000>, <0 0xfec00000 0 0x100000>;
+ reg-names = "cs", "core";
+ interrupts = <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cs", "core";
+ clocks = <&cpg CPG_MOD 612>, <&cpg CPG_MOD 16>;
+ clock-names = "cs", "core";
power-domains = <&sysc R8A779H0_PD_A3ISP0>;
- resets = <&cpg 612>;
+ resets = <&cpg 612>, <&cpg 16>;
+ reset-names = "cs", "core";
status = "disabled";
+ renesas,vspx = <&vspx0>;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -2053,10 +2060,14 @@
compatible = "renesas,r8a779h0-isp",
"renesas,rcar-gen4-isp";
reg = <0 0xfed20000 0 0x10000>;
- interrupts = <GIC_SPI 474 IRQ_TYPE_LEVEL_LOW>;
+ reg-names = "cs";
+ interrupts = <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cs";
clocks = <&cpg CPG_MOD 613>;
+ clock-names = "cs";
power-domains = <&sysc R8A779H0_PD_A3ISP0>;
resets = <&cpg 613>;
+ reset-names = "cs";
status = "disabled";
ports {
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
index 6b1c77cd8261..ecaa9c4f305c 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
@@ -244,6 +244,121 @@
status = "disabled";
};
+ gpt: pwm@10048000 {
+ compatible = "renesas,r9a07g044-gpt",
+ "renesas,rzg2l-gpt";
+ reg = <0 0x10048000 0 0x800>;
+ #pwm-cells = <3>;
+ interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 219 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 220 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 221 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 222 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 223 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 224 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 225 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 226 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 227 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 231 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 232 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 233 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 234 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 235 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 236 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 237 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 238 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 239 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 240 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 244 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 245 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 246 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 247 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 248 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 249 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 250 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 251 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 252 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 253 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 257 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 258 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 259 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 260 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 261 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 262 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 263 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 265 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 266 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 270 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 271 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 272 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 273 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 274 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 275 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 276 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 277 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 278 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 279 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 283 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 284 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 285 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 286 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 287 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 288 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 289 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 290 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 291 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 292 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 296 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 297 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 298 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 299 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 300 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 301 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 302 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 303 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 304 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 305 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 309 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 310 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 311 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 312 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 313 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 314 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 315 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 316 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 317 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 318 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ccmpa0", "ccmpb0", "cmpc0", "cmpd0",
+ "cmpe0", "cmpf0", "adtrga0", "adtrgb0",
+ "ovf0", "unf0",
+ "ccmpa1", "ccmpb1", "cmpc1", "cmpd1",
+ "cmpe1", "cmpf1", "adtrga1", "adtrgb1",
+ "ovf1", "unf1",
+ "ccmpa2", "ccmpb2", "cmpc2", "cmpd2",
+ "cmpe2", "cmpf2", "adtrga2", "adtrgb2",
+ "ovf2", "unf2",
+ "ccmpa3", "ccmpb3", "cmpc3", "cmpd3",
+ "cmpe3", "cmpf3", "adtrga3", "adtrgb3",
+ "ovf3", "unf3",
+ "ccmpa4", "ccmpb4", "cmpc4", "cmpd4",
+ "cmpe4", "cmpf4", "adtrga4", "adtrgb4",
+ "ovf4", "unf4",
+ "ccmpa5", "ccmpb5", "cmpc5", "cmpd5",
+ "cmpe5", "cmpf5", "adtrga5", "adtrgb5",
+ "ovf5", "unf5",
+ "ccmpa6", "ccmpb6", "cmpc6", "cmpd6",
+ "cmpe6", "cmpf6", "adtrga6", "adtrgb6",
+ "ovf6", "unf6",
+ "ccmpa7", "ccmpb7", "cmpc7", "cmpd7",
+ "cmpe7", "cmpf7", "adtrga7", "adtrgb7",
+ "ovf7", "unf7";
+ clocks = <&cpg CPG_MOD R9A07G044_GPT_PCLK>;
+ resets = <&cpg R9A07G044_GPT_RST_C>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
ssi0: ssi@10049c00 {
compatible = "renesas,r9a07g044-ssi",
"renesas,rz-ssi";
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044l2-smarc.dts b/arch/arm64/boot/dts/renesas/r9a07g044l2-smarc.dts
index 568d49cfe44a..b36749f94ccb 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g044l2-smarc.dts
+++ b/arch/arm64/boot/dts/renesas/r9a07g044l2-smarc.dts
@@ -27,6 +27,13 @@
#error "Cannot set 1 to MTU3_COUNTER_Z_PHASE_SIGNAL as PMOD_MTU3=0"
#endif
+/*
+ * To enable the GPT pins GTIOC4A(PMOD0_PIN7) and GTIOC4B(PMOD0_PIN10) on the
+ * PMOD0 connector (J1), enable PMOD0_GPT by setting "#define PMOD0_GPT 1"
+ * below.
+ */
+#define PMOD0_GPT 0
+
#include "r9a07g044l2.dtsi"
#include "rzg2l-smarc-som.dtsi"
#include "rzg2l-smarc-pinfunction.dtsi"
diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
index 01f59914dd09..669eca74da0a 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
@@ -244,6 +244,121 @@
status = "disabled";
};
+ gpt: pwm@10048000 {
+ compatible = "renesas,r9a07g054-gpt",
+ "renesas,rzg2l-gpt";
+ reg = <0 0x10048000 0 0x800>;
+ #pwm-cells = <3>;
+ interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 219 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 220 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 221 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 222 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 223 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 224 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 225 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 226 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 227 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 231 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 232 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 233 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 234 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 235 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 236 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 237 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 238 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 239 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 240 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 244 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 245 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 246 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 247 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 248 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 249 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 250 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 251 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 252 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 253 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 257 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 258 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 259 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 260 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 261 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 262 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 263 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 265 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 266 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 270 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 271 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 272 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 273 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 274 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 275 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 276 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 277 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 278 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 279 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 283 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 284 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 285 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 286 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 287 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 288 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 289 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 290 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 291 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 292 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 296 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 297 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 298 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 299 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 300 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 301 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 302 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 303 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 304 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 305 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 309 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 310 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 311 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 312 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 313 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 314 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 315 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 316 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 317 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 318 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ccmpa0", "ccmpb0", "cmpc0", "cmpd0",
+ "cmpe0", "cmpf0", "adtrga0", "adtrgb0",
+ "ovf0", "unf0",
+ "ccmpa1", "ccmpb1", "cmpc1", "cmpd1",
+ "cmpe1", "cmpf1", "adtrga1", "adtrgb1",
+ "ovf1", "unf1",
+ "ccmpa2", "ccmpb2", "cmpc2", "cmpd2",
+ "cmpe2", "cmpf2", "adtrga2", "adtrgb2",
+ "ovf2", "unf2",
+ "ccmpa3", "ccmpb3", "cmpc3", "cmpd3",
+ "cmpe3", "cmpf3", "adtrga3", "adtrgb3",
+ "ovf3", "unf3",
+ "ccmpa4", "ccmpb4", "cmpc4", "cmpd4",
+ "cmpe4", "cmpf4", "adtrga4", "adtrgb4",
+ "ovf4", "unf4",
+ "ccmpa5", "ccmpb5", "cmpc5", "cmpd5",
+ "cmpe5", "cmpf5", "adtrga5", "adtrgb5",
+ "ovf5", "unf5",
+ "ccmpa6", "ccmpb6", "cmpc6", "cmpd6",
+ "cmpe6", "cmpf6", "adtrga6", "adtrgb6",
+ "ovf6", "unf6",
+ "ccmpa7", "ccmpb7", "cmpc7", "cmpd7",
+ "cmpe7", "cmpf7", "adtrga7", "adtrgb7",
+ "ovf7", "unf7";
+ clocks = <&cpg CPG_MOD R9A07G054_GPT_PCLK>;
+ resets = <&cpg R9A07G054_GPT_RST_C>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
ssi0: ssi@10049c00 {
compatible = "renesas,r9a07g054-ssi",
"renesas,rz-ssi";
diff --git a/arch/arm64/boot/dts/renesas/r9a07g054l2-smarc.dts b/arch/arm64/boot/dts/renesas/r9a07g054l2-smarc.dts
index b3e6016880dd..43c456ffa63c 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g054l2-smarc.dts
+++ b/arch/arm64/boot/dts/renesas/r9a07g054l2-smarc.dts
@@ -26,6 +26,13 @@
#error "Cannot set 1 to MTU3_COUNTER_Z_PHASE_SIGNAL as PMOD_MTU3=0"
#endif
+/*
+ * To enable the GPT pins GTIOC4A(PMOD0_PIN7) and GTIOC4B(PMOD0_PIN10) on the
+ * PMOD0 connector (J1), enable PMOD0_GPT by setting "#define PMOD0_GPT 1"
+ * below.
+ */
+#define PMOD0_GPT 0
+
#include "r9a07g054l2.dtsi"
#include "rzg2l-smarc-som.dtsi"
#include "rzg2l-smarc-pinfunction.dtsi"
diff --git a/arch/arm64/boot/dts/renesas/r9a09g047.dtsi b/arch/arm64/boot/dts/renesas/r9a09g047.dtsi
index c93aa16d0a6e..876f70fed433 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g047.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a09g047.dtsi
@@ -105,6 +105,35 @@
};
};
+ gpu_opp_table: opp-table-1 {
+ compatible = "operating-points-v2";
+
+ opp-630000000 {
+ opp-hz = /bits/ 64 <630000000>;
+ opp-microvolt = <800000>;
+ };
+
+ opp-315000000 {
+ opp-hz = /bits/ 64 <315000000>;
+ opp-microvolt = <800000>;
+ };
+
+ opp-157500000 {
+ opp-hz = /bits/ 64 <157500000>;
+ opp-microvolt = <800000>;
+ };
+
+ opp-78750000 {
+ opp-hz = /bits/ 64 <78750000>;
+ opp-microvolt = <800000>;
+ };
+
+ opp-19687500 {
+ opp-hz = /bits/ 64 <19687500>;
+ opp-microvolt = <800000>;
+ };
+ };
+
psci {
compatible = "arm,psci-1.0", "arm,psci-0.2";
method = "smc";
@@ -131,6 +160,95 @@
#size-cells = <2>;
ranges;
+ icu: interrupt-controller@10400000 {
+ compatible = "renesas,r9a09g047-icu";
+ reg = <0 0x10400000 0 0x10000>;
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 433 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 434 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 435 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 436 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 437 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 439 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 441 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 442 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 443 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 262 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 263 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 265 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 452 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 453 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 454 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "nmi",
+ "port_irq0", "port_irq1", "port_irq2",
+ "port_irq3", "port_irq4", "port_irq5",
+ "port_irq6", "port_irq7", "port_irq8",
+ "port_irq9", "port_irq10", "port_irq11",
+ "port_irq12", "port_irq13", "port_irq14",
+ "port_irq15",
+ "tint0", "tint1", "tint2", "tint3",
+ "tint4", "tint5", "tint6", "tint7",
+ "tint8", "tint9", "tint10", "tint11",
+ "tint12", "tint13", "tint14", "tint15",
+ "tint16", "tint17", "tint18", "tint19",
+ "tint20", "tint21", "tint22", "tint23",
+ "tint24", "tint25", "tint26", "tint27",
+ "tint28", "tint29", "tint30", "tint31",
+ "int-ca55-0", "int-ca55-1",
+ "int-ca55-2", "int-ca55-3",
+ "icu-error-ca55",
+ "gpt-u0-gtciada", "gpt-u0-gtciadb",
+ "gpt-u1-gtciada", "gpt-u1-gtciadb";
+ clocks = <&cpg CPG_MOD 0x5>;
+ power-domains = <&cpg>;
+ resets = <&cpg 0x36>;
+ };
+
pinctrl: pinctrl@10410000 {
compatible = "renesas,r9a09g047-pinctrl";
reg = <0 0x10410000 0 0x10000>;
@@ -140,6 +258,7 @@
gpio-ranges = <&pinctrl 0 0 232>;
#interrupt-cells = <2>;
interrupt-controller;
+ interrupt-parent = <&icu>;
power-domains = <&cpg>;
resets = <&cpg 0xa5>, <&cpg 0xa6>;
};
@@ -182,6 +301,66 @@
status = "disabled";
};
+ canfd: can@12440000 {
+ compatible = "renesas,r9a09g047-canfd";
+ reg = <0 0x12440000 0 0x40000>;
+ interrupts = <GIC_SPI 709 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 710 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 703 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 711 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 698 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 704 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 712 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 699 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 705 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 713 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 700 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 706 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 714 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 701 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 715 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 702 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 708 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 716 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "g_err", "g_recc",
+ "ch0_err", "ch0_rec", "ch0_trx",
+ "ch1_err", "ch1_rec", "ch1_trx",
+ "ch2_err", "ch2_rec", "ch2_trx",
+ "ch3_err", "ch3_rec", "ch3_trx",
+ "ch4_err", "ch4_rec", "ch4_trx",
+ "ch5_err", "ch5_rec", "ch5_trx";
+ clocks = <&cpg CPG_MOD 0x9c>, <&cpg CPG_MOD 0x9d>,
+ <&cpg CPG_MOD 0x9e>;
+ clock-names = "fck", "ram_clk", "can_clk";
+ assigned-clocks = <&cpg CPG_MOD 0x9e>;
+ assigned-clock-rates = <80000000>;
+ resets = <&cpg 0xa1>, <&cpg 0xa2>;
+ reset-names = "rstp_n", "rstc_n";
+ power-domains = <&cpg>;
+ status = "disabled";
+
+ channel0 {
+ status = "disabled";
+ };
+ channel1 {
+ status = "disabled";
+ };
+ channel2 {
+ status = "disabled";
+ };
+ channel3 {
+ status = "disabled";
+ };
+ channel4 {
+ status = "disabled";
+ };
+ channel5 {
+ status = "disabled";
+ };
+ };
+
wdt1: watchdog@14400000 {
compatible = "renesas,r9a09g047-wdt", "renesas,r9a09g057-wdt";
reg = <0 0x14400000 0 0x400>;
@@ -401,6 +580,26 @@
status = "disabled";
};
+ gpu: gpu@14850000 {
+ compatible = "renesas,r9a09g047-mali",
+ "arm,mali-bifrost";
+ reg = <0x0 0x14850000 0x0 0x10000>;
+ interrupts = <GIC_SPI 884 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 885 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 883 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 886 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "job", "mmu", "gpu", "event";
+ clocks = <&cpg CPG_MOD 0xf0>,
+ <&cpg CPG_MOD 0xf1>,
+ <&cpg CPG_MOD 0xf2>;
+ clock-names = "gpu", "bus", "bus_ace";
+ power-domains = <&cpg>;
+ resets = <&cpg 0xdd>, <&cpg 0xde>, <&cpg 0xdf>;
+ reset-names = "rst", "axi_rst", "ace_rst";
+ operating-points-v2 = <&gpu_opp_table>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@14900000 {
compatible = "arm,gic-v3";
reg = <0x0 0x14900000 0 0x20000>,
@@ -410,6 +609,66 @@
interrupt-controller;
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
};
+
+ sdhi0: mmc@15c00000 {
+ compatible = "renesas,sdhi-r9a09g047", "renesas,sdhi-r9a09g057";
+ reg = <0x0 0x15c00000 0 0x10000>;
+ interrupts = <GIC_SPI 735 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 736 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 0xa3>, <&cpg CPG_MOD 0xa5>,
+ <&cpg CPG_MOD 0xa4>, <&cpg CPG_MOD 0xa6>;
+ clock-names = "core", "clkh", "cd", "aclk";
+ resets = <&cpg 0xa7>;
+ power-domains = <&cpg>;
+ status = "disabled";
+
+ sdhi0_vqmmc: vqmmc-regulator {
+ regulator-name = "SDHI0-VQMMC";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ status = "disabled";
+ };
+ };
+
+ sdhi1: mmc@15c10000 {
+ compatible = "renesas,sdhi-r9a09g047", "renesas,sdhi-r9a09g057";
+ reg = <0x0 0x15c10000 0 0x10000>;
+ interrupts = <GIC_SPI 737 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 738 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 0xa7>, <&cpg CPG_MOD 0xa9>,
+ <&cpg CPG_MOD 0xa8>, <&cpg CPG_MOD 0xaa>;
+ clock-names = "core", "clkh", "cd", "aclk";
+ resets = <&cpg 0xa8>;
+ power-domains = <&cpg>;
+ status = "disabled";
+
+ sdhi1_vqmmc: vqmmc-regulator {
+ regulator-name = "SDHI1-VQMMC";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ status = "disabled";
+ };
+ };
+
+ sdhi2: mmc@15c20000 {
+ compatible = "renesas,sdhi-r9a09g047", "renesas,sdhi-r9a09g057";
+ reg = <0x0 0x15c20000 0 0x10000>;
+ interrupts = <GIC_SPI 739 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 740 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 0xab>, <&cpg CPG_MOD 0xad>,
+ <&cpg CPG_MOD 0xac>, <&cpg CPG_MOD 0xae>;
+ clock-names = "core", "clkh", "cd", "aclk";
+ resets = <&cpg 0xa9>;
+ power-domains = <&cpg>;
+ status = "disabled";
+
+ sdhi2_vqmmc: vqmmc-regulator {
+ regulator-name = "SDHI2-VQMMC";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ status = "disabled";
+ };
+ };
};
timer {
diff --git a/arch/arm64/boot/dts/renesas/r9a09g047e57-smarc.dts b/arch/arm64/boot/dts/renesas/r9a09g047e57-smarc.dts
index c063d47e2952..1f5e61a73c35 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g047e57-smarc.dts
+++ b/arch/arm64/boot/dts/renesas/r9a09g047e57-smarc.dts
@@ -7,6 +7,15 @@
/dts-v1/;
+/* Switch selection settings */
+#define SW_GPIO8_CAN0_STB 0
+#define SW_GPIO9_CAN1_STB 0
+#define SW_LCD_EN 0
+#define SW_PDM_EN 0
+#define SW_SD0_DEV_SEL 0
+#define SW_SDIO_M2E 0
+
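[Editor's sketch, not part of this patch: the SW_* defines above gate the #if blocks further down; presumably each mirrors a physical switch setting on the carrier board. For example, routing CAN0 through the on-board transceiver could hypothetically be enabled as follows, provided the matching switch is set.]

	/* Hypothetical: enable CAN0 transceiver standby-GPIO routing */
	#define SW_GPIO8_CAN0_STB	1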
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h>
#include "r9a09g047e57.dtsi"
#include "rzg3e-smarc-som.dtsi"
@@ -16,16 +25,109 @@
model = "Renesas SMARC EVK version 2 based on r9a09g047e57";
compatible = "renesas,smarc2-evk", "renesas,rzg3e-smarcm",
"renesas,r9a09g047e57", "renesas,r9a09g047";
+
+ vqmmc_sd1_pvdd: regulator-vqmmc-sd1-pvdd {
+ compatible = "regulator-gpio";
+ regulator-name = "SD1_PVDD";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ gpios = <&pinctrl RZG3E_GPIO(1, 5) GPIO_ACTIVE_HIGH>;
+ gpios-states = <0>;
+ states = <3300000 0>, <1800000 1>;
+ };
+};
+
+&canfd {
+ pinctrl-0 = <&canfd_pins>;
+ pinctrl-names = "default";
+
+#if (!SW_PDM_EN)
+ channel1 {
+ status = "okay";
+#if (!SW_LCD_EN) && (SW_GPIO9_CAN1_STB)
+ phys = <&can_transceiver1>;
+#endif
+ };
+#endif
+
+#if (!SW_LCD_EN)
+ channel4 {
+ status = "okay";
+#if (SW_GPIO8_CAN0_STB)
+ phys = <&can_transceiver0>;
+#endif
+ };
+#endif
+};
+
+#if (!SW_LCD_EN) && (SW_GPIO8_CAN0_STB)
+&can_transceiver0 {
+ standby-gpios = <&pinctrl RZG3E_GPIO(5, 4) GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+#endif
+
+#if (!SW_LCD_EN) && (SW_GPIO9_CAN1_STB)
+&can_transceiver1 {
+ standby-gpios = <&pinctrl RZG3E_GPIO(5, 5) GPIO_ACTIVE_HIGH>;
+ status = "okay";
};
+#endif
&pinctrl {
+ canfd_pins: canfd {
+ can1_pins: can1 {
+ pinmux = <RZG3E_PORT_PINMUX(L, 2, 3)>, /* RX */
+ <RZG3E_PORT_PINMUX(L, 3, 3)>; /* TX */
+ };
+
+ can4_pins: can4 {
+ pinmux = <RZG3E_PORT_PINMUX(5, 2, 3)>, /* RX */
+ <RZG3E_PORT_PINMUX(5, 3, 3)>; /* TX */
+ };
+ };
+
scif_pins: scif {
pins = "SCIF_TXD", "SCIF_RXD";
renesas,output-impedance = <1>;
};
+
+ sd1-pwr-en-hog {
+ gpio-hog;
+ gpios = <RZG3E_GPIO(1, 6) GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "sd1_pwr_en";
+ };
+
+ sdhi1_pins: sd1 {
+ sd1-cd {
+ pinmux = <RZG3E_PORT_PINMUX(1, 4, 8)>; /* SD1CD */
+ };
+
+ sd1-ctrl {
+ pinmux = <RZG3E_PORT_PINMUX(G, 0, 1)>, /* SD1CLK */
+ <RZG3E_PORT_PINMUX(G, 1, 1)>; /* SD1CMD */
+ };
+
+ sd1-data {
+ pinmux = <RZG3E_PORT_PINMUX(G, 2, 1)>, /* SD1DAT0 */
+ <RZG3E_PORT_PINMUX(G, 3, 1)>, /* SD1DAT1 */
+ <RZG3E_PORT_PINMUX(G, 4, 1)>, /* SD1DAT2 */
+ <RZG3E_PORT_PINMUX(G, 5, 1)>; /* SD1DAT3 */
+ };
+ };
};
&scif0 {
pinctrl-0 = <&scif_pins>;
pinctrl-names = "default";
};
+
+&sdhi1 {
+ pinctrl-0 = <&sdhi1_pins>;
+ pinctrl-1 = <&sdhi1_pins>;
+ pinctrl-names = "default", "state_uhs";
+
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&vqmmc_sd1_pvdd>;
+};
diff --git a/arch/arm64/boot/dts/renesas/r9a09g056.dtsi b/arch/arm64/boot/dts/renesas/r9a09g056.dtsi
new file mode 100644
index 000000000000..90964bd864cc
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a09g056.dtsi
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/V2N SoC
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <dt-bindings/clock/renesas,r9a09g056-cpg.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+
+/* RZV2N_Px = Offset address of PFC_P_mn - 0x20 */
+#define RZV2N_P0 0
+#define RZV2N_P1 1
+#define RZV2N_P2 2
+#define RZV2N_P3 3
+#define RZV2N_P4 4
+#define RZV2N_P5 5
+#define RZV2N_P6 6
+#define RZV2N_P7 7
+#define RZV2N_P8 8
+#define RZV2N_P9 9
+#define RZV2N_PA 10
+#define RZV2N_PB 11
+
+#define RZV2N_PORT_PINMUX(b, p, f) RZG2L_PORT_PINMUX(RZV2N_P##b, p, f)
+#define RZV2N_GPIO(port, pin) RZG2L_GPIO(RZV2N_P##port, pin)
+
+/ {
+ compatible = "renesas,r9a09g056";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ audio_extal_clk: audio-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ /*
+ * The default cluster table assumes that the PLLCA55 clock frequency is set to
+ * 1.7 GHz. The PLLCA55 clock frequency can be set to 1.7/1.6/1.5/1.1 GHz based
+ * on the BOOTPLLCA_0/1 pins (and can additionally be clocked at 1.8 GHz). The
+ * table below should be overridden in the board DTS according to the actual
+ * PLLCA55 clock frequency (see the sketch after this table).
+ */
+ cluster0_opp: opp-table-0 {
+ compatible = "operating-points-v2";
+
+ opp-1700000000 {
+ opp-hz = /bits/ 64 <1700000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <300000>;
+ };
+ opp-850000000 {
+ opp-hz = /bits/ 64 <850000000>;
+ opp-microvolt = <800000>;
+ clock-latency-ns = <300000>;
+ };
+ opp-425000000 {
+ opp-hz = /bits/ 64 <425000000>;
+ opp-microvolt = <800000>;
+ clock-latency-ns = <300000>;
+ };
+ opp-212500000 {
+ opp-hz = /bits/ 64 <212500000>;
+ opp-microvolt = <800000>;
+ clock-latency-ns = <300000>;
+ opp-suspend;
+ };
+ };
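[Editor's sketch, not part of this patch: a board strapped for a different PLLCA55 frequency could extend or override the table from its board DTS via the cluster0_opp label; the 1.8 GHz frequency and voltage below are hypothetical, illustrative values only.]

	&cluster0_opp {
		/* Hypothetical entry for a board strapped to 1.8 GHz PLLCA55 */
		opp-1800000000 {
			opp-hz = /bits/ 64 <1800000000>;
			opp-microvolt = <900000>;
			clock-latency-ns = <300000>;
		};
	};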
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a55";
+ reg = <0>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ clocks = <&cpg CPG_CORE R9A09G056_CA55_0_CORE_CLK0>;
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ cpu1: cpu@100 {
+ compatible = "arm,cortex-a55";
+ reg = <0x100>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ clocks = <&cpg CPG_CORE R9A09G056_CA55_0_CORE_CLK1>;
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ cpu2: cpu@200 {
+ compatible = "arm,cortex-a55";
+ reg = <0x200>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ clocks = <&cpg CPG_CORE R9A09G056_CA55_0_CORE_CLK2>;
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ cpu3: cpu@300 {
+ compatible = "arm,cortex-a55";
+ reg = <0x300>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ clocks = <&cpg CPG_CORE R9A09G056_CA55_0_CORE_CLK3>;
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ L3_CA55: cache-controller-0 {
+ compatible = "cache";
+ cache-unified;
+ cache-size = <0x100000>;
+ cache-level = <3>;
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0", "arm,psci-0.2";
+ method = "smc";
+ };
+
+ qextal_clk: qextal-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ rtxin_clk: rtxin-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ soc: soc {
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ pinctrl: pinctrl@10410000 {
+ compatible = "renesas,r9a09g056-pinctrl";
+ reg = <0 0x10410000 0 0x10000>;
+ clocks = <&cpg CPG_CORE R9A09G056_IOTOP_0_SHCLK>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 0 96>;
+ power-domains = <&cpg>;
+ resets = <&cpg 0xa5>, <&cpg 0xa6>;
+ };
+
+ cpg: clock-controller@10420000 {
+ compatible = "renesas,r9a09g056-cpg";
+ reg = <0 0x10420000 0 0x10000>;
+ clocks = <&audio_extal_clk>, <&rtxin_clk>, <&qextal_clk>;
+ clock-names = "audio_extal", "rtxin", "qextal";
+ #clock-cells = <2>;
+ #reset-cells = <1>;
+ #power-domain-cells = <0>;
+ };
+
+ sys: system-controller@10430000 {
+ compatible = "renesas,r9a09g056-sys";
+ reg = <0 0x10430000 0 0x10000>;
+ clocks = <&cpg CPG_CORE R9A09G056_SYS_0_PCLK>;
+ resets = <&cpg 0x30>;
+ };
+
+ scif: serial@11c01400 {
+ compatible = "renesas,scif-r9a09g056",
+ "renesas,scif-r9a09g057";
+ reg = <0 0x11c01400 0 0x400>;
+ interrupts = <GIC_SPI 529 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 532 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 533 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 530 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 534 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 531 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 536 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 537 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eri", "rxi", "txi", "bri", "dri",
+ "tei", "tei-dri", "rxi-edge", "txi-edge";
+ clocks = <&cpg CPG_MOD 0x8f>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ resets = <&cpg 0x95>;
+ status = "disabled";
+ };
+
+ gic: interrupt-controller@14900000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0x14900000 0 0x20000>,
+ <0x0 0x14940000 0 0x80000>;
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ sdhi0: mmc@15c00000 {
+ compatible = "renesas,sdhi-r9a09g056", "renesas,sdhi-r9a09g057";
+ reg = <0x0 0x15c00000 0 0x10000>;
+ interrupts = <GIC_SPI 735 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 736 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 0xa3>, <&cpg CPG_MOD 0xa5>,
+ <&cpg CPG_MOD 0xa4>, <&cpg CPG_MOD 0xa6>;
+ clock-names = "core", "clkh", "cd", "aclk";
+ resets = <&cpg 0xa7>;
+ power-domains = <&cpg>;
+ status = "disabled";
+
+ sdhi0_vqmmc: vqmmc-regulator {
+ regulator-name = "SDHI0-VQMMC";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ status = "disabled";
+ };
+ };
+
+ sdhi1: mmc@15c10000 {
+ compatible = "renesas,sdhi-r9a09g056", "renesas,sdhi-r9a09g057";
+ reg = <0x0 0x15c10000 0 0x10000>;
+ interrupts = <GIC_SPI 737 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 738 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 0xa7>, <&cpg CPG_MOD 0xa9>,
+ <&cpg CPG_MOD 0xa8>, <&cpg CPG_MOD 0xaa>;
+ clock-names = "core", "clkh", "cd", "aclk";
+ resets = <&cpg 0xa8>;
+ power-domains = <&cpg>;
+ status = "disabled";
+
+ sdhi1_vqmmc: vqmmc-regulator {
+ regulator-name = "SDHI1-VQMMC";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ status = "disabled";
+ };
+ };
+
+ sdhi2: mmc@15c20000 {
+ compatible = "renesas,sdhi-r9a09g056", "renesas,sdhi-r9a09g057";
+ reg = <0x0 0x15c20000 0 0x10000>;
+ interrupts = <GIC_SPI 739 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 740 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 0xab>, <&cpg CPG_MOD 0xad>,
+ <&cpg CPG_MOD 0xac>, <&cpg CPG_MOD 0xae>;
+ clock-names = "core", "clkh", "cd", "aclk";
+ resets = <&cpg 0xa9>;
+ power-domains = <&cpg>;
+ status = "disabled";
+
+ sdhi2_vqmmc: vqmmc-regulator {
+ regulator-name = "SDHI2-VQMMC";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ status = "disabled";
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys", "hyp-virt";
+ };
+};
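
The SoC .dtsi above deliberately leaves its external clocks at clock-frequency = <0> and bases cluster0_opp on a 1.7 GHz PLLCA55 strap, leaving both for the board to override (the EVK .dts below overrides the clocks). A minimal sketch of how a board .dts might rework the OPP table for the 1.8 GHz strap mentioned in the comment; the delete-and-replace approach and the 900000 uV figure are illustrative, not taken from this patch:

&cluster0_opp {
	/* Drop the default 1.7 GHz top entry before adding a replacement. */
	/delete-node/ opp-1700000000;

	/* Hypothetical top entry for a board strapped to 1.8 GHz */
	opp-1800000000 {
		opp-hz = /bits/ 64 <1800000000>;
		opp-microvolt = <900000>;
		clock-latency-ns = <300000>;
	};
};

The remaining entries (850/425/212.5 MHz are the 1.7 GHz value divided by 2/4/8) would be adjusted the same way for other straps.
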
diff --git a/arch/arm64/boot/dts/renesas/r9a09g056n48-rzv2n-evk.dts b/arch/arm64/boot/dts/renesas/r9a09g056n48-rzv2n-evk.dts
new file mode 100644
index 000000000000..24343fce7f53
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a09g056n48-rzv2n-evk.dts
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/V2N EVK board
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include "r9a09g056.dtsi"
+
+/ {
+ model = "Renesas RZ/V2N EVK Board based on r9a09g056n48";
+ compatible = "renesas,rzv2n-evk", "renesas,r9a09g056n48", "renesas,r9a09g056";
+
+ aliases {
+ mmc1 = &sdhi1;
+ serial0 = &scif;
+ };
+
+ chosen {
+ bootargs = "ignore_loglevel";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@48000000 {
+ device_type = "memory";
+		/* First 128MB is reserved for secure area. */
+ reg = <0x0 0x48000000 0x1 0xf8000000>;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vqmmc_sdhi1: regulator-vqmmc-sdhi1 {
+ compatible = "regulator-gpio";
+ regulator-name = "SDHI1 VqmmC";
+ gpios = <&pinctrl RZV2N_GPIO(A, 2) GPIO_ACTIVE_HIGH>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ gpios-states = <0>;
+ states = <3300000 0>, <1800000 1>;
+ };
+};
+
+&audio_extal_clk {
+ clock-frequency = <22579200>;
+};
+
+&pinctrl {
+ scif_pins: scif {
+ pins = "SCIF_TXD", "SCIF_RXD";
+ renesas,output-impedance = <1>;
+ };
+
+ sd1-pwr-en-hog {
+ gpio-hog;
+ gpios = <RZV2N_GPIO(A, 3) GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "sd1_pwr_en";
+ };
+
+ sdhi1_pins: sd1 {
+ sd1-cd {
+ pinmux = <RZV2N_PORT_PINMUX(9, 4, 14)>; /* SD1_CD */
+ };
+
+ sd1-clk {
+ pins = "SD1CLK";
+ renesas,output-impedance = <3>;
+ slew-rate = <0>;
+ };
+
+ sd1-dat-cmd {
+ pins = "SD1DAT0", "SD1DAT1", "SD1DAT2", "SD1DAT3", "SD1CMD";
+ input-enable;
+ renesas,output-impedance = <3>;
+ slew-rate = <0>;
+ };
+ };
+};
+
+&qextal_clk {
+ clock-frequency = <24000000>;
+};
+
+&rtxin_clk {
+ clock-frequency = <32768>;
+};
+
+&scif {
+ pinctrl-0 = <&scif_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&sdhi1 {
+ pinctrl-0 = <&sdhi1_pins>;
+ pinctrl-1 = <&sdhi1_pins>;
+ pinctrl-names = "default", "state_uhs";
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&vqmmc_sdhi1>;
+ bus-width = <4>;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r9a09g057.dtsi b/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
index 0cd00bb05191..18ab5639b301 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
@@ -653,6 +653,13 @@
resets = <&cpg 0xa7>;
power-domains = <&cpg>;
status = "disabled";
+
+ sdhi0_vqmmc: vqmmc-regulator {
+ regulator-name = "SDHI0-VQMMC";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ status = "disabled";
+ };
};
sdhi1: mmc@15c10000 {
@@ -666,6 +673,13 @@
resets = <&cpg 0xa8>;
power-domains = <&cpg>;
status = "disabled";
+
+ sdhi1_vqmmc: vqmmc-regulator {
+ regulator-name = "SDHI1-VQMMC";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ status = "disabled";
+ };
};
sdhi2: mmc@15c20000 {
@@ -679,6 +693,13 @@
resets = <&cpg 0xa9>;
power-domains = <&cpg>;
status = "disabled";
+
+ sdhi2_vqmmc: vqmmc-regulator {
+ regulator-name = "SDHI2-VQMMC";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/renesas/renesas-smarc2.dtsi b/arch/arm64/boot/dts/renesas/renesas-smarc2.dtsi
index e378d55e6e9b..afdc1940e24a 100644
--- a/arch/arm64/boot/dts/renesas/renesas-smarc2.dtsi
+++ b/arch/arm64/boot/dts/renesas/renesas-smarc2.dtsi
@@ -5,6 +5,26 @@
* Copyright (C) 2024 Renesas Electronics Corp.
*/
+/*
+ * Please set the switch position SW_OPT_MUX.1 on the carrier board and the
+ * corresponding macro SW_SDIO_M2E on the board DTS:
+ *
+ * SW_SDIO_M2E:
+ * 0 - SMARC SDIO signal is connected to uSD1
+ * 1 - SMARC SDIO signal is connected to M.2 Key E connector
+ *
+ * Please set the switch position SW_GPIO_CAN_PMOD on the carrier board and the
+ * corresponding macro SW_GPIO8_CAN0_STB/SW_GPIO9_CAN1_STB on the board DTS:
+ *
+ * SW_GPIO8_CAN0_STB:
+ * 0 - Connect to GPIO8 PMOD (default)
+ * 1 - Connect to CAN0 transceiver STB pin
+ *
+ * SW_GPIO9_CAN1_STB:
+ * 0 - Connect to GPIO9 PMOD (default)
+ * 1 - Connect to CAN1 transceiver STB pin
+ */
+
/ {
model = "Renesas RZ SMARC Carrier-II Board";
compatible = "renesas,smarc2-evk";
@@ -16,9 +36,36 @@
aliases {
serial3 = &scif0;
+ mmc1 = &sdhi1;
+ };
+
+ can_transceiver0: can-phy0 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <8000000>;
+ status = "disabled";
};
+
+ can_transceiver1: can-phy1 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <8000000>;
+ status = "disabled";
+ };
+};
+
+&canfd {
+ status = "okay";
};
&scif0 {
status = "okay";
};
+
+&sdhi1 {
+ bus-width = <4>;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+
+ status = "okay";
+};
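
The SW_SDIO_M2E, SW_GPIO8_CAN0_STB and SW_GPIO9_CAN1_STB macros mentioned in the comment above are plain C-preprocessor symbols: the board .dts defines them before including the carrier .dtsi and gates optional nodes with #if, the same mechanism the #if PMOD0_GPT and #if (SW_SD0_DEV_SEL) blocks elsewhere in this diff use. A minimal sketch, assuming hypothetical switch settings and an illustrative include path:

#define SW_SDIO_M2E		0	/* SMARC SDIO routed to uSD1 */
#define SW_GPIO8_CAN0_STB	1	/* GPIO8 pin repurposed as CAN0 STB */
#define SW_GPIO9_CAN1_STB	0	/* keep GPIO9 on the PMOD header */

#include "renesas-smarc2.dtsi"

#if SW_GPIO8_CAN0_STB
&can_transceiver0 {
	status = "okay";
};
#endif
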
diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi
index e9f244c33d55..2616dbde4dd5 100644
--- a/arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi
@@ -38,6 +38,11 @@
line-name = "can1_stb";
};
+ gpt_pins: gpt {
+ pinmux = <RZG2L_PORT_PINMUX(43, 0, 2)>, /* GTIOC4A */
+ <RZG2L_PORT_PINMUX(43, 1, 2)>; /* GTIOC4B */
+ };
+
i2c0_pins: i2c0 {
pins = "RIIC0_SDA", "RIIC0_SCL";
input-enable;
diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
index 21cf198b3c17..d511e152d7c6 100644
--- a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
@@ -327,7 +327,7 @@
status = "okay";
flash@0 {
- compatible = "micron,mt25qu512a", "jedec,spi-nor";
+ compatible = "jedec,spi-nor";
reg = <0>;
m25p,fast-read;
spi-max-frequency = <50000000>;
diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi
index 789f7b0b5ebc..b76b55e7f09d 100644
--- a/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi
@@ -104,6 +104,14 @@
};
};
+#if PMOD0_GPT
+&gpt {
+ pinctrl-0 = <&gpt_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+#endif /* PMOD0_GPT */
+
&i2c3 {
pinctrl-0 = <&i2c3_pins>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
index 9aa729fbdce0..3e8909a872e3 100644
--- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
@@ -246,7 +246,7 @@
status = "okay";
flash@0 {
- compatible = "micron,mt25qu512a", "jedec,spi-nor";
+ compatible = "jedec,spi-nor";
reg = <0>;
m25p,fast-read;
spi-max-frequency = <50000000>;
diff --git a/arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi
index f4ba050beb0d..ecea29a76b14 100644
--- a/arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi
@@ -5,20 +5,185 @@
* Copyright (C) 2024 Renesas Electronics Corp.
*/
+/*
+ * Please set the below switch position on the SoM and the corresponding macro
+ * on the board DTS:
+ *
+ * Switch position SYS.1, Macro SW_SD0_DEV_SEL:
+ * 0 - SD0 is connected to eMMC (default)
+ * 1 - SD0 is connected to uSD0 card
+ *
+ * Switch position SYS.5, Macro SW_LCD_EN:
+ * 0 - Select Misc. Signals routing
+ * 1 - Select LCD
+ *
+ * Switch position BOOT.6, Macro SW_PDM_EN:
+ * 0 - Select CAN routing
+ * 1 - Select PDM
+ */
+
/ {
compatible = "renesas,rzg3e-smarcm", "renesas,r9a09g047e57", "renesas,r9a09g047";
+ aliases {
+ i2c2 = &i2c2;
+ mmc0 = &sdhi0;
+ mmc2 = &sdhi2;
+ };
+
memory@48000000 {
device_type = "memory";
/* First 128MB is reserved for secure area. */
reg = <0x0 0x48000000 0x0 0xf8000000>;
};
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_vdd0p8v_others: regulator-vdd0p8v-others {
+ compatible = "regulator-fixed";
+
+ regulator-name = "fixed-0.8V";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* 32.768kHz crystal */
+ x3: x3-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
};
&audio_extal_clk {
clock-frequency = <48000000>;
};
+&gpu {
+ status = "okay";
+ mali-supply = <&reg_vdd0p8v_others>;
+};
+
+&i2c2 {
+ pinctrl-0 = <&i2c2_pins>;
+ pinctrl-names = "default";
+ clock-frequency = <400000>;
+ status = "okay";
+
+ raa215300: pmic@12 {
+ compatible = "renesas,raa215300";
+ reg = <0x12>, <0x6f>;
+ reg-names = "main", "rtc";
+ clocks = <&x3>;
+ clock-names = "xin";
+
+ pinctrl-0 = <&rtc_irq_pin>;
+ pinctrl-names = "default";
+
+ interrupts-extended = <&pinctrl RZG3E_GPIO(S, 1) IRQ_TYPE_EDGE_FALLING>;
+ };
+};
+
+&pinctrl {
+ i2c2_pins: i2c {
+ pinmux = <RZG3E_PORT_PINMUX(3, 4, 1)>, /* SCL2 */
+ <RZG3E_PORT_PINMUX(3, 5, 1)>; /* SDA2 */
+ };
+
+ rtc_irq_pin: rtc-irq {
+ pins = "PS1";
+ bias-pull-up;
+ };
+
+ sdhi0_emmc_pins: sd0-emmc {
+ sd0-ctrl {
+ pins = "SD0CLK", "SD0CMD";
+ renesas,output-impedance = <3>;
+ };
+
+ sd0-data {
+ pins = "SD0DAT0", "SD0DAT1", "SD0DAT2", "SD0DAT3",
+ "SD0DAT4", "SD0DAT5", "SD0DAT6", "SD0DAT7";
+ renesas,output-impedance = <3>;
+ };
+
+ sd0-rst {
+ pins = "SD0RSTN";
+ renesas,output-impedance = <3>;
+ };
+ };
+
+ sdhi0_usd_pins: sd0-usd {
+ sd0-cd {
+ pinmux = <RZG3E_PORT_PINMUX(5, 0, 8)>;
+ };
+
+ sd0-ctrl {
+ pins = "SD0CLK", "SD0CMD";
+ renesas,output-impedance = <3>;
+ };
+
+ sd0-data {
+ pins = "SD0DAT0", "SD0DAT1", "SD0DAT2", "SD0DAT3";
+ renesas,output-impedance = <3>;
+ };
+
+ sd0-iovs {
+ pins = "SD0IOVS";
+ renesas,output-impedance = <3>;
+ };
+
+ sd0-pwen {
+ pins = "SD0PWEN";
+ renesas,output-impedance = <3>;
+ };
+ };
+
+ sdhi2_pins: sd2 {
+ sd2-cd {
+ pinmux = <RZG3E_PORT_PINMUX(K, 0, 1)>; /* SD2CD */
+ };
+
+ sd2-ctrl {
+ pinmux = <RZG3E_PORT_PINMUX(H, 0, 1)>, /* SD2CLK */
+ <RZG3E_PORT_PINMUX(H, 1, 1)>; /* SD2CMD */
+ };
+
+ sd2-data {
+ pinmux = <RZG3E_PORT_PINMUX(H, 2, 1)>, /* SD2DAT0 */
+ <RZG3E_PORT_PINMUX(H, 3, 1)>, /* SD2DAT1 */
+ <RZG3E_PORT_PINMUX(H, 4, 1)>, /* SD2DAT2 */
+ <RZG3E_PORT_PINMUX(H, 5, 1)>; /* SD2DAT3 */
+ };
+
+ sd2-iovs {
+ pinmux = <RZG3E_PORT_PINMUX(K, 1, 1)>; /* SD2IOVS */
+ };
+
+ sd2-pwen {
+ pinmux = <RZG3E_PORT_PINMUX(K, 2, 1)>; /* SD2PWEN */
+ };
+ };
+};
+
&qextal_clk {
clock-frequency = <24000000>;
};
@@ -27,6 +192,56 @@
clock-frequency = <32768>;
};
+#if (SW_SD0_DEV_SEL)
+&sdhi0 {
+ pinctrl-0 = <&sdhi0_usd_pins>;
+ pinctrl-1 = <&sdhi0_usd_pins>;
+ pinctrl-names = "default", "state_uhs";
+
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&sdhi0_vqmmc>;
+ bus-width = <4>;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ status = "okay";
+};
+
+&sdhi0_vqmmc {
+ status = "okay";
+};
+#else
+&sdhi0 {
+ pinctrl-0 = <&sdhi0_emmc_pins>;
+ pinctrl-1 = <&sdhi0_emmc_pins>;
+ pinctrl-names = "default", "state_uhs";
+
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&reg_1p8v>;
+ bus-width = <8>;
+ mmc-hs200-1_8v;
+ non-removable;
+ fixed-emmc-driver-type = <1>;
+ status = "okay";
+};
+#endif
+
+&sdhi2 {
+ pinctrl-0 = <&sdhi2_pins>;
+ pinctrl-1 = <&sdhi2_pins>;
+ pinctrl-names = "default", "state_uhs";
+
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&sdhi2_vqmmc>;
+ bus-width = <4>;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ status = "okay";
+};
+
+&sdhi2_vqmmc {
+ status = "okay";
+};
+
&wdt1 {
status = "okay";
};
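
SW_SD0_DEV_SEL described in the comment above is consumed by the #if (SW_SD0_DEV_SEL) / #else block earlier in this file, so a board .dts only needs to define the macros before including this SoM .dtsi. A minimal sketch with hypothetical settings; the include file name is illustrative:

#define SW_SD0_DEV_SEL	1	/* SD0 routed to the uSD0 card slot */
#define SW_LCD_EN	0	/* keep the Misc. signal routing */
#define SW_PDM_EN	0	/* keep the CAN routing */

#include "rzg3e-smarc-som.dtsi"

With SW_SD0_DEV_SEL set to 0 instead, the #else branch configures SD0 as the on-module eMMC with the 8-bit bus and HS200 settings shown above.
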
diff --git a/arch/arm64/boot/dts/renesas/white-hawk-ard-audio-da7212.dtso b/arch/arm64/boot/dts/renesas/white-hawk-ard-audio-da7212.dtso
index c27b9b3d4e5f..f2d53e958da1 100644
--- a/arch/arm64/boot/dts/renesas/white-hawk-ard-audio-da7212.dtso
+++ b/arch/arm64/boot/dts/renesas/white-hawk-ard-audio-da7212.dtso
@@ -108,7 +108,7 @@
};
tpu0_pins: tpu0 {
- groups = "tpu_to0_a";
+ groups = "tpu_to0_b";
function = "tpu";
};
};
diff --git a/arch/arm64/boot/dts/renesas/white-hawk-single.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-single.dtsi
index 20e8232f2f32..976a3ab44e5a 100644
--- a/arch/arm64/boot/dts/renesas/white-hawk-single.dtsi
+++ b/arch/arm64/boot/dts/renesas/white-hawk-single.dtsi
@@ -11,6 +11,10 @@
/ {
model = "Renesas White Hawk Single board";
compatible = "renesas,white-hawk-single";
+
+ aliases {
+ ethernet3 = &tsn0;
+ };
};
&hscif0 {
@@ -53,7 +57,7 @@
pinctrl-0 = <&tsn0_pins>;
pinctrl-names = "default";
phy-mode = "rgmii";
- phy-handle = <&phy3>;
+ phy-handle = <&tsn0_phy>;
status = "okay";
mdio {
@@ -63,7 +67,7 @@
reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;
reset-post-delay-us = <4000>;
- phy3: ethernet-phy@0 {
+ tsn0_phy: ethernet-phy@0 {
compatible = "ethernet-phy-id002b.0980",
"ethernet-phy-ieee802.3-c22";
reg = <0>;
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index 3e8771ef69ba..4bf84622db47 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -1,9 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-cobra-ltk050h3146w-a2.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-cobra-ltk050h3146w.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-cobra-ltk050h3148w.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-cobra-ltk500hd1829.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-engicam-px30-core-ctouch2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-engicam-px30-core-ctouch2-of10.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-engicam-px30-core-edimm2.2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-firefly-jd4-core-mb.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-pp1516-ltk050h3146w-a2.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-pp1516-ltk050h3148w.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-ringneck-haikou.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-ringneck-haikou-lvds-9904379.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-ringneck-haikou-video-demo.dtbo
@@ -40,6 +46,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-px5-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-r88.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-eaidk-610.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-evb.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-evb-ind.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-ficus.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-firefly.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-bob.dtb
@@ -81,6 +88,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-sapphire.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-sapphire-excavator.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399pro-rock-pi-n10.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3528-radxa-e20c.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3562-evb2-v10.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg-arc-d.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg-arc-s.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg353p.dtb
@@ -147,6 +155,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6a-io.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6a-wifi.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6b-io.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-evb1-v10.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-evb2-v10.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-firefly-itx-3588j.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-friendlyelec-cm3588-nas.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-h96-max-v58.dtb
@@ -164,7 +173,9 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5-itx.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5b.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5b-pcie-ep.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5b-pcie-srns.dtbo
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5b-plus.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-tiger-haikou.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-tiger-haikou-video-demo.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-toybrick-x0.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-turing-rk1.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-coolpi-4b.dtb
@@ -233,3 +244,7 @@ rk3588-rock-5b-pcie-ep-dtbs := rk3588-rock-5b.dtb \
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5b-pcie-srns.dtb
rk3588-rock-5b-pcie-srns-dtbs := rk3588-rock-5b.dtb \
rk3588-rock-5b-pcie-srns.dtbo
+
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-tiger-haikou-haikou-video-demo.dtb
+rk3588-tiger-haikou-haikou-video-demo-dtbs := rk3588-tiger-haikou.dtb \
+ rk3588-tiger-haikou-video-demo.dtbo
diff --git a/arch/arm64/boot/dts/rockchip/px30-cobra-ltk050h3146w-a2.dts b/arch/arm64/boot/dts/rockchip/px30-cobra-ltk050h3146w-a2.dts
new file mode 100644
index 000000000000..1d26164be7b8
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/px30-cobra-ltk050h3146w-a2.dts
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Cherry Embedded Solutions GmbH
+ */
+
+/dts-v1/;
+#include "px30-cobra.dtsi"
+
+/ {
+ model = "Theobroma Systems Cobra with LTK050H3146W-A2 Display";
+ compatible = "tsd,px30-cobra-ltk050h3146w-a2", "tsd,px30-cobra", "rockchip,px30";
+};
+
+&dsi {
+ status = "okay";
+
+ panel@0 {
+ compatible = "leadtek,ltk050h3146w-a2";
+ reg = <0>;
+ backlight = <&backlight>;
+ iovcc-supply = <&vcc_1v8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dsp_rst>;
+ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+ vci-supply = <&vcc_2v8>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+};
+
+&dsi_out {
+ mipi_out_panel: endpoint {
+ remote-endpoint = <&mipi_in_panel>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/px30-cobra-ltk050h3146w.dts b/arch/arm64/boot/dts/rockchip/px30-cobra-ltk050h3146w.dts
new file mode 100644
index 000000000000..82c6acdb4fae
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/px30-cobra-ltk050h3146w.dts
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Cherry Embedded Solutions GmbH
+ */
+
+/dts-v1/;
+#include "px30-cobra.dtsi"
+
+/ {
+ model = "Theobroma Systems Cobra with LTK050H3146W Display";
+ compatible = "tsd,px30-cobra-ltk050h3146w", "tsd,px30-cobra", "rockchip,px30";
+};
+
+&dsi {
+ status = "okay";
+
+ panel@0 {
+ compatible = "leadtek,ltk050h3146w";
+ reg = <0>;
+ backlight = <&backlight>;
+ iovcc-supply = <&vcc_1v8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dsp_rst>;
+ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+ vci-supply = <&vcc_2v8>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+};
+
+&dsi_out {
+ mipi_out_panel: endpoint {
+ remote-endpoint = <&mipi_in_panel>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/px30-cobra-ltk050h3148w.dts b/arch/arm64/boot/dts/rockchip/px30-cobra-ltk050h3148w.dts
new file mode 100644
index 000000000000..94449132df38
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/px30-cobra-ltk050h3148w.dts
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Cherry Embedded Solutions GmbH
+ */
+
+/dts-v1/;
+#include "px30-cobra.dtsi"
+
+/ {
+	model = "Theobroma Systems Cobra with LTK050H3148W Display";
+ compatible = "tsd,px30-cobra-ltk050h3148w", "tsd,px30-cobra", "rockchip,px30";
+};
+
+&dsi {
+ status = "okay";
+
+ panel@0 {
+ compatible = "leadtek,ltk050h3148w";
+ reg = <0>;
+ backlight = <&backlight>;
+ iovcc-supply = <&vcc_1v8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dsp_rst>;
+ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+ vci-supply = <&vcc_2v8>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+};
+
+&dsi_out {
+ mipi_out_panel: endpoint {
+ remote-endpoint = <&mipi_in_panel>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/px30-cobra-ltk500hd1829.dts b/arch/arm64/boot/dts/rockchip/px30-cobra-ltk500hd1829.dts
new file mode 100644
index 000000000000..d7b639e7ccab
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/px30-cobra-ltk500hd1829.dts
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Cherry Embedded Solutions GmbH
+ */
+
+/dts-v1/;
+#include "px30-cobra.dtsi"
+
+/ {
+ model = "Theobroma Systems Cobra prototype with LTK500HD1829 Display";
+ compatible = "tsd,px30-cobra-ltk500hd1829", "tsd,px30-cobra", "rockchip,px30";
+
+ aliases {
+ mmc1 = &sdmmc;
+ };
+};
+
+&dsi {
+ status = "okay";
+
+ panel@0 {
+ compatible = "leadtek,ltk500hd1829";
+ reg = <0>;
+ backlight = <&backlight>;
+ iovcc-supply = <&vcc_1v8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dsp_rst>;
+ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+ vcc-supply = <&vcc_2v8>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+};
+
+&dsi_out {
+ mipi_out_panel: endpoint {
+ remote-endpoint = <&mipi_in_panel>;
+ };
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cobra_pin_hog>, <&cobra_proto_hog>;
+
+ hog {
+ cobra_proto_hog: cobra-proto-hog {
+ rockchip,pins =
+				/* STUSB4500 open-drain output POWER_OK2, needs pull-up */
+ <3 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>,
+				/* STUSB4500 open-drain output POWER_OK3, needs pull-up */
+ <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&sdmmc {
+ bus-width = <4>;
+ broken-cd;
+ cap-sd-highspeed;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vccio_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/px30-cobra.dtsi b/arch/arm64/boot/dts/rockchip/px30-cobra.dtsi
new file mode 100644
index 000000000000..b7e669d8ba4d
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/px30-cobra.dtsi
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Cherry Embedded Solutions GmbH
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "px30.dtsi"
+
+/ {
+ aliases {
+ ethernet0 = &gmac;
+ mmc0 = &emmc;
+ };
+
+ chosen {
+ stdout-path = "serial5:115200n8";
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ power-supply = <&vcc5v0_sys>;
+ pwms = <&pwm0 0 25000 0>;
+ };
+
+ beeper {
+ compatible = "pwm-beeper";
+ pwms = <&pwm1 0 1000 0>;
+ };
+
+ emmc_pwrseq: emmc-pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ pinctrl-0 = <&emmc_reset>;
+ pinctrl-names = "default";
+ reset-gpios = <&gpio1 RK_PB3 GPIO_ACTIVE_HIGH>;
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&heartbeat_led_pin>;
+
+ /*
+ * LED14 on the PCB. Typically NOT populated.
+ */
+ led-0 {
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio0 RK_PA0 GPIO_ACTIVE_HIGH>;
+ label = "heartbeat";
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ pwm-leds {
+ compatible = "pwm-leds";
+
+ ring_red: led-0 {
+ color = <LED_COLOR_ID_RED>;
+ default-state = "off";
+ label = "ring_red";
+ pwms = <&pwm5 0 1000000 0>;
+ max-brightness = <255>;
+ };
+
+ ring_green: led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
+ label = "ring_green";
+ pwms = <&pwm6 0 1000000 0>;
+ max-brightness = <255>;
+ };
+
+ ring_blue: led-2 {
+ color = <LED_COLOR_ID_BLUE>;
+ default-state = "off";
+ label = "ring_blue";
+ pwms = <&pwm7 0 1000000 0>;
+ max-brightness = <255>;
+ };
+ };
+
+ /* also named 5V_Q7 in schematics */
+ vcc5v0_sys: regulator-vccsys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&display_subsystem {
+ status = "okay";
+};
+
+&dsi_dphy {
+ status = "okay";
+};
+
+&emmc {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ /*
+ * For hs200 support, U-Boot would have to set the RK809 DCDC4
+ * rail to 1.8V from the default of 3.0V. It doesn't do that on
+ * devices out in the field, so disable hs200.
+ * mmc-hs200-1_8v;
+ */
+ mmc-pwrseq = <&emmc_pwrseq>;
+ non-removable;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_emmc>;
+ status = "okay";
+};
+
+&gmac {
+ clock_in_out = "output";
+ phy-handle = <&dp83825>;
+ phy-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_log>;
+ status = "okay";
+};
+
+/* I2C0 = PMIC, STUSB4500, RTC */
+&i2c0 {
+ status = "okay";
+
+ rk809: pmic@20 {
+ compatible = "rockchip,rk809";
+ reg = <0x20>;
+ #clock-cells = <0>;
+ clock-output-names = "xin32k";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA7 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int>;
+ system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc_3v3>;
+ vcc6-supply = <&vcc_3v3>;
+ vcc7-supply = <&vcc_3v3>;
+ vcc9-supply = <&vcc5v0_sys>;
+
+ regulators {
+ vdd_log: DCDC_REG1 {
+ regulator-name = "vdd_log";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vdd_arm: DCDC_REG2 {
+ regulator-name = "vdd_arm";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_3v0_1v8: vcc_emmc: DCDC_REG4 {
+ regulator-name = "vcc_3v0_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcc_3v3: DCDC_REG5 {
+ regulator-name = "vcc_3v3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_1v8: LDO_REG2 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc_1v0: LDO_REG3 {
+ regulator-name = "vcc_1v0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_2v8: LDO_REG4 {
+ regulator-name = "vcc_2v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <2800000>;
+ };
+ };
+
+ /*
+ * vccio_sd also supplies the vmmc supply on prototypes
+ * with sd-slots, so needs to stay single voltage for
+ * those. Production models don't have sd-slots anymore
+ * and only supply vccio2 from this regulator.
+ */
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ /* vcc_sdio also supplies the pull-up resistors for i2c1 */
+ vcc_sdio: LDO_REG6 {
+ regulator-name = "vcc_sdio";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_lcd: LDO_REG7 {
+ regulator-name = "vcc_lcd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_1v8_lcd: LDO_REG8 {
+ regulator-name = "vcc_1v8_lcd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcca_1v8: LDO_REG9 {
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ i2c-scl-falling-time-ns = <50>;
+ i2c-scl-rising-time-ns = <300>;
+ status = "okay";
+
+ touchscreen@14 {
+ compatible = "goodix,gt911";
+ reg = <0x14>;
+ AVDD28-supply = <&vcc_2v8>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA1 IRQ_TYPE_LEVEL_LOW>;
+ irq-gpios = <&gpio0 RK_PA1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tch_int &tch_rst>;
+ reset-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
+ touchscreen-inverted-x;
+ VDDIO-supply = <&vcc_3v3>;
+ };
+};
+
+/*
+ * Enable pull-ups to prevent floating pins when the touch
+ * panel is not connected.
+ */
+&i2c2_xfer {
+ rockchip,pins =
+ <2 RK_PB7 2 &pcfg_pull_up>,
+ <2 RK_PC0 2 &pcfg_pull_up>;
+};
+
+&io_domains {
+ vccio1-supply = <&vcc_sdio>;
+ vccio2-supply = <&vccio_sd>;
+ vccio3-supply = <&vcc_3v3>;
+ vccio4-supply = <&vcc_3v3>;
+ vccio5-supply = <&vcc_1v8>;
+ vccio6-supply = <&vcc_emmc>;
+ status = "okay";
+};
+
+&mdio {
+ dp83825: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&phy_rst>;
+ reset-assert-us = <50000>;
+ reset-deassert-us = <50000>;
+ reset-gpios = <&gpio2 RK_PB6 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cobra_pin_hog>;
+
+ hog {
+ cobra_pin_hog: cobra-pin-hog {
+ rockchip,pins =
+ /* USB_HUB2_RESET */
+ <0 RK_PA5 RK_FUNC_GPIO &pcfg_output_high>,
+ /* USB_HUB1_RESET */
+ <0 RK_PB4 RK_FUNC_GPIO &pcfg_output_high>,
+ /* The default pull-down can keep the IC in reset. */
+ <3 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>,
+ /* USB-A 5V enable */
+ <3 RK_PC0 RK_FUNC_GPIO &pcfg_output_high>,
+ /* USB-A data enable */
+ <3 RK_PD3 RK_FUNC_GPIO &pcfg_output_high>;
+ };
+ };
+
+ emmc {
+ emmc_reset: emmc-reset {
+ rockchip,pins =
+ <1 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ ethernet {
+ phy_rst: phy-rst {
+ rockchip,pins =
+ <2 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ heartbeat_led_pin: heartbeat-led-pin {
+ rockchip,pins =
+ <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ panel {
+ dsp_rst: dsp-rst {
+ rockchip,pins =
+ <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ tch_int: tch-int {
+ rockchip,pins =
+ <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ tch_rst: tch-rst {
+ rockchip,pins =
+ <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int: pmic-int {
+ rockchip,pins =
+ <0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc_3v3>;
+ pmuio2-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&pwm0 {
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&pwm5 {
+ status = "okay";
+};
+
+&pwm6 {
+ status = "okay";
+};
+
+&pwm7 {
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&u2phy {
+ status = "okay";
+};
+
+&u2phy_host {
+ status = "okay";
+};
+
+&u2phy_otg {
+ status = "okay";
+};
+
+&uart1 {
+ /delete-property/ dmas;
+ /delete-property/ dma-names;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_xfer>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-0 = <&uart5_xfer>;
+ status = "okay";
+};
+
+&usb20_otg {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&vopl {
+ status = "disabled";
+};
+
+&vopl_mmu {
+ status = "disabled";
+};
+
+&wdt {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi b/arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi
index 1edfd643b25a..a334ef0629d1 100644
--- a/arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi
@@ -31,7 +31,7 @@
};
vcc3v3_btreg: vcc3v3-btreg {
- compatible = "regulator-gpio";
+ compatible = "regulator-fixed";
enable-active-high;
pinctrl-names = "default";
pinctrl-0 = <&bt_enable_h>;
@@ -39,7 +39,6 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
- states = <3300000 0x0>;
};
vcc3v3_rf_aux_mod: regulator-vcc3v3-rf-aux-mod {
diff --git a/arch/arm64/boot/dts/rockchip/px30-engicam-ctouch2.dtsi b/arch/arm64/boot/dts/rockchip/px30-engicam-ctouch2.dtsi
index 80db778c9684..b60e68faa83a 100644
--- a/arch/arm64/boot/dts/rockchip/px30-engicam-ctouch2.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30-engicam-ctouch2.dtsi
@@ -26,5 +26,5 @@
};
&vcc3v3_btreg {
- enable-gpios = <&gpio1 RK_PC3 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpio1 RK_PC3 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core-edimm2.2.dts b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core-edimm2.2.dts
index 165d09ccb942..5886b802c520 100644
--- a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core-edimm2.2.dts
+++ b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core-edimm2.2.dts
@@ -39,5 +39,5 @@
};
&vcc3v3_btreg {
- enable-gpios = <&gpio1 RK_PC2 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpio1 RK_PC2 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/rockchip/px30-pp1516-ltk050h3146w-a2.dts b/arch/arm64/boot/dts/rockchip/px30-pp1516-ltk050h3146w-a2.dts
new file mode 100644
index 000000000000..b71929bcb33e
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/px30-pp1516-ltk050h3146w-a2.dts
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Cherry Embedded Solutions GmbH
+ */
+
+/dts-v1/;
+#include "px30-pp1516.dtsi"
+
+/ {
+ model = "Theobroma Systems PP-1516 with LTK050H3146W-A2 Display";
+ compatible = "tsd,px30-pp1516-ltk050h3146w-a2", "tsd,px30-pp1516", "rockchip,px30";
+};
+
+&dsi {
+ status = "okay";
+
+ panel@0 {
+ compatible = "leadtek,ltk050h3146w-a2";
+ reg = <0>;
+ backlight = <&backlight>;
+ iovcc-supply = <&vcc_1v8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dsp_rst>;
+ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+ vci-supply = <&vcc_2v8>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+};
+
+&dsi_out {
+ mipi_out_panel: endpoint {
+ remote-endpoint = <&mipi_in_panel>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/px30-pp1516-ltk050h3148w.dts b/arch/arm64/boot/dts/rockchip/px30-pp1516-ltk050h3148w.dts
new file mode 100644
index 000000000000..a9bd5936c701
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/px30-pp1516-ltk050h3148w.dts
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Cherry Embedded Solutions GmbH
+ */
+
+/dts-v1/;
+#include "px30-pp1516.dtsi"
+
+/ {
+ model = "Theobroma Systems PP-1516 with LTK050H3148W Display";
+ compatible = "tsd,px30-pp1516-ltk050h3148w", "tsd,px30-pp1516", "rockchip,px30";
+};
+
+&dsi {
+ status = "okay";
+
+ panel@0 {
+ compatible = "leadtek,ltk050h3148w";
+ reg = <0>;
+ backlight = <&backlight>;
+ iovcc-supply = <&vcc_1v8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dsp_rst>;
+ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+ vci-supply = <&vcc_2v8>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+};
+
+&dsi_out {
+ mipi_out_panel: endpoint {
+ remote-endpoint = <&mipi_in_panel>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/px30-pp1516.dtsi b/arch/arm64/boot/dts/rockchip/px30-pp1516.dtsi
new file mode 100644
index 000000000000..3f9a133d7373
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/px30-pp1516.dtsi
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Cherry Embedded Solutions GmbH
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "px30.dtsi"
+
+/ {
+ aliases {
+ mmc0 = &emmc;
+ };
+
+ chosen {
+ stdout-path = "serial5:115200n8";
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ power-supply = <&vcc5v0_sys>;
+ pwms = <&pwm0 0 25000 0>;
+ };
+
+ beeper {
+ compatible = "pwm-beeper";
+ pwms = <&pwm1 0 1000 0>;
+ };
+
+ emmc_pwrseq: emmc-pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ pinctrl-0 = <&emmc_reset>;
+ pinctrl-names = "default";
+ reset-gpios = <&gpio1 RK_PB3 GPIO_ACTIVE_HIGH>;
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&debug_led_pin>, <&heartbeat_led_pin>;
+
+ /*
+ * LED2 on the PCB, left of the USB-C connector.
+ * Typically NOT populated.
+ */
+ debug: led-0 {
+ label = "debug";
+ gpios = <&gpio3 RK_PC3 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "none";
+ };
+
+ /*
+ * LED14 on the PCB, left of the PX30 SoC.
+ * Typically NOT populated.
+ */
+ heartbeat: led-1 {
+ label = "heartbeat";
+ gpios = <&gpio0 RK_PA0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ vcc5v0_sys: regulator-vccsys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc_cam_avdd: regulator-vcc-cam-avdd {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_cam_avdd";
+ gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_avdd_en>;
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vcc_2v8>;
+ };
+
+ vcc_cam_dovdd: regulator-vcc-cam-dovdd {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_cam_dovdd";
+ gpio = <&gpio3 RK_PC1 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_dovdd_en>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_1v8>;
+ };
+
+ vcc_cam_dvdd: regulator-vcc-cam-dvdd {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_cam_dvdd";
+ gpio = <&gpio3 RK_PC5 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_dvdd_en>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ vin-supply = <&vcc_3v3>;
+ };
+
+ vcc_lens_afvdd: regulator-vcc-lens-afvdd {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_lens_afvdd";
+ gpio = <&gpio3 RK_PB2 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_afvdd_en>;
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vcc_2v8>;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&csi_dphy {
+ status = "okay";
+};
+
+&display_subsystem {
+ status = "okay";
+};
+
+&dsi_dphy {
+ status = "okay";
+};
+
+&emmc {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ /*
+ * For hs200 support, U-Boot would have to set the RK809 DCDC4
+ * rail to 1.8V from the default of 3.0V. It doesn't do that on
+ * devices out in the field, so disable hs200.
+ * mmc-hs200-1_8v;
+ */
+ mmc-pwrseq = <&emmc_pwrseq>;
+ non-removable;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_emmc>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_log>;
+ status = "okay";
+};
+
+/* I2C0 = PMIC, Touchscreen */
+&i2c0 {
+ status = "okay";
+
+ touchscreen@14 {
+ compatible = "goodix,gt911";
+ reg = <0x14>;
+ AVDD28-supply = <&vcc_2v8>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA1 IRQ_TYPE_LEVEL_LOW>;
+ irq-gpios = <&gpio0 RK_PA1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tch_int &tch_rst>;
+ reset-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
+ VDDIO-supply = <&vcc_3v3>;
+ };
+
+ rk809: pmic@20 {
+ compatible = "rockchip,rk809";
+ reg = <0x20>;
+ #clock-cells = <0>;
+ clock-output-names = "xin32k";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA7 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int>;
+ system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc_3v3>;
+ vcc6-supply = <&vcc_3v3>;
+ vcc7-supply = <&vcc_3v3>;
+ vcc9-supply = <&vcc5v0_sys>;
+
+ regulators {
+ vdd_log: DCDC_REG1 {
+ regulator-name = "vdd_log";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vdd_arm: DCDC_REG2 {
+ regulator-name = "vdd_arm";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_3v0_1v8: vcc_emmc: DCDC_REG4 {
+ regulator-name = "vcc_3v0_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcc_3v3: DCDC_REG5 {
+ regulator-name = "vcc_3v3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_1v8: LDO_REG2 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc_1v0: LDO_REG3 {
+ regulator-name = "vcc_1v0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_2v8: LDO_REG4 {
+ regulator-name = "vcc_2v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <2800000>;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcc_sdio: LDO_REG6 {
+ regulator-name = "vcc_sdio";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc_lcd: LDO_REG7 {
+ regulator-name = "vcc_lcd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_1v8_lcd: LDO_REG8 {
+ regulator-name = "vcc_1v8_lcd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcca_1v8: LDO_REG9 {
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+/* I2C2 = Accelerometer + Camera */
+&i2c2 {
+	/* MEMSIC MXC4005 accelerometer is rated for I2C Fast Mode (<=400kHz) */
+	/* OmniVision OV5675 camera is rated for I2C Fast Mode (<=400kHz) */
+ clock-frequency = <400000>;
+ status = "okay";
+
+ focus: focus@c {
+ compatible = "dongwoon,dw9714";
+ reg = <0xc>;
+ vcc-supply = <&vcc_lens_afvdd>;
+ };
+
+ accel@15 {
+ compatible = "memsic,mxc4005";
+ reg = <0x15>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <RK_PB4 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&accel_int>;
+ };
+
+ camera@36 {
+ compatible = "ovti,ov5675";
+ reg = <0x36>;
+ clocks = <&cru SCLK_CIF_OUT>;
+ assigned-clocks = <&cru SCLK_CIF_OUT>;
+ assigned-clock-rates = <19200000>;
+ avdd-supply = <&vcc_cam_avdd>;
+ dvdd-supply = <&vcc_cam_dvdd>;
+ dovdd-supply = <&vcc_cam_dovdd>;
+ lens-focus = <&focus>;
+ orientation = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cif_clkout_m0 &cam_pwdn>;
+ reset-gpios = <&gpio2 RK_PB0 GPIO_ACTIVE_LOW>;
+ rotation = <0>;
+
+ port {
+ ucam_out: endpoint {
+ remote-endpoint = <&mipi_in_ucam>;
+ data-lanes = <1 2>;
+ link-frequencies = /bits/ 64 <450000000>;
+ };
+ };
+ };
+};
+
+&io_domains {
+ vccio1-supply = <&vcc_sdio>;
+ vccio2-supply = <&vccio_sd>;
+ vccio3-supply = <&vcc_1v8>;
+ vccio4-supply = <&vcc_3v3>;
+ vccio5-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_emmc>;
+ status = "okay";
+};
+
+&isp {
+ status = "okay";
+
+ ports {
+ port@0 {
+ mipi_in_ucam: endpoint@0 {
+ reg = <0>;
+ data-lanes = <1 2>;
+ remote-endpoint = <&ucam_out>;
+ };
+ };
+ };
+};
+
+&isp_mmu {
+ status = "okay";
+};
+
+&pinctrl {
+ accel {
+ accel_int: accel-int {
+ rockchip,pins =
+ <2 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ camera {
+ cam_afvdd_en: cam-afvdd-en {
+ rockchip,pins =
+ <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ cam_avdd_en: cam-avdd-en {
+ rockchip,pins =
+ <3 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ cam_dovdd_en: cam-dovdd-en {
+ rockchip,pins =
+ <3 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ cam_dvdd_en: cam-dvdd-en {
+ rockchip,pins =
+ <3 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ cam_pwdn: cam-pwdn {
+ rockchip,pins =
+ <2 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ emmc {
+ emmc_reset: emmc-reset {
+ rockchip,pins =
+ <1 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ debug_led_pin: debug-led-pin {
+ rockchip,pins =
+ <3 RK_PC3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ heartbeat_led_pin: heartbeat-led-pin {
+ rockchip,pins =
+ <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ panel {
+ dsp_rst: dsp-rst {
+ rockchip,pins =
+ <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ tch_int: tch-int {
+ rockchip,pins =
+ <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ tch_rst: tch-rst {
+ rockchip,pins =
+ <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int: pmic-int {
+ rockchip,pins =
+ <0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc_3v3>;
+ pmuio2-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&pwm0 {
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&u2phy {
+ status = "okay";
+};
+
+&u2phy_host {
+ status = "okay";
+};
+
+&u2phy_otg {
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-0 = <&uart5_xfer>;
+ status = "okay";
+};
+
+&usb20_otg {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&wdt {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
index 142244d52706..ab232e5c7ad6 100644
--- a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
@@ -83,9 +83,7 @@
/* On-module TI DP83825I PHY but no connector, enable in carrierboard */
&gmac {
- snps,reset-gpio = <&gpio3 RK_PB0 GPIO_ACTIVE_LOW>;
- snps,reset-active-low;
- snps,reset-delays-us = <0 50000 50000>;
+ phy-handle = <&dp83825>;
phy-supply = <&vcc_3v3>;
clock_in_out = "output";
};
@@ -344,6 +342,18 @@
status = "okay";
};
+&mdio {
+ dp83825: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&phy_rst>;
+ reset-assert-us = <50000>;
+ reset-deassert-us = <50000>;
+ reset-gpios = <&gpio3 RK_PB0 GPIO_ACTIVE_LOW>;
+ };
+};
+
&pinctrl {
emmc {
emmc_reset: emmc-reset {
@@ -351,6 +361,12 @@
};
};
+ ethernet {
+ phy_rst: phy-rst {
+ rockchip,pins = <3 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
leds {
module_led_pin: module-led-pin {
rockchip,pins = <1 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
index 9137dd76e72c..feabdadfa440 100644
--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
@@ -985,6 +985,12 @@
resets = <&cru SRST_GMAC_A>;
reset-names = "stmmaceth";
status = "disabled";
+
+ mdio: mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
};
sdmmc: mmc@ff370000 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index e550b6eeeff3..5367e5fa9232 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -343,6 +343,7 @@
/* maximum speed for Rockchip SPI */
spi-max-frequency = <50000000>;
+ vcc-supply = <&vcc_io>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-evb-ind.dts b/arch/arm64/boot/dts/rockchip/rk3399-evb-ind.dts
new file mode 100644
index 000000000000..70aee1ab904c
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-evb-ind.dts
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ */
+
+/dts-v1/;
+#include "rk3399.dtsi"
+
+/ {
+ model = "Rockchip RK3399 EVB IND LPDDR4 Board";
+ compatible = "rockchip,rk3399-evb-ind", "rockchip,rk3399";
+
+ aliases {
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ vcc5v0_sys: regulator-vcc5v0-sys {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 RK_PD2 GPIO_ACTIVE_HIGH>;
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <5000000>;
+ };
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_b>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_b>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&emmc_phy {
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
+&hdmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_i2c_xfer>, <&hdmi_cec>;
+ status = "okay";
+};
+
+&hdmi_sound {
+ status = "okay";
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+ i2c-scl-falling-time-ns = <4>;
+ i2c-scl-rising-time-ns = <168>;
+ status = "okay";
+
+ vdd_gpu: regulator@10 {
+ compatible = "tcs,tcs4525";
+ reg = <0x10>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vsel2_gpio>;
+ regulator-name = "vdd_gpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1500000>;
+ regulator-min-microvolt = <712500>;
+ regulator-ramp-delay = <1000>;
+ vin-supply = <&vcc5v0_sys>;
+ vsel-gpios = <&gpio1 RK_PB6 GPIO_ACTIVE_HIGH>;
+ fcs,suspend-voltage-selector = <1>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_b: regulator@1c {
+ compatible = "tcs,tcs4525";
+ reg = <0x1c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vsel1_gpio>;
+ regulator-name = "vdd_cpu_b";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1500000>;
+ regulator-min-microvolt = <712500>;
+ regulator-ramp-delay = <1000>;
+ vin-supply = <&vcc5v0_sys>;
+ vsel-gpios = <&gpio1 RK_PC1 GPIO_ACTIVE_HIGH>;
+ fcs,suspend-voltage-selector = <1>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ rk809: pmic@20 {
+ compatible = "rockchip,rk809";
+ reg = <0x20>;
+ #clock-cells = <1>;
+ clock-output-names = "xin32k", "rk808-clkout2";
+ interrupt-parent = <&gpio1>;
+ interrupts = <RK_PC5 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>;
+ wakeup-source;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc_buck5>;
+ vcc6-supply = <&vcc_buck5>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+
+ regulators {
+ vdd_center: DCDC_REG1 {
+ regulator-name = "vdd_center";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-max-microvolt = <1350000>;
+ regulator-min-microvolt = <750000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_l: DCDC_REG2 {
+ regulator-name = "vdd_cpu_l";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-max-microvolt = <1350000>;
+ regulator-min-microvolt = <750000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc3v3_sys: DCDC_REG4 {
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_buck5: DCDC_REG5 {
+ regulator-name = "vcc_buck5";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <2200000>;
+ regulator-min-microvolt = <2200000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2200000>;
+ };
+ };
+
+ vcca_0v9: LDO_REG1 {
+ regulator-name = "vcca_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <900000>;
+ regulator-min-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8: LDO_REG2 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc0v9_soc: LDO_REG3 {
+ regulator-name = "vcc0v9_soc";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <900000>;
+ regulator-min-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vcca_1v8: LDO_REG4 {
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd1v5_dvp: LDO_REG5 {
+ regulator-name = "vdd1v5_dvp";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1500000>;
+ regulator-min-microvolt = <1500000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v5: LDO_REG6 {
+ regulator-name = "vcc_1v5";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1500000>;
+ regulator-min-microvolt = <1500000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v0: LDO_REG7 {
+ regulator-name = "vcc_3v0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3000000>;
+ regulator-min-microvolt = <3000000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd: LDO_REG8 {
+ regulator-name = "vccio_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_sd: LDO_REG9 {
+ regulator-name = "vcc_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc5v0_usb: SWITCH_REG1 {
+ regulator-name = "vcc5v0_usb";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vccio_3v3: SWITCH_REG2 {
+ regulator-name = "vccio_3v3";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&i2s2 {
+ status = "okay";
+};
+
+&io_domains {
+ audio-supply = <&vcca_1v8>;
+ bt656-supply = <&vcc_3v0>;
+ gpio1830-supply = <&vcc_3v0>;
+ sdmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&pinctrl {
+ pmic {
+ pmic_int_l: pmic-int-l {
+ rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ vsel1_gpio: vsel1 {
+ rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ vsel2_gpio: vsel2 {
+ rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmu1830-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ keep-power-in-suspend;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ no-sdio;
+ no-sd;
+ non-removable;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+ no-sdio;
+ no-mmc;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&tcphy0 {
+ status = "okay";
+};
+
+&tcphy1 {
+ status = "okay";
+};
+
+&tsadc {
+ /* tshut mode 0:CRU 1:GPIO */
+ rockchip,hw-tshut-mode = <1>;
+ /* tshut polarity 0:LOW 1:HIGH */
+ rockchip,hw-tshut-polarity = <1>;
+ status = "okay";
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_host {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_host {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usbdrd_dwc3_0 {
+ status = "okay";
+};
+
+&usbdrd3_0 {
+ status = "okay";
+};
+
+&usbdrd3_1 {
+ status = "okay";
+};
+
+&usbdrd_dwc3_1 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&vopl {
+ status = "okay";
+};
+
+&vopl_mmu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts
index 9e4b12ed62cb..be3ae473e562 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts
@@ -36,6 +36,6 @@
compatible = "qcom,ath10k";
reg = <0x00000000 0x0 0x00000000 0x0 0x00000000>,
<0x03000010 0x0 0x00000000 0x0 0x00200000>;
- qcom,ath10k-calibration-variant = "GO_DUMO";
+ qcom,calibration-variant = "GO_DUMO";
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
index f2234dabd664..70979079923c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
@@ -312,14 +312,6 @@
status = "okay";
};
-&usb_host0_ehci {
- status = "okay";
-};
-
-&usb_host0_ohci {
- status = "okay";
-};
-
&vopb {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index e00fbaa8acc1..587e89d7fc5e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -60,16 +60,6 @@
vin-supply = <&vcc5v0_sys>;
};
- vcc5v0_host: regulator-vcc5v0-host {
- compatible = "regulator-fixed";
- gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
- pinctrl-names = "default";
- pinctrl-0 = <&vcc5v0_host_en>;
- regulator-name = "vcc5v0_host";
- regulator-always-on;
- vin-supply = <&vcc5v0_sys>;
- };
-
vcc5v0_sys: regulator-vcc5v0-sys {
compatible = "regulator-fixed";
regulator-name = "vcc5v0_sys";
@@ -527,10 +517,10 @@
};
};
- usb2 {
- vcc5v0_host_en: vcc5v0-host-en {
+ usb {
+ cy3304_reset: cy3304-reset {
rockchip,pins =
- <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+ <4 RK_PA3 RK_FUNC_GPIO &pcfg_output_high>;
};
};
@@ -595,11 +585,6 @@
u2phy1_otg: otg-port {
status = "okay";
};
-
- u2phy1_host: host-port {
- phy-supply = <&vcc5v0_host>;
- status = "okay";
- };
};
&usbdrd3_1 {
@@ -609,12 +594,27 @@
&usbdrd_dwc3_1 {
status = "okay";
dr_mode = "host";
-};
+ pinctrl-names = "default";
+ pinctrl-0 = <&cy3304_reset>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hub_2_0: hub@1 {
+ compatible = "usb4b4,6502", "usb4b4,6506";
+ reg = <1>;
+ peer-hub = <&hub_3_0>;
+ reset-gpios = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
+ vdd-supply = <&vcc1v2_phy>;
+ vdd2-supply = <&vcc3v3_sys>;
-&usb_host1_ehci {
- status = "okay";
-};
+ };
-&usb_host1_ohci {
- status = "okay";
+ hub_3_0: hub@2 {
+ compatible = "usb4b4,6500", "usb4b4,6504";
+ reg = <2>;
+ peer-hub = <&hub_2_0>;
+ reset-gpios = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
+ vdd-supply = <&vcc1v2_phy>;
+ vdd2-supply = <&vcc3v3_sys>;
+ };
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
index 0393da25cdfb..fc9279627ef6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
@@ -736,6 +736,7 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <30000000>;
+ vcc-supply = <&vcc3v3_sys>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
index 541dca12bf1a..046dbe329017 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
@@ -43,7 +43,7 @@
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
clocks = <&rk808 1>;
- clock-names = "lpo";
+ clock-names = "ext_clock";
pinctrl-names = "default";
pinctrl-0 = <&wifi_enable_h>;
reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
index 51c6aa26d828..a7e4adf87e7a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
@@ -850,6 +850,7 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <10000000>;
+ vcc-supply = <&vcc_3v0>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3528-radxa-e20c.dts b/arch/arm64/boot/dts/rockchip/rk3528-radxa-e20c.dts
index 57a446b5cbd6..9f6ccd9dd1f7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3528-radxa-e20c.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3528-radxa-e20c.dts
@@ -9,6 +9,7 @@
#include <dt-bindings/input/input.h>
#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pwm/pwm.h>
#include "rk3528.dtsi"
/ {
@@ -16,7 +17,11 @@
compatible = "radxa,e20c", "rockchip,rk3528";
aliases {
+ ethernet0 = &gmac1;
+ i2c1 = &i2c1;
mmc0 = &sdhci;
+ mmc1 = &sdmmc;
+ serial0 = &uart0;
};
chosen {
@@ -80,6 +85,26 @@
};
};
+ vdd_0v9: regulator-0v9-vdd {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc_ddr: regulator-1v1-vcc-ddr {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
vcc_1v8: regulator-1v8-vcc {
compatible = "regulator-fixed";
regulator-name = "vcc_1v8";
@@ -108,9 +133,104 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
};
+
+ vccio_sd: regulator-vccio-sd {
+ compatible = "regulator-gpio";
+ gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_vol_ctrl_h>;
+ regulator-name = "vccio_sd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ states = <1800000 0x0>, <3300000 0x1>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vdd_arm: regulator-vdd-arm {
+ compatible = "pwm-regulator";
+ pwms = <&pwm1 0 5000 PWM_POLARITY_INVERTED>;
+ pwm-supply = <&vcc5v0_sys>;
+ regulator-name = "vdd_arm";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <746000>;
+ regulator-max-microvolt = <1201000>;
+ regulator-settling-time-up-us = <250>;
+ };
+
+ vdd_logic: regulator-vdd-logic {
+ compatible = "pwm-regulator";
+ pwms = <&pwm2 0 5000 PWM_POLARITY_INVERTED>;
+ pwm-supply = <&vcc5v0_sys>;
+ regulator-name = "vdd_logic";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <705000>;
+ regulator-max-microvolt = <1006000>;
+ regulator-settling-time-up-us = <250>;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&gmac1 {
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy>;
+ phy-mode = "rgmii-id";
+ phy-supply = <&vcc_3v3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_miim>, <&rgmii_tx_bus2>, <&rgmii_rx_bus2>,
+ <&rgmii_rgmii_clk>, <&rgmii_rgmii_bus>;
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1m0_xfer>;
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "belling,bl24c16a", "atmel,24c16";
+ reg = <0x50>;
+ pagesize = <16>;
+ read-only;
+ vcc-supply = <&vcc_3v3>;
+ };
+};
+
+&mdio1 {
+ rgmii_phy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1_rstn_l>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio4 RK_PC2 GPIO_ACTIVE_LOW>;
+ };
};
&pinctrl {
+ ethernet {
+ gmac1_rstn_l: gmac1-rstn-l {
+ rockchip,pins = <4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
gpio-keys {
user_key: user-key {
rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
@@ -130,6 +250,24 @@
rockchip,pins = <4 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
+
+ sdmmc {
+ sdmmc_vol_ctrl_h: sdmmc-vol-ctrl-h {
+ rockchip,pins = <4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm1m0_pins>;
+ status = "okay";
+};
+
+&pwm2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm2m0_pins>;
+ status = "okay";
};
&saradc {
@@ -148,6 +286,17 @@
status = "okay";
};
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0m0_xfer>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3528.dtsi b/arch/arm64/boot/dts/rockchip/rk3528.dtsi
index 26c3559d6a6d..d1c72b52aa4e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3528.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3528.dtsi
@@ -24,14 +24,6 @@
gpio2 = &gpio2;
gpio3 = &gpio3;
gpio4 = &gpio4;
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- serial5 = &uart5;
- serial6 = &uart6;
- serial7 = &uart7;
};
cpus {
@@ -103,6 +95,74 @@
};
};
+ pinctrl: pinctrl {
+ compatible = "rockchip,rk3528-pinctrl";
+ rockchip,grf = <&ioc_grf>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gpio0: gpio@ff610000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff610000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO0>, <&cru DBCLK_GPIO0>;
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@ffaf0000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xffaf0000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO1>, <&cru DBCLK_GPIO1>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@ffb00000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xffb00000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO2>, <&cru DBCLK_GPIO2>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@ffb10000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xffb10000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO3>, <&cru DBCLK_GPIO3>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@ffb20000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xffb20000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO4>, <&cru DBCLK_GPIO4>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 128 32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
psci {
compatible = "arm,psci-1.0", "arm,psci-0.2";
method = "smc";
@@ -321,6 +381,16 @@
reg = <0x0 0xff280400 0x0 0x20>;
};
+ vpu_grf: syscon@ff340000 {
+ compatible = "rockchip,rk3528-vpu-grf", "syscon";
+ reg = <0x0 0xff340000 0x0 0x8000>;
+ };
+
+ vo_grf: syscon@ff360000 {
+ compatible = "rockchip,rk3528-vo-grf", "syscon";
+ reg = <0x0 0xff360000 0x0 0x10000>;
+ };
+
cru: clock-controller@ff4a0000 {
compatible = "rockchip,rk3528-cru";
reg = <0x0 0xff4a0000 0x0 0x30000>;
@@ -375,6 +445,7 @@
clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmac 8>, <&dmac 9>;
reg-io-width = <4>;
reg-shift = <2>;
status = "disabled";
@@ -386,6 +457,7 @@
clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmac 10>, <&dmac 11>;
reg-io-width = <4>;
reg-shift = <2>;
status = "disabled";
@@ -397,6 +469,7 @@
clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmac 12>, <&dmac 13>;
reg-io-width = <4>;
reg-shift = <2>;
status = "disabled";
@@ -404,9 +477,11 @@
uart3: serial@ffa08000 {
compatible = "rockchip,rk3528-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xffa08000 0x0 0x100>;
clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>;
clock-names = "baudclk", "apb_pclk";
- reg = <0x0 0xffa08000 0x0 0x100>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmac 14>, <&dmac 15>;
reg-io-width = <4>;
reg-shift = <2>;
status = "disabled";
@@ -418,6 +493,7 @@
clocks = <&cru SCLK_UART4>, <&cru PCLK_UART4>;
clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmac 16>, <&dmac 17>;
reg-io-width = <4>;
reg-shift = <2>;
status = "disabled";
@@ -429,6 +505,7 @@
clocks = <&cru SCLK_UART5>, <&cru PCLK_UART5>;
clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmac 18>, <&dmac 19>;
reg-io-width = <4>;
reg-shift = <2>;
status = "disabled";
@@ -440,6 +517,7 @@
clocks = <&cru SCLK_UART6>, <&cru PCLK_UART6>;
clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmac 20>, <&dmac 21>;
reg-io-width = <4>;
reg-shift = <2>;
status = "disabled";
@@ -451,11 +529,194 @@
clocks = <&cru SCLK_UART7>, <&cru PCLK_UART7>;
clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmac 22>, <&dmac 23>;
reg-io-width = <4>;
reg-shift = <2>;
status = "disabled";
};
+ i2c0: i2c@ffa50000 {
+ compatible = "rockchip,rk3528-i2c",
+ "rockchip,rk3399-i2c";
+ reg = <0x0 0xffa50000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C0>, <&cru PCLK_I2C0>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@ffa58000 {
+ compatible = "rockchip,rk3528-i2c",
+ "rockchip,rk3399-i2c";
+ reg = <0x0 0xffa58000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C1>, <&cru PCLK_I2C1>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@ffa60000 {
+ compatible = "rockchip,rk3528-i2c",
+ "rockchip,rk3399-i2c";
+ reg = <0x0 0xffa60000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C2>, <&cru PCLK_I2C2>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2m1_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@ffa68000 {
+ compatible = "rockchip,rk3528-i2c",
+ "rockchip,rk3399-i2c";
+ reg = <0x0 0xffa68000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C3>, <&cru PCLK_I2C3>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@ffa70000 {
+ compatible = "rockchip,rk3528-i2c",
+ "rockchip,rk3399-i2c";
+ reg = <0x0 0xffa70000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C4>, <&cru PCLK_I2C4>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@ffa78000 {
+ compatible = "rockchip,rk3528-i2c",
+ "rockchip,rk3399-i2c";
+ reg = <0x0 0xffa78000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C5>, <&cru PCLK_I2C5>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@ffa80000 {
+ compatible = "rockchip,rk3528-i2c",
+ "rockchip,rk3399-i2c";
+ reg = <0x0 0xffa80000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C6>, <&cru PCLK_I2C6>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@ffa88000 {
+ compatible = "rockchip,rk3528-i2c",
+ "rockchip,rk3399-i2c";
+ reg = <0x0 0xffa88000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C7>, <&cru PCLK_I2C7>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c7_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ pwm0: pwm@ffa90000 {
+ compatible = "rockchip,rk3528-pwm",
+ "rockchip,rk3328-pwm";
+ reg = <0x0 0xffa90000 0x0 0x10>;
+ clocks = <&cru CLK_PWM0>, <&cru PCLK_PWM0>;
+ clock-names = "pwm", "pclk";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@ffa90010 {
+ compatible = "rockchip,rk3528-pwm",
+ "rockchip,rk3328-pwm";
+ reg = <0x0 0xffa90010 0x0 0x10>;
+ clocks = <&cru CLK_PWM0>, <&cru PCLK_PWM0>;
+ clock-names = "pwm", "pclk";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@ffa90020 {
+ compatible = "rockchip,rk3528-pwm",
+ "rockchip,rk3328-pwm";
+ reg = <0x0 0xffa90020 0x0 0x10>;
+ clocks = <&cru CLK_PWM0>, <&cru PCLK_PWM0>;
+ clock-names = "pwm", "pclk";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@ffa90030 {
+ compatible = "rockchip,rk3528-pwm",
+ "rockchip,rk3328-pwm";
+ reg = <0x0 0xffa90030 0x0 0x10>;
+ clocks = <&cru CLK_PWM0>, <&cru PCLK_PWM0>;
+ clock-names = "pwm", "pclk";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@ffa98000 {
+ compatible = "rockchip,rk3528-pwm",
+ "rockchip,rk3328-pwm";
+ reg = <0x0 0xffa98000 0x0 0x10>;
+ clocks = <&cru CLK_PWM1>, <&cru PCLK_PWM1>;
+ clock-names = "pwm", "pclk";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm5: pwm@ffa98010 {
+ compatible = "rockchip,rk3528-pwm",
+ "rockchip,rk3328-pwm";
+ reg = <0x0 0xffa98010 0x0 0x10>;
+ clocks = <&cru CLK_PWM1>, <&cru PCLK_PWM1>;
+ clock-names = "pwm", "pclk";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm6: pwm@ffa98020 {
+ compatible = "rockchip,rk3528-pwm",
+ "rockchip,rk3328-pwm";
+ reg = <0x0 0xffa98020 0x0 0x10>;
+ clocks = <&cru CLK_PWM1>, <&cru PCLK_PWM1>;
+ clock-names = "pwm", "pclk";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm7: pwm@ffa98030 {
+ compatible = "rockchip,rk3528-pwm",
+ "rockchip,rk3328-pwm";
+ reg = <0x0 0xffa98030 0x0 0x10>;
+ clocks = <&cru CLK_PWM1>, <&cru PCLK_PWM1>;
+ clock-names = "pwm", "pclk";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
saradc: adc@ffae0000 {
compatible = "rockchip,rk3528-saradc";
reg = <0x0 0xffae0000 0x0 0x10000>;
@@ -468,6 +729,111 @@
status = "disabled";
};
+ gmac0: ethernet@ffbd0000 {
+ compatible = "rockchip,rk3528-gmac", "snps,dwmac-4.20a";
+ reg = <0x0 0xffbd0000 0x0 0x10000>;
+ clocks = <&cru CLK_GMAC0_SRC>, <&cru CLK_GMAC0_RMII_50M>,
+ <&cru CLK_GMAC0_RX>, <&cru CLK_GMAC0_TX>,
+ <&cru PCLK_MAC_VO>, <&cru ACLK_MAC_VO>;
+ clock-names = "stmmaceth", "clk_mac_ref",
+ "mac_clk_rx", "mac_clk_tx",
+ "pclk_mac", "aclk_mac";
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_wake_irq";
+ phy-handle = <&rmii0_phy>;
+ phy-mode = "rmii";
+ resets = <&cru SRST_A_MAC_VO>;
+ reset-names = "stmmaceth";
+ rockchip,grf = <&vo_grf>;
+ snps,axi-config = <&gmac0_stmmac_axi_setup>;
+ snps,mixed-burst;
+ snps,mtl-rx-config = <&gmac0_mtl_rx_setup>;
+ snps,mtl-tx-config = <&gmac0_mtl_tx_setup>;
+ snps,tso;
+ status = "disabled";
+
+ mdio0: mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+
+ rmii0_phy: ethernet-phy@2 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x2>;
+ clocks = <&cru CLK_MACPHY>;
+ phy-is-integrated;
+ pinctrl-names = "default";
+ pinctrl-0 = <&fephym0_led_link>,
+ <&fephym0_led_spd>;
+ resets = <&cru SRST_MACPHY>;
+ };
+ };
+
+ gmac0_stmmac_axi_setup: stmmac-axi-config {
+ snps,blen = <0 0 0 0 16 8 4>;
+ snps,rd_osr_lmt = <8>;
+ snps,wr_osr_lmt = <4>;
+ };
+
+ gmac0_mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <1>;
+ queue0 {};
+ };
+
+ gmac0_mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <1>;
+ queue0 {};
+ };
+ };
+
+ gmac1: ethernet@ffbe0000 {
+ compatible = "rockchip,rk3528-gmac", "snps,dwmac-4.20a";
+ reg = <0x0 0xffbe0000 0x0 0x10000>;
+ clocks = <&cru CLK_GMAC1_SRC_VPU>,
+ <&cru CLK_GMAC1_RMII_VPU>,
+ <&cru PCLK_MAC_VPU>,
+ <&cru ACLK_MAC_VPU>;
+ clock-names = "stmmaceth",
+ "clk_mac_ref",
+ "pclk_mac",
+ "aclk_mac";
+ interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_wake_irq";
+ resets = <&cru SRST_A_MAC>;
+ reset-names = "stmmaceth";
+ rockchip,grf = <&vpu_grf>;
+ snps,axi-config = <&gmac1_stmmac_axi_setup>;
+ snps,mixed-burst;
+ snps,mtl-rx-config = <&gmac1_mtl_rx_setup>;
+ snps,mtl-tx-config = <&gmac1_mtl_tx_setup>;
+ snps,tso;
+ status = "disabled";
+
+ mdio1: mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ };
+
+ gmac1_stmmac_axi_setup: stmmac-axi-config {
+ snps,blen = <0 0 0 0 16 8 4>;
+ snps,rd_osr_lmt = <8>;
+ snps,wr_osr_lmt = <4>;
+ };
+
+ gmac1_mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <1>;
+ queue0 {};
+ };
+
+ gmac1_mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <1>;
+ queue0 {};
+ };
+ };
+
sdhci: mmc@ffbf0000 {
compatible = "rockchip,rk3528-dwcmshc",
"rockchip,rk3588-dwcmshc";
@@ -492,72 +858,81 @@
status = "disabled";
};
- pinctrl: pinctrl {
- compatible = "rockchip,rk3528-pinctrl";
- rockchip,grf = <&ioc_grf>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- gpio0: gpio@ff610000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xff610000 0x0 0x200>;
- clocks = <&cru PCLK_GPIO0>, <&cru DBCLK_GPIO0>;
- interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pinctrl 0 0 32>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio1: gpio@ffaf0000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xffaf0000 0x0 0x200>;
- clocks = <&cru PCLK_GPIO1>, <&cru DBCLK_GPIO1>;
- interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pinctrl 0 32 32>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
+ sdio0: mmc@ffc10000 {
+ compatible = "rockchip,rk3528-dw-mshc",
+ "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xffc10000 0x0 0x4000>;
+ clocks = <&cru HCLK_SDIO0>,
+ <&cru CCLK_SRC_SDIO0>,
+ <&cru SCLK_SDIO0_DRV>,
+ <&cru SCLK_SDIO0_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+ max-frequency = <200000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio0_bus4>, <&sdio0_clk>, <&sdio0_cmd>;
+ resets = <&cru SRST_H_SDIO0>;
+ reset-names = "reset";
+ status = "disabled";
+ };
- gpio2: gpio@ffb00000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xffb00000 0x0 0x200>;
- clocks = <&cru PCLK_GPIO2>, <&cru DBCLK_GPIO2>;
- interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pinctrl 0 64 32>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
+ sdio1: mmc@ffc20000 {
+ compatible = "rockchip,rk3528-dw-mshc",
+ "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xffc20000 0x0 0x4000>;
+ clocks = <&cru HCLK_SDIO1>,
+ <&cru CCLK_SRC_SDIO1>,
+ <&cru SCLK_SDIO1_DRV>,
+ <&cru SCLK_SDIO1_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ max-frequency = <200000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio1_bus4>, <&sdio1_clk>, <&sdio1_cmd>;
+ resets = <&cru SRST_H_SDIO1>;
+ reset-names = "reset";
+ status = "disabled";
+ };
- gpio3: gpio@ffb10000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xffb10000 0x0 0x200>;
- clocks = <&cru PCLK_GPIO3>, <&cru DBCLK_GPIO3>;
- interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pinctrl 0 96 32>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
+ sdmmc: mmc@ffc30000 {
+ compatible = "rockchip,rk3528-dw-mshc",
+ "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xffc30000 0x0 0x4000>;
+ clocks = <&cru HCLK_SDMMC0>,
+ <&cru CCLK_SRC_SDMMC0>,
+ <&cru SCLK_SDMMC_DRV>,
+ <&cru SCLK_SDMMC_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ max-frequency = <150000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_bus4>, <&sdmmc_clk>, <&sdmmc_cmd>,
+ <&sdmmc_det>;
+ resets = <&cru SRST_H_SDMMC0>;
+ reset-names = "reset";
+ rockchip,default-sample-phase = <90>;
+ status = "disabled";
+ };
- gpio4: gpio@ffb20000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xffb20000 0x0 0x200>;
- clocks = <&cru PCLK_GPIO4>, <&cru DBCLK_GPIO4>;
- interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pinctrl 0 128 32>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
+ dmac: dma-controller@ffd60000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xffd60000 0x0 0x4000>;
+ clocks = <&cru ACLK_DMAC>;
+ clock-names = "apb_pclk";
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-periph-burst;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3562-evb2-v10.dts b/arch/arm64/boot/dts/rockchip/rk3562-evb2-v10.dts
new file mode 100644
index 000000000000..6a84db154a7d
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3562-evb2-v10.dts
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024-2025 Rockchip Electronics Co., Ltd.
+ *
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "rk3562.dtsi"
+
+/ {
+	model = "Rockchip RK3562 EVB2 V10 Board";
+ compatible = "rockchip,rk3562-evb2-v10", "rockchip,rk3562";
+
+ chosen: chosen {
+ stdout-path = "serial0:1500000n8";
+ };
+
+ adc_keys: adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc0 1>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-vol-up {
+ linux,code = <KEY_VOLUMEUP>;
+ label = "volume up";
+ press-threshold-microvolt = <17000>;
+ };
+
+ button-vol-down {
+ linux,code = <KEY_VOLUMEDOWN>;
+ label = "volume down";
+ press-threshold-microvolt = <414000>;
+ };
+
+ button-menu {
+ linux,code = <KEY_MENU>;
+ label = "menu";
+ press-threshold-microvolt = <800000>;
+ };
+
+ button-back {
+ linux,code = <KEY_BACK>;
+ label = "back";
+ press-threshold-microvolt = <1200000>;
+ };
+ };
+
+ leds: leds {
+ compatible = "gpio-leds";
+
+ work_led: led-0 {
+ gpios = <&gpio0 RK_PB4 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk809 1>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_enable_h>;
+
+ /*
+ * On the module itself this is one of these (depending
+ * on the actual card populated):
+ * - SDIO_RESET_L_WL_REG_ON
+ * - PDN (power down when low)
+ */
+ post-power-on-delay-ms = <200>;
+ reset-gpios = <&gpio0 RK_PB3 GPIO_ACTIVE_LOW>;
+ };
+
+ vcc12v_dcin: regulator-vcc12v-dcin {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc3v3_pcie20: regulator-vcc3v3-pcie20 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_pcie20";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-active-high;
+ gpios = <&gpio0 RK_PB7 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <5000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_sys: regulator-vcc5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_usb: regulator-vcc5v0-usb {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_usb";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_usb_host: regulator-vcc5v0-usb-host {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_usb_host";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&vcc5v0_usb>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_host_pwren>;
+ };
+
+ vcc5v0_usb_otg: regulator-vcc5v0-usb-otg {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_usb_otg";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&gpio0 RK_PC0 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&vcc5v0_usb>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_otg_pwren>;
+ };
+
+ vcc3v3_clk: regulator-vcc3v3-clk {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_clk";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_sys: regulator-vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+};
+
+&combphy {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ rk809: pmic@20 {
+ compatible = "rockchip,rk809";
+ reg = <0x20>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-names = "default", "pmic-sleep",
+ "pmic-power-off", "pmic-reset";
+ pinctrl-0 = <&pmic_int>;
+ rockchip,system-power-controller;
+ wakeup-source;
+ #clock-cells = <1>;
+ clock-output-names = "rk808-clkout1", "rk808-clkout2";
+
+ vcc1-supply = <&vcc3v3_sys>;
+ vcc2-supply = <&vcc3v3_sys>;
+ vcc3-supply = <&vcc3v3_sys>;
+ vcc4-supply = <&vcc3v3_sys>;
+ vcc5-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc3v3_sys>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc3v3_sys>;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-initial-mode = <0x2>;
+ regulator-name = "vdd_logic";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu: DCDC_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-initial-mode = <0x2>;
+ regulator-name = "vdd_cpu";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-name = "vcc_ddr";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_gpu: DCDC_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-initial-mode = <0x2>;
+ regulator-name = "vdd_gpu";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc2v8_dvp: LDO_REG1 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-name = "vcc2v8_dvp";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v9: LDO_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdda_0v9";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_pmu: LDO_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdda0v9_pmu";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vccio_acodec: LDO_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vccio_acodec";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_sd";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc3v3_pmu";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcca_1v8: LDO_REG7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca_1v8";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pmu: LDO_REG8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca1v8_pmu";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc1v8_dvp: LDO_REG9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc1v8_dvp";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8: DCDC_REG5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3: SWITCH_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_3v3";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_sd: SWITCH_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc3v3_sd";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&pcie2x1 {
+ reset-gpios = <&gpio3 RK_PB0 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie20>;
+ status = "okay";
+};
+
+&pinctrl {
+ sdio-pwrseq {
+ wifi_enable_h: wifi-enable-h {
+ rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ usb_host_pwren: usb-host-pwren {
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usb_otg_pwren: usb-otg-pwren {
+ rockchip,pins = <0 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&saradc0 {
+ vref-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ no-sdio;
+ no-sd;
+ non-removable;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ full-pwr-cycle-in-suspend;
+ status = "okay";
+};
+
+&sdmmc0 {
+ no-sdio;
+ no-mmc;
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc3v3_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&sdmmc1 {
+ no-sd;
+ no-mmc;
+ bus-width = <4>;
+ disable-wp;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_cmd &sdmmc1_clk>;
+ sd-uhs-sdr104;
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3562-pinctrl.dtsi b/arch/arm64/boot/dts/rockchip/rk3562-pinctrl.dtsi
new file mode 100644
index 000000000000..b311448d77a3
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3562-pinctrl.dtsi
@@ -0,0 +1,2352 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ */
+
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "rockchip-pinconf.dtsi"
+
+/*
+ * This file is auto-generated by the pin2dts tool; please keep this code
+ * and add changes at the end of this file.
+ */
+&pinctrl {
+ cam {
+ /omit-if-no-ref/
+ camm0_clk0_out: camm0-clk0-out {
+ rockchip,pins =
+ /* camm0_clk0_out */
+ <3 RK_PB2 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ camm0_clk1_out: camm0-clk1-out {
+ rockchip,pins =
+ /* camm0_clk1_out */
+ <3 RK_PB3 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ camm1_clk0_out: camm1-clk0-out {
+ rockchip,pins =
+ /* camm1_clk0_out */
+ <4 RK_PB1 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ camm1_clk1_out: camm1-clk1-out {
+ rockchip,pins =
+ /* camm1_clk1_out */
+ <4 RK_PB7 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ cam_clk2_out: cam-clk2-out {
+ rockchip,pins =
+ /* cam_clk2_out */
+ <3 RK_PB4 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ cam_clk3_out: cam-clk3-out {
+ rockchip,pins =
+ /* cam_clk3_out */
+ <3 RK_PB5 2 &pcfg_pull_none>;
+ };
+ };
+
+ can0 {
+ /omit-if-no-ref/
+ can0m0_pins: can0m0-pins {
+ rockchip,pins =
+ /* can0_rx_m0 */
+ <3 RK_PA1 4 &pcfg_pull_none>,
+ /* can0_tx_m0 */
+ <3 RK_PA0 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ can0m1_pins: can0m1-pins {
+ rockchip,pins =
+ /* can0_rx_m1 */
+ <3 RK_PB7 6 &pcfg_pull_none>,
+ /* can0_tx_m1 */
+ <3 RK_PB6 6 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ can0m2_pins: can0m2-pins {
+ rockchip,pins =
+ /* can0_rx_m2 */
+ <0 RK_PC7 2 &pcfg_pull_none>,
+ /* can0_tx_m2 */
+ <0 RK_PC6 2 &pcfg_pull_none>;
+ };
+ };
+
+ can1 {
+ /omit-if-no-ref/
+ can1m0_pins: can1m0-pins {
+ rockchip,pins =
+ /* can1_rx_m0 */
+ <1 RK_PB7 4 &pcfg_pull_none>,
+ /* can1_tx_m0 */
+ <1 RK_PC0 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ can1m1_pins: can1m1-pins {
+ rockchip,pins =
+ /* can1_rx_m1 */
+ <0 RK_PC1 4 &pcfg_pull_none>,
+ /* can1_tx_m1 */
+ <0 RK_PC0 4 &pcfg_pull_none>;
+ };
+ };
+
+ clk {
+ /omit-if-no-ref/
+ clk_32k_in: clk-32k-in {
+ rockchip,pins =
+ /* clk_32k_in */
+ <0 RK_PB0 1 &pcfg_pull_none>;
+ };
+ };
+
+ clk0 {
+ /omit-if-no-ref/
+ clk0_32k_out: clk0-32k-out {
+ rockchip,pins =
+ /* clk0_32k_out */
+ <0 RK_PB0 2 &pcfg_pull_none>;
+ };
+ };
+
+ clk1 {
+ /omit-if-no-ref/
+ clk1_32k_out: clk1-32k-out {
+ rockchip,pins =
+ /* clk1_32k_out */
+ <2 RK_PA1 3 &pcfg_pull_none>;
+ };
+ };
+
+ cpu {
+ /omit-if-no-ref/
+ cpu_pins: cpu-pins {
+ rockchip,pins =
+ /* cpu_avs */
+ <0 RK_PB7 3 &pcfg_pull_none>;
+ };
+ };
+
+ dsm {
+ /omit-if-no-ref/
+ dsm_pins: dsm-pins {
+ rockchip,pins =
+ /* dsm_aud_ln */
+ <1 RK_PB4 5 &pcfg_pull_none>,
+ /* dsm_aud_lp */
+ <1 RK_PB3 5 &pcfg_pull_none>,
+ /* dsm_aud_rn */
+ <1 RK_PB6 6 &pcfg_pull_none>,
+ /* dsm_aud_rp */
+ <1 RK_PB5 6 &pcfg_pull_none>;
+ };
+ };
+
+ emmc {
+ /omit-if-no-ref/
+ emmc_bus8: emmc-bus8 {
+ rockchip,pins =
+ /* emmc_d0 */
+ <1 RK_PA0 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d1 */
+ <1 RK_PA1 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d2 */
+ <1 RK_PA2 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d3 */
+ <1 RK_PA3 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d4 */
+ <1 RK_PA4 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d5 */
+ <1 RK_PA5 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d6 */
+ <1 RK_PA6 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d7 */
+ <1 RK_PA7 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ emmc_clk: emmc-clk {
+ rockchip,pins =
+ /* emmc_clk */
+ <1 RK_PB1 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ emmc_cmd: emmc-cmd {
+ rockchip,pins =
+ /* emmc_cmd */
+ <1 RK_PB0 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ emmc_strb: emmc-strb {
+ rockchip,pins =
+ /* emmc_strb */
+ <1 RK_PB2 1 &pcfg_pull_none>;
+ };
+ };
+
+ eth {
+ /omit-if-no-ref/
+ ethm0_pins: ethm0-pins {
+ rockchip,pins =
+ /* eth_clk_25m_out_m0 */
+ <4 RK_PB1 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ ethm1_pins: ethm1-pins {
+ rockchip,pins =
+ /* eth_clk_25m_out_m1 */
+ <2 RK_PA1 2 &pcfg_pull_none>;
+ };
+ };
+
+ fspi {
+ /omit-if-no-ref/
+ fspi_pins: fspi-pins {
+ rockchip,pins =
+ /* fspi_clk */
+ <1 RK_PB1 2 &pcfg_pull_none>,
+ /* fspi_d0 */
+ <1 RK_PA0 2 &pcfg_pull_none>,
+ /* fspi_d1 */
+ <1 RK_PA1 2 &pcfg_pull_none>,
+ /* fspi_d2 */
+ <1 RK_PA2 2 &pcfg_pull_none>,
+ /* fspi_d3 */
+ <1 RK_PA3 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ fspi_csn0: fspi-csn0 {
+ rockchip,pins =
+ /* fspi_csn0 */
+ <1 RK_PB0 2 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ fspi_csn1: fspi-csn1 {
+ rockchip,pins =
+ /* fspi_csn1 */
+ <1 RK_PB2 2 &pcfg_pull_none>;
+ };
+ };
+
+ gpu {
+ /omit-if-no-ref/
+ gpu_pins: gpu-pins {
+ rockchip,pins =
+ /* gpu_avs */
+ <0 RK_PC0 3 &pcfg_pull_none>;
+ };
+ };
+
+ i2c0 {
+ /omit-if-no-ref/
+ i2c0_xfer: i2c0-xfer {
+ rockchip,pins =
+ /* i2c0_scl */
+ <0 RK_PB1 1 &pcfg_pull_none_smt>,
+ /* i2c0_sda */
+ <0 RK_PB2 1 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c1 {
+ /omit-if-no-ref/
+ i2c1m0_xfer: i2c1m0-xfer {
+ rockchip,pins =
+ /* i2c1_scl_m0 */
+ <0 RK_PB3 1 &pcfg_pull_none_smt>,
+ /* i2c1_sda_m0 */
+ <0 RK_PB4 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c1m1_xfer: i2c1m1-xfer {
+ rockchip,pins =
+ /* i2c1_scl_m1 */
+ <4 RK_PB4 5 &pcfg_pull_none_smt>,
+ /* i2c1_sda_m1 */
+ <4 RK_PB5 5 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c2 {
+ /omit-if-no-ref/
+ i2c2m0_xfer: i2c2m0-xfer {
+ rockchip,pins =
+ /* i2c2_scl_m0 */
+ <0 RK_PB5 1 &pcfg_pull_none_smt>,
+ /* i2c2_sda_m0 */
+ <0 RK_PB6 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c2m1_xfer: i2c2m1-xfer {
+ rockchip,pins =
+ /* i2c2_scl_m1 */
+ <3 RK_PD2 5 &pcfg_pull_none_smt>,
+ /* i2c2_sda_m1 */
+ <3 RK_PD3 5 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c3 {
+ /omit-if-no-ref/
+ i2c3m0_xfer: i2c3m0-xfer {
+ rockchip,pins =
+ /* i2c3_scl_m0 */
+ <3 RK_PA0 1 &pcfg_pull_none_smt>,
+ /* i2c3_sda_m0 */
+ <3 RK_PA1 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c3m1_xfer: i2c3m1-xfer {
+ rockchip,pins =
+ /* i2c3_scl_m1 */
+ <4 RK_PA5 5 &pcfg_pull_none_smt>,
+ /* i2c3_sda_m1 */
+ <4 RK_PA6 5 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c4 {
+ /omit-if-no-ref/
+ i2c4m0_xfer: i2c4m0-xfer {
+ rockchip,pins =
+ /* i2c4_scl_m0 */
+ <3 RK_PB6 5 &pcfg_pull_none_smt>,
+ /* i2c4_sda_m0 */
+ <3 RK_PB7 5 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c4m1_xfer: i2c4m1-xfer {
+ rockchip,pins =
+ /* i2c4_scl_m1 */
+ <0 RK_PA5 2 &pcfg_pull_none_smt>,
+ /* i2c4_sda_m1 */
+ <0 RK_PA4 2 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c5 {
+ /omit-if-no-ref/
+ i2c5m0_xfer: i2c5m0-xfer {
+ rockchip,pins =
+ /* i2c5_scl_m0 */
+ <3 RK_PC2 1 &pcfg_pull_none_smt>,
+ /* i2c5_sda_m0 */
+ <3 RK_PC3 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c5m1_xfer: i2c5m1-xfer {
+ rockchip,pins =
+ /* i2c5_scl_m1 */
+ <1 RK_PC7 4 &pcfg_pull_none_smt>,
+ /* i2c5_sda_m1 */
+ <1 RK_PD0 4 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2s0 {
+ /omit-if-no-ref/
+ i2s0m0_lrck: i2s0m0-lrck {
+ rockchip,pins =
+ /* i2s0_lrck_m0 */
+ <3 RK_PA4 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m0_mclk: i2s0m0-mclk {
+ rockchip,pins =
+ /* i2s0_mclk_m0 */
+ <3 RK_PA2 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m0_sclk: i2s0m0-sclk {
+ rockchip,pins =
+ /* i2s0_sclk_m0 */
+ <3 RK_PA3 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m0_sdi0: i2s0m0-sdi0 {
+ rockchip,pins =
+ /* i2s0_sdi0_m0 */
+ <3 RK_PB1 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m0_sdi1: i2s0m0-sdi1 {
+ rockchip,pins =
+ /* i2s0_sdi1_m0 */
+ <3 RK_PB0 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m0_sdi2: i2s0m0-sdi2 {
+ rockchip,pins =
+ /* i2s0_sdi2_m0 */
+ <3 RK_PA7 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m0_sdi3: i2s0m0-sdi3 {
+ rockchip,pins =
+ /* i2s0_sdi3_m0 */
+ <3 RK_PA6 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m0_sdo0: i2s0m0-sdo0 {
+ rockchip,pins =
+ /* i2s0_sdo0_m0 */
+ <3 RK_PA5 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m0_sdo1: i2s0m0-sdo1 {
+ rockchip,pins =
+ /* i2s0_sdo1_m0 */
+ <3 RK_PA6 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m0_sdo2: i2s0m0-sdo2 {
+ rockchip,pins =
+ /* i2s0_sdo2_m0 */
+ <3 RK_PA7 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m0_sdo3: i2s0m0-sdo3 {
+ rockchip,pins =
+ /* i2s0_sdo3_m0 */
+ <3 RK_PB0 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_lrck: i2s0m1-lrck {
+ rockchip,pins =
+ /* i2s0_lrck_m1 */
+ <1 RK_PC4 3 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_mclk: i2s0m1-mclk {
+ rockchip,pins =
+ /* i2s0_mclk_m1 */
+ <1 RK_PC6 3 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_sclk: i2s0m1-sclk {
+ rockchip,pins =
+ /* i2s0_sclk_m1 */
+ <1 RK_PC5 3 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_sdi0: i2s0m1-sdi0 {
+ rockchip,pins =
+ /* i2s0_sdi0_m1 */
+ <1 RK_PC1 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_sdi1: i2s0m1-sdi1 {
+ rockchip,pins =
+ /* i2s0_sdi1_m1 */
+ <1 RK_PC2 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_sdi2: i2s0m1-sdi2 {
+ rockchip,pins =
+ /* i2s0_sdi2_m1 */
+ <1 RK_PD3 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_sdi3: i2s0m1-sdi3 {
+ rockchip,pins =
+ /* i2s0_sdi3_m1 */
+ <1 RK_PD4 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_sdo0: i2s0m1-sdo0 {
+ rockchip,pins =
+ /* i2s0_sdo0_m1 */
+ <1 RK_PC3 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_sdo1: i2s0m1-sdo1 {
+ rockchip,pins =
+ /* i2s0_sdo1_m1 */
+ <1 RK_PD1 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_sdo2: i2s0m1-sdo2 {
+ rockchip,pins =
+ /* i2s0_sdo2_m1 */
+ <1 RK_PD2 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_sdo3: i2s0m1-sdo3 {
+ rockchip,pins =
+ /* i2s0_sdo3_m1 */
+ <2 RK_PA1 5 &pcfg_pull_none>;
+ };
+ };
+
+ i2s1 {
+ /omit-if-no-ref/
+ i2s1m0_lrck: i2s1m0-lrck {
+ rockchip,pins =
+ /* i2s1_lrck_m0 */
+ <3 RK_PC6 2 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_mclk: i2s1m0-mclk {
+ rockchip,pins =
+ /* i2s1_mclk_m0 */
+ <3 RK_PC4 2 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sclk: i2s1m0-sclk {
+ rockchip,pins =
+ /* i2s1_sclk_m0 */
+ <3 RK_PC5 2 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdi0: i2s1m0-sdi0 {
+ rockchip,pins =
+ /* i2s1_sdi0_m0 */
+ <3 RK_PD0 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdi1: i2s1m0-sdi1 {
+ rockchip,pins =
+ /* i2s1_sdi1_m0 */
+ <3 RK_PD1 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdi2: i2s1m0-sdi2 {
+ rockchip,pins =
+ /* i2s1_sdi2_m0 */
+ <3 RK_PD2 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdi3: i2s1m0-sdi3 {
+ rockchip,pins =
+ /* i2s1_sdi3_m0 */
+ <3 RK_PD3 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdo0: i2s1m0-sdo0 {
+ rockchip,pins =
+ /* i2s1_sdo0_m0 */
+ <3 RK_PC7 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdo1: i2s1m0-sdo1 {
+ rockchip,pins =
+ /* i2s1_sdo1_m0 */
+ <4 RK_PB4 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdo2: i2s1m0-sdo2 {
+ rockchip,pins =
+ /* i2s1_sdo2_m0 */
+ <4 RK_PB5 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdo3: i2s1m0-sdo3 {
+ rockchip,pins =
+ /* i2s1_sdo3_m0 */
+ <4 RK_PB6 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_lrck: i2s1m1-lrck {
+ rockchip,pins =
+ /* i2s1_lrck_m1 */
+ <3 RK_PB4 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_mclk: i2s1m1-mclk {
+ rockchip,pins =
+ /* i2s1_mclk_m1 */
+ <3 RK_PB2 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sclk: i2s1m1-sclk {
+ rockchip,pins =
+ /* i2s1_sclk_m1 */
+ <3 RK_PB3 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdi0: i2s1m1-sdi0 {
+ rockchip,pins =
+ /* i2s1_sdi0_m1 */
+ <3 RK_PC1 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdi1: i2s1m1-sdi1 {
+ rockchip,pins =
+ /* i2s1_sdi1_m1 */
+ <3 RK_PC0 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdi2: i2s1m1-sdi2 {
+ rockchip,pins =
+ /* i2s1_sdi2_m1 */
+ <3 RK_PB7 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdi3: i2s1m1-sdi3 {
+ rockchip,pins =
+ /* i2s1_sdi3_m1 */
+ <3 RK_PB6 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdo0: i2s1m1-sdo0 {
+ rockchip,pins =
+ /* i2s1_sdo0_m1 */
+ <3 RK_PB5 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdo1: i2s1m1-sdo1 {
+ rockchip,pins =
+ /* i2s1_sdo1_m1 */
+ <3 RK_PB6 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdo2: i2s1m1-sdo2 {
+ rockchip,pins =
+ /* i2s1_sdo2_m1 */
+ <3 RK_PB7 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdo3: i2s1m1-sdo3 {
+ rockchip,pins =
+ /* i2s1_sdo3_m1 */
+ <3 RK_PC0 1 &pcfg_pull_none>;
+ };
+ };
+
+ i2s2 {
+ /omit-if-no-ref/
+ i2s2m0_lrck: i2s2m0-lrck {
+ rockchip,pins =
+ /* i2s2_lrck_m0 */
+ <1 RK_PD6 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m0_mclk: i2s2m0-mclk {
+ rockchip,pins =
+ /* i2s2_mclk_m0 */
+ <2 RK_PA1 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m0_sclk: i2s2m0-sclk {
+ rockchip,pins =
+ /* i2s2_sclk_m0 */
+ <1 RK_PD5 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m0_sdi: i2s2m0-sdi {
+ rockchip,pins =
+ /* i2s2_sdi_m0 */
+ <2 RK_PA0 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m0_sdo: i2s2m0-sdo {
+ rockchip,pins =
+ /* i2s2_sdo_m0 */
+ <1 RK_PD7 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m1_lrck: i2s2m1-lrck {
+ rockchip,pins =
+ /* i2s2_lrck_m1 */
+ <4 RK_PA1 3 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m1_mclk: i2s2m1-mclk {
+ rockchip,pins =
+ /* i2s2_mclk_m1 */
+ <3 RK_PD6 3 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m1_sclk: i2s2m1-sclk {
+ rockchip,pins =
+ /* i2s2_sclk_m1 */
+ <4 RK_PB1 4 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m1_sdi: i2s2m1-sdi {
+ rockchip,pins =
+ /* i2s2_sdi_m1 */
+ <3 RK_PD4 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m1_sdo: i2s2m1-sdo {
+ rockchip,pins =
+ /* i2s2_sdo_m1 */
+ <3 RK_PD5 4 &pcfg_pull_none>;
+ };
+ };
+
+ isp {
+ /omit-if-no-ref/
+ isp_pins: isp-pins {
+ rockchip,pins =
+ /* isp_flash_trigin */
+ <3 RK_PC1 2 &pcfg_pull_none>,
+ /* isp_flash_trigout */
+ <3 RK_PC3 2 &pcfg_pull_none>,
+ /* isp_prelight_trigout */
+ <3 RK_PC2 2 &pcfg_pull_none>;
+ };
+ };
+
+ jtag {
+ /omit-if-no-ref/
+ jtagm0_pins: jtagm0-pins {
+ rockchip,pins =
+ /* jtag_cpu_mcu_tck_m0 */
+ <0 RK_PD1 2 &pcfg_pull_none>,
+ /* jtag_cpu_mcu_tms_m0 */
+ <0 RK_PD0 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ jtagm1_pins: jtagm1-pins {
+ rockchip,pins =
+ /* jtag_cpu_mcu_tck_m1 */
+ <1 RK_PB5 2 &pcfg_pull_none>,
+ /* jtag_cpu_mcu_tms_m1 */
+ <1 RK_PB6 2 &pcfg_pull_none>;
+ };
+ };
+
+ npu {
+ /omit-if-no-ref/
+ npu_pins: npu-pins {
+ rockchip,pins =
+ /* npu_avs */
+ <0 RK_PC1 3 &pcfg_pull_none>;
+ };
+ };
+
+ pcie20 {
+ /omit-if-no-ref/
+ pcie20m0_pins: pcie20m0-pins {
+ rockchip,pins =
+ /* pcie20_clkreqn_m0 */
+ <0 RK_PA6 1 &pcfg_pull_none>,
+ /* pcie20_perstn_m0 */
+ <0 RK_PB5 2 &pcfg_pull_none>,
+ /* pcie20_waken_m0 */
+ <0 RK_PB6 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pcie20m1_pins: pcie20m1-pins {
+ rockchip,pins =
+ /* pcie20_clkreqn_m1 */
+ <3 RK_PA6 4 &pcfg_pull_none>,
+ /* pcie20_perstn_m1 */
+ <3 RK_PB0 4 &pcfg_pull_none>,
+ /* pcie20_waken_m1 */
+ <3 RK_PA7 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pcie20_buttonrstn: pcie20-buttonrstn {
+ rockchip,pins =
+ /* pcie20_buttonrstn */
+ <0 RK_PB0 3 &pcfg_pull_none>;
+ };
+ };
+
+ pdm {
+ /omit-if-no-ref/
+ pdmm0_clk0: pdmm0-clk0 {
+ rockchip,pins =
+ /* pdm_clk0_m0 */
+ <3 RK_PA6 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm0_clk1: pdmm0-clk1 {
+ rockchip,pins =
+ /* pdm_clk1_m0 */
+ <3 RK_PA2 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm0_sdi0: pdmm0-sdi0 {
+ rockchip,pins =
+ /* pdm_sdi0_m0 */
+ <3 RK_PB1 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm0_sdi1: pdmm0-sdi1 {
+ rockchip,pins =
+ /* pdm_sdi1_m0 */
+ <3 RK_PB0 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm0_sdi2: pdmm0-sdi2 {
+ rockchip,pins =
+ /* pdm_sdi2_m0 */
+ <3 RK_PA7 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm0_sdi3: pdmm0-sdi3 {
+ rockchip,pins =
+ /* pdm_sdi3_m0 */
+ <3 RK_PA0 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm1_clk0: pdmm1-clk0 {
+ rockchip,pins =
+ /* pdm_clk0_m1 */
+ <4 RK_PB7 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm1_clk1: pdmm1-clk1 {
+ rockchip,pins =
+ /* pdm_clk1_m1 */
+ <4 RK_PB1 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm1_sdi0: pdmm1-sdi0 {
+ rockchip,pins =
+ /* pdm_sdi0_m1 */
+ <4 RK_PA7 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm1_sdi1: pdmm1-sdi1 {
+ rockchip,pins =
+ /* pdm_sdi1_m1 */
+ <4 RK_PB0 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm1_sdi2: pdmm1-sdi2 {
+ rockchip,pins =
+ /* pdm_sdi2_m1 */
+ <4 RK_PA5 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm1_sdi3: pdmm1-sdi3 {
+ rockchip,pins =
+ /* pdm_sdi3_m1 */
+ <4 RK_PA6 4 &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ /omit-if-no-ref/
+ pmic_int: pmic-int {
+ rockchip,pins =
+ <0 RK_PA3 0 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ soc_slppin_gpio: soc-slppin-gpio {
+ rockchip,pins =
+ <0 RK_PA2 0 &pcfg_output_low>;
+ };
+
+ /omit-if-no-ref/
+ soc_slppin_slp: soc-slppin-slp {
+ rockchip,pins =
+ <0 RK_PA2 1 &pcfg_pull_none>;
+ };
+ };
+
+ pmu {
+ /omit-if-no-ref/
+ pmu_pins: pmu-pins {
+ rockchip,pins =
+ /* pmu_debug */
+ <0 RK_PA5 3 &pcfg_pull_none>;
+ };
+ };
+
+ pwm0 {
+ /omit-if-no-ref/
+ pwm0m0_pins: pwm0m0-pins {
+ rockchip,pins =
+ /* pwm0_m0 */
+ <0 RK_PC3 2 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm0m1_pins: pwm0m1-pins {
+ rockchip,pins =
+ /* pwm0_m1 */
+ <1 RK_PC5 4 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm1 {
+ /omit-if-no-ref/
+ pwm1m0_pins: pwm1m0-pins {
+ rockchip,pins =
+ /* pwm1_m0 */
+ <0 RK_PC4 2 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm1m1_pins: pwm1m1-pins {
+ rockchip,pins =
+ /* pwm1_m1 */
+ <1 RK_PC6 4 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm2 {
+ /omit-if-no-ref/
+ pwm2m0_pins: pwm2m0-pins {
+ rockchip,pins =
+ /* pwm2_m0 */
+ <0 RK_PC5 2 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm2m1_pins: pwm2m1-pins {
+ rockchip,pins =
+ /* pwm2_m1 */
+ <1 RK_PC7 3 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm3 {
+ /omit-if-no-ref/
+ pwm3m0_pins: pwm3m0-pins {
+ rockchip,pins =
+ /* pwm3_m0 */
+ <0 RK_PA7 1 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm3m1_pins: pwm3m1-pins {
+ rockchip,pins =
+ /* pwm3_m1 */
+ <1 RK_PD0 3 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm4 {
+ /omit-if-no-ref/
+ pwm4m0_pins: pwm4m0-pins {
+ rockchip,pins =
+ /* pwm4_m0 */
+ <0 RK_PB7 2 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm4m1_pins: pwm4m1-pins {
+ rockchip,pins =
+ /* pwm4_m1 */
+ <1 RK_PD1 4 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm5 {
+ /omit-if-no-ref/
+ pwm5m0_pins: pwm5m0-pins {
+ rockchip,pins =
+ /* pwm5_m0 */
+ <0 RK_PC2 2 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm5m1_pins: pwm5m1-pins {
+ rockchip,pins =
+ /* pwm5_m1 */
+ <1 RK_PD2 4 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm6 {
+ /omit-if-no-ref/
+ pwm6m0_pins: pwm6m0-pins {
+ rockchip,pins =
+ /* pwm6_m0 */
+ <0 RK_PC1 2 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm6m1_pins: pwm6m1-pins {
+ rockchip,pins =
+ /* pwm6_m1 */
+ <1 RK_PD3 4 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm7 {
+ /omit-if-no-ref/
+ pwm7m0_pins: pwm7m0-pins {
+ rockchip,pins =
+ /* pwm7_m0 */
+ <0 RK_PC0 2 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm7m1_pins: pwm7m1-pins {
+ rockchip,pins =
+ /* pwm7_m1 */
+ <1 RK_PD4 4 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm8 {
+ /omit-if-no-ref/
+ pwm8m0_pins: pwm8m0-pins {
+ rockchip,pins =
+ /* pwm8_m0 */
+ <3 RK_PA4 2 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm8m1_pins: pwm8m1-pins {
+ rockchip,pins =
+ /* pwm8_m1 */
+ <1 RK_PC1 4 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm9 {
+ /omit-if-no-ref/
+ pwm9m0_pins: pwm9m0-pins {
+ rockchip,pins =
+ /* pwm9_m0 */
+ <3 RK_PA5 2 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm9m1_pins: pwm9m1-pins {
+ rockchip,pins =
+ /* pwm9_m1 */
+ <1 RK_PC2 4 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm10 {
+ /omit-if-no-ref/
+ pwm10m0_pins: pwm10m0-pins {
+ rockchip,pins =
+ /* pwm10_m0 */
+ <1 RK_PB5 5 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm10m1_pins: pwm10m1-pins {
+ rockchip,pins =
+ /* pwm10_m1 */
+ <1 RK_PC3 4 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm11 {
+ /omit-if-no-ref/
+ pwm11m0_pins: pwm11m0-pins {
+ rockchip,pins =
+ /* pwm11_m0 */
+ <1 RK_PB6 5 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm11m1_pins: pwm11m1-pins {
+ rockchip,pins =
+ /* pwm11_m1 */
+ <1 RK_PC4 4 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm12 {
+ /omit-if-no-ref/
+ pwm12m0_pins: pwm12m0-pins {
+ rockchip,pins =
+ /* pwm12_m0 */
+ <4 RK_PA1 4 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm12m1_pins: pwm12m1-pins {
+ rockchip,pins =
+ /* pwm12_m1 */
+ <3 RK_PB4 5 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm13 {
+ /omit-if-no-ref/
+ pwm13m0_pins: pwm13m0-pins {
+ rockchip,pins =
+ /* pwm13_m0 */
+ <4 RK_PA4 3 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm13m1_pins: pwm13m1-pins {
+ rockchip,pins =
+ /* pwm13_m1 */
+ <3 RK_PB5 5 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm14 {
+ /omit-if-no-ref/
+ pwm14m0_pins: pwm14m0-pins {
+ rockchip,pins =
+ /* pwm14_m0 */
+ <3 RK_PC5 4 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm14m1_pins: pwm14m1-pins {
+ rockchip,pins =
+ /* pwm14_m1 */
+ <1 RK_PD7 5 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwm15 {
+ /omit-if-no-ref/
+ pwm15m0_pins: pwm15m0-pins {
+ rockchip,pins =
+ /* pwm15_m0 */
+ <3 RK_PC6 4 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ pwm15m1_pins: pwm15m1-pins {
+ rockchip,pins =
+ /* pwm15_m1 */
+ <2 RK_PA0 5 &pcfg_pull_none_drv_level_1>;
+ };
+ };
+
+ pwr {
+ /omit-if-no-ref/
+ pwr_pins: pwr-pins {
+ rockchip,pins =
+ /* pwr_ctrl0 */
+ <0 RK_PA2 1 &pcfg_pull_none>,
+ /* pwr_ctrl1 */
+ <0 RK_PA3 1 &pcfg_pull_none>;
+ };
+ };
+
+ ref {
+ /omit-if-no-ref/
+ ref_pins: ref-pins {
+ rockchip,pins =
+ /* ref_clk_out */
+ <0 RK_PA0 1 &pcfg_pull_none>;
+ };
+ };
+
+ rgmii {
+ /omit-if-no-ref/
+ rgmiim0_miim: rgmiim0-miim {
+ rockchip,pins =
+ /* rgmii_mdc_m0 */
+ <4 RK_PB2 2 &pcfg_pull_none>,
+ /* rgmii_mdio_m0 */
+ <4 RK_PB3 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmiim0_rx_er: rgmiim0-rx_er {
+ rockchip,pins =
+ /* rgmii_rxer_m0 */
+ <4 RK_PB0 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmiim0_rx_bus2: rgmiim0-rx_bus2 {
+ rockchip,pins =
+ /* rgmii_rxd0_m0 */
+ <4 RK_PA5 2 &pcfg_pull_none>,
+ /* rgmii_rxd1_m0 */
+ <4 RK_PA6 2 &pcfg_pull_none>,
+ /* rgmii_rxdv_m0 */
+ <4 RK_PA7 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmiim0_tx_bus2: rgmiim0-tx_bus2 {
+ rockchip,pins =
+ /* rgmii_txd0_m0 */
+ <4 RK_PA2 2 &pcfg_pull_none>,
+ /* rgmii_txd1_m0 */
+ <4 RK_PA3 2 &pcfg_pull_none>,
+ /* rgmii_txen_m0 */
+ <4 RK_PA4 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmiim0_rgmii_clk: rgmiim0-rgmii_clk {
+ rockchip,pins =
+ /* rgmii_rxclk_m0 */
+ <4 RK_PA1 2 &pcfg_pull_none>,
+ /* rgmii_txclk_m0 */
+ <3 RK_PD6 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmiim0_rgmii_bus: rgmiim0-rgmii_bus {
+ rockchip,pins =
+ /* rgmii_rxd2_m0 */
+ <3 RK_PD7 2 &pcfg_pull_none>,
+ /* rgmii_rxd3_m0 */
+ <4 RK_PA0 2 &pcfg_pull_none>,
+ /* rgmii_txd2_m0 */
+ <3 RK_PD4 2 &pcfg_pull_none>,
+ /* rgmii_txd3_m0 */
+ <3 RK_PD5 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmiim0_clk: rgmiim0-clk {
+ rockchip,pins =
+ /* rgmiim0_clk */
+ <4 RK_PB7 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmiim1_miim: rgmiim1-miim {
+ rockchip,pins =
+ /* rgmii_mdc_m1 */
+ <1 RK_PC7 2 &pcfg_pull_none>,
+ /* rgmii_mdio_m1 */
+ <1 RK_PD0 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmiim1_rx_er: rgmiim1-rx_er {
+ rockchip,pins =
+ /* rgmii_rxer_m1 */
+ <2 RK_PA0 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmiim1_rx_bus2: rgmiim1-rx_bus2 {
+ rockchip,pins =
+ /* rgmii_rxd0_m1 */
+ <1 RK_PD4 2 &pcfg_pull_none>,
+ /* rgmii_rxd1_m1 */
+ <1 RK_PD7 2 &pcfg_pull_none>,
+ /* rgmii_rxdv_m1 */
+ <1 RK_PD6 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmiim1_tx_bus2: rgmiim1-tx_bus2 {
+ rockchip,pins =
+ /* rgmii_txd0_m1 */
+ <1 RK_PD1 2 &pcfg_pull_none>,
+ /* rgmii_txd1_m1 */
+ <1 RK_PD2 2 &pcfg_pull_none>,
+ /* rgmii_txen_m1 */
+ <1 RK_PD3 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmiim1_rgmii_clk: rgmiim1-rgmii_clk {
+ rockchip,pins =
+ /* rgmii_rxclk_m1 */
+ <1 RK_PC6 2 &pcfg_pull_none>,
+ /* rgmii_txclk_m1 */
+ <1 RK_PC3 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmiim1_rgmii_bus: rgmiim1-rgmii_bus {
+ rockchip,pins =
+ /* rgmii_rxd2_m1 */
+ <1 RK_PC4 2 &pcfg_pull_none>,
+ /* rgmii_rxd3_m1 */
+ <1 RK_PC5 2 &pcfg_pull_none>,
+ /* rgmii_txd2_m1 */
+ <1 RK_PC1 2 &pcfg_pull_none>,
+ /* rgmii_txd3_m1 */
+ <1 RK_PC2 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmiim1_clk: rgmiim1-clk {
+ rockchip,pins =
+ /* rgmiim1_clk */
+ <1 RK_PD5 2 &pcfg_pull_none>;
+ };
+ };
+
+ rmii {
+ /omit-if-no-ref/
+ rmii_pins: rmii-pins {
+ rockchip,pins =
+ /* rmii_clk */
+ <1 RK_PD5 5 &pcfg_pull_none>,
+ /* rmii_mdc */
+ <1 RK_PC7 5 &pcfg_pull_none>,
+ /* rmii_mdio */
+ <1 RK_PD0 5 &pcfg_pull_none>,
+ /* rmii_rxd0 */
+ <1 RK_PD4 5 &pcfg_pull_none>,
+ /* rmii_rxd1 */
+ <1 RK_PD7 6 &pcfg_pull_none>,
+ /* rmii_rxdv_crs */
+ <1 RK_PD6 5 &pcfg_pull_none>,
+ /* rmii_rxer */
+ <2 RK_PA0 6 &pcfg_pull_none>,
+ /* rmii_txd0 */
+ <1 RK_PD1 5 &pcfg_pull_none>,
+ /* rmii_txd1 */
+ <1 RK_PD2 5 &pcfg_pull_none>,
+ /* rmii_txen */
+ <1 RK_PD3 5 &pcfg_pull_none>;
+ };
+ };
+
+ sdmmc0 {
+ /omit-if-no-ref/
+ sdmmc0_bus4: sdmmc0-bus4 {
+ rockchip,pins =
+ /* sdmmc0_d0 */
+ <1 RK_PB3 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc0_d1 */
+ <1 RK_PB4 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc0_d2 */
+ <1 RK_PB5 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc0_d3 */
+ <1 RK_PB6 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc0_clk: sdmmc0-clk {
+ rockchip,pins =
+ /* sdmmc0_clk */
+ <1 RK_PC0 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc0_cmd: sdmmc0-cmd {
+ rockchip,pins =
+ /* sdmmc0_cmd */
+ <1 RK_PB7 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc0_det: sdmmc0-det {
+ rockchip,pins =
+ /* sdmmc0_detn */
+ <0 RK_PA4 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc0_pwren: sdmmc0-pwren {
+ rockchip,pins =
+ /* sdmmc0_pwren */
+ <0 RK_PA5 1 &pcfg_pull_none>;
+ };
+ };
+
+ sdmmc1 {
+ /omit-if-no-ref/
+ sdmmc1_bus4: sdmmc1-bus4 {
+ rockchip,pins =
+ /* sdmmc1_d0 */
+ <1 RK_PC1 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc1_d1 */
+ <1 RK_PC2 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc1_d2 */
+ <1 RK_PC3 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc1_d3 */
+ <1 RK_PC4 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc1_clk: sdmmc1-clk {
+ rockchip,pins =
+ /* sdmmc1_clk */
+ <1 RK_PC6 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc1_cmd: sdmmc1-cmd {
+ rockchip,pins =
+ /* sdmmc1_cmd */
+ <1 RK_PC5 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc1_det: sdmmc1-det {
+ rockchip,pins =
+ /* sdmmc1_detn */
+ <1 RK_PD0 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc1_pwren: sdmmc1-pwren {
+ rockchip,pins =
+ /* sdmmc1_pwren */
+ <1 RK_PC7 1 &pcfg_pull_none>;
+ };
+ };
+
+ spdif {
+ /omit-if-no-ref/
+ spdifm0_pins: spdifm0-pins {
+ rockchip,pins =
+ /* spdif_tx_m0 */
+ <3 RK_PA1 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spdifm1_pins: spdifm1-pins {
+ rockchip,pins =
+ /* spdif_tx_m1 */
+ <0 RK_PB7 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spdifm2_pins: spdifm2-pins {
+ rockchip,pins =
+ /* spdif_tx_m2 */
+ <1 RK_PB7 2 &pcfg_pull_none>;
+ };
+ };
+
+ spi0 {
+ /omit-if-no-ref/
+ spi0m0_pins: spi0m0-pins {
+ rockchip,pins =
+ /* spi0_clk_m0 */
+ <0 RK_PC3 3 &pcfg_pull_none_drv_level_3>,
+ /* spi0_miso_m0 */
+ <0 RK_PC5 3 &pcfg_pull_none_drv_level_3>,
+ /* spi0_mosi_m0 */
+ <0 RK_PC4 3 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ spi0m0_csn0: spi0m0-csn0 {
+ rockchip,pins =
+ /* spi0m0_csn0 */
+ <0 RK_PC2 3 &pcfg_pull_none_drv_level_3>;
+ };
+ /omit-if-no-ref/
+ spi0m0_csn1: spi0m0-csn1 {
+ rockchip,pins =
+ /* spi0m0_csn1 */
+ <0 RK_PB7 1 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ spi0m1_pins: spi0m1-pins {
+ rockchip,pins =
+ /* spi0_clk_m1 */
+ <3 RK_PB5 4 &pcfg_pull_none_drv_level_3>,
+ /* spi0_miso_m1 */
+ <3 RK_PC0 4 &pcfg_pull_none_drv_level_3>,
+ /* spi0_mosi_m1 */
+ <3 RK_PB4 4 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ spi0m1_csn0: spi0m1-csn0 {
+ rockchip,pins =
+ /* spi0m1_csn0 */
+ <3 RK_PB7 4 &pcfg_pull_none_drv_level_3>;
+ };
+ /omit-if-no-ref/
+ spi0m1_csn1: spi0m1-csn1 {
+ rockchip,pins =
+ /* spi0m1_csn1 */
+ <3 RK_PB6 4 &pcfg_pull_none_drv_level_3>;
+ };
+ };
+
+ spi1 {
+ /omit-if-no-ref/
+ spi1m0_pins: spi1m0-pins {
+ rockchip,pins =
+ /* spi1_clk_m0 */
+ <3 RK_PD6 4 &pcfg_pull_none_drv_level_3>,
+ /* spi1_miso_m0 */
+ <4 RK_PA3 4 &pcfg_pull_none_drv_level_3>,
+ /* spi1_mosi_m0 */
+ <4 RK_PA2 4 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ spi1m0_csn0: spi1m0-csn0 {
+ rockchip,pins =
+ /* spi1m0_csn0 */
+ <3 RK_PD7 4 &pcfg_pull_none_drv_level_3>;
+ };
+ /omit-if-no-ref/
+ spi1m0_csn1: spi1m0-csn1 {
+ rockchip,pins =
+ /* spi1m0_csn1 */
+ <4 RK_PA0 4 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ spi1m1_pins: spi1m1-pins {
+ rockchip,pins =
+ /* spi1_clk_m1 */
+ <1 RK_PC0 4 &pcfg_pull_none_drv_level_3>,
+ /* spi1_miso_m1 */
+ <1 RK_PB4 4 &pcfg_pull_none_drv_level_3>,
+ /* spi1_mosi_m1 */
+ <1 RK_PB3 4 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ spi1m1_csn0: spi1m1-csn0 {
+ rockchip,pins =
+ /* spi1m1_csn0 */
+ <1 RK_PB6 4 &pcfg_pull_none_drv_level_3>;
+ };
+ /omit-if-no-ref/
+ spi1m1_csn1: spi1m1-csn1 {
+ rockchip,pins =
+ /* spi1m1_csn1 */
+ <1 RK_PB5 4 &pcfg_pull_none_drv_level_3>;
+ };
+ };
+
+ spi2 {
+ /omit-if-no-ref/
+ spi2m0_pins: spi2m0-pins {
+ rockchip,pins =
+ /* spi2_clk_m0 */
+ <4 RK_PB6 4 &pcfg_pull_none_drv_level_3>,
+ /* spi2_miso_m0 */
+ <3 RK_PD2 4 &pcfg_pull_none_drv_level_3>,
+ /* spi2_mosi_m0 */
+ <3 RK_PD3 4 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ spi2m0_csn0: spi2m0-csn0 {
+ rockchip,pins =
+ /* spi2m0_csn0 */
+ <4 RK_PB5 4 &pcfg_pull_none_drv_level_3>;
+ };
+ /omit-if-no-ref/
+ spi2m0_csn1: spi2m0-csn1 {
+ rockchip,pins =
+ /* spi2m0_csn1 */
+ <4 RK_PB4 4 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ spi2m1_pins: spi2m1-pins {
+ rockchip,pins =
+ /* spi2_clk_m1 */
+ <2 RK_PA1 4 &pcfg_pull_none_drv_level_3>,
+ /* spi2_miso_m1 */
+ <2 RK_PA0 4 &pcfg_pull_none_drv_level_3>,
+ /* spi2_mosi_m1 */
+ <1 RK_PD7 4 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ spi2m1_csn0: spi2m1-csn0 {
+ rockchip,pins =
+ /* spi2m1_csn0 */
+ <1 RK_PD6 4 &pcfg_pull_none_drv_level_3>;
+ };
+ /omit-if-no-ref/
+ spi2m1_csn1: spi2m1-csn1 {
+ rockchip,pins =
+ /* spi2m1_csn1 */
+ <1 RK_PD5 4 &pcfg_pull_none_drv_level_3>;
+ };
+ };
+
+ tsadc {
+ /omit-if-no-ref/
+ tsadcm0_pins: tsadcm0-pins {
+ rockchip,pins =
+ /* tsadc_shut_m0 */
+ <0 RK_PA1 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ tsadcm1_pins: tsadcm1-pins {
+ rockchip,pins =
+ /* tsadc_shut_m1 */
+ <0 RK_PA2 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ tsadc_shut_org: tsadc-shut-org {
+ rockchip,pins =
+ /* tsadc_shut_org */
+ <0 RK_PA1 2 &pcfg_pull_none>;
+ };
+ };
+
+ uart0 {
+ /omit-if-no-ref/
+ uart0m0_xfer: uart0m0-xfer {
+ rockchip,pins =
+ /* uart0_rx_m0 */
+ <0 RK_PD0 1 &pcfg_pull_up>,
+ /* uart0_tx_m0 */
+ <0 RK_PD1 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart0m1_xfer: uart0m1-xfer {
+ rockchip,pins =
+ /* uart0_rx_m1 */
+ <1 RK_PB3 2 &pcfg_pull_up>,
+ /* uart0_tx_m1 */
+ <1 RK_PB4 2 &pcfg_pull_up>;
+ };
+ };
+
+ uart1 {
+ /omit-if-no-ref/
+ uart1m0_xfer: uart1m0-xfer {
+ rockchip,pins =
+ /* uart1_rx_m0 */
+ <1 RK_PD1 1 &pcfg_pull_up>,
+ /* uart1_tx_m0 */
+ <1 RK_PD2 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart1m0_ctsn: uart1m0-ctsn {
+ rockchip,pins =
+ /* uart1m0_ctsn */
+ <1 RK_PD4 1 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart1m0_rtsn: uart1m0-rtsn {
+ rockchip,pins =
+ /* uart1m0_rtsn */
+ <1 RK_PD3 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart1m1_xfer: uart1m1-xfer {
+ rockchip,pins =
+ /* uart1_rx_m1 */
+ <4 RK_PA6 3 &pcfg_pull_up>,
+ /* uart1_tx_m1 */
+ <4 RK_PA5 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart1m1_ctsn: uart1m1-ctsn {
+ rockchip,pins =
+ /* uart1m1_ctsn */
+ <4 RK_PB0 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart1m1_rtsn: uart1m1-rtsn {
+ rockchip,pins =
+ /* uart1m1_rtsn */
+ <4 RK_PA7 3 &pcfg_pull_none>;
+ };
+ };
+
+ uart2 {
+ /omit-if-no-ref/
+ uart2m0_xfer: uart2m0-xfer {
+ rockchip,pins =
+ /* uart2_rx_m0 */
+ <0 RK_PC1 1 &pcfg_pull_up>,
+ /* uart2_tx_m0 */
+ <0 RK_PC0 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart2m0_ctsn: uart2m0-ctsn {
+ rockchip,pins =
+ /* uart2m0_ctsn */
+ <0 RK_PC2 1 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart2m0_rtsn: uart2m0-rtsn {
+ rockchip,pins =
+ /* uart2m0_rtsn */
+ <0 RK_PC3 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart2m1_xfer: uart2m1-xfer {
+ rockchip,pins =
+ /* uart2_rx_m1 */
+ <3 RK_PA1 2 &pcfg_pull_up>,
+ /* uart2_tx_m1 */
+ <3 RK_PA0 2 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart2m1_ctsn: uart2m1-ctsn {
+ rockchip,pins =
+ /* uart2m1_ctsn */
+ <3 RK_PA2 2 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart2m1_rtsn: uart2m1-rtsn {
+ rockchip,pins =
+ /* uart2m1_rtsn */
+ <3 RK_PA3 2 &pcfg_pull_none>;
+ };
+ };
+
+ uart3 {
+ /omit-if-no-ref/
+ uart3m0_xfer: uart3m0-xfer {
+ rockchip,pins =
+ /* uart3_rx_m0 */
+ <4 RK_PB5 6 &pcfg_pull_up>,
+ /* uart3_tx_m0 */
+ <4 RK_PB4 6 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart3m0_ctsn: uart3m0-ctsn {
+ rockchip,pins =
+ /* uart3m0_ctsn */
+ <4 RK_PB6 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart3m0_rtsn: uart3m0-rtsn {
+ rockchip,pins =
+ /* uart3m0_rtsn */
+ <3 RK_PD1 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart3m1_xfer: uart3m1-xfer {
+ rockchip,pins =
+ /* uart3_rx_m1 */
+ <3 RK_PC0 3 &pcfg_pull_up>,
+ /* uart3_tx_m1 */
+ <3 RK_PB7 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart3m1_ctsn: uart3m1-ctsn {
+ rockchip,pins =
+ /* uart3m1_ctsn */
+ <3 RK_PB6 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart3m1_rtsn: uart3m1-rtsn {
+ rockchip,pins =
+ /* uart3m1_rtsn */
+ <3 RK_PC1 3 &pcfg_pull_none>;
+ };
+ };
+
+ uart4 {
+ /omit-if-no-ref/
+ uart4m0_xfer: uart4m0-xfer {
+ rockchip,pins =
+ /* uart4_rx_m0 */
+ <3 RK_PD1 3 &pcfg_pull_up>,
+ /* uart4_tx_m0 */
+ <3 RK_PD0 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart4m0_ctsn: uart4m0-ctsn {
+ rockchip,pins =
+ /* uart4m0_ctsn */
+ <3 RK_PC5 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart4m0_rtsn: uart4m0-rtsn {
+ rockchip,pins =
+ /* uart4m0_rtsn */
+ <3 RK_PC6 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart4m1_xfer: uart4m1-xfer {
+ rockchip,pins =
+ /* uart4_rx_m1 */
+ <1 RK_PD5 3 &pcfg_pull_up>,
+ /* uart4_tx_m1 */
+ <1 RK_PD6 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart4m1_ctsn: uart4m1-ctsn {
+ rockchip,pins =
+ /* uart4m1_ctsn */
+ <2 RK_PA0 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart4m1_rtsn: uart4m1-rtsn {
+ rockchip,pins =
+ /* uart4m1_rtsn */
+ <1 RK_PD7 3 &pcfg_pull_none>;
+ };
+ };
+
+ uart5 {
+ /omit-if-no-ref/
+ uart5m0_xfer: uart5m0-xfer {
+ rockchip,pins =
+ /* uart5_rx_m0 */
+ <1 RK_PB7 3 &pcfg_pull_up>,
+ /* uart5_tx_m0 */
+ <1 RK_PC0 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart5m0_ctsn: uart5m0-ctsn {
+ rockchip,pins =
+ /* uart5m0_ctsn */
+ <1 RK_PB5 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart5m0_rtsn: uart5m0-rtsn {
+ rockchip,pins =
+ /* uart5m0_rtsn */
+ <1 RK_PB6 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart5m1_xfer: uart5m1-xfer {
+ rockchip,pins =
+ /* uart5_rx_m1 */
+ <3 RK_PA7 5 &pcfg_pull_up>,
+ /* uart5_tx_m1 */
+ <3 RK_PA6 5 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart5m1_ctsn: uart5m1-ctsn {
+ rockchip,pins =
+ /* uart5m1_ctsn */
+ <3 RK_PA0 5 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart5m1_rtsn: uart5m1-rtsn {
+ rockchip,pins =
+ /* uart5m1_rtsn */
+ <3 RK_PA1 5 &pcfg_pull_none>;
+ };
+ };
+
+ uart6 {
+ /omit-if-no-ref/
+ uart6m0_xfer: uart6m0-xfer {
+ rockchip,pins =
+ /* uart6_rx_m0 */
+ <0 RK_PC7 1 &pcfg_pull_up>,
+ /* uart6_tx_m0 */
+ <0 RK_PC6 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart6m0_ctsn: uart6m0-ctsn {
+ rockchip,pins =
+ /* uart6m0_ctsn */
+ <0 RK_PC4 1 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart6m0_rtsn: uart6m0-rtsn {
+ rockchip,pins =
+ /* uart6m0_rtsn */
+ <0 RK_PC5 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart6m1_xfer: uart6m1-xfer {
+ rockchip,pins =
+ /* uart6_rx_m1 */
+ <4 RK_PB0 5 &pcfg_pull_up>,
+ /* uart6_tx_m1 */
+ <4 RK_PA7 5 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart6m1_ctsn: uart6m1-ctsn {
+ rockchip,pins =
+ /* uart6m1_ctsn */
+ <4 RK_PA2 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart6m1_rtsn: uart6m1-rtsn {
+ rockchip,pins =
+ /* uart6m1_rtsn */
+ <4 RK_PA3 3 &pcfg_pull_none>;
+ };
+ };
+
+ uart7 {
+ /omit-if-no-ref/
+ uart7m0_xfer: uart7m0-xfer {
+ rockchip,pins =
+ /* uart7_rx_m0 */
+ <3 RK_PC7 3 &pcfg_pull_up>,
+ /* uart7_tx_m0 */
+ <3 RK_PC4 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart7m0_ctsn: uart7m0-ctsn {
+ rockchip,pins =
+ /* uart7m0_ctsn */
+ <3 RK_PD2 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart7m0_rtsn: uart7m0-rtsn {
+ rockchip,pins =
+ /* uart7m0_rtsn */
+ <3 RK_PD3 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart7m1_xfer: uart7m1-xfer {
+ rockchip,pins =
+ /* uart7_rx_m1 */
+ <1 RK_PB3 3 &pcfg_pull_up>,
+ /* uart7_tx_m1 */
+ <1 RK_PB4 3 &pcfg_pull_up>;
+ };
+ };
+
+ uart8 {
+ /omit-if-no-ref/
+ uart8m0_xfer: uart8m0-xfer {
+ rockchip,pins =
+ /* uart8_rx_m0 */
+ <3 RK_PB3 3 &pcfg_pull_up>,
+ /* uart8_tx_m0 */
+ <3 RK_PB2 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart8m0_ctsn: uart8m0-ctsn {
+ rockchip,pins =
+ /* uart8m0_ctsn */
+ <3 RK_PB4 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart8m0_rtsn: uart8m0-rtsn {
+ rockchip,pins =
+ /* uart8m0_rtsn */
+ <3 RK_PB5 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart8m1_xfer: uart8m1-xfer {
+ rockchip,pins =
+ /* uart8_rx_m1 */
+ <3 RK_PD5 3 &pcfg_pull_up>,
+ /* uart8_tx_m1 */
+ <3 RK_PD4 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart8m1_ctsn: uart8m1-ctsn {
+ rockchip,pins =
+ /* uart8m1_ctsn */
+ <3 RK_PD7 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart8m1_rtsn: uart8m1-rtsn {
+ rockchip,pins =
+ /* uart8m1_rtsn */
+ <4 RK_PA0 3 &pcfg_pull_none>;
+ };
+ };
+
+ uart9 {
+ /omit-if-no-ref/
+ uart9m0_xfer: uart9m0-xfer {
+ rockchip,pins =
+ /* uart9_rx_m0 */
+ <4 RK_PB3 3 &pcfg_pull_up>,
+ /* uart9_tx_m0 */
+ <4 RK_PB2 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart9m0_ctsn: uart9m0-ctsn {
+ rockchip,pins =
+ /* uart9m0_ctsn */
+ <4 RK_PB4 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart9m0_rtsn: uart9m0-rtsn {
+ rockchip,pins =
+ /* uart9m0_rtsn */
+ <4 RK_PB5 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart9m1_xfer: uart9m1-xfer {
+ rockchip,pins =
+ /* uart9_rx_m1 */
+ <3 RK_PC3 3 &pcfg_pull_up>,
+ /* uart9_tx_m1 */
+ <3 RK_PC2 3 &pcfg_pull_up>;
+ };
+ };
+
+ vo {
+ /omit-if-no-ref/
+ vo_pins: vo-pins {
+ rockchip,pins =
+ /* vo_lcdc_clk */
+ <4 RK_PB7 1 &pcfg_pull_none_drv_level_4>,
+ /* vo_lcdc_d0 */
+ <4 RK_PA4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d1 */
+ <4 RK_PA5 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d2 */
+ <4 RK_PB2 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d3 */
+ <3 RK_PC4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d4 */
+ <3 RK_PC5 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d5 */
+ <3 RK_PC6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d6 */
+ <3 RK_PC7 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d7 */
+ <3 RK_PD0 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d8 */
+ <4 RK_PA6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d9 */
+ <4 RK_PA7 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d10 */
+ <3 RK_PD1 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d11 */
+ <3 RK_PD2 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d12 */
+ <3 RK_PD3 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d13 */
+ <3 RK_PD4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d14 */
+ <3 RK_PD5 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d15 */
+ <3 RK_PD6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d16 */
+ <4 RK_PB0 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d17 */
+ <4 RK_PB1 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d18 */
+ <4 RK_PB3 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d19 */
+ <3 RK_PD7 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d20 */
+ <4 RK_PA0 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d21 */
+ <4 RK_PA1 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d22 */
+ <4 RK_PA2 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d23 */
+ <4 RK_PA3 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_den */
+ <4 RK_PB6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_hsync */
+ <4 RK_PB4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_vsync */
+ <4 RK_PB5 1 &pcfg_pull_none_drv_level_3>;
+ };
+ };
+};
+
+/*
+ * This part is edited by hand.
+ */
+&pinctrl {
+ vo {
+ /omit-if-no-ref/
+ bt1120_pins: bt1120-pins {
+ rockchip,pins =
+ /* vo_lcdc_clk */
+ <4 RK_PB7 1 &pcfg_pull_none_drv_level_4>,
+ /* vo_lcdc_d3 */
+ <3 RK_PC4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d4 */
+ <3 RK_PC5 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d5 */
+ <3 RK_PC6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d6 */
+ <3 RK_PC7 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d7 */
+ <3 RK_PD0 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d10 */
+ <3 RK_PD1 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d11 */
+ <3 RK_PD2 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d12 */
+ <3 RK_PD3 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d13 */
+ <3 RK_PD4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d14 */
+ <3 RK_PD5 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d15 */
+ <3 RK_PD6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d19 */
+ <3 RK_PD7 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d20 */
+ <4 RK_PA0 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d21 */
+ <4 RK_PA1 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d22 */
+ <4 RK_PA2 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d23 */
+ <4 RK_PA3 1 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ bt656_pins: bt656-pins {
+ rockchip,pins =
+ /* vo_lcdc_clk */
+ <4 RK_PB7 1 &pcfg_pull_none_drv_level_4>,
+ /* vo_lcdc_d3 */
+ <3 RK_PC4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d4 */
+ <3 RK_PC5 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d5 */
+ <3 RK_PC6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d6 */
+ <3 RK_PC7 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d7 */
+ <3 RK_PD0 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d10 */
+ <3 RK_PD1 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d11 */
+ <3 RK_PD2 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d12 */
+ <3 RK_PD3 1 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ rgb3x8_pins_m0: rgb3x8-pins-m0 {
+ rockchip,pins =
+ /* vo_lcdc_clk */
+ <4 RK_PB7 1 &pcfg_pull_none_drv_level_4>,
+ /* vo_lcdc_d3 */
+ <3 RK_PC4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d4 */
+ <3 RK_PC5 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d5 */
+ <3 RK_PC6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d6 */
+ <3 RK_PC7 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d7 */
+ <3 RK_PD0 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d10 */
+ <3 RK_PD1 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d11 */
+ <3 RK_PD2 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d12 */
+ <3 RK_PD3 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_den */
+ <4 RK_PB6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_hsync */
+ <4 RK_PB4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_vsync */
+ <4 RK_PB5 1 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ rgb3x8_pins_m1: rgb3x8-pins-m1 {
+ rockchip,pins =
+ /* vo_lcdc_clk */
+ <4 RK_PB7 1 &pcfg_pull_none_drv_level_4>,
+ /* vo_lcdc_d13 */
+ <3 RK_PD4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d14 */
+ <3 RK_PD5 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d15 */
+ <3 RK_PD6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d19 */
+ <3 RK_PD7 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d20 */
+ <4 RK_PA0 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d21 */
+ <4 RK_PA1 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d22 */
+ <4 RK_PA2 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d23 */
+ <4 RK_PA3 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_den */
+ <4 RK_PB6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_hsync */
+ <4 RK_PB4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_vsync */
+ <4 RK_PB5 1 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ rgb565_pins: rgb565-pins {
+ rockchip,pins =
+ /* vo_lcdc_clk */
+ <4 RK_PB7 1 &pcfg_pull_none_drv_level_4>,
+ /* vo_lcdc_d3 */
+ <3 RK_PC4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d4 */
+ <3 RK_PC5 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d5 */
+ <3 RK_PC6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d6 */
+ <3 RK_PC7 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d7 */
+ <3 RK_PD0 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d10 */
+ <3 RK_PD1 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d11 */
+ <3 RK_PD2 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d12 */
+ <3 RK_PD3 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d13 */
+ <3 RK_PD4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d14 */
+ <3 RK_PD5 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d15 */
+ <3 RK_PD6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d19 */
+ <3 RK_PD7 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d20 */
+ <4 RK_PA0 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d21 */
+ <4 RK_PA1 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d22 */
+ <4 RK_PA2 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d23 */
+ <4 RK_PA3 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_den */
+ <4 RK_PB6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_hsync */
+ <4 RK_PB4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_vsync */
+ <4 RK_PB5 1 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ rgb666_pins: rgb666-pins {
+ rockchip,pins =
+ /* vo_lcdc_clk */
+ <4 RK_PB7 1 &pcfg_pull_none_drv_level_4>,
+ /* vo_lcdc_d2 */
+ <4 RK_PB2 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d3 */
+ <3 RK_PC4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d4 */
+ <3 RK_PC5 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d5 */
+ <3 RK_PC6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d6 */
+ <3 RK_PC7 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d7 */
+ <3 RK_PD0 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d10 */
+ <3 RK_PD1 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d11 */
+ <3 RK_PD2 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d12 */
+ <3 RK_PD3 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d13 */
+ <3 RK_PD4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d14 */
+ <3 RK_PD5 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d15 */
+ <3 RK_PD6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d18 */
+ <4 RK_PB3 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d19 */
+ <3 RK_PD7 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d20 */
+ <4 RK_PA0 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d21 */
+ <4 RK_PA1 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d22 */
+ <4 RK_PA2 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_d23 */
+ <4 RK_PA3 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_den */
+ <4 RK_PB6 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_hsync */
+ <4 RK_PB4 1 &pcfg_pull_none_drv_level_3>,
+ /* vo_lcdc_vsync */
+ <4 RK_PB5 1 &pcfg_pull_none_drv_level_3>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3562.dtsi b/arch/arm64/boot/dts/rockchip/rk3562.dtsi
new file mode 100644
index 000000000000..def504ffa326
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3562.dtsi
@@ -0,0 +1,1185 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ */
+
+#include <dt-bindings/clock/rockchip,rk3562-cru.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/reset/rockchip,rk3562-cru.h>
+#include <dt-bindings/soc/rockchip,boot-mode.h>
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+ compatible = "rockchip,rk3562";
+
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ gpio2 = &gpio2;
+ gpio3 = &gpio3;
+ gpio4 = &gpio4;
+ };
+
+ xin32k: clock-xin32k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "xin32k";
+ };
+
+ xin24m: clock-xin24m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "xin24m";
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ clocks = <&scmi_clk ARMCLK>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
+ dynamic-power-coefficient = <138>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ clocks = <&scmi_clk ARMCLK>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
+ dynamic-power-coefficient = <138>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ clocks = <&scmi_clk ARMCLK>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
+ dynamic-power-coefficient = <138>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ clocks = <&scmi_clk ARMCLK>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
+ dynamic-power-coefficient = <138>;
+ };
+
+ idle-states {
+ entry-method = "psci";
+
+ CPU_SLEEP: cpu-sleep {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x0010000>;
+ entry-latency-us = <120>;
+ exit-latency-us = <250>;
+ min-residency-us = <900>;
+ };
+ };
+ };
+
+ cpu0_opp_table: opp-table-cpu0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-408000000 {
+ opp-hz = /bits/ 64 <408000000>;
+ opp-microvolt = <825000 825000 1150000>;
+ clock-latency-ns = <40000>;
+ opp-suspend;
+ };
+ opp-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <825000 825000 1150000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-816000000 {
+ opp-hz = /bits/ 64 <816000000>;
+ opp-microvolt = <825000 825000 1150000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1008000000 {
+ opp-hz = /bits/ 64 <1008000000>;
+ opp-microvolt = <850000 850000 1150000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <925000 925000 1150000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1416000000 {
+ opp-hz = /bits/ 64 <1416000000>;
+ opp-microvolt = <1000000 1000000 1150000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1608000000 {
+ opp-supported-hw = <0xf9 0xffff>;
+ opp-hz = /bits/ 64 <1608000000>;
+ opp-microvolt = <1037500 1037500 1150000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1800000000 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-microvolt = <1125000 1125000 1150000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-2016000000 {
+ opp-hz = /bits/ 64 <2016000000>;
+ opp-microvolt = <1150000 1150000 1150000>;
+ clock-latency-ns = <40000>;
+ };
+
+ };
+
+ gpu_opp_table: opp-table-gpu {
+ compatible = "operating-points-v2";
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <825000 825000 1000000>;
+ };
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <825000 825000 1000000>;
+ };
+ opp-500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-microvolt = <825000 825000 1000000>;
+ };
+ opp-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <825000 825000 1000000>;
+ };
+ opp-700000000 {
+ opp-hz = /bits/ 64 <700000000>;
+ opp-microvolt = <900000 900000 1000000>;
+ };
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <950000 950000 1000000>;
+ };
+ opp-900000000 {
+ opp-hz = /bits/ 64 <900000000>;
+ opp-microvolt = <1000000 1000000 1000000>;
+ };
+ };
+
+ arm_pmu: arm-pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 230 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+ };
+
+ firmware {
+ scmi: scmi {
+ compatible = "arm,scmi-smc";
+ shmem = <&scmi_shmem>;
+ arm,smc-id = <0x82000010>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_clk: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
+ };
+ };
+
+ pinctrl: pinctrl {
+ compatible = "rockchip,rk3562-pinctrl";
+ rockchip,grf = <&ioc_grf>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gpio0: gpio@ff260000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff260000 0x0 0x100>;
+ clocks = <&cru PCLK_PMU0_GPIO0>, <&cru DBCLK_PMU0_GPIO0>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@ff620000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff620000 0x0 0x100>;
+ clocks = <&cru PCLK_PERI_GPIO1>, <&cru DCLK_PERI_GPIO1>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@ff630000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff630000 0x0 0x100>;
+ clocks = <&cru PCLK_PERI_GPIO2>, <&cru DCLK_PERI_GPIO2>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@ffac0000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xffac0000 0x0 0x100>;
+ clocks = <&cru PCLK_GPIO3_VCCIO156>, <&cru DCLK_BUS_GPIO3>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@ffad0000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xffad0000 0x0 0x100>;
+ clocks = <&cru PCLK_GPIO4_VCCIO156>, <&cru DCLK_BUS_GPIO4>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 128 32>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ scmi_shmem: shmem@10f000 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0x0010f000 0x0 0x100>;
+ no-map;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ pcie2x1: pcie@fe000000 {
+ compatible = "rockchip,rk3562-pcie", "rockchip,rk3568-pcie";
+ reg = <0x0 0xfe000000 0x0 0x400000>,
+ <0x0 0xff500000 0x0 0x10000>,
+ <0x0 0xfc000000 0x0 0x100000>;
+ reg-names = "dbi", "apb", "config";
+ bus-range = <0x0 0xff>;
+ clocks = <&cru ACLK_PCIE20_MST>, <&cru ACLK_PCIE20_SLV>,
+ <&cru ACLK_PCIE20_DBI>, <&cru PCLK_PCIE20>,
+ <&cru CLK_PCIE20_AUX>;
+ clock-names = "aclk_mst", "aclk_slv",
+ "aclk_dbi", "pclk", "aux";
+ device_type = "pci";
+ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "sys", "pmc", "msg", "legacy", "err", "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie2x1_intc 0>,
+ <0 0 0 2 &pcie2x1_intc 1>,
+ <0 0 0 3 &pcie2x1_intc 2>,
+ <0 0 0 4 &pcie2x1_intc 3>;
+ linux,pci-domain = <0>;
+ max-link-speed = <2>;
+ num-ib-windows = <8>;
+ num-viewport = <8>;
+ num-ob-windows = <2>;
+ num-lanes = <1>;
+ phys = <&combphy PHY_TYPE_PCIE>;
+ phy-names = "pcie-phy";
+ power-domains = <&power 15>;
+ ranges = <0x01000000 0x0 0xfc100000 0x0 0xfc100000 0x0 0x100000
+ 0x02000000 0x0 0xfc200000 0x0 0xfc200000 0x0 0x1e00000
+ 0x03000000 0x3 0x00000000 0x3 0x00000000 0x0 0x40000000>;
+ resets = <&cru SRST_PCIE20_POWERUP>;
+ reset-names = "pipe";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ status = "disabled";
+
+ pcie2x1_intc: legacy-interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ gic: interrupt-controller@fe901000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0xfe901000 0 0x1000>,
+ <0x0 0xfe902000 0 0x2000>,
+ <0x0 0xfe904000 0 0x2000>,
+ <0x0 0xfe906000 0 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ qos_dma2ddr: qos@fee03800 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee03800 0x0 0x20>;
+ };
+
+ qos_mcu: qos@fee10000 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee10000 0x0 0x20>;
+ };
+
+ qos_dft_apb: qos@fee10100 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee10100 0x0 0x20>;
+ };
+
+ qos_gmac: qos@fee10200 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee10200 0x0 0x20>;
+ };
+
+ qos_mac100: qos@fee10300 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee10300 0x0 0x20>;
+ };
+
+ qos_dcf: qos@fee10400 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee10400 0x0 0x20>;
+ };
+
+ qos_cpu: qos@fee20000 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee20000 0x0 0x20>;
+ };
+
+ qos_gpu: qos@fee30000 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee30000 0x0 0x20>;
+ };
+
+ qos_npu: qos@fee40000 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee40000 0x0 0x20>;
+ };
+
+ qos_rkvdec: qos@fee50000 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee50000 0x0 0x20>;
+ };
+
+ qos_vepu: qos@fee60000 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee60000 0x0 0x20>;
+ };
+
+ qos_isp: qos@fee70000 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee70000 0x0 0x20>;
+ };
+
+ qos_vicap: qos@fee70100 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee70100 0x0 0x20>;
+ };
+
+ qos_vop: qos@fee80000 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee80000 0x0 0x20>;
+ };
+
+ qos_jpeg: qos@fee90000 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee90000 0x0 0x20>;
+ };
+
+ qos_rga_rd: qos@fee90100 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee90100 0x0 0x20>;
+ };
+
+ qos_rga_wr: qos@fee90200 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfee90200 0x0 0x20>;
+ };
+
+ qos_pcie: qos@feea0000 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfeea0000 0x0 0x20>;
+ };
+
+ qos_usb3: qos@feea0100 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfeea0100 0x0 0x20>;
+ };
+
+ qos_crypto_apb: qos@feeb0000 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfeeb0000 0x0 0x20>;
+ };
+
+ qos_crypto: qos@feeb0100 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfeeb0100 0x0 0x20>;
+ };
+
+ qos_dmac: qos@feeb0200 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfeeb0200 0x0 0x20>;
+ };
+
+ qos_emmc: qos@feeb0300 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfeeb0300 0x0 0x20>;
+ };
+
+ qos_fspi: qos@feeb0400 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfeeb0400 0x0 0x20>;
+ };
+
+ qos_rkdma: qos@feeb0500 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfeeb0500 0x0 0x20>;
+ };
+
+ qos_sdmmc0: qos@feeb0600 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfeeb0600 0x0 0x20>;
+ };
+
+ qos_sdmmc1: qos@feeb0700 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfeeb0700 0x0 0x20>;
+ };
+
+ qos_usb2: qos@feeb0800 {
+ compatible = "rockchip,rk3562-qos", "syscon";
+ reg = <0x0 0xfeeb0800 0x0 0x20>;
+ };
+
+ pmu_grf: syscon@ff010000 {
+ compatible = "rockchip,rk3562-pmu-grf", "syscon", "simple-mfd";
+ reg = <0x0 0xff010000 0x0 0x10000>;
+
+ reboot_mode: reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x220>;
+ mode-normal = <BOOT_NORMAL>;
+ mode-loader = <BOOT_BL_DOWNLOAD>;
+ mode-recovery = <BOOT_RECOVERY>;
+ mode-bootloader = <BOOT_FASTBOOT>;
+ };
+ };
+
+ sys_grf: syscon@ff030000 {
+ compatible = "rockchip,rk3562-sys-grf", "syscon";
+ reg = <0x0 0xff030000 0x0 0x10000>;
+ };
+
+ peri_grf: syscon@ff040000 {
+ compatible = "rockchip,rk3562-peri-grf", "syscon";
+ reg = <0x0 0xff040000 0x0 0x10000>;
+ };
+
+ ioc_grf: syscon@ff060000 {
+ compatible = "rockchip,rk3562-ioc-grf", "syscon";
+ reg = <0x0 0xff060000 0x0 0x30000>;
+ };
+
+ usbphy_grf: syscon@ff090000 {
+ compatible = "rockchip,rk3562-usbphy-grf", "syscon";
+ reg = <0x0 0xff090000 0x0 0x8000>;
+ };
+
+ pipephy_grf: syscon@ff098000 {
+ compatible = "rockchip,rk3562-pipephy-grf", "syscon";
+ reg = <0x0 0xff098000 0x0 0x8000>;
+ };
+
+ cru: clock-controller@ff100000 {
+ compatible = "rockchip,rk3562-cru";
+ reg = <0x0 0xff100000 0x0 0x40000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+
+ assigned-clocks = <&cru PLL_GPLL>, <&cru PLL_CPLL>,
+ <&cru PLL_HPLL>;
+ assigned-clock-rates = <1188000000>, <1000000000>,
+ <983040000>;
+ };
+
+ i2c0: i2c@ff200000 {
+ compatible = "rockchip,rk3562-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xff200000 0x0 0x1000>;
+ clocks = <&cru CLK_PMU0_I2C0>, <&cru PCLK_PMU0_I2C0>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ uart0: serial@ff210000 {
+ compatible = "rockchip,rk3562-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff210000 0x0 0x100>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_PMU1_UART0>, <&cru PCLK_PMU1_UART0>;
+ clock-names = "baudclk", "apb_pclk";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ spi0: spi@ff220000 {
+ compatible = "rockchip,rk3562-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xff220000 0x0 0x1000>;
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_PMU1_SPI0>, <&cru PCLK_PMU1_SPI0>;
+ clock-names = "spiclk", "apb_pclk";
+ dmas = <&dmac 13>, <&dmac 12>;
+ dma-names = "tx", "rx";
+ num-cs = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0m0_csn0 &spi0m0_csn1 &spi0m0_pins>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ pwm0: pwm@ff230000 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff230000 0x0 0x10>;
+ clocks = <&cru CLK_PMU1_PWM0>, <&cru PCLK_PMU1_PWM0>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@ff230010 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff230010 0x0 0x10>;
+ clocks = <&cru CLK_PMU1_PWM0>, <&cru PCLK_PMU1_PWM0>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm1m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@ff230020 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff230020 0x0 0x10>;
+ clocks = <&cru CLK_PMU1_PWM0>, <&cru PCLK_PMU1_PWM0>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm2m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@ff230030 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff230030 0x0 0x10>;
+ clocks = <&cru CLK_PMU1_PWM0>, <&cru PCLK_PMU1_PWM0>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm3m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pmu: power-management@ff258000 {
+ compatible = "rockchip,rk3562-pmu", "syscon", "simple-mfd";
+ reg = <0x0 0xff258000 0x0 0x1000>;
+
+ power: power-controller {
+ compatible = "rockchip,rk3562-power-controller";
+ #power-domain-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-domain@8 {
+ reg = <8>;
+ pm_qos = <&qos_gpu>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@7 {
+ reg = <7>;
+ pm_qos = <&qos_npu>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@11 {
+ reg = <11>;
+ pm_qos = <&qos_rkvdec>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@12 {
+ reg = <12>;
+ pm_qos = <&qos_isp>,
+ <&qos_vicap>;
+ #power-domain-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-domain@10 {
+ reg = <10>;
+ pm_qos = <&qos_vepu>;
+ #power-domain-cells = <0>;
+ };
+ };
+
+ power-domain@13 {
+ reg = <13>;
+ pm_qos = <&qos_vop>;
+ #power-domain-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-domain@14 {
+ reg = <14>;
+ pm_qos = <&qos_rga_rd>,
+ <&qos_rga_wr>,
+ <&qos_jpeg>;
+ #power-domain-cells = <0>;
+ };
+ };
+
+ power-domain@15 {
+ reg = <15>;
+ pm_qos = <&qos_pcie>,
+ <&qos_usb3>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+
+ gpu: gpu@ff320000 {
+ compatible = "rockchip,rk3562-mali", "arm,mali-bifrost";
+ reg = <0x0 0xff320000 0x0 0x4000>;
+ clocks = <&cru CLK_GPU>, <&cru CLK_GPU_BRG>,
+ <&cru ACLK_GPU_PRE>;
+ clock-names = "clk_gpu", "clk_gpu_brg", "aclk_gpu";
+ dynamic-power-coefficient = <820>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "job", "mmu", "gpu";
+ operating-points-v2 = <&gpu_opp_table>;
+ power-domains = <&power 8>;
+ #cooling-cells = <2>;
+ status = "disabled";
+ };
+
+ spi1: spi@ff640000 {
+ compatible = "rockchip,rk3066-spi";
+ reg = <0x0 0xff640000 0x0 0x1000>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_SPI1>, <&cru PCLK_SPI1>;
+ clock-names = "spiclk", "apb_pclk";
+ dmas = <&dmac 15>, <&dmac 14>;
+ dma-names = "tx", "rx";
+ num-cs = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1m0_csn0 &spi1m0_csn1 &spi1m0_pins>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi2: spi@ff650000 {
+ compatible = "rockchip,rk3066-spi";
+ reg = <0x0 0xff650000 0x0 0x1000>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_SPI2>, <&cru PCLK_SPI2>;
+ clock-names = "spiclk", "apb_pclk";
+ dmas = <&dmac 17>, <&dmac 16>;
+ dma-names = "tx", "rx";
+ num-cs = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m0_csn0 &spi2m0_csn1 &spi2m0_pins>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ uart1: serial@ff670000 {
+ compatible = "rockchip,rk3562-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff670000 0x0 0x100>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
+ clock-names = "baudclk", "apb_pclk";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart2: serial@ff680000 {
+ compatible = "rockchip,rk3562-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff680000 0x0 0x100>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
+ clock-names = "baudclk", "apb_pclk";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart3: serial@ff690000 {
+ compatible = "rockchip,rk3562-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff690000 0x0 0x100>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>;
+ clock-names = "baudclk", "apb_pclk";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart4: serial@ff6a0000 {
+ compatible = "rockchip,rk3562-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff6a0000 0x0 0x100>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART4>, <&cru PCLK_UART4>;
+ clock-names = "baudclk", "apb_pclk";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart5: serial@ff6b0000 {
+ compatible = "rockchip,rk3562-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff6b0000 0x0 0x100>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART5>, <&cru PCLK_UART5>;
+ clock-names = "baudclk", "apb_pclk";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart6: serial@ff6c0000 {
+ compatible = "rockchip,rk3562-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff6c0000 0x0 0x100>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART6>, <&cru PCLK_UART6>;
+ clock-names = "baudclk", "apb_pclk";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart7: serial@ff6d0000 {
+ compatible = "rockchip,rk3562-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff6d0000 0x0 0x100>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART7>, <&cru PCLK_UART7>;
+ clock-names = "baudclk", "apb_pclk";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart8: serial@ff6e0000 {
+ compatible = "rockchip,rk3562-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff6e0000 0x0 0x100>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART8>, <&cru PCLK_UART8>;
+ clock-names = "baudclk", "apb_pclk";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart9: serial@ff6f0000 {
+ compatible = "rockchip,rk3562-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff6f0000 0x0 0x100>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART9>, <&cru PCLK_UART9>;
+ clock-names = "baudclk", "apb_pclk";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@ff700000 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff700000 0x0 0x10>;
+ clocks = <&cru CLK_PWM1_PERI>, <&cru PCLK_PWM1_PERI>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm4m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm5: pwm@ff700010 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff700010 0x0 0x10>;
+ clocks = <&cru CLK_PWM1_PERI>, <&cru PCLK_PWM1_PERI>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm5m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm6: pwm@ff700020 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff700020 0x0 0x10>;
+ clocks = <&cru CLK_PWM1_PERI>, <&cru PCLK_PWM1_PERI>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm6m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm7: pwm@ff700030 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff700030 0x0 0x10>;
+ clocks = <&cru CLK_PWM1_PERI>, <&cru PCLK_PWM1_PERI>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm7m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm8: pwm@ff710000 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff710000 0x0 0x10>;
+ clocks = <&cru CLK_PWM2_PERI>, <&cru PCLK_PWM2_PERI>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm8m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm9: pwm@ff710010 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff710010 0x0 0x10>;
+ clocks = <&cru CLK_PWM2_PERI>, <&cru PCLK_PWM2_PERI>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm9m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm10: pwm@ff710020 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff710020 0x0 0x10>;
+ clocks = <&cru CLK_PWM2_PERI>, <&cru PCLK_PWM2_PERI>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm10m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm11: pwm@ff710030 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff710030 0x0 0x10>;
+ clocks = <&cru CLK_PWM2_PERI>, <&cru PCLK_PWM2_PERI>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm11m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm12: pwm@ff720000 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff720000 0x0 0x10>;
+ clocks = <&cru CLK_PWM3_PERI>, <&cru PCLK_PWM3_PERI>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm12m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm13: pwm@ff720010 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff720010 0x0 0x10>;
+ clocks = <&cru CLK_PWM3_PERI>, <&cru PCLK_PWM3_PERI>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm13m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm14: pwm@ff720020 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff720020 0x0 0x10>;
+ clocks = <&cru CLK_PWM3_PERI>, <&cru PCLK_PWM3_PERI>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm14m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm15: pwm@ff720030 {
+ compatible = "rockchip,rk3562-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xff720030 0x0 0x10>;
+ clocks = <&cru CLK_PWM3_PERI>, <&cru PCLK_PWM3_PERI>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm15m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ saradc0: adc@ff730000 {
+ compatible = "rockchip,rk3562-saradc";
+ reg = <0x0 0xff730000 0x0 0x100>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_SARADC>, <&cru PCLK_SARADC>;
+ clock-names = "saradc", "apb_pclk";
+ resets = <&cru SRST_P_SARADC>;
+ reset-names = "saradc-apb";
+ #io-channel-cells = <1>;
+ status = "disabled";
+ };
+
+ combphy: phy@ff750000 {
+ compatible = "rockchip,rk3562-naneng-combphy";
+ reg = <0x0 0xff750000 0x0 0x100>;
+ #phy-cells = <1>;
+ clocks = <&cru CLK_PIPEPHY_REF>, <&cru PCLK_PIPEPHY>,
+ <&cru PCLK_PHP>;
+ clock-names = "ref", "apb", "pipe";
+ assigned-clocks = <&cru CLK_PIPEPHY_REF>;
+ assigned-clock-rates = <100000000>;
+ resets = <&cru SRST_PIPEPHY>;
+ reset-names = "phy";
+ rockchip,pipe-grf = <&peri_grf>;
+ rockchip,pipe-phy-grf = <&pipephy_grf>;
+ status = "disabled";
+ };
+
+ sfc: spi@ff860000 {
+ compatible = "rockchip,sfc";
+ reg = <0x0 0xff860000 0x0 0x10000>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_SFC>, <&cru HCLK_SFC>;
+ clock-names = "clk_sfc", "hclk_sfc";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ sdhci: mmc@ff870000 {
+ compatible = "rockchip,rk3562-dwcmshc", "rockchip,rk3588-dwcmshc";
+ reg = <0x0 0xff870000 0x0 0x10000>;
+ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ assigned-clocks = <&cru BCLK_EMMC>, <&cru CCLK_EMMC>;
+ assigned-clock-rates = <200000000>, <200000000>;
+ clocks = <&cru CCLK_EMMC>, <&cru HCLK_EMMC>,
+ <&cru ACLK_EMMC>, <&cru BCLK_EMMC>,
+ <&cru TMCLK_EMMC>;
+ clock-names = "core", "bus", "axi", "block", "timer";
+ resets = <&cru SRST_C_EMMC>, <&cru SRST_H_EMMC>,
+ <&cru SRST_A_EMMC>, <&cru SRST_B_EMMC>,
+ <&cru SRST_T_EMMC>;
+ reset-names = "core", "bus", "axi", "block", "timer";
+ max-frequency = <200000000>;
+ status = "disabled";
+ };
+
+ sdmmc0: mmc@ff880000 {
+ compatible = "rockchip,rk3562-dw-mshc",
+ "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xff880000 0x0 0x10000>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_SDMMC0>, <&cru CCLK_SDMMC0>,
+ <&cru SCLK_SDMMC0_DRV>, <&cru SCLK_SDMMC0_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ max-frequency = <200000000>;
+ resets = <&cru SRST_H_SDMMC0>;
+ reset-names = "reset";
+ status = "disabled";
+ };
+
+ sdmmc1: mmc@ff890000 {
+ compatible = "rockchip,rk3562-dw-mshc",
+ "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xff890000 0x0 0x10000>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_SDMMC1>, <&cru CCLK_SDMMC1>,
+ <&cru SCLK_SDMMC1_DRV>, <&cru SCLK_SDMMC1_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ max-frequency = <200000000>;
+ resets = <&cru SRST_H_SDMMC1>;
+ reset-names = "reset";
+ status = "disabled";
+ };
+
+ dmac: dma-controller@ff990000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff990000 0x0 0x4000>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC>;
+ clock-names = "apb_pclk";
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ };
+
+ i2c1: i2c@ffa00000 {
+ compatible = "rockchip,rk3562-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xffa00000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C1>, <&cru PCLK_I2C1>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1m0_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@ffa10000 {
+ compatible = "rockchip,rk3562-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xffa10000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C2>, <&cru PCLK_I2C2>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2m0_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@ffa20000 {
+ compatible = "rockchip,rk3562-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xffa20000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C3>, <&cru PCLK_I2C3>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3m0_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@ffa30000 {
+ compatible = "rockchip,rk3562-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xffa30000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C4>, <&cru PCLK_I2C4>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4m0_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@ffa40000 {
+ compatible = "rockchip,rk3562-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xffa40000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C5>, <&cru PCLK_I2C5>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5m0_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ saradc1: adc@ffaa0000 {
+ compatible = "rockchip,rk3562-saradc";
+ reg = <0x0 0xffaa0000 0x0 0x100>;
+ interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_SARADC_VCCIO156>, <&cru PCLK_SARADC_VCCIO156>;
+ clock-names = "saradc", "apb_pclk";
+ resets = <&cru SRST_P_SARADC_VCCIO156>;
+ reset-names = "saradc-apb";
+ #io-channel-cells = <1>;
+ status = "disabled";
+ };
+ };
+};
+
+#include "rk3562-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi
index a48351471764..e7ba477e75f9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi
@@ -775,7 +775,7 @@
rockchip,default-sample-phase = <90>;
status = "okay";
- sdio-wifi@1 {
+ wifi@1 {
compatible = "brcm,bcm4329-fmac";
reg = <1>;
interrupt-parent = <&gpio2>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts b/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts
index 7d4680933823..decc6deeef4e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts
@@ -19,9 +19,9 @@
aliases {
ethernet0 = &gmac1;
- mmc0 = &sdmmc0;
- mmc1 = &sdmmc1;
- mmc2 = &sdhci;
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc0;
+ mmc2 = &sdmmc1;
};
chosen: chosen {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-pinetab2.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-pinetab2.dtsi
index 26cf765a7297..3473b1eef5cd 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-pinetab2.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-pinetab2.dtsi
@@ -867,6 +867,7 @@
spi-max-frequency = <100000000>;
spi-rx-bus-width = <2>;
spi-tx-bus-width = <1>;
+ vcc-supply = <&vcc_1v8>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
index 5707321a1144..f8cf03380636 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
@@ -648,6 +648,7 @@
spi-max-frequency = <24000000>;
spi-rx-bus-width = <4>;
spi-tx-bus-width = <1>;
+ vcc-supply = <&vcc_1v8>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts b/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts
index 53e71528e4c4..6224d72813e5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts
@@ -636,6 +636,7 @@
spi-max-frequency = <104000000>;
spi-rx-bus-width = <4>;
spi-tx-bus-width = <1>;
+ vcc-supply = <&vcc_1v8>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dts b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dts
index b6ad8328c7eb..3b31f0dd8f3b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dts
@@ -20,33 +20,43 @@
gpio-leds {
compatible = "gpio-leds";
pinctrl-names = "default";
- pinctrl-0 = <&lan1_led_pin>, <&lan2_led_pin>, <&power_led_pin>, <&wan_led_pin>;
+ pinctrl-0 = <&lan1_led_pin>, <&lan2_led_pin>, <&sys_led_pin>, <&wan_led_pin>;
led-lan1 {
color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
function = LED_FUNCTION_LAN;
function-enumerator = <1>;
gpios = <&gpio3 RK_PD6 GPIO_ACTIVE_HIGH>;
+ label = "LAN-1";
+ linux,default-trigger = "netdev";
};
led-lan2 {
color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
function = LED_FUNCTION_LAN;
function-enumerator = <2>;
gpios = <&gpio3 RK_PD7 GPIO_ACTIVE_HIGH>;
+ label = "LAN-2";
+ linux,default-trigger = "netdev";
};
- power_led: led-power {
+ power_led: led-sys {
color = <LED_COLOR_ID_RED>;
function = LED_FUNCTION_POWER;
- linux,default-trigger = "heartbeat";
gpios = <&gpio4 RK_PD2 GPIO_ACTIVE_HIGH>;
+ label = "SYS";
+ linux,default-trigger = "heartbeat";
};
led-wan {
color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
function = LED_FUNCTION_WAN;
gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>;
+ label = "WAN";
+ linux,default-trigger = "netdev";
};
};
};
@@ -58,6 +68,7 @@
clock_in_out = "output";
phy-handle = <&rgmii_phy0>;
phy-mode = "rgmii";
+ phy-supply = <&vcc_3v3>;
pinctrl-names = "default";
pinctrl-0 = <&gmac0_miim
&gmac0_tx_bus2
@@ -125,7 +136,7 @@
rockchip,pins = <3 RK_PD7 RK_FUNC_GPIO &pcfg_pull_none>;
};
- power_led_pin: power-led-pin {
+ sys_led_pin: sys-led-pin {
rockchip,pins = <4 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi
index 00c479aa1871..a28b4af10d13 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi
@@ -486,9 +486,12 @@
&sdhci {
bus-width = <8>;
max-frequency = <200000000>;
+ mmc-hs200-1_8v;
non-removable;
pinctrl-names = "default";
- pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd>;
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_1v8>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts b/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts
index 7bd32d230ad2..6ae4316761c4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts
@@ -481,9 +481,14 @@
};
&mdio0 {
- rgmii_phy0: ethernet-phy@0 {
+ rgmii_phy0: ethernet-phy@3 {
+ /* Motorcomm YT8521 phy */
compatible = "ethernet-phy-ieee802.3-c22";
- reg = <0x0>;
+ reg = <0x3>;
+ pinctrl-0 = <&eth_phy0_reset_pin>;
+ pinctrl-names = "default";
+ reset-assert-us = <10000>;
+ reset-gpios = <&gpio0 RK_PC6 GPIO_ACTIVE_LOW>;
};
};
@@ -556,6 +561,12 @@
};
&pinctrl {
+ gmac0 {
+ eth_phy0_reset_pin: eth-phy0-reset-pin {
+ rockchip,pins = <0 RK_PC6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
keys {
copy_button_pin: copy-button-pin {
rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_up>;
@@ -619,6 +630,8 @@
bus-width = <8>;
max-frequency = <200000000>;
non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568.dtsi b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
index 695cccbdab0f..e719a3df126c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
@@ -152,7 +152,7 @@
compatible = "rockchip,rk3568-pcie";
#address-cells = <3>;
#size-cells = <2>;
- bus-range = <0x0 0xf>;
+ bus-range = <0x10 0x1f>;
clocks = <&cru ACLK_PCIE30X1_MST>, <&cru ACLK_PCIE30X1_SLV>,
<&cru ACLK_PCIE30X1_DBI>, <&cru PCLK_PCIE30X1>,
<&cru CLK_PCIE30X1_AUX_NDFT>;
@@ -175,7 +175,7 @@
num-ib-windows = <6>;
num-ob-windows = <2>;
max-link-speed = <3>;
- msi-map = <0x0 &gic 0x1000 0x1000>;
+ msi-map = <0x1000 &its 0x1000 0x1000>;
num-lanes = <1>;
phys = <&pcie30phy>;
phy-names = "pcie-phy";
@@ -205,7 +205,7 @@
compatible = "rockchip,rk3568-pcie";
#address-cells = <3>;
#size-cells = <2>;
- bus-range = <0x0 0xf>;
+ bus-range = <0x20 0x2f>;
clocks = <&cru ACLK_PCIE30X2_MST>, <&cru ACLK_PCIE30X2_SLV>,
<&cru ACLK_PCIE30X2_DBI>, <&cru PCLK_PCIE30X2>,
<&cru CLK_PCIE30X2_AUX_NDFT>;
@@ -228,7 +228,7 @@
num-ib-windows = <6>;
num-ob-windows = <2>;
max-link-speed = <3>;
- msi-map = <0x0 &gic 0x2000 0x1000>;
+ msi-map = <0x2000 &its 0x2000 0x1000>;
num-lanes = <2>;
phys = <&pcie30phy>;
phy-names = "pcie-phy";
diff --git a/arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts b/arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts
index 828bde7fab68..b09e789c75c4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts
@@ -56,6 +56,34 @@
};
};
+ es8388_sound: es8388-sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,mclk-fs = <256>;
+ simple-audio-card,name = "On-board Analog ES8388";
+ simple-audio-card,widgets = "Microphone", "Headphone Mic",
+ "Microphone", "Mic Pads",
+ "Headphone", "Headphone",
+ "Line Out", "Line Out";
+ simple-audio-card,routing = "Headphone", "LOUT1",
+ "Headphone", "ROUT1",
+ "Line Out", "LOUT2",
+ "Line Out", "ROUT2",
+ "RINPUT1", "Headphone Mic",
+ "LINPUT2", "Mic Pads",
+ "RINPUT2", "Mic Pads";
+ simple-audio-card,pin-switches = "Headphone", "Line Out";
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai1>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&es8388>;
+ system-clock-frequency = <12288000>;
+ };
+ };
+
vcc_12v0_dcin: regulator-vcc-12v0-dcin {
compatible = "regulator-fixed";
regulator-name = "vcc_12v0_dcin";
@@ -117,6 +145,8 @@
vcc_3v3_pcie: regulator-vcc-3v3-pcie {
compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pwr_en>;
regulator-name = "vcc_3v3_pcie";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
@@ -177,6 +207,10 @@
};
};
+&combphy0_ps {
+ status = "okay";
+};
+
&cpu_l0 {
cpu-supply = <&vdd_cpu_lit_s0>;
};
@@ -241,6 +275,10 @@
};
};
+&hdmi_sound {
+ status = "okay";
+};
+
&hdptxphy {
status = "okay";
};
@@ -610,7 +648,7 @@
reg = <0x51>;
clock-output-names = "hym8563";
interrupt-parent = <&gpio0>;
- interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <RK_PA0 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&hym8563_int>;
wakeup-source;
@@ -618,6 +656,25 @@
};
};
+&i2c3 {
+ status = "okay";
+
+ es8388: audio-codec@10 {
+ compatible = "everest,es8388", "everest,es8328";
+ reg = <0x10>;
+ clocks = <&cru CLK_SAI1_MCLKOUT_TO_IO>;
+ AVDD-supply = <&vcca_3v3_s0>;
+ DVDD-supply = <&vcc_3v3_s0>;
+ HPVDD-supply = <&vcca_3v3_s0>;
+ PVDD-supply = <&vcc_3v3_s0>;
+ assigned-clocks = <&cru CLK_SAI1_MCLKOUT_TO_IO>;
+ assigned-clock-rates = <12288000>;
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sai1m0_mclk>;
+ };
+};
+
&mdio0 {
rgmii_phy0: phy@1 {
compatible = "ethernet-phy-ieee802.3-c22";
@@ -634,6 +691,14 @@
};
};
+&pcie0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_reset>;
+ reset-gpios = <&gpio2 RK_PB4 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc_3v3_pcie>;
+ status = "okay";
+};
+
&pinctrl {
headphone {
hp_det: hp-det {
@@ -655,6 +720,28 @@
rockchip,pins = <4 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
+
+ pcie {
+ pcie_pwr_en: pcie-pwr-en {
+ rockchip,pins = <3 RK_PD6 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ pcie_reset: pcie-reset {
+ rockchip,pins = <2 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&sai1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sai1m0_lrck
+ &sai1m0_sclk
+ &sai1m0_sdi0
+ &sai1m0_sdo0>;
+ status = "okay";
+};
+
+&sai6 {
+ status = "okay";
};
&sdhci {
diff --git a/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts
index e368691fd28e..0902d694cef4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts
@@ -128,7 +128,7 @@
vin-supply = <&vcc_3v3_s0>;
};
- vcc3v3_pcie0: regulator-vcc3v3-pcie0 {
+ vcc3v3_pcie1: regulator-vcc3v3-pcie1 {
compatible = "regulator-fixed";
regulator-name = "vcc3v3_pcie1";
regulator-min-microvolt = <3300000>;
@@ -691,6 +691,17 @@
};
};
+&pcie1 {
+ reset-gpios = <&gpio4 RK_PC4 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie1>;
+
+ /*
+ * Disable usb_drd1_dwc3 if enabling pcie1 and set Dial_Switch_1
+ * to low state according to the schematic of page 17.
+ */
+ status = "disabled";
+};
+
&pinctrl {
usb {
usb_host_pwren: usb-host-pwren {
@@ -747,6 +758,10 @@
status = "okay";
};
+&ufshc {
+ status = "okay";
+};
+
&usbdp_phy {
rockchip,dp-lane-mux = <2 3>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3576-roc-pc.dts b/arch/arm64/boot/dts/rockchip/rk3576-roc-pc.dts
index 612b7bb0b749..d4e437ea6cd8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3576-roc-pc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3576-roc-pc.dts
@@ -10,6 +10,7 @@
#include <dt-bindings/input/input.h>
#include <dt-bindings/pinctrl/rockchip.h>
#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
#include <dt-bindings/usb/pd.h>
#include "rk3576.dtsi"
@@ -54,6 +55,17 @@
};
};
+ hdmi-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
vbus5v0_typec: regulator-vbus5v0-typec {
compatible = "regulator-fixed";
enable-active-high;
@@ -258,6 +270,26 @@
status = "okay";
};
+&hdmi {
+ status = "okay";
+};
+
+&hdmi_in {
+ hdmi_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi>;
+ };
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdptxphy {
+ status = "okay";
+};
+
&mdio0 {
status = "okay";
@@ -734,3 +766,18 @@
pinctrl-0 = <&uart6m3_xfer>;
status = "okay";
};
+
+&vop {
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3576.dtsi b/arch/arm64/boot/dts/rockchip/rk3576.dtsi
index ebb5fc8bb8b1..1086482f0479 100644
--- a/arch/arm64/boot/dts/rockchip/rk3576.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3576.dtsi
@@ -413,6 +413,90 @@
};
};
+ hdmi_sound: hdmi-sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "HDMI";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,mclk-fs = <256>;
+ status = "disabled";
+
+ simple-audio-card,codec {
+ sound-dai = <&hdmi>;
+ };
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai6>;
+ };
+ };
+
+ pinctrl: pinctrl {
+ compatible = "rockchip,rk3576-pinctrl";
+ rockchip,grf = <&ioc_grf>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gpio0: gpio@27320000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0x27320000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO0>, <&cru DBCLK_GPIO0>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@2ae10000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0x2ae10000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO1>, <&cru DBCLK_GPIO1>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@2ae20000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0x2ae20000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO2>, <&cru DBCLK_GPIO2>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@2ae30000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0x2ae30000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO3>, <&cru DBCLK_GPIO3>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@2ae40000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0x2ae40000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO4>, <&cru DBCLK_GPIO4>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 128 32>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+ };
+
pmu_a53: pmu-a53 {
compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
@@ -450,6 +534,114 @@
#size-cells = <2>;
ranges;
+ pcie0: pcie@22000000 {
+ compatible = "rockchip,rk3576-pcie", "rockchip,rk3568-pcie";
+ reg = <0x0 0x22000000 0x0 0x00400000>,
+ <0x0 0x2a200000 0x0 0x00010000>,
+ <0x0 0x20000000 0x0 0x00100000>;
+ reg-names = "dbi", "apb", "config";
+ bus-range = <0x0 0xf>;
+ clocks = <&cru ACLK_PCIE0_MST>, <&cru ACLK_PCIE0_SLV>,
+ <&cru ACLK_PCIE0_DBI>, <&cru PCLK_PCIE0>,
+ <&cru CLK_PCIE0_AUX>;
+ clock-names = "aclk_mst", "aclk_slv",
+ "aclk_dbi", "pclk",
+ "aux";
+ device_type = "pci";
+ interrupts = <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "sys", "pmc", "msg", "legacy", "err", "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie0_intc 0>,
+ <0 0 0 2 &pcie0_intc 1>,
+ <0 0 0 3 &pcie0_intc 2>,
+ <0 0 0 4 &pcie0_intc 3>;
+ linux,pci-domain = <0>;
+ max-link-speed = <2>;
+ num-ib-windows = <8>;
+ num-viewport = <8>;
+ num-ob-windows = <2>;
+ num-lanes = <1>;
+ phys = <&combphy0_ps PHY_TYPE_PCIE>;
+ phy-names = "pcie-phy";
+ power-domains = <&power RK3576_PD_PHP>;
+ ranges = <0x01000000 0x0 0x20100000 0x0 0x20100000 0x0 0x00100000
+ 0x02000000 0x0 0x20200000 0x0 0x20200000 0x0 0x00e00000
+ 0x03000000 0x9 0x00000000 0x9 0x00000000 0x0 0x80000000>;
+ resets = <&cru SRST_PCIE0_POWER_UP>, <&cru SRST_P_PCIE0>;
+ reset-names = "pwr", "pipe";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ status = "disabled";
+
+ pcie0_intc: legacy-interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 280 IRQ_TYPE_EDGE_RISING>;
+ };
+ };
+
+ pcie1: pcie@22400000 {
+ compatible = "rockchip,rk3576-pcie", "rockchip,rk3568-pcie";
+ reg = <0x0 0x22400000 0x0 0x00400000>,
+ <0x0 0x2a210000 0x0 0x00010000>,
+ <0x0 0x21000000 0x0 0x00100000>;
+ reg-names = "dbi", "apb", "config";
+ bus-range = <0x20 0x2f>;
+ clocks = <&cru ACLK_PCIE1_MST>, <&cru ACLK_PCIE1_SLV>,
+ <&cru ACLK_PCIE1_DBI>, <&cru PCLK_PCIE1>,
+ <&cru CLK_PCIE1_AUX>;
+ clock-names = "aclk_mst", "aclk_slv",
+ "aclk_dbi", "pclk",
+ "aux";
+ device_type = "pci";
+ interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "sys", "pmc", "msg", "legacy", "err", "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie1_intc 0>,
+ <0 0 0 2 &pcie1_intc 1>,
+ <0 0 0 3 &pcie1_intc 2>,
+ <0 0 0 4 &pcie1_intc 3>;
+ linux,pci-domain = <0>;
+ max-link-speed = <2>;
+ num-ib-windows = <8>;
+ num-viewport = <8>;
+ num-ob-windows = <2>;
+ num-lanes = <1>;
+ phys = <&combphy1_psu PHY_TYPE_PCIE>;
+ phy-names = "pcie-phy";
+ power-domains = <&power RK3576_PD_SUBPHP>;
+ ranges = <0x01000000 0x0 0x21100000 0x0 0x21100000 0x0 0x00100000
+ 0x02000000 0x0 0x21200000 0x0 0x21200000 0x0 0x00e00000
+ 0x03000000 0x9 0x80000000 0x9 0x80000000 0x0 0x80000000>;
+ resets = <&cru SRST_PCIE1_POWER_UP>, <&cru SRST_P_PCIE1>;
+ reset-names = "pwr", "pipe";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ status = "disabled";
+
+ pcie1_intc: legacy-interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 266 IRQ_TYPE_EDGE_RISING>;
+ };
+ };
+
usb_drd0_dwc3: usb@23000000 {
compatible = "rockchip,rk3576-dwc3", "snps,dwc3";
reg = <0x0 0x23000000 0x0 0x400000>;
@@ -1010,6 +1202,41 @@
status = "disabled";
};
+ sai5: sai@27d40000 {
+ compatible = "rockchip,rk3576-sai";
+ reg = <0x0 0x27d40000 0x0 0x1000>;
+ interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru MCLK_SAI5_8CH>, <&cru HCLK_SAI5_8CH>;
+ clock-names = "mclk", "hclk";
+ dmas = <&dmac2 3>;
+ dma-names = "rx";
+ power-domains = <&power RK3576_PD_VO0>;
+ resets = <&cru SRST_M_SAI5_8CH>, <&cru SRST_H_SAI5_8CH>;
+ reset-names = "m", "h";
+ rockchip,sai-rx-route = <0 1 2 3>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SAI5";
+ status = "disabled";
+ };
+
+ sai6: sai@27d50000 {
+ compatible = "rockchip,rk3576-sai";
+ reg = <0x0 0x27d50000 0x0 0x1000>;
+ interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru MCLK_SAI6_8CH>, <&cru HCLK_SAI6_8CH>;
+ clock-names = "mclk", "hclk";
+ dmas = <&dmac2 4>, <&dmac2 5>;
+ dma-names = "tx", "rx";
+ power-domains = <&power RK3576_PD_VO0>;
+ resets = <&cru SRST_M_SAI6_8CH>, <&cru SRST_H_SAI6_8CH>;
+ reset-names = "m", "h";
+ rockchip,sai-rx-route = <0 1 2 3>;
+ rockchip,sai-tx-route = <0 1 2 3>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SAI6";
+ status = "disabled";
+ };
+
hdmi: hdmi@27da0000 {
compatible = "rockchip,rk3576-dw-hdmi-qp";
reg = <0x0 0x27da0000 0x0 0x20000>;
@@ -1034,6 +1261,7 @@
reset-names = "ref", "hdp";
rockchip,grf = <&ioc_grf>;
rockchip,vo-grf = <&vo0_grf>;
+ #sound-dai-cells = <0>;
status = "disabled";
ports {
@@ -1050,6 +1278,57 @@
};
};
+ sai7: sai@27ed0000 {
+ compatible = "rockchip,rk3576-sai";
+ reg = <0x0 0x27ed0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru MCLK_SAI7_8CH>, <&cru HCLK_SAI7_8CH>;
+ clock-names = "mclk", "hclk";
+ dmas = <&dmac2 19>;
+ dma-names = "tx";
+ power-domains = <&power RK3576_PD_VO1>;
+ resets = <&cru SRST_M_SAI7_8CH>, <&cru SRST_H_SAI7_8CH>;
+ reset-names = "m", "h";
+ rockchip,sai-tx-route = <0 1 2 3>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SAI7";
+ status = "disabled";
+ };
+
+ sai8: sai@27ee0000 {
+ compatible = "rockchip,rk3576-sai";
+ reg = <0x0 0x27ee0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru MCLK_SAI8_8CH>, <&cru HCLK_SAI8_8CH>;
+ clock-names = "mclk", "hclk";
+ dmas = <&dmac1 7>;
+ dma-names = "tx";
+ power-domains = <&power RK3576_PD_VO1>;
+ resets = <&cru SRST_M_SAI8_8CH>, <&cru SRST_H_SAI8_8CH>;
+ reset-names = "m", "h";
+ rockchip,sai-tx-route = <0 1 2 3>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SAI8";
+ status = "disabled";
+ };
+
+ sai9: sai@27ef0000 {
+ compatible = "rockchip,rk3576-sai";
+ reg = <0x0 0x27ef0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru MCLK_SAI9_8CH>, <&cru HCLK_SAI9_8CH>;
+ clock-names = "mclk", "hclk";
+ dmas = <&dmac0 26>;
+ dma-names = "tx";
+ power-domains = <&power RK3576_PD_VO1>;
+ resets = <&cru SRST_M_SAI9_8CH>, <&cru SRST_H_SAI9_8CH>;
+ reset-names = "m", "h";
+ rockchip,sai-tx-route = <0 1 2 3>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SAI9";
+ status = "disabled";
+ };
+
qos_hdcp1: qos@27f02000 {
compatible = "rockchip,rk3576-qos", "syscon";
reg = <0x0 0x27f02000 0x0 0x20>;
@@ -1334,6 +1613,36 @@
};
};
+ sata0: sata@2a240000 {
+ compatible = "rockchip,rk3576-dwc-ahci", "snps,dwc-ahci";
+ reg = <0x0 0x2a240000 0x0 0x1000>;
+ clocks = <&cru ACLK_SATA0>, <&cru CLK_PMALIVE0>,
+ <&cru CLK_RXOOB0>;
+ clock-names = "sata", "pmalive", "rxoob";
+ interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&power RK3576_PD_SUBPHP>;
+ phys = <&combphy0_ps PHY_TYPE_SATA>;
+ phy-names = "sata-phy";
+ ports-implemented = <0x1>;
+ dma-coherent;
+ status = "disabled";
+ };
+
+ sata1: sata@2a250000 {
+ compatible = "rockchip,rk3576-dwc-ahci", "snps,dwc-ahci";
+ reg = <0x0 0x2a250000 0x0 0x1000>;
+ clocks = <&cru ACLK_SATA1>, <&cru CLK_PMALIVE1>,
+ <&cru CLK_RXOOB1>;
+ clock-names = "sata", "pmalive", "rxoob";
+ interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&power RK3576_PD_SUBPHP>;
+ phys = <&combphy1_psu PHY_TYPE_SATA>;
+ phy-names = "sata-phy";
+ ports-implemented = <0x1>;
+ dma-coherent;
+ status = "disabled";
+ };
+
ufshc: ufshc@2a2d0000 {
compatible = "rockchip,rk3576-ufshc";
reg = <0x0 0x2a2d0000 0x0 0x10000>,
@@ -1364,6 +1673,7 @@
interrupts = <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_FSPI1_X2>, <&cru HCLK_FSPI1>;
clock-names = "clk_sfc", "hclk_sfc";
+ power-domains = <&power RK3576_PD_SDGMAC>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1414,11 +1724,20 @@
interrupts = <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_FSPI_X2>, <&cru HCLK_FSPI>;
clock-names = "clk_sfc", "hclk_sfc";
+ power-domains = <&power RK3576_PD_NVM>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
+ rng: rng@2a410000 {
+ compatible = "rockchip,rk3576-rng";
+ reg = <0x0 0x2a410000 0x0 0x200>;
+ clocks = <&cru HCLK_TRNG_NS>;
+ interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&cru SRST_H_TRNG_NS>;
+ };
+
otp: otp@2a580000 {
compatible = "rockchip,rk3576-otp";
reg = <0x0 0x2a580000 0x0 0x400>;
@@ -1458,6 +1777,120 @@
};
};
+ sai0: sai@2a600000 {
+ compatible = "rockchip,rk3576-sai";
+ reg = <0x0 0x2a600000 0x0 0x1000>;
+ interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru MCLK_SAI0_8CH>, <&cru HCLK_SAI0_8CH>;
+ clock-names = "mclk", "hclk";
+ dmas = <&dmac0 0>, <&dmac0 1>;
+ dma-names = "tx", "rx";
+ power-domains = <&power RK3576_PD_AUDIO>;
+ resets = <&cru SRST_M_SAI0_8CH>, <&cru SRST_H_SAI0_8CH>;
+ reset-names = "m", "h";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sai0m0_lrck
+ &sai0m0_sclk
+ &sai0m0_sdi0
+ &sai0m0_sdi1
+ &sai0m0_sdi2
+ &sai0m0_sdi3
+ &sai0m0_sdo0
+ &sai0m0_sdo1
+ &sai0m0_sdo2
+ &sai0m0_sdo3>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SAI0";
+ status = "disabled";
+ };
+
+ sai1: sai@2a610000 {
+ compatible = "rockchip,rk3576-sai";
+ reg = <0x0 0x2a610000 0x0 0x1000>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru MCLK_SAI1_8CH>, <&cru HCLK_SAI1_8CH>;
+ clock-names = "mclk", "hclk";
+ dmas = <&dmac0 2>, <&dmac0 3>;
+ dma-names = "tx", "rx";
+ power-domains = <&power RK3576_PD_AUDIO>;
+ resets = <&cru SRST_M_SAI1_8CH>, <&cru SRST_H_SAI1_8CH>;
+ reset-names = "m", "h";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sai1m0_lrck
+ &sai1m0_sclk
+ &sai1m0_sdi0
+ &sai1m0_sdo0
+ &sai1m0_sdo1
+ &sai1m0_sdo2
+ &sai1m0_sdo3>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SAI1";
+ status = "disabled";
+ };
+
+ sai2: sai@2a620000 {
+ compatible = "rockchip,rk3576-sai";
+ reg = <0x0 0x2a620000 0x0 0x1000>;
+ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru MCLK_SAI2_2CH>, <&cru HCLK_SAI2_2CH>;
+ clock-names = "mclk", "hclk";
+ dmas = <&dmac1 0>, <&dmac1 1>;
+ dma-names = "tx", "rx";
+ power-domains = <&power RK3576_PD_AUDIO>;
+ resets = <&cru SRST_M_SAI2_2CH>, <&cru SRST_H_SAI2_2CH>;
+ reset-names = "m", "h";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sai2m0_lrck
+ &sai2m0_sclk
+ &sai2m0_sdi
+ &sai2m0_sdo>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SAI2";
+ status = "disabled";
+ };
+
+ sai3: sai@2a630000 {
+ compatible = "rockchip,rk3576-sai";
+ reg = <0x0 0x2a630000 0x0 0x1000>;
+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru MCLK_SAI3_2CH>, <&cru HCLK_SAI3_2CH>;
+ clock-names = "mclk", "hclk";
+ dmas = <&dmac1 2>, <&dmac1 3>;
+ dma-names = "tx", "rx";
+ power-domains = <&power RK3576_PD_AUDIO>;
+ resets = <&cru SRST_M_SAI3_2CH>, <&cru SRST_H_SAI3_2CH>;
+ reset-names = "m", "h";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sai3m0_lrck
+ &sai3m0_sclk
+ &sai3m0_sdi
+ &sai3m0_sdo>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SAI3";
+ status = "disabled";
+ };
+
+ sai4: sai@2a640000 {
+ compatible = "rockchip,rk3576-sai";
+ reg = <0x0 0x2a640000 0x0 0x1000>;
+ interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru MCLK_SAI4_2CH>, <&cru HCLK_SAI4_2CH>;
+ clock-names = "mclk", "hclk";
+ dmas = <&dmac2 0>, <&dmac2 1>;
+ dma-names = "tx", "rx";
+ power-domains = <&power RK3576_PD_AUDIO>;
+ resets = <&cru SRST_M_SAI4_2CH>, <&cru SRST_H_SAI4_2CH>;
+ reset-names = "m", "h";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sai4m0_lrck
+ &sai4m0_sclk
+ &sai4m0_sdi
+ &sai4m0_sdo>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SAI4";
+ status = "disabled";
+ };
+
gic: interrupt-controller@2a701000 {
compatible = "arm,gic-400";
reg = <0x0 0x2a701000 0 0x10000>,
@@ -1569,7 +2002,6 @@
status = "disabled";
};
-
i2c6: i2c@2ac90000 {
compatible = "rockchip,rk3576-i2c", "rockchip,rk3399-i2c";
reg = <0x0 0x2ac90000 0x0 0x1000>;
@@ -1984,74 +2416,6 @@
compatible = "arm,scmi-shmem";
reg = <0x0 0x4010f000 0x0 0x100>;
};
-
- pinctrl: pinctrl {
- compatible = "rockchip,rk3576-pinctrl";
- rockchip,grf = <&ioc_grf>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- gpio0: gpio@27320000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0x27320000 0x0 0x200>;
- clocks = <&cru PCLK_GPIO0>, <&cru DBCLK_GPIO0>;
- gpio-controller;
- gpio-ranges = <&pinctrl 0 0 32>;
- interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-controller;
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- };
-
- gpio1: gpio@2ae10000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0x2ae10000 0x0 0x200>;
- clocks = <&cru PCLK_GPIO1>, <&cru DBCLK_GPIO1>;
- gpio-controller;
- gpio-ranges = <&pinctrl 0 32 32>;
- interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-controller;
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- };
-
- gpio2: gpio@2ae20000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0x2ae20000 0x0 0x200>;
- clocks = <&cru PCLK_GPIO2>, <&cru DBCLK_GPIO2>;
- gpio-controller;
- gpio-ranges = <&pinctrl 0 64 32>;
- interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-controller;
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- };
-
- gpio3: gpio@2ae30000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0x2ae30000 0x0 0x200>;
- clocks = <&cru PCLK_GPIO3>, <&cru DBCLK_GPIO3>;
- gpio-controller;
- gpio-ranges = <&pinctrl 0 96 32>;
- interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-controller;
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- };
-
- gpio4: gpio@2ae40000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0x2ae40000 0x0 0x200>;
- clocks = <&cru PCLK_GPIO4>, <&cru DBCLK_GPIO4>;
- gpio-controller;
- gpio-ranges = <&pinctrl 0 128 32>;
- interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-controller;
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- };
- };
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-armsom-w3.dts b/arch/arm64/boot/dts/rockchip/rk3588-armsom-w3.dts
index 779cd1b1798c..6ad2759ddcca 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-armsom-w3.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-armsom-w3.dts
@@ -4,6 +4,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/leds/common.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
#include "rk3588-armsom-lm7.dtsi"
/ {
@@ -32,6 +33,28 @@
pinctrl-0 = <&hp_detect>;
};
+ hdmi0-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi0_con_in: endpoint {
+ remote-endpoint = <&hdmi0_out_con>;
+ };
+ };
+ };
+
+ hdmi1-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi1_con_in: endpoint {
+ remote-endpoint = <&hdmi1_out_con>;
+ };
+ };
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -138,6 +161,54 @@
status = "okay";
};
+&hdmi0 {
+ status = "okay";
+};
+
+&hdmi0_in {
+ hdmi0_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi0>;
+ };
+};
+
+&hdmi0_out {
+ hdmi0_out_con: endpoint {
+ remote-endpoint = <&hdmi0_con_in>;
+ };
+};
+
+&hdmi0_sound {
+ status = "okay";
+};
+
+&hdmi1 {
+ status = "okay";
+};
+
+&hdmi1_in {
+ hdmi1_in_vp1: endpoint {
+ remote-endpoint = <&vp1_out_hdmi1>;
+ };
+};
+
+&hdmi1_out {
+ hdmi1_out_con: endpoint {
+ remote-endpoint = <&hdmi1_con_in>;
+ };
+};
+
+&hdmi1_sound {
+ status = "okay";
+};
+
+&hdptxphy0 {
+ status = "okay";
+};
+
+&hdptxphy1 {
+ status = "okay";
+};
+
&i2c6 {
status = "okay";
@@ -192,6 +263,14 @@
};
};
+&i2s5_8ch {
+ status = "okay";
+};
+
+&i2s6_8ch {
+ status = "okay";
+};
+
&package_thermal {
polling-delay = <1000>;
@@ -406,3 +485,25 @@
&usb_host2_xhci {
status = "okay";
};
+
+&vop {
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi0: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi0_in_vp0>;
+ };
+};
+
+&vp1 {
+ vp1_out_hdmi1: endpoint@ROCKCHIP_VOP2_EP_HDMI1 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI1>;
+ remote-endpoint = <&hdmi1_in_vp1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
index 1e18ad93ba0e..70f03e68ba55 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/clock/rockchip,rk3588-cru.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/phy/phy.h>
#include <dt-bindings/power/rk3588-power.h>
#include <dt-bindings/reset/rockchip,rk3588-cru.h>
#include <dt-bindings/phy/phy.h>
@@ -95,8 +96,6 @@
enable-method = "psci";
capacity-dmips-mhz = <530>;
clocks = <&scmi_clk SCMI_CLK_CPUL>;
- assigned-clocks = <&scmi_clk SCMI_CLK_CPUL>;
- assigned-clock-rates = <816000000>;
cpu-idle-states = <&CPU_SLEEP>;
i-cache-size = <32768>;
i-cache-line-size = <64>;
@@ -173,8 +172,6 @@
enable-method = "psci";
capacity-dmips-mhz = <1024>;
clocks = <&scmi_clk SCMI_CLK_CPUB01>;
- assigned-clocks = <&scmi_clk SCMI_CLK_CPUB01>;
- assigned-clock-rates = <816000000>;
cpu-idle-states = <&CPU_SLEEP>;
i-cache-size = <65536>;
i-cache-line-size = <64>;
@@ -213,8 +210,6 @@
enable-method = "psci";
capacity-dmips-mhz = <1024>;
clocks = <&scmi_clk SCMI_CLK_CPUB23>;
- assigned-clocks = <&scmi_clk SCMI_CLK_CPUB23>;
- assigned-clock-rates = <816000000>;
cpu-idle-states = <&CPU_SLEEP>;
i-cache-size = <65536>;
i-cache-line-size = <64>;
@@ -439,16 +434,15 @@
#clock-cells = <0>;
};
- pmu_sram: sram@10f000 {
- compatible = "mmio-sram";
- reg = <0x0 0x0010f000 0x0 0x100>;
- ranges = <0 0x0 0x0010f000 0x100>;
- #address-cells = <1>;
- #size-cells = <1>;
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
- scmi_shmem: sram@0 {
+ scmi_shmem: shmem@10f000 {
compatible = "arm,scmi-shmem";
- reg = <0x0 0x100>;
+ reg = <0x0 0x0010f000 0x0 0x100>;
+ no-map;
};
};
@@ -590,6 +584,16 @@
reg = <0x0 0xfd58c000 0x0 0x1000>;
};
+ mipidcphy0_grf: syscon@fd5e8000 {
+ compatible = "rockchip,rk3588-dcphy-grf", "syscon";
+ reg = <0x0 0xfd5e8000 0x0 0x4000>;
+ };
+
+ mipidcphy1_grf: syscon@fd5ec000 {
+ compatible = "rockchip,rk3588-dcphy-grf", "syscon";
+ reg = <0x0 0xfd5ec000 0x0 0x4000>;
+ };
+
vop_grf: syscon@fd5a4000 {
compatible = "rockchip,rk3588-vop-grf", "syscon";
reg = <0x0 0xfd5a4000 0x0 0x2000>;
@@ -1412,6 +1416,62 @@
status = "disabled";
};
+ dsi0: dsi@fde20000 {
+ compatible = "rockchip,rk3588-mipi-dsi2";
+ reg = <0x0 0xfde20000 0x0 0x10000>;
+ interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_DSIHOST0>, <&cru CLK_DSIHOST0>;
+ clock-names = "pclk", "sys";
+ resets = <&cru SRST_P_DSIHOST0>;
+ reset-names = "apb";
+ power-domains = <&power RK3588_PD_VOP>;
+ phys = <&mipidcphy0 PHY_TYPE_DPHY>;
+ phy-names = "dcphy";
+ rockchip,grf = <&vop_grf>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dsi0_in: port@0 {
+ reg = <0>;
+ };
+
+ dsi0_out: port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
+ dsi1: dsi@fde30000 {
+ compatible = "rockchip,rk3588-mipi-dsi2";
+ reg = <0x0 0xfde30000 0x0 0x10000>;
+ interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_DSIHOST1>, <&cru CLK_DSIHOST1>;
+ clock-names = "pclk", "sys";
+ resets = <&cru SRST_P_DSIHOST1>;
+ reset-names = "apb";
+ power-domains = <&power RK3588_PD_VOP>;
+ phys = <&mipidcphy1 PHY_TYPE_DPHY>;
+ phy-names = "dcphy";
+ rockchip,grf = <&vop_grf>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dsi1_in: port@0 {
+ reg = <0>;
+ };
+
+ dsi1_out: port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
hdmi0: hdmi@fde80000 {
compatible = "rockchip,rk3588-dw-hdmi-qp";
reg = <0x0 0xfde80000 0x0 0x20000>;
@@ -1454,6 +1514,34 @@
};
};
+ edp0: edp@fdec0000 {
+ compatible = "rockchip,rk3588-edp";
+ reg = <0x0 0xfdec0000 0x0 0x1000>;
+ clocks = <&cru CLK_EDP0_24M>, <&cru PCLK_EDP0>;
+ clock-names = "dp", "pclk";
+ interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH 0>;
+ phys = <&hdptxphy0>;
+ phy-names = "dp";
+ power-domains = <&power RK3588_PD_VO1>;
+ resets = <&cru SRST_EDP0_24M>, <&cru SRST_P_EDP0>;
+ reset-names = "dp", "apb";
+ rockchip,grf = <&vo1_grf>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ edp0_in: port@0 {
+ reg = <0>;
+ };
+
+ edp0_out: port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
qos_gpu_m0: qos@fdf35000 {
compatible = "rockchip,rk3588-qos", "syscon";
reg = <0x0 0xfdf35000 0x0 0x20>;
@@ -1970,7 +2058,7 @@
reg = <0x0 0xfe378000 0x0 0x200>;
interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&scmi_clk SCMI_HCLK_SECURE_NS>;
- resets = <&scmi_reset 48>;
+ resets = <&scmi_reset SCMI_SRST_H_TRNG_NS>;
};
i2s0_8ch: i2s@fe470000 {
@@ -2935,6 +3023,38 @@
status = "disabled";
};
+ mipidcphy0: phy@feda0000 {
+ compatible = "rockchip,rk3588-mipi-dcphy";
+ reg = <0x0 0xfeda0000 0x0 0x10000>;
+ rockchip,grf = <&mipidcphy0_grf>;
+ clocks = <&cru PCLK_MIPI_DCPHY0>,
+ <&cru CLK_USBDPPHY_MIPIDCPPHY_REF>;
+ clock-names = "pclk", "ref";
+ resets = <&cru SRST_M_MIPI_DCPHY0>,
+ <&cru SRST_P_MIPI_DCPHY0>,
+ <&cru SRST_P_MIPI_DCPHY0_GRF>,
+ <&cru SRST_S_MIPI_DCPHY0>;
+ reset-names = "m_phy", "apb", "grf", "s_phy";
+ #phy-cells = <1>;
+ status = "disabled";
+ };
+
+ mipidcphy1: phy@fedb0000 {
+ compatible = "rockchip,rk3588-mipi-dcphy";
+ reg = <0x0 0xfedb0000 0x0 0x10000>;
+ rockchip,grf = <&mipidcphy1_grf>;
+ clocks = <&cru PCLK_MIPI_DCPHY1>,
+ <&cru CLK_USBDPPHY_MIPIDCPPHY_REF>;
+ clock-names = "pclk", "ref";
+ resets = <&cru SRST_M_MIPI_DCPHY1>,
+ <&cru SRST_P_MIPI_DCPHY1>,
+ <&cru SRST_P_MIPI_DCPHY1_GRF>,
+ <&cru SRST_S_MIPI_DCPHY1>;
+ reset-names = "m_phy", "apb", "grf", "s_phy";
+ #phy-cells = <1>;
+ status = "disabled";
+ };
+
combphy0_ps: phy@fee00000 {
compatible = "rockchip,rk3588-naneng-combphy";
reg = <0x0 0xfee00000 0x0 0x100>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts
index 9eda69722665..3d5c8b753208 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts
@@ -23,17 +23,28 @@
pwms = <&pwm2 0 25000 0>;
};
- hdmi-con {
+ hdmi0-con {
compatible = "hdmi-connector";
type = "a";
port {
- hdmi_con_in: endpoint {
+ hdmi0_con_in: endpoint {
remote-endpoint = <&hdmi0_out_con>;
};
};
};
+ hdmi1-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi1_con_in: endpoint {
+ remote-endpoint = <&hdmi1_out_con>;
+ };
+ };
+ };
+
leds: leds {
compatible = "gpio-leds";
@@ -117,6 +128,10 @@
status = "okay";
};
+&hdmi0_sound {
+ status = "okay";
+};
+
&hdmi0_in {
hdmi0_in_vp0: endpoint {
remote-endpoint = <&vp0_out_hdmi0>;
@@ -125,14 +140,48 @@
&hdmi0_out {
hdmi0_out_con: endpoint {
- remote-endpoint = <&hdmi_con_in>;
+ remote-endpoint = <&hdmi0_con_in>;
+ };
+};
+
+&hdmi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmim2_tx1_cec &hdmim0_tx1_hpd &hdmim1_tx1_scl &hdmim1_tx1_sda>;
+ status = "okay";
+};
+
+&hdmi1_in {
+ hdmi1_in_vp1: endpoint {
+ remote-endpoint = <&vp1_out_hdmi1>;
};
};
+&hdmi1_out {
+ hdmi1_out_con: endpoint {
+ remote-endpoint = <&hdmi1_con_in>;
+ };
+};
+
+&hdmi1_sound {
+ status = "okay";
+};
+
&hdptxphy0 {
status = "okay";
};
+&hdptxphy1 {
+ status = "okay";
+};
+
+&i2s5_8ch {
+ status = "okay";
+};
+
+&i2s6_8ch {
+ status = "okay";
+};
+
/* M.2 E-Key */
&pcie2x1l1 {
reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
@@ -261,3 +310,10 @@
remote-endpoint = <&hdmi0_in_vp0>;
};
};
+
+&vp1 {
+ vp1_out_hdmi1: endpoint@ROCKCHIP_VOP2_EP_HDMI1 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI1>;
+ remote-endpoint = <&hdmi1_in_vp1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
index 6dc10da5215f..738637ecaf55 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
@@ -148,6 +148,40 @@
};
};
+&edp1 {
+ force-hpd;
+ status = "okay";
+
+ aux-bus {
+ panel {
+ compatible = "edp-panel";
+ hpd-absent-delay-ms = <200>;
+ no-hpd;
+ backlight = <&backlight>;
+ power-supply = <&vcc3v3_lcd>;
+
+ port {
+ panel_in_edp: endpoint {
+ remote-endpoint = <&edp_out_panel>;
+ };
+ };
+ };
+ };
+};
+
+&edp1_in {
+ edp1_in_vp2: endpoint {
+ remote-endpoint = <&vp2_out_edp1>;
+ };
+};
+
+&edp1_out {
+ edp_out_panel: endpoint {
+ remote-endpoint = <&panel_in_edp>;
+ };
+};
+
+
/* HDMI CEC is not used */
&hdmi0 {
pinctrl-0 = <&hdmim0_tx0_hpd &hdmim0_tx0_scl &hdmim0_tx0_sda>;
@@ -170,6 +204,10 @@
status = "okay";
};
+&hdptxphy1 {
+ status = "okay";
+};
+
&i2c4 {
status = "okay";
pinctrl-names = "default";
@@ -383,6 +421,8 @@
};
&vop {
+ assigned-clocks = <&cru DCLK_VOP2_SRC>;
+ assigned-clock-parents = <&cru PLL_V0PLL>;
status = "okay";
};
@@ -396,3 +436,10 @@
remote-endpoint = <&hdmi0_in_vp0>;
};
};
+
+&vp2 {
+ vp2_out_edp1: endpoint@ROCKCHIP_VOP2_EP_EDP1 {
+ reg = <ROCKCHIP_VOP2_EP_EDP1>;
+ remote-endpoint = <&edp1_in_vp2>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-evb2-v10.dts b/arch/arm64/boot/dts/rockchip/rk3588-evb2-v10.dts
new file mode 100644
index 000000000000..91fe810d38d8
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-evb2-v10.dts
@@ -0,0 +1,931 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ *
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/clock/rockchip,rk3588-cru.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "rk3588.dtsi"
+
+/ {
+ model = "Rockchip RK3588 EVB2 V10 Board";
+ compatible = "rockchip,rk3588-evb2-v10", "rockchip,rk3588";
+
+ aliases {
+ mmc0 = &sdhci;
+ serial2 = &uart2;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ hdmi-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi0_out_con>;
+ };
+ };
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&hym8563>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_enable_h>;
+ /*
+ * On the module itself this is one of these (depending
+ * on the actual card populated):
+ * - SDIO_RESET_L_WL_REG_ON
+ * - PDN (power down when low)
+ */
+ post-power-on-delay-ms = <200>;
+ reset-gpios = <&gpio2 RK_PB6 GPIO_ACTIVE_LOW>;
+ };
+
+ vcc12v_dcin: vcc12v-dcin-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc5v0_host: vcc5v0-host {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_host";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&gpio4 RK_PA1 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&vcc5v0_usb>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+ };
+
+ vcc5v0_usb: regulator-vcc5v0-usb {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_usb";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_usbdcin>;
+ };
+
+ vcc5v0_sys: vcc5v0-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_usbdcin: regulator-vcc5v0-usbdcin {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_usbdcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ sram-supply = <&vdd_gpu_mem_s0>;
+ status = "okay";
+};
+
+&hdmi0 {
+ status = "okay";
+};
+
+&hdmi0_in {
+ hdmi0_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi0>;
+ };
+};
+
+&hdmi0_out {
+ hdmi0_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdptxphy0 {
+ status = "okay";
+};
+
+&i2c2 {
+ status = "okay";
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-output-names = "hym8563";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PD4 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hym8563_int>;
+ wakeup-source;
+ };
+};
+
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
+&pinctrl {
+ hym8563 {
+ hym8563_int: hym8563-int {
+ rockchip,pins = <0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ usb {
+ vcc5v0_host_en: vcc5v0-host-en {
+ rockchip,pins = <4 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wifi {
+ wifi_enable_h: wifi-enable-h {
+ rockchip,pins = <2 RK_PB6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ wifi_host_wake_irq: wifi-host-wake-irq {
+ rockchip,pins = <2 RK_PB4 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+};
+
+&sdhci {
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ non-removable;
+ no-sd;
+ no-sdio;
+ status = "okay";
+};
+
+&sdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdiom0_pins>;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ disable-wp;
+ keep-power-in-suspend;
+ max-frequency = <150000000>;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ no-mmc;
+ non-removable;
+ no-sd;
+ sd-uhs-sdr104;
+ status = "okay";
+
+ brcmf: wifi@1 {
+ compatible = "brcm,bcm4329-fmac";
+ reg = <1>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <RK_PB4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ pinctrl-0 = <&wifi_host_wake_irq>;
+ pinctrl-names = "default";
+ };
+};
+
+&spi2 {
+ status = "okay";
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ num-cs = <2>;
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ reg = <0x0>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ pinctrl-names = "default";
+ spi-max-frequency = <1000000>;
+ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc5v0_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc5v0_sys>;
+ vcc13-supply = <&vcc5v0_sys>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc5v0_sys>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+
+ regulators {
+ vdd_gpu_s0: dcdc-reg1 {
+ /* regulator coupling requires always-on */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-enable-ramp-delay = <400>;
+ regulator-coupled-with = <&vdd_gpu_mem_s0>;
+ regulator-coupled-max-spread = <10000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_npu_s0: dcdc-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_npu_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_log_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_vdenc_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+
+ };
+
+ vdd_gpu_mem_s0: dcdc-reg5 {
+ /* regulator coupling requires always-on */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <400>;
+ regulator-name = "vdd_gpu_mem_s0";
+ regulator-coupled-with = <&vdd_gpu_s0>;
+ regulator-coupled-max-spread = <10000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+
+ };
+
+ vdd_npu_mem_s0: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_npu_mem_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_2v0_pldo_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vdd_vdenc_mem_s0: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_vdenc_mem_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_1v1_nldo_s3: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_1v1_nldo_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1100000>;
+ };
+ };
+
+ avcc_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "avcc_1v8_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd1_1v8_ddr_s3: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd1_1v8_ddr_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avcc_1v8_codec_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "avcc_1v8_codec_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_s3: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_3v3_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vccio_sd_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_1v8_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vccio_1v8_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_0v75_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd2l_0v9_ddr_s3: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdd2l_0v9_ddr_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vdd_0v75_hdmi_edp_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_hdmi_edp_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ avdd_0v75_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "avdd_0v75_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v85_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_0v85_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+
+ pmic@1 {
+ compatible = "rockchip,rk806";
+ reg = <0x01>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&rk806_slave_dvs1_null>, <&rk806_slave_dvs2_null>,
+ <&rk806_slave_dvs3_null>;
+ pinctrl-names = "default";
+ spi-max-frequency = <1000000>;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc5v0_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc5v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_2v0_pldo_s3>;
+ vcca-supply = <&vcc5v0_sys>;
+
+ rk806_slave_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_slave_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_slave_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_cpu_big1_s0: dcdc-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_big1_mem_s0>;
+ regulator-coupled-max-spread = <10000>;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big0_s0: dcdc-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_big0_mem_s0>;
+ regulator-coupled-max-spread = <10000>;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_lit_mem_s0>;
+ regulator-coupled-max-spread = <10000>;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_lit_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_s0: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_3v3_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_mem_s0: dcdc-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_big1_s0>;
+ regulator-coupled-max-spread = <10000>;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_big1_mem_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+
+ vdd_cpu_big0_mem_s0: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_big0_s0>;
+ regulator-coupled-max-spread = <10000>;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_big0_mem_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s0: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_1v8_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_mem_s0: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_lit_s0>;
+ regulator-coupled-max-spread = <10000>;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_lit_mem_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_ddr_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_cam_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_1v8_cam_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ avdd1v8_ddr_pll_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "avdd1v8_ddr_pll_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_1v8_pll_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_1v8_pll_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_sd_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_3v3_sd_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_2v8_cam_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_2v8_cam_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ pldo6_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "pldo6_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_pll_s0: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_0v75_pll_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_ddr_pll_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ avdd_0v85_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "avdd_0v85_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ avdd_1v2_cam_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "avdd_1v2_cam_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ avdd_1v2_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "avdd_1v2_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&u2phy2 {
+ status = "okay";
+};
+
+&u2phy2_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&u2phy3 {
+ status = "okay";
+};
+
+&u2phy3_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
+
+&usbdp_phy0 {
+ rockchip,dp-lane-mux = <2 3>;
+ status = "okay";
+};
+
+&usbdp_phy1 {
+ rockchip,dp-lane-mux = <2 3>;
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&vop {
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi0: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi0_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
index 099edb3fd0f6..90414486e466 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
@@ -252,6 +252,34 @@
};
};
+ edp1: edp@fded0000 {
+ compatible = "rockchip,rk3588-edp";
+ reg = <0x0 0xfded0000 0x0 0x1000>;
+ clocks = <&cru CLK_EDP1_24M>, <&cru PCLK_EDP1>;
+ clock-names = "dp", "pclk";
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH 0>;
+ phys = <&hdptxphy1>;
+ phy-names = "dp";
+ power-domains = <&power RK3588_PD_VO1>;
+ resets = <&cru SRST_EDP1_24M>, <&cru SRST_P_EDP1>;
+ reset-names = "dp", "apb";
+ rockchip,grf = <&vo1_grf>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ edp1_in: port@0 {
+ reg = <0>;
+ };
+
+ edp1_out: port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
hdmi_receiver: hdmi_receiver@fdee0000 {
compatible = "rockchip,rk3588-hdmirx-ctrler", "snps,dw-hdmi-rx";
reg = <0x0 0xfdee0000 0x0 0x6000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi
index 1af0a30866f6..af431fdcbea7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi
@@ -222,6 +222,10 @@
compatible = "realtek,rt5616";
reg = <0x1b>;
#sound-dai-cells = <0>;
+ assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
+ assigned-clock-rates = <12288000>;
+ clocks = <&cru I2S0_8CH_MCLKOUT>;
+ clock-names = "mclk";
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
index 9fceea6c1398..ebe77cdd24e8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
@@ -303,6 +303,10 @@
};
};
+&hdmi0_sound {
+ status = "okay";
+};
+
&hdptxphy0 {
status = "okay";
};
@@ -512,6 +516,10 @@
};
};
+&i2s5_8ch {
+ status = "okay";
+};
+
&mdio0 {
rgmii_phy: ethernet-phy@6 {
/* KSZ9031 or KSZ9131 */
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi
index bbe500cc924b..3d8b6f0c5541 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi
@@ -52,6 +52,17 @@
};
};
+ hdmi1-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi1_con_in: endpoint {
+ remote-endpoint = <&hdmi1_out_con>;
+ };
+ };
+ };
+
ir-receiver {
compatible = "gpio-ir-receiver";
gpios = <&gpio0 RK_PD4 GPIO_ACTIVE_LOW>;
@@ -163,8 +174,6 @@
gpio = <&gpio1 RK_PD2 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&typec5v_pwren>;
- regulator-always-on;
- regulator-boot-on;
regulator-name = "vbus5v0_typec";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@@ -177,8 +186,6 @@
gpio = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&usb5v_pwren>;
- regulator-always-on;
- regulator-boot-on;
regulator-name = "vbus5v0_usb";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@@ -360,10 +367,38 @@
};
};
+&hdmi0_sound {
+ status = "okay";
+};
+
+&hdmi1 {
+ status = "okay";
+};
+
+&hdmi1_in {
+ hdmi1_in_vp1: endpoint {
+ remote-endpoint = <&vp1_out_hdmi1>;
+ };
+};
+
+&hdmi1_out {
+ hdmi1_out_con: endpoint {
+ remote-endpoint = <&hdmi1_con_in>;
+ };
+};
+
+&hdmi1_sound {
+ status = "okay";
+};
+
&hdptxphy0 {
status = "okay";
};
+&hdptxphy1 {
+ status = "okay";
+};
+
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0m2_xfer>;
@@ -426,24 +461,30 @@
};
&i2c6 {
- clock-frequency = <200000>;
status = "okay";
- fusb302: typec-portc@22 {
+ usbc0: usb-typec@22 {
compatible = "fcs,fusb302";
reg = <0x22>;
interrupt-parent = <&gpio0>;
interrupts = <RK_PD3 IRQ_TYPE_LEVEL_LOW>;
- pinctrl-0 = <&usbc0_int>;
pinctrl-names = "default";
+ pinctrl-0 = <&usbc0_int>;
vbus-supply = <&vbus5v0_typec>;
+ status = "okay";
connector {
compatible = "usb-c-connector";
data-role = "dual";
label = "USB-C";
- power-role = "source";
+ op-sink-microwatt = <1000000>;
+ /* fusb302 supports PD Rev 2.0 Ver 1.2 */
+ pd-revision = /bits/ 8 <0x2 0x0 0x1 0x2>;
+ power-role = "dual";
+ sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
source-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)>;
+ try-power-role = "source";
+ typec-power-opmode = "1.5A";
ports {
#address-cells = <1>;
@@ -531,6 +572,14 @@
};
};
+&i2s5_8ch {
+ status = "okay";
+};
+
+&i2s6_8ch {
+ status = "okay";
+};
+
&pcie2x1l0 {
reset-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>;
vpcie3v3-supply = <&vcc_3v3_pcie20>;
@@ -1088,9 +1137,8 @@
};
&usb_host0_xhci {
- dr_mode = "host";
- status = "okay";
usb-role-switch;
+ status = "okay";
port {
usb_host0_xhci_drd_sw: endpoint {
@@ -1126,3 +1174,10 @@
remote-endpoint = <&hdmi0_in_vp0>;
};
};
+
+&vp1 {
+ vp1_out_hdmi1: endpoint@ROCKCHIP_VOP2_EP_HDMI1 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI1>;
+ remote-endpoint = <&hdmi1_in_vp1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi
index f748c6f760d8..9343dfc86941 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi
@@ -87,6 +87,20 @@
rockchip,pins = <3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
+
+ wireless-bluetooth {
+ bt_reg_on: bt-reg-on {
+ rockchip,pins = <4 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_wake_host: bt-wake-host {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ host_wake_bt: host-wake-bt {
+ rockchip,pins = <4 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
};
&pwm4 {
@@ -120,6 +134,28 @@
phy-supply = <&vcc5v0_usb20>;
};
+&uart7 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart7m0_xfer &uart7m0_ctsn &uart7m0_rtsn>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ clocks = <&hym8563>;
+ clock-names = "lpo";
+ device-wakeup-gpios = <&gpio4 RK_PC5 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA0 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "host-wakeup";
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_reg_on>, <&host_wake_bt>, <&bt_wake_host>;
+ shutdown-gpios = <&gpio4 RK_PC4 GPIO_ACTIVE_HIGH>;
+ vbat-supply = <&vcc_3v3_s3>;
+ vddio-supply = <&vcc_1v8_s3>;
+ };
+};
+
&usb_host0_xhci {
dr_mode = "host";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b-plus.dts b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b-plus.dts
new file mode 100644
index 000000000000..74c7b6502e4d
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b-plus.dts
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include "rk3588-rock-5b.dtsi"
+
+/ {
+ model = "Radxa ROCK 5B+";
+ compatible = "radxa,rock-5b-plus", "rockchip,rk3588";
+
+ rfkill-wwan {
+ compatible = "rfkill-gpio";
+ label = "rfkill-m2-wwan";
+ radio-type = "wwan";
+ shutdown-gpios = <&gpio3 RK_PA6 GPIO_ACTIVE_HIGH>;
+ };
+
+ vcc3v3_4g: regulator-vcc3v3-4g {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio1 RK_PD2 GPIO_ACTIVE_HIGH>;
+ /* pinctrl for the GPIO is requested by vcc3v3_pcie2x1l0 */
+ regulator-name = "vcc3v3_4g";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <50000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_wwan_pwr: regulator-vcc3v3-wwan {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio2 RK_PB1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wwan_power_en>;
+ regulator-name = "vcc3v3_wwan_pwr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc3v3_4g>;
+ };
+};
+
+&gpio0 {
+ wwan-disable2-n-hog {
+ gpios = <RK_PB2 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "M.2 B-key W_DISABLE2#";
+ gpio-hog;
+ };
+};
+
+&gpio2 {
+ wwan-reset-n-hog {
+ gpios = <RK_PB3 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "M.2 B-key RESET#";
+ gpio-hog;
+ };
+
+ wwan-wake-n-hog {
+ gpios = <RK_PB2 GPIO_ACTIVE_LOW>;
+ input;
+ line-name = "M.2 B-key WoWWAN#";
+ gpio-hog;
+ };
+};
+
+&pcie30phy {
+ data-lanes = <1 1 2 2>;
+};
+
+&pcie3x2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie3x2_rst>;
+ reset-gpios = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie30>;
+ status = "okay";
+};
+
+&pcie3x4 {
+ num-lanes = <2>;
+};
+
+&pinctrl {
+ wwan {
+ wwan_power_en: wwan-pwr-en {
+ rockchip,pins = <2 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie3 {
+ pcie3x2_rst: pcie3x2-rst {
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ vcc5v0_host_en: vcc5v0-host-en {
+ rockchip,pins = <1 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&vcc5v0_host {
+ enable-active-high;
+ gpio = <&gpio1 RK_PA1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
index d22068475c5d..9407a7c9910a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
@@ -2,532 +2,11 @@
/dts-v1/;
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/leds/common.h>
-#include <dt-bindings/soc/rockchip,vop2.h>
-#include "rk3588.dtsi"
+#include "rk3588-rock-5b.dtsi"
/ {
model = "Radxa ROCK 5B";
compatible = "radxa,rock-5b", "rockchip,rk3588";
-
- aliases {
- mmc0 = &sdhci;
- mmc1 = &sdmmc;
- mmc2 = &sdio;
- };
-
- chosen {
- stdout-path = "serial2:1500000n8";
- };
-
- analog-sound {
- compatible = "audio-graph-card";
- label = "rk3588-es8316";
-
- widgets = "Microphone", "Mic Jack",
- "Headphone", "Headphones";
-
- routing = "MIC2", "Mic Jack",
- "Headphones", "HPOL",
- "Headphones", "HPOR";
-
- dais = <&i2s0_8ch_p0>;
- hp-det-gpios = <&gpio1 RK_PD5 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&hp_detect>;
- };
-
- hdmi0-con {
- compatible = "hdmi-connector";
- type = "a";
-
- port {
- hdmi0_con_in: endpoint {
- remote-endpoint = <&hdmi0_out_con>;
- };
- };
- };
-
- hdmi1-con {
- compatible = "hdmi-connector";
- type = "a";
-
- port {
- hdmi1_con_in: endpoint {
- remote-endpoint = <&hdmi1_out_con>;
- };
- };
- };
-
- leds {
- compatible = "gpio-leds";
- pinctrl-names = "default";
- pinctrl-0 = <&led_rgb_b>;
-
- led_rgb_b {
- function = LED_FUNCTION_STATUS;
- color = <LED_COLOR_ID_BLUE>;
- gpios = <&gpio0 RK_PB7 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "heartbeat";
- };
- };
-
- fan: pwm-fan {
- compatible = "pwm-fan";
- cooling-levels = <0 120 150 180 210 240 255>;
- fan-supply = <&vcc5v0_sys>;
- pwms = <&pwm1 0 50000 0>;
- #cooling-cells = <2>;
- };
-
- rfkill {
- compatible = "rfkill-gpio";
- label = "rfkill-m2-wlan";
- radio-type = "wlan";
- shutdown-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
- };
-
- rfkill-bt {
- compatible = "rfkill-gpio";
- label = "rfkill-m2-bt";
- radio-type = "bluetooth";
- shutdown-gpios = <&gpio3 RK_PD5 GPIO_ACTIVE_HIGH>;
- };
-
- vcc3v3_pcie2x1l0: regulator-vcc3v3-pcie2x1l0 {
- compatible = "regulator-fixed";
- enable-active-high;
- gpios = <&gpio1 RK_PD2 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&pcie2_0_vcc3v3_en>;
- regulator-name = "vcc3v3_pcie2x1l0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- startup-delay-us = <50000>;
- vin-supply = <&vcc5v0_sys>;
- };
-
- vcc3v3_pcie2x1l2: regulator-vcc3v3-pcie2x1l2 {
- compatible = "regulator-fixed";
- regulator-name = "vcc3v3_pcie2x1l2";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- startup-delay-us = <5000>;
- vin-supply = <&vcc_3v3_s3>;
- };
-
- vcc3v3_pcie30: regulator-vcc3v3-pcie30 {
- compatible = "regulator-fixed";
- enable-active-high;
- gpios = <&gpio1 RK_PA4 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&pcie3_vcc3v3_en>;
- regulator-name = "vcc3v3_pcie30";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- startup-delay-us = <5000>;
- vin-supply = <&vcc5v0_sys>;
- };
-
- vcc5v0_host: regulator-vcc5v0-host {
- compatible = "regulator-fixed";
- regulator-name = "vcc5v0_host";
- regulator-boot-on;
- regulator-always-on;
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- enable-active-high;
- gpio = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&vcc5v0_host_en>;
- vin-supply = <&vcc5v0_sys>;
- };
-
- vcc5v0_sys: regulator-vcc5v0-sys {
- compatible = "regulator-fixed";
- regulator-name = "vcc5v0_sys";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- };
-
- vcc_1v1_nldo_s3: regulator-vcc-1v1-nldo-s3 {
- compatible = "regulator-fixed";
- regulator-name = "vcc_1v1_nldo_s3";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1100000>;
- regulator-max-microvolt = <1100000>;
- vin-supply = <&vcc5v0_sys>;
- };
-};
-
-&combphy0_ps {
- status = "okay";
-};
-
-&combphy1_ps {
- status = "okay";
-};
-
-&combphy2_psu {
- status = "okay";
-};
-
-&cpu_b0 {
- cpu-supply = <&vdd_cpu_big0_s0>;
-};
-
-&cpu_b1 {
- cpu-supply = <&vdd_cpu_big0_s0>;
-};
-
-&cpu_b2 {
- cpu-supply = <&vdd_cpu_big1_s0>;
-};
-
-&cpu_b3 {
- cpu-supply = <&vdd_cpu_big1_s0>;
-};
-
-&cpu_l0 {
- cpu-supply = <&vdd_cpu_lit_s0>;
-};
-
-&cpu_l1 {
- cpu-supply = <&vdd_cpu_lit_s0>;
-};
-
-&cpu_l2 {
- cpu-supply = <&vdd_cpu_lit_s0>;
-};
-
-&cpu_l3 {
- cpu-supply = <&vdd_cpu_lit_s0>;
-};
-
-&gpu {
- mali-supply = <&vdd_gpu_s0>;
- status = "okay";
-};
-
-&hdmi0 {
- status = "okay";
-};
-
-&hdmi0_in {
- hdmi0_in_vp0: endpoint {
- remote-endpoint = <&vp0_out_hdmi0>;
- };
-};
-
-&hdmi0_out {
- hdmi0_out_con: endpoint {
- remote-endpoint = <&hdmi0_con_in>;
- };
-};
-
-&hdmi0_sound {
- status = "okay";
-};
-
-&hdmi1 {
- pinctrl-0 = <&hdmim0_tx1_cec &hdmim0_tx1_hpd
- &hdmim1_tx1_scl &hdmim1_tx1_sda>;
- status = "okay";
-};
-
-&hdmi1_in {
- hdmi1_in_vp1: endpoint {
- remote-endpoint = <&vp1_out_hdmi1>;
- };
-};
-
-&hdmi1_out {
- hdmi1_out_con: endpoint {
- remote-endpoint = <&hdmi1_con_in>;
- };
-};
-
-&hdmi1_sound {
- status = "okay";
-};
-
-&hdmi_receiver_cma {
- status = "okay";
-};
-
-&hdmi_receiver {
- hpd-gpios = <&gpio1 RK_PC6 GPIO_ACTIVE_LOW>;
- pinctrl-0 = <&hdmim1_rx_cec &hdmim1_rx_hpdin &hdmim1_rx_scl &hdmim1_rx_sda &hdmirx_hpd>;
- pinctrl-names = "default";
- status = "okay";
-};
-
-&hdptxphy0 {
- status = "okay";
-};
-
-&hdptxphy1 {
- status = "okay";
-};
-
-&i2c0 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0m2_xfer>;
- status = "okay";
-
- vdd_cpu_big0_s0: regulator@42 {
- compatible = "rockchip,rk8602";
- reg = <0x42>;
- fcs,suspend-voltage-selector = <1>;
- regulator-name = "vdd_cpu_big0_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <1050000>;
- regulator-ramp-delay = <2300>;
- vin-supply = <&vcc5v0_sys>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_cpu_big1_s0: regulator@43 {
- compatible = "rockchip,rk8603", "rockchip,rk8602";
- reg = <0x43>;
- fcs,suspend-voltage-selector = <1>;
- regulator-name = "vdd_cpu_big1_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <1050000>;
- regulator-ramp-delay = <2300>;
- vin-supply = <&vcc5v0_sys>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-};
-
-&i2c6 {
- status = "okay";
-
- hym8563: rtc@51 {
- compatible = "haoyu,hym8563";
- reg = <0x51>;
- #clock-cells = <0>;
- clock-output-names = "hym8563";
- pinctrl-names = "default";
- pinctrl-0 = <&hym8563_int>;
- interrupt-parent = <&gpio0>;
- interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
- wakeup-source;
- };
-};
-
-&i2c7 {
- status = "okay";
-
- es8316: audio-codec@11 {
- compatible = "everest,es8316";
- reg = <0x11>;
- clocks = <&cru I2S0_8CH_MCLKOUT>;
- clock-names = "mclk";
- assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
- assigned-clock-rates = <12288000>;
- #sound-dai-cells = <0>;
-
- port {
- es8316_p0_0: endpoint {
- remote-endpoint = <&i2s0_8ch_p0_0>;
- };
- };
- };
-};
-
-&i2s0_8ch {
- pinctrl-names = "default";
- pinctrl-0 = <&i2s0_lrck
- &i2s0_mclk
- &i2s0_sclk
- &i2s0_sdi0
- &i2s0_sdo0>;
- status = "okay";
-
- i2s0_8ch_p0: port {
- i2s0_8ch_p0_0: endpoint {
- dai-format = "i2s";
- mclk-fs = <256>;
- remote-endpoint = <&es8316_p0_0>;
- };
- };
-};
-
-&i2s5_8ch {
- status = "okay";
-};
-
-&i2s6_8ch {
- status = "okay";
-};
-
-&package_thermal {
- polling-delay = <1000>;
-
- trips {
- package_fan0: package-fan0 {
- temperature = <55000>;
- hysteresis = <2000>;
- type = "active";
- };
-
- package_fan1: package-fan1 {
- temperature = <65000>;
- hysteresis = <2000>;
- type = "active";
- };
- };
-
- cooling-maps {
- map0 {
- trip = <&package_fan0>;
- cooling-device = <&fan THERMAL_NO_LIMIT 1>;
- };
-
- map1 {
- trip = <&package_fan1>;
- cooling-device = <&fan 2 THERMAL_NO_LIMIT>;
- };
- };
-};
-
-&pcie2x1l0 {
- pinctrl-names = "default";
- pinctrl-0 = <&pcie2_0_rst>;
- reset-gpios = <&gpio4 RK_PA5 GPIO_ACTIVE_HIGH>;
- vpcie3v3-supply = <&vcc3v3_pcie2x1l0>;
- status = "okay";
-};
-
-&pcie2x1l2 {
- pinctrl-names = "default";
- pinctrl-0 = <&pcie2_2_rst>;
- reset-gpios = <&gpio3 RK_PB0 GPIO_ACTIVE_HIGH>;
- vpcie3v3-supply = <&vcc3v3_pcie2x1l2>;
- status = "okay";
-};
-
-&pcie30phy {
- status = "okay";
-};
-
-&pcie3x4 {
- pinctrl-names = "default";
- pinctrl-0 = <&pcie3_rst>;
- reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
- vpcie3v3-supply = <&vcc3v3_pcie30>;
- status = "okay";
-};
-
-&pd_gpu {
- domain-supply = <&vdd_gpu_s0>;
-};
-
-&pinctrl {
- hdmirx {
- hdmirx_hpd: hdmirx-5v-detection {
- rockchip,pins = <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
-
- hym8563 {
- hym8563_int: hym8563-int {
- rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
-
- leds {
- led_rgb_b: led-rgb-b {
- rockchip,pins = <0 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
-
- sound {
- hp_detect: hp-detect {
- rockchip,pins = <1 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
-
- pcie2 {
- pcie2_0_rst: pcie2-0-rst {
- rockchip,pins = <4 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-
- pcie2_0_vcc3v3_en: pcie2-0-vcc-en {
- rockchip,pins = <1 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-
- pcie2_2_rst: pcie2-2-rst {
- rockchip,pins = <3 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
-
- pcie3 {
- pcie3_rst: pcie3-rst {
- rockchip,pins = <4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-
- pcie3_vcc3v3_en: pcie3-vcc3v3-en {
- rockchip,pins = <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
-
- usb {
- vcc5v0_host_en: vcc5v0-host-en {
- rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
-};
-
-&pwm1 {
- status = "okay";
-};
-
-&saradc {
- vref-supply = <&avcc_1v8_s0>;
- status = "okay";
-};
-
-&sdhci {
- bus-width = <8>;
- no-sdio;
- no-sd;
- non-removable;
- mmc-hs400-1_8v;
- mmc-hs400-enhanced-strobe;
- status = "okay";
-};
-
-&sdmmc {
- max-frequency = <200000000>;
- no-sdio;
- no-mmc;
- bus-width = <4>;
- cap-mmc-highspeed;
- cap-sd-highspeed;
- cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
- disable-wp;
- sd-uhs-sdr104;
- vmmc-supply = <&vcc_3v3_s3>;
- vqmmc-supply = <&vccio_sd_s0>;
- status = "okay";
};
&sdio {
@@ -551,434 +30,23 @@
status = "okay";
};
-&sfc {
- pinctrl-names = "default";
- pinctrl-0 = <&fspim2_pins>;
- status = "okay";
-
- flash@0 {
- compatible = "jedec,spi-nor";
- reg = <0>;
- spi-max-frequency = <104000000>;
- spi-rx-bus-width = <4>;
- spi-tx-bus-width = <1>;
- };
-};
-
&uart6 {
pinctrl-names = "default";
pinctrl-0 = <&uart6m1_xfer &uart6m1_ctsn &uart6m1_rtsn>;
status = "okay";
};
-&spi2 {
- status = "okay";
- assigned-clocks = <&cru CLK_SPI2>;
- assigned-clock-rates = <200000000>;
- pinctrl-names = "default";
- pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
- num-cs = <1>;
-
- pmic@0 {
- compatible = "rockchip,rk806";
- spi-max-frequency = <1000000>;
- reg = <0x0>;
-
- interrupt-parent = <&gpio0>;
- interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
- <&rk806_dvs2_null>, <&rk806_dvs3_null>;
-
- system-power-controller;
-
- vcc1-supply = <&vcc5v0_sys>;
- vcc2-supply = <&vcc5v0_sys>;
- vcc3-supply = <&vcc5v0_sys>;
- vcc4-supply = <&vcc5v0_sys>;
- vcc5-supply = <&vcc5v0_sys>;
- vcc6-supply = <&vcc5v0_sys>;
- vcc7-supply = <&vcc5v0_sys>;
- vcc8-supply = <&vcc5v0_sys>;
- vcc9-supply = <&vcc5v0_sys>;
- vcc10-supply = <&vcc5v0_sys>;
- vcc11-supply = <&vcc_2v0_pldo_s3>;
- vcc12-supply = <&vcc5v0_sys>;
- vcc13-supply = <&vcc_1v1_nldo_s3>;
- vcc14-supply = <&vcc_1v1_nldo_s3>;
- vcca-supply = <&vcc5v0_sys>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl1";
- function = "pin_fun0";
- };
-
- rk806_dvs2_null: dvs2-null-pins {
- pins = "gpio_pwrctrl2";
- function = "pin_fun0";
- };
-
- rk806_dvs3_null: dvs3-null-pins {
- pins = "gpio_pwrctrl3";
- function = "pin_fun0";
- };
-
- regulators {
- vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 {
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <950000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vdd_gpu_s0";
- regulator-enable-ramp-delay = <400>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <950000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vdd_cpu_lit_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_log_s0: dcdc-reg3 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <675000>;
- regulator-max-microvolt = <750000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vdd_log_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <750000>;
- };
- };
-
- vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <950000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vdd_vdenc_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_ddr_s0: dcdc-reg5 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <675000>;
- regulator-max-microvolt = <900000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vdd_ddr_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <850000>;
- };
- };
-
- vdd2_ddr_s3: dcdc-reg6 {
- regulator-always-on;
- regulator-boot-on;
- regulator-name = "vdd2_ddr_s3";
-
- regulator-state-mem {
- regulator-on-in-suspend;
- };
- };
-
- vcc_2v0_pldo_s3: dcdc-reg7 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <2000000>;
- regulator-max-microvolt = <2000000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vdd_2v0_pldo_s3";
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <2000000>;
- };
- };
-
- vcc_3v3_s3: dcdc-reg8 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc_3v3_s3";
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <3300000>;
- };
- };
-
- vddq_ddr_s0: dcdc-reg9 {
- regulator-always-on;
- regulator-boot-on;
- regulator-name = "vddq_ddr_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vcc_1v8_s3: dcdc-reg10 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-name = "vcc_1v8_s3";
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <1800000>;
- };
- };
-
- avcc_1v8_s0: pldo-reg1 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-name = "avcc_1v8_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vcc_1v8_s0: pldo-reg2 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-name = "vcc_1v8_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <1800000>;
- };
- };
-
- avdd_1v2_s0: pldo-reg3 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-name = "avdd_1v2_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vcc_3v3_s0: pldo-reg4 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vcc_3v3_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vccio_sd_s0: pldo-reg5 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vccio_sd_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- pldo6_s3: pldo-reg6 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-name = "pldo6_s3";
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <1800000>;
- };
- };
-
- vdd_0v75_s3: nldo-reg1 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <750000>;
- regulator-max-microvolt = <750000>;
- regulator-name = "vdd_0v75_s3";
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <750000>;
- };
- };
-
- vdd_ddr_pll_s0: nldo-reg2 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <850000>;
- regulator-name = "vdd_ddr_pll_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <850000>;
- };
- };
-
- avdd_0v75_s0: nldo-reg3 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <750000>;
- regulator-max-microvolt = <750000>;
- regulator-name = "avdd_0v75_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_0v85_s0: nldo-reg4 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <850000>;
- regulator-name = "vdd_0v85_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_0v75_s0: nldo-reg5 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <750000>;
- regulator-max-microvolt = <750000>;
- regulator-name = "vdd_0v75_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
+&pinctrl {
+ usb {
+ vcc5v0_host_en: vcc5v0-host-en {
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
-&tsadc {
- status = "okay";
-};
-
-&uart2 {
- pinctrl-0 = <&uart2m0_xfer>;
- status = "okay";
-};
-
-&u2phy1 {
- status = "okay";
-};
-
-&u2phy1_otg {
- status = "okay";
-};
-
-&u2phy2 {
- status = "okay";
-};
-
-&u2phy2_host {
- /* connected to USB hub, which is powered by vcc5v0_sys */
- phy-supply = <&vcc5v0_sys>;
- status = "okay";
-};
-
-&u2phy3 {
- status = "okay";
-};
-
-&u2phy3_host {
- phy-supply = <&vcc5v0_host>;
- status = "okay";
-};
-
-&usbdp_phy1 {
- status = "okay";
-};
-
-&usb_host0_ehci {
- status = "okay";
-};
-
-&usb_host0_ohci {
- status = "okay";
-};
-
-&usb_host1_ehci {
- status = "okay";
-};
-
-&usb_host1_ohci {
- status = "okay";
-};
-
-&usb_host1_xhci {
- dr_mode = "host";
- status = "okay";
-};
-
-&usb_host2_xhci {
- status = "okay";
-};
-
-&vop {
- status = "okay";
-};
-
-&vop_mmu {
- status = "okay";
-};
-
-&vp0 {
- vp0_out_hdmi0: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
- reg = <ROCKCHIP_VOP2_EP_HDMI0>;
- remote-endpoint = <&hdmi0_in_vp0>;
- };
-};
-
-&vp1 {
- vp1_out_hdmi1: endpoint@ROCKCHIP_VOP2_EP_HDMI1 {
- reg = <ROCKCHIP_VOP2_EP_HDMI1>;
- remote-endpoint = <&hdmi1_in_vp1>;
- };
+&vcc5v0_host {
+ enable-active-high;
+ gpio = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dtsi
new file mode 100644
index 000000000000..6052787d2560
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dtsi
@@ -0,0 +1,945 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "rk3588.dtsi"
+
+/ {
+ aliases {
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc;
+ mmc2 = &sdio;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ analog-sound {
+ compatible = "audio-graph-card";
+ label = "rk3588-es8316";
+
+ widgets = "Microphone", "Mic Jack",
+ "Headphone", "Headphones";
+
+ routing = "MIC2", "Mic Jack",
+ "Headphones", "HPOL",
+ "Headphones", "HPOR";
+
+ dais = <&i2s0_8ch_p0>;
+ hp-det-gpios = <&gpio1 RK_PD5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hp_detect>;
+ };
+
+ hdmi0-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi0_con_in: endpoint {
+ remote-endpoint = <&hdmi0_out_con>;
+ };
+ };
+ };
+
+ hdmi1-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi1_con_in: endpoint {
+ remote-endpoint = <&hdmi1_out_con>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_rgb_b>;
+
+ led_rgb_b {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio0 RK_PB7 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ cooling-levels = <0 120 150 180 210 240 255>;
+ fan-supply = <&vcc5v0_sys>;
+ pwms = <&pwm1 0 50000 0>;
+ #cooling-cells = <2>;
+ };
+
+ rfkill {
+ compatible = "rfkill-gpio";
+ label = "rfkill-m2-wlan";
+ radio-type = "wlan";
+ shutdown-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
+ };
+
+ rfkill-bt {
+ compatible = "rfkill-gpio";
+ label = "rfkill-m2-bt";
+ radio-type = "bluetooth";
+ shutdown-gpios = <&gpio3 RK_PD5 GPIO_ACTIVE_HIGH>;
+ };
+
+ vcc3v3_pcie2x1l0: regulator-vcc3v3-pcie2x1l0 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio1 RK_PD2 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_0_vcc3v3_en>;
+ regulator-name = "vcc3v3_pcie2x1l0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <50000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_pcie2x1l2: regulator-vcc3v3-pcie2x1l2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_pcie2x1l2";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <5000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc3v3_pcie30: regulator-vcc3v3-pcie30 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio1 RK_PA4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie3_vcc3v3_en>;
+ regulator-name = "vcc3v3_pcie30";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <5000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_host: regulator-vcc5v0-host {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_host";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_sys: regulator-vcc5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc_1v1_nldo_s3: regulator-vcc-1v1-nldo-s3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v1_nldo_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+};
+
+&combphy0_ps {
+ status = "okay";
+};
+
+&combphy1_ps {
+ status = "okay";
+};
+
+&combphy2_psu {
+ status = "okay";
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
+&hdmi0 {
+ status = "okay";
+};
+
+&hdmi0_in {
+ hdmi0_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi0>;
+ };
+};
+
+&hdmi0_out {
+ hdmi0_out_con: endpoint {
+ remote-endpoint = <&hdmi0_con_in>;
+ };
+};
+
+&hdmi0_sound {
+ status = "okay";
+};
+
+&hdmi1 {
+ pinctrl-0 = <&hdmim0_tx1_cec &hdmim0_tx1_hpd
+ &hdmim1_tx1_scl &hdmim1_tx1_sda>;
+ status = "okay";
+};
+
+&hdmi1_in {
+ hdmi1_in_vp1: endpoint {
+ remote-endpoint = <&vp1_out_hdmi1>;
+ };
+};
+
+&hdmi1_out {
+ hdmi1_out_con: endpoint {
+ remote-endpoint = <&hdmi1_con_in>;
+ };
+};
+
+&hdmi1_sound {
+ status = "okay";
+};
+
+&hdmi_receiver_cma {
+ status = "okay";
+};
+
+&hdmi_receiver {
+ hpd-gpios = <&gpio1 RK_PC6 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&hdmim1_rx_cec &hdmim1_rx_hpdin &hdmim1_rx_scl &hdmim1_rx_sda &hdmirx_hpd>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&hdptxphy0 {
+ status = "okay";
+};
+
+&hdptxphy1 {
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0m2_xfer>;
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c6 {
+ status = "okay";
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-output-names = "hym8563";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hym8563_int>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
+ wakeup-source;
+ };
+};
+
+&i2c7 {
+ status = "okay";
+
+ es8316: audio-codec@11 {
+ compatible = "everest,es8316";
+ reg = <0x11>;
+ clocks = <&cru I2S0_8CH_MCLKOUT>;
+ clock-names = "mclk";
+ assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
+ assigned-clock-rates = <12288000>;
+ #sound-dai-cells = <0>;
+
+ port {
+ es8316_p0_0: endpoint {
+ remote-endpoint = <&i2s0_8ch_p0_0>;
+ };
+ };
+ };
+};
+
+&i2s0_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_lrck
+ &i2s0_mclk
+ &i2s0_sclk
+ &i2s0_sdi0
+ &i2s0_sdo0>;
+ status = "okay";
+
+ i2s0_8ch_p0: port {
+ i2s0_8ch_p0_0: endpoint {
+ dai-format = "i2s";
+ mclk-fs = <256>;
+ remote-endpoint = <&es8316_p0_0>;
+ };
+ };
+};
+
+&i2s5_8ch {
+ status = "okay";
+};
+
+&i2s6_8ch {
+ status = "okay";
+};
+
+&package_thermal {
+ polling-delay = <1000>;
+
+ trips {
+ package_fan0: package-fan0 {
+ temperature = <55000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ package_fan1: package-fan1 {
+ temperature = <65000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&package_fan0>;
+ cooling-device = <&fan THERMAL_NO_LIMIT 1>;
+ };
+
+ map1 {
+ trip = <&package_fan1>;
+ cooling-device = <&fan 2 THERMAL_NO_LIMIT>;
+ };
+ };
+};
+
+&pcie2x1l0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_0_rst>;
+ reset-gpios = <&gpio4 RK_PA5 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie2x1l0>;
+ status = "okay";
+};
+
+&pcie2x1l2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_2_rst>;
+ reset-gpios = <&gpio3 RK_PB0 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie2x1l2>;
+ status = "okay";
+};
+
+&pcie30phy {
+ status = "okay";
+};
+
+&pcie3x4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie3_rst>;
+ reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie30>;
+ status = "okay";
+};
+
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
+&pinctrl {
+ hdmirx {
+ hdmirx_hpd: hdmirx-5v-detection {
+ rockchip,pins = <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ hym8563 {
+ hym8563_int: hym8563-int {
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ led_rgb_b: led-rgb-b {
+ rockchip,pins = <0 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sound {
+ hp_detect: hp-detect {
+ rockchip,pins = <1 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie2 {
+ pcie2_0_rst: pcie2-0-rst {
+ rockchip,pins = <4 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie2_0_vcc3v3_en: pcie2-0-vcc-en {
+ rockchip,pins = <1 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie2_2_rst: pcie2-2-rst {
+ rockchip,pins = <3 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie3 {
+ pcie3_rst: pcie3-rst {
+ rockchip,pins = <4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie3_vcc3v3_en: pcie3-vcc3v3-en {
+ rockchip,pins = <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&avcc_1v8_s0>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ no-sdio;
+ no-sd;
+ non-removable;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ status = "okay";
+};
+
+&sdmmc {
+ max-frequency = <200000000>;
+ no-sdio;
+ no-mmc;
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_3v3_s3>;
+ vqmmc-supply = <&vccio_sd_s0>;
+ status = "okay";
+};
+
+&sfc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&fspim2_pins>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <104000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ vcc-supply = <&vcc_3v3_s3>;
+ };
+};
+
+&spi2 {
+ status = "okay";
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
+ num-cs = <1>;
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ spi-max-frequency = <1000000>;
+ reg = <0x0>;
+
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+
+ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc5v0_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc5v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc5v0_sys>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 {
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_lit_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_log_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_vdenc_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_2v0_pldo_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avcc_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "avcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avdd_1v2_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "avdd_1v2_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_3v3_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vccio_sd_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ pldo6_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "pldo6_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_ddr_pll_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ avdd_0v75_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "avdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v85_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_0v85_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v75_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ status = "okay";
+};
+
+&u2phy2 {
+ status = "okay";
+};
+
+&u2phy2_host {
+ /* connected to USB hub, which is powered by vcc5v0_sys */
+ phy-supply = <&vcc5v0_sys>;
+ status = "okay";
+};
+
+&u2phy3 {
+ status = "okay";
+};
+
+&u2phy3_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&usbdp_phy1 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_host2_xhci {
+ status = "okay";
+};
+
+&vop {
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi0: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi0_in_vp0>;
+ };
+};
+
+&vp1 {
+ vp1_out_hdmi1: endpoint@ROCKCHIP_VOP2_EP_HDMI1 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI1>;
+ remote-endpoint = <&hdmi1_in_vp1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou-video-demo.dtso b/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou-video-demo.dtso
new file mode 100644
index 000000000000..b8636fcb4f39
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou-video-demo.dtso
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2024 Cherry Embedded Solutions GmbH
+ *
+ * DEVKIT ADDON CAM-TS-A01
+ * https://embedded.cherry.de/product/development-kit/
+ *
+ * DT-overlay for the camera / DSI demo appliance for Haikou boards.
+ * In the flavour for use with a Tiger system-on-module.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+
+&{/} {
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ power-supply = <&dc_12v>;
+ pwms = <&pwm0 0 25000 0>;
+ };
+
+ vcc1v8_video: regulator-vcc1v8-video {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc1v8-video";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc3v3_baseboard>;
+ };
+
+ vcc2v8_video: regulator-vcc2v8-video {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc2v8-video";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vcc3v3_baseboard>;
+ };
+
+ video-adapter-leds {
+ compatible = "gpio-leds";
+
+ video-adapter-led {
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&pca9670 7 GPIO_ACTIVE_HIGH>;
+ label = "video-adapter-led";
+ linux,default-trigger = "none";
+ };
+ };
+};
+
+&dsi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ panel@0 {
+ compatible = "leadtek,ltk050h3148w";
+ reg = <0>;
+ backlight = <&backlight>;
+ iovcc-supply = <&vcc1v8_video>;
+ reset-gpios = <&pca9670 0 GPIO_ACTIVE_LOW>;
+ vci-supply = <&vcc2v8_video>;
+
+ port {
+ mipi_panel_in: endpoint {
+ remote-endpoint = <&dsi0_out_panel>;
+ };
+ };
+ };
+};
+
+&dsi0_in {
+ dsi0_in_vp3: endpoint {
+ remote-endpoint = <&vp3_out_dsi0>;
+ };
+};
+
+&dsi0_out {
+ dsi0_out_panel: endpoint {
+ remote-endpoint = <&mipi_panel_in>;
+ };
+};
+
+&i2c6 {
+ /* OV5675, GT911, DW9714 are limited to 400KHz */
+ clock-frequency = <400000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen@14 {
+ compatible = "goodix,gt911";
+ reg = <0x14>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <RK_PC3 IRQ_TYPE_LEVEL_LOW>;
+ irq-gpios = <&gpio3 RK_PC3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touch_int>;
+ reset-gpios = <&pca9670 1 GPIO_ACTIVE_HIGH>;
+ AVDD28-supply = <&vcc2v8_video>;
+ VDDIO-supply = <&vcc3v3_baseboard>;
+ };
+
+ pca9670: gpio@27 {
+ compatible = "nxp,pca9670";
+ reg = <0x27>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pca9670_resetn>;
+ reset-gpios = <&gpio3 RK_PC1 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&mipidcphy0 {
+ status = "okay";
+};
+
+&pinctrl {
+ pca9670 {
+ pca9670_resetn: pca9670-resetn {
+ rockchip,pins = <3 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ touch {
+ touch_int: touch-int {
+ rockchip,pins = <3 RK_PC3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pwm0 {
+ status = "okay";
+};
+
+&vp3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vp3_out_dsi0: endpoint@ROCKCHIP_VOP2_EP_MIPI0 {
+ reg = <ROCKCHIP_VOP2_EP_MIPI0>;
+ remote-endpoint = <&dsi0_in_vp3>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts
index a3d8ff647839..caa43d1abf17 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts
@@ -189,6 +189,10 @@
};
};
+&hdmi0_sound {
+ status = "okay";
+};
+
&hdptxphy0 {
status = "okay";
};
@@ -228,6 +232,10 @@
status = "okay";
};
+&i2s5_8ch {
+ status = "okay";
+};
+
&pcie30phy {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
index 711ac4f2c7cb..60ad272982ad 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
@@ -214,6 +214,8 @@
};
&package_thermal {
+ polling-delay = <1000>;
+
trips {
package_active1: trip-active1 {
temperature = <45000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588j.dtsi b/arch/arm64/boot/dts/rockchip/rk3588j.dtsi
index bce72bac4503..3045cb3bd68c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588j.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588j.dtsi
@@ -11,20 +11,15 @@
compatible = "operating-points-v2";
opp-shared;
- opp-1416000000 {
- opp-hz = /bits/ 64 <1416000000>;
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <750000 750000 950000>;
clock-latency-ns = <40000>;
opp-suspend;
};
- opp-1608000000 {
- opp-hz = /bits/ 64 <1608000000>;
- opp-microvolt = <887500 887500 950000>;
- clock-latency-ns = <40000>;
- };
- opp-1704000000 {
- opp-hz = /bits/ 64 <1704000000>;
- opp-microvolt = <937500 937500 950000>;
+ opp-1296000000 {
+ opp-hz = /bits/ 64 <1296000000>;
+ opp-microvolt = <775000 775000 950000>;
clock-latency-ns = <40000>;
};
};
@@ -33,9 +28,14 @@
compatible = "operating-points-v2";
opp-shared;
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <750000 750000 950000>;
+ clock-latency-ns = <40000>;
+ };
opp-1416000000 {
opp-hz = /bits/ 64 <1416000000>;
- opp-microvolt = <750000 750000 950000>;
+ opp-microvolt = <762500 762500 950000>;
clock-latency-ns = <40000>;
};
opp-1608000000 {
@@ -43,25 +43,20 @@
opp-microvolt = <787500 787500 950000>;
clock-latency-ns = <40000>;
};
- opp-1800000000 {
- opp-hz = /bits/ 64 <1800000000>;
- opp-microvolt = <875000 875000 950000>;
- clock-latency-ns = <40000>;
- };
- opp-2016000000 {
- opp-hz = /bits/ 64 <2016000000>;
- opp-microvolt = <950000 950000 950000>;
- clock-latency-ns = <40000>;
- };
};
cluster2_opp_table: opp-table-cluster2 {
compatible = "operating-points-v2";
opp-shared;
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <750000 750000 950000>;
+ clock-latency-ns = <40000>;
+ };
opp-1416000000 {
opp-hz = /bits/ 64 <1416000000>;
- opp-microvolt = <750000 750000 950000>;
+ opp-microvolt = <762500 762500 950000>;
clock-latency-ns = <40000>;
};
opp-1608000000 {
@@ -69,16 +64,6 @@
opp-microvolt = <787500 787500 950000>;
clock-latency-ns = <40000>;
};
- opp-1800000000 {
- opp-hz = /bits/ 64 <1800000000>;
- opp-microvolt = <875000 875000 950000>;
- clock-latency-ns = <40000>;
- };
- opp-2016000000 {
- opp-hz = /bits/ 64 <2016000000>;
- opp-microvolt = <950000 950000 950000>;
- clock-latency-ns = <40000>;
- };
};
gpu_opp_table: opp-table {
@@ -104,10 +89,6 @@
opp-hz = /bits/ 64 <700000000>;
opp-microvolt = <750000 750000 850000>;
};
- opp-850000000 {
- opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <787500 787500 850000>;
- };
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3588s-evb1-v10.dts
index 9f4aca9c2e3f..0df3e80f2dd9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-evb1-v10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-evb1-v10.dts
@@ -9,6 +9,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
#include <dt-bindings/usb/pd.h>
#include "rk3588s.dtsi"
@@ -238,6 +239,42 @@
status = "okay";
};
+&edp0 {
+ force-hpd;
+ status = "okay";
+
+ aux-bus {
+ panel {
+ compatible = "edp-panel";
+ backlight = <&backlight>;
+ power-supply = <&vcc3v3_lcd_edp>;
+ no-hpd;
+
+ port {
+ panel_in_edp: endpoint {
+ remote-endpoint = <&edp_out_panel>;
+ };
+ };
+ };
+ };
+};
+
+&edp0_in {
+ edp0_in_vp2: endpoint {
+ remote-endpoint = <&vp2_out_edp0>;
+ };
+};
+
+&edp0_out {
+ edp_out_panel: endpoint {
+ remote-endpoint = <&panel_in_edp>;
+ };
+};
+
+&hdptxphy0 {
+ status = "okay";
+};
+
&i2c3 {
status = "okay";
@@ -403,6 +440,7 @@
};
&pwm12 {
+ pinctrl-0 = <&pwm12m1_pins>;
status = "okay";
};
@@ -1172,3 +1210,20 @@
};
};
};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vop {
+ assigned-clocks = <&cru DCLK_VOP2_SRC>;
+ assigned-clock-parents = <&cru PLL_V0PLL>;
+ status = "okay";
+};
+
+&vp2 {
+ vp2_out_edp0: endpoint@ROCKCHIP_VOP2_EP_EDP0 {
+ reg = <ROCKCHIP_VOP2_EP_EDP0>;
+ remote-endpoint = <&edp0_in_vp2>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
index 4189a88ecf40..4ec7bc4a9e96 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
@@ -278,6 +278,10 @@
};
};
+&hdmi0_sound {
+ status = "okay";
+};
+
&hdptxphy0 {
status = "okay";
};
@@ -449,6 +453,10 @@
};
};
+&i2s5_8ch {
+ status = "okay";
+};
+
&pcie2x1l2 {
pinctrl-0 = <&rtl8111_perstb>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts b/arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts
index 88a5e822ed17..2c22abaf40a8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts
@@ -6,6 +6,7 @@
#include <dt-bindings/input/input.h>
#include <dt-bindings/pinctrl/rockchip.h>
#include <dt-bindings/leds/common.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
#include "rk3588s.dtsi"
/ {
@@ -42,6 +43,17 @@
pinctrl-0 = <&ir_receiver_pin>;
};
+ hdmi0-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi0_con_in: endpoint {
+ remote-endpoint = <&hdmi0_out_con>;
+ };
+ };
+ };
+
leds {
compatible = "pwm-leds";
@@ -181,6 +193,30 @@
status = "okay";
};
+&hdmi0 {
+ status = "okay";
+};
+
+&hdmi0_in {
+ hdmi0_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi0>;
+ };
+};
+
+&hdmi0_out {
+ hdmi0_out_con: endpoint {
+ remote-endpoint = <&hdmi0_con_in>;
+ };
+};
+
+&hdmi0_sound {
+ status = "okay";
+};
+
+&hdptxphy0 {
+ status = "okay";
+};
+
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0m2_xfer>;
@@ -233,6 +273,10 @@
};
};
+&i2s5_8ch {
+ status = "okay";
+};
+
&pd_gpu {
domain-supply = <&vdd_gpu_s0>;
};
@@ -697,8 +741,24 @@
&uart9 {
pinctrl-names = "default";
- pinctrl-0 = <&uart9m2_xfer &uart9m2_ctsn>;
+ pinctrl-0 = <&uart9m2_xfer &uart9m2_ctsn &uart9m2_rtsn>;
status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ clocks = <&hym8563>;
+ clock-names = "lpo";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PD5 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wakeup";
+ device-wakeup-gpios = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio0 RK_PD4 GPIO_ACTIVE_HIGH>;
+ max-speed = <1500000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_wake_host_irq &bt_wake_pin &bt_reset_pin>;
+ vbat-supply = <&vcc_3v3_s3>;
+ vddio-supply = <&vcc_1v8_s3>;
+ };
};
&u2phy2 {
@@ -738,3 +798,18 @@
&usb_host2_xhci {
status = "okay";
};
+
+&vop {
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi0: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi0_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp211.dtsi b/arch/arm64/boot/dts/st/stm32mp211.dtsi
index 6dd1377f3e1d..bf888d60cd4f 100644
--- a/arch/arm64/boot/dts/st/stm32mp211.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp211.dtsi
@@ -116,11 +116,11 @@
};
intc: interrupt-controller@4ac10000 {
- compatible = "arm,cortex-a7-gic";
+ compatible = "arm,gic-400";
reg = <0x4ac10000 0x0 0x1000>,
- <0x4ac20000 0x0 0x2000>,
- <0x4ac40000 0x0 0x2000>,
- <0x4ac60000 0x0 0x2000>;
+ <0x4ac20000 0x0 0x20000>,
+ <0x4ac40000 0x0 0x20000>,
+ <0x4ac60000 0x0 0x20000>;
#interrupt-cells = <3>;
interrupt-controller;
};
diff --git a/arch/arm64/boot/dts/st/stm32mp231.dtsi b/arch/arm64/boot/dts/st/stm32mp231.dtsi
index 8820d219a33e..75697acd1345 100644
--- a/arch/arm64/boot/dts/st/stm32mp231.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp231.dtsi
@@ -1201,13 +1201,12 @@
};
intc: interrupt-controller@4ac10000 {
- compatible = "arm,cortex-a7-gic";
+ compatible = "arm,gic-400";
reg = <0x4ac10000 0x1000>,
- <0x4ac20000 0x2000>,
- <0x4ac40000 0x2000>,
- <0x4ac60000 0x2000>;
+ <0x4ac20000 0x20000>,
+ <0x4ac40000 0x20000>,
+ <0x4ac60000 0x20000>;
#interrupt-cells = <3>;
- #address-cells = <1>;
interrupt-controller;
};
};
diff --git a/arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi b/arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi
index 8fdd5f020425..aba90d555f4e 100644
--- a/arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi
@@ -82,6 +82,57 @@
};
};
+ ospi_port1_clk_pins_a: ospi-port1-clk-0 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 0, AF10)>; /* OSPI1_CLK */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ };
+
+ ospi_port1_clk_sleep_pins_a: ospi-port1-clk-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 0, ANALOG)>; /* OSPI1_CLK */
+ };
+ };
+
+ ospi_port1_cs0_pins_a: ospi-port1-cs0-0 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 3, AF10)>; /* OSPI_NCS0 */
+ bias-pull-up;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ ospi_port1_cs0_sleep_pins_a: ospi-port1-cs0-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 3, ANALOG)>; /* OSPI_NCS0 */
+ };
+ };
+
+ ospi_port1_io03_pins_a: ospi-port1-io03-0 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 4, AF10)>, /* OSPI_IO0 */
+ <STM32_PINMUX('D', 5, AF10)>, /* OSPI_IO1 */
+ <STM32_PINMUX('D', 6, AF10)>, /* OSPI_IO2 */
+ <STM32_PINMUX('D', 7, AF10)>; /* OSPI_IO3 */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ ospi_port1_io03_sleep_pins_a: ospi-port1-io03-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 4, ANALOG)>, /* OSPI_IO0 */
+ <STM32_PINMUX('D', 5, ANALOG)>, /* OSPI_IO1 */
+ <STM32_PINMUX('D', 6, ANALOG)>, /* OSPI_IO2 */
+ <STM32_PINMUX('D', 7, ANALOG)>; /* OSPI_IO3 */
+ };
+ };
+
sdmmc1_b4_pins_a: sdmmc1-b4-0 {
pins1 {
pinmux = <STM32_PINMUX('E', 4, AF10)>, /* SDMMC1_D0 */
diff --git a/arch/arm64/boot/dts/st/stm32mp251.dtsi b/arch/arm64/boot/dts/st/stm32mp251.dtsi
index f3c6cdfd7008..8d87865850a7 100644
--- a/arch/arm64/boot/dts/st/stm32mp251.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp251.dtsi
@@ -115,14 +115,13 @@
};
intc: interrupt-controller@4ac00000 {
- compatible = "arm,cortex-a7-gic";
+ compatible = "arm,gic-400";
#interrupt-cells = <3>;
- #address-cells = <1>;
interrupt-controller;
reg = <0x0 0x4ac10000 0x0 0x1000>,
- <0x0 0x4ac20000 0x0 0x2000>,
- <0x0 0x4ac40000 0x0 0x2000>,
- <0x0 0x4ac60000 0x0 0x2000>;
+ <0x0 0x4ac20000 0x0 0x20000>,
+ <0x0 0x4ac40000 0x0 0x20000>,
+ <0x0 0x4ac60000 0x0 0x20000>;
};
psci {
@@ -230,6 +229,60 @@
#dma-cells = <3>;
};
+ ommanager: ommanager@40500000 {
+ compatible = "st,stm32mp25-omm";
+ reg = <0x40500000 0x400>, <0x60000000 0x10000000>;
+ reg-names = "regs", "memory_map";
+ ranges = <0 0 0x40430000 0x400>,
+ <1 0 0x40440000 0x400>;
+ clocks = <&rcc CK_BUS_OSPIIOM>,
+ <&scmi_clk CK_SCMI_OSPI1>,
+ <&scmi_clk CK_SCMI_OSPI2>;
+ clock-names = "omm", "ospi1", "ospi2";
+ resets = <&rcc OSPIIOM_R>,
+ <&scmi_reset RST_SCMI_OSPI1>,
+ <&scmi_reset RST_SCMI_OSPI2>;
+ reset-names = "omm", "ospi1", "ospi2";
+ access-controllers = <&rifsc 111>;
+ power-domains = <&CLUSTER_PD>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ st,syscfg-amcr = <&syscfg 0x2c00 0x7>;
+ status = "disabled";
+
+ ospi1: spi@0 {
+ compatible = "st,stm32mp25-ospi";
+ reg = <0 0 0x400>;
+ interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&hpdma 2 0x62 0x3121>,
+ <&hpdma 2 0x42 0x3112>;
+ dma-names = "tx", "rx";
+ clocks = <&scmi_clk CK_SCMI_OSPI1>;
+ resets = <&scmi_reset RST_SCMI_OSPI1>,
+ <&scmi_reset RST_SCMI_OSPI1DLL>;
+ access-controllers = <&rifsc 74>;
+ power-domains = <&CLUSTER_PD>;
+ st,syscfg-dlyb = <&syscfg 0x1000>;
+ status = "disabled";
+ };
+
+ ospi2: spi@1 {
+ compatible = "st,stm32mp25-ospi";
+ reg = <1 0 0x400>;
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&hpdma 3 0x62 0x3121>,
+ <&hpdma 3 0x42 0x3112>;
+ dma-names = "tx", "rx";
+ clocks = <&scmi_clk CK_SCMI_OSPI2>;
+ resets = <&scmi_reset RST_SCMI_OSPI2>,
+ <&scmi_reset RST_SCMI_OSPI2DLL>;
+ access-controllers = <&rifsc 75>;
+ power-domains = <&CLUSTER_PD>;
+ st,syscfg-dlyb = <&syscfg 0x1400>;
+ status = "disabled";
+ };
+ };
+
rifsc: bus@42080000 {
compatible = "st,stm32mp25-rifsc", "simple-bus";
reg = <0x42080000 0x1000>;
@@ -238,6 +291,78 @@
#access-controller-cells = <1>;
ranges;
+ lptimer1: timer@40090000 {
+ compatible = "st,stm32mp25-lptimer", "st,stm32-lptimer";
+ reg = <0x40090000 0x400>;
+ interrupts-extended = <&exti1 47 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_LPTIM1>;
+ clock-names = "mux";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&rifsc 17>;
+ power-domains = <&RET_PD>;
+ wakeup-source;
+ status = "disabled";
+
+ counter {
+ compatible = "st,stm32mp25-lptimer-counter", "st,stm32-lptimer-counter";
+ status = "disabled";
+ };
+
+ pwm {
+ compatible = "st,stm32mp25-pwm-lp", "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer {
+ compatible = "st,stm32mp25-lptimer-timer", "st,stm32-lptimer-timer";
+ status = "disabled";
+ };
+
+ trigger@0 {
+ compatible = "st,stm32mp25-lptimer-trigger", "st,stm32-lptimer-trigger";
+ reg = <0>;
+ status = "disabled";
+ };
+ };
+
+ lptimer2: timer@400a0000 {
+ compatible = "st,stm32mp25-lptimer", "st,stm32-lptimer";
+ reg = <0x400a0000 0x400>;
+ interrupts-extended = <&exti1 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_LPTIM2>;
+ clock-names = "mux";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&rifsc 18>;
+ power-domains = <&RET_PD>;
+ wakeup-source;
+ status = "disabled";
+
+ counter {
+ compatible = "st,stm32mp25-lptimer-counter", "st,stm32-lptimer-counter";
+ status = "disabled";
+ };
+
+ pwm {
+ compatible = "st,stm32mp25-pwm-lp", "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer {
+ compatible = "st,stm32mp25-lptimer-timer", "st,stm32-lptimer-timer";
+ status = "disabled";
+ };
+
+ trigger@1 {
+ compatible = "st,stm32mp25-lptimer-trigger", "st,stm32-lptimer-trigger";
+ reg = <1>;
+ status = "disabled";
+ };
+ };
+
i2s2: audio-controller@400b0000 {
compatible = "st,stm32mp25-i2s";
reg = <0x400b0000 0x400>;
@@ -799,6 +924,111 @@
status = "disabled";
};
+ lptimer3: timer@46050000 {
+ compatible = "st,stm32mp25-lptimer", "st,stm32-lptimer";
+ reg = <0x46050000 0x400>;
+ interrupts-extended = <&exti2 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_LPTIM3>;
+ clock-names = "mux";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&rifsc 19>;
+ wakeup-source;
+ status = "disabled";
+
+ counter {
+ compatible = "st,stm32mp25-lptimer-counter", "st,stm32-lptimer-counter";
+ status = "disabled";
+ };
+
+ pwm {
+ compatible = "st,stm32mp25-pwm-lp", "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer {
+ compatible = "st,stm32mp25-lptimer-timer", "st,stm32-lptimer-timer";
+ status = "disabled";
+ };
+
+ trigger@2 {
+ compatible = "st,stm32mp25-lptimer-trigger", "st,stm32-lptimer-trigger";
+ reg = <2>;
+ status = "disabled";
+ };
+ };
+
+ lptimer4: timer@46060000 {
+ compatible = "st,stm32mp25-lptimer", "st,stm32-lptimer";
+ reg = <0x46060000 0x400>;
+ interrupts-extended = <&exti2 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_LPTIM4>;
+ clock-names = "mux";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&rifsc 20>;
+ wakeup-source;
+ status = "disabled";
+
+ counter {
+ compatible = "st,stm32mp25-lptimer-counter", "st,stm32-lptimer-counter";
+ status = "disabled";
+ };
+
+ pwm {
+ compatible = "st,stm32mp25-pwm-lp", "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer {
+ compatible = "st,stm32mp25-lptimer-timer", "st,stm32-lptimer-timer";
+ status = "disabled";
+ };
+
+ trigger@3 {
+ compatible = "st,stm32mp25-lptimer-trigger", "st,stm32-lptimer-trigger";
+ reg = <3>;
+ status = "disabled";
+ };
+ };
+
+ lptimer5: timer@46070000 {
+ compatible = "st,stm32mp25-lptimer", "st,stm32-lptimer";
+ reg = <0x46070000 0x400>;
+ interrupts-extended = <&exti2 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_LPTIM5>;
+ clock-names = "mux";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&rifsc 21>;
+ wakeup-source;
+ status = "disabled";
+
+ counter {
+ compatible = "st,stm32mp25-lptimer-counter", "st,stm32-lptimer-counter";
+ status = "disabled";
+ };
+
+ pwm {
+ compatible = "st,stm32mp25-pwm-lp", "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer {
+ compatible = "st,stm32mp25-lptimer-timer", "st,stm32-lptimer-timer";
+ status = "disabled";
+ };
+
+ trigger@4 {
+ compatible = "st,stm32mp25-lptimer-trigger", "st,stm32-lptimer-trigger";
+ reg = <4>;
+ status = "disabled";
+ };
+ };
+
csi: csi@48020000 {
compatible = "st,stm32mp25-csi";
reg = <0x48020000 0x2000>;
diff --git a/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts b/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts
index 1b88485a62a1..2f561ad40665 100644
--- a/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts
+++ b/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts
@@ -80,6 +80,11 @@
reg = <0x0 0x80000000 0x0 0x4000000>;
no-map;
};
+
+ mm_ospi1: mm-ospi@60000000 {
+ reg = <0x0 0x60000000 0x0 0x10000000>;
+ no-map;
+ };
};
};
@@ -190,6 +195,41 @@
status = "disabled";
};
+&ommanager {
+ memory-region = <&mm_ospi1>;
+ pinctrl-0 = <&ospi_port1_clk_pins_a
+ &ospi_port1_io03_pins_a
+ &ospi_port1_cs0_pins_a>;
+ pinctrl-1 = <&ospi_port1_clk_sleep_pins_a
+ &ospi_port1_io03_sleep_pins_a
+ &ospi_port1_cs0_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ status = "okay";
+
+ spi@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ memory-region = <&mm_ospi1>;
+ status = "okay";
+
+ flash0: flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ };
+ };
+};
+
+/* use LPTIMER with tick broadcast for suspend mode */
+&lptimer3 {
+ status = "okay";
+ timer {
+ status = "okay";
+ };
+};
+
&rtc {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/tesla/fsd-evb.dts b/arch/arm64/boot/dts/tesla/fsd-evb.dts
index 8d7794642900..9ff22e1c8723 100644
--- a/arch/arm64/boot/dts/tesla/fsd-evb.dts
+++ b/arch/arm64/boot/dts/tesla/fsd-evb.dts
@@ -64,6 +64,26 @@
};
};
+&ethernet0 {
+ status = "okay";
+
+ phy-mode = "rgmii-id";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+};
+
+&ethernet1 {
+ status = "okay";
+
+ phy-mode = "rgmii-id";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+};
+
&fin_pll {
clock-frequency = <24000000>;
};
diff --git a/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi b/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi
index 3f898cf4874c..6f4658f57453 100644
--- a/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi
@@ -64,6 +64,62 @@
samsung,pin-pud = <FSD_PIN_PULL_UP>;
samsung,pin-drv = <FSD_PIN_DRV_LV4>;
};
+
+ eth0_tx_clk: eth0-tx-clk-pins {
+ samsung,pins = "gpf0-0";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_DOWN>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV6>;
+ };
+
+ eth0_tx_data: eth0-tx-data-pins {
+ samsung,pins = "gpf0-1", "gpf0-2", "gpf0-3", "gpf0-4";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV6>;
+ };
+
+ eth0_tx_ctrl: eth0-tx-ctrl-pins {
+ samsung,pins = "gpf0-5";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV6>;
+ };
+
+ eth0_phy_intr: eth0-phy-intr-pins {
+ samsung,pins = "gpf0-6";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_NONE>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ eth0_rx_clk: eth0-rx-clk-pins {
+ samsung,pins = "gpf1-0";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV6>;
+ };
+
+ eth0_rx_data: eth0-rx-data-pins {
+ samsung,pins = "gpf1-1", "gpf1-2", "gpf1-3", "gpf1-4";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV6>;
+ };
+
+ eth0_rx_ctrl: eth0-rx-ctrl-pins {
+ samsung,pins = "gpf1-5";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV6>;
+ };
+
+ eth0_mdio: eth0-mdio-pins {
+ samsung,pins = "gpf1-6", "gpf1-7";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_NONE>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
};
&pinctrl_peric {
@@ -381,6 +437,62 @@
samsung,pin-pud = <FSD_PIN_PULL_DOWN>;
samsung,pin-drv = <FSD_PIN_DRV_LV4>;
};
+
+ eth1_tx_clk: eth1-tx-clk-pins {
+ samsung,pins = "gpf2-0";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_DOWN>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV6>;
+ };
+
+ eth1_tx_data: eth1-tx-data-pins {
+ samsung,pins = "gpf2-1", "gpf2-2", "gpf2-3", "gpf2-4";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV6>;
+ };
+
+ eth1_tx_ctrl: eth1-tx-ctrl-pins {
+ samsung,pins = "gpf2-5";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV6>;
+ };
+
+ eth1_phy_intr: eth1-phy-intr-pins {
+ samsung,pins = "gpf2-6";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ eth1_rx_clk: eth1-rx-clk-pins {
+ samsung,pins = "gpf3-0";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV6>;
+ };
+
+ eth1_rx_data: eth1-rx-data-pins {
+ samsung,pins = "gpf3-1", "gpf3-2", "gpf3-3", "gpf3-4";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV6>;
+ };
+
+ eth1_rx_ctrl: eth1-rx-ctrl-pins {
+ samsung,pins = "gpf3-5";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV6>;
+ };
+
+ eth1_mdio: eth1-mdio-pins {
+ samsung,pins = "gpf3-6", "gpf3-7";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
};
&pinctrl_pmu {
diff --git a/arch/arm64/boot/dts/tesla/fsd.dtsi b/arch/arm64/boot/dts/tesla/fsd.dtsi
index 9951eef9507c..a5ebb3f9b18f 100644
--- a/arch/arm64/boot/dts/tesla/fsd.dtsi
+++ b/arch/arm64/boot/dts/tesla/fsd.dtsi
@@ -979,6 +979,36 @@
memory-region = <&mfc_left>;
};
+ ethernet1: ethernet@14300000 {
+ compatible = "tesla,fsd-ethqos";
+ reg = <0x0 0x14300000 0x0 0x10000>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clocks = <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I>,
+ <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_ACLK_I>,
+ <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_HCLK_I>,
+ <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_RGMII_CLK_I>,
+ <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_CLK_RX_I>,
+ <&clock_peric PERIC_BUS_D_PERIC_IPCLKPORT_EQOSCLK>,
+ <&clock_peric PERIC_BUS_P_PERIC_IPCLKPORT_EQOSCLK>,
+ <&clock_peric PERIC_EQOS_PHYRXCLK_MUX>,
+ <&clock_peric PERIC_EQOS_PHYRXCLK>,
+ <&clock_peric PERIC_DOUT_RGMII_CLK>;
+ clock-names = "ptp_ref", "master_bus", "slave_bus", "tx", "rx",
+ "master2_bus", "slave2_bus", "eqos_rxclk_mux",
+ "eqos_phyrxclk", "dout_peric_rgmii_clk";
+ assigned-clocks = <&clock_peric PERIC_EQOS_PHYRXCLK_MUX>,
+ <&clock_peric PERIC_EQOS_PHYRXCLK>;
+ assigned-clock-parents = <&clock_peric PERIC_EQOS_PHYRXCLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&eth1_tx_clk>, <&eth1_tx_data>, <&eth1_tx_ctrl>,
+ <&eth1_phy_intr>, <&eth1_rx_clk>, <&eth1_rx_data>,
+ <&eth1_rx_ctrl>, <&eth1_mdio>;
+ local-mac-address = [00 00 00 00 00 00];
+ iommus = <&smmu_peric 0x0 0x1>;
+ status = "disabled";
+ };
+
ufs: ufs@15120000 {
compatible = "tesla,fsd-ufs";
reg = <0x0 0x15120000 0x0 0x200>, /* 0: HCI standard */
@@ -1007,6 +1037,26 @@
clocks = <&clock_fsys0 UFS0_MPHY_REFCLK_IXTAL26>;
clock-names = "ref_clk";
};
+
+ ethernet0: ethernet@15300000 {
+ compatible = "tesla,fsd-ethqos";
+ reg = <0x0 0x15300000 0x0 0x10000>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clocks = <&clock_fsys0 FSYS0_EQOS_TOP0_IPCLKPORT_CLK_PTP_REF_I>,
+ <&clock_fsys0 FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I>,
+ <&clock_fsys0 FSYS0_EQOS_TOP0_IPCLKPORT_HCLK_I>,
+ <&clock_fsys0 FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I>,
+ <&clock_fsys0 FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I>;
+ clock-names = "ptp_ref", "master_bus", "slave_bus", "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&eth0_tx_clk>, <&eth0_tx_data>, <&eth0_tx_ctrl>,
+ <&eth0_phy_intr>, <&eth0_rx_clk>, <&eth0_rx_data>,
+ <&eth0_rx_ctrl>, <&eth0_mdio>;
+ local-mac-address = [00 00 00 00 00 00];
+ iommus = <&smmu_fsys0 0x0 0x1>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile
index 03d4cecfc001..c6171de9fe88 100644
--- a/arch/arm64/boot/dts/ti/Makefile
+++ b/arch/arm64/boot/dts/ti/Makefile
@@ -27,6 +27,7 @@ dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-yavia.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am62x-phyboard-lyra-gpio-fan.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am62-lp-sk.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am62-lp-sk-nand.dtbo
+dtb-$(CONFIG_ARCH_K3) += k3-am62-pocketbeagle2.dtb
# Boards with AM62Ax SoC
dtb-$(CONFIG_ARCH_K3) += k3-am62a7-sk.dtb
@@ -34,6 +35,16 @@ dtb-$(CONFIG_ARCH_K3) += k3-am62a7-phyboard-lyra-rdk.dtb
# Boards with AM62Px SoC
dtb-$(CONFIG_ARCH_K3) += k3-am62p5-sk.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am62p5-verdin-nonwifi-dahlia.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am62p5-verdin-nonwifi-dev.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am62p5-verdin-nonwifi-ivy.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am62p5-verdin-nonwifi-mallow.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am62p5-verdin-nonwifi-yavia.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am62p5-verdin-wifi-dahlia.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am62p5-verdin-wifi-dev.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am62p5-verdin-wifi-ivy.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am62p5-verdin-wifi-mallow.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am62p5-verdin-wifi-yavia.dtb
# Common overlays for SK-AM62* family of boards
dtb-$(CONFIG_ARCH_K3) += k3-am62x-sk-csi2-ov5640.dtbo
@@ -109,6 +120,7 @@ dtb-$(CONFIG_ARCH_K3) += k3-j721e-sk.dtb
dtb-$(CONFIG_ARCH_K3) += k3-j721e-sk-csi2-dual-imx219.dtbo
# Boards with J721s2 SoC
+dtb-$(CONFIG_ARCH_K3) += k3-am68-phyboard-izar.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am68-sk-base-board.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am68-sk-base-board-pcie1-ep.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-j721s2-common-proc-board.dtb
@@ -120,6 +132,8 @@ dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm-pcie1-ep.dtbo
# Boards with J722s SoC
dtb-$(CONFIG_ARCH_K3) += k3-am67a-beagley-ai.dtb
dtb-$(CONFIG_ARCH_K3) += k3-j722s-evm.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-j722s-evm-csi2-quad-rpi-cam-imx219.dtbo
+dtb-$(CONFIG_ARCH_K3) += k3-j722s-evm-csi2-quad-tevi-ov5640.dtbo
# Boards with J784s4 SoC
dtb-$(CONFIG_ARCH_K3) += k3-am69-sk.dtb
@@ -128,6 +142,7 @@ dtb-$(CONFIG_ARCH_K3) += k3-j784s4-evm.dtb
dtb-$(CONFIG_ARCH_K3) += k3-j784s4-evm-pcie0-pcie1-ep.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-j784s4-evm-quad-port-eth-exp1.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-j784s4-evm-usxgmii-exp1-exp2.dtbo
+dtb-$(CONFIG_ARCH_K3) += k3-j784s4-j742s2-evm-usb0-type-a.dtbo
# Boards with J742S2 SoC
dtb-$(CONFIG_ARCH_K3) += k3-j742s2-evm.dtb
@@ -212,10 +227,18 @@ k3-j721e-sk-csi2-dual-imx219-dtbs := k3-j721e-sk.dtb \
k3-j721e-sk-csi2-dual-imx219.dtbo
k3-j721s2-evm-pcie1-ep-dtbs := k3-j721s2-common-proc-board.dtb \
k3-j721s2-evm-pcie1-ep.dtbo
+k3-j722s-evm-csi2-quad-rpi-cam-imx219-dtbs := k3-j722s-evm.dtb \
+ k3-j722s-evm-csi2-quad-rpi-cam-imx219.dtbo
+k3-j722s-evm-csi2-quad-tevi-ov5640-dtbs := k3-j722s-evm.dtb \
+ k3-j722s-evm-csi2-quad-tevi-ov5640.dtbo
+k3-j742s2-evm-usb0-type-a-dtbs := k3-j742s2-evm.dtb \
+ k3-j784s4-j742s2-evm-usb0-type-a.dtbo
k3-j784s4-evm-pcie0-pcie1-ep-dtbs := k3-j784s4-evm.dtb \
k3-j784s4-evm-pcie0-pcie1-ep.dtbo
k3-j784s4-evm-quad-port-eth-exp1-dtbs := k3-j784s4-evm.dtb \
k3-j784s4-evm-quad-port-eth-exp1.dtbo
+k3-j784s4-evm-usb0-type-a-dtbs := k3-j784s4-evm.dtb \
+ k3-j784s4-j742s2-evm-usb0-type-a.dtbo
k3-j784s4-evm-usxgmii-exp1-exp2-dtbs := k3-j784s4-evm.dtb \
k3-j784s4-evm-usxgmii-exp1-exp2.dtbo
dtb- += k3-am625-beagleplay-csi2-ov5640.dtb \
@@ -246,8 +269,12 @@ dtb- += k3-am625-beagleplay-csi2-ov5640.dtb \
k3-j721e-evm-pcie1-ep.dtb \
k3-j721e-sk-csi2-dual-imx219.dtb \
k3-j721s2-evm-pcie1-ep.dtb \
+ k3-j722s-evm-csi2-quad-rpi-cam-imx219.dtb \
+ k3-j722s-evm-csi2-quad-tevi-ov5640.dtb \
+ k3-j742s2-evm-usb0-type-a.dtb \
k3-j784s4-evm-pcie0-pcie1-ep.dtb \
k3-j784s4-evm-quad-port-eth-exp1.dtb \
+ k3-j784s4-evm-usb0-type-a.dtb \
k3-j784s4-evm-usxgmii-exp1-exp2.dtb
# Enable support for device-tree overlays
@@ -269,5 +296,6 @@ DTC_FLAGS_k3-j721e-common-proc-board += -@
DTC_FLAGS_k3-j721e-evm-pcie0-ep += -@
DTC_FLAGS_k3-j721e-sk += -@
DTC_FLAGS_k3-j721s2-common-proc-board += -@
+DTC_FLAGS_k3-j722s-evm += -@
DTC_FLAGS_k3-j784s4-evm += -@
DTC_FLAGS_k3-j742s2-evm += -@
diff --git a/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts b/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts
index 8e9fc00a6b3c..aafdb90c0eb7 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts
@@ -69,6 +69,7 @@
gpios = <&main_gpio0 31 GPIO_ACTIVE_HIGH>;
states = <1800000 0x0>,
<3300000 0x1>;
+ bootph-all;
};
};
@@ -77,12 +78,14 @@
pinctrl-single,pins = <
AM62X_IOPAD(0x07c, PIN_OUTPUT, 7) /* (M19) GPMC0_CLK.GPIO0_31 */
>;
+ bootph-all;
};
main_gpio1_ioexp_intr_pins_default: main-gpio1-ioexp-intr-default-pins {
pinctrl-single,pins = <
AM62X_IOPAD(0x01d4, PIN_INPUT, 7) /* (C13) UART0_RTSn.GPIO1_23 */
>;
+ bootph-all;
};
pmic_irq_pins_default: pmic-irq-default-pins {
@@ -118,6 +121,7 @@
pinctrl-names = "default";
pinctrl-0 = <&main_gpio1_ioexp_intr_pins_default>;
+ bootph-all;
};
exp2: gpio@23 {
@@ -229,6 +233,14 @@
DVDD-supply = <&buck2_reg>;
};
+&main_gpio0 {
+ bootph-all;
+};
+
+&main_gpio1 {
+ bootph-all;
+};
+
&gpmc0 {
ranges = <0 0 0x00 0x51000000 0x01000000>; /* CS0 space. Min partition = 16MB */
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
index 7d355aa73ea2..9e0b6eee9ac7 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
@@ -552,8 +552,6 @@
power-domains = <&k3_pds 57 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 57 5>, <&k3_clks 57 6>;
clock-names = "clk_ahb", "clk_xin";
- assigned-clocks = <&k3_clks 57 6>;
- assigned-clock-parents = <&k3_clks 57 8>;
bus-width = <8>;
mmc-ddr-1_8v;
mmc-hs200-1_8v;
@@ -691,12 +689,14 @@
};
gpu: gpu@fd00000 {
- compatible = "ti,am62-gpu", "img,img-axe";
+ compatible = "ti,am62-gpu", "img,img-axe-1-16m", "img,img-axe",
+ "img,img-rogue";
reg = <0x00 0x0fd00000 0x00 0x20000>;
clocks = <&k3_clks 187 0>;
clock-names = "core";
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&k3_pds 187 TI_SCI_PD_EXCLUSIVE>;
+ power-domain-names = "a";
};
cpsw3g: ethernet@8000000 {
@@ -1079,6 +1079,96 @@
status = "disabled";
};
+ pruss: pruss@30040000 {
+ compatible = "ti,am625-pruss";
+ reg = <0x00 0x30040000 0x00 0x80000>;
+ power-domains = <&k3_pds 81 TI_SCI_PD_EXCLUSIVE>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x00 0x30040000 0x80000>;
+
+ pruss_mem: memories@0 {
+ reg = <0x0 0x2000>,
+ <0x2000 0x2000>,
+ <0x10000 0x10000>;
+ reg-names = "dram0", "dram1", "shrdram2";
+ };
+
+ pruss_cfg: cfg@26000 {
+ compatible = "ti,pruss-cfg", "syscon";
+ reg = <0x26000 0x200>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x26000 0x2000>;
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pruss_coreclk_mux: coreclk-mux@3c {
+ reg = <0x3c>;
+ #clock-cells = <0>;
+ clocks = <&k3_clks 81 0>, /* pruss_core_clk */
+ <&k3_clks 81 14>; /* pruss_iclk */
+ assigned-clocks = <&pruss_coreclk_mux>;
+ assigned-clock-parents = <&k3_clks 81 14>;
+ };
+
+ pruss_iepclk_mux: iepclk-mux@30 {
+ reg = <0x30>;
+ #clock-cells = <0>;
+ clocks = <&k3_clks 81 3>, /* pruss_iep_clk */
+ <&pruss_coreclk_mux>; /* pruss_coreclk_mux */
+ assigned-clocks = <&pruss_iepclk_mux>;
+ assigned-clock-parents = <&pruss_coreclk_mux>;
+ };
+ };
+ };
+
+ pruss_intc: interrupt-controller@20000 {
+ compatible = "ti,pruss-intc";
+ reg = <0x20000 0x2000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host_intr0", "host_intr1",
+ "host_intr2", "host_intr3",
+ "host_intr4", "host_intr5",
+ "host_intr6", "host_intr7";
+ };
+
+ pru0: pru@34000 {
+ compatible = "ti,am625-pru";
+ reg = <0x34000 0x3000>,
+ <0x22000 0x100>,
+ <0x22400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am62x-pru0-fw";
+ interrupt-parent = <&pruss_intc>;
+ interrupts = <16 2 2>;
+ interrupt-names = "vring";
+ };
+
+ pru1: pru@38000 {
+ compatible = "ti,am625-pru";
+ reg = <0x38000 0x3000>,
+ <0x24000 0x100>,
+ <0x24400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am62x-pru1-fw";
+ interrupt-parent = <&pruss_intc>;
+ interrupts = <18 3 3>;
+ interrupt-names = "vring";
+ };
+ };
+
gpmc0: memory-controller@3b000000 {
compatible = "ti,am64-gpmc";
power-domains = <&k3_pds 80 TI_SCI_PD_EXCLUSIVE>;
diff --git a/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
index 55ed418c023b..10e6b5c08619 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
@@ -64,6 +64,18 @@
no-map;
};
+ wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@9da00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9da00000 0x00 0x100000>;
+ no-map;
+ };
+
+ wkup_r5fss0_core0_memory_region: r5f-memory@9db00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9db00000 0x00 0xc00000>;
+ no-map;
+ };
+
secure_tfa_ddr: tfa@9e780000 {
reg = <0x00 0x9e780000 0x00 0x80000>;
alignment = <0x1000>;
@@ -75,12 +87,6 @@
alignment = <0x1000>;
no-map;
};
-
- wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@9db00000 {
- compatible = "shared-dma-pool";
- reg = <0x00 0x9db00000 0x00 0x00c00000>;
- no-map;
- };
};
vcc_5v0_som: regulator-vcc-5v0-som {
@@ -240,10 +246,17 @@
};
&mailbox0_cluster0 {
+ status = "okay";
+
mbox_m4_0: mbox-m4-0 {
ti,mbox-rx = <0 0 0>;
ti,mbox-tx = <1 0 0>;
};
+
+ mbox_r5_0: mbox-r5-0 {
+ ti,mbox-rx = <2 0 0>;
+ ti,mbox-tx = <3 0 0>;
+ };
};
&main_pktdma {
@@ -381,8 +394,17 @@
&sdhci0 {
pinctrl-names = "default";
pinctrl-0 = <&main_mmc0_pins_default>;
- disable-wp;
non-removable;
bootph-all;
status = "okay";
};
+
+&wkup_r5fss0 {
+ status = "okay";
+};
+
+&wkup_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster0 &mbox_r5_0>;
+ memory-region = <&wkup_r5fss0_core0_dma_memory_region>,
+ <&wkup_r5fss0_core0_memory_region>;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-pocketbeagle2.dts b/arch/arm64/boot/dts/ti/k3-am62-pocketbeagle2.dts
new file mode 100644
index 000000000000..2e4cf65ee323
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62-pocketbeagle2.dts
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * https://www.beagleboard.org/boards/pocketbeagle-2
+ *
+ * Copyright (C) 2025 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2025 Robert Nelson, BeagleBoard.org Foundation
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include "k3-am625.dtsi"
+
+/ {
+ compatible = "beagle,am62-pocketbeagle2", "ti,am625";
+ model = "BeagleBoard.org PocketBeagle2";
+
+ aliases {
+ serial0 = &wkup_uart0;
+ serial1 = &main_uart1;
+ serial2 = &main_uart6;
+ serial3 = &main_uart0;
+ mmc1 = &sdhci1;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ i2c0 = &main_i2c0;
+ i2c2 = &main_i2c2;
+ i2c3 = &wkup_i2c0;
+ };
+
+ chosen {
+ stdout-path = &main_uart6;
+ };
+
+ memory@80000000 {
+ /* 512MB RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x20000000>;
+ device_type = "memory";
+ bootph-pre-ram;
+ };
+
+ reserved_memory: reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* global cma region */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x00 0x8000000>;
+ linux,cma-default;
+ };
+
+ mcu_m4fss_dma_memory_region: m4f-dma-memory@9cb00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9cb00000 0x00 0x100000>;
+ no-map;
+ };
+
+ mcu_m4fss_memory_region: m4f-memory@9cc00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9cc00000 0x00 0xe00000>;
+ no-map;
+ };
+
+ secure_tfa_ddr: tfa@9e780000 {
+ reg = <0x00 0x9e780000 0x00 0x80000>;
+ alignment = <0x1000>;
+ no-map;
+ };
+
+ secure_ddr: optee@9e800000 {
+ reg = <0x00 0x9e800000 0x00 0x01800000>;
+ alignment = <0x1000>;
+ no-map;
+ };
+
+ wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@9db00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9db00000 0x00 0xc00000>;
+ no-map;
+ };
+ };
+
+ vsys_5v0: regulator-1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ bootph-all;
+ };
+
+ vdd_3v3: regulator-2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vsys_5v0>;
+ regulator-always-on;
+ regulator-boot-on;
+ bootph-all;
+ };
+
+ vdd_mmc1: regulator-3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_mmc1";
+ pinctrl-names = "default";
+ pinctrl-0 = <&vdd_3v3_sd_pins_default>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ enable-active-high;
+ regulator-always-on;
+ vin-supply = <&vdd_3v3>;
+ gpio = <&main_gpio0 0 GPIO_ACTIVE_HIGH>;
+ bootph-all;
+ };
+
+ vdd_sd_dv: regulator-4 {
+ compatible = "regulator-gpio";
+ regulator-name = "sd_hs200_switch";
+ pinctrl-names = "default";
+ pinctrl-0 = <&vdd_sd_dv_pins_default>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ vin-supply = <&vdd_3v3>;
+ gpios = <&main_gpio1 49 GPIO_ACTIVE_HIGH>;
+ states = <1800000 0x0>,
+ <3300000 0x1>;
+ bootph-all;
+ };
+
+ adc_vref: regulator-5 {
+ compatible = "regulator-fixed";
+ regulator-name = "default";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins_default>;
+ bootph-all;
+
+ led-1 {
+ function = LED_FUNCTION_HEARTBEAT;
+ color = <LED_COLOR_ID_GREEN>;
+ linux,default-trigger = "heartbeat";
+ gpios = <&main_gpio0 6 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ bootph-all;
+ };
+
+ led-2 {
+ function = LED_FUNCTION_DISK_ACTIVITY;
+ color = <LED_COLOR_ID_GREEN>;
+ linux,default-trigger = "mmc1";
+ gpios = <&main_gpio0 5 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ bootph-all;
+ };
+
+ led-3 {
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&main_gpio0 4 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ bootph-all;
+ };
+
+ led-4 {
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&main_gpio0 3 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ bootph-all;
+ };
+ };
+};
+
+&main_pmx0 {
+ led_pins_default: led-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x000c, PIN_OUTPUT, 7) /* (E25) OSPI0_D0.GPIO0_3 */
+ AM62X_IOPAD(0x0010, PIN_OUTPUT, 7) /* (G24) OSPI0_D1.GPIO0_4 */
+ AM62X_IOPAD(0x0014, PIN_OUTPUT, 7) /* (F25) OSPI0_D2.GPIO0_5 */
+ AM62X_IOPAD(0x0018, PIN_OUTPUT, 7) /* (F24) OSPI0_D3.GPIO0_6 */
+ >;
+ bootph-all;
+ };
+
+ main_i2c0_pins_default: main-i2c0-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x01e0, PIN_INPUT_PULLUP, 0) /* (B16) I2C0_SCL */
+ AM62X_IOPAD(0x01e4, PIN_INPUT_PULLUP, 0) /* (A16) I2C0_SDA */
+ >;
+ bootph-all;
+ };
+
+ main_i2c2_pins_default: main-i2c2-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x00b0, PIN_INPUT_PULLUP, 1) /* (K22) GPMC0_CSn2.I2C2_SCL */
+ AM62X_IOPAD(0x00b4, PIN_INPUT_PULLUP, 1) /* (K24) GPMC0_CSn3.I2C2_SDA */
+ >;
+ bootph-all;
+ };
+
+ main_uart0_pins_default: main-uart0-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x1c8, PIN_INPUT, 0) /* (D14/A13) UART0_RXD */
+ AM62X_IOPAD(0x1cc, PIN_OUTPUT, 0) /* (E14/E11) UART0_TXD */
+ >;
+ bootph-all;
+ };
+
+ main_uart1_pins_default: main-uart1-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x194, PIN_INPUT, 2) /* (B19/B18) MCASP0_AXR3.UART1_CTSn */
+ AM62X_IOPAD(0x198, PIN_OUTPUT, 2) /* (A19/B17) MCASP0_AXR2.UART1_RTSn */
+ AM62X_IOPAD(0x1ac, PIN_INPUT, 2) /* (E19/D15) MCASP0_AFSR.UART1_RXD */
+ AM62X_IOPAD(0x1b0, PIN_OUTPUT, 2) /* (A20/D16) MCASP0_ACLKR.UART1_TXD */
+ >;
+ bootph-all;
+ };
+
+ main_uart6_pins_default: main-uart6-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x001c, PIN_INPUT, 3) /* (J23) OSPI0_D4.UART6_RXD */
+ AM62X_IOPAD(0x0020, PIN_OUTPUT, 3) /* (J25) OSPI0_D5.UART6_TXD */
+ >;
+ };
+
+ main_mmc1_pins_default: main-mmc1-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x23c, PIN_INPUT, 0) /* (A21/C18) MMC1_CMD */
+ AM62X_IOPAD(0x234, PIN_INPUT, 0) /* (B22/A20) MMC1_CLK */
+ AM62X_IOPAD(0x230, PIN_INPUT, 0) /* (A22/A19) MMC1_DAT0 */
+ AM62X_IOPAD(0x22c, PIN_INPUT, 0) /* (B21/B19) MMC1_DAT1 */
+ AM62X_IOPAD(0x228, PIN_INPUT, 0) /* (C21/B20) MMC1_DAT2 */
+ AM62X_IOPAD(0x224, PIN_INPUT, 0) /* (D22/C19) MMC1_DAT3 */
+ AM62X_IOPAD(0x240, PIN_INPUT, 7) /* (D17/C15) MMC1_SDCD.GPIO1_48 */
+ >;
+ bootph-all;
+ };
+
+ vdd_sd_dv_pins_default: vdd-sd-dv-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x0244, PIN_OUTPUT, 7) /* (P25) GPMC0_CLK.GPIO1_49 */
+ >;
+ bootph-all;
+ };
+
+ pmic_irq_pins_default: pmic-irq-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x01f4, PIN_INPUT_PULLUP, 0) /* (D16) EXTINTn */
+ >;
+ bootph-all;
+ };
+
+ vdd_3v3_sd_pins_default: vdd-3v3-sd-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x0000, PIN_OUTPUT, 7) /* (H24) OSPI0_CLK.GPIO0_0 */
+ >;
+ bootph-all;
+ };
+
+ usb1_pins_default: usb1-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x0258, PIN_INPUT, 0) /* (F18) USB1_DRVVBUS */
+ >;
+ bootph-all;
+ };
+
+ epwm2_pins_default: epwm2-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x01e8, PIN_OUTPUT, 8) /* (B17) I2C1_SCL.EHRPWM2_A */
+ >;
+ };
+};
+
+&epwm2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&epwm2_pins_default>;
+};
+
+&mailbox0_cluster0 {
+ mbox_m4_0: mbox-m4-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&main_uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart0_pins_default>;
+ bootph-all;
+ status = "okay";
+};
+
+&main_uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart1_pins_default>;
+ bootph-pre-ram;
+ status = "reserved";
+};
+
+&main_uart6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart6_pins_default>;
+ bootph-all;
+ status = "okay";
+};
+
+&main_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c0_pins_default>;
+ clock-frequency = <400000>;
+ bootph-all;
+ status = "okay";
+
+ ad7291: adc@20 {
+ /* Emulated with MSPM0L1105 */
+ compatible = "adi,ad7291";
+ reg = <0x20>;
+ vref-supply = <&adc_vref>;
+ };
+
+ eeprom: eeprom@50 {
+ /* Emulated with MSPM0L1105 */
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+};
+
+&main_i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c2_pins_default>;
+ clock-frequency = <400000>;
+ bootph-all;
+ status = "okay";
+};
+
+&mcu_m4fss {
+ mboxes = <&mailbox0_cluster0 &mbox_m4_0>;
+ memory-region = <&mcu_m4fss_dma_memory_region>,
+ <&mcu_m4fss_memory_region>;
+ status = "okay";
+};
+
+&mcu_pmx0 {
+ wkup_uart0_pins_default: wkup-uart0-default-pins {
+ pinctrl-single,pins = <
+ AM62X_MCU_IOPAD(0x02c, PIN_INPUT, 0) /* (C6/A7) WKUP_UART0_CTSn */
+ AM62X_MCU_IOPAD(0x030, PIN_OUTPUT, 0) /* (A4/B4) WKUP_UART0_RTSn */
+ AM62X_MCU_IOPAD(0x024, PIN_INPUT, 0) /* (B4/B5) WKUP_UART0_RXD */
+ AM62X_MCU_IOPAD(0x028, PIN_OUTPUT, 0) /* (C5/C6) WKUP_UART0_TXD */
+ >;
+ bootph-all;
+ };
+
+ wkup_i2c0_pins_default: wkup-i2c0-default-pins {
+ pinctrl-single,pins = <
+ AM62X_MCU_IOPAD(0x004c, PIN_INPUT_PULLUP, 0) /* (B9) WKUP_I2C0_SCL */
+ AM62X_MCU_IOPAD(0x0050, PIN_INPUT_PULLUP, 0) /* (A9) WKUP_I2C0_SDA */
+ >;
+ bootph-all;
+ };
+};
+
+&sdhci1 {
+ /* SD/MMC */
+ vmmc-supply = <&vdd_mmc1>;
+ vqmmc-supply = <&vdd_sd_dv>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mmc1_pins_default>;
+ disable-wp;
+ cd-gpios = <&main_gpio1 48 GPIO_ACTIVE_LOW>;
+ cd-debounce-delay-ms = <100>;
+ bootph-all;
+ ti,fails-without-test-cd;
+ status = "okay";
+};
+
+&usbss0 {
+ bootph-all;
+ ti,vbus-divider;
+ status = "okay";
+};
+
+&usb0 {
+ /* This is a Type-C socket, but wired as USB 2.0 */
+ dr_mode = "peripheral";
+ bootph-all;
+};
+
+&usbss1 {
+ ti,vbus-divider;
+ status = "okay";
+};
+
+&usb1 {
+ /*
+ * The default set here is compatible with the original PocketBeagle;
+ * expansion boards assumed this was pre-configured as host.
+ */
+ dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb1_pins_default>;
+};
+
+&wkup_uart0 {
+ /* WKUP UART0 is used by Device Manager firmware */
+ pinctrl-names = "default";
+ pinctrl-0 = <&wkup_uart0_pins_default>;
+ bootph-all;
+ status = "reserved";
+};
+
+&wkup_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wkup_i2c0_pins_default>;
+ clock-frequency = <100000>;
+ bootph-all;
+ status = "okay";
+
+ tps65219: pmic@30 {
+ compatible = "ti,tps65219";
+ reg = <0x30>;
+ buck1-supply = <&vsys_5v0>;
+ buck2-supply = <&vsys_5v0>;
+ buck3-supply = <&vsys_5v0>;
+ ldo1-supply = <&vdd_3v3>;
+ ldo2-supply = <&buck2_reg>;
+ ldo3-supply = <&vdd_3v3>;
+ ldo4-supply = <&vdd_3v3>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_irq_pins_default>;
+ interrupt-parent = <&gic500>;
+ interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ bootph-all;
+ system-power-controller;
+ ti,power-button;
+
+ regulators {
+ buck1_reg: buck1 {
+ regulator-name = "VDD_CORE";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck2_reg: buck2 {
+ regulator-name = "VDD_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck3_reg: buck3 {
+ regulator-name = "VDD_1V2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1_reg: ldo1 {
+ /*
+ * Regulator is left as-is (unused); vdd_sd is
+ * controlled via GPIO with bypass config as per
+ * the NVM configuration.
+ */
+ regulator-name = "VDD_SD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-allow-bypass;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: ldo2 {
+ regulator-name = "VDDA_0V85";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3_reg: ldo3 {
+ regulator-name = "VDDA_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4_reg: ldo4 {
+ regulator-name = "VDD_2V5";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
index fcc4cb2e9389..2b5f5e50b578 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
@@ -114,7 +114,7 @@
/* EEPROM */
eeprom@57 {
- compatible = "st,24c02";
+ compatible = "st,24c02", "atmel,24c02";
reg = <0x57>;
pagesize = <16>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin-yavia.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin-yavia.dtsi
index 7372d392ec8a..9a2483cf5d70 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin-yavia.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin-yavia.dtsi
@@ -118,7 +118,7 @@
/* EEPROM */
eeprom@57 {
- compatible = "st,24c02";
+ compatible = "st,24c02", "atmel,24c02";
reg = <0x57>;
pagesize = <16>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi
index 9b8a1f85aa15..6549b7efa656 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi
@@ -106,6 +106,31 @@
status = "reserved";
};
+ wkup_r5fss0: r5fss@78000000 {
+ compatible = "ti,am62-r5fss";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x78000000 0x00 0x78000000 0x8000>,
+ <0x78100000 0x00 0x78100000 0x8000>;
+ power-domains = <&k3_pds 119 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+
+ wkup_r5fss0_core0: r5f@78000000 {
+ compatible = "ti,am62-r5f";
+ reg = <0x78000000 0x00008000>,
+ <0x78100000 0x00008000>;
+ reg-names = "atcm", "btcm";
+ resets = <&k3_reset 121 1>;
+ firmware-name = "am62-wkup-r5f0_0-fw";
+ ti,atcm-enable = <1>;
+ ti,btcm-enable = <1>;
+ ti,loczrama = <1>;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <121>;
+ ti,sci-proc-ids = <0x01 0xff>;
+ };
+ };
+
wkup_vtm0: temperature-sensor@b00000 {
compatible = "ti,j7200-vtm";
reg = <0x00 0xb00000 0x00 0x400>,
diff --git a/arch/arm64/boot/dts/ti/k3-am62.dtsi b/arch/arm64/boot/dts/ti/k3-am62.dtsi
index bfb55ca11323..59f6dff552ed 100644
--- a/arch/arm64/boot/dts/ti/k3-am62.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62.dtsi
@@ -86,7 +86,9 @@
/* Wakeup Domain Range */
<0x00 0x00b00000 0x00 0x00b00000 0x00 0x00002400>, /* VTM */
<0x00 0x2b000000 0x00 0x2b000000 0x00 0x00300400>,
- <0x00 0x43000000 0x00 0x43000000 0x00 0x00020000>;
+ <0x00 0x43000000 0x00 0x43000000 0x00 0x00020000>,
+ <0x00 0x78000000 0x00 0x78000000 0x00 0x00008000>, /* DM R5 ATCM */
+ <0x00 0x78100000 0x00 0x78100000 0x00 0x00008000>; /* DM R5 BTCM */
cbass_mcu: bus@4000000 {
bootph-all;
@@ -103,7 +105,9 @@
#size-cells = <2>;
ranges = <0x00 0x00b00000 0x00 0x00b00000 0x00 0x00002400>, /* VTM */
<0x00 0x2b000000 0x00 0x2b000000 0x00 0x00300400>, /* Peripheral Window */
- <0x00 0x43000000 0x00 0x43000000 0x00 0x00020000>;
+ <0x00 0x43000000 0x00 0x43000000 0x00 0x00020000>,
+ <0x00 0x78000000 0x00 0x78000000 0x00 0x00008000>, /* DM R5 ATCM */
+ <0x00 0x78100000 0x00 0x78100000 0x00 0x00008000>; /* DM R5 BTCM */
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay-csi2-ov5640.dtso b/arch/arm64/boot/dts/ti/k3-am625-beagleplay-csi2-ov5640.dtso
index 3b4643b7d19c..000305c9e366 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay-csi2-ov5640.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay-csi2-ov5640.dtso
@@ -15,6 +15,33 @@
#clock-cells = <0>;
clock-frequency = <12000000>;
};
+
+ reg_2p8v: regulator-2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vdd_3v3>;
+ regulator-always-on;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vdd_3v3>;
+ regulator-always-on;
+ };
+
+ reg_1p5v: regulator-1p5v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P5V";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ vin-supply = <&vdd_3v3>;
+ regulator-always-on;
+ };
};
&main_gpio0 {
@@ -39,6 +66,10 @@
clocks = <&clk_ov5640_fixed>;
clock-names = "xclk";
+ AVDD-supply = <&reg_2p8v>;
+ DOVDD-supply = <&reg_1p8v>;
+ DVDD-supply = <&reg_1p5v>;
+
port {
csi2_cam0: endpoint {
remote-endpoint = <&csi2rx0_in_sensor>;
diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay-csi2-tevi-ov5640.dtso b/arch/arm64/boot/dts/ti/k3-am625-beagleplay-csi2-tevi-ov5640.dtso
index 81a2763d43c6..8a7a9ece08af 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay-csi2-tevi-ov5640.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay-csi2-tevi-ov5640.dtso
@@ -15,6 +15,33 @@
#clock-cells = <0>;
clock-frequency = <24000000>;
};
+
+ reg_2p8v: regulator-2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vdd_3v3>;
+ regulator-always-on;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vdd_3v3>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vdd_3v3>;
+ regulator-always-on;
+ };
};
&main_gpio0 {
@@ -39,6 +66,10 @@
clocks = <&clk_ov5640_fixed>;
clock-names = "xclk";
+ AVDD-supply = <&reg_2p8v>;
+ DOVDD-supply = <&reg_1p8v>;
+ DVDD-supply = <&reg_3p3v>;
+
port {
csi2_cam0: endpoint {
remote-endpoint = <&csi2rx0_in_sensor>;
diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
index a5469f2712f0..72b09f9c69d8 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
@@ -832,9 +832,9 @@
&sdhci0 {
bootph-all;
+ non-removable;
pinctrl-names = "default";
pinctrl-0 = <&emmc_pins_default>;
- disable-wp;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
index a1daba7b1fad..63e097ddf988 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
@@ -575,8 +575,6 @@
power-domains = <&k3_pds 57 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 57 5>, <&k3_clks 57 6>;
clock-names = "clk_ahb", "clk_xin";
- assigned-clocks = <&k3_clks 57 6>;
- assigned-clock-parents = <&k3_clks 57 8>;
bus-width = <8>;
mmc-hs200-1_8v;
ti,clkbuf-sel = <0x7>;
@@ -1123,6 +1121,18 @@
power-domains = <&k3_pds 204 TI_SCI_PD_EXCLUSIVE>;
};
+ c7x_0: dsp@7e000000 {
+ compatible = "ti,am62a-c7xv-dsp";
+ reg = <0x00 0x7e000000 0x00 0x00100000>;
+ reg-names = "l2sram";
+ resets = <&k3_reset 208 1>;
+ firmware-name = "am62a-c71_0-fw";
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <208>;
+ ti,sci-proc-ids = <0x04 0xff>;
+ status = "disabled";
+ };
+
e5010: jpeg-encoder@fd20000 {
compatible = "ti,am62a-jpeg-enc", "img,e5010-jpeg-enc";
reg = <0x00 0xfd20000 0x00 0x100>,
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi
index 9ed9d703ff24..ee961ced7208 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi
@@ -174,4 +174,29 @@
bosch,mram-cfg = <0x0 128 64 64 64 64 32 32>;
status = "disabled";
};
+
+ mcu_r5fss0: r5fss@79000000 {
+ compatible = "ti,am62-r5fss";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x79000000 0x00 0x79000000 0x8000>,
+ <0x79020000 0x00 0x79020000 0x8000>;
+ power-domains = <&k3_pds 7 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+
+ mcu_r5fss0_core0: r5f@79000000 {
+ compatible = "ti,am62-r5f";
+ reg = <0x79000000 0x00008000>,
+ <0x79020000 0x00008000>;
+ reg-names = "atcm", "btcm";
+ resets = <&k3_reset 9 1>;
+ firmware-name = "am62a-mcu-r5f0_0-fw";
+ ti,atcm-enable = <0>;
+ ti,btcm-enable = <1>;
+ ti,loczrama = <0>;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <9>;
+ ti,sci-proc-ids = <0x03 0xff>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
index 147d56b87984..5dc5d2cb20cc 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
@@ -59,6 +59,42 @@
linux,cma-default;
};
+ c7x_0_dma_memory_region: c7x-dma-memory@99800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x99800000 0x00 0x100000>;
+ no-map;
+ };
+
+ c7x_0_memory_region: c7x-memory@99900000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x99900000 0x00 0xf00000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@9b800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9b800000 0x00 0x100000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core0_memory_region: r5f-dma-memory@9b900000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9b900000 0x00 0xf00000>;
+ no-map;
+ };
+
+ wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@9c800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9c800000 0x00 0x100000>;
+ no-map;
+ };
+
+ wkup_r5fss0_core0_memory_region: r5f-dma-memory@9c900000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9c900000 0x00 0xf00000>;
+ no-map;
+ };
+
secure_tfa_ddr: tfa@9e780000 {
reg = <0x00 0x9e780000 0x00 0x80000>;
alignment = <0x1000>;
@@ -70,12 +106,6 @@
alignment = <0x1000>;
no-map;
};
-
- wkup_r5fss0_core0_memory_region: r5f-dma-memory@9c900000 {
- compatible = "shared-dma-pool";
- reg = <0x00 0x9c900000 0x00 0x01e00000>;
- no-map;
- };
};
vcc_5v0_som: regulator-vcc-5v0-som {
@@ -170,6 +200,13 @@
};
};
+&c7x_0 {
+ mboxes = <&mailbox0_cluster1 &mbox_c7x_0>;
+ memory-region = <&c7x_0_dma_memory_region>,
+ <&c7x_0_memory_region>;
+ status = "okay";
+};
+
&cpsw3g {
pinctrl-names = "default";
pinctrl-0 = <&main_rgmii1_pins_default>;
@@ -200,6 +237,33 @@
status = "okay";
};
+&mailbox0_cluster0 {
+ status = "okay";
+
+ mbox_r5_0: mbox-r5-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&mailbox0_cluster1 {
+ status = "okay";
+
+ mbox_c7x_0: mbox-c7x-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&mailbox0_cluster2 {
+ status = "okay";
+
+ mbox_mcu_r5_0: mbox-mcu-r5-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
&main_i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&main_i2c0_pins_default>;
@@ -315,6 +379,26 @@
bootph-all;
};
+/* main_rti4 is used by C7x DSP */
+&main_rti4 {
+ status = "reserved";
+};
+
+/* main_timer2 is used by C7x DSP */
+&main_timer2 {
+ status = "reserved";
+};
+
+&mcu_r5fss0 {
+ status = "okay";
+};
+
+&mcu_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster2 &mbox_mcu_r5_0>;
+ memory-region = <&mcu_r5fss0_core0_dma_memory_region>,
+ <&mcu_r5fss0_core0_memory_region>;
+};
+
&ospi0 {
pinctrl-names = "default";
pinctrl-0 = <&ospi0_pins_default>;
@@ -338,8 +422,17 @@
&sdhci0 {
pinctrl-names = "default";
pinctrl-0 = <&main_mmc0_pins_default>;
- disable-wp;
non-removable;
bootph-all;
status = "okay";
};
+
+&wkup_r5fss0 {
+ status = "okay";
+};
+
+&wkup_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster0 &mbox_r5_0>;
+ memory-region = <&wkup_r5fss0_core0_dma_memory_region>,
+ <&wkup_r5fss0_core0_memory_region>;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-thermal.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-thermal.dtsi
index c7486fb2a5b4..3aa127157d24 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-thermal.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-thermal.dtsi
@@ -12,12 +12,29 @@ thermal_zones: thermal-zones {
thermal-sensors = <&wkup_vtm0 0>;
trips {
+ main0_alert: main0-alert {
+ temperature = <115000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
main0_crit: main0-crit {
temperature = <125000>; /* milliCelsius */
hysteresis = <2000>; /* milliCelsius */
type = "critical";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&main0_alert>;
+ cooling-device =
+ <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
main1_thermal: main1-thermal {
@@ -26,25 +43,59 @@ thermal_zones: thermal-zones {
thermal-sensors = <&wkup_vtm0 1>;
trips {
+ main1_alert: main1-alert {
+ temperature = <115000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
main1_crit: main1-crit {
temperature = <125000>; /* milliCelsius */
hysteresis = <2000>; /* milliCelsius */
type = "critical";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&main1_alert>;
+ cooling-device =
+ <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
main2_thermal: main2-thermal {
- polling-delay-passive = <250>; /* milliSeconds */
- polling-delay = <500>; /* milliSeconds */
- thermal-sensors = <&wkup_vtm0 2>;
+ polling-delay-passive = <250>; /* milliSeconds */
+ polling-delay = <500>; /* milliSeconds */
+ thermal-sensors = <&wkup_vtm0 2>;
trips {
+ main2_alert: main2-alert {
+ temperature = <115000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
main2_crit: main2-crit {
temperature = <125000>; /* milliCelsius */
hysteresis = <2000>; /* milliCelsius */
type = "critical";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&main2_alert>;
+ cooling-device =
+ <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi
index b2c8f5351743..259ae6ebbfb5 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi
@@ -103,6 +103,31 @@
status = "reserved";
};
+ wkup_r5fss0: r5fss@78000000 {
+ compatible = "ti,am62-r5fss";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x78000000 0x00 0x78000000 0x8000>,
+ <0x78100000 0x00 0x78100000 0x8000>;
+ power-domains = <&k3_pds 119 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+
+ wkup_r5fss0_core0: r5f@78000000 {
+ compatible = "ti,am62-r5f";
+ reg = <0x78000000 0x00008000>,
+ <0x78100000 0x00008000>;
+ reg-names = "atcm", "btcm";
+ resets = <&k3_reset 121 1>;
+ firmware-name = "am62a-wkup-r5f0_0-fw";
+ ti,atcm-enable = <1>;
+ ti,btcm-enable = <1>;
+ ti,loczrama = <1>;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <121>;
+ ti,sci-proc-ids = <0x01 0xff>;
+ };
+ };
+
wkup_vtm0: temperature-sensor@b00000 {
compatible = "ti,j7200-vtm";
reg = <0x00 0xb00000 0x00 0x400>,
diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
index 1c9d95696c83..b27759026014 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
@@ -52,6 +52,42 @@
linux,cma-default;
};
+ c7x_0_dma_memory_region: c7x-dma-memory@99800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x99800000 0x00 0x100000>;
+ no-map;
+ };
+
+ c7x_0_memory_region: c7x-memory@99900000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x99900000 0x00 0xf00000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@9b800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9b800000 0x00 0x100000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core0_memory_region: r5f-dma-memory@9b900000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9b900000 0x00 0xf00000>;
+ no-map;
+ };
+
+ wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@9c800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9c800000 0x00 0x100000>;
+ no-map;
+ };
+
+ wkup_r5fss0_core0_memory_region: r5f-dma-memory@9c900000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9c900000 0x00 0xf00000>;
+ no-map;
+ };
+
secure_tfa_ddr: tfa@9e780000 {
reg = <0x00 0x9e780000 0x00 0x80000>;
alignment = <0x1000>;
@@ -63,12 +99,6 @@
alignment = <0x1000>;
no-map;
};
-
- wkup_r5fss0_core0_memory_region: r5f-dma-memory@9c900000 {
- compatible = "shared-dma-pool";
- reg = <0x00 0x9c900000 0x00 0x01e00000>;
- no-map;
- };
};
opp-table {
@@ -313,6 +343,7 @@
AM62AX_IOPAD(0x1fc, PIN_INPUT_PULLUP, 0) /* (AD2) MMC0_DAT6 */
AM62AX_IOPAD(0x1f8, PIN_INPUT_PULLUP, 0) /* (AC2) MMC0_DAT7 */
>;
+ bootph-all;
};
main_mmc1_pins_default: main-mmc1-default-pins {
@@ -383,6 +414,25 @@
AM62AX_IOPAD(0x07c, PIN_OUTPUT, 7) /* (M19) GPMC0_CLK.GPIO0_31 */
>;
};
+
+ main_ecap0_pins_default: main-ecap0-default-pins {
+ pinctrl-single,pins = <
+ AM62AX_IOPAD(0x01b8, PIN_OUTPUT, 3) /* (C16) SPI0_CS1.ECAP0_IN_APWM_OUT */
+ >;
+ };
+
+ main_ecap2_pins_default: main-ecap2-default-pins {
+ pinctrl-single,pins = <
+ AM62AX_IOPAD(0x01a4, PIN_OUTPUT, 2) /* (A19) MCASP0_ACLKX.ECAP2_IN_APWM_OUT */
+ >;
+ };
+
+ main_epwm1_pins_default: main-epwm1-default-pins {
+ pinctrl-single,pins = <
+ AM62AX_IOPAD(0x019c, PIN_OUTPUT, 6) /* (B18) MCASP0_AXR1.EHRPWM1_A */
+ AM62AX_IOPAD(0x01a0, PIN_OUTPUT, 6) /* (B20) MCASP0_AXR0.EHRPWM1_B */
+ >;
+ };
};
&mcu_pmx0 {
@@ -614,7 +664,7 @@
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&main_mmc0_pins_default>;
- disable-wp;
+ bootph-all;
};
&sdhci1 {
@@ -652,6 +702,11 @@
status = "reserved";
};
+/* main_timer2 is used by C7x DSP */
+&main_timer2 {
+ status = "reserved";
+};
+
&usbss0 {
status = "okay";
ti,vbus-divider;
@@ -741,3 +796,83 @@
};
};
};
+
+&ecap0 {
+ /* P26 of J3 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_ecap0_pins_default>;
+ status = "okay";
+};
+
+&ecap2 {
+ /* P11 of J3 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_ecap2_pins_default>;
+ status = "okay";
+};
+
+&epwm1 {
+ /* P36/P33 of J3 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_epwm1_pins_default>;
+ status = "okay";
+};
+
+&mailbox0_cluster0 {
+ status = "okay";
+
+ mbox_r5_0: mbox-r5-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&mailbox0_cluster1 {
+ status = "okay";
+
+ mbox_c7x_0: mbox-c7x-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&mailbox0_cluster2 {
+ status = "okay";
+
+ mbox_mcu_r5_0: mbox-mcu-r5-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&wkup_r5fss0 {
+ status = "okay";
+};
+
+&wkup_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster0>, <&mbox_r5_0>;
+ memory-region = <&wkup_r5fss0_core0_dma_memory_region>,
+ <&wkup_r5fss0_core0_memory_region>;
+};
+
+&mcu_r5fss0 {
+ status = "okay";
+};
+
+&mcu_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster2>, <&mbox_mcu_r5_0>;
+ memory-region = <&mcu_r5fss0_core0_dma_memory_region>,
+ <&mcu_r5fss0_core0_memory_region>;
+};
+
+&c7x_0 {
+ mboxes = <&mailbox0_cluster1>, <&mbox_c7x_0>;
+ memory-region = <&c7x_0_dma_memory_region>,
+ <&c7x_0_memory_region>;
+ status = "okay";
+};
+
+/* main_rti4 is used by C7x DSP */
+&main_rti4 {
+ status = "reserved";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a7.dtsi b/arch/arm64/boot/dts/ti/k3-am62a7.dtsi
index 6c99221beb6b..b6e5eee99370 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a7.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a7.dtsi
@@ -50,6 +50,7 @@
next-level-cache = <&L2_0>;
operating-points-v2 = <&a53_opp_table>;
clocks = <&k3_clks 135 0>;
+ #cooling-cells = <2>;
};
cpu1: cpu@1 {
@@ -66,6 +67,7 @@
next-level-cache = <&L2_0>;
operating-points-v2 = <&a53_opp_table>;
clocks = <&k3_clks 136 0>;
+ #cooling-cells = <2>;
};
cpu2: cpu@2 {
@@ -82,6 +84,7 @@
next-level-cache = <&L2_0>;
operating-points-v2 = <&a53_opp_table>;
clocks = <&k3_clks 137 0>;
+ #cooling-cells = <2>;
};
cpu3: cpu@3 {
@@ -98,6 +101,7 @@
next-level-cache = <&L2_0>;
operating-points-v2 = <&a53_opp_table>;
clocks = <&k3_clks 138 0>;
+ #cooling-cells = <2>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
index 6e3beb5c2e01..fa55c43ca28d 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
@@ -227,9 +227,18 @@
reg = <0x00 0x40900000 0x00 0x1200>;
#address-cells = <2>;
#size-cells = <2>;
+ ranges = <0x00 0x40900000 0x00 0x40900000 0x00 0x30000>;
+
dmas = <&main_pktdma 0xf501 0>, <&main_pktdma 0x7506 0>,
<&main_pktdma 0x7507 0>;
dma-names = "tx", "rx1", "rx2";
+
+ rng: rng@40910000 {
+ compatible = "inside-secure,safexcel-eip76";
+ reg = <0x00 0x40910000 0x0 0x7d>;
+ interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>;
+ status = "reserved";
+ };
};
secure_proxy_sa3: mailbox@43600000 {
@@ -564,8 +573,6 @@
power-domains = <&k3_pds 57 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 57 1>, <&k3_clks 57 2>;
clock-names = "clk_ahb", "clk_xin";
- assigned-clocks = <&k3_clks 57 2>;
- assigned-clock-parents = <&k3_clks 57 4>;
bus-width = <8>;
mmc-ddr-1_8v;
mmc-hs200-1_8v;
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-verdin-dahlia.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-verdin-dahlia.dtsi
new file mode 100644
index 000000000000..ee3feac6ea5d
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p-verdin-dahlia.dtsi
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * Common dtsi for Verdin AM62P SoM on Dahlia carrier board
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/dahlia-carrier-board-kit
+ */
+
+/ {
+ aliases {
+ eeprom1 = &carrier_eeprom;
+ };
+
+ reg_1v8_sw: regulator-1v8-sw {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "On-carrier +V1.8_SW";
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,bitclock-master = <&codec_dai>;
+ simple-audio-card,format = "i2s";
+ simple-audio-card,frame-master = <&codec_dai>;
+ simple-audio-card,mclk-fs = <256>;
+ simple-audio-card,name = "verdin-wm8904";
+ simple-audio-card,routing =
+ "Headphone Jack", "HPOUTL",
+ "Headphone Jack", "HPOUTR",
+ "IN2L", "Line In Jack",
+ "IN2R", "Line In Jack",
+ "Microphone Jack", "MICBIAS",
+ "IN1L", "Microphone Jack";
+ simple-audio-card,widgets =
+ "Microphone", "Microphone Jack",
+ "Headphone", "Headphone Jack",
+ "Line", "Line In Jack";
+
+ codec_dai: simple-audio-card,codec {
+ sound-dai = <&wm8904_1a>;
+ };
+
+ simple-audio-card,cpu {
+ sound-dai = <&mcasp0>;
+ };
+ };
+};
+
+/* Verdin ETHs */
+&cpsw3g {
+ status = "okay";
+};
+
+/* MDIO, shared by Verdin ETH_1 (On-module PHY) and Verdin ETH_2_RGMII */
+&cpsw3g_mdio {
+ status = "okay";
+};
+
+/* Verdin ETH_1 (On-module PHY) */
+&cpsw_port1 {
+ status = "okay";
+};
+
+/* Verdin PWM_3_DSI */
+&epwm0 {
+ status = "okay";
+};
+
+/* Verdin PWM_1, PWM_2 */
+&epwm2 {
+ status = "okay";
+};
+
+&main_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie_1_reset>,
+ <&pinctrl_gpio_5>,
+ <&pinctrl_gpio_6>,
+ <&pinctrl_gpio_7>,
+ <&pinctrl_gpio_8>;
+};
+
+/* Verdin I2C_1 */
+&main_i2c0 {
+ status = "okay";
+
+ wm8904_1a: audio-codec@1a {
+ compatible = "wlf,wm8904";
+ reg = <0x1a>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2s1_mclk>;
+ clocks = <&audio_refclk0>;
+ clock-names = "mclk";
+ #sound-dai-cells = <0>;
+ AVDD-supply = <&reg_1v8_sw>;
+ CPVDD-supply = <&reg_1v8_sw>;
+ DBVDD-supply = <&reg_1v8_sw>;
+ DCVDD-supply = <&reg_1v8_sw>;
+ MICVDD-supply = <&reg_1v8_sw>;
+ };
+
+ /* Current measurement into module VCC */
+ hwmon@40 {
+ compatible = "ti,ina219";
+ reg = <0x40>;
+ shunt-resistor = <10000>;
+ };
+
+ temperature-sensor@4f {
+ compatible = "ti,tmp75c";
+ reg = <0x4f>;
+ };
+
+ carrier_eeprom: eeprom@57 {
+ compatible = "st,24c02", "atmel,24c02";
+ reg = <0x57>;
+ pagesize = <16>;
+ };
+};
+
+/* Verdin I2C_2_DSI */
+&main_i2c1 {
+ status = "okay";
+};
+
+/* Verdin I2C_4_CSI */
+&main_i2c3 {
+ status = "okay";
+};
+
+/* Verdin CAN_1 */
+&main_mcan0 {
+ status = "okay";
+};
+
+/* Verdin SPI_1 */
+&main_spi1 {
+ status = "okay";
+};
+
+/* Verdin UART_3, used as the Linux console */
+&main_uart0 {
+ status = "okay";
+};
+
+/* Verdin UART_1 */
+&main_uart1 {
+ status = "okay";
+};
+
+/* Verdin I2S_1 */
+&mcasp0 {
+ status = "okay";
+};
+
+&mcu_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_1>,
+ <&pinctrl_gpio_2>,
+ <&pinctrl_gpio_3>,
+ <&pinctrl_gpio_4>;
+};
+
+/* Verdin I2C_3_HDMI */
+&mcu_i2c0 {
+ status = "okay";
+};
+
+/* Verdin CAN_2 */
+&mcu_mcan0 {
+ status = "okay";
+};
+
+/* Verdin UART_4 */
+&mcu_uart0 {
+ status = "okay";
+};
+
+/* Verdin QSPI_1 */
+&ospi0 {
+ status = "okay";
+};
+
+/* We support turning off sleep moci on Dahlia */
+&reg_force_sleep_moci {
+ status = "disabled";
+};
+
+/* Verdin SD_1 */
+&sdhci1 {
+ status = "okay";
+};
+
+/* Verdin USB_1 */
+&usbss0 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
+
+/* Verdin USB_2 */
+&usbss1 {
+ status = "okay";
+};
+
+&usb1 {
+ status = "okay";
+};
+
+/* Verdin CTRL_WAKE1_MICO# */
+&verdin_gpio_keys {
+ status = "okay";
+};
+
+/* Verdin PCIE_1_RESET# */
+&verdin_pcie_1_reset_hog {
+ status = "okay";
+};
+
+/* Verdin UART_2 */
+&wkup_uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-verdin-dev.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-verdin-dev.dtsi
new file mode 100644
index 000000000000..0679d76f31bd
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p-verdin-dev.dtsi
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * Common dtsi for Verdin AM62P SoM on Development carrier board
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/verdin-development-board-kit
+ */
+
+/ {
+ aliases {
+ eeprom1 = &carrier_eeprom;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,bitclock-master = <&codec_dai>;
+ simple-audio-card,format = "i2s";
+ simple-audio-card,frame-master = <&codec_dai>;
+ simple-audio-card,mclk-fs = <256>;
+ simple-audio-card,name = "verdin-nau8822";
+ simple-audio-card,routing =
+ "Headphones", "LHP",
+ "Headphones", "RHP",
+ "Speaker", "LSPK",
+ "Speaker", "RSPK",
+ "Line Out", "AUXOUT1",
+ "Line Out", "AUXOUT2",
+ "LAUX", "Line In",
+ "RAUX", "Line In",
+ "LMICP", "Mic In",
+ "RMICP", "Mic In";
+ simple-audio-card,widgets =
+ "Headphones", "Headphones",
+ "Line Out", "Line Out",
+ "Speaker", "Speaker",
+ "Microphone", "Mic In",
+ "Line", "Line In";
+
+ codec_dai: simple-audio-card,codec {
+ sound-dai = <&nau8822_1a>;
+ };
+
+ simple-audio-card,cpu {
+ sound-dai = <&mcasp0>;
+ };
+ };
+};
+
+/* Verdin ETHs */
+&cpsw3g {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii1>, <&pinctrl_rgmii2>;
+ status = "okay";
+};
+
+/* MDIO, shared by Verdin ETH_1 (On-module PHY) and Verdin ETH_2_RGMII */
+&cpsw3g_mdio {
+ status = "okay";
+
+ carrier_eth_phy: ethernet-phy@7 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <7>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eth2_rgmii_int>;
+ interrupt-parent = <&main_gpio0>;
+ interrupts = <42 IRQ_TYPE_EDGE_FALLING>;
+ micrel,led-mode = <0>;
+ };
+};
+
+/* Verdin ETH_1 (On-module PHY) */
+&cpsw_port1 {
+ status = "okay";
+};
+
+/* Verdin ETH_2_RGMII */
+&cpsw_port2 {
+ phy-handle = <&carrier_eth_phy>;
+ phy-mode = "rgmii-rxid";
+ status = "okay";
+};
+
+/* Verdin PWM_3_DSI */
+&epwm0 {
+ status = "okay";
+};
+
+/* Verdin PWM_1, PWM_2 */
+&epwm2 {
+ status = "okay";
+};
+
+&main_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie_1_reset>,
+ <&pinctrl_gpio_5>,
+ <&pinctrl_gpio_6>,
+ <&pinctrl_gpio_7>,
+ <&pinctrl_gpio_8>;
+};
+
+/* Verdin I2C_1 */
+&main_i2c0 {
+ status = "okay";
+
+ nau8822_1a: audio-codec@1a {
+ compatible = "nuvoton,nau8822";
+ reg = <0x1a>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2s1_mclk>;
+ clock-names = "mclk";
+ clocks = <&audio_refclk0>;
+ #sound-dai-cells = <0>;
+ };
+
+ carrier_gpio_expander: gpio@21 {
+ compatible = "nxp,pcal6416";
+ reg = <0x21>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+
+ /* Current measurement into module VCC */
+ hwmon@40 {
+ compatible = "ti,ina219";
+ reg = <0x40>;
+ shunt-resistor = <10000>;
+ };
+
+ temperature-sensor@4f {
+ compatible = "ti,tmp75c";
+ reg = <0x4f>;
+ };
+
+ carrier_eeprom: eeprom@57 {
+ compatible = "st,24c02", "atmel,24c02";
+ reg = <0x57>;
+ pagesize = <16>;
+ };
+};
+
+/* Verdin I2C_2_DSI */
+&main_i2c1 {
+ status = "okay";
+};
+
+/* Verdin I2C_4_CSI */
+&main_i2c3 {
+ status = "okay";
+};
+
+/* Verdin CAN_1 */
+&main_mcan0 {
+ status = "okay";
+};
+
+/* Verdin SPI_1 */
+&main_spi1 {
+ status = "okay";
+};
+
+/* Verdin UART_3, used as the Linux console */
+&main_uart0 {
+ status = "okay";
+};
+
+/* Verdin UART_1, connector X50 through RS485 transceiver */
+&main_uart1 {
+ rs485-rx-during-tx;
+ linux,rs485-enabled-at-boot-time;
+ status = "okay";
+};
+
+/* Verdin I2S_1 */
+&mcasp0 {
+ status = "okay";
+};
+
+&mcu_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_1>,
+ <&pinctrl_gpio_2>,
+ <&pinctrl_gpio_3>,
+ <&pinctrl_gpio_4>;
+};
+
+/* Verdin I2C_3_HDMI */
+&mcu_i2c0 {
+ status = "okay";
+};
+
+/* Verdin CAN_2 */
+&mcu_mcan0 {
+ status = "okay";
+};
+
+/* Verdin UART_4 */
+&mcu_uart0 {
+ status = "okay";
+};
+
+/* Verdin QSPI_1 */
+&ospi0 {
+ status = "okay";
+};
+
+/* Verdin SD_1 */
+&sdhci1 {
+ status = "okay";
+};
+
+/* Verdin USB_1 */
+&usbss0 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
+
+/* Verdin USB_2 */
+&usbss1 {
+ status = "okay";
+};
+
+&usb1 {
+ status = "okay";
+};
+
+/* Verdin CTRL_WAKE1_MICO# */
+&verdin_gpio_keys {
+ status = "okay";
+};
+
+/* Verdin PCIE_1_RESET# */
+&verdin_pcie_1_reset_hog {
+ status = "okay";
+};
+
+/* Verdin UART_2 */
+&wkup_uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-verdin-ivy.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-verdin-ivy.dtsi
new file mode 100644
index 000000000000..317c8818f9ee
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p-verdin-ivy.dtsi
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * Common dtsi for Verdin AM62P SoM on Ivy carrier board
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/ivy-carrier-board
+ */
+
+#include <dt-bindings/mux/mux.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/net/ti-dp83867.h>
+
+/ {
+ aliases {
+ eeprom1 = &carrier_eeprom;
+ };
+
+ /* AIN1 Voltage w/o AIN1_MODE gpio control */
+ ain1_voltage_unmanaged: voltage-divider-ain1 {
+ compatible = "voltage-divider";
+ #io-channel-cells = <1>;
+ io-channels = <&ivy_adc1 0>;
+ full-ohms = <19>;
+ output-ohms = <1>;
+ };
+
+ /* AIN1 Current w/o AIN1_MODE gpio control */
+ ain1_current_unmanaged: current-sense-shunt-ain1 {
+ compatible = "current-sense-shunt";
+ #io-channel-cells = <0>;
+ io-channels = <&ivy_adc1 1>;
+ shunt-resistor-micro-ohms = <100000000>;
+ };
+
+ /* AIN1_MODE - SODIMM 216 */
+ ain1_mode_mux_ctrl: mux-controller-0 {
+ compatible = "gpio-mux";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_5>;
+ #mux-control-cells = <0>;
+ mux-gpios = <&main_gpio0 49 GPIO_ACTIVE_HIGH>;
+ };
+
+ ain1-voltage {
+ compatible = "io-channel-mux";
+ channels = "ain1_voltage", "";
+ io-channels = <&ain1_voltage_unmanaged 0>;
+ io-channel-names = "parent";
+ mux-controls = <&ain1_mode_mux_ctrl>;
+ settle-time-us = <1000>;
+ };
+
+ ain1-current {
+ compatible = "io-channel-mux";
+ channels = "", "ain1_current";
+ io-channels = <&ain1_current_unmanaged>;
+ io-channel-names = "parent";
+ mux-controls = <&ain1_mode_mux_ctrl>;
+ settle-time-us = <1000>;
+ };
+
+ /* AIN2 Voltage w/o AIN2_MODE gpio control */
+ ain2_voltage_unmanaged: voltage-divider-ain2 {
+ compatible = "voltage-divider";
+ #io-channel-cells = <1>;
+ io-channels = <&ivy_adc2 0>;
+ full-ohms = <19>;
+ output-ohms = <1>;
+ };
+
+ /* AIN2 Current w/o AIN2_MODE gpio control */
+ ain2_current_unmanaged: current-sense-shunt-ain2 {
+ compatible = "current-sense-shunt";
+ #io-channel-cells = <0>;
+ io-channels = <&ivy_adc2 1>;
+ shunt-resistor-micro-ohms = <100000000>;
+ };
+
+ /* AIN2_MODE - SODIMM 218 */
+ ain2_mode_mux_ctrl: mux-controller-1 {
+ compatible = "gpio-mux";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_6>;
+ #mux-control-cells = <0>;
+ mux-gpios = <&main_gpio0 50 GPIO_ACTIVE_HIGH>;
+ };
+
+ ain2-voltage {
+ compatible = "io-channel-mux";
+ channels = "ain2_voltage", "";
+ io-channels = <&ain2_voltage_unmanaged 0>;
+ io-channel-names = "parent";
+ mux-controls = <&ain2_mode_mux_ctrl>;
+ settle-time-us = <1000>;
+ };
+
+ ain2-current {
+ compatible = "io-channel-mux";
+ channels = "", "ain2_current";
+ io-channels = <&ain2_current_unmanaged>;
+ io-channel-names = "parent";
+ mux-controls = <&ain2_mode_mux_ctrl>;
+ settle-time-us = <1000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ivy_leds>;
+
+ /* D7 Blue - SODIMM 30 - LEDs.GPIO1 */
+ led-0 {
+ color = <LED_COLOR_ID_BLUE>;
+ default-state = "off";
+ function = LED_FUNCTION_STATUS;
+ function-enumerator = <1>;
+ gpios = <&main_gpio1 11 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* D7 Green - SODIMM 32 - LEDs.GPIO2 */
+ led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
+ function = LED_FUNCTION_STATUS;
+ function-enumerator = <1>;
+ gpios = <&main_gpio1 12 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* D7 Red - SODIMM 34 - LEDs.GPIO3 */
+ led-2 {
+ color = <LED_COLOR_ID_RED>;
+ default-state = "off";
+ function = LED_FUNCTION_STATUS;
+ function-enumerator = <1>;
+ gpios = <&main_gpio1 10 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* D8 Blue - SODIMM 36 - LEDs.GPIO4 */
+ led-3 {
+ color = <LED_COLOR_ID_BLUE>;
+ default-state = "off";
+ function = LED_FUNCTION_STATUS;
+ function-enumerator = <2>;
+ gpios = <&main_gpio1 9 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* D8 Green - SODIMM 54 - LEDs.GPIO5 */
+ led-4 {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
+ function = LED_FUNCTION_STATUS;
+ function-enumerator = <2>;
+ gpios = <&main_gpio0 11 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* D8 Red - SODIMM 44 - LEDs.GPIO6 */
+ led-5 {
+ color = <LED_COLOR_ID_RED>;
+ default-state = "off";
+ function = LED_FUNCTION_STATUS;
+ function-enumerator = <2>;
+ gpios = <&main_gpio0 37 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* D9 Blue - SODIMM 46 - LEDs.GPIO7 */
+ led-6 {
+ color = <LED_COLOR_ID_BLUE>;
+ default-state = "off";
+ function = LED_FUNCTION_STATUS;
+ function-enumerator = <3>;
+ gpios = <&main_gpio0 34 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* D9 Red - SODIMM 48 - LEDs.GPIO8 */
+ led-7 {
+ color = <LED_COLOR_ID_RED>;
+ default-state = "off";
+ function = LED_FUNCTION_STATUS;
+ function-enumerator = <3>;
+ gpios = <&main_gpio0 33 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ reg_3v2_ain1: regulator-3v2-ain1 {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <3200000>;
+ regulator-min-microvolt = <3200000>;
+ regulator-name = "+3V2_AIN1";
+ };
+
+ reg_3v2_ain2: regulator-3v2-ain2 {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <3200000>;
+ regulator-min-microvolt = <3200000>;
+ regulator-name = "+3V2_AIN2";
+ };
+
+ /* Ivy Power Supply Input Voltage */
+ ivy-input-voltage {
+ compatible = "voltage-divider";
+ /* Verdin ADC_1 */
+ io-channels = <&som_adc 7>;
+ full-ohms = <204700>; /* 200K + 4.7K */
+ output-ohms = <4700>;
+ };
+
+ ivy-5v-voltage {
+ compatible = "voltage-divider";
+ /* Verdin ADC_2 */
+ io-channels = <&som_adc 6>;
+ full-ohms = <39000>; /* 27K + 12K */
+ output-ohms = <12000>;
+ };
+
+ ivy-3v3-voltage {
+ compatible = "voltage-divider";
+ /* Verdin ADC_3 */
+ io-channels = <&som_adc 5>;
+ full-ohms = <54000>; /* 27K + 27K */
+ output-ohms = <27000>;
+ };
+
+ ivy-1v8-voltage {
+ compatible = "voltage-divider";
+ /* Verdin ADC_4 */
+ io-channels = <&som_adc 4>;
+ full-ohms = <39000>; /* 12K + 27K */
+ output-ohms = <27000>;
+ };
+};
+
+&main_pmx0 {
+ pinctrl_ivy_leds: ivy-leds-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x019c, PIN_INPUT, 7) /* (E24) MCASP0_AXR1.GPIO1_9 */ /* SODIMM 36 */
+ AM62PX_IOPAD(0x01a0, PIN_INPUT, 7) /* (F23) MCASP0_AXR0.GPIO1_10 */ /* SODIMM 34 */
+ AM62PX_IOPAD(0x01a4, PIN_INPUT, 7) /* (F24) MCASP0_ACLKX.GPIO1_11 */ /* SODIMM 30 */
+ AM62PX_IOPAD(0x01a8, PIN_INPUT, 7) /* (F25) MCASP0_AFSX.GPIO1_12 */ /* SODIMM 32 */
+ AM62PX_IOPAD(0x0088, PIN_INPUT, 7) /* (R24) GPMC0_OEn_REn.GPIO0_33 */ /* SODIMM 48 */
+ AM62PX_IOPAD(0x0098, PIN_INPUT, 7) /* (AA24) GPMC0_WAIT0.GPIO0_37 */ /* SODIMM 44 */
+ AM62PX_IOPAD(0x008c, PIN_INPUT, 7) /* (T25) GPMC0_WEn.GPIO0_34 */ /* SODIMM 46 */
+ AM62PX_IOPAD(0x002c, PIN_INPUT, 7) /* (M25) OSPI0_CSn0.GPIO0_11 */ /* SODIMM 54 */
+ >;
+ };
+};
+
+/* Verdin ETHs */
+&cpsw3g {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii1>, <&pinctrl_rgmii2>;
+ status = "okay";
+};
+
+/* MDIO, shared by Verdin ETH_1 (On-module PHY) and Verdin ETH_2_RGMII */
+&cpsw3g_mdio {
+ status = "okay";
+
+ carrier_eth_phy: ethernet-phy@2 {
+ reg = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eth2_rgmii_int>;
+ interrupt-parent = <&main_gpio0>;
+ interrupts = <42 IRQ_TYPE_EDGE_FALLING>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ };
+};
+
+/* Verdin ETH_1 (On-module PHY) */
+&cpsw_port1 {
+ status = "okay";
+};
+
+/* Verdin ETH_2_RGMII */
+&cpsw_port2 {
+ phy-handle = <&carrier_eth_phy>;
+ phy-mode = "rgmii-rxid";
+ status = "okay";
+};
+
+&main_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie_1_reset>,
+ <&pinctrl_qspi1_cs2_gpio>,
+ <&pinctrl_qspi1_dqs_gpio>,
+ <&pinctrl_qspi1_io0_gpio>,
+ <&pinctrl_qspi1_io1_gpio>,
+ <&pinctrl_qspi1_io2_gpio>,
+ <&pinctrl_qspi1_io3_gpio>;
+ gpio-line-names =
+ "", /* 0 */
+ "",
+ "REL4", /* SODIMM 66 */
+ "DIGI_1", /* SODIMM 56 */
+ "DIGI_2", /* SODIMM 58 */
+ "REL1", /* SODIMM 60 */
+ "REL2", /* SODIMM 62 */
+ "",
+ "",
+ "",
+ "", /* 10 */
+ "",
+ "REL3", /* SODIMM 64 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 20 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 30 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 40 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 50 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 60 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 70 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 80 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 90 */
+ "";
+};
+
+&main_gpio1 {
+ gpio-line-names =
+ "", /* 0 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 10 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 20 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 30 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 40 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 50 */
+ "";
+};
+
+/* Verdin I2C_1 */
+&main_i2c0 {
+ status = "okay";
+
+ temperature-sensor@4f {
+ compatible = "ti,tmp1075";
+ reg = <0x4f>;
+ };
+
+ carrier_eeprom: eeprom@57 {
+ compatible = "st,24c02", "atmel,24c02";
+ reg = <0x57>;
+ pagesize = <16>;
+ };
+};
+
+/* Verdin I2C_4_CSI */
+&main_i2c3 {
+ status = "okay";
+
+ ivy_adc1: adc@40 {
+ compatible = "ti,ads1119";
+ reg = <0x40>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_7>;
+ interrupt-parent = <&main_gpio0>;
+ interrupts = <51 IRQ_TYPE_EDGE_FALLING>;
+ avdd-supply = <&reg_3v2_ain1>;
+ dvdd-supply = <&reg_3v2_ain1>;
+ vref-supply = <&reg_3v2_ain1>;
+ #address-cells = <1>;
+ #io-channel-cells = <1>;
+ #size-cells = <0>;
+
+ /* AIN1 0-33V Voltage Input */
+ channel@0 {
+ reg = <0>;
+ diff-channels = <0 1>;
+ };
+
+ /* AIN1 0-20mA Current Input */
+ channel@1 {
+ reg = <1>;
+ diff-channels = <2 3>;
+ };
+ };
+
+ ivy_adc2: adc@41 {
+ compatible = "ti,ads1119";
+ reg = <0x41>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_8>;
+ interrupt-parent = <&main_gpio0>;
+ interrupts = <52 IRQ_TYPE_EDGE_FALLING>;
+ avdd-supply = <&reg_3v2_ain2>;
+ dvdd-supply = <&reg_3v2_ain2>;
+ vref-supply = <&reg_3v2_ain2>;
+ #address-cells = <1>;
+ #io-channel-cells = <1>;
+ #size-cells = <0>;
+
+ /* AIN2 0-33V Voltage Input */
+ channel@0 {
+ reg = <0>;
+ diff-channels = <0 1>;
+ };
+
+ /* AIN2 0-20mA Current Input */
+ channel@1 {
+ reg = <1>;
+ diff-channels = <2 3>;
+ };
+ };
+};
+
+/* Verdin CAN_1 */
+&main_mcan0 {
+ status = "okay";
+};
+
+/* Verdin SPI_1 */
+&main_spi1 {
+ pinctrl-0 = <&pinctrl_main_spi1>,
+ <&pinctrl_main_spi1_cs0>,
+ <&pinctrl_gpio_1>,
+ <&pinctrl_gpio_4>;
+ cs-gpios = <0>,
+ <&mcu_gpio0 1 GPIO_ACTIVE_LOW>,
+ <&mcu_gpio0 4 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ tpm@1 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ reg = <1>;
+ spi-max-frequency = <18500000>;
+ };
+
+ fram@2 {
+ compatible = "fujitsu,mb85rs256", "atmel,at25";
+ reg = <2>;
+ address-width = <16>;
+ size = <32768>;
+ spi-max-frequency = <33000000>;
+ pagesize = <1>;
+ };
+};
+
+/* Verdin UART_3, used as the Linux console */
+&main_uart0 {
+ status = "okay";
+};
+
+/* Verdin UART_1 */
+&main_uart1 {
+ status = "okay";
+};
+
+&mcu_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_2>,
+ <&pinctrl_gpio_3>;
+ gpio-line-names =
+ "",
+ "",
+ "GPIO2", /* Verdin GPIO_2 - SODIMM 208 */
+ "GPIO3", /* Verdin GPIO_3 - SODIMM 210 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 10 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 20 */
+ "",
+ "",
+ "";
+};
+
+/* Verdin CAN_2 */
+&mcu_mcan0 {
+ status = "okay";
+};
+
+/* Verdin SD_1 */
+&sdhci1 {
+ status = "okay";
+};
+
+/* Verdin USB_1 */
+&usbss0 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
+
+/* Verdin USB_2 */
+&usbss1 {
+ status = "okay";
+};
+
+&usb1 {
+ status = "okay";
+};
+
+/* Verdin PCIE_1_RESET# */
+&verdin_pcie_1_reset_hog {
+ status = "okay";
+};
+
+/* Verdin UART_2 */
+&wkup_uart0 {
+ rs485-rts-active-low;
+ rs485-rx-during-tx;
+ linux,rs485-enabled-at-boot-time;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-verdin-mallow.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-verdin-mallow.dtsi
new file mode 100644
index 000000000000..37c0b9da82da
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p-verdin-mallow.dtsi
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * Common dtsi for Verdin AM62P SoM on Mallow carrier board
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/mallow-carrier-board
+ */
+
+#include <dt-bindings/leds/common.h>
+
+/ {
+ aliases {
+ eeprom1 = &carrier_eeprom;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_qspi1_clk_gpio>,
+ <&pinctrl_qspi1_cs_gpio>,
+ <&pinctrl_qspi1_io0_gpio>,
+ <&pinctrl_qspi1_io1_gpio>;
+
+ /* SODIMM 52 - USER_LED_1_RED */
+ led-0 {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_DEBUG;
+ function-enumerator = <1>;
+ gpios = <&main_gpio0 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* SODIMM 54 - USER_LED_1_GREEN */
+ led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DEBUG;
+ function-enumerator = <1>;
+ gpios = <&main_gpio0 11 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* SODIMM 56 - USER_LED_2_RED */
+ led-2 {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_DEBUG;
+ function-enumerator = <2>;
+ gpios = <&main_gpio0 3 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* SODIMM 58 - USER_LED_2_GREEN */
+ led-3 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DEBUG;
+ function-enumerator = <2>;
+ gpios = <&main_gpio0 4 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
+
+/* Verdin ETHs */
+&cpsw3g {
+ status = "okay";
+};
+
+/* MDIO, shared by Verdin ETH_1 (On-module PHY) and Verdin ETH_2_RGMII */
+&cpsw3g_mdio {
+ status = "okay";
+};
+
+/* Verdin ETH_1 (On-module PHY) */
+&cpsw_port1 {
+ status = "okay";
+};
+
+/* Verdin PWM_3_DSI */
+&epwm0 {
+ status = "okay";
+};
+
+/* Verdin PWM_1, PWM_2 */
+&epwm2 {
+ status = "okay";
+};
+
+&main_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie_1_reset>,
+ <&pinctrl_gpio_5>,
+ <&pinctrl_gpio_6>,
+ <&pinctrl_gpio_7>,
+ <&pinctrl_gpio_8>;
+};
+
+/* Verdin I2C_1 */
+&main_i2c0 {
+ status = "okay";
+
+ temperature-sensor@4f {
+ compatible = "ti,tmp1075";
+ reg = <0x4f>;
+ };
+
+ carrier_eeprom: eeprom@57 {
+ compatible = "st,24c02", "atmel,24c02";
+ reg = <0x57>;
+ pagesize = <16>;
+ };
+};
+
+/* Verdin I2C_2_DSI */
+&main_i2c1 {
+ status = "okay";
+};
+
+/* Verdin I2C_4_CSI */
+&main_i2c3 {
+ status = "okay";
+};
+
+/* Verdin CAN_1 */
+&main_mcan0 {
+ status = "okay";
+};
+
+/* Verdin SPI_1 */
+&main_spi1 {
+ pinctrl-0 = <&pinctrl_main_spi1>,
+ <&pinctrl_main_spi1_cs0>,
+ <&pinctrl_qspi1_cs2_gpio>;
+ cs-gpios = <0>, <&main_gpio0 12 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ tpm@1 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ reg = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_qspi1_dqs_gpio>;
+ interrupt-parent = <&main_gpio0>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+ spi-max-frequency = <18500000>;
+ };
+};
+
+/* Verdin UART_3, used as the Linux console */
+&main_uart0 {
+ status = "okay";
+};
+
+/* Verdin UART_1 */
+&main_uart1 {
+ status = "okay";
+};
+
+&mcu_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_1>,
+ <&pinctrl_gpio_2>,
+ <&pinctrl_gpio_3>,
+ <&pinctrl_gpio_4>;
+};
+
+/* Verdin I2C_3_HDMI */
+&mcu_i2c0 {
+ status = "okay";
+};
+
+/* Verdin CAN_2 */
+&mcu_mcan0 {
+ status = "okay";
+};
+
+/* Verdin UART_4 */
+&mcu_uart0 {
+ status = "okay";
+};
+
+/* Verdin SD_1 */
+&sdhci1 {
+ status = "okay";
+};
+
+/* Verdin USB_1 */
+&usbss0 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
+
+/* Verdin USB_2 */
+&usbss1 {
+ status = "okay";
+};
+
+&usb1 {
+ status = "okay";
+};
+
+/* Verdin CTRL_WAKE1_MICO# */
+&verdin_gpio_keys {
+ status = "okay";
+};
+
+/* Verdin PCIE_1_RESET# */
+&verdin_pcie_1_reset_hog {
+ status = "okay";
+};
+
+/* Verdin UART_2 */
+&wkup_uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-verdin-nonwifi.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-verdin-nonwifi.dtsi
new file mode 100644
index 000000000000..8e7019f00e65
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p-verdin-nonwifi.dtsi
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * Common dtsi for Verdin AM62P SoM non-WB variant
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ */
+
+/* SDIO on MSP 30, 31, 32, 33, 34, 35 */
+&sdhci2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci2>;
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-verdin-wifi.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-verdin-wifi.dtsi
new file mode 100644
index 000000000000..04d3124b5e0f
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p-verdin-wifi.dtsi
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * Common dtsi for Verdin AM62P SoM WB variant
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ */
+
+/* On-module Bluetooth */
+&main_uart6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart6>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "nxp,88w8987-bt";
+ fw-init-baudrate = <3000000>;
+ };
+};
+
+/* On-module Wi-Fi */
+&sdhci2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci2>;
+ keep-power-in-suspend;
+ non-removable;
+ ti,fails-without-test-cd;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-verdin-yavia.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-verdin-yavia.dtsi
new file mode 100644
index 000000000000..b7423a774dc5
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p-verdin-yavia.dtsi
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * Common dtsi for Verdin AM62P SoM on Yavia carrier board
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/yavia
+ */
+
+#include <dt-bindings/leds/common.h>
+
+/ {
+ aliases {
+ eeprom1 = &carrier_eeprom;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_qspi1_clk_gpio>,
+ <&pinctrl_qspi1_cs_gpio>,
+ <&pinctrl_qspi1_io0_gpio>,
+ <&pinctrl_qspi1_io1_gpio>,
+ <&pinctrl_qspi1_io2_gpio>,
+ <&pinctrl_qspi1_io3_gpio>;
+
+ /* SODIMM 52 - LD1_RED */
+ led-0 {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_DEBUG;
+ function-enumerator = <1>;
+ gpios = <&main_gpio0 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* SODIMM 54 - LD1_GREEN */
+ led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DEBUG;
+ function-enumerator = <1>;
+ gpios = <&main_gpio0 11 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* SODIMM 56 - LD1_BLUE */
+ led-2 {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_DEBUG;
+ function-enumerator = <1>;
+ gpios = <&main_gpio0 3 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* SODIMM 58 - LD2_RED */
+ led-3 {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_DEBUG;
+ function-enumerator = <2>;
+ gpios = <&main_gpio0 4 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* SODIMM 60 - LD2_GREEN */
+ led-4 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DEBUG;
+ function-enumerator = <2>;
+ gpios = <&main_gpio0 5 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* SODIMM 62 - LD2_BLUE */
+ led-5 {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_DEBUG;
+ function-enumerator = <2>;
+ gpios = <&main_gpio0 6 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
+
+/* Verdin ETHs */
+&cpsw3g {
+ status = "okay";
+};
+
+/* MDIO, shared by Verdin ETH_1 (On-module PHY) and Verdin ETH_2_RGMII */
+&cpsw3g_mdio {
+ status = "okay";
+};
+
+/* Verdin ETH_1 (On-module PHY) */
+&cpsw_port1 {
+ status = "okay";
+};
+
+/* Verdin PWM_3_DSI */
+&epwm0 {
+ status = "okay";
+};
+
+/* Verdin PWM_1, PWM_2 */
+&epwm2 {
+ status = "okay";
+};
+
+&main_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie_1_reset>,
+ <&pinctrl_qspi1_cs2_gpio>,
+ <&pinctrl_qspi1_dqs_gpio>,
+ <&pinctrl_gpio_5>,
+ <&pinctrl_gpio_6>,
+ <&pinctrl_gpio_7>,
+ <&pinctrl_gpio_8>;
+};
+
+/* Verdin I2C_1 */
+&main_i2c0 {
+ status = "okay";
+
+ temperature-sensor@4f {
+ compatible = "ti,tmp75c";
+ reg = <0x4f>;
+ };
+
+ carrier_eeprom: eeprom@57 {
+ compatible = "st,24c02", "atmel,24c02";
+ reg = <0x57>;
+ pagesize = <16>;
+ };
+};
+
+/* Verdin I2C_2_DSI */
+&main_i2c1 {
+ status = "okay";
+};
+
+/* Verdin I2C_4_CSI */
+&main_i2c3 {
+ status = "okay";
+};
+
+/* Verdin CAN_1 */
+&main_mcan0 {
+ status = "okay";
+};
+
+/* Verdin SPI_1 */
+&main_spi1 {
+ status = "okay";
+};
+
+/* Verdin UART_3, used as the Linux console */
+&main_uart0 {
+ status = "okay";
+};
+
+/* Verdin UART_1 */
+&main_uart1 {
+ status = "okay";
+};
+
+&mcu_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_1>,
+ <&pinctrl_gpio_2>,
+ <&pinctrl_gpio_3>,
+ <&pinctrl_gpio_4>;
+};
+
+/* Verdin I2C_3_HDMI */
+&mcu_i2c0 {
+ status = "okay";
+};
+
+/* Verdin CAN_2 */
+&mcu_mcan0 {
+ status = "okay";
+};
+
+/* Verdin UART_4 */
+&mcu_uart0 {
+ status = "okay";
+};
+
+/* Verdin SD_1 */
+&sdhci1 {
+ status = "okay";
+};
+
+/* Verdin USB_1 */
+&usbss0 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
+
+/* Verdin USB_2 */
+&usbss1 {
+ status = "okay";
+};
+
+&usb1 {
+ status = "okay";
+};
+
+/* Verdin CTRL_WAKE1_MICO# */
+&verdin_gpio_keys {
+ status = "okay";
+};
+
+/* Verdin PCIE_1_RESET# */
+&verdin_pcie_1_reset_hog {
+ status = "okay";
+};
+
+/* Verdin UART_2 */
+&wkup_uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-verdin.dtsi
new file mode 100644
index 000000000000..226398c37fa9
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p-verdin.dtsi
@@ -0,0 +1,1404 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * Common dtsi for Verdin AM62P SoM
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/net/ti-dp83867.h>
+
+/ {
+ aliases {
+ can0 = &main_mcan0;
+ can1 = &mcu_mcan0;
+ eeprom0 = &som_eeprom;
+ ethernet0 = &cpsw_port1;
+ ethernet1 = &cpsw_port2;
+ i2c0 = &wkup_i2c0;
+ i2c1 = &main_i2c0;
+ i2c2 = &main_i2c1;
+ i2c3 = &mcu_i2c0;
+ i2c4 = &main_i2c3;
+ mmc0 = &sdhci0;
+ mmc1 = &sdhci1;
+ mmc2 = &sdhci2;
+ rtc0 = &som_rtc_i2c;
+ rtc1 = &wkup_rtc0;
+ serial0 = &main_uart1;
+ serial1 = &wkup_uart0;
+ serial2 = &main_uart0;
+ serial3 = &mcu_uart0;
+ serial4 = &main_uart6;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ };
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ connector {
+ compatible = "gpio-usb-b-connector", "usb-b-connector";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_id>;
+ id-gpios = <&main_gpio0 31 GPIO_ACTIVE_HIGH>;
+ label = "USB_1";
+ self-powered;
+ vbus-supply = <&reg_usb0_vbus>;
+
+ port {
+ usb_dr_connector: endpoint {
+ remote-endpoint = <&usb0_ep>;
+ };
+ };
+ };
+
+ verdin_gpio_keys: gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ctrl_wake1_mico>;
+ status = "disabled";
+
+ key-wakeup {
+ debounce-interval = <10>;
+ /* Verdin CTRL_WAKE1_MICO# (SODIMM 252) */
+ gpios = <&main_gpio0 1 GPIO_ACTIVE_LOW>;
+ label = "Wake-Up";
+ linux,code = <KEY_WAKEUP>;
+ wakeup-source;
+ };
+ };
+
+ memory@80000000 {
+ /* 2G RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>;
+ device_type = "memory";
+ };
+
+ opp-table {
+ /* Add 1.4GHz OPP. Requires VDD_CORE to be at 0.85V */
+ opp-1400000000 {
+ opp-hz = /bits/ 64 <1400000000>;
+ opp-supported-hw = <0x01 0x0004>;
+ clock-latency-ns = <6000000>;
+ };
+ };
+
+ reg_force_sleep_moci: regulator-force-sleep-moci {
+ compatible = "regulator-fixed";
+ /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
+ gpios = <&som_gpio_expander 0 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "CTRL_SLEEP_MOCI#";
+ };
+
+ /* Verdin SD_1 Power Supply */
+ reg_sd1_vmmc: regulator-sdhci1-vmmc {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sd1_pwr_en>;
+ /* Verdin SD_1_PWR_EN (SODIMM 76) */
+ gpios = <&main_gpio0 47 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ off-on-delay-us = <100000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "+V3.3_SD";
+ startup-delay-us = <2000>;
+ };
+
+ reg_sd1_vqmmc: regulator-sdhci1-vqmmc {
+ compatible = "regulator-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sd_vsel>;
+ /* PMIC_VSEL_SD */
+ gpios = <&main_gpio0 21 GPIO_ACTIVE_HIGH>;
+ regulator-name = "LDO1-VSEL-SD (PMIC)";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ states = <1800000 0x0>,
+ <3300000 0x1>;
+ vin-supply = <&reg_sd_3v3_1v8>;
+ };
+
+ reg_usb0_vbus: regulator-usb0-vbus {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_en>;
+ /* Verdin USB_1_EN (SODIMM 155) */
+ gpios = <&main_gpio1 50 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <5000000>;
+ regulator-name = "USB_1_EN";
+ };
+
+ /* Module Power Supply */
+ reg_vsodimm: regulator-vsodimm {
+ compatible = "regulator-fixed";
+ regulator-name = "+V_SODIMM";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ secure_tfa_ddr: tfa@9e780000 {
+ reg = <0x00 0x9e780000 0x00 0x80000>;
+ no-map;
+ };
+
+ secure_ddr: optee@9e800000 {
+ reg = <0x00 0x9e800000 0x00 0x01800000>; /* for OP-TEE */
+ no-map;
+ };
+
+ wkup_r5fss0_core0_memory_region: r5f-dma-memory@9c900000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9c900000 0x00 0x01e00000>;
+ no-map;
+ };
+ };
+};
+
+&main_pmx0 {
+ /* Verdin PWM_3_DSI */
+ pinctrl_epwm0_b: main-epwm0b-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01b8, PIN_OUTPUT, 2) /* (E20) SPI0_CS1.EHRPWM0_B */ /* SODIMM 19 */
+ >;
+ };
+
+ /* Verdin PWM_2 */
+ pinctrl_epwm2_a: main-epwm2a-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0124, PIN_OUTPUT, 4) /* (J25) MMC2_SDCD.EHRPWM2_A */ /* SODIMM 16 */
+ >;
+ };
+
+ /* Verdin PWM_1 */
+ pinctrl_epwm2_b: main-epwm2b-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0128, PIN_OUTPUT, 4) /* (K25) MMC2_SDWP.EHRPWM2_B */ /* SODIMM 15 */
+ >;
+ };
+
+ /* Verdin QSPI_1_CLK as GPIO (conflict with Verdin QSPI_1 interface) */
+ pinctrl_qspi1_clk_gpio: main-gpio0-0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0000, PIN_INPUT, 7) /* (P23) OSPI0_CLK.GPIO0_0 */ /* SODIMM 52 */
+ >;
+ };
+
+ /* Verdin CTRL_WAKE1_MICO# */
+ pinctrl_ctrl_wake1_mico: main-gpio0-1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0004, PIN_INPUT, 7) /* (N23) OSPI0_LBCLKO.GPIO0_1 */ /* SODIMM 252 */
+ >;
+ };
+
+ /* Verdin QSPI_1_DQS as GPIO (conflict with Verdin QSPI_1 interface) */
+ pinctrl_qspi1_dqs_gpio: main-gpio0-2-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0008, PIN_INPUT, 7) /* (P22) OSPI0_DQS.GPIO0_2 */ /* SODIMM 66 */
+ >;
+ };
+
+ /* Verdin QSPI_1_IO0 as GPIO (conflict with Verdin QSPI_1 interface) */
+ pinctrl_qspi1_io0_gpio: main-gpio0-3-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x000c, PIN_INPUT, 7) /* (L25) OSPI0_D0.GPIO0_3 */ /* SODIMM 56 */
+ >;
+ };
+
+ /* Verdin QSPI_1_IO1 as GPIO (conflict with Verdin QSPI_1 interface) */
+ pinctrl_qspi1_io1_gpio: main-gpio0-4-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0010, PIN_INPUT, 7) /* (N24) OSPI0_D1.GPIO0_4 */ /* SODIMM 58 */
+ >;
+ };
+
+ /* Verdin QSPI_1_IO2 as GPIO (conflict with Verdin QSPI_1 interface) */
+ pinctrl_qspi1_io2_gpio: main-gpio0-5-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0014, PIN_INPUT, 7) /* (N25) OSPI0_D2.GPIO0_5 */ /* SODIMM 60 */
+ >;
+ };
+
+ /* Verdin QSPI_1_IO3 as GPIO (conflict with Verdin QSPI_1 interface) */
+ pinctrl_qspi1_io3_gpio: main-gpio0-6-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0018, PIN_INPUT, 7) /* (M24) OSPI0_D3.GPIO0_6 */ /* SODIMM 62 */
+ >;
+ };
+
+ /* Verdin QSPI_1_CS# as GPIO (conflict with Verdin QSPI_1 interface) */
+ pinctrl_qspi1_cs_gpio: main-gpio0-11-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x002c, PIN_INPUT, 7) /* (M25) OSPI0_CSn0.GPIO0_11 */ /* SODIMM 54 */
+ >;
+ };
+
+ /* Verdin QSPI_1_CS2# as GPIO (conflict with Verdin QSPI_1 interface) */
+ pinctrl_qspi1_cs2_gpio: main-gpio0-12-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0030, PIN_INPUT, 7) /* (L24) OSPI0_CSn1.GPIO0_12 */ /* SODIMM 64 */
+ >;
+ };
+
+ /* Verdin MSP_37 as GPIO */
+ pinctrl_msp37_gpio: main-gpio0-13-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0034, PIN_INPUT, 7) /* (L22) OSPI0_CSn2.GPIO0_13 */ /* SODIMM 174 - WiFi_W_WKUP_HOST# */
+ >;
+ };
+
+ /* Verdin PCIE_1_RESET# */
+ pinctrl_pcie_1_reset: main-gpio0-14-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0038, PIN_INPUT, 7) /* (L23) OSPI0_CSn3.GPIO0_14 */ /* SODIMM 244 */
+ >;
+ };
+
+ pinctrl_sd_vsel: main-gpio0-21-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0054, PIN_INPUT, 7) /* (V24) GPMC0_AD6.GPIO0_21 */ /* PMIC_SD_VSEL */
+ >;
+ };
+
+ pinctrl_tpm_extint: main-gpio0-25-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0064, PIN_INPUT, 7) /* (AA25) GPMC0_AD10.GPIO0_25 */ /* TPM_EXTINT# */
+ >;
+ };
+
+ pinctrl_wifi_wkup_bt: main-gpio0-29-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0074, PIN_INPUT, 7) /* (AB24) GPMC0_AD14.GPIO0_29 */ /* WiFi_WKUP_BT# */
+ >;
+ };
+
+ pinctrl_wifi_wkup_wlan: main-gpio0-30-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0078, PIN_INPUT, 7) /* (AC24) GPMC0_AD15.GPIO0_30 */ /* WiFi_WKUP_WLAN# */
+ >;
+ };
+
+ /* Verdin USB_1_ID */
+ pinctrl_usb0_id: main-gpio0-31-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x007c, PIN_INPUT, 7) /* (Y25) GPMC0_CLK.GPIO0_31 */ /* SODIMM 161 */
+ >;
+ };
+
+ /* Verdin USB_1_OC# */
+ pinctrl_usb1_oc: main-gpio0-32-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0084, PIN_INPUT, 7) /* (R25) GPMC0_ADVn_ALE.GPIO0_32 */ /* SODIMM 157 */
+ >;
+ };
+
+ /* Verdin I2S_2_D_IN as GPIO (conflict with Verdin I2S_2 interface) */
+ pinctrl_i2s_2_d_in_gpio: main-gpio0-33-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0088, PIN_INPUT, 7) /* (R24) GPMC0_OEn_REn.GPIO0_33 */ /* SODIMM 48 */
+ >;
+ };
+
+ /* Verdin I2S_2_D_OUT as GPIO (conflict with Verdin I2S_2 interface) */
+ pinctrl_i2s_2_d_out_gpio: main-gpio0-34-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x008c, PIN_INPUT, 7) /* (T25) GPMC0_WEn.GPIO0_34 */ /* SODIMM 46 */
+ >;
+ };
+
+ /* Verdin I2S_2_BCLK as GPIO (conflict with Verdin I2S_2 interface) */
+ pinctrl_i2s_2_bclk_gpio: main-gpio0-35-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0090, PIN_INPUT, 7) /* (U24) GPMC0_BE0n_CLE.GPIO0_35 */ /* SODIMM 42 */
+ >;
+ };
+
+ pinctrl_eth_int: main-gpio0-36-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0094, PIN_INPUT, 7) /* (T24) GPMC0_BE1n.GPIO0_36 */ /* ETH_INT# */
+ >;
+ };
+
+ /* Verdin I2S_2_SYNC as GPIO (conflict with Verdin I2S_2 interface) */
+ pinctrl_i2s_2_sync_gpio: main-gpio0-37-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0098, PIN_INPUT, 7) /* (AA24) GPMC0_WAIT0.GPIO0_37 */ /* SODIMM 44 */
+ >;
+ };
+
+ /* Verdin DSI_1_INT# */
+ pinctrl_dsi1_int: main-gpio0-38-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x009c, PIN_INPUT, 7) /* (AD24) GPMC0_WAIT1.GPIO0_38 */ /* SODIMM 17 */
+ >;
+ };
+
+ /* Verdin DSI_1_BKL_EN */
+ pinctrl_dsi1_bkl_en: main-gpio0-39-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00a0, PIN_INPUT, 7) /* (P24) GPMC0_WPn.GPIO0_39 */ /* SODIMM 21 */
+ >;
+ };
+
+ /* Verdin USB_2_OC# */
+ pinctrl_usb2_oc: main-gpio0-41-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00a8, PIN_INPUT, 7) /* (T23) GPMC0_CSn0.GPIO0_41 */ /* SODIMM 187 */
+ >;
+ };
+
+ /* Verdin ETH_2_RGMII_INT# */
+ pinctrl_eth2_rgmii_int: main-gpio0-42-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00ac, PIN_INPUT, 7) /* (U23) GPMC0_CSn1.GPIO0_42 */ /* SODIMM 189 */
+ >;
+ };
+
+ /* Verdin SD_1_PWR_EN */
+ pinctrl_sd1_pwr_en: main-gpio0-47-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00c0, PIN_INPUT, 7) /* (AA23) VOUT0_DATA2.GPIO0_47 */ /* SODIMM 76 */
+ >;
+ };
+
+ /* Verdin GPIO_5 */
+ pinctrl_gpio_5: main-gpio0-49-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00c8, PIN_INPUT, 7) /* (AB23) VOUT0_DATA4.GPIO0_49 */ /* SODIMM 216 */
+ >;
+ };
+
+ /* Verdin GPIO_6 */
+ pinctrl_gpio_6: main-gpio0-50-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00cc, PIN_INPUT, 7) /* (AD23) VOUT0_DATA5.GPIO0_50 */ /* SODIMM 218 */
+ >;
+ };
+
+ /* Verdin GPIO_7 */
+ pinctrl_gpio_7: main-gpio0-51-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00d0, PIN_INPUT, 7) /* (AC23) VOUT0_DATA6.GPIO0_51 */ /* SODIMM 220 */
+ >;
+ };
+
+ /* Verdin GPIO_8 */
+ pinctrl_gpio_8: main-gpio0-52-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00d4, PIN_INPUT, 7) /* (AE23) VOUT0_DATA7.GPIO0_52 */ /* SODIMM 222 */
+ >;
+ };
+
+ /* Verdin MSP_36 as GPIO */
+ pinctrl_msp36_gpio: main-gpio0-57-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00e8, PIN_INPUT, 7) /* (AD21) VOUT0_DATA12.GPIO0_57 */ /* SODIMM 172 - WiFi_BT_WKUP_HOST# */
+ >;
+ };
+
+ pinctrl_wifi_sd_int: main-gpio0-59-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00f0, PIN_INPUT, 7) /* (AA20) VOUT0_DATA14.GPIO0_59 */ /* WIFI_SD_INT */
+ >;
+ };
+
+ pinctrl_wifi_spi_cs: main-gpio0-60-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00f4, PIN_INPUT, 7) /* (Y20) VOUT0_DATA15.GPIO0_60 */ /* WIFI_SPI_CS# */
+ >;
+ };
+
+ /* Verdin PWM_3_DSI as GPIO */
+ pinctrl_pwm3_dsi_gpio: main-gpio1-16-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01b8, PIN_OUTPUT, 7) /* (E20) SPI0_CS1.GPIO1_16 */ /* SODIMM 19 */
+ >;
+ };
+
+ /* Verdin SD_1_CD# */
+ pinctrl_sd1_cd: main-gpio1-48-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0240, PIN_INPUT, 7) /* (D23) MMC1_SDCD.GPIO1_48 */ /* SODIMM 84 */
+ >;
+ };
+
+ /* Verdin MSP_29 as GPIO */
+ pinctrl_msp29_gpio: main-gpio1-49-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0244, PIN_INPUT, 7) /* (D24) MMC1_SDWP.GPIO1_49 */ /* SODIMM 154 */
+ >;
+ };
+
+ /* Verdin USB_1_EN */
+ pinctrl_usb0_en: main-gpio1-50-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0254, PIN_INPUT, 7) /* (G22) USB0_DRVVBUS.GPIO1_50 */ /* SODIMM 155 */
+ >;
+ };
+
+ /* Verdin I2C_1 */
+ pinctrl_main_i2c0: main-i2c0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01e0, PIN_INPUT_PULLUP, 0) /* (B25) I2C0_SCL */ /* SODIMM 14 */
+ AM62PX_IOPAD(0x01e4, PIN_INPUT_PULLUP, 0) /* (A24) I2C0_SDA */ /* SODIMM 12 */
+ >;
+ };
+
+ /* Verdin I2C_2_DSI */
+ pinctrl_main_i2c1: main-i2c1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01e8, PIN_INPUT_PULLUP, 0) /* (C24) I2C1_SCL */ /* SODIMM 55 */
+ AM62PX_IOPAD(0x01ec, PIN_INPUT_PULLUP, 0) /* (B24) I2C1_SDA */ /* SODIMM 53 */
+ >;
+ };
+
+ /* Verdin I2C_4_CSI */
+ pinctrl_main_i2c3: main-i2c3-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01d0, PIN_INPUT_PULLUP, 2) /* (A23) UART0_CTSn.I2C3_SCL */ /* SODIMM 95 */
+ AM62PX_IOPAD(0x01d4, PIN_INPUT_PULLUP, 2) /* (C22) UART0_RTSn.I2C3_SDA */ /* SODIMM 93 */
+ >;
+ };
+
+ /* Verdin CAN_1 */
+ pinctrl_main_mcan0: main-mcan0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01dc, PIN_INPUT, 0) /* (F20) MCAN0_RX */ /* SODIMM 22 */
+ AM62PX_IOPAD(0x01d8, PIN_OUTPUT, 0) /* (B23) MCAN0_TX */ /* SODIMM 20 */
+ >;
+ };
+
+ /* Verdin MSP_3/MSP_8 as CAN */
+ pinctrl_main_mcan1: main-mcan1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00b4, PIN_INPUT, 5) /* (U25) GPMC0_CSn3.MCAN1_RX */ /* SODIMM 92 */
+ AM62PX_IOPAD(0x00b0, PIN_OUTPUT, 5) /* (T22) GPMC0_CSn2.MCAN1_TX */ /* SODIMM 104 */
+ >;
+ };
+
+ /* Verdin SD_1 */
+ pinctrl_sdhci1: main-mmc1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x023c, PIN_INPUT, 0) /* (H20) MMC1_CMD */ /* SODIMM 74 */
+ AM62PX_IOPAD(0x0234, PIN_OUTPUT, 0) /* (J24) MMC1_CLK */ /* SODIMM 78 */
+ AM62PX_IOPAD(0x0230, PIN_INPUT, 0) /* (H21) MMC1_DAT0 */ /* SODIMM 80 */
+ AM62PX_IOPAD(0x022c, PIN_INPUT, 0) /* (H23) MMC1_DAT1 */ /* SODIMM 82 */
+ AM62PX_IOPAD(0x0228, PIN_INPUT, 0) /* (H22) MMC1_DAT2 */ /* SODIMM 70 */
+ AM62PX_IOPAD(0x0224, PIN_INPUT, 0) /* (H25) MMC1_DAT3 */ /* SODIMM 72 */
+ >;
+ };
+
+ /* On-module Wi-Fi on WB SKUs, module-specific SDIO otherwise */
+ pinctrl_sdhci2: main-mmc2-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0120, PIN_INPUT, 0) /* (K24) MMC2_CMD */ /* SODIMM 160, WiFi_SDIO_CMD */
+ AM62PX_IOPAD(0x0118, PIN_OUTPUT, 0) /* (K21) MMC2_CLK */ /* SODIMM 156, WiFi_SDIO_CLK */
+ AM62PX_IOPAD(0x011c, PIN_INPUT, 0) /* () MMC2_CLKLB */
+ AM62PX_IOPAD(0x0114, PIN_INPUT, 0) /* (K23) MMC2_DAT0 */ /* SODIMM 162, WiFi_SDIO_DATA0 */
+ AM62PX_IOPAD(0x0110, PIN_INPUT, 0) /* (K22) MMC2_DAT1 */ /* SODIMM 164, WiFi_SDIO_DATA1 */
+ AM62PX_IOPAD(0x010c, PIN_INPUT, 0) /* (L20) MMC2_DAT2 */ /* SODIMM 166, WiFi_SDIO_DATA2 */
+ AM62PX_IOPAD(0x0108, PIN_INPUT, 0) /* (L21) MMC2_DAT3 */ /* SODIMM 168, WiFi_SDIO_DATA3 */
+ >;
+ };
+
+ /* Verdin QSPI_1 */
+ pinctrl_ospi0: main-ospi0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0000, PIN_OUTPUT, 0) /* (P23) OSPI0_CLK */ /* SODIMM 52 */
+ AM62PX_IOPAD(0x002c, PIN_OUTPUT, 0) /* (M25) OSPI0_CSn0 */ /* SODIMM 54 */
+ AM62PX_IOPAD(0x0030, PIN_OUTPUT, 0) /* (L24) OSPI0_CSn1 */ /* SODIMM 64 */
+ AM62PX_IOPAD(0x000c, PIN_INPUT, 0) /* (L25) OSPI0_D0 */ /* SODIMM 56 */
+ AM62PX_IOPAD(0x0010, PIN_INPUT, 0) /* (N24) OSPI0_D1 */ /* SODIMM 58 */
+ AM62PX_IOPAD(0x0014, PIN_INPUT, 0) /* (N25) OSPI0_D2 */ /* SODIMM 60 */
+ AM62PX_IOPAD(0x0018, PIN_INPUT, 0) /* (M24) OSPI0_D3 */ /* SODIMM 62 */
+ AM62PX_IOPAD(0x0008, PIN_INPUT, 0) /* (P22) OSPI0_DQS */ /* SODIMM 66 */
+ >;
+ };
+
+ /* Verdin ETH_1 RGMII (On-module PHY) */
+ pinctrl_rgmii1: main-rgmii1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x014c, PIN_INPUT, 0) /* (B15) RGMII1_RD0 */ /* RGMII_RXD0 */
+ AM62PX_IOPAD(0x0150, PIN_INPUT, 0) /* (B16) RGMII1_RD1 */ /* RGMII_RXD1 */
+ AM62PX_IOPAD(0x0154, PIN_INPUT, 0) /* (A14) RGMII1_RD2 */ /* RGMII_RXD2 */
+ AM62PX_IOPAD(0x0158, PIN_INPUT, 0) /* (B14) RGMII1_RD3 */ /* RGMII_RXD3 */
+ AM62PX_IOPAD(0x0148, PIN_INPUT, 0) /* (A16) RGMII1_RXC */ /* RGMII_RXC */
+ AM62PX_IOPAD(0x0144, PIN_INPUT, 0) /* (A15) RGMII1_RX_CTL */ /* RGMII_RX_CTL */
+ AM62PX_IOPAD(0x0134, PIN_INPUT, 0) /* (A18) RGMII1_TD0 */ /* RGMII_TXD0 */
+ AM62PX_IOPAD(0x0138, PIN_INPUT, 0) /* (C17) RGMII1_TD1 */ /* RGMII_TXD1 */
+ AM62PX_IOPAD(0x013c, PIN_INPUT, 0) /* (A17) RGMII1_TD2 */ /* RGMII_TXD2 */
+ AM62PX_IOPAD(0x0140, PIN_INPUT, 0) /* (C16) RGMII1_TD3 */ /* RGMII_TXD3 */
+ AM62PX_IOPAD(0x0130, PIN_INPUT, 0) /* (B17) RGMII1_TXC */ /* RGMII_TXC */
+ AM62PX_IOPAD(0x012c, PIN_INPUT, 0) /* (B18) RGMII1_TX_CTL */ /* RGMII_TX_CTL */
+ >;
+ };
+
+ /* Verdin ETH_2 RGMII */
+ pinctrl_rgmii2: main-rgmii2-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0184, PIN_INPUT, 0) /* (E19) RGMII2_RD0 */ /* SODIMM 201 */
+ AM62PX_IOPAD(0x0188, PIN_INPUT, 0) /* (E16) RGMII2_RD1 */ /* SODIMM 203 */
+ AM62PX_IOPAD(0x018c, PIN_INPUT, 0) /* (E17) RGMII2_RD2 */ /* SODIMM 205 */
+ AM62PX_IOPAD(0x0190, PIN_INPUT, 0) /* (C19) RGMII2_RD3 */ /* SODIMM 207 */
+ AM62PX_IOPAD(0x0180, PIN_INPUT, 0) /* (D19) RGMII2_RXC */ /* SODIMM 197 */
+ AM62PX_IOPAD(0x017c, PIN_INPUT, 0) /* (F19) RGMII2_RX_CTL */ /* SODIMM 199 */
+ AM62PX_IOPAD(0x016c, PIN_INPUT, 0) /* (B19) RGMII2_TD0 */ /* SODIMM 221 */
+ AM62PX_IOPAD(0x0170, PIN_INPUT, 0) /* (A21) RGMII2_TD1 */ /* SODIMM 219 */
+ AM62PX_IOPAD(0x0174, PIN_INPUT, 0) /* (D17) RGMII2_TD2 */ /* SODIMM 217 */
+ AM62PX_IOPAD(0x0178, PIN_INPUT, 0) /* (A19) RGMII2_TD3 */ /* SODIMM 215 */
+ AM62PX_IOPAD(0x0168, PIN_INPUT, 0) /* (D16) RGMII2_TXC */ /* SODIMM 213 */
+ AM62PX_IOPAD(0x0164, PIN_INPUT, 0) /* (A20) RGMII2_TX_CTL */ /* SODIMM 211 */
+ >;
+ };
+
+ /* TPM SPI, optional module-specific SPI */
+ pinctrl_main_spi0: main-spi0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01bc, PIN_INPUT, 0) /* (B21) SPI0_CLK */ /* TPM_SPI_CLK - SODIMM 148 */
+ AM62PX_IOPAD(0x01c0, PIN_OUTPUT, 0) /* (B20) SPI0_D0 */ /* TPM_SPI_MOSI - SODIMM 150 */
+ AM62PX_IOPAD(0x01c4, PIN_INPUT, 0) /* (C21) SPI0_D1 */ /* TPM_SPI_MISO - SODIMM 152 */
+ AM62PX_IOPAD(0x01b4, PIN_INPUT, 0) /* (D20) SPI0_CS0 */ /* TPM_SPI_CS */
+ >;
+ };
+
+ /* Verdin SPI_1 */
+ pinctrl_main_spi1: main-spi1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0020, PIN_INPUT, 1) /* (N22) OSPI0_D5.SPI1_CLK */ /* SODIMM 196 */
+ AM62PX_IOPAD(0x0024, PIN_OUTPUT, 1) /* (P21) OSPI0_D6.SPI1_D0 */ /* SODIMM 200 */
+ AM62PX_IOPAD(0x0028, PIN_INPUT, 1) /* (N20) OSPI0_D7.SPI1_D1 */ /* SODIMM 198 */
+ >;
+ };
+
+ /* Verdin SPI_1_CS */
+ pinctrl_main_spi1_cs0: main-spi1-cs0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x001c, PIN_OUTPUT, 1) /* (N21) OSPI0_D4.SPI1_CS0 */ /* SODIMM 202 */
+ >;
+ };
+
+ /* Verdin I2S_1 MCLK */
+ pinctrl_i2s1_mclk: main-system-audio-ext-refclk0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00c4, PIN_OUTPUT, 5) /* (Y23) VOUT0_DATA3.AUDIO_EXT_REFCLK0 */ /* SODIMM 38 */
+ >;
+ };
+
+ pinctrl_eth_clock: main-system-clkout0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01f0, PIN_OUTPUT_PULLUP, 5) /* (C25) EXT_REFCLK1.CLKOUT0 */ /* ETH_25MHz_CLK */
+ >;
+ };
+
+ pinctrl_pmic_extint: main-system-extint-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01f4, PIN_INPUT, 0) /* (C23) EXTINTn */ /* PMIC_EXTINT# */
+ >;
+ };
+
+ /* Verdin UART_3, used as the Linux console */
+ pinctrl_uart0: main-uart0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01c8, PIN_INPUT, 0) /* (A22) UART0_RXD */ /* SODIMM 147 */
+ AM62PX_IOPAD(0x01cc, PIN_OUTPUT, 0) /* (B22) UART0_TXD */ /* SODIMM 149 */
+ >;
+ };
+
+ /* Verdin UART_1 */
+ pinctrl_uart1: main-uart1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01ac, PIN_INPUT, 2) /* (G23) MCASP0_AFSR.UART1_RXD */ /* SODIMM 129 */
+ AM62PX_IOPAD(0x01b0, PIN_OUTPUT, 2) /* (G20) MCASP0_ACLKR.UART1_TXD */ /* SODIMM 131 */
+ AM62PX_IOPAD(0x0194, PIN_INPUT, 2) /* (D25) MCASP0_AXR3.UART1_CTSn */ /* SODIMM 135 */
+ AM62PX_IOPAD(0x0198, PIN_OUTPUT, 2) /* (E25) MCASP0_AXR2.UART1_RTSn */ /* SODIMM 133 */
+ >;
+ };
+
+ /* Verdin MSP 41, 42, 44 and 45 as UART */
+ pinctrl_uart2: main-uart2-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00b8, PIN_INPUT, 4) /* (AE24) VOUT0_DATA0.UART2_RXD */ /* SODIMM 192 */
+ AM62PX_IOPAD(0x00bc, PIN_OUTPUT, 4) /* (W23) VOUT0_DATA1.UART2_TXD */ /* SODIMM 190 */
+ AM62PX_IOPAD(0x0104, PIN_INPUT, 4) /* (Y21) VOUT0_PCLK.UART2_CTSn */ /* SODIMM 184 */
+ AM62PX_IOPAD(0x0100, PIN_OUTPUT, 4) /* (W20) VOUT0_VSYNC.UART2_RTSn */ /* SODIMM 186 */
+ >;
+ };
+
+ /* Bluetooth on WB SKUs */
+ pinctrl_uart6: main-uart6-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x00d8, PIN_INPUT, 4) /* (AE22) VOUT0_DATA8.UART6_RXD */ /* WiFi_UART_RXD */
+ AM62PX_IOPAD(0x00dc, PIN_OUTPUT, 4) /* (AC22) VOUT0_DATA9.UART6_TXD */ /* WiFi_UART_TXD */
+ AM62PX_IOPAD(0x00e4, PIN_INPUT, 4) /* (AE21) VOUT0_DATA11.UART6_CTSn */ /* WiFi_UART_CTS */
+ AM62PX_IOPAD(0x00e0, PIN_OUTPUT, 4) /* (W22) VOUT0_DATA10.UART6_RTSn */ /* WiFi_UART_RTS */
+ >;
+ };
+
+ /* Verdin USB_2_EN */
+ pinctrl_usb1: main-usb1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0258, PIN_OUTPUT, 0) /* (G21) USB1_DRVVBUS */ /* SODIMM 185 */
+ >;
+ };
+
+ /* Verdin I2S_1 */
+ pinctrl_mcasp0: mcasp0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01a4, PIN_INPUT, 0) /* (F24) MCASP0_ACLKX */ /* SODIMM 30 */
+ AM62PX_IOPAD(0x01a8, PIN_INPUT, 0) /* (F25) MCASP0_AFSX */ /* SODIMM 32 */
+ AM62PX_IOPAD(0x01a0, PIN_INPUT, 0) /* (F23) MCASP0_AXR0 */ /* SODIMM 34 */
+ AM62PX_IOPAD(0x019c, PIN_INPUT, 0) /* (E24) MCASP0_AXR1 */ /* SODIMM 36 */
+ >;
+ };
+
+ /* Verdin I2S_2 */
+ pinctrl_mcasp1: mcasp1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0090, PIN_INPUT, 2) /* (U24) GPMC0_BE0n_CLE.MCASP1_ACLKX */ /* SODIMM 42 */
+ AM62PX_IOPAD(0x0098, PIN_INPUT, 2) /* (AA24) GPMC0_WAIT0.MCASP1_AFSX */ /* SODIMM 44 */
+ AM62PX_IOPAD(0x008c, PIN_INPUT, 2) /* (T25) GPMC0_WEn.MCASP1_AXR0 */ /* SODIMM 46 */
+ AM62PX_IOPAD(0x0088, PIN_INPUT, 2) /* (R24) GPMC0_OEn_REn.MCASP1_AXR1 */ /* SODIMM 48 */
+ >;
+ };
+
+ /* MDIO, shared by Verdin ETH_1 (On-module PHY) and Verdin ETH_2_RGMII */
+ pinctrl_mdio: mdio0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0160, PIN_OUTPUT, 0) /* (F17) MDIO0_MDC */ /* SODIMM 193 */
+ AM62PX_IOPAD(0x015c, PIN_INPUT, 0) /* (F16) MDIO0_MDIO */ /* SODIMM 191 */
+ >;
+ };
+};
+
+&mcu_pmx0 {
+ /* Verdin GPIO_1 */
+ pinctrl_gpio_1: mcu-gpio0-1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_MCU_IOPAD(0x0004, PIN_INPUT, 7) /* (E10) MCU_SPI0_CS1.MCU_GPIO0_1 */ /* SODIMM 206 */
+ >;
+ };
+
+ /* Verdin GPIO_2 */
+ pinctrl_gpio_2: mcu-gpio0-2-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_MCU_IOPAD(0x0008, PIN_INPUT, 7) /* (C10) MCU_SPI0_CLK.MCU_GPIO0_2 */ /* SODIMM 208 */
+ >;
+ };
+
+ /* Verdin GPIO_3 */
+ pinctrl_gpio_3: mcu-gpio0-3-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_MCU_IOPAD(0x000c, PIN_INPUT, 7) /* (B11) MCU_SPI0_D0.MCU_GPIO0_3 */ /* SODIMM 210 */
+ >;
+ };
+
+ /* Verdin GPIO_4 */
+ pinctrl_gpio_4: mcu-gpio0-4-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_MCU_IOPAD(0x0010, PIN_INPUT, 7) /* (D10) MCU_SPI0_D1.MCU_GPIO0_4 */ /* SODIMM 212 */
+ >;
+ };
+
+ /* Verdin I2C_3_HDMI */
+ pinctrl_mcu_i2c0: mcu-i2c0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_MCU_IOPAD(0x0044, PIN_INPUT, 0) /* (E11) MCU_I2C0_SCL */ /* SODIMM 59 */
+ AM62PX_MCU_IOPAD(0x0048, PIN_INPUT, 0) /* (D11) MCU_I2C0_SDA */ /* SODIMM 57 */
+ >;
+ };
+
+ /* Verdin CAN_2 */
+ pinctrl_mcu_mcan0: mcu-mcan0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_MCU_IOPAD(0x0038, PIN_INPUT, 0) /* (D6) MCU_MCAN0_RX */ /* SODIMM 22 */
+ AM62PX_MCU_IOPAD(0x0034, PIN_OUTPUT, 0) /* (E8) MCU_MCAN0_TX */ /* SODIMM 20 */
+ >;
+ };
+
+ /* Verdin MSP_13/MSP_18 as CAN */
+ pinctrl_mcu_mcan1: mcu-mcan1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_MCU_IOPAD(0x0040, PIN_INPUT, 0) /* (E7) MCU_MCAN1_RX */ /* SODIMM 116 */
+ AM62PX_MCU_IOPAD(0x003c, PIN_OUTPUT, 0) /* (F8) MCU_MCAN1_TX */ /* SODIMM 128 */
+ >;
+ };
+
+ /* Verdin UART_4 */
+ pinctrl_mcu_uart0: mcu-uart0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_MCU_IOPAD(0x0014, PIN_INPUT, 0) /* (B6) MCU_UART0_RXD */ /* SODIMM 151 */
+ AM62PX_MCU_IOPAD(0x0018, PIN_OUTPUT, 0) /* (C8) MCU_UART0_TXD */ /* SODIMM 153 */
+ >;
+ };
+
+ /* On-module I2C - PMIC_I2C */
+ pinctrl_wkup_i2c0: wkup-i2c0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_MCU_IOPAD(0x004c, PIN_INPUT, 0) /* (A13) WKUP_I2C0_SCL */ /* PMIC_I2C_SCL */
+ AM62PX_MCU_IOPAD(0x0050, PIN_INPUT, 0) /* (C11) WKUP_I2C0_SDA */ /* PMIC_I2C_SDA */
+ >;
+ };
+
+ /* Verdin CSI_1_MCLK */
+ pinctrl_wkup_clkout0: wkup-system-clkout0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_MCU_IOPAD(0x0084, PIN_OUTPUT, 0) /* (F13) WKUP_CLKOUT0 */ /* SODIMM 91 */
+ >;
+ };
+
+ /* Verdin UART_2 */
+ pinctrl_wkup_uart0: wkup-uart0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_MCU_IOPAD(0x002c, PIN_INPUT, 0) /* (C7) WKUP_UART0_CTSn */ /* SODIMM 143 */
+ AM62PX_MCU_IOPAD(0x0030, PIN_OUTPUT, 0) /* (C6) WKUP_UART0_RTSn */ /* SODIMM 141 */
+ AM62PX_MCU_IOPAD(0x0024, PIN_INPUT, 0) /* (D8) WKUP_UART0_RXD */ /* SODIMM 137 */
+ AM62PX_MCU_IOPAD(0x0028, PIN_OUTPUT, 0) /* (D7) WKUP_UART0_TXD */ /* SODIMM 139 */
+ >;
+ };
+};
+
+/* Verdin I2S_1_MCLK */
+&audio_refclk0 {
+ assigned-clock-rates = <25000000>;
+};
+
+&cpsw3g {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii1>;
+ status = "disabled";
+};
+
+/* MDIO, shared by Verdin ETH_1 (On-module PHY) and Verdin ETH_2_RGMII */
+&cpsw3g_mdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mdio>, <&pinctrl_eth_clock>;
+ assigned-clocks = <&k3_clks 157 36>;
+ assigned-clock-parents = <&k3_clks 157 38>;
+ assigned-clock-rates = <25000000>;
+ status = "disabled";
+
+ som_eth_phy: ethernet-phy@0 {
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eth_int>;
+ interrupt-parent = <&main_gpio0>;
+ interrupts = <36 IRQ_TYPE_EDGE_FALLING>;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ };
+};
+
+/* Verdin ETH_1 (On-module PHY) */
+&cpsw_port1 {
+ phy-handle = <&som_eth_phy>;
+ phy-mode = "rgmii-rxid";
+ status = "disabled";
+};
+
+/* Verdin ETH_2_RGMII */
+&cpsw_port2 {
+ status = "disabled";
+};
+
+/* Verdin PWM_3_DSI */
+&epwm0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_epwm0_b>;
+ status = "disabled";
+};
+
+/* Verdin PWM_1, PWM_2 */
+&epwm2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_epwm2_a>, <&pinctrl_epwm2_b>;
+ status = "disabled";
+};
+
+&mailbox0_cluster0 {
+ status = "okay";
+
+ mbox_r5_0: mbox-r5-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&mailbox0_cluster1 {
+ status = "okay";
+
+ mbox_mcu_r5_0: mbox-mcu-r5-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&main_gpio0 {
+ gpio-line-names =
+ "SODIMM_52",
+ "SODIMM_252",
+ "SODIMM_66",
+ "SODIMM_56",
+ "SODIMM_58",
+ "SODIMM_60",
+ "SODIMM_62",
+ "",
+ "",
+ "",
+ "", /* 10 */
+ "SODIMM_54",
+ "SODIMM_64",
+ "SODIMM_174",
+ "SODIMM_244",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 20 */
+ "PMIC_SD_VSEL",
+ "",
+ "",
+ "",
+ "TPM_EXTINT#",
+ "",
+ "",
+ "",
+ "WiFi_WKUP_BT#",
+ "WiFi_WKUP_WLAN#", /* 30 */
+ "SODIMM_161",
+ "SODIMM_157",
+ "",
+ "",
+ "",
+ "ETH_INT#",
+ "",
+ "SODIMM_17",
+ "SODIMM_21",
+ "", /* 40 */
+ "SODIMM_187",
+ "SODIMM_189",
+ "",
+ "",
+ "",
+ "",
+ "SODIMM_76",
+ "",
+ "SODIMM_216",
+ "SODIMM_218", /* 50 */
+ "SODIMM_220",
+ "SODIMM_222",
+ "",
+ "",
+ "",
+ "",
+ "SODIMM_172",
+ "",
+ "WIFI_SD_INT",
+ "WIFI_SPI_CS#", /* 60 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 70 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 80 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 90 */
+ "";
+
+ verdin_pcie_1_reset_hog: pcie-1-reset-hog {
+ gpio-hog;
+ /* Verdin PCIE_1_RESET# (SODIMM 244) */
+ gpios = <14 GPIO_ACTIVE_LOW>;
+ line-name = "PCIE_1_RESET#";
+ output-low;
+ status = "disabled";
+ };
+};
+
+&main_gpio1 {
+ gpio-line-names =
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 10 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 20 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 30 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 40 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "SODIMM_84",
+ "SODIMM_154",
+ "SODIMM_155", /* 50 */
+ "";
+};
+
+/* Verdin I2C_1 */
+&main_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_main_i2c0>;
+ status = "disabled";
+};
+
+/* Verdin I2C_2_DSI */
+&main_i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_main_i2c1>;
+ status = "disabled";
+};
+
+/* Verdin I2C_4_CSI */
+&main_i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_main_i2c3>;
+ status = "disabled";
+};
+
+/* Verdin CAN_1 */
+&main_mcan0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_main_mcan0>;
+ status = "disabled";
+};
+
+/* TPM SPI, optional SPI on module specific pins */
+&main_spi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_main_spi0>;
+ ti,pindir-d0-out-d1-in;
+ status = "okay";
+
+ tpm@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpm_extint>;
+ interrupt-parent = <&main_gpio0>;
+ interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
+ spi-max-frequency = <18500000>;
+ };
+};
+
+/* Verdin SPI_1 */
+&main_spi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_main_spi1>, <&pinctrl_main_spi1_cs0>;
+ ti,pindir-d0-out-d1-in;
+ status = "disabled";
+};
+
+/* Verdin UART_3, used as the Linux console */
+&main_uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0>;
+ status = "disabled";
+};
+
+/* Verdin UART_1 */
+&main_uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ uart-has-rtscts;
+ status = "disabled";
+};
+
+/* Verdin I2S_1 */
+&mcasp0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mcasp0>;
+ op-mode = <0>; /* I2S mode */
+ serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
+ 1 2 0 0
+ 0 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+ >;
+ tdm-slots = <2>;
+ rx-num-evt = <0>;
+ tx-num-evt = <0>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+};
+
+/* Verdin I2S_2 */
+&mcasp1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mcasp1>;
+ op-mode = <0>; /* I2S mode */
+ serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
+ 1 2 0 0
+ 0 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+ >;
+ tdm-slots = <2>;
+ rx-num-evt = <0>;
+ tx-num-evt = <0>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+};
+
+&mcu_gpio0 {
+ gpio-line-names =
+ "",
+ "SODIMM_206",
+ "SODIMM_208",
+ "SODIMM_210",
+ "SODIMM_212",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 10 */
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "", /* 20 */
+ "",
+ "",
+ "";
+};
+
+/* Verdin I2C_3_HDMI */
+&mcu_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mcu_i2c0>;
+ status = "disabled";
+};
+
+/* Verdin CAN_2 */
+&mcu_mcan0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mcu_mcan0>;
+ status = "disabled";
+};
+
+/* Verdin UART_4 */
+&mcu_uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mcu_uart0>;
+ status = "disabled";
+};
+
+/* Verdin QSPI_1 */
+&ospi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ospi0>;
+ status = "disabled";
+};
+
+/* On-module eMMC */
+&sdhci0 {
+ no-mmc-hs400;
+ non-removable;
+ ti,driver-strength-ohm = <50>;
+ status = "okay";
+};
+
+/* Verdin SD_1 */
+&sdhci1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1>, <&pinctrl_sd1_cd>;
+ cd-gpios = <&main_gpio1 48 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ vmmc-supply = <&reg_sd1_vmmc>;
+ vqmmc-supply = <&reg_sd1_vqmmc>;
+ ti,fails-without-test-cd;
+ status = "disabled";
+};
+
+/* Verdin USB_1 */
+&usbss0 {
+ ti,vbus-divider;
+ status = "disabled";
+};
+
+&usb0 {
+ adp-disable;
+ usb-role-switch;
+ status = "disabled";
+
+ port {
+ usb0_ep: endpoint {
+ remote-endpoint = <&usb_dr_connector>;
+ };
+ };
+};
+
+/* Verdin USB_2 */
+&usbss1 {
+ ti,vbus-divider;
+ status = "disabled";
+};
+
+&usb1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1>;
+ dr_mode = "host";
+ status = "disabled";
+};
+
+/* On-module I2C - PMIC_I2C */
+&wkup_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wkup_i2c0>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ som_gpio_expander: gpio@21 {
+ compatible = "nxp,pcal6408";
+ reg = <0x21>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-line-names = "SODIMM_256";
+ };
+
+ pmic@30 {
+ compatible = "ti,tps65219";
+ reg = <0x30>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic_extint>;
+ interrupt-parent = <&gic500>;
+ interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
+
+ buck1-supply = <&reg_vsodimm>;
+ buck2-supply = <&reg_vsodimm>;
+ buck3-supply = <&reg_vsodimm>;
+ ldo1-supply = <&reg_3v3>;
+ ldo2-supply = <&reg_1v8>;
+ ldo3-supply = <&reg_3v3>;
+ ldo4-supply = <&reg_3v3>;
+ system-power-controller;
+ ti,power-button;
+
+ regulators {
+ reg_3v3: buck1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "+V3.3 (PMIC BUCK1)";
+ };
+
+ reg_1v8: buck2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "+V1.8 (PMIC BUCK2)"; /* On-module and SODIMM 214 */
+ };
+
+ buck3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1100000>;
+ regulator-min-microvolt = <1100000>;
+ regulator-name = "+VDD_DDR (PMIC BUCK3)";
+ };
+
+ reg_sd_3v3_1v8: ldo1 {
+ regulator-allow-bypass;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "+V3.3_1.8_SD (PMIC LDO1)";
+ };
+
+ ldo2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <850000>;
+ regulator-min-microvolt = <850000>;
+ regulator-name = "+V_PMIC_LDO2 (PMIC LDO4)"; // +VDDR_CORE
+ };
+
+ ldo3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "+V1.8A (PMIC LDO3)";
+ };
+
+ ldo4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <2500000>;
+ regulator-min-microvolt = <2500000>;
+ regulator-name = "+V2.5_ETH (PMIC LDO4)";
+ };
+ };
+ };
+
+ som_rtc_i2c: rtc@32 {
+ compatible = "epson,rx8130";
+ reg = <0x32>;
+ };
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp1075";
+ reg = <0x48>;
+ };
+
+ som_adc: adc@49 {
+ compatible = "ti,tla2024";
+ reg = <0x49>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+
+ /* Verdin (ADC_4 - ADC_3) */
+ channel@0 {
+ reg = <0>;
+ ti,datarate = <4>;
+ ti,gain = <2>;
+ };
+
+ /* Verdin (ADC_4 - ADC_1) */
+ channel@1 {
+ reg = <1>;
+ ti,datarate = <4>;
+ ti,gain = <2>;
+ };
+
+ /* Verdin (ADC_3 - ADC_1) */
+ channel@2 {
+ reg = <2>;
+ ti,datarate = <4>;
+ ti,gain = <2>;
+ };
+
+ /* Verdin (ADC_2 - ADC_1) */
+ channel@3 {
+ reg = <3>;
+ ti,datarate = <4>;
+ ti,gain = <2>;
+ };
+
+ /* Verdin ADC_4 */
+ channel@4 {
+ reg = <4>;
+ ti,datarate = <4>;
+ ti,gain = <2>;
+ };
+
+ /* Verdin ADC_3 */
+ channel@5 {
+ reg = <5>;
+ ti,datarate = <4>;
+ ti,gain = <2>;
+ };
+
+ /* Verdin ADC_2 */
+ channel@6 {
+ reg = <6>;
+ ti,datarate = <4>;
+ ti,gain = <2>;
+ };
+
+ /* Verdin ADC_1 */
+ channel@7 {
+ reg = <7>;
+ ti,datarate = <4>;
+ ti,gain = <2>;
+ };
+ };
+
+ som_eeprom: eeprom@50 {
+ compatible = "st,24c02", "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+};
+
+/* Verdin UART_2 */
+&wkup_uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wkup_uart0>;
+ uart-has-rtscts;
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
index d29f524600af..83c37de7d338 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
@@ -49,19 +49,37 @@
#size-cells = <2>;
ranges;
- secure_tfa_ddr: tfa@9e780000 {
- reg = <0x00 0x9e780000 0x00 0x80000>;
+ mcu_r5fss0_core0_dma_memory_region: mcu-r5fss-dma-memory-region@9b800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9b800000 0x00 0x100000>;
no-map;
};
- secure_ddr: optee@9e800000 {
- reg = <0x00 0x9e800000 0x00 0x01800000>; /* for OP-TEE */
+ mcu_r5fss0_core0_memory_region: mcu-r5fss-memory-region@9b900000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9b900000 0x00 0xf00000>;
+ no-map;
+ };
+
+ wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@9c800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9c800000 0x00 0x100000>;
no-map;
};
- wkup_r5fss0_core0_memory_region: r5f-dma-memory@9c900000 {
+ wkup_r5fss0_core0_memory_region: r5f-memory@9c900000 {
compatible = "shared-dma-pool";
- reg = <0x00 0x9c900000 0x00 0x01e00000>;
+ reg = <0x00 0x9c900000 0x00 0xf00000>;
+ no-map;
+ };
+
+ secure_tfa_ddr: tfa@9e780000 {
+ reg = <0x00 0x9e780000 0x00 0x80000>;
+ no-map;
+ };
+
+ secure_ddr: optee@9e800000 {
+ reg = <0x00 0x9e800000 0x00 0x01800000>; /* for OP-TEE */
no-map;
};
};
@@ -115,6 +133,28 @@
bootph-all;
};
+ vcc_3v3_main: regulator-4 {
+ /* output of LM5141-Q1 */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_main";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vmain_pd>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc_3v3_sys: regulator-5 {
+ /* output of TPS222965DSGT */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_sys";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_main>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -356,6 +396,32 @@
AM62PX_IOPAD(0x0124, PIN_INPUT, 7) /* (J25) MMC2_SDCD.GPIO0_71 */
>;
};
+
+ main_ecap1_pins_default: main-ecap1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x019c, PIN_OUTPUT, 2) /* (E24) MCASP0_AXR1.ECAP1_IN_APWM_OUT */
+ >;
+ };
+
+ main_ecap2_pins_default: main-ecap2-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01a4, PIN_OUTPUT, 2) /* (F24) MCASP0_ACLKX.ECAP2_IN_APWM_OUT */
+ >;
+ };
+
+ main_epwm0_pins_default: main-epwm0-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01b4, PIN_OUTPUT, 2) /* (D20) SPI0_CS0.EHRPWM0_A */
+ AM62PX_IOPAD(0x01b8, PIN_OUTPUT, 2) /* (E20) SPI0_CS1.EHRPWM0_B */
+ >;
+ };
+
+ main_epwm1_pins_default: main-epwm1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x01bc, PIN_OUTPUT, 2) /* (B21) SPI0_CLK.EHRPWM1_A */
+ AM62PX_IOPAD(0x01c0, PIN_OUTPUT, 2) /* (B20) SPI0_D0.EHRPWM1_B */
+ >;
+ };
};
&main_i2c0 {
@@ -454,8 +520,8 @@
&sdhci0 {
status = "okay";
+ non-removable;
ti,driver-strength-ohm = <50>;
- disable-wp;
bootph-all;
};
@@ -640,6 +706,26 @@
};
};
+&wkup_r5fss0 {
+ status = "okay";
+};
+
+&wkup_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster0 &mbox_r5_0>;
+ memory-region = <&wkup_r5fss0_core0_dma_memory_region>,
+ <&wkup_r5fss0_core0_memory_region>;
+};
+
+&mcu_r5fss0 {
+ status = "okay";
+};
+
+&mcu_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster1 &mbox_mcu_r5_0>;
+ memory-region = <&mcu_r5fss0_core0_dma_memory_region>,
+ <&mcu_r5fss0_core0_memory_region>;
+};
+
&main_uart0 {
pinctrl-names = "default";
pinctrl-0 = <&main_uart0_pins_default>;
@@ -683,3 +769,31 @@
&mcu_gpio_intr {
status = "reserved";
};
+
+&ecap1 {
+ /* P36 of J4 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_ecap1_pins_default>;
+ status = "okay";
+};
+
+&ecap2 {
+ /* P11 of J4 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_ecap2_pins_default>;
+ status = "okay";
+};
+
+&epwm0 {
+ /* P24/P26 of J4 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_epwm0_pins_default>;
+ status = "okay";
+};
+
+&epwm1 {
+ /* P23/P19 of J4 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_epwm1_pins_default>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-dahlia.dts b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-dahlia.dts
new file mode 100644
index 000000000000..1790e572eaa0
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-dahlia.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/dahlia-carrier-board-kit
+ */
+
+/dts-v1/;
+
+#include "k3-am62p5.dtsi"
+#include "k3-am62p-verdin.dtsi"
+#include "k3-am62p-verdin-nonwifi.dtsi"
+#include "k3-am62p-verdin-dahlia.dtsi"
+
+/ {
+ model = "Toradex Verdin AM62P on Dahlia Board";
+ compatible = "toradex,verdin-am62p-nonwifi-dahlia",
+ "toradex,verdin-am62p-nonwifi",
+ "toradex,verdin-am62p",
+ "ti,am62p5";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-dev.dts b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-dev.dts
new file mode 100644
index 000000000000..5794f650f751
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-dev.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/verdin-development-board-kit
+ */
+
+/dts-v1/;
+
+#include "k3-am62p5.dtsi"
+#include "k3-am62p-verdin.dtsi"
+#include "k3-am62p-verdin-nonwifi.dtsi"
+#include "k3-am62p-verdin-dev.dtsi"
+
+/ {
+ model = "Toradex Verdin AM62P on Verdin Development Board";
+ compatible = "toradex,verdin-am62p-nonwifi-dev",
+ "toradex,verdin-am62p-nonwifi",
+ "toradex,verdin-am62p",
+ "ti,am62p5";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-ivy.dts b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-ivy.dts
new file mode 100644
index 000000000000..a777513f26ec
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-ivy.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/ivy-carrier-board
+ */
+
+/dts-v1/;
+
+#include "k3-am62p5.dtsi"
+#include "k3-am62p-verdin.dtsi"
+#include "k3-am62p-verdin-nonwifi.dtsi"
+#include "k3-am62p-verdin-ivy.dtsi"
+
+/ {
+ model = "Toradex Verdin AM62P on Ivy Board";
+ compatible = "toradex,verdin-am62p-nonwifi-ivy",
+ "toradex,verdin-am62p-nonwifi",
+ "toradex,verdin-am62p",
+ "ti,am62p5";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-mallow.dts b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-mallow.dts
new file mode 100644
index 000000000000..52823874eadc
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-mallow.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/mallow-carrier-board
+ */
+
+/dts-v1/;
+
+#include "k3-am62p5.dtsi"
+#include "k3-am62p-verdin.dtsi"
+#include "k3-am62p-verdin-nonwifi.dtsi"
+#include "k3-am62p-verdin-mallow.dtsi"
+
+/ {
+ model = "Toradex Verdin AM62P on Mallow Board";
+ compatible = "toradex,verdin-am62p-nonwifi-mallow",
+ "toradex,verdin-am62p-nonwifi",
+ "toradex,verdin-am62p",
+ "ti,am62p5";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-yavia.dts b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-yavia.dts
new file mode 100644
index 000000000000..c27bda794b51
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-nonwifi-yavia.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/yavia
+ */
+
+/dts-v1/;
+
+#include "k3-am62p5.dtsi"
+#include "k3-am62p-verdin.dtsi"
+#include "k3-am62p-verdin-nonwifi.dtsi"
+#include "k3-am62p-verdin-yavia.dtsi"
+
+/ {
+ model = "Toradex Verdin AM62P on Yavia Board";
+ compatible = "toradex,verdin-am62p-nonwifi-yavia",
+ "toradex,verdin-am62p-nonwifi",
+ "toradex,verdin-am62p",
+ "ti,am62p5";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-dahlia.dts b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-dahlia.dts
new file mode 100644
index 000000000000..12b57985f38e
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-dahlia.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/dahlia-carrier-board-kit
+ */
+
+/dts-v1/;
+
+#include "k3-am62p5.dtsi"
+#include "k3-am62p-verdin.dtsi"
+#include "k3-am62p-verdin-wifi.dtsi"
+#include "k3-am62p-verdin-dahlia.dtsi"
+
+/ {
+ model = "Toradex Verdin AM62P WB on Dahlia Board";
+ compatible = "toradex,verdin-am62p-wifi-dahlia",
+ "toradex,verdin-am62p-wifi",
+ "toradex,verdin-am62p",
+ "ti,am62p5";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-dev.dts b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-dev.dts
new file mode 100644
index 000000000000..bbc2770d5f5d
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-dev.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/verdin-development-board-kit
+ */
+
+/dts-v1/;
+
+#include "k3-am62p5.dtsi"
+#include "k3-am62p-verdin.dtsi"
+#include "k3-am62p-verdin-wifi.dtsi"
+#include "k3-am62p-verdin-dev.dtsi"
+
+/ {
+ model = "Toradex Verdin AM62P WB on Verdin Development Board";
+ compatible = "toradex,verdin-am62p-wifi-dev",
+ "toradex,verdin-am62p-wifi",
+ "toradex,verdin-am62p",
+ "ti,am62p5";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-ivy.dts b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-ivy.dts
new file mode 100644
index 000000000000..27467281bc3a
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-ivy.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/ivy-carrier-board
+ */
+
+/dts-v1/;
+
+#include "k3-am62p5.dtsi"
+#include "k3-am62p-verdin.dtsi"
+#include "k3-am62p-verdin-wifi.dtsi"
+#include "k3-am62p-verdin-ivy.dtsi"
+
+/ {
+ model = "Toradex Verdin AM62P WB on Ivy Board";
+ compatible = "toradex,verdin-am62p-wifi-ivy",
+ "toradex,verdin-am62p-wifi",
+ "toradex,verdin-am62p",
+ "ti,am62p5";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-mallow.dts b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-mallow.dts
new file mode 100644
index 000000000000..e35851451cd4
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-mallow.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/mallow-carrier-board
+ */
+
+/dts-v1/;
+
+#include "k3-am62p5.dtsi"
+#include "k3-am62p-verdin.dtsi"
+#include "k3-am62p-verdin-wifi.dtsi"
+#include "k3-am62p-verdin-mallow.dtsi"
+
+/ {
+ model = "Toradex Verdin AM62P WB on Mallow Board";
+ compatible = "toradex,verdin-am62p-wifi-mallow",
+ "toradex,verdin-am62p-wifi",
+ "toradex,verdin-am62p",
+ "ti,am62p5";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-yavia.dts b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-yavia.dts
new file mode 100644
index 000000000000..25e0842bc905
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-verdin-wifi-yavia.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2025 Toradex
+ *
+ * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62p
+ * https://www.toradex.com/products/carrier-board/yavia
+ */
+
+/dts-v1/;
+
+#include "k3-am62p5.dtsi"
+#include "k3-am62p-verdin.dtsi"
+#include "k3-am62p-verdin-wifi.dtsi"
+#include "k3-am62p-verdin-yavia.dtsi"
+
+/ {
+ model = "Toradex Verdin AM62P WB on Yavia Board";
+ compatible = "toradex,verdin-am62p-wifi-yavia",
+ "toradex,verdin-am62p-wifi",
+ "toradex,verdin-am62p",
+ "ti,am62p5";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra-gpio-fan.dtso b/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra-gpio-fan.dtso
index f0b2fd4165a7..1fd0aaff3193 100644
--- a/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra-gpio-fan.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra-gpio-fan.dtso
@@ -33,7 +33,7 @@
&thermal_zones {
main0_thermal: main0-thermal {
trips {
- main0_thermal_trip0: main0-thermal-trip {
+ main0_fan: main0-fan {
temperature = <65000>; /* millicelsius */
hysteresis = <2000>; /* millicelsius */
type = "active";
@@ -42,7 +42,17 @@
cooling-maps {
map0 {
- trip = <&main0_thermal_trip0>;
+ trip = <&main0_alert>;
+ cooling-device =
+ <&fan THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map1 {
+ trip = <&main0_fan>;
cooling-device = <&fan THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
index d52cb2a5a589..ee8337bfbbfd 100644
--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
@@ -70,6 +70,18 @@
no-map;
};
+ wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@9da00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9da00000 0x00 0x100000>;
+ no-map;
+ };
+
+ wkup_r5fss0_core0_memory_region: r5f-memory@9db00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9db00000 0x00 0xc00000>;
+ no-map;
+ };
+
secure_tfa_ddr: tfa@9e780000 {
reg = <0x00 0x9e780000 0x00 0x80000>;
alignment = <0x1000>;
@@ -82,11 +94,6 @@
no-map;
};
- wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@9db00000 {
- compatible = "shared-dma-pool";
- reg = <0x00 0x9db00000 0x00 0xc00000>;
- no-map;
- };
};
leds {
@@ -303,6 +310,25 @@
AM62X_IOPAD(0x078, PIN_OUTPUT, 1) /* (U24) GPMC0_AD15.VOUT0_DATA23 */
>;
};
+
+ main_ecap0_pins_default: main-ecap0-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x01b8, PIN_OUTPUT, 3) /* (C13) SPI0_CS1.ECAP0_IN_APWM_OUT */
+ >;
+ };
+
+ main_ecap2_pins_default: main-ecap2-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x01a4, PIN_OUTPUT, 2) /* (B20) MCASP0_ACLKX.ECAP2_IN_APWM_OUT */
+ >;
+ };
+
+ main_epwm1_pins_default: main-epwm1-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x019c, PIN_OUTPUT, 6) /* (B18) MCASP0_AXR1.EHRPWM1_A */
+ AM62X_IOPAD(0x01a0, PIN_OUTPUT, 6) /* (E18) MCASP0_AXR0.EHRPWM1_B */
+ >;
+ };
};
&mcu_pmx0 {
@@ -434,9 +460,9 @@
&sdhci0 {
bootph-all;
status = "okay";
+ non-removable;
pinctrl-names = "default";
pinctrl-0 = <&main_mmc0_pins_default>;
- disable-wp;
};
&sdhci1 {
@@ -476,10 +502,17 @@
};
&mailbox0_cluster0 {
+ status = "okay";
+
mbox_m4_0: mbox-m4-0 {
ti,mbox-rx = <0 0 0>;
ti,mbox-tx = <1 0 0>;
};
+
+ mbox_r5_0: mbox-r5-0 {
+ ti,mbox-rx = <2 0 0>;
+ ti,mbox-tx = <3 0 0>;
+ };
};
&mcu_m4fss {
@@ -489,6 +522,16 @@
status = "okay";
};
+&wkup_r5fss0 {
+ status = "okay";
+};
+
+&wkup_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster0 &mbox_r5_0>;
+ memory-region = <&wkup_r5fss0_core0_dma_memory_region>,
+ <&wkup_r5fss0_core0_memory_region>;
+};
+
&usbss0 {
bootph-all;
status = "okay";
@@ -560,3 +603,24 @@
&mcu_gpio_intr {
status = "reserved";
};
+
+&ecap0 {
+ /* P26 of J3 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_ecap0_pins_default>;
+ status = "okay";
+};
+
+&ecap2 {
+ /* P11 of J3 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_ecap2_pins_default>;
+ status = "okay";
+};
+
+&epwm1 {
+ /* P36/P33 of J3 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_epwm1_pins_default>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-imx219.dtso b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-imx219.dtso
index 76ca02127f95..149c59c07182 100644
--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-imx219.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-imx219.dtso
@@ -15,6 +15,33 @@
#clock-cells = <0>;
clock-frequency = <24000000>;
};
+
+ reg_2p8v: regulator-2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vcc_3v3_sys>;
+ regulator-always-on;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3_sys>;
+ regulator-always-on;
+ };
+
+ reg_1p2v: regulator-1p2v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P2V";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ vin-supply = <&vcc_3v3_sys>;
+ regulator-always-on;
+ };
};
&main_i2c2 {
@@ -22,7 +49,7 @@
#size-cells = <0>;
status = "okay";
- i2c-switch@71 {
+ i2c-mux@71 {
compatible = "nxp,pca9543";
#address-cells = <1>;
#size-cells = <0>;
@@ -39,7 +66,10 @@
reg = <0x10>;
clocks = <&clk_imx219_fixed>;
- clock-names = "xclk";
+
+ VANA-supply = <&reg_2p8v>;
+ VDIG-supply = <&reg_1p8v>;
+ VDDL-supply = <&reg_1p2v>;
reset-gpios = <&exp1 13 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-ov5640.dtso b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-ov5640.dtso
index ccc7f5e43184..fc77fc77fe0b 100644
--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-ov5640.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-ov5640.dtso
@@ -15,6 +15,33 @@
#clock-cells = <0>;
clock-frequency = <12000000>;
};
+
+ reg_2p8v: regulator-2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vcc_3v3_sys>;
+ regulator-always-on;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3_sys>;
+ regulator-always-on;
+ };
+
+ reg_1p5v: regulator-1p5v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P5V";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ vin-supply = <&vcc_3v3_sys>;
+ regulator-always-on;
+ };
};
&main_i2c2 {
@@ -22,7 +49,7 @@
#size-cells = <0>;
status = "okay";
- i2c-switch@71 {
+ i2c-mux@71 {
compatible = "nxp,pca9543";
#address-cells = <1>;
#size-cells = <0>;
@@ -40,6 +67,11 @@
clocks = <&clk_ov5640_fixed>;
clock-names = "xclk";
+
+ AVDD-supply = <&reg_2p8v>;
+ DOVDD-supply = <&reg_1p8v>;
+ DVDD-supply = <&reg_1p5v>;
+
powerdown-gpios = <&exp1 13 GPIO_ACTIVE_LOW>;
port {
diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-tevi-ov5640.dtso b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-tevi-ov5640.dtso
index 4eaf9d757dd0..fe3bc29632fa 100644
--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-tevi-ov5640.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-tevi-ov5640.dtso
@@ -15,6 +15,33 @@
#clock-cells = <0>;
clock-frequency = <24000000>;
};
+
+ reg_2p8v: regulator-2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vcc_3v3_sys>;
+ regulator-always-on;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3_sys>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_sys>;
+ regulator-always-on;
+ };
};
&main_i2c2 {
@@ -22,7 +49,7 @@
#size-cells = <0>;
status = "okay";
- i2c-switch@71 {
+ i2c-mux@71 {
compatible = "nxp,pca9543";
#address-cells = <1>;
#size-cells = <0>;
@@ -40,6 +67,11 @@
clocks = <&clk_ov5640_fixed>;
clock-names = "xclk";
+
+ AVDD-supply = <&reg_2p8v>;
+ DOVDD-supply = <&reg_1p8v>;
+ DVDD-supply = <&reg_3p3v>;
+
powerdown-gpios = <&exp1 13 GPIO_ACTIVE_LOW>;
port {
diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
index 324eb44c258d..c7e5da37486a 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
@@ -51,6 +51,11 @@
reg = <0x00000014 0x4>;
};
+ pcie0_ctrl: pcie-ctrl@4070 {
+ compatible = "ti,j784s4-pcie-ctrl", "syscon";
+ reg = <0x4070 0x4>;
+ };
+
serdes_ln_ctrl: mux-controller@4080 {
compatible = "reg-mux";
reg = <0x4080 0x4>;
@@ -1031,12 +1036,12 @@
reg = <0x00 0x0f102000 0x00 0x1000>,
<0x00 0x0f100000 0x00 0x400>,
<0x00 0x0d000000 0x00 0x00800000>,
- <0x00 0x68000000 0x00 0x00001000>;
+ <0x06 0x00000000 0x00 0x00001000>; /* ECAM (4 KB) */
reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
interrupt-names = "link_state";
interrupts = <GIC_SPI 203 IRQ_TYPE_EDGE_RISING>;
device_type = "pci";
- ti,syscon-pcie-ctrl = <&main_conf 0x4070>;
+ ti,syscon-pcie-ctrl = <&pcie0_ctrl 0x0>;
max-link-speed = <2>;
num-lanes = <1>;
power-domains = <&k3_pds 114 TI_SCI_PD_EXCLUSIVE>;
@@ -1049,8 +1054,8 @@
vendor-id = <0x104c>;
device-id = <0xb010>;
msi-map = <0x0 &gic_its 0x0 0x10000>;
- ranges = <0x01000000 0x00 0x68001000 0x00 0x68001000 0x00 0x0010000>,
- <0x02000000 0x00 0x68011000 0x00 0x68011000 0x00 0x7fef000>;
+ ranges = <0x01000000 0x00 0x00001000 0x06 0x00001000 0x00 0x00100000>, /* IO (1 MB) */
+ <0x02000000 0x00 0x00101000 0x06 0x00101000 0x00 0xffeff000>; /* 32-bit Non-Prefetchable MEM (4 GB - 1 MB - 4 KB) */
dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x00000010 0x0>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm-pcie0-ep.dtso b/arch/arm64/boot/dts/ti/k3-am642-evm-pcie0-ep.dtso
index 6b029539e0db..432751774853 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-evm-pcie0-ep.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am642-evm-pcie0-ep.dtso
@@ -46,6 +46,6 @@
max-functions = /bits/ 8 <1>;
phys = <&serdes0_pcie_link>;
phy-names = "pcie-phy";
- ti,syscon-pcie-ctrl = <&main_conf 0x4070>;
+ ti,syscon-pcie-ctrl = <&pcie0_ctrl 0x0>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm.dts b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
index f8ec40523254..e01866372293 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
@@ -597,7 +597,6 @@
status = "okay";
non-removable;
ti,driver-strength-ohm = <50>;
- disable-wp;
bootph-all;
};
@@ -796,6 +795,26 @@
status = "okay";
};
+/* main_timer8 is used by r5f0-0 */
+&main_timer8 {
+ status = "reserved";
+};
+
+/* main_timer9 is used by r5f0-1 */
+&main_timer9 {
+ status = "reserved";
+};
+
+/* main_timer10 is used by r5f1-0 */
+&main_timer10 {
+ status = "reserved";
+};
+
+/* main_timer11 is used by r5f1-1 */
+&main_timer11 {
+ status = "reserved";
+};
+
&serdes_ln_ctrl {
idle-states = <AM64_SERDES0_LANE0_PCIE0>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am642-sk.dts b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
index 33e421ec18ab..1deaa0be0085 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
@@ -710,6 +710,26 @@
status = "okay";
};
+/* main_timer8 is used by r5f0-0 */
+&main_timer8 {
+ status = "reserved";
+};
+
+/* main_timer9 is used by r5f0-1 */
+&main_timer9 {
+ status = "reserved";
+};
+
+/* main_timer10 is used by r5f1-0 */
+&main_timer10 {
+ status = "reserved";
+};
+
+/* main_timer11 is used by r5f1-1 */
+&main_timer11 {
+ status = "reserved";
+};
+
&ecap0 {
status = "okay";
/* PWM is available on Pin 1 of header J3 */
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index 94a812a1355b..b085e7361116 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -449,6 +449,8 @@
ti,otap-del-sel-mmc-hs = <0x0>;
ti,otap-del-sel-ddr52 = <0x5>;
ti,otap-del-sel-hs200 = <0x5>;
+ ti,itap-del-sel-legacy = <0xa>;
+ ti,itap-del-sel-mmc-hs = <0x1>;
ti,itap-del-sel-ddr52 = <0x0>;
dma-coherent;
status = "disabled";
@@ -479,7 +481,7 @@
};
scm_conf: scm-conf@100000 {
- compatible = "syscon", "simple-mfd";
+ compatible = "ti,am654-system-controller", "syscon", "simple-mfd";
reg = <0 0x00100000 0 0x1c000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board-rocktech-rk101-panel.dtso b/arch/arm64/boot/dts/ti/k3-am654-base-board-rocktech-rk101-panel.dtso
index 364c57b3b3a0..7a3953d64fd8 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board-rocktech-rk101-panel.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board-rocktech-rk101-panel.dtso
@@ -15,8 +15,20 @@
#include <dt-bindings/interrupt-controller/irq.h>
&{/} {
+ vcc_5v0: lcd-regulator {
+ /* Output of LM5140 */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&evm_12v0>;
+ };
+
display0 {
compatible = "rocktech,rk101ii01d-ct";
+ power-supply = <&vcc_5v0>;
backlight = <&lcd_bl>;
enable-gpios = <&pca9555 8 GPIO_ACTIVE_HIGH>;
port {
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index aa7139cc8a92..c30425960398 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -456,7 +456,6 @@
bus-width = <8>;
non-removable;
ti,driver-strength-ohm = <50>;
- disable-wp;
};
/*
diff --git a/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-common.dtsi b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-common.dtsi
index ae842b85b70d..12af6cb7f65c 100644
--- a/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-common.dtsi
@@ -50,5 +50,4 @@
bus-width = <8>;
non-removable;
ti,driver-strength-ohm = <50>;
- disable-wp;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts b/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts
index 9be6bba28c26..bf9b23df1da2 100644
--- a/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts
+++ b/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts
@@ -309,6 +309,7 @@
};
&main_gpio1 {
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/ti/k3-am68-phyboard-izar.dts b/arch/arm64/boot/dts/ti/k3-am68-phyboard-izar.dts
new file mode 100644
index 000000000000..41c8f8526e15
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am68-phyboard-izar.dts
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright (C) 2025 PHYTEC Messtechnik GmbH
+ * Author: Dominik Haller <d.haller@phytec.de>
+ *
+ * https://www.phytec.eu/en/produkte/development-kits/phyboard-izar/
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/leds/leds-pca9532.h>
+#include <dt-bindings/net/ti-dp83867.h>
+#include <dt-bindings/phy/phy-cadence.h>
+#include <dt-bindings/phy/phy.h>
+#include "k3-am68-phycore-som.dtsi"
+
+#include "k3-serdes.h"
+
+/ {
+ compatible = "phytec,am68-phyboard-izar",
+ "phytec,am68-phycore-som", "ti,j721s2";
+ model = "PHYTEC phyBOARD-Izar-AM68x";
+
+ aliases {
+ serial0 = &mcu_uart0;
+ serial1 = &main_uart1;
+ serial2 = &main_uart8;
+ serial3 = &main_uart2;
+ mmc1 = &main_sdhci1;
+ ethernet0 = &cpsw_port1;
+ };
+
+ chosen {
+ stdout-path = &main_uart8;
+ };
+
+ transceiver1: can-phy1 {
+ compatible = "ti,tcan1043";
+ #phy-cells = <0>;
+ max-bitrate = <8000000>;
+ };
+
+ transceiver2: can-phy2 {
+ compatible = "ti,tcan1043";
+ #phy-cells = <0>;
+ max-bitrate = <8000000>;
+ };
+
+ transceiver3: can-phy3 {
+ compatible = "ti,tcan1043";
+ #phy-cells = <0>;
+ max-bitrate = <8000000>;
+ };
+
+ transceiver4: can-phy4 {
+ compatible = "ti,tcan1043";
+ #phy-cells = <0>;
+ max-bitrate = <8000000>;
+ };
+
+ vcc_12v0: regulator-12v0 {
+ /* main supply */
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_IN";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc_1v8: regulator-vcc-1v8 {
+ /* Output of TLV7158P */
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_3v3>;
+ };
+
+ vcc_3v3: regulator-vcc-3v3 {
+ /* Output of SiC431 */
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_5v0>;
+ };
+
+ vcc_5v0: regulator-vcc-5v0 {
+ /* Output of LM5116 */
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_5V0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_12v0>;
+ };
+};
+
+&main_pmx0 {
+ main_i2c2_pins_default: main-i2c2-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x04c, PIN_INPUT_PULLUP, 13) /* (V27) MCASP1_AXR1.I2C2_SCL */
+ J721S2_IOPAD(0x050, PIN_INPUT_PULLUP, 13) /* (W27) MCASP1_AXR2.I2C2_SDA */
+ >;
+ };
+
+ main_i2c4_pins_default: main-i2c4-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x014, PIN_INPUT_PULLUP, 8) /* (AD25) MCAN14_TX.I2C4_SCL */
+ J721S2_IOPAD(0x010, PIN_INPUT_PULLUP, 8) /* (AF28) MCAN13_RX.I2C4_SDA */
+ >;
+ };
+
+ main_i2c5_pins_default: main-i2c5-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x01c, PIN_INPUT_PULLUP, 8) /* (Y24) MCAN15_TX.I2C5_SCL */
+ J721S2_IOPAD(0x018, PIN_INPUT_PULLUP, 8) /* (W23) MCAN14_RX.I2C5_SDA */
+ >;
+ };
+
+ main_gpio0_ioexp_intr_pins_default: main-gpio0-ioexp-intr-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x020, PIN_INPUT, 7) /* (AA23) MCAN15_RX.GPIO0_8 */
+ >;
+ };
+
+ main_mcan1_pins_default: main-mcan1-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x0c8, PIN_INPUT, 4) /* (AD28) EXT_REFCLK1.MCAN1_RX */
+ J721S2_IOPAD(0x06c, PIN_OUTPUT, 0) /* (V26) MCAN1_TX */
+ >;
+ };
+
+ main_mcan13_pins_default: main-mcan13-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x0ec, PIN_INPUT, 9) /* (AG25) TIMER_IO1.MCAN13_RX */
+ J721S2_IOPAD(0x00c, PIN_OUTPUT, 0) /* (AE28) MCAN13_TX */
+ >;
+ };
+
+ main_mcan16_pins_default: main-mcan16-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x028, PIN_INPUT, 0) /* (AB24) MCAN16_RX */
+ J721S2_IOPAD(0x024, PIN_OUTPUT, 0) /* (Y28) MCAN16_TX */
+ >;
+ };
+
+ main_mmc1_pins_default: main-mmc1-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x104, PIN_INPUT, 0) /* (P23) MMC1_CLK */
+ J721S2_IOPAD(0x108, PIN_INPUT, 0) /* (N24) MMC1_CMD */
+ J721S2_IOPAD(0x100, PIN_INPUT, 0) /* (###) MMC1_CLKLB */
+ J721S2_IOPAD(0x0fc, PIN_INPUT, 0) /* (M23) MMC1_DAT0 */
+ J721S2_IOPAD(0x0f8, PIN_INPUT, 0) /* (P24) MMC1_DAT1 */
+ J721S2_IOPAD(0x0f4, PIN_INPUT, 0) /* (R24) MMC1_DAT2 */
+ J721S2_IOPAD(0x0f0, PIN_INPUT, 0) /* (R22) MMC1_DAT3 */
+ J721S2_IOPAD(0x0e8, PIN_INPUT, 8) /* (AE25) TIMER_IO0.MMC1_SDCD */
+ >;
+ bootph-all;
+ };
+
+ main_spi6_pins_default: main-spi6-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x030, PIN_INPUT, 8) /* (T26) GPIO0_12.SPI6_CLK */
+ J721S2_IOPAD(0x080, PIN_INPUT, 8) /* (U26) MCASP0_AXR4.SPI6_CS2 */
+ J721S2_IOPAD(0x0c4, PIN_OUTPUT, 8) /* (AB26) ECAP0_IN_APWM_OUT.SPI6_D0 */
+ J721S2_IOPAD(0x074, PIN_INPUT, 8) /* (R28) MCAN2_TX.SPI6_D1 */
+ J721S2_IOPAD(0x0dc, PIN_OUTPUT, 7) /* (AH26) SPI0_D1.GPIO0_55 */
+ >;
+ };
+
+ main_uart1_pins_default: main-uart1-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x05c, PIN_INPUT, 11) /* (AA26) MCASP2_AXR0.UART1_CTSn */
+ J721S2_IOPAD(0x060, PIN_OUTPUT, 11) /* (AC27) MCASP2_AXR1.UART1_RTSn */
+ J721S2_IOPAD(0x054, PIN_INPUT, 11) /* (Y27) MCASP2_ACLKX.UART1_RXD */
+ J721S2_IOPAD(0x058, PIN_OUTPUT, 11) /* (AA27) MCASP2_AFSX.UART1_TXD */
+ >;
+ };
+
+ main_uart2_pins_default: main-uart2-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x0d8, PIN_INPUT, 11) /* (AG26) SPI0_D0.UART2_RXD */
+ J721S2_IOPAD(0x068, PIN_OUTPUT, 11) /* (U28) MCAN0_RX.UART2_TXD */
+ >;
+ };
+
+ main_uart8_pins_default: main-uart8-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x0d0, PIN_INPUT, 11) /* (AF26) SPI0_CS1.UART8_RXD */
+ J721S2_IOPAD(0x0d4, PIN_OUTPUT, 11) /* (AH27) SPI0_CLK.UART8_TXD */
+ >;
+ bootph-all;
+ };
+};
+
+&wkup_pmx1 {
+ mcu_fss0_ospi1_pins_default: mcu-fss0-ospi1-default-pins {
+ pinctrl-single,pins = <
+ J721S2_WKUP_IOPAD(0x008, PIN_OUTPUT, 0) /* (A19) MCU_OSPI1_CLK */
+ J721S2_WKUP_IOPAD(0x024, PIN_OUTPUT, 0) /* (D20) MCU_OSPI1_CSn0 */
+ J721S2_WKUP_IOPAD(0x014, PIN_INPUT, 0) /* (D21) MCU_OSPI1_D0 */
+ J721S2_WKUP_IOPAD(0x018, PIN_INPUT, 0) /* (G20) MCU_OSPI1_D1 */
+ J721S2_WKUP_IOPAD(0x01c, PIN_INPUT, 0) /* (C20) MCU_OSPI1_D2 */
+ J721S2_WKUP_IOPAD(0x020, PIN_INPUT, 0) /* (A20) MCU_OSPI1_D3 */
+ >;
+ };
+};
+
+&wkup_pmx2 {
+ mcu_cpsw_pins_default: mcu-cpsw-default-pins {
+ pinctrl-single,pins = <
+ J721S2_WKUP_IOPAD(0x02c, PIN_INPUT, 0) /* (B22) MCU_RGMII1_RD0 */
+ J721S2_WKUP_IOPAD(0x028, PIN_INPUT, 0) /* (B21) MCU_RGMII1_RD1 */
+ J721S2_WKUP_IOPAD(0x024, PIN_INPUT, 0) /* (C22) MCU_RGMII1_RD2 */
+ J721S2_WKUP_IOPAD(0x020, PIN_INPUT, 0) /* (D23) MCU_RGMII1_RD3 */
+ J721S2_WKUP_IOPAD(0x01c, PIN_INPUT, 0) /* (D22) MCU_RGMII1_RXC */
+ J721S2_WKUP_IOPAD(0x004, PIN_INPUT, 0) /* (E23) MCU_RGMII1_RX_CTL */
+ J721S2_WKUP_IOPAD(0x014, PIN_OUTPUT, 0) /* (F23) MCU_RGMII1_TD0 */
+ J721S2_WKUP_IOPAD(0x010, PIN_OUTPUT, 0) /* (G22) MCU_RGMII1_TD1 */
+ J721S2_WKUP_IOPAD(0x00c, PIN_OUTPUT, 0) /* (E21) MCU_RGMII1_TD2 */
+ J721S2_WKUP_IOPAD(0x008, PIN_OUTPUT, 0) /* (E22) MCU_RGMII1_TD3 */
+ J721S2_WKUP_IOPAD(0x018, PIN_OUTPUT, 0) /* (F21) MCU_RGMII1_TXC */
+ J721S2_WKUP_IOPAD(0x000, PIN_OUTPUT, 0) /* (F22) MCU_RGMII1_TX_CTL */
+ >;
+ };
+
+ mcu_i2c1_pins_default: mcu-i2c1-default-pins {
+ pinctrl-single,pins = <
+ J721S2_WKUP_IOPAD(0x078, PIN_INPUT_PULLUP, 0) /* (F24) WKUP_GPIO0_8.MCU_I2C1_SCL */
+ J721S2_WKUP_IOPAD(0x07c, PIN_INPUT_PULLUP, 0) /* (H26) WKUP_GPIO0_9.MCU_I2C1_SDA */
+ >;
+ };
+
+ mcu_mcan0_pins_default: mcu-mcan0-default-pins {
+ pinctrl-single,pins = <
+ J721S2_WKUP_IOPAD(0x054, PIN_INPUT, 0) /* (E28) MCU_MCAN0_RX */
+ J721S2_WKUP_IOPAD(0x050, PIN_OUTPUT, 0) /* (E27) MCU_MCAN0_TX */
+ >;
+ };
+
+ mcu_mdio_pins_default: mcu-mdio-default-pins {
+ pinctrl-single,pins = <
+ J721S2_WKUP_IOPAD(0x034, PIN_OUTPUT, 0) /* (A21) MCU_MDIO0_MDC */
+ J721S2_WKUP_IOPAD(0x030, PIN_INPUT, 0) /* (A22) MCU_MDIO0_MDIO */
+ >;
+ };
+
+ mcu_spi0_pins_default: mcu-spi0-default-pins {
+ pinctrl-single,pins = <
+ J721S2_WKUP_IOPAD(0x038, PIN_INPUT, 0) /* (B27) MCU_SPI0_CLK */
+ J721S2_WKUP_IOPAD(0x044, PIN_INPUT, 0) /* (B26) MCU_SPI0_CS0 */
+ J721S2_WKUP_IOPAD(0x068, PIN_INPUT, 2) /* (C23) WKUP_GPIO0_4.MCU_SPI0_CS3 */
+ J721S2_WKUP_IOPAD(0x03c, PIN_INPUT, 0) /* (D24) MCU_SPI0_D0 */
+ J721S2_WKUP_IOPAD(0x040, PIN_INPUT, 0) /* (B25) MCU_SPI0_D1 */
+ >;
+ };
+
+ mcu_uart0_pins_default: mcu-uart0-default-pins {
+ pinctrl-single,pins = <
+ J721S2_WKUP_IOPAD(0x090, PIN_INPUT, 0) /* (B24) WKUP_GPIO0_14.MCU_UART0_CTSn */
+ J721S2_WKUP_IOPAD(0x094, PIN_OUTPUT, 0) /* (D25) WKUP_GPIO0_15.MCU_UART0_RTSn */
+ J721S2_WKUP_IOPAD(0x08c, PIN_INPUT, 0) /* (C24) WKUP_GPIO0_13.MCU_UART0_RXD */
+ J721S2_WKUP_IOPAD(0x088, PIN_OUTPUT, 0) /* (C25) WKUP_GPIO0_12.MCU_UART0_TXD */
+ >;
+ };
+
+ wkup_uart0_pins_default: wkup-uart0-default-pins {
+ pinctrl-single,pins = <
+ J721S2_WKUP_IOPAD(0x048, PIN_INPUT, 0) /* (D28) WKUP_UART0_RXD */
+ J721S2_WKUP_IOPAD(0x04c, PIN_OUTPUT, 0) /* (D27) WKUP_UART0_TXD */
+ >;
+ bootph-all;
+ };
+};
+
+&cpsw_port1 {
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&phy0>;
+};
+
+&davinci_mdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_mdio_pins_default>;
+
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,min-output-impedance;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ };
+};
+
+&i2c_som_rtc {
+ trickle-resistor-ohms = <3000>;
+};
+
+&main_i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c2_pins_default>;
+ status = "okay";
+
+ exp1: gpio@20 {
+ compatible = "nxp,pca9672";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "HALF/nFULL_EN", "RS485/nRS232_EN", "MCU_ETH_nRESET", "",
+ "PCIe_nRESET", "USB2.0-Hub_nRESET", "USB3.0-Hub_nRESET", "PEB_AV_BL_EN";
+ interrupt-parent = <&main_gpio0>;
+ interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ exp2: gpio@22 {
+ compatible = "ti,tca6424";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "RPI_GPIO4", "RPI_GPIO5", "RPI_GPIO6", "RPI_GPIO19",
+ "RPI_GPIO20", "RPI_GPIO21", "RPI_GPIO22", "RPI_GPIO23",
+ "RPI_GPIO24", "RPI_GPIO25", "RPI_GPIO26", "RPI_GPIO20",
+ "LVDS_BL_nEN", "LVDS_REG_nEN", "CSI_CAM0_nRESET", "CSI_CAM1_nRESET",
+ "CSI0_CTRL1", "CSI0_CTRL2", "CSI0_CTRL3", "CSI0_CTRL4",
+ "CSI1_CTRL1", "CSI1_CTRL2", "CSI1_CTRL3", "CSI1_CTRL4";
+ interrupt-parent = <&main_gpio0>;
+ interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_gpio0_ioexp_intr_pins_default>;
+ };
+};
+
+/* CSI0 + RPI */
+&main_i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c4_pins_default>;
+};
+
+/* CSI1 + PCIe */
+&main_i2c5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c5_pins_default>;
+};
+
+&main_mcan1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcan1_pins_default>;
+ phys = <&transceiver1>;
+ status = "okay";
+};
+
+&main_mcan13 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcan13_pins_default>;
+ phys = <&transceiver2>;
+ status = "okay";
+};
+
+&main_mcan16 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcan16_pins_default>;
+ phys = <&transceiver3>;
+ status = "okay";
+};
+
+/* SD-Card */
+&main_sdhci1 {
+ pinctrl-0 = <&main_mmc1_pins_default>;
+ pinctrl-names = "default";
+ disable-wp;
+ vmmc-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&main_spi6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_spi6_pins_default>;
+ cs-gpios = <&main_gpio0 55 GPIO_ACTIVE_LOW>;
+ ti,spi-num-cs = <1>;
+ ti,pindir-d0-out-d1-in;
+ status = "okay";
+
+ tpm@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ };
+};
+
+&main_uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart1_pins_default>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&main_uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart2_pins_default>;
+ status = "okay";
+};
+
+&main_uart8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart8_pins_default>;
+ /* Shared with TFA on this platform */
+ power-domains = <&k3_pds 357 TI_SCI_PD_SHARED>;
+ bootph-all;
+ status = "okay";
+};
+
+&mcu_cpsw {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_cpsw_pins_default>;
+};
+
+&mcu_i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_i2c1_pins_default>;
+ status = "okay";
+};
+
+&mcu_mcan0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_mcan0_pins_default>;
+ phys = <&transceiver4>;
+ status = "okay";
+};
+
+/* RPI-Header */
+&mcu_spi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_spi0_pins_default>;
+};
+
+/* RPI-Header */
+&mcu_uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_uart0_pins_default>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&ospi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_fss0_ospi1_pins_default>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <40000000>;
+ cdns,tshsl-ns = <60>;
+ cdns,tsd2d-ns = <60>;
+ cdns,tchsh-ns = <60>;
+ cdns,tslch-ns = <60>;
+ cdns,read-delay = <2>;
+ };
+};
+
+&pcie1_rc {
+ num-lanes = <1>;
+ phys = <&serdes0_pcie_link>;
+ phy-names = "pcie-phy";
+ reset-gpios = <&exp1 4 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&serdes_ln_ctrl {
+ idle-states = <J721S2_SERDES0_LANE0_PCIE1_LANE0>, <J721S2_SERDES0_LANE1_USB>,
+ <J721S2_SERDES0_LANE2_EDP_LANE2>, <J721S2_SERDES0_LANE3_EDP_LANE3>;
+};
+
+&serdes_refclk {
+ clock-frequency = <100000000>;
+};
+
+&serdes0 {
+ status = "okay";
+
+ serdes0_pcie_link: phy@0 {
+ reg = <0>;
+ cdns,num-lanes = <1>;
+ #phy-cells = <0>;
+ resets = <&serdes_wiz0 1>;
+ cdns,phy-type = <PHY_TYPE_PCIE>;
+ };
+
+ serdes0_usb_link: phy@1 {
+ reg = <1>;
+ cdns,num-lanes = <1>;
+ #phy-cells = <0>;
+ resets = <&serdes_wiz0 2>;
+ cdns,phy-type = <PHY_TYPE_USB3>;
+ };
+};
+
+&tscadc0 {
+ status = "okay";
+
+ adc {
+ ti,adc-channels = <0 1 2 3 4 5 6 7>;
+ };
+};
+
+&tscadc1 {
+ status = "okay";
+
+ adc {
+ ti,adc-channels = <3 4 5 6 7>;
+ };
+};
+
+&usbss0 {
+ ti,vbus-divider;
+ status = "okay";
+};
+
+&usb0 {
+ dr_mode = "host";
+ phys = <&serdes0_usb_link>;
+ phy-names = "cdns3,usb3-phy";
+};
+
+&usb_serdes_mux {
+ idle-states = <1>; /* USB0 to SERDES lane 1 */
+};
+
+&wkup_i2c0 {
+ eeprom@57 {
+ compatible = "atmel,24c32";
+ reg = <0x57>;
+ pagesize = <32>;
+ };
+
+ led-controller@62 {
+ compatible = "nxp,pca9533";
+ reg = <0x62>;
+
+ led-1 {
+ label = "user-led1";
+ type = <PCA9532_TYPE_LED>;
+ };
+
+ led-2 {
+ label = "user-led2";
+ type = <PCA9532_TYPE_LED>;
+ };
+
+ led-3 {
+ label = "user-led3";
+ type = <PCA9532_TYPE_LED>;
+ };
+ };
+};
+
+/* Shared with TIFS */
+&wkup_uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wkup_uart0_pins_default>;
+ bootph-all;
+ status = "reserved";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am68-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am68-phycore-som.dtsi
new file mode 100644
index 000000000000..fd715fee8170
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am68-phycore-som.dtsi
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright (C) 2025 PHYTEC Messtechnik GmbH
+ * Author: Dominik Haller <d.haller@phytec.de>
+ *
+ * https://www.phytec.eu/en/produkte/system-on-modules/phycore-am68x-tda4x/
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/net/ti-dp83867.h>
+#include "k3-j721s2.dtsi"
+
+/ {
+ compatible = "phytec,am68-phycore-som", "ti,j721s2";
+ model = "PHYTEC phyCORE-AM68x";
+
+ aliases {
+ ethernet1 = &main_cpsw_port1;
+ mmc0 = &main_sdhci0;
+ rtc0 = &i2c_som_rtc;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* 4GB RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
+ <0x00000008 0x80000000 0x00000000 0x80000000>;
+ bootph-all;
+ };
+
+ reserved_memory: reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* global cma region */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x00 0x20000000>;
+ linux,cma-default;
+ };
+
+ secure_ddr: optee@9e800000 {
+ reg = <0x00 0x9e800000 0x00 0x01800000>;
+ alignment = <0x1000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@a0000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa0000000 0x00 0x100000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core0_memory_region: r5f-memory@a0100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa0100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@a1000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa1000000 0x00 0x100000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core1_memory_region: r5f-memory@a1100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa1100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss0_core0_dma_memory_region: r5f-dma-memory@a2000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa2000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss0_core0_memory_region: r5f-memory@a2100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa2100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss0_core1_dma_memory_region: r5f-dma-memory@a3000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa3000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss0_core1_memory_region: r5f-memory@a3100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa3100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss1_core0_dma_memory_region: r5f-dma-memory@a4000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa4000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss1_core0_memory_region: r5f-memory@a4100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa4100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss1_core1_dma_memory_region: r5f-dma-memory@a5000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa5000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss1_core1_memory_region: r5f-memory@a5100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa5100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ c71_0_dma_memory_region: c71-dma-memory@a6000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa6000000 0x00 0x100000>;
+ no-map;
+ };
+
+ c71_0_memory_region: c71-memory@a6100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa6100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ c71_1_dma_memory_region: c71-dma-memory@a7000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa7000000 0x00 0x100000>;
+ no-map;
+ };
+
+ c71_1_memory_region: c71-memory@a7100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa7100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ rtos_ipc_memory_region: ipc-memories@a8000000 {
+ reg = <0x00 0xa8000000 0x00 0x01c00000>;
+ alignment = <0x1000>;
+ no-map;
+ };
+ };
+
+ vdd_sd_dv: regulator-sd {
+ /* Output of TLV71033 */
+ compatible = "regulator-gpio";
+ regulator-name = "VDD_SD_DV";
+ pinctrl-names = "default";
+ pinctrl-0 = <&vdd_sd_dv_pins_default>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ gpios = <&main_gpio0 1 GPIO_ACTIVE_HIGH>;
+ states = <3300000 0x0>,
+ <1800000 0x1>;
+ };
+};
+
+&main_pmx0 {
+ main_cpsw_mdio_pins_default: main-cpsw-mdio-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x0c0, PIN_OUTPUT, 6) /* (T28) MCASP1_AXR0.MDIO0_MDC */
+ J721S2_IOPAD(0x0bc, PIN_INPUT, 6) /* (V28) MCASP1_AFSX.MDIO0_MDIO */
+ >;
+ };
+
+ main_i2c0_pins_default: main-i2c0-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x0e0, PIN_INPUT_PULLUP, 0) /* (AH25) I2C0_SCL */
+ J721S2_IOPAD(0x0e4, PIN_INPUT_PULLUP, 0) /* (AE24) I2C0_SDA */
+ >;
+ };
+
+ rgmii1_pins_default: rgmii1-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x0b8, PIN_INPUT, 6) /* (AA24) MCASP1_ACLKX.RGMII1_RD0 */
+ J721S2_IOPAD(0x0a0, PIN_INPUT, 6) /* (AB25) MCASP0_AXR12.RGMII1_RD1 */
+ J721S2_IOPAD(0x0a4, PIN_INPUT, 6) /* (T23) MCASP0_AXR13.RGMII1_RD2 */
+ J721S2_IOPAD(0x0a8, PIN_INPUT, 6) /* (U24) MCASP0_AXR14.RGMII1_RD3 */
+ J721S2_IOPAD(0x0b0, PIN_INPUT, 6) /* (AD26) MCASP1_AXR3.RGMII1_RXC */
+ J721S2_IOPAD(0x0ac, PIN_INPUT, 6) /* (AC25) MCASP0_AXR15.RGMII1_RX_CTL */
+ J721S2_IOPAD(0x08c, PIN_OUTPUT, 6) /* (T25) MCASP0_AXR7.RGMII1_TD0 */
+ J721S2_IOPAD(0x090, PIN_OUTPUT, 6) /* (W24) MCASP0_AXR8.RGMII1_TD1 */
+ J721S2_IOPAD(0x094, PIN_OUTPUT, 6) /* (AA25) MCASP0_AXR9.RGMII1_TD2 */
+ J721S2_IOPAD(0x098, PIN_OUTPUT, 6) /* (V25) MCASP0_AXR10.RGMII1_TD3 */
+ J721S2_IOPAD(0x0b4, PIN_OUTPUT, 6) /* (U25) MCASP1_AXR4.RGMII1_TXC */
+ J721S2_IOPAD(0x09c, PIN_OUTPUT, 6) /* (T24) MCASP0_AXR11.RGMII1_TX_CTL */
+ >;
+ };
+
+ vdd_sd_dv_pins_default: vdd-sd-dv-default-pins {
+ pinctrl-single,pins = <
+ J721S2_IOPAD(0x004, PIN_OUTPUT, 7) /* (W25) MCAN12_TX.GPIO0_1 */
+ >;
+ };
+};
+
+&wkup_pmx0 {
+ mcu_fss0_ospi0_pins_default: mcu-fss0-ospi0-default-pins {
+ pinctrl-single,pins = <
+ J721S2_WKUP_IOPAD(0x000, PIN_OUTPUT, 0) /* (D19) MCU_OSPI0_CLK */
+ J721S2_WKUP_IOPAD(0x02c, PIN_OUTPUT, 0) /* (F15) MCU_OSPI0_CSn0 */
+ J721S2_WKUP_IOPAD(0x00c, PIN_INPUT, 0) /* (C19) MCU_OSPI0_D0 */
+ J721S2_WKUP_IOPAD(0x010, PIN_INPUT, 0) /* (F16) MCU_OSPI0_D1 */
+ J721S2_WKUP_IOPAD(0x014, PIN_INPUT, 0) /* (G15) MCU_OSPI0_D2 */
+ J721S2_WKUP_IOPAD(0x018, PIN_INPUT, 0) /* (F18) MCU_OSPI0_D3 */
+ J721S2_WKUP_IOPAD(0x01c, PIN_INPUT, 0) /* (E19) MCU_OSPI0_D4 */
+ J721S2_WKUP_IOPAD(0x020, PIN_INPUT, 0) /* (G19) MCU_OSPI0_D5 */
+ J721S2_WKUP_IOPAD(0x024, PIN_INPUT, 0) /* (F19) MCU_OSPI0_D6 */
+ J721S2_WKUP_IOPAD(0x028, PIN_INPUT, 0) /* (F20) MCU_OSPI0_D7 */
+ J721S2_WKUP_IOPAD(0x008, PIN_INPUT, 0) /* (E18) MCU_OSPI0_DQS */
+ >;
+ bootph-all;
+ };
+};
+
+&wkup_pmx1 {
+ pmic_irq_pins_default: pmic-irq-default-pins {
+ pinctrl-single,pins = <
+ J721S2_WKUP_IOPAD(0x028, PIN_INPUT, 7) /* (C21) MCU_OSPI1_CSn1.WKUP_GPIO0_39 */
+ >;
+ };
+};
+
+&wkup_pmx2 {
+ wkup_i2c0_pins_default: wkup-i2c0-default-pins {
+ pinctrl-single,pins = <
+ J721S2_WKUP_IOPAD(0x098, PIN_INPUT_PULLUP, 0) /* (H24) WKUP_I2C0_SCL */
+ J721S2_WKUP_IOPAD(0x09c, PIN_INPUT_PULLUP, 0) /* (H27) WKUP_I2C0_SDA */
+ >;
+ bootph-all;
+ };
+};
+
+&c71_0 {
+ mboxes = <&mailbox0_cluster4 &mbox_c71_0>;
+ memory-region = <&c71_0_dma_memory_region>,
+ <&c71_0_memory_region>;
+ status = "okay";
+};
+
+&c71_1 {
+ mboxes = <&mailbox0_cluster4 &mbox_c71_1>;
+ memory-region = <&c71_1_dma_memory_region>,
+ <&c71_1_memory_region>;
+ status = "okay";
+};
+
+&mailbox0_cluster0 {
+ interrupts = <436>;
+ status = "okay";
+
+ mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+
+ mbox_mcu_r5fss0_core1: mbox-mcu-r5fss0-core1 {
+ ti,mbox-rx = <2 0 0>;
+ ti,mbox-tx = <3 0 0>;
+ };
+};
+
+&mailbox0_cluster1 {
+ interrupts = <432>;
+ status = "okay";
+
+ mbox_main_r5fss0_core0: mbox-main-r5fss0-core0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+
+ mbox_main_r5fss0_core1: mbox-main-r5fss0-core1 {
+ ti,mbox-rx = <2 0 0>;
+ ti,mbox-tx = <3 0 0>;
+ };
+};
+
+&mailbox0_cluster2 {
+ interrupts = <428>;
+ status = "okay";
+
+ mbox_main_r5fss1_core0: mbox-main-r5fss1-core0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+
+ mbox_main_r5fss1_core1: mbox-main-r5fss1-core1 {
+ ti,mbox-rx = <2 0 0>;
+ ti,mbox-tx = <3 0 0>;
+ };
+};
+
+&mailbox0_cluster4 {
+ interrupts = <420>;
+ status = "okay";
+
+ mbox_c71_0: mbox-c71-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+
+ mbox_c71_1: mbox-c71-1 {
+ ti,mbox-rx = <2 0 0>;
+ ti,mbox-tx = <3 0 0>;
+ };
+};
+
+&main_cpsw {
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii1_pins_default>;
+ status = "okay";
+};
+
+&main_cpsw_mdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_cpsw_mdio_pins_default>;
+ status = "okay";
+
+ phy1: ethernet-phy@0 {
+ reg = <0>;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,min-output-impedance;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ };
+};
+
+&main_cpsw_port1 {
+ phy-handle = <&phy1>;
+ phy-mode = "rgmii-rxid";
+ status = "okay";
+};
+
+&main_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c0_pins_default>;
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp102";
+ reg = <0x48>;
+ };
+
+ temperature-sensor@49 {
+ compatible = "ti,tmp102";
+ reg = <0x49>;
+ };
+
+ i2c_som_rtc: rtc@52 {
+ compatible = "microcrystal,rv3028";
+ reg = <0x52>;
+ };
+};
+
+&main_gpio0 {
+ status = "okay";
+};
+
+&main_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>;
+ memory-region = <&main_r5fss0_core0_dma_memory_region>,
+ <&main_r5fss0_core0_memory_region>;
+};
+
+&main_r5fss0_core1 {
+ mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core1>;
+ memory-region = <&main_r5fss0_core1_dma_memory_region>,
+ <&main_r5fss0_core1_memory_region>;
+};
+
+&main_r5fss1_core0 {
+ mboxes = <&mailbox0_cluster2 &mbox_main_r5fss1_core0>;
+ memory-region = <&main_r5fss1_core0_dma_memory_region>,
+ <&main_r5fss1_core0_memory_region>;
+};
+
+&main_r5fss1_core1 {
+ mboxes = <&mailbox0_cluster2 &mbox_main_r5fss1_core1>;
+ memory-region = <&main_r5fss1_core1_dma_memory_region>,
+ <&main_r5fss1_core1_memory_region>;
+};
+
+/* eMMC */
+&main_sdhci0 {
+ non-removable;
+ ti,driver-strength-ohm = <50>;
+ bootph-all;
+ status = "okay";
+};
+
+/* SD card */
+&main_sdhci1 {
+ vqmmc-supply = <&vdd_sd_dv>;
+ bootph-all;
+};
+
+&main_r5fss0 {
+ ti,cluster-mode = <0>;
+};
+
+&main_r5fss1 {
+ ti,cluster-mode = <0>;
+};
+
+/* Timers are used by Remoteproc firmware */
+&main_timer0 {
+ status = "reserved";
+};
+
+&main_timer1 {
+ status = "reserved";
+};
+
+&main_timer2 {
+ status = "reserved";
+};
+
+&main_timer3 {
+ status = "reserved";
+};
+
+&main_timer4 {
+ status = "reserved";
+};
+
+&main_timer5 {
+ status = "reserved";
+};
+
+&mcu_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster0 &mbox_mcu_r5fss0_core0>;
+ memory-region = <&mcu_r5fss0_core0_dma_memory_region>,
+ <&mcu_r5fss0_core0_memory_region>;
+};
+
+&mcu_r5fss0_core1 {
+ mboxes = <&mailbox0_cluster0 &mbox_mcu_r5fss0_core1>;
+ memory-region = <&mcu_r5fss0_core1_dma_memory_region>,
+ <&mcu_r5fss0_core1_memory_region>;
+};
+
+&ospi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_fss0_ospi0_pins_default>;
+ status = "okay";
+
+ serial_flash: flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-tx-bus-width = <8>;
+ spi-rx-bus-width = <8>;
+ spi-max-frequency = <25000000>;
+ cdns,tshsl-ns = <60>;
+ cdns,tsd2d-ns = <60>;
+ cdns,tchsh-ns = <60>;
+ cdns,tslch-ns = <60>;
+ cdns,read-delay = <2>;
+ bootph-all;
+ };
+};
+
+&wkup_gpio0 {
+ status = "okay";
+};
+
+&wkup_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wkup_i2c0_pins_default>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ vdd_cpu_avs: regulator@40 {
+ compatible = "ti,tps62873";
+ reg = <0x40>;
+ regulator-name = "VDD_CPU_AVS";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ bootph-pre-ram;
+ };
+
+ pmic@48 {
+ compatible = "ti,tps6594-q1";
+ reg = <0x48>;
+ system-power-controller;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_irq_pins_default>;
+ interrupt-parent = <&wkup_gpio0>;
+ interrupts = <39 IRQ_TYPE_EDGE_FALLING>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ buck12-supply = <&vcc_3v3>;
+ buck3-supply = <&vcc_3v3>;
+ buck4-supply = <&vcc_3v3>;
+ buck5-supply = <&vcc_3v3>;
+ ldo1-supply = <&vcc_3v3>;
+ ldo2-supply = <&vcc_3v3>;
+ ldo3-supply = <&vcc_3v3>;
+ ldo4-supply = <&vcc_3v3>;
+ ti,primary-pmic;
+
+ regulators {
+ bucka12: buck12 {
+ regulator-name = "VDD_DDR_1V1";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-boot-on;
+ regulator-always-on;
+ bootph-all;
+ };
+
+ bucka3: buck3 {
+ regulator-name = "VDD_RAM_0V85";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ bootph-all;
+ };
+
+ bucka4: buck4 {
+ regulator-name = "VDD_IO_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ bootph-all;
+ };
+
+ bucka5: buck5 {
+ regulator-name = "VDD_MCU_0V85";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ bootph-all;
+ };
+
+ ldoa1: ldo1 {
+ regulator-name = "VDD_MCUIO_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ bootph-all;
+ };
+
+ ldoa2: ldo2 {
+ regulator-name = "VDD_MCUIO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ bootph-all;
+ };
+
+ ldoa3: ldo3 {
+ regulator-name = "VDDA_DLL_0V8";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ bootph-all;
+ };
+
+ ldoa4: ldo4 {
+ regulator-name = "VDDA_MCU_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ bootph-all;
+ };
+ };
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ pagesize = <32>;
+ bootph-all;
+ };
+
+ som_eeprom_opt: eeprom@51 {
+ compatible = "atmel,24c32";
+ reg = <0x51>;
+ pagesize = <32>;
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board-pcie1-ep.dtso b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board-pcie1-ep.dtso
index 455736e378cc..ba521d661144 100644
--- a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board-pcie1-ep.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board-pcie1-ep.dtso
@@ -48,6 +48,6 @@
dma-coherent;
phys = <&serdes0_pcie_link>;
phy-names = "pcie-phy";
- ti,syscon-pcie-ctrl = <&scm_conf 0x074>;
+ ti,syscon-pcie-ctrl = <&pcie1_ctrl 0x0>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts
index 11522b36e0ce..5fa70a874d7b 100644
--- a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts
@@ -44,6 +44,17 @@
regulator-boot-on;
};
+ vsys_5v0: regulator-vsys5v0 {
+ /* Output of LM61460 */
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vusb_main>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
vsys_3v3: regulator-vsys3v3 {
/* Output of LM5141 */
compatible = "regulator-fixed";
@@ -76,7 +87,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
- vin-supply = <&vsys_3v3>;
+ vin-supply = <&vsys_5v0>;
gpios = <&main_gpio0 49 GPIO_ACTIVE_HIGH>;
states = <1800000 0x0>,
<3300000 0x1>;
diff --git a/arch/arm64/boot/dts/ti/k3-am69-sk.dts b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
index b85227052f97..f28375629739 100644
--- a/arch/arm64/boot/dts/ti/k3-am69-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
@@ -940,7 +940,6 @@
status = "okay";
non-removable;
ti,driver-strength-ohm = <50>;
- disable-wp;
};
&main_sdhci1 {
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-evm-pcie1-ep.dtso b/arch/arm64/boot/dts/ti/k3-j7200-evm-pcie1-ep.dtso
index 3cc315a0e084..281076d905f3 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-evm-pcie1-ep.dtso
+++ b/arch/arm64/boot/dts/ti/k3-j7200-evm-pcie1-ep.dtso
@@ -48,6 +48,6 @@
dma-coherent;
phys = <&serdes0_pcie_link>;
phy-names = "pcie-phy";
- ti,syscon-pcie-ctrl = <&scm_conf 0x4074>;
+ ti,syscon-pcie-ctrl = <&pcie1_ctrl 0x0>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
index 5ab510a0605f..5ce5f0a3d6f5 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
@@ -32,6 +32,11 @@
#size-cells = <1>;
ranges = <0x00 0x00 0x00100000 0x1c000>;
+ pcie1_ctrl: pcie-ctrl@4074 {
+ compatible = "ti,j784s4-pcie-ctrl", "syscon";
+ reg = <0x4074 0x4>;
+ };
+
serdes_ln_ctrl: mux-controller@4080 {
compatible = "reg-mux";
reg = <0x4080 0x20>;
@@ -759,12 +764,12 @@
reg = <0x00 0x02910000 0x00 0x1000>,
<0x00 0x02917000 0x00 0x400>,
<0x00 0x0d800000 0x00 0x00800000>,
- <0x00 0x18000000 0x00 0x00001000>;
+ <0x41 0x00000000 0x00 0x00001000>; /* ECAM (4 KB) */
reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
interrupt-names = "link_state";
interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
device_type = "pci";
- ti,syscon-pcie-ctrl = <&scm_conf 0x4074>;
+ ti,syscon-pcie-ctrl = <&pcie1_ctrl 0x0>;
max-link-speed = <3>;
num-lanes = <4>;
power-domains = <&k3_pds 240 TI_SCI_PD_EXCLUSIVE>;
@@ -778,8 +783,8 @@
device-id = <0xb00f>;
msi-map = <0x0 &gic_its 0x0 0x10000>;
dma-coherent;
- ranges = <0x01000000 0x0 0x18001000 0x00 0x18001000 0x0 0x0010000>,
- <0x02000000 0x0 0x18011000 0x00 0x18011000 0x0 0x7fef000>;
+ ranges = <0x01000000 0x00 0x00001000 0x41 0x00001000 0x00 0x00100000>, /* IO (1 MB) */
+ <0x02000000 0x00 0x00101000 0x41 0x00101000 0x00 0xffeff000>; /* 32-bit Non-Prefetchable MEM (4 GB - 1 MB - 4 KB) */
dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board-infotainment.dtso b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board-infotainment.dtso
index 65a7e54f0884..e4e5f941f20b 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board-infotainment.dtso
+++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board-infotainment.dtso
@@ -15,12 +15,11 @@
#include "k3-pinctrl.h"
&{/} {
- hdmi-connector {
+ connector-hdmi {
compatible = "hdmi-connector";
label = "hdmi";
type = "a";
ddc-i2c-bus = <&main_i2c1>;
- digital;
/* P12 - HDMI_HPD */
hpd-gpios = <&exp6 10 GPIO_ACTIVE_HIGH>;
@@ -31,28 +30,32 @@
};
};
- dvi-bridge {
- #address-cells = <1>;
- #size-cells = <0>;
+ bridge-dvi {
compatible = "ti,tfp410";
/* P10 - HDMI_PDn */
powerdown-gpios = <&exp6 8 GPIO_ACTIVE_LOW>;
+ ti,deskew = <0>;
- port@0 {
- reg = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
- tfp410_in: endpoint {
- remote-endpoint = <&dpi_out0>;
- pclk-sample = <1>;
+ tfp410_in: endpoint {
+ remote-endpoint = <&dpi_out0>;
+ pclk-sample = <1>;
+ };
};
- };
- port@1 {
- reg = <1>;
+ port@1 {
+ reg = <1>;
- tfp410_out: endpoint {
- remote-endpoint =
- <&hdmi_connector_in>;
+ tfp410_out: endpoint {
+ remote-endpoint =
+ <&hdmi_connector_in>;
+ };
};
};
};
@@ -148,17 +151,23 @@
&dss {
pinctrl-names = "default";
pinctrl-0 = <&dss_vout0_pins_default>;
-};
-&dss_ports {
- #address-cells = <1>;
- #size-cells = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
- port@1 {
- reg = <1>;
+ port@0 {
+ reg = <0>;
+ dpi0_out: endpoint {
+ remote-endpoint = <&dp0_in>;
+ };
+ };
- dpi_out0: endpoint {
- remote-endpoint = <&tfp410_in>;
+ port@1 {
+ reg = <1>;
+ dpi_out0: endpoint {
+ remote-endpoint = <&tfp410_in>;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
index 4421852161dd..45311438315f 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
@@ -573,6 +573,7 @@
&ospi1 {
pinctrl-names = "default";
pinctrl-0 = <&mcu_fss0_ospi1_pins_default>;
+ status = "okay";
flash@0 {
compatible = "jedec,spi-nor";
@@ -804,7 +805,11 @@
};
&dss_ports {
- port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
dpi0_out: endpoint {
remote-endpoint = <&dp0_in>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-evm-pcie0-ep.dtso b/arch/arm64/boot/dts/ti/k3-j721e-evm-pcie0-ep.dtso
index 4062709d6579..a8a502a6207f 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-evm-pcie0-ep.dtso
+++ b/arch/arm64/boot/dts/ti/k3-j721e-evm-pcie0-ep.dtso
@@ -38,7 +38,7 @@
reg-names = "intd_cfg", "user_cfg", "reg", "mem";
interrupt-names = "link_state";
interrupts = <GIC_SPI 318 IRQ_TYPE_EDGE_RISING>;
- ti,syscon-pcie-ctrl = <&scm_conf 0x4070>;
+ ti,syscon-pcie-ctrl = <&pcie0_ctrl 0x0>;
max-link-speed = <3>;
num-lanes = <1>;
power-domains = <&k3_pds 239 TI_SCI_PD_EXCLUSIVE>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-evm-pcie1-ep.dtso b/arch/arm64/boot/dts/ti/k3-j721e-evm-pcie1-ep.dtso
index a8cccdcf3e3b..436085157a69 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-evm-pcie1-ep.dtso
+++ b/arch/arm64/boot/dts/ti/k3-j721e-evm-pcie1-ep.dtso
@@ -48,6 +48,6 @@
dma-coherent;
phys = <&serdes1_pcie_link>;
phy-names = "pcie-phy";
- ti,syscon-pcie-ctrl = <&scm_conf 0x4074>;
+ ti,syscon-pcie-ctrl = <&pcie1_ctrl 0x0>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index af3d730154ac..5bd0d36bf33e 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -44,6 +44,26 @@
#size-cells = <1>;
ranges = <0x0 0x0 0x00100000 0x1c000>;
+ pcie0_ctrl: pcie-ctrl@4070 {
+ compatible = "ti,j784s4-pcie-ctrl", "syscon";
+ reg = <0x4070 0x4>;
+ };
+
+ pcie1_ctrl: pcie-ctrl@4074 {
+ compatible = "ti,j784s4-pcie-ctrl", "syscon";
+ reg = <0x4074 0x4>;
+ };
+
+ pcie2_ctrl: pcie-ctrl@4078 {
+ compatible = "ti,j784s4-pcie-ctrl", "syscon";
+ reg = <0x4078 0x4>;
+ };
+
+ pcie3_ctrl: pcie-ctrl@407c {
+ compatible = "ti,j784s4-pcie-ctrl", "syscon";
+ reg = <0x407c 0x4>;
+ };
+
serdes_ln_ctrl: mux-controller@4080 {
compatible = "reg-mux";
reg = <0x4080 0x50>;
@@ -941,12 +961,12 @@
reg = <0x00 0x02900000 0x00 0x1000>,
<0x00 0x02907000 0x00 0x400>,
<0x00 0x0d000000 0x00 0x00800000>,
- <0x00 0x10000000 0x00 0x00001000>;
+ <0x40 0x00000000 0x00 0x00001000>; /* ECAM (4 KB) */
reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
interrupt-names = "link_state";
interrupts = <GIC_SPI 318 IRQ_TYPE_EDGE_RISING>;
device_type = "pci";
- ti,syscon-pcie-ctrl = <&scm_conf 0x4070>;
+ ti,syscon-pcie-ctrl = <&pcie0_ctrl 0x0>;
max-link-speed = <3>;
num-lanes = <2>;
power-domains = <&k3_pds 239 TI_SCI_PD_EXCLUSIVE>;
@@ -959,8 +979,8 @@
device-id = <0xb00d>;
msi-map = <0x0 &gic_its 0x0 0x10000>;
dma-coherent;
- ranges = <0x01000000 0x0 0x10001000 0x0 0x10001000 0x0 0x0010000>,
- <0x02000000 0x0 0x10011000 0x0 0x10011000 0x0 0x7fef000>;
+ ranges = <0x01000000 0x00 0x00001000 0x40 0x00001000 0x00 0x00100000>, /* IO (1 MB) */
+ <0x02000000 0x00 0x00101000 0x40 0x00101000 0x00 0xffeff000>; /* 32-bit Non-Prefetchable MEM (4 GB - 1 MB - 4 KB) */
dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
status = "disabled";
};
@@ -970,12 +990,12 @@
reg = <0x00 0x02910000 0x00 0x1000>,
<0x00 0x02917000 0x00 0x400>,
<0x00 0x0d800000 0x00 0x00800000>,
- <0x00 0x18000000 0x00 0x00001000>;
+ <0x41 0x00000000 0x00 0x00001000>; /* ECAM (4 KB) */
reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
interrupt-names = "link_state";
interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
device_type = "pci";
- ti,syscon-pcie-ctrl = <&scm_conf 0x4074>;
+ ti,syscon-pcie-ctrl = <&pcie1_ctrl 0x0>;
max-link-speed = <3>;
num-lanes = <2>;
power-domains = <&k3_pds 240 TI_SCI_PD_EXCLUSIVE>;
@@ -988,8 +1008,8 @@
device-id = <0xb00d>;
msi-map = <0x0 &gic_its 0x10000 0x10000>;
dma-coherent;
- ranges = <0x01000000 0x0 0x18001000 0x0 0x18001000 0x0 0x0010000>,
- <0x02000000 0x0 0x18011000 0x0 0x18011000 0x0 0x7fef000>;
+ ranges = <0x01000000 0x00 0x00001000 0x41 0x00001000 0x00 0x00100000>, /* IO (1 MB) */
+ <0x02000000 0x00 0x00101000 0x41 0x00101000 0x00 0xffeff000>; /* 32-bit Non-Prefetchable MEM (4 GB - 1 MB - 4 KB) */
dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
status = "disabled";
};
@@ -1004,7 +1024,7 @@
interrupt-names = "link_state";
interrupts = <GIC_SPI 342 IRQ_TYPE_EDGE_RISING>;
device_type = "pci";
- ti,syscon-pcie-ctrl = <&scm_conf 0x4078>;
+ ti,syscon-pcie-ctrl = <&pcie2_ctrl 0x0>;
max-link-speed = <3>;
num-lanes = <2>;
power-domains = <&k3_pds 241 TI_SCI_PD_EXCLUSIVE>;
@@ -1033,7 +1053,7 @@
interrupt-names = "link_state";
interrupts = <GIC_SPI 354 IRQ_TYPE_EDGE_RISING>;
device_type = "pci";
- ti,syscon-pcie-ctrl = <&scm_conf 0x407c>;
+ ti,syscon-pcie-ctrl = <&pcie3_ctrl 0x0>;
max-link-speed = <3>;
num-lanes = <2>;
power-domains = <&k3_pds 242 TI_SCI_PD_EXCLUSIVE>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk-csi2-dual-imx219.dtso b/arch/arm64/boot/dts/ti/k3-j721e-sk-csi2-dual-imx219.dtso
index 47bb5480b5b0..4eb3cffab032 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-sk-csi2-dual-imx219.dtso
+++ b/arch/arm64/boot/dts/ti/k3-j721e-sk-csi2-dual-imx219.dtso
@@ -19,6 +19,33 @@
#clock-cells = <0>;
clock-frequency = <24000000>;
};
+
+ reg_2p8v: regulator-2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vdd_sd_dv>;
+ regulator-always-on;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vdd_sd_dv>;
+ regulator-always-on;
+ };
+
+ reg_1p2v: regulator-1p2v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P2V";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ vin-supply = <&vdd_sd_dv>;
+ regulator-always-on;
+ };
};
&csi_mux {
@@ -34,7 +61,9 @@
reg = <0x10>;
clocks = <&clk_imx219_fixed>;
- clock-names = "xclk";
+ VANA-supply = <&reg_2p8v>;
+ VDIG-supply = <&reg_1p8v>;
+ VDDL-supply = <&reg_1p2v>;
port {
csi2_cam0: endpoint {
@@ -56,7 +85,9 @@
reg = <0x10>;
clocks = <&clk_imx219_fixed>;
- clock-names = "xclk";
+ VANA-supply = <&reg_2p8v>;
+ VDIG-supply = <&reg_1p8v>;
+ VDDL-supply = <&reg_1p2v>;
port {
csi2_cam1: endpoint {
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
index 440ef57be294..ffef3d1cfd55 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
@@ -184,6 +184,17 @@
regulator-boot-on;
};
+ vsys_5v0: fixedregulator-vsys5v0 {
+ /* Output of LM61460 */
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vusb_main>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
vdd_mmc1: fixedregulator-sd {
compatible = "regulator-fixed";
pinctrl-names = "default";
@@ -211,6 +222,20 @@
<3300000 0x1>;
};
+ vdd_sd_dv: gpio-regulator-TLV71033 {
+ compatible = "regulator-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&vdd_sd_dv_pins_default>;
+ regulator-name = "tlv71033";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ vin-supply = <&vsys_5v0>;
+ gpios = <&main_gpio0 118 GPIO_ACTIVE_HIGH>;
+ states = <1800000 0x0>,
+ <3300000 0x1>;
+ };
+
transceiver1: can-phy1 {
compatible = "ti,tcan1042";
#phy-cells = <0>;
@@ -613,6 +638,12 @@
>;
};
+ vdd_sd_dv_pins_default: vdd-sd-dv-default-pins {
+ pinctrl-single,pins = <
+ J721E_IOPAD(0x1dc, PIN_OUTPUT, 7) /* (Y1) SPI1_CLK.GPIO0_118 */
+ >;
+ };
+
wkup_uart0_pins_default: wkup-uart0-default-pins {
pinctrl-single,pins = <
J721E_WKUP_IOPAD(0xa0, PIN_INPUT, 0) /* (J29) WKUP_UART0_RXD */
diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
index a7f2f52f42f7..b6e22c242951 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
@@ -126,6 +126,8 @@
<0x00 0x10000000 0x00 0x10000000 0x00 0x10000000>, /* PCIe DAT */
<0x00 0x64800000 0x00 0x64800000 0x00 0x00800000>, /* C71 */
<0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */
+ <0x40 0x00000000 0x40 0x00000000 0x01 0x00000000>, /* PCIe0 DAT1 */
+ <0x41 0x00000000 0x41 0x00000000 0x01 0x00000000>, /* PCIe1 DAT1 */
<0x44 0x00000000 0x44 0x00000000 0x00 0x08000000>, /* PCIe2 DAT */
<0x44 0x10000000 0x44 0x10000000 0x00 0x08000000>, /* PCIe3 DAT */
<0x4d 0x80800000 0x4d 0x80800000 0x00 0x00800000>, /* C66_0 */
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-evm-pcie1-ep.dtso b/arch/arm64/boot/dts/ti/k3-j721s2-evm-pcie1-ep.dtso
index 5ff390915b75..8c2cd99cf2b4 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-evm-pcie1-ep.dtso
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-evm-pcie1-ep.dtso
@@ -38,7 +38,7 @@
reg-names = "intd_cfg", "user_cfg", "reg", "mem";
interrupt-names = "link_state";
interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
- ti,syscon-pcie-ctrl = <&scm_conf 0x074>;
+ ti,syscon-pcie-ctrl = <&pcie1_ctrl 0x0>;
max-link-speed = <3>;
num-lanes = <1>;
power-domains = <&k3_pds 276 TI_SCI_PD_EXCLUSIVE>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
index 92bf48fdbeba..83cf0adb2cb7 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
@@ -57,6 +57,11 @@
#phy-cells = <1>;
};
+ pcie1_ctrl: pcie-ctrl@74 {
+ compatible = "ti,j784s4-pcie-ctrl", "syscon";
+ reg = <0x74 0x4>;
+ };
+
serdes_ln_ctrl: mux-controller@80 {
compatible = "reg-mux";
reg = <0x80 0x10>;
@@ -1394,12 +1399,12 @@
reg = <0x00 0x02910000 0x00 0x1000>,
<0x00 0x02917000 0x00 0x400>,
<0x00 0x0d800000 0x00 0x800000>,
- <0x00 0x18000000 0x00 0x1000>;
+ <0x41 0x00000000 0x00 0x1000>; /* ECAM (4 KB) */
reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
interrupt-names = "link_state";
interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
device_type = "pci";
- ti,syscon-pcie-ctrl = <&scm_conf 0x074>;
+ ti,syscon-pcie-ctrl = <&pcie1_ctrl 0x0>;
max-link-speed = <3>;
num-lanes = <4>;
power-domains = <&k3_pds 276 TI_SCI_PD_EXCLUSIVE>;
@@ -1412,8 +1417,8 @@
device-id = <0xb013>;
msi-map = <0x0 &gic_its 0x0 0x10000>;
dma-coherent;
- ranges = <0x01000000 0x0 0x18001000 0x00 0x18001000 0x0 0x0010000>,
- <0x02000000 0x0 0x18011000 0x00 0x18011000 0x0 0x7fef000>;
+ ranges = <0x01000000 0x00 0x00001000 0x41 0x00001000 0x00 0x00100000>, /* IO (1 MB) */
+ <0x02000000 0x00 0x00101000 0x41 0x00101000 0x00 0xffeff000>; /* 32-bit Non-Prefetchable MEM (4 GB - 1 MB - 4 KB) */
dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
@@ -2048,4 +2053,18 @@
/* reserved for MAIN_R5F1_1 */
status = "reserved";
};
+
+ gpu: gpu@4e20000000 {
+ compatible = "ti,j721s2-gpu", "img,img-bxs-4-64", "img,img-rogue";
+ reg = <0x4e 0x20000000 0x00 0x80000>;
+ clocks = <&k3_clks 130 1>;
+ clock-names = "core";
+ assigned-clocks = <&k3_clks 130 1>;
+ assigned-clock-rates = <800000000>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 130 TI_SCI_PD_EXCLUSIVE>,
+ <&k3_pds 373 TI_SCI_PD_EXCLUSIVE>;
+ power-domain-names = "a", "b";
+ dma-coherent;
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm-csi2-quad-rpi-cam-imx219.dtso b/arch/arm64/boot/dts/ti/k3-j722s-evm-csi2-quad-rpi-cam-imx219.dtso
new file mode 100644
index 000000000000..4107ef8c7b74
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-j722s-evm-csi2-quad-rpi-cam-imx219.dtso
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * DT Overlay for 4 x RPi Camera V2.1 on J722S-EVM board.
+ *
+ * Copyright (C) 2025 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * Schematics: https://datasheets.raspberrypi.com/camera/camera-v2-schematics.pdf
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include "k3-pinctrl.h"
+
+&main_pmx0 {
+ cam0_reset_pins_default: cam0-default-reset-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x03c, PIN_OUTPUT, 7) /* (R22) GPIO0_15 */
+ >;
+ };
+
+ cam1_reset_pins_default: cam1-default-reset-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x044, PIN_OUTPUT, 7) /* (R26) GPIO0_17 */
+ >;
+ };
+
+ cam2_reset_pins_default: cam2-default-reset-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x04c, PIN_OUTPUT, 7) /* (T25) GPIO0_19 */
+ >;
+ };
+
+ cam3_reset_pins_default: cam3-default-reset-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x054, PIN_OUTPUT, 7) /* (T21) GPIO0_21 */
+ >;
+ };
+};
+
+&{/} {
+ clk_imx219_fixed: clock-24000000 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+
+ reg_2p8v: regulator-2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vsys_3v3_exp>;
+ regulator-always-on;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vsys_3v3_exp>;
+ regulator-always-on;
+ };
+
+ reg_1p2v: regulator-1p2v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P2V";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ vin-supply = <&vsys_3v3_exp>;
+ regulator-always-on;
+ };
+};
+
+&csi01_mux {
+ idle-state = <1>;
+};
+
+&csi23_mux {
+ idle-state = <1>;
+};
+
+&pca9543_0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* CAM0 I2C */
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ imx219_0: sensor@10 {
+ compatible = "sony,imx219";
+ reg = <0x10>;
+
+ clocks = <&clk_imx219_fixed>;
+
+ VANA-supply = <&reg_2p8v>;
+ VDIG-supply = <&reg_1p8v>;
+ VDDL-supply = <&reg_1p2v>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam0_reset_pins_default>;
+
+ reset-gpios = <&main_gpio0 15 GPIO_ACTIVE_HIGH>;
+
+ port {
+ csi2_cam0: endpoint {
+ remote-endpoint = <&csi2rx0_in_sensor>;
+ link-frequencies = /bits/ 64 <456000000>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+ };
+
+ /* CAM1 I2C */
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ imx219_1: sensor@10 {
+ compatible = "sony,imx219";
+ reg = <0x10>;
+
+ clocks = <&clk_imx219_fixed>;
+
+ VANA-supply = <&reg_2p8v>;
+ VDIG-supply = <&reg_1p8v>;
+ VDDL-supply = <&reg_1p2v>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam1_reset_pins_default>;
+
+ reset-gpios = <&main_gpio0 17 GPIO_ACTIVE_HIGH>;
+
+ port {
+ csi2_cam1: endpoint {
+ remote-endpoint = <&csi2rx1_in_sensor>;
+ link-frequencies = /bits/ 64 <456000000>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+ };
+};
+
+&pca9543_1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* CAM0 I2C */
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ imx219_2: sensor@10 {
+ compatible = "sony,imx219";
+ reg = <0x10>;
+
+ clocks = <&clk_imx219_fixed>;
+
+ VANA-supply = <&reg_2p8v>;
+ VDIG-supply = <&reg_1p8v>;
+ VDDL-supply = <&reg_1p2v>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam2_reset_pins_default>;
+
+ reset-gpios = <&main_gpio0 19 GPIO_ACTIVE_HIGH>;
+
+ port {
+ csi2_cam2: endpoint {
+ remote-endpoint = <&csi2rx2_in_sensor>;
+ link-frequencies = /bits/ 64 <456000000>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+ };
+
+ /* CAM1 I2C */
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ imx219_3: sensor@10 {
+ compatible = "sony,imx219";
+ reg = <0x10>;
+
+ clocks = <&clk_imx219_fixed>;
+
+ VANA-supply = <&reg_2p8v>;
+ VDIG-supply = <&reg_1p8v>;
+ VDDL-supply = <&reg_1p2v>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam3_reset_pins_default>;
+
+ reset-gpios = <&main_gpio0 21 GPIO_ACTIVE_HIGH>;
+
+ port {
+ csi2_cam3: endpoint {
+ remote-endpoint = <&csi2rx3_in_sensor>;
+ link-frequencies = /bits/ 64 <456000000>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+ };
+};
+
+&cdns_csi2rx0 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi0_port0: port@0 {
+ reg = <0>;
+ status = "okay";
+
+ csi2rx0_in_sensor: endpoint {
+ remote-endpoint = <&csi2_cam0>;
+ bus-type = <4>; /* CSI2 DPHY */
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+};
+
+&cdns_csi2rx1 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi1_port0: port@0 {
+ reg = <0>;
+ status = "okay";
+
+ csi2rx1_in_sensor: endpoint {
+ remote-endpoint = <&csi2_cam1>;
+ bus-type = <4>; /* CSI2 DPHY */
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+};
+
+&cdns_csi2rx2 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi2_port0: port@0 {
+ reg = <0>;
+ status = "okay";
+
+ csi2rx2_in_sensor: endpoint {
+ remote-endpoint = <&csi2_cam2>;
+ bus-type = <4>; /* CSI2 DPHY */
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+};
+
+&cdns_csi2rx3 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi3_port0: port@0 {
+ reg = <0>;
+ status = "okay";
+
+ csi2rx3_in_sensor: endpoint {
+ remote-endpoint = <&csi2_cam3>;
+ bus-type = <4>; /* CSI2 DPHY */
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+};
+
+&ti_csi2rx0 {
+ status = "okay";
+};
+
+&dphy0 {
+ status = "okay";
+};
+
+&ti_csi2rx1 {
+ status = "okay";
+};
+
+&dphy1 {
+ status = "okay";
+};
+
+&ti_csi2rx2 {
+ status = "okay";
+};
+
+&dphy2 {
+ status = "okay";
+};
+
+&ti_csi2rx3 {
+ status = "okay";
+};
+
+&dphy3 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm-csi2-quad-tevi-ov5640.dtso b/arch/arm64/boot/dts/ti/k3-j722s-evm-csi2-quad-tevi-ov5640.dtso
new file mode 100644
index 000000000000..575113d7b481
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-j722s-evm-csi2-quad-tevi-ov5640.dtso
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * DT Overlay for 4 x TEVI OV5640 MIPI Camera module on J722S-EVM board.
+ *
+ * Copyright (C) 2025 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include "k3-pinctrl.h"
+
+&main_pmx0 {
+ cam0_reset_pins_default: cam0-default-reset-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x03c, PIN_OUTPUT, 7) /* (R22) GPIO0_15 */
+ >;
+ };
+
+ cam1_reset_pins_default: cam1-default-reset-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x044, PIN_OUTPUT, 7) /* (R26) GPIO0_17 */
+ >;
+ };
+
+ cam2_reset_pins_default: cam2-default-reset-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x04c, PIN_OUTPUT, 7) /* (T25) GPIO0_19 */
+ >;
+ };
+
+ cam3_reset_pins_default: cam3-default-reset-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x054, PIN_OUTPUT, 7) /* (T21) GPIO0_21 */
+ >;
+ };
+};
+
+&{/} {
+ clk_ov5640_fixed: clock-24000000 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+
+ reg_2p8v: regulator-2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vsys_3v3_exp>;
+ regulator-always-on;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vsys_3v3_exp>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vsys_3v3_exp>;
+ regulator-always-on;
+ };
+};
+
+&csi01_mux {
+ idle-state = <1>;
+};
+
+&csi23_mux {
+ idle-state = <1>;
+};
+
+&pca9543_0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* CAM0 I2C */
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ ov5640_0: camera@3c {
+ compatible = "ovti,ov5640";
+ reg = <0x3c>;
+ clocks = <&clk_ov5640_fixed>;
+ clock-names = "xclk";
+
+ AVDD-supply = <&reg_2p8v>;
+ DOVDD-supply = <&reg_1p8v>;
+ DVDD-supply = <&reg_3p3v>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam0_reset_pins_default>;
+
+ reset-gpios = <&main_gpio0 15 GPIO_ACTIVE_HIGH>;
+
+ port {
+ csi2_cam0: endpoint {
+ remote-endpoint = <&csi2rx0_in_sensor>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+ };
+
+ /* CAM1 I2C */
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ ov5640_1: camera@3c {
+ compatible = "ovti,ov5640";
+ reg = <0x3c>;
+ clocks = <&clk_ov5640_fixed>;
+ clock-names = "xclk";
+
+ AVDD-supply = <&reg_2p8v>;
+ DOVDD-supply = <&reg_1p8v>;
+ DVDD-supply = <&reg_3p3v>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam1_reset_pins_default>;
+
+ reset-gpios = <&main_gpio0 17 GPIO_ACTIVE_HIGH>;
+
+ port {
+ csi2_cam1: endpoint {
+ remote-endpoint = <&csi2rx1_in_sensor>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+ };
+};
+
+&pca9543_1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* CAM0 I2C */
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ ov5640_2: camera@3c {
+ compatible = "ovti,ov5640";
+ reg = <0x3c>;
+ clocks = <&clk_ov5640_fixed>;
+ clock-names = "xclk";
+
+ AVDD-supply = <&reg_2p8v>;
+ DOVDD-supply = <&reg_1p8v>;
+ DVDD-supply = <&reg_3p3v>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam2_reset_pins_default>;
+
+ reset-gpios = <&main_gpio0 19 GPIO_ACTIVE_HIGH>;
+
+ port {
+ csi2_cam2: endpoint {
+ remote-endpoint = <&csi2rx2_in_sensor>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+ };
+
+ /* CAM1 I2C */
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ ov5640_3: camera@3c {
+ compatible = "ovti,ov5640";
+ reg = <0x3c>;
+ clocks = <&clk_ov5640_fixed>;
+ clock-names = "xclk";
+
+ AVDD-supply = <&reg_2p8v>;
+ DOVDD-supply = <&reg_1p8v>;
+ DVDD-supply = <&reg_3p3v>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam3_reset_pins_default>;
+
+ reset-gpios = <&main_gpio0 21 GPIO_ACTIVE_HIGH>;
+
+ port {
+ csi2_cam3: endpoint {
+ remote-endpoint = <&csi2rx3_in_sensor>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+ };
+};
+
+&cdns_csi2rx0 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi0_port0: port@0 {
+ reg = <0>;
+ status = "okay";
+
+ csi2rx0_in_sensor: endpoint {
+ remote-endpoint = <&csi2_cam0>;
+ bus-type = <4>; /* CSI2 DPHY */
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+};
+
+&cdns_csi2rx1 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi1_port0: port@0 {
+ reg = <0>;
+ status = "okay";
+
+ csi2rx1_in_sensor: endpoint {
+ remote-endpoint = <&csi2_cam1>;
+ bus-type = <4>; /* CSI2 DPHY */
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+};
+
+&cdns_csi2rx2 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi2_port0: port@0 {
+ reg = <0>;
+ status = "okay";
+
+ csi2rx2_in_sensor: endpoint {
+ remote-endpoint = <&csi2_cam2>;
+ bus-type = <4>; /* CSI2 DPHY */
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+};
+
+&cdns_csi2rx3 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi3_port0: port@0 {
+ reg = <0>;
+ status = "okay";
+
+ csi2rx3_in_sensor: endpoint {
+ remote-endpoint = <&csi2_cam3>;
+ bus-type = <4>; /* CSI2 DPHY */
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+};
+
+&ti_csi2rx0 {
+ status = "okay";
+};
+
+&dphy0 {
+ status = "okay";
+};
+
+&ti_csi2rx1 {
+ status = "okay";
+};
+
+&dphy1 {
+ status = "okay";
+};
+
+&ti_csi2rx2 {
+ status = "okay";
+};
+
+&dphy2 {
+ status = "okay";
+};
+
+&ti_csi2rx3 {
+ status = "okay";
+};
+
+&dphy3 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
index 2127316f36a3..a47852fdca70 100644
--- a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
@@ -141,6 +141,17 @@
regulator-boot-on;
};
+ vsys_3v3: regulator-vsys3v3 {
+ /* output of LM5141-Q1 */
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vmain_pd>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
vdd_mmc1: regulator-mmc1 {
/* TPS22918DBVR */
compatible = "regulator-fixed";
@@ -153,6 +164,17 @@
bootph-all;
};
+ vsys_3v3_exp: regulator-TPS22990 {
+ /* output of TPS22990 */
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_3v3_exp";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vsys_3v3>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
vdd_sd_dv: regulator-TLV71033 {
compatible = "regulator-gpio";
regulator-name = "tlv71033";
@@ -244,6 +266,20 @@
max-bitrate = <5000000>;
standby-gpios = <&exp1 17 GPIO_ACTIVE_HIGH>;
};
+
+ csi01_mux: mux-controller-0 {
+ compatible = "gpio-mux";
+ #mux-state-cells = <1>;
+ mux-gpios = <&exp1 6 GPIO_ACTIVE_HIGH>;
+ idle-state = <0>;
+ };
+
+ csi23_mux: mux-controller-1 {
+ compatible = "gpio-mux";
+ #mux-state-cells = <1>;
+ mux-gpios = <&exp1 7 GPIO_ACTIVE_HIGH>;
+ idle-state = <0>;
+ };
};
&main_pmx0 {
@@ -843,8 +879,11 @@
<J722S_SERDES1_LANE0_PCIE0_LANE0>;
};
-&serdes0 {
+&serdes_wiz0 {
status = "okay";
+};
+
+&serdes0 {
serdes0_usb_link: phy@0 {
reg = <0>;
cdns,num-lanes = <1>;
@@ -854,8 +893,11 @@
};
};
-&serdes1 {
+&serdes_wiz1 {
status = "okay";
+};
+
+&serdes1 {
serdes1_pcie_link: phy@0 {
reg = <0>;
cdns,num-lanes = <1>;
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
index 6850f50530f1..78d7e800b311 100644
--- a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
@@ -32,6 +32,8 @@
assigned-clocks = <&k3_clks 279 1>;
assigned-clock-parents = <&k3_clks 279 5>;
+ status = "disabled";
+
serdes0: serdes@f000000 {
compatible = "ti,j721e-serdes-10g";
reg = <0x0f000000 0x00010000>;
@@ -50,8 +52,6 @@
#address-cells = <1>;
#size-cells = <0>;
#clock-cells = <1>;
-
- status = "disabled"; /* Needs lane config */
};
};
@@ -70,6 +70,8 @@
assigned-clocks = <&k3_clks 280 1>;
assigned-clock-parents = <&k3_clks 280 5>;
+ status = "disabled";
+
serdes1: serdes@f010000 {
compatible = "ti,j721e-serdes-10g";
reg = <0x0f010000 0x00010000>;
@@ -88,8 +90,6 @@
#address-cells = <1>;
#size-cells = <0>;
#clock-cells = <1>;
-
- status = "disabled"; /* Needs lane config */
};
};
@@ -98,10 +98,10 @@
reg = <0x00 0x0f102000 0x00 0x1000>,
<0x00 0x0f100000 0x00 0x400>,
<0x00 0x0d000000 0x00 0x00800000>,
- <0x00 0x68000000 0x00 0x00001000>;
+ <0x06 0x00000000 0x00 0x00001000>; /* ECAM (4 KB) */
reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
- ranges = <0x01000000 0x00 0x68001000 0x00 0x68001000 0x00 0x0010000>,
- <0x02000000 0x00 0x68011000 0x00 0x68011000 0x00 0x7fef000>;
+ ranges = <0x01000000 0x00 0x00001000 0x06 0x00001000 0x00 0x00100000>, /* IO (1 MB) */
+ <0x02000000 0x00 0x00101000 0x06 0x00101000 0x00 0xffeff000>; /* 32-bit Non-Prefetchable MEM (4 GB - 1 MB - 4 KB) */
dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
interrupt-names = "link_state";
interrupts = <GIC_SPI 99 IRQ_TYPE_EDGE_RISING>;
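The new ECAM/IO/MEM comments in the PCIe "reg" and "ranges" properties above (and in the matching j784s4 hunks further down) encode the window sizes directly: 0x00100000 is the 1 MB I/O window and 0xffeff000 is the 32-bit non-prefetchable window of 4 GB minus 1 MB minus 4 KB. A quick standalone check of that arithmetic, illustrative only and not part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 4 GiB minus the 1 MiB I/O window and the 4 KiB ECAM page */
	uint64_t mem32 = (1ULL << 32) - (1ULL << 20) - (1ULL << 12);

	printf("0x%llx\n", (unsigned long long)mem32);	/* prints 0xffeff000 */
	return 0;
}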
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm-usxgmii-exp1-exp2.dtso b/arch/arm64/boot/dts/ti/k3-j784s4-evm-usxgmii-exp1-exp2.dtso
index d5f8c8531923..424628c63c2d 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-evm-usxgmii-exp1-exp2.dtso
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm-usxgmii-exp1-exp2.dtso
@@ -24,7 +24,6 @@
};
&main_cpsw0 {
- pinctrl-names = "default";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi
index 2664f74a9c7a..fa656b7b13a1 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi
@@ -5,6 +5,9 @@
* EVM Board Schematics(j784s4): https://www.ti.com/lit/zip/sprr458
* EVM Board Schematics(j742s2): https://www.ti.com/lit/zip/SPAC001
*/
+
+#include <dt-bindings/phy/phy-cadence.h>
+
/ {
chosen {
stdout-path = "serial2:115200n8";
@@ -1407,10 +1410,13 @@
&pcie1_rc {
status = "okay";
+ clocks = <&k3_clks 333 0>, <&serdes0 CDNS_TORRENT_REFCLK_DRIVER>;
+ clock-names = "fck", "pcie_refclk";
num-lanes = <2>;
reset-gpios = <&exp1 2 GPIO_ACTIVE_HIGH>;
phys = <&serdes0_pcie1_link>;
phy-names = "pcie-phy";
+ ti,syscon-acspcie-proxy-ctrl = <&acspcie0_proxy_ctrl 0x1>;
};
&serdes1 {
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-usb0-type-a.dtso b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-usb0-type-a.dtso
new file mode 100644
index 000000000000..ba15d72d86d6
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-usb0-type-a.dtso
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/**
+ * DT Overlay for enabling USB0 instance of USB on J784S4 and J742S2 EVMs for
+ * Host Mode of operation with the Type-A Connector.
+ *
+ * J784S4 EVM Product Link: https://www.ti.com/tool/J784S4XEVM
+ * J742S2 EVM Product Link: https://www.ti.com/tool/J742S2XH01EVM
+ *
+ * Copyright (C) 2025 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+
+&exp2 {
+ p12-hog {
+ /* P12 - USB2.0_MUX_SEL */
+ gpio-hog;
+ gpios = <12 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "USB2.0_MUX_SEL";
+ };
+};
+
+&usb0 {
+ dr_mode = "host";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
index 1944616ab357..363d68fec387 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
@@ -77,7 +77,7 @@
serdes_ln_ctrl: mux-controller@4080 {
compatible = "reg-mux";
- reg = <0x00004080 0x30>;
+ reg = <0x00004080 0x50>;
#mux-control-cells = <1>;
mux-reg-masks = <0x0 0x3>, <0x4 0x3>, /* SERDES0 lane0/1 select */
<0x8 0x3>, <0xc 0x3>, /* SERDES0 lane2/3 select */
@@ -126,6 +126,11 @@
assigned-clock-parents = <&k3_clks 157 63>;
#clock-cells = <0>;
};
+
+ acspcie0_proxy_ctrl: clock-controller@1a090 {
+ compatible = "ti,j784s4-acspcie-proxy-ctrl", "syscon";
+ reg = <0x1a090 0x4>;
+ };
};
main_ehrpwm0: pwm@3000000 {
@@ -1055,7 +1060,7 @@
reg = <0x00 0x02900000 0x00 0x1000>,
<0x00 0x02907000 0x00 0x400>,
<0x00 0x0d000000 0x00 0x00800000>,
- <0x00 0x10000000 0x00 0x00001000>;
+ <0x40 0x00000000 0x00 0x00001000>; /* ECAM (4 KB) */
reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
interrupt-names = "link_state";
interrupts = <GIC_SPI 318 IRQ_TYPE_EDGE_RISING>;
@@ -1073,8 +1078,8 @@
device-id = <0xb012>;
msi-map = <0x0 &gic_its 0x0 0x10000>;
dma-coherent;
- ranges = <0x01000000 0x0 0x10001000 0x0 0x10001000 0x0 0x0010000>,
- <0x02000000 0x0 0x10011000 0x0 0x10011000 0x0 0x7fef000>;
+ ranges = <0x01000000 0x00 0x00001000 0x40 0x00001000 0x00 0x00100000>, /* IO (1 MB) */
+ <0x02000000 0x00 0x00101000 0x40 0x00101000 0x00 0xffeff000>; /* 32-bit Non-Prefetchable MEM (4 GB - 1 MB - 4 KB) */
dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
status = "disabled";
};
@@ -1084,7 +1089,7 @@
reg = <0x00 0x02910000 0x00 0x1000>,
<0x00 0x02917000 0x00 0x400>,
<0x00 0x0d800000 0x00 0x00800000>,
- <0x00 0x18000000 0x00 0x00001000>;
+ <0x41 0x00000000 0x00 0x00001000>; /* ECAM (4 KB) */
reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
interrupt-names = "link_state";
interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
@@ -1102,8 +1107,8 @@
device-id = <0xb012>;
msi-map = <0x0 &gic_its 0x10000 0x10000>;
dma-coherent;
- ranges = <0x01000000 0x0 0x18001000 0x00 0x18001000 0x0 0x0010000>,
- <0x02000000 0x0 0x18011000 0x00 0x18011000 0x0 0x7fef000>;
+ ranges = <0x01000000 0x00 0x00001000 0x41 0x00001000 0x00 0x00100000>, /* IO (1 MB) */
+ <0x02000000 0x00 0x00101000 0x41 0x00101000 0x00 0xffeff000>; /* 32-bit Non-Prefetchable MEM (4 GB - 1 MB - 4 KB) */
dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
status = "disabled";
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 5bb8f09422a2..897fc686e6a9 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -265,6 +265,7 @@ CONFIG_IMX_SCU=y
CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE=y
CONFIG_QCOM_QSEECOM=y
CONFIG_QCOM_QSEECOM_UEFISECAPP=y
+CONFIG_EXYNOS_ACPM_PROTOCOL=m
CONFIG_GNSS=m
CONFIG_GNSS_MTK_SERIAL=m
CONFIG_MTD=y
@@ -605,6 +606,7 @@ CONFIG_PINCTRL_IMX93=y
CONFIG_PINCTRL_MSM=y
CONFIG_PINCTRL_IPQ5018=y
CONFIG_PINCTRL_IPQ5332=y
+CONFIG_PINCTRL_IPQ5424=y
CONFIG_PINCTRL_IPQ8074=y
CONFIG_PINCTRL_IPQ6018=y
CONFIG_PINCTRL_IPQ9574=y
@@ -672,12 +674,14 @@ CONFIG_GPIO_PCA953X=y
CONFIG_GPIO_PCA953X_IRQ=y
CONFIG_GPIO_ADP5585=m
CONFIG_GPIO_PCF857X=m
+CONFIG_GPIO_TPIC2810=m
CONFIG_GPIO_BD9571MWV=m
CONFIG_GPIO_MAX77620=y
CONFIG_GPIO_SL28CPLD=m
CONFIG_GPIO_AGGREGATOR=m
CONFIG_POWER_RESET_MSM=y
CONFIG_POWER_RESET_QCOM_PON=m
+CONFIG_POWER_RESET_TORADEX_EC=m
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_POWER_RESET_SYSCON_POWEROFF=y
@@ -702,6 +706,7 @@ CONFIG_SENSORS_RASPBERRYPI_HWMON=m
CONFIG_SENSORS_SL28CPLD=m
CONFIG_SENSORS_INA2XX=m
CONFIG_SENSORS_INA3221=m
+CONFIG_SENSORS_TMP102=m
CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
CONFIG_CPU_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
@@ -769,7 +774,7 @@ CONFIG_MFD_MT6397=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_RK8XX_I2C=y
CONFIG_MFD_RK8XX_SPI=y
-CONFIG_MFD_SEC_CORE=y
+CONFIG_MFD_SEC_I2C=y
CONFIG_MFD_SL28CPLD=y
CONFIG_RZ_MTU3=y
CONFIG_MFD_TI_AM335X_TSCADC=m
@@ -777,6 +782,7 @@ CONFIG_MFD_TI_LP873X=m
CONFIG_MFD_TPS65219=y
CONFIG_MFD_TPS6594_I2C=m
CONFIG_MFD_ROHM_BD718XX=y
+CONFIG_MFD_STM32_LPTIMER=m
CONFIG_MFD_WCD934X=m
CONFIG_MFD_KHADAS_MCU=m
CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -841,6 +847,7 @@ CONFIG_VIDEO_MEDIATEK_JPEG=m
CONFIG_VIDEO_MEDIATEK_VCODEC=m
CONFIG_VIDEO_WAVE_VPU=m
CONFIG_VIDEO_E5010_JPEG_ENC=m
+CONFIG_VIDEO_MEDIATEK_MDP3=m
CONFIG_VIDEO_IMX7_CSI=m
CONFIG_VIDEO_IMX_MIPI_CSIS=m
CONFIG_VIDEO_IMX8_ISI=m
@@ -907,6 +914,7 @@ CONFIG_DRM_PANEL_BOE_TV101WUM_NL6=m
CONFIG_DRM_PANEL_LVDS=m
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_PANEL_EDP=m
+CONFIG_DRM_PANEL_HIMAX_HX8279=m
CONFIG_DRM_PANEL_ILITEK_ILI9882T=m
CONFIG_DRM_PANEL_KHADAS_TS050=m
CONFIG_DRM_PANEL_MANTIX_MLAF057WE51=m
@@ -914,6 +922,7 @@ CONFIG_DRM_PANEL_NOVATEK_NT36672E=m
CONFIG_DRM_PANEL_RAYDIUM_RM67191=m
CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20=m
CONFIG_DRM_PANEL_SITRONIX_ST7703=m
+CONFIG_DRM_PANEL_STARTEK_KD070FHFID015=m
CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m
CONFIG_DRM_PANEL_VISIONOX_VTDR6130=m
CONFIG_DRM_DISPLAY_CONNECTOR=m
@@ -937,6 +946,7 @@ CONFIG_DRM_TI_SN65DSI86=m
CONFIG_DRM_ANALOGIX_ANX7625=m
CONFIG_DRM_I2C_ADV7511=m
CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_DRM_CDNS_DSI=m
CONFIG_DRM_CDNS_MHDP8546=m
CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE=m
CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
@@ -1005,11 +1015,13 @@ CONFIG_SND_SOC_SC7280=m
CONFIG_SND_SOC_X1E80100=m
CONFIG_SND_SOC_ROCKCHIP=m
CONFIG_SND_SOC_ROCKCHIP_I2S_TDM=m
+CONFIG_SND_SOC_ROCKCHIP_SAI=m
CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
CONFIG_SND_SOC_ROCKCHIP_RT5645=m
CONFIG_SND_SOC_RK3399_GRU_SOUND=m
CONFIG_SND_SOC_SAMSUNG=y
CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_MSIOF=m
CONFIG_SND_SOC_RZ=m
CONFIG_SND_SOC_SOF_TOPLEVEL=y
CONFIG_SND_SOC_SOF_OF=y
@@ -1043,6 +1055,8 @@ CONFIG_SND_SOC_DA7213=m
CONFIG_SND_SOC_ES7134=m
CONFIG_SND_SOC_ES7241=m
CONFIG_SND_SOC_ES8316=m
+CONFIG_SND_SOC_ES8328=m
+CONFIG_SND_SOC_ES8328_I2C=m
CONFIG_SND_SOC_GTM601=m
CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m
CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m
@@ -1142,6 +1156,8 @@ CONFIG_USB_MASS_STORAGE=m
CONFIG_TYPEC=m
CONFIG_TYPEC_TCPM=m
CONFIG_TYPEC_TCPCI=m
+CONFIG_TYPEC_RT1711H=m
+CONFIG_TYPEC_MT6360=m
CONFIG_TYPEC_TCPCI_MAXIM=m
CONFIG_TYPEC_FUSB302=m
CONFIG_TYPEC_QCOM_PMIC=m
@@ -1152,6 +1168,7 @@ CONFIG_TYPEC_TPS6598X=m
CONFIG_TYPEC_HD3SS3220=m
CONFIG_TYPEC_MUX_FSA4480=m
CONFIG_TYPEC_MUX_GPIO_SBU=m
+CONFIG_TYPEC_MUX_IT5205=m
CONFIG_TYPEC_MUX_NB7VPQ904M=m
CONFIG_TYPEC_MUX_PS883X=m
CONFIG_TYPEC_MUX_WCD939X_USBSS=m
@@ -1261,6 +1278,7 @@ CONFIG_PL330_DMA=y
CONFIG_TEGRA186_GPC_DMA=y
CONFIG_TEGRA20_APB_DMA=y
CONFIG_TEGRA210_ADMA=m
+CONFIG_MTK_UART_APDMA=m
CONFIG_QCOM_BAM_DMA=y
CONFIG_QCOM_GPI_DMA=m
CONFIG_QCOM_HIDMA_MGMT=y
@@ -1341,6 +1359,7 @@ CONFIG_IPQ_APSS_5018=y
CONFIG_IPQ_CMN_PLL=m
CONFIG_IPQ_GCC_5018=y
CONFIG_IPQ_GCC_5332=y
+CONFIG_IPQ_GCC_5424=y
CONFIG_IPQ_GCC_6018=y
CONFIG_IPQ_GCC_8074=y
CONFIG_IPQ_GCC_9574=y
@@ -1412,10 +1431,13 @@ CONFIG_CLK_GFM_LPASS_SM8250=m
CONFIG_CLK_RCAR_USB2_CLOCK_SEL=y
CONFIG_CLK_RENESAS_VBATTB=m
CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_OMAP=m
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_TEGRA186_TIMER=y
+CONFIG_CLKSRC_STM32_LP=y
CONFIG_RENESAS_OSTM=y
CONFIG_ARM_MHU=y
+CONFIG_EXYNOS_MBOX=m
CONFIG_IMX_MBOX=y
CONFIG_OMAP2PLUS_MBOX=m
CONFIG_PLATFORM_MHU=y
@@ -1474,29 +1496,6 @@ CONFIG_QCOM_WCNSS_CTRL=m
CONFIG_QCOM_APR=m
CONFIG_QCOM_ICC_BWMON=m
CONFIG_QCOM_PBS=m
-CONFIG_ARCH_R8A77995=y
-CONFIG_ARCH_R8A77990=y
-CONFIG_ARCH_R8A77951=y
-CONFIG_ARCH_R8A77965=y
-CONFIG_ARCH_R8A77960=y
-CONFIG_ARCH_R8A77961=y
-CONFIG_ARCH_R8A779F0=y
-CONFIG_ARCH_R8A77980=y
-CONFIG_ARCH_R8A77970=y
-CONFIG_ARCH_R8A779A0=y
-CONFIG_ARCH_R8A779G0=y
-CONFIG_ARCH_R8A779H0=y
-CONFIG_ARCH_R8A774C0=y
-CONFIG_ARCH_R8A774E1=y
-CONFIG_ARCH_R8A774A1=y
-CONFIG_ARCH_R8A774B1=y
-CONFIG_ARCH_R9A07G043=y
-CONFIG_ARCH_R9A07G044=y
-CONFIG_ARCH_R9A07G054=y
-CONFIG_ARCH_R9A08G045=y
-CONFIG_ARCH_R9A09G011=y
-CONFIG_ARCH_R9A09G047=y
-CONFIG_ARCH_R9A09G057=y
CONFIG_ROCKCHIP_IODOMAIN=y
CONFIG_ARCH_TEGRA_132_SOC=y
CONFIG_ARCH_TEGRA_210_SOC=y
@@ -1525,6 +1524,7 @@ CONFIG_EXYNOS_ADC=y
CONFIG_IMX8QXP_ADC=m
CONFIG_IMX93_ADC=m
CONFIG_MAX9611=m
+CONFIG_MEDIATEK_MT6359_AUXADC=m
CONFIG_MEDIATEK_MT6577_AUXADC=m
CONFIG_QCOM_SPMI_VADC=m
CONFIG_QCOM_SPMI_ADC5=m
@@ -1550,10 +1550,11 @@ CONFIG_PWM_IMX27=m
CONFIG_PWM_MESON=m
CONFIG_PWM_MTK_DISP=m
CONFIG_PWM_MEDIATEK=m
-CONFIG_PWM_RCAR=m
+CONFIG_PWM_RENESAS_RCAR=m
+CONFIG_PWM_RENESAS_RZG2L_GPT=m
+CONFIG_PWM_RENESAS_RZ_MTU3=m
CONFIG_PWM_RENESAS_TPU=m
CONFIG_PWM_ROCKCHIP=y
-CONFIG_PWM_RZ_MTU3=m
CONFIG_PWM_SAMSUNG=y
CONFIG_PWM_SL28CPLD=m
CONFIG_PWM_SUN4I=m
@@ -1577,6 +1578,7 @@ CONFIG_PHY_CAN_TRANSCEIVER=m
CONFIG_PHY_NXP_PTN3222=m
CONFIG_PHY_SUN4I_USB=y
CONFIG_PHY_CADENCE_TORRENT=m
+CONFIG_PHY_CADENCE_DPHY=m
CONFIG_PHY_CADENCE_DPHY_RX=m
CONFIG_PHY_CADENCE_SIERRA=m
CONFIG_PHY_CADENCE_SALVO=m
@@ -1586,7 +1588,11 @@ CONFIG_PHY_HI6220_USB=y
CONFIG_PHY_HISTB_COMBPHY=y
CONFIG_PHY_HISI_INNO_USB2=y
CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_MTK_PCIE=m
CONFIG_PHY_MTK_TPHY=y
+CONFIG_PHY_MTK_HDMI=m
+CONFIG_PHY_MTK_MIPI_DSI=m
+CONFIG_PHY_MTK_DP=m
CONFIG_PHY_QCOM_EDP=m
CONFIG_PHY_QCOM_PCIE2=m
CONFIG_PHY_QCOM_QMP=m
@@ -1695,6 +1701,7 @@ CONFIG_INTERCONNECT_QCOM_SM8650=y
CONFIG_INTERCONNECT_QCOM_SM8750=y
CONFIG_INTERCONNECT_QCOM_X1E80100=y
CONFIG_COUNTER=m
+CONFIG_TI_EQEP=m
CONFIG_RZ_MTU3_CNT=m
CONFIG_HTE=y
CONFIG_HTE_TEGRA194=y
@@ -1729,15 +1736,14 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_USER=y
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CHACHA20=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_USER_API_RNG=m
-CONFIG_CRYPTO_CHACHA20_NEON=m
CONFIG_CRYPTO_GHASH_ARM64_CE=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
-CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_SHA512_ARM64_CE=m
CONFIG_CRYPTO_SHA3_ARM64=m
CONFIG_CRYPTO_SM3_ARM64_CE=m
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 3418c8d3c78d..c44b0f202a1f 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -25,18 +25,6 @@ config CRYPTO_NHPOLY1305_NEON
Architecture: arm64 using:
- NEON (Advanced SIMD) extensions
-config CRYPTO_POLY1305_NEON
- tristate
- depends on KERNEL_MODE_NEON
- select CRYPTO_HASH
- select CRYPTO_ARCH_HAVE_LIB_POLY1305
- default CRYPTO_LIB_POLY1305_INTERNAL
- help
- Poly1305 authenticator algorithm (RFC7539)
-
- Architecture: arm64 using:
- - NEON (Advanced SIMD) extensions
-
config CRYPTO_SHA1_ARM64_CE
tristate "Hash functions: SHA-1 (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
@@ -48,25 +36,6 @@ config CRYPTO_SHA1_ARM64_CE
Architecture: arm64 using:
- ARMv8 Crypto Extensions
-config CRYPTO_SHA256_ARM64
- tristate "Hash functions: SHA-224 and SHA-256"
- select CRYPTO_HASH
- help
- SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
- Architecture: arm64
-
-config CRYPTO_SHA2_ARM64_CE
- tristate "Hash functions: SHA-224 and SHA-256 (ARMv8 Crypto Extensions)"
- depends on KERNEL_MODE_NEON
- select CRYPTO_HASH
- select CRYPTO_SHA256_ARM64
- help
- SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
- Architecture: arm64 using:
- - ARMv8 Crypto Extensions
-
config CRYPTO_SHA512_ARM64
tristate "Hash functions: SHA-384 and SHA-512"
select CRYPTO_HASH
@@ -101,7 +70,7 @@ config CRYPTO_SM3_NEON
tristate "Hash functions: SM3 (NEON)"
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
- select CRYPTO_SM3
+ select CRYPTO_LIB_SM3
help
SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012)
@@ -112,7 +81,7 @@ config CRYPTO_SM3_ARM64_CE
tristate "Hash functions: SM3 (ARMv8.2 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
- select CRYPTO_SM3
+ select CRYPTO_LIB_SM3
help
SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012)
@@ -143,7 +112,7 @@ config CRYPTO_AES_ARM64
config CRYPTO_AES_ARM64_CE
tristate "Ciphers: AES (ARMv8 Crypto Extensions)"
- depends on ARM64 && KERNEL_MODE_NEON
+ depends on KERNEL_MODE_NEON
select CRYPTO_ALGAPI
select CRYPTO_LIB_AES
help
@@ -186,20 +155,6 @@ config CRYPTO_AES_ARM64_NEON_BLK
Architecture: arm64 using:
- NEON (Advanced SIMD) extensions
-config CRYPTO_CHACHA20_NEON
- tristate
- depends on KERNEL_MODE_NEON
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_CHACHA_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
- default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
- stream cipher algorithms
-
- Architecture: arm64 using:
- - NEON (Advanced SIMD) extensions
-
config CRYPTO_AES_ARM64_BS
tristate "Ciphers: AES, modes: ECB/CBC/CTR/XCTR/XTS modes (bit-sliced NEON)"
depends on KERNEL_MODE_NEON
@@ -267,7 +222,7 @@ config CRYPTO_SM4_ARM64_NEON_BLK
config CRYPTO_AES_ARM64_CE_CCM
tristate "AEAD cipher: AES in CCM mode (ARMv8 Crypto Extensions)"
- depends on ARM64 && KERNEL_MODE_NEON
+ depends on KERNEL_MODE_NEON
select CRYPTO_ALGAPI
select CRYPTO_AES_ARM64_CE
select CRYPTO_AES_ARM64_CE_BLK
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index e7139c4768ce..c231c980c514 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -8,9 +8,6 @@
obj-$(CONFIG_CRYPTO_SHA1_ARM64_CE) += sha1-ce.o
sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
-obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o
-sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
-
obj-$(CONFIG_CRYPTO_SHA512_ARM64_CE) += sha512-ce.o
sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o
@@ -56,19 +53,9 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o
obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
aes-neon-blk-y := aes-glue-neon.o aes-neon.o
-obj-$(CONFIG_CRYPTO_SHA256_ARM64) += sha256-arm64.o
-sha256-arm64-y := sha256-glue.o sha256-core.o
-
obj-$(CONFIG_CRYPTO_SHA512_ARM64) += sha512-arm64.o
sha512-arm64-y := sha512-glue.o sha512-core.o
-obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
-chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o
-
-obj-$(CONFIG_CRYPTO_POLY1305_NEON) += poly1305-neon.o
-poly1305-neon-y := poly1305-core.o poly1305-glue.o
-AFLAGS_poly1305-core.o += -Dpoly1305_init=poly1305_init_arm64
-
obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
@@ -81,10 +68,7 @@ aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $(<) void $(@)
-$(obj)/%-core.S: $(src)/%-armv8.pl
- $(call cmd,perlasm)
-
-$(obj)/sha256-core.S: $(src)/sha512-armv8.pl
+$(obj)/sha512-core.S: $(src)/../lib/crypto/sha2-armv8.pl
$(call cmd,perlasm)
-clean-files += poly1305-core.S sha256-core.S sha512-core.S
+clean-files += sha512-core.S
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index b0150999743f..81560f722b9d 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -5,19 +5,20 @@
* Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
-#include <asm/neon.h>
#include <asm/hwcap.h>
-#include <asm/simd.h>
+#include <asm/neon.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
-#include <linux/module.h>
-#include <linux/cpufeature.h>
+#include <crypto/sha2.h>
+#include <crypto/utils.h>
#include <crypto/xts.h>
+#include <linux/cpufeature.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include "aes-ce-setkey.h"
@@ -130,7 +131,6 @@ struct mac_tfm_ctx {
};
struct mac_desc_ctx {
- unsigned int len;
u8 dg[AES_BLOCK_SIZE];
};
@@ -869,109 +869,64 @@ static int mac_init(struct shash_desc *desc)
struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
memset(ctx->dg, 0, AES_BLOCK_SIZE);
- ctx->len = 0;
-
return 0;
}
static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
- u8 dg[], int enc_before, int enc_after)
+ u8 dg[], int enc_before)
{
int rounds = 6 + ctx->key_length / 4;
+ int rem;
- if (crypto_simd_usable()) {
- int rem;
-
- do {
- kernel_neon_begin();
- rem = aes_mac_update(in, ctx->key_enc, rounds, blocks,
- dg, enc_before, enc_after);
- kernel_neon_end();
- in += (blocks - rem) * AES_BLOCK_SIZE;
- blocks = rem;
- enc_before = 0;
- } while (blocks);
- } else {
- if (enc_before)
- aes_encrypt(ctx, dg, dg);
-
- while (blocks--) {
- crypto_xor(dg, in, AES_BLOCK_SIZE);
- in += AES_BLOCK_SIZE;
-
- if (blocks || enc_after)
- aes_encrypt(ctx, dg, dg);
- }
- }
+ do {
+ kernel_neon_begin();
+ rem = aes_mac_update(in, ctx->key_enc, rounds, blocks,
+ dg, enc_before, !enc_before);
+ kernel_neon_end();
+ in += (blocks - rem) * AES_BLOCK_SIZE;
+ blocks = rem;
+ } while (blocks);
}
static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
+ int blocks = len / AES_BLOCK_SIZE;
- while (len > 0) {
- unsigned int l;
-
- if ((ctx->len % AES_BLOCK_SIZE) == 0 &&
- (ctx->len + len) > AES_BLOCK_SIZE) {
-
- int blocks = len / AES_BLOCK_SIZE;
-
- len %= AES_BLOCK_SIZE;
-
- mac_do_update(&tctx->key, p, blocks, ctx->dg,
- (ctx->len != 0), (len != 0));
-
- p += blocks * AES_BLOCK_SIZE;
-
- if (!len) {
- ctx->len = AES_BLOCK_SIZE;
- break;
- }
- ctx->len = 0;
- }
-
- l = min(len, AES_BLOCK_SIZE - ctx->len);
-
- if (l <= AES_BLOCK_SIZE) {
- crypto_xor(ctx->dg + ctx->len, p, l);
- ctx->len += l;
- len -= l;
- p += l;
- }
- }
-
- return 0;
+ len %= AES_BLOCK_SIZE;
+ mac_do_update(&tctx->key, p, blocks, ctx->dg, 0);
+ return len;
}
-static int cbcmac_final(struct shash_desc *desc, u8 *out)
+static int cbcmac_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
- mac_do_update(&tctx->key, NULL, 0, ctx->dg, (ctx->len != 0), 0);
-
+ if (len) {
+ crypto_xor(ctx->dg, src, len);
+ mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1);
+ }
memcpy(out, ctx->dg, AES_BLOCK_SIZE);
-
return 0;
}
-static int cmac_final(struct shash_desc *desc, u8 *out)
+static int cmac_finup(struct shash_desc *desc, const u8 *src, unsigned int len,
+ u8 *out)
{
struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
u8 *consts = tctx->consts;
- if (ctx->len != AES_BLOCK_SIZE) {
- ctx->dg[ctx->len] ^= 0x80;
+ crypto_xor(ctx->dg, src, len);
+ if (len != AES_BLOCK_SIZE) {
+ ctx->dg[len] ^= 0x80;
consts += AES_BLOCK_SIZE;
}
-
- mac_do_update(&tctx->key, consts, 1, ctx->dg, 0, 1);
-
+ mac_do_update(&tctx->key, consts, 1, ctx->dg, 0);
memcpy(out, ctx->dg, AES_BLOCK_SIZE);
-
return 0;
}
@@ -979,6 +934,8 @@ static struct shash_alg mac_algs[] = { {
.base.cra_name = "cmac(aes)",
.base.cra_driver_name = "cmac-aes-" MODE,
.base.cra_priority = PRIO,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx) +
2 * AES_BLOCK_SIZE,
@@ -987,13 +944,15 @@ static struct shash_alg mac_algs[] = { {
.digestsize = AES_BLOCK_SIZE,
.init = mac_init,
.update = mac_update,
- .final = cmac_final,
+ .finup = cmac_finup,
.setkey = cmac_setkey,
.descsize = sizeof(struct mac_desc_ctx),
}, {
.base.cra_name = "xcbc(aes)",
.base.cra_driver_name = "xcbc-aes-" MODE,
.base.cra_priority = PRIO,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx) +
2 * AES_BLOCK_SIZE,
@@ -1002,21 +961,22 @@ static struct shash_alg mac_algs[] = { {
.digestsize = AES_BLOCK_SIZE,
.init = mac_init,
.update = mac_update,
- .final = cmac_final,
+ .finup = cmac_finup,
.setkey = xcbc_setkey,
.descsize = sizeof(struct mac_desc_ctx),
}, {
.base.cra_name = "cbcmac(aes)",
.base.cra_driver_name = "cbcmac-aes-" MODE,
.base.cra_priority = PRIO,
- .base.cra_blocksize = 1,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx),
.base.cra_module = THIS_MODULE,
.digestsize = AES_BLOCK_SIZE,
.init = mac_init,
.update = mac_update,
- .final = cbcmac_final,
+ .finup = cbcmac_finup,
.setkey = cbcmac_setkey,
.descsize = sizeof(struct mac_desc_ctx),
} };
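The reworked mac_update()/cmac_finup()/cbcmac_finup() above follow the block-only shash convention signalled by CRYPTO_AHASH_ALG_BLOCK_ONLY: update() consumes only whole AES blocks and returns the number of leftover bytes, which the crypto core buffers and later hands to finup(). A minimal sketch of that contract, assuming a caller-supplied per-block routine (illustrative only, not the kernel API):

#include <stddef.h>

#define BLOCK_SIZE 16

/*
 * Consume whole blocks only; report the tail length back so the caller
 * can buffer it until more data (or the final finup call) arrives.
 */
static size_t block_only_update(const unsigned char *data, size_t len,
				void (*process_block)(const unsigned char *blk))
{
	while (len >= BLOCK_SIZE) {
		process_block(data);
		data += BLOCK_SIZE;
		len -= BLOCK_SIZE;
	}
	return len;	/* remainder for the core to buffer */
}

cmac_finup() then XORs the buffered tail into the running digest before the last encryption, which is why struct mac_desc_ctx no longer needs its own len field.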
diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c
deleted file mode 100644
index 229876acfc58..000000000000
--- a/arch/arm64/crypto/chacha-neon-glue.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * ARM NEON and scalar accelerated ChaCha and XChaCha stream ciphers,
- * including ChaCha20 (RFC7539)
- *
- * Copyright (C) 2016 - 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Based on:
- * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
- *
- * Copyright (C) 2015 Martin Willi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/jump_label.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-
-asmlinkage void chacha_block_xor_neon(u32 *state, u8 *dst, const u8 *src,
- int nrounds);
-asmlinkage void chacha_4block_xor_neon(u32 *state, u8 *dst, const u8 *src,
- int nrounds, int bytes);
-asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
-
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
-
-static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
- int bytes, int nrounds)
-{
- while (bytes > 0) {
- int l = min(bytes, CHACHA_BLOCK_SIZE * 5);
-
- if (l <= CHACHA_BLOCK_SIZE) {
- u8 buf[CHACHA_BLOCK_SIZE];
-
- memcpy(buf, src, l);
- chacha_block_xor_neon(state, buf, buf, nrounds);
- memcpy(dst, buf, l);
- state[12] += 1;
- break;
- }
- chacha_4block_xor_neon(state, dst, src, nrounds, l);
- bytes -= l;
- src += l;
- dst += l;
- state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE);
- }
-}
-
-void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
-{
- if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) {
- hchacha_block_generic(state, stream, nrounds);
- } else {
- kernel_neon_begin();
- hchacha_block_neon(state, stream, nrounds);
- kernel_neon_end();
- }
-}
-EXPORT_SYMBOL(hchacha_block_arch);
-
-void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
- int nrounds)
-{
- if (!static_branch_likely(&have_neon) || bytes <= CHACHA_BLOCK_SIZE ||
- !crypto_simd_usable())
- return chacha_crypt_generic(state, dst, src, bytes, nrounds);
-
- do {
- unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
-
- kernel_neon_begin();
- chacha_doneon(state, dst, src, todo, nrounds);
- kernel_neon_end();
-
- bytes -= todo;
- src += todo;
- dst += todo;
- } while (bytes);
-}
-EXPORT_SYMBOL(chacha_crypt_arch);
-
-static int chacha_neon_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
-{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = rounddown(nbytes, walk.stride);
-
- if (!static_branch_likely(&have_neon) ||
- !crypto_simd_usable()) {
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes,
- ctx->nrounds);
- } else {
- kernel_neon_begin();
- chacha_doneon(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes, ctx->nrounds);
- kernel_neon_end();
- }
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int chacha_neon(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_neon_stream_xor(req, ctx, req->iv);
-}
-
-static int xchacha_neon(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- chacha_init(state, ctx->key, req->iv);
- hchacha_block_arch(state, subctx.key, ctx->nrounds);
- subctx.nrounds = ctx->nrounds;
-
- memcpy(&real_iv[0], req->iv + 24, 8);
- memcpy(&real_iv[8], req->iv + 16, 8);
- return chacha_neon_stream_xor(req, &subctx, real_iv);
-}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 5 * CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_neon,
- .decrypt = chacha_neon,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 5 * CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_neon,
- .decrypt = xchacha_neon,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 5 * CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_neon,
- .decrypt = xchacha_neon,
- }
-};
-
-static int __init chacha_simd_mod_init(void)
-{
- if (!cpu_have_named_feature(ASIMD))
- return 0;
-
- static_branch_enable(&have_neon);
-
- return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
- crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0;
-}
-
-static void __exit chacha_simd_mod_fini(void)
-{
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) && cpu_have_named_feature(ASIMD))
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-module_init(chacha_simd_mod_init);
-module_exit(chacha_simd_mod_fini);
-
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (NEON accelerated)");
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-neon");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-neon");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-neon");
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 071e122f9c37..4995b6e22335 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -6,30 +6,27 @@
*/
#include <asm/neon.h>
-#include <asm/simd.h>
-#include <linux/unaligned.h>
#include <crypto/aes.h>
-#include <crypto/gcm.h>
-#include <crypto/algapi.h>
#include <crypto/b128ops.h>
+#include <crypto/gcm.h>
+#include <crypto/ghash.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
-#include <linux/crypto.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
MODULE_DESCRIPTION("GHASH and AES-GCM using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ghash");
-#define GHASH_BLOCK_SIZE 16
-#define GHASH_DIGEST_SIZE 16
-
#define RFC4106_NONCE_SIZE 4
struct ghash_key {
@@ -37,10 +34,8 @@ struct ghash_key {
u64 h[][2];
};
-struct ghash_desc_ctx {
+struct arm_ghash_desc_ctx {
u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
- u8 buf[GHASH_BLOCK_SIZE];
- u32 count;
};
struct gcm_aes_ctx {
@@ -65,36 +60,12 @@ asmlinkage int pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[],
static int ghash_init(struct shash_desc *desc)
{
- struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
- *ctx = (struct ghash_desc_ctx){};
+ *ctx = (struct arm_ghash_desc_ctx){};
return 0;
}
-static void ghash_do_update(int blocks, u64 dg[], const char *src,
- struct ghash_key *key, const char *head)
-{
- be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };
-
- do {
- const u8 *in = src;
-
- if (head) {
- in = head;
- blocks++;
- head = NULL;
- } else {
- src += GHASH_BLOCK_SIZE;
- }
-
- crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
- gf128mul_lle(&dst, &key->k);
- } while (--blocks);
-
- dg[0] = be64_to_cpu(dst.b);
- dg[1] = be64_to_cpu(dst.a);
-}
-
static __always_inline
void ghash_do_simd_update(int blocks, u64 dg[], const char *src,
struct ghash_key *key, const char *head,
@@ -103,13 +74,9 @@ void ghash_do_simd_update(int blocks, u64 dg[], const char *src,
u64 const h[][2],
const char *head))
{
- if (likely(crypto_simd_usable())) {
- kernel_neon_begin();
- simd_update(blocks, dg, src, key->h, head);
- kernel_neon_end();
- } else {
- ghash_do_update(blocks, dg, src, key, head);
- }
+ kernel_neon_begin();
+ simd_update(blocks, dg, src, key->h, head);
+ kernel_neon_end();
}
/* avoid hogging the CPU for too long */
@@ -118,61 +85,59 @@ void ghash_do_simd_update(int blocks, u64 dg[], const char *src,
static int ghash_update(struct shash_desc *desc, const u8 *src,
unsigned int len)
{
- struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
- unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+ int blocks;
- ctx->count += len;
+ blocks = len / GHASH_BLOCK_SIZE;
+ len -= blocks * GHASH_BLOCK_SIZE;
- if ((partial + len) >= GHASH_BLOCK_SIZE) {
- struct ghash_key *key = crypto_shash_ctx(desc->tfm);
- int blocks;
-
- if (partial) {
- int p = GHASH_BLOCK_SIZE - partial;
+ do {
+ int chunk = min(blocks, MAX_BLOCKS);
- memcpy(ctx->buf + partial, src, p);
- src += p;
- len -= p;
- }
+ ghash_do_simd_update(chunk, ctx->digest, src, key, NULL,
+ pmull_ghash_update_p8);
+ blocks -= chunk;
+ src += chunk * GHASH_BLOCK_SIZE;
+ } while (unlikely(blocks > 0));
+ return len;
+}
- blocks = len / GHASH_BLOCK_SIZE;
- len %= GHASH_BLOCK_SIZE;
+static int ghash_export(struct shash_desc *desc, void *out)
+{
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ u8 *dst = out;
- do {
- int chunk = min(blocks, MAX_BLOCKS);
+ put_unaligned_be64(ctx->digest[1], dst);
+ put_unaligned_be64(ctx->digest[0], dst + 8);
+ return 0;
+}
- ghash_do_simd_update(chunk, ctx->digest, src, key,
- partial ? ctx->buf : NULL,
- pmull_ghash_update_p8);
+static int ghash_import(struct shash_desc *desc, const void *in)
+{
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ const u8 *src = in;
- blocks -= chunk;
- src += chunk * GHASH_BLOCK_SIZE;
- partial = 0;
- } while (unlikely(blocks > 0));
- }
- if (len)
- memcpy(ctx->buf + partial, src, len);
+ ctx->digest[1] = get_unaligned_be64(src);
+ ctx->digest[0] = get_unaligned_be64(src + 8);
return 0;
}
-static int ghash_final(struct shash_desc *desc, u8 *dst)
+static int ghash_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
{
- struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
- unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
-
- if (partial) {
- struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
- memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
+ if (len) {
+ u8 buf[GHASH_BLOCK_SIZE] = {};
- ghash_do_simd_update(1, ctx->digest, ctx->buf, key, NULL,
+ memcpy(buf, src, len);
+ ghash_do_simd_update(1, ctx->digest, src, key, NULL,
pmull_ghash_update_p8);
+ memzero_explicit(buf, sizeof(buf));
}
- put_unaligned_be64(ctx->digest[1], dst);
- put_unaligned_be64(ctx->digest[0], dst + 8);
-
- memzero_explicit(ctx, sizeof(*ctx));
- return 0;
+ return ghash_export(desc, dst);
}
static void ghash_reflect(u64 h[], const be128 *k)
@@ -205,6 +170,7 @@ static struct shash_alg ghash_alg = {
.base.cra_name = "ghash",
.base.cra_driver_name = "ghash-neon",
.base.cra_priority = 150,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = GHASH_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]),
.base.cra_module = THIS_MODULE,
@@ -212,9 +178,12 @@ static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
- .final = ghash_final,
+ .finup = ghash_finup,
.setkey = ghash_setkey,
- .descsize = sizeof(struct ghash_desc_ctx),
+ .export = ghash_export,
+ .import = ghash_import,
+ .descsize = sizeof(struct arm_ghash_desc_ctx),
+ .statesize = sizeof(struct ghash_desc_ctx),
};
static int num_rounds(struct crypto_aes_ctx *ctx)
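ghash_export() above serializes the 128-bit digest as two big-endian 64-bit words, high half (digest[1]) first, and ghash_import() reverses the same layout. A standalone equivalent of the put_unaligned_be64() packing, a sketch only and not kernel code:

#include <stdint.h>

static void put_be64(uint64_t v, unsigned char *p)
{
	for (int i = 0; i < 8; i++)
		p[i] = (unsigned char)(v >> (56 - 8 * i));
}

/* Same byte layout as the export above: digest[1] at offset 0, digest[0] at 8 */
static void ghash_state_export(const uint64_t digest[2], unsigned char out[16])
{
	put_be64(digest[1], out);
	put_be64(digest[0], out + 8);
}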
diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
deleted file mode 100644
index 18883ea438f3..000000000000
--- a/arch/arm64/crypto/poly1305-glue.c
+++ /dev/null
@@ -1,232 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * OpenSSL/Cryptogams accelerated Poly1305 transform for arm64
- *
- * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
- */
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <linux/unaligned.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/poly1305.h>
-#include <crypto/internal/simd.h>
-#include <linux/cpufeature.h>
-#include <linux/crypto.h>
-#include <linux/jump_label.h>
-#include <linux/module.h>
-
-asmlinkage void poly1305_init_arm64(void *state, const u8 *key);
-asmlinkage void poly1305_blocks(void *state, const u8 *src, u32 len, u32 hibit);
-asmlinkage void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit);
-asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
-
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
-
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
-{
- poly1305_init_arm64(&dctx->h, key);
- dctx->s[0] = get_unaligned_le32(key + 16);
- dctx->s[1] = get_unaligned_le32(key + 20);
- dctx->s[2] = get_unaligned_le32(key + 24);
- dctx->s[3] = get_unaligned_le32(key + 28);
- dctx->buflen = 0;
-}
-EXPORT_SYMBOL(poly1305_init_arch);
-
-static int neon_poly1305_init(struct shash_desc *desc)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- dctx->buflen = 0;
- dctx->rset = 0;
- dctx->sset = false;
-
- return 0;
-}
-
-static void neon_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
- u32 len, u32 hibit, bool do_neon)
-{
- if (unlikely(!dctx->sset)) {
- if (!dctx->rset) {
- poly1305_init_arm64(&dctx->h, src);
- src += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- dctx->rset = 1;
- }
- if (len >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(src + 0);
- dctx->s[1] = get_unaligned_le32(src + 4);
- dctx->s[2] = get_unaligned_le32(src + 8);
- dctx->s[3] = get_unaligned_le32(src + 12);
- src += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- if (len < POLY1305_BLOCK_SIZE)
- return;
- }
-
- len &= ~(POLY1305_BLOCK_SIZE - 1);
-
- if (static_branch_likely(&have_neon) && likely(do_neon))
- poly1305_blocks_neon(&dctx->h, src, len, hibit);
- else
- poly1305_blocks(&dctx->h, src, len, hibit);
-}
-
-static void neon_poly1305_do_update(struct poly1305_desc_ctx *dctx,
- const u8 *src, u32 len, bool do_neon)
-{
- if (unlikely(dctx->buflen)) {
- u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen);
-
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- len -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- neon_poly1305_blocks(dctx, dctx->buf,
- POLY1305_BLOCK_SIZE, 1, false);
- dctx->buflen = 0;
- }
- }
-
- if (likely(len >= POLY1305_BLOCK_SIZE)) {
- neon_poly1305_blocks(dctx, src, len, 1, do_neon);
- src += round_down(len, POLY1305_BLOCK_SIZE);
- len %= POLY1305_BLOCK_SIZE;
- }
-
- if (unlikely(len)) {
- dctx->buflen = len;
- memcpy(dctx->buf, src, len);
- }
-}
-
-static int neon_poly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
-{
- bool do_neon = crypto_simd_usable() && srclen > 128;
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (static_branch_likely(&have_neon) && do_neon)
- kernel_neon_begin();
- neon_poly1305_do_update(dctx, src, srclen, do_neon);
- if (static_branch_likely(&have_neon) && do_neon)
- kernel_neon_end();
- return 0;
-}
-
-void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
- unsigned int nbytes)
-{
- if (unlikely(dctx->buflen)) {
- u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen);
-
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- nbytes -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- poly1305_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 1);
- dctx->buflen = 0;
- }
- }
-
- if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
- unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
-
- if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
- do {
- unsigned int todo = min_t(unsigned int, len, SZ_4K);
-
- kernel_neon_begin();
- poly1305_blocks_neon(&dctx->h, src, todo, 1);
- kernel_neon_end();
-
- len -= todo;
- src += todo;
- } while (len);
- } else {
- poly1305_blocks(&dctx->h, src, len, 1);
- src += len;
- }
- nbytes %= POLY1305_BLOCK_SIZE;
- }
-
- if (unlikely(nbytes)) {
- dctx->buflen = nbytes;
- memcpy(dctx->buf, src, nbytes);
- }
-}
-EXPORT_SYMBOL(poly1305_update_arch);
-
-void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
-{
- if (unlikely(dctx->buflen)) {
- dctx->buf[dctx->buflen++] = 1;
- memset(dctx->buf + dctx->buflen, 0,
- POLY1305_BLOCK_SIZE - dctx->buflen);
- poly1305_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
- }
-
- poly1305_emit(&dctx->h, dst, dctx->s);
- memzero_explicit(dctx, sizeof(*dctx));
-}
-EXPORT_SYMBOL(poly1305_final_arch);
-
-static int neon_poly1305_final(struct shash_desc *desc, u8 *dst)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (unlikely(!dctx->sset))
- return -ENOKEY;
-
- poly1305_final_arch(dctx, dst);
- return 0;
-}
-
-static struct shash_alg neon_poly1305_alg = {
- .init = neon_poly1305_init,
- .update = neon_poly1305_update,
- .final = neon_poly1305_final,
- .digestsize = POLY1305_DIGEST_SIZE,
- .descsize = sizeof(struct poly1305_desc_ctx),
-
- .base.cra_name = "poly1305",
- .base.cra_driver_name = "poly1305-neon",
- .base.cra_priority = 200,
- .base.cra_blocksize = POLY1305_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-};
-
-static int __init neon_poly1305_mod_init(void)
-{
- if (!cpu_have_named_feature(ASIMD))
- return 0;
-
- static_branch_enable(&have_neon);
-
- return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
- crypto_register_shash(&neon_poly1305_alg) : 0;
-}
-
-static void __exit neon_poly1305_mod_exit(void)
-{
- if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && cpu_have_named_feature(ASIMD))
- crypto_unregister_shash(&neon_poly1305_alg);
-}
-
-module_init(neon_poly1305_mod_init);
-module_exit(neon_poly1305_mod_exit);
-
-MODULE_DESCRIPTION("Poly1305 transform using NEON instructions");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("poly1305");
-MODULE_ALIAS_CRYPTO("poly1305-neon");
diff --git a/arch/arm64/crypto/polyval-ce-glue.c b/arch/arm64/crypto/polyval-ce-glue.c
index 0a3b5718df85..c4e653688ea0 100644
--- a/arch/arm64/crypto/polyval-ce-glue.c
+++ b/arch/arm64/crypto/polyval-ce-glue.c
@@ -15,17 +15,15 @@
* ARMv8 Crypto Extensions instructions to implement the finite field operations.
*/
-#include <crypto/algapi.h>
+#include <asm/neon.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/polyval.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
+#include <crypto/utils.h>
+#include <linux/cpufeature.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/cpufeature.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
+#include <linux/string.h>
#define NUM_KEY_POWERS 8
@@ -38,7 +36,6 @@ struct polyval_tfm_ctx {
struct polyval_desc_ctx {
u8 buffer[POLYVAL_BLOCK_SIZE];
- u32 bytes;
};
asmlinkage void pmull_polyval_update(const struct polyval_tfm_ctx *keys,
@@ -48,25 +45,16 @@ asmlinkage void pmull_polyval_mul(u8 *op1, const u8 *op2);
static void internal_polyval_update(const struct polyval_tfm_ctx *keys,
const u8 *in, size_t nblocks, u8 *accumulator)
{
- if (likely(crypto_simd_usable())) {
- kernel_neon_begin();
- pmull_polyval_update(keys, in, nblocks, accumulator);
- kernel_neon_end();
- } else {
- polyval_update_non4k(keys->key_powers[NUM_KEY_POWERS-1], in,
- nblocks, accumulator);
- }
+ kernel_neon_begin();
+ pmull_polyval_update(keys, in, nblocks, accumulator);
+ kernel_neon_end();
}
static void internal_polyval_mul(u8 *op1, const u8 *op2)
{
- if (likely(crypto_simd_usable())) {
- kernel_neon_begin();
- pmull_polyval_mul(op1, op2);
- kernel_neon_end();
- } else {
- polyval_mul_non4k(op1, op2);
- }
+ kernel_neon_begin();
+ pmull_polyval_mul(op1, op2);
+ kernel_neon_end();
}
static int polyval_arm64_setkey(struct crypto_shash *tfm,
@@ -103,49 +91,27 @@ static int polyval_arm64_update(struct shash_desc *desc,
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- u8 *pos;
unsigned int nblocks;
- unsigned int n;
-
- if (dctx->bytes) {
- n = min(srclen, dctx->bytes);
- pos = dctx->buffer + POLYVAL_BLOCK_SIZE - dctx->bytes;
-
- dctx->bytes -= n;
- srclen -= n;
- while (n--)
- *pos++ ^= *src++;
-
- if (!dctx->bytes)
- internal_polyval_mul(dctx->buffer,
- tctx->key_powers[NUM_KEY_POWERS-1]);
- }
-
- while (srclen >= POLYVAL_BLOCK_SIZE) {
+ do {
/* allow rescheduling every 4K bytes */
nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE;
internal_polyval_update(tctx, src, nblocks, dctx->buffer);
srclen -= nblocks * POLYVAL_BLOCK_SIZE;
src += nblocks * POLYVAL_BLOCK_SIZE;
- }
+ } while (srclen >= POLYVAL_BLOCK_SIZE);
- if (srclen) {
- dctx->bytes = POLYVAL_BLOCK_SIZE - srclen;
- pos = dctx->buffer;
- while (srclen--)
- *pos++ ^= *src++;
- }
-
- return 0;
+ return srclen;
}
-static int polyval_arm64_final(struct shash_desc *desc, u8 *dst)
+static int polyval_arm64_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- if (dctx->bytes) {
+ if (len) {
+ crypto_xor(dctx->buffer, src, len);
internal_polyval_mul(dctx->buffer,
tctx->key_powers[NUM_KEY_POWERS-1]);
}
@@ -159,13 +125,14 @@ static struct shash_alg polyval_alg = {
.digestsize = POLYVAL_DIGEST_SIZE,
.init = polyval_arm64_init,
.update = polyval_arm64_update,
- .final = polyval_arm64_final,
+ .finup = polyval_arm64_finup,
.setkey = polyval_arm64_setkey,
.descsize = sizeof(struct polyval_desc_ctx),
.base = {
.cra_name = "polyval",
.cra_driver_name = "polyval-ce",
.cra_priority = 200,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = POLYVAL_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct polyval_tfm_ctx),
.cra_module = THIS_MODULE,
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index cbd14f208f83..65b6980817e5 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -7,14 +7,14 @@
#include <asm/neon.h>
#include <asm/simd.h>
-#include <linux/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
-#include <linux/crypto.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
@@ -56,79 +56,49 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
- if (!crypto_simd_usable())
- return crypto_sha1_update(desc, data, len);
-
sctx->finalize = 0;
- sha1_base_do_update(desc, data, len, sha1_ce_transform);
-
- return 0;
+ return sha1_base_do_update_blocks(desc, data, len, sha1_ce_transform);
}
static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
- bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
-
- if (!crypto_simd_usable())
- return crypto_sha1_finup(desc, data, len, out);
+ bool finalized = false;
/*
* Allow the asm code to perform the finalization if there is no
* partial data and the input is a round multiple of the block size.
*/
- sctx->finalize = finalize;
-
- sha1_base_do_update(desc, data, len, sha1_ce_transform);
- if (!finalize)
- sha1_base_do_finalize(desc, sha1_ce_transform);
- return sha1_base_finish(desc, out);
-}
-
-static int sha1_ce_final(struct shash_desc *desc, u8 *out)
-{
- struct sha1_ce_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable())
- return crypto_sha1_finup(desc, NULL, 0, out);
-
- sctx->finalize = 0;
- sha1_base_do_finalize(desc, sha1_ce_transform);
+ if (len >= SHA1_BLOCK_SIZE) {
+ unsigned int remain = len - round_down(len, SHA1_BLOCK_SIZE);
+
+ finalized = !remain;
+ sctx->finalize = finalized;
+ sha1_base_do_update_blocks(desc, data, len, sha1_ce_transform);
+ data += len - remain;
+ len = remain;
+ }
+ if (!finalized) {
+ sctx->finalize = 0;
+ sha1_base_do_finup(desc, data, len, sha1_ce_transform);
+ }
return sha1_base_finish(desc, out);
}
-static int sha1_ce_export(struct shash_desc *desc, void *out)
-{
- struct sha1_ce_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, &sctx->sst, sizeof(struct sha1_state));
- return 0;
-}
-
-static int sha1_ce_import(struct shash_desc *desc, const void *in)
-{
- struct sha1_ce_state *sctx = shash_desc_ctx(desc);
-
- memcpy(&sctx->sst, in, sizeof(struct sha1_state));
- sctx->finalize = 0;
- return 0;
-}
-
static struct shash_alg alg = {
.init = sha1_base_init,
.update = sha1_ce_update,
- .final = sha1_ce_final,
.finup = sha1_ce_finup,
- .import = sha1_ce_import,
- .export = sha1_ce_export,
.descsize = sizeof(struct sha1_ce_state),
- .statesize = sizeof(struct sha1_state),
+ .statesize = SHA1_STATE_SIZE,
.digestsize = SHA1_DIGEST_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ce",
.cra_priority = 200,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
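The rewritten sha1_ce_finup() above only lets the CE assembly pad and finalize in a single pass when the request is a nonzero whole number of SHA-1 blocks; any tail bytes fall back to sha1_base_do_finup(). The condition reduces to a simple remainder test (illustrative sketch, not kernel code):

#include <stdbool.h>
#include <stddef.h>

#define SHA1_BLOCK_SIZE 64

/* True when the asm path may finalize directly: nonzero and block-aligned */
static bool can_finalize_in_asm(size_t len)
{
	return len >= SHA1_BLOCK_SIZE && (len % SHA1_BLOCK_SIZE) == 0;
}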
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
deleted file mode 100644
index 6b4866a88ded..000000000000
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ /dev/null
@@ -1,192 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
- *
- * Copyright (C) 2014 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <linux/unaligned.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <linux/cpufeature.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-
-MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("sha224");
-MODULE_ALIAS_CRYPTO("sha256");
-
-struct sha256_ce_state {
- struct sha256_state sst;
- u32 finalize;
-};
-
-extern const u32 sha256_ce_offsetof_count;
-extern const u32 sha256_ce_offsetof_finalize;
-
-asmlinkage int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
- int blocks);
-
-static void sha256_ce_transform(struct sha256_state *sst, u8 const *src,
- int blocks)
-{
- while (blocks) {
- int rem;
-
- kernel_neon_begin();
- rem = __sha256_ce_transform(container_of(sst,
- struct sha256_ce_state,
- sst), src, blocks);
- kernel_neon_end();
- src += (blocks - rem) * SHA256_BLOCK_SIZE;
- blocks = rem;
- }
-}
-
-const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
- sst.count);
-const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
- finalize);
-
-asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
-
-static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src,
- int blocks)
-{
- sha256_block_data_order(sst->state, src, blocks);
-}
-
-static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha256_ce_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable())
- return sha256_base_do_update(desc, data, len,
- sha256_arm64_transform);
-
- sctx->finalize = 0;
- sha256_base_do_update(desc, data, len, sha256_ce_transform);
-
- return 0;
-}
-
-static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct sha256_ce_state *sctx = shash_desc_ctx(desc);
- bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
-
- if (!crypto_simd_usable()) {
- if (len)
- sha256_base_do_update(desc, data, len,
- sha256_arm64_transform);
- sha256_base_do_finalize(desc, sha256_arm64_transform);
- return sha256_base_finish(desc, out);
- }
-
- /*
- * Allow the asm code to perform the finalization if there is no
- * partial data and the input is a round multiple of the block size.
- */
- sctx->finalize = finalize;
-
- sha256_base_do_update(desc, data, len, sha256_ce_transform);
- if (!finalize)
- sha256_base_do_finalize(desc, sha256_ce_transform);
- return sha256_base_finish(desc, out);
-}
-
-static int sha256_ce_final(struct shash_desc *desc, u8 *out)
-{
- struct sha256_ce_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable()) {
- sha256_base_do_finalize(desc, sha256_arm64_transform);
- return sha256_base_finish(desc, out);
- }
-
- sctx->finalize = 0;
- sha256_base_do_finalize(desc, sha256_ce_transform);
- return sha256_base_finish(desc, out);
-}
-
-static int sha256_ce_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- sha256_base_init(desc);
- return sha256_ce_finup(desc, data, len, out);
-}
-
-static int sha256_ce_export(struct shash_desc *desc, void *out)
-{
- struct sha256_ce_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, &sctx->sst, sizeof(struct sha256_state));
- return 0;
-}
-
-static int sha256_ce_import(struct shash_desc *desc, const void *in)
-{
- struct sha256_ce_state *sctx = shash_desc_ctx(desc);
-
- memcpy(&sctx->sst, in, sizeof(struct sha256_state));
- sctx->finalize = 0;
- return 0;
-}
-
-static struct shash_alg algs[] = { {
- .init = sha224_base_init,
- .update = sha256_ce_update,
- .final = sha256_ce_final,
- .finup = sha256_ce_finup,
- .export = sha256_ce_export,
- .import = sha256_ce_import,
- .descsize = sizeof(struct sha256_ce_state),
- .statesize = sizeof(struct sha256_state),
- .digestsize = SHA224_DIGEST_SIZE,
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "sha224-ce",
- .cra_priority = 200,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .init = sha256_base_init,
- .update = sha256_ce_update,
- .final = sha256_ce_final,
- .finup = sha256_ce_finup,
- .digest = sha256_ce_digest,
- .export = sha256_ce_export,
- .import = sha256_ce_import,
- .descsize = sizeof(struct sha256_ce_state),
- .statesize = sizeof(struct sha256_state),
- .digestsize = SHA256_DIGEST_SIZE,
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-ce",
- .cra_priority = 200,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init sha2_ce_mod_init(void)
-{
- return crypto_register_shashes(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit sha2_ce_mod_fini(void)
-{
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-}
-
-module_cpu_feature_match(SHA2, sha2_ce_mod_init);
-module_exit(sha2_ce_mod_fini);
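The deleted sha256_ce_finup() let the Crypto Extensions assembly perform the SHA-256 finalization itself whenever the whole message arrived in a single call and ended on a block boundary. A minimal sketch of that predicate, for illustration only (the constant and the helper name below are mine, not the kernel's):

#include <stdbool.h>
#include <stddef.h>

#define SHA256_BLOCK	64	/* SHA-256 block size in bytes */

/* True when the CE assembly may append the padding and length itself. */
static bool asm_may_finalize(unsigned long long bytes_already_hashed, size_t len)
{
	return bytes_already_hashed == 0 &&	/* nothing processed or buffered yet */
	       len != 0 &&			/* there is data to finalize over    */
	       len % SHA256_BLOCK == 0;		/* and it ends on a block boundary   */
}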
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
deleted file mode 100644
index 35356987cc1e..000000000000
--- a/arch/arm64/crypto/sha256-glue.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64
- *
- * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
- */
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64");
-MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("sha224");
-MODULE_ALIAS_CRYPTO("sha256");
-
-asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
- unsigned int num_blks);
-EXPORT_SYMBOL(sha256_block_data_order);
-
-static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src,
- int blocks)
-{
- sha256_block_data_order(sst->state, src, blocks);
-}
-
-asmlinkage void sha256_block_neon(u32 *digest, const void *data,
- unsigned int num_blks);
-
-static void sha256_neon_transform(struct sha256_state *sst, u8 const *src,
- int blocks)
-{
- sha256_block_neon(sst->state, src, blocks);
-}
-
-static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- return sha256_base_do_update(desc, data, len, sha256_arm64_transform);
-}
-
-static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- if (len)
- sha256_base_do_update(desc, data, len, sha256_arm64_transform);
- sha256_base_do_finalize(desc, sha256_arm64_transform);
-
- return sha256_base_finish(desc, out);
-}
-
-static int crypto_sha256_arm64_final(struct shash_desc *desc, u8 *out)
-{
- return crypto_sha256_arm64_finup(desc, NULL, 0, out);
-}
-
-static struct shash_alg algs[] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = crypto_sha256_arm64_update,
- .final = crypto_sha256_arm64_final,
- .finup = crypto_sha256_arm64_finup,
- .descsize = sizeof(struct sha256_state),
- .base.cra_name = "sha256",
- .base.cra_driver_name = "sha256-arm64",
- .base.cra_priority = 125,
- .base.cra_blocksize = SHA256_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = crypto_sha256_arm64_update,
- .final = crypto_sha256_arm64_final,
- .finup = crypto_sha256_arm64_finup,
- .descsize = sizeof(struct sha256_state),
- .base.cra_name = "sha224",
- .base.cra_driver_name = "sha224-arm64",
- .base.cra_priority = 125,
- .base.cra_blocksize = SHA224_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-} };
-
-static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable())
- return sha256_base_do_update(desc, data, len,
- sha256_arm64_transform);
-
- while (len > 0) {
- unsigned int chunk = len;
-
- /*
- * Don't hog the CPU for the entire time it takes to process all
- * input when running on a preemptible kernel, but process the
- * data block by block instead.
- */
- if (IS_ENABLED(CONFIG_PREEMPTION) &&
- chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
- chunk = SHA256_BLOCK_SIZE -
- sctx->count % SHA256_BLOCK_SIZE;
-
- kernel_neon_begin();
- sha256_base_do_update(desc, data, chunk, sha256_neon_transform);
- kernel_neon_end();
- data += chunk;
- len -= chunk;
- }
- return 0;
-}
-
-static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- if (!crypto_simd_usable()) {
- if (len)
- sha256_base_do_update(desc, data, len,
- sha256_arm64_transform);
- sha256_base_do_finalize(desc, sha256_arm64_transform);
- } else {
- if (len)
- sha256_update_neon(desc, data, len);
- kernel_neon_begin();
- sha256_base_do_finalize(desc, sha256_neon_transform);
- kernel_neon_end();
- }
- return sha256_base_finish(desc, out);
-}
-
-static int sha256_final_neon(struct shash_desc *desc, u8 *out)
-{
- return sha256_finup_neon(desc, NULL, 0, out);
-}
-
-static struct shash_alg neon_algs[] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = sha256_update_neon,
- .final = sha256_final_neon,
- .finup = sha256_finup_neon,
- .descsize = sizeof(struct sha256_state),
- .base.cra_name = "sha256",
- .base.cra_driver_name = "sha256-arm64-neon",
- .base.cra_priority = 150,
- .base.cra_blocksize = SHA256_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = sha256_update_neon,
- .final = sha256_final_neon,
- .finup = sha256_finup_neon,
- .descsize = sizeof(struct sha256_state),
- .base.cra_name = "sha224",
- .base.cra_driver_name = "sha224-arm64-neon",
- .base.cra_priority = 150,
- .base.cra_blocksize = SHA224_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-} };
-
-static int __init sha256_mod_init(void)
-{
- int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
- if (ret)
- return ret;
-
- if (cpu_have_named_feature(ASIMD)) {
- ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs));
- if (ret)
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
- }
- return ret;
-}
-
-static void __exit sha256_mod_fini(void)
-{
- if (cpu_have_named_feature(ASIMD))
- crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-}
-
-module_init(sha256_mod_init);
-module_exit(sha256_mod_fini);
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index 5662c3ac49e9..b4f1001046c9 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -12,13 +12,13 @@
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
-#include <linux/unaligned.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/sha3.h>
#include <linux/cpufeature.h>
-#include <linux/crypto.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
@@ -35,74 +35,55 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
-
- if (!crypto_simd_usable())
- return crypto_sha3_update(desc, data, len);
-
- if ((sctx->partial + len) >= sctx->rsiz) {
- int blocks;
-
- if (sctx->partial) {
- int p = sctx->rsiz - sctx->partial;
-
- memcpy(sctx->buf + sctx->partial, data, p);
- kernel_neon_begin();
- sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size);
- kernel_neon_end();
-
- data += p;
- len -= p;
- sctx->partial = 0;
- }
-
- blocks = len / sctx->rsiz;
- len %= sctx->rsiz;
-
- while (blocks) {
- int rem;
-
- kernel_neon_begin();
- rem = sha3_ce_transform(sctx->st, data, blocks,
- digest_size);
- kernel_neon_end();
- data += (blocks - rem) * sctx->rsiz;
- blocks = rem;
- }
- }
-
- if (len) {
- memcpy(sctx->buf + sctx->partial, data, len);
- sctx->partial += len;
- }
- return 0;
+ struct crypto_shash *tfm = desc->tfm;
+ unsigned int bs, ds;
+ int blocks;
+
+ ds = crypto_shash_digestsize(tfm);
+ bs = crypto_shash_blocksize(tfm);
+ blocks = len / bs;
+ len -= blocks * bs;
+ do {
+ int rem;
+
+ kernel_neon_begin();
+ rem = sha3_ce_transform(sctx->st, data, blocks, ds);
+ kernel_neon_end();
+ data += (blocks - rem) * bs;
+ blocks = rem;
+ } while (blocks);
+ return len;
}
-static int sha3_final(struct shash_desc *desc, u8 *out)
+static int sha3_finup(struct shash_desc *desc, const u8 *src, unsigned int len,
+ u8 *out)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ struct crypto_shash *tfm = desc->tfm;
__le64 *digest = (__le64 *)out;
+ u8 block[SHA3_224_BLOCK_SIZE];
+ unsigned int bs, ds;
int i;
- if (!crypto_simd_usable())
- return crypto_sha3_final(desc, out);
+ ds = crypto_shash_digestsize(tfm);
+ bs = crypto_shash_blocksize(tfm);
+ memcpy(block, src, len);
- sctx->buf[sctx->partial++] = 0x06;
- memset(sctx->buf + sctx->partial, 0, sctx->rsiz - sctx->partial);
- sctx->buf[sctx->rsiz - 1] |= 0x80;
+ block[len++] = 0x06;
+ memset(block + len, 0, bs - len);
+ block[bs - 1] |= 0x80;
kernel_neon_begin();
- sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size);
+ sha3_ce_transform(sctx->st, block, 1, ds);
kernel_neon_end();
+	memzero_explicit(block, sizeof(block));
- for (i = 0; i < digest_size / 8; i++)
+ for (i = 0; i < ds / 8; i++)
put_unaligned_le64(sctx->st[i], digest++);
- if (digest_size & 4)
+ if (ds & 4)
put_unaligned_le32(sctx->st[i], (__le32 *)digest);
- memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
@@ -110,10 +91,11 @@ static struct shash_alg algs[] = { {
.digestsize = SHA3_224_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-224",
.base.cra_driver_name = "sha3-224-ce",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@@ -121,10 +103,11 @@ static struct shash_alg algs[] = { {
.digestsize = SHA3_256_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-256",
.base.cra_driver_name = "sha3-256-ce",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@@ -132,10 +115,11 @@ static struct shash_alg algs[] = { {
.digestsize = SHA3_384_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-384",
.base.cra_driver_name = "sha3-384-ce",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@@ -143,10 +127,11 @@ static struct shash_alg algs[] = { {
.digestsize = SHA3_512_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-512",
.base.cra_driver_name = "sha3-512-ce",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
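With the CRYPTO_AHASH_ALG_BLOCK_ONLY flow introduced here, sha3_update() only consumes whole blocks and hands the residue back as its return value, so the final partial block reaches sha3_finup(), which applies the SHA-3 padding visible above: the 0x06 domain/pad byte after the data and 0x80 in the last byte of the rate. A standalone sketch of that padding step, for illustration only (the function name and the assumption that 0 <= len < bs are mine):

#include <stdint.h>
#include <string.h>

/* Build the last rate-sized block for SHA-3: data, 0x06, zero fill, final 0x80. */
static void sha3_pad_last_block(uint8_t *block, size_t bs,
				const uint8_t *src, size_t len)
{
	memcpy(block, src, len);
	block[len] = 0x06;			/* domain separation bits + first pad bit */
	memset(block + len + 1, 0, bs - len - 1);
	block[bs - 1] |= 0x80;			/* last pad bit, ORed in case len == bs - 1 */
}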
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index 071f64293227..6fb3001fa2c9 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -10,14 +10,11 @@
*/
#include <asm/neon.h>
-#include <asm/simd.h>
-#include <linux/unaligned.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <linux/cpufeature.h>
-#include <linux/crypto.h>
+#include <linux/kernel.h>
#include <linux/module.h>
MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions");
@@ -29,12 +26,10 @@ MODULE_ALIAS_CRYPTO("sha512");
asmlinkage int __sha512_ce_transform(struct sha512_state *sst, u8 const *src,
int blocks);
-asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
-
static void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
int blocks)
{
- while (blocks) {
+ do {
int rem;
kernel_neon_begin();
@@ -42,67 +37,47 @@ static void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
kernel_neon_end();
src += (blocks - rem) * SHA512_BLOCK_SIZE;
blocks = rem;
- }
-}
-
-static void sha512_arm64_transform(struct sha512_state *sst, u8 const *src,
- int blocks)
-{
- sha512_block_data_order(sst->state, src, blocks);
+ } while (blocks);
}
static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- sha512_block_fn *fn = crypto_simd_usable() ? sha512_ce_transform
- : sha512_arm64_transform;
-
- sha512_base_do_update(desc, data, len, fn);
- return 0;
+ return sha512_base_do_update_blocks(desc, data, len,
+ sha512_ce_transform);
}
static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- sha512_block_fn *fn = crypto_simd_usable() ? sha512_ce_transform
- : sha512_arm64_transform;
-
- sha512_base_do_update(desc, data, len, fn);
- sha512_base_do_finalize(desc, fn);
- return sha512_base_finish(desc, out);
-}
-
-static int sha512_ce_final(struct shash_desc *desc, u8 *out)
-{
- sha512_block_fn *fn = crypto_simd_usable() ? sha512_ce_transform
- : sha512_arm64_transform;
-
- sha512_base_do_finalize(desc, fn);
+ sha512_base_do_finup(desc, data, len, sha512_ce_transform);
return sha512_base_finish(desc, out);
}
static struct shash_alg algs[] = { {
.init = sha384_base_init,
.update = sha512_ce_update,
- .final = sha512_ce_final,
.finup = sha512_ce_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA384_DIGEST_SIZE,
.base.cra_name = "sha384",
.base.cra_driver_name = "sha384-ce",
.base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.init = sha512_base_init,
.update = sha512_ce_update,
- .final = sha512_ce_final,
.finup = sha512_ce_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA512_DIGEST_SIZE,
.base.cra_name = "sha512",
.base.cra_driver_name = "sha512-ce",
.base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
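The CRYPTO_AHASH_ALG_BLOCK_ONLY / CRYPTO_AHASH_ALG_FINUP_MAX conversions above all follow the same shape: update() pushes whole blocks through the *_do_update_blocks() helpers and reports how many trailing bytes it did not touch, leaving partial-block buffering to the core. A generic sketch of that contract as I read it from these hunks (the helper below is illustrative, not an actual crypto API function):

#include <stddef.h>

typedef void (*block_fn)(void *state, const unsigned char *src, size_t nblocks);

/* Hash only the complete blocks; return the residue the caller must buffer. */
static size_t update_full_blocks(void *state, const unsigned char *data,
				 size_t len, size_t block_size, block_fn fn)
{
	size_t nblocks = len / block_size;

	if (nblocks)
		fn(state, data, nblocks);
	return len - nblocks * block_size;
}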
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index 62f129dea83d..15aa9d8b7b2c 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -6,11 +6,10 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/types.h>
-#include <linux/string.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
-#include <asm/neon.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash for arm64");
MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
@@ -19,59 +18,53 @@ MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");
-asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
- unsigned int num_blks);
-EXPORT_SYMBOL(sha512_block_data_order);
+asmlinkage void sha512_blocks_arch(u64 *digest, const void *data,
+ unsigned int num_blks);
static void sha512_arm64_transform(struct sha512_state *sst, u8 const *src,
int blocks)
{
- sha512_block_data_order(sst->state, src, blocks);
+ sha512_blocks_arch(sst->state, src, blocks);
}
static int sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha512_base_do_update(desc, data, len, sha512_arm64_transform);
+ return sha512_base_do_update_blocks(desc, data, len,
+ sha512_arm64_transform);
}
static int sha512_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (len)
- sha512_base_do_update(desc, data, len, sha512_arm64_transform);
- sha512_base_do_finalize(desc, sha512_arm64_transform);
-
+ sha512_base_do_finup(desc, data, len, sha512_arm64_transform);
return sha512_base_finish(desc, out);
}
-static int sha512_final(struct shash_desc *desc, u8 *out)
-{
- return sha512_finup(desc, NULL, 0, out);
-}
-
static struct shash_alg algs[] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = sha512_update,
- .final = sha512_final,
.finup = sha512_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base.cra_name = "sha512",
.base.cra_driver_name = "sha512-arm64",
.base.cra_priority = 150,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = sha512_update,
- .final = sha512_final,
.finup = sha512_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base.cra_name = "sha384",
.base.cra_driver_name = "sha384-arm64",
.base.cra_priority = 150,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.base.cra_blocksize = SHA384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
index 1a71788c4cda..eac6f5fa0abe 100644
--- a/arch/arm64/crypto/sm3-ce-glue.c
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -6,14 +6,11 @@
*/
#include <asm/neon.h>
-#include <asm/simd.h>
-#include <linux/unaligned.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
#include <linux/cpufeature.h>
-#include <linux/crypto.h>
+#include <linux/kernel.h>
#include <linux/module.h>
MODULE_DESCRIPTION("SM3 secure hash using ARMv8 Crypto Extensions");
@@ -26,50 +23,20 @@ asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- if (!crypto_simd_usable()) {
- sm3_update(shash_desc_ctx(desc), data, len);
- return 0;
- }
+ int remain;
kernel_neon_begin();
- sm3_base_do_update(desc, data, len, sm3_ce_transform);
+ remain = sm3_base_do_update_blocks(desc, data, len, sm3_ce_transform);
kernel_neon_end();
-
- return 0;
-}
-
-static int sm3_ce_final(struct shash_desc *desc, u8 *out)
-{
- if (!crypto_simd_usable()) {
- sm3_final(shash_desc_ctx(desc), out);
- return 0;
- }
-
- kernel_neon_begin();
- sm3_base_do_finalize(desc, sm3_ce_transform);
- kernel_neon_end();
-
- return sm3_base_finish(desc, out);
+ return remain;
}
static int sm3_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!crypto_simd_usable()) {
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- if (len)
- sm3_update(sctx, data, len);
- sm3_final(sctx, out);
- return 0;
- }
-
kernel_neon_begin();
- if (len)
- sm3_base_do_update(desc, data, len, sm3_ce_transform);
- sm3_base_do_finalize(desc, sm3_ce_transform);
+ sm3_base_do_finup(desc, data, len, sm3_ce_transform);
kernel_neon_end();
-
return sm3_base_finish(desc, out);
}
@@ -77,11 +44,12 @@ static struct shash_alg sm3_alg = {
.digestsize = SM3_DIGEST_SIZE,
.init = sm3_base_init,
.update = sm3_ce_update,
- .final = sm3_ce_final,
.finup = sm3_ce_finup,
- .descsize = sizeof(struct sm3_state),
+ .descsize = SM3_STATE_SIZE,
.base.cra_name = "sm3",
.base.cra_driver_name = "sm3-ce",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.base.cra_blocksize = SM3_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 400,
diff --git a/arch/arm64/crypto/sm3-neon-glue.c b/arch/arm64/crypto/sm3-neon-glue.c
index 8dd71ce79b69..6c4611a503a3 100644
--- a/arch/arm64/crypto/sm3-neon-glue.c
+++ b/arch/arm64/crypto/sm3-neon-glue.c
@@ -6,14 +6,11 @@
*/
#include <asm/neon.h>
-#include <asm/simd.h>
-#include <linux/unaligned.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
#include <linux/cpufeature.h>
-#include <linux/crypto.h>
+#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,50 +20,20 @@ asmlinkage void sm3_neon_transform(struct sm3_state *sst, u8 const *src,
static int sm3_neon_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- if (!crypto_simd_usable()) {
- sm3_update(shash_desc_ctx(desc), data, len);
- return 0;
- }
+ int remain;
kernel_neon_begin();
- sm3_base_do_update(desc, data, len, sm3_neon_transform);
+ remain = sm3_base_do_update_blocks(desc, data, len, sm3_neon_transform);
kernel_neon_end();
-
- return 0;
-}
-
-static int sm3_neon_final(struct shash_desc *desc, u8 *out)
-{
- if (!crypto_simd_usable()) {
- sm3_final(shash_desc_ctx(desc), out);
- return 0;
- }
-
- kernel_neon_begin();
- sm3_base_do_finalize(desc, sm3_neon_transform);
- kernel_neon_end();
-
- return sm3_base_finish(desc, out);
+ return remain;
}
static int sm3_neon_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!crypto_simd_usable()) {
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- if (len)
- sm3_update(sctx, data, len);
- sm3_final(sctx, out);
- return 0;
- }
-
kernel_neon_begin();
- if (len)
- sm3_base_do_update(desc, data, len, sm3_neon_transform);
- sm3_base_do_finalize(desc, sm3_neon_transform);
+ sm3_base_do_finup(desc, data, len, sm3_neon_transform);
kernel_neon_end();
-
return sm3_base_finish(desc, out);
}
@@ -74,11 +41,12 @@ static struct shash_alg sm3_alg = {
.digestsize = SM3_DIGEST_SIZE,
.init = sm3_base_init,
.update = sm3_neon_update,
- .final = sm3_neon_final,
.finup = sm3_neon_finup,
- .descsize = sizeof(struct sm3_state),
+ .descsize = SM3_STATE_SIZE,
.base.cra_name = "sm3",
.base.cra_driver_name = "sm3-neon",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.base.cra_blocksize = SM3_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index 43741bed874e..7a60e7b559dc 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -8,19 +8,18 @@
* Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <linux/cpufeature.h>
#include <asm/neon.h>
-#include <asm/simd.h>
#include <crypto/b128ops.h>
-#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
-#include <crypto/xts.h>
#include <crypto/sm4.h>
+#include <crypto/utils.h>
+#include <crypto/xts.h>
+#include <linux/cpufeature.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
#define BYTES2BLKS(nbytes) ((nbytes) >> 4)
@@ -64,7 +63,6 @@ struct sm4_mac_tfm_ctx {
};
struct sm4_mac_desc_ctx {
- unsigned int len;
u8 digest[SM4_BLOCK_SIZE];
};
@@ -591,8 +589,6 @@ static int sm4_mac_init(struct shash_desc *desc)
struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
memset(ctx->digest, 0, SM4_BLOCK_SIZE);
- ctx->len = 0;
-
return 0;
}
@@ -601,87 +597,50 @@ static int sm4_mac_update(struct shash_desc *desc, const u8 *p,
{
struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
- unsigned int l, nblocks;
-
- if (len == 0)
- return 0;
-
- if (ctx->len || ctx->len + len < SM4_BLOCK_SIZE) {
- l = min(len, SM4_BLOCK_SIZE - ctx->len);
-
- crypto_xor(ctx->digest + ctx->len, p, l);
- ctx->len += l;
- len -= l;
- p += l;
- }
-
- if (len && (ctx->len % SM4_BLOCK_SIZE) == 0) {
- kernel_neon_begin();
-
- if (len < SM4_BLOCK_SIZE && ctx->len == SM4_BLOCK_SIZE) {
- sm4_ce_crypt_block(tctx->key.rkey_enc,
- ctx->digest, ctx->digest);
- ctx->len = 0;
- } else {
- nblocks = len / SM4_BLOCK_SIZE;
- len %= SM4_BLOCK_SIZE;
+ unsigned int nblocks = len / SM4_BLOCK_SIZE;
- sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, p,
- nblocks, (ctx->len == SM4_BLOCK_SIZE),
- (len != 0));
-
- p += nblocks * SM4_BLOCK_SIZE;
-
- if (len == 0)
- ctx->len = SM4_BLOCK_SIZE;
- }
-
- kernel_neon_end();
-
- if (len) {
- crypto_xor(ctx->digest, p, len);
- ctx->len = len;
- }
- }
-
- return 0;
+ len %= SM4_BLOCK_SIZE;
+ kernel_neon_begin();
+ sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, p,
+ nblocks, false, true);
+ kernel_neon_end();
+ return len;
}
-static int sm4_cmac_final(struct shash_desc *desc, u8 *out)
+static int sm4_cmac_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
const u8 *consts = tctx->consts;
- if (ctx->len != SM4_BLOCK_SIZE) {
- ctx->digest[ctx->len] ^= 0x80;
+ crypto_xor(ctx->digest, src, len);
+ if (len != SM4_BLOCK_SIZE) {
+ ctx->digest[len] ^= 0x80;
consts += SM4_BLOCK_SIZE;
}
-
kernel_neon_begin();
sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, consts, 1,
false, true);
kernel_neon_end();
-
memcpy(out, ctx->digest, SM4_BLOCK_SIZE);
-
return 0;
}
-static int sm4_cbcmac_final(struct shash_desc *desc, u8 *out)
+static int sm4_cbcmac_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
- if (ctx->len) {
+ if (len) {
+ crypto_xor(ctx->digest, src, len);
kernel_neon_begin();
sm4_ce_crypt_block(tctx->key.rkey_enc, ctx->digest,
ctx->digest);
kernel_neon_end();
}
-
memcpy(out, ctx->digest, SM4_BLOCK_SIZE);
-
return 0;
}
@@ -691,6 +650,8 @@ static struct shash_alg sm4_mac_algs[] = {
.cra_name = "cmac(sm4)",
.cra_driver_name = "cmac-sm4-ce",
.cra_priority = 400,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx)
+ SM4_BLOCK_SIZE * 2,
@@ -699,7 +660,7 @@ static struct shash_alg sm4_mac_algs[] = {
.digestsize = SM4_BLOCK_SIZE,
.init = sm4_mac_init,
.update = sm4_mac_update,
- .final = sm4_cmac_final,
+ .finup = sm4_cmac_finup,
.setkey = sm4_cmac_setkey,
.descsize = sizeof(struct sm4_mac_desc_ctx),
}, {
@@ -707,6 +668,8 @@ static struct shash_alg sm4_mac_algs[] = {
.cra_name = "xcbc(sm4)",
.cra_driver_name = "xcbc-sm4-ce",
.cra_priority = 400,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx)
+ SM4_BLOCK_SIZE * 2,
@@ -715,7 +678,7 @@ static struct shash_alg sm4_mac_algs[] = {
.digestsize = SM4_BLOCK_SIZE,
.init = sm4_mac_init,
.update = sm4_mac_update,
- .final = sm4_cmac_final,
+ .finup = sm4_cmac_finup,
.setkey = sm4_xcbc_setkey,
.descsize = sizeof(struct sm4_mac_desc_ctx),
}, {
@@ -723,14 +686,15 @@ static struct shash_alg sm4_mac_algs[] = {
.cra_name = "cbcmac(sm4)",
.cra_driver_name = "cbcmac-sm4-ce",
.cra_priority = 400,
- .cra_blocksize = 1,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
+ .cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx),
.cra_module = THIS_MODULE,
},
.digestsize = SM4_BLOCK_SIZE,
.init = sm4_mac_init,
.update = sm4_mac_update,
- .final = sm4_cbcmac_final,
+ .finup = sm4_cbcmac_finup,
.setkey = sm4_cbcmac_setkey,
.descsize = sizeof(struct sm4_mac_desc_ctx),
}
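sm4_cmac_finup() above implements the usual CMAC last-block rule: fold the leftover bytes into the running state, and if the final block is short, add the 0x80 padding bit and switch from the first derived constant to the second before the closing block encryption. A self-contained sketch of that rule (k1/k2 are my names for the two halves of tctx->consts; which half is which is an assumption based on the standard construction):

#include <stddef.h>
#include <stdint.h>

#define BLKSZ 16	/* SM4 block size in bytes */

static void cmac_fold_last_block(uint8_t state[BLKSZ],
				 const uint8_t *src, size_t len,
				 const uint8_t k1[BLKSZ], const uint8_t k2[BLKSZ])
{
	const uint8_t *subkey = k1;
	size_t i;

	for (i = 0; i < len; i++)		/* XOR the final (possibly short) block in */
		state[i] ^= src[i];

	if (len != BLKSZ) {			/* short block: 10* padding, use k2 */
		state[len] ^= 0x80;
		subkey = k2;
	}

	for (i = 0; i < BLKSZ; i++)		/* fold the subkey in... */
		state[i] ^= subkey[i];
	/* ...and the MAC is the block cipher applied once more to 'state'. */
}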
diff --git a/arch/arm64/hyperv/mshyperv.c b/arch/arm64/hyperv/mshyperv.c
index 4e27cc29c79e..4fdc26ade1d7 100644
--- a/arch/arm64/hyperv/mshyperv.c
+++ b/arch/arm64/hyperv/mshyperv.c
@@ -28,6 +28,48 @@ int hv_get_hypervisor_version(union hv_hypervisor_version_info *info)
}
EXPORT_SYMBOL_GPL(hv_get_hypervisor_version);
+#ifdef CONFIG_ACPI
+
+static bool __init hyperv_detect_via_acpi(void)
+{
+ if (acpi_disabled)
+ return false;
+ /*
+ * Hypervisor ID is only available in ACPI v6+, and the
+ * structure layout was extended in v6 to accommodate that
+ * new field.
+ *
+ * At the very minimum, this check makes sure not to read
+ * past the FADT structure.
+ *
+ * It is also needed to catch running in some unknown
+ * non-Hyper-V environment that has ACPI 5.x or less.
+ * In such a case, it can't be Hyper-V.
+ */
+ if (acpi_gbl_FADT.header.revision < 6)
+ return false;
+ return strncmp((char *)&acpi_gbl_FADT.hypervisor_id, "MsHyperV", 8) == 0;
+}
+
+#else
+
+static bool __init hyperv_detect_via_acpi(void)
+{
+ return false;
+}
+
+#endif
+
+static bool __init hyperv_detect_via_smccc(void)
+{
+ uuid_t hyperv_uuid = UUID_INIT(
+ 0x58ba324d, 0x6447, 0x24cd,
+ 0x75, 0x6c, 0xef, 0x8e,
+ 0x24, 0x70, 0x59, 0x16);
+
+ return arm_smccc_hypervisor_has_uuid(&hyperv_uuid);
+}
+
static int __init hyperv_init(void)
{
struct hv_get_vp_registers_output result;
@@ -36,13 +78,11 @@ static int __init hyperv_init(void)
/*
* Allow for a kernel built with CONFIG_HYPERV to be running in
- * a non-Hyper-V environment, including on DT instead of ACPI.
+ * a non-Hyper-V environment.
+ *
* In such cases, do nothing and return success.
*/
- if (acpi_disabled)
- return 0;
-
- if (strncmp((char *)&acpi_gbl_FADT.hypervisor_id, "MsHyperV", 8))
+ if (!hyperv_detect_via_acpi() && !hyperv_detect_via_smccc())
return 0;
/* Setup the guest ID */
@@ -77,6 +117,9 @@ static int __init hyperv_init(void)
if (ms_hyperv.priv_high & HV_ACCESS_PARTITION_ID)
hv_get_partition_id();
+ ms_hyperv.vtl = get_vtl();
+	if (ms_hyperv.vtl > 0) /* non-default VTL */
+ pr_info("Linux runs in Hyper-V Virtual Trust Level %d\n", ms_hyperv.vtl);
ms_hyperv_late_init();
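hyperv_detect_via_acpi() above relies on two properties of the FADT: the hypervisor identity field only exists from ACPI 6 onwards, and it is a fixed 8-byte field rather than a NUL-terminated string, hence the length-limited compare. A stand-alone sketch of that check (the struct is a stand-in modelling only the two fields the test reads):

#include <stdbool.h>
#include <string.h>

struct fadt_stub {
	unsigned char revision;		/* FADT header revision */
	char hypervisor_id[8];		/* "Hypervisor Vendor Identity", ACPI 6+ only */
};

static bool fadt_says_hyperv(const struct fadt_stub *fadt)
{
	if (fadt->revision < 6)		/* field absent: can't be Hyper-V via ACPI */
		return false;
	return memcmp(fadt->hypervisor_id, "MsHyperV", 8) == 0;
}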
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 81e4157f92b7..71493b760b83 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -44,6 +44,7 @@ struct cpuinfo_arm64 {
u64 reg_dczid;
u64 reg_midr;
u64 reg_revidr;
+ u64 reg_aidr;
u64 reg_gmid;
u64 reg_smidr;
u64 reg_mpamidr;
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index d1cc0571798b..661735616787 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -81,6 +81,7 @@
#define ARM_CPU_PART_CORTEX_A78AE 0xD42
#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A510 0xD46
+#define ARM_CPU_PART_CORTEX_X1C 0xD4C
#define ARM_CPU_PART_CORTEX_A520 0xD80
#define ARM_CPU_PART_CORTEX_A710 0xD47
#define ARM_CPU_PART_CORTEX_A715 0xD4D
@@ -133,6 +134,7 @@
#define HISI_CPU_PART_TSV110 0xD01
#define HISI_CPU_PART_HIP09 0xD02
+#define HISI_CPU_PART_HIP12 0xD06
#define APPLE_CPU_PART_M1_ICESTORM 0x022
#define APPLE_CPU_PART_M1_FIRESTORM 0x023
@@ -168,6 +170,7 @@
#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
+#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
@@ -220,6 +223,7 @@
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09)
+#define MIDR_HISI_HIP12 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP12)
#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
#define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO)
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index ebceaae3c749..1e7c7475e43f 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -38,7 +38,7 @@
orr x0, x0, #HCR_E2H
.LnVHE_\@:
- msr hcr_el2, x0
+ msr_hcr_el2 x0
isb
.endm
@@ -52,7 +52,7 @@
mrs x0, id_aa64mmfr1_el1
ubfx x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
cbz x0, .Lskip_hcrx_\@
- mov_q x0, HCRX_HOST_FLAGS
+ mov_q x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
/* Enable GCS if supported */
mrs_s x1, SYS_ID_AA64PFR1_EL1
@@ -204,26 +204,28 @@
orr x0, x0, #(1 << 62)
.Lskip_spe_fgt_\@:
+
+.Lset_debug_fgt_\@:
msr_s SYS_HDFGRTR_EL2, x0
msr_s SYS_HDFGWTR_EL2, x0
mov x0, xzr
mrs x1, id_aa64pfr1_el1
ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
- cbz x1, .Lskip_debug_fgt_\@
+ cbz x1, .Lskip_sme_fgt_\@
/* Disable nVHE traps of TPIDR2 and SMPRI */
- orr x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK
- orr x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK
+ orr x0, x0, #HFGRTR_EL2_nSMPRI_EL1_MASK
+ orr x0, x0, #HFGRTR_EL2_nTPIDR2_EL0_MASK
-.Lskip_debug_fgt_\@:
+.Lskip_sme_fgt_\@:
mrs_s x1, SYS_ID_AA64MMFR3_EL1
ubfx x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
cbz x1, .Lskip_pie_fgt_\@
/* Disable trapping of PIR_EL1 / PIRE0_EL1 */
- orr x0, x0, #HFGxTR_EL2_nPIR_EL1
- orr x0, x0, #HFGxTR_EL2_nPIRE0_EL1
+ orr x0, x0, #HFGRTR_EL2_nPIR_EL1
+ orr x0, x0, #HFGRTR_EL2_nPIRE0_EL1
.Lskip_pie_fgt_\@:
mrs_s x1, SYS_ID_AA64MMFR3_EL1
@@ -231,17 +233,19 @@
cbz x1, .Lskip_poe_fgt_\@
/* Disable trapping of POR_EL0 */
- orr x0, x0, #HFGxTR_EL2_nPOR_EL0
+ orr x0, x0, #HFGRTR_EL2_nPOR_EL0
.Lskip_poe_fgt_\@:
/* GCS depends on PIE so we don't check it if PIE is absent */
mrs_s x1, SYS_ID_AA64PFR1_EL1
ubfx x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
- cbz x1, .Lset_fgt_\@
+ cbz x1, .Lskip_gce_fgt_\@
/* Disable traps of access to GCS registers at EL0 and EL1 */
- orr x0, x0, #HFGxTR_EL2_nGCS_EL1_MASK
- orr x0, x0, #HFGxTR_EL2_nGCS_EL0_MASK
+ orr x0, x0, #HFGRTR_EL2_nGCS_EL1_MASK
+ orr x0, x0, #HFGRTR_EL2_nGCS_EL0_MASK
+
+.Lskip_gce_fgt_\@:
.Lset_fgt_\@:
msr_s SYS_HFGRTR_EL2, x0
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index d1b1a33f9a8b..e1deed824464 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -20,7 +20,8 @@
#define ESR_ELx_EC_FP_ASIMD UL(0x07)
#define ESR_ELx_EC_CP10_ID UL(0x08) /* EL2 only */
#define ESR_ELx_EC_PAC UL(0x09) /* EL2 and above */
-/* Unallocated EC: 0x0A - 0x0B */
+#define ESR_ELx_EC_OTHER UL(0x0A)
+/* Unallocated EC: 0x0B */
#define ESR_ELx_EC_CP14_64 UL(0x0C)
#define ESR_ELx_EC_BTI UL(0x0D)
#define ESR_ELx_EC_ILL UL(0x0E)
@@ -99,6 +100,8 @@
#define ESR_ELx_AET_CE (UL(6) << ESR_ELx_AET_SHIFT)
/* Shared ISS field definitions for Data/Instruction aborts */
+#define ESR_ELx_VNCR_SHIFT (13)
+#define ESR_ELx_VNCR (UL(1) << ESR_ELx_VNCR_SHIFT)
#define ESR_ELx_SET_SHIFT (11)
#define ESR_ELx_SET_MASK (UL(3) << ESR_ELx_SET_SHIFT)
#define ESR_ELx_FnV_SHIFT (10)
@@ -121,6 +124,15 @@
#define ESR_ELx_FSC_SEA_TTW(n) (0x14 + (n))
#define ESR_ELx_FSC_SECC (0x18)
#define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n))
+#define ESR_ELx_FSC_ADDRSZ (0x00)
+
+/*
+ * Annoyingly, the negative levels for Address size faults aren't laid out
+ * contiguously (or in the desired order)
+ */
+#define ESR_ELx_FSC_ADDRSZ_nL(n) ((n) == -1 ? 0x25 : 0x2C)
+#define ESR_ELx_FSC_ADDRSZ_L(n) ((n) < 0 ? ESR_ELx_FSC_ADDRSZ_nL(n) : \
+ (ESR_ELx_FSC_ADDRSZ + (n)))
/* Status codes for individual page table levels */
#define ESR_ELx_FSC_ACCESS_L(n) (ESR_ELx_FSC_ACCESS + (n))
@@ -161,8 +173,6 @@
#define ESR_ELx_Xs_MASK (GENMASK_ULL(4, 0))
/* ISS field definitions for exceptions taken in to Hyp */
-#define ESR_ELx_FSC_ADDRSZ (0x00)
-#define ESR_ELx_FSC_ADDRSZ_L(n) (ESR_ELx_FSC_ADDRSZ + (n))
#define ESR_ELx_CV (UL(1) << 24)
#define ESR_ELx_COND_SHIFT (20)
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
@@ -174,6 +184,13 @@
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
#define ESR_ELx_xVC_IMM_MASK ((UL(1) << 16) - 1)
+/* ISS definitions for LD64B/ST64B/{T,P}SBCSYNC instructions */
+#define ESR_ELx_ISS_OTHER_ST64BV (0)
+#define ESR_ELx_ISS_OTHER_ST64BV0 (1)
+#define ESR_ELx_ISS_OTHER_LDST64B (2)
+#define ESR_ELx_ISS_OTHER_TSBCSYNC (3)
+#define ESR_ELx_ISS_OTHER_PSBCSYNC (4)
+
#define DISR_EL1_IDS (UL(1) << 24)
/*
* DISR_EL1 and ESR_ELx share the bottom 13 bits, but the RES0 bits may mean
@@ -371,12 +388,14 @@
/*
* ISS values for SME traps
*/
+#define ESR_ELx_SME_ISS_SMTC_MASK GENMASK(2, 0)
+#define ESR_ELx_SME_ISS_SMTC(esr) ((esr) & ESR_ELx_SME_ISS_SMTC_MASK)
-#define ESR_ELx_SME_ISS_SME_DISABLED 0
-#define ESR_ELx_SME_ISS_ILL 1
-#define ESR_ELx_SME_ISS_SM_DISABLED 2
-#define ESR_ELx_SME_ISS_ZA_DISABLED 3
-#define ESR_ELx_SME_ISS_ZT_DISABLED 4
+#define ESR_ELx_SME_ISS_SMTC_SME_DISABLED 0
+#define ESR_ELx_SME_ISS_SMTC_ILL 1
+#define ESR_ELx_SME_ISS_SMTC_SM_DISABLED 2
+#define ESR_ELx_SME_ISS_SMTC_ZA_DISABLED 3
+#define ESR_ELx_SME_ISS_SMTC_ZT_DISABLED 4
/* ISS field definitions for MOPS exceptions */
#define ESR_ELx_MOPS_ISS_MEM_INST (UL(1) << 24)
@@ -433,6 +452,11 @@ static inline bool esr_is_cfi_brk(unsigned long esr)
(esr_brk_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE;
}
+static inline bool esr_is_ubsan_brk(unsigned long esr)
+{
+ return (esr_brk_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM;
+}
+
static inline bool esr_fsc_is_translation_fault(unsigned long esr)
{
esr = esr & ESR_ELx_FSC;
@@ -464,6 +488,39 @@ static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
(esr == ESR_ELx_FSC_ACCESS_L(0));
}
+static inline bool esr_fsc_is_addr_sz_fault(unsigned long esr)
+{
+ esr &= ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_ADDRSZ_L(3)) ||
+ (esr == ESR_ELx_FSC_ADDRSZ_L(2)) ||
+ (esr == ESR_ELx_FSC_ADDRSZ_L(1)) ||
+ (esr == ESR_ELx_FSC_ADDRSZ_L(0)) ||
+ (esr == ESR_ELx_FSC_ADDRSZ_L(-1));
+}
+
+static inline bool esr_fsc_is_sea_ttw(unsigned long esr)
+{
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_SEA_TTW(3)) ||
+ (esr == ESR_ELx_FSC_SEA_TTW(2)) ||
+ (esr == ESR_ELx_FSC_SEA_TTW(1)) ||
+ (esr == ESR_ELx_FSC_SEA_TTW(0)) ||
+ (esr == ESR_ELx_FSC_SEA_TTW(-1));
+}
+
+static inline bool esr_fsc_is_secc_ttw(unsigned long esr)
+{
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_SECC_TTW(3)) ||
+ (esr == ESR_ELx_FSC_SECC_TTW(2)) ||
+ (esr == ESR_ELx_FSC_SECC_TTW(1)) ||
+ (esr == ESR_ELx_FSC_SECC_TTW(0)) ||
+ (esr == ESR_ELx_FSC_SECC_TTW(-1));
+}
+
/* Indicate whether ESR.EC==0x1A is for an ERETAx instruction */
static inline bool esr_iss_is_eretax(unsigned long esr)
{
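The ESR_ELx_FSC_ADDRSZ_L() macro above papers over the fact that the address-size fault codes for negative lookup levels are not contiguous with the level 0-3 codes. A small self-checking sketch of the values it yields, under my reading of the macro:

#include <assert.h>

#define FSC_ADDRSZ_DEMO(n)	((n) < 0 ? ((n) == -1 ? 0x25 : 0x2C) : (0x00 + (n)))

int main(void)
{
	assert(FSC_ADDRSZ_DEMO(-1) == 0x25);	/* level -1 has its own encoding */
	assert(FSC_ADDRSZ_DEMO(-2) == 0x2C);	/* deeper negative levels share one */
	assert(FSC_ADDRSZ_DEMO(0)  == 0x00);	/* levels 0-3 are simply 0x00 + n */
	assert(FSC_ADDRSZ_DEMO(3)  == 0x03);
	return 0;
}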
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 87e307804b99..635a43c4ec85 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -48,6 +48,12 @@ enum fixed_addresses {
FIX_EARLYCON_MEM_BASE,
FIX_TEXT_POKE0,
+#ifdef CONFIG_KVM
+ /* One slot per CPU, mapping the guest's VNCR page at EL2. */
+ FIX_VNCR_END,
+ FIX_VNCR = FIX_VNCR_END + NR_CPUS,
+#endif
+
#ifdef CONFIG_ACPI_APEI_GHES
/* Used for GHES mapping from assorted contexts */
FIX_APEI_GHES_IRQ,
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 564bc09b3e06..b8cf0ea43cc0 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -6,6 +6,7 @@
#define __ASM_FP_H
#include <asm/errno.h>
+#include <asm/percpu.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
@@ -76,7 +77,6 @@ extern void fpsimd_load_state(struct user_fpsimd_state *state);
extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void);
-extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
@@ -93,9 +93,12 @@ struct cpu_fp_state {
enum fp_type to_save;
};
+DECLARE_PER_CPU(struct cpu_fp_state, fpsimd_last_state);
+
extern void fpsimd_bind_state_to_cpu(struct cpu_fp_state *fp_state);
extern void fpsimd_flush_task_state(struct task_struct *target);
+extern void fpsimd_save_and_flush_current_state(void);
extern void fpsimd_save_and_flush_cpu_state(void);
static inline bool thread_sm_enabled(struct thread_struct *thread)
@@ -108,6 +111,8 @@ static inline bool thread_za_enabled(struct thread_struct *thread)
return system_supports_sme() && (thread->svcr & SVCR_ZA_MASK);
}
+extern void task_smstop_sm(struct task_struct *task);
+
/* Maximum VL that SVE/SME VL-agnostic software can transparently support */
#define VL_ARCH_MAX 0x100
@@ -195,10 +200,8 @@ struct vl_info {
extern void sve_alloc(struct task_struct *task, bool flush);
extern void fpsimd_release_task(struct task_struct *task);
-extern void fpsimd_sync_to_sve(struct task_struct *task);
-extern void fpsimd_force_sync_to_sve(struct task_struct *task);
-extern void sve_sync_to_fpsimd(struct task_struct *task);
-extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);
+extern void fpsimd_sync_from_effective_state(struct task_struct *task);
+extern void fpsimd_sync_to_effective_state_zeropad(struct task_struct *task);
extern int vec_set_vector_length(struct task_struct *task, enum vec_type type,
unsigned long vl, unsigned long flags);
@@ -292,14 +295,29 @@ static inline bool sve_vq_available(unsigned int vq)
return vq_available(ARM64_VEC_SVE, vq);
}
-size_t sve_state_size(struct task_struct const *task);
+static inline size_t __sve_state_size(unsigned int sve_vl, unsigned int sme_vl)
+{
+ unsigned int vl = max(sve_vl, sme_vl);
+ return SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl));
+}
+
+/*
+ * Return how many bytes of memory are required to store the full SVE
+ * state for task, given task's currently configured vector length.
+ */
+static inline size_t sve_state_size(struct task_struct const *task)
+{
+ unsigned int sve_vl = task_get_sve_vl(task);
+ unsigned int sme_vl = task_get_sme_vl(task);
+ return __sve_state_size(sve_vl, sme_vl);
+}
#else /* ! CONFIG_ARM64_SVE */
static inline void sve_alloc(struct task_struct *task, bool flush) { }
static inline void fpsimd_release_task(struct task_struct *task) { }
-static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
-static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }
+static inline void fpsimd_sync_from_effective_state(struct task_struct *task) { }
+static inline void fpsimd_sync_to_effective_state_zeropad(struct task_struct *task) { }
static inline int sve_max_virtualisable_vl(void)
{
@@ -333,6 +351,11 @@ static inline void vec_update_vq_map(enum vec_type t) { }
static inline int vec_verify_vq_map(enum vec_type t) { return 0; }
static inline void sve_setup(void) { }
+static inline size_t __sve_state_size(unsigned int sve_vl, unsigned int sme_vl)
+{
+ return 0;
+}
+
static inline size_t sve_state_size(struct task_struct const *task)
{
return 0;
@@ -385,6 +408,16 @@ extern int sme_set_current_vl(unsigned long arg);
extern int sme_get_current_vl(void);
extern void sme_suspend_exit(void);
+static inline size_t __sme_state_size(unsigned int sme_vl)
+{
+ size_t size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(sme_vl));
+
+ if (system_supports_sme2())
+ size += ZT_SIG_REG_SIZE;
+
+ return size;
+}
+
/*
* Return how many bytes of memory are required to store the full SME
* specific state for task, given task's currently configured vector
@@ -392,15 +425,7 @@ extern void sme_suspend_exit(void);
*/
static inline size_t sme_state_size(struct task_struct const *task)
{
- unsigned int vl = task_get_sme_vl(task);
- size_t size;
-
- size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));
-
- if (system_supports_sme2())
- size += ZT_SIG_REG_SIZE;
-
- return size;
+ return __sme_state_size(task_get_sme_vl(task));
}
#else
@@ -421,6 +446,11 @@ static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
static inline int sme_get_current_vl(void) { return -EINVAL; }
static inline void sme_suspend_exit(void) { }
+static inline size_t __sme_state_size(unsigned int sme_vl)
+{
+ return 0;
+}
+
static inline size_t sme_state_size(struct task_struct const *task)
{
return 0;
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index cbfa7b6f2e09..77d6b8c63d4e 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -41,7 +41,7 @@ do { \
\
___hcr = read_sysreg(hcr_el2); \
if (!(___hcr & HCR_TGE)) { \
- write_sysreg(___hcr | HCR_TGE, hcr_el2); \
+ write_sysreg_hcr(___hcr | HCR_TGE); \
isb(); \
} \
/* \
@@ -82,7 +82,7 @@ do { \
*/ \
barrier(); \
if (!___ctx->cnt && !(___hcr & HCR_TGE)) \
- write_sysreg(___hcr, hcr_el2); \
+ write_sysreg_hcr(___hcr); \
} while (0)
static inline void ack_bad_irq(unsigned int irq)
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 07fbf5bf85a7..2a8155c4a882 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -69,29 +69,38 @@ extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
#include <asm-generic/hugetlb.h>
-#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
-static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end)
+static inline void __flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ unsigned long stride,
+ bool last_level)
{
- unsigned long stride = huge_page_size(hstate_vma(vma));
-
switch (stride) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
- __flush_tlb_range(vma, start, end, PUD_SIZE, false, 1);
+ __flush_tlb_range(vma, start, end, PUD_SIZE, last_level, 1);
break;
#endif
case CONT_PMD_SIZE:
case PMD_SIZE:
- __flush_tlb_range(vma, start, end, PMD_SIZE, false, 2);
+ __flush_tlb_range(vma, start, end, PMD_SIZE, last_level, 2);
break;
case CONT_PTE_SIZE:
- __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3);
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, 3);
break;
default:
- __flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, TLBI_TTL_UNKNOWN);
}
}
+#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end)
+{
+ unsigned long stride = huge_page_size(hstate_vma(vma));
+
+ __flush_hugetlb_tlb_range(vma, start, end, stride, false);
+}
+
#endif /* __ASM_HUGETLB_H */
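Hoisting the switch into __flush_hugetlb_tlb_range() keeps the existing stride-to-TTL-level mapping but lets callers ask for a last-level-only invalidation. The selection, rewritten as a plain function for illustration (the size parameters stand in for the kernel's page-table geometry macros, and -1 stands in for TLBI_TTL_UNKNOWN):

struct tlbi_choice {
	unsigned long stride;	/* granule used to step through the range */
	int level;		/* arm64 TTL hint for the invalidation    */
};

static struct tlbi_choice pick_tlbi(unsigned long huge_size,
				    unsigned long pud_size,
				    unsigned long cont_pmd_size,
				    unsigned long pmd_size,
				    unsigned long cont_pte_size,
				    unsigned long page_size)
{
	if (huge_size == pud_size)
		return (struct tlbi_choice){ pud_size, 1 };
	if (huge_size == cont_pmd_size || huge_size == pmd_size)
		return (struct tlbi_choice){ pmd_size, 2 };
	if (huge_size == cont_pte_size)
		return (struct tlbi_choice){ page_size, 3 };
	return (struct tlbi_choice){ page_size, -1 };	/* level unknown */
}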
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 39577f1d079a..18c7811774d3 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -706,6 +706,7 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
}
#endif
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
+u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type);
u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
enum aarch64_insn_system_register sysreg);
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 974d72b5905b..1da290aeedce 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -12,67 +12,70 @@
#include <asm/sysreg.h>
#include <asm/types.h>
-/* Hyp Configuration Register (HCR) bits */
-
-#define HCR_TID5 (UL(1) << 58)
-#define HCR_DCT (UL(1) << 57)
-#define HCR_ATA_SHIFT 56
-#define HCR_ATA (UL(1) << HCR_ATA_SHIFT)
-#define HCR_TTLBOS (UL(1) << 55)
-#define HCR_TTLBIS (UL(1) << 54)
-#define HCR_ENSCXT (UL(1) << 53)
-#define HCR_TOCU (UL(1) << 52)
-#define HCR_AMVOFFEN (UL(1) << 51)
-#define HCR_TICAB (UL(1) << 50)
-#define HCR_TID4 (UL(1) << 49)
-#define HCR_FIEN (UL(1) << 47)
-#define HCR_FWB (UL(1) << 46)
-#define HCR_NV2 (UL(1) << 45)
-#define HCR_AT (UL(1) << 44)
-#define HCR_NV1 (UL(1) << 43)
-#define HCR_NV (UL(1) << 42)
-#define HCR_API (UL(1) << 41)
-#define HCR_APK (UL(1) << 40)
-#define HCR_TEA (UL(1) << 37)
-#define HCR_TERR (UL(1) << 36)
-#define HCR_TLOR (UL(1) << 35)
-#define HCR_E2H (UL(1) << 34)
-#define HCR_ID (UL(1) << 33)
-#define HCR_CD (UL(1) << 32)
-#define HCR_RW_SHIFT 31
-#define HCR_RW (UL(1) << HCR_RW_SHIFT)
-#define HCR_TRVM (UL(1) << 30)
-#define HCR_HCD (UL(1) << 29)
-#define HCR_TDZ (UL(1) << 28)
-#define HCR_TGE (UL(1) << 27)
-#define HCR_TVM (UL(1) << 26)
-#define HCR_TTLB (UL(1) << 25)
-#define HCR_TPU (UL(1) << 24)
-#define HCR_TPC (UL(1) << 23) /* HCR_TPCP if FEAT_DPB */
-#define HCR_TSW (UL(1) << 22)
-#define HCR_TACR (UL(1) << 21)
-#define HCR_TIDCP (UL(1) << 20)
-#define HCR_TSC (UL(1) << 19)
-#define HCR_TID3 (UL(1) << 18)
-#define HCR_TID2 (UL(1) << 17)
-#define HCR_TID1 (UL(1) << 16)
-#define HCR_TID0 (UL(1) << 15)
-#define HCR_TWE (UL(1) << 14)
-#define HCR_TWI (UL(1) << 13)
-#define HCR_DC (UL(1) << 12)
-#define HCR_BSU (3 << 10)
-#define HCR_BSU_IS (UL(1) << 10)
-#define HCR_FB (UL(1) << 9)
-#define HCR_VSE (UL(1) << 8)
-#define HCR_VI (UL(1) << 7)
-#define HCR_VF (UL(1) << 6)
-#define HCR_AMO (UL(1) << 5)
-#define HCR_IMO (UL(1) << 4)
-#define HCR_FMO (UL(1) << 3)
-#define HCR_PTW (UL(1) << 2)
-#define HCR_SWIO (UL(1) << 1)
-#define HCR_VM (UL(1) << 0)
-#define HCR_RES0 ((UL(1) << 48) | (UL(1) << 39))
+/*
+ * Because I'm terribly lazy and repainting the whole of the KVM code
+ * with the proper names is a pain, use a helper to map the names
+ * inherited from AArch32 onto the new fancy nomenclature. One day...
+ */
+#define __HCR(x) HCR_EL2_##x
+
+#define HCR_TID5 __HCR(TID5)
+#define HCR_DCT __HCR(DCT)
+#define HCR_ATA_SHIFT __HCR(ATA_SHIFT)
+#define HCR_ATA __HCR(ATA)
+#define HCR_TTLBOS __HCR(TTLBOS)
+#define HCR_TTLBIS __HCR(TTLBIS)
+#define HCR_ENSCXT __HCR(EnSCXT)
+#define HCR_TOCU __HCR(TOCU)
+#define HCR_AMVOFFEN __HCR(AMVOFFEN)
+#define HCR_TICAB __HCR(TICAB)
+#define HCR_TID4 __HCR(TID4)
+#define HCR_FIEN __HCR(FIEN)
+#define HCR_FWB __HCR(FWB)
+#define HCR_NV2 __HCR(NV2)
+#define HCR_AT __HCR(AT)
+#define HCR_NV1 __HCR(NV1)
+#define HCR_NV __HCR(NV)
+#define HCR_API __HCR(API)
+#define HCR_APK __HCR(APK)
+#define HCR_TEA __HCR(TEA)
+#define HCR_TERR __HCR(TERR)
+#define HCR_TLOR __HCR(TLOR)
+#define HCR_E2H __HCR(E2H)
+#define HCR_ID __HCR(ID)
+#define HCR_CD __HCR(CD)
+#define HCR_RW __HCR(RW)
+#define HCR_TRVM __HCR(TRVM)
+#define HCR_HCD __HCR(HCD)
+#define HCR_TDZ __HCR(TDZ)
+#define HCR_TGE __HCR(TGE)
+#define HCR_TVM __HCR(TVM)
+#define HCR_TTLB __HCR(TTLB)
+#define HCR_TPU __HCR(TPU)
+#define HCR_TPC __HCR(TPCP)
+#define HCR_TSW __HCR(TSW)
+#define HCR_TACR __HCR(TACR)
+#define HCR_TIDCP __HCR(TIDCP)
+#define HCR_TSC __HCR(TSC)
+#define HCR_TID3 __HCR(TID3)
+#define HCR_TID2 __HCR(TID2)
+#define HCR_TID1 __HCR(TID1)
+#define HCR_TID0 __HCR(TID0)
+#define HCR_TWE __HCR(TWE)
+#define HCR_TWI __HCR(TWI)
+#define HCR_DC __HCR(DC)
+#define HCR_BSU __HCR(BSU)
+#define HCR_BSU_IS __HCR(BSU_IS)
+#define HCR_FB __HCR(FB)
+#define HCR_VSE __HCR(VSE)
+#define HCR_VI __HCR(VI)
+#define HCR_VF __HCR(VF)
+#define HCR_AMO __HCR(AMO)
+#define HCR_IMO __HCR(IMO)
+#define HCR_FMO __HCR(FMO)
+#define HCR_PTW __HCR(PTW)
+#define HCR_SWIO __HCR(SWIO)
+#define HCR_VM __HCR(VM)
/*
* The bits we set in HCR:
@@ -100,9 +103,8 @@
HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
-#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
+#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H | HCR_AMO | HCR_IMO | HCR_FMO)
-#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
#define MPAMHCR_HOST_FLAGS 0
/* TCR_EL2 Registers bits */
@@ -313,56 +315,19 @@
GENMASK(15, 0))
/*
- * FGT register definitions
- *
- * RES0 and polarity masks as of DDI0487J.a, to be updated as needed.
- * We're not using the generated masks as they are usually ahead of
- * the published ARM ARM, which we use as a reference.
- *
- * Once we get to a point where the two describe the same thing, we'll
- * merge the definitions. One day.
- */
-#define __HFGRTR_EL2_RES0 HFGxTR_EL2_RES0
-#define __HFGRTR_EL2_MASK GENMASK(49, 0)
-#define __HFGRTR_EL2_nMASK ~(__HFGRTR_EL2_RES0 | __HFGRTR_EL2_MASK)
-
-/*
- * The HFGWTR bits are a subset of HFGRTR bits. To ensure we don't miss any
- * future additions, define __HFGWTR* macros relative to __HFGRTR* ones.
+ * Polarity masks for HCRX_EL2, limited to the bits that we know about
+ * at this point in time. It doesn't mean that we actually *handle*
+ * them, but that at least those that are not advertised to a guest
+ * will be RES0 for that guest.
*/
-#define __HFGRTR_ONLY_MASK (BIT(46) | BIT(42) | BIT(40) | BIT(28) | \
- GENMASK(26, 25) | BIT(21) | BIT(18) | \
- GENMASK(15, 14) | GENMASK(10, 9) | BIT(2))
-#define __HFGWTR_EL2_RES0 (__HFGRTR_EL2_RES0 | __HFGRTR_ONLY_MASK)
-#define __HFGWTR_EL2_MASK (__HFGRTR_EL2_MASK & ~__HFGRTR_ONLY_MASK)
-#define __HFGWTR_EL2_nMASK ~(__HFGWTR_EL2_RES0 | __HFGWTR_EL2_MASK)
-
-#define __HFGITR_EL2_RES0 HFGITR_EL2_RES0
-#define __HFGITR_EL2_MASK (BIT(62) | BIT(60) | GENMASK(54, 0))
-#define __HFGITR_EL2_nMASK ~(__HFGITR_EL2_RES0 | __HFGITR_EL2_MASK)
-
-#define __HDFGRTR_EL2_RES0 HDFGRTR_EL2_RES0
-#define __HDFGRTR_EL2_MASK (BIT(63) | GENMASK(58, 50) | GENMASK(48, 43) | \
- GENMASK(41, 40) | GENMASK(37, 22) | \
- GENMASK(19, 9) | GENMASK(7, 0))
-#define __HDFGRTR_EL2_nMASK ~(__HDFGRTR_EL2_RES0 | __HDFGRTR_EL2_MASK)
-
-#define __HDFGWTR_EL2_RES0 HDFGWTR_EL2_RES0
-#define __HDFGWTR_EL2_MASK (GENMASK(57, 52) | GENMASK(50, 48) | \
- GENMASK(46, 44) | GENMASK(42, 41) | \
- GENMASK(37, 35) | GENMASK(33, 31) | \
- GENMASK(29, 23) | GENMASK(21, 10) | \
- GENMASK(8, 7) | GENMASK(5, 0))
-#define __HDFGWTR_EL2_nMASK ~(__HDFGWTR_EL2_RES0 | __HDFGWTR_EL2_MASK)
-
-#define __HAFGRTR_EL2_RES0 HAFGRTR_EL2_RES0
-#define __HAFGRTR_EL2_MASK (GENMASK(49, 17) | GENMASK(4, 0))
-#define __HAFGRTR_EL2_nMASK ~(__HAFGRTR_EL2_RES0 | __HAFGRTR_EL2_MASK)
-
-/* Similar definitions for HCRX_EL2 */
-#define __HCRX_EL2_RES0 HCRX_EL2_RES0
-#define __HCRX_EL2_MASK (BIT(6))
-#define __HCRX_EL2_nMASK ~(__HCRX_EL2_RES0 | __HCRX_EL2_MASK)
+#define __HCRX_EL2_MASK (BIT_ULL(6))
+#define __HCRX_EL2_nMASK (GENMASK_ULL(24, 14) | \
+ GENMASK_ULL(11, 7) | \
+ GENMASK_ULL(5, 0))
+#define __HCRX_EL2_RES0 ~(__HCRX_EL2_nMASK | __HCRX_EL2_MASK)
+#define __HCRX_EL2_RES1 ~(__HCRX_EL2_nMASK | \
+ __HCRX_EL2_MASK | \
+ __HCRX_EL2_RES0)
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~UL(0xf))
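The __HCR() helper above is plain token pasting onto the generated HCR_EL2_* names, so the legacy HCR_* spellings keep working without repainting every user. A tiny demonstration (the _DEMO names are mine; the bit position matches the removed HCR_TGE definition):

#include <stdio.h>

#define HCR_EL2_TGE	(1ULL << 27)	/* same bit as the old HCR_TGE define */
#define __HCR_DEMO(x)	HCR_EL2_##x

int main(void)
{
	/* __HCR_DEMO(TGE) pastes into HCR_EL2_TGE before compilation. */
	printf("%llu\n", (unsigned long long)__HCR_DEMO(TGE));
	return 0;
}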
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index d7cf66573aca..bd020fc28aa9 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -305,7 +305,12 @@ static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vc
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
- return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
+ u64 hpfar = vcpu->arch.fault.hpfar_el2;
+
+ if (unlikely(!(hpfar & HPFAR_EL2_NS)))
+ return INVALID_GPA;
+
+ return FIELD_GET(HPFAR_EL2_FIPA, hpfar) << 12;
}
static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e98cfe7855a6..6ce2c5173482 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -39,7 +39,7 @@
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
-#define KVM_VCPU_MAX_FEATURES 7
+#define KVM_VCPU_MAX_FEATURES 9
#define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1)
#define KVM_REQ_SLEEP \
@@ -53,6 +53,7 @@
#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
#define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8)
#define KVM_REQ_GUEST_HYP_IRQ_PENDING KVM_ARCH_REQ(9)
+#define KVM_REQ_MAP_L1_VNCR_EL2 KVM_ARCH_REQ(10)
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
KVM_DIRTY_LOG_INITIALLY_SET)
@@ -273,11 +274,17 @@ struct kvm_sysreg_masks;
enum fgt_group_id {
__NO_FGT_GROUP__,
- HFGxTR_GROUP,
+ HFGRTR_GROUP,
+ HFGWTR_GROUP = HFGRTR_GROUP,
HDFGRTR_GROUP,
HDFGWTR_GROUP = HDFGRTR_GROUP,
HFGITR_GROUP,
HAFGRTR_GROUP,
+ HFGRTR2_GROUP,
+ HFGWTR2_GROUP = HFGRTR2_GROUP,
+ HDFGRTR2_GROUP,
+ HDFGWTR2_GROUP = HDFGRTR2_GROUP,
+ HFGITR2_GROUP,
/* Must be last */
__NR_FGT_GROUP_IDS__
@@ -359,8 +366,8 @@ struct kvm_arch {
cpumask_var_t supported_cpus;
- /* PMCR_EL0.N value for the guest */
- u8 pmcr_n;
+ /* Maximum number of counters for the guest */
+ u8 nr_pmu_counters;
/* Iterator for idreg debugfs */
u8 idreg_debugfs_iter;
@@ -389,6 +396,9 @@ struct kvm_arch {
/* Masks for VNCR-backed and general EL2 sysregs */
struct kvm_sysreg_masks *sysreg_masks;
+ /* Count the number of VNCR_EL2 currently mapped */
+ atomic_t vncr_map_count;
+
/*
* For an untrusted host VM, 'pkvm.handle' is used to lookup
* the associated pKVM instance in the hypervisor.
@@ -561,6 +571,13 @@ enum vcpu_sysreg {
VNCR(HDFGRTR_EL2),
VNCR(HDFGWTR_EL2),
VNCR(HAFGRTR_EL2),
+ VNCR(HFGRTR2_EL2),
+ VNCR(HFGWTR2_EL2),
+ VNCR(HFGITR2_EL2),
+ VNCR(HDFGRTR2_EL2),
+ VNCR(HDFGWTR2_EL2),
+
+ VNCR(VNCR_EL2),
VNCR(CNTVOFF_EL2),
VNCR(CNTV_CVAL_EL0),
@@ -606,6 +623,37 @@ struct kvm_sysreg_masks {
} mask[NR_SYS_REGS - __SANITISED_REG_START__];
};
+struct fgt_masks {
+ const char *str;
+ u64 mask;
+ u64 nmask;
+ u64 res0;
+};
+
+extern struct fgt_masks hfgrtr_masks;
+extern struct fgt_masks hfgwtr_masks;
+extern struct fgt_masks hfgitr_masks;
+extern struct fgt_masks hdfgrtr_masks;
+extern struct fgt_masks hdfgwtr_masks;
+extern struct fgt_masks hafgrtr_masks;
+extern struct fgt_masks hfgrtr2_masks;
+extern struct fgt_masks hfgwtr2_masks;
+extern struct fgt_masks hfgitr2_masks;
+extern struct fgt_masks hdfgrtr2_masks;
+extern struct fgt_masks hdfgwtr2_masks;
+
+extern struct fgt_masks kvm_nvhe_sym(hfgrtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgwtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgitr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hdfgrtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hdfgwtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hafgrtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgrtr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgwtr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgitr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(hdfgrtr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(hdfgwtr2_masks);
+
struct kvm_cpu_context {
struct user_pt_regs regs; /* sp = sp_el0 */
@@ -654,6 +702,8 @@ struct kvm_host_data {
#define KVM_HOST_DATA_FLAG_HAS_TRBE 1
#define KVM_HOST_DATA_FLAG_TRBE_ENABLED 4
#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5
+#define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT 6
+#define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED 7
unsigned long flags;
struct kvm_cpu_context host_ctxt;
@@ -730,6 +780,8 @@ struct vcpu_reset_state {
bool reset;
};
+struct vncr_tlb;
+
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
@@ -824,6 +876,9 @@ struct kvm_vcpu_arch {
/* Per-vcpu CCSIDR override or NULL */
u32 *ccsidr;
+
+ /* Per-vcpu TLB for VNCR_EL2 -- NULL when !NV */
+ struct vncr_tlb *vncr_tlb;
};
/*
@@ -971,20 +1026,22 @@ struct kvm_vcpu_arch {
#define vcpu_sve_zcr_elx(vcpu) \
(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
-#define vcpu_sve_state_size(vcpu) ({ \
+#define sve_state_size_from_vl(sve_max_vl) ({ \
size_t __size_ret; \
- unsigned int __vcpu_vq; \
+ unsigned int __vq; \
\
- if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \
+ if (WARN_ON(!sve_vl_valid(sve_max_vl))) { \
__size_ret = 0; \
} else { \
- __vcpu_vq = vcpu_sve_max_vq(vcpu); \
- __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); \
+ __vq = sve_vq_from_vl(sve_max_vl); \
+ __size_ret = SVE_SIG_REGS_SIZE(__vq); \
} \
\
__size_ret; \
})
+#define vcpu_sve_state_size(vcpu) sve_state_size_from_vl((vcpu)->arch.sve_max_vl)
+
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
KVM_GUESTDBG_USE_SW_BP | \
KVM_GUESTDBG_USE_HW | \
@@ -1263,9 +1320,6 @@ int __init populate_sysreg_config(const struct sys_reg_desc *sr,
unsigned int idx);
int __init populate_nv_trap_config(void);
-bool lock_all_vcpus(struct kvm *kvm);
-void unlock_all_vcpus(struct kvm *kvm);
-
void kvm_calculate_traps(struct kvm_vcpu *vcpu);
/* MMIO helpers */
@@ -1550,12 +1604,16 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
kvm_cmp_feat_signed(kvm, id, fld, op, limit) : \
kvm_cmp_feat_unsigned(kvm, id, fld, op, limit))
-#define kvm_has_feat(kvm, id, fld, limit) \
+#define __kvm_has_feat(kvm, id, fld, limit) \
kvm_cmp_feat(kvm, id, fld, >=, limit)
-#define kvm_has_feat_enum(kvm, id, fld, val) \
+#define kvm_has_feat(kvm, ...) __kvm_has_feat(kvm, __VA_ARGS__)
+
+#define __kvm_has_feat_enum(kvm, id, fld, val) \
kvm_cmp_feat_unsigned(kvm, id, fld, ==, val)
+#define kvm_has_feat_enum(kvm, ...) __kvm_has_feat_enum(kvm, __VA_ARGS__)
+
#define kvm_has_feat_range(kvm, id, fld, min, max) \
(kvm_cmp_feat(kvm, id, fld, >=, min) && \
kvm_cmp_feat(kvm, id, fld, <=, max))
@@ -1588,4 +1646,14 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
#define kvm_has_s1poe(k) \
(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
+static inline bool kvm_arch_has_irq_bypass(void)
+{
+ return true;
+}
+
+void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt);
+void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1);
+void check_feature_map(void);
+
+
#endif /* __ARM64_KVM_HOST_H__ */
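
The kvm_has_feat()/kvm_has_feat_enum() change above adds a variadic wrapper so that a single macro argument may itself expand into the whole (id, fld, limit) triplet. The self-contained sketch below shows why that extra expansion step is needed; FEAT_EXAMPLE and has_feat_impl() are invented for illustration.

#include <stdio.h>

static int has_feat_impl(const char *id, const char *fld, int limit)
{
	printf("checking %s.%s >= %d\n", id, fld, limit);
	return 1;
}

#define __has_feat(id, fld, limit)  has_feat_impl(#id, #fld, (limit))
#define has_feat(...)               __has_feat(__VA_ARGS__)

/* One name standing in for the whole (id, fld, limit) triplet. */
#define FEAT_EXAMPLE  ID_AA64MMFR0_EL1, FGT, 2

int main(void)
{
	/*
	 * Calling __has_feat(FEAT_EXAMPLE) directly would fail: the
	 * preprocessor sees one argument for a three-parameter macro.
	 * The variadic wrapper expands FEAT_EXAMPLE first and then
	 * rescans with the three resulting arguments.
	 */
	has_feat(FEAT_EXAMPLE);
	return 0;
}
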
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 692f403c1896..0bd07ea068a1 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -231,6 +231,38 @@ static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
shift; \
})
+static inline u64 decode_range_tlbi(u64 val, u64 *range, u16 *asid)
+{
+ u64 base, tg, num, scale;
+ int shift;
+
+ tg = FIELD_GET(GENMASK(47, 46), val);
+
+ switch(tg) {
+ case 1:
+ shift = 12;
+ break;
+ case 2:
+ shift = 14;
+ break;
+ case 3:
+ default: /* IMPDEF: handle tg==0 as 64k */
+ shift = 16;
+ break;
+ }
+
+ base = (val & GENMASK(36, 0)) << shift;
+
+ if (asid)
+ *asid = FIELD_GET(TLBIR_ASID_MASK, val);
+
+ scale = FIELD_GET(GENMASK(45, 44), val);
+ num = FIELD_GET(GENMASK(43, 39), val);
+ *range = __TLBI_RANGE_PAGES(num, scale) << shift;
+
+ return base;
+}
+
static inline unsigned int ps_to_output_size(unsigned int ps)
{
switch (ps) {
@@ -245,4 +277,72 @@ static inline unsigned int ps_to_output_size(unsigned int ps)
}
}
+enum trans_regime {
+ TR_EL10,
+ TR_EL20,
+ TR_EL2,
+};
+
+struct s1_walk_info {
+ u64 baddr;
+ enum trans_regime regime;
+ unsigned int max_oa_bits;
+ unsigned int pgshift;
+ unsigned int txsz;
+ int sl;
+ bool as_el0;
+ bool hpd;
+ bool e0poe;
+ bool poe;
+ bool pan;
+ bool be;
+ bool s2;
+};
+
+struct s1_walk_result {
+ union {
+ struct {
+ u64 desc;
+ u64 pa;
+ s8 level;
+ u8 APTable;
+ bool nG;
+ u16 asid;
+ bool UXNTable;
+ bool PXNTable;
+ bool uwxn;
+ bool uov;
+ bool ur;
+ bool uw;
+ bool ux;
+ bool pwxn;
+ bool pov;
+ bool pr;
+ bool pw;
+ bool px;
+ };
+ struct {
+ u8 fst;
+ bool ptw;
+ bool s2;
+ };
+ };
+ bool failed;
+};
+
+int __kvm_translate_va(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
+ struct s1_walk_result *wr, u64 va);
+
+/* VNCR management */
+int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu);
+int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu);
+void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val);
+
+#define vncr_fixmap(c) \
+ ({ \
+ u32 __c = (c); \
+ BUG_ON(__c >= NR_CPUS); \
+ (FIX_VNCR - __c); \
+ })
+
#endif /* __ARM64_KVM_NESTED_H */
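
decode_range_tlbi() above compresses the range-TLBI operand decode into a few FIELD_GET()s. The standalone sketch below reproduces the computation with local macros so the numbers can be checked by hand; the field positions mirror the hunk and the pages formula follows the kernel's __TLBI_RANGE_PAGES(), but the helper names here are invented.

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)  (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD(mask, val)   (((val) & (mask)) / ((mask) & -(mask)))

#define TLBI_TG     GENMASK_ULL(47, 46)
#define TLBI_SCALE  GENMASK_ULL(45, 44)
#define TLBI_NUM    GENMASK_ULL(43, 39)
#define TLBI_BADDR  GENMASK_ULL(36, 0)

#define TLBI_RANGE_PAGES(num, scale)  (((uint64_t)(num) + 1) << (5 * (scale) + 1))

static uint64_t decode(uint64_t val, uint64_t *range)
{
	unsigned int shift;

	switch (FIELD(TLBI_TG, val)) {
	case 1:  shift = 12; break;	/* 4K granule */
	case 2:  shift = 14; break;	/* 16K granule */
	default: shift = 16; break;	/* 64K granule, including the IMPDEF tg==0 case */
	}

	*range = TLBI_RANGE_PAGES(FIELD(TLBI_NUM, val), FIELD(TLBI_SCALE, val)) << shift;
	return (val & TLBI_BADDR) << shift;
}

int main(void)
{
	/* TG=1 (4K), SCALE=0, NUM=3, BaseADDR=0x1000: 8 pages starting at 0x1000000 */
	uint64_t val = (1ULL << 46) | (3ULL << 39) | 0x1000;
	uint64_t range;
	uint64_t base = decode(val, &range);

	printf("base = 0x%llx, range = 0x%llx\n",
	       (unsigned long long)base, (unsigned long long)range);
	return 0;
}
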
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 6b9d274052c7..2888b5d03757 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -59,6 +59,11 @@ typedef u64 kvm_pte_t;
#define KVM_PHYS_INVALID (-1ULL)
+#define KVM_PTE_TYPE BIT(1)
+#define KVM_PTE_TYPE_BLOCK 0
+#define KVM_PTE_TYPE_PAGE 1
+#define KVM_PTE_TYPE_TABLE 1
+
#define KVM_PTE_LEAF_ATTR_LO GENMASK(11, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
@@ -413,7 +418,7 @@ static inline bool kvm_pgtable_walk_lock_held(void)
*/
struct kvm_pgtable {
union {
- struct rb_root pkvm_mappings;
+ struct rb_root_cached pkvm_mappings;
struct {
u32 ia_bits;
s8 start_level;
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index abd693ce5b93..ea58282f59bb 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -135,6 +135,12 @@ static inline unsigned long host_s2_pgtable_pages(void)
return res;
}
+#ifdef CONFIG_NVHE_EL2_DEBUG
+static inline unsigned long pkvm_selftest_pages(void) { return 32; }
+#else
+static inline unsigned long pkvm_selftest_pages(void) { return 0; }
+#endif
+
#define KVM_FFA_MBOX_NR_PAGES 1
static inline unsigned long hyp_ffa_proxy_pages(void)
@@ -167,6 +173,8 @@ struct pkvm_mapping {
struct rb_node node;
u64 gfn;
u64 pfn;
+ u64 nr_pages;
+ u64 __subtree_last; /* Internal member for interval tree */
};
int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
diff --git a/arch/arm64/include/asm/kvm_ras.h b/arch/arm64/include/asm/kvm_ras.h
index 87e10d9a635b..9398ade632aa 100644
--- a/arch/arm64/include/asm/kvm_ras.h
+++ b/arch/arm64/include/asm/kvm_ras.h
@@ -14,7 +14,7 @@
* Was this synchronous external abort a RAS notification?
* Returns '0' for errors handled by some RAS subsystem, or -ENOENT.
*/
-static inline int kvm_handle_guest_sea(phys_addr_t addr, u64 esr)
+static inline int kvm_handle_guest_sea(void)
{
/* apei_claim_sea(NULL) expects to mask interrupts itself */
lockdep_assert_irqs_enabled();
diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h
index a2a1eeb36d4b..314b2b52025f 100644
--- a/arch/arm64/include/asm/mem_encrypt.h
+++ b/arch/arm64/include/asm/mem_encrypt.h
@@ -4,6 +4,8 @@
#include <asm/rsi.h>
+struct device;
+
struct arm64_mem_crypt_ops {
int (*encrypt)(unsigned long addr, int numpages);
int (*decrypt)(unsigned long addr, int numpages);
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 30a29e88994b..6e8aa8e72601 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -94,17 +94,6 @@ static inline bool kaslr_requires_kpti(void)
return false;
}
- /*
- * Systems affected by Cavium erratum 27456 are incompatible
- * with KPTI.
- */
- if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
- extern const struct midr_range cavium_erratum_27456_cpus[];
-
- if (is_midr_in_range_list(cavium_erratum_27456_cpus))
- return false;
- }
-
return true;
}
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
index 6d6d4065b0cb..265e8301d7ba 100644
--- a/arch/arm64/include/asm/pgtable-types.h
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -11,11 +11,19 @@
#include <asm/types.h>
-typedef u64 pteval_t;
-typedef u64 pmdval_t;
-typedef u64 pudval_t;
-typedef u64 p4dval_t;
-typedef u64 pgdval_t;
+/*
+ * Page Table Descriptor
+ *
+ * Generic page table descriptor format from which
+ * all level specific descriptors can be derived.
+ */
+typedef u64 ptdesc_t;
+
+typedef ptdesc_t pteval_t;
+typedef ptdesc_t pmdval_t;
+typedef ptdesc_t pudval_t;
+typedef ptdesc_t p4dval_t;
+typedef ptdesc_t pgdval_t;
/*
* These are used to make use of C type-checking..
@@ -46,7 +54,7 @@ typedef struct { pgdval_t pgd; } pgd_t;
#define pgd_val(x) ((x).pgd)
#define __pgd(x) ((pgd_t) { (x) } )
-typedef struct { pteval_t pgprot; } pgprot_t;
+typedef struct { ptdesc_t pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
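
The switch to ptdesc_t keeps the long-standing one-member struct wrappers (pgprot_t, pte_t, ...) whose only purpose is C type checking. Here is a tiny userspace sketch of what that pattern buys; the names mirror the kernel's, but the descriptor encodings are arbitrary.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t ptdesc_t;

typedef struct { ptdesc_t pte; } pte_t;
typedef struct { ptdesc_t pmd; } pmd_t;

#define pte_val(x)  ((x).pte)
#define __pte(x)    ((pte_t) { (x) })
#define pmd_val(x)  ((x).pmd)
#define __pmd(x)    ((pmd_t) { (x) })

static void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

int main(void)
{
	pte_t pte = __pte(0x200000ULL | 0x3);
	pmd_t pmd = __pmd(0x40000000ULL | 0x1);

	set_pte(&pte, __pte(pte_val(pte) | 0x40));
	/* set_pte(&pte, pmd) would not compile: pmd_t is a distinct type. */

	printf("pte = 0x%llx, pmd = 0x%llx\n",
	       (unsigned long long)pte_val(pte), (unsigned long long)pmd_val(pmd));
	return 0;
}
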
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index d3b538be1500..88db8a0c0b37 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -40,6 +40,85 @@
#include <linux/sched.h>
#include <linux/page_table_check.h>
+static inline void emit_pte_barriers(void)
+{
+ /*
+ * These barriers are emitted under certain conditions after a pte entry
+ * was modified (see e.g. __set_pte_complete()). The dsb makes the store
+ * visible to the table walker. The isb ensures that any previous
+ * speculative "invalid translation" marker that is in the CPU's
+ * pipeline gets cleared, so that any access to that address after
+ * setting the pte to valid won't cause a spurious fault. If the thread
+ * gets preempted after storing to the pgtable but before emitting these
+ * barriers, __switch_to() emits a dsb which ensures the walker gets to
+ * see the store. There is no guarantee of an isb being issued though.
+ * This is safe because it will still get issued (albeit on a
+ * potentially different CPU) when the thread starts running again,
+ * before any access to the address.
+ */
+ dsb(ishst);
+ isb();
+}
+
+static inline void queue_pte_barriers(void)
+{
+ unsigned long flags;
+
+ if (in_interrupt()) {
+ emit_pte_barriers();
+ return;
+ }
+
+ flags = read_thread_flags();
+
+ if (flags & BIT(TIF_LAZY_MMU)) {
+ /* Avoid the atomic op if already set. */
+ if (!(flags & BIT(TIF_LAZY_MMU_PENDING)))
+ set_thread_flag(TIF_LAZY_MMU_PENDING);
+ } else {
+ emit_pte_barriers();
+ }
+}
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+static inline void arch_enter_lazy_mmu_mode(void)
+{
+ /*
+ * lazy_mmu_mode is not supposed to permit nesting. But in practice this
+ * does happen with CONFIG_DEBUG_PAGEALLOC, where a page allocation
+ * inside a lazy_mmu_mode section (such as zap_pte_range()) will change
+ * permissions on the linear map with apply_to_page_range(), which
+ * re-enters lazy_mmu_mode. So we tolerate nesting in our
+ * implementation. The first call to arch_leave_lazy_mmu_mode() will
+ * flush and clear the flag such that the remainder of the work in the
+ * outer nest behaves as if outside of lazy mmu mode. This is safe and
+ * keeps tracking simple.
+ */
+
+ if (in_interrupt())
+ return;
+
+ set_thread_flag(TIF_LAZY_MMU);
+}
+
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+ if (in_interrupt())
+ return;
+
+ if (test_and_clear_thread_flag(TIF_LAZY_MMU_PENDING))
+ emit_pte_barriers();
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
+{
+ if (in_interrupt())
+ return;
+
+ arch_flush_lazy_mmu_mode();
+ clear_thread_flag(TIF_LAZY_MMU);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
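
queue_pte_barriers() above turns a dsb/isb pair per page-table update into a single deferred pair while the task is in lazy MMU mode, latched through TIF_LAZY_MMU_PENDING. The thread-flag and barrier machinery is kernel-specific, so the sketch below only models the latch-and-flush shape with thread-local booleans; all names are invented and sync_hardware() stands in for the barrier pair.

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool lazy_mode;
static _Thread_local bool sync_pending;

static void sync_hardware(void)
{
	puts("sync");			/* would be dsb(ishst); isb(); */
}

static void queue_sync(void)
{
	if (!lazy_mode) {
		sync_hardware();	/* not batching: emit immediately */
		return;
	}
	sync_pending = true;		/* batching: remember that a sync is owed */
}

static void enter_lazy(void)
{
	lazy_mode = true;
}

static void leave_lazy(void)
{
	if (sync_pending) {
		sync_pending = false;
		sync_hardware();	/* one sync covers every queued update */
	}
	lazy_mode = false;
}

int main(void)
{
	enter_lazy();
	for (int i = 0; i < 4; i++)
		queue_sync();		/* four updates, still no sync */
	leave_lazy();			/* exactly one "sync" printed here */

	queue_sync();			/* outside lazy mode: immediate sync */
	return 0;
}
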
@@ -320,18 +399,20 @@ static inline void __set_pte_nosync(pte_t *ptep, pte_t pte)
WRITE_ONCE(*ptep, pte);
}
-static inline void __set_pte(pte_t *ptep, pte_t pte)
+static inline void __set_pte_complete(pte_t pte)
{
- __set_pte_nosync(ptep, pte);
-
/*
* Only if the new pte is valid and kernel, otherwise TLB maintenance
- * or update_mmu_cache() have the necessary barriers.
+ * has the necessary barriers.
*/
- if (pte_valid_not_user(pte)) {
- dsb(ishst);
- isb();
- }
+ if (pte_valid_not_user(pte))
+ queue_pte_barriers();
+}
+
+static inline void __set_pte(pte_t *ptep, pte_t pte)
+{
+ __set_pte_nosync(ptep, pte);
+ __set_pte_complete(pte);
}
static inline pte_t __ptep_get(pte_t *ptep)
@@ -423,23 +504,6 @@ static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
}
-static inline void __set_ptes(struct mm_struct *mm,
- unsigned long __always_unused addr,
- pte_t *ptep, pte_t pte, unsigned int nr)
-{
- page_table_check_ptes_set(mm, ptep, pte, nr);
- __sync_cache_and_tags(pte, nr);
-
- for (;;) {
- __check_safe_pte_update(mm, ptep, pte);
- __set_pte(ptep, pte);
- if (--nr == 0)
- break;
- ptep++;
- pte = pte_advance_pfn(pte, 1);
- }
-}
-
/*
* Hugetlb definitions.
*/
@@ -609,7 +673,6 @@ static inline pmd_t pmd_mkspecial(pmd_t pmd)
#define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
#define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
#define pud_young(pud) pte_young(pud_pte(pud))
#define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud)))
@@ -649,30 +712,64 @@ static inline pgprot_t pud_pgprot(pud_t pud)
return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
}
-static inline void __set_pte_at(struct mm_struct *mm,
- unsigned long __always_unused addr,
- pte_t *ptep, pte_t pte, unsigned int nr)
+static inline void __set_ptes_anysz(struct mm_struct *mm, pte_t *ptep,
+ pte_t pte, unsigned int nr,
+ unsigned long pgsize)
+{
+ unsigned long stride = pgsize >> PAGE_SHIFT;
+
+ switch (pgsize) {
+ case PAGE_SIZE:
+ page_table_check_ptes_set(mm, ptep, pte, nr);
+ break;
+ case PMD_SIZE:
+ page_table_check_pmds_set(mm, (pmd_t *)ptep, pte_pmd(pte), nr);
+ break;
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SIZE:
+ page_table_check_puds_set(mm, (pud_t *)ptep, pte_pud(pte), nr);
+ break;
+#endif
+ default:
+ VM_WARN_ON(1);
+ }
+
+ __sync_cache_and_tags(pte, nr * stride);
+
+ for (;;) {
+ __check_safe_pte_update(mm, ptep, pte);
+ __set_pte_nosync(ptep, pte);
+ if (--nr == 0)
+ break;
+ ptep++;
+ pte = pte_advance_pfn(pte, stride);
+ }
+
+ __set_pte_complete(pte);
+}
+
+static inline void __set_ptes(struct mm_struct *mm,
+ unsigned long __always_unused addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
{
- __sync_cache_and_tags(pte, nr);
- __check_safe_pte_update(mm, ptep, pte);
- __set_pte(ptep, pte);
+ __set_ptes_anysz(mm, ptep, pte, nr, PAGE_SIZE);
}
-static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp, pmd_t pmd)
+static inline void __set_pmds(struct mm_struct *mm,
+ unsigned long __always_unused addr,
+ pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
- page_table_check_pmd_set(mm, pmdp, pmd);
- return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
- PMD_SIZE >> PAGE_SHIFT);
+ __set_ptes_anysz(mm, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
}
+#define set_pmd_at(mm, addr, pmdp, pmd) __set_pmds(mm, addr, pmdp, pmd, 1)
-static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
- pud_t *pudp, pud_t pud)
+static inline void __set_puds(struct mm_struct *mm,
+ unsigned long __always_unused addr,
+ pud_t *pudp, pud_t pud, unsigned int nr)
{
- page_table_check_pud_set(mm, pudp, pud);
- return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
- PUD_SIZE >> PAGE_SHIFT);
+ __set_ptes_anysz(mm, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
}
+#define set_pud_at(mm, addr, pudp, pud) __set_puds(mm, addr, pudp, pud, 1)
#define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys) __phys_to_pte_val(phys)
@@ -739,8 +836,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
* If pmd is present-invalid, pmd_table() won't detect it
* as a table, so force the valid bit for the comparison.
*/
- return pmd_val(pmd) && pmd_present(pmd) &&
- !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID));
+ return pmd_present(pmd) && !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -754,8 +850,6 @@ static inline bool pud_table(pud_t pud) { return true; }
PUD_TYPE_TABLE)
#endif
-extern pgd_t init_pg_dir[];
-extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[];
extern pgd_t idmap_pg_dir[];
extern pgd_t tramp_pg_dir[];
@@ -780,10 +874,8 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
WRITE_ONCE(*pmdp, pmd);
- if (pmd_valid(pmd)) {
- dsb(ishst);
- isb();
- }
+ if (pmd_valid(pmd))
+ queue_pte_barriers();
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -813,12 +905,6 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)
-
#if CONFIG_PGTABLE_LEVELS > 2
#define pmd_ERROR(e) \
@@ -848,10 +934,8 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
WRITE_ONCE(*pudp, pud);
- if (pud_valid(pud)) {
- dsb(ishst);
- isb();
- }
+ if (pud_valid(pud))
+ queue_pte_barriers();
}
static inline void pud_clear(pud_t *pudp)
@@ -930,8 +1014,7 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
}
WRITE_ONCE(*p4dp, p4d);
- dsb(ishst);
- isb();
+ queue_pte_barriers();
}
static inline void p4d_clear(p4d_t *p4dp)
@@ -1059,8 +1142,7 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
}
WRITE_ONCE(*pgdp, pgd);
- dsb(ishst);
- isb();
+ queue_pte_barriers();
}
static inline void pgd_clear(pgd_t *pgdp)
@@ -1301,16 +1383,37 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
-static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
- unsigned long address, pte_t *ptep)
+static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm,
+ pte_t *ptep,
+ unsigned long pgsize)
{
pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
- page_table_check_pte_clear(mm, pte);
+ switch (pgsize) {
+ case PAGE_SIZE:
+ page_table_check_pte_clear(mm, pte);
+ break;
+ case PMD_SIZE:
+ page_table_check_pmd_clear(mm, pte_pmd(pte));
+ break;
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SIZE:
+ page_table_check_pud_clear(mm, pte_pud(pte));
+ break;
+#endif
+ default:
+ VM_WARN_ON(1);
+ }
return pte;
}
+static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ return __ptep_get_and_clear_anysz(mm, ptep, PAGE_SIZE);
+}
+
static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned int nr, int full)
{
@@ -1347,11 +1450,7 @@ static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
- pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));
-
- page_table_check_pmd_clear(mm, pmd);
-
- return pmd;
+ return pte_pmd(__ptep_get_and_clear_anysz(mm, (pte_t *)pmdp, PMD_SIZE));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
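
__set_ptes_anysz() above folds the PTE, PMD and PUD setters into one loop that advances the pfn by a per-level stride and issues the completion barrier once, after the final store. The rough userspace model below keeps only that shape; the descriptor encoding (pfn in the high bits, attributes low) is made up.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12

static void set_entries(uint64_t *ptep, uint64_t pfn, uint64_t attrs,
			unsigned int nr, unsigned long pgsize)
{
	unsigned long stride = pgsize >> PAGE_SHIFT;	/* pfns covered per entry */

	for (;;) {
		*ptep = (pfn << PAGE_SHIFT) | attrs;	/* like __set_pte_nosync() */
		if (--nr == 0)
			break;
		ptep++;
		pfn += stride;
	}
	puts("completion barrier (once)");		/* like __set_pte_complete() */
}

int main(void)
{
	uint64_t ptes[4] = { 0 };

	/* Write 4 PMD-sized entries starting at pfn 0x100, 2 MiB apart. */
	set_entries(ptes, 0x100, 0x3, 4, 2UL << 20);

	for (int i = 0; i < 4; i++)
		printf("entry[%d] = 0x%llx\n", i, (unsigned long long)ptes[i]);
	return 0;
}
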
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index b2931d1ae0fb..fded5358641f 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -24,8 +24,8 @@ struct ptdump_info {
};
struct ptdump_prot_bits {
- u64 mask;
- u64 val;
+ ptdesc_t mask;
+ ptdesc_t val;
const char *set;
const char *clear;
};
@@ -34,7 +34,7 @@ struct ptdump_pg_level {
const struct ptdump_prot_bits *bits;
char name[4];
int num;
- u64 mask;
+ ptdesc_t mask;
};
/*
@@ -51,7 +51,7 @@ struct ptdump_pg_state {
const struct mm_struct *mm;
unsigned long start_address;
int level;
- u64 current_prot;
+ ptdesc_t current_prot;
bool check_wx;
unsigned long wx_pages;
unsigned long uxn_pages;
@@ -59,7 +59,13 @@ struct ptdump_pg_state {
void ptdump_walk(struct seq_file *s, struct ptdump_info *info);
void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
- u64 val);
+ pteval_t val);
+void note_page_pte(struct ptdump_state *st, unsigned long addr, pte_t pte);
+void note_page_pmd(struct ptdump_state *st, unsigned long addr, pmd_t pmd);
+void note_page_pud(struct ptdump_state *st, unsigned long addr, pud_t pud);
+void note_page_p4d(struct ptdump_state *st, unsigned long addr, p4d_t p4d);
+void note_page_pgd(struct ptdump_state *st, unsigned long addr, pgd_t pgd);
+void note_page_flush(struct ptdump_state *st);
#ifdef CONFIG_PTDUMP_DEBUGFS
#define EFI_RUNTIME_MAP_END DEFAULT_MAP_WINDOW_64
void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name);
@@ -69,7 +75,13 @@ static inline void ptdump_debugfs_register(struct ptdump_info *info,
#endif /* CONFIG_PTDUMP_DEBUGFS */
#else
static inline void note_page(struct ptdump_state *pt_st, unsigned long addr,
- int level, u64 val) { }
+ int level, pteval_t val) { }
+static inline void note_page_pte(struct ptdump_state *st, unsigned long addr, pte_t pte) { }
+static inline void note_page_pmd(struct ptdump_state *st, unsigned long addr, pmd_t pmd) { }
+static inline void note_page_pud(struct ptdump_state *st, unsigned long addr, pud_t pud) { }
+static inline void note_page_p4d(struct ptdump_state *st, unsigned long addr, p4d_t p4d) { }
+static inline void note_page_pgd(struct ptdump_state *st, unsigned long addr, pgd_t pgd) { }
+static inline void note_page_flush(struct ptdump_state *st) { }
#endif /* CONFIG_PTDUMP */
#endif /* __ASM_PTDUMP_H */
diff --git a/arch/arm64/include/asm/rqspinlock.h b/arch/arm64/include/asm/rqspinlock.h
index 5b80785324b6..9ea0a74e5892 100644
--- a/arch/arm64/include/asm/rqspinlock.h
+++ b/arch/arm64/include/asm/rqspinlock.h
@@ -86,7 +86,7 @@
#endif
-#define res_smp_cond_load_acquire_timewait(v, c) smp_cond_load_acquire_timewait(v, c, 0, 1)
+#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire_timewait(v, c, 0, 1)
#include <asm-generic/rqspinlock.h>
diff --git a/arch/arm64/include/asm/rsi_cmds.h b/arch/arm64/include/asm/rsi_cmds.h
index e6a211001bd3..2c8763876dfb 100644
--- a/arch/arm64/include/asm/rsi_cmds.h
+++ b/arch/arm64/include/asm/rsi_cmds.h
@@ -7,6 +7,8 @@
#define __ASM_RSI_CMDS_H
#include <linux/arm-smccc.h>
+#include <linux/string.h>
+#include <asm/memory.h>
#include <asm/rsi_smc.h>
diff --git a/arch/arm64/include/asm/rwonce.h b/arch/arm64/include/asm/rwonce.h
index 56f7b1d4d54b..97d9256d33c9 100644
--- a/arch/arm64/include/asm/rwonce.h
+++ b/arch/arm64/include/asm/rwonce.h
@@ -12,16 +12,12 @@
#ifndef BUILD_VDSO
-#ifdef CONFIG_AS_HAS_LDAPR
#define __LOAD_RCPC(sfx, regs...) \
ALTERNATIVE( \
"ldar" #sfx "\t" #regs, \
".arch_extension rcpc\n" \
"ldapr" #sfx "\t" #regs, \
ARM64_HAS_LDAPR)
-#else
-#define __LOAD_RCPC(sfx, regs...) "ldar" #sfx "\t" #regs
-#endif /* CONFIG_AS_HAS_LDAPR */
/*
* When building with LTO, there is an increased risk of the compiler
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 40971ac1303f..51b0d594239e 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -11,6 +11,7 @@ extern char __alt_instructions[], __alt_instructions_end[];
extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
extern char __hyp_text_start[], __hyp_text_end[];
+extern char __hyp_data_start[], __hyp_data_end[];
extern char __hyp_rodata_start[], __hyp_rodata_end[];
extern char __hyp_reloc_begin[], __hyp_reloc_end[];
extern char __hyp_bss_start[], __hyp_bss_end[];
diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
index f1524cdeacf1..8fef12626090 100644
--- a/arch/arm64/include/asm/spectre.h
+++ b/arch/arm64/include/asm/spectre.h
@@ -97,6 +97,9 @@ enum mitigation_state arm64_get_meltdown_state(void);
enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
+extern bool __nospectre_bhb;
+u8 get_spectre_bhb_loop_value(void);
+bool is_spectre_bhb_fw_mitigated(void);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index ab8e14b96f68..712daa90e643 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -61,6 +61,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
regs->regs[0] = val;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->syscallno = nr;
+ if (nr == -1) {
+ /*
+ * When the syscall number is set to -1, the syscall will be
+ * skipped. In this case the syscall return value has to be
+ * set explicitly, otherwise the first syscall argument is
+ * returned as the syscall return value.
+ */
+ syscall_set_return_value(task, regs, -ENOSYS, 0);
+ }
+}
+
#define SYSCALL_MAX_ARGS 6
static inline void syscall_get_arguments(struct task_struct *task,
@@ -73,6 +89,19 @@ static inline void syscall_get_arguments(struct task_struct *task,
memcpy(args, &regs->regs[1], 5 * sizeof(args[0]));
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ memcpy(&regs->regs[0], args, 6 * sizeof(args[0]));
+ /*
+ * Also copy the first argument into orig_x0
+ * so that syscall_get_arguments() would return it
+ * instead of the previous value.
+ */
+ regs->orig_x0 = regs->regs[0];
+}
+
/*
* We don't care about endianness (__AUDIT_ARCH_LE bit) here because
* AArch64 has the same system calls both on little- and big- endian.
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 2639d3633073..f1bb0d10c39a 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -12,6 +12,7 @@
#include <linux/bits.h>
#include <linux/stringify.h>
#include <linux/kasan-tags.h>
+#include <linux/kconfig.h>
#include <asm/gpr-num.h>
@@ -117,6 +118,7 @@
#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31)
+/* Data cache zero operations */
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4)
#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6)
@@ -153,11 +155,13 @@
#define SYS_DC_CIGVAC sys_insn(1, 3, 7, 14, 3)
#define SYS_DC_CIGDVAC sys_insn(1, 3, 7, 14, 5)
-/* Data cache zero operations */
#define SYS_DC_ZVA sys_insn(1, 3, 7, 4, 1)
#define SYS_DC_GVA sys_insn(1, 3, 7, 4, 3)
#define SYS_DC_GZVA sys_insn(1, 3, 7, 4, 4)
+#define SYS_DC_CIVAPS sys_insn(1, 0, 7, 15, 1)
+#define SYS_DC_CIGDVAPS sys_insn(1, 0, 7, 15, 5)
+
/*
* Automatically generated definitions for system registers, the
* manual encodings below are in the process of being converted to
@@ -497,12 +501,22 @@
#define __PMEV_op2(n) ((n) & 0x7)
#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
+#define SYS_PMEVCNTSVRn_EL1(n) sys_reg(2, 0, 14, __CNTR_CRm(n), __PMEV_op2(n))
#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n))
#define __TYPER_CRm(n) (0xc | (((n) >> 3) & 0x3))
#define SYS_PMEVTYPERn_EL0(n) sys_reg(3, 3, 14, __TYPER_CRm(n), __PMEV_op2(n))
#define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7)
+#define SYS_SPMCGCRn_EL1(n) sys_reg(2, 0, 9, 13, ((n) & 1))
+
+#define __SPMEV_op2(n) ((n) & 0x7)
+#define __SPMEV_crm(p, n) ((((p) & 7) << 1) | (((n) >> 3) & 1))
+#define SYS_SPMEVCNTRn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b000, n), __SPMEV_op2(n))
+#define SYS_SPMEVFILT2Rn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b011, n), __SPMEV_op2(n))
+#define SYS_SPMEVFILTRn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b010, n), __SPMEV_op2(n))
+#define SYS_SPMEVTYPERn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b001, n), __SPMEV_op2(n))
+
#define SYS_VPIDR_EL2 sys_reg(3, 4, 0, 0, 0)
#define SYS_VMPIDR_EL2 sys_reg(3, 4, 0, 0, 5)
@@ -521,7 +535,6 @@
#define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0)
#define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2)
-#define SYS_VNCR_EL2 sys_reg(3, 4, 2, 2, 0)
#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
@@ -608,28 +621,18 @@
/* VHE encodings for architectural EL0/1 system registers */
#define SYS_BRBCR_EL12 sys_reg(2, 5, 9, 0, 0)
-#define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0)
-#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2)
-#define SYS_SCTLR2_EL12 sys_reg(3, 5, 1, 0, 3)
-#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0)
-#define SYS_TRFCR_EL12 sys_reg(3, 5, 1, 2, 1)
-#define SYS_SMCR_EL12 sys_reg(3, 5, 1, 2, 6)
#define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0)
#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1)
-#define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2)
-#define SYS_TCR2_EL12 sys_reg(3, 5, 2, 0, 3)
#define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0)
#define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1)
#define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0)
#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1)
#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0)
#define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0)
-#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0)
#define SYS_PMSCR_EL12 sys_reg(3, 5, 9, 9, 0)
#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0)
#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0)
#define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0)
-#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1)
#define SYS_SCXTNUM_EL12 sys_reg(3, 5, 13, 0, 7)
#define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0)
#define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0)
@@ -1091,6 +1094,15 @@
__emit_inst(0xd5000000|(\sreg)|(.L__gpr_num_\rt))
.endm
+ .macro msr_hcr_el2, reg
+#if IS_ENABLED(CONFIG_AMPERE_ERRATUM_AC04_CPU_23)
+ dsb nsh
+ msr hcr_el2, \reg
+ isb
+#else
+ msr hcr_el2, \reg
+#endif
+ .endm
#else
#include <linux/bitfield.h>
@@ -1178,6 +1190,13 @@
write_sysreg(__scs_new, sysreg); \
} while (0)
+#define sysreg_clear_set_hcr(clear, set) do { \
+ u64 __scs_val = read_sysreg(hcr_el2); \
+ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+ if (__scs_new != __scs_val) \
+ write_sysreg_hcr(__scs_new); \
+} while (0)
+
#define sysreg_clear_set_s(sysreg, clear, set) do { \
u64 __scs_val = read_sysreg_s(sysreg); \
u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
@@ -1185,6 +1204,17 @@
write_sysreg_s(__scs_new, sysreg); \
} while (0)
+#define write_sysreg_hcr(__val) do { \
+ if (IS_ENABLED(CONFIG_AMPERE_ERRATUM_AC04_CPU_23) && \
+ (!system_capabilities_finalized() || \
+ alternative_has_cap_unlikely(ARM64_WORKAROUND_AMPERE_AC04_CPU_23))) \
+ asm volatile("dsb nsh; msr hcr_el2, %x0; isb" \
+ : : "rZ" (__val)); \
+ else \
+ asm volatile("msr hcr_el2, %x0" \
+ : : "rZ" (__val)); \
+} while (0)
+
#define read_sysreg_par() ({ \
u64 par; \
asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 1114c1c3300a..1269c2487574 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -59,11 +59,12 @@ void arch_setup_new_exec(void);
#define TIF_SIGPENDING 0 /* signal pending */
#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
-#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
-#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
-#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
-#define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */
-#define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */
+#define TIF_NEED_RESCHED_LAZY 2 /* Lazy rescheduling needed */
+#define TIF_NOTIFY_RESUME 3 /* callback before returning to user */
+#define TIF_FOREIGN_FPSTATE 4 /* CPU's FP state is not current's */
+#define TIF_UPROBE 5 /* uprobe breakpoint or singlestep */
+#define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */
+#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
@@ -82,9 +83,12 @@ void arch_setup_new_exec(void);
#define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */
#define TIF_KERNEL_FPSTATE 29 /* Task is in a kernel mode FPSIMD section */
#define TIF_TSC_SIGSEGV 30 /* SIGSEGV on counter-timer access */
+#define TIF_LAZY_MMU 31 /* Task in lazy mmu mode */
+#define TIF_LAZY_MMU_PENDING 32 /* Ops pending for lazy mmu mode exit */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
@@ -100,10 +104,10 @@ void arch_setup_new_exec(void);
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_TSC_SIGSEGV (1 << TIF_TSC_SIGSEGV)
-#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
_TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
- _TIF_NOTIFY_SIGNAL)
+ _TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h
index 92a2b59a9f3d..da1ab8759592 100644
--- a/arch/arm64/include/asm/vdso/gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/gettimeofday.h
@@ -8,6 +8,7 @@
#ifndef __ASSEMBLY__
#include <asm/alternative.h>
+#include <asm/arch_timer.h>
#include <asm/barrier.h>
#include <asm/unistd.h>
#include <asm/sysreg.h>
@@ -69,8 +70,6 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
const struct vdso_time_data *vd)
{
- u64 res;
-
/*
* Core checks for mode already, so this raced against a concurrent
* update. Return something. Core will do another round and then
@@ -79,25 +78,21 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
if (clock_mode == VDSO_CLOCKMODE_NONE)
return 0;
- /*
- * If FEAT_ECV is available, use the self-synchronizing counter.
- * Otherwise the isb is required to prevent that the counter value
- * is speculated.
- */
- asm volatile(
- ALTERNATIVE("isb\n"
- "mrs %0, cntvct_el0",
- "nop\n"
- __mrs_s("%0", SYS_CNTVCTSS_EL0),
- ARM64_HAS_ECV)
- : "=r" (res)
- :
- : "memory");
+ return __arch_counter_get_cntvct();
+}
+
+#if IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB)
+static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
+{
+ const struct vdso_time_data *ret = &vdso_u_time_data;
- arch_counter_enforce_ordering(res);
+ /* Work around invalid absolute relocations */
+ OPTIMIZER_HIDE_VAR(ret);
- return res;
+ return ret;
}
+#define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data
+#endif /* IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB) */
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index ebf4a9f943ed..aa280f356b96 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -67,7 +67,8 @@
* __boot_cpu_mode records what mode CPUs were booted in.
* A correctly-implemented bootloader must start all CPUs in the same mode:
* In this case, both 32bit halves of __boot_cpu_mode will contain the
- * same value (either 0 if booted in EL1, BOOT_CPU_MODE_EL2 if booted in EL2).
+ * same value (either BOOT_CPU_MODE_EL1 if booted in EL1, or BOOT_CPU_MODE_EL2 if
+ * booted in EL2).
*
* Should the bootloader fail to do this, the two values will be different.
* This allows the kernel to flag an error when the secondaries have come up.
diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h
index 38fafffe699f..12f534e8f3ed 100644
--- a/arch/arm64/include/asm/vmalloc.h
+++ b/arch/arm64/include/asm/vmalloc.h
@@ -23,6 +23,51 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot)
return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}
+#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr,
+ unsigned long end, u64 pfn,
+ unsigned int max_page_shift)
+{
+ /*
+ * If the block is at least CONT_PTE_SIZE in size, and is naturally
+ * aligned in both virtual and physical space, then we can pte-map the
+ * block using the PTE_CONT bit for more efficient use of the TLB.
+ */
+ if (max_page_shift < CONT_PTE_SHIFT)
+ return PAGE_SIZE;
+
+ if (end - addr < CONT_PTE_SIZE)
+ return PAGE_SIZE;
+
+ if (!IS_ALIGNED(addr, CONT_PTE_SIZE))
+ return PAGE_SIZE;
+
+ if (!IS_ALIGNED(PFN_PHYS(pfn), CONT_PTE_SIZE))
+ return PAGE_SIZE;
+
+ return CONT_PTE_SIZE;
+}
+
+#define arch_vmap_pte_range_unmap_size arch_vmap_pte_range_unmap_size
+static inline unsigned long arch_vmap_pte_range_unmap_size(unsigned long addr,
+ pte_t *ptep)
+{
+ /*
+ * The caller handles alignment so it's sufficient just to check
+ * PTE_CONT.
+ */
+ return pte_valid_cont(__ptep_get(ptep)) ? CONT_PTE_SIZE : PAGE_SIZE;
+}
+
+#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+ if (size >= CONT_PTE_SIZE)
+ return CONT_PTE_SHIFT;
+
+ return PAGE_SHIFT;
+}
+
#endif
#define arch_vmap_pgprot_tagged arch_vmap_pgprot_tagged
diff --git a/arch/arm64/include/asm/vncr_mapping.h b/arch/arm64/include/asm/vncr_mapping.h
index 4f9bbd4d6c26..6f556e993644 100644
--- a/arch/arm64/include/asm/vncr_mapping.h
+++ b/arch/arm64/include/asm/vncr_mapping.h
@@ -35,6 +35,8 @@
#define VNCR_CNTP_CTL_EL0 0x180
#define VNCR_SCXTNUM_EL1 0x188
#define VNCR_TFSR_EL1 0x190
+#define VNCR_HDFGRTR2_EL2 0x1A0
+#define VNCR_HDFGWTR2_EL2 0x1B0
#define VNCR_HFGRTR_EL2 0x1B8
#define VNCR_HFGWTR_EL2 0x1C0
#define VNCR_HFGITR_EL2 0x1C8
@@ -52,6 +54,9 @@
#define VNCR_PIRE0_EL1 0x290
#define VNCR_PIR_EL1 0x2A0
#define VNCR_POR_EL1 0x2A8
+#define VNCR_HFGRTR2_EL2 0x2C0
+#define VNCR_HFGWTR2_EL2 0x2C8
+#define VNCR_HFGITR2_EL2 0x310
#define VNCR_ICH_LR0_EL2 0x400
#define VNCR_ICH_LR1_EL2 0x408
#define VNCR_ICH_LR2_EL2 0x410
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index af9d9acaf997..ed5f3892674c 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -431,10 +431,11 @@ enum {
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
-#define KVM_ARM_VCPU_PMU_V3_IRQ 0
-#define KVM_ARM_VCPU_PMU_V3_INIT 1
-#define KVM_ARM_VCPU_PMU_V3_FILTER 2
-#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3
+#define KVM_ARM_VCPU_PMU_V3_IRQ 0
+#define KVM_ARM_VCPU_PMU_V3_INIT 1
+#define KVM_ARM_VCPU_PMU_V3_FILTER 2
+#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3
+#define KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS 4
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index eb1a840e4110..30d4bbe68661 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -182,5 +182,7 @@ int main(void)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
DEFINE(FTRACE_OPS_DIRECT_CALL, offsetof(struct ftrace_ops, direct_call));
#endif
+ DEFINE(PIE_E0_ASM, PIE_E0);
+ DEFINE(PIE_E1_ASM, PIE_E1);
return 0;
}
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index b55f5f705750..59d723c9ab8f 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -335,7 +335,7 @@ static const struct midr_range cavium_erratum_23154_cpus[] = {
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
-const struct midr_range cavium_erratum_27456_cpus[] = {
+static const struct midr_range cavium_erratum_27456_cpus[] = {
/* Cavium ThunderX, T88 pass 1.x - 2.1 */
MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
/* Cavium ThunderX, T81 pass 1.0 */
@@ -557,6 +557,13 @@ static const struct midr_range erratum_ac03_cpu_38_list[] = {
};
#endif
+#ifdef CONFIG_AMPERE_ERRATUM_AC04_CPU_23
+static const struct midr_range erratum_ac04_cpu_23_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
+ {},
+};
+#endif
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
@@ -876,6 +883,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
},
#endif
+#ifdef CONFIG_AMPERE_ERRATUM_AC04_CPU_23
+ {
+ .desc = "AmpereOne erratum AC04_CPU_23",
+ .capability = ARM64_WORKAROUND_AMPERE_AC04_CPU_23,
+ ERRATA_MIDR_RANGE_LIST(erratum_ac04_cpu_23_list),
+ },
+#endif
{
.desc = "Broken CNTVOFF_EL2",
.capability = ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF,
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 9c4d6d552b25..45ea79cacf46 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -114,7 +114,14 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NC
DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);
-bool arm64_use_ng_mappings = false;
+/*
+ * arm64_use_ng_mappings must be placed in the .data section, otherwise it
+ * ends up in the .bss section where it is initialized in early_map_kernel()
+ * after the MMU (with the idmap) was enabled. create_init_idmap() - which
+ * runs before early_map_kernel() and reads the variable via PTE_MAYBE_NG -
+ * may end up generating incorrect idmap page table attributes.
+ */
+bool arm64_use_ng_mappings __read_mostly = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);
DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
@@ -298,6 +305,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_GCS),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_GCS_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_frac_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0),
@@ -758,17 +766,17 @@ static const struct arm64_ftr_bits ftr_raz[] = {
#define ARM64_FTR_REG(id, table) \
__ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)
-struct arm64_ftr_override id_aa64mmfr0_override;
-struct arm64_ftr_override id_aa64mmfr1_override;
-struct arm64_ftr_override id_aa64mmfr2_override;
-struct arm64_ftr_override id_aa64pfr0_override;
-struct arm64_ftr_override id_aa64pfr1_override;
-struct arm64_ftr_override id_aa64zfr0_override;
-struct arm64_ftr_override id_aa64smfr0_override;
-struct arm64_ftr_override id_aa64isar1_override;
-struct arm64_ftr_override id_aa64isar2_override;
+struct arm64_ftr_override __read_mostly id_aa64mmfr0_override;
+struct arm64_ftr_override __read_mostly id_aa64mmfr1_override;
+struct arm64_ftr_override __read_mostly id_aa64mmfr2_override;
+struct arm64_ftr_override __read_mostly id_aa64pfr0_override;
+struct arm64_ftr_override __read_mostly id_aa64pfr1_override;
+struct arm64_ftr_override __read_mostly id_aa64zfr0_override;
+struct arm64_ftr_override __read_mostly id_aa64smfr0_override;
+struct arm64_ftr_override __read_mostly id_aa64isar1_override;
+struct arm64_ftr_override __read_mostly id_aa64isar2_override;
-struct arm64_ftr_override arm64_sw_feature_override;
+struct arm64_ftr_override __read_mostly arm64_sw_feature_override;
static const struct __ftr_reg_entry {
u32 sys_id;
@@ -1403,6 +1411,8 @@ void update_cpu_features(int cpu,
info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
taint |= check_update_ftr_reg(SYS_ID_AA64MMFR3_EL1, cpu,
info->reg_id_aa64mmfr3, boot->reg_id_aa64mmfr3);
+ taint |= check_update_ftr_reg(SYS_ID_AA64MMFR4_EL1, cpu,
+ info->reg_id_aa64mmfr4, boot->reg_id_aa64mmfr4);
taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
@@ -2876,6 +2886,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, FGT, IMP)
},
+ {
+ .desc = "Fine Grained Traps 2",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_HAS_FGT2,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, FGT, FGT2)
+ },
#ifdef CONFIG_ARM64_SME
{
.desc = "Scalable Matrix Extension",
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 285d7d538342..94525abd1c22 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -209,80 +209,79 @@ static const char *const compat_hwcap2_str[] = {
static int c_show(struct seq_file *m, void *v)
{
- int i, j;
+ int j;
+ int cpu = m->index;
bool compat = personality(current->personality) == PER_LINUX32;
+ struct cpuinfo_arm64 *cpuinfo = v;
+ u32 midr = cpuinfo->reg_midr;
- for_each_online_cpu(i) {
- struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
- u32 midr = cpuinfo->reg_midr;
-
- /*
- * glibc reads /proc/cpuinfo to determine the number of
- * online processors, looking for lines beginning with
- * "processor". Give glibc what it expects.
- */
- seq_printf(m, "processor\t: %d\n", i);
- if (compat)
- seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
- MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
+ /*
+ * glibc reads /proc/cpuinfo to determine the number of
+ * online processors, looking for lines beginning with
+ * "processor". Give glibc what it expects.
+ */
+ seq_printf(m, "processor\t: %d\n", cpu);
+ if (compat)
+ seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
+ MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
- seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
- loops_per_jiffy / (500000UL/HZ),
- loops_per_jiffy / (5000UL/HZ) % 100);
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000UL/HZ),
+ loops_per_jiffy / (5000UL/HZ) % 100);
- /*
- * Dump out the common processor features in a single line.
- * Userspace should read the hwcaps with getauxval(AT_HWCAP)
- * rather than attempting to parse this, but there's a body of
- * software which does already (at least for 32-bit).
- */
- seq_puts(m, "Features\t:");
- if (compat) {
+ /*
+ * Dump out the common processor features in a single line.
+ * Userspace should read the hwcaps with getauxval(AT_HWCAP)
+ * rather than attempting to parse this, but there's a body of
+ * software which does already (at least for 32-bit).
+ */
+ seq_puts(m, "Features\t:");
+ if (compat) {
#ifdef CONFIG_COMPAT
- for (j = 0; j < ARRAY_SIZE(compat_hwcap_str); j++) {
- if (compat_elf_hwcap & (1 << j)) {
- /*
- * Warn once if any feature should not
- * have been present on arm64 platform.
- */
- if (WARN_ON_ONCE(!compat_hwcap_str[j]))
- continue;
-
- seq_printf(m, " %s", compat_hwcap_str[j]);
- }
+ for (j = 0; j < ARRAY_SIZE(compat_hwcap_str); j++) {
+ if (compat_elf_hwcap & (1 << j)) {
+ /*
+ * Warn once if any feature should not
+ * have been present on arm64 platform.
+ */
+ if (WARN_ON_ONCE(!compat_hwcap_str[j]))
+ continue;
+
+ seq_printf(m, " %s", compat_hwcap_str[j]);
}
+ }
- for (j = 0; j < ARRAY_SIZE(compat_hwcap2_str); j++)
- if (compat_elf_hwcap2 & (1 << j))
- seq_printf(m, " %s", compat_hwcap2_str[j]);
+ for (j = 0; j < ARRAY_SIZE(compat_hwcap2_str); j++)
+ if (compat_elf_hwcap2 & (1 << j))
+ seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
- } else {
- for (j = 0; j < ARRAY_SIZE(hwcap_str); j++)
- if (cpu_have_feature(j))
- seq_printf(m, " %s", hwcap_str[j]);
- }
- seq_puts(m, "\n");
-
- seq_printf(m, "CPU implementer\t: 0x%02x\n",
- MIDR_IMPLEMENTOR(midr));
- seq_printf(m, "CPU architecture: 8\n");
- seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
- seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
- seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
+ } else {
+ for (j = 0; j < ARRAY_SIZE(hwcap_str); j++)
+ if (cpu_have_feature(j))
+ seq_printf(m, " %s", hwcap_str[j]);
}
+ seq_puts(m, "\n");
+
+ seq_printf(m, "CPU implementer\t: 0x%02x\n",
+ MIDR_IMPLEMENTOR(midr));
+ seq_puts(m, "CPU architecture: 8\n");
+ seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
+ seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
+ seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
- return *pos < 1 ? (void *)1 : NULL;
+ *pos = cpumask_next(*pos - 1, cpu_online_mask);
+ return *pos < nr_cpu_ids ? &per_cpu(cpu_data, *pos) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
- return NULL;
+ return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
@@ -328,11 +327,13 @@ static const struct kobj_type cpuregs_kobj_type = {
CPUREGS_ATTR_RO(midr_el1, midr);
CPUREGS_ATTR_RO(revidr_el1, revidr);
+CPUREGS_ATTR_RO(aidr_el1, aidr);
CPUREGS_ATTR_RO(smidr_el1, smidr);
static struct attribute *cpuregs_id_attrs[] = {
&cpuregs_attr_midr_el1.attr,
&cpuregs_attr_revidr_el1.attr,
+ &cpuregs_attr_aidr_el1.attr,
NULL
};
@@ -469,6 +470,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_dczid = read_cpuid(DCZID_EL0);
info->reg_midr = read_cpuid_id();
info->reg_revidr = read_cpuid(REVIDR_EL1);
+ info->reg_aidr = read_cpuid(AIDR_EL1);
info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
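
The /proc/cpuinfo rework above replaces one big for_each_online_cpu() loop in c_show() with the usual seq_file iterator contract: *pos tracks the next online CPU and c_next() just bumps the position and re-runs c_start(). The sketch below imitates that contract over a plain bitmask; cpumask_next() and per_cpu() are replaced by trivial stand-ins.

#include <stdio.h>

#define NR_CPUS 8

static unsigned long online_mask = 0x2d;	/* CPUs 0, 2, 3 and 5 online */
static int cpu_data[NR_CPUS] = { 10, 11, 12, 13, 14, 15, 16, 17 };

static int next_online(long cpu)
{
	for (; cpu < NR_CPUS; cpu++)
		if (online_mask & (1UL << cpu))
			return cpu;
	return NR_CPUS;
}

static int *c_start(long *pos)
{
	*pos = next_online(*pos);
	return *pos < NR_CPUS ? &cpu_data[*pos] : NULL;
}

static int *c_next(long *pos)
{
	++*pos;
	return c_start(pos);
}

int main(void)
{
	long pos = 0;

	for (int *v = c_start(&pos); v; v = c_next(&pos))
		printf("processor %ld: data %d\n", pos, *v);
	return 0;
}
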
diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S
index 11d7f7de202d..329e8df9215f 100644
--- a/arch/arm64/kernel/efi-header.S
+++ b/arch/arm64/kernel/efi-header.S
@@ -28,7 +28,7 @@
.macro __EFI_PE_HEADER
#ifdef CONFIG_EFI
.set .Lpe_header_offset, . - .L_head
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
.short IMAGE_FILE_MACHINE_ARM64 // Machine
.short .Lsection_count // NumberOfSections
.long 0 // TimeDateStamp
@@ -40,7 +40,7 @@
IMAGE_FILE_LINE_NUMS_STRIPPED // Characteristics
.Loptional_header:
- .short PE_OPT_MAGIC_PE32PLUS // PE32+ format
+ .short IMAGE_NT_OPTIONAL_HDR64_MAGIC // PE32+ format
.byte 0x02 // MajorLinkerVersion
.byte 0x14 // MinorLinkerVersion
.long __initdata_begin - .Lefi_header_end // SizeOfCode
@@ -66,7 +66,7 @@
.long .Lefi_header_end - .L_head // SizeOfHeaders
.long 0 // CheckSum
.short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem
- .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT // DllCharacteristics
+ .short IMAGE_DLLCHARACTERISTICS_NX_COMPAT // DllCharacteristics
.quad 0 // SizeOfStackReserve
.quad 0 // SizeOfStackCommit
.quad 0 // SizeOfHeapReserve
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 1d25d8899dbf..3857fd7ee8d4 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -29,7 +29,7 @@ static bool region_is_misaligned(const efi_memory_desc_t *md)
* executable, everything else can be mapped with the XN bits
* set. Also take the new (optional) RO/XP bits into account.
*/
-static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
+static __init ptdesc_t create_mapping_protection(efi_memory_desc_t *md)
{
u64 attr = md->attribute;
u32 type = md->type;
@@ -83,7 +83,7 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
- pteval_t prot_val = create_mapping_protection(md);
+ ptdesc_t prot_val = create_mapping_protection(md);
bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_RUNTIME_SERVICES_DATA);
@@ -169,14 +169,14 @@ static DEFINE_RAW_SPINLOCK(efi_rt_lock);
void arch_efi_call_virt_setup(void)
{
efi_virtmap_load();
- __efi_fpsimd_begin();
raw_spin_lock(&efi_rt_lock);
+ __efi_fpsimd_begin();
}
void arch_efi_call_virt_teardown(void)
{
- raw_spin_unlock(&efi_rt_lock);
__efi_fpsimd_end();
+ raw_spin_unlock(&efi_rt_lock);
efi_virtmap_unload();
}
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index b260ddc4d3e9..7c1970b341b8 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -132,7 +132,7 @@ static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
do {
local_irq_enable();
- if (thread_flags & _TIF_NEED_RESCHED)
+ if (thread_flags & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
schedule();
if (thread_flags & _TIF_UPROBE)
@@ -393,20 +393,16 @@ static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
* As per the ABI exit SME streaming mode and clear the SVE state not
* shared with FPSIMD on syscall entry.
*/
-static inline void fp_user_discard(void)
+static inline void fpsimd_syscall_enter(void)
{
- /*
- * If SME is active then exit streaming mode. If ZA is active
- * then flush the SVE registers but leave userspace access to
- * both SVE and SME enabled, otherwise disable SME for the
- * task and fall through to disabling SVE too. This means
- * that after a syscall we never have any streaming mode
- * register state to track, if this changes the KVM code will
- * need updating.
- */
+ /* Ensure PSTATE.SM is clear, but leave PSTATE.ZA as-is. */
if (system_supports_sme())
sme_smstop_sm();
+ /*
+ * The CPU is not in streaming mode. If non-streaming SVE is not
+ * supported, there is no SVE state that needs to be discarded.
+ */
if (!system_supports_sve())
return;
@@ -416,6 +412,33 @@ static inline void fp_user_discard(void)
sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
sve_flush_live(true, sve_vq_minus_one);
}
+
+ /*
+ * Any live non-FPSIMD SVE state has been zeroed. Allow
+ * fpsimd_save_user_state() to lazily discard SVE state until either
+ * the live state is unbound or fpsimd_syscall_exit() is called.
+ */
+ __this_cpu_write(fpsimd_last_state.to_save, FP_STATE_FPSIMD);
+}
+
+static __always_inline void fpsimd_syscall_exit(void)
+{
+ if (!system_supports_sve())
+ return;
+
+ /*
+ * The current task's user FPSIMD/SVE/SME state is now bound to this
+ * CPU. The fpsimd_last_state.to_save value is either:
+ *
+ * - FP_STATE_FPSIMD, if the state has not been reloaded on this CPU
+ * since fpsimd_syscall_enter().
+ *
+ * - FP_STATE_CURRENT, if the state has been reloaded on this CPU at
+ * any point.
+ *
+ * Reset this to FP_STATE_CURRENT to stop lazy discarding.
+ */
+ __this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT);
}
UNHANDLED(el1t, 64, sync)
@@ -739,10 +762,11 @@ static void noinstr el0_svc(struct pt_regs *regs)
{
enter_from_user_mode(regs);
cortex_a76_erratum_1463225_svc_handler();
- fp_user_discard();
+ fpsimd_syscall_enter();
local_daif_restore(DAIF_PROCCTX);
do_el0_svc(regs);
exit_to_user_mode(regs);
+ fpsimd_syscall_exit();
}
static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
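
fpsimd_syscall_enter()/fpsimd_syscall_exit() above bracket a window in which the per-CPU fpsimd_last_state.to_save marker may stay at FP_STATE_FPSIMD, letting the save path skip the SVE part until the state is either rebound or the syscall returns. The sketch below models only that marker's life cycle; the enum values and function names are simplified stand-ins, not the kernel's.

#include <stdio.h>

enum save_scope { SAVE_CURRENT, SAVE_FPSIMD_ONLY };

static enum save_scope to_save = SAVE_CURRENT;

static void syscall_enter(void)
{
	/* Live SVE state was just zeroed; only the FPSIMD subset matters. */
	to_save = SAVE_FPSIMD_ONLY;
}

static void save_user_state(void)
{
	if (to_save == SAVE_FPSIMD_ONLY)
		puts("save FPSIMD subset only");
	else
		puts("save full SVE state");
}

static void reload_full_state(void)
{
	/* e.g. the task was scheduled out and its full state reloaded */
	to_save = SAVE_CURRENT;
}

static void syscall_exit(void)
{
	/* Stop lazy discarding before returning to userspace. */
	to_save = SAVE_CURRENT;
}

int main(void)
{
	syscall_enter();
	save_user_state();	/* FPSIMD subset only */
	reload_full_state();
	save_user_state();	/* full SVE state */
	syscall_exit();
	return 0;
}
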
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 8370d55f0353..c37f02d7194e 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -119,7 +119,7 @@
* whatever is in the FPSIMD registers is not saved to memory, but discarded.
*/
-static DEFINE_PER_CPU(struct cpu_fp_state, fpsimd_last_state);
+DEFINE_PER_CPU(struct cpu_fp_state, fpsimd_last_state);
__ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = {
#ifdef CONFIG_ARM64_SVE
@@ -180,12 +180,12 @@ static inline void set_sve_default_vl(int val)
set_default_vl(ARM64_VEC_SVE, val);
}
-static void __percpu *efi_sve_state;
+static u8 *efi_sve_state;
#else /* ! CONFIG_ARM64_SVE */
/* Dummy declaration for code that will be optimised out: */
-extern void __percpu *efi_sve_state;
+extern u8 *efi_sve_state;
#endif /* ! CONFIG_ARM64_SVE */
@@ -359,20 +359,15 @@ static void task_fpsimd_load(void)
WARN_ON(preemptible());
WARN_ON(test_thread_flag(TIF_KERNEL_FPSTATE));
- if (system_supports_fpmr())
- write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR);
-
if (system_supports_sve() || system_supports_sme()) {
switch (current->thread.fp_type) {
case FP_STATE_FPSIMD:
/* Stop tracking SVE for this task until next use. */
- if (test_and_clear_thread_flag(TIF_SVE))
- sve_user_disable();
+ clear_thread_flag(TIF_SVE);
break;
case FP_STATE_SVE:
- if (!thread_sm_enabled(&current->thread) &&
- !WARN_ON_ONCE(!test_and_set_thread_flag(TIF_SVE)))
- sve_user_enable();
+ if (!thread_sm_enabled(&current->thread))
+ WARN_ON_ONCE(!test_and_set_thread_flag(TIF_SVE));
if (test_thread_flag(TIF_SVE))
sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1);
@@ -413,6 +408,9 @@ static void task_fpsimd_load(void)
restore_ffr = system_supports_fa64();
}
+ if (system_supports_fpmr())
+ write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR);
+
if (restore_sve_regs) {
WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE);
sve_load_state(sve_pffr(&current->thread),
@@ -453,12 +451,15 @@ static void fpsimd_save_user_state(void)
*(last->fpmr) = read_sysreg_s(SYS_FPMR);
/*
- * If a task is in a syscall the ABI allows us to only
- * preserve the state shared with FPSIMD so don't bother
- * saving the full SVE state in that case.
+ * Save SVE state if it is live.
+ *
+ * The syscall ABI discards live SVE state at syscall entry. When
+ * entering a syscall, fpsimd_syscall_enter() sets to_save to
+ * FP_STATE_FPSIMD to allow the SVE state to be lazily discarded until
+ * either new SVE state is loaded+bound or fpsimd_syscall_exit() is
+ * called prior to a return to userspace.
*/
- if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE) &&
- !in_syscall(current_pt_regs())) ||
+ if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE)) ||
last->to_save == FP_STATE_SVE) {
save_sve_regs = true;
save_ffr = true;
@@ -651,7 +652,7 @@ static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
* task->thread.uw.fpsimd_state must be up to date before calling this
* function.
*/
-static void fpsimd_to_sve(struct task_struct *task)
+static inline void fpsimd_to_sve(struct task_struct *task)
{
unsigned int vq;
void *sst = task->thread.sve_state;
@@ -675,7 +676,7 @@ static void fpsimd_to_sve(struct task_struct *task)
* bytes of allocated kernel memory.
* task->thread.sve_state must be up to date before calling this function.
*/
-static void sve_to_fpsimd(struct task_struct *task)
+static inline void sve_to_fpsimd(struct task_struct *task)
{
unsigned int vq, vl;
void const *sst = task->thread.sve_state;
@@ -694,44 +695,39 @@ static void sve_to_fpsimd(struct task_struct *task)
}
}
-void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__always_unused p)
+static inline void __fpsimd_zero_vregs(struct user_fpsimd_state *fpsimd)
{
- write_sysreg_s(read_sysreg_s(SYS_SCTLR_EL1) | SCTLR_EL1_EnFPM_MASK,
- SYS_SCTLR_EL1);
+ memset(&fpsimd->vregs, 0, sizeof(fpsimd->vregs));
}
-#ifdef CONFIG_ARM64_SVE
/*
- * Call __sve_free() directly only if you know task can't be scheduled
- * or preempted.
+ * Simulate the effects of an SMSTOP SM instruction.
*/
-static void __sve_free(struct task_struct *task)
+void task_smstop_sm(struct task_struct *task)
{
- kfree(task->thread.sve_state);
- task->thread.sve_state = NULL;
-}
+ if (!thread_sm_enabled(&task->thread))
+ return;
-static void sve_free(struct task_struct *task)
-{
- WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
+ __fpsimd_zero_vregs(&task->thread.uw.fpsimd_state);
+ task->thread.uw.fpsimd_state.fpsr = 0x0800009f;
+ if (system_supports_fpmr())
+ task->thread.uw.fpmr = 0;
- __sve_free(task);
+ task->thread.svcr &= ~SVCR_SM_MASK;
+ task->thread.fp_type = FP_STATE_FPSIMD;
}
-/*
- * Return how many bytes of memory are required to store the full SVE
- * state for task, given task's currently configured vector length.
- */
-size_t sve_state_size(struct task_struct const *task)
+void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__always_unused p)
{
- unsigned int vl = 0;
-
- if (system_supports_sve())
- vl = task_get_sve_vl(task);
- if (system_supports_sme())
- vl = max(vl, task_get_sme_vl(task));
+ write_sysreg_s(read_sysreg_s(SYS_SCTLR_EL1) | SCTLR_EL1_EnFPM_MASK,
+ SYS_SCTLR_EL1);
+}
- return SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl));
+#ifdef CONFIG_ARM64_SVE
+static void sve_free(struct task_struct *task)
+{
+ kfree(task->thread.sve_state);
+ task->thread.sve_state = NULL;
}
/*
@@ -758,69 +754,34 @@ void sve_alloc(struct task_struct *task, bool flush)
kzalloc(sve_state_size(task), GFP_KERNEL);
}
-
-/*
- * Force the FPSIMD state shared with SVE to be updated in the SVE state
- * even if the SVE state is the current active state.
- *
- * This should only be called by ptrace. task must be non-runnable.
- * task->thread.sve_state must point to at least sve_state_size(task)
- * bytes of allocated kernel memory.
- */
-void fpsimd_force_sync_to_sve(struct task_struct *task)
-{
- fpsimd_to_sve(task);
-}
-
-/*
- * Ensure that task->thread.sve_state is up to date with respect to
- * the user task, irrespective of when SVE is in use or not.
- *
- * This should only be called by ptrace. task must be non-runnable.
- * task->thread.sve_state must point to at least sve_state_size(task)
- * bytes of allocated kernel memory.
- */
-void fpsimd_sync_to_sve(struct task_struct *task)
-{
- if (!test_tsk_thread_flag(task, TIF_SVE) &&
- !thread_sm_enabled(&task->thread))
- fpsimd_to_sve(task);
-}
-
/*
- * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
- * the user task, irrespective of whether SVE is in use or not.
+ * Ensure that task->thread.uw.fpsimd_state is up to date with respect to the
+ * task's currently effective FPSIMD/SVE state.
*
- * This should only be called by ptrace. task must be non-runnable.
- * task->thread.sve_state must point to at least sve_state_size(task)
- * bytes of allocated kernel memory.
+ * The task's FPSIMD/SVE/SME state must not be subject to concurrent
+ * manipulation.
*/
-void sve_sync_to_fpsimd(struct task_struct *task)
+void fpsimd_sync_from_effective_state(struct task_struct *task)
{
if (task->thread.fp_type == FP_STATE_SVE)
sve_to_fpsimd(task);
}
/*
- * Ensure that task->thread.sve_state is up to date with respect to
- * the task->thread.uw.fpsimd_state.
+ * Ensure that the task's currently effective FPSIMD/SVE state is up to date
+ * with respect to task->thread.uw.fpsimd_state, zeroing any effective
+ * non-FPSIMD (S)SVE state.
*
- * This should only be called by ptrace to merge new FPSIMD register
- * values into a task for which SVE is currently active.
- * task must be non-runnable.
- * task->thread.sve_state must point to at least sve_state_size(task)
- * bytes of allocated kernel memory.
- * task->thread.uw.fpsimd_state must already have been initialised with
- * the new FPSIMD register values to be merged in.
+ * The task's FPSIMD/SVE/SME state must not be subject to concurrent
+ * manipulation.
*/
-void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
+void fpsimd_sync_to_effective_state_zeropad(struct task_struct *task)
{
unsigned int vq;
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
- if (!test_tsk_thread_flag(task, TIF_SVE) &&
- !thread_sm_enabled(&task->thread))
+ if (task->thread.fp_type != FP_STATE_SVE)
return;
vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
@@ -829,10 +790,73 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
__fpsimd_to_sve(sst, fst, vq);
}
+static int change_live_vector_length(struct task_struct *task,
+ enum vec_type type,
+ unsigned long vl)
+{
+ unsigned int sve_vl = task_get_sve_vl(task);
+ unsigned int sme_vl = task_get_sme_vl(task);
+ void *sve_state = NULL, *sme_state = NULL;
+
+ if (type == ARM64_VEC_SME)
+ sme_vl = vl;
+ else
+ sve_vl = vl;
+
+ /*
+ * Allocate the new sve_state and sme_state before freeing the old
+ * copies so that allocation failure can be handled without needing to
+ * mutate the task's state in any way.
+ *
+ * Changes to the SVE vector length must not discard live ZA state or
+ * clear PSTATE.ZA, as userspace code which is unaware of the AAPCS64
+ * ZA lazy saving scheme may attempt to change the SVE vector length
+ * while unsaved/dormant ZA state exists.
+ */
+ sve_state = kzalloc(__sve_state_size(sve_vl, sme_vl), GFP_KERNEL);
+ if (!sve_state)
+ goto out_mem;
+
+ if (type == ARM64_VEC_SME) {
+ sme_state = kzalloc(__sme_state_size(sme_vl), GFP_KERNEL);
+ if (!sme_state)
+ goto out_mem;
+ }
+
+ if (task == current)
+ fpsimd_save_and_flush_current_state();
+ else
+ fpsimd_flush_task_state(task);
+
+ /*
+ * Always preserve PSTATE.SM and the effective FPSIMD state, zeroing
+ * other SVE state.
+ */
+ fpsimd_sync_from_effective_state(task);
+ task_set_vl(task, type, vl);
+ kfree(task->thread.sve_state);
+ task->thread.sve_state = sve_state;
+ fpsimd_sync_to_effective_state_zeropad(task);
+
+ if (type == ARM64_VEC_SME) {
+ task->thread.svcr &= ~SVCR_ZA_MASK;
+ kfree(task->thread.sme_state);
+ task->thread.sme_state = sme_state;
+ }
+
+ return 0;
+
+out_mem:
+ kfree(sve_state);
+ kfree(sme_state);
+ return -ENOMEM;
+}
+
int vec_set_vector_length(struct task_struct *task, enum vec_type type,
unsigned long vl, unsigned long flags)
{
- bool free_sme = false;
+ bool onexec = flags & PR_SVE_SET_VL_ONEXEC;
+ bool inherit = flags & PR_SVE_VL_INHERIT;
if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
PR_SVE_SET_VL_ONEXEC))
@@ -852,71 +876,17 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
vl = find_supported_vector_length(type, vl);
- if (flags & (PR_SVE_VL_INHERIT |
- PR_SVE_SET_VL_ONEXEC))
+ if (!onexec && vl != task_get_vl(task, type)) {
+ if (change_live_vector_length(task, type, vl))
+ return -ENOMEM;
+ }
+
+ if (onexec || inherit)
task_set_vl_onexec(task, type, vl);
else
/* Reset VL to system default on next exec: */
task_set_vl_onexec(task, type, 0);
- /* Only actually set the VL if not deferred: */
- if (flags & PR_SVE_SET_VL_ONEXEC)
- goto out;
-
- if (vl == task_get_vl(task, type))
- goto out;
-
- /*
- * To ensure the FPSIMD bits of the SVE vector registers are preserved,
- * write any live register state back to task_struct, and convert to a
- * regular FPSIMD thread.
- */
- if (task == current) {
- get_cpu_fpsimd_context();
-
- fpsimd_save_user_state();
- }
-
- fpsimd_flush_task_state(task);
- if (test_and_clear_tsk_thread_flag(task, TIF_SVE) ||
- thread_sm_enabled(&task->thread)) {
- sve_to_fpsimd(task);
- task->thread.fp_type = FP_STATE_FPSIMD;
- }
-
- if (system_supports_sme()) {
- if (type == ARM64_VEC_SME ||
- !(task->thread.svcr & (SVCR_SM_MASK | SVCR_ZA_MASK))) {
- /*
- * We are changing the SME VL or weren't using
- * SME anyway, discard the state and force a
- * reallocation.
- */
- task->thread.svcr &= ~(SVCR_SM_MASK |
- SVCR_ZA_MASK);
- clear_tsk_thread_flag(task, TIF_SME);
- free_sme = true;
- }
- }
-
- if (task == current)
- put_cpu_fpsimd_context();
-
- task_set_vl(task, type, vl);
-
- /*
- * Free the changed states if they are not in use, SME will be
- * reallocated to the correct size on next use and we just
- * allocate SVE now in case it is needed for use in streaming
- * mode.
- */
- sve_free(task);
- sve_alloc(task, true);
-
- if (free_sme)
- sme_free(task);
-
-out:
update_tsk_thread_flag(task, vec_vl_inherit_flag(type),
flags & PR_SVE_VL_INHERIT);
@@ -1131,15 +1101,15 @@ static void __init sve_efi_setup(void)
if (!sve_vl_valid(max_vl))
goto fail;
- efi_sve_state = __alloc_percpu(
- SVE_SIG_REGS_SIZE(sve_vq_from_vl(max_vl)), SVE_VQ_BYTES);
+ efi_sve_state = kmalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(max_vl)),
+ GFP_KERNEL);
if (!efi_sve_state)
goto fail;
return;
fail:
- panic("Cannot allocate percpu memory for EFI SVE save/restore");
+ panic("Cannot allocate memory for EFI SVE save/restore");
}
void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
@@ -1212,7 +1182,7 @@ void __init sve_setup(void)
*/
void fpsimd_release_task(struct task_struct *dead_task)
{
- __sve_free(dead_task);
+ sve_free(dead_task);
sme_free(dead_task);
}
@@ -1436,7 +1406,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
* If this not a trap due to SME being disabled then something
* is being used in the wrong mode, report as SIGILL.
*/
- if (ESR_ELx_ISS(esr) != ESR_ELx_SME_ISS_SME_DISABLED) {
+ if (ESR_ELx_SME_ISS_SMTC(esr) != ESR_ELx_SME_ISS_SMTC_SME_DISABLED) {
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
return;
}
@@ -1460,6 +1430,8 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
sme_set_vq(vq_minus_one);
fpsimd_bind_task_to_cpu();
+ } else {
+ fpsimd_flush_task_state(current);
}
put_cpu_fpsimd_context();
@@ -1573,8 +1545,8 @@ void fpsimd_thread_switch(struct task_struct *next)
fpsimd_save_user_state();
if (test_tsk_thread_flag(next, TIF_KERNEL_FPSTATE)) {
- fpsimd_load_kernel_state(next);
fpsimd_flush_cpu_state();
+ fpsimd_load_kernel_state(next);
} else {
/*
* Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
@@ -1661,6 +1633,9 @@ void fpsimd_flush_thread(void)
current->thread.svcr = 0;
}
+ if (system_supports_fpmr())
+ current->thread.uw.fpmr = 0;
+
current->thread.fp_type = FP_STATE_FPSIMD;
put_cpu_fpsimd_context();
@@ -1683,18 +1658,6 @@ void fpsimd_preserve_current_state(void)
}
/*
- * Like fpsimd_preserve_current_state(), but ensure that
- * current->thread.uw.fpsimd_state is updated so that it can be copied to
- * the signal frame.
- */
-void fpsimd_signal_preserve_current_state(void)
-{
- fpsimd_preserve_current_state();
- if (current->thread.fp_type == FP_STATE_SVE)
- sve_to_fpsimd(current);
-}
-
-/*
* Associate current's FPSIMD context with this cpu
* The caller must have ownership of the cpu FPSIMD context before calling
* this function.
@@ -1786,30 +1749,14 @@ void fpsimd_restore_current_state(void)
put_cpu_fpsimd_context();
}
-/*
- * Load an updated userland FPSIMD state for 'current' from memory and set the
- * flag that indicates that the FPSIMD register contents are the most recent
- * FPSIMD state of 'current'. This is used by the signal code to restore the
- * register state when returning from a signal handler in FPSIMD only cases,
- * any SVE context will be discarded.
- */
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
if (WARN_ON(!system_supports_fpsimd()))
return;
- get_cpu_fpsimd_context();
-
current->thread.uw.fpsimd_state = *state;
- if (test_thread_flag(TIF_SVE))
+ if (current->thread.fp_type == FP_STATE_SVE)
fpsimd_to_sve(current);
-
- task_fpsimd_load();
- fpsimd_bind_task_to_cpu();
-
- clear_thread_flag(TIF_FOREIGN_FPSTATE);
-
- put_cpu_fpsimd_context();
}
/*
@@ -1839,6 +1786,17 @@ void fpsimd_flush_task_state(struct task_struct *t)
barrier();
}
+void fpsimd_save_and_flush_current_state(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ get_cpu_fpsimd_context();
+ fpsimd_save_user_state();
+ fpsimd_flush_task_state(current);
+ put_cpu_fpsimd_context();
+}
+
/*
* Save the FPSIMD state to memory and invalidate cpu view.
* This function must be called with preemption disabled.
@@ -1948,10 +1906,10 @@ EXPORT_SYMBOL_GPL(kernel_neon_end);
#ifdef CONFIG_EFI
-static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
-static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
-static DEFINE_PER_CPU(bool, efi_sve_state_used);
-static DEFINE_PER_CPU(bool, efi_sm_state);
+static struct user_fpsimd_state efi_fpsimd_state;
+static bool efi_fpsimd_state_used;
+static bool efi_sve_state_used;
+static bool efi_sm_state;
/*
* EFI runtime services support functions
@@ -1984,18 +1942,16 @@ void __efi_fpsimd_begin(void)
* If !efi_sve_state, SVE can't be in use yet and doesn't need
* preserving:
*/
- if (system_supports_sve() && likely(efi_sve_state)) {
- char *sve_state = this_cpu_ptr(efi_sve_state);
+ if (system_supports_sve() && efi_sve_state != NULL) {
bool ffr = true;
u64 svcr;
- __this_cpu_write(efi_sve_state_used, true);
+ efi_sve_state_used = true;
if (system_supports_sme()) {
svcr = read_sysreg_s(SYS_SVCR);
- __this_cpu_write(efi_sm_state,
- svcr & SVCR_SM_MASK);
+ efi_sm_state = svcr & SVCR_SM_MASK;
/*
* Unless we have FA64 FFR does not
@@ -2005,19 +1961,18 @@ void __efi_fpsimd_begin(void)
ffr = !(svcr & SVCR_SM_MASK);
}
- sve_save_state(sve_state + sve_ffr_offset(sve_max_vl()),
- &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
- ffr);
+ sve_save_state(efi_sve_state + sve_ffr_offset(sve_max_vl()),
+ &efi_fpsimd_state.fpsr, ffr);
if (system_supports_sme())
sysreg_clear_set_s(SYS_SVCR,
SVCR_SM_MASK, 0);
} else {
- fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
+ fpsimd_save_state(&efi_fpsimd_state);
}
- __this_cpu_write(efi_fpsimd_state_used, true);
+ efi_fpsimd_state_used = true;
}
}
@@ -2029,12 +1984,10 @@ void __efi_fpsimd_end(void)
if (!system_supports_fpsimd())
return;
- if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
+ if (!efi_fpsimd_state_used) {
kernel_neon_end();
} else {
- if (system_supports_sve() &&
- likely(__this_cpu_read(efi_sve_state_used))) {
- char const *sve_state = this_cpu_ptr(efi_sve_state);
+ if (system_supports_sve() && efi_sve_state_used) {
bool ffr = true;
/*
@@ -2043,7 +1996,7 @@ void __efi_fpsimd_end(void)
* streaming mode.
*/
if (system_supports_sme()) {
- if (__this_cpu_read(efi_sm_state)) {
+ if (efi_sm_state) {
sysreg_clear_set_s(SYS_SVCR,
0,
SVCR_SM_MASK);
@@ -2057,14 +2010,15 @@ void __efi_fpsimd_end(void)
}
}
- sve_load_state(sve_state + sve_ffr_offset(sve_max_vl()),
- &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
- ffr);
+ sve_load_state(efi_sve_state + sve_ffr_offset(sve_max_vl()),
+ &efi_fpsimd_state.fpsr, ffr);
- __this_cpu_write(efi_sve_state_used, false);
+ efi_sve_state_used = false;
} else {
- fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
+ fpsimd_load_state(&efi_fpsimd_state);
}
+
+ efi_fpsimd_state_used = false;
}
}
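
Among the fpsimd.c changes above, change_live_vector_length() uses an allocate-then-commit pattern: both replacement buffers are allocated before anything in the task is touched, so an allocation failure can return -ENOMEM without mutating state. A reduced user-space sketch of the same pattern, with calloc standing in for kzalloc and the buffer sizes purely illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct thread { void *sve_state; void *sme_state; unsigned vl; };

static int change_vl(struct thread *t, unsigned new_vl, int want_sme)
{
	void *sve = NULL, *sme = NULL;

	/* Allocate everything first; nothing in *t has been modified yet. */
	sve = calloc(1, 32u * new_vl);
	if (!sve)
		goto out_mem;
	if (want_sme) {
		sme = calloc(1, 64u * new_vl);
		if (!sme)
			goto out_mem;
	}

	/* Commit: only now free the old buffers and mutate the task state. */
	free(t->sve_state);
	t->sve_state = sve;
	if (want_sme) {
		free(t->sme_state);
		t->sme_state = sme;
	}
	t->vl = new_vl;
	return 0;

out_mem:
	free(sve);
	free(sme);
	return -ENOMEM;
}

int main(void)
{
	struct thread t = { 0 };
	printf("%d\n", change_vl(&t, 16, 1));
	free(t.sve_state);
	free(t.sme_state);
	return 0;
}
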
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 2ce73525de2c..ca04b338cb0d 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -89,7 +89,7 @@ SYM_CODE_START(primary_entry)
adrp x1, early_init_stack
mov sp, x1
mov x29, xzr
- adrp x0, init_idmap_pg_dir
+ adrp x0, __pi_init_idmap_pg_dir
mov x1, xzr
bl __pi_create_init_idmap
@@ -101,7 +101,7 @@ SYM_CODE_START(primary_entry)
cbnz x19, 0f
dmb sy
mov x1, x0 // end of used region
- adrp x0, init_idmap_pg_dir
+ adrp x0, __pi_init_idmap_pg_dir
adr_l x2, dcache_inval_poc
blr x2
b 1f
@@ -507,7 +507,7 @@ SYM_FUNC_END(__no_granule_support)
SYM_FUNC_START_LOCAL(__primary_switch)
adrp x1, reserved_pg_dir
- adrp x2, init_idmap_pg_dir
+ adrp x2, __pi_init_idmap_pg_dir
bl __enable_mmu
adrp x1, early_init_stack
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index ae990da1eae5..36e2d26b54f5 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -97,7 +97,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
2:
// Engage the VHE magic!
mov_q x0, HCR_HOST_VHE_FLAGS
- msr hcr_el2, x0
+ msr_hcr_el2 x0
isb
// Use the EL1 allocated stack, per-cpu offset
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 5e3c4b58f279..5a69b6eb4090 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -10,6 +10,12 @@
#error This file should only be included in vmlinux.lds.S
#endif
+#define PI_EXPORT_SYM(sym) \
+ __PI_EXPORT_SYM(sym, __pi_ ## sym, Cannot export BSS symbol sym to startup code)
+#define __PI_EXPORT_SYM(sym, pisym, msg)\
+ PROVIDE(pisym = sym); \
+ ASSERT((sym - KIMAGE_VADDR) < (__bss_start - KIMAGE_VADDR), #msg)
+
PROVIDE(__efistub_primary_entry = primary_entry);
/*
@@ -36,41 +42,30 @@ PROVIDE(__pi___memcpy = __pi_memcpy);
PROVIDE(__pi___memmove = __pi_memmove);
PROVIDE(__pi___memset = __pi_memset);
-PROVIDE(__pi_id_aa64isar1_override = id_aa64isar1_override);
-PROVIDE(__pi_id_aa64isar2_override = id_aa64isar2_override);
-PROVIDE(__pi_id_aa64mmfr0_override = id_aa64mmfr0_override);
-PROVIDE(__pi_id_aa64mmfr1_override = id_aa64mmfr1_override);
-PROVIDE(__pi_id_aa64mmfr2_override = id_aa64mmfr2_override);
-PROVIDE(__pi_id_aa64pfr0_override = id_aa64pfr0_override);
-PROVIDE(__pi_id_aa64pfr1_override = id_aa64pfr1_override);
-PROVIDE(__pi_id_aa64smfr0_override = id_aa64smfr0_override);
-PROVIDE(__pi_id_aa64zfr0_override = id_aa64zfr0_override);
-PROVIDE(__pi_arm64_sw_feature_override = arm64_sw_feature_override);
-PROVIDE(__pi_arm64_use_ng_mappings = arm64_use_ng_mappings);
-#ifdef CONFIG_CAVIUM_ERRATUM_27456
-PROVIDE(__pi_cavium_erratum_27456_cpus = cavium_erratum_27456_cpus);
-PROVIDE(__pi_is_midr_in_range_list = is_midr_in_range_list);
-#endif
-PROVIDE(__pi__ctype = _ctype);
-PROVIDE(__pi_memstart_offset_seed = memstart_offset_seed);
-
-PROVIDE(__pi_init_idmap_pg_dir = init_idmap_pg_dir);
-PROVIDE(__pi_init_idmap_pg_end = init_idmap_pg_end);
-PROVIDE(__pi_init_pg_dir = init_pg_dir);
-PROVIDE(__pi_init_pg_end = init_pg_end);
-PROVIDE(__pi_swapper_pg_dir = swapper_pg_dir);
-
-PROVIDE(__pi__text = _text);
-PROVIDE(__pi__stext = _stext);
-PROVIDE(__pi__etext = _etext);
-PROVIDE(__pi___start_rodata = __start_rodata);
-PROVIDE(__pi___inittext_begin = __inittext_begin);
-PROVIDE(__pi___inittext_end = __inittext_end);
-PROVIDE(__pi___initdata_begin = __initdata_begin);
-PROVIDE(__pi___initdata_end = __initdata_end);
-PROVIDE(__pi__data = _data);
-PROVIDE(__pi___bss_start = __bss_start);
-PROVIDE(__pi__end = _end);
+PI_EXPORT_SYM(id_aa64isar1_override);
+PI_EXPORT_SYM(id_aa64isar2_override);
+PI_EXPORT_SYM(id_aa64mmfr0_override);
+PI_EXPORT_SYM(id_aa64mmfr1_override);
+PI_EXPORT_SYM(id_aa64mmfr2_override);
+PI_EXPORT_SYM(id_aa64pfr0_override);
+PI_EXPORT_SYM(id_aa64pfr1_override);
+PI_EXPORT_SYM(id_aa64smfr0_override);
+PI_EXPORT_SYM(id_aa64zfr0_override);
+PI_EXPORT_SYM(arm64_sw_feature_override);
+PI_EXPORT_SYM(arm64_use_ng_mappings);
+PI_EXPORT_SYM(_ctype);
+
+PI_EXPORT_SYM(swapper_pg_dir);
+
+PI_EXPORT_SYM(_text);
+PI_EXPORT_SYM(_stext);
+PI_EXPORT_SYM(_etext);
+PI_EXPORT_SYM(__start_rodata);
+PI_EXPORT_SYM(__inittext_begin);
+PI_EXPORT_SYM(__inittext_end);
+PI_EXPORT_SYM(__initdata_begin);
+PI_EXPORT_SYM(__initdata_end);
+PI_EXPORT_SYM(_data);
#ifdef CONFIG_KVM
@@ -131,6 +126,8 @@ KVM_NVHE_ALIAS(__hyp_text_start);
KVM_NVHE_ALIAS(__hyp_text_end);
KVM_NVHE_ALIAS(__hyp_bss_start);
KVM_NVHE_ALIAS(__hyp_bss_end);
+KVM_NVHE_ALIAS(__hyp_data_start);
+KVM_NVHE_ALIAS(__hyp_data_end);
KVM_NVHE_ALIAS(__hyp_rodata_start);
KVM_NVHE_ALIAS(__hyp_rodata_end);
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 1da3e25f9d9e..c9503ed45a6c 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -10,8 +10,6 @@
#include <asm/cpufeature.h>
#include <asm/memory.h>
-u16 __initdata memstart_offset_seed;
-
bool __ro_after_init __kaslr_is_enabled = false;
void __init kaslr_init(void)
diff --git a/arch/arm64/kernel/pi/kaslr_early.c b/arch/arm64/kernel/pi/kaslr_early.c
index 0257b43819db..e0e018046a46 100644
--- a/arch/arm64/kernel/pi/kaslr_early.c
+++ b/arch/arm64/kernel/pi/kaslr_early.c
@@ -18,8 +18,6 @@
#include "pi.h"
-extern u16 memstart_offset_seed;
-
static u64 __init get_kaslr_seed(void *fdt, int node)
{
static char const seed_str[] __initconst = "kaslr-seed";
@@ -53,8 +51,6 @@ u64 __init kaslr_early_init(void *fdt, int chosen)
return 0;
}
- memstart_offset_seed = seed & U16_MAX;
-
/*
* OK, so we are proceeding with KASLR enabled. Calculate a suitable
* kernel image offset from the seed. Let's place the kernel in the
diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
index e57b043f324b..0f4bd7771859 100644
--- a/arch/arm64/kernel/pi/map_kernel.c
+++ b/arch/arm64/kernel/pi/map_kernel.c
@@ -159,7 +159,7 @@ static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
static void __init remap_idmap_for_lpa2(void)
{
/* clear the bits that change meaning once LPA2 is turned on */
- pteval_t mask = PTE_SHARED;
+ ptdesc_t mask = PTE_SHARED;
/*
* We have to clear bits [9:8] in all block or page descriptors in the
@@ -207,6 +207,29 @@ static void __init map_fdt(u64 fdt)
dsb(ishst);
}
+/*
+ * PI version of the Cavium Eratum 27456 detection, which makes it
+ * impossible to use non-global mappings.
+ */
+static bool __init ng_mappings_allowed(void)
+{
+ static const struct midr_range cavium_erratum_27456_cpus[] __initconst = {
+ /* Cavium ThunderX, T88 pass 1.x - 2.1 */
+ MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
+ /* Cavium ThunderX, T81 pass 1.0 */
+ MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
+ {},
+ };
+
+ for (const struct midr_range *r = cavium_erratum_27456_cpus; r->model; r++) {
+ if (midr_is_cpu_model_range(read_cpuid_id(), r->model,
+ r->rv_min, r->rv_max))
+ return false;
+ }
+
+ return true;
+}
+
asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
{
static char const chosen_str[] __initconst = "/chosen";
@@ -246,7 +269,7 @@ asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
u64 kaslr_seed = kaslr_early_init(fdt, chosen);
if (kaslr_seed && kaslr_requires_kpti())
- arm64_use_ng_mappings = true;
+ arm64_use_ng_mappings = ng_mappings_allowed();
kaslr_offset |= kaslr_seed & ~(MIN_KIMG_ALIGN - 1);
}
diff --git a/arch/arm64/kernel/pi/map_range.c b/arch/arm64/kernel/pi/map_range.c
index 81345f68f9fc..7982788e7b9a 100644
--- a/arch/arm64/kernel/pi/map_range.c
+++ b/arch/arm64/kernel/pi/map_range.c
@@ -30,7 +30,7 @@ void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
int level, pte_t *tbl, bool may_use_cont, u64 va_offset)
{
u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
- pteval_t protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
+ ptdesc_t protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
int lshift = (3 - level) * PTDESC_TABLE_SHIFT;
u64 lmask = (PAGE_SIZE << lshift) - 1;
@@ -87,7 +87,7 @@ void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
}
}
-asmlinkage u64 __init create_init_idmap(pgd_t *pg_dir, pteval_t clrmask)
+asmlinkage u64 __init create_init_idmap(pgd_t *pg_dir, ptdesc_t clrmask)
{
u64 ptep = (u64)pg_dir + PAGE_SIZE;
pgprot_t text_prot = PAGE_KERNEL_ROX;
diff --git a/arch/arm64/kernel/pi/pi.h b/arch/arm64/kernel/pi/pi.h
index c91e5e965cd3..46cafee7829f 100644
--- a/arch/arm64/kernel/pi/pi.h
+++ b/arch/arm64/kernel/pi/pi.h
@@ -22,6 +22,7 @@ static inline void *prel64_to_pointer(const prel64_t *offset)
extern bool dynamic_scs_is_enabled;
extern pgd_t init_idmap_pg_dir[], init_idmap_pg_end[];
+extern pgd_t init_pg_dir[], init_pg_end[];
void init_feature_override(u64 boot_status, const void *fdt, int chosen);
u64 kaslr_early_init(void *fdt, int chosen);
@@ -33,4 +34,4 @@ void map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot,
asmlinkage void early_map_kernel(u64 boot_status, void *fdt);
-asmlinkage u64 create_init_idmap(pgd_t *pgd, pteval_t clrmask);
+asmlinkage u64 create_init_idmap(pgd_t *pgd, ptdesc_t clrmask);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 42faebb7b712..a5ca15daeb8a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -344,50 +344,34 @@ void arch_release_task_struct(struct task_struct *tsk)
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- if (current->mm)
- fpsimd_preserve_current_state();
+ /*
+ * The current/src task's FPSIMD state may or may not be live, and may
+ * have been altered by ptrace after entry to the kernel. Save the
+ * effective FPSIMD state so that this will be copied into dst.
+ */
+ fpsimd_save_and_flush_current_state();
+ fpsimd_sync_from_effective_state(src);
+
*dst = *src;
/*
- * Detach src's sve_state (if any) from dst so that it does not
- * get erroneously used or freed prematurely. dst's copies
- * will be allocated on demand later on if dst uses SVE.
- * For consistency, also clear TIF_SVE here: this could be done
- * later in copy_process(), but to avoid tripping up future
- * maintainers it is best not to leave TIF flags and buffers in
- * an inconsistent state, even temporarily.
+ * Drop stale reference to src's sve_state and convert dst to
+ * non-streaming FPSIMD mode.
*/
+ dst->thread.fp_type = FP_STATE_FPSIMD;
dst->thread.sve_state = NULL;
clear_tsk_thread_flag(dst, TIF_SVE);
+ task_smstop_sm(dst);
/*
- * In the unlikely event that we create a new thread with ZA
- * enabled we should retain the ZA and ZT state so duplicate
- * it here. This may be shortly freed if we exec() or if
- * CLONE_SETTLS but it's simpler to do it here. To avoid
- * confusing the rest of the code ensure that we have a
- * sve_state allocated whenever sme_state is allocated.
+ * Drop stale reference to src's sme_state and ensure dst has ZA
+ * disabled.
+ *
+ * When necessary, ZA will be inherited later in copy_thread_za().
*/
- if (thread_za_enabled(&src->thread)) {
- dst->thread.sve_state = kzalloc(sve_state_size(src),
- GFP_KERNEL);
- if (!dst->thread.sve_state)
- return -ENOMEM;
-
- dst->thread.sme_state = kmemdup(src->thread.sme_state,
- sme_state_size(src),
- GFP_KERNEL);
- if (!dst->thread.sme_state) {
- kfree(dst->thread.sve_state);
- dst->thread.sve_state = NULL;
- return -ENOMEM;
- }
- } else {
- dst->thread.sme_state = NULL;
- clear_tsk_thread_flag(dst, TIF_SME);
- }
-
- dst->thread.fp_type = FP_STATE_FPSIMD;
+ dst->thread.sme_state = NULL;
+ clear_tsk_thread_flag(dst, TIF_SME);
+ dst->thread.svcr &= ~SVCR_ZA_MASK;
/* clear any pending asynchronous tag fault raised by the parent */
clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
@@ -395,6 +379,31 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
+static int copy_thread_za(struct task_struct *dst, struct task_struct *src)
+{
+ if (!thread_za_enabled(&src->thread))
+ return 0;
+
+ dst->thread.sve_state = kzalloc(sve_state_size(src),
+ GFP_KERNEL);
+ if (!dst->thread.sve_state)
+ return -ENOMEM;
+
+ dst->thread.sme_state = kmemdup(src->thread.sme_state,
+ sme_state_size(src),
+ GFP_KERNEL);
+ if (!dst->thread.sme_state) {
+ kfree(dst->thread.sve_state);
+ dst->thread.sve_state = NULL;
+ return -ENOMEM;
+ }
+
+ set_tsk_thread_flag(dst, TIF_SME);
+ dst->thread.svcr |= SVCR_ZA_MASK;
+
+ return 0;
+}
+
asmlinkage void ret_from_fork(void) asm("ret_from_fork");
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
@@ -427,8 +436,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
* out-of-sync with the saved value.
*/
*task_user_tls(p) = read_sysreg(tpidr_el0);
- if (system_supports_tpidr2())
- p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
if (system_supports_poe())
p->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
@@ -441,13 +448,39 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
}
/*
+ * Due to the AAPCS64 "ZA lazy saving scheme", PSTATE.ZA and
+ * TPIDR2 need to be manipulated as a pair, and either both
+ * need to be inherited or both need to be reset.
+ *
+ * Within a process, child threads must not inherit their
+ * parent's TPIDR2 value or they may clobber their parent's
+ * stack at some later point.
+ *
+ * When a process is fork()'d, the child must inherit ZA and
+ * TPIDR2 from its parent in case there was dormant ZA state.
+ *
+ * Use CLONE_VM to determine when the child will share the
+ * address space with the parent, and cannot safely inherit the
+ * state.
+ */
+ if (system_supports_sme()) {
+ if (!(clone_flags & CLONE_VM)) {
+ p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
+ ret = copy_thread_za(p, current);
+ if (ret)
+ return ret;
+ } else {
+ p->thread.tpidr2_el0 = 0;
+ WARN_ON_ONCE(p->thread.svcr & SVCR_ZA_MASK);
+ }
+ }
+
+ /*
* If a TLS pointer was passed to clone, use it for the new
- * thread. We also reset TPIDR2 if it's in use.
+ * thread.
*/
- if (clone_flags & CLONE_SETTLS) {
+ if (clone_flags & CLONE_SETTLS)
p->thread.uw.tp_value = tls;
- p->thread.tpidr2_el0 = 0;
- }
ret = copy_thread_gcs(p, args);
if (ret != 0)
@@ -680,10 +713,11 @@ struct task_struct *__switch_to(struct task_struct *prev,
gcs_thread_switch(next);
/*
- * Complete any pending TLB or cache maintenance on this CPU in case
- * the thread migrates to a different CPU.
- * This full barrier is also required by the membarrier system
- * call.
+ * Complete any pending TLB or cache maintenance on this CPU in case the
+ * thread migrates to a different CPU. This full barrier is also
+ * required by the membarrier system call. Additionally it makes any
+ * in-progress pgtable writes visible to the table walker; See
+ * emit_pte_barriers().
*/
dsb(ish);
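
The copy_thread() changes above key the ZA/TPIDR2 decision off CLONE_VM: a plain fork() inherits both so dormant ZA state survives, while a thread that shares the address space has both reset so it can never clobber the parent's lazy-save buffer. A condensed model of that decision; the structure and field names are illustrative and only the CLONE_VM test mirrors the kernel logic:

#define _GNU_SOURCE
#include <sched.h>   /* CLONE_VM */
#include <stdint.h>
#include <stdio.h>

struct child_fp { uint64_t tpidr2; int za_enabled; };

static void setup_child_fp(struct child_fp *child,
			   uint64_t parent_tpidr2, int parent_za,
			   unsigned long clone_flags)
{
	if (!(clone_flags & CLONE_VM)) {
		/* fork(): keep the pair together so dormant ZA is preserved */
		child->tpidr2 = parent_tpidr2;
		child->za_enabled = parent_za;
	} else {
		/* new thread: reset both, never inherit the parent's buffer */
		child->tpidr2 = 0;
		child->za_enabled = 0;
	}
}

int main(void)
{
	struct child_fp forked, threaded;

	setup_child_fp(&forked, 0x1000, 1, 0);
	setup_child_fp(&threaded, 0x1000, 1, CLONE_VM);
	printf("fork:   tpidr2=%#lx za=%d\n",
	       (unsigned long)forked.tpidr2, forked.za_enabled);
	printf("thread: tpidr2=%#lx za=%d\n",
	       (unsigned long)threaded.tpidr2, threaded.za_enabled);
	return 0;
}
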
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index b198dde79e59..edf1783ffc81 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -879,16 +879,19 @@ static u8 spectre_bhb_loop_affected(void)
static const struct midr_range spectre_bhb_k132_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+ {},
};
static const struct midr_range spectre_bhb_k38_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+ {},
};
static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
@@ -997,6 +1000,11 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
return true;
}
+u8 get_spectre_bhb_loop_value(void)
+{
+ return max_bhb_k;
+}
+
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
const char *v = arm64_get_bp_hardening_vector(slot);
@@ -1014,7 +1022,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
isb();
}
-static bool __read_mostly __nospectre_bhb;
+bool __read_mostly __nospectre_bhb;
static int __init parse_spectre_bhb_param(char *str)
{
__nospectre_bhb = true;
@@ -1092,6 +1100,11 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
update_mitigation_state(&spectre_bhb_state, state);
}
+bool is_spectre_bhb_fw_mitigated(void)
+{
+ return test_bit(BHB_FW, &system_bhb_mitigations);
+}
+
/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
__le32 *origptr,
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index f79b0d5f71ac..a360e52db02f 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -594,7 +594,7 @@ static int __fpr_get(struct task_struct *target,
{
struct user_fpsimd_state *uregs;
- sve_sync_to_fpsimd(target);
+ fpsimd_sync_from_effective_state(target);
uregs = &target->thread.uw.fpsimd_state;
@@ -626,7 +626,7 @@ static int __fpr_set(struct task_struct *target,
* Ensure target->thread.uw.fpsimd_state is up to date, so that a
* short copyin can't resurrect stale data.
*/
- sve_sync_to_fpsimd(target);
+ fpsimd_sync_from_effective_state(target);
newstate = target->thread.uw.fpsimd_state;
@@ -653,7 +653,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
if (ret)
return ret;
- sve_sync_from_fpsimd_zeropad(target);
+ fpsimd_sync_to_effective_state_zeropad(target);
fpsimd_flush_task_state(target);
return ret;
@@ -775,6 +775,11 @@ static void sve_init_header_from_task(struct user_sve_header *header,
task_type = ARM64_VEC_SVE;
active = (task_type == type);
+ if (active && target->thread.fp_type == FP_STATE_SVE)
+ header->flags = SVE_PT_REGS_SVE;
+ else
+ header->flags = SVE_PT_REGS_FPSIMD;
+
switch (type) {
case ARM64_VEC_SVE:
if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
@@ -789,19 +794,14 @@ static void sve_init_header_from_task(struct user_sve_header *header,
return;
}
- if (active) {
- if (target->thread.fp_type == FP_STATE_FPSIMD) {
- header->flags |= SVE_PT_REGS_FPSIMD;
- } else {
- header->flags |= SVE_PT_REGS_SVE;
- }
- }
-
header->vl = task_get_vl(target, type);
vq = sve_vq_from_vl(header->vl);
header->max_vl = vec_max_vl(type);
- header->size = SVE_PT_SIZE(vq, header->flags);
+ if (active)
+ header->size = SVE_PT_SIZE(vq, header->flags);
+ else
+ header->size = sizeof(*header);
header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
SVE_PT_REGS_SVE);
}
@@ -820,18 +820,25 @@ static int sve_get_common(struct task_struct *target,
unsigned int vq;
unsigned long start, end;
+ if (target == current)
+ fpsimd_preserve_current_state();
+
/* Header */
sve_init_header_from_task(&header, target, type);
vq = sve_vq_from_vl(header.vl);
membuf_write(&to, &header, sizeof(header));
- if (target == current)
- fpsimd_preserve_current_state();
-
BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
+ /*
+ * When the requested vector type is not active, do not present data
+ * from the other mode to userspace.
+ */
+ if (header.size == sizeof(header))
+ return 0;
+
switch ((header.flags & SVE_PT_REGS_MASK)) {
case SVE_PT_REGS_FPSIMD:
return __fpr_get(target, regset, to);
@@ -859,7 +866,7 @@ static int sve_get_common(struct task_struct *target,
return membuf_zero(&to, end - start);
default:
- return 0;
+ BUILD_BUG();
}
}
@@ -883,6 +890,9 @@ static int sve_set_common(struct task_struct *target,
struct user_sve_header header;
unsigned int vq;
unsigned long start, end;
+ bool fpsimd;
+
+ fpsimd_flush_task_state(target);
/* Header */
if (count < sizeof(header))
@@ -890,7 +900,16 @@ static int sve_set_common(struct task_struct *target,
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
0, sizeof(header));
if (ret)
- goto out;
+ return ret;
+
+ /*
+ * Streaming SVE data is always stored and presented in SVE format.
+ * Require the user to provide SVE formatted data for consistency, and
+ * to avoid the risk that we configure the task into an invalid state.
+ */
+ fpsimd = (header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD;
+ if (fpsimd && type == ARM64_VEC_SME)
+ return -EINVAL;
/*
* Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
@@ -899,7 +918,21 @@ static int sve_set_common(struct task_struct *target,
ret = vec_set_vector_length(target, type, header.vl,
((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
if (ret)
- goto out;
+ return ret;
+
+ /* Allocate SME storage if necessary, preserving any existing ZA/ZT state */
+ if (type == ARM64_VEC_SME) {
+ sme_alloc(target, false);
+ if (!target->thread.sme_state)
+ return -ENOMEM;
+ }
+
+ /* Allocate SVE storage if necessary, zeroing any existing SVE state */
+ if (!fpsimd) {
+ sve_alloc(target, true);
+ if (!target->thread.sve_state)
+ return -ENOMEM;
+ }
/*
* Actual VL set may be different from what the user asked
@@ -910,81 +943,47 @@ static int sve_set_common(struct task_struct *target,
/* Enter/exit streaming mode */
if (system_supports_sme()) {
- u64 old_svcr = target->thread.svcr;
-
switch (type) {
case ARM64_VEC_SVE:
target->thread.svcr &= ~SVCR_SM_MASK;
+ set_tsk_thread_flag(target, TIF_SVE);
break;
case ARM64_VEC_SME:
target->thread.svcr |= SVCR_SM_MASK;
-
- /*
- * Disable traps and ensure there is SME storage but
- * preserve any currently set values in ZA/ZT.
- */
- sme_alloc(target, false);
set_tsk_thread_flag(target, TIF_SME);
break;
default:
WARN_ON_ONCE(1);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
-
- /*
- * If we switched then invalidate any existing SVE
- * state and ensure there's storage.
- */
- if (target->thread.svcr != old_svcr)
- sve_alloc(target, true);
}
+ /* Always zero V regs, FPSR, and FPCR */
+ memset(&target->thread.uw.fpsimd_state, 0,
+ sizeof(target->thread.uw.fpsimd_state));
+
/* Registers: FPSIMD-only case */
BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
- if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
- ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
- SVE_PT_FPSIMD_OFFSET);
+ if (fpsimd) {
clear_tsk_thread_flag(target, TIF_SVE);
target->thread.fp_type = FP_STATE_FPSIMD;
- goto out;
+ ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
+ SVE_PT_FPSIMD_OFFSET);
+ return ret;
}
- /*
- * Otherwise: no registers or full SVE case. For backwards
- * compatibility reasons we treat empty flags as SVE registers.
- */
+ /* Otherwise: no registers or full SVE case. */
+
+ target->thread.fp_type = FP_STATE_SVE;
/*
* If setting a different VL from the requested VL and there is
* register data, the data layout will be wrong: don't even
* try to set the registers in this case.
*/
- if (count && vq != sve_vq_from_vl(header.vl)) {
- ret = -EIO;
- goto out;
- }
-
- sve_alloc(target, true);
- if (!target->thread.sve_state) {
- ret = -ENOMEM;
- clear_tsk_thread_flag(target, TIF_SVE);
- target->thread.fp_type = FP_STATE_FPSIMD;
- goto out;
- }
-
- /*
- * Ensure target->thread.sve_state is up to date with target's
- * FPSIMD regs, so that a short copyin leaves trailing
- * registers unmodified. Only enable SVE if we are
- * configuring normal SVE, a system with streaming SVE may not
- * have normal SVE.
- */
- fpsimd_sync_to_sve(target);
- if (type == ARM64_VEC_SVE)
- set_tsk_thread_flag(target, TIF_SVE);
- target->thread.fp_type = FP_STATE_SVE;
+ if (count && vq != sve_vq_from_vl(header.vl))
+ return -EIO;
BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
start = SVE_PT_SVE_OFFSET;
@@ -993,7 +992,7 @@ static int sve_set_common(struct task_struct *target,
target->thread.sve_state,
start, end);
if (ret)
- goto out;
+ return ret;
start = end;
end = SVE_PT_SVE_FPSR_OFFSET(vq);
@@ -1009,8 +1008,6 @@ static int sve_set_common(struct task_struct *target,
&target->thread.uw.fpsimd_state.fpsr,
start, end);
-out:
- fpsimd_flush_task_state(target);
return ret;
}
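
In the reworked ptrace code above, sve_init_header_from_task() only reports a register payload size when the requested vector type is the one currently in use; otherwise the regset is just the bare header and sve_get_common() returns once the header has been written. A small model of that sizing decision, with SVE_PT_SIZE() replaced by a stand-in formula:

#include <stdio.h>

struct sve_header { unsigned size, vl, flags; };
enum { REGS_FPSIMD = 0, REGS_SVE = 1 };

/* Stand-in for SVE_PT_SIZE(vq, flags): header plus the register payload. */
static unsigned pt_size(unsigned vq, unsigned flags)
{
	return sizeof(struct sve_header) +
	       (flags == REGS_SVE ? vq * 34u * 16u : 32u * 16u);
}

static void init_header(struct sve_header *h, int active, int live_sve,
			unsigned vl)
{
	h->vl = vl;
	h->flags = (active && live_sve) ? REGS_SVE : REGS_FPSIMD;
	/* Inactive mode: expose only the header, no register data. */
	h->size = active ? pt_size(vl / 16, h->flags) : sizeof(*h);
}

int main(void)
{
	struct sve_header h;

	init_header(&h, 1, 1, 32);
	printf("active:   %u bytes\n", h.size);
	init_header(&h, 0, 1, 32);
	printf("inactive: %u bytes (header only: %d)\n", h.size,
	       h.size == sizeof(h));
	return 0;
}
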
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 85104587f849..77c7926a4df6 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -169,7 +169,7 @@ static void __init smp_build_mpidr_hash(void)
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
- int size;
+ int size = 0;
void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
const char *name;
@@ -182,10 +182,10 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
*/
if (!early_init_dt_scan(dt_virt, dt_phys)) {
pr_crit("\n"
- "Error: invalid device tree blob at physical address %pa (virtual address 0x%px)\n"
- "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
- "\nPlease check your bootloader.",
- &dt_phys, dt_virt);
+ "Error: invalid device tree blob: PA=%pa, VA=%px, size=%d bytes\n"
+ "The dtb must be 8-byte aligned and must not exceed 2 MB in size.\n"
+ "\nPlease check your bootloader.\n",
+ &dt_phys, dt_virt, size);
/*
* Note that in this _really_ early stage we cannot even BUG()
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index a7c37afb4ebe..417140cd399b 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -250,6 +250,8 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
&current->thread.uw.fpsimd_state;
int err;
+ fpsimd_sync_from_effective_state(current);
+
/* copy the FP and status/control registers */
err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
@@ -262,37 +264,46 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
return err ? -EFAULT : 0;
}
-static int restore_fpsimd_context(struct user_ctxs *user)
+static int read_fpsimd_context(struct user_fpsimd_state *fpsimd,
+ struct user_ctxs *user)
{
- struct user_fpsimd_state fpsimd;
- int err = 0;
+ int err;
/* check the size information */
if (user->fpsimd_size != sizeof(struct fpsimd_context))
return -EINVAL;
/* copy the FP and status/control registers */
- err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
- sizeof(fpsimd.vregs));
- __get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
- __get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);
+ err = __copy_from_user(fpsimd->vregs, &(user->fpsimd->vregs),
+ sizeof(fpsimd->vregs));
+ __get_user_error(fpsimd->fpsr, &(user->fpsimd->fpsr), err);
+ __get_user_error(fpsimd->fpcr, &(user->fpsimd->fpcr), err);
+
+ return err ? -EFAULT : 0;
+}
+
+static int restore_fpsimd_context(struct user_ctxs *user)
+{
+ struct user_fpsimd_state fpsimd;
+ int err;
+
+ err = read_fpsimd_context(&fpsimd, user);
+ if (err)
+ return err;
clear_thread_flag(TIF_SVE);
+ current->thread.svcr &= ~SVCR_SM_MASK;
current->thread.fp_type = FP_STATE_FPSIMD;
/* load the hardware registers from the fpsimd_state structure */
- if (!err)
- fpsimd_update_current_state(&fpsimd);
-
- return err ? -EFAULT : 0;
+ fpsimd_update_current_state(&fpsimd);
+ return 0;
}
static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
int err = 0;
- current->thread.uw.fpmr = read_sysreg_s(SYS_FPMR);
-
__put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
__put_user_error(sizeof(*ctx), &ctx->head.size, err);
__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);
@@ -310,7 +321,7 @@ static int restore_fpmr_context(struct user_ctxs *user)
__get_user_error(fpmr, &user->fpmr->fpmr, err);
if (!err)
- write_sysreg_s(fpmr, SYS_FPMR);
+ current->thread.uw.fpmr = fpmr;
return err;
}
@@ -372,11 +383,6 @@ static int preserve_sve_context(struct sve_context __user *ctx)
err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
if (vq) {
- /*
- * This assumes that the SVE state has already been saved to
- * the task struct by calling the function
- * fpsimd_signal_preserve_current_state().
- */
err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
current->thread.sve_state,
SVE_SIG_REGS_SIZE(vq));
@@ -391,6 +397,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
unsigned int vl, vq;
struct user_fpsimd_state fpsimd;
u16 user_vl, flags;
+ bool sm;
if (user->sve_size < sizeof(*user->sve))
return -EINVAL;
@@ -400,7 +407,8 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
if (err)
return err;
- if (flags & SVE_SIG_FLAG_SM) {
+ sm = flags & SVE_SIG_FLAG_SM;
+ if (sm) {
if (!system_supports_sme())
return -EINVAL;
@@ -420,28 +428,23 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
if (user_vl != vl)
return -EINVAL;
- if (user->sve_size == sizeof(*user->sve)) {
- clear_thread_flag(TIF_SVE);
- current->thread.svcr &= ~SVCR_SM_MASK;
- current->thread.fp_type = FP_STATE_FPSIMD;
- goto fpsimd_only;
- }
+ /*
+ * Non-streaming SVE state may be preserved without an SVE payload, in
+ * which case the SVE context only has a header with VL==0, and all
+ * state can be restored from the FPSIMD context.
+ *
+ * Streaming SVE state is always preserved with an SVE payload. For
+ * consistency and robustness, reject restoring streaming SVE state
+ * without an SVE payload.
+ */
+ if (!sm && user->sve_size == sizeof(*user->sve))
+ return restore_fpsimd_context(user);
vq = sve_vq_from_vl(vl);
if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
return -EINVAL;
- /*
- * Careful: we are about __copy_from_user() directly into
- * thread.sve_state with preemption enabled, so protection is
- * needed to prevent a racing context switch from writing stale
- * registers back over the new data.
- */
-
- fpsimd_flush_task_state(current);
- /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
-
sve_alloc(current, true);
if (!current->thread.sve_state) {
clear_thread_flag(TIF_SVE);
@@ -461,19 +464,14 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
set_thread_flag(TIF_SVE);
current->thread.fp_type = FP_STATE_SVE;
-fpsimd_only:
- /* copy the FP and status/control registers */
- /* restore_sigframe() already checked that user->fpsimd != NULL. */
- err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
- sizeof(fpsimd.vregs));
- __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
- __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);
+ err = read_fpsimd_context(&fpsimd, user);
+ if (err)
+ return err;
- /* load the hardware registers from the fpsimd_state structure */
- if (!err)
- fpsimd_update_current_state(&fpsimd);
+ /* Merge the FPSIMD registers into the SVE state */
+ fpsimd_update_current_state(&fpsimd);
- return err ? -EFAULT : 0;
+ return 0;
}
#else /* ! CONFIG_ARM64_SVE */
@@ -493,13 +491,12 @@ extern int preserve_sve_context(void __user *ctx);
static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
+ u64 tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
int err = 0;
- current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
-
__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
__put_user_error(sizeof(*ctx), &ctx->head.size, err);
- __put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);
+ __put_user_error(tpidr2_el0, &ctx->tpidr2, err);
return err;
}
@@ -541,11 +538,6 @@ static int preserve_za_context(struct za_context __user *ctx)
err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
if (vq) {
- /*
- * This assumes that the ZA state has already been saved to
- * the task struct by calling the function
- * fpsimd_signal_preserve_current_state().
- */
err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
current->thread.sme_state,
ZA_SIG_REGS_SIZE(vq));
@@ -580,16 +572,6 @@ static int restore_za_context(struct user_ctxs *user)
if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
return -EINVAL;
- /*
- * Careful: we are about __copy_from_user() directly into
- * thread.sme_state with preemption enabled, so protection is
- * needed to prevent a racing context switch from writing stale
- * registers back over the new data.
- */
-
- fpsimd_flush_task_state(current);
- /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
-
sme_alloc(current, true);
if (!current->thread.sme_state) {
current->thread.svcr &= ~SVCR_ZA_MASK;
@@ -627,11 +609,6 @@ static int preserve_zt_context(struct zt_context __user *ctx)
BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
- /*
- * This assumes that the ZT state has already been saved to
- * the task struct by calling the function
- * fpsimd_signal_preserve_current_state().
- */
err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
thread_zt_state(&current->thread),
ZT_SIG_REGS_SIZE(1));
@@ -657,16 +634,6 @@ static int restore_zt_context(struct user_ctxs *user)
if (nregs != 1)
return -EINVAL;
- /*
- * Careful: we are about __copy_from_user() directly into
- * thread.zt_state with preemption enabled, so protection is
- * needed to prevent a racing context switch from writing stale
- * registers back over the new data.
- */
-
- fpsimd_flush_task_state(current);
- /* From now, fpsimd_thread_switch() won't touch ZT in thread state */
-
err = __copy_from_user(thread_zt_state(&current->thread),
(char __user const *)user->zt +
ZT_SIG_REGS_OFFSET,
@@ -1017,6 +984,8 @@ static int restore_sigframe(struct pt_regs *regs,
*/
forget_syscall(regs);
+ fpsimd_save_and_flush_current_state();
+
err |= !valid_user_regs(&regs->user_regs, current);
if (err == 0)
err = parse_user_sigframe(&user, sf);
@@ -1507,21 +1476,9 @@ static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
/* Signal handlers are invoked with ZA and streaming mode disabled */
if (system_supports_sme()) {
- /*
- * If we were in streaming mode the saved register
- * state was SVE but we will exit SM and use the
- * FPSIMD register state - flush the saved FPSIMD
- * register state in case it gets loaded.
- */
- if (current->thread.svcr & SVCR_SM_MASK) {
- memset(&current->thread.uw.fpsimd_state, 0,
- sizeof(current->thread.uw.fpsimd_state));
- current->thread.fp_type = FP_STATE_FPSIMD;
- }
-
- current->thread.svcr &= ~(SVCR_ZA_MASK |
- SVCR_SM_MASK);
- sme_smstop();
+ task_smstop_sm(current);
+ current->thread.svcr &= ~SVCR_ZA_MASK;
+ write_sysreg_s(0, SYS_TPIDR2_EL0);
}
return 0;
@@ -1535,7 +1492,7 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct user_access_state ua_state;
int err = 0;
- fpsimd_signal_preserve_current_state();
+ fpsimd_save_and_flush_current_state();
if (get_sigframe(&user, ksig, regs))
return 1;
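
restore_sve_fpsimd_context() above now distinguishes three cases: a header-only record for non-streaming state falls back to the FPSIMD-only restore, a header-only record for streaming state is rejected, and anything else must carry a full SVE payload. The decision written out as a stand-alone function, with simplified sizes and error handling:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define HDR_SIZE     16u                         /* stand-in for sizeof(struct sve_context) */
#define CTX_SIZE(vq) (HDR_SIZE + (vq) * 34u * 16u)

enum restore { RESTORE_FPSIMD_ONLY, RESTORE_SVE_PAYLOAD };

static int classify(bool streaming, unsigned record_size, unsigned vq,
		    enum restore *how)
{
	if (!streaming && record_size == HDR_SIZE) {
		*how = RESTORE_FPSIMD_ONLY;      /* no payload, non-streaming */
		return 0;
	}
	if (record_size < CTX_SIZE(vq))
		return -EINVAL;                  /* streaming without payload, or truncated */
	*how = RESTORE_SVE_PAYLOAD;
	return 0;
}

int main(void)
{
	enum restore how;

	printf("%d\n", classify(false, HDR_SIZE, 4, &how));    /* 0, FPSIMD-only */
	printf("%d\n", classify(true, HDR_SIZE, 4, &how));     /* -EINVAL        */
	printf("%d\n", classify(true, CTX_SIZE(4), 4, &how));  /* 0, SVE payload */
	return 0;
}
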
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 81e798b6dada..bb3b526ff43f 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -103,7 +103,7 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
* Note that this also saves V16-31, which aren't visible
* in AArch32.
*/
- fpsimd_signal_preserve_current_state();
+ fpsimd_save_and_flush_current_state();
/* Place structure header on the stack */
__put_user_error(magic, &frame->magic, err);
@@ -169,14 +169,17 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
fpsimd.fpsr = fpscr & VFP_FPSCR_STAT_MASK;
fpsimd.fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
+ if (err)
+ return -EFAULT;
+
/*
* We don't need to touch the exception register, so
* reload the hardware state.
*/
- if (!err)
- fpsimd_update_current_state(&fpsimd);
+ fpsimd_save_and_flush_current_state();
+ current->thread.uw.fpsimd_state = fpsimd;
- return err ? -EFAULT : 0;
+ return 0;
}
static int compat_restore_sigframe(struct pt_regs *regs,
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 529cff825531..9bfa5c944379 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -1118,7 +1118,7 @@ static struct break_hook kasan_break_hook = {
#ifdef CONFIG_UBSAN_TRAP
static int ubsan_handler(struct pt_regs *regs, unsigned long esr)
{
- die(report_ubsan_failure(regs, esr & UBSAN_BRK_MASK), regs, esr);
+ die(report_ubsan_failure(esr & UBSAN_BRK_MASK), regs, esr);
return DBG_HOOK_HANDLED;
}
@@ -1145,7 +1145,7 @@ int __init early_brk64(unsigned long addr, unsigned long esr,
return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_UBSAN_TRAP
- if ((esr_brk_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM)
+ if (esr_is_ubsan_brk(esr))
return ubsan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index e73326bd3ff7..ad6133b89e7a 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -13,7 +13,7 @@
*(__kvm_ex_table) \
__stop___kvm_ex_table = .;
-#define HYPERVISOR_DATA_SECTIONS \
+#define HYPERVISOR_RODATA_SECTIONS \
HYP_SECTION_NAME(.rodata) : { \
. = ALIGN(PAGE_SIZE); \
__hyp_rodata_start = .; \
@@ -23,6 +23,15 @@
__hyp_rodata_end = .; \
}
+#define HYPERVISOR_DATA_SECTION \
+ HYP_SECTION_NAME(.data) : { \
+ . = ALIGN(PAGE_SIZE); \
+ __hyp_data_start = .; \
+ *(HYP_SECTION_NAME(.data)) \
+ . = ALIGN(PAGE_SIZE); \
+ __hyp_data_end = .; \
+ }
+
#define HYPERVISOR_PERCPU_SECTION \
. = ALIGN(PAGE_SIZE); \
HYP_SECTION_NAME(.data..percpu) : { \
@@ -51,7 +60,8 @@
#define SBSS_ALIGN PAGE_SIZE
#else /* CONFIG_KVM */
#define HYPERVISOR_EXTABLE
-#define HYPERVISOR_DATA_SECTIONS
+#define HYPERVISOR_RODATA_SECTIONS
+#define HYPERVISOR_DATA_SECTION
#define HYPERVISOR_PERCPU_SECTION
#define HYPERVISOR_RELOC_SECTION
#define SBSS_ALIGN 0
@@ -190,7 +200,7 @@ SECTIONS
/* everything from this point to __init_begin will be marked RO NX */
RO_DATA(PAGE_SIZE)
- HYPERVISOR_DATA_SECTIONS
+ HYPERVISOR_RODATA_SECTIONS
.got : { *(.got) }
/*
@@ -249,9 +259,9 @@ SECTIONS
__inittext_end = .;
__initdata_begin = .;
- init_idmap_pg_dir = .;
+ __pi_init_idmap_pg_dir = .;
. += INIT_IDMAP_DIR_SIZE;
- init_idmap_pg_end = .;
+ __pi_init_idmap_pg_end = .;
.init.data : {
INIT_DATA
@@ -295,6 +305,8 @@ SECTIONS
_sdata = .;
RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
+ HYPERVISOR_DATA_SECTION
+
/*
* Data written with the MMU off but read with the MMU on requires
* cache lines to be invalidated, discarding up to a Cache Writeback
@@ -319,11 +331,12 @@ SECTIONS
/* start of zero-init region */
BSS_SECTION(SBSS_ALIGN, 0, 0)
+ __pi___bss_start = __bss_start;
. = ALIGN(PAGE_SIZE);
- init_pg_dir = .;
+ __pi_init_pg_dir = .;
. += INIT_DIR_SIZE;
- init_pg_end = .;
+ __pi_init_pg_end = .;
/* end of zero-init region */
. += SZ_4K; /* stack for the early C runtime */
@@ -332,6 +345,7 @@ SECTIONS
. = ALIGN(SEGMENT_ALIGN);
__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
_end = .;
+ __pi__end = .;
STABS_DEBUG
DWARF_DEBUG
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 096e45acadb2..713248f240e0 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -19,7 +19,6 @@ if VIRTUALIZATION
menuconfig KVM
bool "Kernel-based Virtual Machine (KVM) support"
- depends on AS_HAS_ARMV8_4
select KVM_COMMON
select KVM_GENERIC_HARDWARE_ENABLING
select KVM_GENERIC_MMU_NOTIFIER
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 209bc76263f1..7c329e01c557 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -14,7 +14,7 @@ CFLAGS_sys_regs.o += -Wno-override-init
CFLAGS_handle_exit.o += -Wno-override-init
kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
- inject_fault.o va_layout.o handle_exit.o \
+ inject_fault.o va_layout.o handle_exit.o config.o \
guest.o debug.o reset.o sys_regs.o stacktrace.o \
vgic-sys-reg-v3.o fpsimd.o pkvm.o \
arch_timer.o trng.o vmid.o emulate-nested.o nested.o at.o \
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 5133dcbfe9f7..fdbc8beec930 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -1766,7 +1766,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
mutex_lock(&kvm->lock);
- if (lock_all_vcpus(kvm)) {
+ if (!kvm_trylock_all_vcpus(kvm)) {
set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);
/*
@@ -1778,7 +1778,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
kvm->arch.timer_data.voffset = offset->counter_offset;
kvm->arch.timer_data.poffset = offset->counter_offset;
- unlock_all_vcpus(kvm);
+ kvm_unlock_all_vcpus(kvm);
} else {
ret = -EBUSY;
}
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 68fec8c95fee..de2b4e9c9f9f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -368,6 +368,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_EL1_32BIT:
r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
break;
+ case KVM_CAP_ARM_EL2:
+ r = cpus_have_final_cap(ARM64_HAS_NESTED_VIRT);
+ break;
+ case KVM_CAP_ARM_EL2_E2H0:
+ r = cpus_have_final_cap(ARM64_HAS_HCR_NV1);
+ break;
case KVM_CAP_GUEST_DEBUG_HW_BPS:
r = get_num_brps();
break;
@@ -843,6 +849,10 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
return ret;
if (vcpu_has_nv(vcpu)) {
+ ret = kvm_vcpu_allocate_vncr_tlb(vcpu);
+ if (ret)
+ return ret;
+
ret = kvm_vgic_vcpu_nv_init(vcpu);
if (ret)
return ret;
@@ -1914,49 +1924,6 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
}
}
-/* unlocks vcpus from @vcpu_lock_idx and smaller */
-static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
-{
- struct kvm_vcpu *tmp_vcpu;
-
- for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
- tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
- mutex_unlock(&tmp_vcpu->mutex);
- }
-}
-
-void unlock_all_vcpus(struct kvm *kvm)
-{
- lockdep_assert_held(&kvm->lock);
-
- unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
-}
-
-/* Returns true if all vcpus were locked, false otherwise */
-bool lock_all_vcpus(struct kvm *kvm)
-{
- struct kvm_vcpu *tmp_vcpu;
- unsigned long c;
-
- lockdep_assert_held(&kvm->lock);
-
- /*
- * Any time a vcpu is in an ioctl (including running), the
- * core KVM code tries to grab the vcpu->mutex.
- *
- * By grabbing the vcpu->mutex of all VCPUs we ensure that no
- * other VCPUs can fiddle with the state while we access it.
- */
- kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
- if (!mutex_trylock(&tmp_vcpu->mutex)) {
- unlock_vcpus(kvm, c - 1);
- return false;
- }
- }
-
- return true;
-}
-
static unsigned long nvhe_percpu_size(void)
{
return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
@@ -2450,6 +2417,19 @@ static void kvm_hyp_init_symbols(void)
kvm_nvhe_sym(__icache_flags) = __icache_flags;
kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
+	/* Propagate the FGT state to the nVHE side */
+ kvm_nvhe_sym(hfgrtr_masks) = hfgrtr_masks;
+ kvm_nvhe_sym(hfgwtr_masks) = hfgwtr_masks;
+ kvm_nvhe_sym(hfgitr_masks) = hfgitr_masks;
+ kvm_nvhe_sym(hdfgrtr_masks) = hdfgrtr_masks;
+ kvm_nvhe_sym(hdfgwtr_masks) = hdfgwtr_masks;
+ kvm_nvhe_sym(hafgrtr_masks) = hafgrtr_masks;
+ kvm_nvhe_sym(hfgrtr2_masks) = hfgrtr2_masks;
+ kvm_nvhe_sym(hfgwtr2_masks) = hfgwtr2_masks;
+ kvm_nvhe_sym(hfgitr2_masks) = hfgitr2_masks;
+	kvm_nvhe_sym(hdfgrtr2_masks) = hdfgrtr2_masks;
+	kvm_nvhe_sym(hdfgwtr2_masks) = hdfgwtr2_masks;
+
/*
* Flush entire BSS since part of its data containing init symbols is read
* while the MMU is off.
@@ -2604,6 +2584,13 @@ static int __init init_hyp_mode(void)
goto out_err;
}
+ err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_start),
+ kvm_ksym_ref(__hyp_data_end), PAGE_HYP);
+ if (err) {
+ kvm_err("Cannot map .hyp.data section\n");
+ goto out_err;
+ }
+
err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
if (err) {
@@ -2743,11 +2730,6 @@ bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
return irqchip_in_kernel(kvm);
}
-bool kvm_arch_has_irq_bypass(void)
-{
- return true;
-}
-
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
struct irq_bypass_producer *prod)
{
@@ -2765,6 +2747,7 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
&irqfd->irq_entry);
}
+
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
struct irq_bypass_producer *prod)
{
@@ -2775,8 +2758,29 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
if (irq_entry->type != KVM_IRQ_ROUTING_MSI)
return;
- kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
- &irqfd->irq_entry);
+ kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq);
+}
+
+bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
+ struct kvm_kernel_irq_routing_entry *new)
+{
+ if (new->type != KVM_IRQ_ROUTING_MSI)
+ return true;
+
+ return memcmp(&old->msi, &new->msi, sizeof(new->msi));
+}
+
+int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set)
+{
+ /*
+ * Remapping the vLPI requires taking the its_lock mutex to resolve
+ * the new translation. We're in spinlock land at this point, so no
+ * chance of resolving the translation.
+ *
+ * Unmap the vLPI and fall back to software LPI injection.
+ */
+ return kvm_vgic_v4_unset_forwarding(kvm, host_irq);
}
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index f74a66ce3064..a25be111cd8f 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -10,61 +10,11 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
-enum trans_regime {
- TR_EL10,
- TR_EL20,
- TR_EL2,
-};
-
-struct s1_walk_info {
- u64 baddr;
- enum trans_regime regime;
- unsigned int max_oa_bits;
- unsigned int pgshift;
- unsigned int txsz;
- int sl;
- bool hpd;
- bool e0poe;
- bool poe;
- bool pan;
- bool be;
- bool s2;
-};
-
-struct s1_walk_result {
- union {
- struct {
- u64 desc;
- u64 pa;
- s8 level;
- u8 APTable;
- bool UXNTable;
- bool PXNTable;
- bool uwxn;
- bool uov;
- bool ur;
- bool uw;
- bool ux;
- bool pwxn;
- bool pov;
- bool pr;
- bool pw;
- bool px;
- };
- struct {
- u8 fst;
- bool ptw;
- bool s2;
- };
- };
- bool failed;
-};
-
-static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool ptw, bool s2)
+static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
{
wr->fst = fst;
- wr->ptw = ptw;
- wr->s2 = s2;
+ wr->ptw = s1ptw;
+ wr->s2 = s1ptw;
wr->failed = true;
}
@@ -145,20 +95,15 @@ static void compute_s1poe(struct kvm_vcpu *vcpu, struct s1_walk_info *wi)
}
}
-static int setup_s1_walk(struct kvm_vcpu *vcpu, u32 op, struct s1_walk_info *wi,
+static int setup_s1_walk(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
struct s1_walk_result *wr, u64 va)
{
u64 hcr, sctlr, tcr, tg, ps, ia_bits, ttbr;
unsigned int stride, x;
- bool va55, tbi, lva, as_el0;
+ bool va55, tbi, lva;
hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
- wi->regime = compute_translation_regime(vcpu, op);
- as_el0 = (op == OP_AT_S1E0R || op == OP_AT_S1E0W);
- wi->pan = (op == OP_AT_S1E1RP || op == OP_AT_S1E1WP) &&
- (*vcpu_cpsr(vcpu) & PSR_PAN_BIT);
-
va55 = va & BIT(55);
if (wi->regime == TR_EL2 && va55)
@@ -319,7 +264,7 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, u32 op, struct s1_walk_info *wi,
/* R_BNDVG and following statements */
if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR2_EL1, E0PD, IMP) &&
- as_el0 && (tcr & (va55 ? TCR_E0PD1 : TCR_E0PD0)))
+ wi->as_el0 && (tcr & (va55 ? TCR_E0PD1 : TCR_E0PD0)))
goto transfault_l0;
/* AArch64.S1StartLevel() */
@@ -345,11 +290,11 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, u32 op, struct s1_walk_info *wi,
return 0;
addrsz: /* Address Size Fault level 0 */
- fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(0), false, false);
+ fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(0), false);
return -EFAULT;
transfault_l0: /* Translation Fault level 0 */
- fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(0), false, false);
+ fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(0), false);
return -EFAULT;
}
@@ -380,13 +325,13 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
if (ret) {
fail_s1_walk(wr,
(s2_trans.esr & ~ESR_ELx_FSC_LEVEL) | level,
- true, true);
+ true);
return ret;
}
if (!kvm_s2_trans_readable(&s2_trans)) {
fail_s1_walk(wr, ESR_ELx_FSC_PERM_L(level),
- true, true);
+ true);
return -EPERM;
}
@@ -396,8 +341,7 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
ret = kvm_read_guest(vcpu->kvm, ipa, &desc, sizeof(desc));
if (ret) {
- fail_s1_walk(wr, ESR_ELx_FSC_SEA_TTW(level),
- true, false);
+ fail_s1_walk(wr, ESR_ELx_FSC_SEA_TTW(level), false);
return ret;
}
@@ -457,6 +401,11 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
if (check_output_size(desc & GENMASK(47, va_bottom), wi))
goto addrsz;
+ if (!(desc & PTE_AF)) {
+ fail_s1_walk(wr, ESR_ELx_FSC_ACCESS_L(level), false);
+ return -EACCES;
+ }
+
va_bottom += contiguous_bit_shift(desc, wi, level);
wr->failed = false;
@@ -465,13 +414,40 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
wr->pa = desc & GENMASK(47, va_bottom);
wr->pa |= va & GENMASK_ULL(va_bottom - 1, 0);
+ wr->nG = (wi->regime != TR_EL2) && (desc & PTE_NG);
+ if (wr->nG) {
+ u64 asid_ttbr, tcr;
+
+ switch (wi->regime) {
+ case TR_EL10:
+ tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
+ asid_ttbr = ((tcr & TCR_A1) ?
+ vcpu_read_sys_reg(vcpu, TTBR1_EL1) :
+ vcpu_read_sys_reg(vcpu, TTBR0_EL1));
+ break;
+ case TR_EL20:
+ tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+ asid_ttbr = ((tcr & TCR_A1) ?
+ vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
+ vcpu_read_sys_reg(vcpu, TTBR0_EL2));
+ break;
+ default:
+ BUG();
+ }
+
+ wr->asid = FIELD_GET(TTBR_ASID_MASK, asid_ttbr);
+ if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
+ !(tcr & TCR_ASID16))
+ wr->asid &= GENMASK(7, 0);
+ }
+
return 0;
addrsz:
- fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(level), true, false);
+ fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(level), false);
return -EINVAL;
transfault:
- fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(level), true, false);
+ fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(level), false);
return -ENOENT;
}
@@ -488,7 +464,6 @@ struct mmu_config {
u64 sctlr;
u64 vttbr;
u64 vtcr;
- u64 hcr;
};
static void __mmu_config_save(struct mmu_config *config)
@@ -511,13 +486,10 @@ static void __mmu_config_save(struct mmu_config *config)
config->sctlr = read_sysreg_el1(SYS_SCTLR);
config->vttbr = read_sysreg(vttbr_el2);
config->vtcr = read_sysreg(vtcr_el2);
- config->hcr = read_sysreg(hcr_el2);
}
static void __mmu_config_restore(struct mmu_config *config)
{
- write_sysreg(config->hcr, hcr_el2);
-
/*
* ARM errata 1165522 and 1530923 require TGE to be 1 before
* we update the guest state.
@@ -1155,7 +1127,12 @@ static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
bool perm_fail = false;
int ret, idx;
- ret = setup_s1_walk(vcpu, op, &wi, &wr, vaddr);
+ wi.regime = compute_translation_regime(vcpu, op);
+ wi.as_el0 = (op == OP_AT_S1E0R || op == OP_AT_S1E0W);
+ wi.pan = (op == OP_AT_S1E1RP || op == OP_AT_S1E1WP) &&
+ (*vcpu_cpsr(vcpu) & PSR_PAN_BIT);
+
+ ret = setup_s1_walk(vcpu, &wi, &wr, vaddr);
if (ret)
goto compute_par;
@@ -1198,7 +1175,7 @@ static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
}
if (perm_fail)
- fail_s1_walk(&wr, ESR_ELx_FSC_PERM_L(wr.level), false, false);
+ fail_s1_walk(&wr, ESR_ELx_FSC_PERM_L(wr.level), false);
compute_par:
return compute_par_s1(vcpu, &wr, wi.regime);
@@ -1210,7 +1187,8 @@ compute_par:
* If the translation is unsuccessful, the value may only contain
* PAR_EL1.F, and cannot be taken at face value. It isn't an
* indication of the translation having failed, only that the fast
- * path did not succeed, *unless* it indicates a S1 permission fault.
+ * path did not succeed, *unless* it indicates a S1 permission or
+ * access fault.
*/
static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
{
@@ -1266,8 +1244,8 @@ static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
__load_stage2(mmu, mmu->arch);
skip_mmu_switch:
- /* Clear TGE, enable S2 translation, we're rolling */
- write_sysreg((config.hcr & ~HCR_TGE) | HCR_VM, hcr_el2);
+ /* Temporarily switch back to guest context */
+ write_sysreg_hcr(vcpu->arch.hcr_el2);
isb();
switch (op) {
@@ -1299,6 +1277,8 @@ skip_mmu_switch:
if (!fail)
par = read_sysreg_par();
+ write_sysreg_hcr(HCR_HOST_VHE_FLAGS);
+
if (!(vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)))
__mmu_config_restore(&config);
@@ -1313,19 +1293,29 @@ static bool par_check_s1_perm_fault(u64 par)
!(par & SYS_PAR_EL1_S));
}
+static bool par_check_s1_access_fault(u64 par)
+{
+ u8 fst = FIELD_GET(SYS_PAR_EL1_FST, par);
+
+ return ((fst & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS &&
+ !(par & SYS_PAR_EL1_S));
+}
+
void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
{
u64 par = __kvm_at_s1e01_fast(vcpu, op, vaddr);
/*
- * If PAR_EL1 reports that AT failed on a S1 permission fault, we
- * know for sure that the PTW was able to walk the S1 tables and
- * there's nothing else to do.
+ * If PAR_EL1 reports that AT failed on a S1 permission or access
+ * fault, we know for sure that the PTW was able to walk the S1
+ * tables and there's nothing else to do.
*
* If AT failed for any other reason, then we must walk the guest S1
* to emulate the instruction.
*/
- if ((par & SYS_PAR_EL1_F) && !par_check_s1_perm_fault(par))
+ if ((par & SYS_PAR_EL1_F) &&
+ !par_check_s1_perm_fault(par) &&
+ !par_check_s1_access_fault(par))
par = handle_at_slow(vcpu, op, vaddr);
vcpu_write_sys_reg(vcpu, par, PAR_EL1);
@@ -1350,7 +1340,7 @@ void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
if (!vcpu_el2_e2h_is_set(vcpu))
val |= HCR_NV | HCR_NV1;
- write_sysreg(val, hcr_el2);
+ write_sysreg_hcr(val);
isb();
par = SYS_PAR_EL1_F;
@@ -1375,7 +1365,7 @@ void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
if (!fail)
par = read_sysreg_par();
- write_sysreg(hcr, hcr_el2);
+ write_sysreg_hcr(hcr);
isb();
}
@@ -1444,3 +1434,31 @@ void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
par = compute_par_s12(vcpu, par, &out);
vcpu_write_sys_reg(vcpu, par, PAR_EL1);
}
+
+/*
+ * Translate a VA for a given EL in a given translation regime, with
+ * or without PAN. This requires wi->{regime, as_el0, pan} to be
+ * set. The rest of the wi and wr should be 0-initialised.
+ */
+int __kvm_translate_va(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
+ struct s1_walk_result *wr, u64 va)
+{
+ int ret;
+
+ ret = setup_s1_walk(vcpu, wi, wr, va);
+ if (ret)
+ return ret;
+
+ if (wr->level == S1_MMU_DISABLED) {
+ wr->ur = wr->uw = wr->ux = true;
+ wr->pr = wr->pw = wr->px = true;
+ } else {
+ ret = walk_s1(vcpu, wi, wr, va);
+ if (ret)
+ return ret;
+
+ compute_s1_permissions(vcpu, wi, wr);
+ }
+
+ return 0;
+}
diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
new file mode 100644
index 000000000000..54911a93b001
--- /dev/null
+++ b/arch/arm64/kvm/config.c
@@ -0,0 +1,1085 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Google LLC
+ * Author: Marc Zyngier <maz@kernel.org>
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/sysreg.h>
+
+struct reg_bits_to_feat_map {
+ u64 bits;
+
+#define NEVER_FGU BIT(0) /* Can trap, but never UNDEF */
+#define CALL_FUNC BIT(1) /* Needs to evaluate tons of crap */
+#define FIXED_VALUE BIT(2) /* RAZ/WI or RAO/WI in KVM */
+ unsigned long flags;
+
+ union {
+ struct {
+ u8 regidx;
+ u8 shift;
+ u8 width;
+ bool sign;
+ s8 lo_lim;
+ };
+ bool (*match)(struct kvm *);
+ bool (*fval)(struct kvm *, u64 *);
+ };
+};
+
+#define __NEEDS_FEAT_3(m, f, id, fld, lim) \
+ { \
+ .bits = (m), \
+ .flags = (f), \
+ .regidx = IDREG_IDX(SYS_ ## id), \
+ .shift = id ##_## fld ## _SHIFT, \
+ .width = id ##_## fld ## _WIDTH, \
+ .sign = id ##_## fld ## _SIGNED, \
+ .lo_lim = id ##_## fld ##_## lim \
+ }
+
+#define __NEEDS_FEAT_2(m, f, fun, dummy) \
+ { \
+ .bits = (m), \
+ .flags = (f) | CALL_FUNC, \
+ .fval = (fun), \
+ }
+
+#define __NEEDS_FEAT_1(m, f, fun) \
+ { \
+ .bits = (m), \
+ .flags = (f) | CALL_FUNC, \
+ .match = (fun), \
+ }
+
+#define NEEDS_FEAT_FLAG(m, f, ...) \
+ CONCATENATE(__NEEDS_FEAT_, COUNT_ARGS(__VA_ARGS__))(m, f, __VA_ARGS__)
+
+#define NEEDS_FEAT_FIXED(m, ...) \
+ NEEDS_FEAT_FLAG(m, FIXED_VALUE, __VA_ARGS__, 0)
+
+#define NEEDS_FEAT(m, ...) NEEDS_FEAT_FLAG(m, 0, __VA_ARGS__)
+
+#define FEAT_SPE ID_AA64DFR0_EL1, PMSVer, IMP
+#define FEAT_SPE_FnE ID_AA64DFR0_EL1, PMSVer, V1P2
+#define FEAT_BRBE ID_AA64DFR0_EL1, BRBE, IMP
+#define FEAT_TRC_SR ID_AA64DFR0_EL1, TraceVer, IMP
+#define FEAT_PMUv3 ID_AA64DFR0_EL1, PMUVer, IMP
+#define FEAT_PMUv3p9 ID_AA64DFR0_EL1, PMUVer, V3P9
+#define FEAT_TRBE ID_AA64DFR0_EL1, TraceBuffer, IMP
+#define FEAT_TRBEv1p1 ID_AA64DFR0_EL1, TraceBuffer, TRBE_V1P1
+#define FEAT_DoubleLock ID_AA64DFR0_EL1, DoubleLock, IMP
+#define FEAT_TRF ID_AA64DFR0_EL1, TraceFilt, IMP
+#define FEAT_AA32EL0 ID_AA64PFR0_EL1, EL0, AARCH32
+#define FEAT_AA32EL1 ID_AA64PFR0_EL1, EL1, AARCH32
+#define FEAT_AA64EL1 ID_AA64PFR0_EL1, EL1, IMP
+#define FEAT_AA64EL3 ID_AA64PFR0_EL1, EL3, IMP
+#define FEAT_AIE ID_AA64MMFR3_EL1, AIE, IMP
+#define FEAT_S2POE ID_AA64MMFR3_EL1, S2POE, IMP
+#define FEAT_S1POE ID_AA64MMFR3_EL1, S1POE, IMP
+#define FEAT_S1PIE ID_AA64MMFR3_EL1, S1PIE, IMP
+#define FEAT_THE ID_AA64PFR1_EL1, THE, IMP
+#define FEAT_SME ID_AA64PFR1_EL1, SME, IMP
+#define FEAT_GCS ID_AA64PFR1_EL1, GCS, IMP
+#define FEAT_LS64 ID_AA64ISAR1_EL1, LS64, LS64
+#define FEAT_LS64_V ID_AA64ISAR1_EL1, LS64, LS64_V
+#define FEAT_LS64_ACCDATA ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA
+#define FEAT_RAS ID_AA64PFR0_EL1, RAS, IMP
+#define FEAT_RASv2 ID_AA64PFR0_EL1, RAS, V2
+#define FEAT_GICv3 ID_AA64PFR0_EL1, GIC, IMP
+#define FEAT_LOR ID_AA64MMFR1_EL1, LO, IMP
+#define FEAT_SPEv1p4 ID_AA64DFR0_EL1, PMSVer, V1P4
+#define FEAT_SPEv1p5 ID_AA64DFR0_EL1, PMSVer, V1P5
+#define FEAT_ATS1A ID_AA64ISAR2_EL1, ATS1A, IMP
+#define FEAT_SPECRES2 ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX
+#define FEAT_SPECRES ID_AA64ISAR1_EL1, SPECRES, IMP
+#define FEAT_TLBIRANGE ID_AA64ISAR0_EL1, TLB, RANGE
+#define FEAT_TLBIOS ID_AA64ISAR0_EL1, TLB, OS
+#define FEAT_PAN2 ID_AA64MMFR1_EL1, PAN, PAN2
+#define FEAT_DPB2 ID_AA64ISAR1_EL1, DPB, DPB2
+#define FEAT_AMUv1 ID_AA64PFR0_EL1, AMU, IMP
+#define FEAT_AMUv1p1 ID_AA64PFR0_EL1, AMU, V1P1
+#define FEAT_CMOW ID_AA64MMFR1_EL1, CMOW, IMP
+#define FEAT_D128 ID_AA64MMFR3_EL1, D128, IMP
+#define FEAT_DoubleFault2 ID_AA64PFR1_EL1, DF2, IMP
+#define FEAT_FPMR ID_AA64PFR2_EL1, FPMR, IMP
+#define FEAT_MOPS ID_AA64ISAR2_EL1, MOPS, IMP
+#define FEAT_NMI ID_AA64PFR1_EL1, NMI, IMP
+#define FEAT_SCTLR2 ID_AA64MMFR3_EL1, SCTLRX, IMP
+#define FEAT_SYSREG128 ID_AA64ISAR2_EL1, SYSREG_128, IMP
+#define FEAT_TCR2 ID_AA64MMFR3_EL1, TCRX, IMP
+#define FEAT_XS ID_AA64ISAR1_EL1, XS, IMP
+#define FEAT_EVT ID_AA64MMFR2_EL1, EVT, IMP
+#define FEAT_EVT_TTLBxS ID_AA64MMFR2_EL1, EVT, TTLBxS
+#define FEAT_MTE2 ID_AA64PFR1_EL1, MTE, MTE2
+#define FEAT_RME ID_AA64PFR0_EL1, RME, IMP
+#define FEAT_MPAM ID_AA64PFR0_EL1, MPAM, 1
+#define FEAT_S2FWB ID_AA64MMFR2_EL1, FWB, IMP
+#define FEAT_TME ID_AA64ISAR0_EL1, TME, IMP
+#define FEAT_TWED ID_AA64MMFR1_EL1, TWED, IMP
+#define FEAT_E2H0 ID_AA64MMFR4_EL1, E2H0, IMP
+#define FEAT_SRMASK ID_AA64MMFR4_EL1, SRMASK, IMP
+#define FEAT_PoPS ID_AA64MMFR4_EL1, PoPS, IMP
+#define FEAT_PFAR ID_AA64PFR1_EL1, PFAR, IMP
+#define FEAT_Debugv8p9 ID_AA64DFR0_EL1, PMUVer, V3P9
+#define FEAT_PMUv3_SS ID_AA64DFR0_EL1, PMSS, IMP
+#define FEAT_SEBEP ID_AA64DFR0_EL1, SEBEP, IMP
+#define FEAT_EBEP ID_AA64DFR1_EL1, EBEP, IMP
+#define FEAT_ITE ID_AA64DFR1_EL1, ITE, IMP
+#define FEAT_PMUv3_ICNTR ID_AA64DFR1_EL1, PMICNTR, IMP
+#define FEAT_SPMU ID_AA64DFR1_EL1, SPMU, IMP
+#define FEAT_SPE_nVM ID_AA64DFR2_EL1, SPE_nVM, IMP
+#define FEAT_STEP2 ID_AA64DFR2_EL1, STEP, IMP
+
+static bool not_feat_aa64el3(struct kvm *kvm)
+{
+ return !kvm_has_feat(kvm, FEAT_AA64EL3);
+}
+
+static bool feat_nv2(struct kvm *kvm)
+{
+ return ((kvm_has_feat(kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY) &&
+ kvm_has_feat_enum(kvm, ID_AA64MMFR2_EL1, NV, NI)) ||
+ kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2));
+}
+
+static bool feat_nv2_e2h0_ni(struct kvm *kvm)
+{
+ return feat_nv2(kvm) && !kvm_has_feat(kvm, FEAT_E2H0);
+}
+
+static bool feat_rasv1p1(struct kvm *kvm)
+{
+ return (kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) ||
+ (kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, RAS, IMP) &&
+ kvm_has_feat(kvm, ID_AA64PFR1_EL1, RAS_frac, RASv1p1)));
+}
+
+static bool feat_csv2_2_csv2_1p2(struct kvm *kvm)
+{
+ return (kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) ||
+ (kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2) &&
+ kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, CSV2, IMP)));
+}
+
+static bool feat_pauth(struct kvm *kvm)
+{
+ return kvm_has_pauth(kvm, PAuth);
+}
+
+static bool feat_pauth_lr(struct kvm *kvm)
+{
+ return kvm_has_pauth(kvm, PAuth_LR);
+}
+
+static bool feat_aderr(struct kvm *kvm)
+{
+ return (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, FEAT_ADERR) &&
+ kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SDERR, FEAT_ADERR));
+}
+
+static bool feat_anerr(struct kvm *kvm)
+{
+ return (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ANERR, FEAT_ANERR) &&
+ kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SNERR, FEAT_ANERR));
+}
+
+static bool feat_sme_smps(struct kvm *kvm)
+{
+ /*
+	 * Revisit this if KVM ever supports SME -- this really should
+ * look at the guest's view of SMIDR_EL1. Funnily enough, this
+ * is not captured in the JSON file, but only as a note in the
+ * ARM ARM.
+ */
+ return (kvm_has_feat(kvm, FEAT_SME) &&
+ (read_sysreg_s(SYS_SMIDR_EL1) & SMIDR_EL1_SMPS));
+}
+
+static bool feat_spe_fds(struct kvm *kvm)
+{
+ /*
+	 * Revisit this if KVM ever supports SPE -- this really should
+ * look at the guest's view of PMSIDR_EL1.
+ */
+ return (kvm_has_feat(kvm, FEAT_SPEv1p4) &&
+ (read_sysreg_s(SYS_PMSIDR_EL1) & PMSIDR_EL1_FDS));
+}
+
+static bool feat_trbe_mpam(struct kvm *kvm)
+{
+ /*
+	 * Revisit this if KVM ever supports both MPAM and TRBE --
+ * this really should look at the guest's view of TRBIDR_EL1.
+ */
+ return (kvm_has_feat(kvm, FEAT_TRBE) &&
+ kvm_has_feat(kvm, FEAT_MPAM) &&
+ (read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_MPAM));
+}
+
+static bool feat_ebep_pmuv3_ss(struct kvm *kvm)
+{
+ return kvm_has_feat(kvm, FEAT_EBEP) || kvm_has_feat(kvm, FEAT_PMUv3_SS);
+}
+
+static bool compute_hcr_rw(struct kvm *kvm, u64 *bits)
+{
+ /* This is purely academic: AArch32 and NV are mutually exclusive */
+ if (bits) {
+ if (kvm_has_feat(kvm, FEAT_AA32EL1))
+ *bits &= ~HCR_EL2_RW;
+ else
+ *bits |= HCR_EL2_RW;
+ }
+
+ return true;
+}
+
+static bool compute_hcr_e2h(struct kvm *kvm, u64 *bits)
+{
+ if (bits) {
+ if (kvm_has_feat(kvm, FEAT_E2H0))
+ *bits &= ~HCR_EL2_E2H;
+ else
+ *bits |= HCR_EL2_E2H;
+ }
+
+ return true;
+}
+
+static const struct reg_bits_to_feat_map hfgrtr_feat_map[] = {
+ NEEDS_FEAT(HFGRTR_EL2_nAMAIR2_EL1 |
+ HFGRTR_EL2_nMAIR2_EL1,
+ FEAT_AIE),
+ NEEDS_FEAT(HFGRTR_EL2_nS2POR_EL1, FEAT_S2POE),
+ NEEDS_FEAT(HFGRTR_EL2_nPOR_EL1 |
+ HFGRTR_EL2_nPOR_EL0,
+ FEAT_S1POE),
+ NEEDS_FEAT(HFGRTR_EL2_nPIR_EL1 |
+ HFGRTR_EL2_nPIRE0_EL1,
+ FEAT_S1PIE),
+ NEEDS_FEAT(HFGRTR_EL2_nRCWMASK_EL1, FEAT_THE),
+ NEEDS_FEAT(HFGRTR_EL2_nTPIDR2_EL0 |
+ HFGRTR_EL2_nSMPRI_EL1,
+ FEAT_SME),
+ NEEDS_FEAT(HFGRTR_EL2_nGCS_EL1 |
+ HFGRTR_EL2_nGCS_EL0,
+ FEAT_GCS),
+ NEEDS_FEAT(HFGRTR_EL2_nACCDATA_EL1, FEAT_LS64_ACCDATA),
+ NEEDS_FEAT(HFGRTR_EL2_ERXADDR_EL1 |
+ HFGRTR_EL2_ERXMISCn_EL1 |
+ HFGRTR_EL2_ERXSTATUS_EL1 |
+ HFGRTR_EL2_ERXCTLR_EL1 |
+ HFGRTR_EL2_ERXFR_EL1 |
+ HFGRTR_EL2_ERRSELR_EL1 |
+ HFGRTR_EL2_ERRIDR_EL1,
+ FEAT_RAS),
+ NEEDS_FEAT(HFGRTR_EL2_ERXPFGCDN_EL1 |
+ HFGRTR_EL2_ERXPFGCTL_EL1 |
+ HFGRTR_EL2_ERXPFGF_EL1,
+ feat_rasv1p1),
+ NEEDS_FEAT(HFGRTR_EL2_ICC_IGRPENn_EL1, FEAT_GICv3),
+ NEEDS_FEAT(HFGRTR_EL2_SCXTNUM_EL0 |
+ HFGRTR_EL2_SCXTNUM_EL1,
+ feat_csv2_2_csv2_1p2),
+ NEEDS_FEAT(HFGRTR_EL2_LORSA_EL1 |
+ HFGRTR_EL2_LORN_EL1 |
+ HFGRTR_EL2_LORID_EL1 |
+ HFGRTR_EL2_LOREA_EL1 |
+ HFGRTR_EL2_LORC_EL1,
+ FEAT_LOR),
+ NEEDS_FEAT(HFGRTR_EL2_APIBKey |
+ HFGRTR_EL2_APIAKey |
+ HFGRTR_EL2_APGAKey |
+ HFGRTR_EL2_APDBKey |
+ HFGRTR_EL2_APDAKey,
+ feat_pauth),
+ NEEDS_FEAT_FLAG(HFGRTR_EL2_VBAR_EL1 |
+ HFGRTR_EL2_TTBR1_EL1 |
+ HFGRTR_EL2_TTBR0_EL1 |
+ HFGRTR_EL2_TPIDR_EL0 |
+ HFGRTR_EL2_TPIDRRO_EL0 |
+ HFGRTR_EL2_TPIDR_EL1 |
+ HFGRTR_EL2_TCR_EL1 |
+ HFGRTR_EL2_SCTLR_EL1 |
+ HFGRTR_EL2_REVIDR_EL1 |
+ HFGRTR_EL2_PAR_EL1 |
+ HFGRTR_EL2_MPIDR_EL1 |
+ HFGRTR_EL2_MIDR_EL1 |
+ HFGRTR_EL2_MAIR_EL1 |
+ HFGRTR_EL2_ISR_EL1 |
+ HFGRTR_EL2_FAR_EL1 |
+ HFGRTR_EL2_ESR_EL1 |
+ HFGRTR_EL2_DCZID_EL0 |
+ HFGRTR_EL2_CTR_EL0 |
+ HFGRTR_EL2_CSSELR_EL1 |
+ HFGRTR_EL2_CPACR_EL1 |
+ HFGRTR_EL2_CONTEXTIDR_EL1|
+ HFGRTR_EL2_CLIDR_EL1 |
+ HFGRTR_EL2_CCSIDR_EL1 |
+ HFGRTR_EL2_AMAIR_EL1 |
+ HFGRTR_EL2_AIDR_EL1 |
+ HFGRTR_EL2_AFSR1_EL1 |
+ HFGRTR_EL2_AFSR0_EL1,
+ NEVER_FGU, FEAT_AA64EL1),
+};
+
+static const struct reg_bits_to_feat_map hfgwtr_feat_map[] = {
+ NEEDS_FEAT(HFGWTR_EL2_nAMAIR2_EL1 |
+ HFGWTR_EL2_nMAIR2_EL1,
+ FEAT_AIE),
+ NEEDS_FEAT(HFGWTR_EL2_nS2POR_EL1, FEAT_S2POE),
+ NEEDS_FEAT(HFGWTR_EL2_nPOR_EL1 |
+ HFGWTR_EL2_nPOR_EL0,
+ FEAT_S1POE),
+ NEEDS_FEAT(HFGWTR_EL2_nPIR_EL1 |
+ HFGWTR_EL2_nPIRE0_EL1,
+ FEAT_S1PIE),
+ NEEDS_FEAT(HFGWTR_EL2_nRCWMASK_EL1, FEAT_THE),
+ NEEDS_FEAT(HFGWTR_EL2_nTPIDR2_EL0 |
+ HFGWTR_EL2_nSMPRI_EL1,
+ FEAT_SME),
+ NEEDS_FEAT(HFGWTR_EL2_nGCS_EL1 |
+ HFGWTR_EL2_nGCS_EL0,
+ FEAT_GCS),
+ NEEDS_FEAT(HFGWTR_EL2_nACCDATA_EL1, FEAT_LS64_ACCDATA),
+ NEEDS_FEAT(HFGWTR_EL2_ERXADDR_EL1 |
+ HFGWTR_EL2_ERXMISCn_EL1 |
+ HFGWTR_EL2_ERXSTATUS_EL1 |
+ HFGWTR_EL2_ERXCTLR_EL1 |
+ HFGWTR_EL2_ERRSELR_EL1,
+ FEAT_RAS),
+ NEEDS_FEAT(HFGWTR_EL2_ERXPFGCDN_EL1 |
+ HFGWTR_EL2_ERXPFGCTL_EL1,
+ feat_rasv1p1),
+ NEEDS_FEAT(HFGWTR_EL2_ICC_IGRPENn_EL1, FEAT_GICv3),
+ NEEDS_FEAT(HFGWTR_EL2_SCXTNUM_EL0 |
+ HFGWTR_EL2_SCXTNUM_EL1,
+ feat_csv2_2_csv2_1p2),
+ NEEDS_FEAT(HFGWTR_EL2_LORSA_EL1 |
+ HFGWTR_EL2_LORN_EL1 |
+ HFGWTR_EL2_LOREA_EL1 |
+ HFGWTR_EL2_LORC_EL1,
+ FEAT_LOR),
+ NEEDS_FEAT(HFGWTR_EL2_APIBKey |
+ HFGWTR_EL2_APIAKey |
+ HFGWTR_EL2_APGAKey |
+ HFGWTR_EL2_APDBKey |
+ HFGWTR_EL2_APDAKey,
+ feat_pauth),
+ NEEDS_FEAT_FLAG(HFGWTR_EL2_VBAR_EL1 |
+ HFGWTR_EL2_TTBR1_EL1 |
+ HFGWTR_EL2_TTBR0_EL1 |
+ HFGWTR_EL2_TPIDR_EL0 |
+ HFGWTR_EL2_TPIDRRO_EL0 |
+ HFGWTR_EL2_TPIDR_EL1 |
+ HFGWTR_EL2_TCR_EL1 |
+ HFGWTR_EL2_SCTLR_EL1 |
+ HFGWTR_EL2_PAR_EL1 |
+ HFGWTR_EL2_MAIR_EL1 |
+ HFGWTR_EL2_FAR_EL1 |
+ HFGWTR_EL2_ESR_EL1 |
+ HFGWTR_EL2_CSSELR_EL1 |
+ HFGWTR_EL2_CPACR_EL1 |
+ HFGWTR_EL2_CONTEXTIDR_EL1|
+ HFGWTR_EL2_AMAIR_EL1 |
+ HFGWTR_EL2_AFSR1_EL1 |
+ HFGWTR_EL2_AFSR0_EL1,
+ NEVER_FGU, FEAT_AA64EL1),
+};
+
+static const struct reg_bits_to_feat_map hdfgrtr_feat_map[] = {
+ NEEDS_FEAT(HDFGRTR_EL2_PMBIDR_EL1 |
+ HDFGRTR_EL2_PMSLATFR_EL1 |
+ HDFGRTR_EL2_PMSIRR_EL1 |
+ HDFGRTR_EL2_PMSIDR_EL1 |
+ HDFGRTR_EL2_PMSICR_EL1 |
+ HDFGRTR_EL2_PMSFCR_EL1 |
+ HDFGRTR_EL2_PMSEVFR_EL1 |
+ HDFGRTR_EL2_PMSCR_EL1 |
+ HDFGRTR_EL2_PMBSR_EL1 |
+ HDFGRTR_EL2_PMBPTR_EL1 |
+ HDFGRTR_EL2_PMBLIMITR_EL1,
+ FEAT_SPE),
+ NEEDS_FEAT(HDFGRTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE),
+ NEEDS_FEAT(HDFGRTR_EL2_nBRBDATA |
+ HDFGRTR_EL2_nBRBCTL |
+ HDFGRTR_EL2_nBRBIDR,
+ FEAT_BRBE),
+ NEEDS_FEAT(HDFGRTR_EL2_TRCVICTLR |
+ HDFGRTR_EL2_TRCSTATR |
+ HDFGRTR_EL2_TRCSSCSRn |
+ HDFGRTR_EL2_TRCSEQSTR |
+ HDFGRTR_EL2_TRCPRGCTLR |
+ HDFGRTR_EL2_TRCOSLSR |
+ HDFGRTR_EL2_TRCIMSPECn |
+ HDFGRTR_EL2_TRCID |
+ HDFGRTR_EL2_TRCCNTVRn |
+ HDFGRTR_EL2_TRCCLAIM |
+ HDFGRTR_EL2_TRCAUXCTLR |
+ HDFGRTR_EL2_TRCAUTHSTATUS |
+ HDFGRTR_EL2_TRC,
+ FEAT_TRC_SR),
+ NEEDS_FEAT(HDFGRTR_EL2_PMCEIDn_EL0 |
+ HDFGRTR_EL2_PMUSERENR_EL0 |
+ HDFGRTR_EL2_PMMIR_EL1 |
+ HDFGRTR_EL2_PMSELR_EL0 |
+ HDFGRTR_EL2_PMOVS |
+ HDFGRTR_EL2_PMINTEN |
+ HDFGRTR_EL2_PMCNTEN |
+ HDFGRTR_EL2_PMCCNTR_EL0 |
+ HDFGRTR_EL2_PMCCFILTR_EL0 |
+ HDFGRTR_EL2_PMEVTYPERn_EL0 |
+ HDFGRTR_EL2_PMEVCNTRn_EL0,
+ FEAT_PMUv3),
+ NEEDS_FEAT(HDFGRTR_EL2_TRBTRG_EL1 |
+ HDFGRTR_EL2_TRBSR_EL1 |
+ HDFGRTR_EL2_TRBPTR_EL1 |
+ HDFGRTR_EL2_TRBMAR_EL1 |
+ HDFGRTR_EL2_TRBLIMITR_EL1 |
+ HDFGRTR_EL2_TRBIDR_EL1 |
+ HDFGRTR_EL2_TRBBASER_EL1,
+ FEAT_TRBE),
+ NEEDS_FEAT_FLAG(HDFGRTR_EL2_OSDLR_EL1, NEVER_FGU,
+ FEAT_DoubleLock),
+ NEEDS_FEAT_FLAG(HDFGRTR_EL2_OSECCR_EL1 |
+ HDFGRTR_EL2_OSLSR_EL1 |
+ HDFGRTR_EL2_DBGPRCR_EL1 |
+ HDFGRTR_EL2_DBGAUTHSTATUS_EL1|
+ HDFGRTR_EL2_DBGCLAIM |
+ HDFGRTR_EL2_MDSCR_EL1 |
+ HDFGRTR_EL2_DBGWVRn_EL1 |
+ HDFGRTR_EL2_DBGWCRn_EL1 |
+ HDFGRTR_EL2_DBGBVRn_EL1 |
+ HDFGRTR_EL2_DBGBCRn_EL1,
+ NEVER_FGU, FEAT_AA64EL1)
+};
+
+static const struct reg_bits_to_feat_map hdfgwtr_feat_map[] = {
+ NEEDS_FEAT(HDFGWTR_EL2_PMSLATFR_EL1 |
+ HDFGWTR_EL2_PMSIRR_EL1 |
+ HDFGWTR_EL2_PMSICR_EL1 |
+ HDFGWTR_EL2_PMSFCR_EL1 |
+ HDFGWTR_EL2_PMSEVFR_EL1 |
+ HDFGWTR_EL2_PMSCR_EL1 |
+ HDFGWTR_EL2_PMBSR_EL1 |
+ HDFGWTR_EL2_PMBPTR_EL1 |
+ HDFGWTR_EL2_PMBLIMITR_EL1,
+ FEAT_SPE),
+ NEEDS_FEAT(HDFGWTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE),
+ NEEDS_FEAT(HDFGWTR_EL2_nBRBDATA |
+ HDFGWTR_EL2_nBRBCTL,
+ FEAT_BRBE),
+ NEEDS_FEAT(HDFGWTR_EL2_TRCVICTLR |
+ HDFGWTR_EL2_TRCSSCSRn |
+ HDFGWTR_EL2_TRCSEQSTR |
+ HDFGWTR_EL2_TRCPRGCTLR |
+ HDFGWTR_EL2_TRCOSLAR |
+ HDFGWTR_EL2_TRCIMSPECn |
+ HDFGWTR_EL2_TRCCNTVRn |
+ HDFGWTR_EL2_TRCCLAIM |
+ HDFGWTR_EL2_TRCAUXCTLR |
+ HDFGWTR_EL2_TRC,
+ FEAT_TRC_SR),
+ NEEDS_FEAT(HDFGWTR_EL2_PMUSERENR_EL0 |
+ HDFGWTR_EL2_PMCR_EL0 |
+ HDFGWTR_EL2_PMSWINC_EL0 |
+ HDFGWTR_EL2_PMSELR_EL0 |
+ HDFGWTR_EL2_PMOVS |
+ HDFGWTR_EL2_PMINTEN |
+ HDFGWTR_EL2_PMCNTEN |
+ HDFGWTR_EL2_PMCCNTR_EL0 |
+ HDFGWTR_EL2_PMCCFILTR_EL0 |
+ HDFGWTR_EL2_PMEVTYPERn_EL0 |
+ HDFGWTR_EL2_PMEVCNTRn_EL0,
+ FEAT_PMUv3),
+ NEEDS_FEAT(HDFGWTR_EL2_TRBTRG_EL1 |
+ HDFGWTR_EL2_TRBSR_EL1 |
+ HDFGWTR_EL2_TRBPTR_EL1 |
+ HDFGWTR_EL2_TRBMAR_EL1 |
+ HDFGWTR_EL2_TRBLIMITR_EL1 |
+ HDFGWTR_EL2_TRBBASER_EL1,
+ FEAT_TRBE),
+ NEEDS_FEAT_FLAG(HDFGWTR_EL2_OSDLR_EL1,
+ NEVER_FGU, FEAT_DoubleLock),
+ NEEDS_FEAT_FLAG(HDFGWTR_EL2_OSECCR_EL1 |
+ HDFGWTR_EL2_OSLAR_EL1 |
+ HDFGWTR_EL2_DBGPRCR_EL1 |
+ HDFGWTR_EL2_DBGCLAIM |
+ HDFGWTR_EL2_MDSCR_EL1 |
+ HDFGWTR_EL2_DBGWVRn_EL1 |
+ HDFGWTR_EL2_DBGWCRn_EL1 |
+ HDFGWTR_EL2_DBGBVRn_EL1 |
+ HDFGWTR_EL2_DBGBCRn_EL1,
+ NEVER_FGU, FEAT_AA64EL1),
+ NEEDS_FEAT(HDFGWTR_EL2_TRFCR_EL1, FEAT_TRF),
+};
+
+
+static const struct reg_bits_to_feat_map hfgitr_feat_map[] = {
+ NEEDS_FEAT(HFGITR_EL2_PSBCSYNC, FEAT_SPEv1p5),
+ NEEDS_FEAT(HFGITR_EL2_ATS1E1A, FEAT_ATS1A),
+ NEEDS_FEAT(HFGITR_EL2_COSPRCTX, FEAT_SPECRES2),
+ NEEDS_FEAT(HFGITR_EL2_nGCSEPP |
+ HFGITR_EL2_nGCSSTR_EL1 |
+ HFGITR_EL2_nGCSPUSHM_EL1,
+ FEAT_GCS),
+ NEEDS_FEAT(HFGITR_EL2_nBRBIALL |
+ HFGITR_EL2_nBRBINJ,
+ FEAT_BRBE),
+ NEEDS_FEAT(HFGITR_EL2_CPPRCTX |
+ HFGITR_EL2_DVPRCTX |
+ HFGITR_EL2_CFPRCTX,
+ FEAT_SPECRES),
+ NEEDS_FEAT(HFGITR_EL2_TLBIRVAALE1 |
+ HFGITR_EL2_TLBIRVALE1 |
+ HFGITR_EL2_TLBIRVAAE1 |
+ HFGITR_EL2_TLBIRVAE1 |
+ HFGITR_EL2_TLBIRVAALE1IS |
+ HFGITR_EL2_TLBIRVALE1IS |
+ HFGITR_EL2_TLBIRVAAE1IS |
+ HFGITR_EL2_TLBIRVAE1IS |
+ HFGITR_EL2_TLBIRVAALE1OS |
+ HFGITR_EL2_TLBIRVALE1OS |
+ HFGITR_EL2_TLBIRVAAE1OS |
+ HFGITR_EL2_TLBIRVAE1OS,
+ FEAT_TLBIRANGE),
+ NEEDS_FEAT(HFGITR_EL2_TLBIVAALE1OS |
+ HFGITR_EL2_TLBIVALE1OS |
+ HFGITR_EL2_TLBIVAAE1OS |
+ HFGITR_EL2_TLBIASIDE1OS |
+ HFGITR_EL2_TLBIVAE1OS |
+ HFGITR_EL2_TLBIVMALLE1OS,
+ FEAT_TLBIOS),
+ NEEDS_FEAT(HFGITR_EL2_ATS1E1WP |
+ HFGITR_EL2_ATS1E1RP,
+ FEAT_PAN2),
+ NEEDS_FEAT(HFGITR_EL2_DCCVADP, FEAT_DPB2),
+ NEEDS_FEAT_FLAG(HFGITR_EL2_DCCVAC |
+ HFGITR_EL2_SVC_EL1 |
+ HFGITR_EL2_SVC_EL0 |
+ HFGITR_EL2_ERET |
+ HFGITR_EL2_TLBIVAALE1 |
+ HFGITR_EL2_TLBIVALE1 |
+ HFGITR_EL2_TLBIVAAE1 |
+ HFGITR_EL2_TLBIASIDE1 |
+ HFGITR_EL2_TLBIVAE1 |
+ HFGITR_EL2_TLBIVMALLE1 |
+ HFGITR_EL2_TLBIVAALE1IS |
+ HFGITR_EL2_TLBIVALE1IS |
+ HFGITR_EL2_TLBIVAAE1IS |
+ HFGITR_EL2_TLBIASIDE1IS |
+ HFGITR_EL2_TLBIVAE1IS |
+ HFGITR_EL2_TLBIVMALLE1IS|
+ HFGITR_EL2_ATS1E0W |
+ HFGITR_EL2_ATS1E0R |
+ HFGITR_EL2_ATS1E1W |
+ HFGITR_EL2_ATS1E1R |
+ HFGITR_EL2_DCZVA |
+ HFGITR_EL2_DCCIVAC |
+ HFGITR_EL2_DCCVAP |
+ HFGITR_EL2_DCCVAU |
+ HFGITR_EL2_DCCISW |
+ HFGITR_EL2_DCCSW |
+ HFGITR_EL2_DCISW |
+ HFGITR_EL2_DCIVAC |
+ HFGITR_EL2_ICIVAU |
+ HFGITR_EL2_ICIALLU |
+ HFGITR_EL2_ICIALLUIS,
+ NEVER_FGU, FEAT_AA64EL1),
+};
+
+static const struct reg_bits_to_feat_map hafgrtr_feat_map[] = {
+ NEEDS_FEAT(HAFGRTR_EL2_AMEVTYPER115_EL0 |
+ HAFGRTR_EL2_AMEVTYPER114_EL0 |
+ HAFGRTR_EL2_AMEVTYPER113_EL0 |
+ HAFGRTR_EL2_AMEVTYPER112_EL0 |
+ HAFGRTR_EL2_AMEVTYPER111_EL0 |
+ HAFGRTR_EL2_AMEVTYPER110_EL0 |
+ HAFGRTR_EL2_AMEVTYPER19_EL0 |
+ HAFGRTR_EL2_AMEVTYPER18_EL0 |
+ HAFGRTR_EL2_AMEVTYPER17_EL0 |
+ HAFGRTR_EL2_AMEVTYPER16_EL0 |
+ HAFGRTR_EL2_AMEVTYPER15_EL0 |
+ HAFGRTR_EL2_AMEVTYPER14_EL0 |
+ HAFGRTR_EL2_AMEVTYPER13_EL0 |
+ HAFGRTR_EL2_AMEVTYPER12_EL0 |
+ HAFGRTR_EL2_AMEVTYPER11_EL0 |
+ HAFGRTR_EL2_AMEVTYPER10_EL0 |
+ HAFGRTR_EL2_AMEVCNTR115_EL0 |
+ HAFGRTR_EL2_AMEVCNTR114_EL0 |
+ HAFGRTR_EL2_AMEVCNTR113_EL0 |
+ HAFGRTR_EL2_AMEVCNTR112_EL0 |
+ HAFGRTR_EL2_AMEVCNTR111_EL0 |
+ HAFGRTR_EL2_AMEVCNTR110_EL0 |
+ HAFGRTR_EL2_AMEVCNTR19_EL0 |
+ HAFGRTR_EL2_AMEVCNTR18_EL0 |
+ HAFGRTR_EL2_AMEVCNTR17_EL0 |
+ HAFGRTR_EL2_AMEVCNTR16_EL0 |
+ HAFGRTR_EL2_AMEVCNTR15_EL0 |
+ HAFGRTR_EL2_AMEVCNTR14_EL0 |
+ HAFGRTR_EL2_AMEVCNTR13_EL0 |
+ HAFGRTR_EL2_AMEVCNTR12_EL0 |
+ HAFGRTR_EL2_AMEVCNTR11_EL0 |
+ HAFGRTR_EL2_AMEVCNTR10_EL0 |
+ HAFGRTR_EL2_AMCNTEN1 |
+ HAFGRTR_EL2_AMCNTEN0 |
+ HAFGRTR_EL2_AMEVCNTR03_EL0 |
+ HAFGRTR_EL2_AMEVCNTR02_EL0 |
+ HAFGRTR_EL2_AMEVCNTR01_EL0 |
+ HAFGRTR_EL2_AMEVCNTR00_EL0,
+ FEAT_AMUv1),
+};
+
+static const struct reg_bits_to_feat_map hfgitr2_feat_map[] = {
+ NEEDS_FEAT(HFGITR2_EL2_nDCCIVAPS, FEAT_PoPS),
+ NEEDS_FEAT(HFGITR2_EL2_TSBCSYNC, FEAT_TRBEv1p1)
+};
+
+static const struct reg_bits_to_feat_map hfgrtr2_feat_map[] = {
+ NEEDS_FEAT(HFGRTR2_EL2_nPFAR_EL1, FEAT_PFAR),
+ NEEDS_FEAT(HFGRTR2_EL2_nERXGSR_EL1, FEAT_RASv2),
+ NEEDS_FEAT(HFGRTR2_EL2_nACTLRALIAS_EL1 |
+ HFGRTR2_EL2_nACTLRMASK_EL1 |
+ HFGRTR2_EL2_nCPACRALIAS_EL1 |
+ HFGRTR2_EL2_nCPACRMASK_EL1 |
+ HFGRTR2_EL2_nSCTLR2MASK_EL1 |
+ HFGRTR2_EL2_nSCTLRALIAS2_EL1 |
+ HFGRTR2_EL2_nSCTLRALIAS_EL1 |
+ HFGRTR2_EL2_nSCTLRMASK_EL1 |
+ HFGRTR2_EL2_nTCR2ALIAS_EL1 |
+ HFGRTR2_EL2_nTCR2MASK_EL1 |
+ HFGRTR2_EL2_nTCRALIAS_EL1 |
+ HFGRTR2_EL2_nTCRMASK_EL1,
+ FEAT_SRMASK),
+ NEEDS_FEAT(HFGRTR2_EL2_nRCWSMASK_EL1, FEAT_THE),
+};
+
+static const struct reg_bits_to_feat_map hfgwtr2_feat_map[] = {
+ NEEDS_FEAT(HFGWTR2_EL2_nPFAR_EL1, FEAT_PFAR),
+ NEEDS_FEAT(HFGWTR2_EL2_nACTLRALIAS_EL1 |
+ HFGWTR2_EL2_nACTLRMASK_EL1 |
+ HFGWTR2_EL2_nCPACRALIAS_EL1 |
+ HFGWTR2_EL2_nCPACRMASK_EL1 |
+ HFGWTR2_EL2_nSCTLR2MASK_EL1 |
+ HFGWTR2_EL2_nSCTLRALIAS2_EL1 |
+ HFGWTR2_EL2_nSCTLRALIAS_EL1 |
+ HFGWTR2_EL2_nSCTLRMASK_EL1 |
+ HFGWTR2_EL2_nTCR2ALIAS_EL1 |
+ HFGWTR2_EL2_nTCR2MASK_EL1 |
+ HFGWTR2_EL2_nTCRALIAS_EL1 |
+ HFGWTR2_EL2_nTCRMASK_EL1,
+ FEAT_SRMASK),
+ NEEDS_FEAT(HFGWTR2_EL2_nRCWSMASK_EL1, FEAT_THE),
+};
+
+static const struct reg_bits_to_feat_map hdfgrtr2_feat_map[] = {
+ NEEDS_FEAT(HDFGRTR2_EL2_nMDSELR_EL1, FEAT_Debugv8p9),
+ NEEDS_FEAT(HDFGRTR2_EL2_nPMECR_EL1, feat_ebep_pmuv3_ss),
+ NEEDS_FEAT(HDFGRTR2_EL2_nTRCITECR_EL1, FEAT_ITE),
+ NEEDS_FEAT(HDFGRTR2_EL2_nPMICFILTR_EL0 |
+ HDFGRTR2_EL2_nPMICNTR_EL0,
+ FEAT_PMUv3_ICNTR),
+ NEEDS_FEAT(HDFGRTR2_EL2_nPMUACR_EL1, FEAT_PMUv3p9),
+ NEEDS_FEAT(HDFGRTR2_EL2_nPMSSCR_EL1 |
+ HDFGRTR2_EL2_nPMSSDATA,
+ FEAT_PMUv3_SS),
+ NEEDS_FEAT(HDFGRTR2_EL2_nPMIAR_EL1, FEAT_SEBEP),
+ NEEDS_FEAT(HDFGRTR2_EL2_nPMSDSFR_EL1, feat_spe_fds),
+ NEEDS_FEAT(HDFGRTR2_EL2_nPMBMAR_EL1, FEAT_SPE_nVM),
+ NEEDS_FEAT(HDFGRTR2_EL2_nSPMACCESSR_EL1 |
+ HDFGRTR2_EL2_nSPMCNTEN |
+ HDFGRTR2_EL2_nSPMCR_EL0 |
+ HDFGRTR2_EL2_nSPMDEVAFF_EL1 |
+ HDFGRTR2_EL2_nSPMEVCNTRn_EL0 |
+ HDFGRTR2_EL2_nSPMEVTYPERn_EL0|
+ HDFGRTR2_EL2_nSPMID |
+ HDFGRTR2_EL2_nSPMINTEN |
+ HDFGRTR2_EL2_nSPMOVS |
+ HDFGRTR2_EL2_nSPMSCR_EL1 |
+ HDFGRTR2_EL2_nSPMSELR_EL0,
+ FEAT_SPMU),
+ NEEDS_FEAT(HDFGRTR2_EL2_nMDSTEPOP_EL1, FEAT_STEP2),
+ NEEDS_FEAT(HDFGRTR2_EL2_nTRBMPAM_EL1, feat_trbe_mpam),
+};
+
+static const struct reg_bits_to_feat_map hdfgwtr2_feat_map[] = {
+ NEEDS_FEAT(HDFGWTR2_EL2_nMDSELR_EL1, FEAT_Debugv8p9),
+ NEEDS_FEAT(HDFGWTR2_EL2_nPMECR_EL1, feat_ebep_pmuv3_ss),
+ NEEDS_FEAT(HDFGWTR2_EL2_nTRCITECR_EL1, FEAT_ITE),
+ NEEDS_FEAT(HDFGWTR2_EL2_nPMICFILTR_EL0 |
+ HDFGWTR2_EL2_nPMICNTR_EL0,
+ FEAT_PMUv3_ICNTR),
+ NEEDS_FEAT(HDFGWTR2_EL2_nPMUACR_EL1 |
+ HDFGWTR2_EL2_nPMZR_EL0,
+ FEAT_PMUv3p9),
+ NEEDS_FEAT(HDFGWTR2_EL2_nPMSSCR_EL1, FEAT_PMUv3_SS),
+ NEEDS_FEAT(HDFGWTR2_EL2_nPMIAR_EL1, FEAT_SEBEP),
+ NEEDS_FEAT(HDFGWTR2_EL2_nPMSDSFR_EL1, feat_spe_fds),
+ NEEDS_FEAT(HDFGWTR2_EL2_nPMBMAR_EL1, FEAT_SPE_nVM),
+ NEEDS_FEAT(HDFGWTR2_EL2_nSPMACCESSR_EL1 |
+ HDFGWTR2_EL2_nSPMCNTEN |
+ HDFGWTR2_EL2_nSPMCR_EL0 |
+ HDFGWTR2_EL2_nSPMEVCNTRn_EL0 |
+ HDFGWTR2_EL2_nSPMEVTYPERn_EL0|
+ HDFGWTR2_EL2_nSPMINTEN |
+ HDFGWTR2_EL2_nSPMOVS |
+ HDFGWTR2_EL2_nSPMSCR_EL1 |
+ HDFGWTR2_EL2_nSPMSELR_EL0,
+ FEAT_SPMU),
+ NEEDS_FEAT(HDFGWTR2_EL2_nMDSTEPOP_EL1, FEAT_STEP2),
+ NEEDS_FEAT(HDFGWTR2_EL2_nTRBMPAM_EL1, feat_trbe_mpam),
+};
+
+static const struct reg_bits_to_feat_map hcrx_feat_map[] = {
+ NEEDS_FEAT(HCRX_EL2_PACMEn, feat_pauth_lr),
+ NEEDS_FEAT(HCRX_EL2_EnFPM, FEAT_FPMR),
+ NEEDS_FEAT(HCRX_EL2_GCSEn, FEAT_GCS),
+ NEEDS_FEAT(HCRX_EL2_EnIDCP128, FEAT_SYSREG128),
+ NEEDS_FEAT(HCRX_EL2_EnSDERR, feat_aderr),
+ NEEDS_FEAT(HCRX_EL2_TMEA, FEAT_DoubleFault2),
+ NEEDS_FEAT(HCRX_EL2_EnSNERR, feat_anerr),
+ NEEDS_FEAT(HCRX_EL2_D128En, FEAT_D128),
+ NEEDS_FEAT(HCRX_EL2_PTTWI, FEAT_THE),
+ NEEDS_FEAT(HCRX_EL2_SCTLR2En, FEAT_SCTLR2),
+ NEEDS_FEAT(HCRX_EL2_TCR2En, FEAT_TCR2),
+ NEEDS_FEAT(HCRX_EL2_MSCEn |
+ HCRX_EL2_MCE2,
+ FEAT_MOPS),
+ NEEDS_FEAT(HCRX_EL2_CMOW, FEAT_CMOW),
+ NEEDS_FEAT(HCRX_EL2_VFNMI |
+ HCRX_EL2_VINMI |
+ HCRX_EL2_TALLINT,
+ FEAT_NMI),
+ NEEDS_FEAT(HCRX_EL2_SMPME, feat_sme_smps),
+ NEEDS_FEAT(HCRX_EL2_FGTnXS |
+ HCRX_EL2_FnXS,
+ FEAT_XS),
+ NEEDS_FEAT(HCRX_EL2_EnASR, FEAT_LS64_V),
+ NEEDS_FEAT(HCRX_EL2_EnALS, FEAT_LS64),
+ NEEDS_FEAT(HCRX_EL2_EnAS0, FEAT_LS64_ACCDATA),
+};
+
+static const struct reg_bits_to_feat_map hcr_feat_map[] = {
+ NEEDS_FEAT(HCR_EL2_TID0, FEAT_AA32EL0),
+ NEEDS_FEAT_FIXED(HCR_EL2_RW, compute_hcr_rw),
+ NEEDS_FEAT(HCR_EL2_HCD, not_feat_aa64el3),
+ NEEDS_FEAT(HCR_EL2_AMO |
+ HCR_EL2_BSU |
+ HCR_EL2_CD |
+ HCR_EL2_DC |
+ HCR_EL2_FB |
+ HCR_EL2_FMO |
+ HCR_EL2_ID |
+ HCR_EL2_IMO |
+ HCR_EL2_MIOCNCE |
+ HCR_EL2_PTW |
+ HCR_EL2_SWIO |
+ HCR_EL2_TACR |
+ HCR_EL2_TDZ |
+ HCR_EL2_TGE |
+ HCR_EL2_TID1 |
+ HCR_EL2_TID2 |
+ HCR_EL2_TID3 |
+ HCR_EL2_TIDCP |
+ HCR_EL2_TPCP |
+ HCR_EL2_TPU |
+ HCR_EL2_TRVM |
+ HCR_EL2_TSC |
+ HCR_EL2_TSW |
+ HCR_EL2_TTLB |
+ HCR_EL2_TVM |
+ HCR_EL2_TWE |
+ HCR_EL2_TWI |
+ HCR_EL2_VF |
+ HCR_EL2_VI |
+ HCR_EL2_VM |
+ HCR_EL2_VSE,
+ FEAT_AA64EL1),
+ NEEDS_FEAT(HCR_EL2_AMVOFFEN, FEAT_AMUv1p1),
+ NEEDS_FEAT(HCR_EL2_EnSCXT, feat_csv2_2_csv2_1p2),
+ NEEDS_FEAT(HCR_EL2_TICAB |
+ HCR_EL2_TID4 |
+ HCR_EL2_TOCU,
+ FEAT_EVT),
+ NEEDS_FEAT(HCR_EL2_TTLBIS |
+ HCR_EL2_TTLBOS,
+ FEAT_EVT_TTLBxS),
+ NEEDS_FEAT(HCR_EL2_TLOR, FEAT_LOR),
+ NEEDS_FEAT(HCR_EL2_ATA |
+ HCR_EL2_DCT |
+ HCR_EL2_TID5,
+ FEAT_MTE2),
+ NEEDS_FEAT(HCR_EL2_AT | /* Ignore the original FEAT_NV */
+ HCR_EL2_NV2 |
+ HCR_EL2_NV,
+ feat_nv2),
+ NEEDS_FEAT(HCR_EL2_NV1, feat_nv2_e2h0_ni), /* Missing from JSON */
+ NEEDS_FEAT(HCR_EL2_API |
+ HCR_EL2_APK,
+ feat_pauth),
+ NEEDS_FEAT(HCR_EL2_TEA |
+ HCR_EL2_TERR,
+ FEAT_RAS),
+ NEEDS_FEAT(HCR_EL2_FIEN, feat_rasv1p1),
+ NEEDS_FEAT(HCR_EL2_GPF, FEAT_RME),
+ NEEDS_FEAT(HCR_EL2_FWB, FEAT_S2FWB),
+ NEEDS_FEAT(HCR_EL2_TME, FEAT_TME),
+ NEEDS_FEAT(HCR_EL2_TWEDEL |
+ HCR_EL2_TWEDEn,
+ FEAT_TWED),
+ NEEDS_FEAT_FIXED(HCR_EL2_E2H, compute_hcr_e2h),
+};
+
+static void __init check_feat_map(const struct reg_bits_to_feat_map *map,
+ int map_size, u64 res0, const char *str)
+{
+ u64 mask = 0;
+
+ for (int i = 0; i < map_size; i++)
+ mask |= map[i].bits;
+
+ if (mask != ~res0)
+ kvm_err("Undefined %s behaviour, bits %016llx\n",
+ str, mask ^ ~res0);
+}
+
+void __init check_feature_map(void)
+{
+ check_feat_map(hfgrtr_feat_map, ARRAY_SIZE(hfgrtr_feat_map),
+ hfgrtr_masks.res0, hfgrtr_masks.str);
+ check_feat_map(hfgwtr_feat_map, ARRAY_SIZE(hfgwtr_feat_map),
+ hfgwtr_masks.res0, hfgwtr_masks.str);
+ check_feat_map(hfgitr_feat_map, ARRAY_SIZE(hfgitr_feat_map),
+ hfgitr_masks.res0, hfgitr_masks.str);
+ check_feat_map(hdfgrtr_feat_map, ARRAY_SIZE(hdfgrtr_feat_map),
+ hdfgrtr_masks.res0, hdfgrtr_masks.str);
+ check_feat_map(hdfgwtr_feat_map, ARRAY_SIZE(hdfgwtr_feat_map),
+ hdfgwtr_masks.res0, hdfgwtr_masks.str);
+ check_feat_map(hafgrtr_feat_map, ARRAY_SIZE(hafgrtr_feat_map),
+ hafgrtr_masks.res0, hafgrtr_masks.str);
+ check_feat_map(hcrx_feat_map, ARRAY_SIZE(hcrx_feat_map),
+ __HCRX_EL2_RES0, "HCRX_EL2");
+ check_feat_map(hcr_feat_map, ARRAY_SIZE(hcr_feat_map),
+ HCR_EL2_RES0, "HCR_EL2");
+}
+
+static bool idreg_feat_match(struct kvm *kvm, const struct reg_bits_to_feat_map *map)
+{
+ u64 regval = kvm->arch.id_regs[map->regidx];
+ u64 regfld = (regval >> map->shift) & GENMASK(map->width - 1, 0);
+
+ if (map->sign) {
+ s64 sfld = sign_extend64(regfld, map->width - 1);
+ s64 slim = sign_extend64(map->lo_lim, map->width - 1);
+ return sfld >= slim;
+ } else {
+ return regfld >= map->lo_lim;
+ }
+}
+
+static u64 __compute_fixed_bits(struct kvm *kvm,
+ const struct reg_bits_to_feat_map *map,
+ int map_size,
+ u64 *fixed_bits,
+ unsigned long require,
+ unsigned long exclude)
+{
+ u64 val = 0;
+
+ for (int i = 0; i < map_size; i++) {
+ bool match;
+
+ if ((map[i].flags & require) != require)
+ continue;
+
+ if (map[i].flags & exclude)
+ continue;
+
+ if (map[i].flags & CALL_FUNC)
+ match = (map[i].flags & FIXED_VALUE) ?
+ map[i].fval(kvm, fixed_bits) :
+ map[i].match(kvm);
+ else
+ match = idreg_feat_match(kvm, &map[i]);
+
+ if (!match || (map[i].flags & FIXED_VALUE))
+ val |= map[i].bits;
+ }
+
+ return val;
+}
+
+static u64 compute_res0_bits(struct kvm *kvm,
+ const struct reg_bits_to_feat_map *map,
+ int map_size,
+ unsigned long require,
+ unsigned long exclude)
+{
+ return __compute_fixed_bits(kvm, map, map_size, NULL,
+ require, exclude | FIXED_VALUE);
+}
+
+static u64 compute_fixed_bits(struct kvm *kvm,
+ const struct reg_bits_to_feat_map *map,
+ int map_size,
+ u64 *fixed_bits,
+ unsigned long require,
+ unsigned long exclude)
+{
+ return __compute_fixed_bits(kvm, map, map_size, fixed_bits,
+ require | FIXED_VALUE, exclude);
+}
+
+void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt)
+{
+ u64 val = 0;
+
+ switch (fgt) {
+ case HFGRTR_GROUP:
+ val |= compute_res0_bits(kvm, hfgrtr_feat_map,
+ ARRAY_SIZE(hfgrtr_feat_map),
+ 0, NEVER_FGU);
+ val |= compute_res0_bits(kvm, hfgwtr_feat_map,
+ ARRAY_SIZE(hfgwtr_feat_map),
+ 0, NEVER_FGU);
+ break;
+ case HFGITR_GROUP:
+ val |= compute_res0_bits(kvm, hfgitr_feat_map,
+ ARRAY_SIZE(hfgitr_feat_map),
+ 0, NEVER_FGU);
+ break;
+ case HDFGRTR_GROUP:
+ val |= compute_res0_bits(kvm, hdfgrtr_feat_map,
+ ARRAY_SIZE(hdfgrtr_feat_map),
+ 0, NEVER_FGU);
+ val |= compute_res0_bits(kvm, hdfgwtr_feat_map,
+ ARRAY_SIZE(hdfgwtr_feat_map),
+ 0, NEVER_FGU);
+ break;
+ case HAFGRTR_GROUP:
+ val |= compute_res0_bits(kvm, hafgrtr_feat_map,
+ ARRAY_SIZE(hafgrtr_feat_map),
+ 0, NEVER_FGU);
+ break;
+ case HFGRTR2_GROUP:
+ val |= compute_res0_bits(kvm, hfgrtr2_feat_map,
+ ARRAY_SIZE(hfgrtr2_feat_map),
+ 0, NEVER_FGU);
+ val |= compute_res0_bits(kvm, hfgwtr2_feat_map,
+ ARRAY_SIZE(hfgwtr2_feat_map),
+ 0, NEVER_FGU);
+ break;
+ case HFGITR2_GROUP:
+ val |= compute_res0_bits(kvm, hfgitr2_feat_map,
+ ARRAY_SIZE(hfgitr2_feat_map),
+ 0, NEVER_FGU);
+ break;
+ case HDFGRTR2_GROUP:
+ val |= compute_res0_bits(kvm, hdfgrtr2_feat_map,
+ ARRAY_SIZE(hdfgrtr2_feat_map),
+ 0, NEVER_FGU);
+ val |= compute_res0_bits(kvm, hdfgwtr2_feat_map,
+ ARRAY_SIZE(hdfgwtr2_feat_map),
+ 0, NEVER_FGU);
+ break;
+ default:
+ BUG();
+ }
+
+ kvm->arch.fgu[fgt] = val;
+}
+
+void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1)
+{
+ u64 fixed = 0, mask;
+
+ switch (reg) {
+ case HFGRTR_EL2:
+ *res0 = compute_res0_bits(kvm, hfgrtr_feat_map,
+ ARRAY_SIZE(hfgrtr_feat_map), 0, 0);
+ *res0 |= hfgrtr_masks.res0;
+ *res1 = HFGRTR_EL2_RES1;
+ break;
+ case HFGWTR_EL2:
+ *res0 = compute_res0_bits(kvm, hfgwtr_feat_map,
+ ARRAY_SIZE(hfgwtr_feat_map), 0, 0);
+ *res0 |= hfgwtr_masks.res0;
+ *res1 = HFGWTR_EL2_RES1;
+ break;
+ case HFGITR_EL2:
+ *res0 = compute_res0_bits(kvm, hfgitr_feat_map,
+ ARRAY_SIZE(hfgitr_feat_map), 0, 0);
+ *res0 |= hfgitr_masks.res0;
+ *res1 = HFGITR_EL2_RES1;
+ break;
+ case HDFGRTR_EL2:
+ *res0 = compute_res0_bits(kvm, hdfgrtr_feat_map,
+ ARRAY_SIZE(hdfgrtr_feat_map), 0, 0);
+ *res0 |= hdfgrtr_masks.res0;
+ *res1 = HDFGRTR_EL2_RES1;
+ break;
+ case HDFGWTR_EL2:
+ *res0 = compute_res0_bits(kvm, hdfgwtr_feat_map,
+ ARRAY_SIZE(hdfgwtr_feat_map), 0, 0);
+ *res0 |= hdfgwtr_masks.res0;
+ *res1 = HDFGWTR_EL2_RES1;
+ break;
+ case HAFGRTR_EL2:
+ *res0 = compute_res0_bits(kvm, hafgrtr_feat_map,
+ ARRAY_SIZE(hafgrtr_feat_map), 0, 0);
+ *res0 |= hafgrtr_masks.res0;
+ *res1 = HAFGRTR_EL2_RES1;
+ break;
+ case HFGRTR2_EL2:
+ *res0 = compute_res0_bits(kvm, hfgrtr2_feat_map,
+ ARRAY_SIZE(hfgrtr2_feat_map), 0, 0);
+ *res0 |= hfgrtr2_masks.res0;
+ *res1 = HFGRTR2_EL2_RES1;
+ break;
+ case HFGWTR2_EL2:
+ *res0 = compute_res0_bits(kvm, hfgwtr2_feat_map,
+ ARRAY_SIZE(hfgwtr2_feat_map), 0, 0);
+ *res0 |= hfgwtr2_masks.res0;
+ *res1 = HFGWTR2_EL2_RES1;
+ break;
+ case HFGITR2_EL2:
+ *res0 = compute_res0_bits(kvm, hfgitr2_feat_map,
+ ARRAY_SIZE(hfgitr2_feat_map), 0, 0);
+ *res0 |= hfgitr2_masks.res0;
+ *res1 = HFGITR2_EL2_RES1;
+ break;
+ case HDFGRTR2_EL2:
+ *res0 = compute_res0_bits(kvm, hdfgrtr2_feat_map,
+ ARRAY_SIZE(hdfgrtr2_feat_map), 0, 0);
+ *res0 |= hdfgrtr2_masks.res0;
+ *res1 = HDFGRTR2_EL2_RES1;
+ break;
+ case HDFGWTR2_EL2:
+ *res0 = compute_res0_bits(kvm, hdfgwtr2_feat_map,
+ ARRAY_SIZE(hdfgwtr2_feat_map), 0, 0);
+ *res0 |= hdfgwtr2_masks.res0;
+ *res1 = HDFGWTR2_EL2_RES1;
+ break;
+ case HCRX_EL2:
+ *res0 = compute_res0_bits(kvm, hcrx_feat_map,
+ ARRAY_SIZE(hcrx_feat_map), 0, 0);
+ *res0 |= __HCRX_EL2_RES0;
+ *res1 = __HCRX_EL2_RES1;
+ break;
+ case HCR_EL2:
+ mask = compute_fixed_bits(kvm, hcr_feat_map,
+ ARRAY_SIZE(hcr_feat_map), &fixed,
+ 0, 0);
+ *res0 = compute_res0_bits(kvm, hcr_feat_map,
+ ARRAY_SIZE(hcr_feat_map), 0, 0);
+ *res0 |= HCR_EL2_RES0 | (mask & ~fixed);
+ *res1 = HCR_EL2_RES1 | (mask & fixed);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ *res0 = *res1 = 0;
+ break;
+ }
+}
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 0fcfcc0478f9..3a384e9660b8 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -622,6 +622,11 @@ struct encoding_to_trap_config {
const unsigned int line;
};
+/*
+ * WARNING: using ranges is a treacherous endeavour, as sysregs that
+ * are part of an architectural range are not necessarily contiguous
+ * in the [Op0,Op1,CRn,CRm,Ops] space. Tread carefully.
+ */
#define SR_RANGE_TRAP(sr_start, sr_end, trap_id) \
{ \
.encoding = sr_start, \
@@ -1279,98 +1284,128 @@ enum fg_filter_id {
__NR_FG_FILTER_IDS__
};
-#define SR_FGF(sr, g, b, p, f) \
- { \
- .encoding = sr, \
- .end = sr, \
- .tc = { \
+#define __FGT(g, b, p, f) \
+ { \
.fgt = g ## _GROUP, \
.bit = g ## _EL2_ ## b ## _SHIFT, \
.pol = p, \
.fgf = f, \
- }, \
+ }
+
+#define FGT(g, b, p) __FGT(g, b, p, __NO_FGF__)
+
+/*
+ * See the warning next to SR_RANGE_TRAP(), and apply the same
+ * level of caution.
+ */
+#define SR_FGF_RANGE(sr, e, g, b, p, f) \
+ { \
+ .encoding = sr, \
+ .end = e, \
+ .tc = __FGT(g, b, p, f), \
.line = __LINE__, \
}
-#define SR_FGT(sr, g, b, p) SR_FGF(sr, g, b, p, __NO_FGF__)
+#define SR_FGF(sr, g, b, p, f) SR_FGF_RANGE(sr, sr, g, b, p, f)
+#define SR_FGT(sr, g, b, p) SR_FGF_RANGE(sr, sr, g, b, p, __NO_FGF__)
+#define SR_FGT_RANGE(sr, end, g, b, p) \
+ SR_FGF_RANGE(sr, end, g, b, p, __NO_FGF__)
static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
/* HFGRTR_EL2, HFGWTR_EL2 */
- SR_FGT(SYS_AMAIR2_EL1, HFGxTR, nAMAIR2_EL1, 0),
- SR_FGT(SYS_MAIR2_EL1, HFGxTR, nMAIR2_EL1, 0),
- SR_FGT(SYS_S2POR_EL1, HFGxTR, nS2POR_EL1, 0),
- SR_FGT(SYS_POR_EL1, HFGxTR, nPOR_EL1, 0),
- SR_FGT(SYS_POR_EL0, HFGxTR, nPOR_EL0, 0),
- SR_FGT(SYS_PIR_EL1, HFGxTR, nPIR_EL1, 0),
- SR_FGT(SYS_PIRE0_EL1, HFGxTR, nPIRE0_EL1, 0),
- SR_FGT(SYS_RCWMASK_EL1, HFGxTR, nRCWMASK_EL1, 0),
- SR_FGT(SYS_TPIDR2_EL0, HFGxTR, nTPIDR2_EL0, 0),
- SR_FGT(SYS_SMPRI_EL1, HFGxTR, nSMPRI_EL1, 0),
- SR_FGT(SYS_GCSCR_EL1, HFGxTR, nGCS_EL1, 0),
- SR_FGT(SYS_GCSPR_EL1, HFGxTR, nGCS_EL1, 0),
- SR_FGT(SYS_GCSCRE0_EL1, HFGxTR, nGCS_EL0, 0),
- SR_FGT(SYS_GCSPR_EL0, HFGxTR, nGCS_EL0, 0),
- SR_FGT(SYS_ACCDATA_EL1, HFGxTR, nACCDATA_EL1, 0),
- SR_FGT(SYS_ERXADDR_EL1, HFGxTR, ERXADDR_EL1, 1),
- SR_FGT(SYS_ERXPFGCDN_EL1, HFGxTR, ERXPFGCDN_EL1, 1),
- SR_FGT(SYS_ERXPFGCTL_EL1, HFGxTR, ERXPFGCTL_EL1, 1),
- SR_FGT(SYS_ERXPFGF_EL1, HFGxTR, ERXPFGF_EL1, 1),
- SR_FGT(SYS_ERXMISC0_EL1, HFGxTR, ERXMISCn_EL1, 1),
- SR_FGT(SYS_ERXMISC1_EL1, HFGxTR, ERXMISCn_EL1, 1),
- SR_FGT(SYS_ERXMISC2_EL1, HFGxTR, ERXMISCn_EL1, 1),
- SR_FGT(SYS_ERXMISC3_EL1, HFGxTR, ERXMISCn_EL1, 1),
- SR_FGT(SYS_ERXSTATUS_EL1, HFGxTR, ERXSTATUS_EL1, 1),
- SR_FGT(SYS_ERXCTLR_EL1, HFGxTR, ERXCTLR_EL1, 1),
- SR_FGT(SYS_ERXFR_EL1, HFGxTR, ERXFR_EL1, 1),
- SR_FGT(SYS_ERRSELR_EL1, HFGxTR, ERRSELR_EL1, 1),
- SR_FGT(SYS_ERRIDR_EL1, HFGxTR, ERRIDR_EL1, 1),
- SR_FGT(SYS_ICC_IGRPEN0_EL1, HFGxTR, ICC_IGRPENn_EL1, 1),
- SR_FGT(SYS_ICC_IGRPEN1_EL1, HFGxTR, ICC_IGRPENn_EL1, 1),
- SR_FGT(SYS_VBAR_EL1, HFGxTR, VBAR_EL1, 1),
- SR_FGT(SYS_TTBR1_EL1, HFGxTR, TTBR1_EL1, 1),
- SR_FGT(SYS_TTBR0_EL1, HFGxTR, TTBR0_EL1, 1),
- SR_FGT(SYS_TPIDR_EL0, HFGxTR, TPIDR_EL0, 1),
- SR_FGT(SYS_TPIDRRO_EL0, HFGxTR, TPIDRRO_EL0, 1),
- SR_FGT(SYS_TPIDR_EL1, HFGxTR, TPIDR_EL1, 1),
- SR_FGT(SYS_TCR_EL1, HFGxTR, TCR_EL1, 1),
- SR_FGT(SYS_TCR2_EL1, HFGxTR, TCR_EL1, 1),
- SR_FGT(SYS_SCXTNUM_EL0, HFGxTR, SCXTNUM_EL0, 1),
- SR_FGT(SYS_SCXTNUM_EL1, HFGxTR, SCXTNUM_EL1, 1),
- SR_FGT(SYS_SCTLR_EL1, HFGxTR, SCTLR_EL1, 1),
- SR_FGT(SYS_REVIDR_EL1, HFGxTR, REVIDR_EL1, 1),
- SR_FGT(SYS_PAR_EL1, HFGxTR, PAR_EL1, 1),
- SR_FGT(SYS_MPIDR_EL1, HFGxTR, MPIDR_EL1, 1),
- SR_FGT(SYS_MIDR_EL1, HFGxTR, MIDR_EL1, 1),
- SR_FGT(SYS_MAIR_EL1, HFGxTR, MAIR_EL1, 1),
- SR_FGT(SYS_LORSA_EL1, HFGxTR, LORSA_EL1, 1),
- SR_FGT(SYS_LORN_EL1, HFGxTR, LORN_EL1, 1),
- SR_FGT(SYS_LORID_EL1, HFGxTR, LORID_EL1, 1),
- SR_FGT(SYS_LOREA_EL1, HFGxTR, LOREA_EL1, 1),
- SR_FGT(SYS_LORC_EL1, HFGxTR, LORC_EL1, 1),
- SR_FGT(SYS_ISR_EL1, HFGxTR, ISR_EL1, 1),
- SR_FGT(SYS_FAR_EL1, HFGxTR, FAR_EL1, 1),
- SR_FGT(SYS_ESR_EL1, HFGxTR, ESR_EL1, 1),
- SR_FGT(SYS_DCZID_EL0, HFGxTR, DCZID_EL0, 1),
- SR_FGT(SYS_CTR_EL0, HFGxTR, CTR_EL0, 1),
- SR_FGT(SYS_CSSELR_EL1, HFGxTR, CSSELR_EL1, 1),
- SR_FGT(SYS_CPACR_EL1, HFGxTR, CPACR_EL1, 1),
- SR_FGT(SYS_CONTEXTIDR_EL1, HFGxTR, CONTEXTIDR_EL1, 1),
- SR_FGT(SYS_CLIDR_EL1, HFGxTR, CLIDR_EL1, 1),
- SR_FGT(SYS_CCSIDR_EL1, HFGxTR, CCSIDR_EL1, 1),
- SR_FGT(SYS_APIBKEYLO_EL1, HFGxTR, APIBKey, 1),
- SR_FGT(SYS_APIBKEYHI_EL1, HFGxTR, APIBKey, 1),
- SR_FGT(SYS_APIAKEYLO_EL1, HFGxTR, APIAKey, 1),
- SR_FGT(SYS_APIAKEYHI_EL1, HFGxTR, APIAKey, 1),
- SR_FGT(SYS_APGAKEYLO_EL1, HFGxTR, APGAKey, 1),
- SR_FGT(SYS_APGAKEYHI_EL1, HFGxTR, APGAKey, 1),
- SR_FGT(SYS_APDBKEYLO_EL1, HFGxTR, APDBKey, 1),
- SR_FGT(SYS_APDBKEYHI_EL1, HFGxTR, APDBKey, 1),
- SR_FGT(SYS_APDAKEYLO_EL1, HFGxTR, APDAKey, 1),
- SR_FGT(SYS_APDAKEYHI_EL1, HFGxTR, APDAKey, 1),
- SR_FGT(SYS_AMAIR_EL1, HFGxTR, AMAIR_EL1, 1),
- SR_FGT(SYS_AIDR_EL1, HFGxTR, AIDR_EL1, 1),
- SR_FGT(SYS_AFSR1_EL1, HFGxTR, AFSR1_EL1, 1),
- SR_FGT(SYS_AFSR0_EL1, HFGxTR, AFSR0_EL1, 1),
+ SR_FGT(SYS_AMAIR2_EL1, HFGRTR, nAMAIR2_EL1, 0),
+ SR_FGT(SYS_MAIR2_EL1, HFGRTR, nMAIR2_EL1, 0),
+ SR_FGT(SYS_S2POR_EL1, HFGRTR, nS2POR_EL1, 0),
+ SR_FGT(SYS_POR_EL1, HFGRTR, nPOR_EL1, 0),
+ SR_FGT(SYS_POR_EL0, HFGRTR, nPOR_EL0, 0),
+ SR_FGT(SYS_PIR_EL1, HFGRTR, nPIR_EL1, 0),
+ SR_FGT(SYS_PIRE0_EL1, HFGRTR, nPIRE0_EL1, 0),
+ SR_FGT(SYS_RCWMASK_EL1, HFGRTR, nRCWMASK_EL1, 0),
+ SR_FGT(SYS_TPIDR2_EL0, HFGRTR, nTPIDR2_EL0, 0),
+ SR_FGT(SYS_SMPRI_EL1, HFGRTR, nSMPRI_EL1, 0),
+ SR_FGT(SYS_GCSCR_EL1, HFGRTR, nGCS_EL1, 0),
+ SR_FGT(SYS_GCSPR_EL1, HFGRTR, nGCS_EL1, 0),
+ SR_FGT(SYS_GCSCRE0_EL1, HFGRTR, nGCS_EL0, 0),
+ SR_FGT(SYS_GCSPR_EL0, HFGRTR, nGCS_EL0, 0),
+ SR_FGT(SYS_ACCDATA_EL1, HFGRTR, nACCDATA_EL1, 0),
+ SR_FGT(SYS_ERXADDR_EL1, HFGRTR, ERXADDR_EL1, 1),
+ SR_FGT(SYS_ERXPFGCDN_EL1, HFGRTR, ERXPFGCDN_EL1, 1),
+ SR_FGT(SYS_ERXPFGCTL_EL1, HFGRTR, ERXPFGCTL_EL1, 1),
+ SR_FGT(SYS_ERXPFGF_EL1, HFGRTR, ERXPFGF_EL1, 1),
+ SR_FGT(SYS_ERXMISC0_EL1, HFGRTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXMISC1_EL1, HFGRTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXMISC2_EL1, HFGRTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXMISC3_EL1, HFGRTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXSTATUS_EL1, HFGRTR, ERXSTATUS_EL1, 1),
+ SR_FGT(SYS_ERXCTLR_EL1, HFGRTR, ERXCTLR_EL1, 1),
+ SR_FGT(SYS_ERXFR_EL1, HFGRTR, ERXFR_EL1, 1),
+ SR_FGT(SYS_ERRSELR_EL1, HFGRTR, ERRSELR_EL1, 1),
+ SR_FGT(SYS_ERRIDR_EL1, HFGRTR, ERRIDR_EL1, 1),
+ SR_FGT(SYS_ICC_IGRPEN0_EL1, HFGRTR, ICC_IGRPENn_EL1, 1),
+ SR_FGT(SYS_ICC_IGRPEN1_EL1, HFGRTR, ICC_IGRPENn_EL1, 1),
+ SR_FGT(SYS_VBAR_EL1, HFGRTR, VBAR_EL1, 1),
+ SR_FGT(SYS_TTBR1_EL1, HFGRTR, TTBR1_EL1, 1),
+ SR_FGT(SYS_TTBR0_EL1, HFGRTR, TTBR0_EL1, 1),
+ SR_FGT(SYS_TPIDR_EL0, HFGRTR, TPIDR_EL0, 1),
+ SR_FGT(SYS_TPIDRRO_EL0, HFGRTR, TPIDRRO_EL0, 1),
+ SR_FGT(SYS_TPIDR_EL1, HFGRTR, TPIDR_EL1, 1),
+ SR_FGT(SYS_TCR_EL1, HFGRTR, TCR_EL1, 1),
+ SR_FGT(SYS_TCR2_EL1, HFGRTR, TCR_EL1, 1),
+ SR_FGT(SYS_SCXTNUM_EL0, HFGRTR, SCXTNUM_EL0, 1),
+ SR_FGT(SYS_SCXTNUM_EL1, HFGRTR, SCXTNUM_EL1, 1),
+ SR_FGT(SYS_SCTLR_EL1, HFGRTR, SCTLR_EL1, 1),
+ SR_FGT(SYS_REVIDR_EL1, HFGRTR, REVIDR_EL1, 1),
+ SR_FGT(SYS_PAR_EL1, HFGRTR, PAR_EL1, 1),
+ SR_FGT(SYS_MPIDR_EL1, HFGRTR, MPIDR_EL1, 1),
+ SR_FGT(SYS_MIDR_EL1, HFGRTR, MIDR_EL1, 1),
+ SR_FGT(SYS_MAIR_EL1, HFGRTR, MAIR_EL1, 1),
+ SR_FGT(SYS_LORSA_EL1, HFGRTR, LORSA_EL1, 1),
+ SR_FGT(SYS_LORN_EL1, HFGRTR, LORN_EL1, 1),
+ SR_FGT(SYS_LORID_EL1, HFGRTR, LORID_EL1, 1),
+ SR_FGT(SYS_LOREA_EL1, HFGRTR, LOREA_EL1, 1),
+ SR_FGT(SYS_LORC_EL1, HFGRTR, LORC_EL1, 1),
+ SR_FGT(SYS_ISR_EL1, HFGRTR, ISR_EL1, 1),
+ SR_FGT(SYS_FAR_EL1, HFGRTR, FAR_EL1, 1),
+ SR_FGT(SYS_ESR_EL1, HFGRTR, ESR_EL1, 1),
+ SR_FGT(SYS_DCZID_EL0, HFGRTR, DCZID_EL0, 1),
+ SR_FGT(SYS_CTR_EL0, HFGRTR, CTR_EL0, 1),
+ SR_FGT(SYS_CSSELR_EL1, HFGRTR, CSSELR_EL1, 1),
+ SR_FGT(SYS_CPACR_EL1, HFGRTR, CPACR_EL1, 1),
+ SR_FGT(SYS_CONTEXTIDR_EL1, HFGRTR, CONTEXTIDR_EL1, 1),
+ SR_FGT(SYS_CLIDR_EL1, HFGRTR, CLIDR_EL1, 1),
+ SR_FGT(SYS_CCSIDR_EL1, HFGRTR, CCSIDR_EL1, 1),
+ SR_FGT(SYS_APIBKEYLO_EL1, HFGRTR, APIBKey, 1),
+ SR_FGT(SYS_APIBKEYHI_EL1, HFGRTR, APIBKey, 1),
+ SR_FGT(SYS_APIAKEYLO_EL1, HFGRTR, APIAKey, 1),
+ SR_FGT(SYS_APIAKEYHI_EL1, HFGRTR, APIAKey, 1),
+ SR_FGT(SYS_APGAKEYLO_EL1, HFGRTR, APGAKey, 1),
+ SR_FGT(SYS_APGAKEYHI_EL1, HFGRTR, APGAKey, 1),
+ SR_FGT(SYS_APDBKEYLO_EL1, HFGRTR, APDBKey, 1),
+ SR_FGT(SYS_APDBKEYHI_EL1, HFGRTR, APDBKey, 1),
+ SR_FGT(SYS_APDAKEYLO_EL1, HFGRTR, APDAKey, 1),
+ SR_FGT(SYS_APDAKEYHI_EL1, HFGRTR, APDAKey, 1),
+ SR_FGT(SYS_AMAIR_EL1, HFGRTR, AMAIR_EL1, 1),
+ SR_FGT(SYS_AIDR_EL1, HFGRTR, AIDR_EL1, 1),
+ SR_FGT(SYS_AFSR1_EL1, HFGRTR, AFSR1_EL1, 1),
+ SR_FGT(SYS_AFSR0_EL1, HFGRTR, AFSR0_EL1, 1),
+
+ /* HFGRTR2_EL2, HFGWTR2_EL2 */
+ SR_FGT(SYS_ACTLRALIAS_EL1, HFGRTR2, nACTLRALIAS_EL1, 0),
+ SR_FGT(SYS_ACTLRMASK_EL1, HFGRTR2, nACTLRMASK_EL1, 0),
+ SR_FGT(SYS_CPACRALIAS_EL1, HFGRTR2, nCPACRALIAS_EL1, 0),
+ SR_FGT(SYS_CPACRMASK_EL1, HFGRTR2, nCPACRMASK_EL1, 0),
+ SR_FGT(SYS_PFAR_EL1, HFGRTR2, nPFAR_EL1, 0),
+ SR_FGT(SYS_RCWSMASK_EL1, HFGRTR2, nRCWSMASK_EL1, 0),
+ SR_FGT(SYS_SCTLR2ALIAS_EL1, HFGRTR2, nSCTLRALIAS2_EL1, 0),
+ SR_FGT(SYS_SCTLR2MASK_EL1, HFGRTR2, nSCTLR2MASK_EL1, 0),
+ SR_FGT(SYS_SCTLRALIAS_EL1, HFGRTR2, nSCTLRALIAS_EL1, 0),
+ SR_FGT(SYS_SCTLRMASK_EL1, HFGRTR2, nSCTLRMASK_EL1, 0),
+ SR_FGT(SYS_TCR2ALIAS_EL1, HFGRTR2, nTCR2ALIAS_EL1, 0),
+ SR_FGT(SYS_TCR2MASK_EL1, HFGRTR2, nTCR2MASK_EL1, 0),
+ SR_FGT(SYS_TCRALIAS_EL1, HFGRTR2, nTCRALIAS_EL1, 0),
+ SR_FGT(SYS_TCRMASK_EL1, HFGRTR2, nTCRMASK_EL1, 0),
+ SR_FGT(SYS_ERXGSR_EL1, HFGRTR2, nERXGSR_EL1, 0),
+
/* HFGITR_EL2 */
SR_FGT(OP_AT_S1E1A, HFGITR, ATS1E1A, 1),
SR_FGT(OP_COSP_RCTX, HFGITR, COSPRCTX, 1),
@@ -1480,6 +1515,11 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
SR_FGT(SYS_IC_IVAU, HFGITR, ICIVAU, 1),
SR_FGT(SYS_IC_IALLU, HFGITR, ICIALLU, 1),
SR_FGT(SYS_IC_IALLUIS, HFGITR, ICIALLUIS, 1),
+
+ /* HFGITR2_EL2 */
+ SR_FGT(SYS_DC_CIGDVAPS, HFGITR2, nDCCIVAPS, 0),
+ SR_FGT(SYS_DC_CIVAPS, HFGITR2, nDCCIVAPS, 0),
+
/* HDFGRTR_EL2 */
SR_FGT(SYS_PMBIDR_EL1, HDFGRTR, PMBIDR_EL1, 1),
SR_FGT(SYS_PMSNEVFR_EL1, HDFGRTR, nPMSNEVFR_EL1, 0),
@@ -1789,68 +1829,12 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
SR_FGT(SYS_PMCNTENSET_EL0, HDFGRTR, PMCNTEN, 1),
SR_FGT(SYS_PMCCNTR_EL0, HDFGRTR, PMCCNTR_EL0, 1),
SR_FGT(SYS_PMCCFILTR_EL0, HDFGRTR, PMCCFILTR_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(0), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(1), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(2), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(3), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(4), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(5), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(6), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(7), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(8), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(9), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(10), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(11), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(12), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(13), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(14), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(15), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(16), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(17), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(18), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(19), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(20), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(21), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(22), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(23), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(24), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(25), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(26), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(27), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(28), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(29), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(30), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(0), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(1), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(2), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(3), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(4), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(5), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(6), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(7), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(8), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(9), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(10), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(11), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(12), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(13), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(14), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(15), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(16), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(17), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(18), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(19), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(20), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(21), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(22), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(23), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(24), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(25), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(26), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(27), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(28), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(29), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(30), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT_RANGE(SYS_PMEVTYPERn_EL0(0),
+ SYS_PMEVTYPERn_EL0(30),
+ HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT_RANGE(SYS_PMEVCNTRn_EL0(0),
+ SYS_PMEVCNTRn_EL0(30),
+ HDFGRTR, PMEVCNTRn_EL0, 1),
SR_FGT(SYS_OSDLR_EL1, HDFGRTR, OSDLR_EL1, 1),
SR_FGT(SYS_OSECCR_EL1, HDFGRTR, OSECCR_EL1, 1),
SR_FGT(SYS_OSLSR_EL1, HDFGRTR, OSLSR_EL1, 1),
@@ -1928,6 +1912,59 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
SR_FGT(SYS_DBGBCRn_EL1(13), HDFGRTR, DBGBCRn_EL1, 1),
SR_FGT(SYS_DBGBCRn_EL1(14), HDFGRTR, DBGBCRn_EL1, 1),
SR_FGT(SYS_DBGBCRn_EL1(15), HDFGRTR, DBGBCRn_EL1, 1),
+
+ /* HDFGRTR2_EL2 */
+ SR_FGT(SYS_MDSELR_EL1, HDFGRTR2, nMDSELR_EL1, 0),
+ SR_FGT(SYS_MDSTEPOP_EL1, HDFGRTR2, nMDSTEPOP_EL1, 0),
+ SR_FGT(SYS_PMCCNTSVR_EL1, HDFGRTR2, nPMSSDATA, 0),
+ SR_FGT_RANGE(SYS_PMEVCNTSVRn_EL1(0),
+ SYS_PMEVCNTSVRn_EL1(30),
+ HDFGRTR2, nPMSSDATA, 0),
+ SR_FGT(SYS_PMICNTSVR_EL1, HDFGRTR2, nPMSSDATA, 0),
+ SR_FGT(SYS_PMECR_EL1, HDFGRTR2, nPMECR_EL1, 0),
+ SR_FGT(SYS_PMIAR_EL1, HDFGRTR2, nPMIAR_EL1, 0),
+ SR_FGT(SYS_PMICFILTR_EL0, HDFGRTR2, nPMICFILTR_EL0, 0),
+ SR_FGT(SYS_PMICNTR_EL0, HDFGRTR2, nPMICNTR_EL0, 0),
+ SR_FGT(SYS_PMSSCR_EL1, HDFGRTR2, nPMSSCR_EL1, 0),
+ SR_FGT(SYS_PMUACR_EL1, HDFGRTR2, nPMUACR_EL1, 0),
+ SR_FGT(SYS_SPMACCESSR_EL1, HDFGRTR2, nSPMACCESSR_EL1, 0),
+ SR_FGT(SYS_SPMCFGR_EL1, HDFGRTR2, nSPMID, 0),
+ SR_FGT(SYS_SPMDEVARCH_EL1, HDFGRTR2, nSPMID, 0),
+ SR_FGT(SYS_SPMCGCRn_EL1(0), HDFGRTR2, nSPMID, 0),
+ SR_FGT(SYS_SPMCGCRn_EL1(1), HDFGRTR2, nSPMID, 0),
+ SR_FGT(SYS_SPMIIDR_EL1, HDFGRTR2, nSPMID, 0),
+ SR_FGT(SYS_SPMCNTENCLR_EL0, HDFGRTR2, nSPMCNTEN, 0),
+ SR_FGT(SYS_SPMCNTENSET_EL0, HDFGRTR2, nSPMCNTEN, 0),
+ SR_FGT(SYS_SPMCR_EL0, HDFGRTR2, nSPMCR_EL0, 0),
+ SR_FGT(SYS_SPMDEVAFF_EL1, HDFGRTR2, nSPMDEVAFF_EL1, 0),
+ /*
+ * We have up to 64 of these registers in ranges of 16, banked via
+ * SPMSELR_EL0.BANK. We're only concerned with the accessors here,
+ * not the architectural registers.
+ */
+ SR_FGT_RANGE(SYS_SPMEVCNTRn_EL0(0),
+ SYS_SPMEVCNTRn_EL0(15),
+ HDFGRTR2, nSPMEVCNTRn_EL0, 0),
+ SR_FGT_RANGE(SYS_SPMEVFILT2Rn_EL0(0),
+ SYS_SPMEVFILT2Rn_EL0(15),
+ HDFGRTR2, nSPMEVTYPERn_EL0, 0),
+ SR_FGT_RANGE(SYS_SPMEVFILTRn_EL0(0),
+ SYS_SPMEVFILTRn_EL0(15),
+ HDFGRTR2, nSPMEVTYPERn_EL0, 0),
+ SR_FGT_RANGE(SYS_SPMEVTYPERn_EL0(0),
+ SYS_SPMEVTYPERn_EL0(15),
+ HDFGRTR2, nSPMEVTYPERn_EL0, 0),
+ SR_FGT(SYS_SPMINTENCLR_EL1, HDFGRTR2, nSPMINTEN, 0),
+ SR_FGT(SYS_SPMINTENSET_EL1, HDFGRTR2, nSPMINTEN, 0),
+ SR_FGT(SYS_SPMOVSCLR_EL0, HDFGRTR2, nSPMOVS, 0),
+ SR_FGT(SYS_SPMOVSSET_EL0, HDFGRTR2, nSPMOVS, 0),
+ SR_FGT(SYS_SPMSCR_EL1, HDFGRTR2, nSPMSCR_EL1, 0),
+ SR_FGT(SYS_SPMSELR_EL0, HDFGRTR2, nSPMSELR_EL0, 0),
+ SR_FGT(SYS_TRCITECR_EL1, HDFGRTR2, nTRCITECR_EL1, 0),
+ SR_FGT(SYS_PMBMAR_EL1, HDFGRTR2, nPMBMAR_EL1, 0),
+ SR_FGT(SYS_PMSDSFR_EL1, HDFGRTR2, nPMSDSFR_EL1, 0),
+ SR_FGT(SYS_TRBMPAM_EL1, HDFGRTR2, nTRBMPAM_EL1, 0),
+
/*
* HDFGWTR_EL2
*
@@ -1938,12 +1975,19 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
* read-side mappings, and only the write-side mappings that
* differ from the read side, and the trap handler will pick
* the correct shadow register based on the access type.
+ *
+ * Same model applies to the FEAT_FGT2 registers.
*/
SR_FGT(SYS_TRFCR_EL1, HDFGWTR, TRFCR_EL1, 1),
SR_FGT(SYS_TRCOSLAR, HDFGWTR, TRCOSLAR, 1),
SR_FGT(SYS_PMCR_EL0, HDFGWTR, PMCR_EL0, 1),
SR_FGT(SYS_PMSWINC_EL0, HDFGWTR, PMSWINC_EL0, 1),
SR_FGT(SYS_OSLAR_EL1, HDFGWTR, OSLAR_EL1, 1),
+
+ /* HDFGWTR2_EL2 */
+ SR_FGT(SYS_PMZR_EL0, HDFGWTR2, nPMZR_EL0, 0),
+ SR_FGT(SYS_SPMZR_EL0, HDFGWTR2, nSPMEVCNTRn_EL0, 0),
+
/*
* HAFGRTR_EL2
*/
@@ -1989,6 +2033,20 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
SR_FGT(SYS_AMEVCNTR0_EL0(0), HAFGRTR, AMEVCNTR00_EL0, 1),
};
+/*
+ * Additional FGTs that do not fire with ESR_EL2.EC==0x18. This table
+ * isn't used for exception routing, but only as a promise that the
+ * trap is handled somewhere else.
+ */
+static const union trap_config non_0x18_fgt[] __initconst = {
+ FGT(HFGITR, PSBCSYNC, 1),
+ FGT(HFGITR, nGCSSTR_EL1, 0),
+ FGT(HFGITR, SVC_EL1, 1),
+ FGT(HFGITR, SVC_EL0, 1),
+ FGT(HFGITR, ERET, 1),
+ FGT(HFGITR2, TSBCSYNC, 1),
+};
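These FGT bits never show up on the EC==0x18 sysreg path, so it is worth noting where they do land. A rough mapping, inferred from the handlers elsewhere in this series (illustration only, not an exhaustive reference):

/*
 * Illustration only:
 *   HFGITR_EL2.SVC_EL0 / SVC_EL1   -> SVC exception class, handle_svc()
 *   HFGITR_EL2.ERET                -> ERET exception class, kvm_handle_eret()
 *   HFGITR_EL2.nGCSSTR_EL1         -> GCS exception class, kvm_handle_gcs()
 *   HFGITR_EL2.PSBCSYNC,
 *   HFGITR2_EL2.TSBCSYNC           -> ESR_ELx_EC_OTHER, handle_other()
 */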
+
static union trap_config get_trap_config(u32 sysreg)
{
return (union trap_config) {
@@ -2033,6 +2091,130 @@ static u32 encoding_next(u32 encoding)
return sys_reg(op0 + 1, 0, 0, 0, 0);
}
+#define FGT_MASKS(__n, __m) \
+ struct fgt_masks __n = { .str = #__m, .res0 = __m, }
+
+FGT_MASKS(hfgrtr_masks, HFGRTR_EL2_RES0);
+FGT_MASKS(hfgwtr_masks, HFGWTR_EL2_RES0);
+FGT_MASKS(hfgitr_masks, HFGITR_EL2_RES0);
+FGT_MASKS(hdfgrtr_masks, HDFGRTR_EL2_RES0);
+FGT_MASKS(hdfgwtr_masks, HDFGWTR_EL2_RES0);
+FGT_MASKS(hafgrtr_masks, HAFGRTR_EL2_RES0);
+FGT_MASKS(hfgrtr2_masks, HFGRTR2_EL2_RES0);
+FGT_MASKS(hfgwtr2_masks, HFGWTR2_EL2_RES0);
+FGT_MASKS(hfgitr2_masks, HFGITR2_EL2_RES0);
+FGT_MASKS(hdfgrtr2_masks, HDFGRTR2_EL2_RES0);
+FGT_MASKS(hdfgwtr2_masks, HDFGWTR2_EL2_RES0);
+
+static __init bool aggregate_fgt(union trap_config tc)
+{
+ struct fgt_masks *rmasks, *wmasks;
+
+ switch (tc.fgt) {
+ case HFGRTR_GROUP:
+ rmasks = &hfgrtr_masks;
+ wmasks = &hfgwtr_masks;
+ break;
+ case HDFGRTR_GROUP:
+ rmasks = &hdfgrtr_masks;
+ wmasks = &hdfgwtr_masks;
+ break;
+ case HAFGRTR_GROUP:
+ rmasks = &hafgrtr_masks;
+ wmasks = NULL;
+ break;
+ case HFGITR_GROUP:
+ rmasks = &hfgitr_masks;
+ wmasks = NULL;
+ break;
+ case HFGRTR2_GROUP:
+ rmasks = &hfgrtr2_masks;
+ wmasks = &hfgwtr2_masks;
+ break;
+ case HDFGRTR2_GROUP:
+ rmasks = &hdfgrtr2_masks;
+ wmasks = &hdfgwtr2_masks;
+ break;
+ case HFGITR2_GROUP:
+ rmasks = &hfgitr2_masks;
+ wmasks = NULL;
+ break;
+ }
+
+ /*
+ * A bit can be reserved in either the R or W register, but
+ * not both.
+ */
+ if ((BIT(tc.bit) & rmasks->res0) &&
+ (!wmasks || (BIT(tc.bit) & wmasks->res0)))
+ return false;
+
+ if (tc.pol)
+ rmasks->mask |= BIT(tc.bit) & ~rmasks->res0;
+ else
+ rmasks->nmask |= BIT(tc.bit) & ~rmasks->res0;
+
+ if (wmasks) {
+ if (tc.pol)
+ wmasks->mask |= BIT(tc.bit) & ~wmasks->res0;
+ else
+ wmasks->nmask |= BIT(tc.bit) & ~wmasks->res0;
+ }
+
+ return true;
+}
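aggregate_fgt() sorts each trap description into mask (positive polarity: the bit traps when set) or nmask (negative, "n"-prefixed polarity: the bit traps when clear). A minimal sketch of how a bit of known polarity is later evaluated against a guest-owned FGT register value (fgt_bit_traps() is a made-up helper, shown purely to restate the convention):

/* Sketch only: trap decision for one FGT bit of known polarity. */
static bool fgt_bit_traps(u64 fgt_reg, unsigned int bit, bool positive)
{
	if (positive)
		return fgt_reg & BIT(bit);	/* traps when the bit is 1 */

	return !(fgt_reg & BIT(bit));		/* "n" bits trap when 0 */
}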
+
+static __init int check_fgt_masks(struct fgt_masks *masks)
+{
+ unsigned long duplicate = masks->mask & masks->nmask;
+ u64 res0 = masks->res0;
+ int ret = 0;
+
+ if (duplicate) {
+ int i;
+
+ for_each_set_bit(i, &duplicate, 64) {
+ kvm_err("%s[%d] bit has both polarities\n",
+ masks->str, i);
+ }
+
+ ret = -EINVAL;
+ }
+
+ masks->res0 = ~(masks->mask | masks->nmask);
+ if (masks->res0 != res0)
+ kvm_info("Implicit %s = %016llx, expecting %016llx\n",
+ masks->str, masks->res0, res0);
+
+ return ret;
+}
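check_fgt_masks() effectively enforces that mask, nmask and res0 partition the 64 bits of each register: no bit may carry both polarities, and anything claimed by neither polarity must be RES0. The same invariant, restated as a sketch (fgt_masks_consistent() is not part of the patch):

/* Sketch: the invariant check_fgt_masks() converges on. */
static bool fgt_masks_consistent(const struct fgt_masks *m)
{
	return !(m->mask & m->nmask) &&
	       m->res0 == ~(m->mask | m->nmask);
}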
+
+static __init int check_all_fgt_masks(int ret)
+{
+ static struct fgt_masks * const masks[] __initconst = {
+ &hfgrtr_masks,
+ &hfgwtr_masks,
+ &hfgitr_masks,
+ &hdfgrtr_masks,
+ &hdfgwtr_masks,
+ &hafgrtr_masks,
+ &hfgrtr2_masks,
+ &hfgwtr2_masks,
+ &hfgitr2_masks,
+ &hdfgrtr2_masks,
+ &hdfgwtr2_masks,
+ };
+ int err = 0;
+
+ for (int i = 0; i < ARRAY_SIZE(masks); i++)
+ err |= check_fgt_masks(masks[i]);
+
+ return ret ?: err;
+}
+
+#define for_each_encoding_in(__x, __s, __e) \
+ for (u32 __x = __s; __x <= __e; __x = encoding_next(__x))
+
int __init populate_nv_trap_config(void)
{
int ret = 0;
@@ -2041,6 +2223,7 @@ int __init populate_nv_trap_config(void)
BUILD_BUG_ON(__NR_CGT_GROUP_IDS__ > BIT(TC_CGT_BITS));
BUILD_BUG_ON(__NR_FGT_GROUP_IDS__ > BIT(TC_FGT_BITS));
BUILD_BUG_ON(__NR_FG_FILTER_IDS__ > BIT(TC_FGF_BITS));
+ BUILD_BUG_ON(__HCRX_EL2_MASK & __HCRX_EL2_nMASK);
for (int i = 0; i < ARRAY_SIZE(encoding_to_cgt); i++) {
const struct encoding_to_trap_config *cgt = &encoding_to_cgt[i];
@@ -2051,7 +2234,7 @@ int __init populate_nv_trap_config(void)
ret = -EINVAL;
}
- for (u32 enc = cgt->encoding; enc <= cgt->end; enc = encoding_next(enc)) {
+ for_each_encoding_in(enc, cgt->encoding, cgt->end) {
prev = xa_store(&sr_forward_xa, enc,
xa_mk_value(cgt->tc.val), GFP_KERNEL);
if (prev && !xa_is_err(prev)) {
@@ -2066,6 +2249,10 @@ int __init populate_nv_trap_config(void)
}
}
+ if (__HCRX_EL2_RES0 != HCRX_EL2_RES0)
+		kvm_info("Sanitised HCRX_EL2_RES0 = %016llx, expecting %016llx\n",
+ __HCRX_EL2_RES0, HCRX_EL2_RES0);
+
kvm_info("nv: %ld coarse grained trap handlers\n",
ARRAY_SIZE(encoding_to_cgt));
@@ -2082,23 +2269,39 @@ int __init populate_nv_trap_config(void)
print_nv_trap_error(fgt, "Invalid FGT", ret);
}
- tc = get_trap_config(fgt->encoding);
+ for_each_encoding_in(enc, fgt->encoding, fgt->end) {
+ tc = get_trap_config(enc);
- if (tc.fgt) {
- ret = -EINVAL;
- print_nv_trap_error(fgt, "Duplicate FGT", ret);
- }
+ if (tc.fgt) {
+ ret = -EINVAL;
+ print_nv_trap_error(fgt, "Duplicate FGT", ret);
+ }
+
+ tc.val |= fgt->tc.val;
+ prev = xa_store(&sr_forward_xa, enc,
+ xa_mk_value(tc.val), GFP_KERNEL);
+
+ if (xa_is_err(prev)) {
+ ret = xa_err(prev);
+ print_nv_trap_error(fgt, "Failed FGT insertion", ret);
+ }
- tc.val |= fgt->tc.val;
- prev = xa_store(&sr_forward_xa, fgt->encoding,
- xa_mk_value(tc.val), GFP_KERNEL);
+ if (!aggregate_fgt(tc)) {
+ ret = -EINVAL;
+ print_nv_trap_error(fgt, "FGT bit is reserved", ret);
+ }
+ }
+ }
- if (xa_is_err(prev)) {
- ret = xa_err(prev);
- print_nv_trap_error(fgt, "Failed FGT insertion", ret);
+ for (int i = 0; i < ARRAY_SIZE(non_0x18_fgt); i++) {
+ if (!aggregate_fgt(non_0x18_fgt[i])) {
+ ret = -EINVAL;
+ kvm_err("non_0x18_fgt[%d] is reserved\n", i);
}
}
+ ret = check_all_fgt_masks(ret);
+
kvm_info("nv: %ld fine grained trap handlers\n",
ARRAY_SIZE(encoding_to_fgt));
@@ -2215,11 +2418,11 @@ static u64 kvm_get_sysreg_res0(struct kvm *kvm, enum vcpu_sysreg sr)
return masks->mask[sr - __VNCR_START__].res0;
}
-static bool check_fgt_bit(struct kvm_vcpu *vcpu, bool is_read,
- u64 val, const union trap_config tc)
+static bool check_fgt_bit(struct kvm_vcpu *vcpu, enum vcpu_sysreg sr,
+ const union trap_config tc)
{
struct kvm *kvm = vcpu->kvm;
- enum vcpu_sysreg sr;
+ u64 val;
/*
* KVM doesn't know about any FGTs that apply to the host, and hopefully
@@ -2228,6 +2431,8 @@ static bool check_fgt_bit(struct kvm_vcpu *vcpu, bool is_read,
if (is_hyp_ctxt(vcpu))
return false;
+ val = __vcpu_sys_reg(vcpu, sr);
+
if (tc.pol)
return (val & BIT(tc.bit));
@@ -2242,38 +2447,17 @@ static bool check_fgt_bit(struct kvm_vcpu *vcpu, bool is_read,
if (val & BIT(tc.bit))
return false;
- switch ((enum fgt_group_id)tc.fgt) {
- case HFGxTR_GROUP:
- sr = is_read ? HFGRTR_EL2 : HFGWTR_EL2;
- break;
-
- case HDFGRTR_GROUP:
- sr = is_read ? HDFGRTR_EL2 : HDFGWTR_EL2;
- break;
-
- case HAFGRTR_GROUP:
- sr = HAFGRTR_EL2;
- break;
-
- case HFGITR_GROUP:
- sr = HFGITR_EL2;
- break;
-
- default:
- WARN_ONCE(1, "Unhandled FGT group");
- return false;
- }
-
return !(kvm_get_sysreg_res0(kvm, sr) & BIT(tc.bit));
}
bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
{
+ enum vcpu_sysreg fgtreg;
union trap_config tc;
enum trap_behaviour b;
bool is_read;
u32 sysreg;
- u64 esr, val;
+ u64 esr;
esr = kvm_vcpu_get_esr(vcpu);
sysreg = esr_sys64_to_sysreg(esr);
@@ -2319,26 +2503,20 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
case __NO_FGT_GROUP__:
break;
- case HFGxTR_GROUP:
- if (is_read)
- val = __vcpu_sys_reg(vcpu, HFGRTR_EL2);
- else
- val = __vcpu_sys_reg(vcpu, HFGWTR_EL2);
+ case HFGRTR_GROUP:
+ fgtreg = is_read ? HFGRTR_EL2 : HFGWTR_EL2;
break;
case HDFGRTR_GROUP:
- if (is_read)
- val = __vcpu_sys_reg(vcpu, HDFGRTR_EL2);
- else
- val = __vcpu_sys_reg(vcpu, HDFGWTR_EL2);
+ fgtreg = is_read ? HDFGRTR_EL2 : HDFGWTR_EL2;
break;
case HAFGRTR_GROUP:
- val = __vcpu_sys_reg(vcpu, HAFGRTR_EL2);
+ fgtreg = HAFGRTR_EL2;
break;
case HFGITR_GROUP:
- val = __vcpu_sys_reg(vcpu, HFGITR_EL2);
+ fgtreg = HFGITR_EL2;
switch (tc.fgf) {
u64 tmp;
@@ -2352,13 +2530,26 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
}
break;
- case __NR_FGT_GROUP_IDS__:
+ case HFGRTR2_GROUP:
+ fgtreg = is_read ? HFGRTR2_EL2 : HFGWTR2_EL2;
+ break;
+
+ case HDFGRTR2_GROUP:
+ fgtreg = is_read ? HDFGRTR2_EL2 : HDFGWTR2_EL2;
+ break;
+
+ case HFGITR2_GROUP:
+ fgtreg = HFGITR2_EL2;
+ break;
+
+ default:
/* Something is really wrong, bail out */
- WARN_ONCE(1, "__NR_FGT_GROUP_IDS__");
+ WARN_ONCE(1, "Bad FGT group (encoding %08x, config %016llx)\n",
+ sysreg, tc.val);
goto local;
}
- if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu, is_read, val, tc))
+ if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu, fgtreg, tc))
goto inject;
b = compute_trap_behaviour(vcpu, tc);
@@ -2471,13 +2662,6 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
{
u64 spsr, elr, esr;
- /*
- * Forward this trap to the virtual EL2 if the virtual
- * HCR_EL2.NV bit is set and this is coming from !EL2.
- */
- if (forward_hcr_traps(vcpu, HCR_NV))
- return;
-
spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2);
spsr = kvm_check_illegal_exception_return(vcpu, spsr);
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index b73dc26bc44b..453266c96481 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -10,6 +10,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <linux/ubsan.h>
#include <asm/esr.h>
#include <asm/exception.h>
@@ -298,6 +299,81 @@ static int handle_svc(struct kvm_vcpu *vcpu)
return 1;
}
+static int kvm_handle_gcs(struct kvm_vcpu *vcpu)
+{
+ /* We don't expect GCS, so treat it with contempt */
+ if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, GCS, IMP))
+ WARN_ON_ONCE(1);
+
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+static int handle_other(struct kvm_vcpu *vcpu)
+{
+ bool is_l2 = vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
+ u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+ u64 iss = ESR_ELx_ISS(esr);
+ struct kvm *kvm = vcpu->kvm;
+ bool allowed, fwd = false;
+
+ /*
+ * We only trap for two reasons:
+ *
+ * - the feature is disabled, and the only outcome is to
+ * generate an UNDEF.
+ *
+ * - the feature is enabled, but a NV guest wants to trap the
+ * feature used by its L2 guest. We forward the exception in
+ * this case.
+ *
+	 * What we don't expect is to end up here if the guest is
+	 * expected to be able to directly use the feature, hence the
+ * WARN_ON below.
+ */
+ switch (iss) {
+ case ESR_ELx_ISS_OTHER_ST64BV:
+ allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
+ if (is_l2)
+ fwd = !(hcrx & HCRX_EL2_EnASR);
+ break;
+ case ESR_ELx_ISS_OTHER_ST64BV0:
+ allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
+ if (is_l2)
+ fwd = !(hcrx & HCRX_EL2_EnAS0);
+ break;
+ case ESR_ELx_ISS_OTHER_LDST64B:
+ allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
+ if (is_l2)
+ fwd = !(hcrx & HCRX_EL2_EnALS);
+ break;
+ case ESR_ELx_ISS_OTHER_TSBCSYNC:
+ allowed = kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, TRBE_V1P1);
+ if (is_l2)
+ fwd = (__vcpu_sys_reg(vcpu, HFGITR2_EL2) & HFGITR2_EL2_TSBCSYNC);
+ break;
+ case ESR_ELx_ISS_OTHER_PSBCSYNC:
+ allowed = kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P5);
+ if (is_l2)
+ fwd = (__vcpu_sys_reg(vcpu, HFGITR_EL2) & HFGITR_EL2_PSBCSYNC);
+ break;
+ default:
+ /* Clearly, we're missing something. */
+ WARN_ON_ONCE(1);
+ allowed = false;
+ }
+
+ WARN_ON_ONCE(allowed && !fwd);
+
+ if (allowed && fwd)
+ kvm_inject_nested_sync(vcpu, esr);
+ else
+ kvm_inject_undefined(vcpu);
+
+ return 1;
+}
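As a concrete illustration of the policy above: an L2 guest issuing ST64BV when FEAT_LS64_V is exposed but the L1 hypervisor has left HCRX_EL2.EnASR clear gets the exception forwarded to virtual EL2, whereas a guest for which the feature is not exposed at all simply receives an UNDEF.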
+
static exit_handle_fn arm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
@@ -307,6 +383,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_CP14_LS] = kvm_handle_cp14_load_store,
[ESR_ELx_EC_CP10_ID] = kvm_handle_cp10_id,
[ESR_ELx_EC_CP14_64] = kvm_handle_cp14_64,
+ [ESR_ELx_EC_OTHER] = handle_other,
[ESR_ELx_EC_HVC32] = handle_hvc,
[ESR_ELx_EC_SMC32] = handle_smc,
[ESR_ELx_EC_HVC64] = handle_hvc,
@@ -317,6 +394,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_ERET] = kvm_handle_eret,
[ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
+ [ESR_ELx_EC_DABT_CUR] = kvm_handle_vncr_abort,
[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
@@ -324,6 +402,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_BRK64] = kvm_handle_guest_debug,
[ESR_ELx_EC_FP_ASIMD] = kvm_handle_fpasimd,
[ESR_ELx_EC_PAC] = kvm_handle_ptrauth,
+ [ESR_ELx_EC_GCS] = kvm_handle_gcs,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
@@ -474,6 +553,11 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
print_nvhe_hyp_panic("BUG", panic_addr);
} else if (IS_ENABLED(CONFIG_CFI_CLANG) && esr_is_cfi_brk(esr)) {
kvm_nvhe_report_cfi_failure(panic_addr);
+ } else if (IS_ENABLED(CONFIG_UBSAN_KVM_EL2) &&
+ ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
+ esr_is_ubsan_brk(esr)) {
+ print_nvhe_hyp_panic(report_ubsan_failure(esr & UBSAN_BRK_MASK),
+ panic_addr);
} else {
print_nvhe_hyp_panic("panic", panic_addr);
}
diff --git a/arch/arm64/kvm/hyp/include/hyp/fault.h b/arch/arm64/kvm/hyp/include/hyp/fault.h
index 17df94570f03..fc573fc767b0 100644
--- a/arch/arm64/kvm/hyp/include/hyp/fault.h
+++ b/arch/arm64/kvm/hyp/include/hyp/fault.h
@@ -12,6 +12,16 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
+static inline bool __fault_safe_to_translate(u64 esr)
+{
+ u64 fsc = esr & ESR_ELx_FSC;
+
+ if (esr_fsc_is_sea_ttw(esr) || esr_fsc_is_secc_ttw(esr))
+ return false;
+
+ return !(fsc == ESR_ELx_FSC_EXTABT && (esr & ESR_ELx_FnV));
+}
+
static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
int ret;
@@ -44,34 +54,50 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
return true;
}
-static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
+/*
+ * Checks for the conditions when HPFAR_EL2 is written, per ARM ARM R_FKLWR.
+ */
+static inline bool __hpfar_valid(u64 esr)
{
- u64 hpfar, far;
-
- far = read_sysreg_el2(SYS_FAR);
-
/*
- * The HPFAR can be invalid if the stage 2 fault did not
- * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
- * bit is clear) and one of the two following cases are true:
- * 1. The fault was due to a permission fault
- * 2. The processor carries errata 834220
+ * CPUs affected by ARM erratum #834220 may incorrectly report a
+ * stage-2 translation fault when a stage-1 permission fault occurs.
*
- * Therefore, for all non S1PTW faults where we either have a
- * permission fault or the errata workaround is enabled, we
- * resolve the IPA using the AT instruction.
+ * Re-walk the page tables to determine if a stage-1 fault actually
+ * occurred.
*/
- if (!(esr & ESR_ELx_S1PTW) &&
- (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
- esr_fsc_is_permission_fault(esr))) {
- if (!__translate_far_to_hpfar(far, &hpfar))
- return false;
- } else {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_834220) &&
+ esr_fsc_is_translation_fault(esr))
+ return false;
+
+ if (esr_fsc_is_translation_fault(esr) || esr_fsc_is_access_flag_fault(esr))
+ return true;
+
+ if ((esr & ESR_ELx_S1PTW) && esr_fsc_is_permission_fault(esr))
+ return true;
+
+ return esr_fsc_is_addr_sz_fault(esr);
+}
+
+static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
+{
+ u64 hpfar;
+
+ fault->far_el2 = read_sysreg_el2(SYS_FAR);
+ fault->hpfar_el2 = 0;
+
+ if (__hpfar_valid(esr))
hpfar = read_sysreg(hpfar_el2);
- }
+ else if (unlikely(!__fault_safe_to_translate(esr)))
+ return true;
+ else if (!__translate_far_to_hpfar(fault->far_el2, &hpfar))
+ return false;
- fault->far_el2 = far;
- fault->hpfar_el2 = hpfar;
+ /*
+ * Hijack HPFAR_EL2.NS (RES0 in Non-secure) to indicate a valid
+ * HPFAR value.
+ */
+ fault->hpfar_el2 = hpfar | HPFAR_EL2_NS;
return true;
}
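Since HPFAR_EL2.NS is RES0 for Non-secure faults, the bit is repurposed here purely as a software "HPFAR is valid" flag. Consumers are expected to test it before trusting the IPA, roughly as the host mem-abort path further down does (handle_without_ipa() is a hypothetical fallback, named only for this sketch):

/* Sketch of a consumer, mirroring handle_host_mem_abort() below. */
if (!(fault.hpfar_el2 & HPFAR_EL2_NS))
	return handle_without_ipa();	/* hypothetical: no usable IPA */

ipa = FIELD_GET(HPFAR_EL2_FIPA, fault.hpfar_el2) << 12;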
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index b741ea6aefa5..bb9f2eecfb67 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -65,12 +65,56 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
}
}
+#define reg_to_fgt_masks(reg) \
+ ({ \
+ struct fgt_masks *m; \
+ switch(reg) { \
+ case HFGRTR_EL2: \
+ m = &hfgrtr_masks; \
+ break; \
+ case HFGWTR_EL2: \
+ m = &hfgwtr_masks; \
+ break; \
+ case HFGITR_EL2: \
+ m = &hfgitr_masks; \
+ break; \
+ case HDFGRTR_EL2: \
+ m = &hdfgrtr_masks; \
+ break; \
+ case HDFGWTR_EL2: \
+ m = &hdfgwtr_masks; \
+ break; \
+ case HAFGRTR_EL2: \
+ m = &hafgrtr_masks; \
+ break; \
+ case HFGRTR2_EL2: \
+ m = &hfgrtr2_masks; \
+ break; \
+ case HFGWTR2_EL2: \
+ m = &hfgwtr2_masks; \
+ break; \
+ case HFGITR2_EL2: \
+ m = &hfgitr2_masks; \
+ break; \
+ case HDFGRTR2_EL2: \
+ m = &hdfgrtr2_masks; \
+ break; \
+ case HDFGWTR2_EL2: \
+ m = &hdfgwtr2_masks; \
+ break; \
+ default: \
+ BUILD_BUG_ON(1); \
+ } \
+ \
+ m; \
+ })
+
#define compute_clr_set(vcpu, reg, clr, set) \
do { \
- u64 hfg; \
- hfg = __vcpu_sys_reg(vcpu, reg) & ~__ ## reg ## _RES0; \
- set |= hfg & __ ## reg ## _MASK; \
- clr |= ~hfg & __ ## reg ## _nMASK; \
+ u64 hfg = __vcpu_sys_reg(vcpu, reg); \
+ struct fgt_masks *m = reg_to_fgt_masks(reg); \
+ set |= hfg & m->mask; \
+ clr |= ~hfg & m->nmask; \
} while(0)
#define reg_to_fgt_group_id(reg) \
@@ -79,7 +123,7 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
switch(reg) { \
case HFGRTR_EL2: \
case HFGWTR_EL2: \
- id = HFGxTR_GROUP; \
+ id = HFGRTR_GROUP; \
break; \
case HFGITR_EL2: \
id = HFGITR_GROUP; \
@@ -91,6 +135,17 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
case HAFGRTR_EL2: \
id = HAFGRTR_GROUP; \
break; \
+ case HFGRTR2_EL2: \
+ case HFGWTR2_EL2: \
+ id = HFGRTR2_GROUP; \
+ break; \
+ case HFGITR2_EL2: \
+ id = HFGITR2_GROUP; \
+ break; \
+ case HDFGRTR2_EL2: \
+ case HDFGWTR2_EL2: \
+ id = HDFGRTR2_GROUP; \
+ break; \
default: \
BUILD_BUG_ON(1); \
} \
@@ -101,13 +156,16 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
#define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \
do { \
u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \
- set |= hfg & __ ## reg ## _MASK; \
- clr |= hfg & __ ## reg ## _nMASK; \
+ struct fgt_masks *m = reg_to_fgt_masks(reg); \
+ set |= hfg & m->mask; \
+ clr |= hfg & m->nmask; \
} while(0)
#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \
do { \
- u64 c = 0, s = 0; \
+ struct fgt_masks *m = reg_to_fgt_masks(reg); \
+ u64 c = clr, s = set; \
+ u64 val; \
\
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) \
@@ -115,30 +173,15 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
\
compute_undef_clr_set(vcpu, kvm, reg, c, s); \
\
- s |= set; \
- c |= clr; \
- if (c || s) { \
- u64 val = __ ## reg ## _nMASK; \
- val |= s; \
- val &= ~c; \
- write_sysreg_s(val, SYS_ ## reg); \
- } \
+ val = m->nmask; \
+ val |= s; \
+ val &= ~c; \
+ write_sysreg_s(val, SYS_ ## reg); \
} while(0)
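Note the seed value: starting from m->nmask sets every negative-polarity bit to 1 and leaves every positive-polarity bit at 0, i.e. "no trap" on both sides, before the accumulated set (s) and clear (c) bits are applied. That is what makes it safe to write the register unconditionally rather than only when c or s is non-zero.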
#define update_fgt_traps(hctxt, vcpu, kvm, reg) \
update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)
-/*
- * Validate the fine grain trap masks.
- * Check that the masks do not overlap and that all bits are accounted for.
- */
-#define CHECK_FGT_MASKS(reg) \
- do { \
- BUILD_BUG_ON((__ ## reg ## _MASK) & (__ ## reg ## _nMASK)); \
- BUILD_BUG_ON(~((__ ## reg ## _RES0) ^ (__ ## reg ## _MASK) ^ \
- (__ ## reg ## _nMASK))); \
- } while(0)
-
static inline bool cpu_has_amu(void)
{
u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
@@ -152,56 +195,60 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
- CHECK_FGT_MASKS(HFGRTR_EL2);
- CHECK_FGT_MASKS(HFGWTR_EL2);
- CHECK_FGT_MASKS(HFGITR_EL2);
- CHECK_FGT_MASKS(HDFGRTR_EL2);
- CHECK_FGT_MASKS(HDFGWTR_EL2);
- CHECK_FGT_MASKS(HAFGRTR_EL2);
- CHECK_FGT_MASKS(HCRX_EL2);
-
if (!cpus_have_final_cap(ARM64_HAS_FGT))
return;
update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ?
- HFGxTR_EL2_TCR_EL1_MASK : 0);
+ HFGWTR_EL2_TCR_EL1_MASK : 0);
update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
if (cpu_has_amu())
update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
+
+ if (!cpus_have_final_cap(ARM64_HAS_FGT2))
+ return;
+
+ update_fgt_traps(hctxt, vcpu, kvm, HFGRTR2_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HFGWTR2_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HFGITR2_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR2_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR2_EL2);
}
-#define __deactivate_fgt(htcxt, vcpu, kvm, reg) \
+#define __deactivate_fgt(htcxt, vcpu, reg) \
do { \
- if ((vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) || \
- kvm->arch.fgu[reg_to_fgt_group_id(reg)]) \
- write_sysreg_s(ctxt_sys_reg(hctxt, reg), \
- SYS_ ## reg); \
+ write_sysreg_s(ctxt_sys_reg(hctxt, reg), \
+ SYS_ ## reg); \
} while(0)
static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
- struct kvm *kvm = kern_hyp_va(vcpu->kvm);
if (!cpus_have_final_cap(ARM64_HAS_FGT))
return;
- __deactivate_fgt(hctxt, vcpu, kvm, HFGRTR_EL2);
- if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
- write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
- else
- __deactivate_fgt(hctxt, vcpu, kvm, HFGWTR_EL2);
- __deactivate_fgt(hctxt, vcpu, kvm, HFGITR_EL2);
- __deactivate_fgt(hctxt, vcpu, kvm, HDFGRTR_EL2);
- __deactivate_fgt(hctxt, vcpu, kvm, HDFGWTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, HFGRTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, HFGWTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, HFGITR_EL2);
+ __deactivate_fgt(hctxt, vcpu, HDFGRTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, HDFGWTR_EL2);
if (cpu_has_amu())
- __deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, HAFGRTR_EL2);
+
+ if (!cpus_have_final_cap(ARM64_HAS_FGT2))
+ return;
+
+ __deactivate_fgt(hctxt, vcpu, HFGRTR2_EL2);
+ __deactivate_fgt(hctxt, vcpu, HFGWTR2_EL2);
+ __deactivate_fgt(hctxt, vcpu, HFGITR2_EL2);
+ __deactivate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
+ __deactivate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
}
static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu)
@@ -235,6 +282,8 @@ static inline void __deactivate_traps_mpam(void)
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
+ struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
@@ -245,11 +294,8 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
* EL1 instead of being trapped to EL2.
*/
if (system_supports_pmuv3()) {
- struct kvm_cpu_context *hctxt;
-
write_sysreg(0, pmselr_el0);
- hctxt = host_data_ptr(host_ctxt);
ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
@@ -261,14 +307,12 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
u64 hcrx = vcpu->arch.hcrx_el2;
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
- u64 clr = 0, set = 0;
-
- compute_clr_set(vcpu, HCRX_EL2, clr, set);
-
- hcrx |= set;
- hcrx &= ~clr;
+ u64 val = __vcpu_sys_reg(vcpu, HCRX_EL2);
+ hcrx |= val & __HCRX_EL2_MASK;
+ hcrx &= ~(~val & __HCRX_EL2_nMASK);
}
+ ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
write_sysreg_s(hcrx, SYS_HCRX_EL2);
}
@@ -278,19 +322,18 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
+ struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
write_sysreg(0, hstr_el2);
if (system_supports_pmuv3()) {
- struct kvm_cpu_context *hctxt;
-
- hctxt = host_data_ptr(host_ctxt);
write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
}
if (cpus_have_final_cap(ARM64_HAS_HCX))
- write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
+ write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2);
__deactivate_traps_hfgxtr(vcpu);
__deactivate_traps_mpam();
@@ -301,7 +344,7 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
hcr |= HCR_TVM;
- write_sysreg(hcr, hcr_el2);
+ write_sysreg_hcr(hcr);
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index ea0a704da9b8..5f9d56754e39 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -39,12 +39,12 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
-int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
+int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
enum kvm_pgtable_prot prot);
-int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm);
+int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *hyp_vm);
int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot);
-int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm);
-int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm);
+int __pkvm_host_wrprotect_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *hyp_vm);
+int __pkvm_host_test_clear_young_guest(u64 gfn, u64 nr_pages, bool mkold, struct pkvm_hyp_vm *vm);
int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu);
bool addr_is_memory(phys_addr_t phys);
@@ -67,4 +67,10 @@ static __always_inline void __load_host_stage2(void)
else
write_sysreg(0, vttbr_el2);
}
+
+#ifdef CONFIG_NVHE_EL2_DEBUG
+void pkvm_ownership_selftest(void *base);
+#else
+static inline void pkvm_ownership_selftest(void *base) { }
+#endif
#endif /* __KVM_NVHE_MEM_PROTECT__ */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
index 34233d586060..dee1a406b0c2 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -8,23 +8,30 @@
#include <linux/types.h>
/*
- * Bits 0-1 are reserved to track the memory ownership state of each page:
- * 00: The page is owned exclusively by the page-table owner.
- * 01: The page is owned by the page-table owner, but is shared
- * with another entity.
- * 10: The page is shared with, but not owned by the page-table owner.
- * 11: Reserved for future use (lending).
+ * Bits 0-1 are used to encode the memory ownership state of each page from the
+ * point of view of a pKVM "component" (host, hyp, guest, ... see enum
+ * pkvm_component_id):
+ * 00: The page is owned and exclusively accessible by the component;
+ * 01: The page is owned and accessible by the component, but is also
+ * accessible by another component;
+ * 10: The page is accessible but not owned by the component;
+ * The storage of this state depends on the component: either in the
+ * hyp_vmemmap for the host and hyp states or in PTE software bits for guests.
*/
enum pkvm_page_state {
PKVM_PAGE_OWNED = 0ULL,
PKVM_PAGE_SHARED_OWNED = BIT(0),
PKVM_PAGE_SHARED_BORROWED = BIT(1),
- __PKVM_PAGE_RESERVED = BIT(0) | BIT(1),
- /* Meta-states which aren't encoded directly in the PTE's SW bits */
- PKVM_NOPAGE = BIT(2),
+ /*
+ * 'Meta-states' are not stored directly in PTE SW bits for guest
+ * states, but inferred from the context (e.g. invalid PTE entries).
+ * For the host and hyp, meta-states are stored directly in the
+ * struct hyp_page.
+ */
+ PKVM_NOPAGE = BIT(0) | BIT(1),
};
-#define PKVM_PAGE_META_STATES_MASK (~__PKVM_PAGE_RESERVED)
+#define PKVM_PAGE_STATE_MASK (BIT(0) | BIT(1))
#define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
@@ -44,8 +51,15 @@ struct hyp_page {
u16 refcount;
u8 order;
- /* Host (non-meta) state. Guarded by the host stage-2 lock. */
- enum pkvm_page_state host_state : 8;
+ /* Host state. Guarded by the host stage-2 lock. */
+ unsigned __host_state : 4;
+
+ /*
+ * Complement of the hyp state. Guarded by the hyp stage-1 lock. We use
+ * the complement so that the initial 0 in __hyp_state_comp (due to the
+ * entire vmemmap starting off zeroed) encodes PKVM_NOPAGE.
+ */
+ unsigned __hyp_state_comp : 4;
u32 host_share_guest_count;
};
@@ -82,6 +96,26 @@ static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys)
#define hyp_page_to_virt(page) __hyp_va(hyp_page_to_phys(page))
#define hyp_page_to_pool(page) (((struct hyp_page *)page)->pool)
+static inline enum pkvm_page_state get_host_state(struct hyp_page *p)
+{
+ return p->__host_state;
+}
+
+static inline void set_host_state(struct hyp_page *p, enum pkvm_page_state state)
+{
+ p->__host_state = state;
+}
+
+static inline enum pkvm_page_state get_hyp_state(struct hyp_page *p)
+{
+ return p->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
+}
+
+static inline void set_hyp_state(struct hyp_page *p, enum pkvm_page_state state)
+{
+ p->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
+}
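A quick worked example of the complement trick, using the enum encodings defined earlier in this file:

/*
 * state                              stored as __hyp_state_comp
 * PKVM_NOPAGE               (0b11)   ->  0b00  (what a zeroed vmemmap already holds)
 * PKVM_PAGE_OWNED           (0b00)   ->  0b11
 * PKVM_PAGE_SHARED_OWNED    (0b01)   ->  0b10
 * PKVM_PAGE_SHARED_BORROWED (0b10)   ->  0b01
 */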
+
/*
* Refcounting for 'struct hyp_page'.
* hyp_pool::lock must be held if atomic access to the refcount is required.
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
index 230e4f2527de..6e83ce35c2f2 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -13,9 +13,11 @@
extern struct kvm_pgtable pkvm_pgtable;
extern hyp_spinlock_t pkvm_pgd_lock;
-int hyp_create_pcpu_fixmap(void);
+int hyp_create_fixmap(void);
void *hyp_fixmap_map(phys_addr_t phys);
void hyp_fixmap_unmap(void);
+void *hyp_fixblock_map(phys_addr_t phys, size_t *size);
+void hyp_fixblock_unmap(void);
int hyp_create_idmap(u32 hyp_va_bits);
int hyp_map_vectors(void);
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index b43426a493df..a76522d63c3e 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -99,3 +99,9 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS), $(KBUILD_CFLAG
# causes a build failure. Remove profile optimization flags.
KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%, $(KBUILD_CFLAGS))
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+
+ifeq ($(CONFIG_UBSAN_KVM_EL2),y)
+UBSAN_SANITIZE := y
+# Always use brk and not hooks
+ccflags-y += $(CFLAGS_UBSAN_TRAP)
+endif
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index e433dfab882a..3369dd0c4009 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -730,10 +730,10 @@ static void do_ffa_version(struct arm_smccc_res *res,
hyp_ffa_version = ffa_req_version;
}
- if (hyp_ffa_post_init())
+ if (hyp_ffa_post_init()) {
res->a0 = FFA_RET_NOT_SUPPORTED;
- else {
- has_version_negotiated = true;
+ } else {
+ smp_store_release(&has_version_negotiated, true);
res->a0 = hyp_ffa_version;
}
unlock:
@@ -809,7 +809,8 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
if (!is_ffa_call(func_id))
return false;
- if (!has_version_negotiated && func_id != FFA_VERSION) {
+ if (func_id != FFA_VERSION &&
+ !smp_load_acquire(&has_version_negotiated)) {
ffa_to_smccc_error(&res, FFA_RET_INVALID_PARAMETERS);
goto out_handled;
}
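The switch to smp_store_release()/smp_load_acquire() pairs the publication of has_version_negotiated with the preceding write of hyp_ffa_version, so any handler that observes the flag as true also observes the negotiated version. Schematically (a sketch of the ordering, not the exact code):

/*
 * writer (do_ffa_version)                          reader (kvm_host_ffa_handler)
 * hyp_ffa_version = ffa_req_version;
 * smp_store_release(&has_version_negotiated, true);
 *                                                  if (smp_load_acquire(&has_version_negotiated))
 *                                                          hyp_ffa_version is guaranteed visible here
 */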
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 58f0cb2298cc..eef15b374abb 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -124,7 +124,7 @@ SYM_FUNC_START(__hyp_do_panic)
/* Ensure host stage-2 is disabled */
mrs x0, hcr_el2
bic x0, x0, #HCR_VM
- msr hcr_el2, x0
+ msr_hcr_el2 x0
isb
tlbi vmalls12e1
dsb nsh
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index f8af11189572..aada42522e7b 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -100,7 +100,7 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init)
msr mair_el2, x1
ldr x1, [x0, #NVHE_INIT_HCR_EL2]
- msr hcr_el2, x1
+ msr_hcr_el2 x1
mov x2, #HCR_E2H
and x2, x1, x2
@@ -262,7 +262,7 @@ reset:
alternative_if ARM64_KVM_PROTECTED_MODE
mov_q x5, HCR_HOST_NVHE_FLAGS
- msr hcr_el2, x5
+ msr_hcr_el2 x5
alternative_else_nop_endif
/* Install stub vectors */
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 2c37680d954c..8e8848de4d47 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -123,10 +123,6 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
- hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
- /* Limit guest vector length to the maximum supported by the host. */
- hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl);
-
hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWI | HCR_TWE);
hyp_vcpu->vcpu.arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2) &
@@ -249,7 +245,8 @@ static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(u64, pfn, host_ctxt, 1);
DECLARE_REG(u64, gfn, host_ctxt, 2);
- DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
+ DECLARE_REG(u64, nr_pages, host_ctxt, 3);
+ DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 4);
struct pkvm_hyp_vcpu *hyp_vcpu;
int ret = -EINVAL;
@@ -264,7 +261,7 @@ static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
if (ret)
goto out;
- ret = __pkvm_host_share_guest(pfn, gfn, hyp_vcpu, prot);
+ ret = __pkvm_host_share_guest(pfn, gfn, nr_pages, hyp_vcpu, prot);
out:
cpu_reg(host_ctxt, 1) = ret;
}
@@ -273,6 +270,7 @@ static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
DECLARE_REG(u64, gfn, host_ctxt, 2);
+ DECLARE_REG(u64, nr_pages, host_ctxt, 3);
struct pkvm_hyp_vm *hyp_vm;
int ret = -EINVAL;
@@ -283,7 +281,7 @@ static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt)
if (!hyp_vm)
goto out;
- ret = __pkvm_host_unshare_guest(gfn, hyp_vm);
+ ret = __pkvm_host_unshare_guest(gfn, nr_pages, hyp_vm);
put_pkvm_hyp_vm(hyp_vm);
out:
cpu_reg(host_ctxt, 1) = ret;
@@ -312,6 +310,7 @@ static void handle___pkvm_host_wrprotect_guest(struct kvm_cpu_context *host_ctxt
{
DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
DECLARE_REG(u64, gfn, host_ctxt, 2);
+ DECLARE_REG(u64, nr_pages, host_ctxt, 3);
struct pkvm_hyp_vm *hyp_vm;
int ret = -EINVAL;
@@ -322,7 +321,7 @@ static void handle___pkvm_host_wrprotect_guest(struct kvm_cpu_context *host_ctxt
if (!hyp_vm)
goto out;
- ret = __pkvm_host_wrprotect_guest(gfn, hyp_vm);
+ ret = __pkvm_host_wrprotect_guest(gfn, nr_pages, hyp_vm);
put_pkvm_hyp_vm(hyp_vm);
out:
cpu_reg(host_ctxt, 1) = ret;
@@ -332,7 +331,8 @@ static void handle___pkvm_host_test_clear_young_guest(struct kvm_cpu_context *ho
{
DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
DECLARE_REG(u64, gfn, host_ctxt, 2);
- DECLARE_REG(bool, mkold, host_ctxt, 3);
+ DECLARE_REG(u64, nr_pages, host_ctxt, 3);
+ DECLARE_REG(bool, mkold, host_ctxt, 4);
struct pkvm_hyp_vm *hyp_vm;
int ret = -EINVAL;
@@ -343,7 +343,7 @@ static void handle___pkvm_host_test_clear_young_guest(struct kvm_cpu_context *ho
if (!hyp_vm)
goto out;
- ret = __pkvm_host_test_clear_young_guest(gfn, mkold, hyp_vm);
+ ret = __pkvm_host_test_clear_young_guest(gfn, nr_pages, mkold, hyp_vm);
put_pkvm_hyp_vm(hyp_vm);
out:
cpu_reg(host_ctxt, 1) = ret;
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
index f4562f417d3f..d724f6d69302 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
@@ -25,5 +25,7 @@ SECTIONS {
BEGIN_HYP_SECTION(.data..percpu)
PERCPU_INPUT(L1_CACHE_BYTES)
END_HYP_SECTION
+
HYP_SECTION(.bss)
+ HYP_SECTION(.data)
}
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index f34f11c720d7..95d7534c9679 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -60,6 +60,11 @@ static void hyp_unlock_component(void)
hyp_spin_unlock(&pkvm_pgd_lock);
}
+#define for_each_hyp_page(__p, __st, __sz) \
+ for (struct hyp_page *__p = hyp_phys_to_page(__st), \
+ *__e = __p + ((__sz) >> PAGE_SHIFT); \
+ __p < __e; __p++)
+
static void *host_s2_zalloc_pages_exact(size_t size)
{
void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
@@ -161,12 +166,6 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
return 0;
}
-static bool guest_stage2_force_pte_cb(u64 addr, u64 end,
- enum kvm_pgtable_prot prot)
-{
- return true;
-}
-
static void *guest_s2_zalloc_pages_exact(size_t size)
{
void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size));
@@ -217,16 +216,42 @@ static void guest_s2_put_page(void *addr)
hyp_put_page(&current_vm->pool, addr);
}
+static void __apply_guest_page(void *va, size_t size,
+ void (*func)(void *addr, size_t size))
+{
+ size += va - PTR_ALIGN_DOWN(va, PAGE_SIZE);
+ va = PTR_ALIGN_DOWN(va, PAGE_SIZE);
+ size = PAGE_ALIGN(size);
+
+ while (size) {
+ size_t map_size = PAGE_SIZE;
+ void *map;
+
+ if (IS_ALIGNED((unsigned long)va, PMD_SIZE) && size >= PMD_SIZE)
+ map = hyp_fixblock_map(__hyp_pa(va), &map_size);
+ else
+ map = hyp_fixmap_map(__hyp_pa(va));
+
+ func(map, map_size);
+
+ if (map_size == PMD_SIZE)
+ hyp_fixblock_unmap();
+ else
+ hyp_fixmap_unmap();
+
+ size -= map_size;
+ va += map_size;
+ }
+}
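For example, assuming 4KiB pages (so PMD_SIZE is 2MiB): a 2MiB-aligned range of 2MiB + 4KiB is handled as one fixblock mapping followed by a single fixmap page, while a range that never reaches a 2MiB-aligned position with at least 2MiB remaining is walked page by page through the fixmap.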
+
static void clean_dcache_guest_page(void *va, size_t size)
{
- __clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
- hyp_fixmap_unmap();
+ __apply_guest_page(va, size, __clean_dcache_guest_page);
}
static void invalidate_icache_guest_page(void *va, size_t size)
{
- __invalidate_icache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
- hyp_fixmap_unmap();
+ __apply_guest_page(va, size, __invalidate_icache_guest_page);
}
int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
@@ -255,8 +280,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
};
guest_lock_component(vm);
- ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,
- guest_stage2_force_pte_cb);
+ ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, NULL);
guest_unlock_component(vm);
if (ret)
return ret;
@@ -309,7 +333,7 @@ int __pkvm_prot_finalize(void)
*/
kvm_flush_dcache_to_poc(params, sizeof(*params));
- write_sysreg(params->hcr_el2, hcr_el2);
+ write_sysreg_hcr(params->hcr_el2);
__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
/*
@@ -467,7 +491,8 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
return -EAGAIN;
if (pte) {
- WARN_ON(addr_is_memory(addr) && hyp_phys_to_page(addr)->host_state != PKVM_NOPAGE);
+ WARN_ON(addr_is_memory(addr) &&
+ get_host_state(hyp_phys_to_page(addr)) != PKVM_NOPAGE);
return -EPERM;
}
@@ -493,17 +518,15 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
static void __host_update_page_state(phys_addr_t addr, u64 size, enum pkvm_page_state state)
{
- phys_addr_t end = addr + size;
-
- for (; addr < end; addr += PAGE_SIZE)
- hyp_phys_to_page(addr)->host_state = state;
+ for_each_hyp_page(page, addr, size)
+ set_host_state(page, state);
}
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
int ret;
- if (!addr_is_memory(addr))
+ if (!range_is_memory(addr, addr + size))
return -EPERM;
ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
@@ -578,7 +601,14 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
return;
}
- addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
+
+ /*
+ * Yikes, we couldn't resolve the fault IPA. This should reinject an
+ * abort into the host when we figure out how to do that.
+ */
+ BUG_ON(!(fault.hpfar_el2 & HPFAR_EL2_NS));
+ addr = FIELD_GET(HPFAR_EL2_FIPA, fault.hpfar_el2) << 12;
+
ret = host_stage2_idmap(addr);
BUG_ON(ret && ret != -EAGAIN);
}
@@ -611,16 +641,16 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
static int __host_check_page_state_range(u64 addr, u64 size,
enum pkvm_page_state state)
{
- u64 end = addr + size;
int ret;
- ret = check_range_allowed_memory(addr, end);
+ ret = check_range_allowed_memory(addr, addr + size);
if (ret)
return ret;
hyp_assert_lock_held(&host_mmu.lock);
- for (; addr < end; addr += PAGE_SIZE) {
- if (hyp_phys_to_page(addr)->host_state != state)
+
+ for_each_hyp_page(page, addr, size) {
+ if (get_host_state(page) != state)
return -EPERM;
}
@@ -630,7 +660,7 @@ static int __host_check_page_state_range(u64 addr, u64 size,
static int __host_set_page_state_range(u64 addr, u64 size,
enum pkvm_page_state state)
{
- if (hyp_phys_to_page(addr)->host_state == PKVM_NOPAGE) {
+ if (get_host_state(hyp_phys_to_page(addr)) == PKVM_NOPAGE) {
int ret = host_stage2_idmap_locked(addr, size, PKVM_HOST_MEM_PROT);
if (ret)
@@ -642,24 +672,20 @@ static int __host_set_page_state_range(u64 addr, u64 size,
return 0;
}
-static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
+static void __hyp_set_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
- if (!kvm_pte_valid(pte))
- return PKVM_NOPAGE;
-
- return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
+ for_each_hyp_page(page, phys, size)
+ set_hyp_state(page, state);
}
-static int __hyp_check_page_state_range(u64 addr, u64 size,
- enum pkvm_page_state state)
+static int __hyp_check_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
- struct check_walk_data d = {
- .desired = state,
- .get_page_state = hyp_get_page_state,
- };
+ for_each_hyp_page(page, phys, size) {
+ if (get_hyp_state(page) != state)
+ return -EPERM;
+ }
- hyp_assert_lock_held(&pkvm_pgd_lock);
- return check_page_state_range(&pkvm_pgtable, addr, size, &d);
+ return 0;
}
static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
@@ -670,10 +696,9 @@ static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}
-static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
+static int __guest_check_page_state_range(struct pkvm_hyp_vm *vm, u64 addr,
u64 size, enum pkvm_page_state state)
{
- struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
struct check_walk_data d = {
.desired = state,
.get_page_state = guest_get_page_state,
@@ -686,8 +711,6 @@ static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
int __pkvm_host_share_hyp(u64 pfn)
{
u64 phys = hyp_pfn_to_phys(pfn);
- void *virt = __hyp_va(phys);
- enum kvm_pgtable_prot prot;
u64 size = PAGE_SIZE;
int ret;
@@ -697,14 +720,11 @@ int __pkvm_host_share_hyp(u64 pfn)
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
if (ret)
goto unlock;
- if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
- ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
- if (ret)
- goto unlock;
- }
+ ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
+ if (ret)
+ goto unlock;
- prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
- WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+ __hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));
unlock:
@@ -727,7 +747,7 @@ int __pkvm_host_unshare_hyp(u64 pfn)
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
if (ret)
goto unlock;
- ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_SHARED_BORROWED);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
if (ret)
goto unlock;
if (hyp_page_count((void *)virt)) {
@@ -735,7 +755,7 @@ int __pkvm_host_unshare_hyp(u64 pfn)
goto unlock;
}
- WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
+ __hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));
unlock:
@@ -750,7 +770,6 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
u64 phys = hyp_pfn_to_phys(pfn);
u64 size = PAGE_SIZE * nr_pages;
void *virt = __hyp_va(phys);
- enum kvm_pgtable_prot prot;
int ret;
host_lock_component();
@@ -759,14 +778,12 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
if (ret)
goto unlock;
- if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
- ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
- if (ret)
- goto unlock;
- }
+ ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
+ if (ret)
+ goto unlock;
- prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
- WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+ __hyp_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
+ WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));
unlock:
@@ -786,15 +803,14 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
host_lock_component();
hyp_lock_component();
- ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_OWNED);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
+ if (ret)
+ goto unlock;
+ ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
if (ret)
goto unlock;
- if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
- ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
- if (ret)
- goto unlock;
- }
+ __hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST));
@@ -809,24 +825,30 @@ int hyp_pin_shared_mem(void *from, void *to)
{
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
u64 end = PAGE_ALIGN((u64)to);
+ u64 phys = __hyp_pa(start);
u64 size = end - start;
+ struct hyp_page *p;
int ret;
host_lock_component();
hyp_lock_component();
- ret = __host_check_page_state_range(__hyp_pa(start), size,
- PKVM_PAGE_SHARED_OWNED);
+ ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
if (ret)
goto unlock;
- ret = __hyp_check_page_state_range(start, size,
- PKVM_PAGE_SHARED_BORROWED);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
if (ret)
goto unlock;
- for (cur = start; cur < end; cur += PAGE_SIZE)
- hyp_page_ref_inc(hyp_virt_to_page(cur));
+ for (cur = start; cur < end; cur += PAGE_SIZE) {
+ p = hyp_virt_to_page(cur);
+ hyp_page_ref_inc(p);
+ if (p->refcount == 1)
+ WARN_ON(pkvm_create_mappings_locked((void *)cur,
+ (void *)cur + PAGE_SIZE,
+ PAGE_HYP));
+ }
unlock:
hyp_unlock_component();
@@ -839,12 +861,17 @@ void hyp_unpin_shared_mem(void *from, void *to)
{
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
u64 end = PAGE_ALIGN((u64)to);
+ struct hyp_page *p;
host_lock_component();
hyp_lock_component();
- for (cur = start; cur < end; cur += PAGE_SIZE)
- hyp_page_ref_dec(hyp_virt_to_page(cur));
+ for (cur = start; cur < end; cur += PAGE_SIZE) {
+ p = hyp_virt_to_page(cur);
+ if (p->refcount == 1)
+ WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, cur, PAGE_SIZE) != PAGE_SIZE);
+ hyp_page_ref_dec(p);
+ }
hyp_unlock_component();
host_unlock_component();
@@ -880,49 +907,84 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
return ret;
}
-int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
+static int __guest_check_transition_size(u64 phys, u64 ipa, u64 nr_pages, u64 *size)
+{
+ size_t block_size;
+
+ if (nr_pages == 1) {
+ *size = PAGE_SIZE;
+ return 0;
+ }
+
+	/* We only support huge mappings at the second-to-last level */
+ block_size = kvm_granule_size(KVM_PGTABLE_LAST_LEVEL - 1);
+
+ if (nr_pages != block_size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(phys | ipa, block_size))
+ return -EINVAL;
+
+ *size = block_size;
+ return 0;
+}
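Concretely, with 4KiB pages KVM_PGTABLE_LAST_LEVEL is 3 and kvm_granule_size(2) is 2MiB, so the only accepted transitions are a single page or exactly 512 pages with both phys and ipa 2MiB-aligned; the figures scale accordingly for 16KiB and 64KiB granules.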
+
+int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
enum kvm_pgtable_prot prot)
{
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
u64 phys = hyp_pfn_to_phys(pfn);
u64 ipa = hyp_pfn_to_phys(gfn);
- struct hyp_page *page;
+ u64 size;
int ret;
if (prot & ~KVM_PGTABLE_PROT_RWX)
return -EINVAL;
- ret = check_range_allowed_memory(phys, phys + PAGE_SIZE);
+ ret = __guest_check_transition_size(phys, ipa, nr_pages, &size);
+ if (ret)
+ return ret;
+
+ ret = check_range_allowed_memory(phys, phys + size);
if (ret)
return ret;
host_lock_component();
guest_lock_component(vm);
- ret = __guest_check_page_state_range(vcpu, ipa, PAGE_SIZE, PKVM_NOPAGE);
+ ret = __guest_check_page_state_range(vm, ipa, size, PKVM_NOPAGE);
if (ret)
goto unlock;
- page = hyp_phys_to_page(phys);
- switch (page->host_state) {
- case PKVM_PAGE_OWNED:
- WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_OWNED));
- break;
- case PKVM_PAGE_SHARED_OWNED:
- if (page->host_share_guest_count)
- break;
- /* Only host to np-guest multi-sharing is tolerated */
- WARN_ON(1);
- fallthrough;
- default:
- ret = -EPERM;
- goto unlock;
+ for_each_hyp_page(page, phys, size) {
+ switch (get_host_state(page)) {
+ case PKVM_PAGE_OWNED:
+ continue;
+ case PKVM_PAGE_SHARED_OWNED:
+ if (page->host_share_guest_count == U32_MAX) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /* Only host to np-guest multi-sharing is tolerated */
+ if (page->host_share_guest_count)
+ continue;
+
+ fallthrough;
+ default:
+ ret = -EPERM;
+ goto unlock;
+ }
+ }
+
+ for_each_hyp_page(page, phys, size) {
+ set_host_state(page, PKVM_PAGE_SHARED_OWNED);
+ page->host_share_guest_count++;
}
- WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys,
+ WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, size, phys,
pkvm_mkstate(prot, PKVM_PAGE_SHARED_BORROWED),
&vcpu->vcpu.arch.pkvm_memcache, 0));
- page->host_share_guest_count++;
unlock:
guest_unlock_component(vm);
@@ -931,10 +993,9 @@ unlock:
return ret;
}
-static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa)
+static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa, u64 size)
{
enum pkvm_page_state state;
- struct hyp_page *page;
kvm_pte_t pte;
u64 phys;
s8 level;
@@ -945,7 +1006,7 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip
return ret;
if (!kvm_pte_valid(pte))
return -ENOENT;
- if (level != KVM_PGTABLE_LAST_LEVEL)
+ if (kvm_granule_size(level) != size)
return -E2BIG;
state = guest_get_page_state(pte, ipa);
@@ -953,43 +1014,49 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip
return -EPERM;
phys = kvm_pte_to_phys(pte);
- ret = check_range_allowed_memory(phys, phys + PAGE_SIZE);
+ ret = check_range_allowed_memory(phys, phys + size);
if (WARN_ON(ret))
return ret;
- page = hyp_phys_to_page(phys);
- if (page->host_state != PKVM_PAGE_SHARED_OWNED)
- return -EPERM;
- if (WARN_ON(!page->host_share_guest_count))
- return -EINVAL;
+ for_each_hyp_page(page, phys, size) {
+ if (get_host_state(page) != PKVM_PAGE_SHARED_OWNED)
+ return -EPERM;
+ if (WARN_ON(!page->host_share_guest_count))
+ return -EINVAL;
+ }
*__phys = phys;
return 0;
}
-int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *vm)
+int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm)
{
u64 ipa = hyp_pfn_to_phys(gfn);
- struct hyp_page *page;
- u64 phys;
+ u64 size, phys;
int ret;
+ ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
+ if (ret)
+ return ret;
+
host_lock_component();
guest_lock_component(vm);
- ret = __check_host_shared_guest(vm, &phys, ipa);
+ ret = __check_host_shared_guest(vm, &phys, ipa, size);
if (ret)
goto unlock;
- ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE);
+ ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, size);
if (ret)
goto unlock;
- page = hyp_phys_to_page(phys);
- page->host_share_guest_count--;
- if (!page->host_share_guest_count)
- WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_OWNED));
+ for_each_hyp_page(page, phys, size) {
+ /* __check_host_shared_guest() protects against underflow */
+ page->host_share_guest_count--;
+ if (!page->host_share_guest_count)
+ set_host_state(page, PKVM_PAGE_OWNED);
+ }
unlock:
guest_unlock_component(vm);
@@ -998,7 +1065,7 @@ unlock:
return ret;
}
-static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa)
+static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa, u64 size)
{
u64 phys;
int ret;
@@ -1009,7 +1076,7 @@ static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa)
host_lock_component();
guest_lock_component(vm);
- ret = __check_host_shared_guest(vm, &phys, ipa);
+ ret = __check_host_shared_guest(vm, &phys, ipa, size);
guest_unlock_component(vm);
host_unlock_component();
@@ -1029,7 +1096,7 @@ int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_
if (prot & ~KVM_PGTABLE_PROT_RWX)
return -EINVAL;
- assert_host_shared_guest(vm, ipa);
+ assert_host_shared_guest(vm, ipa, PAGE_SIZE);
guest_lock_component(vm);
ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0);
guest_unlock_component(vm);
@@ -1037,33 +1104,41 @@ int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_
return ret;
}
-int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *vm)
+int __pkvm_host_wrprotect_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm)
{
- u64 ipa = hyp_pfn_to_phys(gfn);
+ u64 size, ipa = hyp_pfn_to_phys(gfn);
int ret;
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
- assert_host_shared_guest(vm, ipa);
+ ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
+ if (ret)
+ return ret;
+
+ assert_host_shared_guest(vm, ipa, size);
guest_lock_component(vm);
- ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, PAGE_SIZE);
+ ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, size);
guest_unlock_component(vm);
return ret;
}
-int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm)
+int __pkvm_host_test_clear_young_guest(u64 gfn, u64 nr_pages, bool mkold, struct pkvm_hyp_vm *vm)
{
- u64 ipa = hyp_pfn_to_phys(gfn);
+ u64 size, ipa = hyp_pfn_to_phys(gfn);
int ret;
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
- assert_host_shared_guest(vm, ipa);
+ ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
+ if (ret)
+ return ret;
+
+ assert_host_shared_guest(vm, ipa, size);
guest_lock_component(vm);
- ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, PAGE_SIZE, mkold);
+ ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, size, mkold);
guest_unlock_component(vm);
return ret;
@@ -1077,10 +1152,210 @@ int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu)
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
- assert_host_shared_guest(vm, ipa);
+ assert_host_shared_guest(vm, ipa, PAGE_SIZE);
guest_lock_component(vm);
kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0);
guest_unlock_component(vm);
return 0;
}
+
+#ifdef CONFIG_NVHE_EL2_DEBUG
+struct pkvm_expected_state {
+ enum pkvm_page_state host;
+ enum pkvm_page_state hyp;
+ enum pkvm_page_state guest[2]; /* [ gfn, gfn + 1 ] */
+};
+
+static struct pkvm_expected_state selftest_state;
+static struct hyp_page *selftest_page;
+
+static struct pkvm_hyp_vm selftest_vm = {
+ .kvm = {
+ .arch = {
+ .mmu = {
+ .arch = &selftest_vm.kvm.arch,
+ .pgt = &selftest_vm.pgt,
+ },
+ },
+ },
+};
+
+static struct pkvm_hyp_vcpu selftest_vcpu = {
+ .vcpu = {
+ .arch = {
+ .hw_mmu = &selftest_vm.kvm.arch.mmu,
+ },
+ .kvm = &selftest_vm.kvm,
+ },
+};
+
+static void init_selftest_vm(void *virt)
+{
+ struct hyp_page *p = hyp_virt_to_page(virt);
+ int i;
+
+ selftest_vm.kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
+ WARN_ON(kvm_guest_prepare_stage2(&selftest_vm, virt));
+
+ for (i = 0; i < pkvm_selftest_pages(); i++) {
+ if (p[i].refcount)
+ continue;
+ p[i].refcount = 1;
+ hyp_put_page(&selftest_vm.pool, hyp_page_to_virt(&p[i]));
+ }
+}
+
+static u64 selftest_ipa(void)
+{
+ return BIT(selftest_vm.pgt.ia_bits - 1);
+}
+
+static void assert_page_state(void)
+{
+ void *virt = hyp_page_to_virt(selftest_page);
+ u64 size = PAGE_SIZE << selftest_page->order;
+ struct pkvm_hyp_vcpu *vcpu = &selftest_vcpu;
+ u64 phys = hyp_virt_to_phys(virt);
+ u64 ipa[2] = { selftest_ipa(), selftest_ipa() + PAGE_SIZE };
+ struct pkvm_hyp_vm *vm;
+
+ vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
+
+ host_lock_component();
+ WARN_ON(__host_check_page_state_range(phys, size, selftest_state.host));
+ host_unlock_component();
+
+ hyp_lock_component();
+ WARN_ON(__hyp_check_page_state_range(phys, size, selftest_state.hyp));
+ hyp_unlock_component();
+
+ guest_lock_component(&selftest_vm);
+ WARN_ON(__guest_check_page_state_range(vm, ipa[0], size, selftest_state.guest[0]));
+ WARN_ON(__guest_check_page_state_range(vm, ipa[1], size, selftest_state.guest[1]));
+ guest_unlock_component(&selftest_vm);
+}
+
+#define assert_transition_res(res, fn, ...) \
+ do { \
+ WARN_ON(fn(__VA_ARGS__) != res); \
+ assert_page_state(); \
+ } while (0)
+
+void pkvm_ownership_selftest(void *base)
+{
+ enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_RWX;
+ void *virt = hyp_alloc_pages(&host_s2_pool, 0);
+ struct pkvm_hyp_vcpu *vcpu = &selftest_vcpu;
+ struct pkvm_hyp_vm *vm = &selftest_vm;
+ u64 phys, size, pfn, gfn;
+
+ WARN_ON(!virt);
+ selftest_page = hyp_virt_to_page(virt);
+ selftest_page->refcount = 0;
+ init_selftest_vm(base);
+
+ size = PAGE_SIZE << selftest_page->order;
+ phys = hyp_virt_to_phys(virt);
+ pfn = hyp_phys_to_pfn(phys);
+ gfn = hyp_phys_to_pfn(selftest_ipa());
+
+ selftest_state.host = PKVM_NOPAGE;
+ selftest_state.hyp = PKVM_PAGE_OWNED;
+ selftest_state.guest[0] = selftest_state.guest[1] = PKVM_NOPAGE;
+ assert_page_state();
+ assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);
+ assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);
+ assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
+ assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
+
+ selftest_state.host = PKVM_PAGE_OWNED;
+ selftest_state.hyp = PKVM_NOPAGE;
+ assert_transition_res(0, __pkvm_hyp_donate_host, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);
+ assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
+ assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);
+
+ selftest_state.host = PKVM_PAGE_SHARED_OWNED;
+ selftest_state.hyp = PKVM_PAGE_SHARED_BORROWED;
+ assert_transition_res(0, __pkvm_host_share_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
+ assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
+
+ assert_transition_res(0, hyp_pin_shared_mem, virt, virt + size);
+ assert_transition_res(0, hyp_pin_shared_mem, virt, virt + size);
+ hyp_unpin_shared_mem(virt, virt + size);
+ WARN_ON(hyp_page_count(virt) != 1);
+ assert_transition_res(-EBUSY, __pkvm_host_unshare_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
+ assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
+
+ hyp_unpin_shared_mem(virt, virt + size);
+ assert_page_state();
+ WARN_ON(hyp_page_count(virt));
+
+ selftest_state.host = PKVM_PAGE_OWNED;
+ selftest_state.hyp = PKVM_NOPAGE;
+ assert_transition_res(0, __pkvm_host_unshare_hyp, pfn);
+
+ selftest_state.host = PKVM_PAGE_SHARED_OWNED;
+ selftest_state.hyp = PKVM_NOPAGE;
+ assert_transition_res(0, __pkvm_host_share_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
+ assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
+ assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);
+
+ selftest_state.host = PKVM_PAGE_OWNED;
+ selftest_state.hyp = PKVM_NOPAGE;
+ assert_transition_res(0, __pkvm_host_unshare_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);
+
+ selftest_state.host = PKVM_PAGE_SHARED_OWNED;
+ selftest_state.guest[0] = PKVM_PAGE_SHARED_BORROWED;
+ assert_transition_res(0, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
+ assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
+ assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
+ assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);
+
+ selftest_state.guest[1] = PKVM_PAGE_SHARED_BORROWED;
+ assert_transition_res(0, __pkvm_host_share_guest, pfn, gfn + 1, 1, vcpu, prot);
+ WARN_ON(hyp_virt_to_page(virt)->host_share_guest_count != 2);
+
+ selftest_state.guest[0] = PKVM_NOPAGE;
+ assert_transition_res(0, __pkvm_host_unshare_guest, gfn, 1, vm);
+
+ selftest_state.guest[1] = PKVM_NOPAGE;
+ selftest_state.host = PKVM_PAGE_OWNED;
+ assert_transition_res(0, __pkvm_host_unshare_guest, gfn + 1, 1, vm);
+
+ selftest_state.host = PKVM_NOPAGE;
+ selftest_state.hyp = PKVM_PAGE_OWNED;
+ assert_transition_res(0, __pkvm_host_donate_hyp, pfn, 1);
+
+ selftest_page->refcount = 1;
+ hyp_put_page(&host_s2_pool, virt);
+}
+#endif
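
The mem_protect.c changes above hinge on __guest_check_transition_size(): a host/guest share or unshare transition is either a single page or exactly one block at the second-to-last page-table level, with both the physical address and the IPA aligned to that block. A minimal standalone sketch of that check (plain C; the 4 KiB page and 2 MiB block sizes are illustrative assumptions, the hypervisor derives the real block size from its page-table geometry):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define PAGE_SIZE    (1ULL << PAGE_SHIFT)   /* 4 KiB, assumed for this sketch */
#define BLOCK_SIZE   (1ULL << 21)           /* 2 MiB PMD-level block, assumed */

/* Mirror of the transition-size check: one page or exactly one aligned block. */
static int check_transition_size(uint64_t phys, uint64_t ipa,
                                 uint64_t nr_pages, uint64_t *size)
{
	if (nr_pages == 1) {
		*size = PAGE_SIZE;
		return 0;
	}

	if (nr_pages != BLOCK_SIZE >> PAGE_SHIFT)
		return -1;                  /* not a whole block */

	if ((phys | ipa) & (BLOCK_SIZE - 1))
		return -1;                  /* misaligned for a block mapping */

	*size = BLOCK_SIZE;
	return 0;
}

int main(void)
{
	uint64_t size;

	printf("%d\n", check_transition_size(0x200000, 0x80200000, 512, &size)); /* 0: one 2 MiB block */
	printf("%d\n", check_transition_size(0x201000, 0x80200000, 512, &size)); /* -1: phys misaligned */
	printf("%d\n", check_transition_size(0x201000, 0x80200000, 1, &size));   /* 0: single page */
	return 0;
}

Anything in between (say, 16 pages) is rejected, which is why the host-facing hypercalls only ever pass nr_pages of 1 or a full block.
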
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index f41c7440b34b..ae8391baebc3 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -229,9 +229,8 @@ int hyp_map_vectors(void)
return 0;
}
-void *hyp_fixmap_map(phys_addr_t phys)
+static void *fixmap_map_slot(struct hyp_fixmap_slot *slot, phys_addr_t phys)
{
- struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
kvm_pte_t pte, *ptep = slot->ptep;
pte = *ptep;
@@ -243,10 +242,21 @@ void *hyp_fixmap_map(phys_addr_t phys)
return (void *)slot->addr;
}
+void *hyp_fixmap_map(phys_addr_t phys)
+{
+ return fixmap_map_slot(this_cpu_ptr(&fixmap_slots), phys);
+}
+
static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
{
kvm_pte_t *ptep = slot->ptep;
u64 addr = slot->addr;
+ u32 level;
+
+ if (FIELD_GET(KVM_PTE_TYPE, *ptep) == KVM_PTE_TYPE_PAGE)
+ level = KVM_PGTABLE_LAST_LEVEL;
+ else
+ level = KVM_PGTABLE_LAST_LEVEL - 1; /* create_fixblock() guarantees PMD level */
WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);
@@ -260,7 +270,7 @@ static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
* https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
*/
dsb(ishst);
- __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), KVM_PGTABLE_LAST_LEVEL);
+ __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
dsb(ish);
isb();
}
@@ -273,9 +283,9 @@ void hyp_fixmap_unmap(void)
static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx,
enum kvm_pgtable_walk_flags visit)
{
- struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg);
+ struct hyp_fixmap_slot *slot = (struct hyp_fixmap_slot *)ctx->arg;
- if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_LAST_LEVEL)
+ if (!kvm_pte_valid(ctx->old) || (ctx->end - ctx->start) != kvm_granule_size(ctx->level))
return -EINVAL;
slot->addr = ctx->addr;
@@ -296,13 +306,84 @@ static int create_fixmap_slot(u64 addr, u64 cpu)
struct kvm_pgtable_walker walker = {
.cb = __create_fixmap_slot_cb,
.flags = KVM_PGTABLE_WALK_LEAF,
- .arg = (void *)cpu,
+ .arg = per_cpu_ptr(&fixmap_slots, cpu),
};
return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
}
-int hyp_create_pcpu_fixmap(void)
+#if PAGE_SHIFT < 16
+#define HAS_FIXBLOCK
+static struct hyp_fixmap_slot hyp_fixblock_slot;
+static DEFINE_HYP_SPINLOCK(hyp_fixblock_lock);
+#endif
+
+static int create_fixblock(void)
+{
+#ifdef HAS_FIXBLOCK
+ struct kvm_pgtable_walker walker = {
+ .cb = __create_fixmap_slot_cb,
+ .flags = KVM_PGTABLE_WALK_LEAF,
+ .arg = &hyp_fixblock_slot,
+ };
+ unsigned long addr;
+ phys_addr_t phys;
+ int ret, i;
+
+ /* Find a RAM phys address, PMD aligned */
+ for (i = 0; i < hyp_memblock_nr; i++) {
+ phys = ALIGN(hyp_memory[i].base, PMD_SIZE);
+ if (phys + PMD_SIZE < (hyp_memory[i].base + hyp_memory[i].size))
+ break;
+ }
+
+ if (i >= hyp_memblock_nr)
+ return -EINVAL;
+
+ hyp_spin_lock(&pkvm_pgd_lock);
+ addr = ALIGN(__io_map_base, PMD_SIZE);
+ ret = __pkvm_alloc_private_va_range(addr, PMD_SIZE);
+ if (ret)
+ goto unlock;
+
+ ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PMD_SIZE, phys, PAGE_HYP);
+ if (ret)
+ goto unlock;
+
+ ret = kvm_pgtable_walk(&pkvm_pgtable, addr, PMD_SIZE, &walker);
+
+unlock:
+ hyp_spin_unlock(&pkvm_pgd_lock);
+
+ return ret;
+#else
+ return 0;
+#endif
+}
+
+void *hyp_fixblock_map(phys_addr_t phys, size_t *size)
+{
+#ifdef HAS_FIXBLOCK
+ *size = PMD_SIZE;
+ hyp_spin_lock(&hyp_fixblock_lock);
+ return fixmap_map_slot(&hyp_fixblock_slot, phys);
+#else
+ *size = PAGE_SIZE;
+ return hyp_fixmap_map(phys);
+#endif
+}
+
+void hyp_fixblock_unmap(void)
+{
+#ifdef HAS_FIXBLOCK
+ fixmap_clear_slot(&hyp_fixblock_slot);
+ hyp_spin_unlock(&hyp_fixblock_lock);
+#else
+ hyp_fixmap_unmap();
+#endif
+}
+
+int hyp_create_fixmap(void)
{
unsigned long addr, i;
int ret;
@@ -322,7 +403,7 @@ int hyp_create_pcpu_fixmap(void)
return ret;
}
- return 0;
+ return create_fixblock();
}
int hyp_create_idmap(u32 hyp_va_bits)
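
The fixblock path above reuses the per-CPU fixmap machinery but must invalidate at the right level when a slot is torn down, which is why fixmap_clear_slot() now inspects the descriptor type bit. A small sketch of that decode (plain C; the bit position and the 4 KiB-granule level numbering follow the usual ARMv8 descriptor layout and are assumptions of this sketch, the kernel goes through the KVM_PTE_TYPE field helpers):

#include <stdint.h>
#include <stdio.h>

#define PTE_TYPE_BIT   (1ULL << 1)   /* bit[1] of a valid leaf: 1 = page, 0 = block */
#define LAST_LEVEL     3             /* assumed 4 KiB granule: levels 0..3 */

/* Pick the TLBI level hint used when tearing down a fixmap/fixblock slot. */
static int tlbi_level_for(uint64_t pte)
{
	if (pte & PTE_TYPE_BIT)
		return LAST_LEVEL;        /* per-CPU fixmap: a single page */
	return LAST_LEVEL - 1;            /* fixblock: a PMD-sized block */
}

int main(void)
{
	printf("page  -> level %d\n", tlbi_level_for(0x0040000000000403ULL)); /* type bit set */
	printf("block -> level %d\n", tlbi_level_for(0x0040000000000401ULL)); /* type bit clear */
	return 0;
}

Users of the block-sized slot bracket their access with hyp_fixblock_map()/hyp_fixblock_unmap() under hyp_fixblock_lock, whereas the per-CPU fixmap gets away without a lock.
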
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 5a335a51deca..338505cb0171 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -372,6 +372,18 @@ static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}
+static void unpin_host_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ void *sve_state;
+
+ if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE))
+ return;
+
+ sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state);
+ hyp_unpin_shared_mem(sve_state,
+ sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu));
+}
+
static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
unsigned int nr_vcpus)
{
@@ -384,6 +396,7 @@ static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
continue;
unpin_host_vcpu(hyp_vcpu->host_vcpu);
+ unpin_host_sve_state(hyp_vcpu);
}
}
@@ -398,12 +411,40 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
pkvm_init_features_from_host(hyp_vm, host_kvm);
}
-static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
+static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
{
struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
+ unsigned int sve_max_vl;
+ size_t sve_state_size;
+ void *sve_state;
+ int ret = 0;
- if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
+ if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
+ return 0;
+ }
+
+ /* Limit guest vector length to the maximum supported by the host. */
+ sve_max_vl = min(READ_ONCE(host_vcpu->arch.sve_max_vl), kvm_host_sve_max_vl);
+ sve_state_size = sve_state_size_from_vl(sve_max_vl);
+ sve_state = kern_hyp_va(READ_ONCE(host_vcpu->arch.sve_state));
+
+ if (!sve_state || !sve_state_size) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = hyp_pin_shared_mem(sve_state, sve_state + sve_state_size);
+ if (ret)
+ goto err;
+
+ vcpu->arch.sve_state = sve_state;
+ vcpu->arch.sve_max_vl = sve_max_vl;
+
+ return 0;
+err:
+ clear_bit(KVM_ARM_VCPU_SVE, vcpu->kvm->arch.vcpu_features);
+ return ret;
}
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
@@ -432,7 +473,7 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
if (ret)
goto done;
- pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
+ ret = pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
done:
if (ret)
unpin_host_vcpu(host_vcpu);
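
pkvm_vcpu_init_sve() now takes a reference on the host's SVE buffer instead of trusting it blindly: the advertised vector length is clamped to what the host supports, the buffer and its size are validated, and any failure degrades the vCPU to a non-SVE configuration. A rough standalone sketch of the clamp-and-validate step (plain C; the size formula is only an approximation of the SVE register file layout, the kernel's sve_state_size_from_vl() is authoritative):

#include <stddef.h>
#include <stdio.h>

/*
 * Rough stand-in for sve_state_size_from_vl(): 32 Z registers of vl bytes
 * each, plus 16 P registers and FFR of vl/8 bytes each. Only the shape of
 * the check matters here, not the exact layout.
 */
static size_t state_size_from_vl(unsigned int vl)
{
	return 32 * (size_t)vl + 17 * ((size_t)vl / 8);
}

static int init_guest_sve(unsigned int host_vl, unsigned int host_max_vl,
                          const void *host_state)
{
	unsigned int vl = host_vl < host_max_vl ? host_vl : host_max_vl;
	size_t size = state_size_from_vl(vl);

	/* Refuse a missing buffer or a zero-sized state outright. */
	if (!host_state || !size)
		return -1;

	/* The real code pins [host_state, host_state + size) at this point. */
	printf("pinning %zu bytes of SVE state, vl = %u bytes\n", size, vl);
	return 0;
}

int main(void)
{
	static char buf[16384];

	init_guest_sve(512, 256, buf);   /* clamped to the host maximum */
	init_guest_sve(256, 256, NULL);  /* rejected: no buffer to pin */
	return 0;
}

The matching unpin_host_sve_state() runs at VM teardown, so the reference taken here never leaks.
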
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index d62bcb5634a2..a48d3f5a5afb 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -28,6 +28,7 @@ static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
+static void *selftest_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;
@@ -38,6 +39,11 @@ static int divide_memory_pool(void *virt, unsigned long size)
hyp_early_alloc_init(virt, size);
+ nr_pages = pkvm_selftest_pages();
+ selftest_base = hyp_early_alloc_contig(nr_pages);
+ if (nr_pages && !selftest_base)
+ return -ENOMEM;
+
nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
vmemmap_base = hyp_early_alloc_contig(nr_pages);
if (!vmemmap_base)
@@ -119,6 +125,10 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
if (ret)
return ret;
+ ret = pkvm_create_mappings(__hyp_data_start, __hyp_data_end, PAGE_HYP);
+ if (ret)
+ return ret;
+
ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
if (ret)
return ret;
@@ -180,6 +190,7 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
enum kvm_pgtable_walk_flags visit)
{
enum pkvm_page_state state;
+ struct hyp_page *page;
phys_addr_t phys;
if (!kvm_pte_valid(ctx->old))
@@ -192,19 +203,25 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
if (!addr_is_memory(phys))
return -EINVAL;
+ page = hyp_phys_to_page(phys);
+
/*
* Adjust the host stage-2 mappings to match the ownership attributes
- * configured in the hypervisor stage-1.
+ * configured in the hypervisor stage-1, and make sure to propagate them
+ * to the hyp_vmemmap state.
*/
state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
switch (state) {
case PKVM_PAGE_OWNED:
+ set_hyp_state(page, PKVM_PAGE_OWNED);
return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
case PKVM_PAGE_SHARED_OWNED:
- hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_BORROWED;
+ set_hyp_state(page, PKVM_PAGE_SHARED_OWNED);
+ set_host_state(page, PKVM_PAGE_SHARED_BORROWED);
break;
case PKVM_PAGE_SHARED_BORROWED:
- hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_OWNED;
+ set_hyp_state(page, PKVM_PAGE_SHARED_BORROWED);
+ set_host_state(page, PKVM_PAGE_SHARED_OWNED);
break;
default:
return -EINVAL;
@@ -295,7 +312,7 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
- ret = hyp_create_pcpu_fixmap();
+ ret = hyp_create_fixmap();
if (ret)
goto out;
@@ -304,6 +321,8 @@ void __noreturn __pkvm_init_finalise(void)
goto out;
pkvm_hyp_vm_table_init(vm_table_base);
+
+ pkvm_ownership_selftest(selftest_base);
out:
/*
* We tail-called to here from handle___pkvm_init() and will not return,
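
fix_host_ownership_walker() now keeps the hyp_vmemmap in sync with the ownership it infers from the hypervisor's own stage-1 annotations. The mapping from the annotated state to the two per-page fields roughly follows this sketch (plain C; the enum names are simplified stand-ins for the PKVM_PAGE_* states):

#include <stdio.h>

enum page_state { NOPAGE, OWNED, SHARED_OWNED, SHARED_BORROWED };

struct vmemmap_entry { enum page_state hyp, host; };

/*
 * How a page annotated in the hyp stage-1 at boot translates into the two
 * per-page ownership fields (sketch of the walker's switch statement).
 */
static int classify(enum page_state annotated, struct vmemmap_entry *e)
{
	switch (annotated) {
	case OWNED:              /* private hyp page: the host loses access */
		e->hyp = OWNED;
		e->host = NOPAGE;
		return 0;
	case SHARED_OWNED:       /* hyp owns it, the host borrows it */
		e->hyp = SHARED_OWNED;
		e->host = SHARED_BORROWED;
		return 0;
	case SHARED_BORROWED:    /* the host owns it, hyp borrows it */
		e->hyp = SHARED_BORROWED;
		e->host = SHARED_OWNED;
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	struct vmemmap_entry e;

	if (!classify(SHARED_OWNED, &e))
		printf("hyp=%d host=%d\n", e.hyp, e.host);
	return 0;
}
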
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 7d2ba6ef0261..73affe1333a4 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -33,6 +33,18 @@ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+struct fgt_masks hfgrtr_masks;
+struct fgt_masks hfgwtr_masks;
+struct fgt_masks hfgitr_masks;
+struct fgt_masks hdfgrtr_masks;
+struct fgt_masks hdfgwtr_masks;
+struct fgt_masks hafgrtr_masks;
+struct fgt_masks hfgrtr2_masks;
+struct fgt_masks hfgwtr2_masks;
+struct fgt_masks hfgitr2_masks;
+struct fgt_masks hdfgrtr2_masks;
+struct fgt_masks hdfgwtr2_masks;
+
extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
@@ -142,7 +154,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
__deactivate_traps_common(vcpu);
- write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
+ write_sysreg_hcr(this_cpu_ptr(&kvm_init_params)->hcr_el2);
__deactivate_cptr_traps(vcpu);
write_sysreg(__kvm_hyp_host_vector, vbar_el2);
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index df5cc74a7dd0..c351b4abd5db 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -11,12 +11,6 @@
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>
-
-#define KVM_PTE_TYPE BIT(1)
-#define KVM_PTE_TYPE_BLOCK 0
-#define KVM_PTE_TYPE_PAGE 1
-#define KVM_PTE_TYPE_TABLE 1
-
struct kvm_pgtable_walk_data {
struct kvm_pgtable_walker *walker;
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index ed363aa3027e..f162b0df5cae 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -429,23 +429,27 @@ u64 __vgic_v3_get_gic_config(void)
/*
* To check whether we have a MMIO-based (GICv2 compatible)
* CPU interface, we need to disable the system register
- * view. To do that safely, we have to prevent any interrupt
- * from firing (which would be deadly).
+ * view.
*
- * Note that this only makes sense on VHE, as interrupts are
- * already masked for nVHE as part of the exception entry to
- * EL2.
- */
- if (has_vhe())
- flags = local_daif_save();
-
- /*
* Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
* that to be able to set ICC_SRE_EL1.SRE to 0, all the
* interrupt overrides must be set. You've got to love this.
+ *
+ * As we always run VHE with HCR_xMO set, no extra xMO
+ * manipulation is required in that case.
+ *
+ * To safely disable SRE, we have to prevent any interrupt
+ * from firing (which would be deadly). This only makes sense
+ * on VHE, as interrupts are already masked for nVHE as part
+ * of the exception entry to EL2.
*/
- sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
- isb();
+ if (has_vhe()) {
+ flags = local_daif_save();
+ } else {
+ sysreg_clear_set_hcr(0, HCR_AMO | HCR_FMO | HCR_IMO);
+ isb();
+ }
+
write_gicreg(0, ICC_SRE_EL1);
isb();
@@ -453,11 +457,13 @@ u64 __vgic_v3_get_gic_config(void)
write_gicreg(sre, ICC_SRE_EL1);
isb();
- sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
- isb();
- if (has_vhe())
+ if (has_vhe()) {
local_daif_restore(flags);
+ } else {
+ sysreg_clear_set_hcr(HCR_AMO | HCR_FMO | HCR_IMO, 0);
+ isb();
+ }
val = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
val |= read_gicreg(ICH_VTR_EL2);
@@ -1052,11 +1058,11 @@ static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu,
switch (sysreg) {
case SYS_ICC_IGRPEN0_EL1:
if (is_read &&
- (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1))
+ (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGRTR_EL2_ICC_IGRPENn_EL1))
return true;
if (!is_read &&
- (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1))
+ (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGWTR_EL2_ICC_IGRPENn_EL1))
return true;
fallthrough;
@@ -1073,11 +1079,11 @@ static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu,
case SYS_ICC_IGRPEN1_EL1:
if (is_read &&
- (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1))
+ (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGRTR_EL2_ICC_IGRPENn_EL1))
return true;
if (!is_read &&
- (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1))
+ (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGWTR_EL2_ICC_IGRPENn_EL1))
return true;
fallthrough;
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 731a0378ed13..c9b330dc2066 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -48,21 +48,46 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
static u64 __compute_hcr(struct kvm_vcpu *vcpu)
{
+ u64 guest_hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
u64 hcr = vcpu->arch.hcr_el2;
if (!vcpu_has_nv(vcpu))
return hcr;
+ /*
+ * We rely on the invariant that a vcpu entered from HYP
+ * context must also exit in the same context, as only an ERET
+ * instruction can kick us out of it, and we obviously trap
+ * that sucker. PSTATE.M will get fixed-up on exit.
+ */
if (is_hyp_ctxt(vcpu)) {
+ host_data_set_flag(VCPU_IN_HYP_CONTEXT);
+
hcr |= HCR_NV | HCR_NV2 | HCR_AT | HCR_TTLB;
if (!vcpu_el2_e2h_is_set(vcpu))
hcr |= HCR_NV1;
write_sysreg_s(vcpu->arch.ctxt.vncr_array, SYS_VNCR_EL2);
+ } else {
+ host_data_clear_flag(VCPU_IN_HYP_CONTEXT);
+
+ if (guest_hcr & HCR_NV) {
+ u64 va = __fix_to_virt(vncr_fixmap(smp_processor_id()));
+
+ /* Inherit the low bits from the actual register */
+ va |= __vcpu_sys_reg(vcpu, VNCR_EL2) & GENMASK(PAGE_SHIFT - 1, 0);
+ write_sysreg_s(va, SYS_VNCR_EL2);
+
+ /* Force NV2 in case the guest is forgetful... */
+ guest_hcr |= HCR_NV2;
+ }
}
- return hcr | (__vcpu_sys_reg(vcpu, HCR_EL2) & ~NV_HCR_GUEST_EXCLUDE);
+ BUG_ON(host_data_test_flag(VCPU_IN_HYP_CONTEXT) &&
+ host_data_test_flag(L1_VNCR_MAPPED));
+
+ return hcr | (guest_hcr & ~NV_HCR_GUEST_EXCLUDE);
}
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
@@ -184,7 +209,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
___deactivate_traps(vcpu);
- write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+ write_sysreg_hcr(HCR_HOST_VHE_FLAGS);
if (has_cntpoff()) {
struct timer_map map;
@@ -459,6 +484,14 @@ static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
if (ret)
return false;
+ /*
+ * If we have to check for any VNCR mapping being invalidated,
+ * go back to the slow path for further processing.
+ */
+ if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu) &&
+ atomic_read(&vcpu->kvm->arch.vncr_map_count))
+ return false;
+
__kvm_skip_instr(vcpu);
return true;
@@ -568,9 +601,12 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
/*
* If we were in HYP context on entry, adjust the PSTATE view
- * so that the usual helpers work correctly.
+ * so that the usual helpers work correctly. This enforces our
+ * invariant that the guest's HYP context status is preserved
+ * across a run.
*/
- if (vcpu_has_nv(vcpu) && (read_sysreg(hcr_el2) & HCR_NV)) {
+ if (vcpu_has_nv(vcpu) &&
+ unlikely(host_data_test_flag(VCPU_IN_HYP_CONTEXT))) {
u64 mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
switch (mode) {
@@ -586,6 +622,10 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
*vcpu_cpsr(vcpu) |= mode;
}
+ /* Apply extreme paranoia! */
+ BUG_ON(vcpu_has_nv(vcpu) &&
+ !!host_data_test_flag(VCPU_IN_HYP_CONTEXT) != is_hyp_ctxt(vcpu));
+
return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
}
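
When the L1 guest runs with HCR_EL2.NV set, the VNCR_EL2 value handed to hardware is no longer the guest's: it points at a per-CPU fixmap slot, with only the page-offset bits inherited from what the guest programmed. A sketch of that address composition (plain C; the 4 KiB page size and the example addresses are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                          /* assumed 4 KiB pages */
#define PAGE_MASK  ((1ULL << PAGE_SHIFT) - 1)

/*
 * Combine the page-aligned per-CPU fixmap VA with the page offset the guest
 * programmed into VNCR_EL2, as done before entering the guest with NV set.
 */
static uint64_t hw_vncr(uint64_t fixmap_va, uint64_t guest_vncr)
{
	return (fixmap_va & ~PAGE_MASK) | (guest_vncr & PAGE_MASK);
}

int main(void)
{
	uint64_t va = hw_vncr(0xffff800000a00000ULL, 0x0000000087654800ULL);

	printf("0x%llx\n", (unsigned long long)va);   /* 0xffff800000a00800 */
	return 0;
}

The BUG_ON added to __compute_hcr() then simply asserts that a vcpu flagged as being in HYP context never has such a mapping live at the same time.
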
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index 3d50a1bd2bdb..ec2569818629 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -63,7 +63,7 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu,
__load_stage2(mmu, mmu->arch);
val = read_sysreg(hcr_el2);
val &= ~HCR_TGE;
- write_sysreg(val, hcr_el2);
+ write_sysreg_hcr(val);
isb();
}
@@ -73,7 +73,7 @@ static void exit_vmid_context(struct tlb_inv_context *cxt)
* We're done with the TLB operation, let's restore the host's
* view of HCR_EL2.
*/
- write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+ write_sysreg_hcr(HCR_HOST_VHE_FLAGS);
isb();
/* ... and the stage-2 MMU context that we switched away from */
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 569941eeb3fe..58c5fe7d7572 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -270,6 +270,7 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
u32 feature;
u8 action;
gpa_t gpa;
+ uuid_t uuid;
action = kvm_smccc_get_action(vcpu, func_id);
switch (action) {
@@ -355,10 +356,11 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
val[0] = gpa;
break;
case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
- val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0;
- val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1;
- val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2;
- val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
+ uuid = ARM_SMCCC_VENDOR_HYP_UID_KVM;
+ val[0] = smccc_uuid_to_reg(&uuid, 0);
+ val[1] = smccc_uuid_to_reg(&uuid, 1);
+ val[2] = smccc_uuid_to_reg(&uuid, 2);
+ val[3] = smccc_uuid_to_reg(&uuid, 3);
break;
case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
val[0] = smccc_feat->vendor_hyp_bmap;
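
The UID call now derives its four return registers from a uuid_t at run time rather than from hard-coded constants. Each 32-bit register carries four consecutive bytes of the UUID, least-significant byte first, which is how the old ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_n values were laid out by hand. A standalone sketch of that packing (plain C; uuid_to_reg below is a local stand-in, not the kernel's smccc_uuid_to_reg()):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint8_t b[16]; } uuid_t;

/* Pack 4 consecutive UUID bytes into one 32-bit SMCCC return register. */
static uint32_t uuid_to_reg(const uuid_t *u, int reg)
{
	const uint8_t *p = &u->b[reg * 4];

	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* 28b46fb6-2ec5-11e9-a9ca-4b564d003a74, the KVM vendor-hyp UID */
	const uuid_t kvm_uid = { .b = { 0x28, 0xb4, 0x6f, 0xb6, 0x2e, 0xc5,
	                                0x11, 0xe9, 0xa9, 0xca, 0x4b, 0x56,
	                                0x4d, 0x00, 0x3a, 0x74 } };

	for (int i = 0; i < 4; i++)
		printf("w%d = 0x%08x\n", i, uuid_to_reg(&kvm_uid, i));
	return 0;
}

Running this prints w0 = 0xb66fb428 and friends, matching the constants the diff removes.
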
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 2feb6c6b63af..2942ec92c5a4 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1304,6 +1304,10 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
if (map_size == PAGE_SIZE)
return true;
+ /* pKVM only supports PMD_SIZE huge-mappings */
+ if (is_protected_kvm_enabled() && map_size != PMD_SIZE)
+ return false;
+
size = memslot->npages * PAGE_SIZE;
gpa_start = memslot->base_gfn << PAGE_SHIFT;
@@ -1501,6 +1505,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
+ if (!is_protected_kvm_enabled())
+ memcache = &vcpu->arch.mmu_page_cache;
+ else
+ memcache = &vcpu->arch.pkvm_memcache;
+
/*
* Permission faults just need to update the existing leaf entry,
* and so normally don't require allocations from the memcache. The
@@ -1510,13 +1519,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (!fault_is_perm || (logging_active && write_fault)) {
int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
- if (!is_protected_kvm_enabled()) {
- memcache = &vcpu->arch.mmu_page_cache;
+ if (!is_protected_kvm_enabled())
ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
- } else {
- memcache = &vcpu->arch.pkvm_memcache;
+ else
ret = topup_hyp_memcache(memcache, min_pages);
- }
+
if (ret)
return ret;
}
@@ -1537,7 +1544,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* logging_active is guaranteed to never be true for VM_PFNMAP
* memslots.
*/
- if (logging_active || is_protected_kvm_enabled()) {
+ if (logging_active) {
force_pte = true;
vma_shift = PAGE_SHIFT;
} else {
@@ -1794,9 +1801,28 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
gfn_t gfn;
int ret, idx;
+ /* Synchronous External Abort? */
+ if (kvm_vcpu_abt_issea(vcpu)) {
+ /*
+ * For RAS the host kernel may handle this abort.
+ * There is no need to pass the error into the guest.
+ */
+ if (kvm_handle_guest_sea())
+ kvm_inject_vabt(vcpu);
+
+ return 1;
+ }
+
esr = kvm_vcpu_get_esr(vcpu);
+ /*
+ * The fault IPA should be reliable at this point as we're not dealing
+ * with an SEA.
+ */
ipa = fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
+ if (KVM_BUG_ON(ipa == INVALID_GPA, vcpu->kvm))
+ return -EFAULT;
+
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
if (esr_fsc_is_translation_fault(esr)) {
@@ -1818,18 +1844,6 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
}
}
- /* Synchronous External Abort? */
- if (kvm_vcpu_abt_issea(vcpu)) {
- /*
- * For RAS the host kernel may handle this abort.
- * There is no need to pass the error into the guest.
- */
- if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
- kvm_inject_vabt(vcpu);
-
- return 1;
- }
-
trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
kvm_vcpu_get_hfar(vcpu), fault_ipa);
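
Two small but related mmu.c adjustments: the memcache is now picked once per fault (pKVM vs. regular stage-2), and stage-2 huge mappings are restricted to PMD_SIZE when protected mode is enabled, since that is the only block size the share/unshare hypercalls accept. A sketch of the latter restriction (plain C; the sizes assume 4 KiB pages and the usual 2 MiB/1 GiB block sizes):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE (1ULL << 12)   /* assumed 4 KiB */
#define PMD_SIZE  (1ULL << 21)   /* assumed 2 MiB */
#define PUD_SIZE  (1ULL << 30)   /* assumed 1 GiB */

/*
 * Extra pKVM restriction on stage-2 huge mappings: any block size other
 * than PAGE_SIZE is acceptable only if it is exactly PMD_SIZE.
 */
static bool map_size_allowed(bool protected_mode, uint64_t map_size)
{
	if (map_size == PAGE_SIZE)
		return true;
	if (protected_mode && map_size != PMD_SIZE)
		return false;
	return true;   /* remaining alignment checks elided */
}

int main(void)
{
	printf("%d %d %d\n",
	       map_size_allowed(true, PMD_SIZE),    /* 1 */
	       map_size_allowed(true, PUD_SIZE),    /* 0 */
	       map_size_allowed(false, PUD_SIZE));  /* 1 */
	return 0;
}
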
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 4a3fc11f7ecf..4a53e4147fb0 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -8,6 +8,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <asm/fixmap.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
@@ -16,6 +17,24 @@
#include "sys_regs.h"
+struct vncr_tlb {
+ /* The guest's VNCR_EL2 */
+ u64 gva;
+ struct s1_walk_info wi;
+ struct s1_walk_result wr;
+
+ u64 hpa;
+
+ /* -1 when not mapped on a CPU */
+ int cpu;
+
+ /*
+ * true if the TLB is valid. Can only be changed with the
+ * mmu_lock held.
+ */
+ bool valid;
+};
+
/*
* Ratio of live shadow S2 MMU per vcpu. This is a trade-off between
* memory usage and potential number of different sets of S2 PTs in
@@ -28,6 +47,7 @@ void kvm_init_nested(struct kvm *kvm)
{
kvm->arch.nested_mmus = NULL;
kvm->arch.nested_mmus_size = 0;
+ atomic_set(&kvm->arch.vncr_map_count, 0);
}
static int init_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
@@ -55,6 +75,13 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
return -EINVAL;
+ if (!vcpu->arch.ctxt.vncr_array)
+ vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL_ACCOUNT |
+ __GFP_ZERO);
+
+ if (!vcpu->arch.ctxt.vncr_array)
+ return -ENOMEM;
+
/*
* Let's treat memory allocation failures as benign: If we fail to
* allocate anything, return an error and keep the allocated array
@@ -85,6 +112,9 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
for (int i = kvm->arch.nested_mmus_size; i < num_mmus; i++)
kvm_free_stage2_pgd(&kvm->arch.nested_mmus[i]);
+ free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
+ vcpu->arch.ctxt.vncr_array = NULL;
+
return ret;
}
@@ -405,6 +435,30 @@ static unsigned int ttl_to_size(u8 ttl)
return max_size;
}
+static u8 pgshift_level_to_ttl(u16 shift, u8 level)
+{
+ u8 ttl;
+
+	switch (shift) {
+ case 12:
+ ttl = TLBI_TTL_TG_4K;
+ break;
+ case 14:
+ ttl = TLBI_TTL_TG_16K;
+ break;
+ case 16:
+ ttl = TLBI_TTL_TG_64K;
+ break;
+ default:
+ BUG();
+ }
+
+ ttl <<= 2;
+ ttl |= level & 3;
+
+ return ttl;
+}
+
/*
* Compute the equivalent of the TTL field by parsing the shadow PT. The
* granule size is extracted from the cached VTCR_EL2.TG0 while the level is
@@ -676,23 +730,36 @@ void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu)
void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu)
{
/*
- * The vCPU kept its reference on the MMU after the last put, keep
- * rolling with it.
+ * If the vCPU kept its reference on the MMU after the last put,
+ * keep rolling with it.
*/
- if (vcpu->arch.hw_mmu)
- return;
-
if (is_hyp_ctxt(vcpu)) {
- vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
+ if (!vcpu->arch.hw_mmu)
+ vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
} else {
- write_lock(&vcpu->kvm->mmu_lock);
- vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu);
- write_unlock(&vcpu->kvm->mmu_lock);
+ if (!vcpu->arch.hw_mmu) {
+ scoped_guard(write_lock, &vcpu->kvm->mmu_lock)
+ vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu);
+ }
+
+ if (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_NV)
+ kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu);
}
}
void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu)
{
+ /* Unconditionally drop the VNCR mapping if we have one */
+ if (host_data_test_flag(L1_VNCR_MAPPED)) {
+ BUG_ON(vcpu->arch.vncr_tlb->cpu != smp_processor_id());
+ BUG_ON(is_hyp_ctxt(vcpu));
+
+ clear_fixmap(vncr_fixmap(vcpu->arch.vncr_tlb->cpu));
+ vcpu->arch.vncr_tlb->cpu = -1;
+ host_data_clear_flag(L1_VNCR_MAPPED);
+ atomic_dec(&vcpu->kvm->arch.vncr_map_count);
+ }
+
/*
* Keep a reference on the associated stage-2 MMU if the vCPU is
* scheduling out and not in WFI emulation, suggesting it is likely to
@@ -743,6 +810,247 @@ int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
return kvm_inject_nested_sync(vcpu, esr_el2);
}
+static void invalidate_vncr(struct vncr_tlb *vt)
+{
+ vt->valid = false;
+ if (vt->cpu != -1)
+ clear_fixmap(vncr_fixmap(vt->cpu));
+}
+
+static void kvm_invalidate_vncr_ipa(struct kvm *kvm, u64 start, u64 end)
+{
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
+ return;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+ u64 ipa_start, ipa_end, ipa_size;
+
+ /*
+ * Careful here: We end-up here from an MMU notifier,
+ * and this can race against a vcpu not being onlined
+ * yet, without the pseudo-TLB being allocated.
+ *
+ * Skip those, as they obviously don't participate in
+ * the invalidation at this stage.
+ */
+ if (!vt)
+ continue;
+
+ if (!vt->valid)
+ continue;
+
+ ipa_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
+ vt->wr.level));
+ ipa_start = vt->wr.pa & (ipa_size - 1);
+ ipa_end = ipa_start + ipa_size;
+
+ if (ipa_end <= start || ipa_start >= end)
+ continue;
+
+ invalidate_vncr(vt);
+ }
+}
+
+struct s1e2_tlbi_scope {
+ enum {
+ TLBI_ALL,
+ TLBI_VA,
+ TLBI_VAA,
+ TLBI_ASID,
+ } type;
+
+ u16 asid;
+ u64 va;
+ u64 size;
+};
+
+static void invalidate_vncr_va(struct kvm *kvm,
+ struct s1e2_tlbi_scope *scope)
+{
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+ u64 va_start, va_end, va_size;
+
+ if (!vt->valid)
+ continue;
+
+ va_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
+ vt->wr.level));
+ va_start = vt->gva & (va_size - 1);
+ va_end = va_start + va_size;
+
+ switch (scope->type) {
+ case TLBI_ALL:
+ break;
+
+ case TLBI_VA:
+ if (va_end <= scope->va ||
+ va_start >= (scope->va + scope->size))
+ continue;
+ if (vt->wr.nG && vt->wr.asid != scope->asid)
+ continue;
+ break;
+
+ case TLBI_VAA:
+ if (va_end <= scope->va ||
+ va_start >= (scope->va + scope->size))
+ continue;
+ break;
+
+ case TLBI_ASID:
+ if (!vt->wr.nG || vt->wr.asid != scope->asid)
+ continue;
+ break;
+ }
+
+ invalidate_vncr(vt);
+ }
+}
+
+#define tlbi_va_s1_to_va(v) (u64)sign_extend64((v) << 12, 48)
+
+static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
+ struct s1e2_tlbi_scope *scope)
+{
+ switch (inst) {
+ case OP_TLBI_ALLE2:
+ case OP_TLBI_ALLE2IS:
+ case OP_TLBI_ALLE2OS:
+ case OP_TLBI_VMALLE1:
+ case OP_TLBI_VMALLE1IS:
+ case OP_TLBI_VMALLE1OS:
+ case OP_TLBI_ALLE2NXS:
+ case OP_TLBI_ALLE2ISNXS:
+ case OP_TLBI_ALLE2OSNXS:
+ case OP_TLBI_VMALLE1NXS:
+ case OP_TLBI_VMALLE1ISNXS:
+ case OP_TLBI_VMALLE1OSNXS:
+ scope->type = TLBI_ALL;
+ break;
+ case OP_TLBI_VAE2:
+ case OP_TLBI_VAE2IS:
+ case OP_TLBI_VAE2OS:
+ case OP_TLBI_VAE1:
+ case OP_TLBI_VAE1IS:
+ case OP_TLBI_VAE1OS:
+ case OP_TLBI_VAE2NXS:
+ case OP_TLBI_VAE2ISNXS:
+ case OP_TLBI_VAE2OSNXS:
+ case OP_TLBI_VAE1NXS:
+ case OP_TLBI_VAE1ISNXS:
+ case OP_TLBI_VAE1OSNXS:
+ case OP_TLBI_VALE2:
+ case OP_TLBI_VALE2IS:
+ case OP_TLBI_VALE2OS:
+ case OP_TLBI_VALE1:
+ case OP_TLBI_VALE1IS:
+ case OP_TLBI_VALE1OS:
+ case OP_TLBI_VALE2NXS:
+ case OP_TLBI_VALE2ISNXS:
+ case OP_TLBI_VALE2OSNXS:
+ case OP_TLBI_VALE1NXS:
+ case OP_TLBI_VALE1ISNXS:
+ case OP_TLBI_VALE1OSNXS:
+ scope->type = TLBI_VA;
+ scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
+ if (!scope->size)
+ scope->size = SZ_1G;
+ scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
+ scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
+ break;
+ case OP_TLBI_ASIDE1:
+ case OP_TLBI_ASIDE1IS:
+ case OP_TLBI_ASIDE1OS:
+ case OP_TLBI_ASIDE1NXS:
+ case OP_TLBI_ASIDE1ISNXS:
+ case OP_TLBI_ASIDE1OSNXS:
+ scope->type = TLBI_ASID;
+ scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
+ break;
+ case OP_TLBI_VAAE1:
+ case OP_TLBI_VAAE1IS:
+ case OP_TLBI_VAAE1OS:
+ case OP_TLBI_VAAE1NXS:
+ case OP_TLBI_VAAE1ISNXS:
+ case OP_TLBI_VAAE1OSNXS:
+ case OP_TLBI_VAALE1:
+ case OP_TLBI_VAALE1IS:
+ case OP_TLBI_VAALE1OS:
+ case OP_TLBI_VAALE1NXS:
+ case OP_TLBI_VAALE1ISNXS:
+ case OP_TLBI_VAALE1OSNXS:
+ scope->type = TLBI_VAA;
+ scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
+ if (!scope->size)
+ scope->size = SZ_1G;
+ scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
+ break;
+ case OP_TLBI_RVAE2:
+ case OP_TLBI_RVAE2IS:
+ case OP_TLBI_RVAE2OS:
+ case OP_TLBI_RVAE1:
+ case OP_TLBI_RVAE1IS:
+ case OP_TLBI_RVAE1OS:
+ case OP_TLBI_RVAE2NXS:
+ case OP_TLBI_RVAE2ISNXS:
+ case OP_TLBI_RVAE2OSNXS:
+ case OP_TLBI_RVAE1NXS:
+ case OP_TLBI_RVAE1ISNXS:
+ case OP_TLBI_RVAE1OSNXS:
+ case OP_TLBI_RVALE2:
+ case OP_TLBI_RVALE2IS:
+ case OP_TLBI_RVALE2OS:
+ case OP_TLBI_RVALE1:
+ case OP_TLBI_RVALE1IS:
+ case OP_TLBI_RVALE1OS:
+ case OP_TLBI_RVALE2NXS:
+ case OP_TLBI_RVALE2ISNXS:
+ case OP_TLBI_RVALE2OSNXS:
+ case OP_TLBI_RVALE1NXS:
+ case OP_TLBI_RVALE1ISNXS:
+ case OP_TLBI_RVALE1OSNXS:
+ scope->type = TLBI_VA;
+ scope->va = decode_range_tlbi(val, &scope->size, &scope->asid);
+ break;
+ case OP_TLBI_RVAAE1:
+ case OP_TLBI_RVAAE1IS:
+ case OP_TLBI_RVAAE1OS:
+ case OP_TLBI_RVAAE1NXS:
+ case OP_TLBI_RVAAE1ISNXS:
+ case OP_TLBI_RVAAE1OSNXS:
+ case OP_TLBI_RVAALE1:
+ case OP_TLBI_RVAALE1IS:
+ case OP_TLBI_RVAALE1OS:
+ case OP_TLBI_RVAALE1NXS:
+ case OP_TLBI_RVAALE1ISNXS:
+ case OP_TLBI_RVAALE1OSNXS:
+ scope->type = TLBI_VAA;
+ scope->va = decode_range_tlbi(val, &scope->size, NULL);
+ break;
+ }
+}
+
+void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val)
+{
+ struct s1e2_tlbi_scope scope = {};
+
+ compute_s1_tlbi_range(vcpu, inst, val, &scope);
+
+ guard(write_lock)(&vcpu->kvm->mmu_lock);
+ invalidate_vncr_va(vcpu->kvm, &scope);
+}
+
void kvm_nested_s2_wp(struct kvm *kvm)
{
int i;
@@ -755,6 +1063,8 @@ void kvm_nested_s2_wp(struct kvm *kvm)
if (kvm_s2_mmu_valid(mmu))
kvm_stage2_wp_range(mmu, 0, kvm_phys_size(mmu));
}
+
+ kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));
}
void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
@@ -769,6 +1079,8 @@ void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
if (kvm_s2_mmu_valid(mmu))
kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block);
}
+
+ kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));
}
void kvm_nested_s2_flush(struct kvm *kvm)
@@ -802,6 +1114,295 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
}
/*
+ * Dealing with VNCR_EL2 exposed by the *guest* is a complicated matter:
+ *
+ * - We introduce an internal representation of a vcpu-private TLB,
+ * representing the mapping between the guest VA contained in VNCR_EL2,
+ * the IPA the guest's EL2 PTs point to, and the actual PA this lives at.
+ *
+ * - On translation fault from a nested VNCR access, we create such a TLB.
+ * If there is no mapping to describe, the guest inherits the fault.
+ * Crucially, no actual mapping is done at this stage.
+ *
+ * - On vcpu_load() in a non-HYP context with HCR_EL2.NV==1, if the above
+ * TLB exists, we map it in the fixmap for this CPU, and run with it. We
+ * have to respect the permissions dictated by the guest, but not the
+ * memory type (FWB is a must).
+ *
+ * - Note that we usually don't do a vcpu_load() on the back of a fault
+ * (unless we are preempted), so the resolution of a translation fault
+ * must go via a request that will map the VNCR page in the fixmap.
+ * vcpu_load() might as well use the same mechanism.
+ *
+ * - On vcpu_put() in a non-HYP context with HCR_EL2.NV==1, if the TLB was
+ * mapped, we unmap it. Yes it is that simple. The TLB still exists
+ * though, and may be reused at a later load.
+ *
+ * - On permission fault, we simply forward the fault to the guest's EL2.
+ * Get out of my way.
+ *
+ * - On any TLBI for the EL2&0 translation regime, we must find any TLB that
+ * intersects with the TLBI request, invalidate it, and unmap the page
+ * from the fixmap. Because we need to look at all the vcpu-private TLBs,
+ * this requires some wide-ranging locking to ensure that nothing races
+ * against it. This may require some refcounting to avoid the search when
+ * no such TLB is present.
+ *
+ * - On MMU notifiers, we must invalidate our TLB in a similar way, but
+ * looking at the IPA instead. The funny part is that there may not be a
+ * stage-2 mapping for this page if L1 hasn't accessed it using LD/ST
+ * instructions.
+ */
+
+int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
+ return 0;
+
+ vcpu->arch.vncr_tlb = kzalloc(sizeof(*vcpu->arch.vncr_tlb),
+ GFP_KERNEL_ACCOUNT);
+ if (!vcpu->arch.vncr_tlb)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static u64 read_vncr_el2(struct kvm_vcpu *vcpu)
+{
+ return (u64)sign_extend64(__vcpu_sys_reg(vcpu, VNCR_EL2), 48);
+}
+
+static int kvm_translate_vncr(struct kvm_vcpu *vcpu)
+{
+ bool write_fault, writable;
+ unsigned long mmu_seq;
+ struct vncr_tlb *vt;
+ struct page *page;
+ u64 va, pfn, gfn;
+ int ret;
+
+ vt = vcpu->arch.vncr_tlb;
+
+ /*
+ * If we're about to walk the EL2 S1 PTs, we must invalidate the
+ * current TLB, as it could be sampled from another vcpu doing a
+ * TLBI *IS. A real CPU wouldn't do that, but we only keep a single
+ * translation, so not much of a choice.
+ *
+	 * We also prepare the next walk whilst we're at it.
+ */
+ scoped_guard(write_lock, &vcpu->kvm->mmu_lock) {
+ invalidate_vncr(vt);
+
+ vt->wi = (struct s1_walk_info) {
+ .regime = TR_EL20,
+ .as_el0 = false,
+ .pan = false,
+ };
+ vt->wr = (struct s1_walk_result){};
+ }
+
+ guard(srcu)(&vcpu->kvm->srcu);
+
+ va = read_vncr_el2(vcpu);
+
+ ret = __kvm_translate_va(vcpu, &vt->wi, &vt->wr, va);
+ if (ret)
+ return ret;
+
+ write_fault = kvm_is_write_fault(vcpu);
+
+ mmu_seq = vcpu->kvm->mmu_invalidate_seq;
+ smp_rmb();
+
+ gfn = vt->wr.pa >> PAGE_SHIFT;
+ pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writable, &page);
+ if (is_error_noslot_pfn(pfn) || (write_fault && !writable))
+ return -EFAULT;
+
+ scoped_guard(write_lock, &vcpu->kvm->mmu_lock) {
+ if (mmu_invalidate_retry(vcpu->kvm, mmu_seq))
+ return -EAGAIN;
+
+ vt->gva = va;
+ vt->hpa = pfn << PAGE_SHIFT;
+ vt->valid = true;
+ vt->cpu = -1;
+
+ kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu);
+ kvm_release_faultin_page(vcpu->kvm, page, false, vt->wr.pw);
+ }
+
+ if (vt->wr.pw)
+ mark_page_dirty(vcpu->kvm, gfn);
+
+ return 0;
+}
+
+static void inject_vncr_perm(struct kvm_vcpu *vcpu)
+{
+ struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+
+ /* Adjust the fault level to reflect that of the guest's */
+ esr &= ~ESR_ELx_FSC;
+ esr |= FIELD_PREP(ESR_ELx_FSC,
+ ESR_ELx_FSC_PERM_L(vt->wr.level));
+
+ kvm_inject_nested_sync(vcpu, esr);
+}
+
+static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
+{
+ struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+
+ lockdep_assert_held_read(&vcpu->kvm->mmu_lock);
+
+ if (!vt->valid)
+ return false;
+
+ if (read_vncr_el2(vcpu) != vt->gva)
+ return false;
+
+ if (vt->wr.nG) {
+ u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+ u64 ttbr = ((tcr & TCR_A1) ?
+ vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
+ vcpu_read_sys_reg(vcpu, TTBR0_EL2));
+ u16 asid;
+
+ asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
+ if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
+ !(tcr & TCR_ASID16))
+ asid &= GENMASK(7, 0);
+
+		return asid == vt->wr.asid;
+ }
+
+ return true;
+}
+
+int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu)
+{
+ struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+
+	BUG_ON(!(esr & ESR_ELx_VNCR));
+
+ if (esr_fsc_is_permission_fault(esr)) {
+ inject_vncr_perm(vcpu);
+ } else if (esr_fsc_is_translation_fault(esr)) {
+ bool valid;
+ int ret;
+
+ scoped_guard(read_lock, &vcpu->kvm->mmu_lock)
+ valid = kvm_vncr_tlb_lookup(vcpu);
+
+ if (!valid)
+ ret = kvm_translate_vncr(vcpu);
+ else
+ ret = -EPERM;
+
+ switch (ret) {
+ case -EAGAIN:
+ case -ENOMEM:
+ /* Let's try again... */
+ break;
+ case -EFAULT:
+ case -EINVAL:
+ case -ENOENT:
+ case -EACCES:
+ /*
+ * Translation failed, inject the corresponding
+ * exception back to EL2.
+ */
+ BUG_ON(!vt->wr.failed);
+
+ esr &= ~ESR_ELx_FSC;
+ esr |= FIELD_PREP(ESR_ELx_FSC, vt->wr.fst);
+
+ kvm_inject_nested_sync(vcpu, esr);
+ break;
+ case -EPERM:
+ /* Hack to deal with POE until we get kernel support */
+ inject_vncr_perm(vcpu);
+ break;
+ case 0:
+ break;
+ }
+ } else {
+ WARN_ONCE(1, "Unhandled VNCR abort, ESR=%llx\n", esr);
+ }
+
+ return 1;
+}
+
+static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
+{
+ struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+ pgprot_t prot;
+
+ guard(preempt)();
+ guard(read_lock)(&vcpu->kvm->mmu_lock);
+
+ /*
+ * The request to map VNCR may have raced against some other
+ * event, such as an interrupt, and may not be valid anymore.
+ */
+ if (is_hyp_ctxt(vcpu))
+ return;
+
+ /*
+ * Check that the pseudo-TLB is valid and that VNCR_EL2 still
+ * contains the expected value. If it doesn't, we simply bail out
+ * without a mapping -- a transformed MSR/MRS will generate the
+	 * fault and allow us to populate the pseudo-TLB.
+ */
+ if (!vt->valid)
+ return;
+
+ if (read_vncr_el2(vcpu) != vt->gva)
+ return;
+
+ if (vt->wr.nG) {
+ u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+ u64 ttbr = ((tcr & TCR_A1) ?
+ vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
+ vcpu_read_sys_reg(vcpu, TTBR0_EL2));
+ u16 asid;
+
+ asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
+ if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
+ !(tcr & TCR_ASID16))
+ asid &= GENMASK(7, 0);
+
+ if (asid != vt->wr.asid)
+ return;
+ }
+
+ vt->cpu = smp_processor_id();
+
+ if (vt->wr.pw && vt->wr.pr)
+ prot = PAGE_KERNEL;
+ else if (vt->wr.pr)
+ prot = PAGE_KERNEL_RO;
+ else
+ prot = PAGE_NONE;
+
+ /*
+ * We can't map write-only (or no permission at all) in the kernel,
+ * but the guest can do it if using POE, so we'll have to turn a
+ * translation fault into a permission fault at runtime.
+ * FIXME: WO doesn't work at all, need POE support in the kernel.
+ */
+ if (pgprot_val(prot) != pgprot_val(PAGE_NONE)) {
+ __set_fixmap(vncr_fixmap(vt->cpu), vt->hpa, prot);
+ host_data_set_flag(L1_VNCR_MAPPED);
+ atomic_inc(&vcpu->kvm->arch.vncr_map_count);
+ }
+}
+
+/*
* Our emulated CPU doesn't support all the possible features. For the
* sake of simplicity (and probably mental sanity), wipe out a number
* of feature bits we don't intend to support for the time being.
@@ -1018,216 +1619,49 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
/* HCR_EL2 */
- res0 = BIT(48);
- res1 = HCR_RW;
- if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, TWED, IMP))
- res0 |= GENMASK(63, 59);
- if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, MTE2))
- res0 |= (HCR_TID5 | HCR_DCT | HCR_ATA);
- if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, TTLBxS))
- res0 |= (HCR_TTLBIS | HCR_TTLBOS);
- if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
- !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
- res0 |= HCR_ENSCXT;
- if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, IMP))
- res0 |= (HCR_TOCU | HCR_TICAB | HCR_TID4);
- if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
- res0 |= HCR_AMVOFFEN;
- if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1))
- res0 |= HCR_FIEN;
- if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP))
- res0 |= HCR_FWB;
- /* Implementation choice: NV2 is the only supported config */
- if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
- res0 |= (HCR_NV2 | HCR_NV | HCR_AT);
- if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, NI))
- res0 |= HCR_NV1;
- if (!(kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
- kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
- res0 |= (HCR_API | HCR_APK);
- if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP))
- res0 |= BIT(39);
- if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
- res0 |= (HCR_TEA | HCR_TERR);
- if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
- res0 |= HCR_TLOR;
- if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP))
- res0 |= HCR_E2H;
- if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP))
- res1 |= HCR_E2H;
+ get_reg_fixed_bits(kvm, HCR_EL2, &res0, &res1);
set_sysreg_masks(kvm, HCR_EL2, res0, res1);
/* HCRX_EL2 */
- res0 = HCRX_EL2_RES0;
- res1 = HCRX_EL2_RES1;
- if (!kvm_has_feat(kvm, ID_AA64ISAR3_EL1, PACM, TRIVIAL_IMP))
- res0 |= HCRX_EL2_PACMEn;
- if (!kvm_has_feat(kvm, ID_AA64PFR2_EL1, FPMR, IMP))
- res0 |= HCRX_EL2_EnFPM;
- if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
- res0 |= HCRX_EL2_GCSEn;
- if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, SYSREG_128, IMP))
- res0 |= HCRX_EL2_EnIDCP128;
- if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, DEV_ASYNC))
- res0 |= (HCRX_EL2_EnSDERR | HCRX_EL2_EnSNERR);
- if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, DF2, IMP))
- res0 |= HCRX_EL2_TMEA;
- if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP))
- res0 |= HCRX_EL2_D128En;
- if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
- res0 |= HCRX_EL2_PTTWI;
- if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SCTLRX, IMP))
- res0 |= HCRX_EL2_SCTLR2En;
- if (!kvm_has_tcr2(kvm))
- res0 |= HCRX_EL2_TCR2En;
- if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
- res0 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
- if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, CMOW, IMP))
- res0 |= HCRX_EL2_CMOW;
- if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP))
- res0 |= (HCRX_EL2_VFNMI | HCRX_EL2_VINMI | HCRX_EL2_TALLINT);
- if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) ||
- !(read_sysreg_s(SYS_SMIDR_EL1) & SMIDR_EL1_SMPS))
- res0 |= HCRX_EL2_SMPME;
- if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
- res0 |= (HCRX_EL2_FGTnXS | HCRX_EL2_FnXS);
- if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V))
- res0 |= HCRX_EL2_EnASR;
- if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64))
- res0 |= HCRX_EL2_EnALS;
- if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
- res0 |= HCRX_EL2_EnAS0;
+ get_reg_fixed_bits(kvm, HCRX_EL2, &res0, &res1);
set_sysreg_masks(kvm, HCRX_EL2, res0, res1);
/* HFG[RW]TR_EL2 */
- res0 = res1 = 0;
- if (!(kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
- kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
- res0 |= (HFGxTR_EL2_APDAKey | HFGxTR_EL2_APDBKey |
- HFGxTR_EL2_APGAKey | HFGxTR_EL2_APIAKey |
- HFGxTR_EL2_APIBKey);
- if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
- res0 |= (HFGxTR_EL2_LORC_EL1 | HFGxTR_EL2_LOREA_EL1 |
- HFGxTR_EL2_LORID_EL1 | HFGxTR_EL2_LORN_EL1 |
- HFGxTR_EL2_LORSA_EL1);
- if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
- !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
- res0 |= (HFGxTR_EL2_SCXTNUM_EL1 | HFGxTR_EL2_SCXTNUM_EL0);
- if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP))
- res0 |= HFGxTR_EL2_ICC_IGRPENn_EL1;
- if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
- res0 |= (HFGxTR_EL2_ERRIDR_EL1 | HFGxTR_EL2_ERRSELR_EL1 |
- HFGxTR_EL2_ERXFR_EL1 | HFGxTR_EL2_ERXCTLR_EL1 |
- HFGxTR_EL2_ERXSTATUS_EL1 | HFGxTR_EL2_ERXMISCn_EL1 |
- HFGxTR_EL2_ERXPFGF_EL1 | HFGxTR_EL2_ERXPFGCTL_EL1 |
- HFGxTR_EL2_ERXPFGCDN_EL1 | HFGxTR_EL2_ERXADDR_EL1);
- if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
- res0 |= HFGxTR_EL2_nACCDATA_EL1;
- if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
- res0 |= (HFGxTR_EL2_nGCS_EL0 | HFGxTR_EL2_nGCS_EL1);
- if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
- res0 |= (HFGxTR_EL2_nSMPRI_EL1 | HFGxTR_EL2_nTPIDR2_EL0);
- if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
- res0 |= HFGxTR_EL2_nRCWMASK_EL1;
- if (!kvm_has_s1pie(kvm))
- res0 |= (HFGxTR_EL2_nPIRE0_EL1 | HFGxTR_EL2_nPIR_EL1);
- if (!kvm_has_s1poe(kvm))
- res0 |= (HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nPOR_EL1);
- if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
- res0 |= HFGxTR_EL2_nS2POR_EL1;
- if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP))
- res0 |= (HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nAMAIR2_EL1);
- set_sysreg_masks(kvm, HFGRTR_EL2, res0 | __HFGRTR_EL2_RES0, res1);
- set_sysreg_masks(kvm, HFGWTR_EL2, res0 | __HFGWTR_EL2_RES0, res1);
+ get_reg_fixed_bits(kvm, HFGRTR_EL2, &res0, &res1);
+ set_sysreg_masks(kvm, HFGRTR_EL2, res0, res1);
+ get_reg_fixed_bits(kvm, HFGWTR_EL2, &res0, &res1);
+ set_sysreg_masks(kvm, HFGWTR_EL2, res0, res1);
/* HDFG[RW]TR_EL2 */
- res0 = res1 = 0;
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
- res0 |= HDFGRTR_EL2_OSDLR_EL1;
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
- res0 |= (HDFGRTR_EL2_PMEVCNTRn_EL0 | HDFGRTR_EL2_PMEVTYPERn_EL0 |
- HDFGRTR_EL2_PMCCFILTR_EL0 | HDFGRTR_EL2_PMCCNTR_EL0 |
- HDFGRTR_EL2_PMCNTEN | HDFGRTR_EL2_PMINTEN |
- HDFGRTR_EL2_PMOVS | HDFGRTR_EL2_PMSELR_EL0 |
- HDFGRTR_EL2_PMMIR_EL1 | HDFGRTR_EL2_PMUSERENR_EL0 |
- HDFGRTR_EL2_PMCEIDn_EL0);
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP))
- res0 |= (HDFGRTR_EL2_PMBLIMITR_EL1 | HDFGRTR_EL2_PMBPTR_EL1 |
- HDFGRTR_EL2_PMBSR_EL1 | HDFGRTR_EL2_PMSCR_EL1 |
- HDFGRTR_EL2_PMSEVFR_EL1 | HDFGRTR_EL2_PMSFCR_EL1 |
- HDFGRTR_EL2_PMSICR_EL1 | HDFGRTR_EL2_PMSIDR_EL1 |
- HDFGRTR_EL2_PMSIRR_EL1 | HDFGRTR_EL2_PMSLATFR_EL1 |
- HDFGRTR_EL2_PMBIDR_EL1);
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP))
- res0 |= (HDFGRTR_EL2_TRC | HDFGRTR_EL2_TRCAUTHSTATUS |
- HDFGRTR_EL2_TRCAUXCTLR | HDFGRTR_EL2_TRCCLAIM |
- HDFGRTR_EL2_TRCCNTVRn | HDFGRTR_EL2_TRCID |
- HDFGRTR_EL2_TRCIMSPECn | HDFGRTR_EL2_TRCOSLSR |
- HDFGRTR_EL2_TRCPRGCTLR | HDFGRTR_EL2_TRCSEQSTR |
- HDFGRTR_EL2_TRCSSCSRn | HDFGRTR_EL2_TRCSTATR |
- HDFGRTR_EL2_TRCVICTLR);
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP))
- res0 |= (HDFGRTR_EL2_TRBBASER_EL1 | HDFGRTR_EL2_TRBIDR_EL1 |
- HDFGRTR_EL2_TRBLIMITR_EL1 | HDFGRTR_EL2_TRBMAR_EL1 |
- HDFGRTR_EL2_TRBPTR_EL1 | HDFGRTR_EL2_TRBSR_EL1 |
- HDFGRTR_EL2_TRBTRG_EL1);
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP))
- res0 |= (HDFGRTR_EL2_nBRBIDR | HDFGRTR_EL2_nBRBCTL |
- HDFGRTR_EL2_nBRBDATA);
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P2))
- res0 |= HDFGRTR_EL2_nPMSNEVFR_EL1;
- set_sysreg_masks(kvm, HDFGRTR_EL2, res0 | HDFGRTR_EL2_RES0, res1);
-
- /* Reuse the bits from the read-side and add the write-specific stuff */
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
- res0 |= (HDFGWTR_EL2_PMCR_EL0 | HDFGWTR_EL2_PMSWINC_EL0);
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP))
- res0 |= HDFGWTR_EL2_TRCOSLAR;
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
- res0 |= HDFGWTR_EL2_TRFCR_EL1;
- set_sysreg_masks(kvm, HFGWTR_EL2, res0 | HDFGWTR_EL2_RES0, res1);
+ get_reg_fixed_bits(kvm, HDFGRTR_EL2, &res0, &res1);
+ set_sysreg_masks(kvm, HDFGRTR_EL2, res0, res1);
+ get_reg_fixed_bits(kvm, HDFGWTR_EL2, &res0, &res1);
+ set_sysreg_masks(kvm, HDFGWTR_EL2, res0, res1);
/* HFGITR_EL2 */
- res0 = HFGITR_EL2_RES0;
- res1 = HFGITR_EL2_RES1;
- if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, DPB, DPB2))
- res0 |= HFGITR_EL2_DCCVADP;
- if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2))
- res0 |= (HFGITR_EL2_ATS1E1RP | HFGITR_EL2_ATS1E1WP);
- if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
- res0 |= (HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS |
- HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS |
- HFGITR_EL2_TLBIVAALE1OS | HFGITR_EL2_TLBIVALE1OS |
- HFGITR_EL2_TLBIVAAE1OS | HFGITR_EL2_TLBIASIDE1OS |
- HFGITR_EL2_TLBIVAE1OS | HFGITR_EL2_TLBIVMALLE1OS);
- if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
- res0 |= (HFGITR_EL2_TLBIRVAALE1 | HFGITR_EL2_TLBIRVALE1 |
- HFGITR_EL2_TLBIRVAAE1 | HFGITR_EL2_TLBIRVAE1 |
- HFGITR_EL2_TLBIRVAALE1IS | HFGITR_EL2_TLBIRVALE1IS |
- HFGITR_EL2_TLBIRVAAE1IS | HFGITR_EL2_TLBIRVAE1IS |
- HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS |
- HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS);
- if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, IMP))
- res0 |= (HFGITR_EL2_CFPRCTX | HFGITR_EL2_DVPRCTX |
- HFGITR_EL2_CPPRCTX);
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP))
- res0 |= (HFGITR_EL2_nBRBINJ | HFGITR_EL2_nBRBIALL);
- if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
- res0 |= (HFGITR_EL2_nGCSPUSHM_EL1 | HFGITR_EL2_nGCSSTR_EL1 |
- HFGITR_EL2_nGCSEPP);
- if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX))
- res0 |= HFGITR_EL2_COSPRCTX;
- if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP))
- res0 |= HFGITR_EL2_ATS1E1A;
+ get_reg_fixed_bits(kvm, HFGITR_EL2, &res0, &res1);
set_sysreg_masks(kvm, HFGITR_EL2, res0, res1);
/* HAFGRTR_EL2 - not a lot to see here */
- res0 = HAFGRTR_EL2_RES0;
- res1 = HAFGRTR_EL2_RES1;
- if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
- res0 |= ~(res0 | res1);
+ get_reg_fixed_bits(kvm, HAFGRTR_EL2, &res0, &res1);
set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1);
+ /* HFG[RW]TR2_EL2 */
+ get_reg_fixed_bits(kvm, HFGRTR2_EL2, &res0, &res1);
+ set_sysreg_masks(kvm, HFGRTR2_EL2, res0, res1);
+ get_reg_fixed_bits(kvm, HFGWTR2_EL2, &res0, &res1);
+ set_sysreg_masks(kvm, HFGWTR2_EL2, res0, res1);
+
+ /* HDFG[RW]TR2_EL2 */
+ get_reg_fixed_bits(kvm, HDFGRTR2_EL2, &res0, &res1);
+ set_sysreg_masks(kvm, HDFGRTR2_EL2, res0, res1);
+ get_reg_fixed_bits(kvm, HDFGWTR2_EL2, &res0, &res1);
+ set_sysreg_masks(kvm, HDFGWTR2_EL2, res0, res1);
+
+ /* HFGITR2_EL2 */
+ get_reg_fixed_bits(kvm, HFGITR2_EL2, &res0, &res1);
+ set_sysreg_masks(kvm, HFGITR2_EL2, res0, res1);
+
/* TCR2_EL2 */
res0 = TCR2_EL2_RES0;
res1 = TCR2_EL2_RES1;
@@ -1318,6 +1752,9 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
res0 |= ICH_HCR_EL2_DVIM | ICH_HCR_EL2_vSGIEOICount;
set_sysreg_masks(kvm, ICH_HCR_EL2, res0, res1);
+ /* VNCR_EL2 */
+ set_sysreg_masks(kvm, VNCR_EL2, VNCR_EL2_RES0, VNCR_EL2_RES1);
+
out:
for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
(void)__vcpu_sys_reg(vcpu, sr);
@@ -1338,6 +1775,9 @@ void check_nested_vcpu_requests(struct kvm_vcpu *vcpu)
write_unlock(&vcpu->kvm->mmu_lock);
}
+ if (kvm_check_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu))
+ kvm_map_l1_vncr(vcpu);
+
/* Must be last, as may switch context! */
if (kvm_check_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu))
kvm_inject_nested_irq(vcpu);
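
The nested.c hunks above replace the long per-register chains of kvm_has_feat() checks with a single table-driven lookup, get_reg_fixed_bits(), whose results feed set_sysreg_masks(). For readers unfamiliar with the res0/res1 convention those masks encode, a minimal user-space sketch of the invariant follows; the helper name sanitise_reg() and the example bit positions are illustrative, not taken from the kernel.

    #include <stdint.h>
    #include <stdio.h>

    /* Force RES0 bits to 0 and RES1 bits to 1, leaving all other bits alone. */
    static uint64_t sanitise_reg(uint64_t val, uint64_t res0, uint64_t res1)
    {
        return (val & ~res0) | res1;
    }

    int main(void)
    {
        uint64_t res0 = 1ULL << 48;   /* a bit the VM may never set       */
        uint64_t res1 = 1ULL << 31;   /* a bit that must always read as 1 */
        uint64_t guest_val = (1ULL << 48) | 0xff;

        printf("sanitised: %#llx\n",
               (unsigned long long)sanitise_reg(guest_val, res0, res1));
        return 0;
    }
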
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 0f89157d31fd..fcd70bfe44fb 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -5,12 +5,12 @@
*/
#include <linux/init.h>
+#include <linux/interval_tree_generic.h>
#include <linux/kmemleak.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
-#include <linux/sort.h>
#include <asm/kvm_pkvm.h>
@@ -24,23 +24,6 @@ static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
phys_addr_t hyp_mem_base;
phys_addr_t hyp_mem_size;
-static int cmp_hyp_memblock(const void *p1, const void *p2)
-{
- const struct memblock_region *r1 = p1;
- const struct memblock_region *r2 = p2;
-
- return r1->base < r2->base ? -1 : (r1->base > r2->base);
-}
-
-static void __init sort_memblock_regions(void)
-{
- sort(hyp_memory,
- *hyp_memblock_nr_ptr,
- sizeof(struct memblock_region),
- cmp_hyp_memblock,
- NULL);
-}
-
static int __init register_memblock_regions(void)
{
struct memblock_region *reg;
@@ -52,7 +35,6 @@ static int __init register_memblock_regions(void)
hyp_memory[*hyp_memblock_nr_ptr] = *reg;
(*hyp_memblock_nr_ptr)++;
}
- sort_memblock_regions();
return 0;
}
@@ -79,6 +61,7 @@ void __init kvm_hyp_reserve(void)
hyp_mem_pages += host_s2_pgtable_pages();
hyp_mem_pages += hyp_vm_table_pages();
hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
+ hyp_mem_pages += pkvm_selftest_pages();
hyp_mem_pages += hyp_ffa_proxy_pages();
/*
@@ -262,6 +245,7 @@ static int __init finalize_pkvm(void)
* at, which would end badly once inaccessible.
*/
kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
+ kmemleak_free_part(__hyp_data_start, __hyp_data_end - __hyp_data_start);
kmemleak_free_part(__hyp_rodata_start, __hyp_rodata_end - __hyp_rodata_start);
kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
@@ -273,80 +257,68 @@ static int __init finalize_pkvm(void)
}
device_initcall_sync(finalize_pkvm);
-static int cmp_mappings(struct rb_node *node, const struct rb_node *parent)
+static u64 __pkvm_mapping_start(struct pkvm_mapping *m)
{
- struct pkvm_mapping *a = rb_entry(node, struct pkvm_mapping, node);
- struct pkvm_mapping *b = rb_entry(parent, struct pkvm_mapping, node);
-
- if (a->gfn < b->gfn)
- return -1;
- if (a->gfn > b->gfn)
- return 1;
- return 0;
+ return m->gfn * PAGE_SIZE;
}
-static struct rb_node *find_first_mapping_node(struct rb_root *root, u64 gfn)
+static u64 __pkvm_mapping_end(struct pkvm_mapping *m)
{
- struct rb_node *node = root->rb_node, *prev = NULL;
- struct pkvm_mapping *mapping;
-
- while (node) {
- mapping = rb_entry(node, struct pkvm_mapping, node);
- if (mapping->gfn == gfn)
- return node;
- prev = node;
- node = (gfn < mapping->gfn) ? node->rb_left : node->rb_right;
- }
-
- return prev;
+ return (m->gfn + m->nr_pages) * PAGE_SIZE - 1;
}
+INTERVAL_TREE_DEFINE(struct pkvm_mapping, node, u64, __subtree_last,
+ __pkvm_mapping_start, __pkvm_mapping_end, static,
+ pkvm_mapping);
+
/*
- * __tmp is updated to rb_next(__tmp) *before* entering the body of the loop to allow freeing
- * of __map inline.
+ * __tmp is updated to iter_first(pkvm_mappings) *before* entering the body of the loop to allow
+ * freeing of __map inline.
*/
#define for_each_mapping_in_range_safe(__pgt, __start, __end, __map) \
- for (struct rb_node *__tmp = find_first_mapping_node(&(__pgt)->pkvm_mappings, \
- ((__start) >> PAGE_SHIFT)); \
+ for (struct pkvm_mapping *__tmp = pkvm_mapping_iter_first(&(__pgt)->pkvm_mappings, \
+ __start, __end - 1); \
__tmp && ({ \
- __map = rb_entry(__tmp, struct pkvm_mapping, node); \
- __tmp = rb_next(__tmp); \
+ __map = __tmp; \
+ __tmp = pkvm_mapping_iter_next(__map, __start, __end - 1); \
true; \
}); \
- ) \
- if (__map->gfn < ((__start) >> PAGE_SHIFT)) \
- continue; \
- else if (__map->gfn >= ((__end) >> PAGE_SHIFT)) \
- break; \
- else
+ )
int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
struct kvm_pgtable_mm_ops *mm_ops)
{
- pgt->pkvm_mappings = RB_ROOT;
+ pgt->pkvm_mappings = RB_ROOT_CACHED;
pgt->mmu = mmu;
return 0;
}
-void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
+static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 end)
{
struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
pkvm_handle_t handle = kvm->arch.pkvm.handle;
struct pkvm_mapping *mapping;
- struct rb_node *node;
+ int ret;
if (!handle)
- return;
+ return 0;
- node = rb_first(&pgt->pkvm_mappings);
- while (node) {
- mapping = rb_entry(node, struct pkvm_mapping, node);
- kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn);
- node = rb_next(node);
- rb_erase(&mapping->node, &pgt->pkvm_mappings);
+ for_each_mapping_in_range_safe(pgt, start, end, mapping) {
+ ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn,
+ mapping->nr_pages);
+ if (WARN_ON(ret))
+ return ret;
+ pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);
kfree(mapping);
}
+
+ return 0;
+}
+
+void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
+{
+ __pkvm_pgtable_stage2_unmap(pgt, 0, ~(0ULL));
}
int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
@@ -360,42 +332,46 @@ int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
u64 pfn = phys >> PAGE_SHIFT;
int ret;
- if (size != PAGE_SIZE)
+ if (size != PAGE_SIZE && size != PMD_SIZE)
return -EINVAL;
lockdep_assert_held_write(&kvm->mmu_lock);
- ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, prot);
- if (ret) {
- /* Is the gfn already mapped due to a racing vCPU? */
- if (ret == -EPERM)
+
+ /*
+	 * Calling stage2_map() on top of existing mappings happens either because of a race
+	 * with another vCPU or because we're changing between page and block mappings. As per
+ * user_mem_abort(), same-size permission faults are handled in the relax_perms() path.
+ */
+ mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1);
+ if (mapping) {
+ if (size == (mapping->nr_pages * PAGE_SIZE))
return -EAGAIN;
+
+ /* Remove _any_ pkvm_mapping overlapping with the range, bigger or smaller. */
+ ret = __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);
+ if (ret)
+ return ret;
+ mapping = NULL;
}
+ ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, size / PAGE_SIZE, prot);
+ if (WARN_ON(ret))
+ return ret;
+
swap(mapping, cache->mapping);
mapping->gfn = gfn;
mapping->pfn = pfn;
- WARN_ON(rb_find_add(&mapping->node, &pgt->pkvm_mappings, cmp_mappings));
+ mapping->nr_pages = size / PAGE_SIZE;
+ pkvm_mapping_insert(mapping, &pgt->pkvm_mappings);
return ret;
}
int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
- struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
- pkvm_handle_t handle = kvm->arch.pkvm.handle;
- struct pkvm_mapping *mapping;
- int ret = 0;
-
- lockdep_assert_held_write(&kvm->mmu_lock);
- for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) {
- ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn);
- if (WARN_ON(ret))
- break;
- rb_erase(&mapping->node, &pgt->pkvm_mappings);
- kfree(mapping);
- }
+ lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(pgt->mmu)->mmu_lock);
- return ret;
+ return __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);
}
int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
@@ -407,7 +383,8 @@ int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
lockdep_assert_held(&kvm->mmu_lock);
for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) {
- ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn);
+ ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn,
+ mapping->nr_pages);
if (WARN_ON(ret))
break;
}
@@ -422,7 +399,8 @@ int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
lockdep_assert_held(&kvm->mmu_lock);
for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
- __clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn), PAGE_SIZE);
+ __clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn),
+ PAGE_SIZE * mapping->nr_pages);
return 0;
}
@@ -437,7 +415,7 @@ bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64
lockdep_assert_held(&kvm->mmu_lock);
for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
young |= kvm_call_hyp_nvhe(__pkvm_host_test_clear_young_guest, handle, mapping->gfn,
- mkold);
+ mapping->nr_pages, mkold);
return young;
}
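
The pkvm.c changes above swap the flat rb-tree of guest-frame mappings for an interval tree keyed on byte addresses, with each mapping stored as a closed interval whose last address is (gfn + nr_pages) * PAGE_SIZE - 1. A small stand-alone sketch of that closed-interval overlap test follows; the struct and helper names are illustrative only.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    struct mapping {
        uint64_t gfn;       /* first guest frame number    */
        uint64_t nr_pages;  /* number of contiguous pages  */
    };

    static uint64_t mapping_start(const struct mapping *m)
    {
        return m->gfn * PAGE_SIZE;
    }

    /* Address of the final byte covered by the mapping (closed interval). */
    static uint64_t mapping_last(const struct mapping *m)
    {
        return (m->gfn + m->nr_pages) * PAGE_SIZE - 1;
    }

    /* Two closed intervals overlap iff each starts no later than the other ends. */
    static bool overlaps(const struct mapping *m, uint64_t start, uint64_t last)
    {
        return mapping_start(m) <= last && start <= mapping_last(m);
    }

    int main(void)
    {
        struct mapping m = { .gfn = 16, .nr_pages = 4 };

        /* A one-byte query at the very end of the mapping still overlaps... */
        printf("%d\n", overlaps(&m, mapping_last(&m), mapping_last(&m)));
        /* ...while the first byte after it does not. */
        printf("%d\n", overlaps(&m, mapping_last(&m) + 1, mapping_last(&m) + 1));
        return 0;
    }
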
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index a1bc10d7116a..25c29107f13f 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -280,7 +280,7 @@ static u64 kvm_pmu_hyp_counter_mask(struct kvm_vcpu *vcpu)
return 0;
hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
- n = vcpu->kvm->arch.pmcr_n;
+ n = vcpu->kvm->arch.nr_pmu_counters;
/*
* Programming HPMN to a value greater than PMCR_EL0.N is
@@ -608,14 +608,12 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
if (val & ARMV8_PMU_PMCR_P) {
- /*
- * Unlike other PMU sysregs, the controls in PMCR_EL0 always apply
- * to the 'guest' range of counters and never the 'hyp' range.
- */
unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) &
- ~kvm_pmu_hyp_counter_mask(vcpu) &
~BIT(ARMV8_PMU_CYCLE_IDX);
+ if (!vcpu_is_el2(vcpu))
+ mask &= ~kvm_pmu_hyp_counter_mask(vcpu);
+
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
}
@@ -1027,12 +1025,30 @@ u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
return bitmap_weight(arm_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS);
}
+static void kvm_arm_set_nr_counters(struct kvm *kvm, unsigned int nr)
+{
+ kvm->arch.nr_pmu_counters = nr;
+
+ /* Reset MDCR_EL2.HPMN behind the vcpus' back... */
+ if (test_bit(KVM_ARM_VCPU_HAS_EL2, kvm->arch.vcpu_features)) {
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ u64 val = __vcpu_sys_reg(vcpu, MDCR_EL2);
+ val &= ~MDCR_EL2_HPMN;
+ val |= FIELD_PREP(MDCR_EL2_HPMN, kvm->arch.nr_pmu_counters);
+ __vcpu_sys_reg(vcpu, MDCR_EL2) = val;
+ }
+ }
+}
+
static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
{
lockdep_assert_held(&kvm->arch.config_lock);
kvm->arch.arm_pmu = arm_pmu;
- kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm);
+ kvm_arm_set_nr_counters(kvm, kvm_arm_pmu_get_max_counters(kvm));
}
/**
@@ -1088,6 +1104,20 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
return ret;
}
+static int kvm_arm_pmu_v3_set_nr_counters(struct kvm_vcpu *vcpu, unsigned int n)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ if (!kvm->arch.arm_pmu)
+ return -EINVAL;
+
+ if (n > kvm_arm_pmu_get_max_counters(kvm))
+ return -EINVAL;
+
+ kvm_arm_set_nr_counters(kvm, n);
+ return 0;
+}
+
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
struct kvm *kvm = vcpu->kvm;
@@ -1184,6 +1214,15 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
}
+ case KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS: {
+ unsigned int __user *uaddr = (unsigned int __user *)(long)attr->addr;
+ unsigned int n;
+
+ if (get_user(n, uaddr))
+ return -EFAULT;
+
+ return kvm_arm_pmu_v3_set_nr_counters(vcpu, n);
+ }
case KVM_ARM_VCPU_PMU_V3_INIT:
return kvm_arm_pmu_v3_init(vcpu);
}
@@ -1222,6 +1261,7 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
case KVM_ARM_VCPU_PMU_V3_INIT:
case KVM_ARM_VCPU_PMU_V3_FILTER:
case KVM_ARM_VCPU_PMU_V3_SET_PMU:
+ case KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS:
if (kvm_vcpu_has_pmu(vcpu))
return 0;
}
@@ -1260,8 +1300,12 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
+ u64 n = vcpu->kvm->arch.nr_pmu_counters;
+
+ if (vcpu_has_nv(vcpu) && !vcpu_is_el2(vcpu))
+ n = FIELD_GET(MDCR_EL2_HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
- return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N);
+ return u64_replace_bits(pmcr, n, ARMV8_PMU_PMCR_N);
}
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
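
The pmu-emul.c hunks above stop exposing the raw VM-wide counter count through PMCR_EL0.N: for a nested guest running below vEL2, the reported N now comes from MDCR_EL2.HPMN. Below is a hedged user-space sketch of that field replacement; the PMCR_EL0.N position (bits [15:11]) matches the architecture, while the helper name and the sample values are made up.

    #include <stdint.h>
    #include <stdio.h>

    #define PMCR_N_SHIFT 11
    #define PMCR_N_MASK  (0x1fULL << PMCR_N_SHIFT)

    /* Replace the N field of a PMCR_EL0 value, leaving everything else alone. */
    static uint64_t replace_pmcr_n(uint64_t pmcr, uint64_t n)
    {
        return (pmcr & ~PMCR_N_MASK) | ((n << PMCR_N_SHIFT) & PMCR_N_MASK);
    }

    int main(void)
    {
        uint64_t nr_counters = 6;   /* VM-wide general-purpose counter count    */
        uint64_t hpmn = 2;          /* MDCR_EL2.HPMN programmed by the L1 guest */
        int vcpu_at_el2 = 0;        /* running at vEL1/vEL0                     */

        /* Below vEL2 a nested guest only sees the first HPMN counters. */
        uint64_t n = vcpu_at_el2 ? nr_counters : hpmn;

        printf("PMCR_EL0 = %#llx\n",
               (unsigned long long)replace_pmcr_n(0x41013000ULL, n));
        return 0;
    }
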
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index f82fcc614e13..959532422d3a 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -158,6 +158,8 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
if (sve_state)
kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
kfree(sve_state);
+ free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
+ kfree(vcpu->arch.vncr_tlb);
kfree(vcpu->arch.ccsidr);
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 005ad28f7306..a6cf2888d150 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -785,7 +785,7 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
- u8 n = vcpu->kvm->arch.pmcr_n;
+ u8 n = vcpu->kvm->arch.nr_pmu_counters;
if (n)
mask |= GENMASK(n - 1, 0);
@@ -1216,8 +1216,9 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
* with the existing KVM behavior.
*/
if (!kvm_vm_has_ran_once(kvm) &&
+ !vcpu_has_nv(vcpu) &&
new_n <= kvm_arm_pmu_get_max_counters(kvm))
- kvm->arch.pmcr_n = new_n;
+ kvm->arch.nr_pmu_counters = new_n;
mutex_unlock(&kvm->arch.config_lock);
@@ -1600,13 +1601,14 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val = sanitise_id_aa64pfr0_el1(vcpu, val);
break;
case SYS_ID_AA64PFR1_EL1:
- if (!kvm_has_mte(vcpu->kvm))
+ if (!kvm_has_mte(vcpu->kvm)) {
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
+ }
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI);
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
@@ -1945,6 +1947,12 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
+ /* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
+ if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
+ !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
+ (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
+ return -EINVAL;
+
return set_id_reg(vcpu, rd, user_val);
}
@@ -1953,11 +1961,34 @@ static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
{
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
+ u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val);
+ u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val);
+ u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val);
/* See set_id_aa64pfr0_el1 for comment about MPAM */
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
+ /*
+	 * Previously MTE_frac was hidden from the guest. However, if the
+	 * hardware supports MTE2 but not MTE_ASYM_FAULT, a value of 0 in
+	 * this field indicates that the hardware supports MTE_ASYNC,
+	 * whereas 0xf indicates that MTE_ASYNC is not supported.
+	 *
+	 * As KVM must accept ID register values provided by user-space,
+	 * allow user-space to set ID_AA64PFR1_EL1.MTE_frac to 0 when
+	 * ID_AA64PFR1_EL1.MTE is 2. However, ignore that value to avoid
+	 * incorrectly claiming hardware support for MTE_ASYNC in the
+	 * guest.
+ */
+
+ if (mte == ID_AA64PFR1_EL1_MTE_MTE2 &&
+ hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI &&
+ user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) {
+ user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
+ user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK;
+ }
+
return set_id_reg(vcpu, rd, user_val);
}
@@ -2281,15 +2312,6 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
"trap of EL2 register redirected to EL1");
}
-#define EL2_REG(name, acc, rst, v) { \
- SYS_DESC(SYS_##name), \
- .access = acc, \
- .reset = rst, \
- .reg = name, \
- .visibility = el2_visibility, \
- .val = v, \
-}
-
#define EL2_REG_FILTERED(name, acc, rst, v, filter) { \
SYS_DESC(SYS_##name), \
.access = acc, \
@@ -2299,6 +2321,9 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
.val = v, \
}
+#define EL2_REG(name, acc, rst, v) \
+ EL2_REG_FILTERED(name, acc, rst, v, el2_visibility)
+
#define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v)
#define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v)
@@ -2446,6 +2471,16 @@ static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
return __el2_visibility(vcpu, rd, sve_visibility);
}
+static unsigned int vncr_el2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (el2_visibility(vcpu, rd) == 0 &&
+ kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
static bool access_zcr_el2(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
@@ -2570,16 +2605,33 @@ static bool access_mdcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 old = __vcpu_sys_reg(vcpu, MDCR_EL2);
+ u64 hpmn, val, old = __vcpu_sys_reg(vcpu, MDCR_EL2);
- if (!access_rw(vcpu, p, r))
- return false;
+ if (!p->is_write) {
+ p->regval = old;
+ return true;
+ }
+
+ val = p->regval;
+ hpmn = FIELD_GET(MDCR_EL2_HPMN, val);
+
+ /*
+ * If HPMN is out of bounds, limit it to what we actually
+ * support. This matches the UNKNOWN definition of the field
+ * in that case, and keeps the emulation simple. Sort of.
+ */
+ if (hpmn > vcpu->kvm->arch.nr_pmu_counters) {
+ hpmn = vcpu->kvm->arch.nr_pmu_counters;
+ u64_replace_bits(val, hpmn, MDCR_EL2_HPMN);
+ }
+
+ __vcpu_sys_reg(vcpu, MDCR_EL2) = val;
/*
- * Request a reload of the PMU to enable/disable the counters affected
- * by HPME.
+ * Request a reload of the PMU to enable/disable the counters
+ * affected by HPME.
*/
- if ((old ^ __vcpu_sys_reg(vcpu, MDCR_EL2)) & MDCR_EL2_HPME)
+ if ((old ^ val) & MDCR_EL2_HPME)
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
return true;
@@ -2698,6 +2750,12 @@ static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
.set_user = set_imp_id_reg, \
.reset = reset_imp_id_reg, \
.val = mask, \
+ }
+
+static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ __vcpu_sys_reg(vcpu, r->reg) = vcpu->kvm->arch.nr_pmu_counters;
+ return vcpu->kvm->arch.nr_pmu_counters;
}
/*
@@ -3243,7 +3301,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
- EL2_REG(MDCR_EL2, access_mdcr, reset_val, 0),
+ EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0),
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
@@ -3263,6 +3321,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
tcr2_el2_visibility),
EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
+ EL2_REG_FILTERED(VNCR_EL2, bad_vncr_trap, reset_val, 0,
+ vncr_el2_visibility),
{ SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 },
EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0),
@@ -3546,8 +3606,7 @@ static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
- u64 base, range, tg, num, scale;
- int shift;
+ u64 base, range;
if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
return undef_access(vcpu, p, r);
@@ -3557,26 +3616,7 @@ static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
* of the guest's S2 (different base granule size, for example), we
* decide to ignore TTL and only use the described range.
*/
- tg = FIELD_GET(GENMASK(47, 46), p->regval);
- scale = FIELD_GET(GENMASK(45, 44), p->regval);
- num = FIELD_GET(GENMASK(43, 39), p->regval);
- base = p->regval & GENMASK(36, 0);
-
- switch(tg) {
- case 1:
- shift = 12;
- break;
- case 2:
- shift = 14;
- break;
- case 3:
- default: /* IMPDEF: handle tg==0 as 64k */
- shift = 16;
- break;
- }
-
- base <<= shift;
- range = __TLBI_RANGE_PAGES(num, scale) << shift;
+ base = decode_range_tlbi(p->regval, &range, NULL);
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
&(union tlbi_info) {
@@ -3642,11 +3682,22 @@ static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
}
+static bool handle_tlbi_el2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+
+ if (!kvm_supported_tlbi_s1e2_op(vcpu, sys_encoding))
+ return undef_access(vcpu, p, r);
+
+ kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
+ return true;
+}
+
static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
- u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
/*
* If we're here, this is because we've trapped on a EL1 TLBI
@@ -3657,6 +3708,13 @@ static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
* - HCR_EL2.E2H == 0 : a non-VHE guest
* - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
*
+ * Another possibility is that we are invalidating the EL2 context
+ * using EL1 instructions, but that we landed here because we need
+ * additional invalidation for structures that are not held in the
+ * CPU TLBs (such as the VNCR pseudo-TLB and its EL2 mapping). In
+ * that case, we are guaranteed that HCR_EL2.{E2H,TGE} == { 1, 1 }
+ * as we don't allow an NV-capable L1 in a nVHE configuration.
+ *
* We don't expect these helpers to ever be called when running
* in a vEL1 context.
*/
@@ -3666,7 +3724,13 @@ static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding))
return undef_access(vcpu, p, r);
- kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
+ if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) {
+ kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
+ return true;
+ }
+
+ kvm_s2_mmu_iterate_by_vmid(vcpu->kvm,
+ get_vmid(__vcpu_sys_reg(vcpu, VTTBR_EL2)),
&(union tlbi_info) {
.va = {
.addr = p->regval,
@@ -3788,16 +3852,21 @@ static struct sys_reg_desc sys_insn_descs[] = {
SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),
- SYS_INSN(TLBI_ALLE2OS, undef_access),
- SYS_INSN(TLBI_VAE2OS, undef_access),
+ SYS_INSN(TLBI_ALLE2OS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VAE2OS, handle_tlbi_el2),
SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
- SYS_INSN(TLBI_VALE2OS, undef_access),
+ SYS_INSN(TLBI_VALE2OS, handle_tlbi_el2),
SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),
- SYS_INSN(TLBI_RVAE2IS, undef_access),
- SYS_INSN(TLBI_RVALE2IS, undef_access),
+ SYS_INSN(TLBI_RVAE2IS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVALE2IS, handle_tlbi_el2),
+ SYS_INSN(TLBI_ALLE2IS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VAE2IS, handle_tlbi_el2),
SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
+
+ SYS_INSN(TLBI_VALE2IS, handle_tlbi_el2),
+
SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is),
SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is),
@@ -3807,11 +3876,17 @@ static struct sys_reg_desc sys_insn_descs[] = {
SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
- SYS_INSN(TLBI_RVAE2OS, undef_access),
- SYS_INSN(TLBI_RVALE2OS, undef_access),
- SYS_INSN(TLBI_RVAE2, undef_access),
- SYS_INSN(TLBI_RVALE2, undef_access),
+ SYS_INSN(TLBI_RVAE2OS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVALE2OS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVAE2, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVALE2, handle_tlbi_el2),
+ SYS_INSN(TLBI_ALLE2, handle_tlbi_el2),
+ SYS_INSN(TLBI_VAE2, handle_tlbi_el2),
+
SYS_INSN(TLBI_ALLE1, handle_alle1is),
+
+ SYS_INSN(TLBI_VALE2, handle_tlbi_el2),
+
SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is),
@@ -3819,19 +3894,19 @@ static struct sys_reg_desc sys_insn_descs[] = {
SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),
- SYS_INSN(TLBI_ALLE2OSNXS, undef_access),
- SYS_INSN(TLBI_VAE2OSNXS, undef_access),
+ SYS_INSN(TLBI_ALLE2OSNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VAE2OSNXS, handle_tlbi_el2),
SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
- SYS_INSN(TLBI_VALE2OSNXS, undef_access),
+ SYS_INSN(TLBI_VALE2OSNXS, handle_tlbi_el2),
SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),
- SYS_INSN(TLBI_RVAE2ISNXS, undef_access),
- SYS_INSN(TLBI_RVALE2ISNXS, undef_access),
- SYS_INSN(TLBI_ALLE2ISNXS, undef_access),
- SYS_INSN(TLBI_VAE2ISNXS, undef_access),
+ SYS_INSN(TLBI_RVAE2ISNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVALE2ISNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_ALLE2ISNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VAE2ISNXS, handle_tlbi_el2),
SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
- SYS_INSN(TLBI_VALE2ISNXS, undef_access),
+ SYS_INSN(TLBI_VALE2ISNXS, handle_tlbi_el2),
SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
@@ -3841,14 +3916,14 @@ static struct sys_reg_desc sys_insn_descs[] = {
SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
- SYS_INSN(TLBI_RVAE2OSNXS, undef_access),
- SYS_INSN(TLBI_RVALE2OSNXS, undef_access),
- SYS_INSN(TLBI_RVAE2NXS, undef_access),
- SYS_INSN(TLBI_RVALE2NXS, undef_access),
- SYS_INSN(TLBI_ALLE2NXS, undef_access),
- SYS_INSN(TLBI_VAE2NXS, undef_access),
+ SYS_INSN(TLBI_RVAE2OSNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVALE2OSNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVAE2NXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVALE2NXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_ALLE2NXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VAE2NXS, handle_tlbi_el2),
SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
- SYS_INSN(TLBI_VALE2NXS, undef_access),
+ SYS_INSN(TLBI_VALE2NXS, handle_tlbi_el2),
SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
};
@@ -5147,65 +5222,13 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
goto out;
- kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 |
- HFGxTR_EL2_nMAIR2_EL1 |
- HFGxTR_EL2_nS2POR_EL1 |
- HFGxTR_EL2_nACCDATA_EL1 |
- HFGxTR_EL2_nSMPRI_EL1_MASK |
- HFGxTR_EL2_nTPIDR2_EL0_MASK);
-
- if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
- kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1OS|
- HFGITR_EL2_TLBIRVALE1OS |
- HFGITR_EL2_TLBIRVAAE1OS |
- HFGITR_EL2_TLBIRVAE1OS |
- HFGITR_EL2_TLBIVAALE1OS |
- HFGITR_EL2_TLBIVALE1OS |
- HFGITR_EL2_TLBIVAAE1OS |
- HFGITR_EL2_TLBIASIDE1OS |
- HFGITR_EL2_TLBIVAE1OS |
- HFGITR_EL2_TLBIVMALLE1OS);
-
- if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
- kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1 |
- HFGITR_EL2_TLBIRVALE1 |
- HFGITR_EL2_TLBIRVAAE1 |
- HFGITR_EL2_TLBIRVAE1 |
- HFGITR_EL2_TLBIRVAALE1IS|
- HFGITR_EL2_TLBIRVALE1IS |
- HFGITR_EL2_TLBIRVAAE1IS |
- HFGITR_EL2_TLBIRVAE1IS |
- HFGITR_EL2_TLBIRVAALE1OS|
- HFGITR_EL2_TLBIRVALE1OS |
- HFGITR_EL2_TLBIRVAAE1OS |
- HFGITR_EL2_TLBIRVAE1OS);
-
- if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP))
- kvm->arch.fgu[HFGITR_GROUP] |= HFGITR_EL2_ATS1E1A;
-
- if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2))
- kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_ATS1E1RP |
- HFGITR_EL2_ATS1E1WP);
-
- if (!kvm_has_s1pie(kvm))
- kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 |
- HFGxTR_EL2_nPIR_EL1);
-
- if (!kvm_has_s1poe(kvm))
- kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPOR_EL1 |
- HFGxTR_EL2_nPOR_EL0);
-
- if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
- kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
- HAFGRTR_EL2_RES1);
-
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP)) {
- kvm->arch.fgu[HDFGRTR_GROUP] |= (HDFGRTR_EL2_nBRBDATA |
- HDFGRTR_EL2_nBRBCTL |
- HDFGRTR_EL2_nBRBIDR);
- kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_nBRBINJ |
- HFGITR_EL2_nBRBIALL);
- }
+ compute_fgu(kvm, HFGRTR_GROUP);
+ compute_fgu(kvm, HFGITR_GROUP);
+ compute_fgu(kvm, HDFGRTR_GROUP);
+ compute_fgu(kvm, HAFGRTR_GROUP);
+ compute_fgu(kvm, HFGRTR2_GROUP);
+ compute_fgu(kvm, HFGITR2_GROUP);
+ compute_fgu(kvm, HDFGRTR2_GROUP);
set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
out:
@@ -5263,6 +5286,8 @@ int __init kvm_sys_reg_table_init(void)
ret = populate_nv_trap_config();
+ check_feature_map();
+
for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
ret = populate_sysreg_config(sys_reg_descs + i, i);
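
Among the sys_regs.c changes, access_mdcr() now writes MDCR_EL2 itself so that it can clamp an out-of-range HPMN before the value is stored. A minimal sketch of that clamping follows, assuming HPMN lives in bits [4:0] of MDCR_EL2 as architected; the helper name and the example numbers are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define MDCR_EL2_HPMN_MASK 0x1fULL   /* HPMN occupies bits [4:0] */

    static uint64_t clamp_hpmn(uint64_t mdcr, uint64_t nr_pmu_counters)
    {
        uint64_t hpmn = mdcr & MDCR_EL2_HPMN_MASK;

        /* An out-of-range HPMN reads as UNKNOWN; pin it to what we emulate. */
        if (hpmn > nr_pmu_counters)
            mdcr = (mdcr & ~MDCR_EL2_HPMN_MASK) | nr_pmu_counters;

        return mdcr;
    }

    int main(void)
    {
        /* The guest writes HPMN = 31, but only 6 counters are implemented. */
        printf("MDCR_EL2 = %#llx\n", (unsigned long long)clamp_hpmn(0x1f, 6));
        return 0;
    }
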
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index c18c1a95831e..9c60f6465c78 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -176,7 +176,7 @@ TRACE_EVENT(kvm_set_way_flush,
),
TP_printk("S/W flush at 0x%016lx (cache %s)",
- __entry->vcpu_pc, __entry->cache ? "on" : "off")
+ __entry->vcpu_pc, str_on_off(__entry->cache))
);
TRACE_EVENT(kvm_toggle_cache,
@@ -196,8 +196,8 @@ TRACE_EVENT(kvm_toggle_cache,
),
TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
- __entry->vcpu_pc, __entry->was ? "on" : "off",
- __entry->now ? "on" : "off")
+ __entry->vcpu_pc, str_on_off(__entry->was),
+ str_on_off(__entry->now))
);
/*
diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
index afb018528bc3..2684f273d9e1 100644
--- a/arch/arm64/kvm/vgic/vgic-debug.c
+++ b/arch/arm64/kvm/vgic/vgic-debug.c
@@ -320,3 +320,230 @@ void vgic_debug_init(struct kvm *kvm)
void vgic_debug_destroy(struct kvm *kvm)
{
}
+
+/**
+ * struct vgic_its_iter - Iterator for traversing VGIC ITS device tables.
+ * @dev: Pointer to the current its_device being processed.
+ * @ite: Pointer to the current its_ite within the device being processed.
+ *
+ * This structure is used to maintain the current position during iteration
+ * over the ITS device tables. It holds pointers to both the current device
+ * and the current ITE within that device.
+ */
+struct vgic_its_iter {
+ struct its_device *dev;
+ struct its_ite *ite;
+};
+
+/**
+ * end_of_iter - Checks if the iterator has reached the end.
+ * @iter: The iterator to check.
+ *
+ * Once the iterator has processed the final ITE of the last device, it is
+ * marked as ended by setting both its device and ITE pointers to NULL.
+ * This function checks whether the iterator has been marked as ended.
+ *
+ * Return: True if the iterator has ended, false otherwise.
+ */
+static inline bool end_of_iter(struct vgic_its_iter *iter)
+{
+ return !iter->dev && !iter->ite;
+}
+
+/**
+ * vgic_its_iter_next - Advances the iterator to the next entry in the ITS tables.
+ * @its: The VGIC ITS structure.
+ * @iter: The iterator to advance.
+ *
+ * This function moves the iterator to the next ITE within the current device,
+ * or to the first ITE of the next device if the current ITE is the last in
+ * the device. If the current device is the last device, the iterator is set
+ * to indicate the end of iteration.
+ */
+static void vgic_its_iter_next(struct vgic_its *its, struct vgic_its_iter *iter)
+{
+ struct its_device *dev = iter->dev;
+ struct its_ite *ite = iter->ite;
+
+ if (!ite || list_is_last(&ite->ite_list, &dev->itt_head)) {
+ if (list_is_last(&dev->dev_list, &its->device_list)) {
+ dev = NULL;
+ ite = NULL;
+ } else {
+ dev = list_next_entry(dev, dev_list);
+ ite = list_first_entry_or_null(&dev->itt_head,
+ struct its_ite,
+ ite_list);
+ }
+ } else {
+ ite = list_next_entry(ite, ite_list);
+ }
+
+ iter->dev = dev;
+ iter->ite = ite;
+}
+
+/**
+ * vgic_its_debug_start - Start function for the seq_file interface.
+ * @s: The seq_file structure.
+ * @pos: The starting position (offset).
+ *
+ * This function initializes the iterator to the beginning of the ITS tables
+ * and advances it to the specified position. It acquires the its_lock mutex
+ * to protect shared data.
+ *
+ * Return: An iterator pointer on success, NULL if no devices are found or
+ * the end of the list is reached, or ERR_PTR(-ENOMEM) on memory
+ * allocation failure.
+ */
+static void *vgic_its_debug_start(struct seq_file *s, loff_t *pos)
+{
+ struct vgic_its *its = s->private;
+ struct vgic_its_iter *iter;
+ struct its_device *dev;
+ loff_t offset = *pos;
+
+ mutex_lock(&its->its_lock);
+
+ dev = list_first_entry_or_null(&its->device_list,
+ struct its_device, dev_list);
+ if (!dev)
+ return NULL;
+
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return ERR_PTR(-ENOMEM);
+
+ iter->dev = dev;
+ iter->ite = list_first_entry_or_null(&dev->itt_head,
+ struct its_ite, ite_list);
+
+ while (!end_of_iter(iter) && offset--)
+ vgic_its_iter_next(its, iter);
+
+ if (end_of_iter(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+/**
+ * vgic_its_debug_next - Next function for the seq_file interface.
+ * @s: The seq_file structure.
+ * @v: The current iterator.
+ * @pos: The current position (offset).
+ *
+ * This function advances the iterator to the next entry and increments the
+ * position.
+ *
+ * Return: An iterator pointer on success, or NULL if the end of the list is
+ * reached.
+ */
+static void *vgic_its_debug_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct vgic_its *its = s->private;
+ struct vgic_its_iter *iter = v;
+
+ ++*pos;
+ vgic_its_iter_next(its, iter);
+
+ if (end_of_iter(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+ return iter;
+}
+
+/**
+ * vgic_its_debug_stop - Stop function for the seq_file interface.
+ * @s: The seq_file structure.
+ * @v: The current iterator.
+ *
+ * This function frees the iterator and releases the its_lock mutex.
+ */
+static void vgic_its_debug_stop(struct seq_file *s, void *v)
+{
+ struct vgic_its *its = s->private;
+ struct vgic_its_iter *iter = v;
+
+ if (!IS_ERR_OR_NULL(iter))
+ kfree(iter);
+ mutex_unlock(&its->its_lock);
+}
+
+/**
+ * vgic_its_debug_show - Show function for the seq_file interface.
+ * @s: The seq_file structure.
+ * @v: The current iterator.
+ *
+ * This function formats and prints the ITS table entry information to the
+ * seq_file output.
+ *
+ * Return: 0 on success.
+ */
+static int vgic_its_debug_show(struct seq_file *s, void *v)
+{
+ struct vgic_its_iter *iter = v;
+ struct its_device *dev = iter->dev;
+ struct its_ite *ite = iter->ite;
+
+ if (!ite)
+ return 0;
+
+ if (list_is_first(&ite->ite_list, &dev->itt_head)) {
+ seq_printf(s, "\n");
+ seq_printf(s, "Device ID: 0x%x, Event ID Range: [0 - %llu]\n",
+ dev->device_id, BIT_ULL(dev->num_eventid_bits) - 1);
+ seq_printf(s, "EVENT_ID INTID HWINTID TARGET COL_ID HW\n");
+ seq_printf(s, "-----------------------------------------------\n");
+ }
+
+ if (ite->irq && ite->collection) {
+ seq_printf(s, "%8u %8u %8u %8u %8u %2d\n",
+ ite->event_id, ite->irq->intid, ite->irq->hwintid,
+ ite->collection->target_addr,
+ ite->collection->collection_id, ite->irq->hw);
+ }
+
+ return 0;
+}
+
+static const struct seq_operations vgic_its_debug_sops = {
+ .start = vgic_its_debug_start,
+ .next = vgic_its_debug_next,
+ .stop = vgic_its_debug_stop,
+ .show = vgic_its_debug_show
+};
+
+DEFINE_SEQ_ATTRIBUTE(vgic_its_debug);
+
+/**
+ * vgic_its_debug_init - Initializes the debugfs interface for VGIC ITS.
+ * @dev: The KVM device structure.
+ *
+ * This function creates a debugfs file named "vgic-its-state@%its_base"
+ * to expose the ITS table information.
+ *
+ * Return: 0 on success, or -ENOMEM if the file name allocation fails.
+ */
+int vgic_its_debug_init(struct kvm_device *dev)
+{
+ struct vgic_its *its = dev->private;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "vgic-its-state@%llx", (u64)its->vgic_its_base);
+ if (!name)
+ return -ENOMEM;
+
+ debugfs_create_file(name, 0444, dev->kvm->debugfs_dentry, its, &vgic_its_debug_fops);
+
+ kfree(name);
+ return 0;
+}
+
+void vgic_its_debug_destroy(struct kvm_device *dev)
+{
+}
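
The new vgic-debug.c code above walks a two-level structure (the device list, then the ITEs of each device) from a seq_file iterator. The traversal logic, reduced to plain arrays so it can run in user space, looks roughly like the sketch below; it assumes the first device is non-empty, which the kernel's start helper handles separately, and all names are invented.

    #include <stddef.h>
    #include <stdio.h>

    struct dev {
        const char *name;
        int nr_entries;
    };

    struct iter {
        size_t di;   /* current device index                  */
        int ei;      /* current entry index within the device */
    };

    /* Advance to the next entry, moving to the next device when one runs out. */
    static int iter_next(struct iter *it, const struct dev *devs, size_t nr_devs)
    {
        if (it->ei + 1 < devs[it->di].nr_entries) {
            it->ei++;
            return 1;
        }
        while (++it->di < nr_devs) {
            if (devs[it->di].nr_entries) {
                it->ei = 0;
                return 1;
            }
        }
        return 0;   /* end of iteration */
    }

    int main(void)
    {
        const struct dev devs[] = { { "dev0", 2 }, { "dev1", 0 }, { "dev2", 1 } };
        struct iter it = { 0, 0 };   /* assumes devs[0] has at least one entry */

        do {
            printf("%s: entry %d\n", devs[it.di].name, it.ei);
        } while (iter_next(&it, devs, 3));

        return 0;
    }
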
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 1f33e71c2a73..eb1205654ac8 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -84,15 +84,40 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
!kvm_vgic_global_state.can_emulate_gicv2)
return -ENODEV;
- /* Must be held to avoid race with vCPU creation */
+ /*
+ * Ensure mutual exclusion with vCPU creation and any vCPU ioctls by:
+ *
+ * - Holding kvm->lock to prevent KVM_CREATE_VCPU from reaching
+ * kvm_arch_vcpu_precreate() and ensuring created_vcpus is stable.
+ * This alone is insufficient, as kvm_vm_ioctl_create_vcpu() drops
+ * the kvm->lock before completing the vCPU creation.
+ */
lockdep_assert_held(&kvm->lock);
+ /*
+ * - Acquiring the vCPU mutex for every *online* vCPU to prevent
+ * concurrent vCPU ioctls for vCPUs already visible to userspace.
+ */
ret = -EBUSY;
- if (!lock_all_vcpus(kvm))
+ if (kvm_trylock_all_vcpus(kvm))
return ret;
+ /*
+ * - Taking the config_lock which protects VGIC data structures such
+ * as the per-vCPU arrays of private IRQs (SGIs, PPIs).
+ */
mutex_lock(&kvm->arch.config_lock);
+ /*
+ * - Bailing on the entire thing if a vCPU is in the middle of creation,
+ * dropped the kvm->lock, but hasn't reached kvm_arch_vcpu_create().
+ *
+ * The whole combination of this guarantees that no vCPU can get into
+ * KVM with a VGIC configuration inconsistent with the VM's VGIC.
+ */
+ if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
+ goto out_unlock;
+
if (irqchip_in_kernel(kvm)) {
ret = -EEXIST;
goto out_unlock;
@@ -142,7 +167,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
out_unlock:
mutex_unlock(&kvm->arch.config_lock);
- unlock_all_vcpus(kvm);
+ kvm_unlock_all_vcpus(kvm);
return ret;
}
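
The vgic-init.c hunk above switches kvm_vgic_create() to kvm_trylock_all_vcpus(), which either takes every online vCPU's mutex or takes none of them. The general "trylock everything or back out" shape of such a helper is sketched below with pthread mutexes; this illustrates the pattern, not the kernel implementation.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    #define NR_VCPUS 4

    static pthread_mutex_t vcpu_lock[NR_VCPUS];

    /* Take every vCPU lock, or take none of them and report -EBUSY. */
    static int trylock_all_vcpus(void)
    {
        for (int i = 0; i < NR_VCPUS; i++) {
            if (pthread_mutex_trylock(&vcpu_lock[i])) {
                while (i--)                       /* unwind, newest first */
                    pthread_mutex_unlock(&vcpu_lock[i]);
                return -EBUSY;
            }
        }
        return 0;
    }

    static void unlock_all_vcpus(void)
    {
        for (int i = NR_VCPUS - 1; i >= 0; i--)
            pthread_mutex_unlock(&vcpu_lock[i]);
    }

    int main(void)
    {
        for (int i = 0; i < NR_VCPUS; i++)
            pthread_mutex_init(&vcpu_lock[i], NULL);

        if (!trylock_all_vcpus()) {
            printf("all vCPU locks held\n");
            unlock_all_vcpus();
        }
        return 0;
    }
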
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index fb96802799c6..534049c7c94b 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -154,36 +154,6 @@ out_unlock:
return irq;
}
-struct its_device {
- struct list_head dev_list;
-
- /* the head for the list of ITTEs */
- struct list_head itt_head;
- u32 num_eventid_bits;
- gpa_t itt_addr;
- u32 device_id;
-};
-
-#define COLLECTION_NOT_MAPPED ((u32)~0)
-
-struct its_collection {
- struct list_head coll_list;
-
- u32 collection_id;
- u32 target_addr;
-};
-
-#define its_is_collection_mapped(coll) ((coll) && \
- ((coll)->target_addr != COLLECTION_NOT_MAPPED))
-
-struct its_ite {
- struct list_head ite_list;
-
- struct vgic_irq *irq;
- struct its_collection *collection;
- u32 event_id;
-};
-
/**
* struct vgic_its_abi - ITS abi ops and settings
* @cte_esz: collection table entry size
@@ -336,39 +306,34 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
}
}
- raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-
if (irq->hw)
- return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
+ ret = its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
- return 0;
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ return ret;
}
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
- int ret = 0;
- unsigned long flags;
+ struct its_vlpi_map map;
+ int ret;
- raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ guard(raw_spinlock_irqsave)(&irq->irq_lock);
irq->target_vcpu = vcpu;
- raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
- if (irq->hw) {
- struct its_vlpi_map map;
+ if (!irq->hw)
+ return 0;
- ret = its_get_vlpi(irq->host_irq, &map);
- if (ret)
- return ret;
+ ret = its_get_vlpi(irq->host_irq, &map);
+ if (ret)
+ return ret;
- if (map.vpe)
- atomic_dec(&map.vpe->vlpi_count);
- map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
- atomic_inc(&map.vpe->vlpi_count);
+ if (map.vpe)
+ atomic_dec(&map.vpe->vlpi_count);
- ret = its_map_vlpi(irq->host_irq, &map);
- }
-
- return ret;
+ map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+ atomic_inc(&map.vpe->vlpi_count);
+ return its_map_vlpi(irq->host_irq, &map);
}
static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
@@ -786,12 +751,17 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
+ struct vgic_irq *irq = ite->irq;
list_del(&ite->ite_list);
/* This put matches the get in vgic_add_lpi. */
- if (ite->irq) {
- if (ite->irq->hw)
- WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+ if (irq) {
+ scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
+ if (irq->hw)
+ WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+
+ irq->hw = false;
+ }
vgic_put_irq(kvm, ite->irq);
}
@@ -1938,6 +1908,8 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev)
mutex_lock(&its->its_lock);
+ vgic_its_debug_destroy(kvm_dev);
+
vgic_its_free_device_list(kvm, its);
vgic_its_free_collection_list(kvm, its);
vgic_its_invalidate_cache(its);
@@ -1999,7 +1971,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock);
- if (!lock_all_vcpus(dev->kvm)) {
+ if (kvm_trylock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
@@ -2034,7 +2006,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
}
out:
mutex_unlock(&dev->kvm->arch.config_lock);
- unlock_all_vcpus(dev->kvm);
+ kvm_unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return ret;
}
@@ -2704,7 +2676,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
mutex_lock(&kvm->lock);
- if (!lock_all_vcpus(kvm)) {
+ if (kvm_trylock_all_vcpus(kvm)) {
mutex_unlock(&kvm->lock);
return -EBUSY;
}
@@ -2726,7 +2698,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
mutex_unlock(&its->its_lock);
mutex_unlock(&kvm->arch.config_lock);
- unlock_all_vcpus(kvm);
+ kvm_unlock_all_vcpus(kvm);
mutex_unlock(&kvm->lock);
return ret;
}
@@ -2771,7 +2743,12 @@ static int vgic_its_set_attr(struct kvm_device *dev,
if (ret)
return ret;
- return vgic_register_its_iodev(dev->kvm, its, addr);
+ ret = vgic_register_its_iodev(dev->kvm, its, addr);
+ if (ret)
+ return ret;
+
+ return vgic_its_debug_init(dev);
+
}
case KVM_DEV_ARM_VGIC_GRP_CTRL:
return vgic_its_ctrl(dev->kvm, its, attr->attr);
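
Several vgic-its.c hunks above move to guard()/scoped_guard(), which tie lock ownership to a scope so that every return path drops the lock. The same idea can be reproduced in user space with the compiler's cleanup attribute (GCC/Clang), as in the hedged sketch below; the function names and the pthread mutex are illustrative stand-ins, not the kernel's primitives.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void unlock_on_exit(pthread_mutex_t **m)
    {
        pthread_mutex_unlock(*m);
        printf("lock dropped on scope exit\n");
    }

    /* Every return path below releases the lock, early returns included. */
    static int update(int new_state, int hw)
    {
        pthread_mutex_lock(&lock);
        pthread_mutex_t *guard __attribute__((cleanup(unlock_on_exit))) = &lock;
        (void)guard;

        if (!hw)
            return 0;

        return new_state;
    }

    int main(void)
    {
        printf("%d\n", update(1, 0));
        printf("%d\n", update(1, 1));
        return 0;
    }
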
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index 359094f68c23..f9ae790163fb 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -268,7 +268,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
return -ENXIO;
mutex_lock(&dev->kvm->lock);
- if (!lock_all_vcpus(dev->kvm)) {
+ if (kvm_trylock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
@@ -276,7 +276,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
mutex_lock(&dev->kvm->arch.config_lock);
r = vgic_v3_save_pending_tables(dev->kvm);
mutex_unlock(&dev->kvm->arch.config_lock);
- unlock_all_vcpus(dev->kvm);
+ kvm_unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return r;
}
@@ -390,7 +390,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock);
- if (!lock_all_vcpus(dev->kvm)) {
+ if (kvm_trylock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
@@ -415,7 +415,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
out:
mutex_unlock(&dev->kvm->arch.config_lock);
- unlock_all_vcpus(dev->kvm);
+ kvm_unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
if (!ret && !is_write)
@@ -554,7 +554,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock);
- if (!lock_all_vcpus(dev->kvm)) {
+ if (kvm_trylock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
@@ -611,7 +611,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
out:
mutex_unlock(&dev->kvm->arch.config_lock);
- unlock_all_vcpus(dev->kvm);
+ kvm_unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
if (!ret && uaccess && !is_write) {
diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c
index bfa5bde1f106..4f6954c30674 100644
--- a/arch/arm64/kvm/vgic/vgic-v3-nested.c
+++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c
@@ -240,9 +240,6 @@ static void vgic_v3_create_shadow_lr(struct kvm_vcpu *vcpu,
goto next;
}
- /* It is illegal to have the EOI bit set with HW */
- lr &= ~ICH_LR_EOI;
-
/* Translate the virtual mapping to the real one */
lr &= ~ICH_LR_PHYS_ID_MASK;
lr |= FIELD_PREP(ICH_LR_PHYS_ID_MASK, (u64)irq->hwintid);
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index c7de6154627c..193946108192 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -444,7 +444,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
if (IS_ERR(its))
return 0;
- mutex_lock(&its->its_lock);
+ guard(mutex)(&its->its_lock);
/*
* Perform the actual DevID/EventID -> LPI translation.
@@ -455,11 +455,13 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
*/
if (vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
irq_entry->msi.data, &irq))
- goto out;
+ return 0;
+
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
/* Silently exit if the vLPI is already mapped */
if (irq->hw)
- goto out;
+ goto out_unlock_irq;
/*
* Emit the mapping request. If it fails, the ITS probably
@@ -479,68 +481,74 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
ret = its_map_vlpi(virq, &map);
if (ret)
- goto out;
+ goto out_unlock_irq;
irq->hw = true;
irq->host_irq = virq;
atomic_inc(&map.vpe->vlpi_count);
/* Transfer pending state */
- raw_spin_lock_irqsave(&irq->irq_lock, flags);
- if (irq->pending_latch) {
- ret = irq_set_irqchip_state(irq->host_irq,
- IRQCHIP_STATE_PENDING,
- irq->pending_latch);
- WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+ if (!irq->pending_latch)
+ goto out_unlock_irq;
- /*
- * Clear pending_latch and communicate this state
- * change via vgic_queue_irq_unlock.
- */
- irq->pending_latch = false;
- vgic_queue_irq_unlock(kvm, irq, flags);
- } else {
- raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
- }
+ ret = irq_set_irqchip_state(irq->host_irq, IRQCHIP_STATE_PENDING,
+ irq->pending_latch);
+ WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
-out:
- mutex_unlock(&its->its_lock);
+ /*
+ * Clear pending_latch and communicate this state
+ * change via vgic_queue_irq_unlock.
+ */
+ irq->pending_latch = false;
+ vgic_queue_irq_unlock(kvm, irq, flags);
+ return ret;
+
+out_unlock_irq:
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
return ret;
}
-int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
- struct kvm_kernel_irq_routing_entry *irq_entry)
+static struct vgic_irq *__vgic_host_irq_get_vlpi(struct kvm *kvm, int host_irq)
{
- struct vgic_its *its;
struct vgic_irq *irq;
- int ret;
+ unsigned long idx;
+
+ guard(rcu)();
+ xa_for_each(&kvm->arch.vgic.lpi_xa, idx, irq) {
+ if (!irq->hw || irq->host_irq != host_irq)
+ continue;
+
+ if (!vgic_try_get_irq_kref(irq))
+ return NULL;
+
+ return irq;
+ }
+
+ return NULL;
+}
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq)
+{
+ struct vgic_irq *irq;
+ unsigned long flags;
+ int ret = 0;
if (!vgic_supports_direct_msis(kvm))
return 0;
- /*
- * Get the ITS, and escape early on error (not a valid
- * doorbell for any of our vITSs).
- */
- its = vgic_get_its(kvm, irq_entry);
- if (IS_ERR(its))
+ irq = __vgic_host_irq_get_vlpi(kvm, host_irq);
+ if (!irq)
return 0;
- mutex_lock(&its->its_lock);
-
- ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
- irq_entry->msi.data, &irq);
- if (ret)
- goto out;
-
- WARN_ON(irq->hw && irq->host_irq != virq);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ WARN_ON(irq->hw && irq->host_irq != host_irq);
if (irq->hw) {
atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
irq->hw = false;
- ret = its_unmap_vlpi(virq);
+ ret = its_unmap_vlpi(host_irq);
}
-out:
- mutex_unlock(&its->its_lock);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(kvm, irq);
return ret;
}
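
kvm_vgic_v4_unset_forwarding() above no longer re-resolves the devid/eventid pair through the ITS; it scans the LPI table for the entry whose host interrupt matches. A simplified sketch of that reverse lookup follows, with a plain array standing in for the kernel's xarray and all names invented.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct lpi {
        unsigned int intid;
        bool hw;          /* forwarded directly to hardware? */
        int host_irq;     /* host interrupt backing the vLPI */
    };

    /* Find the vLPI (if any) that is forwarded from a given host interrupt. */
    static struct lpi *find_vlpi(struct lpi *lpis, size_t n, int host_irq)
    {
        for (size_t i = 0; i < n; i++) {
            if (lpis[i].hw && lpis[i].host_irq == host_irq)
                return &lpis[i];
        }
        return NULL;
    }

    int main(void)
    {
        struct lpi lpis[] = {
            { 8192, false, -1 },
            { 8193, true,  75 },
        };
        struct lpi *found = find_vlpi(lpis, 2, 75);

        printf("intid %u\n", found ? found->intid : 0);
        return 0;
    }
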
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 0c5a63712702..4349084cb9a6 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -172,6 +172,36 @@ struct vgic_reg_attr {
gpa_t addr;
};
+struct its_device {
+ struct list_head dev_list;
+
+ /* the head for the list of ITTEs */
+ struct list_head itt_head;
+ u32 num_eventid_bits;
+ gpa_t itt_addr;
+ u32 device_id;
+};
+
+#define COLLECTION_NOT_MAPPED ((u32)~0)
+
+struct its_collection {
+ struct list_head coll_list;
+
+ u32 collection_id;
+ u32 target_addr;
+};
+
+#define its_is_collection_mapped(coll) ((coll) && \
+ ((coll)->target_addr != COLLECTION_NOT_MAPPED))
+
+struct its_ite {
+ struct list_head ite_list;
+
+ struct vgic_irq *irq;
+ struct its_collection *collection;
+ u32 event_id;
+};
+
int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
struct vgic_reg_attr *reg_attr);
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
@@ -359,4 +389,7 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu);
void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu);
void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu);
+int vgic_its_debug_init(struct kvm_device *dev);
+void vgic_its_debug_destroy(struct kvm_device *dev);
+
#endif
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 4d49dff721a8..027bfa9689c6 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+
+obj-y += crypto/
+
lib-y := clear_user.o delay.o copy_from_user.o \
copy_to_user.o copy_page.o \
clear_page.o csum.o insn.o memchr.o memcpy.o \
@@ -14,10 +17,10 @@ endif
lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
obj-$(CONFIG_CRC32_ARCH) += crc32-arm64.o
-crc32-arm64-y := crc32.o crc32-glue.o
+crc32-arm64-y := crc32.o crc32-core.o
obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-arm64.o
-crc-t10dif-arm64-y := crc-t10dif-glue.o crc-t10dif-core.o
+crc-t10dif-arm64-y := crc-t10dif.o crc-t10dif-core.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/arm64/lib/crc-t10dif-glue.c b/arch/arm64/lib/crc-t10dif.c
index bacd18f23168..c2ffe4fdb59d 100644
--- a/arch/arm64/lib/crc-t10dif-glue.c
+++ b/arch/arm64/lib/crc-t10dif.c
@@ -17,8 +17,8 @@
#include <asm/neon.h>
#include <asm/simd.h>
-static DEFINE_STATIC_KEY_FALSE(have_asimd);
-static DEFINE_STATIC_KEY_FALSE(have_pmull);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_asimd);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pmull);
#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
@@ -61,7 +61,7 @@ static int __init crc_t10dif_arm64_init(void)
}
return 0;
}
-arch_initcall(crc_t10dif_arm64_init);
+subsys_initcall(crc_t10dif_arm64_init);
static void __exit crc_t10dif_arm64_exit(void)
{
diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32-core.S
index 68825317460f..68825317460f 100644
--- a/arch/arm64/lib/crc32.S
+++ b/arch/arm64/lib/crc32-core.S
diff --git a/arch/arm64/lib/crc32-glue.c b/arch/arm64/lib/crc32.c
index ed3acd71178f..ed3acd71178f 100644
--- a/arch/arm64/lib/crc32-glue.c
+++ b/arch/arm64/lib/crc32.c
diff --git a/arch/arm64/lib/crypto/.gitignore b/arch/arm64/lib/crypto/.gitignore
new file mode 100644
index 000000000000..12d74d8b03d0
--- /dev/null
+++ b/arch/arm64/lib/crypto/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+poly1305-core.S
+sha256-core.S
diff --git a/arch/arm64/lib/crypto/Kconfig b/arch/arm64/lib/crypto/Kconfig
new file mode 100644
index 000000000000..129a7685cb4c
--- /dev/null
+++ b/arch/arm64/lib/crypto/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CRYPTO_CHACHA20_NEON
+ tristate
+ depends on KERNEL_MODE_NEON
+ default CRYPTO_LIB_CHACHA
+ select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+
+config CRYPTO_POLY1305_NEON
+ tristate
+ depends on KERNEL_MODE_NEON
+ default CRYPTO_LIB_POLY1305
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
+
+config CRYPTO_SHA256_ARM64
+ tristate
+ default CRYPTO_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
diff --git a/arch/arm64/lib/crypto/Makefile b/arch/arm64/lib/crypto/Makefile
new file mode 100644
index 000000000000..946c09903711
--- /dev/null
+++ b/arch/arm64/lib/crypto/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
+chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o
+
+obj-$(CONFIG_CRYPTO_POLY1305_NEON) += poly1305-neon.o
+poly1305-neon-y := poly1305-core.o poly1305-glue.o
+AFLAGS_poly1305-core.o += -Dpoly1305_init=poly1305_block_init_arch
+AFLAGS_poly1305-core.o += -Dpoly1305_emit=poly1305_emit_arch
+
+obj-$(CONFIG_CRYPTO_SHA256_ARM64) += sha256-arm64.o
+sha256-arm64-y := sha256.o sha256-core.o
+sha256-arm64-$(CONFIG_KERNEL_MODE_NEON) += sha256-ce.o
+
+quiet_cmd_perlasm = PERLASM $@
+ cmd_perlasm = $(PERL) $(<) void $(@)
+
+$(obj)/%-core.S: $(src)/%-armv8.pl
+ $(call cmd,perlasm)
+
+$(obj)/sha256-core.S: $(src)/sha2-armv8.pl
+ $(call cmd,perlasm)
+
+clean-files += poly1305-core.S sha256-core.S
diff --git a/arch/arm64/crypto/chacha-neon-core.S b/arch/arm64/lib/crypto/chacha-neon-core.S
index b70ac76f2610..80079586ecc7 100644
--- a/arch/arm64/crypto/chacha-neon-core.S
+++ b/arch/arm64/lib/crypto/chacha-neon-core.S
@@ -1,5 +1,5 @@
/*
- * ChaCha/XChaCha NEON helper functions
+ * ChaCha/HChaCha NEON helper functions
*
* Copyright (C) 2016-2018 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
diff --git a/arch/arm64/lib/crypto/chacha-neon-glue.c b/arch/arm64/lib/crypto/chacha-neon-glue.c
new file mode 100644
index 000000000000..d0188f974ca5
--- /dev/null
+++ b/arch/arm64/lib/crypto/chacha-neon-glue.c
@@ -0,0 +1,119 @@
+/*
+ * ChaCha and HChaCha functions (ARM64 optimized)
+ *
+ * Copyright (C) 2016 - 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on:
+ * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+asmlinkage void chacha_block_xor_neon(const struct chacha_state *state,
+ u8 *dst, const u8 *src, int nrounds);
+asmlinkage void chacha_4block_xor_neon(const struct chacha_state *state,
+ u8 *dst, const u8 *src,
+ int nrounds, int bytes);
+asmlinkage void hchacha_block_neon(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+
+static void chacha_doneon(struct chacha_state *state, u8 *dst, const u8 *src,
+ int bytes, int nrounds)
+{
+ while (bytes > 0) {
+ int l = min(bytes, CHACHA_BLOCK_SIZE * 5);
+
+ if (l <= CHACHA_BLOCK_SIZE) {
+ u8 buf[CHACHA_BLOCK_SIZE];
+
+ memcpy(buf, src, l);
+ chacha_block_xor_neon(state, buf, buf, nrounds);
+ memcpy(dst, buf, l);
+ state->x[12] += 1;
+ break;
+ }
+ chacha_4block_xor_neon(state, dst, src, nrounds, l);
+ bytes -= l;
+ src += l;
+ dst += l;
+ state->x[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE);
+ }
+}
+
+void hchacha_block_arch(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds)
+{
+ if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) {
+ hchacha_block_generic(state, out, nrounds);
+ } else {
+ kernel_neon_begin();
+ hchacha_block_neon(state, out, nrounds);
+ kernel_neon_end();
+ }
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ if (!static_branch_likely(&have_neon) || bytes <= CHACHA_BLOCK_SIZE ||
+ !crypto_simd_usable())
+ return chacha_crypt_generic(state, dst, src, bytes, nrounds);
+
+ do {
+ unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+ kernel_neon_begin();
+ chacha_doneon(state, dst, src, todo, nrounds);
+ kernel_neon_end();
+
+ bytes -= todo;
+ src += todo;
+ dst += todo;
+ } while (bytes);
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+bool chacha_is_arch_optimized(void)
+{
+ return static_key_enabled(&have_neon);
+}
+EXPORT_SYMBOL(chacha_is_arch_optimized);
+
+static int __init chacha_simd_mod_init(void)
+{
+ if (cpu_have_named_feature(ASIMD))
+ static_branch_enable(&have_neon);
+ return 0;
+}
+subsys_initcall(chacha_simd_mod_init);
+
+static void __exit chacha_simd_mod_exit(void)
+{
+}
+module_exit(chacha_simd_mod_exit);
+
+MODULE_DESCRIPTION("ChaCha and HChaCha functions (ARM64 optimized)");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/crypto/poly1305-armv8.pl b/arch/arm64/lib/crypto/poly1305-armv8.pl
index 22c9069c0650..22c9069c0650 100644
--- a/arch/arm64/crypto/poly1305-armv8.pl
+++ b/arch/arm64/lib/crypto/poly1305-armv8.pl
diff --git a/arch/arm64/lib/crypto/poly1305-glue.c b/arch/arm64/lib/crypto/poly1305-glue.c
new file mode 100644
index 000000000000..6a661cf04821
--- /dev/null
+++ b/arch/arm64/lib/crypto/poly1305-glue.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OpenSSL/Cryptogams accelerated Poly1305 transform for arm64
+ *
+ * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <crypto/internal/poly1305.h>
+#include <linux/cpufeature.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/unaligned.h>
+
+asmlinkage void poly1305_block_init_arch(
+ struct poly1305_block_state *state,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+EXPORT_SYMBOL_GPL(poly1305_block_init_arch);
+asmlinkage void poly1305_blocks(struct poly1305_block_state *state,
+ const u8 *src, u32 len, u32 hibit);
+asmlinkage void poly1305_blocks_neon(struct poly1305_block_state *state,
+ const u8 *src, u32 len, u32 hibit);
+asmlinkage void poly1305_emit_arch(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4]);
+EXPORT_SYMBOL_GPL(poly1305_emit_arch);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+
+void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
+ unsigned int len, u32 padbit)
+{
+ len = round_down(len, POLY1305_BLOCK_SIZE);
+ if (static_branch_likely(&have_neon)) {
+ do {
+ unsigned int todo = min_t(unsigned int, len, SZ_4K);
+
+ kernel_neon_begin();
+ poly1305_blocks_neon(state, src, todo, 1);
+ kernel_neon_end();
+
+ len -= todo;
+ src += todo;
+ } while (len);
+ } else
+ poly1305_blocks(state, src, len, 1);
+}
+EXPORT_SYMBOL_GPL(poly1305_blocks_arch);
+
+bool poly1305_is_arch_optimized(void)
+{
+ /* We always can use at least the ARM64 scalar implementation. */
+ return true;
+}
+EXPORT_SYMBOL(poly1305_is_arch_optimized);
+
+static int __init neon_poly1305_mod_init(void)
+{
+ if (cpu_have_named_feature(ASIMD))
+ static_branch_enable(&have_neon);
+ return 0;
+}
+subsys_initcall(neon_poly1305_mod_init);
+
+static void __exit neon_poly1305_mod_exit(void)
+{
+}
+module_exit(neon_poly1305_mod_exit);
+
+MODULE_DESCRIPTION("Poly1305 authenticator (ARM64 optimized)");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/crypto/sha512-armv8.pl b/arch/arm64/lib/crypto/sha2-armv8.pl
index 35ec9ae99fe1..4aebd20c498b 100644
--- a/arch/arm64/crypto/sha512-armv8.pl
+++ b/arch/arm64/lib/crypto/sha2-armv8.pl
@@ -95,7 +95,7 @@ if ($output =~ /512/) {
$reg_t="w";
}
-$func="sha${BITS}_block_data_order";
+$func="sha${BITS}_blocks_arch";
($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/lib/crypto/sha256-ce.S
index fce84d88ddb2..f3e21c6d87d2 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/lib/crypto/sha256-ce.S
@@ -71,8 +71,8 @@
.word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
/*
- * int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
- * int blocks)
+ * size_t __sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
+ * const u8 *data, size_t nblocks);
*/
.text
SYM_FUNC_START(__sha256_ce_transform)
@@ -86,20 +86,16 @@ SYM_FUNC_START(__sha256_ce_transform)
/* load state */
ld1 {dgav.4s, dgbv.4s}, [x0]
- /* load sha256_ce_state::finalize */
- ldr_l w4, sha256_ce_offsetof_finalize, x4
- ldr w4, [x0, x4]
-
/* load input */
0: ld1 {v16.4s-v19.4s}, [x1], #64
- sub w2, w2, #1
+ sub x2, x2, #1
CPU_LE( rev32 v16.16b, v16.16b )
CPU_LE( rev32 v17.16b, v17.16b )
CPU_LE( rev32 v18.16b, v18.16b )
CPU_LE( rev32 v19.16b, v19.16b )
-1: add t0.4s, v16.4s, v0.4s
+ add t0.4s, v16.4s, v0.4s
mov dg0v.16b, dgav.16b
mov dg1v.16b, dgbv.16b
@@ -127,31 +123,14 @@ CPU_LE( rev32 v19.16b, v19.16b )
add dgav.4s, dgav.4s, dg0v.4s
add dgbv.4s, dgbv.4s, dg1v.4s
- /* handled all input blocks? */
- cbz w2, 2f
- cond_yield 3f, x5, x6
- b 0b
+ /* return early if voluntary preemption is needed */
+ cond_yield 1f, x5, x6
- /*
- * Final block: add padding and total bit count.
- * Skip if the input size was not a round multiple of the block size,
- * the padding is handled by the C code in that case.
- */
-2: cbz x4, 3f
- ldr_l w4, sha256_ce_offsetof_count, x4
- ldr x4, [x0, x4]
- movi v17.2d, #0
- mov x8, #0x80000000
- movi v18.2d, #0
- ror x7, x4, #29 // ror(lsl(x4, 3), 32)
- fmov d16, x8
- mov x4, #0
- mov v19.d[0], xzr
- mov v19.d[1], x7
- b 1b
+ /* handled all input blocks? */
+ cbnz x2, 0b
/* store new state */
-3: st1 {dgav.4s, dgbv.4s}, [x0]
- mov w0, w2
+1: st1 {dgav.4s, dgbv.4s}, [x0]
+ mov x0, x2
ret
SYM_FUNC_END(__sha256_ce_transform)
diff --git a/arch/arm64/lib/crypto/sha256.c b/arch/arm64/lib/crypto/sha256.c
new file mode 100644
index 000000000000..bcf7a3adc0c4
--- /dev/null
+++ b/arch/arm64/lib/crypto/sha256.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-256 optimized for ARM64
+ *
+ * Copyright 2025 Google LLC
+ */
+#include <asm/neon.h>
+#include <crypto/internal/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+asmlinkage void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+asmlinkage void sha256_block_neon(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+asmlinkage size_t __sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
+
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks)
+{
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
+ static_branch_likely(&have_neon)) {
+ if (static_branch_likely(&have_ce)) {
+ do {
+ size_t rem;
+
+ kernel_neon_begin();
+ rem = __sha256_ce_transform(state,
+ data, nblocks);
+ kernel_neon_end();
+ data += (nblocks - rem) * SHA256_BLOCK_SIZE;
+ nblocks = rem;
+ } while (nblocks);
+ } else {
+ kernel_neon_begin();
+ sha256_block_neon(state, data, nblocks);
+ kernel_neon_end();
+ }
+ } else {
+ sha256_blocks_arch(state, data, nblocks);
+ }
+}
+EXPORT_SYMBOL_GPL(sha256_blocks_simd);
+
+bool sha256_is_arch_optimized(void)
+{
+ /* We always can use at least the ARM64 scalar implementation. */
+ return true;
+}
+EXPORT_SYMBOL_GPL(sha256_is_arch_optimized);
+
+static int __init sha256_arm64_mod_init(void)
+{
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
+ cpu_have_named_feature(ASIMD)) {
+ static_branch_enable(&have_neon);
+ if (cpu_have_named_feature(SHA2))
+ static_branch_enable(&have_ce);
+ }
+ return 0;
+}
+subsys_initcall(sha256_arm64_mod_init);
+
+static void __exit sha256_arm64_mod_exit(void)
+{
+}
+module_exit(sha256_arm64_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-256 optimized for ARM64");
diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
index 9bef696e2230..4e298baddc2e 100644
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/printk.h>
@@ -1500,43 +1501,41 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}
-u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
+static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type)
{
- u32 opt;
- u32 insn;
-
switch (type) {
case AARCH64_INSN_MB_SY:
- opt = 0xf;
- break;
+ return 0xf;
case AARCH64_INSN_MB_ST:
- opt = 0xe;
- break;
+ return 0xe;
case AARCH64_INSN_MB_LD:
- opt = 0xd;
- break;
+ return 0xd;
case AARCH64_INSN_MB_ISH:
- opt = 0xb;
- break;
+ return 0xb;
case AARCH64_INSN_MB_ISHST:
- opt = 0xa;
- break;
+ return 0xa;
case AARCH64_INSN_MB_ISHLD:
- opt = 0x9;
- break;
+ return 0x9;
case AARCH64_INSN_MB_NSH:
- opt = 0x7;
- break;
+ return 0x7;
case AARCH64_INSN_MB_NSHST:
- opt = 0x6;
- break;
+ return 0x6;
case AARCH64_INSN_MB_NSHLD:
- opt = 0x5;
- break;
+ return 0x5;
default:
- pr_err("%s: unknown dmb type %d\n", __func__, type);
+ pr_err("%s: unknown barrier type %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
+}
+
+u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
+{
+ u32 opt;
+ u32 insn;
+
+ opt = __get_barrier_crm_val(type);
+ if (opt == AARCH64_BREAK_FAULT)
+ return AARCH64_BREAK_FAULT;
insn = aarch64_insn_get_dmb_value();
insn &= ~GENMASK(11, 8);
@@ -1545,6 +1544,21 @@ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
return insn;
}
+u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type)
+{
+ u32 opt, insn;
+
+ opt = __get_barrier_crm_val(type);
+ if (opt == AARCH64_BREAK_FAULT)
+ return AARCH64_BREAK_FAULT;
+
+ insn = aarch64_insn_get_dsb_base_value();
+ insn &= ~GENMASK(11, 8);
+ insn |= (opt << 8);
+
+ return insn;
+}
+
u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
enum aarch64_insn_system_register sysreg)
{
diff --git a/arch/arm64/lib/xor-neon.c b/arch/arm64/lib/xor-neon.c
index f9a53b7f9842..8fffebfa17b2 100644
--- a/arch/arm64/lib/xor-neon.c
+++ b/arch/arm64/lib/xor-neon.c
@@ -319,7 +319,7 @@ static void xor_arm64_eor3_5(unsigned long bytes,
static int __init xor_neon_init(void)
{
- if (IS_ENABLED(CONFIG_AS_HAS_SHA3) && cpu_have_named_feature(SHA3)) {
+ if (cpu_have_named_feature(SHA3)) {
xor_block_inner_neon.do_3 = xor_arm64_eor3_3;
xor_block_inner_neon.do_4 = xor_arm64_eor3_4;
xor_block_inner_neon.do_5 = xor_arm64_eor3_5;
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index cfe8cb8ba1cc..0c8737f4f2ce 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -129,7 +129,7 @@ pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
if (!pte_present(orig_pte) || !pte_cont(orig_pte))
return orig_pte;
- ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
+ ncontig = find_num_contig(mm, addr, ptep, &pgsize);
for (i = 0; i < ncontig; i++, ptep++) {
pte_t pte = __ptep_get(ptep);
@@ -159,12 +159,11 @@ static pte_t get_clear_contig(struct mm_struct *mm,
pte_t pte, tmp_pte;
bool present;
- pte = __ptep_get_and_clear(mm, addr, ptep);
+ pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize);
present = pte_present(pte);
while (--ncontig) {
ptep++;
- addr += pgsize;
- tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
+ tmp_pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize);
if (present) {
if (pte_dirty(tmp_pte))
pte = pte_mkdirty(pte);
@@ -183,8 +182,9 @@ static pte_t get_clear_contig_flush(struct mm_struct *mm,
{
pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+ unsigned long end = addr + (pgsize * ncontig);
- flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
+ __flush_hugetlb_tlb_range(&vma, addr, end, pgsize, true);
return orig_pte;
}
@@ -207,9 +207,12 @@ static void clear_flush(struct mm_struct *mm,
unsigned long i, saddr = addr;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
- __ptep_get_and_clear(mm, addr, ptep);
+ __ptep_get_and_clear_anysz(mm, ptep, pgsize);
- flush_tlb_range(&vma, saddr, addr);
+ if (mm == &init_mm)
+ flush_tlb_kernel_range(saddr, addr);
+ else
+ __flush_hugetlb_tlb_range(&vma, saddr, addr, pgsize, true);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -218,30 +221,20 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
size_t pgsize;
int i;
int ncontig;
- unsigned long pfn, dpfn;
- pgprot_t hugeprot;
ncontig = num_contig_ptes(sz, &pgsize);
if (!pte_present(pte)) {
for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
- __set_ptes(mm, addr, ptep, pte, 1);
- return;
- }
-
- if (!pte_cont(pte)) {
- __set_ptes(mm, addr, ptep, pte, 1);
+ __set_ptes_anysz(mm, ptep, pte, 1, pgsize);
return;
}
- pfn = pte_pfn(pte);
- dpfn = pgsize >> PAGE_SHIFT;
- hugeprot = pte_pgprot(pte);
-
- clear_flush(mm, addr, ptep, pgsize, ncontig);
+ /* Only need to "break" if transitioning valid -> valid. */
+ if (pte_cont(pte) && pte_valid(__ptep_get(ptep)))
+ clear_flush(mm, addr, ptep, pgsize, ncontig);
- for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
- __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
+ __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
}
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -431,23 +424,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
- int ncontig, i;
+ int ncontig;
size_t pgsize = 0;
- unsigned long pfn = pte_pfn(pte), dpfn;
struct mm_struct *mm = vma->vm_mm;
- pgprot_t hugeprot;
pte_t orig_pte;
+ VM_WARN_ON(!pte_present(pte));
+
if (!pte_cont(pte))
return __ptep_set_access_flags(vma, addr, ptep, pte, dirty);
- ncontig = find_num_contig(mm, addr, ptep, &pgsize);
- dpfn = pgsize >> PAGE_SHIFT;
+ ncontig = num_contig_ptes(huge_page_size(hstate_vma(vma)), &pgsize);
if (!__cont_access_flags_changed(ptep, pte, ncontig))
return 0;
orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
+ VM_WARN_ON(!pte_present(orig_pte));
/* Make sure we don't lose the dirty or young state */
if (pte_dirty(orig_pte))
@@ -456,38 +449,31 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
if (pte_young(orig_pte))
pte = pte_mkyoung(pte);
- hugeprot = pte_pgprot(pte);
- for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
- __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
-
+ __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
return 1;
}
void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- unsigned long pfn, dpfn;
- pgprot_t hugeprot;
- int ncontig, i;
+ int ncontig;
size_t pgsize;
pte_t pte;
- if (!pte_cont(__ptep_get(ptep))) {
+ pte = __ptep_get(ptep);
+ VM_WARN_ON(!pte_present(pte));
+
+ if (!pte_cont(pte)) {
__ptep_set_wrprotect(mm, addr, ptep);
return;
}
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
- dpfn = pgsize >> PAGE_SHIFT;
pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
pte = pte_wrprotect(pte);
- hugeprot = pte_pgprot(pte);
- pfn = pte_pfn(pte);
-
- for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
- __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
+ __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
}
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
@@ -497,10 +483,7 @@ pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
size_t pgsize;
int ncontig;
- if (!pte_cont(__ptep_get(ptep)))
- return ptep_clear_flush(vma, addr, ptep);
-
- ncontig = find_num_contig(mm, addr, ptep, &pgsize);
+ ncontig = num_contig_ptes(huge_page_size(hstate_vma(vma)), &pgsize);
return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index b99bf3980fc6..0c8c35dd645e 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -275,26 +275,6 @@ void __init arm64_memblock_init(void)
}
}
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- extern u16 memstart_offset_seed;
- u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
- int parange = cpuid_feature_extract_unsigned_field(
- mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
- s64 range = linear_region_size -
- BIT(id_aa64mmfr0_parange_to_phys_shift(parange));
-
- /*
- * If the size of the linear region exceeds, by a sufficient
- * margin, the size of the region that the physical memory can
- * span, randomize the linear region as well.
- */
- if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
- range /= ARM64_MEMSTART_ALIGN;
- memstart_addr -= ARM64_MEMSTART_ALIGN *
- ((range * memstart_offset_seed) >> 16);
- }
- }
-
/*
* Register the kernel text, kernel data, initrd, and initial
* pagetables with memblock.
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 07aeab8a7606..c86c348857c4 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -83,7 +83,7 @@ arch_initcall(adjust_protection_map);
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
- pteval_t prot;
+ ptdesc_t prot;
/* Short circuit GCS to avoid bloating the table. */
if (system_supports_gcs() && (vm_flags & VM_SHADOW_STACK)) {
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ea6695d53fb9..8fcf59ba39db 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -46,6 +46,13 @@
#define NO_CONT_MAPPINGS BIT(1)
#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */
+enum pgtable_type {
+ TABLE_PTE,
+ TABLE_PMD,
+ TABLE_PUD,
+ TABLE_P4D,
+};
+
u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);
@@ -107,7 +114,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
}
EXPORT_SYMBOL(phys_mem_access_prot);
-static phys_addr_t __init early_pgtable_alloc(int shift)
+static phys_addr_t __init early_pgtable_alloc(enum pgtable_type pgtable_type)
{
phys_addr_t phys;
@@ -192,7 +199,7 @@ static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
unsigned long end, phys_addr_t phys,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
+ phys_addr_t (*pgtable_alloc)(enum pgtable_type),
int flags)
{
unsigned long next;
@@ -207,7 +214,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
if (flags & NO_EXEC_MAPPINGS)
pmdval |= PMD_TABLE_PXN;
BUG_ON(!pgtable_alloc);
- pte_phys = pgtable_alloc(PAGE_SHIFT);
+ pte_phys = pgtable_alloc(TABLE_PTE);
ptep = pte_set_fixmap(pte_phys);
init_clear_pgtable(ptep);
ptep += pte_index(addr);
@@ -243,7 +250,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
static void init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int), int flags)
+ phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags)
{
unsigned long next;
@@ -277,7 +284,8 @@ static void init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
unsigned long end, phys_addr_t phys,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int), int flags)
+ phys_addr_t (*pgtable_alloc)(enum pgtable_type),
+ int flags)
{
unsigned long next;
pud_t pud = READ_ONCE(*pudp);
@@ -294,7 +302,7 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
if (flags & NO_EXEC_MAPPINGS)
pudval |= PUD_TABLE_PXN;
BUG_ON(!pgtable_alloc);
- pmd_phys = pgtable_alloc(PMD_SHIFT);
+ pmd_phys = pgtable_alloc(TABLE_PMD);
pmdp = pmd_set_fixmap(pmd_phys);
init_clear_pgtable(pmdp);
pmdp += pmd_index(addr);
@@ -325,7 +333,7 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
+ phys_addr_t (*pgtable_alloc)(enum pgtable_type),
int flags)
{
unsigned long next;
@@ -339,7 +347,7 @@ static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
if (flags & NO_EXEC_MAPPINGS)
p4dval |= P4D_TABLE_PXN;
BUG_ON(!pgtable_alloc);
- pud_phys = pgtable_alloc(PUD_SHIFT);
+ pud_phys = pgtable_alloc(TABLE_PUD);
pudp = pud_set_fixmap(pud_phys);
init_clear_pgtable(pudp);
pudp += pud_index(addr);
@@ -383,7 +391,7 @@ static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
static void alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
+ phys_addr_t (*pgtable_alloc)(enum pgtable_type),
int flags)
{
unsigned long next;
@@ -397,7 +405,7 @@ static void alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
if (flags & NO_EXEC_MAPPINGS)
pgdval |= PGD_TABLE_PXN;
BUG_ON(!pgtable_alloc);
- p4d_phys = pgtable_alloc(P4D_SHIFT);
+ p4d_phys = pgtable_alloc(TABLE_P4D);
p4dp = p4d_set_fixmap(p4d_phys);
init_clear_pgtable(p4dp);
p4dp += p4d_index(addr);
@@ -427,7 +435,7 @@ static void alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
+ phys_addr_t (*pgtable_alloc)(enum pgtable_type),
int flags)
{
unsigned long addr, end, next;
@@ -455,7 +463,7 @@ static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
+ phys_addr_t (*pgtable_alloc)(enum pgtable_type),
int flags)
{
mutex_lock(&fixmap_lock);
@@ -468,37 +476,48 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
extern __alias(__create_pgd_mapping_locked)
void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int), int flags);
+ phys_addr_t (*pgtable_alloc)(enum pgtable_type),
+ int flags);
#endif
-static phys_addr_t __pgd_pgtable_alloc(int shift)
+static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm,
+ enum pgtable_type pgtable_type)
{
/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
- void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL & ~__GFP_ZERO);
+ struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_ZERO, 0);
+ phys_addr_t pa;
+
+ BUG_ON(!ptdesc);
+ pa = page_to_phys(ptdesc_page(ptdesc));
+
+ switch (pgtable_type) {
+ case TABLE_PTE:
+ BUG_ON(!pagetable_pte_ctor(mm, ptdesc));
+ break;
+ case TABLE_PMD:
+ BUG_ON(!pagetable_pmd_ctor(mm, ptdesc));
+ break;
+ case TABLE_PUD:
+ pagetable_pud_ctor(ptdesc);
+ break;
+ case TABLE_P4D:
+ pagetable_p4d_ctor(ptdesc);
+ break;
+ }
- BUG_ON(!ptr);
- return __pa(ptr);
+ return pa;
}
-static phys_addr_t pgd_pgtable_alloc(int shift)
+static phys_addr_t __maybe_unused
+pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type)
{
- phys_addr_t pa = __pgd_pgtable_alloc(shift);
- struct ptdesc *ptdesc = page_ptdesc(phys_to_page(pa));
-
- /*
- * Call proper page table ctor in case later we need to
- * call core mm functions like apply_to_page_range() on
- * this pre-allocated page table.
- *
- * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
- * folded, and if so pagetable_pte_ctor() becomes nop.
- */
- if (shift == PAGE_SHIFT)
- BUG_ON(!pagetable_pte_ctor(ptdesc));
- else if (shift == PMD_SHIFT)
- BUG_ON(!pagetable_pmd_ctor(ptdesc));
+ return __pgd_pgtable_alloc(&init_mm, pgtable_type);
+}
- return pa;
+static phys_addr_t
+pgd_pgtable_alloc_special_mm(enum pgtable_type pgtable_type)
+{
+ return __pgd_pgtable_alloc(NULL, pgtable_type);
}
/*
@@ -530,7 +549,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
- pgd_pgtable_alloc, flags);
+ pgd_pgtable_alloc_special_mm, flags);
}
static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
@@ -744,7 +763,7 @@ static int __init map_entry_trampoline(void)
memset(tramp_pg_dir, 0, PGD_SIZE);
__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
entry_tramp_text_size(), prot,
- __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);
+ pgd_pgtable_alloc_init_mm, NO_BLOCK_MAPPINGS);
/* Map both the text and data into the kernel page table */
for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
@@ -1350,7 +1369,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
- size, params->pgprot, __pgd_pgtable_alloc,
+ size, params->pgprot, pgd_pgtable_alloc_init_mm,
flags);
memblock_clear_nomap(start, size);
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 39fd1f7ff02a..04d4a8f676db 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -96,8 +96,8 @@ static int change_memory_common(unsigned long addr, int numpages,
* we are operating on does not result in such splitting.
*
* Let's restrict ourselves to mappings created by vmalloc (or vmap).
- * Those are guaranteed to consist entirely of page mappings, and
- * splitting is never needed.
+ * Disallow VM_ALLOW_HUGE_VMAP mappings to guarantee that only page
+ * mappings are updated and splitting is never needed.
*
* So check whether the [addr, addr + size) interval is entirely
* covered by precisely one VM area that has the VM_ALLOC flag set.
@@ -105,7 +105,7 @@ static int change_memory_common(unsigned long addr, int numpages,
area = find_vm_area((void *)addr);
if (!area ||
end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
- !(area->flags & VM_ALLOC))
+ ((area->flags & (VM_ALLOC | VM_ALLOW_HUGE_VMAP)) != VM_ALLOC))
return -EINVAL;
if (!numpages)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index fb30c8804f87..80d470aa469d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -512,26 +512,11 @@ alternative_else_nop_endif
ubfx x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
cbz x1, .Lskip_indirection
- /*
- * The PROT_* macros describing the various memory types may resolve to
- * C expressions if they include the PTE_MAYBE_* macros, and so they
- * can only be used from C code. The PIE_E* constants below are also
- * defined in terms of those macros, but will mask out those
- * PTE_MAYBE_* constants, whether they are set or not. So #define them
- * as 0x0 here so we can evaluate the PIE_E* constants in asm context.
- */
-
-#define PTE_MAYBE_NG 0
-#define PTE_MAYBE_SHARED 0
-
- mov_q x0, PIE_E0
+ mov_q x0, PIE_E0_ASM
msr REG_PIRE0_EL1, x0
- mov_q x0, PIE_E1
+ mov_q x0, PIE_E1_ASM
msr REG_PIR_EL1, x0
-#undef PTE_MAYBE_NG
-#undef PTE_MAYBE_SHARED
-
orr tcr2, tcr2, TCR2_EL1_PIE
msr REG_TCR2_EL1, x0
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 8cec0da4cff2..421a5de806c6 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -189,12 +189,12 @@ static void note_prot_wx(struct ptdump_pg_state *st, unsigned long addr)
}
void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
- u64 val)
+ pteval_t val)
{
struct ptdump_pg_state *st = container_of(pt_st, struct ptdump_pg_state, ptdump);
struct ptdump_pg_level *pg_level = st->pg_level;
static const char units[] = "KMGTPE";
- u64 prot = 0;
+ ptdesc_t prot = 0;
/* check if the current level has been folded dynamically */
if (st->mm && ((level == 1 && mm_p4d_folded(st->mm)) ||
@@ -251,6 +251,38 @@ void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
}
+void note_page_pte(struct ptdump_state *pt_st, unsigned long addr, pte_t pte)
+{
+ note_page(pt_st, addr, 4, pte_val(pte));
+}
+
+void note_page_pmd(struct ptdump_state *pt_st, unsigned long addr, pmd_t pmd)
+{
+ note_page(pt_st, addr, 3, pmd_val(pmd));
+}
+
+void note_page_pud(struct ptdump_state *pt_st, unsigned long addr, pud_t pud)
+{
+ note_page(pt_st, addr, 2, pud_val(pud));
+}
+
+void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d)
+{
+ note_page(pt_st, addr, 1, p4d_val(p4d));
+}
+
+void note_page_pgd(struct ptdump_state *pt_st, unsigned long addr, pgd_t pgd)
+{
+ note_page(pt_st, addr, 0, pgd_val(pgd));
+}
+
+void note_page_flush(struct ptdump_state *pt_st)
+{
+ pte_t pte_zero = {0};
+
+ note_page(pt_st, 0, -1, pte_val(pte_zero));
+}
+
void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
{
unsigned long end = ~0UL;
@@ -266,7 +298,12 @@ void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
.pg_level = &kernel_pg_levels[0],
.level = -1,
.ptdump = {
- .note_page = note_page,
+ .note_page_pte = note_page_pte,
+ .note_page_pmd = note_page_pmd,
+ .note_page_pud = note_page_pud,
+ .note_page_p4d = note_page_p4d,
+ .note_page_pgd = note_page_pgd,
+ .note_page_flush = note_page_flush,
.range = (struct ptdump_range[]){
{info->base_addr, end},
{0, 0}
@@ -303,7 +340,12 @@ bool ptdump_check_wx(void)
.level = -1,
.check_wx = true,
.ptdump = {
- .note_page = note_page,
+ .note_page_pte = note_page_pte,
+ .note_page_pmd = note_page_pmd,
+ .note_page_pud = note_page_pud,
+ .note_page_p4d = note_page_p4d,
+ .note_page_pgd = note_page_pgd,
+ .note_page_flush = note_page_flush,
.range = (struct ptdump_range[]) {
{_PAGE_OFFSET(vabits_actual), ~0UL},
{0, 0}
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 70d7c89d3ac9..da8b89dd2910 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) "bpf_jit: " fmt
+#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
@@ -17,6 +18,7 @@
#include <asm/asm-extable.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/text-patching.h>
@@ -939,7 +941,51 @@ static void build_plt(struct jit_ctx *ctx)
plt->target = (u64)&dummy_tramp;
}
-static void build_epilogue(struct jit_ctx *ctx)
+/* Clobbers BPF registers 1-4, aka x0-x3 */
+static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx)
+{
+ const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */
+ u8 k = get_spectre_bhb_loop_value();
+
+ if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
+ cpu_mitigations_off() || __nospectre_bhb ||
+ arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE)
+ return;
+
+ if (capable(CAP_SYS_ADMIN))
+ return;
+
+ if (supports_clearbhb(SCOPE_SYSTEM)) {
+ emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx);
+ return;
+ }
+
+ if (k) {
+ emit_a64_mov_i64(r1, k, ctx);
+ emit(A64_B(1), ctx);
+ emit(A64_SUBS_I(true, r1, r1, 1), ctx);
+ emit(A64_B_(A64_COND_NE, -2), ctx);
+ emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx);
+ emit(aarch64_insn_get_isb_value(), ctx);
+ }
+
+ if (is_spectre_bhb_fw_mitigated()) {
+ emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR,
+ ARM_SMCCC_ARCH_WORKAROUND_3), ctx);
+ switch (arm_smccc_1_1_get_conduit()) {
+ case SMCCC_CONDUIT_HVC:
+ emit(aarch64_insn_get_hvc_value(), ctx);
+ break;
+ case SMCCC_CONDUIT_SMC:
+ emit(aarch64_insn_get_smc_value(), ctx);
+ break;
+ default:
+ pr_err_once("Firmware mitigation enabled with unknown conduit\n");
+ }
+ }
+}
+
+static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
{
const u8 r0 = bpf2a64[BPF_REG_0];
const u8 ptr = bpf2a64[TCCNT_PTR];
@@ -952,10 +998,13 @@ static void build_epilogue(struct jit_ctx *ctx)
emit(A64_POP(A64_ZR, ptr, A64_SP), ctx);
+ if (was_classic)
+ build_bhb_mitigation(ctx);
+
/* Restore FP/LR registers */
emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
- /* Set return value */
+ /* Move the return value from bpf:r0 (aka x7) to x0 */
emit(A64_MOV(1, A64_R(0), r0), ctx);
/* Authenticate lr */
@@ -1898,7 +1947,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
ctx.epilogue_offset = ctx.idx;
- build_epilogue(&ctx);
+ build_epilogue(&ctx, was_classic);
build_plt(&ctx);
extable_align = __alignof__(struct exception_table_entry);
@@ -1961,7 +2010,7 @@ skip_init_ctx:
goto out_free_hdr;
}
- build_epilogue(&ctx);
+ build_epilogue(&ctx, was_classic);
build_plt(&ctx);
/* Extra pass to validate JITed code. */
@@ -2064,7 +2113,7 @@ bool bpf_jit_supports_subprog_tailcalls(void)
}
static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
- int args_off, int retval_off, int run_ctx_off,
+ int bargs_off, int retval_off, int run_ctx_off,
bool save_ret)
{
__le32 *branch;
@@ -2106,7 +2155,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
branch = ctx->image + ctx->idx;
emit(A64_NOP, ctx);
- emit(A64_ADD_I(1, A64_R(0), A64_SP, args_off), ctx);
+ emit(A64_ADD_I(1, A64_R(0), A64_SP, bargs_off), ctx);
if (!p->jited)
emit_addr_mov_i64(A64_R(1), (const u64)p->insnsi, ctx);
@@ -2131,7 +2180,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
}
static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
- int args_off, int retval_off, int run_ctx_off,
+ int bargs_off, int retval_off, int run_ctx_off,
__le32 **branches)
{
int i;
@@ -2141,7 +2190,7 @@ static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
*/
emit(A64_STR64I(A64_ZR, A64_SP, retval_off), ctx);
for (i = 0; i < tl->nr_links; i++) {
- invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off,
+ invoke_bpf_prog(ctx, tl->links[i], bargs_off, retval_off,
run_ctx_off, true);
/* if (*(u64 *)(sp + retval_off) != 0)
* goto do_fexit;
@@ -2155,23 +2204,125 @@ static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
}
}
-static void save_args(struct jit_ctx *ctx, int args_off, int nregs)
+struct arg_aux {
+ /* how many args are passed through registers, the rest of the args are
+ * passed through stack
+ */
+ int args_in_regs;
+ /* how many registers are used to pass arguments */
+ int regs_for_args;
+ /* how much stack is used for additional args passed to bpf program
+ * that did not fit in original function registers
+ */
+ int bstack_for_args;
+ /* how much stack is used for additional args passed to the
+ * original function when called from trampoline (this one needs
+ * arguments to be properly aligned)
+ */
+ int ostack_for_args;
+};
+
+static int calc_arg_aux(const struct btf_func_model *m,
+ struct arg_aux *a)
{
- int i;
+ int stack_slots, nregs, slots, i;
+
+ /* verifier ensures m->nr_args <= MAX_BPF_FUNC_ARGS */
+ for (i = 0, nregs = 0; i < m->nr_args; i++) {
+ slots = (m->arg_size[i] + 7) / 8;
+ if (nregs + slots <= 8) /* passed through register ? */
+ nregs += slots;
+ else
+ break;
+ }
+
+ a->args_in_regs = i;
+ a->regs_for_args = nregs;
+ a->ostack_for_args = 0;
+ a->bstack_for_args = 0;
- for (i = 0; i < nregs; i++) {
- emit(A64_STR64I(i, A64_SP, args_off), ctx);
- args_off += 8;
+ /* the remaining arguments are passed on the stack */
+ for (; i < m->nr_args; i++) {
+ /* We cannot know for sure the exact alignment needs of a
+ * struct passed on the stack, so deny those
+ */
+ if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
+ return -ENOTSUPP;
+ stack_slots = (m->arg_size[i] + 7) / 8;
+ a->bstack_for_args += stack_slots * 8;
+ a->ostack_for_args = a->ostack_for_args + stack_slots * 8;
}
+
+ return 0;
}
-static void restore_args(struct jit_ctx *ctx, int args_off, int nregs)
+static void clear_garbage(struct jit_ctx *ctx, int reg, int effective_bytes)
+{
+ if (effective_bytes) {
+ int garbage_bits = 64 - 8 * effective_bytes;
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ /* garbage bits are at the right end */
+ emit(A64_LSR(1, reg, reg, garbage_bits), ctx);
+ emit(A64_LSL(1, reg, reg, garbage_bits), ctx);
+#else
+ /* garbage bits are at the left end */
+ emit(A64_LSL(1, reg, reg, garbage_bits), ctx);
+ emit(A64_LSR(1, reg, reg, garbage_bits), ctx);
+#endif
+ }
+}
+
+static void save_args(struct jit_ctx *ctx, int bargs_off, int oargs_off,
+ const struct btf_func_model *m,
+ const struct arg_aux *a,
+ bool for_call_origin)
{
int i;
+ int reg;
+ int doff;
+ int soff;
+ int slots;
+ u8 tmp = bpf2a64[TMP_REG_1];
+
+ /* store arguments to the stack for the bpf program, or restore
+ * arguments from stack for the original function
+ */
+ for (reg = 0; reg < a->regs_for_args; reg++) {
+ emit(for_call_origin ?
+ A64_LDR64I(reg, A64_SP, bargs_off) :
+ A64_STR64I(reg, A64_SP, bargs_off),
+ ctx);
+ bargs_off += 8;
+ }
- for (i = 0; i < nregs; i++) {
- emit(A64_LDR64I(i, A64_SP, args_off), ctx);
- args_off += 8;
+ soff = 32; /* on stack arguments start from FP + 32 */
+ doff = (for_call_origin ? oargs_off : bargs_off);
+
+ /* save on stack arguments */
+ for (i = a->args_in_regs; i < m->nr_args; i++) {
+ slots = (m->arg_size[i] + 7) / 8;
+ /* verifier ensures arg_size <= 16, so slots equals 1 or 2 */
+ while (slots-- > 0) {
+ emit(A64_LDR64I(tmp, A64_FP, soff), ctx);
+ /* if there is unused space in the last slot, clear
+ * the garbage contained in the space.
+ */
+ if (slots == 0 && !for_call_origin)
+ clear_garbage(ctx, tmp, m->arg_size[i] % 8);
+ emit(A64_STR64I(tmp, A64_SP, doff), ctx);
+ soff += 8;
+ doff += 8;
+ }
+ }
+}
+
+static void restore_args(struct jit_ctx *ctx, int bargs_off, int nregs)
+{
+ int reg;
+
+ for (reg = 0; reg < nregs; reg++) {
+ emit(A64_LDR64I(reg, A64_SP, bargs_off), ctx);
+ bargs_off += 8;
}
}
@@ -2194,17 +2345,21 @@ static bool is_struct_ops_tramp(const struct bpf_tramp_links *fentry_links)
*/
static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
struct bpf_tramp_links *tlinks, void *func_addr,
- int nregs, u32 flags)
+ const struct btf_func_model *m,
+ const struct arg_aux *a,
+ u32 flags)
{
int i;
int stack_size;
int retaddr_off;
int regs_off;
int retval_off;
- int args_off;
- int nregs_off;
+ int bargs_off;
+ int nfuncargs_off;
int ip_off;
int run_ctx_off;
+ int oargs_off;
+ int nfuncargs;
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
@@ -2213,31 +2368,38 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
bool is_struct_ops = is_struct_ops_tramp(fentry);
/* trampoline stack layout:
- * [ parent ip ]
- * [ FP ]
- * SP + retaddr_off [ self ip ]
- * [ FP ]
+ * [ parent ip ]
+ * [ FP ]
+ * SP + retaddr_off [ self ip ]
+ * [ FP ]
*
- * [ padding ] align SP to multiples of 16
+ * [ padding ] align SP to multiples of 16
*
- * [ x20 ] callee saved reg x20
- * SP + regs_off [ x19 ] callee saved reg x19
+ * [ x20 ] callee saved reg x20
+ * SP + regs_off [ x19 ] callee saved reg x19
*
- * SP + retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or
- * BPF_TRAMP_F_RET_FENTRY_RET
+ * SP + retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or
+ * BPF_TRAMP_F_RET_FENTRY_RET
+ * [ arg reg N ]
+ * [ ... ]
+ * SP + bargs_off [ arg reg 1 ] for bpf
*
- * [ arg reg N ]
- * [ ... ]
- * SP + args_off [ arg reg 1 ]
+ * SP + nfuncargs_off [ arg regs count ]
*
- * SP + nregs_off [ arg regs count ]
+ * SP + ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
*
- * SP + ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
+ * SP + run_ctx_off [ bpf_tramp_run_ctx ]
*
- * SP + run_ctx_off [ bpf_tramp_run_ctx ]
+ * [ stack arg N ]
+ * [ ... ]
+ * SP + oargs_off [ stack arg 1 ] for original func
*/
stack_size = 0;
+ oargs_off = stack_size;
+ if (flags & BPF_TRAMP_F_CALL_ORIG)
+ stack_size += a->ostack_for_args;
+
run_ctx_off = stack_size;
/* room for bpf_tramp_run_ctx */
stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
@@ -2247,13 +2409,14 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
if (flags & BPF_TRAMP_F_IP_ARG)
stack_size += 8;
- nregs_off = stack_size;
+ nfuncargs_off = stack_size;
/* room for args count */
stack_size += 8;
- args_off = stack_size;
+ bargs_off = stack_size;
/* room for args */
- stack_size += nregs * 8;
+ nfuncargs = a->regs_for_args + a->bstack_for_args / 8;
+ stack_size += 8 * nfuncargs;
/* room for return value */
retval_off = stack_size;
@@ -2300,11 +2463,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
}
/* save arg regs count*/
- emit(A64_MOVZ(1, A64_R(10), nregs, 0), ctx);
- emit(A64_STR64I(A64_R(10), A64_SP, nregs_off), ctx);
+ emit(A64_MOVZ(1, A64_R(10), nfuncargs, 0), ctx);
+ emit(A64_STR64I(A64_R(10), A64_SP, nfuncargs_off), ctx);
- /* save arg regs */
- save_args(ctx, args_off, nregs);
+ /* save args for bpf */
+ save_args(ctx, bargs_off, oargs_off, m, a, false);
/* save callee saved registers */
emit(A64_STR64I(A64_R(19), A64_SP, regs_off), ctx);
@@ -2320,7 +2483,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
}
for (i = 0; i < fentry->nr_links; i++)
- invoke_bpf_prog(ctx, fentry->links[i], args_off,
+ invoke_bpf_prog(ctx, fentry->links[i], bargs_off,
retval_off, run_ctx_off,
flags & BPF_TRAMP_F_RET_FENTRY_RET);
@@ -2330,12 +2493,13 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
if (!branches)
return -ENOMEM;
- invoke_bpf_mod_ret(ctx, fmod_ret, args_off, retval_off,
+ invoke_bpf_mod_ret(ctx, fmod_ret, bargs_off, retval_off,
run_ctx_off, branches);
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- restore_args(ctx, args_off, nregs);
+ /* save args for original func */
+ save_args(ctx, bargs_off, oargs_off, m, a, true);
/* call original func */
emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx);
emit(A64_ADR(A64_LR, AARCH64_INSN_SIZE * 2), ctx);
@@ -2354,7 +2518,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
}
for (i = 0; i < fexit->nr_links; i++)
- invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off,
+ invoke_bpf_prog(ctx, fexit->links[i], bargs_off, retval_off,
run_ctx_off, false);
if (flags & BPF_TRAMP_F_CALL_ORIG) {
@@ -2368,7 +2532,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
}
if (flags & BPF_TRAMP_F_RESTORE_REGS)
- restore_args(ctx, args_off, nregs);
+ restore_args(ctx, bargs_off, a->regs_for_args);
/* restore callee saved register x19 and x20 */
emit(A64_LDR64I(A64_R(19), A64_SP, regs_off), ctx);
@@ -2405,21 +2569,6 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
return ctx->idx;
}
-static int btf_func_model_nregs(const struct btf_func_model *m)
-{
- int nregs = m->nr_args;
- int i;
-
- /* extra registers needed for struct argument */
- for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
- /* The arg_size is at most 16 bytes, enforced by the verifier. */
- if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
- nregs += (m->arg_size[i] + 7) / 8 - 1;
- }
-
- return nregs;
-}
-
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
struct bpf_tramp_links *tlinks, void *func_addr)
{
@@ -2428,14 +2577,14 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
.idx = 0,
};
struct bpf_tramp_image im;
- int nregs, ret;
+ struct arg_aux aaux;
+ int ret;
- nregs = btf_func_model_nregs(m);
- /* the first 8 registers are used for arguments */
- if (nregs > 8)
- return -ENOTSUPP;
+ ret = calc_arg_aux(m, &aaux);
+ if (ret < 0)
+ return ret;
- ret = prepare_trampoline(&ctx, &im, tlinks, func_addr, nregs, flags);
+ ret = prepare_trampoline(&ctx, &im, tlinks, func_addr, m, &aaux, flags);
if (ret < 0)
return ret;
@@ -2462,9 +2611,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
u32 flags, struct bpf_tramp_links *tlinks,
void *func_addr)
{
- int ret, nregs;
- void *image, *tmp;
u32 size = ro_image_end - ro_image;
+ struct arg_aux aaux;
+ void *image, *tmp;
+ int ret;
/* image doesn't need to be in module memory range, so we can
* use kvmalloc.
@@ -2480,13 +2630,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
.write = true,
};
- nregs = btf_func_model_nregs(m);
- /* the first 8 registers are used for arguments */
- if (nregs > 8)
- return -ENOTSUPP;
jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image));
- ret = prepare_trampoline(&ctx, im, tlinks, func_addr, nregs, flags);
+ ret = calc_arg_aux(m, &aaux);
+ if (ret)
+ goto out;
+ ret = prepare_trampoline(&ctx, im, tlinks, func_addr, m, &aaux, flags);
if (ret > 0 && validate_code(&ctx) < 0) {
ret = -EINVAL;
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 772c1b008e43..10effd4cff6b 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -28,6 +28,7 @@ HAS_EPAN
HAS_EVT
HAS_FPMR
HAS_FGT
+HAS_FGT2
HAS_FPSIMD
HAS_GCS
HAS_GENERIC_AUTH
@@ -94,6 +95,7 @@ WORKAROUND_2457168
WORKAROUND_2645198
WORKAROUND_2658417
WORKAROUND_AMPERE_AC03_CPU_38
+WORKAROUND_AMPERE_AC04_CPU_23
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index f9476848a2ed..8a8cf6874298 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -101,6 +101,17 @@ Res0 63:32
Field 31:0 DTRTX
EndSysreg
+Sysreg MDSELR_EL1 2 0 0 4 2
+Res0 63:6
+Field 5:4 BANK
+Res0 3:0
+EndSysreg
+
+Sysreg MDSTEPOP_EL1 2 0 0 5 2
+Res0 63:32
+Field 31:0 OPCODE
+EndSysreg
+
Sysreg OSECCR_EL1 2 0 0 6 2
Res0 63:32
Field 31:0 EDECCR
@@ -111,6 +122,285 @@ Res0 63:1
Field 0 OSLK
EndSysreg
+Sysreg SPMACCESSR_EL1 2 0 9 13 3
+UnsignedEnum 63:62 P31
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 61:60 P30
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 59:58 P29
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 57:56 P28
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 55:54 P27
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 53:52 P26
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 51:50 P25
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 49:48 P24
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 47:46 P23
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 45:44 P22
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 43:42 P21
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 41:40 P20
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 39:38 P19
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 37:36 P18
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 35:34 P17
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 33:32 P16
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 31:30 P15
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 29:28 P14
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 27:26 P13
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 25:24 P12
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 23:22 P11
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 21:20 P10
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 19:18 P9
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 17:16 P8
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 15:14 P7
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 13:12 P6
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 11:10 P5
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 9:8 P4
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 7:6 P3
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 5:4 P2
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 3:2 P1
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+UnsignedEnum 1:0 P0
+ 0b00 TRAP_RW
+ 0b01 TRAP_W
+ 0b11 NOTRAP
+EndEnum
+EndSysreg
+
+Sysreg SPMACCESSR_EL12 2 5 9 13 3
+Mapping SPMACCESSR_EL1
+EndSysreg
+
+Sysreg SPMIIDR_EL1 2 0 9 13 4
+Res0 63:32
+Field 31:20 ProductID
+Field 19:16 Variant
+Field 15:12 Revision
+Field 11:0 Implementer
+EndSysreg
+
+Sysreg SPMDEVARCH_EL1 2 0 9 13 5
+Res0 63:32
+Field 31:21 ARCHITECT
+Field 20 PRESENT
+Field 19:16 REVISION
+Field 15:12 ARCHVER
+Field 11:0 ARCHPART
+EndSysreg
+
+Sysreg SPMDEVAFF_EL1 2 0 9 13 6
+Res0 63:40
+Field 39:32 Aff3
+Field 31 F0V
+Field 30 U
+Res0 29:25
+Field 24 MT
+Field 23:16 Aff2
+Field 15:8 Aff1
+Field 7:0 Aff0
+EndSysreg
+
+Sysreg SPMCFGR_EL1 2 0 9 13 7
+Res0 63:32
+Field 31:28 NCG
+Res0 27:25
+Field 24 HDBG
+Field 23 TRO
+Field 22 SS
+Field 21 FZO
+Field 20 MSI
+Field 19 RAO
+Res0 18
+Field 17 NA
+Field 16 EX
+Field 15:14 RAZ
+Field 13:8 SIZE
+Field 7:0 N
+EndSysreg
+
+Sysreg SPMINTENSET_EL1 2 0 9 14 1
+Field 63:0 P
+EndSysreg
+
+Sysreg SPMINTENCLR_EL1 2 0 9 14 2
+Field 63:0 P
+EndSysreg
+
+Sysreg PMCCNTSVR_EL1 2 0 14 11 7
+Field 63:0 CCNT
+EndSysreg
+
+Sysreg PMICNTSVR_EL1 2 0 14 12 0
+Field 63:0 ICNT
+EndSysreg
+
+Sysreg SPMCR_EL0 2 3 9 12 0
+Res0 63:12
+Field 11 TRO
+Field 10 HDBG
+Field 9 FZO
+Field 8 NA
+Res0 7:5
+Field 4 EX
+Res0 3:2
+Field 1 P
+Field 0 E
+EndSysreg
+
+Sysreg SPMCNTENSET_EL0 2 3 9 12 1
+Field 63:0 P
+EndSysreg
+
+Sysreg SPMCNTENCLR_EL0 2 3 9 12 2
+Field 63:0 P
+EndSysreg
+
+Sysreg SPMOVSCLR_EL0 2 3 9 12 3
+Field 63:0 P
+EndSysreg
+
+Sysreg SPMZR_EL0 2 3 9 12 4
+Field 63:0 P
+EndSysreg
+
+Sysreg SPMSELR_EL0 2 3 9 12 5
+Res0 63:10
+Field 9:4 SYSPMUSEL
+Res0 3:2
+Field 1:0 BANK
+EndSysreg
+
+Sysreg SPMOVSSET_EL0 2 3 9 14 3
+Field 63:0 P
+EndSysreg
+
+Sysreg SPMSCR_EL1 2 7 9 14 7
+Field 63:32 IMPDEF
+Field 31 RAO
+Res0 30:5
+Field 4 NAO
+Res0 3:1
+Field 0 SO
+EndSysreg
+
Sysreg ID_PFR0_EL1 3 0 0 1 0
Res0 63:32
UnsignedEnum 31:28 RAS
@@ -907,6 +1197,7 @@ UnsignedEnum 31:28 RAS
0b0000 NI
0b0001 IMP
0b0010 V1P1
+ 0b0011 V2
EndEnum
UnsignedEnum 27:24 GIC
0b0000 NI
@@ -1466,6 +1757,7 @@ UnsignedEnum 63:60 LS64
0b0001 LS64
0b0010 LS64_V
0b0011 LS64_ACCDATA
+ 0b0100 LS64WB
EndEnum
UnsignedEnum 59:56 XS
0b0000 NI
@@ -1945,12 +2237,21 @@ EndEnum
EndSysreg
Sysreg ID_AA64MMFR4_EL1 3 0 0 7 4
-Res0 63:40
+Res0 63:48
+UnsignedEnum 47:44 SRMASK
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Res0 43:40
UnsignedEnum 39:36 E3DSE
0b0000 NI
0b0001 IMP
EndEnum
-Res0 35:28
+Res0 35:32
+UnsignedEnum 31:28 RMEGDI
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
SignedEnum 27:24 E2H0
0b0000 IMP
0b1110 NI_NV1
@@ -1959,6 +2260,7 @@ EndEnum
UnsignedEnum 23:20 NV_frac
0b0000 NV_NV2
0b0001 NV2_ONLY
+ 0b0010 NV2P1
EndEnum
UnsignedEnum 19:16 FGWTE3
0b0000 NI
@@ -1978,7 +2280,10 @@ SignedEnum 7:4 EIESB
0b0010 ToELx
0b1111 ANY
EndEnum
-Res0 3:0
+UnsignedEnum 3:0 PoPS
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
EndSysreg
Sysreg SCTLR_EL1 3 0 1 0 0
@@ -2053,8 +2358,30 @@ Field 1 A
Field 0 M
EndSysreg
+Sysreg SCTLR_EL12 3 5 1 0 0
+Mapping SCTLR_EL1
+EndSysreg
+
+Sysreg SCTLRALIAS_EL1 3 0 1 4 6
+Mapping SCTLR_EL1
+EndSysreg
+
+Sysreg ACTLR_EL1 3 0 1 0 1
+Field 63:0 IMPDEF
+EndSysreg
+
+Sysreg ACTLR_EL12 3 5 1 0 1
+Mapping ACTLR_EL1
+EndSysreg
+
+Sysreg ACTLRALIAS_EL1 3 0 1 4 5
+Mapping ACTLR_EL1
+EndSysreg
+
Sysreg CPACR_EL1 3 0 1 0 2
-Res0 63:30
+Res0 63:32
+Field 31 TCPAC
+Field 30 TAM
Field 29 E0POE
Field 28 TTA
Res0 27:26
@@ -2066,6 +2393,323 @@ Field 17:16 ZEN
Res0 15:0
EndSysreg
+Sysreg CPACR_EL12 3 5 1 0 2
+Mapping CPACR_EL1
+EndSysreg
+
+Sysreg CPACRALIAS_EL1 3 0 1 4 4
+Mapping CPACR_EL1
+EndSysreg
+
+Sysreg ACTLRMASK_EL1 3 0 1 4 1
+Field 63:0 IMPDEF
+EndSysreg
+
+Sysreg ACTLRMASK_EL12 3 5 1 4 1
+Mapping ACTLRMASK_EL1
+EndSysreg
+
+Sysreg CPACRMASK_EL1 3 0 1 4 2
+Res0 63:32
+Field 31 TCPAC
+Field 30 TAM
+Field 29 E0POE
+Field 28 TTA
+Res0 27:25
+Field 24 SMEN
+Res0 23:21
+Field 20 FPEN
+Res0 19:17
+Field 16 ZEN
+Res0 15:0
+EndSysreg
+
+Sysreg CPACRMASK_EL12 3 5 1 4 2
+Mapping CPACRMASK_EL1
+EndSysreg
+
+Sysreg PFAR_EL1 3 0 6 0 5
+Field 63 NS
+Field 62 NSE
+Res0 61:56
+Field 55:52 PA_55_52
+Field 51:48 PA_51_48
+Field 47:0 PA
+EndSysreg
+
+Sysreg PFAR_EL12 3 5 6 0 5
+Mapping PFAR_EL1
+EndSysreg
+
+Sysreg RCWSMASK_EL1 3 0 13 0 3
+Field 63:0 RCWSMASK
+EndSysreg
+
+Sysreg SCTLR2_EL1 3 0 1 0 3
+Res0 63:13
+Field 12 CPTM0
+Field 11 CPTM
+Field 10 CPTA0
+Field 9 CPTA
+Field 8 EnPACM0
+Field 7 EnPACM
+Field 6 EnIDCP128
+Field 5 EASE
+Field 4 EnANERR
+Field 3 EnADERR
+Field 2 NMEA
+Res0 1:0
+EndSysreg
+
+Sysreg SCTLR2_EL12 3 5 1 0 3
+Mapping SCTLR2_EL1
+EndSysreg
+
+Sysreg SCTLR2ALIAS_EL1 3 0 1 4 7
+Mapping SCTLR2_EL1
+EndSysreg
+
+Sysreg SCTLR2MASK_EL1 3 0 1 4 3
+Res0 63:13
+Field 12 CPTM0
+Field 11 CPTM
+Field 10 CPTA0
+Field 9 CPTA
+Field 8 EnPACM0
+Field 7 EnPACM
+Field 6 EnIDCP128
+Field 5 EASE
+Field 4 EnANERR
+Field 3 EnADERR
+Field 2 NMEA
+Res0 1:0
+EndSysreg
+
+Sysreg SCTLR2MASK_EL12 3 5 1 4 3
+Mapping SCTLR2MASK_EL1
+EndSysreg
+
+Sysreg SCTLRMASK_EL1 3 0 1 4 0
+Field 63 TIDCP
+Field 62 SPINTMASK
+Field 61 NMI
+Field 60 EnTP2
+Field 59 TCSO
+Field 58 TCSO0
+Field 57 EPAN
+Field 56 EnALS
+Field 55 EnAS0
+Field 54 EnASR
+Field 53 TME
+Field 52 TME0
+Field 51 TMT
+Field 50 TMT0
+Res0 49:47
+Field 46 TWEDEL
+Field 45 TWEDEn
+Field 44 DSSBS
+Field 43 ATA
+Field 42 ATA0
+Res0 41
+Field 40 TCF
+Res0 39
+Field 38 TCF0
+Field 37 ITFSB
+Field 36 BT1
+Field 35 BT0
+Field 34 EnFPM
+Field 33 MSCEn
+Field 32 CMOW
+Field 31 EnIA
+Field 30 EnIB
+Field 29 LSMAOE
+Field 28 nTLSMD
+Field 27 EnDA
+Field 26 UCI
+Field 25 EE
+Field 24 E0E
+Field 23 SPAN
+Field 22 EIS
+Field 21 IESB
+Field 20 TSCXT
+Field 19 WXN
+Field 18 nTWE
+Res0 17
+Field 16 nTWI
+Field 15 UCT
+Field 14 DZE
+Field 13 EnDB
+Field 12 I
+Field 11 EOS
+Field 10 EnRCTX
+Field 9 UMA
+Field 8 SED
+Field 7 ITD
+Field 6 nAA
+Field 5 CP15BEN
+Field 4 SA0
+Field 3 SA
+Field 2 C
+Field 1 A
+Field 0 M
+EndSysreg
+
+Sysreg SCTLRMASK_EL12 3 5 1 4 0
+Mapping SCTLRMASK_EL1
+EndSysreg
+
+Sysreg TCR2MASK_EL1 3 0 2 7 3
+Res0 63:22
+Field 21 FNGNA1
+Field 20 FNGNA0
+Res0 19
+Field 18 FNG1
+Field 17 FNG0
+Field 16 A2
+Field 15 DisCH1
+Field 14 DisCH0
+Res0 13:12
+Field 11 HAFT
+Field 10 PTTWI
+Res0 9:6
+Field 5 D128
+Field 4 AIE
+Field 3 POE
+Field 2 E0POE
+Field 1 PIE
+Field 0 PnCH
+EndSysreg
+
+Sysreg TCR2MASK_EL12 3 5 2 7 3
+Mapping TCR2MASK_EL1
+EndSysreg
+
+Sysreg TCRMASK_EL1 3 0 2 7 2
+Res0 63:62
+Field 61 MTX1
+Field 60 MTX0
+Field 59 DS
+Field 58 TCMA1
+Field 57 TCMA0
+Field 56 E0PD1
+Field 55 E0PD0
+Field 54 NFD1
+Field 53 NFD0
+Field 52 TBID1
+Field 51 TBID0
+Field 50 HWU162
+Field 49 HWU161
+Field 48 HWU160
+Field 47 HWU159
+Field 46 HWU062
+Field 45 HWU061
+Field 44 HWU060
+Field 43 HWU059
+Field 42 HPD1
+Field 41 HPD0
+Field 40 HD
+Field 39 HA
+Field 38 TBI1
+Field 37 TBI0
+Field 36 AS
+Res0 35:33
+Field 32 IPS
+Res0 31
+Field 30 TG1
+Res0 29
+Field 28 SH1
+Res0 27
+Field 26 ORGN1
+Res0 25
+Field 24 IRGN1
+Field 23 EPD1
+Field 22 A1
+Res0 21:17
+Field 16 T1SZ
+Res0 15
+Field 14 TG0
+Res0 13
+Field 12 SH0
+Res0 11
+Field 10 ORGN0
+Res0 9
+Field 8 IRGN0
+Field 7 EPD0
+Res0 6:1
+Field 0 T0SZ
+EndSysreg
+
+Sysreg TCRMASK_EL12 3 5 2 7 2
+Mapping TCRMASK_EL1
+EndSysreg
+
+Sysreg ERXGSR_EL1 3 0 5 3 2
+Field 63 S63
+Field 62 S62
+Field 61 S61
+Field 60 S60
+Field 59 S59
+Field 58 S58
+Field 57 S57
+Field 56 S56
+Field 55 S55
+Field 54 S54
+Field 53 S53
+Field 52 S52
+Field 51 S51
+Field 50 S50
+Field 49 S49
+Field 48 S48
+Field 47 S47
+Field 46 S46
+Field 45 S45
+Field 44 S44
+Field 43 S43
+Field 42 S42
+Field 41 S41
+Field 40 S40
+Field 39 S39
+Field 38 S38
+Field 37 S37
+Field 36 S36
+Field 35 S35
+Field 34 S34
+Field 33 S33
+Field 32 S32
+Field 31 S31
+Field 30 S30
+Field 29 S29
+Field 28 S28
+Field 27 S27
+Field 26 S26
+Field 25 S25
+Field 24 S24
+Field 23 S23
+Field 22 S22
+Field 21 S21
+Field 20 S20
+Field 19 S19
+Field 18 S18
+Field 17 S17
+Field 16 S16
+Field 15 S15
+Field 14 S14
+Field 13 S13
+Field 12 S12
+Field 11 S11
+Field 10 S10
+Field 9 S9
+Field 8 S8
+Field 7 S7
+Field 6 S6
+Field 5 S5
+Field 4 S4
+Field 3 S3
+Field 2 S2
+Field 1 S1
+Field 0 S0
+EndSysreg
+
Sysreg TRFCR_EL1 3 0 1 2 1
Res0 63:7
UnsignedEnum 6:5 TS
@@ -2078,6 +2722,16 @@ Field 1 ExTRE
Field 0 E0TRE
EndSysreg
+Sysreg TRCITECR_EL1 3 0 1 2 3
+Res0 63:2
+Field 1 E1E
+Field 0 E0E
+EndSysreg
+
+Sysreg TRCITECR_EL12 3 5 1 2 3
+Mapping TRCITECR_EL1
+EndSysreg
+
Sysreg SMPRI_EL1 3 0 1 2 4
Res0 63:4
Field 3:0 PRIORITY
@@ -2226,7 +2880,28 @@ Field 15:0 MINLAT
EndSysreg
Sysreg PMSIDR_EL1 3 0 9 9 7
-Res0 63:25
+Res0 63:33
+UnsignedEnum 32 SME
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+UnsignedEnum 31:28 ALTCLK
+ 0b0000 NI
+ 0b0001 IMP
+ 0b1111 IMPDEF
+EndEnum
+UnsignedEnum 27 FPF
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+UnsignedEnum 26 EFT
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+UnsignedEnum 25 CRR
+ 0b0 NI
+ 0b1 IMP
+EndEnum
Field 24 PBT
Field 23:20 FORMAT
Enum 19:16 COUNTSIZE
@@ -2244,7 +2919,10 @@ Enum 11:8 INTERVAL
0b0111 3072
0b1000 4096
EndEnum
-Res0 7
+UnsignedEnum 7 FDS
+ 0b0 NI
+ 0b1 IMP
+EndEnum
Field 6 FnE
Field 5 ERND
Field 4 LDS
@@ -2287,6 +2965,16 @@ Field 16 COLL
Field 15:0 MSS
EndSysreg
+Sysreg PMSDSFR_EL1 3 0 9 10 4
+Field 63:0 S
+EndSysreg
+
+Sysreg PMBMAR_EL1 3 0 9 10 5
+Res0 63:10
+Field 9:8 SH
+Field 7:0 Attr
+EndSysreg
+
Sysreg PMBIDR_EL1 3 0 9 10 7
Res0 63:12
Enum 11:8 EA
@@ -2300,6 +2988,21 @@ Field 4 P
Field 3:0 ALIGN
EndSysreg
+Sysreg TRBMPAM_EL1 3 0 9 11 5
+Res0 63:27
+Field 26 EN
+Field 25:24 MPAM_SP
+Field 23:16 PMG
+Field 15:0 PARTID
+EndSysreg
+
+Sysreg PMSSCR_EL1 3 0 9 13 3
+Res0 63:33
+Field 32 NC
+Res0 31:1
+Field 0 SS
+EndSysreg
+
Sysreg PMUACR_EL1 3 0 9 14 4
Res0 63:33
Field 32 F0
@@ -2307,11 +3010,29 @@ Field 31 C
Field 30:0 P
EndSysreg
+Sysreg PMECR_EL1 3 0 9 14 5
+Res0 63:5
+Field 4:3 SSE
+Field 2 KPME
+Field 1:0 PMEE
+EndSysreg
+
+Sysreg PMIAR_EL1 3 0 9 14 7
+Field 63:0 ADDRESS
+EndSysreg
+
Sysreg PMSELR_EL0 3 3 9 12 5
Res0 63:5
Field 4:0 SEL
EndSysreg
+Sysreg PMZR_EL0 3 3 9 13 4
+Res0 63:33
+Field 32 F0
+Field 31 C
+Field 30:0 P
+EndSysreg
+
SysregFields CONTEXTIDR_ELx
Res0 63:32
Field 31:0 PROCID
@@ -2450,7 +3171,110 @@ UnsignedEnum 2:0 F8S1
EndEnum
EndSysreg
-SysregFields HFGxTR_EL2
+Sysreg HCR_EL2 3 4 1 1 0
+Field 63:60 TWEDEL
+Field 59 TWEDEn
+Field 58 TID5
+Field 57 DCT
+Field 56 ATA
+Field 55 TTLBOS
+Field 54 TTLBIS
+Field 53 EnSCXT
+Field 52 TOCU
+Field 51 AMVOFFEN
+Field 50 TICAB
+Field 49 TID4
+Field 48 GPF
+Field 47 FIEN
+Field 46 FWB
+Field 45 NV2
+Field 44 AT
+Field 43 NV1
+Field 42 NV
+Field 41 API
+Field 40 APK
+Field 39 TME
+Field 38 MIOCNCE
+Field 37 TEA
+Field 36 TERR
+Field 35 TLOR
+Field 34 E2H
+Field 33 ID
+Field 32 CD
+Field 31 RW
+Field 30 TRVM
+Field 29 HCD
+Field 28 TDZ
+Field 27 TGE
+Field 26 TVM
+Field 25 TTLB
+Field 24 TPU
+Field 23 TPCP
+Field 22 TSW
+Field 21 TACR
+Field 20 TIDCP
+Field 19 TSC
+Field 18 TID3
+Field 17 TID2
+Field 16 TID1
+Field 15 TID0
+Field 14 TWE
+Field 13 TWI
+Field 12 DC
+UnsignedEnum 11:10 BSU
+ 0b00 NONE
+ 0b01 IS
+ 0b10 OS
+ 0b11 FS
+EndEnum
+Field 9 FB
+Field 8 VSE
+Field 7 VI
+Field 6 VF
+Field 5 AMO
+Field 4 IMO
+Field 3 FMO
+Field 2 PTW
+Field 1 SWIO
+Field 0 VM
+EndSysreg
+
+Sysreg MDCR_EL2 3 4 1 1 1
+Res0 63:51
+Field 50 EnSTEPOP
+Res0 49:44
+Field 43 EBWE
+Res0 42
+Field 41:40 PMEE
+Res0 39:37
+Field 36 HPMFZS
+Res0 35:32
+Field 31:30 PMSSE
+Field 29 HPMFZO
+Field 28 MTPME
+Field 27 TDCC
+Field 26 HLP
+Field 25:24 E2TB
+Field 23 HCCD
+Res0 22:20
+Field 19 TTRF
+Res0 18
+Field 17 HPMD
+Res0 16
+Field 15 EnSPM
+Field 14 TPMS
+Field 13:12 E2PB
+Field 11 TDRA
+Field 10 TDOSA
+Field 9 TDA
+Field 8 TDE
+Field 7 HPME
+Field 6 TPM
+Field 5 TPMCR
+Field 4:0 HPMN
+EndSysreg
+
+Sysreg HFGRTR_EL2 3 4 1 1 4
Field 63 nAMAIR2_EL1
Field 62 nMAIR2_EL1
Field 61 nS2POR_EL1
@@ -2515,53 +3339,74 @@ Field 3 AMAIR_EL1
Field 2 AIDR_EL1
Field 1 AFSR1_EL1
Field 0 AFSR0_EL1
-EndSysregFields
-
-Sysreg MDCR_EL2 3 4 1 1 1
-Res0 63:51
-Field 50 EnSTEPOP
-Res0 49:44
-Field 43 EBWE
-Res0 42
-Field 41:40 PMEE
-Res0 39:37
-Field 36 HPMFZS
-Res0 35:32
-Field 31:30 PMSSE
-Field 29 HPMFZO
-Field 28 MTPME
-Field 27 TDCC
-Field 26 HLP
-Field 25:24 E2TB
-Field 23 HCCD
-Res0 22:20
-Field 19 TTRF
-Res0 18
-Field 17 HPMD
-Res0 16
-Field 15 EnSPM
-Field 14 TPMS
-Field 13:12 E2PB
-Field 11 TDRA
-Field 10 TDOSA
-Field 9 TDA
-Field 8 TDE
-Field 7 HPME
-Field 6 TPM
-Field 5 TPMCR
-Field 4:0 HPMN
-EndSysreg
-
-Sysreg HFGRTR_EL2 3 4 1 1 4
-Fields HFGxTR_EL2
EndSysreg
Sysreg HFGWTR_EL2 3 4 1 1 5
-Fields HFGxTR_EL2
+Field 63 nAMAIR2_EL1
+Field 62 nMAIR2_EL1
+Field 61 nS2POR_EL1
+Field 60 nPOR_EL1
+Field 59 nPOR_EL0
+Field 58 nPIR_EL1
+Field 57 nPIRE0_EL1
+Field 56 nRCWMASK_EL1
+Field 55 nTPIDR2_EL0
+Field 54 nSMPRI_EL1
+Field 53 nGCS_EL1
+Field 52 nGCS_EL0
+Res0 51
+Field 50 nACCDATA_EL1
+Field 49 ERXADDR_EL1
+Field 48 ERXPFGCDN_EL1
+Field 47 ERXPFGCTL_EL1
+Res0 46
+Field 45 ERXMISCn_EL1
+Field 44 ERXSTATUS_EL1
+Field 43 ERXCTLR_EL1
+Res0 42
+Field 41 ERRSELR_EL1
+Res0 40
+Field 39 ICC_IGRPENn_EL1
+Field 38 VBAR_EL1
+Field 37 TTBR1_EL1
+Field 36 TTBR0_EL1
+Field 35 TPIDR_EL0
+Field 34 TPIDRRO_EL0
+Field 33 TPIDR_EL1
+Field 32 TCR_EL1
+Field 31 SCXTNUM_EL0
+Field 30 SCXTNUM_EL1
+Field 29 SCTLR_EL1
+Res0 28
+Field 27 PAR_EL1
+Res0 26:25
+Field 24 MAIR_EL1
+Field 23 LORSA_EL1
+Field 22 LORN_EL1
+Res0 21
+Field 20 LOREA_EL1
+Field 19 LORC_EL1
+Res0 18
+Field 17 FAR_EL1
+Field 16 ESR_EL1
+Res0 15:14
+Field 13 CSSELR_EL1
+Field 12 CPACR_EL1
+Field 11 CONTEXTIDR_EL1
+Res0 10:9
+Field 8 APIBKey
+Field 7 APIAKey
+Field 6 APGAKey
+Field 5 APDBKey
+Field 4 APDAKey
+Field 3 AMAIR_EL1
+Res0 2
+Field 1 AFSR1_EL1
+Field 0 AFSR0_EL1
EndSysreg
Sysreg HFGITR_EL2 3 4 1 1 6
-Res0 63
+Field 63 PSBCSYNC
Field 62 ATS1E1A
Res0 61
Field 60 COSPRCTX
@@ -2971,6 +3816,12 @@ Sysreg SMCR_EL2 3 4 1 2 6
Fields SMCR_ELx
EndSysreg
+Sysreg VNCR_EL2 3 4 2 2 0
+Field 63:57 RESS
+Field 56:12 BADDR
+Res0 11:0
+EndSysreg
+
Sysreg GCSCR_EL2 3 4 2 5 0
Fields GCSCR_ELx
EndSysreg
@@ -3244,6 +4095,60 @@ Sysreg TTBR1_EL1 3 0 2 0 1
Fields TTBRx_EL1
EndSysreg
+Sysreg TCR_EL1 3 0 2 0 2
+Res0 63:62
+Field 61 MTX1
+Field 60 MTX0
+Field 59 DS
+Field 58 TCMA1
+Field 57 TCMA0
+Field 56 E0PD1
+Field 55 E0PD0
+Field 54 NFD1
+Field 53 NFD0
+Field 52 TBID1
+Field 51 TBID0
+Field 50 HWU162
+Field 49 HWU161
+Field 48 HWU160
+Field 47 HWU159
+Field 46 HWU062
+Field 45 HWU061
+Field 44 HWU060
+Field 43 HWU059
+Field 42 HPD1
+Field 41 HPD0
+Field 40 HD
+Field 39 HA
+Field 38 TBI1
+Field 37 TBI0
+Field 36 AS
+Res0 35
+Field 34:32 IPS
+Field 31:30 TG1
+Field 29:28 SH1
+Field 27:26 ORGN1
+Field 25:24 IRGN1
+Field 23 EPD1
+Field 22 A1
+Field 21:16 T1SZ
+Field 15:14 TG0
+Field 13:12 SH0
+Field 11:10 ORGN0
+Field 9:8 IRGN0
+Field 7 EPD0
+Res0 6
+Field 5:0 T0SZ
+EndSysreg
+
+Sysreg TCR_EL12 3 5 2 0 2
+Mapping TCR_EL1
+EndSysreg
+
+Sysreg TCRALIAS_EL1 3 0 2 7 6
+Mapping TCR_EL1
+EndSysreg
+
Sysreg TCR2_EL1 3 0 2 0 3
Res0 63:16
Field 15 DisCH1
@@ -3264,6 +4169,10 @@ Sysreg TCR2_EL12 3 5 2 0 3
Mapping TCR2_EL1
EndSysreg
+Sysreg TCR2ALIAS_EL1 3 0 2 7 7
+Mapping TCR2_EL1
+EndSysreg
+
Sysreg TCR2_EL2 3 4 2 0 3
Res0 63:16
Field 15 DisCH1
@@ -3525,7 +4434,12 @@ Field 31:0 TRG
EndSysreg
Sysreg TRBIDR_EL1 3 0 9 11 7
-Res0 63:12
+Res0 63:16
+UnsignedEnum 15:12 MPAM
+ 0b0000 NI
+ 0b0001 DEFAULT
+ 0b0010 IMP
+EndEnum
Enum 11:8 EA
0b0000 NON_DESC
0b0001 IGNORE
@@ -3536,3 +4450,10 @@ Field 5 F
Field 4 P
Field 3:0 Align
EndSysreg
+
+Sysreg HPFAR_EL2 3 4 6 0 4
+Field 63 NS
+Res0 62:48
+Field 47:4 FIPA
+Res0 3:0
+EndSysreg
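Each Sysreg block above follows the arm64 register-description format: the header line gives the op0 op1 CRn CRm op2 encoding, and the Field/Res0/Enum lines carve the 64-bit value into named bit ranges that the build later turns into shift/mask constants. As a hedged illustration only (the macro and function names below are invented for this sketch, not taken from the generated headers), decoding a field such as TCR_EL1.T0SZ (bits 5:0, per the block above) reduces to a mask-and-shift:

	/* Illustrative decode of TCR_EL1.T0SZ (bits 5:0) as described above.
	 * Names are hypothetical; real code uses the generated sysreg macros. */
	#include <stdint.h>

	#define TCR_T0SZ_SHIFT	0
	#define TCR_T0SZ_MASK	(0x3fULL << TCR_T0SZ_SHIFT)

	static inline unsigned int tcr_t0sz(uint64_t tcr)
	{
		return (tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_SHIFT;
	}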
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 9d01361696a1..ae551b857137 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -83,7 +83,26 @@ HYPERCALL3(vcpu_op);
HYPERCALL1(platform_op_raw);
HYPERCALL2(multicall);
HYPERCALL2(vm_assist);
-HYPERCALL3(dm_op);
+
+SYM_FUNC_START(HYPERVISOR_dm_op)
+ mov x16, #__HYPERVISOR_dm_op; \
+ /*
+ * dm_op hypercalls are issued by the userspace. The kernel needs to
+ * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
+ * translations to user memory via AT instructions. Since AT
+ * instructions are not affected by the PAN bit (ARMv8.1), we only
+ * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
+ * is enabled (which implies that hardware UAO and PAN are disabled).
+ */
+ uaccess_ttbr0_enable x6, x7, x8
+ hvc XEN_IMM
+
+ /*
+ * Disable userspace access from the kernel once the hypercall has completed.
+ */
+ uaccess_ttbr0_disable x6, x7
+ ret
+SYM_FUNC_END(HYPERVISOR_dm_op);
SYM_FUNC_START(privcmd_call)
mov x16, x0
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index 11055c574968..9ed2b15ffd94 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -29,7 +29,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
pte_t *pte;
unsigned long i;
- pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ pte = __pte_alloc_one_kernel(mm);
if (!pte)
return NULL;
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index a397e1718ab6..b8378431aeff 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -249,11 +249,6 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
return __pgprot(prot);
}
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h
index 0de5734950bf..717f44b4d26f 100644
--- a/arch/csky/include/asm/syscall.h
+++ b/arch/csky/include/asm/syscall.h
@@ -59,6 +59,19 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
memcpy(args, &regs->a1, 5 * sizeof(args[0]));
}
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ const unsigned long *args)
+{
+ memcpy(&regs->a0, args, 6 * sizeof(regs->a0));
+ /*
+ * Also copy the first argument into orig_a0
+ * so that syscall_get_arguments() would return it
+ * instead of the previous value.
+ */
+ regs->orig_a0 = regs->a0;
+}
+
static inline int
syscall_get_arch(struct task_struct *task)
{
diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c
index e5f18420ce64..e0a36acd265b 100644
--- a/arch/csky/kernel/perf_event.c
+++ b/arch/csky/kernel/perf_event.c
@@ -1139,8 +1139,7 @@ static irqreturn_t csky_pmu_handle_irq(int irq_num, void *dev)
perf_sample_data_init(&data, 0, hwc->last_period);
csky_pmu_event_set_period(event);
- if (perf_event_overflow(event, &data, regs))
- csky_pmu_stop_event(event);
+ perf_event_overflow(event, &data, regs);
}
csky_pmu_enable(&csky_pmu.pmu);
diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig
index 469c025297c6..c6108f000288 100644
--- a/arch/hexagon/configs/comet_defconfig
+++ b/arch/hexagon/configs/comet_defconfig
@@ -72,9 +72,6 @@ CONFIG_INET=y
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
-CONFIG_CRC_T10DIF=y
CONFIG_FRAME_WARN=0
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index 8c5b7a1c3d90..9fbdfdbc539f 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -238,9 +238,6 @@ static inline int pte_present(pte_t pte)
return pte_val(pte) & _PAGE_PRESENT;
}
-/* mk_pte - make a PTE out of a page pointer and protection bits */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
#define pte_page(x) pfn_to_page(pte_pfn(x))
diff --git a/arch/hexagon/include/asm/syscall.h b/arch/hexagon/include/asm/syscall.h
index f6e454f18038..70637261817a 100644
--- a/arch/hexagon/include/asm/syscall.h
+++ b/arch/hexagon/include/asm/syscall.h
@@ -26,6 +26,13 @@ static inline long syscall_get_nr(struct task_struct *task,
return regs->r06;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->r06 = nr;
+}
+
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned long *args)
@@ -33,6 +40,13 @@ static inline void syscall_get_arguments(struct task_struct *task,
memcpy(args, &(&regs->r00)[0], 6 * sizeof(args[0]));
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ memcpy(&(&regs->r00)[0], args, 6 * sizeof(args[0]));
+}
+
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
@@ -45,6 +59,13 @@ static inline long syscall_get_return_value(struct task_struct *task,
return regs->r00;
}
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->r00 = (long) error ?: val;
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_HEXAGON;
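The syscall_set_nr()/syscall_set_arguments()/syscall_set_return_value() helpers added here (and for csky above and LoongArch below) mirror the existing getters, so generic tracing code can rewrite a stopped task's syscall state. A minimal sketch of that round-trip, relying only on the arch hooks visible in these hunks (the wrapper function itself is hypothetical):

	/* Hypothetical tracer-side helper: rewrite one syscall argument.
	 * Uses only the arch hooks shown in the hunks above. */
	#include <linux/sched.h>
	#include <asm/syscall.h>

	static void rewrite_syscall_arg(struct task_struct *task, struct pt_regs *regs,
					unsigned int idx, unsigned long new_val)
	{
		unsigned long args[6];

		syscall_get_arguments(task, regs, args);	/* existing getter */
		args[idx] = new_val;
		syscall_set_arguments(task, regs, args);	/* setter added above */
	}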
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 067c0b994648..1a2cf012b8f2 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -73,6 +73,7 @@ config LOONGARCH
select ARCH_SUPPORTS_RT
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_DEFAULT_BPF_JIT
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index 90f21dfe22b1..0d59af6007b7 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -1026,7 +1026,7 @@ CONFIG_SECURITY_APPARMOR=y
CONFIG_SECURITY_YAMA=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_SELFTESTS=y
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_ANUBIS=m
diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h
index 51f224bcfc65..704066b4f736 100644
--- a/arch/loongarch/include/asm/asm-prototypes.h
+++ b/arch/loongarch/include/asm/asm-prototypes.h
@@ -12,3 +12,11 @@ __int128_t __ashlti3(__int128_t a, int b);
__int128_t __ashrti3(__int128_t a, int b);
__int128_t __lshrti3(__int128_t a, int b);
#endif
+
+asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev,
+ struct pt_regs *regs);
+
+asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev,
+ struct pt_regs *regs,
+ int (*fn)(void *),
+ void *fn_arg);
diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h
index 3177674228f8..45514f314664 100644
--- a/arch/loongarch/include/asm/fpu.h
+++ b/arch/loongarch/include/asm/fpu.h
@@ -22,22 +22,29 @@
struct sigcontext;
#define kernel_fpu_available() cpu_has_fpu
-extern void kernel_fpu_begin(void);
-extern void kernel_fpu_end(void);
-
-extern void _init_fpu(unsigned int);
-extern void _save_fp(struct loongarch_fpu *);
-extern void _restore_fp(struct loongarch_fpu *);
-
-extern void _save_lsx(struct loongarch_fpu *fpu);
-extern void _restore_lsx(struct loongarch_fpu *fpu);
-extern void _init_lsx_upper(void);
-extern void _restore_lsx_upper(struct loongarch_fpu *fpu);
-
-extern void _save_lasx(struct loongarch_fpu *fpu);
-extern void _restore_lasx(struct loongarch_fpu *fpu);
-extern void _init_lasx_upper(void);
-extern void _restore_lasx_upper(struct loongarch_fpu *fpu);
+
+void kernel_fpu_begin(void);
+void kernel_fpu_end(void);
+
+asmlinkage void _init_fpu(unsigned int);
+asmlinkage void _save_fp(struct loongarch_fpu *);
+asmlinkage void _restore_fp(struct loongarch_fpu *);
+asmlinkage int _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+asmlinkage int _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+
+asmlinkage void _save_lsx(struct loongarch_fpu *fpu);
+asmlinkage void _restore_lsx(struct loongarch_fpu *fpu);
+asmlinkage void _init_lsx_upper(void);
+asmlinkage void _restore_lsx_upper(struct loongarch_fpu *fpu);
+asmlinkage int _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+asmlinkage int _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+
+asmlinkage void _save_lasx(struct loongarch_fpu *fpu);
+asmlinkage void _restore_lasx(struct loongarch_fpu *fpu);
+asmlinkage void _init_lasx_upper(void);
+asmlinkage void _restore_lasx_upper(struct loongarch_fpu *fpu);
+asmlinkage int _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+asmlinkage int _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
static inline void enable_lsx(void);
static inline void disable_lsx(void);
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index f457c2662e2f..a3c4cc46c892 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -301,7 +301,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
-int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
+int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write, int ecode);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index 2c349f961bfb..f1efd7cfbc20 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -37,7 +37,7 @@
#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff
typedef union loongarch_instruction larch_inst;
-typedef int (*exit_handle_fn)(struct kvm_vcpu *);
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, int);
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
diff --git a/arch/loongarch/include/asm/lbt.h b/arch/loongarch/include/asm/lbt.h
index e671978bf552..38566574e562 100644
--- a/arch/loongarch/include/asm/lbt.h
+++ b/arch/loongarch/include/asm/lbt.h
@@ -12,9 +12,13 @@
#include <asm/loongarch.h>
#include <asm/processor.h>
-extern void _init_lbt(void);
-extern void _save_lbt(struct loongarch_lbt *);
-extern void _restore_lbt(struct loongarch_lbt *);
+asmlinkage void _init_lbt(void);
+asmlinkage void _save_lbt(struct loongarch_lbt *);
+asmlinkage void _restore_lbt(struct loongarch_lbt *);
+asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
+asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
+asmlinkage int _save_ftop_context(void __user *ftop);
+asmlinkage int _restore_ftop_context(void __user *ftop);
static inline int is_lbt_enabled(void)
{
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index b58f587f0f0a..1c63a9d9a6d3 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -69,7 +69,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
if (!ptdesc)
return NULL;
- if (!pagetable_pmd_ctor(ptdesc)) {
+ if (!pagetable_pmd_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index da346733a1da..a3f17914dbab 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -255,7 +255,6 @@ static inline void pmd_clear(pmd_t *pmdp)
#define pmd_page_vaddr(pmd) pmd_val(pmd)
-extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -426,12 +425,6 @@ static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
return false;
}
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
index f3ddaed9ef7f..e5d21e836d99 100644
--- a/arch/loongarch/include/asm/ptrace.h
+++ b/arch/loongarch/include/asm/ptrace.h
@@ -33,9 +33,9 @@ struct pt_regs {
unsigned long __last[];
} __aligned(8);
-static inline int regs_irqs_disabled(struct pt_regs *regs)
+static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
- return arch_irqs_disabled_flags(regs->csr_prmd);
+ return !(regs->csr_prmd & CSR_PRMD_PIE);
}
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
@@ -55,7 +55,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long v
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
-#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last) - sizeof(unsigned long))
/**
* regs_get_register() - get register value from its offset
diff --git a/arch/loongarch/include/asm/syscall.h b/arch/loongarch/include/asm/syscall.h
index e286dc58476e..81d2733f7b94 100644
--- a/arch/loongarch/include/asm/syscall.h
+++ b/arch/loongarch/include/asm/syscall.h
@@ -26,6 +26,13 @@ static inline long syscall_get_nr(struct task_struct *task,
return regs->regs[11];
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->regs[11] = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -61,6 +68,14 @@ static inline void syscall_get_arguments(struct task_struct *task,
memcpy(&args[1], &regs->regs[5], 5 * sizeof(long));
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ regs->orig_a0 = args[0];
+ memcpy(&regs->regs[5], &args[1], 5 * sizeof(long));
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_LOONGARCH64;
diff --git a/arch/loongarch/include/asm/uprobes.h b/arch/loongarch/include/asm/uprobes.h
index 99a0d198927f..025fc3f0a102 100644
--- a/arch/loongarch/include/asm/uprobes.h
+++ b/arch/loongarch/include/asm/uprobes.h
@@ -15,7 +15,6 @@ typedef u32 uprobe_opcode_t;
#define UPROBE_XOLBP_INSN __emit_break(BRK_UPROBE_XOLBP)
struct arch_uprobe {
- unsigned long resume_era;
u32 insn[2];
u32 ixol[2];
bool simulate;
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 4853e8b04c6f..f9dcaa60033d 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -21,10 +21,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
-CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_traps.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_module.o += $(call cc-disable-warning, override-init)
+CFLAGS_syscall.o += $(call cc-disable-warning, override-init)
+CFLAGS_traps.o += $(call cc-disable-warning, override-init)
+CFLAGS_perf_event.o += $(call cc-disable-warning, override-init)
ifdef CONFIG_FUNCTION_TRACER
ifndef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S
index 5f23b85d78ca..ba0bdbf86aa8 100644
--- a/arch/loongarch/kernel/efi-header.S
+++ b/arch/loongarch/kernel/efi-header.S
@@ -7,7 +7,7 @@
#include <linux/sizes.h>
.macro __EFI_PE_HEADER
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
.Lcoff_header:
.short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */
.short .Lsection_count /* NumberOfSections */
@@ -20,7 +20,7 @@
IMAGE_FILE_LINE_NUMS_STRIPPED /* Characteristics */
.Loptional_header:
- .short PE_OPT_MAGIC_PE32PLUS /* PE32+ format */
+ .short IMAGE_NT_OPTIONAL_HDR64_MAGIC /* PE32+ format */
.byte 0x02 /* MajorLinkerVersion */
.byte 0x14 /* MinorLinkerVersion */
.long __inittext_end - .Lefi_header_end /* SizeOfCode */
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index 48e7e34e355e..2abc29e57381 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -77,24 +77,22 @@ SYM_CODE_START(handle_syscall)
SYM_CODE_END(handle_syscall)
_ASM_NOKPROBE(handle_syscall)
-SYM_CODE_START(ret_from_fork)
+SYM_CODE_START(ret_from_fork_asm)
UNWIND_HINT_REGS
- bl schedule_tail # a0 = struct task_struct *prev
- move a0, sp
- bl syscall_exit_to_user_mode
+ move a1, sp
+ bl ret_from_fork
RESTORE_STATIC
RESTORE_SOME
RESTORE_SP_AND_RET
-SYM_CODE_END(ret_from_fork)
+SYM_CODE_END(ret_from_fork_asm)
-SYM_CODE_START(ret_from_kernel_thread)
+SYM_CODE_START(ret_from_kernel_thread_asm)
UNWIND_HINT_REGS
- bl schedule_tail # a0 = struct task_struct *prev
- move a0, s1
- jirl ra, s0, 0
- move a0, sp
- bl syscall_exit_to_user_mode
+ move a1, sp
+ move a2, s0
+ move a3, s1
+ bl ret_from_kernel_thread
RESTORE_STATIC
RESTORE_SOME
RESTORE_SP_AND_RET
-SYM_CODE_END(ret_from_kernel_thread)
+SYM_CODE_END(ret_from_kernel_thread_asm)
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
index 6ab640101457..28caf416ae36 100644
--- a/arch/loongarch/kernel/fpu.S
+++ b/arch/loongarch/kernel/fpu.S
@@ -458,6 +458,7 @@ SYM_FUNC_START(_save_fp_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_fp_context)
+EXPORT_SYMBOL_GPL(_save_fp_context)
/*
* a0: fpregs
@@ -471,6 +472,7 @@ SYM_FUNC_START(_restore_fp_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_fp_context)
+EXPORT_SYMBOL_GPL(_restore_fp_context)
/*
* a0: fpregs
@@ -484,6 +486,7 @@ SYM_FUNC_START(_save_lsx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lsx_context)
+EXPORT_SYMBOL_GPL(_save_lsx_context)
/*
* a0: fpregs
@@ -497,6 +500,7 @@ SYM_FUNC_START(_restore_lsx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lsx_context)
+EXPORT_SYMBOL_GPL(_restore_lsx_context)
/*
* a0: fpregs
@@ -510,6 +514,7 @@ SYM_FUNC_START(_save_lasx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lasx_context)
+EXPORT_SYMBOL_GPL(_save_lasx_context)
/*
* a0: fpregs
@@ -523,6 +528,7 @@ SYM_FUNC_START(_restore_lasx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lasx_context)
+EXPORT_SYMBOL_GPL(_restore_lasx_context)
.L_fpu_fault:
li.w a0, -EFAULT # failure
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
index 4f0912141781..733a7665e434 100644
--- a/arch/loongarch/kernel/genex.S
+++ b/arch/loongarch/kernel/genex.S
@@ -16,6 +16,7 @@
#include <asm/stackframe.h>
#include <asm/thread_info.h>
+ .section .cpuidle.text, "ax"
.align 5
SYM_FUNC_START(__arch_cpu_idle)
/* start of idle interrupt region */
@@ -31,14 +32,16 @@ SYM_FUNC_START(__arch_cpu_idle)
*/
idle 0
/* end of idle interrupt region */
-1: jr ra
+idle_exit:
+ jr ra
SYM_FUNC_END(__arch_cpu_idle)
+ .previous
SYM_CODE_START(handle_vint)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
- la_abs t1, 1b
+ la_abs t1, idle_exit
LONG_L t0, sp, PT_ERA
/* 3 instructions idle interrupt region */
ori t0, t0, 0b1100
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index 506a99a5bbc7..e3865e92a917 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -20,7 +20,7 @@
__HEAD
_head:
- .word MZ_MAGIC /* "MZ", MS-DOS header */
+ .word IMAGE_DOS_SIGNATURE /* "MZ", MS-DOS header */
.org 0x8
.dword _kernel_entry /* Kernel entry point (physical address) */
.dword _kernel_asize /* Kernel image effective size */
diff --git a/arch/loongarch/kernel/kfpu.c b/arch/loongarch/kernel/kfpu.c
index ec5b28e570c9..4c476904227f 100644
--- a/arch/loongarch/kernel/kfpu.c
+++ b/arch/loongarch/kernel/kfpu.c
@@ -18,11 +18,28 @@ static unsigned int euen_mask = CSR_EUEN_FPEN;
static DEFINE_PER_CPU(bool, in_kernel_fpu);
static DEFINE_PER_CPU(unsigned int, euen_current);
+static inline void fpregs_lock(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+ else
+ local_bh_disable();
+}
+
+static inline void fpregs_unlock(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ else
+ local_bh_enable();
+}
+
void kernel_fpu_begin(void)
{
unsigned int *euen_curr;
- preempt_disable();
+ if (!irqs_disabled())
+ fpregs_lock();
WARN_ON(this_cpu_read(in_kernel_fpu));
@@ -73,7 +90,8 @@ void kernel_fpu_end(void)
this_cpu_write(in_kernel_fpu, false);
- preempt_enable();
+ if (!irqs_disabled())
+ fpregs_unlock();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
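For reference, kernel_fpu_begin()/kernel_fpu_end(), whose locking this hunk reworks for PREEMPT_RT and IRQ-disabled callers, bracket any kernel-mode FP/SIMD use; a hedged usage sketch, with the caller function being illustrative only:

	/* Illustrative caller: kernel-mode FP/LSX/LASX use must be bracketed. */
	#include <asm/fpu.h>

	static void do_simd_work(void)
	{
		if (!kernel_fpu_available())
			return;		/* fall back to a scalar path */

		kernel_fpu_begin();
		/* ... FPU/LSX/LASX instructions ... */
		kernel_fpu_end();
	}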
diff --git a/arch/loongarch/kernel/lbt.S b/arch/loongarch/kernel/lbt.S
index 001f061d226a..71678912d24c 100644
--- a/arch/loongarch/kernel/lbt.S
+++ b/arch/loongarch/kernel/lbt.S
@@ -90,6 +90,7 @@ SYM_FUNC_START(_save_lbt_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lbt_context)
+EXPORT_SYMBOL_GPL(_save_lbt_context)
/*
* a0: scr
@@ -110,6 +111,7 @@ SYM_FUNC_START(_restore_lbt_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lbt_context)
+EXPORT_SYMBOL_GPL(_restore_lbt_context)
/*
* a0: ftop
@@ -120,6 +122,7 @@ SYM_FUNC_START(_save_ftop_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_ftop_context)
+EXPORT_SYMBOL_GPL(_save_ftop_context)
/*
* a0: ftop
@@ -150,6 +153,7 @@ SYM_FUNC_START(_restore_ftop_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_ftop_context)
+EXPORT_SYMBOL_GPL(_restore_ftop_context)
.L_lbt_fault:
li.w a0, -EFAULT # failure
diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
index f86a4b838dd7..8ad098703488 100644
--- a/arch/loongarch/kernel/perf_event.c
+++ b/arch/loongarch/kernel/perf_event.c
@@ -479,8 +479,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc, int idx,
if (!loongarch_pmu_event_set_period(event, hwc, idx))
return;
- if (perf_event_overflow(event, data, regs))
- loongarch_pmu_disable_event(idx);
+ perf_event_overflow(event, data, regs);
}
static irqreturn_t pmu_handle_irq(int irq, void *dev)
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index 6e58f65455c7..3582f591bab2 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -13,6 +13,7 @@
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/entry-common.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
@@ -34,6 +35,7 @@
#include <linux/nmi.h>
#include <asm/asm.h>
+#include <asm/asm-prototypes.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
@@ -47,6 +49,7 @@
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
+#include <asm/switch_to.h>
#include <asm/unwind.h>
#include <asm/vdso.h>
@@ -63,8 +66,9 @@ EXPORT_SYMBOL(__stack_chk_guard);
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
-asmlinkage void ret_from_fork(void);
-asmlinkage void ret_from_kernel_thread(void);
+asmlinkage void restore_and_ret(void);
+asmlinkage void ret_from_fork_asm(void);
+asmlinkage void ret_from_kernel_thread_asm(void);
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
@@ -138,6 +142,23 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
+asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev,
+ struct pt_regs *regs)
+{
+ schedule_tail(prev);
+ syscall_exit_to_user_mode(regs);
+}
+
+asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev,
+ struct pt_regs *regs,
+ int (*fn)(void *),
+ void *fn_arg)
+{
+ schedule_tail(prev);
+ fn(fn_arg);
+ syscall_exit_to_user_mode(regs);
+}
+
/*
* Copy architecture-specific thread state
*/
@@ -165,8 +186,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.reg03 = childksp;
p->thread.reg23 = (unsigned long)args->fn;
p->thread.reg24 = (unsigned long)args->fn_arg;
- p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
- p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
+ p->thread.reg01 = (unsigned long)ret_from_kernel_thread_asm;
+ p->thread.sched_ra = (unsigned long)ret_from_kernel_thread_asm;
memset(childregs, 0, sizeof(struct pt_regs));
childregs->csr_euen = p->thread.csr_euen;
childregs->csr_crmd = p->thread.csr_crmd;
@@ -182,8 +203,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
childregs->regs[3] = usp;
p->thread.reg03 = (unsigned long) childregs;
- p->thread.reg01 = (unsigned long) ret_from_fork;
- p->thread.sched_ra = (unsigned long) ret_from_fork;
+ p->thread.reg01 = (unsigned long) ret_from_fork_asm;
+ p->thread.sched_ra = (unsigned long) ret_from_fork_asm;
/*
* New tasks lose permission to use the fpu. This accelerates context
diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
index 7a555b600171..4740cb5b2388 100644
--- a/arch/loongarch/kernel/signal.c
+++ b/arch/loongarch/kernel/signal.c
@@ -51,27 +51,6 @@
#define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); })
#define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
-/* Assembly functions to move context to/from the FPU */
-extern asmlinkage int
-_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
-extern asmlinkage int
-_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
-extern asmlinkage int
-_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-
-#ifdef CONFIG_CPU_HAS_LBT
-extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
-extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
-extern asmlinkage int _save_ftop_context(void __user *ftop);
-extern asmlinkage int _restore_ftop_context(void __user *ftop);
-#endif
-
struct rt_sigframe {
struct siginfo rs_info;
struct ucontext rs_uctx;
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index e2d3bfeb6366..bc75a3a69fc8 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -111,7 +111,7 @@ static unsigned long __init get_loops_per_jiffy(void)
return lpj;
}
-static long init_offset __nosavedata;
+static long init_offset;
void save_counter(void)
{
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index 2ec3106c0da3..47fc2de6d150 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -553,9 +553,10 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
+ bool pie = regs_irqs_disabled(regs);
unsigned int *pc;
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
@@ -582,7 +583,7 @@ sigbus:
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
#endif
irqentry_exit(regs, state);
@@ -621,12 +622,13 @@ static void bug_handler(struct pt_regs *regs)
asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
bool user = user_mode(regs);
+ bool pie = regs_irqs_disabled(regs);
unsigned long era = exception_era(regs);
u64 badv = 0, lower = 0, upper = ULONG_MAX;
union loongarch_instruction insn;
irqentry_state_t state = irqentry_enter(regs);
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
current->thread.trap_nr = read_csr_excode();
@@ -692,7 +694,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
@@ -710,11 +712,12 @@ bad_era:
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
bool user = user_mode(regs);
+ bool pie = regs_irqs_disabled(regs);
unsigned int opcode, bcode;
unsigned long era = exception_era(regs);
irqentry_state_t state = irqentry_enter(regs);
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
if (__get_inst(&opcode, (u32 *)era, user))
@@ -780,7 +783,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
}
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
@@ -1015,6 +1018,7 @@ static void init_restore_lbt(void)
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
+ bool pie = regs_irqs_disabled(regs);
irqentry_state_t state = irqentry_enter(regs);
/*
@@ -1024,7 +1028,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
* (including the user using 'MOVGR2GCSR' to turn on TM, which
* will not trigger the BTE), we need to check PRMD first.
*/
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
if (!cpu_has_lbt) {
@@ -1038,7 +1042,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
preempt_enable();
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
diff --git a/arch/loongarch/kernel/uprobes.c b/arch/loongarch/kernel/uprobes.c
index 87abc7137b73..6022eb0f71db 100644
--- a/arch/loongarch/kernel/uprobes.c
+++ b/arch/loongarch/kernel/uprobes.c
@@ -42,7 +42,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
utask->autask.saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
instruction_pointer_set(regs, utask->xol_vaddr);
- user_enable_single_step(current);
return 0;
}
@@ -53,13 +52,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
current->thread.trap_nr = utask->autask.saved_trap_nr;
-
- if (auprobe->simulate)
- instruction_pointer_set(regs, auprobe->resume_era);
- else
- instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
-
- user_disable_single_step(current);
+ instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
return 0;
}
@@ -70,7 +63,6 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
current->thread.trap_nr = utask->autask.saved_trap_nr;
instruction_pointer_set(regs, utask->vaddr);
- user_disable_single_step(current);
}
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
@@ -90,7 +82,6 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
insn.word = auprobe->insn[0];
arch_simulate_insn(insn, regs);
- auprobe->resume_era = regs->csr_era;
return true;
}
diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile
index f4c8e35c216a..cb41d9265662 100644
--- a/arch/loongarch/kvm/Makefile
+++ b/arch/loongarch/kvm/Makefile
@@ -21,4 +21,4 @@ kvm-y += intc/eiointc.o
kvm-y += intc/pch_pic.o
kvm-y += irqfd.o
-CFLAGS_exit.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_exit.o += $(call cc-disable-warning, override-init)
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index ea321403644a..fa52251b3bf1 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -341,7 +341,7 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
* 2) Execute CACOP/IDLE instructions;
* 3) Access to unimplemented CSRs/IOCSRs.
*/
-static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
+static int kvm_handle_gspr(struct kvm_vcpu *vcpu, int ecode)
{
int ret = RESUME_GUEST;
enum emulation_result er = EMULATE_DONE;
@@ -661,7 +661,7 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
return ret;
}
-static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
+static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write, int ecode)
{
int ret;
larch_inst inst;
@@ -675,7 +675,7 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
return RESUME_GUEST;
}
- ret = kvm_handle_mm_fault(vcpu, badv, write);
+ ret = kvm_handle_mm_fault(vcpu, badv, write, ecode);
if (ret) {
/* Treat as MMIO */
inst.word = vcpu->arch.badi;
@@ -705,14 +705,14 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
return ret;
}
-static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
+static int kvm_handle_read_fault(struct kvm_vcpu *vcpu, int ecode)
{
- return kvm_handle_rdwr_fault(vcpu, false);
+ return kvm_handle_rdwr_fault(vcpu, false, ecode);
}
-static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
+static int kvm_handle_write_fault(struct kvm_vcpu *vcpu, int ecode)
{
- return kvm_handle_rdwr_fault(vcpu, true);
+ return kvm_handle_rdwr_fault(vcpu, true, ecode);
}
int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run)
@@ -726,11 +726,12 @@ int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run)
/**
* kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use fpu which hasn't been allowed
* by the root context.
*/
-static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode)
{
struct kvm_run *run = vcpu->run;
@@ -783,11 +784,12 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu)
/*
* kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use LSX when it is disabled in the root
* context.
*/
-static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lsx(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
@@ -798,11 +800,12 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
/*
* kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use LASX when it is disabled in the root
* context.
*/
-static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lasx(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
@@ -810,7 +813,7 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lbt(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
@@ -872,7 +875,7 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu)
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}
-static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
+static int kvm_handle_hypercall(struct kvm_vcpu *vcpu, int ecode)
{
int ret;
larch_inst inst;
@@ -932,16 +935,14 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
/*
* LoongArch KVM callback handling for unimplemented guest exiting
*/
-static int kvm_fault_ni(struct kvm_vcpu *vcpu)
+static int kvm_fault_ni(struct kvm_vcpu *vcpu, int ecode)
{
- unsigned int ecode, inst;
- unsigned long estat, badv;
+ unsigned int inst;
+ unsigned long badv;
/* Fetch the instruction */
inst = vcpu->arch.badi;
badv = vcpu->arch.badv;
- estat = vcpu->arch.host_estat;
- ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
kvm_arch_vcpu_dump_regs(vcpu);
@@ -966,5 +967,5 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
- return kvm_fault_tables[fault](vcpu);
+ return kvm_fault_tables[fault](vcpu, fault);
}
diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c
index 93f4acd44523..fe734dc062ed 100644
--- a/arch/loongarch/kvm/intc/ipi.c
+++ b/arch/loongarch/kvm/intc/ipi.c
@@ -111,7 +111,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (unlikely(ret)) {
- kvm_err("%s: : read date from addr %llx failed\n", __func__, addr);
+ kvm_err("%s: : read data from addr %llx failed\n", __func__, addr);
return ret;
}
/* Construct the mask by scanning the bit 27-30 */
@@ -127,7 +127,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (unlikely(ret))
- kvm_err("%s: : write date to addr %llx failed\n", __func__, addr);
+ kvm_err("%s: : write data to addr %llx failed\n", __func__, addr);
return ret;
}
diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
index d165cd38c6bb..80ea63d465b8 100644
--- a/arch/loongarch/kvm/main.c
+++ b/arch/loongarch/kvm/main.c
@@ -296,10 +296,10 @@ int kvm_arch_enable_virtualization_cpu(void)
/*
* Enable virtualization features granting guest direct control of
* certain features:
- * GCI=2: Trap on init or unimplement cache instruction.
+ * GCI=2: Trap on init or unimplemented cache instruction.
* TORU=0: Trap on Root Unimplement.
* CACTRL=1: Root control cache.
- * TOP=0: Trap on Previlege.
+ * TOP=0: Trap on Privilege.
* TOE=0: Trap on Exception.
* TIT=0: Trap on Timer.
*/
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 4d203294767c..ed956c5cf2cc 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -912,7 +912,7 @@ out:
return err;
}
-int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
+int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write, int ecode)
{
int ret;
@@ -921,8 +921,17 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
return ret;
/* Invalidate this entry in the TLB */
- vcpu->arch.flush_gpa = gpa;
- kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
+ if (!cpu_has_ptw || (ecode == EXCCODE_TLBM)) {
+ /*
+ * With hardware PTW, no invalid TLB entry is added on a page fault.
+ * But for an EXCCODE_TLBM exception, a stale TLB entry may exist
+ * because of the last read access.
+ *
+ * With software PTW, an invalid TLB entry is added in the TLB refill exception.
+ */
+ vcpu->arch.flush_gpa = gpa;
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
+ }
return 0;
}
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 8e427b379661..5af32ec62cb1 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -294,6 +294,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
+ kvm_lose_pmu(vcpu);
/* make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
local_irq_enable();
@@ -902,6 +903,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
vcpu->arch.st.guest_addr = 0;
memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
+
+ /*
+ * When the vCPU is reset, clear the ESTAT and GINTC registers.
+ * Other CSR registers are cleared by _kvm_setcsr().
+ */
+ kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
+ kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
break;
default:
ret = -EINVAL;
diff --git a/arch/loongarch/lib/crc32-loongarch.c b/arch/loongarch/lib/crc32-loongarch.c
index c44ee4f32557..b37cd8537b45 100644
--- a/arch/loongarch/lib/crc32-loongarch.c
+++ b/arch/loongarch/lib/crc32-loongarch.c
@@ -26,7 +26,7 @@ do { \
#define CRC32(crc, value, size) _CRC32(crc, value, size, crc)
#define CRC32C(crc, value, size) _CRC32(crc, value, size, crcc)
-static DEFINE_STATIC_KEY_FALSE(have_crc32);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32);
u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
{
@@ -114,7 +114,7 @@ static int __init crc32_loongarch_init(void)
static_branch_enable(&have_crc32);
return 0;
}
-arch_initcall(crc32_loongarch_init);
+subsys_initcall(crc32_loongarch_init);
static void __exit crc32_loongarch_exit(void)
{
diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c
index e4068906143b..cea84d7f2b91 100644
--- a/arch/loongarch/mm/hugetlbpage.c
+++ b/arch/loongarch/mm/hugetlbpage.c
@@ -47,7 +47,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
pmd = pmd_offset(pud, addr);
}
}
- return (pte_t *) pmd;
+ return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
}
uint64_t pmd_to_entrylo(unsigned long pmd_val)
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index fdb7f73ad160..06f11d9e4ec1 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -65,9 +65,6 @@ void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
-#ifdef CONFIG_ZONE_DMA
- max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
-#endif
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index 22a94bb3e6e8..352d9b2e02ab 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -135,15 +135,6 @@ void kernel_pte_init(void *addr)
} while (p != end);
}
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
-{
- pmd_t pmd;
-
- pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
-
- return pmd;
-}
-
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
diff --git a/arch/loongarch/power/hibernate.c b/arch/loongarch/power/hibernate.c
index 1e0590542f98..e7b7346592cb 100644
--- a/arch/loongarch/power/hibernate.c
+++ b/arch/loongarch/power/hibernate.c
@@ -2,6 +2,7 @@
#include <asm/fpu.h>
#include <asm/loongson.h>
#include <asm/sections.h>
+#include <asm/time.h>
#include <asm/tlbflush.h>
#include <linux/suspend.h>
@@ -14,6 +15,7 @@ struct pt_regs saved_regs;
void save_processor_state(void)
{
+ save_counter();
saved_crmd = csr_read32(LOONGARCH_CSR_CRMD);
saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
@@ -26,6 +28,7 @@ void save_processor_state(void)
void restore_processor_state(void)
{
+ sync_counter();
csr_write32(saved_crmd, LOONGARCH_CSR_CRMD);
csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
csr_write32(saved_euen, LOONGARCH_CSR_EUEN);
diff --git a/arch/m68k/coldfire/gpio.c b/arch/m68k/coldfire/gpio.c
index ca26de257871..30e5a4ed799d 100644
--- a/arch/m68k/coldfire/gpio.c
+++ b/arch/m68k/coldfire/gpio.c
@@ -123,10 +123,12 @@ static int mcfgpio_direction_output(struct gpio_chip *chip, unsigned offset,
return __mcfgpio_direction_output(offset, value);
}
-static void mcfgpio_set_value(struct gpio_chip *chip, unsigned offset,
- int value)
+static int mcfgpio_set_value(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
__mcfgpio_set_value(offset, value);
+
+ return 0;
}
static int mcfgpio_request(struct gpio_chip *chip, unsigned offset)
@@ -158,7 +160,7 @@ static struct gpio_chip mcfgpio_chip = {
.direction_input = mcfgpio_direction_input,
.direction_output = mcfgpio_direction_output,
.get = mcfgpio_get_value,
- .set = mcfgpio_set_value,
+ .set_rv = mcfgpio_set_value,
.to_irq = mcfgpio_to_irq,
.base = 0,
.ngpio = MCFGPIO_PIN_MAX,
diff --git a/arch/m68k/coldfire/m5272.c b/arch/m68k/coldfire/m5272.c
index 734dab657fe3..5b70dfdab368 100644
--- a/arch/m68k/coldfire/m5272.c
+++ b/arch/m68k/coldfire/m5272.c
@@ -119,7 +119,7 @@ static struct fixed_phy_status nettel_fixed_phy_status __initdata = {
static int __init init_BSP(void)
{
m5272_uarts_init();
- fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status);
+ fixed_phy_add(0, &nettel_fixed_phy_status);
clkdev_add_table(m5272_clk_lookup, ARRAY_SIZE(m5272_clk_lookup));
return 0;
}
diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig
index 67a0d157122d..60767811e34a 100644
--- a/arch/m68k/configs/amcore_defconfig
+++ b/arch/m68k/configs/amcore_defconfig
@@ -2,7 +2,6 @@ CONFIG_LOCALVERSION="amcore-002"
CONFIG_DEFAULT_HOSTNAME="amcore"
CONFIG_SYSVIPC=y
# CONFIG_FHANDLE is not set
-# CONFIG_USELIB is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_AIO is not set
@@ -89,4 +88,3 @@ CONFIG_PANIC_ON_OOPS=y
# CONFIG_CRYPTO_ECHAINIV is not set
CONFIG_CRYPTO_ANSI_CPRNG=y
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=y
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 31ecb8b7b9f1..d05690289e33 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -267,8 +267,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -551,7 +549,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
@@ -602,7 +600,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -621,8 +618,6 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
@@ -632,7 +627,6 @@ CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
CONFIG_TEST_BPF=m
-CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 1f57514624d5..a1747fbe23fb 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -263,8 +263,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -508,7 +506,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
@@ -559,7 +557,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -578,8 +575,6 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
@@ -589,7 +584,6 @@ CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
CONFIG_TEST_BPF=m
-CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 02db7a48e57e..74293551f66b 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -270,8 +270,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -528,7 +526,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
@@ -579,7 +577,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -598,8 +595,6 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
@@ -609,7 +604,6 @@ CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
CONFIG_TEST_BPF=m
-CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index f0e673cb17eb..419b13ae950a 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -260,8 +260,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -500,7 +498,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
@@ -551,7 +549,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -570,8 +567,6 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
@@ -581,7 +576,6 @@ CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
CONFIG_TEST_BPF=m
-CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index e8ca5a50b86d..4c81d756587c 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -262,8 +262,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -510,7 +508,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
@@ -561,7 +559,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -580,8 +577,6 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
@@ -591,7 +586,6 @@ CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
CONFIG_TEST_BPF=m
-CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index b3a270441bb1..daa01d7fb462 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -261,8 +261,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -527,7 +525,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
@@ -578,7 +576,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -597,8 +594,6 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
@@ -608,7 +603,6 @@ CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
CONFIG_TEST_BPF=m
-CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index d215dba006ce..641ca22eb3b2 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -281,8 +281,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -614,7 +612,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
@@ -665,7 +663,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -684,8 +681,6 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
@@ -695,7 +690,6 @@ CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
CONFIG_TEST_BPF=m
-CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index a888ed93ff82..f98ffa7a1640 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -259,8 +259,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -500,7 +498,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
@@ -551,7 +549,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -570,8 +567,6 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
@@ -581,7 +576,6 @@ CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
CONFIG_TEST_BPF=m
-CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index b481782375f6..2bfc3f4b48f9 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -260,8 +260,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -501,7 +499,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
@@ -552,7 +550,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -571,8 +568,6 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
@@ -582,7 +577,6 @@ CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
CONFIG_TEST_BPF=m
-CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 6eba743d8eb5..2bd46cbcca2a 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -261,8 +261,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -517,7 +515,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
@@ -568,7 +566,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -587,8 +584,6 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
@@ -598,7 +593,6 @@ CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
CONFIG_TEST_BPF=m
-CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 9bdbb418ffa8..dc7fc94fc669 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -256,8 +256,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -498,7 +496,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
@@ -549,7 +547,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -567,8 +564,6 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
@@ -578,7 +573,6 @@ CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
CONFIG_TEST_BPF=m
-CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index e1cf20fa5343..b026a54867f5 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -257,8 +257,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -498,7 +496,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
@@ -549,7 +547,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -568,8 +565,6 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
@@ -579,7 +574,6 @@ CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
CONFIG_TEST_BPF=m
-CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 4c648b51e7fd..fc5454d37da3 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -7,7 +7,7 @@
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- pagetable_free(virt_to_ptdesc(pte));
+ pagetable_dtor_free(virt_to_ptdesc(pte));
}
extern const char bad_pmd_string[];
@@ -19,6 +19,10 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
if (!ptdesc)
return NULL;
+ if (!pagetable_pte_ctor(mm, ptdesc)) {
+ pagetable_free(ptdesc);
+ return NULL;
+ }
return ptdesc_address(ptdesc);
}
@@ -48,7 +52,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
if (!ptdesc)
return NULL;
- if (!pagetable_pte_ctor(ptdesc)) {
+ if (!pagetable_pte_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index 48f87a8a8832..f5c596b211d4 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -96,12 +96,6 @@
#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index 5abe7da8ac5a..1091fb0affbe 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -15,7 +15,7 @@ enum m68k_table_types {
};
extern void init_pointer_table(void *table, int type);
-extern void *get_pointer_table(int type);
+extern void *get_pointer_table(struct mm_struct *mm, int type);
extern int free_pointer_table(void *table, int type);
/*
@@ -26,7 +26,7 @@ extern int free_pointer_table(void *table, int type);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
- return get_pointer_table(TABLE_PTE);
+ return get_pointer_table(mm, TABLE_PTE);
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
@@ -36,7 +36,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
- return get_pointer_table(TABLE_PTE);
+ return get_pointer_table(mm, TABLE_PTE);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
@@ -53,7 +53,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- return get_pointer_table(TABLE_PMD);
+ return get_pointer_table(mm, TABLE_PMD);
}
static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -75,7 +75,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return get_pointer_table(TABLE_PGD);
+ return get_pointer_table(mm, TABLE_PGD);
}
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 9866c7acdabe..040ac3bad713 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -81,12 +81,6 @@ extern unsigned long mm_cachebits;
#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index 30081aee8164..73745dc0ec0e 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -76,12 +76,6 @@
#ifndef __ASSEMBLY__
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & SUN3_PAGE_CHG_MASK) | pgprot_val(newprot);
diff --git a/arch/m68k/include/asm/syscall.h b/arch/m68k/include/asm/syscall.h
index d1453e850cdd..bf84b160c2eb 100644
--- a/arch/m68k/include/asm/syscall.h
+++ b/arch/m68k/include/asm/syscall.h
@@ -14,6 +14,13 @@ static inline int syscall_get_nr(struct task_struct *task,
return regs->orig_d0;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->orig_d0 = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 0fba32552836..c7e8de0d34bb 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -484,7 +484,7 @@ static int hardware_proc_show(struct seq_file *m, void *v)
if (mach_get_model)
mach_get_model(model);
else
- strcpy(model, "Unknown m68k");
+ strscpy(model, "Unknown m68k");
seq_printf(m, "Model:\t\t%s\n", model);
for (mem = 0, i = 0; i < m68k_num_memory; i++)
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index f9872098f5ca..f724875b15cc 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -145,8 +145,7 @@ void __init setup_arch(char **cmdline_p)
/* Keep a copy of command line */
*cmdline_p = &command_line[0];
- memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
- boot_command_line[COMMAND_LINE_SIZE-1] = 0;
+ strscpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
/*
* Give all the memory to the bootmap allocator, tell it to put the
diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c
index 5e52ea150d5c..fa7c279ead5d 100644
--- a/arch/m68k/kernel/uboot.c
+++ b/arch/m68k/kernel/uboot.c
@@ -73,7 +73,7 @@ static void __init parse_uboot_commandline(char *commandp, int size)
uboot_cmd_end = sp[5];
if (uboot_cmd_start && uboot_cmd_end)
- strncpy(commandp, (const char *)uboot_cmd_start, size);
+ strscpy(commandp, (const char *)uboot_cmd_start, size);
#if defined(CONFIG_BLK_DEV_INITRD)
uboot_initrd_start = sp[2];
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index e324410ef239..d26c7f4f8c36 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -793,7 +793,7 @@ static void __init mac_identify(void)
}
macintosh_config = mac_data_table;
- for (m = macintosh_config; m->ident != -1; m++) {
+ for (m = &mac_data_table[1]; m->ident != -1; m++) {
if (m->ident == model) {
macintosh_config = m;
break;
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 73651e093c4d..745bd575dcfa 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -105,7 +105,8 @@ static struct list_head ptable_list[3] = {
#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
-#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
+#define PD_PTDESC(ptable) (list_entry(ptable, struct ptdesc, pt_list))
+#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PTDESC(dp)->pt_index)
static const int ptable_shift[3] = {
7+2, /* PGD */
@@ -139,7 +140,7 @@ void __init init_pointer_table(void *table, int type)
return;
}
-void *get_pointer_table(int type)
+void *get_pointer_table(struct mm_struct *mm, int type)
{
ptable_desc *dp = ptable_list[type].next;
unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
@@ -164,10 +165,10 @@ void *get_pointer_table(int type)
* m68k doesn't have SPLIT_PTE_PTLOCKS for not having
* SMP.
*/
- pagetable_pte_ctor(virt_to_ptdesc(page));
+ pagetable_pte_ctor(mm, virt_to_ptdesc(page));
break;
case TABLE_PMD:
- pagetable_pmd_ctor(virt_to_ptdesc(page));
+ pagetable_pmd_ctor(mm, virt_to_ptdesc(page));
break;
case TABLE_PGD:
pagetable_pgd_ctor(virt_to_ptdesc(page));
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index e4ea2ec3642f..b1bb2c65dd04 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -285,14 +285,6 @@ static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
return pte;
}
-#define mk_pte(page, pgprot) \
-({ \
- pte_t pte; \
- pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
- pgprot_val(pgprot); \
- pte; \
-})
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h
index 5eb3f624cc59..b5b6b91fae3e 100644
--- a/arch/microblaze/include/asm/syscall.h
+++ b/arch/microblaze/include/asm/syscall.h
@@ -14,6 +14,13 @@ static inline long syscall_get_nr(struct task_struct *task,
return regs->r12;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->r12 = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 26c385582c3b..ccb4b4b59bca 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -252,7 +252,7 @@ static int __init xilinx_timer_init(struct device_node *timer)
int ret;
/* If this property is present, the device is a PWM and not a timer */
- if (of_property_read_bool(timer, "#pwm-cells"))
+ if (of_property_present(timer, "#pwm-cells"))
return 0;
if (initialized)
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 9f73265aad4e..e96dd1b7aba4 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -245,7 +245,7 @@ unsigned long iopa(unsigned long addr)
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
if (mem_init_done)
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ return __pte_alloc_one_kernel(mm);
else
return memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
MEMBLOCK_LOW_LIMIT,
diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c
index 8ccf167c167e..e8c38aaf46a2 100644
--- a/arch/mips/ath25/ar2315.c
+++ b/arch/mips/ath25/ar2315.c
@@ -149,8 +149,8 @@ void __init ar2315_arch_init_irq(void)
ath25_irq_dispatch = ar2315_irq_dispatch;
- domain = irq_domain_add_linear(NULL, AR2315_MISC_IRQ_COUNT,
- &ar2315_misc_irq_domain_ops, NULL);
+ domain = irq_domain_create_linear(NULL, AR2315_MISC_IRQ_COUNT,
+ &ar2315_misc_irq_domain_ops, NULL);
if (!domain)
panic("Failed to add IRQ domain");
diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c
index cfa103518113..4a1d874be766 100644
--- a/arch/mips/ath25/ar5312.c
+++ b/arch/mips/ath25/ar5312.c
@@ -143,8 +143,8 @@ void __init ar5312_arch_init_irq(void)
ath25_irq_dispatch = ar5312_irq_dispatch;
- domain = irq_domain_add_linear(NULL, AR5312_MISC_IRQ_COUNT,
- &ar5312_misc_irq_domain_ops, NULL);
+ domain = irq_domain_create_linear(NULL, AR5312_MISC_IRQ_COUNT,
+ &ar5312_misc_irq_domain_ops, NULL);
if (!domain)
panic("Failed to add IRQ domain");
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 247be207f293..de426a474b5b 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -282,7 +282,7 @@ static int __init bcm47xx_register_bus_complete(void)
bcm47xx_leds_register();
bcm47xx_workarounds();
- fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status);
+ fixed_phy_add(0, &bcm47xx_fixed_phy_status);
return 0;
}
device_initcall(bcm47xx_register_bus_complete);
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index 450e979ef5d9..11f4aa6e80e9 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -23,6 +23,12 @@ config CAVIUM_OCTEON_CVMSEG_SIZE
legally range is from zero to 54 cache blocks (i.e. CVMSEG LM is
between zero and 6192 bytes).
+config CRYPTO_SHA256_OCTEON
+ tristate
+ default CRYPTO_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256
+ select CRYPTO_LIB_SHA256_GENERIC
+
endif # CPU_CAVIUM_OCTEON
if CAVIUM_OCTEON_SOC
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
index 5ee4ade99b99..fbc84eb7fedf 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-md5.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -19,22 +19,26 @@
* any later version.
*/
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
#include <crypto/md5.h>
-#include <linux/init.h>
-#include <linux/types.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
-#include <asm/byteorder.h>
-#include <asm/octeon/octeon.h>
-#include <crypto/internal/hash.h>
+#include <linux/unaligned.h>
#include "octeon-crypto.h"
+struct octeon_md5_state {
+ __le32 hash[MD5_HASH_WORDS];
+ u64 byte_count;
+};
+
/*
* We pass everything as 64-bit. OCTEON can handle misaligned data.
*/
-static void octeon_md5_store_hash(struct md5_state *ctx)
+static void octeon_md5_store_hash(struct octeon_md5_state *ctx)
{
u64 *hash = (u64 *)ctx->hash;
@@ -42,7 +46,7 @@ static void octeon_md5_store_hash(struct md5_state *ctx)
write_octeon_64bit_hash_dword(hash[1], 1);
}
-static void octeon_md5_read_hash(struct md5_state *ctx)
+static void octeon_md5_read_hash(struct octeon_md5_state *ctx)
{
u64 *hash = (u64 *)ctx->hash;
@@ -66,13 +70,12 @@ static void octeon_md5_transform(const void *_block)
static int octeon_md5_init(struct shash_desc *desc)
{
- struct md5_state *mctx = shash_desc_ctx(desc);
+ struct octeon_md5_state *mctx = shash_desc_ctx(desc);
- mctx->hash[0] = MD5_H0;
- mctx->hash[1] = MD5_H1;
- mctx->hash[2] = MD5_H2;
- mctx->hash[3] = MD5_H3;
- cpu_to_le32_array(mctx->hash, 4);
+ mctx->hash[0] = cpu_to_le32(MD5_H0);
+ mctx->hash[1] = cpu_to_le32(MD5_H1);
+ mctx->hash[2] = cpu_to_le32(MD5_H2);
+ mctx->hash[3] = cpu_to_le32(MD5_H3);
mctx->byte_count = 0;
return 0;
@@ -81,52 +84,38 @@ static int octeon_md5_init(struct shash_desc *desc)
static int octeon_md5_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct md5_state *mctx = shash_desc_ctx(desc);
- const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
+ struct octeon_md5_state *mctx = shash_desc_ctx(desc);
struct octeon_cop2_state state;
unsigned long flags;
mctx->byte_count += len;
-
- if (avail > len) {
- memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
- data, len);
- return 0;
- }
-
- memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data,
- avail);
-
flags = octeon_crypto_enable(&state);
octeon_md5_store_hash(mctx);
- octeon_md5_transform(mctx->block);
- data += avail;
- len -= avail;
-
- while (len >= sizeof(mctx->block)) {
+ do {
octeon_md5_transform(data);
- data += sizeof(mctx->block);
- len -= sizeof(mctx->block);
- }
+ data += MD5_HMAC_BLOCK_SIZE;
+ len -= MD5_HMAC_BLOCK_SIZE;
+ } while (len >= MD5_HMAC_BLOCK_SIZE);
octeon_md5_read_hash(mctx);
octeon_crypto_disable(&state, flags);
-
- memcpy(mctx->block, data, len);
-
- return 0;
+ mctx->byte_count -= len;
+ return len;
}
-static int octeon_md5_final(struct shash_desc *desc, u8 *out)
+static int octeon_md5_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int offset, u8 *out)
{
- struct md5_state *mctx = shash_desc_ctx(desc);
- const unsigned int offset = mctx->byte_count & 0x3f;
- char *p = (char *)mctx->block + offset;
+ struct octeon_md5_state *mctx = shash_desc_ctx(desc);
int padding = 56 - (offset + 1);
struct octeon_cop2_state state;
+ u32 block[MD5_BLOCK_WORDS];
unsigned long flags;
+ char *p;
+ p = memcpy(block, src, offset);
+ p += offset;
*p++ = 0x80;
flags = octeon_crypto_enable(&state);
@@ -134,39 +123,56 @@ static int octeon_md5_final(struct shash_desc *desc, u8 *out)
if (padding < 0) {
memset(p, 0x00, padding + sizeof(u64));
- octeon_md5_transform(mctx->block);
- p = (char *)mctx->block;
+ octeon_md5_transform(block);
+ p = (char *)block;
padding = 56;
}
memset(p, 0, padding);
- mctx->block[14] = mctx->byte_count << 3;
- mctx->block[15] = mctx->byte_count >> 29;
- cpu_to_le32_array(mctx->block + 14, 2);
- octeon_md5_transform(mctx->block);
+ mctx->byte_count += offset;
+ block[14] = mctx->byte_count << 3;
+ block[15] = mctx->byte_count >> 29;
+ cpu_to_le32_array(block + 14, 2);
+ octeon_md5_transform(block);
octeon_md5_read_hash(mctx);
octeon_crypto_disable(&state, flags);
+ memzero_explicit(block, sizeof(block));
memcpy(out, mctx->hash, sizeof(mctx->hash));
- memset(mctx, 0, sizeof(*mctx));
return 0;
}
static int octeon_md5_export(struct shash_desc *desc, void *out)
{
- struct md5_state *ctx = shash_desc_ctx(desc);
-
- memcpy(out, ctx, sizeof(*ctx));
+ struct octeon_md5_state *ctx = shash_desc_ctx(desc);
+ union {
+ u8 *u8;
+ u32 *u32;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
+
+ for (i = 0; i < MD5_HASH_WORDS; i++)
+ put_unaligned(le32_to_cpu(ctx->hash[i]), p.u32++);
+ put_unaligned(ctx->byte_count, p.u64);
return 0;
}
static int octeon_md5_import(struct shash_desc *desc, const void *in)
{
- struct md5_state *ctx = shash_desc_ctx(desc);
-
- memcpy(ctx, in, sizeof(*ctx));
+ struct octeon_md5_state *ctx = shash_desc_ctx(desc);
+ union {
+ const u8 *u8;
+ const u32 *u32;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
+
+ for (i = 0; i < MD5_HASH_WORDS; i++)
+ ctx->hash[i] = cpu_to_le32(get_unaligned(p.u32++));
+ ctx->byte_count = get_unaligned(p.u64);
return 0;
}
@@ -174,15 +180,16 @@ static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = octeon_md5_init,
.update = octeon_md5_update,
- .final = octeon_md5_final,
+ .finup = octeon_md5_finup,
.export = octeon_md5_export,
.import = octeon_md5_import,
- .descsize = sizeof(struct md5_state),
- .statesize = sizeof(struct md5_state),
+ .statesize = MD5_STATE_SIZE,
+ .descsize = sizeof(struct octeon_md5_state),
.base = {
.cra_name = "md5",
.cra_driver_name= "octeon-md5",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha1.c b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
index 37a07b3c4568..e70f21a473da 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha1.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
@@ -13,15 +13,13 @@
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
*/
-#include <linux/mm.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
-#include <linux/init.h>
-#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/byteorder.h>
-#include <asm/octeon/octeon.h>
-#include <crypto/internal/hash.h>
#include "octeon-crypto.h"
@@ -58,49 +56,23 @@ static void octeon_sha1_read_hash(struct sha1_state *sctx)
memzero_explicit(&hash_tail.dword, sizeof(hash_tail.dword));
}
-static void octeon_sha1_transform(const void *_block)
+static void octeon_sha1_transform(struct sha1_state *sctx, const u8 *src,
+ int blocks)
{
- const u64 *block = _block;
-
- write_octeon_64bit_block_dword(block[0], 0);
- write_octeon_64bit_block_dword(block[1], 1);
- write_octeon_64bit_block_dword(block[2], 2);
- write_octeon_64bit_block_dword(block[3], 3);
- write_octeon_64bit_block_dword(block[4], 4);
- write_octeon_64bit_block_dword(block[5], 5);
- write_octeon_64bit_block_dword(block[6], 6);
- octeon_sha1_start(block[7]);
-}
-
-static void __octeon_sha1_update(struct sha1_state *sctx, const u8 *data,
- unsigned int len)
-{
- unsigned int partial;
- unsigned int done;
- const u8 *src;
-
- partial = sctx->count % SHA1_BLOCK_SIZE;
- sctx->count += len;
- done = 0;
- src = data;
-
- if ((partial + len) >= SHA1_BLOCK_SIZE) {
- if (partial) {
- done = -partial;
- memcpy(sctx->buffer + partial, data,
- done + SHA1_BLOCK_SIZE);
- src = sctx->buffer;
- }
-
- do {
- octeon_sha1_transform(src);
- done += SHA1_BLOCK_SIZE;
- src = data + done;
- } while (done + SHA1_BLOCK_SIZE <= len);
-
- partial = 0;
- }
- memcpy(sctx->buffer + partial, src, len - done);
+ do {
+ const u64 *block = (const u64 *)src;
+
+ write_octeon_64bit_block_dword(block[0], 0);
+ write_octeon_64bit_block_dword(block[1], 1);
+ write_octeon_64bit_block_dword(block[2], 2);
+ write_octeon_64bit_block_dword(block[3], 3);
+ write_octeon_64bit_block_dword(block[4], 4);
+ write_octeon_64bit_block_dword(block[5], 5);
+ write_octeon_64bit_block_dword(block[6], 6);
+ octeon_sha1_start(block[7]);
+
+ src += SHA1_BLOCK_SIZE;
+ } while (--blocks);
}
static int octeon_sha1_update(struct shash_desc *desc, const u8 *data,
@@ -109,95 +81,47 @@ static int octeon_sha1_update(struct shash_desc *desc, const u8 *data,
struct sha1_state *sctx = shash_desc_ctx(desc);
struct octeon_cop2_state state;
unsigned long flags;
-
- /*
- * Small updates never reach the crypto engine, so the generic sha1 is
- * faster because of the heavyweight octeon_crypto_enable() /
- * octeon_crypto_disable().
- */
- if ((sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
- return crypto_sha1_update(desc, data, len);
+ int remain;
flags = octeon_crypto_enable(&state);
octeon_sha1_store_hash(sctx);
- __octeon_sha1_update(sctx, data, len);
+ remain = sha1_base_do_update_blocks(desc, data, len,
+ octeon_sha1_transform);
octeon_sha1_read_hash(sctx);
octeon_crypto_disable(&state, flags);
-
- return 0;
+ return remain;
}
-static int octeon_sha1_final(struct shash_desc *desc, u8 *out)
+static int octeon_sha1_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
- static const u8 padding[64] = { 0x80, };
struct octeon_cop2_state state;
- __be32 *dst = (__be32 *)out;
- unsigned int pad_len;
unsigned long flags;
- unsigned int index;
- __be64 bits;
- int i;
-
- /* Save number of bits. */
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64. */
- index = sctx->count & 0x3f;
- pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
flags = octeon_crypto_enable(&state);
octeon_sha1_store_hash(sctx);
- __octeon_sha1_update(sctx, padding, pad_len);
-
- /* Append length (before padding). */
- __octeon_sha1_update(sctx, (const u8 *)&bits, sizeof(bits));
+ sha1_base_do_finup(desc, src, len, octeon_sha1_transform);
octeon_sha1_read_hash(sctx);
octeon_crypto_disable(&state, flags);
-
- /* Store state in digest */
- for (i = 0; i < 5; i++)
- dst[i] = cpu_to_be32(sctx->state[i]);
-
- /* Zeroize sensitive information. */
- memset(sctx, 0, sizeof(*sctx));
-
- return 0;
-}
-
-static int octeon_sha1_export(struct shash_desc *desc, void *out)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
-}
-
-static int octeon_sha1_import(struct shash_desc *desc, const void *in)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
+ return sha1_base_finish(desc, out);
}
static struct shash_alg octeon_sha1_alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = octeon_sha1_update,
- .final = octeon_sha1_final,
- .export = octeon_sha1_export,
- .import = octeon_sha1_import,
- .descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
+ .finup = octeon_sha1_finup,
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name= "octeon-sha1",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha256.c b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
index 435e4a6e7f13..f93faaf1f4af 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha256.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Cryptographic API.
- *
- * SHA-224 and SHA-256 Secure Hash Algorithm.
+ * SHA-256 Secure Hash Algorithm.
*
* Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
*
@@ -14,15 +12,10 @@
* SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
*/
-#include <linux/mm.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <asm/byteorder.h>
#include <asm/octeon/octeon.h>
-#include <crypto/internal/hash.h>
+#include <crypto/internal/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include "octeon-crypto.h"
@@ -30,212 +23,51 @@
* We pass everything as 64-bit. OCTEON can handle misaligned data.
*/
-static void octeon_sha256_store_hash(struct sha256_state *sctx)
-{
- u64 *hash = (u64 *)sctx->state;
-
- write_octeon_64bit_hash_dword(hash[0], 0);
- write_octeon_64bit_hash_dword(hash[1], 1);
- write_octeon_64bit_hash_dword(hash[2], 2);
- write_octeon_64bit_hash_dword(hash[3], 3);
-}
-
-static void octeon_sha256_read_hash(struct sha256_state *sctx)
-{
- u64 *hash = (u64 *)sctx->state;
-
- hash[0] = read_octeon_64bit_hash_dword(0);
- hash[1] = read_octeon_64bit_hash_dword(1);
- hash[2] = read_octeon_64bit_hash_dword(2);
- hash[3] = read_octeon_64bit_hash_dword(3);
-}
-
-static void octeon_sha256_transform(const void *_block)
-{
- const u64 *block = _block;
-
- write_octeon_64bit_block_dword(block[0], 0);
- write_octeon_64bit_block_dword(block[1], 1);
- write_octeon_64bit_block_dword(block[2], 2);
- write_octeon_64bit_block_dword(block[3], 3);
- write_octeon_64bit_block_dword(block[4], 4);
- write_octeon_64bit_block_dword(block[5], 5);
- write_octeon_64bit_block_dword(block[6], 6);
- octeon_sha256_start(block[7]);
-}
-
-static void __octeon_sha256_update(struct sha256_state *sctx, const u8 *data,
- unsigned int len)
-{
- unsigned int partial;
- unsigned int done;
- const u8 *src;
-
- partial = sctx->count % SHA256_BLOCK_SIZE;
- sctx->count += len;
- done = 0;
- src = data;
-
- if ((partial + len) >= SHA256_BLOCK_SIZE) {
- if (partial) {
- done = -partial;
- memcpy(sctx->buf + partial, data,
- done + SHA256_BLOCK_SIZE);
- src = sctx->buf;
- }
-
- do {
- octeon_sha256_transform(src);
- done += SHA256_BLOCK_SIZE;
- src = data + done;
- } while (done + SHA256_BLOCK_SIZE <= len);
-
- partial = 0;
- }
- memcpy(sctx->buf + partial, src, len - done);
-}
-
-static int octeon_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- struct octeon_cop2_state state;
- unsigned long flags;
-
- /*
- * Small updates never reach the crypto engine, so the generic sha256 is
- * faster because of the heavyweight octeon_crypto_enable() /
- * octeon_crypto_disable().
- */
- if ((sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
- return crypto_sha256_update(desc, data, len);
-
- flags = octeon_crypto_enable(&state);
- octeon_sha256_store_hash(sctx);
-
- __octeon_sha256_update(sctx, data, len);
-
- octeon_sha256_read_hash(sctx);
- octeon_crypto_disable(&state, flags);
-
- return 0;
-}
-
-static int octeon_sha256_final(struct shash_desc *desc, u8 *out)
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- static const u8 padding[64] = { 0x80, };
- struct octeon_cop2_state state;
- __be32 *dst = (__be32 *)out;
- unsigned int pad_len;
+ struct octeon_cop2_state cop2_state;
+ u64 *state64 = (u64 *)state;
unsigned long flags;
- unsigned int index;
- __be64 bits;
- int i;
-
- /* Save number of bits. */
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64. */
- index = sctx->count & 0x3f;
- pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
-
- flags = octeon_crypto_enable(&state);
- octeon_sha256_store_hash(sctx);
-
- __octeon_sha256_update(sctx, padding, pad_len);
-
- /* Append length (before padding). */
- __octeon_sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
-
- octeon_sha256_read_hash(sctx);
- octeon_crypto_disable(&state, flags);
-
- /* Store state in digest */
- for (i = 0; i < 8; i++)
- dst[i] = cpu_to_be32(sctx->state[i]);
-
- /* Zeroize sensitive information. */
- memset(sctx, 0, sizeof(*sctx));
-
- return 0;
-}
-
-static int octeon_sha224_final(struct shash_desc *desc, u8 *hash)
-{
- u8 D[SHA256_DIGEST_SIZE];
-
- octeon_sha256_final(desc, D);
- memcpy(hash, D, SHA224_DIGEST_SIZE);
- memzero_explicit(D, SHA256_DIGEST_SIZE);
-
- return 0;
-}
-
-static int octeon_sha256_export(struct shash_desc *desc, void *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
-}
-
-static int octeon_sha256_import(struct shash_desc *desc, const void *in)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
-}
-
-static struct shash_alg octeon_sha256_algs[2] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = octeon_sha256_update,
- .final = octeon_sha256_final,
- .export = octeon_sha256_export,
- .import = octeon_sha256_import,
- .descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name= "octeon-sha256",
- .cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = octeon_sha256_update,
- .final = octeon_sha224_final,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name= "octeon-sha224",
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init octeon_sha256_mod_init(void)
-{
if (!octeon_has_crypto())
- return -ENOTSUPP;
- return crypto_register_shashes(octeon_sha256_algs,
- ARRAY_SIZE(octeon_sha256_algs));
+ return sha256_blocks_generic(state, data, nblocks);
+
+ flags = octeon_crypto_enable(&cop2_state);
+ write_octeon_64bit_hash_dword(state64[0], 0);
+ write_octeon_64bit_hash_dword(state64[1], 1);
+ write_octeon_64bit_hash_dword(state64[2], 2);
+ write_octeon_64bit_hash_dword(state64[3], 3);
+
+ do {
+ const u64 *block = (const u64 *)data;
+
+ write_octeon_64bit_block_dword(block[0], 0);
+ write_octeon_64bit_block_dword(block[1], 1);
+ write_octeon_64bit_block_dword(block[2], 2);
+ write_octeon_64bit_block_dword(block[3], 3);
+ write_octeon_64bit_block_dword(block[4], 4);
+ write_octeon_64bit_block_dword(block[5], 5);
+ write_octeon_64bit_block_dword(block[6], 6);
+ octeon_sha256_start(block[7]);
+
+ data += SHA256_BLOCK_SIZE;
+ } while (--nblocks);
+
+ state64[0] = read_octeon_64bit_hash_dword(0);
+ state64[1] = read_octeon_64bit_hash_dword(1);
+ state64[2] = read_octeon_64bit_hash_dword(2);
+ state64[3] = read_octeon_64bit_hash_dword(3);
+ octeon_crypto_disable(&cop2_state, flags);
}
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
-static void __exit octeon_sha256_mod_fini(void)
+bool sha256_is_arch_optimized(void)
{
- crypto_unregister_shashes(octeon_sha256_algs,
- ARRAY_SIZE(octeon_sha256_algs));
+ return octeon_has_crypto();
}
-
-module_init(octeon_sha256_mod_init);
-module_exit(octeon_sha256_mod_fini);
+EXPORT_SYMBOL_GPL(sha256_is_arch_optimized);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm (OCTEON)");
+MODULE_DESCRIPTION("SHA-256 Secure Hash Algorithm (OCTEON)");
MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha512.c b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
index 2dee9354e33f..215311053db3 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha512.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
@@ -13,15 +13,12 @@
* Copyright (c) 2003 Kyle McMartin <kyle@debian.org>
*/
-#include <linux/mm.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
-#include <linux/init.h>
-#include <linux/types.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/byteorder.h>
-#include <asm/octeon/octeon.h>
-#include <crypto/internal/hash.h>
#include "octeon-crypto.h"
@@ -53,60 +50,31 @@ static void octeon_sha512_read_hash(struct sha512_state *sctx)
sctx->state[7] = read_octeon_64bit_hash_sha512(7);
}
-static void octeon_sha512_transform(const void *_block)
+static void octeon_sha512_transform(struct sha512_state *sctx,
+ const u8 *src, int blocks)
{
- const u64 *block = _block;
-
- write_octeon_64bit_block_sha512(block[0], 0);
- write_octeon_64bit_block_sha512(block[1], 1);
- write_octeon_64bit_block_sha512(block[2], 2);
- write_octeon_64bit_block_sha512(block[3], 3);
- write_octeon_64bit_block_sha512(block[4], 4);
- write_octeon_64bit_block_sha512(block[5], 5);
- write_octeon_64bit_block_sha512(block[6], 6);
- write_octeon_64bit_block_sha512(block[7], 7);
- write_octeon_64bit_block_sha512(block[8], 8);
- write_octeon_64bit_block_sha512(block[9], 9);
- write_octeon_64bit_block_sha512(block[10], 10);
- write_octeon_64bit_block_sha512(block[11], 11);
- write_octeon_64bit_block_sha512(block[12], 12);
- write_octeon_64bit_block_sha512(block[13], 13);
- write_octeon_64bit_block_sha512(block[14], 14);
- octeon_sha512_start(block[15]);
-}
-
-static void __octeon_sha512_update(struct sha512_state *sctx, const u8 *data,
- unsigned int len)
-{
- unsigned int part_len;
- unsigned int index;
- unsigned int i;
-
- /* Compute number of bytes mod 128. */
- index = sctx->count[0] % SHA512_BLOCK_SIZE;
-
- /* Update number of bytes. */
- if ((sctx->count[0] += len) < len)
- sctx->count[1]++;
-
- part_len = SHA512_BLOCK_SIZE - index;
-
- /* Transform as many times as possible. */
- if (len >= part_len) {
- memcpy(&sctx->buf[index], data, part_len);
- octeon_sha512_transform(sctx->buf);
-
- for (i = part_len; i + SHA512_BLOCK_SIZE <= len;
- i += SHA512_BLOCK_SIZE)
- octeon_sha512_transform(&data[i]);
-
- index = 0;
- } else {
- i = 0;
- }
-
- /* Buffer remaining input. */
- memcpy(&sctx->buf[index], &data[i], len - i);
+ do {
+ const u64 *block = (const u64 *)src;
+
+ write_octeon_64bit_block_sha512(block[0], 0);
+ write_octeon_64bit_block_sha512(block[1], 1);
+ write_octeon_64bit_block_sha512(block[2], 2);
+ write_octeon_64bit_block_sha512(block[3], 3);
+ write_octeon_64bit_block_sha512(block[4], 4);
+ write_octeon_64bit_block_sha512(block[5], 5);
+ write_octeon_64bit_block_sha512(block[6], 6);
+ write_octeon_64bit_block_sha512(block[7], 7);
+ write_octeon_64bit_block_sha512(block[8], 8);
+ write_octeon_64bit_block_sha512(block[9], 9);
+ write_octeon_64bit_block_sha512(block[10], 10);
+ write_octeon_64bit_block_sha512(block[11], 11);
+ write_octeon_64bit_block_sha512(block[12], 12);
+ write_octeon_64bit_block_sha512(block[13], 13);
+ write_octeon_64bit_block_sha512(block[14], 14);
+ octeon_sha512_start(block[15]);
+
+ src += SHA512_BLOCK_SIZE;
+ } while (--blocks);
}
static int octeon_sha512_update(struct shash_desc *desc, const u8 *data,
@@ -115,89 +83,48 @@ static int octeon_sha512_update(struct shash_desc *desc, const u8 *data,
struct sha512_state *sctx = shash_desc_ctx(desc);
struct octeon_cop2_state state;
unsigned long flags;
-
- /*
- * Small updates never reach the crypto engine, so the generic sha512 is
- * faster because of the heavyweight octeon_crypto_enable() /
- * octeon_crypto_disable().
- */
- if ((sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
- return crypto_sha512_update(desc, data, len);
+ int remain;
flags = octeon_crypto_enable(&state);
octeon_sha512_store_hash(sctx);
- __octeon_sha512_update(sctx, data, len);
+ remain = sha512_base_do_update_blocks(desc, data, len,
+ octeon_sha512_transform);
octeon_sha512_read_hash(sctx);
octeon_crypto_disable(&state, flags);
-
- return 0;
+ return remain;
}
-static int octeon_sha512_final(struct shash_desc *desc, u8 *hash)
+static int octeon_sha512_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *hash)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
- static u8 padding[128] = { 0x80, };
struct octeon_cop2_state state;
- __be64 *dst = (__be64 *)hash;
- unsigned int pad_len;
unsigned long flags;
- unsigned int index;
- __be64 bits[2];
- int i;
-
- /* Save number of bits. */
- bits[1] = cpu_to_be64(sctx->count[0] << 3);
- bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
-
- /* Pad out to 112 mod 128. */
- index = sctx->count[0] & 0x7f;
- pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
flags = octeon_crypto_enable(&state);
octeon_sha512_store_hash(sctx);
- __octeon_sha512_update(sctx, padding, pad_len);
-
- /* Append length (before padding). */
- __octeon_sha512_update(sctx, (const u8 *)bits, sizeof(bits));
+ sha512_base_do_finup(desc, src, len, octeon_sha512_transform);
octeon_sha512_read_hash(sctx);
octeon_crypto_disable(&state, flags);
-
- /* Store state in digest. */
- for (i = 0; i < 8; i++)
- dst[i] = cpu_to_be64(sctx->state[i]);
-
- /* Zeroize sensitive information. */
- memset(sctx, 0, sizeof(struct sha512_state));
-
- return 0;
-}
-
-static int octeon_sha384_final(struct shash_desc *desc, u8 *hash)
-{
- u8 D[64];
-
- octeon_sha512_final(desc, D);
-
- memcpy(hash, D, 48);
- memzero_explicit(D, 64);
-
- return 0;
+ return sha512_base_finish(desc, hash);
}
static struct shash_alg octeon_sha512_algs[2] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = octeon_sha512_update,
- .final = octeon_sha512_final,
- .descsize = sizeof(struct sha512_state),
+ .finup = octeon_sha512_finup,
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha512",
.cra_driver_name= "octeon-sha512",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -205,12 +132,14 @@ static struct shash_alg octeon_sha512_algs[2] = { {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = octeon_sha512_update,
- .final = octeon_sha384_final,
- .descsize = sizeof(struct sha512_state),
+ .finup = octeon_sha512_finup,
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha384",
.cra_driver_name= "octeon-sha384",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index e6b4d9c0c169..5c3de175ef5b 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1503,8 +1503,8 @@ static int __init octeon_irq_init_ciu(
/* Mips internal */
octeon_irq_init_core();
- ciu_domain = irq_domain_add_tree(
- ciu_node, &octeon_irq_domain_ciu_ops, dd);
+ ciu_domain = irq_domain_create_tree(of_fwnode_handle(ciu_node), &octeon_irq_domain_ciu_ops,
+ dd);
irq_set_default_domain(ciu_domain);
/* CIU_0 */
@@ -1637,8 +1637,8 @@ static int __init octeon_irq_init_gpio(
if (gpiod) {
/* gpio domain host_data is the base hwirq number. */
gpiod->base_hwirq = base_hwirq;
- irq_domain_add_linear(
- gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
+ irq_domain_create_linear(of_fwnode_handle(gpio_node), 16,
+ &octeon_irq_domain_gpio_ops, gpiod);
} else {
pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
return -ENOMEM;
@@ -2074,8 +2074,8 @@ static int __init octeon_irq_init_ciu2(
/* Mips internal */
octeon_irq_init_core();
- ciu_domain = irq_domain_add_tree(
- ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
+ ciu_domain = irq_domain_create_tree(of_fwnode_handle(ciu_node), &octeon_irq_domain_ciu2_ops,
+ NULL);
irq_set_default_domain(ciu_domain);
/* CUI2 */
@@ -2331,11 +2331,12 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
}
host_data->max_bits = val;
- cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
- &octeon_irq_domain_cib_ops,
- host_data);
+ cib_domain = irq_domain_create_linear(of_fwnode_handle(ciu_node),
+ host_data->max_bits,
+ &octeon_irq_domain_cib_ops,
+ host_data);
if (!cib_domain) {
- pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
+ pr_err("ERROR: Couldn't irq_domain_create_linear()\n");
return -ENOMEM;
}
@@ -2918,8 +2919,8 @@ static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
* Initialize all domains to use the default domain. Specific major
* blocks will overwrite the default domain as needed.
*/
- domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
- ciu3_info);
+ domain = irq_domain_create_tree(of_fwnode_handle(ciu_node), &octeon_dflt_domain_ciu3_ops,
+ ciu3_info);
for (i = 0; i < MAX_CIU3_DOMAINS; i++)
ciu3_info->domain[i] = domain;
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 8caa03a41327..cba0b85c6707 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -82,7 +82,6 @@ CONFIG_LEDS_GPIO=y
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_PROC_PAGE_MONITOR is not set
-CONFIG_CRC_ITU_T=m
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index fe282630b51c..97d2cd997285 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -81,7 +81,6 @@ CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
-CONFIG_IP_DCCP=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
@@ -238,7 +237,6 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
-CONFIG_CRC_T10DIF=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index f523ee6f25bf..88ae0aa85364 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -157,7 +157,6 @@ CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5_OCTEON=y
CONFIG_CRYPTO_SHA1_OCTEON=m
-CONFIG_CRYPTO_SHA256_OCTEON=m
CONFIG_CRYPTO_SHA512_OCTEON=m
CONFIG_CRYPTO_DES=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig
index 9655567614aa..85a4472cb058 100644
--- a/arch/mips/configs/decstation_64_defconfig
+++ b/arch/mips/configs/decstation_64_defconfig
@@ -168,7 +168,6 @@ CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
CONFIG_CRYPTO_RSA=m
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index 1539fe8eb34d..a3b2c8da2dde 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -163,7 +163,6 @@ CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
CONFIG_CRYPTO_RSA=m
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig
index 58c36720c94a..a476717b8a6a 100644
--- a/arch/mips/configs/decstation_r4k_defconfig
+++ b/arch/mips/configs/decstation_r4k_defconfig
@@ -163,7 +163,6 @@ CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
CONFIG_CRYPTO_RSA=m
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 5ab149cd3178..114fcd67898d 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -218,4 +218,3 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=y
diff --git a/arch/mips/configs/gcw0_defconfig b/arch/mips/configs/gcw0_defconfig
index bc1ef66e3999..8b7ad877e07a 100644
--- a/arch/mips/configs/gcw0_defconfig
+++ b/arch/mips/configs/gcw0_defconfig
@@ -13,7 +13,6 @@ CONFIG_MIPS_CMDLINE_DTB_EXTEND=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_BOUNCE is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
index 12f3eed8a946..437ef6dc0b4c 100644
--- a/arch/mips/configs/gpr_defconfig
+++ b/arch/mips/configs/gpr_defconfig
@@ -84,7 +84,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_IP_DCCP=m
CONFIG_IP_SCTP=m
CONFIG_TIPC=m
CONFIG_ATM=y
@@ -273,7 +272,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRYPTO_AUTHENC=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 31ca93d3acc5..f1a8ccf2c459 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -326,5 +326,4 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=m
CONFIG_DEBUG_MEMORY_INIT=y
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index b8907b3d7a33..5d079941fd20 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -317,4 +317,3 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
-CONFIG_CRC_T10DIF=m
diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
index e0040110a3ee..6db21e498faa 100644
--- a/arch/mips/configs/ip28_defconfig
+++ b/arch/mips/configs/ip28_defconfig
@@ -60,6 +60,5 @@ CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_ROOT_NFS=y
-CONFIG_CRYPTO_MANAGER=y
# CONFIG_CRYPTO_HW is not set
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/configs/ip30_defconfig b/arch/mips/configs/ip30_defconfig
index 270181a7320a..a4524e785469 100644
--- a/arch/mips/configs/ip30_defconfig
+++ b/arch/mips/configs/ip30_defconfig
@@ -179,4 +179,3 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
-CONFIG_CRC_T10DIF=m
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index 121e7e48fa77..d8ac11427f69 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -177,7 +177,6 @@ CONFIG_CRYPTO_SERPENT=y
CONFIG_CRYPTO_TEA=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRC_T10DIF=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 71d6340497c9..5038a27d035f 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -297,7 +297,7 @@ CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 06b7a0b97eca..e4bcdb64df6c 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -130,7 +130,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_IP_DCCP=m
CONFIG_IP_SCTP=m
CONFIG_TIPC=m
CONFIG_ATM=y
@@ -662,7 +661,7 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
diff --git a/arch/mips/configs/omega2p_defconfig b/arch/mips/configs/omega2p_defconfig
index 128f9abab7fc..e2bcdfd290a1 100644
--- a/arch/mips/configs/omega2p_defconfig
+++ b/arch/mips/configs/omega2p_defconfig
@@ -111,7 +111,6 @@ CONFIG_NLS_KOI8_U=y
CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
-CONFIG_CRC16=y
CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index 0261969a6e45..9fb114ef5e2d 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -153,7 +153,6 @@ CONFIG_JFFS2_FS=y
CONFIG_JFFS2_SUMMARY=y
CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_SQUASHFS=y
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
CONFIG_STRIP_ASM_SYMS=y
diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig
index 8404e0a9d8b2..8f9701efef19 100644
--- a/arch/mips/configs/rt305x_defconfig
+++ b/arch/mips/configs/rt305x_defconfig
@@ -128,7 +128,6 @@ CONFIG_SQUASHFS=y
# CONFIG_SQUASHFS_ZLIB is not set
CONFIG_SQUASHFS_XZ=y
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRC_ITU_T=m
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_POWERPC is not set
# CONFIG_XZ_DEC_IA64 is not set
diff --git a/arch/mips/configs/sb1250_swarm_defconfig b/arch/mips/configs/sb1250_swarm_defconfig
index ce855b644bb0..ae2afff00e01 100644
--- a/arch/mips/configs/sb1250_swarm_defconfig
+++ b/arch/mips/configs/sb1250_swarm_defconfig
@@ -99,4 +99,3 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
diff --git a/arch/mips/configs/vocore2_defconfig b/arch/mips/configs/vocore2_defconfig
index 917967fed45f..2a9a9b12847d 100644
--- a/arch/mips/configs/vocore2_defconfig
+++ b/arch/mips/configs/vocore2_defconfig
@@ -111,7 +111,6 @@ CONFIG_NLS_KOI8_U=y
CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
-CONFIG_CRC16=y
CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/mips/configs/xway_defconfig b/arch/mips/configs/xway_defconfig
index 7b91edfe3e07..aae8497b6872 100644
--- a/arch/mips/configs/xway_defconfig
+++ b/arch/mips/configs/xway_defconfig
@@ -140,7 +140,6 @@ CONFIG_SQUASHFS=y
# CONFIG_SQUASHFS_ZLIB is not set
CONFIG_SQUASHFS_XZ=y
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRC_ITU_T=m
CONFIG_PRINTK_TIME=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
diff --git a/arch/mips/crypto/Kconfig b/arch/mips/crypto/Kconfig
index 545fc0e12422..6bf073ae7613 100644
--- a/arch/mips/crypto/Kconfig
+++ b/arch/mips/crypto/Kconfig
@@ -2,17 +2,6 @@
menu "Accelerated Cryptographic Algorithms for CPU (mips)"
-config CRYPTO_POLY1305_MIPS
- tristate
- depends on MIPS
- select CRYPTO_HASH
- select CRYPTO_ARCH_HAVE_LIB_POLY1305
- default CRYPTO_LIB_POLY1305_INTERNAL
- help
- Poly1305 authenticator algorithm (RFC7539)
-
- Architecture: mips
-
config CRYPTO_MD5_OCTEON
tristate "Digests: MD5 (OCTEON)"
depends on CPU_CAVIUM_OCTEON
@@ -33,16 +22,6 @@ config CRYPTO_SHA1_OCTEON
Architecture: mips OCTEON
-config CRYPTO_SHA256_OCTEON
- tristate "Hash functions: SHA-224 and SHA-256 (OCTEON)"
- depends on CPU_CAVIUM_OCTEON
- select CRYPTO_SHA256
- select CRYPTO_HASH
- help
- SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
- Architecture: mips OCTEON using crypto instructions, when available
-
config CRYPTO_SHA512_OCTEON
tristate "Hash functions: SHA-384 and SHA-512 (OCTEON)"
depends on CPU_CAVIUM_OCTEON
@@ -53,16 +32,4 @@ config CRYPTO_SHA512_OCTEON
Architecture: mips OCTEON using crypto instructions, when available
-config CRYPTO_CHACHA_MIPS
- tristate
- depends on CPU_MIPS32_R2
- select CRYPTO_SKCIPHER
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
- default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
- stream cipher algorithms
-
- Architecture: MIPS32r2
-
endmenu
diff --git a/arch/mips/crypto/Makefile b/arch/mips/crypto/Makefile
index fddc88281412..5adb631a69c1 100644
--- a/arch/mips/crypto/Makefile
+++ b/arch/mips/crypto/Makefile
@@ -3,20 +3,3 @@
# Makefile for MIPS crypto files..
#
-obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o
-chacha-mips-y := chacha-core.o chacha-glue.o
-AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
-
-obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o
-poly1305-mips-y := poly1305-core.o poly1305-glue.o
-
-perlasm-flavour-$(CONFIG_32BIT) := o32
-perlasm-flavour-$(CONFIG_64BIT) := 64
-
-quiet_cmd_perlasm = PERLASM $@
- cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@)
-
-$(obj)/poly1305-core.S: $(src)/poly1305-mips.pl FORCE
- $(call if_changed,perlasm)
-
-targets += poly1305-core.S
diff --git a/arch/mips/crypto/chacha-glue.c b/arch/mips/crypto/chacha-glue.c
deleted file mode 100644
index f6fc2e1079a1..000000000000
--- a/arch/mips/crypto/chacha-glue.c
+++ /dev/null
@@ -1,146 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * MIPS accelerated ChaCha and XChaCha stream ciphers,
- * including ChaCha20 (RFC7539)
- *
- * Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
- */
-
-#include <asm/byteorder.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-asmlinkage void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds);
-EXPORT_SYMBOL(chacha_crypt_arch);
-
-asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds);
-EXPORT_SYMBOL(hchacha_block_arch);
-
-static int chacha_mips_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
-{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- chacha_crypt(state, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes, ctx->nrounds);
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int chacha_mips(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_mips_stream_xor(req, ctx, req->iv);
-}
-
-static int xchacha_mips(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- chacha_init(state, ctx->key, req->iv);
-
- hchacha_block(state, subctx.key, ctx->nrounds);
- subctx.nrounds = ctx->nrounds;
-
- memcpy(&real_iv[0], req->iv + 24, 8);
- memcpy(&real_iv[8], req->iv + 16, 8);
- return chacha_mips_stream_xor(req, &subctx, real_iv);
-}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-mips",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_mips,
- .decrypt = chacha_mips,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-mips",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_mips,
- .decrypt = xchacha_mips,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-mips",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_mips,
- .decrypt = xchacha_mips,
- }
-};
-
-static int __init chacha_simd_mod_init(void)
-{
- return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
- crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0;
-}
-
-static void __exit chacha_simd_mod_fini(void)
-{
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER))
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-module_init(chacha_simd_mod_init);
-module_exit(chacha_simd_mod_fini);
-
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (MIPS accelerated)");
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-mips");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-mips");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-mips");
diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c
deleted file mode 100644
index c03ad0bbe69c..000000000000
--- a/arch/mips/crypto/poly1305-glue.c
+++ /dev/null
@@ -1,192 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * OpenSSL/Cryptogams accelerated Poly1305 transform for MIPS
- *
- * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
- */
-
-#include <linux/unaligned.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/poly1305.h>
-#include <linux/cpufeature.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-
-asmlinkage void poly1305_init_mips(void *state, const u8 *key);
-asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
-asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
-
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
-{
- poly1305_init_mips(&dctx->h, key);
- dctx->s[0] = get_unaligned_le32(key + 16);
- dctx->s[1] = get_unaligned_le32(key + 20);
- dctx->s[2] = get_unaligned_le32(key + 24);
- dctx->s[3] = get_unaligned_le32(key + 28);
- dctx->buflen = 0;
-}
-EXPORT_SYMBOL(poly1305_init_arch);
-
-static int mips_poly1305_init(struct shash_desc *desc)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- dctx->buflen = 0;
- dctx->rset = 0;
- dctx->sset = false;
-
- return 0;
-}
-
-static void mips_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
- u32 len, u32 hibit)
-{
- if (unlikely(!dctx->sset)) {
- if (!dctx->rset) {
- poly1305_init_mips(&dctx->h, src);
- src += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- dctx->rset = 1;
- }
- if (len >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(src + 0);
- dctx->s[1] = get_unaligned_le32(src + 4);
- dctx->s[2] = get_unaligned_le32(src + 8);
- dctx->s[3] = get_unaligned_le32(src + 12);
- src += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- if (len < POLY1305_BLOCK_SIZE)
- return;
- }
-
- len &= ~(POLY1305_BLOCK_SIZE - 1);
-
- poly1305_blocks_mips(&dctx->h, src, len, hibit);
-}
-
-static int mips_poly1305_update(struct shash_desc *desc, const u8 *src,
- unsigned int len)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (unlikely(dctx->buflen)) {
- u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen);
-
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- len -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- mips_poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 1);
- dctx->buflen = 0;
- }
- }
-
- if (likely(len >= POLY1305_BLOCK_SIZE)) {
- mips_poly1305_blocks(dctx, src, len, 1);
- src += round_down(len, POLY1305_BLOCK_SIZE);
- len %= POLY1305_BLOCK_SIZE;
- }
-
- if (unlikely(len)) {
- dctx->buflen = len;
- memcpy(dctx->buf, src, len);
- }
- return 0;
-}
-
-void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
- unsigned int nbytes)
-{
- if (unlikely(dctx->buflen)) {
- u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen);
-
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- nbytes -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- poly1305_blocks_mips(&dctx->h, dctx->buf,
- POLY1305_BLOCK_SIZE, 1);
- dctx->buflen = 0;
- }
- }
-
- if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
- unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
-
- poly1305_blocks_mips(&dctx->h, src, len, 1);
- src += len;
- nbytes %= POLY1305_BLOCK_SIZE;
- }
-
- if (unlikely(nbytes)) {
- dctx->buflen = nbytes;
- memcpy(dctx->buf, src, nbytes);
- }
-}
-EXPORT_SYMBOL(poly1305_update_arch);
-
-void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
-{
- if (unlikely(dctx->buflen)) {
- dctx->buf[dctx->buflen++] = 1;
- memset(dctx->buf + dctx->buflen, 0,
- POLY1305_BLOCK_SIZE - dctx->buflen);
- poly1305_blocks_mips(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
- }
-
- poly1305_emit_mips(&dctx->h, dst, dctx->s);
- *dctx = (struct poly1305_desc_ctx){};
-}
-EXPORT_SYMBOL(poly1305_final_arch);
-
-static int mips_poly1305_final(struct shash_desc *desc, u8 *dst)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (unlikely(!dctx->sset))
- return -ENOKEY;
-
- poly1305_final_arch(dctx, dst);
- return 0;
-}
-
-static struct shash_alg mips_poly1305_alg = {
- .init = mips_poly1305_init,
- .update = mips_poly1305_update,
- .final = mips_poly1305_final,
- .digestsize = POLY1305_DIGEST_SIZE,
- .descsize = sizeof(struct poly1305_desc_ctx),
-
- .base.cra_name = "poly1305",
- .base.cra_driver_name = "poly1305-mips",
- .base.cra_priority = 200,
- .base.cra_blocksize = POLY1305_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-};
-
-static int __init mips_poly1305_mod_init(void)
-{
- return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
- crypto_register_shash(&mips_poly1305_alg) : 0;
-}
-
-static void __exit mips_poly1305_mod_exit(void)
-{
- if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
- crypto_unregister_shash(&mips_poly1305_alg);
-}
-
-module_init(mips_poly1305_mod_init);
-module_exit(mips_poly1305_mod_exit);
-
-MODULE_DESCRIPTION("Poly1305 transform (MIPS accelerated");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("poly1305");
-MODULE_ALIAS_CRYPTO("poly1305-mips");
diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h
index 0992cad9c632..c7d75807d13f 100644
--- a/arch/mips/include/asm/idle.h
+++ b/arch/mips/include/asm/idle.h
@@ -6,11 +6,10 @@
#include <linux/linkage.h>
extern void (*cpu_wait)(void);
-extern void r4k_wait(void);
-extern asmlinkage void __r4k_wait(void);
+extern asmlinkage void r4k_wait(void);
extern void r4k_wait_irqoff(void);
-static inline int using_rollback_handler(void)
+static inline int using_skipover_handler(void)
{
return cpu_wait == r4k_wait;
}
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index bbca420c96d3..942af87f1cdd 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -62,7 +62,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
if (!ptdesc)
return NULL;
- if (!pagetable_pmd_ctor(ptdesc)) {
+ if (!pagetable_pmd_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index c29a551eb0ca..4852b005a72d 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -504,12 +504,6 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
return true;
}
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
@@ -719,9 +713,6 @@ static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
-/* Extern to avoid header file madness */
-extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
-
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index 85fa9962266a..ef72c46b5568 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -65,7 +65,8 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
-#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+#define MAX_REG_OFFSET \
+ (offsetof(struct pt_regs, __last) - sizeof(unsigned long))
/**
* regs_get_register() - get register value from its offset
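
The MAX_REG_OFFSET change above matters because regs_get_register() reads a whole unsigned long at the given offset, so the largest valid offset is the start of the final slot rather than the full structure size. A simplified stand-in (not the kernel's pt_regs or helper) shows the bound being enforced.

    /*
     * Sketch of the bounds check that motivates subtracting sizeof(unsigned
     * long): reading a full word at offsetof(..., __last) would run past the
     * end of the structure.  Types and names here are illustrative only.
     */
    #include <stddef.h>
    #include <string.h>

    struct demo_regs {
            unsigned long regs[32];
            unsigned long cp0_epc;
            unsigned long __last[];               /* end-of-struct marker */
    };

    #define DEMO_MAX_REG_OFFSET \
            (offsetof(struct demo_regs, __last) - sizeof(unsigned long))

    static unsigned long demo_get_register(const struct demo_regs *r, size_t off)
    {
            unsigned long val = 0;

            if (off > DEMO_MAX_REG_OFFSET)        /* would read past the struct */
                    return 0;
            memcpy(&val, (const char *)r + off, sizeof(val));
            return val;
    }
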
diff --git a/arch/mips/include/asm/socket.h b/arch/mips/include/asm/socket.h
index 4724a563c5bf..43a09f0dd3ff 100644
--- a/arch/mips/include/asm/socket.h
+++ b/arch/mips/include/asm/socket.h
@@ -36,15 +36,6 @@ enum sock_type {
SOCK_PACKET = 10,
};
-#define SOCK_MAX (SOCK_PACKET + 1)
-/* Mask which covers at least up to SOCK_MASK-1. The
- * * remaining bits are used as flags. */
-#define SOCK_TYPE_MASK 0xf
-
-/* Flags for socket, socketpair, paccept */
-#define SOCK_CLOEXEC O_CLOEXEC
-#define SOCK_NONBLOCK O_NONBLOCK
-
#define ARCH_HAS_SOCKET_TYPES 1
#endif /* _ASM_SOCKET_H */
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 056aa1b713e2..d19e67e2aa6a 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -41,6 +41,21 @@ static inline long syscall_get_nr(struct task_struct *task,
return task_thread_info(task)->syscall;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ /*
+ * New syscall number has to be assigned to regs[2] because
+ * it is loaded from there unconditionally after return from
+ * syscall_trace_enter() invocation.
+ *
+ * Consequently, if the syscall was indirect and nr != __NR_syscall,
+ * then after this assignment the syscall will cease to be indirect.
+ */
+ task_thread_info(task)->syscall = regs->regs[2] = nr;
+}
+
static inline void mips_syscall_update_nr(struct task_struct *task,
struct pt_regs *regs)
{
@@ -74,6 +89,23 @@ static inline void mips_get_syscall_arg(unsigned long *arg,
#endif
}
+static inline void mips_set_syscall_arg(unsigned long *arg,
+ struct task_struct *task, struct pt_regs *regs, unsigned int n)
+{
+#ifdef CONFIG_32BIT
+ switch (n) {
+ case 0: case 1: case 2: case 3:
+ regs->regs[4 + n] = *arg;
+ return;
+ case 4: case 5: case 6: case 7:
+ regs->args[n] = *arg;
+ return;
+ }
+#else
+ regs->regs[4 + n] = *arg;
+#endif
+}
+
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
@@ -120,6 +152,17 @@ static inline void syscall_get_arguments(struct task_struct *task,
mips_get_syscall_arg(args++, task, regs, i++);
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ unsigned int i = 0;
+ unsigned int n = 6;
+
+ while (n--)
+ mips_set_syscall_arg(args++, task, regs, i++);
+}
+
extern const unsigned long sys_call_table[];
extern const unsigned long sys32_call_table[];
extern const unsigned long sysn32_call_table[];
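
The comment in syscall_set_nr() above is the key detail: on MIPS the traced syscall number lives both in thread_info and in regs[2], and the entry path reloads it from regs[2] after the tracer hook returns, so both copies must be written together. A rough illustration with stand-in types, not the kernel structures:

    /*
     * Illustrative only: shows the two-location invariant that the single
     * chained assignment in syscall_set_nr() maintains.
     */
    struct demo_thread_info { int syscall; };
    struct demo_pt_regs { unsigned long regs[32]; };

    static void demo_set_syscall_nr(struct demo_thread_info *ti,
                                    struct demo_pt_regs *regs, int nr)
    {
            /* One statement keeps both copies in sync, as in the patch above. */
            ti->syscall = regs->regs[2] = nr;
    }
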
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 22fa8f19924a..31ac655b7837 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -161,6 +161,8 @@
#define SO_RCVPRIORITY 82
+#define SO_PASSRIGHTS 83
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index a572ce36a24f..08c0a01d9a29 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -104,48 +104,59 @@ handle_vcei:
__FINIT
- .align 5 /* 32 byte rollback region */
-LEAF(__r4k_wait)
- .set push
- .set noreorder
- /* start of rollback region */
- LONG_L t0, TI_FLAGS($28)
- nop
- andi t0, _TIF_NEED_RESCHED
- bnez t0, 1f
- nop
- nop
- nop
-#ifdef CONFIG_CPU_MICROMIPS
- nop
- nop
- nop
- nop
-#endif
+ .section .cpuidle.text,"ax"
+ /* Align to 32 bytes for the maximum idle interrupt region size. */
+ .align 5
+LEAF(r4k_wait)
+ /* Keep the ISA bit clear for calculations on local labels here. */
+0: .fill 0
+ /* Start of idle interrupt region. */
+ local_irq_enable
+ /*
+ * If an interrupt lands here, before going idle on the next
+ * instruction, we must *NOT* go idle since the interrupt could
+ * have set TIF_NEED_RESCHED or caused a timer to need resched.
+ * Fall through -- see skipover_handler below -- and have the
+ * idle loop take care of things.
+ */
+1: .fill 0
+ /* The R2 EI/EHB sequence takes 8 bytes, otherwise pad up. */
+ .if 1b - 0b > 32
+ .error "overlong idle interrupt region"
+ .elseif 1b - 0b > 8
+ .align 4
+ .endif
+2: .fill 0
+ .equ r4k_wait_idle_size, 2b - 0b
+ /* End of idle interrupt region; size has to be a power of 2. */
.set MIPS_ISA_ARCH_LEVEL_RAW
+r4k_wait_insn:
wait
- /* end of rollback region (the region size must be power of two) */
-1:
+r4k_wait_exit:
+ .set mips0
+ local_irq_disable
jr ra
- nop
- .set pop
- END(__r4k_wait)
+ END(r4k_wait)
+ .previous
- .macro BUILD_ROLLBACK_PROLOGUE handler
- FEXPORT(rollback_\handler)
+ .macro BUILD_SKIPOVER_PROLOGUE handler
+ FEXPORT(skipover_\handler)
.set push
.set noat
MFC0 k0, CP0_EPC
- PTR_LA k1, __r4k_wait
- ori k0, 0x1f /* 32 byte rollback region */
- xori k0, 0x1f
+ /* Subtract/add 2 to let the ISA bit propagate through the mask. */
+ PTR_LA k1, r4k_wait_insn - 2
+ ori k0, r4k_wait_idle_size - 2
+ .set noreorder
bne k0, k1, \handler
+ PTR_ADDIU k0, r4k_wait_exit - r4k_wait_insn + 2
+ .set reorder
MTC0 k0, CP0_EPC
.set pop
.endm
.align 5
-BUILD_ROLLBACK_PROLOGUE handle_int
+BUILD_SKIPOVER_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -265,7 +276,7 @@ NESTED(except_vec_ejtag_debug, 0, sp)
* This prototype is copied to ebase + n*IntCtl.VS and patched
* to invoke the handler
*/
-BUILD_ROLLBACK_PROLOGUE except_vec_vi
+BUILD_SKIPOVER_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
SAVE_SOME docfi=1
SAVE_AT docfi=1
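
The skipover prologue above replaces the old rollback approach: instead of rewinding EPC to re-run the pre-idle checks, the interrupt handler tests whether the interrupt landed inside the power-of-two sized idle interrupt region and, if so, advances EPC past the WAIT so the CPU cannot sleep on a now-stale decision. A simplified C rendering of that address test follows; the addresses and masking are illustrative, and the real assembly additionally folds in the microMIPS ISA bit.

    /*
     * Sketch of the skipover test: the idle region is power-of-two sized and
     * aligned, so OR-ing EPC with (size - 1) maps every address inside the
     * region to one value that can be compared in a single instruction.
     */
    #include <stdint.h>

    static uintptr_t skipover_fixup(uintptr_t epc, uintptr_t region_start,
                                    uintptr_t region_size, uintptr_t wait_exit)
    {
            /* region_start is assumed aligned to the power-of-two region_size. */
            int in_region = (epc | (region_size - 1)) ==
                            (region_start | (region_size - 1));

            /* Interrupted between local_irq_enable and WAIT: resume after WAIT. */
            return in_region ? wait_exit : epc;
    }
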
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 5abc8b7340f8..80e8a04a642e 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -35,13 +35,6 @@ static void __cpuidle r3081_wait(void)
write_c0_conf(cfg | R30XX_CONF_HALT);
}
-void __cpuidle r4k_wait(void)
-{
- raw_local_irq_enable();
- __r4k_wait();
- raw_local_irq_disable();
-}
-
/*
* This variant is preferable as it allows testing need_resched and going to
* sleep depending on the outcome atomically. Unfortunately the "It is
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index c4d6b09136b1..196a070349b0 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -791,8 +791,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc,
if (!mipspmu_event_set_period(event, hwc, idx))
return;
- if (perf_event_overflow(event, data, regs))
- mipsxx_pmu_disable_event(idx);
+ perf_event_overflow(event, data, regs);
}
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index e85bd087467e..cc26d56f3ab6 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -332,6 +332,8 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
mips_cps_cluster_bootcfg = kcalloc(nclusters,
sizeof(*mips_cps_cluster_bootcfg),
GFP_KERNEL);
+ if (!mips_cps_cluster_bootcfg)
+ goto err_out;
if (nclusters > 1)
mips_cm_update_property();
@@ -348,6 +350,8 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
mips_cps_cluster_bootcfg[cl].core_power =
kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
GFP_KERNEL);
+ if (!mips_cps_cluster_bootcfg[cl].core_power)
+ goto err_out;
/* Allocate VPE boot configuration structs */
for (c = 0; c < ncores; c++) {
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 39e248d0ed59..8ec1e185b35c 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -77,7 +77,7 @@
#include "access-helper.h"
extern void check_wait(void);
-extern asmlinkage void rollback_handle_int(void);
+extern asmlinkage void skipover_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
@@ -2066,7 +2066,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
{
extern const u8 except_vec_vi[];
extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
- extern const u8 rollback_except_vec_vi[];
+ extern const u8 skipover_except_vec_vi[];
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
int srssets = current_cpu_data.srsets;
@@ -2095,7 +2095,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
change_c0_srsmap(0xf << n*4, 0 << n*4);
}
- vec_start = using_rollback_handler() ? rollback_except_vec_vi :
+ vec_start = using_skipover_handler() ? skipover_except_vec_vi :
except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
ori_offset = except_vec_vi_ori - vec_start + 2;
@@ -2426,8 +2426,8 @@ void __init trap_init(void)
if (board_be_init)
board_be_init();
- set_except_vector(EXCCODE_INT, using_rollback_handler() ?
- rollback_handle_int : handle_int);
+ set_except_vector(EXCCODE_INT, using_skipover_handler() ?
+ skipover_handle_int : handle_int);
set_except_vector(EXCCODE_MOD, handle_tlbm);
set_except_vector(EXCCODE_TLBL, handle_tlbl);
set_except_vector(EXCCODE_TLBS, handle_tlbs);
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 8f208007b8e8..a112573b6e37 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -377,7 +377,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
for (i = 0; i < MAX_IM; i++)
irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
- ltq_domain = irq_domain_add_linear(node,
+ ltq_domain = irq_domain_create_linear(of_fwnode_handle(node),
(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
&irq_domain_ops, 0);
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 9c024e6d5e54..9d75845ef78e 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -3,6 +3,8 @@
# Makefile for MIPS-specific library files..
#
+obj-y += crypto/
+
lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
mips-atomic.o strncpy_user.o \
strnlen_user.o uncached.o
diff --git a/arch/mips/lib/crc32-mips.c b/arch/mips/lib/crc32-mips.c
index 676a4b3e290b..45e4d2c9fbf5 100644
--- a/arch/mips/lib/crc32-mips.c
+++ b/arch/mips/lib/crc32-mips.c
@@ -62,7 +62,7 @@ do { \
#define CRC32C(crc, value, size) \
_CRC32(crc, value, size, crc32c)
-static DEFINE_STATIC_KEY_FALSE(have_crc32);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32);
u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
{
@@ -163,7 +163,7 @@ static int __init crc32_mips_init(void)
static_branch_enable(&have_crc32);
return 0;
}
-arch_initcall(crc32_mips_init);
+subsys_initcall(crc32_mips_init);
static void __exit crc32_mips_exit(void)
{
diff --git a/arch/mips/lib/crypto/.gitignore b/arch/mips/lib/crypto/.gitignore
new file mode 100644
index 000000000000..0d47d4f21c6d
--- /dev/null
+++ b/arch/mips/lib/crypto/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+poly1305-core.S
diff --git a/arch/mips/lib/crypto/Kconfig b/arch/mips/lib/crypto/Kconfig
new file mode 100644
index 000000000000..0670a170c1be
--- /dev/null
+++ b/arch/mips/lib/crypto/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CRYPTO_CHACHA_MIPS
+ tristate
+ depends on CPU_MIPS32_R2
+ default CRYPTO_LIB_CHACHA
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+
+config CRYPTO_POLY1305_MIPS
+ tristate
+ default CRYPTO_LIB_POLY1305
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
diff --git a/arch/mips/lib/crypto/Makefile b/arch/mips/lib/crypto/Makefile
new file mode 100644
index 000000000000..804488c7aded
--- /dev/null
+++ b/arch/mips/lib/crypto/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o
+chacha-mips-y := chacha-core.o chacha-glue.o
+AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
+
+obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o
+poly1305-mips-y := poly1305-core.o poly1305-glue.o
+
+perlasm-flavour-$(CONFIG_32BIT) := o32
+perlasm-flavour-$(CONFIG_64BIT) := 64
+
+quiet_cmd_perlasm = PERLASM $@
+ cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@)
+
+$(obj)/poly1305-core.S: $(src)/poly1305-mips.pl FORCE
+ $(call if_changed,perlasm)
+
+targets += poly1305-core.S
diff --git a/arch/mips/crypto/chacha-core.S b/arch/mips/lib/crypto/chacha-core.S
index 5755f69cfe00..5755f69cfe00 100644
--- a/arch/mips/crypto/chacha-core.S
+++ b/arch/mips/lib/crypto/chacha-core.S
diff --git a/arch/mips/lib/crypto/chacha-glue.c b/arch/mips/lib/crypto/chacha-glue.c
new file mode 100644
index 000000000000..88c097594eb0
--- /dev/null
+++ b/arch/mips/lib/crypto/chacha-glue.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ChaCha and HChaCha functions (MIPS optimized)
+ *
+ * Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <crypto/chacha.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+asmlinkage void chacha_crypt_arch(struct chacha_state *state,
+ u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds);
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+asmlinkage void hchacha_block_arch(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds);
+EXPORT_SYMBOL(hchacha_block_arch);
+
+bool chacha_is_arch_optimized(void)
+{
+ return true;
+}
+EXPORT_SYMBOL(chacha_is_arch_optimized);
+
+MODULE_DESCRIPTION("ChaCha and HChaCha functions (MIPS optimized)");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/mips/lib/crypto/poly1305-glue.c b/arch/mips/lib/crypto/poly1305-glue.c
new file mode 100644
index 000000000000..764a38a65200
--- /dev/null
+++ b/arch/mips/lib/crypto/poly1305-glue.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OpenSSL/Cryptogams accelerated Poly1305 transform for MIPS
+ *
+ * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <crypto/internal/poly1305.h>
+#include <linux/cpufeature.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/unaligned.h>
+
+asmlinkage void poly1305_block_init_arch(
+ struct poly1305_block_state *state,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+EXPORT_SYMBOL_GPL(poly1305_block_init_arch);
+asmlinkage void poly1305_blocks_arch(struct poly1305_block_state *state,
+ const u8 *src, u32 len, u32 hibit);
+EXPORT_SYMBOL_GPL(poly1305_blocks_arch);
+asmlinkage void poly1305_emit_arch(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4]);
+EXPORT_SYMBOL_GPL(poly1305_emit_arch);
+
+bool poly1305_is_arch_optimized(void)
+{
+ return true;
+}
+EXPORT_SYMBOL(poly1305_is_arch_optimized);
+
+MODULE_DESCRIPTION("Poly1305 transform (MIPS accelerated");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/mips/crypto/poly1305-mips.pl b/arch/mips/lib/crypto/poly1305-mips.pl
index b05bab884ed2..399f10c3e385 100644
--- a/arch/mips/crypto/poly1305-mips.pl
+++ b/arch/mips/lib/crypto/poly1305-mips.pl
@@ -93,9 +93,9 @@ $code.=<<___;
#endif
#ifdef __KERNEL__
-# define poly1305_init poly1305_init_mips
-# define poly1305_blocks poly1305_blocks_mips
-# define poly1305_emit poly1305_emit_mips
+# define poly1305_init poly1305_block_init_arch
+# define poly1305_blocks poly1305_blocks_arch
+# define poly1305_emit poly1305_emit_arch
#endif
#if defined(__MIPSEB__) && !defined(MIPSEB)
@@ -565,9 +565,9 @@ $code.=<<___;
#endif
#ifdef __KERNEL__
-# define poly1305_init poly1305_init_mips
-# define poly1305_blocks poly1305_blocks_mips
-# define poly1305_emit poly1305_emit_mips
+# define poly1305_init poly1305_block_init_arch
+# define poly1305_blocks poly1305_blocks_arch
+# define poly1305_emit poly1305_emit_arch
#endif
#if defined(__MIPSEB__) && !defined(MIPSEB)
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index 84dd5136d53a..e2cf2166d5cb 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -31,16 +31,6 @@ void pgd_init(void *addr)
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
-{
- pmd_t pmd;
-
- pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
-
- return pmd;
-}
-
-
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 1e544827dea9..b24f865de357 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -90,15 +90,6 @@ void pud_init(void *addr)
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
-{
- pmd_t pmd;
-
- pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
-
- return pmd;
-}
-
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
index a925842ee125..17fa97ec6ffb 100644
--- a/arch/mips/pci/pci-ar2315.c
+++ b/arch/mips/pci/pci-ar2315.c
@@ -469,8 +469,8 @@ static int ar2315_pci_probe(struct platform_device *pdev)
if (err)
return err;
- apc->domain = irq_domain_add_linear(NULL, AR2315_PCI_IRQ_COUNT,
- &ar2315_pci_irq_domain_ops, apc);
+ apc->domain = irq_domain_create_linear(NULL, AR2315_PCI_IRQ_COUNT,
+ &ar2315_pci_irq_domain_ops, apc);
if (!apc->domain) {
dev_err(dev, "failed to add IRQ domain\n");
return -ENOMEM;
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index 4ac68a534e4f..14454ece485d 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -208,9 +208,10 @@ static int rt3883_pci_irq_init(struct device *dev,
rt3883_pci_w32(rpc, 0, RT3883_PCI_REG_PCIENA);
rpc->irq_domain =
- irq_domain_add_linear(rpc->intc_of_node, RT3883_PCI_IRQ_COUNT,
- &rt3883_pci_irq_domain_ops,
- rpc);
+ irq_domain_create_linear(of_fwnode_handle(rpc->intc_of_node),
+ RT3883_PCI_IRQ_COUNT,
+ &rt3883_pci_irq_domain_ops,
+ rpc);
if (!rpc->irq_domain) {
dev_err(dev, "unable to add IRQ domain\n");
return -ENODEV;
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 46aef0a1b22a..af5bbbea949b 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -176,7 +176,7 @@ static int __init intc_of_init(struct device_node *node,
/* route all INTC interrupts to MIPS HW0 interrupt */
rt_intc_w32(0, INTC_REG_TYPE);
- domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT,
+ domain = irq_domain_create_legacy(of_fwnode_handle(node), RALINK_INTC_IRQ_COUNT,
RALINK_INTC_IRQ_BASE, 0, &irq_domain_ops, NULL);
if (!domain)
panic("Failed to add irqdomain");
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index eab87c6beacb..e98578e27e26 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -221,12 +221,6 @@ static inline void pte_clear(struct mm_struct *mm,
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-#define mk_pte(page, prot) (pfn_pte(page_to_pfn(page), prot))
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
#define pmd_pfn(pmd) (pmd_phys(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
@@ -291,4 +285,20 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+static inline int pte_same(pte_t pte_a, pte_t pte_b);
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ if (!pte_same(*ptep, entry))
+ set_ptes(vma->vm_mm, address, ptep, entry, 1);
+ /*
+ * update_mmu_cache will unconditionally execute, handling both
+ * the case that the PTE changed and the spurious fault case.
+ */
+ return true;
+}
+
#endif /* _ASM_NIOS2_PGTABLE_H */
diff --git a/arch/nios2/include/asm/syscall.h b/arch/nios2/include/asm/syscall.h
index fff52205fb65..8e3eb1d689bb 100644
--- a/arch/nios2/include/asm/syscall.h
+++ b/arch/nios2/include/asm/syscall.h
@@ -15,6 +15,11 @@ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
return regs->r2;
}
+static inline void syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr)
+{
+ regs->r2 = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -58,6 +63,17 @@ static inline void syscall_get_arguments(struct task_struct *task,
*args = regs->r9;
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs, const unsigned long *args)
+{
+ regs->r4 = *args++;
+ regs->r5 = *args++;
+ regs->r6 = *args++;
+ regs->r7 = *args++;
+ regs->r8 = *args++;
+ regs->r9 = *args;
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_NIOS2;
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
index 7b1e8f9128e9..55882feb6249 100644
--- a/arch/nios2/kernel/cpuinfo.c
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -46,10 +46,7 @@ void __init setup_cpuinfo(void)
cpuinfo.cpu_clock_freq = fcpu(cpu, "clock-frequency");
str = of_get_property(cpu, "altr,implementation", &len);
- if (str)
- strscpy(cpuinfo.cpu_impl, str, sizeof(cpuinfo.cpu_impl));
- else
- strcpy(cpuinfo.cpu_impl, "<unknown>");
+ strscpy(cpuinfo.cpu_impl, str ?: "<unknown>");
cpuinfo.has_div = of_property_read_bool(cpu, "altr,has-div");
cpuinfo.has_mul = of_property_read_bool(cpu, "altr,has-mul");
diff --git a/arch/nios2/kernel/irq.c b/arch/nios2/kernel/irq.c
index 8fa280660051..73568d8e21e0 100644
--- a/arch/nios2/kernel/irq.c
+++ b/arch/nios2/kernel/irq.c
@@ -69,7 +69,8 @@ void __init init_IRQ(void)
BUG_ON(!node);
- domain = irq_domain_add_linear(node, NIOS2_CPU_NR_IRQS, &irq_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(node),
+ NIOS2_CPU_NR_IRQS, &irq_ops, NULL);
BUG_ON(!domain);
irq_set_default_domain(domain);
diff --git a/arch/nios2/mm/tlb.c b/arch/nios2/mm/tlb.c
index f90ac35f05f3..a9cbe20f9e79 100644
--- a/arch/nios2/mm/tlb.c
+++ b/arch/nios2/mm/tlb.c
@@ -144,10 +144,11 @@ static void flush_tlb_one(unsigned long addr)
if (((pteaddr >> 2) & 0xfffff) != (addr >> PAGE_SHIFT))
continue;
+ tlbmisc = RDCTL(CTL_TLBMISC);
pr_debug("Flush entry by writing way=%dl pid=%ld\n",
- way, (pid_misc >> TLBMISC_PID_SHIFT));
+ way, ((tlbmisc >> TLBMISC_PID_SHIFT) & TLBMISC_PID_MASK));
- tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
+ tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT) | (tlbmisc & TLBMISC_PID);
WRCTL(CTL_TLBMISC, tlbmisc);
WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
WRCTL(CTL_TLBACC, 0);
@@ -237,7 +238,8 @@ void flush_tlb_pid(unsigned long mmu_pid)
if (pid != mmu_pid)
continue;
- tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
+ tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT) |
+ (pid << TLBMISC_PID_SHIFT);
WRCTL(CTL_TLBMISC, tlbmisc);
WRCTL(CTL_TLBACC, 0);
}
@@ -272,15 +274,17 @@ void flush_tlb_all(void)
/* remember pid/way until we return */
get_misc_and_pid(&org_misc, &pid_misc);
- /* Start at way 0, way is auto-incremented after each TLBACC write */
- WRCTL(CTL_TLBMISC, TLBMISC_WE);
-
/* Map each TLB entry to physical address 0 with no-access and a
bad ptbase */
for (line = 0; line < cpuinfo.tlb_num_lines; line++) {
WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
- for (way = 0; way < cpuinfo.tlb_num_ways; way++)
+ for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
+ // Code such as replace_tlb_one_pid assumes that no duplicate entries exist
+ // for a single address across ways, so also use way as a dummy PID
+ WRCTL(CTL_TLBMISC, TLBMISC_WE | (way << TLBMISC_WAY_SHIFT) |
+ (way << TLBMISC_PID_SHIFT));
WRCTL(CTL_TLBACC, 0);
+ }
addr += PAGE_SIZE;
}
diff --git a/arch/openrisc/include/asm/cacheflush.h b/arch/openrisc/include/asm/cacheflush.h
index 984c331ff5f4..0e60af486ec1 100644
--- a/arch/openrisc/include/asm/cacheflush.h
+++ b/arch/openrisc/include/asm/cacheflush.h
@@ -23,6 +23,9 @@
*/
extern void local_dcache_page_flush(struct page *page);
extern void local_icache_page_inv(struct page *page);
+extern void local_dcache_range_flush(unsigned long start, unsigned long end);
+extern void local_dcache_range_inv(unsigned long start, unsigned long end);
+extern void local_icache_range_inv(unsigned long start, unsigned long end);
/*
* Data cache flushing always happen on the local cpu. Instruction cache
@@ -39,6 +42,20 @@ extern void smp_icache_page_inv(struct page *page);
#endif /* CONFIG_SMP */
/*
+ * Even if the actual block size is larger than L1_CACHE_BYTES, paddr
+ * can be incremented by L1_CACHE_BYTES. When paddr is written to the
+ * invalidate register, the entire cache line encompassing this address
+ * is invalidated. Each subsequent reference to the same cache line will
+ * not affect the invalidation process.
+ */
+#define local_dcache_block_flush(addr) \
+ local_dcache_range_flush(addr, addr + L1_CACHE_BYTES)
+#define local_dcache_block_inv(addr) \
+ local_dcache_range_inv(addr, addr + L1_CACHE_BYTES)
+#define local_icache_block_inv(addr) \
+ local_icache_range_inv(addr, addr + L1_CACHE_BYTES)
+
+/*
* Synchronizes caches. Whenever a cpu writes executable code to memory, this
* should be called to make sure the processor sees the newly written code.
*/
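
The block macros above lean on the property spelled out in the comment: writing any address within a line to the flush/invalidate register acts on the whole line, so a range operation can simply step by L1_CACHE_BYTES. A standalone sketch of that loop shape follows; flush_line() stands in for the mtspr-style register write and the line size is an assumption, this is not the OpenRISC implementation.

    /*
     * Illustrative range flush: align down to the start of the first line,
     * then visit one address per line until 'end' is covered.
     */
    #include <stdint.h>

    #define DEMO_L1_CACHE_BYTES 32

    static void demo_dcache_range_flush(uintptr_t start, uintptr_t end,
                                        void (*flush_line)(uintptr_t addr))
    {
            uintptr_t addr = start & ~(uintptr_t)(DEMO_L1_CACHE_BYTES - 1);

            for (; addr < end; addr += DEMO_L1_CACHE_BYTES)
                    flush_line(addr);     /* affects the whole line at addr */
    }
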
diff --git a/arch/openrisc/include/asm/cpuinfo.h b/arch/openrisc/include/asm/cpuinfo.h
index 5e4744153d0e..3cfc4cf0b019 100644
--- a/arch/openrisc/include/asm/cpuinfo.h
+++ b/arch/openrisc/include/asm/cpuinfo.h
@@ -15,16 +15,21 @@
#ifndef __ASM_OPENRISC_CPUINFO_H
#define __ASM_OPENRISC_CPUINFO_H
+#include <asm/spr.h>
+#include <asm/spr_defs.h>
+
+struct cache_desc {
+ u32 size;
+ u32 sets;
+ u32 block_size;
+ u32 ways;
+};
+
struct cpuinfo_or1k {
u32 clock_frequency;
- u32 icache_size;
- u32 icache_block_size;
- u32 icache_ways;
-
- u32 dcache_size;
- u32 dcache_block_size;
- u32 dcache_ways;
+ struct cache_desc icache;
+ struct cache_desc dcache;
u16 coreid;
};
@@ -32,4 +37,9 @@ struct cpuinfo_or1k {
extern struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS];
extern void setup_cpuinfo(void);
+/*
+ * Check if the cache component exists.
+ */
+extern bool cpu_cache_is_present(const unsigned int cache_type);
+
#endif /* __ASM_OPENRISC_CPUINFO_H */
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 60c6ce7ff2dc..71bfb8c8c482 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -299,8 +299,6 @@ static inline pte_t __mk_pte(void *page, pgprot_t pgprot)
return pte;
}
-#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))
-
#define mk_pte_phys(physpage, pgprot) \
({ \
pte_t __pte; \
diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h
index 903ed882bdec..5e037d9659c5 100644
--- a/arch/openrisc/include/asm/syscall.h
+++ b/arch/openrisc/include/asm/syscall.h
@@ -26,6 +26,12 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
}
static inline void
+syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr)
+{
+ regs->orig_gpr11 = nr;
+}
+
+static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{
regs->gpr[11] = regs->orig_gpr11;
@@ -57,6 +63,13 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
memcpy(args, &regs->gpr[3], 6 * sizeof(args[0]));
}
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ const unsigned long *args)
+{
+ memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_OPENRISC;
diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile
index 79129161f3e0..e4c7d9bdd598 100644
--- a/arch/openrisc/kernel/Makefile
+++ b/arch/openrisc/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := vmlinux.lds
obj-y := head.o setup.o or32_ksyms.o process.o dma.o \
traps.o time.o irq.o entry.o ptrace.o signal.o \
- sys_call_table.o unwinder.o
+ sys_call_table.o unwinder.o cacheinfo.o
obj-$(CONFIG_SMP) += smp.o sync-timer.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/openrisc/kernel/cacheinfo.c b/arch/openrisc/kernel/cacheinfo.c
new file mode 100644
index 000000000000..61230545e4ff
--- /dev/null
+++ b/arch/openrisc/kernel/cacheinfo.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OpenRISC cacheinfo support
+ *
+ * Based on work done for MIPS and LoongArch. All original copyrights
+ * apply as per the original source declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2025 Sahil Siddiq <sahilcdq@proton.me>
+ */
+
+#include <linux/cacheinfo.h>
+#include <asm/cpuinfo.h>
+#include <asm/spr.h>
+#include <asm/spr_defs.h>
+
+static inline void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type,
+ unsigned int level, struct cache_desc *cache, int cpu)
+{
+ this_leaf->type = type;
+ this_leaf->level = level;
+ this_leaf->coherency_line_size = cache->block_size;
+ this_leaf->number_of_sets = cache->sets;
+ this_leaf->ways_of_associativity = cache->ways;
+ this_leaf->size = cache->size;
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+}
+
+int init_cache_level(unsigned int cpu)
+{
+ struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ int leaves = 0, levels = 0;
+ unsigned long upr = mfspr(SPR_UPR);
+ unsigned long iccfgr, dccfgr;
+
+ if (!(upr & SPR_UPR_UP)) {
+ printk(KERN_INFO
+ "-- no UPR register... unable to detect configuration\n");
+ return -ENOENT;
+ }
+
+ if (cpu_cache_is_present(SPR_UPR_DCP)) {
+ dccfgr = mfspr(SPR_DCCFGR);
+ cpuinfo->dcache.ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
+ cpuinfo->dcache.sets = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3);
+ cpuinfo->dcache.block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
+ cpuinfo->dcache.size =
+ cpuinfo->dcache.sets * cpuinfo->dcache.ways * cpuinfo->dcache.block_size;
+ leaves += 1;
+ printk(KERN_INFO
+ "-- dcache: %d bytes total, %d bytes/line, %d set(s), %d way(s)\n",
+ cpuinfo->dcache.size, cpuinfo->dcache.block_size,
+ cpuinfo->dcache.sets, cpuinfo->dcache.ways);
+ } else
+ printk(KERN_INFO "-- dcache disabled\n");
+
+ if (cpu_cache_is_present(SPR_UPR_ICP)) {
+ iccfgr = mfspr(SPR_ICCFGR);
+ cpuinfo->icache.ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
+ cpuinfo->icache.sets = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
+ cpuinfo->icache.block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
+ cpuinfo->icache.size =
+ cpuinfo->icache.sets * cpuinfo->icache.ways * cpuinfo->icache.block_size;
+ leaves += 1;
+ printk(KERN_INFO
+ "-- icache: %d bytes total, %d bytes/line, %d set(s), %d way(s)\n",
+ cpuinfo->icache.size, cpuinfo->icache.block_size,
+ cpuinfo->icache.sets, cpuinfo->icache.ways);
+ } else
+ printk(KERN_INFO "-- icache disabled\n");
+
+ if (!leaves)
+ return -ENOENT;
+
+ levels = 1;
+
+ this_cpu_ci->num_leaves = leaves;
+ this_cpu_ci->num_levels = levels;
+
+ return 0;
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+ struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+ int level = 1;
+
+ if (cpu_cache_is_present(SPR_UPR_DCP)) {
+ ci_leaf_init(this_leaf, CACHE_TYPE_DATA, level, &cpuinfo->dcache, cpu);
+ this_leaf->attributes = ((mfspr(SPR_DCCFGR) & SPR_DCCFGR_CWS) >> 8) ?
+ CACHE_WRITE_BACK : CACHE_WRITE_THROUGH;
+ this_leaf++;
+ }
+
+ if (cpu_cache_is_present(SPR_UPR_ICP))
+ ci_leaf_init(this_leaf, CACHE_TYPE_INST, level, &cpuinfo->icache, cpu);
+
+ this_cpu_ci->cpu_map_populated = true;
+
+ return 0;
+}
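
The geometry decoding in init_cache_level() above packs four cache parameters into each configuration register. A worked example with a made-up register value shows how they combine; the masks below match the shifts used in the code, and the value 0x48 is purely illustrative.

    /*
     * Worked example of the DCCFGR/ICCFGR decoding: ways = 1 << NCW,
     * sets = 1 << NCS, block = 16 << CBS, size = sets * ways * block.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned long dccfgr = 0x48;                       /* hypothetical */
            unsigned int ways  = 1 << (dccfgr & 0x07);         /* NCW, bits 2:0 */
            unsigned int sets  = 1 << ((dccfgr & 0x78) >> 3);  /* NCS, bits 6:3 */
            unsigned int block = 16 << ((dccfgr & 0x80) >> 7); /* CBS, bit 7 */

            /* 0x48 -> 1 way, 512 sets, 16-byte lines => 8192 bytes total */
            printf("%u bytes (%u sets x %u way(s) x %u bytes/line)\n",
                   sets * ways * block, sets, ways, block);
            return 0;
    }
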
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index b3edbb33b621..3a7b5baaa450 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -17,6 +17,7 @@
#include <linux/pagewalk.h>
#include <asm/cpuinfo.h>
+#include <asm/cacheflush.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>
@@ -24,9 +25,6 @@ static int
page_set_nocache(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- unsigned long cl;
- struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
-
pte_val(*pte) |= _PAGE_CI;
/*
@@ -36,8 +34,7 @@ page_set_nocache(pte_t *pte, unsigned long addr,
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
/* Flush page out of dcache */
- for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
- mtspr(SPR_DCBFR, cl);
+ local_dcache_range_flush(__pa(addr), __pa(next));
return 0;
}
@@ -98,21 +95,14 @@ void arch_dma_clear_uncached(void *cpu_addr, size_t size)
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
enum dma_data_direction dir)
{
- unsigned long cl;
- struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
-
switch (dir) {
case DMA_TO_DEVICE:
/* Flush the dcache for the requested range */
- for (cl = addr; cl < addr + size;
- cl += cpuinfo->dcache_block_size)
- mtspr(SPR_DCBFR, cl);
+ local_dcache_range_flush(addr, addr + size);
break;
case DMA_FROM_DEVICE:
/* Invalidate the dcache for the requested range */
- for (cl = addr; cl < addr + size;
- cl += cpuinfo->dcache_block_size)
- mtspr(SPR_DCBIR, cl);
+ local_dcache_range_inv(addr, addr + size);
break;
default:
/*
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index be56eaafc8b9..a9fb9cc6779e 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -113,21 +113,6 @@ static void print_cpuinfo(void)
return;
}
- if (upr & SPR_UPR_DCP)
- printk(KERN_INFO
- "-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n",
- cpuinfo->dcache_size, cpuinfo->dcache_block_size,
- cpuinfo->dcache_ways);
- else
- printk(KERN_INFO "-- dcache disabled\n");
- if (upr & SPR_UPR_ICP)
- printk(KERN_INFO
- "-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n",
- cpuinfo->icache_size, cpuinfo->icache_block_size,
- cpuinfo->icache_ways);
- else
- printk(KERN_INFO "-- icache disabled\n");
-
if (upr & SPR_UPR_DMP)
printk(KERN_INFO "-- dmmu: %4d entries, %lu way(s)\n",
1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
@@ -155,8 +140,6 @@ static void print_cpuinfo(void)
void __init setup_cpuinfo(void)
{
struct device_node *cpu;
- unsigned long iccfgr, dccfgr;
- unsigned long cache_set_size;
int cpu_id = smp_processor_id();
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu_id];
@@ -164,20 +147,6 @@ void __init setup_cpuinfo(void)
if (!cpu)
panic("Couldn't find CPU%d in device tree...\n", cpu_id);
- iccfgr = mfspr(SPR_ICCFGR);
- cpuinfo->icache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
- cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
- cpuinfo->icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
- cpuinfo->icache_size =
- cache_set_size * cpuinfo->icache_ways * cpuinfo->icache_block_size;
-
- dccfgr = mfspr(SPR_DCCFGR);
- cpuinfo->dcache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
- cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3);
- cpuinfo->dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
- cpuinfo->dcache_size =
- cache_set_size * cpuinfo->dcache_ways * cpuinfo->dcache_block_size;
-
if (of_property_read_u32(cpu, "clock-frequency",
&cpuinfo->clock_frequency)) {
printk(KERN_WARNING
@@ -294,14 +263,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
unsigned int vr, cpucfgr;
unsigned int avr;
unsigned int version;
+#ifdef CONFIG_SMP
struct cpuinfo_or1k *cpuinfo = v;
+ seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid);
+#endif
vr = mfspr(SPR_VR);
cpucfgr = mfspr(SPR_CPUCFGR);
-#ifdef CONFIG_SMP
- seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid);
-#endif
if (vr & SPR_VR_UVRP) {
vr = mfspr(SPR_VR2);
version = vr & SPR_VR2_VER;
@@ -320,14 +289,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "revision\t\t: %d\n", vr & SPR_VR_REV);
}
seq_printf(m, "frequency\t\t: %ld\n", loops_per_jiffy * HZ);
- seq_printf(m, "dcache size\t\t: %d bytes\n", cpuinfo->dcache_size);
- seq_printf(m, "dcache block size\t: %d bytes\n",
- cpuinfo->dcache_block_size);
- seq_printf(m, "dcache ways\t\t: %d\n", cpuinfo->dcache_ways);
- seq_printf(m, "icache size\t\t: %d bytes\n", cpuinfo->icache_size);
- seq_printf(m, "icache block size\t: %d bytes\n",
- cpuinfo->icache_block_size);
- seq_printf(m, "icache ways\t\t: %d\n", cpuinfo->icache_ways);
seq_printf(m, "immu\t\t\t: %d entries, %lu ways\n",
1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW));
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
index eb43b73f3855..0f265b8e73ec 100644
--- a/arch/openrisc/mm/cache.c
+++ b/arch/openrisc/mm/cache.c
@@ -14,31 +14,70 @@
#include <asm/spr_defs.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
+#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>
-static __always_inline void cache_loop(struct page *page, const unsigned int reg)
+/*
+ * Check if the cache component exists.
+ */
+bool cpu_cache_is_present(const unsigned int cache_type)
{
- unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
- unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
+ unsigned long upr = mfspr(SPR_UPR);
+ unsigned long mask = SPR_UPR_UP | cache_type;
+
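+	/* Present only if both SPR_UPR_UP and the cache's own present bit are set. */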
+ return !((upr & mask) ^ mask);
+}
+
+static __always_inline void cache_loop(unsigned long paddr, unsigned long end,
+ const unsigned short reg, const unsigned int cache_type)
+{
+ if (!cpu_cache_is_present(cache_type))
+ return;
- while (line < paddr + PAGE_SIZE) {
- mtspr(reg, line);
- line += L1_CACHE_BYTES;
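+	/* Write each line's physical address to the given cache control register. */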
+ while (paddr < end) {
+ mtspr(reg, paddr);
+ paddr += L1_CACHE_BYTES;
}
}
+static __always_inline void cache_loop_page(struct page *page, const unsigned short reg,
+ const unsigned int cache_type)
+{
+ unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
+ unsigned long end = paddr + PAGE_SIZE;
+
+ paddr &= ~(L1_CACHE_BYTES - 1);
+
+ cache_loop(paddr, end, reg, cache_type);
+}
+
void local_dcache_page_flush(struct page *page)
{
- cache_loop(page, SPR_DCBFR);
+ cache_loop_page(page, SPR_DCBFR, SPR_UPR_DCP);
}
EXPORT_SYMBOL(local_dcache_page_flush);
void local_icache_page_inv(struct page *page)
{
- cache_loop(page, SPR_ICBIR);
+ cache_loop_page(page, SPR_ICBIR, SPR_UPR_ICP);
}
EXPORT_SYMBOL(local_icache_page_inv);
+void local_dcache_range_flush(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_DCBFR, SPR_UPR_DCP);
+}
+
+void local_dcache_range_inv(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_DCBIR, SPR_UPR_DCP);
+}
+
+void local_icache_range_inv(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_ICBIR, SPR_UPR_ICP);
+}
+
void update_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *pte)
{
@@ -58,4 +97,3 @@ void update_cache(struct vm_area_struct *vma, unsigned long address,
sync_icache_dcache(folio_page(folio, nr));
}
}
-
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index be1c2eb8bb94..e4904ca6f0a0 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -35,6 +35,7 @@
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
+#include <asm/cacheflush.h>
int mem_init_done;
@@ -176,8 +177,8 @@ void __init paging_init(void)
barrier();
/* Invalidate instruction caches after code modification */
- mtspr(SPR_ICBIR, 0x900);
- mtspr(SPR_ICBIR, 0xa00);
+ local_icache_block_inv(0x900);
+ local_icache_block_inv(0xa00);
	/* New TLB miss handlers and kernel page tables are now in place.
* Make sure that page flags get updated for all pages in TLB by
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 8e63e86251ca..3b352f97fecb 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -36,7 +36,7 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
pte_t *pte;
if (likely(mem_init_done)) {
- pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+ pte = __pte_alloc_one_kernel(mm);
} else {
pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
}
diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile
index 92227fa813dc..17c42d718eb3 100644
--- a/arch/parisc/boot/compressed/Makefile
+++ b/arch/parisc/boot/compressed/Makefile
@@ -18,6 +18,7 @@ KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os
ifndef CONFIG_64BIT
KBUILD_CFLAGS += -mfast-indirect-calls
endif
+KBUILD_CFLAGS += -std=gnu11
LDFLAGS_vmlinux := -X -e startup --as-needed -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(addprefix $(obj)/, $(OBJECTS)) $(LIBGCC) FORCE
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index f5fffc24c3bc..94928d114d4c 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -251,7 +251,7 @@ CONFIG_CIFS=m
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
# CONFIG_CIFS_DEBUG is not set
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
@@ -264,8 +264,6 @@ CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRC_CCITT=m
-CONFIG_CRC_T10DIF=y
CONFIG_FONTS=y
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index 2487765b7be3..d8cd7f858b2a 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -283,7 +283,6 @@ CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_2=m
CONFIG_NLS_UTF8=m
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
@@ -292,7 +291,6 @@ CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=m
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_STRIP_ASM_SYMS=y
diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h
index 1eb488f25b83..1601ae4b888d 100644
--- a/arch/parisc/include/asm/alternative.h
+++ b/arch/parisc/include/asm/alternative.h
@@ -13,7 +13,7 @@
#define INSN_PxTLB 0x02 /* modify pdtlb, pitlb */
#define INSN_NOP 0x08000240 /* nop */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/init.h>
#include <linux/types.h>
@@ -61,6 +61,6 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
.word (new_instr_ptr - .) ! \
.previous
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_PARISC_ALTERNATIVE_H */
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 000a28e1c5e8..c20261604f09 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -53,7 +53,7 @@
#define SR_TEMP2 2
#define SR_USER 3
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#ifdef CONFIG_64BIT
#define LDREG ldd
@@ -582,5 +582,5 @@
.previous
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
index c705decf2bed..519b1903c5ed 100644
--- a/arch/parisc/include/asm/barrier.h
+++ b/arch/parisc/include/asm/barrier.h
@@ -4,7 +4,7 @@
#include <asm/alternative.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* The synchronize caches instruction executes as a nop on systems in
which all memory references are performed in order. */
@@ -93,5 +93,5 @@ do { \
})
#include <asm-generic/barrier.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_BARRIER_H */
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index a3f0f100f219..3f8d3be6ef24 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -16,7 +16,7 @@
#define L1_CACHE_BYTES 16
#define L1_CACHE_SHIFT 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define SMP_CACHE_BYTES L1_CACHE_BYTES
@@ -66,7 +66,7 @@ void parisc_setup_cache_timing(void);
ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory")
#define asm_syncdma() asm volatile("syncdma" :::"memory")
-#endif /* ! __ASSEMBLY__ */
+#endif /* ! __ASSEMBLER__ */
/* Classes of processor wrt: disabling space register hashing */
diff --git a/arch/parisc/include/asm/current.h b/arch/parisc/include/asm/current.h
index dc7aea07c3f3..2814529a4c28 100644
--- a/arch/parisc/include/asm/current.h
+++ b/arch/parisc/include/asm/current.h
@@ -2,7 +2,7 @@
#ifndef _ASM_PARISC_CURRENT_H
#define _ASM_PARISC_CURRENT_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct task_struct;
static __always_inline struct task_struct *get_current(void)
@@ -16,6 +16,6 @@ static __always_inline struct task_struct *get_current(void)
#define current get_current()
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_PARISC_CURRENT_H */
diff --git a/arch/parisc/include/asm/dwarf.h b/arch/parisc/include/asm/dwarf.h
index f4512db86a19..526f4a79262c 100644
--- a/arch/parisc/include/asm/dwarf.h
+++ b/arch/parisc/include/asm/dwarf.h
@@ -6,7 +6,7 @@
#ifndef _ASM_PARISC_DWARF_H
#define _ASM_PARISC_DWARF_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
@@ -15,6 +15,6 @@
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_UNDEFINED .cfi_undefined
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_PARISC_DWARF_H */
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h
index 5cd80ce1163a..9cafa449c4a7 100644
--- a/arch/parisc/include/asm/fixmap.h
+++ b/arch/parisc/include/asm/fixmap.h
@@ -39,7 +39,7 @@
#define KERNEL_MAP_START (GATEWAY_PAGE_SIZE)
#define KERNEL_MAP_END (FIXMAP_START)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
enum fixed_addresses {
@@ -59,6 +59,6 @@ extern void *parisc_vmalloc_start;
void set_fixmap(enum fixed_addresses idx, phys_addr_t phys);
void clear_fixmap(enum fixed_addresses idx);
-#endif /*__ASSEMBLY__*/
+#endif /*__ASSEMBLER__*/
#endif /*_ASM_FIXMAP_H*/
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index f1cc1ee3a647..8b89d2b642eb 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -2,7 +2,7 @@
#ifndef _ASM_PARISC_FTRACE_H
#define _ASM_PARISC_FTRACE_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern void mcount(void);
#define MCOUNT_ADDR ((unsigned long)mcount)
@@ -29,6 +29,6 @@ unsigned long ftrace_call_adjust(unsigned long addr);
#define ftrace_return_address(n) return_address(n)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_PARISC_FTRACE_H */
diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h
index 317ebc5edc9f..f325ae3c622f 100644
--- a/arch/parisc/include/asm/jump_label.h
+++ b/arch/parisc/include/asm/jump_label.h
@@ -2,7 +2,7 @@
#ifndef _ASM_PARISC_JUMP_LABEL_H
#define _ASM_PARISC_JUMP_LABEL_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/stringify.h>
@@ -44,5 +44,5 @@ l_yes:
return true;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/parisc/include/asm/kexec.h b/arch/parisc/include/asm/kexec.h
index 87e174006995..bf31e2d50df9 100644
--- a/arch/parisc/include/asm/kexec.h
+++ b/arch/parisc/include/asm/kexec.h
@@ -14,7 +14,7 @@
#define KEXEC_ARCH KEXEC_ARCH_PARISC
#define ARCH_HAS_KIMAGE_ARCH
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct kimage_arch {
unsigned long initrd_start;
@@ -28,6 +28,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
/* Dummy implementation for now */
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_PARISC_KEXEC_H */
diff --git a/arch/parisc/include/asm/kgdb.h b/arch/parisc/include/asm/kgdb.h
index 317cd434bee3..9ece98bc6d9d 100644
--- a/arch/parisc/include/asm/kgdb.h
+++ b/arch/parisc/include/asm/kgdb.h
@@ -21,7 +21,7 @@
#define CACHE_FLUSH_IS_SAFE 1
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
static inline void arch_kgdb_breakpoint(void)
{
diff --git a/arch/parisc/include/asm/linkage.h b/arch/parisc/include/asm/linkage.h
index cd6fe4febead..d4cad492b971 100644
--- a/arch/parisc/include/asm/linkage.h
+++ b/arch/parisc/include/asm/linkage.h
@@ -15,7 +15,7 @@
*/
#define ASM_NL !
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define ENTRY(name) \
ALIGN !\
@@ -35,6 +35,6 @@ name: ASM_NL\
.procend ASM_NL\
ENDPROC(name)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_PARISC_LINKAGE_H */
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 7fd447092630..8f4e51071ea1 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -8,7 +8,7 @@
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/types.h>
#include <asm/cache.h>
@@ -93,7 +93,7 @@ typedef struct __physmem_range {
extern physmem_range_t pmem_ranges[];
extern int npmem_ranges;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* WARNING: The definitions below must match exactly to sizeof(pte_t)
* etc
@@ -139,7 +139,7 @@ extern int npmem_ranges;
#define KERNEL_BINARY_TEXT_START (__PAGE_OFFSET + 0x100000)
/* These macros don't work for 64-bit C code -- don't allow in C at all */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
# define PA(x) ((x)-__PAGE_OFFSET)
# define VA(x) ((x)+__PAGE_OFFSET)
#endif
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
index 5d2d9737e579..6080a1516b34 100644
--- a/arch/parisc/include/asm/pdc.h
+++ b/arch/parisc/include/asm/pdc.h
@@ -4,7 +4,7 @@
#include <uapi/asm/pdc.h>
-#if !defined(__ASSEMBLY__)
+#if !defined(__ASSEMBLER__)
extern int parisc_narrow_firmware;
@@ -109,5 +109,5 @@ static inline char * os_id_to_string(u16 os_id) {
}
}
-#endif /* !defined(__ASSEMBLY__) */
+#endif /* !defined(__ASSEMBLER__) */
#endif /* _PARISC_PDC_H */
diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h
index 8f160375b865..84ac81b1adde 100644
--- a/arch/parisc/include/asm/pdcpat.h
+++ b/arch/parisc/include/asm/pdcpat.h
@@ -210,7 +210,7 @@
#define PDC_PAT_SYSTEM_INFO 76L
/* PDC_PAT_SYSTEM_INFO uses the same options as PDC_SYSTEM_INFO function. */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#ifdef CONFIG_64BIT
@@ -389,6 +389,6 @@ extern int pdc_pat_mem_get_dimm_phys_location(
struct pdc_pat_mem_phys_mem_location *pret,
unsigned long phys_addr);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* ! __PARISC_PATPDC_H */
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 2ca74a56415c..3b84ee93edaa 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -39,7 +39,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
ptdesc = pagetable_alloc(gfp, PMD_TABLE_ORDER);
if (!ptdesc)
return NULL;
- if (!pagetable_pmd_ctor(ptdesc)) {
+ if (!pagetable_pmd_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index babf65751e81..80f5e2a28413 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -12,7 +12,7 @@
#include <asm/fixmap.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* we simulate an x86-style page table for the linux mm code
*/
@@ -73,7 +73,7 @@ extern void __update_cache(pte_t pte);
mb(); \
} while(0)
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
@@ -226,7 +226,7 @@ extern void __update_cache(pte_t pte);
#define PxD_FLAG_SHIFT (4)
#define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
@@ -338,10 +338,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; re
#endif
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
#define __mk_pte(addr,pgprot) \
({ \
pte_t __pte; \
@@ -351,8 +347,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; re
__pte; \
})
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
pte_t pte;
@@ -477,7 +471,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define pte_same(A,B) (pte_val(A) == pte_val(B))
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* TLB page size encoding - see table 3-1 in parisc20.pdf */
diff --git a/arch/parisc/include/asm/prefetch.h b/arch/parisc/include/asm/prefetch.h
index 6e63f720024d..748eefb27c68 100644
--- a/arch/parisc/include/asm/prefetch.h
+++ b/arch/parisc/include/asm/prefetch.h
@@ -16,7 +16,7 @@
#ifndef __ASM_PARISC_PREFETCH_H
#define __ASM_PARISC_PREFETCH_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_PREFETCH
#define ARCH_HAS_PREFETCH
@@ -40,6 +40,6 @@ static inline void prefetchw(const void *addr)
#endif /* CONFIG_PA20 */
#endif /* CONFIG_PREFETCH */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_PARISC_PROCESSOR_H */
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 77fac02188e1..4c14bde39aac 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -9,7 +9,7 @@
#ifndef __ASM_PARISC_PROCESSOR_H
#define __ASM_PARISC_PROCESSOR_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/threads.h>
#include <linux/irqreturn.h>
@@ -20,7 +20,7 @@
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/percpu.h>
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#define HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -45,7 +45,7 @@
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX DEFAULT_TASK_SIZE
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct rlimit;
unsigned long mmap_upper_limit(struct rlimit *rlim_stack);
@@ -325,6 +325,6 @@ extern void sba_directed_lmmio(struct parisc_device *, struct resource *);
extern void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask);
extern void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_PARISC_PROCESSOR_H */
diff --git a/arch/parisc/include/asm/psw.h b/arch/parisc/include/asm/psw.h
index 46921ffcc407..9140e1ab7e63 100644
--- a/arch/parisc/include/asm/psw.h
+++ b/arch/parisc/include/asm/psw.h
@@ -60,7 +60,7 @@
#define USER_PSW_MASK (WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
#define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* The program status word as bitfields. */
struct pa_psw {
@@ -99,6 +99,6 @@ struct pa_psw {
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW))
#endif
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h
index e84883c6b4c7..85c3d7409bbc 100644
--- a/arch/parisc/include/asm/signal.h
+++ b/arch/parisc/include/asm/signal.h
@@ -4,12 +4,12 @@
#include <uapi/asm/signal.h>
-# ifndef __ASSEMBLY__
+# ifndef __ASSEMBLER__
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */
#include <asm/sigcontext.h>
-#endif /* !__ASSEMBLY */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_PARISC_SIGNAL_H */
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index 94d1f21ce99a..0cf1c3a2696a 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -12,7 +12,7 @@ extern int init_per_cpu(int cpuid);
#define PDC_OS_BOOT_RENDEZVOUS 0x10
#define PDC_OS_BOOT_RENDEZVOUS_HI 0x28
-#ifndef ASSEMBLY
+#ifndef __ASSEMBLER__
#include <linux/bitops.h>
#include <linux/threads.h> /* for NR_CPUS */
#include <linux/cpumask.h>
@@ -34,7 +34,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#define raw_smp_processor_id() (current_thread_info()->cpu)
-#endif /* !ASSEMBLY */
+#endif /* !__ASSEMBLER__ */
#else /* CONFIG_SMP */
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 7b986b09dba8..8e6889bc23cc 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -6,7 +6,7 @@
#define SPINLOCK_BREAK_INSN 0x0000c006 /* break 6,6 */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
typedef struct {
volatile unsigned int lock[4];
@@ -26,7 +26,7 @@ typedef struct {
volatile unsigned int counter;
} arch_rwlock_t;
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
#define __ARCH_RW_LOCK_UNLOCKED { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
index 00b127a5e09b..c11222798ab2 100644
--- a/arch/parisc/include/asm/syscall.h
+++ b/arch/parisc/include/asm/syscall.h
@@ -17,6 +17,13 @@ static inline long syscall_get_nr(struct task_struct *tsk,
return regs->gr[20];
}
+static inline void syscall_set_nr(struct task_struct *tsk,
+ struct pt_regs *regs,
+ int nr)
+{
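+	/* gr[20] carries the syscall number, matching syscall_get_nr() above. */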
+ regs->gr[20] = nr;
+}
+
static inline void syscall_get_arguments(struct task_struct *tsk,
struct pt_regs *regs,
unsigned long *args)
@@ -29,6 +36,18 @@ static inline void syscall_get_arguments(struct task_struct *tsk,
args[0] = regs->gr[26];
}
+static inline void syscall_set_arguments(struct task_struct *tsk,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
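+	/* Mirror syscall_get_arguments(): args 0..5 live in gr[26] down to gr[21]. */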
+ regs->gr[21] = args[5];
+ regs->gr[22] = args[4];
+ regs->gr[23] = args[3];
+ regs->gr[24] = args[2];
+ regs->gr[25] = args[1];
+ regs->gr[26] = args[0];
+}
+
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 1a58795f785c..b283738bb6da 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -2,7 +2,7 @@
#ifndef _ASM_PARISC_THREAD_INFO_H
#define _ASM_PARISC_THREAD_INFO_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/processor.h>
#include <asm/special_insns.h>
@@ -20,7 +20,7 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#endif /* !__ASSEMBLY */
+#endif /* !__ASSEMBLER__ */
/* thread information allocation */
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
index 0ccdb738a9a3..10c8fb68e404 100644
--- a/arch/parisc/include/asm/traps.h
+++ b/arch/parisc/include/asm/traps.h
@@ -4,7 +4,7 @@
#define PARISC_ITLB_TRAP 6 /* defined by architecture. Do not change. */
-#if !defined(__ASSEMBLY__)
+#if !defined(__ASSEMBLER__)
struct pt_regs;
/* traps.c */
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index a97c0fd55f91..3e46c6ea9df6 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -6,7 +6,7 @@
#define __NR_Linux_syscalls __NR_syscalls
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define SYS_ify(syscall_name) __NR_##syscall_name
@@ -144,7 +144,7 @@
#define __ARCH_WANT_SYS_UTIME
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#undef STR
diff --git a/arch/parisc/include/asm/vdso.h b/arch/parisc/include/asm/vdso.h
index 5f581c1d6460..81bc1d42802a 100644
--- a/arch/parisc/include/asm/vdso.h
+++ b/arch/parisc/include/asm/vdso.h
@@ -2,7 +2,7 @@
#ifndef __PARISC_VDSO_H__
#define __PARISC_VDSO_H__
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_64BIT
#include <generated/vdso64-offsets.h>
@@ -12,7 +12,7 @@
#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name))
#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name))
-#endif /* __ASSEMBLY __ */
+#endif /* __ASSEMBLER__ */
/* Default link addresses for the vDSOs */
#define VDSO_LBASE 0
diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
index fef4f2e96160..65031ddf8372 100644
--- a/arch/parisc/include/uapi/asm/pdc.h
+++ b/arch/parisc/include/uapi/asm/pdc.h
@@ -361,7 +361,7 @@
/* size of the pdc_result buffer for firmware.c */
#define NUM_PDC_RESULT 32
-#if !defined(__ASSEMBLY__)
+#if !defined(__ASSEMBLER__)
/* flags for hardware_path */
#define PF_AUTOBOOT 0x80
@@ -741,6 +741,6 @@ struct pdc_firm_test_get_rtn_block { /* PDC_MODEL/PDC_FIRM_TEST_GET */
#define PIRANHA_CPU_ID 0x13
#define MAKO_CPU_ID 0x14
-#endif /* !defined(__ASSEMBLY__) */
+#endif /* !defined(__ASSEMBLER__) */
#endif /* _UAPI_PARISC_PDC_H */
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index 40d7a574c5dd..d99accf37341 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -61,7 +61,7 @@
#define _NSIG_BPW (sizeof(unsigned long) * 8)
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
-# ifndef __ASSEMBLY__
+# ifndef __ASSEMBLER__
# include <linux/types.h>
@@ -80,5 +80,5 @@ typedef struct sigaltstack {
__kernel_size_t ss_size;
} stack_t;
-#endif /* !__ASSEMBLY */
+#endif /* !__ASSEMBLER__ */
#endif /* _UAPI_ASM_PARISC_SIGNAL_H */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 96831c988606..1f2d5b7a7f5d 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -142,6 +142,8 @@
#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
#define SO_DEVMEM_DONTNEED 0x4050
+#define SO_PASSRIGHTS 0x4051
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index f4626943633a..00e97204783e 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -25,7 +25,7 @@
#define DPRINTF(fmt, args...)
#endif
-#define RFMT "%#08lx"
+#define RFMT "0x%08lx"
/* 1111 1100 0000 0000 0001 0011 1100 0000 */
#define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6)
diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c
index 34495446e051..71829cb7bc81 100644
--- a/arch/parisc/math-emu/driver.c
+++ b/arch/parisc/math-emu/driver.c
@@ -97,9 +97,19 @@ handle_fpe(struct pt_regs *regs)
memcpy(regs->fr, frcopy, sizeof regs->fr);
if (signalcode != 0) {
- force_sig_fault(signalcode >> 24, signalcode & 0xffffff,
- (void __user *) regs->iaoq[0]);
- return -1;
+ int sig = signalcode >> 24;
+
+ if (sig == SIGFPE) {
+ /*
+ * Clear floating point trap bit to avoid trapping
+ * again on the first floating-point instruction in
+ * the userspace signal handler.
+ */
+ regs->fr[0] &= ~(1ULL << 38);
+ }
+ force_sig_fault(sig, signalcode & 0xffffff,
+ (void __user *) regs->iaoq[0]);
+ return -1;
}
return signalcode ? -1 : 0;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6722625a406a..c3e0cc83f120 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -277,6 +277,7 @@ config PPC
select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_DYNAMIC_KEY
select HAVE_RETHOOK if KPROBES
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
@@ -894,7 +895,7 @@ config DATA_SHIFT
int "Data shift" if DATA_SHIFT_BOOL
default 24 if STRICT_KERNEL_RWX && PPC64
range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
- range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
+ range 14 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_85xx
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
@@ -907,10 +908,10 @@ config DATA_SHIFT
On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.
	  The smaller the alignment, the greater the number of DBATs required.
- On 8xx, large pages (512kb or 8M) are used to map kernel linear
- memory. Aligning to 8M reduces TLB misses as only 8M pages are used
- in that case. If PIN_TLB is selected, it must be aligned to 8M as
- 8M pages will be pinned.
+ On 8xx, large pages (16kb or 512kb or 8M) are used to map kernel
+ linear memory. Aligning to 8M reduces TLB misses as only 8M pages
+ are used in that case. If PIN_TLB is selected, it must be aligned
+ to 8M as 8M pages will be pinned.
config ARCH_FORCE_MAX_ORDER
int "Order of maximal physically contiguous allocations"
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 184d0680e661..a7ab087d412c 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -70,6 +70,7 @@ BOOTCPPFLAGS := -nostdinc $(LINUXINCLUDE)
BOOTCPPFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include)
BOOTCFLAGS := $(BOOTTARGETFLAGS) \
+ -std=gnu11 \
-Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -O2 \
-msoft-float -mno-altivec -mno-vsx \
diff --git a/arch/powerpc/boot/rs6000.h b/arch/powerpc/boot/rs6000.h
index a9d879155ef9..16df8f3c43f1 100644
--- a/arch/powerpc/boot/rs6000.h
+++ b/arch/powerpc/boot/rs6000.h
@@ -1,11 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* IBM RS/6000 "XCOFF" file definitions for BFD.
Copyright (C) 1990, 1991 Free Software Foundation, Inc.
- FIXME: Can someone provide a transliteration of this name into ASCII?
- Using the following chars caused a compiler warning on HIUX (so I replaced
- them with octal escapes), and isn't useful without an understanding of what
- character set it is.
- Written by Mimi Ph\373\364ng-Th\345o V\365 of IBM
+ Written by Mimi Phuong-Thao Vo of IBM
and John Gilmore of Cygnus Support. */
/********************** FILE HEADER **********************/
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 1db60fe13802..3d8dc822282a 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -234,10 +234,8 @@ fi
# suppress some warnings in recent ld versions
nowarn="-z noexecstack"
-if ! ld_is_lld; then
- if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then
- nowarn="$nowarn --no-warn-rwx-segments"
- fi
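+# Probe the linker for --no-warn-rwx-segments support directly instead of parsing version strings.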
+if "${CROSS}ld" -v --no-warn-rwx-segments >/dev/null 2>&1; then
+ nowarn="$nowarn --no-warn-rwx-segments"
fi
platformo=$object/"$platform".o
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index 2479ab62d12f..98221bda380d 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -91,5 +91,4 @@ CONFIG_AFFS_FS=m
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index 20891c413149..5757625469c4 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -85,8 +85,6 @@ CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_T10DIF=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
index 1715ff547442..b99caba8724a 100644
--- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
@@ -73,6 +73,5 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_NLS_ISO8859_8=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
index e65c0057147f..11163052fdba 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
@@ -80,5 +80,4 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
index 17714bf0ed40..312d39e4242c 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
@@ -72,5 +72,4 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
index 58fae5131fa7..ac27f99faab8 100644
--- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
@@ -75,6 +75,5 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index 6f58ee1edf1f..7beb36a41d45 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -221,8 +221,6 @@ CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_MD5=y
diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig
index e7080497048d..0a42072fa23c 100644
--- a/arch/powerpc/configs/85xx/stx_gp3_defconfig
+++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig
@@ -60,8 +60,6 @@ CONFIG_CRAMFS=m
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_NLS=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_T10DIF=m
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_BDI_SWITCH=y
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
index 3a6381aa9fdc..488d03ae6d6c 100644
--- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
+++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
@@ -132,7 +132,6 @@ CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_T10DIF=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_CRYPTO_HMAC=y
diff --git a/arch/powerpc/configs/86xx-hw.config b/arch/powerpc/configs/86xx-hw.config
index 0cb24b33c88e..e7bd265fae5a 100644
--- a/arch/powerpc/configs/86xx-hw.config
+++ b/arch/powerpc/configs/86xx-hw.config
@@ -5,7 +5,6 @@ CONFIG_BROADCOM_PHY=y
# CONFIG_CARDBUS is not set
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_ST=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_HMAC=y
CONFIG_DS1682=y
CONFIG_EEPROM_LEGACY=y
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index 200bb1ecb560..69ef3dc31c4b 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -106,7 +106,6 @@ CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=m
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index fb314f75ad4b..b799c95480ae 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -110,7 +110,6 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=m
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index d6d2a458847b..2f81bc2d819e 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -15,7 +15,6 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHED=y
CONFIG_CGROUPS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_CRC_T10DIF=y
CONFIG_CPUSETS=y
CONFIG_CRAMFS=y
CONFIG_CRYPTO_MD4=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 9215bed53291..428f17b45513 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -231,12 +231,11 @@ CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_BOOTX_TEXT=y
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index d77eeb525366..cdd99657b71b 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -82,7 +82,6 @@ CONFIG_ROOT_NFS=y
CONFIG_CIFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
index fa707de761be..b564f9e33a0d 100644
--- a/arch/powerpc/configs/linkstation_defconfig
+++ b/arch/powerpc/configs/linkstation_defconfig
@@ -125,8 +125,6 @@ CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_932=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_CRC_CCITT=m
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 83c4710017e9..a815d9e5e3e8 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -97,7 +97,6 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index a0d27c59ea78..dfbdd5e8e108 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -38,4 +38,3 @@ CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_CCITT=y
diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig
index d1c7fd5bf34b..fa2b3b9c5945 100644
--- a/arch/powerpc/configs/mvme5100_defconfig
+++ b/arch/powerpc/configs/mvme5100_defconfig
@@ -107,8 +107,6 @@ CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_932=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_CRC_CCITT=m
-CONFIG_CRC_T10DIF=y
CONFIG_XZ_DEC=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 61993944db40..8bbf51b38480 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -159,7 +159,6 @@ CONFIG_NFSD=y
CONFIG_NFSD_V4=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index e8b3f67bf3f5..ae45f70b29f0 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -87,7 +87,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_IP_DCCP=m
CONFIG_BT=m
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
@@ -276,7 +275,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 6b6d7467fecf..379229c982a4 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -317,7 +317,7 @@ CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
CONFIG_XMON=y
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5_PPC=m
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 8b595f67068c..41c930f74ed4 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -90,7 +90,6 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
-CONFIG_CRC_T10DIF=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_CRYPTO_ECB=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 5fa154185efa..3423c405cad4 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -377,7 +377,7 @@ CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
CONFIG_IMA_ARCH_POLICY=y
CONFIG_IMA_APPRAISE_MODSIG=y
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_SERPENT=m
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 4c05f4e4d505..90247b2a0ab0 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -207,7 +207,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
@@ -221,7 +220,7 @@ CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
CONFIG_XMON=y
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index a91a766b71a4..f96f8ed9856c 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -225,7 +225,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
CONFIG_TIPC=m
CONFIG_ATM=m
CONFIG_ATM_CLIP=m
@@ -1073,7 +1072,7 @@ CONFIG_SECURITY_NETWORK_XFRM=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 2b175ddf82f0..0b48d2b776c4 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -148,8 +148,6 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_LZO=m
-CONFIG_CRC_CCITT=m
-CONFIG_CRC_T10DIF=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 3086c4a12d6d..2b71a6dc399e 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -278,8 +278,6 @@ CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY=y
# CONFIG_INTEGRITY is not set
CONFIG_LSM="yama,loadpin,safesetid,integrity"
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=y
-CONFIG_CRC_ITU_T=y
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_IA64 is not set
# CONFIG_XZ_DEC_ARM is not set
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index 7a978d396991..e415222bd839 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -75,4 +75,3 @@ CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_CRC_T10DIF=y
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index 5017a697b67b..7c714a19221e 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -114,7 +114,6 @@ CONFIG_ROOT_NFS=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SPINLOCK=y
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
index 370db8192ce6..caaa359f4742 100644
--- a/arch/powerpc/crypto/Kconfig
+++ b/arch/powerpc/crypto/Kconfig
@@ -17,7 +17,6 @@ config CRYPTO_CURVE25519_PPC64
config CRYPTO_MD5_PPC
tristate "Digests: MD5"
- depends on PPC
select CRYPTO_HASH
help
MD5 message digest algorithm (RFC1321)
@@ -26,7 +25,6 @@ config CRYPTO_MD5_PPC
config CRYPTO_SHA1_PPC
tristate "Hash functions: SHA-1"
- depends on PPC
help
SHA-1 secure hash algorithm (FIPS 180)
@@ -34,27 +32,16 @@ config CRYPTO_SHA1_PPC
config CRYPTO_SHA1_PPC_SPE
tristate "Hash functions: SHA-1 (SPE)"
- depends on PPC && SPE
+ depends on SPE
help
SHA-1 secure hash algorithm (FIPS 180)
Architecture: powerpc using
- SPE (Signal Processing Engine) extensions
-config CRYPTO_SHA256_PPC_SPE
- tristate "Hash functions: SHA-224 and SHA-256 (SPE)"
- depends on PPC && SPE
- select CRYPTO_SHA256
- select CRYPTO_HASH
- help
- SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
- Architecture: powerpc using
- - SPE (Signal Processing Engine) extensions
-
config CRYPTO_AES_PPC_SPE
tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (SPE)"
- depends on PPC && SPE
+ depends on SPE
select CRYPTO_SKCIPHER
help
Block ciphers: AES cipher algorithms (FIPS-197)
@@ -92,33 +79,6 @@ config CRYPTO_AES_GCM_P10
Support for cryptographic acceleration instructions on Power10 or
later CPU. This module supports stitched acceleration for AES/GCM.
-config CRYPTO_CHACHA20_P10
- tristate
- depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_CHACHA_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
- default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
- stream cipher algorithms
-
- Architecture: PowerPC64
- - Power10 or later
- - Little-endian
-
-config CRYPTO_POLY1305_P10
- tristate "Hash functions: Poly1305 (P10 or later)"
- depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
- select CRYPTO_HASH
- select CRYPTO_LIB_POLY1305_GENERIC
- help
- Poly1305 authenticator algorithm (RFC7539)
-
- Architecture: PowerPC64
- - Power10 or later
- - Little-endian
-
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
depends on PPC64 && VSX
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index 2f00b22b0823..8c2936ae466f 100644
--- a/arch/powerpc/crypto/Makefile
+++ b/arch/powerpc/crypto/Makefile
@@ -9,10 +9,7 @@ obj-$(CONFIG_CRYPTO_AES_PPC_SPE) += aes-ppc-spe.o
obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o
obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o
obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o
-obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o
-obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o
-obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o
obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
obj-$(CONFIG_CRYPTO_CURVE25519_PPC64) += curve25519-ppc64le.o
@@ -20,10 +17,7 @@ aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-
md5-ppc-y := md5-asm.o md5-glue.o
sha1-powerpc-y := sha1-powerpc-asm.o sha1.o
sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
-sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o
-chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o
-poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
curve25519-ppc64le-y := curve25519-ppc64le-core.o curve25519-ppc64le_asm.o
diff --git a/arch/powerpc/crypto/aes.c b/arch/powerpc/crypto/aes.c
index ec06189fbf99..3f1e5e894902 100644
--- a/arch/powerpc/crypto/aes.c
+++ b/arch/powerpc/crypto/aes.c
@@ -7,15 +7,15 @@
* Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
*/
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
#include "aesp8-ppc.h"
diff --git a/arch/powerpc/crypto/aes_cbc.c b/arch/powerpc/crypto/aes_cbc.c
index ed0debc7acb5..5f2a4f375eef 100644
--- a/arch/powerpc/crypto/aes_cbc.c
+++ b/arch/powerpc/crypto/aes_cbc.c
@@ -12,6 +12,10 @@
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
#include "aesp8-ppc.h"
diff --git a/arch/powerpc/crypto/aes_ctr.c b/arch/powerpc/crypto/aes_ctr.c
index 3da75f42529a..e27c4036e711 100644
--- a/arch/powerpc/crypto/aes_ctr.c
+++ b/arch/powerpc/crypto/aes_ctr.c
@@ -12,6 +12,10 @@
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
#include "aesp8-ppc.h"
diff --git a/arch/powerpc/crypto/aes_xts.c b/arch/powerpc/crypto/aes_xts.c
index dabbccb41550..9440e771cede 100644
--- a/arch/powerpc/crypto/aes_xts.c
+++ b/arch/powerpc/crypto/aes_xts.c
@@ -13,6 +13,10 @@
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
#include "aesp8-ppc.h"
diff --git a/arch/powerpc/crypto/chacha-p10-glue.c b/arch/powerpc/crypto/chacha-p10-glue.c
deleted file mode 100644
index d8796decc1fb..000000000000
--- a/arch/powerpc/crypto/chacha-p10-glue.c
+++ /dev/null
@@ -1,221 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * PowerPC P10 (ppc64le) accelerated ChaCha and XChaCha stream ciphers,
- * including ChaCha20 (RFC7539)
- *
- * Copyright 2023- IBM Corp. All rights reserved.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/cpufeature.h>
-#include <linux/sizes.h>
-#include <asm/simd.h>
-#include <asm/switch_to.h>
-
-asmlinkage void chacha_p10le_8x(u32 *state, u8 *dst, const u8 *src,
- unsigned int len, int nrounds);
-
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10);
-
-static void vsx_begin(void)
-{
- preempt_disable();
- enable_kernel_vsx();
-}
-
-static void vsx_end(void)
-{
- disable_kernel_vsx();
- preempt_enable();
-}
-
-static void chacha_p10_do_8x(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds)
-{
- unsigned int l = bytes & ~0x0FF;
-
- if (l > 0) {
- chacha_p10le_8x(state, dst, src, l, nrounds);
- bytes -= l;
- src += l;
- dst += l;
- state[12] += l / CHACHA_BLOCK_SIZE;
- }
-
- if (bytes > 0)
- chacha_crypt_generic(state, dst, src, bytes, nrounds);
-}
-
-void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
-{
- hchacha_block_generic(state, stream, nrounds);
-}
-EXPORT_SYMBOL(hchacha_block_arch);
-
-void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
- int nrounds)
-{
- if (!static_branch_likely(&have_p10) || bytes <= CHACHA_BLOCK_SIZE ||
- !crypto_simd_usable())
- return chacha_crypt_generic(state, dst, src, bytes, nrounds);
-
- do {
- unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
-
- vsx_begin();
- chacha_p10_do_8x(state, dst, src, todo, nrounds);
- vsx_end();
-
- bytes -= todo;
- src += todo;
- dst += todo;
- } while (bytes);
-}
-EXPORT_SYMBOL(chacha_crypt_arch);
-
-static int chacha_p10_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
-{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
- if (err)
- return err;
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = rounddown(nbytes, walk.stride);
-
- if (!crypto_simd_usable()) {
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes,
- ctx->nrounds);
- } else {
- vsx_begin();
- chacha_p10_do_8x(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes, ctx->nrounds);
- vsx_end();
- }
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- if (err)
- break;
- }
-
- return err;
-}
-
-static int chacha_p10(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_p10_stream_xor(req, ctx, req->iv);
-}
-
-static int xchacha_p10(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- chacha_init(state, ctx->key, req->iv);
- hchacha_block_arch(state, subctx.key, ctx->nrounds);
- subctx.nrounds = ctx->nrounds;
-
- memcpy(&real_iv[0], req->iv + 24, 8);
- memcpy(&real_iv[8], req->iv + 16, 8);
- return chacha_p10_stream_xor(req, &subctx, real_iv);
-}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-p10",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_p10,
- .decrypt = chacha_p10,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-p10",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_p10,
- .decrypt = xchacha_p10,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-p10",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_p10,
- .decrypt = xchacha_p10,
- }
-};
-
-static int __init chacha_p10_init(void)
-{
- if (!cpu_has_feature(CPU_FTR_ARCH_31))
- return 0;
-
- static_branch_enable(&have_p10);
-
- return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit chacha_p10_exit(void)
-{
- if (!static_branch_likely(&have_p10))
- return;
-
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-module_init(chacha_p10_init);
-module_exit(chacha_p10_exit);
-
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)");
-MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-p10");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-p10");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-p10");
diff --git a/arch/powerpc/crypto/ghash.c b/arch/powerpc/crypto/ghash.c
index 77eca20bc7ac..7308735bdb33 100644
--- a/arch/powerpc/crypto/ghash.c
+++ b/arch/powerpc/crypto/ghash.c
@@ -11,19 +11,18 @@
* Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*/
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
-#include <asm/simd.h>
+#include "aesp8-ppc.h"
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/gf128mul.h>
#include <crypto/ghash.h>
-#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <crypto/b128ops.h>
-#include "aesp8-ppc.h"
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
@@ -39,15 +38,12 @@ struct p8_ghash_ctx {
struct p8_ghash_desc_ctx {
u64 shash[2];
- u8 buffer[GHASH_DIGEST_SIZE];
- int bytes;
};
static int p8_ghash_init(struct shash_desc *desc)
{
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- dctx->bytes = 0;
memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
return 0;
}
@@ -74,27 +70,30 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
}
static inline void __ghash_block(struct p8_ghash_ctx *ctx,
- struct p8_ghash_desc_ctx *dctx)
+ struct p8_ghash_desc_ctx *dctx,
+ const u8 *src)
{
if (crypto_simd_usable()) {
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
- gcm_ghash_p8(dctx->shash, ctx->htable,
- dctx->buffer, GHASH_DIGEST_SIZE);
+ gcm_ghash_p8(dctx->shash, ctx->htable, src, GHASH_BLOCK_SIZE);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
} else {
- crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
+ crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
gf128mul_lle((be128 *)dctx->shash, &ctx->key);
}
}
-static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
- struct p8_ghash_desc_ctx *dctx,
- const u8 *src, unsigned int srclen)
+static inline int __ghash_blocks(struct p8_ghash_ctx *ctx,
+ struct p8_ghash_desc_ctx *dctx,
+ const u8 *src, unsigned int srclen)
{
+ int remain = srclen - round_down(srclen, GHASH_BLOCK_SIZE);
+
+ srclen -= remain;
if (crypto_simd_usable()) {
preempt_disable();
pagefault_disable();
@@ -105,62 +104,38 @@ static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
pagefault_enable();
preempt_enable();
} else {
- while (srclen >= GHASH_BLOCK_SIZE) {
+ do {
crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
gf128mul_lle((be128 *)dctx->shash, &ctx->key);
srclen -= GHASH_BLOCK_SIZE;
src += GHASH_BLOCK_SIZE;
- }
+ } while (srclen);
}
+
+ return remain;
}
static int p8_ghash_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
- unsigned int len;
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (dctx->bytes) {
- if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
- memcpy(dctx->buffer + dctx->bytes, src,
- srclen);
- dctx->bytes += srclen;
- return 0;
- }
- memcpy(dctx->buffer + dctx->bytes, src,
- GHASH_DIGEST_SIZE - dctx->bytes);
-
- __ghash_block(ctx, dctx);
-
- src += GHASH_DIGEST_SIZE - dctx->bytes;
- srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
- dctx->bytes = 0;
- }
- len = srclen & ~(GHASH_DIGEST_SIZE - 1);
- if (len) {
- __ghash_blocks(ctx, dctx, src, len);
- src += len;
- srclen -= len;
- }
- if (srclen) {
- memcpy(dctx->buffer, src, srclen);
- dctx->bytes = srclen;
- }
- return 0;
+ return __ghash_blocks(ctx, dctx, src, srclen);
}
-static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+static int p8_ghash_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
- int i;
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (dctx->bytes) {
- for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
- dctx->buffer[i] = 0;
- __ghash_block(ctx, dctx);
- dctx->bytes = 0;
+ if (len) {
+ u8 buf[GHASH_BLOCK_SIZE] = {};
+
+ memcpy(buf, src, len);
+ __ghash_block(ctx, dctx, buf);
+ memzero_explicit(buf, sizeof(buf));
}
memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
return 0;
@@ -170,14 +145,14 @@ struct shash_alg p8_ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = p8_ghash_init,
.update = p8_ghash_update,
- .final = p8_ghash_final,
+ .finup = p8_ghash_finup,
.setkey = p8_ghash_setkey,
- .descsize = sizeof(struct p8_ghash_desc_ctx)
- + sizeof(struct ghash_desc_ctx),
+ .descsize = sizeof(struct p8_ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "p8_ghash",
.cra_priority = 1000,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct p8_ghash_ctx),
.cra_module = THIS_MODULE,
diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c
index c24f605033bd..204440a90cd8 100644
--- a/arch/powerpc/crypto/md5-glue.c
+++ b/arch/powerpc/crypto/md5-glue.c
@@ -8,25 +8,13 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/md5.h>
-#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
extern void ppc_md5_transform(u32 *state, const u8 *src, u32 blocks);
-static inline void ppc_md5_clear_context(struct md5_state *sctx)
-{
- int count = sizeof(struct md5_state) >> 2;
- u32 *ptr = (u32 *)sctx;
-
- /* make sure we can clear the fast way */
- BUILD_BUG_ON(sizeof(struct md5_state) % 4);
- do { *ptr++ = 0; } while (--count);
-}
-
static int ppc_md5_init(struct shash_desc *desc)
{
struct md5_state *sctx = shash_desc_ctx(desc);
@@ -44,79 +32,34 @@ static int ppc_md5_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct md5_state *sctx = shash_desc_ctx(desc);
- const unsigned int offset = sctx->byte_count & 0x3f;
- unsigned int avail = 64 - offset;
- const u8 *src = data;
- sctx->byte_count += len;
-
- if (avail > len) {
- memcpy((char *)sctx->block + offset, src, len);
- return 0;
- }
-
- if (offset) {
- memcpy((char *)sctx->block + offset, src, avail);
- ppc_md5_transform(sctx->hash, (const u8 *)sctx->block, 1);
- len -= avail;
- src += avail;
- }
-
- if (len > 63) {
- ppc_md5_transform(sctx->hash, src, len >> 6);
- src += len & ~0x3f;
- len &= 0x3f;
- }
-
- memcpy((char *)sctx->block, src, len);
- return 0;
+ sctx->byte_count += round_down(len, MD5_HMAC_BLOCK_SIZE);
+ ppc_md5_transform(sctx->hash, data, len >> 6);
+ return len - round_down(len, MD5_HMAC_BLOCK_SIZE);
}
-static int ppc_md5_final(struct shash_desc *desc, u8 *out)
+static int ppc_md5_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int offset, u8 *out)
{
struct md5_state *sctx = shash_desc_ctx(desc);
- const unsigned int offset = sctx->byte_count & 0x3f;
- const u8 *src = (const u8 *)sctx->block;
- u8 *p = (u8 *)src + offset;
- int padlen = 55 - offset;
- __le64 *pbits = (__le64 *)((char *)sctx->block + 56);
+ __le64 block[MD5_BLOCK_WORDS] = {};
+ u8 *p = memcpy(block, src, offset);
__le32 *dst = (__le32 *)out;
+ __le64 *pbits;
+ src = p;
+ p += offset;
*p++ = 0x80;
-
- if (padlen < 0) {
- memset(p, 0x00, padlen + sizeof (u64));
- ppc_md5_transform(sctx->hash, src, 1);
- p = (char *)sctx->block;
- padlen = 56;
- }
-
- memset(p, 0, padlen);
+ sctx->byte_count += offset;
+ pbits = &block[(MD5_BLOCK_WORDS / (offset > 55 ? 1 : 2)) - 1];
*pbits = cpu_to_le64(sctx->byte_count << 3);
- ppc_md5_transform(sctx->hash, src, 1);
+ ppc_md5_transform(sctx->hash, src, (pbits - block + 1) / 8);
+ memzero_explicit(block, sizeof(block));
dst[0] = cpu_to_le32(sctx->hash[0]);
dst[1] = cpu_to_le32(sctx->hash[1]);
dst[2] = cpu_to_le32(sctx->hash[2]);
dst[3] = cpu_to_le32(sctx->hash[3]);
-
- ppc_md5_clear_context(sctx);
- return 0;
-}
-
-static int ppc_md5_export(struct shash_desc *desc, void *out)
-{
- struct md5_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
-}
-
-static int ppc_md5_import(struct shash_desc *desc, const void *in)
-{
- struct md5_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
return 0;
}
@@ -124,15 +67,13 @@ static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = ppc_md5_init,
.update = ppc_md5_update,
- .final = ppc_md5_final,
- .export = ppc_md5_export,
- .import = ppc_md5_import,
- .descsize = sizeof(struct md5_state),
- .statesize = sizeof(struct md5_state),
+ .finup = ppc_md5_finup,
+ .descsize = MD5_STATE_SIZE,
.base = {
.cra_name = "md5",
.cra_driver_name= "md5-ppc",
.cra_priority = 200,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/poly1305-p10-glue.c b/arch/powerpc/crypto/poly1305-p10-glue.c
deleted file mode 100644
index 369686e9370b..000000000000
--- a/arch/powerpc/crypto/poly1305-p10-glue.c
+++ /dev/null
@@ -1,186 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Poly1305 authenticator algorithm, RFC7539.
- *
- * Copyright 2023- IBM Corp. All rights reserved.
- */
-
-#include <crypto/algapi.h>
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/jump_label.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/poly1305.h>
-#include <crypto/internal/simd.h>
-#include <linux/cpufeature.h>
-#include <linux/unaligned.h>
-#include <asm/simd.h>
-#include <asm/switch_to.h>
-
-asmlinkage void poly1305_p10le_4blocks(void *h, const u8 *m, u32 mlen);
-asmlinkage void poly1305_64s(void *h, const u8 *m, u32 mlen, int highbit);
-asmlinkage void poly1305_emit_64(void *h, void *s, u8 *dst);
-
-static void vsx_begin(void)
-{
- preempt_disable();
- enable_kernel_vsx();
-}
-
-static void vsx_end(void)
-{
- disable_kernel_vsx();
- preempt_enable();
-}
-
-static int crypto_poly1305_p10_init(struct shash_desc *desc)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- poly1305_core_init(&dctx->h);
- dctx->buflen = 0;
- dctx->rset = 0;
- dctx->sset = false;
-
- return 0;
-}
-
-static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx,
- const u8 *inp, unsigned int len)
-{
- unsigned int acc = 0;
-
- if (unlikely(!dctx->sset)) {
- if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) {
- struct poly1305_core_key *key = &dctx->core_r;
-
- key->key.r64[0] = get_unaligned_le64(&inp[0]);
- key->key.r64[1] = get_unaligned_le64(&inp[8]);
- inp += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- acc += POLY1305_BLOCK_SIZE;
- dctx->rset = 1;
- }
- if (len >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(&inp[0]);
- dctx->s[1] = get_unaligned_le32(&inp[4]);
- dctx->s[2] = get_unaligned_le32(&inp[8]);
- dctx->s[3] = get_unaligned_le32(&inp[12]);
- acc += POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- }
- return acc;
-}
-
-static int crypto_poly1305_p10_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- unsigned int bytes, used;
-
- if (unlikely(dctx->buflen)) {
- bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- srclen -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf,
- POLY1305_BLOCK_SIZE))) {
- vsx_begin();
- poly1305_64s(&dctx->h, dctx->buf,
- POLY1305_BLOCK_SIZE, 1);
- vsx_end();
- }
- dctx->buflen = 0;
- }
- }
-
- if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
- bytes = round_down(srclen, POLY1305_BLOCK_SIZE);
- used = crypto_poly1305_setdctxkey(dctx, src, bytes);
- if (likely(used)) {
- srclen -= used;
- src += used;
- }
- if (crypto_simd_usable() && (srclen >= POLY1305_BLOCK_SIZE*4)) {
- vsx_begin();
- poly1305_p10le_4blocks(&dctx->h, src, srclen);
- vsx_end();
- src += srclen - (srclen % (POLY1305_BLOCK_SIZE * 4));
- srclen %= POLY1305_BLOCK_SIZE * 4;
- }
- while (srclen >= POLY1305_BLOCK_SIZE) {
- vsx_begin();
- poly1305_64s(&dctx->h, src, POLY1305_BLOCK_SIZE, 1);
- vsx_end();
- srclen -= POLY1305_BLOCK_SIZE;
- src += POLY1305_BLOCK_SIZE;
- }
- }
-
- if (unlikely(srclen)) {
- dctx->buflen = srclen;
- memcpy(dctx->buf, src, srclen);
- }
-
- return 0;
-}
-
-static int crypto_poly1305_p10_final(struct shash_desc *desc, u8 *dst)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (unlikely(!dctx->sset))
- return -ENOKEY;
-
- if ((dctx->buflen)) {
- dctx->buf[dctx->buflen++] = 1;
- memset(dctx->buf + dctx->buflen, 0,
- POLY1305_BLOCK_SIZE - dctx->buflen);
- vsx_begin();
- poly1305_64s(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
- vsx_end();
- dctx->buflen = 0;
- }
-
- poly1305_emit_64(&dctx->h, &dctx->s, dst);
- return 0;
-}
-
-static struct shash_alg poly1305_alg = {
- .digestsize = POLY1305_DIGEST_SIZE,
- .init = crypto_poly1305_p10_init,
- .update = crypto_poly1305_p10_update,
- .final = crypto_poly1305_p10_final,
- .descsize = sizeof(struct poly1305_desc_ctx),
- .base = {
- .cra_name = "poly1305",
- .cra_driver_name = "poly1305-p10",
- .cra_priority = 300,
- .cra_blocksize = POLY1305_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- },
-};
-
-static int __init poly1305_p10_init(void)
-{
- return crypto_register_shash(&poly1305_alg);
-}
-
-static void __exit poly1305_p10_exit(void)
-{
- crypto_unregister_shash(&poly1305_alg);
-}
-
-module_cpu_feature_match(PPC_MODULE_FEATURE_P10, poly1305_p10_init);
-module_exit(poly1305_p10_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
-MODULE_DESCRIPTION("Optimized Poly1305 for P10");
-MODULE_ALIAS_CRYPTO("poly1305");
-MODULE_ALIAS_CRYPTO("poly1305-p10");
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
index 9170892a8557..04c88e173ce1 100644
--- a/arch/powerpc/crypto/sha1-spe-glue.c
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -7,16 +7,13 @@
* Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
*/
+#include <asm/switch_to.h>
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
-#include <asm/byteorder.h>
-#include <asm/switch_to.h>
-#include <linux/hardirq.h>
+#include <linux/kernel.h>
+#include <linux/preempt.h>
+#include <linux/module.h>
/*
* MAX_BYTES defines the number of bytes that are allowed to be processed
@@ -30,7 +27,7 @@
*/
#define MAX_BYTES 2048
-extern void ppc_spe_sha1_transform(u32 *state, const u8 *src, u32 blocks);
+asmlinkage void ppc_spe_sha1_transform(u32 *state, const u8 *src, u32 blocks);
static void spe_begin(void)
{
@@ -46,126 +43,45 @@ static void spe_end(void)
preempt_enable();
}
-static inline void ppc_sha1_clear_context(struct sha1_state *sctx)
+static void ppc_spe_sha1_block(struct sha1_state *sctx, const u8 *src,
+ int blocks)
{
- int count = sizeof(struct sha1_state) >> 2;
- u32 *ptr = (u32 *)sctx;
-
- /* make sure we can clear the fast way */
- BUILD_BUG_ON(sizeof(struct sha1_state) % 4);
- do { *ptr++ = 0; } while (--count);
-}
-
-static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- const unsigned int offset = sctx->count & 0x3f;
- const unsigned int avail = 64 - offset;
- unsigned int bytes;
- const u8 *src = data;
-
- if (avail > len) {
- sctx->count += len;
- memcpy((char *)sctx->buffer + offset, src, len);
- return 0;
- }
-
- sctx->count += len;
-
- if (offset) {
- memcpy((char *)sctx->buffer + offset, src, avail);
+ do {
+ int unit = min(blocks, MAX_BYTES / SHA1_BLOCK_SIZE);
spe_begin();
- ppc_spe_sha1_transform(sctx->state, (const u8 *)sctx->buffer, 1);
+ ppc_spe_sha1_transform(sctx->state, src, unit);
spe_end();
- len -= avail;
- src += avail;
- }
-
- while (len > 63) {
- bytes = (len > MAX_BYTES) ? MAX_BYTES : len;
- bytes = bytes & ~0x3f;
-
- spe_begin();
- ppc_spe_sha1_transform(sctx->state, src, bytes >> 6);
- spe_end();
-
- src += bytes;
- len -= bytes;
- }
-
- memcpy((char *)sctx->buffer, src, len);
- return 0;
-}
-
-static int ppc_spe_sha1_final(struct shash_desc *desc, u8 *out)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- const unsigned int offset = sctx->count & 0x3f;
- char *p = (char *)sctx->buffer + offset;
- int padlen;
- __be64 *pbits = (__be64 *)(((char *)&sctx->buffer) + 56);
- __be32 *dst = (__be32 *)out;
-
- padlen = 55 - offset;
- *p++ = 0x80;
-
- spe_begin();
-
- if (padlen < 0) {
- memset(p, 0x00, padlen + sizeof (u64));
- ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1);
- p = (char *)sctx->buffer;
- padlen = 56;
- }
-
- memset(p, 0, padlen);
- *pbits = cpu_to_be64(sctx->count << 3);
- ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1);
-
- spe_end();
-
- dst[0] = cpu_to_be32(sctx->state[0]);
- dst[1] = cpu_to_be32(sctx->state[1]);
- dst[2] = cpu_to_be32(sctx->state[2]);
- dst[3] = cpu_to_be32(sctx->state[3]);
- dst[4] = cpu_to_be32(sctx->state[4]);
-
- ppc_sha1_clear_context(sctx);
- return 0;
+ src += unit * SHA1_BLOCK_SIZE;
+ blocks -= unit;
+ } while (blocks);
}
-static int ppc_spe_sha1_export(struct shash_desc *desc, void *out)
+static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
+ return sha1_base_do_update_blocks(desc, data, len, ppc_spe_sha1_block);
}
-static int ppc_spe_sha1_import(struct shash_desc *desc, const void *in)
+static int ppc_spe_sha1_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
+ sha1_base_do_finup(desc, src, len, ppc_spe_sha1_block);
+ return sha1_base_finish(desc, out);
}
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = ppc_spe_sha1_update,
- .final = ppc_spe_sha1_final,
- .export = ppc_spe_sha1_export,
- .import = ppc_spe_sha1_import,
- .descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
+ .finup = ppc_spe_sha1_finup,
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-ppc-spe",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index f283bbd3f121..4593946aa9b3 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -13,107 +13,46 @@
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
-#include <asm/byteorder.h>
-
-void powerpc_sha_transform(u32 *state, const u8 *src);
-
-static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
-
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
-
- if ((partial + len) > 63) {
-
- if (partial) {
- done = -partial;
- memcpy(sctx->buffer + partial, data, done + 64);
- src = sctx->buffer;
- }
-
- do {
- powerpc_sha_transform(sctx->state, src);
- done += 64;
- src = data + done;
- } while (done + 63 < len);
-
- partial = 0;
- }
- memcpy(sctx->buffer + partial, src, len - done);
-
- return 0;
-}
+#include <linux/kernel.h>
+#include <linux/module.h>
+asmlinkage void powerpc_sha_transform(u32 *state, const u8 *src);
-/* Add padding and return the message digest. */
-static int powerpc_sha1_final(struct shash_desc *desc, u8 *out)
+static void powerpc_sha_block(struct sha1_state *sctx, const u8 *data,
+ int blocks)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- __be32 *dst = (__be32 *)out;
- u32 i, index, padlen;
- __be64 bits;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64 */
- index = sctx->count & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- powerpc_sha1_update(desc, padding, padlen);
-
- /* Append length */
- powerpc_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Store state in digest */
- for (i = 0; i < 5; i++)
- dst[i] = cpu_to_be32(sctx->state[i]);
-
- /* Wipe context */
- memset(sctx, 0, sizeof *sctx);
-
- return 0;
+ do {
+ powerpc_sha_transform(sctx->state, data);
+ data += 64;
+ } while (--blocks);
}
-static int powerpc_sha1_export(struct shash_desc *desc, void *out)
+static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
+ return sha1_base_do_update_blocks(desc, data, len, powerpc_sha_block);
}
-static int powerpc_sha1_import(struct shash_desc *desc, const void *in)
+/* Add padding and return the message digest. */
+static int powerpc_sha1_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
+ sha1_base_do_finup(desc, src, len, powerpc_sha_block);
+ return sha1_base_finish(desc, out);
}
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = powerpc_sha1_update,
- .final = powerpc_sha1_final,
- .export = powerpc_sha1_export,
- .import = powerpc_sha1_import,
- .descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
+ .finup = powerpc_sha1_finup,
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-powerpc",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c
deleted file mode 100644
index 2997d13236e0..000000000000
--- a/arch/powerpc/crypto/sha256-spe-glue.c
+++ /dev/null
@@ -1,235 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Glue code for SHA-256 implementation for SPE instructions (PPC)
- *
- * Based on generic implementation. The assembler module takes care
- * about the SPE registers so it can run from interrupt context.
- *
- * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
- */
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <asm/byteorder.h>
-#include <asm/switch_to.h>
-#include <linux/hardirq.h>
-
-/*
- * MAX_BYTES defines the number of bytes that are allowed to be processed
- * between preempt_disable() and preempt_enable(). SHA256 takes ~2,000
- * operations per 64 bytes. e500 cores can issue two arithmetic instructions
- * per clock cycle using one 32/64 bit unit (SU1) and one 32 bit unit (SU2).
- * Thus 1KB of input data will need an estimated maximum of 18,000 cycles.
- * Headroom for cache misses included. Even with the low end model clocked
- * at 667 MHz this equals to a critical time window of less than 27us.
- *
- */
-#define MAX_BYTES 1024
-
-extern void ppc_spe_sha256_transform(u32 *state, const u8 *src, u32 blocks);
-
-static void spe_begin(void)
-{
- /* We just start SPE operations and will save SPE registers later. */
- preempt_disable();
- enable_kernel_spe();
-}
-
-static void spe_end(void)
-{
- disable_kernel_spe();
- /* reenable preemption */
- preempt_enable();
-}
-
-static inline void ppc_sha256_clear_context(struct sha256_state *sctx)
-{
- int count = sizeof(struct sha256_state) >> 2;
- u32 *ptr = (u32 *)sctx;
-
- /* make sure we can clear the fast way */
- BUILD_BUG_ON(sizeof(struct sha256_state) % 4);
- do { *ptr++ = 0; } while (--count);
-}
-
-static int ppc_spe_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- const unsigned int offset = sctx->count & 0x3f;
- const unsigned int avail = 64 - offset;
- unsigned int bytes;
- const u8 *src = data;
-
- if (avail > len) {
- sctx->count += len;
- memcpy((char *)sctx->buf + offset, src, len);
- return 0;
- }
-
- sctx->count += len;
-
- if (offset) {
- memcpy((char *)sctx->buf + offset, src, avail);
-
- spe_begin();
- ppc_spe_sha256_transform(sctx->state, (const u8 *)sctx->buf, 1);
- spe_end();
-
- len -= avail;
- src += avail;
- }
-
- while (len > 63) {
- /* cut input data into smaller blocks */
- bytes = (len > MAX_BYTES) ? MAX_BYTES : len;
- bytes = bytes & ~0x3f;
-
- spe_begin();
- ppc_spe_sha256_transform(sctx->state, src, bytes >> 6);
- spe_end();
-
- src += bytes;
- len -= bytes;
- }
-
- memcpy((char *)sctx->buf, src, len);
- return 0;
-}
-
-static int ppc_spe_sha256_final(struct shash_desc *desc, u8 *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- const unsigned int offset = sctx->count & 0x3f;
- char *p = (char *)sctx->buf + offset;
- int padlen;
- __be64 *pbits = (__be64 *)(((char *)&sctx->buf) + 56);
- __be32 *dst = (__be32 *)out;
-
- padlen = 55 - offset;
- *p++ = 0x80;
-
- spe_begin();
-
- if (padlen < 0) {
- memset(p, 0x00, padlen + sizeof (u64));
- ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);
- p = (char *)sctx->buf;
- padlen = 56;
- }
-
- memset(p, 0, padlen);
- *pbits = cpu_to_be64(sctx->count << 3);
- ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);
-
- spe_end();
-
- dst[0] = cpu_to_be32(sctx->state[0]);
- dst[1] = cpu_to_be32(sctx->state[1]);
- dst[2] = cpu_to_be32(sctx->state[2]);
- dst[3] = cpu_to_be32(sctx->state[3]);
- dst[4] = cpu_to_be32(sctx->state[4]);
- dst[5] = cpu_to_be32(sctx->state[5]);
- dst[6] = cpu_to_be32(sctx->state[6]);
- dst[7] = cpu_to_be32(sctx->state[7]);
-
- ppc_sha256_clear_context(sctx);
- return 0;
-}
-
-static int ppc_spe_sha224_final(struct shash_desc *desc, u8 *out)
-{
- __be32 D[SHA256_DIGEST_SIZE >> 2];
- __be32 *dst = (__be32 *)out;
-
- ppc_spe_sha256_final(desc, (u8 *)D);
-
- /* avoid bytewise memcpy */
- dst[0] = D[0];
- dst[1] = D[1];
- dst[2] = D[2];
- dst[3] = D[3];
- dst[4] = D[4];
- dst[5] = D[5];
- dst[6] = D[6];
-
- /* clear sensitive data */
- memzero_explicit(D, SHA256_DIGEST_SIZE);
- return 0;
-}
-
-static int ppc_spe_sha256_export(struct shash_desc *desc, void *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
-}
-
-static int ppc_spe_sha256_import(struct shash_desc *desc, const void *in)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
-}
-
-static struct shash_alg algs[2] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = ppc_spe_sha256_update,
- .final = ppc_spe_sha256_final,
- .export = ppc_spe_sha256_export,
- .import = ppc_spe_sha256_import,
- .descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name= "sha256-ppc-spe",
- .cra_priority = 300,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = ppc_spe_sha256_update,
- .final = ppc_spe_sha224_final,
- .export = ppc_spe_sha256_export,
- .import = ppc_spe_sha256_import,
- .descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name= "sha224-ppc-spe",
- .cra_priority = 300,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init ppc_spe_sha256_mod_init(void)
-{
- return crypto_register_shashes(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit ppc_spe_sha256_mod_fini(void)
-{
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-}
-
-module_init(ppc_spe_sha256_mod_init);
-module_exit(ppc_spe_sha256_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, SPE optimized");
-
-MODULE_ALIAS_CRYPTO("sha224");
-MODULE_ALIAS_CRYPTO("sha224-ppc-spe");
-MODULE_ALIAS_CRYPTO("sha256");
-MODULE_ALIAS_CRYPTO("sha256-ppc-spe");
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 6d98e6f08d4d..6ed93e290c2f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1096,7 +1096,6 @@ static inline bool pmd_access_permitted(pmd_t pmd, bool write)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot);
-extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern pud_t pud_modify(pud_t pud, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/include/asm/guest-state-buffer.h b/arch/powerpc/include/asm/guest-state-buffer.h
index d107abe1468f..acd61eb36d59 100644
--- a/arch/powerpc/include/asm/guest-state-buffer.h
+++ b/arch/powerpc/include/asm/guest-state-buffer.h
@@ -28,6 +28,21 @@
/* Process Table Info */
#define KVMPPC_GSID_PROCESS_TABLE 0x0006
+/* Guest Management Heap Size */
+#define KVMPPC_GSID_L0_GUEST_HEAP 0x0800
+
+/* Guest Management Heap Max Size */
+#define KVMPPC_GSID_L0_GUEST_HEAP_MAX 0x0801
+
+/* Guest Pagetable Size */
+#define KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE 0x0802
+
+/* Guest Pagetable Max Size */
+#define KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX 0x0803
+
+/* Guest Pagetable Reclaim in bytes */
+#define KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM 0x0804
+
/* H_GUEST_RUN_VCPU input buffer Info */
#define KVMPPC_GSID_RUN_INPUT 0x0C00
/* H_GUEST_RUN_VCPU output buffer Info */
@@ -106,6 +121,11 @@
#define KVMPPC_GSE_GUESTWIDE_COUNT \
(KVMPPC_GSE_GUESTWIDE_END - KVMPPC_GSE_GUESTWIDE_START + 1)
+#define KVMPPC_GSE_HOSTWIDE_START KVMPPC_GSID_L0_GUEST_HEAP
+#define KVMPPC_GSE_HOSTWIDE_END KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM
+#define KVMPPC_GSE_HOSTWIDE_COUNT \
+ (KVMPPC_GSE_HOSTWIDE_END - KVMPPC_GSE_HOSTWIDE_START + 1)
+
#define KVMPPC_GSE_META_START KVMPPC_GSID_RUN_INPUT
#define KVMPPC_GSE_META_END KVMPPC_GSID_VPA
#define KVMPPC_GSE_META_COUNT (KVMPPC_GSE_META_END - KVMPPC_GSE_META_START + 1)
@@ -130,7 +150,8 @@
(KVMPPC_GSE_INTR_REGS_END - KVMPPC_GSE_INTR_REGS_START + 1)
#define KVMPPC_GSE_IDEN_COUNT \
- (KVMPPC_GSE_GUESTWIDE_COUNT + KVMPPC_GSE_META_COUNT + \
+ (KVMPPC_GSE_HOSTWIDE_COUNT + \
+ KVMPPC_GSE_GUESTWIDE_COUNT + KVMPPC_GSE_META_COUNT + \
KVMPPC_GSE_DW_REGS_COUNT + KVMPPC_GSE_W_REGS_COUNT + \
KVMPPC_GSE_VSRS_COUNT + KVMPPC_GSE_INTR_REGS_COUNT)
@@ -139,10 +160,11 @@
*/
enum {
KVMPPC_GS_CLASS_GUESTWIDE = 0x01,
- KVMPPC_GS_CLASS_META = 0x02,
- KVMPPC_GS_CLASS_DWORD_REG = 0x04,
- KVMPPC_GS_CLASS_WORD_REG = 0x08,
- KVMPPC_GS_CLASS_VECTOR = 0x10,
+ KVMPPC_GS_CLASS_HOSTWIDE = 0x02,
+ KVMPPC_GS_CLASS_META = 0x04,
+ KVMPPC_GS_CLASS_DWORD_REG = 0x08,
+ KVMPPC_GS_CLASS_WORD_REG = 0x10,
+ KVMPPC_GS_CLASS_VECTOR = 0x18,
KVMPPC_GS_CLASS_INTR = 0x20,
};
@@ -164,6 +186,7 @@ enum {
*/
enum {
KVMPPC_GS_FLAGS_WIDE = 0x01,
+ KVMPPC_GS_FLAGS_HOST_WIDE = 0x02,
};
/**
@@ -287,7 +310,7 @@ struct kvmppc_gs_msg_ops {
* struct kvmppc_gs_msg - a guest state message
* @bitmap: the guest state ids that should be included
* @ops: modify message behavior for reading and writing to buffers
- * @flags: guest wide or thread wide
+ * @flags: host wide, guest wide or thread wide
* @data: location where buffer data will be written to or from.
*
* A guest state message is allows flexibility in sending in receiving data
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index eeef13db2770..6df6dbbe1e7c 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -490,14 +490,15 @@
#define H_RPTI_PAGE_ALL (-1UL)
/* Flags for H_GUEST_{S,G}_STATE */
-#define H_GUEST_FLAGS_WIDE (1UL<<(63-0))
+#define H_GUEST_FLAGS_WIDE (1UL << (63 - 0))
+#define H_GUEST_FLAGS_HOST_WIDE (1UL << (63 - 1))
/* Flag values used for H_{S,G}SET_GUEST_CAPABILITIES */
-#define H_GUEST_CAP_COPY_MEM (1UL<<(63-0))
-#define H_GUEST_CAP_POWER9 (1UL<<(63-1))
-#define H_GUEST_CAP_POWER10 (1UL<<(63-2))
-#define H_GUEST_CAP_POWER11 (1UL<<(63-3))
-#define H_GUEST_CAP_BITMAP2 (1UL<<(63-63))
+#define H_GUEST_CAP_COPY_MEM (1UL << (63 - 0))
+#define H_GUEST_CAP_POWER9 (1UL << (63 - 1))
+#define H_GUEST_CAP_POWER10 (1UL << (63 - 2))
+#define H_GUEST_CAP_POWER11 (1UL << (63 - 3))
+#define H_GUEST_CAP_BITMAP2 (1UL << (63 - 63))
/*
* Defines for H_HTM - Macros for hardware trace macro (HTM) function.
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 2f72ad885332..93d77ad5a92f 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -53,9 +53,8 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif
-/* Keep these as a macros to avoid include dependency mess */
+/* Keep this as a macro to avoid include dependency mess */
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
static inline unsigned long pte_pfn(pte_t pte)
{
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 91be7b885944..f2b6cc4341bb 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -65,6 +65,14 @@ static inline long register_dtl(unsigned long cpu, unsigned long vpa)
return vpa_call(H_VPA_REG_DTL, cpu, vpa);
}
+/*
+ * Invokes H_HTM hcall with parameters passed from htm_hcall_wrapper.
+ * flags: Set to hardwareTarget.
+ * target: Specifies target using node index, nodal chip index and core index.
+ * operation : action to perform ie configure, start, stop, deconfigure, trace
+ * based on the HTM type.
+ * param1, param2, param3: parameters for each action.
+ */
static inline long htm_call(unsigned long flags, unsigned long target,
unsigned long operation, unsigned long param1,
unsigned long param2, unsigned long param3)
@@ -73,17 +81,17 @@ static inline long htm_call(unsigned long flags, unsigned long target,
param1, param2, param3);
}
-static inline long htm_get_dump_hardware(unsigned long nodeindex,
+static inline long htm_hcall_wrapper(unsigned long flags, unsigned long nodeindex,
unsigned long nodalchipindex, unsigned long coreindexonchip,
- unsigned long type, unsigned long addr, unsigned long size,
- unsigned long offset)
+ unsigned long type, unsigned long htm_op, unsigned long param1, unsigned long param2,
+ unsigned long param3)
{
- return htm_call(H_HTM_FLAGS_HARDWARE_TARGET,
+ return htm_call(H_HTM_FLAGS_HARDWARE_TARGET | flags,
H_HTM_TARGET_NODE_INDEX(nodeindex) |
H_HTM_TARGET_NODAL_CHIP_INDEX(nodalchipindex) |
H_HTM_TARGET_CORE_INDEX_ON_CHIP(coreindexonchip),
- H_HTM_OP(H_HTM_OP_DUMP_DATA) | H_HTM_TYPE(type),
- addr, size, offset);
+ H_HTM_OP(htm_op) | H_HTM_TYPE(type),
+ param1, param2, param3);
}
extern void vpa_init(int cpu);
diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
new file mode 100644
index 000000000000..000e2b9681f3
--- /dev/null
+++ b/arch/powerpc/include/asm/preempt.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_PREEMPT_H
+#define __ASM_POWERPC_PREEMPT_H
+
+#include <asm-generic/preempt.h>
+
+#if defined(CONFIG_PREEMPT_DYNAMIC)
+#include <linux/jump_label.h>
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+#define need_irq_preemption() \
+ (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+#else
+#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION))
+#endif
+
+#endif /* __ASM_POWERPC_PREEMPT_H */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 04406162fc5a..75fa0293c508 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -515,6 +515,10 @@ extern char rtas_data_buf[RTAS_DATA_BUF_SIZE];
extern unsigned long rtas_rmo_buf;
extern struct mutex rtas_ibm_get_vpd_lock;
+extern struct mutex rtas_ibm_get_indices_lock;
+extern struct mutex rtas_ibm_set_dynamic_indicator_lock;
+extern struct mutex rtas_ibm_get_dynamic_sensor_state_lock;
+extern struct mutex rtas_ibm_physical_attestation_lock;
#define GLOBAL_INTERRUPT_QUEUE 9005
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 3dd36c5e334a..4b3c52ed6e9d 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -39,6 +39,16 @@ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
return -1;
}
+static inline void syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr)
+{
+ /*
+ * Unlike syscall_get_nr(), syscall_set_nr() can be called only when
+ * the target task is stopped for tracing on entering syscall, so
+ * there is no need to have the same check syscall_get_nr() has.
+ */
+ regs->gpr[0] = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -110,6 +120,16 @@ static inline void syscall_get_arguments(struct task_struct *task,
}
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
+
+ /* Also copy the first argument into orig_gpr3 */
+ regs->orig_gpr3 = args[0];
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
if (is_tsk_32bit_task(task))
diff --git a/arch/powerpc/include/uapi/asm/papr-indices.h b/arch/powerpc/include/uapi/asm/papr-indices.h
new file mode 100644
index 000000000000..c2999d89d52a
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/papr-indices.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_PAPR_INDICES_H_
+#define _UAPI_PAPR_INDICES_H_
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+#include <asm/papr-miscdev.h>
+
+#define LOC_CODE_SIZE 80
+#define RTAS_GET_INDICES_BUF_SIZE SZ_4K
+
+struct papr_indices_io_block {
+ union {
+ struct {
+ __u8 is_sensor; /* 0 for indicator and 1 for sensor */
+ __u32 indice_type;
+ } indices;
+ struct {
+ __u32 token; /* Sensor or indicator token */
+ __u32 state; /* get / set state */
+ /*
+ * PAPR+ 12.3.2.4 Converged Location Code Rules - Length
+ * Restrictions. 79 characters plus null.
+ */
+ char location_code_str[LOC_CODE_SIZE]; /* location code */
+ } dynamic_param;
+ };
+};
+
+/*
+ * ioctls for /dev/papr-indices.
+ * PAPR_INDICES_IOC_GET: Returns a get-indices handle fd to read data
+ * PAPR_DYNAMIC_SENSOR_IOC_GET: Gets the state of the input sensor
+ * PAPR_DYNAMIC_INDICATOR_IOC_SET: Sets the new state for the input indicator
+ */
+#define PAPR_INDICES_IOC_GET _IOW(PAPR_MISCDEV_IOC_ID, 3, struct papr_indices_io_block)
+#define PAPR_DYNAMIC_SENSOR_IOC_GET _IOWR(PAPR_MISCDEV_IOC_ID, 4, struct papr_indices_io_block)
+#define PAPR_DYNAMIC_INDICATOR_IOC_SET _IOW(PAPR_MISCDEV_IOC_ID, 5, struct papr_indices_io_block)
+
+
+#endif /* _UAPI_PAPR_INDICES_H_ */
diff --git a/arch/powerpc/include/uapi/asm/papr-physical-attestation.h b/arch/powerpc/include/uapi/asm/papr-physical-attestation.h
new file mode 100644
index 000000000000..ea746837bb9a
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/papr-physical-attestation.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_PAPR_PHYSICAL_ATTESTATION_H_
+#define _UAPI_PAPR_PHYSICAL_ATTESTATION_H_
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+#include <asm/papr-miscdev.h>
+
+#define PAPR_PHYATTEST_MAX_INPUT 4084 /* Max 4K buffer: 4K-12 */
+
+/*
+ * Defined in PAPR 2.13+ 21.6 Attestation Command Structures.
+ * User space pass this struct and the max size should be 4K.
+ */
+struct papr_phy_attest_io_block {
+ __u8 version;
+ __u8 command;
+ __u8 TCG_major_ver;
+ __u8 TCG_minor_ver;
+ __be32 length;
+ __be32 correlator;
+ __u8 payload[PAPR_PHYATTEST_MAX_INPUT];
+};
+
+/*
+ * ioctl for /dev/papr-physical-attestation. Returns a attestation
+ * command fd handle
+ */
+#define PAPR_PHY_ATTEST_IOC_HANDLE _IOW(PAPR_MISCDEV_IOC_ID, 8, struct papr_phy_attest_io_block)
+
+#endif /* _UAPI_PAPR_PHYSICAL_ATTESTATION_H_ */
diff --git a/arch/powerpc/include/uapi/asm/papr-platform-dump.h b/arch/powerpc/include/uapi/asm/papr-platform-dump.h
new file mode 100644
index 000000000000..8a1c060e89a9
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/papr-platform-dump.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_PAPR_PLATFORM_DUMP_H_
+#define _UAPI_PAPR_PLATFORM_DUMP_H_
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+#include <asm/papr-miscdev.h>
+
+/*
+ * ioctl for /dev/papr-platform-dump. Returns a platform-dump handle fd
+ * corresponding to dump tag.
+ */
+#define PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE _IOW(PAPR_MISCDEV_IOC_ID, 6, __u64)
+#define PAPR_PLATFORM_DUMP_IOC_INVALIDATE _IOW(PAPR_MISCDEV_IOC_ID, 7, __u64)
+
+#endif /* _UAPI_PAPR_PLATFORM_DUMP_H_ */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 6ac621155ec3..9d1ab3971694 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -160,9 +160,7 @@ endif
obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM) += tm.o
-ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE)(CONFIG_PPC_BOOK3S),)
obj-y += ppc_save_regs.o
-endif
obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o
obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index df16c7f547ab..8ca49e40c473 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -290,10 +290,8 @@ static void __init fadump_show_config(void)
if (!fw_dump.fadump_supported)
return;
- pr_debug("Fadump enabled : %s\n",
- (fw_dump.fadump_enabled ? "yes" : "no"));
- pr_debug("Dump Active : %s\n",
- (fw_dump.dump_active ? "yes" : "no"));
+ pr_debug("Fadump enabled : %s\n", str_yes_no(fw_dump.fadump_enabled));
+ pr_debug("Dump Active : %s\n", str_yes_no(fw_dump.dump_active));
pr_debug("Dump section sizes:\n");
pr_debug(" CPU state data size: %lx\n", fw_dump.cpu_state_data_size);
pr_debug(" HPTE region size : %lx\n", fw_dump.hpte_region_size);
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index 8f4acc55407b..e0c681d0b076 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -25,6 +25,10 @@
unsigned long global_dbcr0[NR_CPUS];
#endif
+#if defined(CONFIG_PREEMPT_DYNAMIC)
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool exit_must_hard_disable(void)
@@ -396,7 +400,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
/* Returning to a kernel context with local irqs enabled. */
WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
- if (IS_ENABLED(CONFIG_PREEMPTION)) {
+ if (need_irq_preemption()) {
/* Return to preemptible kernel context */
if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
if (preempt_count() == 0)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 0ebae6e4c19d..244eb4857e7f 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -16,6 +16,7 @@
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
@@ -769,8 +770,8 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
iommu_table_clear(tbl);
if (!welcomed) {
- printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
- novmerge ? "disabled" : "enabled");
+ pr_info("IOMMU table initialized, virtual merging %s\n",
+ str_disabled_enabled(novmerge));
welcomed = 1;
}
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 34a5aec4908f..126bf3b06ab7 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -258,10 +258,6 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
break;
}
}
- if (i == hdr->e_shnum) {
- pr_err("%s: doesn't contain __patchable_function_entries.\n", me->name);
- return -ENOEXEC;
- }
#endif
pr_debug("Looks like a total of %lu stubs, max\n", relocs);
diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c
index 3816a2bf2b84..d083b4517065 100644
--- a/arch/powerpc/kernel/proc_powerpc.c
+++ b/arch/powerpc/kernel/proc_powerpc.c
@@ -9,6 +9,7 @@
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/string.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
@@ -56,7 +57,7 @@ static int __init proc_ppc64_init(void)
{
struct proc_dir_entry *pde;
- strcpy((char *)systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
+ strscpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
systemcfg->version.major = SYSTEMCFG_MAJOR;
systemcfg->version.minor = SYSTEMCFG_MINOR;
systemcfg->processor = mfspr(SPRN_PVR);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ef91f71e07c4..855e09886503 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1000,7 +1000,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
WARN_ON(tm_suspend_disabled);
- TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
+ TM_DEBUG("---- tm_reclaim on pid %d (NIP=%lx, "
"ccr=%lx, msr=%lx, trap=%lx)\n",
tsk->pid, thr->regs->nip,
thr->regs->ccr, thr->regs->msr,
@@ -1008,7 +1008,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
- TM_DEBUG("--- tm_reclaim on pid %d complete\n",
+ TM_DEBUG("---- tm_reclaim on pid %d complete\n",
tsk->pid);
out_and_saveregs:
@@ -2367,14 +2367,14 @@ void __no_sanitize_address show_stack(struct task_struct *tsk,
(sp + STACK_INT_FRAME_REGS);
lr = regs->link;
- printk("%s--- interrupt: %lx at %pS\n",
+ printk("%s---- interrupt: %lx at %pS\n",
loglvl, regs->trap, (void *)regs->nip);
// Detect the case of an empty pt_regs at the very base
// of the stack and suppress showing it in full.
if (!empty_user_regs(regs, tsk)) {
__show_regs(regs);
- printk("%s--- interrupt: %lx\n", loglvl, regs->trap);
+ printk("%s---- interrupt: %lx\n", loglvl, regs->trap);
}
firstframe = 1;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index d7a738f1858d..e61245c4468e 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -92,12 +92,12 @@ struct rtas_function {
* Per-function locks for sequence-based RTAS functions.
*/
static DEFINE_MUTEX(rtas_ibm_activate_firmware_lock);
-static DEFINE_MUTEX(rtas_ibm_get_dynamic_sensor_state_lock);
-static DEFINE_MUTEX(rtas_ibm_get_indices_lock);
static DEFINE_MUTEX(rtas_ibm_lpar_perftools_lock);
-static DEFINE_MUTEX(rtas_ibm_physical_attestation_lock);
-static DEFINE_MUTEX(rtas_ibm_set_dynamic_indicator_lock);
+DEFINE_MUTEX(rtas_ibm_physical_attestation_lock);
DEFINE_MUTEX(rtas_ibm_get_vpd_lock);
+DEFINE_MUTEX(rtas_ibm_get_indices_lock);
+DEFINE_MUTEX(rtas_ibm_set_dynamic_indicator_lock);
+DEFINE_MUTEX(rtas_ibm_get_dynamic_sensor_state_lock);
static struct rtas_function rtas_function_table[] __ro_after_init = {
[RTAS_FNIDX__CHECK_EXCEPTION] = {
diff --git a/arch/powerpc/kernel/trace/ftrace_entry.S b/arch/powerpc/kernel/trace/ftrace_entry.S
index 2c1b24100eca..3565c67fc638 100644
--- a/arch/powerpc/kernel/trace/ftrace_entry.S
+++ b/arch/powerpc/kernel/trace/ftrace_entry.S
@@ -212,10 +212,10 @@
bne- 1f
mr r3, r15
+1: mtlr r3
.if \allregs == 0
REST_GPR(15, r1)
.endif
-1: mtlr r3
#endif
/* Restore gprs */
diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c
index 9ac3266e4965..a325c1c02f96 100644
--- a/arch/powerpc/kexec/crash.c
+++ b/arch/powerpc/kexec/crash.c
@@ -359,7 +359,10 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
is_via_system_reset = 1;
- crash_smp_send_stop();
+ if (IS_ENABLED(CONFIG_SMP))
+ crash_smp_send_stop();
+ else
+ crash_kexec_prepare();
crash_save_cpu(regs, crashing_cpu);
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index dbfdc126bf14..2f2702c867f7 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -83,6 +83,7 @@ config KVM_BOOK3S_64_HV
depends on KVM_BOOK3S_64 && PPC_POWERNV
select KVM_BOOK3S_HV_POSSIBLE
select KVM_GENERIC_MMU_NOTIFIER
+ select KVM_BOOK3S_HV_PMU
select CMA
help
Support running unmodified book3s_64 guest kernels in
@@ -171,6 +172,18 @@ config KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND
those buggy L1s which saves the L2 state, at the cost of performance
in all nested-capable guest entry/exit.
+config KVM_BOOK3S_HV_PMU
+ tristate "Hypervisor Perf events for KVM Book3s-HV"
+ depends on KVM_BOOK3S_64_HV
+ help
+ Enable Book3s-HV Hypervisor Perf events PMU named 'kvm-hv'. These
+ Perf events give an overview of hypervisor performance overall
+ instead of a specific guests. Currently the PMU reports
+ L0-Hypervisor stats on a kvm-hv enabled PSeries LPAR like:
+ * Total/Used Guest-Heap
+ * Total/Used Guest Page-table Memory
+ * Total amount of Guest Page-table Memory reclaimed
+
config KVM_BOOKE_HV
bool
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 19f4d298dd17..7667563fb9ff 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -6541,10 +6541,6 @@ static struct kvmppc_ops kvm_ops_hv = {
.fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
.arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
.hcall_implemented = kvmppc_hcall_impl_hv,
-#ifdef CONFIG_KVM_XICS
- .irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv,
- .irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv,
-#endif
.configure_mmu = kvmhv_configure_mmu,
.get_rmmu_info = kvmhv_get_rmmu_info,
.set_smt_mode = kvmhv_set_smt_mode,
@@ -6662,6 +6658,22 @@ static int kvmppc_book3s_init_hv(void)
return r;
}
+#if defined(CONFIG_KVM_XICS)
+ /*
+ * IRQ bypass is supported only for interrupts whose EOI operations are
+ * handled via OPAL calls. Therefore, register IRQ bypass handlers
+ * exclusively for PowerNV KVM when booted with 'xive=off', indicating
+ * the use of the emulated XICS interrupt controller.
+ */
+ if (!kvmhv_on_pseries()) {
+ pr_info("KVM-HV: Enabling IRQ bypass\n");
+ kvm_ops_hv.irq_bypass_add_producer =
+ kvmppc_irq_bypass_add_producer_hv;
+ kvm_ops_hv.irq_bypass_del_producer =
+ kvmppc_irq_bypass_del_producer_hv;
+ }
+#endif
+
kvm_ops_hv.owner = THIS_MODULE;
kvmppc_hv_ops = &kvm_ops_hv;
diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c
index e5c7ce1fb761..87691cf86cae 100644
--- a/arch/powerpc/kvm/book3s_hv_nestedv2.c
+++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c
@@ -123,6 +123,12 @@ static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm)
case KVMPPC_GSID_PROCESS_TABLE:
case KVMPPC_GSID_RUN_INPUT:
case KVMPPC_GSID_RUN_OUTPUT:
+ /* Host wide counters */
+ case KVMPPC_GSID_L0_GUEST_HEAP:
+ case KVMPPC_GSID_L0_GUEST_HEAP_MAX:
+ case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE:
+ case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX:
+ case KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM:
break;
default:
size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 6a4805968966..791d1942a058 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -572,7 +572,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
/*
* Return the number of jiffies until the next timeout. If the timeout is
- * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
+ * longer than the TIMER_NEXT_MAX_DELTA, then return TIMER_NEXT_MAX_DELTA
* because the larger value can break the timer APIs.
*/
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
@@ -598,7 +598,7 @@ static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
if (do_div(nr_jiffies, tb_ticks_per_jiffy))
nr_jiffies++;
- return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
+ return min_t(unsigned long long, nr_jiffies, TIMER_NEXT_MAX_DELTA);
}
static void arm_next_watchdog(struct kvm_vcpu *vcpu)
@@ -616,10 +616,10 @@ static void arm_next_watchdog(struct kvm_vcpu *vcpu)
spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
nr_jiffies = watchdog_next_timeout(vcpu);
/*
- * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
+ * If the number of jiffies of watchdog timer >= TIMER_NEXT_MAX_DELTA
* then do not run the watchdog timer as this can break timer APIs.
*/
- if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
+ if (nr_jiffies < TIMER_NEXT_MAX_DELTA)
mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
else
timer_delete(&vcpu->arch.wdt_timer);
diff --git a/arch/powerpc/kvm/guest-state-buffer.c b/arch/powerpc/kvm/guest-state-buffer.c
index b80dbc58621f..871cf60ddeb6 100644
--- a/arch/powerpc/kvm/guest-state-buffer.c
+++ b/arch/powerpc/kvm/guest-state-buffer.c
@@ -92,6 +92,10 @@ static int kvmppc_gsid_class(u16 iden)
(iden <= KVMPPC_GSE_GUESTWIDE_END))
return KVMPPC_GS_CLASS_GUESTWIDE;
+ if ((iden >= KVMPPC_GSE_HOSTWIDE_START) &&
+ (iden <= KVMPPC_GSE_HOSTWIDE_END))
+ return KVMPPC_GS_CLASS_HOSTWIDE;
+
if ((iden >= KVMPPC_GSE_META_START) && (iden <= KVMPPC_GSE_META_END))
return KVMPPC_GS_CLASS_META;
@@ -118,6 +122,21 @@ static int kvmppc_gsid_type(u16 iden)
int type = -1;
switch (kvmppc_gsid_class(iden)) {
+ case KVMPPC_GS_CLASS_HOSTWIDE:
+ switch (iden) {
+ case KVMPPC_GSID_L0_GUEST_HEAP:
+ fallthrough;
+ case KVMPPC_GSID_L0_GUEST_HEAP_MAX:
+ fallthrough;
+ case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE:
+ fallthrough;
+ case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX:
+ fallthrough;
+ case KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM:
+ type = KVMPPC_GSE_BE64;
+ break;
+ }
+ break;
case KVMPPC_GS_CLASS_GUESTWIDE:
switch (iden) {
case KVMPPC_GSID_HOST_STATE_SIZE:
@@ -187,6 +206,9 @@ unsigned long kvmppc_gsid_flags(u16 iden)
case KVMPPC_GS_CLASS_GUESTWIDE:
flags = KVMPPC_GS_FLAGS_WIDE;
break;
+ case KVMPPC_GS_CLASS_HOSTWIDE:
+ flags = KVMPPC_GS_FLAGS_HOST_WIDE;
+ break;
case KVMPPC_GS_CLASS_META:
case KVMPPC_GS_CLASS_DWORD_REG:
case KVMPPC_GS_CLASS_WORD_REG:
@@ -310,6 +332,13 @@ static inline int kvmppc_gse_flatten_iden(u16 iden)
bit += KVMPPC_GSE_GUESTWIDE_COUNT;
+ if (class == KVMPPC_GS_CLASS_HOSTWIDE) {
+ bit += iden - KVMPPC_GSE_HOSTWIDE_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_HOSTWIDE_COUNT;
+
if (class == KVMPPC_GS_CLASS_META) {
bit += iden - KVMPPC_GSE_META_START;
return bit;
@@ -356,6 +385,12 @@ static inline u16 kvmppc_gse_unflatten_iden(int bit)
}
bit -= KVMPPC_GSE_GUESTWIDE_COUNT;
+ if (bit < KVMPPC_GSE_HOSTWIDE_COUNT) {
+ iden = KVMPPC_GSE_HOSTWIDE_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_HOSTWIDE_COUNT;
+
if (bit < KVMPPC_GSE_META_COUNT) {
iden = KVMPPC_GSE_META_START + bit;
return iden;
@@ -588,6 +623,8 @@ int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags)
if (flags & KVMPPC_GS_FLAGS_WIDE)
hflags |= H_GUEST_FLAGS_WIDE;
+ if (flags & KVMPPC_GS_FLAGS_HOST_WIDE)
+ hflags |= H_GUEST_FLAGS_HOST_WIDE;
rc = plpar_guest_set_state(hflags, gsb->guest_id, gsb->vcpu_id,
__pa(gsb->hdr), gsb->capacity, &i);
@@ -613,6 +650,8 @@ int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags)
if (flags & KVMPPC_GS_FLAGS_WIDE)
hflags |= H_GUEST_FLAGS_WIDE;
+ if (flags & KVMPPC_GS_FLAGS_HOST_WIDE)
+ hflags |= H_GUEST_FLAGS_HOST_WIDE;
rc = plpar_guest_get_state(hflags, gsb->guest_id, gsb->vcpu_id,
__pa(gsb->hdr), gsb->capacity, &i);
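
The new HOSTWIDE handling above threads one more identifier range through the class lookup, the flatten/unflatten bit mapping and the send/receive flag translation. Since flatten() and unflatten() must stay exact inverses when a range is inserted in the middle, here is a small self-contained sketch of the same packing scheme with made-up range constants (the real ones live in guest-state-buffer.h), checking that the round trip holds:

#include <assert.h>
#include <stdio.h>

/* Illustrative ranges only; not the kernel's values. */
#define GUESTWIDE_START 0x0000
#define GUESTWIDE_COUNT 8
#define HOSTWIDE_START  0x0800
#define HOSTWIDE_COUNT  5
#define META_START      0x1000

static int flatten(unsigned int iden)
{
	int bit = 0;

	if (iden >= GUESTWIDE_START && iden < GUESTWIDE_START + GUESTWIDE_COUNT)
		return bit + (iden - GUESTWIDE_START);
	bit += GUESTWIDE_COUNT;

	if (iden >= HOSTWIDE_START && iden < HOSTWIDE_START + HOSTWIDE_COUNT)
		return bit + (iden - HOSTWIDE_START);
	bit += HOSTWIDE_COUNT;

	return bit + (iden - META_START);
}

static unsigned int unflatten(int bit)
{
	if (bit < GUESTWIDE_COUNT)
		return GUESTWIDE_START + bit;
	bit -= GUESTWIDE_COUNT;

	if (bit < HOSTWIDE_COUNT)
		return HOSTWIDE_START + bit;
	bit -= HOSTWIDE_COUNT;

	return META_START + bit;
}

int main(void)
{
	assert(unflatten(flatten(HOSTWIDE_START + 3)) == HOSTWIDE_START + 3);
	assert(unflatten(flatten(META_START + 1)) == META_START + 1);
	printf("round-trip ok\n");
	return 0;
}
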
diff --git a/arch/powerpc/kvm/test-guest-state-buffer.c b/arch/powerpc/kvm/test-guest-state-buffer.c
index bfd225329a18..5ccca306997a 100644
--- a/arch/powerpc/kvm/test-guest-state-buffer.c
+++ b/arch/powerpc/kvm/test-guest-state-buffer.c
@@ -5,6 +5,7 @@
#include <kunit/test.h>
#include <asm/guest-state-buffer.h>
+#include <asm/kvm_ppc.h>
static void test_creating_buffer(struct kunit *test)
{
@@ -141,6 +142,16 @@ static void test_gs_bitmap(struct kunit *test)
i++;
}
+ for (u16 iden = KVMPPC_GSID_L0_GUEST_HEAP;
+ iden <= KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
for (u16 iden = KVMPPC_GSID_RUN_INPUT; iden <= KVMPPC_GSID_VPA;
iden++) {
kvmppc_gsbm_set(&gsbm, iden);
@@ -309,12 +320,215 @@ static void test_gs_msg(struct kunit *test)
kvmppc_gsm_free(gsm);
}
+/* Test data struct for hostwide/L0 counters */
+struct kvmppc_gs_msg_test_hostwide_data {
+ u64 guest_heap;
+ u64 guest_heap_max;
+ u64 guest_pgtable_size;
+ u64 guest_pgtable_size_max;
+ u64 guest_pgtable_reclaim;
+};
+
+static size_t test_hostwide_get_size(struct kvmppc_gs_msg *gsm)
+
+{
+ size_t size = 0;
+ u16 ids[] = {
+ KVMPPC_GSID_L0_GUEST_HEAP,
+ KVMPPC_GSID_L0_GUEST_HEAP_MAX,
+ KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE,
+ KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX,
+ KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(ids); i++)
+ size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
+ return size;
+}
+
+static int test_hostwide_fill_info(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ struct kvmppc_gs_msg_test_hostwide_data *data = gsm->data;
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP))
+ kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_HEAP,
+ data->guest_heap);
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX))
+ kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_HEAP_MAX,
+ data->guest_heap_max);
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE))
+ kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE,
+ data->guest_pgtable_size);
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX))
+ kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX,
+ data->guest_pgtable_size_max);
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM))
+ kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM,
+ data->guest_pgtable_reclaim);
+
+ return 0;
+}
+
+static int test_hostwide_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmppc_gs_msg_test_hostwide_data *data = gsm->data;
+ struct kvmppc_gs_elem *gse;
+ int rc;
+
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ if (rc < 0)
+ return rc;
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP);
+ if (gse)
+ data->guest_heap = kvmppc_gse_get_u64(gse);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP_MAX);
+ if (gse)
+ data->guest_heap_max = kvmppc_gse_get_u64(gse);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE);
+ if (gse)
+ data->guest_pgtable_size = kvmppc_gse_get_u64(gse);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX);
+ if (gse)
+ data->guest_pgtable_size_max = kvmppc_gse_get_u64(gse);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM);
+ if (gse)
+ data->guest_pgtable_reclaim = kvmppc_gse_get_u64(gse);
+
+ return 0;
+}
+
+static struct kvmppc_gs_msg_ops gs_msg_test_hostwide_ops = {
+ .get_size = test_hostwide_get_size,
+ .fill_info = test_hostwide_fill_info,
+ .refresh_info = test_hostwide_refresh_info,
+};
+
+static void test_gs_hostwide_msg(struct kunit *test)
+{
+ struct kvmppc_gs_msg_test_hostwide_data test_data = {
+ .guest_heap = 0xdeadbeef,
+ .guest_heap_max = ~0ULL,
+ .guest_pgtable_size = 0xff,
+ .guest_pgtable_size_max = 0xffffff,
+ .guest_pgtable_reclaim = 0xdeadbeef,
+ };
+ struct kvmppc_gs_msg *gsm;
+ struct kvmppc_gs_buff *gsb;
+
+ gsm = kvmppc_gsm_new(&gs_msg_test_hostwide_ops, &test_data, GSM_SEND,
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm);
+
+ gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP);
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX);
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE);
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX);
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM);
+
+ kvmppc_gsm_fill_info(gsm, gsb);
+
+ memset(&test_data, 0, sizeof(test_data));
+
+ kvmppc_gsm_refresh_info(gsm, gsb);
+ KUNIT_EXPECT_EQ(test, test_data.guest_heap, 0xdeadbeef);
+ KUNIT_EXPECT_EQ(test, test_data.guest_heap_max, ~0ULL);
+ KUNIT_EXPECT_EQ(test, test_data.guest_pgtable_size, 0xff);
+ KUNIT_EXPECT_EQ(test, test_data.guest_pgtable_size_max, 0xffffff);
+ KUNIT_EXPECT_EQ(test, test_data.guest_pgtable_reclaim, 0xdeadbeef);
+
+ kvmppc_gsm_free(gsm);
+}
+
+/* Test if the H_GUEST_GET_STATE hcall for hostwide counters works */
+static void test_gs_hostwide_counters(struct kunit *test)
+{
+ struct kvmppc_gs_msg_test_hostwide_data test_data;
+ struct kvmppc_gs_parser gsp = { 0 };
+
+ struct kvmppc_gs_msg *gsm;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_elem *gse;
+ int rc;
+
+ if (!kvmhv_on_pseries())
+ kunit_skip(test, "This test needs a kvm-hv guest");
+
+ gsm = kvmppc_gsm_new(&gs_msg_test_hostwide_ops, &test_data, GSM_SEND,
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm);
+
+ gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP);
+
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX);
+
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE);
+
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX);
+
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM);
+
+ kvmppc_gsm_fill_info(gsm, gsb);
+
+ /* With the HOST_WIDE flag, guestid and vcpuid will be ignored */
+ rc = kvmppc_gsb_recv(gsb, KVMPPC_GS_FLAGS_HOST_WIDE);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ /* Parse the guest state buffer and check that it succeeded */
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ /* Parse the GSB and get the counters */
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP);
+ KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 Heap counter missing");
+ kunit_info(test, "Guest Heap Size=%llu bytes",
+ kvmppc_gse_get_u64(gse));
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP_MAX);
+ KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 Heap counter max missing");
+ kunit_info(test, "Guest Heap Size Max=%llu bytes",
+ kvmppc_gse_get_u64(gse));
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE);
+ KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 page-table size missing");
+ kunit_info(test, "Guest Page-table Size=%llu bytes",
+ kvmppc_gse_get_u64(gse));
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX);
+ KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 page-table size-max missing");
+ kunit_info(test, "Guest Page-table Size Max=%llu bytes",
+ kvmppc_gse_get_u64(gse));
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM);
+ KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 page-table reclaim size missing");
+ kunit_info(test, "Guest Page-table Reclaim Size=%llu bytes",
+ kvmppc_gse_get_u64(gse));
+
+ kvmppc_gsm_free(gsm);
+ kvmppc_gsb_free(gsb);
+}
+
static struct kunit_case guest_state_buffer_testcases[] = {
KUNIT_CASE(test_creating_buffer),
KUNIT_CASE(test_adding_element),
KUNIT_CASE(test_gs_bitmap),
KUNIT_CASE(test_gs_parsing),
KUNIT_CASE(test_gs_msg),
+ KUNIT_CASE(test_gs_hostwide_msg),
+ KUNIT_CASE(test_gs_hostwide_counters),
{}
};
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
index 45817ab82bb4..14b0e23f601f 100644
--- a/arch/powerpc/kvm/timing.h
+++ b/arch/powerpc/kvm/timing.h
@@ -38,11 +38,7 @@ static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) {}
static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
{
/* type has to be known at build time for optimization */
-
- /* The BUILD_BUG_ON below breaks in funny ways, commented out
- * for now ... -BenH
BUILD_BUG_ON(!__builtin_constant_p(type));
- */
switch (type) {
case EXT_INTR_EXITS:
vcpu->stat.ext_intr_exits++;
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index dd8a4b52a0cc..481f968e42c7 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -3,6 +3,8 @@
# Makefile for ppc-specific library files..
#
+obj-y += crypto/
+
CFLAGS_code-patching.o += -fno-stack-protector
CFLAGS_feature-fixups.o += -fno-stack-protector
@@ -79,9 +81,9 @@ CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec)
CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include)
obj-$(CONFIG_CRC32_ARCH) += crc32-powerpc.o
-crc32-powerpc-y := crc32-glue.o crc32c-vpmsum_asm.o
+crc32-powerpc-y := crc32.o crc32c-vpmsum_asm.o
obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-powerpc.o
-crc-t10dif-powerpc-y := crc-t10dif-glue.o crct10dif-vpmsum_asm.o
+crc-t10dif-powerpc-y := crc-t10dif.o crct10dif-vpmsum_asm.o
obj-$(CONFIG_PPC64) += $(obj64-y)
diff --git a/arch/powerpc/lib/crc-t10dif-glue.c b/arch/powerpc/lib/crc-t10dif.c
index f411b0120cc5..be23ded3a9df 100644
--- a/arch/powerpc/lib/crc-t10dif-glue.c
+++ b/arch/powerpc/lib/crc-t10dif.c
@@ -6,22 +6,22 @@
* [based on crc32c-vpmsum_glue.c]
*/
-#include <linux/crc-t10dif.h>
+#include <asm/switch_to.h>
#include <crypto/internal/simd.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
#include <linux/cpufeature.h>
-#include <asm/simd.h>
-#include <asm/switch_to.h>
+#include <linux/crc-t10dif.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
#define VMX_ALIGN 16
#define VMX_ALIGN_MASK (VMX_ALIGN-1)
#define VECTOR_BREAKPOINT 64
-static DEFINE_STATIC_KEY_FALSE(have_vec_crypto);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_vec_crypto);
u32 __crct10dif_vpmsum(u32 crc, unsigned char const *p, size_t len);
@@ -71,7 +71,7 @@ static int __init crc_t10dif_powerpc_init(void)
static_branch_enable(&have_vec_crypto);
return 0;
}
-arch_initcall(crc_t10dif_powerpc_init);
+subsys_initcall(crc_t10dif_powerpc_init);
static void __exit crc_t10dif_powerpc_exit(void)
{
diff --git a/arch/powerpc/lib/crc32-vpmsum_core.S b/arch/powerpc/lib/crc-vpmsum-template.S
index b0f87f595b26..b0f87f595b26 100644
--- a/arch/powerpc/lib/crc32-vpmsum_core.S
+++ b/arch/powerpc/lib/crc-vpmsum-template.S
diff --git a/arch/powerpc/lib/crc32-glue.c b/arch/powerpc/lib/crc32.c
index dbd10f339183..0d9befb6e7b8 100644
--- a/arch/powerpc/lib/crc32-glue.c
+++ b/arch/powerpc/lib/crc32.c
@@ -1,19 +1,20 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/crc32.h>
+#include <asm/switch_to.h>
#include <crypto/internal/simd.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/cpufeature.h>
-#include <asm/simd.h>
-#include <asm/switch_to.h>
+#include <linux/crc32.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
#define VMX_ALIGN 16
#define VMX_ALIGN_MASK (VMX_ALIGN-1)
#define VECTOR_BREAKPOINT 512
-static DEFINE_STATIC_KEY_FALSE(have_vec_crypto);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_vec_crypto);
u32 __crc32c_vpmsum(u32 crc, const u8 *p, size_t len);
@@ -72,7 +73,7 @@ static int __init crc32_powerpc_init(void)
static_branch_enable(&have_vec_crypto);
return 0;
}
-arch_initcall(crc32_powerpc_init);
+subsys_initcall(crc32_powerpc_init);
static void __exit crc32_powerpc_exit(void)
{
diff --git a/arch/powerpc/lib/crc32c-vpmsum_asm.S b/arch/powerpc/lib/crc32c-vpmsum_asm.S
index bf442004ea1f..1b35c55cce0a 100644
--- a/arch/powerpc/lib/crc32c-vpmsum_asm.S
+++ b/arch/powerpc/lib/crc32c-vpmsum_asm.S
@@ -839,4 +839,4 @@
#define CRC_FUNCTION_NAME __crc32c_vpmsum
#define REFLECT
-#include "crc32-vpmsum_core.S"
+#include "crc-vpmsum-template.S"
diff --git a/arch/powerpc/lib/crct10dif-vpmsum_asm.S b/arch/powerpc/lib/crct10dif-vpmsum_asm.S
index f0b93a0fe168..47a6266d89a8 100644
--- a/arch/powerpc/lib/crct10dif-vpmsum_asm.S
+++ b/arch/powerpc/lib/crct10dif-vpmsum_asm.S
@@ -842,4 +842,4 @@
.octa 0x0000000000000000000000018bb70000
#define CRC_FUNCTION_NAME __crct10dif_vpmsum
-#include "crc32-vpmsum_core.S"
+#include "crc-vpmsum-template.S"
diff --git a/arch/powerpc/lib/crypto/Kconfig b/arch/powerpc/lib/crypto/Kconfig
new file mode 100644
index 000000000000..3f9e1bbd9905
--- /dev/null
+++ b/arch/powerpc/lib/crypto/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CRYPTO_CHACHA20_P10
+ tristate
+ depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
+ default CRYPTO_LIB_CHACHA
+ select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+
+config CRYPTO_POLY1305_P10
+ tristate
+ depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
+ depends on BROKEN # Needs to be fixed to work in softirq context
+ default CRYPTO_LIB_POLY1305
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
+ select CRYPTO_LIB_POLY1305_GENERIC
+
+config CRYPTO_SHA256_PPC_SPE
+ tristate
+ depends on SPE
+ default CRYPTO_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256
diff --git a/arch/powerpc/lib/crypto/Makefile b/arch/powerpc/lib/crypto/Makefile
new file mode 100644
index 000000000000..27f231f8e334
--- /dev/null
+++ b/arch/powerpc/lib/crypto/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o
+chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o
+
+obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o
+poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o
+
+obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
+sha256-ppc-spe-y := sha256.o sha256-spe-asm.o
diff --git a/arch/powerpc/lib/crypto/chacha-p10-glue.c b/arch/powerpc/lib/crypto/chacha-p10-glue.c
new file mode 100644
index 000000000000..fcd23c6f1590
--- /dev/null
+++ b/arch/powerpc/lib/crypto/chacha-p10-glue.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ChaCha stream cipher (P10 accelerated)
+ *
+ * Copyright 2023- IBM Corp. All rights reserved.
+ */
+
+#include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+#include <linux/sizes.h>
+#include <asm/simd.h>
+#include <asm/switch_to.h>
+
+asmlinkage void chacha_p10le_8x(const struct chacha_state *state, u8 *dst,
+ const u8 *src, unsigned int len, int nrounds);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10);
+
+static void vsx_begin(void)
+{
+ preempt_disable();
+ enable_kernel_vsx();
+}
+
+static void vsx_end(void)
+{
+ disable_kernel_vsx();
+ preempt_enable();
+}
+
+static void chacha_p10_do_8x(struct chacha_state *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ unsigned int l = bytes & ~0x0FF;
+
+ if (l > 0) {
+ chacha_p10le_8x(state, dst, src, l, nrounds);
+ bytes -= l;
+ src += l;
+ dst += l;
+ state->x[12] += l / CHACHA_BLOCK_SIZE;
+ }
+
+ if (bytes > 0)
+ chacha_crypt_generic(state, dst, src, bytes, nrounds);
+}
+
+void hchacha_block_arch(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds)
+{
+ hchacha_block_generic(state, out, nrounds);
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ if (!static_branch_likely(&have_p10) || bytes <= CHACHA_BLOCK_SIZE ||
+ !crypto_simd_usable())
+ return chacha_crypt_generic(state, dst, src, bytes, nrounds);
+
+ do {
+ unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+ vsx_begin();
+ chacha_p10_do_8x(state, dst, src, todo, nrounds);
+ vsx_end();
+
+ bytes -= todo;
+ src += todo;
+ dst += todo;
+ } while (bytes);
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+bool chacha_is_arch_optimized(void)
+{
+ return static_key_enabled(&have_p10);
+}
+EXPORT_SYMBOL(chacha_is_arch_optimized);
+
+static int __init chacha_p10_init(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ static_branch_enable(&have_p10);
+ return 0;
+}
+subsys_initcall(chacha_p10_init);
+
+static void __exit chacha_p10_exit(void)
+{
+}
+module_exit(chacha_p10_exit);
+
+MODULE_DESCRIPTION("ChaCha stream cipher (P10 accelerated)");
+MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
+MODULE_LICENSE("GPL v2");
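
chacha_crypt_arch() above walks the input in at most SZ_4K chunks with VSX enabled, and chacha_p10_do_8x() hands only the 256-byte-aligned prefix of each chunk to the assembly routine, bumping the block counter state->x[12] by one per 64-byte block before the generic code finishes the tail. A hedged stand-alone sketch of just that length bookkeeping, with nothing kernel-specific beyond the ChaCha block size:

#include <stdio.h>

#define CHACHA_BLOCK_SIZE 64
#define CHUNK_MAX         4096   /* SZ_4K, the per-VSX-section limit */

/* Split a request into 4K chunks, and each chunk into a 256-byte-aligned
 * bulk part (assembly path) plus a tail left to the generic fallback. */
static void split_request(unsigned int bytes)
{
	unsigned int counter = 0;   /* stands in for state->x[12] */

	while (bytes) {
		unsigned int todo = bytes < CHUNK_MAX ? bytes : CHUNK_MAX;
		unsigned int bulk = todo & ~0xFFU;
		unsigned int tail = todo - bulk;

		counter += bulk / CHACHA_BLOCK_SIZE;
		printf("chunk: bulk=%u tail=%u counter=%u\n", bulk, tail, counter);
		bytes -= todo;
	}
}

int main(void)
{
	split_request(5000);   /* a 4096-byte chunk, then a 904-byte chunk */
	return 0;
}
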
diff --git a/arch/powerpc/crypto/chacha-p10le-8x.S b/arch/powerpc/lib/crypto/chacha-p10le-8x.S
index 17bedb66b822..b29562bd5d40 100644
--- a/arch/powerpc/crypto/chacha-p10le-8x.S
+++ b/arch/powerpc/lib/crypto/chacha-p10le-8x.S
@@ -7,9 +7,6 @@
#===================================================================================
# Written by Danny Tsen <dtsen@us.ibm.com>
#
-# chacha_p10le_8x(u32 *state, byte *dst, const byte *src,
-# size_t len, int nrounds);
-#
# do rounds, 8 quarter rounds
# 1. a += b; d ^= a; d <<<= 16;
# 2. c += d; b ^= c; b <<<= 12;
@@ -575,7 +572,8 @@
.endm
#
-# chacha20_p10le_8x(u32 *state, byte *dst, const byte *src, size_t len, int nrounds);
+# void chacha_p10le_8x(const struct chacha_state *state, u8 *dst, const u8 *src,
+# unsigned int len, int nrounds);
#
SYM_FUNC_START(chacha_p10le_8x)
.align 5
diff --git a/arch/powerpc/lib/crypto/poly1305-p10-glue.c b/arch/powerpc/lib/crypto/poly1305-p10-glue.c
new file mode 100644
index 000000000000..3f1664a724b6
--- /dev/null
+++ b/arch/powerpc/lib/crypto/poly1305-p10-glue.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Poly1305 authenticator algorithm, RFC7539.
+ *
+ * Copyright 2023- IBM Corp. All rights reserved.
+ */
+#include <asm/switch_to.h>
+#include <crypto/internal/poly1305.h>
+#include <linux/cpufeature.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/unaligned.h>
+
+asmlinkage void poly1305_p10le_4blocks(struct poly1305_block_state *state, const u8 *m, u32 mlen);
+asmlinkage void poly1305_64s(struct poly1305_block_state *state, const u8 *m, u32 mlen, int highbit);
+asmlinkage void poly1305_emit_64(const struct poly1305_state *state, const u32 nonce[4], u8 digest[POLY1305_DIGEST_SIZE]);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10);
+
+static void vsx_begin(void)
+{
+ preempt_disable();
+ enable_kernel_vsx();
+}
+
+static void vsx_end(void)
+{
+ disable_kernel_vsx();
+ preempt_enable();
+}
+
+void poly1305_block_init_arch(struct poly1305_block_state *dctx,
+ const u8 raw_key[POLY1305_BLOCK_SIZE])
+{
+ if (!static_key_enabled(&have_p10))
+ return poly1305_block_init_generic(dctx, raw_key);
+
+ dctx->h = (struct poly1305_state){};
+ dctx->core_r.key.r64[0] = get_unaligned_le64(raw_key + 0);
+ dctx->core_r.key.r64[1] = get_unaligned_le64(raw_key + 8);
+}
+EXPORT_SYMBOL_GPL(poly1305_block_init_arch);
+
+void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
+ unsigned int len, u32 padbit)
+{
+ if (!static_key_enabled(&have_p10))
+ return poly1305_blocks_generic(state, src, len, padbit);
+ vsx_begin();
+ if (len >= POLY1305_BLOCK_SIZE * 4) {
+ poly1305_p10le_4blocks(state, src, len);
+ src += len - (len % (POLY1305_BLOCK_SIZE * 4));
+ len %= POLY1305_BLOCK_SIZE * 4;
+ }
+ while (len >= POLY1305_BLOCK_SIZE) {
+ poly1305_64s(state, src, POLY1305_BLOCK_SIZE, padbit);
+ len -= POLY1305_BLOCK_SIZE;
+ src += POLY1305_BLOCK_SIZE;
+ }
+ vsx_end();
+}
+EXPORT_SYMBOL_GPL(poly1305_blocks_arch);
+
+void poly1305_emit_arch(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4])
+{
+ if (!static_key_enabled(&have_p10))
+ return poly1305_emit_generic(state, digest, nonce);
+ poly1305_emit_64(state, nonce, digest);
+}
+EXPORT_SYMBOL_GPL(poly1305_emit_arch);
+
+bool poly1305_is_arch_optimized(void)
+{
+ return static_key_enabled(&have_p10);
+}
+EXPORT_SYMBOL(poly1305_is_arch_optimized);
+
+static int __init poly1305_p10_init(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ static_branch_enable(&have_p10);
+ return 0;
+}
+subsys_initcall(poly1305_p10_init);
+
+static void __exit poly1305_p10_exit(void)
+{
+}
+module_exit(poly1305_p10_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
+MODULE_DESCRIPTION("Optimized Poly1305 for P10");
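
poly1305_blocks_arch() above sends as many whole 4-block (64-byte) groups as possible to poly1305_p10le_4blocks() and then finishes the remaining 16-byte blocks one at a time with poly1305_64s(). A small sketch of that split, assuming only the POLY1305_BLOCK_SIZE constant:

#include <stdio.h>

#define POLY1305_BLOCK_SIZE 16

/* Split a message into the 4-block bulk path and the single-block tail. */
static void split_blocks(unsigned int len)
{
	unsigned int bulk = 0, singles = 0;

	if (len >= POLY1305_BLOCK_SIZE * 4) {
		bulk = len - (len % (POLY1305_BLOCK_SIZE * 4)); /* multiple of 64 */
		len -= bulk;
	}
	while (len >= POLY1305_BLOCK_SIZE) {
		singles++;
		len -= POLY1305_BLOCK_SIZE;
	}
	printf("4-block path: %u bytes, single blocks: %u, leftover: %u\n",
	       bulk, singles, len);
}

int main(void)
{
	split_blocks(200);   /* 192 bytes bulk, 0 single blocks, 8 bytes leftover */
	return 0;
}
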
diff --git a/arch/powerpc/crypto/poly1305-p10le_64.S b/arch/powerpc/lib/crypto/poly1305-p10le_64.S
index a3c1987f1ecd..a3c1987f1ecd 100644
--- a/arch/powerpc/crypto/poly1305-p10le_64.S
+++ b/arch/powerpc/lib/crypto/poly1305-p10le_64.S
diff --git a/arch/powerpc/crypto/sha256-spe-asm.S b/arch/powerpc/lib/crypto/sha256-spe-asm.S
index cd99d71dae34..cd99d71dae34 100644
--- a/arch/powerpc/crypto/sha256-spe-asm.S
+++ b/arch/powerpc/lib/crypto/sha256-spe-asm.S
diff --git a/arch/powerpc/lib/crypto/sha256.c b/arch/powerpc/lib/crypto/sha256.c
new file mode 100644
index 000000000000..6b0f079587eb
--- /dev/null
+++ b/arch/powerpc/lib/crypto/sha256.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-256 Secure Hash Algorithm, SPE optimized
+ *
+ * Based on generic implementation. The assembler module takes care
+ * about the SPE registers so it can run from interrupt context.
+ *
+ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
+ */
+
+#include <asm/switch_to.h>
+#include <crypto/internal/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+
+/*
+ * MAX_BYTES defines the number of bytes that are allowed to be processed
+ * between preempt_disable() and preempt_enable(). SHA256 takes ~2,000
+ * operations per 64 bytes. e500 cores can issue two arithmetic instructions
+ * per clock cycle using one 32/64 bit unit (SU1) and one 32 bit unit (SU2).
+ * Thus 1KB of input data will need an estimated maximum of 18,000 cycles.
+ * Headroom for cache misses included. Even with the low end model clocked
+ * at 667 MHz this equals to a critical time window of less than 27us.
+ *
+ */
+#define MAX_BYTES 1024
+
+extern void ppc_spe_sha256_transform(u32 *state, const u8 *src, u32 blocks);
+
+static void spe_begin(void)
+{
+ /* We just start SPE operations and will save SPE registers later. */
+ preempt_disable();
+ enable_kernel_spe();
+}
+
+static void spe_end(void)
+{
+ disable_kernel_spe();
+ /* reenable preemption */
+ preempt_enable();
+}
+
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks)
+{
+ do {
+ /* cut input data into smaller blocks */
+ u32 unit = min_t(size_t, nblocks,
+ MAX_BYTES / SHA256_BLOCK_SIZE);
+
+ spe_begin();
+ ppc_spe_sha256_transform(state, data, unit);
+ spe_end();
+
+ data += unit * SHA256_BLOCK_SIZE;
+ nblocks -= unit;
+ } while (nblocks);
+}
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+
+bool sha256_is_arch_optimized(void)
+{
+ return true;
+}
+EXPORT_SYMBOL_GPL(sha256_is_arch_optimized);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-256 Secure Hash Algorithm, SPE optimized");
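
The MAX_BYTES comment above bounds how long SPE stays enabled with preemption disabled, and the arithmetic checks out: 1024 bytes is 16 blocks, ~2,000 ops per block issued two per cycle is ~16,000 cycles, padded to roughly 18,000 for cache misses, which at 667 MHz is under 27 microseconds. A quick stand-alone check of those numbers; the 12.5% headroom factor is a round number chosen here to reproduce the comment's 18,000-cycle estimate:

#include <stdio.h>

int main(void)
{
	const unsigned int max_bytes   = 1024;
	const unsigned int block_size  = 64;
	const unsigned int ops_per_blk = 2000;   /* from the comment above       */
	const unsigned int ipc         = 2;      /* SU1 + SU2, two ops per cycle */
	const double       clock_hz    = 667e6;  /* low-end e500 clock           */

	unsigned int blocks = max_bytes / block_size;          /* 16        */
	unsigned int cycles = blocks * ops_per_blk / ipc;       /* ~16,000   */
	double window_us = (cycles * 1.125) / clock_hz * 1e6;   /* ~18k cyc  */

	printf("blocks=%u cycles=%u window=%.1f us\n", blocks, cycles, window_us);
	return 0;
}
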
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index 58ed6bd613a6..54340912398f 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -45,7 +45,7 @@ int exit_vmx_usercopy(void)
* set and we are preemptible. The hack here is to schedule a
* decrementer to fire here and reschedule for us if necessary.
*/
- if (IS_ENABLED(CONFIG_PREEMPTION) && need_resched())
+ if (need_irq_preemption() && need_resched())
set_dec(1);
return 0;
}
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 8f7d41ce2ca1..0db01e10a3f8 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -269,11 +269,6 @@ pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot)
return __pud_mkhuge(pud_set_protbits(__pud(pudv), pgprot));
}
-pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
-{
- return pfn_pmd(page_to_pfn(page), pgprot);
-}
-
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
unsigned long pmdv;
@@ -422,7 +417,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
ptdesc = pagetable_alloc(gfp, 0);
if (!ptdesc)
return NULL;
- if (!pagetable_pmd_ctor(ptdesc)) {
+ if (!pagetable_pmd_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 311e2112d782..9f764bc42b8c 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -976,7 +976,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
return 0;
}
-
+#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
{
if (radix_enabled())
@@ -984,6 +984,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
return false;
}
+#endif
int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
unsigned long addr, unsigned long next)
@@ -1120,6 +1121,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
pmd_t *pmd;
pte_t *pte;
+ /*
+ * Make sure we align the start vmemmap addr so that we calculate
+ * the correct start_pfn in altmap boundary check to decide whether
+ * we should use altmap or RAM based backing memory allocation. Also
+ * the address needs to be aligned for the set_pte operation.
+ *
+ * If the start addr is already PMD_SIZE aligned we will try to use
+ * a pmd mapping. We don't want to be too aggressive here because
+ * that will cause more allocations in RAM. So only if the namespace
+ * vmemmap start addr is PMD_SIZE aligned we will use PMD mapping.
+ */
+
+ start = ALIGN_DOWN(start, PAGE_SIZE);
for (addr = start; addr < end; addr = next) {
next = pmd_addr_end(addr, end);
@@ -1145,8 +1159,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
* in altmap block allocation failures, in which case
* we fallback to RAM for vmemmap allocation.
*/
- if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
- altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
+ if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
+ altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
/*
* make sure we don't create altmap mappings
* covering things outside the device.
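
The reworked condition above means a PMD-sized vmemmap mapping is attempted only when the address is PMD_SIZE aligned and, if an altmap is in use, the allocation would not cross the altmap boundary; everything else falls back to base pages. A compact sketch of that decision with the kernel specifics reduced to booleans:

#include <stdbool.h>
#include <stdio.h>

#define PMD_SIZE (2UL * 1024 * 1024)

/* Decide whether a PMD mapping is safe for this vmemmap address. */
static bool use_pmd_mapping(unsigned long addr, bool have_altmap,
			    bool crosses_altmap_boundary)
{
	if (addr & (PMD_SIZE - 1))          /* not PMD aligned: use PTEs      */
		return false;
	if (have_altmap && crosses_altmap_boundary)
		return false;               /* would spill outside the device */
	return true;
}

int main(void)
{
	printf("%d\n", use_pmd_mapping(0x200000, false, false)); /* 1 */
	printf("%d\n", use_pmd_mapping(0x201000, false, false)); /* 0 */
	printf("%d\n", use_pmd_mapping(0x400000, true, true));   /* 0 */
	return 0;
}
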
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index c156fe0d53c3..806c74e0d5ab 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
@@ -218,7 +219,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
// Read/write fault blocked by KUAP is bad, it can never succeed.
if (bad_kuap_fault(regs, address, is_write)) {
pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n",
- is_write ? "write" : "read", address,
+ str_write_read(is_write), address,
from_kuid(&init_user_ns, current_uid()));
// Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
@@ -625,7 +626,7 @@ static void __bad_page_fault(struct pt_regs *regs, int sig)
case INTERRUPT_DATA_STORAGE:
case INTERRUPT_H_DATA_STORAGE:
pr_alert("BUG: %s on %s at 0x%08lx\n", msg,
- is_write ? "write" : "read", regs->dar);
+ str_write_read(is_write), regs->dar);
break;
case INTERRUPT_DATA_SEGMENT:
pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar);
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 8b54f12d1889..ab1505cf42bf 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -54,20 +54,13 @@ static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
{
pmd_t *pmdp = pmd_off_k(va);
pte_t *ptep;
-
- if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
- return -EINVAL;
+ unsigned int shift = mmu_psize_to_shift(psize);
if (new) {
if (WARN_ON(slab_is_available()))
return -EINVAL;
- if (psize == MMU_PAGE_512K) {
- ptep = early_pte_alloc_kernel(pmdp, va);
- /* The PTE should never be already present */
- if (WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
- return -EINVAL;
- } else {
+ if (psize == MMU_PAGE_8M) {
if (WARN_ON(!pmd_none(*pmdp) || !pmd_none(*(pmdp + 1))))
return -EINVAL;
@@ -78,20 +71,25 @@ static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
pmd_populate_kernel(&init_mm, pmdp + 1, ptep);
ptep = (pte_t *)pmdp;
+ } else {
+ ptep = early_pte_alloc_kernel(pmdp, va);
+ /* The PTE should never be already present */
+ if (WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
+ return -EINVAL;
}
} else {
- if (psize == MMU_PAGE_512K)
- ptep = pte_offset_kernel(pmdp, va);
- else
+ if (psize == MMU_PAGE_8M)
ptep = (pte_t *)pmdp;
+ else
+ ptep = pte_offset_kernel(pmdp, va);
}
if (WARN_ON(!ptep))
return -ENOMEM;
set_huge_pte_at(&init_mm, va, ptep,
- pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)),
- 1UL << mmu_psize_to_shift(psize));
+ arch_make_huge_pte(pfn_pte(pa >> PAGE_SHIFT, prot), shift, 0),
+ 1UL << shift);
return 0;
}
@@ -123,14 +121,18 @@ static int mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
unsigned long p = offset;
int err = 0;
- WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));
+ WARN_ON(!IS_ALIGNED(offset, SZ_16K) || !IS_ALIGNED(top, SZ_16K));
+ for (; p < ALIGN(p, SZ_512K) && p < top && !err; p += SZ_16K, v += SZ_16K)
+ err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_16K, new);
for (; p < ALIGN(p, SZ_8M) && p < top && !err; p += SZ_512K, v += SZ_512K)
err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
for (; p < ALIGN_DOWN(top, SZ_8M) && p < top && !err; p += SZ_8M, v += SZ_8M)
err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
for (; p < ALIGN_DOWN(top, SZ_512K) && p < top && !err; p += SZ_512K, v += SZ_512K)
err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+ for (; p < ALIGN_DOWN(top, SZ_16K) && p < top && !err; p += SZ_16K, v += SZ_16K)
+ err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_16K, new);
if (!new)
flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
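
mmu_mapin_ram_chunk() above now only requires 16K alignment and walks the range with a size ladder: 16K pages up to the first 512K boundary, 512K pages up to the first 8M boundary, 8M pages for the bulk, then 512K and 16K pages for the tail. A hedged sketch that prints the resulting page-size sequence for a given range (addresses and sizes only, no MMU code):

#include <stdio.h>

#define SZ_16K   0x4000UL
#define SZ_512K  0x80000UL
#define SZ_8M    0x800000UL

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/* Print the page sizes the chunk mapper would pick for [p, top). */
static void map_chunk(unsigned long p, unsigned long top)
{
	for (; p < ALIGN_UP(p, SZ_512K) && p < top; p += SZ_16K)
		printf("16K  at %#lx\n", p);
	for (; p < ALIGN_UP(p, SZ_8M) && p < top; p += SZ_512K)
		printf("512K at %#lx\n", p);
	for (; p < ALIGN_DOWN(top, SZ_8M); p += SZ_8M)
		printf("8M   at %#lx\n", p);
	for (; p < ALIGN_DOWN(top, SZ_512K); p += SZ_512K)
		printf("512K at %#lx\n", p);
	for (; p < ALIGN_DOWN(top, SZ_16K); p += SZ_16K)
		printf("16K  at %#lx\n", p);
}

int main(void)
{
	/* 0x7c000..16M: one 16K page, fifteen 512K pages, then one 8M page */
	map_chunk(0x7c000, 0x1000000);
	return 0;
}
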
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
index 713268ccb1a0..77e55eac16e4 100644
--- a/arch/powerpc/mm/pgtable-frag.c
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -56,19 +56,17 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
void *ret = NULL;
struct ptdesc *ptdesc;
+ gfp_t gfp = PGALLOC_GFP;
- if (!kernel) {
- ptdesc = pagetable_alloc(PGALLOC_GFP | __GFP_ACCOUNT, 0);
- if (!ptdesc)
- return NULL;
- if (!pagetable_pte_ctor(ptdesc)) {
- pagetable_free(ptdesc);
- return NULL;
- }
- } else {
- ptdesc = pagetable_alloc(PGALLOC_GFP, 0);
- if (!ptdesc)
- return NULL;
+ if (!kernel)
+ gfp |= __GFP_ACCOUNT;
+
+ ptdesc = pagetable_alloc(gfp, 0);
+ if (!ptdesc)
+ return NULL;
+ if (!pagetable_pte_ctor(mm, ptdesc)) {
+ pagetable_free(ptdesc);
+ return NULL;
}
atomic_set(&ptdesc->pt_frag_refcount, 1);
@@ -124,12 +122,10 @@ void pte_fragment_free(unsigned long *table, int kernel)
BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
- if (kernel)
- pagetable_free(ptdesc);
- else if (folio_test_clear_active(ptdesc_folio(ptdesc)))
- call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
- else
+ if (kernel || !folio_test_clear_active(ptdesc_folio(ptdesc)))
pte_free_now(&ptdesc->pt_rcu_head);
+ else
+ call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
}
}
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 9dc239967b77..b2358d794855 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -298,6 +298,38 @@ static void populate_markers(void)
#endif
}
+static void note_page_pte(struct ptdump_state *pt_st, unsigned long addr, pte_t pte)
+{
+ note_page(pt_st, addr, 4, pte_val(pte));
+}
+
+static void note_page_pmd(struct ptdump_state *pt_st, unsigned long addr, pmd_t pmd)
+{
+ note_page(pt_st, addr, 3, pmd_val(pmd));
+}
+
+static void note_page_pud(struct ptdump_state *pt_st, unsigned long addr, pud_t pud)
+{
+ note_page(pt_st, addr, 2, pud_val(pud));
+}
+
+static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d)
+{
+ note_page(pt_st, addr, 1, p4d_val(p4d));
+}
+
+static void note_page_pgd(struct ptdump_state *pt_st, unsigned long addr, pgd_t pgd)
+{
+ note_page(pt_st, addr, 0, pgd_val(pgd));
+}
+
+static void note_page_flush(struct ptdump_state *pt_st)
+{
+ pte_t pte_zero = {0};
+
+ note_page(pt_st, 0, -1, pte_val(pte_zero));
+}
+
static int ptdump_show(struct seq_file *m, void *v)
{
struct pg_state st = {
@@ -305,7 +337,12 @@ static int ptdump_show(struct seq_file *m, void *v)
.marker = address_markers,
.level = -1,
.ptdump = {
- .note_page = note_page,
+ .note_page_pte = note_page_pte,
+ .note_page_pmd = note_page_pmd,
+ .note_page_pud = note_page_pud,
+ .note_page_p4d = note_page_p4d,
+ .note_page_pgd = note_page_pgd,
+ .note_page_flush = note_page_flush,
.range = ptdump_range,
}
};
@@ -338,7 +375,12 @@ bool ptdump_check_wx(void)
.level = -1,
.check_wx = true,
.ptdump = {
- .note_page = note_page,
+ .note_page_pte = note_page_pte,
+ .note_page_pmd = note_page_pmd,
+ .note_page_pud = note_page_pud,
+ .note_page_p4d = note_page_p4d,
+ .note_page_pgd = note_page_pgd,
+ .note_page_flush = note_page_flush,
.range = ptdump_range,
}
};
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 6beacaec63d3..4c26912c2e3c 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -51,8 +51,16 @@
EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
} while (0)
-/* Sign-extended 32-bit immediate load */
+/*
+ * Sign-extended 32-bit immediate load
+ *
+ * If this is a dummy pass (!image), account for
+ * maximum possible instructions.
+ */
#define PPC_LI32(d, i) do { \
+ if (!image) \
+ ctx->idx += 2; \
+ else { \
if ((int)(uintptr_t)(i) >= -32768 && \
(int)(uintptr_t)(i) < 32768) \
EMIT(PPC_RAW_LI(d, i)); \
@@ -60,10 +68,15 @@
EMIT(PPC_RAW_LIS(d, IMM_H(i))); \
if (IMM_L(i)) \
EMIT(PPC_RAW_ORI(d, d, IMM_L(i))); \
- } } while(0)
+ } \
+ } } while (0)
#ifdef CONFIG_PPC64
+/* If dummy pass (!image), account for maximum possible instructions */
#define PPC_LI64(d, i) do { \
+ if (!image) \
+ ctx->idx += 5; \
+ else { \
if ((long)(i) >= -2147483648 && \
(long)(i) < 2147483648) \
PPC_LI32(d, i); \
@@ -84,7 +97,8 @@
if ((uintptr_t)(i) & 0x000000000000ffffULL) \
EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) & \
0xffff)); \
- } } while (0)
+ } \
+ } } while (0)
#define PPC_LI_ADDR PPC_LI64
#ifndef CONFIG_PPC_KERNEL_PCREL
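
The PPC_LI32/PPC_LI64 changes above make the size-estimation pass (image == NULL) reserve the worst-case instruction count (2 and 5 words respectively) up front instead of padding with NOPs later. A minimal sketch of that two-pass pattern, where the first pass only advances an index and the second emits into a buffer of exactly that size; the opcodes and helper names are illustrative, not the kernel's macros:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct ctx { unsigned int idx; };

/* Emit (or, when buf == NULL, just count) a 32-bit immediate load. */
static void emit_li32(uint32_t *buf, struct ctx *ctx, int32_t imm)
{
	if (!buf) {              /* dummy pass: reserve the worst case */
		ctx->idx += 2;
		return;
	}
	if (imm >= -32768 && imm < 32768) {
		buf[ctx->idx++] = 0x38000000u | (imm & 0xffff);         /* li  */
	} else {
		buf[ctx->idx++] = 0x3c000000u | ((imm >> 16) & 0xffff); /* lis */
		if (imm & 0xffff)
			buf[ctx->idx++] = 0x60000000u | (imm & 0xffff); /* ori */
	}
}

int main(void)
{
	struct ctx ctx = { 0 };

	emit_li32(NULL, &ctx, 42);          /* pass 1: size estimate */
	emit_li32(NULL, &ctx, 0x12345678);
	printf("worst-case words: %u\n", ctx.idx);   /* 4 */

	uint32_t *image = calloc(ctx.idx, sizeof(*image));
	ctx.idx = 0;
	emit_li32(image, &ctx, 42);         /* pass 2: real emission, may be shorter */
	emit_li32(image, &ctx, 0x12345678);
	printf("emitted words: %u\n", ctx.idx);      /* 3 */
	free(image);
	return 0;
}

The same idea lets arch_bpf_trampoline_size() further down simply run the prepare function with NULL buffers instead of allocating a scratch page just to measure the output.
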
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 2991bb171a9b..c0684733e9d6 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -504,10 +504,11 @@ static int invoke_bpf_prog(u32 *image, u32 *ro_image, struct codegen_context *ct
EMIT(PPC_RAW_ADDI(_R3, _R1, regs_off));
if (!p->jited)
PPC_LI_ADDR(_R4, (unsigned long)p->insnsi);
- if (!create_branch(&branch_insn, (u32 *)&ro_image[ctx->idx], (unsigned long)p->bpf_func,
- BRANCH_SET_LINK)) {
- if (image)
- image[ctx->idx] = ppc_inst_val(branch_insn);
+ /* Account for max possible instructions during dummy pass for size calculation */
+ if (image && !create_branch(&branch_insn, (u32 *)&ro_image[ctx->idx],
+ (unsigned long)p->bpf_func,
+ BRANCH_SET_LINK)) {
+ image[ctx->idx] = ppc_inst_val(branch_insn);
ctx->idx++;
} else {
EMIT(PPC_RAW_LL(_R12, _R25, offsetof(struct bpf_prog, bpf_func)));
@@ -889,7 +890,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
bpf_trampoline_restore_tail_call_cnt(image, ctx, func_frame_offset, r4_off);
/* Reserve space to patch branch instruction to skip fexit progs */
- im->ip_after_call = &((u32 *)ro_image)[ctx->idx];
+ if (ro_image) /* image is NULL for dummy pass */
+ im->ip_after_call = &((u32 *)ro_image)[ctx->idx];
EMIT(PPC_RAW_NOP());
}
@@ -912,7 +914,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- im->ip_epilogue = &((u32 *)ro_image)[ctx->idx];
+ if (ro_image) /* image is NULL for dummy pass */
+ im->ip_epilogue = &((u32 *)ro_image)[ctx->idx];
PPC_LI_ADDR(_R3, im);
ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
(unsigned long)__bpf_tramp_exit);
@@ -973,25 +976,9 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
struct bpf_tramp_links *tlinks, void *func_addr)
{
struct bpf_tramp_image im;
- void *image;
int ret;
- /*
- * Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
- * This will NOT cause fragmentation in direct map, as we do not
- * call set_memory_*() on this buffer.
- *
- * We cannot use kvmalloc here, because we need image to be in
- * module memory range.
- */
- image = bpf_jit_alloc_exec(PAGE_SIZE);
- if (!image)
- return -ENOMEM;
-
- ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
- m, flags, tlinks, func_addr);
- bpf_jit_free_exec(image);
-
+ ret = __arch_prepare_bpf_trampoline(&im, NULL, NULL, NULL, m, flags, tlinks, func_addr);
return ret;
}
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index c4db278dae36..0aace304dfe1 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -313,7 +313,6 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
u64 func_addr;
u32 true_cond;
u32 tmp_idx;
- int j;
if (i && (BPF_CLASS(code) == BPF_ALU64 || BPF_CLASS(code) == BPF_ALU) &&
(BPF_CLASS(prevcode) == BPF_ALU64 || BPF_CLASS(prevcode) == BPF_ALU) &&
@@ -1099,13 +1098,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* 16 byte instruction that uses two 'struct bpf_insn'
*/
case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
- tmp_idx = ctx->idx;
PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
PPC_LI32(dst_reg, (u32)insn[i].imm);
- /* padding to allow full 4 instructions for later patching */
- if (!image)
- for (j = ctx->idx - tmp_idx; j < 4; j++)
- EMIT(PPC_RAW_NOP());
/* Adjust for two bpf instructions */
addrs[++i] = ctx->idx * 4;
break;
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 233703b06d7c..5daa77aee7f7 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -227,7 +227,14 @@ int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *
#ifdef CONFIG_PPC_KERNEL_PCREL
reladdr = func_addr - local_paca->kernelbase;
- if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
+ /*
+ * If fimage is NULL (the initial pass to find image size),
+ * account for the maximum no. of instructions possible.
+ */
+ if (!fimage) {
+ ctx->idx += 7;
+ return 0;
+ } else if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
/* Align for subsequent prefix instruction */
if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
@@ -412,7 +419,6 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
u64 imm64;
u32 true_cond;
u32 tmp_idx;
- int j;
/*
* addrs[] maps a BPF bytecode address into a real offset from
@@ -1046,12 +1052,7 @@ emit_clear:
case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
imm64 = ((u64)(u32) insn[i].imm) |
(((u64)(u32) insn[i+1].imm) << 32);
- tmp_idx = ctx->idx;
PPC_LI64(dst_reg, imm64);
- /* padding to allow full 5 instructions for later patching */
- if (!image)
- for (j = ctx->idx - tmp_idx; j < 5; j++)
- EMIT(PPC_RAW_NOP());
/* Adjust for two bpf instructions */
addrs[++i] = ctx->idx * 4;
break;
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index ac2cf58d62db..7f53fcb7495a 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -18,6 +18,8 @@ obj-$(CONFIG_HV_PERF_CTRS) += hv-24x7.o hv-gpci.o hv-common.o
obj-$(CONFIG_VPA_PMU) += vpa-pmu.o
+obj-$(CONFIG_KVM_BOOK3S_HV_PMU) += kvm-hv-pmu.o
+
obj-$(CONFIG_PPC_8xx) += 8xx-pmu.o
obj-$(CONFIG_PPC64) += $(obj64-y)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index b906d28f74fd..8b0081441f85 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2239,6 +2239,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs)
{
u64 period = event->hw.sample_period;
+ const u64 last_period = event->hw.last_period;
s64 prev, delta, left;
int record = 0;
@@ -2320,7 +2321,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (record) {
struct perf_sample_data data;
- perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
+ perf_sample_data_init(&data, ~0ULL, last_period);
if (event->attr.sample_type & PERF_SAMPLE_ADDR_TYPE)
perf_get_data_addr(event, regs, &data.addr);
@@ -2343,12 +2344,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
ppmu->get_mem_weight(&data.weight.full, event->attr.sample_type);
data.sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
}
- if (perf_event_overflow(event, &data, regs))
- power_pmu_stop(event, 0);
+ perf_event_overflow(event, &data, regs);
} else if (period) {
/* Account for interrupt in case of invalid SIAR */
- if (perf_event_account_interrupt(event))
- power_pmu_stop(event, 0);
+ perf_event_account_interrupt(event);
}
}
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 1a53ab08447c..7120ab20cbfe 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -590,6 +590,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs)
{
u64 period = event->hw.sample_period;
+ const u64 last_period = event->hw.last_period;
s64 prev, delta, left;
int record = 0;
@@ -632,10 +633,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (record) {
struct perf_sample_data data;
- perf_sample_data_init(&data, 0, event->hw.last_period);
+ perf_sample_data_init(&data, 0, last_period);
- if (perf_event_overflow(event, &data, regs))
- fsl_emb_pmu_stop(event, 0);
+ perf_event_overflow(event, &data, regs);
}
}
diff --git a/arch/powerpc/perf/kvm-hv-pmu.c b/arch/powerpc/perf/kvm-hv-pmu.c
new file mode 100644
index 000000000000..ae264c9080ef
--- /dev/null
+++ b/arch/powerpc/perf/kvm-hv-pmu.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Description: PMUs specific to running nested KVM-HV guests
+ * on Book3S processors (specifically POWER9 and later).
+ */
+
+#define pr_fmt(fmt) "kvmppc-pmu: " fmt
+
+#include "asm-generic/local64.h"
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ratelimit.h>
+#include <linux/kvm_host.h>
+#include <linux/gfp_types.h>
+#include <linux/pgtable.h>
+#include <linux/perf_event.h>
+#include <linux/spinlock_types.h>
+#include <linux/spinlock.h>
+
+#include <asm/types.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu.h>
+#include <asm/pgalloc.h>
+#include <asm/pte-walk.h>
+#include <asm/reg.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/firmware.h>
+
+#include "asm/guest-state-buffer.h"
+
+enum kvmppc_pmu_eventid {
+ KVMPPC_EVENT_HOST_HEAP,
+ KVMPPC_EVENT_HOST_HEAP_MAX,
+ KVMPPC_EVENT_HOST_PGTABLE,
+ KVMPPC_EVENT_HOST_PGTABLE_MAX,
+ KVMPPC_EVENT_HOST_PGTABLE_RECLAIM,
+ KVMPPC_EVENT_MAX,
+};
+
+#define KVMPPC_PMU_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, kvmppc_events_sysfs_show, _id)
+
+static ssize_t kvmppc_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+/* Holds the hostwide stats */
+static struct kvmppc_hostwide_stats {
+ u64 guest_heap;
+ u64 guest_heap_max;
+ u64 guest_pgtable_size;
+ u64 guest_pgtable_size_max;
+ u64 guest_pgtable_reclaim;
+} l0_stats;
+
+/* Protect access to l0_stats */
+static DEFINE_SPINLOCK(lock_l0_stats);
+
+/* GSB related structs needed to talk to L0 */
+static struct kvmppc_gs_msg *gsm_l0_stats;
+static struct kvmppc_gs_buff *gsb_l0_stats;
+static struct kvmppc_gs_parser gsp_l0_stats;
+
+static struct attribute *kvmppc_pmu_events_attr[] = {
+ KVMPPC_PMU_EVENT_ATTR(host_heap, KVMPPC_EVENT_HOST_HEAP),
+ KVMPPC_PMU_EVENT_ATTR(host_heap_max, KVMPPC_EVENT_HOST_HEAP_MAX),
+ KVMPPC_PMU_EVENT_ATTR(host_pagetable, KVMPPC_EVENT_HOST_PGTABLE),
+ KVMPPC_PMU_EVENT_ATTR(host_pagetable_max, KVMPPC_EVENT_HOST_PGTABLE_MAX),
+ KVMPPC_PMU_EVENT_ATTR(host_pagetable_reclaim, KVMPPC_EVENT_HOST_PGTABLE_RECLAIM),
+ NULL,
+};
+
+static const struct attribute_group kvmppc_pmu_events_group = {
+ .name = "events",
+ .attrs = kvmppc_pmu_events_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-5");
+static struct attribute *kvmppc_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group kvmppc_pmu_format_group = {
+ .name = "format",
+ .attrs = kvmppc_pmu_format_attr,
+};
+
+static const struct attribute_group *kvmppc_pmu_attr_groups[] = {
+ &kvmppc_pmu_events_group,
+ &kvmppc_pmu_format_group,
+ NULL,
+};
+
+/*
+ * Issue the hcall to get the L0-host stats.
+ * Should be called with l0-stat lock held
+ */
+static int kvmppc_update_l0_stats(void)
+{
+ int rc;
+
+ /* With the HOST_WIDE flag, guestid and vcpuid will be ignored */
+ rc = kvmppc_gsb_recv(gsb_l0_stats, KVMPPC_GS_FLAGS_HOST_WIDE);
+ if (rc)
+ goto out;
+
+ /* Parse the guest state buffer and bail out on failure */
+ rc = kvmppc_gse_parse(&gsp_l0_stats, gsb_l0_stats);
+ if (rc)
+ goto out;
+
+ /* Update the stats returned by L0 */
+ memset(&l0_stats, 0, sizeof(l0_stats));
+ rc = kvmppc_gsm_refresh_info(gsm_l0_stats, gsb_l0_stats);
+
+out:
+ return rc;
+}
+
+/* Update the value of the given perf_event */
+static int kvmppc_pmu_event_update(struct perf_event *event)
+{
+ int rc;
+ u64 curr_val, prev_val;
+ unsigned long flags;
+ unsigned int config = event->attr.config;
+
+ /* Ensure no one else is modifying the l0_stats */
+ spin_lock_irqsave(&lock_l0_stats, flags);
+
+ rc = kvmppc_update_l0_stats();
+ if (!rc) {
+ switch (config) {
+ case KVMPPC_EVENT_HOST_HEAP:
+ curr_val = l0_stats.guest_heap;
+ break;
+ case KVMPPC_EVENT_HOST_HEAP_MAX:
+ curr_val = l0_stats.guest_heap_max;
+ break;
+ case KVMPPC_EVENT_HOST_PGTABLE:
+ curr_val = l0_stats.guest_pgtable_size;
+ break;
+ case KVMPPC_EVENT_HOST_PGTABLE_MAX:
+ curr_val = l0_stats.guest_pgtable_size_max;
+ break;
+ case KVMPPC_EVENT_HOST_PGTABLE_RECLAIM:
+ curr_val = l0_stats.guest_pgtable_reclaim;
+ break;
+ default:
+ rc = -ENOENT;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&lock_l0_stats, flags);
+
+ /* If there was no error then update the perf event */
+ if (!rc) {
+ prev_val = local64_xchg(&event->hw.prev_count, curr_val);
+ if (curr_val > prev_val)
+ local64_add(curr_val - prev_val, &event->count);
+ }
+
+ return rc;
+}
+
+static int kvmppc_pmu_event_init(struct perf_event *event)
+{
+ unsigned int config = event->attr.config;
+
+ pr_debug("%s: Event(%p) id=%llu cpu=%x on_cpu=%x config=%u",
+ __func__, event, event->id, event->cpu,
+ event->oncpu, config);
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (config >= KVMPPC_EVENT_MAX)
+ return -EINVAL;
+
+ local64_set(&event->hw.prev_count, 0);
+ local64_set(&event->count, 0);
+
+ return 0;
+}
+
+static void kvmppc_pmu_del(struct perf_event *event, int flags)
+{
+ kvmppc_pmu_event_update(event);
+}
+
+static int kvmppc_pmu_add(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_START)
+ return kvmppc_pmu_event_update(event);
+ return 0;
+}
+
+static void kvmppc_pmu_read(struct perf_event *event)
+{
+ kvmppc_pmu_event_update(event);
+}
+
+/* Return the size of the needed guest state buffer */
+static size_t hostwide_get_size(struct kvmppc_gs_msg *gsm)
+
+{
+ size_t size = 0;
+ const u16 ids[] = {
+ KVMPPC_GSID_L0_GUEST_HEAP,
+ KVMPPC_GSID_L0_GUEST_HEAP_MAX,
+ KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE,
+ KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX,
+ KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(ids); i++)
+ size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
+ return size;
+}
+
+/* Populate the request guest state buffer */
+static int hostwide_fill_info(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ int rc = 0;
+ struct kvmppc_hostwide_stats *stats = gsm->data;
+
+ /*
+ * It doesn't matter what values are put into the request buffer as
+ * they are going to be overwritten anyway. But for the sake of the
+ * test code and symmetry, the contents of the existing stats are
+ * populated into the request guest state buffer.
+ */
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP))
+ rc = kvmppc_gse_put_u64(gsb,
+ KVMPPC_GSID_L0_GUEST_HEAP,
+ stats->guest_heap);
+
+ if (!rc && kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX))
+ rc = kvmppc_gse_put_u64(gsb,
+ KVMPPC_GSID_L0_GUEST_HEAP_MAX,
+ stats->guest_heap_max);
+
+ if (!rc && kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE))
+ rc = kvmppc_gse_put_u64(gsb,
+ KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE,
+ stats->guest_pgtable_size);
+ if (!rc &&
+ kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX))
+ rc = kvmppc_gse_put_u64(gsb,
+ KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX,
+ stats->guest_pgtable_size_max);
+ if (!rc &&
+ kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM))
+ rc = kvmppc_gse_put_u64(gsb,
+ KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM,
+ stats->guest_pgtable_reclaim);
+
+ return rc;
+}
+
+/* Parse and update the host wide stats from returned gsb */
+static int hostwide_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmppc_hostwide_stats *stats = gsm->data;
+ struct kvmppc_gs_elem *gse;
+ int rc;
+
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ if (rc < 0)
+ return rc;
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP);
+ if (gse)
+ stats->guest_heap = kvmppc_gse_get_u64(gse);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP_MAX);
+ if (gse)
+ stats->guest_heap_max = kvmppc_gse_get_u64(gse);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE);
+ if (gse)
+ stats->guest_pgtable_size = kvmppc_gse_get_u64(gse);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX);
+ if (gse)
+ stats->guest_pgtable_size_max = kvmppc_gse_get_u64(gse);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM);
+ if (gse)
+ stats->guest_pgtable_reclaim = kvmppc_gse_get_u64(gse);
+
+ return 0;
+}
+
+/* gsb-message ops for setting up/parsing */
+static struct kvmppc_gs_msg_ops gsb_ops_l0_stats = {
+ .get_size = hostwide_get_size,
+ .fill_info = hostwide_fill_info,
+ .refresh_info = hostwide_refresh_info,
+};
+
+static int kvmppc_init_hostwide(void)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lock_l0_stats, flags);
+
+ /* already registered? */
+ if (gsm_l0_stats) {
+ rc = 0;
+ goto out;
+ }
+
+ /* setup the Guest state message/buffer to talk to L0 */
+ gsm_l0_stats = kvmppc_gsm_new(&gsb_ops_l0_stats, &l0_stats,
+ GSM_SEND, GFP_KERNEL);
+ if (!gsm_l0_stats) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Populate the Idents */
+ kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_HEAP);
+ kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_HEAP_MAX);
+ kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE);
+ kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX);
+ kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM);
+
+ /* allocate GSB. Guest/Vcpu Id is ignored */
+ gsb_l0_stats = kvmppc_gsb_new(kvmppc_gsm_size(gsm_l0_stats), 0, 0,
+ GFP_KERNEL);
+ if (!gsb_l0_stats) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* ask the ops to fill in the info */
+ rc = kvmppc_gsm_fill_info(gsm_l0_stats, gsb_l0_stats);
+
+out:
+ if (rc) {
+ if (gsm_l0_stats)
+ kvmppc_gsm_free(gsm_l0_stats);
+ if (gsb_l0_stats)
+ kvmppc_gsb_free(gsb_l0_stats);
+ gsm_l0_stats = NULL;
+ gsb_l0_stats = NULL;
+ }
+ spin_unlock_irqrestore(&lock_l0_stats, flags);
+ return rc;
+}
+
+static void kvmppc_cleanup_hostwide(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lock_l0_stats, flags);
+
+ if (gsm_l0_stats)
+ kvmppc_gsm_free(gsm_l0_stats);
+ if (gsb_l0_stats)
+ kvmppc_gsb_free(gsb_l0_stats);
+ gsm_l0_stats = NULL;
+ gsb_l0_stats = NULL;
+
+ spin_unlock_irqrestore(&lock_l0_stats, flags);
+}
+
+/* L1 wide counters PMU */
+static struct pmu kvmppc_pmu = {
+ .module = THIS_MODULE,
+ .task_ctx_nr = perf_sw_context,
+ .name = "kvm-hv",
+ .event_init = kvmppc_pmu_event_init,
+ .add = kvmppc_pmu_add,
+ .del = kvmppc_pmu_del,
+ .read = kvmppc_pmu_read,
+ .attr_groups = kvmppc_pmu_attr_groups,
+ .type = -1,
+ .scope = PERF_PMU_SCOPE_SYS_WIDE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+};
+
+static int __init kvmppc_register_pmu(void)
+{
+ int rc = -EOPNOTSUPP;
+
+ /* only support events for nestedv2 right now */
+ if (kvmhv_is_nestedv2()) {
+ rc = kvmppc_init_hostwide();
+ if (rc)
+ goto out;
+
+ /* Register the pmu */
+ rc = perf_pmu_register(&kvmppc_pmu, kvmppc_pmu.name, -1);
+ if (rc)
+ goto out;
+
+ pr_info("Registered kvm-hv pmu");
+ }
+
+out:
+ return rc;
+}
+
+static void __exit kvmppc_unregister_pmu(void)
+{
+ if (kvmhv_is_nestedv2()) {
+ kvmppc_cleanup_hostwide();
+
+ if (kvmppc_pmu.type != -1)
+ perf_pmu_unregister(&kvmppc_pmu);
+
+ pr_info("kvmhv_pmu unregistered.\n");
+ }
+}
+
+module_init(kvmppc_register_pmu);
+module_exit(kvmppc_unregister_pmu);
+MODULE_DESCRIPTION("KVM PPC Book3s-hv PMU");
+MODULE_AUTHOR("Vaibhav Jain <vaibhav@linux.ibm.com>");
+MODULE_LICENSE("GPL");
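
kvmppc_pmu_event_update() above treats each hostwide statistic as a free-running value: the freshly fetched value is swapped into prev_count and only the positive delta is added to the event count, so repeated reads accumulate growth rather than re-adding absolute snapshots. A small sketch of that xchg-and-add pattern with plain variables standing in for local64_t:

#include <stdint.h>
#include <stdio.h>

struct hw_counter {
	uint64_t prev_count;   /* stands in for event->hw.prev_count */
	uint64_t count;        /* stands in for event->count         */
};

/* Fold a newly read free-running value into the perf-style accumulator. */
static void counter_update(struct hw_counter *c, uint64_t curr_val)
{
	uint64_t prev_val = c->prev_count;   /* local64_xchg() in the kernel */

	c->prev_count = curr_val;
	if (curr_val > prev_val)
		c->count += curr_val - prev_val;
}

int main(void)
{
	struct hw_counter heap = { 0, 0 };

	counter_update(&heap, 4096);   /* first read adds the full value (prev starts at 0) */
	counter_update(&heap, 6144);   /* +2048                                             */
	counter_update(&heap, 6144);   /* no growth, nothing added                          */
	printf("accumulated: %llu\n", (unsigned long long)heap.count);
	return 0;
}

Assuming the PMU registers under the name "kvm-hv" with the events defined above, the counters would be read with something like perf stat -e kvm-hv/host_heap/ -a sleep 1, though that invocation is not part of this patch.
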
diff --git a/arch/powerpc/platforms/44x/gpio.c b/arch/powerpc/platforms/44x/gpio.c
index e5f2319e5cbe..d540e261d85a 100644
--- a/arch/powerpc/platforms/44x/gpio.c
+++ b/arch/powerpc/platforms/44x/gpio.c
@@ -75,8 +75,7 @@ __ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
clrbits32(&regs->or, GPIO_MASK(gpio));
}
-static void
-ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc);
unsigned long flags;
@@ -88,6 +87,8 @@ ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
spin_unlock_irqrestore(&chip->lock, flags);
pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+ return 0;
}
static int ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
@@ -179,7 +180,7 @@ static int __init ppc4xx_add_gpiochips(void)
gc->direction_input = ppc4xx_gpio_dir_in;
gc->direction_output = ppc4xx_gpio_dir_out;
gc->get = ppc4xx_gpio_get;
- gc->set = ppc4xx_gpio_set;
+ gc->set_rv = ppc4xx_gpio_set;
ret = of_mm_gpiochip_add_data(np, mm_gc, ppc4xx_gc);
if (ret)
diff --git a/arch/powerpc/platforms/44x/uic.c b/arch/powerpc/platforms/44x/uic.c
index 31f760c2ec5d..85daf841fd3f 100644
--- a/arch/powerpc/platforms/44x/uic.c
+++ b/arch/powerpc/platforms/44x/uic.c
@@ -254,8 +254,9 @@ static struct uic * __init uic_init_one(struct device_node *node)
}
uic->dcrbase = *dcrreg;
- uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops,
- uic);
+ uic->irqhost = irq_domain_create_linear(of_fwnode_handle(node),
+ NR_UIC_INTS, &uic_host_ops,
+ uic);
if (! uic->irqhost)
return NULL; /* FIXME: panic? */
@@ -327,5 +328,5 @@ unsigned int uic_get_irq(void)
msr = mfdcr(primary_uic->dcrbase + UIC_MSR);
src = 32 - ffs(msr);
- return irq_linear_revmap(primary_uic->irqhost, src);
+ return irq_find_mapping(primary_uic->irqhost, src);
}
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index e995eb30bf09..2cf3c6237337 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -188,7 +188,8 @@ mpc5121_ads_cpld_pic_init(void)
cpld_pic_node = of_node_get(np);
- cpld_pic_host = irq_domain_add_linear(np, 16, &cpld_pic_host_ops, NULL);
+ cpld_pic_host = irq_domain_create_linear(of_fwnode_handle(np), 16,
+ &cpld_pic_host_ops, NULL);
if (!cpld_pic_host) {
printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n");
goto end;
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 19626cd42406..bc7f83cfec1d 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -168,7 +168,7 @@ static void __init media5200_init_irq(void)
spin_lock_init(&media5200_irq.lock);
- media5200_irq.irqhost = irq_domain_add_linear(fpga_np,
+ media5200_irq.irqhost = irq_domain_create_linear(of_fwnode_handle(fpga_np),
MEDIA5200_NUM_IRQS, &media5200_irq_ops, &media5200_irq);
if (!media5200_irq.irqhost)
goto out;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 1ea591ec6083..bda707d848a6 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -247,9 +247,9 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
if (!cascade_virq)
return;
- gpt->irqhost = irq_domain_add_linear(node, 1, &mpc52xx_gpt_irq_ops, gpt);
+ gpt->irqhost = irq_domain_create_linear(of_fwnode_handle(node), 1, &mpc52xx_gpt_irq_ops, gpt);
if (!gpt->irqhost) {
- dev_err(gpt->dev, "irq_domain_add_linear() failed\n");
+ dev_err(gpt->dev, "irq_domain_create_linear() failed\n");
return;
}
@@ -280,7 +280,7 @@ static int mpc52xx_gpt_gpio_get(struct gpio_chip *gc, unsigned int gpio)
return (in_be32(&gpt->regs->status) >> 8) & 1;
}
-static void
+static int
mpc52xx_gpt_gpio_set(struct gpio_chip *gc, unsigned int gpio, int v)
{
struct mpc52xx_gpt_priv *gpt = gpiochip_get_data(gc);
@@ -293,6 +293,8 @@ mpc52xx_gpt_gpio_set(struct gpio_chip *gc, unsigned int gpio, int v)
raw_spin_lock_irqsave(&gpt->lock, flags);
clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_GPIO_MASK, r);
raw_spin_unlock_irqrestore(&gpt->lock, flags);
+
+ return 0;
}
static int mpc52xx_gpt_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
@@ -334,7 +336,7 @@ static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt)
gpt->gc.direction_input = mpc52xx_gpt_gpio_dir_in;
gpt->gc.direction_output = mpc52xx_gpt_gpio_dir_out;
gpt->gc.get = mpc52xx_gpt_gpio_get;
- gpt->gc.set = mpc52xx_gpt_gpio_set;
+ gpt->gc.set_rv = mpc52xx_gpt_gpio_set;
gpt->gc.base = -1;
gpt->gc.parent = gpt->dev;
@@ -369,7 +371,7 @@ struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq)
mutex_lock(&mpc52xx_gpt_list_mutex);
list_for_each(pos, &mpc52xx_gpt_list) {
gpt = container_of(pos, struct mpc52xx_gpt_priv, list);
- if (gpt->irqhost && irq == irq_linear_revmap(gpt->irqhost, 0)) {
+ if (gpt->irqhost && irq == irq_find_mapping(gpt->irqhost, 0)) {
mutex_unlock(&mpc52xx_gpt_list_mutex);
return gpt;
}
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 43c881d31ca6..eb6a4e745c08 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -446,7 +446,7 @@ void __init mpc52xx_init_irq(void)
* As last step, add an irq host to translate the real
* hw irq information provided by the ofw to linux virq
*/
- mpc52xx_irqhost = irq_domain_add_linear(picnode,
+ mpc52xx_irqhost = irq_domain_create_linear(of_fwnode_handle(picnode),
MPC52xx_IRQ_HIGHTESTHWIRQ,
&mpc52xx_irqhost_ops, NULL);
@@ -515,5 +515,5 @@ unsigned int mpc52xx_get_irq(void)
return 0;
}
- return irq_linear_revmap(mpc52xx_irqhost, irq);
+ return irq_find_mapping(mpc52xx_irqhost, irq);
}
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index 4d8fa9ed1a67..6e37dfc6c5c9 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -92,10 +92,11 @@ static void mcu_power_off(void)
mutex_unlock(&mcu->lock);
}
-static void mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct mcu *mcu = gpiochip_get_data(gc);
u8 bit = 1 << (4 + gpio);
+ int ret;
mutex_lock(&mcu->lock);
if (val)
@@ -103,14 +104,16 @@ static void mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
else
mcu->reg_ctrl |= bit;
- i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL, mcu->reg_ctrl);
+ ret = i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL,
+ mcu->reg_ctrl);
mutex_unlock(&mcu->lock);
+
+ return ret;
}
static int mcu_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- mcu_gpio_set(gc, gpio, val);
- return 0;
+ return mcu_gpio_set(gc, gpio, val);
}
static int mcu_gpiochip_add(struct mcu *mcu)
@@ -123,7 +126,7 @@ static int mcu_gpiochip_add(struct mcu *mcu)
gc->can_sleep = 1;
gc->ngpio = MCU_NUM_GPIO;
gc->base = -1;
- gc->set = mcu_gpio_set;
+ gc->set_rv = mcu_gpio_set;
gc->direction_output = mcu_gpio_dir_out;
gc->parent = dev;
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 60e0b8947ce6..4b69fb321a68 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -83,7 +83,7 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq)
if (cause >> (i + 16))
break;
}
- return irq_linear_revmap(socrates_fpga_pic_irq_host,
+ return irq_find_mapping(socrates_fpga_pic_irq_host,
(irq_hw_number_t)i);
}
@@ -278,7 +278,7 @@ void __init socrates_fpga_pic_init(struct device_node *pic)
int i;
/* Setup an irq_domain structure */
- socrates_fpga_pic_irq_host = irq_domain_add_linear(pic,
+ socrates_fpga_pic_irq_host = irq_domain_create_linear(of_fwnode_handle(pic),
SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL);
if (socrates_fpga_pic_irq_host == NULL) {
pr_err("FPGA PIC: Unable to allocate host\n");
diff --git a/arch/powerpc/platforms/8xx/cpm1-ic.c b/arch/powerpc/platforms/8xx/cpm1-ic.c
index a18fc7c99f83..a49d4a9ab3bc 100644
--- a/arch/powerpc/platforms/8xx/cpm1-ic.c
+++ b/arch/powerpc/platforms/8xx/cpm1-ic.c
@@ -59,7 +59,7 @@ static int cpm_get_irq(struct irq_desc *desc)
cpm_vec = in_be16(&data->reg->cpic_civr);
cpm_vec >>= 11;
- return irq_linear_revmap(data->host, cpm_vec);
+ return irq_find_mapping(data->host, cpm_vec);
}
static void cpm_cascade(struct irq_desc *desc)
@@ -110,7 +110,8 @@ static int cpm_pic_probe(struct platform_device *pdev)
out_be32(&data->reg->cpic_cimr, 0);
- data->host = irq_domain_add_linear(dev->of_node, 64, &cpm_pic_host_ops, data);
+ data->host = irq_domain_create_linear(of_fwnode_handle(dev->of_node),
+ 64, &cpm_pic_host_ops, data);
if (!data->host)
return -ENODEV;
diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c
index 1dc095ad48fc..7462c221115c 100644
--- a/arch/powerpc/platforms/8xx/cpm1.c
+++ b/arch/powerpc/platforms/8xx/cpm1.c
@@ -417,7 +417,7 @@ static void __cpm1_gpio16_set(struct cpm1_gpio16_chip *cpm1_gc, u16 pin_mask, in
out_be16(&iop->dat, cpm1_gc->cpdata);
}
-static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value)
+static int cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc);
unsigned long flags;
@@ -428,6 +428,8 @@ static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value)
__cpm1_gpio16_set(cpm1_gc, pin_mask, value);
spin_unlock_irqrestore(&cpm1_gc->lock, flags);
+
+ return 0;
}
static int cpm1_gpio16_to_irq(struct gpio_chip *gc, unsigned int gpio)
@@ -497,7 +499,7 @@ int cpm1_gpiochip_add16(struct device *dev)
gc->direction_input = cpm1_gpio16_dir_in;
gc->direction_output = cpm1_gpio16_dir_out;
gc->get = cpm1_gpio16_get;
- gc->set = cpm1_gpio16_set;
+ gc->set_rv = cpm1_gpio16_set;
gc->to_irq = cpm1_gpio16_to_irq;
gc->parent = dev;
gc->owner = THIS_MODULE;
@@ -554,7 +556,7 @@ static void __cpm1_gpio32_set(struct cpm1_gpio32_chip *cpm1_gc, u32 pin_mask, in
out_be32(&iop->dat, cpm1_gc->cpdata);
}
-static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
+static int cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc);
unsigned long flags;
@@ -565,6 +567,8 @@ static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
__cpm1_gpio32_set(cpm1_gc, pin_mask, value);
spin_unlock_irqrestore(&cpm1_gc->lock, flags);
+
+ return 0;
}
static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -618,7 +622,7 @@ int cpm1_gpiochip_add32(struct device *dev)
gc->direction_input = cpm1_gpio32_dir_in;
gc->direction_output = cpm1_gpio32_dir_out;
gc->get = cpm1_gpio32_get;
- gc->set = cpm1_gpio32_set;
+ gc->set_rv = cpm1_gpio32_set;
gc->parent = dev;
gc->owner = THIS_MODULE;
diff --git a/arch/powerpc/platforms/8xx/pic.c b/arch/powerpc/platforms/8xx/pic.c
index ea6b0e523c60..933d6ab7f512 100644
--- a/arch/powerpc/platforms/8xx/pic.c
+++ b/arch/powerpc/platforms/8xx/pic.c
@@ -80,7 +80,7 @@ unsigned int mpc8xx_get_irq(void)
if (irq == PIC_VEC_SPURRIOUS)
return 0;
- return irq_linear_revmap(mpc8xx_pic_host, irq);
+ return irq_find_mapping(mpc8xx_pic_host, irq);
}
@@ -146,7 +146,8 @@ void __init mpc8xx_pic_init(void)
if (!siu_reg)
goto out;
- mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL);
+ mpc8xx_pic_host = irq_domain_create_linear(of_fwnode_handle(np), 64,
+ &mpc8xx_pic_host_ops, NULL);
if (!mpc8xx_pic_host)
printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index 013d66304c31..91a8f0a7086e 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -149,8 +149,9 @@ static struct irq_domain * __init flipper_pic_init(struct device_node *np)
__flipper_quiesce(io_base);
- irq_domain = irq_domain_add_linear(np, FLIPPER_NR_IRQS,
- &flipper_irq_domain_ops, io_base);
+ irq_domain = irq_domain_create_linear(of_fwnode_handle(np),
+ FLIPPER_NR_IRQS,
+ &flipper_irq_domain_ops, io_base);
if (!irq_domain) {
pr_err("failed to allocate irq_domain\n");
return NULL;
@@ -172,7 +173,7 @@ unsigned int flipper_pic_get_irq(void)
return 0; /* no more IRQs pending */
irq = __ffs(irq_status);
- return irq_linear_revmap(flipper_irq_host, irq);
+ return irq_find_mapping(flipper_irq_host, irq);
}
/*
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 4d2d92de30af..b57e87b0b3ce 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -175,8 +175,9 @@ static struct irq_domain *__init hlwd_pic_init(struct device_node *np)
__hlwd_quiesce(io_base);
- irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS,
- &hlwd_irq_domain_ops, io_base);
+ irq_domain = irq_domain_create_linear(of_fwnode_handle(np),
+ HLWD_NR_IRQS,
+ &hlwd_irq_domain_ops, io_base);
if (!irq_domain) {
pr_err("failed to allocate irq_domain\n");
iounmap(io_base);
@@ -189,7 +190,7 @@ static struct irq_domain *__init hlwd_pic_init(struct device_node *np)
unsigned int hlwd_pic_get_irq(void)
{
unsigned int hwirq = __hlwd_pic_get_irq(hlwd_irq_host);
- return hwirq ? irq_linear_revmap(hlwd_irq_host, hwirq) : 0;
+ return hwirq ? irq_find_mapping(hlwd_irq_host, hwirq) : 0;
}
/*
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 03a7c51f2645..c37783a03d25 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -250,7 +250,7 @@ static unsigned int pmac_pic_get_irq(void)
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
if (unlikely(irq < 0))
return 0;
- return irq_linear_revmap(pmac_pic_host, irq);
+ return irq_find_mapping(pmac_pic_host, irq);
}
static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node,
@@ -327,8 +327,9 @@ static void __init pmac_pic_probe_oldstyle(void)
/*
* Allocate an irq host
*/
- pmac_pic_host = irq_domain_add_linear(master, max_irqs,
- &pmac_pic_host_ops, NULL);
+ pmac_pic_host = irq_domain_create_linear(of_fwnode_handle(master),
+ max_irqs,
+ &pmac_pic_host_ops, NULL);
BUG_ON(pmac_pic_host == NULL);
irq_set_default_domain(pmac_pic_host);
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 6de1cd5d8a58..e119ced05d10 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -45,6 +45,7 @@
#include <linux/root_dev.h>
#include <linux/bitops.h>
#include <linux/suspend.h>
+#include <linux/string_choices.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -238,8 +239,7 @@ static void __init l2cr_init(void)
_set_L2CR(0);
_set_L2CR(*l2cr);
pr_info("L2CR overridden (0x%x), backside cache is %s\n",
- *l2cr, ((*l2cr) & 0x80000000) ?
- "enabled" : "disabled");
+ *l2cr, str_enabled_disabled((*l2cr) & 0x80000000));
}
of_node_put(np);
break;
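
The printk conversions above and below rely on helpers from <linux/string_choices.h>, which map a boolean to one of two fixed strings. An illustrative sketch (variable names are made up):

#include <linux/string_choices.h>

	pr_info("backside cache is %s\n", str_enabled_disabled(l2cr_val & 0x80000000));
	pr_info("DST: %s\n", str_on_off(dst));
	pr_info("op: %s\n", str_write_read(is_write));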
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 09e7fe24fac1..88e92af8acf9 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -190,7 +190,7 @@ static int __init psurge_secondary_ipi_init(void)
{
int rc = -ENOMEM;
- psurge_host = irq_domain_add_nomap(NULL, ~0, &psurge_host_ops, NULL);
+ psurge_host = irq_domain_create_nomap(NULL, ~0, &psurge_host_ops, NULL);
if (psurge_host)
psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index 8633891b7aa5..b4426a35aca3 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/time.h>
@@ -77,7 +78,7 @@ long __init pmac_time_init(void)
delta |= 0xFF000000UL;
dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
- dst ? "on" : "off");
+ str_on_off(dst));
#endif
return delta;
}
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 3fbe0295ce14..95d7ba73d43d 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -17,7 +17,7 @@ config PPC_POWERNV
select MMU_NOTIFIER
select FORCE_SMP
select ARCH_SUPPORTS_PER_VMA_LOCK
- select PPC_RADIX_BROADCAST_TLBIE
+ select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU
default y
config OPAL_PRD
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index d92759c21fae..e180bd8e1400 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -191,7 +191,8 @@ int __init opal_event_init(void)
* fall back to the legacy method (opal_event_request(...))
* anyway. */
dn = of_find_compatible_node(NULL, NULL, "ibm,opal-event");
- opal_event_irqchip.domain = irq_domain_add_linear(dn, MAX_NUM_EVENTS,
+ opal_event_irqchip.domain = irq_domain_create_linear(of_fwnode_handle(dn),
+ MAX_NUM_EVENTS,
&opal_event_domain_ops, &opal_event_irqchip);
of_node_put(dn);
if (!opal_event_irqchip.domain) {
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index ae4b549b5ca0..d8ccf2c9b98a 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1897,7 +1897,7 @@ static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned
return -ENOMEM;
}
- hose->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(hose->dn),
+ hose->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(hose->dn),
&pnv_msi_domain_info,
hose->dev_domain);
if (!hose->msi_domain) {
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index 61722133eb2d..22d91ac424dd 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/rcuwait.h>
+#include <linux/string_choices.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
@@ -724,7 +725,7 @@ static irqreturn_t ps3_notification_interrupt(int irq, void *data)
static int ps3_notification_read_write(struct ps3_notification_device *dev,
u64 lpar, int write)
{
- const char *op = write ? "write" : "read";
+ const char *op = str_write_read(write);
unsigned long flags;
int res;
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 95e96bd61a20..a4ad4b49eef7 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -743,7 +743,7 @@ void __init ps3_init_IRQ(void)
unsigned cpu;
struct irq_domain *host;
- host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
+ host = irq_domain_create_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
irq_set_default_domain(host);
for_each_possible_cpu(cpu) {
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index a934c2a262f6..fa3c2fff082a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -23,7 +23,7 @@ config PPC_PSERIES
select FORCE_SMP
select SWIOTLB
select ARCH_SUPPORTS_PER_VMA_LOCK
- select PPC_RADIX_BROADCAST_TLBIE
+ select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU
default y
config PARAVIRT
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 3f3e3492e436..57222678bb3f 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -3,7 +3,8 @@ ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG
obj-y := lpar.o hvCall.o nvram.o reconfig.o \
of_helpers.o rtas-work-area.o papr-sysparm.o \
- papr-vpd.o \
+ papr-rtas-common.o papr-vpd.o papr-indices.o \
+ papr-platform-dump.o papr-phy-attest.o \
setup.o iommu.o event_sources.o ras.o \
firmware.o power.o dlpar.o mobility.o rng.o \
pci.o pci_dlpar.o eeh_pseries.o msi.o \
diff --git a/arch/powerpc/platforms/pseries/htmdump.c b/arch/powerpc/platforms/pseries/htmdump.c
index 57fc1700f604..742ec52c9d4d 100644
--- a/arch/powerpc/platforms/pseries/htmdump.c
+++ b/arch/powerpc/platforms/pseries/htmdump.c
@@ -10,28 +10,40 @@
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/plpar_wrappers.h>
+#include <asm/kvm_guest.h>
static void *htm_buf;
+static void *htm_status_buf;
+static void *htm_info_buf;
+static void *htm_caps_buf;
static u32 nodeindex;
static u32 nodalchipindex;
static u32 coreindexonchip;
static u32 htmtype;
+static u32 htmconfigure;
+static u32 htmstart;
+static u32 htmsetup;
+static u64 htmflags;
+
static struct dentry *htmdump_debugfs_dir;
+#define HTM_ENABLE 1
+#define HTM_DISABLE 0
+#define HTM_NOWRAP 1
+#define HTM_WRAP 0
-static ssize_t htmdump_read(struct file *filp, char __user *ubuf,
- size_t count, loff_t *ppos)
+/*
+ * Check the return code for H_HTM hcall.
+ * Return non-zero value (1) if either H_PARTIAL or H_SUCCESS
+ * is returned. For other return codes:
+ * Return zero if H_NOT_AVAILABLE.
+ * Return -EBUSY if the hcall returns busy.
+ * Return -EINVAL if any parameter or operation is not valid.
+ * Return -EPERM if HTM Virtualization Engine Technology code
+ * is not applied.
+ * Return -EIO if the HTM state is not valid.
+ */
+static ssize_t htm_return_check(long rc)
{
- void *htm_buf = filp->private_data;
- unsigned long page, read_size, available;
- loff_t offset;
- long rc;
-
- page = ALIGN_DOWN(*ppos, PAGE_SIZE);
- offset = (*ppos) % PAGE_SIZE;
-
- rc = htm_get_dump_hardware(nodeindex, nodalchipindex, coreindexonchip,
- htmtype, virt_to_phys(htm_buf), PAGE_SIZE, page);
-
switch (rc) {
case H_SUCCESS:
/* H_PARTIAL for the case where all available data can't be
@@ -65,6 +77,38 @@ static ssize_t htmdump_read(struct file *filp, char __user *ubuf,
return -EPERM;
}
+ /*
+ * Return 1 for H_SUCCESS/H_PARTIAL
+ */
+ return 1;
+}
+
+static ssize_t htmdump_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ void *htm_buf = filp->private_data;
+ unsigned long page, read_size, available;
+ loff_t offset;
+ long rc, ret;
+
+ page = ALIGN_DOWN(*ppos, PAGE_SIZE);
+ offset = (*ppos) % PAGE_SIZE;
+
+ /*
+ * Invoke H_HTM call with:
+ * - operation as htm dump (H_HTM_OP_DUMP_DATA)
+ * - last three values are address, size and offset
+ */
+ rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip,
+ htmtype, H_HTM_OP_DUMP_DATA, virt_to_phys(htm_buf),
+ PAGE_SIZE, page);
+
+ ret = htm_return_check(rc);
+ if (ret <= 0) {
+ pr_debug("H_HTM hcall failed for op: H_HTM_OP_DUMP_DATA, returning %ld\n", ret);
+ return ret;
+ }
+
available = PAGE_SIZE;
read_size = min(count, available);
*ppos += read_size;
@@ -77,6 +121,292 @@ static const struct file_operations htmdump_fops = {
.open = simple_open,
};
+static int htmconfigure_set(void *data, u64 val)
+{
+ long rc, ret;
+ unsigned long param1 = -1, param2 = -1;
+
+ /*
+ * value as 1 : configure HTM.
+ * value as 0 : deconfigure HTM. Return -EINVAL for
+ * other values.
+ */
+ if (val == HTM_ENABLE) {
+ /*
+ * Invoke H_HTM call with:
+ * - operation as htm configure (H_HTM_OP_CONFIGURE)
+ * - If htmflags is set, param1 and param2 will be -1
+ * which is an indicator to use default htm mode reg mask
+ * and htm mode reg value.
+ * - last three values are unused, hence set to zero
+ */
+ if (!htmflags) {
+ param1 = 0;
+ param2 = 0;
+ }
+
+ rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip,
+ htmtype, H_HTM_OP_CONFIGURE, param1, param2, 0);
+ } else if (val == HTM_DISABLE) {
+ /*
+ * Invoke H_HTM call with:
+ * - operation as htm deconfigure (H_HTM_OP_DECONFIGURE)
+ * - last three values are unused, hence set to zero
+ */
+ rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip,
+ htmtype, H_HTM_OP_DECONFIGURE, 0, 0, 0);
+ } else
+ return -EINVAL;
+
+ ret = htm_return_check(rc);
+ if (ret <= 0) {
+ pr_debug("H_HTM hcall failed, returning %ld\n", ret);
+ return ret;
+ }
+
+ /* Set htmconfigure if operation succeeds */
+ htmconfigure = val;
+
+ return 0;
+}
+
+static int htmconfigure_get(void *data, u64 *val)
+{
+ *val = htmconfigure;
+ return 0;
+}
+
+static int htmstart_set(void *data, u64 val)
+{
+ long rc, ret;
+
+ /*
+ * value as 1: start HTM
+ * value as 0: stop HTM
+ * Return -EINVAL for other values.
+ */
+ if (val == HTM_ENABLE) {
+ /*
+ * Invoke H_HTM call with:
+ * - operation as htm start (H_HTM_OP_START)
+ * - last three values are unused, hence set to zero
+ */
+ rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip,
+ htmtype, H_HTM_OP_START, 0, 0, 0);
+
+ } else if (val == HTM_DISABLE) {
+ /*
+ * Invoke H_HTM call with:
+ * - operation as htm stop (H_HTM_OP_STOP)
+ * - last three values are unused, hence set to zero
+ */
+ rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip,
+ htmtype, H_HTM_OP_STOP, 0, 0, 0);
+ } else
+ return -EINVAL;
+
+ ret = htm_return_check(rc);
+ if (ret <= 0) {
+ pr_debug("H_HTM hcall failed, returning %ld\n", ret);
+ return ret;
+ }
+
+ /* Set htmstart if H_HTM_OP_START/H_HTM_OP_STOP operation succeeds */
+ htmstart = val;
+
+ return 0;
+}
+
+static int htmstart_get(void *data, u64 *val)
+{
+ *val = htmstart;
+ return 0;
+}
+
+static ssize_t htmstatus_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ void *htm_status_buf = filp->private_data;
+ long rc, ret;
+ u64 *num_entries;
+ u64 to_copy;
+ int htmstatus_flag;
+
+ /*
+ * Invoke H_HTM call with:
+ * - operation as htm status (H_HTM_OP_STATUS)
+ * - last three values as addr, size and offset
+ */
+ rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip,
+ htmtype, H_HTM_OP_STATUS, virt_to_phys(htm_status_buf),
+ PAGE_SIZE, 0);
+
+ ret = htm_return_check(rc);
+ if (ret <= 0) {
+ pr_debug("H_HTM hcall failed for op: H_HTM_OP_STATUS, returning %ld\n", ret);
+ return ret;
+ }
+
+ /*
+ * HTM status buffer, start of buffer + 0x10 gives the
+ * number of HTM entries in the buffer. Each nest HTM status
+ * entry is 0x6 bytes, while each core HTM status entry is
+ * 0x8 bytes.
+ * So total count to copy is:
+ * 32 bytes (for first 7 fields) + (number of HTM entries * entry size)
+ */
+ num_entries = htm_status_buf + 0x10;
+ if (htmtype == 0x2)
+ htmstatus_flag = 0x8;
+ else
+ htmstatus_flag = 0x6;
+ to_copy = 32 + (be64_to_cpu(*num_entries) * htmstatus_flag);
+ return simple_read_from_buffer(ubuf, count, ppos, htm_status_buf, to_copy);
+}
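
As a worked example of the size computation above: with htmtype 0x2 the per-entry size is 0x8 bytes, so if the buffer reports 4 entries at offset 0x10, to_copy = 32 + 4 * 8 = 64 bytes are handed to simple_read_from_buffer(); for any other htmtype the entry size is 0x6 bytes and the same 4 entries would give 32 + 4 * 6 = 56 bytes.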
+
+static const struct file_operations htmstatus_fops = {
+ .llseek = NULL,
+ .read = htmstatus_read,
+ .open = simple_open,
+};
+
+static ssize_t htminfo_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ void *htm_info_buf = filp->private_data;
+ long rc, ret;
+ u64 *num_entries;
+ u64 to_copy;
+
+ /*
+ * Invoke H_HTM call with:
+ * - operation as htm status (H_HTM_OP_STATUS)
+ * - last three values as addr, size and offset
+ */
+ rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip,
+ htmtype, H_HTM_OP_DUMP_SYSPROC_CONF, virt_to_phys(htm_info_buf),
+ PAGE_SIZE, 0);
+
+ ret = htm_return_check(rc);
+ if (ret <= 0) {
+ pr_debug("H_HTM hcall failed for op: H_HTM_OP_DUMP_SYSPROC_CONF, returning %ld\n", ret);
+ return ret;
+ }
+
+ /*
+ * HTM status buffer, start of buffer + 0x10 gives the
+ * number of HTM entries in the buffer. Each processor entry
+ * is 16 bytes.
+ *
+ * So total count to copy is:
+ * 32 bytes (for first 5 fields) + (number of HTM entries * entry size)
+ */
+ num_entries = htm_info_buf + 0x10;
+ to_copy = 32 + (be64_to_cpu(*num_entries) * 16);
+ return simple_read_from_buffer(ubuf, count, ppos, htm_info_buf, to_copy);
+}
+
+static ssize_t htmcaps_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ void *htm_caps_buf = filp->private_data;
+ long rc, ret;
+
+ /*
+ * Invoke H_HTM call with:
+ * - operation as htm capabilities (H_HTM_OP_CAPABILITIES)
+ * - last three values as addr, size (0x80 for the Capabilities Output
+ * Buffer) and zero
+ */
+ rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip,
+ htmtype, H_HTM_OP_CAPABILITIES, virt_to_phys(htm_caps_buf),
+ 0x80, 0);
+
+ ret = htm_return_check(rc);
+ if (ret <= 0) {
+ pr_debug("H_HTM hcall failed for op: H_HTM_OP_CAPABILITIES, returning %ld\n", ret);
+ return ret;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, htm_caps_buf, 0x80);
+}
+
+static const struct file_operations htminfo_fops = {
+ .llseek = NULL,
+ .read = htminfo_read,
+ .open = simple_open,
+};
+
+static const struct file_operations htmcaps_fops = {
+ .llseek = NULL,
+ .read = htmcaps_read,
+ .open = simple_open,
+};
+
+static int htmsetup_set(void *data, u64 val)
+{
+ long rc, ret;
+
+ /*
+ * Input value: HTM buffer size as a power of 2.
+ * Example: hex value 0x21 (decimal: 33) selects an
+ * 8GB buffer.
+ * Invoke H_HTM call with:
+ * - operation as htm start (H_HTM_OP_SETUP)
+ * - parameter 1 set to input value.
+ * - last two values are unused, hence set to zero
+ */
+ rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip,
+ htmtype, H_HTM_OP_SETUP, val, 0, 0);
+
+ ret = htm_return_check(rc);
+ if (ret <= 0) {
+ pr_debug("H_HTM hcall failed for op: H_HTM_OP_SETUP, returning %ld\n", ret);
+ return ret;
+ }
+
+ /* Set htmsetup if H_HTM_OP_SETUP operation succeeds */
+ htmsetup = val;
+
+ return 0;
+}
+
+static int htmsetup_get(void *data, u64 *val)
+{
+ *val = htmsetup;
+ return 0;
+}
+
+static int htmflags_set(void *data, u64 val)
+{
+ /*
+ * Input value:
+ * The currently supported flag enables/disables HTM
+ * buffer wrap. It is used along with "configure" to
+ * prevent the HTM buffer from wrapping.
+ * Writing 1 sets noWrap while configuring HTM.
+ */
+ if (val == HTM_NOWRAP)
+ htmflags = H_HTM_FLAGS_NOWRAP;
+ else if (val == HTM_WRAP)
+ htmflags = 0;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int htmflags_get(void *data, u64 *val)
+{
+ *val = htmflags;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(htmconfigure_fops, htmconfigure_get, htmconfigure_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(htmstart_fops, htmstart_get, htmstart_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(htmsetup_fops, htmsetup_get, htmsetup_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(htmflags_fops, htmflags_get, htmflags_set, "%llu\n");
+
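
DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations that parse and print a u64 through the supplied get/set pair, so each control file below needs only a debugfs_create_file() call. A minimal sketch of the same pattern for a hypothetical control named "example" (parent_dir is a placeholder dentry):

static u64 example_state;

static int example_get(void *data, u64 *val)
{
	*val = example_state;
	return 0;
}

static int example_set(void *data, u64 val)
{
	if (val > 1)
		return -EINVAL;
	example_state = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

	/* in the init path */
	debugfs_create_file("example", 0600, parent_dir, NULL, &example_fops);

Writing "1" to the file then invokes example_set(NULL, 1), and reading it prints the current value via example_get().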
static int htmdump_init_debugfs(void)
{
htm_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -98,11 +428,50 @@ static int htmdump_init_debugfs(void)
htmdump_debugfs_dir, &htmtype);
debugfs_create_file("trace", 0400, htmdump_debugfs_dir, htm_buf, &htmdump_fops);
+ /*
+ * Debugfs interface files to control HTM operations:
+ */
+ debugfs_create_file("htmconfigure", 0600, htmdump_debugfs_dir, NULL, &htmconfigure_fops);
+ debugfs_create_file("htmstart", 0600, htmdump_debugfs_dir, NULL, &htmstart_fops);
+ debugfs_create_file("htmsetup", 0600, htmdump_debugfs_dir, NULL, &htmsetup_fops);
+ debugfs_create_file("htmflags", 0600, htmdump_debugfs_dir, NULL, &htmflags_fops);
+
+ /* Debugfs interface file to present status of HTM */
+ htm_status_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!htm_status_buf) {
+ pr_err("Failed to allocate htmstatus buf\n");
+ return -ENOMEM;
+ }
+
+ /* Debugfs interface file to present System Processor Configuration */
+ htm_info_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!htm_info_buf) {
+ pr_err("Failed to allocate htm info buf\n");
+ return -ENOMEM;
+ }
+
+ /* Debugfs interface file to present HTM capabilities */
+ htm_caps_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!htm_caps_buf) {
+ pr_err("Failed to allocate htm caps buf\n");
+ return -ENOMEM;
+ }
+
+ debugfs_create_file("htmstatus", 0400, htmdump_debugfs_dir, htm_status_buf, &htmstatus_fops);
+ debugfs_create_file("htminfo", 0400, htmdump_debugfs_dir, htm_info_buf, &htminfo_fops);
+ debugfs_create_file("htmcaps", 0400, htmdump_debugfs_dir, htm_caps_buf, &htmcaps_fops);
+
return 0;
}
static int __init htmdump_init(void)
{
+ /* Disable on kvm guest */
+ if (is_kvm_guest()) {
+ pr_info("htmdump not supported inside KVM guest\n");
+ return -EOPNOTSUPP;
+ }
+
if (htmdump_init_debugfs())
return -ENOMEM;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index d6ebc19fb99c..eec333dd2e59 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -197,7 +197,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
static void tce_free_pSeries(struct iommu_table *tbl)
{
- if (!tbl->it_userspace)
+ if (tbl->it_userspace)
tce_iommu_userspace_view_free(tbl);
}
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index f9d80111c322..ee1c8c6898a3 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -525,7 +525,12 @@ static struct msi_domain_info pseries_msi_domain_info = {
static void pseries_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
- __pci_read_msi_msg(irq_data_get_msi_desc(data), msg);
+ struct pci_dev *dev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
+
+ if (dev->current_state == PCI_D0)
+ __pci_read_msi_msg(irq_data_get_msi_desc(data), msg);
+ else
+ get_cached_msi_msg(data->irq, msg);
}
static struct irq_chip pseries_msi_irq_chip = {
@@ -628,7 +633,7 @@ static int __pseries_msi_allocate_domains(struct pci_controller *phb,
return -ENOMEM;
}
- phb->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(phb->dn),
+ phb->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(phb->dn),
&pseries_msi_domain_info,
phb->dev_domain);
if (!phb->msi_domain) {
diff --git a/arch/powerpc/platforms/pseries/papr-indices.c b/arch/powerpc/platforms/pseries/papr-indices.c
new file mode 100644
index 000000000000..3c7545591c45
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr-indices.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "papr-indices: " fmt
+
+#include <linux/build_bug.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/lockdep.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+#include <linux/uaccess.h>
+#include <asm/machdep.h>
+#include <asm/rtas-work-area.h>
+#include <asm/rtas.h>
+#include <uapi/asm/papr-indices.h>
+#include "papr-rtas-common.h"
+
+/*
+ * Function-specific return values for ibm,set-dynamic-indicator and
+ * ibm,get-dynamic-sensor-state RTAS calls.
+ * PAPR+ v2.13 7.3.18 and 7.3.19.
+ */
+#define RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR -3
+
+/**
+ * struct rtas_get_indices_params - Parameters (in and out) for
+ * ibm,get-indices.
+ * @is_sensor: In: Caller-provided flag indicating sensor or indicator.
+ * @indice_type: In: Caller-provided indice (sensor or indicator) token.
+ * @work_area: In: Caller-provided work area buffer for results.
+ * @next: In: Sequence number. Out: Next sequence number.
+ * @status: Out: RTAS call status.
+ */
+struct rtas_get_indices_params {
+ u8 is_sensor;
+ u32 indice_type;
+ struct rtas_work_area *work_area;
+ u32 next;
+ s32 status;
+};
+
+/*
+ * rtas_ibm_get_indices() - Call ibm,get-indices to fill a work area buffer.
+ * @params: See &struct rtas_ibm_get_indices_params.
+ *
+ * Calls ibm,get-indices until it errors or successfully deposits data
+ * into the supplied work area. Handles RTAS retry statuses. Maps RTAS
+ * error statuses to reasonable errno values.
+ *
+ * The caller is expected to invoke rtas_ibm_get_indices() multiple times
+ * to retrieve all indices data for the provided indice type. Only one
+ * sequence should be in progress at any time; starting a new sequence
+ * will disrupt any sequence already in progress. Serialization of
+ * indices retrieval sequences is the responsibility of the caller.
+ *
+ * The caller should inspect @params.status to determine whether more
+ * calls are needed to complete the sequence.
+ *
+ * Context: May sleep.
+ * Return: -ve on error, 0 otherwise.
+ */
+static int rtas_ibm_get_indices(struct rtas_get_indices_params *params)
+{
+ struct rtas_work_area *work_area = params->work_area;
+ const s32 token = rtas_function_token(RTAS_FN_IBM_GET_INDICES);
+ u32 rets;
+ s32 fwrc;
+ int ret;
+
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -ENOENT;
+
+ lockdep_assert_held(&rtas_ibm_get_indices_lock);
+
+ do {
+ fwrc = rtas_call(token, 5, 2, &rets, params->is_sensor,
+ params->indice_type,
+ rtas_work_area_phys(work_area),
+ rtas_work_area_size(work_area),
+ params->next);
+ } while (rtas_busy_delay(fwrc));
+
+ switch (fwrc) {
+ case RTAS_HARDWARE_ERROR:
+ ret = -EIO;
+ break;
+ case RTAS_INVALID_PARAMETER: /* Indicator type is not supported */
+ ret = -EINVAL;
+ break;
+ case RTAS_SEQ_START_OVER:
+ ret = -EAGAIN;
+ pr_info_ratelimited("Indices changed during retrieval, retrying\n");
+ params->next = 1;
+ break;
+ case RTAS_SEQ_MORE_DATA:
+ params->next = rets;
+ ret = 0;
+ break;
+ case RTAS_SEQ_COMPLETE:
+ params->next = 0;
+ ret = 0;
+ break;
+ default:
+ ret = -EIO;
+ pr_err_ratelimited("unexpected ibm,get-indices status %d\n", fwrc);
+ break;
+ }
+
+ params->status = fwrc;
+ return ret;
+}
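
To illustrate the sequence contract described in the kernel-doc above, a hedged sketch of a caller driving rtas_ibm_get_indices() to completion (error handling trimmed; the input values are illustrative, and the real driver does this through the papr_rtas_sequence helpers while holding rtas_ibm_get_indices_lock across the whole sequence):

	struct rtas_get_indices_params p = {
		.is_sensor = 1,			/* illustrative input */
		.indice_type = 3,		/* illustrative token */
		.work_area = rtas_work_area_alloc(RTAS_GET_INDICES_BUF_SIZE),
		.next = 1,
	};

	do {
		if (rtas_ibm_get_indices(&p))
			break;
		/* consume rtas_work_area_raw_buf(p.work_area) here */
	} while (p.status == RTAS_SEQ_MORE_DATA);

	rtas_work_area_free(p.work_area);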
+
+/*
+ * Internal indices sequence APIs. A sequence is a series of calls to
+ * ibm,get-indices for a given location code. The sequence ends when
+ * an error is encountered or all indices for the input have been
+ * returned.
+ */
+
+/*
+ * indices_sequence_begin() - Begin an indices retrieval sequence.
+ *
+ * Context: May sleep.
+ */
+static void indices_sequence_begin(struct papr_rtas_sequence *seq)
+{
+ struct rtas_get_indices_params *param;
+
+ param = (struct rtas_get_indices_params *)seq->params;
+ /*
+ * We could allocate the work area before acquiring the
+ * function lock, but that would allow concurrent requests to
+ * exhaust the limited work area pool for no benefit. So
+ * allocate the work area under the lock.
+ */
+ mutex_lock(&rtas_ibm_get_indices_lock);
+ param->work_area = rtas_work_area_alloc(RTAS_GET_INDICES_BUF_SIZE);
+ param->next = 1;
+ param->status = 0;
+}
+
+/*
+ * indices_sequence_end() - Finalize an indices retrieval sequence.
+ *
+ * Releases resources obtained by indices_sequence_begin().
+ */
+static void indices_sequence_end(struct papr_rtas_sequence *seq)
+{
+ struct rtas_get_indices_params *param;
+
+ param = (struct rtas_get_indices_params *)seq->params;
+ rtas_work_area_free(param->work_area);
+ mutex_unlock(&rtas_ibm_get_indices_lock);
+}
+
+/*
+ * Work function to be passed to papr_rtas_blob_generate().
+ *
+ * The ibm,get-indices RTAS call fills the work area in a defined
+ * format but does not return the number of bytes written to the
+ * buffer. So instead of the kernel parsing the work area to determine
+ * the buffer length, copy the complete work area
+ * (RTAS_GET_INDICES_BUF_SIZE) to the blob and let user space parse
+ * the data. This means RTAS_GET_INDICES_BUF_SIZE bytes are returned
+ * for each read().
+ */
+
+static const char *indices_sequence_fill_work_area(struct papr_rtas_sequence *seq,
+ size_t *len)
+{
+ struct rtas_get_indices_params *p;
+ bool init_state;
+
+ p = (struct rtas_get_indices_params *)seq->params;
+ init_state = (p->next == 1) ? true : false;
+
+ if (papr_rtas_sequence_should_stop(seq, p->status, init_state))
+ return NULL;
+ if (papr_rtas_sequence_set_err(seq, rtas_ibm_get_indices(p)))
+ return NULL;
+
+ *len = RTAS_GET_INDICES_BUF_SIZE;
+ return rtas_work_area_raw_buf(p->work_area);
+}
+
+/*
+ * papr_indices_handle_read - returns indices blob data to the user space
+ *
+ * The ibm,get-indices RTAS call fills the work area in a defined
+ * format but does not return the number of bytes written to the
+ * buffer, and RTAS_GET_INDICES_BUF_SIZE bytes are copied to the blob
+ * for each RTAS call. So return an RTAS_GET_INDICES_BUF_SIZE buffer
+ * to user space for each read().
+ */
+static ssize_t papr_indices_handle_read(struct file *file,
+ char __user *buf, size_t size, loff_t *off)
+{
+ const struct papr_rtas_blob *blob = file->private_data;
+
+ /* we should not instantiate a handle without any data attached. */
+ if (!papr_rtas_blob_has_data(blob)) {
+ pr_err_once("handle without data\n");
+ return -EIO;
+ }
+
+ if (size < RTAS_GET_INDICES_BUF_SIZE) {
+ pr_err_once("Invalid buffer length %ld, expect %d\n",
+ size, RTAS_GET_INDICES_BUF_SIZE);
+ return -EINVAL;
+ } else if (size > RTAS_GET_INDICES_BUF_SIZE)
+ size = RTAS_GET_INDICES_BUF_SIZE;
+
+ return simple_read_from_buffer(buf, size, off, blob->data, blob->len);
+}
+
+static const struct file_operations papr_indices_handle_ops = {
+ .read = papr_indices_handle_read,
+ .llseek = papr_rtas_common_handle_seek,
+ .release = papr_rtas_common_handle_release,
+};
+
+/*
+ * papr_indices_create_handle() - Create a fd-based handle for reading
+ * indices data
+ * @ubuf: Input parameters to RTAS call such as whether sensor or indicator
+ * and indice type in user memory
+ *
+ * Handler for PAPR_INDICES_IOC_GET ioctl command. Validates @ubuf
+ * and instantiates an immutable indices "blob" for it. The blob is
+ * attached to a file descriptor for reading by user space. The memory
+ * backing the blob is freed when the file is released.
+ *
+ * All of the requested indices data is retrieved by this call and all
+ * necessary RTAS interactions are performed before returning the fd
+ * to user space. This keeps the read handler simple and ensures that
+ * the kernel can prevent interleaving of ibm,get-indices call sequences.
+ *
+ * Return: The installed fd number if successful, -ve errno otherwise.
+ */
+static long papr_indices_create_handle(struct papr_indices_io_block __user *ubuf)
+{
+ struct papr_rtas_sequence seq = {};
+ struct rtas_get_indices_params params = {};
+ int fd;
+
+ if (get_user(params.is_sensor, &ubuf->indices.is_sensor))
+ return -EFAULT;
+
+ if (get_user(params.indice_type, &ubuf->indices.indice_type))
+ return -EFAULT;
+
+ seq = (struct papr_rtas_sequence) {
+ .begin = indices_sequence_begin,
+ .end = indices_sequence_end,
+ .work = indices_sequence_fill_work_area,
+ };
+
+ seq.params = &params;
+ fd = papr_rtas_setup_file_interface(&seq,
+ &papr_indices_handle_ops, "[papr-indices]");
+
+ return fd;
+}
+
+/*
+ * Create work area with the input parameters. This function is used
+ * for both ibm,set-dynamic-indicator and ibm,get-dynamic-sensor-state
+ * RTAS Calls.
+ */
+static struct rtas_work_area *
+papr_dynamic_indice_buf_from_user(struct papr_indices_io_block __user *ubuf,
+ struct papr_indices_io_block *kbuf)
+{
+ struct rtas_work_area *work_area;
+ u32 length;
+ __be32 len_be;
+
+ if (copy_from_user(kbuf, ubuf, sizeof(*kbuf)))
+ return ERR_PTR(-EFAULT);
+
+
+ if (!string_is_terminated(kbuf->dynamic_param.location_code_str,
+ ARRAY_SIZE(kbuf->dynamic_param.location_code_str)))
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * The input data in the work area should be as follows:
+ * - 32-bit integer length of the location code string,
+ * including NULL.
+ * - Location code string, NULL terminated, identifying the
+ * token (sensor or indicator).
+ * PAPR 2.13 - R1–7.3.18–5 ibm,set-dynamic-indicator
+ * - R1–7.3.19–5 ibm,get-dynamic-sensor-state
+ */
+ /*
+ * Length that user space passed should also include NULL
+ * terminator.
+ */
+ length = strlen(kbuf->dynamic_param.location_code_str) + 1;
+ if (length > LOC_CODE_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ len_be = cpu_to_be32(length);
+
+ work_area = rtas_work_area_alloc(LOC_CODE_SIZE + sizeof(u32));
+ memcpy(rtas_work_area_raw_buf(work_area), &len_be, sizeof(u32));
+ memcpy((rtas_work_area_raw_buf(work_area) + sizeof(u32)),
+ &kbuf->dynamic_param.location_code_str, length);
+
+ return work_area;
+}
+
+/**
+ * papr_dynamic_indicator_ioc_set - ibm,set-dynamic-indicator RTAS Call
+ * PAPR 2.13 7.3.18
+ *
+ * @ubuf: Input parameters to RTAS call such as indicator token and
+ * new state.
+ *
+ * Returns success or -errno.
+ */
+static long papr_dynamic_indicator_ioc_set(struct papr_indices_io_block __user *ubuf)
+{
+ struct papr_indices_io_block kbuf;
+ struct rtas_work_area *work_area;
+ s32 fwrc, token, ret;
+
+ token = rtas_function_token(RTAS_FN_IBM_SET_DYNAMIC_INDICATOR);
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -ENOENT;
+
+ mutex_lock(&rtas_ibm_set_dynamic_indicator_lock);
+ work_area = papr_dynamic_indice_buf_from_user(ubuf, &kbuf);
+ if (IS_ERR(work_area)) {
+ ret = PTR_ERR(work_area);
+ goto out;
+ }
+
+ do {
+ fwrc = rtas_call(token, 3, 1, NULL,
+ kbuf.dynamic_param.token,
+ kbuf.dynamic_param.state,
+ rtas_work_area_phys(work_area));
+ } while (rtas_busy_delay(fwrc));
+
+ rtas_work_area_free(work_area);
+
+ switch (fwrc) {
+ case RTAS_SUCCESS:
+ ret = 0;
+ break;
+ case RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR: /* No such indicator */
+ ret = -EOPNOTSUPP;
+ break;
+ default:
+ pr_err("unexpected ibm,set-dynamic-indicator result %d\n",
+ fwrc);
+ fallthrough;
+ case RTAS_HARDWARE_ERROR: /* Hardware/platform error */
+ ret = -EIO;
+ break;
+ }
+
+out:
+ mutex_unlock(&rtas_ibm_set_dynamic_indicator_lock);
+ return ret;
+}
+
+/**
+ * papr_dynamic_sensor_ioc_get - ibm,get-dynamic-sensor-state RTAS Call
+ * PAPR 2.13 7.3.19
+ *
+ * @ubuf: Input parameters to RTAS call such as sensor token
+ * Copies the state in user space buffer.
+ *
+ *
+ * Returns success or -errno.
+ */
+
+static long papr_dynamic_sensor_ioc_get(struct papr_indices_io_block __user *ubuf)
+{
+ struct papr_indices_io_block kbuf;
+ struct rtas_work_area *work_area;
+ s32 fwrc, token, ret;
+ u32 rets;
+
+ token = rtas_function_token(RTAS_FN_IBM_GET_DYNAMIC_SENSOR_STATE);
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -ENOENT;
+
+ mutex_lock(&rtas_ibm_get_dynamic_sensor_state_lock);
+ work_area = papr_dynamic_indice_buf_from_user(ubuf, &kbuf);
+ if (IS_ERR(work_area)) {
+ ret = PTR_ERR(work_area);
+ goto out;
+ }
+
+ do {
+ fwrc = rtas_call(token, 2, 2, &rets,
+ kbuf.dynamic_param.token,
+ rtas_work_area_phys(work_area));
+ } while (rtas_busy_delay(fwrc));
+
+ rtas_work_area_free(work_area);
+
+ switch (fwrc) {
+ case RTAS_SUCCESS:
+ if (put_user(rets, &ubuf->dynamic_param.state))
+ ret = -EFAULT;
+ else
+ ret = 0;
+ break;
+ case RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR: /* No such indicator */
+ ret = -EOPNOTSUPP;
+ break;
+ default:
+ pr_err("unexpected ibm,get-dynamic-sensor result %d\n",
+ fwrc);
+ fallthrough;
+ case RTAS_HARDWARE_ERROR: /* Hardware/platform error */
+ ret = -EIO;
+ break;
+ }
+
+out:
+ mutex_unlock(&rtas_ibm_get_dynamic_sensor_state_lock);
+ return ret;
+}
+
+/*
+ * Top-level ioctl handler for /dev/papr-indices.
+ */
+static long papr_indices_dev_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+{
+ void __user *argp = (__force void __user *)arg;
+ long ret;
+
+ switch (ioctl) {
+ case PAPR_INDICES_IOC_GET:
+ ret = papr_indices_create_handle(argp);
+ break;
+ case PAPR_DYNAMIC_SENSOR_IOC_GET:
+ ret = papr_dynamic_sensor_ioc_get(argp);
+ break;
+ case PAPR_DYNAMIC_INDICATOR_IOC_SET:
+ if (filp->f_mode & FMODE_WRITE)
+ ret = papr_dynamic_indicator_ioc_set(argp);
+ else
+ ret = -EBADF;
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct file_operations papr_indices_ops = {
+ .unlocked_ioctl = papr_indices_dev_ioctl,
+};
+
+static struct miscdevice papr_indices_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "papr-indices",
+ .fops = &papr_indices_ops,
+};
+
+static __init int papr_indices_init(void)
+{
+ if (!rtas_function_implemented(RTAS_FN_IBM_GET_INDICES))
+ return -ENODEV;
+
+ if (!rtas_function_implemented(RTAS_FN_IBM_SET_DYNAMIC_INDICATOR))
+ return -ENODEV;
+
+ if (!rtas_function_implemented(RTAS_FN_IBM_GET_DYNAMIC_SENSOR_STATE))
+ return -ENODEV;
+
+ return misc_register(&papr_indices_dev);
+}
+machine_device_initcall(pseries, papr_indices_init);
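
For context, a hedged sketch of how user space is expected to drive this device, assuming the ioctl command, the papr_indices_io_block layout and the RTAS_GET_INDICES_BUF_SIZE macro are all visible from the exported papr-indices uapi header (header path, token value and error handling are illustrative only; they are not defined by this hunk):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/papr-indices.h>	/* assumed uapi export path */

	struct papr_indices_io_block io = {
		.indices.is_sensor = 1,
		.indices.indice_type = 3,	/* illustrative token */
	};
	char buf[RTAS_GET_INDICES_BUF_SIZE];	/* read() requires at least this size */
	int dev_fd, blob_fd;
	ssize_t n;

	dev_fd = open("/dev/papr-indices", O_RDONLY);
	blob_fd = ioctl(dev_fd, PAPR_INDICES_IOC_GET, &io);

	/* each read() returns one work-area-sized chunk of indices data */
	while ((n = read(blob_fd, buf, sizeof(buf))) > 0)
		/* parse the chunk */;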
diff --git a/arch/powerpc/platforms/pseries/papr-phy-attest.c b/arch/powerpc/platforms/pseries/papr-phy-attest.c
new file mode 100644
index 000000000000..1907f2411567
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr-phy-attest.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "papr-phy-attest: " fmt
+
+#include <linux/build_bug.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/lockdep.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+#include <linux/uaccess.h>
+#include <asm/machdep.h>
+#include <asm/rtas-work-area.h>
+#include <asm/rtas.h>
+#include <uapi/asm/papr-physical-attestation.h>
+#include "papr-rtas-common.h"
+
+/**
+ * struct rtas_phy_attest_params - Parameters (in and out) for
+ * ibm,physical-attestation.
+ *
+ * @cmd: In: Caller-provided attestation command buffer. Must be
+ * RTAS-addressable.
+ * @work_area: In: Caller-provided work area buffer for attestation
+ * command structure
+ * Out: Caller-provided work area buffer for the response
+ * @cmd_len: In: Caller-provided attestation command structure
+ * length
+ * @sequence: In: Sequence number. Out: Next sequence number.
+ * @written: Out: Bytes written by ibm,physical-attestation to
+ * @work_area.
+ * @status: Out: RTAS call status.
+ */
+struct rtas_phy_attest_params {
+ struct papr_phy_attest_io_block cmd;
+ struct rtas_work_area *work_area;
+ u32 cmd_len;
+ u32 sequence;
+ u32 written;
+ s32 status;
+};
+
+/**
+ * rtas_physical_attestation() - Call ibm,physical-attestation to
+ * fill a work area buffer.
+ * @params: See &struct rtas_phy_attest_params.
+ *
+ * Calls ibm,physical-attestation until it errors or successfully
+ * deposits data into the supplied work area. Handles RTAS retry
+ * statuses. Maps RTAS error statuses to reasonable errno values.
+ *
+ * The caller is expected to invoke rtas_physical_attestation()
+ * multiple times to retrieve all the data for the provided
+ * attestation command. Only one sequence should be in progress at
+ * any time; starting a new sequence will disrupt any sequence
+ * already in progress. Serialization of attestation retrieval
+ * sequences is the responsibility of the caller.
+ *
+ * The caller should inspect @params.status to determine whether more
+ * calls are needed to complete the sequence.
+ *
+ * Context: May sleep.
+ * Return: -ve on error, 0 otherwise.
+ */
+static int rtas_physical_attestation(struct rtas_phy_attest_params *params)
+{
+ struct rtas_work_area *work_area;
+ s32 fwrc, token;
+ u32 rets[2];
+ int ret;
+
+ work_area = params->work_area;
+ token = rtas_function_token(RTAS_FN_IBM_PHYSICAL_ATTESTATION);
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -ENOENT;
+
+ lockdep_assert_held(&rtas_ibm_physical_attestation_lock);
+
+ do {
+ fwrc = rtas_call(token, 3, 3, rets,
+ rtas_work_area_phys(work_area),
+ params->cmd_len,
+ params->sequence);
+ } while (rtas_busy_delay(fwrc));
+
+ switch (fwrc) {
+ case RTAS_HARDWARE_ERROR:
+ ret = -EIO;
+ break;
+ case RTAS_INVALID_PARAMETER:
+ ret = -EINVAL;
+ break;
+ case RTAS_SEQ_MORE_DATA:
+ params->sequence = rets[0];
+ fallthrough;
+ case RTAS_SEQ_COMPLETE:
+ params->written = rets[1];
+ /*
+ * Kernel or firmware bug, do not continue.
+ */
+ if (WARN(params->written > rtas_work_area_size(work_area),
+ "possible write beyond end of work area"))
+ ret = -EFAULT;
+ else
+ ret = 0;
+ break;
+ default:
+ ret = -EIO;
+ pr_err_ratelimited("unexpected ibm,get-phy_attest status %d\n", fwrc);
+ break;
+ }
+
+ params->status = fwrc;
+ return ret;
+}
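
A hedged sketch of the corresponding attestation sequence, mirroring the indices example earlier: the command is copied into the work area by phy_attest_sequence_begin(), and each call reports how many bytes of the response landed in the work area via @written (setup and error handling trimmed):

	do {
		if (rtas_physical_attestation(&params))
			break;
		/* params.written bytes of response data are now in the work area */
	} while (params.status == RTAS_SEQ_MORE_DATA);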
+
+/*
+ * Internal physical-attestation sequence APIs. A physical-attestation
+ * sequence is a series of calls to get ibm,physical-attestation
+ * for a given attestation command. The sequence ends when an error
+ * is encountered or all data for the attestation command has been
+ * returned.
+ */
+
+/**
+ * phy_attest_sequence_begin() - Begin a response data for attestation
+ * command retrieval sequence.
+ * @seq: user specified parameters for RTAS call from seq struct.
+ *
+ * Context: May sleep.
+ */
+static void phy_attest_sequence_begin(struct papr_rtas_sequence *seq)
+{
+ struct rtas_phy_attest_params *param;
+
+ /*
+ * We could allocate the work area before acquiring the
+ * function lock, but that would allow concurrent requests to
+ * exhaust the limited work area pool for no benefit. So
+ * allocate the work area under the lock.
+ */
+ mutex_lock(&rtas_ibm_physical_attestation_lock);
+ param = (struct rtas_phy_attest_params *)seq->params;
+ param->work_area = rtas_work_area_alloc(SZ_4K);
+ memcpy(rtas_work_area_raw_buf(param->work_area), &param->cmd,
+ param->cmd_len);
+ param->sequence = 1;
+ param->status = 0;
+}
+
+/**
+ * phy_attest_sequence_end() - Finalize an attestation command
+ * response retrieval sequence.
+ * @seq: Sequence state.
+ *
+ * Releases resources obtained by phy_attest_sequence_begin().
+ */
+static void phy_attest_sequence_end(struct papr_rtas_sequence *seq)
+{
+ struct rtas_phy_attest_params *param;
+
+ param = (struct rtas_phy_attest_params *)seq->params;
+ rtas_work_area_free(param->work_area);
+ mutex_unlock(&rtas_ibm_physical_attestation_lock);
+ kfree(param);
+}
+
+/*
+ * Generator function to be passed to papr_rtas_blob_generate().
+ */
+static const char *phy_attest_sequence_fill_work_area(struct papr_rtas_sequence *seq,
+ size_t *len)
+{
+ struct rtas_phy_attest_params *p;
+ bool init_state;
+
+ p = (struct rtas_phy_attest_params *)seq->params;
+ init_state = (p->written == 0) ? true : false;
+
+ if (papr_rtas_sequence_should_stop(seq, p->status, init_state))
+ return NULL;
+ if (papr_rtas_sequence_set_err(seq, rtas_physical_attestation(p)))
+ return NULL;
+ *len = p->written;
+ return rtas_work_area_raw_buf(p->work_area);
+}
+
+static const struct file_operations papr_phy_attest_handle_ops = {
+ .read = papr_rtas_common_handle_read,
+ .llseek = papr_rtas_common_handle_seek,
+ .release = papr_rtas_common_handle_release,
+};
+
+/**
+ * papr_phy_attest_create_handle() - Create a fd-based handle for
+ * reading the response for the given attestation command.
+ * @ulc: Attestation command in user memory; defines the scope of
+ * data for the attestation command to retrieve.
+ *
+ * Handler for PAPR_PHYSICAL_ATTESTATION_IOC_CREATE_HANDLE ioctl
+ * command. Validates @ulc and instantiates an immutable response
+ * "blob" for attestation command. The blob is attached to a file
+ * descriptor for reading by user space. The memory backing the blob
+ * is freed when the file is released.
+ *
+ * The entire response buffer for the attestation command is
+ * retrieved by this call and all necessary RTAS interactions are
+ * performed before returning the fd to user space. This keeps the
+ * read handler simple and ensures that the kernel can prevent
+ * interleaving of ibm,physical-attestation call sequences.
+ *
+ * Return: The installed fd number if successful, -ve errno otherwise.
+ */
+static long papr_phy_attest_create_handle(struct papr_phy_attest_io_block __user *ulc)
+{
+ struct rtas_phy_attest_params *params;
+ struct papr_rtas_sequence seq = {};
+ int fd;
+
+ /*
+ * Freed in phy_attest_sequence_end().
+ */
+ params = kzalloc(sizeof(*params), GFP_KERNEL_ACCOUNT);
+ if (!params)
+ return -ENOMEM;
+
+ if (copy_from_user(&params->cmd, ulc,
+ sizeof(struct papr_phy_attest_io_block)))
+ return -EFAULT;
+
+ params->cmd_len = be32_to_cpu(params->cmd.length);
+ seq = (struct papr_rtas_sequence) {
+ .begin = phy_attest_sequence_begin,
+ .end = phy_attest_sequence_end,
+ .work = phy_attest_sequence_fill_work_area,
+ };
+
+ seq.params = (void *)params;
+
+ fd = papr_rtas_setup_file_interface(&seq,
+ &papr_phy_attest_handle_ops,
+ "[papr-physical-attestation]");
+
+ return fd;
+}
+
+/*
+ * Top-level ioctl handler for /dev/papr-physical-attestation.
+ */
+static long papr_phy_attest_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (__force void __user *)arg;
+ long ret;
+
+ switch (ioctl) {
+ case PAPR_PHY_ATTEST_IOC_HANDLE:
+ ret = papr_phy_attest_create_handle(argp);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations papr_phy_attest_ops = {
+ .unlocked_ioctl = papr_phy_attest_dev_ioctl,
+};
+
+static struct miscdevice papr_phy_attest_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "papr-physical-attestation",
+ .fops = &papr_phy_attest_ops,
+};
+
+static __init int papr_phy_attest_init(void)
+{
+ if (!rtas_function_implemented(RTAS_FN_IBM_PHYSICAL_ATTESTATION))
+ return -ENODEV;
+
+ return misc_register(&papr_phy_attest_dev);
+}
+machine_device_initcall(pseries, papr_phy_attest_init);
diff --git a/arch/powerpc/platforms/pseries/papr-platform-dump.c b/arch/powerpc/platforms/pseries/papr-platform-dump.c
new file mode 100644
index 000000000000..f8d55eccdb6b
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr-platform-dump.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "papr-platform-dump: " fmt
+
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <asm/machdep.h>
+#include <asm/rtas-work-area.h>
+#include <asm/rtas.h>
+#include <uapi/asm/papr-platform-dump.h>
+
+/*
+ * Function-specific return values for ibm,platform-dump, derived from
+ * PAPR+ v2.13 7.3.3.4.1 "ibm,platform-dump RTAS Call".
+ */
+#define RTAS_IBM_PLATFORM_DUMP_COMPLETE 0 /* Complete dump retrieved. */
+#define RTAS_IBM_PLATFORM_DUMP_CONTINUE 1 /* Continue dump */
+#define RTAS_NOT_AUTHORIZED -9002 /* Not Authorized */
+
+#define RTAS_IBM_PLATFORM_DUMP_START 2 /* Linux status to start dump */
+
+/**
+ * struct ibm_platform_dump_params - Parameters (in and out) for
+ * ibm,platform-dump
+ * @work_area: In: work area buffer for results.
+ * @buf_length: In: work area buffer length in bytes
+ * @dump_tag_hi: In: Most-significant 32 bits of a Dump_Tag representing
+ * an id of the dump being processed.
+ * @dump_tag_lo: In: Least-significant 32 bits of a Dump_Tag representing
+ * an id of the dump being processed.
+ * @sequence_hi: In: Sequence number in most-significant 32 bits.
+ * Out: Next sequence number in most-significant 32 bits.
+ * @sequence_lo: In: Sequence number in Least-significant 32 bits
+ * Out: Next sequence number in Least-significant 32 bits.
+ * @bytes_ret_hi: Out: Bytes written in most-significant 32 bits.
+ * @bytes_ret_lo: Out: Bytes written in Least-significant 32 bits.
+ * @status: Out: RTAS call status.
+ * @list: Maintains the list of dumps that are in progress. Multiple
+ * dumps with different dump IDs can be retrieved at the
+ * same time, but not with the same dump ID. This list is
+ * used to determine whether a dump for the same ID is
+ * already in progress.
+ */
+struct ibm_platform_dump_params {
+ struct rtas_work_area *work_area;
+ u32 buf_length;
+ u32 dump_tag_hi;
+ u32 dump_tag_lo;
+ u32 sequence_hi;
+ u32 sequence_lo;
+ u32 bytes_ret_hi;
+ u32 bytes_ret_lo;
+ s32 status;
+ struct list_head list;
+};
+
+/*
+ * Multiple dumps with different dump IDs can be retrieved at the same
+ * time, but not with the same dump ID. platform_dump_list_mutex and
+ * platform_dump_list are used to prevent concurrent retrieval of the
+ * same dump ID.
+ */
+static DEFINE_MUTEX(platform_dump_list_mutex);
+static LIST_HEAD(platform_dump_list);
+
+/**
+ * rtas_ibm_platform_dump() - Call ibm,platform-dump to fill a work area
+ * buffer.
+ * @params: See &struct ibm_platform_dump_params.
+ * @buf_addr: Address of dump buffer (work_area)
+ * @buf_length: Length of the buffer in bytes (min. 1024)
+ *
+ * Calls ibm,platform-dump until it errors or successfully deposits data
+ * into the supplied work area. Handles RTAS retry statuses. Maps RTAS
+ * error statuses to reasonable errno values.
+ *
+ * Multiple dumps with different dump IDs can be requested at the same
+ * time, but not with the same dump ID; this is enforced by the check
+ * in the ioctl code (papr_platform_dump_create_handle()).
+ *
+ * The caller should inspect @params.status to determine whether more
+ * calls are needed to complete the sequence.
+ *
+ * Context: May sleep.
+ * Return: 0 on success (with the RTAS status saved in @params->status),
+ *	   -ve errno otherwise.
+ */
+static int rtas_ibm_platform_dump(struct ibm_platform_dump_params *params,
+ phys_addr_t buf_addr, u32 buf_length)
+{
+ u32 rets[4];
+ s32 fwrc;
+ int ret = 0;
+
+ do {
+ fwrc = rtas_call(rtas_function_token(RTAS_FN_IBM_PLATFORM_DUMP),
+ 6, 5,
+ rets,
+ params->dump_tag_hi,
+ params->dump_tag_lo,
+ params->sequence_hi,
+ params->sequence_lo,
+ buf_addr,
+ buf_length);
+ } while (rtas_busy_delay(fwrc));
+
+ switch (fwrc) {
+ case RTAS_HARDWARE_ERROR:
+ ret = -EIO;
+ break;
+ case RTAS_NOT_AUTHORIZED:
+ ret = -EPERM;
+ break;
+ case RTAS_IBM_PLATFORM_DUMP_CONTINUE:
+ case RTAS_IBM_PLATFORM_DUMP_COMPLETE:
+ params->sequence_hi = rets[0];
+ params->sequence_lo = rets[1];
+ params->bytes_ret_hi = rets[2];
+ params->bytes_ret_lo = rets[3];
+ break;
+ default:
+ ret = -EIO;
+ pr_err_ratelimited("unexpected ibm,platform-dump status %d\n",
+ fwrc);
+ break;
+ }
+
+ params->status = fwrc;
+ return ret;
+}
+
+/*
+ * Retrieving a platform dump requires multiple RTAS calls for the
+ * provided dump ID. Once the complete dump is retrieved, the
+ * hypervisor returns dump complete status (0) for the last RTAS call
+ * and expects the caller to issue one more call with a NULL buffer to
+ * invalidate the dump so that the hypervisor can remove it.
+ *
+ * After the specific dump is invalidated in the hypervisor, the dump
+ * complete status is expected for the new sequence when user space
+ * initiates a new request for the same dump ID.
+ */
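+
+/*
+ * Hypothetical user-space sequence (an illustrative sketch, not kernel
+ * code; ioctl names are those defined in the papr-platform-dump uapi
+ * header):
+ *
+ *	devfd = open("/dev/papr-platform-dump", O_RDONLY);
+ *	fd = ioctl(devfd, PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE, &dump_tag);
+ *	while ((n = read(fd, buf, len)) > 0)	(len must be >= 1024)
+ *		append buf[0..n) to the dump image;
+ *	ioctl(fd, PAPR_PLATFORM_DUMP_IOC_INVALIDATE, &dump_tag);
+ *	close(fd);
+ *	close(devfd);
+ */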
+static ssize_t papr_platform_dump_handle_read(struct file *file,
+ char __user *buf, size_t size, loff_t *off)
+{
+ struct ibm_platform_dump_params *params = file->private_data;
+ u64 total_bytes;
+ s32 fwrc;
+
+ /*
+	 * The dump was already completed by previous read calls.
+	 * If user space issues further reads, return -EINVAL.
+ */
+ if (!params->buf_length) {
+ pr_warn_once("Platform dump completed for dump ID %llu\n",
+ (u64) (((u64)params->dump_tag_hi << 32) |
+ params->dump_tag_lo));
+ return -EINVAL;
+ }
+
+ /*
+	 * The hypervisor returns status 0 when no more data is available
+	 * to download. The dump will be invalidated with the ioctl (see below).
+ */
+ if (params->status == RTAS_IBM_PLATFORM_DUMP_COMPLETE) {
+ params->buf_length = 0;
+ /*
+		 * Return 0 to user space so that the
+		 * read loop stops.
+ */
+ return 0;
+ }
+
+ if (size < SZ_1K) {
+		pr_err_once("Buffer length must be at least 1024 bytes\n");
+ return -EINVAL;
+ } else if (size > params->buf_length) {
+ /*
+		 * A 4K work area is allocated, so if the user requests
+		 * more than that, clamp the size to the buffer length.
+ */
+ size = params->buf_length;
+ }
+
+ fwrc = rtas_ibm_platform_dump(params,
+ rtas_work_area_phys(params->work_area),
+ size);
+ if (fwrc < 0)
+ return fwrc;
+
+ total_bytes = (u64) (((u64)params->bytes_ret_hi << 32) |
+ params->bytes_ret_lo);
+
+ /*
+ * Kernel or firmware bug, do not continue.
+ */
+ if (WARN(total_bytes > size, "possible write beyond end of work area"))
+ return -EFAULT;
+
+ if (copy_to_user(buf, rtas_work_area_raw_buf(params->work_area),
+ total_bytes))
+ return -EFAULT;
+
+ return total_bytes;
+}
+
+static int papr_platform_dump_handle_release(struct inode *inode,
+ struct file *file)
+{
+ struct ibm_platform_dump_params *params = file->private_data;
+
+ if (params->work_area)
+ rtas_work_area_free(params->work_area);
+
+ mutex_lock(&platform_dump_list_mutex);
+ list_del(&params->list);
+ mutex_unlock(&platform_dump_list_mutex);
+
+ kfree(params);
+ file->private_data = NULL;
+ return 0;
+}
+
+/*
+ * This ioctl invalidates the dump; user space is expected to issue it
+ * after obtaining the complete dump.
+ * Issue the last RTAS call with a NULL buffer to invalidate the dump,
+ * which means the dump will be freed in the hypervisor.
+ */
+static long papr_platform_dump_invalidate_ioctl(struct file *file,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct ibm_platform_dump_params *params;
+ u64 __user *argp = (void __user *)arg;
+ u64 param_dump_tag, dump_tag;
+
+ if (ioctl != PAPR_PLATFORM_DUMP_IOC_INVALIDATE)
+ return -ENOIOCTLCMD;
+
+ if (get_user(dump_tag, argp))
+ return -EFAULT;
+
+ /*
+	 * private_data is freed during release(), so this should
+	 * not happen.
+ */
+ if (!file->private_data) {
+ pr_err("No valid FD to invalidate dump for the ID(%llu)\n",
+ dump_tag);
+ return -EINVAL;
+ }
+
+ params = file->private_data;
+ param_dump_tag = (u64) (((u64)params->dump_tag_hi << 32) |
+ params->dump_tag_lo);
+ if (dump_tag != param_dump_tag) {
+ pr_err("Invalid dump ID(%llu) to invalidate dump\n",
+ dump_tag);
+ return -EINVAL;
+ }
+
+ if (params->status != RTAS_IBM_PLATFORM_DUMP_COMPLETE) {
+ pr_err("Platform dump is not complete, but requested "
+ "to invalidate dump for ID(%llu)\n",
+ dump_tag);
+ return -EINPROGRESS;
+ }
+
+ return rtas_ibm_platform_dump(params, 0, 0);
+}
+
+static const struct file_operations papr_platform_dump_handle_ops = {
+ .read = papr_platform_dump_handle_read,
+ .release = papr_platform_dump_handle_release,
+ .unlocked_ioctl = papr_platform_dump_invalidate_ioctl,
+};
+
+/**
+ * papr_platform_dump_create_handle() - Create a fd-based handle for
+ * reading platform dump
+ *
+ * Handler for the PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE ioctl command.
+ * Allocates the RTAS parameter struct and work area and attaches them
+ * to the file descriptor so that user space can read the dump with
+ * multiple RTAS calls until it is complete. This memory is freed when
+ * the file is released.
+ *
+ * Multiple dump requests with different IDs are allowed at the same
+ * time, but not with the same dump ID. So if user space has already
+ * opened a file descriptor for the specific dump ID, return -EALREADY
+ * for the next request.
+ *
+ * @dump_tag: Dump ID for the dump requested to retrieve from the
+ * hypervisor
+ *
+ * Return: The installed fd number if successful, -ve errno otherwise.
+ */
+static long papr_platform_dump_create_handle(u64 dump_tag)
+{
+ struct ibm_platform_dump_params *params;
+ u64 param_dump_tag;
+ struct file *file;
+ long err;
+ int fd;
+
+ /*
+	 * Return failure if user space has already opened an FD for
+	 * the specific dump ID. This check prevents multiple dump
+	 * requests for the same dump ID at the same time. This is not
+	 * generally expected, but guard against it just in case.
+ */
+ list_for_each_entry(params, &platform_dump_list, list) {
+ param_dump_tag = (u64) (((u64)params->dump_tag_hi << 32) |
+ params->dump_tag_lo);
+ if (dump_tag == param_dump_tag) {
+ pr_err("Platform dump for ID(%llu) is already in progress\n",
+ dump_tag);
+ return -EALREADY;
+ }
+ }
+
+ params = kzalloc(sizeof(struct ibm_platform_dump_params),
+ GFP_KERNEL_ACCOUNT);
+ if (!params)
+ return -ENOMEM;
+
+ params->work_area = rtas_work_area_alloc(SZ_4K);
+ params->buf_length = SZ_4K;
+ params->dump_tag_hi = (u32)(dump_tag >> 32);
+ params->dump_tag_lo = (u32)(dump_tag & 0x00000000ffffffffULL);
+ params->status = RTAS_IBM_PLATFORM_DUMP_START;
+
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ err = fd;
+ goto free_area;
+ }
+
+ file = anon_inode_getfile_fmode("[papr-platform-dump]",
+ &papr_platform_dump_handle_ops,
+ (void *)params, O_RDONLY,
+ FMODE_LSEEK | FMODE_PREAD);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ goto put_fd;
+ }
+
+ fd_install(fd, file);
+
+ list_add(&params->list, &platform_dump_list);
+
+ pr_info("%s (%d) initiated platform dump for dump tag %llu\n",
+ current->comm, current->pid, dump_tag);
+ return fd;
+put_fd:
+ put_unused_fd(fd);
+free_area:
+ rtas_work_area_free(params->work_area);
+ kfree(params);
+ return err;
+}
+
+/*
+ * Top-level ioctl handler for /dev/papr-platform-dump.
+ */
+static long papr_platform_dump_dev_ioctl(struct file *filp,
+ unsigned int ioctl,
+ unsigned long arg)
+{
+ u64 __user *argp = (void __user *)arg;
+ u64 dump_tag;
+ long ret;
+
+ if (get_user(dump_tag, argp))
+ return -EFAULT;
+
+ switch (ioctl) {
+ case PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE:
+ mutex_lock(&platform_dump_list_mutex);
+ ret = papr_platform_dump_create_handle(dump_tag);
+ mutex_unlock(&platform_dump_list_mutex);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations papr_platform_dump_ops = {
+ .unlocked_ioctl = papr_platform_dump_dev_ioctl,
+};
+
+static struct miscdevice papr_platform_dump_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "papr-platform-dump",
+ .fops = &papr_platform_dump_ops,
+};
+
+static __init int papr_platform_dump_init(void)
+{
+ if (!rtas_function_implemented(RTAS_FN_IBM_PLATFORM_DUMP))
+ return -ENODEV;
+
+ return misc_register(&papr_platform_dump_dev);
+}
+machine_device_initcall(pseries, papr_platform_dump_init);
diff --git a/arch/powerpc/platforms/pseries/papr-rtas-common.c b/arch/powerpc/platforms/pseries/papr-rtas-common.c
new file mode 100644
index 000000000000..33c606e3378a
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr-rtas-common.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "papr-common: " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/sched/signal.h>
+#include "papr-rtas-common.h"
+
+/*
+ * Sequence-based RTAS calls have to be issued multiple times to
+ * retrieve the complete data from the hypervisor. For some of these
+ * RTAS calls, the OS should not interleave calls with different input
+ * until the sequence is completed. So the data is collected during the
+ * ioctl handler and exported to user space via the read() handler.
+ * This file provides the common functions needed for such
+ * sequence-based RTAS calls, e.g. ibm,get-vpd and ibm,get-indices.
+ */
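+
+/*
+ * Typical caller wiring (sketch only; the foo_* names are placeholders,
+ * see papr-vpd.c for a real user):
+ *
+ *	struct papr_rtas_sequence seq = {
+ *		.begin	= foo_sequence_begin,
+ *		.end	= foo_sequence_end,
+ *		.work	= foo_sequence_fill_work_area,
+ *	};
+ *
+ *	seq.params = (void *)&foo_params;
+ *	fd = papr_rtas_setup_file_interface(&seq, &foo_handle_ops, "[foo]");
+ */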
+
+bool papr_rtas_blob_has_data(const struct papr_rtas_blob *blob)
+{
+ return blob->data && blob->len;
+}
+
+void papr_rtas_blob_free(const struct papr_rtas_blob *blob)
+{
+ if (blob) {
+ kvfree(blob->data);
+ kfree(blob);
+ }
+}
+
+/**
+ * papr_rtas_blob_extend() - Append data to a &struct papr_rtas_blob.
+ * @blob: The blob to extend.
+ * @data: The new data to append to @blob.
+ * @len: The length of @data.
+ *
+ * Context: May sleep.
+ * Return: -ENOMEM on allocation failure, 0 otherwise.
+ */
+static int papr_rtas_blob_extend(struct papr_rtas_blob *blob,
+ const char *data, size_t len)
+{
+ const size_t new_len = blob->len + len;
+ const size_t old_len = blob->len;
+ const char *old_ptr = blob->data;
+ char *new_ptr;
+
+ new_ptr = kvrealloc(old_ptr, new_len, GFP_KERNEL_ACCOUNT);
+ if (!new_ptr)
+ return -ENOMEM;
+
+ memcpy(&new_ptr[old_len], data, len);
+ blob->data = new_ptr;
+ blob->len = new_len;
+ return 0;
+}
+
+/**
+ * papr_rtas_blob_generate() - Construct a new &struct papr_rtas_blob.
+ * @seq:	sequence state; its @work callback is invoked to obtain
+ *		data from the caller's specific RTAS call.
+ *
+ * The @work callback is invoked until it returns NULL. @seq is
+ * passed to @work in its first argument on each call. When
+ * @work returns data, it should store the data length in its
+ * second argument.
+ *
+ * Context: May sleep.
+ * Return: A completely populated &struct papr_rtas_blob, or NULL on error.
+ */
+static const struct papr_rtas_blob *
+papr_rtas_blob_generate(struct papr_rtas_sequence *seq)
+{
+ struct papr_rtas_blob *blob;
+ const char *buf;
+ size_t len;
+ int err = 0;
+
+	if (!seq->work)
+		return ERR_PTR(-EINVAL);
+
+	blob = kzalloc(sizeof(*blob), GFP_KERNEL_ACCOUNT);
+	if (!blob)
+		return NULL;
+
+ while (err == 0 && (buf = seq->work(seq, &len)))
+ err = papr_rtas_blob_extend(blob, buf, len);
+
+ if (err != 0 || !papr_rtas_blob_has_data(blob))
+ goto free_blob;
+
+ return blob;
+free_blob:
+ papr_rtas_blob_free(blob);
+ return NULL;
+}
+
+int papr_rtas_sequence_set_err(struct papr_rtas_sequence *seq, int err)
+{
+ /* Preserve the first error recorded. */
+ if (seq->error == 0)
+ seq->error = err;
+
+ return seq->error;
+}
+
+/*
+ * Higher-level retrieval code below. These functions use the
+ * papr_rtas_blob_* and sequence_* APIs defined above to create fd-based
+ * handles for consumption by user space.
+ */
+
+/**
+ * papr_rtas_run_sequence() - Run a single retrieval sequence.
+ * @seq: Functions of the caller to complete the sequence
+ *
+ * Context: May sleep. Holds a mutex and an RTAS work area for its
+ * duration. Typically performs multiple sleepable slab
+ * allocations.
+ *
+ * Return: A populated &struct papr_rtas_blob on success. Encoded error
+ * pointer otherwise.
+ */
+static const struct papr_rtas_blob *papr_rtas_run_sequence(struct papr_rtas_sequence *seq)
+{
+ const struct papr_rtas_blob *blob;
+
+ if (seq->begin)
+ seq->begin(seq);
+
+ blob = papr_rtas_blob_generate(seq);
+ if (!blob)
+ papr_rtas_sequence_set_err(seq, -ENOMEM);
+
+ if (seq->end)
+ seq->end(seq);
+
+ if (seq->error) {
+ papr_rtas_blob_free(blob);
+ return ERR_PTR(seq->error);
+ }
+
+ return blob;
+}
+
+/**
+ * papr_rtas_retrieve() - Return the data blob that is exposed to
+ * user space.
+ * @seq: RTAS call specific functions to be invoked until the
+ * sequence is completed.
+ *
+ * Run sequences against @seq until a blob is successfully
+ * instantiated, or a hard error is encountered, or a fatal signal is
+ * pending.
+ *
+ * Context: May sleep.
+ * Return: A fully populated data blob when successful. Encoded error
+ * pointer otherwise.
+ */
+const struct papr_rtas_blob *papr_rtas_retrieve(struct papr_rtas_sequence *seq)
+{
+ const struct papr_rtas_blob *blob;
+
+ /*
+	 * EAGAIN means the sequence errored with a -4 (data changed,
+	 * restart the sequence) status from the RTAS call, and we
+	 * should attempt a new sequence. PAPR+ (v2.13 R1–7.3.20–5
+ * - ibm,get-vpd, R1–7.3.17–6 - ibm,get-indices) indicates that
+ * this should be a transient condition, not something that
+ * happens continuously. But we'll stop trying on a fatal signal.
+ */
+ do {
+ blob = papr_rtas_run_sequence(seq);
+ if (!IS_ERR(blob)) /* Success. */
+ break;
+ if (PTR_ERR(blob) != -EAGAIN) /* Hard error. */
+ break;
+ cond_resched();
+ } while (!fatal_signal_pending(current));
+
+ return blob;
+}
+
+/**
+ * papr_rtas_setup_file_interface() - Complete the sequence, obtain the
+ * data and export it to user space via an fd-based handle. User space
+ * then retrieves the data with the read() handler.
+ * @seq: RTAS call specific functions to get the data.
+ * @fops: RTAS call specific file operations such as read().
+ * @name: RTAS call specific name for the fd-based handle.
+ *
+ * Return: The installed fd number if successful, -ve errno otherwise.
+ */
+long papr_rtas_setup_file_interface(struct papr_rtas_sequence *seq,
+ const struct file_operations *fops,
+ char *name)
+{
+ const struct papr_rtas_blob *blob;
+ struct file *file;
+ long ret;
+ int fd;
+
+ blob = papr_rtas_retrieve(seq);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
+
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ goto free_blob;
+ }
+
+ file = anon_inode_getfile_fmode(name, fops, (void *)blob,
+ O_RDONLY, FMODE_LSEEK | FMODE_PREAD);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto put_fd;
+ }
+
+ fd_install(fd, file);
+ return fd;
+
+put_fd:
+ put_unused_fd(fd);
+free_blob:
+ papr_rtas_blob_free(blob);
+ return ret;
+}
+
+/*
+ * papr_rtas_sequence_should_stop() - Determine whether RTAS retrieval
+ * sequence should continue.
+ *
+ * Examines the sequence error state and outputs of the last call to
+ * the specific RTAS call to determine whether the sequence in progress
+ * should continue or stop.
+ *
+ * Return: True if the sequence has encountered an error or if all data
+ * for this sequence has been retrieved. False otherwise.
+ */
+bool papr_rtas_sequence_should_stop(const struct papr_rtas_sequence *seq,
+ s32 status, bool init_state)
+{
+ bool done;
+
+ if (seq->error)
+ return true;
+
+ switch (status) {
+ case RTAS_SEQ_COMPLETE:
+ if (init_state)
+ done = false; /* Initial state. */
+ else
+ done = true; /* All data consumed. */
+ break;
+ case RTAS_SEQ_MORE_DATA:
+ done = false; /* More data available. */
+ break;
+ default:
+ done = true; /* Error encountered. */
+ break;
+ }
+
+ return done;
+}
+
+/*
+ * User space read to retrieve data for the corresponding RTAS call.
+ * papr_rtas_blob is filled with the data using the corresponding RTAS
+ * call sequence API.
+ */
+ssize_t papr_rtas_common_handle_read(struct file *file,
+ char __user *buf, size_t size, loff_t *off)
+{
+ const struct papr_rtas_blob *blob = file->private_data;
+
+ /* We should not instantiate a handle without any data attached. */
+ if (!papr_rtas_blob_has_data(blob)) {
+ pr_err_once("handle without data\n");
+ return -EIO;
+ }
+
+ return simple_read_from_buffer(buf, size, off, blob->data, blob->len);
+}
+
+int papr_rtas_common_handle_release(struct inode *inode,
+ struct file *file)
+{
+ const struct papr_rtas_blob *blob = file->private_data;
+
+ papr_rtas_blob_free(blob);
+
+ return 0;
+}
+
+loff_t papr_rtas_common_handle_seek(struct file *file, loff_t off,
+ int whence)
+{
+ const struct papr_rtas_blob *blob = file->private_data;
+
+ return fixed_size_llseek(file, off, whence, blob->len);
+}
diff --git a/arch/powerpc/platforms/pseries/papr-rtas-common.h b/arch/powerpc/platforms/pseries/papr-rtas-common.h
new file mode 100644
index 000000000000..4ceabcaf4905
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr-rtas-common.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_POWERPC_PAPR_RTAS_COMMON_H
+#define _ASM_POWERPC_PAPR_RTAS_COMMON_H
+
+#include <linux/types.h>
+
+/*
+ * Return codes for sequence based RTAS calls.
+ * Not listed under PAPR+ v2.13 7.2.8: "Return Codes".
+ * But defined in the specific section of each RTAS call.
+ */
+#define RTAS_SEQ_COMPLETE 0 /* All data has been retrieved. */
+#define RTAS_SEQ_MORE_DATA 1 /* More data is available */
+#define RTAS_SEQ_START_OVER -4 /* Data changed, restart call sequence. */
+
+/*
+ * Internal "blob" APIs for accumulating RTAS call results into
+ * an immutable buffer to be attached to a file descriptor.
+ */
+struct papr_rtas_blob {
+ const char *data;
+ size_t len;
+};
+
+/**
+ * struct papr_rtas_sequence - State for managing a sequence of RTAS calls.
+ * @error: Shall be zero as long as the sequence has not encountered an error,
+ * -ve errno otherwise. Use papr_rtas_sequence_set_err() to update.
+ * @params: Parameter block to pass to rtas_*() calls.
+ * @begin:	Allocate the work area and initialize the parameter values
+ *		passed to the RTAS call.
+ * @end:	Free the allocated work area.
+ * @work:	Obtain data with the RTAS call; invoked repeatedly until the
+ *		sequence is completed.
+ */
+struct papr_rtas_sequence {
+ int error;
+ void *params;
+ void (*begin)(struct papr_rtas_sequence *seq);
+ void (*end)(struct papr_rtas_sequence *seq);
+ const char *(*work)(struct papr_rtas_sequence *seq, size_t *len);
+};
+
+extern bool papr_rtas_blob_has_data(const struct papr_rtas_blob *blob);
+extern void papr_rtas_blob_free(const struct papr_rtas_blob *blob);
+extern int papr_rtas_sequence_set_err(struct papr_rtas_sequence *seq,
+ int err);
+extern const struct papr_rtas_blob *papr_rtas_retrieve(struct papr_rtas_sequence *seq);
+extern long papr_rtas_setup_file_interface(struct papr_rtas_sequence *seq,
+ const struct file_operations *fops, char *name);
+extern bool papr_rtas_sequence_should_stop(const struct papr_rtas_sequence *seq,
+ s32 status, bool init_state);
+extern ssize_t papr_rtas_common_handle_read(struct file *file,
+ char __user *buf, size_t size, loff_t *off);
+extern int papr_rtas_common_handle_release(struct inode *inode,
+ struct file *file);
+extern loff_t papr_rtas_common_handle_seek(struct file *file, loff_t off,
+ int whence);
+#endif /* _ASM_POWERPC_PAPR_RTAS_COMMON_H */
diff --git a/arch/powerpc/platforms/pseries/papr-vpd.c b/arch/powerpc/platforms/pseries/papr-vpd.c
index c86950d7105a..f38c188fc4a1 100644
--- a/arch/powerpc/platforms/pseries/papr-vpd.c
+++ b/arch/powerpc/platforms/pseries/papr-vpd.c
@@ -2,7 +2,6 @@
#define pr_fmt(fmt) "papr-vpd: " fmt
-#include <linux/anon_inodes.h>
#include <linux/build_bug.h>
#include <linux/file.h>
#include <linux/fs.h>
@@ -20,14 +19,7 @@
#include <asm/rtas-work-area.h>
#include <asm/rtas.h>
#include <uapi/asm/papr-vpd.h>
-
-/*
- * Function-specific return values for ibm,get-vpd, derived from PAPR+
- * v2.13 7.3.20 "ibm,get-vpd RTAS Call".
- */
-#define RTAS_IBM_GET_VPD_COMPLETE 0 /* All VPD has been retrieved. */
-#define RTAS_IBM_GET_VPD_MORE_DATA 1 /* More VPD is available. */
-#define RTAS_IBM_GET_VPD_START_OVER -4 /* VPD changed, restart call sequence. */
+#include "papr-rtas-common.h"
/**
* struct rtas_ibm_get_vpd_params - Parameters (in and out) for ibm,get-vpd.
@@ -91,13 +83,14 @@ static int rtas_ibm_get_vpd(struct rtas_ibm_get_vpd_params *params)
case RTAS_INVALID_PARAMETER:
ret = -EINVAL;
break;
- case RTAS_IBM_GET_VPD_START_OVER:
+ case RTAS_SEQ_START_OVER:
ret = -EAGAIN;
+ pr_info_ratelimited("VPD changed during retrieval, retrying\n");
break;
- case RTAS_IBM_GET_VPD_MORE_DATA:
+ case RTAS_SEQ_MORE_DATA:
params->sequence = rets[0];
fallthrough;
- case RTAS_IBM_GET_VPD_COMPLETE:
+ case RTAS_SEQ_COMPLETE:
params->written = rets[1];
/*
* Kernel or firmware bug, do not continue.
@@ -119,91 +112,6 @@ static int rtas_ibm_get_vpd(struct rtas_ibm_get_vpd_params *params)
}
/*
- * Internal VPD "blob" APIs for accumulating ibm,get-vpd results into
- * an immutable buffer to be attached to a file descriptor.
- */
-struct vpd_blob {
- const char *data;
- size_t len;
-};
-
-static bool vpd_blob_has_data(const struct vpd_blob *blob)
-{
- return blob->data && blob->len;
-}
-
-static void vpd_blob_free(const struct vpd_blob *blob)
-{
- if (blob) {
- kvfree(blob->data);
- kfree(blob);
- }
-}
-
-/**
- * vpd_blob_extend() - Append data to a &struct vpd_blob.
- * @blob: The blob to extend.
- * @data: The new data to append to @blob.
- * @len: The length of @data.
- *
- * Context: May sleep.
- * Return: -ENOMEM on allocation failure, 0 otherwise.
- */
-static int vpd_blob_extend(struct vpd_blob *blob, const char *data, size_t len)
-{
- const size_t new_len = blob->len + len;
- const size_t old_len = blob->len;
- const char *old_ptr = blob->data;
- char *new_ptr;
-
- new_ptr = kvrealloc(old_ptr, new_len, GFP_KERNEL_ACCOUNT);
- if (!new_ptr)
- return -ENOMEM;
-
- memcpy(&new_ptr[old_len], data, len);
- blob->data = new_ptr;
- blob->len = new_len;
- return 0;
-}
-
-/**
- * vpd_blob_generate() - Construct a new &struct vpd_blob.
- * @generator: Function that supplies the blob data.
- * @arg: Context pointer supplied by caller, passed to @generator.
- *
- * The @generator callback is invoked until it returns NULL. @arg is
- * passed to @generator in its first argument on each call. When
- * @generator returns data, it should store the data length in its
- * second argument.
- *
- * Context: May sleep.
- * Return: A completely populated &struct vpd_blob, or NULL on error.
- */
-static const struct vpd_blob *
-vpd_blob_generate(const char * (*generator)(void *, size_t *), void *arg)
-{
- struct vpd_blob *blob;
- const char *buf;
- size_t len;
- int err = 0;
-
- blob = kzalloc(sizeof(*blob), GFP_KERNEL_ACCOUNT);
- if (!blob)
- return NULL;
-
- while (err == 0 && (buf = generator(arg, &len)))
- err = vpd_blob_extend(blob, buf, len);
-
- if (err != 0 || !vpd_blob_has_data(blob))
- goto free_blob;
-
- return blob;
-free_blob:
- vpd_blob_free(blob);
- return NULL;
-}
-
-/*
* Internal VPD sequence APIs. A VPD sequence is a series of calls to
* ibm,get-vpd for a given location code. The sequence ends when an
* error is encountered or all VPD for the location code has been
@@ -211,30 +119,14 @@ free_blob:
*/
/**
- * struct vpd_sequence - State for managing a VPD sequence.
- * @error: Shall be zero as long as the sequence has not encountered an error,
- * -ve errno otherwise. Use vpd_sequence_set_err() to update this.
- * @params: Parameter block to pass to rtas_ibm_get_vpd().
- */
-struct vpd_sequence {
- int error;
- struct rtas_ibm_get_vpd_params params;
-};
-
-/**
* vpd_sequence_begin() - Begin a VPD retrieval sequence.
- * @seq: Uninitialized sequence state.
- * @loc_code: Location code that defines the scope of the VPD to return.
- *
- * Initializes @seq with the resources necessary to carry out a VPD
- * sequence. Callers must pass @seq to vpd_sequence_end() regardless
- * of whether the sequence succeeds.
+ * @seq:	Sequence state; seq->params holds the ibm,get-vpd parameters.
*
* Context: May sleep.
*/
-static void vpd_sequence_begin(struct vpd_sequence *seq,
- const struct papr_location_code *loc_code)
+static void vpd_sequence_begin(struct papr_rtas_sequence *seq)
{
+ struct rtas_ibm_get_vpd_params *vpd_params;
/*
* Use a static data structure for the location code passed to
* RTAS to ensure it's in the RMA and avoid a separate work
@@ -242,6 +134,7 @@ static void vpd_sequence_begin(struct vpd_sequence *seq,
*/
static struct papr_location_code static_loc_code;
+ vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params;
/*
* We could allocate the work area before acquiring the
* function lock, but that would allow concurrent requests to
@@ -249,14 +142,12 @@ static void vpd_sequence_begin(struct vpd_sequence *seq,
* allocate the work area under the lock.
*/
mutex_lock(&rtas_ibm_get_vpd_lock);
- static_loc_code = *loc_code;
- *seq = (struct vpd_sequence) {
- .params = {
- .work_area = rtas_work_area_alloc(SZ_4K),
- .loc_code = &static_loc_code,
- .sequence = 1,
- },
- };
+ static_loc_code = *(struct papr_location_code *)vpd_params->loc_code;
+ vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params;
+ vpd_params->work_area = rtas_work_area_alloc(SZ_4K);
+ vpd_params->loc_code = &static_loc_code;
+ vpd_params->sequence = 1;
+ vpd_params->status = 0;
}
/**
@@ -265,180 +156,39 @@ static void vpd_sequence_begin(struct vpd_sequence *seq,
*
* Releases resources obtained by vpd_sequence_begin().
*/
-static void vpd_sequence_end(struct vpd_sequence *seq)
+static void vpd_sequence_end(struct papr_rtas_sequence *seq)
{
- rtas_work_area_free(seq->params.work_area);
- mutex_unlock(&rtas_ibm_get_vpd_lock);
-}
-
-/**
- * vpd_sequence_should_stop() - Determine whether a VPD retrieval sequence
- * should continue.
- * @seq: VPD sequence state.
- *
- * Examines the sequence error state and outputs of the last call to
- * ibm,get-vpd to determine whether the sequence in progress should
- * continue or stop.
- *
- * Return: True if the sequence has encountered an error or if all VPD for
- * this sequence has been retrieved. False otherwise.
- */
-static bool vpd_sequence_should_stop(const struct vpd_sequence *seq)
-{
- bool done;
-
- if (seq->error)
- return true;
+ struct rtas_ibm_get_vpd_params *vpd_params;
- switch (seq->params.status) {
- case 0:
- if (seq->params.written == 0)
- done = false; /* Initial state. */
- else
- done = true; /* All data consumed. */
- break;
- case 1:
- done = false; /* More data available. */
- break;
- default:
- done = true; /* Error encountered. */
- break;
- }
-
- return done;
-}
-
-static int vpd_sequence_set_err(struct vpd_sequence *seq, int err)
-{
- /* Preserve the first error recorded. */
- if (seq->error == 0)
- seq->error = err;
-
- return seq->error;
+ vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params;
+ rtas_work_area_free(vpd_params->work_area);
+ mutex_unlock(&rtas_ibm_get_vpd_lock);
}
/*
- * Generator function to be passed to vpd_blob_generate().
+ * Generator function to be passed to papr_rtas_blob_generate().
*/
-static const char *vpd_sequence_fill_work_area(void *arg, size_t *len)
+static const char *vpd_sequence_fill_work_area(struct papr_rtas_sequence *seq,
+ size_t *len)
{
- struct vpd_sequence *seq = arg;
- struct rtas_ibm_get_vpd_params *p = &seq->params;
+ struct rtas_ibm_get_vpd_params *p;
+ bool init_state;
- if (vpd_sequence_should_stop(seq))
+ p = (struct rtas_ibm_get_vpd_params *)seq->params;
+	init_state = (p->written == 0);
+
+ if (papr_rtas_sequence_should_stop(seq, p->status, init_state))
return NULL;
- if (vpd_sequence_set_err(seq, rtas_ibm_get_vpd(p)))
+ if (papr_rtas_sequence_set_err(seq, rtas_ibm_get_vpd(p)))
return NULL;
*len = p->written;
return rtas_work_area_raw_buf(p->work_area);
}
-/*
- * Higher-level VPD retrieval code below. These functions use the
- * vpd_blob_* and vpd_sequence_* APIs defined above to create fd-based
- * VPD handles for consumption by user space.
- */
-
-/**
- * papr_vpd_run_sequence() - Run a single VPD retrieval sequence.
- * @loc_code: Location code that defines the scope of VPD to return.
- *
- * Context: May sleep. Holds a mutex and an RTAS work area for its
- * duration. Typically performs multiple sleepable slab
- * allocations.
- *
- * Return: A populated &struct vpd_blob on success. Encoded error
- * pointer otherwise.
- */
-static const struct vpd_blob *papr_vpd_run_sequence(const struct papr_location_code *loc_code)
-{
- const struct vpd_blob *blob;
- struct vpd_sequence seq;
-
- vpd_sequence_begin(&seq, loc_code);
- blob = vpd_blob_generate(vpd_sequence_fill_work_area, &seq);
- if (!blob)
- vpd_sequence_set_err(&seq, -ENOMEM);
- vpd_sequence_end(&seq);
-
- if (seq.error) {
- vpd_blob_free(blob);
- return ERR_PTR(seq.error);
- }
-
- return blob;
-}
-
-/**
- * papr_vpd_retrieve() - Return the VPD for a location code.
- * @loc_code: Location code that defines the scope of VPD to return.
- *
- * Run VPD sequences against @loc_code until a blob is successfully
- * instantiated, or a hard error is encountered, or a fatal signal is
- * pending.
- *
- * Context: May sleep.
- * Return: A fully populated VPD blob when successful. Encoded error
- * pointer otherwise.
- */
-static const struct vpd_blob *papr_vpd_retrieve(const struct papr_location_code *loc_code)
-{
- const struct vpd_blob *blob;
-
- /*
- * EAGAIN means the sequence errored with a -4 (VPD changed)
- * status from ibm,get-vpd, and we should attempt a new
- * sequence. PAPR+ v2.13 R1–7.3.20–5 indicates that this
- * should be a transient condition, not something that happens
- * continuously. But we'll stop trying on a fatal signal.
- */
- do {
- blob = papr_vpd_run_sequence(loc_code);
- if (!IS_ERR(blob)) /* Success. */
- break;
- if (PTR_ERR(blob) != -EAGAIN) /* Hard error. */
- break;
- pr_info_ratelimited("VPD changed during retrieval, retrying\n");
- cond_resched();
- } while (!fatal_signal_pending(current));
-
- return blob;
-}
-
-static ssize_t papr_vpd_handle_read(struct file *file, char __user *buf, size_t size, loff_t *off)
-{
- const struct vpd_blob *blob = file->private_data;
-
- /* bug: we should not instantiate a handle without any data attached. */
- if (!vpd_blob_has_data(blob)) {
- pr_err_once("handle without data\n");
- return -EIO;
- }
-
- return simple_read_from_buffer(buf, size, off, blob->data, blob->len);
-}
-
-static int papr_vpd_handle_release(struct inode *inode, struct file *file)
-{
- const struct vpd_blob *blob = file->private_data;
-
- vpd_blob_free(blob);
-
- return 0;
-}
-
-static loff_t papr_vpd_handle_seek(struct file *file, loff_t off, int whence)
-{
- const struct vpd_blob *blob = file->private_data;
-
- return fixed_size_llseek(file, off, whence, blob->len);
-}
-
-
static const struct file_operations papr_vpd_handle_ops = {
- .read = papr_vpd_handle_read,
- .llseek = papr_vpd_handle_seek,
- .release = papr_vpd_handle_release,
+ .read = papr_rtas_common_handle_read,
+ .llseek = papr_rtas_common_handle_seek,
+ .release = papr_rtas_common_handle_release,
};
/**
@@ -460,10 +210,9 @@ static const struct file_operations papr_vpd_handle_ops = {
*/
static long papr_vpd_create_handle(struct papr_location_code __user *ulc)
{
+ struct rtas_ibm_get_vpd_params vpd_params = {};
+ struct papr_rtas_sequence seq = {};
struct papr_location_code klc;
- const struct vpd_blob *blob;
- struct file *file;
- long err;
int fd;
if (copy_from_user(&klc, ulc, sizeof(klc)))
@@ -472,30 +221,19 @@ static long papr_vpd_create_handle(struct papr_location_code __user *ulc)
if (!string_is_terminated(klc.str, ARRAY_SIZE(klc.str)))
return -EINVAL;
- blob = papr_vpd_retrieve(&klc);
- if (IS_ERR(blob))
- return PTR_ERR(blob);
+ seq = (struct papr_rtas_sequence) {
+ .begin = vpd_sequence_begin,
+ .end = vpd_sequence_end,
+ .work = vpd_sequence_fill_work_area,
+ };
- fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
- if (fd < 0) {
- err = fd;
- goto free_blob;
- }
+ vpd_params.loc_code = &klc;
+ seq.params = (void *)&vpd_params;
+
+ fd = papr_rtas_setup_file_interface(&seq, &papr_vpd_handle_ops,
+ "[papr-vpd]");
- file = anon_inode_getfile_fmode("[papr-vpd]", &papr_vpd_handle_ops,
- (void *)blob, O_RDONLY,
- FMODE_LSEEK | FMODE_PREAD);
- if (IS_ERR(file)) {
- err = PTR_ERR(file);
- goto put_fd;
- }
- fd_install(fd, file);
return fd;
-put_fd:
- put_unused_fd(fd);
-free_blob:
- vpd_blob_free(blob);
- return err;
}
/*
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index e14493685fe8..4a59ed1d62ce 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -207,7 +207,7 @@ unsigned int cpm2_get_irq(void)
if (irq == 0)
return(-1);
- return irq_linear_revmap(cpm2_pic_host, irq);
+ return irq_find_mapping(cpm2_pic_host, irq);
}
static int cpm2_pic_host_map(struct irq_domain *h, unsigned int virq,
@@ -259,7 +259,8 @@ void cpm2_pic_init(struct device_node *node)
out_be32(&cpm2_intctl->ic_scprrl, 0x05309770);
/* create a legacy host */
- cpm2_pic_host = irq_domain_add_linear(node, 64, &cpm2_pic_host_ops, NULL);
+ cpm2_pic_host = irq_domain_create_linear(of_fwnode_handle(node), 64,
+ &cpm2_pic_host_ops, NULL);
if (cpm2_pic_host == NULL) {
printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
return;
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 47db732981a8..e22fc638dbc7 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -138,7 +138,7 @@ static void __cpm2_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask,
out_be32(&iop->dat, cpm2_gc->cpdata);
}
-static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
+static int cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(gc);
@@ -150,6 +150,8 @@ static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
__cpm2_gpio32_set(mm_gc, pin_mask, value);
spin_unlock_irqrestore(&cpm2_gc->lock, flags);
+
+ return 0;
}
static int cpm2_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -208,7 +210,7 @@ int cpm2_gpiochip_add32(struct device *dev)
gc->direction_input = cpm2_gpio32_dir_in;
gc->direction_output = cpm2_gpio32_dir_out;
gc->get = cpm2_gpio32_get;
- gc->set = cpm2_gpio32_set;
+ gc->set_rv = cpm2_gpio32_set;
gc->parent = dev;
gc->owner = THIS_MODULE;
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index fb502b72fca1..b6f9774038e1 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -175,7 +175,7 @@ unsigned int ehv_pic_get_irq(void)
* this will also setup revmap[] in the slow path for the first
* time, next calls will always use fast path by indexing revmap
*/
- return irq_linear_revmap(global_ehv_pic->irqhost, irq);
+ return irq_find_mapping(global_ehv_pic->irqhost, irq);
}
static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node,
@@ -269,8 +269,9 @@ void __init ehv_pic_init(void)
return;
}
- ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS,
- &ehv_pic_host_ops, ehv_pic);
+ ehv_pic->irqhost = irq_domain_create_linear(of_fwnode_handle(np),
+ NR_EHV_PIC_INTS,
+ &ehv_pic_host_ops, ehv_pic);
if (!ehv_pic->irqhost) {
of_node_put(np);
kfree(ehv_pic);
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 7b9a5ea9cad9..4fe8a7b1b288 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -412,7 +412,7 @@ static int fsl_of_msi_probe(struct platform_device *dev)
}
platform_set_drvdata(dev, msi);
- msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
+ msi->irqhost = irq_domain_create_linear(of_fwnode_handle(dev->dev.of_node),
NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);
if (msi->irqhost == NULL) {
diff --git a/arch/powerpc/sysdev/ge/ge_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c
index a6c424680c37..0bc3f0b36528 100644
--- a/arch/powerpc/sysdev/ge/ge_pic.c
+++ b/arch/powerpc/sysdev/ge/ge_pic.c
@@ -214,8 +214,9 @@ void __init gef_pic_init(struct device_node *np)
}
/* Setup an irq_domain structure */
- gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS,
- &gef_pic_host_ops, NULL);
+ gef_pic_irq_host = irq_domain_create_linear(of_fwnode_handle(np),
+ GEF_PIC_NUM_IRQS,
+ &gef_pic_host_ops, NULL);
if (gef_pic_irq_host == NULL)
return;
@@ -244,7 +245,7 @@ unsigned int gef_pic_get_irq(void)
if (active & (0x1 << hwirq))
break;
}
- virq = irq_linear_revmap(gef_pic_irq_host,
+ virq = irq_find_mapping(gef_pic_irq_host,
(irq_hw_number_t)hwirq);
}
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index 06e391485da7..99bb2b916949 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -260,8 +260,8 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
raw_spin_unlock_irqrestore(&i8259_lock, flags);
/* create a legacy host */
- i8259_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
- &i8259_host_ops, NULL);
+ i8259_host = irq_domain_create_legacy(of_fwnode_handle(node), NR_IRQS_LEGACY, 0, 0,
+ &i8259_host_ops, NULL);
if (i8259_host == NULL) {
printk(KERN_ERR "i8259: failed to allocate irq host !\n");
return;
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index a35be0232978..70be2105865d 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -711,8 +711,9 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
if (ipic == NULL)
return NULL;
- ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS,
- &ipic_host_ops, ipic);
+ ipic->irqhost = irq_domain_create_linear(of_fwnode_handle(node),
+ NR_IPIC_INTS,
+ &ipic_host_ops, ipic);
if (ipic->irqhost == NULL) {
kfree(ipic);
return NULL;
@@ -800,7 +801,7 @@ unsigned int ipic_get_irq(void)
if (irq == 0) /* 0 --> no irq is pending */
return 0;
- return irq_linear_revmap(primary_ipic->irqhost, irq);
+ return irq_find_mapping(primary_ipic->irqhost, irq);
}
#ifdef CONFIG_SUSPEND
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 4afbab83a2e2..ad7310bba00b 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -27,6 +27,7 @@
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/syscore_ops.h>
#include <linux/ratelimit.h>
#include <linux/pgtable.h>
@@ -474,9 +475,9 @@ static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32);
}
- printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n",
- PCI_SLOT(devfn), PCI_FUNC(devfn),
- flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr);
+ pr_debug("mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n",
+ PCI_SLOT(devfn), PCI_FUNC(devfn),
+ str_enabled_disabled(flags & HT_MSI_FLAGS_ENABLE), addr);
if (!(flags & HT_MSI_FLAGS_ENABLE))
writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS);
@@ -1483,9 +1484,9 @@ struct mpic * __init mpic_alloc(struct device_node *node,
mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
mpic->isu_mask = (1 << mpic->isu_shift) - 1;
- mpic->irqhost = irq_domain_add_linear(mpic->node,
- intvec_top,
- &mpic_host_ops, mpic);
+ mpic->irqhost = irq_domain_create_linear(of_fwnode_handle(mpic->node),
+ intvec_top,
+ &mpic_host_ops, mpic);
/*
* FIXME: The code leaks the MPIC object and mappings here; this
@@ -1785,7 +1786,7 @@ static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
return 0;
}
- return irq_linear_revmap(mpic->irqhost, src);
+ return irq_find_mapping(mpic->irqhost, src);
}
unsigned int mpic_get_one_irq(struct mpic *mpic)
@@ -1823,7 +1824,7 @@ unsigned int mpic_get_coreint_irq(void)
return 0;
}
- return irq_linear_revmap(mpic->irqhost, src);
+ return irq_find_mapping(mpic->irqhost, src);
#else
return 0;
#endif
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 0e42f7bad7db..07d0f6a83879 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -404,8 +404,8 @@ void __init tsi108_pci_int_init(struct device_node *node)
{
DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
- pci_irq_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
- &pci_irq_domain_ops, NULL);
+ pci_irq_host = irq_domain_create_legacy(of_fwnode_handle(node), NR_IRQS_LEGACY, 0, 0,
+ &pci_irq_domain_ops, NULL);
if (pci_irq_host == NULL) {
printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n");
return;
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index dc2e61837396..f10592405024 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -1464,7 +1464,7 @@ static const struct irq_domain_ops xive_irq_domain_ops = {
static void __init xive_init_host(struct device_node *np)
{
- xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL);
+ xive_irq_domain = irq_domain_create_tree(of_fwnode_handle(np), &xive_irq_domain_ops, NULL);
if (WARN_ON(xive_irq_domain == NULL))
return;
irq_set_default_domain(xive_irq_domain);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 88abffa8b54c..cb3a3244ae6f 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1770,7 +1770,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
sp + STACK_INT_FRAME_REGS);
break;
}
- printf("--- Exception: %lx %s at ", regs.trap,
+ printf("---- Exception: %lx %s at ", regs.trap,
getvecname(TRAP(&regs)));
pc = regs.nip;
lr = regs.link;
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 8b503e54fa1b..a9c3d2f6debc 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -54,6 +54,7 @@ config ARCH_THEAD
bool "T-HEAD RISC-V SoCs"
depends on MMU && !XIP_KERNEL
select ERRATA_THEAD
+ select PM_GENERIC_DOMAINS if PM
help
This enables support for the RISC-V based T-HEAD SoCs.
diff --git a/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi b/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi
index e0ddf8f602c7..a8bcb26f4270 100644
--- a/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi
+++ b/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi
@@ -143,7 +143,8 @@
};
l2cache: cache-controller@13400000 {
- compatible = "andestech,ax45mp-cache", "cache";
+ compatible = "renesas,r9a07g043f-ax45mp-cache", "andestech,ax45mp-cache",
+ "cache";
reg = <0x0 0x13400000 0x0 0x100000>;
interrupts = <SOC_PERIPHERAL_IRQ(476) IRQ_TYPE_LEVEL_HIGH>;
cache-size = <0x40000>;
diff --git a/arch/riscv/boot/dts/sophgo/Makefile b/arch/riscv/boot/dts/sophgo/Makefile
index 47d4243a8f35..85966306801e 100644
--- a/arch/riscv/boot/dts/sophgo/Makefile
+++ b/arch/riscv/boot/dts/sophgo/Makefile
@@ -3,3 +3,4 @@ dtb-$(CONFIG_ARCH_SOPHGO) += cv1800b-milkv-duo.dtb
dtb-$(CONFIG_ARCH_SOPHGO) += cv1812h-huashan-pi.dtb
dtb-$(CONFIG_ARCH_SOPHGO) += sg2002-licheerv-nano-b.dtb
dtb-$(CONFIG_ARCH_SOPHGO) += sg2042-milkv-pioneer.dtb
+dtb-$(CONFIG_ARCH_SOPHGO) += sg2044-sophgo-srd3-10.dtb
diff --git a/arch/riscv/boot/dts/sophgo/cv1800b.dtsi b/arch/riscv/boot/dts/sophgo/cv1800b.dtsi
index aa1f5df100f0..90de978f69c1 100644
--- a/arch/riscv/boot/dts/sophgo/cv1800b.dtsi
+++ b/arch/riscv/boot/dts/sophgo/cv1800b.dtsi
@@ -3,8 +3,11 @@
* Copyright (C) 2023 Jisheng Zhang <jszhang@kernel.org>
*/
+#define SOC_PERIPHERAL_IRQ(nr) ((nr) + 16)
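+/*
+ * Peripheral interrupts in cv180x.dtsi are written as
+ * SOC_PERIPHERAL_IRQ(n); with this +16 offset, for example, gpio0's
+ * SOC_PERIPHERAL_IRQ(44) expands to PLIC source 60, the literal this
+ * SoC previously used in cv18xx.dtsi.
+ */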
+
#include <dt-bindings/pinctrl/pinctrl-cv1800b.h>
-#include "cv18xx.dtsi"
+#include "cv180x-cpus.dtsi"
+#include "cv180x.dtsi"
/ {
compatible = "sophgo,cv1800b";
@@ -15,23 +18,37 @@
};
soc {
+ interrupt-parent = <&plic>;
+ dma-noncoherent;
+
pinctrl: pinctrl@3001000 {
compatible = "sophgo,cv1800b-pinctrl";
reg = <0x03001000 0x1000>,
<0x05027000 0x1000>;
reg-names = "sys", "rtc";
};
- };
-};
-&plic {
- compatible = "sophgo,cv1800b-plic", "thead,c900-plic";
-};
+ clk: clock-controller@3002000 {
+ compatible = "sophgo,cv1800b-clk";
+ reg = <0x03002000 0x1000>;
+ clocks = <&osc>;
+ #clock-cells = <1>;
+ };
-&clint {
- compatible = "sophgo,cv1800b-clint", "thead,c900-clint";
-};
+ plic: interrupt-controller@70000000 {
+ compatible = "sophgo,cv1800b-plic", "thead,c900-plic";
+ reg = <0x70000000 0x4000000>;
+ interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ riscv,ndev = <101>;
+ };
-&clk {
- compatible = "sophgo,cv1800-clk";
+ clint: timer@74000000 {
+ compatible = "sophgo,cv1800b-clint", "thead,c900-clint";
+ reg = <0x74000000 0x10000>;
+ interrupts-extended = <&cpu0_intc 3>, <&cpu0_intc 7>;
+ };
+ };
};
diff --git a/arch/riscv/boot/dts/sophgo/cv180x-cpus.dtsi b/arch/riscv/boot/dts/sophgo/cv180x-cpus.dtsi
new file mode 100644
index 000000000000..93fd9e47a195
--- /dev/null
+++ b/arch/riscv/boot/dts/sophgo/cv180x-cpus.dtsi
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2023 Jisheng Zhang <jszhang@kernel.org>
+ * Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+/ {
+ cpus: cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ timebase-frequency = <25000000>;
+
+ cpu0: cpu@0 {
+ compatible = "thead,c906", "riscv";
+ device_type = "cpu";
+ reg = <0>;
+ d-cache-block-size = <64>;
+ d-cache-sets = <512>;
+ d-cache-size = <65536>;
+ i-cache-block-size = <64>;
+ i-cache-sets = <128>;
+ i-cache-size = <32768>;
+ mmu-type = "riscv,sv39";
+ riscv,isa = "rv64imafdc";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicntr", "zicsr",
+ "zifencei", "zihpm";
+
+ cpu0_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi b/arch/riscv/boot/dts/sophgo/cv180x.dtsi
index c18822ec849f..ed06c3609fb2 100644
--- a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi
+++ b/arch/riscv/boot/dts/sophgo/cv180x.dtsi
@@ -12,35 +12,6 @@
#address-cells = <1>;
#size-cells = <1>;
- cpus: cpus {
- #address-cells = <1>;
- #size-cells = <0>;
- timebase-frequency = <25000000>;
-
- cpu0: cpu@0 {
- compatible = "thead,c906", "riscv";
- device_type = "cpu";
- reg = <0>;
- d-cache-block-size = <64>;
- d-cache-sets = <512>;
- d-cache-size = <65536>;
- i-cache-block-size = <64>;
- i-cache-sets = <128>;
- i-cache-size = <32768>;
- mmu-type = "riscv,sv39";
- riscv,isa = "rv64imafdc";
- riscv,isa-base = "rv64i";
- riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicntr", "zicsr",
- "zifencei", "zihpm";
-
- cpu0_intc: interrupt-controller {
- compatible = "riscv,cpu-intc";
- interrupt-controller;
- #interrupt-cells = <1>;
- };
- };
- };
-
osc: oscillator {
compatible = "fixed-clock";
clock-output-names = "osc_25m";
@@ -49,18 +20,10 @@
soc {
compatible = "simple-bus";
- interrupt-parent = <&plic>;
#address-cells = <1>;
#size-cells = <1>;
- dma-noncoherent;
ranges;
- clk: clock-controller@3002000 {
- reg = <0x03002000 0x1000>;
- clocks = <&osc>;
- #clock-cells = <1>;
- };
-
gpio0: gpio@3020000 {
compatible = "snps,dw-apb-gpio";
reg = <0x3020000 0x1000>;
@@ -75,7 +38,7 @@
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
- interrupts = <60 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(44) IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -93,7 +56,7 @@
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
- interrupts = <61 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(45) IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -111,7 +74,7 @@
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
- interrupts = <62 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(46) IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -129,7 +92,7 @@
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
- interrupts = <63 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(47) IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -137,7 +100,7 @@
compatible = "sophgo,cv1800b-saradc";
reg = <0x030f0000 0x1000>;
clocks = <&clk CLK_SARADC>;
- interrupts = <100 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(84) IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -162,7 +125,7 @@
#size-cells = <0>;
clocks = <&clk CLK_I2C>, <&clk CLK_APB_I2C0>;
clock-names = "ref", "pclk";
- interrupts = <49 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(33) IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -173,7 +136,7 @@
#size-cells = <0>;
clocks = <&clk CLK_I2C>, <&clk CLK_APB_I2C1>;
clock-names = "ref", "pclk";
- interrupts = <50 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(34) IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -184,7 +147,7 @@
#size-cells = <0>;
clocks = <&clk CLK_I2C>, <&clk CLK_APB_I2C2>;
clock-names = "ref", "pclk";
- interrupts = <51 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(35) IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -195,7 +158,7 @@
#size-cells = <0>;
clocks = <&clk CLK_I2C>, <&clk CLK_APB_I2C3>;
clock-names = "ref", "pclk";
- interrupts = <52 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(36) IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -206,14 +169,14 @@
#size-cells = <0>;
clocks = <&clk CLK_I2C>, <&clk CLK_APB_I2C4>;
clock-names = "ref", "pclk";
- interrupts = <53 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(37) IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
uart0: serial@4140000 {
compatible = "snps,dw-apb-uart";
reg = <0x04140000 0x100>;
- interrupts = <44 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(28) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk CLK_UART0>, <&clk CLK_APB_UART0>;
clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
@@ -224,7 +187,7 @@
uart1: serial@4150000 {
compatible = "snps,dw-apb-uart";
reg = <0x04150000 0x100>;
- interrupts = <45 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(29) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk CLK_UART1>, <&clk CLK_APB_UART1>;
clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
@@ -235,7 +198,7 @@
uart2: serial@4160000 {
compatible = "snps,dw-apb-uart";
reg = <0x04160000 0x100>;
- interrupts = <46 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(30) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk CLK_UART2>, <&clk CLK_APB_UART2>;
clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
@@ -246,7 +209,7 @@
uart3: serial@4170000 {
compatible = "snps,dw-apb-uart";
reg = <0x04170000 0x100>;
- interrupts = <47 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(31) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk CLK_UART3>, <&clk CLK_APB_UART3>;
clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
@@ -261,7 +224,7 @@
#size-cells = <0>;
clocks = <&clk CLK_SPI>, <&clk CLK_APB_SPI0>;
clock-names = "ssi_clk", "pclk";
- interrupts = <54 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(38) IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -272,7 +235,7 @@
#size-cells = <0>;
clocks = <&clk CLK_SPI>, <&clk CLK_APB_SPI1>;
clock-names = "ssi_clk", "pclk";
- interrupts = <55 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(39) IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -283,7 +246,7 @@
#size-cells = <0>;
clocks = <&clk CLK_SPI>, <&clk CLK_APB_SPI2>;
clock-names = "ssi_clk", "pclk";
- interrupts = <56 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(40) IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -294,14 +257,14 @@
#size-cells = <0>;
clocks = <&clk CLK_SPI>, <&clk CLK_APB_SPI3>;
clock-names = "ssi_clk", "pclk";
- interrupts = <57 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(41) IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
uart4: serial@41c0000 {
compatible = "snps,dw-apb-uart";
reg = <0x041c0000 0x100>;
- interrupts = <48 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(32) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk CLK_UART4>, <&clk CLK_APB_UART4>;
clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
@@ -312,7 +275,7 @@
sdhci0: mmc@4310000 {
compatible = "sophgo,cv1800b-dwcmshc";
reg = <0x4310000 0x1000>;
- interrupts = <36 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(20) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk CLK_AXI4_SD0>,
<&clk CLK_SD0>;
clock-names = "core", "bus";
@@ -322,7 +285,7 @@
sdhci1: mmc@4320000 {
compatible = "sophgo,cv1800b-dwcmshc";
reg = <0x4320000 0x1000>;
- interrupts = <38 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(22) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk CLK_AXI4_SD1>,
<&clk CLK_SD1>;
clock-names = "core", "bus";
@@ -332,7 +295,7 @@
dmac: dma-controller@4330000 {
compatible = "snps,axi-dma-1.01a";
reg = <0x04330000 0x1000>;
- interrupts = <29 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(13) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk CLK_SDMA_AXI>, <&clk CLK_SDMA_AXI>;
clock-names = "core-clk", "cfgr-clk";
#dma-cells = <1>;
@@ -341,22 +304,8 @@
1024 1024 1024 1024>;
snps,priority = <0 1 2 3 4 5 6 7>;
snps,dma-masters = <2>;
- snps,data-width = <4>;
+ snps,data-width = <2>;
status = "disabled";
};
-
- plic: interrupt-controller@70000000 {
- reg = <0x70000000 0x4000000>;
- interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>;
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- riscv,ndev = <101>;
- };
-
- clint: timer@74000000 {
- reg = <0x74000000 0x10000>;
- interrupts-extended = <&cpu0_intc 3>, <&cpu0_intc 7>;
- };
};
};
diff --git a/arch/riscv/boot/dts/sophgo/cv1812h.dtsi b/arch/riscv/boot/dts/sophgo/cv1812h.dtsi
index 8a1b95c5116b..9a2a314d3347 100644
--- a/arch/riscv/boot/dts/sophgo/cv1812h.dtsi
+++ b/arch/riscv/boot/dts/sophgo/cv1812h.dtsi
@@ -3,9 +3,12 @@
* Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com>
*/
+#define SOC_PERIPHERAL_IRQ(nr) ((nr) + 16)
+
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/pinctrl-cv1812h.h>
-#include "cv18xx.dtsi"
+#include "cv180x-cpus.dtsi"
+#include "cv180x.dtsi"
#include "cv181x.dtsi"
/ {
@@ -17,23 +20,37 @@
};
soc {
+ interrupt-parent = <&plic>;
+ dma-noncoherent;
+
pinctrl: pinctrl@3001000 {
compatible = "sophgo,cv1812h-pinctrl";
reg = <0x03001000 0x1000>,
<0x05027000 0x1000>;
reg-names = "sys", "rtc";
};
- };
-};
-&plic {
- compatible = "sophgo,cv1812h-plic", "thead,c900-plic";
-};
+ clk: clock-controller@3002000 {
+ compatible = "sophgo,cv1812h-clk";
+ reg = <0x03002000 0x1000>;
+ clocks = <&osc>;
+ #clock-cells = <1>;
+ };
-&clint {
- compatible = "sophgo,cv1812h-clint", "thead,c900-clint";
-};
+ plic: interrupt-controller@70000000 {
+ compatible = "sophgo,cv1812h-plic", "thead,c900-plic";
+ reg = <0x70000000 0x4000000>;
+ interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ riscv,ndev = <101>;
+ };
-&clk {
- compatible = "sophgo,cv1810-clk";
+ clint: timer@74000000 {
+ compatible = "sophgo,cv1812h-clint", "thead,c900-clint";
+ reg = <0x74000000 0x10000>;
+ interrupts-extended = <&cpu0_intc 3>, <&cpu0_intc 7>;
+ };
+ };
};
diff --git a/arch/riscv/boot/dts/sophgo/cv181x.dtsi b/arch/riscv/boot/dts/sophgo/cv181x.dtsi
index 5fd14dd1b14f..bbdb30653e9a 100644
--- a/arch/riscv/boot/dts/sophgo/cv181x.dtsi
+++ b/arch/riscv/boot/dts/sophgo/cv181x.dtsi
@@ -11,7 +11,7 @@
emmc: mmc@4300000 {
compatible = "sophgo,cv1800b-dwcmshc";
reg = <0x4300000 0x1000>;
- interrupts = <34 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(18) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk CLK_AXI4_EMMC>,
<&clk CLK_EMMC>;
clock-names = "core", "bus";
diff --git a/arch/riscv/boot/dts/sophgo/sg2002.dtsi b/arch/riscv/boot/dts/sophgo/sg2002.dtsi
index 7f79de33163c..98001cce238e 100644
--- a/arch/riscv/boot/dts/sophgo/sg2002.dtsi
+++ b/arch/riscv/boot/dts/sophgo/sg2002.dtsi
@@ -3,9 +3,12 @@
* Copyright (C) 2024 Thomas Bonnefille <thomas.bonnefille@bootlin.com>
*/
+#define SOC_PERIPHERAL_IRQ(nr) ((nr) + 16)
+
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/pinctrl-sg2002.h>
-#include "cv18xx.dtsi"
+#include "cv180x-cpus.dtsi"
+#include "cv180x.dtsi"
#include "cv181x.dtsi"
/ {
@@ -17,25 +20,39 @@
};
soc {
+ interrupt-parent = <&plic>;
+ dma-noncoherent;
+
pinctrl: pinctrl@3001000 {
compatible = "sophgo,sg2002-pinctrl";
reg = <0x03001000 0x1000>,
<0x05027000 0x1000>;
reg-names = "sys", "rtc";
};
- };
-};
-&plic {
- compatible = "sophgo,sg2002-plic", "thead,c900-plic";
-};
+ clk: clock-controller@3002000 {
+ compatible = "sophgo,sg2002-clk", "sophgo,sg2000-clk";
+ reg = <0x03002000 0x1000>;
+ clocks = <&osc>;
+ #clock-cells = <1>;
+ };
-&clint {
- compatible = "sophgo,sg2002-clint", "thead,c900-clint";
-};
+ plic: interrupt-controller@70000000 {
+ compatible = "sophgo,sg2002-plic", "thead,c900-plic";
+ reg = <0x70000000 0x4000000>;
+ interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ riscv,ndev = <101>;
+ };
-&clk {
- compatible = "sophgo,sg2000-clk";
+ clint: timer@74000000 {
+ compatible = "sophgo,sg2002-clint", "thead,c900-clint";
+ reg = <0x74000000 0x10000>;
+ interrupts-extended = <&cpu0_intc 3>, <&cpu0_intc 7>;
+ };
+ };
};
&sdhci0 {
diff --git a/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts b/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts
index 34645a5f6038..ef3a602172b1 100644
--- a/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts
+++ b/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts
@@ -42,6 +42,8 @@
};
&emmc {
+ pinctrl-0 = <&emmc_cfg>;
+ pinctrl-names = "default";
bus-width = <4>;
no-sdio;
no-sd;
@@ -51,6 +53,8 @@
};
&i2c1 {
+ pinctrl-0 = <&i2c1_cfg>;
+ pinctrl-names = "default";
status = "okay";
mcu: syscon@17 {
@@ -60,7 +64,73 @@
};
};
+&pinctrl {
+ emmc_cfg: sdhci-emmc-cfg {
+ sdhci-emmc-wp-pins {
+ pinmux = <PINMUX(PIN_EMMC_WP, 0)>;
+ bias-disable;
+ drive-strength-microamp = <26800>;
+ input-schmitt-disable;
+ };
+
+ sdhci-emmc-cd-pins {
+ pinmux = <PINMUX(PIN_EMMC_CD, 0)>;
+ bias-pull-up;
+ drive-strength-microamp = <26800>;
+ input-schmitt-enable;
+ };
+
+ sdhci-emmc-rst-pwr-pins {
+ pinmux = <PINMUX(PIN_EMMC_RST, 0)>,
+ <PINMUX(PIN_EMMC_PWR_EN, 0)>;
+ bias-disable;
+ drive-strength-microamp = <26800>;
+ input-schmitt-disable;
+ };
+ };
+
+ i2c1_cfg: i2c1-cfg {
+ i2c1-pins {
+ pinmux = <PINMUX(PIN_IIC1_SDA, 0)>,
+ <PINMUX(PIN_IIC1_SCL, 0)>;
+ bias-pull-up;
+ drive-strength-microamp = <26800>;
+ input-schmitt-enable;
+ };
+ };
+
+ sd_cfg: sdhci-sd-cfg {
+ sdhci-sd-cd-wp-pins {
+ pinmux = <PINMUX(PIN_SDIO_CD, 0)>,
+ <PINMUX(PIN_SDIO_WP, 0)>;
+ bias-pull-up;
+ drive-strength-microamp = <26800>;
+ input-schmitt-enable;
+ };
+
+ sdhci-sd-rst-pwr-pins {
+ pinmux = <PINMUX(PIN_SDIO_RST, 0)>,
+ <PINMUX(PIN_SDIO_PWR_EN, 0)>;
+ bias-disable;
+ drive-strength-microamp = <26800>;
+ input-schmitt-disable;
+ };
+ };
+
+ uart0_cfg: uart0-cfg {
+ uart0-rx-pins {
+ pinmux = <PINMUX(PIN_UART0_TX, 0)>,
+ <PINMUX(PIN_UART0_RX, 0)>;
+ bias-pull-up;
+ drive-strength-microamp = <26800>;
+ input-schmitt-enable;
+ };
+ };
+};
+
&sd {
+ pinctrl-0 = <&sd_cfg>;
+ pinctrl-names = "default";
bus-width = <4>;
no-sdio;
no-mmc;
@@ -69,6 +139,8 @@
};
&uart0 {
+ pinctrl-0 = <&uart0_cfg>;
+ pinctrl-names = "default";
status = "okay";
};
diff --git a/arch/riscv/boot/dts/sophgo/sg2042.dtsi b/arch/riscv/boot/dts/sophgo/sg2042.dtsi
index aa8b7fcc125d..85636d1798f1 100644
--- a/arch/riscv/boot/dts/sophgo/sg2042.dtsi
+++ b/arch/riscv/boot/dts/sophgo/sg2042.dtsi
@@ -8,6 +8,7 @@
#include <dt-bindings/clock/sophgo,sg2042-pll.h>
#include <dt-bindings/clock/sophgo,sg2042-rpgate.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/pinctrl-sg2042.h>
#include <dt-bindings/reset/sophgo,sg2042-reset.h>
#include "sg2042-cpus.dtsi"
@@ -200,6 +201,11 @@
#clock-cells = <1>;
};
+ pinctrl: pinctrl@7030011000 {
+ compatible = "sophgo,sg2042-pinctrl";
+ reg = <0x70 0x30011000 0x0 0x1000>;
+ };
+
clkgen: clock-controller@7030012000 {
compatible = "sophgo,sg2042-clkgen";
reg = <0x70 0x30012000 0x0 0x1000>;
@@ -537,6 +543,32 @@
status = "disabled";
};
+ spi0: spi@7040004000 {
+ compatible = "sophgo,sg2042-spi", "snps,dw-apb-ssi";
+ reg = <0x70 0x40004000 0x00 0x1000>;
+ clocks = <&clkgen GATE_CLK_APB_SPI>;
+ interrupt-parent = <&intc>;
+ interrupts = <110 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ num-cs = <2>;
+ resets = <&rstgen RST_SPI0>;
+ status = "disabled";
+ };
+
+ spi1: spi@7040005000 {
+ compatible = "sophgo,sg2042-spi", "snps,dw-apb-ssi";
+ reg = <0x70 0x40005000 0x00 0x1000>;
+ clocks = <&clkgen GATE_CLK_APB_SPI>;
+ interrupt-parent = <&intc>;
+ interrupts = <111 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ num-cs = <2>;
+ resets = <&rstgen RST_SPI1>;
+ status = "disabled";
+ };
+
emmc: mmc@704002a000 {
compatible = "sophgo,sg2042-dwcmshc";
reg = <0x70 0x4002a000 0x0 0x1000>;
diff --git a/arch/riscv/boot/dts/sophgo/sg2044-cpus.dtsi b/arch/riscv/boot/dts/sophgo/sg2044-cpus.dtsi
new file mode 100644
index 000000000000..2a4267078ce6
--- /dev/null
+++ b/arch/riscv/boot/dts/sophgo/sg2044-cpus.dtsi
@@ -0,0 +1,3002 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ timebase-frequency = <50000000>;
+
+ cpu0: cpu@0 {
+ compatible = "thead,c920", "riscv";
+ reg = <0>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache0>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu0_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu1: cpu@1 {
+ compatible = "thead,c920", "riscv";
+ reg = <1>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache0>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu1_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu2: cpu@2 {
+ compatible = "thead,c920", "riscv";
+ reg = <2>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache0>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu2_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu3: cpu@3 {
+ compatible = "thead,c920", "riscv";
+ reg = <3>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache0>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu3_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu4: cpu@4 {
+ compatible = "thead,c920", "riscv";
+ reg = <4>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache1>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu4_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu5: cpu@5 {
+ compatible = "thead,c920", "riscv";
+ reg = <5>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache1>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu5_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu6: cpu@6 {
+ compatible = "thead,c920", "riscv";
+ reg = <6>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache1>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu6_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu7: cpu@7 {
+ compatible = "thead,c920", "riscv";
+ reg = <7>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache1>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu7_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu8: cpu@8 {
+ compatible = "thead,c920", "riscv";
+ reg = <8>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache2>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu8_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu9: cpu@9 {
+ compatible = "thead,c920", "riscv";
+ reg = <9>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache2>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu9_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu10: cpu@10 {
+ compatible = "thead,c920", "riscv";
+ reg = <10>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache2>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu10_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu11: cpu@11 {
+ compatible = "thead,c920", "riscv";
+ reg = <11>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache2>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu11_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu12: cpu@12 {
+ compatible = "thead,c920", "riscv";
+ reg = <12>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache3>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu12_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu13: cpu@13 {
+ compatible = "thead,c920", "riscv";
+ reg = <13>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache3>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu13_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu14: cpu@14 {
+ compatible = "thead,c920", "riscv";
+ reg = <14>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache3>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu14_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu15: cpu@15 {
+ compatible = "thead,c920", "riscv";
+ reg = <15>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache3>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu15_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu16: cpu@16 {
+ compatible = "thead,c920", "riscv";
+ reg = <16>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache4>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu16_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu17: cpu@17 {
+ compatible = "thead,c920", "riscv";
+ reg = <17>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache4>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu17_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu18: cpu@18 {
+ compatible = "thead,c920", "riscv";
+ reg = <18>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache4>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu18_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu19: cpu@19 {
+ compatible = "thead,c920", "riscv";
+ reg = <19>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache4>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu19_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu20: cpu@20 {
+ compatible = "thead,c920", "riscv";
+ reg = <20>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache5>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu20_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu21: cpu@21 {
+ compatible = "thead,c920", "riscv";
+ reg = <21>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache5>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu21_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu22: cpu@22 {
+ compatible = "thead,c920", "riscv";
+ reg = <22>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache5>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu22_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu23: cpu@23 {
+ compatible = "thead,c920", "riscv";
+ reg = <23>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache5>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu23_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu24: cpu@24 {
+ compatible = "thead,c920", "riscv";
+ reg = <24>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache6>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu24_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu25: cpu@25 {
+ compatible = "thead,c920", "riscv";
+ reg = <25>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache6>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu25_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu26: cpu@26 {
+ compatible = "thead,c920", "riscv";
+ reg = <26>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache6>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu26_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu27: cpu@27 {
+ compatible = "thead,c920", "riscv";
+ reg = <27>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache6>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu27_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu28: cpu@28 {
+ compatible = "thead,c920", "riscv";
+ reg = <28>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache7>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu28_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu29: cpu@29 {
+ compatible = "thead,c920", "riscv";
+ reg = <29>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache7>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu29_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu30: cpu@30 {
+ compatible = "thead,c920", "riscv";
+ reg = <30>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache7>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu30_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu31: cpu@31 {
+ compatible = "thead,c920", "riscv";
+ reg = <31>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache7>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu31_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu32: cpu@32 {
+ compatible = "thead,c920", "riscv";
+ reg = <32>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache8>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu32_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu33: cpu@33 {
+ compatible = "thead,c920", "riscv";
+ reg = <33>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache8>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu33_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu34: cpu@34 {
+ compatible = "thead,c920", "riscv";
+ reg = <34>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache8>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu34_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu35: cpu@35 {
+ compatible = "thead,c920", "riscv";
+ reg = <35>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache8>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu35_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu36: cpu@36 {
+ compatible = "thead,c920", "riscv";
+ reg = <36>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache9>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu36_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu37: cpu@37 {
+ compatible = "thead,c920", "riscv";
+ reg = <37>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache9>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu37_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu38: cpu@38 {
+ compatible = "thead,c920", "riscv";
+ reg = <38>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache9>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu38_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu39: cpu@39 {
+ compatible = "thead,c920", "riscv";
+ reg = <39>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache9>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu39_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu40: cpu@40 {
+ compatible = "thead,c920", "riscv";
+ reg = <40>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache10>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu40_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu41: cpu@41 {
+ compatible = "thead,c920", "riscv";
+ reg = <41>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache10>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu41_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu42: cpu@42 {
+ compatible = "thead,c920", "riscv";
+ reg = <42>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache10>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu42_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu43: cpu@43 {
+ compatible = "thead,c920", "riscv";
+ reg = <43>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache10>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu43_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu44: cpu@44 {
+ compatible = "thead,c920", "riscv";
+ reg = <44>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache11>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu44_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu45: cpu@45 {
+ compatible = "thead,c920", "riscv";
+ reg = <45>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache11>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu45_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu46: cpu@46 {
+ compatible = "thead,c920", "riscv";
+ reg = <46>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache11>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu46_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu47: cpu@47 {
+ compatible = "thead,c920", "riscv";
+ reg = <47>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache11>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu47_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu48: cpu@48 {
+ compatible = "thead,c920", "riscv";
+ reg = <48>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache12>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu48_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu49: cpu@49 {
+ compatible = "thead,c920", "riscv";
+ reg = <49>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache12>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu49_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu50: cpu@50 {
+ compatible = "thead,c920", "riscv";
+ reg = <50>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache12>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu50_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu51: cpu@51 {
+ compatible = "thead,c920", "riscv";
+ reg = <51>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache12>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu51_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu52: cpu@52 {
+ compatible = "thead,c920", "riscv";
+ reg = <52>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache13>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu52_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu53: cpu@53 {
+ compatible = "thead,c920", "riscv";
+ reg = <53>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache13>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond","zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu53_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu54: cpu@54 {
+ compatible = "thead,c920", "riscv";
+ reg = <54>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache13>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond", "zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu54_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu55: cpu@55 {
+ compatible = "thead,c920", "riscv";
+ reg = <55>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache13>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond", "zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu55_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu56: cpu@56 {
+ compatible = "thead,c920", "riscv";
+ reg = <56>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache14>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond", "zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu56_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu57: cpu@57 {
+ compatible = "thead,c920", "riscv";
+ reg = <57>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache14>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond", "zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu57_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu58: cpu@58 {
+ compatible = "thead,c920", "riscv";
+ reg = <58>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache14>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond", "zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu58_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu59: cpu@59 {
+ compatible = "thead,c920", "riscv";
+ reg = <59>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache14>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond", "zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu59_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu60: cpu@60 {
+ compatible = "thead,c920", "riscv";
+ reg = <60>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache15>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond", "zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu60_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu61: cpu@61 {
+ compatible = "thead,c920", "riscv";
+ reg = <61>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache15>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond", "zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu61_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu62: cpu@62 {
+ compatible = "thead,c920", "riscv";
+ reg = <62>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache15>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond", "zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu62_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu63: cpu@63 {
+ compatible = "thead,c920", "riscv";
+ reg = <63>;
+ i-cache-block-size = <64>;
+ i-cache-size = <65536>;
+ i-cache-sets = <512>;
+ d-cache-block-size = <64>;
+ d-cache-size = <65536>;
+ d-cache-sets = <512>;
+ device_type = "cpu";
+ mmu-type = "riscv,sv48";
+ next-level-cache = <&l2_cache15>;
+ riscv,isa = "rv64imafdcv";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
+ "v", "sscofpmf", "sstc",
+ "svinval", "svnapot", "svpbmt",
+ "zawrs", "zba", "zbb", "zbc",
+ "zbs", "zca", "zcb", "zcd",
+ "zfa", "zfbfmin", "zfh", "zfhmin",
+ "zicbom", "zicbop", "zicboz",
+ "zicntr", "zicond", "zicsr", "zifencei",
+ "zihintntl", "zihintpause", "zihpm",
+ "zvfbfmin", "zvfbfwma", "zvfh",
+ "zvfhmin";
+ riscv,cbom-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+
+ cpu63_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu-map {
+ socket0 {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&cpu4>;
+ };
+
+ core1 {
+ cpu = <&cpu5>;
+ };
+
+ core2 {
+ cpu = <&cpu6>;
+ };
+
+ core3 {
+ cpu = <&cpu7>;
+ };
+ };
+
+ cluster2 {
+ core0 {
+ cpu = <&cpu8>;
+ };
+
+ core1 {
+ cpu = <&cpu9>;
+ };
+
+ core2 {
+ cpu = <&cpu10>;
+ };
+
+ core3 {
+ cpu = <&cpu11>;
+ };
+ };
+
+ cluster3 {
+ core0 {
+ cpu = <&cpu12>;
+ };
+
+ core1 {
+ cpu = <&cpu13>;
+ };
+
+ core2 {
+ cpu = <&cpu14>;
+ };
+
+ core3 {
+ cpu = <&cpu15>;
+ };
+ };
+
+ cluster4 {
+ core0 {
+ cpu = <&cpu16>;
+ };
+
+ core1 {
+ cpu = <&cpu17>;
+ };
+
+ core2 {
+ cpu = <&cpu18>;
+ };
+
+ core3 {
+ cpu = <&cpu19>;
+ };
+ };
+
+ cluster5 {
+ core0 {
+ cpu = <&cpu20>;
+ };
+
+ core1 {
+ cpu = <&cpu21>;
+ };
+
+ core2 {
+ cpu = <&cpu22>;
+ };
+
+ core3 {
+ cpu = <&cpu23>;
+ };
+ };
+
+ cluster6 {
+ core0 {
+ cpu = <&cpu24>;
+ };
+
+ core1 {
+ cpu = <&cpu25>;
+ };
+
+ core2 {
+ cpu = <&cpu26>;
+ };
+
+ core3 {
+ cpu = <&cpu27>;
+ };
+ };
+
+ cluster7 {
+ core0 {
+ cpu = <&cpu28>;
+ };
+
+ core1 {
+ cpu = <&cpu29>;
+ };
+
+ core2 {
+ cpu = <&cpu30>;
+ };
+
+ core3 {
+ cpu = <&cpu31>;
+ };
+ };
+
+ cluster8 {
+ core0 {
+ cpu = <&cpu32>;
+ };
+
+ core1 {
+ cpu = <&cpu33>;
+ };
+
+ core2 {
+ cpu = <&cpu34>;
+ };
+
+ core3 {
+ cpu = <&cpu35>;
+ };
+ };
+
+ cluster9 {
+ core0 {
+ cpu = <&cpu36>;
+ };
+
+ core1 {
+ cpu = <&cpu37>;
+ };
+
+ core2 {
+ cpu = <&cpu38>;
+ };
+
+ core3 {
+ cpu = <&cpu39>;
+ };
+ };
+
+ cluster10 {
+ core0 {
+ cpu = <&cpu40>;
+ };
+
+ core1 {
+ cpu = <&cpu41>;
+ };
+
+ core2 {
+ cpu = <&cpu42>;
+ };
+
+ core3 {
+ cpu = <&cpu43>;
+ };
+ };
+
+ cluster11 {
+ core0 {
+ cpu = <&cpu44>;
+ };
+
+ core1 {
+ cpu = <&cpu45>;
+ };
+
+ core2 {
+ cpu = <&cpu46>;
+ };
+
+ core3 {
+ cpu = <&cpu47>;
+ };
+ };
+
+ cluster12 {
+ core0 {
+ cpu = <&cpu48>;
+ };
+
+ core1 {
+ cpu = <&cpu49>;
+ };
+
+ core2 {
+ cpu = <&cpu50>;
+ };
+
+ core3 {
+ cpu = <&cpu51>;
+ };
+ };
+
+ cluster13 {
+ core0 {
+ cpu = <&cpu52>;
+ };
+
+ core1 {
+ cpu = <&cpu53>;
+ };
+
+ core2 {
+ cpu = <&cpu54>;
+ };
+
+ core3 {
+ cpu = <&cpu55>;
+ };
+ };
+
+ cluster14 {
+ core0 {
+ cpu = <&cpu56>;
+ };
+
+ core1 {
+ cpu = <&cpu57>;
+ };
+
+ core2 {
+ cpu = <&cpu58>;
+ };
+
+ core3 {
+ cpu = <&cpu59>;
+ };
+ };
+
+ cluster15 {
+ core0 {
+ cpu = <&cpu60>;
+ };
+
+ core1 {
+ cpu = <&cpu61>;
+ };
+
+ core2 {
+ cpu = <&cpu62>;
+ };
+
+ core3 {
+ cpu = <&cpu63>;
+ };
+ };
+ };
+ };
+
+ l2_cache0: cache-controller-0 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache1: cache-controller-1 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache2: cache-controller-2 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache3: cache-controller-3 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache4: cache-controller-4 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache5: cache-controller-5 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache6: cache-controller-6 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache7: cache-controller-7 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache8: cache-controller-8 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache9: cache-controller-9 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache10: cache-controller-10 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache11: cache-controller-11 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache12: cache-controller-12 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache13: cache-controller-13 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache14: cache-controller-14 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache15: cache-controller-15 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <2097152>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l3_cache: cache-controller-16 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <3>;
+ cache-size = <67108864>;
+ cache-sets = <4096>;
+ cache-unified;
+ };
+ };
+
+ soc {
+ intc: interrupt-controller@6d40000000 {
+ compatible = "sophgo,sg2044-plic", "thead,c900-plic";
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x6d 0x40000000 0x0 0x4000000>;
+ interrupt-controller;
+ interrupts-extended =
+ <&cpu0_intc 11>, <&cpu0_intc 9>,
+ <&cpu1_intc 11>, <&cpu1_intc 9>,
+ <&cpu2_intc 11>, <&cpu2_intc 9>,
+ <&cpu3_intc 11>, <&cpu3_intc 9>,
+ <&cpu4_intc 11>, <&cpu4_intc 9>,
+ <&cpu5_intc 11>, <&cpu5_intc 9>,
+ <&cpu6_intc 11>, <&cpu6_intc 9>,
+ <&cpu7_intc 11>, <&cpu7_intc 9>,
+ <&cpu8_intc 11>, <&cpu8_intc 9>,
+ <&cpu9_intc 11>, <&cpu9_intc 9>,
+ <&cpu10_intc 11>, <&cpu10_intc 9>,
+ <&cpu11_intc 11>, <&cpu11_intc 9>,
+ <&cpu12_intc 11>, <&cpu12_intc 9>,
+ <&cpu13_intc 11>, <&cpu13_intc 9>,
+ <&cpu14_intc 11>, <&cpu14_intc 9>,
+ <&cpu15_intc 11>, <&cpu15_intc 9>,
+ <&cpu16_intc 11>, <&cpu16_intc 9>,
+ <&cpu17_intc 11>, <&cpu17_intc 9>,
+ <&cpu18_intc 11>, <&cpu18_intc 9>,
+ <&cpu19_intc 11>, <&cpu19_intc 9>,
+ <&cpu20_intc 11>, <&cpu20_intc 9>,
+ <&cpu21_intc 11>, <&cpu21_intc 9>,
+ <&cpu22_intc 11>, <&cpu22_intc 9>,
+ <&cpu23_intc 11>, <&cpu23_intc 9>,
+ <&cpu24_intc 11>, <&cpu24_intc 9>,
+ <&cpu25_intc 11>, <&cpu25_intc 9>,
+ <&cpu26_intc 11>, <&cpu26_intc 9>,
+ <&cpu27_intc 11>, <&cpu27_intc 9>,
+ <&cpu28_intc 11>, <&cpu28_intc 9>,
+ <&cpu29_intc 11>, <&cpu29_intc 9>,
+ <&cpu30_intc 11>, <&cpu30_intc 9>,
+ <&cpu31_intc 11>, <&cpu31_intc 9>,
+ <&cpu32_intc 11>, <&cpu32_intc 9>,
+ <&cpu33_intc 11>, <&cpu33_intc 9>,
+ <&cpu34_intc 11>, <&cpu34_intc 9>,
+ <&cpu35_intc 11>, <&cpu35_intc 9>,
+ <&cpu36_intc 11>, <&cpu36_intc 9>,
+ <&cpu37_intc 11>, <&cpu37_intc 9>,
+ <&cpu38_intc 11>, <&cpu38_intc 9>,
+ <&cpu39_intc 11>, <&cpu39_intc 9>,
+ <&cpu40_intc 11>, <&cpu40_intc 9>,
+ <&cpu41_intc 11>, <&cpu41_intc 9>,
+ <&cpu42_intc 11>, <&cpu42_intc 9>,
+ <&cpu43_intc 11>, <&cpu43_intc 9>,
+ <&cpu44_intc 11>, <&cpu44_intc 9>,
+ <&cpu45_intc 11>, <&cpu45_intc 9>,
+ <&cpu46_intc 11>, <&cpu46_intc 9>,
+ <&cpu47_intc 11>, <&cpu47_intc 9>,
+ <&cpu48_intc 11>, <&cpu48_intc 9>,
+ <&cpu49_intc 11>, <&cpu49_intc 9>,
+ <&cpu50_intc 11>, <&cpu50_intc 9>,
+ <&cpu51_intc 11>, <&cpu51_intc 9>,
+ <&cpu52_intc 11>, <&cpu52_intc 9>,
+ <&cpu53_intc 11>, <&cpu53_intc 9>,
+ <&cpu54_intc 11>, <&cpu54_intc 9>,
+ <&cpu55_intc 11>, <&cpu55_intc 9>,
+ <&cpu56_intc 11>, <&cpu56_intc 9>,
+ <&cpu57_intc 11>, <&cpu57_intc 9>,
+ <&cpu58_intc 11>, <&cpu58_intc 9>,
+ <&cpu59_intc 11>, <&cpu59_intc 9>,
+ <&cpu60_intc 11>, <&cpu60_intc 9>,
+ <&cpu61_intc 11>, <&cpu61_intc 9>,
+ <&cpu62_intc 11>, <&cpu62_intc 9>,
+ <&cpu63_intc 11>, <&cpu63_intc 9>;
+ riscv,ndev = <863>;
+ };
+
+ aclint_mswi: interrupt-controller@6d44000000 {
+ compatible = "sophgo,sg2044-aclint-mswi", "thead,c900-aclint-mswi";
+ reg = <0x6d 0x44000000 0x0 0x4000>;
+ interrupts-extended = <&cpu0_intc 3>,
+ <&cpu1_intc 3>,
+ <&cpu2_intc 3>,
+ <&cpu3_intc 3>,
+ <&cpu4_intc 3>,
+ <&cpu5_intc 3>,
+ <&cpu6_intc 3>,
+ <&cpu7_intc 3>,
+ <&cpu8_intc 3>,
+ <&cpu9_intc 3>,
+ <&cpu10_intc 3>,
+ <&cpu11_intc 3>,
+ <&cpu12_intc 3>,
+ <&cpu13_intc 3>,
+ <&cpu14_intc 3>,
+ <&cpu15_intc 3>,
+ <&cpu16_intc 3>,
+ <&cpu17_intc 3>,
+ <&cpu18_intc 3>,
+ <&cpu19_intc 3>,
+ <&cpu20_intc 3>,
+ <&cpu21_intc 3>,
+ <&cpu22_intc 3>,
+ <&cpu23_intc 3>,
+ <&cpu24_intc 3>,
+ <&cpu25_intc 3>,
+ <&cpu26_intc 3>,
+ <&cpu27_intc 3>,
+ <&cpu28_intc 3>,
+ <&cpu29_intc 3>,
+ <&cpu30_intc 3>,
+ <&cpu31_intc 3>,
+ <&cpu32_intc 3>,
+ <&cpu33_intc 3>,
+ <&cpu34_intc 3>,
+ <&cpu35_intc 3>,
+ <&cpu36_intc 3>,
+ <&cpu37_intc 3>,
+ <&cpu38_intc 3>,
+ <&cpu39_intc 3>,
+ <&cpu40_intc 3>,
+ <&cpu41_intc 3>,
+ <&cpu42_intc 3>,
+ <&cpu43_intc 3>,
+ <&cpu44_intc 3>,
+ <&cpu45_intc 3>,
+ <&cpu46_intc 3>,
+ <&cpu47_intc 3>,
+ <&cpu48_intc 3>,
+ <&cpu49_intc 3>,
+ <&cpu50_intc 3>,
+ <&cpu51_intc 3>,
+ <&cpu52_intc 3>,
+ <&cpu53_intc 3>,
+ <&cpu54_intc 3>,
+ <&cpu55_intc 3>,
+ <&cpu56_intc 3>,
+ <&cpu57_intc 3>,
+ <&cpu58_intc 3>,
+ <&cpu59_intc 3>,
+ <&cpu60_intc 3>,
+ <&cpu61_intc 3>,
+ <&cpu62_intc 3>,
+ <&cpu63_intc 3>;
+ };
+
+ aclint_mtimer: timer@6d44004000 {
+ compatible = "sophgo,sg2044-aclint-mtimer", "thead,c900-aclint-mtimer";
+ reg = <0x6d 0x44004000 0x0 0x8000>;
+ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu0_intc 7>,
+ <&cpu1_intc 7>,
+ <&cpu2_intc 7>,
+ <&cpu3_intc 7>,
+ <&cpu4_intc 7>,
+ <&cpu5_intc 7>,
+ <&cpu6_intc 7>,
+ <&cpu7_intc 7>,
+ <&cpu8_intc 7>,
+ <&cpu9_intc 7>,
+ <&cpu10_intc 7>,
+ <&cpu11_intc 7>,
+ <&cpu12_intc 7>,
+ <&cpu13_intc 7>,
+ <&cpu14_intc 7>,
+ <&cpu15_intc 7>,
+ <&cpu16_intc 7>,
+ <&cpu17_intc 7>,
+ <&cpu18_intc 7>,
+ <&cpu19_intc 7>,
+ <&cpu20_intc 7>,
+ <&cpu21_intc 7>,
+ <&cpu22_intc 7>,
+ <&cpu23_intc 7>,
+ <&cpu24_intc 7>,
+ <&cpu25_intc 7>,
+ <&cpu26_intc 7>,
+ <&cpu27_intc 7>,
+ <&cpu28_intc 7>,
+ <&cpu29_intc 7>,
+ <&cpu30_intc 7>,
+ <&cpu31_intc 7>,
+ <&cpu32_intc 7>,
+ <&cpu33_intc 7>,
+ <&cpu34_intc 7>,
+ <&cpu35_intc 7>,
+ <&cpu36_intc 7>,
+ <&cpu37_intc 7>,
+ <&cpu38_intc 7>,
+ <&cpu39_intc 7>,
+ <&cpu40_intc 7>,
+ <&cpu41_intc 7>,
+ <&cpu42_intc 7>,
+ <&cpu43_intc 7>,
+ <&cpu44_intc 7>,
+ <&cpu45_intc 7>,
+ <&cpu46_intc 7>,
+ <&cpu47_intc 7>,
+ <&cpu48_intc 7>,
+ <&cpu49_intc 7>,
+ <&cpu50_intc 7>,
+ <&cpu51_intc 7>,
+ <&cpu52_intc 7>,
+ <&cpu53_intc 7>,
+ <&cpu54_intc 7>,
+ <&cpu55_intc 7>,
+ <&cpu56_intc 7>,
+ <&cpu57_intc 7>,
+ <&cpu58_intc 7>,
+ <&cpu59_intc 7>,
+ <&cpu60_intc 7>,
+ <&cpu61_intc 7>,
+ <&cpu62_intc 7>,
+ <&cpu63_intc 7>;
+ };
+
+ aclint_sswi: interrupt-controller@6d4400c000 {
+ compatible = "sophgo,sg2044-aclint-sswi", "thead,c900-aclint-sswi";
+ reg = <0x6d 0x4400c000 0x0 0x1000>;
+ #interrupt-cells = <0>;
+ interrupt-controller;
+ interrupts-extended = <&cpu0_intc 1>,
+ <&cpu1_intc 1>,
+ <&cpu2_intc 1>,
+ <&cpu3_intc 1>,
+ <&cpu4_intc 1>,
+ <&cpu5_intc 1>,
+ <&cpu6_intc 1>,
+ <&cpu7_intc 1>,
+ <&cpu8_intc 1>,
+ <&cpu9_intc 1>,
+ <&cpu10_intc 1>,
+ <&cpu11_intc 1>,
+ <&cpu12_intc 1>,
+ <&cpu13_intc 1>,
+ <&cpu14_intc 1>,
+ <&cpu15_intc 1>,
+ <&cpu16_intc 1>,
+ <&cpu17_intc 1>,
+ <&cpu18_intc 1>,
+ <&cpu19_intc 1>,
+ <&cpu20_intc 1>,
+ <&cpu21_intc 1>,
+ <&cpu22_intc 1>,
+ <&cpu23_intc 1>,
+ <&cpu24_intc 1>,
+ <&cpu25_intc 1>,
+ <&cpu26_intc 1>,
+ <&cpu27_intc 1>,
+ <&cpu28_intc 1>,
+ <&cpu29_intc 1>,
+ <&cpu30_intc 1>,
+ <&cpu31_intc 1>,
+ <&cpu32_intc 1>,
+ <&cpu33_intc 1>,
+ <&cpu34_intc 1>,
+ <&cpu35_intc 1>,
+ <&cpu36_intc 1>,
+ <&cpu37_intc 1>,
+ <&cpu38_intc 1>,
+ <&cpu39_intc 1>,
+ <&cpu40_intc 1>,
+ <&cpu41_intc 1>,
+ <&cpu42_intc 1>,
+ <&cpu43_intc 1>,
+ <&cpu44_intc 1>,
+ <&cpu45_intc 1>,
+ <&cpu46_intc 1>,
+ <&cpu47_intc 1>,
+ <&cpu48_intc 1>,
+ <&cpu49_intc 1>,
+ <&cpu50_intc 1>,
+ <&cpu51_intc 1>,
+ <&cpu52_intc 1>,
+ <&cpu53_intc 1>,
+ <&cpu54_intc 1>,
+ <&cpu55_intc 1>,
+ <&cpu56_intc 1>,
+ <&cpu57_intc 1>,
+ <&cpu58_intc 1>,
+ <&cpu59_intc 1>,
+ <&cpu60_intc 1>,
+ <&cpu61_intc 1>,
+ <&cpu62_intc 1>,
+ <&cpu63_intc 1>;
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/sophgo/sg2044-reset.h b/arch/riscv/boot/dts/sophgo/sg2044-reset.h
new file mode 100644
index 000000000000..3a7bbfdb4bae
--- /dev/null
+++ b/arch/riscv/boot/dts/sophgo/sg2044-reset.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#ifndef _SG2044_RESET_H
+#define _SG2044_RESET_H
+
+#define RST_AP_SYS 0
+#define RST_AP_SYS_CORE0 1
+#define RST_AP_SYS_CORE1 2
+#define RST_AP_SYS_CORE2 3
+#define RST_AP_SYS_CORE3 4
+#define RST_AP_PIC 5
+#define RST_AP_TDT 6
+#define RST_RP_PIC_TDT 7
+#define RST_HSDMA 8
+#define RST_SYSDMA 9
+#define RST_EFUSE0 10
+#define RST_EFUSE1 11
+#define RST_RTC 12
+#define RST_TIMER 13
+#define RST_WDT 14
+#define RST_AHB_ROM0 15
+#define RST_AHB_ROM1 16
+#define RST_I2C0 17
+#define RST_I2C1 18
+#define RST_I2C2 19
+#define RST_I2C3 20
+#define RST_GPIO0 21
+#define RST_GPIO1 22
+#define RST_GPIO2 23
+#define RST_PWM 24
+#define RST_AXI_SRAM0 25
+#define RST_AXI_SRAM1 26
+#define RST_SPIFMC0 27
+#define RST_SPIFMC1 28
+#define RST_MAILBOX 29
+#define RST_ETH0 30
+#define RST_EMMC 31
+#define RST_SD 32
+#define RST_UART0 33
+#define RST_UART1 34
+#define RST_UART2 35
+#define RST_UART3 36
+#define RST_SPI0 37
+#define RST_SPI1 38
+#define RST_MTLI 39
+#define RST_DBG_I2C 40
+#define RST_C2C0 41
+#define RST_C2C1 42
+#define RST_C2C2 43
+#define RST_C2C3 44
+#define RST_CXP 45
+#define RST_DDR0 46
+#define RST_DDR1 47
+#define RST_DDR2 48
+#define RST_DDR3 49
+#define RST_DDR4 50
+#define RST_DDR5 51
+#define RST_DDR6 52
+#define RST_DDR7 53
+#define RST_DDR8 54
+#define RST_DDR9 55
+#define RST_DDR10 56
+#define RST_DDR11 57
+#define RST_DDR12 58
+#define RST_DDR13 59
+#define RST_DDR14 60
+#define RST_DDR15 61
+#define RST_BAR 62
+#define RST_K2K 63
+#define RST_CC_SYS_X1Y1 64
+#define RST_CC_SYS_X1Y2 65
+#define RST_CC_SYS_X1Y3 66
+#define RST_CC_SYS_X1Y4 67
+#define RST_CC_SYS_X0Y1 68
+#define RST_CC_SYS_X0Y2 69
+#define RST_CC_SYS_X0Y3 70
+#define RST_CC_SYS_X0Y4 71
+#define RST_SC_X1Y1 80
+#define RST_SC_X1Y2 81
+#define RST_SC_X1Y3 82
+#define RST_SC_X1Y4 83
+#define RST_SC_X0Y1 84
+#define RST_SC_X0Y2 85
+#define RST_SC_X0Y3 86
+#define RST_SC_X0Y4 87
+#define RST_RP_CLUSTER_X1Y1_S0 160
+#define RST_RP_CLUSTER_X1Y1_S1 161
+#define RST_RP_CLUSTER_X1Y2_S0 162
+#define RST_RP_CLUSTER_X1Y2_S1 163
+#define RST_RP_CLUSTER_X1Y3_S0 164
+#define RST_RP_CLUSTER_X1Y3_S1 165
+#define RST_RP_CLUSTER_X1Y4_S0 166
+#define RST_RP_CLUSTER_X1Y4_S1 167
+#define RST_RP_CLUSTER_X0Y1_W0 168
+#define RST_RP_CLUSTER_X0Y1_W1 169
+#define RST_RP_CLUSTER_X0Y2_W0 170
+#define RST_RP_CLUSTER_X0Y2_W1 171
+#define RST_RP_CLUSTER_X0Y3_W0 172
+#define RST_RP_CLUSTER_X0Y3_W1 173
+#define RST_RP_CLUSTER_X0Y4_W0 174
+#define RST_RP_CLUSTER_X0Y4_W1 175
+#define RST_TPSYS_X1Y1 180
+#define RST_TPSYS_X1Y2 181
+#define RST_TPSYS_X1Y3 182
+#define RST_TPSYS_X1Y4 183
+#define RST_TPSYS_X0Y1 184
+#define RST_TPSYS_X0Y2 185
+#define RST_TPSYS_X0Y3 186
+#define RST_TPSYS_X0Y4 187
+#define RST_SPACC 188
+#define RST_PKA 189
+#define RST_SE_TRNG 190
+#define RST_SE_DBG 191
+#define RST_SE_FAB_FW 192
+#define RST_SE_CTRL 193
+#define RST_MAILBOX0 194
+#define RST_MAILBOX1 195
+#define RST_MAILBOX2 196
+#define RST_MAILBOX3 197
+#define RST_INTC0 198
+#define RST_INTC1 199
+#define RST_INTC2 200
+#define RST_INTC3 201
+
+#endif /* _SG2044_RESET_H */
diff --git a/arch/riscv/boot/dts/sophgo/sg2044-sophgo-srd3-10.dts b/arch/riscv/boot/dts/sophgo/sg2044-sophgo-srd3-10.dts
new file mode 100644
index 000000000000..54cdf4239d5f
--- /dev/null
+++ b/arch/riscv/boot/dts/sophgo/sg2044-sophgo-srd3-10.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "sg2044.dtsi"
+
+/ {
+ model = "Sophgo SG2044 SRD3-10";
+ compatible = "sophgo,srd3-10", "sophgo,sg2044";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ };
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
+};
+
+&osc {
+ clock-frequency = <25000000>;
+};
+
+&uart1 {
+ status = "okay";
+};
diff --git a/arch/riscv/boot/dts/sophgo/sg2044.dtsi b/arch/riscv/boot/dts/sophgo/sg2044.dtsi
new file mode 100644
index 000000000000..d67e45f77d6e
--- /dev/null
+++ b/arch/riscv/boot/dts/sophgo/sg2044.dtsi
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#include "sg2044-cpus.dtsi"
+#include "sg2044-reset.h"
+
+/ {
+ compatible = "sophgo,sg2044";
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000 0x00000010 0x00000000>;
+ };
+
+ osc: oscillator {
+ compatible = "fixed-clock";
+ clock-output-names = "osc";
+ #clock-cells = <0>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ uart0: serial@7030000000 {
+ compatible = "sophgo,sg2044-uart", "snps,dw-apb-uart";
+ reg = <0x70 0x30000000 0x0 0x1000>;
+ clock-frequency = <500000000>;
+ interrupt-parent = <&intc>;
+ interrupts = <41 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ resets = <&rst RST_UART0>;
+ status = "disabled";
+ };
+
+ uart1: serial@7030001000 {
+ compatible = "sophgo,sg2044-uart", "snps,dw-apb-uart";
+ reg = <0x70 0x30001000 0x0 0x1000>;
+ clock-frequency = <500000000>;
+ interrupt-parent = <&intc>;
+ interrupts = <42 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ resets = <&rst RST_UART1>;
+ status = "disabled";
+ };
+
+ uart2: serial@7030002000 {
+ compatible = "sophgo,sg2044-uart", "snps,dw-apb-uart";
+ reg = <0x70 0x30002000 0x0 0x1000>;
+ clock-frequency = <500000000>;
+ interrupt-parent = <&intc>;
+ interrupts = <43 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ resets = <&rst RST_UART2>;
+ status = "disabled";
+ };
+
+ uart3: serial@7030003000 {
+ compatible = "sophgo,sg2044-uart", "snps,dw-apb-uart";
+ reg = <0x70 0x30003000 0x0 0x1000>;
+ clock-frequency = <500000000>;
+ interrupt-parent = <&intc>;
+ interrupts = <44 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ resets = <&rst RST_UART3>;
+ status = "disabled";
+ };
+
+ rst: reset-controller@7050003000 {
+ compatible = "sophgo,sg2044-reset",
+ "sophgo,sg2042-reset";
+ reg = <0x70 0x50003000 0x0 0x1000>;
+ #reset-cells = <1>;
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts b/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts
index 1d617b40a2d5..816ef1bc358e 100644
--- a/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts
+++ b/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts
@@ -17,6 +17,17 @@
chosen {
stdout-path = "serial0";
};
+
+ leds {
+ compatible = "gpio-leds";
+
+ led1 {
+ label = "sys-led";
+ gpios = <&gpio K1_GPIO(96) GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "on";
+ };
+ };
};
&uart0 {
diff --git a/arch/riscv/boot/dts/spacemit/k1-pinctrl.dtsi b/arch/riscv/boot/dts/spacemit/k1-pinctrl.dtsi
index a8eac5517f85..283663647a86 100644
--- a/arch/riscv/boot/dts/spacemit/k1-pinctrl.dtsi
+++ b/arch/riscv/boot/dts/spacemit/k1-pinctrl.dtsi
@@ -7,6 +7,9 @@
#define K1_PADCONF(pin, func) (((pin) << 16) | (func))
+/* Map a GPIO pin number to its bank's <index, offset> pair */
+#define K1_GPIO(x) ((x) / 32) ((x) % 32)
+
&pinctrl {
uart0_2_cfg: uart0-2-cfg {
uart0-2-pins {
diff --git a/arch/riscv/boot/dts/spacemit/k1.dtsi b/arch/riscv/boot/dts/spacemit/k1.dtsi
index c670ebf8fa12..c0f8c5fca975 100644
--- a/arch/riscv/boot/dts/spacemit/k1.dtsi
+++ b/arch/riscv/boot/dts/spacemit/k1.dtsi
@@ -3,6 +3,8 @@
* Copyright (C) 2024 Yangyu Chen <cyy@cyyself.name>
*/
+#include <dt-bindings/clock/spacemit,k1-syscon.h>
+
/dts-v1/;
/ {
#address-cells = <2>;
@@ -306,6 +308,36 @@
};
};
+ clocks {
+ vctcxo_1m: clock-1m {
+ compatible = "fixed-clock";
+ clock-frequency = <1000000>;
+ clock-output-names = "vctcxo_1m";
+ #clock-cells = <0>;
+ };
+
+ vctcxo_24m: clock-24m {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "vctcxo_24m";
+ #clock-cells = <0>;
+ };
+
+ vctcxo_3m: clock-3m {
+ compatible = "fixed-clock";
+ clock-frequency = <3000000>;
+ clock-output-names = "vctcxo_3m";
+ #clock-cells = <0>;
+ };
+
+ osc_32k: clock-32k {
+ compatible = "fixed-clock";
+ clock-frequency = <32000>;
+ clock-output-names = "osc_32k";
+ #clock-cells = <0>;
+ };
+ };
+
soc {
compatible = "simple-bus";
interrupt-parent = <&plic>;
@@ -314,11 +346,24 @@
dma-noncoherent;
ranges;
+ syscon_apbc: system-controller@d4015000 {
+ compatible = "spacemit,k1-syscon-apbc";
+ reg = <0x0 0xd4015000 0x0 0x1000>;
+ clocks = <&osc_32k>, <&vctcxo_1m>, <&vctcxo_3m>,
+ <&vctcxo_24m>;
+ clock-names = "osc", "vctcxo_1m", "vctcxo_3m",
+ "vctcxo_24m";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
uart0: serial@d4017000 {
compatible = "spacemit,k1-uart", "intel,xscale-uart";
reg = <0x0 0xd4017000 0x0 0x100>;
+ clocks = <&syscon_apbc CLK_UART0>,
+ <&syscon_apbc CLK_UART0_BUS>;
+ clock-names = "core", "bus";
interrupts = <42>;
- clock-frequency = <14857000>;
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -327,8 +372,10 @@
uart2: serial@d4017100 {
compatible = "spacemit,k1-uart", "intel,xscale-uart";
reg = <0x0 0xd4017100 0x0 0x100>;
+ clocks = <&syscon_apbc CLK_UART2>,
+ <&syscon_apbc CLK_UART2_BUS>;
+ clock-names = "core", "bus";
interrupts = <44>;
- clock-frequency = <14857000>;
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -337,8 +384,10 @@
uart3: serial@d4017200 {
compatible = "spacemit,k1-uart", "intel,xscale-uart";
reg = <0x0 0xd4017200 0x0 0x100>;
+ clocks = <&syscon_apbc CLK_UART3>,
+ <&syscon_apbc CLK_UART3_BUS>;
+ clock-names = "core", "bus";
interrupts = <45>;
- clock-frequency = <14857000>;
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -347,8 +396,10 @@
uart4: serial@d4017300 {
compatible = "spacemit,k1-uart", "intel,xscale-uart";
reg = <0x0 0xd4017300 0x0 0x100>;
+ clocks = <&syscon_apbc CLK_UART4>,
+ <&syscon_apbc CLK_UART4_BUS>;
+ clock-names = "core", "bus";
interrupts = <46>;
- clock-frequency = <14857000>;
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -357,8 +408,10 @@
uart5: serial@d4017400 {
compatible = "spacemit,k1-uart", "intel,xscale-uart";
reg = <0x0 0xd4017400 0x0 0x100>;
+ clocks = <&syscon_apbc CLK_UART5>,
+ <&syscon_apbc CLK_UART5_BUS>;
+ clock-names = "core", "bus";
interrupts = <47>;
- clock-frequency = <14857000>;
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -367,8 +420,10 @@
uart6: serial@d4017500 {
compatible = "spacemit,k1-uart", "intel,xscale-uart";
reg = <0x0 0xd4017500 0x0 0x100>;
+ clocks = <&syscon_apbc CLK_UART6>,
+ <&syscon_apbc CLK_UART6_BUS>;
+ clock-names = "core", "bus";
interrupts = <48>;
- clock-frequency = <14857000>;
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -377,8 +432,10 @@
uart7: serial@d4017600 {
compatible = "spacemit,k1-uart", "intel,xscale-uart";
reg = <0x0 0xd4017600 0x0 0x100>;
+ clocks = <&syscon_apbc CLK_UART7>,
+ <&syscon_apbc CLK_UART7_BUS>;
+ clock-names = "core", "bus";
interrupts = <49>;
- clock-frequency = <14857000>;
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -387,8 +444,10 @@
uart8: serial@d4017700 {
compatible = "spacemit,k1-uart", "intel,xscale-uart";
reg = <0x0 0xd4017700 0x0 0x100>;
+ clocks = <&syscon_apbc CLK_UART8>,
+ <&syscon_apbc CLK_UART8_BUS>;
+ clock-names = "core", "bus";
interrupts = <50>;
- clock-frequency = <14857000>;
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -397,16 +456,71 @@
uart9: serial@d4017800 {
compatible = "spacemit,k1-uart", "intel,xscale-uart";
reg = <0x0 0xd4017800 0x0 0x100>;
+ clocks = <&syscon_apbc CLK_UART9>,
+ <&syscon_apbc CLK_UART9_BUS>;
+ clock-names = "core", "bus";
interrupts = <51>;
- clock-frequency = <14857000>;
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
};
+ gpio: gpio@d4019000 {
+ compatible = "spacemit,k1-gpio";
+ reg = <0x0 0xd4019000 0x0 0x100>;
+ clocks = <&syscon_apbc CLK_GPIO>,
+ <&syscon_apbc CLK_GPIO_BUS>;
+ clock-names = "core", "bus";
+ gpio-controller;
+ #gpio-cells = <3>;
+ interrupts = <58>;
+ interrupt-parent = <&plic>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ gpio-ranges = <&pinctrl 0 0 0 32>,
+ <&pinctrl 1 0 32 32>,
+ <&pinctrl 2 0 64 32>,
+ <&pinctrl 3 0 96 32>;
+ };
+
pinctrl: pinctrl@d401e000 {
compatible = "spacemit,k1-pinctrl";
reg = <0x0 0xd401e000 0x0 0x400>;
+ clocks = <&syscon_apbc CLK_AIB>,
+ <&syscon_apbc CLK_AIB_BUS>;
+ clock-names = "func", "bus";
+ };
+
+ syscon_mpmu: system-controller@d4050000 {
+ compatible = "spacemit,k1-syscon-mpmu";
+ reg = <0x0 0xd4050000 0x0 0x209c>;
+ clocks = <&osc_32k>, <&vctcxo_1m>, <&vctcxo_3m>,
+ <&vctcxo_24m>;
+ clock-names = "osc", "vctcxo_1m", "vctcxo_3m",
+ "vctcxo_24m";
+ #clock-cells = <1>;
+ #power-domain-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ pll: clock-controller@d4090000 {
+ compatible = "spacemit,k1-pll";
+ reg = <0x0 0xd4090000 0x0 0x1000>;
+ clocks = <&vctcxo_24m>;
+ spacemit,mpmu = <&syscon_mpmu>;
+ #clock-cells = <1>;
+ };
+
+ syscon_apmu: system-controller@d4282800 {
+ compatible = "spacemit,k1-syscon-apmu";
+ reg = <0x0 0xd4282800 0x0 0x400>;
+ clocks = <&osc_32k>, <&vctcxo_1m>, <&vctcxo_3m>,
+ <&vctcxo_24m>;
+ clock-names = "osc", "vctcxo_1m", "vctcxo_3m",
+ "vctcxo_24m";
+ #clock-cells = <1>;
+ #power-domain-cells = <1>;
+ #reset-cells = <1>;
};
plic: interrupt-controller@e0000000 {
diff --git a/arch/riscv/boot/dts/starfive/jh7110-common.dtsi b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
index c2f70f5e2918..4baeb981d4df 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
@@ -8,6 +8,7 @@
#include "jh7110.dtsi"
#include "jh7110-pinfunc.h"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/starfive,jh7110-pinctrl.h>
/ {
aliases {
@@ -28,6 +29,7 @@
memory@40000000 {
device_type = "memory";
reg = <0x0 0x40000000 0x1 0x0>;
+ bootph-pre-ram;
};
gpio-restart {
@@ -245,6 +247,13 @@
};
};
};
+
+ eeprom@50 {
+ compatible = "atmel,24c04";
+ reg = <0x50>;
+ bootph-pre-ram;
+ pagesize = <16>;
+ };
};
&i2c6 {
@@ -262,6 +271,7 @@
assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO0_SDCARD>;
assigned-clock-rates = <50000000>;
bus-width = <8>;
+ bootph-pre-ram;
cap-mmc-highspeed;
mmc-ddr-1_8v;
mmc-hs200-1_8v;
@@ -279,6 +289,7 @@
assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO1_SDCARD>;
assigned-clock-rates = <50000000>;
bus-width = <4>;
+ bootph-pre-ram;
no-sdio;
no-mmc;
cd-gpios = <&sysgpio 41 GPIO_ACTIVE_LOW>;
@@ -317,8 +328,9 @@
nor_flash: flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
- cdns,read-delay = <5>;
- spi-max-frequency = <12000000>;
+ bootph-pre-ram;
+ cdns,read-delay = <2>;
+ spi-max-frequency = <100000000>;
cdns,tshsl-ns = <1>;
cdns,tsd2d-ns = <1>;
cdns,tchsh-ns = <1>;
@@ -353,9 +365,17 @@
};
&syscrg {
- assigned-clocks = <&syscrg JH7110_SYSCLK_CPU_CORE>,
+ assigned-clocks = <&syscrg JH7110_SYSCLK_CPU_ROOT>,
+ <&syscrg JH7110_SYSCLK_BUS_ROOT>,
+ <&syscrg JH7110_SYSCLK_PERH_ROOT>,
+ <&syscrg JH7110_SYSCLK_QSPI_REF>,
+ <&syscrg JH7110_SYSCLK_CPU_CORE>,
<&pllclk JH7110_PLLCLK_PLL0_OUT>;
- assigned-clock-rates = <500000000>, <1500000000>;
+ assigned-clock-parents = <&pllclk JH7110_PLLCLK_PLL0_OUT>,
+ <&pllclk JH7110_PLLCLK_PLL2_OUT>,
+ <&pllclk JH7110_PLLCLK_PLL2_OUT>,
+ <&syscrg JH7110_SYSCLK_QSPI_REF_SRC>;
+ assigned-clock-rates = <0>, <0>, <0>, <0>, <500000000>, <1500000000>;
};
&sysgpio {
@@ -388,6 +408,8 @@
};
i2c5_pins: i2c5-0 {
+ bootph-pre-ram;
+
i2c-pins {
pinmux = <GPIOMUX(19, GPOUT_LOW,
GPOEN_SYS_I2C5_CLK,
@@ -396,6 +418,7 @@
GPOEN_SYS_I2C5_DATA,
GPI_SYS_I2C5_DATA)>;
bias-disable; /* external pull-up */
+ bootph-pre-ram;
input-enable;
input-schmitt-enable;
};
@@ -428,16 +451,16 @@
};
mmc-pins {
- pinmux = <PINMUX(64, 0)>,
- <PINMUX(65, 0)>,
- <PINMUX(66, 0)>,
- <PINMUX(67, 0)>,
- <PINMUX(68, 0)>,
- <PINMUX(69, 0)>,
- <PINMUX(70, 0)>,
- <PINMUX(71, 0)>,
- <PINMUX(72, 0)>,
- <PINMUX(73, 0)>;
+ pinmux = <PINMUX(PAD_SD0_CLK, 0)>,
+ <PINMUX(PAD_SD0_CMD, 0)>,
+ <PINMUX(PAD_SD0_DATA0, 0)>,
+ <PINMUX(PAD_SD0_DATA1, 0)>,
+ <PINMUX(PAD_SD0_DATA2, 0)>,
+ <PINMUX(PAD_SD0_DATA3, 0)>,
+ <PINMUX(PAD_SD0_DATA4, 0)>,
+ <PINMUX(PAD_SD0_DATA5, 0)>,
+ <PINMUX(PAD_SD0_DATA6, 0)>,
+ <PINMUX(PAD_SD0_DATA7, 0)>;
bias-pull-up;
drive-strength = <12>;
input-enable;
@@ -624,6 +647,7 @@
};
&uart0 {
+ bootph-pre-ram;
pinctrl-names = "default";
pinctrl-0 = <&uart0_pins>;
status = "okay";
diff --git a/arch/riscv/boot/dts/starfive/jh7110-deepcomputing-fml13v01.dts b/arch/riscv/boot/dts/starfive/jh7110-deepcomputing-fml13v01.dts
index 8d9ce8b69a71..f2857d021d68 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-deepcomputing-fml13v01.dts
+++ b/arch/riscv/boot/dts/starfive/jh7110-deepcomputing-fml13v01.dts
@@ -43,9 +43,28 @@
slew-rate = <0>;
};
};
+
+ usb0_pins: usb0-0 {
+ vbus-pins {
+ pinmux = <GPIOMUX(25, GPOUT_SYS_USB_DRIVE_VBUS,
+ GPOEN_ENABLE,
+ GPI_NONE)>;
+ bias-disable;
+ input-disable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+ };
};
&usb0 {
dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_pins>;
status = "okay";
};
+
+&usb_cdns3 {
+ phys = <&usbphy0>, <&pciephy0>;
+ phy-names = "cdns3,usb2-phy", "cdns3,usb3-phy";
+};
diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi
index 527336417765..1db0054c4e09 100644
--- a/arch/riscv/boot/dts/thead/th1520.dtsi
+++ b/arch/riscv/boot/dts/thead/th1520.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/clock/thead,th1520-clk-ap.h>
+#include <dt-bindings/power/thead,th1520-power.h>
/ {
compatible = "thead,th1520";
@@ -229,6 +230,13 @@
snps,blen = <0 0 64 32 0 0 0>;
};
+ aon: aon {
+ compatible = "thead,th1520-aon";
+ mboxes = <&mbox_910t 1>;
+ mbox-names = "aon";
+ #power-domain-cells = <1>;
+ };
+
soc {
compatible = "simple-bus";
interrupt-parent = <&plic>;
@@ -489,6 +497,19 @@
#clock-cells = <1>;
};
+ rst: reset-controller@ffef528000 {
+ compatible = "thead,th1520-reset";
+ reg = <0xff 0xef528000 0x0 0x4f>;
+ #reset-cells = <1>;
+ };
+
+ clk_vo: clock-controller@ffef528050 {
+ compatible = "thead,th1520-clk-vo";
+ reg = <0xff 0xef528050 0x0 0xfb0>;
+ clocks = <&clk CLK_VIDEO_PLL>;
+ #clock-cells = <1>;
+ };
+
dmac0: dma-controller@ffefc00000 {
compatible = "snps,axi-dma-1.01a";
reg = <0xff 0xefc00000 0x0 0x1000>;
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 3c8e16d71e17..eea825ee58e1 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -169,6 +169,7 @@ CONFIG_PINCTRL_SOPHGO_SG2002=y
CONFIG_PINCTRL_TH1520=y
CONFIG_GPIO_DWAPB=y
CONFIG_GPIO_SIFIVE=y
+CONFIG_GPIO_SPACEMIT_K1=y
CONFIG_POWER_RESET_GPIO_RESTART=y
CONFIG_SENSORS_SFCTEMP=m
CONFIG_CPU_THERMAL=y
@@ -250,6 +251,8 @@ CONFIG_CLK_SOPHGO_CV1800=y
CONFIG_CLK_SOPHGO_SG2042_PLL=y
CONFIG_CLK_SOPHGO_SG2042_CLKGEN=y
CONFIG_CLK_SOPHGO_SG2042_RPGATE=y
+CONFIG_SPACEMIT_CCU=y
+CONFIG_SPACEMIT_K1_CCU=y
CONFIG_SUN8I_DE2_CCU=m
CONFIG_SUN50I_IOMMU=y
CONFIG_RPMSG_CHAR=y
diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig
index c67095a3d669..cd9b776602f8 100644
--- a/arch/riscv/crypto/Kconfig
+++ b/arch/riscv/crypto/Kconfig
@@ -18,16 +18,6 @@ config CRYPTO_AES_RISCV64
- Zvkb vector crypto extension (CTR)
- Zvkg vector crypto extension (XTS)
-config CRYPTO_CHACHA_RISCV64
- tristate "Ciphers: ChaCha"
- depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
- select CRYPTO_SKCIPHER
- help
- Length-preserving ciphers: ChaCha20 stream cipher algorithm
-
- Architecture: riscv64 using:
- - Zvkb vector crypto extension
-
config CRYPTO_GHASH_RISCV64
tristate "Hash functions: GHASH"
depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
@@ -38,17 +28,6 @@ config CRYPTO_GHASH_RISCV64
Architecture: riscv64 using:
- Zvkg vector crypto extension
-config CRYPTO_SHA256_RISCV64
- tristate "Hash functions: SHA-224 and SHA-256"
- depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
- select CRYPTO_SHA256
- help
- SHA-224 and SHA-256 secure hash algorithm (FIPS 180)
-
- Architecture: riscv64 using:
- - Zvknha or Zvknhb vector crypto extensions
- - Zvkb vector crypto extension
-
config CRYPTO_SHA512_RISCV64
tristate "Hash functions: SHA-384 and SHA-512"
depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
@@ -64,7 +43,7 @@ config CRYPTO_SM3_RISCV64
tristate "Hash functions: SM3 (ShangMi 3)"
depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
select CRYPTO_HASH
- select CRYPTO_SM3
+ select CRYPTO_LIB_SM3
help
SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012)
diff --git a/arch/riscv/crypto/Makefile b/arch/riscv/crypto/Makefile
index 247c7bc7288c..e10e8257734e 100644
--- a/arch/riscv/crypto/Makefile
+++ b/arch/riscv/crypto/Makefile
@@ -4,15 +4,9 @@ obj-$(CONFIG_CRYPTO_AES_RISCV64) += aes-riscv64.o
aes-riscv64-y := aes-riscv64-glue.o aes-riscv64-zvkned.o \
aes-riscv64-zvkned-zvbb-zvkg.o aes-riscv64-zvkned-zvkb.o
-obj-$(CONFIG_CRYPTO_CHACHA_RISCV64) += chacha-riscv64.o
-chacha-riscv64-y := chacha-riscv64-glue.o chacha-riscv64-zvkb.o
-
obj-$(CONFIG_CRYPTO_GHASH_RISCV64) += ghash-riscv64.o
ghash-riscv64-y := ghash-riscv64-glue.o ghash-riscv64-zvkg.o
-obj-$(CONFIG_CRYPTO_SHA256_RISCV64) += sha256-riscv64.o
-sha256-riscv64-y := sha256-riscv64-glue.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o
-
obj-$(CONFIG_CRYPTO_SHA512_RISCV64) += sha512-riscv64.o
sha512-riscv64-y := sha512-riscv64-glue.o sha512-riscv64-zvknhb-zvkb.o
diff --git a/arch/riscv/crypto/chacha-riscv64-glue.c b/arch/riscv/crypto/chacha-riscv64-glue.c
deleted file mode 100644
index 10b46f36375a..000000000000
--- a/arch/riscv/crypto/chacha-riscv64-glue.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ChaCha20 using the RISC-V vector crypto extensions
- *
- * Copyright (C) 2023 SiFive, Inc.
- * Author: Jerry Shih <jerry.shih@sifive.com>
- */
-
-#include <asm/simd.h>
-#include <asm/vector.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/linkage.h>
-#include <linux/module.h>
-
-asmlinkage void chacha20_zvkb(const u32 key[8], const u8 *in, u8 *out,
- size_t len, const u32 iv[4]);
-
-static int riscv64_chacha20_crypt(struct skcipher_request *req)
-{
- u32 iv[CHACHA_IV_SIZE / sizeof(u32)];
- u8 block_buffer[CHACHA_BLOCK_SIZE];
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- unsigned int tail_bytes;
- int err;
-
- iv[0] = get_unaligned_le32(req->iv);
- iv[1] = get_unaligned_le32(req->iv + 4);
- iv[2] = get_unaligned_le32(req->iv + 8);
- iv[3] = get_unaligned_le32(req->iv + 12);
-
- err = skcipher_walk_virt(&walk, req, false);
- while (walk.nbytes) {
- nbytes = walk.nbytes & ~(CHACHA_BLOCK_SIZE - 1);
- tail_bytes = walk.nbytes & (CHACHA_BLOCK_SIZE - 1);
- kernel_vector_begin();
- if (nbytes) {
- chacha20_zvkb(ctx->key, walk.src.virt.addr,
- walk.dst.virt.addr, nbytes, iv);
- iv[0] += nbytes / CHACHA_BLOCK_SIZE;
- }
- if (walk.nbytes == walk.total && tail_bytes > 0) {
- memcpy(block_buffer, walk.src.virt.addr + nbytes,
- tail_bytes);
- chacha20_zvkb(ctx->key, block_buffer, block_buffer,
- CHACHA_BLOCK_SIZE, iv);
- memcpy(walk.dst.virt.addr + nbytes, block_buffer,
- tail_bytes);
- tail_bytes = 0;
- }
- kernel_vector_end();
-
- err = skcipher_walk_done(&walk, tail_bytes);
- }
-
- return err;
-}
-
-static struct skcipher_alg riscv64_chacha_alg = {
- .setkey = chacha20_setkey,
- .encrypt = riscv64_chacha20_crypt,
- .decrypt = riscv64_chacha20_crypt,
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .base = {
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct chacha_ctx),
- .cra_priority = 300,
- .cra_name = "chacha20",
- .cra_driver_name = "chacha20-riscv64-zvkb",
- .cra_module = THIS_MODULE,
- },
-};
-
-static int __init riscv64_chacha_mod_init(void)
-{
- if (riscv_isa_extension_available(NULL, ZVKB) &&
- riscv_vector_vlen() >= 128)
- return crypto_register_skcipher(&riscv64_chacha_alg);
-
- return -ENODEV;
-}
-
-static void __exit riscv64_chacha_mod_exit(void)
-{
- crypto_unregister_skcipher(&riscv64_chacha_alg);
-}
-
-module_init(riscv64_chacha_mod_init);
-module_exit(riscv64_chacha_mod_exit);
-
-MODULE_DESCRIPTION("ChaCha20 (RISC-V accelerated)");
-MODULE_AUTHOR("Jerry Shih <jerry.shih@sifive.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("chacha20");
diff --git a/arch/riscv/crypto/ghash-riscv64-glue.c b/arch/riscv/crypto/ghash-riscv64-glue.c
index 312e7891fd0a..d86073d25387 100644
--- a/arch/riscv/crypto/ghash-riscv64-glue.c
+++ b/arch/riscv/crypto/ghash-riscv64-glue.c
@@ -11,11 +11,16 @@
#include <asm/simd.h>
#include <asm/vector.h>
+#include <crypto/b128ops.h>
+#include <crypto/gf128mul.h>
#include <crypto/ghash.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <linux/linkage.h>
+#include <crypto/utils.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
asmlinkage void ghash_zvkg(be128 *accumulator, const be128 *key, const u8 *data,
size_t len);
@@ -26,8 +31,6 @@ struct riscv64_ghash_tfm_ctx {
struct riscv64_ghash_desc_ctx {
be128 accumulator;
- u8 buffer[GHASH_BLOCK_SIZE];
- u32 bytes;
};
static int riscv64_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -78,50 +81,24 @@ static int riscv64_ghash_update(struct shash_desc *desc, const u8 *src,
{
const struct riscv64_ghash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct riscv64_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- unsigned int len;
-
- if (dctx->bytes) {
- if (dctx->bytes + srclen < GHASH_BLOCK_SIZE) {
- memcpy(dctx->buffer + dctx->bytes, src, srclen);
- dctx->bytes += srclen;
- return 0;
- }
- memcpy(dctx->buffer + dctx->bytes, src,
- GHASH_BLOCK_SIZE - dctx->bytes);
- riscv64_ghash_blocks(tctx, dctx, dctx->buffer,
- GHASH_BLOCK_SIZE);
- src += GHASH_BLOCK_SIZE - dctx->bytes;
- srclen -= GHASH_BLOCK_SIZE - dctx->bytes;
- dctx->bytes = 0;
- }
-
- len = round_down(srclen, GHASH_BLOCK_SIZE);
- if (len) {
- riscv64_ghash_blocks(tctx, dctx, src, len);
- src += len;
- srclen -= len;
- }
- if (srclen) {
- memcpy(dctx->buffer, src, srclen);
- dctx->bytes = srclen;
- }
-
- return 0;
+ riscv64_ghash_blocks(tctx, dctx, src,
+ round_down(srclen, GHASH_BLOCK_SIZE));
+ return srclen - round_down(srclen, GHASH_BLOCK_SIZE);
}
-static int riscv64_ghash_final(struct shash_desc *desc, u8 *out)
+static int riscv64_ghash_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
const struct riscv64_ghash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct riscv64_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- int i;
- if (dctx->bytes) {
- for (i = dctx->bytes; i < GHASH_BLOCK_SIZE; i++)
- dctx->buffer[i] = 0;
+ if (len) {
+ u8 buf[GHASH_BLOCK_SIZE] = {};
- riscv64_ghash_blocks(tctx, dctx, dctx->buffer,
- GHASH_BLOCK_SIZE);
+ memcpy(buf, src, len);
+ riscv64_ghash_blocks(tctx, dctx, buf, GHASH_BLOCK_SIZE);
+ memzero_explicit(buf, sizeof(buf));
}
memcpy(out, &dctx->accumulator, GHASH_DIGEST_SIZE);
@@ -131,7 +108,7 @@ static int riscv64_ghash_final(struct shash_desc *desc, u8 *out)
static struct shash_alg riscv64_ghash_alg = {
.init = riscv64_ghash_init,
.update = riscv64_ghash_update,
- .final = riscv64_ghash_final,
+ .finup = riscv64_ghash_finup,
.setkey = riscv64_ghash_setkey,
.descsize = sizeof(struct riscv64_ghash_desc_ctx),
.digestsize = GHASH_DIGEST_SIZE,
@@ -139,6 +116,7 @@ static struct shash_alg riscv64_ghash_alg = {
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct riscv64_ghash_tfm_ctx),
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_name = "ghash",
.cra_driver_name = "ghash-riscv64-zvkg",
.cra_module = THIS_MODULE,
diff --git a/arch/riscv/crypto/sha256-riscv64-glue.c b/arch/riscv/crypto/sha256-riscv64-glue.c
deleted file mode 100644
index 71e051e40a64..000000000000
--- a/arch/riscv/crypto/sha256-riscv64-glue.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SHA-256 and SHA-224 using the RISC-V vector crypto extensions
- *
- * Copyright (C) 2022 VRULL GmbH
- * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
- *
- * Copyright (C) 2023 SiFive, Inc.
- * Author: Jerry Shih <jerry.shih@sifive.com>
- */
-
-#include <asm/simd.h>
-#include <asm/vector.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <crypto/sha256_base.h>
-#include <linux/linkage.h>
-#include <linux/module.h>
-
-/*
- * Note: the asm function only uses the 'state' field of struct sha256_state.
- * It is assumed to be the first field.
- */
-asmlinkage void sha256_transform_zvknha_or_zvknhb_zvkb(
- struct sha256_state *state, const u8 *data, int num_blocks);
-
-static int riscv64_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- /*
- * Ensure struct sha256_state begins directly with the SHA-256
- * 256-bit internal state, as this is what the asm function expects.
- */
- BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
-
- if (crypto_simd_usable()) {
- kernel_vector_begin();
- sha256_base_do_update(desc, data, len,
- sha256_transform_zvknha_or_zvknhb_zvkb);
- kernel_vector_end();
- } else {
- crypto_sha256_update(desc, data, len);
- }
- return 0;
-}
-
-static int riscv64_sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- if (crypto_simd_usable()) {
- kernel_vector_begin();
- if (len)
- sha256_base_do_update(
- desc, data, len,
- sha256_transform_zvknha_or_zvknhb_zvkb);
- sha256_base_do_finalize(
- desc, sha256_transform_zvknha_or_zvknhb_zvkb);
- kernel_vector_end();
-
- return sha256_base_finish(desc, out);
- }
-
- return crypto_sha256_finup(desc, data, len, out);
-}
-
-static int riscv64_sha256_final(struct shash_desc *desc, u8 *out)
-{
- return riscv64_sha256_finup(desc, NULL, 0, out);
-}
-
-static int riscv64_sha256_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return sha256_base_init(desc) ?:
- riscv64_sha256_finup(desc, data, len, out);
-}
-
-static struct shash_alg riscv64_sha256_algs[] = {
- {
- .init = sha256_base_init,
- .update = riscv64_sha256_update,
- .final = riscv64_sha256_final,
- .finup = riscv64_sha256_finup,
- .digest = riscv64_sha256_digest,
- .descsize = sizeof(struct sha256_state),
- .digestsize = SHA256_DIGEST_SIZE,
- .base = {
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_priority = 300,
- .cra_name = "sha256",
- .cra_driver_name = "sha256-riscv64-zvknha_or_zvknhb-zvkb",
- .cra_module = THIS_MODULE,
- },
- }, {
- .init = sha224_base_init,
- .update = riscv64_sha256_update,
- .final = riscv64_sha256_final,
- .finup = riscv64_sha256_finup,
- .descsize = sizeof(struct sha256_state),
- .digestsize = SHA224_DIGEST_SIZE,
- .base = {
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_priority = 300,
- .cra_name = "sha224",
- .cra_driver_name = "sha224-riscv64-zvknha_or_zvknhb-zvkb",
- .cra_module = THIS_MODULE,
- },
- },
-};
-
-static int __init riscv64_sha256_mod_init(void)
-{
- /* Both zvknha and zvknhb provide the SHA-256 instructions. */
- if ((riscv_isa_extension_available(NULL, ZVKNHA) ||
- riscv_isa_extension_available(NULL, ZVKNHB)) &&
- riscv_isa_extension_available(NULL, ZVKB) &&
- riscv_vector_vlen() >= 128)
- return crypto_register_shashes(riscv64_sha256_algs,
- ARRAY_SIZE(riscv64_sha256_algs));
-
- return -ENODEV;
-}
-
-static void __exit riscv64_sha256_mod_exit(void)
-{
- crypto_unregister_shashes(riscv64_sha256_algs,
- ARRAY_SIZE(riscv64_sha256_algs));
-}
-
-module_init(riscv64_sha256_mod_init);
-module_exit(riscv64_sha256_mod_exit);
-
-MODULE_DESCRIPTION("SHA-256 (RISC-V accelerated)");
-MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("sha256");
-MODULE_ALIAS_CRYPTO("sha224");
diff --git a/arch/riscv/crypto/sha512-riscv64-glue.c b/arch/riscv/crypto/sha512-riscv64-glue.c
index 43b56a08aeb5..4634fca78ae2 100644
--- a/arch/riscv/crypto/sha512-riscv64-glue.c
+++ b/arch/riscv/crypto/sha512-riscv64-glue.c
@@ -14,7 +14,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha512_base.h>
-#include <linux/linkage.h>
+#include <linux/kernel.h>
#include <linux/module.h>
/*
@@ -24,8 +24,8 @@
asmlinkage void sha512_transform_zvknhb_zvkb(
struct sha512_state *state, const u8 *data, int num_blocks);
-static int riscv64_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static void sha512_block(struct sha512_state *state, const u8 *data,
+ int num_blocks)
{
/*
* Ensure struct sha512_state begins directly with the SHA-512
@@ -35,35 +35,24 @@ static int riscv64_sha512_update(struct shash_desc *desc, const u8 *data,
if (crypto_simd_usable()) {
kernel_vector_begin();
- sha512_base_do_update(desc, data, len,
- sha512_transform_zvknhb_zvkb);
+ sha512_transform_zvknhb_zvkb(state, data, num_blocks);
kernel_vector_end();
} else {
- crypto_sha512_update(desc, data, len);
+ sha512_generic_block_fn(state, data, num_blocks);
}
- return 0;
}
-static int riscv64_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int riscv64_sha512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- if (crypto_simd_usable()) {
- kernel_vector_begin();
- if (len)
- sha512_base_do_update(desc, data, len,
- sha512_transform_zvknhb_zvkb);
- sha512_base_do_finalize(desc, sha512_transform_zvknhb_zvkb);
- kernel_vector_end();
-
- return sha512_base_finish(desc, out);
- }
-
- return crypto_sha512_finup(desc, data, len, out);
+ return sha512_base_do_update_blocks(desc, data, len, sha512_block);
}
-static int riscv64_sha512_final(struct shash_desc *desc, u8 *out)
+static int riscv64_sha512_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- return riscv64_sha512_finup(desc, NULL, 0, out);
+ sha512_base_do_finup(desc, data, len, sha512_block);
+ return sha512_base_finish(desc, out);
}
static int riscv64_sha512_digest(struct shash_desc *desc, const u8 *data,
@@ -77,14 +66,15 @@ static struct shash_alg riscv64_sha512_algs[] = {
{
.init = sha512_base_init,
.update = riscv64_sha512_update,
- .final = riscv64_sha512_final,
.finup = riscv64_sha512_finup,
.digest = riscv64_sha512_digest,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA512_DIGEST_SIZE,
.base = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_name = "sha512",
.cra_driver_name = "sha512-riscv64-zvknhb-zvkb",
.cra_module = THIS_MODULE,
@@ -92,13 +82,14 @@ static struct shash_alg riscv64_sha512_algs[] = {
}, {
.init = sha384_base_init,
.update = riscv64_sha512_update,
- .final = riscv64_sha512_final,
.finup = riscv64_sha512_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA384_DIGEST_SIZE,
.base = {
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_name = "sha384",
.cra_driver_name = "sha384-riscv64-zvknhb-zvkb",
.cra_module = THIS_MODULE,
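
With this conversion the glue code follows the block-only shash model: update() only ever hands whole blocks to sha512_block() and the crypto core buffers any partial tail itself, which is why the final() callback disappears and descsize switches to SHA512_STATE_SIZE. A minimal sketch of that contract, assuming the usual semantics of sha512_base_do_update_blocks() (illustrative, not part of the patch):

    static int example_update(struct shash_desc *desc, const u8 *data,
                              unsigned int len)
    {
            /* Full blocks are passed to sha512_block(); the leftover byte
             * count is handed back so the core can buffer the partial block. */
            return sha512_base_do_update_blocks(desc, data, len, sha512_block);
    }
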
diff --git a/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S b/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S
index 3a9ae210f915..89f4a10d12dd 100644
--- a/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S
+++ b/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S
@@ -43,7 +43,7 @@
// - RISC-V Vector SHA-2 Secure Hash extension ('Zvknhb')
// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
-#include <linux/cfi_types.h>
+#include <linux/linkage.h>
.text
.option arch, +zvknhb, +zvkb
@@ -95,7 +95,7 @@
// void sha512_transform_zvknhb_zvkb(u64 state[8], const u8 *data,
// int num_blocks);
-SYM_TYPED_FUNC_START(sha512_transform_zvknhb_zvkb)
+SYM_FUNC_START(sha512_transform_zvknhb_zvkb)
// Setup mask for the vmerge to replace the first word (idx==0) in
// message scheduling. There are 4 words, so an 8-bit mask suffices.
diff --git a/arch/riscv/crypto/sm3-riscv64-glue.c b/arch/riscv/crypto/sm3-riscv64-glue.c
index e1737a970c7c..abdfe4a63a27 100644
--- a/arch/riscv/crypto/sm3-riscv64-glue.c
+++ b/arch/riscv/crypto/sm3-riscv64-glue.c
@@ -13,8 +13,9 @@
#include <asm/vector.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
+#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
-#include <linux/linkage.h>
+#include <linux/kernel.h>
#include <linux/module.h>
/*
@@ -24,8 +25,8 @@
asmlinkage void sm3_transform_zvksh_zvkb(
struct sm3_state *state, const u8 *data, int num_blocks);
-static int riscv64_sm3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static void sm3_block(struct sm3_state *state, const u8 *data,
+ int num_blocks)
{
/*
* Ensure struct sm3_state begins directly with the SM3
@@ -35,52 +36,36 @@ static int riscv64_sm3_update(struct shash_desc *desc, const u8 *data,
if (crypto_simd_usable()) {
kernel_vector_begin();
- sm3_base_do_update(desc, data, len, sm3_transform_zvksh_zvkb);
+ sm3_transform_zvksh_zvkb(state, data, num_blocks);
kernel_vector_end();
} else {
- sm3_update(shash_desc_ctx(desc), data, len);
+ sm3_block_generic(state, data, num_blocks);
}
- return 0;
}
-static int riscv64_sm3_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int riscv64_sm3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- struct sm3_state *ctx;
-
- if (crypto_simd_usable()) {
- kernel_vector_begin();
- if (len)
- sm3_base_do_update(desc, data, len,
- sm3_transform_zvksh_zvkb);
- sm3_base_do_finalize(desc, sm3_transform_zvksh_zvkb);
- kernel_vector_end();
-
- return sm3_base_finish(desc, out);
- }
-
- ctx = shash_desc_ctx(desc);
- if (len)
- sm3_update(ctx, data, len);
- sm3_final(ctx, out);
-
- return 0;
+ return sm3_base_do_update_blocks(desc, data, len, sm3_block);
}
-static int riscv64_sm3_final(struct shash_desc *desc, u8 *out)
+static int riscv64_sm3_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- return riscv64_sm3_finup(desc, NULL, 0, out);
+ sm3_base_do_finup(desc, data, len, sm3_block);
+ return sm3_base_finish(desc, out);
}
static struct shash_alg riscv64_sm3_alg = {
.init = sm3_base_init,
.update = riscv64_sm3_update,
- .final = riscv64_sm3_final,
.finup = riscv64_sm3_finup,
- .descsize = sizeof(struct sm3_state),
+ .descsize = SM3_STATE_SIZE,
.digestsize = SM3_DIGEST_SIZE,
.base = {
.cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_priority = 300,
.cra_name = "sm3",
.cra_driver_name = "sm3-riscv64-zvksh-zvkb",
diff --git a/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S b/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S
index a2b65d961c04..4fe754846f65 100644
--- a/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S
+++ b/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S
@@ -43,7 +43,7 @@
// - RISC-V Vector SM3 Secure Hash extension ('Zvksh')
// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
-#include <linux/cfi_types.h>
+#include <linux/linkage.h>
.text
.option arch, +zvksh, +zvkb
@@ -81,7 +81,7 @@
.endm
// void sm3_transform_zvksh_zvkb(u32 state[8], const u8 *data, int num_blocks);
-SYM_TYPED_FUNC_START(sm3_transform_zvksh_zvkb)
+SYM_FUNC_START(sm3_transform_zvksh_zvkb)
// Load the state and endian-swap each 32-bit word.
vsetivli zero, 8, e32, m2, ta, ma
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index 721ec275ce57..231d777d936c 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -115,24 +115,19 @@
\old_c
.endm
-#define _ALTERNATIVE_CFG(old_c, ...) \
- ALTERNATIVE_CFG old_c
-
-#define _ALTERNATIVE_CFG_2(old_c, ...) \
- ALTERNATIVE_CFG old_c
+#define __ALTERNATIVE_CFG(old_c, ...) ALTERNATIVE_CFG old_c
+#define __ALTERNATIVE_CFG_2(old_c, ...) ALTERNATIVE_CFG old_c
#else /* !__ASSEMBLY__ */
-#define __ALTERNATIVE_CFG(old_c) \
- old_c "\n"
+#define __ALTERNATIVE_CFG(old_c, ...) old_c "\n"
+#define __ALTERNATIVE_CFG_2(old_c, ...) old_c "\n"
-#define _ALTERNATIVE_CFG(old_c, ...) \
- __ALTERNATIVE_CFG(old_c)
+#endif /* __ASSEMBLY__ */
-#define _ALTERNATIVE_CFG_2(old_c, ...) \
- __ALTERNATIVE_CFG(old_c)
+#define _ALTERNATIVE_CFG(old_c, ...) __ALTERNATIVE_CFG(old_c)
+#define _ALTERNATIVE_CFG_2(old_c, ...) __ALTERNATIVE_CFG_2(old_c)
-#endif /* __ASSEMBLY__ */
#endif /* CONFIG_RISCV_ALTERNATIVE */
/*
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
index cd627ec289f1..bfc8ea5f9319 100644
--- a/arch/riscv/include/asm/asm-prototypes.h
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -52,6 +52,8 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
DECLARE_DO_ERROR_INFO(do_trap_break);
+asmlinkage void ret_from_fork_kernel(void *fn_arg, int (*fn)(void *), struct pt_regs *regs);
+asmlinkage void ret_from_fork_user(struct pt_regs *regs);
asmlinkage void handle_bad_stack(struct pt_regs *regs);
asmlinkage void do_page_fault(struct pt_regs *regs);
asmlinkage void do_irq(struct pt_regs *regs);
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 8de73f91bfa3..b59ffeb668d6 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -34,11 +34,6 @@ static inline void flush_dcache_page(struct page *page)
flush_dcache_folio(page_folio(page));
}
-/*
- * RISC-V doesn't have an instruction to flush parts of the instruction cache,
- * so instead we just flush the whole thing.
- */
-#define flush_icache_range(start, end) flush_icache_all()
#define flush_icache_user_page(vma, pg, addr, len) \
do { \
if (vma->vm_flags & VM_EXEC) \
@@ -78,6 +73,16 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
#endif /* CONFIG_SMP */
+/*
+ * RISC-V doesn't have an instruction to flush parts of the instruction cache,
+ * so instead we just flush the whole thing.
+ */
+#define flush_icache_range flush_icache_range
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+ flush_icache_all();
+}
+
extern unsigned int riscv_cbom_block_size;
extern unsigned int riscv_cboz_block_size;
void riscv_init_cbo_blocksizes(void);
diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h
index 46677daf708b..cc11c4544cff 100644
--- a/arch/riscv/include/asm/kgdb.h
+++ b/arch/riscv/include/asm/kgdb.h
@@ -19,16 +19,9 @@
#ifndef __ASSEMBLY__
+void arch_kgdb_breakpoint(void);
extern unsigned long kgdb_compiled_break;
-static inline void arch_kgdb_breakpoint(void)
-{
- asm(".global kgdb_compiled_break\n"
- ".option norvc\n"
- "kgdb_compiled_break: ebreak\n"
- ".option rvc\n");
-}
-
#endif /* !__ASSEMBLY__ */
#define DBG_REG_ZERO "zero"
diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h
index 1f37b600ca47..3b643b9efc07 100644
--- a/arch/riscv/include/asm/kvm_aia.h
+++ b/arch/riscv/include/asm/kvm_aia.h
@@ -63,9 +63,6 @@ struct kvm_vcpu_aia {
/* CPU AIA CSR context of Guest VCPU */
struct kvm_vcpu_aia_csr guest_csr;
- /* CPU AIA CSR context upon Guest VCPU reset */
- struct kvm_vcpu_aia_csr guest_reset_csr;
-
/* Guest physical address of IMSIC for this VCPU */
gpa_t imsic_addr;
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 0e9c2fab6378..85cfebc32e4c 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -119,6 +119,9 @@ struct kvm_arch {
/* AIA Guest/VM context */
struct kvm_aia aia;
+
+ /* KVM_CAP_RISCV_MP_STATE_RESET */
+ bool mp_state_reset;
};
struct kvm_cpu_trap {
@@ -193,6 +196,12 @@ struct kvm_vcpu_smstateen_csr {
unsigned long sstateen0;
};
+struct kvm_vcpu_reset_state {
+ spinlock_t lock;
+ unsigned long pc;
+ unsigned long a1;
+};
+
struct kvm_vcpu_arch {
/* VCPU ran at least once */
bool ran_atleast_once;
@@ -227,12 +236,8 @@ struct kvm_vcpu_arch {
/* CPU Smstateen CSR context of Guest VCPU */
struct kvm_vcpu_smstateen_csr smstateen_csr;
- /* CPU context upon Guest VCPU reset */
- struct kvm_cpu_context guest_reset_context;
- spinlock_t reset_cntx_lock;
-
- /* CPU CSR context upon Guest VCPU reset */
- struct kvm_vcpu_csr guest_reset_csr;
+ /* CPU reset state of Guest VCPU */
+ struct kvm_vcpu_reset_state reset_state;
/*
* VCPU interrupts
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index 4ed6203cdd30..439ab2b3534f 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -55,6 +55,9 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
struct kvm_run *run,
u32 type, u64 flags);
+void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
+ unsigned long pc, unsigned long a1);
+void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg);
diff --git a/arch/riscv/include/asm/kvm_vcpu_vector.h b/arch/riscv/include/asm/kvm_vcpu_vector.h
index 27f5bccdd8b0..57a798a4cb0d 100644
--- a/arch/riscv/include/asm/kvm_vcpu_vector.h
+++ b/arch/riscv/include/asm/kvm_vcpu_vector.h
@@ -33,8 +33,7 @@ void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
unsigned long *isa);
void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx);
void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx);
-int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
- struct kvm_cpu_context *cntx);
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu);
#else
@@ -62,8 +61,7 @@ static inline void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cn
{
}
-static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
- struct kvm_cpu_context *cntx)
+static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu)
{
return 0;
}
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 0897dd99ab8d..188fadc1c21f 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -262,8 +262,6 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
return __page_val_to_pfn(pmd_val(pmd));
}
-#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)
-
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 428e48e5f57d..f19240fd018e 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -343,8 +343,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}
-#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 121fff429dce..34313387f977 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -30,6 +30,13 @@ static inline int syscall_get_nr(struct task_struct *task,
return regs->a7;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->a7 = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -62,8 +69,23 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned long *args)
{
args[0] = regs->orig_a0;
- args++;
- memcpy(args, &regs->a1, 5 * sizeof(args[0]));
+ args[1] = regs->a1;
+ args[2] = regs->a2;
+ args[3] = regs->a3;
+ args[4] = regs->a4;
+ args[5] = regs->a5;
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ regs->orig_a0 = args[0];
+ regs->a1 = args[1];
+ regs->a2 = args[2];
+ regs->a3 = args[3];
+ regs->a4 = args[4];
+ regs->a5 = args[5];
}
static inline int syscall_get_arch(struct task_struct *task)
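
The new setters mirror the existing getters and give generic code (for example the ptrace syscall-info work that motivated adding them across architectures) a way to rewrite a tracee's syscall. A hedged caller sketch, where force_getpid() is hypothetical and not part of the patch:

    static void force_getpid(struct task_struct *task, struct pt_regs *regs)
    {
            unsigned long args[6] = { 0 };

            syscall_set_nr(task, regs, __NR_getpid);   /* rewrites a7          */
            syscall_set_arguments(task, regs, args);   /* orig_a0 and a1..a5   */
    }
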
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 8d186bfced45..f7480c9c6f8d 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -9,8 +9,8 @@ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
endif
-CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall_table.o += $(call cc-disable-warning, override-init)
+CFLAGS_compat_syscall_table.o += $(call cc-disable-warning, override-init)
ifdef CONFIG_KEXEC_CORE
AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
diff --git a/arch/riscv/kernel/efi-header.S b/arch/riscv/kernel/efi-header.S
index c5f17c2710b5..2efc3aaf4a8c 100644
--- a/arch/riscv/kernel/efi-header.S
+++ b/arch/riscv/kernel/efi-header.S
@@ -9,7 +9,7 @@
#include <asm/set_memory.h>
.macro __EFI_PE_HEADER
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
coff_header:
#ifdef CONFIG_64BIT
.short IMAGE_FILE_MACHINE_RISCV64 // Machine
@@ -27,9 +27,9 @@ coff_header:
optional_header:
#ifdef CONFIG_64BIT
- .short PE_OPT_MAGIC_PE32PLUS // PE32+ format
+ .short IMAGE_NT_OPTIONAL_HDR64_MAGIC // PE32+ format
#else
- .short PE_OPT_MAGIC_PE32 // PE32 format
+ .short IMAGE_NT_OPTIONAL_HDR32_MAGIC // PE32 format
#endif
.byte 0x02 // MajorLinkerVersion
.byte 0x14 // MinorLinkerVersion
@@ -64,7 +64,7 @@ extra_header_fields:
.long efi_header_end - _start // SizeOfHeaders
.long 0 // CheckSum
.short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem
- .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT // DllCharacteristics
+ .short IMAGE_DLLCHARACTERISTICS_NX_COMPAT // DllCharacteristics
.quad 0 // SizeOfStackReserve
.quad 0 // SizeOfStackCommit
.quad 0 // SizeOfHeapReserve
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 33a5a9f2a0d4..0fb338000c6d 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -319,17 +319,21 @@ SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif
-SYM_CODE_START(ret_from_fork)
+SYM_CODE_START(ret_from_fork_kernel_asm)
+ call schedule_tail
+ move a0, s1 /* fn_arg */
+ move a1, s0 /* fn */
+ move a2, sp /* pt_regs */
+ call ret_from_fork_kernel
+ j ret_from_exception
+SYM_CODE_END(ret_from_fork_kernel_asm)
+
+SYM_CODE_START(ret_from_fork_user_asm)
call schedule_tail
- beqz s0, 1f /* not from kernel thread */
- /* Call fn(arg) */
- move a0, s1
- jalr s0
-1:
move a0, sp /* pt_regs */
- call syscall_exit_to_user_mode
+ call ret_from_fork_user
j ret_from_exception
-SYM_CODE_END(ret_from_fork)
+SYM_CODE_END(ret_from_fork_user_asm)
#ifdef CONFIG_IRQ_STACKS
/*
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 356d5397b2a2..bdf3352acf4c 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -131,6 +131,12 @@ secondary_start_sbi:
csrw CSR_IE, zero
csrw CSR_IP, zero
+#ifndef CONFIG_RISCV_M_MODE
+ /* Enable time CSR */
+ li t0, 0x2
+ csrw CSR_SCOUNTEREN, t0
+#endif
+
/* Load the global pointer */
load_global_pointer
@@ -226,6 +232,10 @@ SYM_CODE_START(_start_kernel)
* to hand it to us.
*/
csrr a0, CSR_MHARTID
+#else
+ /* Enable time CSR */
+ li t0, 0x2
+ csrw CSR_SCOUNTEREN, t0
#endif /* CONFIG_RISCV_M_MODE */
/* Load the global pointer */
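
Roughly the C equivalent of the two new assembly stanzas; the purpose is inferred rather than stated in the hunk (bit 1 of scounteren is TM, so user space and the vDSO can presumably execute rdtime without trapping):

    csr_write(CSR_SCOUNTEREN, 0x2);   /* enable the time CSR for U-mode */
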
diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c
index 2e0266ae6bd7..9f3db3503dab 100644
--- a/arch/riscv/kernel/kgdb.c
+++ b/arch/riscv/kernel/kgdb.c
@@ -254,6 +254,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->epc = pc;
}
+noinline void arch_kgdb_breakpoint(void)
+{
+ asm(".global kgdb_compiled_break\n"
+ "kgdb_compiled_break: ebreak\n");
+}
+
void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
char *remcom_out_buffer)
{
diff --git a/arch/riscv/kernel/module-sections.c b/arch/riscv/kernel/module-sections.c
index e264e59e596e..91d0b355ceef 100644
--- a/arch/riscv/kernel/module-sections.c
+++ b/arch/riscv/kernel/module-sections.c
@@ -73,16 +73,17 @@ static bool duplicate_rela(const Elf_Rela *rela, int idx)
static void count_max_entries(Elf_Rela *relas, int num,
unsigned int *plts, unsigned int *gots)
{
- unsigned int type, i;
-
- for (i = 0; i < num; i++) {
- type = ELF_RISCV_R_TYPE(relas[i].r_info);
- if (type == R_RISCV_CALL_PLT) {
+ for (int i = 0; i < num; i++) {
+ switch (ELF_R_TYPE(relas[i].r_info)) {
+ case R_RISCV_CALL_PLT:
+ case R_RISCV_PLT32:
if (!duplicate_rela(relas, i))
(*plts)++;
- } else if (type == R_RISCV_GOT_HI20) {
+ break;
+ case R_RISCV_GOT_HI20:
if (!duplicate_rela(relas, i))
(*gots)++;
+ break;
}
}
}
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 47d0ebeec93c..7f6147c18033 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -648,7 +648,7 @@ process_accumulated_relocations(struct module *me,
kfree(bucket_iter);
}
- kfree(*relocation_hashtable);
+ kvfree(*relocation_hashtable);
}
static int add_relocation_to_accumulate(struct module *me, int type,
@@ -752,9 +752,10 @@ initialize_relocation_hashtable(unsigned int num_relocations,
hashtable_size <<= should_double_size;
- *relocation_hashtable = kmalloc_array(hashtable_size,
- sizeof(**relocation_hashtable),
- GFP_KERNEL);
+ /* Number of relocations may be large, so kvmalloc it */
+ *relocation_hashtable = kvmalloc_array(hashtable_size,
+ sizeof(**relocation_hashtable),
+ GFP_KERNEL);
if (!*relocation_hashtable)
return 0;
@@ -859,7 +860,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
}
j++;
- if (j > sechdrs[relsec].sh_size / sizeof(*rel))
+ if (j == num_relocations)
j = 0;
} while (j_idx != j);
diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
index 4b3dc8beaf77..cc15f7ca6cc1 100644
--- a/arch/riscv/kernel/probes/uprobes.c
+++ b/arch/riscv/kernel/probes/uprobes.c
@@ -167,6 +167,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
/* Initialize the slot */
void *kaddr = kmap_atomic(page);
void *dst = kaddr + (vaddr & ~PAGE_MASK);
+ unsigned long start = (unsigned long)dst;
memcpy(dst, src, len);
@@ -176,13 +177,6 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
*(uprobe_opcode_t *)dst = __BUG_INSN_32;
}
+ flush_icache_range(start, start + len);
kunmap_atomic(kaddr);
-
- /*
- * We probably need flush_icache_user_page() but it needs vma.
- * This should work on most of architectures by default. If
- * architecture needs to do something different it can define
- * its own version of the function.
- */
- flush_dcache_page(page);
}
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 7c244de77180..bbf7ec6a75c0 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -17,7 +17,9 @@
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
+#include <linux/entry-common.h>
+#include <asm/asm-prototypes.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/csr.h>
@@ -36,7 +38,8 @@ unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
-extern asmlinkage void ret_from_fork(void);
+extern asmlinkage void ret_from_fork_kernel_asm(void);
+extern asmlinkage void ret_from_fork_user_asm(void);
void noinstr arch_cpu_idle(void)
{
@@ -206,6 +209,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
+asmlinkage void ret_from_fork_kernel(void *fn_arg, int (*fn)(void *), struct pt_regs *regs)
+{
+ fn(fn_arg);
+
+ syscall_exit_to_user_mode(regs);
+}
+
+asmlinkage void ret_from_fork_user(struct pt_regs *regs)
+{
+ syscall_exit_to_user_mode(regs);
+}
+
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
unsigned long clone_flags = args->flags;
@@ -228,6 +243,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.s[0] = (unsigned long)args->fn;
p->thread.s[1] = (unsigned long)args->fn_arg;
+ p->thread.ra = (unsigned long)ret_from_fork_kernel_asm;
} else {
*childregs = *(current_pt_regs());
/* Turn off status.VS */
@@ -237,12 +253,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
if (clone_flags & CLONE_SETTLS)
childregs->tp = tls;
childregs->a0 = 0; /* Return value of fork() */
- p->thread.s[0] = 0;
+ p->thread.ra = (unsigned long)ret_from_fork_user_asm;
}
p->thread.riscv_v_flags = 0;
if (has_vector() || has_xtheadvector())
riscv_v_thread_alloc(p);
- p->thread.ra = (unsigned long)ret_from_fork;
p->thread.sp = (unsigned long)childregs; /* kernel sp */
return 0;
}
@@ -275,6 +290,9 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
unsigned long pmm;
u8 pmlen;
+ if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
+ return -EINVAL;
+
if (is_compat_thread(ti))
return -EINVAL;
@@ -330,6 +348,9 @@ long get_tagged_addr_ctrl(struct task_struct *task)
struct thread_info *ti = task_thread_info(task);
long ret = 0;
+ if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
+ return -EINVAL;
+
if (is_compat_thread(ti))
return -EINVAL;
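
The new Supm guards mean the tagged-address-control prctl now fails cleanly when pointer masking is absent. A hedged userspace sketch of the call that reaches set_tagged_addr_ctrl(); no PMLEN selection is shown and the helper name is mine:

    #include <stdio.h>
    #include <sys/prctl.h>

    static void enable_tagged_addr(void)
    {
            if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
                    perror("PR_SET_TAGGED_ADDR_CTRL");  /* now -EINVAL without Supm */
    }
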
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index c174544eefc8..f7c9a1caa83e 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -66,6 +66,9 @@ static struct resource bss_res = { .name = "Kernel bss", };
static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
#endif
+static int num_standard_resources;
+static struct resource *standard_resources;
+
static int __init add_resource(struct resource *parent,
struct resource *res)
{
@@ -139,7 +142,7 @@ static void __init init_resources(void)
struct resource *res = NULL;
struct resource *mem_res = NULL;
size_t mem_res_sz = 0;
- int num_resources = 0, res_idx = 0;
+ int num_resources = 0, res_idx = 0, non_resv_res = 0;
int ret = 0;
/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
@@ -193,6 +196,7 @@ static void __init init_resources(void)
/* Add /memory regions to the resource tree */
for_each_mem_region(region) {
res = &mem_res[res_idx--];
+ non_resv_res++;
if (unlikely(memblock_is_nomap(region))) {
res->name = "Reserved";
@@ -210,6 +214,9 @@ static void __init init_resources(void)
goto error;
}
+ num_standard_resources = non_resv_res;
+ standard_resources = &mem_res[res_idx + 1];
+
/* Clean-up any unused pre-allocated resources */
if (res_idx >= 0)
memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res));
@@ -221,6 +228,33 @@ static void __init init_resources(void)
memblock_free(mem_res, mem_res_sz);
}
+static int __init reserve_memblock_reserved_regions(void)
+{
+ u64 i, j;
+
+ for (i = 0; i < num_standard_resources; i++) {
+ struct resource *mem = &standard_resources[i];
+ phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+ if (!memblock_is_region_reserved(mem->start, mem_size))
+ continue;
+
+ for_each_reserved_mem_range(j, &r_start, &r_end) {
+ resource_size_t start, end;
+
+ start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+ end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+ if (start > mem->end || end < mem->start)
+ continue;
+
+ reserve_region_with_split(mem, start, end, "Reserved");
+ }
+ }
+
+ return 0;
+}
+arch_initcall(reserve_memblock_reserved_regions);
static void __init parse_dtb(void)
{
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 8ff8e8b36524..9c83848797a7 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -198,47 +198,57 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
DO_ERROR_INFO(do_trap_load_fault,
SIGSEGV, SEGV_ACCERR, "load access fault");
-asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
+enum misaligned_access_type {
+ MISALIGNED_STORE,
+ MISALIGNED_LOAD,
+};
+static const struct {
+ const char *type_str;
+ int (*handler)(struct pt_regs *regs);
+} misaligned_handler[] = {
+ [MISALIGNED_STORE] = {
+ .type_str = "Oops - store (or AMO) address misaligned",
+ .handler = handle_misaligned_store,
+ },
+ [MISALIGNED_LOAD] = {
+ .type_str = "Oops - load address misaligned",
+ .handler = handle_misaligned_load,
+ },
+};
+
+static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type type)
{
+ irqentry_state_t state;
+
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
+ local_irq_enable();
+ } else {
+ state = irqentry_nmi_enter(regs);
+ }
- if (handle_misaligned_load(regs))
- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
- "Oops - load address misaligned");
+ if (misaligned_handler[type].handler(regs))
+ do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+ misaligned_handler[type].type_str);
+ if (user_mode(regs)) {
+ local_irq_disable();
irqentry_exit_to_user_mode(regs);
} else {
- irqentry_state_t state = irqentry_nmi_enter(regs);
-
- if (handle_misaligned_load(regs))
- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
- "Oops - load address misaligned");
-
irqentry_nmi_exit(regs, state);
}
}
-asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
+asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
- if (user_mode(regs)) {
- irqentry_enter_from_user_mode(regs);
-
- if (handle_misaligned_store(regs))
- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
- "Oops - store (or AMO) address misaligned");
-
- irqentry_exit_to_user_mode(regs);
- } else {
- irqentry_state_t state = irqentry_nmi_enter(regs);
-
- if (handle_misaligned_store(regs))
- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
- "Oops - store (or AMO) address misaligned");
+ do_trap_misaligned(regs, MISALIGNED_LOAD);
+}
- irqentry_nmi_exit(regs, state);
- }
+asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
+{
+ do_trap_misaligned(regs, MISALIGNED_STORE);
}
+
DO_ERROR_INFO(do_trap_store_fault,
SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 4354c87c0376..77c788660223 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -88,6 +88,13 @@
#define INSN_MATCH_C_FSWSP 0xe002
#define INSN_MASK_C_FSWSP 0xe003
+#define INSN_MATCH_C_LHU 0x8400
+#define INSN_MASK_C_LHU 0xfc43
+#define INSN_MATCH_C_LH 0x8440
+#define INSN_MASK_C_LH 0xfc43
+#define INSN_MATCH_C_SH 0x8c00
+#define INSN_MASK_C_SH 0xfc43
+
#define INSN_LEN(insn) ((((insn) & 0x3) < 0x3) ? 2 : 4)
#if defined(CONFIG_64BIT)
@@ -268,7 +275,7 @@ static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
int __ret; \
\
if (user_mode(regs)) { \
- __ret = __get_user(insn, (type __user *) insn_addr); \
+ __ret = get_user(insn, (type __user *) insn_addr); \
} else { \
insn = *(type *)insn_addr; \
__ret = 0; \
@@ -431,6 +438,13 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
fp = 1;
len = 4;
#endif
+ } else if ((insn & INSN_MASK_C_LHU) == INSN_MATCH_C_LHU) {
+ len = 2;
+ insn = RVC_RS2S(insn) << SH_RD;
+ } else if ((insn & INSN_MASK_C_LH) == INSN_MATCH_C_LH) {
+ len = 2;
+ shift = 8 * (sizeof(ulong) - len);
+ insn = RVC_RS2S(insn) << SH_RD;
} else {
regs->epc = epc;
return -1;
@@ -530,6 +544,9 @@ static int handle_scalar_misaligned_store(struct pt_regs *regs)
len = 4;
val.data_ulong = GET_F32_RS2C(insn, regs);
#endif
+ } else if ((insn & INSN_MASK_C_SH) == INSN_MATCH_C_SH) {
+ len = 2;
+ val.data_ulong = GET_RS2S(insn, regs);
} else {
regs->epc = epc;
return -1;
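
The new C.LH case relies on the sign-extension the existing load path performs with the 'shift' value it sets up (C.LHU leaves shift at 0, so no extension happens). A standalone sketch of that arithmetic, assuming XLEN-wide registers; illustrative only:

    static unsigned long sext_halfword(unsigned long raw)
    {
            /* shift = 8 * (sizeof(ulong) - 2): push the 16-bit value to the
             * top, then arithmetic-shift it back down to replicate the sign. */
            unsigned int shift = 8 * (sizeof(unsigned long) - 2);

            return (unsigned long)((long)(raw << shift) >> shift);
    }
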
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index 585d2dcf2dab..b8ba13819d05 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -439,29 +439,36 @@ static int __init check_unaligned_access_all_cpus(void)
{
int cpu;
- if (unaligned_scalar_speed_param == RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN &&
- !check_unaligned_access_emulated_all_cpus()) {
- check_unaligned_access_speed_all_cpus();
- } else {
- pr_info("scalar unaligned access speed set to '%s' by command line\n",
- speed_str[unaligned_scalar_speed_param]);
+ if (unaligned_scalar_speed_param != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
+ pr_info("scalar unaligned access speed set to '%s' (%lu) by command line\n",
+ speed_str[unaligned_scalar_speed_param], unaligned_scalar_speed_param);
for_each_online_cpu(cpu)
per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param;
+ } else if (!check_unaligned_access_emulated_all_cpus()) {
+ check_unaligned_access_speed_all_cpus();
+ }
+
+ if (unaligned_vector_speed_param != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) {
+ if (!has_vector() &&
+ unaligned_vector_speed_param != RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED) {
+ pr_warn("vector support is not available, ignoring unaligned_vector_speed=%s\n",
+ speed_str[unaligned_vector_speed_param]);
+ } else {
+ pr_info("vector unaligned access speed set to '%s' (%lu) by command line\n",
+ speed_str[unaligned_vector_speed_param], unaligned_vector_speed_param);
+ }
}
if (!has_vector())
unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
- if (unaligned_vector_speed_param == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN &&
- !check_vector_unaligned_access_emulated_all_cpus() &&
- IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
- kthread_run(vec_check_unaligned_access_speed_all_cpus,
- NULL, "vec_check_unaligned_access_speed_all_cpus");
- } else {
- pr_info("vector unaligned access speed set to '%s' by command line\n",
- speed_str[unaligned_vector_speed_param]);
+ if (unaligned_vector_speed_param != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) {
for_each_online_cpu(cpu)
per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
+ } else if (!check_vector_unaligned_access_emulated_all_cpus() &&
+ IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
+ kthread_run(vec_check_unaligned_access_speed_all_cpus,
+ NULL, "vec_check_unaligned_access_speed_all_cpus");
}
/*
diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig
index 0c3cbb0915ff..704c2899197e 100644
--- a/arch/riscv/kvm/Kconfig
+++ b/arch/riscv/kvm/Kconfig
@@ -18,7 +18,7 @@ menuconfig VIRTUALIZATION
if VIRTUALIZATION
config KVM
- tristate "Kernel-based Virtual Machine (KVM) support (EXPERIMENTAL)"
+ tristate "Kernel-based Virtual Machine (KVM) support"
depends on RISCV_SBI && MMU
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
index 39cd26af5a69..806c41931cde 100644
--- a/arch/riscv/kvm/aia_device.c
+++ b/arch/riscv/kvm/aia_device.c
@@ -12,36 +12,6 @@
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
-static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
-{
- struct kvm_vcpu *tmp_vcpu;
-
- for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
- tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
- mutex_unlock(&tmp_vcpu->mutex);
- }
-}
-
-static void unlock_all_vcpus(struct kvm *kvm)
-{
- unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
-}
-
-static bool lock_all_vcpus(struct kvm *kvm)
-{
- struct kvm_vcpu *tmp_vcpu;
- unsigned long c;
-
- kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
- if (!mutex_trylock(&tmp_vcpu->mutex)) {
- unlock_vcpus(kvm, c - 1);
- return false;
- }
- }
-
- return true;
-}
-
static int aia_create(struct kvm_device *dev, u32 type)
{
int ret;
@@ -53,7 +23,7 @@ static int aia_create(struct kvm_device *dev, u32 type)
return -EEXIST;
ret = -EBUSY;
- if (!lock_all_vcpus(kvm))
+ if (kvm_trylock_all_vcpus(kvm))
return ret;
kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -65,7 +35,7 @@ static int aia_create(struct kvm_device *dev, u32 type)
kvm->arch.aia.in_kernel = true;
out_unlock:
- unlock_all_vcpus(kvm);
+ kvm_unlock_all_vcpus(kvm);
return ret;
}
@@ -526,12 +496,10 @@ int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
- struct kvm_vcpu_aia_csr *reset_csr =
- &vcpu->arch.aia_context.guest_reset_csr;
if (!kvm_riscv_aia_available())
return;
- memcpy(csr, reset_csr, sizeof(*csr));
+ memset(csr, 0, sizeof(*csr));
/* Proceed only if AIA was initialized successfully */
if (!kvm_riscv_aia_initialized(vcpu->kvm))
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 60d684c76c58..e0a01af426ff 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -51,12 +51,33 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
-static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
+static void kvm_riscv_vcpu_context_reset(struct kvm_vcpu *vcpu,
+ bool kvm_sbi_reset)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
- struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
- struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
+ void *vector_datap = cntx->vector.datap;
+
+ memset(cntx, 0, sizeof(*cntx));
+ memset(csr, 0, sizeof(*csr));
+ memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+
+ /* Restore datap as it's not a part of the guest context. */
+ cntx->vector.datap = vector_datap;
+
+ if (kvm_sbi_reset)
+ kvm_riscv_vcpu_sbi_load_reset_state(vcpu);
+
+ /* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
+ cntx->sstatus = SR_SPP | SR_SPIE;
+
+ cntx->hstatus |= HSTATUS_VTW;
+ cntx->hstatus |= HSTATUS_SPVP;
+ cntx->hstatus |= HSTATUS_SPV;
+}
+
+static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu, bool kvm_sbi_reset)
+{
bool loaded;
/**
@@ -71,11 +92,7 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.last_exit_cpu = -1;
- memcpy(csr, reset_csr, sizeof(*csr));
-
- spin_lock(&vcpu->arch.reset_cntx_lock);
- memcpy(cntx, reset_cntx, sizeof(*cntx));
- spin_unlock(&vcpu->arch.reset_cntx_lock);
+ kvm_riscv_vcpu_context_reset(vcpu, kvm_sbi_reset);
kvm_riscv_vcpu_fp_reset(vcpu);
@@ -110,8 +127,6 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
int rc;
- struct kvm_cpu_context *cntx;
- struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
spin_lock_init(&vcpu->arch.mp_state_lock);
@@ -131,24 +146,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
/* Setup VCPU hfence queue */
spin_lock_init(&vcpu->arch.hfence_lock);
- /* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
- spin_lock_init(&vcpu->arch.reset_cntx_lock);
+ spin_lock_init(&vcpu->arch.reset_state.lock);
- spin_lock(&vcpu->arch.reset_cntx_lock);
- cntx = &vcpu->arch.guest_reset_context;
- cntx->sstatus = SR_SPP | SR_SPIE;
- cntx->hstatus = 0;
- cntx->hstatus |= HSTATUS_VTW;
- cntx->hstatus |= HSTATUS_SPVP;
- cntx->hstatus |= HSTATUS_SPV;
- spin_unlock(&vcpu->arch.reset_cntx_lock);
-
- if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
+ if (kvm_riscv_vcpu_alloc_vector_context(vcpu))
return -ENOMEM;
- /* By default, make CY, TM, and IR counters accessible in VU mode */
- reset_csr->scounteren = 0x7;
-
/* Setup VCPU timer */
kvm_riscv_vcpu_timer_init(vcpu);
@@ -167,7 +169,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
kvm_riscv_vcpu_sbi_init(vcpu);
/* Reset VCPU */
- kvm_riscv_reset_vcpu(vcpu);
+ kvm_riscv_reset_vcpu(vcpu, false);
return 0;
}
@@ -516,6 +518,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
case KVM_MP_STATE_STOPPED:
__kvm_riscv_vcpu_power_off(vcpu);
break;
+ case KVM_MP_STATE_INIT_RECEIVED:
+ if (vcpu->kvm->arch.mp_state_reset)
+ kvm_riscv_reset_vcpu(vcpu, false);
+ else
+ ret = -EINVAL;
+ break;
default:
ret = -EINVAL;
}
@@ -704,7 +712,7 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
}
if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
- kvm_riscv_reset_vcpu(vcpu);
+ kvm_riscv_reset_vcpu(vcpu, true);
if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
kvm_riscv_gstage_update_hgatp(vcpu);
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index d1c83a77735e..6e09b518a5d1 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -143,9 +143,9 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
struct kvm_vcpu *tmp;
kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
- spin_lock(&vcpu->arch.mp_state_lock);
+ spin_lock(&tmp->arch.mp_state_lock);
WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
- spin_unlock(&vcpu->arch.mp_state_lock);
+ spin_unlock(&tmp->arch.mp_state_lock);
}
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
@@ -156,6 +156,34 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
+void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
+ unsigned long pc, unsigned long a1)
+{
+ spin_lock(&vcpu->arch.reset_state.lock);
+ vcpu->arch.reset_state.pc = pc;
+ vcpu->arch.reset_state.a1 = a1;
+ spin_unlock(&vcpu->arch.reset_state.lock);
+
+ kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+}
+
+void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+ struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+ struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;
+
+ cntx->a0 = vcpu->vcpu_id;
+
+ spin_lock(&vcpu->arch.reset_state.lock);
+ cntx->sepc = reset_state->pc;
+ cntx->a1 = reset_state->a1;
+ spin_unlock(&vcpu->arch.reset_state.lock);
+
+ cntx->sstatus &= ~SR_SIE;
+ csr->vsatp = 0;
+}
+
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
index 3070bb31745d..f26207f84bab 100644
--- a/arch/riscv/kvm/vcpu_sbi_hsm.c
+++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
@@ -15,7 +15,6 @@
static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *reset_cntx;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
struct kvm_vcpu *target_vcpu;
unsigned long target_vcpuid = cp->a0;
@@ -32,17 +31,7 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
goto out;
}
- spin_lock(&target_vcpu->arch.reset_cntx_lock);
- reset_cntx = &target_vcpu->arch.guest_reset_context;
- /* start address */
- reset_cntx->sepc = cp->a1;
- /* target vcpu id to start */
- reset_cntx->a0 = target_vcpuid;
- /* private data passed from kernel */
- reset_cntx->a1 = cp->a2;
- spin_unlock(&target_vcpu->arch.reset_cntx_lock);
-
- kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);
+ kvm_riscv_vcpu_sbi_request_reset(target_vcpu, cp->a1, cp->a2);
__kvm_riscv_vcpu_power_on(target_vcpu);
diff --git a/arch/riscv/kvm/vcpu_sbi_system.c b/arch/riscv/kvm/vcpu_sbi_system.c
index bc0ebba89003..359be90b0fc5 100644
--- a/arch/riscv/kvm/vcpu_sbi_system.c
+++ b/arch/riscv/kvm/vcpu_sbi_system.c
@@ -13,7 +13,6 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_vcpu_sbi_return *retdata)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
- struct kvm_cpu_context *reset_cntx;
unsigned long funcid = cp->a6;
unsigned long hva, i;
struct kvm_vcpu *tmp;
@@ -45,14 +44,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
}
}
- spin_lock(&vcpu->arch.reset_cntx_lock);
- reset_cntx = &vcpu->arch.guest_reset_context;
- reset_cntx->sepc = cp->a1;
- reset_cntx->a0 = vcpu->vcpu_id;
- reset_cntx->a1 = cp->a2;
- spin_unlock(&vcpu->arch.reset_cntx_lock);
-
- kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+ kvm_riscv_vcpu_sbi_request_reset(vcpu, cp->a1, cp->a2);
/* userspace provides the suspend implementation */
kvm_riscv_vcpu_sbi_forward(vcpu, run);
diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c
index d92d1348045c..a5f88cb717f3 100644
--- a/arch/riscv/kvm/vcpu_vector.c
+++ b/arch/riscv/kvm/vcpu_vector.c
@@ -22,6 +22,9 @@ void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
cntx->sstatus &= ~SR_VS;
+
+ cntx->vector.vlenb = riscv_v_vsize / 32;
+
if (riscv_isa_extension_available(isa, v)) {
cntx->sstatus |= SR_VS_INITIAL;
WARN_ON(!cntx->vector.datap);
@@ -70,13 +73,11 @@ void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
__kvm_riscv_vector_restore(cntx);
}
-int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
- struct kvm_cpu_context *cntx)
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu)
{
- cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
- if (!cntx->vector.datap)
+ vcpu->arch.guest_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
+ if (!vcpu->arch.guest_context.vector.datap)
return -ENOMEM;
- cntx->vector.vlenb = riscv_v_vsize / 32;
vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
if (!vcpu->arch.host_context.vector.datap)
@@ -87,7 +88,7 @@ int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
{
- kfree(vcpu->arch.guest_reset_context.vector.datap);
+ kfree(vcpu->arch.guest_context.vector.datap);
kfree(vcpu->arch.host_context.vector.datap);
}
#endif
diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c
index 7396b8654f45..b27ec8f96697 100644
--- a/arch/riscv/kvm/vm.c
+++ b/arch/riscv/kvm/vm.c
@@ -209,6 +209,19 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
return r;
}
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
+{
+ switch (cap->cap) {
+ case KVM_CAP_RISCV_MP_STATE_RESET:
+ if (cap->flags)
+ return -EINVAL;
+ kvm->arch.mp_state_reset = true;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
return -EINVAL;
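
For completeness, a hedged userspace sketch of how the new capability is consumed; the ioctls and structs are standard KVM uapi, the fds are assumed to come from KVM_CREATE_VM / KVM_CREATE_VCPU, and error handling is omitted:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Opt in on the VM fd, then INIT_RECEIVED on a vCPU fd triggers the
     * in-kernel reset path added in kvm_arch_vcpu_ioctl_set_mpstate(). */
    static void reset_vcpu(int vm_fd, int vcpu_fd)
    {
            struct kvm_enable_cap cap = { .cap = KVM_CAP_RISCV_MP_STATE_RESET };
            struct kvm_mp_state mps = { .mp_state = KVM_MP_STATE_INIT_RECEIVED };

            ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
            ioctl(vcpu_fd, KVM_SET_MP_STATE, &mps);
    }
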
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index b1c46153606a..0baec92d2f55 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-y += crypto/
lib-y += delay.o
lib-y += memcpy.o
lib-y += memset.o
diff --git a/arch/riscv/lib/crypto/Kconfig b/arch/riscv/lib/crypto/Kconfig
new file mode 100644
index 000000000000..47c99ea97ce2
--- /dev/null
+++ b/arch/riscv/lib/crypto/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CRYPTO_CHACHA_RISCV64
+ tristate
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ default CRYPTO_LIB_CHACHA
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+ select CRYPTO_LIB_CHACHA_GENERIC
+
+config CRYPTO_SHA256_RISCV64
+ tristate
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ default CRYPTO_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
+ select CRYPTO_LIB_SHA256_GENERIC
diff --git a/arch/riscv/lib/crypto/Makefile b/arch/riscv/lib/crypto/Makefile
new file mode 100644
index 000000000000..b7cb877a2c07
--- /dev/null
+++ b/arch/riscv/lib/crypto/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CRYPTO_CHACHA_RISCV64) += chacha-riscv64.o
+chacha-riscv64-y := chacha-riscv64-glue.o chacha-riscv64-zvkb.o
+
+obj-$(CONFIG_CRYPTO_SHA256_RISCV64) += sha256-riscv64.o
+sha256-riscv64-y := sha256.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o
diff --git a/arch/riscv/lib/crypto/chacha-riscv64-glue.c b/arch/riscv/lib/crypto/chacha-riscv64-glue.c
new file mode 100644
index 000000000000..8c3f11d79be3
--- /dev/null
+++ b/arch/riscv/lib/crypto/chacha-riscv64-glue.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ChaCha stream cipher (RISC-V optimized)
+ *
+ * Copyright (C) 2023 SiFive, Inc.
+ * Author: Jerry Shih <jerry.shih@sifive.com>
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_zvkb);
+
+asmlinkage void chacha_zvkb(struct chacha_state *state, const u8 *in, u8 *out,
+ size_t nblocks, int nrounds);
+
+void hchacha_block_arch(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds)
+{
+ hchacha_block_generic(state, out, nrounds);
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ u8 block_buffer[CHACHA_BLOCK_SIZE];
+ unsigned int full_blocks = bytes / CHACHA_BLOCK_SIZE;
+ unsigned int tail_bytes = bytes % CHACHA_BLOCK_SIZE;
+
+ if (!static_branch_likely(&use_zvkb) || !crypto_simd_usable())
+ return chacha_crypt_generic(state, dst, src, bytes, nrounds);
+
+ kernel_vector_begin();
+ if (full_blocks) {
+ chacha_zvkb(state, src, dst, full_blocks, nrounds);
+ src += full_blocks * CHACHA_BLOCK_SIZE;
+ dst += full_blocks * CHACHA_BLOCK_SIZE;
+ }
+ if (tail_bytes) {
+ memcpy(block_buffer, src, tail_bytes);
+ chacha_zvkb(state, block_buffer, block_buffer, 1, nrounds);
+ memcpy(dst, block_buffer, tail_bytes);
+ }
+ kernel_vector_end();
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+bool chacha_is_arch_optimized(void)
+{
+ return static_key_enabled(&use_zvkb);
+}
+EXPORT_SYMBOL(chacha_is_arch_optimized);
+
+static int __init riscv64_chacha_mod_init(void)
+{
+ if (riscv_isa_extension_available(NULL, ZVKB) &&
+ riscv_vector_vlen() >= 128)
+ static_branch_enable(&use_zvkb);
+ return 0;
+}
+subsys_initcall(riscv64_chacha_mod_init);
+
+static void __exit riscv64_chacha_mod_exit(void)
+{
+}
+module_exit(riscv64_chacha_mod_exit);
+
+MODULE_DESCRIPTION("ChaCha stream cipher (RISC-V optimized)");
+MODULE_AUTHOR("Jerry Shih <jerry.shih@sifive.com>");
+MODULE_LICENSE("GPL");
diff --git a/arch/riscv/crypto/chacha-riscv64-zvkb.S b/arch/riscv/lib/crypto/chacha-riscv64-zvkb.S
index bf057737ac69..b777d0b4e379 100644
--- a/arch/riscv/crypto/chacha-riscv64-zvkb.S
+++ b/arch/riscv/lib/crypto/chacha-riscv64-zvkb.S
@@ -46,11 +46,11 @@
.text
.option arch, +zvkb
-#define KEYP a0
+#define STATEP a0
#define INP a1
#define OUTP a2
-#define LEN a3
-#define IVP a4
+#define NBLOCKS a3
+#define NROUNDS a4
#define CONSTS0 a5
#define CONSTS1 a6
@@ -59,7 +59,7 @@
#define TMP t1
#define VL t2
#define STRIDE t3
-#define NROUNDS t4
+#define ROUND_CTR t4
#define KEY0 s0
#define KEY1 s1
#define KEY2 s2
@@ -132,14 +132,16 @@
vror.vi \b3, \b3, 32 - 7
.endm
-// void chacha20_zvkb(const u32 key[8], const u8 *in, u8 *out, size_t len,
-// const u32 iv[4]);
+// void chacha_zvkb(struct chacha_state *state, const u8 *in, u8 *out,
+// size_t nblocks, int nrounds);
//
-// |len| must be nonzero and a multiple of 64 (CHACHA_BLOCK_SIZE).
-// The counter is treated as 32-bit, following the RFC7539 convention.
-SYM_FUNC_START(chacha20_zvkb)
- srli LEN, LEN, 6 // Bytes to blocks
-
+// |nblocks| is the number of 64-byte blocks to process, and must be nonzero.
+//
+// |state| gives the ChaCha state matrix, including the 32-bit counter in
+// state->x[12] following the RFC7539 convention; note that this differs from
+// the original Salsa20 paper which uses a 64-bit counter in state->x[12..13].
+// The updated 32-bit counter is written back to state->x[12] before returning.
+SYM_FUNC_START(chacha_zvkb)
addi sp, sp, -96
sd s0, 0(sp)
sd s1, 8(sp)
@@ -157,26 +159,26 @@ SYM_FUNC_START(chacha20_zvkb)
li STRIDE, 64
// Set up the initial state matrix in scalar registers.
- li CONSTS0, 0x61707865 // "expa" little endian
- li CONSTS1, 0x3320646e // "nd 3" little endian
- li CONSTS2, 0x79622d32 // "2-by" little endian
- li CONSTS3, 0x6b206574 // "te k" little endian
- lw KEY0, 0(KEYP)
- lw KEY1, 4(KEYP)
- lw KEY2, 8(KEYP)
- lw KEY3, 12(KEYP)
- lw KEY4, 16(KEYP)
- lw KEY5, 20(KEYP)
- lw KEY6, 24(KEYP)
- lw KEY7, 28(KEYP)
- lw COUNTER, 0(IVP)
- lw NONCE0, 4(IVP)
- lw NONCE1, 8(IVP)
- lw NONCE2, 12(IVP)
+ lw CONSTS0, 0(STATEP)
+ lw CONSTS1, 4(STATEP)
+ lw CONSTS2, 8(STATEP)
+ lw CONSTS3, 12(STATEP)
+ lw KEY0, 16(STATEP)
+ lw KEY1, 20(STATEP)
+ lw KEY2, 24(STATEP)
+ lw KEY3, 28(STATEP)
+ lw KEY4, 32(STATEP)
+ lw KEY5, 36(STATEP)
+ lw KEY6, 40(STATEP)
+ lw KEY7, 44(STATEP)
+ lw COUNTER, 48(STATEP)
+ lw NONCE0, 52(STATEP)
+ lw NONCE1, 56(STATEP)
+ lw NONCE2, 60(STATEP)
.Lblock_loop:
// Set vl to the number of blocks to process in this iteration.
- vsetvli VL, LEN, e32, m1, ta, ma
+ vsetvli VL, NBLOCKS, e32, m1, ta, ma
// Set up the initial state matrix for the next VL blocks in v0-v15.
// v{i} holds the i'th 32-bit word of the state matrix for all blocks.
@@ -203,16 +205,16 @@ SYM_FUNC_START(chacha20_zvkb)
// v{16+i} holds the i'th 32-bit word for all blocks.
vlsseg8e32.v v16, (INP), STRIDE
- li NROUNDS, 20
+ mv ROUND_CTR, NROUNDS
.Lnext_doubleround:
- addi NROUNDS, NROUNDS, -2
+ addi ROUND_CTR, ROUND_CTR, -2
// column round
chacha_round v0, v4, v8, v12, v1, v5, v9, v13, \
v2, v6, v10, v14, v3, v7, v11, v15
// diagonal round
chacha_round v0, v5, v10, v15, v1, v6, v11, v12, \
v2, v7, v8, v13, v3, v4, v9, v14
- bnez NROUNDS, .Lnext_doubleround
+ bnez ROUND_CTR, .Lnext_doubleround
// Load the second half of the input data for each block into v24-v31.
// v{24+i} holds the {8+i}'th 32-bit word for all blocks.
@@ -271,12 +273,13 @@ SYM_FUNC_START(chacha20_zvkb)
// Update the counter, the remaining number of blocks, and the input and
// output pointers according to the number of blocks processed (VL).
add COUNTER, COUNTER, VL
- sub LEN, LEN, VL
+ sub NBLOCKS, NBLOCKS, VL
slli TMP, VL, 6
add OUTP, OUTP, TMP
add INP, INP, TMP
- bnez LEN, .Lblock_loop
+ bnez NBLOCKS, .Lblock_loop
+ sw COUNTER, 48(STATEP)
ld s0, 0(sp)
ld s1, 8(sp)
ld s2, 16(sp)
@@ -291,4 +294,4 @@ SYM_FUNC_START(chacha20_zvkb)
ld s11, 88(sp)
addi sp, sp, 96
ret
-SYM_FUNC_END(chacha20_zvkb)
+SYM_FUNC_END(chacha_zvkb)
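
The scalar loads above fix the state layout the new calling convention assumes. A purely illustrative C view of those offsets (the struct and field names are mine; only the byte offsets come from the code):

    struct chacha_state_layout {        /* illustrative, not a real kernel type      */
            u32 constants[4];           /* bytes  0..15: "expa" "nd 3" "2-by" "te k" */
            u32 key[8];                 /* bytes 16..47: 256-bit key                 */
            u32 counter;                /* byte  48: written back before returning   */
            u32 nonce[3];               /* bytes 52..63: 96-bit nonce                */
    };
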
diff --git a/arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S b/arch/riscv/lib/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S
index 8ebcc17de4dc..fad501ad0617 100644
--- a/arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S
+++ b/arch/riscv/lib/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S
@@ -43,7 +43,7 @@
// - RISC-V Vector SHA-2 Secure Hash extension ('Zvknha' or 'Zvknhb')
// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
-#include <linux/cfi_types.h>
+#include <linux/linkage.h>
.text
.option arch, +zvknha, +zvkb
@@ -106,9 +106,9 @@
sha256_4rounds \last, \k3, W3, W0, W1, W2
.endm
-// void sha256_transform_zvknha_or_zvknhb_zvkb(u32 state[8], const u8 *data,
-// int num_blocks);
-SYM_TYPED_FUNC_START(sha256_transform_zvknha_or_zvknhb_zvkb)
+// void sha256_transform_zvknha_or_zvknhb_zvkb(u32 state[SHA256_STATE_WORDS],
+// const u8 *data, size_t nblocks);
+SYM_FUNC_START(sha256_transform_zvknha_or_zvknhb_zvkb)
// Load the round constants into K0-K15.
vsetivli zero, 4, e32, m1, ta, ma
diff --git a/arch/riscv/lib/crypto/sha256.c b/arch/riscv/lib/crypto/sha256.c
new file mode 100644
index 000000000000..71808397dff4
--- /dev/null
+++ b/arch/riscv/lib/crypto/sha256.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-256 (RISC-V accelerated)
+ *
+ * Copyright (C) 2022 VRULL GmbH
+ * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
+ *
+ * Copyright (C) 2023 SiFive, Inc.
+ * Author: Jerry Shih <jerry.shih@sifive.com>
+ */
+
+#include <asm/vector.h>
+#include <crypto/internal/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+asmlinkage void sha256_transform_zvknha_or_zvknhb_zvkb(
+ u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_extensions);
+
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks)
+{
+ if (static_branch_likely(&have_extensions)) {
+ kernel_vector_begin();
+ sha256_transform_zvknha_or_zvknhb_zvkb(state, data, nblocks);
+ kernel_vector_end();
+ } else {
+ sha256_blocks_generic(state, data, nblocks);
+ }
+}
+EXPORT_SYMBOL_GPL(sha256_blocks_simd);
+
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks)
+{
+ sha256_blocks_generic(state, data, nblocks);
+}
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+
+bool sha256_is_arch_optimized(void)
+{
+ return static_key_enabled(&have_extensions);
+}
+EXPORT_SYMBOL_GPL(sha256_is_arch_optimized);
+
+static int __init riscv64_sha256_mod_init(void)
+{
+ /* Both zvknha and zvknhb provide the SHA-256 instructions. */
+ if ((riscv_isa_extension_available(NULL, ZVKNHA) ||
+ riscv_isa_extension_available(NULL, ZVKNHB)) &&
+ riscv_isa_extension_available(NULL, ZVKB) &&
+ riscv_vector_vlen() >= 128)
+ static_branch_enable(&have_extensions);
+ return 0;
+}
+subsys_initcall(riscv64_sha256_mod_init);
+
+static void __exit riscv64_sha256_mod_exit(void)
+{
+}
+module_exit(riscv64_sha256_mod_exit);
+
+MODULE_DESCRIPTION("SHA-256 (RISC-V accelerated)");
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
+MODULE_LICENSE("GPL");
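
A hedged usage sketch of the library interface this file plugs into: callers keep using the plain sha256() helper from <crypto/sha2.h>, and the Zvknh path is selected transparently through the static key above (digest_buf() is hypothetical):

    #include <crypto/sha2.h>

    static void digest_buf(const void *buf, size_t len,
                           u8 out[SHA256_DIGEST_SIZE])
    {
            /* Ends up in sha256_blocks_simd()/_arch() under the hood. */
            sha256(buf, len, out);
    }
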
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index b81672729887..b8e96dfff19d 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -172,7 +172,7 @@ static void set_icache_stale_mask(void)
stale_cpu = cpumask_test_cpu(cpu, mask);
cpumask_setall(mask);
- cpumask_assign_cpu(cpu, mask, stale_cpu);
+ __assign_cpu(cpu, mask, stale_cpu);
put_cpu();
}
#endif
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index ab475ec6ca42..8d0374d7ce8e 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -442,7 +442,12 @@ static phys_addr_t __meminit alloc_pte_late(uintptr_t va)
{
struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
- BUG_ON(!ptdesc || !pagetable_pte_ctor(ptdesc));
+ /*
+ * We do not know which mm the PTE page is associated to at this point.
+ * Passing NULL to the ctor is the safe option, though it may result
+ * in unnecessary work (e.g. initialising the ptlock for init_mm).
+ */
+ BUG_ON(!ptdesc || !pagetable_pte_ctor(NULL, ptdesc));
return __pa((pte_t *)ptdesc_address(ptdesc));
}
@@ -522,7 +527,8 @@ static phys_addr_t __meminit alloc_pmd_late(uintptr_t va)
{
struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
- BUG_ON(!ptdesc || !pagetable_pmd_ctor(ptdesc));
+	/* See comment in alloc_pte_late() regarding NULL being passed to the ctor */
+ BUG_ON(!ptdesc || !pagetable_pmd_ctor(NULL, ptdesc));
return __pa((pmd_t *)ptdesc_address(ptdesc));
}
@@ -584,11 +590,11 @@ static phys_addr_t __init alloc_pud_fixmap(uintptr_t va)
static phys_addr_t __meminit alloc_pud_late(uintptr_t va)
{
- unsigned long vaddr;
+ struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, 0);
- vaddr = __get_free_page(GFP_KERNEL);
- BUG_ON(!vaddr);
- return __pa(vaddr);
+ BUG_ON(!ptdesc);
+ pagetable_pud_ctor(ptdesc);
+ return __pa((pud_t *)ptdesc_address(ptdesc));
}
static p4d_t *__init get_p4d_virt_early(phys_addr_t pa)
@@ -622,11 +628,11 @@ static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va)
static phys_addr_t __meminit alloc_p4d_late(uintptr_t va)
{
- unsigned long vaddr;
+ struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, 0);
- vaddr = __get_free_page(GFP_KERNEL);
- BUG_ON(!vaddr);
- return __pa(vaddr);
+ BUG_ON(!ptdesc);
+ pagetable_p4d_ctor(ptdesc);
+ return __pa((p4d_t *)ptdesc_address(ptdesc));
}
static void __meminit create_pud_mapping(pud_t *pudp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
index 9d5f657a251b..32922550a50a 100644
--- a/arch/riscv/mm/ptdump.c
+++ b/arch/riscv/mm/ptdump.c
@@ -318,6 +318,38 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr,
}
}
+static void note_page_pte(struct ptdump_state *pt_st, unsigned long addr, pte_t pte)
+{
+ note_page(pt_st, addr, 4, pte_val(pte));
+}
+
+static void note_page_pmd(struct ptdump_state *pt_st, unsigned long addr, pmd_t pmd)
+{
+ note_page(pt_st, addr, 3, pmd_val(pmd));
+}
+
+static void note_page_pud(struct ptdump_state *pt_st, unsigned long addr, pud_t pud)
+{
+ note_page(pt_st, addr, 2, pud_val(pud));
+}
+
+static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d)
+{
+ note_page(pt_st, addr, 1, p4d_val(p4d));
+}
+
+static void note_page_pgd(struct ptdump_state *pt_st, unsigned long addr, pgd_t pgd)
+{
+ note_page(pt_st, addr, 0, pgd_val(pgd));
+}
+
+static void note_page_flush(struct ptdump_state *pt_st)
+{
+ pte_t pte_zero = {0};
+
+ note_page(pt_st, 0, -1, pte_val(pte_zero));
+}
+
static void ptdump_walk(struct seq_file *s, struct ptd_mm_info *pinfo)
{
struct pg_state st = {
@@ -325,7 +357,12 @@ static void ptdump_walk(struct seq_file *s, struct ptd_mm_info *pinfo)
.marker = pinfo->markers,
.level = -1,
.ptdump = {
- .note_page = note_page,
+ .note_page_pte = note_page_pte,
+ .note_page_pmd = note_page_pmd,
+ .note_page_pud = note_page_pud,
+ .note_page_p4d = note_page_p4d,
+ .note_page_pgd = note_page_pgd,
+ .note_page_flush = note_page_flush,
.range = (struct ptdump_range[]) {
{pinfo->base_addr, pinfo->end},
{0, 0}
@@ -347,7 +384,12 @@ bool ptdump_check_wx(void)
.level = -1,
.check_wx = true,
.ptdump = {
- .note_page = note_page,
+ .note_page_pte = note_page_pte,
+ .note_page_pmd = note_page_pmd,
+ .note_page_pud = note_page_pud,
+ .note_page_p4d = note_page_p4d,
+ .note_page_pgd = note_page_pgd,
+ .note_page_flush = note_page_flush,
.range = (struct ptdump_range[]) {
{KERN_VIRT_START, ULONG_MAX},
{0, 0}
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index 1d1c78d4cff1..e7b032dfd17f 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -608,6 +608,21 @@ static inline u32 rv_fence(u8 pred, u8 succ)
return rv_i_insn(imm11_0, 0, 0, 0, 0xf);
}
+static inline void emit_fence_r_rw(struct rv_jit_context *ctx)
+{
+ emit(rv_fence(0x2, 0x3), ctx);
+}
+
+static inline void emit_fence_rw_w(struct rv_jit_context *ctx)
+{
+ emit(rv_fence(0x3, 0x1), ctx);
+}
+
+static inline void emit_fence_rw_rw(struct rv_jit_context *ctx)
+{
+ emit(rv_fence(0x3, 0x3), ctx);
+}
+
static inline u32 rv_nop(void)
{
return rv_i_insn(0, 0, 0, 0, 0x13);
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index ca60db75199d..10e01ff06312 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -473,11 +473,212 @@ static inline void emit_kcfi(u32 hash, struct rv_jit_context *ctx)
emit(hash, ctx);
}
-static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
- struct rv_jit_context *ctx)
+static int emit_load_8(bool sign_ext, u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
+{
+ int insns_start;
+
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ if (sign_ext)
+ emit(rv_lb(rd, off, rs), ctx);
+ else
+ emit(rv_lbu(rd, off, rs), ctx);
+ return ctx->ninsns - insns_start;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+ insns_start = ctx->ninsns;
+ if (sign_ext)
+ emit(rv_lb(rd, 0, RV_REG_T1), ctx);
+ else
+ emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
+ return ctx->ninsns - insns_start;
+}
+
+static int emit_load_16(bool sign_ext, u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
+{
+ int insns_start;
+
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ if (sign_ext)
+ emit(rv_lh(rd, off, rs), ctx);
+ else
+ emit(rv_lhu(rd, off, rs), ctx);
+ return ctx->ninsns - insns_start;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+ insns_start = ctx->ninsns;
+ if (sign_ext)
+ emit(rv_lh(rd, 0, RV_REG_T1), ctx);
+ else
+ emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
+ return ctx->ninsns - insns_start;
+}
+
+static int emit_load_32(bool sign_ext, u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
+{
+ int insns_start;
+
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ if (sign_ext)
+ emit(rv_lw(rd, off, rs), ctx);
+ else
+ emit(rv_lwu(rd, off, rs), ctx);
+ return ctx->ninsns - insns_start;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+ insns_start = ctx->ninsns;
+ if (sign_ext)
+ emit(rv_lw(rd, 0, RV_REG_T1), ctx);
+ else
+ emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
+ return ctx->ninsns - insns_start;
+}
+
+static int emit_load_64(bool sign_ext, u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
+{
+ int insns_start;
+
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit_ld(rd, off, rs, ctx);
+ return ctx->ninsns - insns_start;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+ insns_start = ctx->ninsns;
+ emit_ld(rd, 0, RV_REG_T1, ctx);
+ return ctx->ninsns - insns_start;
+}
+
+static void emit_store_8(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
+{
+ if (is_12b_int(off)) {
+ emit(rv_sb(rd, off, rs), ctx);
+ return;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ emit(rv_sb(RV_REG_T1, 0, rs), ctx);
+}
+
+static void emit_store_16(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
+{
+ if (is_12b_int(off)) {
+ emit(rv_sh(rd, off, rs), ctx);
+ return;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ emit(rv_sh(RV_REG_T1, 0, rs), ctx);
+}
+
+static void emit_store_32(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
+{
+ if (is_12b_int(off)) {
+ emit_sw(rd, off, rs, ctx);
+ return;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ emit_sw(RV_REG_T1, 0, rs, ctx);
+}
+
+static void emit_store_64(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
+{
+ if (is_12b_int(off)) {
+ emit_sd(rd, off, rs, ctx);
+ return;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ emit_sd(RV_REG_T1, 0, rs, ctx);
+}
+
+static int emit_atomic_ld_st(u8 rd, u8 rs, const struct bpf_insn *insn,
+ struct rv_jit_context *ctx)
+{
+ u8 code = insn->code;
+ s32 imm = insn->imm;
+ s16 off = insn->off;
+
+ switch (imm) {
+ /* dst_reg = load_acquire(src_reg + off16) */
+ case BPF_LOAD_ACQ:
+ switch (BPF_SIZE(code)) {
+ case BPF_B:
+ emit_load_8(false, rd, off, rs, ctx);
+ break;
+ case BPF_H:
+ emit_load_16(false, rd, off, rs, ctx);
+ break;
+ case BPF_W:
+ emit_load_32(false, rd, off, rs, ctx);
+ break;
+ case BPF_DW:
+ emit_load_64(false, rd, off, rs, ctx);
+ break;
+ }
+ emit_fence_r_rw(ctx);
+
+ /* If our next insn is a redundant zext, return 1 to tell
+ * build_body() to skip it.
+ */
+ if (BPF_SIZE(code) != BPF_DW && insn_is_zext(&insn[1]))
+ return 1;
+ break;
+ /* store_release(dst_reg + off16, src_reg) */
+ case BPF_STORE_REL:
+ emit_fence_rw_w(ctx);
+ switch (BPF_SIZE(code)) {
+ case BPF_B:
+ emit_store_8(rd, off, rs, ctx);
+ break;
+ case BPF_H:
+ emit_store_16(rd, off, rs, ctx);
+ break;
+ case BPF_W:
+ emit_store_32(rd, off, rs, ctx);
+ break;
+ case BPF_DW:
+ emit_store_64(rd, off, rs, ctx);
+ break;
+ }
+ break;
+ default:
+ pr_err_once("bpf-jit: invalid atomic load/store opcode %02x\n", imm);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int emit_atomic_rmw(u8 rd, u8 rs, const struct bpf_insn *insn,
+ struct rv_jit_context *ctx)
{
- u8 r0;
+ u8 r0, code = insn->code;
+ s16 off = insn->off;
+ s32 imm = insn->imm;
int jmp_offset;
+ bool is64;
+
+ if (BPF_SIZE(code) != BPF_W && BPF_SIZE(code) != BPF_DW) {
+ pr_err_once("bpf-jit: 1- and 2-byte RMW atomics are not supported\n");
+ return -EINVAL;
+ }
+ is64 = BPF_SIZE(code) == BPF_DW;
if (off) {
if (is_12b_int(off)) {
@@ -554,9 +755,14 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
rv_sc_w(RV_REG_T3, rs, rd, 0, 1), ctx);
jmp_offset = ninsns_rvoff(-6);
emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx);
- emit(rv_fence(0x3, 0x3), ctx);
+ emit_fence_rw_rw(ctx);
break;
+ default:
+ pr_err_once("bpf-jit: invalid atomic RMW opcode %02x\n", imm);
+ return -EINVAL;
}
+
+ return 0;
}
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
@@ -1650,8 +1856,8 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
{
- int insn_len, insns_start;
bool sign_ext;
+ int insn_len;
sign_ext = BPF_MODE(insn->code) == BPF_MEMSX ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
@@ -1663,78 +1869,16 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
switch (BPF_SIZE(code)) {
case BPF_B:
- if (is_12b_int(off)) {
- insns_start = ctx->ninsns;
- if (sign_ext)
- emit(rv_lb(rd, off, rs), ctx);
- else
- emit(rv_lbu(rd, off, rs), ctx);
- insn_len = ctx->ninsns - insns_start;
- break;
- }
-
- emit_imm(RV_REG_T1, off, ctx);
- emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
- insns_start = ctx->ninsns;
- if (sign_ext)
- emit(rv_lb(rd, 0, RV_REG_T1), ctx);
- else
- emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
- insn_len = ctx->ninsns - insns_start;
+ insn_len = emit_load_8(sign_ext, rd, off, rs, ctx);
break;
case BPF_H:
- if (is_12b_int(off)) {
- insns_start = ctx->ninsns;
- if (sign_ext)
- emit(rv_lh(rd, off, rs), ctx);
- else
- emit(rv_lhu(rd, off, rs), ctx);
- insn_len = ctx->ninsns - insns_start;
- break;
- }
-
- emit_imm(RV_REG_T1, off, ctx);
- emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
- insns_start = ctx->ninsns;
- if (sign_ext)
- emit(rv_lh(rd, 0, RV_REG_T1), ctx);
- else
- emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
- insn_len = ctx->ninsns - insns_start;
+ insn_len = emit_load_16(sign_ext, rd, off, rs, ctx);
break;
case BPF_W:
- if (is_12b_int(off)) {
- insns_start = ctx->ninsns;
- if (sign_ext)
- emit(rv_lw(rd, off, rs), ctx);
- else
- emit(rv_lwu(rd, off, rs), ctx);
- insn_len = ctx->ninsns - insns_start;
- break;
- }
-
- emit_imm(RV_REG_T1, off, ctx);
- emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
- insns_start = ctx->ninsns;
- if (sign_ext)
- emit(rv_lw(rd, 0, RV_REG_T1), ctx);
- else
- emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
- insn_len = ctx->ninsns - insns_start;
+ insn_len = emit_load_32(sign_ext, rd, off, rs, ctx);
break;
case BPF_DW:
- if (is_12b_int(off)) {
- insns_start = ctx->ninsns;
- emit_ld(rd, off, rs, ctx);
- insn_len = ctx->ninsns - insns_start;
- break;
- }
-
- emit_imm(RV_REG_T1, off, ctx);
- emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
- insns_start = ctx->ninsns;
- emit_ld(rd, 0, RV_REG_T1, ctx);
- insn_len = ctx->ninsns - insns_start;
+ insn_len = emit_load_64(sign_ext, rd, off, rs, ctx);
break;
}
@@ -1879,49 +2023,27 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
/* STX: *(size *)(dst + off) = src */
case BPF_STX | BPF_MEM | BPF_B:
- if (is_12b_int(off)) {
- emit(rv_sb(rd, off, rs), ctx);
- break;
- }
-
- emit_imm(RV_REG_T1, off, ctx);
- emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
- emit(rv_sb(RV_REG_T1, 0, rs), ctx);
+ emit_store_8(rd, off, rs, ctx);
break;
case BPF_STX | BPF_MEM | BPF_H:
- if (is_12b_int(off)) {
- emit(rv_sh(rd, off, rs), ctx);
- break;
- }
-
- emit_imm(RV_REG_T1, off, ctx);
- emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
- emit(rv_sh(RV_REG_T1, 0, rs), ctx);
+ emit_store_16(rd, off, rs, ctx);
break;
case BPF_STX | BPF_MEM | BPF_W:
- if (is_12b_int(off)) {
- emit_sw(rd, off, rs, ctx);
- break;
- }
-
- emit_imm(RV_REG_T1, off, ctx);
- emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
- emit_sw(RV_REG_T1, 0, rs, ctx);
+ emit_store_32(rd, off, rs, ctx);
break;
case BPF_STX | BPF_MEM | BPF_DW:
- if (is_12b_int(off)) {
- emit_sd(rd, off, rs, ctx);
- break;
- }
-
- emit_imm(RV_REG_T1, off, ctx);
- emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
- emit_sd(RV_REG_T1, 0, rs, ctx);
+ emit_store_64(rd, off, rs, ctx);
break;
+ case BPF_STX | BPF_ATOMIC | BPF_B:
+ case BPF_STX | BPF_ATOMIC | BPF_H:
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
- emit_atomic(rd, rs, off, imm,
- BPF_SIZE(code) == BPF_DW, ctx);
+ if (bpf_atomic_is_load_store(insn))
+ ret = emit_atomic_ld_st(rd, rs, insn, ctx);
+ else
+ ret = emit_atomic_rmw(rd, rs, insn, ctx);
+ if (ret)
+ return ret;
break;
case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
index f8cd2f70a7fb..f6ca5cfa6b2f 100644
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -26,9 +26,8 @@ static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
int ret;
ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
- /* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */
if (ret > 0)
- i++;
+ i++; /* skip the next instruction */
if (offset)
offset[i] = ctx->ninsns;
if (ret < 0)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index db8161ebb43c..0c16dc443e2f 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -146,6 +146,7 @@ config S390
select ARCH_WANTS_NO_INSTR
select ARCH_WANT_DEFAULT_BPF_JIT
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WANT_KERNEL_PMD_MKWRITE
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
@@ -332,6 +333,10 @@ config HAVE_MARCH_Z16_FEATURES
def_bool n
select HAVE_MARCH_Z15_FEATURES
+config HAVE_MARCH_Z17_FEATURES
+ def_bool n
+ select HAVE_MARCH_Z16_FEATURES
+
choice
prompt "Processor type"
default MARCH_Z196
@@ -397,6 +402,14 @@ config MARCH_Z16
Select this to enable optimizations for IBM z16 (3931 and
3932 series).
+config MARCH_Z17
+ bool "IBM z17"
+ select HAVE_MARCH_Z17_FEATURES
+ depends on $(cc-option,-march=z17)
+ help
+ Select this to enable optimizations for IBM z17 (9175 and
+ 9176 series).
+
endchoice
config MARCH_Z10_TUNE
@@ -420,6 +433,9 @@ config MARCH_Z15_TUNE
config MARCH_Z16_TUNE
def_bool TUNE_Z16 || MARCH_Z16 && TUNE_DEFAULT
+config MARCH_Z17_TUNE
+ def_bool TUNE_Z17 || MARCH_Z17 && TUNE_DEFAULT
+
choice
prompt "Tune code generation"
default TUNE_DEFAULT
@@ -464,6 +480,10 @@ config TUNE_Z16
bool "IBM z16"
depends on $(cc-option,-mtune=z16)
+config TUNE_Z17
+ bool "IBM z17"
+ depends on $(cc-option,-mtune=z17)
+
endchoice
config 64BIT
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index b06dc53bfed5..7679bc16b692 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -48,6 +48,7 @@ mflags-$(CONFIG_MARCH_Z13) := -march=z13
mflags-$(CONFIG_MARCH_Z14) := -march=z14
mflags-$(CONFIG_MARCH_Z15) := -march=z15
mflags-$(CONFIG_MARCH_Z16) := -march=z16
+mflags-$(CONFIG_MARCH_Z17) := -march=z17
export CC_FLAGS_MARCH := $(mflags-y)
@@ -61,6 +62,7 @@ cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
cflags-$(CONFIG_MARCH_Z14_TUNE) += -mtune=z14
cflags-$(CONFIG_MARCH_Z15_TUNE) += -mtune=z15
cflags-$(CONFIG_MARCH_Z16_TUNE) += -mtune=z16
+cflags-$(CONFIG_MARCH_Z17_TUNE) += -mtune=z17
cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index d04e9b89d14a..f584d7da29cb 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -179,7 +179,7 @@ void setup_boot_command_line(void)
if (has_ebcdic_char(parmarea.command_line))
EBCASC(parmarea.command_line, COMMAND_LINE_SIZE);
/* copy arch command line */
- strcpy(early_command_line, strim(parmarea.command_line));
+ strscpy(early_command_line, strim(parmarea.command_line));
/* append IPL PARM data to the boot command line */
if (!is_prot_virt_guest() && ipl_block_valid)
@@ -253,7 +253,8 @@ void parse_boot_command_line(void)
int rc;
__kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE);
- args = strcpy(command_line_buf, early_command_line);
+ strscpy(command_line_buf, early_command_line);
+ args = command_line_buf;
while (*args) {
args = next_arg(args, &param, &val);
@@ -309,7 +310,7 @@ void parse_boot_command_line(void)
if (!strcmp(param, "bootdebug")) {
bootdebug = true;
if (val)
- strncpy(bootdebug_filter, val, sizeof(bootdebug_filter) - 1);
+ strscpy(bootdebug_filter, val);
}
if (!strcmp(param, "quiet"))
boot_console_loglevel = CONSOLE_LOGLEVEL_QUIET;
diff --git a/arch/s390/boot/printk.c b/arch/s390/boot/printk.c
index 8cf6331bc060..4bb6bc95704e 100644
--- a/arch/s390/boot/printk.c
+++ b/arch/s390/boot/printk.c
@@ -29,7 +29,8 @@ static void boot_rb_add(const char *str, size_t len)
/* store strings separated by '\0' */
if (len + 1 > avail)
boot_rb_off = 0;
- strcpy(boot_rb + boot_rb_off, str);
+ avail = sizeof(boot_rb) - boot_rb_off - 1;
+ strscpy(boot_rb + boot_rb_off, str, avail);
boot_rb_off += len + 1;
}
@@ -158,10 +159,10 @@ static noinline char *strsym(char *buf, void *ip)
p = findsym((unsigned long)ip, &off, &len);
if (p) {
- strncpy(buf, p, MAX_SYMLEN);
+ strscpy(buf, p, MAX_SYMLEN);
/* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */
p = buf + strnlen(buf, MAX_SYMLEN - 15);
- strcpy(p, "+0x");
+ strscpy(p, "+0x", MAX_SYMLEN - (p - buf));
as_hex(p + 3, off, 0);
strcat(p, "/0x");
as_hex(p + strlen(p), len, 0);
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 06316fb8e0fa..da8337e63a3e 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -6,6 +6,7 @@
#include <asm/boot_data.h>
#include <asm/extmem.h>
#include <asm/sections.h>
+#include <asm/diag288.h>
#include <asm/maccess.h>
#include <asm/machine.h>
#include <asm/sysinfo.h>
@@ -71,6 +72,20 @@ static void detect_machine_type(void)
set_machine_feature(MFEATURE_VM);
}
+static void detect_diag288(void)
+{
+ /* "BEGIN" in EBCDIC character set */
+ static const char cmd[] = "\xc2\xc5\xc7\xc9\xd5";
+ unsigned long action, len;
+
+ action = machine_is_vm() ? (unsigned long)cmd : LPARWDT_RESTART;
+ len = machine_is_vm() ? sizeof(cmd) : 0;
+ if (__diag288(WDT_FUNC_INIT, MIN_INTERVAL, action, len))
+ return;
+ __diag288(WDT_FUNC_CANCEL, 0, 0, 0);
+ set_machine_feature(MFEATURE_DIAG288);
+}
+
static void detect_diag9c(void)
{
unsigned int cpu;
@@ -519,6 +534,8 @@ void startup_kernel(void)
detect_facilities();
detect_diag9c();
detect_machine_type();
+ /* detect_diag288() needs machine type */
+ detect_diag288();
cmma_init();
sanitize_prot_virt_host();
max_physmem_end = detect_max_physmem_end();
diff --git a/arch/s390/boot/string.c b/arch/s390/boot/string.c
index f6b9b1df48a8..bd68161434a6 100644
--- a/arch/s390/boot/string.c
+++ b/arch/s390/boot/string.c
@@ -29,6 +29,18 @@ int strncmp(const char *cs, const char *ct, size_t count)
return 0;
}
+ssize_t sized_strscpy(char *dst, const char *src, size_t count)
+{
+ size_t len;
+
+ if (count == 0)
+ return -E2BIG;
+ len = strnlen(src, count - 1);
+ memcpy(dst, src, len);
+ dst[len] = '\0';
+ return src[len] ? -E2BIG : len;
+}
+
void *memset64(uint64_t *s, uint64_t v, size_t count)
{
uint64_t *xs = s;
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 6f2c9ce1b154..8ecad727497e 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -38,7 +38,6 @@ CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
-# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
@@ -92,7 +91,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
-CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLAB_BUCKETS=y
CONFIG_SLUB_STATS=y
@@ -395,6 +393,9 @@ CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_EMATCH=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
@@ -405,6 +406,9 @@ CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_ACT_CT=m
CONFIG_NET_ACT_GATE=m
CONFIG_NET_TC_SKB_EXT=y
CONFIG_DNS_RESOLVER=y
@@ -628,8 +632,16 @@ CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_MEM=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_VDPA=m
+CONFIG_VDPA_SIM=m
+CONFIG_VDPA_SIM_NET=m
+CONFIG_VDPA_SIM_BLOCK=m
+CONFIG_VDPA_USER=m
+CONFIG_MLX5_VDPA_NET=m
+CONFIG_VP_VDPA=m
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
+CONFIG_VHOST_VDPA=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -654,7 +666,6 @@ CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=y
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
-CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y
@@ -724,11 +735,10 @@ CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_UNICODE=y
CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
-CONFIG_HARDENED_USERCOPY=y
-CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_LOCKDOWN_LSM=y
@@ -741,12 +751,14 @@ CONFIG_IMA=y
CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_SELFTESTS=y
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
@@ -756,7 +768,6 @@ CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
@@ -795,13 +806,11 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_SHA1_S390=m
-CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA3_256_S390=m
CONFIG_CRYPTO_SHA3_512_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
-CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
@@ -812,9 +821,9 @@ CONFIG_PKEY_UV=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_CRYPTO_KRB5=m
+CONFIG_CRYPTO_KRB5_SELFTESTS=y
CONFIG_CORDIC=m
-CONFIG_CRYPTO_LIB_CURVE25519=m
-CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_RANDOM32_SELFTEST=y
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index f18a7d97ac21..c13a77765162 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -36,7 +36,6 @@ CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
-# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
@@ -86,7 +85,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
-CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLAB_BUCKETS=y
# CONFIG_COMPAT_BRK is not set
@@ -385,6 +383,9 @@ CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_EMATCH=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
@@ -395,6 +396,9 @@ CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_ACT_CT=m
CONFIG_NET_ACT_GATE=m
CONFIG_NET_TC_SKB_EXT=y
CONFIG_DNS_RESOLVER=y
@@ -618,8 +622,16 @@ CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_MEM=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_VDPA=m
+CONFIG_VDPA_SIM=m
+CONFIG_VDPA_SIM_NET=m
+CONFIG_VDPA_SIM_BLOCK=m
+CONFIG_VDPA_USER=m
+CONFIG_MLX5_VDPA_NET=m
+CONFIG_VP_VDPA=m
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
+CONFIG_VHOST_VDPA=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -641,7 +653,6 @@ CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
-CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y
@@ -711,6 +722,7 @@ CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_UNICODE=y
CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
@@ -729,10 +741,10 @@ CONFIG_IMA_APPRAISE=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_SELFTESTS=y
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
@@ -742,7 +754,6 @@ CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
@@ -782,13 +793,11 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_SHA1_S390=m
-CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA3_256_S390=m
CONFIG_CRYPTO_SHA3_512_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
-CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
@@ -799,10 +808,10 @@ CONFIG_PKEY_UV=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_CRYPTO_KRB5=m
+CONFIG_CRYPTO_KRB5_SELFTESTS=y
CONFIG_CORDIC=m
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRYPTO_LIB_CURVE25519=m
-CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 853b2326a171..8163c1702720 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -70,7 +70,6 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_FS=y
CONFIG_PANIC_ON_OOPS=y
-# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_RCU_TRACE is not set
# CONFIG_FTRACE is not set
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
index 8c4db8b64fa2..e2c27588b21a 100644
--- a/arch/s390/crypto/Kconfig
+++ b/arch/s390/crypto/Kconfig
@@ -4,7 +4,6 @@ menu "Accelerated Cryptographic Algorithms for CPU (s390)"
config CRYPTO_SHA512_S390
tristate "Hash functions: SHA-384 and SHA-512"
- depends on S390
select CRYPTO_HASH
help
SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
@@ -15,7 +14,6 @@ config CRYPTO_SHA512_S390
config CRYPTO_SHA1_S390
tristate "Hash functions: SHA-1"
- depends on S390
select CRYPTO_HASH
help
SHA-1 secure hash algorithm (FIPS 180)
@@ -24,20 +22,8 @@ config CRYPTO_SHA1_S390
It is available as of z990.
-config CRYPTO_SHA256_S390
- tristate "Hash functions: SHA-224 and SHA-256"
- depends on S390
- select CRYPTO_HASH
- help
- SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
- Architecture: s390
-
- It is available as of z9.
-
config CRYPTO_SHA3_256_S390
tristate "Hash functions: SHA3-224 and SHA3-256"
- depends on S390
select CRYPTO_HASH
help
SHA3-224 and SHA3-256 secure hash algorithms (FIPS 202)
@@ -48,7 +34,6 @@ config CRYPTO_SHA3_256_S390
config CRYPTO_SHA3_512_S390
tristate "Hash functions: SHA3-384 and SHA3-512"
- depends on S390
select CRYPTO_HASH
help
SHA3-384 and SHA3-512 secure hash algorithms (FIPS 202)
@@ -59,7 +44,6 @@ config CRYPTO_SHA3_512_S390
config CRYPTO_GHASH_S390
tristate "Hash functions: GHASH"
- depends on S390
select CRYPTO_HASH
help
GCM GHASH hash function (NIST SP800-38D)
@@ -70,7 +54,6 @@ config CRYPTO_GHASH_S390
config CRYPTO_AES_S390
tristate "Ciphers: AES, modes: ECB, CBC, CTR, XTS, GCM"
- depends on S390
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
help
@@ -92,7 +75,6 @@ config CRYPTO_AES_S390
config CRYPTO_DES_S390
tristate "Ciphers: DES and Triple DES EDE, modes: ECB, CBC, CTR"
- depends on S390
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
@@ -107,23 +89,8 @@ config CRYPTO_DES_S390
As of z990 the ECB and CBC mode are hardware accelerated.
As of z196 the CTR mode is hardware accelerated.
-config CRYPTO_CHACHA_S390
- tristate
- depends on S390
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_CHACHA_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
- default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving cipher: ChaCha20 stream cipher (RFC 7539)
-
- Architecture: s390
-
- It is available as of z13.
-
config CRYPTO_HMAC_S390
tristate "Keyed-hash message authentication code: HMAC"
- depends on S390
select CRYPTO_HASH
help
s390 specific HMAC hardware support for SHA224, SHA256, SHA384 and
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 14dafadbcbed..21757d86cd49 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -4,17 +4,13 @@
#
obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o
-obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_SHA3_256_S390) += sha3_256_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_SHA3_512_S390) += sha3_512_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o
-obj-$(CONFIG_CRYPTO_CHACHA_S390) += chacha_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
obj-$(CONFIG_CRYPTO_HMAC_S390) += hmac_s390.o
obj-y += arch_random.o
-
-chacha_s390-y := chacha-glue.o chacha-s390.o
diff --git a/arch/s390/crypto/chacha-glue.c b/arch/s390/crypto/chacha-glue.c
deleted file mode 100644
index 920e9f0941e7..000000000000
--- a/arch/s390/crypto/chacha-glue.c
+++ /dev/null
@@ -1,124 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * s390 ChaCha stream cipher.
- *
- * Copyright IBM Corp. 2021
- */
-
-#define KMSG_COMPONENT "chacha_s390"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/algapi.h>
-#include <linux/cpufeature.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sizes.h>
-#include <asm/fpu.h>
-#include "chacha-s390.h"
-
-static void chacha20_crypt_s390(u32 *state, u8 *dst, const u8 *src,
- unsigned int nbytes, const u32 *key,
- u32 *counter)
-{
- DECLARE_KERNEL_FPU_ONSTACK32(vxstate);
-
- kernel_fpu_begin(&vxstate, KERNEL_VXR);
- chacha20_vx(dst, src, nbytes, key, counter);
- kernel_fpu_end(&vxstate, KERNEL_VXR);
-
- *counter += round_up(nbytes, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE;
-}
-
-static int chacha20_s390(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 state[CHACHA_STATE_WORDS] __aligned(16);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int rc;
-
- rc = skcipher_walk_virt(&walk, req, false);
- chacha_init(state, ctx->key, req->iv);
-
- while (walk.nbytes > 0) {
- nbytes = walk.nbytes;
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- if (nbytes <= CHACHA_BLOCK_SIZE) {
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes,
- ctx->nrounds);
- } else {
- chacha20_crypt_s390(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes,
- &state[4], &state[12]);
- }
- rc = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
- return rc;
-}
-
-void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
-{
- /* TODO: implement hchacha_block_arch() in assembly */
- hchacha_block_generic(state, stream, nrounds);
-}
-EXPORT_SYMBOL(hchacha_block_arch);
-
-void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds)
-{
- /* s390 chacha20 implementation has 20 rounds hard-coded,
- * it cannot handle a block of data or less, but otherwise
- * it can handle data of arbitrary size
- */
- if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20 || !cpu_has_vx())
- chacha_crypt_generic(state, dst, src, bytes, nrounds);
- else
- chacha20_crypt_s390(state, dst, src, bytes,
- &state[4], &state[12]);
-}
-EXPORT_SYMBOL(chacha_crypt_arch);
-
-static struct skcipher_alg chacha_algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-s390",
- .base.cra_priority = 900,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha20_s390,
- .decrypt = chacha20_s390,
- }
-};
-
-static int __init chacha_mod_init(void)
-{
- return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
- crypto_register_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs)) : 0;
-}
-
-static void __exit chacha_mod_fini(void)
-{
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER))
- crypto_unregister_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs));
-}
-
-module_cpu_feature_match(S390_CPU_FEATURE_VXRS, chacha_mod_init);
-module_exit(chacha_mod_fini);
-
-MODULE_DESCRIPTION("ChaCha20 stream cipher");
-MODULE_LICENSE("GPL v2");
-
-MODULE_ALIAS_CRYPTO("chacha20");
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 0800a2a5799f..dcbcee37cb63 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -8,29 +8,28 @@
* Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
+#include <asm/cpacf.h>
+#include <crypto/ghash.h>
#include <crypto/internal/hash.h>
-#include <linux/module.h>
#include <linux/cpufeature.h>
-#include <asm/cpacf.h>
-
-#define GHASH_BLOCK_SIZE 16
-#define GHASH_DIGEST_SIZE 16
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
-struct ghash_ctx {
+struct s390_ghash_ctx {
u8 key[GHASH_BLOCK_SIZE];
};
-struct ghash_desc_ctx {
+struct s390_ghash_desc_ctx {
u8 icv[GHASH_BLOCK_SIZE];
u8 key[GHASH_BLOCK_SIZE];
- u8 buffer[GHASH_BLOCK_SIZE];
- u32 bytes;
};
static int ghash_init(struct shash_desc *desc)
{
- struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ struct s390_ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ struct s390_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
memset(dctx, 0, sizeof(*dctx));
memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
@@ -41,7 +40,7 @@ static int ghash_init(struct shash_desc *desc)
static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
- struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+ struct s390_ghash_ctx *ctx = crypto_shash_ctx(tfm);
if (keylen != GHASH_BLOCK_SIZE)
return -EINVAL;
@@ -54,80 +53,71 @@ static int ghash_setkey(struct crypto_shash *tfm,
static int ghash_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
- struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct s390_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
unsigned int n;
- u8 *buf = dctx->buffer;
-
- if (dctx->bytes) {
- u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
- n = min(srclen, dctx->bytes);
- dctx->bytes -= n;
- srclen -= n;
-
- memcpy(pos, src, n);
- src += n;
+ n = srclen & ~(GHASH_BLOCK_SIZE - 1);
+ cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
+ return srclen - n;
+}
- if (!dctx->bytes) {
- cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf,
- GHASH_BLOCK_SIZE);
- }
- }
+static void ghash_flush(struct s390_ghash_desc_ctx *dctx, const u8 *src,
+ unsigned int len)
+{
+ if (len) {
+ u8 buf[GHASH_BLOCK_SIZE] = {};
- n = srclen & ~(GHASH_BLOCK_SIZE - 1);
- if (n) {
- cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
- src += n;
- srclen -= n;
+ memcpy(buf, src, len);
+ cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
+ memzero_explicit(buf, sizeof(buf));
}
+}
- if (srclen) {
- dctx->bytes = GHASH_BLOCK_SIZE - srclen;
- memcpy(buf, src, srclen);
- }
+static int ghash_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
+{
+ struct s390_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ ghash_flush(dctx, src, len);
+ memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
return 0;
}
-static int ghash_flush(struct ghash_desc_ctx *dctx)
+static int ghash_export(struct shash_desc *desc, void *out)
{
- u8 *buf = dctx->buffer;
-
- if (dctx->bytes) {
- u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
-
- memset(pos, 0, dctx->bytes);
- cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
- dctx->bytes = 0;
- }
+ struct s390_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ memcpy(out, dctx->icv, GHASH_DIGEST_SIZE);
return 0;
}
-static int ghash_final(struct shash_desc *desc, u8 *dst)
+static int ghash_import(struct shash_desc *desc, const void *in)
{
- struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- int ret;
+ struct s390_ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ struct s390_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- ret = ghash_flush(dctx);
- if (!ret)
- memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
- return ret;
+ memcpy(dctx->icv, in, GHASH_DIGEST_SIZE);
+ memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
+ return 0;
}
static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
- .final = ghash_final,
+ .finup = ghash_finup,
.setkey = ghash_setkey,
- .descsize = sizeof(struct ghash_desc_ctx),
+ .export = ghash_export,
+ .import = ghash_import,
+ .statesize = sizeof(struct ghash_desc_ctx),
+ .descsize = sizeof(struct s390_ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-s390",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = GHASH_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ghash_ctx),
+ .cra_ctxsize = sizeof(struct s390_ghash_ctx),
.cra_module = THIS_MODULE,
},
};
diff --git a/arch/s390/crypto/hmac_s390.c b/arch/s390/crypto/hmac_s390.c
index bba9a818dfdc..93a1098d9f8d 100644
--- a/arch/s390/crypto/hmac_s390.c
+++ b/arch/s390/crypto/hmac_s390.c
@@ -9,10 +9,14 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <asm/cpacf.h>
-#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
+#include <crypto/hmac.h>
+#include <crypto/sha2.h>
#include <linux/cpufeature.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
/*
* KMAC param block layout for sha2 function codes:
@@ -71,32 +75,31 @@ union s390_kmac_gr0 {
struct s390_kmac_sha2_ctx {
u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + MAX_BLOCK_SIZE];
union s390_kmac_gr0 gr0;
- u8 buf[MAX_BLOCK_SIZE];
- unsigned int buflen;
+ u64 buflen[2];
};
/*
* kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize
*/
-static inline void kmac_sha2_set_imbl(u8 *param, unsigned int buflen,
- unsigned int blocksize)
+static inline void kmac_sha2_set_imbl(u8 *param, u64 buflen_lo,
+ u64 buflen_hi, unsigned int blocksize)
{
u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize);
switch (blocksize) {
case SHA256_BLOCK_SIZE:
- *(u64 *)imbl = (u64)buflen * BITS_PER_BYTE;
+ *(u64 *)imbl = buflen_lo * BITS_PER_BYTE;
break;
case SHA512_BLOCK_SIZE:
- *(u128 *)imbl = (u128)buflen * BITS_PER_BYTE;
+ *(u128 *)imbl = (((u128)buflen_hi << 64) + buflen_lo) << 3;
break;
default:
break;
}
}
-static int hash_key(const u8 *in, unsigned int inlen,
- u8 *digest, unsigned int digestsize)
+static int hash_data(const u8 *in, unsigned int inlen,
+ u8 *digest, unsigned int digestsize, bool final)
{
unsigned long func;
union {
@@ -123,19 +126,23 @@ static int hash_key(const u8 *in, unsigned int inlen,
switch (digestsize) {
case SHA224_DIGEST_SIZE:
- func = CPACF_KLMD_SHA_256;
+ func = final ? CPACF_KLMD_SHA_256 : CPACF_KIMD_SHA_256;
PARAM_INIT(256, 224, inlen * 8);
+ if (!final)
+ digestsize = SHA256_DIGEST_SIZE;
break;
case SHA256_DIGEST_SIZE:
- func = CPACF_KLMD_SHA_256;
+ func = final ? CPACF_KLMD_SHA_256 : CPACF_KIMD_SHA_256;
PARAM_INIT(256, 256, inlen * 8);
break;
case SHA384_DIGEST_SIZE:
- func = CPACF_KLMD_SHA_512;
+ func = final ? CPACF_KLMD_SHA_512 : CPACF_KIMD_SHA_512;
PARAM_INIT(512, 384, inlen * 8);
+ if (!final)
+ digestsize = SHA512_DIGEST_SIZE;
break;
case SHA512_DIGEST_SIZE:
- func = CPACF_KLMD_SHA_512;
+ func = final ? CPACF_KLMD_SHA_512 : CPACF_KIMD_SHA_512;
PARAM_INIT(512, 512, inlen * 8);
break;
default:
@@ -151,6 +158,12 @@ static int hash_key(const u8 *in, unsigned int inlen,
return 0;
}
+static int hash_key(const u8 *in, unsigned int inlen,
+ u8 *digest, unsigned int digestsize)
+{
+ return hash_data(in, inlen, digest, digestsize, true);
+}
+
static int s390_hmac_sha2_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
@@ -176,7 +189,8 @@ static int s390_hmac_sha2_init(struct shash_desc *desc)
memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
tfm_ctx->key, bs);
- ctx->buflen = 0;
+ ctx->buflen[0] = 0;
+ ctx->buflen[1] = 0;
ctx->gr0.reg = 0;
switch (crypto_shash_digestsize(desc->tfm)) {
case SHA224_DIGEST_SIZE:
@@ -203,48 +217,31 @@ static int s390_hmac_sha2_update(struct shash_desc *desc,
{
struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
unsigned int bs = crypto_shash_blocksize(desc->tfm);
- unsigned int offset, n;
-
- /* check current buffer */
- offset = ctx->buflen % bs;
- ctx->buflen += len;
- if (offset + len < bs)
- goto store;
-
- /* process one stored block */
- if (offset) {
- n = bs - offset;
- memcpy(ctx->buf + offset, data, n);
- ctx->gr0.iimp = 1;
- _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs);
- data += n;
- len -= n;
- offset = 0;
- }
- /* process as many blocks as possible */
- if (len >= bs) {
- n = (len / bs) * bs;
- ctx->gr0.iimp = 1;
- _cpacf_kmac(&ctx->gr0.reg, ctx->param, data, n);
- data += n;
- len -= n;
- }
-store:
- /* store incomplete block in buffer */
- if (len)
- memcpy(ctx->buf + offset, data, len);
+ unsigned int n = round_down(len, bs);
- return 0;
+ ctx->buflen[0] += n;
+ if (ctx->buflen[0] < n)
+ ctx->buflen[1]++;
+
+ /* process as many blocks as possible */
+ ctx->gr0.iimp = 1;
+ _cpacf_kmac(&ctx->gr0.reg, ctx->param, data, n);
+ return len - n;
}
-static int s390_hmac_sha2_final(struct shash_desc *desc, u8 *out)
+static int s390_hmac_sha2_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
unsigned int bs = crypto_shash_blocksize(desc->tfm);
+ ctx->buflen[0] += len;
+ if (ctx->buflen[0] < len)
+ ctx->buflen[1]++;
+
ctx->gr0.iimp = 0;
- kmac_sha2_set_imbl(ctx->param, ctx->buflen, bs);
- _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, ctx->buflen % bs);
+ kmac_sha2_set_imbl(ctx->param, ctx->buflen[0], ctx->buflen[1], bs);
+ _cpacf_kmac(&ctx->gr0.reg, ctx->param, src, len);
memcpy(out, ctx->param, crypto_shash_digestsize(desc->tfm));
return 0;
@@ -262,7 +259,7 @@ static int s390_hmac_sha2_digest(struct shash_desc *desc,
return rc;
ctx->gr0.iimp = 0;
- kmac_sha2_set_imbl(ctx->param, len,
+ kmac_sha2_set_imbl(ctx->param, len, 0,
crypto_shash_blocksize(desc->tfm));
_cpacf_kmac(&ctx->gr0.reg, ctx->param, data, len);
memcpy(out, ctx->param, ds);
@@ -270,22 +267,89 @@ static int s390_hmac_sha2_digest(struct shash_desc *desc,
return 0;
}
-#define S390_HMAC_SHA2_ALG(x) { \
+static int s390_hmac_export_zero(struct shash_desc *desc, void *out)
+{
+ struct crypto_shash *tfm = desc->tfm;
+ u8 ipad[SHA512_BLOCK_SIZE];
+ struct s390_hmac_ctx *ctx;
+ unsigned int bs;
+ int err, i;
+
+ ctx = crypto_shash_ctx(tfm);
+ bs = crypto_shash_blocksize(tfm);
+ for (i = 0; i < bs; i++)
+ ipad[i] = ctx->key[i] ^ HMAC_IPAD_VALUE;
+
+ err = hash_data(ipad, bs, out, crypto_shash_digestsize(tfm), false);
+ memzero_explicit(ipad, sizeof(ipad));
+ return err;
+}
+
+static int s390_hmac_export(struct shash_desc *desc, void *out)
+{
+ struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int bs = crypto_shash_blocksize(desc->tfm);
+ unsigned int ds = bs / 2;
+ union {
+ u8 *u8;
+ u64 *u64;
+ } p = { .u8 = out };
+ int err = 0;
+
+ if (!ctx->gr0.ikp)
+ err = s390_hmac_export_zero(desc, out);
+ else
+ memcpy(p.u8, ctx->param, ds);
+ p.u8 += ds;
+ put_unaligned(ctx->buflen[0], p.u64++);
+ if (ds == SHA512_DIGEST_SIZE)
+ put_unaligned(ctx->buflen[1], p.u64);
+ return err;
+}
+
+static int s390_hmac_import(struct shash_desc *desc, const void *in)
+{
+ struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int bs = crypto_shash_blocksize(desc->tfm);
+ unsigned int ds = bs / 2;
+ union {
+ const u8 *u8;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int err;
+
+ err = s390_hmac_sha2_init(desc);
+ memcpy(ctx->param, p.u8, ds);
+ p.u8 += ds;
+ ctx->buflen[0] = get_unaligned(p.u64++);
+ if (ds == SHA512_DIGEST_SIZE)
+ ctx->buflen[1] = get_unaligned(p.u64);
+ if (ctx->buflen[0] | ctx->buflen[1])
+ ctx->gr0.ikp = 1;
+ return err;
+}
+
+#define S390_HMAC_SHA2_ALG(x, ss) { \
.fc = CPACF_KMAC_HMAC_SHA_##x, \
.alg = { \
.init = s390_hmac_sha2_init, \
.update = s390_hmac_sha2_update, \
- .final = s390_hmac_sha2_final, \
+ .finup = s390_hmac_sha2_finup, \
.digest = s390_hmac_sha2_digest, \
.setkey = s390_hmac_sha2_setkey, \
+ .export = s390_hmac_export, \
+ .import = s390_hmac_import, \
.descsize = sizeof(struct s390_kmac_sha2_ctx), \
.halg = { \
+ .statesize = ss, \
.digestsize = SHA##x##_DIGEST_SIZE, \
.base = { \
.cra_name = "hmac(sha" #x ")", \
.cra_driver_name = "hmac_s390_sha" #x, \
.cra_blocksize = SHA##x##_BLOCK_SIZE, \
.cra_priority = 400, \
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | \
+ CRYPTO_AHASH_ALG_FINUP_MAX, \
.cra_ctxsize = sizeof(struct s390_hmac_ctx), \
.cra_module = THIS_MODULE, \
}, \
@@ -298,10 +362,10 @@ static struct s390_hmac_alg {
unsigned int fc;
struct shash_alg alg;
} s390_hmac_algs[] = {
- S390_HMAC_SHA2_ALG(224),
- S390_HMAC_SHA2_ALG(256),
- S390_HMAC_SHA2_ALG(384),
- S390_HMAC_SHA2_ALG(512),
+ S390_HMAC_SHA2_ALG(224, sizeof(struct crypto_sha256_state)),
+ S390_HMAC_SHA2_ALG(256, sizeof(struct crypto_sha256_state)),
+ S390_HMAC_SHA2_ALG(384, SHA512_STATE_SIZE),
+ S390_HMAC_SHA2_ALG(512, SHA512_STATE_SIZE),
};
static __always_inline void _s390_hmac_algs_unregister(void)
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 511093713a6f..8a340c16acb4 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -5,7 +5,7 @@
* s390 implementation of the AES Cipher Algorithm with protected keys.
*
* s390 Version:
- * Copyright IBM Corp. 2017, 2023
+ * Copyright IBM Corp. 2017, 2025
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Harald Freudenberger <freude@de.ibm.com>
*/
@@ -13,16 +13,18 @@
#define KMSG_COMPONENT "paes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <crypto/aes.h>
-#include <crypto/algapi.h>
-#include <linux/bug.h>
-#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/atomic.h>
#include <linux/cpufeature.h>
+#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
-#include <linux/delay.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
@@ -44,23 +46,61 @@ static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
+static struct crypto_engine *paes_crypto_engine;
+#define MAX_QLEN 10
+
+/*
+ * protected key specific stuff
+ */
+
struct paes_protkey {
u32 type;
u32 len;
u8 protkey[PXTS_256_PROTKEY_SIZE];
};
-struct key_blob {
- /*
- * Small keys will be stored in the keybuf. Larger keys are
- * stored in extra allocated memory. In both cases does
- * key point to the memory where the key is stored.
- * The code distinguishes by checking keylen against
- * sizeof(keybuf). See the two following helper functions.
- */
- u8 *key;
- u8 keybuf[128];
+#define PK_STATE_NO_KEY 0
+#define PK_STATE_CONVERT_IN_PROGRESS 1
+#define PK_STATE_VALID 2
+
+struct s390_paes_ctx {
+ /* source key material used to derive a protected key from */
+ u8 keybuf[PAES_MAX_KEYSIZE];
+ unsigned int keylen;
+
+ /* cpacf function code to use with this protected key type */
+ long fc;
+
+ /* nr of requests enqueued via crypto engine which use this tfm ctx */
+ atomic_t via_engine_ctr;
+
+ /* spinlock to atomically read/update all the following fields */
+ spinlock_t pk_lock;
+
+ /* see PK_STATE* defines above, < 0 holds convert failure rc */
+ int pk_state;
+ /* if state is valid, pk holds the protected key */
+ struct paes_protkey pk;
+};
+
+struct s390_pxts_ctx {
+ /* source key material used to derive a protected key from */
+ u8 keybuf[2 * PAES_MAX_KEYSIZE];
unsigned int keylen;
+
+ /* cpacf function code to use with this protected key type */
+ long fc;
+
+ /* nr of requests enqueued via crypto engine which use this tfm ctx */
+ atomic_t via_engine_ctr;
+
+ /* spinlock to atomically read/update all the following fields */
+ spinlock_t pk_lock;
+
+ /* see PK_STATE* defines above, < 0 holds convert failure rc */
+ int pk_state;
+ /* if state is valid, pk[] hold(s) the protected key(s) */
+ struct paes_protkey pk[2];
};
/*
@@ -89,214 +129,370 @@ static inline u32 make_clrkey_token(const u8 *ck, size_t cklen, u8 *dest)
return sizeof(*token) + cklen;
}
-static inline int _key_to_kb(struct key_blob *kb,
- const u8 *key,
- unsigned int keylen)
+/*
+ * paes_ctx_setkey() - Set key value into context, maybe construct
+ * a clear key token digestible by pkey from a clear key value.
+ */
+static inline int paes_ctx_setkey(struct s390_paes_ctx *ctx,
+ const u8 *key, unsigned int keylen)
{
+ if (keylen > sizeof(ctx->keybuf))
+ return -EINVAL;
+
switch (keylen) {
case 16:
case 24:
case 32:
/* clear key value, prepare pkey clear key token in keybuf */
- memset(kb->keybuf, 0, sizeof(kb->keybuf));
- kb->keylen = make_clrkey_token(key, keylen, kb->keybuf);
- kb->key = kb->keybuf;
+ memset(ctx->keybuf, 0, sizeof(ctx->keybuf));
+ ctx->keylen = make_clrkey_token(key, keylen, ctx->keybuf);
break;
default:
/* other key material, let pkey handle this */
- if (keylen <= sizeof(kb->keybuf))
- kb->key = kb->keybuf;
- else {
- kb->key = kmalloc(keylen, GFP_KERNEL);
- if (!kb->key)
- return -ENOMEM;
- }
- memcpy(kb->key, key, keylen);
- kb->keylen = keylen;
+ memcpy(ctx->keybuf, key, keylen);
+ ctx->keylen = keylen;
break;
}
return 0;
}
-static inline int _xts_key_to_kb(struct key_blob *kb,
- const u8 *key,
- unsigned int keylen)
+/*
+ * pxts_ctx_setkey() - Set key value into context, maybe construct
+ * a clear key token digestible by pkey from a clear key value.
+ */
+static inline int pxts_ctx_setkey(struct s390_pxts_ctx *ctx,
+ const u8 *key, unsigned int keylen)
{
size_t cklen = keylen / 2;
- memset(kb->keybuf, 0, sizeof(kb->keybuf));
+ if (keylen > sizeof(ctx->keybuf))
+ return -EINVAL;
switch (keylen) {
case 32:
case 64:
/* clear key value, prepare pkey clear key tokens in keybuf */
- kb->key = kb->keybuf;
- kb->keylen = make_clrkey_token(key, cklen, kb->key);
- kb->keylen += make_clrkey_token(key + cklen, cklen,
- kb->key + kb->keylen);
+ memset(ctx->keybuf, 0, sizeof(ctx->keybuf));
+ ctx->keylen = make_clrkey_token(key, cklen, ctx->keybuf);
+ ctx->keylen += make_clrkey_token(key + cklen, cklen,
+ ctx->keybuf + ctx->keylen);
break;
default:
/* other key material, let pkey handle this */
- if (keylen <= sizeof(kb->keybuf)) {
- kb->key = kb->keybuf;
- } else {
- kb->key = kmalloc(keylen, GFP_KERNEL);
- if (!kb->key)
- return -ENOMEM;
- }
- memcpy(kb->key, key, keylen);
- kb->keylen = keylen;
+ memcpy(ctx->keybuf, key, keylen);
+ ctx->keylen = keylen;
break;
}
return 0;
}
-static inline void _free_kb_keybuf(struct key_blob *kb)
+/*
+ * Convert the raw key material into a protected key via PKEY api.
+ * This function may sleep - don't call in non-sleeping context.
+ */
+static inline int convert_key(const u8 *key, unsigned int keylen,
+ struct paes_protkey *pk)
{
- if (kb->key && kb->key != kb->keybuf
- && kb->keylen > sizeof(kb->keybuf)) {
- kfree_sensitive(kb->key);
- kb->key = NULL;
+ int rc, i;
+
+ pk->len = sizeof(pk->protkey);
+
+ /*
+ * In case of a busy card retry with increasing delay
+ * of 200, 400, 800 and 1600 ms - in total 3 s.
+ */
+ for (rc = -EIO, i = 0; rc && i < 5; i++) {
+ if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) {
+ rc = -EINTR;
+ goto out;
+ }
+ rc = pkey_key2protkey(key, keylen,
+ pk->protkey, &pk->len, &pk->type,
+ PKEY_XFLAG_NOMEMALLOC);
}
- memzero_explicit(kb->keybuf, sizeof(kb->keybuf));
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
}
-struct s390_paes_ctx {
- struct key_blob kb;
+/*
+ * (Re-)Convert the raw key material from the ctx into a protected key
+ * via the convert_key() function. Update the pk_state, pk_type, pk_len
+ * and the protected key in the tfm context.
+ * Please note this function may be invoked concurrently with the very
+ * same tfm context. The pk_lock spinlock in the context ensures an
+ * atomic update of the pk and the pk state but does not guarantee any
+ * order of update. So a fresh converted valid protected key may get
+ * updated with an 'old' expired key value. As the cpacf instructions
+ * detect this, refuse to operate with an invalid key, and the calling
+ * code then triggers a (re-)conversion, this does no harm. This may lead to
+ * unnecessary additional conversion but never to invalid data on en-
+ * or decrypt operations.
+ */
+static int paes_convert_key(struct s390_paes_ctx *ctx)
+{
struct paes_protkey pk;
- spinlock_t pk_lock;
- unsigned long fc;
-};
+ int rc;
-struct s390_pxts_ctx {
- struct key_blob kb;
- struct paes_protkey pk[2];
- spinlock_t pk_lock;
- unsigned long fc;
-};
+ spin_lock_bh(&ctx->pk_lock);
+ ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
+ spin_unlock_bh(&ctx->pk_lock);
-static inline int __paes_keyblob2pkey(const u8 *key, unsigned int keylen,
- struct paes_protkey *pk)
-{
- int i, rc = -EIO;
+ rc = convert_key(ctx->keybuf, ctx->keylen, &pk);
- /* try three times in case of busy card */
- for (i = 0; rc && i < 3; i++) {
- if (rc == -EBUSY && in_task()) {
- if (msleep_interruptible(1000))
- return -EINTR;
- }
- rc = pkey_key2protkey(key, keylen, pk->protkey, &pk->len,
- &pk->type);
+ /* update context */
+ spin_lock_bh(&ctx->pk_lock);
+ if (rc) {
+ ctx->pk_state = rc;
+ } else {
+ ctx->pk_state = PK_STATE_VALID;
+ ctx->pk = pk;
}
+ spin_unlock_bh(&ctx->pk_lock);
+ memzero_explicit(&pk, sizeof(pk));
+ pr_debug("rc=%d\n", rc);
return rc;
}
-static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
+/*
+ * (Re-)Convert the raw xts key material from the ctx into a
+ * protected key via the convert_key() function. Update the pk_state,
+ * pk_type, pk_len and the protected key in the tfm context.
+ * See also the comments on paes_convert_key().
+ */
+static int pxts_convert_key(struct s390_pxts_ctx *ctx)
{
- struct paes_protkey pk;
+ struct paes_protkey pk0, pk1;
+ size_t split_keylen;
int rc;
- pk.len = sizeof(pk.protkey);
- rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk);
+ spin_lock_bh(&ctx->pk_lock);
+ ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
+ spin_unlock_bh(&ctx->pk_lock);
+
+ rc = convert_key(ctx->keybuf, ctx->keylen, &pk0);
if (rc)
- return rc;
+ goto out;
+
+ switch (pk0.type) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_256:
+ /* second keytoken required */
+ if (ctx->keylen % 2) {
+ rc = -EINVAL;
+ goto out;
+ }
+ split_keylen = ctx->keylen / 2;
+ rc = convert_key(ctx->keybuf + split_keylen,
+ split_keylen, &pk1);
+ if (rc)
+ goto out;
+ if (pk0.type != pk1.type) {
+ rc = -EINVAL;
+ goto out;
+ }
+ break;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ case PKEY_KEYTYPE_AES_XTS_256:
+ /* single key */
+ pk1.type = 0;
+ break;
+ default:
+ /* unsupported protected keytype */
+ rc = -EINVAL;
+ goto out;
+ }
+out:
+ /* update context */
spin_lock_bh(&ctx->pk_lock);
- memcpy(&ctx->pk, &pk, sizeof(pk));
+ if (rc) {
+ ctx->pk_state = rc;
+ } else {
+ ctx->pk_state = PK_STATE_VALID;
+ ctx->pk[0] = pk0;
+ ctx->pk[1] = pk1;
+ }
spin_unlock_bh(&ctx->pk_lock);
- return 0;
+ memzero_explicit(&pk0, sizeof(pk0));
+ memzero_explicit(&pk1, sizeof(pk1));
+ pr_debug("rc=%d\n", rc);
+ return rc;
}
-static int ecb_paes_init(struct crypto_skcipher *tfm)
-{
- struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+/*
+ * PAES ECB implementation
+ */
- ctx->kb.key = NULL;
- spin_lock_init(&ctx->pk_lock);
+struct ecb_param {
+ u8 key[PAES_256_PROTKEY_SIZE];
+} __packed;
- return 0;
-}
+struct s390_pecb_req_ctx {
+ unsigned long modifier;
+ struct skcipher_walk walk;
+ bool param_init_done;
+ struct ecb_param param;
+};
-static void ecb_paes_exit(struct crypto_skcipher *tfm)
+static int ecb_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
{
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- _free_kb_keybuf(&ctx->kb);
-}
-
-static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
-{
- unsigned long fc;
+ long fc;
int rc;
- rc = __paes_convert_key(ctx);
+ /* set raw key into context */
+ rc = paes_ctx_setkey(ctx, in_key, key_len);
if (rc)
- return rc;
+ goto out;
- /* Pick the correct function code based on the protected key type */
- fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
- (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
- (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
+ /* convert key into protected key */
+ rc = paes_convert_key(ctx);
+ if (rc)
+ goto out;
- /* Check if the function code is available */
+ /* Pick the correct function code based on the protected key type */
+ switch (ctx->pk.type) {
+ case PKEY_KEYTYPE_AES_128:
+ fc = CPACF_KM_PAES_128;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ fc = CPACF_KM_PAES_192;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ fc = CPACF_KM_PAES_256;
+ break;
+ default:
+ fc = 0;
+ break;
+ }
ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
- return ctx->fc ? 0 : -EINVAL;
+ rc = fc ? 0 : -EINVAL;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
}
-static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
- unsigned int key_len)
+static int ecb_paes_do_crypt(struct s390_paes_ctx *ctx,
+ struct s390_pecb_req_ctx *req_ctx,
+ bool maysleep)
{
- struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
- int rc;
-
- _free_kb_keybuf(&ctx->kb);
- rc = _key_to_kb(&ctx->kb, in_key, key_len);
+ struct ecb_param *param = &req_ctx->param;
+ struct skcipher_walk *walk = &req_ctx->walk;
+ unsigned int nbytes, n, k;
+ int pk_state, rc = 0;
+
+ if (!req_ctx->param_init_done) {
+ /* fetch and check protected key state */
+ spin_lock_bh(&ctx->pk_lock);
+ pk_state = ctx->pk_state;
+ switch (pk_state) {
+ case PK_STATE_NO_KEY:
+ rc = -ENOKEY;
+ break;
+ case PK_STATE_CONVERT_IN_PROGRESS:
+ rc = -EKEYEXPIRED;
+ break;
+ case PK_STATE_VALID:
+ memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
+ req_ctx->param_init_done = true;
+ break;
+ default:
+ rc = pk_state < 0 ? pk_state : -EIO;
+ break;
+ }
+ spin_unlock_bh(&ctx->pk_lock);
+ }
if (rc)
- return rc;
+ goto out;
- return __ecb_paes_set_key(ctx);
+ /*
+ * Note that in case of partial processing or failure the walk
+ * is NOT unmapped here. So a follow up task may reuse the walk
+ * or in case of unrecoverable failure needs to unmap it.
+ */
+ while ((nbytes = walk->nbytes) != 0) {
+ /* only use complete blocks */
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ k = cpacf_km(ctx->fc | req_ctx->modifier, param,
+ walk->dst.virt.addr, walk->src.virt.addr, n);
+ if (k)
+ rc = skcipher_walk_done(walk, nbytes - k);
+ if (k < n) {
+ if (!maysleep) {
+ rc = -EKEYEXPIRED;
+ goto out;
+ }
+ rc = paes_convert_key(ctx);
+ if (rc)
+ goto out;
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
+ spin_unlock_bh(&ctx->pk_lock);
+ }
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
}
static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
+ struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct {
- u8 key[PAES_256_PROTKEY_SIZE];
- } param;
- struct skcipher_walk walk;
- unsigned int nbytes, n, k;
+ struct skcipher_walk *walk = &req_ctx->walk;
int rc;
- rc = skcipher_walk_virt(&walk, req, false);
+ /*
+ * Attempt synchronous encryption first. If it fails, schedule the request
+ * asynchronously via the crypto engine. To preserve execution order,
+ * once a request is queued to the engine, further requests using the same
+ * tfm will also be routed through the engine.
+ */
+
+ rc = skcipher_walk_virt(walk, req, false);
if (rc)
- return rc;
+ goto out;
- spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
- spin_unlock_bh(&ctx->pk_lock);
+ req_ctx->modifier = modifier;
+ req_ctx->param_init_done = false;
- while ((nbytes = walk.nbytes) != 0) {
- /* only use complete blocks */
- n = nbytes & ~(AES_BLOCK_SIZE - 1);
- k = cpacf_km(ctx->fc | modifier, &param,
- walk.dst.virt.addr, walk.src.virt.addr, n);
- if (k)
- rc = skcipher_walk_done(&walk, nbytes - k);
- if (k < n) {
- if (__paes_convert_key(ctx))
- return skcipher_walk_done(&walk, -EIO);
- spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
- spin_unlock_bh(&ctx->pk_lock);
- }
+ /* Try synchronous operation if no active engine usage */
+ if (!atomic_read(&ctx->via_engine_ctr)) {
+ rc = ecb_paes_do_crypt(ctx, req_ctx, false);
+ if (rc == 0)
+ goto out;
+ }
+
+ /*
+ * If the sync operation failed, the key expired, or there are
+ * already requests enqueued via the engine, fall back to async.
+ * Mark the tfm as using the engine to serialize requests.
+ */
+ if (rc == 0 || rc == -EKEYEXPIRED) {
+ atomic_inc(&ctx->via_engine_ctr);
+ rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
+ if (rc != -EINPROGRESS)
+ atomic_dec(&ctx->via_engine_ctr);
}
+
+ if (rc != -EINPROGRESS)
+ skcipher_walk_done(walk, rc);
+
+out:
+ if (rc != -EINPROGRESS)
+ memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
+ pr_debug("rc=%d\n", rc);
return rc;
}
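
From a caller's point of view the sync-first/async-fallback split above is invisible: a request either completes inline (rc == 0 or a hard error) or returns -EINPROGRESS and is finalized later through the request callback by the crypto engine. A minimal sketch of an in-kernel consumer, assuming hypothetical pkey_blob, blob_len, sg and len variables (not part of this patch), could look like this:

	/* illustrative sketch only - pkey_blob, blob_len, sg, len are assumed */
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int rc;

	tfm = crypto_alloc_skcipher("ecb(paes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	rc = crypto_skcipher_setkey(tfm, pkey_blob, blob_len);
	if (rc)
		goto out_free_tfm;
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto out_free_tfm;
	}
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, sg, sg, len, NULL);
	/* crypto_wait_req() converts -EINPROGRESS/-EBUSY into the final rc */
	rc = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);

crypto_wait_req() sleeps until the engine's do_one_request() callback finalizes the request, so the -EKEYEXPIRED/-ENOSPC requeue handling in the callbacks below stays entirely internal to the driver.
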
@@ -310,112 +506,256 @@ static int ecb_paes_decrypt(struct skcipher_request *req)
return ecb_paes_crypt(req, CPACF_DECRYPT);
}
-static struct skcipher_alg ecb_paes_alg = {
- .base.cra_name = "ecb(paes)",
- .base.cra_driver_name = "ecb-paes-s390",
- .base.cra_priority = 401, /* combo: aes + ecb + 1 */
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.cra_list),
- .init = ecb_paes_init,
- .exit = ecb_paes_exit,
- .min_keysize = PAES_MIN_KEYSIZE,
- .max_keysize = PAES_MAX_KEYSIZE,
- .setkey = ecb_paes_set_key,
- .encrypt = ecb_paes_encrypt,
- .decrypt = ecb_paes_decrypt,
-};
-
-static int cbc_paes_init(struct crypto_skcipher *tfm)
+static int ecb_paes_init(struct crypto_skcipher *tfm)
{
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
- ctx->kb.key = NULL;
+ memset(ctx, 0, sizeof(*ctx));
spin_lock_init(&ctx->pk_lock);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pecb_req_ctx));
+
return 0;
}
-static void cbc_paes_exit(struct crypto_skcipher *tfm)
+static void ecb_paes_exit(struct crypto_skcipher *tfm)
{
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
- _free_kb_keybuf(&ctx->kb);
+ memzero_explicit(ctx, sizeof(*ctx));
}
-static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
+static int ecb_paes_do_one_request(struct crypto_engine *engine, void *areq)
{
- unsigned long fc;
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk *walk = &req_ctx->walk;
int rc;
- rc = __paes_convert_key(ctx);
- if (rc)
- return rc;
+ /* walk has already been prepared */
+
+ rc = ecb_paes_do_crypt(ctx, req_ctx, true);
+ if (rc == -EKEYEXPIRED) {
+ /*
+ * Protected key expired, conversion is in progress.
+ * Trigger a re-schedule of this request by returning
+ * -ENOSPC ("hardware queue is full") to the crypto engine.
+ * To avoid immediate re-invocation of this callback,
+ * tell the scheduler to voluntarily give up the CPU here.
+ */
+ cond_resched();
+ pr_debug("rescheduling request\n");
+ return -ENOSPC;
+ } else if (rc) {
+ skcipher_walk_done(walk, rc);
+ }
- /* Pick the correct function code based on the protected key type */
- fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
- (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 :
- (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0;
+ memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
+ pr_debug("request complete with rc=%d\n", rc);
+ local_bh_disable();
+ atomic_dec(&ctx->via_engine_ctr);
+ crypto_finalize_skcipher_request(engine, req, rc);
+ local_bh_enable();
+ return rc;
+}
- /* Check if the function code is available */
- ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
+static struct skcipher_engine_alg ecb_paes_alg = {
+ .base = {
+ .base.cra_name = "ecb(paes)",
+ .base.cra_driver_name = "ecb-paes-s390",
+ .base.cra_priority = 401, /* combo: aes + ecb + 1 */
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.base.cra_list),
+ .init = ecb_paes_init,
+ .exit = ecb_paes_exit,
+ .min_keysize = PAES_MIN_KEYSIZE,
+ .max_keysize = PAES_MAX_KEYSIZE,
+ .setkey = ecb_paes_setkey,
+ .encrypt = ecb_paes_encrypt,
+ .decrypt = ecb_paes_decrypt,
+ },
+ .op = {
+ .do_one_request = ecb_paes_do_one_request,
+ },
+};
- return ctx->fc ? 0 : -EINVAL;
-}
+/*
+ * PAES CBC implementation
+ */
+
+struct cbc_param {
+ u8 iv[AES_BLOCK_SIZE];
+ u8 key[PAES_256_PROTKEY_SIZE];
+} __packed;
+
+struct s390_pcbc_req_ctx {
+ unsigned long modifier;
+ struct skcipher_walk walk;
+ bool param_init_done;
+ struct cbc_param param;
+};
-static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
- unsigned int key_len)
+static int cbc_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
{
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ long fc;
int rc;
- _free_kb_keybuf(&ctx->kb);
- rc = _key_to_kb(&ctx->kb, in_key, key_len);
+ /* set raw key into context */
+ rc = paes_ctx_setkey(ctx, in_key, key_len);
if (rc)
- return rc;
+ goto out;
- return __cbc_paes_set_key(ctx);
+ /* convert raw key into protected key */
+ rc = paes_convert_key(ctx);
+ if (rc)
+ goto out;
+
+ /* Pick the correct function code based on the protected key type */
+ switch (ctx->pk.type) {
+ case PKEY_KEYTYPE_AES_128:
+ fc = CPACF_KMC_PAES_128;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ fc = CPACF_KMC_PAES_192;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ fc = CPACF_KMC_PAES_256;
+ break;
+ default:
+ fc = 0;
+ break;
+ }
+ ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
+
+ rc = fc ? 0 : -EINVAL;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
}
-static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
+static int cbc_paes_do_crypt(struct s390_paes_ctx *ctx,
+ struct s390_pcbc_req_ctx *req_ctx,
+ bool maysleep)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct {
- u8 iv[AES_BLOCK_SIZE];
- u8 key[PAES_256_PROTKEY_SIZE];
- } param;
- struct skcipher_walk walk;
+ struct cbc_param *param = &req_ctx->param;
+ struct skcipher_walk *walk = &req_ctx->walk;
unsigned int nbytes, n, k;
- int rc;
-
- rc = skcipher_walk_virt(&walk, req, false);
+ int pk_state, rc = 0;
+
+ if (!req_ctx->param_init_done) {
+ /* fetch and check protected key state */
+ spin_lock_bh(&ctx->pk_lock);
+ pk_state = ctx->pk_state;
+ switch (pk_state) {
+ case PK_STATE_NO_KEY:
+ rc = -ENOKEY;
+ break;
+ case PK_STATE_CONVERT_IN_PROGRESS:
+ rc = -EKEYEXPIRED;
+ break;
+ case PK_STATE_VALID:
+ memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
+ req_ctx->param_init_done = true;
+ break;
+ default:
+ rc = pk_state < 0 ? pk_state : -EIO;
+ break;
+ }
+ spin_unlock_bh(&ctx->pk_lock);
+ }
if (rc)
- return rc;
+ goto out;
- memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
- spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
- spin_unlock_bh(&ctx->pk_lock);
+ memcpy(param->iv, walk->iv, AES_BLOCK_SIZE);
- while ((nbytes = walk.nbytes) != 0) {
+ /*
+ * Note that in case of partial processing or failure the walk
+ * is NOT unmapped here. So a follow up task may reuse the walk
+ * or in case of unrecoverable failure needs to unmap it.
+ */
+ while ((nbytes = walk->nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
- k = cpacf_kmc(ctx->fc | modifier, &param,
- walk.dst.virt.addr, walk.src.virt.addr, n);
+ k = cpacf_kmc(ctx->fc | req_ctx->modifier, param,
+ walk->dst.virt.addr, walk->src.virt.addr, n);
if (k) {
- memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
- rc = skcipher_walk_done(&walk, nbytes - k);
+ memcpy(walk->iv, param->iv, AES_BLOCK_SIZE);
+ rc = skcipher_walk_done(walk, nbytes - k);
}
if (k < n) {
- if (__paes_convert_key(ctx))
- return skcipher_walk_done(&walk, -EIO);
+ if (!maysleep) {
+ rc = -EKEYEXPIRED;
+ goto out;
+ }
+ rc = paes_convert_key(ctx);
+ if (rc)
+ goto out;
spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
+ memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
spin_unlock_bh(&ctx->pk_lock);
}
}
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
+{
+ struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk *walk = &req_ctx->walk;
+ int rc;
+
+ /*
+ * Attempt synchronous encryption first. If it fails, schedule the request
+ * asynchronously via the crypto engine. To preserve execution order,
+ * once a request is queued to the engine, further requests using the same
+ * tfm will also be routed through the engine.
+ */
+
+ rc = skcipher_walk_virt(walk, req, false);
+ if (rc)
+ goto out;
+
+ req_ctx->modifier = modifier;
+ req_ctx->param_init_done = false;
+
+ /* Try synchronous operation if no active engine usage */
+ if (!atomic_read(&ctx->via_engine_ctr)) {
+ rc = cbc_paes_do_crypt(ctx, req_ctx, false);
+ if (rc == 0)
+ goto out;
+ }
+
+ /*
+ * If the sync operation failed, the key expired, or there are
+ * already requests enqueued via the engine, fall back to async.
+ * Mark the tfm as using the engine to serialize requests.
+ */
+ if (rc == 0 || rc == -EKEYEXPIRED) {
+ atomic_inc(&ctx->via_engine_ctr);
+ rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
+ if (rc != -EINPROGRESS)
+ atomic_dec(&ctx->via_engine_ctr);
+ }
+
+ if (rc != -EINPROGRESS)
+ skcipher_walk_done(walk, rc);
+
+out:
+ if (rc != -EINPROGRESS)
+ memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
+ pr_debug("rc=%d\n", rc);
return rc;
}
@@ -429,496 +769,882 @@ static int cbc_paes_decrypt(struct skcipher_request *req)
return cbc_paes_crypt(req, CPACF_DECRYPT);
}
-static struct skcipher_alg cbc_paes_alg = {
- .base.cra_name = "cbc(paes)",
- .base.cra_driver_name = "cbc-paes-s390",
- .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.cra_list),
- .init = cbc_paes_init,
- .exit = cbc_paes_exit,
- .min_keysize = PAES_MIN_KEYSIZE,
- .max_keysize = PAES_MAX_KEYSIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = cbc_paes_set_key,
- .encrypt = cbc_paes_encrypt,
- .decrypt = cbc_paes_decrypt,
-};
-
-static int xts_paes_init(struct crypto_skcipher *tfm)
+static int cbc_paes_init(struct crypto_skcipher *tfm)
{
- struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
- ctx->kb.key = NULL;
+ memset(ctx, 0, sizeof(*ctx));
spin_lock_init(&ctx->pk_lock);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pcbc_req_ctx));
+
return 0;
}
-static void xts_paes_exit(struct crypto_skcipher *tfm)
+static void cbc_paes_exit(struct crypto_skcipher *tfm)
{
- struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
- _free_kb_keybuf(&ctx->kb);
+ memzero_explicit(ctx, sizeof(*ctx));
}
-static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
+static int cbc_paes_do_one_request(struct crypto_engine *engine, void *areq)
{
- struct paes_protkey pk0, pk1;
- size_t split_keylen;
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk *walk = &req_ctx->walk;
int rc;
- pk0.len = sizeof(pk0.protkey);
- pk1.len = sizeof(pk1.protkey);
-
- rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk0);
- if (rc)
- return rc;
+ /* walk has already been prepared */
+
+ rc = cbc_paes_do_crypt(ctx, req_ctx, true);
+ if (rc == -EKEYEXPIRED) {
+ /*
+ * Protected key expired, conversion is in progress.
+ * Trigger a re-schedule of this request by returning
+ * -ENOSPC ("hardware queue is full") to the crypto engine.
+ * To avoid immediate re-invocation of this callback,
+ * tell the scheduler to voluntarily give up the CPU here.
+ */
+ cond_resched();
+ pr_debug("rescheduling request\n");
+ return -ENOSPC;
+ } else if (rc) {
+ skcipher_walk_done(walk, rc);
+ }
- switch (pk0.type) {
- case PKEY_KEYTYPE_AES_128:
- case PKEY_KEYTYPE_AES_256:
- /* second keytoken required */
- if (ctx->kb.keylen % 2)
- return -EINVAL;
- split_keylen = ctx->kb.keylen / 2;
+ memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
+ pr_debug("request complete with rc=%d\n", rc);
+ local_bh_disable();
+ atomic_dec(&ctx->via_engine_ctr);
+ crypto_finalize_skcipher_request(engine, req, rc);
+ local_bh_enable();
+ return rc;
+}
- rc = __paes_keyblob2pkey(ctx->kb.key + split_keylen,
- split_keylen, &pk1);
- if (rc)
- return rc;
+static struct skcipher_engine_alg cbc_paes_alg = {
+ .base = {
+ .base.cra_name = "cbc(paes)",
+ .base.cra_driver_name = "cbc-paes-s390",
+ .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.base.cra_list),
+ .init = cbc_paes_init,
+ .exit = cbc_paes_exit,
+ .min_keysize = PAES_MIN_KEYSIZE,
+ .max_keysize = PAES_MAX_KEYSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_paes_setkey,
+ .encrypt = cbc_paes_encrypt,
+ .decrypt = cbc_paes_decrypt,
+ },
+ .op = {
+ .do_one_request = cbc_paes_do_one_request,
+ },
+};
- if (pk0.type != pk1.type)
- return -EINVAL;
- break;
- case PKEY_KEYTYPE_AES_XTS_128:
- case PKEY_KEYTYPE_AES_XTS_256:
- /* single key */
- pk1.type = 0;
- break;
- default:
- /* unsupported protected keytype */
- return -EINVAL;
- }
+/*
+ * PAES CTR implementation
+ */
- spin_lock_bh(&ctx->pk_lock);
- ctx->pk[0] = pk0;
- ctx->pk[1] = pk1;
- spin_unlock_bh(&ctx->pk_lock);
+struct ctr_param {
+ u8 key[PAES_256_PROTKEY_SIZE];
+} __packed;
- return 0;
-}
+struct s390_pctr_req_ctx {
+ unsigned long modifier;
+ struct skcipher_walk walk;
+ bool param_init_done;
+ struct ctr_param param;
+};
-static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
+static int ctr_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
{
- unsigned long fc;
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ long fc;
int rc;
- rc = __xts_paes_convert_key(ctx);
+ /* set raw key into context */
+ rc = paes_ctx_setkey(ctx, in_key, key_len);
if (rc)
- return rc;
+ goto out;
+
+ /* convert raw key into protected key */
+ rc = paes_convert_key(ctx);
+ if (rc)
+ goto out;
/* Pick the correct function code based on the protected key type */
- switch (ctx->pk[0].type) {
+ switch (ctx->pk.type) {
case PKEY_KEYTYPE_AES_128:
- fc = CPACF_KM_PXTS_128;
- break;
- case PKEY_KEYTYPE_AES_256:
- fc = CPACF_KM_PXTS_256;
+ fc = CPACF_KMCTR_PAES_128;
break;
- case PKEY_KEYTYPE_AES_XTS_128:
- fc = CPACF_KM_PXTS_128_FULL;
+ case PKEY_KEYTYPE_AES_192:
+ fc = CPACF_KMCTR_PAES_192;
break;
- case PKEY_KEYTYPE_AES_XTS_256:
- fc = CPACF_KM_PXTS_256_FULL;
+ case PKEY_KEYTYPE_AES_256:
+ fc = CPACF_KMCTR_PAES_256;
break;
default:
fc = 0;
break;
}
+ ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
- /* Check if the function code is available */
- ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+ rc = fc ? 0 : -EINVAL;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static inline unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* only use complete blocks, max. PAGE_SIZE */
+ memcpy(ctrptr, iv, AES_BLOCK_SIZE);
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+ for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
+ memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
+ crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
+ ctrptr += AES_BLOCK_SIZE;
+ }
+ return n;
+}
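
__ctrblk_init() pre-expands the IV into up to one page of consecutive counter values so that a single cpacf_kmctr() call can process many blocks at once. A quick worked illustration (not part of this patch), assuming ctrblk has already been allocated as in paes_s390_init():

	/* illustrative only: crypto_inc() is a big-endian increment */
	u8 iv[AES_BLOCK_SIZE] = { [AES_BLOCK_SIZE - 1] = 0x01 };
	unsigned int n;

	n = __ctrblk_init(ctrblk, iv, 3 * AES_BLOCK_SIZE);
	/* n == 48; ctrblk holds the counters ...01, ...02, ...03 */
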
+
+static int ctr_paes_do_crypt(struct s390_paes_ctx *ctx,
+ struct s390_pctr_req_ctx *req_ctx,
+ bool maysleep)
+{
+ struct ctr_param *param = &req_ctx->param;
+ struct skcipher_walk *walk = &req_ctx->walk;
+ u8 buf[AES_BLOCK_SIZE], *ctrptr;
+ unsigned int nbytes, n, k;
+ int pk_state, locked, rc = 0;
+
+ if (!req_ctx->param_init_done) {
+ /* fetch and check protected key state */
+ spin_lock_bh(&ctx->pk_lock);
+ pk_state = ctx->pk_state;
+ switch (pk_state) {
+ case PK_STATE_NO_KEY:
+ rc = -ENOKEY;
+ break;
+ case PK_STATE_CONVERT_IN_PROGRESS:
+ rc = -EKEYEXPIRED;
+ break;
+ case PK_STATE_VALID:
+ memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
+ req_ctx->param_init_done = true;
+ break;
+ default:
+ rc = pk_state < 0 ? pk_state : -EIO;
+ break;
+ }
+ spin_unlock_bh(&ctx->pk_lock);
+ }
+ if (rc)
+ goto out;
+
+ locked = mutex_trylock(&ctrblk_lock);
+
+ /*
+ * Note that in case of partial processing or failure the walk
+ * is NOT unmapped here. So a follow up task may reuse the walk
+ * or in case of unrecoverable failure needs to unmap it.
+ */
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ n = AES_BLOCK_SIZE;
+ if (nbytes >= 2 * AES_BLOCK_SIZE && locked)
+ n = __ctrblk_init(ctrblk, walk->iv, nbytes);
+ ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
+ k = cpacf_kmctr(ctx->fc, param, walk->dst.virt.addr,
+ walk->src.virt.addr, n, ctrptr);
+ if (k) {
+ if (ctrptr == ctrblk)
+ memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ crypto_inc(walk->iv, AES_BLOCK_SIZE);
+ rc = skcipher_walk_done(walk, nbytes - k);
+ }
+ if (k < n) {
+ if (!maysleep) {
+ if (locked)
+ mutex_unlock(&ctrblk_lock);
+ rc = -EKEYEXPIRED;
+ goto out;
+ }
+ rc = paes_convert_key(ctx);
+ if (rc) {
+ if (locked)
+ mutex_unlock(&ctrblk_lock);
+ goto out;
+ }
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
+ spin_unlock_bh(&ctx->pk_lock);
+ }
+ }
+ if (locked)
+ mutex_unlock(&ctrblk_lock);
+
+ /* final block may be < AES_BLOCK_SIZE, copy only nbytes */
+ if (nbytes) {
+ memset(buf, 0, AES_BLOCK_SIZE);
+ memcpy(buf, walk->src.virt.addr, nbytes);
+ while (1) {
+ if (cpacf_kmctr(ctx->fc, param, buf,
+ buf, AES_BLOCK_SIZE,
+ walk->iv) == AES_BLOCK_SIZE)
+ break;
+ if (!maysleep) {
+ rc = -EKEYEXPIRED;
+ goto out;
+ }
+ rc = paes_convert_key(ctx);
+ if (rc)
+ goto out;
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
+ spin_unlock_bh(&ctx->pk_lock);
+ }
+ memcpy(walk->dst.virt.addr, buf, nbytes);
+ crypto_inc(walk->iv, AES_BLOCK_SIZE);
+ rc = skcipher_walk_done(walk, 0);
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int ctr_paes_crypt(struct skcipher_request *req)
+{
+ struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk *walk = &req_ctx->walk;
+ int rc;
+
+ /*
+ * Attempt synchronous encryption first. If it fails, schedule the request
+ * asynchronously via the crypto engine. To preserve execution order,
+ * once a request is queued to the engine, further requests using the same
+ * tfm will also be routed through the engine.
+ */
+
+ rc = skcipher_walk_virt(walk, req, false);
+ if (rc)
+ goto out;
+
+ req_ctx->param_init_done = false;
+
+ /* Try synchronous operation if no active engine usage */
+ if (!atomic_read(&ctx->via_engine_ctr)) {
+ rc = ctr_paes_do_crypt(ctx, req_ctx, false);
+ if (rc == 0)
+ goto out;
+ }
+
+ /*
+ * If the sync operation failed, the key expired, or there are
+ * already requests enqueued via the engine, fall back to async.
+ * Mark the tfm as using the engine to serialize requests.
+ */
+ if (rc == 0 || rc == -EKEYEXPIRED) {
+ atomic_inc(&ctx->via_engine_ctr);
+ rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
+ if (rc != -EINPROGRESS)
+ atomic_dec(&ctx->via_engine_ctr);
+ }
+
+ if (rc != -EINPROGRESS)
+ skcipher_walk_done(walk, rc);
+
+out:
+ if (rc != -EINPROGRESS)
+ memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int ctr_paes_init(struct crypto_skcipher *tfm)
+{
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ memset(ctx, 0, sizeof(*ctx));
+ spin_lock_init(&ctx->pk_lock);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pctr_req_ctx));
+
+ return 0;
+}
+
+static void ctr_paes_exit(struct crypto_skcipher *tfm)
+{
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ memzero_explicit(ctx, sizeof(*ctx));
+}
+
+static int ctr_paes_do_one_request(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk *walk = &req_ctx->walk;
+ int rc;
- return ctx->fc ? 0 : -EINVAL;
+ /* walk has already been prepared */
+
+ rc = ctr_paes_do_crypt(ctx, req_ctx, true);
+ if (rc == -EKEYEXPIRED) {
+ /*
+ * Protected key expired, conversion is in progress.
+ * Trigger a re-schedule of this request by returning
+ * -ENOSPC ("hardware queue is full") to the crypto engine.
+ * To avoid immediate re-invocation of this callback,
+ * tell the scheduler to voluntarily give up the CPU here.
+ */
+ cond_resched();
+ pr_debug("rescheduling request\n");
+ return -ENOSPC;
+ } else if (rc) {
+ skcipher_walk_done(walk, rc);
+ }
+
+ memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
+ pr_debug("request complete with rc=%d\n", rc);
+ local_bh_disable();
+ atomic_dec(&ctx->via_engine_ctr);
+ crypto_finalize_skcipher_request(engine, req, rc);
+ local_bh_enable();
+ return rc;
}
-static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
- unsigned int in_keylen)
+static struct skcipher_engine_alg ctr_paes_alg = {
+ .base = {
+ .base.cra_name = "ctr(paes)",
+ .base.cra_driver_name = "ctr-paes-s390",
+ .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.base.cra_list),
+ .init = ctr_paes_init,
+ .exit = ctr_paes_exit,
+ .min_keysize = PAES_MIN_KEYSIZE,
+ .max_keysize = PAES_MAX_KEYSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ctr_paes_setkey,
+ .encrypt = ctr_paes_crypt,
+ .decrypt = ctr_paes_crypt,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .op = {
+ .do_one_request = ctr_paes_do_one_request,
+ },
+};
+
+/*
+ * PAES XTS implementation
+ */
+
+struct xts_full_km_param {
+ u8 key[64];
+ u8 tweak[16];
+ u8 nap[16];
+ u8 wkvp[32];
+} __packed;
+
+struct xts_km_param {
+ u8 key[PAES_256_PROTKEY_SIZE];
+ u8 init[16];
+} __packed;
+
+struct xts_pcc_param {
+ u8 key[PAES_256_PROTKEY_SIZE];
+ u8 tweak[16];
+ u8 block[16];
+ u8 bit[16];
+ u8 xts[16];
+} __packed;
+
+struct s390_pxts_req_ctx {
+ unsigned long modifier;
+ struct skcipher_walk walk;
+ bool param_init_done;
+ union {
+ struct xts_full_km_param full_km_param;
+ struct xts_km_param km_param;
+ } param;
+};
+
+static int xts_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int in_keylen)
{
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 ckey[2 * AES_MAX_KEY_SIZE];
unsigned int ckey_len;
+ long fc;
int rc;
if ((in_keylen == 32 || in_keylen == 64) &&
xts_verify_key(tfm, in_key, in_keylen))
return -EINVAL;
- _free_kb_keybuf(&ctx->kb);
- rc = _xts_key_to_kb(&ctx->kb, in_key, in_keylen);
+ /* set raw key into context */
+ rc = pxts_ctx_setkey(ctx, in_key, in_keylen);
if (rc)
- return rc;
+ goto out;
- rc = __xts_paes_set_key(ctx);
+ /* convert raw key(s) into protected key(s) */
+ rc = pxts_convert_key(ctx);
if (rc)
- return rc;
+ goto out;
/*
- * It is not possible on a single protected key (e.g. full AES-XTS) to
- * check, if k1 and k2 are the same.
- */
- if (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128 ||
- ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_256)
- return 0;
- /*
* xts_verify_key verifies the key length is not odd and makes
* sure that the two keys are not the same. This can be done
- * on the two protected keys as well
+ * on the two protected keys as well - but not for full xts keys.
*/
- ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
- AES_KEYSIZE_128 : AES_KEYSIZE_256;
- memcpy(ckey, ctx->pk[0].protkey, ckey_len);
- memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
- return xts_verify_key(tfm, ckey, 2*ckey_len);
+ if (ctx->pk[0].type == PKEY_KEYTYPE_AES_128 ||
+ ctx->pk[0].type == PKEY_KEYTYPE_AES_256) {
+ ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
+ AES_KEYSIZE_128 : AES_KEYSIZE_256;
+ memcpy(ckey, ctx->pk[0].protkey, ckey_len);
+ memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
+ rc = xts_verify_key(tfm, ckey, 2 * ckey_len);
+ memzero_explicit(ckey, sizeof(ckey));
+ if (rc)
+ goto out;
+ }
+
+ /* Pick the correct function code based on the protected key type */
+ switch (ctx->pk[0].type) {
+ case PKEY_KEYTYPE_AES_128:
+ fc = CPACF_KM_PXTS_128;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ fc = CPACF_KM_PXTS_256;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ fc = CPACF_KM_PXTS_128_FULL;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ fc = CPACF_KM_PXTS_256_FULL;
+ break;
+ default:
+ fc = 0;
+ break;
+ }
+ ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+
+ rc = fc ? 0 : -EINVAL;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
}
-static int paes_xts_crypt_full(struct skcipher_request *req,
- unsigned long modifier)
+static int xts_paes_do_crypt_fullkey(struct s390_pxts_ctx *ctx,
+ struct s390_pxts_req_ctx *req_ctx,
+ bool maysleep)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct xts_full_km_param *param = &req_ctx->param.full_km_param;
+ struct skcipher_walk *walk = &req_ctx->walk;
unsigned int keylen, offset, nbytes, n, k;
- struct {
- u8 key[64];
- u8 tweak[16];
- u8 nap[16];
- u8 wkvp[32];
- } fxts_param = {
- .nap = {0},
- };
- struct skcipher_walk walk;
- int rc;
+ int rc = 0;
- rc = skcipher_walk_virt(&walk, req, false);
- if (rc)
- return rc;
+ /*
+ * The calling function xts_paes_do_crypt() ensures the
+ * protected key state is always PK_STATE_VALID when this
+ * function is invoked.
+ */
keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 64;
offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 0;
- spin_lock_bh(&ctx->pk_lock);
- memcpy(fxts_param.key + offset, ctx->pk[0].protkey, keylen);
- memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen,
- sizeof(fxts_param.wkvp));
- spin_unlock_bh(&ctx->pk_lock);
- memcpy(fxts_param.tweak, walk.iv, sizeof(fxts_param.tweak));
- fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */
+ if (!req_ctx->param_init_done) {
+ memset(param, 0, sizeof(*param));
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
+ memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp));
+ spin_unlock_bh(&ctx->pk_lock);
+ memcpy(param->tweak, walk->iv, sizeof(param->tweak));
+ param->nap[0] = 0x01; /* initial alpha power (1, little-endian) */
+ req_ctx->param_init_done = true;
+ }
- while ((nbytes = walk.nbytes) != 0) {
+ /*
+ * Note that in case of partial processing or failure the walk
+ * is NOT unmapped here. So a follow up task may reuse the walk
+ * or in case of unrecoverable failure needs to unmap it.
+ */
+ while ((nbytes = walk->nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
- k = cpacf_km(ctx->fc | modifier, fxts_param.key + offset,
- walk.dst.virt.addr, walk.src.virt.addr, n);
+ k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset,
+ walk->dst.virt.addr, walk->src.virt.addr, n);
if (k)
- rc = skcipher_walk_done(&walk, nbytes - k);
+ rc = skcipher_walk_done(walk, nbytes - k);
if (k < n) {
- if (__xts_paes_convert_key(ctx))
- return skcipher_walk_done(&walk, -EIO);
+ if (!maysleep) {
+ rc = -EKEYEXPIRED;
+ goto out;
+ }
+ rc = pxts_convert_key(ctx);
+ if (rc)
+ goto out;
spin_lock_bh(&ctx->pk_lock);
- memcpy(fxts_param.key + offset, ctx->pk[0].protkey,
- keylen);
- memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen,
- sizeof(fxts_param.wkvp));
+ memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
+ memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp));
spin_unlock_bh(&ctx->pk_lock);
}
}
+out:
+ pr_debug("rc=%d\n", rc);
return rc;
}
-static int paes_xts_crypt(struct skcipher_request *req, unsigned long modifier)
+static inline int __xts_2keys_prep_param(struct s390_pxts_ctx *ctx,
+ struct xts_km_param *param,
+ struct skcipher_walk *walk,
+ unsigned int keylen,
+ unsigned int offset, bool maysleep)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct xts_pcc_param pcc_param;
+ unsigned long cc = 1;
+ int rc = 0;
+
+ while (cc) {
+ memset(&pcc_param, 0, sizeof(pcc_param));
+ memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
+ memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
+ spin_unlock_bh(&ctx->pk_lock);
+ cc = cpacf_pcc(ctx->fc, pcc_param.key + offset);
+ if (cc) {
+ if (!maysleep) {
+ rc = -EKEYEXPIRED;
+ break;
+ }
+ rc = pxts_convert_key(ctx);
+ if (rc)
+ break;
+ continue;
+ }
+ memcpy(param->init, pcc_param.xts, 16);
+ }
+
+ memzero_explicit(pcc_param.key, sizeof(pcc_param.key));
+ return rc;
+}
+
+static int xts_paes_do_crypt_2keys(struct s390_pxts_ctx *ctx,
+ struct s390_pxts_req_ctx *req_ctx,
+ bool maysleep)
+{
+ struct xts_km_param *param = &req_ctx->param.km_param;
+ struct skcipher_walk *walk = &req_ctx->walk;
unsigned int keylen, offset, nbytes, n, k;
- struct {
- u8 key[PAES_256_PROTKEY_SIZE];
- u8 tweak[16];
- u8 block[16];
- u8 bit[16];
- u8 xts[16];
- } pcc_param;
- struct {
- u8 key[PAES_256_PROTKEY_SIZE];
- u8 init[16];
- } xts_param;
- struct skcipher_walk walk;
- int rc;
+ int rc = 0;
- rc = skcipher_walk_virt(&walk, req, false);
- if (rc)
- return rc;
+ /*
+ * The calling function xts_paes_do_crypt() ensures the
+ * protected key state is always PK_STATE_VALID when this
+ * function is invoked.
+ */
keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
- memset(&pcc_param, 0, sizeof(pcc_param));
- memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
- spin_lock_bh(&ctx->pk_lock);
- memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
- memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
- spin_unlock_bh(&ctx->pk_lock);
- cpacf_pcc(ctx->fc, pcc_param.key + offset);
- memcpy(xts_param.init, pcc_param.xts, 16);
+ if (!req_ctx->param_init_done) {
+ rc = __xts_2keys_prep_param(ctx, param, walk,
+ keylen, offset, maysleep);
+ if (rc)
+ goto out;
+ req_ctx->param_init_done = true;
+ }
- while ((nbytes = walk.nbytes) != 0) {
+ /*
+ * Note that in case of partial processing or failure the walk
+ * is NOT unmapped here. So a follow up task may reuse the walk
+ * or in case of unrecoverable failure needs to unmap it.
+ */
+ while ((nbytes = walk->nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
- k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
- walk.dst.virt.addr, walk.src.virt.addr, n);
+ k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset,
+ walk->dst.virt.addr, walk->src.virt.addr, n);
if (k)
- rc = skcipher_walk_done(&walk, nbytes - k);
+ rc = skcipher_walk_done(walk, nbytes - k);
if (k < n) {
- if (__xts_paes_convert_key(ctx))
- return skcipher_walk_done(&walk, -EIO);
+ if (!maysleep) {
+ rc = -EKEYEXPIRED;
+ goto out;
+ }
+ rc = pxts_convert_key(ctx);
+ if (rc)
+ goto out;
spin_lock_bh(&ctx->pk_lock);
- memcpy(xts_param.key + offset,
- ctx->pk[0].protkey, keylen);
+ memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
spin_unlock_bh(&ctx->pk_lock);
}
}
+out:
+ pr_debug("rc=%d\n", rc);
return rc;
}
-static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
+static int xts_paes_do_crypt(struct s390_pxts_ctx *ctx,
+ struct s390_pxts_req_ctx *req_ctx,
+ bool maysleep)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int pk_state, rc = 0;
+
+ /* fetch and check protected key state */
+ spin_lock_bh(&ctx->pk_lock);
+ pk_state = ctx->pk_state;
+ switch (pk_state) {
+ case PK_STATE_NO_KEY:
+ rc = -ENOKEY;
+ break;
+ case PK_STATE_CONVERT_IN_PROGRESS:
+ rc = -EKEYEXPIRED;
+ break;
+ case PK_STATE_VALID:
+ break;
+ default:
+ rc = pk_state < 0 ? pk_state : -EIO;
+ break;
+ }
+ spin_unlock_bh(&ctx->pk_lock);
+ if (rc)
+ goto out;
+ /* Call the 'real' crypt function based on the xts prot key type. */
switch (ctx->fc) {
case CPACF_KM_PXTS_128:
case CPACF_KM_PXTS_256:
- return paes_xts_crypt(req, modifier);
+ rc = xts_paes_do_crypt_2keys(ctx, req_ctx, maysleep);
+ break;
case CPACF_KM_PXTS_128_FULL:
case CPACF_KM_PXTS_256_FULL:
- return paes_xts_crypt_full(req, modifier);
+ rc = xts_paes_do_crypt_fullkey(ctx, req_ctx, maysleep);
+ break;
default:
- return -EINVAL;
+ rc = -EINVAL;
}
-}
-static int xts_paes_encrypt(struct skcipher_request *req)
-{
- return xts_paes_crypt(req, 0);
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
}
-static int xts_paes_decrypt(struct skcipher_request *req)
+static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
- return xts_paes_crypt(req, CPACF_DECRYPT);
-}
+ struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk *walk = &req_ctx->walk;
+ int rc;
-static struct skcipher_alg xts_paes_alg = {
- .base.cra_name = "xts(paes)",
- .base.cra_driver_name = "xts-paes-s390",
- .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct s390_pxts_ctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.cra_list),
- .init = xts_paes_init,
- .exit = xts_paes_exit,
- .min_keysize = 2 * PAES_MIN_KEYSIZE,
- .max_keysize = 2 * PAES_MAX_KEYSIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = xts_paes_set_key,
- .encrypt = xts_paes_encrypt,
- .decrypt = xts_paes_decrypt,
-};
+ /*
+ * Attempt synchronous encryption first. If it fails, schedule the request
+ * asynchronously via the crypto engine. To preserve execution order,
+ * once a request is queued to the engine, further requests using the same
+ * tfm will also be routed through the engine.
+ */
-static int ctr_paes_init(struct crypto_skcipher *tfm)
-{
- struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ rc = skcipher_walk_virt(walk, req, false);
+ if (rc)
+ goto out;
- ctx->kb.key = NULL;
- spin_lock_init(&ctx->pk_lock);
+ req_ctx->modifier = modifier;
+ req_ctx->param_init_done = false;
- return 0;
-}
+ /* Try synchronous operation if no active engine usage */
+ if (!atomic_read(&ctx->via_engine_ctr)) {
+ rc = xts_paes_do_crypt(ctx, req_ctx, false);
+ if (rc == 0)
+ goto out;
+ }
-static void ctr_paes_exit(struct crypto_skcipher *tfm)
-{
- struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ /*
+ * If the sync operation failed, the key expired, or there are
+ * already requests enqueued via the engine, fall back to async.
+ * Mark the tfm as using the engine to serialize requests.
+ */
+ if (rc == 0 || rc == -EKEYEXPIRED) {
+ atomic_inc(&ctx->via_engine_ctr);
+ rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
+ if (rc != -EINPROGRESS)
+ atomic_dec(&ctx->via_engine_ctr);
+ }
+
+ if (rc != -EINPROGRESS)
+ skcipher_walk_done(walk, rc);
- _free_kb_keybuf(&ctx->kb);
+out:
+ if (rc != -EINPROGRESS)
+ memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
+ pr_debug("rc=%d\n", rc);
+ return rc;
}
-static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
+static int xts_paes_encrypt(struct skcipher_request *req)
{
- unsigned long fc;
- int rc;
-
- rc = __paes_convert_key(ctx);
- if (rc)
- return rc;
-
- /* Pick the correct function code based on the protected key type */
- fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
- (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 :
- (ctx->pk.type == PKEY_KEYTYPE_AES_256) ?
- CPACF_KMCTR_PAES_256 : 0;
-
- /* Check if the function code is available */
- ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
+ return xts_paes_crypt(req, 0);
+}
- return ctx->fc ? 0 : -EINVAL;
+static int xts_paes_decrypt(struct skcipher_request *req)
+{
+ return xts_paes_crypt(req, CPACF_DECRYPT);
}
-static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
- unsigned int key_len)
+static int xts_paes_init(struct crypto_skcipher *tfm)
{
- struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
- int rc;
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
- _free_kb_keybuf(&ctx->kb);
- rc = _key_to_kb(&ctx->kb, in_key, key_len);
- if (rc)
- return rc;
+ memset(ctx, 0, sizeof(*ctx));
+ spin_lock_init(&ctx->pk_lock);
- return __ctr_paes_set_key(ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pxts_req_ctx));
+
+ return 0;
}
-static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
+static void xts_paes_exit(struct crypto_skcipher *tfm)
{
- unsigned int i, n;
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
- /* only use complete blocks, max. PAGE_SIZE */
- memcpy(ctrptr, iv, AES_BLOCK_SIZE);
- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
- for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
- memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
- crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
- ctrptr += AES_BLOCK_SIZE;
- }
- return n;
+ memzero_explicit(ctx, sizeof(*ctx));
}
-static int ctr_paes_crypt(struct skcipher_request *req)
+static int xts_paes_do_one_request(struct crypto_engine *engine, void *areq)
{
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
- u8 buf[AES_BLOCK_SIZE], *ctrptr;
- struct {
- u8 key[PAES_256_PROTKEY_SIZE];
- } param;
- struct skcipher_walk walk;
- unsigned int nbytes, n, k;
- int rc, locked;
-
- rc = skcipher_walk_virt(&walk, req, false);
- if (rc)
- return rc;
-
- spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
- spin_unlock_bh(&ctx->pk_lock);
-
- locked = mutex_trylock(&ctrblk_lock);
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk *walk = &req_ctx->walk;
+ int rc;
- while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
- n = AES_BLOCK_SIZE;
- if (nbytes >= 2*AES_BLOCK_SIZE && locked)
- n = __ctrblk_init(ctrblk, walk.iv, nbytes);
- ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
- k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr,
- walk.src.virt.addr, n, ctrptr);
- if (k) {
- if (ctrptr == ctrblk)
- memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
- AES_BLOCK_SIZE);
- crypto_inc(walk.iv, AES_BLOCK_SIZE);
- rc = skcipher_walk_done(&walk, nbytes - k);
- }
- if (k < n) {
- if (__paes_convert_key(ctx)) {
- if (locked)
- mutex_unlock(&ctrblk_lock);
- return skcipher_walk_done(&walk, -EIO);
- }
- spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
- spin_unlock_bh(&ctx->pk_lock);
- }
- }
- if (locked)
- mutex_unlock(&ctrblk_lock);
- /*
- * final block may be < AES_BLOCK_SIZE, copy only nbytes
- */
- if (nbytes) {
- memset(buf, 0, AES_BLOCK_SIZE);
- memcpy(buf, walk.src.virt.addr, nbytes);
- while (1) {
- if (cpacf_kmctr(ctx->fc, &param, buf,
- buf, AES_BLOCK_SIZE,
- walk.iv) == AES_BLOCK_SIZE)
- break;
- if (__paes_convert_key(ctx))
- return skcipher_walk_done(&walk, -EIO);
- spin_lock_bh(&ctx->pk_lock);
- memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
- spin_unlock_bh(&ctx->pk_lock);
- }
- memcpy(walk.dst.virt.addr, buf, nbytes);
- crypto_inc(walk.iv, AES_BLOCK_SIZE);
- rc = skcipher_walk_done(&walk, nbytes);
+ /* walk has already been prepared */
+
+ rc = xts_paes_do_crypt(ctx, req_ctx, true);
+ if (rc == -EKEYEXPIRED) {
+ /*
+ * Protected key expired, conversion is in progress.
+ * Trigger a re-schedule of this request by returning
+ * -ENOSPC ("hardware queue is full") to the crypto engine.
+ * To avoid immediate re-invocation of this callback,
+ * tell the scheduler to voluntarily give up the CPU here.
+ */
+ cond_resched();
+ pr_debug("rescheduling request\n");
+ return -ENOSPC;
+ } else if (rc) {
+ skcipher_walk_done(walk, rc);
}
+ memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
+ pr_debug("request complete with rc=%d\n", rc);
+ local_bh_disable();
+ atomic_dec(&ctx->via_engine_ctr);
+ crypto_finalize_skcipher_request(engine, req, rc);
+ local_bh_enable();
return rc;
}
-static struct skcipher_alg ctr_paes_alg = {
- .base.cra_name = "ctr(paes)",
- .base.cra_driver_name = "ctr-paes-s390",
- .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.cra_list),
- .init = ctr_paes_init,
- .exit = ctr_paes_exit,
- .min_keysize = PAES_MIN_KEYSIZE,
- .max_keysize = PAES_MAX_KEYSIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ctr_paes_set_key,
- .encrypt = ctr_paes_crypt,
- .decrypt = ctr_paes_crypt,
- .chunksize = AES_BLOCK_SIZE,
+static struct skcipher_engine_alg xts_paes_alg = {
+ .base = {
+ .base.cra_name = "xts(paes)",
+ .base.cra_driver_name = "xts-paes-s390",
+ .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_pxts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.base.cra_list),
+ .init = xts_paes_init,
+ .exit = xts_paes_exit,
+ .min_keysize = 2 * PAES_MIN_KEYSIZE,
+ .max_keysize = 2 * PAES_MAX_KEYSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_paes_setkey,
+ .encrypt = xts_paes_encrypt,
+ .decrypt = xts_paes_decrypt,
+ },
+ .op = {
+ .do_one_request = xts_paes_do_one_request,
+ },
};
-static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
+/*
+ * alg register, unregister, module init, exit
+ */
+
+static struct miscdevice paes_dev = {
+ .name = "paes",
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+static inline void __crypto_unregister_skcipher(struct skcipher_engine_alg *alg)
{
- if (!list_empty(&alg->base.cra_list))
- crypto_unregister_skcipher(alg);
+ if (!list_empty(&alg->base.base.cra_list))
+ crypto_engine_unregister_skcipher(alg);
}
static void paes_s390_fini(void)
{
+ if (paes_crypto_engine) {
+ crypto_engine_stop(paes_crypto_engine);
+ crypto_engine_exit(paes_crypto_engine);
+ }
__crypto_unregister_skcipher(&ctr_paes_alg);
__crypto_unregister_skcipher(&xts_paes_alg);
__crypto_unregister_skcipher(&cbc_paes_alg);
__crypto_unregister_skcipher(&ecb_paes_alg);
if (ctrblk)
- free_page((unsigned long) ctrblk);
+ free_page((unsigned long)ctrblk);
+ misc_deregister(&paes_dev);
}
static int __init paes_s390_init(void)
{
int rc;
+ /* register a simple paes pseudo misc device */
+ rc = misc_register(&paes_dev);
+ if (rc)
+ return rc;
+
+ /* with this pseudo device, allocate and start a crypto engine */
+ paes_crypto_engine =
+ crypto_engine_alloc_init_and_set(paes_dev.this_device,
+ true, NULL, false, MAX_QLEN);
+ if (!paes_crypto_engine) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
+ rc = crypto_engine_start(paes_crypto_engine);
+ if (rc) {
+ crypto_engine_exit(paes_crypto_engine);
+ paes_crypto_engine = NULL;
+ goto out_err;
+ }
+
/* Query available functions for KM, KMC and KMCTR */
cpacf_query(CPACF_KM, &km_functions);
cpacf_query(CPACF_KMC, &kmc_functions);
@@ -927,40 +1653,45 @@ static int __init paes_s390_init(void)
if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
- rc = crypto_register_skcipher(&ecb_paes_alg);
+ rc = crypto_engine_register_skcipher(&ecb_paes_alg);
if (rc)
goto out_err;
+ pr_debug("%s registered\n", ecb_paes_alg.base.base.cra_driver_name);
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
- rc = crypto_register_skcipher(&cbc_paes_alg);
+ rc = crypto_engine_register_skcipher(&cbc_paes_alg);
if (rc)
goto out_err;
+ pr_debug("%s registered\n", cbc_paes_alg.base.base.cra_driver_name);
}
if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
- rc = crypto_register_skcipher(&xts_paes_alg);
+ rc = crypto_engine_register_skcipher(&xts_paes_alg);
if (rc)
goto out_err;
+ pr_debug("%s registered\n", xts_paes_alg.base.base.cra_driver_name);
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
- ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
+ ctrblk = (u8 *)__get_free_page(GFP_KERNEL);
if (!ctrblk) {
rc = -ENOMEM;
goto out_err;
}
- rc = crypto_register_skcipher(&ctr_paes_alg);
+ rc = crypto_engine_register_skcipher(&ctr_paes_alg);
if (rc)
goto out_err;
+ pr_debug("%s registered\n", ctr_paes_alg.base.base.cra_driver_name);
}
return 0;
+
out_err:
paes_s390_fini();
return rc;
diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h
index 2bb22db54c31..d757ccbce2b4 100644
--- a/arch/s390/crypto/sha.h
+++ b/arch/s390/crypto/sha.h
@@ -10,27 +10,33 @@
#ifndef _CRYPTO_ARCH_S390_SHA_H
#define _CRYPTO_ARCH_S390_SHA_H
-#include <linux/crypto.h>
-#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/sha3.h>
+#include <linux/types.h>
/* must be big enough for the largest SHA variant */
-#define SHA3_STATE_SIZE 200
#define CPACF_MAX_PARMBLOCK_SIZE SHA3_STATE_SIZE
#define SHA_MAX_BLOCK_SIZE SHA3_224_BLOCK_SIZE
+#define S390_SHA_CTX_SIZE sizeof(struct s390_sha_ctx)
struct s390_sha_ctx {
u64 count; /* message length in bytes */
- u32 state[CPACF_MAX_PARMBLOCK_SIZE / sizeof(u32)];
- u8 buf[SHA_MAX_BLOCK_SIZE];
+ union {
+ u32 state[CPACF_MAX_PARMBLOCK_SIZE / sizeof(u32)];
+ struct {
+ u64 state[SHA512_DIGEST_SIZE / sizeof(u64)];
+ u64 count_hi;
+ } sha512;
+ };
int func; /* KIMD function to use */
- int first_message_part;
+ bool first_message_part;
};
struct shash_desc;
-int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len);
-int s390_sha_final(struct shash_desc *desc, u8 *out);
+int s390_sha_update_blocks(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+int s390_sha_finup(struct shash_desc *desc, const u8 *src, unsigned int len,
+ u8 *out);
#endif
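
The shash drivers touched below keep their external behaviour: the block-only/finup conversion changes how the shash core drives the s390 glue code (whole blocks via update_blocks, the tail via finup), while consumers keep using the same synchronous hash API. A minimal sketch of such a consumer, assuming hypothetical data and len variables:

	/* illustrative sketch only - data and len are assumed */
	struct crypto_shash *tfm;
	u8 digest[SHA3_256_DIGEST_SIZE];
	int rc;

	tfm = crypto_alloc_shash("sha3-256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		rc = crypto_shash_digest(desc, data, len, digest);
	}
	crypto_free_shash(tfm);
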
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index bc3a22704e09..d229cbd2ba22 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -18,12 +18,12 @@
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
*/
+#include <asm/cpacf.h>
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/cpufeature.h>
#include <crypto/sha1.h>
-#include <asm/cpacf.h>
+#include <linux/cpufeature.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include "sha.h"
@@ -49,7 +49,6 @@ static int s390_sha1_export(struct shash_desc *desc, void *out)
octx->count = sctx->count;
memcpy(octx->state, sctx->state, sizeof(octx->state));
- memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer));
return 0;
}
@@ -60,7 +59,6 @@ static int s390_sha1_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->count;
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
- memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
sctx->func = CPACF_KIMD_SHA_1;
return 0;
}
@@ -68,16 +66,18 @@ static int s390_sha1_import(struct shash_desc *desc, const void *in)
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = s390_sha1_init,
- .update = s390_sha_update,
- .final = s390_sha_final,
+ .update = s390_sha_update_blocks,
+ .finup = s390_sha_finup,
.export = s390_sha1_export,
.import = s390_sha1_import,
- .descsize = sizeof(struct s390_sha_ctx),
- .statesize = sizeof(struct sha1_state),
+ .descsize = S390_SHA_CTX_SIZE,
+ .statesize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-s390",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
deleted file mode 100644
index 6f1ccdf93d3e..000000000000
--- a/arch/s390/crypto/sha256_s390.c
+++ /dev/null
@@ -1,143 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Cryptographic API.
- *
- * s390 implementation of the SHA256 and SHA224 Secure Hash Algorithm.
- *
- * s390 Version:
- * Copyright IBM Corp. 2005, 2011
- * Author(s): Jan Glauber (jang@de.ibm.com)
- */
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/cpufeature.h>
-#include <crypto/sha2.h>
-#include <asm/cpacf.h>
-
-#include "sha.h"
-
-static int s390_sha256_init(struct shash_desc *desc)
-{
- struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA256_H0;
- sctx->state[1] = SHA256_H1;
- sctx->state[2] = SHA256_H2;
- sctx->state[3] = SHA256_H3;
- sctx->state[4] = SHA256_H4;
- sctx->state[5] = SHA256_H5;
- sctx->state[6] = SHA256_H6;
- sctx->state[7] = SHA256_H7;
- sctx->count = 0;
- sctx->func = CPACF_KIMD_SHA_256;
-
- return 0;
-}
-
-static int sha256_export(struct shash_desc *desc, void *out)
-{
- struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- struct sha256_state *octx = out;
-
- octx->count = sctx->count;
- memcpy(octx->state, sctx->state, sizeof(octx->state));
- memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
- return 0;
-}
-
-static int sha256_import(struct shash_desc *desc, const void *in)
-{
- struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- const struct sha256_state *ictx = in;
-
- sctx->count = ictx->count;
- memcpy(sctx->state, ictx->state, sizeof(ictx->state));
- memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
- sctx->func = CPACF_KIMD_SHA_256;
- return 0;
-}
-
-static struct shash_alg sha256_alg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = s390_sha256_init,
- .update = s390_sha_update,
- .final = s390_sha_final,
- .export = sha256_export,
- .import = sha256_import,
- .descsize = sizeof(struct s390_sha_ctx),
- .statesize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name= "sha256-s390",
- .cra_priority = 300,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static int s390_sha224_init(struct shash_desc *desc)
-{
- struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA224_H0;
- sctx->state[1] = SHA224_H1;
- sctx->state[2] = SHA224_H2;
- sctx->state[3] = SHA224_H3;
- sctx->state[4] = SHA224_H4;
- sctx->state[5] = SHA224_H5;
- sctx->state[6] = SHA224_H6;
- sctx->state[7] = SHA224_H7;
- sctx->count = 0;
- sctx->func = CPACF_KIMD_SHA_256;
-
- return 0;
-}
-
-static struct shash_alg sha224_alg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = s390_sha224_init,
- .update = s390_sha_update,
- .final = s390_sha_final,
- .export = sha256_export,
- .import = sha256_import,
- .descsize = sizeof(struct s390_sha_ctx),
- .statesize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name= "sha224-s390",
- .cra_priority = 300,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static int __init sha256_s390_init(void)
-{
- int ret;
-
- if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_256))
- return -ENODEV;
- ret = crypto_register_shash(&sha256_alg);
- if (ret < 0)
- goto out;
- ret = crypto_register_shash(&sha224_alg);
- if (ret < 0)
- crypto_unregister_shash(&sha256_alg);
-out:
- return ret;
-}
-
-static void __exit sha256_s390_fini(void)
-{
- crypto_unregister_shash(&sha224_alg);
- crypto_unregister_shash(&sha256_alg);
-}
-
-module_cpu_feature_match(S390_CPU_FEATURE_MSA, sha256_s390_init);
-module_exit(sha256_s390_fini);
-
-MODULE_ALIAS_CRYPTO("sha256");
-MODULE_ALIAS_CRYPTO("sha224");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
diff --git a/arch/s390/crypto/sha3_256_s390.c b/arch/s390/crypto/sha3_256_s390.c
index a84ef692f572..4a7731ac6bcd 100644
--- a/arch/s390/crypto/sha3_256_s390.c
+++ b/arch/s390/crypto/sha3_256_s390.c
@@ -8,12 +8,14 @@
* Copyright IBM Corp. 2019
* Author(s): Joerg Schmidbauer (jschmidb@de.ibm.com)
*/
+#include <asm/cpacf.h>
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/cpufeature.h>
#include <crypto/sha3.h>
-#include <asm/cpacf.h>
+#include <linux/cpufeature.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include "sha.h"
@@ -21,11 +23,11 @@ static int sha3_256_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- if (!test_facility(86)) /* msa 12 */
+ sctx->first_message_part = test_facility(86);
+ if (!sctx->first_message_part)
memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_256;
- sctx->first_message_part = 1;
return 0;
}
@@ -35,11 +37,11 @@ static int sha3_256_export(struct shash_desc *desc, void *out)
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
struct sha3_state *octx = out;
- octx->rsiz = sctx->count;
+ if (sctx->first_message_part) {
+ memset(sctx->state, 0, sizeof(sctx->state));
+ sctx->first_message_part = 0;
+ }
memcpy(octx->st, sctx->state, sizeof(octx->st));
- memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
- octx->partial = sctx->first_message_part;
-
return 0;
}
@@ -48,10 +50,9 @@ static int sha3_256_import(struct shash_desc *desc, const void *in)
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha3_state *ictx = in;
- sctx->count = ictx->rsiz;
+ sctx->count = 0;
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
- memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
- sctx->first_message_part = ictx->partial;
+ sctx->first_message_part = 0;
sctx->func = CPACF_KIMD_SHA3_256;
return 0;
@@ -60,30 +61,26 @@ static int sha3_256_import(struct shash_desc *desc, const void *in)
static int sha3_224_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- const struct sha3_state *ictx = in;
- sctx->count = ictx->rsiz;
- memcpy(sctx->state, ictx->st, sizeof(ictx->st));
- memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
- sctx->first_message_part = ictx->partial;
+ sha3_256_import(desc, in);
sctx->func = CPACF_KIMD_SHA3_224;
-
return 0;
}
static struct shash_alg sha3_256_alg = {
.digestsize = SHA3_256_DIGEST_SIZE, /* = 32 */
.init = sha3_256_init,
- .update = s390_sha_update,
- .final = s390_sha_final,
+ .update = s390_sha_update_blocks,
+ .finup = s390_sha_finup,
.export = sha3_256_export,
.import = sha3_256_import,
- .descsize = sizeof(struct s390_sha_ctx),
- .statesize = sizeof(struct sha3_state),
+ .descsize = S390_SHA_CTX_SIZE,
+ .statesize = SHA3_STATE_SIZE,
.base = {
.cra_name = "sha3-256",
.cra_driver_name = "sha3-256-s390",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA3_256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -93,28 +90,25 @@ static int sha3_224_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- if (!test_facility(86)) /* msa 12 */
- memset(sctx->state, 0, sizeof(sctx->state));
- sctx->count = 0;
+ sha3_256_init(desc);
sctx->func = CPACF_KIMD_SHA3_224;
- sctx->first_message_part = 1;
-
return 0;
}
static struct shash_alg sha3_224_alg = {
.digestsize = SHA3_224_DIGEST_SIZE,
.init = sha3_224_init,
- .update = s390_sha_update,
- .final = s390_sha_final,
+ .update = s390_sha_update_blocks,
+ .finup = s390_sha_finup,
.export = sha3_256_export, /* same as for 256 */
.import = sha3_224_import, /* function code different! */
- .descsize = sizeof(struct s390_sha_ctx),
- .statesize = sizeof(struct sha3_state),
+ .descsize = S390_SHA_CTX_SIZE,
+ .statesize = SHA3_STATE_SIZE,
.base = {
.cra_name = "sha3-224",
.cra_driver_name = "sha3-224-s390",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA3_224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/s390/crypto/sha3_512_s390.c b/arch/s390/crypto/sha3_512_s390.c
index 07528fc98ff7..018f02fff444 100644
--- a/arch/s390/crypto/sha3_512_s390.c
+++ b/arch/s390/crypto/sha3_512_s390.c
@@ -7,12 +7,14 @@
* Copyright IBM Corp. 2019
* Author(s): Joerg Schmidbauer (jschmidb@de.ibm.com)
*/
+#include <asm/cpacf.h>
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/cpufeature.h>
#include <crypto/sha3.h>
-#include <asm/cpacf.h>
+#include <linux/cpufeature.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include "sha.h"
@@ -20,11 +22,11 @@ static int sha3_512_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- if (!test_facility(86)) /* msa 12 */
+ sctx->first_message_part = test_facility(86);
+ if (!sctx->first_message_part)
memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_512;
- sctx->first_message_part = 1;
return 0;
}
@@ -34,13 +36,12 @@ static int sha3_512_export(struct shash_desc *desc, void *out)
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
struct sha3_state *octx = out;
- octx->rsiz = sctx->count;
- octx->rsizw = sctx->count >> 32;
+ if (sctx->first_message_part) {
+ memset(sctx->state, 0, sizeof(sctx->state));
+ sctx->first_message_part = 0;
+ }
memcpy(octx->st, sctx->state, sizeof(octx->st));
- memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
- octx->partial = sctx->first_message_part;
-
return 0;
}
@@ -49,13 +50,9 @@ static int sha3_512_import(struct shash_desc *desc, const void *in)
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha3_state *ictx = in;
- if (unlikely(ictx->rsizw))
- return -ERANGE;
- sctx->count = ictx->rsiz;
-
+ sctx->count = 0;
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
- memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
- sctx->first_message_part = ictx->partial;
+ sctx->first_message_part = 0;
sctx->func = CPACF_KIMD_SHA3_512;
return 0;
@@ -64,33 +61,26 @@ static int sha3_512_import(struct shash_desc *desc, const void *in)
static int sha3_384_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- const struct sha3_state *ictx = in;
- if (unlikely(ictx->rsizw))
- return -ERANGE;
- sctx->count = ictx->rsiz;
-
- memcpy(sctx->state, ictx->st, sizeof(ictx->st));
- memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
- sctx->first_message_part = ictx->partial;
+ sha3_512_import(desc, in);
sctx->func = CPACF_KIMD_SHA3_384;
-
return 0;
}
static struct shash_alg sha3_512_alg = {
.digestsize = SHA3_512_DIGEST_SIZE,
.init = sha3_512_init,
- .update = s390_sha_update,
- .final = s390_sha_final,
+ .update = s390_sha_update_blocks,
+ .finup = s390_sha_finup,
.export = sha3_512_export,
.import = sha3_512_import,
- .descsize = sizeof(struct s390_sha_ctx),
- .statesize = sizeof(struct sha3_state),
+ .descsize = S390_SHA_CTX_SIZE,
+ .statesize = SHA3_STATE_SIZE,
.base = {
.cra_name = "sha3-512",
.cra_driver_name = "sha3-512-s390",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA3_512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -102,28 +92,25 @@ static int sha3_384_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- if (!test_facility(86)) /* msa 12 */
- memset(sctx->state, 0, sizeof(sctx->state));
- sctx->count = 0;
+ sha3_512_init(desc);
sctx->func = CPACF_KIMD_SHA3_384;
- sctx->first_message_part = 1;
-
return 0;
}
static struct shash_alg sha3_384_alg = {
.digestsize = SHA3_384_DIGEST_SIZE,
.init = sha3_384_init,
- .update = s390_sha_update,
- .final = s390_sha_final,
+ .update = s390_sha_update_blocks,
+ .finup = s390_sha_finup,
.export = sha3_512_export, /* same as for 512 */
.import = sha3_384_import, /* function code different! */
- .descsize = sizeof(struct s390_sha_ctx),
- .statesize = sizeof(struct sha3_state),
+ .descsize = S390_SHA_CTX_SIZE,
+ .statesize = SHA3_STATE_SIZE,
.base = {
.cra_name = "sha3-384",
.cra_driver_name = "sha3-384-s390",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA3_384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha_ctx),
.cra_module = THIS_MODULE,
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 04f11c407763..33711a29618c 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -7,14 +7,13 @@
* Copyright IBM Corp. 2007
* Author(s): Jan Glauber (jang@de.ibm.com)
*/
+#include <asm/cpacf.h>
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
+#include <linux/cpufeature.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/cpufeature.h>
-#include <asm/cpacf.h>
#include "sha.h"
@@ -22,15 +21,16 @@ static int sha512_init(struct shash_desc *desc)
{
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
- *(__u64 *)&ctx->state[0] = SHA512_H0;
- *(__u64 *)&ctx->state[2] = SHA512_H1;
- *(__u64 *)&ctx->state[4] = SHA512_H2;
- *(__u64 *)&ctx->state[6] = SHA512_H3;
- *(__u64 *)&ctx->state[8] = SHA512_H4;
- *(__u64 *)&ctx->state[10] = SHA512_H5;
- *(__u64 *)&ctx->state[12] = SHA512_H6;
- *(__u64 *)&ctx->state[14] = SHA512_H7;
+ ctx->sha512.state[0] = SHA512_H0;
+ ctx->sha512.state[1] = SHA512_H1;
+ ctx->sha512.state[2] = SHA512_H2;
+ ctx->sha512.state[3] = SHA512_H3;
+ ctx->sha512.state[4] = SHA512_H4;
+ ctx->sha512.state[5] = SHA512_H5;
+ ctx->sha512.state[6] = SHA512_H6;
+ ctx->sha512.state[7] = SHA512_H7;
ctx->count = 0;
+ ctx->sha512.count_hi = 0;
ctx->func = CPACF_KIMD_SHA_512;
return 0;
@@ -42,9 +42,8 @@ static int sha512_export(struct shash_desc *desc, void *out)
struct sha512_state *octx = out;
octx->count[0] = sctx->count;
- octx->count[1] = 0;
+ octx->count[1] = sctx->sha512.count_hi;
memcpy(octx->state, sctx->state, sizeof(octx->state));
- memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
return 0;
}
@@ -53,12 +52,10 @@ static int sha512_import(struct shash_desc *desc, const void *in)
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha512_state *ictx = in;
- if (unlikely(ictx->count[1]))
- return -ERANGE;
sctx->count = ictx->count[0];
+ sctx->sha512.count_hi = ictx->count[1];
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
- memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = CPACF_KIMD_SHA_512;
return 0;
}
@@ -66,16 +63,18 @@ static int sha512_import(struct shash_desc *desc, const void *in)
static struct shash_alg sha512_alg = {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_init,
- .update = s390_sha_update,
- .final = s390_sha_final,
+ .update = s390_sha_update_blocks,
+ .finup = s390_sha_finup,
.export = sha512_export,
.import = sha512_import,
.descsize = sizeof(struct s390_sha_ctx),
- .statesize = sizeof(struct sha512_state),
+ .statesize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha512",
.cra_driver_name= "sha512-s390",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -87,15 +86,16 @@ static int sha384_init(struct shash_desc *desc)
{
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
- *(__u64 *)&ctx->state[0] = SHA384_H0;
- *(__u64 *)&ctx->state[2] = SHA384_H1;
- *(__u64 *)&ctx->state[4] = SHA384_H2;
- *(__u64 *)&ctx->state[6] = SHA384_H3;
- *(__u64 *)&ctx->state[8] = SHA384_H4;
- *(__u64 *)&ctx->state[10] = SHA384_H5;
- *(__u64 *)&ctx->state[12] = SHA384_H6;
- *(__u64 *)&ctx->state[14] = SHA384_H7;
+ ctx->sha512.state[0] = SHA384_H0;
+ ctx->sha512.state[1] = SHA384_H1;
+ ctx->sha512.state[2] = SHA384_H2;
+ ctx->sha512.state[3] = SHA384_H3;
+ ctx->sha512.state[4] = SHA384_H4;
+ ctx->sha512.state[5] = SHA384_H5;
+ ctx->sha512.state[6] = SHA384_H6;
+ ctx->sha512.state[7] = SHA384_H7;
ctx->count = 0;
+ ctx->sha512.count_hi = 0;
ctx->func = CPACF_KIMD_SHA_512;
return 0;
@@ -104,17 +104,19 @@ static int sha384_init(struct shash_desc *desc)
static struct shash_alg sha384_alg = {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_init,
- .update = s390_sha_update,
- .final = s390_sha_final,
+ .update = s390_sha_update_blocks,
+ .finup = s390_sha_finup,
.export = sha512_export,
.import = sha512_import,
.descsize = sizeof(struct s390_sha_ctx),
- .statesize = sizeof(struct sha512_state),
+ .statesize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha384",
.cra_driver_name= "sha384-s390",
.cra_priority = 300,
.cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_ctxsize = sizeof(struct s390_sha_ctx),
.cra_module = THIS_MODULE,
}
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index 961d7d522af1..b5e2c365ea05 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -13,50 +13,33 @@
#include <asm/cpacf.h>
#include "sha.h"
-int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
+int s390_sha_update_blocks(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
- unsigned int index, n;
+ struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int n;
int fc;
- /* how much is already in the buffer? */
- index = ctx->count % bsize;
- ctx->count += len;
-
- if ((index + len) < bsize)
- goto store;
-
fc = ctx->func;
if (ctx->first_message_part)
- fc |= test_facility(86) ? CPACF_KIMD_NIP : 0;
-
- /* process one stored block */
- if (index) {
- memcpy(ctx->buf + index, data, bsize - index);
- cpacf_kimd(fc, ctx->state, ctx->buf, bsize);
- ctx->first_message_part = 0;
- fc &= ~CPACF_KIMD_NIP;
- data += bsize - index;
- len -= bsize - index;
- index = 0;
- }
+ fc |= CPACF_KIMD_NIP;
/* process as many blocks as possible */
- if (len >= bsize) {
- n = (len / bsize) * bsize;
- cpacf_kimd(fc, ctx->state, data, n);
- ctx->first_message_part = 0;
- data += n;
- len -= n;
+ n = (len / bsize) * bsize;
+ ctx->count += n;
+ switch (ctx->func) {
+ case CPACF_KLMD_SHA_512:
+ case CPACF_KLMD_SHA3_384:
+ if (ctx->count < n)
+ ctx->sha512.count_hi++;
+ break;
}
-store:
- if (len)
- memcpy(ctx->buf + index , data, len);
-
- return 0;
+ cpacf_kimd(fc, ctx->state, data, n);
+ ctx->first_message_part = 0;
+ return len - n;
}
-EXPORT_SYMBOL_GPL(s390_sha_update);
+EXPORT_SYMBOL_GPL(s390_sha_update_blocks);
static int s390_crypto_shash_parmsize(int func)
{
@@ -77,15 +60,15 @@ static int s390_crypto_shash_parmsize(int func)
}
}
-int s390_sha_final(struct shash_desc *desc, u8 *out)
+int s390_sha_finup(struct shash_desc *desc, const u8 *src, unsigned int len,
+ u8 *out)
{
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
- unsigned int bsize = crypto_shash_blocksize(desc->tfm);
- u64 bits;
- unsigned int n;
int mbl_offset, fc;
+ u64 bits;
+
+ ctx->count += len;
- n = ctx->count % bsize;
bits = ctx->count * 8;
mbl_offset = s390_crypto_shash_parmsize(ctx->func);
if (mbl_offset < 0)
@@ -95,17 +78,16 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
/* set total msg bit length (mbl) in CPACF parmblock */
switch (ctx->func) {
- case CPACF_KLMD_SHA_1:
- case CPACF_KLMD_SHA_256:
- memcpy(ctx->state + mbl_offset, &bits, sizeof(bits));
- break;
case CPACF_KLMD_SHA_512:
- /*
- * the SHA512 parmblock has a 128-bit mbl field, clear
- * high-order u64 field, copy bits to low-order u64 field
- */
- memset(ctx->state + mbl_offset, 0x00, sizeof(bits));
+ /* The SHA512 parmblock has a 128-bit mbl field. */
+ if (ctx->count < len)
+ ctx->sha512.count_hi++;
+ ctx->sha512.count_hi <<= 3;
+ ctx->sha512.count_hi |= ctx->count >> 61;
mbl_offset += sizeof(u64) / sizeof(u32);
+ fallthrough;
+ case CPACF_KLMD_SHA_1:
+ case CPACF_KLMD_SHA_256:
memcpy(ctx->state + mbl_offset, &bits, sizeof(bits));
break;
case CPACF_KLMD_SHA3_224:
@@ -121,16 +103,14 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
fc |= test_facility(86) ? CPACF_KLMD_DUFOP : 0;
if (ctx->first_message_part)
fc |= CPACF_KLMD_NIP;
- cpacf_klmd(fc, ctx->state, ctx->buf, n);
+ cpacf_klmd(fc, ctx->state, src, len);
/* copy digest to out */
memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
- /* wipe context */
- memset(ctx, 0, sizeof *ctx);
return 0;
}
-EXPORT_SYMBOL_GPL(s390_sha_final);
+EXPORT_SYMBOL_GPL(s390_sha_finup);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s390 SHA cipher common functions");
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 04ea1c03a5ff..96409573c75d 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -342,7 +342,7 @@ static struct dentry *hypfs_create_file(struct dentry *parent, const char *name,
struct inode *inode;
inode_lock(d_inode(parent));
- dentry = lookup_one_len(name, parent, strlen(name));
+ dentry = lookup_noperm(&QSTR(name), parent);
if (IS_ERR(dentry)) {
dentry = ERR_PTR(-ENOMEM);
goto fail;
diff --git a/arch/s390/include/asm/asce.h b/arch/s390/include/asm/asce.h
new file mode 100644
index 000000000000..f6dfaaba735a
--- /dev/null
+++ b/arch/s390/include/asm/asce.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_ASCE_H
+#define _ASM_S390_ASCE_H
+
+#include <linux/thread_info.h>
+#include <linux/irqflags.h>
+#include <asm/lowcore.h>
+#include <asm/ctlreg.h>
+
+static inline bool enable_sacf_uaccess(void)
+{
+ unsigned long flags;
+
+ if (test_thread_flag(TIF_ASCE_PRIMARY))
+ return true;
+ local_irq_save(flags);
+ local_ctl_load(1, &get_lowcore()->kernel_asce);
+ set_thread_flag(TIF_ASCE_PRIMARY);
+ local_irq_restore(flags);
+ return false;
+}
+
+static inline void disable_sacf_uaccess(bool previous)
+{
+ unsigned long flags;
+
+ if (previous)
+ return;
+ local_irq_save(flags);
+ local_ctl_load(1, &get_lowcore()->user_asce);
+ clear_thread_flag(TIF_ASCE_PRIMARY);
+ local_irq_restore(flags);
+}
+
+#endif /* _ASM_S390_ASCE_H */
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 59ab1192e2d5..54cb97603ec0 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -649,18 +649,30 @@ static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len,
* instruction
* @func: the function code passed to PCC; see CPACF_KM_xxx defines
* @param: address of parameter block; see POP for details on each func
+ *
+ * Returns the condition code, which is one of:
+ * 0 - cc code 0 (normal completion)
+ * 1 - cc code 1 (protected key wkvp mismatch or src operand out of range)
+ * 2 - cc code 2 (something invalid, scalar multiply infinity, ...)
+ * Condition code 3 (partial completion) is handled within the asm code
+ * and never returned.
*/
-static inline void cpacf_pcc(unsigned long func, void *param)
+static inline int cpacf_pcc(unsigned long func, void *param)
{
+ int cc;
+
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,0,0\n" /* PCC opcode */
" brc 1,0b\n" /* handle partial completion */
- :
+ CC_IPM(cc)
+ : CC_OUT(cc, cc)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_PCC)
- : "cc", "memory", "0", "1");
+ : CC_CLOBBER_LIST("memory", "0", "1"));
+
+ return CC_TRANSFORM(cc);
}
/**
diff --git a/arch/s390/include/asm/cpufeature.h b/arch/s390/include/asm/cpufeature.h
index e08169bd63a5..6c6a99660e78 100644
--- a/arch/s390/include/asm/cpufeature.h
+++ b/arch/s390/include/asm/cpufeature.h
@@ -15,6 +15,7 @@ enum {
S390_CPU_FEATURE_MSA,
S390_CPU_FEATURE_VXRS,
S390_CPU_FEATURE_UV,
+ S390_CPU_FEATURE_D288,
MAX_CPU_FEATURES
};
diff --git a/arch/s390/include/asm/diag288.h b/arch/s390/include/asm/diag288.h
new file mode 100644
index 000000000000..5e1b43cea9d6
--- /dev/null
+++ b/arch/s390/include/asm/diag288.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_DIAG288_H
+#define _ASM_S390_DIAG288_H
+
+#include <asm/asm-extable.h>
+#include <asm/types.h>
+
+#define MIN_INTERVAL 15 /* Minimal time supported by diag288 */
+#define MAX_INTERVAL 3600 /* One hour should be enough - pure estimation */
+
+#define WDT_DEFAULT_TIMEOUT 30
+
+/* Function codes - init, change, cancel */
+#define WDT_FUNC_INIT 0
+#define WDT_FUNC_CHANGE 1
+#define WDT_FUNC_CANCEL 2
+#define WDT_FUNC_CONCEAL 0x80000000
+
+/* Action codes for LPAR watchdog */
+#define LPARWDT_RESTART 0
+
+static inline int __diag288(unsigned int func, unsigned int timeout,
+ unsigned long action, unsigned int len)
+{
+ union register_pair r1 = { .even = func, .odd = timeout, };
+ union register_pair r3 = { .even = action, .odd = len, };
+ int rc = -EINVAL;
+
+ asm volatile(
+ " diag %[r1],%[r3],0x288\n"
+ "0: lhi %[rc],0\n"
+ "1:"
+ EX_TABLE(0b, 1b)
+ : [rc] "+d" (rc)
+ : [r1] "d" (r1.pair), [r3] "d" (r3.pair)
+ : "cc", "memory");
+ return rc;
+}
+
+#endif /* _ASM_S390_DIAG288_H */
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index f5781794356b..942f21c39697 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -13,9 +13,11 @@
static uaccess_kmsan_or_inline int \
__futex_atomic_##name(int oparg, int *old, u32 __user *uaddr) \
{ \
+ bool sacf_flag; \
int rc, new; \
\
instrument_copy_from_user_before(old, uaddr, sizeof(*old)); \
+ sacf_flag = enable_sacf_uaccess(); \
asm_inline volatile( \
" sacf 256\n" \
"0: l %[old],%[uaddr]\n" \
@@ -32,6 +34,7 @@ __futex_atomic_##name(int oparg, int *old, u32 __user *uaddr) \
[new] "=&d" (new), [uaddr] "+Q" (*uaddr) \
: [oparg] "d" (oparg) \
: "cc"); \
+ disable_sacf_uaccess(sacf_flag); \
if (!rc) \
instrument_copy_from_user_after(old, uaddr, sizeof(*old), 0); \
return rc; \
@@ -75,9 +78,11 @@ int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
static uaccess_kmsan_or_inline
int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval)
{
+ bool sacf_flag;
int rc;
instrument_copy_from_user_before(uval, uaddr, sizeof(*uval));
+ sacf_flag = enable_sacf_uaccess();
asm_inline volatile(
" sacf 256\n"
"0: cs %[old],%[new],%[uaddr]\n"
@@ -88,6 +93,7 @@ int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32
: [rc] "=d" (rc), [old] "+d" (oldval), [uaddr] "+Q" (*uaddr)
: [new] "d" (newval)
: "cc", "memory");
+ disable_sacf_uaccess(sacf_flag);
*uval = oldval;
instrument_copy_from_user_after(uval, uaddr, sizeof(*uval), 0);
return rc;
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 9f2814d0e1e9..66c5808fd011 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -110,7 +110,6 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
-void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
@@ -134,7 +133,6 @@ int gmap_protect_one(struct gmap *gmap, unsigned long gaddr, int prot, unsigned
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
unsigned long gaddr, unsigned long vmaddr);
-int s390_disable_cow_sharing(void);
int s390_replace_asce(struct gmap *gmap);
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
diff --git a/arch/s390/include/asm/gmap_helpers.h b/arch/s390/include/asm/gmap_helpers.h
new file mode 100644
index 000000000000..5356446a61c4
--- /dev/null
+++ b/arch/s390/include/asm/gmap_helpers.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Helper functions for KVM guest address space mapping code
+ *
+ * Copyright IBM Corp. 2025
+ */
+
+#ifndef _ASM_S390_GMAP_HELPERS_H
+#define _ASM_S390_GMAP_HELPERS_H
+
+void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr, unsigned long end);
+int gmap_helper_disable_cow_sharing(void);
+
+#endif /* _ASM_S390_GMAP_HELPERS_H */
diff --git a/arch/s390/include/asm/machine.h b/arch/s390/include/asm/machine.h
index 54478caa5237..8abe5afdbfc4 100644
--- a/arch/s390/include/asm/machine.h
+++ b/arch/s390/include/asm/machine.h
@@ -18,6 +18,7 @@
#define MFEATURE_VM 7
#define MFEATURE_KVM 8
#define MFEATURE_LPAR 9
+#define MFEATURE_DIAG288 10
#ifndef __ASSEMBLY__
diff --git a/arch/s390/include/asm/march.h b/arch/s390/include/asm/march.h
index fd9eef3be44c..11a71bd14954 100644
--- a/arch/s390/include/asm/march.h
+++ b/arch/s390/include/asm/march.h
@@ -33,6 +33,10 @@
#define MARCH_HAS_Z16_FEATURES 1
#endif
+#ifdef CONFIG_HAVE_MARCH_Z17_FEATURES
+#define MARCH_HAS_Z17_FEATURES 1
+#endif
+
#endif /* __DECOMPRESSOR */
#endif /* __ASM_S390_MARCH_H */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 88f84beebb9e..d9b8501bc93d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -13,6 +13,7 @@
#include <linux/mm_types.h>
#include <asm/tlbflush.h>
#include <asm/ctlreg.h>
+#include <asm/asce.h>
#include <asm-generic/mm_hooks.h>
#define init_new_context init_new_context
@@ -77,7 +78,8 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *
else
get_lowcore()->user_asce.val = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
- /* Clear previous user-ASCE from CR7 */
+ /* Clear previous user-ASCE from CR1 and CR7 */
+ local_ctl_load(1, &s390_invalid_asce);
local_ctl_load(7, &s390_invalid_asce);
if (prev != next)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
@@ -99,6 +101,7 @@ static inline void finish_arch_post_lock_switch(void)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
+ unsigned long flags;
if (mm) {
preempt_disable();
@@ -108,15 +111,25 @@ static inline void finish_arch_post_lock_switch(void)
__tlb_flush_mm_lazy(mm);
preempt_enable();
}
+ local_irq_save(flags);
+ if (test_thread_flag(TIF_ASCE_PRIMARY))
+ local_ctl_load(1, &get_lowcore()->kernel_asce);
+ else
+ local_ctl_load(1, &get_lowcore()->user_asce);
local_ctl_load(7, &get_lowcore()->user_asce);
+ local_irq_restore(flags);
}
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
- switch_mm(prev, next, current);
+ switch_mm_irqs_off(prev, next, current);
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+ if (test_thread_flag(TIF_ASCE_PRIMARY))
+ local_ctl_load(1, &get_lowcore()->kernel_asce);
+ else
+ local_ctl_load(1, &get_lowcore()->user_asce);
local_ctl_load(7, &get_lowcore()->user_asce);
}
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
index 192835a3e24d..c7c96282f011 100644
--- a/arch/s390/include/asm/nospec-branch.h
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -26,8 +26,6 @@ static inline bool nospec_uses_trampoline(void)
return __is_defined(CC_USING_EXPOLINE) && !nospec_disable;
}
-#ifdef CONFIG_EXPOLINE_EXTERN
-
void __s390_indirect_jump_r1(void);
void __s390_indirect_jump_r2(void);
void __s390_indirect_jump_r3(void);
@@ -44,8 +42,6 @@ void __s390_indirect_jump_r13(void);
void __s390_indirect_jump_r14(void);
void __s390_indirect_jump_r15(void);
-#endif
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_EXPOLINE_H */
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index 42d7cc4262ca..d12e17201661 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -25,6 +25,7 @@ enum zpci_ioat_dtype {
#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
#define ZPCI_TABLE_SIZE_RT (1UL << 42)
+#define ZPCI_TABLE_SIZE_RS (1UL << 53)
#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
@@ -55,6 +56,8 @@ enum zpci_ioat_dtype {
#define ZPCI_PT_BITS 8
#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + PAGE_SHIFT)
#define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS)
+#define ZPCI_RS_SHIFT (ZPCI_RT_SHIFT + ZPCI_TABLE_BITS)
+#define ZPCI_RF_SHIFT (ZPCI_RS_SHIFT + ZPCI_TABLE_BITS)
#define ZPCI_RTE_FLAG_MASK 0x3fffUL
#define ZPCI_RTE_ADDR_MASK (~ZPCI_RTE_FLAG_MASK)
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 005497ffebda..5345398df653 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -97,7 +97,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
if (!table)
return NULL;
crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
- if (!pagetable_pmd_ctor(virt_to_ptdesc(table))) {
+ if (!pagetable_pmd_ctor(mm, virt_to_ptdesc(table))) {
crst_table_free(mm, table);
return NULL;
}
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index f8a6b54986ec..1c661ac62ce8 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1448,16 +1448,6 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
return pte_mkyoung(__pte);
}
-static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
-{
- unsigned long physpage = page_to_phys(page);
- pte_t __pte = mk_pte_phys(physpage, pgprot);
-
- if (pte_write(__pte) && PageDirty(page))
- __pte = pte_mkdirty(__pte);
- return __pte;
-}
-
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
@@ -1879,7 +1869,6 @@ static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
#define pmdp_collapse_flush pmdp_collapse_flush
#define pfn_pmd(pfn, pgprot) mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
-#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
static inline int pmd_trans_huge(pmd_t pmd)
{
diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h
index 5dca1a46a9f6..b7b59faf16f4 100644
--- a/arch/s390/include/asm/pkey.h
+++ b/arch/s390/include/asm/pkey.h
@@ -20,9 +20,22 @@
* @param key pointer to a buffer containing the key blob
* @param keylen size of the key blob in bytes
* @param protkey pointer to buffer receiving the protected key
+ * @param xflags additional execution flags (see PKEY_XFLAG_* definitions below)
+ * As of now the only supported flag is PKEY_XFLAG_NOMEMALLOC.
* @return 0 on success, negative errno value on failure
*/
int pkey_key2protkey(const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags);
+
+/*
+ * If this flag is given in the xflags parameter, the pkey implementation
+ * is not allowed to allocate memory but instead should fall back to use
+ * preallocated memory or simply fail with -ENOMEM.
+ * This flag is for protected key derivation within a cipher or similar
+ * which must not allocate memory that would cause I/O operations - see
+ * also the CRYPTO_ALG_ALLOCATES_MEMORY flag in crypto.h.
+ */
+#define PKEY_XFLAG_NOMEMALLOC 0x0001
#endif /* _KAPI_PKEY_H */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index c66f3fc6daaf..62c0ab4a4b9d 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -9,6 +9,7 @@
#include <linux/bits.h>
#include <uapi/asm/ptrace.h>
+#include <asm/thread_info.h>
#include <asm/tpi.h>
#define PIF_SYSCALL 0 /* inside a system call */
@@ -126,7 +127,6 @@ struct pt_regs {
struct tpi_info tpi_info;
};
unsigned long flags;
- unsigned long cr1;
unsigned long last_break;
};
@@ -229,8 +229,44 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
int regs_query_register_offset(const char *name);
const char *regs_query_register_name(unsigned int offset);
-unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
-unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
+
+static __always_inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->gprs[15];
+}
+
+static __always_inline unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
+{
+ if (offset >= NUM_GPRS)
+ return 0;
+ return regs->gprs[offset];
+}
+
+static __always_inline int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+ unsigned long ksp = kernel_stack_pointer(regs);
+
+ return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains the kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+static __always_inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long addr;
+
+ addr = kernel_stack_pointer(regs) + n * sizeof(long);
+ if (!regs_within_kernel_stack(regs, addr))
+ return 0;
+ return READ_ONCE_NOCHECK(addr);
+}
/**
* regs_get_kernel_argument() - get Nth function argument in kernel
@@ -251,11 +287,6 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
return regs_get_kernel_stack_nth(regs, argoffset + n);
}
-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
-{
- return regs->gprs[15];
-}
-
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->gprs[2] = rc;
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 2ab868cbae6c..f8f68f4ef255 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -26,11 +26,9 @@ void *memmove(void *dest, const void *src, size_t n);
#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
#define __HAVE_ARCH_STRCAT /* inline & arch function */
#define __HAVE_ARCH_STRCMP /* arch function */
-#define __HAVE_ARCH_STRCPY /* inline & arch function */
#define __HAVE_ARCH_STRLCAT /* arch function */
#define __HAVE_ARCH_STRLEN /* inline & arch function */
#define __HAVE_ARCH_STRNCAT /* arch function */
-#define __HAVE_ARCH_STRNCPY /* arch function */
#define __HAVE_ARCH_STRNLEN /* inline & arch function */
#define __HAVE_ARCH_STRSTR /* arch function */
#define __HAVE_ARCH_MEMSET16 /* arch function */
@@ -42,7 +40,6 @@ int memcmp(const void *s1, const void *s2, size_t n);
int strcmp(const char *s1, const char *s2);
size_t strlcat(char *dest, const char *src, size_t n);
char *strncat(char *dest, const char *src, size_t n);
-char *strncpy(char *dest, const char *src, size_t n);
char *strstr(const char *s1, const char *s2);
#endif /* !defined(CONFIG_KASAN) && !defined(CONFIG_KMSAN) */
@@ -155,22 +152,6 @@ static inline char *strcat(char *dst, const char *src)
}
#endif
-#ifdef __HAVE_ARCH_STRCPY
-static inline char *strcpy(char *dst, const char *src)
-{
- char *ret = dst;
-
- asm volatile(
- " lghi 0,0\n"
- "0: mvst %[dst],%[src]\n"
- " jo 0b"
- : [dst] "+&a" (dst), [src] "+&a" (src)
- :
- : "cc", "memory", "0");
- return ret;
-}
-#endif
-
#if defined(__HAVE_ARCH_STRLEN) || (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__))
static inline size_t __no_sanitize_prefix_strfunc(strlen)(const char *s)
{
@@ -208,7 +189,6 @@ static inline size_t strnlen(const char * s, size_t n)
void *memchr(const void * s, int c, size_t n);
void *memscan(void *s, int c, size_t n);
char *strcat(char *dst, const char *src);
-char *strcpy(char *dst, const char *src);
size_t strlen(const char *s);
size_t strnlen(const char * s, size_t n);
#endif /* !IN_ARCH_STRING_C */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 0213ec800b57..bd4cb00ccd5e 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -24,6 +24,18 @@ static inline long syscall_get_nr(struct task_struct *task,
(regs->int_code & 0xffff) : -1;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ /*
+ * Unlike syscall_get_nr(), syscall_set_nr() can be called only when
+ * the target task is stopped for tracing on entering a syscall, so
+ * there is no need to have the same check syscall_get_nr() has.
+ */
+ regs->int_code = (regs->int_code & ~0xffff) | (nr & 0xffff);
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -76,6 +88,15 @@ static inline void syscall_get_arguments(struct task_struct *task,
args[0] = regs->orig_gpr2 & mask;
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ regs->orig_gpr2 = args[0];
+ for (int n = 1; n < 6; n++)
+ regs->gprs[2 + n] = args[n];
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 91f569cae1ce..391eb04d26d8 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -9,6 +9,7 @@
#define _ASM_THREAD_INFO_H
#include <linux/bits.h>
+#include <vdso/page.h>
/*
* General size of kernel stacks
@@ -24,8 +25,6 @@
#define STACK_INIT_OFFSET (THREAD_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
#ifndef __ASSEMBLY__
-#include <asm/lowcore.h>
-#include <asm/page.h>
/*
* low level task data that entry.S needs immediate access to
@@ -64,6 +63,7 @@ void arch_setup_new_exec(void);
#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling needed */
#define TIF_UPROBE 4 /* breakpointed or single-stepping */
#define TIF_PATCH_PENDING 5 /* pending live patching update */
+#define TIF_ASCE_PRIMARY 6 /* primary asce is kernel asce */
#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
#define TIF_GUARDED_STORAGE 8 /* load guarded storage control block */
#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
@@ -85,6 +85,7 @@ void arch_setup_new_exec(void);
#define _TIF_NEED_RESCHED_LAZY BIT(TIF_NEED_RESCHED_LAZY)
#define _TIF_UPROBE BIT(TIF_UPROBE)
#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
+#define _TIF_ASCE_PRIMARY BIT(TIF_ASCE_PRIMARY)
#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL)
#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index f20601995bb0..1e50f6f1ad9d 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -36,11 +36,12 @@ static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
#include <asm/tlbflush.h>
#include <asm-generic/tlb.h>
+#include <asm/gmap.h>
/*
* Release the page cache reference for a pte removed by
* tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
- * has already been freed, so just do free_page_and_swap_cache.
+ * has already been freed, so just do free_folio_and_swap_cache.
*
* s390 doesn't delay rmap removal.
*/
@@ -49,7 +50,7 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
{
VM_WARN_ON_ONCE(delay_rmap);
- free_page_and_swap_cache(page);
+ free_folio_and_swap_cache(page_folio(page));
return false;
}
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 8629d70ec38b..a43fc88c0050 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -19,6 +19,7 @@
#include <asm/extable.h>
#include <asm/facility.h>
#include <asm-generic/access_ok.h>
+#include <asm/asce.h>
#include <linux/instrumented.h>
void debug_user_asce(int exit);
@@ -478,6 +479,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
__uint128_t old, __uint128_t new,
unsigned long key, int size)
{
+ bool sacf_flag;
int rc = 0;
switch (size) {
@@ -490,6 +492,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
_old = ((unsigned int)old & 0xff) << shift;
_new = ((unsigned int)new & 0xff) << shift;
mask = ~(0xff << shift);
+ sacf_flag = enable_sacf_uaccess();
asm_inline volatile(
" spka 0(%[key])\n"
" sacf 256\n"
@@ -524,6 +527,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
[default_key] "J" (PAGE_DEFAULT_KEY),
[max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
: "memory", "cc");
+ disable_sacf_uaccess(sacf_flag);
*(unsigned char *)uval = prev >> shift;
if (!count)
rc = -EAGAIN;
@@ -538,6 +542,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
_old = ((unsigned int)old & 0xffff) << shift;
_new = ((unsigned int)new & 0xffff) << shift;
mask = ~(0xffff << shift);
+ sacf_flag = enable_sacf_uaccess();
asm_inline volatile(
" spka 0(%[key])\n"
" sacf 256\n"
@@ -572,6 +577,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
[default_key] "J" (PAGE_DEFAULT_KEY),
[max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
: "memory", "cc");
+ disable_sacf_uaccess(sacf_flag);
*(unsigned short *)uval = prev >> shift;
if (!count)
rc = -EAGAIN;
@@ -580,6 +586,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
case 4: {
unsigned int prev = old;
+ sacf_flag = enable_sacf_uaccess();
asm_inline volatile(
" spka 0(%[key])\n"
" sacf 256\n"
@@ -595,12 +602,14 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
[key] "a" (key << 4),
[default_key] "J" (PAGE_DEFAULT_KEY)
: "memory", "cc");
+ disable_sacf_uaccess(sacf_flag);
*(unsigned int *)uval = prev;
return rc;
}
case 8: {
unsigned long prev = old;
+ sacf_flag = enable_sacf_uaccess();
asm_inline volatile(
" spka 0(%[key])\n"
" sacf 256\n"
@@ -616,12 +625,14 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
[key] "a" (key << 4),
[default_key] "J" (PAGE_DEFAULT_KEY)
: "memory", "cc");
+ disable_sacf_uaccess(sacf_flag);
*(unsigned long *)uval = prev;
return rc;
}
case 16: {
__uint128_t prev = old;
+ sacf_flag = enable_sacf_uaccess();
asm_inline volatile(
" spka 0(%[key])\n"
" sacf 256\n"
@@ -637,6 +648,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
[key] "a" (key << 4),
[default_key] "J" (PAGE_DEFAULT_KEY)
: "memory", "cc");
+ disable_sacf_uaccess(sacf_flag);
*(__uint128_t *)uval = prev;
return rc;
}
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 46fb0ef6f984..8018549a1ad2 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -16,7 +16,6 @@
#include <linux/bug.h>
#include <linux/sched.h>
#include <asm/page.h>
-#include <asm/gmap.h>
#include <asm/asm.h>
#define UVC_CC_OK 0
@@ -616,8 +615,9 @@ static inline int uv_remove_shared(unsigned long addr)
return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}
-int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
- struct uv_secret_list_item_hdr *secret);
+int uv_find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
+ struct uv_secret_list *list,
+ struct uv_secret_list_item_hdr *secret);
int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size);
extern int prot_virt_host;
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 841e05f7fa7e..95ecad9c7d7d 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -50,7 +50,6 @@ int main(void)
OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2);
OFFSET(__PT_INT_CODE, pt_regs, int_code);
OFFSET(__PT_FLAGS, pt_regs, flags);
- OFFSET(__PT_CR1, pt_regs, cr1);
OFFSET(__PT_LAST_BREAK, pt_regs, last_break);
DEFINE(__PT_SIZE, sizeof(struct pt_regs));
BLANK();
diff --git a/arch/s390/kernel/cert_store.c b/arch/s390/kernel/cert_store.c
index 03f3a1e52430..c217a5e64094 100644
--- a/arch/s390/kernel/cert_store.c
+++ b/arch/s390/kernel/cert_store.c
@@ -138,7 +138,7 @@ static void cert_store_key_describe(const struct key *key, struct seq_file *m)
* First 64 bytes of the key description is key name in EBCDIC CP 500.
* Convert it to ASCII for displaying in /proc/keys.
*/
- strscpy(ascii, key->description, sizeof(ascii));
+ strscpy(ascii, key->description);
EBCASC_500(ascii, VC_NAME_LEN_BYTES);
seq_puts(m, ascii);
diff --git a/arch/s390/kernel/cpufeature.c b/arch/s390/kernel/cpufeature.c
index 1b2ae42a0c15..76210f001028 100644
--- a/arch/s390/kernel/cpufeature.c
+++ b/arch/s390/kernel/cpufeature.c
@@ -5,11 +5,13 @@
#include <linux/cpufeature.h>
#include <linux/bug.h>
+#include <asm/machine.h>
#include <asm/elf.h>
enum {
TYPE_HWCAP,
TYPE_FACILITY,
+ TYPE_MACHINE,
};
struct s390_cpu_feature {
@@ -21,6 +23,7 @@ static struct s390_cpu_feature s390_cpu_features[MAX_CPU_FEATURES] = {
[S390_CPU_FEATURE_MSA] = {.type = TYPE_HWCAP, .num = HWCAP_NR_MSA},
[S390_CPU_FEATURE_VXRS] = {.type = TYPE_HWCAP, .num = HWCAP_NR_VXRS},
[S390_CPU_FEATURE_UV] = {.type = TYPE_FACILITY, .num = 158},
+ [S390_CPU_FEATURE_D288] = {.type = TYPE_MACHINE, .num = MFEATURE_DIAG288},
};
/*
@@ -38,6 +41,8 @@ int cpu_have_feature(unsigned int num)
return !!(elf_hwcap & BIT(feature->num));
case TYPE_FACILITY:
return test_facility(feature->num);
+ case TYPE_MACHINE:
+ return test_machine_feature(feature->num);
default:
WARN_ON_ONCE(1);
return 0;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 4a981266b483..adb164223f8c 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -354,7 +354,7 @@ static void *nt_prpsinfo(void *ptr)
memset(&prpsinfo, 0, sizeof(prpsinfo));
prpsinfo.pr_sname = 'R';
- strcpy(prpsinfo.pr_fname, "vmlinux");
+ strscpy(prpsinfo.pr_fname, "vmlinux");
return nt_init(ptr, PRPSINFO, prpsinfo);
}
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index ce038e9205f7..2a41be2f7925 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -251,7 +251,7 @@ static debug_info_t *debug_info_alloc(const char *name, int pages_per_area,
rc->level = level;
rc->buf_size = buf_size;
rc->entry_size = sizeof(debug_entry_t) + buf_size;
- strscpy(rc->name, name, sizeof(rc->name));
+ strscpy(rc->name, name);
memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
memset(rc->debugfs_entries, 0, DEBUG_MAX_VIEWS * sizeof(struct dentry *));
refcount_set(&(rc->ref_count), 0);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index dd291c9ad6a6..0f00f4b06d51 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -116,7 +116,7 @@ _LPP_OFFSET = __LC_LPP
.macro SIEEXIT sie_control,lowcore
lg %r9,\sie_control # get control block pointer
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_KERNEL_ASCE(\lowcore) # load primary asce
+ lctlg %c1,%c1,__LC_USER_ASCE(\lowcore) # load primary asce
lg %r9,__LC_CURRENT(\lowcore)
mvi __TI_sie(%r9),0
larl %r9,sie_exit # skip forward to sie_exit
@@ -208,7 +208,7 @@ SYM_FUNC_START(__sie64a)
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
GET_LC %r14
- lctlg %c1,%c1,__LC_KERNEL_ASCE(%r14) # load primary asce
+ lctlg %c1,%c1,__LC_USER_ASCE(%r14) # load primary asce
lg %r14,__LC_CURRENT(%r14)
mvi __TI_sie(%r14),0
SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
@@ -240,7 +240,6 @@ SYM_CODE_START(system_call)
lghi %r14,0
.Lsysc_per:
STBEAR __LC_LAST_BREAK(%r13)
- lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13)
lg %r15,__LC_KERNEL_STACK(%r13)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
@@ -261,7 +260,6 @@ SYM_CODE_START(system_call)
lgr %r3,%r14
brasl %r14,__do_syscall
STACKLEAK_ERASE
- lctlg %c1,%c1,__LC_USER_ASCE(%r13)
mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
BPON
LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
@@ -278,7 +276,6 @@ SYM_CODE_START(ret_from_fork)
brasl %r14,__ret_from_fork
STACKLEAK_ERASE
GET_LC %r13
- lctlg %c1,%c1,__LC_USER_ASCE(%r13)
mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
BPON
LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
@@ -299,10 +296,7 @@ SYM_CODE_START(pgm_check_handler)
lmg %r8,%r9,__LC_PGM_OLD_PSW(%r13)
xgr %r10,%r10
tmhh %r8,0x0001 # coming from user space?
- jno .Lpgm_skip_asce
- lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13)
- j 3f # -> fault in user space
-.Lpgm_skip_asce:
+ jo 3f # -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
lg %r11,__LC_CURRENT(%r13)
tm __TI_sie(%r11),0xff
@@ -340,7 +334,6 @@ SYM_CODE_START(pgm_check_handler)
tmhh %r8,0x0001 # returning to user space?
jno .Lpgm_exit_kernel
STACKLEAK_ERASE
- lctlg %c1,%c1,__LC_USER_ASCE(%r13)
BPON
stpt __LC_EXIT_TIMER(%r13)
.Lpgm_exit_kernel:
@@ -384,8 +377,7 @@ SYM_CODE_START(\name)
#endif
0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
-1: lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13)
- lg %r15,__LC_KERNEL_STACK(%r13)
+1: lg %r15,__LC_KERNEL_STACK(%r13)
2: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
@@ -408,7 +400,6 @@ SYM_CODE_START(\name)
tmhh %r8,0x0001 # returning to user ?
jno 2f
STACKLEAK_ERASE
- lctlg %c1,%c1,__LC_USER_ASCE(%r13)
BPON
stpt __LC_EXIT_TIMER(%r13)
2: LBEAR __PT_LAST_BREAK(%r11)
@@ -476,8 +467,6 @@ SYM_CODE_START(mcck_int_handler)
.Lmcck_user:
lg %r15,__LC_MCCK_STACK(%r13)
la %r11,STACK_FRAME_OVERHEAD(%r15)
- stctg %c1,%c1,__PT_CR1(%r11)
- lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lay %r14,__LC_GPREGS_SAVE_AREA(%r13)
mvc __PT_R0(128,%r11),0(%r14)
@@ -495,7 +484,6 @@ SYM_CODE_START(mcck_int_handler)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,s390_do_machine_check
- lctlg %c1,%c1,__PT_CR1(%r11)
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1(%r13),0x01 # returning to user ?
@@ -602,7 +590,8 @@ SYM_CODE_START(stack_invalid)
stmg %r0,%r7,__PT_R0(%r11)
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_R8(64,%r11),0(%r14)
- stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
+ GET_LC %r2
+ mvc __PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK(%r2)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_invalid
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 3b9d9ccfad63..ff15f91affde 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -270,7 +270,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
{ \
if (len >= sizeof(_value)) \
return -E2BIG; \
- len = strscpy(_value, buf, sizeof(_value)); \
+ len = strscpy(_value, buf); \
if ((ssize_t)len < 0) \
return len; \
strim(_value); \
@@ -2249,26 +2249,28 @@ static int __init s390_ipl_init(void)
__initcall(s390_ipl_init);
-static void __init strncpy_skip_quote(char *dst, char *src, int n)
+static void __init strscpy_skip_quote(char *dst, char *src, int n)
{
int sx, dx;
- dx = 0;
- for (sx = 0; src[sx] != 0; sx++) {
+ if (!n)
+ return;
+ for (sx = 0, dx = 0; src[sx]; sx++) {
if (src[sx] == '"')
continue;
- dst[dx++] = src[sx];
- if (dx >= n)
+ dst[dx] = src[sx];
+ if (dx + 1 == n)
break;
+ dx++;
}
+ dst[dx] = '\0';
}
static int __init vmcmd_on_reboot_setup(char *str)
{
if (!machine_is_vm())
return 1;
- strncpy_skip_quote(vmcmd_on_reboot, str, VMCMD_MAX_SIZE);
- vmcmd_on_reboot[VMCMD_MAX_SIZE] = 0;
+ strscpy_skip_quote(vmcmd_on_reboot, str, sizeof(vmcmd_on_reboot));
on_reboot_trigger.action = &vmcmd_action;
return 1;
}
@@ -2278,8 +2280,7 @@ static int __init vmcmd_on_panic_setup(char *str)
{
if (!machine_is_vm())
return 1;
- strncpy_skip_quote(vmcmd_on_panic, str, VMCMD_MAX_SIZE);
- vmcmd_on_panic[VMCMD_MAX_SIZE] = 0;
+ strscpy_skip_quote(vmcmd_on_panic, str, sizeof(vmcmd_on_panic));
on_panic_trigger.action = &vmcmd_action;
return 1;
}
@@ -2289,8 +2290,7 @@ static int __init vmcmd_on_halt_setup(char *str)
{
if (!machine_is_vm())
return 1;
- strncpy_skip_quote(vmcmd_on_halt, str, VMCMD_MAX_SIZE);
- vmcmd_on_halt[VMCMD_MAX_SIZE] = 0;
+ strscpy_skip_quote(vmcmd_on_halt, str, sizeof(vmcmd_on_halt));
on_halt_trigger.action = &vmcmd_action;
return 1;
}
@@ -2300,8 +2300,7 @@ static int __init vmcmd_on_poff_setup(char *str)
{
if (!machine_is_vm())
return 1;
- strncpy_skip_quote(vmcmd_on_poff, str, VMCMD_MAX_SIZE);
- vmcmd_on_poff[VMCMD_MAX_SIZE] = 0;
+ strscpy_skip_quote(vmcmd_on_poff, str, sizeof(vmcmd_on_poff));
on_poff_trigger.action = &vmcmd_action;
return 1;
}
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 33205dd410e4..6a262e198e35 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -442,7 +442,7 @@ static void cpum_cf_make_setsize(enum cpumf_ctr_set ctrset)
ctrset_size = 48;
else if (cpumf_ctr_info.csvn >= 3 && cpumf_ctr_info.csvn <= 5)
ctrset_size = 128;
- else if (cpumf_ctr_info.csvn == 6 || cpumf_ctr_info.csvn == 7)
+ else if (cpumf_ctr_info.csvn >= 6 && cpumf_ctr_info.csvn <= 8)
ctrset_size = 160;
break;
case CPUMF_CTR_SET_MT_DIAG:
@@ -858,18 +858,13 @@ static int cpumf_pmu_event_type(struct perf_event *event)
static int cpumf_pmu_event_init(struct perf_event *event)
{
unsigned int type = event->attr.type;
- int err;
+ int err = -ENOENT;
if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
err = __hw_perf_event_init(event, type);
else if (event->pmu->type == type)
/* Registered as unknown PMU */
err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
- else
- return -ENOENT;
-
- if (unlikely(err) && event->destroy)
- event->destroy(event);
return err;
}
@@ -985,8 +980,6 @@ static int cfdiag_push_sample(struct perf_event *event,
}
overflow = perf_event_overflow(event, &data, &regs);
- if (overflow)
- event->pmu->stop(event, 0);
perf_event_update_userpage(event);
return overflow;
@@ -1819,8 +1812,6 @@ static int cfdiag_event_init(struct perf_event *event)
event->destroy = hw_perf_event_destroy;
err = cfdiag_event_init2(event);
- if (unlikely(err))
- event->destroy(event);
out:
return err;
}
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index e4a6bfc91080..7ace1f9e4ccf 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -237,7 +237,6 @@ CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_NO_SPECIAL, 0x00f4);
CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5);
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
-
CPUMF_EVENT_ATTR(cf_z15, L1D_RO_EXCL_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z15, DTLB2_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z15, DTLB2_MISSES, 0x0082);
@@ -291,8 +290,8 @@ CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_NO_SPECIAL, 0x00f4);
CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5);
CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc);
-CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108);
-CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109);
+CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x0108);
+CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x0109);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
CPUMF_EVENT_ATTR(cf_z16, L1D_RO_EXCL_WRITES, 0x0080);
@@ -365,6 +364,83 @@ CPUMF_EVENT_ATTR(cf_z16, NNPA_WAIT_LOCK, 0x010d);
CPUMF_EVENT_ATTR(cf_z16, NNPA_HOLD_LOCK, 0x010e);
CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
+CPUMF_EVENT_ATTR(cf_z17, L1D_RO_EXCL_WRITES, 0x0080);
+CPUMF_EVENT_ATTR(cf_z17, DTLB2_WRITES, 0x0081);
+CPUMF_EVENT_ATTR(cf_z17, DTLB2_MISSES, 0x0082);
+CPUMF_EVENT_ATTR(cf_z17, CRSTE_1MB_WRITES, 0x0083);
+CPUMF_EVENT_ATTR(cf_z17, DTLB2_GPAGE_WRITES, 0x0084);
+CPUMF_EVENT_ATTR(cf_z17, ITLB2_WRITES, 0x0086);
+CPUMF_EVENT_ATTR(cf_z17, ITLB2_MISSES, 0x0087);
+CPUMF_EVENT_ATTR(cf_z17, TLB2_PTE_WRITES, 0x0089);
+CPUMF_EVENT_ATTR(cf_z17, TLB2_CRSTE_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_z17, TLB2_ENGINES_BUSY, 0x008b);
+CPUMF_EVENT_ATTR(cf_z17, TX_C_TEND, 0x008c);
+CPUMF_EVENT_ATTR(cf_z17, TX_NC_TEND, 0x008d);
+CPUMF_EVENT_ATTR(cf_z17, L1C_TLB2_MISSES, 0x008f);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ, 0x0091);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_IV, 0x0092);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_CHIP_HIT, 0x0093);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_DRAWER_HIT, 0x0094);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP, 0x0095);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_IV, 0x0096);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_CHIP_HIT, 0x0097);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_DRAWER_HIT, 0x0098);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_MODULE, 0x0099);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_DRAWER, 0x009a);
+CPUMF_EVENT_ATTR(cf_z17, DCW_OFF_DRAWER, 0x009b);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_MEMORY, 0x009c);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_MODULE_MEMORY, 0x009d);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_DRAWER_MEMORY, 0x009e);
+CPUMF_EVENT_ATTR(cf_z17, DCW_OFF_DRAWER_MEMORY, 0x009f);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_IV, 0x00a0);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_CHIP_HIT, 0x00a1);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_DRAWER_HIT, 0x00a2);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_IV, 0x00a3);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_CHIP_HIT, 0x00a4);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_DRAWER_HIT, 0x00a5);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_IV, 0x00a6);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_CHIP_HIT, 0x00a7);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_DRAWER_HIT, 0x00a8);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ, 0x00a9);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_IV, 0x00aa);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_CHIP_HIT, 0x00ab);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_DRAWER_HIT, 0x00ac);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP, 0x00ad);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_IV, 0x00ae);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_CHIP_HIT, 0x00af);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_DRAWER_HIT, 0x00b0);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_MODULE, 0x00b1);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_DRAWER, 0x00b2);
+CPUMF_EVENT_ATTR(cf_z17, ICW_OFF_DRAWER, 0x00b3);
+CPUMF_EVENT_ATTR(cf_z17, CYCLES_SAMETHRD, 0x00ca);
+CPUMF_EVENT_ATTR(cf_z17, CYCLES_DIFFTHRD, 0x00cb);
+CPUMF_EVENT_ATTR(cf_z17, INST_SAMETHRD, 0x00cc);
+CPUMF_EVENT_ATTR(cf_z17, INST_DIFFTHRD, 0x00cd);
+CPUMF_EVENT_ATTR(cf_z17, WRONG_BRANCH_PREDICTION, 0x00ce);
+CPUMF_EVENT_ATTR(cf_z17, VX_BCD_EXECUTION_SLOTS, 0x00e1);
+CPUMF_EVENT_ATTR(cf_z17, DECIMAL_INSTRUCTIONS, 0x00e2);
+CPUMF_EVENT_ATTR(cf_z17, LAST_HOST_TRANSLATIONS, 0x00e8);
+CPUMF_EVENT_ATTR(cf_z17, TX_NC_TABORT, 0x00f4);
+CPUMF_EVENT_ATTR(cf_z17, TX_C_TABORT_NO_SPECIAL, 0x00f5);
+CPUMF_EVENT_ATTR(cf_z17, TX_C_TABORT_SPECIAL, 0x00f6);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_ACCESS, 0x00f8);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_CYCLES, 0x00fd);
+CPUMF_EVENT_ATTR(cf_z17, SORTL, 0x0100);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_CC, 0x0109);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_CCFINISH, 0x010a);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INVOCATIONS, 0x010b);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_COMPLETIONS, 0x010c);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_WAIT_LOCK, 0x010d);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_HOLD_LOCK, 0x010e);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_ONCHIP, 0x0110);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_OFFCHIP, 0x0111);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_DIFF, 0x0112);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_4K_PREFETCH, 0x0114);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_COMPL_LOCK, 0x0115);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_RETRY_LOCK, 0x0116);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_RETRY_LOCK_WITH_PLO, 0x0117);
+CPUMF_EVENT_ATTR(cf_z17, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
+CPUMF_EVENT_ATTR(cf_z17, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
static struct attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES),
@@ -414,7 +490,7 @@ static struct attribute *cpumcf_svn_12345_pmu_event_attr[] __initdata = {
NULL,
};
-static struct attribute *cpumcf_svn_67_pmu_event_attr[] __initdata = {
+static struct attribute *cpumcf_svn_678_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
@@ -779,6 +855,87 @@ static struct attribute *cpumcf_z16_pmu_event_attr[] __initdata = {
NULL,
};
+static struct attribute *cpumcf_z17_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_z17, L1D_RO_EXCL_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, DTLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, DTLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z17, CRSTE_1MB_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, DTLB2_GPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, ITLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, ITLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z17, TLB2_PTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, TLB2_CRSTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, TLB2_ENGINES_BUSY),
+ CPUMF_EVENT_PTR(cf_z17, TX_C_TEND),
+ CPUMF_EVENT_PTR(cf_z17, TX_NC_TEND),
+ CPUMF_EVENT_PTR(cf_z17, L1C_TLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z17, DCW_REQ),
+ CPUMF_EVENT_PTR(cf_z17, DCW_REQ_IV),
+ CPUMF_EVENT_PTR(cf_z17, DCW_REQ_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, DCW_REQ_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_IV),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_MODULE),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_DRAWER),
+ CPUMF_EVENT_PTR(cf_z17, DCW_OFF_DRAWER),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_MEMORY),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_MODULE_MEMORY),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_DRAWER_MEMORY),
+ CPUMF_EVENT_PTR(cf_z17, DCW_OFF_DRAWER_MEMORY),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_IV),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_IV),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_IV),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_REQ),
+ CPUMF_EVENT_PTR(cf_z17, ICW_REQ_IV),
+ CPUMF_EVENT_PTR(cf_z17, ICW_REQ_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_REQ_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_IV),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_MODULE),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_DRAWER),
+ CPUMF_EVENT_PTR(cf_z17, ICW_OFF_DRAWER),
+ CPUMF_EVENT_PTR(cf_z17, CYCLES_SAMETHRD),
+ CPUMF_EVENT_PTR(cf_z17, CYCLES_DIFFTHRD),
+ CPUMF_EVENT_PTR(cf_z17, INST_SAMETHRD),
+ CPUMF_EVENT_PTR(cf_z17, INST_DIFFTHRD),
+ CPUMF_EVENT_PTR(cf_z17, WRONG_BRANCH_PREDICTION),
+ CPUMF_EVENT_PTR(cf_z17, VX_BCD_EXECUTION_SLOTS),
+ CPUMF_EVENT_PTR(cf_z17, DECIMAL_INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_z17, LAST_HOST_TRANSLATIONS),
+ CPUMF_EVENT_PTR(cf_z17, TX_NC_TABORT),
+ CPUMF_EVENT_PTR(cf_z17, TX_C_TABORT_NO_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z17, TX_C_TABORT_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z17, DFLT_ACCESS),
+ CPUMF_EVENT_PTR(cf_z17, DFLT_CYCLES),
+ CPUMF_EVENT_PTR(cf_z17, SORTL),
+ CPUMF_EVENT_PTR(cf_z17, DFLT_CC),
+ CPUMF_EVENT_PTR(cf_z17, DFLT_CCFINISH),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_INVOCATIONS),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_COMPLETIONS),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_WAIT_LOCK),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_HOLD_LOCK),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_INST_ONCHIP),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_INST_OFFCHIP),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_INST_DIFF),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_4K_PREFETCH),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_COMPL_LOCK),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_RETRY_LOCK),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_RETRY_LOCK_WITH_PLO),
+ CPUMF_EVENT_PTR(cf_z17, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
+ CPUMF_EVENT_PTR(cf_z17, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
+ NULL,
+};
+
/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */
static struct attribute_group cpumcf_pmu_events_group = {
@@ -859,7 +1016,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
if (ci.csvn >= 1 && ci.csvn <= 5)
csvn = cpumcf_svn_12345_pmu_event_attr;
else if (ci.csvn >= 6)
- csvn = cpumcf_svn_67_pmu_event_attr;
+ csvn = cpumcf_svn_678_pmu_event_attr;
/* Determine model-specific counter set(s) */
get_cpu_id(&cpu_id);
@@ -892,6 +1049,10 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
case 0x3932:
model = cpumcf_z16_pmu_event_attr;
break;
+ case 0x9175:
+ case 0x9176:
+ model = cpumcf_z17_pmu_event_attr;
+ break;
default:
model = none;
break;
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 5f60248cb468..91469401f2c9 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -885,9 +885,6 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
event->attr.exclude_idle = 0;
err = __hw_perf_event_init(event);
- if (unlikely(err))
- if (event->destroy)
- event->destroy(event);
return err;
}
@@ -1075,10 +1072,7 @@ static int perf_push_sample(struct perf_event *event,
overflow = 0;
if (perf_event_exclude(event, &regs, sde_regs))
goto out;
- if (perf_event_overflow(event, &data, &regs)) {
- overflow = 1;
- event->pmu->stop(event, 0);
- }
+ overflow = perf_event_overflow(event, &data, &regs);
perf_event_update_userpage(event);
out:
return overflow;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 54e281436a28..11f70c1e2797 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -268,31 +268,35 @@ static int __init setup_elf_platform(void)
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
default: /* Use "z10" as default. */
- strcpy(elf_platform, "z10");
+ strscpy(elf_platform, "z10");
break;
case 0x2817:
case 0x2818:
- strcpy(elf_platform, "z196");
+ strscpy(elf_platform, "z196");
break;
case 0x2827:
case 0x2828:
- strcpy(elf_platform, "zEC12");
+ strscpy(elf_platform, "zEC12");
break;
case 0x2964:
case 0x2965:
- strcpy(elf_platform, "z13");
+ strscpy(elf_platform, "z13");
break;
case 0x3906:
case 0x3907:
- strcpy(elf_platform, "z14");
+ strscpy(elf_platform, "z14");
break;
case 0x8561:
case 0x8562:
- strcpy(elf_platform, "z15");
+ strscpy(elf_platform, "z15");
break;
case 0x3931:
case 0x3932:
- strcpy(elf_platform, "z16");
+ strscpy(elf_platform, "z16");
+ break;
+ case 0x9175:
+ case 0x9176:
+ strscpy(elf_platform, "z17");
break;
}
return 0;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 34b8d9e745df..e1240f6b29fa 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -1524,13 +1524,6 @@ static const char *gpr_names[NUM_GPRS] = {
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
-unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
-{
- if (offset >= NUM_GPRS)
- return 0;
- return regs->gprs[offset];
-}
-
int regs_query_register_offset(const char *name)
{
unsigned long offset;
@@ -1550,29 +1543,3 @@ const char *regs_query_register_name(unsigned int offset)
return NULL;
return gpr_names[offset];
}
-
-static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
-{
- unsigned long ksp = kernel_stack_pointer(regs);
-
- return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
-}
-
-/**
- * regs_get_kernel_stack_nth() - get Nth entry of the stack
- * @regs:pt_regs which contains kernel stack pointer.
- * @n:stack entry number.
- *
- * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
- * is specifined by @regs. If the @n th entry is NOT in the kernel stack,
- * this returns 0.
- */
-unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
-{
- unsigned long addr;
-
- addr = kernel_stack_pointer(regs) + n * sizeof(long);
- if (!regs_within_kernel_stack(regs, addr))
- return 0;
- return READ_ONCE_NOCHECK(addr);
-}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 63f41dfaba85..81f12bb77f62 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -263,7 +263,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
abs_lc = get_abs_lowcore();
memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area));
put_abs_lowcore(abs_lc);
- lc->cregs_save_area[1] = lc->kernel_asce;
+ lc->cregs_save_area[1] = lc->user_asce;
lc->cregs_save_area[7] = lc->user_asce;
save_access_regs((unsigned int *) lc->access_regs_save_area);
arch_spin_lock_setup(cpu);
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 9a5d5be8acf4..b99478e84da4 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -15,6 +15,7 @@
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/pagewalk.h>
+#include <linux/backing-dev.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>
@@ -135,7 +136,7 @@ int uv_destroy_folio(struct folio *folio)
{
int rc;
- /* See gmap_make_secure(): large folios cannot be secure */
+ /* Large folios cannot be secure */
if (unlikely(folio_test_large(folio)))
return 0;
@@ -184,7 +185,7 @@ int uv_convert_from_secure_folio(struct folio *folio)
{
int rc;
- /* See gmap_make_secure(): large folios cannot be secure */
+ /* Large folios cannot be secure */
if (unlikely(folio_test_large(folio)))
return 0;
@@ -324,32 +325,87 @@ static int make_folio_secure(struct mm_struct *mm, struct folio *folio, struct u
}
/**
- * s390_wiggle_split_folio() - try to drain extra references to a folio and optionally split.
+ * s390_wiggle_split_folio() - try to drain extra references to a folio and
+ * split the folio if it is large.
* @mm: the mm containing the folio to work on
* @folio: the folio
- * @split: whether to split a large folio
*
* Context: Must be called while holding an extra reference to the folio;
* the mm lock should not be held.
- * Return: 0 if the folio was split successfully;
- * -EAGAIN if the folio was not split successfully but another attempt
- * can be made, or if @split was set to false;
- * -EINVAL in case of other errors. See split_folio().
+ * Return: 0 if the operation was successful;
+ * -EAGAIN if splitting the large folio was not successful,
+ * but another attempt can be made;
+ * -EINVAL in case of other folio splitting errors. See split_folio().
*/
-static int s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split)
+static int s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio)
{
- int rc;
+ int rc, tried_splits;
lockdep_assert_not_held(&mm->mmap_lock);
folio_wait_writeback(folio);
lru_add_drain_all();
- if (split) {
+
+ if (!folio_test_large(folio))
+ return 0;
+
+ for (tried_splits = 0; tried_splits < 2; tried_splits++) {
+ struct address_space *mapping;
+ loff_t lstart, lend;
+ struct inode *inode;
+
folio_lock(folio);
rc = split_folio(folio);
+ if (rc != -EBUSY) {
+ folio_unlock(folio);
+ return rc;
+ }
+
+ /*
+	 * Splitting can fail with -EBUSY for various reasons, but we
+ * have to handle one case explicitly for now: some mappings
+ * don't allow for splitting dirty folios; writeback will
+ * mark them clean again, including marking all page table
+ * entries mapping the folio read-only, to catch future write
+ * attempts.
+ *
+ * While the system should be writing back dirty folios in the
+ * background, we obtained this folio by looking up a writable
+ * page table entry. On these problematic mappings, writable
+ * page table entries imply dirty folios, preventing the
+ * split in the first place.
+ *
+	 * To prevent a livelock when triggering writeback manually and
+ * letting the caller look up the folio again in the page
+ * table (turning it dirty), immediately try to split again.
+ *
+ * This is only a problem for some mappings (e.g., XFS);
+ * mappings that do not support writeback (e.g., shmem) do not
+	 * mappings that do not support writeback (e.g., shmem) are not
+	 * affected.
+ if (!folio_test_dirty(folio) || folio_test_anon(folio) ||
+ !folio->mapping || !mapping_can_writeback(folio->mapping)) {
+ folio_unlock(folio);
+ break;
+ }
+
+ /*
+ * Ideally, we'd only trigger writeback on this exact folio. But
+ * there is no easy way to do that, so we'll stabilize the
+ * mapping while we still hold the folio lock, so we can drop
+ * the folio lock to trigger writeback on the range currently
+ * covered by the folio instead.
+ */
+ mapping = folio->mapping;
+ lstart = folio_pos(folio);
+ lend = lstart + folio_size(folio) - 1;
+ inode = igrab(mapping->host);
folio_unlock(folio);
- if (rc != -EBUSY)
- return rc;
+ if (unlikely(!inode))
+ break;
+
+ filemap_write_and_wait_range(mapping, lstart, lend);
+ iput(mapping->host);
}
return -EAGAIN;
}
@@ -393,8 +449,11 @@ int make_hva_secure(struct mm_struct *mm, unsigned long hva, struct uv_cb_header
folio_walk_end(&fw, vma);
mmap_read_unlock(mm);
- if (rc == -E2BIG || rc == -EBUSY)
- rc = s390_wiggle_split_folio(mm, folio, rc == -E2BIG);
+ if (rc == -E2BIG || rc == -EBUSY) {
+ rc = s390_wiggle_split_folio(mm, folio);
+ if (!rc)
+ rc = -EAGAIN;
+ }
folio_put(folio);
return rc;
@@ -403,15 +462,15 @@ EXPORT_SYMBOL_GPL(make_hva_secure);
/*
* To be called with the folio locked or with an extra reference! This will
- * prevent gmap_make_secure from touching the folio concurrently. Having 2
- * parallel arch_make_folio_accessible is fine, as the UV calls will become a
- * no-op if the folio is already exported.
+ * prevent kvm_s390_pv_make_secure() from touching the folio concurrently.
+ * Having 2 parallel arch_make_folio_accessible is fine, as the UV calls will
+ * become a no-op if the folio is already exported.
*/
int arch_make_folio_accessible(struct folio *folio)
{
int rc = 0;
- /* See gmap_make_secure(): large folios cannot be secure */
+ /* Large folios cannot be secure */
if (unlikely(folio_test_large(folio)))
return 0;
@@ -782,7 +841,12 @@ out_kobj:
device_initcall(uv_sysfs_init);
/*
- * Find the secret with the secret_id in the provided list.
+ * Locate a secret in the list by its id.
+ * @secret_id: search pattern.
+ * @list: ephemeral buffer space
+ * @secret: output data, containing the secret's metadata.
+ *
+ * Search for a secret with the given secret_id in the Ultravisor secret store.
*
* Context: might sleep.
*/
@@ -803,12 +867,15 @@ static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
/*
* Do the actual search for `uv_get_secret_metadata`.
+ * @secret_id: search pattern.
+ * @list: ephemeral buffer space
+ * @secret: output data, containing the secret's metadata.
*
* Context: might sleep.
*/
-static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
- struct uv_secret_list *list,
- struct uv_secret_list_item_hdr *secret)
+int uv_find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
+ struct uv_secret_list *list,
+ struct uv_secret_list_item_hdr *secret)
{
u16 start_idx = 0;
u16 list_rc;
@@ -830,36 +897,7 @@ static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
return -ENOENT;
}
-
-/**
- * uv_get_secret_metadata() - get secret metadata for a given secret id.
- * @secret_id: search pattern.
- * @secret: output data, containing the secret's metadata.
- *
- * Search for a secret with the given secret_id in the Ultravisor secret store.
- *
- * Context: might sleep.
- *
- * Return:
- * * %0: - Found entry; secret->idx and secret->type are valid.
- * * %ENOENT - No entry found.
- * * %ENODEV: - Not supported: UV not available or command not available.
- * * %EIO: - Other unexpected UV error.
- */
-int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
- struct uv_secret_list_item_hdr *secret)
-{
- struct uv_secret_list *buf;
- int rc;
-
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- rc = find_secret(secret_id, buf, secret);
- kfree(buf);
- return rc;
-}
-EXPORT_SYMBOL_GPL(uv_get_secret_metadata);
+EXPORT_SYMBOL_GPL(uv_find_secret);
/**
* uv_retrieve_secret() - get the secret value for the secret index.
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index f0ffe874adc2..9a723c48b05a 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -8,7 +8,7 @@ include $(srctree)/virt/kvm/Makefile.kvm
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
kvm-y += kvm-s390.o intercept.o interrupt.o priv.o sigp.o
-kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o gmap.o gmap-vsie.o
+kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o gmap-vsie.o
kvm-$(CONFIG_VFIO_PCI_ZDEV_KVM) += pci.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 74f73141f9b9..53233dec8cad 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -11,12 +11,30 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/gmap.h>
+#include <asm/gmap_helpers.h>
#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"
+static void do_discard_gfn_range(struct kvm_vcpu *vcpu, gfn_t gfn_start, gfn_t gfn_end)
+{
+ struct kvm_memslot_iter iter;
+ struct kvm_memory_slot *slot;
+ struct kvm_memslots *slots;
+ unsigned long start, end;
+
+ slots = kvm_vcpu_memslots(vcpu);
+
+ kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
+ slot = iter.slot;
+ start = __gfn_to_hva_memslot(slot, max(gfn_start, slot->base_gfn));
+ end = __gfn_to_hva_memslot(slot, min(gfn_end, slot->base_gfn + slot->npages));
+ gmap_helper_discard(vcpu->kvm->mm, start, end);
+ }
+}
+
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
unsigned long start, end;
@@ -32,12 +50,13 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
+ mmap_read_lock(vcpu->kvm->mm);
/*
* We checked for start >= end above, so lets check for the
* fast path (no prefix swap page involved)
*/
if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
- gmap_discard(vcpu->arch.gmap, start, end);
+ do_discard_gfn_range(vcpu, gpa_to_gfn(start), gpa_to_gfn(end));
} else {
/*
* This is slow path. gmap_discard will check for start
@@ -45,13 +64,14 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
* prefix and let gmap_discard make some of these calls
* NOPs.
*/
- gmap_discard(vcpu->arch.gmap, start, prefix);
+ do_discard_gfn_range(vcpu, gpa_to_gfn(start), gpa_to_gfn(prefix));
if (start <= prefix)
- gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE);
+ do_discard_gfn_range(vcpu, 0, 1);
if (end > prefix + PAGE_SIZE)
- gmap_discard(vcpu->arch.gmap, PAGE_SIZE, 2 * PAGE_SIZE);
- gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
+ do_discard_gfn_range(vcpu, 1, 2);
+ do_discard_gfn_range(vcpu, gpa_to_gfn(prefix) + 2, gpa_to_gfn(end));
}
+ mmap_read_unlock(vcpu->kvm->mm);
return 0;
}
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index f6fded15633a..e23670e1949c 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -16,9 +16,10 @@
#include <asm/gmap.h>
#include <asm/dat-bits.h>
#include "kvm-s390.h"
-#include "gmap.h"
#include "gaccess.h"
+#define GMAP_SHADOW_FAKE_TABLE 1ULL
+
/*
* vaddress union in order to easily decode a virtual address into its
* region first index, region second index etc. parts.
diff --git a/arch/s390/kvm/gmap-vsie.c b/arch/s390/kvm/gmap-vsie.c
index a6d1dbb04c97..56ef153eb8fe 100644
--- a/arch/s390/kvm/gmap-vsie.c
+++ b/arch/s390/kvm/gmap-vsie.c
@@ -22,7 +22,6 @@
#include <asm/uv.h>
#include "kvm-s390.h"
-#include "gmap.h"
/**
* gmap_find_shadow - find a specific asce in the list of shadow tables
diff --git a/arch/s390/kvm/gmap.c b/arch/s390/kvm/gmap.c
deleted file mode 100644
index 6d8944d1b4a0..000000000000
--- a/arch/s390/kvm/gmap.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Guest memory management for KVM/s390
- *
- * Copyright IBM Corp. 2008, 2020, 2024
- *
- * Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
- * David Hildenbrand <david@redhat.com>
- * Janosch Frank <frankja@linux.vnet.ibm.com>
- */
-
-#include <linux/compiler.h>
-#include <linux/kvm.h>
-#include <linux/kvm_host.h>
-#include <linux/pgtable.h>
-#include <linux/pagemap.h>
-
-#include <asm/lowcore.h>
-#include <asm/gmap.h>
-#include <asm/uv.h>
-
-#include "gmap.h"
-
-/**
- * gmap_make_secure() - make one guest page secure
- * @gmap: the guest gmap
- * @gaddr: the guest address that needs to be made secure
- * @uvcb: the UVCB specifying which operation needs to be performed
- *
- * Context: needs to be called with kvm->srcu held.
- * Return: 0 on success, < 0 in case of error.
- */
-int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
-{
- struct kvm *kvm = gmap->private;
- unsigned long vmaddr;
-
- lockdep_assert_held(&kvm->srcu);
-
- vmaddr = gfn_to_hva(kvm, gpa_to_gfn(gaddr));
- if (kvm_is_error_hva(vmaddr))
- return -EFAULT;
- return make_hva_secure(gmap->mm, vmaddr, uvcb);
-}
-
-int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
-{
- struct uv_cb_cts uvcb = {
- .header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
- .header.len = sizeof(uvcb),
- .guest_handle = gmap->guest_handle,
- .gaddr = gaddr,
- };
-
- return gmap_make_secure(gmap, gaddr, &uvcb);
-}
-
-/**
- * __gmap_destroy_page() - Destroy a guest page.
- * @gmap: the gmap of the guest
- * @page: the page to destroy
- *
- * An attempt will be made to destroy the given guest page. If the attempt
- * fails, an attempt is made to export the page. If both attempts fail, an
- * appropriate error is returned.
- *
- * Context: must be called holding the mm lock for gmap->mm
- */
-static int __gmap_destroy_page(struct gmap *gmap, struct page *page)
-{
- struct folio *folio = page_folio(page);
- int rc;
-
- /*
- * See gmap_make_secure(): large folios cannot be secure. Small
- * folio implies FW_LEVEL_PTE.
- */
- if (folio_test_large(folio))
- return -EFAULT;
-
- rc = uv_destroy_folio(folio);
- /*
- * Fault handlers can race; it is possible that two CPUs will fault
- * on the same secure page. One CPU can destroy the page, reboot,
- * re-enter secure mode and import it, while the second CPU was
- * stuck at the beginning of the handler. At some point the second
- * CPU will be able to progress, and it will not be able to destroy
- * the page. In that case we do not want to terminate the process,
- * we instead try to export the page.
- */
- if (rc)
- rc = uv_convert_from_secure_folio(folio);
-
- return rc;
-}
-
-/**
- * gmap_destroy_page() - Destroy a guest page.
- * @gmap: the gmap of the guest
- * @gaddr: the guest address to destroy
- *
- * An attempt will be made to destroy the given guest page. If the attempt
- * fails, an attempt is made to export the page. If both attempts fail, an
- * appropriate error is returned.
- *
- * Context: may sleep.
- */
-int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
-{
- struct page *page;
- int rc = 0;
-
- mmap_read_lock(gmap->mm);
- page = gfn_to_page(gmap->private, gpa_to_gfn(gaddr));
- if (page)
- rc = __gmap_destroy_page(gmap, page);
- kvm_release_page_clean(page);
- mmap_read_unlock(gmap->mm);
- return rc;
-}
diff --git a/arch/s390/kvm/gmap.h b/arch/s390/kvm/gmap.h
deleted file mode 100644
index c8f031c9ea5f..000000000000
--- a/arch/s390/kvm/gmap.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * KVM guest address space mapping code
- *
- * Copyright IBM Corp. 2007, 2016, 2025
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- * Claudio Imbrenda <imbrenda@linux.ibm.com>
- */
-
-#ifndef ARCH_KVM_S390_GMAP_H
-#define ARCH_KVM_S390_GMAP_H
-
-#define GMAP_SHADOW_FAKE_TABLE 1ULL
-
-int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
-int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
-int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
-struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level);
-
-/**
- * gmap_shadow_valid - check if a shadow guest address space matches the
- * given properties and is still valid
- * @sg: pointer to the shadow guest address space structure
- * @asce: ASCE for which the shadow table is requested
- * @edat_level: edat level to be used for the shadow translation
- *
- * Returns 1 if the gmap shadow is still valid and matches the given
- * properties, the caller can continue using it. Returns 0 otherwise, the
- * caller has to request a new shadow gmap in this case.
- *
- */
-static inline int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
-{
- if (sg->removed)
- return 0;
- return sg->orig_asce == asce && sg->edat_level == edat_level;
-}
-
-#endif
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 610dd44a948b..c7908950c1f4 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -21,7 +21,6 @@
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"
-#include "gmap.h"
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
{
@@ -95,7 +94,7 @@ static int handle_validity(struct kvm_vcpu *vcpu)
vcpu->stat.exit_validity++;
trace_kvm_s390_intercept_validity(vcpu, viwhy);
- KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
+ KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%p)", viwhy,
current->pid, vcpu->kvm);
/* do not warn on invalid runtime instrumentation mode */
@@ -545,7 +544,7 @@ static int handle_pv_uvc(struct kvm_vcpu *vcpu)
guest_uvcb->header.cmd);
return 0;
}
- rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
+ rc = kvm_s390_pv_make_secure(vcpu->kvm, uvcb.gaddr, &uvcb);
/*
* If the unpin did not succeed, the guest will exit again for the UVC
* and we will retry the unpin.
@@ -653,10 +652,8 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
break;
case ICPT_PV_PREF:
rc = 0;
- gmap_convert_to_secure(vcpu->arch.gmap,
- kvm_s390_get_prefix(vcpu));
- gmap_convert_to_secure(vcpu->arch.gmap,
- kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
+ kvm_s390_pv_convert_to_secure(vcpu->kvm, kvm_s390_get_prefix(vcpu));
+ kvm_s390_pv_convert_to_secure(vcpu->kvm, kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
break;
default:
return -EOPNOTSUPP;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 2811a6c093b8..60c360c18690 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -3161,7 +3161,7 @@ void kvm_s390_gisa_clear(struct kvm *kvm)
if (!gi->origin)
return;
gisa_clear_ipm(gi->origin);
- VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
+ VM_EVENT(kvm, 3, "gisa 0x%p cleared", gi->origin);
}
void kvm_s390_gisa_init(struct kvm *kvm)
@@ -3177,7 +3177,7 @@ void kvm_s390_gisa_init(struct kvm *kvm)
hrtimer_setup(&gi->timer, gisa_vcpu_kicker, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
gi->origin->next_alert = (u32)virt_to_phys(gi->origin);
- VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
+ VM_EVENT(kvm, 3, "gisa 0x%p initialized", gi->origin);
}
void kvm_s390_gisa_enable(struct kvm *kvm)
@@ -3218,7 +3218,7 @@ void kvm_s390_gisa_destroy(struct kvm *kvm)
process_gib_alert_list();
hrtimer_cancel(&gi->timer);
gi->origin = NULL;
- VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
+ VM_EVENT(kvm, 3, "gisa 0x%p destroyed", gisa);
}
void kvm_s390_gisa_disable(struct kvm *kvm)
@@ -3467,7 +3467,7 @@ int __init kvm_s390_gib_init(u8 nisc)
}
}
- KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
+ KVM_EVENT(3, "gib 0x%p (nisc=%d) initialized", gib, gib->nisc);
goto out;
out_unreg_gal:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index fff863734975..d5ad10791c25 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -40,6 +40,7 @@
#include <asm/machine.h>
#include <asm/stp.h>
#include <asm/gmap.h>
+#include <asm/gmap_helpers.h>
#include <asm/nmi.h>
#include <asm/isc.h>
#include <asm/sclp.h>
@@ -52,7 +53,6 @@
#include "kvm-s390.h"
#include "gaccess.h"
#include "pci.h"
-#include "gmap.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -1022,7 +1022,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
}
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
- VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
+ VM_EVENT(kvm, 3, "New guest asce: 0x%p",
(void *) kvm->arch.gmap->asce);
break;
}
@@ -2674,7 +2674,9 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
if (r)
break;
- r = s390_disable_cow_sharing();
+ mmap_write_lock(kvm->mm);
+ r = gmap_helper_disable_cow_sharing();
+ mmap_write_unlock(kvm->mm);
if (r)
break;
@@ -3466,7 +3468,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_s390_gisa_init(kvm);
INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
kvm->arch.pv.set_aside = NULL;
- KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
+ KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
return 0;
out_err:
@@ -3529,7 +3531,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_s390_destroy_adapters(kvm);
kvm_s390_clear_float_irqs(kvm);
kvm_s390_vsie_destroy(kvm);
- KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
+ KVM_EVENT(3, "vm 0x%p destroyed", kvm);
}
/* Section: vcpu related */
@@ -3650,7 +3652,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
free_page((unsigned long)old_sca);
- VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
+ VM_EVENT(kvm, 2, "Switched to ESCA (0x%p -> 0x%p)",
old_sca, kvm->arch.sca);
return 0;
}
@@ -4027,7 +4029,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
goto out_free_sie_block;
}
- VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
+ VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sie block at 0x%p",
vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
@@ -4973,7 +4975,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
* previous protected guest. The old pages need to be destroyed
* so the new guest can use them.
*/
- if (gmap_destroy_page(vcpu->arch.gmap, gaddr)) {
+ if (kvm_s390_pv_destroy_page(vcpu->kvm, gaddr)) {
/*
* Either KVM messed up the secure guest mapping or the
* same page is mapped into multiple secure guests.
@@ -4995,7 +4997,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
* guest has not been imported yet. Try to import the page into
* the protected guest.
*/
- rc = gmap_convert_to_secure(vcpu->arch.gmap, gaddr);
+ rc = kvm_s390_pv_convert_to_secure(vcpu->kvm, gaddr);
if (rc == -EINVAL)
send_sig(SIGSEGV, current, 0);
if (rc != -ENXIO)
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 8d3bbb2dd8d2..c44fe0c3a097 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -308,6 +308,9 @@ int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc);
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
u16 *rc, u16 *rrc);
+int kvm_s390_pv_destroy_page(struct kvm *kvm, unsigned long gaddr);
+int kvm_s390_pv_convert_to_secure(struct kvm *kvm, unsigned long gaddr);
+int kvm_s390_pv_make_secure(struct kvm *kvm, unsigned long gaddr, void *uvcb);
static inline u64 kvm_s390_pv_get_handle(struct kvm *kvm)
{
@@ -319,6 +322,41 @@ static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
return vcpu->arch.pv.handle;
}
+/**
+ * __kvm_s390_pv_destroy_page() - Destroy a guest page.
+ * @page: the page to destroy
+ *
+ * An attempt will be made to destroy the given guest page. If the attempt
+ * fails, an attempt is made to export the page. If both attempts fail, an
+ * appropriate error is returned.
+ *
+ * Context: must be called holding the mm lock for gmap->mm
+ */
+static inline int __kvm_s390_pv_destroy_page(struct page *page)
+{
+ struct folio *folio = page_folio(page);
+ int rc;
+
+ /* Large folios cannot be secure. Small folio implies FW_LEVEL_PTE. */
+ if (folio_test_large(folio))
+ return -EFAULT;
+
+ rc = uv_destroy_folio(folio);
+ /*
+ * Fault handlers can race; it is possible that two CPUs will fault
+ * on the same secure page. One CPU can destroy the page, reboot,
+ * re-enter secure mode and import it, while the second CPU was
+ * stuck at the beginning of the handler. At some point the second
+ * CPU will be able to progress, and it will not be able to destroy
+ * the page. In that case we do not want to terminate the process,
+ * we instead try to export the page.
+ */
+ if (rc)
+ rc = uv_convert_from_secure_folio(folio);
+
+ return rc;
+}
+
/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
@@ -398,6 +436,10 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
unsigned long end);
void kvm_s390_vsie_init(struct kvm *kvm);
void kvm_s390_vsie_destroy(struct kvm *kvm);
+int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level);
+
+/* implemented in gmap-vsie.c */
+struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level);
/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 1a49b89706f8..9253c70897a8 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1248,6 +1248,8 @@ static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
static int handle_essa(struct kvm_vcpu *vcpu)
{
+ lockdep_assert_held(&vcpu->kvm->srcu);
+
/* entries expected to be 1FF */
int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
unsigned long *cbrlo;
@@ -1297,12 +1299,8 @@ static int handle_essa(struct kvm_vcpu *vcpu)
/* Retry the ESSA instruction */
kvm_s390_retry_instr(vcpu);
} else {
- int srcu_idx;
-
mmap_read_lock(vcpu->kvm->mm);
- srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
i = __do_essa(vcpu, orc);
- srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
mmap_read_unlock(vcpu->kvm->mm);
if (i < 0)
return i;
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 22c012aa5206..14c330ec8ceb 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -17,7 +17,6 @@
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include "kvm-s390.h"
-#include "gmap.h"
bool kvm_s390_pv_is_protected(struct kvm *kvm)
{
@@ -34,6 +33,64 @@ bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL_GPL(kvm_s390_pv_cpu_is_protected);
/**
+ * kvm_s390_pv_make_secure() - make one guest page secure
+ * @kvm: the guest
+ * @gaddr: the guest address that needs to be made secure
+ * @uvcb: the UVCB specifying which operation needs to be performed
+ *
+ * Context: needs to be called with kvm->srcu held.
+ * Return: 0 on success, < 0 in case of error.
+ */
+int kvm_s390_pv_make_secure(struct kvm *kvm, unsigned long gaddr, void *uvcb)
+{
+ unsigned long vmaddr;
+
+ lockdep_assert_held(&kvm->srcu);
+
+ vmaddr = gfn_to_hva(kvm, gpa_to_gfn(gaddr));
+ if (kvm_is_error_hva(vmaddr))
+ return -EFAULT;
+ return make_hva_secure(kvm->mm, vmaddr, uvcb);
+}
+
+int kvm_s390_pv_convert_to_secure(struct kvm *kvm, unsigned long gaddr)
+{
+ struct uv_cb_cts uvcb = {
+ .header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
+ .header.len = sizeof(uvcb),
+ .guest_handle = kvm_s390_pv_get_handle(kvm),
+ .gaddr = gaddr,
+ };
+
+ return kvm_s390_pv_make_secure(kvm, gaddr, &uvcb);
+}
+
+/**
+ * kvm_s390_pv_destroy_page() - Destroy a guest page.
+ * @kvm: the guest
+ * @gaddr: the guest address to destroy
+ *
+ * An attempt will be made to destroy the given guest page. If the attempt
+ * fails, an attempt is made to export the page. If both attempts fail, an
+ * appropriate error is returned.
+ *
+ * Context: may sleep.
+ */
+int kvm_s390_pv_destroy_page(struct kvm *kvm, unsigned long gaddr)
+{
+ struct page *page;
+ int rc = 0;
+
+ mmap_read_lock(kvm->mm);
+ page = gfn_to_page(kvm, gpa_to_gfn(gaddr));
+ if (page)
+ rc = __kvm_s390_pv_destroy_page(page);
+ kvm_release_page_clean(page);
+ mmap_read_unlock(kvm->mm);
+ return rc;
+}
+
+/**
* struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
* be destroyed
*
@@ -638,7 +695,7 @@ static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
.tweak[0] = tweak,
.tweak[1] = offset,
};
- int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);
+ int ret = kvm_s390_pv_make_secure(kvm, addr, &uvcb);
unsigned long vmaddr;
bool unlocked;
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 9ac92dbf680d..9e28f165c114 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -56,7 +56,7 @@ TRACE_EVENT(kvm_s390_create_vcpu,
__entry->sie_block = sie_block;
),
- TP_printk("create cpu %d at 0x%pK, sie block at 0x%pK",
+ TP_printk("create cpu %d at 0x%p, sie block at 0x%p",
__entry->id, __entry->vcpu, __entry->sie_block)
);
@@ -255,7 +255,7 @@ TRACE_EVENT(kvm_s390_enable_css,
__entry->kvm = kvm;
),
- TP_printk("enabling channel I/O support (kvm @ %pK)\n",
+ TP_printk("enabling channel I/O support (kvm @ %p)\n",
__entry->kvm)
);
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index a78df3a4f353..13a9661d2b28 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -23,7 +23,6 @@
#include <asm/facility.h>
#include "kvm-s390.h"
#include "gaccess.h"
-#include "gmap.h"
enum vsie_page_flags {
VSIE_PAGE_IN_USE = 0,
@@ -68,6 +67,24 @@ struct vsie_page {
__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE]; /* 0x0800 */
};
+/**
+ * gmap_shadow_valid() - check if a shadow guest address space matches the
+ * given properties and is still valid
+ * @sg: pointer to the shadow guest address space structure
+ * @asce: ASCE for which the shadow table is requested
+ * @edat_level: edat level to be used for the shadow translation
+ *
+ * Returns 1 if the gmap shadow is still valid and matches the given
+ * properties, the caller can continue using it. Returns 0 otherwise; the
+ * caller has to request a new shadow gmap in this case.
+ */
+int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
+{
+ if (sg->removed)
+ return 0;
+ return sg->orig_asce == asce && sg->edat_level == edat_level;
+}
+
/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
__u16 reason_code)
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 14bbfe50033c..cd35cdbfa871 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -3,6 +3,7 @@
# Makefile for s390-specific library files..
#
+obj-y += crypto/
lib-y += delay.o string.o uaccess.o find.o spinlock.o tishift.o
lib-y += csum-partial.o
obj-y += mem.o xor.o
@@ -26,4 +27,4 @@ lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
obj-$(CONFIG_EXPOLINE_EXTERN) += expoline.o
obj-$(CONFIG_CRC32_ARCH) += crc32-s390.o
-crc32-s390-y := crc32-glue.o crc32le-vx.o crc32be-vx.o
+crc32-s390-y := crc32.o crc32le-vx.o crc32be-vx.o
diff --git a/arch/s390/lib/crc32-glue.c b/arch/s390/lib/crc32.c
index 124214a27340..3c4b344417c1 100644
--- a/arch/s390/lib/crc32-glue.c
+++ b/arch/s390/lib/crc32.c
@@ -18,8 +18,6 @@
#define VX_ALIGNMENT 16L
#define VX_ALIGN_MASK (VX_ALIGNMENT - 1)
-static DEFINE_STATIC_KEY_FALSE(have_vxrs);
-
/*
* DEFINE_CRC32_VX() - Define a CRC-32 function using the vector extension
*
@@ -34,8 +32,7 @@ static DEFINE_STATIC_KEY_FALSE(have_vxrs);
unsigned long prealign, aligned, remaining; \
DECLARE_KERNEL_FPU_ONSTACK16(vxstate); \
\
- if (datalen < VX_MIN_LEN + VX_ALIGN_MASK || \
- !static_branch_likely(&have_vxrs)) \
+ if (datalen < VX_MIN_LEN + VX_ALIGN_MASK || !cpu_has_vx()) \
return ___crc32_sw(crc, data, datalen); \
\
if ((unsigned long)data & VX_ALIGN_MASK) { \
@@ -64,25 +61,13 @@ DEFINE_CRC32_VX(crc32_le_arch, crc32_le_vgfm_16, crc32_le_base)
DEFINE_CRC32_VX(crc32_be_arch, crc32_be_vgfm_16, crc32_be_base)
DEFINE_CRC32_VX(crc32c_arch, crc32c_le_vgfm_16, crc32c_base)
-static int __init crc32_s390_init(void)
-{
- if (cpu_have_feature(S390_CPU_FEATURE_VXRS))
- static_branch_enable(&have_vxrs);
- return 0;
-}
-arch_initcall(crc32_s390_init);
-
-static void __exit crc32_s390_exit(void)
-{
-}
-module_exit(crc32_s390_exit);
-
u32 crc32_optimizations(void)
{
- if (static_key_enabled(&have_vxrs))
+ if (cpu_has_vx()) {
return CRC32_LE_OPTIMIZATION |
CRC32_BE_OPTIMIZATION |
CRC32C_OPTIMIZATION;
+ }
return 0;
}
EXPORT_SYMBOL(crc32_optimizations);
diff --git a/arch/s390/lib/crypto/Kconfig b/arch/s390/lib/crypto/Kconfig
new file mode 100644
index 000000000000..e3f855ef4393
--- /dev/null
+++ b/arch/s390/lib/crypto/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CRYPTO_CHACHA_S390
+ tristate
+ default CRYPTO_LIB_CHACHA
+ select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+
+config CRYPTO_SHA256_S390
+ tristate
+ default CRYPTO_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256
+ select CRYPTO_LIB_SHA256_GENERIC
diff --git a/arch/s390/lib/crypto/Makefile b/arch/s390/lib/crypto/Makefile
new file mode 100644
index 000000000000..5df30f1e7930
--- /dev/null
+++ b/arch/s390/lib/crypto/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CRYPTO_CHACHA_S390) += chacha_s390.o
+chacha_s390-y := chacha-glue.o chacha-s390.o
+
+obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256-s390.o
+sha256-s390-y := sha256.o
diff --git a/arch/s390/lib/crypto/chacha-glue.c b/arch/s390/lib/crypto/chacha-glue.c
new file mode 100644
index 000000000000..f95ba3483bbc
--- /dev/null
+++ b/arch/s390/lib/crypto/chacha-glue.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ChaCha stream cipher (s390 optimized)
+ *
+ * Copyright IBM Corp. 2021
+ */
+
+#define KMSG_COMPONENT "chacha_s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <crypto/chacha.h>
+#include <linux/cpufeature.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <asm/fpu.h>
+#include "chacha-s390.h"
+
+void hchacha_block_arch(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds)
+{
+ /* TODO: implement hchacha_block_arch() in assembly */
+ hchacha_block_generic(state, out, nrounds);
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ /* s390 chacha20 implementation has 20 rounds hard-coded,
+ * it cannot handle a block of data or less, but otherwise
+ * it can handle data of arbitrary size
+ */
+ if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20 || !cpu_has_vx()) {
+ chacha_crypt_generic(state, dst, src, bytes, nrounds);
+ } else {
+ DECLARE_KERNEL_FPU_ONSTACK32(vxstate);
+
+ kernel_fpu_begin(&vxstate, KERNEL_VXR);
+ chacha20_vx(dst, src, bytes, &state->x[4], &state->x[12]);
+ kernel_fpu_end(&vxstate, KERNEL_VXR);
+
+ state->x[12] += round_up(bytes, CHACHA_BLOCK_SIZE) /
+ CHACHA_BLOCK_SIZE;
+ }
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+bool chacha_is_arch_optimized(void)
+{
+ return cpu_has_vx();
+}
+EXPORT_SYMBOL(chacha_is_arch_optimized);
+
+MODULE_DESCRIPTION("ChaCha stream cipher (s390 optimized)");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/s390/crypto/chacha-s390.S b/arch/s390/lib/crypto/chacha-s390.S
index 63f3102678c0..63f3102678c0 100644
--- a/arch/s390/crypto/chacha-s390.S
+++ b/arch/s390/lib/crypto/chacha-s390.S
diff --git a/arch/s390/crypto/chacha-s390.h b/arch/s390/lib/crypto/chacha-s390.h
index 733744ce30f5..733744ce30f5 100644
--- a/arch/s390/crypto/chacha-s390.h
+++ b/arch/s390/lib/crypto/chacha-s390.h
diff --git a/arch/s390/lib/crypto/sha256.c b/arch/s390/lib/crypto/sha256.c
new file mode 100644
index 000000000000..7dfe120fafab
--- /dev/null
+++ b/arch/s390/lib/crypto/sha256.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-256 optimized using the CP Assist for Cryptographic Functions (CPACF)
+ *
+ * Copyright 2025 Google LLC
+ */
+#include <asm/cpacf.h>
+#include <crypto/internal/sha2.h>
+#include <linux/cpufeature.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_cpacf_sha256);
+
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks)
+{
+ if (static_branch_likely(&have_cpacf_sha256))
+ cpacf_kimd(CPACF_KIMD_SHA_256, state, data,
+ nblocks * SHA256_BLOCK_SIZE);
+ else
+ sha256_blocks_generic(state, data, nblocks);
+}
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+
+bool sha256_is_arch_optimized(void)
+{
+ return static_key_enabled(&have_cpacf_sha256);
+}
+EXPORT_SYMBOL_GPL(sha256_is_arch_optimized);
+
+static int __init sha256_s390_mod_init(void)
+{
+ if (cpu_have_feature(S390_CPU_FEATURE_MSA) &&
+ cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_256))
+ static_branch_enable(&have_cpacf_sha256);
+ return 0;
+}
+subsys_initcall(sha256_s390_mod_init);
+
+static void __exit sha256_s390_mod_exit(void)
+{
+}
+module_exit(sha256_s390_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-256 using the CP Assist for Cryptographic Functions (CPACF)");
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 373fa1f01937..099de76e8b1a 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -78,50 +78,6 @@ EXPORT_SYMBOL(strnlen);
#endif
/**
- * strcpy - Copy a %NUL terminated string
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- *
- * returns a pointer to @dest
- */
-#ifdef __HAVE_ARCH_STRCPY
-char *strcpy(char *dest, const char *src)
-{
- char *ret = dest;
-
- asm volatile(
- " lghi 0,0\n"
- "0: mvst %[dest],%[src]\n"
- " jo 0b\n"
- : [dest] "+&a" (dest), [src] "+&a" (src)
- :
- : "cc", "memory", "0");
- return ret;
-}
-EXPORT_SYMBOL(strcpy);
-#endif
-
-/**
- * strncpy - Copy a length-limited, %NUL-terminated string
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @n: The maximum number of bytes to copy
- *
- * The result is not %NUL-terminated if the source exceeds
- * @n bytes.
- */
-#ifdef __HAVE_ARCH_STRNCPY
-char *strncpy(char *dest, const char *src, size_t n)
-{
- size_t len = __strnend(src, n) - src;
- memset(dest + len, 0, n - len);
- memcpy(dest, src, len);
- return dest;
-}
-EXPORT_SYMBOL(strncpy);
-#endif
-
-/**
* strcat - Append one %NUL-terminated string to another
* @dest: The string to be appended to
* @src: The string to append to it
@@ -181,9 +137,6 @@ EXPORT_SYMBOL(strlcat);
* @n: The maximum numbers of bytes to copy
*
* returns a pointer to @dest
- *
- * Note that in contrast to strncpy, strncat ensures the result is
- * terminated.
*/
#ifdef __HAVE_ARCH_STRNCAT
char *strncat(char *dest, const char *src, size_t n)
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index cec20db88479..fa7d98fa1320 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -17,17 +17,18 @@
#ifdef CONFIG_DEBUG_ENTRY
void debug_user_asce(int exit)
{
+ struct lowcore *lc = get_lowcore();
struct ctlreg cr1, cr7;
local_ctl_store(1, &cr1);
local_ctl_store(7, &cr7);
- if (cr1.val == get_lowcore()->kernel_asce.val && cr7.val == get_lowcore()->user_asce.val)
+ if (cr1.val == lc->user_asce.val && cr7.val == lc->user_asce.val)
return;
panic("incorrect ASCE on kernel %s\n"
"cr1: %016lx cr7: %016lx\n"
"kernel: %016lx user: %016lx\n",
exit ? "exit" : "entry", cr1.val, cr7.val,
- get_lowcore()->kernel_asce.val, get_lowcore()->user_asce.val);
+ lc->kernel_asce.val, lc->user_asce.val);
}
#endif /*CONFIG_DEBUG_ENTRY */
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 9726b91fe7e4..bd0401cc7ca5 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -12,3 +12,5 @@ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP) += dump_pagetables.o
obj-$(CONFIG_PGSTE) += gmap.o
obj-$(CONFIG_PFAULT) += pfault.o
+
+obj-$(subst m,y,$(CONFIG_KVM)) += gmap_helpers.o
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index d3e943752fa0..ac604b176660 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -147,11 +147,48 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
}
}
+static void note_page_pte(struct ptdump_state *pt_st, unsigned long addr, pte_t pte)
+{
+ note_page(pt_st, addr, 4, pte_val(pte));
+}
+
+static void note_page_pmd(struct ptdump_state *pt_st, unsigned long addr, pmd_t pmd)
+{
+ note_page(pt_st, addr, 3, pmd_val(pmd));
+}
+
+static void note_page_pud(struct ptdump_state *pt_st, unsigned long addr, pud_t pud)
+{
+ note_page(pt_st, addr, 2, pud_val(pud));
+}
+
+static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d)
+{
+ note_page(pt_st, addr, 1, p4d_val(p4d));
+}
+
+static void note_page_pgd(struct ptdump_state *pt_st, unsigned long addr, pgd_t pgd)
+{
+ note_page(pt_st, addr, 0, pgd_val(pgd));
+}
+
+static void note_page_flush(struct ptdump_state *pt_st)
+{
+ pte_t pte_zero = {0};
+
+ note_page(pt_st, 0, -1, pte_val(pte_zero));
+}
+
bool ptdump_check_wx(void)
{
struct pg_state st = {
.ptdump = {
- .note_page = note_page,
+ .note_page_pte = note_page_pte,
+ .note_page_pmd = note_page_pmd,
+ .note_page_pud = note_page_pud,
+ .note_page_p4d = note_page_p4d,
+ .note_page_pgd = note_page_pgd,
+ .note_page_flush = note_page_flush,
.range = (struct ptdump_range[]) {
{.start = 0, .end = max_addr},
{.start = 0, .end = 0},
@@ -190,7 +227,12 @@ static int ptdump_show(struct seq_file *m, void *v)
{
struct pg_state st = {
.ptdump = {
- .note_page = note_page,
+ .note_page_pte = note_page_pte,
+ .note_page_pmd = note_page_pmd,
+ .note_page_pud = note_page_pud,
+ .note_page_p4d = note_page_p4d,
+ .note_page_pgd = note_page_pgd,
+ .note_page_flush = note_page_flush,
.range = (struct ptdump_range[]) {
{.start = 0, .end = max_addr},
{.start = 0, .end = 0},
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index a6b8b8ea9086..f7da53e212f5 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -530,6 +530,14 @@ segment_modify_shared (char *name, int do_nonshared)
return rc;
}
+static void __dcss_diag_purge_on_cpu_0(void *data)
+{
+ struct dcss_segment *seg = (struct dcss_segment *)data;
+ unsigned long dummy;
+
+ dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
+}
+
/*
* Decrease the use count of a DCSS segment and remove
* it from the address space if nobody is using it
@@ -538,7 +546,6 @@ segment_modify_shared (char *name, int do_nonshared)
void
segment_unload(char *name)
{
- unsigned long dummy;
struct dcss_segment *seg;
if (!machine_is_vm())
@@ -556,7 +563,14 @@ segment_unload(char *name)
kfree(seg->res);
vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
list_del(&seg->list);
- dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
+ /*
+ * Workaround for z/VM issue, where calling the DCSS unload diag on
+ * a non-IPL CPU would cause bogus sclp maximum memory detection on
+ * next IPL.
+ * IPL CPU 0 cannot be set offline, so the dcss_diag() call can
+ * directly be scheduled to that CPU.
+ */
+ smp_call_function_single(0, __dcss_diag_purge_on_cpu_0, seg, 1);
kfree(seg);
out_unlock:
mutex_unlock(&dcss_lock);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index da84ff6770de..3829521450dd 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -40,7 +40,6 @@
#include <asm/ptrace.h>
#include <asm/fault.h>
#include <asm/diag.h>
-#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/facility.h>
#include <asm/uv.h>
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index a94bd4870c65..012a4366a2ad 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -22,9 +22,9 @@
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/machine.h>
+#include <asm/gmap_helpers.h>
#include <asm/gmap.h>
#include <asm/page.h>
-#include <asm/tlb.h>
/*
* The address is saved in a radix tree directly; NULL would be ambiguous,
@@ -620,63 +620,20 @@ EXPORT_SYMBOL(__gmap_link);
*/
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
- struct vm_area_struct *vma;
unsigned long vmaddr;
- spinlock_t *ptl;
- pte_t *ptep;
+
+ mmap_assert_locked(gmap->mm);
/* Find the vm address for the guest address */
vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
gaddr >> PMD_SHIFT);
if (vmaddr) {
vmaddr |= gaddr & ~PMD_MASK;
-
- vma = vma_lookup(gmap->mm, vmaddr);
- if (!vma || is_vm_hugetlb_page(vma))
- return;
-
- /* Get pointer to the page table entry */
- ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
- if (likely(ptep)) {
- ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
- pte_unmap_unlock(ptep, ptl);
- }
+ gmap_helper_zap_one_page(gmap->mm, vmaddr);
}
}
EXPORT_SYMBOL_GPL(__gmap_zap);
-void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
-{
- unsigned long gaddr, vmaddr, size;
- struct vm_area_struct *vma;
-
- mmap_read_lock(gmap->mm);
- for (gaddr = from; gaddr < to;
- gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
- /* Find the vm address for the guest address */
- vmaddr = (unsigned long)
- radix_tree_lookup(&gmap->guest_to_host,
- gaddr >> PMD_SHIFT);
- if (!vmaddr)
- continue;
- vmaddr |= gaddr & ~PMD_MASK;
- /* Find vma in the parent mm */
- vma = find_vma(gmap->mm, vmaddr);
- if (!vma)
- continue;
- /*
- * We do not discard pages that are backed by
- * hugetlbfs, so we don't have to refault them.
- */
- if (is_vm_hugetlb_page(vma))
- continue;
- size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
- zap_page_range_single(vma, vmaddr, size, NULL);
- }
- mmap_read_unlock(gmap->mm);
-}
-EXPORT_SYMBOL_GPL(gmap_discard);
-
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);
@@ -2269,138 +2226,6 @@ int s390_enable_sie(void)
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
-static int find_zeropage_pte_entry(pte_t *pte, unsigned long addr,
- unsigned long end, struct mm_walk *walk)
-{
- unsigned long *found_addr = walk->private;
-
- /* Return 1 of the page is a zeropage. */
- if (is_zero_pfn(pte_pfn(*pte))) {
- /*
- * Shared zeropage in e.g., a FS DAX mapping? We cannot do the
- * right thing and likely don't care: FAULT_FLAG_UNSHARE
- * currently only works in COW mappings, which is also where
- * mm_forbids_zeropage() is checked.
- */
- if (!is_cow_mapping(walk->vma->vm_flags))
- return -EFAULT;
-
- *found_addr = addr;
- return 1;
- }
- return 0;
-}
-
-static const struct mm_walk_ops find_zeropage_ops = {
- .pte_entry = find_zeropage_pte_entry,
- .walk_lock = PGWALK_WRLOCK,
-};
-
-/*
- * Unshare all shared zeropages, replacing them by anonymous pages. Note that
- * we cannot simply zap all shared zeropages, because this could later
- * trigger unexpected userfaultfd missing events.
- *
- * This must be called after mm->context.allow_cow_sharing was
- * set to 0, to avoid future mappings of shared zeropages.
- *
- * mm contracts with s390, that even if mm were to remove a page table,
- * and racing with walk_page_range_vma() calling pte_offset_map_lock()
- * would fail, it will never insert a page table containing empty zero
- * pages once mm_forbids_zeropage(mm) i.e.
- * mm->context.allow_cow_sharing is set to 0.
- */
-static int __s390_unshare_zeropages(struct mm_struct *mm)
-{
- struct vm_area_struct *vma;
- VMA_ITERATOR(vmi, mm, 0);
- unsigned long addr;
- vm_fault_t fault;
- int rc;
-
- for_each_vma(vmi, vma) {
- /*
- * We could only look at COW mappings, but it's more future
- * proof to catch unexpected zeropages in other mappings and
- * fail.
- */
- if ((vma->vm_flags & VM_PFNMAP) || is_vm_hugetlb_page(vma))
- continue;
- addr = vma->vm_start;
-
-retry:
- rc = walk_page_range_vma(vma, addr, vma->vm_end,
- &find_zeropage_ops, &addr);
- if (rc < 0)
- return rc;
- else if (!rc)
- continue;
-
- /* addr was updated by find_zeropage_pte_entry() */
- fault = handle_mm_fault(vma, addr,
- FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
- NULL);
- if (fault & VM_FAULT_OOM)
- return -ENOMEM;
- /*
- * See break_ksm(): even after handle_mm_fault() returned 0, we
- * must start the lookup from the current address, because
- * handle_mm_fault() may back out if there's any difficulty.
- *
- * VM_FAULT_SIGBUS and VM_FAULT_SIGSEGV are unexpected but
- * maybe they could trigger in the future on concurrent
- * truncation. In that case, the shared zeropage would be gone
- * and we can simply retry and make progress.
- */
- cond_resched();
- goto retry;
- }
-
- return 0;
-}
-
-static int __s390_disable_cow_sharing(struct mm_struct *mm)
-{
- int rc;
-
- if (!mm->context.allow_cow_sharing)
- return 0;
-
- mm->context.allow_cow_sharing = 0;
-
- /* Replace all shared zeropages by anonymous pages. */
- rc = __s390_unshare_zeropages(mm);
- /*
- * Make sure to disable KSM (if enabled for the whole process or
- * individual VMAs). Note that nothing currently hinders user space
- * from re-enabling it.
- */
- if (!rc)
- rc = ksm_disable(mm);
- if (rc)
- mm->context.allow_cow_sharing = 1;
- return rc;
-}
-
-/*
- * Disable most COW-sharing of memory pages for the whole process:
- * (1) Disable KSM and unmerge/unshare any KSM pages.
- * (2) Disallow shared zeropages and unshare any zerpages that are mapped.
- *
- * Not that we currently don't bother with COW-shared pages that are shared
- * with parent/child processes due to fork().
- */
-int s390_disable_cow_sharing(void)
-{
- int rc;
-
- mmap_write_lock(current->mm);
- rc = __s390_disable_cow_sharing(current->mm);
- mmap_write_unlock(current->mm);
- return rc;
-}
-EXPORT_SYMBOL_GPL(s390_disable_cow_sharing);
-
/*
* Enable storage key handling from now on and initialize the storage
* keys with the default key.
@@ -2468,7 +2293,7 @@ int s390_enable_skey(void)
goto out_up;
mm->context.uses_skeys = 1;
- rc = __s390_disable_cow_sharing(mm);
+ rc = gmap_helper_disable_cow_sharing();
if (rc) {
mm->context.uses_skeys = 0;
goto out_up;
diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c
new file mode 100644
index 000000000000..a45d417ad951
--- /dev/null
+++ b/arch/s390/mm/gmap_helpers.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper functions for KVM guest address space mapping code
+ *
+ * Copyright IBM Corp. 2007, 2025
+ */
+#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/pagewalk.h>
+#include <linux/ksm.h>
+#include <asm/gmap_helpers.h>
+
+/**
+ * ptep_zap_swap_entry() - discard a swap entry.
+ * @mm: the mm
+ * @entry: the swap entry that needs to be zapped
+ *
+ * Discards the given swap entry. If the swap entry was an actual swap
+ * entry (and not a migration entry, for example), the actual swapped
+ * page is also discarded from swap.
+ */
+static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
+{
+ if (!non_swap_entry(entry))
+ dec_mm_counter(mm, MM_SWAPENTS);
+ else if (is_migration_entry(entry))
+ dec_mm_counter(mm, mm_counter(pfn_swap_entry_folio(entry)));
+ free_swap_and_cache(entry);
+}
+
+/**
+ * gmap_helper_zap_one_page() - discard a page if it was swapped.
+ * @mm: the mm
+ * @vmaddr: the userspace virtual address that needs to be discarded
+ *
+ * If the given address maps to a swap entry, discard it.
+ *
+ * Context: needs to be called while holding the mmap lock.
+ */
+void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr)
+{
+ struct vm_area_struct *vma;
+ spinlock_t *ptl;
+ pte_t *ptep;
+
+ mmap_assert_locked(mm);
+
+ /* Find the VMA for the given userspace address */
+ vma = vma_lookup(mm, vmaddr);
+ if (!vma || is_vm_hugetlb_page(vma))
+ return;
+
+ /* Get pointer to the page table entry */
+ ptep = get_locked_pte(mm, vmaddr, &ptl);
+ if (unlikely(!ptep))
+ return;
+ if (pte_swap(*ptep))
+ ptep_zap_swap_entry(mm, pte_to_swp_entry(*ptep));
+ pte_unmap_unlock(ptep, ptl);
+}
+EXPORT_SYMBOL_GPL(gmap_helper_zap_one_page);
+
+/**
+ * gmap_helper_discard() - discard user pages in the given range
+ * @mm: the mm
+ * @vmaddr: starting userspace address
+ * @end: end address (first address outside the range)
+ *
+ * All userspace pages in the range [@vmaddr, @end) are discarded and unmapped.
+ *
+ * Context: needs to be called while holding the mmap lock.
+ */
+void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr, unsigned long end)
+{
+ struct vm_area_struct *vma;
+
+ mmap_assert_locked(mm);
+
+ while (vmaddr < end) {
+ vma = find_vma_intersection(mm, vmaddr, end);
+ if (!vma)
+ return;
+ if (!is_vm_hugetlb_page(vma))
+ zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr, NULL);
+ vmaddr = vma->vm_end;
+ }
+}
+EXPORT_SYMBOL_GPL(gmap_helper_discard);
+
+static int find_zeropage_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ unsigned long *found_addr = walk->private;
+
+ /* Return 1 if the page is a zeropage. */
+ if (is_zero_pfn(pte_pfn(*pte))) {
+ /*
+ * Shared zeropage in e.g., a FS DAX mapping? We cannot do the
+ * right thing and likely don't care: FAULT_FLAG_UNSHARE
+ * currently only works in COW mappings, which is also where
+ * mm_forbids_zeropage() is checked.
+ */
+ if (!is_cow_mapping(walk->vma->vm_flags))
+ return -EFAULT;
+
+ *found_addr = addr;
+ return 1;
+ }
+ return 0;
+}
+
+static const struct mm_walk_ops find_zeropage_ops = {
+ .pte_entry = find_zeropage_pte_entry,
+ .walk_lock = PGWALK_WRLOCK,
+};
+
+/**
+ * __gmap_helper_unshare_zeropages() - unshare all shared zeropages
+ * @mm: the mm whose zero pages are to be unshared
+ *
+ * Unshare all shared zeropages, replacing them by anonymous pages. Note that
+ * we cannot simply zap all shared zeropages, because this could later
+ * trigger unexpected userfaultfd missing events.
+ *
+ * This must be called after mm->context.allow_cow_sharing was
+ * set to 0, to avoid future mappings of shared zeropages.
+ *
+ * mm contracts with s390 that, even if mm were to remove a page table
+ * (racing with walk_page_range_vma(), so that a call to
+ * pte_offset_map_lock() would fail), it will never insert a page table
+ * containing empty zero pages once mm_forbids_zeropage(mm), i.e.
+ * mm->context.allow_cow_sharing, is set to 0.
+ */
+static int __gmap_helper_unshare_zeropages(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
+ unsigned long addr;
+ vm_fault_t fault;
+ int rc;
+
+ for_each_vma(vmi, vma) {
+ /*
+ * We could only look at COW mappings, but it's more future
+ * proof to catch unexpected zeropages in other mappings and
+ * fail.
+ */
+ if ((vma->vm_flags & VM_PFNMAP) || is_vm_hugetlb_page(vma))
+ continue;
+ addr = vma->vm_start;
+
+retry:
+ rc = walk_page_range_vma(vma, addr, vma->vm_end,
+ &find_zeropage_ops, &addr);
+ if (rc < 0)
+ return rc;
+ else if (!rc)
+ continue;
+
+ /* addr was updated by find_zeropage_pte_entry() */
+ fault = handle_mm_fault(vma, addr,
+ FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
+ NULL);
+ if (fault & VM_FAULT_OOM)
+ return -ENOMEM;
+ /*
+ * See break_ksm(): even after handle_mm_fault() returned 0, we
+ * must start the lookup from the current address, because
+ * handle_mm_fault() may back out if there's any difficulty.
+ *
+ * VM_FAULT_SIGBUS and VM_FAULT_SIGSEGV are unexpected but
+ * maybe they could trigger in the future on concurrent
+ * truncation. In that case, the shared zeropage would be gone
+ * and we can simply retry and make progress.
+ */
+ cond_resched();
+ goto retry;
+ }
+
+ return 0;
+}
+
+/**
+ * gmap_helper_disable_cow_sharing() - disable all COW sharing
+ *
+ * Disable most COW-sharing of memory pages for the whole process:
+ * (1) Disable KSM and unmerge/unshare any KSM pages.
+ * (2) Disallow shared zeropages and unshare any zeropages that are mapped.
+ *
+ * Note that we currently don't bother with COW-shared pages that are shared
+ * with parent/child processes due to fork().
+ */
+int gmap_helper_disable_cow_sharing(void)
+{
+ struct mm_struct *mm = current->mm;
+ int rc;
+
+ mmap_assert_write_locked(mm);
+
+ if (!mm->context.allow_cow_sharing)
+ return 0;
+
+ mm->context.allow_cow_sharing = 0;
+
+ /* Replace all shared zeropages by anonymous pages. */
+ rc = __gmap_helper_unshare_zeropages(mm);
+ /*
+ * Make sure to disable KSM (if enabled for the whole process or
+ * individual VMAs). Note that nothing currently hinders user space
+ * from re-enabling it.
+ */
+ if (!rc)
+ rc = ksm_disable(mm);
+ if (rc)
+ mm->context.allow_cow_sharing = 1;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_helper_disable_cow_sharing);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index afa085e8186c..074bf4fb4ce2 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -40,7 +40,6 @@
#include <asm/kfence.h>
#include <asm/dma.h>
#include <asm/abs_lowcore.h>
-#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/sclp.h>
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index e3a6f8ae156c..b449fd2605b0 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -12,8 +12,6 @@
#include <asm/mmu_context.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
-#include <asm/gmap.h>
-#include <asm/tlb.h>
#include <asm/tlbflush.h>
unsigned long *crst_table_alloc(struct mm_struct *mm)
@@ -38,11 +36,15 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
+ struct ctlreg asce;
/* change all active ASCEs to avoid the creation of new TLBs */
if (current->active_mm == mm) {
- get_lowcore()->user_asce.val = mm->context.asce;
- local_ctl_load(7, &get_lowcore()->user_asce);
+ asce.val = mm->context.asce;
+ get_lowcore()->user_asce = asce;
+ local_ctl_load(7, &asce);
+ if (!test_thread_flag(TIF_ASCE_PRIMARY))
+ local_ctl_load(1, &asce);
}
__tlb_flush_local();
}
@@ -52,6 +54,8 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
unsigned long asce_limit = mm->context.asce_limit;
+ mmap_assert_write_locked(mm);
+
/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
VM_BUG_ON(asce_limit < _REGION2_SIZE);
@@ -75,13 +79,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
spin_lock_bh(&mm->page_table_lock);
- /*
- * This routine gets called with mmap_lock lock held and there is
- * no reason to optimize for the case of otherwise. However, if
- * that would ever change, the below check will let us know.
- */
- VM_BUG_ON(asce_limit != mm->context.asce_limit);
-
if (p4d) {
__pgd = (unsigned long *) mm->pgd;
p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
@@ -145,7 +142,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
ptdesc = pagetable_alloc(GFP_KERNEL, 0);
if (!ptdesc)
return NULL;
- if (!pagetable_pte_ctor(ptdesc)) {
+ if (!pagetable_pte_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 9901934284ec..7df70cd8f739 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -20,7 +20,6 @@
#include <linux/ksm.h>
#include <linux/mman.h>
-#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 0776dfde2dba..c7f8313ba449 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -48,8 +48,6 @@ struct bpf_jit {
int lit64; /* Current position in 64-bit literal pool */
int base_ip; /* Base address for literal pool */
int exit_ip; /* Address of exit */
- int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
- int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
int tail_call_start; /* Tail call start offset */
int excnt; /* Number of exception table entries */
int prologue_plt_ret; /* Return address for prologue hotpatch PLT */
@@ -127,6 +125,18 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
jit->seen_regs |= (1 << r1);
}
+static s32 off_to_pcrel(struct bpf_jit *jit, u32 off)
+{
+ return off - jit->prg;
+}
+
+static s64 ptr_to_pcrel(struct bpf_jit *jit, const void *ptr)
+{
+ if (jit->prg_buf)
+ return (const u8 *)ptr - ((const u8 *)jit->prg_buf + jit->prg);
+ return 0;
+}
+
#define REG_SET_SEEN(b1) \
({ \
reg_set_seen(jit, b1); \
@@ -201,7 +211,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define EMIT4_PCREL_RIC(op, mask, target) \
({ \
- int __rel = ((target) - jit->prg) / 2; \
+ int __rel = off_to_pcrel(jit, target) / 2; \
_EMIT4((op) | (mask) << 20 | (__rel & 0xffff)); \
})
@@ -239,7 +249,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \
({ \
- unsigned int rel = (int)((target) - jit->prg) / 2; \
+ unsigned int rel = off_to_pcrel(jit, target) / 2; \
_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \
(op2) | (mask) << 12); \
REG_SET_SEEN(b1); \
@@ -248,7 +258,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \
({ \
- unsigned int rel = (int)((target) - jit->prg) / 2; \
+ unsigned int rel = off_to_pcrel(jit, target) / 2; \
_EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \
(rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \
REG_SET_SEEN(b1); \
@@ -257,29 +267,41 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
({ \
- int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \
+ int rel = off_to_pcrel(jit, addrs[(i) + (off) + 1]) / 2;\
_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
+static void emit6_pcrel_ril(struct bpf_jit *jit, u32 op, s64 pcrel)
+{
+ u32 pc32dbl = (s32)(pcrel / 2);
+
+ _EMIT6(op | pc32dbl >> 16, pc32dbl & 0xffff);
+}
+
+static void emit6_pcrel_rilb(struct bpf_jit *jit, u32 op, u8 b, s64 pcrel)
+{
+ emit6_pcrel_ril(jit, op | reg_high(b) << 16, pcrel);
+ REG_SET_SEEN(b);
+}
+
#define EMIT6_PCREL_RILB(op, b, target) \
-({ \
- unsigned int rel = (int)((target) - jit->prg) / 2; \
- _EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\
- REG_SET_SEEN(b); \
-})
+ emit6_pcrel_rilb(jit, op, b, off_to_pcrel(jit, target))
-#define EMIT6_PCREL_RIL(op, target) \
-({ \
- unsigned int rel = (int)((target) - jit->prg) / 2; \
- _EMIT6((op) | rel >> 16, rel & 0xffff); \
-})
+#define EMIT6_PCREL_RILB_PTR(op, b, target_ptr) \
+ emit6_pcrel_rilb(jit, op, b, ptr_to_pcrel(jit, target_ptr))
+
+static void emit6_pcrel_rilc(struct bpf_jit *jit, u32 op, u8 mask, s64 pcrel)
+{
+ emit6_pcrel_ril(jit, op | mask << 20, pcrel);
+}
#define EMIT6_PCREL_RILC(op, mask, target) \
-({ \
- EMIT6_PCREL_RIL((op) | (mask) << 20, (target)); \
-})
+ emit6_pcrel_rilc(jit, op, mask, off_to_pcrel(jit, target))
+
+#define EMIT6_PCREL_RILC_PTR(op, mask, target_ptr) \
+ emit6_pcrel_rilc(jit, op, mask, ptr_to_pcrel(jit, target_ptr))
#define _EMIT6_IMM(op, imm) \
({ \
@@ -503,7 +525,7 @@ static void bpf_skip(struct bpf_jit *jit, int size)
{
if (size >= 6 && !is_valid_rel(size)) {
/* brcl 0xf,size */
- EMIT6_PCREL_RIL(0xc0f4000000, size);
+ EMIT6_PCREL_RILC(0xc0040000, 0xf, size);
size -= 6;
} else if (size >= 4 && is_valid_rel(size)) {
/* brc 0xf,size */
@@ -605,43 +627,30 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
}
/* Setup stack and backchain */
if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
- if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
- /* lgr %w1,%r15 (backchain) */
- EMIT4(0xb9040000, REG_W1, REG_15);
+ /* lgr %w1,%r15 (backchain) */
+ EMIT4(0xb9040000, REG_W1, REG_15);
/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
/* aghi %r15,-STK_OFF */
EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
- if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
- /* stg %w1,152(%r15) (backchain) */
- EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
- REG_15, 152);
+ /* stg %w1,152(%r15) (backchain) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
+ REG_15, 152);
}
}
/*
- * Emit an expoline for a jump that follows
+ * Jump using a register either directly or via an expoline thunk
*/
-static void emit_expoline(struct bpf_jit *jit)
-{
- /* exrl %r0,.+10 */
- EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
- /* j . */
- EMIT4_PCREL(0xa7f40000, 0);
-}
-
-/*
- * Emit __s390_indirect_jump_r1 thunk if necessary
- */
-static void emit_r1_thunk(struct bpf_jit *jit)
-{
- if (nospec_uses_trampoline()) {
- jit->r1_thunk_ip = jit->prg;
- emit_expoline(jit);
- /* br %r1 */
- _EMIT2(0x07f1);
- }
-}
+#define EMIT_JUMP_REG(reg) do { \
+ if (nospec_uses_trampoline()) \
+ /* brcl 0xf,__s390_indirect_jump_rN */ \
+ EMIT6_PCREL_RILC_PTR(0xc0040000, 0x0f, \
+ __s390_indirect_jump_r ## reg); \
+ else \
+ /* br %rN */ \
+ _EMIT2(0x07f0 | reg); \
+} while (0)
/*
* Call r1 either directly or via __s390_indirect_jump_r1 thunk
@@ -650,7 +659,8 @@ static void call_r1(struct bpf_jit *jit)
{
if (nospec_uses_trampoline())
/* brasl %r14,__s390_indirect_jump_r1 */
- EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
+ EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14,
+ __s390_indirect_jump_r1);
else
/* basr %r14,%r1 */
EMIT2(0x0d00, REG_14, REG_1);
@@ -666,16 +676,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
EMIT4(0xb9040000, REG_2, BPF_REG_0);
/* Restore registers */
save_restore_regs(jit, REGS_RESTORE, stack_depth, 0);
- if (nospec_uses_trampoline()) {
- jit->r14_thunk_ip = jit->prg;
- /* Generate __s390_indirect_jump_r14 thunk */
- emit_expoline(jit);
- }
- /* br %r14 */
- _EMIT2(0x07fe);
-
- if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
- emit_r1_thunk(jit);
+ EMIT_JUMP_REG(14);
jit->prg = ALIGN(jit->prg, 8);
jit->prologue_plt = jit->prg;
@@ -1877,7 +1878,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
/* aghi %r1,tail_call_start */
EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start);
/* brcl 0xf,__s390_indirect_jump_r1 */
- EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip);
+ EMIT6_PCREL_RILC_PTR(0xc0040000, 0xf,
+ __s390_indirect_jump_r1);
} else {
/* bc 0xf,tail_call_start(%r1) */
_EMIT4(0x47f01000 + jit->tail_call_start);
@@ -2585,9 +2587,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
if (nr_stack_args > MAX_NR_STACK_ARGS)
return -ENOTSUPP;
- /* Return to %r14, since func_addr and %r0 are not available. */
- if ((!func_addr && !(flags & BPF_TRAMP_F_ORIG_STACK)) ||
- (flags & BPF_TRAMP_F_INDIRECT))
+ /* Return to %r14 in the struct_ops case. */
+ if (flags & BPF_TRAMP_F_INDIRECT)
flags |= BPF_TRAMP_F_SKIP_FRAME;
/*
@@ -2847,17 +2848,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
0xf000 | tjit->tccnt_off);
/* aghi %r15,stack_size */
EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
- /* Emit an expoline for the following indirect jump. */
- if (nospec_uses_trampoline())
- emit_expoline(jit);
if (flags & BPF_TRAMP_F_SKIP_FRAME)
- /* br %r14 */
- _EMIT2(0x07fe);
+ EMIT_JUMP_REG(14);
else
- /* br %r1 */
- _EMIT2(0x07f1);
-
- emit_r1_thunk(jit);
+ EMIT_JUMP_REG(1);
return 0;
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 5bbdc4190b8b..cd6676c2d602 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -45,6 +45,7 @@
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
+static DEFINE_MUTEX(zpci_add_remove_lock);
static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);
@@ -70,6 +71,15 @@ EXPORT_SYMBOL_GPL(zpci_aipb);
struct airq_iv *zpci_aif_sbv;
EXPORT_SYMBOL_GPL(zpci_aif_sbv);
+void zpci_zdev_put(struct zpci_dev *zdev)
+{
+ if (!zdev)
+ return;
+ mutex_lock(&zpci_add_remove_lock);
+ kref_put_lock(&zdev->kref, zpci_release_device, &zpci_list_lock);
+ mutex_unlock(&zpci_add_remove_lock);
+}
+
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
struct zpci_dev *tmp, *zdev = NULL;
@@ -837,6 +847,7 @@ int zpci_add_device(struct zpci_dev *zdev)
{
int rc;
+ mutex_lock(&zpci_add_remove_lock);
zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", zdev->fid, zdev->fh, zdev->state);
rc = zpci_init_iommu(zdev);
if (rc)
@@ -850,12 +861,14 @@ int zpci_add_device(struct zpci_dev *zdev)
spin_lock(&zpci_list_lock);
list_add_tail(&zdev->entry, &zpci_list);
spin_unlock(&zpci_list_lock);
+ mutex_unlock(&zpci_add_remove_lock);
return 0;
error_destroy_iommu:
zpci_destroy_iommu(zdev);
error:
zpci_dbg(0, "add fid:%x, rc:%d\n", zdev->fid, rc);
+ mutex_unlock(&zpci_add_remove_lock);
return rc;
}
@@ -925,21 +938,20 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
* @zdev: the zpci_dev that was reserved
*
* Handle the case that a given zPCI function was reserved by another system.
- * After a call to this function the zpci_dev can not be found via
- * get_zdev_by_fid() anymore but may still be accessible via existing
- * references though it will not be functional anymore.
*/
void zpci_device_reserved(struct zpci_dev *zdev)
{
- /*
- * Remove device from zpci_list as it is going away. This also
- * makes sure we ignore subsequent zPCI events for this device.
- */
- spin_lock(&zpci_list_lock);
- list_del(&zdev->entry);
- spin_unlock(&zpci_list_lock);
+ lockdep_assert_held(&zdev->state_lock);
+ /* We may declare the device reserved multiple times */
+ if (zdev->state == ZPCI_FN_STATE_RESERVED)
+ return;
zdev->state = ZPCI_FN_STATE_RESERVED;
zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+ /*
+ * The underlying device is gone. Allow the zdev to be freed
+ * as soon as all other references are gone by accounting for
+ * the removal as a dropped reference.
+ */
zpci_zdev_put(zdev);
}
@@ -947,13 +959,14 @@ void zpci_release_device(struct kref *kref)
{
struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
+ lockdep_assert_held(&zpci_add_remove_lock);
WARN_ON(zdev->state != ZPCI_FN_STATE_RESERVED);
-
- if (zdev->zbus->bus)
- zpci_bus_remove_device(zdev, false);
-
- if (zdev_enabled(zdev))
- zpci_disable_device(zdev);
+ /*
+ * We already hold zpci_list_lock thanks to kref_put_lock().
+ * This makes sure no new reference can be taken from the list.
+ */
+ list_del(&zdev->entry);
+ spin_unlock(&zpci_list_lock);
if (zdev->has_hp_slot)
zpci_exit_slot(zdev);
diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
index e86a9419d233..ae3d7a9159bd 100644
--- a/arch/s390/pci/pci_bus.h
+++ b/arch/s390/pci/pci_bus.h
@@ -21,11 +21,8 @@ int zpci_bus_scan_device(struct zpci_dev *zdev);
void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error);
void zpci_release_device(struct kref *kref);
-static inline void zpci_zdev_put(struct zpci_dev *zdev)
-{
- if (zdev)
- kref_put(&zdev->kref, zpci_release_device);
-}
+
+void zpci_zdev_put(struct zpci_dev *zdev);
static inline void zpci_zdev_get(struct zpci_dev *zdev)
{
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 9a929bbcc397..241f7251c873 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -428,6 +428,8 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
return;
}
zdev = zpci_create_device(entry->fid, entry->fh, entry->config_state);
+ if (IS_ERR(zdev))
+ return;
list_add_tail(&zdev->entry, scan_list);
}
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 7bd7721c1239..2fbee3887d13 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -335,6 +335,22 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
zdev->state = ZPCI_FN_STATE_STANDBY;
}
+static void zpci_event_reappear(struct zpci_dev *zdev)
+{
+ lockdep_assert_held(&zdev->state_lock);
+ /*
+ * The zdev is in the reserved state. This means that it was presumed to
+ * go away but there are still undropped references. Now, the platform
+ * announced its availability again. Bring back the lingering zdev
+ * to standby. This is safe because we hold a temporary reference
+ * now so that it won't go away. Account for the re-appearance of the
+ * underlying device by incrementing the reference count.
+ */
+ zdev->state = ZPCI_FN_STATE_STANDBY;
+ zpci_zdev_get(zdev);
+ zpci_dbg(1, "rea fid:%x, fh:%x\n", zdev->fid, zdev->fh);
+}
+
static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
{
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
@@ -358,8 +374,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
break;
}
} else {
+ if (zdev->state == ZPCI_FN_STATE_RESERVED)
+ zpci_event_reappear(zdev);
/* the configuration request may be stale */
- if (zdev->state != ZPCI_FN_STATE_STANDBY)
+ else if (zdev->state != ZPCI_FN_STATE_STANDBY)
break;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
}
@@ -375,6 +393,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
break;
}
} else {
+ if (zdev->state == ZPCI_FN_STATE_RESERVED)
+ zpci_event_reappear(zdev);
zpci_update_fh(zdev, ccdf->fh);
}
break;
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 5fcc1a3b04bd..51e7a28af899 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -32,8 +32,10 @@ static inline int __pcistb_mio_inuser(
u64 len, u8 *status)
{
int cc, exception;
+ bool sacf_flag;
exception = 1;
+ sacf_flag = enable_sacf_uaccess();
asm_inline volatile (
" sacf 256\n"
"0: .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
@@ -44,6 +46,7 @@ static inline int __pcistb_mio_inuser(
: CC_OUT(cc, cc), [len] "+d" (len), [exc] "+d" (exception)
: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
: CC_CLOBBER_LIST("memory"));
+ disable_sacf_uaccess(sacf_flag);
*status = len >> 24 & 0xff;
return exception ? -ENXIO : CC_TRANSFORM(cc);
}
@@ -54,6 +57,7 @@ static inline int __pcistg_mio_inuser(
{
union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
int cc, exception;
+ bool sacf_flag;
u64 val = 0;
u64 cnt = ulen;
u8 tmp;
@@ -64,6 +68,7 @@ static inline int __pcistg_mio_inuser(
* address space. pcistg then uses the user mappings.
*/
exception = 1;
+ sacf_flag = enable_sacf_uaccess();
asm_inline volatile (
" sacf 256\n"
"0: llgc %[tmp],0(%[src])\n"
@@ -81,6 +86,7 @@ static inline int __pcistg_mio_inuser(
CC_OUT(cc, cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
:
: CC_CLOBBER_LIST("memory"));
+ disable_sacf_uaccess(sacf_flag);
*status = ioaddr_len.odd >> 24 & 0xff;
cc = exception ? -ENXIO : CC_TRANSFORM(cc);
@@ -204,6 +210,7 @@ static inline int __pcilg_mio_inuser(
u64 ulen, u8 *status)
{
union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
+ bool sacf_flag;
u64 cnt = ulen;
int shift = ulen * 8;
int cc, exception;
@@ -215,6 +222,7 @@ static inline int __pcilg_mio_inuser(
* user address @dst
*/
exception = 1;
+ sacf_flag = enable_sacf_uaccess();
asm_inline volatile (
" sacf 256\n"
"0: .insn rre,0xb9d60000,%[val],%[ioaddr_len]\n"
@@ -236,10 +244,10 @@ static inline int __pcilg_mio_inuser(
: [ioaddr_len] "+&d" (ioaddr_len.pair), [exc] "+d" (exception),
CC_OUT(cc, cc), [val] "=d" (val),
[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
- [shift] "+d" (shift)
+ [shift] "+a" (shift)
:
: CC_CLOBBER_LIST("memory"));
-
+ disable_sacf_uaccess(sacf_flag);
cc = exception ? -ENXIO : CC_TRANSFORM(cc);
/* did we write everything to the user space buffer? */
if (!cc && cnt != 0)
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 855f818deb98..d5c68ade71ab 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -54,6 +54,9 @@ static struct facility_def facility_defs[] = {
#ifdef CONFIG_HAVE_MARCH_Z15_FEATURES
61, /* miscellaneous-instruction-extension 3 */
#endif
+#ifdef CONFIG_HAVE_MARCH_Z17_FEATURES
+ 84, /* miscellaneous-instruction-extension 4 */
+#endif
-1 /* END */
}
},
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c
index f9f3b14f70d5..730c01b225bd 100644
--- a/arch/sh/boards/mach-se/7343/irq.c
+++ b/arch/sh/boards/mach-se/7343/irq.c
@@ -47,8 +47,9 @@ static void __init se7343_domain_init(void)
{
int i;
- se7343_irq_domain = irq_domain_add_linear(NULL, SE7343_FPGA_IRQ_NR,
- &irq_domain_simple_ops, NULL);
+ se7343_irq_domain = irq_domain_create_linear(NULL, SE7343_FPGA_IRQ_NR,
+ &irq_domain_simple_ops,
+ NULL);
if (unlikely(!se7343_irq_domain)) {
printk("Failed to get IRQ domain\n");
return;
@@ -70,7 +71,7 @@ static void __init se7343_gc_init(void)
struct irq_chip_type *ct;
unsigned int irq_base;
- irq_base = irq_linear_revmap(se7343_irq_domain, 0);
+ irq_base = irq_find_mapping(se7343_irq_domain, 0);
gc = irq_alloc_generic_chip(DRV_NAME, 1, irq_base, se7343_irq_regs,
handle_level_irq);
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c
index efa96edd47dc..49aa3a2b1b8f 100644
--- a/arch/sh/boards/mach-se/7722/irq.c
+++ b/arch/sh/boards/mach-se/7722/irq.c
@@ -46,7 +46,7 @@ static void __init se7722_domain_init(void)
{
int i;
- se7722_irq_domain = irq_domain_add_linear(NULL, SE7722_FPGA_IRQ_NR,
+ se7722_irq_domain = irq_domain_create_linear(NULL, SE7722_FPGA_IRQ_NR,
&irq_domain_simple_ops, NULL);
if (unlikely(!se7722_irq_domain)) {
printk("Failed to get IRQ domain\n");
@@ -69,7 +69,7 @@ static void __init se7722_gc_init(void)
struct irq_chip_type *ct;
unsigned int irq_base;
- irq_base = irq_linear_revmap(se7722_irq_domain, 0);
+ irq_base = irq_find_mapping(se7722_irq_domain, 0);
gc = irq_alloc_generic_chip(DRV_NAME, 1, irq_base, se7722_irq_regs,
handle_level_irq);
diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c
index f82d3a6a844a..c13d51b29702 100644
--- a/arch/sh/boards/mach-x3proto/gpio.c
+++ b/arch/sh/boards/mach-x3proto/gpio.c
@@ -108,7 +108,7 @@ int __init x3proto_gpio_setup(void)
if (unlikely(ret))
goto err_gpio;
- x3proto_irq_domain = irq_domain_add_linear(NULL, NR_BASEBOARD_GPIOS,
+ x3proto_irq_domain = irq_domain_create_linear(NULL, NR_BASEBOARD_GPIOS,
&x3proto_gpio_irq_ops, NULL);
if (unlikely(!x3proto_irq_domain))
goto err_irq;
diff --git a/arch/sh/configs/ap325rxa_defconfig b/arch/sh/configs/ap325rxa_defconfig
index 4464a2ad42ed..b6f36c938f1d 100644
--- a/arch/sh/configs/ap325rxa_defconfig
+++ b/arch/sh/configs/ap325rxa_defconfig
@@ -99,4 +99,3 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig
index ee1b36682155..e76694aace25 100644
--- a/arch/sh/configs/ecovec24_defconfig
+++ b/arch/sh/configs/ecovec24_defconfig
@@ -128,4 +128,3 @@ CONFIG_DEBUG_FS=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/edosk7705_defconfig b/arch/sh/configs/edosk7705_defconfig
index 296ed768cbbb..ee3f6db7d8da 100644
--- a/arch/sh/configs/edosk7705_defconfig
+++ b/arch/sh/configs/edosk7705_defconfig
@@ -33,4 +33,3 @@ CONFIG_CMDLINE_FROM_BOOTLOADER=y
# CONFIG_PROC_FS is not set
# CONFIG_SYSFS is not set
# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_CRC32 is not set
diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig
index 67716a44463e..da176f100e00 100644
--- a/arch/sh/configs/espt_defconfig
+++ b/arch/sh/configs/espt_defconfig
@@ -110,4 +110,3 @@ CONFIG_NLS_UTF8=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/hp6xx_defconfig b/arch/sh/configs/hp6xx_defconfig
index 77e3185f63e4..3582af15ad86 100644
--- a/arch/sh/configs/hp6xx_defconfig
+++ b/arch/sh/configs/hp6xx_defconfig
@@ -56,5 +56,3 @@ CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=y
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/kfr2r09-romimage_defconfig b/arch/sh/configs/kfr2r09-romimage_defconfig
index 42bf34181a3e..88fbb65cb9f9 100644
--- a/arch/sh/configs/kfr2r09-romimage_defconfig
+++ b/arch/sh/configs/kfr2r09-romimage_defconfig
@@ -49,4 +49,3 @@ CONFIG_TMPFS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
-# CONFIG_CRC32 is not set
diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig
index d871623955c5..924bb3233b0b 100644
--- a/arch/sh/configs/landisk_defconfig
+++ b/arch/sh/configs/landisk_defconfig
@@ -111,4 +111,3 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_SH_STANDARD_BIOS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/lboxre2_defconfig b/arch/sh/configs/lboxre2_defconfig
index 6a234761bfd7..0307bb2be79f 100644
--- a/arch/sh/configs/lboxre2_defconfig
+++ b/arch/sh/configs/lboxre2_defconfig
@@ -58,4 +58,3 @@ CONFIG_ROMFS_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_SH_STANDARD_BIOS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/magicpanelr2_defconfig b/arch/sh/configs/magicpanelr2_defconfig
index 8d443749550e..93b9aa32dc7c 100644
--- a/arch/sh/configs/magicpanelr2_defconfig
+++ b/arch/sh/configs/magicpanelr2_defconfig
@@ -86,5 +86,3 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_KOBJECT=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_FRAME_POINTER=y
-CONFIG_CRC_CCITT=m
-CONFIG_CRC16=m
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index 2d1e65cad239..31dbd8888aaa 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -87,7 +87,5 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_DEBUG_FS=y
-CONFIG_CRYPTO_MANAGER=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig
index 6bd6c0ae85d7..f28b8c4181c2 100644
--- a/arch/sh/configs/r7780mp_defconfig
+++ b/arch/sh/configs/r7780mp_defconfig
@@ -105,4 +105,3 @@ CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig
index cde668569cc1..3a4239f20ff1 100644
--- a/arch/sh/configs/r7785rp_defconfig
+++ b/arch/sh/configs/r7785rp_defconfig
@@ -103,4 +103,3 @@ CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig
index c863a11c7592..69568cc40396 100644
--- a/arch/sh/configs/rts7751r2d1_defconfig
+++ b/arch/sh/configs/rts7751r2d1_defconfig
@@ -87,4 +87,3 @@ CONFIG_MINIX_FS=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_DEBUG_FS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig
index 7e4f710d46c7..ecb4bdb5bb58 100644
--- a/arch/sh/configs/rts7751r2dplus_defconfig
+++ b/arch/sh/configs/rts7751r2dplus_defconfig
@@ -92,4 +92,3 @@ CONFIG_MINIX_FS=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_DEBUG_FS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig
index cd24cf08210e..9870d16d9711 100644
--- a/arch/sh/configs/sdk7780_defconfig
+++ b/arch/sh/configs/sdk7780_defconfig
@@ -136,4 +136,3 @@ CONFIG_SH_STANDARD_BIOS=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index 472fdf365cad..64f9308ee586 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -101,6 +101,3 @@ CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
-CONFIG_CRC_ITU_T=y
diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
index 49a4961889de..8770a72e6a63 100644
--- a/arch/sh/configs/se7712_defconfig
+++ b/arch/sh/configs/se7712_defconfig
@@ -96,4 +96,3 @@ CONFIG_FRAME_POINTER=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_CCITT=y
diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
index de293792db84..b15c6406a0e8 100644
--- a/arch/sh/configs/se7721_defconfig
+++ b/arch/sh/configs/se7721_defconfig
@@ -122,4 +122,3 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_FRAME_POINTER=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_CCITT=y
diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig
index 96521271758c..9501e69eb886 100644
--- a/arch/sh/configs/se7724_defconfig
+++ b/arch/sh/configs/se7724_defconfig
@@ -128,4 +128,3 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 48f38ec236b6..4d75c92cac10 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -120,6 +120,5 @@ CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_DEFLATE=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_CCITT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=y
diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig
index 1b1174a07e36..cc6292b3235a 100644
--- a/arch/sh/configs/sh2007_defconfig
+++ b/arch/sh/configs/sh2007_defconfig
@@ -193,5 +193,3 @@ CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig
index 5440bd0ca4ed..e6298f22623a 100644
--- a/arch/sh/configs/sh7724_generic_defconfig
+++ b/arch/sh/configs/sh7724_generic_defconfig
@@ -39,4 +39,3 @@ CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_SYSFS is not set
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_CRC32 is not set
diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig
index 57923c3296cc..b77b3313157e 100644
--- a/arch/sh/configs/sh7763rdp_defconfig
+++ b/arch/sh/configs/sh7763rdp_defconfig
@@ -112,4 +112,3 @@ CONFIG_NLS_UTF8=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig
index 4338af8d02d0..2e2b46980b58 100644
--- a/arch/sh/configs/sh7770_generic_defconfig
+++ b/arch/sh/configs/sh7770_generic_defconfig
@@ -41,4 +41,3 @@ CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_SYSFS is not set
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_CRC32 is not set
diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
index 8e85f205d8f5..f022ada363b5 100644
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@ -264,4 +264,3 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=m
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index f939f1215232..db2e48366e0d 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -380,14 +380,6 @@ PTE_BIT_FUNC(low, mkspecial, |= _PAGE_SPECIAL);
#define pgprot_noncached pgprot_writecombine
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- *
- * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte.pte_low &= _PAGE_CHG_MASK;
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h
index d87738eebe30..7027d87d901d 100644
--- a/arch/sh/include/asm/syscall_32.h
+++ b/arch/sh/include/asm/syscall_32.h
@@ -15,6 +15,18 @@ static inline long syscall_get_nr(struct task_struct *task,
return (regs->tra >= 0) ? regs->regs[3] : -1L;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ /*
+ * Unlike syscall_get_nr(), syscall_set_nr() can be called only when
+ * the target task is stopped for tracing on entering syscall, so
+ * there is no need to have the same check syscall_get_nr() has.
+ */
+ regs->regs[3] = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -57,6 +69,18 @@ static inline void syscall_get_arguments(struct task_struct *task,
args[0] = regs->regs[4];
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ regs->regs[1] = args[5];
+ regs->regs[0] = args[4];
+ regs->regs[7] = args[3];
+ regs->regs[6] = args[2];
+ regs->regs[5] = args[1];
+ regs->regs[4] = args[0];
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
int arch = AUDIT_ARCH_SH;
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 01b2bdfbf9a8..7a7c4dec2925 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -205,7 +205,7 @@ CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENTS=y
CONFIG_KEYS=y
CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -229,7 +229,6 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=m
CONFIG_VCC=m
CONFIG_PATA_CMD64X=y
CONFIG_IP_PNP=y
diff --git a/arch/sparc/crypto/Kconfig b/arch/sparc/crypto/Kconfig
index e858597de89d..a6ba319c42dc 100644
--- a/arch/sparc/crypto/Kconfig
+++ b/arch/sparc/crypto/Kconfig
@@ -36,16 +36,6 @@ config CRYPTO_SHA1_SPARC64
Architecture: sparc64
-config CRYPTO_SHA256_SPARC64
- tristate "Hash functions: SHA-224 and SHA-256"
- depends on SPARC64
- select CRYPTO_SHA256
- select CRYPTO_HASH
- help
- SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
- Architecture: sparc64 using crypto instructions, when available
-
config CRYPTO_SHA512_SPARC64
tristate "Hash functions: SHA-384 and SHA-512"
depends on SPARC64
diff --git a/arch/sparc/crypto/Makefile b/arch/sparc/crypto/Makefile
index a2d7fca40cb4..701c39edb0d7 100644
--- a/arch/sparc/crypto/Makefile
+++ b/arch/sparc/crypto/Makefile
@@ -4,7 +4,6 @@
#
obj-$(CONFIG_CRYPTO_SHA1_SPARC64) += sha1-sparc64.o
-obj-$(CONFIG_CRYPTO_SHA256_SPARC64) += sha256-sparc64.o
obj-$(CONFIG_CRYPTO_SHA512_SPARC64) += sha512-sparc64.o
obj-$(CONFIG_CRYPTO_MD5_SPARC64) += md5-sparc64.o
@@ -13,7 +12,6 @@ obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o
sha1-sparc64-y := sha1_asm.o sha1_glue.o
-sha256-sparc64-y := sha256_asm.o sha256_glue.o
sha512-sparc64-y := sha512_asm.o sha512_glue.o
md5-sparc64-y := md5_asm.o md5_glue.o
diff --git a/arch/sparc/crypto/aes_asm.S b/arch/sparc/crypto/aes_asm.S
index 155cefb98520..f291174a72a1 100644
--- a/arch/sparc/crypto/aes_asm.S
+++ b/arch/sparc/crypto/aes_asm.S
@@ -1,9 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
+#include <asm/opcodes.h>
#include <asm/visasm.h>
-#include "opcodes.h"
-
#define ENCRYPT_TWO_ROUNDS(KEY_BASE, I0, I1, T0, T1) \
AES_EROUND01(KEY_BASE + 0, I0, I1, T0) \
AES_EROUND23(KEY_BASE + 2, I0, I1, T1) \
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 683150830356..359f22643b05 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -27,11 +27,10 @@
#include <crypto/internal/skcipher.h>
#include <asm/fpumacro.h>
+#include <asm/opcodes.h>
#include <asm/pstate.h>
#include <asm/elf.h>
-#include "opcodes.h"
-
struct aes_ops {
void (*encrypt)(const u64 *key, const u32 *input, u32 *output);
void (*decrypt)(const u64 *key, const u32 *input, u32 *output);
diff --git a/arch/sparc/crypto/camellia_asm.S b/arch/sparc/crypto/camellia_asm.S
index dcdc9193fcd7..8471b346ef54 100644
--- a/arch/sparc/crypto/camellia_asm.S
+++ b/arch/sparc/crypto/camellia_asm.S
@@ -1,9 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
+#include <asm/opcodes.h>
#include <asm/visasm.h>
-#include "opcodes.h"
-
#define CAMELLIA_6ROUNDS(KEY_BASE, I0, I1) \
CAMELLIA_F(KEY_BASE + 0, I1, I0, I1) \
CAMELLIA_F(KEY_BASE + 2, I0, I1, I0) \
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index aaa9714378e6..e7a1e1c42b99 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -15,11 +15,10 @@
#include <crypto/internal/skcipher.h>
#include <asm/fpumacro.h>
+#include <asm/opcodes.h>
#include <asm/pstate.h>
#include <asm/elf.h>
-#include "opcodes.h"
-
#define CAMELLIA_MIN_KEY_SIZE 16
#define CAMELLIA_MAX_KEY_SIZE 32
#define CAMELLIA_BLOCK_SIZE 16
diff --git a/arch/sparc/crypto/des_asm.S b/arch/sparc/crypto/des_asm.S
index 7157468a679d..d534446cbef9 100644
--- a/arch/sparc/crypto/des_asm.S
+++ b/arch/sparc/crypto/des_asm.S
@@ -1,9 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
+#include <asm/opcodes.h>
#include <asm/visasm.h>
-#include "opcodes.h"
-
.align 32
ENTRY(des_sparc64_key_expand)
/* %o0=input_key, %o1=output_key */
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index a499102bf706..e50ec4cd57cd 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -16,11 +16,10 @@
#include <crypto/internal/skcipher.h>
#include <asm/fpumacro.h>
+#include <asm/opcodes.h>
#include <asm/pstate.h>
#include <asm/elf.h>
-#include "opcodes.h"
-
struct des_sparc64_ctx {
u64 encrypt_expkey[DES_EXPKEY_WORDS / 2];
u64 decrypt_expkey[DES_EXPKEY_WORDS / 2];
diff --git a/arch/sparc/crypto/md5_asm.S b/arch/sparc/crypto/md5_asm.S
index 7a6637455f37..60b544e4d205 100644
--- a/arch/sparc/crypto/md5_asm.S
+++ b/arch/sparc/crypto/md5_asm.S
@@ -1,9 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
+#include <asm/opcodes.h>
#include <asm/visasm.h>
-#include "opcodes.h"
-
ENTRY(md5_sparc64_transform)
/* %o0 = digest, %o1 = data, %o2 = rounds */
VISEntryHalf
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index 511db98d590a..b3615f0cdf62 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -14,121 +14,104 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <asm/elf.h>
+#include <asm/opcodes.h>
+#include <asm/pstate.h>
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/md5.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
-#include <asm/pstate.h>
-#include <asm/elf.h>
-
-#include "opcodes.h"
+struct sparc_md5_state {
+ __le32 hash[MD5_HASH_WORDS];
+ u64 byte_count;
+};
-asmlinkage void md5_sparc64_transform(u32 *digest, const char *data,
+asmlinkage void md5_sparc64_transform(__le32 *digest, const char *data,
unsigned int rounds);
static int md5_sparc64_init(struct shash_desc *desc)
{
- struct md5_state *mctx = shash_desc_ctx(desc);
+ struct sparc_md5_state *mctx = shash_desc_ctx(desc);
- mctx->hash[0] = MD5_H0;
- mctx->hash[1] = MD5_H1;
- mctx->hash[2] = MD5_H2;
- mctx->hash[3] = MD5_H3;
- le32_to_cpu_array(mctx->hash, 4);
+ mctx->hash[0] = cpu_to_le32(MD5_H0);
+ mctx->hash[1] = cpu_to_le32(MD5_H1);
+ mctx->hash[2] = cpu_to_le32(MD5_H2);
+ mctx->hash[3] = cpu_to_le32(MD5_H3);
mctx->byte_count = 0;
return 0;
}
-static void __md5_sparc64_update(struct md5_state *sctx, const u8 *data,
- unsigned int len, unsigned int partial)
-{
- unsigned int done = 0;
-
- sctx->byte_count += len;
- if (partial) {
- done = MD5_HMAC_BLOCK_SIZE - partial;
- memcpy((u8 *)sctx->block + partial, data, done);
- md5_sparc64_transform(sctx->hash, (u8 *)sctx->block, 1);
- }
- if (len - done >= MD5_HMAC_BLOCK_SIZE) {
- const unsigned int rounds = (len - done) / MD5_HMAC_BLOCK_SIZE;
-
- md5_sparc64_transform(sctx->hash, data + done, rounds);
- done += rounds * MD5_HMAC_BLOCK_SIZE;
- }
-
- memcpy(sctx->block, data + done, len - done);
-}
-
static int md5_sparc64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct md5_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE;
-
- /* Handle the fast case right here */
- if (partial + len < MD5_HMAC_BLOCK_SIZE) {
- sctx->byte_count += len;
- memcpy((u8 *)sctx->block + partial, data, len);
- } else
- __md5_sparc64_update(sctx, data, len, partial);
+ struct sparc_md5_state *sctx = shash_desc_ctx(desc);
- return 0;
+ sctx->byte_count += round_down(len, MD5_HMAC_BLOCK_SIZE);
+ md5_sparc64_transform(sctx->hash, data, len / MD5_HMAC_BLOCK_SIZE);
+ return len - round_down(len, MD5_HMAC_BLOCK_SIZE);
}
/* Add padding and return the message digest. */
-static int md5_sparc64_final(struct shash_desc *desc, u8 *out)
+static int md5_sparc64_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int offset, u8 *out)
{
- struct md5_state *sctx = shash_desc_ctx(desc);
- unsigned int i, index, padlen;
- u32 *dst = (u32 *)out;
- __le64 bits;
- static const u8 padding[MD5_HMAC_BLOCK_SIZE] = { 0x80, };
-
- bits = cpu_to_le64(sctx->byte_count << 3);
-
- /* Pad out to 56 mod 64 and append length */
- index = sctx->byte_count % MD5_HMAC_BLOCK_SIZE;
- padlen = (index < 56) ? (56 - index) : ((MD5_HMAC_BLOCK_SIZE+56) - index);
-
- /* We need to fill a whole block for __md5_sparc64_update() */
- if (padlen <= 56) {
- sctx->byte_count += padlen;
- memcpy((u8 *)sctx->block + index, padding, padlen);
- } else {
- __md5_sparc64_update(sctx, padding, padlen, index);
- }
- __md5_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
+ struct sparc_md5_state *sctx = shash_desc_ctx(desc);
+ __le64 block[MD5_BLOCK_WORDS] = {};
+ u8 *p = memcpy(block, src, offset);
+ __le32 *dst = (__le32 *)out;
+ __le64 *pbits;
+ int i;
+
+ src = p;
+ p += offset;
+ *p++ = 0x80;
+ sctx->byte_count += offset;
+ pbits = &block[(MD5_BLOCK_WORDS / (offset > 55 ? 1 : 2)) - 1];
+ *pbits = cpu_to_le64(sctx->byte_count << 3);
+ md5_sparc64_transform(sctx->hash, src, (pbits - block + 1) / 8);
+ memzero_explicit(block, sizeof(block));
/* Store state in digest */
for (i = 0; i < MD5_HASH_WORDS; i++)
dst[i] = sctx->hash[i];
- /* Wipe context */
- memset(sctx, 0, sizeof(*sctx));
-
return 0;
}
static int md5_sparc64_export(struct shash_desc *desc, void *out)
{
- struct md5_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
+ struct sparc_md5_state *sctx = shash_desc_ctx(desc);
+ union {
+ u8 *u8;
+ u32 *u32;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
+ for (i = 0; i < MD5_HASH_WORDS; i++)
+ put_unaligned(le32_to_cpu(sctx->hash[i]), p.u32++);
+ put_unaligned(sctx->byte_count, p.u64);
return 0;
}
static int md5_sparc64_import(struct shash_desc *desc, const void *in)
{
- struct md5_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
+ struct sparc_md5_state *sctx = shash_desc_ctx(desc);
+ union {
+ const u8 *u8;
+ const u32 *u32;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
+ for (i = 0; i < MD5_HASH_WORDS; i++)
+ sctx->hash[i] = cpu_to_le32(get_unaligned(p.u32++));
+ sctx->byte_count = get_unaligned(p.u64);
return 0;
}
@@ -136,15 +119,16 @@ static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = md5_sparc64_init,
.update = md5_sparc64_update,
- .final = md5_sparc64_final,
+ .finup = md5_sparc64_finup,
.export = md5_sparc64_export,
.import = md5_sparc64_import,
- .descsize = sizeof(struct md5_state),
- .statesize = sizeof(struct md5_state),
+ .descsize = sizeof(struct sparc_md5_state),
+ .statesize = sizeof(struct sparc_md5_state),
.base = {
.cra_name = "md5",
.cra_driver_name= "md5-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/sparc/crypto/sha1_asm.S b/arch/sparc/crypto/sha1_asm.S
index 7d8bf354f0e7..00b46bac1b08 100644
--- a/arch/sparc/crypto/sha1_asm.S
+++ b/arch/sparc/crypto/sha1_asm.S
@@ -1,9 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
+#include <asm/opcodes.h>
#include <asm/visasm.h>
-#include "opcodes.h"
-
ENTRY(sha1_sparc64_transform)
/* %o0 = digest, %o1 = data, %o2 = rounds */
VISEntryHalf
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
index 06b7becfcb21..ef19d5023b1b 100644
--- a/arch/sparc/crypto/sha1_glue.c
+++ b/arch/sparc/crypto/sha1_glue.c
@@ -11,124 +11,44 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <asm/elf.h>
+#include <asm/opcodes.h>
+#include <asm/pstate.h>
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
-#include <asm/pstate.h>
-#include <asm/elf.h>
-
-#include "opcodes.h"
-
-asmlinkage void sha1_sparc64_transform(u32 *digest, const char *data,
- unsigned int rounds);
-
-static void __sha1_sparc64_update(struct sha1_state *sctx, const u8 *data,
- unsigned int len, unsigned int partial)
-{
- unsigned int done = 0;
-
- sctx->count += len;
- if (partial) {
- done = SHA1_BLOCK_SIZE - partial;
- memcpy(sctx->buffer + partial, data, done);
- sha1_sparc64_transform(sctx->state, sctx->buffer, 1);
- }
- if (len - done >= SHA1_BLOCK_SIZE) {
- const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
-
- sha1_sparc64_transform(sctx->state, data + done, rounds);
- done += rounds * SHA1_BLOCK_SIZE;
- }
-
- memcpy(sctx->buffer, data + done, len - done);
-}
+asmlinkage void sha1_sparc64_transform(struct sha1_state *digest,
+ const u8 *data, int rounds);
static int sha1_sparc64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- /* Handle the fast case right here */
- if (partial + len < SHA1_BLOCK_SIZE) {
- sctx->count += len;
- memcpy(sctx->buffer + partial, data, len);
- } else
- __sha1_sparc64_update(sctx, data, len, partial);
-
- return 0;
+ return sha1_base_do_update_blocks(desc, data, len,
+ sha1_sparc64_transform);
}
/* Add padding and return the message digest. */
-static int sha1_sparc64_final(struct shash_desc *desc, u8 *out)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int i, index, padlen;
- __be32 *dst = (__be32 *)out;
- __be64 bits;
- static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
-
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64 and append length */
- index = sctx->count % SHA1_BLOCK_SIZE;
- padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
-
- /* We need to fill a whole block for __sha1_sparc64_update() */
- if (padlen <= 56) {
- sctx->count += padlen;
- memcpy(sctx->buffer + index, padding, padlen);
- } else {
- __sha1_sparc64_update(sctx, padding, padlen, index);
- }
- __sha1_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
-
- /* Store state in digest */
- for (i = 0; i < 5; i++)
- dst[i] = cpu_to_be32(sctx->state[i]);
-
- /* Wipe context */
- memset(sctx, 0, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha1_sparc64_export(struct shash_desc *desc, void *out)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha1_sparc64_import(struct shash_desc *desc, const void *in)
+static int sha1_sparc64_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
+ sha1_base_do_finup(desc, src, len, sha1_sparc64_transform);
+ return sha1_base_finish(desc, out);
}
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = sha1_sparc64_update,
- .final = sha1_sparc64_final,
- .export = sha1_sparc64_export,
- .import = sha1_sparc64_import,
- .descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
+ .finup = sha1_sparc64_finup,
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
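With CRYPTO_AHASH_ALG_BLOCK_ONLY set, the glue code only ever sees whole blocks: sha1_base_do_update_blocks() buffers partial input in the generic layer and sha1_base_do_finup() takes care of padding, so the arch driver reduces to the block transform. Consumers of the crypto API are unchanged; a minimal sketch of a hypothetical caller (not part of this patch) that reaches this driver through the shash interface:

#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <linux/err.h>

static int sha1_digest_example(const u8 *data, unsigned int len,
			       u8 out[SHA1_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int ret;

	/* "sha1" resolves to the highest-priority provider, e.g. sha1-sparc64. */
	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_tfm_digest(tfm, data, len, out);
	crypto_free_shash(tfm);
	return ret;
}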
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
deleted file mode 100644
index 285561a1cde5..000000000000
--- a/arch/sparc/crypto/sha256_glue.c
+++ /dev/null
@@ -1,210 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Glue code for SHA256 hashing optimized for sparc64 crypto opcodes.
- *
- * This is based largely upon crypto/sha256_generic.c
- *
- * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
- * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-
-#include <asm/pstate.h>
-#include <asm/elf.h>
-
-#include "opcodes.h"
-
-asmlinkage void sha256_sparc64_transform(u32 *digest, const char *data,
- unsigned int rounds);
-
-static void __sha256_sparc64_update(struct sha256_state *sctx, const u8 *data,
- unsigned int len, unsigned int partial)
-{
- unsigned int done = 0;
-
- sctx->count += len;
- if (partial) {
- done = SHA256_BLOCK_SIZE - partial;
- memcpy(sctx->buf + partial, data, done);
- sha256_sparc64_transform(sctx->state, sctx->buf, 1);
- }
- if (len - done >= SHA256_BLOCK_SIZE) {
- const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE;
-
- sha256_sparc64_transform(sctx->state, data + done, rounds);
- done += rounds * SHA256_BLOCK_SIZE;
- }
-
- memcpy(sctx->buf, data + done, len - done);
-}
-
-static int sha256_sparc64_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
- /* Handle the fast case right here */
- if (partial + len < SHA256_BLOCK_SIZE) {
- sctx->count += len;
- memcpy(sctx->buf + partial, data, len);
- } else
- __sha256_sparc64_update(sctx, data, len, partial);
-
- return 0;
-}
-
-static int sha256_sparc64_final(struct shash_desc *desc, u8 *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- unsigned int i, index, padlen;
- __be32 *dst = (__be32 *)out;
- __be64 bits;
- static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
-
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64 and append length */
- index = sctx->count % SHA256_BLOCK_SIZE;
- padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56) - index);
-
- /* We need to fill a whole block for __sha256_sparc64_update() */
- if (padlen <= 56) {
- sctx->count += padlen;
- memcpy(sctx->buf + index, padding, padlen);
- } else {
- __sha256_sparc64_update(sctx, padding, padlen, index);
- }
- __sha256_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
-
- /* Store state in digest */
- for (i = 0; i < 8; i++)
- dst[i] = cpu_to_be32(sctx->state[i]);
-
- /* Wipe context */
- memset(sctx, 0, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha224_sparc64_final(struct shash_desc *desc, u8 *hash)
-{
- u8 D[SHA256_DIGEST_SIZE];
-
- sha256_sparc64_final(desc, D);
-
- memcpy(hash, D, SHA224_DIGEST_SIZE);
- memzero_explicit(D, SHA256_DIGEST_SIZE);
-
- return 0;
-}
-
-static int sha256_sparc64_export(struct shash_desc *desc, void *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
-}
-
-static int sha256_sparc64_import(struct shash_desc *desc, const void *in)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
-}
-
-static struct shash_alg sha256_alg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = sha256_sparc64_update,
- .final = sha256_sparc64_final,
- .export = sha256_sparc64_export,
- .import = sha256_sparc64_import,
- .descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name= "sha256-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg sha224_alg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = sha256_sparc64_update,
- .final = sha224_sparc64_final,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name= "sha224-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static bool __init sparc64_has_sha256_opcode(void)
-{
- unsigned long cfr;
-
- if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
- return false;
-
- __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
- if (!(cfr & CFR_SHA256))
- return false;
-
- return true;
-}
-
-static int __init sha256_sparc64_mod_init(void)
-{
- if (sparc64_has_sha256_opcode()) {
- int ret = crypto_register_shash(&sha224_alg);
- if (ret < 0)
- return ret;
-
- ret = crypto_register_shash(&sha256_alg);
- if (ret < 0) {
- crypto_unregister_shash(&sha224_alg);
- return ret;
- }
-
- pr_info("Using sparc64 sha256 opcode optimized SHA-256/SHA-224 implementation\n");
- return 0;
- }
- pr_info("sparc64 sha256 opcode not available.\n");
- return -ENODEV;
-}
-
-static void __exit sha256_sparc64_mod_fini(void)
-{
- crypto_unregister_shash(&sha224_alg);
- crypto_unregister_shash(&sha256_alg);
-}
-
-module_init(sha256_sparc64_mod_init);
-module_exit(sha256_sparc64_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated");
-
-MODULE_ALIAS_CRYPTO("sha224");
-MODULE_ALIAS_CRYPTO("sha256");
-
-#include "crop_devid.c"
diff --git a/arch/sparc/crypto/sha512_asm.S b/arch/sparc/crypto/sha512_asm.S
index b2f6e6728802..9932b4fe1b59 100644
--- a/arch/sparc/crypto/sha512_asm.S
+++ b/arch/sparc/crypto/sha512_asm.S
@@ -1,9 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
+#include <asm/opcodes.h>
#include <asm/visasm.h>
-#include "opcodes.h"
-
ENTRY(sha512_sparc64_transform)
/* %o0 = digest, %o1 = data, %o2 = rounds */
VISEntry
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
index d66efa4ec59a..47b9277b6877 100644
--- a/arch/sparc/crypto/sha512_glue.c
+++ b/arch/sparc/crypto/sha512_glue.c
@@ -10,115 +10,42 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <asm/elf.h>
+#include <asm/opcodes.h>
+#include <asm/pstate.h>
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
-
-#include <asm/pstate.h>
-#include <asm/elf.h>
-
-#include "opcodes.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
asmlinkage void sha512_sparc64_transform(u64 *digest, const char *data,
unsigned int rounds);
-static void __sha512_sparc64_update(struct sha512_state *sctx, const u8 *data,
- unsigned int len, unsigned int partial)
+static void sha512_block(struct sha512_state *sctx, const u8 *src, int blocks)
{
- unsigned int done = 0;
-
- if ((sctx->count[0] += len) < len)
- sctx->count[1]++;
- if (partial) {
- done = SHA512_BLOCK_SIZE - partial;
- memcpy(sctx->buf + partial, data, done);
- sha512_sparc64_transform(sctx->state, sctx->buf, 1);
- }
- if (len - done >= SHA512_BLOCK_SIZE) {
- const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE;
-
- sha512_sparc64_transform(sctx->state, data + done, rounds);
- done += rounds * SHA512_BLOCK_SIZE;
- }
-
- memcpy(sctx->buf, data + done, len - done);
+ sha512_sparc64_transform(sctx->state, src, blocks);
}
static int sha512_sparc64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
-
- /* Handle the fast case right here */
- if (partial + len < SHA512_BLOCK_SIZE) {
- if ((sctx->count[0] += len) < len)
- sctx->count[1]++;
- memcpy(sctx->buf + partial, data, len);
- } else
- __sha512_sparc64_update(sctx, data, len, partial);
-
- return 0;
+ return sha512_base_do_update_blocks(desc, data, len, sha512_block);
}
-static int sha512_sparc64_final(struct shash_desc *desc, u8 *out)
+static int sha512_sparc64_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- unsigned int i, index, padlen;
- __be64 *dst = (__be64 *)out;
- __be64 bits[2];
- static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, };
-
- /* Save number of bits */
- bits[1] = cpu_to_be64(sctx->count[0] << 3);
- bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
-
- /* Pad out to 112 mod 128 and append length */
- index = sctx->count[0] % SHA512_BLOCK_SIZE;
- padlen = (index < 112) ? (112 - index) : ((SHA512_BLOCK_SIZE+112) - index);
-
- /* We need to fill a whole block for __sha512_sparc64_update() */
- if (padlen <= 112) {
- if ((sctx->count[0] += padlen) < padlen)
- sctx->count[1]++;
- memcpy(sctx->buf + index, padding, padlen);
- } else {
- __sha512_sparc64_update(sctx, padding, padlen, index);
- }
- __sha512_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 112);
-
- /* Store state in digest */
- for (i = 0; i < 8; i++)
- dst[i] = cpu_to_be64(sctx->state[i]);
-
- /* Wipe context */
- memset(sctx, 0, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha384_sparc64_final(struct shash_desc *desc, u8 *hash)
-{
- u8 D[64];
-
- sha512_sparc64_final(desc, D);
-
- memcpy(hash, D, 48);
- memzero_explicit(D, 64);
-
- return 0;
+ sha512_base_do_finup(desc, src, len, sha512_block);
+ return sha512_base_finish(desc, out);
}
static struct shash_alg sha512 = {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = sha512_sparc64_update,
- .final = sha512_sparc64_final,
- .descsize = sizeof(struct sha512_state),
+ .finup = sha512_sparc64_finup,
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha512",
.cra_driver_name= "sha512-sparc64",
@@ -132,8 +59,8 @@ static struct shash_alg sha384 = {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = sha512_sparc64_update,
- .final = sha384_sparc64_final,
- .descsize = sizeof(struct sha512_state),
+ .finup = sha512_sparc64_finup,
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha384",
.cra_driver_name= "sha384-sparc64",
diff --git a/arch/sparc/crypto/opcodes.h b/arch/sparc/include/asm/opcodes.h
index 417b6a10a337..ebfda6eb49b2 100644
--- a/arch/sparc/crypto/opcodes.h
+++ b/arch/sparc/include/asm/opcodes.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _OPCODES_H
-#define _OPCODES_H
+#ifndef _SPARC_ASM_OPCODES_H
+#define _SPARC_ASM_OPCODES_H
#define SPARC_CR_OPCODE_PRIORITY 300
@@ -97,4 +97,4 @@
#define MOVXTOD_G7_F62 \
.word 0xbfb02307;
-#endif /* _OPCODES_H */
+#endif /* _SPARC_ASM_OPCODES_H */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 62bcafe38b1f..1454ebe91539 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -255,7 +255,11 @@ static inline pte_t pte_mkyoung(pte_t pte)
}
#define PFN_PTE_SHIFT (PAGE_SHIFT - 4)
-#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+ return __pte((pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot));
+}
static inline unsigned long pte_pfn(pte_t pte)
{
@@ -272,15 +276,6 @@ static inline unsigned long pte_pfn(pte_t pte)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
-{
- return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
-}
-
static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
return __pte(((page) >> 4) | pgprot_val(pgprot));
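The per-arch mk_pte() removed here (and in the sparc64 and um hunks below) is replaced by a generic definition built on the pfn_pte() the architecture now provides; roughly, as a sketch of the common helper rather than the exact upstream text:

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return pfn_pte(page_to_pfn(page), pgprot);
}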
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index dc28f2c4eee3..4af03e3c161b 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -225,7 +225,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
return __pte(paddr | pgprot_val(prot));
}
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
@@ -234,7 +233,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
return __pmd(pte_val(pte));
}
-#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
#endif
/* This one can be done with two shifts. */
diff --git a/arch/sparc/include/asm/syscall.h b/arch/sparc/include/asm/syscall.h
index 20c109ac8cc9..b0233924d323 100644
--- a/arch/sparc/include/asm/syscall.h
+++ b/arch/sparc/include/asm/syscall.h
@@ -25,6 +25,18 @@ static inline long syscall_get_nr(struct task_struct *task,
return (syscall_p ? regs->u_regs[UREG_G1] : -1L);
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ /*
+ * Unlike syscall_get_nr(), syscall_set_nr() can be called only when
+ * the target task is stopped for tracing on entering syscall, so
+ * there is no need to have the same check syscall_get_nr() has.
+ */
+ regs->u_regs[UREG_G1] = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -117,6 +129,16 @@ static inline void syscall_get_arguments(struct task_struct *task,
}
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ unsigned int i;
+
+ for (i = 0; i < 6; i++)
+ regs->u_regs[UREG_I0 + i] = args[i];
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
#if defined(CONFIG_SPARC64) && defined(CONFIG_COMPAT)
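syscall_set_nr() and syscall_set_arguments() are the write-side counterparts of the existing accessors, for generic code acting on a task stopped for tracing at syscall entry. A hedged sketch of how such a caller might rewrite the pending syscall (hypothetical helper, not part of this patch; assumes the usual ptrace context and __NR_getpid from the uapi headers):

static void force_getpid(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6] = { 0 };

	/* Valid only while @task is stopped for tracing at syscall entry. */
	syscall_set_nr(task, regs, __NR_getpid);
	syscall_set_arguments(task, regs, args);
}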
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 5b464a568664..adcba7329386 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -143,6 +143,8 @@
#define SO_RCVPRIORITY 0x005b
+#define SO_PASSRIGHTS 0x005c
+
#if !defined(__KERNEL__)
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 58ea4ef9b622..3453f330e363 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -35,6 +35,7 @@ obj-y += process.o
obj-y += signal_$(BITS).o
obj-y += sigutil_$(BITS).o
obj-$(CONFIG_SPARC32) += ioport.o
+obj-y += setup.o
obj-y += setup_$(BITS).o
obj-y += idprom.o
obj-y += sys_sparc_$(BITS).o
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index f02a283a8e8f..cae4d33002a5 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1668,8 +1668,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
if (!sparc_perf_event_set_period(event, hwc, idx))
continue;
- if (perf_event_overflow(event, &data, regs))
- sparc_pmu_stop(event, 0);
+ perf_event_overflow(event, &data, regs);
}
finish_clock = sched_clock();
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
new file mode 100644
index 000000000000..4975867d9001
--- /dev/null
+++ b/arch/sparc/kernel/setup.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm/setup.h>
+#include <linux/sysctl.h>
+
+static const struct ctl_table sparc_sysctl_table[] = {
+ {
+ .procname = "reboot-cmd",
+ .data = reboot_command,
+ .maxlen = 256,
+ .mode = 0644,
+ .proc_handler = proc_dostring,
+ },
+ {
+ .procname = "stop-a",
+ .data = &stop_a_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "scons-poweroff",
+ .data = &scons_pwroff,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#ifdef CONFIG_SPARC64
+ {
+ .procname = "tsb-ratio",
+ .data = &sysctl_tsb_ratio,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
+};
+
+
+static int __init init_sparc_sysctls(void)
+{
+ register_sysctl_init("kernel", sparc_sysctl_table);
+ return 0;
+}
+
+arch_initcall(init_sparc_sysctls);
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 5724d0f356eb..5cf9781d68b4 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -4,6 +4,7 @@
asflags-y := -ansi -DST_DIV0=0x02
+obj-y += crypto/
lib-$(CONFIG_SPARC32) += ashrdi3.o
lib-$(CONFIG_SPARC32) += memcpy.o memset.o
lib-y += strlen.o
@@ -54,4 +55,4 @@ obj-$(CONFIG_SPARC64) += iomap.o
obj-$(CONFIG_SPARC32) += atomic32.o
obj-$(CONFIG_SPARC64) += PeeCeeI.o
obj-$(CONFIG_CRC32_ARCH) += crc32-sparc.o
-crc32-sparc-y := crc32_glue.o crc32c_asm.o
+crc32-sparc-y := crc32.o crc32c_asm.o
diff --git a/arch/sparc/lib/crc32_glue.c b/arch/sparc/lib/crc32.c
index a70752c729cf..40d4720a42a1 100644
--- a/arch/sparc/lib/crc32_glue.c
+++ b/arch/sparc/lib/crc32.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Glue code for CRC32C optimized for sparc64 crypto opcodes.
+/* CRC32c (Castagnoli), sparc64 crc32c opcode accelerated
*
* This is based largely upon arch/x86/crypto/crc32c-intel.c
*
@@ -17,7 +17,7 @@
#include <asm/pstate.h>
#include <asm/elf.h>
-static DEFINE_STATIC_KEY_FALSE(have_crc32c_opcode);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32c_opcode);
u32 crc32_le_arch(u32 crc, const u8 *data, size_t len)
{
@@ -74,7 +74,7 @@ static int __init crc32_sparc_init(void)
pr_info("Using sparc64 crc32c opcode optimized CRC32C implementation\n");
return 0;
}
-arch_initcall(crc32_sparc_init);
+subsys_initcall(crc32_sparc_init);
static void __exit crc32_sparc_exit(void)
{
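Renaming the glue file and marking the static key __ro_after_init does not change the caller-visible interface; users keep calling the CRC32 library helpers, which dispatch to the sparc64 opcode once crc32_sparc_init() has enabled the key. A minimal sketch of such a caller (hypothetical, assuming the crc32c() helper from <linux/crc32.h>):

#include <linux/crc32.h>

static u32 checksum_buf(const void *buf, size_t len)
{
	/* Library entry point; picks the accelerated path when available. */
	return crc32c(~0U, buf, len);
}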
diff --git a/arch/sparc/lib/crc32c_asm.S b/arch/sparc/lib/crc32c_asm.S
index ee454fa6aed6..4db873850f44 100644
--- a/arch/sparc/lib/crc32c_asm.S
+++ b/arch/sparc/lib/crc32c_asm.S
@@ -1,10 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
+#include <asm/opcodes.h>
#include <asm/visasm.h>
#include <asm/asi.h>
-#include "../crypto/opcodes.h"
-
ENTRY(crc32c_sparc64)
/* %o0=crc32p, %o1=data_ptr, %o2=len */
VISEntryHalf
diff --git a/arch/sparc/lib/crypto/Kconfig b/arch/sparc/lib/crypto/Kconfig
new file mode 100644
index 000000000000..e5c3e4d3dba6
--- /dev/null
+++ b/arch/sparc/lib/crypto/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CRYPTO_SHA256_SPARC64
+ tristate
+ depends on SPARC64
+ default CRYPTO_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256
+ select CRYPTO_LIB_SHA256_GENERIC
diff --git a/arch/sparc/lib/crypto/Makefile b/arch/sparc/lib/crypto/Makefile
new file mode 100644
index 000000000000..75ee244ad6f7
--- /dev/null
+++ b/arch/sparc/lib/crypto/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CRYPTO_SHA256_SPARC64) += sha256-sparc64.o
+sha256-sparc64-y := sha256.o sha256_asm.o
diff --git a/arch/sparc/lib/crypto/sha256.c b/arch/sparc/lib/crypto/sha256.c
new file mode 100644
index 000000000000..8bdec2db08b3
--- /dev/null
+++ b/arch/sparc/lib/crypto/sha256.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SHA-256 accelerated using the sparc64 sha256 opcodes
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/elf.h>
+#include <asm/opcodes.h>
+#include <asm/pstate.h>
+#include <crypto/internal/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha256_opcodes);
+
+asmlinkage void sha256_sparc64_transform(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks)
+{
+ if (static_branch_likely(&have_sha256_opcodes))
+ sha256_sparc64_transform(state, data, nblocks);
+ else
+ sha256_blocks_generic(state, data, nblocks);
+}
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+
+bool sha256_is_arch_optimized(void)
+{
+ return static_key_enabled(&have_sha256_opcodes);
+}
+EXPORT_SYMBOL_GPL(sha256_is_arch_optimized);
+
+static int __init sha256_sparc64_mod_init(void)
+{
+ unsigned long cfr;
+
+ if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
+ return 0;
+
+ __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
+ if (!(cfr & CFR_SHA256))
+ return 0;
+
+ static_branch_enable(&have_sha256_opcodes);
+ pr_info("Using sparc64 sha256 opcode optimized SHA-256/SHA-224 implementation\n");
+ return 0;
+}
+subsys_initcall(sha256_sparc64_mod_init);
+
+static void __exit sha256_sparc64_mod_exit(void)
+{
+}
+module_exit(sha256_sparc64_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-256 accelerated using the sparc64 sha256 opcodes");
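The former shash driver becomes a library backend: sha256_blocks_arch() is the only arch hook, chosen at run time through the static key, while the sha224/sha256 algorithms themselves are provided by the generic lib/crypto front end. A hedged sketch of a library consumer (hypothetical, assuming the one-shot sha256() helper from <crypto/sha2.h>):

#include <crypto/sha2.h>

static void hash_example(const u8 *data, unsigned int len,
			 u8 digest[SHA256_DIGEST_SIZE])
{
	/* Ends up in sha256_blocks_arch() when the sparc64 opcode is
	 * available, otherwise in the generic block function. */
	sha256(data, len, digest);
}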
diff --git a/arch/sparc/crypto/sha256_asm.S b/arch/sparc/lib/crypto/sha256_asm.S
index 0b39ec7d7ca2..ddcdd3daf31e 100644
--- a/arch/sparc/crypto/sha256_asm.S
+++ b/arch/sparc/lib/crypto/sha256_asm.S
@@ -1,11 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
+#include <asm/opcodes.h>
#include <asm/visasm.h>
-#include "opcodes.h"
-
ENTRY(sha256_sparc64_transform)
- /* %o0 = digest, %o1 = data, %o2 = rounds */
+ /* %o0 = state, %o1 = data, %o2 = nblocks */
VISEntryHalf
ld [%o0 + 0x00], %f0
ld [%o0 + 0x04], %f1
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 760818950464..25ae4c897aae 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2878,33 +2878,27 @@ void __flush_tlb_all(void)
: : "r" (pstate));
}
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
- struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- pte_t *pte = NULL;
-
- if (page)
- pte = (pte_t *) page_address(page);
-
- return pte;
-}
-
-pgtable_t pte_alloc_one(struct mm_struct *mm)
+static pte_t *__pte_alloc_one(struct mm_struct *mm)
{
struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
if (!ptdesc)
return NULL;
- if (!pagetable_pte_ctor(ptdesc)) {
+ if (!pagetable_pte_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
return ptdesc_address(ptdesc);
}
-void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
- free_page((unsigned long)pte);
+ return __pte_alloc_one(mm);
+}
+
+pgtable_t pte_alloc_one(struct mm_struct *mm)
+{
+ return __pte_alloc_one(mm);
}
static void __pte_free(pgtable_t pte)
@@ -2915,6 +2909,11 @@ static void __pte_free(pgtable_t pte)
pagetable_free(ptdesc);
}
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ __pte_free(pte);
+}
+
void pte_free(struct mm_struct *mm, pgtable_t pte)
{
__pte_free(pte);
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index dd32711022f5..f8fb4911d360 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -350,7 +350,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
spin_lock(&mm->page_table_lock);
if (page_ref_inc_return(page) == 2 &&
- !pagetable_pte_ctor(page_ptdesc(page))) {
+ !pagetable_pte_ctor(mm, page_ptdesc(page))) {
page_ref_dec(page);
ptep = NULL;
}
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 1d36a613aad8..7be0143b5ba3 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -118,9 +118,7 @@ archprepare:
$(Q)$(MAKE) $(build)=$(HOST_DIR)/um include/generated/user_constants.h
LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
-ifdef CONFIG_LD_SCRIPT_DYN
-LINK-$(call gcc-min-version, 60100)$(CONFIG_CC_IS_CLANG) += -no-pie
-endif
+LINK-$(CONFIG_LD_SCRIPT_DYN) += -no-pie
LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib
CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
@@ -154,5 +152,6 @@ MRPROPER_FILES += $(HOST_DIR)/include/generated
archclean:
@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
-o -name '*.gcov' \) -type f -print | xargs rm -f
+ $(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) clean
export HEADER_ARCH SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING DEV_NULL_PATH
diff --git a/arch/um/include/asm/fpu/api.h b/arch/um/include/asm/fpu/api.h
index 71bfd9ef3938..3abf67c83c40 100644
--- a/arch/um/include/asm/fpu/api.h
+++ b/arch/um/include/asm/fpu/api.h
@@ -2,6 +2,8 @@
#ifndef _ASM_UM_FPU_API_H
#define _ASM_UM_FPU_API_H
+#include <linux/types.h>
+
/* Copyright (c) 2020 Cambridge Greys Ltd
* Copyright (c) 2020 Red Hat Inc.
* A set of "dummy" defines to allow the direct inclusion
diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h
index ab0c8dd86564..14ec16f92ce4 100644
--- a/arch/um/include/asm/pgtable-2level.h
+++ b/arch/um/include/asm/pgtable-2level.h
@@ -37,7 +37,6 @@ static inline void pgd_mkuptodate(pgd_t pgd) { }
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
#define pte_pfn(x) phys_to_pfn(pte_val(x))
-#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
#endif
diff --git a/arch/um/include/asm/pgtable-4level.h b/arch/um/include/asm/pgtable-4level.h
index 0d279caee93c..7a271b7b83d2 100644
--- a/arch/um/include/asm/pgtable-4level.h
+++ b/arch/um/include/asm/pgtable-4level.h
@@ -102,15 +102,6 @@ static inline unsigned long pte_pfn(pte_t pte)
return phys_to_pfn(pte_val(pte));
}
-static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-{
- pte_t pte;
- phys_t phys = pfn_to_phys(page_nr);
-
- pte_set_val(pte, phys, pgprot);
- return pte;
-}
-
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 5601ca98e8a6..ca2a519d53ab 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -260,19 +260,17 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEEDSYNC);
}
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
-#define mk_pte(page, pgprot) \
- ({ pte_t pte; \
- \
- pte_set_val(pte, page_to_phys(page), (pgprot)); \
- pte;})
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+ pte_t pte;
+
+ pte_set_val(pte, pfn_to_phys(pfn), pgprot);
+
+ return pte;
+}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
diff --git a/arch/um/include/asm/syscall-generic.h b/arch/um/include/asm/syscall-generic.h
index 172b74143c4b..bcd73bcfe577 100644
--- a/arch/um/include/asm/syscall-generic.h
+++ b/arch/um/include/asm/syscall-generic.h
@@ -21,6 +21,11 @@ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
return PT_REGS_SYSCALL_NR(regs);
}
+static inline void syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr)
+{
+ PT_REGS_SYSCALL_NR(regs) = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -62,6 +67,20 @@ static inline void syscall_get_arguments(struct task_struct *task,
*args = UPT_SYSCALL_ARG6(r);
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ struct uml_pt_regs *r = &regs->regs;
+
+ UPT_SYSCALL_ARG1(r) = *args++;
+ UPT_SYSCALL_ARG2(r) = *args++;
+ UPT_SYSCALL_ARG3(r) = *args++;
+ UPT_SYSCALL_ARG4(r) = *args++;
+ UPT_SYSCALL_ARG5(r) = *args++;
+ UPT_SYSCALL_ARG6(r) = *args;
+}
+
/* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */
#endif /* __UM_SYSCALL_GENERIC_H */
diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h
index 3a08f9029a3f..1c6e0ae41b0c 100644
--- a/arch/um/include/asm/uaccess.h
+++ b/arch/um/include/asm/uaccess.h
@@ -55,6 +55,7 @@ do { \
goto err_label; \
} \
*((type *)dst) = get_unaligned((type *)(src)); \
+ barrier(); \
current->thread.segv_continue = NULL; \
} while (0)
@@ -66,6 +67,7 @@ do { \
if (__faulted) \
goto err_label; \
put_unaligned(*((type *)src), (type *)(dst)); \
+ barrier(); \
current->thread.segv_continue = NULL; \
} while (0)
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index ce073150dc20..ef2272e92a43 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -225,20 +225,20 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
panic("Failed to sync kernel TLBs: %d", err);
goto out;
}
- else if (current->mm == NULL) {
- if (current->pagefault_disabled) {
- if (!mc) {
- show_regs(container_of(regs, struct pt_regs, regs));
- panic("Segfault with pagefaults disabled but no mcontext");
- }
- if (!current->thread.segv_continue) {
- show_regs(container_of(regs, struct pt_regs, regs));
- panic("Segfault without recovery target");
- }
- mc_set_rip(mc, current->thread.segv_continue);
- current->thread.segv_continue = NULL;
- goto out;
+ else if (current->pagefault_disabled) {
+ if (!mc) {
+ show_regs(container_of(regs, struct pt_regs, regs));
+ panic("Segfault with pagefaults disabled but no mcontext");
}
+ if (!current->thread.segv_continue) {
+ show_regs(container_of(regs, struct pt_regs, regs));
+ panic("Segfault without recovery target");
+ }
+ mc_set_rip(mc, current->thread.segv_continue);
+ current->thread.segv_continue = NULL;
+ goto out;
+ }
+ else if (current->mm == NULL) {
show_regs(container_of(regs, struct pt_regs, regs));
panic("Segfault with no mm");
}
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index d4b3b6742ec8..2f5ee045bc7a 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -477,7 +477,7 @@ void *text_poke_copy(void *addr, const void *opcode, size_t len)
return text_poke(addr, opcode, len);
}
-void text_poke_sync(void)
+void smp_text_poke_sync_each_cpu(void)
{
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4b9f378e05f6..340e5468980e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -38,6 +38,7 @@ config X86_64
select ARCH_HAS_ELFCORE_COMPAT
select ZONE_DMA32
select EXECMEM if DYNAMIC_FTRACE
+ select ACPI_MRRM if ACPI
config FORCE_DYNAMIC_FTRACE
def_bool y
@@ -153,6 +154,7 @@ config X86
select ARCH_WANT_HUGETLB_VMEMMAP_PREINIT if X86_64
select ARCH_WANTS_THP_SWAP if X86_64
select ARCH_HAS_PARANOID_L1D_FLUSH
+ select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select BUILDTIME_TABLE_SORT
select CLKEVT_I8253
select CLOCKSOURCE_WATCHDOG
@@ -426,8 +428,7 @@ config DYNAMIC_PHYSICAL_MASK
config PGTABLE_LEVELS
int
- default 5 if X86_5LEVEL
- default 4 if X86_64
+ default 5 if X86_64
default 3 if X86_PAE
default 2
@@ -507,8 +508,9 @@ config X86_MPPARSE
config X86_CPU_RESCTRL
bool "x86 CPU resource control support"
depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
- select KERNFS
- select PROC_CPU_RESCTRL if PROC_FS
+ depends on MISC_FILESYSTEMS
+ select ARCH_HAS_CPU_RESCTRL
+ select RESCTRL_FS
select RESCTRL_FS_PSEUDO_LOCK
help
Enable x86 CPU resource control support.
@@ -526,12 +528,6 @@ config X86_CPU_RESCTRL
Say N if unsure.
-config RESCTRL_FS_PSEUDO_LOCK
- bool
- help
- Software mechanism to pin data in a cache portion using
- micro-architecture specific knowledge.
-
config X86_FRED
bool "Flexible Return and Event Delivery"
depends on X86_64
@@ -799,6 +795,7 @@ config PARAVIRT
config PARAVIRT_XXL
bool
+ depends on X86_64
config PARAVIRT_DEBUG
bool "paravirt-ops debugging"
@@ -1463,27 +1460,6 @@ config X86_PAE
has the cost of more pagetable lookup overhead, and also
consumes more pagetable space per process.
-config X86_5LEVEL
- bool "Enable 5-level page tables support"
- default y
- select DYNAMIC_MEMORY_LAYOUT
- select SPARSEMEM_VMEMMAP
- depends on X86_64
- help
- 5-level paging enables access to larger address space:
- up to 128 PiB of virtual address space and 4 PiB of
- physical address space.
-
- It will be supported by future Intel CPUs.
-
- A kernel with the option enabled can be booted on machines that
- support 4- or 5-level paging.
-
- See Documentation/arch/x86/x86_64/5level-paging.rst for more
- information.
-
- Say N if unsure.
-
config X86_DIRECT_GBPAGES
def_bool y
depends on X86_64
@@ -1579,6 +1555,7 @@ config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_STATIC if X86_32
select SPARSEMEM_VMEMMAP_ENABLE if X86_64
+ select SPARSEMEM_VMEMMAP if X86_64
config ARCH_SPARSEMEM_DEFAULT
def_bool X86_64 || (NUMA && X86_32)
@@ -1881,8 +1858,7 @@ endchoice
config X86_SGX
bool "Software Guard eXtensions (SGX)"
depends on X86_64 && CPU_SUP_INTEL && X86_X2APIC
- depends on CRYPTO=y
- depends on CRYPTO_SHA256=y
+ select CRYPTO_LIB_SHA256
select MMU_NOTIFIER
select NUMA_KEEP_MEMINFO if NUMA
select XARRAY_MULTI
@@ -2029,6 +2005,9 @@ config ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG
config ARCH_SUPPORTS_KEXEC_JUMP
def_bool y
+config ARCH_SUPPORTS_KEXEC_HANDOVER
+ def_bool X86_64
+
config ARCH_SUPPORTS_CRASH_DUMP
def_bool X86_64 || (X86_32 && HIGHMEM)
@@ -2167,17 +2146,10 @@ config PHYSICAL_ALIGN
Don't change this unless you know what you are doing.
-config DYNAMIC_MEMORY_LAYOUT
- bool
- help
- This option makes base addresses of vmalloc and vmemmap as well as
- __PAGE_OFFSET movable during boot.
-
config RANDOMIZE_MEMORY
bool "Randomize the kernel memory sections"
depends on X86_64
depends on RANDOMIZE_BASE
- select DYNAMIC_MEMORY_LAYOUT
default RANDOMIZE_BASE
help
Randomizes the base virtual address of kernel memory sections
@@ -2368,6 +2340,7 @@ config STRICT_SIGALTSTACK_SIZE
config CFI_AUTO_DEFAULT
bool "Attempt to use FineIBT by default at boot time"
depends on FINEIBT
+ depends on !RUST || RUSTC_VERSION >= 108800
default y
help
Attempt to use FineIBT by default at boot time. If enabled,
@@ -2710,6 +2683,18 @@ config MITIGATION_SSB
of speculative execution in a similar way to the Meltdown and Spectre
security vulnerabilities.
+config MITIGATION_ITS
+ bool "Enable Indirect Target Selection mitigation"
+ depends on CPU_SUP_INTEL && X86_64
+ depends on MITIGATION_RETPOLINE && MITIGATION_RETHUNK
+ select EXECMEM
+ default y
+ help
+ Enable Indirect Target Selection (ITS) mitigation. ITS is a bug in
+ BPU on some Intel CPUs that may allow Spectre V2 style attacks. If
+ disabled, mitigation cannot be enabled via cmdline.
+ See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
+
endif
config ARCH_HAS_ADD_PAGES
diff --git a/arch/x86/Kconfig.assembler b/arch/x86/Kconfig.assembler
index 6d20a6ce0507..c827f694fb72 100644
--- a/arch/x86/Kconfig.assembler
+++ b/arch/x86/Kconfig.assembler
@@ -6,20 +6,6 @@ config AS_AVX512
help
Supported by binutils >= 2.25 and LLVM integrated assembler
-config AS_SHA1_NI
- def_bool $(as-instr,sha1msg1 %xmm0$(comma)%xmm1)
- help
- Supported by binutils >= 2.24 and LLVM integrated assembler
-
-config AS_SHA256_NI
- def_bool $(as-instr,sha256msg1 %xmm0$(comma)%xmm1)
- help
- Supported by binutils >= 2.24 and LLVM integrated assembler
-config AS_TPAUSE
- def_bool $(as-instr,tpause %ecx)
- help
- Supported by binutils >= 2.31.1 and LLVM integrated assembler >= V7
-
config AS_GFNI
def_bool $(as-instr,vgf2p8mulb %xmm0$(comma)%xmm1$(comma)%xmm2)
help
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 753b8763abae..f928cf6e3252 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -245,6 +245,30 @@ config MATOM
endchoice
+config CC_HAS_MARCH_NATIVE
+ # This flag might not be available in cross-compilers:
+ def_bool $(cc-option, -march=native)
+ # LLVM 18 has an easily triggered internal compiler error in core
+ # networking code with '-march=native' on certain systems:
+ # https://github.com/llvm/llvm-project/issues/72026
+ # LLVM 19 introduces an optimization that resolves some high stack
+	# usage warnings that only appear with '-march=native'.
+ depends on CC_IS_GCC || CLANG_VERSION >= 190100
+
+config X86_NATIVE_CPU
+ bool "Build and optimize for local/native CPU"
+ depends on X86_64
+ depends on CC_HAS_MARCH_NATIVE
+ help
+ Optimize for the current CPU used to compile the kernel.
+ Use this option if you intend to build the kernel for your
+ local machine.
+
+ Note that such a kernel might not work optimally on a
+ different x86 machine.
+
+ If unsure, say N.
+
config X86_GENERIC
bool "Generic x86 support"
depends on X86_32
diff --git a/arch/x86/Kconfig.cpufeatures b/arch/x86/Kconfig.cpufeatures
index e12d5b7e39a2..250c10627ab3 100644
--- a/arch/x86/Kconfig.cpufeatures
+++ b/arch/x86/Kconfig.cpufeatures
@@ -132,10 +132,6 @@ config X86_DISABLED_FEATURE_OSPKE
def_bool y
depends on !X86_INTEL_MEMORY_PROTECTION_KEYS
-config X86_DISABLED_FEATURE_LA57
- def_bool y
- depends on !X86_5LEVEL
-
config X86_DISABLED_FEATURE_PTI
def_bool y
depends on !MITIGATION_PAGE_TABLE_ISOLATION
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 594723005d95..1913d342969b 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -173,8 +173,13 @@ else
# Use -mskip-rax-setup if supported.
KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
+ifdef CONFIG_X86_NATIVE_CPU
+ KBUILD_CFLAGS += -march=native
+ KBUILD_RUSTFLAGS += -Ctarget-cpu=native
+else
KBUILD_CFLAGS += -march=x86-64 -mtune=generic
KBUILD_RUSTFLAGS += -Ctarget-cpu=x86-64 -Ztune-cpu=generic
+endif
KBUILD_CFLAGS += -mno-red-zone
KBUILD_CFLAGS += -mcmodel=kernel
@@ -281,6 +286,7 @@ archprepare: $(cpufeaturemasks.hdr)
###
# Kernel objects
+core-y += arch/x86/boot/startup/
libs-y += arch/x86/lib/
# drivers-y are linked after core-y
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 81f55da81967..640fcac3af74 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -59,7 +59,7 @@ KBUILD_CFLAGS += $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
$(obj)/bzImage: asflags-y := $(SVGA_MODE)
quiet_cmd_image = BUILD $@
- cmd_image = cp $< $@; truncate -s %4K $@; cat $(obj)/vmlinux.bin >>$@
+ cmd_image = (dd if=$< bs=4k conv=sync status=none; cat $(filter-out $<,$(real-prereqs))) >$@
$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin FORCE
$(call if_changed,image)
diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S
index aa9b96457584..cf4a6155714e 100644
--- a/arch/x86/boot/bioscall.S
+++ b/arch/x86/boot/bioscall.S
@@ -32,7 +32,7 @@ intcall:
movw %dx, %si
movw %sp, %di
movw $11, %cx
- rep; movsl
+ rep movsl
/* Pop full state from the stack */
popal
@@ -67,7 +67,7 @@ intcall:
jz 4f
movw %sp, %si
movw $11, %cx
- rep; movsl
+ rep movsl
4: addw $44, %sp
/* Restore state and return */
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 38f17a1e1e36..60580836daf7 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -34,7 +34,7 @@
extern struct setup_header hdr;
extern struct boot_params boot_params;
-#define cpu_relax() asm volatile("rep; nop")
+#define cpu_relax() asm volatile("pause")
static inline void io_delay(void)
{
@@ -155,14 +155,14 @@ static inline void wrgs32(u32 v, addr_t addr)
static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len)
{
bool diff;
- asm volatile("fs; repe; cmpsb" CC_SET(nz)
+ asm volatile("fs repe cmpsb" CC_SET(nz)
: CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len)
{
bool diff;
- asm volatile("gs; repe; cmpsb" CC_SET(nz)
+ asm volatile("gs repe cmpsb" CC_SET(nz)
: CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index fdbce022db55..f4f7b22d8113 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -44,10 +44,10 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
KBUILD_CFLAGS += $(call cc-option,-Wa$(comma)-mrelax-relocations=no)
KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
-# sev.c indirectly includes inat-table.h which is generated during
+# sev-decode-insn.c indirectly includes inat-table.c which is generated during
# compilation and stored in $(objtree). Add the directory to the includes so
# that the compiler finds it even with out-of-tree builds (make O=/some/path).
-CFLAGS_sev.o += -I$(objtree)/arch/x86/lib/
+CFLAGS_sev-handle-vc.o += -I$(objtree)/arch/x86/lib/
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
@@ -73,7 +73,7 @@ LDFLAGS_vmlinux += -T
hostprogs := mkpiggy
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
-sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
+sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABbCDGRSTtVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
quiet_cmd_voffset = VOFFSET $@
cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
@@ -96,8 +96,7 @@ ifdef CONFIG_X86_64
vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o
vmlinux-objs-y += $(obj)/pgtable_64.o
- vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
- vmlinux-objs-y += $(obj)/la57toggle.o
+ vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o $(obj)/sev-handle-vc.o
endif
vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
@@ -106,6 +105,7 @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o
vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+vmlinux-libs-$(CONFIG_X86_64) += $(objtree)/arch/x86/boot/startup/lib.a
$(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
$(call if_changed,ld)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index eafd4f185e77..d9dab940ff62 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -35,7 +35,6 @@
#include <asm/bootparam.h>
#include <asm/desc_defs.h>
#include <asm/trapnr.h>
-#include "pgtable.h"
/*
* Fix alignment at 16 bytes. Following CONFIG_FUNCTION_ALIGNMENT will result
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index f03d59ea6e40..3b0948ad449f 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -760,6 +760,49 @@ static void process_e820_entries(unsigned long minimum,
}
}
+/*
+ * If KHO is active, only process its scratch areas to ensure we are not
+ * stepping onto preserved memory.
+ */
+static bool process_kho_entries(unsigned long minimum, unsigned long image_size)
+{
+ struct kho_scratch *kho_scratch;
+ struct setup_data *ptr;
+ struct kho_data *kho;
+ int i, nr_areas = 0;
+
+ if (!IS_ENABLED(CONFIG_KEXEC_HANDOVER))
+ return false;
+
+ ptr = (struct setup_data *)(unsigned long)boot_params_ptr->hdr.setup_data;
+ while (ptr) {
+ if (ptr->type == SETUP_KEXEC_KHO) {
+ kho = (struct kho_data *)(unsigned long)ptr->data;
+ kho_scratch = (void *)(unsigned long)kho->scratch_addr;
+ nr_areas = kho->scratch_size / sizeof(*kho_scratch);
+ break;
+ }
+
+ ptr = (struct setup_data *)(unsigned long)ptr->next;
+ }
+
+ if (!nr_areas)
+ return false;
+
+ for (i = 0; i < nr_areas; i++) {
+ struct kho_scratch *area = &kho_scratch[i];
+ struct mem_vector region = {
+ .start = area->addr,
+ .size = area->size,
+ };
+
+ if (process_mem_region(&region, minimum, image_size))
+ break;
+ }
+
+ return true;
+}
+
static unsigned long find_random_phys_addr(unsigned long minimum,
unsigned long image_size)
{
@@ -775,7 +818,12 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
return 0;
}
- if (!process_efi_entries(minimum, image_size))
+ /*
+ * During kexec handover only process KHO scratch areas that are known
+ * not to contain any data that must be preserved.
+ */
+ if (!process_kho_entries(minimum, image_size) &&
+ !process_efi_entries(minimum, image_size))
process_e820_entries(minimum, image_size);
phys_addr = slots_fetch_random();
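process_kho_entries() follows the usual pattern for locating a setup_data entry of a given type; distilled into a generic form it would look roughly like this (hypothetical helper, assuming boot_params_ptr as used above):

static struct setup_data *find_setup_data(u32 type)
{
	struct setup_data *sd;

	sd = (struct setup_data *)(unsigned long)boot_params_ptr->hdr.setup_data;
	while (sd) {
		if (sd->type == type)
			return sd;
		sd = (struct setup_data *)(unsigned long)sd->next;
	}
	return NULL;
}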
diff --git a/arch/x86/boot/compressed/mem.c b/arch/x86/boot/compressed/mem.c
index dbba332e4a12..0e9f84ab4bdc 100644
--- a/arch/x86/boot/compressed/mem.c
+++ b/arch/x86/boot/compressed/mem.c
@@ -38,7 +38,7 @@ void arch_accept_memory(phys_addr_t start, phys_addr_t end)
if (early_is_tdx_guest()) {
if (!tdx_accept_memory(start, end))
panic("TDX: Failed to accept memory\n");
- } else if (sev_snp_enabled()) {
+ } else if (early_is_sevsnp_guest()) {
snp_accept_memory(start, end);
} else {
error("Cannot accept memory: unknown platform\n");
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 1cdcd4aaf395..94b5991da001 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -14,7 +14,6 @@
#include "misc.h"
#include "error.h"
-#include "pgtable.h"
#include "../string.h"
#include "../voffset.h"
#include <asm/bootparam_utils.h>
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index dd8d1a85f671..db1048621ea2 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -136,6 +136,9 @@ static inline void console_init(void)
#endif
#ifdef CONFIG_AMD_MEM_ENCRYPT
+struct es_em_ctxt;
+struct insn;
+
void sev_enable(struct boot_params *bp);
void snp_check_features(void);
void sev_es_shutdown_ghcb(void);
@@ -143,6 +146,11 @@ extern bool sev_es_check_ghcb_fault(unsigned long address);
void snp_set_page_private(unsigned long paddr);
void snp_set_page_shared(unsigned long paddr);
void sev_prep_identity_maps(unsigned long top_level_pgt);
+
+enum es_result vc_decode_insn(struct es_em_ctxt *ctxt);
+bool insn_has_rep_prefix(struct insn *insn);
+void sev_insn_decode_init(void);
+bool early_setup_ghcb(void);
#else
static inline void sev_enable(struct boot_params *bp)
{
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
deleted file mode 100644
index 6d595abe06b3..000000000000
--- a/arch/x86/boot/compressed/pgtable.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef BOOT_COMPRESSED_PAGETABLE_H
-#define BOOT_COMPRESSED_PAGETABLE_H
-
-#define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE)
-
-#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
-#define TRAMPOLINE_32BIT_CODE_SIZE 0xA0
-
-#ifndef __ASSEMBLER__
-
-extern unsigned long *trampoline_32bit;
-
-extern void trampoline_32bit_src(void *trampoline, bool enable_5lvl);
-
-extern const u16 trampoline_ljmp_imm_offset;
-
-#endif /* __ASSEMBLER__ */
-#endif /* BOOT_COMPRESSED_PAGETABLE_H */
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index d8c5de40669d..bdd26050dff7 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -4,19 +4,16 @@
#include <asm/bootparam_utils.h>
#include <asm/e820/types.h>
#include <asm/processor.h>
-#include "pgtable.h"
#include "../string.h"
#include "efi.h"
#define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */
#define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */
-#ifdef CONFIG_X86_5LEVEL
/* __pgtable_l5_enabled needs to be in .data to avoid being cleared along with .bss */
unsigned int __section(".data") __pgtable_l5_enabled;
unsigned int __section(".data") pgdir_shift = 39;
unsigned int __section(".data") ptrs_per_p4d = 1;
-#endif
/* Buffer to preserve trampoline memory */
static char trampoline_save[TRAMPOLINE_32BIT_SIZE];
@@ -115,18 +112,13 @@ asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
* Check if LA57 is desired and supported.
*
* There are several parts to the check:
- * - if the kernel supports 5-level paging: CONFIG_X86_5LEVEL=y
* - if user asked to disable 5-level paging: no5lvl in cmdline
* - if the machine supports 5-level paging:
* + CPUID leaf 7 is supported
* + the leaf has the feature bit set
- *
- * That's substitute for boot_cpu_has() in early boot code.
*/
- if (IS_ENABLED(CONFIG_X86_5LEVEL) &&
- !cmdline_find_option_bool("no5lvl") &&
- native_cpuid_eax(0) >= 7 &&
- (native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) {
+ if (!cmdline_find_option_bool("no5lvl") &&
+ native_cpuid_eax(0) >= 7 && (native_cpuid_ecx(7) & BIT(16))) {
l5_required = true;
/* Initialize variables for 5-level paging */
diff --git a/arch/x86/boot/compressed/sev-handle-vc.c b/arch/x86/boot/compressed/sev-handle-vc.c
new file mode 100644
index 000000000000..89dd02de2a0f
--- /dev/null
+++ b/arch/x86/boot/compressed/sev-handle-vc.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "misc.h"
+#include "sev.h"
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/insn.h>
+#include <asm/pgtable_types.h>
+#include <asm/ptrace.h>
+#include <asm/sev.h>
+#include <asm/trapnr.h>
+#include <asm/trap_pf.h>
+#include <asm/fpu/xcr.h>
+
+#define __BOOT_COMPRESSED
+
+/* Basic instruction decoding support needed */
+#include "../../lib/inat.c"
+#include "../../lib/insn.c"
+
+/*
+ * Copy a version of this function here - insn-eval.c can't be used in
+ * pre-decompression code.
+ */
+bool insn_has_rep_prefix(struct insn *insn)
+{
+ insn_byte_t p;
+ int i;
+
+ insn_get_prefixes(insn);
+
+ for_each_insn_prefix(insn, i, p) {
+ if (p == 0xf2 || p == 0xf3)
+ return true;
+ }
+
+ return false;
+}
+
+enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
+{
+ char buffer[MAX_INSN_SIZE];
+ int ret;
+
+ memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
+
+ ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
+ if (ret < 0)
+ return ES_DECODE_FAILED;
+
+ return ES_OK;
+}
+
+extern void sev_insn_decode_init(void) __alias(inat_init_tables);
+
+/*
+ * Only a dummy for insn_get_seg_base() - Early boot-code is 64bit only and
+ * doesn't use segments.
+ */
+static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
+{
+ return 0UL;
+}
+
+static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
+ void *dst, char *buf, size_t size)
+{
+ memcpy(dst, buf, size);
+
+ return ES_OK;
+}
+
+static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
+ void *src, char *buf, size_t size)
+{
+ memcpy(buf, src, size);
+
+ return ES_OK;
+}
+
+static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
+{
+ return ES_OK;
+}
+
+static bool fault_in_kernel_space(unsigned long address)
+{
+ return false;
+}
+
+#define sev_printk(fmt, ...)
+
+#include "../../coco/sev/vc-shared.c"
+
+void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
+{
+ struct es_em_ctxt ctxt;
+ enum es_result result;
+
+ if (!boot_ghcb && !early_setup_ghcb())
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+
+ vc_ghcb_invalidate(boot_ghcb);
+ result = vc_init_em_ctxt(&ctxt, regs, exit_code);
+ if (result != ES_OK)
+ goto finish;
+
+ result = vc_check_opcode_bytes(&ctxt, exit_code);
+ if (result != ES_OK)
+ goto finish;
+
+ switch (exit_code) {
+ case SVM_EXIT_RDTSC:
+ case SVM_EXIT_RDTSCP:
+ result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code);
+ break;
+ case SVM_EXIT_IOIO:
+ result = vc_handle_ioio(boot_ghcb, &ctxt);
+ break;
+ case SVM_EXIT_CPUID:
+ result = vc_handle_cpuid(boot_ghcb, &ctxt);
+ break;
+ default:
+ result = ES_UNSUPPORTED;
+ break;
+ }
+
+finish:
+ if (result == ES_OK)
+ vc_finish_insn(&ctxt);
+ else if (result != ES_RETRY)
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+}
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index bb55934c1cee..fd1b67dfea22 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -21,99 +21,14 @@
#include <asm/fpu/xcr.h>
#include <asm/ptrace.h>
#include <asm/svm.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include "error.h"
-#include "../msr.h"
+#include "sev.h"
static struct ghcb boot_ghcb_page __aligned(PAGE_SIZE);
struct ghcb *boot_ghcb;
-/*
- * Copy a version of this function here - insn-eval.c can't be used in
- * pre-decompression code.
- */
-static bool insn_has_rep_prefix(struct insn *insn)
-{
- insn_byte_t p;
- int i;
-
- insn_get_prefixes(insn);
-
- for_each_insn_prefix(insn, i, p) {
- if (p == 0xf2 || p == 0xf3)
- return true;
- }
-
- return false;
-}
-
-/*
- * Only a dummy for insn_get_seg_base() - Early boot-code is 64bit only and
- * doesn't use segments.
- */
-static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
-{
- return 0UL;
-}
-
-static inline u64 sev_es_rd_ghcb_msr(void)
-{
- struct msr m;
-
- boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);
-
- return m.q;
-}
-
-static inline void sev_es_wr_ghcb_msr(u64 val)
-{
- struct msr m;
-
- m.q = val;
- boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
-}
-
-static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
-{
- char buffer[MAX_INSN_SIZE];
- int ret;
-
- memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
-
- ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
- if (ret < 0)
- return ES_DECODE_FAILED;
-
- return ES_OK;
-}
-
-static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
- void *dst, char *buf, size_t size)
-{
- memcpy(dst, buf, size);
-
- return ES_OK;
-}
-
-static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
- void *src, char *buf, size_t size)
-{
- memcpy(buf, src, size);
-
- return ES_OK;
-}
-
-static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
-{
- return ES_OK;
-}
-
-static bool fault_in_kernel_space(unsigned long address)
-{
- return false;
-}
-
#undef __init
#define __init
@@ -122,24 +37,27 @@ static bool fault_in_kernel_space(unsigned long address)
#define __BOOT_COMPRESSED
-/* Basic instruction decoding support needed */
-#include "../../lib/inat.c"
-#include "../../lib/insn.c"
-
-/* Include code for early handlers */
-#include "../../coco/sev/shared.c"
+extern struct svsm_ca *boot_svsm_caa;
+extern u64 boot_svsm_caa_pa;
-static struct svsm_ca *svsm_get_caa(void)
+struct svsm_ca *svsm_get_caa(void)
{
return boot_svsm_caa;
}
-static u64 svsm_get_caa_pa(void)
+u64 svsm_get_caa_pa(void)
{
return boot_svsm_caa_pa;
}
-static int svsm_perform_call_protocol(struct svsm_call *call)
+int svsm_perform_call_protocol(struct svsm_call *call);
+
+u8 snp_vmpl;
+
+/* Include code for early handlers */
+#include "../../boot/startup/sev-shared.c"
+
+int svsm_perform_call_protocol(struct svsm_call *call)
{
struct ghcb *ghcb;
int ret;
@@ -157,17 +75,14 @@ static int svsm_perform_call_protocol(struct svsm_call *call)
return ret;
}
-bool sev_snp_enabled(void)
+static bool sev_snp_enabled(void)
{
return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
}
static void __page_state_change(unsigned long paddr, enum psc_op op)
{
- u64 val;
-
- if (!sev_snp_enabled())
- return;
+ u64 val, msr;
/*
* If private -> shared then invalidate the page before requesting the
@@ -176,6 +91,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
if (op == SNP_PAGE_STATE_SHARED)
pvalidate_4k_page(paddr, paddr, false);
+ /* Save the current GHCB MSR value */
+ msr = sev_es_rd_ghcb_msr();
+
/* Issue VMGEXIT to change the page state in RMP table. */
sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
VMGEXIT();
@@ -185,6 +103,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+ /* Restore the GHCB MSR value */
+ sev_es_wr_ghcb_msr(msr);
+
/*
* Now that page state is changed in the RMP table, validate it so that it is
* consistent with the RMP entry.
@@ -195,15 +116,21 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
void snp_set_page_private(unsigned long paddr)
{
+ if (!sev_snp_enabled())
+ return;
+
__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
}
void snp_set_page_shared(unsigned long paddr)
{
+ if (!sev_snp_enabled())
+ return;
+
__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
}
-static bool early_setup_ghcb(void)
+bool early_setup_ghcb(void)
{
if (set_page_decrypted((unsigned long)&boot_ghcb_page))
return false;
@@ -214,7 +141,7 @@ static bool early_setup_ghcb(void)
boot_ghcb = &boot_ghcb_page;
/* Initialize lookup tables for the instruction decoder */
- inat_init_tables();
+ sev_insn_decode_init();
/* SNP guest requires the GHCB GPA must be registered */
if (sev_snp_enabled())
@@ -223,56 +150,10 @@ static bool early_setup_ghcb(void)
return true;
}
-static phys_addr_t __snp_accept_memory(struct snp_psc_desc *desc,
- phys_addr_t pa, phys_addr_t pa_end)
-{
- struct psc_hdr *hdr;
- struct psc_entry *e;
- unsigned int i;
-
- hdr = &desc->hdr;
- memset(hdr, 0, sizeof(*hdr));
-
- e = desc->entries;
-
- i = 0;
- while (pa < pa_end && i < VMGEXIT_PSC_MAX_ENTRY) {
- hdr->end_entry = i;
-
- e->gfn = pa >> PAGE_SHIFT;
- e->operation = SNP_PAGE_STATE_PRIVATE;
- if (IS_ALIGNED(pa, PMD_SIZE) && (pa_end - pa) >= PMD_SIZE) {
- e->pagesize = RMP_PG_SIZE_2M;
- pa += PMD_SIZE;
- } else {
- e->pagesize = RMP_PG_SIZE_4K;
- pa += PAGE_SIZE;
- }
-
- e++;
- i++;
- }
-
- if (vmgexit_psc(boot_ghcb, desc))
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
-
- pvalidate_pages(desc);
-
- return pa;
-}
-
void snp_accept_memory(phys_addr_t start, phys_addr_t end)
{
- struct snp_psc_desc desc = {};
- unsigned int i;
- phys_addr_t pa;
-
- if (!boot_ghcb && !early_setup_ghcb())
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
-
- pa = start;
- while (pa < end)
- pa = __snp_accept_memory(&desc, pa, end);
+ for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
+ __page_state_change(pa, SNP_PAGE_STATE_PRIVATE);
}
void sev_es_shutdown_ghcb(void)
@@ -333,46 +214,6 @@ bool sev_es_check_ghcb_fault(unsigned long address)
return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page);
}
-void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
-{
- struct es_em_ctxt ctxt;
- enum es_result result;
-
- if (!boot_ghcb && !early_setup_ghcb())
- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
-
- vc_ghcb_invalidate(boot_ghcb);
- result = vc_init_em_ctxt(&ctxt, regs, exit_code);
- if (result != ES_OK)
- goto finish;
-
- result = vc_check_opcode_bytes(&ctxt, exit_code);
- if (result != ES_OK)
- goto finish;
-
- switch (exit_code) {
- case SVM_EXIT_RDTSC:
- case SVM_EXIT_RDTSCP:
- result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code);
- break;
- case SVM_EXIT_IOIO:
- result = vc_handle_ioio(boot_ghcb, &ctxt);
- break;
- case SVM_EXIT_CPUID:
- result = vc_handle_cpuid(boot_ghcb, &ctxt);
- break;
- default:
- result = ES_UNSUPPORTED;
- break;
- }
-
-finish:
- if (result == ES_OK)
- vc_finish_insn(&ctxt);
- else if (result != ES_RETRY)
- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
-}
-
/*
* SNP_FEATURES_IMPL_REQ is the mask of SNP features that will need
* guest side implementation for proper functioning of the guest. If any
@@ -682,3 +523,43 @@ void sev_prep_identity_maps(unsigned long top_level_pgt)
sev_verify_cbit(top_level_pgt);
}
+
+bool early_is_sevsnp_guest(void)
+{
+ static bool sevsnp;
+
+ if (sevsnp)
+ return true;
+
+ if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED))
+ return false;
+
+ sevsnp = true;
+
+ if (!snp_vmpl) {
+ unsigned int eax, ebx, ecx, edx;
+
+ /*
+ * CPUID Fn8000_001F_EAX[28] - SVSM support
+ */
+ eax = 0x8000001f;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+ if (eax & BIT(28)) {
+ struct msr m;
+
+ /* Obtain the address of the calling area to use */
+ boot_rdmsr(MSR_SVSM_CAA, &m);
+ boot_svsm_caa = (void *)m.q;
+ boot_svsm_caa_pa = m.q;
+
+ /*
+ * The real VMPL level cannot be discovered, but the
+ * memory acceptance routines make no use of that so
+ * any non-zero value suffices here.
+ */
+ snp_vmpl = U8_MAX;
+ }
+ }
+ return true;
+}
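
early_is_sevsnp_guest() combines the SEV status MSR with a CPUID probe of leaf 0x8000001f. A small user-space probe of the same leaf, for illustration only and not part of the patch; bit 28 (SVSM) and EBX[5:0] (C-bit position) match the uses in this series, while the SME/SEV/SEV-ES/SNP bit positions follow the APM as I recall them:

#include <stdio.h>
#include <cpuid.h>	/* GCC/Clang __get_cpuid() wrapper */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID Fn8000_001F reports AMD memory-encryption capabilities */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x8000001f not available");
		return 1;
	}

	printf("SME supported:   %s\n", (eax & (1u << 0))  ? "yes" : "no");
	printf("SEV supported:   %s\n", (eax & (1u << 1))  ? "yes" : "no");
	printf("SEV-ES:          %s\n", (eax & (1u << 3))  ? "yes" : "no");
	printf("SEV-SNP:         %s\n", (eax & (1u << 4))  ? "yes" : "no");
	printf("SVSM present:    %s\n", (eax & (1u << 28)) ? "yes" : "no");
	printf("C-bit position:  %u\n", ebx & 0x3f);
	return 0;
}
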
diff --git a/arch/x86/boot/compressed/sev.h b/arch/x86/boot/compressed/sev.h
index fc725a981b09..92f79c21939c 100644
--- a/arch/x86/boot/compressed/sev.h
+++ b/arch/x86/boot/compressed/sev.h
@@ -10,13 +10,34 @@
#ifdef CONFIG_AMD_MEM_ENCRYPT
-bool sev_snp_enabled(void);
+#include "../msr.h"
+
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
+u64 sev_get_status(void);
+bool early_is_sevsnp_guest(void);
+
+static inline u64 sev_es_rd_ghcb_msr(void)
+{
+ struct msr m;
+
+ boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);
+
+ return m.q;
+}
+
+static inline void sev_es_wr_ghcb_msr(u64 val)
+{
+ struct msr m;
+
+ m.q = val;
+ boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
+}
#else
-static inline bool sev_snp_enabled(void) { return false; }
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
+static inline u64 sev_get_status(void) { return 0; }
+static inline bool early_is_sevsnp_guest(void) { return false; }
#endif
diff --git a/arch/x86/boot/compressed/string.c b/arch/x86/boot/compressed/string.c
index 81fc1eaa3229..9af19d9614cb 100644
--- a/arch/x86/boot/compressed/string.c
+++ b/arch/x86/boot/compressed/string.c
@@ -15,9 +15,9 @@ static void *____memcpy(void *dest, const void *src, size_t n)
{
int d0, d1, d2;
asm volatile(
- "rep ; movsl\n\t"
+ "rep movsl\n\t"
"movl %4,%%ecx\n\t"
- "rep ; movsb\n\t"
+ "rep movsb"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (n >> 2), "g" (n & 3), "1" (dest), "2" (src)
: "memory");
@@ -29,9 +29,9 @@ static void *____memcpy(void *dest, const void *src, size_t n)
{
long d0, d1, d2;
asm volatile(
- "rep ; movsq\n\t"
+ "rep movsq\n\t"
"movq %4,%%rcx\n\t"
- "rep ; movsb\n\t"
+ "rep movsb"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (n >> 3), "g" (n & 7), "1" (dest), "2" (src)
: "memory");
diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S
index 6afd05e819d2..3973a67cd04e 100644
--- a/arch/x86/boot/copy.S
+++ b/arch/x86/boot/copy.S
@@ -22,10 +22,10 @@ SYM_FUNC_START_NOALIGN(memcpy)
movw %dx, %si
pushw %cx
shrw $2, %cx
- rep; movsl
+ rep movsl
popw %cx
andw $3, %cx
- rep; movsb
+ rep movsb
popw %di
popw %si
retl
@@ -38,10 +38,10 @@ SYM_FUNC_START_NOALIGN(memset)
imull $0x01010101,%eax
pushw %cx
shrw $2, %cx
- rep; stosl
+ rep stosl
popw %cx
andw $3, %cx
- rep; stosb
+ rep stosb
popw %di
retl
SYM_FUNC_END(memset)
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index b5c79f43359b..e1f4fd5bc8ee 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -43,7 +43,7 @@ SYSSEG = 0x1000 /* historical load address >> 4 */
.section ".bstext", "ax"
#ifdef CONFIG_EFI_STUB
# "MZ", MS-DOS header
- .word MZ_MAGIC
+ .word IMAGE_DOS_SIGNATURE
.org 0x38
#
# Offset to the PE header.
@@ -51,16 +51,16 @@ SYSSEG = 0x1000 /* historical load address >> 4 */
.long LINUX_PE_MAGIC
.long pe_header
pe_header:
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
coff_header:
#ifdef CONFIG_X86_32
.set image_file_add_flags, IMAGE_FILE_32BIT_MACHINE
- .set pe_opt_magic, PE_OPT_MAGIC_PE32
+ .set pe_opt_magic, IMAGE_NT_OPTIONAL_HDR32_MAGIC
.word IMAGE_FILE_MACHINE_I386
#else
.set image_file_add_flags, 0
- .set pe_opt_magic, PE_OPT_MAGIC_PE32PLUS
+ .set pe_opt_magic, IMAGE_NT_OPTIONAL_HDR64_MAGIC
.word IMAGE_FILE_MACHINE_AMD64
#endif
.word section_count # nr_sections
@@ -111,7 +111,7 @@ extra_header_fields:
.long salign # SizeOfHeaders
.long 0 # CheckSum
.word IMAGE_SUBSYSTEM_EFI_APPLICATION # Subsystem (EFI application)
- .word IMAGE_DLL_CHARACTERISTICS_NX_COMPAT # DllCharacteristics
+ .word IMAGE_DLLCHARACTERISTICS_NX_COMPAT # DllCharacteristics
#ifdef CONFIG_X86_32
.long 0 # SizeOfStackReserve
.long 0 # SizeOfStackCommit
@@ -361,12 +361,8 @@ xloadflags:
#endif
#ifdef CONFIG_X86_64
-#ifdef CONFIG_X86_5LEVEL
#define XLF56 (XLF_5LEVEL|XLF_5LEVEL_ENABLED)
#else
-#define XLF56 XLF_5LEVEL
-#endif
-#else
#define XLF56 0
#endif
@@ -585,7 +581,7 @@ start_of_setup:
xorl %eax, %eax
subw %di, %cx
shrw $2, %cx
- rep; stosl
+ rep stosl
# Jump to C code (should not return)
calll main
diff --git a/arch/x86/boot/startup/Makefile b/arch/x86/boot/startup/Makefile
new file mode 100644
index 000000000000..b514f7e81332
--- /dev/null
+++ b/arch/x86/boot/startup/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+
+KBUILD_AFLAGS += -D__DISABLE_EXPORTS
+KBUILD_CFLAGS += -D__DISABLE_EXPORTS -mcmodel=small -fPIC \
+ -Os -DDISABLE_BRANCH_PROFILING \
+ $(DISABLE_STACKLEAK_PLUGIN) \
+ -fno-stack-protector -D__NO_FORTIFY \
+ -fno-jump-tables \
+ -include $(srctree)/include/linux/hidden.h
+
+# disable ftrace hooks and LTO
+KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS))
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
+KMSAN_SANITIZE := n
+UBSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+
+obj-$(CONFIG_X86_64) += gdt_idt.o map_kernel.o
+obj-$(CONFIG_AMD_MEM_ENCRYPT) += sme.o sev-startup.o
+
+lib-$(CONFIG_X86_64) += la57toggle.o
+lib-$(CONFIG_EFI_MIXED) += efi-mixed.o
+
+#
+# Disable objtool validation for all library code, which is intended
+# to be linked into the decompressor or the EFI stub but not vmlinux
+#
+$(patsubst %.o,$(obj)/%.o,$(lib-y)): OBJECT_FILES_NON_STANDARD := y
diff --git a/drivers/firmware/efi/libstub/x86-mixed.S b/arch/x86/boot/startup/efi-mixed.S
index e04ed99bc449..e04ed99bc449 100644
--- a/drivers/firmware/efi/libstub/x86-mixed.S
+++ b/arch/x86/boot/startup/efi-mixed.S
diff --git a/arch/x86/boot/startup/gdt_idt.c b/arch/x86/boot/startup/gdt_idt.c
new file mode 100644
index 000000000000..a3112a69b06a
--- /dev/null
+++ b/arch/x86/boot/startup/gdt_idt.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+#include <asm/desc.h>
+#include <asm/init.h>
+#include <asm/setup.h>
+#include <asm/sev.h>
+#include <asm/trapnr.h>
+
+/*
+ * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
+ * used until the idt_table takes over. On the boot CPU this happens in
+ * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
+ * this happens in the functions called from head_64.S.
+ *
+ * The idt_table can't be used that early because all the code modifying it is
+ * in idt.c and can be instrumented by tracing or KASAN, which both don't work
+ * during early CPU bringup. Also the idt_table has the runtime vectors
+ * configured which require certain CPU state to be setup already (like TSS),
+ * which also hasn't happened yet in early CPU bringup.
+ */
+static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;
+
+/* This may run while still in the direct mapping */
+void __head startup_64_load_idt(void *vc_handler)
+{
+ struct desc_ptr desc = {
+ .address = (unsigned long)rip_rel_ptr(bringup_idt_table),
+ .size = sizeof(bringup_idt_table) - 1,
+ };
+ struct idt_data data;
+ gate_desc idt_desc;
+
+ /* @vc_handler is set only for a VMM Communication Exception */
+ if (vc_handler) {
+ init_idt_data(&data, X86_TRAP_VC, vc_handler);
+ idt_init_desc(&idt_desc, &data);
+ native_write_idt_entry((gate_desc *)desc.address, X86_TRAP_VC, &idt_desc);
+ }
+
+ native_load_idt(&desc);
+}
+
+/*
+ * Setup boot CPU state needed before kernel switches to virtual addresses.
+ */
+void __head startup_64_setup_gdt_idt(void)
+{
+ struct gdt_page *gp = rip_rel_ptr((void *)(__force unsigned long)&gdt_page);
+ void *handler = NULL;
+
+ struct desc_ptr startup_gdt_descr = {
+ .address = (unsigned long)gp->gdt,
+ .size = GDT_SIZE - 1,
+ };
+
+ /* Load GDT */
+ native_load_gdt(&startup_gdt_descr);
+
+ /* New GDT is live - reload data segment registers */
+ asm volatile("movl %%eax, %%ds\n"
+ "movl %%eax, %%ss\n"
+ "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");
+
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
+ handler = rip_rel_ptr(vc_no_ghcb);
+
+ startup_64_load_idt(handler);
+}
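
startup_64_load_idt() installs at most one gate, the #VC handler, before loading the bringup IDT. For reference, a standalone sketch of the architectural 64-bit interrupt-gate layout that native_write_idt_entry() ultimately stores; the struct and field names here are mine, not the kernel's gate_desc, and the 0x10 selector is assumed to stand in for a kernel code segment:

#include <stdint.h>
#include <stdio.h>

/* Architectural 16-byte 64-bit interrupt gate (per the x86-64 manuals). */
struct idt_gate64 {
	uint16_t offset_low;	/* handler bits 15:0  */
	uint16_t selector;	/* code segment selector */
	uint8_t  ist;		/* bits 2:0 = IST index  */
	uint8_t  type_attr;	/* P, DPL, type (0x8e = present interrupt gate) */
	uint16_t offset_mid;	/* handler bits 31:16 */
	uint32_t offset_high;	/* handler bits 63:32 */
	uint32_t reserved;
};

static struct idt_gate64 make_gate(uint64_t handler, uint16_t cs)
{
	struct idt_gate64 g = {
		.offset_low  = handler & 0xffff,
		.selector    = cs,
		.ist         = 0,
		.type_attr   = 0x8e,
		.offset_mid  = (handler >> 16) & 0xffff,
		.offset_high = (uint32_t)(handler >> 32),
	};
	return g;
}

int main(void)
{
	struct idt_gate64 g = make_gate(0xffffffff81234560ULL, 0x10);
	uint64_t back = g.offset_low | ((uint64_t)g.offset_mid << 16) |
			((uint64_t)g.offset_high << 32);

	printf("round-trip handler address: 0x%llx\n", (unsigned long long)back);
	return 0;
}
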
diff --git a/arch/x86/boot/compressed/la57toggle.S b/arch/x86/boot/startup/la57toggle.S
index 9ee002387eb1..370075b4d95b 100644
--- a/arch/x86/boot/compressed/la57toggle.S
+++ b/arch/x86/boot/startup/la57toggle.S
@@ -5,7 +5,6 @@
#include <asm/boot.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
-#include "pgtable.h"
/*
* This is the 32-bit trampoline that will be copied over to low memory. It
diff --git a/arch/x86/boot/startup/map_kernel.c b/arch/x86/boot/startup/map_kernel.c
new file mode 100644
index 000000000000..332dbe6688c4
--- /dev/null
+++ b/arch/x86/boot/startup/map_kernel.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pgtable.h>
+
+#include <asm/init.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/sev.h>
+
+extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
+extern unsigned int next_early_pgt;
+
+static inline bool check_la57_support(void)
+{
+ /*
+ * 5-level paging is detected and enabled at kernel decompression
+ * stage. Only check if it has been enabled there.
+ */
+ if (!(native_read_cr4() & X86_CR4_LA57))
+ return false;
+
+ __pgtable_l5_enabled = 1;
+ pgdir_shift = 48;
+ ptrs_per_p4d = 512;
+
+ return true;
+}
+
+static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
+ pmdval_t *pmd,
+ unsigned long p2v_offset)
+{
+ unsigned long paddr, paddr_end;
+ int i;
+
+ /* Encrypt the kernel and related (if SME is active) */
+ sme_encrypt_kernel(bp);
+
+ /*
+ * Clear the memory encryption mask from the .bss..decrypted section.
+ * The .bss section will be zeroed later during initialization, so
+ * there is no need to zero it after changing the memory encryption
+ * attribute.
+ */
+ if (sme_get_me_mask()) {
+ paddr = (unsigned long)rip_rel_ptr(__start_bss_decrypted);
+ paddr_end = (unsigned long)rip_rel_ptr(__end_bss_decrypted);
+
+ for (; paddr < paddr_end; paddr += PMD_SIZE) {
+ /*
+ * On SNP, transition the page to shared in the RMP table so that
+ * it is consistent with the page table attribute change.
+ *
+ * __start_bss_decrypted has a virtual address in the high range
+ * mapping (kernel .text). PVALIDATE, by way of
+ * early_snp_set_memory_shared(), requires a valid virtual
+ * address but the kernel is currently running off of the identity
+ * mapping so use the PA to get a *currently* valid virtual address.
+ */
+ early_snp_set_memory_shared(paddr, paddr, PTRS_PER_PMD);
+
+ i = pmd_index(paddr - p2v_offset);
+ pmd[i] -= sme_get_me_mask();
+ }
+ }
+
+ /*
+ * Return the SME encryption mask (if SME is active) to be used as a
+ * modifier for the initial pgdir entry programmed into CR3.
+ */
+ return sme_get_me_mask();
+}
+
+/*
+ * This code is compiled using PIC codegen because it will execute from the
+ * early 1:1 mapping of memory, which deviates from the mapping expected by the
+ * linker. Due to this deviation, taking the address of a global variable will
+ * produce an ambiguous result when using the plain & operator. Instead,
+ * rip_rel_ptr() must be used, which will return the RIP-relative address in
+ * the 1:1 mapping of memory. Kernel virtual addresses can be determined by
+ * subtracting p2v_offset from the RIP-relative address.
+ */
+unsigned long __head __startup_64(unsigned long p2v_offset,
+ struct boot_params *bp)
+{
+ pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts);
+ unsigned long physaddr = (unsigned long)rip_rel_ptr(_text);
+ unsigned long va_text, va_end;
+ unsigned long pgtable_flags;
+ unsigned long load_delta;
+ pgdval_t *pgd;
+ p4dval_t *p4d;
+ pudval_t *pud;
+ pmdval_t *pmd, pmd_entry;
+ bool la57;
+ int i;
+
+ la57 = check_la57_support();
+
+ /* Is the address too large? */
+ if (physaddr >> MAX_PHYSMEM_BITS)
+ for (;;);
+
+ /*
+ * Compute the delta between the address I am compiled to run at
+ * and the address I am actually running at.
+ */
+ phys_base = load_delta = __START_KERNEL_map + p2v_offset;
+
+ /* Is the address not 2M aligned? */
+ if (load_delta & ~PMD_MASK)
+ for (;;);
+
+ va_text = physaddr - p2v_offset;
+ va_end = (unsigned long)rip_rel_ptr(_end) - p2v_offset;
+
+ /* Include the SME encryption mask in the fixup value */
+ load_delta += sme_get_me_mask();
+
+ /* Fixup the physical addresses in the page table */
+
+ pgd = rip_rel_ptr(early_top_pgt);
+ pgd[pgd_index(__START_KERNEL_map)] += load_delta;
+
+ if (la57) {
+ p4d = (p4dval_t *)rip_rel_ptr(level4_kernel_pgt);
+ p4d[MAX_PTRS_PER_P4D - 1] += load_delta;
+
+ pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE;
+ }
+
+ level3_kernel_pgt[PTRS_PER_PUD - 2].pud += load_delta;
+ level3_kernel_pgt[PTRS_PER_PUD - 1].pud += load_delta;
+
+ for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
+ level2_fixmap_pgt[i].pmd += load_delta;
+
+ /*
+ * Set up the identity mapping for the switchover. These
+ * entries should *NOT* have the global bit set! This also
+ * creates a bunch of nonsense entries but that is fine --
+ * it avoids problems around wraparound.
+ */
+
+ pud = &early_pgts[0]->pmd;
+ pmd = &early_pgts[1]->pmd;
+ next_early_pgt = 2;
+
+ pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
+
+ if (la57) {
+ p4d = &early_pgts[next_early_pgt++]->pmd;
+
+ i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
+ pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
+ pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
+
+ i = physaddr >> P4D_SHIFT;
+ p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
+ p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
+ } else {
+ i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
+ pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
+ pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
+ }
+
+ i = physaddr >> PUD_SHIFT;
+ pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
+ pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
+
+ pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
+ pmd_entry += sme_get_me_mask();
+ pmd_entry += physaddr;
+
+ for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) {
+ int idx = i + (physaddr >> PMD_SHIFT);
+
+ pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
+ }
+
+ /*
+ * Fixup the kernel text+data virtual addresses. Note that
+ * we might write invalid pmds, when the kernel is relocated
+ * cleanup_highmap() fixes this up along with the mappings
+ * beyond _end.
+ *
+ * Only the region occupied by the kernel image has so far
+ * been checked against the table of usable memory regions
+ * provided by the firmware, so invalidate pages outside that
+ * region. A page table entry that maps to a reserved area of
+ * memory would allow processor speculation into that area,
+ * and on some hardware (particularly the UV platform) even
+ * speculative access to some reserved areas is caught as an
+ * error, causing the BIOS to halt the system.
+ */
+
+ pmd = rip_rel_ptr(level2_kernel_pgt);
+
+ /* invalidate pages before the kernel image */
+ for (i = 0; i < pmd_index(va_text); i++)
+ pmd[i] &= ~_PAGE_PRESENT;
+
+ /* fixup pages that are part of the kernel image */
+ for (; i <= pmd_index(va_end); i++)
+ if (pmd[i] & _PAGE_PRESENT)
+ pmd[i] += load_delta;
+
+ /* invalidate pages after the kernel image */
+ for (; i < PTRS_PER_PMD; i++)
+ pmd[i] &= ~_PAGE_PRESENT;
+
+ return sme_postprocess_startup(bp, pmd, p2v_offset);
+}
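
The index arithmetic in __startup_64() relies on the standard x86-64 page-table shifts (PMD 21, PUD 30, PGDIR 39 for 4-level paging, 512 entries per level) and covers the image with 2 MiB PMD entries. A standalone sketch of that arithmetic, using a hypothetical load address and image size rather than anything from this patch:

#include <stdint.h>
#include <stdio.h>

/* Standard x86-64 4-level paging constants (assumed, not from the patch). */
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PGDIR_SHIFT	39
#define PTRS_PER_TBL	512
#define PMD_SIZE	(1ULL << PMD_SHIFT)

int main(void)
{
	uint64_t physaddr    = 0x1000000;	/* hypothetical 16 MiB load address */
	uint64_t kernel_size = 64ULL << 20;	/* hypothetical 64 MiB image */
	uint64_t nr_pmds = (kernel_size + PMD_SIZE - 1) / PMD_SIZE;

	printf("identity-map slots for load address 0x%llx:\n",
	       (unsigned long long)physaddr);
	printf("  pgd index: %llu\n",
	       (unsigned long long)((physaddr >> PGDIR_SHIFT) % PTRS_PER_TBL));
	printf("  pud index: %llu\n",
	       (unsigned long long)((physaddr >> PUD_SHIFT) % PTRS_PER_TBL));
	printf("  pmd index: %llu\n",
	       (unsigned long long)((physaddr >> PMD_SHIFT) % PTRS_PER_TBL));
	printf("  2 MiB PMD entries covering the image: %llu\n",
	       (unsigned long long)nr_pmds);
	return 0;
}
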
diff --git a/arch/x86/coco/sev/shared.c b/arch/x86/boot/startup/sev-shared.c
index 2e4122f8aa6b..7a706db87b93 100644
--- a/arch/x86/coco/sev/shared.c
+++ b/arch/x86/boot/startup/sev-shared.c
@@ -14,76 +14,23 @@
#ifndef __BOOT_COMPRESSED
#define error(v) pr_err(v)
#define has_cpuflag(f) boot_cpu_has(f)
-#define sev_printk(fmt, ...) printk(fmt, ##__VA_ARGS__)
-#define sev_printk_rtl(fmt, ...) printk_ratelimited(fmt, ##__VA_ARGS__)
#else
#undef WARN
#define WARN(condition, format...) (!!(condition))
-#define sev_printk(fmt, ...)
-#define sev_printk_rtl(fmt, ...)
#undef vc_forward_exception
#define vc_forward_exception(c) panic("SNP: Hypervisor requested exception\n")
#endif
/*
* SVSM related information:
- * When running under an SVSM, the VMPL that Linux is executing at must be
- * non-zero. The VMPL is therefore used to indicate the presence of an SVSM.
- *
* During boot, the page tables are set up as identity mapped and later
* changed to use kernel virtual addresses. Maintain separate virtual and
* physical addresses for the CAA to allow SVSM functions to be used during
* early boot, both with identity mapped virtual addresses and proper kernel
* virtual addresses.
*/
-u8 snp_vmpl __ro_after_init;
-EXPORT_SYMBOL_GPL(snp_vmpl);
-static struct svsm_ca *boot_svsm_caa __ro_after_init;
-static u64 boot_svsm_caa_pa __ro_after_init;
-
-static struct svsm_ca *svsm_get_caa(void);
-static u64 svsm_get_caa_pa(void);
-static int svsm_perform_call_protocol(struct svsm_call *call);
-
-/* I/O parameters for CPUID-related helpers */
-struct cpuid_leaf {
- u32 fn;
- u32 subfn;
- u32 eax;
- u32 ebx;
- u32 ecx;
- u32 edx;
-};
-
-/*
- * Individual entries of the SNP CPUID table, as defined by the SNP
- * Firmware ABI, Revision 0.9, Section 7.1, Table 14.
- */
-struct snp_cpuid_fn {
- u32 eax_in;
- u32 ecx_in;
- u64 xcr0_in;
- u64 xss_in;
- u32 eax;
- u32 ebx;
- u32 ecx;
- u32 edx;
- u64 __reserved;
-} __packed;
-
-/*
- * SNP CPUID table, as defined by the SNP Firmware ABI, Revision 0.9,
- * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit
- * of 64 entries per CPUID table.
- */
-#define SNP_CPUID_COUNT_MAX 64
-
-struct snp_cpuid_table {
- u32 count;
- u32 __reserved1;
- u64 __reserved2;
- struct snp_cpuid_fn fn[SNP_CPUID_COUNT_MAX];
-} __packed;
+struct svsm_ca *boot_svsm_caa __ro_after_init;
+u64 boot_svsm_caa_pa __ro_after_init;
/*
* Since feature negotiation related variables are set early in the boot
@@ -107,7 +54,7 @@ static u32 cpuid_std_range_max __ro_after_init;
static u32 cpuid_hyp_range_max __ro_after_init;
static u32 cpuid_ext_range_max __ro_after_init;
-static bool __init sev_es_check_cpu_features(void)
+bool __init sev_es_check_cpu_features(void)
{
if (!has_cpuflag(X86_FEATURE_RDRAND)) {
error("RDRAND instruction not supported - no trusted source of randomness available\n");
@@ -117,7 +64,7 @@ static bool __init sev_es_check_cpu_features(void)
return true;
}
-static void __head __noreturn
+void __head __noreturn
sev_es_terminate(unsigned int set, unsigned int reason)
{
u64 val = GHCB_MSR_TERM_REQ;
@@ -136,7 +83,7 @@ sev_es_terminate(unsigned int set, unsigned int reason)
/*
* The hypervisor features are available from GHCB version 2 onward.
*/
-static u64 get_hv_features(void)
+u64 get_hv_features(void)
{
u64 val;
@@ -153,7 +100,7 @@ static u64 get_hv_features(void)
return GHCB_MSR_HV_FT_RESP_VAL(val);
}
-static void snp_register_ghcb_early(unsigned long paddr)
+void snp_register_ghcb_early(unsigned long paddr)
{
unsigned long pfn = paddr >> PAGE_SHIFT;
u64 val;
@@ -169,7 +116,7 @@ static void snp_register_ghcb_early(unsigned long paddr)
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_REGISTER);
}
-static bool sev_es_negotiate_protocol(void)
+bool sev_es_negotiate_protocol(void)
{
u64 val;
@@ -190,39 +137,6 @@ static bool sev_es_negotiate_protocol(void)
return true;
}
-static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
-{
- ghcb->save.sw_exit_code = 0;
- __builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
-}
-
-static bool vc_decoding_needed(unsigned long exit_code)
-{
- /* Exceptions don't require to decode the instruction */
- return !(exit_code >= SVM_EXIT_EXCP_BASE &&
- exit_code <= SVM_EXIT_LAST_EXCP);
-}
-
-static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
- struct pt_regs *regs,
- unsigned long exit_code)
-{
- enum es_result ret = ES_OK;
-
- memset(ctxt, 0, sizeof(*ctxt));
- ctxt->regs = regs;
-
- if (vc_decoding_needed(exit_code))
- ret = vc_decode_insn(ctxt);
-
- return ret;
-}
-
-static void vc_finish_insn(struct es_em_ctxt *ctxt)
-{
- ctxt->regs->ip += ctxt->insn.length;
-}
-
static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
u32 ret;
@@ -344,7 +258,7 @@ static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
* Fill in protocol and format specifiers. This can be called very early
* in the boot, so use rip-relative references as needed.
*/
- ghcb->protocol_version = RIP_REL_REF(ghcb_version);
+ ghcb->protocol_version = ghcb_version;
ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
@@ -371,10 +285,10 @@ static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
return svsm_process_result_codes(call);
}
-static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt,
- u64 exit_code, u64 exit_info_1,
- u64 exit_info_2)
+enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt,
+ u64 exit_code, u64 exit_info_1,
+ u64 exit_info_2)
{
/* Fill in protocol and format specifiers */
ghcb->protocol_version = ghcb_version;
@@ -473,9 +387,9 @@ static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid
* while running with the initial identity mapping as well as the
* switch-over to kernel virtual addresses later.
*/
-static const struct snp_cpuid_table *snp_cpuid_get_table(void)
+const struct snp_cpuid_table *snp_cpuid_get_table(void)
{
- return &RIP_REL_REF(cpuid_table_copy);
+ return rip_rel_ptr(&cpuid_table_copy);
}
/*
@@ -672,7 +586,7 @@ snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
* Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
* should be treated as fatal by caller.
*/
-static int __head
+int __head
snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
@@ -701,9 +615,9 @@ snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;
/* Skip post-processing for out-of-range zero leafs. */
- if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
- (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
- (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
+ if (!(leaf->fn <= cpuid_std_range_max ||
+ (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
+ (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
return 0;
}
@@ -782,391 +696,6 @@ fail:
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}
-static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
- unsigned long address,
- bool write)
-{
- if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.error_code = X86_PF_USER;
- ctxt->fi.cr2 = address;
- if (write)
- ctxt->fi.error_code |= X86_PF_WRITE;
-
- return ES_EXCEPTION;
- }
-
- return ES_OK;
-}
-
-static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
- void *src, char *buf,
- unsigned int data_size,
- unsigned int count,
- bool backwards)
-{
- int i, b = backwards ? -1 : 1;
- unsigned long address = (unsigned long)src;
- enum es_result ret;
-
- ret = vc_insn_string_check(ctxt, address, false);
- if (ret != ES_OK)
- return ret;
-
- for (i = 0; i < count; i++) {
- void *s = src + (i * data_size * b);
- char *d = buf + (i * data_size);
-
- ret = vc_read_mem(ctxt, s, d, data_size);
- if (ret != ES_OK)
- break;
- }
-
- return ret;
-}
-
-static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
- void *dst, char *buf,
- unsigned int data_size,
- unsigned int count,
- bool backwards)
-{
- int i, s = backwards ? -1 : 1;
- unsigned long address = (unsigned long)dst;
- enum es_result ret;
-
- ret = vc_insn_string_check(ctxt, address, true);
- if (ret != ES_OK)
- return ret;
-
- for (i = 0; i < count; i++) {
- void *d = dst + (i * data_size * s);
- char *b = buf + (i * data_size);
-
- ret = vc_write_mem(ctxt, d, b, data_size);
- if (ret != ES_OK)
- break;
- }
-
- return ret;
-}
-
-#define IOIO_TYPE_STR BIT(2)
-#define IOIO_TYPE_IN 1
-#define IOIO_TYPE_INS (IOIO_TYPE_IN | IOIO_TYPE_STR)
-#define IOIO_TYPE_OUT 0
-#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR)
-
-#define IOIO_REP BIT(3)
-
-#define IOIO_ADDR_64 BIT(9)
-#define IOIO_ADDR_32 BIT(8)
-#define IOIO_ADDR_16 BIT(7)
-
-#define IOIO_DATA_32 BIT(6)
-#define IOIO_DATA_16 BIT(5)
-#define IOIO_DATA_8 BIT(4)
-
-#define IOIO_SEG_ES (0 << 10)
-#define IOIO_SEG_DS (3 << 10)
-
-static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
-{
- struct insn *insn = &ctxt->insn;
- size_t size;
- u64 port;
-
- *exitinfo = 0;
-
- switch (insn->opcode.bytes[0]) {
- /* INS opcodes */
- case 0x6c:
- case 0x6d:
- *exitinfo |= IOIO_TYPE_INS;
- *exitinfo |= IOIO_SEG_ES;
- port = ctxt->regs->dx & 0xffff;
- break;
-
- /* OUTS opcodes */
- case 0x6e:
- case 0x6f:
- *exitinfo |= IOIO_TYPE_OUTS;
- *exitinfo |= IOIO_SEG_DS;
- port = ctxt->regs->dx & 0xffff;
- break;
-
- /* IN immediate opcodes */
- case 0xe4:
- case 0xe5:
- *exitinfo |= IOIO_TYPE_IN;
- port = (u8)insn->immediate.value & 0xffff;
- break;
-
- /* OUT immediate opcodes */
- case 0xe6:
- case 0xe7:
- *exitinfo |= IOIO_TYPE_OUT;
- port = (u8)insn->immediate.value & 0xffff;
- break;
-
- /* IN register opcodes */
- case 0xec:
- case 0xed:
- *exitinfo |= IOIO_TYPE_IN;
- port = ctxt->regs->dx & 0xffff;
- break;
-
- /* OUT register opcodes */
- case 0xee:
- case 0xef:
- *exitinfo |= IOIO_TYPE_OUT;
- port = ctxt->regs->dx & 0xffff;
- break;
-
- default:
- return ES_DECODE_FAILED;
- }
-
- *exitinfo |= port << 16;
-
- switch (insn->opcode.bytes[0]) {
- case 0x6c:
- case 0x6e:
- case 0xe4:
- case 0xe6:
- case 0xec:
- case 0xee:
- /* Single byte opcodes */
- *exitinfo |= IOIO_DATA_8;
- size = 1;
- break;
- default:
- /* Length determined by instruction parsing */
- *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
- : IOIO_DATA_32;
- size = (insn->opnd_bytes == 2) ? 2 : 4;
- }
-
- switch (insn->addr_bytes) {
- case 2:
- *exitinfo |= IOIO_ADDR_16;
- break;
- case 4:
- *exitinfo |= IOIO_ADDR_32;
- break;
- case 8:
- *exitinfo |= IOIO_ADDR_64;
- break;
- }
-
- if (insn_has_rep_prefix(insn))
- *exitinfo |= IOIO_REP;
-
- return vc_ioio_check(ctxt, (u16)port, size);
-}
-
-static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
-{
- struct pt_regs *regs = ctxt->regs;
- u64 exit_info_1, exit_info_2;
- enum es_result ret;
-
- ret = vc_ioio_exitinfo(ctxt, &exit_info_1);
- if (ret != ES_OK)
- return ret;
-
- if (exit_info_1 & IOIO_TYPE_STR) {
-
- /* (REP) INS/OUTS */
-
- bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF);
- unsigned int io_bytes, exit_bytes;
- unsigned int ghcb_count, op_count;
- unsigned long es_base;
- u64 sw_scratch;
-
- /*
- * For the string variants with rep prefix the amount of in/out
- * operations per #VC exception is limited so that the kernel
- * has a chance to take interrupts and re-schedule while the
- * instruction is emulated.
- */
- io_bytes = (exit_info_1 >> 4) & 0x7;
- ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes;
-
- op_count = (exit_info_1 & IOIO_REP) ? regs->cx : 1;
- exit_info_2 = min(op_count, ghcb_count);
- exit_bytes = exit_info_2 * io_bytes;
-
- es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
-
- /* Read bytes of OUTS into the shared buffer */
- if (!(exit_info_1 & IOIO_TYPE_IN)) {
- ret = vc_insn_string_read(ctxt,
- (void *)(es_base + regs->si),
- ghcb->shared_buffer, io_bytes,
- exit_info_2, df);
- if (ret)
- return ret;
- }
-
- /*
- * Issue an VMGEXIT to the HV to consume the bytes from the
- * shared buffer or to have it write them into the shared buffer
- * depending on the instruction: OUTS or INS.
- */
- sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
- ghcb_set_sw_scratch(ghcb, sw_scratch);
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
- exit_info_1, exit_info_2);
- if (ret != ES_OK)
- return ret;
-
- /* Read bytes from shared buffer into the guest's destination. */
- if (exit_info_1 & IOIO_TYPE_IN) {
- ret = vc_insn_string_write(ctxt,
- (void *)(es_base + regs->di),
- ghcb->shared_buffer, io_bytes,
- exit_info_2, df);
- if (ret)
- return ret;
-
- if (df)
- regs->di -= exit_bytes;
- else
- regs->di += exit_bytes;
- } else {
- if (df)
- regs->si -= exit_bytes;
- else
- regs->si += exit_bytes;
- }
-
- if (exit_info_1 & IOIO_REP)
- regs->cx -= exit_info_2;
-
- ret = regs->cx ? ES_RETRY : ES_OK;
-
- } else {
-
- /* IN/OUT into/from rAX */
-
- int bits = (exit_info_1 & 0x70) >> 1;
- u64 rax = 0;
-
- if (!(exit_info_1 & IOIO_TYPE_IN))
- rax = lower_bits(regs->ax, bits);
-
- ghcb_set_rax(ghcb, rax);
-
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
- if (ret != ES_OK)
- return ret;
-
- if (exit_info_1 & IOIO_TYPE_IN) {
- if (!ghcb_rax_is_valid(ghcb))
- return ES_VMM_ERROR;
- regs->ax = lower_bits(ghcb->save.rax, bits);
- }
- }
-
- return ret;
-}
-
-static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
-{
- struct pt_regs *regs = ctxt->regs;
- struct cpuid_leaf leaf;
- int ret;
-
- leaf.fn = regs->ax;
- leaf.subfn = regs->cx;
- ret = snp_cpuid(ghcb, ctxt, &leaf);
- if (!ret) {
- regs->ax = leaf.eax;
- regs->bx = leaf.ebx;
- regs->cx = leaf.ecx;
- regs->dx = leaf.edx;
- }
-
- return ret;
-}
-
-static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- struct pt_regs *regs = ctxt->regs;
- u32 cr4 = native_read_cr4();
- enum es_result ret;
- int snp_cpuid_ret;
-
- snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt);
- if (!snp_cpuid_ret)
- return ES_OK;
- if (snp_cpuid_ret != -EOPNOTSUPP)
- return ES_VMM_ERROR;
-
- ghcb_set_rax(ghcb, regs->ax);
- ghcb_set_rcx(ghcb, regs->cx);
-
- if (cr4 & X86_CR4_OSXSAVE)
- /* Safe to read xcr0 */
- ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
- else
- /* xgetbv will cause #GP - use reset value for xcr0 */
- ghcb_set_xcr0(ghcb, 1);
-
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
- if (ret != ES_OK)
- return ret;
-
- if (!(ghcb_rax_is_valid(ghcb) &&
- ghcb_rbx_is_valid(ghcb) &&
- ghcb_rcx_is_valid(ghcb) &&
- ghcb_rdx_is_valid(ghcb)))
- return ES_VMM_ERROR;
-
- regs->ax = ghcb->save.rax;
- regs->bx = ghcb->save.rbx;
- regs->cx = ghcb->save.rcx;
- regs->dx = ghcb->save.rdx;
-
- return ES_OK;
-}
-
-static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt,
- unsigned long exit_code)
-{
- bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
- enum es_result ret;
-
- /*
- * The hypervisor should not be intercepting RDTSC/RDTSCP when Secure
- * TSC is enabled. A #VC exception will be generated if the RDTSC/RDTSCP
- * instructions are being intercepted. If this should occur and Secure
- * TSC is enabled, guest execution should be terminated as the guest
- * cannot rely on the TSC value provided by the hypervisor.
- */
- if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
- return ES_VMM_ERROR;
-
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
- if (ret != ES_OK)
- return ret;
-
- if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) &&
- (!rdtscp || ghcb_rcx_is_valid(ghcb))))
- return ES_VMM_ERROR;
-
- ctxt->regs->ax = ghcb->save.rax;
- ctxt->regs->dx = ghcb->save.rdx;
- if (rdtscp)
- ctxt->regs->cx = ghcb->save.rcx;
-
- return ES_OK;
-}
-
struct cc_setup_data {
struct setup_data header;
u32 cc_blob_address;
@@ -1224,36 +753,14 @@ static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
if (fn->eax_in == 0x0)
- RIP_REL_REF(cpuid_std_range_max) = fn->eax;
+ cpuid_std_range_max = fn->eax;
else if (fn->eax_in == 0x40000000)
- RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
+ cpuid_hyp_range_max = fn->eax;
else if (fn->eax_in == 0x80000000)
- RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
+ cpuid_ext_range_max = fn->eax;
}
}
-static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
- int ret, u64 svsm_ret)
-{
- WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
- pfn, action, page_size, ret, svsm_ret);
-
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
-}
-
-static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
-{
- unsigned int page_size;
- bool action;
- u64 pfn;
-
- pfn = pc->entry[pc->cur_index].pfn;
- action = pc->entry[pc->cur_index].action;
- page_size = pc->entry[pc->cur_index].page_size;
-
- __pval_terminate(pfn, action, page_size, ret, svsm_ret);
-}
-
static void __head svsm_pval_4k_page(unsigned long paddr, bool validate)
{
struct svsm_pvalidate_call *pc;
@@ -1296,11 +803,7 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
{
int ret;
- /*
- * This can be called very early during boot, so use rIP-relative
- * references as needed.
- */
- if (RIP_REL_REF(snp_vmpl)) {
+ if (snp_vmpl) {
svsm_pval_4k_page(paddr, validate);
} else {
ret = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
@@ -1309,351 +812,6 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
}
}
-static void pval_pages(struct snp_psc_desc *desc)
-{
- struct psc_entry *e;
- unsigned long vaddr;
- unsigned int size;
- unsigned int i;
- bool validate;
- u64 pfn;
- int rc;
-
- for (i = 0; i <= desc->hdr.end_entry; i++) {
- e = &desc->entries[i];
-
- pfn = e->gfn;
- vaddr = (unsigned long)pfn_to_kaddr(pfn);
- size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
- validate = e->operation == SNP_PAGE_STATE_PRIVATE;
-
- rc = pvalidate(vaddr, size, validate);
- if (!rc)
- continue;
-
- if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
- unsigned long vaddr_end = vaddr + PMD_SIZE;
-
- for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) {
- rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
- if (rc)
- __pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc, 0);
- }
- } else {
- __pval_terminate(pfn, validate, size, rc, 0);
- }
- }
-}
-
-static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
- struct svsm_pvalidate_call *pc)
-{
- struct svsm_pvalidate_entry *pe;
-
- /* Nothing in the CA yet */
- pc->num_entries = 0;
- pc->cur_index = 0;
-
- pe = &pc->entry[0];
-
- while (pfn < pfn_end) {
- pe->page_size = RMP_PG_SIZE_4K;
- pe->action = action;
- pe->ignore_cf = 0;
- pe->pfn = pfn;
-
- pe++;
- pfn++;
-
- pc->num_entries++;
- if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
- break;
- }
-
- return pfn;
-}
-
-static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
- struct svsm_pvalidate_call *pc)
-{
- struct svsm_pvalidate_entry *pe;
- struct psc_entry *e;
-
- /* Nothing in the CA yet */
- pc->num_entries = 0;
- pc->cur_index = 0;
-
- pe = &pc->entry[0];
- e = &desc->entries[desc_entry];
-
- while (desc_entry <= desc->hdr.end_entry) {
- pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
- pe->action = e->operation == SNP_PAGE_STATE_PRIVATE;
- pe->ignore_cf = 0;
- pe->pfn = e->gfn;
-
- pe++;
- e++;
-
- desc_entry++;
- pc->num_entries++;
- if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
- break;
- }
-
- return desc_entry;
-}
-
-static void svsm_pval_pages(struct snp_psc_desc *desc)
-{
- struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
- unsigned int i, pv_4k_count = 0;
- struct svsm_pvalidate_call *pc;
- struct svsm_call call = {};
- unsigned long flags;
- bool action;
- u64 pc_pa;
- int ret;
-
- /*
- * This can be called very early in the boot, use native functions in
- * order to avoid paravirt issues.
- */
- flags = native_local_irq_save();
-
- /*
- * The SVSM calling area (CA) can support processing 510 entries at a
- * time. Loop through the Page State Change descriptor until the CA is
- * full or the last entry in the descriptor is reached, at which time
- * the SVSM is invoked. This repeats until all entries in the descriptor
- * are processed.
- */
- call.caa = svsm_get_caa();
-
- pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
- pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
-
- /* Protocol 0, Call ID 1 */
- call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
- call.rcx = pc_pa;
-
- for (i = 0; i <= desc->hdr.end_entry;) {
- i = svsm_build_ca_from_psc_desc(desc, i, pc);
-
- do {
- ret = svsm_perform_call_protocol(&call);
- if (!ret)
- continue;
-
- /*
- * Check if the entry failed because of an RMP mismatch (a
- * PVALIDATE at 2M was requested, but the page is mapped in
- * the RMP as 4K).
- */
-
- if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
- pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
- /* Save this entry for post-processing at 4K */
- pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
-
- /* Skip to the next one unless at the end of the list */
- pc->cur_index++;
- if (pc->cur_index < pc->num_entries)
- ret = -EAGAIN;
- else
- ret = 0;
- }
- } while (ret == -EAGAIN);
-
- if (ret)
- svsm_pval_terminate(pc, ret, call.rax_out);
- }
-
- /* Process any entries that failed to be validated at 2M and validate them at 4K */
- for (i = 0; i < pv_4k_count; i++) {
- u64 pfn, pfn_end;
-
- action = pv_4k[i].action;
- pfn = pv_4k[i].pfn;
- pfn_end = pfn + 512;
-
- while (pfn < pfn_end) {
- pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);
-
- ret = svsm_perform_call_protocol(&call);
- if (ret)
- svsm_pval_terminate(pc, ret, call.rax_out);
- }
- }
-
- native_local_irq_restore(flags);
-}
-
-static void pvalidate_pages(struct snp_psc_desc *desc)
-{
- if (snp_vmpl)
- svsm_pval_pages(desc);
- else
- pval_pages(desc);
-}
-
-static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
-{
- int cur_entry, end_entry, ret = 0;
- struct snp_psc_desc *data;
- struct es_em_ctxt ctxt;
-
- vc_ghcb_invalidate(ghcb);
-
- /* Copy the input desc into GHCB shared buffer */
- data = (struct snp_psc_desc *)ghcb->shared_buffer;
- memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
-
- /*
- * As per the GHCB specification, the hypervisor can resume the guest
- * before processing all the entries. Check whether all the entries
- * are processed. If not, then keep retrying. Note, the hypervisor
- * will update the data memory directly to indicate the status, so
- * reference the data->hdr everywhere.
- *
- * The strategy here is to wait for the hypervisor to change the page
- * state in the RMP table before guest accesses the memory pages. If the
- * page state change was not successful, then later memory access will
- * result in a crash.
- */
- cur_entry = data->hdr.cur_entry;
- end_entry = data->hdr.end_entry;
-
- while (data->hdr.cur_entry <= data->hdr.end_entry) {
- ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
-
- /* This will advance the shared buffer data points to. */
- ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
-
- /*
- * Page State Change VMGEXIT can pass error code through
- * exit_info_2.
- */
- if (WARN(ret || ghcb->save.sw_exit_info_2,
- "SNP: PSC failed ret=%d exit_info_2=%llx\n",
- ret, ghcb->save.sw_exit_info_2)) {
- ret = 1;
- goto out;
- }
-
- /* Verify that reserved bit is not set */
- if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
- ret = 1;
- goto out;
- }
-
- /*
- * Sanity check that entry processing is not going backwards.
- * This will happen only if hypervisor is tricking us.
- */
- if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
-"SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
- end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
- ret = 1;
- goto out;
- }
- }
-
-out:
- return ret;
-}
-
-static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
- unsigned long exit_code)
-{
- unsigned int opcode = (unsigned int)ctxt->insn.opcode.value;
- u8 modrm = ctxt->insn.modrm.value;
-
- switch (exit_code) {
-
- case SVM_EXIT_IOIO:
- case SVM_EXIT_NPF:
- /* handled separately */
- return ES_OK;
-
- case SVM_EXIT_CPUID:
- if (opcode == 0xa20f)
- return ES_OK;
- break;
-
- case SVM_EXIT_INVD:
- if (opcode == 0x080f)
- return ES_OK;
- break;
-
- case SVM_EXIT_MONITOR:
- /* MONITOR and MONITORX instructions generate the same error code */
- if (opcode == 0x010f && (modrm == 0xc8 || modrm == 0xfa))
- return ES_OK;
- break;
-
- case SVM_EXIT_MWAIT:
- /* MWAIT and MWAITX instructions generate the same error code */
- if (opcode == 0x010f && (modrm == 0xc9 || modrm == 0xfb))
- return ES_OK;
- break;
-
- case SVM_EXIT_MSR:
- /* RDMSR */
- if (opcode == 0x320f ||
- /* WRMSR */
- opcode == 0x300f)
- return ES_OK;
- break;
-
- case SVM_EXIT_RDPMC:
- if (opcode == 0x330f)
- return ES_OK;
- break;
-
- case SVM_EXIT_RDTSC:
- if (opcode == 0x310f)
- return ES_OK;
- break;
-
- case SVM_EXIT_RDTSCP:
- if (opcode == 0x010f && modrm == 0xf9)
- return ES_OK;
- break;
-
- case SVM_EXIT_READ_DR7:
- if (opcode == 0x210f &&
- X86_MODRM_REG(ctxt->insn.modrm.value) == 7)
- return ES_OK;
- break;
-
- case SVM_EXIT_VMMCALL:
- if (opcode == 0x010f && modrm == 0xd9)
- return ES_OK;
-
- break;
-
- case SVM_EXIT_WRITE_DR7:
- if (opcode == 0x230f &&
- X86_MODRM_REG(ctxt->insn.modrm.value) == 7)
- return ES_OK;
- break;
-
- case SVM_EXIT_WBINVD:
- if (opcode == 0x90f)
- return ES_OK;
- break;
-
- default:
- break;
- }
-
- sev_printk(KERN_ERR "Wrong/unhandled opcode bytes: 0x%x, exit_code: 0x%lx, rIP: 0x%lx\n",
- opcode, exit_code, ctxt->regs->ip);
-
- return ES_UNSUPPORTED;
-}
-
/*
* Maintain the GPA of the SVSM Calling Area (CA) in order to utilize the SVSM
* services needed when not running in VMPL0.
@@ -1681,7 +839,7 @@ static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
* routine is running identity mapped when called, both by the decompressor
* code and the early kernel code.
*/
- if (!rmpadjust((unsigned long)&RIP_REL_REF(boot_ghcb_page), RMP_PG_SIZE_4K, 1))
+ if (!rmpadjust((unsigned long)rip_rel_ptr(&boot_ghcb_page), RMP_PG_SIZE_4K, 1))
return false;
/*
@@ -1698,7 +856,7 @@ static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
if (!secrets_page->svsm_guest_vmpl)
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_VMPL0);
- RIP_REL_REF(snp_vmpl) = secrets_page->svsm_guest_vmpl;
+ snp_vmpl = secrets_page->svsm_guest_vmpl;
caa = secrets_page->svsm_caa;
@@ -1713,8 +871,8 @@ static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
* The CA is identity mapped when this routine is called, both by the
* decompressor code and the early kernel code.
*/
- RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)caa;
- RIP_REL_REF(boot_svsm_caa_pa) = caa;
+ boot_svsm_caa = (struct svsm_ca *)caa;
+ boot_svsm_caa_pa = caa;
/* Advertise the SVSM presence via CPUID. */
cpuid_table = (struct snp_cpuid_table *)snp_cpuid_get_table();
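
snp_cpuid(), earlier in this file, only forwards a zero-filled leaf for post-processing when it falls inside one of the three ranges advertised by the SNP CPUID table; anything outside those ranges is answered with all-zero registers. A standalone restatement of that range check, with hypothetical *_range_max values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the std/hyp/ext range check in snp_cpuid(). */
static bool leaf_in_advertised_range(uint32_t fn, uint32_t std_max,
				     uint32_t hyp_max, uint32_t ext_max)
{
	return fn <= std_max ||
	       (fn >= 0x40000000 && fn <= hyp_max) ||
	       (fn >= 0x80000000 && fn <= ext_max);
}

int main(void)
{
	uint32_t std_max = 0x20, hyp_max = 0x40000001, ext_max = 0x80000021;
	uint32_t leaves[] = { 0x1, 0x21, 0x40000000, 0x4fffffff, 0x8000001f };

	for (unsigned int i = 0; i < sizeof(leaves) / sizeof(leaves[0]); i++)
		printf("leaf 0x%08x: %s\n", leaves[i],
		       leaf_in_advertised_range(leaves[i], std_max, hyp_max, ext_max)
		       ? "forwarded for SNP CPUID handling" : "zeroed (out of range)");
	return 0;
}
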
diff --git a/arch/x86/boot/startup/sev-startup.c b/arch/x86/boot/startup/sev-startup.c
new file mode 100644
index 000000000000..0b7e3b950183
--- /dev/null
+++ b/arch/x86/boot/startup/sev-startup.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2019 SUSE
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#define pr_fmt(fmt) "SEV: " fmt
+
+#include <linux/percpu-defs.h>
+#include <linux/cc_platform.h>
+#include <linux/printk.h>
+#include <linux/mm_types.h>
+#include <linux/set_memory.h>
+#include <linux/memblock.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/cpumask.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/psp-sev.h>
+#include <uapi/linux/sev-guest.h>
+
+#include <asm/init.h>
+#include <asm/cpu_entry_area.h>
+#include <asm/stacktrace.h>
+#include <asm/sev.h>
+#include <asm/sev-internal.h>
+#include <asm/insn-eval.h>
+#include <asm/fpu/xcr.h>
+#include <asm/processor.h>
+#include <asm/realmode.h>
+#include <asm/setup.h>
+#include <asm/traps.h>
+#include <asm/svm.h>
+#include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/apic.h>
+#include <asm/cpuid/api.h>
+#include <asm/cmdline.h>
+
+/* For early boot hypervisor communication in SEV-ES enabled guests */
+struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
+
+/*
+ * Needs to be in the .data section because we need it NULL before bss is
+ * cleared
+ */
+struct ghcb *boot_ghcb __section(".data");
+
+/* Bitmap of SEV features supported by the hypervisor */
+u64 sev_hv_features __ro_after_init;
+
+/* Secrets page physical address from the CC blob */
+u64 sev_secrets_pa __ro_after_init;
+
+/* For early boot SVSM communication */
+struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
+
+DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
+DEFINE_PER_CPU(u64, svsm_caa_pa);
+
+/*
+ * Nothing shall interrupt this code path while holding the per-CPU
+ * GHCB. The backup GHCB is only for NMIs interrupting this path.
+ *
+ * Callers must disable local interrupts around it.
+ */
+noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
+{
+ struct sev_es_runtime_data *data;
+ struct ghcb *ghcb;
+
+ WARN_ON(!irqs_disabled());
+
+ data = this_cpu_read(runtime_data);
+ ghcb = &data->ghcb_page;
+
+ if (unlikely(data->ghcb_active)) {
+ /* GHCB is already in use - save its contents */
+
+ if (unlikely(data->backup_ghcb_active)) {
+ /*
+ * Backup-GHCB is also already in use. There is no way
+ * to continue here so just kill the machine. To make
+ * panic() work, mark GHCBs inactive so that messages
+ * can be printed out.
+ */
+ data->ghcb_active = false;
+ data->backup_ghcb_active = false;
+
+ instrumentation_begin();
+ panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+ instrumentation_end();
+ }
+
+ /* Mark backup_ghcb active before writing to it */
+ data->backup_ghcb_active = true;
+
+ state->ghcb = &data->backup_ghcb;
+
+ /* Backup GHCB content */
+ *state->ghcb = *ghcb;
+ } else {
+ state->ghcb = NULL;
+ data->ghcb_active = true;
+ }
+
+ return ghcb;
+}
+
+/* Include code shared with pre-decompression boot stage */
+#include "sev-shared.c"
+
+noinstr void __sev_put_ghcb(struct ghcb_state *state)
+{
+ struct sev_es_runtime_data *data;
+ struct ghcb *ghcb;
+
+ WARN_ON(!irqs_disabled());
+
+ data = this_cpu_read(runtime_data);
+ ghcb = &data->ghcb_page;
+
+ if (state->ghcb) {
+ /* Restore GHCB from Backup */
+ *ghcb = *state->ghcb;
+ data->backup_ghcb_active = false;
+ state->ghcb = NULL;
+ } else {
+ /*
+ * Invalidate the GHCB so a VMGEXIT instruction issued
+ * from userspace won't appear to be valid.
+ */
+ vc_ghcb_invalidate(ghcb);
+ data->ghcb_active = false;
+ }
+}
+
+int svsm_perform_call_protocol(struct svsm_call *call)
+{
+ struct ghcb_state state;
+ unsigned long flags;
+ struct ghcb *ghcb;
+ int ret;
+
+ /*
+ * This can be called very early in the boot, so use native functions in
+ * order to avoid paravirt issues.
+ */
+ flags = native_local_irq_save();
+
+ if (sev_cfg.ghcbs_initialized)
+ ghcb = __sev_get_ghcb(&state);
+ else if (boot_ghcb)
+ ghcb = boot_ghcb;
+ else
+ ghcb = NULL;
+
+ do {
+ ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
+ : svsm_perform_msr_protocol(call);
+ } while (ret == -EAGAIN);
+
+ if (sev_cfg.ghcbs_initialized)
+ __sev_put_ghcb(&state);
+
+ native_local_irq_restore(flags);
+
+ return ret;
+}
+
+void __head
+early_set_pages_state(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages, enum psc_op op)
+{
+ unsigned long paddr_end;
+ u64 val;
+
+ vaddr = vaddr & PAGE_MASK;
+
+ paddr = paddr & PAGE_MASK;
+ paddr_end = paddr + (npages << PAGE_SHIFT);
+
+ while (paddr < paddr_end) {
+ /* Page validation must be rescinded before changing to shared */
+ if (op == SNP_PAGE_STATE_SHARED)
+ pvalidate_4k_page(vaddr, paddr, false);
+
+ /*
+ * Use the MSR protocol because this function can be called before
+ * the GHCB is established.
+ */
+ sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
+ VMGEXIT();
+
+ val = sev_es_rd_ghcb_msr();
+
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
+ goto e_term;
+
+ if (GHCB_MSR_PSC_RESP_VAL(val))
+ goto e_term;
+
+ /* Page validation must be performed after changing to private */
+ if (op == SNP_PAGE_STATE_PRIVATE)
+ pvalidate_4k_page(vaddr, paddr, true);
+
+ vaddr += PAGE_SIZE;
+ paddr += PAGE_SIZE;
+ }
+
+ return;
+
+e_term:
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+}
+
+void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages)
+{
+ /*
+ * This can be invoked in early boot while running identity mapped, so
+ * use an open coded check for SNP instead of using cc_platform_has().
+ * This eliminates worries about jump tables or checking boot_cpu_data
+ * in the cc_platform_has() function.
+ */
+ if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+ return;
+
+ /*
+ * Ask the hypervisor to mark the memory pages as private in the RMP
+ * table.
+ */
+ early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
+}
+
+void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages)
+{
+ /*
+ * This can be invoked in early boot while running identity mapped, so
+ * use an open coded check for SNP instead of using cc_platform_has().
+ * This eliminates worries about jump tables or checking boot_cpu_data
+ * in the cc_platform_has() function.
+ */
+ if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+ return;
+
+ /* Ask hypervisor to mark the memory pages shared in the RMP table. */
+ early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
+}
+
+/*
+ * Initial set up of SNP relies on information provided by the
+ * Confidential Computing blob, which can be passed to the kernel
+ * in the following ways, depending on how it is booted:
+ *
+ * - when booted via the boot/decompress kernel:
+ * - via boot_params
+ *
+ * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
+ * - via a setup_data entry, as defined by the Linux Boot Protocol
+ *
+ * Scan for the blob in that order.
+ */
+static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
+{
+ struct cc_blob_sev_info *cc_info;
+
+ /* Boot kernel would have passed the CC blob via boot_params. */
+ if (bp->cc_blob_address) {
+ cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
+ goto found_cc_info;
+ }
+
+ /*
+ * If kernel was booted directly, without the use of the
+ * boot/decompression kernel, the CC blob may have been passed via
+ * setup_data instead.
+ */
+ cc_info = find_cc_blob_setup_data(bp);
+ if (!cc_info)
+ return NULL;
+
+found_cc_info:
+ if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
+ snp_abort();
+
+ return cc_info;
+}
+
+static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
+{
+ struct svsm_call call = {};
+ int ret;
+ u64 pa;
+
+ /*
+ * Record the SVSM Calling Area address (CAA) if the guest is not
+ * running at VMPL0. The CA will be used to communicate with the
+ * SVSM to perform the SVSM services.
+ */
+ if (!svsm_setup_ca(cc_info))
+ return;
+
+ /*
+ * It is very early in the boot and the kernel is running identity
+ * mapped but without having adjusted the pagetables to where the
+ * kernel was loaded (physbase), so get the CA address using
+ * RIP-relative addressing.
+ */
+ pa = (u64)rip_rel_ptr(&boot_svsm_ca_page);
+
+ /*
+ * Switch over to the boot SVSM CA while the current CA is still
+ * addressable. There is no GHCB at this point so use the MSR protocol.
+ *
+ * SVSM_CORE_REMAP_CA call:
+ * RAX = 0 (Protocol=0, CallID=0)
+ * RCX = New CA GPA
+ */
+ call.caa = svsm_get_caa();
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
+ call.rcx = pa;
+ ret = svsm_perform_call_protocol(&call);
+ if (ret)
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);
+
+ boot_svsm_caa = (struct svsm_ca *)pa;
+ boot_svsm_caa_pa = pa;
+}
+
+bool __head snp_init(struct boot_params *bp)
+{
+ struct cc_blob_sev_info *cc_info;
+
+ if (!bp)
+ return false;
+
+ cc_info = find_cc_blob(bp);
+ if (!cc_info)
+ return false;
+
+ if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE)
+ sev_secrets_pa = cc_info->secrets_phys;
+ else
+ return false;
+
+ setup_cpuid_table(cc_info);
+
+ svsm_setup(cc_info);
+
+ /*
+ * The CC blob will be used later to access the secrets page. Cache
+ * it here like the boot kernel does.
+ */
+ bp->cc_blob_address = (u32)(unsigned long)cc_info;
+
+ return true;
+}
+
+void __head __noreturn snp_abort(void)
+{
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
+}
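
__sev_get_ghcb()/__sev_put_ghcb() in this file allow exactly one level of nesting: the per-CPU GHCB plus a backup slot for an NMI that interrupts a #VC handler already holding it; a third concurrent user is fatal. A toy user-space model of that rule, illustrative only and not kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ghcb_state_model {
	bool ghcb_active;
	bool backup_active;
};

static int get_ghcb(struct ghcb_state_model *s)
{
	if (!s->ghcb_active) {
		s->ghcb_active = true;
		return 0;			/* primary GHCB handed out */
	}
	if (!s->backup_active) {
		s->backup_active = true;	/* real code saves the contents here */
		return 1;			/* backup GHCB handed out */
	}
	fprintf(stderr, "GHCB and backup GHCB already in use\n");
	exit(1);
}

static void put_ghcb(struct ghcb_state_model *s, int which)
{
	if (which)
		s->backup_active = false;	/* real code restores the contents */
	else
		s->ghcb_active = false;
}

int main(void)
{
	struct ghcb_state_model s = { false, false };
	int vc  = get_ghcb(&s);	/* #VC handler takes the GHCB */
	int nmi = get_ghcb(&s);	/* NMI nested inside it gets the backup */

	put_ghcb(&s, nmi);
	put_ghcb(&s, vc);
	printf("nested get/put completed cleanly\n");
	return 0;
}
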
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/boot/startup/sme.c
index 5eecdd92da10..70ea1748c0a7 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/boot/startup/sme.c
@@ -45,8 +45,6 @@
#include <asm/coco.h>
#include <asm/sev.h>
-#include "mm_internal.h"
-
#define PGD_FLAGS _KERNPG_TABLE_NOENC
#define P4D_FLAGS _KERNPG_TABLE_NOENC
#define PUD_FLAGS _KERNPG_TABLE_NOENC
@@ -299,8 +297,7 @@ void __head sme_encrypt_kernel(struct boot_params *bp)
* instrumentation or checking boot_cpu_data in the cc_platform_has()
* function.
*/
- if (!sme_get_me_mask() ||
- RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
+ if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
return;
/*
@@ -318,8 +315,8 @@ void __head sme_encrypt_kernel(struct boot_params *bp)
* memory from being cached.
*/
- kernel_start = (unsigned long)RIP_REL_REF(_text);
- kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE);
+ kernel_start = (unsigned long)rip_rel_ptr(_text);
+ kernel_end = ALIGN((unsigned long)rip_rel_ptr(_end), PMD_SIZE);
kernel_len = kernel_end - kernel_start;
initrd_start = 0;
@@ -345,7 +342,7 @@ void __head sme_encrypt_kernel(struct boot_params *bp)
* pagetable structures for the encryption of the kernel
* pagetable structures for workarea (in case not currently mapped)
*/
- execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea);
+ execute_start = workarea_start = (unsigned long)rip_rel_ptr(sme_workarea);
execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
execute_len = execute_end - execute_start;
@@ -526,7 +523,7 @@ void __head sme_enable(struct boot_params *bp)
me_mask = 1UL << (ebx & 0x3f);
/* Check the SEV MSR whether SEV or SME is enabled */
- RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
+ sev_status = msr = native_rdmsrq(MSR_AMD64_SEV);
feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
/*
@@ -557,13 +554,22 @@ void __head sme_enable(struct boot_params *bp)
return;
/* For SME, check the SYSCFG MSR */
- msr = __rdmsr(MSR_AMD64_SYSCFG);
+ msr = native_rdmsrq(MSR_AMD64_SYSCFG);
if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
return;
}
- RIP_REL_REF(sme_me_mask) = me_mask;
- RIP_REL_REF(physical_mask) &= ~me_mask;
- RIP_REL_REF(cc_vendor) = CC_VENDOR_AMD;
+ sme_me_mask = me_mask;
+ physical_mask &= ~me_mask;
+ cc_vendor = CC_VENDOR_AMD;
cc_set_mask(me_mask);
}
+
+#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
+/* Local version for startup code, which never operates on user page tables */
+__weak
+pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
+{
+ return pgd;
+}
+#endif
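
The sme.c changes above drop the RIP_REL_REF() wrappers in favour of rip_rel_ptr() or plain symbol references, presumably because the file now lives under arch/x86/boot/startup/ and is built as position-independent startup code. As a rough sketch of what such a RIP-relative accessor looks like (assumed here for illustration; the kernel's actual helper is defined elsewhere):

        /*
         * Sketch of a RIP-relative accessor: a RIP-relative LEA computes the
         * symbol address from the current instruction pointer, which stays
         * correct even before the kernel runs at its link-time virtual address.
         */
        static __always_inline void *rip_rel_ptr(void *p)
        {
                asm("leaq %c1(%%rip), %0" : "=r" (p) : "i" (p));

                return p;
        }
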
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 84f7a883ce1e..f35369bb14c5 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -32,7 +32,7 @@
int memcmp(const void *s1, const void *s2, size_t len)
{
bool diff;
- asm("repe; cmpsb" CC_SET(nz)
+ asm("repe cmpsb" CC_SET(nz)
: CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
index f2e96905b3fe..0641c8c46aee 100644
--- a/arch/x86/boot/video.c
+++ b/arch/x86/boot/video.c
@@ -292,7 +292,7 @@ static void restore_screen(void)
"shrw %%cx ; "
"jnc 1f ; "
"stosw \n\t"
- "1: rep;stosl ; "
+ "1: rep stosl ; "
"popw %%es"
: "+D" (dst), "+c" (npad)
: "bdS" (video_segment),
diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
index 9a0ddda3aa69..d4610af68114 100644
--- a/arch/x86/coco/core.c
+++ b/arch/x86/coco/core.c
@@ -18,7 +18,9 @@
#include <asm/processor.h>
enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
+SYM_PIC_ALIAS(cc_vendor);
u64 cc_mask __ro_after_init;
+SYM_PIC_ALIAS(cc_mask);
static struct cc_attr_flags {
__u64 host_sev_snp : 1,
diff --git a/arch/x86/coco/sev/Makefile b/arch/x86/coco/sev/Makefile
index dcb06dc8b5ae..db3255b979bd 100644
--- a/arch/x86/coco/sev/Makefile
+++ b/arch/x86/coco/sev/Makefile
@@ -1,22 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += core.o
-
-# jump tables are emitted using absolute references in non-PIC code
-# so they cannot be used in the early SEV startup code
-CFLAGS_core.o += -fno-jump-tables
-
-ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_core.o = -pg
-endif
-
-KASAN_SANITIZE_core.o := n
-KMSAN_SANITIZE_core.o := n
-KCOV_INSTRUMENT_core.o := n
-
-# With some compiler versions the generated code results in boot hangs, caused
-# by several compilation units. To be safe, disable all instrumentation.
-KCSAN_SANITIZE := n
+obj-y += core.o sev-nmi.o vc-handle.o
# Clang 14 and older may fail to respect __no_sanitize_undefined when inlining
-UBSAN_SANITIZE := n
+UBSAN_SANITIZE_sev-nmi.o := n
+
+# GCC may fail to respect __no_sanitize_address when inlining
+KASAN_SANITIZE_sev-nmi.o := n
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index b0c1a7a57497..b6db4e0b936b 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -31,6 +31,7 @@
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
+#include <asm/sev-internal.h>
#include <asm/insn-eval.h>
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
@@ -41,10 +42,9 @@
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/cmdline.h>
-
-#define DR7_RESET_VALUE 0x400
+#include <asm/msr.h>
/* AP INIT values as documented in the APM2 section "Processor Initialization State" */
#define AP_INIT_CS_LIMIT 0xffff
@@ -81,21 +81,6 @@ static const char * const sev_status_feat_names[] = {
[MSR_AMD64_SNP_SMT_PROT_BIT] = "SMTProt",
};
-/* For early boot hypervisor communication in SEV-ES enabled guests */
-static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
-
-/*
- * Needs to be in the .data section because we need it NULL before bss is
- * cleared
- */
-static struct ghcb *boot_ghcb __section(".data");
-
-/* Bitmap of SEV features supported by the hypervisor */
-static u64 sev_hv_features __ro_after_init;
-
-/* Secrets page physical address from the CC blob */
-static u64 secrets_pa __ro_after_init;
-
/*
* For Secure TSC guests, the BSP fetches TSC_INFO using SNP guest messaging and
* initializes snp_tsc_scale and snp_tsc_offset. These values are replicated
@@ -105,558 +90,196 @@ static u64 snp_tsc_scale __ro_after_init;
static u64 snp_tsc_offset __ro_after_init;
static u64 snp_tsc_freq_khz __ro_after_init;
-/* #VC handler runtime per-CPU data */
-struct sev_es_runtime_data {
- struct ghcb ghcb_page;
-
- /*
- * Reserve one page per CPU as backup storage for the unencrypted GHCB.
- * It is needed when an NMI happens while the #VC handler uses the real
- * GHCB, and the NMI handler itself is causing another #VC exception. In
- * that case the GHCB content of the first handler needs to be backed up
- * and restored.
- */
- struct ghcb backup_ghcb;
-
- /*
- * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
- * There is no need for it to be atomic, because nothing is written to
- * the GHCB between the read and the write of ghcb_active. So it is safe
- * to use it when a nested #VC exception happens before the write.
- *
- * This is necessary for example in the #VC->NMI->#VC case when the NMI
- * happens while the first #VC handler uses the GHCB. When the NMI code
- * raises a second #VC handler it might overwrite the contents of the
- * GHCB written by the first handler. To avoid this the content of the
- * GHCB is saved and restored when the GHCB is detected to be in use
- * already.
- */
- bool ghcb_active;
- bool backup_ghcb_active;
-
- /*
- * Cached DR7 value - write it on DR7 writes and return it on reads.
- * That value will never make it to the real hardware DR7 as debugging
- * is currently unsupported in SEV-ES guests.
- */
- unsigned long dr7;
-};
-
-struct ghcb_state {
- struct ghcb *ghcb;
-};
-
-/* For early boot SVSM communication */
-static struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
-
-static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
-static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
-static DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
-static DEFINE_PER_CPU(u64, svsm_caa_pa);
-
-static __always_inline bool on_vc_stack(struct pt_regs *regs)
-{
- unsigned long sp = regs->sp;
-
- /* User-mode RSP is not trusted */
- if (user_mode(regs))
- return false;
-
- /* SYSCALL gap still has user-mode RSP */
- if (ip_within_syscall_gap(regs))
- return false;
-
- return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
-}
+DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
+DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
/*
- * This function handles the case when an NMI is raised in the #VC
- * exception handler entry code, before the #VC handler has switched off
- * its IST stack. In this case, the IST entry for #VC must be adjusted,
- * so that any nested #VC exception will not overwrite the stack
- * contents of the interrupted #VC handler.
- *
- * The IST entry is adjusted unconditionally so that it can be also be
- * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
- * nested sev_es_ist_exit() call may adjust back the IST entry too
- * early.
- *
- * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
- * on the NMI IST stack, as they are only called from NMI handling code
- * right now.
+ * SVSM related information:
+ * When running under an SVSM, the VMPL that Linux is executing at must be
+ * non-zero. The VMPL is therefore used to indicate the presence of an SVSM.
*/
-void noinstr __sev_es_ist_enter(struct pt_regs *regs)
-{
- unsigned long old_ist, new_ist;
-
- /* Read old IST entry */
- new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
-
- /*
- * If NMI happened while on the #VC IST stack, set the new IST
- * value below regs->sp, so that the interrupted stack frame is
- * not overwritten by subsequent #VC exceptions.
- */
- if (on_vc_stack(regs))
- new_ist = regs->sp;
-
- /*
- * Reserve additional 8 bytes and store old IST value so this
- * adjustment can be unrolled in __sev_es_ist_exit().
- */
- new_ist -= sizeof(old_ist);
- *(unsigned long *)new_ist = old_ist;
-
- /* Set new IST entry */
- this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
-}
+u8 snp_vmpl __ro_after_init;
+EXPORT_SYMBOL_GPL(snp_vmpl);
-void noinstr __sev_es_ist_exit(void)
+static u64 __init get_snp_jump_table_addr(void)
{
- unsigned long ist;
+ struct snp_secrets_page *secrets;
+ void __iomem *mem;
+ u64 addr;
+
+ mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE);
+ if (!mem) {
+ pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
+ return 0;
+ }
- /* Read IST entry */
- ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+ secrets = (__force struct snp_secrets_page *)mem;
- if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
- return;
+ addr = secrets->os_area.ap_jump_table_pa;
+ iounmap(mem);
- /* Read back old IST entry and write it to the TSS */
- this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
+ return addr;
}
-/*
- * Nothing shall interrupt this code path while holding the per-CPU
- * GHCB. The backup GHCB is only for NMIs interrupting this path.
- *
- * Callers must disable local interrupts around it.
- */
-static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
+static u64 __init get_jump_table_addr(void)
{
- struct sev_es_runtime_data *data;
+ struct ghcb_state state;
+ unsigned long flags;
struct ghcb *ghcb;
+ u64 ret = 0;
- WARN_ON(!irqs_disabled());
-
- data = this_cpu_read(runtime_data);
- ghcb = &data->ghcb_page;
+ if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return get_snp_jump_table_addr();
- if (unlikely(data->ghcb_active)) {
- /* GHCB is already in use - save its contents */
+ local_irq_save(flags);
- if (unlikely(data->backup_ghcb_active)) {
- /*
- * Backup-GHCB is also already in use. There is no way
- * to continue here so just kill the machine. To make
- * panic() work, mark GHCBs inactive so that messages
- * can be printed out.
- */
- data->ghcb_active = false;
- data->backup_ghcb_active = false;
+ ghcb = __sev_get_ghcb(&state);
- instrumentation_begin();
- panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
- instrumentation_end();
- }
+ vc_ghcb_invalidate(ghcb);
+ ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
+ ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
+ ghcb_set_sw_exit_info_2(ghcb, 0);
- /* Mark backup_ghcb active before writing to it */
- data->backup_ghcb_active = true;
+ sev_es_wr_ghcb_msr(__pa(ghcb));
+ VMGEXIT();
- state->ghcb = &data->backup_ghcb;
+ if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
+ ghcb_sw_exit_info_2_is_valid(ghcb))
+ ret = ghcb->save.sw_exit_info_2;
- /* Backup GHCB content */
- *state->ghcb = *ghcb;
- } else {
- state->ghcb = NULL;
- data->ghcb_active = true;
- }
+ __sev_put_ghcb(&state);
- return ghcb;
-}
+ local_irq_restore(flags);
-static inline u64 sev_es_rd_ghcb_msr(void)
-{
- return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
+ return ret;
}
-static __always_inline void sev_es_wr_ghcb_msr(u64 val)
+static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
+ int ret, u64 svsm_ret)
{
- u32 low, high;
+ WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
+ pfn, action, page_size, ret, svsm_ret);
- low = (u32)(val);
- high = (u32)(val >> 32);
-
- native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
-}
-
-static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
- unsigned char *buffer)
-{
- return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
}
-static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
+static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
{
- char buffer[MAX_INSN_SIZE];
- int insn_bytes;
-
- insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
- if (insn_bytes == 0) {
- /* Nothing could be copied */
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
- ctxt->fi.cr2 = ctxt->regs->ip;
- return ES_EXCEPTION;
- } else if (insn_bytes == -EINVAL) {
- /* Effective RIP could not be calculated */
- ctxt->fi.vector = X86_TRAP_GP;
- ctxt->fi.error_code = 0;
- ctxt->fi.cr2 = 0;
- return ES_EXCEPTION;
- }
+ unsigned int page_size;
+ bool action;
+ u64 pfn;
- if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
- return ES_DECODE_FAILED;
+ pfn = pc->entry[pc->cur_index].pfn;
+ action = pc->entry[pc->cur_index].action;
+ page_size = pc->entry[pc->cur_index].page_size;
- if (ctxt->insn.immediate.got)
- return ES_OK;
- else
- return ES_DECODE_FAILED;
+ __pval_terminate(pfn, action, page_size, ret, svsm_ret);
}
-static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
+static void pval_pages(struct snp_psc_desc *desc)
{
- char buffer[MAX_INSN_SIZE];
- int res, ret;
-
- res = vc_fetch_insn_kernel(ctxt, buffer);
- if (res) {
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.error_code = X86_PF_INSTR;
- ctxt->fi.cr2 = ctxt->regs->ip;
- return ES_EXCEPTION;
- }
-
- ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
- if (ret < 0)
- return ES_DECODE_FAILED;
- else
- return ES_OK;
-}
+ struct psc_entry *e;
+ unsigned long vaddr;
+ unsigned int size;
+ unsigned int i;
+ bool validate;
+ u64 pfn;
+ int rc;
-static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
-{
- if (user_mode(ctxt->regs))
- return __vc_decode_user_insn(ctxt);
- else
- return __vc_decode_kern_insn(ctxt);
-}
+ for (i = 0; i <= desc->hdr.end_entry; i++) {
+ e = &desc->entries[i];
-static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
- char *dst, char *buf, size_t size)
-{
- unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
+ pfn = e->gfn;
+ vaddr = (unsigned long)pfn_to_kaddr(pfn);
+ size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+ validate = e->operation == SNP_PAGE_STATE_PRIVATE;
- /*
- * This function uses __put_user() independent of whether kernel or user
- * memory is accessed. This works fine because __put_user() does no
- * sanity checks of the pointer being accessed. All that it does is
- * to report when the access failed.
- *
- * Also, this function runs in atomic context, so __put_user() is not
- * allowed to sleep. The page-fault handler detects that it is running
- * in atomic context and will not try to take mmap_sem and handle the
- * fault, so additional pagefault_enable()/disable() calls are not
- * needed.
- *
- * The access can't be done via copy_to_user() here because
- * vc_write_mem() must not use string instructions to access unsafe
- * memory. The reason is that MOVS is emulated by the #VC handler by
- * splitting the move up into a read and a write and taking a nested #VC
- * exception on whatever of them is the MMIO access. Using string
- * instructions here would cause infinite nesting.
- */
- switch (size) {
- case 1: {
- u8 d1;
- u8 __user *target = (u8 __user *)dst;
-
- memcpy(&d1, buf, 1);
- if (__put_user(d1, target))
- goto fault;
- break;
- }
- case 2: {
- u16 d2;
- u16 __user *target = (u16 __user *)dst;
-
- memcpy(&d2, buf, 2);
- if (__put_user(d2, target))
- goto fault;
- break;
- }
- case 4: {
- u32 d4;
- u32 __user *target = (u32 __user *)dst;
+ rc = pvalidate(vaddr, size, validate);
+ if (!rc)
+ continue;
- memcpy(&d4, buf, 4);
- if (__put_user(d4, target))
- goto fault;
- break;
- }
- case 8: {
- u64 d8;
- u64 __user *target = (u64 __user *)dst;
+ if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
+ unsigned long vaddr_end = vaddr + PMD_SIZE;
- memcpy(&d8, buf, 8);
- if (__put_user(d8, target))
- goto fault;
- break;
- }
- default:
- WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
- return ES_UNSUPPORTED;
+ for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) {
+ rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
+ if (rc)
+ __pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc, 0);
+ }
+ } else {
+ __pval_terminate(pfn, validate, size, rc, 0);
+ }
}
-
- return ES_OK;
-
-fault:
- if (user_mode(ctxt->regs))
- error_code |= X86_PF_USER;
-
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.error_code = error_code;
- ctxt->fi.cr2 = (unsigned long)dst;
-
- return ES_EXCEPTION;
}
-static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
- char *src, char *buf, size_t size)
+static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
+ struct svsm_pvalidate_call *pc)
{
- unsigned long error_code = X86_PF_PROT;
+ struct svsm_pvalidate_entry *pe;
- /*
- * This function uses __get_user() independent of whether kernel or user
- * memory is accessed. This works fine because __get_user() does no
- * sanity checks of the pointer being accessed. All that it does is
- * to report when the access failed.
- *
- * Also, this function runs in atomic context, so __get_user() is not
- * allowed to sleep. The page-fault handler detects that it is running
- * in atomic context and will not try to take mmap_sem and handle the
- * fault, so additional pagefault_enable()/disable() calls are not
- * needed.
- *
- * The access can't be done via copy_from_user() here because
- * vc_read_mem() must not use string instructions to access unsafe
- * memory. The reason is that MOVS is emulated by the #VC handler by
- * splitting the move up into a read and a write and taking a nested #VC
- * exception on whatever of them is the MMIO access. Using string
- * instructions here would cause infinite nesting.
- */
- switch (size) {
- case 1: {
- u8 d1;
- u8 __user *s = (u8 __user *)src;
-
- if (__get_user(d1, s))
- goto fault;
- memcpy(buf, &d1, 1);
- break;
- }
- case 2: {
- u16 d2;
- u16 __user *s = (u16 __user *)src;
+ /* Nothing in the CA yet */
+ pc->num_entries = 0;
+ pc->cur_index = 0;
- if (__get_user(d2, s))
- goto fault;
- memcpy(buf, &d2, 2);
- break;
- }
- case 4: {
- u32 d4;
- u32 __user *s = (u32 __user *)src;
-
- if (__get_user(d4, s))
- goto fault;
- memcpy(buf, &d4, 4);
- break;
- }
- case 8: {
- u64 d8;
- u64 __user *s = (u64 __user *)src;
- if (__get_user(d8, s))
- goto fault;
- memcpy(buf, &d8, 8);
- break;
- }
- default:
- WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
- return ES_UNSUPPORTED;
- }
+ pe = &pc->entry[0];
- return ES_OK;
+ while (pfn < pfn_end) {
+ pe->page_size = RMP_PG_SIZE_4K;
+ pe->action = action;
+ pe->ignore_cf = 0;
+ pe->pfn = pfn;
-fault:
- if (user_mode(ctxt->regs))
- error_code |= X86_PF_USER;
+ pe++;
+ pfn++;
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.error_code = error_code;
- ctxt->fi.cr2 = (unsigned long)src;
-
- return ES_EXCEPTION;
-}
-
-static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
- unsigned long vaddr, phys_addr_t *paddr)
-{
- unsigned long va = (unsigned long)vaddr;
- unsigned int level;
- phys_addr_t pa;
- pgd_t *pgd;
- pte_t *pte;
-
- pgd = __va(read_cr3_pa());
- pgd = &pgd[pgd_index(va)];
- pte = lookup_address_in_pgd(pgd, va, &level);
- if (!pte) {
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.cr2 = vaddr;
- ctxt->fi.error_code = 0;
-
- if (user_mode(ctxt->regs))
- ctxt->fi.error_code |= X86_PF_USER;
-
- return ES_EXCEPTION;
+ pc->num_entries++;
+ if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
+ break;
}
- if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
- /* Emulated MMIO to/from encrypted memory not supported */
- return ES_UNSUPPORTED;
-
- pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
- pa |= va & ~page_level_mask(level);
-
- *paddr = pa;
-
- return ES_OK;
+ return pfn;
}
-static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
+static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
+ struct svsm_pvalidate_call *pc)
{
- BUG_ON(size > 4);
-
- if (user_mode(ctxt->regs)) {
- struct thread_struct *t = &current->thread;
- struct io_bitmap *iobm = t->io_bitmap;
- size_t idx;
-
- if (!iobm)
- goto fault;
-
- for (idx = port; idx < port + size; ++idx) {
- if (test_bit(idx, iobm->bitmap))
- goto fault;
- }
- }
-
- return ES_OK;
+ struct svsm_pvalidate_entry *pe;
+ struct psc_entry *e;
-fault:
- ctxt->fi.vector = X86_TRAP_GP;
- ctxt->fi.error_code = 0;
+ /* Nothing in the CA yet */
+ pc->num_entries = 0;
+ pc->cur_index = 0;
- return ES_EXCEPTION;
-}
+ pe = &pc->entry[0];
+ e = &desc->entries[desc_entry];
-static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
-{
- long error_code = ctxt->fi.error_code;
- int trapnr = ctxt->fi.vector;
+ while (desc_entry <= desc->hdr.end_entry) {
+ pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+ pe->action = e->operation == SNP_PAGE_STATE_PRIVATE;
+ pe->ignore_cf = 0;
+ pe->pfn = e->gfn;
- ctxt->regs->orig_ax = ctxt->fi.error_code;
+ pe++;
+ e++;
- switch (trapnr) {
- case X86_TRAP_GP:
- exc_general_protection(ctxt->regs, error_code);
- break;
- case X86_TRAP_UD:
- exc_invalid_op(ctxt->regs);
- break;
- case X86_TRAP_PF:
- write_cr2(ctxt->fi.cr2);
- exc_page_fault(ctxt->regs, error_code);
- break;
- case X86_TRAP_AC:
- exc_alignment_check(ctxt->regs, error_code);
- break;
- default:
- pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
- BUG();
+ desc_entry++;
+ pc->num_entries++;
+ if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
+ break;
}
-}
-
-/* Include code shared with pre-decompression boot stage */
-#include "shared.c"
-
-static inline struct svsm_ca *svsm_get_caa(void)
-{
- /*
- * Use rIP-relative references when called early in the boot. If
- * ->use_cas is set, then it is late in the boot and no need
- * to worry about rIP-relative references.
- */
- if (RIP_REL_REF(sev_cfg).use_cas)
- return this_cpu_read(svsm_caa);
- else
- return RIP_REL_REF(boot_svsm_caa);
-}
-static u64 svsm_get_caa_pa(void)
-{
- /*
- * Use rIP-relative references when called early in the boot. If
- * ->use_cas is set, then it is late in the boot and no need
- * to worry about rIP-relative references.
- */
- if (RIP_REL_REF(sev_cfg).use_cas)
- return this_cpu_read(svsm_caa_pa);
- else
- return RIP_REL_REF(boot_svsm_caa_pa);
+ return desc_entry;
}
-static noinstr void __sev_put_ghcb(struct ghcb_state *state)
+static void svsm_pval_pages(struct snp_psc_desc *desc)
{
- struct sev_es_runtime_data *data;
- struct ghcb *ghcb;
-
- WARN_ON(!irqs_disabled());
-
- data = this_cpu_read(runtime_data);
- ghcb = &data->ghcb_page;
-
- if (state->ghcb) {
- /* Restore GHCB from Backup */
- *ghcb = *state->ghcb;
- data->backup_ghcb_active = false;
- state->ghcb = NULL;
- } else {
- /*
- * Invalidate the GHCB so a VMGEXIT instruction issued
- * from userspace won't appear to be valid.
- */
- vc_ghcb_invalidate(ghcb);
- data->ghcb_active = false;
- }
-}
-
-static int svsm_perform_call_protocol(struct svsm_call *call)
-{
- struct ghcb_state state;
+ struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
+ unsigned int i, pv_4k_count = 0;
+ struct svsm_pvalidate_call *pc;
+ struct svsm_call call = {};
unsigned long flags;
- struct ghcb *ghcb;
+ bool action;
+ u64 pc_pa;
int ret;
/*
@@ -666,180 +289,145 @@ static int svsm_perform_call_protocol(struct svsm_call *call)
flags = native_local_irq_save();
/*
- * Use rip-relative references when called early in the boot. If
- * ghcbs_initialized is set, then it is late in the boot and no need
- * to worry about rip-relative references in called functions.
+ * The SVSM calling area (CA) can support processing 510 entries at a
+ * time. Loop through the Page State Change descriptor until the CA is
+ * full or the last entry in the descriptor is reached, at which time
+ * the SVSM is invoked. This repeats until all entries in the descriptor
+ * are processed.
*/
- if (RIP_REL_REF(sev_cfg).ghcbs_initialized)
- ghcb = __sev_get_ghcb(&state);
- else if (RIP_REL_REF(boot_ghcb))
- ghcb = RIP_REL_REF(boot_ghcb);
- else
- ghcb = NULL;
+ call.caa = svsm_get_caa();
- do {
- ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
- : svsm_perform_msr_protocol(call);
- } while (ret == -EAGAIN);
+ pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
+ pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
- if (RIP_REL_REF(sev_cfg).ghcbs_initialized)
- __sev_put_ghcb(&state);
+ /* Protocol 0, Call ID 1 */
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
+ call.rcx = pc_pa;
- native_local_irq_restore(flags);
+ for (i = 0; i <= desc->hdr.end_entry;) {
+ i = svsm_build_ca_from_psc_desc(desc, i, pc);
- return ret;
-}
+ do {
+ ret = svsm_perform_call_protocol(&call);
+ if (!ret)
+ continue;
-void noinstr __sev_es_nmi_complete(void)
-{
- struct ghcb_state state;
- struct ghcb *ghcb;
+ /*
+ * Check if the entry failed because of an RMP mismatch (a
+ * PVALIDATE at 2M was requested, but the page is mapped in
+ * the RMP as 4K).
+ */
- ghcb = __sev_get_ghcb(&state);
+ if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
+ pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
+ /* Save this entry for post-processing at 4K */
+ pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
+
+ /* Skip to the next one unless at the end of the list */
+ pc->cur_index++;
+ if (pc->cur_index < pc->num_entries)
+ ret = -EAGAIN;
+ else
+ ret = 0;
+ }
+ } while (ret == -EAGAIN);
- vc_ghcb_invalidate(ghcb);
- ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
- ghcb_set_sw_exit_info_1(ghcb, 0);
- ghcb_set_sw_exit_info_2(ghcb, 0);
+ if (ret)
+ svsm_pval_terminate(pc, ret, call.rax_out);
+ }
- sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
- VMGEXIT();
+ /* Process any entries that failed to be validated at 2M and validate them at 4K */
+ for (i = 0; i < pv_4k_count; i++) {
+ u64 pfn, pfn_end;
- __sev_put_ghcb(&state);
-}
+ action = pv_4k[i].action;
+ pfn = pv_4k[i].pfn;
+ pfn_end = pfn + 512;
-static u64 __init get_snp_jump_table_addr(void)
-{
- struct snp_secrets_page *secrets;
- void __iomem *mem;
- u64 addr;
+ while (pfn < pfn_end) {
+ pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);
- mem = ioremap_encrypted(secrets_pa, PAGE_SIZE);
- if (!mem) {
- pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
- return 0;
+ ret = svsm_perform_call_protocol(&call);
+ if (ret)
+ svsm_pval_terminate(pc, ret, call.rax_out);
+ }
}
- secrets = (__force struct snp_secrets_page *)mem;
-
- addr = secrets->os_area.ap_jump_table_pa;
- iounmap(mem);
-
- return addr;
+ native_local_irq_restore(flags);
}
-static u64 __init get_jump_table_addr(void)
+static void pvalidate_pages(struct snp_psc_desc *desc)
{
- struct ghcb_state state;
- unsigned long flags;
- struct ghcb *ghcb;
- u64 ret = 0;
-
- if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
- return get_snp_jump_table_addr();
-
- local_irq_save(flags);
+ if (snp_vmpl)
+ svsm_pval_pages(desc);
+ else
+ pval_pages(desc);
+}
- ghcb = __sev_get_ghcb(&state);
+static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
+{
+ int cur_entry, end_entry, ret = 0;
+ struct snp_psc_desc *data;
+ struct es_em_ctxt ctxt;
vc_ghcb_invalidate(ghcb);
- ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
- ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
- ghcb_set_sw_exit_info_2(ghcb, 0);
-
- sev_es_wr_ghcb_msr(__pa(ghcb));
- VMGEXIT();
-
- if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
- ghcb_sw_exit_info_2_is_valid(ghcb))
- ret = ghcb->save.sw_exit_info_2;
-
- __sev_put_ghcb(&state);
-
- local_irq_restore(flags);
- return ret;
-}
-
-static void __head
-early_set_pages_state(unsigned long vaddr, unsigned long paddr,
- unsigned long npages, enum psc_op op)
-{
- unsigned long paddr_end;
- u64 val;
+ /* Copy the input desc into GHCB shared buffer */
+ data = (struct snp_psc_desc *)ghcb->shared_buffer;
+ memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
- vaddr = vaddr & PAGE_MASK;
+ /*
+ * As per the GHCB specification, the hypervisor can resume the guest
+ * before processing all the entries. Check whether all the entries
+ * are processed. If not, then keep retrying. Note, the hypervisor
+ * will update the data memory directly to indicate the status, so
+ * reference the data->hdr everywhere.
+ *
+ * The strategy here is to wait for the hypervisor to change the page
+ * state in the RMP table before guest accesses the memory pages. If the
+ * page state change was not successful, then later memory access will
+ * result in a crash.
+ */
+ cur_entry = data->hdr.cur_entry;
+ end_entry = data->hdr.end_entry;
- paddr = paddr & PAGE_MASK;
- paddr_end = paddr + (npages << PAGE_SHIFT);
+ while (data->hdr.cur_entry <= data->hdr.end_entry) {
+ ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
- while (paddr < paddr_end) {
- /* Page validation must be rescinded before changing to shared */
- if (op == SNP_PAGE_STATE_SHARED)
- pvalidate_4k_page(vaddr, paddr, false);
+ /* This will advance the shared buffer that data points to. */
+ ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
/*
- * Use the MSR protocol because this function can be called before
- * the GHCB is established.
+ * Page State Change VMGEXIT can pass error code through
+ * exit_info_2.
*/
- sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
- VMGEXIT();
-
- val = sev_es_rd_ghcb_msr();
-
- if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
- goto e_term;
-
- if (GHCB_MSR_PSC_RESP_VAL(val))
- goto e_term;
+ if (WARN(ret || ghcb->save.sw_exit_info_2,
+ "SNP: PSC failed ret=%d exit_info_2=%llx\n",
+ ret, ghcb->save.sw_exit_info_2)) {
+ ret = 1;
+ goto out;
+ }
- /* Page validation must be performed after changing to private */
- if (op == SNP_PAGE_STATE_PRIVATE)
- pvalidate_4k_page(vaddr, paddr, true);
+ /* Verify that reserved bit is not set */
+ if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
+ ret = 1;
+ goto out;
+ }
- vaddr += PAGE_SIZE;
- paddr += PAGE_SIZE;
+ /*
+ * Sanity check that entry processing is not going backwards.
+ * This will happen only if hypervisor is tricking us.
+ */
+ if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
+"SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
+ end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
+ ret = 1;
+ goto out;
+ }
}
- return;
-
-e_term:
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
-}
-
-void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
- unsigned long npages)
-{
- /*
- * This can be invoked in early boot while running identity mapped, so
- * use an open coded check for SNP instead of using cc_platform_has().
- * This eliminates worries about jump tables or checking boot_cpu_data
- * in the cc_platform_has() function.
- */
- if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
- return;
-
- /*
- * Ask the hypervisor to mark the memory pages as private in the RMP
- * table.
- */
- early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
-}
-
-void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
- unsigned long npages)
-{
- /*
- * This can be invoked in early boot while running identity mapped, so
- * use an open coded check for SNP instead of using cc_platform_has().
- * This eliminates worries about jump tables or checking boot_cpu_data
- * in the cc_platform_has() function.
- */
- if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
- return;
-
- /* Ask hypervisor to mark the memory pages shared in the RMP table. */
- early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
+out:
+ return ret;
}
static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
@@ -959,6 +547,102 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
}
+static int vmgexit_ap_control(u64 event, struct sev_es_save_area *vmsa, u32 apic_id)
+{
+ bool create = event != SVM_VMGEXIT_AP_DESTROY;
+ struct ghcb_state state;
+ unsigned long flags;
+ struct ghcb *ghcb;
+ int ret = 0;
+
+ local_irq_save(flags);
+
+ ghcb = __sev_get_ghcb(&state);
+
+ vc_ghcb_invalidate(ghcb);
+
+ if (create)
+ ghcb_set_rax(ghcb, vmsa->sev_features);
+
+ ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
+ ghcb_set_sw_exit_info_1(ghcb,
+ ((u64)apic_id << 32) |
+ ((u64)snp_vmpl << 16) |
+ event);
+ ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
+
+ sev_es_wr_ghcb_msr(__pa(ghcb));
+ VMGEXIT();
+
+ if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
+ lower_32_bits(ghcb->save.sw_exit_info_1)) {
+ pr_err("SNP AP %s error\n", (create ? "CREATE" : "DESTROY"));
+ ret = -EINVAL;
+ }
+
+ __sev_put_ghcb(&state);
+
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa)
+{
+ int ret;
+
+ if (snp_vmpl) {
+ struct svsm_call call = {};
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ call.caa = this_cpu_read(svsm_caa);
+ call.rcx = __pa(va);
+
+ if (make_vmsa) {
+ /* Protocol 0, Call ID 2 */
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
+ call.rdx = __pa(caa);
+ call.r8 = apic_id;
+ } else {
+ /* Protocol 0, Call ID 3 */
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);
+ }
+
+ ret = svsm_perform_call_protocol(&call);
+
+ local_irq_restore(flags);
+ } else {
+ /*
+ * If the kernel runs at VMPL0, it can change the VMSA
+ * bit for a page using the RMPADJUST instruction.
+ * However, for the instruction to succeed it must
+ * target the permissions of a lesser privileged (higher
+ * numbered) VMPL level, so use VMPL1.
+ */
+ u64 attrs = 1;
+
+ if (make_vmsa)
+ attrs |= RMPADJUST_VMSA_PAGE_BIT;
+
+ ret = rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
+ }
+
+ return ret;
+}
+
+static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id)
+{
+ int err;
+
+ err = snp_set_vmsa(vmsa, NULL, apic_id, false);
+ if (err)
+ pr_err("clear VMSA page failed (%u), leaking page\n", err);
+ else
+ free_page((unsigned long)vmsa);
+}
+
static void set_pte_enc(pte_t *kpte, int level, void *va)
{
struct pte_enc_desc d = {
@@ -1005,7 +689,8 @@ static void unshare_all_memory(void)
data = per_cpu(runtime_data, cpu);
ghcb = (unsigned long)&data->ghcb_page;
- if (addr <= ghcb && ghcb <= addr + size) {
+ /* Handle the case of a huge page containing the GHCB page */
+ if (addr <= ghcb && ghcb < addr + size) {
skipped_addr = true;
break;
}
@@ -1055,11 +740,70 @@ void snp_kexec_begin(void)
pr_warn("Failed to stop shared<->private conversions\n");
}
+/*
+ * Shut down all APs except the one handling kexec/kdump, and clear the
+ * VMSA tag on the APs' VMSA pages since they are no longer being used
+ * as VMSA pages.
+ */
+static void shutdown_all_aps(void)
+{
+ struct sev_es_save_area *vmsa;
+ int apic_id, this_cpu, cpu;
+
+ this_cpu = get_cpu();
+
+ /*
+ * APs are already in HLT loop when enc_kexec_finish() callback
+ * is invoked.
+ */
+ for_each_present_cpu(cpu) {
+ vmsa = per_cpu(sev_vmsa, cpu);
+
+ /*
+ * The BSP or offlined APs do not have guest allocated VMSA
+ * and there is no need to clear the VMSA tag for this page.
+ */
+ if (!vmsa)
+ continue;
+
+ /*
+ * Cannot clear the VMSA tag for the currently running vCPU.
+ */
+ if (this_cpu == cpu) {
+ unsigned long pa;
+ struct page *p;
+
+ pa = __pa(vmsa);
+ /*
+ * Mark the VMSA page of the running vCPU as offline
+ * so that it is excluded and not touched by makedumpfile
+ * while generating vmcore during kdump.
+ */
+ p = pfn_to_online_page(pa >> PAGE_SHIFT);
+ if (p)
+ __SetPageOffline(p);
+ continue;
+ }
+
+ apic_id = cpuid_to_apicid[cpu];
+
+ /*
+ * Issue AP destroy to ensure AP gets kicked out of guest mode
+ * to allow using RMPADJUST to remove the VMSA tag on its
+ * VMSA page.
+ */
+ vmgexit_ap_control(SVM_VMGEXIT_AP_DESTROY, vmsa, apic_id);
+ snp_cleanup_vmsa(vmsa, apic_id);
+ }
+
+ put_cpu();
+}
+
void snp_kexec_finish(void)
{
struct sev_es_runtime_data *data;
+ unsigned long size, addr;
unsigned int level, cpu;
- unsigned long size;
struct ghcb *ghcb;
pte_t *pte;
@@ -1069,6 +813,8 @@ void snp_kexec_finish(void)
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
+ shutdown_all_aps();
+
unshare_all_memory();
/*
@@ -1085,54 +831,11 @@ void snp_kexec_finish(void)
ghcb = &data->ghcb_page;
pte = lookup_address((unsigned long)ghcb, &level);
size = page_level_size(level);
- set_pte_enc(pte, level, (void *)ghcb);
- snp_set_memory_private((unsigned long)ghcb, (size / PAGE_SIZE));
- }
-}
-
-static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa)
-{
- int ret;
-
- if (snp_vmpl) {
- struct svsm_call call = {};
- unsigned long flags;
-
- local_irq_save(flags);
-
- call.caa = this_cpu_read(svsm_caa);
- call.rcx = __pa(va);
-
- if (make_vmsa) {
- /* Protocol 0, Call ID 2 */
- call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
- call.rdx = __pa(caa);
- call.r8 = apic_id;
- } else {
- /* Protocol 0, Call ID 3 */
- call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);
- }
-
- ret = svsm_perform_call_protocol(&call);
-
- local_irq_restore(flags);
- } else {
- /*
- * If the kernel runs at VMPL0, it can change the VMSA
- * bit for a page using the RMPADJUST instruction.
- * However, for the instruction to succeed it must
- * target the permissions of a lesser privileged (higher
- * numbered) VMPL level, so use VMPL1.
- */
- u64 attrs = 1;
-
- if (make_vmsa)
- attrs |= RMPADJUST_VMSA_PAGE_BIT;
-
- ret = rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
+ /* Handle the case of a huge page containing the GHCB page */
+ addr = (unsigned long)ghcb & page_level_mask(level);
+ set_pte_enc(pte, level, (void *)addr);
+ snp_set_memory_private(addr, (size / PAGE_SIZE));
}
-
- return ret;
}
#define __ATTR_BASE (SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
@@ -1166,26 +869,12 @@ static void *snp_alloc_vmsa_page(int cpu)
return page_address(p + 1);
}
-static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id)
-{
- int err;
-
- err = snp_set_vmsa(vmsa, NULL, apic_id, false);
- if (err)
- pr_err("clear VMSA page failed (%u), leaking page\n", err);
- else
- free_page((unsigned long)vmsa);
-}
-
-static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
+static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip, unsigned int cpu)
{
struct sev_es_save_area *cur_vmsa, *vmsa;
- struct ghcb_state state;
struct svsm_ca *caa;
- unsigned long flags;
- struct ghcb *ghcb;
u8 sipi_vector;
- int cpu, ret;
+ int ret;
u64 cr4;
/*
@@ -1206,15 +895,6 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
/* Override start_ip with known protected guest start IP */
start_ip = real_mode_header->sev_es_trampoline_start;
-
- /* Find the logical CPU for the APIC ID */
- for_each_present_cpu(cpu) {
- if (arch_match_cpu_phys_id(cpu, apic_id))
- break;
- }
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
-
cur_vmsa = per_cpu(sev_vmsa, cpu);
/*
@@ -1297,33 +977,7 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
}
/* Issue VMGEXIT AP Creation NAE event */
- local_irq_save(flags);
-
- ghcb = __sev_get_ghcb(&state);
-
- vc_ghcb_invalidate(ghcb);
- ghcb_set_rax(ghcb, vmsa->sev_features);
- ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
- ghcb_set_sw_exit_info_1(ghcb,
- ((u64)apic_id << 32) |
- ((u64)snp_vmpl << 16) |
- SVM_VMGEXIT_AP_CREATE);
- ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
-
- sev_es_wr_ghcb_msr(__pa(ghcb));
- VMGEXIT();
-
- if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
- lower_32_bits(ghcb->save.sw_exit_info_1)) {
- pr_err("SNP AP Creation error\n");
- ret = -EINVAL;
- }
-
- __sev_put_ghcb(&state);
-
- local_irq_restore(flags);
-
- /* Perform cleanup if there was an error */
+ ret = vmgexit_ap_control(SVM_VMGEXIT_AP_CREATE, vmsa, apic_id);
if (ret) {
snp_cleanup_vmsa(vmsa, apic_id);
vmsa = NULL;
@@ -1417,90 +1071,6 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
return 0;
}
-/* Writes to the SVSM CAA MSR are ignored */
-static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write)
-{
- if (write)
- return ES_OK;
-
- regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa));
- regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa));
-
- return ES_OK;
-}
-
-/*
- * TSC related accesses should not exit to the hypervisor when a guest is
- * executing with Secure TSC enabled, so special handling is required for
- * accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ.
- */
-static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write)
-{
- u64 tsc;
-
- /*
- * GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled.
- * Terminate the SNP guest when the interception is enabled.
- */
- if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ)
- return ES_VMM_ERROR;
-
- /*
- * Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC
- * to return undefined values, so ignore all writes.
- *
- * Reads: Reads of MSR_IA32_TSC should return the current TSC value, use
- * the value returned by rdtsc_ordered().
- */
- if (write) {
- WARN_ONCE(1, "TSC MSR writes are verboten!\n");
- return ES_OK;
- }
-
- tsc = rdtsc_ordered();
- regs->ax = lower_32_bits(tsc);
- regs->dx = upper_32_bits(tsc);
-
- return ES_OK;
-}
-
-static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
-{
- struct pt_regs *regs = ctxt->regs;
- enum es_result ret;
- bool write;
-
- /* Is it a WRMSR? */
- write = ctxt->insn.opcode.bytes[1] == 0x30;
-
- switch (regs->cx) {
- case MSR_SVSM_CAA:
- return __vc_handle_msr_caa(regs, write);
- case MSR_IA32_TSC:
- case MSR_AMD64_GUEST_TSC_FREQ:
- if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
- return __vc_handle_secure_tsc_msrs(regs, write);
- break;
- default:
- break;
- }
-
- ghcb_set_rcx(ghcb, regs->cx);
- if (write) {
- ghcb_set_rax(ghcb, regs->ax);
- ghcb_set_rdx(ghcb, regs->dx);
- }
-
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, write, 0);
-
- if ((ret == ES_OK) && !write) {
- regs->ax = ghcb->save.rax;
- regs->dx = ghcb->save.rdx;
- }
-
- return ret;
-}
-
static void snp_register_per_cpu_ghcb(void)
{
struct sev_es_runtime_data *data;
@@ -1713,748 +1283,6 @@ void __init sev_es_init_vc_handling(void)
initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
}
-static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
-{
- int trapnr = ctxt->fi.vector;
-
- if (trapnr == X86_TRAP_PF)
- native_write_cr2(ctxt->fi.cr2);
-
- ctxt->regs->orig_ax = ctxt->fi.error_code;
- do_early_exception(ctxt->regs, trapnr);
-}
-
-static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
-{
- long *reg_array;
- int offset;
-
- reg_array = (long *)ctxt->regs;
- offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);
-
- if (offset < 0)
- return NULL;
-
- offset /= sizeof(long);
-
- return reg_array + offset;
-}
-static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
- unsigned int bytes, bool read)
-{
- u64 exit_code, exit_info_1, exit_info_2;
- unsigned long ghcb_pa = __pa(ghcb);
- enum es_result res;
- phys_addr_t paddr;
- void __user *ref;
-
- ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
- if (ref == (void __user *)-1L)
- return ES_UNSUPPORTED;
-
- exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
-
- res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
- if (res != ES_OK) {
- if (res == ES_EXCEPTION && !read)
- ctxt->fi.error_code |= X86_PF_WRITE;
-
- return res;
- }
-
- exit_info_1 = paddr;
- /* Can never be greater than 8 */
- exit_info_2 = bytes;
-
- ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
-
- return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
-}
-
-/*
- * The MOVS instruction has two memory operands, which raises the
- * problem that it is not known whether the access to the source or the
- * destination caused the #VC exception (and hence whether an MMIO read
- * or write operation needs to be emulated).
- *
- * Instead of playing games with walking page-tables and trying to guess
- * whether the source or destination is an MMIO range, split the move
- * into two operations, a read and a write with only one memory operand.
- * This will cause a nested #VC exception on the MMIO address which can
- * then be handled.
- *
- * This implementation has the benefit that it also supports MOVS where
- * source _and_ destination are MMIO regions.
- *
- * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
- * rare operation. If it turns out to be a performance problem the split
- * operations can be moved to memcpy_fromio() and memcpy_toio().
- */
-static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
- unsigned int bytes)
-{
- unsigned long ds_base, es_base;
- unsigned char *src, *dst;
- unsigned char buffer[8];
- enum es_result ret;
- bool rep;
- int off;
-
- ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
- es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
-
- if (ds_base == -1L || es_base == -1L) {
- ctxt->fi.vector = X86_TRAP_GP;
- ctxt->fi.error_code = 0;
- return ES_EXCEPTION;
- }
-
- src = ds_base + (unsigned char *)ctxt->regs->si;
- dst = es_base + (unsigned char *)ctxt->regs->di;
-
- ret = vc_read_mem(ctxt, src, buffer, bytes);
- if (ret != ES_OK)
- return ret;
-
- ret = vc_write_mem(ctxt, dst, buffer, bytes);
- if (ret != ES_OK)
- return ret;
-
- if (ctxt->regs->flags & X86_EFLAGS_DF)
- off = -bytes;
- else
- off = bytes;
-
- ctxt->regs->si += off;
- ctxt->regs->di += off;
-
- rep = insn_has_rep_prefix(&ctxt->insn);
- if (rep)
- ctxt->regs->cx -= 1;
-
- if (!rep || ctxt->regs->cx == 0)
- return ES_OK;
- else
- return ES_RETRY;
-}
-
-static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
-{
- struct insn *insn = &ctxt->insn;
- enum insn_mmio_type mmio;
- unsigned int bytes = 0;
- enum es_result ret;
- u8 sign_byte;
- long *reg_data;
-
- mmio = insn_decode_mmio(insn, &bytes);
- if (mmio == INSN_MMIO_DECODE_FAILED)
- return ES_DECODE_FAILED;
-
- if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
- reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
- if (!reg_data)
- return ES_DECODE_FAILED;
- }
-
- if (user_mode(ctxt->regs))
- return ES_UNSUPPORTED;
-
- switch (mmio) {
- case INSN_MMIO_WRITE:
- memcpy(ghcb->shared_buffer, reg_data, bytes);
- ret = vc_do_mmio(ghcb, ctxt, bytes, false);
- break;
- case INSN_MMIO_WRITE_IMM:
- memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
- ret = vc_do_mmio(ghcb, ctxt, bytes, false);
- break;
- case INSN_MMIO_READ:
- ret = vc_do_mmio(ghcb, ctxt, bytes, true);
- if (ret)
- break;
-
- /* Zero-extend for 32-bit operation */
- if (bytes == 4)
- *reg_data = 0;
-
- memcpy(reg_data, ghcb->shared_buffer, bytes);
- break;
- case INSN_MMIO_READ_ZERO_EXTEND:
- ret = vc_do_mmio(ghcb, ctxt, bytes, true);
- if (ret)
- break;
-
- /* Zero extend based on operand size */
- memset(reg_data, 0, insn->opnd_bytes);
- memcpy(reg_data, ghcb->shared_buffer, bytes);
- break;
- case INSN_MMIO_READ_SIGN_EXTEND:
- ret = vc_do_mmio(ghcb, ctxt, bytes, true);
- if (ret)
- break;
-
- if (bytes == 1) {
- u8 *val = (u8 *)ghcb->shared_buffer;
-
- sign_byte = (*val & 0x80) ? 0xff : 0x00;
- } else {
- u16 *val = (u16 *)ghcb->shared_buffer;
-
- sign_byte = (*val & 0x8000) ? 0xff : 0x00;
- }
-
- /* Sign extend based on operand size */
- memset(reg_data, sign_byte, insn->opnd_bytes);
- memcpy(reg_data, ghcb->shared_buffer, bytes);
- break;
- case INSN_MMIO_MOVS:
- ret = vc_handle_mmio_movs(ctxt, bytes);
- break;
- default:
- ret = ES_UNSUPPORTED;
- break;
- }
-
- return ret;
-}
-
-static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
- long val, *reg = vc_insn_get_rm(ctxt);
- enum es_result ret;
-
- if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
- return ES_VMM_ERROR;
-
- if (!reg)
- return ES_DECODE_FAILED;
-
- val = *reg;
-
- /* Upper 32 bits must be written as zeroes */
- if (val >> 32) {
- ctxt->fi.vector = X86_TRAP_GP;
- ctxt->fi.error_code = 0;
- return ES_EXCEPTION;
- }
-
- /* Clear out other reserved bits and set bit 10 */
- val = (val & 0xffff23ffL) | BIT(10);
-
- /* Early non-zero writes to DR7 are not supported */
- if (!data && (val & ~DR7_RESET_VALUE))
- return ES_UNSUPPORTED;
-
- /* Using a value of 0 for ExitInfo1 means RAX holds the value */
- ghcb_set_rax(ghcb, val);
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
- if (ret != ES_OK)
- return ret;
-
- if (data)
- data->dr7 = val;
-
- return ES_OK;
-}
-
-static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
- long *reg = vc_insn_get_rm(ctxt);
-
- if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
- return ES_VMM_ERROR;
-
- if (!reg)
- return ES_DECODE_FAILED;
-
- if (data)
- *reg = data->dr7;
- else
- *reg = DR7_RESET_VALUE;
-
- return ES_OK;
-}
-
-static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
-}
-
-static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
-{
- enum es_result ret;
-
- ghcb_set_rcx(ghcb, ctxt->regs->cx);
-
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
- if (ret != ES_OK)
- return ret;
-
- if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
- return ES_VMM_ERROR;
-
- ctxt->regs->ax = ghcb->save.rax;
- ctxt->regs->dx = ghcb->save.rdx;
-
- return ES_OK;
-}
-
-static enum es_result vc_handle_monitor(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- /*
- * Treat it as a NOP and do not leak a physical address to the
- * hypervisor.
- */
- return ES_OK;
-}
-
-static enum es_result vc_handle_mwait(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- /* Treat the same as MONITOR/MONITORX */
- return ES_OK;
-}
-
-static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- enum es_result ret;
-
- ghcb_set_rax(ghcb, ctxt->regs->ax);
- ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);
-
- if (x86_platform.hyper.sev_es_hcall_prepare)
- x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
-
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
- if (ret != ES_OK)
- return ret;
-
- if (!ghcb_rax_is_valid(ghcb))
- return ES_VMM_ERROR;
-
- ctxt->regs->ax = ghcb->save.rax;
-
- /*
- * Call sev_es_hcall_finish() after regs->ax is already set.
- * This allows the hypervisor handler to overwrite it again if
- * necessary.
- */
- if (x86_platform.hyper.sev_es_hcall_finish &&
- !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
- return ES_VMM_ERROR;
-
- return ES_OK;
-}
-
-static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- /*
- * Calling ecx_alignment_check() directly does not work, because it
- * enables IRQs and the GHCB is active. Forward the exception and call
- * it later from vc_forward_exception().
- */
- ctxt->fi.vector = X86_TRAP_AC;
- ctxt->fi.error_code = 0;
- return ES_EXCEPTION;
-}
-
-static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
- struct ghcb *ghcb,
- unsigned long exit_code)
-{
- enum es_result result = vc_check_opcode_bytes(ctxt, exit_code);
-
- if (result != ES_OK)
- return result;
-
- switch (exit_code) {
- case SVM_EXIT_READ_DR7:
- result = vc_handle_dr7_read(ghcb, ctxt);
- break;
- case SVM_EXIT_WRITE_DR7:
- result = vc_handle_dr7_write(ghcb, ctxt);
- break;
- case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
- result = vc_handle_trap_ac(ghcb, ctxt);
- break;
- case SVM_EXIT_RDTSC:
- case SVM_EXIT_RDTSCP:
- result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
- break;
- case SVM_EXIT_RDPMC:
- result = vc_handle_rdpmc(ghcb, ctxt);
- break;
- case SVM_EXIT_INVD:
- pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
- result = ES_UNSUPPORTED;
- break;
- case SVM_EXIT_CPUID:
- result = vc_handle_cpuid(ghcb, ctxt);
- break;
- case SVM_EXIT_IOIO:
- result = vc_handle_ioio(ghcb, ctxt);
- break;
- case SVM_EXIT_MSR:
- result = vc_handle_msr(ghcb, ctxt);
- break;
- case SVM_EXIT_VMMCALL:
- result = vc_handle_vmmcall(ghcb, ctxt);
- break;
- case SVM_EXIT_WBINVD:
- result = vc_handle_wbinvd(ghcb, ctxt);
- break;
- case SVM_EXIT_MONITOR:
- result = vc_handle_monitor(ghcb, ctxt);
- break;
- case SVM_EXIT_MWAIT:
- result = vc_handle_mwait(ghcb, ctxt);
- break;
- case SVM_EXIT_NPF:
- result = vc_handle_mmio(ghcb, ctxt);
- break;
- default:
- /*
- * Unexpected #VC exception
- */
- result = ES_UNSUPPORTED;
- }
-
- return result;
-}
-
-static __always_inline bool is_vc2_stack(unsigned long sp)
-{
- return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
-}
-
-static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
-{
- unsigned long sp, prev_sp;
-
- sp = (unsigned long)regs;
- prev_sp = regs->sp;
-
- /*
- * If the code was already executing on the VC2 stack when the #VC
- * happened, let it proceed to the normal handling routine. This way the
- * code executing on the VC2 stack can cause #VC exceptions to get handled.
- */
- return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
-}
-
-static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
-{
- struct ghcb_state state;
- struct es_em_ctxt ctxt;
- enum es_result result;
- struct ghcb *ghcb;
- bool ret = true;
-
- ghcb = __sev_get_ghcb(&state);
-
- vc_ghcb_invalidate(ghcb);
- result = vc_init_em_ctxt(&ctxt, regs, error_code);
-
- if (result == ES_OK)
- result = vc_handle_exitcode(&ctxt, ghcb, error_code);
-
- __sev_put_ghcb(&state);
-
- /* Done - now check the result */
- switch (result) {
- case ES_OK:
- vc_finish_insn(&ctxt);
- break;
- case ES_UNSUPPORTED:
- pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
- error_code, regs->ip);
- ret = false;
- break;
- case ES_VMM_ERROR:
- pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
- error_code, regs->ip);
- ret = false;
- break;
- case ES_DECODE_FAILED:
- pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
- error_code, regs->ip);
- ret = false;
- break;
- case ES_EXCEPTION:
- vc_forward_exception(&ctxt);
- break;
- case ES_RETRY:
- /* Nothing to do */
- break;
- default:
- pr_emerg("Unknown result in %s():%d\n", __func__, result);
- /*
- * Emulating the instruction which caused the #VC exception
- * failed - can't continue so print debug information
- */
- BUG();
- }
-
- return ret;
-}
-
-static __always_inline bool vc_is_db(unsigned long error_code)
-{
- return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
-}
-
-/*
- * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
- * and will panic when an error happens.
- */
-DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
-{
- irqentry_state_t irq_state;
-
- /*
- * With the current implementation it is always possible to switch to a
- * safe stack because #VC exceptions only happen at known places, like
- * intercepted instructions or accesses to MMIO areas/IO ports. They can
- * also happen with code instrumentation when the hypervisor intercepts
- * #DB, but the critical paths are forbidden to be instrumented, so #DB
- * exceptions currently also only happen in safe places.
- *
- * But keep this here in case the noinstr annotations are violated due
- * to bug elsewhere.
- */
- if (unlikely(vc_from_invalid_context(regs))) {
- instrumentation_begin();
- panic("Can't handle #VC exception from unsupported context\n");
- instrumentation_end();
- }
-
- /*
- * Handle #DB before calling into !noinstr code to avoid recursive #DB.
- */
- if (vc_is_db(error_code)) {
- exc_debug(regs);
- return;
- }
-
- irq_state = irqentry_nmi_enter(regs);
-
- instrumentation_begin();
-
- if (!vc_raw_handle_exception(regs, error_code)) {
- /* Show some debug info */
- show_regs(regs);
-
- /* Ask hypervisor to sev_es_terminate */
- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
-
- /* If that fails and we get here - just panic */
- panic("Returned from Terminate-Request to Hypervisor\n");
- }
-
- instrumentation_end();
- irqentry_nmi_exit(regs, irq_state);
-}
-
-/*
- * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
- * and will kill the current task with SIGBUS when an error happens.
- */
-DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
-{
- /*
- * Handle #DB before calling into !noinstr code to avoid recursive #DB.
- */
- if (vc_is_db(error_code)) {
- noist_exc_debug(regs);
- return;
- }
-
- irqentry_enter_from_user_mode(regs);
- instrumentation_begin();
-
- if (!vc_raw_handle_exception(regs, error_code)) {
- /*
- * Do not kill the machine if user-space triggered the
- * exception. Send SIGBUS instead and let user-space deal with
- * it.
- */
- force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
- }
-
- instrumentation_end();
- irqentry_exit_to_user_mode(regs);
-}
-
-bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
-{
- unsigned long exit_code = regs->orig_ax;
- struct es_em_ctxt ctxt;
- enum es_result result;
-
- vc_ghcb_invalidate(boot_ghcb);
-
- result = vc_init_em_ctxt(&ctxt, regs, exit_code);
- if (result == ES_OK)
- result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);
-
- /* Done - now check the result */
- switch (result) {
- case ES_OK:
- vc_finish_insn(&ctxt);
- break;
- case ES_UNSUPPORTED:
- early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
- exit_code, regs->ip);
- goto fail;
- case ES_VMM_ERROR:
- early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
- exit_code, regs->ip);
- goto fail;
- case ES_DECODE_FAILED:
- early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
- exit_code, regs->ip);
- goto fail;
- case ES_EXCEPTION:
- vc_early_forward_exception(&ctxt);
- break;
- case ES_RETRY:
- /* Nothing to do */
- break;
- default:
- BUG();
- }
-
- return true;
-
-fail:
- show_regs(regs);
-
- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
-}
-
-/*
- * Initial set up of SNP relies on information provided by the
- * Confidential Computing blob, which can be passed to the kernel
- * in the following ways, depending on how it is booted:
- *
- * - when booted via the boot/decompress kernel:
- * - via boot_params
- *
- * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
- * - via a setup_data entry, as defined by the Linux Boot Protocol
- *
- * Scan for the blob in that order.
- */
-static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
-{
- struct cc_blob_sev_info *cc_info;
-
- /* Boot kernel would have passed the CC blob via boot_params. */
- if (bp->cc_blob_address) {
- cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
- goto found_cc_info;
- }
-
- /*
- * If kernel was booted directly, without the use of the
- * boot/decompression kernel, the CC blob may have been passed via
- * setup_data instead.
- */
- cc_info = find_cc_blob_setup_data(bp);
- if (!cc_info)
- return NULL;
-
-found_cc_info:
- if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
- snp_abort();
-
- return cc_info;
-}
-
-static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
-{
- struct svsm_call call = {};
- int ret;
- u64 pa;
-
- /*
- * Record the SVSM Calling Area address (CAA) if the guest is not
- * running at VMPL0. The CA will be used to communicate with the
- * SVSM to perform the SVSM services.
- */
- if (!svsm_setup_ca(cc_info))
- return;
-
- /*
- * It is very early in the boot and the kernel is running identity
- * mapped but without having adjusted the pagetables to where the
- * kernel was loaded (physbase), so get the CA address using
- * RIP-relative addressing.
- */
- pa = (u64)&RIP_REL_REF(boot_svsm_ca_page);
-
- /*
- * Switch over to the boot SVSM CA while the current CA is still
- * addressable. There is no GHCB at this point so use the MSR protocol.
- *
- * SVSM_CORE_REMAP_CA call:
- * RAX = 0 (Protocol=0, CallID=0)
- * RCX = New CA GPA
- */
- call.caa = svsm_get_caa();
- call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
- call.rcx = pa;
- ret = svsm_perform_call_protocol(&call);
- if (ret)
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);
-
- RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)pa;
- RIP_REL_REF(boot_svsm_caa_pa) = pa;
-}
-
-bool __head snp_init(struct boot_params *bp)
-{
- struct cc_blob_sev_info *cc_info;
-
- if (!bp)
- return false;
-
- cc_info = find_cc_blob(bp);
- if (!cc_info)
- return false;
-
- if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE)
- secrets_pa = cc_info->secrets_phys;
- else
- return false;
-
- setup_cpuid_table(cc_info);
-
- svsm_setup(cc_info);
-
- /*
- * The CC blob will be used later to access the secrets page. Cache
- * it here like the boot kernel does.
- */
- bp->cc_blob_address = (u32)(unsigned long)cc_info;
-
- return true;
-}
-
-void __head __noreturn snp_abort(void)
-{
- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
-}
-
/*
* SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are
* enabled, as the alternative (fallback) logic for DMI probing in the legacy
@@ -2625,11 +1453,74 @@ e_restore_irq:
return ret;
}
+/**
+ * snp_svsm_vtpm_probe() - Probe if SVSM provides a vTPM device
+ *
+ * Check that an SVSM is present and that it supports at least
+ * TPM_SEND_COMMAND, which is the only request used so far.
+ *
+ * Return: true if the platform provides a vTPM SVSM device, false otherwise.
+ */
+static bool snp_svsm_vtpm_probe(void)
+{
+ struct svsm_call call = {};
+
+ /* The vTPM device is available only if a SVSM is present */
+ if (!snp_vmpl)
+ return false;
+
+ call.caa = svsm_get_caa();
+ call.rax = SVSM_VTPM_CALL(SVSM_VTPM_QUERY);
+
+ if (svsm_perform_call_protocol(&call))
+ return false;
+
+ /* Check that the platform commands bitmap contains TPM_SEND_COMMAND - platform command 8 */
+ return call.rcx_out & BIT_ULL(8);
+}
+
+/**
+ * snp_svsm_vtpm_send_command() - Execute a vTPM operation on SVSM
+ * @buffer: A buffer used to both send the command and receive the response.
+ *
+ * Execute a SVSM_VTPM_CMD call as defined by
+ * "Secure VM Service Module for SEV-SNP Guests" Publication # 58019 Revision: 1.00
+ *
+ * All command request/response buffers have a common structure as specified by
+ * the following table:
+ * Byte      Size       In/Out    Description
+ * Offset    (Bytes)
+ *
+ * 0x000     4          In        Platform command
+ *                      Out       Platform command response size
+ *
+ * Each command can build upon this common request/response structure to create
+ * a structure specific to the command. See include/linux/tpm_svsm.h for more
+ * details.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int snp_svsm_vtpm_send_command(u8 *buffer)
+{
+ struct svsm_call call = {};
+
+ call.caa = svsm_get_caa();
+ call.rax = SVSM_VTPM_CALL(SVSM_VTPM_CMD);
+ call.rcx = __pa(buffer);
+
+ return svsm_perform_call_protocol(&call);
+}
+EXPORT_SYMBOL_GPL(snp_svsm_vtpm_send_command);
+
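A minimal usage sketch for the call above, assuming the caller owns a buffer in the kernel direct map (snp_svsm_vtpm_send_command() passes __pa() of it) that follows the command-specific layout from include/linux/tpm_svsm.h beyond the common 4-byte header; the function name here is illustrative only:

static int example_vtpm_send(u8 *buf)
{
	int ret;

	/* Offset 0x000, 4 bytes, In: platform command 8 == TPM_SEND_COMMAND */
	*(u32 *)buf = 8;
	/* ... command-specific request fields would follow (see tpm_svsm.h) ... */

	ret = snp_svsm_vtpm_send_command(buf);
	if (ret)
		return ret;

	/* Offset 0x000, Out: platform command response size */
	pr_info("SVSM vTPM response size: %u\n", *(u32 *)buf);
	return 0;
}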
static struct platform_device sev_guest_device = {
.name = "sev-guest",
.id = -1,
};
+static struct platform_device tpm_svsm_device = {
+ .name = "tpm-svsm",
+ .id = -1,
+};
+
static int __init snp_init_platform_device(void)
{
if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
@@ -2638,7 +1529,11 @@ static int __init snp_init_platform_device(void)
if (platform_device_register(&sev_guest_device))
return -ENODEV;
- pr_info("SNP guest platform device initialized.\n");
+ if (snp_svsm_vtpm_probe() &&
+ platform_device_register(&tpm_svsm_device))
+ return -ENODEV;
+
+ pr_info("SNP guest platform devices initialized.\n");
return 0;
}
device_initcall(snp_init_platform_device);
@@ -2835,7 +1730,7 @@ struct snp_msg_desc *snp_msg_alloc(void)
if (!mdesc)
return ERR_PTR(-ENOMEM);
- mem = ioremap_encrypted(secrets_pa, PAGE_SIZE);
+ mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE);
if (!mem)
goto e_free_mdesc;
@@ -3278,7 +2173,7 @@ void __init snp_secure_tsc_init(void)
return;
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
- rdmsrl(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
+ rdmsrq(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
snp_tsc_freq_khz = (unsigned long)(tsc_freq_mhz * 1000);
x86_platform.calibrate_cpu = securetsc_get_tsc_khz;
diff --git a/arch/x86/coco/sev/sev-nmi.c b/arch/x86/coco/sev/sev-nmi.c
new file mode 100644
index 000000000000..d8dfaddfb367
--- /dev/null
+++ b/arch/x86/coco/sev/sev-nmi.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2019 SUSE
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#define pr_fmt(fmt) "SEV: " fmt
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <asm/cpu_entry_area.h>
+#include <asm/msr.h>
+#include <asm/ptrace.h>
+#include <asm/sev.h>
+#include <asm/sev-internal.h>
+
+static __always_inline bool on_vc_stack(struct pt_regs *regs)
+{
+ unsigned long sp = regs->sp;
+
+ /* User-mode RSP is not trusted */
+ if (user_mode(regs))
+ return false;
+
+ /* SYSCALL gap still has user-mode RSP */
+ if (ip_within_syscall_gap(regs))
+ return false;
+
+ return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
+}
+
+/*
+ * This function handles the case when an NMI is raised in the #VC
+ * exception handler entry code, before the #VC handler has switched off
+ * its IST stack. In this case, the IST entry for #VC must be adjusted,
+ * so that any nested #VC exception will not overwrite the stack
+ * contents of the interrupted #VC handler.
+ *
+ * The IST entry is adjusted unconditionally so that it can also be
+ * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
+ * nested sev_es_ist_exit() call may adjust back the IST entry too
+ * early.
+ *
+ * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
+ * on the NMI IST stack, as they are only called from NMI handling code
+ * right now.
+ */
+void noinstr __sev_es_ist_enter(struct pt_regs *regs)
+{
+ unsigned long old_ist, new_ist;
+
+ /* Read old IST entry */
+ new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+
+ /*
+ * If NMI happened while on the #VC IST stack, set the new IST
+ * value below regs->sp, so that the interrupted stack frame is
+ * not overwritten by subsequent #VC exceptions.
+ */
+ if (on_vc_stack(regs))
+ new_ist = regs->sp;
+
+ /*
+ * Reserve additional 8 bytes and store old IST value so this
+ * adjustment can be unrolled in __sev_es_ist_exit().
+ */
+ new_ist -= sizeof(old_ist);
+ *(unsigned long *)new_ist = old_ist;
+
+ /* Set new IST entry */
+ this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
+}
+
+void noinstr __sev_es_ist_exit(void)
+{
+ unsigned long ist;
+
+ /* Read IST entry */
+ ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+
+ if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
+ return;
+
+ /* Read back old IST entry and write it to the TSS */
+ this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
+}
+
+void noinstr __sev_es_nmi_complete(void)
+{
+ struct ghcb_state state;
+ struct ghcb *ghcb;
+
+ ghcb = __sev_get_ghcb(&state);
+
+ vc_ghcb_invalidate(ghcb);
+ ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
+ ghcb_set_sw_exit_info_1(ghcb, 0);
+ ghcb_set_sw_exit_info_2(ghcb, 0);
+
+ sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
+ VMGEXIT();
+
+ __sev_put_ghcb(&state);
+}
diff --git a/arch/x86/coco/sev/vc-handle.c b/arch/x86/coco/sev/vc-handle.c
new file mode 100644
index 000000000000..0989d98da130
--- /dev/null
+++ b/arch/x86/coco/sev/vc-handle.c
@@ -0,0 +1,1061 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2019 SUSE
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#define pr_fmt(fmt) "SEV: " fmt
+
+#include <linux/sched/debug.h> /* For show_regs() */
+#include <linux/cc_platform.h>
+#include <linux/printk.h>
+#include <linux/mm_types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/psp-sev.h>
+#include <uapi/linux/sev-guest.h>
+
+#include <asm/init.h>
+#include <asm/stacktrace.h>
+#include <asm/sev.h>
+#include <asm/sev-internal.h>
+#include <asm/insn-eval.h>
+#include <asm/fpu/xcr.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/traps.h>
+#include <asm/svm.h>
+#include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/apic.h>
+#include <asm/cpuid/api.h>
+
+static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+ unsigned long vaddr, phys_addr_t *paddr)
+{
+ unsigned long va = (unsigned long)vaddr;
+ unsigned int level;
+ phys_addr_t pa;
+ pgd_t *pgd;
+ pte_t *pte;
+
+ pgd = __va(read_cr3_pa());
+ pgd = &pgd[pgd_index(va)];
+ pte = lookup_address_in_pgd(pgd, va, &level);
+ if (!pte) {
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.cr2 = vaddr;
+ ctxt->fi.error_code = 0;
+
+ if (user_mode(ctxt->regs))
+ ctxt->fi.error_code |= X86_PF_USER;
+
+ return ES_EXCEPTION;
+ }
+
+ if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
+ /* Emulated MMIO to/from encrypted memory not supported */
+ return ES_UNSUPPORTED;
+
+ pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
+ pa |= va & ~page_level_mask(level);
+
+ *paddr = pa;
+
+ return ES_OK;
+}
+
+static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
+{
+ BUG_ON(size > 4);
+
+ if (user_mode(ctxt->regs)) {
+ struct thread_struct *t = &current->thread;
+ struct io_bitmap *iobm = t->io_bitmap;
+ size_t idx;
+
+ if (!iobm)
+ goto fault;
+
+ for (idx = port; idx < port + size; ++idx) {
+ if (test_bit(idx, iobm->bitmap))
+ goto fault;
+ }
+ }
+
+ return ES_OK;
+
+fault:
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+
+ return ES_EXCEPTION;
+}
+
+void vc_forward_exception(struct es_em_ctxt *ctxt)
+{
+ long error_code = ctxt->fi.error_code;
+ int trapnr = ctxt->fi.vector;
+
+ ctxt->regs->orig_ax = ctxt->fi.error_code;
+
+ switch (trapnr) {
+ case X86_TRAP_GP:
+ exc_general_protection(ctxt->regs, error_code);
+ break;
+ case X86_TRAP_UD:
+ exc_invalid_op(ctxt->regs);
+ break;
+ case X86_TRAP_PF:
+ write_cr2(ctxt->fi.cr2);
+ exc_page_fault(ctxt->regs, error_code);
+ break;
+ case X86_TRAP_AC:
+ exc_alignment_check(ctxt->regs, error_code);
+ break;
+ default:
+ pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
+ BUG();
+ }
+}
+
+static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
+ unsigned char *buffer)
+{
+ return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
+}
+
+static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
+{
+ char buffer[MAX_INSN_SIZE];
+ int insn_bytes;
+
+ insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
+ if (insn_bytes == 0) {
+ /* Nothing could be copied */
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
+ ctxt->fi.cr2 = ctxt->regs->ip;
+ return ES_EXCEPTION;
+ } else if (insn_bytes == -EINVAL) {
+ /* Effective RIP could not be calculated */
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+ ctxt->fi.cr2 = 0;
+ return ES_EXCEPTION;
+ }
+
+ if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
+ return ES_DECODE_FAILED;
+
+ if (ctxt->insn.immediate.got)
+ return ES_OK;
+ else
+ return ES_DECODE_FAILED;
+}
+
+static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
+{
+ char buffer[MAX_INSN_SIZE];
+ int res, ret;
+
+ res = vc_fetch_insn_kernel(ctxt, buffer);
+ if (res) {
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = X86_PF_INSTR;
+ ctxt->fi.cr2 = ctxt->regs->ip;
+ return ES_EXCEPTION;
+ }
+
+ ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
+ if (ret < 0)
+ return ES_DECODE_FAILED;
+ else
+ return ES_OK;
+}
+
+static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
+{
+ if (user_mode(ctxt->regs))
+ return __vc_decode_user_insn(ctxt);
+ else
+ return __vc_decode_kern_insn(ctxt);
+}
+
+static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
+ char *dst, char *buf, size_t size)
+{
+ unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
+
+ /*
+ * This function uses __put_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __put_user() does no
+ * sanity checks of the pointer being accessed. All that it does is
+ * to report when the access failed.
+ *
+ * Also, this function runs in atomic context, so __put_user() is not
+ * allowed to sleep. The page-fault handler detects that it is running
+ * in atomic context and will not try to take mmap_sem and handle the
+ * fault, so additional pagefault_enable()/disable() calls are not
+ * needed.
+ *
+ * The access can't be done via copy_to_user() here because
+ * vc_write_mem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the #VC handler by
+ * splitting the move up into a read and a write and taking a nested #VC
+ * exception on whichever of them is the MMIO access. Using string
+ * instructions here would cause infinite nesting.
+ */
+ switch (size) {
+ case 1: {
+ u8 d1;
+ u8 __user *target = (u8 __user *)dst;
+
+ memcpy(&d1, buf, 1);
+ if (__put_user(d1, target))
+ goto fault;
+ break;
+ }
+ case 2: {
+ u16 d2;
+ u16 __user *target = (u16 __user *)dst;
+
+ memcpy(&d2, buf, 2);
+ if (__put_user(d2, target))
+ goto fault;
+ break;
+ }
+ case 4: {
+ u32 d4;
+ u32 __user *target = (u32 __user *)dst;
+
+ memcpy(&d4, buf, 4);
+ if (__put_user(d4, target))
+ goto fault;
+ break;
+ }
+ case 8: {
+ u64 d8;
+ u64 __user *target = (u64 __user *)dst;
+
+ memcpy(&d8, buf, 8);
+ if (__put_user(d8, target))
+ goto fault;
+ break;
+ }
+ default:
+ WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
+ return ES_UNSUPPORTED;
+ }
+
+ return ES_OK;
+
+fault:
+ if (user_mode(ctxt->regs))
+ error_code |= X86_PF_USER;
+
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = error_code;
+ ctxt->fi.cr2 = (unsigned long)dst;
+
+ return ES_EXCEPTION;
+}
+
+static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
+ char *src, char *buf, size_t size)
+{
+ unsigned long error_code = X86_PF_PROT;
+
+ /*
+ * This function uses __get_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __get_user() does no
+ * sanity checks of the pointer being accessed. All that it does is
+ * to report when the access failed.
+ *
+ * Also, this function runs in atomic context, so __get_user() is not
+ * allowed to sleep. The page-fault handler detects that it is running
+ * in atomic context and will not try to take mmap_sem and handle the
+ * fault, so additional pagefault_enable()/disable() calls are not
+ * needed.
+ *
+ * The access can't be done via copy_from_user() here because
+ * vc_read_mem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the #VC handler by
+ * splitting the move up into a read and a write and taking a nested #VC
+ * exception on whichever of them is the MMIO access. Using string
+ * instructions here would cause infinite nesting.
+ */
+ switch (size) {
+ case 1: {
+ u8 d1;
+ u8 __user *s = (u8 __user *)src;
+
+ if (__get_user(d1, s))
+ goto fault;
+ memcpy(buf, &d1, 1);
+ break;
+ }
+ case 2: {
+ u16 d2;
+ u16 __user *s = (u16 __user *)src;
+
+ if (__get_user(d2, s))
+ goto fault;
+ memcpy(buf, &d2, 2);
+ break;
+ }
+ case 4: {
+ u32 d4;
+ u32 __user *s = (u32 __user *)src;
+
+ if (__get_user(d4, s))
+ goto fault;
+ memcpy(buf, &d4, 4);
+ break;
+ }
+ case 8: {
+ u64 d8;
+ u64 __user *s = (u64 __user *)src;
+ if (__get_user(d8, s))
+ goto fault;
+ memcpy(buf, &d8, 8);
+ break;
+ }
+ default:
+ WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
+ return ES_UNSUPPORTED;
+ }
+
+ return ES_OK;
+
+fault:
+ if (user_mode(ctxt->regs))
+ error_code |= X86_PF_USER;
+
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = error_code;
+ ctxt->fi.cr2 = (unsigned long)src;
+
+ return ES_EXCEPTION;
+}
+
+#define sev_printk(fmt, ...) printk(fmt, ##__VA_ARGS__)
+
+#include "vc-shared.c"
+
+/* Writes to the SVSM CAA MSR are ignored */
+static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write)
+{
+ if (write)
+ return ES_OK;
+
+ regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa));
+ regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa));
+
+ return ES_OK;
+}
+
+/*
+ * TSC related accesses should not exit to the hypervisor when a guest is
+ * executing with Secure TSC enabled, so special handling is required for
+ * accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ.
+ */
+static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write)
+{
+ u64 tsc;
+
+ /*
+ * GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled.
+ * Terminate the SNP guest when the interception is enabled.
+ */
+ if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ)
+ return ES_VMM_ERROR;
+
+ /*
+ * Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC
+ * to return undefined values, so ignore all writes.
+ *
+ * Reads: Reads of MSR_IA32_TSC should return the current TSC value, use
+ * the value returned by rdtsc_ordered().
+ */
+ if (write) {
+ WARN_ONCE(1, "TSC MSR writes are verboten!\n");
+ return ES_OK;
+ }
+
+ tsc = rdtsc_ordered();
+ regs->ax = lower_32_bits(tsc);
+ regs->dx = upper_32_bits(tsc);
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ struct pt_regs *regs = ctxt->regs;
+ enum es_result ret;
+ bool write;
+
+ /* Is it a WRMSR? */
+ write = ctxt->insn.opcode.bytes[1] == 0x30;
+
+ switch (regs->cx) {
+ case MSR_SVSM_CAA:
+ return __vc_handle_msr_caa(regs, write);
+ case MSR_IA32_TSC:
+ case MSR_AMD64_GUEST_TSC_FREQ:
+ if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
+ return __vc_handle_secure_tsc_msrs(regs, write);
+ break;
+ default:
+ break;
+ }
+
+ ghcb_set_rcx(ghcb, regs->cx);
+ if (write) {
+ ghcb_set_rax(ghcb, regs->ax);
+ ghcb_set_rdx(ghcb, regs->dx);
+ }
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, write, 0);
+
+ if ((ret == ES_OK) && !write) {
+ regs->ax = ghcb->save.rax;
+ regs->dx = ghcb->save.rdx;
+ }
+
+ return ret;
+}
+
+static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
+{
+ int trapnr = ctxt->fi.vector;
+
+ if (trapnr == X86_TRAP_PF)
+ native_write_cr2(ctxt->fi.cr2);
+
+ ctxt->regs->orig_ax = ctxt->fi.error_code;
+ do_early_exception(ctxt->regs, trapnr);
+}
+
+static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
+{
+ long *reg_array;
+ int offset;
+
+ reg_array = (long *)ctxt->regs;
+ offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);
+
+ if (offset < 0)
+ return NULL;
+
+ offset /= sizeof(long);
+
+ return reg_array + offset;
+}
+static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+ unsigned int bytes, bool read)
+{
+ u64 exit_code, exit_info_1, exit_info_2;
+ unsigned long ghcb_pa = __pa(ghcb);
+ enum es_result res;
+ phys_addr_t paddr;
+ void __user *ref;
+
+ ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
+ if (ref == (void __user *)-1L)
+ return ES_UNSUPPORTED;
+
+ exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
+
+ res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
+ if (res != ES_OK) {
+ if (res == ES_EXCEPTION && !read)
+ ctxt->fi.error_code |= X86_PF_WRITE;
+
+ return res;
+ }
+
+ exit_info_1 = paddr;
+ /* Can never be greater than 8 */
+ exit_info_2 = bytes;
+
+ ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
+
+ return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
+}
+
+/*
+ * The MOVS instruction has two memory operands, which raises the
+ * problem that it is not known whether the access to the source or the
+ * destination caused the #VC exception (and hence whether an MMIO read
+ * or write operation needs to be emulated).
+ *
+ * Instead of playing games with walking page-tables and trying to guess
+ * whether the source or destination is an MMIO range, split the move
+ * into two operations, a read and a write with only one memory operand.
+ * This will cause a nested #VC exception on the MMIO address which can
+ * then be handled.
+ *
+ * This implementation has the benefit that it also supports MOVS where
+ * source _and_ destination are MMIO regions.
+ *
+ * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
+ * rare operation. If it turns out to be a performance problem the split
+ * operations can be moved to memcpy_fromio() and memcpy_toio().
+ */
+static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
+ unsigned int bytes)
+{
+ unsigned long ds_base, es_base;
+ unsigned char *src, *dst;
+ unsigned char buffer[8];
+ enum es_result ret;
+ bool rep;
+ int off;
+
+ ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
+ es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
+
+ if (ds_base == -1L || es_base == -1L) {
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+ return ES_EXCEPTION;
+ }
+
+ src = ds_base + (unsigned char *)ctxt->regs->si;
+ dst = es_base + (unsigned char *)ctxt->regs->di;
+
+ ret = vc_read_mem(ctxt, src, buffer, bytes);
+ if (ret != ES_OK)
+ return ret;
+
+ ret = vc_write_mem(ctxt, dst, buffer, bytes);
+ if (ret != ES_OK)
+ return ret;
+
+ if (ctxt->regs->flags & X86_EFLAGS_DF)
+ off = -bytes;
+ else
+ off = bytes;
+
+ ctxt->regs->si += off;
+ ctxt->regs->di += off;
+
+ rep = insn_has_rep_prefix(&ctxt->insn);
+ if (rep)
+ ctxt->regs->cx -= 1;
+
+ if (!rep || ctxt->regs->cx == 0)
+ return ES_OK;
+ else
+ return ES_RETRY;
+}
+
+static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ struct insn *insn = &ctxt->insn;
+ enum insn_mmio_type mmio;
+ unsigned int bytes = 0;
+ enum es_result ret;
+ u8 sign_byte;
+ long *reg_data;
+
+ mmio = insn_decode_mmio(insn, &bytes);
+ if (mmio == INSN_MMIO_DECODE_FAILED)
+ return ES_DECODE_FAILED;
+
+ if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
+ reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
+ if (!reg_data)
+ return ES_DECODE_FAILED;
+ }
+
+ if (user_mode(ctxt->regs))
+ return ES_UNSUPPORTED;
+
+ switch (mmio) {
+ case INSN_MMIO_WRITE:
+ memcpy(ghcb->shared_buffer, reg_data, bytes);
+ ret = vc_do_mmio(ghcb, ctxt, bytes, false);
+ break;
+ case INSN_MMIO_WRITE_IMM:
+ memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
+ ret = vc_do_mmio(ghcb, ctxt, bytes, false);
+ break;
+ case INSN_MMIO_READ:
+ ret = vc_do_mmio(ghcb, ctxt, bytes, true);
+ if (ret)
+ break;
+
+ /* Zero-extend for 32-bit operation */
+ if (bytes == 4)
+ *reg_data = 0;
+
+ memcpy(reg_data, ghcb->shared_buffer, bytes);
+ break;
+ case INSN_MMIO_READ_ZERO_EXTEND:
+ ret = vc_do_mmio(ghcb, ctxt, bytes, true);
+ if (ret)
+ break;
+
+ /* Zero extend based on operand size */
+ memset(reg_data, 0, insn->opnd_bytes);
+ memcpy(reg_data, ghcb->shared_buffer, bytes);
+ break;
+ case INSN_MMIO_READ_SIGN_EXTEND:
+ ret = vc_do_mmio(ghcb, ctxt, bytes, true);
+ if (ret)
+ break;
+
+ if (bytes == 1) {
+ u8 *val = (u8 *)ghcb->shared_buffer;
+
+ sign_byte = (*val & 0x80) ? 0xff : 0x00;
+ } else {
+ u16 *val = (u16 *)ghcb->shared_buffer;
+
+ sign_byte = (*val & 0x8000) ? 0xff : 0x00;
+ }
+
+ /* Sign extend based on operand size */
+ memset(reg_data, sign_byte, insn->opnd_bytes);
+ memcpy(reg_data, ghcb->shared_buffer, bytes);
+ break;
+ case INSN_MMIO_MOVS:
+ ret = vc_handle_mmio_movs(ctxt, bytes);
+ break;
+ default:
+ ret = ES_UNSUPPORTED;
+ break;
+ }
+
+ return ret;
+}
+
+static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
+ long val, *reg = vc_insn_get_rm(ctxt);
+ enum es_result ret;
+
+ if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
+ return ES_VMM_ERROR;
+
+ if (!reg)
+ return ES_DECODE_FAILED;
+
+ val = *reg;
+
+ /* Upper 32 bits must be written as zeroes */
+ if (val >> 32) {
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+ return ES_EXCEPTION;
+ }
+
+ /* Clear out other reserved bits and set bit 10 */
+ val = (val & 0xffff23ffL) | BIT(10);
+
+ /* Early non-zero writes to DR7 are not supported */
+ if (!data && (val & ~DR7_RESET_VALUE))
+ return ES_UNSUPPORTED;
+
+ /* Using a value of 0 for ExitInfo1 means RAX holds the value */
+ ghcb_set_rax(ghcb, val);
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (data)
+ data->dr7 = val;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
+ long *reg = vc_insn_get_rm(ctxt);
+
+ if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
+ return ES_VMM_ERROR;
+
+ if (!reg)
+ return ES_DECODE_FAILED;
+
+ if (data)
+ *reg = data->dr7;
+ else
+ *reg = DR7_RESET_VALUE;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
+}
+
+static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ enum es_result ret;
+
+ ghcb_set_rcx(ghcb, ctxt->regs->cx);
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
+ return ES_VMM_ERROR;
+
+ ctxt->regs->ax = ghcb->save.rax;
+ ctxt->regs->dx = ghcb->save.rdx;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_monitor(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ /*
+ * Treat it as a NOP and do not leak a physical address to the
+ * hypervisor.
+ */
+ return ES_OK;
+}
+
+static enum es_result vc_handle_mwait(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ /* Treat the same as MONITOR/MONITORX */
+ return ES_OK;
+}
+
+static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ enum es_result ret;
+
+ ghcb_set_rax(ghcb, ctxt->regs->ax);
+ ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);
+
+ if (x86_platform.hyper.sev_es_hcall_prepare)
+ x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (!ghcb_rax_is_valid(ghcb))
+ return ES_VMM_ERROR;
+
+ ctxt->regs->ax = ghcb->save.rax;
+
+ /*
+ * Call sev_es_hcall_finish() after regs->ax is already set.
+ * This allows the hypervisor handler to overwrite it again if
+ * necessary.
+ */
+ if (x86_platform.hyper.sev_es_hcall_finish &&
+ !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
+ return ES_VMM_ERROR;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ /*
+ * Calling exc_alignment_check() directly does not work, because it
+ * enables IRQs and the GHCB is active. Forward the exception and call
+ * it later from vc_forward_exception().
+ */
+ ctxt->fi.vector = X86_TRAP_AC;
+ ctxt->fi.error_code = 0;
+ return ES_EXCEPTION;
+}
+
+static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
+ struct ghcb *ghcb,
+ unsigned long exit_code)
+{
+ enum es_result result = vc_check_opcode_bytes(ctxt, exit_code);
+
+ if (result != ES_OK)
+ return result;
+
+ switch (exit_code) {
+ case SVM_EXIT_READ_DR7:
+ result = vc_handle_dr7_read(ghcb, ctxt);
+ break;
+ case SVM_EXIT_WRITE_DR7:
+ result = vc_handle_dr7_write(ghcb, ctxt);
+ break;
+ case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
+ result = vc_handle_trap_ac(ghcb, ctxt);
+ break;
+ case SVM_EXIT_RDTSC:
+ case SVM_EXIT_RDTSCP:
+ result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
+ break;
+ case SVM_EXIT_RDPMC:
+ result = vc_handle_rdpmc(ghcb, ctxt);
+ break;
+ case SVM_EXIT_INVD:
+ pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
+ result = ES_UNSUPPORTED;
+ break;
+ case SVM_EXIT_CPUID:
+ result = vc_handle_cpuid(ghcb, ctxt);
+ break;
+ case SVM_EXIT_IOIO:
+ result = vc_handle_ioio(ghcb, ctxt);
+ break;
+ case SVM_EXIT_MSR:
+ result = vc_handle_msr(ghcb, ctxt);
+ break;
+ case SVM_EXIT_VMMCALL:
+ result = vc_handle_vmmcall(ghcb, ctxt);
+ break;
+ case SVM_EXIT_WBINVD:
+ result = vc_handle_wbinvd(ghcb, ctxt);
+ break;
+ case SVM_EXIT_MONITOR:
+ result = vc_handle_monitor(ghcb, ctxt);
+ break;
+ case SVM_EXIT_MWAIT:
+ result = vc_handle_mwait(ghcb, ctxt);
+ break;
+ case SVM_EXIT_NPF:
+ result = vc_handle_mmio(ghcb, ctxt);
+ break;
+ default:
+ /*
+ * Unexpected #VC exception
+ */
+ result = ES_UNSUPPORTED;
+ }
+
+ return result;
+}
+
+static __always_inline bool is_vc2_stack(unsigned long sp)
+{
+ return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
+}
+
+static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
+{
+ unsigned long sp, prev_sp;
+
+ sp = (unsigned long)regs;
+ prev_sp = regs->sp;
+
+ /*
+ * If the code was already executing on the VC2 stack when the #VC
+ * happened, let it proceed to the normal handling routine. This way the
+ * code executing on the VC2 stack can cause #VC exceptions to get handled.
+ */
+ return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
+}
+
+static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
+{
+ struct ghcb_state state;
+ struct es_em_ctxt ctxt;
+ enum es_result result;
+ struct ghcb *ghcb;
+ bool ret = true;
+
+ ghcb = __sev_get_ghcb(&state);
+
+ vc_ghcb_invalidate(ghcb);
+ result = vc_init_em_ctxt(&ctxt, regs, error_code);
+
+ if (result == ES_OK)
+ result = vc_handle_exitcode(&ctxt, ghcb, error_code);
+
+ __sev_put_ghcb(&state);
+
+ /* Done - now check the result */
+ switch (result) {
+ case ES_OK:
+ vc_finish_insn(&ctxt);
+ break;
+ case ES_UNSUPPORTED:
+ pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
+ error_code, regs->ip);
+ ret = false;
+ break;
+ case ES_VMM_ERROR:
+ pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
+ error_code, regs->ip);
+ ret = false;
+ break;
+ case ES_DECODE_FAILED:
+ pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
+ error_code, regs->ip);
+ ret = false;
+ break;
+ case ES_EXCEPTION:
+ vc_forward_exception(&ctxt);
+ break;
+ case ES_RETRY:
+ /* Nothing to do */
+ break;
+ default:
+ pr_emerg("Unknown result in %s():%d\n", __func__, result);
+ /*
+ * Emulating the instruction which caused the #VC exception
+ * failed - can't continue so print debug information
+ */
+ BUG();
+ }
+
+ return ret;
+}
+
+static __always_inline bool vc_is_db(unsigned long error_code)
+{
+ return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
+}
+
+/*
+ * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
+ * and will panic when an error happens.
+ */
+DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
+{
+ irqentry_state_t irq_state;
+
+ /*
+ * With the current implementation it is always possible to switch to a
+ * safe stack because #VC exceptions only happen at known places, like
+ * intercepted instructions or accesses to MMIO areas/IO ports. They can
+ * also happen with code instrumentation when the hypervisor intercepts
+ * #DB, but the critical paths are forbidden to be instrumented, so #DB
+ * exceptions currently also only happen in safe places.
+ *
+ * But keep this here in case the noinstr annotations are violated due
+ * to a bug elsewhere.
+ */
+ if (unlikely(vc_from_invalid_context(regs))) {
+ instrumentation_begin();
+ panic("Can't handle #VC exception from unsupported context\n");
+ instrumentation_end();
+ }
+
+ /*
+ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ */
+ if (vc_is_db(error_code)) {
+ exc_debug(regs);
+ return;
+ }
+
+ irq_state = irqentry_nmi_enter(regs);
+
+ instrumentation_begin();
+
+ if (!vc_raw_handle_exception(regs, error_code)) {
+ /* Show some debug info */
+ show_regs(regs);
+
+ /* Ask hypervisor to sev_es_terminate */
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+
+ /* If that fails and we get here - just panic */
+ panic("Returned from Terminate-Request to Hypervisor\n");
+ }
+
+ instrumentation_end();
+ irqentry_nmi_exit(regs, irq_state);
+}
+
+/*
+ * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
+ * and will kill the current task with SIGBUS when an error happens.
+ */
+DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
+{
+ /*
+ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ */
+ if (vc_is_db(error_code)) {
+ noist_exc_debug(regs);
+ return;
+ }
+
+ irqentry_enter_from_user_mode(regs);
+ instrumentation_begin();
+
+ if (!vc_raw_handle_exception(regs, error_code)) {
+ /*
+ * Do not kill the machine if user-space triggered the
+ * exception. Send SIGBUS instead and let user-space deal with
+ * it.
+ */
+ force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
+ }
+
+ instrumentation_end();
+ irqentry_exit_to_user_mode(regs);
+}
+
+bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
+{
+ unsigned long exit_code = regs->orig_ax;
+ struct es_em_ctxt ctxt;
+ enum es_result result;
+
+ vc_ghcb_invalidate(boot_ghcb);
+
+ result = vc_init_em_ctxt(&ctxt, regs, exit_code);
+ if (result == ES_OK)
+ result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);
+
+ /* Done - now check the result */
+ switch (result) {
+ case ES_OK:
+ vc_finish_insn(&ctxt);
+ break;
+ case ES_UNSUPPORTED:
+ early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
+ exit_code, regs->ip);
+ goto fail;
+ case ES_VMM_ERROR:
+ early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
+ exit_code, regs->ip);
+ goto fail;
+ case ES_DECODE_FAILED:
+ early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
+ exit_code, regs->ip);
+ goto fail;
+ case ES_EXCEPTION:
+ vc_early_forward_exception(&ctxt);
+ break;
+ case ES_RETRY:
+ /* Nothing to do */
+ break;
+ default:
+ BUG();
+ }
+
+ return true;
+
+fail:
+ show_regs(regs);
+
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+}
+
diff --git a/arch/x86/coco/sev/vc-shared.c b/arch/x86/coco/sev/vc-shared.c
new file mode 100644
index 000000000000..2c0ab0fdc060
--- /dev/null
+++ b/arch/x86/coco/sev/vc-shared.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+
+static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
+ unsigned long exit_code)
+{
+ unsigned int opcode = (unsigned int)ctxt->insn.opcode.value;
+ u8 modrm = ctxt->insn.modrm.value;
+
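+ /*
+  * Note: insn.opcode.value packs the opcode bytes with bytes[0] in the
+  * low byte, so the two-byte sequence 0x0f 0xa2 (CPUID) is matched as
+  * 0xa20f below, 0x0f 0x32 (RDMSR) as 0x320f, and so on.
+  */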
+ switch (exit_code) {
+
+ case SVM_EXIT_IOIO:
+ case SVM_EXIT_NPF:
+ /* handled separately */
+ return ES_OK;
+
+ case SVM_EXIT_CPUID:
+ if (opcode == 0xa20f)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_INVD:
+ if (opcode == 0x080f)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_MONITOR:
+ /* MONITOR and MONITORX instructions generate the same error code */
+ if (opcode == 0x010f && (modrm == 0xc8 || modrm == 0xfa))
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_MWAIT:
+ /* MWAIT and MWAITX instructions generate the same error code */
+ if (opcode == 0x010f && (modrm == 0xc9 || modrm == 0xfb))
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_MSR:
+ /* RDMSR */
+ if (opcode == 0x320f ||
+ /* WRMSR */
+ opcode == 0x300f)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_RDPMC:
+ if (opcode == 0x330f)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_RDTSC:
+ if (opcode == 0x310f)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_RDTSCP:
+ if (opcode == 0x010f && modrm == 0xf9)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_READ_DR7:
+ if (opcode == 0x210f &&
+ X86_MODRM_REG(ctxt->insn.modrm.value) == 7)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_VMMCALL:
+ if (opcode == 0x010f && modrm == 0xd9)
+ return ES_OK;
+
+ break;
+
+ case SVM_EXIT_WRITE_DR7:
+ if (opcode == 0x230f &&
+ X86_MODRM_REG(ctxt->insn.modrm.value) == 7)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_WBINVD:
+ if (opcode == 0x90f)
+ return ES_OK;
+ break;
+
+ default:
+ break;
+ }
+
+ sev_printk(KERN_ERR "Wrong/unhandled opcode bytes: 0x%x, exit_code: 0x%lx, rIP: 0x%lx\n",
+ opcode, exit_code, ctxt->regs->ip);
+
+ return ES_UNSUPPORTED;
+}
+
+static bool vc_decoding_needed(unsigned long exit_code)
+{
+ /* Exceptions don't require decoding the instruction */
+ return !(exit_code >= SVM_EXIT_EXCP_BASE &&
+ exit_code <= SVM_EXIT_LAST_EXCP);
+}
+
+static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
+ struct pt_regs *regs,
+ unsigned long exit_code)
+{
+ enum es_result ret = ES_OK;
+
+ memset(ctxt, 0, sizeof(*ctxt));
+ ctxt->regs = regs;
+
+ if (vc_decoding_needed(exit_code))
+ ret = vc_decode_insn(ctxt);
+
+ return ret;
+}
+
+static void vc_finish_insn(struct es_em_ctxt *ctxt)
+{
+ ctxt->regs->ip += ctxt->insn.length;
+}
+
+static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
+ unsigned long address,
+ bool write)
+{
+ if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = X86_PF_USER;
+ ctxt->fi.cr2 = address;
+ if (write)
+ ctxt->fi.error_code |= X86_PF_WRITE;
+
+ return ES_EXCEPTION;
+ }
+
+ return ES_OK;
+}
+
+static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
+ void *src, char *buf,
+ unsigned int data_size,
+ unsigned int count,
+ bool backwards)
+{
+ int i, b = backwards ? -1 : 1;
+ unsigned long address = (unsigned long)src;
+ enum es_result ret;
+
+ ret = vc_insn_string_check(ctxt, address, false);
+ if (ret != ES_OK)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ void *s = src + (i * data_size * b);
+ char *d = buf + (i * data_size);
+
+ ret = vc_read_mem(ctxt, s, d, data_size);
+ if (ret != ES_OK)
+ break;
+ }
+
+ return ret;
+}
+
+static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
+ void *dst, char *buf,
+ unsigned int data_size,
+ unsigned int count,
+ bool backwards)
+{
+ int i, s = backwards ? -1 : 1;
+ unsigned long address = (unsigned long)dst;
+ enum es_result ret;
+
+ ret = vc_insn_string_check(ctxt, address, true);
+ if (ret != ES_OK)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ void *d = dst + (i * data_size * s);
+ char *b = buf + (i * data_size);
+
+ ret = vc_write_mem(ctxt, d, b, data_size);
+ if (ret != ES_OK)
+ break;
+ }
+
+ return ret;
+}
+
+#define IOIO_TYPE_STR BIT(2)
+#define IOIO_TYPE_IN 1
+#define IOIO_TYPE_INS (IOIO_TYPE_IN | IOIO_TYPE_STR)
+#define IOIO_TYPE_OUT 0
+#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR)
+
+#define IOIO_REP BIT(3)
+
+#define IOIO_ADDR_64 BIT(9)
+#define IOIO_ADDR_32 BIT(8)
+#define IOIO_ADDR_16 BIT(7)
+
+#define IOIO_DATA_32 BIT(6)
+#define IOIO_DATA_16 BIT(5)
+#define IOIO_DATA_8 BIT(4)
+
+#define IOIO_SEG_ES (0 << 10)
+#define IOIO_SEG_DS (3 << 10)
+
+static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+{
+ struct insn *insn = &ctxt->insn;
+ size_t size;
+ u64 port;
+
+ *exitinfo = 0;
+
+ switch (insn->opcode.bytes[0]) {
+ /* INS opcodes */
+ case 0x6c:
+ case 0x6d:
+ *exitinfo |= IOIO_TYPE_INS;
+ *exitinfo |= IOIO_SEG_ES;
+ port = ctxt->regs->dx & 0xffff;
+ break;
+
+ /* OUTS opcodes */
+ case 0x6e:
+ case 0x6f:
+ *exitinfo |= IOIO_TYPE_OUTS;
+ *exitinfo |= IOIO_SEG_DS;
+ port = ctxt->regs->dx & 0xffff;
+ break;
+
+ /* IN immediate opcodes */
+ case 0xe4:
+ case 0xe5:
+ *exitinfo |= IOIO_TYPE_IN;
+ port = (u8)insn->immediate.value & 0xffff;
+ break;
+
+ /* OUT immediate opcodes */
+ case 0xe6:
+ case 0xe7:
+ *exitinfo |= IOIO_TYPE_OUT;
+ port = (u8)insn->immediate.value & 0xffff;
+ break;
+
+ /* IN register opcodes */
+ case 0xec:
+ case 0xed:
+ *exitinfo |= IOIO_TYPE_IN;
+ port = ctxt->regs->dx & 0xffff;
+ break;
+
+ /* OUT register opcodes */
+ case 0xee:
+ case 0xef:
+ *exitinfo |= IOIO_TYPE_OUT;
+ port = ctxt->regs->dx & 0xffff;
+ break;
+
+ default:
+ return ES_DECODE_FAILED;
+ }
+
+ *exitinfo |= port << 16;
+
+ switch (insn->opcode.bytes[0]) {
+ case 0x6c:
+ case 0x6e:
+ case 0xe4:
+ case 0xe6:
+ case 0xec:
+ case 0xee:
+ /* Single byte opcodes */
+ *exitinfo |= IOIO_DATA_8;
+ size = 1;
+ break;
+ default:
+ /* Length determined by instruction parsing */
+ *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
+ : IOIO_DATA_32;
+ size = (insn->opnd_bytes == 2) ? 2 : 4;
+ }
+
+ switch (insn->addr_bytes) {
+ case 2:
+ *exitinfo |= IOIO_ADDR_16;
+ break;
+ case 4:
+ *exitinfo |= IOIO_ADDR_32;
+ break;
+ case 8:
+ *exitinfo |= IOIO_ADDR_64;
+ break;
+ }
+
+ if (insn_has_rep_prefix(insn))
+ *exitinfo |= IOIO_REP;
+
+ return vc_ioio_check(ctxt, (u16)port, size);
+}
+
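As a worked example of the exit_info_1 encoding built above, assuming 64-bit mode where insn->addr_bytes is 8:

/*
 *   out %al, $0x80   (opcode 0xe6, 8-bit data, immediate port)
 *     type : IOIO_TYPE_OUT           = 0x00000000
 *     port : 0x80 << 16              = 0x00800000
 *     data : IOIO_DATA_8  (BIT(4))   = 0x00000010
 *     addr : IOIO_ADDR_64 (BIT(9))   = 0x00000200
 *     total: exit_info_1             = 0x00800210
 */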
+static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ struct pt_regs *regs = ctxt->regs;
+ u64 exit_info_1, exit_info_2;
+ enum es_result ret;
+
+ ret = vc_ioio_exitinfo(ctxt, &exit_info_1);
+ if (ret != ES_OK)
+ return ret;
+
+ if (exit_info_1 & IOIO_TYPE_STR) {
+
+ /* (REP) INS/OUTS */
+
+ bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF);
+ unsigned int io_bytes, exit_bytes;
+ unsigned int ghcb_count, op_count;
+ unsigned long es_base;
+ u64 sw_scratch;
+
+ /*
+ * For the string variants with rep prefix the number of in/out
+ * operations per #VC exception is limited so that the kernel
+ * has a chance to take interrupts and re-schedule while the
+ * instruction is emulated.
+ */
+ io_bytes = (exit_info_1 >> 4) & 0x7;
+ ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes;
+
+ op_count = (exit_info_1 & IOIO_REP) ? regs->cx : 1;
+ exit_info_2 = min(op_count, ghcb_count);
+ exit_bytes = exit_info_2 * io_bytes;
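+ /*
+  * If the REP count exceeds what fits into the shared buffer, only
+  * part of the operation is emulated in this pass; the ES_RETRY
+  * result below re-enters the handler until regs->cx reaches zero.
+  */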
+
+ es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
+
+ /* Read bytes of OUTS into the shared buffer */
+ if (!(exit_info_1 & IOIO_TYPE_IN)) {
+ ret = vc_insn_string_read(ctxt,
+ (void *)(es_base + regs->si),
+ ghcb->shared_buffer, io_bytes,
+ exit_info_2, df);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Issue a VMGEXIT to the HV to consume the bytes from the
+ * shared buffer or to have it write them into the shared buffer
+ * depending on the instruction: OUTS or INS.
+ */
+ sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
+ ghcb_set_sw_scratch(ghcb, sw_scratch);
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
+ exit_info_1, exit_info_2);
+ if (ret != ES_OK)
+ return ret;
+
+ /* Read bytes from shared buffer into the guest's destination. */
+ if (exit_info_1 & IOIO_TYPE_IN) {
+ ret = vc_insn_string_write(ctxt,
+ (void *)(es_base + regs->di),
+ ghcb->shared_buffer, io_bytes,
+ exit_info_2, df);
+ if (ret)
+ return ret;
+
+ if (df)
+ regs->di -= exit_bytes;
+ else
+ regs->di += exit_bytes;
+ } else {
+ if (df)
+ regs->si -= exit_bytes;
+ else
+ regs->si += exit_bytes;
+ }
+
+ if (exit_info_1 & IOIO_REP)
+ regs->cx -= exit_info_2;
+
+ ret = regs->cx ? ES_RETRY : ES_OK;
+
+ } else {
+
+ /* IN/OUT into/from rAX */
+
+ int bits = (exit_info_1 & 0x70) >> 1;
+ u64 rax = 0;
+
+ if (!(exit_info_1 & IOIO_TYPE_IN))
+ rax = lower_bits(regs->ax, bits);
+
+ ghcb_set_rax(ghcb, rax);
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (exit_info_1 & IOIO_TYPE_IN) {
+ if (!ghcb_rax_is_valid(ghcb))
+ return ES_VMM_ERROR;
+ regs->ax = lower_bits(ghcb->save.rax, bits);
+ }
+ }
+
+ return ret;
+}
+
+static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ struct pt_regs *regs = ctxt->regs;
+ struct cpuid_leaf leaf;
+ int ret;
+
+ leaf.fn = regs->ax;
+ leaf.subfn = regs->cx;
+ ret = snp_cpuid(ghcb, ctxt, &leaf);
+ if (!ret) {
+ regs->ax = leaf.eax;
+ regs->bx = leaf.ebx;
+ regs->cx = leaf.ecx;
+ regs->dx = leaf.edx;
+ }
+
+ return ret;
+}
+
+static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ struct pt_regs *regs = ctxt->regs;
+ u32 cr4 = native_read_cr4();
+ enum es_result ret;
+ int snp_cpuid_ret;
+
+ snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt);
+ if (!snp_cpuid_ret)
+ return ES_OK;
+ if (snp_cpuid_ret != -EOPNOTSUPP)
+ return ES_VMM_ERROR;
+
+ ghcb_set_rax(ghcb, regs->ax);
+ ghcb_set_rcx(ghcb, regs->cx);
+
+ if (cr4 & X86_CR4_OSXSAVE)
+ /* Safe to read xcr0 */
+ ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
+ else
+ /* xgetbv will cause #GP - use reset value for xcr0 */
+ ghcb_set_xcr0(ghcb, 1);
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (!(ghcb_rax_is_valid(ghcb) &&
+ ghcb_rbx_is_valid(ghcb) &&
+ ghcb_rcx_is_valid(ghcb) &&
+ ghcb_rdx_is_valid(ghcb)))
+ return ES_VMM_ERROR;
+
+ regs->ax = ghcb->save.rax;
+ regs->bx = ghcb->save.rbx;
+ regs->cx = ghcb->save.rcx;
+ regs->dx = ghcb->save.rdx;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt,
+ unsigned long exit_code)
+{
+ bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
+ enum es_result ret;
+
+ /*
+ * The hypervisor should not be intercepting RDTSC/RDTSCP when Secure
+ * TSC is enabled. A #VC exception will be generated if the RDTSC/RDTSCP
+ * instructions are being intercepted. If this should occur and Secure
+ * TSC is enabled, guest execution should be terminated as the guest
+ * cannot rely on the TSC value provided by the hypervisor.
+ */
+ if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
+ return ES_VMM_ERROR;
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) &&
+ (!rdtscp || ghcb_rcx_is_valid(ghcb))))
+ return ES_VMM_ERROR;
+
+ ctxt->regs->ax = ghcb->save.rax;
+ ctxt->regs->dx = ghcb->save.rdx;
+ if (rdtscp)
+ ctxt->regs->cx = ghcb->save.rcx;
+
+ return ES_OK;
+}
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index edab6d6049be..7b2833705d47 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -36,6 +36,7 @@
/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a) ((a) >> 32)
#define TDCALL_INVALID_OPERAND 0xc0000100
+#define TDCALL_OPERAND_BUSY 0x80000200
#define TDREPORT_SUBTYPE_0 0
@@ -109,12 +110,13 @@ static inline u64 tdg_vm_wr(u64 field, u64 value, u64 mask)
* REPORTDATA to be included into TDREPORT.
* @tdreport: Address of the output buffer to store TDREPORT.
*
- * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
- * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
+ * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module v1.0
+ * specification for more information on TDG.MR.REPORT TDCALL.
+ *
* It is used in the TDX guest driver module to get the TDREPORT0.
*
- * Return 0 on success, -EINVAL for invalid operands, or -EIO on
- * other TDCALL failures.
+ * Return 0 on success, -ENXIO for invalid operands, -EBUSY for busy operation,
+ * or -EIO on other TDCALL failures.
*/
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
@@ -128,7 +130,9 @@ int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
ret = __tdcall(TDG_MR_REPORT, &args);
if (ret) {
if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
- return -EINVAL;
+ return -ENXIO;
+ else if (TDCALL_RETURN_CODE(ret) == TDCALL_OPERAND_BUSY)
+ return -EBUSY;
return -EIO;
}
@@ -137,6 +141,42 @@ int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);
/**
+ * tdx_mcall_extend_rtmr() - Wrapper to extend RTMR registers using
+ * TDG.MR.RTMR.EXTEND TDCALL.
+ * @index: Index of RTMR register to be extended.
+ * @data: Address of the input buffer with RTMR register extend data.
+ *
+ * Refer to section titled "TDG.MR.RTMR.EXTEND leaf" in the TDX Module v1.0
+ * specification for more information on TDG.MR.RTMR.EXTEND TDCALL.
+ *
+ * It is used in the TDX guest driver module to allow the user to extend the RTMR
+ * registers.
+ *
+ * Return 0 on success, -ENXIO for invalid operands, -EBUSY for busy operation,
+ * or -EIO on other TDCALL failures.
+ */
+int tdx_mcall_extend_rtmr(u8 index, u8 *data)
+{
+ struct tdx_module_args args = {
+ .rcx = virt_to_phys(data),
+ .rdx = index,
+ };
+ u64 ret;
+
+ ret = __tdcall(TDG_MR_RTMR_EXTEND, &args);
+ if (ret) {
+ if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
+ return -ENXIO;
+ if (TDCALL_RETURN_CODE(ret) == TDCALL_OPERAND_BUSY)
+ return -EBUSY;
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tdx_mcall_extend_rtmr);
+
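A minimal usage sketch for tdx_mcall_extend_rtmr(), assuming the extend data is a 48-byte SHA-384 digest residing in the kernel direct map (the wrapper passes virt_to_phys() of it); the function name and retry bound are illustrative:

static int example_extend_rtmr2(u8 *digest)
{
	int i, ret;

	/* Retry a few times while the TDX module reports the operand busy */
	for (i = 0; i < 3; i++) {
		ret = tdx_mcall_extend_rtmr(2, digest);
		if (ret != -EBUSY)
			break;
	}

	return ret;	/* 0 on success, -ENXIO/-EBUSY/-EIO on failure */
}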
+/**
* tdx_hcall_get_quote() - Wrapper to request TD Quote using GetQuote
* hypercall.
* @buf: Address of the directly mapped shared kernel buffer which
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 91801138b10b..7cd2f395f301 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,7 +1,6 @@
CONFIG_WERROR=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_USELIB=y
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
index 3d948f10c94c..56cfdc79e2c6 100644
--- a/arch/x86/crypto/Kconfig
+++ b/arch/x86/crypto/Kconfig
@@ -4,7 +4,7 @@ menu "Accelerated Cryptographic Algorithms for CPU (x86)"
config CRYPTO_CURVE25519_X86
tristate
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_KPP
select CRYPTO_LIB_CURVE25519_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CURVE25519
@@ -17,13 +17,11 @@ config CRYPTO_CURVE25519_X86
config CRYPTO_AES_NI_INTEL
tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XCTR, XTS, GCM (AES-NI/VAES)"
- depends on X86
select CRYPTO_AEAD
select CRYPTO_LIB_AES
select CRYPTO_LIB_GF128MUL
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
help
Block cipher: AES cipher algorithms
AEAD cipher: AES with GCM
@@ -38,7 +36,7 @@ config CRYPTO_AES_NI_INTEL
config CRYPTO_BLOWFISH_X86_64
tristate "Ciphers: Blowfish, modes: ECB, CBC"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_BLOWFISH_COMMON
imply CRYPTO_CTR
@@ -50,7 +48,7 @@ config CRYPTO_BLOWFISH_X86_64
config CRYPTO_CAMELLIA_X86_64
tristate "Ciphers: Camellia with modes: ECB, CBC"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
imply CRYPTO_CTR
help
@@ -61,10 +59,9 @@ config CRYPTO_CAMELLIA_X86_64
config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
tristate "Ciphers: Camellia with modes: ECB, CBC (AES-NI/AVX)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_CAMELLIA_X86_64
- select CRYPTO_SIMD
imply CRYPTO_XTS
help
Length-preserving ciphers: Camellia with ECB and CBC modes
@@ -75,7 +72,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
tristate "Ciphers: Camellia with modes: ECB, CBC (AES-NI/AVX2)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
help
Length-preserving ciphers: Camellia with ECB and CBC modes
@@ -86,11 +83,10 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
config CRYPTO_CAST5_AVX_X86_64
tristate "Ciphers: CAST5 with modes: ECB, CBC (AVX)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_CAST5
select CRYPTO_CAST_COMMON
- select CRYPTO_SIMD
imply CRYPTO_CTR
help
Length-preserving ciphers: CAST5 (CAST-128) cipher algorithm
@@ -103,11 +99,10 @@ config CRYPTO_CAST5_AVX_X86_64
config CRYPTO_CAST6_AVX_X86_64
tristate "Ciphers: CAST6 with modes: ECB, CBC (AVX)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_CAST6
select CRYPTO_CAST_COMMON
- select CRYPTO_SIMD
imply CRYPTO_XTS
imply CRYPTO_CTR
help
@@ -121,7 +116,7 @@ config CRYPTO_CAST6_AVX_X86_64
config CRYPTO_DES3_EDE_X86_64
tristate "Ciphers: Triple DES EDE with modes: ECB, CBC"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
imply CRYPTO_CTR
@@ -135,10 +130,9 @@ config CRYPTO_DES3_EDE_X86_64
config CRYPTO_SERPENT_SSE2_X86_64
tristate "Ciphers: Serpent with modes: ECB, CBC (SSE2)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_SERPENT
- select CRYPTO_SIMD
imply CRYPTO_CTR
help
Length-preserving ciphers: Serpent cipher algorithm
@@ -151,10 +145,9 @@ config CRYPTO_SERPENT_SSE2_X86_64
config CRYPTO_SERPENT_SSE2_586
tristate "Ciphers: Serpent with modes: ECB, CBC (32-bit with SSE2)"
- depends on X86 && !64BIT
+ depends on !64BIT
select CRYPTO_SKCIPHER
select CRYPTO_SERPENT
- select CRYPTO_SIMD
imply CRYPTO_CTR
help
Length-preserving ciphers: Serpent cipher algorithm
@@ -167,10 +160,9 @@ config CRYPTO_SERPENT_SSE2_586
config CRYPTO_SERPENT_AVX_X86_64
tristate "Ciphers: Serpent with modes: ECB, CBC (AVX)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_SERPENT
- select CRYPTO_SIMD
imply CRYPTO_XTS
imply CRYPTO_CTR
help
@@ -184,7 +176,7 @@ config CRYPTO_SERPENT_AVX_X86_64
config CRYPTO_SERPENT_AVX2_X86_64
tristate "Ciphers: Serpent with modes: ECB, CBC (AVX2)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SERPENT_AVX_X86_64
help
Length-preserving ciphers: Serpent cipher algorithm
@@ -197,9 +189,8 @@ config CRYPTO_SERPENT_AVX2_X86_64
config CRYPTO_SM4_AESNI_AVX_X86_64
tristate "Ciphers: SM4 with modes: ECB, CBC, CTR (AES-NI/AVX)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
select CRYPTO_ALGAPI
select CRYPTO_SM4
help
@@ -218,9 +209,8 @@ config CRYPTO_SM4_AESNI_AVX_X86_64
config CRYPTO_SM4_AESNI_AVX2_X86_64
tristate "Ciphers: SM4 with modes: ECB, CBC, CTR (AES-NI/AVX2)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
select CRYPTO_ALGAPI
select CRYPTO_SM4
select CRYPTO_SM4_AESNI_AVX_X86_64
@@ -240,7 +230,7 @@ config CRYPTO_SM4_AESNI_AVX2_X86_64
config CRYPTO_TWOFISH_586
tristate "Ciphers: Twofish (32-bit)"
- depends on (X86 || UML_X86) && !64BIT
+ depends on !64BIT
select CRYPTO_ALGAPI
select CRYPTO_TWOFISH_COMMON
imply CRYPTO_CTR
@@ -251,7 +241,7 @@ config CRYPTO_TWOFISH_586
config CRYPTO_TWOFISH_X86_64
tristate "Ciphers: Twofish"
- depends on (X86 || UML_X86) && 64BIT
+ depends on 64BIT
select CRYPTO_ALGAPI
select CRYPTO_TWOFISH_COMMON
imply CRYPTO_CTR
@@ -262,7 +252,7 @@ config CRYPTO_TWOFISH_X86_64
config CRYPTO_TWOFISH_X86_64_3WAY
tristate "Ciphers: Twofish with modes: ECB, CBC (3-way parallel)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
@@ -277,9 +267,8 @@ config CRYPTO_TWOFISH_X86_64_3WAY
config CRYPTO_TWOFISH_AVX_X86_64
tristate "Ciphers: Twofish with modes: ECB, CBC (AVX)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
select CRYPTO_TWOFISH_X86_64_3WAY
@@ -295,9 +284,8 @@ config CRYPTO_TWOFISH_AVX_X86_64
config CRYPTO_ARIA_AESNI_AVX_X86_64
tristate "Ciphers: ARIA with modes: ECB, CTR (AES-NI/AVX/GFNI)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
select CRYPTO_ALGAPI
select CRYPTO_ARIA
help
@@ -313,9 +301,8 @@ config CRYPTO_ARIA_AESNI_AVX_X86_64
config CRYPTO_ARIA_AESNI_AVX2_X86_64
tristate "Ciphers: ARIA with modes: ECB, CTR (AES-NI/AVX2/GFNI)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
select CRYPTO_ALGAPI
select CRYPTO_ARIA
select CRYPTO_ARIA_AESNI_AVX_X86_64
@@ -332,9 +319,8 @@ config CRYPTO_ARIA_AESNI_AVX2_X86_64
config CRYPTO_ARIA_GFNI_AVX512_X86_64
tristate "Ciphers: ARIA with modes: ECB, CTR (AVX512/GFNI)"
- depends on X86 && 64BIT && AS_AVX512 && AS_GFNI
+ depends on 64BIT && AS_GFNI
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
select CRYPTO_ALGAPI
select CRYPTO_ARIA
select CRYPTO_ARIA_AESNI_AVX_X86_64
@@ -349,27 +335,10 @@ config CRYPTO_ARIA_GFNI_AVX512_X86_64
Processes 64 blocks in parallel.
-config CRYPTO_CHACHA20_X86_64
- tristate
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_CHACHA_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
- default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
- stream cipher algorithms
-
- Architecture: x86_64 using:
- - SSSE3 (Supplemental SSE3)
- - AVX2 (Advanced Vector Extensions 2)
- - AVX-512VL (Advanced Vector Extensions-512VL)
-
config CRYPTO_AEGIS128_AESNI_SSE2
tristate "AEAD ciphers: AEGIS-128 (AES-NI/SSE4.1)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_AEAD
- select CRYPTO_SIMD
help
AEGIS-128 AEAD algorithm
@@ -379,7 +348,7 @@ config CRYPTO_AEGIS128_AESNI_SSE2
config CRYPTO_NHPOLY1305_SSE2
tristate "Hash functions: NHPoly1305 (SSE2)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_NHPOLY1305
help
NHPoly1305 hash function for Adiantum
@@ -389,7 +358,7 @@ config CRYPTO_NHPOLY1305_SSE2
config CRYPTO_NHPOLY1305_AVX2
tristate "Hash functions: NHPoly1305 (AVX2)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_NHPOLY1305
help
NHPoly1305 hash function for Adiantum
@@ -397,21 +366,9 @@ config CRYPTO_NHPOLY1305_AVX2
Architecture: x86_64 using:
- AVX2 (Advanced Vector Extensions 2)
-config CRYPTO_BLAKE2S_X86
- bool "Hash functions: BLAKE2s (SSSE3/AVX-512)"
- depends on X86 && 64BIT
- select CRYPTO_LIB_BLAKE2S_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
- help
- BLAKE2s cryptographic hash function (RFC 7693)
-
- Architecture: x86_64 using:
- - SSSE3 (Supplemental SSE3)
- - AVX-512 (Advanced Vector Extensions-512)
-
config CRYPTO_POLYVAL_CLMUL_NI
tristate "Hash functions: POLYVAL (CLMUL-NI)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_POLYVAL
help
POLYVAL hash function for HCTR2
@@ -419,23 +376,9 @@ config CRYPTO_POLYVAL_CLMUL_NI
Architecture: x86_64 using:
- CLMUL-NI (carry-less multiplication new instructions)
-config CRYPTO_POLY1305_X86_64
- tristate
- depends on X86 && 64BIT
- select CRYPTO_HASH
- select CRYPTO_LIB_POLY1305_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_POLY1305
- default CRYPTO_LIB_POLY1305_INTERNAL
- help
- Poly1305 authenticator algorithm (RFC7539)
-
- Architecture: x86_64 using:
- - SSE2 (Streaming SIMD Extensions 2)
- - AVX2 (Advanced Vector Extensions 2)
-
config CRYPTO_SHA1_SSSE3
tristate "Hash functions: SHA-1 (SSSE3/AVX/AVX2/SHA-NI)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SHA1
select CRYPTO_HASH
help
@@ -447,23 +390,9 @@ config CRYPTO_SHA1_SSSE3
- AVX2 (Advanced Vector Extensions 2)
- SHA-NI (SHA Extensions New Instructions)
-config CRYPTO_SHA256_SSSE3
- tristate "Hash functions: SHA-224 and SHA-256 (SSSE3/AVX/AVX2/SHA-NI)"
- depends on X86 && 64BIT
- select CRYPTO_SHA256
- select CRYPTO_HASH
- help
- SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
- Architecture: x86_64 using:
- - SSSE3 (Supplemental SSE3)
- - AVX (Advanced Vector Extensions)
- - AVX2 (Advanced Vector Extensions 2)
- - SHA-NI (SHA Extensions New Instructions)
-
config CRYPTO_SHA512_SSSE3
tristate "Hash functions: SHA-384 and SHA-512 (SSSE3/AVX/AVX2)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_SHA512
select CRYPTO_HASH
help
@@ -476,9 +405,9 @@ config CRYPTO_SHA512_SSSE3
config CRYPTO_SM3_AVX_X86_64
tristate "Hash functions: SM3 (AVX)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_HASH
- select CRYPTO_SM3
+ select CRYPTO_LIB_SM3
help
SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3
@@ -489,7 +418,7 @@ config CRYPTO_SM3_AVX_X86_64
config CRYPTO_GHASH_CLMUL_NI_INTEL
tristate "Hash functions: GHASH (CLMUL-NI)"
- depends on X86 && 64BIT
+ depends on 64BIT
select CRYPTO_CRYPTD
help
GCM GHASH hash function (NIST SP800-38D)
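
The Kconfig hunks above follow one pattern: the "depends on X86 &&" part is dropped as redundant (this Kconfig is only sourced for x86 builds), and "select CRYPTO_SIMD" goes away because the glue code converted below no longer registers internal "__"-prefixed algorithms behind a SIMD wrapper. A minimal sketch of the direct-registration shape the rest of this series converts to, assuming a driver-local my_algs[] array (hypothetical name):

#include <crypto/internal/skcipher.h>
#include <linux/module.h>

static struct skcipher_alg my_algs[1];	/* hypothetical; the real drivers define their own */

static int __init my_mod_init(void)
{
	/* Register directly; no simd_register_skciphers_compat() wrapper
	 * and no CRYPTO_ALG_INTERNAL flag are needed any more. */
	return crypto_register_skciphers(my_algs, ARRAY_SIZE(my_algs));
}

static void __exit my_mod_exit(void)
{
	crypto_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs));
}

module_init(my_mod_init);
module_exit(my_mod_exit);
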
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 5d19f41bde58..aa289a9e0153 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -42,10 +42,6 @@ cast6-avx-x86_64-y := cast6-avx-x86_64-asm_64.o cast6_avx_glue.o
obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o
aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o
-obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha-x86_64.o
-chacha-x86_64-y := chacha-avx2-x86_64.o chacha-ssse3-x86_64.o chacha_glue.o
-chacha-x86_64-$(CONFIG_AS_AVX512) += chacha-avx512vl-x86_64.o
-
obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
aesni-intel-$(CONFIG_64BIT) += aes-ctr-avx-x86_64.o \
@@ -56,29 +52,17 @@ aesni-intel-$(CONFIG_64BIT) += aes-gcm-avx10-x86_64.o
endif
obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
-sha1-ssse3-y := sha1_avx2_x86_64_asm.o sha1_ssse3_asm.o sha1_ssse3_glue.o
-sha1-ssse3-$(CONFIG_AS_SHA1_NI) += sha1_ni_asm.o
-
-obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
-sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o
-sha256-ssse3-$(CONFIG_AS_SHA256_NI) += sha256_ni_asm.o
+sha1-ssse3-y := sha1_avx2_x86_64_asm.o sha1_ssse3_asm.o sha1_ni_asm.o sha1_ssse3_glue.o
obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
-obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += libblake2s-x86_64.o
-libblake2s-x86_64-y := blake2s-core.o blake2s-glue.o
-
obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
obj-$(CONFIG_CRYPTO_POLYVAL_CLMUL_NI) += polyval-clmulni.o
polyval-clmulni-y := polyval-clmulni_asm.o polyval-clmulni_glue.o
-obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o
-poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o
-targets += poly1305-x86_64-cryptogams.S
-
obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o
nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
@@ -104,10 +88,5 @@ aria-aesni-avx2-x86_64-y := aria-aesni-avx2-asm_64.o aria_aesni_avx2_glue.o
obj-$(CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64) += aria-gfni-avx512-x86_64.o
aria-gfni-avx512-x86_64-y := aria-gfni-avx512-asm_64.o aria_gfni_avx512_glue.o
-quiet_cmd_perlasm = PERLASM $@
- cmd_perlasm = $(PERL) $< > $@
-$(obj)/%.S: $(src)/%.pl FORCE
- $(call if_changed,perlasm)
-
# Disable GCOV in odd or sensitive code
GCOV_PROFILE_curve25519-x86_64.o := n
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 26786e15abac..f1b6d40154e3 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -8,7 +8,6 @@
*/
#include <crypto/internal/aead.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
@@ -233,21 +232,18 @@ static struct aead_alg crypto_aegis128_aesni_alg = {
.chunksize = AEGIS128_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx) +
__alignof__(struct aegis_ctx),
.cra_priority = 400,
- .cra_name = "__aegis128",
- .cra_driver_name = "__aegis128-aesni",
+ .cra_name = "aegis128",
+ .cra_driver_name = "aegis128-aesni",
.cra_module = THIS_MODULE,
}
};
-static struct simd_aead_alg *simd_alg;
-
static int __init crypto_aegis128_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM4_1) ||
@@ -255,13 +251,12 @@ static int __init crypto_aegis128_aesni_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
- return simd_register_aeads_compat(&crypto_aegis128_aesni_alg, 1,
- &simd_alg);
+ return crypto_register_aead(&crypto_aegis128_aesni_alg);
}
static void __exit crypto_aegis128_aesni_module_exit(void)
{
- simd_unregister_aeads(&crypto_aegis128_aesni_alg, 1, &simd_alg);
+ crypto_unregister_aead(&crypto_aegis128_aesni_alg);
}
module_init(crypto_aegis128_aesni_module_init);
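
With CRYPTO_ALG_INTERNAL and the "__aegis128" naming removed, the AES-NI implementation is reachable through an ordinary algorithm lookup rather than via a simd/cryptd wrapper. A minimal sketch of such a lookup, assuming the names registered in this file (check_aegis128() itself is a hypothetical caller):

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/printk.h>

static int check_aegis128(void)
{
	/* Resolves to the highest-priority "aegis128" provider; on a CPU
	 * with SSE4.1/AES-NI that is now "aegis128-aesni" directly. */
	struct crypto_aead *tfm = crypto_alloc_aead("aegis128", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	pr_info("aegis128 provider: %s\n",
		crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)));
	crypto_free_aead(tfm);
	return 0;
}
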
diff --git a/arch/x86/crypto/aes-ctr-avx-x86_64.S b/arch/x86/crypto/aes-ctr-avx-x86_64.S
index 1685d8b24b2c..bbbfd80f5a50 100644
--- a/arch/x86/crypto/aes-ctr-avx-x86_64.S
+++ b/arch/x86/crypto/aes-ctr-avx-x86_64.S
@@ -48,8 +48,7 @@
// using the following sets of CPU features:
// - AES-NI && AVX
// - VAES && AVX2
-// - VAES && (AVX10/256 || (AVX512BW && AVX512VL)) && BMI2
-// - VAES && (AVX10/512 || (AVX512BW && AVX512VL)) && BMI2
+// - VAES && AVX512BW && AVX512VL && BMI2
//
// See the function definitions at the bottom of the file for more information.
@@ -76,7 +75,6 @@
.text
// Move a vector between memory and a register.
-// The register operand must be in the first 16 vector registers.
.macro _vmovdqu src, dst
.if VL < 64
vmovdqu \src, \dst
@@ -86,7 +84,6 @@
.endm
// Move a vector between registers.
-// The registers must be in the first 16 vector registers.
.macro _vmovdqa src, dst
.if VL < 64
vmovdqa \src, \dst
@@ -96,7 +93,7 @@
.endm
// Broadcast a 128-bit value from memory to all 128-bit lanes of a vector
-// register. The register operand must be in the first 16 vector registers.
+// register.
.macro _vbroadcast128 src, dst
.if VL == 16
vmovdqu \src, \dst
@@ -108,7 +105,6 @@
.endm
// XOR two vectors together.
-// Any register operands must be in the first 16 vector registers.
.macro _vpxor src1, src2, dst
.if VL < 64
vpxor \src1, \src2, \dst
@@ -199,8 +195,8 @@
// XOR each with the zero-th round key. Also update LE_CTR if !\final.
.macro _prepare_2_ctr_vecs is_xctr, i0, i1, final=0
.if \is_xctr
- .if USE_AVX10
- _vmovdqa LE_CTR, AESDATA\i0
+ .if USE_AVX512
+ vmovdqa64 LE_CTR, AESDATA\i0
vpternlogd $0x96, XCTR_IV, RNDKEY0, AESDATA\i0
.else
vpxor XCTR_IV, LE_CTR, AESDATA\i0
@@ -208,7 +204,7 @@
.endif
vpaddq LE_CTR_INC1, LE_CTR, AESDATA\i1
- .if USE_AVX10
+ .if USE_AVX512
vpternlogd $0x96, XCTR_IV, RNDKEY0, AESDATA\i1
.else
vpxor XCTR_IV, AESDATA\i1, AESDATA\i1
@@ -481,18 +477,12 @@
.Lxor_tail_partial_vec_0\@:
// XOR the remaining 1 <= LEN < VL bytes. It's easy if masked
// loads/stores are available; otherwise it's a bit harder...
-.if USE_AVX10
- .if VL <= 32
- mov $-1, %eax
- bzhi LEN, %eax, %eax
- kmovd %eax, %k1
- .else
+.if USE_AVX512
mov $-1, %rax
bzhi LEN64, %rax, %rax
kmovq %rax, %k1
- .endif
vmovdqu8 (SRC), AESDATA1{%k1}{z}
- _vpxor AESDATA1, AESDATA0, AESDATA0
+ vpxord AESDATA1, AESDATA0, AESDATA0
vmovdqu8 AESDATA0, (DST){%k1}
.else
.if VL == 32
@@ -554,7 +544,7 @@
// eliminates carries. |ctr| is the per-message block counter starting at 1.
.set VL, 16
-.set USE_AVX10, 0
+.set USE_AVX512, 0
SYM_TYPED_FUNC_START(aes_ctr64_crypt_aesni_avx)
_aes_ctr_crypt 0
SYM_FUNC_END(aes_ctr64_crypt_aesni_avx)
@@ -564,7 +554,7 @@ SYM_FUNC_END(aes_xctr_crypt_aesni_avx)
#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
.set VL, 32
-.set USE_AVX10, 0
+.set USE_AVX512, 0
SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx2)
_aes_ctr_crypt 0
SYM_FUNC_END(aes_ctr64_crypt_vaes_avx2)
@@ -572,21 +562,12 @@ SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx2)
_aes_ctr_crypt 1
SYM_FUNC_END(aes_xctr_crypt_vaes_avx2)
-.set VL, 32
-.set USE_AVX10, 1
-SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx10_256)
- _aes_ctr_crypt 0
-SYM_FUNC_END(aes_ctr64_crypt_vaes_avx10_256)
-SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx10_256)
- _aes_ctr_crypt 1
-SYM_FUNC_END(aes_xctr_crypt_vaes_avx10_256)
-
.set VL, 64
-.set USE_AVX10, 1
-SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx10_512)
+.set USE_AVX512, 1
+SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx512)
_aes_ctr_crypt 0
-SYM_FUNC_END(aes_ctr64_crypt_vaes_avx10_512)
-SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx10_512)
+SYM_FUNC_END(aes_ctr64_crypt_vaes_avx512)
+SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx512)
_aes_ctr_crypt 1
-SYM_FUNC_END(aes_xctr_crypt_vaes_avx10_512)
+SYM_FUNC_END(aes_xctr_crypt_vaes_avx512)
#endif // CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ
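
The partial-tail path above now relies on AVX512-style masking only: bzhi builds a value whose low LEN bits are set, kmovq turns it into %k1, and vmovdqu8 does the masked load/store. A user-space C sketch of the same mask computation (tail_mask() is a hypothetical helper, shown only to illustrate the bzhi semantics for LEN < 64):

#include <stdint.h>
#include <stdio.h>

/* Equivalent of "mov $-1, %rax; bzhi LEN64, %rax, %rax" for len < 64:
 * keep only the low 'len' bits of an all-ones word. */
static uint64_t tail_mask(unsigned int len)
{
	return len >= 64 ? ~0ULL : (1ULL << len) - 1;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)tail_mask(5));	/* 0x1f */
	printf("%#llx\n", (unsigned long long)tail_mask(17));	/* 0x1ffff */
	return 0;
}
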
diff --git a/arch/x86/crypto/aes-xts-avx-x86_64.S b/arch/x86/crypto/aes-xts-avx-x86_64.S
index 93ba0ddbe009..db79cdf81588 100644
--- a/arch/x86/crypto/aes-xts-avx-x86_64.S
+++ b/arch/x86/crypto/aes-xts-avx-x86_64.S
@@ -52,32 +52,25 @@
* different code, it uses a macro to generate several implementations that
* share similar source code but are targeted at different CPUs, listed below:
*
- * AES-NI + AVX
+ * AES-NI && AVX
* - 128-bit vectors (1 AES block per vector)
* - VEX-coded instructions
* - xmm0-xmm15
* - This is for older CPUs that lack VAES but do have AVX.
*
- * VAES + VPCLMULQDQ + AVX2
+ * VAES && VPCLMULQDQ && AVX2
* - 256-bit vectors (2 AES blocks per vector)
* - VEX-coded instructions
* - ymm0-ymm15
- * - This is for CPUs that have VAES but lack AVX512 or AVX10,
- * e.g. Intel's Alder Lake and AMD's Zen 3.
+ * - This is for CPUs that have VAES but either lack AVX512 (e.g. Intel's
+ * Alder Lake and AMD's Zen 3) or downclock too eagerly when using zmm
+ * registers (e.g. Intel's Ice Lake).
*
- * VAES + VPCLMULQDQ + AVX10/256 + BMI2
- * - 256-bit vectors (2 AES blocks per vector)
+ * VAES && VPCLMULQDQ && AVX512BW && AVX512VL && BMI2
+ * - 512-bit vectors (4 AES blocks per vector)
* - EVEX-coded instructions
- * - ymm0-ymm31
- * - This is for CPUs that have AVX512 but where using zmm registers causes
- * downclocking, and for CPUs that have AVX10/256 but not AVX10/512.
- * - By "AVX10/256" we really mean (AVX512BW + AVX512VL) || AVX10/256.
- * To avoid confusion with 512-bit, we just write AVX10/256.
- *
- * VAES + VPCLMULQDQ + AVX10/512 + BMI2
- * - Same as the previous one, but upgrades to 512-bit vectors
- * (4 AES blocks per vector) in zmm0-zmm31.
- * - This is for CPUs that have good AVX512 or AVX10/512 support.
+ * - zmm0-zmm31
+ * - This is for CPUs that have good AVX512 support.
*
* This file doesn't have an implementation for AES-NI alone (without AVX), as
* the lack of VEX would make all the assembly code different.
@@ -107,9 +100,20 @@
// exists when there's a carry out of the low 64 bits of the tweak.
.quad 0x87, 1
+ // These are the shift amounts that are needed when multiplying by [x^0,
+ // x^1, x^2, x^3] to compute the first vector of tweaks when VL=64.
+ //
+ // The right shifts by 64 are expected to zeroize the destination.
+ // 'vpsrlvq' is indeed defined to do that; i.e. it doesn't truncate the
+ // amount to 64 & 63 = 0 like the 'shr' scalar shift instruction would.
+.Lrshift_amounts:
+ .byte 64, 64, 63, 63, 62, 62, 61, 61
+.Llshift_amounts:
+ .byte 0, 0, 1, 1, 2, 2, 3, 3
+
// This table contains constants for vpshufb and vpblendvb, used to
// handle variable byte shifts and blending during ciphertext stealing
- // on CPUs that don't support AVX10-style masking.
+ // on CPUs that don't support AVX512-style masking.
.Lcts_permute_table:
.byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
.byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
@@ -138,7 +142,7 @@
.irp i, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
_define_Vi \i
.endr
-.if USE_AVX10
+.if USE_AVX512
.irp i, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
_define_Vi \i
.endr
@@ -193,7 +197,7 @@
// keys to the *end* of this register range. I.e., AES-128 uses
// KEY5-KEY14, AES-192 uses KEY3-KEY14, and AES-256 uses KEY1-KEY14.
// (All also use KEY0 for the XOR-only "round" at the beginning.)
-.if USE_AVX10
+.if USE_AVX512
.set KEY1_XMM, %xmm16
.set KEY1, V16
.set KEY2_XMM, %xmm17
@@ -227,7 +231,6 @@
.endm
// Move a vector between memory and a register.
-// The register operand must be in the first 16 vector registers.
.macro _vmovdqu src, dst
.if VL < 64
vmovdqu \src, \dst
@@ -238,9 +241,9 @@
// Broadcast a 128-bit value into a vector.
.macro _vbroadcast128 src, dst
-.if VL == 16 && !USE_AVX10
+.if VL == 16
vmovdqu \src, \dst
-.elseif VL == 32 && !USE_AVX10
+.elseif VL == 32
vbroadcasti128 \src, \dst
.else
vbroadcasti32x4 \src, \dst
@@ -248,7 +251,6 @@
.endm
// XOR two vectors together.
-// Any register operands must be in the first 16 vector registers.
.macro _vpxor src1, src2, dst
.if VL < 64
vpxor \src1, \src2, \dst
@@ -259,7 +261,7 @@
// XOR three vectors together.
.macro _xor3 src1, src2, src3_and_dst
-.if USE_AVX10
+.if USE_AVX512
// vpternlogd with immediate 0x96 is a three-argument XOR.
vpternlogd $0x96, \src1, \src2, \src3_and_dst
.else
@@ -274,7 +276,7 @@
vpshufd $0x13, \src, \tmp
vpaddq \src, \src, \dst
vpsrad $31, \tmp, \tmp
-.if USE_AVX10
+.if USE_AVX512
vpternlogd $0x78, GF_POLY_XMM, \tmp, \dst
.else
vpand GF_POLY_XMM, \tmp, \tmp
@@ -303,52 +305,75 @@
// Given the first XTS tweak at (TWEAK), compute the first set of tweaks and
// store them in the vector registers TWEAK0-TWEAK3. Clobbers V0-V5.
.macro _compute_first_set_of_tweaks
- vmovdqu (TWEAK), TWEAK0_XMM
- _vbroadcast128 .Lgf_poly(%rip), GF_POLY
.if VL == 16
- // With VL=16, multiplying by x serially is fastest.
+ vmovdqu (TWEAK), TWEAK0_XMM
+ vmovdqu .Lgf_poly(%rip), GF_POLY
_next_tweak TWEAK0, %xmm0, TWEAK1
_next_tweak TWEAK1, %xmm0, TWEAK2
_next_tweak TWEAK2, %xmm0, TWEAK3
-.else
-.if VL == 32
- // Compute the second block of TWEAK0.
+.elseif VL == 32
+ vmovdqu (TWEAK), TWEAK0_XMM
+ vbroadcasti128 .Lgf_poly(%rip), GF_POLY
+
+ // Compute the first vector of tweaks.
_next_tweak TWEAK0_XMM, %xmm0, %xmm1
vinserti128 $1, %xmm1, TWEAK0, TWEAK0
-.elseif VL == 64
- // Compute the remaining blocks of TWEAK0.
- _next_tweak TWEAK0_XMM, %xmm0, %xmm1
- _next_tweak %xmm1, %xmm0, %xmm2
- _next_tweak %xmm2, %xmm0, %xmm3
- vinserti32x4 $1, %xmm1, TWEAK0, TWEAK0
- vinserti32x4 $2, %xmm2, TWEAK0, TWEAK0
- vinserti32x4 $3, %xmm3, TWEAK0, TWEAK0
-.endif
- // Compute TWEAK[1-3] from TWEAK0.
- vpsrlq $64 - 1*VL/16, TWEAK0, V0
- vpsrlq $64 - 2*VL/16, TWEAK0, V2
- vpsrlq $64 - 3*VL/16, TWEAK0, V4
+
+ // Compute the next three vectors of tweaks:
+ // TWEAK1 = TWEAK0 * [x^2, x^2]
+ // TWEAK2 = TWEAK0 * [x^4, x^4]
+ // TWEAK3 = TWEAK0 * [x^6, x^6]
+ vpsrlq $64 - 2, TWEAK0, V0
+ vpsrlq $64 - 4, TWEAK0, V2
+ vpsrlq $64 - 6, TWEAK0, V4
vpclmulqdq $0x01, GF_POLY, V0, V1
vpclmulqdq $0x01, GF_POLY, V2, V3
vpclmulqdq $0x01, GF_POLY, V4, V5
vpslldq $8, V0, V0
vpslldq $8, V2, V2
vpslldq $8, V4, V4
- vpsllq $1*VL/16, TWEAK0, TWEAK1
- vpsllq $2*VL/16, TWEAK0, TWEAK2
- vpsllq $3*VL/16, TWEAK0, TWEAK3
-.if USE_AVX10
- vpternlogd $0x96, V0, V1, TWEAK1
- vpternlogd $0x96, V2, V3, TWEAK2
- vpternlogd $0x96, V4, V5, TWEAK3
-.else
+ vpsllq $2, TWEAK0, TWEAK1
+ vpsllq $4, TWEAK0, TWEAK2
+ vpsllq $6, TWEAK0, TWEAK3
vpxor V0, TWEAK1, TWEAK1
vpxor V2, TWEAK2, TWEAK2
vpxor V4, TWEAK3, TWEAK3
vpxor V1, TWEAK1, TWEAK1
vpxor V3, TWEAK2, TWEAK2
vpxor V5, TWEAK3, TWEAK3
-.endif
+.else
+ vbroadcasti32x4 (TWEAK), TWEAK0
+ vbroadcasti32x4 .Lgf_poly(%rip), GF_POLY
+
+ // Compute the first vector of tweaks:
+ // TWEAK0 = broadcast128(TWEAK) * [x^0, x^1, x^2, x^3]
+ vpmovzxbq .Lrshift_amounts(%rip), V4
+ vpsrlvq V4, TWEAK0, V0
+ vpclmulqdq $0x01, GF_POLY, V0, V1
+ vpmovzxbq .Llshift_amounts(%rip), V4
+ vpslldq $8, V0, V0
+ vpsllvq V4, TWEAK0, TWEAK0
+ vpternlogd $0x96, V0, V1, TWEAK0
+
+ // Compute the next three vectors of tweaks:
+ // TWEAK1 = TWEAK0 * [x^4, x^4, x^4, x^4]
+ // TWEAK2 = TWEAK0 * [x^8, x^8, x^8, x^8]
+ // TWEAK3 = TWEAK0 * [x^12, x^12, x^12, x^12]
+ // x^8 only needs byte-aligned shifts, so optimize accordingly.
+ vpsrlq $64 - 4, TWEAK0, V0
+ vpsrldq $(64 - 8) / 8, TWEAK0, V2
+ vpsrlq $64 - 12, TWEAK0, V4
+ vpclmulqdq $0x01, GF_POLY, V0, V1
+ vpclmulqdq $0x01, GF_POLY, V2, V3
+ vpclmulqdq $0x01, GF_POLY, V4, V5
+ vpslldq $8, V0, V0
+ vpslldq $8, V4, V4
+ vpsllq $4, TWEAK0, TWEAK1
+ vpslldq $8 / 8, TWEAK0, TWEAK2
+ vpsllq $12, TWEAK0, TWEAK3
+ vpternlogd $0x96, V0, V1, TWEAK1
+ vpxord V3, TWEAK2, TWEAK2
+ vpternlogd $0x96, V4, V5, TWEAK3
.endif
.endm
@@ -474,26 +499,26 @@
lea OFFS-16(KEY, KEYLEN64, 4), KEY
// If all 32 SIMD registers are available, cache all the round keys.
-.if USE_AVX10
+.if USE_AVX512
cmp $24, KEYLEN
jl .Laes128\@
je .Laes192\@
- _vbroadcast128 -6*16(KEY), KEY1
- _vbroadcast128 -5*16(KEY), KEY2
+ vbroadcasti32x4 -6*16(KEY), KEY1
+ vbroadcasti32x4 -5*16(KEY), KEY2
.Laes192\@:
- _vbroadcast128 -4*16(KEY), KEY3
- _vbroadcast128 -3*16(KEY), KEY4
+ vbroadcasti32x4 -4*16(KEY), KEY3
+ vbroadcasti32x4 -3*16(KEY), KEY4
.Laes128\@:
- _vbroadcast128 -2*16(KEY), KEY5
- _vbroadcast128 -1*16(KEY), KEY6
- _vbroadcast128 0*16(KEY), KEY7
- _vbroadcast128 1*16(KEY), KEY8
- _vbroadcast128 2*16(KEY), KEY9
- _vbroadcast128 3*16(KEY), KEY10
- _vbroadcast128 4*16(KEY), KEY11
- _vbroadcast128 5*16(KEY), KEY12
- _vbroadcast128 6*16(KEY), KEY13
- _vbroadcast128 7*16(KEY), KEY14
+ vbroadcasti32x4 -2*16(KEY), KEY5
+ vbroadcasti32x4 -1*16(KEY), KEY6
+ vbroadcasti32x4 0*16(KEY), KEY7
+ vbroadcasti32x4 1*16(KEY), KEY8
+ vbroadcasti32x4 2*16(KEY), KEY9
+ vbroadcasti32x4 3*16(KEY), KEY10
+ vbroadcasti32x4 4*16(KEY), KEY11
+ vbroadcasti32x4 5*16(KEY), KEY12
+ vbroadcasti32x4 6*16(KEY), KEY13
+ vbroadcasti32x4 7*16(KEY), KEY14
.endif
.endm
@@ -521,7 +546,7 @@
// using the same key for all block(s). The round key is loaded from the
// appropriate register or memory location for round \i. May clobber \tmp.
.macro _vaes_1x enc, i, xmm_suffix, data, tmp
-.if USE_AVX10
+.if USE_AVX512
_vaes \enc, KEY\i\xmm_suffix, \data
.else
.ifnb \xmm_suffix
@@ -538,7 +563,7 @@
// appropriate register or memory location for round \i. In addition, does two
// steps of the computation of the next set of tweaks. May clobber V4 and V5.
.macro _vaes_4x enc, i
-.if USE_AVX10
+.if USE_AVX512
_tweak_step (2*(\i-5))
_vaes \enc, KEY\i, V0
_vaes \enc, KEY\i, V1
@@ -574,7 +599,7 @@
.irp i, 5,6,7,8,9,10,11,12,13
_vaes_1x \enc, \i, \xmm_suffix, \data, tmp=\tmp
.endr
-.if USE_AVX10
+.if USE_AVX512
vpxord KEY14\xmm_suffix, \tweak, \tmp
.else
.ifnb \xmm_suffix
@@ -617,11 +642,11 @@
// This is the main loop, en/decrypting 4*VL bytes per iteration.
// XOR each source block with its tweak and the zero-th round key.
-.if USE_AVX10
- _vmovdqu 0*VL(SRC), V0
- _vmovdqu 1*VL(SRC), V1
- _vmovdqu 2*VL(SRC), V2
- _vmovdqu 3*VL(SRC), V3
+.if USE_AVX512
+ vmovdqu8 0*VL(SRC), V0
+ vmovdqu8 1*VL(SRC), V1
+ vmovdqu8 2*VL(SRC), V2
+ vmovdqu8 3*VL(SRC), V3
vpternlogd $0x96, TWEAK0, KEY0, V0
vpternlogd $0x96, TWEAK1, KEY0, V1
vpternlogd $0x96, TWEAK2, KEY0, V2
@@ -654,7 +679,7 @@
// Reduce latency by doing the XOR before the vaesenclast, utilizing the
// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a)
// (and likewise for vaesdeclast).
-.if USE_AVX10
+.if USE_AVX512
_tweak_step 18
_tweak_step 19
vpxord TWEAK0, KEY14, V4
@@ -762,7 +787,7 @@
_aes_crypt \enc, _XMM, TWEAK1_XMM, %xmm0, tmp=%xmm1
.endif
-.if USE_AVX10
+.if USE_AVX512
// Create a mask that has the first LEN bits set.
mov $-1, %r9d
bzhi LEN, %r9d, %r9d
@@ -811,7 +836,7 @@
// u8 iv[AES_BLOCK_SIZE]);
//
// Encrypt |iv| using the AES key |tweak_key| to get the first tweak. Assumes
-// that the CPU supports AES-NI and AVX, but not necessarily VAES or AVX10.
+// that the CPU supports AES-NI and AVX, but not necessarily VAES or AVX512.
SYM_TYPED_FUNC_START(aes_xts_encrypt_iv)
.set TWEAK_KEY, %rdi
.set IV, %rsi
@@ -853,7 +878,7 @@ SYM_FUNC_END(aes_xts_encrypt_iv)
// multiple of 16, then this function updates |tweak| to contain the next tweak.
.set VL, 16
-.set USE_AVX10, 0
+.set USE_AVX512, 0
SYM_TYPED_FUNC_START(aes_xts_encrypt_aesni_avx)
_aes_xts_crypt 1
SYM_FUNC_END(aes_xts_encrypt_aesni_avx)
@@ -863,7 +888,7 @@ SYM_FUNC_END(aes_xts_decrypt_aesni_avx)
#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
.set VL, 32
-.set USE_AVX10, 0
+.set USE_AVX512, 0
SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx2)
_aes_xts_crypt 1
SYM_FUNC_END(aes_xts_encrypt_vaes_avx2)
@@ -871,21 +896,12 @@ SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx2)
_aes_xts_crypt 0
SYM_FUNC_END(aes_xts_decrypt_vaes_avx2)
-.set VL, 32
-.set USE_AVX10, 1
-SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx10_256)
- _aes_xts_crypt 1
-SYM_FUNC_END(aes_xts_encrypt_vaes_avx10_256)
-SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx10_256)
- _aes_xts_crypt 0
-SYM_FUNC_END(aes_xts_decrypt_vaes_avx10_256)
-
.set VL, 64
-.set USE_AVX10, 1
-SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx10_512)
+.set USE_AVX512, 1
+SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx512)
_aes_xts_crypt 1
-SYM_FUNC_END(aes_xts_encrypt_vaes_avx10_512)
-SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx10_512)
+SYM_FUNC_END(aes_xts_encrypt_vaes_avx512)
+SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx512)
_aes_xts_crypt 0
-SYM_FUNC_END(aes_xts_decrypt_vaes_avx10_512)
+SYM_FUNC_END(aes_xts_decrypt_vaes_avx512)
#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
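
Several hunks above lean on "vpternlogd $0x96", which the in-file comment describes as a three-argument XOR: the 8-bit immediate is a truth table indexed by the three input bits, and 0x96 (0b10010110) is exactly odd parity. A small user-space C sketch that checks this reading of the immediate on one 32-bit lane (the operand-to-index-bit mapping does not matter for a symmetric function like XOR):

#include <stdint.h>
#include <stdio.h>

/* Model one vpternlogd lane: for every bit position, form a 3-bit index
 * from the three inputs and look it up in the immediate truth table. */
static uint32_t ternlog(uint32_t a, uint32_t b, uint32_t c, uint8_t imm)
{
	uint32_t r = 0;

	for (int bit = 0; bit < 32; bit++) {
		unsigned int idx = (((a >> bit) & 1) << 2) |
				   (((b >> bit) & 1) << 1) |
				   ((c >> bit) & 1);
		r |= (uint32_t)((imm >> idx) & 1) << bit;
	}
	return r;
}

int main(void)
{
	uint32_t a = 0x12345678, b = 0x9abcdef0, c = 0x0f0f0f0f;

	printf("0x96 == 3-way XOR? %s\n",
	       ternlog(a, b, c, 0x96) == (a ^ b ^ c) ? "yes" : "no");
	return 0;
}
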
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index bc655d794a95..061b1ced93c5 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -566,10 +566,9 @@ static struct crypto_alg aesni_cipher_alg = {
static struct skcipher_alg aesni_skciphers[] = {
{
.base = {
- .cra_name = "__ecb(aes)",
- .cra_driver_name = "__ecb-aes-aesni",
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-aesni",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
@@ -581,10 +580,9 @@ static struct skcipher_alg aesni_skciphers[] = {
.decrypt = ecb_decrypt,
}, {
.base = {
- .cra_name = "__cbc(aes)",
- .cra_driver_name = "__cbc-aes-aesni",
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-aesni",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
@@ -597,10 +595,9 @@ static struct skcipher_alg aesni_skciphers[] = {
.decrypt = cbc_decrypt,
}, {
.base = {
- .cra_name = "__cts(cbc(aes))",
- .cra_driver_name = "__cts-cbc-aes-aesni",
+ .cra_name = "cts(cbc(aes))",
+ .cra_driver_name = "cts-cbc-aes-aesni",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
@@ -615,10 +612,9 @@ static struct skcipher_alg aesni_skciphers[] = {
#ifdef CONFIG_X86_64
}, {
.base = {
- .cra_name = "__ctr(aes)",
- .cra_driver_name = "__ctr-aes-aesni",
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-aesni",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
@@ -633,10 +629,9 @@ static struct skcipher_alg aesni_skciphers[] = {
#endif
}, {
.base = {
- .cra_name = "__xts(aes)",
- .cra_driver_name = "__xts-aes-aesni",
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-aesni",
.cra_priority = 401,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = XTS_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
@@ -651,9 +646,6 @@ static struct skcipher_alg aesni_skciphers[] = {
}
};
-static
-struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
-
#ifdef CONFIG_X86_64
asmlinkage void aes_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
u8 iv[AES_BLOCK_SIZE]);
@@ -792,10 +784,9 @@ static int xctr_crypt_##suffix(struct skcipher_request *req) \
} \
\
static struct skcipher_alg skcipher_algs_##suffix[] = {{ \
- .base.cra_name = "__xts(aes)", \
- .base.cra_driver_name = "__xts-aes-" driver_name_suffix, \
+ .base.cra_name = "xts(aes)", \
+ .base.cra_driver_name = "xts-aes-" driver_name_suffix, \
.base.cra_priority = priority, \
- .base.cra_flags = CRYPTO_ALG_INTERNAL, \
.base.cra_blocksize = AES_BLOCK_SIZE, \
.base.cra_ctxsize = XTS_AES_CTX_SIZE, \
.base.cra_module = THIS_MODULE, \
@@ -807,10 +798,9 @@ static struct skcipher_alg skcipher_algs_##suffix[] = {{ \
.encrypt = xts_encrypt_##suffix, \
.decrypt = xts_decrypt_##suffix, \
}, { \
- .base.cra_name = "__ctr(aes)", \
- .base.cra_driver_name = "__ctr-aes-" driver_name_suffix, \
+ .base.cra_name = "ctr(aes)", \
+ .base.cra_driver_name = "ctr-aes-" driver_name_suffix, \
.base.cra_priority = priority, \
- .base.cra_flags = CRYPTO_ALG_INTERNAL, \
.base.cra_blocksize = 1, \
.base.cra_ctxsize = CRYPTO_AES_CTX_SIZE, \
.base.cra_module = THIS_MODULE, \
@@ -822,10 +812,9 @@ static struct skcipher_alg skcipher_algs_##suffix[] = {{ \
.encrypt = ctr_crypt_##suffix, \
.decrypt = ctr_crypt_##suffix, \
}, { \
- .base.cra_name = "__xctr(aes)", \
- .base.cra_driver_name = "__xctr-aes-" driver_name_suffix, \
+ .base.cra_name = "xctr(aes)", \
+ .base.cra_driver_name = "xctr-aes-" driver_name_suffix, \
.base.cra_priority = priority, \
- .base.cra_flags = CRYPTO_ALG_INTERNAL, \
.base.cra_blocksize = 1, \
.base.cra_ctxsize = CRYPTO_AES_CTX_SIZE, \
.base.cra_module = THIS_MODULE, \
@@ -836,16 +825,12 @@ static struct skcipher_alg skcipher_algs_##suffix[] = {{ \
.setkey = aesni_skcipher_setkey, \
.encrypt = xctr_crypt_##suffix, \
.decrypt = xctr_crypt_##suffix, \
-}}; \
- \
-static struct simd_skcipher_alg * \
-simd_skcipher_algs_##suffix[ARRAY_SIZE(skcipher_algs_##suffix)]
+}}
DEFINE_AVX_SKCIPHER_ALGS(aesni_avx, "aesni-avx", 500);
#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
DEFINE_AVX_SKCIPHER_ALGS(vaes_avx2, "vaes-avx2", 600);
-DEFINE_AVX_SKCIPHER_ALGS(vaes_avx10_256, "vaes-avx10_256", 700);
-DEFINE_AVX_SKCIPHER_ALGS(vaes_avx10_512, "vaes-avx10_512", 800);
+DEFINE_AVX_SKCIPHER_ALGS(vaes_avx512, "vaes-avx512", 800);
#endif
/* The common part of the x86_64 AES-GCM key struct */
@@ -1499,10 +1484,9 @@ static struct aead_alg aes_gcm_algs_##suffix[] = { { \
.chunksize = AES_BLOCK_SIZE, \
.maxauthsize = 16, \
.base = { \
- .cra_name = "__gcm(aes)", \
- .cra_driver_name = "__" generic_driver_name, \
+ .cra_name = "gcm(aes)", \
+ .cra_driver_name = generic_driver_name, \
.cra_priority = (priority), \
- .cra_flags = CRYPTO_ALG_INTERNAL, \
.cra_blocksize = 1, \
.cra_ctxsize = (ctxsize), \
.cra_module = THIS_MODULE, \
@@ -1516,17 +1500,14 @@ static struct aead_alg aes_gcm_algs_##suffix[] = { { \
.chunksize = AES_BLOCK_SIZE, \
.maxauthsize = 16, \
.base = { \
- .cra_name = "__rfc4106(gcm(aes))", \
- .cra_driver_name = "__" rfc_driver_name, \
+ .cra_name = "rfc4106(gcm(aes))", \
+ .cra_driver_name = rfc_driver_name, \
.cra_priority = (priority), \
- .cra_flags = CRYPTO_ALG_INTERNAL, \
.cra_blocksize = 1, \
.cra_ctxsize = (ctxsize), \
.cra_module = THIS_MODULE, \
}, \
-} }; \
- \
-static struct simd_aead_alg *aes_gcm_simdalgs_##suffix[2] \
+} }
/* aes_gcm_algs_aesni */
DEFINE_GCM_ALGS(aesni, /* no flags */ 0,
@@ -1556,14 +1537,12 @@ static int __init register_avx_algs(void)
if (!boot_cpu_has(X86_FEATURE_AVX))
return 0;
- err = simd_register_skciphers_compat(skcipher_algs_aesni_avx,
- ARRAY_SIZE(skcipher_algs_aesni_avx),
- simd_skcipher_algs_aesni_avx);
+ err = crypto_register_skciphers(skcipher_algs_aesni_avx,
+ ARRAY_SIZE(skcipher_algs_aesni_avx));
if (err)
return err;
- err = simd_register_aeads_compat(aes_gcm_algs_aesni_avx,
- ARRAY_SIZE(aes_gcm_algs_aesni_avx),
- aes_gcm_simdalgs_aesni_avx);
+ err = crypto_register_aeads(aes_gcm_algs_aesni_avx,
+ ARRAY_SIZE(aes_gcm_algs_aesni_avx));
if (err)
return err;
/*
@@ -1579,9 +1558,8 @@ static int __init register_avx_algs(void)
!boot_cpu_has(X86_FEATURE_PCLMULQDQ) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
return 0;
- err = simd_register_skciphers_compat(skcipher_algs_vaes_avx2,
- ARRAY_SIZE(skcipher_algs_vaes_avx2),
- simd_skcipher_algs_vaes_avx2);
+ err = crypto_register_skciphers(skcipher_algs_vaes_avx2,
+ ARRAY_SIZE(skcipher_algs_vaes_avx2));
if (err)
return err;
@@ -1592,76 +1570,52 @@ static int __init register_avx_algs(void)
XFEATURE_MASK_AVX512, NULL))
return 0;
- err = simd_register_skciphers_compat(skcipher_algs_vaes_avx10_256,
- ARRAY_SIZE(skcipher_algs_vaes_avx10_256),
- simd_skcipher_algs_vaes_avx10_256);
- if (err)
- return err;
- err = simd_register_aeads_compat(aes_gcm_algs_vaes_avx10_256,
- ARRAY_SIZE(aes_gcm_algs_vaes_avx10_256),
- aes_gcm_simdalgs_vaes_avx10_256);
+ err = crypto_register_aeads(aes_gcm_algs_vaes_avx10_256,
+ ARRAY_SIZE(aes_gcm_algs_vaes_avx10_256));
if (err)
return err;
if (boot_cpu_has(X86_FEATURE_PREFER_YMM)) {
int i;
- for (i = 0; i < ARRAY_SIZE(skcipher_algs_vaes_avx10_512); i++)
- skcipher_algs_vaes_avx10_512[i].base.cra_priority = 1;
+ for (i = 0; i < ARRAY_SIZE(skcipher_algs_vaes_avx512); i++)
+ skcipher_algs_vaes_avx512[i].base.cra_priority = 1;
for (i = 0; i < ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512); i++)
aes_gcm_algs_vaes_avx10_512[i].base.cra_priority = 1;
}
- err = simd_register_skciphers_compat(skcipher_algs_vaes_avx10_512,
- ARRAY_SIZE(skcipher_algs_vaes_avx10_512),
- simd_skcipher_algs_vaes_avx10_512);
+ err = crypto_register_skciphers(skcipher_algs_vaes_avx512,
+ ARRAY_SIZE(skcipher_algs_vaes_avx512));
if (err)
return err;
- err = simd_register_aeads_compat(aes_gcm_algs_vaes_avx10_512,
- ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512),
- aes_gcm_simdalgs_vaes_avx10_512);
+ err = crypto_register_aeads(aes_gcm_algs_vaes_avx10_512,
+ ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512));
if (err)
return err;
#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
return 0;
}
+#define unregister_skciphers(A) \
+ if (refcount_read(&(A)[0].base.cra_refcnt) != 0) \
+ crypto_unregister_skciphers((A), ARRAY_SIZE(A))
+#define unregister_aeads(A) \
+ if (refcount_read(&(A)[0].base.cra_refcnt) != 0) \
+ crypto_unregister_aeads((A), ARRAY_SIZE(A))
+
static void unregister_avx_algs(void)
{
- if (simd_skcipher_algs_aesni_avx[0])
- simd_unregister_skciphers(skcipher_algs_aesni_avx,
- ARRAY_SIZE(skcipher_algs_aesni_avx),
- simd_skcipher_algs_aesni_avx);
- if (aes_gcm_simdalgs_aesni_avx[0])
- simd_unregister_aeads(aes_gcm_algs_aesni_avx,
- ARRAY_SIZE(aes_gcm_algs_aesni_avx),
- aes_gcm_simdalgs_aesni_avx);
+ unregister_skciphers(skcipher_algs_aesni_avx);
+ unregister_aeads(aes_gcm_algs_aesni_avx);
#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
- if (simd_skcipher_algs_vaes_avx2[0])
- simd_unregister_skciphers(skcipher_algs_vaes_avx2,
- ARRAY_SIZE(skcipher_algs_vaes_avx2),
- simd_skcipher_algs_vaes_avx2);
- if (simd_skcipher_algs_vaes_avx10_256[0])
- simd_unregister_skciphers(skcipher_algs_vaes_avx10_256,
- ARRAY_SIZE(skcipher_algs_vaes_avx10_256),
- simd_skcipher_algs_vaes_avx10_256);
- if (aes_gcm_simdalgs_vaes_avx10_256[0])
- simd_unregister_aeads(aes_gcm_algs_vaes_avx10_256,
- ARRAY_SIZE(aes_gcm_algs_vaes_avx10_256),
- aes_gcm_simdalgs_vaes_avx10_256);
- if (simd_skcipher_algs_vaes_avx10_512[0])
- simd_unregister_skciphers(skcipher_algs_vaes_avx10_512,
- ARRAY_SIZE(skcipher_algs_vaes_avx10_512),
- simd_skcipher_algs_vaes_avx10_512);
- if (aes_gcm_simdalgs_vaes_avx10_512[0])
- simd_unregister_aeads(aes_gcm_algs_vaes_avx10_512,
- ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512),
- aes_gcm_simdalgs_vaes_avx10_512);
+ unregister_skciphers(skcipher_algs_vaes_avx2);
+ unregister_skciphers(skcipher_algs_vaes_avx512);
+ unregister_aeads(aes_gcm_algs_vaes_avx10_256);
+ unregister_aeads(aes_gcm_algs_vaes_avx10_512);
#endif
}
#else /* CONFIG_X86_64 */
static struct aead_alg aes_gcm_algs_aesni[0];
-static struct simd_aead_alg *aes_gcm_simdalgs_aesni[0];
static int __init register_avx_algs(void)
{
@@ -1690,15 +1644,13 @@ static int __init aesni_init(void)
if (err)
return err;
- err = simd_register_skciphers_compat(aesni_skciphers,
- ARRAY_SIZE(aesni_skciphers),
- aesni_simd_skciphers);
+ err = crypto_register_skciphers(aesni_skciphers,
+ ARRAY_SIZE(aesni_skciphers));
if (err)
goto unregister_cipher;
- err = simd_register_aeads_compat(aes_gcm_algs_aesni,
- ARRAY_SIZE(aes_gcm_algs_aesni),
- aes_gcm_simdalgs_aesni);
+ err = crypto_register_aeads(aes_gcm_algs_aesni,
+ ARRAY_SIZE(aes_gcm_algs_aesni));
if (err)
goto unregister_skciphers;
@@ -1710,12 +1662,11 @@ static int __init aesni_init(void)
unregister_avx:
unregister_avx_algs();
- simd_unregister_aeads(aes_gcm_algs_aesni,
- ARRAY_SIZE(aes_gcm_algs_aesni),
- aes_gcm_simdalgs_aesni);
+ crypto_unregister_aeads(aes_gcm_algs_aesni,
+ ARRAY_SIZE(aes_gcm_algs_aesni));
unregister_skciphers:
- simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
- aesni_simd_skciphers);
+ crypto_unregister_skciphers(aesni_skciphers,
+ ARRAY_SIZE(aesni_skciphers));
unregister_cipher:
crypto_unregister_alg(&aesni_cipher_alg);
return err;
@@ -1723,11 +1674,10 @@ unregister_cipher:
static void __exit aesni_exit(void)
{
- simd_unregister_aeads(aes_gcm_algs_aesni,
- ARRAY_SIZE(aes_gcm_algs_aesni),
- aes_gcm_simdalgs_aesni);
- simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
- aesni_simd_skciphers);
+ crypto_unregister_aeads(aes_gcm_algs_aesni,
+ ARRAY_SIZE(aes_gcm_algs_aesni));
+ crypto_unregister_skciphers(aesni_skciphers,
+ ARRAY_SIZE(aesni_skciphers));
crypto_unregister_alg(&aesni_cipher_alg);
unregister_avx_algs();
}
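
The new unregister_skciphers()/unregister_aeads() macros replace the per-array simd wrapper pointers that used to record whether registration had happened: crypto_register_skciphers()/crypto_register_aeads() take the initial reference on each algorithm, so a zero cra_refcnt on the first entry means that array was never registered and the partial-failure cleanup path can skip it. The same check, open-coded as a sketch over a hypothetical local algs[] array:

#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/refcount.h>

static struct skcipher_alg algs[2];	/* hypothetical */

static void maybe_unregister(void)
{
	/* cra_refcnt is initialized by crypto_register_skciphers(); while
	 * it is still zero, registration never happened, so there is
	 * nothing to undo. */
	if (refcount_read(&algs[0].base.cra_refcnt) != 0)
		crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
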
diff --git a/arch/x86/crypto/aria_aesni_avx2_glue.c b/arch/x86/crypto/aria_aesni_avx2_glue.c
index 87a11804fc77..b4bddcd58457 100644
--- a/arch/x86/crypto/aria_aesni_avx2_glue.c
+++ b/arch/x86/crypto/aria_aesni_avx2_glue.c
@@ -6,7 +6,6 @@
*/
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <crypto/aria.h>
#include <linux/crypto.h>
#include <linux/err.h>
@@ -165,10 +164,9 @@ static int aria_avx2_init_tfm(struct crypto_skcipher *tfm)
static struct skcipher_alg aria_algs[] = {
{
- .base.cra_name = "__ecb(aria)",
- .base.cra_driver_name = "__ecb-aria-avx2",
+ .base.cra_name = "ecb(aria)",
+ .base.cra_driver_name = "ecb-aria-avx2",
.base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = ARIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
@@ -178,11 +176,10 @@ static struct skcipher_alg aria_algs[] = {
.encrypt = aria_avx2_ecb_encrypt,
.decrypt = aria_avx2_ecb_decrypt,
}, {
- .base.cra_name = "__ctr(aria)",
- .base.cra_driver_name = "__ctr-aria-avx2",
+ .base.cra_name = "ctr(aria)",
+ .base.cra_driver_name = "ctr-aria-avx2",
.base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL |
- CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE,
+ .base.cra_flags = CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
@@ -197,8 +194,6 @@ static struct skcipher_alg aria_algs[] = {
}
};
-static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)];
-
static int __init aria_avx2_init(void)
{
const char *feature_name;
@@ -233,15 +228,12 @@ static int __init aria_avx2_init(void)
aria_ops.aria_ctr_crypt_32way = aria_aesni_avx2_ctr_crypt_32way;
}
- return simd_register_skciphers_compat(aria_algs,
- ARRAY_SIZE(aria_algs),
- aria_simd_algs);
+ return crypto_register_skciphers(aria_algs, ARRAY_SIZE(aria_algs));
}
static void __exit aria_avx2_exit(void)
{
- simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs),
- aria_simd_algs);
+ crypto_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs));
}
module_init(aria_avx2_init);
diff --git a/arch/x86/crypto/aria_aesni_avx_glue.c b/arch/x86/crypto/aria_aesni_avx_glue.c
index 4e1516b76669..ab9b38d05332 100644
--- a/arch/x86/crypto/aria_aesni_avx_glue.c
+++ b/arch/x86/crypto/aria_aesni_avx_glue.c
@@ -6,7 +6,6 @@
*/
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <crypto/aria.h>
#include <linux/crypto.h>
#include <linux/err.h>
@@ -152,10 +151,9 @@ static int aria_avx_init_tfm(struct crypto_skcipher *tfm)
static struct skcipher_alg aria_algs[] = {
{
- .base.cra_name = "__ecb(aria)",
- .base.cra_driver_name = "__ecb-aria-avx",
+ .base.cra_name = "ecb(aria)",
+ .base.cra_driver_name = "ecb-aria-avx",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = ARIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
@@ -165,10 +163,9 @@ static struct skcipher_alg aria_algs[] = {
.encrypt = aria_avx_ecb_encrypt,
.decrypt = aria_avx_ecb_decrypt,
}, {
- .base.cra_name = "__ctr(aria)",
- .base.cra_driver_name = "__ctr-aria-avx",
+ .base.cra_name = "ctr(aria)",
+ .base.cra_driver_name = "ctr-aria-avx",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
@@ -184,8 +181,6 @@ static struct skcipher_alg aria_algs[] = {
}
};
-static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)];
-
static int __init aria_avx_init(void)
{
const char *feature_name;
@@ -213,15 +208,12 @@ static int __init aria_avx_init(void)
aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_ctr_crypt_16way;
}
- return simd_register_skciphers_compat(aria_algs,
- ARRAY_SIZE(aria_algs),
- aria_simd_algs);
+ return crypto_register_skciphers(aria_algs, ARRAY_SIZE(aria_algs));
}
static void __exit aria_avx_exit(void)
{
- simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs),
- aria_simd_algs);
+ crypto_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs));
}
module_init(aria_avx_init);
diff --git a/arch/x86/crypto/aria_gfni_avx512_glue.c b/arch/x86/crypto/aria_gfni_avx512_glue.c
index f4a2208d2638..363cbf4399cc 100644
--- a/arch/x86/crypto/aria_gfni_avx512_glue.c
+++ b/arch/x86/crypto/aria_gfni_avx512_glue.c
@@ -6,7 +6,6 @@
*/
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <crypto/aria.h>
#include <linux/crypto.h>
#include <linux/err.h>
@@ -165,10 +164,9 @@ static int aria_avx512_init_tfm(struct crypto_skcipher *tfm)
static struct skcipher_alg aria_algs[] = {
{
- .base.cra_name = "__ecb(aria)",
- .base.cra_driver_name = "__ecb-aria-avx512",
+ .base.cra_name = "ecb(aria)",
+ .base.cra_driver_name = "ecb-aria-avx512",
.base.cra_priority = 600,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = ARIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
@@ -178,11 +176,10 @@ static struct skcipher_alg aria_algs[] = {
.encrypt = aria_avx512_ecb_encrypt,
.decrypt = aria_avx512_ecb_decrypt,
}, {
- .base.cra_name = "__ctr(aria)",
- .base.cra_driver_name = "__ctr-aria-avx512",
+ .base.cra_name = "ctr(aria)",
+ .base.cra_driver_name = "ctr-aria-avx512",
.base.cra_priority = 600,
- .base.cra_flags = CRYPTO_ALG_INTERNAL |
- CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE,
+ .base.cra_flags = CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
@@ -197,8 +194,6 @@ static struct skcipher_alg aria_algs[] = {
}
};
-static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)];
-
static int __init aria_avx512_init(void)
{
const char *feature_name;
@@ -229,15 +224,12 @@ static int __init aria_avx512_init(void)
aria_ops.aria_decrypt_64way = aria_gfni_avx512_decrypt_64way;
aria_ops.aria_ctr_crypt_64way = aria_gfni_avx512_ctr_crypt_64way;
- return simd_register_skciphers_compat(aria_algs,
- ARRAY_SIZE(aria_algs),
- aria_simd_algs);
+ return crypto_register_skciphers(aria_algs, ARRAY_SIZE(aria_algs));
}
static void __exit aria_avx512_exit(void)
{
- simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs),
- aria_simd_algs);
+ crypto_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs));
}
module_init(aria_avx512_init);
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index e7e4d64e9577..2d2f4e16537c 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -6,7 +6,6 @@
*/
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -69,10 +68,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg camellia_algs[] = {
{
- .base.cra_name = "__ecb(camellia)",
- .base.cra_driver_name = "__ecb-camellia-aesni-avx2",
+ .base.cra_name = "ecb(camellia)",
+ .base.cra_driver_name = "ecb-camellia-aesni-avx2",
.base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
@@ -82,10 +80,9 @@ static struct skcipher_alg camellia_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(camellia)",
- .base.cra_driver_name = "__cbc-camellia-aesni-avx2",
+ .base.cra_name = "cbc(camellia)",
+ .base.cra_driver_name = "cbc-camellia-aesni-avx2",
.base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
@@ -98,8 +95,6 @@ static struct skcipher_alg camellia_algs[] = {
},
};
-static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
-
static int __init camellia_aesni_init(void)
{
const char *feature_name;
@@ -118,15 +113,13 @@ static int __init camellia_aesni_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(camellia_algs,
- ARRAY_SIZE(camellia_algs),
- camellia_simd_algs);
+ return crypto_register_skciphers(camellia_algs,
+ ARRAY_SIZE(camellia_algs));
}
static void __exit camellia_aesni_fini(void)
{
- simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
- camellia_simd_algs);
+ crypto_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs));
}
module_init(camellia_aesni_init);
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index c7ccf63e741e..a7d162388142 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -6,7 +6,6 @@
*/
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -69,10 +68,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg camellia_algs[] = {
{
- .base.cra_name = "__ecb(camellia)",
- .base.cra_driver_name = "__ecb-camellia-aesni",
+ .base.cra_name = "ecb(camellia)",
+ .base.cra_driver_name = "ecb-camellia-aesni",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
@@ -82,10 +80,9 @@ static struct skcipher_alg camellia_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(camellia)",
- .base.cra_driver_name = "__cbc-camellia-aesni",
+ .base.cra_name = "cbc(camellia)",
+ .base.cra_driver_name = "cbc-camellia-aesni",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
@@ -98,8 +95,6 @@ static struct skcipher_alg camellia_algs[] = {
}
};
-static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
-
static int __init camellia_aesni_init(void)
{
const char *feature_name;
@@ -117,15 +112,13 @@ static int __init camellia_aesni_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(camellia_algs,
- ARRAY_SIZE(camellia_algs),
- camellia_simd_algs);
+ return crypto_register_skciphers(camellia_algs,
+ ARRAY_SIZE(camellia_algs));
}
static void __exit camellia_aesni_fini(void)
{
- simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
- camellia_simd_algs);
+ crypto_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs));
}
module_init(camellia_aesni_init);
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index 3976a87f92ad..3aca04d43b34 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -8,7 +8,6 @@
#include <crypto/algapi.h>
#include <crypto/cast5.h>
-#include <crypto/internal/simd.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -64,10 +63,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg cast5_algs[] = {
{
- .base.cra_name = "__ecb(cast5)",
- .base.cra_driver_name = "__ecb-cast5-avx",
+ .base.cra_name = "ecb(cast5)",
+ .base.cra_driver_name = "ecb-cast5-avx",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAST5_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cast5_ctx),
.base.cra_module = THIS_MODULE,
@@ -77,10 +75,9 @@ static struct skcipher_alg cast5_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(cast5)",
- .base.cra_driver_name = "__cbc-cast5-avx",
+ .base.cra_name = "cbc(cast5)",
+ .base.cra_driver_name = "cbc-cast5-avx",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAST5_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cast5_ctx),
.base.cra_module = THIS_MODULE,
@@ -93,8 +90,6 @@ static struct skcipher_alg cast5_algs[] = {
}
};
-static struct simd_skcipher_alg *cast5_simd_algs[ARRAY_SIZE(cast5_algs)];
-
static int __init cast5_init(void)
{
const char *feature_name;
@@ -105,15 +100,13 @@ static int __init cast5_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(cast5_algs,
- ARRAY_SIZE(cast5_algs),
- cast5_simd_algs);
+ return crypto_register_skciphers(cast5_algs,
+ ARRAY_SIZE(cast5_algs));
}
static void __exit cast5_exit(void)
{
- simd_unregister_skciphers(cast5_algs, ARRAY_SIZE(cast5_algs),
- cast5_simd_algs);
+ crypto_unregister_skciphers(cast5_algs, ARRAY_SIZE(cast5_algs));
}
module_init(cast5_init);
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 7e2aea372349..c4dd28c30303 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -14,7 +14,6 @@
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/cast6.h>
-#include <crypto/internal/simd.h>
#include "ecb_cbc_helpers.h"
@@ -64,10 +63,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg cast6_algs[] = {
{
- .base.cra_name = "__ecb(cast6)",
- .base.cra_driver_name = "__ecb-cast6-avx",
+ .base.cra_name = "ecb(cast6)",
+ .base.cra_driver_name = "ecb-cast6-avx",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAST6_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cast6_ctx),
.base.cra_module = THIS_MODULE,
@@ -77,10 +75,9 @@ static struct skcipher_alg cast6_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(cast6)",
- .base.cra_driver_name = "__cbc-cast6-avx",
+ .base.cra_name = "cbc(cast6)",
+ .base.cra_driver_name = "cbc-cast6-avx",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAST6_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cast6_ctx),
.base.cra_module = THIS_MODULE,
@@ -93,8 +90,6 @@ static struct skcipher_alg cast6_algs[] = {
},
};
-static struct simd_skcipher_alg *cast6_simd_algs[ARRAY_SIZE(cast6_algs)];
-
static int __init cast6_init(void)
{
const char *feature_name;
@@ -105,15 +100,12 @@ static int __init cast6_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(cast6_algs,
- ARRAY_SIZE(cast6_algs),
- cast6_simd_algs);
+ return crypto_register_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs));
}
static void __exit cast6_exit(void)
{
- simd_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs),
- cast6_simd_algs);
+ crypto_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs));
}
module_init(cast6_init);
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
deleted file mode 100644
index 8bb74a272879..000000000000
--- a/arch/x86/crypto/chacha_glue.c
+++ /dev/null
@@ -1,311 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * x64 SIMD accelerated ChaCha and XChaCha stream ciphers,
- * including ChaCha20 (RFC7539)
- *
- * Copyright (C) 2015 Martin Willi
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sizes.h>
-#include <asm/simd.h>
-
-asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
- unsigned int len, int nrounds);
-asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
- unsigned int len, int nrounds);
-asmlinkage void hchacha_block_ssse3(const u32 *state, u32 *out, int nrounds);
-
-asmlinkage void chacha_2block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
- unsigned int len, int nrounds);
-asmlinkage void chacha_4block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
- unsigned int len, int nrounds);
-asmlinkage void chacha_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
- unsigned int len, int nrounds);
-
-asmlinkage void chacha_2block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
- unsigned int len, int nrounds);
-asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
- unsigned int len, int nrounds);
-asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
- unsigned int len, int nrounds);
-
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_simd);
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx2);
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx512vl);
-
-static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
-{
- len = min(len, maxblocks * CHACHA_BLOCK_SIZE);
- return round_up(len, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE;
-}
-
-static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds)
-{
- if (IS_ENABLED(CONFIG_AS_AVX512) &&
- static_branch_likely(&chacha_use_avx512vl)) {
- while (bytes >= CHACHA_BLOCK_SIZE * 8) {
- chacha_8block_xor_avx512vl(state, dst, src, bytes,
- nrounds);
- bytes -= CHACHA_BLOCK_SIZE * 8;
- src += CHACHA_BLOCK_SIZE * 8;
- dst += CHACHA_BLOCK_SIZE * 8;
- state[12] += 8;
- }
- if (bytes > CHACHA_BLOCK_SIZE * 4) {
- chacha_8block_xor_avx512vl(state, dst, src, bytes,
- nrounds);
- state[12] += chacha_advance(bytes, 8);
- return;
- }
- if (bytes > CHACHA_BLOCK_SIZE * 2) {
- chacha_4block_xor_avx512vl(state, dst, src, bytes,
- nrounds);
- state[12] += chacha_advance(bytes, 4);
- return;
- }
- if (bytes) {
- chacha_2block_xor_avx512vl(state, dst, src, bytes,
- nrounds);
- state[12] += chacha_advance(bytes, 2);
- return;
- }
- }
-
- if (static_branch_likely(&chacha_use_avx2)) {
- while (bytes >= CHACHA_BLOCK_SIZE * 8) {
- chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
- bytes -= CHACHA_BLOCK_SIZE * 8;
- src += CHACHA_BLOCK_SIZE * 8;
- dst += CHACHA_BLOCK_SIZE * 8;
- state[12] += 8;
- }
- if (bytes > CHACHA_BLOCK_SIZE * 4) {
- chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
- state[12] += chacha_advance(bytes, 8);
- return;
- }
- if (bytes > CHACHA_BLOCK_SIZE * 2) {
- chacha_4block_xor_avx2(state, dst, src, bytes, nrounds);
- state[12] += chacha_advance(bytes, 4);
- return;
- }
- if (bytes > CHACHA_BLOCK_SIZE) {
- chacha_2block_xor_avx2(state, dst, src, bytes, nrounds);
- state[12] += chacha_advance(bytes, 2);
- return;
- }
- }
-
- while (bytes >= CHACHA_BLOCK_SIZE * 4) {
- chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
- bytes -= CHACHA_BLOCK_SIZE * 4;
- src += CHACHA_BLOCK_SIZE * 4;
- dst += CHACHA_BLOCK_SIZE * 4;
- state[12] += 4;
- }
- if (bytes > CHACHA_BLOCK_SIZE) {
- chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
- state[12] += chacha_advance(bytes, 4);
- return;
- }
- if (bytes) {
- chacha_block_xor_ssse3(state, dst, src, bytes, nrounds);
- state[12]++;
- }
-}
-
-void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
-{
- if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) {
- hchacha_block_generic(state, stream, nrounds);
- } else {
- kernel_fpu_begin();
- hchacha_block_ssse3(state, stream, nrounds);
- kernel_fpu_end();
- }
-}
-EXPORT_SYMBOL(hchacha_block_arch);
-
-void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
- int nrounds)
-{
- if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() ||
- bytes <= CHACHA_BLOCK_SIZE)
- return chacha_crypt_generic(state, dst, src, bytes, nrounds);
-
- do {
- unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
-
- kernel_fpu_begin();
- chacha_dosimd(state, dst, src, todo, nrounds);
- kernel_fpu_end();
-
- bytes -= todo;
- src += todo;
- dst += todo;
- } while (bytes);
-}
-EXPORT_SYMBOL(chacha_crypt_arch);
-
-static int chacha_simd_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
-{
- u32 state[CHACHA_STATE_WORDS] __aligned(8);
- struct skcipher_walk walk;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- if (!static_branch_likely(&chacha_use_simd) ||
- !crypto_simd_usable()) {
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes,
- ctx->nrounds);
- } else {
- kernel_fpu_begin();
- chacha_dosimd(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes,
- ctx->nrounds);
- kernel_fpu_end();
- }
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int chacha_simd(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_simd_stream_xor(req, ctx, req->iv);
-}
-
-static int xchacha_simd(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 state[CHACHA_STATE_WORDS] __aligned(8);
- struct chacha_ctx subctx;
- u8 real_iv[16];
-
- chacha_init(state, ctx->key, req->iv);
-
- if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
- kernel_fpu_begin();
- hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
- kernel_fpu_end();
- } else {
- hchacha_block_generic(state, subctx.key, ctx->nrounds);
- }
- subctx.nrounds = ctx->nrounds;
-
- memcpy(&real_iv[0], req->iv + 24, 8);
- memcpy(&real_iv[8], req->iv + 16, 8);
- return chacha_simd_stream_xor(req, &subctx, real_iv);
-}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-simd",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_simd,
- .decrypt = chacha_simd,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-simd",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_simd,
- .decrypt = xchacha_simd,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-simd",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_simd,
- .decrypt = xchacha_simd,
- },
-};
-
-static int __init chacha_simd_mod_init(void)
-{
- if (!boot_cpu_has(X86_FEATURE_SSSE3))
- return 0;
-
- static_branch_enable(&chacha_use_simd);
-
- if (boot_cpu_has(X86_FEATURE_AVX) &&
- boot_cpu_has(X86_FEATURE_AVX2) &&
- cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
- static_branch_enable(&chacha_use_avx2);
-
- if (IS_ENABLED(CONFIG_AS_AVX512) &&
- boot_cpu_has(X86_FEATURE_AVX512VL) &&
- boot_cpu_has(X86_FEATURE_AVX512BW)) /* kmovq */
- static_branch_enable(&chacha_use_avx512vl);
- }
- return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
- crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0;
-}
-
-static void __exit chacha_simd_mod_fini(void)
-{
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) && boot_cpu_has(X86_FEATURE_SSSE3))
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-module_init(chacha_simd_mod_init);
-module_exit(chacha_simd_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (x64 SIMD accelerated)");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-simd");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-simd");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-simd");
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index 99cb983ded9e..c4fbaa82ed7a 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -103,8 +103,8 @@ SYM_FUNC_START(clmul_ghash_mul)
SYM_FUNC_END(clmul_ghash_mul)
/*
- * void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
- * const le128 *shash);
+ * int clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
+ * const le128 *shash);
*/
SYM_FUNC_START(clmul_ghash_update)
FRAME_BEGIN
@@ -127,6 +127,7 @@ SYM_FUNC_START(clmul_ghash_update)
pshufb BSWAP, DATA
movups DATA, (%rdi)
.Lupdate_just_ret:
+ mov %rdx, %rax
FRAME_END
RET
SYM_FUNC_END(clmul_ghash_update)
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index c759ec808bf1..aea5d4d06be7 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -7,41 +7,27 @@
* Author: Huang Ying <ying.huang@intel.com>
*/
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/cryptd.h>
-#include <crypto/gf128mul.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
+#include <crypto/b128ops.h>
+#include <crypto/ghash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/utils.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include <linux/unaligned.h>
-#define GHASH_BLOCK_SIZE 16
-#define GHASH_DIGEST_SIZE 16
+asmlinkage void clmul_ghash_mul(char *dst, const le128 *shash);
-void clmul_ghash_mul(char *dst, const le128 *shash);
+asmlinkage int clmul_ghash_update(char *dst, const char *src,
+ unsigned int srclen, const le128 *shash);
-void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
- const le128 *shash);
-
-struct ghash_async_ctx {
- struct cryptd_ahash *cryptd_tfm;
-};
-
-struct ghash_ctx {
+struct x86_ghash_ctx {
le128 shash;
};
-struct ghash_desc_ctx {
- u8 buffer[GHASH_BLOCK_SIZE];
- u32 bytes;
-};
-
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
@@ -54,7 +40,7 @@ static int ghash_init(struct shash_desc *desc)
static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
- struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+ struct x86_ghash_ctx *ctx = crypto_shash_ctx(tfm);
u64 a, b;
if (keylen != GHASH_BLOCK_SIZE)
@@ -95,64 +81,38 @@ static int ghash_setkey(struct crypto_shash *tfm,
static int ghash_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
+ struct x86_ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
+ int remain;
kernel_fpu_begin();
- if (dctx->bytes) {
- int n = min(srclen, dctx->bytes);
- u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
-
- dctx->bytes -= n;
- srclen -= n;
-
- while (n--)
- *pos++ ^= *src++;
-
- if (!dctx->bytes)
- clmul_ghash_mul(dst, &ctx->shash);
- }
-
- clmul_ghash_update(dst, src, srclen, &ctx->shash);
+ remain = clmul_ghash_update(dst, src, srclen, &ctx->shash);
kernel_fpu_end();
-
- if (srclen & 0xf) {
- src += srclen - (srclen & 0xf);
- srclen &= 0xf;
- dctx->bytes = GHASH_BLOCK_SIZE - srclen;
- while (srclen--)
- *dst++ ^= *src++;
- }
-
- return 0;
+ return remain;
}
-static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static void ghash_flush(struct x86_ghash_ctx *ctx, struct ghash_desc_ctx *dctx,
+ const u8 *src, unsigned int len)
{
u8 *dst = dctx->buffer;
- if (dctx->bytes) {
- u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
-
- while (dctx->bytes--)
- *tmp++ ^= 0;
-
- kernel_fpu_begin();
+ kernel_fpu_begin();
+ if (len) {
+ crypto_xor(dst, src, len);
clmul_ghash_mul(dst, &ctx->shash);
- kernel_fpu_end();
}
-
- dctx->bytes = 0;
+ kernel_fpu_end();
}
-static int ghash_final(struct shash_desc *desc, u8 *dst)
+static int ghash_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
{
+ struct x86_ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
- ghash_flush(ctx, dctx);
+ ghash_flush(ctx, dctx, src, len);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
return 0;
@@ -162,186 +122,20 @@ static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
- .final = ghash_final,
+ .finup = ghash_finup,
.setkey = ghash_setkey,
.descsize = sizeof(struct ghash_desc_ctx),
.base = {
- .cra_name = "__ghash",
- .cra_driver_name = "__ghash-pclmulqdqni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_name = "ghash",
+ .cra_driver_name = "ghash-pclmulqdqni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = GHASH_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ghash_ctx),
+ .cra_ctxsize = sizeof(struct x86_ghash_ctx),
.cra_module = THIS_MODULE,
},
};
-static int ghash_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *cryptd_req = ahash_request_ctx(req);
- struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
- struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
-
- desc->tfm = child;
- return crypto_shash_init(desc);
-}
-
-static void ghash_init_cryptd_req(struct ahash_request *req)
-{
- struct ahash_request *cryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
- ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
- ahash_request_set_callback(cryptd_req, req->base.flags,
- req->base.complete, req->base.data);
- ahash_request_set_crypt(cryptd_req, req->src, req->result,
- req->nbytes);
-}
-
-static int ghash_async_update(struct ahash_request *req)
-{
- struct ahash_request *cryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
- if (!crypto_simd_usable() ||
- (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
- ghash_init_cryptd_req(req);
- return crypto_ahash_update(cryptd_req);
- } else {
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
- return shash_ahash_update(req, desc);
- }
-}
-
-static int ghash_async_final(struct ahash_request *req)
-{
- struct ahash_request *cryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
- if (!crypto_simd_usable() ||
- (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
- ghash_init_cryptd_req(req);
- return crypto_ahash_final(cryptd_req);
- } else {
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
- return crypto_shash_final(desc, req->result);
- }
-}
-
-static int ghash_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *cryptd_req = ahash_request_ctx(req);
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
- struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-
- ghash_async_init(req);
- memcpy(dctx, in, sizeof(*dctx));
- return 0;
-
-}
-
-static int ghash_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *cryptd_req = ahash_request_ctx(req);
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
- struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-
- memcpy(out, dctx, sizeof(*dctx));
- return 0;
-
-}
-
-static int ghash_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *cryptd_req = ahash_request_ctx(req);
- struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
- if (!crypto_simd_usable() ||
- (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
- ghash_init_cryptd_req(req);
- return crypto_ahash_digest(cryptd_req);
- } else {
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
- struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
-
- desc->tfm = child;
- return shash_ahash_digest(req, desc);
- }
-}
-
-static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct crypto_ahash *child = &ctx->cryptd_tfm->base;
-
- crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
- & CRYPTO_TFM_REQ_MASK);
- return crypto_ahash_setkey(child, key, keylen);
-}
-
-static int ghash_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct cryptd_ahash *cryptd_tfm;
- struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
-
- cryptd_tfm = cryptd_alloc_ahash("__ghash-pclmulqdqni",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
- ctx->cryptd_tfm = cryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&cryptd_tfm->base));
-
- return 0;
-}
-
-static void ghash_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
-
- cryptd_free_ahash(ctx->cryptd_tfm);
-}
-
-static struct ahash_alg ghash_async_alg = {
- .init = ghash_async_init,
- .update = ghash_async_update,
- .final = ghash_async_final,
- .setkey = ghash_async_setkey,
- .digest = ghash_async_digest,
- .export = ghash_async_export,
- .import = ghash_async_import,
- .halg = {
- .digestsize = GHASH_DIGEST_SIZE,
- .statesize = sizeof(struct ghash_desc_ctx),
- .base = {
- .cra_name = "ghash",
- .cra_driver_name = "ghash-clmulni",
- .cra_priority = 400,
- .cra_ctxsize = sizeof(struct ghash_async_ctx),
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = GHASH_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_init = ghash_async_init_tfm,
- .cra_exit = ghash_async_exit_tfm,
- },
- },
-};
-
static const struct x86_cpu_id pcmul_cpu_id[] = {
X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL), /* Pickle-Mickle-Duck */
{}
@@ -350,29 +144,14 @@ MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id);
static int __init ghash_pclmulqdqni_mod_init(void)
{
- int err;
-
if (!x86_match_cpu(pcmul_cpu_id))
return -ENODEV;
- err = crypto_register_shash(&ghash_alg);
- if (err)
- goto err_out;
- err = crypto_register_ahash(&ghash_async_alg);
- if (err)
- goto err_shash;
-
- return 0;
-
-err_shash:
- crypto_unregister_shash(&ghash_alg);
-err_out:
- return err;
+ return crypto_register_shash(&ghash_alg);
}
static void __exit ghash_pclmulqdqni_mod_exit(void)
{
- crypto_unregister_ahash(&ghash_async_alg);
crypto_unregister_shash(&ghash_alg);
}
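
The glue conversion above moves ghash to the crypto core's block-only shash model: ->update() is handed data to process in whole blocks, returns the number of trailing bytes it did not consume, and ->finup() later receives that partial tail. A minimal sketch of the contract follows; process_blocks() and process_partial() are hypothetical stand-ins for clmul_ghash_update() and the final crypto_xor()+clmul_ghash_mul() step.

	#include <crypto/ghash.h>
	#include <crypto/internal/hash.h>
	#include <linux/string.h>

	/* Hypothetical block-only update/finup pair illustrating the contract. */
	static int example_update(struct shash_desc *desc, const u8 *src,
				  unsigned int len)
	{
		u8 *digest = shash_desc_ctx(desc);

		while (len >= GHASH_BLOCK_SIZE) {
			process_blocks(digest, src, GHASH_BLOCK_SIZE);	/* hypothetical */
			src += GHASH_BLOCK_SIZE;
			len -= GHASH_BLOCK_SIZE;
		}
		return len;	/* leftover bytes; the core buffers them for finup() */
	}

	static int example_finup(struct shash_desc *desc, const u8 *src,
				 unsigned int len, u8 *out)
	{
		u8 *digest = shash_desc_ctx(desc);

		if (len)
			process_partial(digest, src, len);	/* hypothetical */
		memcpy(out, digest, GHASH_DIGEST_SIZE);
		return 0;
	}
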
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
deleted file mode 100644
index 08ff4b489f7e..000000000000
--- a/arch/x86/crypto/poly1305_glue.c
+++ /dev/null
@@ -1,290 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/*
- * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/poly1305.h>
-#include <crypto/internal/simd.h>
-#include <linux/crypto.h>
-#include <linux/jump_label.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sizes.h>
-#include <asm/cpu_device_id.h>
-#include <asm/simd.h>
-
-asmlinkage void poly1305_init_x86_64(void *ctx,
- const u8 key[POLY1305_BLOCK_SIZE]);
-asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
- const size_t len, const u32 padbit);
-asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
- const u32 nonce[4]);
-asmlinkage void poly1305_emit_avx(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
- const u32 nonce[4]);
-asmlinkage void poly1305_blocks_avx(void *ctx, const u8 *inp, const size_t len,
- const u32 padbit);
-asmlinkage void poly1305_blocks_avx2(void *ctx, const u8 *inp, const size_t len,
- const u32 padbit);
-asmlinkage void poly1305_blocks_avx512(void *ctx, const u8 *inp,
- const size_t len, const u32 padbit);
-
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx);
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2);
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx512);
-
-struct poly1305_arch_internal {
- union {
- struct {
- u32 h[5];
- u32 is_base2_26;
- };
- u64 hs[3];
- };
- u64 r[2];
- u64 pad;
- struct { u32 r2, r1, r4, r3; } rn[9];
-};
-
-/* The AVX code uses base 2^26, while the scalar code uses base 2^64. If we hit
- * the unfortunate situation of using AVX and then having to go back to scalar
- * -- because the user is silly and has called the update function from two
- * separate contexts -- then we need to convert back to the original base before
- * proceeding. It is possible to reason that the initial reduction below is
- * sufficient given the implementation invariants. However, for an avoidance of
- * doubt and because this is not performance critical, we do the full reduction
- * anyway. Z3 proof of below function: https://xn--4db.cc/ltPtHCKN/py
- */
-static void convert_to_base2_64(void *ctx)
-{
- struct poly1305_arch_internal *state = ctx;
- u32 cy;
-
- if (!state->is_base2_26)
- return;
-
- cy = state->h[0] >> 26; state->h[0] &= 0x3ffffff; state->h[1] += cy;
- cy = state->h[1] >> 26; state->h[1] &= 0x3ffffff; state->h[2] += cy;
- cy = state->h[2] >> 26; state->h[2] &= 0x3ffffff; state->h[3] += cy;
- cy = state->h[3] >> 26; state->h[3] &= 0x3ffffff; state->h[4] += cy;
- state->hs[0] = ((u64)state->h[2] << 52) | ((u64)state->h[1] << 26) | state->h[0];
- state->hs[1] = ((u64)state->h[4] << 40) | ((u64)state->h[3] << 14) | (state->h[2] >> 12);
- state->hs[2] = state->h[4] >> 24;
-#define ULT(a, b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
- cy = (state->hs[2] >> 2) + (state->hs[2] & ~3ULL);
- state->hs[2] &= 3;
- state->hs[0] += cy;
- state->hs[1] += (cy = ULT(state->hs[0], cy));
- state->hs[2] += ULT(state->hs[1], cy);
-#undef ULT
- state->is_base2_26 = 0;
-}
-
-static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
-{
- poly1305_init_x86_64(ctx, key);
-}
-
-static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
- const u32 padbit)
-{
- struct poly1305_arch_internal *state = ctx;
-
- /* SIMD disables preemption, so relax after processing each page. */
- BUILD_BUG_ON(SZ_4K < POLY1305_BLOCK_SIZE ||
- SZ_4K % POLY1305_BLOCK_SIZE);
-
- if (!static_branch_likely(&poly1305_use_avx) ||
- (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) ||
- !crypto_simd_usable()) {
- convert_to_base2_64(ctx);
- poly1305_blocks_x86_64(ctx, inp, len, padbit);
- return;
- }
-
- do {
- const size_t bytes = min_t(size_t, len, SZ_4K);
-
- kernel_fpu_begin();
- if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512))
- poly1305_blocks_avx512(ctx, inp, bytes, padbit);
- else if (static_branch_likely(&poly1305_use_avx2))
- poly1305_blocks_avx2(ctx, inp, bytes, padbit);
- else
- poly1305_blocks_avx(ctx, inp, bytes, padbit);
- kernel_fpu_end();
-
- len -= bytes;
- inp += bytes;
- } while (len);
-}
-
-static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
- const u32 nonce[4])
-{
- if (!static_branch_likely(&poly1305_use_avx))
- poly1305_emit_x86_64(ctx, mac, nonce);
- else
- poly1305_emit_avx(ctx, mac, nonce);
-}
-
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
-{
- poly1305_simd_init(&dctx->h, key);
- dctx->s[0] = get_unaligned_le32(&key[16]);
- dctx->s[1] = get_unaligned_le32(&key[20]);
- dctx->s[2] = get_unaligned_le32(&key[24]);
- dctx->s[3] = get_unaligned_le32(&key[28]);
- dctx->buflen = 0;
- dctx->sset = true;
-}
-EXPORT_SYMBOL(poly1305_init_arch);
-
-static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx,
- const u8 *inp, unsigned int len)
-{
- unsigned int acc = 0;
- if (unlikely(!dctx->sset)) {
- if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) {
- poly1305_simd_init(&dctx->h, inp);
- inp += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- acc += POLY1305_BLOCK_SIZE;
- dctx->rset = 1;
- }
- if (len >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(&inp[0]);
- dctx->s[1] = get_unaligned_le32(&inp[4]);
- dctx->s[2] = get_unaligned_le32(&inp[8]);
- dctx->s[3] = get_unaligned_le32(&inp[12]);
- acc += POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- }
- return acc;
-}
-
-void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
- unsigned int srclen)
-{
- unsigned int bytes, used;
-
- if (unlikely(dctx->buflen)) {
- bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- srclen -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf, POLY1305_BLOCK_SIZE)))
- poly1305_simd_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 1);
- dctx->buflen = 0;
- }
- }
-
- if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
- bytes = round_down(srclen, POLY1305_BLOCK_SIZE);
- srclen -= bytes;
- used = crypto_poly1305_setdctxkey(dctx, src, bytes);
- if (likely(bytes - used))
- poly1305_simd_blocks(&dctx->h, src + used, bytes - used, 1);
- src += bytes;
- }
-
- if (unlikely(srclen)) {
- dctx->buflen = srclen;
- memcpy(dctx->buf, src, srclen);
- }
-}
-EXPORT_SYMBOL(poly1305_update_arch);
-
-void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
-{
- if (unlikely(dctx->buflen)) {
- dctx->buf[dctx->buflen++] = 1;
- memset(dctx->buf + dctx->buflen, 0,
- POLY1305_BLOCK_SIZE - dctx->buflen);
- poly1305_simd_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
- }
-
- poly1305_simd_emit(&dctx->h, dst, dctx->s);
- memzero_explicit(dctx, sizeof(*dctx));
-}
-EXPORT_SYMBOL(poly1305_final_arch);
-
-static int crypto_poly1305_init(struct shash_desc *desc)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- *dctx = (struct poly1305_desc_ctx){};
- return 0;
-}
-
-static int crypto_poly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- poly1305_update_arch(dctx, src, srclen);
- return 0;
-}
-
-static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (unlikely(!dctx->sset))
- return -ENOKEY;
-
- poly1305_final_arch(dctx, dst);
- return 0;
-}
-
-static struct shash_alg alg = {
- .digestsize = POLY1305_DIGEST_SIZE,
- .init = crypto_poly1305_init,
- .update = crypto_poly1305_update,
- .final = crypto_poly1305_final,
- .descsize = sizeof(struct poly1305_desc_ctx),
- .base = {
- .cra_name = "poly1305",
- .cra_driver_name = "poly1305-simd",
- .cra_priority = 300,
- .cra_blocksize = POLY1305_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- },
-};
-
-static int __init poly1305_simd_mod_init(void)
-{
- if (boot_cpu_has(X86_FEATURE_AVX) &&
- cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
- static_branch_enable(&poly1305_use_avx);
- if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) &&
- cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
- static_branch_enable(&poly1305_use_avx2);
- if (IS_ENABLED(CONFIG_AS_AVX512) && boot_cpu_has(X86_FEATURE_AVX) &&
- boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) &&
- cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL) &&
- /* Skylake downclocks unacceptably much when using zmm, but later generations are fast. */
- boot_cpu_data.x86_vfm != INTEL_SKYLAKE_X)
- static_branch_enable(&poly1305_use_avx512);
- return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? crypto_register_shash(&alg) : 0;
-}
-
-static void __exit poly1305_simd_mod_exit(void)
-{
- if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
- crypto_unregister_shash(&alg);
-}
-
-module_init(poly1305_simd_mod_init);
-module_exit(poly1305_simd_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
-MODULE_DESCRIPTION("Poly1305 authenticator");
-MODULE_ALIAS_CRYPTO("poly1305");
-MODULE_ALIAS_CRYPTO("poly1305-simd");
diff --git a/arch/x86/crypto/polyval-clmulni_glue.c b/arch/x86/crypto/polyval-clmulni_glue.c
index 8fa58b0f3cb3..6b466867f91a 100644
--- a/arch/x86/crypto/polyval-clmulni_glue.c
+++ b/arch/x86/crypto/polyval-clmulni_glue.c
@@ -16,16 +16,15 @@
* operations.
*/
-#include <crypto/algapi.h>
+#include <asm/cpu_device_id.h>
+#include <asm/fpu/api.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/polyval.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
+#include <crypto/utils.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/cpu_device_id.h>
-#include <asm/simd.h>
+#include <linux/string.h>
#define POLYVAL_ALIGN 16
#define POLYVAL_ALIGN_ATTR __aligned(POLYVAL_ALIGN)
@@ -42,7 +41,6 @@ struct polyval_tfm_ctx {
struct polyval_desc_ctx {
u8 buffer[POLYVAL_BLOCK_SIZE];
- u32 bytes;
};
asmlinkage void clmul_polyval_update(const struct polyval_tfm_ctx *keys,
@@ -57,25 +55,16 @@ static inline struct polyval_tfm_ctx *polyval_tfm_ctx(struct crypto_shash *tfm)
static void internal_polyval_update(const struct polyval_tfm_ctx *keys,
const u8 *in, size_t nblocks, u8 *accumulator)
{
- if (likely(crypto_simd_usable())) {
- kernel_fpu_begin();
- clmul_polyval_update(keys, in, nblocks, accumulator);
- kernel_fpu_end();
- } else {
- polyval_update_non4k(keys->key_powers[NUM_KEY_POWERS-1], in,
- nblocks, accumulator);
- }
+ kernel_fpu_begin();
+ clmul_polyval_update(keys, in, nblocks, accumulator);
+ kernel_fpu_end();
}
static void internal_polyval_mul(u8 *op1, const u8 *op2)
{
- if (likely(crypto_simd_usable())) {
- kernel_fpu_begin();
- clmul_polyval_mul(op1, op2);
- kernel_fpu_end();
- } else {
- polyval_mul_non4k(op1, op2);
- }
+ kernel_fpu_begin();
+ clmul_polyval_mul(op1, op2);
+ kernel_fpu_end();
}
static int polyval_x86_setkey(struct crypto_shash *tfm,
@@ -112,49 +101,27 @@ static int polyval_x86_update(struct shash_desc *desc,
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm);
- u8 *pos;
unsigned int nblocks;
- unsigned int n;
-
- if (dctx->bytes) {
- n = min(srclen, dctx->bytes);
- pos = dctx->buffer + POLYVAL_BLOCK_SIZE - dctx->bytes;
-
- dctx->bytes -= n;
- srclen -= n;
-
- while (n--)
- *pos++ ^= *src++;
- if (!dctx->bytes)
- internal_polyval_mul(dctx->buffer,
- tctx->key_powers[NUM_KEY_POWERS-1]);
- }
-
- while (srclen >= POLYVAL_BLOCK_SIZE) {
+ do {
/* Allow rescheduling every 4K bytes. */
nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE;
internal_polyval_update(tctx, src, nblocks, dctx->buffer);
srclen -= nblocks * POLYVAL_BLOCK_SIZE;
src += nblocks * POLYVAL_BLOCK_SIZE;
- }
+ } while (srclen >= POLYVAL_BLOCK_SIZE);
- if (srclen) {
- dctx->bytes = POLYVAL_BLOCK_SIZE - srclen;
- pos = dctx->buffer;
- while (srclen--)
- *pos++ ^= *src++;
- }
-
- return 0;
+ return srclen;
}
-static int polyval_x86_final(struct shash_desc *desc, u8 *dst)
+static int polyval_x86_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm);
- if (dctx->bytes) {
+ if (len) {
+ crypto_xor(dctx->buffer, src, len);
internal_polyval_mul(dctx->buffer,
tctx->key_powers[NUM_KEY_POWERS-1]);
}
@@ -168,13 +135,14 @@ static struct shash_alg polyval_alg = {
.digestsize = POLYVAL_DIGEST_SIZE,
.init = polyval_x86_init,
.update = polyval_x86_update,
- .final = polyval_x86_final,
+ .finup = polyval_x86_finup,
.setkey = polyval_x86_setkey,
.descsize = sizeof(struct polyval_desc_ctx),
.base = {
.cra_name = "polyval",
.cra_driver_name = "polyval-clmulni",
.cra_priority = 200,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = POLYVAL_BLOCK_SIZE,
.cra_ctxsize = POLYVAL_CTX_SIZE,
.cra_module = THIS_MODULE,
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 347e97f4b713..f5f2121b7956 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -10,7 +10,6 @@
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
#include "serpent-avx.h"
@@ -65,10 +64,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg serpent_algs[] = {
{
- .base.cra_name = "__ecb(serpent)",
- .base.cra_driver_name = "__ecb-serpent-avx2",
+ .base.cra_name = "ecb(serpent)",
+ .base.cra_driver_name = "ecb-serpent-avx2",
.base.cra_priority = 600,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
@@ -78,10 +76,9 @@ static struct skcipher_alg serpent_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(serpent)",
- .base.cra_driver_name = "__cbc-serpent-avx2",
+ .base.cra_name = "cbc(serpent)",
+ .base.cra_driver_name = "cbc-serpent-avx2",
.base.cra_priority = 600,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
@@ -94,8 +91,6 @@ static struct skcipher_alg serpent_algs[] = {
},
};
-static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
-
static int __init serpent_avx2_init(void)
{
const char *feature_name;
@@ -110,15 +105,13 @@ static int __init serpent_avx2_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(serpent_algs,
- ARRAY_SIZE(serpent_algs),
- serpent_simd_algs);
+ return crypto_register_skciphers(serpent_algs,
+ ARRAY_SIZE(serpent_algs));
}
static void __exit serpent_avx2_fini(void)
{
- simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
- serpent_simd_algs);
+ crypto_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs));
}
module_init(serpent_avx2_init);
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 6c248e1ea4ef..e640abc1cb8a 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -13,7 +13,6 @@
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
#include "serpent-avx.h"
@@ -71,10 +70,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg serpent_algs[] = {
{
- .base.cra_name = "__ecb(serpent)",
- .base.cra_driver_name = "__ecb-serpent-avx",
+ .base.cra_name = "ecb(serpent)",
+ .base.cra_driver_name = "ecb-serpent-avx",
.base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
@@ -84,10 +82,9 @@ static struct skcipher_alg serpent_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(serpent)",
- .base.cra_driver_name = "__cbc-serpent-avx",
+ .base.cra_name = "cbc(serpent)",
+ .base.cra_driver_name = "cbc-serpent-avx",
.base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
@@ -100,8 +97,6 @@ static struct skcipher_alg serpent_algs[] = {
},
};
-static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
-
static int __init serpent_init(void)
{
const char *feature_name;
@@ -112,15 +107,13 @@ static int __init serpent_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(serpent_algs,
- ARRAY_SIZE(serpent_algs),
- serpent_simd_algs);
+ return crypto_register_skciphers(serpent_algs,
+ ARRAY_SIZE(serpent_algs));
}
static void __exit serpent_exit(void)
{
- simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
- serpent_simd_algs);
+ crypto_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs));
}
module_init(serpent_init);
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index d78f37e9b2cf..80ee17ec21b4 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -18,7 +18,6 @@
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
-#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
#include "serpent-sse2.h"
@@ -74,10 +73,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg serpent_algs[] = {
{
- .base.cra_name = "__ecb(serpent)",
- .base.cra_driver_name = "__ecb-serpent-sse2",
+ .base.cra_name = "ecb(serpent)",
+ .base.cra_driver_name = "ecb-serpent-sse2",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
@@ -87,10 +85,9 @@ static struct skcipher_alg serpent_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(serpent)",
- .base.cra_driver_name = "__cbc-serpent-sse2",
+ .base.cra_name = "cbc(serpent)",
+ .base.cra_driver_name = "cbc-serpent-sse2",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
@@ -103,8 +100,6 @@ static struct skcipher_alg serpent_algs[] = {
},
};
-static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
-
static int __init serpent_sse2_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2)) {
@@ -112,15 +107,13 @@ static int __init serpent_sse2_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(serpent_algs,
- ARRAY_SIZE(serpent_algs),
- serpent_simd_algs);
+ return crypto_register_skciphers(serpent_algs,
+ ARRAY_SIZE(serpent_algs));
}
static void __exit serpent_sse2_exit(void)
{
- simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
- serpent_simd_algs);
+ crypto_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs));
}
module_init(serpent_sse2_init);
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index ab8bc54f254d..0a912bfc86c5 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -16,21 +16,17 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <asm/cpu_device_id.h>
+#include <asm/simd.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
-#include <asm/cpu_device_id.h>
-#include <asm/simd.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
static const struct x86_cpu_id module_cpu_ids[] = {
-#ifdef CONFIG_AS_SHA1_NI
X86_MATCH_FEATURE(X86_FEATURE_SHA_NI, NULL),
-#endif
X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
@@ -38,14 +34,10 @@ static const struct x86_cpu_id module_cpu_ids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
-static int sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len, sha1_block_fn *sha1_xform)
+static inline int sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len, sha1_block_fn *sha1_xform)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
- return crypto_sha1_update(desc, data, len);
+ int remain;
/*
* Make sure struct sha1_state begins directly with the SHA1
@@ -54,22 +46,18 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
kernel_fpu_begin();
- sha1_base_do_update(desc, data, len, sha1_xform);
+ remain = sha1_base_do_update_blocks(desc, data, len, sha1_xform);
kernel_fpu_end();
- return 0;
+ return remain;
}
-static int sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out, sha1_block_fn *sha1_xform)
+static inline int sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out,
+ sha1_block_fn *sha1_xform)
{
- if (!crypto_simd_usable())
- return crypto_sha1_finup(desc, data, len, out);
-
kernel_fpu_begin();
- if (len)
- sha1_base_do_update(desc, data, len, sha1_xform);
- sha1_base_do_finalize(desc, sha1_xform);
+ sha1_base_do_finup(desc, data, len, sha1_xform);
kernel_fpu_end();
return sha1_base_finish(desc, out);
@@ -90,23 +78,17 @@ static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data,
return sha1_finup(desc, data, len, out, sha1_transform_ssse3);
}
-/* Add padding and return the message digest. */
-static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
-{
- return sha1_ssse3_finup(desc, NULL, 0, out);
-}
-
static struct shash_alg sha1_ssse3_alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = sha1_ssse3_update,
- .final = sha1_ssse3_final,
.finup = sha1_ssse3_finup,
- .descsize = sizeof(struct sha1_state),
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ssse3",
.cra_priority = 150,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -140,22 +122,17 @@ static int sha1_avx_finup(struct shash_desc *desc, const u8 *data,
return sha1_finup(desc, data, len, out, sha1_transform_avx);
}
-static int sha1_avx_final(struct shash_desc *desc, u8 *out)
-{
- return sha1_avx_finup(desc, NULL, 0, out);
-}
-
static struct shash_alg sha1_avx_alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = sha1_avx_update,
- .final = sha1_avx_final,
.finup = sha1_avx_finup,
- .descsize = sizeof(struct sha1_state),
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-avx",
.cra_priority = 160,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -200,8 +177,8 @@ static bool avx2_usable(void)
return false;
}
-static void sha1_apply_transform_avx2(struct sha1_state *state,
- const u8 *data, int blocks)
+static inline void sha1_apply_transform_avx2(struct sha1_state *state,
+ const u8 *data, int blocks)
{
/* Select the optimal transform based on data block size */
if (blocks >= SHA1_AVX2_BLOCK_OPTSIZE)
@@ -222,22 +199,17 @@ static int sha1_avx2_finup(struct shash_desc *desc, const u8 *data,
return sha1_finup(desc, data, len, out, sha1_apply_transform_avx2);
}
-static int sha1_avx2_final(struct shash_desc *desc, u8 *out)
-{
- return sha1_avx2_finup(desc, NULL, 0, out);
-}
-
static struct shash_alg sha1_avx2_alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = sha1_avx2_update,
- .final = sha1_avx2_final,
.finup = sha1_avx2_finup,
- .descsize = sizeof(struct sha1_state),
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-avx2",
.cra_priority = 170,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -256,7 +228,6 @@ static void unregister_sha1_avx2(void)
crypto_unregister_shash(&sha1_avx2_alg);
}
-#ifdef CONFIG_AS_SHA1_NI
asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data,
int rounds);
@@ -272,22 +243,17 @@ static int sha1_ni_finup(struct shash_desc *desc, const u8 *data,
return sha1_finup(desc, data, len, out, sha1_ni_transform);
}
-static int sha1_ni_final(struct shash_desc *desc, u8 *out)
-{
- return sha1_ni_finup(desc, NULL, 0, out);
-}
-
static struct shash_alg sha1_ni_alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = sha1_ni_update,
- .final = sha1_ni_final,
.finup = sha1_ni_finup,
- .descsize = sizeof(struct sha1_state),
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ni",
.cra_priority = 250,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -306,11 +272,6 @@ static void unregister_sha1_ni(void)
crypto_unregister_shash(&sha1_ni_alg);
}
-#else
-static inline int register_sha1_ni(void) { return 0; }
-static inline void unregister_sha1_ni(void) { }
-#endif
-
static int __init sha1_ssse3_mod_init(void)
{
if (!x86_match_cpu(module_cpu_ids))
@@ -360,6 +321,4 @@ MODULE_ALIAS_CRYPTO("sha1");
MODULE_ALIAS_CRYPTO("sha1-ssse3");
MODULE_ALIAS_CRYPTO("sha1-avx");
MODULE_ALIAS_CRYPTO("sha1-avx2");
-#ifdef CONFIG_AS_SHA1_NI
MODULE_ALIAS_CRYPTO("sha1-ni");
-#endif
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
deleted file mode 100644
index e04a43d9f7d5..000000000000
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ /dev/null
@@ -1,467 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Glue code for the SHA256 Secure Hash Algorithm assembler implementations
- * using SSSE3, AVX, AVX2, and SHA-NI instructions.
- *
- * This file is based on sha256_generic.c
- *
- * Copyright (C) 2013 Intel Corporation.
- *
- * Author:
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <linux/string.h>
-#include <asm/cpu_device_id.h>
-#include <asm/simd.h>
-
-asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
- const u8 *data, int blocks);
-
-static const struct x86_cpu_id module_cpu_ids[] = {
-#ifdef CONFIG_AS_SHA256_NI
- X86_MATCH_FEATURE(X86_FEATURE_SHA_NI, NULL),
-#endif
- X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
- X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
- X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
-
-static int _sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len, sha256_block_fn *sha256_xform)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
- return crypto_sha256_update(desc, data, len);
-
- /*
- * Make sure struct sha256_state begins directly with the SHA256
- * 256-bit internal state, as this is what the asm functions expect.
- */
- BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
-
- kernel_fpu_begin();
- sha256_base_do_update(desc, data, len, sha256_xform);
- kernel_fpu_end();
-
- return 0;
-}
-
-static int sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out, sha256_block_fn *sha256_xform)
-{
- if (!crypto_simd_usable())
- return crypto_sha256_finup(desc, data, len, out);
-
- kernel_fpu_begin();
- if (len)
- sha256_base_do_update(desc, data, len, sha256_xform);
- sha256_base_do_finalize(desc, sha256_xform);
- kernel_fpu_end();
-
- return sha256_base_finish(desc, out);
-}
-
-static int sha256_ssse3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- return _sha256_update(desc, data, len, sha256_transform_ssse3);
-}
-
-static int sha256_ssse3_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return sha256_finup(desc, data, len, out, sha256_transform_ssse3);
-}
-
-/* Add padding and return the message digest. */
-static int sha256_ssse3_final(struct shash_desc *desc, u8 *out)
-{
- return sha256_ssse3_finup(desc, NULL, 0, out);
-}
-
-static int sha256_ssse3_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return sha256_base_init(desc) ?:
- sha256_ssse3_finup(desc, data, len, out);
-}
-
-static struct shash_alg sha256_ssse3_algs[] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = sha256_ssse3_update,
- .final = sha256_ssse3_final,
- .finup = sha256_ssse3_finup,
- .digest = sha256_ssse3_digest,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-ssse3",
- .cra_priority = 150,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = sha256_ssse3_update,
- .final = sha256_ssse3_final,
- .finup = sha256_ssse3_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "sha224-ssse3",
- .cra_priority = 150,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int register_sha256_ssse3(void)
-{
- if (boot_cpu_has(X86_FEATURE_SSSE3))
- return crypto_register_shashes(sha256_ssse3_algs,
- ARRAY_SIZE(sha256_ssse3_algs));
- return 0;
-}
-
-static void unregister_sha256_ssse3(void)
-{
- if (boot_cpu_has(X86_FEATURE_SSSE3))
- crypto_unregister_shashes(sha256_ssse3_algs,
- ARRAY_SIZE(sha256_ssse3_algs));
-}
-
-asmlinkage void sha256_transform_avx(struct sha256_state *state,
- const u8 *data, int blocks);
-
-static int sha256_avx_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- return _sha256_update(desc, data, len, sha256_transform_avx);
-}
-
-static int sha256_avx_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return sha256_finup(desc, data, len, out, sha256_transform_avx);
-}
-
-static int sha256_avx_final(struct shash_desc *desc, u8 *out)
-{
- return sha256_avx_finup(desc, NULL, 0, out);
-}
-
-static int sha256_avx_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return sha256_base_init(desc) ?:
- sha256_avx_finup(desc, data, len, out);
-}
-
-static struct shash_alg sha256_avx_algs[] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = sha256_avx_update,
- .final = sha256_avx_final,
- .finup = sha256_avx_finup,
- .digest = sha256_avx_digest,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-avx",
- .cra_priority = 160,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = sha256_avx_update,
- .final = sha256_avx_final,
- .finup = sha256_avx_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "sha224-avx",
- .cra_priority = 160,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static bool avx_usable(void)
-{
- if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
- if (boot_cpu_has(X86_FEATURE_AVX))
- pr_info("AVX detected but unusable.\n");
- return false;
- }
-
- return true;
-}
-
-static int register_sha256_avx(void)
-{
- if (avx_usable())
- return crypto_register_shashes(sha256_avx_algs,
- ARRAY_SIZE(sha256_avx_algs));
- return 0;
-}
-
-static void unregister_sha256_avx(void)
-{
- if (avx_usable())
- crypto_unregister_shashes(sha256_avx_algs,
- ARRAY_SIZE(sha256_avx_algs));
-}
-
-asmlinkage void sha256_transform_rorx(struct sha256_state *state,
- const u8 *data, int blocks);
-
-static int sha256_avx2_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- return _sha256_update(desc, data, len, sha256_transform_rorx);
-}
-
-static int sha256_avx2_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return sha256_finup(desc, data, len, out, sha256_transform_rorx);
-}
-
-static int sha256_avx2_final(struct shash_desc *desc, u8 *out)
-{
- return sha256_avx2_finup(desc, NULL, 0, out);
-}
-
-static int sha256_avx2_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return sha256_base_init(desc) ?:
- sha256_avx2_finup(desc, data, len, out);
-}
-
-static struct shash_alg sha256_avx2_algs[] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = sha256_avx2_update,
- .final = sha256_avx2_final,
- .finup = sha256_avx2_finup,
- .digest = sha256_avx2_digest,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-avx2",
- .cra_priority = 170,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = sha256_avx2_update,
- .final = sha256_avx2_final,
- .finup = sha256_avx2_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "sha224-avx2",
- .cra_priority = 170,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static bool avx2_usable(void)
-{
- if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) &&
- boot_cpu_has(X86_FEATURE_BMI2))
- return true;
-
- return false;
-}
-
-static int register_sha256_avx2(void)
-{
- if (avx2_usable())
- return crypto_register_shashes(sha256_avx2_algs,
- ARRAY_SIZE(sha256_avx2_algs));
- return 0;
-}
-
-static void unregister_sha256_avx2(void)
-{
- if (avx2_usable())
- crypto_unregister_shashes(sha256_avx2_algs,
- ARRAY_SIZE(sha256_avx2_algs));
-}
-
-#ifdef CONFIG_AS_SHA256_NI
-asmlinkage void sha256_ni_transform(struct sha256_state *digest,
- const u8 *data, int rounds);
-
-static int sha256_ni_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- return _sha256_update(desc, data, len, sha256_ni_transform);
-}
-
-static int sha256_ni_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return sha256_finup(desc, data, len, out, sha256_ni_transform);
-}
-
-static int sha256_ni_final(struct shash_desc *desc, u8 *out)
-{
- return sha256_ni_finup(desc, NULL, 0, out);
-}
-
-static int sha256_ni_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return sha256_base_init(desc) ?:
- sha256_ni_finup(desc, data, len, out);
-}
-
-static struct shash_alg sha256_ni_algs[] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = sha256_ni_update,
- .final = sha256_ni_final,
- .finup = sha256_ni_finup,
- .digest = sha256_ni_digest,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-ni",
- .cra_priority = 250,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = sha256_ni_update,
- .final = sha256_ni_final,
- .finup = sha256_ni_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "sha224-ni",
- .cra_priority = 250,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int register_sha256_ni(void)
-{
- if (boot_cpu_has(X86_FEATURE_SHA_NI))
- return crypto_register_shashes(sha256_ni_algs,
- ARRAY_SIZE(sha256_ni_algs));
- return 0;
-}
-
-static void unregister_sha256_ni(void)
-{
- if (boot_cpu_has(X86_FEATURE_SHA_NI))
- crypto_unregister_shashes(sha256_ni_algs,
- ARRAY_SIZE(sha256_ni_algs));
-}
-
-#else
-static inline int register_sha256_ni(void) { return 0; }
-static inline void unregister_sha256_ni(void) { }
-#endif
-
-static int __init sha256_ssse3_mod_init(void)
-{
- if (!x86_match_cpu(module_cpu_ids))
- return -ENODEV;
-
- if (register_sha256_ssse3())
- goto fail;
-
- if (register_sha256_avx()) {
- unregister_sha256_ssse3();
- goto fail;
- }
-
- if (register_sha256_avx2()) {
- unregister_sha256_avx();
- unregister_sha256_ssse3();
- goto fail;
- }
-
- if (register_sha256_ni()) {
- unregister_sha256_avx2();
- unregister_sha256_avx();
- unregister_sha256_ssse3();
- goto fail;
- }
-
- return 0;
-fail:
- return -ENODEV;
-}
-
-static void __exit sha256_ssse3_mod_fini(void)
-{
- unregister_sha256_ni();
- unregister_sha256_avx2();
- unregister_sha256_avx();
- unregister_sha256_ssse3();
-}
-
-module_init(sha256_ssse3_mod_init);
-module_exit(sha256_ssse3_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
-
-MODULE_ALIAS_CRYPTO("sha256");
-MODULE_ALIAS_CRYPTO("sha256-ssse3");
-MODULE_ALIAS_CRYPTO("sha256-avx");
-MODULE_ALIAS_CRYPTO("sha256-avx2");
-MODULE_ALIAS_CRYPTO("sha224");
-MODULE_ALIAS_CRYPTO("sha224-ssse3");
-MODULE_ALIAS_CRYPTO("sha224-avx");
-MODULE_ALIAS_CRYPTO("sha224-avx2");
-#ifdef CONFIG_AS_SHA256_NI
-MODULE_ALIAS_CRYPTO("sha256-ni");
-MODULE_ALIAS_CRYPTO("sha224-ni");
-#endif
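
The removed file registered each variant only when the CPU (and the kernel's xsave configuration) could actually use it, and relied on an x86 CPU-feature device table for module autoloading. A condensed sketch of that gating pattern follows; example_alg and the chosen feature bits are illustrative, not the actual sha256 registration logic.

	#include <asm/cpu_device_id.h>
	#include <asm/fpu/api.h>
	#include <crypto/internal/hash.h>
	#include <linux/module.h>

	static struct shash_alg example_alg;	/* hypothetical; fields omitted */

	static const struct x86_cpu_id example_cpu_ids[] = {
		X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
		X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
		{}
	};
	MODULE_DEVICE_TABLE(x86cpu, example_cpu_ids);

	static int __init example_mod_init(void)
	{
		if (!x86_match_cpu(example_cpu_ids))
			return -ENODEV;	/* no usable SIMD extension at all */

		/* AVX also requires the OS to manage YMM state. */
		if (boot_cpu_has(X86_FEATURE_AVX) &&
		    !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
			pr_info("AVX detected but unusable.\n");

		return crypto_register_shash(&example_alg);
	}

	static void __exit example_mod_exit(void)
	{
		crypto_unregister_shash(&example_alg);
	}

	module_init(example_mod_init);
	module_exit(example_mod_exit);
	MODULE_LICENSE("GPL");
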
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 6d3b85e53d0e..067684c54395 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -27,17 +27,13 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <asm/cpu_device_id.h>
+#include <asm/simd.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
-#include <asm/cpu_device_id.h>
-#include <asm/simd.h>
asmlinkage void sha512_transform_ssse3(struct sha512_state *state,
const u8 *data, int blocks);
@@ -45,11 +41,7 @@ asmlinkage void sha512_transform_ssse3(struct sha512_state *state,
static int sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len, sha512_block_fn *sha512_xform)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
- return crypto_sha512_update(desc, data, len);
+ int remain;
/*
* Make sure struct sha512_state begins directly with the SHA512
@@ -58,22 +50,17 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0);
kernel_fpu_begin();
- sha512_base_do_update(desc, data, len, sha512_xform);
+ remain = sha512_base_do_update_blocks(desc, data, len, sha512_xform);
kernel_fpu_end();
- return 0;
+ return remain;
}
static int sha512_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out, sha512_block_fn *sha512_xform)
{
- if (!crypto_simd_usable())
- return crypto_sha512_finup(desc, data, len, out);
-
kernel_fpu_begin();
- if (len)
- sha512_base_do_update(desc, data, len, sha512_xform);
- sha512_base_do_finalize(desc, sha512_xform);
+ sha512_base_do_finup(desc, data, len, sha512_xform);
kernel_fpu_end();
return sha512_base_finish(desc, out);
@@ -91,23 +78,18 @@ static int sha512_ssse3_finup(struct shash_desc *desc, const u8 *data,
return sha512_finup(desc, data, len, out, sha512_transform_ssse3);
}
-/* Add padding and return the message digest. */
-static int sha512_ssse3_final(struct shash_desc *desc, u8 *out)
-{
- return sha512_ssse3_finup(desc, NULL, 0, out);
-}
-
static struct shash_alg sha512_ssse3_algs[] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = sha512_ssse3_update,
- .final = sha512_ssse3_final,
.finup = sha512_ssse3_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-ssse3",
.cra_priority = 150,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -115,13 +97,14 @@ static struct shash_alg sha512_ssse3_algs[] = { {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = sha512_ssse3_update,
- .final = sha512_ssse3_final,
.finup = sha512_ssse3_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-ssse3",
.cra_priority = 150,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -167,23 +150,18 @@ static int sha512_avx_finup(struct shash_desc *desc, const u8 *data,
return sha512_finup(desc, data, len, out, sha512_transform_avx);
}
-/* Add padding and return the message digest. */
-static int sha512_avx_final(struct shash_desc *desc, u8 *out)
-{
- return sha512_avx_finup(desc, NULL, 0, out);
-}
-
static struct shash_alg sha512_avx_algs[] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = sha512_avx_update,
- .final = sha512_avx_final,
.finup = sha512_avx_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-avx",
.cra_priority = 160,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -191,13 +169,14 @@ static struct shash_alg sha512_avx_algs[] = { {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = sha512_avx_update,
- .final = sha512_avx_final,
.finup = sha512_avx_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-avx",
.cra_priority = 160,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -233,23 +212,18 @@ static int sha512_avx2_finup(struct shash_desc *desc, const u8 *data,
return sha512_finup(desc, data, len, out, sha512_transform_rorx);
}
-/* Add padding and return the message digest. */
-static int sha512_avx2_final(struct shash_desc *desc, u8 *out)
-{
- return sha512_avx2_finup(desc, NULL, 0, out);
-}
-
static struct shash_alg sha512_avx2_algs[] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = sha512_avx2_update,
- .final = sha512_avx2_final,
.finup = sha512_avx2_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-avx2",
.cra_priority = 170,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -257,13 +231,14 @@ static struct shash_alg sha512_avx2_algs[] = { {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = sha512_avx2_update,
- .final = sha512_avx2_final,
.finup = sha512_avx2_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-avx2",
.cra_priority = 170,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
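The sha512 conversion above (and the sm3 one that follows) moves the glue code to the block-only shash model: ->update consumes whole blocks and returns the number of leftover bytes for the crypto core to buffer, ->final goes away, and ->finup absorbs the tail plus padding inside a single kernel_fpu_begin()/kernel_fpu_end() region, with CRYPTO_AHASH_ALG_BLOCK_ONLY | CRYPTO_AHASH_ALG_FINUP_MAX advertising that split of responsibilities. A minimal sketch of the shape, using a hypothetical "foo" transform and foo_base_* helpers modelled on the sha512_base_* calls above (not a drop-in driver):

static int foo_simd_update(struct shash_desc *desc, const u8 *data,
			   unsigned int len)
{
	int remain;

	kernel_fpu_begin();
	/* Only whole blocks are consumed; the core buffers the rest. */
	remain = foo_base_do_update_blocks(desc, data, len, foo_transform_simd);
	kernel_fpu_end();
	return remain;		/* leftover bytes for the core to carry over */
}

static int foo_simd_finup(struct shash_desc *desc, const u8 *data,
			  unsigned int len, u8 *out)
{
	kernel_fpu_begin();
	/* Final partial block plus padding in one pass. */
	foo_base_do_finup(desc, data, len, foo_transform_simd);
	kernel_fpu_end();
	return foo_base_finish(desc, out);
}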
diff --git a/arch/x86/crypto/sm3_avx_glue.c b/arch/x86/crypto/sm3_avx_glue.c
index 661b6f22ffcd..6e8c42b9dc8e 100644
--- a/arch/x86/crypto/sm3_avx_glue.c
+++ b/arch/x86/crypto/sm3_avx_glue.c
@@ -10,12 +10,11 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
-#include <asm/simd.h>
+#include <linux/cpufeature.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
asmlinkage void sm3_transform_avx(struct sm3_state *state,
const u8 *data, int nblocks);
@@ -23,13 +22,7 @@ asmlinkage void sm3_transform_avx(struct sm3_state *state,
static int sm3_avx_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count % SM3_BLOCK_SIZE) + len < SM3_BLOCK_SIZE) {
- sm3_update(sctx, data, len);
- return 0;
- }
+ int remain;
/*
* Make sure struct sm3_state begins directly with the SM3
@@ -38,45 +31,17 @@ static int sm3_avx_update(struct shash_desc *desc, const u8 *data,
BUILD_BUG_ON(offsetof(struct sm3_state, state) != 0);
kernel_fpu_begin();
- sm3_base_do_update(desc, data, len, sm3_transform_avx);
+ remain = sm3_base_do_update_blocks(desc, data, len, sm3_transform_avx);
kernel_fpu_end();
-
- return 0;
+ return remain;
}
static int sm3_avx_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!crypto_simd_usable()) {
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- if (len)
- sm3_update(sctx, data, len);
-
- sm3_final(sctx, out);
- return 0;
- }
-
kernel_fpu_begin();
- if (len)
- sm3_base_do_update(desc, data, len, sm3_transform_avx);
- sm3_base_do_finalize(desc, sm3_transform_avx);
+ sm3_base_do_finup(desc, data, len, sm3_transform_avx);
kernel_fpu_end();
-
- return sm3_base_finish(desc, out);
-}
-
-static int sm3_avx_final(struct shash_desc *desc, u8 *out)
-{
- if (!crypto_simd_usable()) {
- sm3_final(shash_desc_ctx(desc), out);
- return 0;
- }
-
- kernel_fpu_begin();
- sm3_base_do_finalize(desc, sm3_transform_avx);
- kernel_fpu_end();
-
return sm3_base_finish(desc, out);
}
@@ -84,13 +49,14 @@ static struct shash_alg sm3_avx_alg = {
.digestsize = SM3_DIGEST_SIZE,
.init = sm3_base_init,
.update = sm3_avx_update,
- .final = sm3_avx_final,
.finup = sm3_avx_finup,
- .descsize = sizeof(struct sm3_state),
+ .descsize = SM3_STATE_SIZE,
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-avx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/x86/crypto/sm4_aesni_avx2_glue.c b/arch/x86/crypto/sm4_aesni_avx2_glue.c
index 1148fd4cd57f..fec0ab7a63dd 100644
--- a/arch/x86/crypto/sm4_aesni_avx2_glue.c
+++ b/arch/x86/crypto/sm4_aesni_avx2_glue.c
@@ -8,11 +8,10 @@
* Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
+#include <asm/fpu/api.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
-#include <asm/simd.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/sm4.h>
#include "sm4-avx.h"
@@ -48,10 +47,9 @@ static int ctr_crypt(struct skcipher_request *req)
static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
{
.base = {
- .cra_name = "__ecb(sm4)",
- .cra_driver_name = "__ecb-sm4-aesni-avx2",
+ .cra_name = "ecb(sm4)",
+ .cra_driver_name = "ecb-sm4-aesni-avx2",
.cra_priority = 500,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
@@ -64,10 +62,9 @@ static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
.decrypt = sm4_avx_ecb_decrypt,
}, {
.base = {
- .cra_name = "__cbc(sm4)",
- .cra_driver_name = "__cbc-sm4-aesni-avx2",
+ .cra_name = "cbc(sm4)",
+ .cra_driver_name = "cbc-sm4-aesni-avx2",
.cra_priority = 500,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
@@ -81,10 +78,9 @@ static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
.decrypt = cbc_decrypt,
}, {
.base = {
- .cra_name = "__ctr(sm4)",
- .cra_driver_name = "__ctr-sm4-aesni-avx2",
+ .cra_name = "ctr(sm4)",
+ .cra_driver_name = "ctr-sm4-aesni-avx2",
.cra_priority = 500,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
@@ -100,9 +96,6 @@ static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
}
};
-static struct simd_skcipher_alg *
-simd_sm4_aesni_avx2_skciphers[ARRAY_SIZE(sm4_aesni_avx2_skciphers)];
-
static int __init sm4_init(void)
{
const char *feature_name;
@@ -121,16 +114,14 @@ static int __init sm4_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(sm4_aesni_avx2_skciphers,
- ARRAY_SIZE(sm4_aesni_avx2_skciphers),
- simd_sm4_aesni_avx2_skciphers);
+ return crypto_register_skciphers(sm4_aesni_avx2_skciphers,
+ ARRAY_SIZE(sm4_aesni_avx2_skciphers));
}
static void __exit sm4_exit(void)
{
- simd_unregister_skciphers(sm4_aesni_avx2_skciphers,
- ARRAY_SIZE(sm4_aesni_avx2_skciphers),
- simd_sm4_aesni_avx2_skciphers);
+ crypto_unregister_skciphers(sm4_aesni_avx2_skciphers,
+ ARRAY_SIZE(sm4_aesni_avx2_skciphers));
}
module_init(sm4_init);
diff --git a/arch/x86/crypto/sm4_aesni_avx_glue.c b/arch/x86/crypto/sm4_aesni_avx_glue.c
index 85b4ca78b47b..72867fc49ce8 100644
--- a/arch/x86/crypto/sm4_aesni_avx_glue.c
+++ b/arch/x86/crypto/sm4_aesni_avx_glue.c
@@ -8,11 +8,10 @@
* Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
+#include <asm/fpu/api.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
-#include <asm/simd.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/sm4.h>
#include "sm4-avx.h"
@@ -263,10 +262,9 @@ static int ctr_crypt(struct skcipher_request *req)
static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
{
.base = {
- .cra_name = "__ecb(sm4)",
- .cra_driver_name = "__ecb-sm4-aesni-avx",
+ .cra_name = "ecb(sm4)",
+ .cra_driver_name = "ecb-sm4-aesni-avx",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
@@ -279,10 +277,9 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
.decrypt = sm4_avx_ecb_decrypt,
}, {
.base = {
- .cra_name = "__cbc(sm4)",
- .cra_driver_name = "__cbc-sm4-aesni-avx",
+ .cra_name = "cbc(sm4)",
+ .cra_driver_name = "cbc-sm4-aesni-avx",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
@@ -296,10 +293,9 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
.decrypt = cbc_decrypt,
}, {
.base = {
- .cra_name = "__ctr(sm4)",
- .cra_driver_name = "__ctr-sm4-aesni-avx",
+ .cra_name = "ctr(sm4)",
+ .cra_driver_name = "ctr-sm4-aesni-avx",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
@@ -315,9 +311,6 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
}
};
-static struct simd_skcipher_alg *
-simd_sm4_aesni_avx_skciphers[ARRAY_SIZE(sm4_aesni_avx_skciphers)];
-
static int __init sm4_init(void)
{
const char *feature_name;
@@ -335,16 +328,14 @@ static int __init sm4_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(sm4_aesni_avx_skciphers,
- ARRAY_SIZE(sm4_aesni_avx_skciphers),
- simd_sm4_aesni_avx_skciphers);
+ return crypto_register_skciphers(sm4_aesni_avx_skciphers,
+ ARRAY_SIZE(sm4_aesni_avx_skciphers));
}
static void __exit sm4_exit(void)
{
- simd_unregister_skciphers(sm4_aesni_avx_skciphers,
- ARRAY_SIZE(sm4_aesni_avx_skciphers),
- simd_sm4_aesni_avx_skciphers);
+ crypto_unregister_skciphers(sm4_aesni_avx_skciphers,
+ ARRAY_SIZE(sm4_aesni_avx_skciphers));
}
module_init(sm4_init);
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index 3eb3440b477a..9e20db013750 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -13,7 +13,6 @@
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <crypto/twofish.h>
#include "twofish.h"
@@ -74,10 +73,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg twofish_algs[] = {
{
- .base.cra_name = "__ecb(twofish)",
- .base.cra_driver_name = "__ecb-twofish-avx",
+ .base.cra_name = "ecb(twofish)",
+ .base.cra_driver_name = "ecb-twofish-avx",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = TF_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct twofish_ctx),
.base.cra_module = THIS_MODULE,
@@ -87,10 +85,9 @@ static struct skcipher_alg twofish_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(twofish)",
- .base.cra_driver_name = "__cbc-twofish-avx",
+ .base.cra_name = "cbc(twofish)",
+ .base.cra_driver_name = "cbc-twofish-avx",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = TF_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct twofish_ctx),
.base.cra_module = THIS_MODULE,
@@ -103,8 +100,6 @@ static struct skcipher_alg twofish_algs[] = {
},
};
-static struct simd_skcipher_alg *twofish_simd_algs[ARRAY_SIZE(twofish_algs)];
-
static int __init twofish_init(void)
{
const char *feature_name;
@@ -114,15 +109,13 @@ static int __init twofish_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(twofish_algs,
- ARRAY_SIZE(twofish_algs),
- twofish_simd_algs);
+ return crypto_register_skciphers(twofish_algs,
+ ARRAY_SIZE(twofish_algs));
}
static void __exit twofish_exit(void)
{
- simd_unregister_skciphers(twofish_algs, ARRAY_SIZE(twofish_algs),
- twofish_simd_algs);
+ crypto_unregister_skciphers(twofish_algs, ARRAY_SIZE(twofish_algs));
}
module_init(twofish_init);
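With the "__"-prefixed, CRYPTO_ALG_INTERNAL variants removed, the SM4 and Twofish glue modules above register their skcipher_alg arrays directly and drop the simd_skcipher_alg bookkeeping that simd_register_skciphers_compat() required. The remaining init/exit boilerplate reduces to the following (sketch; my_avx_skciphers is a made-up name standing in for the per-driver arrays):

static struct skcipher_alg my_avx_skciphers[] = {
	/* ecb/cbc/ctr entries as in the arrays above */
};

static int __init my_avx_init(void)
{
	/* CPU feature checks elided */
	return crypto_register_skciphers(my_avx_skciphers,
					 ARRAY_SIZE(my_avx_skciphers));
}

static void __exit my_avx_exit(void)
{
	crypto_unregister_skciphers(my_avx_skciphers,
				    ARRAY_SIZE(my_avx_skciphers));
}

module_init(my_avx_init);
module_exit(my_avx_exit);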
diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
index d3caa31240ed..175958b02f2b 100644
--- a/arch/x86/entry/entry.S
+++ b/arch/x86/entry/entry.S
@@ -17,19 +17,20 @@
.pushsection .noinstr.text, "ax"
-SYM_FUNC_START(entry_ibpb)
+/* Clobbers AX, CX, DX */
+SYM_FUNC_START(write_ibpb)
ANNOTATE_NOENDBR
movl $MSR_IA32_PRED_CMD, %ecx
- movl $PRED_CMD_IBPB, %eax
+ movl _ASM_RIP(x86_pred_cmd), %eax
xorl %edx, %edx
wrmsr
	/* Make sure IBPB clears return stack predictions too. */
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
RET
-SYM_FUNC_END(entry_ibpb)
+SYM_FUNC_END(write_ibpb)
/* For KVM */
-EXPORT_SYMBOL_GPL(entry_ibpb);
+EXPORT_SYMBOL_GPL(write_ibpb);
.popsection
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f40bdf97d390..ed04a968cc7d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1525,7 +1525,9 @@ SYM_CODE_END(rewind_stack_and_make_dead)
* ORC to unwind properly.
*
* The alignment is for performance and not for safety, and may be safely
- * refactored in the future if needed.
+ * refactored in the future if needed. The .skips are for safety, to ensure
+ * that all RETs are in the second half of a cacheline to mitigate Indirect
+ * Target Selection, rather than taking the slowpath via its_return_thunk.
*/
SYM_FUNC_START(clear_bhb_loop)
ANNOTATE_NOENDBR
@@ -1536,10 +1538,22 @@ SYM_FUNC_START(clear_bhb_loop)
call 1f
jmp 5f
.align 64, 0xcc
+ /*
+ * Shift instructions so that the RET is in the upper half of the
+	 * cacheline and doesn't take the slowpath to its_return_thunk.
+ */
+ .skip 32 - (.Lret1 - 1f), 0xcc
ANNOTATE_INTRA_FUNCTION_CALL
1: call 2f
- RET
+.Lret1: RET
.align 64, 0xcc
+ /*
+ * As above shift instructions for RET at .Lret2 as well.
+ *
+	 * This should ideally be: .skip 32 - (.Lret2 - 2f), 0xcc
+ * but some Clang versions (e.g. 18) don't like this.
+ */
+ .skip 32 - 18, 0xcc
2: movl $5, %eax
3: jmp 4f
nop
@@ -1547,7 +1561,7 @@ SYM_FUNC_START(clear_bhb_loop)
jnz 3b
sub $1, %ecx
jnz 1b
- RET
+.Lret2: RET
5: lfence
pop %rbp
RET
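The padding arithmetic in the clear_bhb_loop hunk works out as follows: after the .align 64, a pad of 32 - (.Lret1 - 1f) bytes leaves exactly (.Lret1 - 1f) bytes (the call instruction) before byte 32, so the RET at .Lret1 starts at offset 32 of the 64-byte-aligned block, i.e. in the second half of the cacheline. For the second copy the distance .Lret2 - 2f is hard-coded as 18 bytes (hence .skip 32 - 18) only because some Clang versions reject the symbolic expression, as the new comment notes.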
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index adb299d3b6a1..afe105b2f907 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -65,7 +65,6 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
static void vdso_fix_landing(const struct vdso_image *image,
struct vm_area_struct *new_vma)
{
-#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
if (in_ia32_syscall() && image == &vdso_image_32) {
struct pt_regs *regs = current_pt_regs();
unsigned long vdso_land = image->sym_int80_landing_pad;
@@ -76,7 +75,6 @@ static void vdso_fix_landing(const struct vdso_image *image,
if (regs->ip == old_land_addr)
regs->ip = new_vma->vm_start + vdso_land;
}
-#endif
}
static int vdso_mremap(const struct vm_special_mapping *sm,
@@ -227,7 +225,6 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
return map_vdso(image, addr);
}
-#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
if (vdso32_enabled != 1) /* Other values all mean "disabled" */
@@ -235,45 +232,38 @@ static int load_vdso32(void)
return map_vdso(&vdso_image_32, 0);
}
-#endif
-#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
- if (!vdso64_enabled)
- return 0;
+ if (IS_ENABLED(CONFIG_X86_64)) {
+ if (!vdso64_enabled)
+ return 0;
+
+ return map_vdso(&vdso_image_64, 0);
+ }
- return map_vdso(&vdso_image_64, 0);
+ return load_vdso32();
}
#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp, bool x32)
{
-#ifdef CONFIG_X86_X32_ABI
- if (x32) {
+ if (IS_ENABLED(CONFIG_X86_X32_ABI) && x32) {
if (!vdso64_enabled)
return 0;
return map_vdso(&vdso_image_x32, 0);
}
-#endif
-#ifdef CONFIG_IA32_EMULATION
- return load_vdso32();
-#else
+
+ if (IS_ENABLED(CONFIG_IA32_EMULATION))
+ return load_vdso32();
+
return 0;
-#endif
-}
-#endif
-#else
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
-{
- return load_vdso32();
}
#endif
bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
{
-#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
const struct vdso_image *image = current->mm->context.vdso_image;
unsigned long vdso = (unsigned long) current->mm->context.vdso;
@@ -282,7 +272,6 @@ bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
return true;
}
-#endif
return false;
}
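The vdso/vma.c hunks swap compile-time #ifdef CONFIG_X86_32 / CONFIG_IA32_EMULATION / CONFIG_X86_64 blocks for IS_ENABLED() checks, so a single definition of arch_setup_additional_pages(), compat_arch_setup_additional_pages() and arch_syscall_is_vdso_sigreturn() covers every configuration and the compiler discards the branches that cannot be taken. Unlike #ifdef, both branches must still compile in every configuration, which is why load_vdso32() loses its guard as well. The general shape of the transformation (illustrative names only):

/* Before: two #ifdef'ed copies of the function.  After: one copy. */
int arch_example_setup(void)
{
	if (IS_ENABLED(CONFIG_X86_64))
		return setup_64bit();	/* hypothetical 64-bit path */

	return setup_32bit();		/* hypothetical 32-bit path */
}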
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 2fb7d53cf333..c9103a6fa06e 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -341,9 +341,7 @@ void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
p4d = p4d_offset(pgd, VSYSCALL_ADDR);
-#if CONFIG_PGTABLE_LEVELS >= 5
set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
-#endif
pud = pud_offset(p4d, VSYSCALL_ADDR);
set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
pmd = pmd_offset(pud, VSYSCALL_ADDR);
diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c
index ec3427463382..06f35a6b58a5 100644
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -44,12 +44,12 @@ static inline unsigned int brs_to(int idx)
static __always_inline void set_debug_extn_cfg(u64 val)
{
/* bits[4:3] must always be set to 11b */
- __wrmsr(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3, val >> 32);
+ native_wrmsrq(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3);
}
static __always_inline u64 get_debug_extn_cfg(void)
{
- return __rdmsr(MSR_AMD_DBG_EXTN_CFG);
+ return native_rdmsrq(MSR_AMD_DBG_EXTN_CFG);
}
static bool __init amd_brs_detect(void)
@@ -187,7 +187,7 @@ void amd_brs_reset(void)
/*
* Mark first entry as poisoned
*/
- wrmsrl(brs_to(0), BRS_POISON);
+ wrmsrq(brs_to(0), BRS_POISON);
}
int __init amd_brs_init(void)
@@ -325,7 +325,7 @@ void amd_brs_drain(void)
u32 brs_idx = tos - i;
u64 from, to;
- rdmsrl(brs_to(brs_idx), to);
+ rdmsrq(brs_to(brs_idx), to);
/* Entry does not belong to us (as marked by kernel) */
if (to == BRS_POISON)
@@ -341,7 +341,7 @@ void amd_brs_drain(void)
if (!amd_brs_match_plm(event, to))
continue;
- rdmsrl(brs_from(brs_idx), from);
+ rdmsrq(brs_from(brs_idx), from);
perf_clear_branch_entry_bitfields(br+nr);
@@ -371,7 +371,7 @@ static void amd_brs_poison_buffer(void)
idx = amd_brs_get_tos(&cfg);
/* Poison target of entry */
- wrmsrl(brs_to(idx), BRS_POISON);
+ wrmsrq(brs_to(idx), BRS_POISON);
}
/*
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 30d6ceb4c8ad..b20661b8621d 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -9,6 +9,7 @@
#include <linux/jiffies.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
+#include <asm/msr.h>
#include <asm/nmi.h>
#include "../perf_event.h"
@@ -563,13 +564,13 @@ static void amd_pmu_cpu_reset(int cpu)
return;
/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
- wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
+ wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
/*
	 * Clear freeze and overflow bits i.e. PerfCntrGlobalStatus.LbrFreeze
	 * and PerfCntrGlobalStatus.PerfCntrOvfl
*/
- wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
+ wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
}
@@ -651,7 +652,7 @@ static void amd_pmu_cpu_dead(int cpu)
static __always_inline void amd_pmu_set_global_ctl(u64 ctl)
{
- wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
+ wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
}
static inline u64 amd_pmu_get_global_status(void)
@@ -659,7 +660,7 @@ static inline u64 amd_pmu_get_global_status(void)
u64 status;
/* PerfCntrGlobalStatus is read-only */
- rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
+ rdmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
return status;
}
@@ -672,14 +673,14 @@ static inline void amd_pmu_ack_global_status(u64 status)
* clears the same bit in PerfCntrGlobalStatus
*/
- wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
+ wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
}
static bool amd_pmu_test_overflow_topbit(int idx)
{
u64 counter;
- rdmsrl(x86_pmu_event_addr(idx), counter);
+ rdmsrq(x86_pmu_event_addr(idx), counter);
return !(counter & BIT_ULL(x86_pmu.cntval_bits - 1));
}
@@ -1003,8 +1004,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);
- if (perf_event_overflow(event, &data, regs))
- x86_pmu_stop(event, 0);
+ perf_event_overflow(event, &data, regs);
}
/*
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 0252b7ea8bca..112f43b23ebf 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -15,6 +15,7 @@
#include <linux/sched/clock.h>
#include <asm/apic.h>
+#include <asm/msr.h>
#include "../perf_event.h"
@@ -26,7 +27,7 @@ static u32 ibs_caps;
#include <linux/hardirq.h>
#include <asm/nmi.h>
-#include <asm/amd-ibs.h>
+#include <asm/amd/ibs.h>
/* attr.config2 */
#define IBS_SW_FILTER_MASK 1
@@ -424,7 +425,7 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
* prev count manually on overflow.
*/
while (!perf_event_try_update(event, count, 64)) {
- rdmsrl(event->hw.config_base, *config);
+ rdmsrq(event->hw.config_base, *config);
count = perf_ibs->get_count(*config);
}
}
@@ -435,9 +436,9 @@ static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
u64 tmp = hwc->config | config;
if (perf_ibs->fetch_count_reset_broken)
- wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);
+ wrmsrq(hwc->config_base, tmp & ~perf_ibs->enable_mask);
- wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
+ wrmsrq(hwc->config_base, tmp | perf_ibs->enable_mask);
}
/*
@@ -452,9 +453,9 @@ static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
{
config &= ~perf_ibs->cnt_mask;
if (boot_cpu_data.x86 == 0x10)
- wrmsrl(hwc->config_base, config);
+ wrmsrq(hwc->config_base, config);
config &= ~perf_ibs->enable_mask;
- wrmsrl(hwc->config_base, config);
+ wrmsrq(hwc->config_base, config);
}
/*
@@ -513,7 +514,7 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
if (!stopping && (hwc->state & PERF_HES_UPTODATE))
return;
- rdmsrl(hwc->config_base, config);
+ rdmsrq(hwc->config_base, config);
if (stopping) {
/*
@@ -1256,7 +1257,7 @@ fail:
hwc = &event->hw;
msr = hwc->config_base;
buf = ibs_data.regs;
- rdmsrl(msr, *buf);
+ rdmsrq(msr, *buf);
if (!(*buf++ & perf_ibs->valid_mask))
goto fail;
@@ -1274,7 +1275,7 @@ fail:
offset_max = perf_ibs_get_offset_max(perf_ibs, event, check_rip);
do {
- rdmsrl(msr + offset, *buf++);
+ rdmsrq(msr + offset, *buf++);
size++;
offset = find_next_bit(perf_ibs->offset_mask,
perf_ibs->offset_max,
@@ -1304,17 +1305,17 @@ fail:
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
if (perf_ibs == &perf_ibs_op) {
if (ibs_caps & IBS_CAPS_BRNTRGT) {
- rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
+ rdmsrq(MSR_AMD64_IBSBRTARGET, *buf++);
br_target_idx = size;
size++;
}
if (ibs_caps & IBS_CAPS_OPDATA4) {
- rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
+ rdmsrq(MSR_AMD64_IBSOPDATA4, *buf++);
size++;
}
}
if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
- rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
+ rdmsrq(MSR_AMD64_ICIBSEXTDCTL, *buf++);
size++;
}
}
@@ -1373,9 +1374,7 @@ fail:
hwc->sample_period = perf_ibs->min_period;
out:
- if (throttle) {
- perf_ibs_stop(event, 0);
- } else {
+ if (!throttle) {
if (perf_ibs == &perf_ibs_op) {
if (ibs_caps & IBS_CAPS_OPCNTEXT) {
new_config = period & IBS_OP_MAX_CNT_EXT_MASK;
@@ -1565,7 +1564,7 @@ static inline int ibs_eilvt_valid(void)
preempt_disable();
- rdmsrl(MSR_AMD64_IBSCTL, val);
+ rdmsrq(MSR_AMD64_IBSCTL, val);
offset = val & IBSCTL_LVT_OFFSET_MASK;
if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
@@ -1680,7 +1679,7 @@ static inline int get_ibs_lvt_offset(void)
{
u64 val;
- rdmsrl(MSR_AMD64_IBSCTL, val);
+ rdmsrq(MSR_AMD64_IBSCTL, val);
if (!(val & IBSCTL_LVT_OFFSET_VALID))
return -EINVAL;
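The events/amd hunks above (brs.c, core.c, ibs.c) and the ones that follow convert the MSR accessors at their call sites: rdmsrl()/wrmsrl() become rdmsrq()/wrmsrq(), the raw __rdmsr()/__wrmsr() pair in brs.c becomes native_rdmsrq()/native_wrmsrq(), rdpmcl(idx, val) becomes a value-returning rdpmc(idx), and <asm/msr.h> is now included explicitly where those helpers are used. Judging from the call sites in this diff, the calling conventions are (sketch with an arbitrary MSR and counter index):

#include <asm/msr.h>

static u64 example_accessors(void)
{
	u64 val;

	rdmsrq(MSR_IA32_DEBUGCTLMSR, val);	/* read the MSR into val */
	wrmsrq(MSR_IA32_DEBUGCTLMSR, val);	/* write a full 64-bit value */

	return rdpmc(0);			/* PMC 0, value returned directly */
}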
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index f8228d8243f7..a721da9987dd 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -16,6 +16,8 @@
#include <linux/slab.h>
#include <linux/amd-iommu.h>
+#include <asm/msr.h>
+
#include "../perf_event.h"
#include "iommu.h"
diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c
index c06ccca96851..d24da377df77 100644
--- a/arch/x86/events/amd/lbr.c
+++ b/arch/x86/events/amd/lbr.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/perf_event.h>
+#include <asm/msr.h>
#include <asm/perf_event.h>
#include "../perf_event.h"
@@ -61,19 +62,19 @@ struct branch_entry {
static __always_inline void amd_pmu_lbr_set_from(unsigned int idx, u64 val)
{
- wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
+ wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
}
static __always_inline void amd_pmu_lbr_set_to(unsigned int idx, u64 val)
{
- wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
+ wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
}
static __always_inline u64 amd_pmu_lbr_get_from(unsigned int idx)
{
u64 val;
- rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
+ rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
return val;
}
@@ -82,7 +83,7 @@ static __always_inline u64 amd_pmu_lbr_get_to(unsigned int idx)
{
u64 val;
- rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
+ rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
return val;
}
@@ -333,7 +334,7 @@ void amd_pmu_lbr_reset(void)
cpuc->last_task_ctx = NULL;
cpuc->last_log_id = 0;
- wrmsrl(MSR_AMD64_LBR_SELECT, 0);
+ wrmsrq(MSR_AMD64_LBR_SELECT, 0);
}
void amd_pmu_lbr_add(struct perf_event *event)
@@ -396,16 +397,16 @@ void amd_pmu_lbr_enable_all(void)
/* Set hardware branch filter */
if (cpuc->lbr_select) {
lbr_select = cpuc->lbr_sel->config & LBR_SELECT_MASK;
- wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select);
+ wrmsrq(MSR_AMD64_LBR_SELECT, lbr_select);
}
if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
- rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+ wrmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
}
- rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
- wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
+ rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+ wrmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
}
void amd_pmu_lbr_disable_all(void)
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index 37d5b380516e..dad42790cf7d 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
#include "../perf_event.h"
/* Event code: LSB 8 bits, passed in attr->config any other bit is reserved. */
@@ -48,8 +49,8 @@ static void event_update(struct perf_event *event)
prev_pwr_acc = hwc->pwr_acc;
prev_ptsc = hwc->ptsc;
- rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc);
- rdmsrl(MSR_F15H_PTSC, new_ptsc);
+ rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc);
+ rdmsrq(MSR_F15H_PTSC, new_ptsc);
/*
* Calculate the CU power consumption over a time period, the unit of
@@ -75,8 +76,8 @@ static void __pmu_event_start(struct perf_event *event)
event->hw.state = 0;
- rdmsrl(MSR_F15H_PTSC, event->hw.ptsc);
- rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc);
+ rdmsrq(MSR_F15H_PTSC, event->hw.ptsc);
+ rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc);
}
static void pmu_event_start(struct perf_event *event, int mode)
@@ -272,7 +273,7 @@ static int __init amd_power_pmu_init(void)
cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
- if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
+ if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
pr_err("Failed to read max compute unit power accumulator MSR\n");
return -ENODEV;
}
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 49c26ce2b115..e8b6af199c73 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -21,6 +21,7 @@
#define NUM_COUNTERS_NB 4
#define NUM_COUNTERS_L2 4
#define NUM_COUNTERS_L3 6
+#define NUM_COUNTERS_MAX 64
#define RDPMC_BASE_NB 6
#define RDPMC_BASE_LLC 10
@@ -38,7 +39,10 @@ struct amd_uncore_ctx {
int refcnt;
int cpu;
struct perf_event **events;
- struct hlist_node node;
+ unsigned long active_mask[BITS_TO_LONGS(NUM_COUNTERS_MAX)];
+ int nr_active;
+ struct hrtimer hrtimer;
+ u64 hrtimer_duration;
};
struct amd_uncore_pmu {
@@ -83,11 +87,51 @@ struct amd_uncore {
static struct amd_uncore uncores[UNCORE_TYPE_MAX];
+/* Interval for hrtimer, defaults to 60000 milliseconds */
+static unsigned int update_interval = 60 * MSEC_PER_SEC;
+module_param(update_interval, uint, 0444);
+
static struct amd_uncore_pmu *event_to_amd_uncore_pmu(struct perf_event *event)
{
return container_of(event->pmu, struct amd_uncore_pmu, pmu);
}
+static enum hrtimer_restart amd_uncore_hrtimer(struct hrtimer *hrtimer)
+{
+ struct amd_uncore_ctx *ctx;
+ struct perf_event *event;
+ int bit;
+
+ ctx = container_of(hrtimer, struct amd_uncore_ctx, hrtimer);
+
+ if (!ctx->nr_active || ctx->cpu != smp_processor_id())
+ return HRTIMER_NORESTART;
+
+ for_each_set_bit(bit, ctx->active_mask, NUM_COUNTERS_MAX) {
+ event = ctx->events[bit];
+ event->pmu->read(event);
+ }
+
+ hrtimer_forward_now(hrtimer, ns_to_ktime(ctx->hrtimer_duration));
+ return HRTIMER_RESTART;
+}
+
+static void amd_uncore_start_hrtimer(struct amd_uncore_ctx *ctx)
+{
+ hrtimer_start(&ctx->hrtimer, ns_to_ktime(ctx->hrtimer_duration),
+ HRTIMER_MODE_REL_PINNED_HARD);
+}
+
+static void amd_uncore_cancel_hrtimer(struct amd_uncore_ctx *ctx)
+{
+ hrtimer_cancel(&ctx->hrtimer);
+}
+
+static void amd_uncore_init_hrtimer(struct amd_uncore_ctx *ctx)
+{
+ hrtimer_setup(&ctx->hrtimer, amd_uncore_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+}
+
static void amd_uncore_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -106,9 +150,9 @@ static void amd_uncore_read(struct perf_event *event)
* read counts directly from the corresponding PERF_CTR.
*/
if (hwc->event_base_rdpmc < 0)
- rdmsrl(hwc->event_base, new);
+ rdmsrq(hwc->event_base, new);
else
- rdpmcl(hwc->event_base_rdpmc, new);
+ new = rdpmc(hwc->event_base_rdpmc);
local64_set(&hwc->prev_count, new);
delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
@@ -118,27 +162,40 @@ static void amd_uncore_read(struct perf_event *event)
static void amd_uncore_start(struct perf_event *event, int flags)
{
+ struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
+ struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
struct hw_perf_event *hwc = &event->hw;
+ if (!ctx->nr_active++)
+ amd_uncore_start_hrtimer(ctx);
+
if (flags & PERF_EF_RELOAD)
- wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));
+ wrmsrq(hwc->event_base, (u64)local64_read(&hwc->prev_count));
hwc->state = 0;
- wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
+ __set_bit(hwc->idx, ctx->active_mask);
+ wrmsrq(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
perf_event_update_userpage(event);
}
static void amd_uncore_stop(struct perf_event *event, int flags)
{
+ struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
+ struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
struct hw_perf_event *hwc = &event->hw;
- wrmsrl(hwc->config_base, hwc->config);
+ wrmsrq(hwc->config_base, hwc->config);
hwc->state |= PERF_HES_STOPPED;
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
event->pmu->read(event);
hwc->state |= PERF_HES_UPTODATE;
}
+
+ if (!--ctx->nr_active)
+ amd_uncore_cancel_hrtimer(ctx);
+
+ __clear_bit(hwc->idx, ctx->active_mask);
}
static int amd_uncore_add(struct perf_event *event, int flags)
@@ -491,6 +548,9 @@ static int amd_uncore_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
goto fail;
}
+ amd_uncore_init_hrtimer(curr);
+ curr->hrtimer_duration = (u64)update_interval * NSEC_PER_MSEC;
+
cpumask_set_cpu(cpu, &pmu->active_mask);
}
@@ -880,16 +940,55 @@ static int amd_uncore_umc_event_init(struct perf_event *event)
static void amd_uncore_umc_start(struct perf_event *event, int flags)
{
+ struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
+ struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
struct hw_perf_event *hwc = &event->hw;
+ if (!ctx->nr_active++)
+ amd_uncore_start_hrtimer(ctx);
+
if (flags & PERF_EF_RELOAD)
- wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));
+ wrmsrq(hwc->event_base, (u64)local64_read(&hwc->prev_count));
hwc->state = 0;
- wrmsrl(hwc->config_base, (hwc->config | AMD64_PERFMON_V2_ENABLE_UMC));
+ __set_bit(hwc->idx, ctx->active_mask);
+ wrmsrq(hwc->config_base, (hwc->config | AMD64_PERFMON_V2_ENABLE_UMC));
perf_event_update_userpage(event);
}
+static void amd_uncore_umc_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev, new, shift;
+ s64 delta;
+
+ shift = COUNTER_SHIFT + 1;
+ prev = local64_read(&hwc->prev_count);
+
+ /*
+ * UMC counters do not have RDPMC assignments. Read counts directly
+ * from the corresponding PERF_CTR.
+ */
+ rdmsrl(hwc->event_base, new);
+
+ /*
+ * Unlike the other uncore counters, UMC counters saturate and set the
+ * Overflow bit (bit 48) on overflow. Since they do not roll over,
+ * proactively reset the corresponding PERF_CTR when bit 47 is set so
+ * that the counter never gets a chance to saturate.
+ */
+ if (new & BIT_ULL(63 - COUNTER_SHIFT)) {
+ wrmsrl(hwc->event_base, 0);
+ local64_set(&hwc->prev_count, 0);
+ } else {
+ local64_set(&hwc->prev_count, new);
+ }
+
+ delta = (new << shift) - (prev << shift);
+ delta >>= shift;
+ local64_add(delta, &event->count);
+}
+
static
void amd_uncore_umc_ctx_scan(struct amd_uncore *uncore, unsigned int cpu)
{
@@ -968,7 +1067,7 @@ int amd_uncore_umc_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
.del = amd_uncore_del,
.start = amd_uncore_umc_start,
.stop = amd_uncore_stop,
- .read = amd_uncore_read,
+ .read = amd_uncore_umc_read,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
.module = THIS_MODULE,
};
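Two details in the uncore changes above are worth spelling out. The per-context hrtimer reads every active event once per update_interval (60 seconds by default), presumably so counts are folded into event->count before a slow-moving counter can wrap, or saturate in the UMC case, since the PMU advertises PERF_PMU_CAP_NO_INTERRUPT. And amd_uncore_umc_read() uses the usual shift trick so the subtraction only sees the counter's live bits; assuming COUNTER_SHIFT is 16 as elsewhere in this file (48-bit counters), shift is 17 and a worked example reads:

	prev  = 0x00007fffffffffff	/* bit 47 about to be set */
	new   = 0x0000800000000005	/* six increments later   */

	delta = ((new << 17) - (prev << 17)) >> 17 = 6

Shifting by 17 discards bits 47-63 of both samples, so the modular subtraction still yields the right count, and because bit 47 is now set the PERF_CTR is rewritten to 0 before it can ever reach the saturating Overflow bit (bit 48).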
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 6866cc5acb0b..7610f26dfbd9 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -32,6 +32,7 @@
#include <asm/apic.h>
#include <asm/stacktrace.h>
+#include <asm/msr.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
@@ -95,6 +96,11 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_filter, *x86_pmu.filter);
DEFINE_STATIC_CALL_NULL(x86_pmu_late_setup, *x86_pmu.late_setup);
+DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_enable, *x86_pmu.pebs_enable);
+DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_disable, *x86_pmu.pebs_disable);
+DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_enable_all, *x86_pmu.pebs_enable_all);
+DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_disable_all, *x86_pmu.pebs_disable_all);
+
/*
* This one is magic, it will get called even when PMU init fails (because
* there is no PMU), in which case it should simply return NULL.
@@ -134,7 +140,7 @@ u64 x86_perf_event_update(struct perf_event *event)
*/
prev_raw_count = local64_read(&hwc->prev_count);
do {
- rdpmcl(hwc->event_base_rdpmc, new_raw_count);
+ new_raw_count = rdpmc(hwc->event_base_rdpmc);
} while (!local64_try_cmpxchg(&hwc->prev_count,
&prev_raw_count, new_raw_count));
@@ -269,7 +275,7 @@ bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
*/
for_each_set_bit(i, cntr_mask, X86_PMC_IDX_MAX) {
reg = x86_pmu_config_addr(i);
- ret = rdmsrl_safe(reg, &val);
+ ret = rdmsrq_safe(reg, &val);
if (ret)
goto msr_fail;
if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
@@ -283,7 +289,7 @@ bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
if (*(u64 *)fixed_cntr_mask) {
reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
- ret = rdmsrl_safe(reg, &val);
+ ret = rdmsrq_safe(reg, &val);
if (ret)
goto msr_fail;
for_each_set_bit(i, fixed_cntr_mask, X86_PMC_IDX_MAX) {
@@ -314,11 +320,11 @@ bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
* (qemu/kvm) that don't trap on the MSR access and always return 0s.
*/
reg = x86_pmu_event_addr(reg_safe);
- if (rdmsrl_safe(reg, &val))
+ if (rdmsrq_safe(reg, &val))
goto msr_fail;
val ^= 0xffffUL;
- ret = wrmsrl_safe(reg, val);
- ret |= rdmsrl_safe(reg, &val_new);
+ ret = wrmsrq_safe(reg, val);
+ ret |= rdmsrq_safe(reg, &val_new);
if (ret || val != val_new)
goto msr_fail;
@@ -629,7 +635,7 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.type == event->pmu->type)
event->hw.config |= x86_pmu_get_event_config(event);
- if (!event->attr.freq && x86_pmu.limit_period) {
+ if (is_sampling_event(event) && !event->attr.freq && x86_pmu.limit_period) {
s64 left = event->attr.sample_period;
x86_pmu.limit_period(event, &left);
if (left > event->attr.sample_period)
@@ -674,6 +680,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
event->hw.idx = -1;
event->hw.last_cpu = -1;
event->hw.last_tag = ~0ULL;
+ event->hw.dyn_constraint = ~0ULL;
/* mark unused */
event->hw.extra_reg.idx = EXTRA_REG_NONE;
@@ -693,13 +700,13 @@ void x86_pmu_disable_all(void)
if (!test_bit(idx, cpuc->active_mask))
continue;
- rdmsrl(x86_pmu_config_addr(idx), val);
+ rdmsrq(x86_pmu_config_addr(idx), val);
if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
continue;
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(x86_pmu_config_addr(idx), val);
+ wrmsrq(x86_pmu_config_addr(idx), val);
if (is_counter_pair(hwc))
- wrmsrl(x86_pmu_config_addr(idx + 1), 0);
+ wrmsrq(x86_pmu_config_addr(idx + 1), 0);
}
}
@@ -754,17 +761,18 @@ void x86_pmu_enable_all(int added)
}
}
-static inline int is_x86_event(struct perf_event *event)
+int is_x86_event(struct perf_event *event)
{
- int i;
-
- if (!is_hybrid())
- return event->pmu == &pmu;
-
- for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
- if (event->pmu == &x86_pmu.hybrid_pmu[i].pmu)
- return true;
- }
+ /*
+	 * For non-hybrid platforms, the type of the X86 PMU is
+	 * always PERF_TYPE_RAW.
+	 * For hybrid platforms, the PERF_PMU_CAP_EXTENDED_HW_TYPE
+	 * capability is unique to the X86 PMU.
+	 * Use them to detect an X86 event.
+ */
+ if (event->pmu->type == PERF_TYPE_RAW ||
+ event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE)
+ return true;
return false;
}
@@ -1420,14 +1428,14 @@ int x86_perf_event_set_period(struct perf_event *event)
*/
local64_set(&hwc->prev_count, (u64)-left);
- wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
+ wrmsrq(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
/*
* Sign extend the Merge event counter's upper 16 bits since
* we currently declare a 48-bit counter width
*/
if (is_counter_pair(hwc))
- wrmsrl(x86_pmu_event_addr(idx + 1), 0xffff);
+ wrmsrq(x86_pmu_event_addr(idx + 1), 0xffff);
perf_event_update_userpage(event);
@@ -1550,10 +1558,10 @@ void perf_event_print_debug(void)
return;
if (x86_pmu.version >= 2) {
- rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
- rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
- rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
- rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
+ rdmsrq(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
+ rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status);
+ rdmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
+ rdmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
pr_info("\n");
pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
@@ -1561,19 +1569,19 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
if (pebs_constraints) {
- rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
+ rdmsrq(MSR_IA32_PEBS_ENABLE, pebs);
pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
}
if (x86_pmu.lbr_nr) {
- rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
pr_info("CPU#%d: debugctl: %016llx\n", cpu, debugctl);
}
}
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
for_each_set_bit(idx, cntr_mask, X86_PMC_IDX_MAX) {
- rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
- rdmsrl(x86_pmu_event_addr(idx), pmc_count);
+ rdmsrq(x86_pmu_config_addr(idx), pmc_ctrl);
+ rdmsrq(x86_pmu_event_addr(idx), pmc_count);
prev_left = per_cpu(pmc_prev_left[idx], cpu);
@@ -1587,7 +1595,7 @@ void perf_event_print_debug(void)
for_each_set_bit(idx, fixed_cntr_mask, X86_PMC_IDX_MAX) {
if (fixed_counter_disabled(idx, cpuc->pmu))
continue;
- rdmsrl(x86_pmu_fixed_ctr_addr(idx), pmc_count);
+ rdmsrq(x86_pmu_fixed_ctr_addr(idx), pmc_count);
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
@@ -1683,6 +1691,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
struct cpu_hw_events *cpuc;
struct perf_event *event;
int idx, handled = 0;
+ u64 last_period;
u64 val;
cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1702,6 +1711,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
continue;
event = cpuc->events[idx];
+ last_period = event->hw.last_period;
val = static_call(x86_pmu_update)(event);
if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
@@ -1715,12 +1725,11 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
if (!static_call(x86_pmu_set_period)(event))
continue;
- perf_sample_data_init(&data, 0, event->hw.last_period);
+ perf_sample_data_init(&data, 0, last_period);
perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);
- if (perf_event_overflow(event, &data, regs))
- x86_pmu_stop(event, 0);
+ perf_event_overflow(event, &data, regs);
}
if (handled)
@@ -2046,6 +2055,11 @@ static void x86_pmu_static_call_update(void)
static_call_update(x86_pmu_filter, x86_pmu.filter);
static_call_update(x86_pmu_late_setup, x86_pmu.late_setup);
+
+ static_call_update(x86_pmu_pebs_enable, x86_pmu.pebs_enable);
+ static_call_update(x86_pmu_pebs_disable, x86_pmu.pebs_disable);
+ static_call_update(x86_pmu_pebs_enable_all, x86_pmu.pebs_enable_all);
+ static_call_update(x86_pmu_pebs_disable_all, x86_pmu.pebs_disable_all);
}
static void _x86_pmu_read(struct perf_event *event)
@@ -2496,9 +2510,9 @@ void perf_clear_dirty_counters(void)
if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask)))
continue;
- wrmsrl(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0);
+ wrmsrq(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0);
} else {
- wrmsrl(x86_pmu_event_addr(i), 0);
+ wrmsrq(x86_pmu_event_addr(i), 0);
}
}
@@ -2803,8 +2817,15 @@ static unsigned long get_segment_base(unsigned int segment)
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct ldt_struct *ldt;
+ /*
+ * If we're not in a valid context with a real (not just lazy)
+ * user mm, then don't even try.
+ */
+ if (!nmi_uaccess_okay())
+ return 0;
+
/* IRQs are off, so this synchronizes with smp_store_release */
- ldt = READ_ONCE(current->active_mm->context.ldt);
+ ldt = smp_load_acquire(&current->mm->context.ldt);
if (!ldt || idx >= ldt->nr_entries)
return 0;
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index a95e6c91c4d7..61da6b8a3d51 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -17,6 +17,7 @@
#include <linux/sizes.h>
#include <asm/perf_event.h>
+#include <asm/msr.h>
#include "../perf_event.h"
@@ -80,54 +81,54 @@ static void *
bts_buffer_setup_aux(struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
- struct bts_buffer *buf;
+ struct bts_buffer *bb;
struct page *page;
int cpu = event->cpu;
int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
unsigned long offset;
size_t size = nr_pages << PAGE_SHIFT;
- int pg, nbuf, pad;
+ int pg, nr_buf, pad;
/* count all the high order buffers */
- for (pg = 0, nbuf = 0; pg < nr_pages;) {
+ for (pg = 0, nr_buf = 0; pg < nr_pages;) {
page = virt_to_page(pages[pg]);
pg += buf_nr_pages(page);
- nbuf++;
+ nr_buf++;
}
/*
* to avoid interrupts in overwrite mode, only allow one physical
*/
- if (overwrite && nbuf > 1)
+ if (overwrite && nr_buf > 1)
return NULL;
- buf = kzalloc_node(offsetof(struct bts_buffer, buf[nbuf]), GFP_KERNEL, node);
- if (!buf)
+ bb = kzalloc_node(struct_size(bb, buf, nr_buf), GFP_KERNEL, node);
+ if (!bb)
return NULL;
- buf->nr_pages = nr_pages;
- buf->nr_bufs = nbuf;
- buf->snapshot = overwrite;
- buf->data_pages = pages;
- buf->real_size = size - size % BTS_RECORD_SIZE;
+ bb->nr_pages = nr_pages;
+ bb->nr_bufs = nr_buf;
+ bb->snapshot = overwrite;
+ bb->data_pages = pages;
+ bb->real_size = size - size % BTS_RECORD_SIZE;
- for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
+ for (pg = 0, nr_buf = 0, offset = 0, pad = 0; nr_buf < bb->nr_bufs; nr_buf++) {
unsigned int __nr_pages;
page = virt_to_page(pages[pg]);
__nr_pages = buf_nr_pages(page);
- buf->buf[nbuf].page = page;
- buf->buf[nbuf].offset = offset;
- buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
- buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement;
- pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
- buf->buf[nbuf].size -= pad;
+ bb->buf[nr_buf].page = page;
+ bb->buf[nr_buf].offset = offset;
+ bb->buf[nr_buf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
+ bb->buf[nr_buf].size = buf_size(page) - bb->buf[nr_buf].displacement;
+ pad = bb->buf[nr_buf].size % BTS_RECORD_SIZE;
+ bb->buf[nr_buf].size -= pad;
pg += __nr_pages;
offset += __nr_pages << PAGE_SHIFT;
}
- return buf;
+ return bb;
}
static void bts_buffer_free_aux(void *data)
@@ -135,25 +136,25 @@ static void bts_buffer_free_aux(void *data)
kfree(data);
}
-static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx)
+static unsigned long bts_buffer_offset(struct bts_buffer *bb, unsigned int idx)
{
- return buf->buf[idx].offset + buf->buf[idx].displacement;
+ return bb->buf[idx].offset + bb->buf[idx].displacement;
}
static void
-bts_config_buffer(struct bts_buffer *buf)
+bts_config_buffer(struct bts_buffer *bb)
{
int cpu = raw_smp_processor_id();
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
- struct bts_phys *phys = &buf->buf[buf->cur_buf];
+ struct bts_phys *phys = &bb->buf[bb->cur_buf];
unsigned long index, thresh = 0, end = phys->size;
struct page *page = phys->page;
- index = local_read(&buf->head);
+ index = local_read(&bb->head);
- if (!buf->snapshot) {
- if (buf->end < phys->offset + buf_size(page))
- end = buf->end - phys->offset - phys->displacement;
+ if (!bb->snapshot) {
+ if (bb->end < phys->offset + buf_size(page))
+ end = bb->end - phys->offset - phys->displacement;
index -= phys->offset + phys->displacement;
@@ -168,7 +169,7 @@ bts_config_buffer(struct bts_buffer *buf)
ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement;
ds->bts_index = ds->bts_buffer_base + index;
ds->bts_absolute_maximum = ds->bts_buffer_base + end;
- ds->bts_interrupt_threshold = !buf->snapshot
+ ds->bts_interrupt_threshold = !bb->snapshot
? ds->bts_buffer_base + thresh
: ds->bts_absolute_maximum + BTS_RECORD_SIZE;
}
@@ -184,16 +185,16 @@ static void bts_update(struct bts_ctx *bts)
{
int cpu = raw_smp_processor_id();
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
- struct bts_buffer *buf = perf_get_aux(&bts->handle);
+ struct bts_buffer *bb = perf_get_aux(&bts->handle);
unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head;
- if (!buf)
+ if (!bb)
return;
- head = index + bts_buffer_offset(buf, buf->cur_buf);
- old = local_xchg(&buf->head, head);
+ head = index + bts_buffer_offset(bb, bb->cur_buf);
+ old = local_xchg(&bb->head, head);
- if (!buf->snapshot) {
+ if (!bb->snapshot) {
if (old == head)
return;
@@ -205,9 +206,9 @@ static void bts_update(struct bts_ctx *bts)
* old and head are always in the same physical buffer, so we
* can subtract them to get the data size.
*/
- local_add(head - old, &buf->data_size);
+ local_add(head - old, &bb->data_size);
} else {
- local_set(&buf->data_size, head);
+ local_set(&bb->data_size, head);
}
/*
@@ -218,7 +219,7 @@ static void bts_update(struct bts_ctx *bts)
}
static int
-bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);
+bts_buffer_reset(struct bts_buffer *bb, struct perf_output_handle *handle);
/*
* Ordering PMU callbacks wrt themselves and the PMI is done by means
@@ -232,17 +233,17 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);
static void __bts_event_start(struct perf_event *event)
{
struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
- struct bts_buffer *buf = perf_get_aux(&bts->handle);
+ struct bts_buffer *bb = perf_get_aux(&bts->handle);
u64 config = 0;
- if (!buf->snapshot)
+ if (!bb->snapshot)
config |= ARCH_PERFMON_EVENTSEL_INT;
if (!event->attr.exclude_kernel)
config |= ARCH_PERFMON_EVENTSEL_OS;
if (!event->attr.exclude_user)
config |= ARCH_PERFMON_EVENTSEL_USR;
- bts_config_buffer(buf);
+ bts_config_buffer(bb);
/*
* local barrier to make sure that ds configuration made it
@@ -261,13 +262,13 @@ static void bts_event_start(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
- struct bts_buffer *buf;
+ struct bts_buffer *bb;
- buf = perf_aux_output_begin(&bts->handle, event);
- if (!buf)
+ bb = perf_aux_output_begin(&bts->handle, event);
+ if (!bb)
goto fail_stop;
- if (bts_buffer_reset(buf, &bts->handle))
+ if (bts_buffer_reset(bb, &bts->handle))
goto fail_end_stop;
bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
@@ -306,27 +307,27 @@ static void bts_event_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
- struct bts_buffer *buf = NULL;
+ struct bts_buffer *bb = NULL;
int state = READ_ONCE(bts->state);
if (state == BTS_STATE_ACTIVE)
__bts_event_stop(event, BTS_STATE_STOPPED);
if (state != BTS_STATE_STOPPED)
- buf = perf_get_aux(&bts->handle);
+ bb = perf_get_aux(&bts->handle);
event->hw.state |= PERF_HES_STOPPED;
if (flags & PERF_EF_UPDATE) {
bts_update(bts);
- if (buf) {
- if (buf->snapshot)
+ if (bb) {
+ if (bb->snapshot)
bts->handle.head =
- local_xchg(&buf->data_size,
- buf->nr_pages << PAGE_SHIFT);
+ local_xchg(&bb->data_size,
+ bb->nr_pages << PAGE_SHIFT);
perf_aux_output_end(&bts->handle,
- local_xchg(&buf->data_size, 0));
+ local_xchg(&bb->data_size, 0));
}
cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
@@ -382,19 +383,19 @@ void intel_bts_disable_local(void)
}
static int
-bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
+bts_buffer_reset(struct bts_buffer *bb, struct perf_output_handle *handle)
{
unsigned long head, space, next_space, pad, gap, skip, wakeup;
unsigned int next_buf;
struct bts_phys *phys, *next_phys;
int ret;
- if (buf->snapshot)
+ if (bb->snapshot)
return 0;
- head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
+ head = handle->head & ((bb->nr_pages << PAGE_SHIFT) - 1);
- phys = &buf->buf[buf->cur_buf];
+ phys = &bb->buf[bb->cur_buf];
space = phys->offset + phys->displacement + phys->size - head;
pad = space;
if (space > handle->size) {
@@ -403,10 +404,10 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
}
if (space <= BTS_SAFETY_MARGIN) {
/* See if next phys buffer has more space */
- next_buf = buf->cur_buf + 1;
- if (next_buf >= buf->nr_bufs)
+ next_buf = bb->cur_buf + 1;
+ if (next_buf >= bb->nr_bufs)
next_buf = 0;
- next_phys = &buf->buf[next_buf];
+ next_phys = &bb->buf[next_buf];
gap = buf_size(phys->page) - phys->displacement - phys->size +
next_phys->displacement;
skip = pad + gap;
@@ -431,8 +432,8 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
* anymore, so we must not be racing with
* bts_update().
*/
- buf->cur_buf = next_buf;
- local_set(&buf->head, head);
+ bb->cur_buf = next_buf;
+ local_set(&bb->head, head);
}
}
}
@@ -445,7 +446,7 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
space -= space % BTS_RECORD_SIZE;
}
- buf->end = head + space;
+ bb->end = head + space;
/*
* If we have no space, the lost notification would have been sent when
@@ -462,7 +463,7 @@ int intel_bts_interrupt(void)
struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
struct bts_ctx *bts;
struct perf_event *event;
- struct bts_buffer *buf;
+ struct bts_buffer *bb;
s64 old_head;
int err = -ENOSPC, handled = 0;
@@ -485,8 +486,8 @@ int intel_bts_interrupt(void)
if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
return handled;
- buf = perf_get_aux(&bts->handle);
- if (!buf)
+ bb = perf_get_aux(&bts->handle);
+ if (!bb)
return handled;
/*
@@ -494,26 +495,26 @@ int intel_bts_interrupt(void)
* there's no other way of telling, because the pointer will
* keep moving
*/
- if (buf->snapshot)
+ if (bb->snapshot)
return 0;
- old_head = local_read(&buf->head);
+ old_head = local_read(&bb->head);
bts_update(bts);
/* no new data */
- if (old_head == local_read(&buf->head))
+ if (old_head == local_read(&bb->head))
return handled;
- perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0));
+ perf_aux_output_end(&bts->handle, local_xchg(&bb->data_size, 0));
- buf = perf_aux_output_begin(&bts->handle, event);
- if (buf)
- err = bts_buffer_reset(buf, &bts->handle);
+ bb = perf_aux_output_begin(&bts->handle, event);
+ if (bb)
+ err = bts_buffer_reset(bb, &bts->handle);
if (err) {
WRITE_ONCE(bts->state, BTS_STATE_STOPPED);
- if (buf) {
+ if (bb) {
/*
* BTS_STATE_STOPPED should be visible before
* cleared handle::event
@@ -599,7 +600,11 @@ static void bts_event_read(struct perf_event *event)
static __init int bts_init(void)
{
- if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
+ if (!boot_cpu_has(X86_FEATURE_DTES64))
+ return -ENODEV;
+
+ x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
+ if (!x86_pmu.bts)
return -ENODEV;
if (boot_cpu_has(X86_FEATURE_PTI)) {
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 09d2d66c9f21..466283326630 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -23,6 +23,7 @@
#include <asm/intel_pt.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
#include "../perf_event.h"
@@ -2224,6 +2225,18 @@ static struct extra_reg intel_cmt_extra_regs[] __read_mostly = {
EVENT_EXTRA_END
};
+EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_skt, "event=0x9c,umask=0x01");
+EVENT_ATTR_STR(topdown-retiring, td_retiring_skt, "event=0xc2,umask=0x02");
+EVENT_ATTR_STR(topdown-be-bound, td_be_bound_skt, "event=0xa4,umask=0x02");
+
+static struct attribute *skt_events_attrs[] = {
+ EVENT_PTR(td_fe_bound_skt),
+ EVENT_PTR(td_retiring_skt),
+ EVENT_PTR(td_bad_spec_cmt),
+ EVENT_PTR(td_be_bound_skt),
+ NULL,
+};
+
#define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
#define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
#define KNL_MCDRAM_LOCAL BIT_ULL(21)
@@ -2285,7 +2298,7 @@ static __always_inline void __intel_pmu_disable_all(bool bts)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0);
if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts();
@@ -2294,7 +2307,7 @@ static __always_inline void __intel_pmu_disable_all(bool bts)
static __always_inline void intel_pmu_disable_all(void)
{
__intel_pmu_disable_all(true);
- intel_pmu_pebs_disable_all();
+ static_call_cond(x86_pmu_pebs_disable_all)();
intel_pmu_lbr_disable_all();
}
@@ -2306,11 +2319,11 @@ static void __intel_pmu_enable_all(int added, bool pmi)
intel_pmu_lbr_enable_all(pmi);
if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) {
- wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val);
+ wrmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val);
cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val;
}
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
+ wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL,
intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
@@ -2326,7 +2339,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
static void intel_pmu_enable_all(int added)
{
- intel_pmu_pebs_enable_all();
+ static_call_cond(x86_pmu_pebs_enable_all)();
__intel_pmu_enable_all(added, false);
}
@@ -2426,12 +2439,12 @@ static void intel_pmu_nhm_workaround(void)
}
for (i = 0; i < 4; i++) {
- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
- wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
+ wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
+ wrmsrq(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
}
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
+ wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
+ wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
for (i = 0; i < 4; i++) {
event = cpuc->events[i];
@@ -2441,7 +2454,7 @@ static void intel_pmu_nhm_workaround(void)
__x86_pmu_enable_event(&event->hw,
ARCH_PERFMON_EVENTSEL_ENABLE);
} else
- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
+ wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
}
}
@@ -2458,7 +2471,7 @@ static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
if (cpuc->tfa_shadow != val) {
cpuc->tfa_shadow = val;
- wrmsrl(MSR_TSX_FORCE_ABORT, val);
+ wrmsrq(MSR_TSX_FORCE_ABORT, val);
}
}
@@ -2489,14 +2502,14 @@ static inline u64 intel_pmu_get_status(void)
{
u64 status;
- rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+ rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status);
return status;
}
static inline void intel_pmu_ack_status(u64 ack)
{
- wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
+ wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static inline bool event_is_checkpointed(struct perf_event *event)
@@ -2583,7 +2596,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
* so we don't trigger the event without PEBS bit set.
*/
if (unlikely(event->attr.precise_ip))
- intel_pmu_pebs_disable(event);
+ static_call(x86_pmu_pebs_disable)(event);
}
static void intel_pmu_assign_event(struct perf_event *event, int idx)
@@ -2603,6 +2616,9 @@ static void intel_pmu_del_event(struct perf_event *event)
intel_pmu_lbr_del(event);
if (event->attr.precise_ip)
intel_pmu_pebs_del(event);
+ if (is_pebs_counter_event_group(event) ||
+ is_acr_event_group(event))
+ this_cpu_ptr(&cpu_hw_events)->n_late_setup--;
}
static int icl_set_topdown_event_period(struct perf_event *event)
@@ -2619,15 +2635,15 @@ static int icl_set_topdown_event_period(struct perf_event *event)
* Don't need to clear them again.
*/
if (left == x86_pmu.max_period) {
- wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
- wrmsrl(MSR_PERF_METRICS, 0);
+ wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0);
+ wrmsrq(MSR_PERF_METRICS, 0);
hwc->saved_slots = 0;
hwc->saved_metric = 0;
}
if ((hwc->saved_slots) && is_slots_event(event)) {
- wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
- wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
+ wrmsrq(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
+ wrmsrq(MSR_PERF_METRICS, hwc->saved_metric);
}
perf_event_update_userpage(event);
@@ -2724,12 +2740,12 @@ static u64 intel_update_topdown_event(struct perf_event *event, int metric_end,
if (!val) {
/* read Fixed counter 3 */
- rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
+ slots = rdpmc(3 | INTEL_PMC_FIXED_RDPMC_BASE);
if (!slots)
return 0;
/* read PERF_METRICS */
- rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
+ metrics = rdpmc(INTEL_PMC_FIXED_RDPMC_METRICS);
} else {
slots = val[0];
metrics = val[1];
@@ -2773,8 +2789,8 @@ static u64 intel_update_topdown_event(struct perf_event *event, int metric_end,
if (reset) {
/* The fixed counter 3 has to be written before the PERF_METRICS. */
- wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
- wrmsrl(MSR_PERF_METRICS, 0);
+ wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0);
+ wrmsrq(MSR_PERF_METRICS, 0);
if (event)
update_saved_topdown_regs(event, 0, 0, metric_end);
}
@@ -2880,6 +2896,52 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
cpuc->fixed_ctrl_val |= bits;
}
+static void intel_pmu_config_acr(int idx, u64 mask, u32 reload)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int msr_b, msr_c;
+
+ if (!mask && !cpuc->acr_cfg_b[idx])
+ return;
+
+ if (idx < INTEL_PMC_IDX_FIXED) {
+ msr_b = MSR_IA32_PMC_V6_GP0_CFG_B;
+ msr_c = MSR_IA32_PMC_V6_GP0_CFG_C;
+ } else {
+ msr_b = MSR_IA32_PMC_V6_FX0_CFG_B;
+ msr_c = MSR_IA32_PMC_V6_FX0_CFG_C;
+ idx -= INTEL_PMC_IDX_FIXED;
+ }
+
+ if (cpuc->acr_cfg_b[idx] != mask) {
+ wrmsrl(msr_b + x86_pmu.addr_offset(idx, false), mask);
+ cpuc->acr_cfg_b[idx] = mask;
+ }
+ /* Only need to update the reload value when there is a valid config value. */
+ if (mask && cpuc->acr_cfg_c[idx] != reload) {
+ wrmsrl(msr_c + x86_pmu.addr_offset(idx, false), reload);
+ cpuc->acr_cfg_c[idx] = reload;
+ }
+}
+
+static void intel_pmu_enable_acr(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!is_acr_event_group(event) || !event->attr.config2) {
+ /*
+		 * Disabling the event does not clear the ACR CFG register,
+		 * so check and clear it here.
+ */
+ intel_pmu_config_acr(hwc->idx, 0, 0);
+ return;
+ }
+
+ intel_pmu_config_acr(hwc->idx, hwc->config1, -hwc->sample_period);
+}
+
+DEFINE_STATIC_CALL_NULL(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
+
static void intel_pmu_enable_event(struct perf_event *event)
{
u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE;
@@ -2887,16 +2949,19 @@ static void intel_pmu_enable_event(struct perf_event *event)
int idx = hwc->idx;
if (unlikely(event->attr.precise_ip))
- intel_pmu_pebs_enable(event);
+ static_call(x86_pmu_pebs_enable)(event);
switch (idx) {
case 0 ... INTEL_PMC_IDX_FIXED - 1:
if (branch_sample_counters(event))
enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR;
intel_set_masks(event, idx);
+ static_call_cond(intel_pmu_enable_acr_event)(event);
__x86_pmu_enable_event(hwc, enable_mask);
break;
case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
+ static_call_cond(intel_pmu_enable_acr_event)(event);
+ fallthrough;
case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
intel_pmu_enable_fixed(event);
break;
@@ -2914,12 +2979,51 @@ static void intel_pmu_enable_event(struct perf_event *event)
}
}
+static void intel_pmu_acr_late_setup(struct cpu_hw_events *cpuc)
+{
+ struct perf_event *event, *leader;
+ int i, j, idx;
+
+ for (i = 0; i < cpuc->n_events; i++) {
+ leader = cpuc->event_list[i];
+ if (!is_acr_event_group(leader))
+ continue;
+
+ /* The ACR events must be contiguous. */
+ for (j = i; j < cpuc->n_events; j++) {
+ event = cpuc->event_list[j];
+ if (event->group_leader != leader->group_leader)
+ break;
+ for_each_set_bit(idx, (unsigned long *)&event->attr.config2, X86_PMC_IDX_MAX) {
+ if (WARN_ON_ONCE(i + idx > cpuc->n_events))
+ return;
+ __set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw.config1);
+ }
+ }
+ i = j - 1;
+ }
+}
+
+void intel_pmu_late_setup(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ if (!cpuc->n_late_setup)
+ return;
+
+ intel_pmu_pebs_late_setup(cpuc);
+ intel_pmu_acr_late_setup(cpuc);
+}
+
static void intel_pmu_add_event(struct perf_event *event)
{
if (event->attr.precise_ip)
intel_pmu_pebs_add(event);
if (intel_pmu_needs_branch_stack(event))
intel_pmu_lbr_add(event);
+ if (is_pebs_counter_event_group(event) ||
+ is_acr_event_group(event))
+ this_cpu_ptr(&cpu_hw_events)->n_late_setup++;
}
/*
@@ -2937,7 +3041,7 @@ int intel_pmu_save_and_restart(struct perf_event *event)
*/
if (unlikely(event_is_checkpointed(event))) {
/* No race with NMIs because the counter should not be armed */
- wrmsrl(event->hw.event_base, 0);
+ wrmsrq(event->hw.event_base, 0);
local64_set(&event->hw.prev_count, 0);
}
return static_call(x86_pmu_set_period)(event);
@@ -2976,13 +3080,13 @@ static void intel_pmu_reset(void)
pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) {
- wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
- wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
+ wrmsrq_safe(x86_pmu_config_addr(idx), 0ull);
+ wrmsrq_safe(x86_pmu_event_addr(idx), 0ull);
}
for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) {
if (fixed_counter_disabled(idx, cpuc->pmu))
continue;
- wrmsrl_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
+ wrmsrq_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
}
if (ds)
@@ -2991,7 +3095,7 @@ static void intel_pmu_reset(void)
/* Ack all overflows and disable fixed counters */
if (x86_pmu.version >= 2) {
intel_pmu_ack_status(intel_pmu_get_status());
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}
/* Reset LBRs and LBR freezing */
@@ -3035,8 +3139,7 @@ static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
continue;
perf_sample_data_init(data, 0, event->hw.last_period);
- if (perf_event_overflow(event, data, regs))
- x86_pmu_stop(event, 0);
+ perf_event_overflow(event, data, regs);
/* Inject one fake event is enough. */
break;
@@ -3049,7 +3152,6 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int bit;
int handled = 0;
- u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
inc_irq_stat(apic_perf_irqs);
@@ -3093,7 +3195,6 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
handled++;
x86_pmu_handle_guest_pebs(regs, &data);
static_call(x86_pmu_drain_pebs)(regs, &data);
- status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
/*
* PMI throttle may be triggered, which stops the PEBS event.
@@ -3103,7 +3204,16 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
* Update the MSR if pebs_enabled is changed.
*/
if (pebs_enabled != cpuc->pebs_enabled)
- wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+ wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+
+ /*
+	 * The PEBS handler above (PEBS counter snapshotting) has already
+	 * updated fixed counter 3 and the perf metrics counts if they are
+	 * in a counter group, so there is no need to update them again.
+ */
+ if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] &&
+ is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS]))
+ status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT;
}
/*
@@ -3123,6 +3233,8 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
static_call(intel_pmu_update_topdown_event)(NULL, NULL);
}
+ status &= hybrid(cpuc->pmu, intel_ctrl);
+
/*
* Checkpointed counters can lead to 'spurious' PMIs because the
* rollback caused by the PMI will have cleared the overflow status
@@ -3132,6 +3244,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
+ u64 last_period;
handled++;
@@ -3159,16 +3272,17 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
if (is_pebs_counter_event_group(event))
x86_pmu.drain_pebs(regs, &data);
+ last_period = event->hw.last_period;
+
if (!intel_pmu_save_and_restart(event))
continue;
- perf_sample_data_init(&data, 0, event->hw.last_period);
+ perf_sample_data_init(&data, 0, last_period);
if (has_branch_stack(event))
intel_pmu_lbr_save_brstack(&data, cpuc, event);
- if (perf_event_overflow(event, &data, regs))
- x86_pmu_stop(event, 0);
+ perf_event_overflow(event, &data, regs);
}
return handled;
@@ -3730,10 +3844,9 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
if (cpuc->excl_cntrs)
return intel_get_excl_constraints(cpuc, event, idx, c2);
- /* Not all counters support the branch counter feature. */
- if (branch_sample_counters(event)) {
+ if (event->hw.dyn_constraint != ~0ULL) {
c2 = dyn_constraint(cpuc, c2, idx);
- c2->idxmsk64 &= x86_pmu.lbr_counters;
+ c2->idxmsk64 &= event->hw.dyn_constraint;
c2->weight = hweight64(c2->idxmsk64);
}
@@ -4074,6 +4187,39 @@ end:
return start;
}
+static inline bool intel_pmu_has_acr(struct pmu *pmu)
+{
+ return !!hybrid(pmu, acr_cause_mask64);
+}
+
+static bool intel_pmu_is_acr_group(struct perf_event *event)
+{
+ /* The group leader has the ACR flag set */
+ if (is_acr_event_group(event))
+ return true;
+
+ /* The acr_mask is set */
+ if (event->attr.config2)
+ return true;
+
+ return false;
+}
+
+static inline void intel_pmu_set_acr_cntr_constr(struct perf_event *event,
+ u64 *cause_mask, int *num)
+{
+ event->hw.dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64);
+ *cause_mask |= event->attr.config2;
+ *num += 1;
+}
+
+static inline void intel_pmu_set_acr_caused_constr(struct perf_event *event,
+ int idx, u64 cause_mask)
+{
+ if (test_bit(idx, (unsigned long *)&cause_mask))
+ event->hw.dyn_constraint &= hybrid(event->pmu, acr_cause_mask64);
+}
+
static int intel_pmu_hw_config(struct perf_event *event)
{
int ret = x86_pmu_hw_config(event);
@@ -4135,15 +4281,19 @@ static int intel_pmu_hw_config(struct perf_event *event)
leader = event->group_leader;
if (branch_sample_call_stack(leader))
return -EINVAL;
- if (branch_sample_counters(leader))
+ if (branch_sample_counters(leader)) {
num++;
+ leader->hw.dyn_constraint &= x86_pmu.lbr_counters;
+ }
leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
for_each_sibling_event(sibling, leader) {
if (branch_sample_call_stack(sibling))
return -EINVAL;
- if (branch_sample_counters(sibling))
+ if (branch_sample_counters(sibling)) {
num++;
+ sibling->hw.dyn_constraint &= x86_pmu.lbr_counters;
+ }
}
if (num > fls(x86_pmu.lbr_counters))
@@ -4198,6 +4348,94 @@ static int intel_pmu_hw_config(struct perf_event *event)
event->attr.precise_ip)
event->group_leader->hw.flags |= PERF_X86_EVENT_PEBS_CNTR;
+ if (intel_pmu_has_acr(event->pmu) && intel_pmu_is_acr_group(event)) {
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct pmu *pmu = event->pmu;
+ bool has_sw_event = false;
+ int num = 0, idx = 0;
+ u64 cause_mask = 0;
+
+		/* Perf metrics events are not supported */
+ if (is_metric_event(event))
+ return -EINVAL;
+
+		/* Frequency mode is not supported */
+ if (event->attr.freq)
+ return -EINVAL;
+
+ /* PDist is not supported */
+ if (event->attr.config2 && event->attr.precise_ip > 2)
+ return -EINVAL;
+
+		/* The reload value cannot exceed the max period */
+ if (event->attr.sample_period > x86_pmu.max_period)
+ return -EINVAL;
+ /*
+ * The counter-constraints of each event cannot be finalized
+ * unless the whole group is scanned. However, it's hard
+ * to know whether the event is the last one of the group.
+ * Recalculate the counter-constraints for each event when
+ * adding a new event.
+ *
+ * The group is traversed twice, which may be optimized later.
+ * In the first round,
+ * - Find all events which do reload when other events
+ * overflow and set the corresponding counter-constraints
+	 * - Add all events which can cause other events to reload
+	 *   to the cause_mask
+ * - Error out if the number of events exceeds the HW limit
+ * - The ACR events must be contiguous.
+ * Error out if there are non-X86 events between ACR events.
+ * This is not a HW limit, but a SW limit.
+	 *   With this assumption, intel_pmu_acr_late_setup() can
+ * easily convert the event idx to counter idx without
+ * traversing the whole event list.
+ */
+ if (!is_x86_event(leader))
+ return -EINVAL;
+
+ if (leader->attr.config2)
+ intel_pmu_set_acr_cntr_constr(leader, &cause_mask, &num);
+
+ if (leader->nr_siblings) {
+ for_each_sibling_event(sibling, leader) {
+ if (!is_x86_event(sibling)) {
+ has_sw_event = true;
+ continue;
+ }
+ if (!sibling->attr.config2)
+ continue;
+ if (has_sw_event)
+ return -EINVAL;
+ intel_pmu_set_acr_cntr_constr(sibling, &cause_mask, &num);
+ }
+ }
+ if (leader != event && event->attr.config2) {
+ if (has_sw_event)
+ return -EINVAL;
+ intel_pmu_set_acr_cntr_constr(event, &cause_mask, &num);
+ }
+
+ if (hweight64(cause_mask) > hweight64(hybrid(pmu, acr_cause_mask64)) ||
+ num > hweight64(hybrid(event->pmu, acr_cntr_mask64)))
+ return -EINVAL;
+ /*
+ * In the second round, apply the counter-constraints for
+ * the events which can cause other events reload.
+ */
+ intel_pmu_set_acr_caused_constr(leader, idx++, cause_mask);
+
+ if (leader->nr_siblings) {
+ for_each_sibling_event(sibling, leader)
+ intel_pmu_set_acr_caused_constr(sibling, idx++, cause_mask);
+ }
+
+ if (leader != event)
+ intel_pmu_set_acr_caused_constr(event, idx, cause_mask);
+
+ leader->hw.flags |= PERF_X86_EVENT_ACR;
+ }
+
if ((event->attr.type == PERF_TYPE_HARDWARE) ||
(event->attr.type == PERF_TYPE_HW_CACHE))
return 0;
@@ -4345,7 +4583,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
.guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
};
- if (!x86_pmu.pebs)
+ if (!x86_pmu.ds_pebs)
return arr;
/*
@@ -4386,7 +4624,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
arr[pebs_enable] = (struct perf_guest_switch_msr){
.msr = MSR_IA32_PEBS_ENABLE,
.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
- .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
+ .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
};
if (arr[pebs_enable].host) {
@@ -4943,7 +5181,7 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
goto err;
}
- if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) {
+ if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_DYN_CONSTRAINT)) {
size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
@@ -5032,7 +5270,7 @@ static inline bool intel_pmu_broken_perf_cap(void)
return false;
}
-static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
+static void update_pmu_cap(struct pmu *pmu)
{
unsigned int cntr, fixed_cntr, ecx, edx;
union cpuid35_eax eax;
@@ -5041,20 +5279,30 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx);
if (ebx.split.umask2)
- pmu->config_mask |= ARCH_PERFMON_EVENTSEL_UMASK2;
+ hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2;
if (ebx.split.eq)
- pmu->config_mask |= ARCH_PERFMON_EVENTSEL_EQ;
+ hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ;
if (eax.split.cntr_subleaf) {
cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
&cntr, &fixed_cntr, &ecx, &edx);
- pmu->cntr_mask64 = cntr;
- pmu->fixed_cntr_mask64 = fixed_cntr;
+ hybrid(pmu, cntr_mask64) = cntr;
+ hybrid(pmu, fixed_cntr_mask64) = fixed_cntr;
+ }
+
+ if (eax.split.acr_subleaf) {
+ cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF,
+ &cntr, &fixed_cntr, &ecx, &edx);
+ /* The mask of the counters which can be reloaded */
+ hybrid(pmu, acr_cntr_mask64) = cntr | ((u64)fixed_cntr << INTEL_PMC_IDX_FIXED);
+
+ /* The mask of the counters which can cause a reload of reloadable counters */
+ hybrid(pmu, acr_cause_mask64) = ecx | ((u64)edx << INTEL_PMC_IDX_FIXED);
}
if (!intel_pmu_broken_perf_cap()) {
/* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
- rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities);
+ rdmsrq(MSR_IA32_PERF_CAPABILITIES, hybrid(pmu, intel_cap).capabilities);
}
}
@@ -5141,7 +5389,7 @@ static bool init_hybrid_pmu(int cpu)
goto end;
if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
- update_pmu_cap(pmu);
+ update_pmu_cap(&pmu->pmu);
intel_pmu_check_hybrid_pmus(pmu);
@@ -5202,7 +5450,7 @@ static void intel_pmu_cpu_starting(int cpu)
if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
union perf_capabilities perf_cap;
- rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
+ rdmsrq(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
if (!perf_cap.perf_metrics) {
x86_pmu.intel_cap.perf_metrics = 0;
x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
@@ -5515,7 +5763,7 @@ static __init void intel_clovertown_quirk(void)
* these chips.
*/
pr_warn("PEBS disabled due to CPU errata\n");
- x86_pmu.pebs = 0;
+ x86_pmu.ds_pebs = 0;
x86_pmu.pebs_constraints = NULL;
}
@@ -5610,24 +5858,24 @@ static bool check_msr(unsigned long msr, u64 mask)
* matches, this is needed to detect certain hardware emulators
* (qemu/kvm) that don't trap on the MSR access and always return 0s.
*/
- if (rdmsrl_safe(msr, &val_old))
+ if (rdmsrq_safe(msr, &val_old))
return false;
/*
- * Only change the bits which can be updated by wrmsrl.
+ * Only change the bits which can be updated by wrmsrq.
*/
val_tmp = val_old ^ mask;
if (is_lbr_from(msr))
val_tmp = lbr_from_signext_quirk_wr(val_tmp);
- if (wrmsrl_safe(msr, val_tmp) ||
- rdmsrl_safe(msr, &val_new))
+ if (wrmsrq_safe(msr, val_tmp) ||
+ rdmsrq_safe(msr, &val_new))
return false;
/*
- * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
- * should equal rdmsrl()'s even with the quirk.
+ * Quirk only affects validation in wrmsr(), so wrmsrq()'s value
+ * should equal rdmsrq()'s even with the quirk.
*/
if (val_new != val_tmp)
return false;
@@ -5638,7 +5886,7 @@ static bool check_msr(unsigned long msr, u64 mask)
/* Here it's sure that the MSR can be safely accessed.
* Restore the old value and return.
*/
- wrmsrl(msr, val_old);
+ wrmsrq(msr, val_old);
return true;
}
@@ -6003,7 +6251,7 @@ tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
static umode_t
pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
- return x86_pmu.pebs ? attr->mode : 0;
+ return x86_pmu.ds_pebs ? attr->mode : 0;
}
static umode_t
@@ -6034,6 +6282,21 @@ td_is_visible(struct kobject *kobj, struct attribute *attr, int i)
return attr->mode;
}
+PMU_FORMAT_ATTR(acr_mask, "config2:0-63");
+
+static struct attribute *format_acr_attrs[] = {
+ &format_attr_acr_mask.attr,
+ NULL
+};
+
+static umode_t
+acr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ return intel_pmu_has_acr(dev_get_drvdata(dev)) ? attr->mode : 0;
+}
+
static struct attribute_group group_events_td = {
.name = "events",
.is_visible = td_is_visible,
@@ -6076,6 +6339,12 @@ static struct attribute_group group_format_evtsel_ext = {
.is_visible = evtsel_ext_is_visible,
};
+static struct attribute_group group_format_acr = {
+ .name = "format",
+ .attrs = format_acr_attrs,
+ .is_visible = acr_is_visible,
+};
+
static struct attribute_group group_default = {
.attrs = intel_pmu_attrs,
.is_visible = default_is_visible,
@@ -6090,6 +6359,7 @@ static const struct attribute_group *attr_update[] = {
&group_format_extra,
&group_format_extra_skl,
&group_format_evtsel_ext,
+ &group_format_acr,
&group_default,
NULL,
};
@@ -6374,6 +6644,7 @@ static const struct attribute_group *hybrid_attr_update[] = {
&group_caps_lbr,
&hybrid_group_format_extra,
&group_format_evtsel_ext,
+ &group_format_acr,
&group_default,
&hybrid_group_cpus,
NULL,
@@ -6566,6 +6837,7 @@ static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
intel_pmu_init_grt(pmu);
hybrid(pmu, event_constraints) = intel_skt_event_constraints;
hybrid(pmu, extra_regs) = intel_cmt_extra_regs;
+ static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
}
__init int intel_pmu_init(void)
@@ -6626,6 +6898,7 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_events_mask = intel_pmu_pebs_mask(x86_pmu.cntr_mask64);
x86_pmu.pebs_capable = PEBS_COUNTER_MASK;
+ x86_pmu.config_mask = X86_RAW_EVENT_MASK;
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
@@ -6642,7 +6915,7 @@ __init int intel_pmu_init(void)
if (boot_cpu_has(X86_FEATURE_PDCM)) {
u64 capabilities;
- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
+ rdmsrq(MSR_IA32_PERF_CAPABILITIES, capabilities);
x86_pmu.intel_cap.capabilities = capabilities;
}
@@ -6654,7 +6927,7 @@ __init int intel_pmu_init(void)
if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
intel_pmu_arch_lbr_init();
- intel_ds_init();
+ intel_pebs_init();
x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
@@ -6665,6 +6938,12 @@ __init int intel_pmu_init(void)
}
/*
+	 * Many features on and after V6 require dynamic constraints,
+	 * e.g., Arch PEBS and ACR.
+ */
+ if (version >= 6)
+ x86_pmu.flags |= PMU_FL_DYN_CONSTRAINT;
+ /*
* Install the hw-cache-events table:
*/
switch (boot_cpu_data.x86_vfm) {
@@ -6875,6 +7154,18 @@ __init int intel_pmu_init(void)
name = "crestmont";
break;
+ case INTEL_ATOM_DARKMONT_X:
+ intel_pmu_init_skt(NULL);
+ intel_pmu_pebs_data_source_cmt();
+ x86_pmu.pebs_latency_data = cmt_latency_data;
+ x86_pmu.get_event_constraints = cmt_get_event_constraints;
+ td_attr = skt_events_attrs;
+ mem_attr = grt_mem_attrs;
+ extra_attr = cmt_format_attr;
+ pr_cont("Darkmont events, ");
+ name = "darkmont";
+ break;
+
case INTEL_WESTMERE:
case INTEL_WESTMERE_EP:
case INTEL_WESTMERE_EX:
@@ -7305,8 +7596,17 @@ __init int intel_pmu_init(void)
name = "meteorlake_hybrid";
break;
+ case INTEL_PANTHERLAKE_L:
+ pr_cont("Pantherlake Hybrid events, ");
+ name = "pantherlake_hybrid";
+ goto lnl_common;
+
case INTEL_LUNARLAKE_M:
case INTEL_ARROWLAKE:
+ pr_cont("Lunarlake Hybrid events, ");
+ name = "lunarlake_hybrid";
+
+ lnl_common:
intel_pmu_init_hybrid(hybrid_big_small);
x86_pmu.pebs_latency_data = lnl_latency_data;
@@ -7328,8 +7628,6 @@ __init int intel_pmu_init(void)
intel_pmu_init_skt(&pmu->pmu);
intel_pmu_pebs_data_source_lnl();
- pr_cont("Lunarlake Hybrid events, ");
- name = "lunarlake_hybrid";
break;
case INTEL_ARROWLAKE_H:
@@ -7417,6 +7715,18 @@ __init int intel_pmu_init(void)
x86_pmu.attr_update = hybrid_attr_update;
}
+ /*
+ * The archPerfmonExt (0x23) includes an enhanced enumeration of
+ * PMU architectural features with a per-core view. For non-hybrid,
+ * each core has the same PMU capabilities. It's good enough to
+ * update the x86_pmu from the booting CPU. For hybrid, the x86_pmu
+	 * is used to keep the common capabilities. Still keep the values
+	 * from leaf 0xa. The core-specific update will be done later
+	 * when a new CPU type comes online.
+ */
+ if (!is_hybrid() && boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
+ update_pmu_cap(NULL);
+
intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64,
&x86_pmu.fixed_cntr_mask64,
&x86_pmu.intel_ctrl);
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index ae4ec16156bb..ec753e39b007 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -111,6 +111,7 @@
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/msr.h>
#include "../perf_event.h"
#include "../probe.h"
@@ -320,7 +321,7 @@ static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
u64 val;
- rdmsrl(event->hw.event_base, val);
+ rdmsrq(event->hw.event_base, val);
return val;
}
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 1f7e1a692a7a..c0b7ac1c7594 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -10,6 +10,7 @@
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
+#include <asm/msr.h>
#include <asm/timer.h>
#include "../perf_event.h"
@@ -624,7 +625,7 @@ static int alloc_pebs_buffer(int cpu)
int max, node = cpu_to_node(cpu);
void *buffer, *insn_buff, *cea;
- if (!x86_pmu.pebs)
+ if (!x86_pmu.ds_pebs)
return 0;
buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
@@ -659,7 +660,7 @@ static void release_pebs_buffer(int cpu)
struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
void *cea;
- if (!x86_pmu.pebs)
+ if (!x86_pmu.ds_pebs)
return;
kfree(per_cpu(insn_buffer, cpu));
@@ -734,7 +735,7 @@ void release_ds_buffers(void)
{
int cpu;
- if (!x86_pmu.bts && !x86_pmu.pebs)
+ if (!x86_pmu.bts && !x86_pmu.ds_pebs)
return;
for_each_possible_cpu(cpu)
@@ -750,7 +751,8 @@ void release_ds_buffers(void)
}
for_each_possible_cpu(cpu) {
- release_pebs_buffer(cpu);
+ if (x86_pmu.ds_pebs)
+ release_pebs_buffer(cpu);
release_bts_buffer(cpu);
}
}
@@ -761,15 +763,17 @@ void reserve_ds_buffers(void)
int cpu;
x86_pmu.bts_active = 0;
- x86_pmu.pebs_active = 0;
- if (!x86_pmu.bts && !x86_pmu.pebs)
+ if (x86_pmu.ds_pebs)
+ x86_pmu.pebs_active = 0;
+
+ if (!x86_pmu.bts && !x86_pmu.ds_pebs)
return;
if (!x86_pmu.bts)
bts_err = 1;
- if (!x86_pmu.pebs)
+ if (!x86_pmu.ds_pebs)
pebs_err = 1;
for_each_possible_cpu(cpu) {
@@ -781,7 +785,8 @@ void reserve_ds_buffers(void)
if (!bts_err && alloc_bts_buffer(cpu))
bts_err = 1;
- if (!pebs_err && alloc_pebs_buffer(cpu))
+ if (x86_pmu.ds_pebs && !pebs_err &&
+ alloc_pebs_buffer(cpu))
pebs_err = 1;
if (bts_err && pebs_err)
@@ -793,7 +798,7 @@ void reserve_ds_buffers(void)
release_bts_buffer(cpu);
}
- if (pebs_err) {
+ if (x86_pmu.ds_pebs && pebs_err) {
for_each_possible_cpu(cpu)
release_pebs_buffer(cpu);
}
@@ -805,7 +810,7 @@ void reserve_ds_buffers(void)
if (x86_pmu.bts && !bts_err)
x86_pmu.bts_active = 1;
- if (x86_pmu.pebs && !pebs_err)
+ if (x86_pmu.ds_pebs && !pebs_err)
x86_pmu.pebs_active = 1;
for_each_possible_cpu(cpu) {
@@ -1355,9 +1360,8 @@ static void __intel_pmu_pebs_update_cfg(struct perf_event *event,
}
-static void intel_pmu_late_setup(void)
+void intel_pmu_pebs_late_setup(struct cpu_hw_events *cpuc)
{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
u64 pebs_data_cfg = 0;
int i;
@@ -1399,8 +1403,10 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event)
* + precise_ip < 2 for the non event IP
* + For RTM TSX weight we need GPRs for the abort code.
*/
- gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
- (attr->sample_regs_intr & PEBS_GP_REGS);
+ gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) &&
+ (attr->sample_regs_intr & PEBS_GP_REGS)) ||
+ ((sample_type & PERF_SAMPLE_REGS_USER) &&
+ (attr->sample_regs_user & PEBS_GP_REGS));
tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
((attr->config & INTEL_ARCH_EVENT_MASK) ==
@@ -1515,7 +1521,7 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
else
value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx];
}
- wrmsrl(base + idx, value);
+ wrmsrq(base + idx, value);
}
static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
@@ -1552,7 +1558,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
*/
intel_pmu_drain_pebs_buffer();
adaptive_pebs_record_size_update();
- wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
+ wrmsrq(MSR_PEBS_DATA_CFG, pebs_data_cfg);
cpuc->active_pebs_data_cfg = pebs_data_cfg;
}
}
@@ -1615,7 +1621,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
intel_pmu_pebs_via_pt_disable(event);
if (cpuc->enabled)
- wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+ wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}
@@ -1625,7 +1631,7 @@ void intel_pmu_pebs_enable_all(void)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (cpuc->pebs_enabled)
- wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+ wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}
void intel_pmu_pebs_disable_all(void)
@@ -1826,8 +1832,6 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event,
perf_sample_data_init(data, 0, event->hw.last_period);
- data->period = event->hw.last_period;
-
/*
* Use latency for weight (only avail with PEBS-LL)
*/
@@ -2080,7 +2084,6 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
sample_type = event->attr.sample_type;
format_group = basic->format_group;
perf_sample_data_init(data, 0, event->hw.last_period);
- data->period = event->hw.last_period;
setup_pebs_time(event, data, basic->tsc);
@@ -2123,7 +2126,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
regs->flags &= ~PERF_EFLAGS_EXACT;
}
- if (sample_type & PERF_SAMPLE_REGS_INTR)
+ if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER))
adaptive_pebs_save_regs(regs, gprs);
}
@@ -2274,7 +2277,7 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
WARN_ON(this_cpu_read(cpu_hw_events.enabled));
prev_raw_count = local64_read(&hwc->prev_count);
- rdpmcl(hwc->event_base_rdpmc, new_raw_count);
+ new_raw_count = rdpmc(hwc->event_base_rdpmc);
local64_set(&hwc->prev_count, new_raw_count);
/*
@@ -2357,8 +2360,7 @@ __intel_pmu_pebs_last_event(struct perf_event *event,
* All but the last records are processed.
* The last one is left to be able to call the overflow handler.
*/
- if (perf_event_overflow(event, data, regs))
- x86_pmu_stop(event, 0);
+ perf_event_overflow(event, data, regs);
}
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
@@ -2377,8 +2379,25 @@ __intel_pmu_pebs_last_event(struct perf_event *event,
*/
intel_pmu_save_and_restart_reload(event, count);
}
- } else
- intel_pmu_save_and_restart(event);
+ } else {
+ /*
+		 * For a non-precise event, it's possible that the
+		 * counters-snapshotting record captures a positive value for the
+		 * overflowed event. The HW auto-reload mechanism then
+		 * resets the counter to 0 immediately, because
+		 * pebs_event_reset is cleared when PERF_X86_EVENT_AUTO_RELOAD
+		 * is not set. The counter may therefore be observed going
+		 * backwards in a PMI handler.
+		 *
+		 * Since the event value has already been updated when processing
+		 * the counters-snapshotting record, only the new period needs to
+		 * be set for the counter.
+ */
+ if (is_pebs_counter_event_group(event))
+ static_call(x86_pmu_set_period)(event);
+ else
+ intel_pmu_save_and_restart(event);
+ }
}
static __always_inline void
@@ -2446,8 +2465,9 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_
setup_pebs_fixed_sample_data);
}
-static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
+static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64 mask)
{
+ u64 pebs_enabled = cpuc->pebs_enabled & mask;
struct perf_event *event;
int bit;
@@ -2458,7 +2478,7 @@ static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int
* It needs to call intel_pmu_save_and_restart_reload() to
* update the event->count for this case.
*/
- for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
+ for_each_set_bit(bit, (unsigned long *)&pebs_enabled, X86_PMC_IDX_MAX) {
event = cpuc->events[bit];
if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
intel_pmu_save_and_restart_reload(event, 0);
@@ -2493,7 +2513,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
}
if (unlikely(base >= top)) {
- intel_pmu_pebs_event_update_no_drain(cpuc, size);
+ intel_pmu_pebs_event_update_no_drain(cpuc, mask);
return;
}
@@ -2569,8 +2589,8 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
if (error[bit]) {
perf_log_lost_samples(event, error[bit]);
- if (iregs && perf_event_account_interrupt(event))
- x86_pmu_stop(event, 0);
+ if (iregs)
+ perf_event_account_interrupt(event);
}
if (counts[bit]) {
@@ -2607,7 +2627,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
(hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED);
if (unlikely(base >= top)) {
- intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX);
+ intel_pmu_pebs_event_update_no_drain(cpuc, mask);
return;
}
@@ -2650,10 +2670,10 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
}
/*
- * BTS, PEBS probe and setup
+ * PEBS probe and setup
*/
-void __init intel_ds_init(void)
+void __init intel_pebs_init(void)
{
/*
* No support for 32bit formats
@@ -2661,13 +2681,12 @@ void __init intel_ds_init(void)
if (!boot_cpu_has(X86_FEATURE_DTES64))
return;
- x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
- x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
+ x86_pmu.ds_pebs = boot_cpu_has(X86_FEATURE_PEBS);
x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
if (x86_pmu.version <= 4)
x86_pmu.pebs_no_isolation = 1;
- if (x86_pmu.pebs) {
+ if (x86_pmu.ds_pebs) {
char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
char *pebs_qual = "";
int format = x86_pmu.intel_cap.pebs_format;
@@ -2675,6 +2694,11 @@ void __init intel_ds_init(void)
if (format < 4)
x86_pmu.intel_cap.pebs_baseline = 0;
+ x86_pmu.pebs_enable = intel_pmu_pebs_enable;
+ x86_pmu.pebs_disable = intel_pmu_pebs_disable;
+ x86_pmu.pebs_enable_all = intel_pmu_pebs_enable_all;
+ x86_pmu.pebs_disable_all = intel_pmu_pebs_disable_all;
+
switch (format) {
case 0:
pr_cont("PEBS fmt0%c, ", pebs_type);
@@ -2759,7 +2783,7 @@ void __init intel_ds_init(void)
default:
pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
- x86_pmu.pebs = 0;
+ x86_pmu.ds_pebs = 0;
}
}
}
@@ -2768,8 +2792,8 @@ void perf_restore_debug_store(void)
{
struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
- if (!x86_pmu.bts && !x86_pmu.pebs)
+ if (!x86_pmu.bts && !x86_pmu.ds_pebs)
return;
- wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
+ wrmsrq(MSR_IA32_DS_AREA, (unsigned long)ds);
}
diff --git a/arch/x86/events/intel/knc.c b/arch/x86/events/intel/knc.c
index 034a1f6a457c..e614baf42926 100644
--- a/arch/x86/events/intel/knc.c
+++ b/arch/x86/events/intel/knc.c
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <asm/hardirq.h>
+#include <asm/msr.h>
#include "../perf_event.h"
@@ -159,18 +160,18 @@ static void knc_pmu_disable_all(void)
{
u64 val;
- rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
+ rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
val &= ~(KNC_ENABLE_COUNTER0|KNC_ENABLE_COUNTER1);
- wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
+ wrmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
}
static void knc_pmu_enable_all(int added)
{
u64 val;
- rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
+ rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
val |= (KNC_ENABLE_COUNTER0|KNC_ENABLE_COUNTER1);
- wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
+ wrmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
}
static inline void
@@ -182,7 +183,7 @@ knc_pmu_disable_event(struct perf_event *event)
val = hwc->config;
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
- (void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
+ (void)wrmsrq_safe(hwc->config_base + hwc->idx, val);
}
static void knc_pmu_enable_event(struct perf_event *event)
@@ -193,21 +194,21 @@ static void knc_pmu_enable_event(struct perf_event *event)
val = hwc->config;
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- (void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
+ (void)wrmsrq_safe(hwc->config_base + hwc->idx, val);
}
static inline u64 knc_pmu_get_status(void)
{
u64 status;
- rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status);
+ rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status);
return status;
}
static inline void knc_pmu_ack_status(u64 ack)
{
- wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL, ack);
+ wrmsrq(MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL, ack);
}
static int knc_pmu_handle_irq(struct pt_regs *regs)
@@ -241,19 +242,20 @@ again:
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
+ u64 last_period;
handled++;
if (!test_bit(bit, cpuc->active_mask))
continue;
+ last_period = event->hw.last_period;
if (!intel_pmu_save_and_restart(event))
continue;
- perf_sample_data_init(&data, 0, event->hw.last_period);
+ perf_sample_data_init(&data, 0, last_period);
- if (perf_event_overflow(event, &data, regs))
- x86_pmu_stop(event, 0);
+ perf_event_overflow(event, &data, regs);
}
/*
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index f44c3d866f24..7aa59966e7c3 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -137,9 +137,9 @@ static void __intel_pmu_lbr_enable(bool pmi)
if (cpuc->lbr_sel)
lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel)
- wrmsrl(MSR_LBR_SELECT, lbr_select);
+ wrmsrq(MSR_LBR_SELECT, lbr_select);
- rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
orig_debugctl = debugctl;
if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
@@ -155,10 +155,10 @@ static void __intel_pmu_lbr_enable(bool pmi)
debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
if (orig_debugctl != debugctl)
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
if (static_cpu_has(X86_FEATURE_ARCH_LBR))
- wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN);
+ wrmsrq(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN);
}
void intel_pmu_lbr_reset_32(void)
@@ -166,7 +166,7 @@ void intel_pmu_lbr_reset_32(void)
int i;
for (i = 0; i < x86_pmu.lbr_nr; i++)
- wrmsrl(x86_pmu.lbr_from + i, 0);
+ wrmsrq(x86_pmu.lbr_from + i, 0);
}
void intel_pmu_lbr_reset_64(void)
@@ -174,17 +174,17 @@ void intel_pmu_lbr_reset_64(void)
int i;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
- wrmsrl(x86_pmu.lbr_from + i, 0);
- wrmsrl(x86_pmu.lbr_to + i, 0);
+ wrmsrq(x86_pmu.lbr_from + i, 0);
+ wrmsrq(x86_pmu.lbr_to + i, 0);
if (x86_pmu.lbr_has_info)
- wrmsrl(x86_pmu.lbr_info + i, 0);
+ wrmsrq(x86_pmu.lbr_info + i, 0);
}
}
static void intel_pmu_arch_lbr_reset(void)
{
/* Write to ARCH_LBR_DEPTH MSR, all LBR entries are reset to 0 */
- wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr);
+ wrmsrq(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr);
}
void intel_pmu_lbr_reset(void)
@@ -199,7 +199,7 @@ void intel_pmu_lbr_reset(void)
cpuc->last_task_ctx = NULL;
cpuc->last_log_id = 0;
if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && cpuc->lbr_select)
- wrmsrl(MSR_LBR_SELECT, 0);
+ wrmsrq(MSR_LBR_SELECT, 0);
}
/*
@@ -209,7 +209,7 @@ static inline u64 intel_pmu_lbr_tos(void)
{
u64 tos;
- rdmsrl(x86_pmu.lbr_tos, tos);
+ rdmsrq(x86_pmu.lbr_tos, tos);
return tos;
}
@@ -282,17 +282,17 @@ static u64 lbr_from_signext_quirk_rd(u64 val)
static __always_inline void wrlbr_from(unsigned int idx, u64 val)
{
val = lbr_from_signext_quirk_wr(val);
- wrmsrl(x86_pmu.lbr_from + idx, val);
+ wrmsrq(x86_pmu.lbr_from + idx, val);
}
static __always_inline void wrlbr_to(unsigned int idx, u64 val)
{
- wrmsrl(x86_pmu.lbr_to + idx, val);
+ wrmsrq(x86_pmu.lbr_to + idx, val);
}
static __always_inline void wrlbr_info(unsigned int idx, u64 val)
{
- wrmsrl(x86_pmu.lbr_info + idx, val);
+ wrmsrq(x86_pmu.lbr_info + idx, val);
}
static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr)
@@ -302,7 +302,7 @@ static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr)
if (lbr)
return lbr->from;
- rdmsrl(x86_pmu.lbr_from + idx, val);
+ rdmsrq(x86_pmu.lbr_from + idx, val);
return lbr_from_signext_quirk_rd(val);
}
@@ -314,7 +314,7 @@ static __always_inline u64 rdlbr_to(unsigned int idx, struct lbr_entry *lbr)
if (lbr)
return lbr->to;
- rdmsrl(x86_pmu.lbr_to + idx, val);
+ rdmsrq(x86_pmu.lbr_to + idx, val);
return val;
}
@@ -326,7 +326,7 @@ static __always_inline u64 rdlbr_info(unsigned int idx, struct lbr_entry *lbr)
if (lbr)
return lbr->info;
- rdmsrl(x86_pmu.lbr_info + idx, val);
+ rdmsrq(x86_pmu.lbr_info + idx, val);
return val;
}
@@ -380,10 +380,10 @@ void intel_pmu_lbr_restore(void *ctx)
wrlbr_info(lbr_idx, 0);
}
- wrmsrl(x86_pmu.lbr_tos, tos);
+ wrmsrq(x86_pmu.lbr_tos, tos);
if (cpuc->lbr_select)
- wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
+ wrmsrq(MSR_LBR_SELECT, task_ctx->lbr_sel);
}
static void intel_pmu_arch_lbr_restore(void *ctx)
@@ -475,7 +475,7 @@ void intel_pmu_lbr_save(void *ctx)
task_ctx->tos = tos;
if (cpuc->lbr_select)
- rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
+ rdmsrq(MSR_LBR_SELECT, task_ctx->lbr_sel);
}
static void intel_pmu_arch_lbr_save(void *ctx)
@@ -752,7 +752,7 @@ void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
u64 lbr;
} msr_lastbranch;
- rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
+ rdmsrq(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
perf_clear_branch_entry_bitfields(br);
@@ -1602,7 +1602,7 @@ void __init intel_pmu_arch_lbr_init(void)
goto clear_arch_lbr;
/* Apply the max depth of Arch LBR */
- if (wrmsrl_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
+ if (wrmsrq_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
goto clear_arch_lbr;
x86_pmu.lbr_depth_mask = eax.split.lbr_depth_mask;
@@ -1618,7 +1618,7 @@ void __init intel_pmu_arch_lbr_init(void)
x86_pmu.lbr_nr = lbr_nr;
if (!!x86_pmu.lbr_counters)
- x86_pmu.flags |= PMU_FL_BR_CNTR;
+ x86_pmu.flags |= PMU_FL_BR_CNTR | PMU_FL_DYN_CONSTRAINT;
if (x86_pmu.lbr_mispred)
static_branch_enable(&x86_lbr_mispred);
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index c85a9fc44355..e5fd7367e45d 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -13,6 +13,7 @@
#include <asm/cpu_device_id.h>
#include <asm/hardirq.h>
#include <asm/apic.h>
+#include <asm/msr.h>
#include "../perf_event.h"
@@ -859,9 +860,9 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
u64 v;
/* an official way for overflow indication */
- rdmsrl(hwc->config_base, v);
+ rdmsrq(hwc->config_base, v);
if (v & P4_CCCR_OVF) {
- wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
+ wrmsrq(hwc->config_base, v & ~P4_CCCR_OVF);
return 1;
}
@@ -872,7 +873,7 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
* the counter has reached zero value and continued counting before
* real NMI signal was received:
*/
- rdmsrl(hwc->event_base, v);
+ rdmsrq(hwc->event_base, v);
if (!(v & ARCH_P4_UNFLAGGED_BIT))
return 1;
@@ -897,8 +898,8 @@ static void p4_pmu_disable_pebs(void)
* So at moment let leave metrics turned on forever -- it's
* ok for now but need to be revisited!
*
- * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, 0);
- * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, 0);
+ * (void)wrmsrq_safe(MSR_IA32_PEBS_ENABLE, 0);
+ * (void)wrmsrq_safe(MSR_P4_PEBS_MATRIX_VERT, 0);
*/
}
@@ -911,7 +912,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
* state we need to clear P4_CCCR_OVF, otherwise interrupt get
* asserted again and again
*/
- (void)wrmsrl_safe(hwc->config_base,
+ (void)wrmsrq_safe(hwc->config_base,
p4_config_unpack_cccr(hwc->config) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}
@@ -944,8 +945,8 @@ static void p4_pmu_enable_pebs(u64 config)
bind = &p4_pebs_bind_map[idx];
- (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
- (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
+ (void)wrmsrq_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
+ (void)wrmsrq_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
}
static void __p4_pmu_enable_event(struct perf_event *event)
@@ -979,8 +980,8 @@ static void __p4_pmu_enable_event(struct perf_event *event)
*/
p4_pmu_enable_pebs(hwc->config);
- (void)wrmsrl_safe(escr_addr, escr_conf);
- (void)wrmsrl_safe(hwc->config_base,
+ (void)wrmsrq_safe(escr_addr, escr_conf);
+ (void)wrmsrq_safe(hwc->config_base,
(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}
@@ -1024,7 +1025,7 @@ static int p4_pmu_set_period(struct perf_event *event)
*
* the former idea is taken from OProfile code
*/
- wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
+ wrmsrq(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
}
return ret;
@@ -1072,8 +1073,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
continue;
- if (perf_event_overflow(event, &data, regs))
- x86_pmu_stop(event, 0);
+ perf_event_overflow(event, &data, regs);
}
if (handled)
@@ -1398,7 +1398,7 @@ __init int p4_pmu_init(void)
*/
for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
reg = x86_pmu_config_addr(i);
- wrmsrl_safe(reg, 0ULL);
+ wrmsrq_safe(reg, 0ULL);
}
return 0;
diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
index 65b45e9d7016..6e41de355bd8 100644
--- a/arch/x86/events/intel/p6.c
+++ b/arch/x86/events/intel/p6.c
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
#include "../perf_event.h"
@@ -142,9 +143,9 @@ static void p6_pmu_disable_all(void)
u64 val;
/* p6 only has one enable register */
- rdmsrl(MSR_P6_EVNTSEL0, val);
+ rdmsrq(MSR_P6_EVNTSEL0, val);
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(MSR_P6_EVNTSEL0, val);
+ wrmsrq(MSR_P6_EVNTSEL0, val);
}
static void p6_pmu_enable_all(int added)
@@ -152,9 +153,9 @@ static void p6_pmu_enable_all(int added)
unsigned long val;
/* p6 only has one enable register */
- rdmsrl(MSR_P6_EVNTSEL0, val);
+ rdmsrq(MSR_P6_EVNTSEL0, val);
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(MSR_P6_EVNTSEL0, val);
+ wrmsrq(MSR_P6_EVNTSEL0, val);
}
static inline void
@@ -163,7 +164,7 @@ p6_pmu_disable_event(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
u64 val = P6_NOP_EVENT;
- (void)wrmsrl_safe(hwc->config_base, val);
+ (void)wrmsrq_safe(hwc->config_base, val);
}
static void p6_pmu_enable_event(struct perf_event *event)
@@ -180,7 +181,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
* to actually enable the events.
*/
- (void)wrmsrl_safe(hwc->config_base, val);
+ (void)wrmsrq_safe(hwc->config_base, val);
}
PMU_FORMAT_ATTR(event, "config:0-7" );
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index fa37565f6418..e8cf29d2b10c 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -18,12 +18,13 @@
#include <linux/slab.h>
#include <linux/device.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/perf_event.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/intel_pt.h>
#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
#include "../perf_event.h"
#include "pt.h"
@@ -194,7 +195,7 @@ static int __init pt_pmu_hw_init(void)
int ret;
long i;
- rdmsrl(MSR_PLATFORM_INFO, reg);
+ rdmsrq(MSR_PLATFORM_INFO, reg);
pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;
/*
@@ -230,7 +231,7 @@ static int __init pt_pmu_hw_init(void)
* "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
* post-VMXON.
*/
- rdmsrl(MSR_IA32_VMX_MISC, reg);
+ rdmsrq(MSR_IA32_VMX_MISC, reg);
if (reg & BIT(14))
pt_pmu.vmx = true;
}
@@ -426,7 +427,7 @@ static void pt_config_start(struct perf_event *event)
if (READ_ONCE(pt->vmx_on))
perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
else
- wrmsrl(MSR_IA32_RTIT_CTL, ctl);
+ wrmsrq(MSR_IA32_RTIT_CTL, ctl);
WRITE_ONCE(event->hw.aux_config, ctl);
}
@@ -485,12 +486,12 @@ static u64 pt_config_filters(struct perf_event *event)
/* avoid redundant msr writes */
if (pt->filters.filter[range].msr_a != filter->msr_a) {
- wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a);
+ wrmsrq(pt_address_ranges[range].msr_a, filter->msr_a);
pt->filters.filter[range].msr_a = filter->msr_a;
}
if (pt->filters.filter[range].msr_b != filter->msr_b) {
- wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b);
+ wrmsrq(pt_address_ranges[range].msr_b, filter->msr_b);
pt->filters.filter[range].msr_b = filter->msr_b;
}
@@ -509,7 +510,7 @@ static void pt_config(struct perf_event *event)
/* First round: clear STATUS, in particular the PSB byte counter. */
if (!event->hw.aux_config) {
perf_event_itrace_started(event);
- wrmsrl(MSR_IA32_RTIT_STATUS, 0);
+ wrmsrq(MSR_IA32_RTIT_STATUS, 0);
}
reg = pt_config_filters(event);
@@ -569,7 +570,7 @@ static void pt_config_stop(struct perf_event *event)
ctl &= ~RTIT_CTL_TRACEEN;
if (!READ_ONCE(pt->vmx_on))
- wrmsrl(MSR_IA32_RTIT_CTL, ctl);
+ wrmsrq(MSR_IA32_RTIT_CTL, ctl);
WRITE_ONCE(event->hw.aux_config, ctl);
@@ -658,13 +659,13 @@ static void pt_config_buffer(struct pt_buffer *buf)
reg = virt_to_phys(base);
if (pt->output_base != reg) {
pt->output_base = reg;
- wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, reg);
+ wrmsrq(MSR_IA32_RTIT_OUTPUT_BASE, reg);
}
reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32);
if (pt->output_mask != reg) {
pt->output_mask = reg;
- wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
+ wrmsrq(MSR_IA32_RTIT_OUTPUT_MASK, reg);
}
}
@@ -926,7 +927,7 @@ static void pt_handle_status(struct pt *pt)
int advance = 0;
u64 status;
- rdmsrl(MSR_IA32_RTIT_STATUS, status);
+ rdmsrq(MSR_IA32_RTIT_STATUS, status);
if (status & RTIT_STATUS_ERROR) {
pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
@@ -970,7 +971,7 @@ static void pt_handle_status(struct pt *pt)
if (advance)
pt_buffer_advance(buf);
- wrmsrl(MSR_IA32_RTIT_STATUS, status);
+ wrmsrq(MSR_IA32_RTIT_STATUS, status);
}
/**
@@ -985,12 +986,12 @@ static void pt_read_offset(struct pt_buffer *buf)
struct topa_page *tp;
if (!buf->single) {
- rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base);
+ rdmsrq(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base);
tp = phys_to_virt(pt->output_base);
buf->cur = &tp->topa;
}
- rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask);
+ rdmsrq(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask);
/* offset within current output region */
buf->output_off = pt->output_mask >> 32;
/* index of current output region within this table */
@@ -1585,7 +1586,7 @@ void intel_pt_handle_vmx(int on)
/* Turn PTs back on */
if (!on && event)
- wrmsrl(MSR_IA32_RTIT_CTL, event->hw.aux_config);
+ wrmsrq(MSR_IA32_RTIT_CTL, event->hw.aux_config);
local_irq_restore(flags);
}
@@ -1611,7 +1612,7 @@ static void pt_event_start(struct perf_event *event, int mode)
* PMI might have just cleared these, so resume_allowed
* must be checked again also.
*/
- rdmsrl(MSR_IA32_RTIT_STATUS, status);
+ rdmsrq(MSR_IA32_RTIT_STATUS, status);
if (!(status & (RTIT_STATUS_TRIGGEREN |
RTIT_STATUS_ERROR |
RTIT_STATUS_STOPPED)) &&
@@ -1839,7 +1840,7 @@ static __init int pt_init(void)
for_each_online_cpu(cpu) {
u64 ctl;
- ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
+ ret = rdmsrq_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
if (!ret && (ctl & RTIT_CTL_TRACEEN))
prior_warn++;
}
@@ -1863,6 +1864,8 @@ static __init int pt_init(void)
if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;
+ else
+ pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_PREFER_LARGE;
pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE |
PERF_PMU_CAP_ITRACE |
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index a34e50fc4a8f..e0815a12db90 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -3,6 +3,7 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/msr.h>
#include "uncore.h"
#include "uncore_discovery.h"
@@ -150,7 +151,7 @@ u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *eve
{
u64 count;
- rdmsrl(event->hw.event_base, count);
+ rdmsrq(event->hw.event_base, count);
return count;
}
@@ -305,17 +306,11 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
struct intel_uncore_box *box;
struct perf_event *event;
- unsigned long flags;
int bit;
box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
if (!box->n_active || box->cpu != smp_processor_id())
return HRTIMER_NORESTART;
- /*
- * disable local interrupt to prevent uncore_pmu_event_start/stop
- * to interrupt the update process
- */
- local_irq_save(flags);
/*
* handle boxes with an active event list as opposed to active
@@ -328,8 +323,6 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
uncore_perf_event_update(box, box->events[bit]);
- local_irq_restore(flags);
-
hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
return HRTIMER_RESTART;
}
@@ -337,7 +330,7 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
- HRTIMER_MODE_REL_PINNED);
+ HRTIMER_MODE_REL_PINNED_HARD);
}
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
@@ -347,7 +340,7 @@ void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
- hrtimer_setup(&box->hrtimer, uncore_pmu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_setup(&box->hrtimer, uncore_pmu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
}
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c
index 571e44b49691..18a3022f26a0 100644
--- a/arch/x86/events/intel/uncore_discovery.c
+++ b/arch/x86/events/intel/uncore_discovery.c
@@ -5,6 +5,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <asm/msr.h>
#include "uncore.h"
#include "uncore_discovery.h"
@@ -441,17 +442,17 @@ static u64 intel_generic_uncore_box_ctl(struct intel_uncore_box *box)
void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
{
- wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
+ wrmsrq(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
}
void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
{
- wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
+ wrmsrq(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
}
void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
{
- wrmsrl(intel_generic_uncore_box_ctl(box), 0);
+ wrmsrq(intel_generic_uncore_box_ctl(box), 0);
}
static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
@@ -459,7 +460,7 @@ static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
{
struct hw_perf_event *hwc = &event->hw;
- wrmsrl(hwc->config_base, hwc->config);
+ wrmsrq(hwc->config_base, hwc->config);
}
static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box,
@@ -467,7 +468,7 @@ static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box,
{
struct hw_perf_event *hwc = &event->hw;
- wrmsrl(hwc->config_base, 0);
+ wrmsrq(hwc->config_base, 0);
}
static struct intel_uncore_ops generic_uncore_msr_ops = {
diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c
index 466833478e81..8962e7cb21e3 100644
--- a/arch/x86/events/intel/uncore_nhmex.c
+++ b/arch/x86/events/intel/uncore_nhmex.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Nehalem-EX/Westmere-EX uncore support */
#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
#include "uncore.h"
/* NHM-EX event control */
@@ -200,12 +201,12 @@ DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
{
- wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
+ wrmsrq(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}
static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box)
{
- wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0);
+ wrmsrq(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0);
}
static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
@@ -214,12 +215,12 @@ static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
u64 config;
if (msr) {
- rdmsrl(msr, config);
+ rdmsrq(msr, config);
config &= ~((1ULL << uncore_num_counters(box)) - 1);
/* WBox has a fixed counter */
if (uncore_msr_fixed_ctl(box))
config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
- wrmsrl(msr, config);
+ wrmsrq(msr, config);
}
}
@@ -229,18 +230,18 @@ static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
u64 config;
if (msr) {
- rdmsrl(msr, config);
+ rdmsrq(msr, config);
config |= (1ULL << uncore_num_counters(box)) - 1;
/* WBox has a fixed counter */
if (uncore_msr_fixed_ctl(box))
config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
- wrmsrl(msr, config);
+ wrmsrq(msr, config);
}
}
static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
- wrmsrl(event->hw.config_base, 0);
+ wrmsrq(event->hw.config_base, 0);
}
static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
@@ -248,11 +249,11 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
struct hw_perf_event *hwc = &event->hw;
if (hwc->idx == UNCORE_PMC_IDX_FIXED)
- wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
+ wrmsrq(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
- wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
+ wrmsrq(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
else
- wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
+ wrmsrq(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
#define NHMEX_UNCORE_OPS_COMMON_INIT() \
@@ -382,10 +383,10 @@ static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct per
struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
if (reg1->idx != EXTRA_REG_NONE) {
- wrmsrl(reg1->reg, reg1->config);
- wrmsrl(reg1->reg + 1, reg2->config);
+ wrmsrq(reg1->reg, reg1->config);
+ wrmsrq(reg1->reg + 1, reg2->config);
}
- wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
+ wrmsrq(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
(hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
}
@@ -467,12 +468,12 @@ static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct per
struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
if (reg1->idx != EXTRA_REG_NONE) {
- wrmsrl(reg1->reg, 0);
- wrmsrl(reg1->reg + 1, reg1->config);
- wrmsrl(reg1->reg + 2, reg2->config);
- wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
+ wrmsrq(reg1->reg, 0);
+ wrmsrq(reg1->reg + 1, reg1->config);
+ wrmsrq(reg1->reg + 2, reg2->config);
+ wrmsrq(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
}
- wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
+ wrmsrq(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
}
static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
@@ -842,25 +843,25 @@ static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct per
idx = __BITS_VALUE(reg1->idx, 0, 8);
if (idx != 0xff)
- wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
+ wrmsrq(__BITS_VALUE(reg1->reg, 0, 16),
nhmex_mbox_shared_reg_config(box, idx));
idx = __BITS_VALUE(reg1->idx, 1, 8);
if (idx != 0xff)
- wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
+ wrmsrq(__BITS_VALUE(reg1->reg, 1, 16),
nhmex_mbox_shared_reg_config(box, idx));
if (reg2->idx != EXTRA_REG_NONE) {
- wrmsrl(reg2->reg, 0);
+ wrmsrq(reg2->reg, 0);
if (reg2->config != ~0ULL) {
- wrmsrl(reg2->reg + 1,
+ wrmsrq(reg2->reg + 1,
reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
- wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
+ wrmsrq(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
- wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
+ wrmsrq(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
}
}
- wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
+ wrmsrq(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
@@ -1121,31 +1122,31 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per
switch (idx % 6) {
case 0:
- wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
+ wrmsrq(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
break;
case 1:
- wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
+ wrmsrq(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
break;
case 2:
case 3:
- wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
+ wrmsrq(NHMEX_R_MSR_PORTN_QLX_CFG(port),
uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
break;
case 4:
- wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
+ wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
hwc->config >> 32);
- wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
- wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
+ wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
+ wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
break;
case 5:
- wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
+ wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
hwc->config >> 32);
- wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
- wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
+ wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
+ wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
break;
}
- wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
+ wrmsrq(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
}
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index edb7fd50efe0..a1a96833e30e 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
+#include <asm/msr.h>
#include "uncore.h"
#include "uncore_discovery.h"
@@ -260,34 +261,34 @@ static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct per
struct hw_perf_event *hwc = &event->hw;
if (hwc->idx < UNCORE_PMC_IDX_FIXED)
- wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+ wrmsrq(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
else
- wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
+ wrmsrq(hwc->config_base, SNB_UNC_CTL_EN);
}
static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
- wrmsrl(event->hw.config_base, 0);
+ wrmsrq(event->hw.config_base, 0);
}
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
if (box->pmu->pmu_idx == 0) {
- wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
+ wrmsrq(SNB_UNC_PERF_GLOBAL_CTL,
SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}
}
static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
- wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
+ wrmsrq(SNB_UNC_PERF_GLOBAL_CTL,
SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}
static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
if (box->pmu->pmu_idx == 0)
- wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
+ wrmsrq(SNB_UNC_PERF_GLOBAL_CTL, 0);
}
static struct uncore_event_desc snb_uncore_events[] = {
@@ -372,7 +373,7 @@ void snb_uncore_cpu_init(void)
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
if (box->pmu->pmu_idx == 0) {
- wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
+ wrmsrq(SKL_UNC_PERF_GLOBAL_CTL,
SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}
@@ -383,14 +384,14 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
- wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
+ wrmsrq(SKL_UNC_PERF_GLOBAL_CTL,
SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}
static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
if (box->pmu->pmu_idx == 0)
- wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
+ wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, 0);
}
static struct intel_uncore_ops skl_uncore_msr_ops = {
@@ -504,7 +505,7 @@ static int icl_get_cbox_num(void)
{
u64 num_boxes;
- rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);
+ rdmsrq(ICL_UNC_CBO_CONFIG, num_boxes);
return num_boxes & ICL_UNC_NUM_CBO_MASK;
}
@@ -525,7 +526,7 @@ static struct intel_uncore_type *tgl_msr_uncores[] = {
static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
{
if (box->pmu->pmu_idx == 0)
- wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
+ wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}
void tgl_uncore_cpu_init(void)
@@ -541,24 +542,24 @@ void tgl_uncore_cpu_init(void)
static void adl_uncore_msr_init_box(struct intel_uncore_box *box)
{
if (box->pmu->pmu_idx == 0)
- wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
+ wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}
static void adl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
- wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
+ wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}
static void adl_uncore_msr_disable_box(struct intel_uncore_box *box)
{
if (box->pmu->pmu_idx == 0)
- wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
+ wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, 0);
}
static void adl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
if (box->pmu->pmu_idx == 0)
- wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
+ wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, 0);
}
static struct intel_uncore_ops adl_uncore_msr_ops = {
@@ -691,7 +692,7 @@ static struct intel_uncore_type mtl_uncore_hac_cbox = {
static void mtl_uncore_msr_init_box(struct intel_uncore_box *box)
{
- wrmsrl(uncore_msr_box_ctl(box), SNB_UNC_GLOBAL_CTL_EN);
+ wrmsrq(uncore_msr_box_ctl(box), SNB_UNC_GLOBAL_CTL_EN);
}
static struct intel_uncore_ops mtl_uncore_msr_ops = {
@@ -758,7 +759,7 @@ static struct intel_uncore_type *lnl_msr_uncores[] = {
static void lnl_uncore_msr_init_box(struct intel_uncore_box *box)
{
if (box->pmu->pmu_idx == 0)
- wrmsrl(LNL_UNC_MSR_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
+ wrmsrq(LNL_UNC_MSR_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}
static struct intel_uncore_ops lnl_uncore_msr_ops = {
@@ -1306,12 +1307,12 @@ int skl_uncore_pci_init(void)
/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
- wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
+ wrmsrq(NHM_UNC_PERF_GLOBAL_CTL, 0);
}
static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
- wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
+ wrmsrq(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}
static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
@@ -1319,9 +1320,9 @@ static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct per
struct hw_perf_event *hwc = &event->hw;
if (hwc->idx < UNCORE_PMC_IDX_FIXED)
- wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+ wrmsrq(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
else
- wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
+ wrmsrq(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}
static struct attribute *nhm_uncore_formats_attr[] = {
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 60973c209c0e..2824dc9950be 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
#include "uncore.h"
#include "uncore_discovery.h"
@@ -618,9 +619,9 @@ static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
msr = uncore_msr_box_ctl(box);
if (msr) {
- rdmsrl(msr, config);
+ rdmsrq(msr, config);
config |= SNBEP_PMON_BOX_CTL_FRZ;
- wrmsrl(msr, config);
+ wrmsrq(msr, config);
}
}
@@ -631,9 +632,9 @@ static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
msr = uncore_msr_box_ctl(box);
if (msr) {
- rdmsrl(msr, config);
+ rdmsrq(msr, config);
config &= ~SNBEP_PMON_BOX_CTL_FRZ;
- wrmsrl(msr, config);
+ wrmsrq(msr, config);
}
}
@@ -643,9 +644,9 @@ static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
if (reg1->idx != EXTRA_REG_NONE)
- wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
+ wrmsrq(reg1->reg, uncore_shared_reg_config(box, 0));
- wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+ wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
@@ -653,7 +654,7 @@ static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
{
struct hw_perf_event *hwc = &event->hw;
- wrmsrl(hwc->config_base, hwc->config);
+ wrmsrq(hwc->config_base, hwc->config);
}
static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
@@ -661,7 +662,7 @@ static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
unsigned msr = uncore_msr_box_ctl(box);
if (msr)
- wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
+ wrmsrq(msr, SNBEP_PMON_BOX_CTL_INT);
}
static struct attribute *snbep_uncore_formats_attr[] = {
@@ -1532,7 +1533,7 @@ static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
unsigned msr = uncore_msr_box_ctl(box);
if (msr)
- wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
+ wrmsrq(msr, IVBEP_PMON_BOX_CTL_INT);
}
static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
@@ -1783,11 +1784,11 @@ static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_ev
if (reg1->idx != EXTRA_REG_NONE) {
u64 filter = uncore_shared_reg_config(box, 0);
- wrmsrl(reg1->reg, filter & 0xffffffff);
- wrmsrl(reg1->reg + 6, filter >> 32);
+ wrmsrq(reg1->reg, filter & 0xffffffff);
+ wrmsrq(reg1->reg + 6, filter >> 32);
}
- wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+ wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
@@ -2767,11 +2768,11 @@ static void hswep_cbox_enable_event(struct intel_uncore_box *box,
if (reg1->idx != EXTRA_REG_NONE) {
u64 filter = uncore_shared_reg_config(box, 0);
- wrmsrl(reg1->reg, filter & 0xffffffff);
- wrmsrl(reg1->reg + 1, filter >> 32);
+ wrmsrq(reg1->reg, filter & 0xffffffff);
+ wrmsrq(reg1->reg + 1, filter >> 32);
}
- wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+ wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
@@ -2816,7 +2817,7 @@ static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
for_each_set_bit(i, (unsigned long *)&init, 64) {
flags |= (1ULL << i);
- wrmsrl(msr, flags);
+ wrmsrq(msr, flags);
}
}
}
@@ -3708,7 +3709,7 @@ static void skx_iio_enable_event(struct intel_uncore_box *box,
{
struct hw_perf_event *hwc = &event->hw;
- wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+ wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops skx_uncore_iio_ops = {
@@ -3765,7 +3766,7 @@ static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
{
u64 msr_value;
- if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
+ if (rdmsrq_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
!(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
return -ENXIO;
@@ -4655,9 +4656,9 @@ static void snr_cha_enable_event(struct intel_uncore_box *box,
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
if (reg1->idx != EXTRA_REG_NONE)
- wrmsrl(reg1->reg, reg1->config);
+ wrmsrq(reg1->reg, reg1->config);
- wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+ wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops snr_uncore_chabox_ops = {
@@ -4891,28 +4892,28 @@ static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
/* Free-Running IIO BANDWIDTH IN Counters */
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
{ /* end: all zeroes */ },
};
@@ -5485,37 +5486,6 @@ static struct freerunning_counters icx_iio_freerunning[] = {
[ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};
-static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
- /* Free-Running IIO CLOCKS Counter */
- INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
- /* Free-Running IIO BANDWIDTH IN Counters */
- INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
- { /* end: all zeroes */ },
-};
-
static struct intel_uncore_type icx_uncore_iio_free_running = {
.name = "iio_free_running",
.num_counters = 9,
@@ -5523,7 +5493,7 @@ static struct intel_uncore_type icx_uncore_iio_free_running = {
.num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX,
.freerunning = icx_iio_freerunning,
.ops = &skx_uncore_iio_freerunning_ops,
- .event_descs = icx_uncore_iio_freerunning_events,
+ .event_descs = snr_uncore_iio_freerunning_events,
.format_group = &skx_uncore_iio_freerunning_format_group,
};
@@ -5913,9 +5883,9 @@ static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
if (reg1->idx != EXTRA_REG_NONE)
- wrmsrl(reg1->reg, reg1->config);
+ wrmsrq(reg1->reg, reg1->config);
- wrmsrl(hwc->config_base, hwc->config);
+ wrmsrq(hwc->config_base, hwc->config);
}
static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
@@ -5925,9 +5895,9 @@ static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
if (reg1->idx != EXTRA_REG_NONE)
- wrmsrl(reg1->reg, 0);
+ wrmsrq(reg1->reg, 0);
- wrmsrl(hwc->config_base, 0);
+ wrmsrq(hwc->config_base, 0);
}
static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@@ -6320,69 +6290,13 @@ static struct freerunning_counters spr_iio_freerunning[] = {
[SPR_IIO_MSR_BW_OUT] = { 0x3808, 0x1, 0x10, 8, 48 },
};
-static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
- /* Free-Running IIO CLOCKS Counter */
- INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
- /* Free-Running IIO BANDWIDTH IN Counters */
- INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
- /* Free-Running IIO BANDWIDTH OUT Counters */
- INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x30"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x31"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x32"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x33"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port4, "event=0xff,umask=0x34"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port5, "event=0xff,umask=0x35"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port6, "event=0xff,umask=0x36"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port7, "event=0xff,umask=0x37"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit, "MiB"),
- { /* end: all zeroes */ },
-};
-
static struct intel_uncore_type spr_uncore_iio_free_running = {
.name = "iio_free_running",
.num_counters = 17,
.num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX,
.freerunning = spr_iio_freerunning,
.ops = &skx_uncore_iio_freerunning_ops,
- .event_descs = spr_uncore_iio_freerunning_events,
+ .event_descs = snr_uncore_iio_freerunning_events,
.format_group = &skx_uncore_iio_freerunning_format_group,
};
@@ -6572,7 +6486,7 @@ void spr_uncore_cpu_init(void)
* of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a
* firmware bug. Using the value from SPR_MSR_UNC_CBO_CONFIG to replace it.
*/
- rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
+ rdmsrq(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
/*
* The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact
* the EMR XCC. Don't let the value from the MSR replace the existing value.
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 45b1866ff051..7f5007a4752a 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -3,6 +3,8 @@
#include <linux/sysfs.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
+
#include "probe.h"
enum perf_msr_id {
@@ -231,7 +233,7 @@ static inline u64 msr_read_counter(struct perf_event *event)
u64 now;
if (event->hw.event_base)
- rdmsrl(event->hw.event_base, now);
+ rdmsrq(event->hw.event_base, now);
else
now = rdtsc_ordered();
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 2c0ce0e9545e..2b969386dcdd 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -17,6 +17,7 @@
#include <asm/fpu/xstate.h>
#include <asm/intel_ds.h>
#include <asm/cpu.h>
+#include <asm/msr.h>
/* To enable MSR tracing please use the generic trace points. */
@@ -110,14 +111,26 @@ static inline bool is_topdown_event(struct perf_event *event)
return is_metric_event(event) || is_slots_event(event);
}
+int is_x86_event(struct perf_event *event);
+
+static inline bool check_leader_group(struct perf_event *leader, int flags)
+{
+ return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;
+}
+
static inline bool is_branch_counters_group(struct perf_event *event)
{
- return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
+ return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);
}
static inline bool is_pebs_counter_event_group(struct perf_event *event)
{
- return event->group_leader->hw.flags & PERF_X86_EVENT_PEBS_CNTR;
+ return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR);
+}
+
+static inline bool is_acr_event_group(struct perf_event *event)
+{
+ return check_leader_group(event->group_leader, PERF_X86_EVENT_ACR);
}
struct amd_nb {
@@ -261,6 +274,7 @@ struct cpu_hw_events {
struct event_constraint *event_constraint[X86_PMC_IDX_MAX];
int n_excl; /* the number of exclusive events */
+ int n_late_setup; /* the num of events needs late setup */
unsigned int txn_flags;
int is_fake;
@@ -286,6 +300,10 @@ struct cpu_hw_events {
u64 fixed_ctrl_val;
u64 active_fixed_ctrl_val;
+ /* Intel ACR configuration */
+ u64 acr_cfg_b[X86_PMC_IDX_MAX];
+ u64 acr_cfg_c[X86_PMC_IDX_MAX];
+
/*
* Intel LBR bits
*/
@@ -707,6 +725,15 @@ struct x86_hybrid_pmu {
u64 fixed_cntr_mask64;
unsigned long fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
};
+
+ union {
+ u64 acr_cntr_mask64;
+ unsigned long acr_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ };
+ union {
+ u64 acr_cause_mask64;
+ unsigned long acr_cause_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ };
struct event_constraint unconstrained;
u64 hw_cache_event_ids
@@ -789,6 +816,10 @@ struct x86_pmu {
int (*hw_config)(struct perf_event *event);
int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
void (*late_setup)(void);
+ void (*pebs_enable)(struct perf_event *event);
+ void (*pebs_disable)(struct perf_event *event);
+ void (*pebs_enable_all)(void);
+ void (*pebs_disable_all)(void);
unsigned eventsel;
unsigned perfctr;
unsigned fixedctr;
@@ -805,6 +836,14 @@ struct x86_pmu {
u64 fixed_cntr_mask64;
unsigned long fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
};
+ union {
+ u64 acr_cntr_mask64;
+ unsigned long acr_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ };
+ union {
+ u64 acr_cause_mask64;
+ unsigned long acr_cause_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ };
int cntval_bits;
u64 cntval_mask;
union {
@@ -871,7 +910,7 @@ struct x86_pmu {
*/
unsigned int bts :1,
bts_active :1,
- pebs :1,
+ ds_pebs :1,
pebs_active :1,
pebs_broken :1,
pebs_prec_dist :1,
@@ -1042,6 +1081,7 @@ do { \
#define PMU_FL_MEM_LOADS_AUX 0x100 /* Require an auxiliary event for the complete memory info */
#define PMU_FL_RETIRE_LATENCY 0x200 /* Support Retire Latency in PEBS */
#define PMU_FL_BR_CNTR 0x400 /* Support branch counter logging */
+#define PMU_FL_DYN_CONSTRAINT 0x800 /* Needs dynamic constraint */
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -1084,6 +1124,7 @@ static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\
.pmu_type = _pmu, \
}
+int is_x86_event(struct perf_event *event);
struct pmu *x86_get_pmu(unsigned int cpu);
extern struct x86_pmu x86_pmu __read_mostly;
@@ -1091,6 +1132,10 @@ DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update);
DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
DECLARE_STATIC_CALL(x86_pmu_late_setup, *x86_pmu.late_setup);
+DECLARE_STATIC_CALL(x86_pmu_pebs_enable, *x86_pmu.pebs_enable);
+DECLARE_STATIC_CALL(x86_pmu_pebs_disable, *x86_pmu.pebs_disable);
+DECLARE_STATIC_CALL(x86_pmu_pebs_enable_all, *x86_pmu.pebs_enable_all);
+DECLARE_STATIC_CALL(x86_pmu_pebs_disable_all, *x86_pmu.pebs_disable_all);
static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
{
@@ -1198,16 +1243,16 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
if (hwc->extra_reg.reg)
- wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
+ wrmsrq(hwc->extra_reg.reg, hwc->extra_reg.config);
/*
* Add enabled Merge event on next counter
* if large increment event being enabled on this counter
*/
if (is_counter_pair(hwc))
- wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
+ wrmsrq(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
- wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
+ wrmsrq(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}
void x86_pmu_enable_all(int added);
@@ -1223,10 +1268,10 @@ static inline void x86_pmu_disable_event(struct perf_event *event)
u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
struct hw_perf_event *hwc = &event->hw;
- wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
+ wrmsrq(hwc->config_base, hwc->config & ~disable_mask);
if (is_counter_pair(hwc))
- wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
+ wrmsrq(x86_pmu_config_addr(hwc->idx + 1), 0);
}
void x86_pmu_enable_event(struct perf_event *event);
@@ -1394,12 +1439,12 @@ static __always_inline void __amd_pmu_lbr_disable(void)
{
u64 dbg_ctl, dbg_extn_cfg;
- rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
- wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
+ rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+ wrmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
- rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+ wrmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
}
}
@@ -1531,21 +1576,21 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
static __always_inline void __intel_pmu_pebs_disable_all(void)
{
- wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+ wrmsrq(MSR_IA32_PEBS_ENABLE, 0);
}
static __always_inline void __intel_pmu_arch_lbr_disable(void)
{
- wrmsrl(MSR_ARCH_LBR_CTL, 0);
+ wrmsrq(MSR_ARCH_LBR_CTL, 0);
}
static __always_inline void __intel_pmu_lbr_disable(void)
{
u64 debugctl;
- rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
}
int intel_pmu_save_and_restart(struct perf_event *event);
@@ -1580,6 +1625,8 @@ void intel_pmu_disable_bts(void);
int intel_pmu_drain_bts_buffer(void);
+void intel_pmu_late_setup(void);
+
u64 grt_latency_data(struct perf_event *event, u64 status);
u64 cmt_latency_data(struct perf_event *event, u64 status);
@@ -1636,11 +1683,13 @@ void intel_pmu_pebs_disable_all(void);
void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
+void intel_pmu_pebs_late_setup(struct cpu_hw_events *cpuc);
+
void intel_pmu_drain_pebs_buffer(void);
void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
-void intel_ds_init(void);
+void intel_pebs_init(void);
void intel_pmu_lbr_save_brstack(struct perf_sample_data *data,
struct cpu_hw_events *cpuc,
diff --git a/arch/x86/events/perf_event_flags.h b/arch/x86/events/perf_event_flags.h
index 1d9e385649b5..70078334e4a3 100644
--- a/arch/x86/events/perf_event_flags.h
+++ b/arch/x86/events/perf_event_flags.h
@@ -2,23 +2,24 @@
/*
* struct hw_perf_event.flags flags
*/
-PERF_ARCH(PEBS_LDLAT, 0x00001) /* ld+ldlat data address sampling */
-PERF_ARCH(PEBS_ST, 0x00002) /* st data address sampling */
-PERF_ARCH(PEBS_ST_HSW, 0x00004) /* haswell style datala, store */
-PERF_ARCH(PEBS_LD_HSW, 0x00008) /* haswell style datala, load */
-PERF_ARCH(PEBS_NA_HSW, 0x00010) /* haswell style datala, unknown */
-PERF_ARCH(EXCL, 0x00020) /* HT exclusivity on counter */
-PERF_ARCH(DYNAMIC, 0x00040) /* dynamic alloc'd constraint */
-PERF_ARCH(PEBS_CNTR, 0x00080) /* PEBS counters snapshot */
-PERF_ARCH(EXCL_ACCT, 0x00100) /* accounted EXCL event */
-PERF_ARCH(AUTO_RELOAD, 0x00200) /* use PEBS auto-reload */
-PERF_ARCH(LARGE_PEBS, 0x00400) /* use large PEBS */
-PERF_ARCH(PEBS_VIA_PT, 0x00800) /* use PT buffer for PEBS */
-PERF_ARCH(PAIR, 0x01000) /* Large Increment per Cycle */
-PERF_ARCH(LBR_SELECT, 0x02000) /* Save/Restore MSR_LBR_SELECT */
-PERF_ARCH(TOPDOWN, 0x04000) /* Count Topdown slots/metrics events */
-PERF_ARCH(PEBS_STLAT, 0x08000) /* st+stlat data address sampling */
-PERF_ARCH(AMD_BRS, 0x10000) /* AMD Branch Sampling */
-PERF_ARCH(PEBS_LAT_HYBRID, 0x20000) /* ld and st lat for hybrid */
-PERF_ARCH(NEEDS_BRANCH_STACK, 0x40000) /* require branch stack setup */
-PERF_ARCH(BRANCH_COUNTERS, 0x80000) /* logs the counters in the extra space of each branch */
+PERF_ARCH(PEBS_LDLAT, 0x0000001) /* ld+ldlat data address sampling */
+PERF_ARCH(PEBS_ST, 0x0000002) /* st data address sampling */
+PERF_ARCH(PEBS_ST_HSW, 0x0000004) /* haswell style datala, store */
+PERF_ARCH(PEBS_LD_HSW, 0x0000008) /* haswell style datala, load */
+PERF_ARCH(PEBS_NA_HSW, 0x0000010) /* haswell style datala, unknown */
+PERF_ARCH(EXCL, 0x0000020) /* HT exclusivity on counter */
+PERF_ARCH(DYNAMIC, 0x0000040) /* dynamic alloc'd constraint */
+PERF_ARCH(PEBS_CNTR, 0x0000080) /* PEBS counters snapshot */
+PERF_ARCH(EXCL_ACCT, 0x0000100) /* accounted EXCL event */
+PERF_ARCH(AUTO_RELOAD, 0x0000200) /* use PEBS auto-reload */
+PERF_ARCH(LARGE_PEBS, 0x0000400) /* use large PEBS */
+PERF_ARCH(PEBS_VIA_PT, 0x0000800) /* use PT buffer for PEBS */
+PERF_ARCH(PAIR, 0x0001000) /* Large Increment per Cycle */
+PERF_ARCH(LBR_SELECT, 0x0002000) /* Save/Restore MSR_LBR_SELECT */
+PERF_ARCH(TOPDOWN, 0x0004000) /* Count Topdown slots/metrics events */
+PERF_ARCH(PEBS_STLAT, 0x0008000) /* st+stlat data address sampling */
+PERF_ARCH(AMD_BRS, 0x0010000) /* AMD Branch Sampling */
+PERF_ARCH(PEBS_LAT_HYBRID, 0x0020000) /* ld and st lat for hybrid */
+PERF_ARCH(NEEDS_BRANCH_STACK, 0x0040000) /* require branch stack setup */
+PERF_ARCH(BRANCH_COUNTERS, 0x0080000) /* logs the counters in the extra space of each branch */
+PERF_ARCH(ACR, 0x0100000) /* Auto counter reload */
diff --git a/arch/x86/events/probe.c b/arch/x86/events/probe.c
index 600bf8d15c0c..bb719d0d3f0b 100644
--- a/arch/x86/events/probe.c
+++ b/arch/x86/events/probe.c
@@ -2,6 +2,8 @@
#include <linux/export.h>
#include <linux/types.h>
#include <linux/bits.h>
+
+#include <asm/msr.h>
#include "probe.h"
static umode_t
@@ -43,7 +45,7 @@ perf_msr_probe(struct perf_msr *msr, int cnt, bool zero, void *data)
if (msr[bit].test && !msr[bit].test(bit, data))
continue;
/* Virt sucks; you cannot tell if a R/O MSR is present :/ */
- if (rdmsrl_safe(msr[bit].msr, &val))
+ if (rdmsrq_safe(msr[bit].msr, &val))
continue;
mask = msr[bit].mask;
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 8ddace8cea96..defd86137f12 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -65,6 +65,7 @@
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/msr.h>
#include "perf_event.h"
#include "probe.h"
@@ -192,7 +193,7 @@ static inline unsigned int get_rapl_pmu_idx(int cpu, int scope)
static inline u64 rapl_read_counter(struct perf_event *event)
{
u64 raw;
- rdmsrl(event->hw.event_base, raw);
+ rdmsrq(event->hw.event_base, raw);
return raw;
}
@@ -221,7 +222,7 @@ static u64 rapl_event_update(struct perf_event *event)
prev_raw_count = local64_read(&hwc->prev_count);
do {
- rdmsrl(event->hw.event_base, new_raw_count);
+ rdmsrq(event->hw.event_base, new_raw_count);
} while (!local64_try_cmpxchg(&hwc->prev_count,
&prev_raw_count, new_raw_count));
@@ -610,8 +611,8 @@ static int rapl_check_hw_unit(void)
u64 msr_rapl_power_unit_bits;
int i;
- /* protect rdmsrl() to handle virtualization */
- if (rdmsrl_safe(rapl_model->msr_power_unit, &msr_rapl_power_unit_bits))
+ /* protect rdmsrq() to handle virtualization */
+ if (rdmsrq_safe(rapl_model->msr_power_unit, &msr_rapl_power_unit_bits))
return -1;
for (i = 0; i < NR_RAPL_PKG_DOMAINS; i++)
rapl_pkg_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
diff --git a/arch/x86/events/utils.c b/arch/x86/events/utils.c
index dab4ed199227..77fd00b3305e 100644
--- a/arch/x86/events/utils.c
+++ b/arch/x86/events/utils.c
@@ -2,6 +2,7 @@
#include <asm/insn.h>
#include <linux/mm.h>
+#include <asm/msr.h>
#include "perf_event.h"
static int decode_branch_type(struct insn *insn)
diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c
index 2fd9b0cf9a5e..4bdfcf091200 100644
--- a/arch/x86/events/zhaoxin/core.c
+++ b/arch/x86/events/zhaoxin/core.c
@@ -15,6 +15,7 @@
#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/apic.h>
+#include <asm/msr.h>
#include "../perf_event.h"
@@ -254,26 +255,26 @@ static __initconst const u64 zxe_hw_cache_event_ids
static void zhaoxin_pmu_disable_all(void)
{
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}
static void zhaoxin_pmu_enable_all(int added)
{
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
+ wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}
static inline u64 zhaoxin_pmu_get_status(void)
{
u64 status;
- rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+ rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status);
return status;
}
static inline void zhaoxin_pmu_ack_status(u64 ack)
{
- wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
+ wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static inline void zxc_pmu_ack_status(u64 ack)
@@ -293,9 +294,9 @@ static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc)
mask = 0xfULL << (idx * 4);
- rdmsrl(hwc->config_base, ctrl_val);
+ rdmsrq(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
- wrmsrl(hwc->config_base, ctrl_val);
+ wrmsrq(hwc->config_base, ctrl_val);
}
static void zhaoxin_pmu_disable_event(struct perf_event *event)
@@ -329,10 +330,10 @@ static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc)
bits <<= (idx * 4);
mask = 0xfULL << (idx * 4);
- rdmsrl(hwc->config_base, ctrl_val);
+ rdmsrq(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
ctrl_val |= bits;
- wrmsrl(hwc->config_base, ctrl_val);
+ wrmsrq(hwc->config_base, ctrl_val);
}
static void zhaoxin_pmu_enable_event(struct perf_event *event)
@@ -397,8 +398,7 @@ again:
if (!x86_perf_event_set_period(event))
continue;
- if (perf_event_overflow(event, &data, regs))
- x86_pmu_stop(event, 0);
+ perf_event_overflow(event, &data, regs);
}
/*
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 6d91ac5f9836..bfde0a3498b9 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -28,6 +28,7 @@
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <asm/apic.h>
+#include <asm/msr.h>
#include <asm/trace/hyperv.h>
@@ -37,7 +38,7 @@ static u64 hv_apic_icr_read(void)
{
u64 reg_val;
- rdmsrl(HV_X64_MSR_ICR, reg_val);
+ rdmsrq(HV_X64_MSR_ICR, reg_val);
return reg_val;
}
@@ -49,7 +50,7 @@ static void hv_apic_icr_write(u32 low, u32 id)
reg_val = reg_val << 32;
reg_val |= low;
- wrmsrl(HV_X64_MSR_ICR, reg_val);
+ wrmsrq(HV_X64_MSR_ICR, reg_val);
}
static u32 hv_apic_read(u32 reg)
@@ -75,10 +76,10 @@ static void hv_apic_write(u32 reg, u32 val)
{
switch (reg) {
case APIC_EOI:
- wrmsr(HV_X64_MSR_EOI, val, 0);
+ wrmsrq(HV_X64_MSR_EOI, val);
break;
case APIC_TASKPRI:
- wrmsr(HV_X64_MSR_TPR, val, 0);
+ wrmsrq(HV_X64_MSR_TPR, val);
break;
default:
native_apic_mem_write(reg, val);
@@ -92,7 +93,7 @@ static void hv_apic_eoi_write(void)
if (hvp && (xchg(&hvp->apic_assist, 0) & 0x1))
return;
- wrmsr(HV_X64_MSR_EOI, APIC_EOI_ACK, 0);
+ wrmsrq(HV_X64_MSR_EOI, APIC_EOI_ACK);
}
static bool cpu_is_self(int cpu)
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index ddeb40930bc8..3d1d3547095a 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -21,6 +21,7 @@
#include <asm/hypervisor.h>
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
+#include <asm/msr.h>
#include <asm/idtentry.h>
#include <asm/set_memory.h>
#include <linux/kexec.h>
@@ -62,7 +63,7 @@ static int hyperv_init_ghcb(void)
* returned by MSR_AMD64_SEV_ES_GHCB is above shared
* memory boundary and map it here.
*/
- rdmsrl(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);
+ rdmsrq(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
ghcb_gpa &= ~ms_hyperv.shared_gpa_boundary;
@@ -95,7 +96,7 @@ static int hv_cpu_init(unsigned int cpu)
* For root partition we get the hypervisor provided VP assist
* page, instead of allocating a new page.
*/
- rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
+ rdmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
*hvp = memremap(msr.pfn << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT,
PAGE_SIZE, MEMREMAP_WB);
} else {
@@ -128,7 +129,7 @@ static int hv_cpu_init(unsigned int cpu)
}
if (!WARN_ON(!(*hvp))) {
msr.enable = 1;
- wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
+ wrmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
}
return hyperv_init_ghcb();
@@ -140,7 +141,7 @@ static void hv_reenlightenment_notify(struct work_struct *dummy)
{
struct hv_tsc_emulation_status emu_status;
- rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
+ rdmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
/* Don't issue the callback if TSC accesses are not emulated */
if (hv_reenlightenment_cb && emu_status.inprogress)
@@ -153,11 +154,11 @@ void hyperv_stop_tsc_emulation(void)
u64 freq;
struct hv_tsc_emulation_status emu_status;
- rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
+ rdmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
emu_status.inprogress = 0;
- wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
+ wrmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
- rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
+ rdmsrq(HV_X64_MSR_TSC_FREQUENCY, freq);
tsc_khz = div64_u64(freq, 1000);
}
EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);
@@ -203,8 +204,8 @@ void set_hv_tscchange_cb(void (*cb)(void))
re_ctrl.target_vp = hv_vp_index[get_cpu()];
- wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
- wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));
+ wrmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
+ wrmsrq(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));
put_cpu();
}
@@ -217,9 +218,9 @@ void clear_hv_tscchange_cb(void)
if (!hv_reenlightenment_available())
return;
- rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
+ rdmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
re_ctrl.enabled = 0;
- wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
+ wrmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
hv_reenlightenment_cb = NULL;
}
@@ -251,16 +252,16 @@ static int hv_cpu_die(unsigned int cpu)
*/
memunmap(hv_vp_assist_page[cpu]);
hv_vp_assist_page[cpu] = NULL;
- rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
+ rdmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
msr.enable = 0;
}
- wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
+ wrmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
}
if (hv_reenlightenment_cb == NULL)
return 0;
- rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
+ rdmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
if (re_ctrl.target_vp == hv_vp_index[cpu]) {
/*
* Reassign reenlightenment notifications to some other online
@@ -274,7 +275,7 @@ static int hv_cpu_die(unsigned int cpu)
else
re_ctrl.enabled = 0;
- wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
+ wrmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
}
return 0;
@@ -331,9 +332,9 @@ static int hv_suspend(void)
hv_hypercall_pg = NULL;
/* Disable the hypercall page in the hypervisor */
- rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
hypercall_msr.enable = 0;
- wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
ret = hv_cpu_die(0);
return ret;
@@ -348,11 +349,11 @@ static void hv_resume(void)
WARN_ON(ret);
/* Re-enable the hypercall page */
- rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
hypercall_msr.enable = 1;
hypercall_msr.guest_physical_address =
vmalloc_to_pfn(hv_hypercall_pg_saved);
- wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
hv_hypercall_pg = hv_hypercall_pg_saved;
hv_hypercall_pg_saved = NULL;
@@ -390,40 +391,6 @@ static void __init hv_stimer_setup_percpu_clockev(void)
old_setup_percpu_clockev();
}
-#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
-static u8 __init get_vtl(void)
-{
- u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
- struct hv_input_get_vp_registers *input;
- struct hv_output_get_vp_registers *output;
- unsigned long flags;
- u64 ret;
-
- local_irq_save(flags);
- input = *this_cpu_ptr(hyperv_pcpu_input_arg);
- output = *this_cpu_ptr(hyperv_pcpu_output_arg);
-
- memset(input, 0, struct_size(input, names, 1));
- input->partition_id = HV_PARTITION_ID_SELF;
- input->vp_index = HV_VP_INDEX_SELF;
- input->input_vtl.as_uint8 = 0;
- input->names[0] = HV_REGISTER_VSM_VP_STATUS;
-
- ret = hv_do_hypercall(control, input, output);
- if (hv_result_success(ret)) {
- ret = output->values[0].reg8 & HV_X64_VTL_MASK;
- } else {
- pr_err("Failed to get VTL(error: %lld) exiting...\n", ret);
- BUG();
- }
-
- local_irq_restore(flags);
- return ret;
-}
-#else
-static inline u8 get_vtl(void) { return 0; }
-#endif
-
/*
* This function is to be invoked early in the boot sequence after the
* hypervisor has been detected.
@@ -499,7 +466,7 @@ void __init hyperv_init(void)
* in such a VM and is only used in such a VM.
*/
guest_id = hv_generate_guest_id(LINUX_VERSION_CODE);
- wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
+ wrmsrq(HV_X64_MSR_GUEST_OS_ID, guest_id);
/* With the paravisor, the VM must also write the ID via GHCB/GHCI */
hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
@@ -515,7 +482,7 @@ void __init hyperv_init(void)
if (hv_hypercall_pg == NULL)
goto clean_guest_os_id;
- rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
hypercall_msr.enable = 1;
if (hv_root_partition()) {
@@ -532,7 +499,7 @@ void __init hyperv_init(void)
* so it is populated with code, then copy the code to an
* executable page.
*/
- wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
pg = vmalloc_to_page(hv_hypercall_pg);
src = memremap(hypercall_msr.guest_physical_address << PAGE_SHIFT, PAGE_SIZE,
@@ -544,7 +511,7 @@ void __init hyperv_init(void)
hv_remap_tsc_clocksource();
} else {
hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
- wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
}
skip_hypercall_pg_init:
@@ -608,7 +575,7 @@ skip_hypercall_pg_init:
return;
clean_guest_os_id:
- wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+ wrmsrq(HV_X64_MSR_GUEST_OS_ID, 0);
hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
free_ghcb_page:
@@ -629,7 +596,7 @@ void hyperv_cleanup(void)
union hv_reference_tsc_msr tsc_msr;
/* Reset our OS id */
- wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+ wrmsrq(HV_X64_MSR_GUEST_OS_ID, 0);
hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
/*
@@ -667,18 +634,18 @@ void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
return;
panic_reported = true;
- rdmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
+ rdmsrq(HV_X64_MSR_GUEST_OS_ID, guest_id);
- wrmsrl(HV_X64_MSR_CRASH_P0, err);
- wrmsrl(HV_X64_MSR_CRASH_P1, guest_id);
- wrmsrl(HV_X64_MSR_CRASH_P2, regs->ip);
- wrmsrl(HV_X64_MSR_CRASH_P3, regs->ax);
- wrmsrl(HV_X64_MSR_CRASH_P4, regs->sp);
+ wrmsrq(HV_X64_MSR_CRASH_P0, err);
+ wrmsrq(HV_X64_MSR_CRASH_P1, guest_id);
+ wrmsrq(HV_X64_MSR_CRASH_P2, regs->ip);
+ wrmsrq(HV_X64_MSR_CRASH_P3, regs->ax);
+ wrmsrq(HV_X64_MSR_CRASH_P4, regs->sp);
/*
* Let Hyper-V know there is crash data available
*/
- wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
+ wrmsrq(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);
@@ -701,8 +668,41 @@ bool hv_is_hyperv_initialized(void)
* that the hypercall page is setup
*/
hypercall_msr.as_uint64 = 0;
- rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
return hypercall_msr.enable;
}
EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);
+
+int hv_apicid_to_vp_index(u32 apic_id)
+{
+ u64 control;
+ u64 status;
+ unsigned long irq_flags;
+ struct hv_get_vp_from_apic_id_in *input;
+ u32 *output, ret;
+
+ local_irq_save(irq_flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+ input->partition_id = HV_PARTITION_ID_SELF;
+ input->apic_ids[0] = apic_id;
+
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_INDEX_FROM_APIC_ID;
+ status = hv_do_hypercall(control, input, output);
+ ret = output[0];
+
+ local_irq_restore(irq_flags);
+
+ if (!hv_result_success(status)) {
+ pr_err("failed to get vp index from apic id %d, status %#llx\n",
+ apic_id, status);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hv_apicid_to_vp_index);
diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
index 151e851bef09..81b006601370 100644
--- a/arch/x86/hyperv/hv_spinlock.c
+++ b/arch/x86/hyperv/hv_spinlock.c
@@ -15,6 +15,7 @@
#include <asm/mshyperv.h>
#include <asm/paravirt.h>
#include <asm/apic.h>
+#include <asm/msr.h>
static bool hv_pvspin __initdata = true;
@@ -39,18 +40,18 @@ static void hv_qlock_wait(u8 *byte, u8 val)
* To prevent a race against the unlock path it is required to
* disable interrupts before accessing the HV_X64_MSR_GUEST_IDLE
* MSR. Otherwise, if the IPI from hv_qlock_kick() arrives between
- * the lock value check and the rdmsrl() then the vCPU might be put
+ * the lock value check and the rdmsrq() then the vCPU might be put
* into 'idle' state by the hypervisor and kept in that state for
* an unspecified amount of time.
*/
local_irq_save(flags);
/*
- * Only issue the rdmsrl() when the lock state has not changed.
+ * Only issue the rdmsrq() when the lock state has not changed.
*/
if (READ_ONCE(*byte) == val) {
unsigned long msr_val;
- rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);
+ rdmsrq(HV_X64_MSR_GUEST_IDLE, msr_val);
(void)msr_val;
}
diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
index 13242ed8ff16..042e8712d8de 100644
--- a/arch/x86/hyperv/hv_vtl.c
+++ b/arch/x86/hyperv/hv_vtl.c
@@ -11,6 +11,7 @@
#include <asm/desc.h>
#include <asm/i8259.h>
#include <asm/mshyperv.h>
+#include <asm/msr.h>
#include <asm/realmode.h>
#include <asm/reboot.h>
#include <../kernel/smpboot.h>
@@ -55,7 +56,12 @@ static void __noreturn hv_vtl_restart(char __maybe_unused *cmd)
void __init hv_vtl_init_platform(void)
{
- pr_info("Linux runs in Hyper-V Virtual Trust Level\n");
+ /*
+ * This function is a no-op if the VTL mode is not enabled.
+ * If it is, this function runs if and only if the kernel boots in
+ * VTL2, which the x86 hv initialization path makes sure of.
+ */
+ pr_info("Linux runs in Hyper-V Virtual Trust Level %d\n", ms_hyperv.vtl);
x86_platform.realmode_reserve = x86_init_noop;
x86_platform.realmode_init = x86_init_noop;
@@ -149,11 +155,11 @@ static int hv_vtl_bringup_vcpu(u32 target_vp_index, int cpu, u64 eip_ignored)
input->vp_context.rip = rip;
input->vp_context.rsp = rsp;
input->vp_context.rflags = 0x0000000000000002;
- input->vp_context.efer = __rdmsr(MSR_EFER);
+ input->vp_context.efer = native_rdmsrq(MSR_EFER);
input->vp_context.cr0 = native_read_cr0();
input->vp_context.cr3 = __native_read_cr3();
input->vp_context.cr4 = native_read_cr4();
- input->vp_context.msr_cr_pat = __rdmsr(MSR_IA32_CR_PAT);
+ input->vp_context.msr_cr_pat = native_rdmsrq(MSR_IA32_CR_PAT);
input->vp_context.idtr.limit = idt_ptr.size;
input->vp_context.idtr.base = idt_ptr.address;
input->vp_context.gdtr.limit = gdt_ptr.size;
@@ -206,63 +212,23 @@ free_lock:
return ret;
}
-static int hv_vtl_apicid_to_vp_id(u32 apic_id)
-{
- u64 control;
- u64 status;
- unsigned long irq_flags;
- struct hv_get_vp_from_apic_id_in *input;
- u32 *output, ret;
-
- local_irq_save(irq_flags);
-
- input = *this_cpu_ptr(hyperv_pcpu_input_arg);
- memset(input, 0, sizeof(*input));
- input->partition_id = HV_PARTITION_ID_SELF;
- input->apic_ids[0] = apic_id;
-
- output = *this_cpu_ptr(hyperv_pcpu_output_arg);
-
- control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_ID_FROM_APIC_ID;
- status = hv_do_hypercall(control, input, output);
- ret = output[0];
-
- local_irq_restore(irq_flags);
-
- if (!hv_result_success(status)) {
- pr_err("failed to get vp id from apic id %d, status %#llx\n",
- apic_id, status);
- return -EINVAL;
- }
-
- return ret;
-}
-
-static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip)
+static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip, unsigned int cpu)
{
- int vp_id, cpu;
-
- /* Find the logical CPU for the APIC ID */
- for_each_present_cpu(cpu) {
- if (arch_match_cpu_phys_id(cpu, apicid))
- break;
- }
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
+ int vp_index;
pr_debug("Bringing up CPU with APIC ID %d in VTL2...\n", apicid);
- vp_id = hv_vtl_apicid_to_vp_id(apicid);
+ vp_index = hv_apicid_to_vp_index(apicid);
- if (vp_id < 0) {
+ if (vp_index < 0) {
pr_err("Couldn't find CPU with APIC ID %d\n", apicid);
return -EINVAL;
}
- if (vp_id > ms_hyperv.max_vp_index) {
- pr_err("Invalid CPU id %d for APIC ID %d\n", vp_id, apicid);
+ if (vp_index > ms_hyperv.max_vp_index) {
+ pr_err("Invalid CPU id %d for APIC ID %d\n", vp_index, apicid);
return -EINVAL;
}
- return hv_vtl_bringup_vcpu(vp_id, cpu, start_eip);
+ return hv_vtl_bringup_vcpu(vp_index, cpu, start_eip);
}
int __init hv_vtl_early_init(void)
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 77bf05f06b9e..e93a2f488ff7 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
@@ -22,6 +23,7 @@
#include <asm/realmode.h>
#include <asm/e820/api.h>
#include <asm/desc.h>
+#include <asm/msr.h>
#include <uapi/asm/vmx.h>
#ifdef CONFIG_AMD_MEM_ENCRYPT
@@ -110,12 +112,12 @@ u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
static inline u64 rd_ghcb_msr(void)
{
- return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
+ return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB);
}
static inline void wr_ghcb_msr(u64 val)
{
- native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
+ native_wrmsrq(MSR_AMD64_SEV_ES_GHCB, val);
}
static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
@@ -288,7 +290,7 @@ static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
free_page((unsigned long)vmsa);
}
-int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
+int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu)
{
struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
__get_free_page(GFP_KERNEL | __GFP_ZERO);
@@ -297,10 +299,16 @@ int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
u64 ret, retry = 5;
struct hv_enable_vp_vtl *start_vp_input;
unsigned long flags;
+ int vp_index;
if (!vmsa)
return -ENOMEM;
+	/* Find the Hyper-V VP index, which might not be the same as the APIC ID */
+ vp_index = hv_apicid_to_vp_index(apic_id);
+ if (vp_index < 0 || vp_index > ms_hyperv.max_vp_index)
+ return -EINVAL;
+
native_store_gdt(&gdtr);
vmsa->gdtr.base = gdtr.address;
@@ -348,7 +356,7 @@ int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
memset(start_vp_input, 0, sizeof(*start_vp_input));
start_vp_input->partition_id = -1;
- start_vp_input->vp_index = cpu;
+ start_vp_input->vp_index = vp_index;
start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
diff --git a/arch/x86/include/asm/acrn.h b/arch/x86/include/asm/acrn.h
index 1dd14381bcb6..fab11192c60a 100644
--- a/arch/x86/include/asm/acrn.h
+++ b/arch/x86/include/asm/acrn.h
@@ -25,7 +25,7 @@ void acrn_remove_intr_handler(void);
static inline u32 acrn_cpuid_base(void)
{
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
- return hypervisor_cpuid_base("ACRNACRNACRN", 0);
+ return cpuid_base_hypervisor("ACRNACRNACRN", 0);
return 0;
}
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 4a37a8bd87fd..15bc07a5ebb3 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -6,6 +6,7 @@
#include <linux/stringify.h>
#include <linux/objtool.h>
#include <asm/asm.h>
+#include <asm/bug.h>
#define ALT_FLAGS_SHIFT 16
@@ -82,6 +83,12 @@ struct alt_instr {
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+extern s32 __retpoline_sites[], __retpoline_sites_end[];
+extern s32 __return_sites[], __return_sites_end[];
+extern s32 __cfi_sites[], __cfi_sites_end[];
+extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
+extern s32 __smp_locks[], __smp_locks_end[];
+
/*
* Debug flag that can be tested to see whether alternative
* instructions were patched in already:
@@ -124,6 +131,37 @@ static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
}
#endif
+#ifdef CONFIG_MITIGATION_ITS
+extern void its_init_mod(struct module *mod);
+extern void its_fini_mod(struct module *mod);
+extern void its_free_mod(struct module *mod);
+extern u8 *its_static_thunk(int reg);
+#else /* CONFIG_MITIGATION_ITS */
+static inline void its_init_mod(struct module *mod) { }
+static inline void its_fini_mod(struct module *mod) { }
+static inline void its_free_mod(struct module *mod) { }
+static inline u8 *its_static_thunk(int reg)
+{
+ WARN_ONCE(1, "ITS not compiled in");
+
+ return NULL;
+}
+#endif
+
+#if defined(CONFIG_MITIGATION_RETHUNK) && defined(CONFIG_OBJTOOL)
+extern bool cpu_wants_rethunk(void);
+extern bool cpu_wants_rethunk_at(void *addr);
+#else
+static __always_inline bool cpu_wants_rethunk(void)
+{
+ return false;
+}
+static __always_inline bool cpu_wants_rethunk_at(void *addr)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
@@ -335,11 +373,6 @@ void nop_func(void);
__ALTERNATIVE(\oldinstr, \newinstr, \ft_flags)
.endm
-#define old_len 141b-140b
-#define new_len1 144f-143f
-#define new_len2 145f-144f
-#define new_len3 146f-145f
-
/*
* Same as ALTERNATIVE macro above but for two alternatives. If CPU
* has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
diff --git a/arch/x86/include/asm/amd/fch.h b/arch/x86/include/asm/amd/fch.h
new file mode 100644
index 000000000000..2cf5153edbc2
--- /dev/null
+++ b/arch/x86/include/asm/amd/fch.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_AMD_FCH_H_
+#define _ASM_X86_AMD_FCH_H_
+
+#define FCH_PM_BASE 0xFED80300
+
+/* Register offsets from PM base: */
+#define FCH_PM_DECODEEN 0x00
+#define FCH_PM_DECODEEN_SMBUS0SEL GENMASK(20, 19)
+#define FCH_PM_SCRATCH 0x80
+#define FCH_PM_S5_RESET_STATUS 0xC0
+
+#endif /* _ASM_X86_AMD_FCH_H_ */
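The new header above only carries a fixed MMIO base and register offsets. As a rough illustration (not part of this patch), a consumer might map the FCH PM block and read the S5/reset status register as in the sketch below; the 256-byte mapping length and the function name are assumptions for illustration only.

#include <linux/io.h>
#include <asm/amd/fch.h>

/*
 * Hypothetical consumer: map the fixed FCH PM MMIO block and read the
 * S5/reset status word. The 256-byte window size is an assumption.
 */
static u32 example_read_s5_reset_status(void)
{
	void __iomem *base = ioremap(FCH_PM_BASE, 256);
	u32 val;

	if (!base)
		return 0;

	val = readl(base + FCH_PM_S5_RESET_STATUS);
	iounmap(base);

	return val;
}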
diff --git a/arch/x86/include/asm/amd_hsmp.h b/arch/x86/include/asm/amd/hsmp.h
index 03c2ce3edaf5..2137f62853ed 100644
--- a/arch/x86/include/asm/amd_hsmp.h
+++ b/arch/x86/include/asm/amd/hsmp.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-
#ifndef _ASM_X86_AMD_HSMP_H_
#define _ASM_X86_AMD_HSMP_H_
@@ -13,4 +12,5 @@ static inline int hsmp_send_message(struct hsmp_message *msg)
return -ENODEV;
}
#endif
+
#endif /*_ASM_X86_AMD_HSMP_H_*/
diff --git a/arch/x86/include/asm/amd-ibs.h b/arch/x86/include/asm/amd/ibs.h
index 77f3a589a99a..3ee5903982c2 100644
--- a/arch/x86/include/asm/amd-ibs.h
+++ b/arch/x86/include/asm/amd/ibs.h
@@ -1,4 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_AMD_IBS_H
+#define _ASM_X86_AMD_IBS_H
+
/*
* From PPR Vol 1 for AMD Family 19h Model 01h B1
* 55898 Rev 0.35 - Feb 5, 2021
@@ -151,3 +154,5 @@ struct perf_ibs_data {
};
u64 regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};
+
+#endif /* _ASM_X86_AMD_IBS_H */
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd/nb.h
index adfa0854cf2d..ddb5108cf46c 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd/nb.h
@@ -4,7 +4,7 @@
#include <linux/ioport.h>
#include <linux/pci.h>
-#include <asm/amd_node.h>
+#include <asm/amd/node.h>
struct amd_nb_bus_dev_range {
u8 bus;
diff --git a/arch/x86/include/asm/amd_node.h b/arch/x86/include/asm/amd/node.h
index 23fe617898a8..23fe617898a8 100644
--- a/arch/x86/include/asm/amd_node.h
+++ b/arch/x86/include/asm/amd/node.h
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index c903d358405d..23d86c9750b9 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -120,7 +120,7 @@ static inline bool apic_is_x2apic_enabled(void)
{
u64 msr;
- if (rdmsrl_safe(MSR_IA32_APICBASE, &msr))
+ if (rdmsrq_safe(MSR_IA32_APICBASE, &msr))
return false;
return msr & X2APIC_ENABLE;
}
@@ -209,12 +209,12 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
reg == APIC_LVR)
return;
- wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
+ wrmsrq(APIC_BASE_MSR + (reg >> 4), v);
}
static inline void native_apic_msr_eoi(void)
{
- __wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+ native_wrmsrq(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK);
}
static inline u32 native_apic_msr_read(u32 reg)
@@ -224,20 +224,20 @@ static inline u32 native_apic_msr_read(u32 reg)
if (reg == APIC_DFR)
return -1;
- rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
+ rdmsrq(APIC_BASE_MSR + (reg >> 4), msr);
return (u32)msr;
}
static inline void native_x2apic_icr_write(u32 low, u32 id)
{
- wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
+ wrmsrq(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}
static inline u64 native_x2apic_icr_read(void)
{
unsigned long val;
- rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
+ rdmsrq(APIC_BASE_MSR + (APIC_ICR >> 4), val);
return val;
}
@@ -313,9 +313,9 @@ struct apic {
u32 (*get_apic_id)(u32 id);
/* wakeup_secondary_cpu */
- int (*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip);
+ int (*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip, unsigned int cpu);
/* wakeup secondary CPU using 64-bit wakeup point */
- int (*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip);
+ int (*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip, unsigned int cpu);
char *name;
};
@@ -333,8 +333,8 @@ struct apic_override {
void (*send_IPI_self)(int vector);
u64 (*icr_read)(void);
void (*icr_write)(u32 low, u32 high);
- int (*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip);
- int (*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip);
+ int (*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip, unsigned int cpu);
+ int (*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip, unsigned int cpu);
};
/*
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index cbc6157f0b4b..b5982b94bdba 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -16,8 +16,7 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
- asm_inline (ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
- "call __sw_hweight32",
+ asm_inline (ALTERNATIVE("call __sw_hweight32",
"popcntl %[val], %[cnt]", X86_FEATURE_POPCNT)
: [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT
: [val] REG_IN (w));
@@ -46,8 +45,7 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res;
- asm_inline (ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
- "call __sw_hweight64",
+ asm_inline (ALTERNATIVE("call __sw_hweight64",
"popcntq %[val], %[cnt]", X86_FEATURE_POPCNT)
: [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT
: [val] REG_IN (w));
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index cc2881576c2c..f963848024a5 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -114,17 +114,12 @@
#endif
#ifndef __ASSEMBLER__
-#ifndef __pic__
static __always_inline __pure void *rip_rel_ptr(void *p)
{
asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
return p;
}
-#define RIP_REL_REF(var) (*(typeof(&(var)))rip_rel_ptr(&(var)))
-#else
-#define RIP_REL_REF(var) (var)
-#endif
#endif
/*
@@ -243,5 +238,24 @@ register unsigned long current_stack_pointer asm(_ASM_SP);
#define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_TYPE(from, to, EX_TYPE_FAULT)
+/*
+ * Both i386 and x86_64 return 64-bit values in edx:eax for certain
+ * instructions, but GCC's "A" constraint has different meanings.
+ * For i386, "A" means exactly edx:eax, while for x86_64 it
+ * means rax *or* rdx.
+ *
+ * These helpers wrap those semantic differences and save one instruction
+ * clearing the high half of 'low':
+ */
+#ifdef CONFIG_X86_64
+# define EAX_EDX_DECLARE_ARGS(val, low, high) unsigned long low, high
+# define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32)
+# define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
+#else
+# define EAX_EDX_DECLARE_ARGS(val, low, high) u64 val
+# define EAX_EDX_VAL(val, low, high) (val)
+# define EAX_EDX_RET(val, low, high) "=A" (val)
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_X86_ASM_H */
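For illustration only, a minimal sketch (not part of this patch) of how the EAX_EDX_*() helpers above are meant to be used: declare the output operands, let an edx:eax-returning instruction fill them, then combine the halves. RDTSC is used here purely as an example of such an instruction.

static __always_inline u64 example_read_tsc(void)
{
	EAX_EDX_DECLARE_ARGS(val, low, high);

	/* On 64-bit this fills low/high; on 32-bit it fills val directly. */
	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}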
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 100413aff640..eebbc8889e70 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -248,7 +248,7 @@ arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
static __always_inline unsigned long variable__ffs(unsigned long word)
{
- asm("rep; bsf %1,%0"
+ asm("tzcnt %1,%0"
: "=r" (word)
: ASM_INPUT_RM (word));
return word;
@@ -267,10 +267,7 @@ static __always_inline unsigned long variable__ffs(unsigned long word)
static __always_inline unsigned long variable_ffz(unsigned long word)
{
- asm("rep; bsf %1,%0"
- : "=r" (word)
- : "r" (~word));
- return word;
+ return variable__ffs(~word);
}
/**
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 3f02ff6d333d..02b23aa78955 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -74,6 +74,11 @@
# define BOOT_STACK_SIZE 0x1000
#endif
+#define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE)
+
+#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
+#define TRAMPOLINE_32BIT_CODE_SIZE 0xA0
+
#ifndef __ASSEMBLER__
extern unsigned int output_len;
extern const unsigned long kernel_text_size;
@@ -83,6 +88,11 @@ unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
void (*error)(char *x));
extern struct boot_params *boot_params_ptr;
+extern unsigned long *trampoline_32bit;
+extern const u16 trampoline_ljmp_imm_offset;
+
+void trampoline_32bit_src(void *trampoline, bool enable_5lvl);
+
#endif
#endif /* _ASM_X86_BOOT_H */
diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
index e7225452963f..e1dbf8df1b69 100644
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -22,7 +22,7 @@ static inline u64 cc_get_mask(void)
static inline void cc_set_mask(u64 mask)
{
- RIP_REL_REF(cc_mask) = mask;
+ cc_mask = mask;
}
u64 cc_mkenc(u64 val);
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 6c2c152d8a67..ee176236c2be 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -75,7 +75,7 @@
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* "centaur_mcr" Centaur MCRs (= MTRRs) */
#define X86_FEATURE_K8 ( 3*32+ 4) /* Opteron, Athlon64 */
#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* CPU based on Zen5 microarchitecture */
-/* Free ( 3*32+ 6) */
+#define X86_FEATURE_ZEN6 ( 3*32+ 6) /* CPU based on Zen6 microarchitecture */
/* Free ( 3*32+ 7) */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* "constant_tsc" TSC ticks at a constant rate */
#define X86_FEATURE_UP ( 3*32+ 9) /* "up" SMP kernel running on UP */
@@ -336,6 +336,7 @@
#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* Single Thread Indirect Branch Predictors always-on preferred */
+#define X86_FEATURE_AMD_IBRS_SAME_MODE (13*32+19) /* Indirect Branch Restricted Speculation same mode protection*/
#define X86_FEATURE_AMD_PPIN (13*32+23) /* "amd_ppin" Protected Processor Inventory Number */
#define X86_FEATURE_AMD_SSBD (13*32+24) /* Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* "virt_ssbd" Virtualized Speculative Store Bypass Disable */
@@ -378,6 +379,7 @@
#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* "v_spec_ctrl" Virtual SPEC_CTRL */
#define X86_FEATURE_VNMI (15*32+25) /* "vnmi" Virtual NMI */
#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* SVME addr check */
+#define X86_FEATURE_BUS_LOCK_THRESHOLD (15*32+29) /* Bus lock threshold */
#define X86_FEATURE_IDLE_HLT (15*32+30) /* IDLE HLT intercept */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
@@ -446,6 +448,7 @@
#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" SEV-ES full debug state swap support */
#define X86_FEATURE_RMPREAD (19*32+21) /* RMPREAD instruction */
#define X86_FEATURE_SEGMENTED_RMP (19*32+23) /* Segmented RMP support */
+#define X86_FEATURE_ALLOWED_SEV_FEATURES (19*32+27) /* Allowed SEV Features */
#define X86_FEATURE_SVSM (19*32+28) /* "svsm" SVSM present */
#define X86_FEATURE_HV_INUSE_WR_ALLOWED (19*32+30) /* Allow Write to in-use hypervisor-owned pages */
@@ -457,6 +460,7 @@
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */
#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */
+#define X86_FEATURE_PREFETCHI (20*32+20) /* Prefetch Data/Instruction to Cache Level */
#define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */
#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
#define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
@@ -476,11 +480,13 @@
#define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* Clear branch history at syscall entry using SW loop */
#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* BHI_DIS_S HW control available */
#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */
-#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
-#define X86_FEATURE_AMD_FAST_CPPC (21*32 + 5) /* Fast CPPC */
-#define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32 + 6) /* Heterogeneous Core Topology */
-#define X86_FEATURE_AMD_WORKLOAD_CLASS (21*32 + 7) /* Workload Classification */
-#define X86_FEATURE_PREFER_YMM (21*32 + 8) /* Avoid ZMM registers due to downclocking */
+#define X86_FEATURE_CLEAR_BHB_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
+#define X86_FEATURE_AMD_FAST_CPPC (21*32+ 5) /* Fast CPPC */
+#define X86_FEATURE_AMD_HTR_CORES (21*32+ 6) /* Heterogeneous Core Topology */
+#define X86_FEATURE_AMD_WORKLOAD_CLASS (21*32+ 7) /* Workload Classification */
+#define X86_FEATURE_PREFER_YMM (21*32+ 8) /* Avoid ZMM registers due to downclocking */
+#define X86_FEATURE_APX (21*32+ 9) /* Advanced Performance Extensions */
+#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32+10) /* Use thunk for indirect branches in lower half of cacheline */
/*
* BUG word(s)
@@ -519,7 +525,7 @@
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* "itlb_multihit" CPU may incur MCE during certain page attribute changes */
#define X86_BUG_SRBDS X86_BUG(24) /* "srbds" CPU may leak RNG bits if not mitigated */
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* "mmio_stale_data" CPU is affected by Processor MMIO Stale Data vulnerabilities */
-#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* "mmio_unknown" CPU is too old and its MMIO Stale Data status is unknown */
+/* unused, was #define X86_BUG_MMIO_UNKNOWN X86_BUG(26) "mmio_unknown" CPU is too old and its MMIO Stale Data status is unknown */
#define X86_BUG_RETBLEED X86_BUG(27) /* "retbleed" CPU is affected by RETBleed */
#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* "eibrs_pbrsb" EIBRS is vulnerable to Post Barrier RSB Predictions */
#define X86_BUG_SMT_RSB X86_BUG(29) /* "smt_rsb" CPU is vulnerable to Cross-Thread Return Address Predictions */
@@ -527,10 +533,14 @@
#define X86_BUG_TDX_PW_MCE X86_BUG(31) /* "tdx_pw_mce" CPU may incur #MC if non-TD software does partial write to TDX private memory */
/* BUG word 2 */
-#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* "srso" AMD SRSO bug */
-#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */
-#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
-#define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
-#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
-#define X86_BUG_SPECTRE_V2_USER X86_BUG(1*32 + 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */
+#define X86_BUG_SRSO X86_BUG( 1*32+ 0) /* "srso" AMD SRSO bug */
+#define X86_BUG_DIV0 X86_BUG( 1*32+ 1) /* "div0" AMD DIV0 speculation bug */
+#define X86_BUG_RFDS X86_BUG( 1*32+ 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
+#define X86_BUG_BHI X86_BUG( 1*32+ 3) /* "bhi" CPU is affected by Branch History Injection */
+#define X86_BUG_IBPB_NO_RET X86_BUG( 1*32+ 4) /* "ibpb_no_ret" IBPB omits return target predictions */
+#define X86_BUG_SPECTRE_V2_USER X86_BUG( 1*32+ 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */
+#define X86_BUG_OLD_MICROCODE X86_BUG( 1*32+ 6) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */
+#define X86_BUG_ITS X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */
+#define X86_BUG_ITS_NATIVE_ONLY X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
+
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/cpuid/api.h b/arch/x86/include/asm/cpuid/api.h
index 9c180c9cc58e..44fa82e1267c 100644
--- a/arch/x86/include/asm/cpuid/api.h
+++ b/arch/x86/include/asm/cpuid/api.h
@@ -14,9 +14,9 @@
*/
#ifdef CONFIG_X86_32
-bool have_cpuid_p(void);
+bool cpuid_feature(void);
#else
-static inline bool have_cpuid_p(void)
+static inline bool cpuid_feature(void)
{
return true;
}
@@ -36,9 +36,9 @@ static inline void native_cpuid(u32 *eax, u32 *ebx,
}
#define NATIVE_CPUID_REG(reg) \
-static inline u32 native_cpuid_##reg(u32 op) \
+static inline u32 native_cpuid_##reg(u32 op) \
{ \
- u32 eax = op, ebx, ecx = 0, edx; \
+ u32 eax = op, ebx, ecx = 0, edx; \
\
native_cpuid(&eax, &ebx, &ecx, &edx); \
\
@@ -160,6 +160,10 @@ static inline void __cpuid_read_reg(u32 leaf, u32 subleaf,
__cpuid_read_reg(leaf, 0, regidx, (u32 *)(reg)); \
}
+/*
+ * Hypervisor-related APIs:
+ */
+
static __always_inline bool cpuid_function_is_indexed(u32 function)
{
switch (function) {
@@ -184,14 +188,14 @@ static __always_inline bool cpuid_function_is_indexed(u32 function)
return false;
}
-#define for_each_possible_hypervisor_cpuid_base(function) \
+#define for_each_possible_cpuid_base_hypervisor(function) \
for (function = 0x40000000; function < 0x40010000; function += 0x100)
-static inline u32 hypervisor_cpuid_base(const char *sig, u32 leaves)
+static inline u32 cpuid_base_hypervisor(const char *sig, u32 leaves)
{
u32 base, eax, signature[3];
- for_each_possible_hypervisor_cpuid_base(base) {
+ for_each_possible_cpuid_base_hypervisor(base) {
cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
/*
@@ -207,4 +211,82 @@ static inline u32 hypervisor_cpuid_base(const char *sig, u32 leaves)
return 0;
}
+/*
+ * CPUID(0x2) parsing:
+ */
+
+/**
+ * cpuid_leaf_0x2() - Return sanitized CPUID(0x2) register output
+ * @regs: Output parameter
+ *
+ * Query CPUID(0x2) and store its output in @regs. Force set any
+ * invalid 1-byte descriptor returned by the hardware to zero (the NULL
+ * cache/TLB descriptor) before returning it to the caller.
+ *
+ * Use for_each_cpuid_0x2_desc() to iterate over the register output in
+ * parsed form.
+ */
+static inline void cpuid_leaf_0x2(union leaf_0x2_regs *regs)
+{
+ cpuid_leaf(0x2, regs);
+
+ /*
+ * All Intel CPUs must report an iteration count of 1. In case
+ * of bogus hardware, treat all returned descriptors as NULL.
+ */
+ if (regs->desc[0] != 0x01) {
+ for (int i = 0; i < 4; i++)
+ regs->regv[i] = 0;
+ return;
+ }
+
+ /*
+ * The most significant bit (MSB) of each register must be clear.
+ * If a register is invalid, replace its descriptors with NULL.
+ */
+ for (int i = 0; i < 4; i++) {
+ if (regs->reg[i].invalid)
+ regs->regv[i] = 0;
+ }
+}
+
+/**
+ * for_each_cpuid_0x2_desc() - Iterator for parsed CPUID(0x2) descriptors
+ * @_regs: CPUID(0x2) register output, as returned by cpuid_leaf_0x2()
+ * @_ptr: u8 pointer, for macro internal use only
+ * @_desc: Pointer to the parsed CPUID(0x2) descriptor at each iteration
+ *
+ * Loop over the 1-byte descriptors in the passed CPUID(0x2) output registers
+ * @_regs. Provide the parsed information for each descriptor through @_desc.
+ *
+ * To handle cache-specific descriptors, switch on @_desc->c_type. For TLB
+ * descriptors, switch on @_desc->t_type.
+ *
+ * Example usage for cache descriptors::
+ *
+ * const struct leaf_0x2_table *desc;
+ * union leaf_0x2_regs regs;
+ * u8 *ptr;
+ *
+ * cpuid_leaf_0x2(&regs);
+ * for_each_cpuid_0x2_desc(regs, ptr, desc) {
+ * switch (desc->c_type) {
+ * ...
+ * }
+ * }
+ */
+#define for_each_cpuid_0x2_desc(_regs, _ptr, _desc) \
+ for (_ptr = &(_regs).desc[1]; \
+ _ptr < &(_regs).desc[16] && (_desc = &cpuid_0x2_table[*_ptr]); \
+ _ptr++)
+
+/*
+ * CPUID(0x80000006) parsing:
+ */
+
+static inline bool cpuid_amd_hygon_has_l3_cache(void)
+{
+ return cpuid_edx(0x80000006);
+}
+
#endif /* _ASM_X86_CPUID_API_H */
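As a rough sketch of the parsing pattern documented above (not part of this patch), a consumer queries the sanitized registers once and then dispatches on the parsed cache or TLB type of each descriptor. The l2_size_kb and itlb_4k_entries variables below are hypothetical bookkeeping, and the size unit is whatever the cpuid_0x2_table[] mapping encodes.

static void example_parse_leaf_0x2(void)
{
	const struct leaf_0x2_table *desc;
	union leaf_0x2_regs regs;
	unsigned int l2_size_kb = 0, itlb_4k_entries = 0;
	u8 *ptr;

	cpuid_leaf_0x2(&regs);
	for_each_cpuid_0x2_desc(regs, ptr, desc) {
		/* Cache and TLB type values never overlap, so at most one case hits. */
		switch (desc->c_type) {
		case CACHE_L2:
			l2_size_kb = desc->c_size;
			break;
		default:
			break;
		}
		switch (desc->t_type) {
		case TLB_INST_4K:
			itlb_4k_entries += desc->entries;
			break;
		default:
			break;
		}
	}

	/* l2_size_kb / itlb_4k_entries would now feed the caller's bookkeeping. */
}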
diff --git a/arch/x86/include/asm/cpuid/types.h b/arch/x86/include/asm/cpuid/types.h
index 8582e27e836d..8a00364b79de 100644
--- a/arch/x86/include/asm/cpuid/types.h
+++ b/arch/x86/include/asm/cpuid/types.h
@@ -2,6 +2,7 @@
#ifndef _ASM_X86_CPUID_TYPES_H
#define _ASM_X86_CPUID_TYPES_H
+#include <linux/build_bug.h>
#include <linux/types.h>
/*
@@ -29,4 +30,98 @@ enum cpuid_regs_idx {
#define CPUID_LEAF_FREQ 0x16
#define CPUID_LEAF_TILE 0x1d
+/*
+ * Types for CPUID(0x2) parsing:
+ */
+
+struct leaf_0x2_reg {
+ u32 : 31,
+ invalid : 1;
+};
+
+union leaf_0x2_regs {
+ struct leaf_0x2_reg reg[4];
+ u32 regv[4];
+ u8 desc[16];
+};
+
+/*
+ * Leaf 0x2 1-byte descriptors' cache types
+ * To be used for their mappings at cpuid_0x2_table[]
+ *
+ * Start at 1 since type 0 is reserved for HW byte descriptors which are
+ * not recognized by the kernel; i.e., those without an explicit mapping.
+ */
+enum _cache_table_type {
+ CACHE_L1_INST = 1,
+ CACHE_L1_DATA,
+ CACHE_L2,
+ CACHE_L3
+ /* Adjust __TLB_TABLE_TYPE_BEGIN before adding more types */
+} __packed;
+#ifndef __CHECKER__
+static_assert(sizeof(enum _cache_table_type) == 1);
+#endif
+
+/*
+ * Ensure that leaf 0x2 cache and TLB type values do not intersect,
+ * since they share the same type field at struct cpuid_0x2_table.
+ */
+#define __TLB_TABLE_TYPE_BEGIN (CACHE_L3 + 1)
+
+/*
+ * Leaf 0x2 1-byte descriptors' TLB types
+ * To be used for their mappings at cpuid_0x2_table[]
+ */
+enum _tlb_table_type {
+ TLB_INST_4K = __TLB_TABLE_TYPE_BEGIN,
+ TLB_INST_4M,
+ TLB_INST_2M_4M,
+ TLB_INST_ALL,
+
+ TLB_DATA_4K,
+ TLB_DATA_4M,
+ TLB_DATA_2M_4M,
+ TLB_DATA_4K_4M,
+ TLB_DATA_1G,
+ TLB_DATA_1G_2M_4M,
+
+ TLB_DATA0_4K,
+ TLB_DATA0_4M,
+ TLB_DATA0_2M_4M,
+
+ STLB_4K,
+ STLB_4K_2M,
+} __packed;
+#ifndef __CHECKER__
+static_assert(sizeof(enum _tlb_table_type) == 1);
+#endif
+
+/*
+ * Combined parsing table for leaf 0x2 cache and TLB descriptors.
+ */
+
+struct leaf_0x2_table {
+ union {
+ enum _cache_table_type c_type;
+ enum _tlb_table_type t_type;
+ };
+ union {
+ short c_size;
+ short entries;
+ };
+};
+
+extern const struct leaf_0x2_table cpuid_0x2_table[256];
+
+/*
+ * All of leaf 0x2's one-byte TLB descriptors imply the same number of entries
+ * for their respective TLB types. TLB descriptor 0x63 is an exception: it
+ * implies 4 dTLB entries for 1GB pages and 32 dTLB entries for 2MB or 4MB pages.
+ *
+ * Encode that descriptor's dTLB entry count for 2MB/4MB pages here, as the entry
+ * count for dTLB 1GB pages is already encoded in the cpuid_0x2_table[] mapping.
+ */
+#define TLB_0x63_2M_4M_ENTRIES 32
+
#endif /* _ASM_X86_CPUID_TYPES_H */
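To make the 0x63 quirk described above concrete, a consumer of cpuid_0x2_table[] would, as a sketch (not part of this patch), credit the table's entry count to the 1GB dTLB and add TLB_0x63_2M_4M_ENTRIES to the 2MB/4MB dTLB when that descriptor appears; the counter pointers below are hypothetical.

static void example_apply_tlb_0x63(u8 desc_byte, const struct leaf_0x2_table *entry,
				   unsigned int *dtlb_1g_entries,
				   unsigned int *dtlb_2m_4m_entries)
{
	if (desc_byte != 0x63)
		return;

	*dtlb_1g_entries += entry->entries;		/* 4, per the table mapping */
	*dtlb_2m_4m_entries += TLB_0x63_2M_4M_ENTRIES;	/* 32 */
}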
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index fdbbbfec745a..363110e6b2e3 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -23,7 +23,7 @@ DECLARE_PER_CPU(unsigned long, cpu_dr7);
static __always_inline unsigned long native_get_debugreg(int regno)
{
- unsigned long val = 0; /* Damn you, gcc! */
+ unsigned long val;
switch (regno) {
case 0:
@@ -43,7 +43,7 @@ static __always_inline unsigned long native_get_debugreg(int regno)
break;
case 7:
/*
- * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
+ * Use "asm volatile" for DR7 reads to forbid re-ordering them
* with other code.
*
* This is needed because a DR7 access can cause a #VC exception
@@ -55,7 +55,7 @@ static __always_inline unsigned long native_get_debugreg(int regno)
* re-ordered to happen before the call to sev_es_ist_enter(),
* causing stack recursion.
*/
- asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
+ asm volatile("mov %%db7, %0" : "=r" (val));
break;
default:
BUG();
@@ -83,15 +83,15 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value)
break;
case 7:
/*
- * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
+ * Use "asm volatile" for DR7 writes to forbid re-ordering them
* with other code.
*
 	 * While it didn't happen with a DR7 write (see the DR7 read
* comment above which explains where it happened), add the
- * __FORCE_ORDER here too to avoid similar problems in the
+ * "asm volatile" here too to avoid similar problems in the
* future.
*/
- asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER);
+ asm volatile("mov %0, %%db7" ::"r" (value));
break;
default:
BUG();
@@ -169,7 +169,7 @@ static inline unsigned long get_debugctlmsr(void)
if (boot_cpu_data.x86 < 6)
return 0;
#endif
- rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+ rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
return debugctlmsr;
}
@@ -180,7 +180,7 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr)
if (boot_cpu_data.x86 < 6)
return;
#endif
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+ wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
#endif /* _ASM_X86_DEBUGREG_H */
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 128602612eca..6c8fdc96be7e 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -76,12 +76,8 @@ typedef struct user_i387_struct elf_fpregset_t;
#include <asm/vdso.h>
-#ifdef CONFIG_X86_64
extern unsigned int vdso64_enabled;
-#endif
-#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
extern unsigned int vdso32_enabled;
-#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index 77d20555e04d..d535a97c7284 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -53,7 +53,6 @@ static inline void arch_exit_work(unsigned long ti_work)
if (unlikely(ti_work & _TIF_IO_BITMAP))
tss_update_io_bitmap();
- fpregs_assert_state_consistent();
if (unlikely(ti_work & _TIF_NEED_FPU_LOAD))
switch_fpu_return();
}
@@ -61,7 +60,9 @@ static inline void arch_exit_work(unsigned long ti_work)
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
unsigned long ti_work)
{
- if (IS_ENABLED(CONFIG_X86_DEBUG_FPU) || unlikely(ti_work))
+ fpregs_assert_state_consistent();
+
+ if (unlikely(ti_work))
arch_exit_work(ti_work);
fred_update_rsp0();
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index f42de5f05e7e..cd6f194a912b 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -126,6 +126,7 @@ static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif
/* State tracking */
+DECLARE_PER_CPU(bool, kernel_fpu_allowed);
DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
/* Process cleanup */
@@ -136,7 +137,7 @@ static inline void fpstate_free(struct fpu *fpu) { }
#endif
/* fpstate-related functions which are exported to KVM */
-extern void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature);
+extern void fpstate_clear_xstate_component(struct fpstate *fpstate, unsigned int xfeature);
extern u64 xstate_get_guest_group_perm(void);
diff --git a/arch/x86/include/asm/fpu/sched.h b/arch/x86/include/asm/fpu/sched.h
index c485f1944c5f..c060549c6c94 100644
--- a/arch/x86/include/asm/fpu/sched.h
+++ b/arch/x86/include/asm/fpu/sched.h
@@ -10,7 +10,7 @@
#include <asm/trace/fpu.h>
extern void save_fpregs_to_fpstate(struct fpu *fpu);
-extern void fpu__drop(struct fpu *fpu);
+extern void fpu__drop(struct task_struct *tsk);
extern int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal,
unsigned long shstk_addr);
extern void fpu_flush_thread(void);
@@ -18,31 +18,25 @@ extern void fpu_flush_thread(void);
/*
* FPU state switching for scheduling.
*
- * This is a two-stage process:
+ * switch_fpu() saves the old state and sets TIF_NEED_FPU_LOAD if
+ * TIF_NEED_FPU_LOAD is not set. This is done within the context
+ * of the old process.
*
- * - switch_fpu_prepare() saves the old state.
- * This is done within the context of the old process.
- *
- * - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
- * will get loaded on return to userspace, or when the kernel needs it.
- *
- * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
- * are saved in the current thread's FPU register state.
- *
- * If TIF_NEED_FPU_LOAD is set then CPU's FPU registers may not
- * hold current()'s FPU registers. It is required to load the
+ * Once TIF_NEED_FPU_LOAD is set, it is required to load the
* registers before returning to userland or using the content
* otherwise.
*
* The FPU context is only stored/restored for a user task and
* PF_KTHREAD is used to distinguish between kernel and user threads.
*/
-static inline void switch_fpu_prepare(struct task_struct *old, int cpu)
+static inline void switch_fpu(struct task_struct *old, int cpu)
{
- if (cpu_feature_enabled(X86_FEATURE_FPU) &&
+ if (!test_tsk_thread_flag(old, TIF_NEED_FPU_LOAD) &&
+ cpu_feature_enabled(X86_FEATURE_FPU) &&
!(old->flags & (PF_KTHREAD | PF_USER_WORKER))) {
- struct fpu *old_fpu = &old->thread.fpu;
+ struct fpu *old_fpu = x86_task_fpu(old);
+ set_tsk_thread_flag(old, TIF_NEED_FPU_LOAD);
save_fpregs_to_fpstate(old_fpu);
/*
* The save operation preserved register state, so the
@@ -50,7 +44,7 @@ static inline void switch_fpu_prepare(struct task_struct *old, int cpu)
* current CPU number in @old_fpu, so the next return
* to user space can avoid the FPU register restore
 	 * when it returns on the same CPU and still owns the
- * context.
+ * context. See fpregs_restore_userregs().
*/
old_fpu->last_cpu = cpu;
@@ -58,14 +52,4 @@ static inline void switch_fpu_prepare(struct task_struct *old, int cpu)
}
}
-/*
- * Delay loading of the complete FPU state until the return to userland.
- * PKRU is handled separately.
- */
-static inline void switch_fpu_finish(struct task_struct *new)
-{
- if (cpu_feature_enabled(X86_FEATURE_FPU))
- set_tsk_thread_flag(new, TIF_NEED_FPU_LOAD);
-}
-
#endif /* _ASM_X86_FPU_SCHED_H */
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index de16862bf230..1c94121acd3d 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -125,6 +125,7 @@ enum xfeature {
XFEATURE_RSRVD_COMP_16,
XFEATURE_XTILE_CFG,
XFEATURE_XTILE_DATA,
+ XFEATURE_APX,
XFEATURE_MAX,
};
@@ -145,6 +146,7 @@ enum xfeature {
#define XFEATURE_MASK_LBR (1 << XFEATURE_LBR)
#define XFEATURE_MASK_XTILE_CFG (1 << XFEATURE_XTILE_CFG)
#define XFEATURE_MASK_XTILE_DATA (1 << XFEATURE_XTILE_DATA)
+#define XFEATURE_MASK_APX (1 << XFEATURE_APX)
#define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
#define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK \
@@ -304,6 +306,13 @@ struct xtile_data {
} __packed;
/*
+ * State component 19: 16 x 8-byte extended general purpose registers.
+ */
+struct apx_state {
+ u64 egpr[16];
+} __packed;
+
+/*
* State component 10 is supervisor state used for context-switching the
* PASID state.
*/
@@ -407,9 +416,11 @@ struct fpu_state_perm {
/*
* @__state_perm:
*
- * This bitmap indicates the permission for state components, which
- * are available to a thread group. The permission prctl() sets the
- * enabled state bits in thread_group_leader()->thread.fpu.
+ * This bitmap indicates the permission for state components
+ * available to a thread group, including both user and supervisor
+ * components and software-defined bits like FPU_GUEST_PERM_LOCKED.
+ * The permission prctl() sets the enabled state bits in
+ * thread_group_leader()->thread.fpu.
*
* All run time operations use the per thread information in the
* currently active fpu.fpstate which contains the xfeature masks
@@ -525,13 +536,6 @@ struct fpu_guest {
u64 xfeatures;
/*
- * @perm: xfeature bitmap of features which are
- * permitted to be enabled for the guest
- * vCPU.
- */
- u64 perm;
-
- /*
* @xfd_err: Save the guest value.
*/
u64 xfd_err;
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 7f39fe7980c5..b308a76afbb7 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -32,7 +32,8 @@
XFEATURE_MASK_PKRU | \
XFEATURE_MASK_BNDREGS | \
XFEATURE_MASK_BNDCSR | \
- XFEATURE_MASK_XTILE)
+ XFEATURE_MASK_XTILE | \
+ XFEATURE_MASK_APX)
/*
* Features which are restored when returning to user space.
diff --git a/arch/x86/include/asm/fred.h b/arch/x86/include/asm/fred.h
index 2a29e5216881..12b34d5b2953 100644
--- a/arch/x86/include/asm/fred.h
+++ b/arch/x86/include/asm/fred.h
@@ -9,6 +9,7 @@
#include <linux/const.h>
#include <asm/asm.h>
+#include <asm/msr.h>
#include <asm/trapnr.h>
/*
diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h
index 02f239569b93..ab2547f97c2c 100644
--- a/arch/x86/include/asm/fsgsbase.h
+++ b/arch/x86/include/asm/fsgsbase.h
@@ -60,7 +60,7 @@ static inline unsigned long x86_fsbase_read_cpu(void)
if (boot_cpu_has(X86_FEATURE_FSGSBASE))
fsbase = rdfsbase();
else
- rdmsrl(MSR_FS_BASE, fsbase);
+ rdmsrq(MSR_FS_BASE, fsbase);
return fsbase;
}
@@ -70,7 +70,7 @@ static inline void x86_fsbase_write_cpu(unsigned long fsbase)
if (boot_cpu_has(X86_FEATURE_FSGSBASE))
wrfsbase(fsbase);
else
- wrmsrl(MSR_FS_BASE, fsbase);
+ wrmsrq(MSR_FS_BASE, fsbase);
}
extern unsigned long x86_gsbase_read_cpu_inactive(void);
diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h
index 53e4015242b4..97f341777db5 100644
--- a/arch/x86/include/asm/inat.h
+++ b/arch/x86/include/asm/inat.h
@@ -82,6 +82,7 @@
#define INAT_NO_REX2 (1 << (INAT_FLAG_OFFS + 8))
#define INAT_REX2_VARIANT (1 << (INAT_FLAG_OFFS + 9))
#define INAT_EVEX_SCALABLE (1 << (INAT_FLAG_OFFS + 10))
+#define INAT_INV64 (1 << (INAT_FLAG_OFFS + 11))
/* Attribute making macros for attribute tables */
#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
@@ -242,4 +243,9 @@ static inline int inat_evex_scalable(insn_attr_t attr)
{
return attr & INAT_EVEX_SCALABLE;
}
+
+static inline int inat_is_invalid64(insn_attr_t attr)
+{
+ return attr & INAT_INV64;
+}
#endif
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 3a97a7eefb51..be10c188614f 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -126,6 +126,8 @@
#define INTEL_GRANITERAPIDS_X IFM(6, 0xAD) /* Redwood Cove */
#define INTEL_GRANITERAPIDS_D IFM(6, 0xAE)
+#define INTEL_BARTLETTLAKE IFM(6, 0xD7) /* Raptor Cove */
+
/* "Hybrid" Processors (P-Core/E-Core) */
#define INTEL_LAKEFIELD IFM(6, 0x8A) /* Sunny Cove / Tremont */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e889c3bab5a2..ca309a3227c7 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -217,7 +217,7 @@ void memset_io(volatile void __iomem *, int, size_t);
static inline void __iowrite32_copy(void __iomem *to, const void *from,
size_t count)
{
- asm volatile("rep ; movsl"
+ asm volatile("rep movsl"
: "=&c"(count), "=&D"(to), "=&S"(from)
: "0"(count), "1"(to), "2"(from)
: "memory");
@@ -282,7 +282,7 @@ static inline void outs##bwl(u16 port, const void *addr, unsigned long count) \
count--; \
} \
} else { \
- asm volatile("rep; outs" #bwl \
+ asm volatile("rep outs" #bwl \
: "+S"(addr), "+c"(count) \
: "d"(port) : "memory"); \
} \
@@ -298,7 +298,7 @@ static inline void ins##bwl(u16 port, void *addr, unsigned long count) \
count--; \
} \
} else { \
- asm volatile("rep; ins" #bwl \
+ asm volatile("rep ins" #bwl \
: "+D"(addr), "+c"(count) \
: "d"(port) : "memory"); \
} \
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 5432457d2338..f2ad77929d6e 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -8,6 +8,9 @@
# define PA_PGD 2
# define PA_SWAP_PAGE 3
# define PAGES_NR 4
+#else
+/* Size of each exception handler referenced by the IDT */
+# define KEXEC_DEBUG_EXC_HANDLER_SIZE 6 /* PUSHI, PUSHI, 2-byte JMP */
#endif
# define KEXEC_CONTROL_PAGE_SIZE 4096
@@ -59,6 +62,10 @@ struct kimage;
extern unsigned long kexec_va_control_page;
extern unsigned long kexec_pa_table_page;
extern unsigned long kexec_pa_swap_page;
+extern gate_desc kexec_debug_idt[];
+extern unsigned char kexec_debug_exc_vectors[];
+extern uint16_t kexec_debug_8250_port;
+extern unsigned long kexec_debug_8250_mmio32;
#endif
/*
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 823c0434bbad..8d50e3e0a19b 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -21,6 +21,7 @@ KVM_X86_OP(has_emulated_msr)
KVM_X86_OP(vcpu_after_set_cpuid)
KVM_X86_OP(vm_init)
KVM_X86_OP_OPTIONAL(vm_destroy)
+KVM_X86_OP_OPTIONAL(vm_pre_destroy)
KVM_X86_OP_OPTIONAL_RET0(vcpu_precreate)
KVM_X86_OP(vcpu_create)
KVM_X86_OP(vcpu_free)
@@ -115,6 +116,7 @@ KVM_X86_OP_OPTIONAL(pi_start_assignment)
KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
+KVM_X86_OP_OPTIONAL(protected_apic_has_interrupt)
KVM_X86_OP_OPTIONAL(set_hv_timer)
KVM_X86_OP_OPTIONAL(cancel_hv_timer)
KVM_X86_OP(setup_mce)
@@ -126,6 +128,7 @@ KVM_X86_OP(enable_smi_window)
#endif
KVM_X86_OP_OPTIONAL(dev_get_attr)
KVM_X86_OP_OPTIONAL(mem_enc_ioctl)
+KVM_X86_OP_OPTIONAL(vcpu_mem_enc_ioctl)
KVM_X86_OP_OPTIONAL(mem_enc_register_region)
KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a884ab544335..b4a391929cdb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -34,7 +34,9 @@
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
+#include <asm/msr.h>
#include <asm/asm.h>
+#include <asm/irq_remapping.h>
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/reboot.h>
@@ -124,7 +126,8 @@
KVM_ARCH_REQ_FLAGS(31, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_TLB_FLUSH \
KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_UPDATE_PROTECTED_GUEST_STATE KVM_ARCH_REQ(34)
+#define KVM_REQ_UPDATE_PROTECTED_GUEST_STATE \
+ KVM_ARCH_REQ_FLAGS(34, KVM_REQUEST_WAIT)
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -410,7 +413,6 @@ struct kvm_rmap_head {
};
struct kvm_pio_request {
- unsigned long linear_rip;
unsigned long count;
int in;
int port;
@@ -607,8 +609,15 @@ struct kvm_pmu {
struct kvm_pmu_ops;
enum {
- KVM_DEBUGREG_BP_ENABLED = 1,
- KVM_DEBUGREG_WONT_EXIT = 2,
+ KVM_DEBUGREG_BP_ENABLED = BIT(0),
+ KVM_DEBUGREG_WONT_EXIT = BIT(1),
+ /*
+ * Guest debug registers (DR0-3, DR6 and DR7) are saved/restored by
+	 * Guest debug registers (DR0-3, DR6 and DR7) are saved/restored by
+	 * hardware on exit from or entry to the guest. KVM needn't switch them.
+	 * DR0-3, DR6 and DR7 are set to their architectural INIT value on VM
+	 * exit, so host values need to be restored.
+ */
+ KVM_DEBUGREG_AUTO_SWITCH = BIT(2),
};
struct kvm_mtrr {
@@ -909,6 +918,7 @@ struct kvm_vcpu_arch {
bool emulate_regs_need_sync_to_vcpu;
bool emulate_regs_need_sync_from_vcpu;
int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
+ unsigned long cui_linear_rip;
gpa_t time;
s8 pvclock_tsc_shift;
@@ -1026,6 +1036,7 @@ struct kvm_vcpu_arch {
int pending_ioapic_eoi;
int pending_external_vector;
+ int highest_stale_pending_ioapic_eoi;
/* be preempted when it's in kernel-mode(cpl=0) */
bool preempted_in_kernel;
@@ -1472,8 +1483,13 @@ struct kvm_arch {
struct once nx_once;
#ifdef CONFIG_X86_64
- /* The number of TDP MMU pages across all roots. */
+#ifdef CONFIG_KVM_PROVE_MMU
+ /*
+ * The number of TDP MMU pages across all roots. Used only to sanity
+ * check that KVM isn't leaking TDP MMU pages.
+ */
atomic64_t tdp_mmu_pages;
+#endif
/*
* List of struct kvm_mmu_pages being used as roots.
@@ -1564,6 +1580,13 @@ struct kvm_arch {
struct kvm_mmu_memory_cache split_desc_cache;
gfn_t gfn_direct_bits;
+
+ /*
+	 * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A zero
+	 * value indicates CPU dirty logging is unsupported or disabled in
+	 * the current VM.
+ */
+ int cpu_dirty_log_size;
};
struct kvm_vm_stat {
@@ -1667,6 +1690,7 @@ struct kvm_x86_ops {
unsigned int vm_size;
int (*vm_init)(struct kvm *kvm);
void (*vm_destroy)(struct kvm *kvm);
+ void (*vm_pre_destroy)(struct kvm *kvm);
/* Create, but do not attach this VCPU */
int (*vcpu_precreate)(struct kvm *kvm);
@@ -1816,11 +1840,6 @@ struct kvm_x86_ops {
struct x86_exception *exception);
void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
- /*
- * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A zero
- * value indicates CPU dirty logging is unsupported or disabled.
- */
- int cpu_dirty_log_size;
void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);
const struct kvm_x86_nested_ops *nested_ops;
@@ -1834,6 +1853,7 @@ struct kvm_x86_ops {
void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
+ bool (*protected_apic_has_interrupt)(struct kvm_vcpu *vcpu);
int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
bool *expired);
@@ -1850,6 +1870,7 @@ struct kvm_x86_ops {
int (*dev_get_attr)(u32 group, u64 attr, u64 *val);
int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
+ int (*vcpu_mem_enc_ioctl)(struct kvm_vcpu *vcpu, void __user *argp);
int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
@@ -1923,6 +1944,7 @@ struct kvm_arch_async_pf {
extern u32 __read_mostly kvm_nr_uret_msrs;
extern bool __read_mostly allow_smaller_maxphyaddr;
extern bool __read_mostly enable_apicv;
+extern bool __read_mostly enable_device_posted_irqs;
extern struct kvm_x86_ops kvm_x86_ops;
#define kvm_x86_call(func) static_call(kvm_x86_##func)
@@ -2272,7 +2294,7 @@ static inline unsigned long read_msr(unsigned long msr)
{
u64 value;
- rdmsrl(msr, value);
+ rdmsrq(msr, value);
return value;
}
#endif
@@ -2326,6 +2348,7 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
int kvm_add_user_return_msr(u32 msr);
int kvm_find_user_return_msr(u32 msr);
int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
+void kvm_user_return_msr_update_cache(unsigned int index, u64 val);
static inline bool kvm_is_supported_user_return_msr(u32 msr)
{
@@ -2409,7 +2432,12 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
KVM_X86_QUIRK_FIX_HYPERCALL_INSN | \
KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS | \
KVM_X86_QUIRK_SLOT_ZAP_ALL | \
- KVM_X86_QUIRK_STUFF_FEATURE_MSRS)
+ KVM_X86_QUIRK_STUFF_FEATURE_MSRS | \
+ KVM_X86_QUIRK_IGNORE_GUEST_PAT)
+
+#define KVM_X86_CONDITIONAL_QUIRKS \
+ (KVM_X86_QUIRK_CD_NW_CLEARED | \
+ KVM_X86_QUIRK_IGNORE_GUEST_PAT)
/*
* KVM previously used a u32 field in kvm_run to indicate the hypercall was
@@ -2418,4 +2446,9 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
*/
#define KVM_EXIT_HYPERCALL_MBZ GENMASK_ULL(31, 1)
+static inline bool kvm_arch_has_irq_bypass(void)
+{
+ return enable_device_posted_irqs;
+}
+
#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index b51d8a4673f5..9d38ae744a2e 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -141,5 +141,15 @@
#define SYM_FUNC_START_WEAK_NOALIGN(name) \
SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
+/*
+ * Expose 'sym' to the startup code in arch/x86/boot/startup/, by emitting an
+ * alias prefixed with __pi_
+ */
+#ifdef __ASSEMBLER__
+#define SYM_PIC_ALIAS(sym) SYM_ALIAS(__pi_ ## sym, sym, SYM_L_GLOBAL)
+#else
+#define SYM_PIC_ALIAS(sym) extern typeof(sym) __PASTE(__pi_, sym) __alias(sym)
+#endif
+
#endif /* _ASM_X86_LINKAGE_H */
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 1530ee301dfe..ea6494628cb0 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -61,7 +61,7 @@ void __init sev_es_init_vc_handling(void);
static inline u64 sme_get_me_mask(void)
{
- return RIP_REL_REF(sme_me_mask);
+ return sme_me_mask;
}
#define __bss_decrypted __section(".bss..decrypted")
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 695e569159c1..8b41f26f003b 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H
+#include <asm/msr.h>
+
struct cpu_signature {
unsigned int sig;
unsigned int pf;
@@ -17,10 +19,12 @@ struct ucode_cpu_info {
void load_ucode_bsp(void);
void load_ucode_ap(void);
void microcode_bsp_resume(void);
+bool __init microcode_loader_disabled(void);
#else
static inline void load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void microcode_bsp_resume(void) { }
+static inline bool __init microcode_loader_disabled(void) { return false; }
#endif
extern unsigned long initrd_start_early;
@@ -61,7 +65,7 @@ static inline u32 intel_get_microcode_revision(void)
{
u32 rev, dummy;
- native_wrmsrl(MSR_IA32_UCODE_REV, 0);
+ native_wrmsrq(MSR_IA32_UCODE_REV, 0);
/* As documented in the SDM: Do a CPUID 1 here */
native_cpuid_eax(1);
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 8b8055a8eb9e..0fe9c569d171 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -16,6 +16,8 @@
#define MM_CONTEXT_LOCK_LAM 2
/* Allow LAM and SVA coexisting */
#define MM_CONTEXT_FORCE_TAGGED_SVA 3
+/* Do not track this mm in mm_cpumask */
+#define MM_CONTEXT_NOTRACK 4
/*
* x86 has arch-specific MMU state beyond what lives in mm_struct.
@@ -44,9 +46,7 @@ typedef struct {
struct ldt_struct *ldt;
#endif
-#ifdef CONFIG_X86_64
unsigned long flags;
-#endif
#ifdef CONFIG_ADDRESS_MASKING
/* Active LAM mode: X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 2398058b6e83..73bf3b1b44e8 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -190,7 +190,7 @@ extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
#define activate_mm(prev, next) \
do { \
paravirt_enter_mmap(next); \
- switch_mm((prev), (next), NULL); \
+ switch_mm_irqs_off((prev), (next), NULL); \
} while (0);
#ifdef CONFIG_X86_32
@@ -247,6 +247,16 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
}
#endif
+static inline bool is_notrack_mm(struct mm_struct *mm)
+{
+ return test_bit(MM_CONTEXT_NOTRACK, &mm->context.flags);
+}
+
+static inline void set_notrack_mm(struct mm_struct *mm)
+{
+ set_bit(MM_CONTEXT_NOTRACK, &mm->context.flags);
+}
+
/*
* We only want to enforce protection keys on the current process
* because we effectively have no access to PKRU for other
@@ -272,4 +282,7 @@ unsigned long __get_current_cr3_fast(void);
#include <asm-generic/mmu_context.h>
+extern struct mm_struct *use_temporary_mm(struct mm_struct *temp_mm);
+extern void unuse_temporary_mm(struct mm_struct *prev_mm);
+
#endif /* _ASM_X86_MMU_CONTEXT_H */
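The newly exported temporary-mm helpers are declared above without a usage example; as a sketch under assumptions (the mm passed in and the work done while it is active are hypothetical), callers presumably pair them around a short critical section:

static void example_with_temporary_mm(struct mm_struct *patch_mm)
{
	struct mm_struct *prev;

	prev = use_temporary_mm(patch_mm);
	/* ... touch mappings that exist only in patch_mm ... */
	unuse_temporary_mm(prev);
}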
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index bab5ccfc60a7..e1752ba47e67 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
+#include <asm/msr.h>
#include <hyperv/hvhdk.h>
/*
@@ -268,11 +269,12 @@ int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
#ifdef CONFIG_AMD_MEM_ENCRYPT
bool hv_ghcb_negotiate_protocol(void);
void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
-int hv_snp_boot_ap(u32 cpu, unsigned long start_ip);
+int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu);
#else
static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
-static inline int hv_snp_boot_ap(u32 cpu, unsigned long start_ip) { return 0; }
+static inline int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip,
+ unsigned int cpu) { return 0; }
#endif
#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
@@ -304,8 +306,9 @@ void hv_set_non_nested_msr(unsigned int reg, u64 value);
static __always_inline u64 hv_raw_get_msr(unsigned int reg)
{
- return __rdmsr(reg);
+ return native_rdmsrq(reg);
}
+int hv_apicid_to_vp_index(u32 apic_id);
#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
@@ -327,6 +330,7 @@ static inline void hv_set_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_msr(unsigned int reg) { return 0; }
static inline void hv_set_non_nested_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_non_nested_msr(unsigned int reg) { return 0; }
+static inline int hv_apicid_to_vp_index(u32 apic_id) { return -EINVAL; }
#endif /* CONFIG_HYPERV */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index e6134ef2263d..b7dded3c8113 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -211,6 +211,14 @@
* VERW clears CPU Register
* File.
*/
+#define ARCH_CAP_ITS_NO BIT_ULL(62) /*
+ * Not susceptible to
+ * Indirect Target Selection.
+ * This bit is not set by
+ * HW, but is synthesized by
+ * VMMs for guests to know
+ * their affected status.
+ */
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
@@ -525,7 +533,7 @@
#define MSR_HWP_CAPABILITIES 0x00000771
#define MSR_HWP_REQUEST_PKG 0x00000772
#define MSR_HWP_INTERRUPT 0x00000773
-#define MSR_HWP_REQUEST 0x00000774
+#define MSR_HWP_REQUEST 0x00000774
#define MSR_HWP_STATUS 0x00000777
/* CPUID.6.EAX */
@@ -542,16 +550,16 @@
#define HWP_LOWEST_PERF(x) (((x) >> 24) & 0xff)
/* IA32_HWP_REQUEST */
-#define HWP_MIN_PERF(x) (x & 0xff)
-#define HWP_MAX_PERF(x) ((x & 0xff) << 8)
+#define HWP_MIN_PERF(x) (x & 0xff)
+#define HWP_MAX_PERF(x) ((x & 0xff) << 8)
#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16)
-#define HWP_ENERGY_PERF_PREFERENCE(x) (((unsigned long long) x & 0xff) << 24)
+#define HWP_ENERGY_PERF_PREFERENCE(x) (((u64)x & 0xff) << 24)
#define HWP_EPP_PERFORMANCE 0x00
#define HWP_EPP_BALANCE_PERFORMANCE 0x80
#define HWP_EPP_BALANCE_POWERSAVE 0xC0
#define HWP_EPP_POWERSAVE 0xFF
-#define HWP_ACTIVITY_WINDOW(x) ((unsigned long long)(x & 0xff3) << 32)
-#define HWP_PACKAGE_CONTROL(x) ((unsigned long long)(x & 0x1) << 42)
+#define HWP_ACTIVITY_WINDOW(x) ((u64)(x & 0xff3) << 32)
+#define HWP_PACKAGE_CONTROL(x) ((u64)(x & 0x1) << 42)
/* IA32_HWP_STATUS */
#define HWP_GUARANTEED_CHANGE(x) (x & 0x1)
@@ -594,7 +602,11 @@
/* V6 PMON MSR range */
#define MSR_IA32_PMC_V6_GP0_CTR 0x1900
#define MSR_IA32_PMC_V6_GP0_CFG_A 0x1901
+#define MSR_IA32_PMC_V6_GP0_CFG_B 0x1902
+#define MSR_IA32_PMC_V6_GP0_CFG_C 0x1903
#define MSR_IA32_PMC_V6_FX0_CTR 0x1980
+#define MSR_IA32_PMC_V6_FX0_CFG_B 0x1982
+#define MSR_IA32_PMC_V6_FX0_CFG_C 0x1983
#define MSR_IA32_PMC_V6_STEP 4
/* KeyID partitioning between MKTME and TDX */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 9397a319d165..9c2ea29e12a9 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -12,6 +12,7 @@
#include <uapi/asm/msr.h>
#include <asm/shared/msr.h>
+#include <linux/types.h>
#include <linux/percpu.h>
struct msr_info {
@@ -37,23 +38,6 @@ struct saved_msrs {
};
/*
- * both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A"
- * constraint has different meanings. For i386, "A" means exactly
- * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
- * it means rax *or* rdx.
- */
-#ifdef CONFIG_X86_64
-/* Using 64-bit values saves one instruction clearing the high half of low */
-#define DECLARE_ARGS(val, low, high) unsigned long low, high
-#define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32)
-#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
-#else
-#define DECLARE_ARGS(val, low, high) unsigned long long val
-#define EAX_EDX_VAL(val, low, high) (val)
-#define EAX_EDX_RET(val, low, high) "=A" (val)
-#endif
-
-/*
* Be very careful with includes. This header is prone to include loops.
*/
#include <asm/atomic.h>
@@ -63,13 +47,13 @@ struct saved_msrs {
DECLARE_TRACEPOINT(read_msr);
DECLARE_TRACEPOINT(write_msr);
DECLARE_TRACEPOINT(rdpmc);
-extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
-extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
-extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
+extern void do_trace_write_msr(u32 msr, u64 val, int failed);
+extern void do_trace_read_msr(u32 msr, u64 val, int failed);
+extern void do_trace_rdpmc(u32 msr, u64 val, int failed);
#else
-static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
-static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
-static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
+static inline void do_trace_write_msr(u32 msr, u64 val, int failed) {}
+static inline void do_trace_read_msr(u32 msr, u64 val, int failed) {}
+static inline void do_trace_rdpmc(u32 msr, u64 val, int failed) {}
#endif
/*
@@ -79,9 +63,9 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
* think of extending them - you will be slapped with a stinking trout or a frozen
* shark will reach you, wherever you are! You've been warned.
*/
-static __always_inline unsigned long long __rdmsr(unsigned int msr)
+static __always_inline u64 __rdmsr(u32 msr)
{
- DECLARE_ARGS(val, low, high);
+ EAX_EDX_DECLARE_ARGS(val, low, high);
asm volatile("1: rdmsr\n"
"2:\n"
@@ -91,12 +75,12 @@ static __always_inline unsigned long long __rdmsr(unsigned int msr)
return EAX_EDX_VAL(val, low, high);
}
-static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
+static __always_inline void __wrmsrq(u32 msr, u64 val)
{
asm volatile("1: wrmsr\n"
"2:\n"
_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
- : : "c" (msr), "a"(low), "d" (high) : "memory");
+ : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)) : "memory");
}
#define native_rdmsr(msr, val1, val2) \
@@ -106,16 +90,20 @@ do { \
(void)((val2) = (u32)(__val >> 32)); \
} while (0)
+static __always_inline u64 native_rdmsrq(u32 msr)
+{
+ return __rdmsr(msr);
+}
+
#define native_wrmsr(msr, low, high) \
- __wrmsr(msr, low, high)
+ __wrmsrq((msr), (u64)(high) << 32 | (low))
-#define native_wrmsrl(msr, val) \
- __wrmsr((msr), (u32)((u64)(val)), \
- (u32)((u64)(val) >> 32))
+#define native_wrmsrq(msr, val) \
+ __wrmsrq((msr), (val))
-static inline unsigned long long native_read_msr(unsigned int msr)
+static inline u64 native_read_msr(u32 msr)
{
- unsigned long long val;
+ u64 val;
val = __rdmsr(msr);
@@ -125,34 +113,35 @@ static inline unsigned long long native_read_msr(unsigned int msr)
return val;
}
-static inline unsigned long long native_read_msr_safe(unsigned int msr,
- int *err)
+static inline int native_read_msr_safe(u32 msr, u64 *p)
{
- DECLARE_ARGS(val, low, high);
+ int err;
+ EAX_EDX_DECLARE_ARGS(val, low, high);
asm volatile("1: rdmsr ; xor %[err],%[err]\n"
"2:\n\t"
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
- : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
+ : [err] "=r" (err), EAX_EDX_RET(val, low, high)
: "c" (msr));
if (tracepoint_enabled(read_msr))
- do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
- return EAX_EDX_VAL(val, low, high);
+ do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), err);
+
+ *p = EAX_EDX_VAL(val, low, high);
+
+ return err;
}
/* Can be uninlined because referenced by paravirt */
-static inline void notrace
-native_write_msr(unsigned int msr, u32 low, u32 high)
+static inline void notrace native_write_msr(u32 msr, u64 val)
{
- __wrmsr(msr, low, high);
+ native_wrmsrq(msr, val);
if (tracepoint_enabled(write_msr))
- do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
+ do_trace_write_msr(msr, val, 0);
}
/* Can be uninlined because referenced by paravirt */
-static inline int notrace
-native_write_msr_safe(unsigned int msr, u32 low, u32 high)
+static inline int notrace native_write_msr_safe(u32 msr, u64 val)
{
int err;
@@ -160,73 +149,19 @@ native_write_msr_safe(unsigned int msr, u32 low, u32 high)
"2:\n\t"
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
: [err] "=a" (err)
- : "c" (msr), "0" (low), "d" (high)
+ : "c" (msr), "0" ((u32)val), "d" ((u32)(val >> 32))
: "memory");
if (tracepoint_enabled(write_msr))
- do_trace_write_msr(msr, ((u64)high << 32 | low), err);
+ do_trace_write_msr(msr, val, err);
return err;
}
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);
-/**
- * rdtsc() - returns the current TSC without ordering constraints
- *
- * rdtsc() returns the result of RDTSC as a 64-bit integer. The
- * only ordering constraint it supplies is the ordering implied by
- * "asm volatile": it will put the RDTSC in the place you expect. The
- * CPU can and will speculatively execute that RDTSC, though, so the
- * results can be non-monotonic if compared on different CPUs.
- */
-static __always_inline unsigned long long rdtsc(void)
+static inline u64 native_read_pmc(int counter)
{
- DECLARE_ARGS(val, low, high);
-
- asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
-
- return EAX_EDX_VAL(val, low, high);
-}
-
-/**
- * rdtsc_ordered() - read the current TSC in program order
- *
- * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
- * It is ordered like a load to a global in-memory counter. It should
- * be impossible to observe non-monotonic rdtsc_unordered() behavior
- * across multiple CPUs as long as the TSC is synced.
- */
-static __always_inline unsigned long long rdtsc_ordered(void)
-{
- DECLARE_ARGS(val, low, high);
-
- /*
- * The RDTSC instruction is not ordered relative to memory
- * access. The Intel SDM and the AMD APM are both vague on this
- * point, but empirically an RDTSC instruction can be
- * speculatively executed before prior loads. An RDTSC
- * immediately after an appropriate barrier appears to be
- * ordered as a normal load, that is, it provides the same
- * ordering guarantees as reading from a global memory location
- * that some other imaginary CPU is updating continuously with a
- * time stamp.
- *
- * Thus, use the preferred barrier on the respective CPU, aiming for
- * RDTSCP as the default.
- */
- asm volatile(ALTERNATIVE_2("rdtsc",
- "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
- "rdtscp", X86_FEATURE_RDTSCP)
- : EAX_EDX_RET(val, low, high)
- /* RDTSCP clobbers ECX with MSR_TSC_AUX. */
- :: "ecx");
-
- return EAX_EDX_VAL(val, low, high);
-}
-
-static inline unsigned long long native_read_pmc(int counter)
-{
- DECLARE_ARGS(val, low, high);
+ EAX_EDX_DECLARE_ARGS(val, low, high);
asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
if (tracepoint_enabled(rdpmc))
@@ -251,56 +186,49 @@ do { \
(void)((high) = (u32)(__val >> 32)); \
} while (0)
-static inline void wrmsr(unsigned int msr, u32 low, u32 high)
+static inline void wrmsr(u32 msr, u32 low, u32 high)
{
- native_write_msr(msr, low, high);
+ native_write_msr(msr, (u64)high << 32 | low);
}
-#define rdmsrl(msr, val) \
+#define rdmsrq(msr, val) \
((val) = native_read_msr((msr)))
-static inline void wrmsrl(unsigned int msr, u64 val)
+static inline void wrmsrq(u32 msr, u64 val)
{
- native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
+ native_write_msr(msr, val);
}
/* wrmsr with exception handling */
-static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
+static inline int wrmsrq_safe(u32 msr, u64 val)
{
- return native_write_msr_safe(msr, low, high);
+ return native_write_msr_safe(msr, val);
}
/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high) \
({ \
- int __err; \
- u64 __val = native_read_msr_safe((msr), &__err); \
+ u64 __val; \
+ int __err = native_read_msr_safe((msr), &__val); \
(*low) = (u32)__val; \
(*high) = (u32)(__val >> 32); \
__err; \
})
-static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
+static inline int rdmsrq_safe(u32 msr, u64 *p)
{
- int err;
-
- *p = native_read_msr_safe(msr, &err);
- return err;
+ return native_read_msr_safe(msr, p);
}
-#define rdpmc(counter, low, high) \
-do { \
- u64 _l = native_read_pmc((counter)); \
- (low) = (u32)_l; \
- (high) = (u32)(_l >> 32); \
-} while (0)
-
-#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
+static __always_inline u64 rdpmc(int counter)
+{
+ return native_read_pmc(counter);
+}
#endif /* !CONFIG_PARAVIRT_XXL */
/* Instruction opcode for WRMSRNS supported in binutils >= 2.40 */
-#define WRMSRNS _ASM_BYTES(0x0f,0x01,0xc6)
+#define ASM_WRMSRNS _ASM_BYTES(0x0f,0x01,0xc6)
/* Non-serializing WRMSR, when available. Falls back to a serializing WRMSR. */
static __always_inline void wrmsrns(u32 msr, u64 val)
@@ -309,17 +237,17 @@ static __always_inline void wrmsrns(u32 msr, u64 val)
* WRMSR is 2 bytes. WRMSRNS is 3 bytes. Pad WRMSR with a redundant
* DS prefix to avoid a trailing NOP.
*/
- asm volatile("1: " ALTERNATIVE("ds wrmsr", WRMSRNS, X86_FEATURE_WRMSRNS)
+ asm volatile("1: " ALTERNATIVE("ds wrmsr", ASM_WRMSRNS, X86_FEATURE_WRMSRNS)
"2: " _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
: : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)));
}
/*
- * 64-bit version of wrmsr_safe():
+ * Dual u32 version of wrmsrq_safe():
*/
-static inline int wrmsrl_safe(u32 msr, u64 val)
+static inline int wrmsr_safe(u32 msr, u32 low, u32 high)
{
- return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
+ return wrmsrq_safe(msr, (u64)high << 32 | low);
}
struct msr __percpu *msrs_alloc(void);
@@ -330,14 +258,14 @@ int msr_clear_bit(u32 msr, u8 bit);
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
-int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
+int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
-int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
+int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else /* CONFIG_SMP */
@@ -351,14 +279,14 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
wrmsr(msr_no, l, h);
return 0;
}
-static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+static inline int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
- rdmsrl(msr_no, *q);
+ rdmsrq(msr_no, *q);
return 0;
}
-static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+static inline int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
- wrmsrl(msr_no, q);
+ wrmsrq(msr_no, q);
return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
@@ -380,13 +308,13 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
return wrmsr_safe(msr_no, l, h);
}
-static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+static inline int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
- return rdmsrl_safe(msr_no, q);
+ return rdmsrq_safe(msr_no, q);
}
-static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+static inline int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
- return wrmsrl_safe(msr_no, q);
+ return wrmsrq_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
@@ -397,5 +325,11 @@ static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
return wrmsr_safe_regs(regs);
}
#endif /* CONFIG_SMP */
+
+/* Compatibility wrappers: */
+#define rdmsrl(msr, val) rdmsrq(msr, val)
+#define wrmsrl(msr, val) wrmsrq(msr, val)
+#define rdmsrl_on_cpu(cpu, msr, q) rdmsrq_on_cpu(cpu, msr, q)
+
#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_MSR_H */
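Not part of the patch: the msr.h rework replaces the (low, high) register pairs with full u64 values and keeps rdmsrl()/wrmsrl() as compatibility wrappers. A minimal usage sketch; the MSR choice is arbitrary:

/* Illustrative only: 64-bit MSR accessors after the rename. */
u64 misc;

rdmsrq(MSR_IA32_MISC_ENABLE, misc);		/* was: rdmsrl() */
wrmsrq(MSR_IA32_MISC_ENABLE, misc);		/* was: wrmsrl() */

if (rdmsrq_safe(MSR_IA32_MISC_ENABLE, &misc))	/* returns 0 or -EIO */
	pr_warn("MSR read faulted\n");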
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index ce857ef54cf1..dd2b129b0418 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -25,29 +25,31 @@
#define TPAUSE_C01_STATE 1
#define TPAUSE_C02_STATE 0
-static __always_inline void __monitor(const void *eax, unsigned long ecx,
- unsigned long edx)
+static __always_inline void __monitor(const void *eax, u32 ecx, u32 edx)
{
- /* "monitor %eax, %ecx, %edx;" */
- asm volatile(".byte 0x0f, 0x01, 0xc8;"
- :: "a" (eax), "c" (ecx), "d"(edx));
+ /*
+ * Use the instruction mnemonic with implicit operands, as the LLVM
+ * assembler fails to assemble the mnemonic with explicit operands:
+ */
+ asm volatile("monitor" :: "a" (eax), "c" (ecx), "d" (edx));
}
-static __always_inline void __monitorx(const void *eax, unsigned long ecx,
- unsigned long edx)
+static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx)
{
- /* "monitorx %eax, %ecx, %edx;" */
- asm volatile(".byte 0x0f, 0x01, 0xfa;"
+ /* "monitorx %eax, %ecx, %edx" */
+ asm volatile(".byte 0x0f, 0x01, 0xfa"
:: "a" (eax), "c" (ecx), "d"(edx));
}
-static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
+static __always_inline void __mwait(u32 eax, u32 ecx)
{
mds_idle_clear_cpu_buffers();
- /* "mwait %eax, %ecx;" */
- asm volatile(".byte 0x0f, 0x01, 0xc9;"
- :: "a" (eax), "c" (ecx));
+ /*
+ * Use the instruction mnemonic with implicit operands, as the LLVM
+ * assembler fails to assemble the mnemonic with explicit operands:
+ */
+ asm volatile("mwait" :: "a" (eax), "c" (ecx));
}
/*
@@ -76,13 +78,12 @@ static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
* EAX (logical) address to monitor
* ECX #GP if not zero
*/
-static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
- unsigned long ecx)
+static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx)
{
/* No MDS buffer clear as this is AMD/HYGON only */
- /* "mwaitx %eax, %ebx, %ecx;" */
- asm volatile(".byte 0x0f, 0x01, 0xfb;"
+ /* "mwaitx %eax, %ebx, %ecx" */
+ asm volatile(".byte 0x0f, 0x01, 0xfb"
:: "a" (eax), "b" (ebx), "c" (ecx));
}
@@ -95,12 +96,11 @@ static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
* executing mwait, it would otherwise go unnoticed and the next tick
* would not be reprogrammed accordingly before mwait ever wakes up.
*/
-static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+static __always_inline void __sti_mwait(u32 eax, u32 ecx)
{
mds_idle_clear_cpu_buffers();
- /* "mwait %eax, %ecx;" */
- asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
- :: "a" (eax), "c" (ecx));
+
+ asm volatile("sti; mwait" :: "a" (eax), "c" (ecx));
}
/*
@@ -113,16 +113,13 @@ static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
* New with Core Duo processors, MWAIT can take some hints based on CPU
* capability.
*/
-static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+static __always_inline void mwait_idle_with_hints(u32 eax, u32 ecx)
{
if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
- if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
- mb();
- clflush((void *)&current_thread_info()->flags);
- mb();
- }
+ const void *addr = &current_thread_info()->flags;
- __monitor((void *)&current_thread_info()->flags, 0, 0);
+ alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
+ __monitor(addr, 0, 0);
if (!need_resched()) {
if (ecx & 1) {
@@ -144,16 +141,9 @@ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned lo
*/
static inline void __tpause(u32 ecx, u32 edx, u32 eax)
{
- /* "tpause %ecx, %edx, %eax;" */
- #ifdef CONFIG_AS_TPAUSE
- asm volatile("tpause %%ecx\n"
- :
- : "c"(ecx), "d"(edx), "a"(eax));
- #else
- asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1\t\n"
- :
- : "c"(ecx), "d"(edx), "a"(eax));
- #endif
+ /* "tpause %ecx" */
+ asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1"
+ :: "c" (ecx), "d" (edx), "a" (eax));
}
#endif /* _ASM_X86_MWAIT_H */
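Not part of the patch: a sketch of the MONITOR/MWAIT idle step that the helpers above implement, assuming caller-chosen hint values:

/* Illustrative only: arm the monitor on the thread flags, re-check for
 * pending work, then MWAIT until the monitored line is written. */
static __always_inline void idle_step(u32 eax_hint, u32 ecx_hint)
{
	__monitor(&current_thread_info()->flags, 0, 0);
	if (!need_resched())
		__mwait(eax_hint, ecx_hint);
}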
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index f677382093f3..79d88d12c8fb 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -14,12 +14,26 @@ extern void release_perfctr_nmi(unsigned int);
extern int reserve_evntsel_nmi(unsigned int);
extern void release_evntsel_nmi(unsigned int);
-extern int unknown_nmi_panic;
-
#endif /* CONFIG_X86_LOCAL_APIC */
+extern int unknown_nmi_panic;
+extern int panic_on_unrecovered_nmi;
+extern int panic_on_io_nmi;
+
+/* NMI handler flags */
#define NMI_FLAG_FIRST 1
+/**
+ * enum - NMI types.
+ * @NMI_LOCAL: Local NMI, CPU-specific NMI generated by the Local APIC.
+ * @NMI_UNKNOWN: Unknown NMI, the source of the NMI may not be identified.
+ * @NMI_SERR: System Error NMI, typically triggered by PCI errors.
+ * @NMI_IO_CHECK: I/O Check NMI, related to I/O errors.
+ * @NMI_MAX: Maximum value for NMI types.
+ *
+ * NMI types are used to categorize NMIs and to dispatch them to the
+ * appropriate handler.
+ */
enum {
NMI_LOCAL=0,
NMI_UNKNOWN,
@@ -28,6 +42,7 @@ enum {
NMI_MAX
};
+/* NMI handler return values */
#define NMI_DONE 0
#define NMI_HANDLED 1
@@ -41,6 +56,25 @@ struct nmiaction {
const char *name;
};
+/**
+ * register_nmi_handler - Register a handler for a specific NMI type
+ * @t: NMI type (e.g. NMI_LOCAL)
+ * @fn: The NMI handler
+ * @fg: Flags associated with the NMI handler
+ * @n: Name of the NMI handler
+ * @init: Optional __init* attributes for struct nmiaction
+ *
+ * Adds the provided handler to the list of handlers for the specified
+ * NMI type. Handlers flagged with NMI_FLAG_FIRST would be executed first.
+ *
+ * Sometimes the source of an NMI can't be reliably determined which
+ * results in an NMI being tagged as "unknown". Register an additional
+ * handler using the NMI type - NMI_UNKNOWN to handle such cases. The
+ * caller would get one last chance to assume responsibility for the
+ * NMI.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
#define register_nmi_handler(t, fn, fg, n, init...) \
({ \
static struct nmiaction init fn##_na = { \
@@ -54,7 +88,16 @@ struct nmiaction {
int __register_nmi_handler(unsigned int, struct nmiaction *);
-void unregister_nmi_handler(unsigned int, const char *);
+/**
+ * unregister_nmi_handler - Unregister a handler for a specific NMI type
+ * @type: NMI type (e.g. NMI_LOCAL)
+ * @name: Name of the NMI handler used during registration
+ *
+ * Removes the handler associated with the specified NMI type from the
+ * NMI handler list. The "name" is used as a lookup key to identify the
+ * handler.
+ */
+void unregister_nmi_handler(unsigned int type, const char *name);
void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler);
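Not part of the patch: a handler wired up through the interface documented above; the handler name and body are placeholders.

/* Illustrative only: claim or pass on an unknown-source NMI. */
static int my_unknown_nmi(unsigned int type, struct pt_regs *regs)
{
	/* Return NMI_HANDLED to claim the NMI, NMI_DONE to pass it on. */
	return NMI_DONE;
}

static int __init my_nmi_init(void)
{
	/* register_nmi_handler() returns 0 on success. */
	return register_nmi_handler(NMI_UNKNOWN, my_unknown_nmi, 0, "my_unknown_nmi");
}

static void my_nmi_exit(void)
{
	unregister_nmi_handler(NMI_UNKNOWN, "my_unknown_nmi");
}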
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 8a5cc8e70439..20d754b98f3f 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -269,7 +269,7 @@
* typically has NO_MELTDOWN).
*
* While retbleed_untrain_ret() doesn't clobber anything but requires stack,
- * entry_ibpb() will clobber AX, CX, DX.
+ * write_ibpb() will clobber AX, CX, DX.
*
* As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
* where we have a stack but before any RET instruction.
@@ -279,7 +279,7 @@
VALIDATE_UNRET_END
CALL_UNTRAIN_RET
ALTERNATIVE_2 "", \
- "call entry_ibpb", \ibpb_feature, \
+ "call write_ibpb", \ibpb_feature, \
__stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm
@@ -327,7 +327,7 @@
.endm
.macro CLEAR_BRANCH_HISTORY_VMEXIT
- ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT
+ ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_VMEXIT
.endm
#else
#define CLEAR_BRANCH_HISTORY
@@ -336,10 +336,14 @@
#else /* __ASSEMBLER__ */
+#define ITS_THUNK_SIZE 64
+
typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
+typedef u8 its_thunk_t[ITS_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
+extern its_thunk_t __x86_indirect_its_thunk_array[];
#ifdef CONFIG_MITIGATION_RETHUNK
extern void __x86_return_thunk(void);
@@ -363,12 +367,18 @@ static inline void srso_return_thunk(void) {}
static inline void srso_alias_return_thunk(void) {}
#endif
+#ifdef CONFIG_MITIGATION_ITS
+extern void its_return_thunk(void);
+#else
+static inline void its_return_thunk(void) {}
+#endif
+
extern void retbleed_return_thunk(void);
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
extern void entry_untrain_ret(void);
-extern void entry_ibpb(void);
+extern void write_ibpb(void);
#ifdef CONFIG_X86_64
extern void clear_bhb_loop(void);
@@ -514,11 +524,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
: "memory");
}
-extern u64 x86_pred_cmd;
-
static inline void indirect_branch_prediction_barrier(void)
{
- alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_IBPB);
+ asm_inline volatile(ALTERNATIVE("", "call write_ibpb", X86_FEATURE_IBPB)
+ : ASM_CALL_CONSTRAINT
+ :: "rax", "rcx", "rdx", "memory");
}
/* The Intel SPEC CTRL MSR base value cache */
@@ -561,7 +571,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
-DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
extern u16 mds_verw_sel;
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index a9b62e0e6f79..623f1e9f493e 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -73,7 +73,6 @@ extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;
extern void find_low_pfn_range(void);
-extern void setup_bootmem_allocator(void);
#endif /* !__ASSEMBLER__ */
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index d3aab6f4e59a..015d23f3e01f 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -62,7 +62,6 @@ static inline void clear_page(void *page)
void copy_page(void *to, void *from);
KCFI_REFERENCE(copy_page);
-#ifdef CONFIG_X86_5LEVEL
/*
* User space process size. This is the first address outside the user range.
* There are a few constraints that determine this:
@@ -93,7 +92,6 @@ static __always_inline unsigned long task_size_max(void)
return ret;
}
-#endif /* CONFIG_X86_5LEVEL */
#endif /* !__ASSEMBLER__ */
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 1faa8f88850a..7400dab373fe 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -41,25 +41,14 @@
#define __PAGE_OFFSET_BASE_L5 _AC(0xff11000000000000, UL)
#define __PAGE_OFFSET_BASE_L4 _AC(0xffff888000000000, UL)
-#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
#define __PAGE_OFFSET page_offset_base
-#else
-#define __PAGE_OFFSET __PAGE_OFFSET_BASE_L4
-#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
/* See Documentation/arch/x86/x86_64/mm.rst for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 52
-
-#ifdef CONFIG_X86_5LEVEL
#define __VIRTUAL_MASK_SHIFT (pgtable_l5_enabled() ? 56 : 47)
-/* See task_size_max() in <asm/page_64.h> */
-#else
-#define __VIRTUAL_MASK_SHIFT 47
-#define task_size_max() ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)
-#endif
#define TASK_SIZE_MAX task_size_max()
#define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE)
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 9f77bf03d747..018a8d906ca3 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -29,9 +29,7 @@
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
/* Physical address where kernel should be loaded. */
-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
- + (CONFIG_PHYSICAL_ALIGN - 1)) \
- & ~(CONFIG_PHYSICAL_ALIGN - 1))
+#define LOAD_PHYSICAL_ADDR __ALIGN_KERNEL_MASK(CONFIG_PHYSICAL_START, CONFIG_PHYSICAL_ALIGN - 1)
#define __START_KERNEL (__START_KERNEL_map + LOAD_PHYSICAL_ADDR)
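Not part of the patch: the replacement leans on the generic round-up helper; the expansion below is the generic definition as I understand it, and the config values are illustrative, not defaults.

/* __ALIGN_KERNEL_MASK(x, mask) expands to ((x) + (mask)) & ~(mask), so with
 * e.g. CONFIG_PHYSICAL_START = 0x100000 and CONFIG_PHYSICAL_ALIGN = 0x200000:
 *
 *   (0x100000 + 0x1fffff) & ~0x1fffff = 0x200000
 *
 * i.e. the load address is rounded up to the next CONFIG_PHYSICAL_ALIGN
 * boundary, exactly what the open-coded expression did before.
 */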
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index c4c23190925c..b5e59a7ba0d0 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -175,26 +175,24 @@ static inline void __write_cr4(unsigned long x)
PVOP_VCALL1(cpu.write_cr4, x);
}
-static inline u64 paravirt_read_msr(unsigned msr)
+static inline u64 paravirt_read_msr(u32 msr)
{
return PVOP_CALL1(u64, cpu.read_msr, msr);
}
-static inline void paravirt_write_msr(unsigned msr,
- unsigned low, unsigned high)
+static inline void paravirt_write_msr(u32 msr, u64 val)
{
- PVOP_VCALL3(cpu.write_msr, msr, low, high);
+ PVOP_VCALL2(cpu.write_msr, msr, val);
}
-static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
+static inline int paravirt_read_msr_safe(u32 msr, u64 *val)
{
- return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
+ return PVOP_CALL2(int, cpu.read_msr_safe, msr, val);
}
-static inline int paravirt_write_msr_safe(unsigned msr,
- unsigned low, unsigned high)
+static inline int paravirt_write_msr_safe(u32 msr, u64 val)
{
- return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
+ return PVOP_CALL2(int, cpu.write_msr_safe, msr, val);
}
#define rdmsr(msr, val1, val2) \
@@ -204,55 +202,46 @@ do { \
val2 = _l >> 32; \
} while (0)
-#define wrmsr(msr, val1, val2) \
-do { \
- paravirt_write_msr(msr, val1, val2); \
-} while (0)
+static __always_inline void wrmsr(u32 msr, u32 low, u32 high)
+{
+ paravirt_write_msr(msr, (u64)high << 32 | low);
+}
-#define rdmsrl(msr, val) \
+#define rdmsrq(msr, val) \
do { \
val = paravirt_read_msr(msr); \
} while (0)
-static inline void wrmsrl(unsigned msr, u64 val)
+static inline void wrmsrq(u32 msr, u64 val)
{
- wrmsr(msr, (u32)val, (u32)(val>>32));
+ paravirt_write_msr(msr, val);
}
-#define wrmsr_safe(msr, a, b) paravirt_write_msr_safe(msr, a, b)
+static inline int wrmsrq_safe(u32 msr, u64 val)
+{
+ return paravirt_write_msr_safe(msr, val);
+}
/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b) \
({ \
- int _err; \
- u64 _l = paravirt_read_msr_safe(msr, &_err); \
+ u64 _l; \
+ int _err = paravirt_read_msr_safe((msr), &_l); \
(*a) = (u32)_l; \
- (*b) = _l >> 32; \
+ (*b) = (u32)(_l >> 32); \
_err; \
})
-static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+static __always_inline int rdmsrq_safe(u32 msr, u64 *p)
{
- int err;
-
- *p = paravirt_read_msr_safe(msr, &err);
- return err;
+ return paravirt_read_msr_safe(msr, p);
}
-static inline unsigned long long paravirt_read_pmc(int counter)
+static __always_inline u64 rdpmc(int counter)
{
return PVOP_CALL1(u64, cpu.read_pmc, counter);
}
-#define rdpmc(counter, low, high) \
-do { \
- u64 _l = paravirt_read_pmc(counter); \
- low = (u32)_l; \
- high = _l >> 32; \
-} while (0)
-
-#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
-
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
@@ -474,8 +463,6 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}
-#if CONFIG_PGTABLE_LEVELS >= 5
-
static inline p4d_t __p4d(p4dval_t val)
{
p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val,
@@ -507,8 +494,6 @@ static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
set_pgd(pgdp, native_make_pgd(0)); \
} while (0)
-#endif /* CONFIG_PGTABLE_LEVELS == 5 */
-
static inline void p4d_clear(p4d_t *p4dp)
{
set_p4d(p4dp, native_make_p4d(0));
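Not part of the patch: with the paravirt MSR ops now trafficking in full u64 values, the legacy two-register form is a trivial split/join. A sketch; MSR_EFER is just an example register:

/* Illustrative only: split a 64-bit read, rejoin it on write. */
static inline void copy_efer_split(void)
{
	u64 val = paravirt_read_msr(MSR_EFER);
	u32 lo = (u32)val;
	u32 hi = (u32)(val >> 32);

	wrmsr(MSR_EFER, lo, hi);	/* rejoined internally as (u64)hi << 32 | lo */
}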
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 631c306ce1ff..37a8627d8277 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -91,15 +91,15 @@ struct pv_cpu_ops {
unsigned int *ecx, unsigned int *edx);
/* Unsafe MSR operations. These will warn or panic on failure. */
- u64 (*read_msr)(unsigned int msr);
- void (*write_msr)(unsigned int msr, unsigned low, unsigned high);
+ u64 (*read_msr)(u32 msr);
+ void (*write_msr)(u32 msr, u64 val);
/*
* Safe MSR operations.
- * read sets err to 0 or -EIO. write returns 0 or -EIO.
+ * Returns 0 or -EIO.
*/
- u64 (*read_msr_safe)(unsigned int msr, int *err);
- int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);
+ int (*read_msr_safe)(u32 msr, u64 *val);
+ int (*write_msr_safe)(u32 msr, u64 val);
u64 (*read_pmc)(int counter);
@@ -189,12 +189,10 @@ struct pv_mmu_ops {
void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);
-#if CONFIG_PGTABLE_LEVELS >= 5
struct paravirt_callee_save p4d_val;
struct paravirt_callee_save make_p4d;
void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
-#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
struct pv_lazy_ops lazy_mode;
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 5fe314a2e73e..b0d03b6c279b 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -29,6 +29,8 @@
#ifdef CONFIG_SMP
+#define __force_percpu_prefix "%%"__stringify(__percpu_seg)":"
+
#ifdef CONFIG_CC_HAS_NAMED_AS
#ifdef __CHECKER__
@@ -36,23 +38,23 @@
# define __seg_fs __attribute__((address_space(__seg_fs)))
#endif
+#define __percpu_prefix
#define __percpu_seg_override CONCATENATE(__seg_, __percpu_seg)
-#define __percpu_prefix ""
#else /* !CONFIG_CC_HAS_NAMED_AS: */
+#define __percpu_prefix __force_percpu_prefix
#define __percpu_seg_override
-#define __percpu_prefix "%%"__stringify(__percpu_seg)":"
#endif /* CONFIG_CC_HAS_NAMED_AS */
-#define __force_percpu_prefix "%%"__stringify(__percpu_seg)":"
-#define __my_cpu_offset this_cpu_read(this_cpu_off)
-
/*
* Compared to the generic __my_cpu_offset version, the following
* saves one instruction and avoids clobbering a temp register.
- *
+ */
+#define __my_cpu_offset this_cpu_read(this_cpu_off)
+
+/*
* arch_raw_cpu_ptr should not be used in 32-bit VDSO for a 64-bit
* kernel, because games are played with CONFIG_X86_64 there and
* sizeof(this_cpu_off) becames 4.
@@ -77,9 +79,9 @@
#else /* !CONFIG_SMP: */
+#define __force_percpu_prefix
+#define __percpu_prefix
#define __percpu_seg_override
-#define __percpu_prefix ""
-#define __force_percpu_prefix ""
#define PER_CPU_VAR(var) (var)__percpu_rel
@@ -97,8 +99,8 @@
# define __my_cpu_var(var) (*__my_cpu_ptr(&(var)))
#endif
-#define __percpu_arg(x) __percpu_prefix "%" #x
#define __force_percpu_arg(x) __force_percpu_prefix "%" #x
+#define __percpu_arg(x) __percpu_prefix "%" #x
/*
* For arch-specific code, we can use direct single-insn ops (they
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 812dac3f79f0..70d1d94aca7e 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -195,6 +195,7 @@ union cpuid10_edx {
*/
#define ARCH_PERFMON_EXT_LEAF 0x00000023
#define ARCH_PERFMON_NUM_COUNTER_LEAF 0x1
+#define ARCH_PERFMON_ACR_LEAF 0x2
union cpuid35_eax {
struct {
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index a33147520044..c88691b15f3c 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -6,6 +6,8 @@
#include <linux/mm.h> /* for struct page */
#include <linux/pagemap.h>
+#include <asm/cpufeature.h>
+
#define __HAVE_ARCH_PTE_ALLOC_ONE
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>
@@ -29,16 +31,17 @@ static inline void paravirt_release_pud(unsigned long pfn) {}
static inline void paravirt_release_p4d(unsigned long pfn) {}
#endif
-#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/*
- * Instead of one PGD, we acquire two PGDs. Being order-1, it is
- * both 8k in size and 8k-aligned. That lets us just flip bit 12
- * in a pointer to swap between the two 4k halves.
+ * In case of Page Table Isolation active, we acquire two PGDs instead of one.
+ * Being order-1, it is both 8k in size and 8k-aligned. That lets us just
+ * flip bit 12 in a pointer to swap between the two 4k halves.
*/
-#define PGD_ALLOCATION_ORDER 1
-#else
-#define PGD_ALLOCATION_ORDER 0
-#endif
+static inline unsigned int pgd_allocation_order(void)
+{
+ if (cpu_feature_enabled(X86_FEATURE_PTI))
+ return 1;
+ return 0;
+}
/*
* Allocate and free page tables.
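Not part of the patch: a caller that previously used the compile-time PGD_ALLOCATION_ORDER constant now asks pgd_allocation_order() at runtime whether PTI doubles the allocation. A hedged sketch; the function name and GFP flags are illustrative:

/* Illustrative only: order-0 or order-1 PGD allocation depending on PTI. */
static pgd_t *alloc_example_pgd(void)
{
	return (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 pgd_allocation_order());
	/* freed later with free_pages(addr, pgd_allocation_order()) */
}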
diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h
index 66425424ce91..54690bd4ddbe 100644
--- a/arch/x86/include/asm/pgtable-2level_types.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -18,8 +18,6 @@ typedef union {
} pte_t;
#endif /* !__ASSEMBLER__ */
-#define SHARED_KERNEL_PMD 0
-
#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
/*
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
index 9d5b257d44e3..580b09bf6a45 100644
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -27,9 +27,7 @@ typedef union {
} pmd_t;
#endif /* !__ASSEMBLER__ */
-#define SHARED_KERNEL_PMD (!static_cpu_has(X86_FEATURE_PTI))
-
-#define ARCH_PAGE_TABLE_SYNC_MASK (SHARED_KERNEL_PMD ? 0 : PGTBL_PMD_MODIFIED)
+#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 7bd6bd6df4a1..774430c3abff 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -292,13 +292,6 @@ static inline unsigned long pgd_pfn(pgd_t pgd)
return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
-#define p4d_leaf p4d_leaf
-static inline bool p4d_leaf(p4d_t p4d)
-{
- /* No 512 GiB pages yet */
- return 0;
-}
-
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define pmd_leaf pmd_leaf
@@ -784,6 +777,9 @@ static inline pgprotval_t check_pgprot(pgprot_t pgprot)
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+ /* This bit combination is used to mark shadow stacks */
+ WARN_ON_ONCE((pgprot_val(pgprot) & (_PAGE_DIRTY | _PAGE_RW)) ==
+ _PAGE_DIRTY);
pfn ^= protnone_mask(pgprot_val(pgprot));
pfn &= PTE_PFN_MASK;
return __pte(pfn | check_pgprot(pgprot));
@@ -1080,22 +1076,6 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
*/
#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- *
- * (Currently stuck as a macro because of indirect forward reference
- * to linux/mm.h:page_to_nid())
- */
-#define mk_pte(page, pgprot) \
-({ \
- pgprot_t __pgprot = pgprot; \
- \
- WARN_ON_ONCE((pgprot_val(__pgprot) & (_PAGE_DIRTY | _PAGE_RW)) == \
- _PAGE_DIRTY); \
- pfn_pte(page_to_pfn(page), __pgprot); \
-})
-
static inline int pmd_bad(pmd_t pmd)
{
return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) !=
@@ -1360,8 +1340,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
-#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
-
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
@@ -1472,9 +1450,6 @@ static inline bool pgdp_maps_userspace(void *__ptr)
return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}
-#define pgd_leaf pgd_leaf
-static inline bool pgd_leaf(pgd_t pgd) { return false; }
-
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/*
* All top-level MITIGATION_PAGE_TABLE_ISOLATION page tables are order-1 pages
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index b89f8f1194a9..f06e5d6a2747 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -41,11 +41,9 @@ static inline void sync_initial_page_table(void) { }
pr_err("%s:%d: bad pud %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pud_val(e))
-#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e) \
pr_err("%s:%d: bad p4d %p(%016lx)\n", \
__FILE__, __LINE__, &(e), p4d_val(e))
-#endif
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %p(%016lx)\n", \
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 5bb782d856f2..4604f924d8b8 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -23,7 +23,6 @@ typedef struct { pmdval_t pmd; } pmd_t;
extern unsigned int __pgtable_l5_enabled;
-#ifdef CONFIG_X86_5LEVEL
#ifdef USE_EARLY_PGTABLE_L5
/*
* cpu_feature_enabled() is not available in early boot code.
@@ -37,19 +36,11 @@ static inline bool pgtable_l5_enabled(void)
#define pgtable_l5_enabled() cpu_feature_enabled(X86_FEATURE_LA57)
#endif /* USE_EARLY_PGTABLE_L5 */
-#else
-#define pgtable_l5_enabled() 0
-#endif /* CONFIG_X86_5LEVEL */
-
extern unsigned int pgdir_shift;
extern unsigned int ptrs_per_p4d;
#endif /* !__ASSEMBLER__ */
-#define SHARED_KERNEL_PMD 0
-
-#ifdef CONFIG_X86_5LEVEL
-
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
*/
@@ -67,17 +58,6 @@ extern unsigned int ptrs_per_p4d;
#define MAX_POSSIBLE_PHYSMEM_BITS 52
-#else /* CONFIG_X86_5LEVEL */
-
-/*
- * PGDIR_SHIFT determines what a top-level page table entry can map
- */
-#define PGDIR_SHIFT 39
-#define PTRS_PER_PGD 512
-#define MAX_PTRS_PER_P4D 1
-
-#endif /* CONFIG_X86_5LEVEL */
-
/*
* 3rd level page
*/
@@ -130,15 +110,9 @@ extern unsigned int ptrs_per_p4d;
#define __VMEMMAP_BASE_L4 0xffffea0000000000UL
#define __VMEMMAP_BASE_L5 0xffd4000000000000UL
-#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
# define VMALLOC_START vmalloc_base
# define VMALLOC_SIZE_TB (pgtable_l5_enabled() ? VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4)
# define VMEMMAP_START vmemmap_base
-#else
-# define VMALLOC_START __VMALLOC_BASE_L4
-# define VMALLOC_SIZE_TB VMALLOC_SIZE_TB_L4
-# define VMEMMAP_START __VMEMMAP_BASE_L4
-#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
#ifdef CONFIG_RANDOMIZE_MEMORY
# define DIRECT_MAP_PHYSMEM_END direct_map_physmem_end
diff --git a/arch/x86/include/asm/posted_intr.h b/arch/x86/include/asm/posted_intr.h
index de788b400fba..a5f761fbf45b 100644
--- a/arch/x86/include/asm/posted_intr.h
+++ b/arch/x86/include/asm/posted_intr.h
@@ -1,19 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_POSTED_INTR_H
#define _X86_POSTED_INTR_H
+
+#include <asm/cmpxchg.h>
+#include <asm/rwonce.h>
#include <asm/irq_vectors.h>
+#include <linux/bitmap.h>
+
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
#define PID_TABLE_ENTRY_VALID 1
+#define NR_PIR_VECTORS 256
+#define NR_PIR_WORDS (NR_PIR_VECTORS / BITS_PER_LONG)
+
/* Posted-Interrupt Descriptor */
struct pi_desc {
- union {
- u32 pir[8]; /* Posted interrupt requested */
- u64 pir64[4];
- };
+ unsigned long pir[NR_PIR_WORDS]; /* Posted interrupt requested */
union {
struct {
u16 notifications; /* Suppress and outstanding bits */
@@ -26,6 +31,65 @@ struct pi_desc {
u32 rsvd[6];
} __aligned(64);
+/*
+ * De-multiplexing posted interrupts is on the performance path, the code
+ * below is written to optimize the cache performance based on the following
+ * considerations:
+ * 1.Posted interrupt descriptor (PID) fits in a cache line that is frequently
+ * accessed by both CPU and IOMMU.
+ * 2.During software processing of posted interrupts, the CPU needs to do
+ * natural width read and xchg for checking and clearing posted interrupt
+ * request (PIR), a 256 bit field within the PID.
+ * 3.On the other side, the IOMMU does atomic swaps of the entire PID cache
+ * line when posting interrupts and setting control bits.
+ * 4.The CPU can access the cache line a magnitude faster than the IOMMU.
+ * 5.Each time the IOMMU does interrupt posting to the PIR will evict the PID
+ * cache line. The cache line states after each operation are as follows,
+ * assuming a 64-bit kernel:
+ * CPU IOMMU PID Cache line state
+ * ---------------------------------------------------------------
+ *...read64 exclusive
+ *...lock xchg64 modified
+ *... post/atomic swap invalid
+ *...-------------------------------------------------------------
+ *
+ * To reduce L1 data cache miss, it is important to avoid contention with
+ * IOMMU's interrupt posting/atomic swap. Therefore, a copy of PIR is used
+ * when processing posted interrupts in software, e.g. to dispatch interrupt
+ * handlers for posted MSIs, or to move interrupts from the PIR to the vIRR
+ * in KVM.
+ *
+ * In addition, the code is trying to keep the cache line state consistent
+ * as much as possible. e.g. when making a copy and clearing the PIR
+ * (assuming non-zero PIR bits are present in the entire PIR), it does:
+ * read, read, read, read, xchg, xchg, xchg, xchg
+ * instead of:
+ * read, xchg, read, xchg, read, xchg, read, xchg
+ */
+static __always_inline bool pi_harvest_pir(unsigned long *pir,
+ unsigned long *pir_vals)
+{
+ unsigned long pending = 0;
+ int i;
+
+ for (i = 0; i < NR_PIR_WORDS; i++) {
+ pir_vals[i] = READ_ONCE(pir[i]);
+ pending |= pir_vals[i];
+ }
+
+ if (!pending)
+ return false;
+
+ for (i = 0; i < NR_PIR_WORDS; i++) {
+ if (!pir_vals[i])
+ continue;
+
+ pir_vals[i] = arch_xchg(&pir[i], 0);
+ }
+
+ return true;
+}
+
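Not part of the patch: a sketch of consuming a harvested PIR snapshot away from the IOMMU-contended cache line; handle_vector() is a hypothetical dispatch helper.

/* Illustrative only: copy-then-dispatch of pending posted vectors. */
static void demux_posted(struct pi_desc *pid)
{
	unsigned long pir_copy[NR_PIR_WORDS];
	int vec;

	if (!pi_harvest_pir(pid->pir, pir_copy))
		return;

	for_each_set_bit(vec, pir_copy, NR_PIR_VECTORS)
		handle_vector(vec);	/* hypothetical */
}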
static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
return test_and_set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
@@ -43,12 +107,12 @@ static inline bool pi_test_and_clear_sn(struct pi_desc *pi_desc)
static inline bool pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
- return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
+ return test_and_set_bit(vector, pi_desc->pir);
}
static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
{
- return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
+ return bitmap_empty(pi_desc->pir, NR_VECTORS);
}
static inline void pi_set_sn(struct pi_desc *pi_desc)
@@ -81,6 +145,11 @@ static inline bool pi_test_sn(struct pi_desc *pi_desc)
return test_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control);
}
+static inline bool pi_test_pir(int vector, struct pi_desc *pi_desc)
+{
+ return test_bit(vector, (unsigned long *)pi_desc->pir);
+}
+
/* Non-atomic helpers */
static inline void __pi_set_sn(struct pi_desc *pi_desc)
{
@@ -105,7 +174,7 @@ static inline bool pi_pending_this_cpu(unsigned int vector)
if (WARN_ON_ONCE(vector > NR_VECTORS || vector < FIRST_EXTERNAL_VECTOR))
return false;
- return test_bit(vector, (unsigned long *)pid->pir);
+ return test_bit(vector, pid->pir);
}
extern void intel_posted_msi_init(void);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 5d2f7e5aff26..bde58f6510ac 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -16,7 +16,7 @@ struct vm86;
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
@@ -514,15 +514,14 @@ struct thread_struct {
struct thread_shstk shstk;
#endif
-
- /* Floating point and extended processor state */
- struct fpu fpu;
- /*
- * WARNING: 'fpu' is dynamically-sized. It *MUST* be at
- * the end.
- */
};
+#ifdef CONFIG_X86_DEBUG_FPU
+extern struct fpu *x86_task_fpu(struct task_struct *task);
+#else
+# define x86_task_fpu(task) ((struct fpu *)((void *)(task) + sizeof(*(task))))
+#endif
+
extern void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size);
static inline void arch_thread_struct_whitelist(unsigned long *offset,
@@ -734,6 +733,7 @@ void store_cpu_caps(struct cpuinfo_x86 *info);
enum l1tf_mitigations {
L1TF_MITIGATION_OFF,
+ L1TF_MITIGATION_AUTO,
L1TF_MITIGATION_FLUSH_NOWARN,
L1TF_MITIGATION_FLUSH,
L1TF_MITIGATION_FLUSH_NOSMT,
diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
index 011bf67a1866..feb93b50e990 100644
--- a/arch/x86/include/asm/resctrl.h
+++ b/arch/x86/include/asm/resctrl.h
@@ -9,6 +9,8 @@
#include <linux/resctrl_types.h>
#include <linux/sched.h>
+#include <asm/msr.h>
+
/*
* This value can never be a valid CLOSID, and is used when mapping a
* (closid, rmid) pair to an index and back. On x86 only the RMID is
@@ -175,7 +177,7 @@ static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored,
return READ_ONCE(tsk->rmid) == rmid;
}
-static inline void resctrl_sched_in(struct task_struct *tsk)
+static inline void resctrl_arch_sched_in(struct task_struct *tsk)
{
if (static_branch_likely(&rdt_enable_key))
__resctrl_sched_in(tsk);
@@ -194,25 +196,22 @@ static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid)
/* x86 can always read an rmid, nothing needs allocating */
struct rdt_resource;
-static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid)
+static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r,
+ enum resctrl_event_id evtid)
{
might_sleep();
return NULL;
-};
+}
-static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid,
- void *ctx) { };
+static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r,
+ enum resctrl_event_id evtid,
+ void *ctx) { }
-u64 resctrl_arch_get_prefetch_disable_bits(void);
-int resctrl_arch_pseudo_lock_fn(void *_plr);
-int resctrl_arch_measure_cycles_lat_fn(void *_plr);
-int resctrl_arch_measure_l2_residency(void *_plr);
-int resctrl_arch_measure_l3_residency(void *_plr);
void resctrl_cpu_detect(struct cpuinfo_x86 *c);
#else
-static inline void resctrl_sched_in(struct task_struct *tsk) {}
+static inline void resctrl_arch_sched_in(struct task_struct *tsk) {}
static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}
#endif /* CONFIG_X86_CPU_RESCTRL */
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 8d9f1c9aaa4c..61f56cdaccb5 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -4,6 +4,7 @@
#include <asm/page.h>
#include <asm-generic/set_memory.h>
+#include <asm/pgtable.h>
#define set_memory_rox set_memory_rox
int set_memory_rox(unsigned long addr, int numpages);
@@ -37,6 +38,7 @@ int set_memory_rox(unsigned long addr, int numpages);
* The caller is required to take care of these.
*/
+int __set_memory_prot(unsigned long addr, int numpages, pgprot_t prot);
int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wt(unsigned long addr, int numpages);
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ad9212df0ec0..692af46603a1 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -52,6 +52,7 @@ extern void reserve_standard_io_resources(void);
extern void i386_reserve_resources(void);
extern unsigned long __startup_64(unsigned long p2v_offset, struct boot_params *bp);
extern void startup_64_setup_gdt_idt(void);
+extern void startup_64_load_idt(void *vc_handler);
extern void early_setup_idt(void);
extern void __init do_early_exception(struct pt_regs *regs, int trapnr);
@@ -67,6 +68,8 @@ extern void x86_ce4100_early_setup(void);
static inline void x86_ce4100_early_setup(void) { }
#endif
+#include <linux/kexec_handover.h>
+
#ifndef _SETUP
#include <asm/espfix.h>
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index acb85b9346d8..0020d77a0800 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -116,7 +116,7 @@ enum psc_op {
#define GHCB_MSR_VMPL_REQ 0x016
#define GHCB_MSR_VMPL_REQ_LEVEL(v) \
/* GHCBData[39:32] */ \
- (((u64)(v) & GENMASK_ULL(7, 0) << 32) | \
+ ((((u64)(v) & GENMASK_ULL(7, 0)) << 32) | \
/* GHCBDdata[11:0] */ \
GHCB_MSR_VMPL_REQ)
diff --git a/arch/x86/include/asm/sev-internal.h b/arch/x86/include/asm/sev-internal.h
new file mode 100644
index 000000000000..3dfd306d1c9e
--- /dev/null
+++ b/arch/x86/include/asm/sev-internal.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define DR7_RESET_VALUE 0x400
+
+extern struct ghcb boot_ghcb_page;
+extern u64 sev_hv_features;
+extern u64 sev_secrets_pa;
+
+/* #VC handler runtime per-CPU data */
+struct sev_es_runtime_data {
+ struct ghcb ghcb_page;
+
+ /*
+ * Reserve one page per CPU as backup storage for the unencrypted GHCB.
+ * It is needed when an NMI happens while the #VC handler uses the real
+ * GHCB, and the NMI handler itself is causing another #VC exception. In
+ * that case the GHCB content of the first handler needs to be backed up
+ * and restored.
+ */
+ struct ghcb backup_ghcb;
+
+ /*
+ * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
+ * There is no need for it to be atomic, because nothing is written to
+ * the GHCB between the read and the write of ghcb_active. So it is safe
+ * to use it when a nested #VC exception happens before the write.
+ *
+ * This is necessary for example in the #VC->NMI->#VC case when the NMI
+ * happens while the first #VC handler uses the GHCB. When the NMI code
+ * raises a second #VC handler it might overwrite the contents of the
+ * GHCB written by the first handler. To avoid this the content of the
+ * GHCB is saved and restored when the GHCB is detected to be in use
+ * already.
+ */
+ bool ghcb_active;
+ bool backup_ghcb_active;
+
+ /*
+ * Cached DR7 value - write it on DR7 writes and return it on reads.
+ * That value will never make it to the real hardware DR7 as debugging
+ * is currently unsupported in SEV-ES guests.
+ */
+ unsigned long dr7;
+};
+
+struct ghcb_state {
+ struct ghcb *ghcb;
+};
+
+extern struct svsm_ca boot_svsm_ca_page;
+
+struct ghcb *__sev_get_ghcb(struct ghcb_state *state);
+void __sev_put_ghcb(struct ghcb_state *state);
+
+DECLARE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
+DECLARE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
+
+void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages, enum psc_op op);
+
+DECLARE_PER_CPU(struct svsm_ca *, svsm_caa);
+DECLARE_PER_CPU(u64, svsm_caa_pa);
+
+extern struct svsm_ca *boot_svsm_caa;
+extern u64 boot_svsm_caa_pa;
+
+static __always_inline struct svsm_ca *svsm_get_caa(void)
+{
+ if (sev_cfg.use_cas)
+ return this_cpu_read(svsm_caa);
+ else
+ return boot_svsm_caa;
+}
+
+static __always_inline u64 svsm_get_caa_pa(void)
+{
+ if (sev_cfg.use_cas)
+ return this_cpu_read(svsm_caa_pa);
+ else
+ return boot_svsm_caa_pa;
+}
+
+int svsm_perform_call_protocol(struct svsm_call *call);
+
+static inline u64 sev_es_rd_ghcb_msr(void)
+{
+ return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB);
+}
+
+static __always_inline void sev_es_wr_ghcb_msr(u64 val)
+{
+ u32 low, high;
+
+ low = (u32)(val);
+ high = (u32)(val >> 32);
+
+ native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
+}
+
+void snp_register_ghcb_early(unsigned long paddr);
+bool sev_es_negotiate_protocol(void);
+bool sev_es_check_cpu_features(void);
+u64 get_hv_features(void);
+
+const struct snp_cpuid_table *snp_cpuid_get_table(void);
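Not part of the patch: the split write in sev_es_wr_ghcb_msr() above should be equivalent to a single 64-bit write with the renamed helper. A hedged sketch; the function name is made up:

/* Illustrative only: same effect as the low/high split above. */
static __always_inline void sev_es_wr_ghcb_msr_alt(u64 val)
{
	native_wrmsrq(MSR_AMD64_SEV_ES_GHCB, val);
}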
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index ba7999f66abe..58e028d42e41 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -15,6 +15,7 @@
#include <asm/sev-common.h>
#include <asm/coco.h>
#include <asm/set_memory.h>
+#include <asm/svm.h>
#define GHCB_PROTOCOL_MIN 1ULL
#define GHCB_PROTOCOL_MAX 2ULL
@@ -83,6 +84,36 @@ extern void vc_no_ghcb(void);
extern void vc_boot_ghcb(void);
extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
+/*
+ * Individual entries of the SNP CPUID table, as defined by the SNP
+ * Firmware ABI, Revision 0.9, Section 7.1, Table 14.
+ */
+struct snp_cpuid_fn {
+ u32 eax_in;
+ u32 ecx_in;
+ u64 xcr0_in;
+ u64 xss_in;
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+ u64 __reserved;
+} __packed;
+
+/*
+ * SNP CPUID table, as defined by the SNP Firmware ABI, Revision 0.9,
+ * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit
+ * of 64 entries per CPUID table.
+ */
+#define SNP_CPUID_COUNT_MAX 64
+
+struct snp_cpuid_table {
+ u32 count;
+ u32 __reserved1;
+ u64 __reserved2;
+ struct snp_cpuid_fn fn[SNP_CPUID_COUNT_MAX];
+} __packed;
+
/* PVALIDATE return codes */
#define PVALIDATE_FAIL_SIZEMISMATCH 6
@@ -384,6 +415,10 @@ struct svsm_call {
#define SVSM_ATTEST_SERVICES 0
#define SVSM_ATTEST_SINGLE_SERVICE 1
+#define SVSM_VTPM_CALL(x) ((2ULL << 32) | (x))
+#define SVSM_VTPM_QUERY 0
+#define SVSM_VTPM_CMD 1
+
#ifdef CONFIG_AMD_MEM_ENCRYPT
extern u8 snp_vmpl;
@@ -481,9 +516,39 @@ void snp_msg_free(struct snp_msg_desc *mdesc);
int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
struct snp_guest_request_ioctl *rio);
+int snp_svsm_vtpm_send_command(u8 *buffer);
+
void __init snp_secure_tsc_prepare(void);
void __init snp_secure_tsc_init(void);
+static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
+{
+ ghcb->save.sw_exit_code = 0;
+ __builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+}
+
+void vc_forward_exception(struct es_em_ctxt *ctxt);
+
+/* I/O parameters for CPUID-related helpers */
+struct cpuid_leaf {
+ u32 fn;
+ u32 subfn;
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+};
+
+int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf);
+
+void __noreturn sev_es_terminate(unsigned int set, unsigned int reason);
+enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt,
+ u64 exit_code, u64 exit_info_1,
+ u64 exit_info_2);
+
+extern struct ghcb *boot_ghcb;
+
#else /* !CONFIG_AMD_MEM_ENCRYPT */
#define snp_vmpl 0
@@ -524,6 +589,7 @@ static inline struct snp_msg_desc *snp_msg_alloc(void) { return NULL; }
static inline void snp_msg_free(struct snp_msg_desc *mdesc) { }
static inline int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
struct snp_guest_request_ioctl *rio) { return -ENODEV; }
+static inline int snp_svsm_vtpm_send_command(u8 *buffer) { return -ENODEV; }
static inline void __init snp_secure_tsc_prepare(void) { }
static inline void __init snp_secure_tsc_init(void) { }
diff --git a/arch/x86/include/asm/shared/tdx.h b/arch/x86/include/asm/shared/tdx.h
index a28ff6b14145..2f3820342598 100644
--- a/arch/x86/include/asm/shared/tdx.h
+++ b/arch/x86/include/asm/shared/tdx.h
@@ -13,6 +13,7 @@
/* TDX module Call Leaf IDs */
#define TDG_VP_VMCALL 0
#define TDG_VP_INFO 1
+#define TDG_MR_RTMR_EXTEND 2
#define TDG_VP_VEINFO_GET 3
#define TDG_MR_REPORT 4
#define TDG_MEM_PAGE_ACCEPT 6
@@ -67,11 +68,18 @@
#define TD_CTLS_LOCK BIT_ULL(TD_CTLS_LOCK_BIT)
/* TDX hypercall Leaf IDs */
+#define TDVMCALL_GET_TD_VM_CALL_INFO 0x10000
#define TDVMCALL_MAP_GPA 0x10001
#define TDVMCALL_GET_QUOTE 0x10002
#define TDVMCALL_REPORT_FATAL_ERROR 0x10003
-#define TDVMCALL_STATUS_RETRY 1
+/*
+ * TDG.VP.VMCALL Status Codes (returned in R10)
+ */
+#define TDVMCALL_STATUS_SUCCESS 0x0000000000000000ULL
+#define TDVMCALL_STATUS_RETRY 0x0000000000000001ULL
+#define TDVMCALL_STATUS_INVALID_OPERAND 0x8000000000000000ULL
+#define TDVMCALL_STATUS_ALIGN_ERROR 0x8000000000000002ULL
/*
* Bitmasks of exposed registers (with VMM).
diff --git a/arch/x86/include/asm/simd.h b/arch/x86/include/asm/simd.h
index a341c878e977..b8027b63cd7a 100644
--- a/arch/x86/include/asm/simd.h
+++ b/arch/x86/include/asm/simd.h
@@ -1,6 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SIMD_H
+#define _ASM_SIMD_H
#include <asm/fpu/api.h>
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
/*
* may_use_simd - whether it is allowable at this time to issue SIMD
@@ -10,3 +14,5 @@ static __must_check inline bool may_use_simd(void)
{
return irq_fpu_usable();
}
+
+#endif /* _ASM_SIMD_H */
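Not part of the patch: the usual guard pattern around a kernel-mode SIMD section that may_use_simd() supports; the function name and paths are placeholders.

/* Illustrative only: take the FPU only when SIMD is currently allowed. */
static void do_transform(void)
{
	if (may_use_simd()) {
		kernel_fpu_begin();
		/* vectorized path */
		kernel_fpu_end();
	} else {
		/* scalar fallback */
	}
}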
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index 55a5e656e4b9..4f84d421d1cf 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -16,23 +16,23 @@
#ifdef __ASSEMBLER__
#define ASM_CLAC \
- ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "clac", X86_FEATURE_SMAP
+ ALTERNATIVE "", "clac", X86_FEATURE_SMAP
#define ASM_STAC \
- ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "stac", X86_FEATURE_SMAP
+ ALTERNATIVE "", "stac", X86_FEATURE_SMAP
#else /* __ASSEMBLER__ */
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP);
+ alternative("", "clac", X86_FEATURE_SMAP);
}
static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP);
+ alternative("", "stac", X86_FEATURE_SMAP);
}
static __always_inline unsigned long smap_save(void)
@@ -59,9 +59,9 @@ static __always_inline void smap_restore(unsigned long flags)
/* These macros can be used in asm() statements */
#define ASM_CLAC \
- ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP)
+ ALTERNATIVE("", "clac", X86_FEATURE_SMAP)
#define ASM_STAC \
- ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP)
+ ALTERNATIVE("", "stac", X86_FEATURE_SMAP)
#define ASM_CLAC_UNSAFE \
ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "clac", X86_FEATURE_SMAP)
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index 658b690b2ccb..00b7e0398210 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -84,7 +84,7 @@ static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
static __always_inline void __update_spec_ctrl(u64 val)
{
__this_cpu_write(x86_spec_ctrl_current, val);
- native_wrmsrl(MSR_IA32_SPEC_CTRL, val);
+ native_wrmsrq(MSR_IA32_SPEC_CTRL, val);
}
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 6266d6b9e0b8..ecda17efa042 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -10,30 +10,19 @@
#include <linux/irqflags.h>
#include <linux/jump_label.h>
-/*
- * The compiler should not reorder volatile asm statements with respect to each
- * other: they should execute in program order. However GCC 4.9.x and 5.x have
- * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
- * volatile asm. The write functions are not affected since they have memory
- * clobbers preventing reordering. To prevent reads from being reordered with
- * respect to writes, use a dummy memory operand.
- */
-
-#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
-
void native_write_cr0(unsigned long val);
static inline unsigned long native_read_cr0(void)
{
unsigned long val;
- asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ asm volatile("mov %%cr0,%0" : "=r" (val));
return val;
}
static __always_inline unsigned long native_read_cr2(void)
{
unsigned long val;
- asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ asm volatile("mov %%cr2,%0" : "=r" (val));
return val;
}
@@ -45,7 +34,7 @@ static __always_inline void native_write_cr2(unsigned long val)
static __always_inline unsigned long __native_read_cr3(void)
{
unsigned long val;
- asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ asm volatile("mov %%cr3,%0" : "=r" (val));
return val;
}
@@ -66,10 +55,10 @@ static inline unsigned long native_read_cr4(void)
asm volatile("1: mov %%cr4, %0\n"
"2:\n"
_ASM_EXTABLE(1b, 2b)
- : "=r" (val) : "0" (0), __FORCE_ORDER);
+ : "=r" (val) : "0" (0));
#else
/* CR4 always exists on x86_64. */
- asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ asm volatile("mov %%cr4,%0" : "=r" (val));
#endif
return val;
}
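
Editor's note: for context on what the removed __FORCE_ORDER hack did, the dummy memory input operand gave affected GCC versions a (never dereferenced) dependency that kept volatile asm reads from being reordered past writes. A minimal user-space sketch of the same trick, applied to RDTSC instead of a control register (an assumption, since CR access needs ring 0), is below.

#include <stdint.h>

/*
 * Dummy memory operand in the style of the removed __FORCE_ORDER: the
 * address is never dereferenced, it only creates an ordering dependency
 * that the compiler must respect.
 */
#define FORCE_ORDER "m"(*(unsigned int *)0x1000UL)

static inline uint64_t ordered_rdtsc(void)
{
        uint32_t lo, hi;

        asm volatile("rdtsc" : "=a" (lo), "=d" (hi) : FORCE_ORDER);
        return ((uint64_t)hi << 32) | lo;
}
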
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index 32c0d981a82a..e9cce169bb4c 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -33,11 +33,11 @@ extern size_t strlen(const char *s);
static __always_inline void *__memcpy(void *to, const void *from, size_t n)
{
int d0, d1, d2;
- asm volatile("rep ; movsl\n\t"
+ asm volatile("rep movsl\n\t"
"movl %4,%%ecx\n\t"
"andl $3,%%ecx\n\t"
"jz 1f\n\t"
- "rep ; movsb\n\t"
+ "rep movsb\n\t"
"1:"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
@@ -89,7 +89,7 @@ static __always_inline void *__constant_memcpy(void *to, const void *from,
if (n >= 5 * 4) {
/* large block: use rep prefix */
int ecx;
- asm volatile("rep ; movsl"
+ asm volatile("rep movsl"
: "=&c" (ecx), "=&D" (edi), "=&S" (esi)
: "0" (n / 4), "1" (edi), "2" (esi)
: "memory"
@@ -165,8 +165,7 @@ extern void *memchr(const void *cs, int c, size_t count);
static inline void *__memset_generic(void *s, char c, size_t count)
{
int d0, d1;
- asm volatile("rep\n\t"
- "stosb"
+ asm volatile("rep stosb"
: "=&c" (d0), "=&D" (d1)
: "a" (c), "1" (s), "0" (count)
: "memory");
@@ -199,8 +198,7 @@ extern void *memset(void *, int, size_t);
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
int d0, d1;
- asm volatile("rep\n\t"
- "stosw"
+ asm volatile("rep stosw"
: "=&c" (d0), "=&D" (d1)
: "a" (v), "1" (s), "0" (n)
: "memory");
@@ -211,8 +209,7 @@ static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
int d0, d1;
- asm volatile("rep\n\t"
- "stosl"
+ asm volatile("rep stosl"
: "=&c" (d0), "=&D" (d1)
: "a" (v), "1" (s), "0" (n)
: "memory");
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index d8416b3bf832..e8e5aab06255 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -9,6 +9,7 @@
#include <asm/desc.h>
#include <asm/fpu/api.h>
+#include <asm/msr.h>
/* image of the saved processor state */
struct saved_context {
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 54df06687d83..b512f9665f78 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -9,6 +9,7 @@
#include <asm/desc.h>
#include <asm/fpu/api.h>
+#include <asm/msr.h>
/*
* Image of the saved processor state, used by the low level ACPI suspend to
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 9b7fa99ae951..ad954a1a6656 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -116,6 +116,7 @@ enum {
INTERCEPT_INVPCID,
INTERCEPT_MCOMMIT,
INTERCEPT_TLBSYNC,
+ INTERCEPT_BUSLOCK,
INTERCEPT_IDLE_HLT = 166,
};
@@ -159,7 +160,12 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u64 avic_physical_id; /* Offset 0xf8 */
u8 reserved_7[8];
u64 vmsa_pa; /* Used for an SEV-ES guest */
- u8 reserved_8[720];
+ u8 reserved_8[16];
+ u16 bus_lock_counter; /* Offset 0x120 */
+ u8 reserved_9[22];
+ u64 allowed_sev_features; /* Offset 0x138 */
+ u64 guest_sev_features; /* Offset 0x140 */
+ u8 reserved_10[664];
/*
* Offset 0x3e0, 32 bytes reserved
* for use by hypervisor/software.
@@ -291,6 +297,8 @@ static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_
#define SVM_SEV_FEAT_ALTERNATE_INJECTION BIT(4)
#define SVM_SEV_FEAT_DEBUG_SWAP BIT(5)
+#define VMCB_ALLOWED_SEV_FEATURES_VALID BIT_ULL(63)
+
struct vmcb_seg {
u16 selector;
u16 attrib;
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 75248546403d..499b1c15cc8b 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -52,6 +52,8 @@ do { \
} while (0)
#ifdef CONFIG_X86_32
+#include <asm/msr.h>
+
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
/* Only happens when SEP is enabled, no need to test "SEP"arately: */
@@ -59,7 +61,7 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
return;
this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
- wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+ wrmsrq(MSR_IA32_SYSENTER_CS, thread->sysenter_cs);
}
#endif
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 7c488ff0c764..c10dbb74cd00 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -38,6 +38,13 @@ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
return regs->orig_ax;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->orig_ax = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -90,6 +97,18 @@ static inline void syscall_get_arguments(struct task_struct *task,
args[5] = regs->bp;
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ regs->bx = args[0];
+ regs->cx = args[1];
+ regs->dx = args[2];
+ regs->si = args[3];
+ regs->di = args[4];
+ regs->bp = args[5];
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_I386;
@@ -121,6 +140,30 @@ static inline void syscall_get_arguments(struct task_struct *task,
}
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+# ifdef CONFIG_IA32_EMULATION
+ if (task->thread_info.status & TS_COMPAT) {
+ regs->bx = *args++;
+ regs->cx = *args++;
+ regs->dx = *args++;
+ regs->si = *args++;
+ regs->di = *args++;
+ regs->bp = *args;
+ } else
+# endif
+ {
+ regs->di = *args++;
+ regs->si = *args++;
+ regs->dx = *args++;
+ regs->r10 = *args++;
+ regs->r8 = *args++;
+ regs->r9 = *args;
+ }
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
/* x32 tasks should be considered AUDIT_ARCH_X86_64. */
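
Editor's note: the new syscall_set_nr()/syscall_set_arguments() helpers are the write-side counterparts of the existing getters, used to rewrite a pending syscall. A user-space analogue, assuming an x86-64 tracee stopped at syscall entry under ptrace, is sketched below; error handling is omitted.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

/* Rewrite the pending syscall number and first argument of a stopped tracee. */
static void tracee_set_syscall(pid_t pid, long nr, unsigned long arg0)
{
        struct user_regs_struct regs;

        ptrace(PTRACE_GETREGS, pid, NULL, &regs);
        regs.orig_rax = nr;     /* the slot the kernel calls orig_ax */
        regs.rdi = arg0;        /* first syscall argument on x86-64  */
        ptrace(PTRACE_SETREGS, pid, NULL, &regs);
}
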
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index 4a1922ec80cf..8b19294600c4 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -5,6 +5,7 @@
#include <linux/init.h>
#include <linux/bits.h>
+#include <linux/mmzone.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
@@ -18,6 +19,7 @@
* TDX module.
*/
#define TDX_ERROR _BITUL(63)
+#define TDX_NON_RECOVERABLE _BITUL(62)
#define TDX_SW_ERROR (TDX_ERROR | GENMASK_ULL(47, 40))
#define TDX_SEAMCALL_VMFAILINVALID (TDX_SW_ERROR | _UL(0xFFFF0000))
@@ -33,6 +35,8 @@
#ifndef __ASSEMBLER__
#include <uapi/asm/mce.h>
+#include <asm/tdx_global_metadata.h>
+#include <linux/pgtable.h>
/*
* Used by the #VE exception handler to gather the #VE exception
@@ -64,6 +68,8 @@ bool tdx_early_handle_ve(struct pt_regs *regs);
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport);
+int tdx_mcall_extend_rtmr(u8 index, u8 *data);
+
u64 tdx_hcall_get_quote(u8 *buf, size_t size);
void __init tdx_dump_attributes(u64 td_attr);
@@ -119,11 +125,82 @@ static inline u64 sc_retry(sc_func_t func, u64 fn,
int tdx_cpu_enable(void);
int tdx_enable(void);
const char *tdx_dump_mce_info(struct mce *m);
+const struct tdx_sys_info *tdx_get_sysinfo(void);
+
+int tdx_guest_keyid_alloc(void);
+u32 tdx_get_nr_guest_keyids(void);
+void tdx_guest_keyid_free(unsigned int keyid);
+
+struct tdx_td {
+ /* TD root structure: */
+ struct page *tdr_page;
+
+ int tdcs_nr_pages;
+ /* TD control structure: */
+ struct page **tdcs_pages;
+
+ /* Size of `tdcx_pages` in struct tdx_vp */
+ int tdcx_nr_pages;
+};
+
+struct tdx_vp {
+ /* TDVP root page */
+ struct page *tdvpr_page;
+
+ /* TD vCPU control structure: */
+ struct page **tdcx_pages;
+};
+
+static inline u64 mk_keyed_paddr(u16 hkid, struct page *page)
+{
+ u64 ret;
+
+ ret = page_to_phys(page);
+ /* KeyID bits are just above the physical address bits: */
+ ret |= (u64)hkid << boot_cpu_data.x86_phys_bits;
+
+ return ret;
+}
+
+static inline int pg_level_to_tdx_sept_level(enum pg_level level)
+{
+ WARN_ON_ONCE(level == PG_LEVEL_NONE);
+ return level - 1;
+}
+
+u64 tdh_vp_enter(struct tdx_vp *vp, struct tdx_module_args *args);
+u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page);
+u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page *source, u64 *ext_err1, u64 *ext_err2);
+u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2);
+u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page);
+u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2);
+u64 tdh_mem_range_block(struct tdx_td *td, u64 gpa, int level, u64 *ext_err1, u64 *ext_err2);
+u64 tdh_mng_key_config(struct tdx_td *td);
+u64 tdh_mng_create(struct tdx_td *td, u16 hkid);
+u64 tdh_vp_create(struct tdx_td *td, struct tdx_vp *vp);
+u64 tdh_mng_rd(struct tdx_td *td, u64 field, u64 *data);
+u64 tdh_mr_extend(struct tdx_td *td, u64 gpa, u64 *ext_err1, u64 *ext_err2);
+u64 tdh_mr_finalize(struct tdx_td *td);
+u64 tdh_vp_flush(struct tdx_vp *vp);
+u64 tdh_mng_vpflushdone(struct tdx_td *td);
+u64 tdh_mng_key_freeid(struct tdx_td *td);
+u64 tdh_mng_init(struct tdx_td *td, u64 td_params, u64 *extended_err);
+u64 tdh_vp_init(struct tdx_vp *vp, u64 initial_rcx, u32 x2apicid);
+u64 tdh_vp_rd(struct tdx_vp *vp, u64 field, u64 *data);
+u64 tdh_vp_wr(struct tdx_vp *vp, u64 field, u64 data, u64 mask);
+u64 tdh_phymem_page_reclaim(struct page *page, u64 *tdx_pt, u64 *tdx_owner, u64 *tdx_size);
+u64 tdh_mem_track(struct tdx_td *tdr);
+u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u64 *ext_err2);
+u64 tdh_phymem_cache_wb(bool resume);
+u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td);
+u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page);
#else
static inline void tdx_init(void) { }
static inline int tdx_cpu_enable(void) { return -ENODEV; }
static inline int tdx_enable(void) { return -ENODEV; }
+static inline u32 tdx_get_nr_guest_keyids(void) { return 0; }
static inline const char *tdx_dump_mce_info(struct mce *m) { return NULL; }
+static inline const struct tdx_sys_info *tdx_get_sysinfo(void) { return NULL; }
#endif /* CONFIG_INTEL_TDX_HOST */
#endif /* !__ASSEMBLER__ */
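
Editor's note: mk_keyed_paddr() above encodes the TDX private-memory KeyID (HKID) in the bits immediately above the platform's physical address width. A self-contained demo of that encoding follows; the 52-bit physical address width is an assumption for illustration (the kernel takes it from boot_cpu_data.x86_phys_bits).

#include <stdint.h>
#include <stdio.h>

static uint64_t mk_keyed_paddr_demo(uint16_t hkid, uint64_t paddr,
                                    unsigned int phys_bits)
{
        /* KeyID bits sit just above the physical address bits. */
        return paddr | ((uint64_t)hkid << phys_bits);
}

int main(void)
{
        /* KeyID 5 applied to physical address 0x1234000 with 52 address bits */
        printf("keyed paddr: %#llx\n",
               (unsigned long long)mk_keyed_paddr_demo(5, 0x1234000, 52));
        return 0;
}
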
diff --git a/arch/x86/virt/vmx/tdx/tdx_global_metadata.h b/arch/x86/include/asm/tdx_global_metadata.h
index 6dd3c9695f59..060a2ad744bf 100644
--- a/arch/x86/virt/vmx/tdx/tdx_global_metadata.h
+++ b/arch/x86/include/asm/tdx_global_metadata.h
@@ -17,9 +17,28 @@ struct tdx_sys_info_tdmr {
u16 pamt_1g_entry_size;
};
+struct tdx_sys_info_td_ctrl {
+ u16 tdr_base_size;
+ u16 tdcs_base_size;
+ u16 tdvps_base_size;
+};
+
+struct tdx_sys_info_td_conf {
+ u64 attributes_fixed0;
+ u64 attributes_fixed1;
+ u64 xfam_fixed0;
+ u64 xfam_fixed1;
+ u16 num_cpuid_config;
+ u16 max_vcpus_per_td;
+ u64 cpuid_config_leaves[128];
+ u64 cpuid_config_values[128][2];
+};
+
struct tdx_sys_info {
struct tdx_sys_info_features features;
struct tdx_sys_info_tdmr tdmr;
+ struct tdx_sys_info_td_ctrl td_ctrl;
+ struct tdx_sys_info_td_conf td_conf;
};
#endif
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index ab9e143ec9fe..5337f1be18f6 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -11,11 +11,11 @@
* JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5.
* Raise it if needed.
*/
-#define POKE_MAX_OPCODE_SIZE 5
+#define TEXT_POKE_MAX_OPCODE_SIZE 5
extern void text_poke_early(void *addr, const void *opcode, size_t len);
-extern void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len);
+extern void text_poke_apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len);
/*
* Clear and restore the kernel write-protection flag on the local CPU.
@@ -32,17 +32,17 @@ extern void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u
* an inconsistent instruction while you patch.
*/
extern void *text_poke(void *addr, const void *opcode, size_t len);
-extern void text_poke_sync(void);
+extern void smp_text_poke_sync_each_cpu(void);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern void *text_poke_copy(void *addr, const void *opcode, size_t len);
#define text_poke_copy text_poke_copy
extern void *text_poke_copy_locked(void *addr, const void *opcode, size_t len, bool core_ok);
extern void *text_poke_set(void *addr, int c, size_t len);
-extern int poke_int3_handler(struct pt_regs *regs);
-extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);
+extern int smp_text_poke_int3_handler(struct pt_regs *regs);
+extern void smp_text_poke_single(void *addr, const void *opcode, size_t len, const void *emulate);
-extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
-extern void text_poke_finish(void);
+extern void smp_text_poke_batch_add(void *addr, const void *opcode, size_t len, const void *emulate);
+extern void smp_text_poke_batch_finish(void);
#define INT3_INSN_SIZE 1
#define INT3_INSN_OPCODE 0xCC
@@ -82,7 +82,7 @@ static __always_inline int text_opcode_size(u8 opcode)
}
union text_poke_insn {
- u8 text[POKE_MAX_OPCODE_SIZE];
+ u8 text[TEXT_POKE_MAX_OPCODE_SIZE];
struct {
u8 opcode;
s32 disp;
@@ -128,8 +128,8 @@ void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
}
extern int after_bootmem;
-extern __ro_after_init struct mm_struct *poking_mm;
-extern __ro_after_init unsigned long poking_addr;
+extern __ro_after_init struct mm_struct *text_poke_mm;
+extern __ro_after_init unsigned long text_poke_mm_addr;
#ifndef CONFIG_UML_X86
static __always_inline
@@ -142,13 +142,14 @@ static __always_inline
void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
/*
- * The int3 handler in entry_64.S adds a gap between the
+ * The INT3 handler in entry_64.S adds a gap between the
* stack where the break point happened, and the saving of
* pt_regs. We can extend the original stack because of
- * this gap. See the idtentry macro's create_gap option.
+ * this gap. See the idtentry macro's X86_TRAP_BP logic.
*
- * Similarly entry_32.S will have a gap on the stack for (any) hardware
- * exception and pt_regs; see FIXUP_FRAME.
+ * Similarly, entry_32.S will have a gap on the stack for
+ * (any) hardware exception and pt_regs; see the
+ * FIXUP_FRAME macro.
*/
regs->sp -= sizeof(unsigned long);
*(unsigned long *)regs->sp = val;
diff --git a/arch/x86/include/asm/trace/common.h b/arch/x86/include/asm/trace/common.h
deleted file mode 100644
index f0f9bcdb74d9..000000000000
--- a/arch/x86/include/asm/trace/common.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_TRACE_COMMON_H
-#define _ASM_TRACE_COMMON_H
-
-#ifdef CONFIG_TRACING
-DECLARE_STATIC_KEY_FALSE(trace_pagefault_key);
-#define trace_pagefault_enabled() \
- static_branch_unlikely(&trace_pagefault_key)
-#else
-static inline bool trace_pagefault_enabled(void) { return false; }
-#endif
-
-#endif
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 4645a6334063..0454d5e60e5d 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -74,11 +74,6 @@ DEFINE_EVENT(x86_fpu, x86_fpu_dropped,
TP_ARGS(fpu)
);
-DEFINE_EVENT(x86_fpu, x86_fpu_copy_src,
- TP_PROTO(struct fpu *fpu),
- TP_ARGS(fpu)
-);
-
DEFINE_EVENT(x86_fpu, x86_fpu_copy_dst,
TP_PROTO(struct fpu *fpu),
TP_ARGS(fpu)
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 88e7f0f3bf62..7408bebdfde0 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -6,7 +6,6 @@
#define _TRACE_IRQ_VECTORS_H
#include <linux/tracepoint.h>
-#include <asm/trace/common.h>
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 94408a784c8e..4f7f09f50552 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -5,10 +5,65 @@
#ifndef _ASM_X86_TSC_H
#define _ASM_X86_TSC_H
+#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
+/**
+ * rdtsc() - returns the current TSC without ordering constraints
+ *
+ * rdtsc() returns the result of RDTSC as a 64-bit integer. The
+ * only ordering constraint it supplies is the ordering implied by
+ * "asm volatile": it will put the RDTSC in the place you expect. The
+ * CPU can and will speculatively execute that RDTSC, though, so the
+ * results can be non-monotonic if compared on different CPUs.
+ */
+static __always_inline u64 rdtsc(void)
+{
+ EAX_EDX_DECLARE_ARGS(val, low, high);
+
+ asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
+
+ return EAX_EDX_VAL(val, low, high);
+}
+
+/**
+ * rdtsc_ordered() - read the current TSC in program order
+ *
+ * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
+ * It is ordered like a load to a global in-memory counter. It should
+ * be impossible to observe non-monotonic rdtsc_ordered() behavior
+ * across multiple CPUs as long as the TSC is synced.
+ */
+static __always_inline u64 rdtsc_ordered(void)
+{
+ EAX_EDX_DECLARE_ARGS(val, low, high);
+
+ /*
+ * The RDTSC instruction is not ordered relative to memory
+ * access. The Intel SDM and the AMD APM are both vague on this
+ * point, but empirically an RDTSC instruction can be
+ * speculatively executed before prior loads. An RDTSC
+ * immediately after an appropriate barrier appears to be
+ * ordered as a normal load, that is, it provides the same
+ * ordering guarantees as reading from a global memory location
+ * that some other imaginary CPU is updating continuously with a
+ * time stamp.
+ *
+ * Thus, use the preferred barrier on the respective CPU, aiming for
+ * RDTSCP as the default.
+ */
+ asm volatile(ALTERNATIVE_2("rdtsc",
+ "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
+ "rdtscp", X86_FEATURE_RDTSCP)
+ : EAX_EDX_RET(val, low, high)
+ /* RDTSCP clobbers ECX with MSR_TSC_AUX. */
+ :: "ecx");
+
+ return EAX_EDX_VAL(val, low, high);
+}
+
/*
* Standard way to access the cycle counter.
*/
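
Editor's note: the kernel picks the cheapest serializing option (LFENCE;RDTSC or RDTSCP) at boot via ALTERNATIVE_2. A user-space counterpart, assuming GCC/Clang builtins and using LFENCE unconditionally instead of boot-time patching, can be sketched as:

#include <stdint.h>
#include <x86intrin.h>

static inline uint64_t tsc_relaxed(void)
{
        return __rdtsc();       /* may execute speculatively, like rdtsc() */
}

static inline uint64_t tsc_ordered(void)
{
        _mm_lfence();           /* order against earlier loads */
        return __rdtsc();
}
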
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index c52f0133425b..c8a5ae35c871 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -26,8 +26,8 @@ extern unsigned long USER_PTR_MAX;
*/
static inline unsigned long __untagged_addr(unsigned long addr)
{
- asm (ALTERNATIVE("",
- "and " __percpu_arg([mask]) ", %[addr]", X86_FEATURE_LAM)
+ asm_inline (ALTERNATIVE("", "and " __percpu_arg([mask]) ", %[addr]",
+ X86_FEATURE_LAM)
: [addr] "+r" (addr)
: [mask] "m" (__my_cpu_var(tlbstate_untag_mask)));
@@ -54,7 +54,7 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
#endif
#define valid_user_address(x) \
- ((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))
+ likely((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))
/*
* Masking the user address is an alternative to a conditional
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 80be0da733df..b7253ef3205a 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -27,17 +27,9 @@ struct vdso_image {
long sym_vdso32_rt_sigreturn_landing_pad;
};
-#ifdef CONFIG_X86_64
extern const struct vdso_image vdso_image_64;
-#endif
-
-#ifdef CONFIG_X86_X32_ABI
extern const struct vdso_image vdso_image_x32;
-#endif
-
-#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
extern const struct vdso_image vdso_image_32;
-#endif
extern int __init init_vdso_image(const struct vdso_image *image);
diff --git a/arch/x86/include/asm/vdso/processor.h b/arch/x86/include/asm/vdso/processor.h
index c9b2ba7a9ec4..7000aeb59aa2 100644
--- a/arch/x86/include/asm/vdso/processor.h
+++ b/arch/x86/include/asm/vdso/processor.h
@@ -7,15 +7,15 @@
#ifndef __ASSEMBLER__
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static __always_inline void rep_nop(void)
+/* PAUSE is a good thing to insert into busy-wait loops. */
+static __always_inline void native_pause(void)
{
- asm volatile("rep; nop" ::: "memory");
+ asm volatile("pause" ::: "memory");
}
static __always_inline void cpu_relax(void)
{
- rep_nop();
+ native_pause();
}
struct getcpu_cache;
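
Editor's note: cpu_relax()/native_pause() above exist for one purpose: telling the CPU it is inside a busy-wait loop. The same idea in user-space C, using the _mm_pause() intrinsic (which compiles to the same PAUSE instruction), looks like:

#include <immintrin.h>
#include <stdatomic.h>

static void spin_until_set(const atomic_int *flag)
{
        while (!atomic_load_explicit(flag, memory_order_acquire))
                _mm_pause();    /* hint: spin-wait loop, reduce power */
}
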
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 8707361b24da..cca7d6641287 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -256,6 +256,7 @@ enum vmcs_field {
TSC_MULTIPLIER_HIGH = 0x00002033,
TERTIARY_VM_EXEC_CONTROL = 0x00002034,
TERTIARY_VM_EXEC_CONTROL_HIGH = 0x00002035,
+ SHARED_EPT_POINTER = 0x0000203C,
PID_POINTER_TABLE = 0x00002042,
PID_POINTER_TABLE_HIGH = 0x00002043,
GUEST_PHYSICAL_ADDRESS = 0x00002400,
@@ -586,6 +587,7 @@ enum vm_entry_failure_code {
#define EPT_VIOLATION_PROT_READ BIT(3)
#define EPT_VIOLATION_PROT_WRITE BIT(4)
#define EPT_VIOLATION_PROT_EXEC BIT(5)
+#define EPT_VIOLATION_EXEC_FOR_RING3_LIN BIT(6)
#define EPT_VIOLATION_PROT_MASK (EPT_VIOLATION_PROT_READ | \
EPT_VIOLATION_PROT_WRITE | \
EPT_VIOLATION_PROT_EXEC)
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 213cf5379a5a..36698cc9fb44 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -292,6 +292,7 @@ struct x86_hyper_runtime {
* @set_wallclock: set time back to HW clock
* @is_untracked_pat_range exclude from PAT logic
* @nmi_init enable NMI on cpus
+ * @get_nmi_reason get the reason an NMI was received
* @save_sched_clock_state: save state for sched_clock() on suspend
* @restore_sched_clock_state: restore state for sched_clock() on resume
* @apic_post_init: adjust apic if needed
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index bd0fc69a10a7..c2fc7869b996 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -43,7 +43,7 @@ extern struct start_info *xen_start_info;
static inline uint32_t xen_cpuid_base(void)
{
- return hypervisor_cpuid_base(XEN_SIGNATURE, 2);
+ return cpuid_base_hypervisor(XEN_SIGNATURE, 2);
}
struct pci_dev;
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 460306b35a4b..6f3499507c5e 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -441,6 +441,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7)
#define KVM_X86_QUIRK_STUFF_FEATURE_MSRS (1 << 8)
+#define KVM_X86_QUIRK_IGNORE_GUEST_PAT (1 << 9)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
@@ -844,6 +845,7 @@ struct kvm_sev_snp_launch_start {
};
/* Kept in sync with firmware values for simplicity. */
+#define KVM_SEV_PAGE_TYPE_INVALID 0x0
#define KVM_SEV_SNP_PAGE_TYPE_NORMAL 0x1
#define KVM_SEV_SNP_PAGE_TYPE_ZERO 0x3
#define KVM_SEV_SNP_PAGE_TYPE_UNMEASURED 0x4
@@ -930,4 +932,74 @@ struct kvm_hyperv_eventfd {
#define KVM_X86_SNP_VM 4
#define KVM_X86_TDX_VM 5
+/* Trust Domain eXtension sub-ioctl() commands. */
+enum kvm_tdx_cmd_id {
+ KVM_TDX_CAPABILITIES = 0,
+ KVM_TDX_INIT_VM,
+ KVM_TDX_INIT_VCPU,
+ KVM_TDX_INIT_MEM_REGION,
+ KVM_TDX_FINALIZE_VM,
+ KVM_TDX_GET_CPUID,
+
+ KVM_TDX_CMD_NR_MAX,
+};
+
+struct kvm_tdx_cmd {
+ /* enum kvm_tdx_cmd_id */
+ __u32 id;
+	/* flags for sub-command. If the sub-command doesn't use this, set it to zero. */
+ __u32 flags;
+ /*
+ * data for each sub-command. An immediate or a pointer to the actual
+ * data in process virtual address. If sub-command doesn't use it,
+ * set zero.
+ */
+ __u64 data;
+ /*
+ * Auxiliary error code. The sub-command may return TDX SEAMCALL
+ * status code in addition to -Exxx.
+ */
+ __u64 hw_error;
+};
+
+struct kvm_tdx_capabilities {
+ __u64 supported_attrs;
+ __u64 supported_xfam;
+ __u64 reserved[254];
+
+ /* Configurable CPUID bits for userspace */
+ struct kvm_cpuid2 cpuid;
+};
+
+struct kvm_tdx_init_vm {
+ __u64 attributes;
+ __u64 xfam;
+ __u64 mrconfigid[6]; /* sha384 digest */
+ __u64 mrowner[6]; /* sha384 digest */
+ __u64 mrownerconfig[6]; /* sha384 digest */
+
+ /* The total space for TD_PARAMS before the CPUIDs is 256 bytes */
+ __u64 reserved[12];
+
+ /*
+ * Call KVM_TDX_INIT_VM before vcpu creation, thus before
+ * KVM_SET_CPUID2.
+ * This configuration supersedes KVM_SET_CPUID2s for VCPUs because the
+ * TDX module directly virtualizes those CPUIDs without VMM. The user
+ * space VMM, e.g. qemu, should make KVM_SET_CPUID2 consistent with
+	 * those values. If it doesn't, KVM may have a wrong idea of the guest's
+	 * vCPU CPUIDs, and may wrongly emulate CPUIDs or MSRs that the TDX
+ * module doesn't virtualize.
+ */
+ struct kvm_cpuid2 cpuid;
+};
+
+#define KVM_TDX_MEASURE_MEMORY_REGION _BITULL(0)
+
+struct kvm_tdx_init_mem_region {
+ __u64 source_addr;
+ __u64 gpa;
+ __u64 nr_pages;
+};
+
#endif /* _ASM_X86_KVM_H */
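
Editor's note: the new TDX sub-ioctl ABI above is driven from userspace with struct kvm_tdx_cmd. The sketch below queries KVM_TDX_CAPABILITIES; routing the command through KVM_MEMORY_ENCRYPT_OP on the VM fd mirrors how SEV commands are issued and is an assumption here, as is the 256-entry CPUID buffer size.

#include <linux/kvm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static struct kvm_tdx_capabilities *tdx_query_caps(int vm_fd)
{
        const int nent = 256;                   /* assumed upper bound */
        struct kvm_tdx_capabilities *caps;
        struct kvm_tdx_cmd cmd;

        caps = calloc(1, sizeof(*caps) + nent * sizeof(struct kvm_cpuid_entry2));
        if (!caps)
                return NULL;
        caps->cpuid.nent = nent;

        memset(&cmd, 0, sizeof(cmd));
        cmd.id = KVM_TDX_CAPABILITIES;
        cmd.data = (unsigned long)caps;         /* pointer to the buffer */

        if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd) < 0) {
                free(caps);
                return NULL;
        }
        return caps;
}
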
diff --git a/arch/x86/include/uapi/asm/setup_data.h b/arch/x86/include/uapi/asm/setup_data.h
index 50c45ead4e7c..2671c4e1b3a0 100644
--- a/arch/x86/include/uapi/asm/setup_data.h
+++ b/arch/x86/include/uapi/asm/setup_data.h
@@ -13,7 +13,8 @@
#define SETUP_CC_BLOB 7
#define SETUP_IMA 8
#define SETUP_RNG_SEED 9
-#define SETUP_ENUM_MAX SETUP_RNG_SEED
+#define SETUP_KEXEC_KHO 10
+#define SETUP_ENUM_MAX SETUP_KEXEC_KHO
#define SETUP_INDIRECT (1<<31)
#define SETUP_TYPE_MAX (SETUP_ENUM_MAX | SETUP_INDIRECT)
@@ -78,6 +79,16 @@ struct ima_setup_data {
__u64 size;
} __attribute__((packed));
+/*
+ * Locations of kexec handover metadata
+ */
+struct kho_data {
+ __u64 fdt_addr;
+ __u64 fdt_size;
+ __u64 scratch_addr;
+ __u64 scratch_size;
+} __attribute__((packed));
+
#endif /* __ASSEMBLER__ */
#endif /* _UAPI_ASM_X86_SETUP_DATA_H */
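
Editor's note: SETUP_KEXEC_KHO adds one more node type to the boot_params setup_data chain, pointing at the kexec-handover FDT and scratch area. A hedged sketch of how a consumer might walk the chain follows; real kernel code must early_memremap() each physical address, so the direct pointer casts here are purely illustrative.

#include <asm/bootparam.h>      /* struct boot_params, struct setup_data */

static struct kho_data *find_kho_data(struct boot_params *bp)
{
        unsigned long long pa = bp->hdr.setup_data;

        while (pa) {
                /* Illustrative assumption: the chain is directly addressable. */
                struct setup_data *sd = (struct setup_data *)(unsigned long)pa;

                if (sd->type == SETUP_KEXEC_KHO)
                        return (struct kho_data *)sd->data;
                pa = sd->next;
        }
        return NULL;
}
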
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index ec1321248dac..9c640a521a67 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -95,6 +95,7 @@
#define SVM_EXIT_CR14_WRITE_TRAP 0x09e
#define SVM_EXIT_CR15_WRITE_TRAP 0x09f
#define SVM_EXIT_INVPCID 0x0a2
+#define SVM_EXIT_BUS_LOCK 0x0a5
#define SVM_EXIT_IDLE_HLT 0x0a6
#define SVM_EXIT_NPF 0x400
#define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401
@@ -225,6 +226,7 @@
{ SVM_EXIT_CR4_WRITE_TRAP, "write_cr4_trap" }, \
{ SVM_EXIT_CR8_WRITE_TRAP, "write_cr8_trap" }, \
{ SVM_EXIT_INVPCID, "invpcid" }, \
+ { SVM_EXIT_BUS_LOCK, "buslock" }, \
{ SVM_EXIT_IDLE_HLT, "idle-halt" }, \
{ SVM_EXIT_NPF, "npf" }, \
{ SVM_EXIT_AVIC_INCOMPLETE_IPI, "avic_incomplete_ipi" }, \
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index a5faf6d88f1b..f0f4a4cf84a7 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -34,6 +34,7 @@
#define EXIT_REASON_TRIPLE_FAULT 2
#define EXIT_REASON_INIT_SIGNAL 3
#define EXIT_REASON_SIPI_SIGNAL 4
+#define EXIT_REASON_OTHER_SMI 6
#define EXIT_REASON_INTERRUPT_WINDOW 7
#define EXIT_REASON_NMI_WINDOW 8
@@ -92,6 +93,7 @@
#define EXIT_REASON_TPAUSE 68
#define EXIT_REASON_BUS_LOCK 74
#define EXIT_REASON_NOTIFY 75
+#define EXIT_REASON_TDCALL 77
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
@@ -155,7 +157,8 @@
{ EXIT_REASON_UMWAIT, "UMWAIT" }, \
{ EXIT_REASON_TPAUSE, "TPAUSE" }, \
{ EXIT_REASON_BUS_LOCK, "BUS_LOCK" }, \
- { EXIT_REASON_NOTIFY, "NOTIFY" }
+ { EXIT_REASON_NOTIFY, "NOTIFY" }, \
+ { EXIT_REASON_TDCALL, "TDCALL" }
#define VMX_EXIT_REASON_FLAGS \
{ VMX_EXIT_REASONS_FAILED_VMENTRY, "FAILED_VMENTRY" }
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 84cfa179802c..99a783fd4691 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -141,7 +141,6 @@ obj-$(CONFIG_OF) += devicetree.o
obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
-obj-$(CONFIG_TRACING) += tracepoint.o
obj-$(CONFIG_SCHED_MC_PRIO) += itmt.o
obj-$(CONFIG_X86_UMIP) += umip.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index dae6a73be40e..9fa321a95eb3 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -23,6 +23,8 @@
#include <linux/serial_core.h>
#include <linux/pgtable.h>
+#include <xen/xen.h>
+
#include <asm/e820/api.h>
#include <asm/irqdomain.h>
#include <asm/pci_x86.h>
@@ -1729,6 +1731,15 @@ int __init acpi_mps_check(void)
{
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
/* mptable code is not built-in*/
+
+ /*
+ * Xen disables ACPI in PV DomU guests but it still emulates APIC and
+ * supports SMP. Returning early here ensures that APIC is not disabled
+ * unnecessarily and the guest is not limited to a single vCPU.
+ */
+ if (xen_pv_domain() && !xen_initial_domain())
+ return 0;
+
if (acpi_disabled || acpi_noirq) {
pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n");
return 1;
diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c
index 77bfb846490c..7047124490f6 100644
--- a/arch/x86/kernel/acpi/cppc.c
+++ b/arch/x86/kernel/acpi/cppc.c
@@ -49,7 +49,7 @@ int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
int err;
- err = rdmsrl_safe_on_cpu(cpunum, reg->address, val);
+ err = rdmsrq_safe_on_cpu(cpunum, reg->address, val);
if (!err) {
u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
reg->bit_offset);
@@ -65,7 +65,7 @@ int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
u64 rd_val;
int err;
- err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val);
+ err = rdmsrq_safe_on_cpu(cpunum, reg->address, &rd_val);
if (!err) {
u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
reg->bit_offset);
@@ -74,7 +74,7 @@ int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
val &= mask;
rd_val &= ~mask;
rd_val |= val;
- err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val);
+ err = wrmsrq_safe_on_cpu(cpunum, reg->address, rd_val);
}
return err;
}
@@ -147,7 +147,7 @@ int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
int ret;
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val);
+ ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val);
if (ret)
goto out;
@@ -272,7 +272,7 @@ int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
}
/* detect if running on heterogeneous design */
- if (cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES)) {
+ if (cpu_feature_enabled(X86_FEATURE_AMD_HTR_CORES)) {
switch (core_type) {
case TOPO_CPU_TYPE_UNKNOWN:
pr_warn("Undefined core type found for cpu %d\n", cpu);
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index d5ac34186555..8698d66563ed 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -14,7 +14,7 @@
#include <acpi/processor.h>
#include <asm/cpu_device_id.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/mwait.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
diff --git a/arch/x86/kernel/acpi/madt_wakeup.c b/arch/x86/kernel/acpi/madt_wakeup.c
index f36f28405dcc..6d7603511f52 100644
--- a/arch/x86/kernel/acpi/madt_wakeup.c
+++ b/arch/x86/kernel/acpi/madt_wakeup.c
@@ -126,7 +126,7 @@ static int __init acpi_mp_setup_reset(u64 reset_vector)
return 0;
}
-static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip)
+static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip, unsigned int cpu)
{
if (!acpi_mp_wake_mailbox_paddr) {
pr_warn_once("No MADT mailbox: cannot bringup secondary CPUs. Booting with kexec?\n");
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 6dfecb27b846..91fa262f0e30 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -16,6 +16,7 @@
#include <asm/cacheflush.h>
#include <asm/realmode.h>
#include <asm/hypervisor.h>
+#include <asm/msr.h>
#include <asm/smp.h>
#include <linux/ftrace.h>
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index bf82c6f7d690..ecfe7b497cad 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1,36 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt
-#include <linux/module.h>
-#include <linux/sched.h>
+#include <linux/mmu_context.h>
#include <linux/perf_event.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/stringify.h>
-#include <linux/highmem.h>
-#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
-#include <linux/stop_machine.h>
-#include <linux/slab.h>
-#include <linux/kdebug.h>
-#include <linux/kprobes.h>
-#include <linux/mmu_context.h>
-#include <linux/bsearch.h>
-#include <linux/sync_core.h>
+#include <linux/execmem.h>
+
#include <asm/text-patching.h>
-#include <asm/alternative.h>
-#include <asm/sections.h>
-#include <asm/mce.h>
-#include <asm/nmi.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
#include <asm/insn.h>
-#include <asm/io.h>
-#include <asm/fixmap.h>
-#include <asm/paravirt.h>
-#include <asm/asm-prototypes.h>
-#include <asm/cfi.h>
+#include <asm/ibt.h>
+#include <asm/set_memory.h>
+#include <asm/nmi.h>
int __read_mostly alternatives_patched;
@@ -124,6 +105,171 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
#endif
};
+#ifdef CONFIG_FINEIBT
+static bool cfi_paranoid __ro_after_init;
+#endif
+
+#ifdef CONFIG_MITIGATION_ITS
+
+#ifdef CONFIG_MODULES
+static struct module *its_mod;
+#endif
+static void *its_page;
+static unsigned int its_offset;
+
+/* Initialize a thunk with the "jmp *reg; int3" instructions. */
+static void *its_init_thunk(void *thunk, int reg)
+{
+ u8 *bytes = thunk;
+ int offset = 0;
+ int i = 0;
+
+#ifdef CONFIG_FINEIBT
+ if (cfi_paranoid) {
+ /*
+ * When ITS uses indirect branch thunk the fineibt_paranoid
+ * caller sequence doesn't fit in the caller site. So put the
+ * remaining part of the sequence (<ea> + JNE) into the ITS
+ * thunk.
+ */
+ bytes[i++] = 0xea; /* invalid instruction */
+ bytes[i++] = 0x75; /* JNE */
+ bytes[i++] = 0xfd;
+
+ offset = 1;
+ }
+#endif
+
+ if (reg >= 8) {
+ bytes[i++] = 0x41; /* REX.B prefix */
+ reg -= 8;
+ }
+ bytes[i++] = 0xff;
+ bytes[i++] = 0xe0 + reg; /* jmp *reg */
+ bytes[i++] = 0xcc;
+
+ return thunk + offset;
+}
+
+#ifdef CONFIG_MODULES
+void its_init_mod(struct module *mod)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
+ return;
+
+ mutex_lock(&text_mutex);
+ its_mod = mod;
+ its_page = NULL;
+}
+
+void its_fini_mod(struct module *mod)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
+ return;
+
+ WARN_ON_ONCE(its_mod != mod);
+
+ its_mod = NULL;
+ its_page = NULL;
+ mutex_unlock(&text_mutex);
+
+ for (int i = 0; i < mod->its_num_pages; i++) {
+ void *page = mod->its_page_array[i];
+ execmem_restore_rox(page, PAGE_SIZE);
+ }
+}
+
+void its_free_mod(struct module *mod)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
+ return;
+
+ for (int i = 0; i < mod->its_num_pages; i++) {
+ void *page = mod->its_page_array[i];
+ execmem_free(page);
+ }
+ kfree(mod->its_page_array);
+}
+#endif /* CONFIG_MODULES */
+
+static void *its_alloc(void)
+{
+ void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE);
+
+ if (!page)
+ return NULL;
+
+#ifdef CONFIG_MODULES
+ if (its_mod) {
+ void *tmp = krealloc(its_mod->its_page_array,
+ (its_mod->its_num_pages+1) * sizeof(void *),
+ GFP_KERNEL);
+ if (!tmp)
+ return NULL;
+
+ its_mod->its_page_array = tmp;
+ its_mod->its_page_array[its_mod->its_num_pages++] = page;
+
+ execmem_make_temp_rw(page, PAGE_SIZE);
+ }
+#endif /* CONFIG_MODULES */
+
+ return no_free_ptr(page);
+}
+
+static void *its_allocate_thunk(int reg)
+{
+ int size = 3 + (reg / 8);
+ void *thunk;
+
+#ifdef CONFIG_FINEIBT
+ /*
+ * The ITS thunk contains an indirect jump and an int3 instruction so
+ * its size is 3 or 4 bytes depending on the register used. If CFI
+ * paranoid is used then 3 extra bytes are added in the ITS thunk to
+ * complete the fineibt_paranoid caller sequence.
+ */
+ if (cfi_paranoid)
+ size += 3;
+#endif
+
+ if (!its_page || (its_offset + size - 1) >= PAGE_SIZE) {
+ its_page = its_alloc();
+ if (!its_page) {
+ pr_err("ITS page allocation failed\n");
+ return NULL;
+ }
+ memset(its_page, INT3_INSN_OPCODE, PAGE_SIZE);
+ its_offset = 32;
+ }
+
+ /*
+ * If the indirect branch instruction will be in the lower half
+ * of a cacheline, then update the offset to reach the upper half.
+ */
+ if ((its_offset + size - 1) % 64 < 32)
+ its_offset = ((its_offset - 1) | 0x3F) + 33;
+
+ thunk = its_page + its_offset;
+ its_offset += size;
+
+ return its_init_thunk(thunk, reg);
+}
+
+u8 *its_static_thunk(int reg)
+{
+ u8 *thunk = __x86_indirect_its_thunk_array[reg];
+
+#ifdef CONFIG_FINEIBT
+ /* Paranoid thunk starts 2 bytes before */
+ if (cfi_paranoid)
+ return thunk - 2;
+#endif
+ return thunk;
+}
+
+#endif
+
/*
* Nomenclature for variable names to simplify and clarify this code and ease
* any potential staring at it:
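
Editor's note: the alignment step in its_allocate_thunk() above is the subtle part: if the emitted indirect branch would end in the lower half of a 64-byte cache line, the offset is bumped with ((its_offset - 1) | 0x3F) + 33 so it lands in the upper half of the next line. A quick standalone check of that arithmetic:

#include <stdio.h>

static unsigned int its_bump(unsigned int off)
{
        /* Round (off - 1) up to the end of its cache line, then add 33: the
         * result is always byte 32 of the following 64-byte line. */
        return ((off - 1) | 0x3F) + 33;
}

int main(void)
{
        for (unsigned int off = 32; off <= 192; off += 20)
                printf("offset %3u -> %3u (mod 64 = %u)\n",
                       off, its_bump(off), its_bump(off) % 64);
        return 0;
}
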
@@ -171,13 +317,6 @@ static void add_nop(u8 *buf, unsigned int len)
*buf = INT3_INSN_OPCODE;
}
-extern s32 __retpoline_sites[], __retpoline_sites_end[];
-extern s32 __return_sites[], __return_sites_end[];
-extern s32 __cfi_sites[], __cfi_sites_end[];
-extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
-extern s32 __smp_locks[], __smp_locks_end[];
-void text_poke_early(void *addr, const void *opcode, size_t len);
-
/*
* Matches NOP and NOPL, not any of the other possible NOPs.
*/
@@ -369,7 +508,7 @@ static void __apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen,
}
}
-void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
+void text_poke_apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
{
__apply_relocation(buf, instr, instrlen, repl, repl_len);
optimize_nops(instr, buf, instrlen);
@@ -457,7 +596,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
DPRINTK(ALT, "alt table %px, -> %px", start, end);
/*
- * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
+ * KASAN_SHADOW_START is defined using
* cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
* During the process, KASAN becomes confused seeing partial LA57
* conversion and triggers a false-positive out-of-bound report.
@@ -525,7 +664,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
insn_buff[insn_buff_sz] = 0x90;
- apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen);
+ text_poke_apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen);
DUMP_BYTES(ALT, instr, a->instrlen, "%px: old_insn: ", instr);
DUMP_BYTES(ALT, replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
@@ -581,7 +720,8 @@ static int emit_indirect(int op, int reg, u8 *bytes)
return i;
}
-static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+static int __emit_trampoline(void *addr, struct insn *insn, u8 *bytes,
+ void *call_dest, void *jmp_dest)
{
u8 op = insn->opcode.bytes[0];
int i = 0;
@@ -602,7 +742,7 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8
switch (op) {
case CALL_INSN_OPCODE:
__text_gen_insn(bytes+i, op, addr+i,
- __x86_indirect_call_thunk_array[reg],
+ call_dest,
CALL_INSN_SIZE);
i += CALL_INSN_SIZE;
break;
@@ -610,7 +750,7 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8
case JMP32_INSN_OPCODE:
clang_jcc:
__text_gen_insn(bytes+i, op, addr+i,
- __x86_indirect_jump_thunk_array[reg],
+ jmp_dest,
JMP32_INSN_SIZE);
i += JMP32_INSN_SIZE;
break;
@@ -625,6 +765,48 @@ clang_jcc:
return i;
}
+static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+{
+ return __emit_trampoline(addr, insn, bytes,
+ __x86_indirect_call_thunk_array[reg],
+ __x86_indirect_jump_thunk_array[reg]);
+}
+
+#ifdef CONFIG_MITIGATION_ITS
+static int emit_its_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+{
+ u8 *thunk = __x86_indirect_its_thunk_array[reg];
+ u8 *tmp = its_allocate_thunk(reg);
+
+ if (tmp)
+ thunk = tmp;
+
+ return __emit_trampoline(addr, insn, bytes, thunk, thunk);
+}
+
+/* Check if an indirect branch is at ITS-unsafe address */
+static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
+ return false;
+
+ /* Indirect branch opcode is 2 or 3 bytes depending on reg */
+ addr += 1 + reg / 8;
+
+ /* Lower-half of the cacheline? */
+ return !(addr & 0x20);
+}
+#else /* CONFIG_MITIGATION_ITS */
+
+#ifdef CONFIG_FINEIBT
+static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg)
+{
+ return false;
+}
+#endif
+
+#endif /* CONFIG_MITIGATION_ITS */
+
/*
* Rewrite the compiler generated retpoline thunk calls.
*
@@ -699,6 +881,15 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
bytes[i++] = 0xe8; /* LFENCE */
}
+#ifdef CONFIG_MITIGATION_ITS
+ /*
+ * Check if the address of last byte of emitted-indirect is in
+ * lower-half of the cacheline. Such branches need ITS mitigation.
+ */
+ if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + i, reg))
+ return emit_its_trampoline(addr, insn, reg, bytes);
+#endif
+
ret = emit_indirect(op, reg, bytes + i);
if (ret < 0)
return ret;
@@ -732,6 +923,7 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
int len, ret;
u8 bytes[16];
u8 op1, op2;
+ u8 *dest;
ret = insn_decode_kernel(&insn, addr);
if (WARN_ON_ONCE(ret < 0))
@@ -748,6 +940,12 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
case CALL_INSN_OPCODE:
case JMP32_INSN_OPCODE:
+ /* Check for cfi_paranoid + ITS */
+ dest = addr + insn.length + insn.immediate.value;
+ if (dest[-1] == 0xea && (dest[0] & 0xf0) == 0x70) {
+ WARN_ON_ONCE(cfi_mode != CFI_FINEIBT);
+ continue;
+ }
break;
case 0x0f: /* escape */
@@ -775,6 +973,21 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
#ifdef CONFIG_MITIGATION_RETHUNK
+bool cpu_wants_rethunk(void)
+{
+ return cpu_feature_enabled(X86_FEATURE_RETHUNK);
+}
+
+bool cpu_wants_rethunk_at(void *addr)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_RETHUNK))
+ return false;
+ if (x86_return_thunk != its_return_thunk)
+ return true;
+
+ return !((unsigned long)addr & 0x20);
+}
+
/*
* Rewrite the compiler generated return thunk tail-calls.
*
@@ -791,7 +1004,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
int i = 0;
/* Patch the custom return thunks... */
- if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+ if (cpu_wants_rethunk_at(addr)) {
i = JMP32_INSN_SIZE;
__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
} else {
@@ -808,7 +1021,7 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
{
s32 *s;
- if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+ if (cpu_wants_rethunk())
static_call_force_reinit();
for (s = start; s < end; s++) {
@@ -1022,8 +1235,6 @@ int cfi_get_func_arity(void *func)
static bool cfi_rand __ro_after_init = true;
static u32 cfi_seed __ro_after_init;
-static bool cfi_paranoid __ro_after_init = false;
-
/*
* Re-hash the CFI hash with a boot-time seed while making sure the result is
* not a valid ENDBR instruction.
@@ -1436,6 +1647,19 @@ static int cfi_rand_callers(s32 *start, s32 *end)
return 0;
}
+static int emit_paranoid_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+{
+ u8 *thunk = (void *)__x86_indirect_its_thunk_array[reg] - 2;
+
+#ifdef CONFIG_MITIGATION_ITS
+ u8 *tmp = its_allocate_thunk(reg);
+ if (tmp)
+ thunk = tmp;
+#endif
+
+ return __emit_trampoline(addr, insn, bytes, thunk, thunk);
+}
+
static int cfi_rewrite_callers(s32 *start, s32 *end)
{
s32 *s;
@@ -1477,9 +1701,14 @@ static int cfi_rewrite_callers(s32 *start, s32 *end)
memcpy(bytes, fineibt_paranoid_start, fineibt_paranoid_size);
memcpy(bytes + fineibt_caller_hash, &hash, 4);
- ret = emit_indirect(op, 11, bytes + fineibt_paranoid_ind);
- if (WARN_ON_ONCE(ret != 3))
- continue;
+ if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + fineibt_paranoid_ind, 11)) {
+ emit_paranoid_trampoline(addr + fineibt_caller_size,
+ &insn, 11, bytes + fineibt_caller_size);
+ } else {
+ ret = emit_indirect(op, 11, bytes + fineibt_paranoid_ind);
+ if (WARN_ON_ONCE(ret != 3))
+ continue;
+ }
text_poke_early(addr, bytes, fineibt_paranoid_size);
}
@@ -1706,29 +1935,66 @@ Efault:
return false;
}
+static bool is_paranoid_thunk(unsigned long addr)
+{
+ u32 thunk;
+
+ __get_kernel_nofault(&thunk, (u32 *)addr, u32, Efault);
+ return (thunk & 0x00FFFFFF) == 0xfd75ea;
+
+Efault:
+ return false;
+}
+
/*
* regs->ip points to a LOCK Jcc.d8 instruction from the fineibt_paranoid_start[]
- * sequence.
+ * sequence, or to an invalid instruction (0xea) + Jcc.d8 for cfi_paranoid + ITS
+ * thunk.
*/
static bool decode_fineibt_paranoid(struct pt_regs *regs, unsigned long *target, u32 *type)
{
unsigned long addr = regs->ip - fineibt_paranoid_ud;
- u32 hash;
- if (!cfi_paranoid || !is_cfi_trap(addr + fineibt_caller_size - LEN_UD2))
+ if (!cfi_paranoid)
return false;
- __get_kernel_nofault(&hash, addr + fineibt_caller_hash, u32, Efault);
- *target = regs->r11 + fineibt_preamble_size;
- *type = regs->r10;
+ if (is_cfi_trap(addr + fineibt_caller_size - LEN_UD2)) {
+ *target = regs->r11 + fineibt_preamble_size;
+ *type = regs->r10;
+
+ /*
+ * Since the trapping instruction is the exact, but LOCK prefixed,
+ * Jcc.d8 that got us here, the normal fixup will work.
+ */
+ return true;
+ }
/*
- * Since the trapping instruction is the exact, but LOCK prefixed,
- * Jcc.d8 that got us here, the normal fixup will work.
+ * The cfi_paranoid + ITS thunk combination results in:
+ *
+ * 0: 41 ba 78 56 34 12 mov $0x12345678, %r10d
+ * 6: 45 3b 53 f7 cmp -0x9(%r11), %r10d
+ * a: 4d 8d 5b f0 lea -0x10(%r11), %r11
+ * e: 2e e8 XX XX XX XX cs call __x86_indirect_paranoid_thunk_r11
+ *
+ * Where the paranoid_thunk looks like:
+ *
+ * 1d: <ea> (bad)
+ * __x86_indirect_paranoid_thunk_r11:
+ * 1e: 75 fd jne 1d
+ * __x86_indirect_its_thunk_r11:
+ * 20: 41 ff eb jmp *%r11
+ * 23: cc int3
+ *
*/
- return true;
+ if (is_paranoid_thunk(regs->ip)) {
+ *target = regs->r11 + fineibt_preamble_size;
+ *type = regs->r10;
+
+ regs->ip = *target;
+ return true;
+ }
-Efault:
return false;
}
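
Editor's note: is_paranoid_thunk() above recognizes the combined thunk by its first three bytes, 0xea 0x75 0xfd, read as a little-endian 32-bit word and masked with 0x00FFFFFF. A tiny worked check of that comparison:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        const uint8_t bytes[4] = { 0xea, 0x75, 0xfd, 0x41 };
        uint32_t word;

        memcpy(&word, bytes, sizeof(word));     /* little-endian load */
        assert((word & 0x00FFFFFF) == 0xfd75ea);
        return 0;
}
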
@@ -2010,7 +2276,7 @@ __visible noinline void __init __alt_reloc_selftest(void *arg)
static noinline void __init alt_reloc_selftest(void)
{
/*
- * Tests apply_relocation().
+ * Tests text_poke_apply_relocation().
*
* This has a relative immediate (CALL) in a place other than the first
* instruction and additionally on x86_64 we get a RIP-relative LEA:
@@ -2031,6 +2297,8 @@ static noinline void __init alt_reloc_selftest(void)
void __init alternative_instructions(void)
{
+ u64 ibt;
+
int3_selftest();
/*
@@ -2057,6 +2325,9 @@ void __init alternative_instructions(void)
*/
paravirt_set_cap();
+ /* Keep CET-IBT disabled until caller/callee are patched */
+ ibt = ibt_save(/*disable*/ true);
+
__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
__cfi_sites, __cfi_sites_end, true);
@@ -2080,6 +2351,8 @@ void __init alternative_instructions(void)
*/
apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
+ ibt_restore(ibt);
+
#ifdef CONFIG_SMP
/* Patch to UP if other cpus not imminent. */
if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
@@ -2140,76 +2413,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
}
}
-typedef struct {
- struct mm_struct *mm;
-} temp_mm_state_t;
-
-/*
- * Using a temporary mm allows to set temporary mappings that are not accessible
- * by other CPUs. Such mappings are needed to perform sensitive memory writes
- * that override the kernel memory protections (e.g., W^X), without exposing the
- * temporary page-table mappings that are required for these write operations to
- * other CPUs. Using a temporary mm also allows to avoid TLB shootdowns when the
- * mapping is torn down.
- *
- * Context: The temporary mm needs to be used exclusively by a single core. To
- * harden security IRQs must be disabled while the temporary mm is
- * loaded, thereby preventing interrupt handler bugs from overriding
- * the kernel memory protection.
- */
-static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
-{
- temp_mm_state_t temp_state;
-
- lockdep_assert_irqs_disabled();
-
- /*
- * Make sure not to be in TLB lazy mode, as otherwise we'll end up
- * with a stale address space WITHOUT being in lazy mode after
- * restoring the previous mm.
- */
- if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
- leave_mm();
-
- temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
- switch_mm_irqs_off(NULL, mm, current);
-
- /*
- * If breakpoints are enabled, disable them while the temporary mm is
- * used. Userspace might set up watchpoints on addresses that are used
- * in the temporary mm, which would lead to wrong signals being sent or
- * crashes.
- *
- * Note that breakpoints are not disabled selectively, which also causes
- * kernel breakpoints (e.g., perf's) to be disabled. This might be
- * undesirable, but still seems reasonable as the code that runs in the
- * temporary mm should be short.
- */
- if (hw_breakpoint_active())
- hw_breakpoint_disable();
-
- return temp_state;
-}
-
-__ro_after_init struct mm_struct *poking_mm;
-__ro_after_init unsigned long poking_addr;
-
-static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
-{
- lockdep_assert_irqs_disabled();
-
- switch_mm_irqs_off(NULL, prev_state.mm, current);
-
- /* Clear the cpumask, to indicate no TLB flushing is needed anywhere */
- cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(poking_mm));
-
- /*
- * Restore the breakpoints if they were disabled before the temporary mm
- * was loaded.
- */
- if (hw_breakpoint_active())
- hw_breakpoint_restore();
-}
+__ro_after_init struct mm_struct *text_poke_mm;
+__ro_after_init unsigned long text_poke_mm_addr;
static void text_poke_memcpy(void *dst, const void *src, size_t len)
{
@@ -2229,7 +2434,7 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
{
bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
struct page *pages[2] = {NULL};
- temp_mm_state_t prev;
+ struct mm_struct *prev_mm;
unsigned long flags;
pte_t pte, *ptep;
spinlock_t *ptl;
@@ -2266,7 +2471,7 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
/*
* The lock is not really needed, but this allows to avoid open-coding.
*/
- ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+ ptep = get_locked_pte(text_poke_mm, text_poke_mm_addr, &ptl);
/*
* This must not fail; preallocated in poking_init().
@@ -2276,21 +2481,21 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
local_irq_save(flags);
pte = mk_pte(pages[0], pgprot);
- set_pte_at(poking_mm, poking_addr, ptep, pte);
+ set_pte_at(text_poke_mm, text_poke_mm_addr, ptep, pte);
if (cross_page_boundary) {
pte = mk_pte(pages[1], pgprot);
- set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
+ set_pte_at(text_poke_mm, text_poke_mm_addr + PAGE_SIZE, ptep + 1, pte);
}
/*
* Loading the temporary mm behaves as a compiler barrier, which
* guarantees that the PTE will be set at the time memcpy() is done.
*/
- prev = use_temporary_mm(poking_mm);
+ prev_mm = use_temporary_mm(text_poke_mm);
kasan_disable_current();
- func((u8 *)poking_addr + offset_in_page(addr), src, len);
+ func((u8 *)text_poke_mm_addr + offset_in_page(addr), src, len);
kasan_enable_current();
/*
@@ -2299,22 +2504,22 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
*/
barrier();
- pte_clear(poking_mm, poking_addr, ptep);
+ pte_clear(text_poke_mm, text_poke_mm_addr, ptep);
if (cross_page_boundary)
- pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
+ pte_clear(text_poke_mm, text_poke_mm_addr + PAGE_SIZE, ptep + 1);
/*
* Loading the previous page-table hierarchy requires a serializing
* instruction that already allows the core to see the updated version.
* Xen-PV is assumed to serialize execution in a similar manner.
*/
- unuse_temporary_mm(prev);
+ unuse_temporary_mm(prev_mm);
/*
* Flushing the TLB might involve IPIs, which would require enabled
* IRQs, but not if the mm is not used, as it is in this point.
*/
- flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
+ flush_tlb_mm_range(text_poke_mm, text_poke_mm_addr, text_poke_mm_addr +
(cross_page_boundary ? 2 : 1) * PAGE_SIZE,
PAGE_SHIFT, false);
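
Editor's note: the __text_poke() flow above maps the target page(s) into a dedicated patching mm, writes through that alias, then tears the mapping down and flushes. A loose user-space analogue, which flips page protections around the write instead of using a private mm and assumes the patch does not cross a page boundary, looks like:

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static int patch_text(void *addr, const void *opcode, size_t len)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        void *page = (void *)((uintptr_t)addr & ~(pagesz - 1));

        /* Unlike the kernel's temporary mm, this write is globally visible. */
        if (mprotect(page, pagesz, PROT_READ | PROT_WRITE | PROT_EXEC))
                return -1;
        memcpy(addr, opcode, len);
        __builtin___clear_cache((char *)addr, (char *)addr + len);
        return mprotect(page, pagesz, PROT_READ | PROT_EXEC);
}
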
@@ -2450,7 +2655,7 @@ static void do_sync_core(void *info)
sync_core();
}
-void text_poke_sync(void)
+void smp_text_poke_sync_each_cpu(void)
{
on_each_cpu(do_sync_core, NULL, 1);
}
@@ -2460,64 +2665,66 @@ void text_poke_sync(void)
* this thing. When len == 6 everything is prefixed with 0x0f and we map
* opcode to Jcc.d8, using len to distinguish.
*/
-struct text_poke_loc {
+struct smp_text_poke_loc {
/* addr := _stext + rel_addr */
s32 rel_addr;
s32 disp;
u8 len;
u8 opcode;
- const u8 text[POKE_MAX_OPCODE_SIZE];
- /* see text_poke_bp_batch() */
+ const u8 text[TEXT_POKE_MAX_OPCODE_SIZE];
+ /* see smp_text_poke_batch_finish() */
u8 old;
};
-struct bp_patching_desc {
- struct text_poke_loc *vec;
+#define TEXT_POKE_ARRAY_MAX (PAGE_SIZE / sizeof(struct smp_text_poke_loc))
+
+static struct smp_text_poke_array {
+ struct smp_text_poke_loc vec[TEXT_POKE_ARRAY_MAX];
int nr_entries;
- atomic_t refs;
-};
+} text_poke_array;
-static struct bp_patching_desc bp_desc;
+static DEFINE_PER_CPU(atomic_t, text_poke_array_refs);
-static __always_inline
-struct bp_patching_desc *try_get_desc(void)
+/*
+ * These four __always_inline annotations imply noinstr, necessary
+ * due to smp_text_poke_int3_handler() being noinstr:
+ */
+
+static __always_inline bool try_get_text_poke_array(void)
{
- struct bp_patching_desc *desc = &bp_desc;
+ atomic_t *refs = this_cpu_ptr(&text_poke_array_refs);
- if (!raw_atomic_inc_not_zero(&desc->refs))
- return NULL;
+ if (!raw_atomic_inc_not_zero(refs))
+ return false;
- return desc;
+ return true;
}
-static __always_inline void put_desc(void)
+static __always_inline void put_text_poke_array(void)
{
- struct bp_patching_desc *desc = &bp_desc;
+ atomic_t *refs = this_cpu_ptr(&text_poke_array_refs);
smp_mb__before_atomic();
- raw_atomic_dec(&desc->refs);
+ raw_atomic_dec(refs);
}
-static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
+static __always_inline void *text_poke_addr(const struct smp_text_poke_loc *tpl)
{
- return _stext + tp->rel_addr;
+ return _stext + tpl->rel_addr;
}
-static __always_inline int patch_cmp(const void *key, const void *elt)
+static __always_inline int patch_cmp(const void *tpl_a, const void *tpl_b)
{
- struct text_poke_loc *tp = (struct text_poke_loc *) elt;
-
- if (key < text_poke_addr(tp))
+ if (tpl_a < text_poke_addr(tpl_b))
return -1;
- if (key > text_poke_addr(tp))
+ if (tpl_a > text_poke_addr(tpl_b))
return 1;
return 0;
}
-noinstr int poke_int3_handler(struct pt_regs *regs)
+noinstr int smp_text_poke_int3_handler(struct pt_regs *regs)
{
- struct bp_patching_desc *desc;
- struct text_poke_loc *tp;
+ struct smp_text_poke_loc *tpl;
int ret = 0;
void *ip;
@@ -2526,41 +2733,40 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
/*
* Having observed our INT3 instruction, we now must observe
- * bp_desc with non-zero refcount:
+ * text_poke_array with non-zero refcount:
*
- * bp_desc.refs = 1 INT3
- * WMB RMB
- * write INT3 if (bp_desc.refs != 0)
+ * text_poke_array_refs = 1 INT3
+ * WMB RMB
+ * write INT3 if (text_poke_array_refs != 0)
*/
smp_rmb();
- desc = try_get_desc();
- if (!desc)
+ if (!try_get_text_poke_array())
return 0;
/*
- * Discount the INT3. See text_poke_bp_batch().
+ * Discount the INT3. See smp_text_poke_batch_finish().
*/
ip = (void *) regs->ip - INT3_INSN_SIZE;
/*
* Skip the binary search if there is a single member in the vector.
*/
- if (unlikely(desc->nr_entries > 1)) {
- tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
- sizeof(struct text_poke_loc),
+ if (unlikely(text_poke_array.nr_entries > 1)) {
+ tpl = __inline_bsearch(ip, text_poke_array.vec, text_poke_array.nr_entries,
+ sizeof(struct smp_text_poke_loc),
patch_cmp);
- if (!tp)
+ if (!tpl)
goto out_put;
} else {
- tp = desc->vec;
- if (text_poke_addr(tp) != ip)
+ tpl = text_poke_array.vec;
+ if (text_poke_addr(tpl) != ip)
goto out_put;
}
- ip += tp->len;
+ ip += tpl->len;
- switch (tp->opcode) {
+ switch (tpl->opcode) {
case INT3_INSN_OPCODE:
/*
* Someone poked an explicit INT3, they'll want to handle it,
@@ -2573,16 +2779,16 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
break;
case CALL_INSN_OPCODE:
- int3_emulate_call(regs, (long)ip + tp->disp);
+ int3_emulate_call(regs, (long)ip + tpl->disp);
break;
case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE:
- int3_emulate_jmp(regs, (long)ip + tp->disp);
+ int3_emulate_jmp(regs, (long)ip + tpl->disp);
break;
case 0x70 ... 0x7f: /* Jcc */
- int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
+ int3_emulate_jcc(regs, tpl->opcode & 0xf, (long)ip, tpl->disp);
break;
default:
@@ -2592,51 +2798,50 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
ret = 1;
out_put:
- put_desc();
+ put_text_poke_array();
return ret;
}
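/*
 * [Editorial illustration -- not part of the patch] A minimal sketch of the
 * publish/consume ordering the handler above depends on, with hypothetical
 * names (publisher()/consumer()); in the real code the INT3 fault itself
 * plays the role of the flag read, and the raw_ atomic variants are used
 * because the consumer is noinstr:
 */
static atomic_t example_refs;
static int example_flag;

static void example_publisher(void)		/* smp_text_poke_batch_finish() */
{
	atomic_set_release(&example_refs, 1);	/* publish text_poke_array */
	smp_wmb();
	WRITE_ONCE(example_flag, 1);		/* write the INT3 byte */
}

static int example_consumer(void)		/* smp_text_poke_int3_handler() */
{
	if (!READ_ONCE(example_flag))		/* "observed our INT3" */
		return 0;
	smp_rmb();
	/* A non-zero refcount now guarantees up-to-date text_poke_array data. */
	return atomic_inc_not_zero(&example_refs);
}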
-#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
-static struct text_poke_loc tp_vec[TP_VEC_MAX];
-static int tp_vec_nr;
-
/**
- * text_poke_bp_batch() -- update instructions on live kernel on SMP
- * @tp: vector of instructions to patch
- * @nr_entries: number of entries in the vector
+ * smp_text_poke_batch_finish() -- update instructions on live kernel on SMP
*
- * Modify multi-byte instruction by using int3 breakpoint on SMP.
- * We completely avoid stop_machine() here, and achieve the
- * synchronization using int3 breakpoint.
+ * Input state:
+ * text_poke_array.vec: vector of instructions to patch
+ * text_poke_array.nr_entries: number of entries in the vector
+ *
+ * Modify multi-byte instructions by using INT3 breakpoints on SMP.
+ * We completely avoid using stop_machine() here, and achieve the
+ * synchronization using INT3 breakpoints and SMP cross-calls.
*
* The way it is done:
* - For each entry in the vector:
- * - add a int3 trap to the address that will be patched
- * - sync cores
+ * - add an INT3 trap to the address that will be patched
+ * - SMP sync all CPUs
* - For each entry in the vector:
* - update all but the first byte of the patched range
- * - sync cores
+ * - SMP sync all CPUs
* - For each entry in the vector:
- * - replace the first byte (int3) by the first byte of
+ * - replace the first byte (INT3) by the first byte of the
* replacing opcode
- * - sync cores
+ * - SMP sync all CPUs
*/
-static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
+void smp_text_poke_batch_finish(void)
{
unsigned char int3 = INT3_INSN_OPCODE;
unsigned int i;
int do_sync;
- lockdep_assert_held(&text_mutex);
+ if (!text_poke_array.nr_entries)
+ return;
- bp_desc.vec = tp;
- bp_desc.nr_entries = nr_entries;
+ lockdep_assert_held(&text_mutex);
/*
- * Corresponds to the implicit memory barrier in try_get_desc() to
- * ensure reading a non-zero refcount provides up to date bp_desc data.
+ * Corresponds to the implicit memory barrier in try_get_text_poke_array() to
+ * ensure reading a non-zero refcount provides up to date text_poke_array data.
*/
- atomic_set_release(&bp_desc.refs, 1);
+ for_each_possible_cpu(i)
+ atomic_set_release(per_cpu_ptr(&text_poke_array_refs, i), 1);
/*
* Function tracing can enable thousands of places that need to be
@@ -2649,33 +2854,33 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
cond_resched();
/*
- * Corresponding read barrier in int3 notifier for making sure the
- * nr_entries and handler are correctly ordered wrt. patching.
+ * Corresponding read barrier in INT3 notifier for making sure the
+ * text_poke_array.nr_entries and handler are correctly ordered wrt. patching.
*/
smp_wmb();
/*
- * First step: add a int3 trap to the address that will be patched.
+	 * First step: add an INT3 trap to the address that will be patched.
*/
- for (i = 0; i < nr_entries; i++) {
- tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
- text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
+ for (i = 0; i < text_poke_array.nr_entries; i++) {
+ text_poke_array.vec[i].old = *(u8 *)text_poke_addr(&text_poke_array.vec[i]);
+ text_poke(text_poke_addr(&text_poke_array.vec[i]), &int3, INT3_INSN_SIZE);
}
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
/*
* Second step: update all but the first byte of the patched range.
*/
- for (do_sync = 0, i = 0; i < nr_entries; i++) {
- u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
- u8 _new[POKE_MAX_OPCODE_SIZE+1];
- const u8 *new = tp[i].text;
- int len = tp[i].len;
+ for (do_sync = 0, i = 0; i < text_poke_array.nr_entries; i++) {
+ u8 old[TEXT_POKE_MAX_OPCODE_SIZE+1] = { text_poke_array.vec[i].old, };
+ u8 _new[TEXT_POKE_MAX_OPCODE_SIZE+1];
+ const u8 *new = text_poke_array.vec[i].text;
+ int len = text_poke_array.vec[i].len;
if (len - INT3_INSN_SIZE > 0) {
memcpy(old + INT3_INSN_SIZE,
- text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
+ text_poke_addr(&text_poke_array.vec[i]) + INT3_INSN_SIZE,
len - INT3_INSN_SIZE);
if (len == 6) {
@@ -2684,7 +2889,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
new = _new;
}
- text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
+ text_poke(text_poke_addr(&text_poke_array.vec[i]) + INT3_INSN_SIZE,
new + INT3_INSN_SIZE,
len - INT3_INSN_SIZE);
@@ -2715,7 +2920,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* The old instruction is recorded so that the event can be
* processed forwards or backwards.
*/
- perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
+ perf_event_text_poke(text_poke_addr(&text_poke_array.vec[i]), old, len, new, len);
}
if (do_sync) {
@@ -2724,63 +2929,79 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* not necessary and we'd be safe even without it. But
* better safe than sorry (plus there's not only Intel).
*/
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
}
/*
- * Third step: replace the first byte (int3) by the first byte of
+ * Third step: replace the first byte (INT3) by the first byte of the
* replacing opcode.
*/
- for (do_sync = 0, i = 0; i < nr_entries; i++) {
- u8 byte = tp[i].text[0];
+ for (do_sync = 0, i = 0; i < text_poke_array.nr_entries; i++) {
+ u8 byte = text_poke_array.vec[i].text[0];
- if (tp[i].len == 6)
+ if (text_poke_array.vec[i].len == 6)
byte = 0x0f;
if (byte == INT3_INSN_OPCODE)
continue;
- text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
+ text_poke(text_poke_addr(&text_poke_array.vec[i]), &byte, INT3_INSN_SIZE);
do_sync++;
}
if (do_sync)
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
/*
* Remove and wait for refs to be zero.
+ *
+ * Notably, if after step-3 above the INT3 got removed, then the
+ * smp_text_poke_sync_each_cpu() will have serialized against any running INT3
+ * handlers and the below spin-wait will not happen.
+ *
+	 * IOW, unless the replacement instruction is INT3, this case goes
+	 * unused.
*/
- if (!atomic_dec_and_test(&bp_desc.refs))
- atomic_cond_read_acquire(&bp_desc.refs, !VAL);
+ for_each_possible_cpu(i) {
+ atomic_t *refs = per_cpu_ptr(&text_poke_array_refs, i);
+
+ if (unlikely(!atomic_dec_and_test(refs)))
+ atomic_cond_read_acquire(refs, !VAL);
+ }
+
+ /* They are all completed: */
+ text_poke_array.nr_entries = 0;
}
-static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
- const void *opcode, size_t len, const void *emulate)
+static void __smp_text_poke_batch_add(void *addr, const void *opcode, size_t len, const void *emulate)
{
+ struct smp_text_poke_loc *tpl;
struct insn insn;
int ret, i = 0;
+ tpl = &text_poke_array.vec[text_poke_array.nr_entries++];
+
if (len == 6)
i = 1;
- memcpy((void *)tp->text, opcode+i, len-i);
+ memcpy((void *)tpl->text, opcode+i, len-i);
if (!emulate)
emulate = opcode;
ret = insn_decode_kernel(&insn, emulate);
BUG_ON(ret < 0);
- tp->rel_addr = addr - (void *)_stext;
- tp->len = len;
- tp->opcode = insn.opcode.bytes[0];
+ tpl->rel_addr = addr - (void *)_stext;
+ tpl->len = len;
+ tpl->opcode = insn.opcode.bytes[0];
if (is_jcc32(&insn)) {
/*
* Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
*/
- tp->opcode = insn.opcode.bytes[1] - 0x10;
+ tpl->opcode = insn.opcode.bytes[1] - 0x10;
}
- switch (tp->opcode) {
+ switch (tpl->opcode) {
case RET_INSN_OPCODE:
case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE:
@@ -2789,14 +3010,14 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
* next instruction can be padded with INT3.
*/
for (i = insn.length; i < len; i++)
- BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
+ BUG_ON(tpl->text[i] != INT3_INSN_OPCODE);
break;
default:
BUG_ON(len != insn.length);
}
- switch (tp->opcode) {
+ switch (tpl->opcode) {
case INT3_INSN_OPCODE:
case RET_INSN_OPCODE:
break;
@@ -2805,21 +3026,21 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE:
case 0x70 ... 0x7f: /* Jcc */
- tp->disp = insn.immediate.value;
+ tpl->disp = insn.immediate.value;
break;
default: /* assume NOP */
switch (len) {
case 2: /* NOP2 -- emulate as JMP8+0 */
BUG_ON(memcmp(emulate, x86_nops[len], len));
- tp->opcode = JMP8_INSN_OPCODE;
- tp->disp = 0;
+ tpl->opcode = JMP8_INSN_OPCODE;
+ tpl->disp = 0;
break;
case 5: /* NOP5 -- emulate as JMP32+0 */
BUG_ON(memcmp(emulate, x86_nops[len], len));
- tp->opcode = JMP32_INSN_OPCODE;
- tp->disp = 0;
+ tpl->opcode = JMP32_INSN_OPCODE;
+ tpl->disp = 0;
break;
default: /* unknown instruction */
@@ -2830,51 +3051,50 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
}
/*
- * We hard rely on the tp_vec being ordered; ensure this is so by flushing
+ * We hard rely on the text_poke_array.vec being ordered; ensure this is so by flushing
* early if needed.
*/
-static bool tp_order_fail(void *addr)
+static bool text_poke_addr_ordered(void *addr)
{
- struct text_poke_loc *tp;
-
- if (!tp_vec_nr)
- return false;
-
- if (!addr) /* force */
- return true;
+ WARN_ON_ONCE(!addr);
- tp = &tp_vec[tp_vec_nr - 1];
- if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
+ if (!text_poke_array.nr_entries)
return true;
- return false;
-}
-
-static void text_poke_flush(void *addr)
-{
- if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
- text_poke_bp_batch(tp_vec, tp_vec_nr);
- tp_vec_nr = 0;
- }
-}
+ /*
+ * If the last current entry's address is higher than the
+ * new entry's address we'd like to add, then ordering
+ * is violated and we must first flush all pending patching
+ * requests:
+ */
+ if (text_poke_addr(text_poke_array.vec + text_poke_array.nr_entries-1) > addr)
+ return false;
-void text_poke_finish(void)
-{
- text_poke_flush(NULL);
+ return true;
}
-void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
+/**
+ * smp_text_poke_batch_add() -- update instruction on live kernel on SMP, batched
+ * @addr: address to patch
+ * @opcode: opcode of new instruction
+ * @len: length to copy
+ * @emulate: instruction to be emulated
+ *
+ * Add a new instruction to the current queue of to-be-patched instructions
+ * the kernel maintains. The patching request will not be executed immediately,
+ * but becomes part of an array of patching requests, optimized for batched
+ * execution. All pending patching requests will be executed on the next
+ * smp_text_poke_batch_finish() call.
+ */
+void __ref smp_text_poke_batch_add(void *addr, const void *opcode, size_t len, const void *emulate)
{
- struct text_poke_loc *tp;
-
- text_poke_flush(addr);
-
- tp = &tp_vec[tp_vec_nr++];
- text_poke_loc_init(tp, addr, opcode, len, emulate);
+ if (text_poke_array.nr_entries == TEXT_POKE_ARRAY_MAX || !text_poke_addr_ordered(addr))
+ smp_text_poke_batch_finish();
+ __smp_text_poke_batch_add(addr, opcode, len, emulate);
}
/**
- * text_poke_bp() -- update instructions on live kernel on SMP
+ * smp_text_poke_single() -- update instruction on live kernel on SMP immediately
* @addr: address to patch
* @opcode: opcode of new instruction
* @len: length to copy
@@ -2882,12 +3102,11 @@ void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const voi
*
* Update a single instruction with the vector in the stack, avoiding
* dynamically allocated memory. This function should be used when it is
- * not possible to allocate memory.
+ * not possible to allocate memory for a vector. The single instruction
+ * is patched in immediately.
*/
-void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
+void __ref smp_text_poke_single(void *addr, const void *opcode, size_t len, const void *emulate)
{
- struct text_poke_loc tp;
-
- text_poke_loc_init(&tp, addr, opcode, len, emulate);
- text_poke_bp_batch(&tp, 1);
+ __smp_text_poke_batch_add(addr, opcode, len, emulate);
+ smp_text_poke_batch_finish();
}
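/*
 * [Editorial illustration -- not part of the patch] Rough sketch of how a
 * caller is expected to drive the renamed batching API, still under
 * text_mutex; the patch sites are hypothetical placeholders and the opcode
 * is assumed to be the standard 5-byte NOP (x86_nops[5]), which
 * __smp_text_poke_batch_add() emulates as JMP32+0:
 */
static void example_patch_two_sites(void *site_a, void *site_b)
{
	u8 nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

	mutex_lock(&text_mutex);
	smp_text_poke_batch_add(site_a, nop5, sizeof(nop5), NULL);
	smp_text_poke_batch_add(site_b, nop5, sizeof(nop5), NULL);
	smp_text_poke_batch_finish();	/* flush the whole pending queue */
	mutex_unlock(&text_mutex);
}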
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index c884deca839b..3485d419c2f5 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -39,7 +39,7 @@
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/dma.h>
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
#include <asm/x86_init.h>
static unsigned long iommu_bus_base; /* GART remapping area (physical) */
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 6d12a9b69432..c1acead6227a 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -13,7 +13,9 @@
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
-#include <asm/amd_nb.h>
+
+#include <asm/amd/nb.h>
+#include <asm/cpuid/api.h>
static u32 *flush_words;
@@ -91,10 +93,7 @@ static int amd_cache_northbridges(void)
if (amd_gart_present())
amd_northbridges.flags |= AMD_NB_GART;
- /*
- * Check for L3 cache presence.
- */
- if (!cpuid_edx(0x80000006))
+ if (!cpuid_amd_hygon_has_l3_cache())
return 0;
/*
@@ -151,7 +150,7 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
/* Assume CPUs from Fam10h have mmconfig, although not all VMs do */
if (boot_cpu_data.x86 < 0x10 ||
- rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
+ rdmsrq_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
return NULL;
/* mmconfig is not enabled */
diff --git a/arch/x86/kernel/amd_node.c b/arch/x86/kernel/amd_node.c
index b670fa85c61b..a40176b62eb5 100644
--- a/arch/x86/kernel/amd_node.c
+++ b/arch/x86/kernel/amd_node.c
@@ -9,7 +9,7 @@
*/
#include <linux/debugfs.h>
-#include <asm/amd_node.h>
+#include <asm/amd/node.h>
/*
* AMD Nodes are a physical collection of I/O devices within an SoC. There can be one
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 89c0c8a3fc7e..769321185a08 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -29,7 +29,7 @@
#include <asm/gart.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
#include <asm/x86_init.h>
#include <linux/crash_dump.h>
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 62584a347931..d73ba5a7b623 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -59,6 +59,7 @@
#include <asm/time.h>
#include <asm/smp.h>
#include <asm/mce.h>
+#include <asm/msr.h>
#include <asm/tsc.h>
#include <asm/hypervisor.h>
#include <asm/cpu_device_id.h>
@@ -425,7 +426,7 @@ static int lapic_next_deadline(unsigned long delta,
weak_wrmsr_fence();
tsc = rdtsc();
- wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
+ wrmsrq(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
return 0;
}
@@ -449,7 +450,7 @@ static int lapic_timer_shutdown(struct clock_event_device *evt)
* the timer _and_ zero the counter registers:
*/
if (v & APIC_LVT_TIMER_TSCDEADLINE)
- wrmsrl(MSR_IA32_TSC_DEADLINE, 0);
+ wrmsrq(MSR_IA32_TSC_DEADLINE, 0);
else
apic_write(APIC_TMICT, 0);
@@ -1694,7 +1695,7 @@ static bool x2apic_hw_locked(void)
x86_arch_cap_msr = x86_read_arch_cap_msr();
if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
- rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
+ rdmsrq(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
return (msr & LEGACY_XAPIC_DISABLED);
}
return false;
@@ -1707,12 +1708,12 @@ static void __x2apic_disable(void)
if (!boot_cpu_has(X86_FEATURE_APIC))
return;
- rdmsrl(MSR_IA32_APICBASE, msr);
+ rdmsrq(MSR_IA32_APICBASE, msr);
if (!(msr & X2APIC_ENABLE))
return;
/* Disable xapic and x2apic first and then reenable xapic mode */
- wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
- wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
+ wrmsrq(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
+ wrmsrq(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
printk_once(KERN_INFO "x2apic disabled\n");
}
@@ -1720,10 +1721,10 @@ static void __x2apic_enable(void)
{
u64 msr;
- rdmsrl(MSR_IA32_APICBASE, msr);
+ rdmsrq(MSR_IA32_APICBASE, msr);
if (msr & X2APIC_ENABLE)
return;
- wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
+ wrmsrq(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
printk_once(KERN_INFO "x2apic enabled\n");
}
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index b5bb7a2e8340..58abb941c45b 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -27,7 +27,13 @@ static void noop_send_IPI_allbutself(int vector) { }
static void noop_send_IPI_all(int vector) { }
static void noop_send_IPI_self(int vector) { }
static void noop_apic_icr_write(u32 low, u32 id) { }
-static int noop_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip) { return -1; }
+
+static int noop_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip,
+ unsigned int cpu)
+{
+ return -1;
+}
+
static u64 noop_apic_icr_read(void) { return 0; }
static u32 noop_get_apic_id(u32 apicid) { return 0; }
static void noop_apic_eoi(void) { }
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 16410f087b7a..5c5be2d58242 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/pgtable.h>
+#include <asm/msr.h>
#include <asm/numachip/numachip.h>
#include <asm/numachip/numachip_csr.h>
@@ -31,7 +32,7 @@ static u32 numachip1_get_apic_id(u32 x)
unsigned int id = (x >> 24) & 0xff;
if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
- rdmsrl(MSR_FAM10H_NODE_ID, value);
+ rdmsrq(MSR_FAM10H_NODE_ID, value);
id |= (value << 2) & 0xff00;
}
@@ -42,7 +43,7 @@ static u32 numachip2_get_apic_id(u32 x)
{
u64 mcfg;
- rdmsrl(MSR_FAM10H_MMIO_CONF_BASE, mcfg);
+ rdmsrq(MSR_FAM10H_MMIO_CONF_BASE, mcfg);
return ((mcfg >> (28 - 8)) & 0xfff00) | (x >> 24);
}
@@ -56,7 +57,7 @@ static void numachip2_apic_icr_write(int apicid, unsigned int val)
numachip2_write32_lcsr(NUMACHIP2_APIC_ICR, (apicid << 12) | val);
}
-static int numachip_wakeup_secondary(u32 phys_apicid, unsigned long start_rip)
+static int numachip_wakeup_secondary(u32 phys_apicid, unsigned long start_rip, unsigned int cpu)
{
numachip_apic_icr_write(phys_apicid, APIC_DM_INIT);
numachip_apic_icr_write(phys_apicid, APIC_DM_STARTUP |
@@ -150,7 +151,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
/* Account for nodes per socket in multi-core-module processors */
if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
- rdmsrl(MSR_FAM10H_NODE_ID, val);
+ rdmsrq(MSR_FAM10H_NODE_ID, val);
nodes = ((val >> 3) & 7) + 1;
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index eebc360ed1bb..5ba2feb2c04c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1486,7 +1486,7 @@ static void __init delay_with_tsc(void)
* 1 GHz == 40 jiffies
*/
do {
- rep_nop();
+ native_pause();
now = rdtsc();
} while ((now - start) < 40000000000ULL / HZ && time_before_eq(jiffies, end));
}
@@ -2225,7 +2225,7 @@ static int mp_irqdomain_create(int ioapic)
/* Handle device tree enumerated APICs proper */
if (cfg->dev) {
- fn = of_node_to_fwnode(cfg->dev);
+ fn = of_fwnode_handle(cfg->dev);
} else {
fn = irq_domain_alloc_named_id_fwnode("IO-APIC", mpc_ioapic_id(ioapic));
if (!fn)
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index fee42a73d64a..93069b13d3af 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -864,7 +864,7 @@ void lapic_offline(void)
__vector_cleanup(cl, false);
irq_matrix_offline(vector_matrix);
- WARN_ON_ONCE(try_to_del_timer_sync(&cl->timer) < 0);
+ WARN_ON_ONCE(timer_delete_sync_try(&cl->timer) < 0);
WARN_ON_ONCE(!hlist_empty(&cl->head));
unlock_vector_lock();
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 7fef504ca508..15209f220e1f 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -667,7 +667,7 @@ static __init void build_uv_gr_table(void)
}
}
-static int uv_wakeup_secondary(u32 phys_apicid, unsigned long start_rip)
+static int uv_wakeup_secondary(u32 phys_apicid, unsigned long start_rip, unsigned int cpu)
{
unsigned long val;
int pnode;
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index ad4ea6fb3b6c..6259b474073b 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -33,6 +33,14 @@
static void __used common(void)
{
+ OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
+ OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
+ OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
+ OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
+ OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
+ OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
+ OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
+
BLANK();
OFFSET(TASK_threadsp, task_struct, thread.sp);
#ifdef CONFIG_STACKPROTECTOR
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 2b411cd00a4e..e0a292db97b2 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -12,15 +12,6 @@ void foo(void);
void foo(void)
{
- OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
- OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
- OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
- OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
- OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
- OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
- OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
- BLANK();
-
OFFSET(PT_EBX, pt_regs, bx);
OFFSET(PT_ECX, pt_regs, cx);
OFFSET(PT_EDX, pt_regs, dx);
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index d86d7d6e750c..a951333c5995 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -185,7 +185,7 @@ static void *patch_dest(void *dest, bool direct)
u8 *pad = dest - tsize;
memcpy(insn_buff, skl_call_thunk_template, tsize);
- apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);
+ text_poke_apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);
/* Already patched? */
if (!bcmp(pad, insn_buff, tsize))
@@ -294,7 +294,7 @@ static bool is_callthunk(void *addr)
pad = (void *)(dest - tmpl_size);
memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
- apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size);
+ text_poke_apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size);
return !bcmp(pad, insn_buff, tmpl_size);
}
@@ -312,7 +312,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
return 0;
memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
- apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size);
+ text_poke_apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size);
memcpy(*pprog, insn_buff, tmpl_size);
*pprog += tmpl_size;
diff --git a/arch/x86/kernel/cet.c b/arch/x86/kernel/cet.c
index 303bf74d175b..99444409c026 100644
--- a/arch/x86/kernel/cet.c
+++ b/arch/x86/kernel/cet.c
@@ -2,6 +2,7 @@
#include <linux/ptrace.h>
#include <asm/bugs.h>
+#include <asm/msr.h>
#include <asm/traps.h>
enum cp_error_code {
@@ -55,7 +56,7 @@ static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)
* will be whatever is live in userspace. So read the SSP before enabling
* interrupts so locking the fpregs to do it later is not required.
*/
- rdmsrl(MSR_IA32_PL3_SSP, ssp);
+ rdmsrq(MSR_IA32_PL3_SSP, ssp);
cond_local_irq_enable(regs);
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 4efdf5c2efc8..1e26179ff18c 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -24,7 +24,7 @@ obj-y += rdrand.o
obj-y += match.o
obj-y += bugs.o
obj-y += aperfmperf.o
-obj-y += cpuid-deps.o
+obj-y += cpuid-deps.o cpuid_0x2_table.o
obj-y += umwait.o
obj-y += capflags.o powerflags.o
@@ -38,6 +38,9 @@ obj-y += intel.o tsx.o
obj-$(CONFIG_PM) += intel_epb.o
endif
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
+ifeq ($(CONFIG_AMD_NB)$(CONFIG_SYSFS),yy)
+obj-y += amd_cache_disable.o
+endif
obj-$(CONFIG_CPU_SUP_HYGON) += hygon.o
obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 79569f72b8ee..93da466dfe2c 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -9,6 +9,7 @@
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
+#include <asm/amd/fch.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
@@ -21,6 +22,7 @@
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>
+#include <asm/msr.h>
#include <asm/sev.h>
#ifdef CONFIG_X86_64
@@ -31,7 +33,7 @@
u16 invlpgb_count_max __ro_after_init;
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+static inline int rdmsrq_amd_safe(unsigned msr, u64 *p)
{
u32 gprs[8] = { 0 };
int err;
@@ -49,7 +51,7 @@ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
return err;
}
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
+static inline int wrmsrq_amd_safe(unsigned msr, u64 val)
{
u32 gprs[8] = { 0 };
@@ -383,7 +385,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
(c->x86 == 0x10 && c->x86_model >= 0x2)) {
u64 val;
- rdmsrl(MSR_K7_HWCR, val);
+ rdmsrq(MSR_K7_HWCR, val);
if (!(val & BIT(24)))
pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
}
@@ -422,7 +424,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
* Try to cache the base value so further operations can
* avoid RMW. If that faults, do not enable SSBD.
*/
- if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+ if (!rdmsrq_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
setup_force_cpu_cap(X86_FEATURE_SSBD);
x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
@@ -472,6 +474,11 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
case 0x60 ... 0x7f:
setup_force_cpu_cap(X86_FEATURE_ZEN5);
break;
+ case 0x50 ... 0x5f:
+ case 0x90 ... 0xaf:
+ case 0xc0 ... 0xcf:
+ setup_force_cpu_cap(X86_FEATURE_ZEN6);
+ break;
default:
goto warn;
}
@@ -508,7 +515,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
*/
if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
/* Check if memory encryption is enabled */
- rdmsrl(MSR_AMD64_SYSCFG, msr);
+ rdmsrq(MSR_AMD64_SYSCFG, msr);
if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
goto clear_all;
@@ -525,7 +532,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
if (!sme_me_mask)
setup_clear_cpu_cap(X86_FEATURE_SME);
- rdmsrl(MSR_K7_HWCR, msr);
+ rdmsrq(MSR_K7_HWCR, msr);
if (!(msr & MSR_K7_HWCR_SMMLOCK))
goto clear_sev;
@@ -612,7 +619,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
- else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
+ else if (c->x86 >= 0x19 && !wrmsrq_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
setup_force_cpu_cap(X86_FEATURE_SBPB);
}
@@ -636,14 +643,14 @@ static void init_amd_k8(struct cpuinfo_x86 *c)
*/
if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) {
clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
- if (!rdmsrl_amd_safe(0xc001100d, &value)) {
+ if (!rdmsrq_amd_safe(0xc001100d, &value)) {
value &= ~BIT_64(32);
- wrmsrl_amd_safe(0xc001100d, value);
+ wrmsrq_amd_safe(0xc001100d, value);
}
}
if (!c->x86_model_id[0])
- strcpy(c->x86_model_id, "Hammer");
+ strscpy(c->x86_model_id, "Hammer");
#ifdef CONFIG_SMP
/*
@@ -788,9 +795,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
* Disable it on the affected CPUs.
*/
if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
- if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
+ if (!rdmsrq_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
value |= 0x1E;
- wrmsrl_safe(MSR_F15H_IC_CFG, value);
+ wrmsrq_safe(MSR_F15H_IC_CFG, value);
}
}
@@ -805,6 +812,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
static const struct x86_cpu_id erratum_1386_microcode[] = {
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x01), 0x2, 0x2, 0x0800126e),
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x31), 0x0, 0x0, 0x08301052),
+ {}
};
static void fix_erratum_1386(struct cpuinfo_x86 *c)
@@ -838,9 +846,9 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
* suppresses non-branch predictions.
*/
if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
- if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
+ if (!rdmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
- wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
+ wrmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
}
}
#endif
@@ -868,6 +876,16 @@ static void init_amd_zen1(struct cpuinfo_x86 *c)
pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
setup_force_cpu_bug(X86_BUG_DIV0);
+
+ /*
+ * Turn off the Instructions Retired free counter on machines that are
+ * susceptible to erratum #1054 "Instructions Retired Performance
+ * Counter May Be Inaccurate".
+ */
+ if (c->x86_model < 0x30) {
+ msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
+ clear_cpu_cap(c, X86_FEATURE_IRPERF);
+ }
}
static bool cpu_has_zenbleed_microcode(void)
@@ -1014,7 +1032,7 @@ static void init_amd(struct cpuinfo_x86 *c)
init_amd_cacheinfo(c);
if (cpu_has(c, X86_FEATURE_SVM)) {
- rdmsrl(MSR_VM_CR, vm_cr);
+ rdmsrq(MSR_VM_CR, vm_cr);
if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
clear_cpu_cap(c, X86_FEATURE_SVM);
@@ -1051,13 +1069,8 @@ static void init_amd(struct cpuinfo_x86 *c)
if (!cpu_feature_enabled(X86_FEATURE_XENPV))
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
- /*
- * Turn on the Instructions Retired free counter on machines not
- * susceptible to erratum #1054 "Instructions Retired Performance
- * Counter May Be Inaccurate".
- */
- if (cpu_has(c, X86_FEATURE_IRPERF) &&
- (boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
+ /* Enable the Instructions Retired free counter */
+ if (cpu_has(c, X86_FEATURE_IRPERF))
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
check_null_seg_clears_base(c);
@@ -1200,7 +1213,7 @@ void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
return;
- wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
+ wrmsrq(amd_msr_dr_addr_masks[dr], mask);
per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
}
@@ -1231,3 +1244,56 @@ void amd_check_microcode(void)
if (cpu_feature_enabled(X86_FEATURE_ZEN2))
on_each_cpu(zenbleed_check_cpu, NULL, 1);
}
+
+static const char * const s5_reset_reason_txt[] = {
+ [0] = "thermal pin BP_THERMTRIP_L was tripped",
+ [1] = "power button was pressed for 4 seconds",
+ [2] = "shutdown pin was tripped",
+ [4] = "remote ASF power off command was received",
+ [9] = "internal CPU thermal limit was tripped",
+ [16] = "system reset pin BP_SYS_RST_L was tripped",
+ [17] = "software issued PCI reset",
+ [18] = "software wrote 0x4 to reset control register 0xCF9",
+ [19] = "software wrote 0x6 to reset control register 0xCF9",
+ [20] = "software wrote 0xE to reset control register 0xCF9",
+ [21] = "ACPI power state transition occurred",
+ [22] = "keyboard reset pin KB_RST_L was tripped",
+ [23] = "internal CPU shutdown event occurred",
+ [24] = "system failed to boot before failed boot timer expired",
+ [25] = "hardware watchdog timer expired",
+ [26] = "remote ASF reset command was received",
+ [27] = "an uncorrected error caused a data fabric sync flood event",
+ [29] = "FCH and MP1 failed warm reset handshake",
+ [30] = "a parity error occurred",
+ [31] = "a software sync flood event occurred",
+};
+
+static __init int print_s5_reset_status_mmio(void)
+{
+ unsigned long value;
+ void __iomem *addr;
+ int i;
+
+ if (!cpu_feature_enabled(X86_FEATURE_ZEN))
+ return 0;
+
+ addr = ioremap(FCH_PM_BASE + FCH_PM_S5_RESET_STATUS, sizeof(value));
+ if (!addr)
+ return 0;
+
+ value = ioread32(addr);
+ iounmap(addr);
+
+ for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) {
+ if (!(value & BIT(i)))
+ continue;
+
+ if (s5_reset_reason_txt[i]) {
+ pr_info("x86/amd: Previous system reset reason [0x%08lx]: %s\n",
+ value, s5_reset_reason_txt[i]);
+ }
+ }
+
+ return 0;
+}
+late_initcall(print_s5_reset_status_mmio);
diff --git a/arch/x86/kernel/cpu/amd_cache_disable.c b/arch/x86/kernel/cpu/amd_cache_disable.c
new file mode 100644
index 000000000000..8843b9557aea
--- /dev/null
+++ b/arch/x86/kernel/cpu/amd_cache_disable.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD L3 cache_disable_{0,1} sysfs handling
+ * Documentation/ABI/testing/sysfs-devices-system-cpu
+ */
+
+#include <linux/cacheinfo.h>
+#include <linux/capability.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include <asm/amd/nb.h>
+
+#include "cpu.h"
+
+/*
+ * L3 cache descriptors
+ */
+static void amd_calc_l3_indices(struct amd_northbridge *nb)
+{
+ struct amd_l3_cache *l3 = &nb->l3_cache;
+ unsigned int sc0, sc1, sc2, sc3;
+ u32 val = 0;
+
+ pci_read_config_dword(nb->misc, 0x1C4, &val);
+
+ /* calculate subcache sizes */
+ l3->subcaches[0] = sc0 = !(val & BIT(0));
+ l3->subcaches[1] = sc1 = !(val & BIT(4));
+
+ if (boot_cpu_data.x86 == 0x15) {
+ l3->subcaches[0] = sc0 += !(val & BIT(1));
+ l3->subcaches[1] = sc1 += !(val & BIT(5));
+ }
+
+ l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
+ l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
+
+ l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
+}
+
+/*
+ * check whether a slot used for disabling an L3 index is occupied.
+ * @l3: L3 cache descriptor
+ * @slot: slot number (0..1)
+ *
+ * @returns: the disabled index if used or negative value if slot free.
+ */
+static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned int slot)
+{
+ unsigned int reg = 0;
+
+ pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
+
+ /* check whether this slot is activated already */
+ if (reg & (3UL << 30))
+ return reg & 0xfff;
+
+ return -1;
+}
+
+static ssize_t show_cache_disable(struct cacheinfo *ci, char *buf, unsigned int slot)
+{
+ int index;
+ struct amd_northbridge *nb = ci->priv;
+
+ index = amd_get_l3_disable_slot(nb, slot);
+ if (index >= 0)
+ return sysfs_emit(buf, "%d\n", index);
+
+ return sysfs_emit(buf, "FREE\n");
+}
+
+#define SHOW_CACHE_DISABLE(slot) \
+static ssize_t \
+cache_disable_##slot##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct cacheinfo *ci = dev_get_drvdata(dev); \
+ return show_cache_disable(ci, buf, slot); \
+}
+
+SHOW_CACHE_DISABLE(0)
+SHOW_CACHE_DISABLE(1)
+
+static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
+ unsigned int slot, unsigned long idx)
+{
+ int i;
+
+ idx |= BIT(30);
+
+ /*
+ * disable index in all 4 subcaches
+ */
+ for (i = 0; i < 4; i++) {
+ u32 reg = idx | (i << 20);
+
+ if (!nb->l3_cache.subcaches[i])
+ continue;
+
+ pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
+
+ /*
+ * We need to WBINVD on a core on the node containing the L3
+		 * cache whose indices we disable; therefore a simple wbinvd()
+		 * is not sufficient.
+ */
+ wbinvd_on_cpu(cpu);
+
+ reg |= BIT(31);
+ pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
+ }
+}
+
+/*
+ * disable a L3 cache index by using a disable-slot
+ *
+ * @l3: L3 cache descriptor
+ * @cpu: A CPU on the node containing the L3 cache
+ * @slot: slot number (0..1)
+ * @index: index to disable
+ *
+ * @return: 0 on success, error status on failure
+ */
+static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
+ unsigned int slot, unsigned long index)
+{
+ int ret = 0;
+
+ /* check if @slot is already used or the index is already disabled */
+ ret = amd_get_l3_disable_slot(nb, slot);
+ if (ret >= 0)
+ return -EEXIST;
+
+ if (index > nb->l3_cache.indices)
+ return -EINVAL;
+
+ /* check whether the other slot has disabled the same index already */
+ if (index == amd_get_l3_disable_slot(nb, !slot))
+ return -EEXIST;
+
+ amd_l3_disable_index(nb, cpu, slot, index);
+
+ return 0;
+}
+
+static ssize_t store_cache_disable(struct cacheinfo *ci, const char *buf,
+ size_t count, unsigned int slot)
+{
+ struct amd_northbridge *nb = ci->priv;
+ unsigned long val = 0;
+ int cpu, err = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ cpu = cpumask_first(&ci->shared_cpu_map);
+
+ if (kstrtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ err = amd_set_l3_disable_slot(nb, cpu, slot, val);
+ if (err) {
+ if (err == -EEXIST)
+ pr_warn("L3 slot %d in use/index already disabled!\n",
+ slot);
+ return err;
+ }
+ return count;
+}
+
+#define STORE_CACHE_DISABLE(slot) \
+static ssize_t \
+cache_disable_##slot##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ struct cacheinfo *ci = dev_get_drvdata(dev); \
+ return store_cache_disable(ci, buf, count, slot); \
+}
+
+STORE_CACHE_DISABLE(0)
+STORE_CACHE_DISABLE(1)
+
+static ssize_t subcaches_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cacheinfo *ci = dev_get_drvdata(dev);
+ int cpu = cpumask_first(&ci->shared_cpu_map);
+
+ return sysfs_emit(buf, "%x\n", amd_get_subcaches(cpu));
+}
+
+static ssize_t subcaches_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cacheinfo *ci = dev_get_drvdata(dev);
+ int cpu = cpumask_first(&ci->shared_cpu_map);
+ unsigned long val;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (kstrtoul(buf, 16, &val) < 0)
+ return -EINVAL;
+
+ if (amd_set_subcaches(cpu, val))
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(cache_disable_0);
+static DEVICE_ATTR_RW(cache_disable_1);
+static DEVICE_ATTR_RW(subcaches);
+
+static umode_t cache_private_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cacheinfo *ci = dev_get_drvdata(dev);
+ umode_t mode = attr->mode;
+
+ if (!ci->priv)
+ return 0;
+
+ if ((attr == &dev_attr_subcaches.attr) &&
+ amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ return mode;
+
+ if ((attr == &dev_attr_cache_disable_0.attr ||
+ attr == &dev_attr_cache_disable_1.attr) &&
+ amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+ return mode;
+
+ return 0;
+}
+
+static struct attribute_group cache_private_group = {
+ .is_visible = cache_private_attrs_is_visible,
+};
+
+static void init_amd_l3_attrs(void)
+{
+ static struct attribute **amd_l3_attrs;
+ int n = 1;
+
+ if (amd_l3_attrs) /* already initialized */
+ return;
+
+ if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+ n += 2;
+ if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ n += 1;
+
+ amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
+ if (!amd_l3_attrs)
+ return;
+
+ n = 0;
+ if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
+ amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
+ amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;
+ }
+ if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ amd_l3_attrs[n++] = &dev_attr_subcaches.attr;
+
+ cache_private_group.attrs = amd_l3_attrs;
+}
+
+const struct attribute_group *cache_get_priv_group(struct cacheinfo *ci)
+{
+ struct amd_northbridge *nb = ci->priv;
+
+ if (ci->level < 3 || !nb)
+ return NULL;
+
+ if (nb && nb->l3_cache.indices)
+ init_amd_l3_attrs();
+
+ return &cache_private_group;
+}
+
+struct amd_northbridge *amd_init_l3_cache(int index)
+{
+ struct amd_northbridge *nb;
+ int node;
+
+ /* only for L3, and not in virtualized environments */
+ if (index < 3)
+ return NULL;
+
+ node = topology_amd_node_id(smp_processor_id());
+ nb = node_to_amd_nb(node);
+ if (nb && !nb->l3_cache.indices)
+ amd_calc_l3_indices(nb);
+
+ return nb;
+}
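/*
 * [Editorial illustration -- not part of the patch] The new file only moves
 * the existing L3 cache_disable_{0,1}/subcaches attributes out of the generic
 * cacheinfo code; a rough user-space sketch of exercising them follows. The
 * path layout is taken from the ABI document referenced above, but the cpu,
 * index and value used here are made up for illustration:
 */
#include <stdio.h>

int main(void)
{
	/* Ask slot 0 of cpu0's L3 (typically index3) to disable cache index 42. */
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0", "w");

	if (!f)
		return 1;
	fprintf(f, "42\n");
	fclose(f);
	return 0;
}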
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 6cf31a1649c4..a315b0627dfb 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -20,6 +20,7 @@
#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/msr.h>
#include "cpu.h"
@@ -40,8 +41,8 @@ static void init_counter_refs(void)
{
u64 aperf, mperf;
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
+ rdmsrq(MSR_IA32_APERF, aperf);
+ rdmsrq(MSR_IA32_MPERF, mperf);
this_cpu_write(cpu_samples.aperf, aperf);
this_cpu_write(cpu_samples.mperf, mperf);
@@ -99,7 +100,7 @@ static bool __init turbo_disabled(void)
u64 misc_en;
int err;
- err = rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_en);
+ err = rdmsrq_safe(MSR_IA32_MISC_ENABLE, &misc_en);
if (err)
return false;
@@ -110,11 +111,11 @@ static bool __init slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
{
int err;
- err = rdmsrl_safe(MSR_ATOM_CORE_RATIOS, base_freq);
+ err = rdmsrq_safe(MSR_ATOM_CORE_RATIOS, base_freq);
if (err)
return false;
- err = rdmsrl_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq);
+ err = rdmsrq_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq);
if (err)
return false;
@@ -152,13 +153,13 @@ static bool __init knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq,
int err, i;
u64 msr;
- err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
+ err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq);
if (err)
return false;
*base_freq = (*base_freq >> 8) & 0xFF; /* max P state */
- err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
+ err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &msr);
if (err)
return false;
@@ -190,17 +191,17 @@ static bool __init skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int s
u32 group_size;
int err, i;
- err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
+ err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq);
if (err)
return false;
*base_freq = (*base_freq >> 8) & 0xFF; /* max P state */
- err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &ratios);
+ err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &ratios);
if (err)
return false;
- err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT1, &counts);
+ err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT1, &counts);
if (err)
return false;
@@ -220,11 +221,11 @@ static bool __init core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
u64 msr;
int err;
- err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
+ err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq);
if (err)
return false;
- err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
+ err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &msr);
if (err)
return false;
@@ -474,8 +475,8 @@ void arch_scale_freq_tick(void)
if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
return;
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
+ rdmsrq(MSR_IA32_APERF, aperf);
+ rdmsrq(MSR_IA32_MPERF, mperf);
acnt = aperf - s->aperf;
mcnt = mperf - s->mperf;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4386aa6c69e1..7f94e6a5497d 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -34,21 +34,66 @@
#include "cpu.h"
+/*
+ * Speculation Vulnerability Handling
+ *
+ * Each vulnerability is handled with the following functions:
+ * <vuln>_select_mitigation() -- Selects a mitigation to use. This should
+ * take into account all relevant command line
+ * options.
+ * <vuln>_update_mitigation() -- This is called after all vulnerabilities have
+ * selected a mitigation, in case the selection
+ * may want to change based on other choices
+ * made. This function is optional.
+ * <vuln>_apply_mitigation() -- Enable the selected mitigation.
+ *
+ * The compile-time mitigation in all cases should be AUTO. An explicit
+ * command-line option can override AUTO. If no such option is
+ * provided, <vuln>_select_mitigation() will override AUTO to the best
+ * mitigation option.
+ */
+
static void __init spectre_v1_select_mitigation(void);
+static void __init spectre_v1_apply_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
+static void __init spectre_v2_update_mitigation(void);
+static void __init spectre_v2_apply_mitigation(void);
static void __init retbleed_select_mitigation(void);
+static void __init retbleed_update_mitigation(void);
+static void __init retbleed_apply_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
+static void __init spectre_v2_user_update_mitigation(void);
+static void __init spectre_v2_user_apply_mitigation(void);
static void __init ssb_select_mitigation(void);
+static void __init ssb_apply_mitigation(void);
static void __init l1tf_select_mitigation(void);
+static void __init l1tf_apply_mitigation(void);
static void __init mds_select_mitigation(void);
-static void __init md_clear_update_mitigation(void);
-static void __init md_clear_select_mitigation(void);
+static void __init mds_update_mitigation(void);
+static void __init mds_apply_mitigation(void);
static void __init taa_select_mitigation(void);
+static void __init taa_update_mitigation(void);
+static void __init taa_apply_mitigation(void);
static void __init mmio_select_mitigation(void);
+static void __init mmio_update_mitigation(void);
+static void __init mmio_apply_mitigation(void);
+static void __init rfds_select_mitigation(void);
+static void __init rfds_update_mitigation(void);
+static void __init rfds_apply_mitigation(void);
static void __init srbds_select_mitigation(void);
+static void __init srbds_apply_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
+static void __init srso_update_mitigation(void);
+static void __init srso_apply_mitigation(void);
static void __init gds_select_mitigation(void);
+static void __init gds_apply_mitigation(void);
+static void __init bhi_select_mitigation(void);
+static void __init bhi_update_mitigation(void);
+static void __init bhi_apply_mitigation(void);
+static void __init its_select_mitigation(void);
+static void __init its_update_mitigation(void);
+static void __init its_apply_mitigation(void);
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
@@ -59,7 +104,6 @@ DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
-EXPORT_SYMBOL_GPL(x86_pred_cmd);
static u64 __ro_after_init x86_arch_cap_msr;
@@ -67,11 +111,19 @@ static DEFINE_MUTEX(spec_ctrl_mutex);
void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
+static void __init set_return_thunk(void *thunk)
+{
+ if (x86_return_thunk != __x86_return_thunk)
+ pr_warn("x86/bugs: return thunk changed\n");
+
+ x86_return_thunk = thunk;
+}
+
/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
this_cpu_write(x86_spec_ctrl_current, val);
- wrmsrl(MSR_IA32_SPEC_CTRL, val);
+ wrmsrq(MSR_IA32_SPEC_CTRL, val);
}
/*
@@ -90,7 +142,7 @@ void update_spec_ctrl_cond(u64 val)
* forced the update can be delayed until that time.
*/
if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
- wrmsrl(MSR_IA32_SPEC_CTRL, val);
+ wrmsrq(MSR_IA32_SPEC_CTRL, val);
}
noinstr u64 spec_ctrl_current(void)
@@ -128,9 +180,13 @@ EXPORT_SYMBOL_GPL(mds_idle_clear);
*/
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
-/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
-DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
-EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
+/*
+ * Controls CPU Fill buffer clear before VMenter. This is a subset of
+ * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
+ * mitigation is required.
+ */
+DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
+EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
void __init cpu_select_mitigations(void)
{
@@ -140,7 +196,7 @@ void __init cpu_select_mitigations(void)
* init code as it is not enumerated and depends on the family.
*/
if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
- rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
/*
* Previously running kernel (kexec), may have some controls
@@ -155,30 +211,67 @@ void __init cpu_select_mitigations(void)
/* Select the proper CPU mitigations before patching alternatives: */
spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
- /*
- * retbleed_select_mitigation() relies on the state set by
- * spectre_v2_select_mitigation(); specifically it wants to know about
- * spectre_v2=ibrs.
- */
retbleed_select_mitigation();
- /*
- * spectre_v2_user_select_mitigation() relies on the state set by
- * retbleed_select_mitigation(); specifically the STIBP selection is
- * forced for UNRET or IBPB.
- */
spectre_v2_user_select_mitigation();
ssb_select_mitigation();
l1tf_select_mitigation();
- md_clear_select_mitigation();
+ mds_select_mitigation();
+ taa_select_mitigation();
+ mmio_select_mitigation();
+ rfds_select_mitigation();
srbds_select_mitigation();
l1d_flush_select_mitigation();
+ srso_select_mitigation();
+ gds_select_mitigation();
+ its_select_mitigation();
+ bhi_select_mitigation();
/*
- * srso_select_mitigation() depends and must run after
- * retbleed_select_mitigation().
+ * After mitigations are selected, some may need to update their
+ * choices.
*/
- srso_select_mitigation();
- gds_select_mitigation();
+ spectre_v2_update_mitigation();
+ /*
+ * retbleed_update_mitigation() relies on the state set by
+ * spectre_v2_update_mitigation(); specifically it wants to know about
+ * spectre_v2=ibrs.
+ */
+ retbleed_update_mitigation();
+ /*
+ * its_update_mitigation() depends on spectre_v2_update_mitigation()
+ * and retbleed_update_mitigation().
+ */
+ its_update_mitigation();
+
+ /*
+ * spectre_v2_user_update_mitigation() depends on
+ * retbleed_update_mitigation(), specifically the STIBP
+ * selection is forced for UNRET or IBPB.
+ */
+ spectre_v2_user_update_mitigation();
+ mds_update_mitigation();
+ taa_update_mitigation();
+ mmio_update_mitigation();
+ rfds_update_mitigation();
+ bhi_update_mitigation();
+ /* srso_update_mitigation() depends on retbleed_update_mitigation(). */
+ srso_update_mitigation();
+
+ spectre_v1_apply_mitigation();
+ spectre_v2_apply_mitigation();
+ retbleed_apply_mitigation();
+ spectre_v2_user_apply_mitigation();
+ ssb_apply_mitigation();
+ l1tf_apply_mitigation();
+ mds_apply_mitigation();
+ taa_apply_mitigation();
+ mmio_apply_mitigation();
+ rfds_apply_mitigation();
+ srbds_apply_mitigation();
+ srso_apply_mitigation();
+ gds_apply_mitigation();
+ its_apply_mitigation();
+ bhi_apply_mitigation();
}
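/*
 * [Editorial illustration -- not part of the patch] The three phases driven
 * above follow the select/update/apply pattern documented at the top of this
 * file; a minimal sketch for a hypothetical vulnerability "foo" (all foo_*
 * names invented, X86_BUG_MDS used only as a stand-in bug bit):
 */
static enum { FOO_OFF, FOO_AUTO, FOO_FULL } foo_mitigation __ro_after_init = FOO_AUTO;

static void __init foo_select_mitigation(void)
{
	/* Resolve AUTO from command line and bug state; no side effects yet. */
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
		foo_mitigation = FOO_OFF;
	else if (foo_mitigation == FOO_AUTO)
		foo_mitigation = FOO_FULL;
}

static void __init foo_update_mitigation(void)
{
	/* Optionally revise the choice based on what other bugs selected. */
	if (verw_clear_cpu_buf_mitigation_selected)
		foo_mitigation = FOO_FULL;
}

static void __init foo_apply_mitigation(void)
{
	/* Only now flip feature bits / static keys. */
	if (foo_mitigation == FOO_FULL)
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
}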
/*
@@ -228,9 +321,9 @@ static void x86_amd_ssb_disable(void)
u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
- wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+ wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
- wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ wrmsrq(MSR_AMD64_LS_CFG, msrval);
}
#undef pr_fmt
@@ -281,6 +374,12 @@ enum rfds_mitigations {
static enum rfds_mitigations rfds_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
+/*
+ * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
+ * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
+ */
+static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
+
static void __init mds_select_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
@@ -291,12 +390,34 @@ static void __init mds_select_mitigation(void)
if (mds_mitigation == MDS_MITIGATION_AUTO)
mds_mitigation = MDS_MITIGATION_FULL;
+ if (mds_mitigation == MDS_MITIGATION_OFF)
+ return;
+
+ verw_clear_cpu_buf_mitigation_selected = true;
+}
+
+static void __init mds_update_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
+ return;
+
+ /* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
+ if (verw_clear_cpu_buf_mitigation_selected)
+ mds_mitigation = MDS_MITIGATION_FULL;
+
if (mds_mitigation == MDS_MITIGATION_FULL) {
if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
mds_mitigation = MDS_MITIGATION_VMWERV;
+ }
- setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ pr_info("%s\n", mds_strings[mds_mitigation]);
+}
+static void __init mds_apply_mitigation(void)
+{
+ if (mds_mitigation == MDS_MITIGATION_FULL ||
+ mds_mitigation == MDS_MITIGATION_VMWERV) {
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
(mds_nosmt || cpu_mitigations_auto_nosmt()))
cpu_smt_disable(false);
@@ -336,6 +457,11 @@ static const char * const taa_strings[] = {
[TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
};
+static bool __init taa_vulnerable(void)
+{
+ return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
+}
+
static void __init taa_select_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_TAA)) {
@@ -349,48 +475,63 @@ static void __init taa_select_mitigation(void)
return;
}
- if (cpu_mitigations_off()) {
+ if (cpu_mitigations_off())
taa_mitigation = TAA_MITIGATION_OFF;
- return;
- }
- /*
- * TAA mitigation via VERW is turned off if both
- * tsx_async_abort=off and mds=off are specified.
- */
- if (taa_mitigation == TAA_MITIGATION_OFF &&
- mds_mitigation == MDS_MITIGATION_OFF)
+ /* Microcode will be checked in taa_update_mitigation(). */
+ if (taa_mitigation == TAA_MITIGATION_AUTO)
+ taa_mitigation = TAA_MITIGATION_VERW;
+
+ if (taa_mitigation != TAA_MITIGATION_OFF)
+ verw_clear_cpu_buf_mitigation_selected = true;
+}
+
+static void __init taa_update_mitigation(void)
+{
+ if (!taa_vulnerable() || cpu_mitigations_off())
return;
- if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ if (verw_clear_cpu_buf_mitigation_selected)
taa_mitigation = TAA_MITIGATION_VERW;
- else
- taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
- /*
- * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
- * A microcode update fixes this behavior to clear CPU buffers. It also
- * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
- * ARCH_CAP_TSX_CTRL_MSR bit.
- *
- * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
- * update is required.
- */
- if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
- !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
- taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+ if (taa_mitigation == TAA_MITIGATION_VERW) {
+ /* Check if the requisite ucode is available. */
+ if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
- /*
- * TSX is enabled, select alternate mitigation for TAA which is
- * the same as MDS. Enable MDS static branch to clear CPU buffers.
- *
- * For guests that can't determine whether the correct microcode is
- * present on host, enable the mitigation for UCODE_NEEDED as well.
- */
- setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ /*
+ * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
+ * A microcode update fixes this behavior to clear CPU buffers. It also
+ * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
+ * ARCH_CAP_TSX_CTRL_MSR bit.
+ *
+ * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+ * update is required.
+ */
+ if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
+ !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+ }
- if (taa_nosmt || cpu_mitigations_auto_nosmt())
- cpu_smt_disable(false);
+ pr_info("%s\n", taa_strings[taa_mitigation]);
+}
+
+static void __init taa_apply_mitigation(void)
+{
+ if (taa_mitigation == TAA_MITIGATION_VERW ||
+ taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
+ /*
+ * TSX is enabled, select alternate mitigation for TAA which is
+ * the same as MDS. Enable MDS static branch to clear CPU buffers.
+ *
+ * For guests that can't determine whether the correct microcode is
+ * present on host, enable the mitigation for UCODE_NEEDED as well.
+ */
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+
+ if (taa_nosmt || cpu_mitigations_auto_nosmt())
+ cpu_smt_disable(false);
+ }
}
static int __init tsx_async_abort_parse_cmdline(char *str)
@@ -428,31 +569,67 @@ static const char * const mmio_strings[] = {
static void __init mmio_select_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
- boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
cpu_mitigations_off()) {
mmio_mitigation = MMIO_MITIGATION_OFF;
return;
}
+ /* Microcode will be checked in mmio_update_mitigation(). */
+ if (mmio_mitigation == MMIO_MITIGATION_AUTO)
+ mmio_mitigation = MMIO_MITIGATION_VERW;
+
if (mmio_mitigation == MMIO_MITIGATION_OFF)
return;
/*
* Enable CPU buffer clear mitigation for host and VMM, if also affected
- * by MDS or TAA. Otherwise, enable mitigation for VMM only.
+ * by MDS or TAA.
*/
- if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
- boot_cpu_has(X86_FEATURE_RTM)))
- setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
+ verw_clear_cpu_buf_mitigation_selected = true;
+}
+
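+/*
+ * Finalize MMIO Stale Data after MDS/TAA/RFDS have made their choices, and
+ * fall back to "microcode needed" when FB_CLEAR/MD_CLEAR support is missing.
+ */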
+static void __init mmio_update_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || cpu_mitigations_off())
+ return;
+
+ if (verw_clear_cpu_buf_mitigation_selected)
+ mmio_mitigation = MMIO_MITIGATION_VERW;
+
+ if (mmio_mitigation == MMIO_MITIGATION_VERW) {
+ /*
+ * Check if the system has the right microcode.
+ *
+ * CPU Fill buffer clear mitigation is enumerated by either an explicit
+ * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
+ * affected systems.
+ */
+ if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
+ (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
+ boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
+ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
+ mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
+ }
+
+ pr_info("%s\n", mmio_strings[mmio_mitigation]);
+}
+
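+/*
+ * Apply MMIO Stale Data: use full VERW clearing when another VERW user
+ * selected it, otherwise only clear CPU buffers on VM-entry for guests.
+ */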
+static void __init mmio_apply_mitigation(void)
+{
+ if (mmio_mitigation == MMIO_MITIGATION_OFF)
+ return;
/*
- * X86_FEATURE_CLEAR_CPU_BUF could be enabled by other VERW based
- * mitigations, disable KVM-only mitigation in that case.
+ * Only enable the VMM mitigation if the CPU buffer clear mitigation is
+ * not being used.
*/
- if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
- static_branch_disable(&mmio_stale_data_clear);
- else
- static_branch_enable(&mmio_stale_data_clear);
+ if (verw_clear_cpu_buf_mitigation_selected) {
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ static_branch_disable(&cpu_buf_vm_clear);
+ } else {
+ static_branch_enable(&cpu_buf_vm_clear);
+ }
/*
* If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
@@ -462,21 +639,6 @@ static void __init mmio_select_mitigation(void)
if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
static_branch_enable(&mds_idle_clear);
- /*
- * Check if the system has the right microcode.
- *
- * CPU Fill buffer clear mitigation is enumerated by either an explicit
- * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
- * affected systems.
- */
- if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
- (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
- boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
- !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
- mmio_mitigation = MMIO_MITIGATION_VERW;
- else
- mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
-
if (mmio_nosmt || cpu_mitigations_auto_nosmt())
cpu_smt_disable(false);
}
@@ -511,22 +673,48 @@ static const char * const rfds_strings[] = {
[RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
};
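+/* ARCH_CAP_RFDS_CLEAR enumerates that VERW also flushes the CPU register file. */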
+static inline bool __init verw_clears_cpu_reg_file(void)
+{
+ return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
+}
+
static void __init rfds_select_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
rfds_mitigation = RFDS_MITIGATION_OFF;
return;
}
+
+ if (rfds_mitigation == RFDS_MITIGATION_AUTO)
+ rfds_mitigation = RFDS_MITIGATION_VERW;
+
if (rfds_mitigation == RFDS_MITIGATION_OFF)
return;
- if (rfds_mitigation == RFDS_MITIGATION_AUTO)
+ if (verw_clears_cpu_reg_file())
+ verw_clear_cpu_buf_mitigation_selected = true;
+}
+
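+/* Re-check RFDS once the shared VERW selection is known; verify microcode. */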
+static void __init rfds_update_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off())
+ return;
+
+ if (verw_clear_cpu_buf_mitigation_selected)
rfds_mitigation = RFDS_MITIGATION_VERW;
- if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
+ if (rfds_mitigation == RFDS_MITIGATION_VERW) {
+ if (!verw_clears_cpu_reg_file())
+ rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
+ }
+
+ pr_info("%s\n", rfds_strings[rfds_mitigation]);
+}
+
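+/* RFDS is mitigated by the same VERW-based CPU buffer clearing. */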
+static void __init rfds_apply_mitigation(void)
+{
+ if (rfds_mitigation == RFDS_MITIGATION_VERW)
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
- else
- rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
}
static __init int rfds_parse_cmdline(char *str)
@@ -547,76 +735,11 @@ static __init int rfds_parse_cmdline(char *str)
early_param("reg_file_data_sampling", rfds_parse_cmdline);
#undef pr_fmt
-#define pr_fmt(fmt) "" fmt
-
-static void __init md_clear_update_mitigation(void)
-{
- if (cpu_mitigations_off())
- return;
-
- if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
- goto out;
-
- /*
- * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
- * Stale Data mitigation, if necessary.
- */
- if (mds_mitigation == MDS_MITIGATION_OFF &&
- boot_cpu_has_bug(X86_BUG_MDS)) {
- mds_mitigation = MDS_MITIGATION_FULL;
- mds_select_mitigation();
- }
- if (taa_mitigation == TAA_MITIGATION_OFF &&
- boot_cpu_has_bug(X86_BUG_TAA)) {
- taa_mitigation = TAA_MITIGATION_VERW;
- taa_select_mitigation();
- }
- /*
- * MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear
- * gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state.
- */
- if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
- mmio_mitigation = MMIO_MITIGATION_VERW;
- mmio_select_mitigation();
- }
- if (rfds_mitigation == RFDS_MITIGATION_OFF &&
- boot_cpu_has_bug(X86_BUG_RFDS)) {
- rfds_mitigation = RFDS_MITIGATION_VERW;
- rfds_select_mitigation();
- }
-out:
- if (boot_cpu_has_bug(X86_BUG_MDS))
- pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
- if (boot_cpu_has_bug(X86_BUG_TAA))
- pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
- if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
- pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
- else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
- pr_info("MMIO Stale Data: Unknown: No mitigations\n");
- if (boot_cpu_has_bug(X86_BUG_RFDS))
- pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]);
-}
-
-static void __init md_clear_select_mitigation(void)
-{
- mds_select_mitigation();
- taa_select_mitigation();
- mmio_select_mitigation();
- rfds_select_mitigation();
-
- /*
- * As these mitigations are inter-related and rely on VERW instruction
- * to clear the microarchitural buffers, update and print their status
- * after mitigation selection is done for each of these vulnerabilities.
- */
- md_clear_update_mitigation();
-}
-
-#undef pr_fmt
#define pr_fmt(fmt) "SRBDS: " fmt
enum srbds_mitigations {
SRBDS_MITIGATION_OFF,
+ SRBDS_MITIGATION_AUTO,
SRBDS_MITIGATION_UCODE_NEEDED,
SRBDS_MITIGATION_FULL,
SRBDS_MITIGATION_TSX_OFF,
@@ -624,7 +747,7 @@ enum srbds_mitigations {
};
static enum srbds_mitigations srbds_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_FULL : SRBDS_MITIGATION_OFF;
+ IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;
static const char * const srbds_strings[] = {
[SRBDS_MITIGATION_OFF] = "Vulnerable",
@@ -656,7 +779,7 @@ void update_srbds_msr(void)
if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
return;
- rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+ rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
switch (srbds_mitigation) {
case SRBDS_MITIGATION_OFF:
@@ -670,13 +793,18 @@ void update_srbds_msr(void)
break;
}
- wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+ wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}
static void __init srbds_select_mitigation(void)
{
- if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS) || cpu_mitigations_off()) {
+ srbds_mitigation = SRBDS_MITIGATION_OFF;
return;
+ }
+
+ if (srbds_mitigation == SRBDS_MITIGATION_AUTO)
+ srbds_mitigation = SRBDS_MITIGATION_FULL;
/*
* Check to see if this is one of the MDS_NO systems supporting TSX that
@@ -690,13 +818,17 @@ static void __init srbds_select_mitigation(void)
srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
- else if (cpu_mitigations_off() || srbds_off)
+ else if (srbds_off)
srbds_mitigation = SRBDS_MITIGATION_OFF;
- update_srbds_msr();
pr_info("%s\n", srbds_strings[srbds_mitigation]);
}
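+/* Program MSR_IA32_MCU_OPT_CTRL for the SRBDS mode chosen above. */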
+static void __init srbds_apply_mitigation(void)
+{
+ update_srbds_msr();
+}
+
static int __init srbds_parse_cmdline(char *str)
{
if (!str)
@@ -743,6 +875,7 @@ early_param("l1d_flush", l1d_flush_parse_cmdline);
enum gds_mitigations {
GDS_MITIGATION_OFF,
+ GDS_MITIGATION_AUTO,
GDS_MITIGATION_UCODE_NEEDED,
GDS_MITIGATION_FORCE,
GDS_MITIGATION_FULL,
@@ -751,7 +884,7 @@ enum gds_mitigations {
};
static enum gds_mitigations gds_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_FULL : GDS_MITIGATION_OFF;
+ IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;
static const char * const gds_strings[] = {
[GDS_MITIGATION_OFF] = "Vulnerable",
@@ -776,7 +909,7 @@ void update_gds_msr(void)
switch (gds_mitigation) {
case GDS_MITIGATION_OFF:
- rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+ rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
mcu_ctrl |= GDS_MITG_DIS;
break;
case GDS_MITIGATION_FULL_LOCKED:
@@ -786,23 +919,24 @@ void update_gds_msr(void)
* CPUs.
*/
case GDS_MITIGATION_FULL:
- rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+ rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
mcu_ctrl &= ~GDS_MITG_DIS;
break;
case GDS_MITIGATION_FORCE:
case GDS_MITIGATION_UCODE_NEEDED:
case GDS_MITIGATION_HYPERVISOR:
+ case GDS_MITIGATION_AUTO:
return;
}
- wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+ wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
/*
* Check to make sure that the WRMSR value was not ignored. Writes to
* GDS_MITG_DIS will be ignored if this processor is locked but the boot
* processor was not.
*/
- rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
+ rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
}
@@ -815,33 +949,28 @@ static void __init gds_select_mitigation(void)
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
gds_mitigation = GDS_MITIGATION_HYPERVISOR;
- goto out;
+ return;
}
if (cpu_mitigations_off())
gds_mitigation = GDS_MITIGATION_OFF;
/* Will verify below that mitigation _can_ be disabled */
+ if (gds_mitigation == GDS_MITIGATION_AUTO)
+ gds_mitigation = GDS_MITIGATION_FULL;
+
/* No microcode */
if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
- if (gds_mitigation == GDS_MITIGATION_FORCE) {
- /*
- * This only needs to be done on the boot CPU so do it
- * here rather than in update_gds_msr()
- */
- setup_clear_cpu_cap(X86_FEATURE_AVX);
- pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
- } else {
+ if (gds_mitigation != GDS_MITIGATION_FORCE)
gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
- }
- goto out;
+ return;
}
/* Microcode has mitigation, use it */
if (gds_mitigation == GDS_MITIGATION_FORCE)
gds_mitigation = GDS_MITIGATION_FULL;
- rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+ rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
if (mcu_ctrl & GDS_MITG_LOCKED) {
if (gds_mitigation == GDS_MITIGATION_OFF)
pr_warn("Mitigation locked. Disable failed.\n");
@@ -855,9 +984,25 @@ static void __init gds_select_mitigation(void)
*/
gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
}
+}
+
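+/*
+ * Apply GDS: program the control MSR when microcode supports it, or disable
+ * AVX when the mitigation was forced without microcode.
+ */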
+static void __init gds_apply_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_GDS))
+ return;
+
+ /* Microcode is present */
+ if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
+ update_gds_msr();
+ else if (gds_mitigation == GDS_MITIGATION_FORCE) {
+ /*
+ * This only needs to be done on the boot CPU so do it
+ * here rather than in update_gds_msr()
+ */
+ setup_clear_cpu_cap(X86_FEATURE_AVX);
+ pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
+ }
- update_gds_msr();
-out:
pr_info("%s\n", gds_strings[gds_mitigation]);
}
@@ -918,10 +1063,14 @@ static bool smap_works_speculatively(void)
static void __init spectre_v1_select_mitigation(void)
{
- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+}
+
+static void __init spectre_v1_apply_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
return;
- }
if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
/*
@@ -974,8 +1123,20 @@ enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
#undef pr_fmt
#define pr_fmt(fmt) "RETBleed: " fmt
+enum its_mitigation {
+ ITS_MITIGATION_OFF,
+ ITS_MITIGATION_AUTO,
+ ITS_MITIGATION_VMEXIT_ONLY,
+ ITS_MITIGATION_ALIGNED_THUNKS,
+ ITS_MITIGATION_RETPOLINE_STUFF,
+};
+
+static enum its_mitigation its_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF;
+
enum retbleed_mitigation {
RETBLEED_MITIGATION_NONE,
+ RETBLEED_MITIGATION_AUTO,
RETBLEED_MITIGATION_UNRET,
RETBLEED_MITIGATION_IBPB,
RETBLEED_MITIGATION_IBRS,
@@ -983,14 +1144,6 @@ enum retbleed_mitigation {
RETBLEED_MITIGATION_STUFF,
};
-enum retbleed_mitigation_cmd {
- RETBLEED_CMD_OFF,
- RETBLEED_CMD_AUTO,
- RETBLEED_CMD_UNRET,
- RETBLEED_CMD_IBPB,
- RETBLEED_CMD_STUFF,
-};
-
static const char * const retbleed_strings[] = {
[RETBLEED_MITIGATION_NONE] = "Vulnerable",
[RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
@@ -1001,9 +1154,7 @@ static const char * const retbleed_strings[] = {
};
static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
- RETBLEED_MITIGATION_NONE;
-static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_CMD_AUTO : RETBLEED_CMD_OFF;
+ IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;
static int __ro_after_init retbleed_nosmt = false;
@@ -1020,15 +1171,15 @@ static int __init retbleed_parse_cmdline(char *str)
}
if (!strcmp(str, "off")) {
- retbleed_cmd = RETBLEED_CMD_OFF;
+ retbleed_mitigation = RETBLEED_MITIGATION_NONE;
} else if (!strcmp(str, "auto")) {
- retbleed_cmd = RETBLEED_CMD_AUTO;
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
} else if (!strcmp(str, "unret")) {
- retbleed_cmd = RETBLEED_CMD_UNRET;
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
} else if (!strcmp(str, "ibpb")) {
- retbleed_cmd = RETBLEED_CMD_IBPB;
+ retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
} else if (!strcmp(str, "stuff")) {
- retbleed_cmd = RETBLEED_CMD_STUFF;
+ retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
} else if (!strcmp(str, "nosmt")) {
retbleed_nosmt = true;
} else if (!strcmp(str, "force")) {
@@ -1049,77 +1200,122 @@ early_param("retbleed", retbleed_parse_cmdline);
static void __init retbleed_select_mitigation(void)
{
- bool mitigate_smt = false;
-
- if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
- return;
-
- switch (retbleed_cmd) {
- case RETBLEED_CMD_OFF:
+ if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) {
+ retbleed_mitigation = RETBLEED_MITIGATION_NONE;
return;
+ }
- case RETBLEED_CMD_UNRET:
- if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
- retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
- } else {
+ switch (retbleed_mitigation) {
+ case RETBLEED_MITIGATION_UNRET:
+ if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
- goto do_cmd_auto;
}
break;
-
- case RETBLEED_CMD_IBPB:
+ case RETBLEED_MITIGATION_IBPB:
if (!boot_cpu_has(X86_FEATURE_IBPB)) {
pr_err("WARNING: CPU does not support IBPB.\n");
- goto do_cmd_auto;
- } else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
- retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
- } else {
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
+ } else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
- goto do_cmd_auto;
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
}
break;
+ case RETBLEED_MITIGATION_STUFF:
+ if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
+ pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
+ } else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+ pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
+ }
+ break;
+ default:
+ break;
+ }
- case RETBLEED_CMD_STUFF:
- if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) &&
- spectre_v2_enabled == SPECTRE_V2_RETPOLINE) {
- retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
+ if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
+ return;
+ /* Intel mitigation selected in retbleed_update_mitigation() */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+ if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
+ boot_cpu_has(X86_FEATURE_IBPB))
+ retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+ else
+ retbleed_mitigation = RETBLEED_MITIGATION_NONE;
+ }
+}
+
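+/*
+ * Reconcile retbleed with the final spectre_v2 and ITS selections: RSB
+ * stuffing requires retpoline, and IBRS/eIBRS on Intel covers retbleed.
+ */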
+static void __init retbleed_update_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
+ return;
+
+ if (retbleed_mitigation == RETBLEED_MITIGATION_NONE)
+ goto out;
+
+ /*
+ * retbleed=stuff is only allowed on Intel. If stuffing can't be used
+ * then a different mitigation will be selected below.
+ *
+ * its=stuff will also attempt to enable stuffing.
+ */
+ if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF ||
+ its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF) {
+ if (spectre_v2_enabled != SPECTRE_V2_RETPOLINE) {
+ pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
} else {
- if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))
- pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
- else
- pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
+ if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
+ pr_info("Retbleed mitigation updated to stuffing\n");
- goto do_cmd_auto;
+ retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
}
- break;
-
-do_cmd_auto:
- case RETBLEED_CMD_AUTO:
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
- if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
- retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
- else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
- boot_cpu_has(X86_FEATURE_IBPB))
- retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+ }
+ /*
+ * Let IBRS trump all on Intel without affecting the effects of the
+ * retbleed= cmdline option except for call depth based stuffing
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+ switch (spectre_v2_enabled) {
+ case SPECTRE_V2_IBRS:
+ retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
+ break;
+ case SPECTRE_V2_EIBRS:
+ case SPECTRE_V2_EIBRS_RETPOLINE:
+ case SPECTRE_V2_EIBRS_LFENCE:
+ retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
+ break;
+ default:
+ if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
+ pr_err(RETBLEED_INTEL_MSG);
}
+ /* If nothing has set the mitigation yet, default to NONE. */
+ if (retbleed_mitigation == RETBLEED_MITIGATION_AUTO)
+ retbleed_mitigation = RETBLEED_MITIGATION_NONE;
+ }
+out:
+ pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
+}
- /*
- * The Intel mitigation (IBRS or eIBRS) was already selected in
- * spectre_v2_select_mitigation(). 'retbleed_mitigation' will
- * be set accordingly below.
- */
- break;
- }
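+/* Install the return thunks, entry IBPB or call-depth tracking for retbleed. */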
+static void __init retbleed_apply_mitigation(void)
+{
+ bool mitigate_smt = false;
switch (retbleed_mitigation) {
+ case RETBLEED_MITIGATION_NONE:
+ return;
+
case RETBLEED_MITIGATION_UNRET:
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);
- x86_return_thunk = retbleed_return_thunk;
+ set_return_thunk(retbleed_return_thunk);
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
@@ -1142,7 +1338,7 @@ do_cmd_auto:
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
@@ -1154,7 +1350,7 @@ do_cmd_auto:
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
- x86_return_thunk = call_depth_return_thunk;
+ set_return_thunk(call_depth_return_thunk);
break;
default:
@@ -1164,28 +1360,131 @@ do_cmd_auto:
if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
(retbleed_nosmt || cpu_mitigations_auto_nosmt()))
cpu_smt_disable(false);
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt) "ITS: " fmt
+
+static const char * const its_strings[] = {
+ [ITS_MITIGATION_OFF] = "Vulnerable",
+ [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
+ [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
+ [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
+};
+
+static int __init its_parse_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
+ pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
+ return 0;
+ }
+
+ if (!strcmp(str, "off")) {
+ its_mitigation = ITS_MITIGATION_OFF;
+ } else if (!strcmp(str, "on")) {
+ its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
+ } else if (!strcmp(str, "force")) {
+ its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
+ setup_force_cpu_bug(X86_BUG_ITS);
+ } else if (!strcmp(str, "vmexit")) {
+ its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
+ } else if (!strcmp(str, "stuff")) {
+ its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
+ } else {
+ pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
+ }
+
+ return 0;
+}
+early_param("indirect_target_selection", its_parse_cmdline);
+
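+/* Pick the ITS mode and reject options the kernel configuration cannot support. */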
+static void __init its_select_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
+ its_mitigation = ITS_MITIGATION_OFF;
+ return;
+ }
+
+ if (its_mitigation == ITS_MITIGATION_AUTO)
+ its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
+
+ if (its_mitigation == ITS_MITIGATION_OFF)
+ return;
+
+ if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
+ !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
+ pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
+ its_mitigation = ITS_MITIGATION_OFF;
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
+ pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
+ its_mitigation = ITS_MITIGATION_OFF;
+ return;
+ }
+
+ if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
+ !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
+ pr_err("RSB stuff mitigation not supported, using default\n");
+ its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
+ }
+
+ if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY &&
+ !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY))
+ its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
+}
+
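+/* Reconcile ITS with the final spectre_v2 and retbleed mitigation choices. */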
+static void __init its_update_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off())
+ return;
+
+ switch (spectre_v2_enabled) {
+ case SPECTRE_V2_NONE:
+ pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
+ its_mitigation = ITS_MITIGATION_OFF;
+ break;
+ case SPECTRE_V2_RETPOLINE:
+ /* Retpoline+CDT mitigates ITS */
+ if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
+ its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
+ break;
+ case SPECTRE_V2_LFENCE:
+ case SPECTRE_V2_EIBRS_LFENCE:
+ pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
+ its_mitigation = ITS_MITIGATION_OFF;
+ break;
+ default:
+ break;
+ }
/*
- * Let IBRS trump all on Intel without affecting the effects of the
- * retbleed= cmdline option except for call depth based stuffing
+ * retbleed_update_mitigation() will try to do stuffing if its=stuff.
+ * If it can't, such as if spectre_v2!=retpoline, then fall back to
+ * aligned thunks.
*/
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
- switch (spectre_v2_enabled) {
- case SPECTRE_V2_IBRS:
- retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
- break;
- case SPECTRE_V2_EIBRS:
- case SPECTRE_V2_EIBRS_RETPOLINE:
- case SPECTRE_V2_EIBRS_LFENCE:
- retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
- break;
- default:
- if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
- pr_err(RETBLEED_INTEL_MSG);
- }
- }
+ if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
+ retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
+ its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
- pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
+ pr_info("%s\n", its_strings[its_mitigation]);
+}
+
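+/* Switch to the ITS-safe return thunk (and indirect thunks without retpoline). */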
+static void __init its_apply_mitigation(void)
+{
+ /* its=stuff forces retbleed stuffing and is enabled there. */
+ if (its_mitigation != ITS_MITIGATION_ALIGNED_THUNKS)
+ return;
+
+ if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
+ setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
+
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ set_return_thunk(its_return_thunk);
}
#undef pr_fmt
@@ -1265,6 +1564,8 @@ enum spectre_v2_mitigation_cmd {
SPECTRE_V2_CMD_IBRS,
};
+static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = SPECTRE_V2_CMD_AUTO;
+
enum spectre_v2_user_cmd {
SPECTRE_V2_USER_CMD_NONE,
SPECTRE_V2_USER_CMD_AUTO,
@@ -1303,31 +1604,18 @@ static void __init spec_v2_user_print_cond(const char *reason, bool secure)
pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}
-static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
-
-static enum spectre_v2_user_cmd __init
-spectre_v2_parse_user_cmdline(void)
+static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void)
{
- enum spectre_v2_user_cmd mode;
char arg[20];
int ret, i;
- mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ?
- SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE;
-
- switch (spectre_v2_cmd) {
- case SPECTRE_V2_CMD_NONE:
+ if (cpu_mitigations_off() || !IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2))
return SPECTRE_V2_USER_CMD_NONE;
- case SPECTRE_V2_CMD_FORCE:
- return SPECTRE_V2_USER_CMD_FORCE;
- default:
- break;
- }
ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
arg, sizeof(arg));
if (ret < 0)
- return mode;
+ return SPECTRE_V2_USER_CMD_AUTO;
for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
if (match_option(arg, ret, v2_user_options[i].option)) {
@@ -1338,7 +1626,7 @@ spectre_v2_parse_user_cmdline(void)
}
pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
- return mode;
+ return SPECTRE_V2_USER_CMD_AUTO;
}
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
@@ -1346,60 +1634,72 @@ static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
}
-static void __init
-spectre_v2_user_select_mitigation(void)
+static void __init spectre_v2_user_select_mitigation(void)
{
- enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
- enum spectre_v2_user_cmd cmd;
-
if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
return;
- cmd = spectre_v2_parse_user_cmdline();
- switch (cmd) {
+ switch (spectre_v2_parse_user_cmdline()) {
case SPECTRE_V2_USER_CMD_NONE:
- goto set_mode;
+ return;
case SPECTRE_V2_USER_CMD_FORCE:
- mode = SPECTRE_V2_USER_STRICT;
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
break;
case SPECTRE_V2_USER_CMD_AUTO:
case SPECTRE_V2_USER_CMD_PRCTL:
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
+ break;
case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
- mode = SPECTRE_V2_USER_PRCTL;
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
break;
case SPECTRE_V2_USER_CMD_SECCOMP:
+ if (IS_ENABLED(CONFIG_SECCOMP))
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP;
+ else
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
+ spectre_v2_user_stibp = spectre_v2_user_ibpb;
+ break;
case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
if (IS_ENABLED(CONFIG_SECCOMP))
- mode = SPECTRE_V2_USER_SECCOMP;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP;
else
- mode = SPECTRE_V2_USER_PRCTL;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
break;
}
- /* Initialize Indirect Branch Prediction Barrier */
- if (boot_cpu_has(X86_FEATURE_IBPB)) {
- static_branch_enable(&switch_vcpu_ibpb);
+ /*
+ * At this point, an STIBP mode other than "off" has been set.
+ * If STIBP support is not being forced, check if STIBP always-on
+ * is preferred.
+ */
+ if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) &&
+ boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+ spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
- spectre_v2_user_ibpb = mode;
- switch (cmd) {
- case SPECTRE_V2_USER_CMD_NONE:
- break;
- case SPECTRE_V2_USER_CMD_FORCE:
- case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
- case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
- static_branch_enable(&switch_mm_always_ibpb);
- spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
- break;
- case SPECTRE_V2_USER_CMD_PRCTL:
- case SPECTRE_V2_USER_CMD_AUTO:
- case SPECTRE_V2_USER_CMD_SECCOMP:
- static_branch_enable(&switch_mm_cond_ibpb);
- break;
- }
+ if (!boot_cpu_has(X86_FEATURE_IBPB))
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
- pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
- static_key_enabled(&switch_mm_always_ibpb) ?
- "always-on" : "conditional");
+ if (!boot_cpu_has(X86_FEATURE_STIBP))
+ spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
+}
+
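+/*
+ * Adjust the user-space IBPB/STIBP modes once spectre_v2 and retbleed are
+ * settled: spectre_v2=off/on overrides them, and retbleed may need STIBP.
+ */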
+static void __init spectre_v2_user_update_mitigation(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
+ return;
+
+ /* The spectre_v2 cmd line can override spectre_v2_user options */
+ if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) {
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
+ } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) {
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
}
/*
@@ -1417,30 +1717,44 @@ spectre_v2_user_select_mitigation(void)
if (!boot_cpu_has(X86_FEATURE_STIBP) ||
!cpu_smt_possible() ||
(spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
- !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
+ !boot_cpu_has(X86_FEATURE_AUTOIBRS))) {
+ spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
return;
+ }
- /*
- * At this point, an STIBP mode other than "off" has been set.
- * If STIBP support is not being forced, check if STIBP always-on
- * is preferred.
- */
- if (mode != SPECTRE_V2_USER_STRICT &&
- boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
- mode = SPECTRE_V2_USER_STRICT_PREFERRED;
-
- if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
- retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
- if (mode != SPECTRE_V2_USER_STRICT &&
- mode != SPECTRE_V2_USER_STRICT_PREFERRED)
+ if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE &&
+ (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
+ retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) {
+ if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT &&
+ spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED)
pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
- mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
}
+ pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]);
+}
- spectre_v2_user_stibp = mode;
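+/* Enable the IBPB static keys matching the chosen user-space protection. */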
+static void __init spectre_v2_user_apply_mitigation(void)
+{
+ /* Initialize Indirect Branch Prediction Barrier */
+ if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) {
+ static_branch_enable(&switch_vcpu_ibpb);
+
+ switch (spectre_v2_user_ibpb) {
+ case SPECTRE_V2_USER_STRICT:
+ static_branch_enable(&switch_mm_always_ibpb);
+ break;
+ case SPECTRE_V2_USER_PRCTL:
+ case SPECTRE_V2_USER_SECCOMP:
+ static_branch_enable(&switch_mm_cond_ibpb);
+ break;
+ default:
+ break;
+ }
-set_mode:
- pr_info("%s\n", spectre_v2_user_strings[mode]);
+ pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+ static_key_enabled(&switch_mm_always_ibpb) ?
+ "always-on" : "conditional");
+ }
}
static const char * const spectre_v2_strings[] = {
@@ -1592,51 +1906,54 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
rrsba_disabled = true;
}
-static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
{
/*
- * Similar to context switches, there are two types of RSB attacks
- * after VM exit:
+ * WARNING! There are many subtleties to consider when changing *any*
+ * code related to RSB-related mitigations. Before doing so, carefully
+ * read the following document, and update if necessary:
*
- * 1) RSB underflow
+ * Documentation/admin-guide/hw-vuln/rsb.rst
*
- * 2) Poisoned RSB entry
+ * In an overly simplified nutshell:
*
- * When retpoline is enabled, both are mitigated by filling/clearing
- * the RSB.
+ * - User->user RSB attacks are conditionally mitigated during
+ * context switches by cond_mitigation -> write_ibpb().
*
- * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
- * prediction isolation protections, RSB still needs to be cleared
- * because of #2. Note that SMEP provides no protection here, unlike
- * user-space-poisoned RSB entries.
+ * - User->kernel and guest->host attacks are mitigated by eIBRS or
+ * RSB filling.
*
- * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
- * bug is present then a LITE version of RSB protection is required,
- * just a single call needs to retire before a RET is executed.
+ * Though, depending on config, note that other alternative
+ * mitigations may end up getting used instead, e.g., IBPB on
+ * entry/vmexit, call depth tracking, or return thunks.
*/
+
switch (mode) {
case SPECTRE_V2_NONE:
- return;
+ break;
- case SPECTRE_V2_EIBRS_LFENCE:
case SPECTRE_V2_EIBRS:
+ case SPECTRE_V2_EIBRS_LFENCE:
+ case SPECTRE_V2_EIBRS_RETPOLINE:
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
- setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
+ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
}
- return;
+ break;
- case SPECTRE_V2_EIBRS_RETPOLINE:
case SPECTRE_V2_RETPOLINE:
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_IBRS:
+ pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
- pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
- return;
- }
+ break;
- pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
- dump_stack();
+ default:
+ pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
+ dump_stack();
+ break;
+ }
}
/*
@@ -1657,12 +1974,13 @@ static bool __init spec_ctrl_bhi_dis(void)
enum bhi_mitigations {
BHI_MITIGATION_OFF,
+ BHI_MITIGATION_AUTO,
BHI_MITIGATION_ON,
BHI_MITIGATION_VMEXIT_ONLY,
};
static enum bhi_mitigations bhi_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF;
+ IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF;
static int __init spectre_bhi_parse_cmdline(char *str)
{
@@ -1684,6 +2002,25 @@ early_param("spectre_bhi", spectre_bhi_parse_cmdline);
static void __init bhi_select_mitigation(void)
{
+ if (!boot_cpu_has(X86_BUG_BHI) || cpu_mitigations_off())
+ bhi_mitigation = BHI_MITIGATION_OFF;
+
+ if (bhi_mitigation == BHI_MITIGATION_AUTO)
+ bhi_mitigation = BHI_MITIGATION_ON;
+}
+
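+/* Turn BHI handling off when Spectre v2 itself is disabled or not needed. */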
+static void __init bhi_update_mitigation(void)
+{
+ if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE)
+ bhi_mitigation = BHI_MITIGATION_OFF;
+
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
+ spectre_v2_cmd == SPECTRE_V2_CMD_AUTO)
+ bhi_mitigation = BHI_MITIGATION_OFF;
+}
+
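+/* Prefer the hardware BHI_DIS_S control, else use the SW BHB-clearing loops. */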
+static void __init bhi_apply_mitigation(void)
+{
if (bhi_mitigation == BHI_MITIGATION_OFF)
return;
@@ -1695,95 +2032,101 @@ static void __init bhi_select_mitigation(void)
return;
}
- /* Mitigate in hardware if supported */
- if (spec_ctrl_bhi_dis())
+ if (!IS_ENABLED(CONFIG_X86_64))
return;
- if (!IS_ENABLED(CONFIG_X86_64))
+ /* Mitigate in hardware if supported */
+ if (spec_ctrl_bhi_dis())
return;
if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n");
- setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
return;
}
pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n");
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
- setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
}
static void __init spectre_v2_select_mitigation(void)
{
- enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
- enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
+ spectre_v2_cmd = spectre_v2_parse_cmdline();
- /*
- * If the CPU is not affected and the command line mode is NONE or AUTO
- * then nothing to do.
- */
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
- (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
+ (spectre_v2_cmd == SPECTRE_V2_CMD_NONE || spectre_v2_cmd == SPECTRE_V2_CMD_AUTO))
return;
- switch (cmd) {
+ switch (spectre_v2_cmd) {
case SPECTRE_V2_CMD_NONE:
return;
case SPECTRE_V2_CMD_FORCE:
case SPECTRE_V2_CMD_AUTO:
if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
- mode = SPECTRE_V2_EIBRS;
- break;
- }
-
- if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
- boot_cpu_has_bug(X86_BUG_RETBLEED) &&
- retbleed_cmd != RETBLEED_CMD_OFF &&
- retbleed_cmd != RETBLEED_CMD_STUFF &&
- boot_cpu_has(X86_FEATURE_IBRS) &&
- boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
- mode = SPECTRE_V2_IBRS;
+ spectre_v2_enabled = SPECTRE_V2_EIBRS;
break;
}
- mode = spectre_v2_select_retpoline();
+ spectre_v2_enabled = spectre_v2_select_retpoline();
break;
case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
pr_err(SPECTRE_V2_LFENCE_MSG);
- mode = SPECTRE_V2_LFENCE;
+ spectre_v2_enabled = SPECTRE_V2_LFENCE;
break;
case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
- mode = SPECTRE_V2_RETPOLINE;
+ spectre_v2_enabled = SPECTRE_V2_RETPOLINE;
break;
case SPECTRE_V2_CMD_RETPOLINE:
- mode = spectre_v2_select_retpoline();
+ spectre_v2_enabled = spectre_v2_select_retpoline();
break;
case SPECTRE_V2_CMD_IBRS:
- mode = SPECTRE_V2_IBRS;
+ spectre_v2_enabled = SPECTRE_V2_IBRS;
break;
case SPECTRE_V2_CMD_EIBRS:
- mode = SPECTRE_V2_EIBRS;
+ spectre_v2_enabled = SPECTRE_V2_EIBRS;
break;
case SPECTRE_V2_CMD_EIBRS_LFENCE:
- mode = SPECTRE_V2_EIBRS_LFENCE;
+ spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE;
break;
case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
- mode = SPECTRE_V2_EIBRS_RETPOLINE;
+ spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE;
break;
}
+}
+
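+/* With spectre_v2=auto, switch to IBRS on affected Intel CPUs so retbleed is covered. */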
+static void __init spectre_v2_update_mitigation(void)
+{
+ if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO &&
+ !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) {
+ if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
+ boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+ retbleed_mitigation != RETBLEED_MITIGATION_NONE &&
+ retbleed_mitigation != RETBLEED_MITIGATION_STUFF &&
+ boot_cpu_has(X86_FEATURE_IBRS) &&
+ boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+ spectre_v2_enabled = SPECTRE_V2_IBRS;
+ }
+ }
- if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && !cpu_mitigations_off())
+ pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]);
+}
+
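+/* Program IBRS/AutoIBRS, RSB filling and firmware-call protections for the mode. */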
+static void __init spectre_v2_apply_mitigation(void)
+{
+ if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
- if (spectre_v2_in_ibrs_mode(mode)) {
+ if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
} else {
@@ -1792,8 +2135,10 @@ static void __init spectre_v2_select_mitigation(void)
}
}
- switch (mode) {
+ switch (spectre_v2_enabled) {
case SPECTRE_V2_NONE:
+ return;
+
case SPECTRE_V2_EIBRS:
break;
@@ -1819,59 +2164,12 @@ static void __init spectre_v2_select_mitigation(void)
* JMPs gets protection against BHI and Intramode-BTI, but RET
* prediction from a non-RSB predictor is still a risk.
*/
- if (mode == SPECTRE_V2_EIBRS_LFENCE ||
- mode == SPECTRE_V2_EIBRS_RETPOLINE ||
- mode == SPECTRE_V2_RETPOLINE)
+ if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE ||
+ spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE ||
+ spectre_v2_enabled == SPECTRE_V2_RETPOLINE)
spec_ctrl_disable_kernel_rrsba();
- if (boot_cpu_has(X86_BUG_BHI))
- bhi_select_mitigation();
-
- spectre_v2_enabled = mode;
- pr_info("%s\n", spectre_v2_strings[mode]);
-
- /*
- * If Spectre v2 protection has been enabled, fill the RSB during a
- * context switch. In general there are two types of RSB attacks
- * across context switches, for which the CALLs/RETs may be unbalanced.
- *
- * 1) RSB underflow
- *
- * Some Intel parts have "bottomless RSB". When the RSB is empty,
- * speculated return targets may come from the branch predictor,
- * which could have a user-poisoned BTB or BHB entry.
- *
- * AMD has it even worse: *all* returns are speculated from the BTB,
- * regardless of the state of the RSB.
- *
- * When IBRS or eIBRS is enabled, the "user -> kernel" attack
- * scenario is mitigated by the IBRS branch prediction isolation
- * properties, so the RSB buffer filling wouldn't be necessary to
- * protect against this type of attack.
- *
- * The "user -> user" attack scenario is mitigated by RSB filling.
- *
- * 2) Poisoned RSB entry
- *
- * If the 'next' in-kernel return stack is shorter than 'prev',
- * 'next' could be tricked into speculating with a user-poisoned RSB
- * entry.
- *
- * The "user -> kernel" attack scenario is mitigated by SMEP and
- * eIBRS.
- *
- * The "user -> user" scenario, also known as SpectreBHB, requires
- * RSB clearing.
- *
- * So to mitigate all cases, unconditionally fill RSB on context
- * switches.
- *
- * FIXME: Is this pointless for retbleed-affected AMD?
- */
- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
- pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
-
- spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
+ spectre_v2_select_rsb_mitigation(spectre_v2_enabled);
/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
@@ -1879,28 +2177,26 @@ static void __init spectre_v2_select_mitigation(void)
* firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
* otherwise enabled.
*
- * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
- * the user might select retpoline on the kernel command line and if
- * the CPU supports Enhanced IBRS, kernel might un-intentionally not
- * enable IBRS around firmware calls.
+ * Use "spectre_v2_enabled" to check Enhanced IBRS instead of
+ * boot_cpu_has(), because the user might select retpoline on the kernel
+	 * command line and if the CPU supports Enhanced IBRS, the kernel might
+	 * unintentionally not enable IBRS around firmware calls.
*/
if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
boot_cpu_has(X86_FEATURE_IBPB) &&
(boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
- if (retbleed_cmd != RETBLEED_CMD_IBPB) {
+ if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) {
setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
pr_info("Enabling Speculation Barrier for firmware calls\n");
}
- } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
+ } else if (boot_cpu_has(X86_FEATURE_IBRS) &&
+ !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
-
- /* Set up IBPB and STIBP depending on the general spectre V2 command */
- spectre_v2_cmd = cmd;
}
static void update_stibp_msr(void * __unused)
@@ -2089,19 +2385,18 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
return cmd;
}
-static enum ssb_mitigation __init __ssb_select_mitigation(void)
+static void __init ssb_select_mitigation(void)
{
- enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
enum ssb_mitigation_cmd cmd;
if (!boot_cpu_has(X86_FEATURE_SSBD))
- return mode;
+ goto out;
cmd = ssb_parse_cmdline();
if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
(cmd == SPEC_STORE_BYPASS_CMD_NONE ||
cmd == SPEC_STORE_BYPASS_CMD_AUTO))
- return mode;
+ return;
switch (cmd) {
case SPEC_STORE_BYPASS_CMD_SECCOMP:
@@ -2110,28 +2405,35 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
* enabled.
*/
if (IS_ENABLED(CONFIG_SECCOMP))
- mode = SPEC_STORE_BYPASS_SECCOMP;
+ ssb_mode = SPEC_STORE_BYPASS_SECCOMP;
else
- mode = SPEC_STORE_BYPASS_PRCTL;
+ ssb_mode = SPEC_STORE_BYPASS_PRCTL;
break;
case SPEC_STORE_BYPASS_CMD_ON:
- mode = SPEC_STORE_BYPASS_DISABLE;
+ ssb_mode = SPEC_STORE_BYPASS_DISABLE;
break;
case SPEC_STORE_BYPASS_CMD_AUTO:
case SPEC_STORE_BYPASS_CMD_PRCTL:
- mode = SPEC_STORE_BYPASS_PRCTL;
+ ssb_mode = SPEC_STORE_BYPASS_PRCTL;
break;
case SPEC_STORE_BYPASS_CMD_NONE:
break;
}
+out:
+ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+ pr_info("%s\n", ssb_strings[ssb_mode]);
+}
+
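+/* Set the SSBD controls when speculative store bypass is to be disabled. */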
+static void __init ssb_apply_mitigation(void)
+{
/*
* We have three CPU feature flags that are in play here:
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
* - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
* - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
*/
- if (mode == SPEC_STORE_BYPASS_DISABLE) {
+ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) {
setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
/*
* Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
@@ -2145,16 +2447,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
update_spec_ctrl(x86_spec_ctrl_base);
}
}
-
- return mode;
-}
-
-static void ssb_select_mitigation(void)
-{
- ssb_mode = __ssb_select_mitigation();
-
- if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
- pr_info("%s\n", ssb_strings[ssb_mode]);
}
#undef pr_fmt
@@ -2410,7 +2702,7 @@ EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_FLUSH : L1TF_MITIGATION_OFF;
+ IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
@@ -2458,22 +2750,33 @@ static void override_cache_bits(struct cpuinfo_x86 *c)
static void __init l1tf_select_mitigation(void)
{
+ if (!boot_cpu_has_bug(X86_BUG_L1TF) || cpu_mitigations_off()) {
+ l1tf_mitigation = L1TF_MITIGATION_OFF;
+ return;
+ }
+
+ if (l1tf_mitigation == L1TF_MITIGATION_AUTO) {
+ if (cpu_mitigations_auto_nosmt())
+ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
+ else
+ l1tf_mitigation = L1TF_MITIGATION_FLUSH;
+ }
+}
+
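+/* Act on the L1TF mode: adjust cache bits, SMT, and check the memory limit. */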
+static void __init l1tf_apply_mitigation(void)
+{
u64 half_pa;
if (!boot_cpu_has_bug(X86_BUG_L1TF))
return;
- if (cpu_mitigations_off())
- l1tf_mitigation = L1TF_MITIGATION_OFF;
- else if (cpu_mitigations_auto_nosmt())
- l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
-
override_cache_bits(&boot_cpu_data);
switch (l1tf_mitigation) {
case L1TF_MITIGATION_OFF:
case L1TF_MITIGATION_FLUSH_NOWARN:
case L1TF_MITIGATION_FLUSH:
+ case L1TF_MITIGATION_AUTO:
break;
case L1TF_MITIGATION_FLUSH_NOSMT:
case L1TF_MITIGATION_FULL:
@@ -2533,6 +2836,7 @@ early_param("l1tf", l1tf_cmdline);
enum srso_mitigation {
SRSO_MITIGATION_NONE,
+ SRSO_MITIGATION_AUTO,
SRSO_MITIGATION_UCODE_NEEDED,
SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
SRSO_MITIGATION_MICROCODE,
@@ -2542,14 +2846,6 @@ enum srso_mitigation {
SRSO_MITIGATION_BP_SPEC_REDUCE,
};
-enum srso_mitigation_cmd {
- SRSO_CMD_OFF,
- SRSO_CMD_MICROCODE,
- SRSO_CMD_SAFE_RET,
- SRSO_CMD_IBPB,
- SRSO_CMD_IBPB_ON_VMEXIT,
-};
-
static const char * const srso_strings[] = {
[SRSO_MITIGATION_NONE] = "Vulnerable",
[SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
@@ -2561,8 +2857,7 @@ static const char * const srso_strings[] = {
[SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
};
-static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
-static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;
+static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
static int __init srso_parse_cmdline(char *str)
{
@@ -2570,15 +2865,15 @@ static int __init srso_parse_cmdline(char *str)
return -EINVAL;
if (!strcmp(str, "off"))
- srso_cmd = SRSO_CMD_OFF;
+ srso_mitigation = SRSO_MITIGATION_NONE;
else if (!strcmp(str, "microcode"))
- srso_cmd = SRSO_CMD_MICROCODE;
+ srso_mitigation = SRSO_MITIGATION_MICROCODE;
else if (!strcmp(str, "safe-ret"))
- srso_cmd = SRSO_CMD_SAFE_RET;
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET;
else if (!strcmp(str, "ibpb"))
- srso_cmd = SRSO_CMD_IBPB;
+ srso_mitigation = SRSO_MITIGATION_IBPB;
else if (!strcmp(str, "ibpb-vmexit"))
- srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
+ srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
else
pr_err("Ignoring unknown SRSO option (%s).", str);
@@ -2590,132 +2885,85 @@ early_param("spec_rstack_overflow", srso_parse_cmdline);
static void __init srso_select_mitigation(void)
{
- bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
+ bool has_microcode;
- if (!boot_cpu_has_bug(X86_BUG_SRSO) ||
- cpu_mitigations_off() ||
- srso_cmd == SRSO_CMD_OFF) {
- if (boot_cpu_has(X86_FEATURE_SBPB))
- x86_pred_cmd = PRED_CMD_SBPB;
- goto out;
- }
+ if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
+ srso_mitigation = SRSO_MITIGATION_NONE;
+
+ if (srso_mitigation == SRSO_MITIGATION_NONE)
+ return;
+
+ if (srso_mitigation == SRSO_MITIGATION_AUTO)
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+ has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
if (has_microcode) {
/*
* Zen1/2 with SMT off aren't vulnerable after the right
* IBPB microcode has been applied.
- *
- * Zen1/2 don't have SBPB, no need to try to enable it here.
*/
if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
- goto out;
- }
-
- if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
- srso_mitigation = SRSO_MITIGATION_IBPB;
- goto out;
+ srso_mitigation = SRSO_MITIGATION_NONE;
+ return;
}
} else {
pr_warn("IBPB-extending microcode not applied!\n");
pr_warn(SRSO_NOTICE);
-
- /* may be overwritten by SRSO_CMD_SAFE_RET below */
- srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
}
- switch (srso_cmd) {
- case SRSO_CMD_MICROCODE:
- if (has_microcode) {
- srso_mitigation = SRSO_MITIGATION_MICROCODE;
- pr_warn(SRSO_NOTICE);
- }
- break;
-
- case SRSO_CMD_SAFE_RET:
- if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))
+ switch (srso_mitigation) {
+ case SRSO_MITIGATION_SAFE_RET:
+ if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
+ srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
goto ibpb_on_vmexit;
+ }
- if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
- /*
- * Enable the return thunk for generated code
- * like ftrace, static_call, etc.
- */
- setup_force_cpu_cap(X86_FEATURE_RETHUNK);
- setup_force_cpu_cap(X86_FEATURE_UNRET);
-
- if (boot_cpu_data.x86 == 0x19) {
- setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
- x86_return_thunk = srso_alias_return_thunk;
- } else {
- setup_force_cpu_cap(X86_FEATURE_SRSO);
- x86_return_thunk = srso_return_thunk;
- }
- if (has_microcode)
- srso_mitigation = SRSO_MITIGATION_SAFE_RET;
- else
- srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
- } else {
+ if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
+ srso_mitigation = SRSO_MITIGATION_NONE;
}
- break;
- case SRSO_CMD_IBPB:
- if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
- if (has_microcode) {
- setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
- setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
- srso_mitigation = SRSO_MITIGATION_IBPB;
-
- /*
- * IBPB on entry already obviates the need for
- * software-based untraining so clear those in case some
- * other mitigation like Retbleed has selected them.
- */
- setup_clear_cpu_cap(X86_FEATURE_UNRET);
- setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
-
- /*
- * There is no need for RSB filling: entry_ibpb() ensures
- * all predictions, including the RSB, are invalidated,
- * regardless of IBPB implementation.
- */
- setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
- }
- } else {
- pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
- }
+ if (!has_microcode)
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
break;
-
ibpb_on_vmexit:
- case SRSO_CMD_IBPB_ON_VMEXIT:
+ case SRSO_MITIGATION_IBPB_ON_VMEXIT:
if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
break;
}
-
- if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
- if (has_microcode) {
- setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
- srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
-
- /*
- * There is no need for RSB filling: entry_ibpb() ensures
- * all predictions, including the RSB, are invalidated,
- * regardless of IBPB implementation.
- */
- setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
- }
- } else {
+ fallthrough;
+ case SRSO_MITIGATION_IBPB:
+ if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
+ srso_mitigation = SRSO_MITIGATION_NONE;
}
+
+ if (!has_microcode)
+ srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
break;
default:
break;
}
+}
-out:
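+/* Let retbleed's IBPB satisfy SRSO as well, then report the final state. */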
+static void __init srso_update_mitigation(void)
+{
+ /* If retbleed is using IBPB, that works for SRSO as well */
+ if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB &&
+ boot_cpu_has(X86_FEATURE_IBPB_BRTYPE))
+ srso_mitigation = SRSO_MITIGATION_IBPB;
+
+ if (boot_cpu_has_bug(X86_BUG_SRSO) &&
+ !cpu_mitigations_off() &&
+ !boot_cpu_has(X86_FEATURE_SRSO_NO))
+ pr_info("%s\n", srso_strings[srso_mitigation]);
+}
+
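+/* Install the SRSO return thunks or the entry/VM-exit IBPB for the chosen mode. */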
+static void __init srso_apply_mitigation(void)
+{
/*
* Clear the feature flag if this mitigation is not selected as that
* feature flag controls the BpSpecReduce MSR bit toggling in KVM.
@@ -2723,8 +2971,52 @@ out:
if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
- if (srso_mitigation != SRSO_MITIGATION_NONE)
- pr_info("%s\n", srso_strings[srso_mitigation]);
+ if (srso_mitigation == SRSO_MITIGATION_NONE) {
+ if (boot_cpu_has(X86_FEATURE_SBPB))
+ x86_pred_cmd = PRED_CMD_SBPB;
+ return;
+ }
+
+ switch (srso_mitigation) {
+ case SRSO_MITIGATION_SAFE_RET:
+ case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
+ /*
+ * Enable the return thunk for generated code
+ * like ftrace, static_call, etc.
+ */
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+ if (boot_cpu_data.x86 == 0x19) {
+ setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+ set_return_thunk(srso_alias_return_thunk);
+ } else {
+ setup_force_cpu_cap(X86_FEATURE_SRSO);
+ set_return_thunk(srso_return_thunk);
+ }
+ break;
+ case SRSO_MITIGATION_IBPB:
+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+ /*
+ * IBPB on entry already obviates the need for
+ * software-based untraining so clear those in case some
+ * other mitigation like Retbleed has selected them.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_UNRET);
+ setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+ fallthrough;
+ case SRSO_MITIGATION_IBPB_ON_VMEXIT:
+ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ /*
+ * There is no need for RSB filling: entry_ibpb() ensures
+ * all predictions, including the RSB, are invalidated,
+ * regardless of IBPB implementation.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ break;
+ default:
+ break;
+ }
}
#undef pr_fmt
@@ -2819,9 +3111,6 @@ static ssize_t tsx_async_abort_show_state(char *buf)
static ssize_t mmio_stale_data_show_state(char *buf)
{
- if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
- return sysfs_emit(buf, "Unknown: No mitigations\n");
-
if (mmio_mitigation == MMIO_MITIGATION_OFF)
return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
@@ -2839,6 +3128,19 @@ static ssize_t rfds_show_state(char *buf)
return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
}
+static ssize_t old_microcode_show_state(char *buf)
+{
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return sysfs_emit(buf, "Unknown: running under hypervisor");
+
+ return sysfs_emit(buf, "Vulnerable\n");
+}
+
+static ssize_t its_show_state(char *buf)
+{
+ return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
+}
+
static char *stibp_state(void)
{
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
@@ -2897,7 +3199,7 @@ static const char *spectre_bhi_state(void)
!boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
rrsba_disabled)
return "; BHI: Retpoline";
- else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT))
return "; BHI: Vulnerable, KVM: SW loop";
return "; BHI: Vulnerable";
@@ -3006,7 +3308,6 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
return srbds_show_state(buf);
case X86_BUG_MMIO_STALE_DATA:
- case X86_BUG_MMIO_UNKNOWN:
return mmio_stale_data_show_state(buf);
case X86_BUG_RETBLEED:
@@ -3021,6 +3322,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_RFDS:
return rfds_show_state(buf);
+ case X86_BUG_OLD_MICROCODE:
+ return old_microcode_show_state(buf);
+
+ case X86_BUG_ITS:
+ return its_show_state(buf);
+
default:
break;
}
@@ -3075,10 +3382,7 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
- if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
- return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
- else
- return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
+ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}
ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
@@ -3100,6 +3404,16 @@ ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attrib
{
return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
}
+
+ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE);
+}
+
+ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
+}
#endif
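The two new cpu_show_*() handlers above back additional files under /sys/devices/system/cpu/vulnerabilities/. A minimal userspace sketch for reading them; the file names old_microcode and indirect_target_selection are inferred from the handler names and the ABI documentation touched by this merge:

	#include <stdio.h>

	static void show(const char *path)
	{
		char line[128];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);
			return;
		}
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", path, line);
		fclose(f);
	}

	int main(void)
	{
		show("/sys/devices/system/cpu/vulnerabilities/old_microcode");
		show("/sys/devices/system/cpu/vulnerabilities/indirect_target_selection");
		return 0;
	}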
void __warn_thunk(void)
diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c
index 237faf7e700c..981f8b1f0792 100644
--- a/arch/x86/kernel/cpu/bus_lock.c
+++ b/arch/x86/kernel/cpu/bus_lock.c
@@ -10,6 +10,7 @@
#include <asm/cmdline.h>
#include <asm/traps.h>
#include <asm/cpu.h>
+#include <asm/msr.h>
enum split_lock_detect_state {
sld_off = 0,
@@ -95,15 +96,15 @@ static bool split_lock_verify_msr(bool on)
{
u64 ctrl, tmp;
- if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
+ if (rdmsrq_safe(MSR_TEST_CTRL, &ctrl))
return false;
if (on)
ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
else
ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
- if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
+ if (wrmsrq_safe(MSR_TEST_CTRL, ctrl))
return false;
- rdmsrl(MSR_TEST_CTRL, tmp);
+ rdmsrq(MSR_TEST_CTRL, tmp);
return ctrl == tmp;
}
@@ -137,7 +138,7 @@ static void __init __split_lock_setup(void)
return;
}
- rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);
+ rdmsrq(MSR_TEST_CTRL, msr_test_ctrl_cache);
if (!split_lock_verify_msr(true)) {
pr_info("MSR access failed: Disabled\n");
@@ -145,7 +146,7 @@ static void __init __split_lock_setup(void)
}
/* Restore the MSR to its cached value. */
- wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);
+ wrmsrq(MSR_TEST_CTRL, msr_test_ctrl_cache);
setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
}
@@ -162,7 +163,7 @@ static void sld_update_msr(bool on)
if (on)
test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
- wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
+ wrmsrq(MSR_TEST_CTRL, test_ctrl_val);
}
void split_lock_init(void)
@@ -297,7 +298,7 @@ void bus_lock_init(void)
if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
return;
- rdmsrl(MSR_IA32_DEBUGCTLMSR, val);
+ rdmsrq(MSR_IA32_DEBUGCTLMSR, val);
if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
(sld_state == sld_warn || sld_state == sld_fatal)) ||
@@ -311,7 +312,7 @@ void bus_lock_init(void)
val |= DEBUGCTLMSR_BUS_LOCK_DETECT;
}
- wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
+ wrmsrq(MSR_IA32_DEBUGCTLMSR, val);
}
bool handle_user_split_lock(struct pt_regs *regs, long error_code)
@@ -375,7 +376,7 @@ static void __init split_lock_setup(struct cpuinfo_x86 *c)
* MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is. All CPUs that set
* it have split lock detection.
*/
- rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
+ rdmsrq(MSR_IA32_CORE_CAPS, ia32_core_caps);
if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
goto supported;
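This file, and several later ones in the series, mechanically rename the 64-bit MSR accessors: rdmsrl/wrmsrl become rdmsrq/wrmsrq ("q" for quad word), and __rdmsr becomes native_rdmsrq. A compilable stand-in sketch of the call shape only; the macro bodies below are fakes, while the real kernel helpers execute RDMSR/WRMSR:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t fake_msr;                      /* stand-in for one MSR's contents */

	#define rdmsrq(msr, val)   ((val) = fake_msr)  /* was rdmsrl(msr, val) */
	#define wrmsrq(msr, val)   (fake_msr = (val))  /* was wrmsrl(msr, val) */

	int main(void)
	{
		uint64_t ctrl;

		rdmsrq(0x33, ctrl);             /* e.g. MSR_TEST_CTRL in the hunks above */
		wrmsrq(0x33, ctrl | 1u << 29);  /* e.g. the split-lock-detect bit        */
		printf("0x%llx\n", (unsigned long long)fake_msr);
		return 0;
	}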
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index b3a520959b51..adfa7e8bb865 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -1,35 +1,28 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Routines to identify caches on Intel CPU.
+ * x86 CPU caches detection and configuration
*
- * Changes:
- * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
- * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
- * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
+ * Previous changes
+ * - Venkatesh Pallipadi: Cache identification through CPUID(0x4)
+ * - Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure
+ * - Andi Kleen / Andreas Herrmann: CPUID(0x4) emulation on AMD
*/
#include <linux/cacheinfo.h>
-#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
-#include <linux/pci.h>
#include <linux/stop_machine.h>
-#include <linux/sysfs.h>
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
#include <asm/cacheinfo.h>
#include <asm/cpufeature.h>
+#include <asm/cpuid/api.h>
#include <asm/mtrr.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include "cpu.h"
-#define LVL_1_INST 1
-#define LVL_1_DATA 2
-#define LVL_2 3
-#define LVL_3 4
-
/* Shared last level cache maps */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
@@ -41,208 +34,127 @@ static cpumask_var_t cpu_cacheinfo_mask;
/* Kernel controls MTRR and/or PAT MSRs. */
unsigned int memory_caching_control __ro_after_init;
-struct _cache_table {
- unsigned char descriptor;
- char cache_type;
- short size;
-};
-
-#define MB(x) ((x) * 1024)
-
-/* All the cache descriptor types we care about (no TLB or
- trace cache entries) */
-
-static const struct _cache_table cache_table[] =
-{
- { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
- { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
- { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
- { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
- { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
- { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
- { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */
- { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
- { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
- { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
- { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
- { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
- { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
- { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
- { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
- { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
- { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
- { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */
- { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */
- { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */
- { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */
- { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */
- { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
- { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */
- { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
- { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */
- { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */
- { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */
- { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */
- { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */
- { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
- { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */
- { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
- { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
- { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */
- { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */
- { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
- { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */
- { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
- { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */
- { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */
- { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */
- { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */
- { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
- { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */
- { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
- { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */
- { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */
- { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
- { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
- { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */
- { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */
- { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */
- { 0x00, 0, 0}
-};
-
-
enum _cache_type {
- CTYPE_NULL = 0,
- CTYPE_DATA = 1,
- CTYPE_INST = 2,
- CTYPE_UNIFIED = 3
+ CTYPE_NULL = 0,
+ CTYPE_DATA = 1,
+ CTYPE_INST = 2,
+ CTYPE_UNIFIED = 3
};
union _cpuid4_leaf_eax {
struct {
- enum _cache_type type:5;
- unsigned int level:3;
- unsigned int is_self_initializing:1;
- unsigned int is_fully_associative:1;
- unsigned int reserved:4;
- unsigned int num_threads_sharing:12;
- unsigned int num_cores_on_die:6;
+ enum _cache_type type :5;
+ unsigned int level :3;
+ unsigned int is_self_initializing :1;
+ unsigned int is_fully_associative :1;
+ unsigned int reserved :4;
+ unsigned int num_threads_sharing :12;
+ unsigned int num_cores_on_die :6;
} split;
u32 full;
};
union _cpuid4_leaf_ebx {
struct {
- unsigned int coherency_line_size:12;
- unsigned int physical_line_partition:10;
- unsigned int ways_of_associativity:10;
+ unsigned int coherency_line_size :12;
+ unsigned int physical_line_partition :10;
+ unsigned int ways_of_associativity :10;
} split;
u32 full;
};
union _cpuid4_leaf_ecx {
struct {
- unsigned int number_of_sets:32;
+ unsigned int number_of_sets :32;
} split;
u32 full;
};
-struct _cpuid4_info_regs {
+struct _cpuid4_info {
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned int id;
unsigned long size;
- struct amd_northbridge *nb;
};
-/* AMD doesn't have CPUID4. Emulate it here to report the same
- information to the user. This makes some assumptions about the machine:
- L2 not shared, no SMT etc. that is currently true on AMD CPUs.
+/* Map CPUID(0x4) EAX.cache_type to <linux/cacheinfo.h> types */
+static const enum cache_type cache_type_map[] = {
+ [CTYPE_NULL] = CACHE_TYPE_NOCACHE,
+ [CTYPE_DATA] = CACHE_TYPE_DATA,
+ [CTYPE_INST] = CACHE_TYPE_INST,
+ [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
+};
+
+/*
+ * Fallback AMD CPUID(0x4) emulation
+ * AMD CPUs with TOPOEXT can just use CPUID(0x8000001d)
+ *
+ * @AMD_L2_L3_INVALID_ASSOC: cache info for the respective L2/L3 cache should
+ * be determined from CPUID(0x8000001d) instead of CPUID(0x80000006).
+ */
+
+#define AMD_CPUID4_FULLY_ASSOCIATIVE 0xffff
+#define AMD_L2_L3_INVALID_ASSOC 0x9
- In theory the TLBs could be reported as fake type (they are in "dummy").
- Maybe later */
union l1_cache {
struct {
- unsigned line_size:8;
- unsigned lines_per_tag:8;
- unsigned assoc:8;
- unsigned size_in_kb:8;
+ unsigned line_size :8;
+ unsigned lines_per_tag :8;
+ unsigned assoc :8;
+ unsigned size_in_kb :8;
};
- unsigned val;
+ unsigned int val;
};
union l2_cache {
struct {
- unsigned line_size:8;
- unsigned lines_per_tag:4;
- unsigned assoc:4;
- unsigned size_in_kb:16;
+ unsigned line_size :8;
+ unsigned lines_per_tag :4;
+ unsigned assoc :4;
+ unsigned size_in_kb :16;
};
- unsigned val;
+ unsigned int val;
};
union l3_cache {
struct {
- unsigned line_size:8;
- unsigned lines_per_tag:4;
- unsigned assoc:4;
- unsigned res:2;
- unsigned size_encoded:14;
+ unsigned line_size :8;
+ unsigned lines_per_tag :4;
+ unsigned assoc :4;
+ unsigned res :2;
+ unsigned size_encoded :14;
};
- unsigned val;
+ unsigned int val;
};
+/* L2/L3 associativity mapping */
static const unsigned short assocs[] = {
- [1] = 1,
- [2] = 2,
- [4] = 4,
- [6] = 8,
- [8] = 16,
- [0xa] = 32,
- [0xb] = 48,
- [0xc] = 64,
- [0xd] = 96,
- [0xe] = 128,
- [0xf] = 0xffff /* fully associative - no way to show this currently */
+ [1] = 1,
+ [2] = 2,
+ [3] = 3,
+ [4] = 4,
+ [5] = 6,
+ [6] = 8,
+ [8] = 16,
+ [0xa] = 32,
+ [0xb] = 48,
+ [0xc] = 64,
+ [0xd] = 96,
+ [0xe] = 128,
+ [0xf] = AMD_CPUID4_FULLY_ASSOCIATIVE
};
static const unsigned char levels[] = { 1, 1, 2, 3 };
-static const unsigned char types[] = { 1, 2, 3, 3 };
+static const unsigned char types[] = { 1, 2, 3, 3 };
-static const enum cache_type cache_type_map[] = {
- [CTYPE_NULL] = CACHE_TYPE_NOCACHE,
- [CTYPE_DATA] = CACHE_TYPE_DATA,
- [CTYPE_INST] = CACHE_TYPE_INST,
- [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
-};
-
-static void
-amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
- union _cpuid4_leaf_ebx *ebx,
- union _cpuid4_leaf_ecx *ecx)
+static void legacy_amd_cpuid4(int index, union _cpuid4_leaf_eax *eax,
+ union _cpuid4_leaf_ebx *ebx, union _cpuid4_leaf_ecx *ecx)
{
- unsigned dummy;
- unsigned line_size, lines_per_tag, assoc, size_in_kb;
- union l1_cache l1i, l1d;
+ unsigned int dummy, line_size, lines_per_tag, assoc, size_in_kb;
+ union l1_cache l1i, l1d, *l1;
union l2_cache l2;
union l3_cache l3;
- union l1_cache *l1 = &l1d;
eax->full = 0;
ebx->full = 0;
@@ -251,430 +163,155 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
- switch (leaf) {
+ l1 = &l1d;
+ switch (index) {
case 1:
l1 = &l1i;
fallthrough;
case 0:
if (!l1->val)
return;
- assoc = assocs[l1->assoc];
- line_size = l1->line_size;
- lines_per_tag = l1->lines_per_tag;
- size_in_kb = l1->size_in_kb;
+
+ assoc = (l1->assoc == 0xff) ? AMD_CPUID4_FULLY_ASSOCIATIVE : l1->assoc;
+ line_size = l1->line_size;
+ lines_per_tag = l1->lines_per_tag;
+ size_in_kb = l1->size_in_kb;
break;
case 2:
- if (!l2.val)
+ if (!l2.assoc || l2.assoc == AMD_L2_L3_INVALID_ASSOC)
return;
- assoc = assocs[l2.assoc];
- line_size = l2.line_size;
- lines_per_tag = l2.lines_per_tag;
- /* cpu_data has errata corrections for K7 applied */
- size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
+
+ /* Use x86_cache_size as it might have K7 errata fixes */
+ assoc = assocs[l2.assoc];
+ line_size = l2.line_size;
+ lines_per_tag = l2.lines_per_tag;
+ size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
break;
case 3:
- if (!l3.val)
+ if (!l3.assoc || l3.assoc == AMD_L2_L3_INVALID_ASSOC)
return;
- assoc = assocs[l3.assoc];
- line_size = l3.line_size;
- lines_per_tag = l3.lines_per_tag;
- size_in_kb = l3.size_encoded * 512;
+
+ assoc = assocs[l3.assoc];
+ line_size = l3.line_size;
+ lines_per_tag = l3.lines_per_tag;
+ size_in_kb = l3.size_encoded * 512;
if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
- size_in_kb = size_in_kb >> 1;
- assoc = assoc >> 1;
+ size_in_kb = size_in_kb >> 1;
+ assoc = assoc >> 1;
}
break;
default:
return;
}
- eax->split.is_self_initializing = 1;
- eax->split.type = types[leaf];
- eax->split.level = levels[leaf];
- eax->split.num_threads_sharing = 0;
- eax->split.num_cores_on_die = topology_num_cores_per_package();
+ eax->split.is_self_initializing = 1;
+ eax->split.type = types[index];
+ eax->split.level = levels[index];
+ eax->split.num_threads_sharing = 0;
+ eax->split.num_cores_on_die = topology_num_cores_per_package();
-
- if (assoc == 0xffff)
+ if (assoc == AMD_CPUID4_FULLY_ASSOCIATIVE)
eax->split.is_fully_associative = 1;
- ebx->split.coherency_line_size = line_size - 1;
- ebx->split.ways_of_associativity = assoc - 1;
- ebx->split.physical_line_partition = lines_per_tag - 1;
- ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
- (ebx->split.ways_of_associativity + 1) - 1;
-}
-
-#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
-
-/*
- * L3 cache descriptors
- */
-static void amd_calc_l3_indices(struct amd_northbridge *nb)
-{
- struct amd_l3_cache *l3 = &nb->l3_cache;
- unsigned int sc0, sc1, sc2, sc3;
- u32 val = 0;
-
- pci_read_config_dword(nb->misc, 0x1C4, &val);
-
- /* calculate subcache sizes */
- l3->subcaches[0] = sc0 = !(val & BIT(0));
- l3->subcaches[1] = sc1 = !(val & BIT(4));
-
- if (boot_cpu_data.x86 == 0x15) {
- l3->subcaches[0] = sc0 += !(val & BIT(1));
- l3->subcaches[1] = sc1 += !(val & BIT(5));
- }
-
- l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
- l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
-
- l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
-}
-
-/*
- * check whether a slot used for disabling an L3 index is occupied.
- * @l3: L3 cache descriptor
- * @slot: slot number (0..1)
- *
- * @returns: the disabled index if used or negative value if slot free.
- */
-static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
-{
- unsigned int reg = 0;
-
- pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
-
- /* check whether this slot is activated already */
- if (reg & (3UL << 30))
- return reg & 0xfff;
-
- return -1;
-}
-
-static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
- unsigned int slot)
-{
- int index;
- struct amd_northbridge *nb = this_leaf->priv;
-
- index = amd_get_l3_disable_slot(nb, slot);
- if (index >= 0)
- return sprintf(buf, "%d\n", index);
-
- return sprintf(buf, "FREE\n");
-}
-
-#define SHOW_CACHE_DISABLE(slot) \
-static ssize_t \
-cache_disable_##slot##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
- return show_cache_disable(this_leaf, buf, slot); \
-}
-SHOW_CACHE_DISABLE(0)
-SHOW_CACHE_DISABLE(1)
-
-static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
- unsigned slot, unsigned long idx)
-{
- int i;
- idx |= BIT(30);
-
- /*
- * disable index in all 4 subcaches
- */
- for (i = 0; i < 4; i++) {
- u32 reg = idx | (i << 20);
-
- if (!nb->l3_cache.subcaches[i])
- continue;
-
- pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
-
- /*
- * We need to WBINVD on a core on the node containing the L3
- * cache which indices we disable therefore a simple wbinvd()
- * is not sufficient.
- */
- wbinvd_on_cpu(cpu);
-
- reg |= BIT(31);
- pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
- }
-}
-
-/*
- * disable a L3 cache index by using a disable-slot
- *
- * @l3: L3 cache descriptor
- * @cpu: A CPU on the node containing the L3 cache
- * @slot: slot number (0..1)
- * @index: index to disable
- *
- * @return: 0 on success, error status on failure
- */
-static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
- unsigned slot, unsigned long index)
-{
- int ret = 0;
-
- /* check if @slot is already used or the index is already disabled */
- ret = amd_get_l3_disable_slot(nb, slot);
- if (ret >= 0)
- return -EEXIST;
-
- if (index > nb->l3_cache.indices)
- return -EINVAL;
-
- /* check whether the other slot has disabled the same index already */
- if (index == amd_get_l3_disable_slot(nb, !slot))
- return -EEXIST;
-
- amd_l3_disable_index(nb, cpu, slot, index);
-
- return 0;
-}
-
-static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
- const char *buf, size_t count,
- unsigned int slot)
-{
- unsigned long val = 0;
- int cpu, err = 0;
- struct amd_northbridge *nb = this_leaf->priv;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- cpu = cpumask_first(&this_leaf->shared_cpu_map);
-
- if (kstrtoul(buf, 10, &val) < 0)
- return -EINVAL;
-
- err = amd_set_l3_disable_slot(nb, cpu, slot, val);
- if (err) {
- if (err == -EEXIST)
- pr_warn("L3 slot %d in use/index already disabled!\n",
- slot);
- return err;
- }
- return count;
-}
-
-#define STORE_CACHE_DISABLE(slot) \
-static ssize_t \
-cache_disable_##slot##_store(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
- return store_cache_disable(this_leaf, buf, count, slot); \
-}
-STORE_CACHE_DISABLE(0)
-STORE_CACHE_DISABLE(1)
-
-static ssize_t subcaches_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cacheinfo *this_leaf = dev_get_drvdata(dev);
- int cpu = cpumask_first(&this_leaf->shared_cpu_map);
-
- return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
-}
-
-static ssize_t subcaches_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cacheinfo *this_leaf = dev_get_drvdata(dev);
- int cpu = cpumask_first(&this_leaf->shared_cpu_map);
- unsigned long val;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (kstrtoul(buf, 16, &val) < 0)
- return -EINVAL;
-
- if (amd_set_subcaches(cpu, val))
- return -EINVAL;
-
- return count;
+ ebx->split.coherency_line_size = line_size - 1;
+ ebx->split.ways_of_associativity = assoc - 1;
+ ebx->split.physical_line_partition = lines_per_tag - 1;
+ ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
+ (ebx->split.ways_of_associativity + 1) - 1;
}
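The legacy path above derives the CPUID(0x4)-style set count from the size, associativity and line-size fields reported by CPUID(0x80000005)/(0x80000006). A worked example with assumed values (a 512 KiB L2, 64-byte lines, 16-way), using the same arithmetic as legacy_amd_cpuid4():

	#include <stdio.h>

	int main(void)
	{
		unsigned int size_in_kb = 512, line_size = 64, assoc = 16;

		/* CPUID(0x4) bitfields are stored minus one. */
		unsigned int ways_minus_1 = assoc - 1;
		unsigned int sets_minus_1 = (size_in_kb * 1024) / line_size /
					    (ways_minus_1 + 1) - 1;

		printf("number_of_sets field = %u (i.e. %u sets)\n",
		       sets_minus_1, sets_minus_1 + 1);     /* 511, i.e. 512 sets */
		return 0;
	}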
-static DEVICE_ATTR_RW(cache_disable_0);
-static DEVICE_ATTR_RW(cache_disable_1);
-static DEVICE_ATTR_RW(subcaches);
-
-static umode_t
-cache_private_attrs_is_visible(struct kobject *kobj,
- struct attribute *attr, int unused)
+static int cpuid4_info_fill_done(struct _cpuid4_info *id4, union _cpuid4_leaf_eax eax,
+ union _cpuid4_leaf_ebx ebx, union _cpuid4_leaf_ecx ecx)
{
- struct device *dev = kobj_to_dev(kobj);
- struct cacheinfo *this_leaf = dev_get_drvdata(dev);
- umode_t mode = attr->mode;
-
- if (!this_leaf->priv)
- return 0;
-
- if ((attr == &dev_attr_subcaches.attr) &&
- amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- return mode;
+ if (eax.split.type == CTYPE_NULL)
+ return -EIO;
- if ((attr == &dev_attr_cache_disable_0.attr ||
- attr == &dev_attr_cache_disable_1.attr) &&
- amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
- return mode;
+ id4->eax = eax;
+ id4->ebx = ebx;
+ id4->ecx = ecx;
+ id4->size = (ecx.split.number_of_sets + 1) *
+ (ebx.split.coherency_line_size + 1) *
+ (ebx.split.physical_line_partition + 1) *
+ (ebx.split.ways_of_associativity + 1);
return 0;
}
-static struct attribute_group cache_private_group = {
- .is_visible = cache_private_attrs_is_visible,
-};
-
-static void init_amd_l3_attrs(void)
-{
- int n = 1;
- static struct attribute **amd_l3_attrs;
-
- if (amd_l3_attrs) /* already initialized */
- return;
-
- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
- n += 2;
- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- n += 1;
-
- amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
- if (!amd_l3_attrs)
- return;
-
- n = 0;
- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
- amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
- amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;
- }
- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- amd_l3_attrs[n++] = &dev_attr_subcaches.attr;
-
- cache_private_group.attrs = amd_l3_attrs;
-}
-
-const struct attribute_group *
-cache_get_priv_group(struct cacheinfo *this_leaf)
+static int amd_fill_cpuid4_info(int index, struct _cpuid4_info *id4)
{
- struct amd_northbridge *nb = this_leaf->priv;
-
- if (this_leaf->level < 3 || !nb)
- return NULL;
+ union _cpuid4_leaf_eax eax;
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ u32 ignored;
- if (nb && nb->l3_cache.indices)
- init_amd_l3_attrs();
+ if (boot_cpu_has(X86_FEATURE_TOPOEXT) || boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ cpuid_count(0x8000001d, index, &eax.full, &ebx.full, &ecx.full, &ignored);
+ else
+ legacy_amd_cpuid4(index, &eax, &ebx, &ecx);
- return &cache_private_group;
+ return cpuid4_info_fill_done(id4, eax, ebx, ecx);
}
-static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
+static int intel_fill_cpuid4_info(int index, struct _cpuid4_info *id4)
{
- int node;
+ union _cpuid4_leaf_eax eax;
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ u32 ignored;
- /* only for L3, and not in virtualized environments */
- if (index < 3)
- return;
+ cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &ignored);
- node = topology_amd_node_id(smp_processor_id());
- this_leaf->nb = node_to_amd_nb(node);
- if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
- amd_calc_l3_indices(this_leaf->nb);
+ return cpuid4_info_fill_done(id4, eax, ebx, ecx);
}
-#else
-#define amd_init_l3_cache(x, y)
-#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
-static int
-cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
+static int fill_cpuid4_info(int index, struct _cpuid4_info *id4)
{
- union _cpuid4_leaf_eax eax;
- union _cpuid4_leaf_ebx ebx;
- union _cpuid4_leaf_ecx ecx;
- unsigned edx;
-
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
- if (boot_cpu_has(X86_FEATURE_TOPOEXT))
- cpuid_count(0x8000001d, index, &eax.full,
- &ebx.full, &ecx.full, &edx);
- else
- amd_cpuid4(index, &eax, &ebx, &ecx);
- amd_init_l3_cache(this_leaf, index);
- } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
- cpuid_count(0x8000001d, index, &eax.full,
- &ebx.full, &ecx.full, &edx);
- amd_init_l3_cache(this_leaf, index);
- } else {
- cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
- }
+ u8 cpu_vendor = boot_cpu_data.x86_vendor;
- if (eax.split.type == CTYPE_NULL)
- return -EIO; /* better error ? */
-
- this_leaf->eax = eax;
- this_leaf->ebx = ebx;
- this_leaf->ecx = ecx;
- this_leaf->size = (ecx.split.number_of_sets + 1) *
- (ebx.split.coherency_line_size + 1) *
- (ebx.split.physical_line_partition + 1) *
- (ebx.split.ways_of_associativity + 1);
- return 0;
+ return (cpu_vendor == X86_VENDOR_AMD || cpu_vendor == X86_VENDOR_HYGON) ?
+ amd_fill_cpuid4_info(index, id4) :
+ intel_fill_cpuid4_info(index, id4);
}
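cpuid4_info_fill_done() goes the other way: it reconstructs the cache size in bytes from the four CPUID(0x4)/CPUID(0x8000001d) bitfields, each of which is reported minus one. A worked example with assumed register values (64-byte lines, 16 ways, 1 partition, 8192 sets, which multiplies out to 8 MiB):

	#include <stdio.h>

	int main(void)
	{
		/* Raw bitfield values, i.e. already "minus one" as CPUID reports them. */
		unsigned int number_of_sets = 8191, coherency_line_size = 63;
		unsigned int physical_line_partition = 0, ways_of_associativity = 15;

		unsigned long size = (unsigned long)(number_of_sets + 1) *
				     (coherency_line_size + 1) *
				     (physical_line_partition + 1) *
				     (ways_of_associativity + 1);

		printf("cache size = %lu bytes (%lu KiB)\n", size, size / 1024);
		return 0;
	}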
static int find_num_cache_leaves(struct cpuinfo_x86 *c)
{
- unsigned int eax, ebx, ecx, edx, op;
- union _cpuid4_leaf_eax cache_eax;
- int i = -1;
-
- if (c->x86_vendor == X86_VENDOR_AMD ||
- c->x86_vendor == X86_VENDOR_HYGON)
- op = 0x8000001d;
- else
- op = 4;
+ unsigned int eax, ebx, ecx, edx, op;
+ union _cpuid4_leaf_eax cache_eax;
+ int i = -1;
+ /* Do a CPUID(op) loop to calculate num_cache_leaves */
+ op = (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) ? 0x8000001d : 4;
do {
++i;
- /* Do cpuid(op) loop to find out num_cache_leaves */
cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
cache_eax.full = eax;
} while (cache_eax.split.type != CTYPE_NULL);
return i;
}
+/*
+ * AMD/Hygon CPUs may have multiple LLCs if L3 caches exist.
+ */
+
void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, u16 die_id)
{
- /*
- * We may have multiple LLCs if L3 caches exist, so check if we
- * have an L3 cache by looking at the L3 cache CPUID leaf.
- */
- if (!cpuid_edx(0x80000006))
+ if (!cpuid_amd_hygon_has_l3_cache())
return;
if (c->x86 < 0x17) {
- /* LLC is at the node level. */
+ /* Pre-Zen: LLC is at the node level */
c->topo.llc_id = die_id;
} else if (c->x86 == 0x17 && c->x86_model <= 0x1F) {
/*
- * LLC is at the core complex level.
- * Core complex ID is ApicId[3] for these processors.
+ * Family 17h up to 1F models: LLC is at the core
+ * complex level. Core complex ID is ApicId[3].
*/
c->topo.llc_id = c->topo.apicid >> 3;
} else {
/*
- * LLC ID is calculated from the number of threads sharing the
- * cache.
- * */
+ * Newer families: LLC ID is calculated from the number
+ * of threads sharing the L3 cache.
+ */
u32 eax, ebx, ecx, edx, num_sharing_cache = 0;
u32 llc_index = find_num_cache_leaves(c) - 1;
@@ -683,25 +320,21 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, u16 die_id)
num_sharing_cache = ((eax >> 14) & 0xfff) + 1;
if (num_sharing_cache) {
- int bits = get_count_order(num_sharing_cache);
+ int index_msb = get_count_order(num_sharing_cache);
- c->topo.llc_id = c->topo.apicid >> bits;
+ c->topo.llc_id = c->topo.apicid >> index_msb;
}
}
}
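For the newer-family branch above, the LLC ID is the APIC ID shifted right by the rounded-up log2 of the thread-sharing count taken from CPUID(0x8000001d) EAX[25:14]. A worked sketch with assumed numbers (16 threads sharing the L3); ceil_log2() stands in for the kernel's get_count_order():

	#include <stdio.h>

	static unsigned int ceil_log2(unsigned int n)  /* stand-in for get_count_order() */
	{
		unsigned int order = 0;

		while ((1u << order) < n)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned int eax_25_14 = 15;                     /* threads sharing, minus one */
		unsigned int num_sharing_cache = eax_25_14 + 1;  /* = 16                      */
		unsigned int index_msb = ceil_log2(num_sharing_cache);

		for (unsigned int apicid = 0x0e; apicid <= 0x11; apicid++)
			printf("apicid 0x%02x -> llc_id %u\n", apicid, apicid >> index_msb);
		return 0;                /* 0x0e,0x0f -> 0 and 0x10,0x11 -> 1 */
	}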
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c)
{
- /*
- * We may have multiple LLCs if L3 caches exist, so check if we
- * have an L3 cache by looking at the L3 cache CPUID leaf.
- */
- if (!cpuid_edx(0x80000006))
+ if (!cpuid_amd_hygon_has_l3_cache())
return;
/*
- * LLC is at the core complex level.
- * Core complex ID is ApicId[3] for these processors.
+ * Hygons are similar to AMD Family 17h up to 1F models: LLC is
+ * at the core complex level. Core complex ID is ApicId[3].
*/
c->topo.llc_id = c->topo.apicid >> 3;
}
@@ -710,14 +343,10 @@ void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
- if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+ if (boot_cpu_has(X86_FEATURE_TOPOEXT))
ci->num_leaves = find_num_cache_leaves(c);
- } else if (c->extended_cpuid_level >= 0x80000006) {
- if (cpuid_edx(0x80000006) & 0xf000)
- ci->num_leaves = 4;
- else
- ci->num_leaves = 3;
- }
+ else if (c->extended_cpuid_level >= 0x80000006)
+ ci->num_leaves = (cpuid_edx(0x80000006) & 0xf000) ? 4 : 3;
}
void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
@@ -727,148 +356,131 @@ void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
ci->num_leaves = find_num_cache_leaves(c);
}
-void init_intel_cacheinfo(struct cpuinfo_x86 *c)
+static void intel_cacheinfo_done(struct cpuinfo_x86 *c, unsigned int l3,
+ unsigned int l2, unsigned int l1i, unsigned int l1d)
{
- /* Cache sizes */
- unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0;
- unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
- unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
- unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
- struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
+ /*
+ * If llc_id is still unset, then cpuid_level < 4, which implies
+ * that the only possibility left is SMT. Since CPUID(0x2) doesn't
+ * specify any shared caches and SMT shares all caches, we can
+ * unconditionally set LLC ID to the package ID so that all
+ * threads share it.
+ */
+ if (c->topo.llc_id == BAD_APICID)
+ c->topo.llc_id = c->topo.pkg_id;
- if (c->cpuid_level > 3) {
- /*
- * There should be at least one leaf. A non-zero value means
- * that the number of leaves has been initialized.
- */
- if (!ci->num_leaves)
- ci->num_leaves = find_num_cache_leaves(c);
+ c->x86_cache_size = l3 ? l3 : (l2 ? l2 : l1i + l1d);
- /*
- * Whenever possible use cpuid(4), deterministic cache
- * parameters cpuid leaf to find the cache details
- */
- for (i = 0; i < ci->num_leaves; i++) {
- struct _cpuid4_info_regs this_leaf = {};
- int retval;
+ if (!l2)
+ cpu_detect_cache_sizes(c);
+}
- retval = cpuid4_cache_lookup_regs(i, &this_leaf);
- if (retval < 0)
- continue;
+/*
+ * Legacy Intel CPUID(0x2) path if CPUID(0x4) is not available.
+ */
+static void intel_cacheinfo_0x2(struct cpuinfo_x86 *c)
+{
+ unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0;
+ const struct leaf_0x2_table *desc;
+ union leaf_0x2_regs regs;
+ u8 *ptr;
- switch (this_leaf.eax.split.level) {
- case 1:
- if (this_leaf.eax.split.type == CTYPE_DATA)
- new_l1d = this_leaf.size/1024;
- else if (this_leaf.eax.split.type == CTYPE_INST)
- new_l1i = this_leaf.size/1024;
- break;
- case 2:
- new_l2 = this_leaf.size/1024;
- num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
- index_msb = get_count_order(num_threads_sharing);
- l2_id = c->topo.apicid & ~((1 << index_msb) - 1);
- break;
- case 3:
- new_l3 = this_leaf.size/1024;
- num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
- index_msb = get_count_order(num_threads_sharing);
- l3_id = c->topo.apicid & ~((1 << index_msb) - 1);
- break;
- default:
- break;
- }
- }
- }
+ if (c->cpuid_level < 2)
+ return;
- /* Don't use CPUID(2) if CPUID(4) is supported. */
- if (!ci->num_leaves && c->cpuid_level > 1) {
- /* supports eax=2 call */
- int j, n;
- unsigned int regs[4];
- unsigned char *dp = (unsigned char *)regs;
-
- /* Number of times to iterate */
- n = cpuid_eax(2) & 0xFF;
-
- for (i = 0 ; i < n ; i++) {
- cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
-
- /* If bit 31 is set, this is an unknown format */
- for (j = 0 ; j < 4 ; j++)
- if (regs[j] & (1 << 31))
- regs[j] = 0;
-
- /* Byte 0 is level count, not a descriptor */
- for (j = 1 ; j < 16 ; j++) {
- unsigned char des = dp[j];
- unsigned char k = 0;
-
- /* look up this descriptor in the table */
- while (cache_table[k].descriptor != 0) {
- if (cache_table[k].descriptor == des) {
- switch (cache_table[k].cache_type) {
- case LVL_1_INST:
- l1i += cache_table[k].size;
- break;
- case LVL_1_DATA:
- l1d += cache_table[k].size;
- break;
- case LVL_2:
- l2 += cache_table[k].size;
- break;
- case LVL_3:
- l3 += cache_table[k].size;
- break;
- }
-
- break;
- }
-
- k++;
- }
- }
+ cpuid_leaf_0x2(&regs);
+ for_each_cpuid_0x2_desc(regs, ptr, desc) {
+ switch (desc->c_type) {
+ case CACHE_L1_INST: l1i += desc->c_size; break;
+ case CACHE_L1_DATA: l1d += desc->c_size; break;
+ case CACHE_L2: l2 += desc->c_size; break;
+ case CACHE_L3: l3 += desc->c_size; break;
}
}
- if (new_l1d)
- l1d = new_l1d;
+ intel_cacheinfo_done(c, l3, l2, l1i, l1d);
+}
- if (new_l1i)
- l1i = new_l1i;
+static unsigned int calc_cache_topo_id(struct cpuinfo_x86 *c, const struct _cpuid4_info *id4)
+{
+ unsigned int num_threads_sharing;
+ int index_msb;
- if (new_l2) {
- l2 = new_l2;
- c->topo.llc_id = l2_id;
- c->topo.l2c_id = l2_id;
- }
+ num_threads_sharing = 1 + id4->eax.split.num_threads_sharing;
+ index_msb = get_count_order(num_threads_sharing);
+ return c->topo.apicid & ~((1 << index_msb) - 1);
+}
- if (new_l3) {
- l3 = new_l3;
- c->topo.llc_id = l3_id;
- }
+static bool intel_cacheinfo_0x4(struct cpuinfo_x86 *c)
+{
+ struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
+ unsigned int l2_id = BAD_APICID, l3_id = BAD_APICID;
+ unsigned int l1d = 0, l1i = 0, l2 = 0, l3 = 0;
+
+ if (c->cpuid_level < 4)
+ return false;
/*
- * If llc_id is not yet set, this means cpuid_level < 4 which in
- * turns means that the only possibility is SMT (as indicated in
- * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
- * that SMT shares all caches, we can unconditionally set cpu_llc_id to
- * c->topo.pkg_id.
+ * There should be at least one leaf. A non-zero value means
+ * that the number of leaves has been previously initialized.
*/
- if (c->topo.llc_id == BAD_APICID)
- c->topo.llc_id = c->topo.pkg_id;
+ if (!ci->num_leaves)
+ ci->num_leaves = find_num_cache_leaves(c);
+
+ if (!ci->num_leaves)
+ return false;
- c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
+ for (int i = 0; i < ci->num_leaves; i++) {
+ struct _cpuid4_info id4 = {};
+ int ret;
- if (!l2)
- cpu_detect_cache_sizes(c);
+ ret = intel_fill_cpuid4_info(i, &id4);
+ if (ret < 0)
+ continue;
+
+ switch (id4.eax.split.level) {
+ case 1:
+ if (id4.eax.split.type == CTYPE_DATA)
+ l1d = id4.size / 1024;
+ else if (id4.eax.split.type == CTYPE_INST)
+ l1i = id4.size / 1024;
+ break;
+ case 2:
+ l2 = id4.size / 1024;
+ l2_id = calc_cache_topo_id(c, &id4);
+ break;
+ case 3:
+ l3 = id4.size / 1024;
+ l3_id = calc_cache_topo_id(c, &id4);
+ break;
+ default:
+ break;
+ }
+ }
+
+ c->topo.l2c_id = l2_id;
+ c->topo.llc_id = (l3_id == BAD_APICID) ? l2_id : l3_id;
+ intel_cacheinfo_done(c, l3, l2, l1i, l1d);
+ return true;
+}
+
+void init_intel_cacheinfo(struct cpuinfo_x86 *c)
+{
+ /* Don't use CPUID(0x2) if CPUID(0x4) is supported. */
+ if (intel_cacheinfo_0x4(c))
+ return;
+
+ intel_cacheinfo_0x2(c);
}
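init_intel_cacheinfo() now tries CPUID(0x4) first and only falls back to the legacy CPUID(0x2) descriptors. In the 0x4 path, calc_cache_topo_id() keeps the high APIC ID bits rather than shifting them down: it masks off the low bits that cover the threads sharing the cache, so siblings end up with the same l2_id/l3_id. A worked sketch with an assumed sharing count of 12, again using ceil_log2() as a stand-in for get_count_order():

	#include <stdio.h>

	static unsigned int ceil_log2(unsigned int n)  /* stand-in for get_count_order() */
	{
		unsigned int order = 0;

		while ((1u << order) < n)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned int num_threads_sharing = 12;  /* 1 + eax.split.num_threads_sharing */
		unsigned int index_msb = ceil_log2(num_threads_sharing);   /* = 4 */

		for (unsigned int apicid = 0x0f; apicid <= 0x12; apicid++)
			printf("apicid 0x%02x -> cache topo id 0x%02x\n",
			       apicid, apicid & ~((1u << index_msb) - 1));
		return 0;                /* 0x0f -> 0x00, 0x10..0x12 -> 0x10 */
	}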
+/*
+ * <linux/cacheinfo.h> shared_cpu_map setup, AMD/Hygon
+ */
static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
- struct _cpuid4_info_regs *base)
+ const struct _cpuid4_info *id4)
{
struct cpu_cacheinfo *this_cpu_ci;
- struct cacheinfo *this_leaf;
+ struct cacheinfo *ci;
int i, sibling;
/*
@@ -880,18 +492,18 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
this_cpu_ci = get_cpu_cacheinfo(i);
if (!this_cpu_ci->info_list)
continue;
- this_leaf = this_cpu_ci->info_list + index;
+
+ ci = this_cpu_ci->info_list + index;
for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
if (!cpu_online(sibling))
continue;
- cpumask_set_cpu(sibling,
- &this_leaf->shared_cpu_map);
+ cpumask_set_cpu(sibling, &ci->shared_cpu_map);
}
}
} else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
unsigned int apicid, nshared, first, last;
- nshared = base->eax.split.num_threads_sharing + 1;
+ nshared = id4->eax.split.num_threads_sharing + 1;
apicid = cpu_data(cpu).topo.apicid;
first = apicid - (apicid % nshared);
last = first + nshared - 1;
@@ -905,14 +517,13 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
if ((apicid < first) || (apicid > last))
continue;
- this_leaf = this_cpu_ci->info_list + index;
+ ci = this_cpu_ci->info_list + index;
for_each_online_cpu(sibling) {
apicid = cpu_data(sibling).topo.apicid;
if ((apicid < first) || (apicid > last))
continue;
- cpumask_set_cpu(sibling,
- &this_leaf->shared_cpu_map);
+ cpumask_set_cpu(sibling, &ci->shared_cpu_map);
}
}
} else
@@ -921,25 +532,27 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
return 1;
}
+/*
+ * <linux/cacheinfo.h> shared_cpu_map setup, Intel + fallback AMD/Hygon
+ */
static void __cache_cpumap_setup(unsigned int cpu, int index,
- struct _cpuid4_info_regs *base)
+ const struct _cpuid4_info *id4)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
- struct cacheinfo *this_leaf, *sibling_leaf;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ struct cacheinfo *ci, *sibling_ci;
unsigned long num_threads_sharing;
int index_msb, i;
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- if (c->x86_vendor == X86_VENDOR_AMD ||
- c->x86_vendor == X86_VENDOR_HYGON) {
- if (__cache_amd_cpumap_setup(cpu, index, base))
+ if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
+ if (__cache_amd_cpumap_setup(cpu, index, id4))
return;
}
- this_leaf = this_cpu_ci->info_list + index;
- num_threads_sharing = 1 + base->eax.split.num_threads_sharing;
+ ci = this_cpu_ci->info_list + index;
+ num_threads_sharing = 1 + id4->eax.split.num_threads_sharing;
- cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+ cpumask_set_cpu(cpu, &ci->shared_cpu_map);
if (num_threads_sharing == 1)
return;
@@ -949,30 +562,29 @@ static void __cache_cpumap_setup(unsigned int cpu, int index,
if (cpu_data(i).topo.apicid >> index_msb == c->topo.apicid >> index_msb) {
struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
+ /* Skip if itself or no cacheinfo */
if (i == cpu || !sib_cpu_ci->info_list)
- continue;/* skip if itself or no cacheinfo */
- sibling_leaf = sib_cpu_ci->info_list + index;
- cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
- cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map);
+ continue;
+
+ sibling_ci = sib_cpu_ci->info_list + index;
+ cpumask_set_cpu(i, &ci->shared_cpu_map);
+ cpumask_set_cpu(cpu, &sibling_ci->shared_cpu_map);
}
}
-static void ci_leaf_init(struct cacheinfo *this_leaf,
- struct _cpuid4_info_regs *base)
+static void ci_info_init(struct cacheinfo *ci, const struct _cpuid4_info *id4,
+ struct amd_northbridge *nb)
{
- this_leaf->id = base->id;
- this_leaf->attributes = CACHE_ID;
- this_leaf->level = base->eax.split.level;
- this_leaf->type = cache_type_map[base->eax.split.type];
- this_leaf->coherency_line_size =
- base->ebx.split.coherency_line_size + 1;
- this_leaf->ways_of_associativity =
- base->ebx.split.ways_of_associativity + 1;
- this_leaf->size = base->size;
- this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1;
- this_leaf->physical_line_partition =
- base->ebx.split.physical_line_partition + 1;
- this_leaf->priv = base->nb;
+ ci->id = id4->id;
+ ci->attributes = CACHE_ID;
+ ci->level = id4->eax.split.level;
+ ci->type = cache_type_map[id4->eax.split.type];
+ ci->coherency_line_size = id4->ebx.split.coherency_line_size + 1;
+ ci->ways_of_associativity = id4->ebx.split.ways_of_associativity + 1;
+ ci->size = id4->size;
+ ci->number_of_sets = id4->ecx.split.number_of_sets + 1;
+ ci->physical_line_partition = id4->ebx.split.physical_line_partition + 1;
+ ci->priv = nb;
}
int init_cache_level(unsigned int cpu)
@@ -987,38 +599,45 @@ int init_cache_level(unsigned int cpu)
}
/*
- * The max shared threads number comes from CPUID.4:EAX[25-14] with input
+ * The max shared threads number comes from CPUID(0x4) EAX[25-14] with input
* ECX as cache index. Then right shift apicid by the number's order to get
* cache id for this cache node.
*/
-static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
+static void get_cache_id(int cpu, struct _cpuid4_info *id4)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
unsigned long num_threads_sharing;
int index_msb;
- num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
+ num_threads_sharing = 1 + id4->eax.split.num_threads_sharing;
index_msb = get_count_order(num_threads_sharing);
- id4_regs->id = c->topo.apicid >> index_msb;
+ id4->id = c->topo.apicid >> index_msb;
}
int populate_cache_leaves(unsigned int cpu)
{
- unsigned int idx, ret;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
- struct cacheinfo *this_leaf = this_cpu_ci->info_list;
- struct _cpuid4_info_regs id4_regs = {};
+ struct cacheinfo *ci = this_cpu_ci->info_list;
+ u8 cpu_vendor = boot_cpu_data.x86_vendor;
+ struct amd_northbridge *nb = NULL;
+ struct _cpuid4_info id4 = {};
+ int idx, ret;
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
- ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
+ ret = fill_cpuid4_info(idx, &id4);
if (ret)
return ret;
- get_cache_id(cpu, &id4_regs);
- ci_leaf_init(this_leaf++, &id4_regs);
- __cache_cpumap_setup(cpu, idx, &id4_regs);
+
+ get_cache_id(cpu, &id4);
+
+ if (cpu_vendor == X86_VENDOR_AMD || cpu_vendor == X86_VENDOR_HYGON)
+ nb = amd_init_l3_cache(idx);
+
+ ci_info_init(ci++, &id4, nb);
+ __cache_cpumap_setup(cpu, idx, &id4);
}
- this_cpu_ci->cpu_map_populated = true;
+ this_cpu_ci->cpu_map_populated = true;
return 0;
}
@@ -1034,31 +653,33 @@ int populate_cache_leaves(unsigned int cpu)
static unsigned long saved_cr4;
static DEFINE_RAW_SPINLOCK(cache_disable_lock);
+/*
+ * Cache flushing is the most time-consuming step when programming the
+ * MTRRs. On many Intel CPUs without known erratas, it can be skipped
+ * if the CPU declares cache self-snooping support.
+ */
+static void maybe_flush_caches(void)
+{
+ if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+ wbinvd();
+}
+
void cache_disable(void) __acquires(cache_disable_lock)
{
unsigned long cr0;
/*
- * Note that this is not ideal
- * since the cache is only flushed/disabled for this CPU while the
- * MTRRs are changed, but changing this requires more invasive
- * changes to the way the kernel boots
+ * This is not ideal since the cache is only flushed/disabled
+ * for this CPU while the MTRRs are changed, but changing this
+ * requires more invasive changes to the way the kernel boots.
*/
-
raw_spin_lock(&cache_disable_lock);
/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
cr0 = read_cr0() | X86_CR0_CD;
write_cr0(cr0);
- /*
- * Cache flushing is the most time-consuming step when programming
- * the MTRRs. Fortunately, as per the Intel Software Development
- * Manual, we can skip it if the processor supports cache self-
- * snooping.
- */
- if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
- wbinvd();
+ maybe_flush_caches();
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if (cpu_feature_enabled(X86_FEATURE_PGE)) {
@@ -1073,9 +694,7 @@ void cache_disable(void) __acquires(cache_disable_lock)
if (cpu_feature_enabled(X86_FEATURE_MTRR))
mtrr_disable();
- /* Again, only flush caches if we have to. */
- if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
- wbinvd();
+ maybe_flush_caches();
}
void cache_enable(void) __releases(cache_disable_lock)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 12126adbc3a9..8feb8fd2957a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -29,7 +29,7 @@
#include <asm/alternative.h>
#include <asm/cmdline.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/doublefault.h>
@@ -148,7 +148,7 @@ static void ppin_init(struct cpuinfo_x86 *c)
*/
info = (struct ppin_info *)id->driver_data;
- if (rdmsrl_safe(info->msr_ppin_ctl, &val))
+ if (rdmsrq_safe(info->msr_ppin_ctl, &val))
goto clear_ppin;
if ((val & 3UL) == 1UL) {
@@ -158,13 +158,13 @@ static void ppin_init(struct cpuinfo_x86 *c)
/* If PPIN is disabled, try to enable */
if (!(val & 2UL)) {
- wrmsrl_safe(info->msr_ppin_ctl, val | 2UL);
- rdmsrl_safe(info->msr_ppin_ctl, &val);
+ wrmsrq_safe(info->msr_ppin_ctl, val | 2UL);
+ rdmsrq_safe(info->msr_ppin_ctl, &val);
}
/* Is the enable bit set? */
if (val & 2UL) {
- c->ppin = __rdmsr(info->msr_ppin);
+ c->ppin = native_rdmsrq(info->msr_ppin);
set_cpu_cap(c, info->feature);
return;
}
@@ -242,6 +242,7 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+SYM_PIC_ALIAS(gdt_page);
#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
@@ -321,7 +322,7 @@ static int __init cachesize_setup(char *str)
__setup("cachesize=", cachesize_setup);
/* Probe for the CPUID instruction */
-bool have_cpuid_p(void)
+bool cpuid_feature(void)
{
return flag_is_changeable_p(X86_EFLAGS_ID);
}
@@ -562,9 +563,9 @@ __noendbr u64 ibt_save(bool disable)
u64 msr = 0;
if (cpu_feature_enabled(X86_FEATURE_IBT)) {
- rdmsrl(MSR_IA32_S_CET, msr);
+ rdmsrq(MSR_IA32_S_CET, msr);
if (disable)
- wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
+ wrmsrq(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
}
return msr;
@@ -575,10 +576,10 @@ __noendbr void ibt_restore(u64 save)
u64 msr;
if (cpu_feature_enabled(X86_FEATURE_IBT)) {
- rdmsrl(MSR_IA32_S_CET, msr);
+ rdmsrq(MSR_IA32_S_CET, msr);
msr &= ~CET_ENDBR_EN;
msr |= (save & CET_ENDBR_EN);
- wrmsrl(MSR_IA32_S_CET, msr);
+ wrmsrq(MSR_IA32_S_CET, msr);
}
}
@@ -602,15 +603,15 @@ static __always_inline void setup_cet(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_USER_SHSTK);
if (kernel_ibt)
- wrmsrl(MSR_IA32_S_CET, CET_ENDBR_EN);
+ wrmsrq(MSR_IA32_S_CET, CET_ENDBR_EN);
else
- wrmsrl(MSR_IA32_S_CET, 0);
+ wrmsrq(MSR_IA32_S_CET, 0);
cr4_set_bits(X86_CR4_CET);
if (kernel_ibt && ibt_selftest()) {
pr_err("IBT selftest: Failed!\n");
- wrmsrl(MSR_IA32_S_CET, 0);
+ wrmsrq(MSR_IA32_S_CET, 0);
setup_clear_cpu_cap(X86_FEATURE_IBT);
}
}
@@ -621,8 +622,8 @@ __noendbr void cet_disable(void)
cpu_feature_enabled(X86_FEATURE_SHSTK)))
return;
- wrmsrl(MSR_IA32_S_CET, 0);
- wrmsrl(MSR_IA32_U_CET, 0);
+ wrmsrq(MSR_IA32_S_CET, 0);
+ wrmsrq(MSR_IA32_U_CET, 0);
}
/*
@@ -751,9 +752,9 @@ void __init switch_gdt_and_percpu_base(int cpu)
* No need to load %gs. It is already correct.
*
* Writing %gs on 64bit would zero GSBASE which would make any per
- * CPU operation up to the point of the wrmsrl() fault.
+ * CPU operation up to the point of the wrmsrq() fault.
*
- * Set GSBASE to the new offset. Until the wrmsrl() happens the
+ * Set GSBASE to the new offset. Until the wrmsrq() happens the
* early mapping is still valid. That means the GSBASE update will
* lose any prior per CPU data which was not copied over in
* setup_per_cpu_areas().
@@ -761,7 +762,7 @@ void __init switch_gdt_and_percpu_base(int cpu)
* This works even with stackprotector enabled because the
* per CPU stack canary is 0 in both per CPU areas.
*/
- wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
+ wrmsrq(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#else
/*
* %fs is already set to __KERNEL_PERCPU, but after switching GDT
@@ -1005,17 +1006,18 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
c->x86_capability[CPUID_D_1_EAX] = eax;
}
- /* AMD-defined flags: level 0x80000001 */
+ /*
+ * Check if extended CPUID leaves are implemented: Max extended
+ * CPUID leaf must be in the 0x80000001-0x8000ffff range.
+ */
eax = cpuid_eax(0x80000000);
- c->extended_cpuid_level = eax;
+ c->extended_cpuid_level = ((eax & 0xffff0000) == 0x80000000) ? eax : 0;
- if ((eax & 0xffff0000) == 0x80000000) {
- if (eax >= 0x80000001) {
- cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
+ if (c->extended_cpuid_level >= 0x80000001) {
+ cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
- c->x86_capability[CPUID_8000_0001_ECX] = ecx;
- c->x86_capability[CPUID_8000_0001_EDX] = edx;
- }
+ c->x86_capability[CPUID_8000_0001_ECX] = ecx;
+ c->x86_capability[CPUID_8000_0001_EDX] = edx;
}
if (c->extended_cpuid_level >= 0x80000007) {
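The rewritten check above only trusts CPUID(0x80000000) EAX as the maximum extended leaf when its upper 16 bits read back as 0x8000; anything else means extended leaves are not implemented and the level is forced to 0. A small sketch of the same test with assumed return values:

	#include <stdio.h>

	static unsigned int extended_level(unsigned int eax)
	{
		return ((eax & 0xffff0000) == 0x80000000) ? eax : 0;
	}

	int main(void)
	{
		printf("0x%08x -> 0x%08x\n", 0x80000021u, extended_level(0x80000021u)); /* kept */
		printf("0x%08x -> 0x%08x\n", 0x00000d01u, extended_level(0x00000d01u)); /* -> 0 */
		return 0;
	}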
@@ -1227,6 +1229,10 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define GDS BIT(6)
/* CPU is affected by Register File Data Sampling */
#define RFDS BIT(7)
+/* CPU is affected by Indirect Target Selection */
+#define ITS BIT(8)
+/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
+#define ITS_NATIVE_ONLY BIT(9)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE, X86_STEP_MAX, SRBDS),
@@ -1238,22 +1244,25 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPS(INTEL_BROADWELL_G, X86_STEP_MAX, SRBDS),
VULNBL_INTEL_STEPS(INTEL_BROADWELL_X, X86_STEP_MAX, MMIO),
VULNBL_INTEL_STEPS(INTEL_BROADWELL, X86_STEP_MAX, SRBDS),
- VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, X86_STEP_MAX, MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, 0x5, MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, X86_STEP_MAX, MMIO | RETBLEED | GDS | ITS),
VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPS(INTEL_SKYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS),
- VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS),
- VULNBL_INTEL_STEPS(INTEL_KABYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, 0xb, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | ITS),
+ VULNBL_INTEL_STEPS(INTEL_KABYLAKE, 0xc, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPS(INTEL_KABYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | ITS),
VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L, X86_STEP_MAX, RETBLEED),
- VULNBL_INTEL_STEPS(INTEL_ICELAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS),
- VULNBL_INTEL_STEPS(INTEL_ICELAKE_D, X86_STEP_MAX, MMIO | GDS),
- VULNBL_INTEL_STEPS(INTEL_ICELAKE_X, X86_STEP_MAX, MMIO | GDS),
- VULNBL_INTEL_STEPS(INTEL_COMETLAKE, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS),
- VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, 0x0, MMIO | RETBLEED),
- VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS),
- VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L, X86_STEP_MAX, GDS),
- VULNBL_INTEL_STEPS(INTEL_TIGERLAKE, X86_STEP_MAX, GDS),
+ VULNBL_INTEL_STEPS(INTEL_ICELAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
+ VULNBL_INTEL_STEPS(INTEL_ICELAKE_D, X86_STEP_MAX, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
+ VULNBL_INTEL_STEPS(INTEL_ICELAKE_X, X86_STEP_MAX, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
+ VULNBL_INTEL_STEPS(INTEL_COMETLAKE, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+ VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, 0x0, MMIO | RETBLEED | ITS),
+ VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+ VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L, X86_STEP_MAX, GDS | ITS | ITS_NATIVE_ONLY),
+ VULNBL_INTEL_STEPS(INTEL_TIGERLAKE, X86_STEP_MAX, GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPS(INTEL_LAKEFIELD, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED),
- VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_TYPE(INTEL_ALDERLAKE, ATOM, RFDS),
VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L, X86_STEP_MAX, RFDS),
VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE, ATOM, RFDS),
@@ -1288,7 +1297,7 @@ u64 x86_read_arch_cap_msr(void)
u64 x86_arch_cap_msr = 0;
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
+ rdmsrq(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
return x86_arch_cap_msr;
}
@@ -1318,10 +1327,78 @@ static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
return cpu_matches(cpu_vuln_blacklist, RFDS);
}
+static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
+{
+ /* The "immunity" bit trumps everything else: */
+ if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
+ return false;
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return false;
+
+ /* None of the affected CPUs have BHI_CTRL */
+ if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
+ return false;
+
+ /*
+ * If a VMM did not expose ITS_NO, assume that a guest could
+ * be running on a vulnerable hardware or may migrate to such
+ * hardware.
+ */
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return true;
+
+ if (cpu_matches(cpu_vuln_blacklist, ITS))
+ return true;
+
+ return false;
+}
+
+static struct x86_cpu_id cpu_latest_microcode[] = {
+#include "microcode/intel-ucode-defs.h"
+ {}
+};
+
+static bool __init cpu_has_old_microcode(void)
+{
+ const struct x86_cpu_id *m = x86_match_cpu(cpu_latest_microcode);
+
+ /* Give unknown CPUs a pass: */
+ if (!m) {
+ /* Intel CPUs should be in the list. Warn if not: */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ pr_info("x86/CPU: Model not found in latest microcode list\n");
+ return false;
+ }
+
+ /*
+ * Hosts usually lie to guests with a super high microcode
+ * version. Just ignore what hosts tell guests:
+ */
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return false;
+
+ /* Consider all debug microcode to be old: */
+ if (boot_cpu_data.microcode & BIT(31))
+ return true;
+
+ /* Give new microcode a pass: */
+ if (boot_cpu_data.microcode >= m->driver_data)
+ return false;
+
+ /* Uh oh, too old: */
+ return true;
+}
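cpu_has_old_microcode() compares the running revision against a generated per-model table; any revision with bit 31 set is treated as debug microcode and therefore old, and guests skip the check because hypervisors often report inflated revisions. A standalone sketch of that decision order with made-up revision numbers:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool is_old_microcode(bool hypervisor, uint32_t rev, uint32_t latest)
	{
		if (hypervisor)            /* host-reported revisions are unreliable */
			return false;
		if (rev & (1u << 31))      /* debug microcode is always "old"        */
			return true;
		return rev < latest;       /* otherwise a plain version compare      */
	}

	int main(void)
	{
		printf("%d\n", is_old_microcode(false, 0x9a, 0xb4));  /* 1: too old */
		printf("%d\n", is_old_microcode(false, 0xb4, 0xb4));  /* 0: current */
		printf("%d\n", is_old_microcode(true,  0x9a, 0xb4));  /* 0: guest   */
		return 0;
	}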
+
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
+ if (cpu_has_old_microcode()) {
+ pr_warn("x86/CPU: Running old microcode\n");
+ setup_force_cpu_bug(X86_BUG_OLD_MICROCODE);
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ }
+
/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
!(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
@@ -1402,15 +1479,10 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* Affected CPU list is generally enough to enumerate the vulnerability,
* but for virtualization case check for ARCH_CAP MSR bits also, VMM may
* not want the guest to enumerate the bug.
- *
- * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
- * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
*/
if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
if (cpu_matches(cpu_vuln_blacklist, MMIO))
setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
- else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
- setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
}
if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
@@ -1439,9 +1511,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
if (vulnerable_to_rfds(x86_arch_cap_msr))
setup_force_cpu_bug(X86_BUG_RFDS);
- /* When virtualized, eIBRS could be hidden, assume vulnerable */
- if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
- !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
+ /*
+ * Intel parts with eIBRS are vulnerable to BHI attacks. Parts with
+ * BHI_NO still need to use the BHI mitigation to prevent Intra-mode
+ * attacks. When virtualized, eIBRS could be hidden, assume vulnerable.
+ */
+ if (!cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
(boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
boot_cpu_has(X86_FEATURE_HYPERVISOR)))
setup_force_cpu_bug(X86_BUG_BHI);
@@ -1449,6 +1524,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
+ if (vulnerable_to_its(x86_arch_cap_msr)) {
+ setup_force_cpu_bug(X86_BUG_ITS);
+ if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY))
+ setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
+ }
+
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
@@ -1630,11 +1711,11 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
memset(&c->x86_capability, 0, sizeof(c->x86_capability));
c->extended_cpuid_level = 0;
- if (!have_cpuid_p())
+ if (!cpuid_feature())
identify_cpu_without_cpuid(c);
/* cyrix could have cpuid enabled via c_identify()*/
- if (have_cpuid_p()) {
+ if (cpuid_feature()) {
cpu_detect(c);
get_cpu_vendor(c);
intel_unlock_cpuid_leafs(c);
@@ -1749,11 +1830,11 @@ static bool detect_null_seg_behavior(void)
*/
unsigned long old_base, tmp;
- rdmsrl(MSR_FS_BASE, old_base);
- wrmsrl(MSR_FS_BASE, 1);
+ rdmsrq(MSR_FS_BASE, old_base);
+ wrmsrq(MSR_FS_BASE, 1);
loadsegment(fs, 0);
- rdmsrl(MSR_FS_BASE, tmp);
- wrmsrl(MSR_FS_BASE, old_base);
+ rdmsrq(MSR_FS_BASE, tmp);
+ wrmsrq(MSR_FS_BASE, old_base);
return tmp == 0;
}
@@ -1794,11 +1875,11 @@ static void generic_identify(struct cpuinfo_x86 *c)
{
c->extended_cpuid_level = 0;
- if (!have_cpuid_p())
+ if (!cpuid_feature())
identify_cpu_without_cpuid(c);
/* cyrix could have cpuid enabled via c_identify()*/
- if (!have_cpuid_p())
+ if (!cpuid_feature())
return;
cpu_detect(c);
@@ -1982,9 +2063,9 @@ void enable_sep_cpu(void)
*/
tss->x86_tss.ss1 = __KERNEL_CS;
- wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
- wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
- wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
+ wrmsrq(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1);
+ wrmsrq(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
+ wrmsrq(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32);
put_cpu();
}
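Most conversions in this series are mechanical: the legacy wrmsr(msr, lo, hi)
calls above passed the value as an explicit EAX/EDX pair, while the new *q
variants take a single u64 and split it internally. A rough user-space sketch
of that packing (the helper names here are stand-ins, not the kernel API):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the two-register form: EAX = lo, EDX = hi. */
    static void msr_write_pair(uint32_t msr, uint32_t lo, uint32_t hi)
    {
        printf("msr %#x <- edx:eax = %08x:%08x\n", msr, hi, lo);
    }

    /* The 64-bit style used after the rename: one value, split internally. */
    static void msr_write_q(uint32_t msr, uint64_t val)
    {
        msr_write_pair(msr, (uint32_t)val, (uint32_t)(val >> 32));
    }

    int main(void)
    {
        /* wrmsr(msr, x, 0) and wrmsrq(msr, x) agree for any 32-bit x. */
        msr_write_pair(0x174, 0x12345678, 0);
        msr_write_q(0x174, 0x12345678);
        return 0;
    }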
@@ -2091,7 +2172,7 @@ DEFINE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_
DEFINE_PER_CPU_CACHE_HOT(u64, __x86_call_depth);
EXPORT_PER_CPU_SYMBOL(__x86_call_depth);
-static void wrmsrl_cstar(unsigned long val)
+static void wrmsrq_cstar(unsigned long val)
{
/*
* Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR
@@ -2099,37 +2180,37 @@ static void wrmsrl_cstar(unsigned long val)
* guest. Avoid the pointless write on all Intel CPUs.
*/
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
- wrmsrl(MSR_CSTAR, val);
+ wrmsrq(MSR_CSTAR, val);
}
static inline void idt_syscall_init(void)
{
- wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
+ wrmsrq(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
if (ia32_enabled()) {
- wrmsrl_cstar((unsigned long)entry_SYSCALL_compat);
+ wrmsrq_cstar((unsigned long)entry_SYSCALL_compat);
/*
* This only works on Intel CPUs.
* On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
* This does not cause SYSENTER to jump to the wrong location, because
* AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
*/
- wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
- wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
+ wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+ wrmsrq_safe(MSR_IA32_SYSENTER_ESP,
(unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
- wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
+ wrmsrq_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
} else {
- wrmsrl_cstar((unsigned long)entry_SYSCALL32_ignore);
- wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
- wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
- wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
+ wrmsrq_cstar((unsigned long)entry_SYSCALL32_ignore);
+ wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
+ wrmsrq_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+ wrmsrq_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
}
/*
* Flags to clear on syscall; clear as much as possible
* to minimize user space-kernel interference.
*/
- wrmsrl(MSR_SYSCALL_MASK,
+ wrmsrq(MSR_SYSCALL_MASK,
X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
@@ -2198,7 +2279,7 @@ static inline void setup_getcpu(int cpu)
struct desc_struct d = { };
if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
- wrmsr(MSR_TSC_AUX, cpudata, 0);
+ wrmsrq(MSR_TSC_AUX, cpudata);
/* Store CPU and node number in limit. */
d.limit0 = cpudata;
@@ -2313,8 +2394,8 @@ void cpu_init(void)
memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
syscall_init();
- wrmsrl(MSR_FS_BASE, 0);
- wrmsrl(MSR_KERNEL_GS_BASE, 0);
+ wrmsrq(MSR_FS_BASE, 0);
+ wrmsrq(MSR_KERNEL_GS_BASE, 0);
barrier();
x2apic_setup();
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 51deb60a9d26..bc38b2d56f26 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -75,6 +75,15 @@ extern void check_null_seg_clears_base(struct cpuinfo_x86 *c);
void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, u16 die_id);
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c);
+#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
+struct amd_northbridge *amd_init_l3_cache(int index);
+#else
+static inline struct amd_northbridge *amd_init_l3_cache(int index)
+{
+ return NULL;
+}
+#endif
+
unsigned int aperfmperf_get_khz(int cpu);
void cpu_select_mitigations(void);
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index a2fbea0be535..46efcbd6afa4 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -28,6 +28,7 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_PKU, X86_FEATURE_XSAVE },
{ X86_FEATURE_MPX, X86_FEATURE_XSAVE },
{ X86_FEATURE_XGETBV1, X86_FEATURE_XSAVE },
+ { X86_FEATURE_APX, X86_FEATURE_XSAVE },
{ X86_FEATURE_CMOV, X86_FEATURE_FXSR },
{ X86_FEATURE_MMX, X86_FEATURE_FXSR },
{ X86_FEATURE_MMXEXT, X86_FEATURE_MMX },
@@ -82,8 +83,12 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_XFD, X86_FEATURE_XSAVES },
{ X86_FEATURE_XFD, X86_FEATURE_XGETBV1 },
{ X86_FEATURE_AMX_TILE, X86_FEATURE_XFD },
+ { X86_FEATURE_AMX_FP16, X86_FEATURE_AMX_TILE },
+ { X86_FEATURE_AMX_BF16, X86_FEATURE_AMX_TILE },
+ { X86_FEATURE_AMX_INT8, X86_FEATURE_AMX_TILE },
{ X86_FEATURE_SHSTK, X86_FEATURE_XSAVES },
{ X86_FEATURE_FRED, X86_FEATURE_LKGS },
+ { X86_FEATURE_SPEC_CTRL_SSBD, X86_FEATURE_SPEC_CTRL },
{}
};
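The new rows extend the table that lets the kernel cascade feature clearing:
when a feature is disabled, everything that lists it as a dependency is
disabled too (so clearing XSAVE now also drops APX, and clearing AMX_TILE
drops the AMX sub-features). A small fixpoint-style sketch of that cascade
over a toy feature set, not the kernel's X86_FEATURE_* machinery:

    #include <stdbool.h>
    #include <stdio.h>

    enum { XSAVE, APX, AMX_TILE, AMX_FP16, NFEAT };

    /* (feature, depends_on) pairs, mirroring the shape of cpuid_deps[]. */
    static const int deps[][2] = {
        { APX,      XSAVE    },
        { AMX_TILE, XSAVE    },
        { AMX_FP16, AMX_TILE },
    };

    static void clear_feature(bool caps[NFEAT], int feat)
    {
        bool changed = true;

        caps[feat] = false;

        /* Sweep until no enabled feature still depends on a cleared one. */
        while (changed) {
            changed = false;
            for (unsigned int i = 0; i < sizeof(deps) / sizeof(deps[0]); i++) {
                if (!caps[deps[i][1]] && caps[deps[i][0]]) {
                    caps[deps[i][0]] = false;
                    changed = true;
                }
            }
        }
    }

    int main(void)
    {
        bool caps[NFEAT] = { true, true, true, true };

        clear_feature(caps, XSAVE);
        for (int i = 0; i < NFEAT; i++)
            printf("feature %d: %s\n", i, caps[i] ? "on" : "off");
        return 0;
    }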
diff --git a/arch/x86/kernel/cpu/cpuid_0x2_table.c b/arch/x86/kernel/cpu/cpuid_0x2_table.c
new file mode 100644
index 000000000000..89bc8db5e9c6
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpuid_0x2_table.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/sizes.h>
+
+#include <asm/cpuid/types.h>
+
+#include "cpu.h"
+
+#define CACHE_ENTRY(_desc, _type, _size) \
+ [_desc] = { \
+ .c_type = (_type), \
+ .c_size = (_size) / SZ_1K, \
+ }
+
+#define TLB_ENTRY(_desc, _type, _entries) \
+ [_desc] = { \
+ .t_type = (_type), \
+ .entries = (_entries), \
+ }
+
+const struct leaf_0x2_table cpuid_0x2_table[256] = {
+ CACHE_ENTRY(0x06, CACHE_L1_INST, SZ_8K ), /* 4-way set assoc, 32 byte line size */
+ CACHE_ENTRY(0x08, CACHE_L1_INST, SZ_16K ), /* 4-way set assoc, 32 byte line size */
+ CACHE_ENTRY(0x09, CACHE_L1_INST, SZ_32K ), /* 4-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x0a, CACHE_L1_DATA, SZ_8K ), /* 2 way set assoc, 32 byte line size */
+ CACHE_ENTRY(0x0c, CACHE_L1_DATA, SZ_16K ), /* 4-way set assoc, 32 byte line size */
+ CACHE_ENTRY(0x0d, CACHE_L1_DATA, SZ_16K ), /* 4-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x0e, CACHE_L1_DATA, SZ_24K ), /* 6-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x21, CACHE_L2, SZ_256K ), /* 8-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x22, CACHE_L3, SZ_512K ), /* 4-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x23, CACHE_L3, SZ_1M ), /* 8-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x25, CACHE_L3, SZ_2M ), /* 8-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x29, CACHE_L3, SZ_4M ), /* 8-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x2c, CACHE_L1_DATA, SZ_32K ), /* 8-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x30, CACHE_L1_INST, SZ_32K ), /* 8-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x39, CACHE_L2, SZ_128K ), /* 4-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x3a, CACHE_L2, SZ_192K ), /* 6-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x3b, CACHE_L2, SZ_128K ), /* 2-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x3c, CACHE_L2, SZ_256K ), /* 4-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x3d, CACHE_L2, SZ_384K ), /* 6-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x3e, CACHE_L2, SZ_512K ), /* 4-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x3f, CACHE_L2, SZ_256K ), /* 2-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x41, CACHE_L2, SZ_128K ), /* 4-way set assoc, 32 byte line size */
+ CACHE_ENTRY(0x42, CACHE_L2, SZ_256K ), /* 4-way set assoc, 32 byte line size */
+ CACHE_ENTRY(0x43, CACHE_L2, SZ_512K ), /* 4-way set assoc, 32 byte line size */
+ CACHE_ENTRY(0x44, CACHE_L2, SZ_1M ), /* 4-way set assoc, 32 byte line size */
+ CACHE_ENTRY(0x45, CACHE_L2, SZ_2M ), /* 4-way set assoc, 32 byte line size */
+ CACHE_ENTRY(0x46, CACHE_L3, SZ_4M ), /* 4-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x47, CACHE_L3, SZ_8M ), /* 8-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x48, CACHE_L2, SZ_3M ), /* 12-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x49, CACHE_L3, SZ_4M ), /* 16-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x4a, CACHE_L3, SZ_6M ), /* 12-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x4b, CACHE_L3, SZ_8M ), /* 16-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x4c, CACHE_L3, SZ_12M ), /* 12-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x4d, CACHE_L3, SZ_16M ), /* 16-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x4e, CACHE_L2, SZ_6M ), /* 24-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x60, CACHE_L1_DATA, SZ_16K ), /* 8-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x66, CACHE_L1_DATA, SZ_8K ), /* 4-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x67, CACHE_L1_DATA, SZ_16K ), /* 4-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x68, CACHE_L1_DATA, SZ_32K ), /* 4-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x78, CACHE_L2, SZ_1M ), /* 4-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x79, CACHE_L2, SZ_128K ), /* 8-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x7a, CACHE_L2, SZ_256K ), /* 8-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x7b, CACHE_L2, SZ_512K ), /* 8-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x7c, CACHE_L2, SZ_1M ), /* 8-way set assoc, sectored cache, 64 byte line size */
+ CACHE_ENTRY(0x7d, CACHE_L2, SZ_2M ), /* 8-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x7f, CACHE_L2, SZ_512K ), /* 2-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x80, CACHE_L2, SZ_512K ), /* 8-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x82, CACHE_L2, SZ_256K ), /* 8-way set assoc, 32 byte line size */
+ CACHE_ENTRY(0x83, CACHE_L2, SZ_512K ), /* 8-way set assoc, 32 byte line size */
+ CACHE_ENTRY(0x84, CACHE_L2, SZ_1M ), /* 8-way set assoc, 32 byte line size */
+ CACHE_ENTRY(0x85, CACHE_L2, SZ_2M ), /* 8-way set assoc, 32 byte line size */
+ CACHE_ENTRY(0x86, CACHE_L2, SZ_512K ), /* 4-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0x87, CACHE_L2, SZ_1M ), /* 8-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xd0, CACHE_L3, SZ_512K ), /* 4-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xd1, CACHE_L3, SZ_1M ), /* 4-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xd2, CACHE_L3, SZ_2M ), /* 4-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xd6, CACHE_L3, SZ_1M ), /* 8-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xd7, CACHE_L3, SZ_2M ), /* 8-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xd8, CACHE_L3, SZ_4M ), /* 12-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xdc, CACHE_L3, SZ_2M ), /* 12-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xdd, CACHE_L3, SZ_4M ), /* 12-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xde, CACHE_L3, SZ_8M ), /* 12-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xe2, CACHE_L3, SZ_2M ), /* 16-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xe3, CACHE_L3, SZ_4M ), /* 16-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xe4, CACHE_L3, SZ_8M ), /* 16-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xea, CACHE_L3, SZ_12M ), /* 24-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xeb, CACHE_L3, SZ_18M ), /* 24-way set assoc, 64 byte line size */
+ CACHE_ENTRY(0xec, CACHE_L3, SZ_24M ), /* 24-way set assoc, 64 byte line size */
+
+ TLB_ENTRY( 0x01, TLB_INST_4K, 32 ), /* TLB_INST 4 KByte pages, 4-way set associative */
+ TLB_ENTRY( 0x02, TLB_INST_4M, 2 ), /* TLB_INST 4 MByte pages, full associative */
+ TLB_ENTRY( 0x03, TLB_DATA_4K, 64 ), /* TLB_DATA 4 KByte pages, 4-way set associative */
+ TLB_ENTRY( 0x04, TLB_DATA_4M, 8 ), /* TLB_DATA 4 MByte pages, 4-way set associative */
+ TLB_ENTRY( 0x05, TLB_DATA_4M, 32 ), /* TLB_DATA 4 MByte pages, 4-way set associative */
+ TLB_ENTRY( 0x0b, TLB_INST_4M, 4 ), /* TLB_INST 4 MByte pages, 4-way set associative */
+ TLB_ENTRY( 0x4f, TLB_INST_4K, 32 ), /* TLB_INST 4 KByte pages */
+ TLB_ENTRY( 0x50, TLB_INST_ALL, 64 ), /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */
+ TLB_ENTRY( 0x51, TLB_INST_ALL, 128 ), /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */
+ TLB_ENTRY( 0x52, TLB_INST_ALL, 256 ), /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */
+ TLB_ENTRY( 0x55, TLB_INST_2M_4M, 7 ), /* TLB_INST 2-MByte or 4-MByte pages, fully associative */
+ TLB_ENTRY( 0x56, TLB_DATA0_4M, 16 ), /* TLB_DATA0 4 MByte pages, 4-way set associative */
+ TLB_ENTRY( 0x57, TLB_DATA0_4K, 16 ), /* TLB_DATA0 4 KByte pages, 4-way associative */
+ TLB_ENTRY( 0x59, TLB_DATA0_4K, 16 ), /* TLB_DATA0 4 KByte pages, fully associative */
+ TLB_ENTRY( 0x5a, TLB_DATA0_2M_4M, 32 ), /* TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative */
+ TLB_ENTRY( 0x5b, TLB_DATA_4K_4M, 64 ), /* TLB_DATA 4 KByte and 4 MByte pages */
+ TLB_ENTRY( 0x5c, TLB_DATA_4K_4M, 128 ), /* TLB_DATA 4 KByte and 4 MByte pages */
+ TLB_ENTRY( 0x5d, TLB_DATA_4K_4M, 256 ), /* TLB_DATA 4 KByte and 4 MByte pages */
+ TLB_ENTRY( 0x61, TLB_INST_4K, 48 ), /* TLB_INST 4 KByte pages, full associative */
+ TLB_ENTRY( 0x63, TLB_DATA_1G_2M_4M, 4 ), /* TLB_DATA 1 GByte pages, 4-way set associative
+ * (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here) */
+ TLB_ENTRY( 0x6b, TLB_DATA_4K, 256 ), /* TLB_DATA 4 KByte pages, 8-way associative */
+ TLB_ENTRY( 0x6c, TLB_DATA_2M_4M, 128 ), /* TLB_DATA 2 MByte or 4 MByte pages, 8-way associative */
+ TLB_ENTRY( 0x6d, TLB_DATA_1G, 16 ), /* TLB_DATA 1 GByte pages, fully associative */
+ TLB_ENTRY( 0x76, TLB_INST_2M_4M, 8 ), /* TLB_INST 2-MByte or 4-MByte pages, fully associative */
+ TLB_ENTRY( 0xb0, TLB_INST_4K, 128 ), /* TLB_INST 4 KByte pages, 4-way set associative */
+ TLB_ENTRY( 0xb1, TLB_INST_2M_4M, 4 ), /* TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries */
+ TLB_ENTRY( 0xb2, TLB_INST_4K, 64 ), /* TLB_INST 4KByte pages, 4-way set associative */
+ TLB_ENTRY( 0xb3, TLB_DATA_4K, 128 ), /* TLB_DATA 4 KByte pages, 4-way set associative */
+ TLB_ENTRY( 0xb4, TLB_DATA_4K, 256 ), /* TLB_DATA 4 KByte pages, 4-way associative */
+ TLB_ENTRY( 0xb5, TLB_INST_4K, 64 ), /* TLB_INST 4 KByte pages, 8-way set associative */
+ TLB_ENTRY( 0xb6, TLB_INST_4K, 128 ), /* TLB_INST 4 KByte pages, 8-way set associative */
+ TLB_ENTRY( 0xba, TLB_DATA_4K, 64 ), /* TLB_DATA 4 KByte pages, 4-way associative */
+ TLB_ENTRY( 0xc0, TLB_DATA_4K_4M, 8 ), /* TLB_DATA 4 KByte and 4 MByte pages, 4-way associative */
+ TLB_ENTRY( 0xc1, STLB_4K_2M, 1024 ), /* STLB 4 KByte and 2 MByte pages, 8-way associative */
+ TLB_ENTRY( 0xc2, TLB_DATA_2M_4M, 16 ), /* TLB_DATA 2 MByte/4MByte pages, 4-way associative */
+ TLB_ENTRY( 0xca, STLB_4K, 512 ), /* STLB 4 KByte pages, 4-way associative */
+};
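Because cpuid_0x2_table[] is a designated-initializer array indexed by the
descriptor byte itself, consumers index it directly instead of the linear scan
the old intel_tlb_table[] (removed further down in intel.c) needed; descriptors
without an entry are simply all-zero. An illustrative fragment with the same
indexing idea (the struct here is simplified, only the two sample values come
from the table above):

    #include <stdint.h>
    #include <stdio.h>

    struct desc_info {
        uint8_t  type;    /* 0 means "no information for this descriptor" */
        uint16_t size_kb;
    };

    static const struct desc_info table[256] = {
        [0x2c] = { 1 /* L1 data */, 32    },  /* 32 KB, from the table above */
        [0xec] = { 3 /* L3      */, 24576 },  /* 24 MB, from the table above */
    };

    int main(void)
    {
        uint8_t desc = 0xec;

        if (table[desc].type)
            printf("desc %#x: type %u, %u KB\n",
                   desc, table[desc].type, table[desc].size_kb);
        return 0;
    }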
diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c
index 4a4118784c13..d69757246bde 100644
--- a/arch/x86/kernel/cpu/feat_ctl.c
+++ b/arch/x86/kernel/cpu/feat_ctl.c
@@ -4,6 +4,7 @@
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/msr-index.h>
+#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/vmx.h>
@@ -118,7 +119,7 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c)
bool enable_vmx;
u64 msr;
- if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) {
+ if (rdmsrq_safe(MSR_IA32_FEAT_CTL, &msr)) {
clear_cpu_cap(c, X86_FEATURE_VMX);
clear_cpu_cap(c, X86_FEATURE_SGX);
return;
@@ -165,7 +166,7 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c)
msr |= FEAT_CTL_SGX_LC_ENABLED;
}
- wrmsrl(MSR_IA32_FEAT_CTL, msr);
+ wrmsrq(MSR_IA32_FEAT_CTL, msr);
update_caps:
set_cpu_cap(c, X86_FEATURE_MSR_IA32_FEAT_CTL);
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index 6af4a4a90a52..2154f12766fb 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -15,6 +15,7 @@
#include <asm/cacheinfo.h>
#include <asm/spec-ctrl.h>
#include <asm/delay.h>
+#include <asm/msr.h>
#include "cpu.h"
@@ -96,7 +97,7 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
u64 val;
- rdmsrl(MSR_K7_HWCR, val);
+ rdmsrq(MSR_K7_HWCR, val);
if (!(val & BIT(24)))
pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
}
@@ -110,7 +111,7 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c)
* Try to cache the base value so further operations can
* avoid RMW. If that faults, do not enable SSBD.
*/
- if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+ if (!rdmsrq_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
setup_force_cpu_cap(X86_FEATURE_SSBD);
x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
@@ -194,7 +195,7 @@ static void init_hygon(struct cpuinfo_x86 *c)
init_hygon_cacheinfo(c);
if (cpu_has(c, X86_FEATURE_SVM)) {
- rdmsrl(MSR_VM_CR, vm_cr);
+ rdmsrq(MSR_VM_CR, vm_cr);
if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
clear_cpu_cap(c, X86_FEATURE_SVM);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index cdc9813871ef..076eaa41b8c8 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -6,6 +6,7 @@
#include <linux/minmax.h>
#include <linux/smp.h>
#include <linux/string.h>
+#include <linux/types.h>
#ifdef CONFIG_X86_64
#include <linux/topology.h>
@@ -15,6 +16,7 @@
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/cpu.h>
+#include <asm/cpuid/api.h>
#include <asm/hwcap2.h>
#include <asm/intel-family.h>
#include <asm/microcode.h>
@@ -157,7 +159,7 @@ static void detect_tme_early(struct cpuinfo_x86 *c)
u64 tme_activate;
int keyid_bits;
- rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
+ rdmsrq(MSR_IA32_TME_ACTIVATE, tme_activate);
if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
pr_info_once("x86/tme: not enabled by BIOS\n");
@@ -299,7 +301,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
* string flag and enhanced fast string capabilities accordingly.
*/
if (c->x86_vfm >= INTEL_PENTIUM_M_DOTHAN) {
- rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+ rdmsrq(MSR_IA32_MISC_ENABLE, misc_enable);
if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
/* X86_FEATURE_ERMS is set based on CPUID */
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
@@ -488,7 +490,7 @@ static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
u64 msr;
- if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
+ if (!rdmsrq_safe(MSR_PLATFORM_INFO, &msr)) {
if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
}
@@ -498,7 +500,7 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
u64 msr;
- if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
+ if (rdmsrq_safe(MSR_MISC_FEATURES_ENABLES, &msr))
return;
/* Clear all MISC features */
@@ -509,7 +511,7 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c)
probe_xeon_phi_r3mwait(c);
msr = this_cpu_read(msr_misc_features_shadow);
- wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
+ wrmsrq(MSR_MISC_FEATURES_ENABLES, msr);
}
/*
@@ -646,103 +648,11 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
}
#endif
-#define TLB_INST_4K 0x01
-#define TLB_INST_4M 0x02
-#define TLB_INST_2M_4M 0x03
-
-#define TLB_INST_ALL 0x05
-#define TLB_INST_1G 0x06
-
-#define TLB_DATA_4K 0x11
-#define TLB_DATA_4M 0x12
-#define TLB_DATA_2M_4M 0x13
-#define TLB_DATA_4K_4M 0x14
-
-#define TLB_DATA_1G 0x16
-#define TLB_DATA_1G_2M_4M 0x17
-
-#define TLB_DATA0_4K 0x21
-#define TLB_DATA0_4M 0x22
-#define TLB_DATA0_2M_4M 0x23
-
-#define STLB_4K 0x41
-#define STLB_4K_2M 0x42
-
-/*
- * All of leaf 0x2's one-byte TLB descriptors implies the same number of
- * entries for their respective TLB types. The 0x63 descriptor is an
- * exception: it implies 4 dTLB entries for 1GB pages 32 dTLB entries
- * for 2MB or 4MB pages. Encode descriptor 0x63 dTLB entry count for
- * 2MB/4MB pages here, as its count for dTLB 1GB pages is already at the
- * intel_tlb_table[] mapping.
- */
-#define TLB_0x63_2M_4M_ENTRIES 32
-
-struct _tlb_table {
- unsigned char descriptor;
- char tlb_type;
- unsigned int entries;
-};
-
-static const struct _tlb_table intel_tlb_table[] = {
- { 0x01, TLB_INST_4K, 32}, /* TLB_INST 4 KByte pages, 4-way set associative */
- { 0x02, TLB_INST_4M, 2}, /* TLB_INST 4 MByte pages, full associative */
- { 0x03, TLB_DATA_4K, 64}, /* TLB_DATA 4 KByte pages, 4-way set associative */
- { 0x04, TLB_DATA_4M, 8}, /* TLB_DATA 4 MByte pages, 4-way set associative */
- { 0x05, TLB_DATA_4M, 32}, /* TLB_DATA 4 MByte pages, 4-way set associative */
- { 0x0b, TLB_INST_4M, 4}, /* TLB_INST 4 MByte pages, 4-way set associative */
- { 0x4f, TLB_INST_4K, 32}, /* TLB_INST 4 KByte pages */
- { 0x50, TLB_INST_ALL, 64}, /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */
- { 0x51, TLB_INST_ALL, 128}, /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */
- { 0x52, TLB_INST_ALL, 256}, /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */
- { 0x55, TLB_INST_2M_4M, 7}, /* TLB_INST 2-MByte or 4-MByte pages, fully associative */
- { 0x56, TLB_DATA0_4M, 16}, /* TLB_DATA0 4 MByte pages, 4-way set associative */
- { 0x57, TLB_DATA0_4K, 16}, /* TLB_DATA0 4 KByte pages, 4-way associative */
- { 0x59, TLB_DATA0_4K, 16}, /* TLB_DATA0 4 KByte pages, fully associative */
- { 0x5a, TLB_DATA0_2M_4M, 32}, /* TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative */
- { 0x5b, TLB_DATA_4K_4M, 64}, /* TLB_DATA 4 KByte and 4 MByte pages */
- { 0x5c, TLB_DATA_4K_4M, 128}, /* TLB_DATA 4 KByte and 4 MByte pages */
- { 0x5d, TLB_DATA_4K_4M, 256}, /* TLB_DATA 4 KByte and 4 MByte pages */
- { 0x61, TLB_INST_4K, 48}, /* TLB_INST 4 KByte pages, full associative */
- { 0x63, TLB_DATA_1G_2M_4M, 4}, /* TLB_DATA 1 GByte pages, 4-way set associative
- * (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here) */
- { 0x6b, TLB_DATA_4K, 256}, /* TLB_DATA 4 KByte pages, 8-way associative */
- { 0x6c, TLB_DATA_2M_4M, 128}, /* TLB_DATA 2 MByte or 4 MByte pages, 8-way associative */
- { 0x6d, TLB_DATA_1G, 16}, /* TLB_DATA 1 GByte pages, fully associative */
- { 0x76, TLB_INST_2M_4M, 8}, /* TLB_INST 2-MByte or 4-MByte pages, fully associative */
- { 0xb0, TLB_INST_4K, 128}, /* TLB_INST 4 KByte pages, 4-way set associative */
- { 0xb1, TLB_INST_2M_4M, 4}, /* TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries */
- { 0xb2, TLB_INST_4K, 64}, /* TLB_INST 4KByte pages, 4-way set associative */
- { 0xb3, TLB_DATA_4K, 128}, /* TLB_DATA 4 KByte pages, 4-way set associative */
- { 0xb4, TLB_DATA_4K, 256}, /* TLB_DATA 4 KByte pages, 4-way associative */
- { 0xb5, TLB_INST_4K, 64}, /* TLB_INST 4 KByte pages, 8-way set associative */
- { 0xb6, TLB_INST_4K, 128}, /* TLB_INST 4 KByte pages, 8-way set associative */
- { 0xba, TLB_DATA_4K, 64}, /* TLB_DATA 4 KByte pages, 4-way associative */
- { 0xc0, TLB_DATA_4K_4M, 8}, /* TLB_DATA 4 KByte and 4 MByte pages, 4-way associative */
- { 0xc1, STLB_4K_2M, 1024}, /* STLB 4 KByte and 2 MByte pages, 8-way associative */
- { 0xc2, TLB_DATA_2M_4M, 16}, /* TLB_DATA 2 MByte/4MByte pages, 4-way associative */
- { 0xca, STLB_4K, 512}, /* STLB 4 KByte pages, 4-way associative */
- { 0x00, 0, 0 }
-};
-
-static void intel_tlb_lookup(const unsigned char desc)
+static void intel_tlb_lookup(const struct leaf_0x2_table *desc)
{
- unsigned int entries;
- unsigned char k;
-
- if (desc == 0)
- return;
-
- /* look up this descriptor in the table */
- for (k = 0; intel_tlb_table[k].descriptor != desc &&
- intel_tlb_table[k].descriptor != 0; k++)
- ;
+ short entries = desc->entries;
- if (intel_tlb_table[k].tlb_type == 0)
- return;
-
- entries = intel_tlb_table[k].entries;
- switch (intel_tlb_table[k].tlb_type) {
+ switch (desc->t_type) {
case STLB_4K:
tlb_lli_4k = max(tlb_lli_4k, entries);
tlb_lld_4k = max(tlb_lld_4k, entries);
@@ -799,28 +709,16 @@ static void intel_tlb_lookup(const unsigned char desc)
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
- int i, j, n;
- unsigned int regs[4];
- unsigned char *desc = (unsigned char *)regs;
+ const struct leaf_0x2_table *desc;
+ union leaf_0x2_regs regs;
+ u8 *ptr;
if (c->cpuid_level < 2)
return;
- /* Number of times to iterate */
- n = cpuid_eax(2) & 0xFF;
-
- for (i = 0 ; i < n ; i++) {
- cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
-
- /* If bit 31 is set, this is an unknown format */
- for (j = 0 ; j < 4 ; j++)
- if (regs[j] & (1 << 31))
- regs[j] = 0;
-
- /* Byte 0 is level count, not a descriptor */
- for (j = 1 ; j < 16 ; j++)
- intel_tlb_lookup(desc[j]);
- }
+ cpuid_leaf_0x2(&regs);
+ for_each_cpuid_0x2_desc(regs, ptr, desc)
+ intel_tlb_lookup(desc);
}
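The new cpuid_leaf_0x2()/for_each_cpuid_0x2_desc() helpers hide the quirks the
removed loop handled by hand: byte 0 of EAX is an iteration count rather than
a descriptor, and a register with bit 31 set carries no descriptors at all. A
stand-alone user-space walk of the same leaf, using the compiler's <cpuid.h>
intrinsic rather than the kernel helpers:

    #include <cpuid.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int regs[4];

        if (!__get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]))
            return 1;

        /* A register with bit 31 set holds no valid descriptor bytes. */
        for (int r = 0; r < 4; r++)
            if (regs[r] & (1u << 31))
                regs[r] = 0;

        /* Byte 0 of EAX is the legacy iteration count, not a descriptor. */
        const uint8_t *bytes = (const uint8_t *)regs;
        for (int i = 1; i < 16; i++)
            if (bytes[i])
                printf("descriptor 0x%02x\n", bytes[i]);

        return 0;
    }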
static const struct cpu_dev intel_cpu_dev = {
diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c
index 30b1d63b97f3..bc7671f920a7 100644
--- a/arch/x86/kernel/cpu/intel_epb.c
+++ b/arch/x86/kernel/cpu/intel_epb.c
@@ -79,7 +79,7 @@ static int intel_epb_save(void)
{
u64 epb;
- rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+ rdmsrq(MSR_IA32_ENERGY_PERF_BIAS, epb);
/*
* Ensure that saved_epb will always be nonzero after this write even if
* the EPB value read from the MSR is 0.
@@ -94,7 +94,7 @@ static void intel_epb_restore(void)
u64 val = this_cpu_read(saved_epb);
u64 epb;
- rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+ rdmsrq(MSR_IA32_ENERGY_PERF_BIAS, epb);
if (val) {
val &= EPB_MASK;
} else {
@@ -111,7 +111,7 @@ static void intel_epb_restore(void)
pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
}
}
- wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val);
+ wrmsrq(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val);
}
static struct syscore_ops intel_epb_syscore_ops = {
@@ -135,7 +135,7 @@ static ssize_t energy_perf_bias_show(struct device *dev,
u64 epb;
int ret;
- ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret < 0)
return ret;
@@ -157,11 +157,11 @@ static ssize_t energy_perf_bias_store(struct device *dev,
else if (kstrtou64(buf, 0, &val) || val > MAX_EPB)
return -EINVAL;
- ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret < 0)
return ret;
- ret = wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS,
+ ret = wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS,
(epb & ~EPB_MASK) | val);
if (ret < 0)
return ret;
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 1075a90141da..9d852c3b2cb5 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -662,12 +662,12 @@ static void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
return;
}
- rdmsrl(MSR_K7_HWCR, hwcr);
+ rdmsrq(MSR_K7_HWCR, hwcr);
/* McStatusWrEn has to be set */
need_toggle = !(hwcr & BIT(18));
if (need_toggle)
- wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
+ wrmsrq(MSR_K7_HWCR, hwcr | BIT(18));
/* Clear CntP bit safely */
for (i = 0; i < num_msrs; i++)
@@ -675,7 +675,7 @@ static void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
/* restore old settings */
if (need_toggle)
- wrmsrl(MSR_K7_HWCR, hwcr);
+ wrmsrq(MSR_K7_HWCR, hwcr);
}
/* cpu init entry point, called from mce.c with preempt off */
@@ -805,12 +805,12 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
}
if (mce_flags.smca) {
- rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m->ipid);
+ rdmsrq(MSR_AMD64_SMCA_MCx_IPID(bank), m->ipid);
if (m->status & MCI_STATUS_SYNDV) {
- rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m->synd);
- rdmsrl(MSR_AMD64_SMCA_MCx_SYND1(bank), err.vendor.amd.synd1);
- rdmsrl(MSR_AMD64_SMCA_MCx_SYND2(bank), err.vendor.amd.synd2);
+ rdmsrq(MSR_AMD64_SMCA_MCx_SYND(bank), m->synd);
+ rdmsrq(MSR_AMD64_SMCA_MCx_SYND1(bank), err.vendor.amd.synd1);
+ rdmsrq(MSR_AMD64_SMCA_MCx_SYND2(bank), err.vendor.amd.synd2);
}
}
@@ -834,16 +834,16 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
{
u64 status, addr = 0;
- rdmsrl(msr_stat, status);
+ rdmsrq(msr_stat, status);
if (!(status & MCI_STATUS_VAL))
return false;
if (status & MCI_STATUS_ADDRV)
- rdmsrl(msr_addr, addr);
+ rdmsrq(msr_addr, addr);
__log_error(bank, status, addr, misc);
- wrmsrl(msr_stat, 0);
+ wrmsrq(msr_stat, 0);
return status & MCI_STATUS_DEFERRED;
}
@@ -862,7 +862,7 @@ static bool _log_error_deferred(unsigned int bank, u32 misc)
return true;
/* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */
- wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
+ wrmsrq(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
return true;
}
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index f6fd71b64b66..e9b3c5d4a52e 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -121,7 +121,7 @@ void mce_prep_record_common(struct mce *m)
{
m->cpuid = cpuid_eax(1);
m->cpuvendor = boot_cpu_data.x86_vendor;
- m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
+ m->mcgcap = native_rdmsrq(MSR_IA32_MCG_CAP);
/* need the internal __ version to avoid deadlocks */
m->time = __ktime_get_real_seconds();
}
@@ -388,9 +388,9 @@ void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr)
}
/* MSR access wrappers used for error injection */
-noinstr u64 mce_rdmsrl(u32 msr)
+noinstr u64 mce_rdmsrq(u32 msr)
{
- DECLARE_ARGS(val, low, high);
+ EAX_EDX_DECLARE_ARGS(val, low, high);
if (__this_cpu_read(injectm.finished)) {
int offset;
@@ -423,7 +423,7 @@ noinstr u64 mce_rdmsrl(u32 msr)
return EAX_EDX_VAL(val, low, high);
}
-static noinstr void mce_wrmsrl(u32 msr, u64 v)
+static noinstr void mce_wrmsrq(u32 msr, u64 v)
{
u32 low, high;
@@ -444,7 +444,7 @@ static noinstr void mce_wrmsrl(u32 msr, u64 v)
low = (u32)v;
high = (u32)(v >> 32);
- /* See comment in mce_rdmsrl() */
+ /* See comment in mce_rdmsrq() */
asm volatile("1: wrmsr\n"
"2:\n"
_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR_IN_MCE)
@@ -468,7 +468,7 @@ static noinstr void mce_gather_info(struct mce_hw_err *err, struct pt_regs *regs
instrumentation_end();
m = &err->m;
- m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
+ m->mcgstatus = mce_rdmsrq(MSR_IA32_MCG_STATUS);
if (regs) {
/*
* Get the address of the instruction at the time of
@@ -488,7 +488,7 @@ static noinstr void mce_gather_info(struct mce_hw_err *err, struct pt_regs *regs
}
/* Use accurate RIP reporting if available. */
if (mca_cfg.rip_msr)
- m->ip = mce_rdmsrl(mca_cfg.rip_msr);
+ m->ip = mce_rdmsrq(mca_cfg.rip_msr);
}
}
@@ -684,10 +684,10 @@ static noinstr void mce_read_aux(struct mce_hw_err *err, int i)
struct mce *m = &err->m;
if (m->status & MCI_STATUS_MISCV)
- m->misc = mce_rdmsrl(mca_msr_reg(i, MCA_MISC));
+ m->misc = mce_rdmsrq(mca_msr_reg(i, MCA_MISC));
if (m->status & MCI_STATUS_ADDRV) {
- m->addr = mce_rdmsrl(mca_msr_reg(i, MCA_ADDR));
+ m->addr = mce_rdmsrq(mca_msr_reg(i, MCA_ADDR));
/*
* Mask the reported address by the reported granularity.
@@ -702,12 +702,12 @@ static noinstr void mce_read_aux(struct mce_hw_err *err, int i)
}
if (mce_flags.smca) {
- m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));
+ m->ipid = mce_rdmsrq(MSR_AMD64_SMCA_MCx_IPID(i));
if (m->status & MCI_STATUS_SYNDV) {
- m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
- err->vendor.amd.synd1 = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND1(i));
- err->vendor.amd.synd2 = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND2(i));
+ m->synd = mce_rdmsrq(MSR_AMD64_SMCA_MCx_SYND(i));
+ err->vendor.amd.synd1 = mce_rdmsrq(MSR_AMD64_SMCA_MCx_SYND1(i));
+ err->vendor.amd.synd2 = mce_rdmsrq(MSR_AMD64_SMCA_MCx_SYND2(i));
}
}
}
@@ -753,7 +753,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
m->bank = i;
barrier();
- m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
+ m->status = mce_rdmsrq(mca_msr_reg(i, MCA_STATUS));
/*
* Update storm tracking here, before checking for the
@@ -829,7 +829,7 @@ clear_it:
/*
* Clear state for this bank.
*/
- mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
+ mce_wrmsrq(mca_msr_reg(i, MCA_STATUS), 0);
}
/*
@@ -887,8 +887,8 @@ quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
*/
static noinstr bool quirk_skylake_repmov(void)
{
- u64 mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
- u64 misc_enable = mce_rdmsrl(MSR_IA32_MISC_ENABLE);
+ u64 mcgstatus = mce_rdmsrq(MSR_IA32_MCG_STATUS);
+ u64 misc_enable = mce_rdmsrq(MSR_IA32_MISC_ENABLE);
u64 mc1_status;
/*
@@ -899,7 +899,7 @@ static noinstr bool quirk_skylake_repmov(void)
!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING))
return false;
- mc1_status = mce_rdmsrl(MSR_IA32_MCx_STATUS(1));
+ mc1_status = mce_rdmsrq(MSR_IA32_MCx_STATUS(1));
/* Check for a software-recoverable data fetch error. */
if ((mc1_status &
@@ -910,8 +910,8 @@ static noinstr bool quirk_skylake_repmov(void)
MCI_STATUS_ADDRV | MCI_STATUS_MISCV |
MCI_STATUS_AR | MCI_STATUS_S)) {
misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
- mce_wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
- mce_wrmsrl(MSR_IA32_MCx_STATUS(1), 0);
+ mce_wrmsrq(MSR_IA32_MISC_ENABLE, misc_enable);
+ mce_wrmsrq(MSR_IA32_MCx_STATUS(1), 0);
instrumentation_begin();
pr_err_once("Erratum detected, disable fast string copy instructions.\n");
@@ -955,7 +955,7 @@ static __always_inline int mce_no_way_out(struct mce_hw_err *err, char **msg, un
int i;
for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
- m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
+ m->status = mce_rdmsrq(mca_msr_reg(i, MCA_STATUS));
if (!(m->status & MCI_STATUS_VAL))
continue;
@@ -1274,7 +1274,7 @@ static __always_inline void mce_clear_state(unsigned long *toclear)
for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
if (arch_test_bit(i, toclear))
- mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
+ mce_wrmsrq(mca_msr_reg(i, MCA_STATUS), 0);
}
}
@@ -1298,7 +1298,7 @@ static noinstr bool mce_check_crashing_cpu(void)
(crashing_cpu != -1 && crashing_cpu != cpu)) {
u64 mcgstatus;
- mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS);
+ mcgstatus = native_rdmsrq(MSR_IA32_MCG_STATUS);
if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
if (mcgstatus & MCG_STATUS_LMCES)
@@ -1306,7 +1306,7 @@ static noinstr bool mce_check_crashing_cpu(void)
}
if (mcgstatus & MCG_STATUS_RIPV) {
- __wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
+ native_wrmsrq(MSR_IA32_MCG_STATUS, 0);
return true;
}
}
@@ -1335,7 +1335,7 @@ __mc_scan_banks(struct mce_hw_err *err, struct pt_regs *regs,
m->addr = 0;
m->bank = i;
- m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
+ m->status = mce_rdmsrq(mca_msr_reg(i, MCA_STATUS));
if (!(m->status & MCI_STATUS_VAL))
continue;
@@ -1693,7 +1693,7 @@ out:
instrumentation_end();
clear:
- mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+ mce_wrmsrq(MSR_IA32_MCG_STATUS, 0);
}
EXPORT_SYMBOL_GPL(do_machine_check);
@@ -1822,7 +1822,7 @@ static void __mcheck_cpu_cap_init(void)
u64 cap;
u8 b;
- rdmsrl(MSR_IA32_MCG_CAP, cap);
+ rdmsrq(MSR_IA32_MCG_CAP, cap);
b = cap & MCG_BANKCNT_MASK;
@@ -1863,7 +1863,7 @@ static void __mcheck_cpu_init_generic(void)
cr4_set_bits(X86_CR4_MCE);
- rdmsrl(MSR_IA32_MCG_CAP, cap);
+ rdmsrq(MSR_IA32_MCG_CAP, cap);
if (cap & MCG_CTL_P)
wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
}
@@ -1878,8 +1878,8 @@ static void __mcheck_cpu_init_clear_banks(void)
if (!b->init)
continue;
- wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
- wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
+ wrmsrq(mca_msr_reg(i, MCA_CTL), b->ctl);
+ wrmsrq(mca_msr_reg(i, MCA_STATUS), 0);
}
}
@@ -1905,7 +1905,7 @@ static void __mcheck_cpu_check_banks(void)
if (!b->init)
continue;
- rdmsrl(mca_msr_reg(i, MCA_CTL), msrval);
+ rdmsrq(mca_msr_reg(i, MCA_CTL), msrval);
b->init = !!msrval;
}
}
@@ -2436,7 +2436,7 @@ static void mce_disable_error_reporting(void)
struct mce_bank *b = &mce_banks[i];
if (b->init)
- wrmsrl(mca_msr_reg(i, MCA_CTL), 0);
+ wrmsrq(mca_msr_reg(i, MCA_CTL), 0);
}
return;
}
@@ -2786,7 +2786,7 @@ static void mce_reenable_cpu(void)
struct mce_bank *b = &mce_banks[i];
if (b->init)
- wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
+ wrmsrq(mca_msr_reg(i, MCA_CTL), b->ctl);
}
}
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 06e3cf7229ce..d02c4f556cd0 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -24,10 +24,11 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
#include <asm/apic.h>
#include <asm/irq_vectors.h>
#include <asm/mce.h>
+#include <asm/msr.h>
#include <asm/nmi.h>
#include <asm/smp.h>
@@ -475,27 +476,27 @@ static void prepare_msrs(void *info)
struct mce m = *(struct mce *)info;
u8 b = m.bank;
- wrmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
+ wrmsrq(MSR_IA32_MCG_STATUS, m.mcgstatus);
if (boot_cpu_has(X86_FEATURE_SMCA)) {
if (m.inject_flags == DFR_INT_INJ) {
- wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status);
- wrmsrl(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr);
+ wrmsrq(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status);
+ wrmsrq(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr);
} else {
- wrmsrl(MSR_AMD64_SMCA_MCx_STATUS(b), m.status);
- wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr);
+ wrmsrq(MSR_AMD64_SMCA_MCx_STATUS(b), m.status);
+ wrmsrq(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr);
}
- wrmsrl(MSR_AMD64_SMCA_MCx_SYND(b), m.synd);
+ wrmsrq(MSR_AMD64_SMCA_MCx_SYND(b), m.synd);
if (m.misc)
- wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), m.misc);
+ wrmsrq(MSR_AMD64_SMCA_MCx_MISC(b), m.misc);
} else {
- wrmsrl(MSR_IA32_MCx_STATUS(b), m.status);
- wrmsrl(MSR_IA32_MCx_ADDR(b), m.addr);
+ wrmsrq(MSR_IA32_MCx_STATUS(b), m.status);
+ wrmsrq(MSR_IA32_MCx_ADDR(b), m.addr);
if (m.misc)
- wrmsrl(MSR_IA32_MCx_MISC(b), m.misc);
+ wrmsrq(MSR_IA32_MCx_MISC(b), m.misc);
}
}
@@ -589,7 +590,7 @@ static int inj_bank_set(void *data, u64 val)
u64 cap;
/* Get bank count on target CPU so we can handle non-uniform values. */
- rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
+ rdmsrq_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
n_banks = cap & MCG_BANKCNT_MASK;
if (val >= n_banks) {
@@ -613,7 +614,7 @@ static int inj_bank_set(void *data, u64 val)
if (cpu_feature_enabled(X86_FEATURE_SMCA)) {
u64 ipid;
- if (rdmsrl_on_cpu(m->extcpu, MSR_AMD64_SMCA_MCx_IPID(val), &ipid)) {
+ if (rdmsrq_on_cpu(m->extcpu, MSR_AMD64_SMCA_MCx_IPID(val), &ipid)) {
pr_err("Error reading IPID on CPU%d\n", m->extcpu);
return -EINVAL;
}
@@ -741,15 +742,15 @@ static void check_hw_inj_possible(void)
u64 status = MCI_STATUS_VAL, ipid;
/* Check whether bank is populated */
- rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), ipid);
+ rdmsrq(MSR_AMD64_SMCA_MCx_IPID(bank), ipid);
if (!ipid)
continue;
toggle_hw_mce_inject(cpu, true);
- wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status);
- rdmsrl_safe(mca_msr_reg(bank, MCA_STATUS), &status);
- wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), 0);
+ wrmsrq_safe(mca_msr_reg(bank, MCA_STATUS), status);
+ rdmsrq_safe(mca_msr_reg(bank, MCA_STATUS), &status);
+ wrmsrq_safe(mca_msr_reg(bank, MCA_STATUS), 0);
if (!status) {
hw_injection_possible = false;
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index f863df0ff42c..efcf21e9552e 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -94,7 +94,7 @@ static bool cmci_supported(int *banks)
if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6)
return false;
- rdmsrl(MSR_IA32_MCG_CAP, cap);
+ rdmsrq(MSR_IA32_MCG_CAP, cap);
*banks = min_t(unsigned, MAX_NR_BANKS, cap & MCG_BANKCNT_MASK);
return !!(cap & MCG_CMCI_P);
}
@@ -106,7 +106,7 @@ static bool lmce_supported(void)
if (mca_cfg.lmce_disabled)
return false;
- rdmsrl(MSR_IA32_MCG_CAP, tmp);
+ rdmsrq(MSR_IA32_MCG_CAP, tmp);
/*
* LMCE depends on recovery support in the processor. Hence both
@@ -123,7 +123,7 @@ static bool lmce_supported(void)
* WARN if the MSR isn't locked as init_ia32_feat_ctl() unconditionally
* locks the MSR in the event that it wasn't already locked by BIOS.
*/
- rdmsrl(MSR_IA32_FEAT_CTL, tmp);
+ rdmsrq(MSR_IA32_FEAT_CTL, tmp);
if (WARN_ON_ONCE(!(tmp & FEAT_CTL_LOCKED)))
return false;
@@ -141,9 +141,9 @@ static void cmci_set_threshold(int bank, int thresh)
u64 val;
raw_spin_lock_irqsave(&cmci_discover_lock, flags);
- rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ rdmsrq(MSR_IA32_MCx_CTL2(bank), val);
val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
- wrmsrl(MSR_IA32_MCx_CTL2(bank), val | thresh);
+ wrmsrq(MSR_IA32_MCx_CTL2(bank), val | thresh);
raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
@@ -184,7 +184,7 @@ static bool cmci_skip_bank(int bank, u64 *val)
if (test_bit(bank, mce_banks_ce_disabled))
return true;
- rdmsrl(MSR_IA32_MCx_CTL2(bank), *val);
+ rdmsrq(MSR_IA32_MCx_CTL2(bank), *val);
/* Already owned by someone else? */
if (*val & MCI_CTL2_CMCI_EN) {
@@ -232,8 +232,8 @@ static void cmci_claim_bank(int bank, u64 val, int bios_zero_thresh, int *bios_w
struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
val |= MCI_CTL2_CMCI_EN;
- wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
- rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ wrmsrq(MSR_IA32_MCx_CTL2(bank), val);
+ rdmsrq(MSR_IA32_MCx_CTL2(bank), val);
/* If the enable bit did not stick, this bank should be polled. */
if (!(val & MCI_CTL2_CMCI_EN)) {
@@ -324,9 +324,9 @@ static void __cmci_disable_bank(int bank)
if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
return;
- rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ rdmsrq(MSR_IA32_MCx_CTL2(bank), val);
val &= ~MCI_CTL2_CMCI_EN;
- wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ wrmsrq(MSR_IA32_MCx_CTL2(bank), val);
__clear_bit(bank, this_cpu_ptr(mce_banks_owned));
if ((val & MCI_CTL2_CMCI_THRESHOLD_MASK) == CMCI_STORM_THRESHOLD)
@@ -430,10 +430,10 @@ void intel_init_lmce(void)
if (!lmce_supported())
return;
- rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
+ rdmsrq(MSR_IA32_MCG_EXT_CTL, val);
if (!(val & MCG_EXT_CTL_LMCE_EN))
- wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
+ wrmsrq(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
}
void intel_clear_lmce(void)
@@ -443,9 +443,9 @@ void intel_clear_lmce(void)
if (!lmce_supported())
return;
- rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
+ rdmsrq(MSR_IA32_MCG_EXT_CTL, val);
val &= ~MCG_EXT_CTL_LMCE_EN;
- wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
+ wrmsrq(MSR_IA32_MCG_EXT_CTL, val);
}
/*
@@ -460,10 +460,10 @@ static void intel_imc_init(struct cpuinfo_x86 *c)
case INTEL_SANDYBRIDGE_X:
case INTEL_IVYBRIDGE_X:
case INTEL_HASWELL_X:
- if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control))
+ if (rdmsrq_safe(MSR_ERROR_CONTROL, &error_control))
return;
error_control |= 2;
- wrmsrl_safe(MSR_ERROR_CONTROL, error_control);
+ wrmsrq_safe(MSR_ERROR_CONTROL, error_control);
break;
}
}
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 95a504ece43e..b5ba598e54cb 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -312,7 +312,7 @@ static __always_inline void pentium_machine_check(struct pt_regs *regs) {}
static __always_inline void winchip_machine_check(struct pt_regs *regs) {}
#endif
-noinstr u64 mce_rdmsrl(u32 msr);
+noinstr u64 mce_rdmsrq(u32 msr);
static __always_inline u32 mca_msr_reg(int bank, enum mca_msr reg)
{
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index b61028cf5c8a..097e39327942 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -199,6 +199,12 @@ static bool need_sha_check(u32 cur_rev)
case 0xa70c0: return cur_rev <= 0xa70C009; break;
case 0xaa001: return cur_rev <= 0xaa00116; break;
case 0xaa002: return cur_rev <= 0xaa00218; break;
+ case 0xb0021: return cur_rev <= 0xb002146; break;
+ case 0xb1010: return cur_rev <= 0xb101046; break;
+ case 0xb2040: return cur_rev <= 0xb204031; break;
+ case 0xb4040: return cur_rev <= 0xb404031; break;
+ case 0xb6000: return cur_rev <= 0xb600031; break;
+ case 0xb7000: return cur_rev <= 0xb700031; break;
default: break;
}
@@ -211,11 +217,9 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi
{
struct patch_digest *pd = NULL;
u8 digest[SHA256_DIGEST_SIZE];
- struct sha256_state s;
int i;
- if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
- x86_family(bsp_cpuid_1_eax) > 0x19)
+ if (x86_family(bsp_cpuid_1_eax) < 0x17)
return true;
if (!need_sha_check(cur_rev))
@@ -230,9 +234,7 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi
return false;
}
- sha256_init(&s);
- sha256_update(&s, data, len);
- sha256_final(&s, digest);
+ sha256(data, len, digest);
if (memcmp(digest, pd->sha256, sizeof(digest))) {
pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);
@@ -602,7 +604,7 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
return false;
- native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
+ native_wrmsrq(MSR_AMD64_PATCH_LOADER, p_addr);
if (x86_family(bsp_cpuid_1_eax) == 0x17) {
unsigned long p_addr_end = p_addr + psize - 1;
@@ -1093,15 +1095,17 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
static int __init save_microcode_in_initrd(void)
{
- unsigned int cpuid_1_eax = native_cpuid_eax(1);
struct cpuinfo_x86 *c = &boot_cpu_data;
struct cont_desc desc = { 0 };
+ unsigned int cpuid_1_eax;
enum ucode_state ret;
struct cpio_data cp;
- if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+ if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
return 0;
+ cpuid_1_eax = native_cpuid_eax(1);
+
if (!find_blobs_in_containers(&cp))
return -EINVAL;
@@ -1171,11 +1175,18 @@ static void microcode_fini_cpu_amd(int cpu)
uci->mc = NULL;
}
+static void finalize_late_load_amd(int result)
+{
+ if (result)
+ cleanup();
+}
+
static struct microcode_ops microcode_amd_ops = {
.request_microcode_fw = request_microcode_amd,
.collect_cpu_info = collect_cpu_info_amd,
.apply_microcode = apply_microcode_amd,
.microcode_fini_cpu = microcode_fini_cpu_amd,
+ .finalize_late_load = finalize_late_load_amd,
.nmi_safe = true,
};
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index b3658d11e7b6..fe50eb5b7c4a 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -37,12 +37,13 @@
#include <asm/perf_event.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
+#include <asm/msr.h>
#include <asm/setup.h>
#include "internal.h"
-static struct microcode_ops *microcode_ops;
-bool dis_ucode_ldr = true;
+static struct microcode_ops *microcode_ops;
+static bool dis_ucode_ldr = false;
bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
module_param(force_minrev, bool, S_IRUSR | S_IWUSR);
@@ -84,6 +85,9 @@ static bool amd_check_current_patch_level(void)
u32 lvl, dummy, i;
u32 *levels;
+ if (x86_cpuid_vendor() != X86_VENDOR_AMD)
+ return false;
+
native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
levels = final_levels;
@@ -95,27 +99,29 @@ static bool amd_check_current_patch_level(void)
return false;
}
-static bool __init check_loader_disabled_bsp(void)
+bool __init microcode_loader_disabled(void)
{
- static const char *__dis_opt_str = "dis_ucode_ldr";
- const char *cmdline = boot_command_line;
- const char *option = __dis_opt_str;
+ if (dis_ucode_ldr)
+ return true;
/*
- * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
- * completely accurate as xen pv guests don't see that CPUID bit set but
- * that's good enough as they don't land on the BSP path anyway.
+ * Disable when:
+ *
+ * 1) The CPU does not support CPUID.
+ *
+	 * 2) Bit 31 in CPUID[1]:ECX is set
+ * The bit is reserved for hypervisor use. This is still not
+ * completely accurate as XEN PV guests don't see that CPUID bit
+ * set, but that's good enough as they don't land on the BSP
+ * path anyway.
+ *
+ * 3) Certain AMD patch levels are not allowed to be
+ * overwritten.
*/
- if (native_cpuid_ecx(1) & BIT(31))
- return true;
-
- if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
- if (amd_check_current_patch_level())
- return true;
- }
-
- if (cmdline_find_option_bool(cmdline, option) <= 0)
- dis_ucode_ldr = false;
+ if (!cpuid_feature() ||
+ native_cpuid_ecx(1) & BIT(31) ||
+ amd_check_current_patch_level())
+ dis_ucode_ldr = true;
return dis_ucode_ldr;
}
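Condition 2) keys off the CPUID bit that hypervisors use to announce
themselves (CPUID.1:ECX[31]); when it is set, the loader backs off because the
host owns microcode updates. A tiny user-space probe for the same bit, via the
compiler's <cpuid.h> intrinsic instead of the kernel's native_cpuid_ecx():

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;

        /* ECX bit 31 is reserved for hypervisor use; bare metal leaves it clear. */
        printf("running under a hypervisor: %s\n",
               (ecx & (1u << 31)) ? "yes" : "no");
        return 0;
    }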
@@ -125,7 +131,10 @@ void __init load_ucode_bsp(void)
unsigned int cpuid_1_eax;
bool intel = true;
- if (!have_cpuid_p())
+ if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0)
+ dis_ucode_ldr = true;
+
+ if (microcode_loader_disabled())
return;
cpuid_1_eax = native_cpuid_eax(1);
@@ -146,9 +155,6 @@ void __init load_ucode_bsp(void)
return;
}
- if (check_loader_disabled_bsp())
- return;
-
if (intel)
load_ucode_intel_bsp(&early_data);
else
@@ -159,6 +165,11 @@ void load_ucode_ap(void)
{
unsigned int cpuid_1_eax;
+ /*
+ * Can't use microcode_loader_disabled() here - .init section
+ * hell. It doesn't have to either - the BSP variant must've
+ * parsed cmdline already anyway.
+ */
if (dis_ucode_ldr)
return;
@@ -686,6 +697,8 @@ static int load_late_locked(void)
return load_late_stop_cpus(true);
case UCODE_NFOUND:
return -ENOENT;
+ case UCODE_OK:
+ return 0;
default:
return -EBADFD;
}
@@ -810,7 +823,7 @@ static int __init microcode_init(void)
struct cpuinfo_x86 *c = &boot_cpu_data;
int error;
- if (dis_ucode_ldr)
+ if (microcode_loader_disabled())
return -EINVAL;
if (c->x86_vendor == X86_VENDOR_INTEL)
diff --git a/arch/x86/kernel/cpu/microcode/intel-ucode-defs.h b/arch/x86/kernel/cpu/microcode/intel-ucode-defs.h
new file mode 100644
index 000000000000..cb6e601701ab
--- /dev/null
+++ b/arch/x86/kernel/cpu/microcode/intel-ucode-defs.h
@@ -0,0 +1,150 @@
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x03, .steppings = 0x0004, .driver_data = 0x2 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x05, .steppings = 0x0001, .driver_data = 0x45 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x05, .steppings = 0x0002, .driver_data = 0x40 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x05, .steppings = 0x0004, .driver_data = 0x2c },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x05, .steppings = 0x0008, .driver_data = 0x10 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x06, .steppings = 0x0001, .driver_data = 0xa },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x06, .steppings = 0x0020, .driver_data = 0x3 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x06, .steppings = 0x0400, .driver_data = 0xd },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x06, .steppings = 0x2000, .driver_data = 0x7 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x07, .steppings = 0x0002, .driver_data = 0x14 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x07, .steppings = 0x0004, .driver_data = 0x38 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x07, .steppings = 0x0008, .driver_data = 0x2e },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x08, .steppings = 0x0002, .driver_data = 0x11 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x08, .steppings = 0x0008, .driver_data = 0x8 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x08, .steppings = 0x0040, .driver_data = 0xc },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x08, .steppings = 0x0400, .driver_data = 0x5 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x09, .steppings = 0x0020, .driver_data = 0x47 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0a, .steppings = 0x0001, .driver_data = 0x3 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0a, .steppings = 0x0002, .driver_data = 0x1 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0b, .steppings = 0x0002, .driver_data = 0x1d },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0b, .steppings = 0x0010, .driver_data = 0x2 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0d, .steppings = 0x0040, .driver_data = 0x18 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0e, .steppings = 0x0100, .driver_data = 0x39 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0e, .steppings = 0x1000, .driver_data = 0x59 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0004, .driver_data = 0x5d },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0040, .driver_data = 0xd2 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0080, .driver_data = 0x6b },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0400, .driver_data = 0x95 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0800, .driver_data = 0xbc },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x2000, .driver_data = 0xa4 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x16, .steppings = 0x0002, .driver_data = 0x44 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x17, .steppings = 0x0040, .driver_data = 0x60f },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x17, .steppings = 0x0080, .driver_data = 0x70a },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x17, .steppings = 0x0400, .driver_data = 0xa0b },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1a, .steppings = 0x0010, .driver_data = 0x12 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1a, .steppings = 0x0020, .driver_data = 0x1d },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1c, .steppings = 0x0004, .driver_data = 0x219 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1c, .steppings = 0x0400, .driver_data = 0x107 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1d, .steppings = 0x0002, .driver_data = 0x29 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1e, .steppings = 0x0020, .driver_data = 0xa },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x25, .steppings = 0x0004, .driver_data = 0x11 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x25, .steppings = 0x0020, .driver_data = 0x7 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x26, .steppings = 0x0002, .driver_data = 0x105 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2a, .steppings = 0x0080, .driver_data = 0x2f },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2c, .steppings = 0x0004, .driver_data = 0x1f },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2d, .steppings = 0x0040, .driver_data = 0x621 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2d, .steppings = 0x0080, .driver_data = 0x71a },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2e, .steppings = 0x0040, .driver_data = 0xd },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2f, .steppings = 0x0004, .driver_data = 0x3b },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x37, .steppings = 0x0100, .driver_data = 0x838 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x37, .steppings = 0x0200, .driver_data = 0x90d },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3a, .steppings = 0x0200, .driver_data = 0x21 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3c, .steppings = 0x0008, .driver_data = 0x28 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3d, .steppings = 0x0010, .driver_data = 0x2f },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3e, .steppings = 0x0010, .driver_data = 0x42e },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3e, .steppings = 0x0040, .driver_data = 0x600 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3e, .steppings = 0x0080, .driver_data = 0x715 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3f, .steppings = 0x0004, .driver_data = 0x49 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3f, .steppings = 0x0010, .driver_data = 0x1a },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x45, .steppings = 0x0002, .driver_data = 0x26 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x46, .steppings = 0x0002, .driver_data = 0x1c },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x47, .steppings = 0x0002, .driver_data = 0x22 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x4c, .steppings = 0x0008, .driver_data = 0x368 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x4c, .steppings = 0x0010, .driver_data = 0x411 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x4d, .steppings = 0x0100, .driver_data = 0x12d },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x4e, .steppings = 0x0008, .driver_data = 0xf0 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0008, .driver_data = 0x1000191 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0010, .driver_data = 0x2007006 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0020, .driver_data = 0x3000010 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0040, .driver_data = 0x4003605 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0080, .driver_data = 0x5003707 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0800, .driver_data = 0x7002904 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x56, .steppings = 0x0004, .driver_data = 0x1c },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x56, .steppings = 0x0008, .driver_data = 0x700001c },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x56, .steppings = 0x0010, .driver_data = 0xf00001a },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x56, .steppings = 0x0020, .driver_data = 0xe000015 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5c, .steppings = 0x0004, .driver_data = 0x14 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5c, .steppings = 0x0200, .driver_data = 0x48 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5c, .steppings = 0x0400, .driver_data = 0x28 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5e, .steppings = 0x0008, .driver_data = 0xf0 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5f, .steppings = 0x0002, .driver_data = 0x3e },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x66, .steppings = 0x0008, .driver_data = 0x2a },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x6a, .steppings = 0x0020, .driver_data = 0xc0002f0 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x6a, .steppings = 0x0040, .driver_data = 0xd0003e7 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x6c, .steppings = 0x0002, .driver_data = 0x10002b0 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x7a, .steppings = 0x0002, .driver_data = 0x42 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x7a, .steppings = 0x0100, .driver_data = 0x24 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x7e, .steppings = 0x0020, .driver_data = 0xc6 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8a, .steppings = 0x0002, .driver_data = 0x33 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8c, .steppings = 0x0002, .driver_data = 0xb8 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8c, .steppings = 0x0004, .driver_data = 0x38 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8d, .steppings = 0x0002, .driver_data = 0x52 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8e, .steppings = 0x0200, .driver_data = 0xf6 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8e, .steppings = 0x0400, .driver_data = 0xf6 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8e, .steppings = 0x0800, .driver_data = 0xf6 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8e, .steppings = 0x1000, .driver_data = 0xfc },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0100, .driver_data = 0x2c000390 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0080, .driver_data = 0x2b000603 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0040, .driver_data = 0x2c000390 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0020, .driver_data = 0x2c000390 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0010, .driver_data = 0x2c000390 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x96, .steppings = 0x0002, .driver_data = 0x1a },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x97, .steppings = 0x0004, .driver_data = 0x37 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x97, .steppings = 0x0020, .driver_data = 0x37 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xbf, .steppings = 0x0004, .driver_data = 0x37 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xbf, .steppings = 0x0020, .driver_data = 0x37 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9a, .steppings = 0x0008, .driver_data = 0x435 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9a, .steppings = 0x0010, .driver_data = 0x435 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9c, .steppings = 0x0001, .driver_data = 0x24000026 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x0200, .driver_data = 0xf8 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x0400, .driver_data = 0xf8 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x0800, .driver_data = 0xf6 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x1000, .driver_data = 0xf8 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x2000, .driver_data = 0x100 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa5, .steppings = 0x0004, .driver_data = 0xfc },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa5, .steppings = 0x0008, .driver_data = 0xfc },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa5, .steppings = 0x0020, .driver_data = 0xfc },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa6, .steppings = 0x0001, .driver_data = 0xfe },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa6, .steppings = 0x0002, .driver_data = 0xfc },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa7, .steppings = 0x0002, .driver_data = 0x62 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xaa, .steppings = 0x0010, .driver_data = 0x20 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xb7, .steppings = 0x0002, .driver_data = 0x12b },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xba, .steppings = 0x0004, .driver_data = 0x4123 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xba, .steppings = 0x0008, .driver_data = 0x4123 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xba, .steppings = 0x0100, .driver_data = 0x4123 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xbe, .steppings = 0x0001, .driver_data = 0x1a },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xcf, .steppings = 0x0004, .driver_data = 0x21000283 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xcf, .steppings = 0x0002, .driver_data = 0x21000283 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x00, .steppings = 0x0080, .driver_data = 0x12 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x00, .steppings = 0x0400, .driver_data = 0x15 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x01, .steppings = 0x0004, .driver_data = 0x2e },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0010, .driver_data = 0x21 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0020, .driver_data = 0x2c },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0040, .driver_data = 0x10 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0080, .driver_data = 0x39 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0200, .driver_data = 0x2f },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x03, .steppings = 0x0004, .driver_data = 0xa },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x03, .steppings = 0x0008, .driver_data = 0xc },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x03, .steppings = 0x0010, .driver_data = 0x17 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0002, .driver_data = 0x17 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0008, .driver_data = 0x5 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0010, .driver_data = 0x6 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0080, .driver_data = 0x3 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0100, .driver_data = 0xe },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0200, .driver_data = 0x3 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0400, .driver_data = 0x4 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x06, .steppings = 0x0004, .driver_data = 0xf },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x06, .steppings = 0x0010, .driver_data = 0x4 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x06, .steppings = 0x0020, .driver_data = 0x8 },
+{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x06, .steppings = 0x0100, .driver_data = 0x9 },
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 819199bc0119..371ca6eac00e 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -320,7 +320,7 @@ static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci,
}
/* write microcode via MSR 0x79 */
- native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+ native_wrmsrq(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
rev = intel_get_microcode_revision();
if (rev != mc->hdr.rev)
@@ -389,7 +389,7 @@ static int __init save_builtin_microcode(void)
if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
return 0;
- if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return 0;
uci.mc = get_microcode_blob(&uci, true);
diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
index 5df621752fef..50a9702ae4e2 100644
--- a/arch/x86/kernel/cpu/microcode/internal.h
+++ b/arch/x86/kernel/cpu/microcode/internal.h
@@ -94,7 +94,6 @@ static inline unsigned int x86_cpuid_family(void)
return x86_family(eax);
}
-extern bool dis_ucode_ldr;
extern bool force_minrev;
#ifdef CONFIG_CPU_SUP_AMD
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 3e2533954675..c78f860419d6 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -30,6 +30,7 @@
#include <asm/reboot.h>
#include <asm/nmi.h>
#include <clocksource/hyperv_timer.h>
+#include <asm/msr.h>
#include <asm/numa.h>
#include <asm/svm.h>
@@ -70,7 +71,7 @@ u64 hv_get_non_nested_msr(unsigned int reg)
if (hv_is_synic_msr(reg) && ms_hyperv.paravisor_present)
hv_ivm_msr_read(reg, &value);
else
- rdmsrl(reg, value);
+ rdmsrq(reg, value);
return value;
}
EXPORT_SYMBOL_GPL(hv_get_non_nested_msr);
@@ -82,9 +83,9 @@ void hv_set_non_nested_msr(unsigned int reg, u64 value)
/* Write proxy bit via wrmsr instruction */
if (hv_is_sint_msr(reg))
- wrmsrl(reg, value | 1 << 20);
+ wrmsrq(reg, value | 1 << 20);
} else {
- wrmsrl(reg, value);
+ wrmsrq(reg, value);
}
}
EXPORT_SYMBOL_GPL(hv_set_non_nested_msr);
@@ -345,7 +346,7 @@ static unsigned long hv_get_tsc_khz(void)
{
unsigned long freq;
- rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
+ rdmsrq(HV_X64_MSR_TSC_FREQUENCY, freq);
return freq / 1000;
}
@@ -541,7 +542,7 @@ static void __init ms_hyperv_init_platform(void)
*/
u64 hv_lapic_frequency;
- rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
+ rdmsrq(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
lapic_timer_period = hv_lapic_frequency;
pr_info("Hyper-V: LAPIC Timer Frequency: %#x\n",
@@ -574,7 +575,7 @@ static void __init ms_hyperv_init_platform(void)
* setting of this MSR bit should happen before init_intel()
* is called.
*/
- wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_EXPOSE_INVARIANT_TSC);
+ wrmsrq(HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_EXPOSE_INVARIANT_TSC);
setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
}
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index e2c6b471d230..8c18327eb10b 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -593,7 +593,7 @@ static void get_fixed_ranges(mtrr_type *frs)
void mtrr_save_fixed_ranges(void *info)
{
- if (boot_cpu_has(X86_FEATURE_MTRR))
+ if (mtrr_state.have_fixed)
get_fixed_ranges(mtrr_state.fixed_ranges);
}
diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile
index 0c13b0befd8a..d8a04b195da2 100644
--- a/arch/x86/kernel/cpu/resctrl/Makefile
+++ b/arch/x86/kernel/cpu/resctrl/Makefile
@@ -2,4 +2,6 @@
obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o
obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o
obj-$(CONFIG_RESCTRL_FS_PSEUDO_LOCK) += pseudo_lock.o
+
+# To allow define_trace.h's recursive include:
CFLAGS_pseudo_lock.o = -I$(src)
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index cf29681d01e0..7109cbfcad4f 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -22,6 +22,7 @@
#include <linux/cpuhotplug.h>
#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
#include <asm/resctrl.h>
#include "internal.h"
@@ -60,7 +61,6 @@ struct rdt_hw_resource rdt_resources_all[RDT_NUM_RESOURCES] = {
[RDT_RESOURCE_L3] =
{
.r_resctrl = {
- .rid = RDT_RESOURCE_L3,
.name = "L3",
.ctrl_scope = RESCTRL_L3_CACHE,
.mon_scope = RESCTRL_L3_CACHE,
@@ -74,7 +74,6 @@ struct rdt_hw_resource rdt_resources_all[RDT_NUM_RESOURCES] = {
[RDT_RESOURCE_L2] =
{
.r_resctrl = {
- .rid = RDT_RESOURCE_L2,
.name = "L2",
.ctrl_scope = RESCTRL_L2_CACHE,
.ctrl_domains = ctrl_domain_init(RDT_RESOURCE_L2),
@@ -86,7 +85,6 @@ struct rdt_hw_resource rdt_resources_all[RDT_NUM_RESOURCES] = {
[RDT_RESOURCE_MBA] =
{
.r_resctrl = {
- .rid = RDT_RESOURCE_MBA,
.name = "MB",
.ctrl_scope = RESCTRL_L3_CACHE,
.ctrl_domains = ctrl_domain_init(RDT_RESOURCE_MBA),
@@ -96,7 +94,6 @@ struct rdt_hw_resource rdt_resources_all[RDT_NUM_RESOURCES] = {
[RDT_RESOURCE_SMBA] =
{
.r_resctrl = {
- .rid = RDT_RESOURCE_SMBA,
.name = "SMBA",
.ctrl_scope = RESCTRL_L3_CACHE,
.ctrl_domains = ctrl_domain_init(RDT_RESOURCE_SMBA),
@@ -145,10 +142,10 @@ static inline void cache_alloc_hsw_probe(void)
struct rdt_resource *r = &hw_res->r_resctrl;
u64 max_cbm = BIT_ULL_MASK(20) - 1, l3_cbm_0;
- if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm))
+ if (wrmsrq_safe(MSR_IA32_L3_CBM_BASE, max_cbm))
return;
- rdmsrl(MSR_IA32_L3_CBM_BASE, l3_cbm_0);
+ rdmsrq(MSR_IA32_L3_CBM_BASE, l3_cbm_0);
/* If all the bits were set in MSR, return success */
if (l3_cbm_0 != max_cbm)
@@ -164,21 +161,6 @@ static inline void cache_alloc_hsw_probe(void)
rdt_alloc_capable = true;
}
-bool is_mba_sc(struct rdt_resource *r)
-{
- if (!r)
- r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
-
- /*
- * The software controller support is only applicable to MBA resource.
- * Make sure to check for resource type.
- */
- if (r->rid != RDT_RESOURCE_MBA)
- return false;
-
- return r->membw.mba_sc;
-}
-
/*
* rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values
* exposed to user interface and the h/w understandable delay values.
@@ -309,7 +291,7 @@ static void mba_wrmsr_amd(struct msr_param *m)
unsigned int i;
for (i = m->low; i < m->high; i++)
- wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
+ wrmsrq(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}
/*
@@ -334,7 +316,7 @@ static void mba_wrmsr_intel(struct msr_param *m)
/* Write the delay values for mba. */
for (i = m->low; i < m->high; i++)
- wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], m->res));
+ wrmsrq(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], m->res));
}
static void cat_wrmsr(struct msr_param *m)
@@ -344,7 +326,7 @@ static void cat_wrmsr(struct msr_param *m)
unsigned int i;
for (i = m->low; i < m->high; i++)
- wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
+ wrmsrq(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}
u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
@@ -737,7 +719,7 @@ struct rdt_options {
bool force_off, force_on;
};
-static struct rdt_options rdt_options[] __initdata = {
+static struct rdt_options rdt_options[] __ro_after_init = {
RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC),
RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
@@ -777,7 +759,7 @@ static int __init set_rdt_options(char *str)
}
__setup("rdt", set_rdt_options);
-bool __init rdt_cpu_has(int flag)
+bool rdt_cpu_has(int flag)
{
bool ret = boot_cpu_has(flag);
struct rdt_options *o;
@@ -797,7 +779,7 @@ bool __init rdt_cpu_has(int flag)
return ret;
}
-__init bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt)
+bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt)
{
if (!rdt_cpu_has(X86_FEATURE_BMEC))
return false;
@@ -1011,7 +993,11 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c)
static int __init resctrl_arch_late_init(void)
{
struct rdt_resource *r;
- int state, ret;
+ int state, ret, i;
+
+ /* for_each_rdt_resource() requires all rid to be initialised. */
+ for (i = 0; i < RDT_NUM_RESOURCES; i++)
+ rdt_resources_all[i].r_resctrl.rid = i;
/*
* Initialize functions(or definitions) that are different
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 0a0ac5f6112e..1189c0df4ad7 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -16,277 +16,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpu.h>
-#include <linux/kernfs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/tick.h>
#include "internal.h"
-struct rdt_parse_data {
- struct rdtgroup *rdtgrp;
- char *buf;
-};
-
-typedef int (ctrlval_parser_t)(struct rdt_parse_data *data,
- struct resctrl_schema *s,
- struct rdt_ctrl_domain *d);
-
-/*
- * Check whether MBA bandwidth percentage value is correct. The value is
- * checked against the minimum and max bandwidth values specified by the
- * hardware. The allocated bandwidth percentage is rounded to the next
- * control step available on the hardware.
- */
-static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r)
-{
- int ret;
- u32 bw;
-
- /*
- * Only linear delay values is supported for current Intel SKUs.
- */
- if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
- rdt_last_cmd_puts("No support for non-linear MB domains\n");
- return false;
- }
-
- ret = kstrtou32(buf, 10, &bw);
- if (ret) {
- rdt_last_cmd_printf("Invalid MB value %s\n", buf);
- return false;
- }
-
- /* Nothing else to do if software controller is enabled. */
- if (is_mba_sc(r)) {
- *data = bw;
- return true;
- }
-
- if (bw < r->membw.min_bw || bw > r->membw.max_bw) {
- rdt_last_cmd_printf("MB value %u out of range [%d,%d]\n",
- bw, r->membw.min_bw, r->membw.max_bw);
- return false;
- }
-
- *data = roundup(bw, (unsigned long)r->membw.bw_gran);
- return true;
-}
-
-static int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
- struct rdt_ctrl_domain *d)
-{
- struct resctrl_staged_config *cfg;
- u32 closid = data->rdtgrp->closid;
- struct rdt_resource *r = s->res;
- u32 bw_val;
-
- cfg = &d->staged_config[s->conf_type];
- if (cfg->have_new_ctrl) {
- rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
- return -EINVAL;
- }
-
- if (!bw_validate(data->buf, &bw_val, r))
- return -EINVAL;
-
- if (is_mba_sc(r)) {
- d->mbps_val[closid] = bw_val;
- return 0;
- }
-
- cfg->new_ctrl = bw_val;
- cfg->have_new_ctrl = true;
-
- return 0;
-}
-
-/*
- * Check whether a cache bit mask is valid.
- * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID:
- * - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1
- * - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1
- *
- * Haswell does not support a non-contiguous 1s value and additionally
- * requires at least two bits set.
- * AMD allows non-contiguous bitmasks.
- */
-static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
-{
- u32 supported_bits = BIT_MASK(r->cache.cbm_len) - 1;
- unsigned int cbm_len = r->cache.cbm_len;
- unsigned long first_bit, zero_bit, val;
- int ret;
-
- ret = kstrtoul(buf, 16, &val);
- if (ret) {
- rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
- return false;
- }
-
- if ((r->cache.min_cbm_bits > 0 && val == 0) || val > supported_bits) {
- rdt_last_cmd_puts("Mask out of range\n");
- return false;
- }
-
- first_bit = find_first_bit(&val, cbm_len);
- zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
-
- /* Are non-contiguous bitmasks allowed? */
- if (!r->cache.arch_has_sparse_bitmasks &&
- (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
- rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
- return false;
- }
-
- if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
- rdt_last_cmd_printf("Need at least %d bits in the mask\n",
- r->cache.min_cbm_bits);
- return false;
- }
-
- *data = val;
- return true;
-}
-
-/*
- * Read one cache bit mask (hex). Check that it is valid for the current
- * resource type.
- */
-static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
- struct rdt_ctrl_domain *d)
-{
- struct rdtgroup *rdtgrp = data->rdtgrp;
- struct resctrl_staged_config *cfg;
- struct rdt_resource *r = s->res;
- u32 cbm_val;
-
- cfg = &d->staged_config[s->conf_type];
- if (cfg->have_new_ctrl) {
- rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
- return -EINVAL;
- }
-
- /*
- * Cannot set up more than one pseudo-locked region in a cache
- * hierarchy.
- */
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
- rdtgroup_pseudo_locked_in_hierarchy(d)) {
- rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
- return -EINVAL;
- }
-
- if (!cbm_validate(data->buf, &cbm_val, r))
- return -EINVAL;
-
- if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
- rdtgrp->mode == RDT_MODE_SHAREABLE) &&
- rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
- rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
- return -EINVAL;
- }
-
- /*
- * The CBM may not overlap with the CBM of another closid if
- * either is exclusive.
- */
- if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
- rdt_last_cmd_puts("Overlaps with exclusive group\n");
- return -EINVAL;
- }
-
- if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
- if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
- rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
- rdt_last_cmd_puts("Overlaps with other group\n");
- return -EINVAL;
- }
- }
-
- cfg->new_ctrl = cbm_val;
- cfg->have_new_ctrl = true;
-
- return 0;
-}
-
-/*
- * For each domain in this resource we expect to find a series of:
- * id=mask
- * separated by ";". The "id" is in decimal, and must match one of
- * the "id"s for this resource.
- */
-static int parse_line(char *line, struct resctrl_schema *s,
- struct rdtgroup *rdtgrp)
-{
- enum resctrl_conf_type t = s->conf_type;
- ctrlval_parser_t *parse_ctrlval = NULL;
- struct resctrl_staged_config *cfg;
- struct rdt_resource *r = s->res;
- struct rdt_parse_data data;
- struct rdt_ctrl_domain *d;
- char *dom = NULL, *id;
- unsigned long dom_id;
-
- /* Walking r->domains, ensure it can't race with cpuhp */
- lockdep_assert_cpus_held();
-
- switch (r->schema_fmt) {
- case RESCTRL_SCHEMA_BITMAP:
- parse_ctrlval = &parse_cbm;
- break;
- case RESCTRL_SCHEMA_RANGE:
- parse_ctrlval = &parse_bw;
- break;
- }
-
- if (WARN_ON_ONCE(!parse_ctrlval))
- return -EINVAL;
-
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
- (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) {
- rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
- return -EINVAL;
- }
-
-next:
- if (!line || line[0] == '\0')
- return 0;
- dom = strsep(&line, ";");
- id = strsep(&dom, "=");
- if (!dom || kstrtoul(id, 10, &dom_id)) {
- rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
- return -EINVAL;
- }
- dom = strim(dom);
- list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
- if (d->hdr.id == dom_id) {
- data.buf = dom;
- data.rdtgrp = rdtgrp;
- if (parse_ctrlval(&data, s, d))
- return -EINVAL;
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
- cfg = &d->staged_config[t];
- /*
- * In pseudo-locking setup mode and just
- * parsed a valid CBM that should be
- * pseudo-locked. Only one locked region per
- * resource group and domain so just do
- * the required initialization for single
- * region and return.
- */
- rdtgrp->plr->s = s;
- rdtgrp->plr->d = d;
- rdtgrp->plr->cbm = cfg->new_ctrl;
- d->plr = rdtgrp->plr;
- return 0;
- }
- goto next;
- }
- }
- return -EINVAL;
-}
-
int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 closid, enum resctrl_conf_type t, u32 cfg_val)
{
@@ -351,100 +83,6 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
return 0;
}
-static int rdtgroup_parse_resource(char *resname, char *tok,
- struct rdtgroup *rdtgrp)
-{
- struct resctrl_schema *s;
-
- list_for_each_entry(s, &resctrl_schema_all, list) {
- if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
- return parse_line(tok, s, rdtgrp);
- }
- rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
- return -EINVAL;
-}
-
-ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off)
-{
- struct resctrl_schema *s;
- struct rdtgroup *rdtgrp;
- struct rdt_resource *r;
- char *tok, *resname;
- int ret = 0;
-
- /* Valid input requires a trailing newline */
- if (nbytes == 0 || buf[nbytes - 1] != '\n')
- return -EINVAL;
- buf[nbytes - 1] = '\0';
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
- if (!rdtgrp) {
- rdtgroup_kn_unlock(of->kn);
- return -ENOENT;
- }
- rdt_last_cmd_clear();
-
- /*
- * No changes to pseudo-locked region allowed. It has to be removed
- * and re-created instead.
- */
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
- ret = -EINVAL;
- rdt_last_cmd_puts("Resource group is pseudo-locked\n");
- goto out;
- }
-
- rdt_staged_configs_clear();
-
- while ((tok = strsep(&buf, "\n")) != NULL) {
- resname = strim(strsep(&tok, ":"));
- if (!tok) {
- rdt_last_cmd_puts("Missing ':'\n");
- ret = -EINVAL;
- goto out;
- }
- if (tok[0] == '\0') {
- rdt_last_cmd_printf("Missing '%s' value\n", resname);
- ret = -EINVAL;
- goto out;
- }
- ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
- if (ret)
- goto out;
- }
-
- list_for_each_entry(s, &resctrl_schema_all, list) {
- r = s->res;
-
- /*
- * Writes to mba_sc resources update the software controller,
- * not the control MSR.
- */
- if (is_mba_sc(r))
- continue;
-
- ret = resctrl_arch_update_domains(r, rdtgrp->closid);
- if (ret)
- goto out;
- }
-
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
- /*
- * If pseudo-locking fails we keep the resource group in
- * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
- * active and updated for just the domain the pseudo-locked
- * region was requested for.
- */
- ret = rdtgroup_pseudo_lock_create(rdtgrp);
- }
-
-out:
- rdt_staged_configs_clear();
- rdtgroup_kn_unlock(of->kn);
- return ret ?: nbytes;
-}
-
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 closid, enum resctrl_conf_type type)
{
@@ -453,276 +91,3 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
return hw_dom->ctrl_val[idx];
}
-
-static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
-{
- struct rdt_resource *r = schema->res;
- struct rdt_ctrl_domain *dom;
- bool sep = false;
- u32 ctrl_val;
-
- /* Walking r->domains, ensure it can't race with cpuhp */
- lockdep_assert_cpus_held();
-
- seq_printf(s, "%*s:", max_name_width, schema->name);
- list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
- if (sep)
- seq_puts(s, ";");
-
- if (is_mba_sc(r))
- ctrl_val = dom->mbps_val[closid];
- else
- ctrl_val = resctrl_arch_get_config(r, dom, closid,
- schema->conf_type);
-
- seq_printf(s, schema->fmt_str, dom->hdr.id, ctrl_val);
- sep = true;
- }
- seq_puts(s, "\n");
-}
-
-int rdtgroup_schemata_show(struct kernfs_open_file *of,
- struct seq_file *s, void *v)
-{
- struct resctrl_schema *schema;
- struct rdtgroup *rdtgrp;
- int ret = 0;
- u32 closid;
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
- if (rdtgrp) {
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
- list_for_each_entry(schema, &resctrl_schema_all, list) {
- seq_printf(s, "%s:uninitialized\n", schema->name);
- }
- } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
- if (!rdtgrp->plr->d) {
- rdt_last_cmd_clear();
- rdt_last_cmd_puts("Cache domain offline\n");
- ret = -ENODEV;
- } else {
- seq_printf(s, "%s:%d=%x\n",
- rdtgrp->plr->s->res->name,
- rdtgrp->plr->d->hdr.id,
- rdtgrp->plr->cbm);
- }
- } else {
- closid = rdtgrp->closid;
- list_for_each_entry(schema, &resctrl_schema_all, list) {
- if (closid < schema->num_closid)
- show_doms(s, schema, closid);
- }
- }
- } else {
- ret = -ENOENT;
- }
- rdtgroup_kn_unlock(of->kn);
- return ret;
-}
-
-static int smp_mon_event_count(void *arg)
-{
- mon_event_count(arg);
-
- return 0;
-}
-
-ssize_t rdtgroup_mba_mbps_event_write(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off)
-{
- struct rdtgroup *rdtgrp;
- int ret = 0;
-
- /* Valid input requires a trailing newline */
- if (nbytes == 0 || buf[nbytes - 1] != '\n')
- return -EINVAL;
- buf[nbytes - 1] = '\0';
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
- if (!rdtgrp) {
- rdtgroup_kn_unlock(of->kn);
- return -ENOENT;
- }
- rdt_last_cmd_clear();
-
- if (!strcmp(buf, "mbm_local_bytes")) {
- if (resctrl_arch_is_mbm_local_enabled())
- rdtgrp->mba_mbps_event = QOS_L3_MBM_LOCAL_EVENT_ID;
- else
- ret = -EINVAL;
- } else if (!strcmp(buf, "mbm_total_bytes")) {
- if (resctrl_arch_is_mbm_total_enabled())
- rdtgrp->mba_mbps_event = QOS_L3_MBM_TOTAL_EVENT_ID;
- else
- ret = -EINVAL;
- } else {
- ret = -EINVAL;
- }
-
- if (ret)
- rdt_last_cmd_printf("Unsupported event id '%s'\n", buf);
-
- rdtgroup_kn_unlock(of->kn);
-
- return ret ?: nbytes;
-}
-
-int rdtgroup_mba_mbps_event_show(struct kernfs_open_file *of,
- struct seq_file *s, void *v)
-{
- struct rdtgroup *rdtgrp;
- int ret = 0;
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
-
- if (rdtgrp) {
- switch (rdtgrp->mba_mbps_event) {
- case QOS_L3_MBM_LOCAL_EVENT_ID:
- seq_puts(s, "mbm_local_bytes\n");
- break;
- case QOS_L3_MBM_TOTAL_EVENT_ID:
- seq_puts(s, "mbm_total_bytes\n");
- break;
- default:
- pr_warn_once("Bad event %d\n", rdtgrp->mba_mbps_event);
- ret = -EINVAL;
- break;
- }
- } else {
- ret = -ENOENT;
- }
-
- rdtgroup_kn_unlock(of->kn);
-
- return ret;
-}
-
-struct rdt_domain_hdr *resctrl_find_domain(struct list_head *h, int id,
- struct list_head **pos)
-{
- struct rdt_domain_hdr *d;
- struct list_head *l;
-
- list_for_each(l, h) {
- d = list_entry(l, struct rdt_domain_hdr, list);
- /* When id is found, return its domain. */
- if (id == d->id)
- return d;
- /* Stop searching when finding id's position in sorted list. */
- if (id < d->id)
- break;
- }
-
- if (pos)
- *pos = l;
-
- return NULL;
-}
-
-void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
- struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
- cpumask_t *cpumask, int evtid, int first)
-{
- int cpu;
-
- /* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */
- lockdep_assert_cpus_held();
-
- /*
- * Setup the parameters to pass to mon_event_count() to read the data.
- */
- rr->rgrp = rdtgrp;
- rr->evtid = evtid;
- rr->r = r;
- rr->d = d;
- rr->first = first;
- rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
- if (IS_ERR(rr->arch_mon_ctx)) {
- rr->err = -EINVAL;
- return;
- }
-
- cpu = cpumask_any_housekeeping(cpumask, RESCTRL_PICK_ANY_CPU);
-
- /*
- * cpumask_any_housekeeping() prefers housekeeping CPUs, but
- * are all the CPUs nohz_full? If yes, pick a CPU to IPI.
- * MPAM's resctrl_arch_rmid_read() is unable to read the
- * counters on some platforms if its called in IRQ context.
- */
- if (tick_nohz_full_cpu(cpu))
- smp_call_function_any(cpumask, mon_event_count, rr, 1);
- else
- smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);
-
- resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx);
-}
-
-int rdtgroup_mondata_show(struct seq_file *m, void *arg)
-{
- struct kernfs_open_file *of = m->private;
- struct rdt_domain_hdr *hdr;
- struct rmid_read rr = {0};
- struct rdt_mon_domain *d;
- u32 resid, evtid, domid;
- struct rdtgroup *rdtgrp;
- struct rdt_resource *r;
- union mon_data_bits md;
- int ret = 0;
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
- if (!rdtgrp) {
- ret = -ENOENT;
- goto out;
- }
-
- md.priv = of->kn->priv;
- resid = md.u.rid;
- domid = md.u.domid;
- evtid = md.u.evtid;
- r = resctrl_arch_get_resource(resid);
-
- if (md.u.sum) {
- /*
- * This file requires summing across all domains that share
- * the L3 cache id that was provided in the "domid" field of the
- * mon_data_bits union. Search all domains in the resource for
- * one that matches this cache id.
- */
- list_for_each_entry(d, &r->mon_domains, hdr.list) {
- if (d->ci->id == domid) {
- rr.ci = d->ci;
- mon_event_read(&rr, r, NULL, rdtgrp,
- &d->ci->shared_cpu_map, evtid, false);
- goto checkresult;
- }
- }
- ret = -ENOENT;
- goto out;
- } else {
- /*
- * This file provides data from a single domain. Search
- * the resource to find the domain with "domid".
- */
- hdr = resctrl_find_domain(&r->mon_domains, domid, NULL);
- if (!hdr || WARN_ON_ONCE(hdr->type != RESCTRL_MON_DOMAIN)) {
- ret = -ENOENT;
- goto out;
- }
- d = container_of(hdr, struct rdt_mon_domain, hdr);
- mon_event_read(&rr, r, d, rdtgrp, &d->hdr.cpu_mask, evtid, false);
- }
-
-checkresult:
-
- if (rr.err == -EIO)
- seq_puts(m, "Error\n");
- else if (rr.err == -EINVAL)
- seq_puts(m, "Unavailable\n");
- else
- seq_printf(m, "%llu\n", rr.val);
-
-out:
- rdtgroup_kn_unlock(of->kn);
- return ret;
-}
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index eaae99602b61..5e3c41b36437 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -3,28 +3,21 @@
#define _ASM_X86_RESCTRL_INTERNAL_H
#include <linux/resctrl.h>
-#include <linux/sched.h>
-#include <linux/kernfs.h>
-#include <linux/fs_context.h>
-#include <linux/jump_label.h>
-#include <linux/tick.h>
-
-#include <asm/resctrl.h>
#define L3_QOS_CDP_ENABLE 0x01ULL
#define L2_QOS_CDP_ENABLE 0x01ULL
-#define CQM_LIMBOCHECK_INTERVAL 1000
-
#define MBM_CNTR_WIDTH_BASE 24
-#define MBM_OVERFLOW_INTERVAL 1000
-#define MAX_MBA_BW 100u
+
#define MBA_IS_LINEAR 0x4
+
#define MBM_CNTR_WIDTH_OFFSET_AMD 20
#define RMID_VAL_ERROR BIT_ULL(63)
+
#define RMID_VAL_UNAVAIL BIT_ULL(62)
+
/*
* With the above fields in use 62 bits remain in MSR_IA32_QM_CTR for
* data to be returned. The counter width is discovered from the hardware
@@ -33,278 +26,6 @@
#define MBM_CNTR_WIDTH_OFFSET_MAX (62 - MBM_CNTR_WIDTH_BASE)
/**
- * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that
- * aren't marked nohz_full
- * @mask: The mask to pick a CPU from.
- * @exclude_cpu:The CPU to avoid picking.
- *
- * Returns a CPU from @mask, but not @exclude_cpu. If there are housekeeping
- * CPUs that don't use nohz_full, these are preferred. Pass
- * RESCTRL_PICK_ANY_CPU to avoid excluding any CPUs.
- *
- * When a CPU is excluded, returns >= nr_cpu_ids if no CPUs are available.
- */
-static inline unsigned int
-cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu)
-{
- unsigned int cpu, hk_cpu;
-
- if (exclude_cpu == RESCTRL_PICK_ANY_CPU)
- cpu = cpumask_any(mask);
- else
- cpu = cpumask_any_but(mask, exclude_cpu);
-
- /* Only continue if tick_nohz_full_mask has been initialized. */
- if (!tick_nohz_full_enabled())
- return cpu;
-
- /* If the CPU picked isn't marked nohz_full nothing more needs doing. */
- if (cpu < nr_cpu_ids && !tick_nohz_full_cpu(cpu))
- return cpu;
-
- /* Try to find a CPU that isn't nohz_full to use in preference */
- hk_cpu = cpumask_nth_andnot(0, mask, tick_nohz_full_mask);
- if (hk_cpu == exclude_cpu)
- hk_cpu = cpumask_nth_andnot(1, mask, tick_nohz_full_mask);
-
- if (hk_cpu < nr_cpu_ids)
- cpu = hk_cpu;
-
- return cpu;
-}
-
-struct rdt_fs_context {
- struct kernfs_fs_context kfc;
- bool enable_cdpl2;
- bool enable_cdpl3;
- bool enable_mba_mbps;
- bool enable_debug;
-};
-
-static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc)
-{
- struct kernfs_fs_context *kfc = fc->fs_private;
-
- return container_of(kfc, struct rdt_fs_context, kfc);
-}
-
-/**
- * struct mon_evt - Entry in the event list of a resource
- * @evtid: event id
- * @name: name of the event
- * @configurable: true if the event is configurable
- * @list: entry in &rdt_resource->evt_list
- */
-struct mon_evt {
- enum resctrl_event_id evtid;
- char *name;
- bool configurable;
- struct list_head list;
-};
-
-/**
- * union mon_data_bits - Monitoring details for each event file.
- * @priv: Used to store monitoring event data in @u
- * as kernfs private data.
- * @u.rid: Resource id associated with the event file.
- * @u.evtid: Event id associated with the event file.
- * @u.sum: Set when event must be summed across multiple
- * domains.
- * @u.domid: When @u.sum is zero this is the domain to which
- * the event file belongs. When @sum is one this
- * is the id of the L3 cache that all domains to be
- * summed share.
- * @u: Name of the bit fields struct.
- */
-union mon_data_bits {
- void *priv;
- struct {
- unsigned int rid : 10;
- enum resctrl_event_id evtid : 7;
- unsigned int sum : 1;
- unsigned int domid : 14;
- } u;
-};
-
-/**
- * struct rmid_read - Data passed across smp_call*() to read event count.
- * @rgrp: Resource group for which the counter is being read. If it is a parent
- * resource group then its event count is summed with the count from all
- * its child resource groups.
- * @r: Resource describing the properties of the event being read.
- * @d: Domain that the counter should be read from. If NULL then sum all
- * domains in @r sharing L3 @ci.id
- * @evtid: Which monitor event to read.
- * @first: Initialize MBM counter when true.
- * @ci: Cacheinfo for L3. Only set when @d is NULL. Used when summing domains.
- * @err: Error encountered when reading counter.
- * @val: Returned value of event counter. If @rgrp is a parent resource group,
- * @val includes the sum of event counts from its child resource groups.
- * If @d is NULL, @val includes the sum of all domains in @r sharing @ci.id,
- * (summed across child resource groups if @rgrp is a parent resource group).
- * @arch_mon_ctx: Hardware monitor allocated for this read request (MPAM only).
- */
-struct rmid_read {
- struct rdtgroup *rgrp;
- struct rdt_resource *r;
- struct rdt_mon_domain *d;
- enum resctrl_event_id evtid;
- bool first;
- struct cacheinfo *ci;
- int err;
- u64 val;
- void *arch_mon_ctx;
-};
-
-extern struct list_head resctrl_schema_all;
-extern bool resctrl_mounted;
-
-enum rdt_group_type {
- RDTCTRL_GROUP = 0,
- RDTMON_GROUP,
- RDT_NUM_GROUP,
-};
-
-/**
- * enum rdtgrp_mode - Mode of a RDT resource group
- * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations
- * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed
- * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking
- * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations
- * allowed AND the allocations are Cache Pseudo-Locked
- * @RDT_NUM_MODES: Total number of modes
- *
- * The mode of a resource group enables control over the allowed overlap
- * between allocations associated with different resource groups (classes
- * of service). User is able to modify the mode of a resource group by
- * writing to the "mode" resctrl file associated with the resource group.
- *
- * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by
- * writing the appropriate text to the "mode" file. A resource group enters
- * "pseudo-locked" mode after the schemata is written while the resource
- * group is in "pseudo-locksetup" mode.
- */
-enum rdtgrp_mode {
- RDT_MODE_SHAREABLE = 0,
- RDT_MODE_EXCLUSIVE,
- RDT_MODE_PSEUDO_LOCKSETUP,
- RDT_MODE_PSEUDO_LOCKED,
-
- /* Must be last */
- RDT_NUM_MODES,
-};
-
-/**
- * struct mongroup - store mon group's data in resctrl fs.
- * @mon_data_kn: kernfs node for the mon_data directory
- * @parent: parent rdtgrp
- * @crdtgrp_list: child rdtgroup node list
- * @rmid: rmid for this rdtgroup
- */
-struct mongroup {
- struct kernfs_node *mon_data_kn;
- struct rdtgroup *parent;
- struct list_head crdtgrp_list;
- u32 rmid;
-};
-
-/**
- * struct rdtgroup - store rdtgroup's data in resctrl file system.
- * @kn: kernfs node
- * @rdtgroup_list: linked list for all rdtgroups
- * @closid: closid for this rdtgroup
- * @cpu_mask: CPUs assigned to this rdtgroup
- * @flags: status bits
- * @waitcount: how many cpus expect to find this
- * group when they acquire rdtgroup_mutex
- * @type: indicates type of this rdtgroup - either
- * monitor only or ctrl_mon group
- * @mon: mongroup related data
- * @mode: mode of resource group
- * @mba_mbps_event: input monitoring event id when mba_sc is enabled
- * @plr: pseudo-locked region
- */
-struct rdtgroup {
- struct kernfs_node *kn;
- struct list_head rdtgroup_list;
- u32 closid;
- struct cpumask cpu_mask;
- int flags;
- atomic_t waitcount;
- enum rdt_group_type type;
- struct mongroup mon;
- enum rdtgrp_mode mode;
- enum resctrl_event_id mba_mbps_event;
- struct pseudo_lock_region *plr;
-};
-
-/* rdtgroup.flags */
-#define RDT_DELETED 1
-
-/* rftype.flags */
-#define RFTYPE_FLAGS_CPUS_LIST 1
-
-/*
- * Define the file type flags for base and info directories.
- */
-#define RFTYPE_INFO BIT(0)
-#define RFTYPE_BASE BIT(1)
-#define RFTYPE_CTRL BIT(4)
-#define RFTYPE_MON BIT(5)
-#define RFTYPE_TOP BIT(6)
-#define RFTYPE_RES_CACHE BIT(8)
-#define RFTYPE_RES_MB BIT(9)
-#define RFTYPE_DEBUG BIT(10)
-#define RFTYPE_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL)
-#define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON)
-#define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP)
-#define RFTYPE_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL)
-#define RFTYPE_MON_BASE (RFTYPE_BASE | RFTYPE_MON)
-
-/* List of all resource groups */
-extern struct list_head rdt_all_groups;
-
-extern int max_name_width;
-
-/**
- * struct rftype - describe each file in the resctrl file system
- * @name: File name
- * @mode: Access mode
- * @kf_ops: File operations
- * @flags: File specific RFTYPE_FLAGS_* flags
- * @fflags: File specific RFTYPE_* flags
- * @seq_show: Show content of the file
- * @write: Write to the file
- */
-struct rftype {
- char *name;
- umode_t mode;
- const struct kernfs_ops *kf_ops;
- unsigned long flags;
- unsigned long fflags;
-
- int (*seq_show)(struct kernfs_open_file *of,
- struct seq_file *sf, void *v);
- /*
- * write() is the generic write callback which maps directly to
- * kernfs write operation and overrides all other operations.
- * Maximum write size is determined by ->max_write_len.
- */
- ssize_t (*write)(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off);
-};
-
-/**
- * struct mbm_state - status for each MBM counter in each domain
- * @prev_bw_bytes: Previous bytes value read for bandwidth calculation
- * @prev_bw: The most recent bandwidth in MBps
- */
-struct mbm_state {
- u64 prev_bw_bytes;
- u32 prev_bw;
-};
-
-/**
* struct arch_mbm_state - values used to compute resctrl_arch_rmid_read()s
* return value.
* @chunks: Total data moved (multiply by rdt_group.mon_scale to get bytes)
@@ -401,24 +122,7 @@ static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r
return container_of(r, struct rdt_hw_resource, r_resctrl);
}
-extern struct mutex rdtgroup_mutex;
-
-static inline const char *rdt_kn_name(const struct kernfs_node *kn)
-{
- return rcu_dereference_check(kn->name, lockdep_is_held(&rdtgroup_mutex));
-}
-
extern struct rdt_hw_resource rdt_resources_all[];
-extern struct rdtgroup rdtgroup_default;
-extern struct dentry *debugfs_resctrl;
-extern enum resctrl_event_id mba_mbps_default_event;
-
-static inline bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l)
-{
- return rdt_resources_all[l].cdp_enabled;
-}
-
-int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
void arch_mon_domain_online(struct rdt_resource *r, struct rdt_mon_domain *d);
@@ -455,99 +159,14 @@ union cpuid_0x10_x_edx {
unsigned int full;
};
-void rdt_last_cmd_clear(void);
-void rdt_last_cmd_puts(const char *s);
-__printf(1, 2)
-void rdt_last_cmd_printf(const char *fmt, ...);
-
void rdt_ctrl_update(void *arg);
-struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
-void rdtgroup_kn_unlock(struct kernfs_node *kn);
-int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name);
-int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
- umode_t mask);
-ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off);
-int rdtgroup_schemata_show(struct kernfs_open_file *of,
- struct seq_file *s, void *v);
-ssize_t rdtgroup_mba_mbps_event_write(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off);
-int rdtgroup_mba_mbps_event_show(struct kernfs_open_file *of,
- struct seq_file *s, void *v);
-bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_ctrl_domain *d,
- unsigned long cbm, int closid, bool exclusive);
-unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_ctrl_domain *d,
- unsigned long cbm);
-enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
-int rdtgroup_tasks_assigned(struct rdtgroup *r);
-int closids_supported(void);
-void closid_free(int closid);
-int alloc_rmid(u32 closid);
-void free_rmid(u32 closid, u32 rmid);
-int rdt_get_mon_l3_config(struct rdt_resource *r);
-void resctrl_mon_resource_exit(void);
-bool __init rdt_cpu_has(int flag);
-void mon_event_count(void *info);
-int rdtgroup_mondata_show(struct seq_file *m, void *arg);
-void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
- struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
- cpumask_t *cpumask, int evtid, int first);
-int __init resctrl_mon_resource_init(void);
-void mbm_setup_overflow_handler(struct rdt_mon_domain *dom,
- unsigned long delay_ms,
- int exclude_cpu);
-void mbm_handle_overflow(struct work_struct *work);
-void __init intel_rdt_mbm_apply_quirk(void);
-bool is_mba_sc(struct rdt_resource *r);
-void cqm_setup_limbo_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
- int exclude_cpu);
-void cqm_handle_limbo(struct work_struct *work);
-bool has_busy_rmid(struct rdt_mon_domain *d);
-void __check_limbo(struct rdt_mon_domain *d, bool force_free);
-void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
-void resctrl_file_fflags_init(const char *config, unsigned long fflags);
-void rdt_staged_configs_clear(void);
-bool closid_allocated(unsigned int closid);
-int resctrl_find_cleanest_closid(void);
-
-#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
-int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
-int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm);
-bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d);
-int rdt_pseudo_lock_init(void);
-void rdt_pseudo_lock_release(void);
-int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
-void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
-#else
-static inline int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
-{
- return -EOPNOTSUPP;
-}
-static inline int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
-{
- return -EOPNOTSUPP;
-}
-
-static inline bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm)
-{
- return false;
-}
+int rdt_get_mon_l3_config(struct rdt_resource *r);
-static inline bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
-{
- return false;
-}
+bool rdt_cpu_has(int flag);
-static inline int rdt_pseudo_lock_init(void) { return 0; }
-static inline void rdt_pseudo_lock_release(void) { }
-static inline int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
-{
- return -EOPNOTSUPP;
-}
+void __init intel_rdt_mbm_apply_quirk(void);
-static inline void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) { }
-#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */
+void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
#endif /* _ASM_X86_RESCTRL_INTERNAL_H */
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index a93ed7d2a160..c261558276cd 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -18,62 +18,12 @@
#define pr_fmt(fmt) "resctrl: " fmt
#include <linux/cpu.h>
-#include <linux/module.h>
-#include <linux/sizes.h>
-#include <linux/slab.h>
+#include <linux/resctrl.h>
#include <asm/cpu_device_id.h>
-#include <asm/resctrl.h>
+#include <asm/msr.h>
#include "internal.h"
-#include "trace.h"
-
-/**
- * struct rmid_entry - dirty tracking for all RMID.
- * @closid: The CLOSID for this entry.
- * @rmid: The RMID for this entry.
- * @busy: The number of domains with cached data using this RMID.
- * @list: Member of the rmid_free_lru list when busy == 0.
- *
- * Depending on the architecture the correct monitor is accessed using
- * both @closid and @rmid, or @rmid only.
- *
- * Take the rdtgroup_mutex when accessing.
- */
-struct rmid_entry {
- u32 closid;
- u32 rmid;
- int busy;
- struct list_head list;
-};
-
-/*
- * @rmid_free_lru - A least recently used list of free RMIDs
- * These RMIDs are guaranteed to have an occupancy less than the
- * threshold occupancy
- */
-static LIST_HEAD(rmid_free_lru);
-
-/*
- * @closid_num_dirty_rmid The number of dirty RMID each CLOSID has.
- * Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined.
- * Indexed by CLOSID. Protected by rdtgroup_mutex.
- */
-static u32 *closid_num_dirty_rmid;
-
-/*
- * @rmid_limbo_count - count of currently unused but (potentially)
- * dirty RMIDs.
- * This counts RMIDs that no one is currently using but that
- * may have an occupancy value > resctrl_rmid_realloc_threshold. User can
- * change the threshold occupancy value.
- */
-static unsigned int rmid_limbo_count;
-
-/*
- * @rmid_entry - The entry in the limbo and free lists.
- */
-static struct rmid_entry *rmid_ptrs;
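
The declarations removed above describe a simple allocation scheme: free RMIDs sit on an LRU list, while RMIDs that were released but may still have cached data ("limbo") are held back until their measured occupancy drops below a threshold. A minimal userspace sketch of that bookkeeping — all names, sizes and the threshold below are invented for illustration, not taken from the kernel — could look like this:

	#include <stdio.h>

	/* Toy model of the free/limbo RMID bookkeeping described above. */
	#define NUM_RMID	8
	#define THRESHOLD	1024	/* pretend "clean" LLC occupancy, in bytes */

	struct toy_rmid {
		unsigned long	occupancy;	/* pretend LLC occupancy reading */
		int		free;		/* on the free list? */
		int		limbo;		/* released but still dirty? */
	};

	static struct toy_rmid pool[NUM_RMID];

	static int toy_alloc_rmid(void)
	{
		for (unsigned int i = 1; i < NUM_RMID; i++) {	/* RMID 0 stays reserved */
			if (pool[i].free) {
				pool[i].free = 0;
				return i;
			}
		}
		return -1;	/* nothing free; caller retries after limbo drains */
	}

	static void toy_free_rmid(unsigned int rmid, unsigned long occupancy)
	{
		pool[rmid].occupancy = occupancy;
		if (occupancy >= THRESHOLD)
			pool[rmid].limbo = 1;	/* still dirty: park it in limbo */
		else
			pool[rmid].free = 1;	/* clean enough: back on the free list */
	}

	static void toy_check_limbo(void)
	{
		for (unsigned int i = 1; i < NUM_RMID; i++) {
			if (pool[i].limbo && pool[i].occupancy < THRESHOLD) {
				pool[i].limbo = 0;
				pool[i].free = 1;
			}
		}
	}

	int main(void)
	{
		for (unsigned int i = 1; i < NUM_RMID; i++)
			pool[i].free = 1;

		int r = toy_alloc_rmid();
		toy_free_rmid(r, 4096);		/* released while still holding cache */
		printf("free=%d limbo=%d\n", pool[r].free, pool[r].limbo);
		pool[r].occupancy = 100;	/* pretend the occupancy drained */
		toy_check_limbo();
		printf("free=%d limbo=%d\n", pool[r].free, pool[r].limbo);
		return 0;
	}

The real code additionally keys everything by the encoded (CLOSID, RMID) index and drives the limbo scan from a per-domain delayed worker, as seen in the functions removed further down.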
/*
* Global boolean for rdt_monitor which is true if any
@@ -86,23 +36,12 @@ bool rdt_mon_capable;
*/
unsigned int rdt_mon_features;
-/*
- * This is the threshold cache occupancy in bytes at which we will consider an
- * RMID available for re-allocation.
- */
-unsigned int resctrl_rmid_realloc_threshold;
-
-/*
- * This is the maximum value for the reallocation threshold, in bytes.
- */
-unsigned int resctrl_rmid_realloc_limit;
-
#define CF(cf) ((unsigned long)(1048576 * (cf) + 0.5))
static int snc_nodes_per_l3_cache = 1;
/*
- * The correction factor table is documented in Documentation/arch/x86/resctrl.rst.
+ * The correction factor table is documented in Documentation/filesystems/resctrl.rst.
* If rmid > rmid threshold, MBM total and local values should be multiplied
* by the correction factor.
*
@@ -151,6 +90,7 @@ static const struct mbm_correction_factor_table {
};
static u32 mbm_cf_rmidthreshold __read_mostly = UINT_MAX;
+
static u64 mbm_cf __read_mostly;
static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val)
@@ -163,33 +103,6 @@ static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val)
}
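
Since CF() above stores each correction factor scaled by 2^20 (1048576), applying a factor is an integer multiply followed by a 20-bit shift, which is essentially what get_corrected_mbm_count() does once the RMID is above mbm_cf_rmidthreshold. A standalone sketch of that arithmetic, with a made-up factor and count:

	#include <stdint.h>
	#include <stdio.h>

	/* Same encoding as the CF() macro above: factor scaled by 2^20. */
	#define CF(cf)	((unsigned long)(1048576 * (cf) + 0.5))

	int main(void)
	{
		unsigned long cf = CF(1.15);		/* made-up correction factor */
		uint64_t raw = 1000000;			/* raw MBM count in bytes */
		uint64_t corrected = raw * cf >> 20;	/* multiply, then undo the scaling */

		printf("cf=%lu raw=%llu corrected=%llu\n",
		       cf, (unsigned long long)raw, (unsigned long long)corrected);
		return 0;
	}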
/*
- * x86 and arm64 differ in their handling of monitoring.
- * x86's RMID are independent numbers, there is only one source of traffic
- * with an RMID value of '1'.
- * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of
- * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID
- * value is no longer unique.
- * To account for this, resctrl uses an index. On x86 this is just the RMID,
- * on arm64 it encodes the CLOSID and RMID. This gives a unique number.
- *
- * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code
- * must accept an attempt to read every index.
- */
-static inline struct rmid_entry *__rmid_entry(u32 idx)
-{
- struct rmid_entry *entry;
- u32 closid, rmid;
-
- entry = &rmid_ptrs[idx];
- resctrl_arch_rmid_idx_decode(idx, &closid, &rmid);
-
- WARN_ON_ONCE(entry->closid != closid);
- WARN_ON_ONCE(entry->rmid != rmid);
-
- return entry;
-}
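
The helper removed here leans on the index abstraction the comment describes: on x86 the index is simply the RMID, while on arm64-style hardware it packs the CLOSID and RMID into a single number so every (PARTID, PMG) pair stays unique. One way to picture such an encoding — the bit split below is invented for illustration and is not the kernel's actual layout:

	#include <assert.h>
	#include <stdio.h>

	/* Hypothetical split: low 8 bits for the RMID/PMG, the rest for the CLOSID. */
	#define RMID_BITS	8

	static unsigned int idx_encode(unsigned int closid, unsigned int rmid)
	{
		return (closid << RMID_BITS) | rmid;
	}

	static void idx_decode(unsigned int idx, unsigned int *closid, unsigned int *rmid)
	{
		*closid = idx >> RMID_BITS;
		*rmid = idx & ((1u << RMID_BITS) - 1);
	}

	int main(void)
	{
		unsigned int closid, rmid;
		unsigned int idx = idx_encode(3, 42);

		idx_decode(idx, &closid, &rmid);
		assert(closid == 3 && rmid == 42);
		printf("idx=%u -> closid=%u rmid=%u\n", idx, closid, rmid);
		/* On x86 the CLOSID part would simply be ignored and idx == rmid. */
		return 0;
	}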
-
-/*
* When Sub-NUMA Cluster (SNC) mode is not enabled (as indicated by
* "snc_nodes_per_l3_cache == 1") no translation of the RMID value is
* needed. The physical RMID is the same as the logical RMID.
@@ -238,7 +151,7 @@ static int __rmid_read_phys(u32 prmid, enum resctrl_event_id eventid, u64 *val)
* are error bits.
*/
wrmsr(MSR_IA32_QM_EVTSEL, eventid, prmid);
- rdmsrl(MSR_IA32_QM_CTR, msr_val);
+ rdmsrq(MSR_IA32_QM_CTR, msr_val);
if (msr_val & RMID_VAL_ERROR)
return -EIO;
@@ -260,12 +173,11 @@ static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_mon_domain *hw_do
return &hw_dom->arch_mbm_total[rmid];
case QOS_L3_MBM_LOCAL_EVENT_ID:
return &hw_dom->arch_mbm_local[rmid];
+ default:
+ /* Never expect to get here */
+ WARN_ON_ONCE(1);
+ return NULL;
}
-
- /* Never expect to get here */
- WARN_ON_ONCE(1);
-
- return NULL;
}
void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
@@ -346,769 +258,6 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
return 0;
}
-static void limbo_release_entry(struct rmid_entry *entry)
-{
- lockdep_assert_held(&rdtgroup_mutex);
-
- rmid_limbo_count--;
- list_add_tail(&entry->list, &rmid_free_lru);
-
- if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
- closid_num_dirty_rmid[entry->closid]--;
-}
-
-/*
- * Check the RMIDs that are marked as busy for this domain. If the
- * reported LLC occupancy is below the threshold clear the busy bit and
- * decrement the count. If the busy count gets to zero on an RMID, we
- * free the RMID
- */
-void __check_limbo(struct rdt_mon_domain *d, bool force_free)
-{
- struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
- u32 idx_limit = resctrl_arch_system_num_rmid_idx();
- struct rmid_entry *entry;
- u32 idx, cur_idx = 1;
- void *arch_mon_ctx;
- bool rmid_dirty;
- u64 val = 0;
-
- arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID);
- if (IS_ERR(arch_mon_ctx)) {
- pr_warn_ratelimited("Failed to allocate monitor context: %ld",
- PTR_ERR(arch_mon_ctx));
- return;
- }
-
- /*
- * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
- * are marked as busy for occupancy < threshold. If the occupancy
- * is less than the threshold decrement the busy counter of the
- * RMID and move it to the free list when the counter reaches 0.
- */
- for (;;) {
- idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx);
- if (idx >= idx_limit)
- break;
-
- entry = __rmid_entry(idx);
- if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid,
- QOS_L3_OCCUP_EVENT_ID, &val,
- arch_mon_ctx)) {
- rmid_dirty = true;
- } else {
- rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
-
- /*
- * x86's CLOSID and RMID are independent numbers, so the entry's
- * CLOSID is an empty CLOSID (X86_RESCTRL_EMPTY_CLOSID). On Arm the
- * RMID (PMG) extends the CLOSID (PARTID) space with bits that aren't
- * used to select the configuration. It is thus necessary to track both
- * CLOSID and RMID because there may be dependencies between them
- * on some architectures.
- */
- trace_mon_llc_occupancy_limbo(entry->closid, entry->rmid, d->hdr.id, val);
- }
-
- if (force_free || !rmid_dirty) {
- clear_bit(idx, d->rmid_busy_llc);
- if (!--entry->busy)
- limbo_release_entry(entry);
- }
- cur_idx = idx + 1;
- }
-
- resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx);
-}
-
-bool has_busy_rmid(struct rdt_mon_domain *d)
-{
- u32 idx_limit = resctrl_arch_system_num_rmid_idx();
-
- return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit;
-}
-
-static struct rmid_entry *resctrl_find_free_rmid(u32 closid)
-{
- struct rmid_entry *itr;
- u32 itr_idx, cmp_idx;
-
- if (list_empty(&rmid_free_lru))
- return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC);
-
- list_for_each_entry(itr, &rmid_free_lru, list) {
- /*
- * Get the index of this free RMID, and the index it would need
- * to be if it were used with this CLOSID.
- * If the CLOSID is irrelevant on this architecture, the two
- * index values are always the same on every entry and thus the
- * very first entry will be returned.
- */
- itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid);
- cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid);
-
- if (itr_idx == cmp_idx)
- return itr;
- }
-
- return ERR_PTR(-ENOSPC);
-}
-
-/**
- * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated
- * RMID are clean, or the CLOSID that has
- * the most clean RMID.
- *
- * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID
- * may not be able to allocate clean RMID. To avoid this the allocator will
- * choose the CLOSID with the most clean RMID.
- *
- * When the CLOSID and RMID are independent numbers, the first free CLOSID will
- * be returned.
- */
-int resctrl_find_cleanest_closid(void)
-{
- u32 cleanest_closid = ~0;
- int i = 0;
-
- lockdep_assert_held(&rdtgroup_mutex);
-
- if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
- return -EIO;
-
- for (i = 0; i < closids_supported(); i++) {
- int num_dirty;
-
- if (closid_allocated(i))
- continue;
-
- num_dirty = closid_num_dirty_rmid[i];
- if (num_dirty == 0)
- return i;
-
- if (cleanest_closid == ~0)
- cleanest_closid = i;
-
- if (num_dirty < closid_num_dirty_rmid[cleanest_closid])
- cleanest_closid = i;
- }
-
- if (cleanest_closid == ~0)
- return -ENOSPC;
-
- return cleanest_closid;
-}
-
-/*
- * For MPAM the RMID value is not unique, and has to be considered with
- * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which
- * allows all domains to be managed by a single free list.
- * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler.
- */
-int alloc_rmid(u32 closid)
-{
- struct rmid_entry *entry;
-
- lockdep_assert_held(&rdtgroup_mutex);
-
- entry = resctrl_find_free_rmid(closid);
- if (IS_ERR(entry))
- return PTR_ERR(entry);
-
- list_del(&entry->list);
- return entry->rmid;
-}
-
-static void add_rmid_to_limbo(struct rmid_entry *entry)
-{
- struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
- struct rdt_mon_domain *d;
- u32 idx;
-
- lockdep_assert_held(&rdtgroup_mutex);
-
- /* Walking r->domains, ensure it can't race with cpuhp */
- lockdep_assert_cpus_held();
-
- idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);
-
- entry->busy = 0;
- list_for_each_entry(d, &r->mon_domains, hdr.list) {
- /*
- * For the first limbo RMID in the domain,
- * set up the limbo worker.
- */
- if (!has_busy_rmid(d))
- cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL,
- RESCTRL_PICK_ANY_CPU);
- set_bit(idx, d->rmid_busy_llc);
- entry->busy++;
- }
-
- rmid_limbo_count++;
- if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
- closid_num_dirty_rmid[entry->closid]++;
-}
-
-void free_rmid(u32 closid, u32 rmid)
-{
- u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
- struct rmid_entry *entry;
-
- lockdep_assert_held(&rdtgroup_mutex);
-
- /*
- * Do not allow the default rmid to be free'd. Comparing by index
- * allows architectures that ignore the closid parameter to avoid an
- * unnecessary check.
- */
- if (!resctrl_arch_mon_capable() ||
- idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
- RESCTRL_RESERVED_RMID))
- return;
-
- entry = __rmid_entry(idx);
-
- if (resctrl_arch_is_llc_occupancy_enabled())
- add_rmid_to_limbo(entry);
- else
- list_add_tail(&entry->list, &rmid_free_lru);
-}
-
-static struct mbm_state *get_mbm_state(struct rdt_mon_domain *d, u32 closid,
- u32 rmid, enum resctrl_event_id evtid)
-{
- u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
-
- switch (evtid) {
- case QOS_L3_MBM_TOTAL_EVENT_ID:
- return &d->mbm_total[idx];
- case QOS_L3_MBM_LOCAL_EVENT_ID:
- return &d->mbm_local[idx];
- default:
- return NULL;
- }
-}
-
-static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
-{
- int cpu = smp_processor_id();
- struct rdt_mon_domain *d;
- struct mbm_state *m;
- int err, ret;
- u64 tval = 0;
-
- if (rr->first) {
- resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid);
- m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
- if (m)
- memset(m, 0, sizeof(struct mbm_state));
- return 0;
- }
-
- if (rr->d) {
- /* Reading a single domain, must be on a CPU in that domain. */
- if (!cpumask_test_cpu(cpu, &rr->d->hdr.cpu_mask))
- return -EINVAL;
- rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid,
- rr->evtid, &tval, rr->arch_mon_ctx);
- if (rr->err)
- return rr->err;
-
- rr->val += tval;
-
- return 0;
- }
-
- /* Summing domains that share a cache, must be on a CPU for that cache. */
- if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
- return -EINVAL;
-
- /*
- * Legacy files must report the sum of an event across all
- * domains that share the same L3 cache instance.
- * Report success if a read from any domain succeeds, -EINVAL
- * (translated to "Unavailable" for user space) if reading from
- * all domains fail for any reason.
- */
- ret = -EINVAL;
- list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
- if (d->ci->id != rr->ci->id)
- continue;
- err = resctrl_arch_rmid_read(rr->r, d, closid, rmid,
- rr->evtid, &tval, rr->arch_mon_ctx);
- if (!err) {
- rr->val += tval;
- ret = 0;
- }
- }
-
- if (ret)
- rr->err = ret;
-
- return ret;
-}
-
-/*
- * mbm_bw_count() - Update bw count from values previously read by
- * __mon_event_count().
- * @closid: The closid used to identify the cached mbm_state.
- * @rmid: The rmid used to identify the cached mbm_state.
- * @rr: The struct rmid_read populated by __mon_event_count().
- *
- * Supporting function to calculate the memory bandwidth
- * and delta bandwidth in MBps. The chunks value previously read by
- * __mon_event_count() is compared with the chunks value from the previous
- * invocation. This must be called once per second to maintain values in MBps.
- */
-static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr)
-{
- u64 cur_bw, bytes, cur_bytes;
- struct mbm_state *m;
-
- m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
- if (WARN_ON_ONCE(!m))
- return;
-
- cur_bytes = rr->val;
- bytes = cur_bytes - m->prev_bw_bytes;
- m->prev_bw_bytes = cur_bytes;
-
- cur_bw = bytes / SZ_1M;
-
- m->prev_bw = cur_bw;
-}
-
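Because mbm_bw_count() is driven once per second from the overflow worker, the arithmetic above is just a byte delta converted to MiB: if the chunk counter advanced by 314572800 bytes (300 MiB) since the previous pass, bytes = 314572800 and prev_bw = 314572800 / SZ_1M = 300, i.e. roughly 300 MBps for that one-second interval.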
-/*
- * This is scheduled by mon_event_read() to read the CQM/MBM counters
- * on a domain.
- */
-void mon_event_count(void *info)
-{
- struct rdtgroup *rdtgrp, *entry;
- struct rmid_read *rr = info;
- struct list_head *head;
- int ret;
-
- rdtgrp = rr->rgrp;
-
- ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr);
-
- /*
- * For Ctrl groups read data from child monitor groups and
- * add them together. Count events which are read successfully.
- * Discard the rmid_read's reporting errors.
- */
- head = &rdtgrp->mon.crdtgrp_list;
-
- if (rdtgrp->type == RDTCTRL_GROUP) {
- list_for_each_entry(entry, head, mon.crdtgrp_list) {
- if (__mon_event_count(entry->closid, entry->mon.rmid,
- rr) == 0)
- ret = 0;
- }
- }
-
- /*
- * __mon_event_count() calls for newly created monitor groups may
- * report -EINVAL/Unavailable if the monitor hasn't seen any traffic.
- * Discard error if any of the monitor event reads succeeded.
- */
- if (ret == 0)
- rr->err = 0;
-}
-
-static struct rdt_ctrl_domain *get_ctrl_domain_from_cpu(int cpu,
- struct rdt_resource *r)
-{
- struct rdt_ctrl_domain *d;
-
- lockdep_assert_cpus_held();
-
- list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
- /* Find the domain that contains this CPU */
- if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
- return d;
- }
-
- return NULL;
-}
-
-/*
- * Feedback loop for MBA software controller (mba_sc)
- *
- * mba_sc is a feedback loop where we periodically read MBM counters and
- * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
- * that:
- *
- * current bandwidth(cur_bw) < user specified bandwidth(user_bw)
- *
- * This uses the MBM counters to measure the bandwidth and MBA throttle
- * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
- * fact that resctrl rdtgroups have both monitoring and control.
- *
- * The frequency of the checks is 1s and we just tag along the MBM overflow
- * timer. Having 1s interval makes the calculation of bandwidth simpler.
- *
- * Although MBA's goal is to restrict the bandwidth to a maximum, there may
- * be a need to increase the bandwidth to avoid unnecessarily restricting
- * the L2 <-> L3 traffic.
- *
- * Since MBA controls the L2 external bandwidth whereas MBM measures the
- * L3 external bandwidth the following sequence could lead to such a
- * situation.
- *
- * Consider an rdtgroup which had high L3 <-> memory traffic in initial
- * phases -> mba_sc kicks in and reduced bandwidth percentage values -> but
- * after some time rdtgroup has mostly L2 <-> L3 traffic.
- *
- * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
- * throttle MSRs already have low percentage values. To avoid
- * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
- */
-static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_mon_domain *dom_mbm)
-{
- u32 closid, rmid, cur_msr_val, new_msr_val;
- struct mbm_state *pmbm_data, *cmbm_data;
- struct rdt_ctrl_domain *dom_mba;
- enum resctrl_event_id evt_id;
- struct rdt_resource *r_mba;
- struct list_head *head;
- struct rdtgroup *entry;
- u32 cur_bw, user_bw;
-
- r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
- evt_id = rgrp->mba_mbps_event;
-
- closid = rgrp->closid;
- rmid = rgrp->mon.rmid;
- pmbm_data = get_mbm_state(dom_mbm, closid, rmid, evt_id);
- if (WARN_ON_ONCE(!pmbm_data))
- return;
-
- dom_mba = get_ctrl_domain_from_cpu(smp_processor_id(), r_mba);
- if (!dom_mba) {
- pr_warn_once("Failure to get domain for MBA update\n");
- return;
- }
-
- cur_bw = pmbm_data->prev_bw;
- user_bw = dom_mba->mbps_val[closid];
-
- /* MBA resource doesn't support CDP */
- cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
-
- /*
- * For Ctrl groups read data from child monitor groups.
- */
- head = &rgrp->mon.crdtgrp_list;
- list_for_each_entry(entry, head, mon.crdtgrp_list) {
- cmbm_data = get_mbm_state(dom_mbm, entry->closid, entry->mon.rmid, evt_id);
- if (WARN_ON_ONCE(!cmbm_data))
- return;
- cur_bw += cmbm_data->prev_bw;
- }
-
- /*
- * Scale up/down the bandwidth linearly for the ctrl group. The
- * bandwidth step is the bandwidth granularity specified by the
- * hardware.
- * Always increase throttling if current bandwidth is above the
- * target set by user.
- * But avoid thrashing up and down on every poll by checking
- * whether a decrease in throttling is likely to push the group
- * back over target. E.g. if currently throttling to 30% of bandwidth
- * on a system with 10% granularity steps, check whether moving to
- * 40% would go past the limit by multiplying current bandwidth by
- * "(30 + 10) / 30".
- */
- if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
- new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
- } else if (cur_msr_val < MAX_MBA_BW &&
- (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) {
- new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
- } else {
- return;
- }
-
- resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
-}
-
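
To make the comment's "(30 + 10) / 30" headroom check concrete, the throttle decision can be pulled out as a standalone function. The percentages below are illustrative only; the real code reads membw.min_bw, membw.bw_gran and MAX_MBA_BW from the resource, as shown above:

	#include <stdio.h>

	/* Illustrative percentages only. */
	#define MIN_BW	10U	/* stands in for membw.min_bw */
	#define BW_GRAN	10U	/* stands in for membw.bw_gran */
	#define MAX_BW	100U	/* stands in for MAX_MBA_BW */

	/* Same shape as the decision in update_mba_bw() above. */
	static unsigned int next_throttle(unsigned int cur_msr_val,
					  unsigned int cur_bw, unsigned int user_bw)
	{
		if (cur_msr_val > MIN_BW && user_bw < cur_bw)
			return cur_msr_val - BW_GRAN;	/* over target: throttle harder */
		if (cur_msr_val < MAX_BW &&
		    user_bw > (cur_bw * (cur_msr_val + MIN_BW) / cur_msr_val))
			return cur_msr_val + BW_GRAN;	/* headroom left: relax a step */
		return cur_msr_val;			/* otherwise leave it alone */
	}

	int main(void)
	{
		/* Throttled to 30%, measuring 600 MBps against a 1000 MBps target:
		 * 600 * (30 + 10) / 30 = 800 < 1000, so stepping up to 40% is safe. */
		printf("next throttle value: %u%%\n", next_throttle(30, 600, 1000));
		return 0;
	}

With the example from the comment — throttled to 30% on 10% granularity, measuring 600 MBps against a 1000 MBps target — relaxing to 40% is allowed because 600 * 40 / 30 = 800 still stays under the target.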
-static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *d,
- u32 closid, u32 rmid, enum resctrl_event_id evtid)
-{
- struct rmid_read rr = {0};
-
- rr.r = r;
- rr.d = d;
- rr.evtid = evtid;
- rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
- if (IS_ERR(rr.arch_mon_ctx)) {
- pr_warn_ratelimited("Failed to allocate monitor context: %ld",
- PTR_ERR(rr.arch_mon_ctx));
- return;
- }
-
- __mon_event_count(closid, rmid, &rr);
-
- /*
- * If the software controller is enabled, compute the
- * bandwidth for this event id.
- */
- if (is_mba_sc(NULL))
- mbm_bw_count(closid, rmid, &rr);
-
- resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
-}
-
-static void mbm_update(struct rdt_resource *r, struct rdt_mon_domain *d,
- u32 closid, u32 rmid)
-{
- /*
- * This is protected from concurrent reads from user as both
- * the user and overflow handler hold the global mutex.
- */
- if (resctrl_arch_is_mbm_total_enabled())
- mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_TOTAL_EVENT_ID);
-
- if (resctrl_arch_is_mbm_local_enabled())
- mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_LOCAL_EVENT_ID);
-}
-
-/*
- * Handler to scan the limbo list and move the RMIDs
- * to free list whose occupancy < threshold_occupancy.
- */
-void cqm_handle_limbo(struct work_struct *work)
-{
- unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
- struct rdt_mon_domain *d;
-
- cpus_read_lock();
- mutex_lock(&rdtgroup_mutex);
-
- d = container_of(work, struct rdt_mon_domain, cqm_limbo.work);
-
- __check_limbo(d, false);
-
- if (has_busy_rmid(d)) {
- d->cqm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
- RESCTRL_PICK_ANY_CPU);
- schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo,
- delay);
- }
-
- mutex_unlock(&rdtgroup_mutex);
- cpus_read_unlock();
-}
-
-/**
- * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this
- * domain.
- * @dom: The domain the limbo handler should run for.
- * @delay_ms: How far in the future the handler should run.
- * @exclude_cpu: Which CPU the handler should not run on,
- * RESCTRL_PICK_ANY_CPU to pick any CPU.
- */
-void cqm_setup_limbo_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
- int exclude_cpu)
-{
- unsigned long delay = msecs_to_jiffies(delay_ms);
- int cpu;
-
- cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
- dom->cqm_work_cpu = cpu;
-
- if (cpu < nr_cpu_ids)
- schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
-}
-
-void mbm_handle_overflow(struct work_struct *work)
-{
- unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
- struct rdtgroup *prgrp, *crgrp;
- struct rdt_mon_domain *d;
- struct list_head *head;
- struct rdt_resource *r;
-
- cpus_read_lock();
- mutex_lock(&rdtgroup_mutex);
-
- /*
- * If the filesystem has been unmounted this work no longer needs to
- * run.
- */
- if (!resctrl_mounted || !resctrl_arch_mon_capable())
- goto out_unlock;
-
- r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
- d = container_of(work, struct rdt_mon_domain, mbm_over.work);
-
- list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
- mbm_update(r, d, prgrp->closid, prgrp->mon.rmid);
-
- head = &prgrp->mon.crdtgrp_list;
- list_for_each_entry(crgrp, head, mon.crdtgrp_list)
- mbm_update(r, d, crgrp->closid, crgrp->mon.rmid);
-
- if (is_mba_sc(NULL))
- update_mba_bw(prgrp, d);
- }
-
- /*
- * Re-check for housekeeping CPUs. This allows the overflow handler to
- * move off a nohz_full CPU quickly.
- */
- d->mbm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
- RESCTRL_PICK_ANY_CPU);
- schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay);
-
-out_unlock:
- mutex_unlock(&rdtgroup_mutex);
- cpus_read_unlock();
-}
-
-/**
- * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this
- * domain.
- * @dom: The domain the overflow handler should run for.
- * @delay_ms: How far in the future the handler should run.
- * @exclude_cpu: Which CPU the handler should not run on,
- * RESCTRL_PICK_ANY_CPU to pick any CPU.
- */
-void mbm_setup_overflow_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
- int exclude_cpu)
-{
- unsigned long delay = msecs_to_jiffies(delay_ms);
- int cpu;
-
- /*
- * When a domain comes online there is no guarantee the filesystem is
- * mounted. If not, there is no need to catch counter overflow.
- */
- if (!resctrl_mounted || !resctrl_arch_mon_capable())
- return;
- cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
- dom->mbm_work_cpu = cpu;
-
- if (cpu < nr_cpu_ids)
- schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
-}
-
-static int dom_data_init(struct rdt_resource *r)
-{
- u32 idx_limit = resctrl_arch_system_num_rmid_idx();
- u32 num_closid = resctrl_arch_get_num_closid(r);
- struct rmid_entry *entry = NULL;
- int err = 0, i;
- u32 idx;
-
- mutex_lock(&rdtgroup_mutex);
- if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
- u32 *tmp;
-
- /*
- * If the architecture hasn't provided a sanitised value here,
- * this may result in larger arrays than necessary. Resctrl will
- * use a smaller system wide value based on the resources in
- * use.
- */
- tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL);
- if (!tmp) {
- err = -ENOMEM;
- goto out_unlock;
- }
-
- closid_num_dirty_rmid = tmp;
- }
-
- rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL);
- if (!rmid_ptrs) {
- if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
- kfree(closid_num_dirty_rmid);
- closid_num_dirty_rmid = NULL;
- }
- err = -ENOMEM;
- goto out_unlock;
- }
-
- for (i = 0; i < idx_limit; i++) {
- entry = &rmid_ptrs[i];
- INIT_LIST_HEAD(&entry->list);
-
- resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid);
- list_add_tail(&entry->list, &rmid_free_lru);
- }
-
- /*
- * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and
- * are always allocated. These are used for the rdtgroup_default
- * control group, which will be setup later in resctrl_init().
- */
- idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
- RESCTRL_RESERVED_RMID);
- entry = __rmid_entry(idx);
- list_del(&entry->list);
-
-out_unlock:
- mutex_unlock(&rdtgroup_mutex);
-
- return err;
-}
-
-static void dom_data_exit(struct rdt_resource *r)
-{
- mutex_lock(&rdtgroup_mutex);
-
- if (!r->mon_capable)
- goto out_unlock;
-
- if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
- kfree(closid_num_dirty_rmid);
- closid_num_dirty_rmid = NULL;
- }
-
- kfree(rmid_ptrs);
- rmid_ptrs = NULL;
-
-out_unlock:
- mutex_unlock(&rdtgroup_mutex);
-}
-
-static struct mon_evt llc_occupancy_event = {
- .name = "llc_occupancy",
- .evtid = QOS_L3_OCCUP_EVENT_ID,
-};
-
-static struct mon_evt mbm_total_event = {
- .name = "mbm_total_bytes",
- .evtid = QOS_L3_MBM_TOTAL_EVENT_ID,
-};
-
-static struct mon_evt mbm_local_event = {
- .name = "mbm_local_bytes",
- .evtid = QOS_L3_MBM_LOCAL_EVENT_ID,
-};
-
-/*
- * Initialize the event list for the resource.
- *
- * Note that MBM events are also part of RDT_RESOURCE_L3 resource
- * because as per the SDM the total and local memory bandwidth
- * are enumerated as part of L3 monitoring.
- */
-static void l3_mon_evt_init(struct rdt_resource *r)
-{
- INIT_LIST_HEAD(&r->evt_list);
-
- if (resctrl_arch_is_llc_occupancy_enabled())
- list_add_tail(&llc_occupancy_event.list, &r->evt_list);
- if (resctrl_arch_is_mbm_total_enabled())
- list_add_tail(&mbm_total_event.list, &r->evt_list);
- if (resctrl_arch_is_mbm_local_enabled())
- list_add_tail(&mbm_local_event.list, &r->evt_list);
-}
-
/*
* The power-on reset value of MSR_RMID_SNC_CONFIG is 0x1
* which indicates that RMIDs are configured in legacy mode.
@@ -1192,51 +341,6 @@ static __init int snc_get_config(void)
return ret;
}
-/**
- * resctrl_mon_resource_init() - Initialise global monitoring structures.
- *
- * Allocate and initialise global monitor resources that do not belong to a
- * specific domain. i.e. the rmid_ptrs[] used for the limbo and free lists.
- * Called once during boot after the struct rdt_resource's have been configured
- * but before the filesystem is mounted.
- * Resctrl's cpuhp callbacks may be called before this point to bring a domain
- * online.
- *
- * Returns 0 for success, or -ENOMEM.
- */
-int __init resctrl_mon_resource_init(void)
-{
- struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
- int ret;
-
- if (!r->mon_capable)
- return 0;
-
- ret = dom_data_init(r);
- if (ret)
- return ret;
-
- l3_mon_evt_init(r);
-
- if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) {
- mbm_total_event.configurable = true;
- resctrl_file_fflags_init("mbm_total_bytes_config",
- RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
- }
- if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) {
- mbm_local_event.configurable = true;
- resctrl_file_fflags_init("mbm_local_bytes_config",
- RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
- }
-
- if (resctrl_arch_is_mbm_local_enabled())
- mba_mbps_default_event = QOS_L3_MBM_LOCAL_EVENT_ID;
- else if (resctrl_arch_is_mbm_total_enabled())
- mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID;
-
- return 0;
-}
-
int __init rdt_get_mon_l3_config(struct rdt_resource *r)
{
unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
@@ -1284,13 +388,6 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
return 0;
}
-void resctrl_mon_resource_exit(void)
-{
- struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
-
- dom_data_exit(r);
-}
-
void __init intel_rdt_mbm_apply_quirk(void)
{
int cf_index;
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 92ea1472bde9..de580eca3363 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -11,26 +11,22 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cacheflush.h>
#include <linux/cpu.h>
-#include <linux/cpumask.h>
-#include <linux/debugfs.h>
-#include <linux/kthread.h>
-#include <linux/mman.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
+#include <linux/resctrl.h>
-#include <asm/cacheflush.h>
#include <asm/cpu_device_id.h>
-#include <asm/resctrl.h>
#include <asm/perf_event.h>
+#include <asm/msr.h>
#include "../../events/perf_event.h" /* For X86_CONFIG() */
#include "internal.h"
#define CREATE_TRACE_POINTS
-#include "trace.h"
+
+#include "pseudo_lock_trace.h"
/*
* The bits needed to disable hardware prefetching varies based on the
@@ -38,29 +34,6 @@
*/
static u64 prefetch_disable_bits;
-/*
- * Major number assigned to and shared by all devices exposing
- * pseudo-locked regions.
- */
-static unsigned int pseudo_lock_major;
-static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);
-
-static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode)
-{
- const struct rdtgroup *rdtgrp;
-
- rdtgrp = dev_get_drvdata(dev);
- if (mode)
- *mode = 0600;
- guard(mutex)(&rdtgroup_mutex);
- return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdt_kn_name(rdtgrp->kn));
-}
-
-static const struct class pseudo_lock_class = {
- .name = "pseudo_lock",
- .devnode = pseudo_lock_devnode,
-};
-
/**
* resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported
* platforms
@@ -122,298 +95,6 @@ u64 resctrl_arch_get_prefetch_disable_bits(void)
}
/**
- * pseudo_lock_minor_get - Obtain available minor number
- * @minor: Pointer to where new minor number will be stored
- *
- * A bitmask is used to track available minor numbers. Here the next free
- * minor number is marked as unavailable and returned.
- *
- * Return: 0 on success, <0 on failure.
- */
-static int pseudo_lock_minor_get(unsigned int *minor)
-{
- unsigned long first_bit;
-
- first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS);
-
- if (first_bit == MINORBITS)
- return -ENOSPC;
-
- __clear_bit(first_bit, &pseudo_lock_minor_avail);
- *minor = first_bit;
-
- return 0;
-}
-
-/**
- * pseudo_lock_minor_release - Return minor number to available
- * @minor: The minor number made available
- */
-static void pseudo_lock_minor_release(unsigned int minor)
-{
- __set_bit(minor, &pseudo_lock_minor_avail);
-}
-
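
The two helpers removed above form a textbook bitmap ID allocator: the lowest set bit names the next free minor number, allocation clears it, release sets it again. A self-contained sketch of the same idea, using a plain unsigned long and a compiler builtin instead of the kernel's bitmap helpers:

	#include <stdio.h>

	static unsigned long minor_avail = ~0UL;	/* bit set == minor number free */

	static int minor_get(unsigned int *minor)
	{
		if (!minor_avail)
			return -1;			/* all minors in use */
		*minor = __builtin_ctzl(minor_avail);	/* lowest free bit, like find_first_bit() */
		minor_avail &= ~(1UL << *minor);	/* mark it busy */
		return 0;
	}

	static void minor_release(unsigned int minor)
	{
		minor_avail |= 1UL << minor;		/* hand it back */
	}

	int main(void)
	{
		unsigned int a, b;

		minor_get(&a);
		minor_get(&b);
		printf("got minors %u and %u\n", a, b);	/* 0 and 1 */
		minor_release(a);
		minor_get(&a);
		printf("reused minor %u\n", a);		/* 0 again */
		return 0;
	}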
-/**
- * region_find_by_minor - Locate a pseudo-lock region by inode minor number
- * @minor: The minor number of the device representing pseudo-locked region
- *
- * When the character device is accessed we need to determine which
- * pseudo-locked region it belongs to. This is done by matching the minor
- * number of the device to the pseudo-locked region it belongs.
- *
- * Minor numbers are assigned at the time a pseudo-locked region is associated
- * with a cache instance.
- *
- * Return: On success return pointer to resource group owning the pseudo-locked
- * region, NULL on failure.
- */
-static struct rdtgroup *region_find_by_minor(unsigned int minor)
-{
- struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;
-
- list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
- if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
- rdtgrp_match = rdtgrp;
- break;
- }
- }
- return rdtgrp_match;
-}
-
-/**
- * struct pseudo_lock_pm_req - A power management QoS request list entry
- * @list: Entry within the @pm_reqs list for a pseudo-locked region
- * @req: PM QoS request
- */
-struct pseudo_lock_pm_req {
- struct list_head list;
- struct dev_pm_qos_request req;
-};
-
-static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
-{
- struct pseudo_lock_pm_req *pm_req, *next;
-
- list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
- dev_pm_qos_remove_request(&pm_req->req);
- list_del(&pm_req->list);
- kfree(pm_req);
- }
-}
-
-/**
- * pseudo_lock_cstates_constrain - Restrict cores from entering C6
- * @plr: Pseudo-locked region
- *
- * To prevent the cache from being affected by power management entering
- * C6 has to be avoided. This is accomplished by requesting a latency
- * requirement lower than lowest C6 exit latency of all supported
- * platforms as found in the cpuidle state tables in the intel_idle driver.
- * At this time it is possible to do so with a single latency requirement
- * for all supported platforms.
- *
- * Since Goldmont is supported, which is affected by X86_BUG_MONITOR,
- * the ACPI latencies need to be considered while keeping in mind that C2
- * may be set to map to deeper sleep states. In this case the latency
- * requirement needs to prevent entering C2 also.
- *
- * Return: 0 on success, <0 on failure
- */
-static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
-{
- struct pseudo_lock_pm_req *pm_req;
- int cpu;
- int ret;
-
- for_each_cpu(cpu, &plr->d->hdr.cpu_mask) {
- pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
- if (!pm_req) {
- rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
- ret = -ENOMEM;
- goto out_err;
- }
- ret = dev_pm_qos_add_request(get_cpu_device(cpu),
- &pm_req->req,
- DEV_PM_QOS_RESUME_LATENCY,
- 30);
- if (ret < 0) {
- rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
- cpu);
- kfree(pm_req);
- ret = -1;
- goto out_err;
- }
- list_add(&pm_req->list, &plr->pm_reqs);
- }
-
- return 0;
-
-out_err:
- pseudo_lock_cstates_relax(plr);
- return ret;
-}
-
-/**
- * pseudo_lock_region_clear - Reset pseudo-lock region data
- * @plr: pseudo-lock region
- *
- * All content of the pseudo-locked region is reset - any memory allocated
- * freed.
- *
- * Return: void
- */
-static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
-{
- plr->size = 0;
- plr->line_size = 0;
- kfree(plr->kmem);
- plr->kmem = NULL;
- plr->s = NULL;
- if (plr->d)
- plr->d->plr = NULL;
- plr->d = NULL;
- plr->cbm = 0;
- plr->debugfs_dir = NULL;
-}
-
-/**
- * pseudo_lock_region_init - Initialize pseudo-lock region information
- * @plr: pseudo-lock region
- *
- * Called after user provided a schemata to be pseudo-locked. From the
- * schemata the &struct pseudo_lock_region is on entry already initialized
- * with the resource, domain, and capacity bitmask. Here the information
- * required for pseudo-locking is deduced from this data and &struct
- * pseudo_lock_region initialized further. This information includes:
- * - size in bytes of the region to be pseudo-locked
- * - cache line size to know the stride with which data needs to be accessed
- * to be pseudo-locked
- * - a cpu associated with the cache instance on which the pseudo-locking
- * flow can be executed
- *
- * Return: 0 on success, <0 on failure. Descriptive error will be written
- * to last_cmd_status buffer.
- */
-static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
-{
- enum resctrl_scope scope = plr->s->res->ctrl_scope;
- struct cacheinfo *ci;
- int ret;
-
- if (WARN_ON_ONCE(scope != RESCTRL_L2_CACHE && scope != RESCTRL_L3_CACHE))
- return -ENODEV;
-
- /* Pick the first cpu we find that is associated with the cache. */
- plr->cpu = cpumask_first(&plr->d->hdr.cpu_mask);
-
- if (!cpu_online(plr->cpu)) {
- rdt_last_cmd_printf("CPU %u associated with cache not online\n",
- plr->cpu);
- ret = -ENODEV;
- goto out_region;
- }
-
- ci = get_cpu_cacheinfo_level(plr->cpu, scope);
- if (ci) {
- plr->line_size = ci->coherency_line_size;
- plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
- return 0;
- }
-
- ret = -1;
- rdt_last_cmd_puts("Unable to determine cache line size\n");
-out_region:
- pseudo_lock_region_clear(plr);
- return ret;
-}
-
-/**
- * pseudo_lock_init - Initialize a pseudo-lock region
- * @rdtgrp: resource group to which new pseudo-locked region will belong
- *
- * A pseudo-locked region is associated with a resource group. When this
- * association is created the pseudo-locked region is initialized. The
- * details of the pseudo-locked region are not known at this time so only
- * allocation is done and association established.
- *
- * Return: 0 on success, <0 on failure
- */
-static int pseudo_lock_init(struct rdtgroup *rdtgrp)
-{
- struct pseudo_lock_region *plr;
-
- plr = kzalloc(sizeof(*plr), GFP_KERNEL);
- if (!plr)
- return -ENOMEM;
-
- init_waitqueue_head(&plr->lock_thread_wq);
- INIT_LIST_HEAD(&plr->pm_reqs);
- rdtgrp->plr = plr;
- return 0;
-}
-
-/**
- * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
- * @plr: pseudo-lock region
- *
- * Initialize the details required to set up the pseudo-locked region and
- * allocate the contiguous memory that will be pseudo-locked to the cache.
- *
- * Return: 0 on success, <0 on failure. Descriptive error will be written
- * to last_cmd_status buffer.
- */
-static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
-{
- int ret;
-
- ret = pseudo_lock_region_init(plr);
- if (ret < 0)
- return ret;
-
- /*
- * We do not yet support contiguous regions larger than
- * KMALLOC_MAX_SIZE.
- */
- if (plr->size > KMALLOC_MAX_SIZE) {
- rdt_last_cmd_puts("Requested region exceeds maximum size\n");
- ret = -E2BIG;
- goto out_region;
- }
-
- plr->kmem = kzalloc(plr->size, GFP_KERNEL);
- if (!plr->kmem) {
- rdt_last_cmd_puts("Unable to allocate memory\n");
- ret = -ENOMEM;
- goto out_region;
- }
-
- ret = 0;
- goto out;
-out_region:
- pseudo_lock_region_clear(plr);
-out:
- return ret;
-}
-
-/**
- * pseudo_lock_free - Free a pseudo-locked region
- * @rdtgrp: resource group to which pseudo-locked region belonged
- *
- * The pseudo-locked region's resources have already been released, or not
- * yet created at this point. Now it can be freed and disassociated from the
- * resource group.
- *
- * Return: void
- */
-static void pseudo_lock_free(struct rdtgroup *rdtgrp)
-{
- pseudo_lock_region_clear(rdtgrp->plr);
- kfree(rdtgrp->plr);
- rdtgrp->plr = NULL;
-}
-
-/**
* resctrl_arch_pseudo_lock_fn - Load kernel memory into cache
* @_plr: the pseudo-lock region descriptor
*
@@ -481,8 +162,8 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* the buffer and evict pseudo-locked memory read earlier from the
* cache.
*/
- saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
- __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ saved_msr = native_rdmsrq(MSR_MISC_FEATURE_CONTROL);
+ native_wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
closid_p = this_cpu_read(pqr_state.cur_closid);
rmid_p = this_cpu_read(pqr_state.cur_rmid);
mem_r = plr->kmem;
@@ -494,7 +175,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* pseudo-locked followed by reading of kernel memory to load it
* into the cache.
*/
- __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);
+ native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);
/*
* Cache was flushed earlier. Now access kernel memory to read it
@@ -531,10 +212,10 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* Critical section end: restore closid with capacity bitmask that
* does not overlap with pseudo-locked region.
*/
- __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);
+ native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);
/* Re-enable the hardware prefetcher(s) */
- wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);
+ wrmsrq(MSR_MISC_FEATURE_CONTROL, saved_msr);
local_irq_enable();
plr->thread_done = 1;
@@ -543,340 +224,6 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
}
/**
- * rdtgroup_monitor_in_progress - Test if monitoring in progress
- * @rdtgrp: resource group being queried
- *
- * Return: 1 if monitor groups have been created for this resource
- * group, 0 otherwise.
- */
-static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp)
-{
- return !list_empty(&rdtgrp->mon.crdtgrp_list);
-}
-
-/**
- * rdtgroup_locksetup_user_restrict - Restrict user access to group
- * @rdtgrp: resource group needing access restricted
- *
- * A resource group used for cache pseudo-locking cannot have cpus or tasks
- * assigned to it. This is communicated to the user by restricting access
- * to all the files that can be used to make such changes.
- *
- * Permissions restored with rdtgroup_locksetup_user_restore()
- *
- * Return: 0 on success, <0 on failure. If a failure occurs during the
- * restriction of access an attempt will be made to restore permissions but
- * the state of the mode of these files will be uncertain when a failure
- * occurs.
- */
-static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp)
-{
- int ret;
-
- ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
- if (ret)
- return ret;
-
- ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
- if (ret)
- goto err_tasks;
-
- ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
- if (ret)
- goto err_cpus;
-
- if (resctrl_arch_mon_capable()) {
- ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups");
- if (ret)
- goto err_cpus_list;
- }
-
- ret = 0;
- goto out;
-
-err_cpus_list:
- rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
-err_cpus:
- rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
-err_tasks:
- rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
-out:
- return ret;
-}
-
-/**
- * rdtgroup_locksetup_user_restore - Restore user access to group
- * @rdtgrp: resource group needing access restored
- *
- * Restore all file access previously removed using
- * rdtgroup_locksetup_user_restrict()
- *
- * Return: 0 on success, <0 on failure. If a failure occurs during the
- * restoration of access an attempt will be made to restrict permissions
- * again but the state of the mode of these files will be uncertain when
- * a failure occurs.
- */
-static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp)
-{
- int ret;
-
- ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
- if (ret)
- return ret;
-
- ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
- if (ret)
- goto err_tasks;
-
- ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
- if (ret)
- goto err_cpus;
-
- if (resctrl_arch_mon_capable()) {
- ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777);
- if (ret)
- goto err_cpus_list;
- }
-
- ret = 0;
- goto out;
-
-err_cpus_list:
- rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
-err_cpus:
- rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
-err_tasks:
- rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
-out:
- return ret;
-}
-
-/**
- * rdtgroup_locksetup_enter - Resource group enters locksetup mode
- * @rdtgrp: resource group requested to enter locksetup mode
- *
- * A resource group enters locksetup mode to reflect that it would be used
- * to represent a pseudo-locked region and is in the process of being set
- * up to do so. A resource group used for a pseudo-locked region would
- * lose the closid associated with it so we cannot allow it to have any
- * tasks or cpus assigned nor permit tasks or cpus to be assigned in the
- * future. Monitoring of a pseudo-locked region is not allowed either.
- *
- * The above and more restrictions on a pseudo-locked region are checked
- * for and enforced before the resource group enters the locksetup mode.
- *
- * Returns: 0 if the resource group successfully entered locksetup mode, <0
- * on failure. On failure the last_cmd_status buffer is updated with text to
- * communicate details of failure to the user.
- */
-int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
-{
- int ret;
-
- /*
- * The default resource group can neither be removed nor lose the
- * default closid associated with it.
- */
- if (rdtgrp == &rdtgroup_default) {
- rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
- return -EINVAL;
- }
-
- /*
- * Cache Pseudo-locking not supported when CDP is enabled.
- *
- * Some things to consider if you would like to enable this
- * support (using L3 CDP as example):
- * - When CDP is enabled two separate resources are exposed,
- * L3DATA and L3CODE, but they are actually on the same cache.
- * The implication for pseudo-locking is that if a
- * pseudo-locked region is created on a domain of one
- * resource (eg. L3CODE), then a pseudo-locked region cannot
- * be created on that same domain of the other resource
- * (eg. L3DATA). This is because the creation of a
- * pseudo-locked region involves a call to wbinvd that will
- * affect all cache allocations on particular domain.
- * - Considering the previous, it may be possible to only
- * expose one of the CDP resources to pseudo-locking and
- * hide the other. For example, we could consider to only
- * expose L3DATA and since the L3 cache is unified it is
- * still possible to place instructions there and execute it.
- * - If only one region is exposed to pseudo-locking we should
- * still keep in mind that availability of a portion of cache
- * for pseudo-locking should take into account both resources.
- * Similarly, if a pseudo-locked region is created in one
- * resource, the portion of cache used by it should be made
- * unavailable to all future allocations from both resources.
- */
- if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) ||
- resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) {
- rdt_last_cmd_puts("CDP enabled\n");
- return -EINVAL;
- }
-
- /*
- * Not knowing the bits to disable prefetching implies that this
- * platform does not support Cache Pseudo-Locking.
- */
- if (resctrl_arch_get_prefetch_disable_bits() == 0) {
- rdt_last_cmd_puts("Pseudo-locking not supported\n");
- return -EINVAL;
- }
-
- if (rdtgroup_monitor_in_progress(rdtgrp)) {
- rdt_last_cmd_puts("Monitoring in progress\n");
- return -EINVAL;
- }
-
- if (rdtgroup_tasks_assigned(rdtgrp)) {
- rdt_last_cmd_puts("Tasks assigned to resource group\n");
- return -EINVAL;
- }
-
- if (!cpumask_empty(&rdtgrp->cpu_mask)) {
- rdt_last_cmd_puts("CPUs assigned to resource group\n");
- return -EINVAL;
- }
-
- if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
- rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
- return -EIO;
- }
-
- ret = pseudo_lock_init(rdtgrp);
- if (ret) {
- rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
- goto out_release;
- }
-
- /*
- * If this system is capable of monitoring a rmid would have been
- * allocated when the control group was created. This is not needed
- * anymore when this group would be used for pseudo-locking. This
- * is safe to call on platforms not capable of monitoring.
- */
- free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
-
- ret = 0;
- goto out;
-
-out_release:
- rdtgroup_locksetup_user_restore(rdtgrp);
-out:
- return ret;
-}
-
-/**
- * rdtgroup_locksetup_exit - resource group exits locksetup mode
- * @rdtgrp: resource group
- *
- * When a resource group exits locksetup mode the earlier restrictions are
- * lifted.
- *
- * Return: 0 on success, <0 on failure
- */
-int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
-{
- int ret;
-
- if (resctrl_arch_mon_capable()) {
- ret = alloc_rmid(rdtgrp->closid);
- if (ret < 0) {
- rdt_last_cmd_puts("Out of RMIDs\n");
- return ret;
- }
- rdtgrp->mon.rmid = ret;
- }
-
- ret = rdtgroup_locksetup_user_restore(rdtgrp);
- if (ret) {
- free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
- return ret;
- }
-
- pseudo_lock_free(rdtgrp);
- return 0;
-}
-
-/**
- * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
- * @d: RDT domain
- * @cbm: CBM to test
- *
- * @d represents a cache instance and @cbm a capacity bitmask that is
- * considered for it. Determine if @cbm overlaps with any existing
- * pseudo-locked region on @d.
- *
- * @cbm is unsigned long, even if only 32 bits are used, to make the
- * bitmap functions work correctly.
- *
- * Return: true if @cbm overlaps with pseudo-locked region on @d, false
- * otherwise.
- */
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm)
-{
- unsigned int cbm_len;
- unsigned long cbm_b;
-
- if (d->plr) {
- cbm_len = d->plr->s->res->cache.cbm_len;
- cbm_b = d->plr->cbm;
- if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
- return true;
- }
- return false;
-}
-
-/**
- * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
- * @d: RDT domain under test
- *
- * The setup of a pseudo-locked region affects all cache instances within
- * the hierarchy of the region. It is thus essential to know if any
- * pseudo-locked regions exist within a cache hierarchy to prevent any
- * attempts to create new pseudo-locked regions in the same hierarchy.
- *
- * Return: true if a pseudo-locked region exists in the hierarchy of @d or
- * if it is not possible to test due to memory allocation issue,
- * false otherwise.
- */
-bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
-{
- struct rdt_ctrl_domain *d_i;
- cpumask_var_t cpu_with_psl;
- struct rdt_resource *r;
- bool ret = false;
-
- /* Walking r->domains, ensure it can't race with cpuhp */
- lockdep_assert_cpus_held();
-
- if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
- return true;
-
- /*
- * First determine which cpus have pseudo-locked regions
- * associated with them.
- */
- for_each_alloc_capable_rdt_resource(r) {
- list_for_each_entry(d_i, &r->ctrl_domains, hdr.list) {
- if (d_i->plr)
- cpumask_or(cpu_with_psl, cpu_with_psl,
- &d_i->hdr.cpu_mask);
- }
- }
-
- /*
- * Next test if new pseudo-locked region would intersect with
- * existing region.
- */
- if (cpumask_intersects(&d->hdr.cpu_mask, cpu_with_psl))
- ret = true;
-
- free_cpumask_var(cpu_with_psl);
- return ret;
-}
-
-/**
* resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read
* pseudo-locked memory
* @_plr: pseudo-lock region to measure
@@ -904,7 +251,7 @@ int resctrl_arch_measure_cycles_lat_fn(void *_plr)
* Disable hardware prefetchers.
*/
rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
- wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
mem_r = READ_ONCE(plr->kmem);
/*
* Dummy execute of the time measurement to load the needed
@@ -1000,7 +347,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
* Disable hardware prefetchers.
*/
rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
- wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
/* Initialize rest of local variables */
/*
@@ -1018,8 +365,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
* used in L1 cache, second to capture accurate value that does not
* include cache misses incurred because of instruction loads.
*/
- rdpmcl(hit_pmcnum, hits_before);
- rdpmcl(miss_pmcnum, miss_before);
+ hits_before = rdpmc(hit_pmcnum);
+ miss_before = rdpmc(miss_pmcnum);
/*
* From SDM: Performing back-to-back fast reads are not guaranteed
* to be monotonic.
@@ -1027,8 +374,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
* before proceeding.
*/
rmb();
- rdpmcl(hit_pmcnum, hits_before);
- rdpmcl(miss_pmcnum, miss_before);
+ hits_before = rdpmc(hit_pmcnum);
+ miss_before = rdpmc(miss_pmcnum);
/*
* Use LFENCE to ensure all previous instructions are retired
* before proceeding.
@@ -1050,8 +397,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
* before proceeding.
*/
rmb();
- rdpmcl(hit_pmcnum, hits_after);
- rdpmcl(miss_pmcnum, miss_after);
+ hits_after = rdpmc(hit_pmcnum);
+ miss_after = rdpmc(miss_pmcnum);
/*
* Use LFENCE to ensure all previous instructions are retired
* before proceeding.
@@ -1168,433 +515,3 @@ out:
wake_up_interruptible(&plr->lock_thread_wq);
return 0;
}
-
-/**
- * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
- * @rdtgrp: Resource group to which the pseudo-locked region belongs.
- * @sel: Selector of which measurement to perform on a pseudo-locked region.
- *
- * The measurement of latency to access a pseudo-locked region should be
- * done from a cpu that is associated with that pseudo-locked region.
- * Determine which cpu is associated with this region and start a thread on
- * that cpu to perform the measurement, wait for that thread to complete.
- *
- * Return: 0 on success, <0 on failure
- */
-static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
-{
- struct pseudo_lock_region *plr = rdtgrp->plr;
- struct task_struct *thread;
- unsigned int cpu;
- int ret = -1;
-
- cpus_read_lock();
- mutex_lock(&rdtgroup_mutex);
-
- if (rdtgrp->flags & RDT_DELETED) {
- ret = -ENODEV;
- goto out;
- }
-
- if (!plr->d) {
- ret = -ENODEV;
- goto out;
- }
-
- plr->thread_done = 0;
- cpu = cpumask_first(&plr->d->hdr.cpu_mask);
- if (!cpu_online(cpu)) {
- ret = -ENODEV;
- goto out;
- }
-
- plr->cpu = cpu;
-
- if (sel == 1)
- thread = kthread_run_on_cpu(resctrl_arch_measure_cycles_lat_fn,
- plr, cpu, "pseudo_lock_measure/%u");
- else if (sel == 2)
- thread = kthread_run_on_cpu(resctrl_arch_measure_l2_residency,
- plr, cpu, "pseudo_lock_measure/%u");
- else if (sel == 3)
- thread = kthread_run_on_cpu(resctrl_arch_measure_l3_residency,
- plr, cpu, "pseudo_lock_measure/%u");
- else
- goto out;
-
- if (IS_ERR(thread)) {
- ret = PTR_ERR(thread);
- goto out;
- }
-
- ret = wait_event_interruptible(plr->lock_thread_wq,
- plr->thread_done == 1);
- if (ret < 0)
- goto out;
-
- ret = 0;
-
-out:
- mutex_unlock(&rdtgroup_mutex);
- cpus_read_unlock();
- return ret;
-}
-
-static ssize_t pseudo_lock_measure_trigger(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct rdtgroup *rdtgrp = file->private_data;
- size_t buf_size;
- char buf[32];
- int ret;
- int sel;
-
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
- ret = kstrtoint(buf, 10, &sel);
- if (ret == 0) {
- if (sel != 1 && sel != 2 && sel != 3)
- return -EINVAL;
- ret = debugfs_file_get(file->f_path.dentry);
- if (ret)
- return ret;
- ret = pseudo_lock_measure_cycles(rdtgrp, sel);
- if (ret == 0)
- ret = count;
- debugfs_file_put(file->f_path.dentry);
- }
-
- return ret;
-}
-
-static const struct file_operations pseudo_measure_fops = {
- .write = pseudo_lock_measure_trigger,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-/**
- * rdtgroup_pseudo_lock_create - Create a pseudo-locked region
- * @rdtgrp: resource group to which pseudo-lock region belongs
- *
- * Called when a resource group in the pseudo-locksetup mode receives a
- * valid schemata that should be pseudo-locked. Since the resource group is
- * in pseudo-locksetup mode the &struct pseudo_lock_region has already been
- * allocated and initialized with the essential information. If a failure
- * occurs the resource group remains in the pseudo-locksetup mode with the
- * &struct pseudo_lock_region associated with it, but cleared from all
- * information and ready for the user to re-attempt pseudo-locking by
- * writing the schemata again.
- *
- * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
- * on failure. Descriptive error will be written to last_cmd_status buffer.
- */
-int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
-{
- struct pseudo_lock_region *plr = rdtgrp->plr;
- struct task_struct *thread;
- unsigned int new_minor;
- struct device *dev;
- char *kn_name __free(kfree) = NULL;
- int ret;
-
- ret = pseudo_lock_region_alloc(plr);
- if (ret < 0)
- return ret;
-
- ret = pseudo_lock_cstates_constrain(plr);
- if (ret < 0) {
- ret = -EINVAL;
- goto out_region;
- }
- kn_name = kstrdup(rdt_kn_name(rdtgrp->kn), GFP_KERNEL);
- if (!kn_name) {
- ret = -ENOMEM;
- goto out_cstates;
- }
-
- plr->thread_done = 0;
-
- thread = kthread_run_on_cpu(resctrl_arch_pseudo_lock_fn, plr,
- plr->cpu, "pseudo_lock/%u");
- if (IS_ERR(thread)) {
- ret = PTR_ERR(thread);
- rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
- goto out_cstates;
- }
-
- ret = wait_event_interruptible(plr->lock_thread_wq,
- plr->thread_done == 1);
- if (ret < 0) {
- /*
- * If the thread does not get on the CPU for whatever
- * reason and the process which sets up the region is
- * interrupted then this will leave the thread in runnable
- * state and once it gets on the CPU it will dereference
- * the cleared, but not freed, plr struct resulting in an
- * empty pseudo-locking loop.
- */
- rdt_last_cmd_puts("Locking thread interrupted\n");
- goto out_cstates;
- }
-
- ret = pseudo_lock_minor_get(&new_minor);
- if (ret < 0) {
- rdt_last_cmd_puts("Unable to obtain a new minor number\n");
- goto out_cstates;
- }
-
- /*
- * Unlock access but do not release the reference. The
- * pseudo-locked region will still be here on return.
- *
- * The mutex has to be released temporarily to avoid a potential
- * deadlock with the mm->mmap_lock which is obtained in the
- * device_create() and debugfs_create_dir() callpath below as well as
- * before the mmap() callback is called.
- */
- mutex_unlock(&rdtgroup_mutex);
-
- if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
- plr->debugfs_dir = debugfs_create_dir(kn_name, debugfs_resctrl);
- if (!IS_ERR_OR_NULL(plr->debugfs_dir))
- debugfs_create_file("pseudo_lock_measure", 0200,
- plr->debugfs_dir, rdtgrp,
- &pseudo_measure_fops);
- }
-
- dev = device_create(&pseudo_lock_class, NULL,
- MKDEV(pseudo_lock_major, new_minor),
- rdtgrp, "%s", kn_name);
-
- mutex_lock(&rdtgroup_mutex);
-
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- rdt_last_cmd_printf("Failed to create character device: %d\n",
- ret);
- goto out_debugfs;
- }
-
- /* We released the mutex - check if group was removed while we did so */
- if (rdtgrp->flags & RDT_DELETED) {
- ret = -ENODEV;
- goto out_device;
- }
-
- plr->minor = new_minor;
-
- rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
- closid_free(rdtgrp->closid);
- rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
- rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);
-
- ret = 0;
- goto out;
-
-out_device:
- device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
-out_debugfs:
- debugfs_remove_recursive(plr->debugfs_dir);
- pseudo_lock_minor_release(new_minor);
-out_cstates:
- pseudo_lock_cstates_relax(plr);
-out_region:
- pseudo_lock_region_clear(plr);
-out:
- return ret;
-}
-
-/**
- * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
- * @rdtgrp: resource group to which the pseudo-locked region belongs
- *
- * The removal of a pseudo-locked region can be initiated when the resource
- * group is removed via a "rmdir" from userspace or by the unmount of the
- * resctrl filesystem. On removal the resource group does
- * not go back to pseudo-locksetup mode before it is removed, instead it is
- * removed directly. There is thus asymmetry with the creation where the
- * &struct pseudo_lock_region is removed here while it was not created in
- * rdtgroup_pseudo_lock_create().
- *
- * Return: void
- */
-void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
-{
- struct pseudo_lock_region *plr = rdtgrp->plr;
-
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
- /*
- * Default group cannot be a pseudo-locked region so we can
- * free closid here.
- */
- closid_free(rdtgrp->closid);
- goto free;
- }
-
- pseudo_lock_cstates_relax(plr);
- debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
- device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
- pseudo_lock_minor_release(plr->minor);
-
-free:
- pseudo_lock_free(rdtgrp);
-}
-
-static int pseudo_lock_dev_open(struct inode *inode, struct file *filp)
-{
- struct rdtgroup *rdtgrp;
-
- mutex_lock(&rdtgroup_mutex);
-
- rdtgrp = region_find_by_minor(iminor(inode));
- if (!rdtgrp) {
- mutex_unlock(&rdtgroup_mutex);
- return -ENODEV;
- }
-
- filp->private_data = rdtgrp;
- atomic_inc(&rdtgrp->waitcount);
- /* Perform a non-seekable open - llseek is not supported */
- filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
-
- mutex_unlock(&rdtgroup_mutex);
-
- return 0;
-}
-
-static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
-{
- struct rdtgroup *rdtgrp;
-
- mutex_lock(&rdtgroup_mutex);
- rdtgrp = filp->private_data;
- WARN_ON(!rdtgrp);
- if (!rdtgrp) {
- mutex_unlock(&rdtgroup_mutex);
- return -ENODEV;
- }
- filp->private_data = NULL;
- atomic_dec(&rdtgrp->waitcount);
- mutex_unlock(&rdtgroup_mutex);
- return 0;
-}
-
-static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
-{
- /* Not supported */
- return -EINVAL;
-}
-
-static const struct vm_operations_struct pseudo_mmap_ops = {
- .mremap = pseudo_lock_dev_mremap,
-};
-
-static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- unsigned long vsize = vma->vm_end - vma->vm_start;
- unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
- struct pseudo_lock_region *plr;
- struct rdtgroup *rdtgrp;
- unsigned long physical;
- unsigned long psize;
-
- mutex_lock(&rdtgroup_mutex);
-
- rdtgrp = filp->private_data;
- WARN_ON(!rdtgrp);
- if (!rdtgrp) {
- mutex_unlock(&rdtgroup_mutex);
- return -ENODEV;
- }
-
- plr = rdtgrp->plr;
-
- if (!plr->d) {
- mutex_unlock(&rdtgroup_mutex);
- return -ENODEV;
- }
-
- /*
- * Task is required to run with affinity to the cpus associated
- * with the pseudo-locked region. If this is not the case the task
- * may be scheduled elsewhere and invalidate entries in the
- * pseudo-locked region.
- */
- if (!cpumask_subset(current->cpus_ptr, &plr->d->hdr.cpu_mask)) {
- mutex_unlock(&rdtgroup_mutex);
- return -EINVAL;
- }
-
- physical = __pa(plr->kmem) >> PAGE_SHIFT;
- psize = plr->size - off;
-
- if (off > plr->size) {
- mutex_unlock(&rdtgroup_mutex);
- return -ENOSPC;
- }
-
- /*
- * Ensure changes are carried directly to the memory being mapped,
- * do not allow copy-on-write mapping.
- */
- if (!(vma->vm_flags & VM_SHARED)) {
- mutex_unlock(&rdtgroup_mutex);
- return -EINVAL;
- }
-
- if (vsize > psize) {
- mutex_unlock(&rdtgroup_mutex);
- return -ENOSPC;
- }
-
- memset(plr->kmem + off, 0, vsize);
-
- if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
- vsize, vma->vm_page_prot)) {
- mutex_unlock(&rdtgroup_mutex);
- return -EAGAIN;
- }
- vma->vm_ops = &pseudo_mmap_ops;
- mutex_unlock(&rdtgroup_mutex);
- return 0;
-}
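
A minimal userspace sketch of how the checks above are satisfied when mapping a
pseudo-locked region (the device path, group name, CPU number and mapping size
are illustrative; udev typically exposes the node under /dev/pseudo_lock/):

/*
 * Bind to a CPU of the pseudo-locked cache domain, then map the character
 * device with MAP_SHARED; a private (copy-on-write) mapping is rejected.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t set;
	size_t len = 1024 * 1024;	/* must not exceed the region size */
	void *mem;
	int fd;

	CPU_ZERO(&set);
	CPU_SET(2, &set);		/* a CPU from the pseudo-locked domain */
	if (sched_setaffinity(0, sizeof(set), &set))
		return 1;

	fd = open("/dev/pseudo_lock/newlock", O_RDWR);
	if (fd < 0)
		return 1;

	mem = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (mem == MAP_FAILED)
		return 1;

	/* ... accesses to mem stay within the pseudo-locked cache portion ... */
	munmap(mem, len);
	close(fd);
	return 0;
}
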
-
-static const struct file_operations pseudo_lock_dev_fops = {
- .owner = THIS_MODULE,
- .read = NULL,
- .write = NULL,
- .open = pseudo_lock_dev_open,
- .release = pseudo_lock_dev_release,
- .mmap = pseudo_lock_dev_mmap,
-};
-
-int rdt_pseudo_lock_init(void)
-{
- int ret;
-
- ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
- if (ret < 0)
- return ret;
-
- pseudo_lock_major = ret;
-
- ret = class_register(&pseudo_lock_class);
- if (ret) {
- unregister_chrdev(pseudo_lock_major, "pseudo_lock");
- return ret;
- }
-
- return 0;
-}
-
-void rdt_pseudo_lock_release(void)
-{
- class_unregister(&pseudo_lock_class);
- unregister_chrdev(pseudo_lock_major, "pseudo_lock");
- pseudo_lock_major = 0;
-}
diff --git a/arch/x86/kernel/cpu/resctrl/trace.h b/arch/x86/kernel/cpu/resctrl/pseudo_lock_trace.h
index 2a506316b303..7c8aef08010f 100644
--- a/arch/x86/kernel/cpu/resctrl/trace.h
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock_trace.h
@@ -2,8 +2,8 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM resctrl
-#if !defined(_TRACE_RESCTRL_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_RESCTRL_H
+#if !defined(_X86_RESCTRL_PSEUDO_LOCK_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _X86_RESCTRL_PSEUDO_LOCK_TRACE_H
#include <linux/tracepoint.h>
@@ -35,25 +35,11 @@ TRACE_EVENT(pseudo_lock_l3,
TP_printk("hits=%llu miss=%llu",
__entry->l3_hits, __entry->l3_miss));
-TRACE_EVENT(mon_llc_occupancy_limbo,
- TP_PROTO(u32 ctrl_hw_id, u32 mon_hw_id, int domain_id, u64 llc_occupancy_bytes),
- TP_ARGS(ctrl_hw_id, mon_hw_id, domain_id, llc_occupancy_bytes),
- TP_STRUCT__entry(__field(u32, ctrl_hw_id)
- __field(u32, mon_hw_id)
- __field(int, domain_id)
- __field(u64, llc_occupancy_bytes)),
- TP_fast_assign(__entry->ctrl_hw_id = ctrl_hw_id;
- __entry->mon_hw_id = mon_hw_id;
- __entry->domain_id = domain_id;
- __entry->llc_occupancy_bytes = llc_occupancy_bytes;),
- TP_printk("ctrl_hw_id=%u mon_hw_id=%u domain_id=%d llc_occupancy_bytes=%llu",
- __entry->ctrl_hw_id, __entry->mon_hw_id, __entry->domain_id,
- __entry->llc_occupancy_bytes)
- );
-
-#endif /* _TRACE_RESCTRL_H */
+#endif /* _X86_RESCTRL_PSEUDO_LOCK_TRACE_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
+
+#define TRACE_INCLUDE_FILE pseudo_lock_trace
+
#include <trace/define_trace.h>
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 93ec829015f1..885026468440 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -18,6 +18,7 @@
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
+#include <linux/resctrl.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
@@ -28,341 +29,17 @@
#include <uapi/linux/magic.h>
-#include <asm/resctrl.h>
+#include <asm/msr.h>
#include "internal.h"
DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
-DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
-DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
-
-/* Mutex to protect rdtgroup access. */
-DEFINE_MUTEX(rdtgroup_mutex);
-
-static struct kernfs_root *rdt_root;
-struct rdtgroup rdtgroup_default;
-LIST_HEAD(rdt_all_groups);
-
-/* list of entries for the schemata file */
-LIST_HEAD(resctrl_schema_all);
-
-/* The filesystem can only be mounted once. */
-bool resctrl_mounted;
-
-/* Kernel fs node for "info" directory under root */
-static struct kernfs_node *kn_info;
-
-/* Kernel fs node for "mon_groups" directory under root */
-static struct kernfs_node *kn_mongrp;
-
-/* Kernel fs node for "mon_data" directory under root */
-static struct kernfs_node *kn_mondata;
-
-/*
- * Used to store the max resource name width to display the schemata names in
- * a tabular format.
- */
-int max_name_width;
-
-static struct seq_buf last_cmd_status;
-static char last_cmd_status_buf[512];
-
-static int rdtgroup_setup_root(struct rdt_fs_context *ctx);
-static void rdtgroup_destroy_root(void);
-
-struct dentry *debugfs_resctrl;
-
-/*
- * Memory bandwidth monitoring event to use for the default CTRL_MON group
- * and each new CTRL_MON group created by the user. Only relevant when
- * the filesystem is mounted with the "mba_MBps" option so it does not
- * matter that it remains uninitialized on systems that do not support
- * the "mba_MBps" option.
- */
-enum resctrl_event_id mba_mbps_default_event;
-
-static bool resctrl_debug;
-
-void rdt_last_cmd_clear(void)
-{
- lockdep_assert_held(&rdtgroup_mutex);
- seq_buf_clear(&last_cmd_status);
-}
-
-void rdt_last_cmd_puts(const char *s)
-{
- lockdep_assert_held(&rdtgroup_mutex);
- seq_buf_puts(&last_cmd_status, s);
-}
-
-void rdt_last_cmd_printf(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- lockdep_assert_held(&rdtgroup_mutex);
- seq_buf_vprintf(&last_cmd_status, fmt, ap);
- va_end(ap);
-}
-
-void rdt_staged_configs_clear(void)
-{
- struct rdt_ctrl_domain *dom;
- struct rdt_resource *r;
-
- lockdep_assert_held(&rdtgroup_mutex);
-
- for_each_alloc_capable_rdt_resource(r) {
- list_for_each_entry(dom, &r->ctrl_domains, hdr.list)
- memset(dom->staged_config, 0, sizeof(dom->staged_config));
- }
-}
-
-static bool resctrl_is_mbm_enabled(void)
-{
- return (resctrl_arch_is_mbm_total_enabled() ||
- resctrl_arch_is_mbm_local_enabled());
-}
-
-static bool resctrl_is_mbm_event(int e)
-{
- return (e >= QOS_L3_MBM_TOTAL_EVENT_ID &&
- e <= QOS_L3_MBM_LOCAL_EVENT_ID);
-}
-
-/*
- * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
- * we can keep a bitmap of free CLOSIDs in a single integer.
- *
- * Using a global CLOSID across all resources has some advantages and
- * some drawbacks:
- * + We can simply set current's closid to assign a task to a resource
- * group.
- * + Context switch code can avoid extra memory references deciding which
- * CLOSID to load into the PQR_ASSOC MSR
- * - We give up some options in configuring resource groups across multi-socket
- * systems.
- * - Our choices on how to configure each resource become progressively more
- * limited as the number of resources grows.
- */
-static unsigned long closid_free_map;
-static int closid_free_map_len;
-
-int closids_supported(void)
-{
- return closid_free_map_len;
-}
-
-static void closid_init(void)
-{
- struct resctrl_schema *s;
- u32 rdt_min_closid = 32;
-
- /* Compute rdt_min_closid across all resources */
- list_for_each_entry(s, &resctrl_schema_all, list)
- rdt_min_closid = min(rdt_min_closid, s->num_closid);
-
- closid_free_map = BIT_MASK(rdt_min_closid) - 1;
-
- /* RESCTRL_RESERVED_CLOSID is always reserved for the default group */
- __clear_bit(RESCTRL_RESERVED_CLOSID, &closid_free_map);
- closid_free_map_len = rdt_min_closid;
-}
-
-static int closid_alloc(void)
-{
- int cleanest_closid;
- u32 closid;
-
- lockdep_assert_held(&rdtgroup_mutex);
-
- if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) &&
- resctrl_arch_is_llc_occupancy_enabled()) {
- cleanest_closid = resctrl_find_cleanest_closid();
- if (cleanest_closid < 0)
- return cleanest_closid;
- closid = cleanest_closid;
- } else {
- closid = ffs(closid_free_map);
- if (closid == 0)
- return -ENOSPC;
- closid--;
- }
- __clear_bit(closid, &closid_free_map);
-
- return closid;
-}
-
-void closid_free(int closid)
-{
- lockdep_assert_held(&rdtgroup_mutex);
-
- __set_bit(closid, &closid_free_map);
-}
-
-/**
- * closid_allocated - test if provided closid is in use
- * @closid: closid to be tested
- *
- * Return: true if @closid is currently associated with a resource group,
- * false if @closid is free
- */
-bool closid_allocated(unsigned int closid)
-{
- lockdep_assert_held(&rdtgroup_mutex);
-
- return !test_bit(closid, &closid_free_map);
-}
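
A stand-alone sketch of the same bitmap allocation pattern used for CLOSIDs
above (hypothetical demo_* names, not resctrl code; assumes fewer IDs than
bits in an unsigned long):

/* Minimal userspace model of a bitmap ID allocator with ID 0 reserved. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned long demo_free_map;

static void demo_init(unsigned int num_ids)
{
	demo_free_map = (1UL << num_ids) - 1;	/* num_ids free IDs */
	demo_free_map &= ~1UL;			/* reserve ID 0, like RESCTRL_RESERVED_CLOSID */
}

static int demo_alloc(void)
{
	int id = ffs(demo_free_map);	/* lowest set bit, 1-based; 0 if map empty */

	if (!id)
		return -1;		/* no free IDs */
	demo_free_map &= ~(1UL << (id - 1));
	return id - 1;
}

static void demo_free_id(int id)
{
	demo_free_map |= 1UL << id;
}

int main(void)
{
	demo_init(4);				/* IDs 0..3, with 0 reserved */
	printf("%d\n", demo_alloc());		/* 1 */
	printf("%d\n", demo_alloc());		/* 2 */
	demo_free_id(1);
	printf("%d\n", demo_alloc());		/* 1 again: lowest free ID wins */
	return 0;
}
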
-
-/**
- * rdtgroup_mode_by_closid - Return mode of resource group with closid
- * @closid: closid of the resource group
- *
- * Each resource group is associated with a @closid. Here the mode
- * of a resource group can be queried by searching for it using its closid.
- *
- * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
- */
-enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
-{
- struct rdtgroup *rdtgrp;
-
- list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
- if (rdtgrp->closid == closid)
- return rdtgrp->mode;
- }
-
- return RDT_NUM_MODES;
-}
-
-static const char * const rdt_mode_str[] = {
- [RDT_MODE_SHAREABLE] = "shareable",
- [RDT_MODE_EXCLUSIVE] = "exclusive",
- [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup",
- [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked",
-};
-
-/**
- * rdtgroup_mode_str - Return the string representation of mode
- * @mode: the resource group mode as &enum rdtgroup_mode
- *
- * Return: string representation of valid mode, "unknown" otherwise
- */
-static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
-{
- if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
- return "unknown";
-
- return rdt_mode_str[mode];
-}
-/* set uid and gid of rdtgroup dirs and files to that of the creator */
-static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
-{
- struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
- .ia_uid = current_fsuid(),
- .ia_gid = current_fsgid(), };
-
- if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
- gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
- return 0;
-
- return kernfs_setattr(kn, &iattr);
-}
-
-static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
-{
- struct kernfs_node *kn;
- int ret;
-
- kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
- GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
- 0, rft->kf_ops, rft, NULL, NULL);
- if (IS_ERR(kn))
- return PTR_ERR(kn);
-
- ret = rdtgroup_kn_set_ugid(kn);
- if (ret) {
- kernfs_remove(kn);
- return ret;
- }
-
- return 0;
-}
-
-static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
-{
- struct kernfs_open_file *of = m->private;
- struct rftype *rft = of->kn->priv;
-
- if (rft->seq_show)
- return rft->seq_show(of, m, arg);
- return 0;
-}
-
-static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
- size_t nbytes, loff_t off)
-{
- struct rftype *rft = of->kn->priv;
-
- if (rft->write)
- return rft->write(of, buf, nbytes, off);
-
- return -EINVAL;
-}
-
-static const struct kernfs_ops rdtgroup_kf_single_ops = {
- .atomic_write_len = PAGE_SIZE,
- .write = rdtgroup_file_write,
- .seq_show = rdtgroup_seqfile_show,
-};
-
-static const struct kernfs_ops kf_mondata_ops = {
- .atomic_write_len = PAGE_SIZE,
- .seq_show = rdtgroup_mondata_show,
-};
-
-static bool is_cpu_list(struct kernfs_open_file *of)
-{
- struct rftype *rft = of->kn->priv;
-
- return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
-}
-
-static int rdtgroup_cpus_show(struct kernfs_open_file *of,
- struct seq_file *s, void *v)
-{
- struct rdtgroup *rdtgrp;
- struct cpumask *mask;
- int ret = 0;
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
-
- if (rdtgrp) {
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
- if (!rdtgrp->plr->d) {
- rdt_last_cmd_clear();
- rdt_last_cmd_puts("Cache domain offline\n");
- ret = -ENODEV;
- } else {
- mask = &rdtgrp->plr->d->hdr.cpu_mask;
- seq_printf(s, is_cpu_list(of) ?
- "%*pbl\n" : "%*pb\n",
- cpumask_pr_args(mask));
- }
- } else {
- seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
- cpumask_pr_args(&rdtgrp->cpu_mask));
- }
- } else {
- ret = -ENOENT;
- }
- rdtgroup_kn_unlock(of->kn);
+DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
- return ret;
-}
+DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
/*
- * This is safe against resctrl_sched_in() called from __switch_to()
+ * This is safe against resctrl_arch_sched_in() called from __switch_to()
* because __switch_to() is executed with interrupts disabled. A local call
* from update_closid_rmid() is protected against __switch_to() because
* preemption is disabled.
@@ -381,1223 +58,7 @@ void resctrl_arch_sync_cpu_closid_rmid(void *info)
* executing task might have its own closid selected. Just reuse
* the context switch code.
*/
- resctrl_sched_in(current);
-}
-
-/*
- * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
- *
- * Per task closids/rmids must have been set up before calling this function.
- * @r may be NULL.
- */
-static void
-update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
-{
- struct resctrl_cpu_defaults defaults, *p = NULL;
-
- if (r) {
- defaults.closid = r->closid;
- defaults.rmid = r->mon.rmid;
- p = &defaults;
- }
-
- on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_closid_rmid, p, 1);
-}
-
-static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
- cpumask_var_t tmpmask)
-{
- struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
- struct list_head *head;
-
- /* Check whether cpus belong to parent ctrl group */
- cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
- if (!cpumask_empty(tmpmask)) {
- rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
- return -EINVAL;
- }
-
- /* Check whether cpus are dropped from this group */
- cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
- if (!cpumask_empty(tmpmask)) {
- /* Give any dropped cpus to parent rdtgroup */
- cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
- update_closid_rmid(tmpmask, prgrp);
- }
-
- /*
- * If we added cpus, remove them from previous group that owned them
- * and update per-cpu rmid
- */
- cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
- if (!cpumask_empty(tmpmask)) {
- head = &prgrp->mon.crdtgrp_list;
- list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
- if (crgrp == rdtgrp)
- continue;
- cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
- tmpmask);
- }
- update_closid_rmid(tmpmask, rdtgrp);
- }
-
- /* Done pushing/pulling - update this group with new mask */
- cpumask_copy(&rdtgrp->cpu_mask, newmask);
-
- return 0;
-}
-
-static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
-{
- struct rdtgroup *crgrp;
-
- cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
- /* update the child mon group masks as well */
- list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
- cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
-}
-
-static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
- cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
-{
- struct rdtgroup *r, *crgrp;
- struct list_head *head;
-
- /* Check whether cpus are dropped from this group */
- cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
- if (!cpumask_empty(tmpmask)) {
- /* Can't drop from default group */
- if (rdtgrp == &rdtgroup_default) {
- rdt_last_cmd_puts("Can't drop CPUs from default group\n");
- return -EINVAL;
- }
-
- /* Give any dropped cpus to rdtgroup_default */
- cpumask_or(&rdtgroup_default.cpu_mask,
- &rdtgroup_default.cpu_mask, tmpmask);
- update_closid_rmid(tmpmask, &rdtgroup_default);
- }
-
- /*
- * If we added cpus, remove them from previous group and
- * the prev group's child groups that owned them
- * and update per-cpu closid/rmid.
- */
- cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
- if (!cpumask_empty(tmpmask)) {
- list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
- if (r == rdtgrp)
- continue;
- cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
- if (!cpumask_empty(tmpmask1))
- cpumask_rdtgrp_clear(r, tmpmask1);
- }
- update_closid_rmid(tmpmask, rdtgrp);
- }
-
- /* Done pushing/pulling - update this group with new mask */
- cpumask_copy(&rdtgrp->cpu_mask, newmask);
-
- /*
- * Clear child mon group masks since there is a new parent mask
- * now and update the rmid for the cpus the child lost.
- */
- head = &rdtgrp->mon.crdtgrp_list;
- list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
- cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
- update_closid_rmid(tmpmask, rdtgrp);
- cpumask_clear(&crgrp->cpu_mask);
- }
-
- return 0;
-}
-
-static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off)
-{
- cpumask_var_t tmpmask, newmask, tmpmask1;
- struct rdtgroup *rdtgrp;
- int ret;
-
- if (!buf)
- return -EINVAL;
-
- if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
- return -ENOMEM;
- if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
- free_cpumask_var(tmpmask);
- return -ENOMEM;
- }
- if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
- free_cpumask_var(tmpmask);
- free_cpumask_var(newmask);
- return -ENOMEM;
- }
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
- if (!rdtgrp) {
- ret = -ENOENT;
- goto unlock;
- }
-
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
- rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
- ret = -EINVAL;
- rdt_last_cmd_puts("Pseudo-locking in progress\n");
- goto unlock;
- }
-
- if (is_cpu_list(of))
- ret = cpulist_parse(buf, newmask);
- else
- ret = cpumask_parse(buf, newmask);
-
- if (ret) {
- rdt_last_cmd_puts("Bad CPU list/mask\n");
- goto unlock;
- }
-
- /* check that user didn't specify any offline cpus */
- cpumask_andnot(tmpmask, newmask, cpu_online_mask);
- if (!cpumask_empty(tmpmask)) {
- ret = -EINVAL;
- rdt_last_cmd_puts("Can only assign online CPUs\n");
- goto unlock;
- }
-
- if (rdtgrp->type == RDTCTRL_GROUP)
- ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
- else if (rdtgrp->type == RDTMON_GROUP)
- ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
- else
- ret = -EINVAL;
-
-unlock:
- rdtgroup_kn_unlock(of->kn);
- free_cpumask_var(tmpmask);
- free_cpumask_var(newmask);
- free_cpumask_var(tmpmask1);
-
- return ret ?: nbytes;
-}
-
-/**
- * rdtgroup_remove - the helper to remove resource group safely
- * @rdtgrp: resource group to remove
- *
- * On resource group creation via a mkdir, an extra kernfs_node reference is
- * taken to ensure that the rdtgroup structure remains accessible for the
- * rdtgroup_kn_unlock() calls where it is removed.
- *
- * Drop the extra reference here, then free the rdtgroup structure.
- *
- * Return: void
- */
-static void rdtgroup_remove(struct rdtgroup *rdtgrp)
-{
- kernfs_put(rdtgrp->kn);
- kfree(rdtgrp);
-}
-
-static void _update_task_closid_rmid(void *task)
-{
- /*
- * If the task is still current on this CPU, update PQR_ASSOC MSR.
- * Otherwise, the MSR is updated when the task is scheduled in.
- */
- if (task == current)
- resctrl_sched_in(task);
-}
-
-static void update_task_closid_rmid(struct task_struct *t)
-{
- if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
- smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
- else
- _update_task_closid_rmid(t);
-}
-
-static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp)
-{
- u32 closid, rmid = rdtgrp->mon.rmid;
-
- if (rdtgrp->type == RDTCTRL_GROUP)
- closid = rdtgrp->closid;
- else if (rdtgrp->type == RDTMON_GROUP)
- closid = rdtgrp->mon.parent->closid;
- else
- return false;
-
- return resctrl_arch_match_closid(tsk, closid) &&
- resctrl_arch_match_rmid(tsk, closid, rmid);
-}
-
-static int __rdtgroup_move_task(struct task_struct *tsk,
- struct rdtgroup *rdtgrp)
-{
- /* If the task is already in rdtgrp, no need to move the task. */
- if (task_in_rdtgroup(tsk, rdtgrp))
- return 0;
-
- /*
- * Set the task's closid/rmid before the PQR_ASSOC MSR can be
- * updated by them.
- *
- * For ctrl_mon groups, move both closid and rmid.
- * For monitor groups, can move the tasks only from
- * their parent CTRL group.
- */
- if (rdtgrp->type == RDTMON_GROUP &&
- !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) {
- rdt_last_cmd_puts("Can't move task to different control group\n");
- return -EINVAL;
- }
-
- if (rdtgrp->type == RDTMON_GROUP)
- resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid,
- rdtgrp->mon.rmid);
- else
- resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid,
- rdtgrp->mon.rmid);
-
- /*
- * Ensure the task's closid and rmid are written before determining whether
- * the task is currently running, which decides if it will be interrupted.
- * This pairs with the full barrier between the rq->curr update and
- * resctrl_sched_in() during context switch.
- */
- smp_mb();
-
- /*
- * By now, the task's closid and rmid are set. If the task is current
- * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
- * group go into effect. If the task is not current, the MSR will be
- * updated when the task is scheduled in.
- */
- update_task_closid_rmid(tsk);
-
- return 0;
-}
-
-static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
-{
- return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) &&
- resctrl_arch_match_closid(t, r->closid));
-}
-
-static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
-{
- return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) &&
- resctrl_arch_match_rmid(t, r->mon.parent->closid,
- r->mon.rmid));
-}
-
-/**
- * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
- * @r: Resource group
- *
- * Return: 1 if tasks have been assigned to @r, 0 otherwise
- */
-int rdtgroup_tasks_assigned(struct rdtgroup *r)
-{
- struct task_struct *p, *t;
- int ret = 0;
-
- lockdep_assert_held(&rdtgroup_mutex);
-
- rcu_read_lock();
- for_each_process_thread(p, t) {
- if (is_closid_match(t, r) || is_rmid_match(t, r)) {
- ret = 1;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-static int rdtgroup_task_write_permission(struct task_struct *task,
- struct kernfs_open_file *of)
-{
- const struct cred *tcred = get_task_cred(task);
- const struct cred *cred = current_cred();
- int ret = 0;
-
- /*
- * Even if we're attaching all tasks in the thread group, we only
- * need to check permissions on one of them.
- */
- if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
- !uid_eq(cred->euid, tcred->uid) &&
- !uid_eq(cred->euid, tcred->suid)) {
- rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
- ret = -EPERM;
- }
-
- put_cred(tcred);
- return ret;
-}
-
-static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
- struct kernfs_open_file *of)
-{
- struct task_struct *tsk;
- int ret;
-
- rcu_read_lock();
- if (pid) {
- tsk = find_task_by_vpid(pid);
- if (!tsk) {
- rcu_read_unlock();
- rdt_last_cmd_printf("No task %d\n", pid);
- return -ESRCH;
- }
- } else {
- tsk = current;
- }
-
- get_task_struct(tsk);
- rcu_read_unlock();
-
- ret = rdtgroup_task_write_permission(tsk, of);
- if (!ret)
- ret = __rdtgroup_move_task(tsk, rdtgrp);
-
- put_task_struct(tsk);
- return ret;
-}
-
-static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off)
-{
- struct rdtgroup *rdtgrp;
- char *pid_str;
- int ret = 0;
- pid_t pid;
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
- if (!rdtgrp) {
- rdtgroup_kn_unlock(of->kn);
- return -ENOENT;
- }
- rdt_last_cmd_clear();
-
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
- rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
- ret = -EINVAL;
- rdt_last_cmd_puts("Pseudo-locking in progress\n");
- goto unlock;
- }
-
- while (buf && buf[0] != '\0' && buf[0] != '\n') {
- pid_str = strim(strsep(&buf, ","));
-
- if (kstrtoint(pid_str, 0, &pid)) {
- rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str);
- ret = -EINVAL;
- break;
- }
-
- if (pid < 0) {
- rdt_last_cmd_printf("Invalid pid %d\n", pid);
- ret = -EINVAL;
- break;
- }
-
- ret = rdtgroup_move_task(pid, rdtgrp, of);
- if (ret) {
- rdt_last_cmd_printf("Error while processing task %d\n", pid);
- break;
- }
- }
-
-unlock:
- rdtgroup_kn_unlock(of->kn);
-
- return ret ?: nbytes;
-}
-
-static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
-{
- struct task_struct *p, *t;
- pid_t pid;
-
- rcu_read_lock();
- for_each_process_thread(p, t) {
- if (is_closid_match(t, r) || is_rmid_match(t, r)) {
- pid = task_pid_vnr(t);
- if (pid)
- seq_printf(s, "%d\n", pid);
- }
- }
- rcu_read_unlock();
-}
-
-static int rdtgroup_tasks_show(struct kernfs_open_file *of,
- struct seq_file *s, void *v)
-{
- struct rdtgroup *rdtgrp;
- int ret = 0;
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
- if (rdtgrp)
- show_rdt_tasks(rdtgrp, s);
- else
- ret = -ENOENT;
- rdtgroup_kn_unlock(of->kn);
-
- return ret;
-}
-
-static int rdtgroup_closid_show(struct kernfs_open_file *of,
- struct seq_file *s, void *v)
-{
- struct rdtgroup *rdtgrp;
- int ret = 0;
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
- if (rdtgrp)
- seq_printf(s, "%u\n", rdtgrp->closid);
- else
- ret = -ENOENT;
- rdtgroup_kn_unlock(of->kn);
-
- return ret;
-}
-
-static int rdtgroup_rmid_show(struct kernfs_open_file *of,
- struct seq_file *s, void *v)
-{
- struct rdtgroup *rdtgrp;
- int ret = 0;
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
- if (rdtgrp)
- seq_printf(s, "%u\n", rdtgrp->mon.rmid);
- else
- ret = -ENOENT;
- rdtgroup_kn_unlock(of->kn);
-
- return ret;
-}
-
-#ifdef CONFIG_PROC_CPU_RESCTRL
-
-/*
- * A task can only be part of one resctrl control group and of one monitor
- * group which is associated to that control group.
- *
- * 1) res:
- * mon:
- *
- * resctrl is not available.
- *
- * 2) res:/
- * mon:
- *
- * Task is part of the root resctrl control group, and it is not associated
- * to any monitor group.
- *
- * 3) res:/
- * mon:mon0
- *
- * Task is part of the root resctrl control group and monitor group mon0.
- *
- * 4) res:group0
- * mon:
- *
- * Task is part of resctrl control group group0, and it is not associated
- * to any monitor group.
- *
- * 5) res:group0
- * mon:mon1
- *
- * Task is part of resctrl control group group0 and monitor group mon1.
- */
-int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
- struct pid *pid, struct task_struct *tsk)
-{
- struct rdtgroup *rdtg;
- int ret = 0;
-
- mutex_lock(&rdtgroup_mutex);
-
- /* Return empty if resctrl has not been mounted. */
- if (!resctrl_mounted) {
- seq_puts(s, "res:\nmon:\n");
- goto unlock;
- }
-
- list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
- struct rdtgroup *crg;
-
- /*
- * Task information is only relevant for shareable
- * and exclusive groups.
- */
- if (rdtg->mode != RDT_MODE_SHAREABLE &&
- rdtg->mode != RDT_MODE_EXCLUSIVE)
- continue;
-
- if (!resctrl_arch_match_closid(tsk, rdtg->closid))
- continue;
-
- seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
- rdt_kn_name(rdtg->kn));
- seq_puts(s, "mon:");
- list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
- mon.crdtgrp_list) {
- if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid,
- crg->mon.rmid))
- continue;
- seq_printf(s, "%s", rdt_kn_name(crg->kn));
- break;
- }
- seq_putc(s, '\n');
- goto unlock;
- }
- /*
- * The above search should succeed. Otherwise return
- * with an error.
- */
- ret = -ENOENT;
-unlock:
- mutex_unlock(&rdtgroup_mutex);
-
- return ret;
-}
-#endif
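
A tiny sketch of consuming the /proc/<pid>/resctrl format documented above for
the current task (illustrative only; the file exists only when
CONFIG_PROC_CPU_RESCTRL is enabled):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/resctrl", "r");

	if (!f)
		return 1;	/* CONFIG_PROC_CPU_RESCTRL not enabled */

	/* Expect two lines, e.g. "res:group0" and "mon:mon1" per the cases above. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return 0;
}
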
-
-static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- int len;
-
- mutex_lock(&rdtgroup_mutex);
- len = seq_buf_used(&last_cmd_status);
- if (len)
- seq_printf(seq, "%.*s", len, last_cmd_status_buf);
- else
- seq_puts(seq, "ok\n");
- mutex_unlock(&rdtgroup_mutex);
- return 0;
-}
-
-static void *rdt_kn_parent_priv(struct kernfs_node *kn)
-{
- /*
- * The parent pointer is only valid within RCU section since it can be
- * replaced.
- */
- guard(rcu)();
- return rcu_dereference(kn->__parent)->priv;
-}
-
-static int rdt_num_closids_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
-
- seq_printf(seq, "%u\n", s->num_closid);
- return 0;
-}
-
-static int rdt_default_ctrl_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
- struct rdt_resource *r = s->res;
-
- seq_printf(seq, "%x\n", resctrl_get_default_ctrl(r));
- return 0;
-}
-
-static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
- struct rdt_resource *r = s->res;
-
- seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
- return 0;
-}
-
-static int rdt_shareable_bits_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
- struct rdt_resource *r = s->res;
-
- seq_printf(seq, "%x\n", r->cache.shareable_bits);
- return 0;
-}
-
-/*
- * rdt_bit_usage_show - Display current usage of resources
- *
- * A domain is a shared resource that can now be allocated differently. Here
- * we display the current regions of the domain as an annotated bitmask.
- * For each domain of this resource its allocation bitmask
- * is annotated as below to indicate the current usage of the corresponding bit:
- * 0 - currently unused
- * X - currently available for sharing and used by software and hardware
- * H - currently used by hardware only but available for software use
- * S - currently used and shareable by software only
- * E - currently used exclusively by one resource group
- * P - currently pseudo-locked by one resource group
- */
-static int rdt_bit_usage_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
- /*
- * Use unsigned long even though only 32 bits are used to ensure
- * test_bit() is used safely.
- */
- unsigned long sw_shareable = 0, hw_shareable = 0;
- unsigned long exclusive = 0, pseudo_locked = 0;
- struct rdt_resource *r = s->res;
- struct rdt_ctrl_domain *dom;
- int i, hwb, swb, excl, psl;
- enum rdtgrp_mode mode;
- bool sep = false;
- u32 ctrl_val;
-
- cpus_read_lock();
- mutex_lock(&rdtgroup_mutex);
- hw_shareable = r->cache.shareable_bits;
- list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
- if (sep)
- seq_putc(seq, ';');
- sw_shareable = 0;
- exclusive = 0;
- seq_printf(seq, "%d=", dom->hdr.id);
- for (i = 0; i < closids_supported(); i++) {
- if (!closid_allocated(i))
- continue;
- ctrl_val = resctrl_arch_get_config(r, dom, i,
- s->conf_type);
- mode = rdtgroup_mode_by_closid(i);
- switch (mode) {
- case RDT_MODE_SHAREABLE:
- sw_shareable |= ctrl_val;
- break;
- case RDT_MODE_EXCLUSIVE:
- exclusive |= ctrl_val;
- break;
- case RDT_MODE_PSEUDO_LOCKSETUP:
- /*
- * RDT_MODE_PSEUDO_LOCKSETUP is possible
- * here but not included since the CBM
- * associated with this CLOSID in this mode
- * is not initialized and no task or cpu can be
- * assigned this CLOSID.
- */
- break;
- case RDT_MODE_PSEUDO_LOCKED:
- case RDT_NUM_MODES:
- WARN(1,
- "invalid mode for closid %d\n", i);
- break;
- }
- }
- for (i = r->cache.cbm_len - 1; i >= 0; i--) {
- pseudo_locked = dom->plr ? dom->plr->cbm : 0;
- hwb = test_bit(i, &hw_shareable);
- swb = test_bit(i, &sw_shareable);
- excl = test_bit(i, &exclusive);
- psl = test_bit(i, &pseudo_locked);
- if (hwb && swb)
- seq_putc(seq, 'X');
- else if (hwb && !swb)
- seq_putc(seq, 'H');
- else if (!hwb && swb)
- seq_putc(seq, 'S');
- else if (excl)
- seq_putc(seq, 'E');
- else if (psl)
- seq_putc(seq, 'P');
- else /* Unused bits remain */
- seq_putc(seq, '0');
- }
- sep = true;
- }
- seq_putc(seq, '\n');
- mutex_unlock(&rdtgroup_mutex);
- cpus_read_unlock();
- return 0;
-}
-
-static int rdt_min_bw_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
- struct rdt_resource *r = s->res;
-
- seq_printf(seq, "%u\n", r->membw.min_bw);
- return 0;
-}
-
-static int rdt_num_rmids_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
-
- seq_printf(seq, "%d\n", r->num_rmid);
-
- return 0;
-}
-
-static int rdt_mon_features_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
- struct mon_evt *mevt;
-
- list_for_each_entry(mevt, &r->evt_list, list) {
- seq_printf(seq, "%s\n", mevt->name);
- if (mevt->configurable)
- seq_printf(seq, "%s_config\n", mevt->name);
- }
-
- return 0;
-}
-
-static int rdt_bw_gran_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
- struct rdt_resource *r = s->res;
-
- seq_printf(seq, "%u\n", r->membw.bw_gran);
- return 0;
-}
-
-static int rdt_delay_linear_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
- struct rdt_resource *r = s->res;
-
- seq_printf(seq, "%u\n", r->membw.delay_linear);
- return 0;
-}
-
-static int max_threshold_occ_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold);
-
- return 0;
-}
-
-static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
- struct rdt_resource *r = s->res;
-
- switch (r->membw.throttle_mode) {
- case THREAD_THROTTLE_PER_THREAD:
- seq_puts(seq, "per-thread\n");
- return 0;
- case THREAD_THROTTLE_MAX:
- seq_puts(seq, "max\n");
- return 0;
- case THREAD_THROTTLE_UNDEFINED:
- seq_puts(seq, "undefined\n");
- return 0;
- }
-
- WARN_ON_ONCE(1);
-
- return 0;
-}
-
-static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off)
-{
- unsigned int bytes;
- int ret;
-
- ret = kstrtouint(buf, 0, &bytes);
- if (ret)
- return ret;
-
- if (bytes > resctrl_rmid_realloc_limit)
- return -EINVAL;
-
- resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes);
-
- return nbytes;
-}
-
-/*
- * rdtgroup_mode_show - Display mode of this resource group
- */
-static int rdtgroup_mode_show(struct kernfs_open_file *of,
- struct seq_file *s, void *v)
-{
- struct rdtgroup *rdtgrp;
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
- if (!rdtgrp) {
- rdtgroup_kn_unlock(of->kn);
- return -ENOENT;
- }
-
- seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));
-
- rdtgroup_kn_unlock(of->kn);
- return 0;
-}
-
-static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
-{
- switch (my_type) {
- case CDP_CODE:
- return CDP_DATA;
- case CDP_DATA:
- return CDP_CODE;
- default:
- case CDP_NONE:
- return CDP_NONE;
- }
-}
-
-static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
- struct rdt_resource *r = s->res;
-
- seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks);
-
- return 0;
-}
-
-/**
- * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
- * @r: Resource to which domain instance @d belongs.
- * @d: The domain instance for which @closid is being tested.
- * @cbm: Capacity bitmask being tested.
- * @closid: Intended closid for @cbm.
- * @type: CDP type of @r.
- * @exclusive: Only check if overlaps with exclusive resource groups
- *
- * Checks if provided @cbm intended to be used for @closid on domain
- * @d overlaps with any other closids or other hardware usage associated
- * with this domain. If @exclusive is true then only overlaps with
- * resource groups in exclusive mode will be considered. If @exclusive
- * is false then overlaps with any resource group or hardware entities
- * will be considered.
- *
- * @cbm is unsigned long, even if only 32 bits are used, to make the
- * bitmap functions work correctly.
- *
- * Return: false if CBM does not overlap, true if it does.
- */
-static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_ctrl_domain *d,
- unsigned long cbm, int closid,
- enum resctrl_conf_type type, bool exclusive)
-{
- enum rdtgrp_mode mode;
- unsigned long ctrl_b;
- int i;
-
- /* Check for any overlap with regions used by hardware directly */
- if (!exclusive) {
- ctrl_b = r->cache.shareable_bits;
- if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
- return true;
- }
-
- /* Check for overlap with other resource groups */
- for (i = 0; i < closids_supported(); i++) {
- ctrl_b = resctrl_arch_get_config(r, d, i, type);
- mode = rdtgroup_mode_by_closid(i);
- if (closid_allocated(i) && i != closid &&
- mode != RDT_MODE_PSEUDO_LOCKSETUP) {
- if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
- if (exclusive) {
- if (mode == RDT_MODE_EXCLUSIVE)
- return true;
- continue;
- }
- return true;
- }
- }
- }
-
- return false;
-}
-
-/**
- * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
- * @s: Schema for the resource to which domain instance @d belongs.
- * @d: The domain instance for which @closid is being tested.
- * @cbm: Capacity bitmask being tested.
- * @closid: Intended closid for @cbm.
- * @exclusive: Only check if overlaps with exclusive resource groups
- *
- * Resources that can be allocated using a CBM can use the CBM to control
- * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
- * for overlap. Overlap test is not limited to the specific resource for
- * which the CBM is intended though - when dealing with CDP resources that
- * share the underlying hardware the overlap check should be performed on
- * the CDP resource sharing the hardware also.
- *
- * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
- * overlap test.
- *
- * Return: true if CBM overlap detected, false if there is no overlap
- */
-bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_ctrl_domain *d,
- unsigned long cbm, int closid, bool exclusive)
-{
- enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
- struct rdt_resource *r = s->res;
-
- if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
- exclusive))
- return true;
-
- if (!resctrl_arch_get_cdp_enabled(r->rid))
- return false;
- return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
-}
-
-/**
- * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
- * @rdtgrp: Resource group identified through its closid.
- *
- * An exclusive resource group implies that there should be no sharing of
- * its allocated resources. At the time this group is considered to be
- * exclusive this test can determine if its current schemata supports this
- * setting by testing for overlap with all other resource groups.
- *
- * Return: true if resource group can be exclusive, false if there is overlap
- * with allocations of other resource groups and thus this resource group
- * cannot be exclusive.
- */
-static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
-{
- int closid = rdtgrp->closid;
- struct rdt_ctrl_domain *d;
- struct resctrl_schema *s;
- struct rdt_resource *r;
- bool has_cache = false;
- u32 ctrl;
-
- /* Walking r->domains, ensure it can't race with cpuhp */
- lockdep_assert_cpus_held();
-
- list_for_each_entry(s, &resctrl_schema_all, list) {
- r = s->res;
- if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)
- continue;
- has_cache = true;
- list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
- ctrl = resctrl_arch_get_config(r, d, closid,
- s->conf_type);
- if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
- rdt_last_cmd_puts("Schemata overlaps\n");
- return false;
- }
- }
- }
-
- if (!has_cache) {
- rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
- return false;
- }
-
- return true;
-}
-
-/*
- * rdtgroup_mode_write - Modify the resource group's mode
- */
-static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off)
-{
- struct rdtgroup *rdtgrp;
- enum rdtgrp_mode mode;
- int ret = 0;
-
- /* Valid input requires a trailing newline */
- if (nbytes == 0 || buf[nbytes - 1] != '\n')
- return -EINVAL;
- buf[nbytes - 1] = '\0';
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
- if (!rdtgrp) {
- rdtgroup_kn_unlock(of->kn);
- return -ENOENT;
- }
-
- rdt_last_cmd_clear();
-
- mode = rdtgrp->mode;
-
- if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
- (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
- (!strcmp(buf, "pseudo-locksetup") &&
- mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
- (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
- goto out;
-
- if (mode == RDT_MODE_PSEUDO_LOCKED) {
- rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
- ret = -EINVAL;
- goto out;
- }
-
- if (!strcmp(buf, "shareable")) {
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
- ret = rdtgroup_locksetup_exit(rdtgrp);
- if (ret)
- goto out;
- }
- rdtgrp->mode = RDT_MODE_SHAREABLE;
- } else if (!strcmp(buf, "exclusive")) {
- if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
- ret = -EINVAL;
- goto out;
- }
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
- ret = rdtgroup_locksetup_exit(rdtgrp);
- if (ret)
- goto out;
- }
- rdtgrp->mode = RDT_MODE_EXCLUSIVE;
- } else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) &&
- !strcmp(buf, "pseudo-locksetup")) {
- ret = rdtgroup_locksetup_enter(rdtgrp);
- if (ret)
- goto out;
- rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
- } else {
- rdt_last_cmd_puts("Unknown or unsupported mode\n");
- ret = -EINVAL;
- }
-
-out:
- rdtgroup_kn_unlock(of->kn);
- return ret ?: nbytes;
-}
-
-/**
- * rdtgroup_cbm_to_size - Translate CBM to size in bytes
- * @r: RDT resource to which @d belongs.
- * @d: RDT domain instance.
- * @cbm: bitmask for which the size should be computed.
- *
- * The bitmask provided associated with the RDT domain instance @d will be
- * translated into how many bytes it represents. The size in bytes is
- * computed by first dividing the total cache size by the CBM length to
- * determine how many bytes each bit in the bitmask represents. The result
- * is multiplied by the number of bits set in the bitmask.
- *
- * @cbm is unsigned long, even if only 32 bits are used, to make the
- * bitmap functions work correctly.
- */
-unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
- struct rdt_ctrl_domain *d, unsigned long cbm)
-{
- unsigned int size = 0;
- struct cacheinfo *ci;
- int num_b;
-
- if (WARN_ON_ONCE(r->ctrl_scope != RESCTRL_L2_CACHE && r->ctrl_scope != RESCTRL_L3_CACHE))
- return size;
-
- num_b = bitmap_weight(&cbm, r->cache.cbm_len);
- ci = get_cpu_cacheinfo_level(cpumask_any(&d->hdr.cpu_mask), r->ctrl_scope);
- if (ci)
- size = ci->size / r->cache.cbm_len * num_b;
-
- return size;
-}
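
The arithmetic in rdtgroup_cbm_to_size() can be checked with a small
stand-alone example (the cache size, CBM length and bitmask below are
illustrative values, not read from any real topology):

#include <stdio.h>

int main(void)
{
	unsigned int cache_size = 32 * 1024 * 1024;	/* 32 MiB cache, hypothetical */
	unsigned int cbm_len = 16;			/* 16-bit capacity bitmask */
	unsigned long cbm = 0x00ff;			/* 8 contiguous bits set */
	unsigned int num_b = __builtin_popcountl(cbm);
	unsigned int size = cache_size / cbm_len * num_b;

	/* 32 MiB / 16 bits = 2 MiB per bit; 8 bits set -> 16 MiB */
	printf("%u bytes\n", size);
	return 0;
}
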
-
-/*
- * rdtgroup_size_show - Display size in bytes of allocated regions
- *
- * The "size" file mirrors the layout of the "schemata" file, printing the
- * size in bytes of each region instead of the capacity bitmask.
- */
-static int rdtgroup_size_show(struct kernfs_open_file *of,
- struct seq_file *s, void *v)
-{
- struct resctrl_schema *schema;
- enum resctrl_conf_type type;
- struct rdt_ctrl_domain *d;
- struct rdtgroup *rdtgrp;
- struct rdt_resource *r;
- unsigned int size;
- int ret = 0;
- u32 closid;
- bool sep;
- u32 ctrl;
-
- rdtgrp = rdtgroup_kn_lock_live(of->kn);
- if (!rdtgrp) {
- rdtgroup_kn_unlock(of->kn);
- return -ENOENT;
- }
-
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
- if (!rdtgrp->plr->d) {
- rdt_last_cmd_clear();
- rdt_last_cmd_puts("Cache domain offline\n");
- ret = -ENODEV;
- } else {
- seq_printf(s, "%*s:", max_name_width,
- rdtgrp->plr->s->name);
- size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
- rdtgrp->plr->d,
- rdtgrp->plr->cbm);
- seq_printf(s, "%d=%u\n", rdtgrp->plr->d->hdr.id, size);
- }
- goto out;
- }
-
- closid = rdtgrp->closid;
-
- list_for_each_entry(schema, &resctrl_schema_all, list) {
- r = schema->res;
- type = schema->conf_type;
- sep = false;
- seq_printf(s, "%*s:", max_name_width, schema->name);
- list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
- if (sep)
- seq_putc(s, ';');
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
- size = 0;
- } else {
- if (is_mba_sc(r))
- ctrl = d->mbps_val[closid];
- else
- ctrl = resctrl_arch_get_config(r, d,
- closid,
- type);
- if (r->rid == RDT_RESOURCE_MBA ||
- r->rid == RDT_RESOURCE_SMBA)
- size = ctrl;
- else
- size = rdtgroup_cbm_to_size(r, d, ctrl);
- }
- seq_printf(s, "%d=%u", d->hdr.id, size);
- sep = true;
- }
- seq_putc(s, '\n');
- }
-
-out:
- rdtgroup_kn_unlock(of->kn);
-
- return ret;
+ resctrl_arch_sched_in(current);
}
#define INVALID_CONFIG_INDEX UINT_MAX
@@ -1635,68 +96,12 @@ void resctrl_arch_mon_event_config_read(void *_config_info)
pr_warn_once("Invalid event id %d\n", config_info->evtid);
return;
}
- rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval);
+ rdmsrq(MSR_IA32_EVT_CFG_BASE + index, msrval);
/* Report only the valid event configuration bits */
config_info->mon_config = msrval & MAX_EVT_CONFIG_BITS;
}
-static void mondata_config_read(struct resctrl_mon_config_info *mon_info)
-{
- smp_call_function_any(&mon_info->d->hdr.cpu_mask,
- resctrl_arch_mon_event_config_read, mon_info, 1);
-}
-
-static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid)
-{
- struct resctrl_mon_config_info mon_info;
- struct rdt_mon_domain *dom;
- bool sep = false;
-
- cpus_read_lock();
- mutex_lock(&rdtgroup_mutex);
-
- list_for_each_entry(dom, &r->mon_domains, hdr.list) {
- if (sep)
- seq_puts(s, ";");
-
- memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info));
- mon_info.r = r;
- mon_info.d = dom;
- mon_info.evtid = evtid;
- mondata_config_read(&mon_info);
-
- seq_printf(s, "%d=0x%02x", dom->hdr.id, mon_info.mon_config);
- sep = true;
- }
- seq_puts(s, "\n");
-
- mutex_unlock(&rdtgroup_mutex);
- cpus_read_unlock();
-
- return 0;
-}
-
-static int mbm_total_bytes_config_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
-
- mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID);
-
- return 0;
-}
-
-static int mbm_local_bytes_config_show(struct kernfs_open_file *of,
- struct seq_file *seq, void *v)
-{
- struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
-
- mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID);
-
- return 0;
-}
-
void resctrl_arch_mon_event_config_write(void *_config_info)
{
struct resctrl_mon_config_info *config_info = _config_info;
@@ -1707,638 +112,21 @@ void resctrl_arch_mon_event_config_write(void *_config_info)
pr_warn_once("Invalid event id %d\n", config_info->evtid);
return;
}
- wrmsr(MSR_IA32_EVT_CFG_BASE + index, config_info->mon_config, 0);
-}
-
-static void mbm_config_write_domain(struct rdt_resource *r,
- struct rdt_mon_domain *d, u32 evtid, u32 val)
-{
- struct resctrl_mon_config_info mon_info = {0};
-
- /*
- * Read the current config value first. If both are the same then
- * no need to write it again.
- */
- mon_info.r = r;
- mon_info.d = d;
- mon_info.evtid = evtid;
- mondata_config_read(&mon_info);
- if (mon_info.mon_config == val)
- return;
-
- mon_info.mon_config = val;
-
- /*
- * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the
- * domain. The MSRs at offsets from MSR_IA32_EVT_CFG_BASE
- * are scoped at the domain level. Writing any of these MSRs
- * on one CPU is observed by all the CPUs in the domain.
- */
- smp_call_function_any(&d->hdr.cpu_mask, resctrl_arch_mon_event_config_write,
- &mon_info, 1);
-
- /*
- * When an Event Configuration is changed, the bandwidth counters
- * for all RMIDs and Events will be cleared by the hardware. The
- * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for
- * every RMID on the next read to any event for every RMID.
- * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62)
- * cleared while it is tracked by the hardware. Clear the
- * mbm_local and mbm_total counts for all the RMIDs.
- */
- resctrl_arch_reset_rmid_all(r, d);
-}
-
-static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
-{
- char *dom_str = NULL, *id_str;
- unsigned long dom_id, val;
- struct rdt_mon_domain *d;
-
- /* Walking r->domains, ensure it can't race with cpuhp */
- lockdep_assert_cpus_held();
-
-next:
- if (!tok || tok[0] == '\0')
- return 0;
-
- /* Start processing the strings for each domain */
- dom_str = strim(strsep(&tok, ";"));
- id_str = strsep(&dom_str, "=");
-
- if (!id_str || kstrtoul(id_str, 10, &dom_id)) {
- rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n");
- return -EINVAL;
- }
-
- if (!dom_str || kstrtoul(dom_str, 16, &val)) {
- rdt_last_cmd_puts("Non-numeric event configuration value\n");
- return -EINVAL;
- }
-
- /* Value from user cannot be more than the supported set of events */
- if ((val & r->mbm_cfg_mask) != val) {
- rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n",
- r->mbm_cfg_mask);
- return -EINVAL;
- }
-
- list_for_each_entry(d, &r->mon_domains, hdr.list) {
- if (d->hdr.id == dom_id) {
- mbm_config_write_domain(r, d, evtid, val);
- goto next;
- }
- }
-
- return -EINVAL;
-}
-
-static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of,
- char *buf, size_t nbytes,
- loff_t off)
-{
- struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
- int ret;
-
- /* Valid input requires a trailing newline */
- if (nbytes == 0 || buf[nbytes - 1] != '\n')
- return -EINVAL;
-
- cpus_read_lock();
- mutex_lock(&rdtgroup_mutex);
-
- rdt_last_cmd_clear();
-
- buf[nbytes - 1] = '\0';
-
- ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID);
-
- mutex_unlock(&rdtgroup_mutex);
- cpus_read_unlock();
-
- return ret ?: nbytes;
-}
-
-static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
- char *buf, size_t nbytes,
- loff_t off)
-{
- struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
- int ret;
-
- /* Valid input requires a trailing newline */
- if (nbytes == 0 || buf[nbytes - 1] != '\n')
- return -EINVAL;
-
- cpus_read_lock();
- mutex_lock(&rdtgroup_mutex);
-
- rdt_last_cmd_clear();
-
- buf[nbytes - 1] = '\0';
-
- ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID);
-
- mutex_unlock(&rdtgroup_mutex);
- cpus_read_unlock();
-
- return ret ?: nbytes;
-}
-
-/* rdtgroup information files for one cache resource. */
-static struct rftype res_common_files[] = {
- {
- .name = "last_cmd_status",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_last_cmd_status_show,
- .fflags = RFTYPE_TOP_INFO,
- },
- {
- .name = "num_closids",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_num_closids_show,
- .fflags = RFTYPE_CTRL_INFO,
- },
- {
- .name = "mon_features",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_mon_features_show,
- .fflags = RFTYPE_MON_INFO,
- },
- {
- .name = "num_rmids",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_num_rmids_show,
- .fflags = RFTYPE_MON_INFO,
- },
- {
- .name = "cbm_mask",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_default_ctrl_show,
- .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
- },
- {
- .name = "min_cbm_bits",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_min_cbm_bits_show,
- .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
- },
- {
- .name = "shareable_bits",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_shareable_bits_show,
- .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
- },
- {
- .name = "bit_usage",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_bit_usage_show,
- .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
- },
- {
- .name = "min_bandwidth",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_min_bw_show,
- .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
- },
- {
- .name = "bandwidth_gran",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_bw_gran_show,
- .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
- },
- {
- .name = "delay_linear",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_delay_linear_show,
- .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
- },
- /*
- * It is platform specific which (if any) capabilities are provided by
- * thread_throttle_mode. Defer "fflags" initialization to platform
- * discovery.
- */
- {
- .name = "thread_throttle_mode",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_thread_throttle_mode_show,
- },
- {
- .name = "max_threshold_occupancy",
- .mode = 0644,
- .kf_ops = &rdtgroup_kf_single_ops,
- .write = max_threshold_occ_write,
- .seq_show = max_threshold_occ_show,
- .fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE,
- },
- {
- .name = "mbm_total_bytes_config",
- .mode = 0644,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = mbm_total_bytes_config_show,
- .write = mbm_total_bytes_config_write,
- },
- {
- .name = "mbm_local_bytes_config",
- .mode = 0644,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = mbm_local_bytes_config_show,
- .write = mbm_local_bytes_config_write,
- },
- {
- .name = "cpus",
- .mode = 0644,
- .kf_ops = &rdtgroup_kf_single_ops,
- .write = rdtgroup_cpus_write,
- .seq_show = rdtgroup_cpus_show,
- .fflags = RFTYPE_BASE,
- },
- {
- .name = "cpus_list",
- .mode = 0644,
- .kf_ops = &rdtgroup_kf_single_ops,
- .write = rdtgroup_cpus_write,
- .seq_show = rdtgroup_cpus_show,
- .flags = RFTYPE_FLAGS_CPUS_LIST,
- .fflags = RFTYPE_BASE,
- },
- {
- .name = "tasks",
- .mode = 0644,
- .kf_ops = &rdtgroup_kf_single_ops,
- .write = rdtgroup_tasks_write,
- .seq_show = rdtgroup_tasks_show,
- .fflags = RFTYPE_BASE,
- },
- {
- .name = "mon_hw_id",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdtgroup_rmid_show,
- .fflags = RFTYPE_MON_BASE | RFTYPE_DEBUG,
- },
- {
- .name = "schemata",
- .mode = 0644,
- .kf_ops = &rdtgroup_kf_single_ops,
- .write = rdtgroup_schemata_write,
- .seq_show = rdtgroup_schemata_show,
- .fflags = RFTYPE_CTRL_BASE,
- },
- {
- .name = "mba_MBps_event",
- .mode = 0644,
- .kf_ops = &rdtgroup_kf_single_ops,
- .write = rdtgroup_mba_mbps_event_write,
- .seq_show = rdtgroup_mba_mbps_event_show,
- },
- {
- .name = "mode",
- .mode = 0644,
- .kf_ops = &rdtgroup_kf_single_ops,
- .write = rdtgroup_mode_write,
- .seq_show = rdtgroup_mode_show,
- .fflags = RFTYPE_CTRL_BASE,
- },
- {
- .name = "size",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdtgroup_size_show,
- .fflags = RFTYPE_CTRL_BASE,
- },
- {
- .name = "sparse_masks",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_has_sparse_bitmasks_show,
- .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
- },
- {
- .name = "ctrl_hw_id",
- .mode = 0444,
- .kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdtgroup_closid_show,
- .fflags = RFTYPE_CTRL_BASE | RFTYPE_DEBUG,
- },
-
-};
-
-static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
-{
- struct rftype *rfts, *rft;
- int ret, len;
-
- rfts = res_common_files;
- len = ARRAY_SIZE(res_common_files);
-
- lockdep_assert_held(&rdtgroup_mutex);
-
- if (resctrl_debug)
- fflags |= RFTYPE_DEBUG;
-
- for (rft = rfts; rft < rfts + len; rft++) {
- if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
- ret = rdtgroup_add_file(kn, rft);
- if (ret)
- goto error;
- }
- }
-
- return 0;
-error:
- pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
- while (--rft >= rfts) {
- if ((fflags & rft->fflags) == rft->fflags)
- kernfs_remove_by_name(kn, rft->name);
- }
- return ret;
-}
-
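rdtgroup_add_files() walks all of res_common_files[] and instantiates only the entries whose flag bits are all contained in the flags chosen for the directory being populated. A standalone model of that selection test, using illustrative flag values rather than the kernel's RFTYPE_* definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values only - the real RFTYPE_* flags live in the kernel headers. */
enum { EX_CTRL_INFO = 1 << 0, EX_RES_CACHE = 1 << 1, EX_MON_INFO = 1 << 2 };

static bool rft_selected(unsigned long dir_fflags, unsigned long file_fflags)
{
	/* Same test as rdtgroup_add_files(): every bit of the file must match. */
	return file_fflags && (dir_fflags & file_fflags) == file_fflags;
}

int main(void)
{
	printf("%d\n", rft_selected(EX_CTRL_INFO | EX_RES_CACHE,
				    EX_CTRL_INFO | EX_RES_CACHE));	/* 1: created */
	printf("%d\n", rft_selected(EX_MON_INFO,
				    EX_CTRL_INFO | EX_RES_CACHE));	/* 0: skipped */
	return 0;
}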
-static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
-{
- struct rftype *rfts, *rft;
- int len;
-
- rfts = res_common_files;
- len = ARRAY_SIZE(res_common_files);
-
- for (rft = rfts; rft < rfts + len; rft++) {
- if (!strcmp(rft->name, name))
- return rft;
- }
-
- return NULL;
-}
-
-static void thread_throttle_mode_init(void)
-{
- enum membw_throttle_mode throttle_mode = THREAD_THROTTLE_UNDEFINED;
- struct rdt_resource *r_mba, *r_smba;
-
- r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
- if (r_mba->alloc_capable &&
- r_mba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED)
- throttle_mode = r_mba->membw.throttle_mode;
-
- r_smba = resctrl_arch_get_resource(RDT_RESOURCE_SMBA);
- if (r_smba->alloc_capable &&
- r_smba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED)
- throttle_mode = r_smba->membw.throttle_mode;
-
- if (throttle_mode == THREAD_THROTTLE_UNDEFINED)
- return;
-
- resctrl_file_fflags_init("thread_throttle_mode",
- RFTYPE_CTRL_INFO | RFTYPE_RES_MB);
-}
-
-void resctrl_file_fflags_init(const char *config, unsigned long fflags)
-{
- struct rftype *rft;
-
- rft = rdtgroup_get_rftype_by_name(config);
- if (rft)
- rft->fflags = fflags;
-}
-
-/**
- * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
- * @r: The resource group with which the file is associated.
- * @name: Name of the file
- *
- * The permissions of named resctrl file, directory, or link are modified
- * to not allow read, write, or execute by any user.
- *
- * WARNING: This function is intended to communicate to the user that the
- * resctrl file has been locked down - that it is not relevant to the
- * particular state the system finds itself in. It should not be relied
- * on to protect from user access because after the file's permissions
- * are restricted the user can still change the permissions using chmod
- * from the command line.
- *
- * Return: 0 on success, <0 on failure.
- */
-int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
-{
- struct iattr iattr = {.ia_valid = ATTR_MODE,};
- struct kernfs_node *kn;
- int ret = 0;
-
- kn = kernfs_find_and_get_ns(r->kn, name, NULL);
- if (!kn)
- return -ENOENT;
-
- switch (kernfs_type(kn)) {
- case KERNFS_DIR:
- iattr.ia_mode = S_IFDIR;
- break;
- case KERNFS_FILE:
- iattr.ia_mode = S_IFREG;
- break;
- case KERNFS_LINK:
- iattr.ia_mode = S_IFLNK;
- break;
- }
-
- ret = kernfs_setattr(kn, &iattr);
- kernfs_put(kn);
- return ret;
-}
-
-/**
- * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
- * @r: The resource group with which the file is associated.
- * @name: Name of the file
- * @mask: Mask of permissions that should be restored
- *
- * Restore the permissions of the named file. If @name is a directory the
- * permissions of its parent will be used.
- *
- * Return: 0 on success, <0 on failure.
- */
-int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
- umode_t mask)
-{
- struct iattr iattr = {.ia_valid = ATTR_MODE,};
- struct kernfs_node *kn, *parent;
- struct rftype *rfts, *rft;
- int ret, len;
-
- rfts = res_common_files;
- len = ARRAY_SIZE(res_common_files);
-
- for (rft = rfts; rft < rfts + len; rft++) {
- if (!strcmp(rft->name, name))
- iattr.ia_mode = rft->mode & mask;
- }
-
- kn = kernfs_find_and_get_ns(r->kn, name, NULL);
- if (!kn)
- return -ENOENT;
-
- switch (kernfs_type(kn)) {
- case KERNFS_DIR:
- parent = kernfs_get_parent(kn);
- if (parent) {
- iattr.ia_mode |= parent->mode;
- kernfs_put(parent);
- }
- iattr.ia_mode |= S_IFDIR;
- break;
- case KERNFS_FILE:
- iattr.ia_mode |= S_IFREG;
- break;
- case KERNFS_LINK:
- iattr.ia_mode |= S_IFLNK;
- break;
- }
-
- ret = kernfs_setattr(kn, &iattr);
- kernfs_put(kn);
- return ret;
-}
-
-static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
- unsigned long fflags)
-{
- struct kernfs_node *kn_subdir;
- int ret;
-
- kn_subdir = kernfs_create_dir(kn_info, name,
- kn_info->mode, priv);
- if (IS_ERR(kn_subdir))
- return PTR_ERR(kn_subdir);
-
- ret = rdtgroup_kn_set_ugid(kn_subdir);
- if (ret)
- return ret;
-
- ret = rdtgroup_add_files(kn_subdir, fflags);
- if (!ret)
- kernfs_activate(kn_subdir);
-
- return ret;
-}
-
-static unsigned long fflags_from_resource(struct rdt_resource *r)
-{
- switch (r->rid) {
- case RDT_RESOURCE_L3:
- case RDT_RESOURCE_L2:
- return RFTYPE_RES_CACHE;
- case RDT_RESOURCE_MBA:
- case RDT_RESOURCE_SMBA:
- return RFTYPE_RES_MB;
- }
-
- return WARN_ON_ONCE(1);
-}
-
-static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
-{
- struct resctrl_schema *s;
- struct rdt_resource *r;
- unsigned long fflags;
- char name[32];
- int ret;
-
- /* create the directory */
- kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
- if (IS_ERR(kn_info))
- return PTR_ERR(kn_info);
-
- ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO);
- if (ret)
- goto out_destroy;
-
- /* loop over enabled controls, these are all alloc_capable */
- list_for_each_entry(s, &resctrl_schema_all, list) {
- r = s->res;
- fflags = fflags_from_resource(r) | RFTYPE_CTRL_INFO;
- ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
- if (ret)
- goto out_destroy;
- }
-
- for_each_mon_capable_rdt_resource(r) {
- fflags = fflags_from_resource(r) | RFTYPE_MON_INFO;
- sprintf(name, "%s_MON", r->name);
- ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
- if (ret)
- goto out_destroy;
- }
-
- ret = rdtgroup_kn_set_ugid(kn_info);
- if (ret)
- goto out_destroy;
-
- kernfs_activate(kn_info);
-
- return 0;
-
-out_destroy:
- kernfs_remove(kn_info);
- return ret;
-}
-
-static int
-mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
- char *name, struct kernfs_node **dest_kn)
-{
- struct kernfs_node *kn;
- int ret;
-
- /* create the directory */
- kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
- if (IS_ERR(kn))
- return PTR_ERR(kn);
-
- if (dest_kn)
- *dest_kn = kn;
-
- ret = rdtgroup_kn_set_ugid(kn);
- if (ret)
- goto out_destroy;
-
- kernfs_activate(kn);
-
- return 0;
-
-out_destroy:
- kernfs_remove(kn);
- return ret;
+ wrmsrq(MSR_IA32_EVT_CFG_BASE + index, config_info->mon_config);
}
static void l3_qos_cfg_update(void *arg)
{
bool *enable = arg;
- wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
+ wrmsrq(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}
static void l2_qos_cfg_update(void *arg)
{
bool *enable = arg;
- wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
-}
-
-static inline bool is_mba_linear(void)
-{
- return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear;
+ wrmsrq(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}
static int set_cache_qos_cfg(int level, bool enable)
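The wrmsrl() -> wrmsrq() substitutions in this hunk keep the same (MSR number, 64-bit value) arguments; only the helper name changes. As a hedged sketch of how such per-CPU callbacks are typically driven - an assumption about set_cache_qos_cfg(), of which this hunk only shows the signature:

/* Kernel-context sketch (assumes <linux/smp.h>); "cpus" would be the union of
 * the CPU masks of the affected cache domains.
 */
static void example_update_l3_cdp(const struct cpumask *cpus, bool enable)
{
	on_each_cpu_mask(cpus, l3_qos_cfg_update, &enable, 1);
}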
@@ -2396,76 +184,6 @@ void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
l3_qos_cfg_update(&hw_res->cdp_enabled);
}
-static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_ctrl_domain *d)
-{
- u32 num_closid = resctrl_arch_get_num_closid(r);
- int cpu = cpumask_any(&d->hdr.cpu_mask);
- int i;
-
- d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val),
- GFP_KERNEL, cpu_to_node(cpu));
- if (!d->mbps_val)
- return -ENOMEM;
-
- for (i = 0; i < num_closid; i++)
- d->mbps_val[i] = MBA_MAX_MBPS;
-
- return 0;
-}
-
-static void mba_sc_domain_destroy(struct rdt_resource *r,
- struct rdt_ctrl_domain *d)
-{
- kfree(d->mbps_val);
- d->mbps_val = NULL;
-}
-
-/*
- * MBA software controller is supported only if
- * MBM is supported and MBA is in linear scale,
- * and the MBM monitor scope is the same as MBA
- * control scope.
- */
-static bool supports_mba_mbps(void)
-{
- struct rdt_resource *rmbm = resctrl_arch_get_resource(RDT_RESOURCE_L3);
- struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
-
- return (resctrl_is_mbm_enabled() &&
- r->alloc_capable && is_mba_linear() &&
- r->ctrl_scope == rmbm->mon_scope);
-}
-
-/*
- * Enable or disable the MBA software controller
- * which helps user specify bandwidth in MBps.
- */
-static int set_mba_sc(bool mba_sc)
-{
- struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
- u32 num_closid = resctrl_arch_get_num_closid(r);
- struct rdt_ctrl_domain *d;
- unsigned long fflags;
- int i;
-
- if (!supports_mba_mbps() || mba_sc == is_mba_sc(r))
- return -EINVAL;
-
- r->membw.mba_sc = mba_sc;
-
- rdtgroup_default.mba_mbps_event = mba_mbps_default_event;
-
- list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
- for (i = 0; i < num_closid; i++)
- d->mbps_val[i] = MBA_MAX_MBPS;
- }
-
- fflags = mba_sc ? RFTYPE_CTRL_BASE | RFTYPE_MON_BASE : 0;
- resctrl_file_fflags_init("mba_MBps_event", fflags);
-
- return 0;
-}
-
static int cdp_enable(int level)
{
struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
@@ -2506,419 +224,9 @@ int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
return 0;
}
-/*
- * We don't allow rdtgroup directories to be created anywhere
- * except the root directory. Thus when looking for the rdtgroup
- * structure for a kernfs node we are either looking at a directory,
- * in which case the rdtgroup structure is pointed at by the "priv"
- * field, otherwise we have a file, and need only look to the parent
- * to find the rdtgroup.
- */
-static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
-{
- if (kernfs_type(kn) == KERNFS_DIR) {
- /*
- * All the resource directories use "kn->priv"
- * to point to the "struct rdtgroup" for the
- * resource. "info" and its subdirectories don't
- * have rdtgroup structures, so return NULL here.
- */
- if (kn == kn_info ||
- rcu_access_pointer(kn->__parent) == kn_info)
- return NULL;
- else
- return kn->priv;
- } else {
- return rdt_kn_parent_priv(kn);
- }
-}
-
-static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn)
-{
- atomic_inc(&rdtgrp->waitcount);
- kernfs_break_active_protection(kn);
-}
-
-static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn)
-{
- if (atomic_dec_and_test(&rdtgrp->waitcount) &&
- (rdtgrp->flags & RDT_DELETED)) {
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
- rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
- rdtgroup_pseudo_lock_remove(rdtgrp);
- kernfs_unbreak_active_protection(kn);
- rdtgroup_remove(rdtgrp);
- } else {
- kernfs_unbreak_active_protection(kn);
- }
-}
-
-struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
-{
- struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
-
- if (!rdtgrp)
- return NULL;
-
- rdtgroup_kn_get(rdtgrp, kn);
-
- cpus_read_lock();
- mutex_lock(&rdtgroup_mutex);
-
- /* Was this group deleted while we waited? */
- if (rdtgrp->flags & RDT_DELETED)
- return NULL;
-
- return rdtgrp;
-}
-
-void rdtgroup_kn_unlock(struct kernfs_node *kn)
-{
- struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
-
- if (!rdtgrp)
- return;
-
- mutex_unlock(&rdtgroup_mutex);
- cpus_read_unlock();
-
- rdtgroup_kn_put(rdtgrp, kn);
-}
-
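rdtgroup_kn_lock_live() and rdtgroup_kn_unlock() always bracket a file operation: the live lookup pins the group, and the unlock path frees a group that was deleted while the handler slept. An illustrative handler following that pattern (not taken from this diff):

/* Kernel-context sketch; the signature matches the seq_show hook used with
 * rdtgroup_kf_single_ops.
 */
static int example_closid_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp = rdtgroup_kn_lock_live(of->kn);
	int ret = 0;

	if (rdtgrp)
		seq_printf(s, "%u\n", rdtgrp->closid);
	else
		ret = -ENOENT;		/* group vanished while we waited */

	rdtgroup_kn_unlock(of->kn);	/* always paired with the lookup above */
	return ret;
}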
-static int mkdir_mondata_all(struct kernfs_node *parent_kn,
- struct rdtgroup *prgrp,
- struct kernfs_node **mon_data_kn);
-
-static void rdt_disable_ctx(void)
-{
- resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
- resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
- set_mba_sc(false);
-
- resctrl_debug = false;
-}
-
-static int rdt_enable_ctx(struct rdt_fs_context *ctx)
-{
- int ret = 0;
-
- if (ctx->enable_cdpl2) {
- ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);
- if (ret)
- goto out_done;
- }
-
- if (ctx->enable_cdpl3) {
- ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);
- if (ret)
- goto out_cdpl2;
- }
-
- if (ctx->enable_mba_mbps) {
- ret = set_mba_sc(true);
- if (ret)
- goto out_cdpl3;
- }
-
- if (ctx->enable_debug)
- resctrl_debug = true;
-
- return 0;
-
-out_cdpl3:
- resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
-out_cdpl2:
- resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
-out_done:
- return ret;
-}
-
-static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type)
-{
- struct resctrl_schema *s;
- const char *suffix = "";
- int ret, cl;
-
- s = kzalloc(sizeof(*s), GFP_KERNEL);
- if (!s)
- return -ENOMEM;
-
- s->res = r;
- s->num_closid = resctrl_arch_get_num_closid(r);
- if (resctrl_arch_get_cdp_enabled(r->rid))
- s->num_closid /= 2;
-
- s->conf_type = type;
- switch (type) {
- case CDP_CODE:
- suffix = "CODE";
- break;
- case CDP_DATA:
- suffix = "DATA";
- break;
- case CDP_NONE:
- suffix = "";
- break;
- }
-
- ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix);
- if (ret >= sizeof(s->name)) {
- kfree(s);
- return -EINVAL;
- }
-
- cl = strlen(s->name);
-
- /*
- * If CDP is supported by this resource, but not enabled,
- * include the suffix. This ensures the tabular format of the
- * schemata file does not change between mounts of the filesystem.
- */
- if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid))
- cl += 4;
-
- if (cl > max_name_width)
- max_name_width = cl;
-
- switch (r->schema_fmt) {
- case RESCTRL_SCHEMA_BITMAP:
- s->fmt_str = "%d=%x";
- break;
- case RESCTRL_SCHEMA_RANGE:
- s->fmt_str = "%d=%u";
- break;
- }
-
- if (WARN_ON_ONCE(!s->fmt_str)) {
- kfree(s);
- return -EINVAL;
- }
-
- INIT_LIST_HEAD(&s->list);
- list_add(&s->list, &resctrl_schema_all);
-
- return 0;
-}
-
-static int schemata_list_create(void)
-{
- struct rdt_resource *r;
- int ret = 0;
-
- for_each_alloc_capable_rdt_resource(r) {
- if (resctrl_arch_get_cdp_enabled(r->rid)) {
- ret = schemata_list_add(r, CDP_CODE);
- if (ret)
- break;
-
- ret = schemata_list_add(r, CDP_DATA);
- } else {
- ret = schemata_list_add(r, CDP_NONE);
- }
-
- if (ret)
- break;
- }
-
- return ret;
-}
-
-static void schemata_list_destroy(void)
-{
- struct resctrl_schema *s, *tmp;
-
- list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) {
- list_del(&s->list);
- kfree(s);
- }
-}
-
-static int rdt_get_tree(struct fs_context *fc)
-{
- struct rdt_fs_context *ctx = rdt_fc2context(fc);
- unsigned long flags = RFTYPE_CTRL_BASE;
- struct rdt_mon_domain *dom;
- struct rdt_resource *r;
- int ret;
-
- cpus_read_lock();
- mutex_lock(&rdtgroup_mutex);
- /*
- * resctrl file system can only be mounted once.
- */
- if (resctrl_mounted) {
- ret = -EBUSY;
- goto out;
- }
-
- ret = rdtgroup_setup_root(ctx);
- if (ret)
- goto out;
-
- ret = rdt_enable_ctx(ctx);
- if (ret)
- goto out_root;
-
- ret = schemata_list_create();
- if (ret) {
- schemata_list_destroy();
- goto out_ctx;
- }
-
- closid_init();
-
- if (resctrl_arch_mon_capable())
- flags |= RFTYPE_MON;
-
- ret = rdtgroup_add_files(rdtgroup_default.kn, flags);
- if (ret)
- goto out_schemata_free;
-
- kernfs_activate(rdtgroup_default.kn);
-
- ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
- if (ret < 0)
- goto out_schemata_free;
-
- if (resctrl_arch_mon_capable()) {
- ret = mongroup_create_dir(rdtgroup_default.kn,
- &rdtgroup_default, "mon_groups",
- &kn_mongrp);
- if (ret < 0)
- goto out_info;
-
- ret = mkdir_mondata_all(rdtgroup_default.kn,
- &rdtgroup_default, &kn_mondata);
- if (ret < 0)
- goto out_mongrp;
- rdtgroup_default.mon.mon_data_kn = kn_mondata;
- }
-
- ret = rdt_pseudo_lock_init();
- if (ret)
- goto out_mondata;
-
- ret = kernfs_get_tree(fc);
- if (ret < 0)
- goto out_psl;
-
- if (resctrl_arch_alloc_capable())
- resctrl_arch_enable_alloc();
- if (resctrl_arch_mon_capable())
- resctrl_arch_enable_mon();
-
- if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable())
- resctrl_mounted = true;
-
- if (resctrl_is_mbm_enabled()) {
- r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
- list_for_each_entry(dom, &r->mon_domains, hdr.list)
- mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL,
- RESCTRL_PICK_ANY_CPU);
- }
-
- goto out;
-
-out_psl:
- rdt_pseudo_lock_release();
-out_mondata:
- if (resctrl_arch_mon_capable())
- kernfs_remove(kn_mondata);
-out_mongrp:
- if (resctrl_arch_mon_capable())
- kernfs_remove(kn_mongrp);
-out_info:
- kernfs_remove(kn_info);
-out_schemata_free:
- schemata_list_destroy();
-out_ctx:
- rdt_disable_ctx();
-out_root:
- rdtgroup_destroy_root();
-out:
- rdt_last_cmd_clear();
- mutex_unlock(&rdtgroup_mutex);
- cpus_read_unlock();
- return ret;
-}
-
-enum rdt_param {
- Opt_cdp,
- Opt_cdpl2,
- Opt_mba_mbps,
- Opt_debug,
- nr__rdt_params
-};
-
-static const struct fs_parameter_spec rdt_fs_parameters[] = {
- fsparam_flag("cdp", Opt_cdp),
- fsparam_flag("cdpl2", Opt_cdpl2),
- fsparam_flag("mba_MBps", Opt_mba_mbps),
- fsparam_flag("debug", Opt_debug),
- {}
-};
-
-static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
-{
- struct rdt_fs_context *ctx = rdt_fc2context(fc);
- struct fs_parse_result result;
- const char *msg;
- int opt;
-
- opt = fs_parse(fc, rdt_fs_parameters, param, &result);
- if (opt < 0)
- return opt;
-
- switch (opt) {
- case Opt_cdp:
- ctx->enable_cdpl3 = true;
- return 0;
- case Opt_cdpl2:
- ctx->enable_cdpl2 = true;
- return 0;
- case Opt_mba_mbps:
- msg = "mba_MBps requires MBM and linear scale MBA at L3 scope";
- if (!supports_mba_mbps())
- return invalfc(fc, msg);
- ctx->enable_mba_mbps = true;
- return 0;
- case Opt_debug:
- ctx->enable_debug = true;
- return 0;
- }
-
- return -EINVAL;
-}
-
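The parameter table and parser above implement the resctrl mount options; whether an option is accepted can depend on hardware (mba_MBps, for instance, requires supports_mba_mbps()). A minimal user-space sketch of mounting with two of those options - the target path and option mix are only an example:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, "cdp,mba_MBps")) {
		perror("mount resctrl");
		return 1;
	}
	return 0;
}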
-static void rdt_fs_context_free(struct fs_context *fc)
+bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l)
{
- struct rdt_fs_context *ctx = rdt_fc2context(fc);
-
- kernfs_free_fs_context(fc);
- kfree(ctx);
-}
-
-static const struct fs_context_operations rdt_fs_context_ops = {
- .free = rdt_fs_context_free,
- .parse_param = rdt_parse_param,
- .get_tree = rdt_get_tree,
-};
-
-static int rdt_init_fs_context(struct fs_context *fc)
-{
- struct rdt_fs_context *ctx;
-
- ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
- fc->fs_private = &ctx->kfc;
- fc->ops = &rdt_fs_context_ops;
- put_user_ns(fc->user_ns);
- fc->user_ns = get_user_ns(&init_user_ns);
- fc->global = true;
- return 0;
+ return rdt_resources_all[l].cdp_enabled;
}
void resctrl_arch_reset_all_ctrls(struct rdt_resource *r)
@@ -2952,1454 +260,3 @@ void resctrl_arch_reset_all_ctrls(struct rdt_resource *r)
return;
}
-
-/*
- * Move tasks from one to the other group. If @from is NULL, then all tasks
- * in the systems are moved unconditionally (used for teardown).
- *
- * If @mask is not NULL the cpus on which moved tasks are running are set
- * in that mask so the update smp function call is restricted to affected
- * cpus.
- */
-static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
- struct cpumask *mask)
-{
- struct task_struct *p, *t;
-
- read_lock(&tasklist_lock);
- for_each_process_thread(p, t) {
- if (!from || is_closid_match(t, from) ||
- is_rmid_match(t, from)) {
- resctrl_arch_set_closid_rmid(t, to->closid,
- to->mon.rmid);
-
- /*
- * Order the closid/rmid stores above before the loads
- * in task_curr(). This pairs with the full barrier
- * between the rq->curr update and resctrl_sched_in()
- * during context switch.
- */
- smp_mb();
-
- /*
- * If the task is on a CPU, set the CPU in the mask.
- * The detection is inaccurate as tasks might move or
- * schedule before the smp function call takes place.
- * In such a case the function call is pointless, but
- * there is no other side effect.
- */
- if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
- cpumask_set_cpu(task_cpu(t), mask);
- }
- }
- read_unlock(&tasklist_lock);
-}
-
-static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
-{
- struct rdtgroup *sentry, *stmp;
- struct list_head *head;
-
- head = &rdtgrp->mon.crdtgrp_list;
- list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
- free_rmid(sentry->closid, sentry->mon.rmid);
- list_del(&sentry->mon.crdtgrp_list);
-
- if (atomic_read(&sentry->waitcount) != 0)
- sentry->flags = RDT_DELETED;
- else
- rdtgroup_remove(sentry);
- }
-}
-
-/*
- * Forcibly remove all of subdirectories under root.
- */
-static void rmdir_all_sub(void)
-{
- struct rdtgroup *rdtgrp, *tmp;
-
- /* Move all tasks to the default resource group */
- rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
-
- list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
- /* Free any child rmids */
- free_all_child_rdtgrp(rdtgrp);
-
- /* Remove each rdtgroup other than root */
- if (rdtgrp == &rdtgroup_default)
- continue;
-
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
- rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
- rdtgroup_pseudo_lock_remove(rdtgrp);
-
- /*
- * Give any CPUs back to the default group. We cannot copy
- * cpu_online_mask because a CPU might have executed the
- * offline callback already, but is still marked online.
- */
- cpumask_or(&rdtgroup_default.cpu_mask,
- &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
-
- free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
-
- kernfs_remove(rdtgrp->kn);
- list_del(&rdtgrp->rdtgroup_list);
-
- if (atomic_read(&rdtgrp->waitcount) != 0)
- rdtgrp->flags = RDT_DELETED;
- else
- rdtgroup_remove(rdtgrp);
- }
- /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
- update_closid_rmid(cpu_online_mask, &rdtgroup_default);
-
- kernfs_remove(kn_info);
- kernfs_remove(kn_mongrp);
- kernfs_remove(kn_mondata);
-}
-
-static void rdt_kill_sb(struct super_block *sb)
-{
- struct rdt_resource *r;
-
- cpus_read_lock();
- mutex_lock(&rdtgroup_mutex);
-
- rdt_disable_ctx();
-
- /* Put everything back to default values. */
- for_each_alloc_capable_rdt_resource(r)
- resctrl_arch_reset_all_ctrls(r);
-
- rmdir_all_sub();
- rdt_pseudo_lock_release();
- rdtgroup_default.mode = RDT_MODE_SHAREABLE;
- schemata_list_destroy();
- rdtgroup_destroy_root();
- if (resctrl_arch_alloc_capable())
- resctrl_arch_disable_alloc();
- if (resctrl_arch_mon_capable())
- resctrl_arch_disable_mon();
- resctrl_mounted = false;
- kernfs_kill_sb(sb);
- mutex_unlock(&rdtgroup_mutex);
- cpus_read_unlock();
-}
-
-static struct file_system_type rdt_fs_type = {
- .name = "resctrl",
- .init_fs_context = rdt_init_fs_context,
- .parameters = rdt_fs_parameters,
- .kill_sb = rdt_kill_sb,
-};
-
-static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
- void *priv)
-{
- struct kernfs_node *kn;
- int ret = 0;
-
- kn = __kernfs_create_file(parent_kn, name, 0444,
- GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
- &kf_mondata_ops, priv, NULL, NULL);
- if (IS_ERR(kn))
- return PTR_ERR(kn);
-
- ret = rdtgroup_kn_set_ugid(kn);
- if (ret) {
- kernfs_remove(kn);
- return ret;
- }
-
- return ret;
-}
-
-static void mon_rmdir_one_subdir(struct kernfs_node *pkn, char *name, char *subname)
-{
- struct kernfs_node *kn;
-
- kn = kernfs_find_and_get(pkn, name);
- if (!kn)
- return;
- kernfs_put(kn);
-
- if (kn->dir.subdirs <= 1)
- kernfs_remove(kn);
- else
- kernfs_remove_by_name(kn, subname);
-}
-
-/*
- * Remove all subdirectories of mon_data of ctrl_mon groups
- * and monitor groups for the given domain.
- * Remove files and directories containing "sum" of domain data
- * when last domain being summed is removed.
- */
-static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
- struct rdt_mon_domain *d)
-{
- struct rdtgroup *prgrp, *crgrp;
- char subname[32];
- bool snc_mode;
- char name[32];
-
- snc_mode = r->mon_scope == RESCTRL_L3_NODE;
- sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci->id : d->hdr.id);
- if (snc_mode)
- sprintf(subname, "mon_sub_%s_%02d", r->name, d->hdr.id);
-
- list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
- mon_rmdir_one_subdir(prgrp->mon.mon_data_kn, name, subname);
-
- list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
- mon_rmdir_one_subdir(crgrp->mon.mon_data_kn, name, subname);
- }
-}
-
-static int mon_add_all_files(struct kernfs_node *kn, struct rdt_mon_domain *d,
- struct rdt_resource *r, struct rdtgroup *prgrp,
- bool do_sum)
-{
- struct rmid_read rr = {0};
- union mon_data_bits priv;
- struct mon_evt *mevt;
- int ret;
-
- if (WARN_ON(list_empty(&r->evt_list)))
- return -EPERM;
-
- priv.u.rid = r->rid;
- priv.u.domid = do_sum ? d->ci->id : d->hdr.id;
- priv.u.sum = do_sum;
- list_for_each_entry(mevt, &r->evt_list, list) {
- priv.u.evtid = mevt->evtid;
- ret = mon_addfile(kn, mevt->name, priv.priv);
- if (ret)
- return ret;
-
- if (!do_sum && resctrl_is_mbm_event(mevt->evtid))
- mon_event_read(&rr, r, d, prgrp, &d->hdr.cpu_mask, mevt->evtid, true);
- }
-
- return 0;
-}
-
-static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
- struct rdt_mon_domain *d,
- struct rdt_resource *r, struct rdtgroup *prgrp)
-{
- struct kernfs_node *kn, *ckn;
- char name[32];
- bool snc_mode;
- int ret = 0;
-
- lockdep_assert_held(&rdtgroup_mutex);
-
- snc_mode = r->mon_scope == RESCTRL_L3_NODE;
- sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci->id : d->hdr.id);
- kn = kernfs_find_and_get(parent_kn, name);
- if (kn) {
- /*
- * rdtgroup_mutex will prevent this directory from being
- * removed. No need to keep this hold.
- */
- kernfs_put(kn);
- } else {
- kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
- if (IS_ERR(kn))
- return PTR_ERR(kn);
-
- ret = rdtgroup_kn_set_ugid(kn);
- if (ret)
- goto out_destroy;
- ret = mon_add_all_files(kn, d, r, prgrp, snc_mode);
- if (ret)
- goto out_destroy;
- }
-
- if (snc_mode) {
- sprintf(name, "mon_sub_%s_%02d", r->name, d->hdr.id);
- ckn = kernfs_create_dir(kn, name, parent_kn->mode, prgrp);
- if (IS_ERR(ckn)) {
- ret = -EINVAL;
- goto out_destroy;
- }
-
- ret = rdtgroup_kn_set_ugid(ckn);
- if (ret)
- goto out_destroy;
-
- ret = mon_add_all_files(ckn, d, r, prgrp, false);
- if (ret)
- goto out_destroy;
- }
-
- kernfs_activate(kn);
- return 0;
-
-out_destroy:
- kernfs_remove(kn);
- return ret;
-}
-
-/*
- * Add all subdirectories of mon_data for "ctrl_mon" groups
- * and "monitor" groups with given domain id.
- */
-static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
- struct rdt_mon_domain *d)
-{
- struct kernfs_node *parent_kn;
- struct rdtgroup *prgrp, *crgrp;
- struct list_head *head;
-
- list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
- parent_kn = prgrp->mon.mon_data_kn;
- mkdir_mondata_subdir(parent_kn, d, r, prgrp);
-
- head = &prgrp->mon.crdtgrp_list;
- list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
- parent_kn = crgrp->mon.mon_data_kn;
- mkdir_mondata_subdir(parent_kn, d, r, crgrp);
- }
- }
-}
-
-static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
- struct rdt_resource *r,
- struct rdtgroup *prgrp)
-{
- struct rdt_mon_domain *dom;
- int ret;
-
- /* Walking r->domains, ensure it can't race with cpuhp */
- lockdep_assert_cpus_held();
-
- list_for_each_entry(dom, &r->mon_domains, hdr.list) {
- ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/*
- * This creates a directory mon_data which contains the monitored data.
- *
- * mon_data has one directory for each domain which are named
- * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data
- * with L3 domain looks as below:
- * ./mon_data:
- * mon_L3_00
- * mon_L3_01
- * mon_L3_02
- * ...
- *
- * Each domain directory has one file per event:
- * ./mon_L3_00/:
- * llc_occupancy
- *
- */
-static int mkdir_mondata_all(struct kernfs_node *parent_kn,
- struct rdtgroup *prgrp,
- struct kernfs_node **dest_kn)
-{
- struct rdt_resource *r;
- struct kernfs_node *kn;
- int ret;
-
- /*
- * Create the mon_data directory first.
- */
- ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
- if (ret)
- return ret;
-
- if (dest_kn)
- *dest_kn = kn;
-
- /*
- * Create the subdirectories for each domain. Note that all events
- * in a domain like L3 are grouped into a resource whose domain is L3
- */
- for_each_mon_capable_rdt_resource(r) {
- ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
- if (ret)
- goto out_destroy;
- }
-
- return 0;
-
-out_destroy:
- kernfs_remove(kn);
- return ret;
-}
-
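Once mkdir_mondata_all() has built the layout described in the comment above it, each event is exposed as a read-only file. A small user-space reader; the domain id in the path depends on the system:

#include <stdio.h>

int main(void)
{
	unsigned long long bytes;
	FILE *f = fopen("/sys/fs/resctrl/mon_data/mon_L3_00/llc_occupancy", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &bytes) == 1)
		printf("llc_occupancy: %llu bytes\n", bytes);
	fclose(f);
	return 0;
}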
-/**
- * cbm_ensure_valid - Enforce validity on provided CBM
- * @_val: Candidate CBM
- * @r: RDT resource to which the CBM belongs
- *
- * The provided CBM represents all cache portions available for use. This
- * may be represented by a bitmap that does not consist of contiguous ones
- * and thus be an invalid CBM.
- * Here the provided CBM is forced to be a valid CBM by only considering
- * the first set of contiguous bits as valid and clearing all other bits.
- * The intention here is to provide a valid default CBM with which a new
- * resource group is initialized. The user can follow this with a
- * modification to the CBM if the default does not satisfy the
- * requirements.
- */
-static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
-{
- unsigned int cbm_len = r->cache.cbm_len;
- unsigned long first_bit, zero_bit;
- unsigned long val = _val;
-
- if (!val)
- return 0;
-
- first_bit = find_first_bit(&val, cbm_len);
- zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
-
- /* Clear any remaining bits to ensure contiguous region */
- bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
- return (u32)val;
-}
-
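cbm_ensure_valid() keeps only the first contiguous run of set bits. A standalone model with a worked value: 0xB (0b1011) over a 4-bit CBM collapses to 0x3, because the run starting at bit 0 ends at bit 2 and everything above it is cleared:

#include <stdio.h>

static unsigned int cbm_first_run(unsigned int val, unsigned int cbm_len)
{
	unsigned int first, zero;

	if (!val)
		return 0;
	first = __builtin_ctz(val);		/* first set bit */
	zero = first;
	while (zero < cbm_len && (val & (1u << zero)))
		zero++;				/* first clear bit after the run */
	if (zero >= 32)				/* run reaches the top of the word */
		return val;
	return val & ((1u << zero) - 1);	/* drop everything above the run */
}

int main(void)
{
	printf("%#x\n", cbm_first_run(0xB, 4));	/* prints 0x3 */
	return 0;
}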
-/*
- * Initialize cache resources per RDT domain
- *
- * Set the RDT domain up to start off with all usable allocations. That is,
- * all shareable and unused bits. All-zero CBM is invalid.
- */
-static int __init_one_rdt_domain(struct rdt_ctrl_domain *d, struct resctrl_schema *s,
- u32 closid)
-{
- enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
- enum resctrl_conf_type t = s->conf_type;
- struct resctrl_staged_config *cfg;
- struct rdt_resource *r = s->res;
- u32 used_b = 0, unused_b = 0;
- unsigned long tmp_cbm;
- enum rdtgrp_mode mode;
- u32 peer_ctl, ctrl_val;
- int i;
-
- cfg = &d->staged_config[t];
- cfg->have_new_ctrl = false;
- cfg->new_ctrl = r->cache.shareable_bits;
- used_b = r->cache.shareable_bits;
- for (i = 0; i < closids_supported(); i++) {
- if (closid_allocated(i) && i != closid) {
- mode = rdtgroup_mode_by_closid(i);
- if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
- /*
- * ctrl values for locksetup aren't relevant
- * until the schemata is written, and the mode
- * becomes RDT_MODE_PSEUDO_LOCKED.
- */
- continue;
- /*
- * If CDP is active include peer domain's
- * usage to ensure there is no overlap
- * with an exclusive group.
- */
- if (resctrl_arch_get_cdp_enabled(r->rid))
- peer_ctl = resctrl_arch_get_config(r, d, i,
- peer_type);
- else
- peer_ctl = 0;
- ctrl_val = resctrl_arch_get_config(r, d, i,
- s->conf_type);
- used_b |= ctrl_val | peer_ctl;
- if (mode == RDT_MODE_SHAREABLE)
- cfg->new_ctrl |= ctrl_val | peer_ctl;
- }
- }
- if (d->plr && d->plr->cbm > 0)
- used_b |= d->plr->cbm;
- unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
- unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
- cfg->new_ctrl |= unused_b;
- /*
- * Force the initial CBM to be valid, user can
- * modify the CBM based on system availability.
- */
- cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r);
- /*
- * Assign the u32 CBM to an unsigned long to ensure that
- * bitmap_weight() does not access out-of-bound memory.
- */
- tmp_cbm = cfg->new_ctrl;
- if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
- rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->hdr.id);
- return -ENOSPC;
- }
- cfg->have_new_ctrl = true;
-
- return 0;
-}
-
-/*
- * Initialize cache resources with default values.
- *
- * A new RDT group is being created on an allocation capable (CAT)
- * supporting system. Set this group up to start off with all usable
- * allocations.
- *
- * If there are no more shareable bits available on any domain then
- * the entire allocation will fail.
- */
-static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
-{
- struct rdt_ctrl_domain *d;
- int ret;
-
- list_for_each_entry(d, &s->res->ctrl_domains, hdr.list) {
- ret = __init_one_rdt_domain(d, s, closid);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-/* Initialize MBA resource with default values. */
-static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid)
-{
- struct resctrl_staged_config *cfg;
- struct rdt_ctrl_domain *d;
-
- list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
- if (is_mba_sc(r)) {
- d->mbps_val[closid] = MBA_MAX_MBPS;
- continue;
- }
-
- cfg = &d->staged_config[CDP_NONE];
- cfg->new_ctrl = resctrl_get_default_ctrl(r);
- cfg->have_new_ctrl = true;
- }
-}
-
-/* Initialize the RDT group's allocations. */
-static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
-{
- struct resctrl_schema *s;
- struct rdt_resource *r;
- int ret = 0;
-
- rdt_staged_configs_clear();
-
- list_for_each_entry(s, &resctrl_schema_all, list) {
- r = s->res;
- if (r->rid == RDT_RESOURCE_MBA ||
- r->rid == RDT_RESOURCE_SMBA) {
- rdtgroup_init_mba(r, rdtgrp->closid);
- if (is_mba_sc(r))
- continue;
- } else {
- ret = rdtgroup_init_cat(s, rdtgrp->closid);
- if (ret < 0)
- goto out;
- }
-
- ret = resctrl_arch_update_domains(r, rdtgrp->closid);
- if (ret < 0) {
- rdt_last_cmd_puts("Failed to initialize allocations\n");
- goto out;
- }
-
- }
-
- rdtgrp->mode = RDT_MODE_SHAREABLE;
-
-out:
- rdt_staged_configs_clear();
- return ret;
-}
-
-static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp)
-{
- int ret;
-
- if (!resctrl_arch_mon_capable())
- return 0;
-
- ret = alloc_rmid(rdtgrp->closid);
- if (ret < 0) {
- rdt_last_cmd_puts("Out of RMIDs\n");
- return ret;
- }
- rdtgrp->mon.rmid = ret;
-
- ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
- if (ret) {
- rdt_last_cmd_puts("kernfs subdir error\n");
- free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
- return ret;
- }
-
- return 0;
-}
-
-static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp)
-{
- if (resctrl_arch_mon_capable())
- free_rmid(rgrp->closid, rgrp->mon.rmid);
-}
-
-static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
- const char *name, umode_t mode,
- enum rdt_group_type rtype, struct rdtgroup **r)
-{
- struct rdtgroup *prdtgrp, *rdtgrp;
- unsigned long files = 0;
- struct kernfs_node *kn;
- int ret;
-
- prdtgrp = rdtgroup_kn_lock_live(parent_kn);
- if (!prdtgrp) {
- ret = -ENODEV;
- goto out_unlock;
- }
-
- if (rtype == RDTMON_GROUP &&
- (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
- prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
- ret = -EINVAL;
- rdt_last_cmd_puts("Pseudo-locking in progress\n");
- goto out_unlock;
- }
-
- /* allocate the rdtgroup. */
- rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
- if (!rdtgrp) {
- ret = -ENOSPC;
- rdt_last_cmd_puts("Kernel out of memory\n");
- goto out_unlock;
- }
- *r = rdtgrp;
- rdtgrp->mon.parent = prdtgrp;
- rdtgrp->type = rtype;
- INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);
-
- /* kernfs creates the directory for rdtgrp */
- kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
- if (IS_ERR(kn)) {
- ret = PTR_ERR(kn);
- rdt_last_cmd_puts("kernfs create error\n");
- goto out_free_rgrp;
- }
- rdtgrp->kn = kn;
-
- /*
- * kernfs_remove() will drop the reference count on "kn" which
- * will free it. But we still need it to stick around for the
- * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
- * which will be dropped by kernfs_put() in rdtgroup_remove().
- */
- kernfs_get(kn);
-
- ret = rdtgroup_kn_set_ugid(kn);
- if (ret) {
- rdt_last_cmd_puts("kernfs perm error\n");
- goto out_destroy;
- }
-
- if (rtype == RDTCTRL_GROUP) {
- files = RFTYPE_BASE | RFTYPE_CTRL;
- if (resctrl_arch_mon_capable())
- files |= RFTYPE_MON;
- } else {
- files = RFTYPE_BASE | RFTYPE_MON;
- }
-
- ret = rdtgroup_add_files(kn, files);
- if (ret) {
- rdt_last_cmd_puts("kernfs fill error\n");
- goto out_destroy;
- }
-
- /*
- * The caller unlocks the parent_kn upon success.
- */
- return 0;
-
-out_destroy:
- kernfs_put(rdtgrp->kn);
- kernfs_remove(rdtgrp->kn);
-out_free_rgrp:
- kfree(rdtgrp);
-out_unlock:
- rdtgroup_kn_unlock(parent_kn);
- return ret;
-}
-
-static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
-{
- kernfs_remove(rgrp->kn);
- rdtgroup_remove(rgrp);
-}
-
-/*
- * Create a monitor group under "mon_groups" directory of a control
- * and monitor group (ctrl_mon). This is a resource group
- * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
- */
-static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
- const char *name, umode_t mode)
-{
- struct rdtgroup *rdtgrp, *prgrp;
- int ret;
-
- ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp);
- if (ret)
- return ret;
-
- prgrp = rdtgrp->mon.parent;
- rdtgrp->closid = prgrp->closid;
-
- ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp);
- if (ret) {
- mkdir_rdt_prepare_clean(rdtgrp);
- goto out_unlock;
- }
-
- kernfs_activate(rdtgrp->kn);
-
- /*
- * Add the rdtgrp to the list of rdtgrps the parent
- * ctrl_mon group has to track.
- */
- list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
-
-out_unlock:
- rdtgroup_kn_unlock(parent_kn);
- return ret;
-}
-
-/*
- * These are rdtgroups created under the root directory. Can be used
- * to allocate and monitor resources.
- */
-static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
- const char *name, umode_t mode)
-{
- struct rdtgroup *rdtgrp;
- struct kernfs_node *kn;
- u32 closid;
- int ret;
-
- ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp);
- if (ret)
- return ret;
-
- kn = rdtgrp->kn;
- ret = closid_alloc();
- if (ret < 0) {
- rdt_last_cmd_puts("Out of CLOSIDs\n");
- goto out_common_fail;
- }
- closid = ret;
- ret = 0;
-
- rdtgrp->closid = closid;
-
- ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp);
- if (ret)
- goto out_closid_free;
-
- kernfs_activate(rdtgrp->kn);
-
- ret = rdtgroup_init_alloc(rdtgrp);
- if (ret < 0)
- goto out_rmid_free;
-
- list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
-
- if (resctrl_arch_mon_capable()) {
- /*
- * Create an empty mon_groups directory to hold the subset
- * of tasks and cpus to monitor.
- */
- ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
- if (ret) {
- rdt_last_cmd_puts("kernfs subdir error\n");
- goto out_del_list;
- }
- if (is_mba_sc(NULL))
- rdtgrp->mba_mbps_event = mba_mbps_default_event;
- }
-
- goto out_unlock;
-
-out_del_list:
- list_del(&rdtgrp->rdtgroup_list);
-out_rmid_free:
- mkdir_rdt_prepare_rmid_free(rdtgrp);
-out_closid_free:
- closid_free(closid);
-out_common_fail:
- mkdir_rdt_prepare_clean(rdtgrp);
-out_unlock:
- rdtgroup_kn_unlock(parent_kn);
- return ret;
-}
-
-/*
- * We allow creating mon groups only within a directory called "mon_groups"
- * which is present in every ctrl_mon group. Check if this is a valid
- * "mon_groups" directory.
- *
- * 1. The directory should be named "mon_groups".
- * 2. The mon group itself should "not" be named "mon_groups".
- * This makes sure "mon_groups" directory always has a ctrl_mon group
- * as parent.
- */
-static bool is_mon_groups(struct kernfs_node *kn, const char *name)
-{
- return (!strcmp(rdt_kn_name(kn), "mon_groups") &&
- strcmp(name, "mon_groups"));
-}
-
-static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
- umode_t mode)
-{
- /* Do not accept '\n' to avoid unparsable situation. */
- if (strchr(name, '\n'))
- return -EINVAL;
-
- /*
- * If the parent directory is the root directory and RDT
- * allocation is supported, add a control and monitoring
- * subdirectory
- */
- if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn)
- return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
-
- /*
- * If RDT monitoring is supported and the parent directory is a valid
- * "mon_groups" directory, add a monitoring subdirectory.
- */
- if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name))
- return rdtgroup_mkdir_mon(parent_kn, name, mode);
-
- return -EPERM;
-}
-
-static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
-{
- struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
- u32 closid, rmid;
- int cpu;
-
- /* Give any tasks back to the parent group */
- rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
-
- /*
- * Update per cpu closid/rmid of the moved CPUs first.
- * Note: the closid will not change, but the arch code still needs it.
- */
- closid = prdtgrp->closid;
- rmid = prdtgrp->mon.rmid;
- for_each_cpu(cpu, &rdtgrp->cpu_mask)
- resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid);
-
- /*
- * Update the MSR on moved CPUs and CPUs which have moved
- * task running on them.
- */
- cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
- update_closid_rmid(tmpmask, NULL);
-
- rdtgrp->flags = RDT_DELETED;
- free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
-
- /*
- * Remove the rdtgrp from the parent ctrl_mon group's list
- */
- WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
- list_del(&rdtgrp->mon.crdtgrp_list);
-
- kernfs_remove(rdtgrp->kn);
-
- return 0;
-}
-
-static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp)
-{
- rdtgrp->flags = RDT_DELETED;
- list_del(&rdtgrp->rdtgroup_list);
-
- kernfs_remove(rdtgrp->kn);
- return 0;
-}
-
-static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
-{
- u32 closid, rmid;
- int cpu;
-
- /* Give any tasks back to the default group */
- rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
-
- /* Give any CPUs back to the default group */
- cpumask_or(&rdtgroup_default.cpu_mask,
- &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
-
- /* Update per cpu closid and rmid of the moved CPUs first */
- closid = rdtgroup_default.closid;
- rmid = rdtgroup_default.mon.rmid;
- for_each_cpu(cpu, &rdtgrp->cpu_mask)
- resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid);
-
- /*
- * Update the MSR on moved CPUs and CPUs which have moved
- * task running on them.
- */
- cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
- update_closid_rmid(tmpmask, NULL);
-
- free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
- closid_free(rdtgrp->closid);
-
- rdtgroup_ctrl_remove(rdtgrp);
-
- /*
- * Free all the child monitor group rmids.
- */
- free_all_child_rdtgrp(rdtgrp);
-
- return 0;
-}
-
-static struct kernfs_node *rdt_kn_parent(struct kernfs_node *kn)
-{
- /*
- * Valid within the RCU section it was obtained or while rdtgroup_mutex
- * is held.
- */
- return rcu_dereference_check(kn->__parent, lockdep_is_held(&rdtgroup_mutex));
-}
-
-static int rdtgroup_rmdir(struct kernfs_node *kn)
-{
- struct kernfs_node *parent_kn;
- struct rdtgroup *rdtgrp;
- cpumask_var_t tmpmask;
- int ret = 0;
-
- if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
- return -ENOMEM;
-
- rdtgrp = rdtgroup_kn_lock_live(kn);
- if (!rdtgrp) {
- ret = -EPERM;
- goto out;
- }
- parent_kn = rdt_kn_parent(kn);
-
- /*
- * If the rdtgroup is a ctrl_mon group and parent directory
- * is the root directory, remove the ctrl_mon group.
- *
- * If the rdtgroup is a mon group and parent directory
- * is a valid "mon_groups" directory, remove the mon group.
- */
- if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
- rdtgrp != &rdtgroup_default) {
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
- rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
- ret = rdtgroup_ctrl_remove(rdtgrp);
- } else {
- ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask);
- }
- } else if (rdtgrp->type == RDTMON_GROUP &&
- is_mon_groups(parent_kn, rdt_kn_name(kn))) {
- ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask);
- } else {
- ret = -EPERM;
- }
-
-out:
- rdtgroup_kn_unlock(kn);
- free_cpumask_var(tmpmask);
- return ret;
-}
-
-/**
- * mongrp_reparent() - replace parent CTRL_MON group of a MON group
- * @rdtgrp: the MON group whose parent should be replaced
- * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp
- * @cpus: cpumask provided by the caller for use during this call
- *
- * Replaces the parent CTRL_MON group for a MON group, resulting in all member
- * tasks' CLOSID immediately changing to that of the new parent group.
- * Monitoring data for the group is unaffected by this operation.
- */
-static void mongrp_reparent(struct rdtgroup *rdtgrp,
- struct rdtgroup *new_prdtgrp,
- cpumask_var_t cpus)
-{
- struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
-
- WARN_ON(rdtgrp->type != RDTMON_GROUP);
- WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP);
-
- /* Nothing to do when simply renaming a MON group. */
- if (prdtgrp == new_prdtgrp)
- return;
-
- WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
- list_move_tail(&rdtgrp->mon.crdtgrp_list,
- &new_prdtgrp->mon.crdtgrp_list);
-
- rdtgrp->mon.parent = new_prdtgrp;
- rdtgrp->closid = new_prdtgrp->closid;
-
- /* Propagate updated closid to all tasks in this group. */
- rdt_move_group_tasks(rdtgrp, rdtgrp, cpus);
-
- update_closid_rmid(cpus, NULL);
-}
-
-static int rdtgroup_rename(struct kernfs_node *kn,
- struct kernfs_node *new_parent, const char *new_name)
-{
- struct kernfs_node *kn_parent;
- struct rdtgroup *new_prdtgrp;
- struct rdtgroup *rdtgrp;
- cpumask_var_t tmpmask;
- int ret;
-
- rdtgrp = kernfs_to_rdtgroup(kn);
- new_prdtgrp = kernfs_to_rdtgroup(new_parent);
- if (!rdtgrp || !new_prdtgrp)
- return -ENOENT;
-
- /* Release both kernfs active_refs before obtaining rdtgroup mutex. */
- rdtgroup_kn_get(rdtgrp, kn);
- rdtgroup_kn_get(new_prdtgrp, new_parent);
-
- mutex_lock(&rdtgroup_mutex);
-
- rdt_last_cmd_clear();
-
- /*
- * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if
- * either kernfs_node is a file.
- */
- if (kernfs_type(kn) != KERNFS_DIR ||
- kernfs_type(new_parent) != KERNFS_DIR) {
-		rdt_last_cmd_puts("Source and destination must be directories\n");
- ret = -EPERM;
- goto out;
- }
-
- if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) {
- ret = -ENOENT;
- goto out;
- }
-
- kn_parent = rdt_kn_parent(kn);
- if (rdtgrp->type != RDTMON_GROUP || !kn_parent ||
- !is_mon_groups(kn_parent, rdt_kn_name(kn))) {
- rdt_last_cmd_puts("Source must be a MON group\n");
- ret = -EPERM;
- goto out;
- }
-
- if (!is_mon_groups(new_parent, new_name)) {
- rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n");
- ret = -EPERM;
- goto out;
- }
-
- /*
- * If the MON group is monitoring CPUs, the CPUs must be assigned to the
- * current parent CTRL_MON group and therefore cannot be assigned to
- * the new parent, making the move illegal.
- */
- if (!cpumask_empty(&rdtgrp->cpu_mask) &&
- rdtgrp->mon.parent != new_prdtgrp) {
- rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n");
- ret = -EPERM;
- goto out;
- }
-
- /*
- * Allocate the cpumask for use in mongrp_reparent() to avoid the
- * possibility of failing to allocate it after kernfs_rename() has
- * succeeded.
- */
- if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * Perform all input validation and allocations needed to ensure
- * mongrp_reparent() will succeed before calling kernfs_rename(),
- * otherwise it would be necessary to revert this call if
- * mongrp_reparent() failed.
- */
- ret = kernfs_rename(kn, new_parent, new_name);
- if (!ret)
- mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask);
-
- free_cpumask_var(tmpmask);
-
-out:
- mutex_unlock(&rdtgroup_mutex);
- rdtgroup_kn_put(rdtgrp, kn);
- rdtgroup_kn_put(new_prdtgrp, new_parent);
- return ret;
-}
-
-static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
-{
- if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
- seq_puts(seq, ",cdp");
-
- if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
- seq_puts(seq, ",cdpl2");
-
- if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA)))
- seq_puts(seq, ",mba_MBps");
-
- if (resctrl_debug)
- seq_puts(seq, ",debug");
-
- return 0;
-}
-
-static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
- .mkdir = rdtgroup_mkdir,
- .rmdir = rdtgroup_rmdir,
- .rename = rdtgroup_rename,
- .show_options = rdtgroup_show_options,
-};
-
-static int rdtgroup_setup_root(struct rdt_fs_context *ctx)
-{
- rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
- KERNFS_ROOT_CREATE_DEACTIVATED |
- KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
- &rdtgroup_default);
- if (IS_ERR(rdt_root))
- return PTR_ERR(rdt_root);
-
- ctx->kfc.root = rdt_root;
- rdtgroup_default.kn = kernfs_root_to_node(rdt_root);
-
- return 0;
-}
-
-static void rdtgroup_destroy_root(void)
-{
- kernfs_destroy_root(rdt_root);
- rdtgroup_default.kn = NULL;
-}
-
-static void __init rdtgroup_setup_default(void)
-{
- mutex_lock(&rdtgroup_mutex);
-
- rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID;
- rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID;
- rdtgroup_default.type = RDTCTRL_GROUP;
- INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);
-
- list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
-
- mutex_unlock(&rdtgroup_mutex);
-}
-
-static void domain_destroy_mon_state(struct rdt_mon_domain *d)
-{
- bitmap_free(d->rmid_busy_llc);
- kfree(d->mbm_total);
- kfree(d->mbm_local);
-}
-
-void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d)
-{
- mutex_lock(&rdtgroup_mutex);
-
- if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
- mba_sc_domain_destroy(r, d);
-
- mutex_unlock(&rdtgroup_mutex);
-}
-
-void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d)
-{
- mutex_lock(&rdtgroup_mutex);
-
- /*
- * If resctrl is mounted, remove all the
- * per domain monitor data directories.
- */
- if (resctrl_mounted && resctrl_arch_mon_capable())
- rmdir_mondata_subdir_allrdtgrp(r, d);
-
- if (resctrl_is_mbm_enabled())
- cancel_delayed_work(&d->mbm_over);
- if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) {
- /*
- * When a package is going down, forcefully
- * decrement rmid->ebusy. There is no way to know
- * that the L3 was flushed and hence may lead to
- * incorrect counts in rare scenarios, but leaving
- * the RMID as busy creates RMID leaks if the
- * package never comes back.
- */
- __check_limbo(d, true);
- cancel_delayed_work(&d->cqm_limbo);
- }
-
- domain_destroy_mon_state(d);
-
- mutex_unlock(&rdtgroup_mutex);
-}
-
-/**
- * domain_setup_mon_state() - Initialise domain monitoring structures.
- * @r: The resource for the newly online domain.
- * @d: The newly online domain.
- *
- * Allocate monitor resources that belong to this domain.
- * Called when the first CPU of a domain comes online, regardless of whether
- * the filesystem is mounted.
- * During boot this may be called before global allocations have been made by
- * resctrl_mon_resource_init().
- *
- * Returns 0 for success, or -ENOMEM.
- */
-static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_mon_domain *d)
-{
- u32 idx_limit = resctrl_arch_system_num_rmid_idx();
- size_t tsize;
-
- if (resctrl_arch_is_llc_occupancy_enabled()) {
- d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL);
- if (!d->rmid_busy_llc)
- return -ENOMEM;
- }
- if (resctrl_arch_is_mbm_total_enabled()) {
- tsize = sizeof(*d->mbm_total);
- d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL);
- if (!d->mbm_total) {
- bitmap_free(d->rmid_busy_llc);
- return -ENOMEM;
- }
- }
- if (resctrl_arch_is_mbm_local_enabled()) {
- tsize = sizeof(*d->mbm_local);
- d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL);
- if (!d->mbm_local) {
- bitmap_free(d->rmid_busy_llc);
- kfree(d->mbm_total);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d)
-{
- int err = 0;
-
- mutex_lock(&rdtgroup_mutex);
-
- if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) {
- /* RDT_RESOURCE_MBA is never mon_capable */
- err = mba_sc_domain_allocate(r, d);
- }
-
- mutex_unlock(&rdtgroup_mutex);
-
- return err;
-}
-
-int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d)
-{
- int err;
-
- mutex_lock(&rdtgroup_mutex);
-
- err = domain_setup_mon_state(r, d);
- if (err)
- goto out_unlock;
-
- if (resctrl_is_mbm_enabled()) {
- INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
- mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL,
- RESCTRL_PICK_ANY_CPU);
- }
-
- if (resctrl_arch_is_llc_occupancy_enabled())
- INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
-
- /*
- * If the filesystem is not mounted then only the default resource group
- * exists. Creation of its directories is deferred until mount time
- * by rdt_get_tree() calling mkdir_mondata_all().
- * If resctrl is mounted, add per domain monitor data directories.
- */
- if (resctrl_mounted && resctrl_arch_mon_capable())
- mkdir_mondata_subdir_allrdtgrp(r, d);
-
-out_unlock:
- mutex_unlock(&rdtgroup_mutex);
-
- return err;
-}
-
-void resctrl_online_cpu(unsigned int cpu)
-{
- mutex_lock(&rdtgroup_mutex);
- /* The CPU is set in default rdtgroup after online. */
- cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
- mutex_unlock(&rdtgroup_mutex);
-}
-
-static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
-{
- struct rdtgroup *cr;
-
- list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
- if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
- break;
- }
-}
-
-static struct rdt_mon_domain *get_mon_domain_from_cpu(int cpu,
- struct rdt_resource *r)
-{
- struct rdt_mon_domain *d;
-
- lockdep_assert_cpus_held();
-
- list_for_each_entry(d, &r->mon_domains, hdr.list) {
- /* Find the domain that contains this CPU */
- if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
- return d;
- }
-
- return NULL;
-}
-
-void resctrl_offline_cpu(unsigned int cpu)
-{
- struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3);
- struct rdt_mon_domain *d;
- struct rdtgroup *rdtgrp;
-
- mutex_lock(&rdtgroup_mutex);
- list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
- if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
- clear_childcpus(rdtgrp, cpu);
- break;
- }
- }
-
- if (!l3->mon_capable)
- goto out_unlock;
-
- d = get_mon_domain_from_cpu(cpu, l3);
- if (d) {
- if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) {
- cancel_delayed_work(&d->mbm_over);
- mbm_setup_overflow_handler(d, 0, cpu);
- }
- if (resctrl_arch_is_llc_occupancy_enabled() &&
- cpu == d->cqm_work_cpu && has_busy_rmid(d)) {
- cancel_delayed_work(&d->cqm_limbo);
- cqm_setup_limbo_handler(d, 0, cpu);
- }
- }
-
-out_unlock:
- mutex_unlock(&rdtgroup_mutex);
-}
-
-/*
- * resctrl_init - resctrl filesystem initialization
- *
- * Setup resctrl file system including set up root, create mount point,
- * register resctrl filesystem, and initialize files under root directory.
- *
- * Return: 0 on success or -errno
- */
-int __init resctrl_init(void)
-{
- int ret = 0;
-
- seq_buf_init(&last_cmd_status, last_cmd_status_buf,
- sizeof(last_cmd_status_buf));
-
- rdtgroup_setup_default();
-
- thread_throttle_mode_init();
-
- ret = resctrl_mon_resource_init();
- if (ret)
- return ret;
-
- ret = sysfs_create_mount_point(fs_kobj, "resctrl");
- if (ret) {
- resctrl_mon_resource_exit();
- return ret;
- }
-
- ret = register_filesystem(&rdt_fs_type);
- if (ret)
- goto cleanup_mountpoint;
-
- /*
- * Adding the resctrl debugfs directory here may not be ideal since
- * it would let the resctrl debugfs directory appear on the debugfs
- * filesystem before the resctrl filesystem is mounted.
- * It may also be ok since that would enable debugging of RDT before
- * resctrl is mounted.
- * The reason why the debugfs directory is created here and not in
- * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and
- * during the debugfs directory creation also &sb->s_type->i_mutex_key
- * (the lockdep class of inode->i_rwsem). Other filesystem
- * interactions (eg. SyS_getdents) have the lock ordering:
- * &sb->s_type->i_mutex_key --> &mm->mmap_lock
- * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex
- * is taken, thus creating dependency:
- * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause
- * issues considering the other two lock dependencies.
- * By creating the debugfs directory here we avoid a dependency
- * that may cause deadlock (even though file operations cannot
- * occur until the filesystem is mounted, but I do not know how to
- * tell lockdep that).
- */
- debugfs_resctrl = debugfs_create_dir("resctrl", NULL);
-
- return 0;
-
-cleanup_mountpoint:
- sysfs_remove_mount_point(fs_kobj, "resctrl");
- resctrl_mon_resource_exit();
-
- return ret;
-}
-
-void __exit resctrl_exit(void)
-{
- debugfs_remove_recursive(debugfs_resctrl);
- unregister_filesystem(&rdt_fs_type);
- sysfs_remove_mount_point(fs_kobj, "resctrl");
-
- resctrl_mon_resource_exit();
-}
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 16f3ca30626a..dbf6d71bdf18 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -27,6 +27,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_INTEL_PPIN, CPUID_EBX, 0, 0x00000007, 1 },
+ { X86_FEATURE_APX, CPUID_EDX, 21, 0x00000007, 1 },
{ X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 },
{ X86_FEATURE_BHI_CTRL, CPUID_EDX, 4, 0x00000007, 2 },
{ X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 },
@@ -53,7 +54,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 },
- { X86_FEATURE_AMD_HETEROGENEOUS_CORES, CPUID_EAX, 30, 0x80000026, 0 },
+ { X86_FEATURE_AMD_HTR_CORES, CPUID_EAX, 30, 0x80000026, 0 },
{ 0, 0, 0, 0, 0 }
};
diff --git a/arch/x86/kernel/cpu/sgx/driver.h b/arch/x86/kernel/cpu/sgx/driver.h
index 4eddb4d571ef..30f39f92c98f 100644
--- a/arch/x86/kernel/cpu/sgx/driver.h
+++ b/arch/x86/kernel/cpu/sgx/driver.h
@@ -2,7 +2,6 @@
#ifndef __ARCH_SGX_DRIVER_H__
#define __ARCH_SGX_DRIVER_H__
-#include <crypto/hash.h>
#include <linux/kref.h>
#include <linux/mmu_notifier.h>
#include <linux/radix-tree.h>
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index 776a20172867..66f1efa16fbb 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -3,6 +3,7 @@
#include <asm/mman.h>
#include <asm/sgx.h>
+#include <crypto/sha2.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/file.h>
@@ -463,31 +464,6 @@ static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
return ret;
}
-static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
- void *hash)
-{
- SHASH_DESC_ON_STACK(shash, tfm);
-
- shash->tfm = tfm;
-
- return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
-}
-
-static int sgx_get_key_hash(const void *modulus, void *hash)
-{
- struct crypto_shash *tfm;
- int ret;
-
- tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- ret = __sgx_get_key_hash(tfm, modulus, hash);
-
- crypto_free_shash(tfm);
- return ret;
-}
-
static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
void *token)
{
@@ -523,9 +499,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
sgx_xfrm_reserved_mask)
return -EINVAL;
- ret = sgx_get_key_hash(sigstruct->modulus, mrsigner);
- if (ret)
- return ret;
+ sha256(sigstruct->modulus, SGX_MODULUS_SIZE, (u8 *)mrsigner);
mutex_lock(&encl->lock);
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 8ce352fc72ac..2de01b379aa3 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/vmalloc.h>
+#include <asm/msr.h>
#include <asm/sgx.h>
#include "driver.h"
#include "encl.h"
@@ -719,6 +720,8 @@ int arch_memory_failure(unsigned long pfn, int flags)
goto out;
}
+ sgx_unmark_page_reclaimable(page);
+
/*
* TBD: Add additional plumbing to enable pre-emptive
* action for asynchronous poison notification. Until
@@ -871,7 +874,7 @@ void sgx_update_lepubkeyhash(u64 *lepubkeyhash)
WARN_ON_ONCE(preemptible());
for (i = 0; i < 4; i++)
- wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
+ wrmsrq(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
}
const struct file_operations sgx_provision_fops = {
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 01456236a6dd..e35ccdc84910 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -30,6 +30,7 @@
#include <asm/hypervisor.h>
#include <asm/io_apic.h>
#include <asm/mpspec.h>
+#include <asm/msr.h>
#include <asm/smp.h>
#include "cpu.h"
@@ -154,7 +155,7 @@ static __init bool check_for_real_bsp(u32 apic_id)
* kernel must rely on the firmware enumeration order.
*/
if (has_apic_base) {
- rdmsrl(MSR_IA32_APICBASE, msr);
+ rdmsrq(MSR_IA32_APICBASE, msr);
is_bsp = !!(msr & MSR_IA32_APICBASE_BSP);
}
diff --git a/arch/x86/kernel/cpu/topology_amd.c b/arch/x86/kernel/cpu/topology_amd.c
index 03b3c9c3a45e..843b1655ab45 100644
--- a/arch/x86/kernel/cpu/topology_amd.c
+++ b/arch/x86/kernel/cpu/topology_amd.c
@@ -3,6 +3,7 @@
#include <asm/apic.h>
#include <asm/memtype.h>
+#include <asm/msr.h>
#include <asm/processor.h>
#include "cpu.h"
@@ -133,7 +134,7 @@ static void parse_fam10h_node_id(struct topo_scan *tscan)
if (!boot_cpu_has(X86_FEATURE_NODEID_MSR))
return;
- rdmsrl(MSR_FAM10H_NODE_ID, nid.msr);
+ rdmsrq(MSR_FAM10H_NODE_ID, nid.msr);
store_node(tscan, nid.nodes_per_pkg + 1, nid.node_id);
tscan->c->topo.llc_id = nid.node_id;
}
@@ -160,7 +161,7 @@ static void topoext_fixup(struct topo_scan *tscan)
if (msr_set_bit(0xc0011005, 54) <= 0)
return;
- rdmsrl(0xc0011005, msrval);
+ rdmsrq(0xc0011005, msrval);
if (msrval & BIT_64(54)) {
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
@@ -182,7 +183,7 @@ static void parse_topology_amd(struct topo_scan *tscan)
if (cpu_feature_enabled(X86_FEATURE_TOPOEXT))
has_topoext = cpu_parse_topology_ext(tscan);
- if (cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES))
+ if (cpu_feature_enabled(X86_FEATURE_AMD_HTR_CORES))
tscan->c->topo.cpu_type = cpuid_ebx(0x80000026);
if (!has_topoext && !parse_8000_0008(tscan))
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
index b31ee4f1657a..49782724a943 100644
--- a/arch/x86/kernel/cpu/tsx.c
+++ b/arch/x86/kernel/cpu/tsx.c
@@ -12,6 +12,7 @@
#include <asm/cmdline.h>
#include <asm/cpu.h>
+#include <asm/msr.h>
#include "cpu.h"
@@ -24,7 +25,7 @@ static void tsx_disable(void)
{
u64 tsx;
- rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+ rdmsrq(MSR_IA32_TSX_CTRL, tsx);
/* Force all transactions to immediately abort */
tsx |= TSX_CTRL_RTM_DISABLE;
@@ -37,14 +38,14 @@ static void tsx_disable(void)
*/
tsx |= TSX_CTRL_CPUID_CLEAR;
- wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+ wrmsrq(MSR_IA32_TSX_CTRL, tsx);
}
static void tsx_enable(void)
{
u64 tsx;
- rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+ rdmsrq(MSR_IA32_TSX_CTRL, tsx);
/* Enable the RTM feature in the cpu */
tsx &= ~TSX_CTRL_RTM_DISABLE;
@@ -56,7 +57,7 @@ static void tsx_enable(void)
*/
tsx &= ~TSX_CTRL_CPUID_CLEAR;
- wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+ wrmsrq(MSR_IA32_TSX_CTRL, tsx);
}
static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
@@ -115,13 +116,13 @@ static void tsx_clear_cpuid(void)
*/
if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
- rdmsrl(MSR_TSX_FORCE_ABORT, msr);
+ rdmsrq(MSR_TSX_FORCE_ABORT, msr);
msr |= MSR_TFA_TSX_CPUID_CLEAR;
- wrmsrl(MSR_TSX_FORCE_ABORT, msr);
+ wrmsrq(MSR_TSX_FORCE_ABORT, msr);
} else if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL)) {
- rdmsrl(MSR_IA32_TSX_CTRL, msr);
+ rdmsrq(MSR_IA32_TSX_CTRL, msr);
msr |= TSX_CTRL_CPUID_CLEAR;
- wrmsrl(MSR_IA32_TSX_CTRL, msr);
+ wrmsrq(MSR_IA32_TSX_CTRL, msr);
}
}
@@ -146,11 +147,11 @@ static void tsx_dev_mode_disable(void)
!cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL))
return;
- rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
+ rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
if (mcu_opt_ctrl & RTM_ALLOW) {
mcu_opt_ctrl &= ~RTM_ALLOW;
- wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
+ wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
setup_force_cpu_cap(X86_FEATURE_RTM_ALWAYS_ABORT);
}
}
diff --git a/arch/x86/kernel/cpu/umwait.c b/arch/x86/kernel/cpu/umwait.c
index 2293efd6ffa6..933fcd7ff250 100644
--- a/arch/x86/kernel/cpu/umwait.c
+++ b/arch/x86/kernel/cpu/umwait.c
@@ -33,7 +33,7 @@ static DEFINE_MUTEX(umwait_lock);
static void umwait_update_control_msr(void * unused)
{
lockdep_assert_irqs_disabled();
- wrmsr(MSR_IA32_UMWAIT_CONTROL, READ_ONCE(umwait_control_cached), 0);
+ wrmsrq(MSR_IA32_UMWAIT_CONTROL, READ_ONCE(umwait_control_cached));
}
/*
@@ -71,7 +71,7 @@ static int umwait_cpu_offline(unsigned int cpu)
* the original control MSR value in umwait_init(). So there
* is no race condition here.
*/
- wrmsr(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached, 0);
+ wrmsrq(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached);
return 0;
}
@@ -214,7 +214,7 @@ static int __init umwait_init(void)
* changed. This is the only place where orig_umwait_control_cached
* is modified.
*/
- rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached);
+ rdmsrq(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached);
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online",
umwait_cpu_online, umwait_cpu_offline);
diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c
index 90eba7eb5335..89b1c8a70fe8 100644
--- a/arch/x86/kernel/cpu/zhaoxin.c
+++ b/arch/x86/kernel/cpu/zhaoxin.c
@@ -4,6 +4,7 @@
#include <asm/cpu.h>
#include <asm/cpufeature.h>
+#include <asm/msr.h>
#include "cpu.h"
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 0be61c45400c..bcb534688dfe 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -278,6 +278,7 @@ static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
unsigned long long mend)
{
unsigned long start, end;
+ int ret;
cmem->ranges[0].start = mstart;
cmem->ranges[0].end = mend;
@@ -286,22 +287,43 @@ static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
/* Exclude elf header region */
start = image->elf_load_addr;
end = start + image->elf_headers_sz - 1;
- return crash_exclude_mem_range(cmem, start, end);
+ ret = crash_exclude_mem_range(cmem, start, end);
+
+ if (ret)
+ return ret;
+
+ /* Exclude dm crypt keys region */
+ if (image->dm_crypt_keys_addr) {
+ start = image->dm_crypt_keys_addr;
+ end = start + image->dm_crypt_keys_sz - 1;
+ return crash_exclude_mem_range(cmem, start, end);
+ }
+
+ return ret;
}
/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
+ unsigned int nr_ranges = 0;
int i, ret = 0;
unsigned long flags;
struct e820_entry ei;
struct crash_memmap_data cmd;
struct crash_mem *cmem;
- cmem = vzalloc(struct_size(cmem, ranges, 1));
+ /*
+ * Using random kexec_buf for passing dm crypt keys may cause a range
+ * split. So use two slots here.
+ */
+ nr_ranges = 2;
+ cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
if (!cmem)
return -ENOMEM;
+ cmem->max_nr_ranges = nr_ranges;
+ cmem->nr_ranges = 0;
+
memset(&cmd, 0, sizeof(struct crash_memmap_data));
cmd.params = params;
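The two memmap slots reserved above matter because excluding a region (the ELF headers, or now the dm-crypt keys) from a larger range can split it into two pieces. A standalone illustration of that splitting behaviour in plain C with invented addresses (a sketch only, not the kernel's crash_exclude_mem_range()):

#include <stdio.h>

struct range { unsigned long long start, end; };

/* Exclude [ex.start, ex.end] from 'in'; the result is 0, 1 or 2 ranges. */
static int exclude_range(struct range in, struct range ex, struct range out[2])
{
	int n = 0;

	if (ex.end < in.start || ex.start > in.end) {	/* no overlap: keep as-is */
		out[n++] = in;
		return n;
	}
	if (ex.start > in.start)	/* piece left over below the excluded region */
		out[n++] = (struct range){ in.start, ex.start - 1 };
	if (ex.end < in.end)		/* piece left over above the excluded region */
		out[n++] = (struct range){ ex.end + 1, in.end };
	return n;
}

int main(void)
{
	struct range out[2];
	int n = exclude_range((struct range){ 0x100000, 0x7fffffff },
			      (struct range){ 0x2000000, 0x20fffff }, out);

	for (int i = 0; i < n; i++)
		printf("range %d: %#llx-%#llx\n", i, out[i].start, out[i].end);
	return 0;
}

Excluding a region that lies strictly inside the range yields two pieces, which is exactly the case the nr_ranges = 2 allocation above guards against.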
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index c6fefd4585f8..71ee20102a8a 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -23,8 +23,6 @@
#include <asm/stacktrace.h>
#include <asm/unwind.h>
-int panic_on_unrecovered_nmi;
-int panic_on_io_nmi;
static int die_counter;
static struct pt_regs exec_summary_regs;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 57120f0749cc..c3acbd26408b 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -753,22 +753,21 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len)
void __init e820__register_nosave_regions(unsigned long limit_pfn)
{
int i;
- unsigned long pfn = 0;
+ u64 last_addr = 0;
for (i = 0; i < e820_table->nr_entries; i++) {
struct e820_entry *entry = &e820_table->entries[i];
- if (pfn < PFN_UP(entry->addr))
- register_nosave_region(pfn, PFN_UP(entry->addr));
-
- pfn = PFN_DOWN(entry->addr + entry->size);
-
if (entry->type != E820_TYPE_RAM)
- register_nosave_region(PFN_UP(entry->addr), pfn);
+ continue;
- if (pfn >= limit_pfn)
- break;
+ if (last_addr < entry->addr)
+ register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr));
+
+ last_addr = entry->addr + entry->size;
}
+
+ register_nosave_region(PFN_DOWN(last_addr), limit_pfn);
}
#ifdef CONFIG_ACPI
@@ -1300,6 +1299,32 @@ void __init e820__memblock_setup(void)
memblock_add(entry->addr, entry->size);
}
+ /*
+ * At this point memblock is only allowed to allocate from memory
+ * below 1M (aka ISA_END_ADDRESS) up until direct map is completely set
+ * up in init_mem_mapping().
+ *
+ * KHO kernels are special and use only scratch memory for memblock
+ * allocations, but memory below 1M is ignored by kernel after early
+ * boot and cannot be naturally marked as scratch.
+ *
+ * To allow allocation of the real-mode trampoline and a few (if any)
+ * other very early allocations from below 1M forcibly mark the memory
+ * below 1M as scratch.
+ *
+ * After real mode trampoline is allocated, we clear that scratch
+ * marking.
+ */
+ memblock_mark_kho_scratch(0, SZ_1M);
+
+ /*
+ * 32-bit systems are limited to 4GB of memory even with HIGHMEM and
+ * to even less without it.
+ * Discard memory after max_pfn - the actual limit detected at runtime.
+ */
+ if (IS_ENABLED(CONFIG_X86_32))
+ memblock_remove(PFN_PHYS(max_pfn), -1);
+
/* Throw away partial pages: */
memblock_trim_memory(PAGE_SIZE);
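The rewritten e820__register_nosave_regions() loop above only looks at RAM entries and registers every hole between them, plus the tail up to the limit, as nosave. A minimal user-space sketch of that gap-walking idea, with made-up entries and printf() standing in for register_nosave_region() (addresses rather than PFNs, purely illustrative):

#include <stdio.h>

struct ram_entry { unsigned long long addr, size; };

static void mark_nosave_gaps(const struct ram_entry *ram, int nr,
			     unsigned long long limit)
{
	unsigned long long last_addr = 0;

	for (int i = 0; i < nr; i++) {
		if (last_addr < ram[i].addr)	/* hole before this RAM entry */
			printf("nosave: %#llx-%#llx\n", last_addr, ram[i].addr);
		last_addr = ram[i].addr + ram[i].size;
	}
	if (last_addr < limit)			/* tail above the last RAM entry */
		printf("nosave: %#llx-%#llx\n", last_addr, limit);
}

int main(void)
{
	const struct ram_entry ram[] = {
		{ 0x00000000, 0x0009f000 },	/* low RAM  */
		{ 0x00100000, 0x3ff00000 },	/* main RAM */
	};

	mark_nosave_gaps(ram, 2, 0x100000000ULL);
	return 0;
}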
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 611f27e3890c..cba75306e5b6 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/console.h>
#include <linux/kernel.h>
+#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/screen_info.h>
@@ -144,6 +145,11 @@ static __init void early_serial_hw_init(unsigned divisor)
static_call(serial_out)(early_serial_base, DLL, divisor & 0xff);
static_call(serial_out)(early_serial_base, DLH, (divisor >> 8) & 0xff);
static_call(serial_out)(early_serial_base, LCR, c & ~DLAB);
+
+#if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_X86_64)
+ if (static_call_query(serial_in) == io_serial_in)
+ kexec_debug_8250_port = early_serial_base;
+#endif
}
#define DEFAULT_BAUD 9600
@@ -327,6 +333,9 @@ static __init void early_pci_serial_init(char *s)
/* WARNING! assuming the address is always in the first 4G */
early_serial_base =
(unsigned long)early_ioremap(bar0 & PCI_BASE_ADDRESS_MEM_MASK, 0x10);
+#if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_X86_64)
+ kexec_debug_8250_mmio32 = bar0 & PCI_BASE_ADDRESS_MEM_MASK;
+#endif
write_pci_config(bus, slot, func, PCI_COMMAND,
cmdreg|PCI_COMMAND_MEMORY);
}
@@ -389,10 +398,10 @@ static int __init setup_early_printk(char *buf)
keep = (strstr(buf, "keep") != NULL);
while (*buf != '\0') {
- if (!strncmp(buf, "mmio", 4)) {
- early_mmio_serial_init(buf + 4);
+ if (!strncmp(buf, "mmio32", 6)) {
+ buf += 6;
+ early_mmio_serial_init(buf);
early_console_register(&early_serial_console, keep);
- buf += 4;
}
if (!strncmp(buf, "serial", 6)) {
buf += 6;
@@ -407,9 +416,9 @@ static int __init setup_early_printk(char *buf)
}
#ifdef CONFIG_PCI
if (!strncmp(buf, "pciserial", 9)) {
- early_pci_serial_init(buf + 9);
+ buf += 9; /* Keep from matching the above "pciserial" */
+ early_pci_serial_init(buf);
early_console_register(&early_serial_console, keep);
- buf += 9; /* Keep from match the above "serial" */
}
#endif
if (!strncmp(buf, "vga", 3) &&
diff --git a/arch/x86/kernel/fpu/context.h b/arch/x86/kernel/fpu/context.h
index f6d856bd50bc..10d0a720659c 100644
--- a/arch/x86/kernel/fpu/context.h
+++ b/arch/x86/kernel/fpu/context.h
@@ -53,7 +53,7 @@ static inline void fpregs_activate(struct fpu *fpu)
/* Internal helper for switch_fpu_return() and signal frame setup */
static inline void fpregs_restore_userregs(void)
{
- struct fpu *fpu = &current->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(current);
int cpu = smp_processor_id();
if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_USER_WORKER)))
@@ -67,7 +67,7 @@ static inline void fpregs_restore_userregs(void)
* If PKRU is enabled, then the PKRU value is already
* correct because it was either set in switch_to() or in
* flush_thread(). So it is excluded because it might be
- * not up to date in current->thread.fpu.xsave state.
+ * not up to date in current->thread.fpu->xsave state.
*
* XFD state is handled in restore_fpregs_from_fpstate().
*/
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 91d6341f281f..ea138583dd92 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -11,6 +11,7 @@
#include <asm/fpu/sched.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
+#include <asm/msr.h>
#include <asm/traps.h>
#include <asm/irq_regs.h>
@@ -43,14 +44,27 @@ struct fpu_state_config fpu_user_cfg __ro_after_init;
*/
struct fpstate init_fpstate __ro_after_init;
-/* Track in-kernel FPU usage */
-static DEFINE_PER_CPU(bool, in_kernel_fpu);
+/*
+ * Track FPU initialization and kernel-mode usage. 'true' means the FPU is
+ * initialized and is not currently being used by the kernel:
+ */
+DEFINE_PER_CPU(bool, kernel_fpu_allowed);
/*
* Track which context is using the FPU on the CPU:
*/
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
+#ifdef CONFIG_X86_DEBUG_FPU
+struct fpu *x86_task_fpu(struct task_struct *task)
+{
+ if (WARN_ON_ONCE(task->flags & PF_KTHREAD))
+ return NULL;
+
+ return (void *)task + sizeof(*task);
+}
+#endif
+
/*
* Can we use the FPU in kernel mode with the
* whole "kernel_fpu_begin/end()" sequence?
@@ -61,15 +75,18 @@ bool irq_fpu_usable(void)
return false;
/*
- * In kernel FPU usage already active? This detects any explicitly
- * nested usage in task or softirq context, which is unsupported. It
- * also detects attempted usage in a hardirq that has interrupted a
- * kernel-mode FPU section.
+ * Return false in the following cases:
+ *
+ * - FPU is not yet initialized. This can happen only when the call is
+ * coming from CPU onlining, for example for microcode checksumming.
+ * - The kernel is already using the FPU, either because of explicit
+ * nesting (which should never be done), or because of implicit
+ * nesting when a hardirq interrupted a kernel-mode FPU section.
+ *
+ * The single boolean check below handles both cases:
*/
- if (this_cpu_read(in_kernel_fpu)) {
- WARN_ON_FPU(!in_hardirq());
+ if (!this_cpu_read(kernel_fpu_allowed))
return false;
- }
/*
* When not in NMI or hard interrupt context, FPU can be used in:
@@ -202,7 +219,7 @@ void fpu_reset_from_exception_fixup(void)
#if IS_ENABLED(CONFIG_KVM)
static void __fpstate_reset(struct fpstate *fpstate, u64 xfd);
-static void fpu_init_guest_permissions(struct fpu_guest *gfpu)
+static void fpu_lock_guest_permissions(void)
{
struct fpu_state_perm *fpuperm;
u64 perm;
@@ -211,15 +228,13 @@ static void fpu_init_guest_permissions(struct fpu_guest *gfpu)
return;
spin_lock_irq(&current->sighand->siglock);
- fpuperm = &current->group_leader->thread.fpu.guest_perm;
+ fpuperm = &x86_task_fpu(current->group_leader)->guest_perm;
perm = fpuperm->__state_perm;
/* First fpstate allocation locks down permissions. */
WRITE_ONCE(fpuperm->__state_perm, perm | FPU_GUEST_PERM_LOCKED);
spin_unlock_irq(&current->sighand->siglock);
-
- gfpu->perm = perm & ~FPU_GUEST_PERM_LOCKED;
}
bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
@@ -240,7 +255,6 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
gfpu->fpstate = fpstate;
gfpu->xfeatures = fpu_kernel_cfg.default_features;
- gfpu->perm = fpu_kernel_cfg.default_features;
/*
* KVM sets the FP+SSE bits in the XSAVE header when copying FPU state
@@ -255,7 +269,7 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
if (WARN_ON_ONCE(fpu_user_cfg.default_size > gfpu->uabi_size))
gfpu->uabi_size = fpu_user_cfg.default_size;
- fpu_init_guest_permissions(gfpu);
+ fpu_lock_guest_permissions();
return true;
}
@@ -263,16 +277,16 @@ EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate);
void fpu_free_guest_fpstate(struct fpu_guest *gfpu)
{
- struct fpstate *fps = gfpu->fpstate;
+ struct fpstate *fpstate = gfpu->fpstate;
- if (!fps)
+ if (!fpstate)
return;
- if (WARN_ON_ONCE(!fps->is_valloc || !fps->is_guest || fps->in_use))
+ if (WARN_ON_ONCE(!fpstate->is_valloc || !fpstate->is_guest || fpstate->in_use))
return;
gfpu->fpstate = NULL;
- vfree(fps);
+ vfree(fpstate);
}
EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate);
@@ -323,12 +337,12 @@ EXPORT_SYMBOL_GPL(fpu_update_guest_xfd);
*/
void fpu_sync_guest_vmexit_xfd_state(void)
{
- struct fpstate *fps = current->thread.fpu.fpstate;
+ struct fpstate *fpstate = x86_task_fpu(current)->fpstate;
lockdep_assert_irqs_disabled();
if (fpu_state_size_dynamic()) {
- rdmsrl(MSR_IA32_XFD, fps->xfd);
- __this_cpu_write(xfd_state, fps->xfd);
+ rdmsrq(MSR_IA32_XFD, fpstate->xfd);
+ __this_cpu_write(xfd_state, fpstate->xfd);
}
}
EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state);
@@ -337,7 +351,7 @@ EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state);
int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
{
struct fpstate *guest_fps = guest_fpu->fpstate;
- struct fpu *fpu = &current->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(current);
struct fpstate *cur_fps = fpu->fpstate;
fpregs_lock();
@@ -431,14 +445,15 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask)
fpregs_lock();
WARN_ON_FPU(!irq_fpu_usable());
- WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
- this_cpu_write(in_kernel_fpu, true);
+ /* Toggle kernel_fpu_allowed to false: */
+ WARN_ON_FPU(!this_cpu_read(kernel_fpu_allowed));
+ this_cpu_write(kernel_fpu_allowed, false);
if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) &&
!test_thread_flag(TIF_NEED_FPU_LOAD)) {
set_thread_flag(TIF_NEED_FPU_LOAD);
- save_fpregs_to_fpstate(&current->thread.fpu);
+ save_fpregs_to_fpstate(x86_task_fpu(current));
}
__cpu_invalidate_fpregs_state();
@@ -453,9 +468,10 @@ EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
void kernel_fpu_end(void)
{
- WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
+ /* Toggle kernel_fpu_allowed back to true: */
+ WARN_ON_FPU(this_cpu_read(kernel_fpu_allowed));
+ this_cpu_write(kernel_fpu_allowed, true);
- this_cpu_write(in_kernel_fpu, false);
if (!irqs_disabled())
fpregs_unlock();
}
@@ -467,7 +483,7 @@ EXPORT_SYMBOL_GPL(kernel_fpu_end);
*/
void fpu_sync_fpstate(struct fpu *fpu)
{
- WARN_ON_FPU(fpu != &current->thread.fpu);
+ WARN_ON_FPU(fpu != x86_task_fpu(current));
fpregs_lock();
trace_x86_fpu_before_save(fpu);
@@ -552,7 +568,7 @@ void fpstate_reset(struct fpu *fpu)
static inline void fpu_inherit_perms(struct fpu *dst_fpu)
{
if (fpu_state_size_dynamic()) {
- struct fpu *src_fpu = &current->group_leader->thread.fpu;
+ struct fpu *src_fpu = x86_task_fpu(current->group_leader);
spin_lock_irq(&current->sighand->siglock);
/* Fork also inherits the permissions of the parent */
@@ -572,7 +588,7 @@ static int update_fpu_shstk(struct task_struct *dst, unsigned long ssp)
if (!ssp)
return 0;
- xstate = get_xsave_addr(&dst->thread.fpu.fpstate->regs.xsave,
+ xstate = get_xsave_addr(&x86_task_fpu(dst)->fpstate->regs.xsave,
XFEATURE_CET_USER);
/*
@@ -593,8 +609,16 @@ static int update_fpu_shstk(struct task_struct *dst, unsigned long ssp)
int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal,
unsigned long ssp)
{
- struct fpu *src_fpu = &current->thread.fpu;
- struct fpu *dst_fpu = &dst->thread.fpu;
+ /*
+ * We allocate the new FPU structure right after the end of the task struct.
+ * task allocation size already took this into account.
+ *
+ * This is safe because task_struct size is a multiple of cacheline size,
+ * thus x86_task_fpu() will always be cacheline aligned as well.
+ */
+ struct fpu *dst_fpu = (void *)dst + sizeof(*dst);
+
+ BUILD_BUG_ON(sizeof(*dst) % SMP_CACHE_BYTES != 0);
/* The new task's FPU state cannot be valid in the hardware. */
dst_fpu->last_cpu = -1;
@@ -657,19 +681,22 @@ int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal,
if (update_fpu_shstk(dst, ssp))
return 1;
- trace_x86_fpu_copy_src(src_fpu);
trace_x86_fpu_copy_dst(dst_fpu);
return 0;
}
/*
- * Whitelist the FPU register state embedded into task_struct for hardened
- * usercopy.
+ * While struct fpu is no longer part of struct thread_struct, it is still
+ * allocated after struct task_struct in the "task_struct" kmem cache. But
+ * since FPU is expected to be part of struct thread_struct, we have to
+ * adjust for it here.
*/
void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size)
{
- *offset = offsetof(struct thread_struct, fpu.__fpstate.regs);
+ /* The allocation follows struct task_struct. */
+ *offset = sizeof(struct task_struct) - offsetof(struct task_struct, thread);
+ *offset += offsetof(struct fpu, __fpstate.regs);
*size = fpu_kernel_cfg.default_size;
}
@@ -682,11 +709,18 @@ void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size)
* a state-restore is coming: either an explicit one,
* or a reschedule.
*/
-void fpu__drop(struct fpu *fpu)
+void fpu__drop(struct task_struct *tsk)
{
+ struct fpu *fpu;
+
+ if (test_tsk_thread_flag(tsk, TIF_NEED_FPU_LOAD))
+ return;
+
+ fpu = x86_task_fpu(tsk);
+
preempt_disable();
- if (fpu == &current->thread.fpu) {
+ if (fpu == x86_task_fpu(current)) {
/* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n"
"2:\n"
@@ -718,9 +752,9 @@ static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
/*
* Reset current->fpu memory state to the init values.
*/
-static void fpu_reset_fpregs(void)
+static void fpu_reset_fpstate_regs(void)
{
- struct fpu *fpu = &current->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(current);
fpregs_lock();
__fpu_invalidate_fpregs_state(fpu);
@@ -749,11 +783,11 @@ static void fpu_reset_fpregs(void)
*/
void fpu__clear_user_states(struct fpu *fpu)
{
- WARN_ON_FPU(fpu != &current->thread.fpu);
+ WARN_ON_FPU(fpu != x86_task_fpu(current));
fpregs_lock();
if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
- fpu_reset_fpregs();
+ fpu_reset_fpstate_regs();
fpregs_unlock();
return;
}
@@ -782,8 +816,8 @@ void fpu__clear_user_states(struct fpu *fpu)
void fpu_flush_thread(void)
{
- fpstate_reset(&current->thread.fpu);
- fpu_reset_fpregs();
+ fpstate_reset(x86_task_fpu(current));
+ fpu_reset_fpstate_regs();
}
/*
* Load FPU context before returning to userspace.
@@ -823,7 +857,7 @@ void fpregs_lock_and_load(void)
*/
void fpregs_assert_state_consistent(void)
{
- struct fpu *fpu = &current->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(current);
if (test_thread_flag(TIF_NEED_FPU_LOAD))
return;
@@ -835,7 +869,7 @@ EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
void fpregs_mark_activate(void)
{
- struct fpu *fpu = &current->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(current);
fpregs_activate(fpu);
fpu->last_cpu = smp_processor_id();
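With 'struct fpu' carved out of the same allocation as task_struct instead of living inside thread_struct, both the x86_task_fpu() lookup and the hardened-usercopy whitelist in the hunks above reduce to pointer arithmetic. A simplified stand-alone sketch of that layout, using toy structures rather than the real kernel ones:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct thread_struct { unsigned long sp; };
struct task_struct   { int pid; struct thread_struct thread; };
struct fpu           { int last_cpu; unsigned char regs[64]; };

/* The FPU context is placed immediately after the task struct: */
static struct fpu *task_fpu(struct task_struct *task)
{
	return (struct fpu *)((char *)task + sizeof(*task));
}

int main(void)
{
	void *buf = calloc(1, sizeof(struct task_struct) + sizeof(struct fpu));
	struct task_struct *tsk = buf;

	if (!buf)
		return 1;

	/* Whitelist offset measured from 'thread', as in fpu_thread_struct_whitelist(): */
	size_t offset = sizeof(struct task_struct) -
			offsetof(struct task_struct, thread) +
			offsetof(struct fpu, regs);

	printf("fpu at task + %zu bytes, whitelist offset %zu\n",
	       (size_t)((char *)task_fpu(tsk) - (char *)tsk), offset);
	free(buf);
	return 0;
}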
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 998a08f17e33..99db41bf9fa6 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -38,7 +38,7 @@ static void fpu__init_cpu_generic(void)
/* Flush out any pending x87 state: */
#ifdef CONFIG_MATH_EMULATION
if (!boot_cpu_has(X86_FEATURE_FPU))
- fpstate_init_soft(&current->thread.fpu.fpstate->regs.soft);
+ ;
else
#endif
asm volatile ("fninit");
@@ -51,6 +51,9 @@ void fpu__init_cpu(void)
{
fpu__init_cpu_generic();
fpu__init_cpu_xstate();
+
+ /* Start allowing kernel-mode FPU: */
+ this_cpu_write(kernel_fpu_allowed, true);
}
static bool __init fpu__probe_without_cpuid(void)
@@ -73,6 +76,8 @@ static bool __init fpu__probe_without_cpuid(void)
static void __init fpu__init_system_early_generic(void)
{
+ set_thread_flag(TIF_NEED_FPU_LOAD);
+
if (!boot_cpu_has(X86_FEATURE_CPUID) &&
!test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
if (fpu__probe_without_cpuid())
@@ -94,7 +99,6 @@ static void __init fpu__init_system_early_generic(void)
* Boot time FPU feature detection code:
*/
unsigned int mxcsr_feature_mask __ro_after_init = 0xffffffffu;
-EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
static void __init fpu__init_system_mxcsr(void)
{
@@ -150,11 +154,13 @@ static void __init fpu__init_task_struct_size(void)
{
int task_size = sizeof(struct task_struct);
+ task_size += sizeof(struct fpu);
+
/*
* Subtract off the static size of the register state.
* It potentially has a bunch of padding.
*/
- task_size -= sizeof(current->thread.fpu.__fpstate.regs);
+ task_size -= sizeof(union fpregs_state);
/*
* Add back the dynamically-calculated register state
@@ -164,14 +170,9 @@ static void __init fpu__init_task_struct_size(void)
/*
* We dynamically size 'struct fpu', so we require that
- * it be at the end of 'thread_struct' and that
- * 'thread_struct' be at the end of 'task_struct'. If
- * you hit a compile error here, check the structure to
- * see if something got added to the end.
+ * '__fpstate' be at the end of it:
*/
CHECK_MEMBER_AT_END_OF(struct fpu, __fpstate);
- CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
- CHECK_MEMBER_AT_END_OF(struct task_struct, thread);
arch_task_struct_size = task_size;
}
@@ -204,7 +205,6 @@ static void __init fpu__init_system_xstate_size_legacy(void)
fpu_kernel_cfg.default_size = size;
fpu_user_cfg.max_size = size;
fpu_user_cfg.default_size = size;
- fpstate_reset(&current->thread.fpu);
}
/*
@@ -213,7 +213,6 @@ static void __init fpu__init_system_xstate_size_legacy(void)
*/
void __init fpu__init_system(void)
{
- fpstate_reset(&current->thread.fpu);
fpu__init_system_early_generic();
/*
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index 887b0b8e21e3..0986c2200adc 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -45,7 +45,7 @@ int regset_xregset_fpregs_active(struct task_struct *target, const struct user_r
*/
static void sync_fpstate(struct fpu *fpu)
{
- if (fpu == &current->thread.fpu)
+ if (fpu == x86_task_fpu(current))
fpu_sync_fpstate(fpu);
}
@@ -63,7 +63,7 @@ static void fpu_force_restore(struct fpu *fpu)
* Only stopped child tasks can be used to modify the FPU
* state in the fpstate buffer:
*/
- WARN_ON_FPU(fpu == &current->thread.fpu);
+ WARN_ON_FPU(fpu == x86_task_fpu(current));
__fpu_invalidate_fpregs_state(fpu);
}
@@ -71,7 +71,7 @@ static void fpu_force_restore(struct fpu *fpu)
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
- struct fpu *fpu = &target->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(target);
if (!cpu_feature_enabled(X86_FEATURE_FXSR))
return -ENODEV;
@@ -91,7 +91,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct fpu *fpu = &target->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(target);
struct fxregs_state newstate;
int ret;
@@ -133,7 +133,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
return -ENODEV;
- sync_fpstate(&target->thread.fpu);
+ sync_fpstate(x86_task_fpu(target));
copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_XSAVE);
return 0;
@@ -143,7 +143,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct fpu *fpu = &target->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(target);
struct xregs_state *tmpbuf = NULL;
int ret;
@@ -187,7 +187,7 @@ int ssp_active(struct task_struct *target, const struct user_regset *regset)
int ssp_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
- struct fpu *fpu = &target->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(target);
struct cet_user_state *cetregs;
if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) ||
@@ -214,7 +214,7 @@ int ssp_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct fpu *fpu = &target->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(target);
struct xregs_state *xsave = &fpu->fpstate->regs.xsave;
struct cet_user_state *cetregs;
unsigned long user_ssp;
@@ -368,7 +368,7 @@ static void __convert_from_fxsr(struct user_i387_ia32_struct *env,
void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
- __convert_from_fxsr(env, tsk, &tsk->thread.fpu.fpstate->regs.fxsave);
+ __convert_from_fxsr(env, tsk, &x86_task_fpu(tsk)->fpstate->regs.fxsave);
}
void convert_to_fxsr(struct fxregs_state *fxsave,
@@ -401,7 +401,7 @@ void convert_to_fxsr(struct fxregs_state *fxsave,
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
- struct fpu *fpu = &target->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(target);
struct user_i387_ia32_struct env;
struct fxregs_state fxsave, *fx;
@@ -433,7 +433,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct fpu *fpu = &target->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(target);
struct user_i387_ia32_struct env;
int ret;
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 6c69cb28b298..c3ec2512f2bb 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -43,13 +43,13 @@ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf,
* fpstate layout with out copying the extended state information
* in the memory layout.
*/
- if (__get_user(magic2, (__u32 __user *)(fpstate + current->thread.fpu.fpstate->user_size)))
+ if (__get_user(magic2, (__u32 __user *)(fpstate + x86_task_fpu(current)->fpstate->user_size)))
return false;
if (likely(magic2 == FP_XSTATE_MAGIC2))
return true;
setfx:
- trace_x86_fpu_xstate_check_failed(&current->thread.fpu);
+ trace_x86_fpu_xstate_check_failed(x86_task_fpu(current));
/* Set the parameters for fx only state */
fx_sw->magic1 = 0;
@@ -64,13 +64,13 @@ setfx:
static inline bool save_fsave_header(struct task_struct *tsk, void __user *buf)
{
if (use_fxsr()) {
- struct xregs_state *xsave = &tsk->thread.fpu.fpstate->regs.xsave;
+ struct xregs_state *xsave = &x86_task_fpu(tsk)->fpstate->regs.xsave;
struct user_i387_ia32_struct env;
struct _fpstate_32 __user *fp = buf;
fpregs_lock();
if (!test_thread_flag(TIF_NEED_FPU_LOAD))
- fxsave(&tsk->thread.fpu.fpstate->regs.fxsave);
+ fxsave(&x86_task_fpu(tsk)->fpstate->regs.fxsave);
fpregs_unlock();
convert_from_fxsr(&env, tsk);
@@ -114,7 +114,6 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
{
struct xregs_state __user *x = buf;
struct _fpx_sw_bytes sw_bytes = {};
- u32 xfeatures;
int err;
/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
@@ -128,12 +127,6 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
(__u32 __user *)(buf + fpstate->user_size));
/*
- * Read the xfeatures which we copied (directly from the cpu or
- * from the state in task struct) to the user buffers.
- */
- err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
-
- /*
* For legacy compatible, we always set FP/SSE bits in the bit
* vector while saving the state to the user context. This will
* enable us capturing any changes(during sigreturn) to
@@ -144,9 +137,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
* header as well as change any contents in the memory layout.
* xrestore as part of sigreturn will capture all the changes.
*/
- xfeatures |= XFEATURE_MASK_FPSSE;
-
- err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
+ err |= set_xfeature_in_sigframe(x, XFEATURE_MASK_FPSSE);
return !err;
}
@@ -184,7 +175,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pk
bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size, u32 pkru)
{
struct task_struct *tsk = current;
- struct fpstate *fpstate = tsk->thread.fpu.fpstate;
+ struct fpstate *fpstate = x86_task_fpu(tsk)->fpstate;
bool ia32_fxstate = (buf != buf_fx);
int ret;
@@ -272,7 +263,7 @@ static int __restore_fpregs_from_user(void __user *buf, u64 ufeatures,
*/
static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only)
{
- struct fpu *fpu = &current->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(current);
int ret;
/* Restore enabled features only. */
@@ -332,7 +323,7 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
bool ia32_fxstate)
{
struct task_struct *tsk = current;
- struct fpu *fpu = &tsk->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(tsk);
struct user_i387_ia32_struct env;
bool success, fx_only = false;
union fpregs_state *fpregs;
@@ -452,7 +443,7 @@ static inline unsigned int xstate_sigframe_size(struct fpstate *fpstate)
*/
bool fpu__restore_sig(void __user *buf, int ia32_frame)
{
- struct fpu *fpu = &current->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(current);
void __user *buf_fx = buf;
bool ia32_fxstate = false;
bool success = false;
@@ -499,7 +490,7 @@ unsigned long
fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
unsigned long *buf_fx, unsigned long *size)
{
- unsigned long frame_size = xstate_sigframe_size(current->thread.fpu.fpstate);
+ unsigned long frame_size = xstate_sigframe_size(x86_task_fpu(current)->fpstate);
*buf_fx = sp = round_down(sp - frame_size, 64);
if (ia32_frame && use_fxsr()) {
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 6a41d1610d8b..9aa9ac8399ae 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -14,13 +14,15 @@
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/coredump.h>
+#include <linux/sort.h>
#include <asm/fpu/api.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/xcr.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
+#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/prctl.h>
#include <asm/elf.h>
@@ -62,6 +64,7 @@ static const char *xfeature_names[] =
"unknown xstate feature",
"AMX Tile config",
"AMX Tile data",
+ "APX registers",
"unknown xstate feature",
};
@@ -80,6 +83,7 @@ static unsigned short xsave_cpuid_features[] __initdata = {
[XFEATURE_CET_USER] = X86_FEATURE_SHSTK,
[XFEATURE_XTILE_CFG] = X86_FEATURE_AMX_TILE,
[XFEATURE_XTILE_DATA] = X86_FEATURE_AMX_TILE,
+ [XFEATURE_APX] = X86_FEATURE_APX,
};
static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init =
@@ -88,6 +92,31 @@ static unsigned int xstate_sizes[XFEATURE_MAX] __ro_after_init =
{ [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_flags[XFEATURE_MAX] __ro_after_init;
+/*
+ * Ordering of xstate components in uncompacted format: The xfeature
+ * number does not necessarily indicate its position in the XSAVE buffer.
+ * This array defines the traversal order of xstate features.
+ */
+static unsigned int xfeature_uncompact_order[XFEATURE_MAX] __ro_after_init =
+ { [ 0 ... XFEATURE_MAX - 1] = -1};
+
+static inline unsigned int next_xfeature_order(unsigned int i, u64 mask)
+{
+ for (; xfeature_uncompact_order[i] != -1; i++) {
+ if (mask & BIT_ULL(xfeature_uncompact_order[i]))
+ break;
+ }
+
+ return i;
+}
+
+/* Iterate xstate features in uncompacted order: */
+#define for_each_extended_xfeature_in_order(i, mask) \
+ for (i = 0; \
+ i = next_xfeature_order(i, mask), \
+ xfeature_uncompact_order[i] != -1; \
+ i++)
+
#define XSTATE_FLAG_SUPERVISOR BIT(0)
#define XSTATE_FLAG_ALIGNED64 BIT(1)
@@ -199,7 +228,7 @@ void fpu__init_cpu_xstate(void)
* MSR_IA32_XSS sets supervisor states managed by XSAVES.
*/
if (boot_cpu_has(X86_FEATURE_XSAVES)) {
- wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
+ wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor() |
xfeatures_mask_independent());
}
}
@@ -209,16 +238,20 @@ static bool xfeature_enabled(enum xfeature xfeature)
return fpu_kernel_cfg.max_features & BIT_ULL(xfeature);
}
+static int compare_xstate_offsets(const void *xfeature1, const void *xfeature2)
+{
+ return xstate_offsets[*(unsigned int *)xfeature1] -
+ xstate_offsets[*(unsigned int *)xfeature2];
+}
+
/*
* Record the offsets and sizes of various xstates contained
- * in the XSAVE state memory layout.
+ * in the XSAVE state memory layout. Also, create an ordered
+ * list of xfeatures for handling out-of-order offsets.
*/
static void __init setup_xstate_cache(void)
{
- u32 eax, ebx, ecx, edx, i;
- /* start at the beginning of the "extended state" */
- unsigned int last_good_offset = offsetof(struct xregs_state,
- extended_state_area);
+ u32 eax, ebx, ecx, edx, xfeature, i = 0;
/*
* The FP xstates and SSE xstates are legacy states. They are always
* in the fixed offsets in the xsave area in either compacted form
@@ -232,31 +265,30 @@ static void __init setup_xstate_cache(void)
xstate_sizes[XFEATURE_SSE] = sizeof_field(struct fxregs_state,
xmm_space);
- for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
- cpuid_count(CPUID_LEAF_XSTATE, i, &eax, &ebx, &ecx, &edx);
+ for_each_extended_xfeature(xfeature, fpu_kernel_cfg.max_features) {
+ cpuid_count(CPUID_LEAF_XSTATE, xfeature, &eax, &ebx, &ecx, &edx);
- xstate_sizes[i] = eax;
- xstate_flags[i] = ecx;
+ xstate_sizes[xfeature] = eax;
+ xstate_flags[xfeature] = ecx;
/*
* If an xfeature is supervisor state, the offset in EBX is
* invalid, leave it to -1.
*/
- if (xfeature_is_supervisor(i))
+ if (xfeature_is_supervisor(xfeature))
continue;
- xstate_offsets[i] = ebx;
+ xstate_offsets[xfeature] = ebx;
- /*
- * In our xstate size checks, we assume that the highest-numbered
- * xstate feature has the highest offset in the buffer. Ensure
- * it does.
- */
- WARN_ONCE(last_good_offset > xstate_offsets[i],
- "x86/fpu: misordered xstate at %d\n", last_good_offset);
-
- last_good_offset = xstate_offsets[i];
+ /* Populate the list of xfeatures before sorting */
+ xfeature_uncompact_order[i++] = xfeature;
}
+
+ /*
+ * Sort xfeatures by their offsets to support out-of-order
+ * offsets in the uncompacted format.
+ */
+ sort(xfeature_uncompact_order, i, sizeof(unsigned int), compare_xstate_offsets, NULL);
}
/*
@@ -340,7 +372,8 @@ static __init void os_xrstor_booting(struct xregs_state *xstate)
XFEATURE_MASK_BNDCSR | \
XFEATURE_MASK_PASID | \
XFEATURE_MASK_CET_USER | \
- XFEATURE_MASK_XTILE)
+ XFEATURE_MASK_XTILE | \
+ XFEATURE_MASK_APX)
/*
* setup the xstate image representing the init state
@@ -540,6 +573,7 @@ static bool __init check_xstate_against_struct(int nr)
case XFEATURE_PASID: return XCHECK_SZ(sz, nr, struct ia32_pasid_state);
case XFEATURE_XTILE_CFG: return XCHECK_SZ(sz, nr, struct xtile_cfg);
case XFEATURE_CET_USER: return XCHECK_SZ(sz, nr, struct cet_user_state);
+ case XFEATURE_APX: return XCHECK_SZ(sz, nr, struct apx_state);
case XFEATURE_XTILE_DATA: check_xtile_data_against_struct(sz); return true;
default:
XSTATE_WARN_ON(1, "No structure for xstate: %d\n", nr);
@@ -552,13 +586,20 @@ static bool __init check_xstate_against_struct(int nr)
static unsigned int xstate_calculate_size(u64 xfeatures, bool compacted)
{
unsigned int topmost = fls64(xfeatures) - 1;
- unsigned int offset = xstate_offsets[topmost];
+ unsigned int offset, i;
if (topmost <= XFEATURE_SSE)
return sizeof(struct xregs_state);
- if (compacted)
+ if (compacted) {
offset = xfeature_get_offset(xfeatures, topmost);
+ } else {
+ /* Walk through the xfeature order to pick the last */
+ for_each_extended_xfeature_in_order(i, xfeatures)
+ topmost = xfeature_uncompact_order[i];
+ offset = xstate_offsets[topmost];
+ }
+
return offset + xstate_sizes[topmost];
}
@@ -639,7 +680,7 @@ static unsigned int __init get_xsave_compacted_size(void)
return get_compacted_size();
/* Disable independent features. */
- wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
+ wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor());
/*
* Ask the hardware what size is required of the buffer.
@@ -648,7 +689,7 @@ static unsigned int __init get_xsave_compacted_size(void)
size = get_compacted_size();
/* Re-enable independent features so XSAVES will work on them again. */
- wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);
+ wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);
return size;
}
@@ -711,6 +752,8 @@ static int __init init_xstate_size(void)
*/
static void __init fpu__init_disable_system_xstate(unsigned int legacy_size)
{
+ pr_info("x86/fpu: XSAVE disabled\n");
+
fpu_kernel_cfg.max_features = 0;
cr4_clear_bits(X86_CR4_OSXSAVE);
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
@@ -727,7 +770,7 @@ static void __init fpu__init_disable_system_xstate(unsigned int legacy_size)
*/
init_fpstate.xfd = 0;
- fpstate_reset(&current->thread.fpu);
+ fpstate_reset(x86_task_fpu(current));
}
/*
@@ -775,6 +818,17 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
goto out_disable;
}
+ if (fpu_kernel_cfg.max_features & XFEATURE_MASK_APX &&
+ fpu_kernel_cfg.max_features & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)) {
+ /*
+ * This is a problematic CPU configuration where two
+ * conflicting state components are both enumerated.
+ */
+ pr_err("x86/fpu: Both APX/MPX present in the CPU's xstate features: 0x%llx.\n",
+ fpu_kernel_cfg.max_features);
+ goto out_disable;
+ }
+
fpu_kernel_cfg.independent_features = fpu_kernel_cfg.max_features &
XFEATURE_MASK_INDEPENDENT;
@@ -834,9 +888,6 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
if (err)
goto out_disable;
- /* Reset the state for the current task */
- fpstate_reset(&current->thread.fpu);
-
/*
* Update info used for ptrace frames; use standard-format size and no
* supervisor xstates:
@@ -852,7 +903,7 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
init_fpstate.xfeatures = fpu_kernel_cfg.default_features;
if (init_fpstate.size > sizeof(init_fpstate.regs)) {
- pr_warn("x86/fpu: init_fpstate buffer too small (%zu < %d), disabling XSAVE\n",
+ pr_warn("x86/fpu: init_fpstate buffer too small (%zu < %d)\n",
sizeof(init_fpstate.regs), init_fpstate.size);
goto out_disable;
}
@@ -864,7 +915,7 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
* xfeatures mask.
*/
if (xfeatures != fpu_kernel_cfg.max_features) {
- pr_err("x86/fpu: xfeatures modified from 0x%016llx to 0x%016llx during init, disabling XSAVE\n",
+ pr_err("x86/fpu: xfeatures modified from 0x%016llx to 0x%016llx during init\n",
xfeatures, fpu_kernel_cfg.max_features);
goto out_disable;
}
@@ -904,12 +955,12 @@ void fpu__resume_cpu(void)
* of XSAVES and MSR_IA32_XSS.
*/
if (cpu_feature_enabled(X86_FEATURE_XSAVES)) {
- wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
+ wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor() |
xfeatures_mask_independent());
}
if (fpu_state_size_dynamic())
- wrmsrl(MSR_IA32_XFD, current->thread.fpu.fpstate->xfd);
+ wrmsrq(MSR_IA32_XFD, x86_task_fpu(current)->fpstate->xfd);
}
/*
@@ -1071,10 +1122,9 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
struct xregs_state *xinit = &init_fpstate.regs.xsave;
struct xregs_state *xsave = &fpstate->regs.xsave;
+ unsigned int zerofrom, i, xfeature;
struct xstate_header header;
- unsigned int zerofrom;
u64 mask;
- int i;
memset(&header, 0, sizeof(header));
header.xfeatures = xsave->header.xfeatures;
@@ -1143,15 +1193,16 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
*/
mask = header.xfeatures;
- for_each_extended_xfeature(i, mask) {
+ for_each_extended_xfeature_in_order(i, mask) {
+ xfeature = xfeature_uncompact_order[i];
/*
* If there was a feature or alignment gap, zero the space
* in the destination buffer.
*/
- if (zerofrom < xstate_offsets[i])
- membuf_zero(&to, xstate_offsets[i] - zerofrom);
+ if (zerofrom < xstate_offsets[xfeature])
+ membuf_zero(&to, xstate_offsets[xfeature] - zerofrom);
- if (i == XFEATURE_PKRU) {
+ if (xfeature == XFEATURE_PKRU) {
struct pkru_state pkru = {0};
/*
* PKRU is not necessarily up to date in the
@@ -1161,14 +1212,14 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
membuf_write(&to, &pkru, sizeof(pkru));
} else {
membuf_write(&to,
- __raw_xsave_addr(xsave, i),
- xstate_sizes[i]);
+ __raw_xsave_addr(xsave, xfeature),
+ xstate_sizes[xfeature]);
}
/*
* Keep track of the last copied state in the non-compacted
* target buffer for gap zeroing.
*/
- zerofrom = xstate_offsets[i] + xstate_sizes[i];
+ zerofrom = xstate_offsets[xfeature] + xstate_sizes[xfeature];
}
out:
@@ -1191,8 +1242,8 @@ out:
void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
enum xstate_copy_mode copy_mode)
{
- __copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate,
- tsk->thread.fpu.fpstate->user_xfeatures,
+ __copy_xstate_to_uabi_buf(to, x86_task_fpu(tsk)->fpstate,
+ x86_task_fpu(tsk)->fpstate->user_xfeatures,
tsk->thread.pkru, copy_mode);
}
@@ -1332,7 +1383,7 @@ int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u
int copy_sigframe_from_user_to_xstate(struct task_struct *tsk,
const void __user *ubuf)
{
- return copy_uabi_to_xstate(tsk->thread.fpu.fpstate, NULL, ubuf, &tsk->thread.pkru);
+ return copy_uabi_to_xstate(x86_task_fpu(tsk)->fpstate, NULL, ubuf, &tsk->thread.pkru);
}
static bool validate_independent_components(u64 mask)
@@ -1398,9 +1449,9 @@ void xrstors(struct xregs_state *xstate, u64 mask)
}
#if IS_ENABLED(CONFIG_KVM)
-void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature)
+void fpstate_clear_xstate_component(struct fpstate *fpstate, unsigned int xfeature)
{
- void *addr = get_xsave_addr(&fps->regs.xsave, xfeature);
+ void *addr = get_xsave_addr(&fpstate->regs.xsave, xfeature);
if (addr)
memset(addr, 0, xstate_sizes[xfeature]);
@@ -1426,7 +1477,7 @@ static bool xstate_op_valid(struct fpstate *fpstate, u64 mask, bool rstor)
* The XFD MSR does not match fpstate->xfd. That's invalid when
* the passed in fpstate is current's fpstate.
*/
- if (fpstate->xfd == current->thread.fpu.fpstate->xfd)
+ if (fpstate->xfd == x86_task_fpu(current)->fpstate->xfd)
return false;
/*
@@ -1503,7 +1554,7 @@ void fpstate_free(struct fpu *fpu)
static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
unsigned int usize, struct fpu_guest *guest_fpu)
{
- struct fpu *fpu = &current->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(current);
struct fpstate *curfps, *newfps = NULL;
unsigned int fpsize;
bool in_use;
@@ -1596,7 +1647,7 @@ static int __xstate_request_perm(u64 permitted, u64 requested, bool guest)
* AVX512.
*/
bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED);
- struct fpu *fpu = &current->group_leader->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(current->group_leader);
struct fpu_state_perm *perm;
unsigned int ksize, usize;
u64 mask;
@@ -1606,16 +1657,20 @@ static int __xstate_request_perm(u64 permitted, u64 requested, bool guest)
if ((permitted & requested) == requested)
return 0;
- /* Calculate the resulting kernel state size */
+ /*
+ * Calculate the resulting kernel state size. Note, @permitted also
+ * contains supervisor xfeatures even though supervisor states are always
+ * permitted for kernel and guest FPUs, and never permitted for user
+ * FPUs.
+ */
mask = permitted | requested;
- /* Take supervisor states into account on the host */
- if (!guest)
- mask |= xfeatures_mask_supervisor();
ksize = xstate_calculate_size(mask, compacted);
- /* Calculate the resulting user state size */
- mask &= XFEATURE_MASK_USER_SUPPORTED;
- usize = xstate_calculate_size(mask, false);
+ /*
+ * Calculate the resulting user state size. Take care not to clobber
+ * the supervisor xfeatures in the new mask!
+ */
+ usize = xstate_calculate_size(mask & XFEATURE_MASK_USER_SUPPORTED, false);
if (!guest) {
ret = validate_sigaltstack(usize);
@@ -1699,7 +1754,7 @@ int __xfd_enable_feature(u64 xfd_err, struct fpu_guest *guest_fpu)
return -EPERM;
}
- fpu = &current->group_leader->thread.fpu;
+ fpu = x86_task_fpu(current->group_leader);
perm = guest_fpu ? &fpu->guest_perm : &fpu->perm;
ksize = perm->__state_size;
usize = perm->__user_state_size;
@@ -1804,7 +1859,7 @@ long fpu_xstate_prctl(int option, unsigned long arg2)
*/
static void avx512_status(struct seq_file *m, struct task_struct *task)
{
- unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp);
+ unsigned long timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);
long delta;
if (!timestamp) {
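The xstate.c hunks above drop the old assumption that a higher xfeature number implies a higher offset in the uncompacted XSAVE buffer: they build an uncompact-order table sorted by offset and walk that instead, both for sizing and for the uabi copy. A toy sketch of the same ordering trick, with invented offsets and sizes and standard C only:

#include <stdio.h>
#include <stdlib.h>

#define NFEAT 5

static unsigned int off[NFEAT]  = { 0, 576, 832, 2688, 1088 };	/* out of order */
static unsigned int size[NFEAT] = { 160, 256, 256, 64, 1600 };
static unsigned int order[NFEAT];

static int cmp_by_offset(const void *a, const void *b)
{
	return (int)off[*(const unsigned int *)a] -
	       (int)off[*(const unsigned int *)b];
}

/* Uncompacted buffer ends after the enabled feature with the highest offset. */
static unsigned int uncompacted_size(unsigned long long mask)
{
	unsigned int last = 0;

	for (unsigned int i = 0; i < NFEAT; i++)
		if (mask & (1ULL << order[i]))
			last = order[i];
	return off[last] + size[last];
}

int main(void)
{
	for (unsigned int i = 0; i < NFEAT; i++)
		order[i] = i;
	qsort(order, NFEAT, sizeof(order[0]), cmp_by_offset);

	/* Features 0, 1 and 3 enabled: feature 3 sits at the highest offset. */
	printf("uncompacted size: %u bytes\n", uncompacted_size(0x0bULL));
	return 0;
}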
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index 0fd34f53f025..52ce19289989 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -5,6 +5,7 @@
#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>
+#include <asm/msr.h>
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u64, xfd_state);
@@ -22,7 +23,7 @@ static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
static inline u64 xstate_get_group_perm(bool guest)
{
- struct fpu *fpu = &current->group_leader->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(current->group_leader);
struct fpu_state_perm *perm;
/* Pairs with WRITE_ONCE() in xstate_request_perm() */
@@ -69,21 +70,31 @@ static inline u64 xfeatures_mask_independent(void)
return fpu_kernel_cfg.independent_features;
}
+static inline int set_xfeature_in_sigframe(struct xregs_state __user *xbuf, u64 mask)
+{
+ u64 xfeatures;
+ int err;
+
+ /* Read the xfeatures value already saved in the user buffer */
+ err = __get_user(xfeatures, &xbuf->header.xfeatures);
+ xfeatures |= mask;
+ err |= __put_user(xfeatures, &xbuf->header.xfeatures);
+
+ return err;
+}
+
/*
* Update the value of PKRU register that was already pushed onto the signal frame.
*/
-static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru)
+static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
{
- u64 xstate_bv;
int err;
if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
return 0;
/* Mark PKRU as in-use so that it is restored correctly. */
- xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU;
-
- err = __put_user(xstate_bv, &buf->header.xfeatures);
+ err = set_xfeature_in_sigframe(buf, XFEATURE_MASK_PKRU);
if (err)
return err;
@@ -171,7 +182,7 @@ static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rs
#ifdef CONFIG_X86_64
static inline void xfd_set_state(u64 xfd)
{
- wrmsrl(MSR_IA32_XFD, xfd);
+ wrmsrq(MSR_IA32_XFD, xfd);
__this_cpu_write(xfd_state, xfd);
}
@@ -288,7 +299,7 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkr
* internally, e.g. PKRU. That's user space ABI and also required
* to allow the signal handler to modify PKRU.
*/
- struct fpstate *fpstate = current->thread.fpu.fpstate;
+ struct fpstate *fpstate = x86_task_fpu(current)->fpstate;
u64 mask = fpstate->user_xfeatures;
u32 lmask;
u32 hmask;
@@ -307,7 +318,7 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkr
clac();
if (!err)
- err = update_pkru_in_sigframe(buf, mask, pkru);
+ err = update_pkru_in_sigframe(buf, pkru);
return err;
}
@@ -322,7 +333,7 @@ static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64
u32 hmask = mask >> 32;
int err;
- xfd_validate_state(current->thread.fpu.fpstate, mask, true);
+ xfd_validate_state(x86_task_fpu(current)->fpstate, mask, true);
stac();
XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
diff --git a/arch/x86/kernel/fred.c b/arch/x86/kernel/fred.c
index 5e2cd1004980..816187da3a47 100644
--- a/arch/x86/kernel/fred.c
+++ b/arch/x86/kernel/fred.c
@@ -3,6 +3,7 @@
#include <asm/desc.h>
#include <asm/fred.h>
+#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
@@ -43,23 +44,23 @@ void cpu_init_fred_exceptions(void)
*/
loadsegment(ss, __KERNEL_DS);
- wrmsrl(MSR_IA32_FRED_CONFIG,
+ wrmsrq(MSR_IA32_FRED_CONFIG,
/* Reserve for CALL emulation */
FRED_CONFIG_REDZONE |
FRED_CONFIG_INT_STKLVL(0) |
FRED_CONFIG_ENTRYPOINT(asm_fred_entrypoint_user));
- wrmsrl(MSR_IA32_FRED_STKLVLS, 0);
+ wrmsrq(MSR_IA32_FRED_STKLVLS, 0);
/*
* After a CPU offline/online cycle, the FRED RSP0 MSR should be
* resynchronized with its per-CPU cache.
*/
- wrmsrl(MSR_IA32_FRED_RSP0, __this_cpu_read(fred_rsp0));
+ wrmsrq(MSR_IA32_FRED_RSP0, __this_cpu_read(fred_rsp0));
- wrmsrl(MSR_IA32_FRED_RSP1, 0);
- wrmsrl(MSR_IA32_FRED_RSP2, 0);
- wrmsrl(MSR_IA32_FRED_RSP3, 0);
+ wrmsrq(MSR_IA32_FRED_RSP1, 0);
+ wrmsrq(MSR_IA32_FRED_RSP2, 0);
+ wrmsrq(MSR_IA32_FRED_RSP3, 0);
/* Enable FRED */
cr4_set_bits(X86_CR4_FRED);
@@ -79,14 +80,14 @@ void cpu_init_fred_rsps(void)
* (remember that user space faults are always taken on stack level 0)
* is to avoid overflowing the kernel stack.
*/
- wrmsrl(MSR_IA32_FRED_STKLVLS,
+ wrmsrq(MSR_IA32_FRED_STKLVLS,
FRED_STKLVL(X86_TRAP_DB, FRED_DB_STACK_LEVEL) |
FRED_STKLVL(X86_TRAP_NMI, FRED_NMI_STACK_LEVEL) |
FRED_STKLVL(X86_TRAP_MC, FRED_MC_STACK_LEVEL) |
FRED_STKLVL(X86_TRAP_DF, FRED_DF_STACK_LEVEL));
/* The FRED equivalents to IST stacks... */
- wrmsrl(MSR_IA32_FRED_RSP1, __this_cpu_ist_top_va(DB));
- wrmsrl(MSR_IA32_FRED_RSP2, __this_cpu_ist_top_va(NMI));
- wrmsrl(MSR_IA32_FRED_RSP3, __this_cpu_ist_top_va(DF));
+ wrmsrq(MSR_IA32_FRED_RSP1, __this_cpu_ist_top_va(DB));
+ wrmsrq(MSR_IA32_FRED_RSP2, __this_cpu_ist_top_va(NMI));
+ wrmsrq(MSR_IA32_FRED_RSP3, __this_cpu_ist_top_va(DF));
}
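The fred.c hunks are part of a wider rename in this pull of the 64-bit MSR accessors: wrmsrl()/rdmsrl() become wrmsrq()/rdmsrq(), the "q" suffix naming the quadword they actually move. A before/after sketch under the assumption that the semantics are unchanged; MSR_EXAMPLE is a placeholder, not a real register:

        u64 val;

        /* old spelling: */
        rdmsrl(MSR_EXAMPLE, val);
        wrmsrl(MSR_EXAMPLE, val | 1);

        /* new spelling, same behaviour: */
        rdmsrq(MSR_EXAMPLE, val);
        wrmsrq(MSR_EXAMPLE, val | 1);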
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index cace6e8d7cc7..252e82bcfd2f 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -55,10 +55,10 @@ void ftrace_arch_code_modify_post_process(void)
{
/*
* ftrace_make_{call,nop}() may be called during
- * module load, and we need to finish the text_poke_queue()
+ * module load, and we need to finish the smp_text_poke_batch_add()
* that they do, here.
*/
- text_poke_finish();
+ smp_text_poke_batch_finish();
ftrace_poke_late = 0;
mutex_unlock(&text_mutex);
}
@@ -119,7 +119,7 @@ ftrace_modify_code_direct(unsigned long ip, const char *old_code,
/* replace the text with the new text */
if (ftrace_poke_late)
- text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
+ smp_text_poke_batch_add((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
else
text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
return 0;
@@ -186,11 +186,11 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
ip = (unsigned long)(&ftrace_call);
new = ftrace_call_replace(ip, (unsigned long)func);
- text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
+ smp_text_poke_single((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
ip = (unsigned long)(&ftrace_regs_call);
new = ftrace_call_replace(ip, (unsigned long)func);
- text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
+ smp_text_poke_single((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
return 0;
}
@@ -247,10 +247,10 @@ void ftrace_replace_code(int enable)
break;
}
- text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
+ smp_text_poke_batch_add((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
ftrace_update_record(rec, enable);
}
- text_poke_finish();
+ smp_text_poke_batch_finish();
}
void arch_ftrace_update_code(int command)
@@ -354,7 +354,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
goto fail;
ip = trampoline + size;
- if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+ if (cpu_wants_rethunk_at(ip))
__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
else
memcpy(ip, retq, sizeof(retq));
@@ -492,7 +492,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
mutex_lock(&text_mutex);
/* Do a safe modify in case the trampoline is executing */
new = ftrace_call_replace(ip, (unsigned long)func);
- text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
+ smp_text_poke_single((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
mutex_unlock(&text_mutex);
}
@@ -586,7 +586,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
const char *new;
new = ftrace_jmp_replace(ip, (unsigned long)func);
- text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
+ smp_text_poke_single((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
return 0;
}
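The ftrace, jump_label and kprobes changes in this pull all follow the same rename of the text-poking primitives: text_poke_bp() becomes smp_text_poke_single(), text_poke_queue()/text_poke_finish() become smp_text_poke_batch_add()/smp_text_poke_batch_finish(), and text_poke_sync() becomes smp_text_poke_sync_each_cpu(). A sketch of the batched form, assuming the caller holds text_mutex as the hunks above do; the sites, opcode buffers and 5-byte length are placeholders:

static void patch_sites_example(void **site, const char (*insn)[5], int nr)
{
        int i;

        mutex_lock(&text_mutex);
        for (i = 0; i < nr; i++)
                smp_text_poke_batch_add(site[i], insn[i], 5, NULL);
        smp_text_poke_batch_finish();   /* apply the queue, sync all CPUs */
        mutex_unlock(&text_mutex);
}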
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index de001b2146ab..375f2d7f1762 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -145,10 +145,6 @@ void __init __no_stack_protector mk_early_pgtbl_32(void)
*ptr = (unsigned long)ptep + PAGE_OFFSET;
#ifdef CONFIG_MICROCODE_INITRD32
- /* Running on a hypervisor? */
- if (native_cpuid_ecx(1) & BIT(31))
- return;
-
params = (struct boot_params *)__pa_nodebug(&boot_params);
if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
return;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index fa9b6339975f..533fcf5636fc 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -47,234 +47,22 @@
* Manage page tables very early on.
*/
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
-static unsigned int __initdata next_early_pgt;
+unsigned int __initdata next_early_pgt;
+SYM_PIC_ALIAS(next_early_pgt);
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
-#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
-#endif
-#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
-#endif
-
-static inline bool check_la57_support(void)
-{
- if (!IS_ENABLED(CONFIG_X86_5LEVEL))
- return false;
-
- /*
- * 5-level paging is detected and enabled at kernel decompression
- * stage. Only check if it has been enabled there.
- */
- if (!(native_read_cr4() & X86_CR4_LA57))
- return false;
-
- RIP_REL_REF(__pgtable_l5_enabled) = 1;
- RIP_REL_REF(pgdir_shift) = 48;
- RIP_REL_REF(ptrs_per_p4d) = 512;
- RIP_REL_REF(page_offset_base) = __PAGE_OFFSET_BASE_L5;
- RIP_REL_REF(vmalloc_base) = __VMALLOC_BASE_L5;
- RIP_REL_REF(vmemmap_base) = __VMEMMAP_BASE_L5;
-
- return true;
-}
-
-static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
- pmdval_t *pmd,
- unsigned long p2v_offset)
-{
- unsigned long paddr, paddr_end;
- int i;
-
- /* Encrypt the kernel and related (if SME is active) */
- sme_encrypt_kernel(bp);
-
- /*
- * Clear the memory encryption mask from the .bss..decrypted section.
- * The bss section will be memset to zero later in the initialization so
- * there is no need to zero it after changing the memory encryption
- * attribute.
- */
- if (sme_get_me_mask()) {
- paddr = (unsigned long)&RIP_REL_REF(__start_bss_decrypted);
- paddr_end = (unsigned long)&RIP_REL_REF(__end_bss_decrypted);
-
- for (; paddr < paddr_end; paddr += PMD_SIZE) {
- /*
- * On SNP, transition the page to shared in the RMP table so that
- * it is consistent with the page table attribute change.
- *
- * __start_bss_decrypted has a virtual address in the high range
- * mapping (kernel .text). PVALIDATE, by way of
- * early_snp_set_memory_shared(), requires a valid virtual
- * address but the kernel is currently running off of the identity
- * mapping so use the PA to get a *currently* valid virtual address.
- */
- early_snp_set_memory_shared(paddr, paddr, PTRS_PER_PMD);
-
- i = pmd_index(paddr - p2v_offset);
- pmd[i] -= sme_get_me_mask();
- }
- }
-
- /*
- * Return the SME encryption mask (if SME is active) to be used as a
- * modifier for the initial pgdir entry programmed into CR3.
- */
- return sme_get_me_mask();
-}
-
-/* Code in __startup_64() can be relocated during execution, but the compiler
- * doesn't have to generate PC-relative relocations when accessing globals from
- * that function. Clang actually does not generate them, which leads to
- * boot-time crashes. To work around this problem, every global pointer must
- * be accessed using RIP_REL_REF(). Kernel virtual addresses can be determined
- * by subtracting p2v_offset from the RIP-relative address.
- */
-unsigned long __head __startup_64(unsigned long p2v_offset,
- struct boot_params *bp)
-{
- pmd_t (*early_pgts)[PTRS_PER_PMD] = RIP_REL_REF(early_dynamic_pgts);
- unsigned long physaddr = (unsigned long)&RIP_REL_REF(_text);
- unsigned long va_text, va_end;
- unsigned long pgtable_flags;
- unsigned long load_delta;
- pgdval_t *pgd;
- p4dval_t *p4d;
- pudval_t *pud;
- pmdval_t *pmd, pmd_entry;
- bool la57;
- int i;
-
- la57 = check_la57_support();
-
- /* Is the address too large? */
- if (physaddr >> MAX_PHYSMEM_BITS)
- for (;;);
-
- /*
- * Compute the delta between the address I am compiled to run at
- * and the address I am actually running at.
- */
- load_delta = __START_KERNEL_map + p2v_offset;
- RIP_REL_REF(phys_base) = load_delta;
-
- /* Is the address not 2M aligned? */
- if (load_delta & ~PMD_MASK)
- for (;;);
-
- va_text = physaddr - p2v_offset;
- va_end = (unsigned long)&RIP_REL_REF(_end) - p2v_offset;
-
- /* Include the SME encryption mask in the fixup value */
- load_delta += sme_get_me_mask();
-
- /* Fixup the physical addresses in the page table */
-
- pgd = &RIP_REL_REF(early_top_pgt)->pgd;
- pgd[pgd_index(__START_KERNEL_map)] += load_delta;
-
- if (IS_ENABLED(CONFIG_X86_5LEVEL) && la57) {
- p4d = (p4dval_t *)&RIP_REL_REF(level4_kernel_pgt);
- p4d[MAX_PTRS_PER_P4D - 1] += load_delta;
-
- pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE;
- }
-
- RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 2].pud += load_delta;
- RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 1].pud += load_delta;
-
- for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
- RIP_REL_REF(level2_fixmap_pgt)[i].pmd += load_delta;
-
- /*
- * Set up the identity mapping for the switchover. These
- * entries should *NOT* have the global bit set! This also
- * creates a bunch of nonsense entries but that is fine --
- * it avoids problems around wraparound.
- */
-
- pud = &early_pgts[0]->pmd;
- pmd = &early_pgts[1]->pmd;
- RIP_REL_REF(next_early_pgt) = 2;
-
- pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
-
- if (la57) {
- p4d = &early_pgts[RIP_REL_REF(next_early_pgt)++]->pmd;
-
- i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
- pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
- pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
-
- i = physaddr >> P4D_SHIFT;
- p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
- p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
- } else {
- i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
- pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
- pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
- }
-
- i = physaddr >> PUD_SHIFT;
- pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
- pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
-
- pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
- /* Filter out unsupported __PAGE_KERNEL_* bits: */
- pmd_entry &= RIP_REL_REF(__supported_pte_mask);
- pmd_entry += sme_get_me_mask();
- pmd_entry += physaddr;
-
- for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) {
- int idx = i + (physaddr >> PMD_SHIFT);
-
- pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
- }
-
- /*
- * Fixup the kernel text+data virtual addresses. Note that
- * we might write invalid pmds, when the kernel is relocated
- * cleanup_highmap() fixes this up along with the mappings
- * beyond _end.
- *
- * Only the region occupied by the kernel image has so far
- * been checked against the table of usable memory regions
- * provided by the firmware, so invalidate pages outside that
- * region. A page table entry that maps to a reserved area of
- * memory would allow processor speculation into that area,
- * and on some hardware (particularly the UV platform) even
- * speculative access to some reserved areas is caught as an
- * error, causing the BIOS to halt the system.
- */
-
- pmd = &RIP_REL_REF(level2_kernel_pgt)->pmd;
-
- /* invalidate pages before the kernel image */
- for (i = 0; i < pmd_index(va_text); i++)
- pmd[i] &= ~_PAGE_PRESENT;
-
- /* fixup pages that are part of the kernel image */
- for (; i <= pmd_index(va_end); i++)
- if (pmd[i] & _PAGE_PRESENT)
- pmd[i] += load_delta;
-
- /* invalidate pages after the kernel image */
- for (; i < PTRS_PER_PMD; i++)
- pmd[i] &= ~_PAGE_PRESENT;
-
- return sme_postprocess_startup(bp, pmd, p2v_offset);
-}
/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
@@ -449,6 +237,12 @@ asmlinkage __visible void __init __noreturn x86_64_start_kernel(char * real_mode
/* Kill off the identity-map trampoline */
reset_early_page_tables();
+ if (pgtable_l5_enabled()) {
+ page_offset_base = __PAGE_OFFSET_BASE_L5;
+ vmalloc_base = __VMALLOC_BASE_L5;
+ vmemmap_base = __VMEMMAP_BASE_L5;
+ }
+
clear_bss();
/*
@@ -513,41 +307,6 @@ void __init __noreturn x86_64_start_reservations(char *real_mode_data)
start_kernel();
}
-/*
- * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
- * used until the idt_table takes over. On the boot CPU this happens in
- * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
- * this happens in the functions called from head_64.S.
- *
- * The idt_table can't be used that early because all the code modifying it is
- * in idt.c and can be instrumented by tracing or KASAN, which both don't work
- * during early CPU bringup. Also the idt_table has the runtime vectors
- * configured which require certain CPU state to be setup already (like TSS),
- * which also hasn't happened yet in early CPU bringup.
- */
-static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;
-
-/* This may run while still in the direct mapping */
-static void __head startup_64_load_idt(void *vc_handler)
-{
- struct desc_ptr desc = {
- .address = (unsigned long)&RIP_REL_REF(bringup_idt_table),
- .size = sizeof(bringup_idt_table) - 1,
- };
- struct idt_data data;
- gate_desc idt_desc;
-
- /* @vc_handler is set only for a VMM Communication Exception */
- if (vc_handler) {
- init_idt_data(&data, X86_TRAP_VC, vc_handler);
- idt_init_desc(&idt_desc, &data);
- native_write_idt_entry((gate_desc *)desc.address, X86_TRAP_VC, &idt_desc);
- }
-
- native_load_idt(&desc);
-}
-
-/* This is used when running on kernel addresses */
void early_setup_idt(void)
{
void *handler = NULL;
@@ -559,30 +318,3 @@ void early_setup_idt(void)
startup_64_load_idt(handler);
}
-
-/*
- * Setup boot CPU state needed before kernel switches to virtual addresses.
- */
-void __head startup_64_setup_gdt_idt(void)
-{
- struct desc_struct *gdt = (void *)(__force unsigned long)gdt_page.gdt;
- void *handler = NULL;
-
- struct desc_ptr startup_gdt_descr = {
- .address = (unsigned long)&RIP_REL_REF(*gdt),
- .size = GDT_SIZE - 1,
- };
-
- /* Load GDT */
- native_load_gdt(&startup_gdt_descr);
-
- /* New GDT is live - reload data segment registers */
- asm volatile("movl %%eax, %%ds\n"
- "movl %%eax, %%ss\n"
- "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");
-
- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
- handler = &RIP_REL_REF(vc_no_ghcb);
-
- startup_64_load_idt(handler);
-}
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 2e42056d2306..76743dfad6ab 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -86,7 +86,7 @@ SYM_CODE_START(startup_32)
movl $pa(__bss_stop),%ecx
subl %edi,%ecx
shrl $2,%ecx
- rep ; stosl
+ rep stosl
/*
* Copy bootup parameters out of the way.
* Note: %esi still has the pointer to the real-mode data.
@@ -98,15 +98,13 @@ SYM_CODE_START(startup_32)
movl $pa(boot_params),%edi
movl $(PARAM_SIZE/4),%ecx
cld
- rep
- movsl
+ rep movsl
movl pa(boot_params) + NEW_CL_POINTER,%esi
andl %esi,%esi
jz 1f # No command line
movl $pa(boot_command_line),%edi
movl $(COMMAND_LINE_SIZE/4),%ecx
- rep
- movsl
+ rep movsl
1:
#ifdef CONFIG_OLPC
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index fefe2a25cf02..3e9b3a3bd039 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -573,6 +573,7 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb)
/* Pure iret required here - don't use INTERRUPT_RETURN */
iretq
SYM_CODE_END(vc_no_ghcb)
+SYM_PIC_ALIAS(vc_no_ghcb);
#endif
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
@@ -604,10 +605,12 @@ SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
.fill PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)
+SYM_PIC_ALIAS(early_top_pgt)
SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
.fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)
+SYM_PIC_ALIAS(early_dynamic_pgts);
SYM_DATA(early_recursion_flag, .long 0)
@@ -646,12 +649,11 @@ SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
SYM_DATA_END(init_top_pgt)
#endif
-#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
.fill 511,8,0
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
-#endif
+SYM_PIC_ALIAS(level4_kernel_pgt)
SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
.fill L3_START_KERNEL,8,0
@@ -659,6 +661,7 @@ SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
.quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)
+SYM_PIC_ALIAS(level3_kernel_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
/*
@@ -676,6 +679,7 @@ SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
*/
PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)
+SYM_PIC_ALIAS(level2_kernel_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
.fill (512 - 4 - FIXMAP_PMD_NUM),8,0
@@ -688,6 +692,7 @@ SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
/* 6 MB reserved space + a 2MB hole */
.fill 4,8,0
SYM_DATA_END(level2_fixmap_pgt)
+SYM_PIC_ALIAS(level2_fixmap_pgt)
SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
.rept (FIXMAP_PMD_NUM)
@@ -703,6 +708,7 @@ SYM_DATA(smpboot_control, .long 0)
.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
+SYM_PIC_ALIAS(phys_base);
EXPORT_SYMBOL(phys_base)
#include "../xen/xen-head.S"
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 7f4b2966e15c..d6387dde3ff9 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -7,11 +7,12 @@
#include <linux/cpu.h>
#include <linux/irq.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/time.h>
#include <asm/mwait.h>
+#include <asm/msr.h>
#undef pr_fmt
#define pr_fmt(fmt) "hpet: " fmt
@@ -970,7 +971,7 @@ static bool __init hpet_is_pc10_damaged(void)
return false;
/* Check whether PC10 is enabled in PKG C-state limit */
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
if ((pcfg & 0xF) < 8)
return false;
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 80e262bb627f..cb9852ad6098 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -46,7 +46,8 @@ bool __init pit_timer_init(void)
* VMMs otherwise steal CPU time just to pointlessly waggle
* the (masked) IRQ.
*/
- clockevent_i8253_disable();
+ scoped_guard(irq)
+ clockevent_i8253_disable();
return false;
}
clockevent_i8253_init(true);
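The i8253 change wraps clockevent_i8253_disable() in scoped_guard(irq), so local interrupts are masked for exactly that statement and restored automatically when the scope ends. A sketch of the pattern, assuming only the scoped_guard() helper from <linux/cleanup.h>; touch_timer_hw() is a placeholder, not a real function:

static void guarded_example(void)
{
        scoped_guard(irq) {
                /* runs with local IRQs disabled */
                touch_timer_hw();
        }
        /* IRQs are restored here, even on an early exit from the block */
}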
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 81f9b78e0f7b..9ed29ff10e59 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -380,61 +380,18 @@ void intel_posted_msi_init(void)
this_cpu_write(posted_msi_pi_desc.ndst, destination);
}
-/*
- * De-multiplexing posted interrupts is on the performance path, the code
- * below is written to optimize the cache performance based on the following
- * considerations:
- * 1.Posted interrupt descriptor (PID) fits in a cache line that is frequently
- * accessed by both CPU and IOMMU.
- * 2.During posted MSI processing, the CPU needs to do 64-bit read and xchg
- * for checking and clearing posted interrupt request (PIR), a 256 bit field
- * within the PID.
- * 3.On the other side, the IOMMU does atomic swaps of the entire PID cache
- * line when posting interrupts and setting control bits.
- * 4.The CPU can access the cache line a magnitude faster than the IOMMU.
- * 5.Each time the IOMMU does interrupt posting to the PIR will evict the PID
- * cache line. The cache line states after each operation are as follows:
- * CPU IOMMU PID Cache line state
- * ---------------------------------------------------------------
- *...read64 exclusive
- *...lock xchg64 modified
- *... post/atomic swap invalid
- *...-------------------------------------------------------------
- *
- * To reduce L1 data cache miss, it is important to avoid contention with
- * IOMMU's interrupt posting/atomic swap. Therefore, a copy of PIR is used
- * to dispatch interrupt handlers.
- *
- * In addition, the code is trying to keep the cache line state consistent
- * as much as possible. e.g. when making a copy and clearing the PIR
- * (assuming non-zero PIR bits are present in the entire PIR), it does:
- * read, read, read, read, xchg, xchg, xchg, xchg
- * instead of:
- * read, xchg, read, xchg, read, xchg, read, xchg
- */
-static __always_inline bool handle_pending_pir(u64 *pir, struct pt_regs *regs)
+static __always_inline bool handle_pending_pir(unsigned long *pir, struct pt_regs *regs)
{
- int i, vec = FIRST_EXTERNAL_VECTOR;
- unsigned long pir_copy[4];
- bool handled = false;
+ unsigned long pir_copy[NR_PIR_WORDS];
+ int vec = FIRST_EXTERNAL_VECTOR;
- for (i = 0; i < 4; i++)
- pir_copy[i] = pir[i];
-
- for (i = 0; i < 4; i++) {
- if (!pir_copy[i])
- continue;
+ if (!pi_harvest_pir(pir, pir_copy))
+ return false;
- pir_copy[i] = arch_xchg(&pir[i], 0);
- handled = true;
- }
-
- if (handled) {
- for_each_set_bit_from(vec, pir_copy, FIRST_SYSTEM_VECTOR)
- call_irq_handler(vec, regs);
- }
+ for_each_set_bit_from(vec, pir_copy, FIRST_SYSTEM_VECTOR)
+ call_irq_handler(vec, regs);
- return handled;
+ return true;
}
/*
@@ -464,7 +421,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_posted_msi_notification)
* MAX_POSTED_MSI_COALESCING_LOOP - 1 loops are executed here.
*/
while (++i < MAX_POSTED_MSI_COALESCING_LOOP) {
- if (!handle_pending_pir(pid->pir64, regs))
+ if (!handle_pending_pir(pid->pir, regs))
break;
}
@@ -479,7 +436,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_posted_msi_notification)
* process PIR bits one last time such that handling the new interrupts
* are not delayed until the next IRQ.
*/
- handle_pending_pir(pid->pir64, regs);
+ handle_pending_pir(pid->pir, regs);
apic_eoi();
irq_exit();
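The posted-MSI rework replaces the open-coded "copy the PIR, then xchg-clear only the non-zero words" loop with the new pi_harvest_pir() helper; the dispatch over the copied bitmap is unchanged. A generic illustration of the snapshot-then-clear idea the removed code implemented, not the kernel's implementation:

static bool snapshot_and_clear(unsigned long *pir, unsigned long *copy, int words)
{
        bool found = false;
        int i;

        /* plain reads first, to keep the shared cache line quiet */
        for (i = 0; i < words; i++)
                copy[i] = READ_ONCE(pir[i]);

        /* atomically clear only the words that had bits set */
        for (i = 0; i < words; i++) {
                if (!copy[i])
                        continue;
                copy[i] = arch_xchg(&pir[i], 0);
                found = true;
        }
        return found;
}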
diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c
index cd8ed1edbf9e..9e9a591a5fec 100644
--- a/arch/x86/kernel/jailhouse.c
+++ b/arch/x86/kernel/jailhouse.c
@@ -49,7 +49,7 @@ static uint32_t jailhouse_cpuid_base(void)
!boot_cpu_has(X86_FEATURE_HYPERVISOR))
return 0;
- return hypervisor_cpuid_base("Jailhouse\0\0\0", 0);
+ return cpuid_base_hypervisor("Jailhouse\0\0\0", 0);
}
static uint32_t __init jailhouse_detect(void)
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index f5b8ef02d172..a7949a54a0ff 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -102,7 +102,7 @@ __jump_label_transform(struct jump_entry *entry,
return;
}
- text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
+ smp_text_poke_single((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
}
static void __ref jump_label_transform(struct jump_entry *entry,
@@ -135,7 +135,7 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
mutex_lock(&text_mutex);
jlp = __jump_label_patch(entry, type);
- text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
+ smp_text_poke_batch_add((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
mutex_unlock(&text_mutex);
return true;
}
@@ -143,6 +143,6 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
void arch_jump_label_transform_apply(void)
{
mutex_lock(&text_mutex);
- text_poke_finish();
+ smp_text_poke_batch_finish();
mutex_unlock(&text_mutex);
}
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 68530fad05f7..24a41f0e0cf1 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -27,6 +27,8 @@
#include <asm/kexec-bzimage64.h>
#define MAX_ELFCOREHDR_STR_LEN 30 /* elfcorehdr=0x<64bit-value> */
+#define MAX_DMCRYPTKEYS_STR_LEN 31 /* dmcryptkeys=0x<64bit-value> */
+
/*
* Defines lowest physical address for various segments. Not sure where
@@ -76,6 +78,10 @@ static int setup_cmdline(struct kimage *image, struct boot_params *params,
if (image->type == KEXEC_TYPE_CRASH) {
len = sprintf(cmdline_ptr,
"elfcorehdr=0x%lx ", image->elf_load_addr);
+
+ if (image->dm_crypt_keys_addr != 0)
+ len += sprintf(cmdline_ptr + len,
+ "dmcryptkeys=0x%lx ", image->dm_crypt_keys_addr);
}
memcpy(cmdline_ptr + len, cmdline, cmdline_len);
cmdline_len += len;
@@ -233,6 +239,32 @@ setup_ima_state(const struct kimage *image, struct boot_params *params,
#endif /* CONFIG_IMA_KEXEC */
}
+static void setup_kho(const struct kimage *image, struct boot_params *params,
+ unsigned long params_load_addr,
+ unsigned int setup_data_offset)
+{
+ struct setup_data *sd = (void *)params + setup_data_offset;
+ struct kho_data *kho = (void *)sd + sizeof(*sd);
+
+ if (!IS_ENABLED(CONFIG_KEXEC_HANDOVER))
+ return;
+
+ sd->type = SETUP_KEXEC_KHO;
+ sd->len = sizeof(struct kho_data);
+
+ /* Only add if we have all KHO images in place */
+ if (!image->kho.fdt || !image->kho.scratch)
+ return;
+
+ /* Add setup data */
+ kho->fdt_addr = image->kho.fdt;
+ kho->fdt_size = PAGE_SIZE;
+ kho->scratch_addr = image->kho.scratch->mem;
+ kho->scratch_size = image->kho.scratch->bufsz;
+ sd->next = params->hdr.setup_data;
+ params->hdr.setup_data = params_load_addr + setup_data_offset;
+}
+
static int
setup_boot_parameters(struct kimage *image, struct boot_params *params,
unsigned long params_load_addr,
@@ -312,6 +344,13 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
sizeof(struct ima_setup_data);
}
+ if (IS_ENABLED(CONFIG_KEXEC_HANDOVER)) {
+ /* Setup space to store preservation metadata */
+ setup_kho(image, params, params_load_addr, setup_data_offset);
+ setup_data_offset += sizeof(struct setup_data) +
+ sizeof(struct kho_data);
+ }
+
/* Setup RNG seed */
setup_rng_seed(params, params_load_addr, setup_data_offset);
@@ -441,6 +480,19 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
ret = crash_load_segments(image);
if (ret)
return ERR_PTR(ret);
+ ret = crash_load_dm_crypt_keys(image);
+ if (ret == -ENOENT) {
+ kexec_dprintk("No dm crypt key to load\n");
+ } else if (ret) {
+ pr_err("Failed to load dm crypt keys\n");
+ return ERR_PTR(ret);
+ }
+ if (image->dm_crypt_keys_addr &&
+ cmdline_len + MAX_ELFCOREHDR_STR_LEN + MAX_DMCRYPTKEYS_STR_LEN >
+ header->cmdline_size) {
+ pr_err("Appending dmcryptkeys=<addr> to command line exceeds maximum allowed length\n");
+ return ERR_PTR(-EINVAL);
+ }
}
#endif
@@ -468,6 +520,8 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
efi_map_sz = efi_get_runtime_map_size();
params_cmdline_sz = sizeof(struct boot_params) + cmdline_len +
MAX_ELFCOREHDR_STR_LEN;
+ if (image->dm_crypt_keys_addr)
+ params_cmdline_sz += MAX_DMCRYPTKEYS_STR_LEN;
params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) +
sizeof(struct setup_data) +
@@ -479,6 +533,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
kbuf.bufsz += sizeof(struct setup_data) +
sizeof(struct ima_setup_data);
+ if (IS_ENABLED(CONFIG_KEXEC_HANDOVER))
+ kbuf.bufsz += sizeof(struct setup_data) +
+ sizeof(struct kho_data);
+
params = kzalloc(kbuf.bufsz, GFP_KERNEL);
if (!params)
return ERR_PTR(-ENOMEM);
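setup_kho() above hands the KHO FDT and scratch area to the next kernel through the boot_params setup_data chain. The linking step is a plain head insertion expressed with physical addresses so the next kernel can walk the list. A sketch with a made-up helper name; sd_phys stands for params_load_addr + setup_data_offset:

static void link_setup_data(struct boot_params *params,
                            struct setup_data *sd, u64 sd_phys)
{
        sd->next = params->hdr.setup_data;      /* old head becomes our next */
        params->hdr.setup_data = sd_phys;       /* header now points at the new node */
}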
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 09608fd93687..47cb8eb138ba 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -808,7 +808,7 @@ void arch_arm_kprobe(struct kprobe *p)
u8 int3 = INT3_INSN_OPCODE;
text_poke(p->addr, &int3, 1);
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1);
}
@@ -818,7 +818,7 @@ void arch_disarm_kprobe(struct kprobe *p)
perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1);
text_poke(p->addr, &p->opcode, 1);
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
}
void arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 36d6809c6c9e..0aabd4c4e2c4 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -488,7 +488,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
insn_buff[0] = JMP32_INSN_OPCODE;
*(s32 *)(&insn_buff[1]) = rel;
- text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);
+ smp_text_poke_single(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);
list_del_init(&op->list);
}
@@ -513,11 +513,11 @@ void arch_unoptimize_kprobe(struct optimized_kprobe *op)
JMP32_INSN_SIZE - INT3_INSN_SIZE);
text_poke(addr, new, INT3_INSN_SIZE);
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
text_poke(addr + INT3_INSN_SIZE,
new + INT3_INSN_SIZE,
JMP32_INSN_SIZE - INT3_INSN_SIZE);
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 3be9b3342c67..921c1c783bc1 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -40,6 +40,7 @@
#include <asm/mtrr.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
+#include <asm/msr.h>
#include <asm/ptrace.h>
#include <asm/reboot.h>
#include <asm/svm.h>
@@ -301,7 +302,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
token = __this_cpu_read(apf_reason.token);
kvm_async_pf_task_wake(token);
__this_cpu_write(apf_reason.token, 0);
- wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
+ wrmsrq(MSR_KVM_ASYNC_PF_ACK, 1);
}
set_irq_regs(old_regs);
@@ -327,7 +328,7 @@ static void kvm_register_steal_time(void)
if (!has_steal_clock)
return;
- wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
+ wrmsrq(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
pr_debug("stealtime: cpu %d, msr %llx\n", cpu,
(unsigned long long) slow_virt_to_phys(st));
}
@@ -361,9 +362,9 @@ static void kvm_guest_cpu_init(void)
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
- wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);
+ wrmsrq(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);
- wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
+ wrmsrq(MSR_KVM_ASYNC_PF_EN, pa);
__this_cpu_write(async_pf_enabled, true);
pr_debug("setup async PF for cpu %d\n", smp_processor_id());
}
@@ -376,7 +377,7 @@ static void kvm_guest_cpu_init(void)
__this_cpu_write(kvm_apic_eoi, 0);
pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
| KVM_MSR_ENABLED;
- wrmsrl(MSR_KVM_PV_EOI_EN, pa);
+ wrmsrq(MSR_KVM_PV_EOI_EN, pa);
}
if (has_steal_clock)
@@ -388,7 +389,7 @@ static void kvm_pv_disable_apf(void)
if (!__this_cpu_read(async_pf_enabled))
return;
- wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
+ wrmsrq(MSR_KVM_ASYNC_PF_EN, 0);
__this_cpu_write(async_pf_enabled, false);
pr_debug("disable async PF for cpu %d\n", smp_processor_id());
@@ -399,7 +400,7 @@ static void kvm_disable_steal_time(void)
if (!has_steal_clock)
return;
- wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
+ wrmsrq(MSR_KVM_STEAL_TIME, 0);
}
static u64 kvm_steal_clock(int cpu)
@@ -451,9 +452,9 @@ static void kvm_guest_cpu_offline(bool shutdown)
{
kvm_disable_steal_time();
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
- wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+ wrmsrq(MSR_KVM_PV_EOI_EN, 0);
if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
- wrmsrl(MSR_KVM_MIGRATION_CONTROL, 0);
+ wrmsrq(MSR_KVM_MIGRATION_CONTROL, 0);
kvm_pv_disable_apf();
if (!shutdown)
apf_task_wake_all();
@@ -615,7 +616,7 @@ static int __init setup_efi_kvm_sev_migration(void)
}
pr_info("%s : live migration enabled in EFI\n", __func__);
- wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY);
+ wrmsrq(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY);
return 1;
}
@@ -728,7 +729,7 @@ static int kvm_suspend(void)
#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
- rdmsrl(MSR_KVM_POLL_CONTROL, val);
+ rdmsrq(MSR_KVM_POLL_CONTROL, val);
has_guest_poll = !(val & 1);
#endif
return 0;
@@ -740,7 +741,7 @@ static void kvm_resume(void)
#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
- wrmsrl(MSR_KVM_POLL_CONTROL, 0);
+ wrmsrq(MSR_KVM_POLL_CONTROL, 0);
#endif
}
@@ -874,7 +875,7 @@ static noinline uint32_t __kvm_cpuid_base(void)
return 0; /* So we don't blow up on old processors */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
- return hypervisor_cpuid_base(KVM_SIGNATURE, 0);
+ return cpuid_base_hypervisor(KVM_SIGNATURE, 0);
return 0;
}
@@ -975,7 +976,7 @@ static void __init kvm_init_platform(void)
* If not booted using EFI, enable Live migration support.
*/
if (!efi_enabled(EFI_BOOT))
- wrmsrl(MSR_KVM_MIGRATION_CONTROL,
+ wrmsrq(MSR_KVM_MIGRATION_CONTROL,
KVM_MIGRATION_READY);
}
kvmclock_init();
@@ -1124,12 +1125,12 @@ out:
static void kvm_disable_host_haltpoll(void *i)
{
- wrmsrl(MSR_KVM_POLL_CONTROL, 0);
+ wrmsrq(MSR_KVM_POLL_CONTROL, 0);
}
static void kvm_enable_host_haltpoll(void *i)
{
- wrmsrl(MSR_KVM_POLL_CONTROL, 1);
+ wrmsrq(MSR_KVM_POLL_CONTROL, 1);
}
void arch_haltpoll_enable(unsigned int cpu)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 5b2c15214a6b..ca0a49eeac4a 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -60,7 +60,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
*/
static void kvm_get_wallclock(struct timespec64 *now)
{
- wrmsrl(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock));
+ wrmsrq(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock));
preempt_disable();
pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now);
preempt_enable();
@@ -173,7 +173,7 @@ static void kvm_register_clock(char *txt)
return;
pa = slow_virt_to_phys(&src->pvti) | 0x01ULL;
- wrmsrl(msr_kvm_system_time, pa);
+ wrmsrq(msr_kvm_system_time, pa);
pr_debug("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt);
}
@@ -196,7 +196,7 @@ static void kvm_setup_secondary_clock(void)
void kvmclock_disable(void)
{
if (msr_kvm_system_time)
- native_write_msr(msr_kvm_system_time, 0, 0);
+ native_write_msr(msr_kvm_system_time, 0);
}
static void __init kvmclock_init_mem(void)
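Alongside the wrmsrl()-to-wrmsrq() rename, the low-level native_write_msr() drops its split lo/hi arguments and takes the value as one 64-bit quantity, as the kvmclock_disable() hunk shows. A before/after fragment; the MSR variable is the one from that hunk:

        native_write_msr(msr_kvm_system_time, 0, 0);    /* old: value as lo, hi */
        native_write_msr(msr_kvm_system_time, 0);       /* new: one 64-bit value */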
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 80265162aeff..1f325304c4a8 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -42,7 +42,7 @@ static void load_segments(void)
static void machine_kexec_free_page_tables(struct kimage *image)
{
- free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER);
+ free_pages((unsigned long)image->arch.pgd, pgd_allocation_order());
image->arch.pgd = NULL;
#ifdef CONFIG_X86_PAE
free_page((unsigned long)image->arch.pmd0);
@@ -59,7 +59,7 @@ static void machine_kexec_free_page_tables(struct kimage *image)
static int machine_kexec_alloc_page_tables(struct kimage *image)
{
image->arch.pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- PGD_ALLOCATION_ORDER);
+ pgd_allocation_order());
#ifdef CONFIG_X86_PAE
image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index a68f5a0a9f37..697fb99406e6 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -76,6 +76,19 @@ map_acpi_tables(struct x86_mapping_info *info, pgd_t *level4p)
static int map_acpi_tables(struct x86_mapping_info *info, pgd_t *level4p) { return 0; }
#endif
+static int map_mmio_serial(struct x86_mapping_info *info, pgd_t *level4p)
+{
+ unsigned long mstart, mend;
+
+ if (!kexec_debug_8250_mmio32)
+ return 0;
+
+ mstart = kexec_debug_8250_mmio32 & PAGE_MASK;
+ mend = (kexec_debug_8250_mmio32 + PAGE_SIZE + 23) & PAGE_MASK;
+ pr_info("Map PCI serial at %lx - %lx\n", mstart, mend);
+ return kernel_ident_mapping_init(info, level4p, mstart, mend);
+}
+
#ifdef CONFIG_KEXEC_FILE
const struct kexec_file_ops * const kexec_file_loaders[] = {
&kexec_bzImage64_ops,
@@ -285,6 +298,10 @@ static int init_pgtable(struct kimage *image, unsigned long control_page)
if (result)
return result;
+ result = map_mmio_serial(&info, image->arch.pgd);
+ if (result)
+ return result;
+
/*
* This must be last because the intermediate page table pages it
* allocates will not be control pages and may overlap the image.
@@ -304,6 +321,24 @@ static void load_segments(void)
);
}
+static void prepare_debug_idt(unsigned long control_page, unsigned long vec_ofs)
+{
+ gate_desc idtentry = { 0 };
+ int i;
+
+ idtentry.bits.p = 1;
+ idtentry.bits.type = GATE_TRAP;
+ idtentry.segment = __KERNEL_CS;
+ idtentry.offset_low = (control_page & 0xFFFF) + vec_ofs;
+ idtentry.offset_middle = (control_page >> 16) & 0xFFFF;
+ idtentry.offset_high = control_page >> 32;
+
+ for (i = 0; i < 16; i++) {
+ kexec_debug_idt[i] = idtentry;
+ idtentry.offset_low += KEXEC_DEBUG_EXC_HANDLER_SIZE;
+ }
+}
+
int machine_kexec_prepare(struct kimage *image)
{
void *control_page = page_address(image->control_code_page);
@@ -321,6 +356,9 @@ int machine_kexec_prepare(struct kimage *image)
if (image->type == KEXEC_TYPE_DEFAULT)
kexec_pa_swap_page = page_to_pfn(image->swap_page) << PAGE_SHIFT;
+ prepare_debug_idt((unsigned long)__pa(control_page),
+ (unsigned long)kexec_debug_exc_vectors - reloc_start);
+
__memcpy(control_page, __relocate_kernel_start, reloc_end - reloc_start);
set_memory_rox((unsigned long)control_page, 1);
@@ -396,16 +434,10 @@ void __nocfi machine_kexec(struct kimage *image)
* with from a table in memory. At no other time is the
* descriptor table in memory accessed.
*
- * I take advantage of this here by force loading the
- * segments, before I zap the gdt with an invalid value.
+ * Take advantage of this here by force loading the segments,
+ * before the GDT is zapped with an invalid value.
*/
load_segments();
- /*
- * The gdt & idt are now invalid.
- * If you want to load them you must set up your own idt & gdt.
- */
- native_idt_invalidate();
- native_gdt_invalidate();
/* now call it */
image->start = relocate_kernel_ptr((unsigned long)image->head,
@@ -598,13 +630,35 @@ static void kexec_mark_crashkres(bool protect)
kexec_mark_range(control, crashk_res.end, protect);
}
+/* make the memory storing dm crypt keys in/accessible */
+static void kexec_mark_dm_crypt_keys(bool protect)
+{
+ unsigned long start_paddr, end_paddr;
+ unsigned int nr_pages;
+
+ if (kexec_crash_image->dm_crypt_keys_addr) {
+ start_paddr = kexec_crash_image->dm_crypt_keys_addr;
+ end_paddr = start_paddr + kexec_crash_image->dm_crypt_keys_sz - 1;
+ nr_pages = (PAGE_ALIGN(end_paddr) - PAGE_ALIGN_DOWN(start_paddr))/PAGE_SIZE;
+ if (protect)
+ set_memory_np((unsigned long)phys_to_virt(start_paddr), nr_pages);
+ else
+ __set_memory_prot(
+ (unsigned long)phys_to_virt(start_paddr),
+ nr_pages,
+ __pgprot(_PAGE_PRESENT | _PAGE_NX | _PAGE_RW));
+ }
+}
+
void arch_kexec_protect_crashkres(void)
{
kexec_mark_crashkres(true);
+ kexec_mark_dm_crypt_keys(true);
}
void arch_kexec_unprotect_crashkres(void)
{
+ kexec_mark_dm_crypt_keys(false);
kexec_mark_crashkres(false);
}
#endif
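A worked example for the nr_pages arithmetic in kexec_mark_dm_crypt_keys() above, assuming 4 KiB pages: the end address is rounded up and the start rounded down before dividing, so a key blob that straddles page boundaries is covered completely.

/*
 *   start_paddr = 0x10000100, size = 5000  =>  end_paddr = 0x10001487
 *   PAGE_ALIGN(0x10001487)      = 0x10002000
 *   PAGE_ALIGN_DOWN(0x10000100) = 0x10000000
 *   nr_pages = (0x10002000 - 0x10000000) / 0x1000 = 2
 */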
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 1f54eedc3015..ef6104e7cc72 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -97,7 +97,7 @@ static void get_fam10h_pci_mmconf_base(void)
/* SYS_CFG */
address = MSR_AMD64_SYSCFG;
- rdmsrl(address, val);
+ rdmsrq(address, val);
/* TOP_MEM2 is not enabled? */
if (!(val & (1<<21))) {
@@ -105,7 +105,7 @@ static void get_fam10h_pci_mmconf_base(void)
} else {
/* TOP_MEM2 */
address = MSR_K8_TOP_MEM2;
- rdmsrl(address, val);
+ rdmsrq(address, val);
tom2 = max(val & 0xffffff800000ULL, 1ULL << 32);
}
@@ -177,7 +177,7 @@ void fam10h_check_enable_mmcfg(void)
return;
address = MSR_FAM10H_MMIO_CONF_BASE;
- rdmsrl(address, val);
+ rdmsrq(address, val);
/* try to make sure that AP's setting is identical to BSP setting */
if (val & FAM10H_MMIO_CONF_ENABLE) {
@@ -212,7 +212,7 @@ void fam10h_check_enable_mmcfg(void)
(FAM10H_MMIO_CONF_BUSRANGE_MASK<<FAM10H_MMIO_CONF_BUSRANGE_SHIFT));
val |= fam10h_pci_mmconf_base | (8 << FAM10H_MMIO_CONF_BUSRANGE_SHIFT) |
FAM10H_MMIO_CONF_ENABLE;
- wrmsrl(address, val);
+ wrmsrq(address, val);
}
static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index a7998f351701..0ffbae902e2f 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -206,7 +206,7 @@ static int write_relocate_add(Elf64_Shdr *sechdrs,
write, apply);
if (!early) {
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
mutex_unlock(&text_mutex);
}
@@ -266,6 +266,8 @@ int module_finalize(const Elf_Ehdr *hdr,
ibt_endbr = s;
}
+ its_init_mod(me);
+
if (retpolines || cfi) {
void *rseg = NULL, *cseg = NULL;
unsigned int rsize = 0, csize = 0;
@@ -286,6 +288,9 @@ int module_finalize(const Elf_Ehdr *hdr,
void *rseg = (void *)retpolines->sh_addr;
apply_retpolines(rseg, rseg + retpolines->sh_size);
}
+
+ its_fini_mod(me);
+
if (returns) {
void *rseg = (void *)returns->sh_addr;
apply_returns(rseg, rseg + returns->sh_size);
@@ -326,4 +331,5 @@ int module_finalize(const Elf_Ehdr *hdr,
void module_arch_cleanup(struct module *mod)
{
alternatives_smp_module_del(mod);
+ its_free_mod(mod);
}
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 9a95d00f1423..be93ec7255bf 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -49,27 +49,20 @@ struct nmi_desc {
struct list_head head;
};
-static struct nmi_desc nmi_desc[NMI_MAX] =
-{
- {
- .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
- .head = LIST_HEAD_INIT(nmi_desc[0].head),
- },
- {
- .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
- .head = LIST_HEAD_INIT(nmi_desc[1].head),
- },
- {
- .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
- .head = LIST_HEAD_INIT(nmi_desc[2].head),
- },
- {
- .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
- .head = LIST_HEAD_INIT(nmi_desc[3].head),
- },
+#define NMI_DESC_INIT(type) { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[type].lock), \
+ .head = LIST_HEAD_INIT(nmi_desc[type].head), \
+}
+static struct nmi_desc nmi_desc[NMI_MAX] = {
+ NMI_DESC_INIT(NMI_LOCAL),
+ NMI_DESC_INIT(NMI_UNKNOWN),
+ NMI_DESC_INIT(NMI_SERR),
+ NMI_DESC_INIT(NMI_IO_CHECK),
};
+#define nmi_to_desc(type) (&nmi_desc[type])
+
struct nmi_stats {
unsigned int normal;
unsigned int unknown;
@@ -91,6 +84,9 @@ static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
static int ignore_nmis __read_mostly;
int unknown_nmi_panic;
+int panic_on_unrecovered_nmi;
+int panic_on_io_nmi;
+
/*
* Prevent NMI reason port (0x61) being accessed simultaneously, can
* only be used in NMI handler.
@@ -104,8 +100,6 @@ static int __init setup_unknown_nmi_panic(char *str)
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
-#define nmi_to_desc(type) (&nmi_desc[type])
-
static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;
static int __init nmi_warning_debugfs(void)
@@ -125,12 +119,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
action->max_duration = duration;
- remainder_ns = do_div(duration, (1000 * 1000));
- decimal_msecs = remainder_ns / 1000;
+ /* Convert duration from nsec to msec */
+ remainder_ns = do_div(duration, NSEC_PER_MSEC);
+ decimal_msecs = remainder_ns / NSEC_PER_USEC;
- printk_ratelimited(KERN_INFO
- "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
- action->handler, duration, decimal_msecs);
+ pr_info_ratelimited("INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
+ action->handler, duration, decimal_msecs);
}
static int nmi_handle(unsigned int type, struct pt_regs *regs)
@@ -333,10 +327,9 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
int handled;
/*
- * Use 'false' as back-to-back NMIs are dealt with one level up.
- * Of course this makes having multiple 'unknown' handlers useless
- * as only the first one is ever run (unless it can actually determine
- * if it caused the NMI)
+ * As a last resort, let the "unknown" handlers make a
+ * best-effort attempt to figure out if they can claim
+ * responsibility for this Unknown NMI.
*/
handled = nmi_handle(NMI_UNKNOWN, regs);
if (handled) {
@@ -366,17 +359,18 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
bool b2b = false;
/*
- * CPU-specific NMI must be processed before non-CPU-specific
- * NMI, otherwise we may lose it, because the CPU-specific
- * NMI can not be detected/processed on other CPUs.
- */
-
- /*
- * Back-to-back NMIs are interesting because they can either
- * be two NMI or more than two NMIs (any thing over two is dropped
- * due to NMI being edge-triggered). If this is the second half
- * of the back-to-back NMI, assume we dropped things and process
- * more handlers. Otherwise reset the 'swallow' NMI behaviour
+ * Back-to-back NMIs are detected by comparing the RIP of the
+ * current NMI with that of the previous NMI. If it is the same,
+ * it is assumed that the CPU did not have a chance to jump back
+ * into a non-NMI context and execute code in between the two
+ * NMIs.
+ *
+ * They are interesting because even if there are more than two,
+ * only a maximum of two can be detected (anything over two is
+ * dropped due to NMI being edge-triggered). If this is the
+ * second half of the back-to-back NMI, assume we dropped things
+ * and process more handlers. Otherwise, reset the 'swallow' NMI
+ * behavior.
*/
if (regs->ip == __this_cpu_read(last_nmi_rip))
b2b = true;
@@ -390,6 +384,11 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
if (microcode_nmi_handler_enabled() && microcode_nmi_handler())
goto out;
+ /*
+ * CPU-specific NMI must be processed before non-CPU-specific
+ * NMI, otherwise we may lose it, because the CPU-specific
+ * NMI can not be detected/processed on other CPUs.
+ */
handled = nmi_handle(NMI_LOCAL, regs);
__this_cpu_add(nmi_stats.normal, handled);
if (handled) {
@@ -426,13 +425,14 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
pci_serr_error(reason, regs);
else if (reason & NMI_REASON_IOCHK)
io_check_error(reason, regs);
-#ifdef CONFIG_X86_32
+
/*
* Reassert NMI in case it became active
* meanwhile as it's edge-triggered:
*/
- reassert_nmi();
-#endif
+ if (IS_ENABLED(CONFIG_X86_32))
+ reassert_nmi();
+
__this_cpu_add(nmi_stats.external, 1);
raw_spin_unlock(&nmi_reason_lock);
goto out;
@@ -751,4 +751,3 @@ void local_touch_nmi(void)
{
__this_cpu_write(last_nmi_rip, 0);
}
-EXPORT_SYMBOL_GPL(local_touch_nmi);
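In the nmi_check_duration() hunk above, the magic 1000 * 1000 divisor becomes NSEC_PER_MSEC and the conversion is spelled out: do_div() leaves the whole milliseconds in 'duration' and returns the nanosecond remainder, which is then scaled to the three printed decimals. For example, duration = 3,456,789 ns gives duration = 3 and a remainder of 456,789, so decimal_msecs = 456 and the message reads "took too long to run: 3.456 msecs".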
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
index e93a8545c74d..a010e9d062bf 100644
--- a/arch/x86/kernel/nmi_selftest.c
+++ b/arch/x86/kernel/nmi_selftest.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * arch/x86/kernel/nmi-selftest.c
- *
* Testsuite for NMI: IPIs
*
* Started by Don Zickus:
@@ -30,7 +28,6 @@ static DECLARE_BITMAP(nmi_ipi_mask, NR_CPUS) __initdata;
static int __initdata testcase_total;
static int __initdata testcase_successes;
-static int __initdata expected_testcase_failures;
static int __initdata unexpected_testcase_failures;
static int __initdata unexpected_testcase_unknowns;
@@ -120,26 +117,22 @@ static void __init dotest(void (*testcase_fn)(void), int expected)
unexpected_testcase_failures++;
if (nmi_fail == FAILURE)
- printk(KERN_CONT "FAILED |");
+ pr_cont("FAILED |");
else if (nmi_fail == TIMEOUT)
- printk(KERN_CONT "TIMEOUT|");
+ pr_cont("TIMEOUT|");
else
- printk(KERN_CONT "ERROR |");
+ pr_cont("ERROR |");
dump_stack();
} else {
testcase_successes++;
- printk(KERN_CONT " ok |");
+ pr_cont(" ok |");
}
- testcase_total++;
+ pr_cont("\n");
+ testcase_total++;
reset_nmi();
}
-static inline void __init print_testname(const char *testname)
-{
- printk("%12s:", testname);
-}
-
void __init nmi_selftest(void)
{
init_nmi_testsuite();
@@ -147,38 +140,25 @@ void __init nmi_selftest(void)
/*
* Run the testsuite:
*/
- printk("----------------\n");
- printk("| NMI testsuite:\n");
- printk("--------------------\n");
+ pr_info("----------------\n");
+ pr_info("| NMI testsuite:\n");
+ pr_info("--------------------\n");
- print_testname("remote IPI");
+ pr_info("%12s:", "remote IPI");
dotest(remote_ipi, SUCCESS);
- printk(KERN_CONT "\n");
- print_testname("local IPI");
+
+ pr_info("%12s:", "local IPI");
dotest(local_ipi, SUCCESS);
- printk(KERN_CONT "\n");
cleanup_nmi_testsuite();
+ pr_info("--------------------\n");
if (unexpected_testcase_failures) {
- printk("--------------------\n");
- printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n",
+ pr_info("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n",
unexpected_testcase_failures, testcase_total);
- printk("-----------------------------------------------------------------\n");
- } else if (expected_testcase_failures && testcase_successes) {
- printk("--------------------\n");
- printk("%3d out of %3d testcases failed, as expected. |\n",
- expected_testcase_failures, testcase_total);
- printk("----------------------------------------------------\n");
- } else if (expected_testcase_failures && !testcase_successes) {
- printk("--------------------\n");
- printk("All %3d testcases failed, as expected. |\n",
- expected_testcase_failures);
- printk("----------------------------------------\n");
} else {
- printk("--------------------\n");
- printk("Good, all %3d testcases passed! |\n",
+ pr_info("Good, all %3d testcases passed! |\n",
testcase_successes);
- printk("---------------------------------\n");
}
+ pr_info("-----------------------------------------------------------------\n");
}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 1ccd05d8999f..ab3e172dcc69 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -33,6 +33,7 @@
#include <asm/tlb.h>
#include <asm/io_bitmap.h>
#include <asm/gsseg.h>
+#include <asm/msr.h>
/* stub always returning 0. */
DEFINE_ASM_FUNC(paravirt_ret0, "xor %eax,%eax", .entry.text);
@@ -210,12 +211,10 @@ struct paravirt_patch_template pv_ops = {
.mmu.set_p4d = native_set_p4d,
-#if CONFIG_PGTABLE_LEVELS >= 5
.mmu.p4d_val = PTE_IDENT,
.mmu.make_p4d = PTE_IDENT,
.mmu.set_pgd = native_set_pgd,
-#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
.mmu.pte_val = PTE_IDENT,
.mmu.pgd_val = PTE_IDENT,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 962c3ce39323..c1d2dac72b9c 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -30,7 +30,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/entry-common.h>
#include <asm/cpu.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/apic.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
@@ -52,6 +52,7 @@
#include <asm/unwind.h>
#include <asm/tdx.h>
#include <asm/mmu_context.h>
+#include <asm/msr.h>
#include <asm/shstk.h>
#include "process.h"
@@ -93,17 +94,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- /* init_task is not dynamically sized (incomplete FPU state) */
- if (unlikely(src == &init_task))
- memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(init_task), 0);
- else
- memcpy(dst, src, arch_task_struct_size);
+ /* fpu_clone() will initialize the "dst_fpu" memory */
+ memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(*dst), 0);
#ifdef CONFIG_VM86
dst->thread.vm86 = NULL;
#endif
- /* Drop the copied pointer to current's fpstate */
- dst->thread.fpu.fpstate = NULL;
return 0;
}
@@ -111,8 +107,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
#ifdef CONFIG_X86_64
void arch_release_task_struct(struct task_struct *tsk)
{
- if (fpu_state_size_dynamic())
- fpstate_free(&tsk->thread.fpu);
+ if (fpu_state_size_dynamic() && !(tsk->flags & (PF_KTHREAD | PF_USER_WORKER)))
+ fpstate_free(x86_task_fpu(tsk));
}
#endif
@@ -122,7 +118,6 @@ void arch_release_task_struct(struct task_struct *tsk)
void exit_thread(struct task_struct *tsk)
{
struct thread_struct *t = &tsk->thread;
- struct fpu *fpu = &t->fpu;
if (test_thread_flag(TIF_IO_BITMAP))
io_bitmap_exit(tsk);
@@ -130,7 +125,7 @@ void exit_thread(struct task_struct *tsk)
free_vm86(t);
shstk_free(tsk);
- fpu__drop(fpu);
+ fpu__drop(tsk);
}
static int set_new_tls(struct task_struct *p, unsigned long tls)
@@ -344,7 +339,7 @@ static void set_cpuid_faulting(bool on)
msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
this_cpu_write(msr_misc_features_shadow, msrval);
- wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
+ wrmsrq(MSR_MISC_FEATURES_ENABLES, msrval);
}
static void disable_cpuid(void)
@@ -561,7 +556,7 @@ static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
if (!static_cpu_has(X86_FEATURE_ZEN)) {
msr |= ssbd_tif_to_amd_ls_cfg(tifn);
- wrmsrl(MSR_AMD64_LS_CFG, msr);
+ wrmsrq(MSR_AMD64_LS_CFG, msr);
return;
}
@@ -578,7 +573,7 @@ static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
raw_spin_lock(&st->shared_state->lock);
/* First sibling enables SSBD: */
if (!st->shared_state->disable_state)
- wrmsrl(MSR_AMD64_LS_CFG, msr);
+ wrmsrq(MSR_AMD64_LS_CFG, msr);
st->shared_state->disable_state++;
raw_spin_unlock(&st->shared_state->lock);
} else {
@@ -588,7 +583,7 @@ static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
raw_spin_lock(&st->shared_state->lock);
st->shared_state->disable_state--;
if (!st->shared_state->disable_state)
- wrmsrl(MSR_AMD64_LS_CFG, msr);
+ wrmsrq(MSR_AMD64_LS_CFG, msr);
raw_spin_unlock(&st->shared_state->lock);
}
}
@@ -597,7 +592,7 @@ static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
- wrmsrl(MSR_AMD64_LS_CFG, msr);
+ wrmsrq(MSR_AMD64_LS_CFG, msr);
}
#endif
@@ -607,7 +602,7 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
* SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
* so ssbd_tif_to_spec_ctrl() just works.
*/
- wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+ wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}
/*
@@ -710,11 +705,11 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
arch_has_block_step()) {
unsigned long debugctl, msk;
- rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
debugctl &= ~DEBUGCTLMSR_BTF;
msk = tifn & _TIF_BLOCKSTEP;
debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
}
if ((tifp ^ tifn) & _TIF_NOTSC)
@@ -907,13 +902,10 @@ static __init bool prefer_mwait_c1_over_halt(void)
static __cpuidle void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
- if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
- mb(); /* quirk */
- clflush((void *)&current_thread_info()->flags);
- mb(); /* quirk */
- }
+ const void *addr = &current_thread_info()->flags;
- __monitor((void *)&current_thread_info()->flags, 0, 0);
+ alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
+ __monitor(addr, 0, 0);
if (!need_resched()) {
__sti_mwait(0, 0);
raw_local_irq_disable();
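Several hunks in this pull (seen in xstate.h and process.c) stop dereferencing task->thread.fpu directly and go through the x86_task_fpu() accessor instead, so the FPU state location can change without touching every call site. A before/after fragment taken from those hunks, not meant to compile as one unit:

        struct fpu *fpu;

        fpu = &current->thread.fpu;     /* old: direct field access */
        fpu = x86_task_fpu(current);    /* new: accessor helper */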
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 4636ef359973..a10e180cbf23 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -160,8 +160,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
- if (!test_tsk_thread_flag(prev_p, TIF_NEED_FPU_LOAD))
- switch_fpu_prepare(prev_p, cpu);
+ switch_fpu(prev_p, cpu);
/*
* Save away %gs. No need to save %fs, as it was saved on the
@@ -208,10 +207,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
raw_cpu_write(current_task, next_p);
- switch_fpu_finish(next_p);
-
/* Load the Intel cache allocation PQR MSR. */
- resctrl_sched_in(next_p);
+ resctrl_arch_sched_in(next_p);
return prev_p;
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 7196ca7048be..8d6cf25127aa 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -57,6 +57,7 @@
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#include <asm/fred.h>
+#include <asm/msr.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
@@ -95,8 +96,8 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
return;
if (mode == SHOW_REGS_USER) {
- rdmsrl(MSR_FS_BASE, fs);
- rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+ rdmsrq(MSR_FS_BASE, fs);
+ rdmsrq(MSR_KERNEL_GS_BASE, shadowgs);
printk("%sFS: %016lx GS: %016lx\n",
log_lvl, fs, shadowgs);
return;
@@ -107,9 +108,9 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
asm("movl %%fs,%0" : "=r" (fsindex));
asm("movl %%gs,%0" : "=r" (gsindex));
- rdmsrl(MSR_FS_BASE, fs);
- rdmsrl(MSR_GS_BASE, gs);
- rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+ rdmsrq(MSR_FS_BASE, fs);
+ rdmsrq(MSR_GS_BASE, gs);
+ rdmsrq(MSR_KERNEL_GS_BASE, shadowgs);
cr0 = read_cr0();
cr2 = read_cr2();
@@ -195,7 +196,7 @@ static noinstr unsigned long __rdgsbase_inactive(void)
native_swapgs();
} else {
instrumentation_begin();
- rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
+ rdmsrq(MSR_KERNEL_GS_BASE, gsbase);
instrumentation_end();
}
@@ -221,7 +222,7 @@ static noinstr void __wrgsbase_inactive(unsigned long gsbase)
native_swapgs();
} else {
instrumentation_begin();
- wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
+ wrmsrq(MSR_KERNEL_GS_BASE, gsbase);
instrumentation_end();
}
}
@@ -353,7 +354,7 @@ static __always_inline void load_seg_legacy(unsigned short prev_index,
} else {
if (prev_index != next_index)
loadseg(which, next_index);
- wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
+ wrmsrq(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
next_base);
}
} else {
@@ -463,7 +464,7 @@ unsigned long x86_gsbase_read_cpu_inactive(void)
gsbase = __rdgsbase_inactive();
local_irq_restore(flags);
} else {
- rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
+ rdmsrq(MSR_KERNEL_GS_BASE, gsbase);
}
return gsbase;
@@ -478,7 +479,7 @@ void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
__wrgsbase_inactive(gsbase);
local_irq_restore(flags);
} else {
- wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
+ wrmsrq(MSR_KERNEL_GS_BASE, gsbase);
}
}
@@ -616,8 +617,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
this_cpu_read(hardirq_stack_inuse));
- if (!test_tsk_thread_flag(prev_p, TIF_NEED_FPU_LOAD))
- switch_fpu_prepare(prev_p, cpu);
+ switch_fpu(prev_p, cpu);
/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
@@ -671,8 +671,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
raw_cpu_write(current_task, next_p);
raw_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
- switch_fpu_finish(next_p);
-
/* Reload sp0. */
update_task_stack(next_p);
@@ -707,7 +705,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
}
/* Load the Intel cache allocation PQR MSR. */
- resctrl_sched_in(next_p);
+ resctrl_arch_sched_in(next_p);
return prev_p;
}
diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
index b7c0f142d026..4679ac0a03eb 100644
--- a/arch/x86/kernel/reboot_fixups_32.c
+++ b/arch/x86/kernel/reboot_fixups_32.c
@@ -27,7 +27,7 @@ static void cs5530a_warm_reset(struct pci_dev *dev)
static void cs5536_warm_reset(struct pci_dev *dev)
{
/* writing 1 to the LSB of this MSR causes a hard reset */
- wrmsrl(MSR_DIVIL_SOFT_RESET, 1ULL);
+ wrmsrq(MSR_DIVIL_SOFT_RESET, 1ULL);
udelay(50); /* shouldn't get here but be safe and spin a while */
}
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index c7c4b1917336..57276f134d12 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -263,17 +263,17 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
movl %edx, %edi
movl $1024, %ecx
- rep ; movsl
+ rep movsl
movl %ebp, %edi
movl %eax, %esi
movl $1024, %ecx
- rep ; movsl
+ rep movsl
movl %eax, %edi
movl %edx, %esi
movl $1024, %ecx
- rep ; movsl
+ rep movsl
lea PAGE_SIZE(%ebp), %esi
jmp 0b
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index ac058971a382..ea604f4d0b52 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -39,6 +39,8 @@ SYM_DATA(kexec_va_control_page, .quad 0)
SYM_DATA(kexec_pa_table_page, .quad 0)
SYM_DATA(kexec_pa_swap_page, .quad 0)
SYM_DATA_LOCAL(pa_backup_pages_map, .quad 0)
+SYM_DATA(kexec_debug_8250_mmio32, .quad 0)
+SYM_DATA(kexec_debug_8250_port, .word 0)
.balign 16
SYM_DATA_START_LOCAL(kexec_debug_gdt)
@@ -50,6 +52,11 @@ SYM_DATA_START_LOCAL(kexec_debug_gdt)
.quad 0x00cf92000000ffff /* __KERNEL_DS */
SYM_DATA_END_LABEL(kexec_debug_gdt, SYM_L_LOCAL, kexec_debug_gdt_end)
+ .balign 8
+SYM_DATA_START(kexec_debug_idt)
+ .skip 0x100, 0x00
+SYM_DATA_END(kexec_debug_idt)
+
.section .text..relocate_kernel,"ax";
.code64
SYM_CODE_START_NOALIGN(relocate_kernel)
@@ -72,8 +79,13 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
pushq %r15
pushf
- /* zero out flags, and disable interrupts */
- pushq $0
+ /* Invalidate GDT/IDT, zero out flags */
+ pushq $0
+ pushq $0
+
+ lidt (%rsp)
+ lgdt (%rsp)
+ addq $8, %rsp
popfq
/* Switch to the identity mapped page tables */
@@ -139,6 +151,15 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
movq %ds, %rax
movq %rax, %ds
+ /* Now an IDTR on the stack to load the IDT the kernel created */
+ leaq kexec_debug_idt(%rip), %rsi
+ pushq %rsi
+ pushw $0xff
+ lidt (%rsp)
+ addq $10, %rsp
+
+ //int3
+
/*
* Clear X86_CR4_CET (if it was set) such that we can clear CR0_WP
* below.
@@ -342,20 +363,20 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
/* copy source page to swap page */
movq kexec_pa_swap_page(%rip), %rdi
movl $512, %ecx
- rep ; movsq
+ rep movsq
/* copy destination page to source page */
movq %rax, %rdi
movq %rdx, %rsi
movl $512, %ecx
- rep ; movsq
+ rep movsq
/* copy swap page to destination page */
movq %rdx, %rdi
movq kexec_pa_swap_page(%rip), %rsi
.Lnoswap:
movl $512, %ecx
- rep ; movsq
+ rep movsq
lea PAGE_SIZE(%rax), %rsi
jmp .Lloop
@@ -364,3 +385,222 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
ret
int3
SYM_CODE_END(swap_pages)
+
+/*
+ * Generic 'print character' routine
+ * - %al: Character to be printed (may clobber %rax)
+ * - %rdx: MMIO address or port.
+ */
+#define XMTRDY 0x20
+
+#define TXR 0 /* Transmit register (WRITE) */
+#define LSR 5 /* Line Status */
+
+SYM_CODE_START_LOCAL_NOALIGN(pr_char_8250)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+ addw $LSR, %dx
+ xchg %al, %ah
+.Lxmtrdy_loop:
+ inb %dx, %al
+ testb $XMTRDY, %al
+ jnz .Lready
+ pause
+ jmp .Lxmtrdy_loop
+
+.Lready:
+ subw $LSR, %dx
+ xchg %al, %ah
+ outb %al, %dx
+pr_char_null:
+ ANNOTATE_NOENDBR
+
+ ANNOTATE_UNRET_SAFE
+ ret
+SYM_CODE_END(pr_char_8250)
+
+SYM_CODE_START_LOCAL_NOALIGN(pr_char_8250_mmio32)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+.Lxmtrdy_loop_mmio:
+ movb (LSR*4)(%rdx), %ah
+ testb $XMTRDY, %ah
+ jnz .Lready_mmio
+ pause
+ jmp .Lxmtrdy_loop_mmio
+
+.Lready_mmio:
+ movb %al, (%rdx)
+ ANNOTATE_UNRET_SAFE
+ ret
+SYM_CODE_END(pr_char_8250_mmio32)
+
+/*
+ * Load pr_char function pointer into %rsi and load %rdx with whatever
+ * that function wants to see there (typically port/MMIO address).
+ */
+.macro pr_setup
+ leaq pr_char_8250(%rip), %rsi
+ movw kexec_debug_8250_port(%rip), %dx
+ testw %dx, %dx
+ jnz 1f
+
+ leaq pr_char_8250_mmio32(%rip), %rsi
+ movq kexec_debug_8250_mmio32(%rip), %rdx
+ testq %rdx, %rdx
+ jnz 1f
+
+ leaq pr_char_null(%rip), %rsi
+1:
+.endm
+
+/* Print the nybble in %bl, clobber %rax */
+SYM_CODE_START_LOCAL_NOALIGN(pr_nybble)
+ UNWIND_HINT_FUNC
+ movb %bl, %al
+ nop
+ andb $0x0f, %al
+ addb $0x30, %al
+ cmpb $0x3a, %al
+ jb 1f
+ addb $('a' - '0' - 10), %al
+ ANNOTATE_RETPOLINE_SAFE
+1: jmp *%rsi
+SYM_CODE_END(pr_nybble)
+
+SYM_CODE_START_LOCAL_NOALIGN(pr_qword)
+ UNWIND_HINT_FUNC
+ movq $16, %rcx
+1: rolq $4, %rbx
+ call pr_nybble
+ loop 1b
+ movb $'\n', %al
+ ANNOTATE_RETPOLINE_SAFE
+ jmp *%rsi
+SYM_CODE_END(pr_qword)
+
+.macro print_reg a, b, c, d, r
+ movb $\a, %al
+ ANNOTATE_RETPOLINE_SAFE
+ call *%rsi
+ movb $\b, %al
+ ANNOTATE_RETPOLINE_SAFE
+ call *%rsi
+ movb $\c, %al
+ ANNOTATE_RETPOLINE_SAFE
+ call *%rsi
+ movb $\d, %al
+ ANNOTATE_RETPOLINE_SAFE
+ call *%rsi
+ movq \r, %rbx
+ call pr_qword
+.endm
+
+SYM_CODE_START_NOALIGN(kexec_debug_exc_vectors)
+ /* Each of these is 6 bytes. */
+.macro vec_err exc
+ UNWIND_HINT_ENTRY
+ . = kexec_debug_exc_vectors + (\exc * KEXEC_DEBUG_EXC_HANDLER_SIZE)
+ nop
+ nop
+ pushq $\exc
+ jmp exc_handler
+.endm
+
+.macro vec_noerr exc
+ UNWIND_HINT_ENTRY
+ . = kexec_debug_exc_vectors + (\exc * KEXEC_DEBUG_EXC_HANDLER_SIZE)
+ pushq $0
+ pushq $\exc
+ jmp exc_handler
+.endm
+
+ ANNOTATE_NOENDBR
+ vec_noerr 0 // #DE
+ vec_noerr 1 // #DB
+ vec_noerr 2 // #NMI
+ vec_noerr 3 // #BP
+ vec_noerr 4 // #OF
+ vec_noerr 5 // #BR
+ vec_noerr 6 // #UD
+ vec_noerr 7 // #NM
+ vec_err 8 // #DF
+ vec_noerr 9
+ vec_err 10 // #TS
+ vec_err 11 // #NP
+ vec_err 12 // #SS
+ vec_err 13 // #GP
+ vec_err 14 // #PF
+ vec_noerr 15
+SYM_CODE_END(kexec_debug_exc_vectors)
+
+SYM_CODE_START_LOCAL_NOALIGN(exc_handler)
+ /* No need for RET mitigations during kexec */
+ VALIDATE_UNRET_END
+
+ pushq %rax
+ pushq %rbx
+ pushq %rcx
+ pushq %rdx
+ pushq %rsi
+
+ /* Stack frame */
+#define EXC_SS 0x58 /* Architectural... */
+#define EXC_RSP 0x50
+#define EXC_EFLAGS 0x48
+#define EXC_CS 0x40
+#define EXC_RIP 0x38
+#define EXC_ERRORCODE 0x30 /* Either architectural or zero pushed by handler */
+#define EXC_EXCEPTION 0x28 /* Pushed by handler entry point */
+#define EXC_RAX 0x20 /* Pushed just above in exc_handler */
+#define EXC_RBX 0x18
+#define EXC_RCX 0x10
+#define EXC_RDX 0x08
+#define EXC_RSI 0x00
+
+ /* Set up %rdx/%rsi for debug output */
+ pr_setup
+
+ /* rip and exception info */
+ print_reg 'E', 'x', 'c', ':', EXC_EXCEPTION(%rsp)
+ print_reg 'E', 'r', 'r', ':', EXC_ERRORCODE(%rsp)
+ print_reg 'r', 'i', 'p', ':', EXC_RIP(%rsp)
+ print_reg 'r', 's', 'p', ':', EXC_RSP(%rsp)
+
+ /* We spilled these to the stack */
+ print_reg 'r', 'a', 'x', ':', EXC_RAX(%rsp)
+ print_reg 'r', 'b', 'x', ':', EXC_RBX(%rsp)
+ print_reg 'r', 'c', 'x', ':', EXC_RCX(%rsp)
+ print_reg 'r', 'd', 'x', ':', EXC_RDX(%rsp)
+ print_reg 'r', 's', 'i', ':', EXC_RSI(%rsp)
+
+ /* Other registers untouched */
+ print_reg 'r', 'd', 'i', ':', %rdi
+ print_reg 'r', '8', ' ', ':', %r8
+ print_reg 'r', '9', ' ', ':', %r9
+ print_reg 'r', '1', '0', ':', %r10
+ print_reg 'r', '1', '1', ':', %r11
+ print_reg 'r', '1', '2', ':', %r12
+ print_reg 'r', '1', '3', ':', %r13
+ print_reg 'r', '1', '4', ':', %r14
+ print_reg 'r', '1', '5', ':', %r15
+ print_reg 'c', 'r', '2', ':', %cr2
+
+ /* Only return from INT3 */
+ cmpq $3, EXC_EXCEPTION(%rsp)
+ jne .Ldie
+
+ popq %rsi
+ popq %rdx
+ popq %rcx
+ popq %rbx
+ popq %rax
+
+ addq $16, %rsp
+ iretq
+
+.Ldie:
+ hlt
+ jmp .Ldie
+
+SYM_CODE_END(exc_handler)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9d2a13b37833..fb27be697128 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -11,6 +11,7 @@
#include <linux/crash_dump.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
+#include <linux/hugetlb.h>
#include <linux/ima.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/initrd.h>
@@ -18,21 +19,19 @@
#include <linux/memblock.h>
#include <linux/panic_notifier.h>
#include <linux/pci.h>
+#include <linux/random.h>
#include <linux/root_dev.h>
-#include <linux/hugetlb.h>
-#include <linux/tboot.h>
-#include <linux/usb/xhci-dbgp.h>
#include <linux/static_call.h>
#include <linux/swiotlb.h>
-#include <linux/random.h>
+#include <linux/tboot.h>
+#include <linux/usb/xhci-dbgp.h>
+#include <linux/vmalloc.h>
#include <uapi/linux/mount.h>
#include <xen/xen.h>
#include <asm/apic.h>
-#include <asm/efi.h>
-#include <asm/numa.h>
#include <asm/bios_ebda.h>
#include <asm/bugs.h>
#include <asm/cacheinfo.h>
@@ -47,18 +46,16 @@
#include <asm/mce.h>
#include <asm/memtype.h>
#include <asm/mtrr.h>
-#include <asm/realmode.h>
+#include <asm/nmi.h>
+#include <asm/numa.h>
#include <asm/olpc_ofw.h>
#include <asm/pci-direct.h>
#include <asm/prom.h>
#include <asm/proto.h>
+#include <asm/realmode.h>
#include <asm/thermal.h>
#include <asm/unwind.h>
#include <asm/vsyscall.h>
-#include <linux/vmalloc.h>
-#if defined(CONFIG_X86_LOCAL_APIC)
-#include <asm/nmi.h>
-#endif
/*
* max_low_pfn_mapped: highest directly mapped pfn < 4 GB
@@ -134,6 +131,7 @@ struct ist_info ist_info;
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
+SYM_PIC_ALIAS(boot_cpu_data);
#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
__visible unsigned long mmu_cr4_features __ro_after_init;
@@ -151,6 +149,13 @@ int bootloader_type, bootloader_version;
static const struct ctl_table x86_sysctl_table[] = {
{
+ .procname = "unknown_nmi_panic",
+ .data = &unknown_nmi_panic,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "panic_on_unrecovered_nmi",
.data = &panic_on_unrecovered_nmi,
.maxlen = sizeof(int),
@@ -185,15 +190,6 @@ static const struct ctl_table x86_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
-#if defined(CONFIG_X86_LOCAL_APIC)
- {
- .procname = "unknown_nmi_panic",
- .data = &unknown_nmi_panic,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-#endif
#if defined(CONFIG_ACPI_SLEEP)
{
.procname = "acpi_video_flags",
@@ -286,8 +282,8 @@ static void __init cleanup_highmap(void)
static void __init reserve_brk(void)
{
if (_brk_end > _brk_start)
- memblock_reserve(__pa_symbol(_brk_start),
- _brk_end - _brk_start);
+ memblock_reserve_kern(__pa_symbol(_brk_start),
+ _brk_end - _brk_start);
/* Mark brk area as locked down and no longer taking any
new allocations */
@@ -360,7 +356,7 @@ static void __init early_reserve_initrd(void)
!ramdisk_image || !ramdisk_size)
return; /* No initrd provided by bootloader */
- memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
+ memblock_reserve_kern(ramdisk_image, ramdisk_end - ramdisk_image);
}
static void __init reserve_initrd(void)
@@ -413,7 +409,7 @@ static void __init add_early_ima_buffer(u64 phys_addr)
}
if (data->size) {
- memblock_reserve(data->addr, data->size);
+ memblock_reserve_kern(data->addr, data->size);
ima_kexec_buffer_phys = data->addr;
ima_kexec_buffer_size = data->size;
}
@@ -451,6 +447,29 @@ int __init ima_get_kexec_buffer(void **addr, size_t *size)
}
#endif
+static void __init add_kho(u64 phys_addr, u32 data_len)
+{
+ struct kho_data *kho;
+ u64 addr = phys_addr + sizeof(struct setup_data);
+ u64 size = data_len - sizeof(struct setup_data);
+
+ if (!IS_ENABLED(CONFIG_KEXEC_HANDOVER)) {
+ pr_warn("Passed KHO data, but CONFIG_KEXEC_HANDOVER not set. Ignoring.\n");
+ return;
+ }
+
+ kho = early_memremap(addr, size);
+ if (!kho) {
+ pr_warn("setup: failed to memremap kho data (0x%llx, 0x%llx)\n",
+ addr, size);
+ return;
+ }
+
+ kho_populate(kho->fdt_addr, kho->fdt_size, kho->scratch_addr, kho->scratch_size);
+
+ early_memunmap(kho, size);
+}
+
static void __init parse_setup_data(void)
{
struct setup_data *data;
@@ -479,6 +498,9 @@ static void __init parse_setup_data(void)
case SETUP_IMA:
add_early_ima_buffer(pa_data);
break;
+ case SETUP_KEXEC_KHO:
+ add_kho(pa_data, data_len);
+ break;
case SETUP_RNG_SEED:
data = early_memremap(pa_data, data_len);
add_bootloader_randomness(data->data, data->len);
@@ -553,7 +575,7 @@ static void __init memblock_x86_reserve_range_setup_data(void)
len = sizeof(*data);
pa_next = data->next;
- memblock_reserve(pa_data, sizeof(*data) + data->len);
+ memblock_reserve_kern(pa_data, sizeof(*data) + data->len);
if (data->type == SETUP_INDIRECT) {
len += data->len;
@@ -567,7 +589,7 @@ static void __init memblock_x86_reserve_range_setup_data(void)
indirect = (struct setup_indirect *)data->data;
if (indirect->type != SETUP_INDIRECT)
- memblock_reserve(indirect->addr, indirect->len);
+ memblock_reserve_kern(indirect->addr, indirect->len);
}
pa_data = pa_next;
@@ -770,8 +792,8 @@ static void __init early_reserve_memory(void)
* __end_of_kernel_reserve symbol must be explicitly reserved with a
* separate memblock_reserve() or they will be discarded.
*/
- memblock_reserve(__pa_symbol(_text),
- (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
+ memblock_reserve_kern(__pa_symbol(_text),
+ (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
/*
* The first 4Kb of memory is a BIOS owned area, but generally it is
diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
index 059685612362..2ddf23387c7e 100644
--- a/arch/x86/kernel/shstk.c
+++ b/arch/x86/kernel/shstk.c
@@ -173,8 +173,8 @@ static int shstk_setup(void)
return PTR_ERR((void *)addr);
fpregs_lock_and_load();
- wrmsrl(MSR_IA32_PL3_SSP, addr + size);
- wrmsrl(MSR_IA32_U_CET, CET_SHSTK_EN);
+ wrmsrq(MSR_IA32_PL3_SSP, addr + size);
+ wrmsrq(MSR_IA32_U_CET, CET_SHSTK_EN);
fpregs_unlock();
shstk->base = addr;
@@ -239,7 +239,7 @@ static unsigned long get_user_shstk_addr(void)
fpregs_lock_and_load();
- rdmsrl(MSR_IA32_PL3_SSP, ssp);
+ rdmsrq(MSR_IA32_PL3_SSP, ssp);
fpregs_unlock();
@@ -372,7 +372,7 @@ int setup_signal_shadow_stack(struct ksignal *ksig)
return -EFAULT;
fpregs_lock_and_load();
- wrmsrl(MSR_IA32_PL3_SSP, ssp);
+ wrmsrq(MSR_IA32_PL3_SSP, ssp);
fpregs_unlock();
return 0;
@@ -396,7 +396,7 @@ int restore_signal_shadow_stack(void)
return err;
fpregs_lock_and_load();
- wrmsrl(MSR_IA32_PL3_SSP, ssp);
+ wrmsrq(MSR_IA32_PL3_SSP, ssp);
fpregs_unlock();
return 0;
@@ -460,7 +460,7 @@ static int wrss_control(bool enable)
return 0;
fpregs_lock_and_load();
- rdmsrl(MSR_IA32_U_CET, msrval);
+ rdmsrq(MSR_IA32_U_CET, msrval);
if (enable) {
features_set(ARCH_SHSTK_WRSS);
@@ -473,7 +473,7 @@ static int wrss_control(bool enable)
msrval &= ~CET_WRSS_EN;
}
- wrmsrl(MSR_IA32_U_CET, msrval);
+ wrmsrq(MSR_IA32_U_CET, msrval);
unlock:
fpregs_unlock();
@@ -492,8 +492,8 @@ static int shstk_disable(void)
fpregs_lock_and_load();
/* Disable WRSS too when disabling shadow stack */
- wrmsrl(MSR_IA32_U_CET, 0);
- wrmsrl(MSR_IA32_PL3_SSP, 0);
+ wrmsrq(MSR_IA32_U_CET, 0);
+ wrmsrq(MSR_IA32_PL3_SSP, 0);
fpregs_unlock();
shstk_free(current);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 5f441039b572..2404233336ab 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -255,7 +255,7 @@ static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
bool stepping, failed;
- struct fpu *fpu = &current->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(current);
if (v8086_mode(regs))
save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);
@@ -423,14 +423,14 @@ bool sigaltstack_size_valid(size_t ss_size)
if (!fpu_state_size_dynamic() && !strict_sigaltstack_size)
return true;
- fsize += current->group_leader->thread.fpu.perm.__user_state_size;
+ fsize += x86_task_fpu(current->group_leader)->perm.__user_state_size;
if (likely(ss_size > fsize))
return true;
if (strict_sigaltstack_size)
return ss_size > fsize;
- mask = current->group_leader->thread.fpu.perm.__state_perm;
+ mask = x86_task_fpu(current->group_leader)->perm.__state_perm;
if (mask & XFEATURE_MASK_USER_DYNAMIC)
return ss_size > fsize;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index d6cf1e23c2a3..fc78c2325fd2 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -64,7 +64,7 @@
#include <asm/acpi.h>
#include <asm/cacheinfo.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
@@ -695,7 +695,7 @@ static void send_init_sequence(u32 phys_apicid)
/*
* Wake up AP by INIT, INIT, STARTUP sequence.
*/
-static int wakeup_secondary_cpu_via_init(u32 phys_apicid, unsigned long start_eip)
+static int wakeup_secondary_cpu_via_init(u32 phys_apicid, unsigned long start_eip, unsigned int cpu)
{
unsigned long send_status = 0, accept_status = 0;
int num_starts, j, maxlvt;
@@ -842,7 +842,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
* Returns zero if startup was successfully sent, else error code from
* ->wakeup_secondary_cpu.
*/
-static int do_boot_cpu(u32 apicid, int cpu, struct task_struct *idle)
+static int do_boot_cpu(u32 apicid, unsigned int cpu, struct task_struct *idle)
{
unsigned long start_ip = real_mode_header->trampoline_start;
int ret;
@@ -896,11 +896,11 @@ static int do_boot_cpu(u32 apicid, int cpu, struct task_struct *idle)
* - Use an INIT boot APIC message
*/
if (apic->wakeup_secondary_cpu_64)
- ret = apic->wakeup_secondary_cpu_64(apicid, start_ip);
+ ret = apic->wakeup_secondary_cpu_64(apicid, start_ip, cpu);
else if (apic->wakeup_secondary_cpu)
- ret = apic->wakeup_secondary_cpu(apicid, start_ip);
+ ret = apic->wakeup_secondary_cpu(apicid, start_ip, cpu);
else
- ret = wakeup_secondary_cpu_via_init(apicid, start_ip);
+ ret = wakeup_secondary_cpu_via_init(apicid, start_ip, cpu);
/* If the wakeup mechanism failed, cleanup the warm reset vector */
if (ret)
@@ -1188,6 +1188,12 @@ void cpu_disable_common(void)
remove_siblinginfo(cpu);
+ /*
+ * Stop allowing kernel-mode FPU. This is needed so that if the CPU is
+ * brought online again, the initial state is not allowed:
+ */
+ this_cpu_write(kernel_fpu_allowed, false);
+
/* It's now safe to remove this processor from the online map */
lock_vector_lock();
remove_cpu_from_maps(cpu);
@@ -1238,10 +1244,6 @@ void play_dead_common(void)
local_irq_disable();
}
-/*
- * We need to flush the caches before going to sleep, lest we have
- * dirty data in our caches when we come back up.
- */
void __noreturn mwait_play_dead(unsigned int eax_hint)
{
struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead);
@@ -1288,6 +1290,50 @@ void __noreturn mwait_play_dead(unsigned int eax_hint)
}
/*
+ * We need to flush the caches before going to sleep, lest we have
+ * dirty data in our caches when we come back up.
+ */
+static inline void mwait_play_dead_cpuid_hint(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ unsigned int highest_cstate = 0;
+ unsigned int highest_subcstate = 0;
+ int i;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ return;
+ if (!this_cpu_has(X86_FEATURE_MWAIT))
+ return;
+ if (!this_cpu_has(X86_FEATURE_CLFLUSH))
+ return;
+
+ eax = CPUID_LEAF_MWAIT;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+
+ /*
+ * eax will be 0 if EDX enumeration is not valid.
+ * Initialized below to cstate, sub_cstate value when EDX is valid.
+ */
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
+ eax = 0;
+ } else {
+ edx >>= MWAIT_SUBSTATE_SIZE;
+ for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+ if (edx & MWAIT_SUBSTATE_MASK) {
+ highest_cstate = i;
+ highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+ }
+ }
+ eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+ (highest_subcstate - 1);
+ }
+
+ mwait_play_dead(eax);
+}
+
+/*
* Kick all "offline" CPUs out of mwait on kexec(). See comment in
* mwait_play_dead().
*/
@@ -1337,9 +1383,9 @@ void native_play_dead(void)
play_dead_common();
tboot_shutdown(TB_SHUTDOWN_WFS);
- /* Below returns only on error. */
- cpuidle_play_dead();
- hlt_play_dead();
+ mwait_play_dead_cpuid_hint();
+ if (cpuidle_play_dead())
+ hlt_play_dead();
}
#else /* ... !CONFIG_HOTPLUG_CPU */
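
Aside (not part of the patch): the new mwait_play_dead_cpuid_hint() above derives the deepest MWAIT hint by walking the per-C-state sub-state counts in CPUID leaf 0x5 EDX four bits at a time and encoding hint = (cstate << 4) | (substates - 1); AMD/Hygon and CPUs lacking MWAIT or CLFLUSH skip the path entirely. A minimal user-space sketch of the same calculation, assuming GCC/Clang's <cpuid.h> and open-coding MWAIT_SUBSTATE_SIZE/MASK as 4/0xf:

/*
 * Editorial sketch, not part of the patch: illustrate how the MWAIT hint
 * is derived from CPUID leaf 0x5, mirroring mwait_play_dead_cpuid_hint().
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0, highest_subcstate = 0, hint = 0;

	if (!__get_cpuid(0x5, &eax, &ebx, &ecx, &edx))
		return 1;

	if (ecx & 0x1) {		/* sub-state counts in EDX are valid */
		edx >>= 4;		/* skip the C0 sub-state count */
		for (int i = 0; i < 7 && edx; i++, edx >>= 4) {
			if (edx & 0xf) {
				highest_cstate = i;
				highest_subcstate = edx & 0xf;
			}
		}
		if (highest_subcstate)	/* guard added for the sketch */
			hint = (highest_cstate << 4) | (highest_subcstate - 1);
	}

	printf("deepest MWAIT hint: 0x%x\n", hint);
	return 0;
}

In the kernel this hint is handed to mwait_play_dead() so the parked CPU requests the deepest C-state the hardware enumerates, with the caches flushed first as the comment above the function notes.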
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index a59c72e77645..378c388d1b31 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -81,7 +81,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
break;
case RET:
- if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+ if (cpu_wants_rethunk_at(insn))
code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
else
code = &retinsn;
@@ -90,7 +90,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
case JCC:
if (!func) {
func = __static_call_return;
- if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+ if (cpu_wants_rethunk())
func = x86_return_thunk;
}
@@ -108,7 +108,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
if (system_state == SYSTEM_BOOTING || modinit)
return text_poke_early(insn, code, size);
- text_poke_bp(insn, code, size, emulate);
+ smp_text_poke_single(insn, code, size, emulate);
}
static void __static_call_validate(u8 *insn, bool tail, bool tramp)
diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c
index b8e7abe00b06..708d61743d15 100644
--- a/arch/x86/kernel/trace_clock.c
+++ b/arch/x86/kernel/trace_clock.c
@@ -4,7 +4,7 @@
*/
#include <asm/trace_clock.h>
#include <asm/barrier.h>
-#include <asm/msr.h>
+#include <asm/tsc.h>
/*
* trace_clock_x86_tsc(): A clock that is just the cycle counter.
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
deleted file mode 100644
index 03ae1caaa878..000000000000
--- a/arch/x86/kernel/tracepoint.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2013 Seiji Aguchi <seiji.aguchi@hds.com>
- */
-#include <linux/jump_label.h>
-#include <linux/atomic.h>
-
-#include <asm/trace/exceptions.h>
-
-DEFINE_STATIC_KEY_FALSE(trace_pagefault_key);
-
-int trace_pagefault_reg(void)
-{
- static_branch_inc(&trace_pagefault_key);
- return 0;
-}
-
-void trace_pagefault_unreg(void)
-{
- static_branch_dec(&trace_pagefault_key);
-}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 9f88b8a78e50..c5c897a86418 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -68,6 +68,7 @@
#include <asm/vdso.h>
#include <asm/tdx.h>
#include <asm/cfi.h>
+#include <asm/msr.h>
#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
@@ -351,7 +352,7 @@ static noinstr bool handle_bug(struct pt_regs *regs)
case BUG_UD1_UBSAN:
if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
pr_crit("%s at %pS\n",
- report_ubsan_failure(regs, ud_imm),
+ report_ubsan_failure(ud_imm),
(void *)regs->ip);
}
break;
@@ -749,7 +750,7 @@ static bool try_fixup_enqcmd_gp(void)
if (current->pasid_activated)
return false;
- wrmsrl(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID);
+ wrmsrq(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID);
current->pasid_activated = 1;
return true;
@@ -882,16 +883,16 @@ static void do_int3_user(struct pt_regs *regs)
DEFINE_IDTENTRY_RAW(exc_int3)
{
/*
- * poke_int3_handler() is completely self contained code; it does (and
+ * smp_text_poke_int3_handler() is completely self contained code; it does (and
* must) *NOT* call out to anything, lest it hits upon yet another
* INT3.
*/
- if (poke_int3_handler(regs))
+ if (smp_text_poke_int3_handler(regs))
return;
/*
* irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
- * and therefore can trigger INT3, hence poke_int3_handler() must
+ * and therefore can trigger INT3, hence smp_text_poke_int3_handler() must
* be done before. If the entry came from kernel mode, then use
* nmi_enter() because the INT3 could have been hit in any context
* including NMI.
@@ -1120,9 +1121,9 @@ static noinstr void exc_debug_kernel(struct pt_regs *regs, unsigned long dr6)
*/
unsigned long debugctl;
- rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
debugctl |= DEBUGCTLMSR_BTF;
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
}
/*
@@ -1295,7 +1296,7 @@ DEFINE_IDTENTRY_RAW(exc_debug)
static void math_error(struct pt_regs *regs, int trapnr)
{
struct task_struct *task = current;
- struct fpu *fpu = &task->thread.fpu;
+ struct fpu *fpu = x86_task_fpu(task);
int si_code;
char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
"simd exception";
@@ -1386,11 +1387,11 @@ static bool handle_xfd_event(struct pt_regs *regs)
if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD))
return false;
- rdmsrl(MSR_IA32_XFD_ERR, xfd_err);
+ rdmsrq(MSR_IA32_XFD_ERR, xfd_err);
if (!xfd_err)
return false;
- wrmsrl(MSR_IA32_XFD_ERR, 0);
+ wrmsrq(MSR_IA32_XFD_ERR, 0);
/* Die if that happens in kernel space */
if (WARN_ON(!user_mode(regs)))
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 88e5a4ed9db3..87e749106dda 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -16,7 +16,7 @@
#include <linux/static_key.h>
#include <linux/static_call.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
@@ -29,6 +29,7 @@
#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/i8259.h>
+#include <asm/msr.h>
#include <asm/topology.h>
#include <asm/uv/uv.h>
#include <asm/sev.h>
@@ -1098,7 +1099,7 @@ static void __init detect_art(void)
if (art_base_clk.denominator < ART_MIN_DENOMINATOR)
return;
- rdmsrl(MSR_IA32_TSC_ADJUST, art_base_clk.offset);
+ rdmsrq(MSR_IA32_TSC_ADJUST, art_base_clk.offset);
/* Make this sticky over multiple CPU init calls */
setup_force_cpu_cap(X86_FEATURE_ART);
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 4334033658ed..ec3aa340d351 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/nmi.h>
+#include <asm/msr.h>
#include <asm/tsc.h>
struct tsc_adjust {
@@ -65,12 +66,12 @@ void tsc_verify_tsc_adjust(bool resume)
adj->nextcheck = jiffies + HZ;
- rdmsrl(MSR_IA32_TSC_ADJUST, curval);
+ rdmsrq(MSR_IA32_TSC_ADJUST, curval);
if (adj->adjusted == curval)
return;
/* Restore the original value */
- wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);
+ wrmsrq(MSR_IA32_TSC_ADJUST, adj->adjusted);
if (!adj->warned || resume) {
pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
@@ -142,7 +143,7 @@ static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
if (likely(!tsc_async_resets)) {
pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n",
cpu, bootval);
- wrmsrl(MSR_IA32_TSC_ADJUST, 0);
+ wrmsrq(MSR_IA32_TSC_ADJUST, 0);
bootval = 0;
} else {
pr_info("TSC ADJUST: CPU%u: %lld NOT forced to 0\n",
@@ -165,7 +166,7 @@ bool __init tsc_store_and_check_tsc_adjust(bool bootcpu)
if (check_tsc_unstable())
return false;
- rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
+ rdmsrq(MSR_IA32_TSC_ADJUST, bootval);
cur->bootval = bootval;
cur->nextcheck = jiffies + HZ;
tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(), bootcpu);
@@ -187,7 +188,7 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
return false;
- rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
+ rdmsrq(MSR_IA32_TSC_ADJUST, bootval);
cur->bootval = bootval;
cur->nextcheck = jiffies + HZ;
cur->warned = false;
@@ -229,7 +230,7 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
*/
if (bootval != ref->adjusted) {
cur->adjusted = ref->adjusted;
- wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
+ wrmsrq(MSR_IA32_TSC_ADJUST, ref->adjusted);
}
/*
* We have the TSCs forced to be in sync on this package. Skip sync
@@ -518,7 +519,7 @@ retry:
pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n",
cpu, cur_max_warp, cur->adjusted);
- wrmsrl(MSR_IA32_TSC_ADJUST, cur->adjusted);
+ wrmsrq(MSR_IA32_TSC_ADJUST, cur->adjusted);
goto retry;
}
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 9194695662b2..6d383839e839 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -840,6 +840,11 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
insn_byte_t p;
int i;
+ /* x86_nops[insn->length]; same as jmp with .offs = 0 */
+ if (insn->length <= ASM_NOP_MAX &&
+ !memcmp(insn->kaddr, x86_nops[insn->length], insn->length))
+ goto setup;
+
switch (opc1) {
case 0xeb: /* jmp 8 */
case 0xe9: /* jmp 32 */
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index ccdc45e5b759..4fa0be732af1 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -79,11 +79,13 @@ const_cpu_current_top_of_stack = cpu_current_top_of_stack;
#define BSS_DECRYPTED \
. = ALIGN(PMD_SIZE); \
__start_bss_decrypted = .; \
+ __pi___start_bss_decrypted = .; \
*(.bss..decrypted); \
. = ALIGN(PAGE_SIZE); \
__start_bss_decrypted_unused = .; \
. = ALIGN(PMD_SIZE); \
__end_bss_decrypted = .; \
+ __pi___end_bss_decrypted = .; \
#else
@@ -128,6 +130,7 @@ SECTIONS
/* Text and read-only data */
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = .;
+ __pi__text = .;
_stext = .;
ALIGN_ENTRY_TEXT_BEGIN
*(.text..__x86.rethunk_untrain)
@@ -391,6 +394,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE); /* keep VO_INIT_SIZE page aligned */
_end = .;
+ __pi__end = .;
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
@@ -466,10 +470,18 @@ SECTIONS
}
/*
- * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
+ * COMPILE_TEST kernels can be large - CONFIG_KASAN, for example, can cause
+ * this. Let's assume that nobody will be running a COMPILE_TEST kernel and
+ * let's assert that fuller build coverage is more valuable than being able to
+ * run a COMPILE_TEST kernel.
+ */
+#ifndef CONFIG_COMPILE_TEST
+/*
+ * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
*/
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE");
+#endif
/* needed for Clang - see arch/x86/entry/entry.S */
PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
@@ -497,6 +509,16 @@ PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
"SRSO function pair won't alias");
#endif
+#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
+. = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_thunk_rax not in second half of cacheline");
+. = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart");
+. = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
+#endif
+
+#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
+. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
+#endif
+
#endif /* CONFIG_X86_64 */
/*
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index fe8ea8c097de..2eeffcec5382 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -95,6 +95,8 @@ config KVM_SW_PROTECTED_VM
config KVM_INTEL
tristate "KVM for Intel (and compatible) processors support"
depends on KVM && IA32_FEAT_CTL
+ select KVM_GENERIC_PRIVATE_MEM if INTEL_TDX_HOST
+ select KVM_GENERIC_MEMORY_ATTRIBUTES if INTEL_TDX_HOST
help
Provides support for KVM on processors equipped with Intel's VT
extensions, a.k.a. Virtual Machine Extensions (VMX).
@@ -129,6 +131,16 @@ config X86_SGX_KVM
If unsure, say N.
+config KVM_INTEL_TDX
+ bool "Intel Trust Domain Extensions (TDX) support"
+ default y
+ depends on INTEL_TDX_HOST
+ help
+ Provides support for launching Intel Trust Domain Extensions (TDX)
+ confidential VMs on Intel processors.
+
+ If unsure, say N.
+
config KVM_AMD
tristate "KVM for AMD processors support"
depends on KVM && (CPU_SUP_AMD || CPU_SUP_HYGON)
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index f9dddb8cb466..a5d362c7b504 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -20,6 +20,7 @@ kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
kvm-intel-$(CONFIG_KVM_HYPERV) += vmx/hyperv.o vmx/hyperv_evmcs.o
+kvm-intel-$(CONFIG_KVM_INTEL_TDX) += vmx/tdx.o
kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 5e4d4934c0d3..b2d006756e02 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -21,7 +21,7 @@
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include <asm/sgx.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
@@ -81,17 +81,8 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted)
return ret;
}
-/*
- * Magic value used by KVM when querying userspace-provided CPUID entries and
- * doesn't care about the CPIUD index because the index of the function in
- * question is not significant. Note, this magic value must have at least one
- * bit set in bits[63:32] and must be consumed as a u64 by cpuid_entry2_find()
- * to avoid false positives when processing guest CPUID input.
- */
-#define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull
-
-static struct kvm_cpuid_entry2 *cpuid_entry2_find(struct kvm_vcpu *vcpu,
- u32 function, u64 index)
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry2(
+ struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
{
struct kvm_cpuid_entry2 *e;
int i;
@@ -108,8 +99,8 @@ static struct kvm_cpuid_entry2 *cpuid_entry2_find(struct kvm_vcpu *vcpu,
*/
lockdep_assert_irqs_enabled();
- for (i = 0; i < vcpu->arch.cpuid_nent; i++) {
- e = &vcpu->arch.cpuid_entries[i];
+ for (i = 0; i < nent; i++) {
+ e = &entries[i];
if (e->function != function)
continue;
@@ -140,26 +131,7 @@ static struct kvm_cpuid_entry2 *cpuid_entry2_find(struct kvm_vcpu *vcpu,
return NULL;
}
-
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
- u32 function, u32 index)
-{
- return cpuid_entry2_find(vcpu, function, index);
-}
-EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);
-
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
- u32 function)
-{
- return cpuid_entry2_find(vcpu, function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
-}
-EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
-
-/*
- * cpuid_entry2_find() and KVM_CPUID_INDEX_NOT_SIGNIFICANT should never be used
- * directly outside of kvm_find_cpuid_entry() and kvm_find_cpuid_entry_index().
- */
-#undef KVM_CPUID_INDEX_NOT_SIGNIFICANT
+EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry2);
static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
{
@@ -236,7 +208,7 @@ static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcp
struct kvm_cpuid_entry2 *entry;
u32 base;
- for_each_possible_hypervisor_cpuid_base(base) {
+ for_each_possible_cpuid_base_hypervisor(base) {
entry = kvm_find_cpuid_entry(vcpu, base);
if (entry) {
@@ -492,6 +464,20 @@ not_found:
return 36;
}
+int cpuid_query_maxguestphyaddr(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000000);
+ if (!best || best->eax < 0x80000008)
+ goto not_found;
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008);
+ if (best)
+ return (best->eax >> 16) & 0xff;
+not_found:
+ return 0;
+}
+
/*
* This "raw" version returns the reserved GPA bits without any adjustments for
* encryption technologies that usurp bits. The raw mask should be used if and
@@ -992,6 +978,7 @@ void kvm_set_cpu_caps(void)
F(FZRM),
F(FSRS),
F(FSRC),
+ F(WRMSRNS),
F(AMX_FP16),
F(AVX_IFMA),
F(LAM),
@@ -1107,6 +1094,7 @@ void kvm_set_cpu_caps(void)
F(AMD_SSB_NO),
F(AMD_STIBP),
F(AMD_STIBP_ALWAYS_ON),
+ F(AMD_IBRS_SAME_MODE),
F(AMD_PSFD),
F(AMD_IBPB_RET),
);
@@ -1164,6 +1152,7 @@ void kvm_set_cpu_caps(void)
kvm_cpu_cap_init(CPUID_8000_0021_EAX,
F(NO_NESTED_DATA_BP),
+ F(WRMSR_XX_BASE_NS),
/*
* Synthesize "LFENCE is serializing" into the AMD-defined entry
* in KVM's supported CPUID, i.e. if the feature is reported as
@@ -1177,10 +1166,13 @@ void kvm_set_cpu_caps(void)
SYNTHESIZED_F(LFENCE_RDTSC),
/* SmmPgCfgLock */
F(NULL_SEL_CLR_BASE),
+ /* UpperAddressIgnore */
F(AUTOIBRS),
+ F(PREFETCHI),
EMULATED_F(NO_SMM_CTL_MSR),
/* PrefetchCtlMsr */
- F(WRMSR_XX_BASE_NS),
+ /* GpOnUserCpuid */
+ /* EPSF */
SYNTHESIZED_F(SBPB),
SYNTHESIZED_F(IBPB_BRTYPE),
SYNTHESIZED_F(SRSO_NO),
@@ -1427,8 +1419,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
}
break;
case 0xa: { /* Architectural Performance Monitoring */
- union cpuid10_eax eax;
- union cpuid10_edx edx;
+ union cpuid10_eax eax = { };
+ union cpuid10_edx edx = { };
if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
@@ -1444,8 +1436,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
if (kvm_pmu_cap.version)
edx.split.anythread_deprecated = 1;
- edx.split.reserved1 = 0;
- edx.split.reserved2 = 0;
entry->eax = eax.full;
entry->ebx = kvm_pmu_cap.events_mask;
@@ -1763,7 +1753,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
break;
/* AMD Extended Performance Monitoring and Debug */
case 0x80000022: {
- union cpuid_0x80000022_ebx ebx;
+ union cpuid_0x80000022_ebx ebx = { };
entry->ecx = entry->edx = 0;
if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
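
Aside (not part of the patch): cpuid_query_maxguestphyaddr(), added earlier in this file, pulls the guest physical address width from bits 23:16 of CPUID 0x80000008 EAX and returns 0 when the leaf is absent; kvm_mmu_get_tdp_level() later uses it for TDX VMs in place of cpuid_maxphyaddr(). A minimal sketch of reading those fields from user space, assuming <cpuid.h>:

/*
 * Editorial sketch, not part of the patch: dump the address-width fields of
 * CPUID 0x80000008 EAX that cpuid_query_maxguestphyaddr() consumes.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the extended leaf is not supported. */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("physical address bits:       %u\n", eax & 0xff);
	printf("linear address bits:         %u\n", (eax >> 8) & 0xff);
	printf("guest physical address bits: %u\n", (eax >> 16) & 0xff);
	return 0;
}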
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index d2884162a46a..d3f5ae15a7ca 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -11,10 +11,34 @@ extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);
void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
- u32 function, u32 index);
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
- u32 function);
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry2(struct kvm_cpuid_entry2 *entries,
+ int nent, u32 function, u64 index);
+/*
+ * Magic value used by KVM when querying userspace-provided CPUID entries and
+ * doesn't care about the CPUID index because the index of the function in
+ * question is not significant. Note, this magic value must have at least one
+ * bit set in bits[63:32] and must be consumed as a u64 by kvm_find_cpuid_entry2()
+ * to avoid false positives when processing guest CPUID input.
+ *
+ * KVM_CPUID_INDEX_NOT_SIGNIFICANT should never be used directly outside of
+ * kvm_find_cpuid_entry2() and kvm_find_cpuid_entry().
+ */
+#define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull
+
+static inline struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
+ u32 function, u32 index)
+{
+ return kvm_find_cpuid_entry2(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
+ function, index);
+}
+
+static inline struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+ u32 function)
+{
+ return kvm_find_cpuid_entry2(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
+ function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
+}
+
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries,
unsigned int type);
@@ -34,6 +58,7 @@ void __init kvm_init_xstate_sizes(void);
u32 xstate_required_size(u64 xstate_bv, bool compacted);
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
+int cpuid_query_maxguestphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 995eb5054360..45dae2d5d2f1 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -296,11 +296,8 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
index == RTC_GSI) {
u16 dm = kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
- if (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
- e->fields.dest_id, dm) ||
- kvm_apic_pending_eoi(vcpu, e->fields.vector))
- __set_bit(e->fields.vector,
- ioapic_handled_vectors);
+ kvm_scan_ioapic_irq(vcpu, e->fields.dest_id, dm,
+ e->fields.vector, ioapic_handled_vectors);
}
}
spin_unlock(&ioapic->lock);
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 539333ac4b38..aa8cb4ac0479 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -120,4 +120,6 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu,
ulong *ioapic_handled_vectors);
void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
ulong *ioapic_handled_vectors);
+void kvm_scan_ioapic_irq(struct kvm_vcpu *vcpu, u32 dest_id, u16 dest_mode,
+ u8 vector, unsigned long *ioapic_handled_vectors);
#endif
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 63f66c51975a..97d68d837929 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -100,6 +100,9 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
if (kvm_cpu_has_extint(v))
return 1;
+ if (lapic_in_kernel(v) && v->arch.apic->guest_apic_protected)
+ return kvm_x86_call(protected_apic_has_interrupt)(v);
+
return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
}
EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 8136695f7b96..d6d792b5d1bd 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -402,6 +402,33 @@ void kvm_arch_post_irq_routing_update(struct kvm *kvm)
kvm_make_scan_ioapic_request(kvm);
}
+void kvm_scan_ioapic_irq(struct kvm_vcpu *vcpu, u32 dest_id, u16 dest_mode,
+ u8 vector, unsigned long *ioapic_handled_vectors)
+{
+ /*
+ * Intercept EOI if the vCPU is the target of the new IRQ routing, or
+ * the vCPU has a pending IRQ from the old routing, i.e. if the vCPU
+ * may receive a level-triggered IRQ in the future, or already received
+ * level-triggered IRQ. The EOI needs to be intercepted and forwarded
+ * to I/O APIC emulation so that the IRQ can be de-asserted.
+ */
+ if (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT, dest_id, dest_mode)) {
+ __set_bit(vector, ioapic_handled_vectors);
+ } else if (kvm_apic_pending_eoi(vcpu, vector)) {
+ __set_bit(vector, ioapic_handled_vectors);
+
+ /*
+ * Track the highest pending EOI for which the vCPU is NOT the
+ * target in the new routing. Only the EOI for the IRQ that is
+ * in-flight (for the old routing) needs to be intercepted, any
+ * future IRQs that arrive on this vCPU will be coincidental to
+ * the level-triggered routing and don't need to be intercepted.
+ */
+ if ((int)vector > vcpu->arch.highest_stale_pending_ioapic_eoi)
+ vcpu->arch.highest_stale_pending_ioapic_eoi = vector;
+ }
+}
+
void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
ulong *ioapic_handled_vectors)
{
@@ -424,11 +451,11 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
kvm_set_msi_irq(vcpu->kvm, entry, &irq);
- if (irq.trig_mode &&
- (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
- irq.dest_id, irq.dest_mode) ||
- kvm_apic_pending_eoi(vcpu, irq.vector)))
- __set_bit(irq.vector, ioapic_handled_vectors);
+ if (!irq.trig_mode)
+ continue;
+
+ kvm_scan_ioapic_irq(vcpu, irq.dest_id, irq.dest_mode,
+ irq.vector, ioapic_handled_vectors);
}
}
srcu_read_unlock(&kvm->irq_srcu, idx);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 28e3317124fd..73418dc0ebb2 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -655,27 +655,29 @@ static u8 count_vectors(void *bitmap)
return count;
}
-bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
+bool __kvm_apic_update_irr(unsigned long *pir, void *regs, int *max_irr)
{
+ unsigned long pir_vals[NR_PIR_WORDS];
+ u32 *__pir = (void *)pir_vals;
u32 i, vec;
- u32 pir_val, irr_val, prev_irr_val;
+ u32 irr_val, prev_irr_val;
int max_updated_irr;
max_updated_irr = -1;
*max_irr = -1;
+ if (!pi_harvest_pir(pir, pir_vals))
+ return false;
+
for (i = vec = 0; i <= 7; i++, vec += 32) {
u32 *p_irr = (u32 *)(regs + APIC_IRR + i * 0x10);
- irr_val = *p_irr;
- pir_val = READ_ONCE(pir[i]);
-
- if (pir_val) {
- pir_val = xchg(&pir[i], 0);
+ irr_val = READ_ONCE(*p_irr);
+ if (__pir[i]) {
prev_irr_val = irr_val;
do {
- irr_val = prev_irr_val | pir_val;
+ irr_val = prev_irr_val | __pir[i];
} while (prev_irr_val != irr_val &&
!try_cmpxchg(p_irr, &prev_irr_val, irr_val));
@@ -691,7 +693,7 @@ bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
-bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
+bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, unsigned long *pir, int *max_irr)
{
struct kvm_lapic *apic = vcpu->arch.apic;
bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr);
@@ -1459,6 +1461,14 @@ static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
if (!kvm_ioapic_handles_vector(apic, vector))
return;
+ /*
+ * If the intercepted EOI is for an IRQ that was pending from previous
+ * routing, then re-scan the I/O APIC routes as EOIs for the IRQ likely
+ * no longer need to be intercepted.
+ */
+ if (apic->vcpu->arch.highest_stale_pending_ioapic_eoi == vector)
+ kvm_make_request(KVM_REQ_SCAN_IOAPIC, apic->vcpu);
+
/* Request a KVM exit to inform the userspace IOAPIC. */
if (irqchip_split(apic->vcpu->kvm)) {
apic->vcpu->arch.pending_ioapic_eoi = vector;
@@ -1790,8 +1800,17 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
- u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
+ u32 reg;
+ /*
+ * Assume a timer IRQ was "injected" if the APIC is protected. KVM's
+ * copy of the vIRR is bogus, it's the responsibility of the caller to
+ * precisely check whether or not a timer IRQ is pending.
+ */
+ if (apic->guest_apic_protected)
+ return true;
+
+ reg = kvm_lapic_get_reg(apic, APIC_LVTT);
if (kvm_apic_hw_enabled(apic)) {
int vec = reg & APIC_VECTOR_MASK;
void *bitmap = apic->regs + APIC_ISR;
@@ -2650,6 +2669,7 @@ int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated)
kvm_recalculate_apic_map(vcpu->kvm);
return 0;
}
+EXPORT_SYMBOL_GPL(kvm_apic_set_base);
void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
{
@@ -2958,6 +2978,9 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
if (!kvm_apic_present(vcpu))
return -1;
+ if (apic->guest_apic_protected)
+ return -1;
+
__apic_update_ppr(apic, &ppr);
return apic_has_interrupt_for_ppr(apic, ppr);
}
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 1a8553ebdb42..4ce30db65828 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -65,6 +65,8 @@ struct kvm_lapic {
bool sw_enabled;
bool irr_pending;
bool lvt0_in_nmi_mode;
+ /* Select registers in the vAPIC cannot be read/written. */
+ bool guest_apic_protected;
/* Number of bits set in ISR. */
s16 isr_count;
/* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
@@ -101,8 +103,8 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int shorthand, unsigned int dest, int dest_mode);
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec);
-bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr);
-bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr);
+bool __kvm_apic_update_irr(unsigned long *pir, void *regs, int *max_irr);
+bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, unsigned long *pir, int *max_irr);
void kvm_apic_update_ppr(struct kvm_vcpu *vcpu);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
struct dest_map *dest_map);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 050a0e229a4d..b4b6860ab971 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -79,6 +79,7 @@ static inline gfn_t kvm_mmu_max_gfn(void)
u8 kvm_mmu_get_max_tdp_level(void);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
+void kvm_mmu_set_mmio_spte_value(struct kvm *kvm, u64 mmio_value);
void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
@@ -104,6 +105,9 @@ void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
+ if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
+ kvm_mmu_free_obsolete_roots(vcpu);
+
/*
* Checking root.hpa is sufficient even when KVM has mirror root.
* We can have either:
@@ -231,7 +235,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
return -(u32)fault & errcode;
}
-bool kvm_mmu_may_ignore_guest_pat(void);
+bool kvm_mmu_may_ignore_guest_pat(struct kvm *kvm);
int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
@@ -253,6 +257,9 @@ extern bool tdp_mmu_enabled;
#define tdp_mmu_enabled false
#endif
+bool kvm_tdp_mmu_gpa_is_mapped(struct kvm_vcpu *vcpu, u64 gpa);
+int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level);
+
static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
return !tdp_mmu_enabled || kvm_shadow_root_allocated(kvm);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 63bb77ee1bb1..cbc84c6abc2e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -110,6 +110,7 @@ static bool __ro_after_init tdp_mmu_allowed;
#ifdef CONFIG_X86_64
bool __read_mostly tdp_mmu_enabled = true;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444);
+EXPORT_SYMBOL_GPL(tdp_mmu_enabled);
#endif
static int max_huge_page_level __read_mostly;
@@ -1456,15 +1457,15 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
* enabled but it chooses between clearing the Dirty bit and Writeable
* bit based on the context.
*/
- if (kvm_x86_ops.cpu_dirty_log_size)
+ if (kvm->arch.cpu_dirty_log_size)
kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
else
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}
-int kvm_cpu_dirty_log_size(void)
+int kvm_cpu_dirty_log_size(struct kvm *kvm)
{
- return kvm_x86_ops.cpu_dirty_log_size;
+ return kvm->arch.cpu_dirty_log_size;
}
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
@@ -3019,7 +3020,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
}
if (is_shadow_present_pte(*sptep)) {
- if (prefetch)
+ if (prefetch && is_last_spte(*sptep, level) &&
+ pfn == spte_to_pfn(*sptep))
return RET_PF_SPURIOUS;
/*
@@ -3033,7 +3035,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
child = spte_to_child_sp(pte);
drop_parent_pte(vcpu->kvm, child, sptep);
flush = true;
- } else if (pfn != spte_to_pfn(*sptep)) {
+ } else if (WARN_ON_ONCE(pfn != spte_to_pfn(*sptep))) {
drop_spte(vcpu->kvm, sptep);
flush = true;
} else
@@ -4835,19 +4837,6 @@ out_unlock:
}
#endif
-bool kvm_mmu_may_ignore_guest_pat(void)
-{
- /*
- * When EPT is enabled (shadow_memtype_mask is non-zero), and the VM
- * has non-coherent DMA (DMA doesn't snoop CPU caches), KVM's ABI is to
- * honor the memtype from the guest's PAT so that guest accesses to
- * memory that is DMA'd aren't cached against the guest's wishes. As a
- * result, KVM _may_ ignore guest PAT, whereas without non-coherent DMA,
- * KVM _always_ ignores guest PAT (when EPT is enabled).
- */
- return shadow_memtype_mask;
-}
-
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
#ifdef CONFIG_X86_64
@@ -4858,8 +4847,7 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
return direct_page_fault(vcpu, fault);
}
-static int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
- u8 *level)
+int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level)
{
int r;
@@ -4873,6 +4861,10 @@ static int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
do {
if (signal_pending(current))
return -EINTR;
+
+ if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
+ return -EIO;
+
cond_resched();
r = kvm_mmu_do_page_fault(vcpu, gpa, error_code, true, NULL, level);
} while (r == RET_PF_RETRY);
@@ -4897,6 +4889,7 @@ static int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
return -EIO;
}
}
+EXPORT_SYMBOL_GPL(kvm_tdp_map_page);
long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
struct kvm_pre_fault_memory *range)
@@ -5589,12 +5582,19 @@ void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
{
+ int maxpa;
+
+ if (vcpu->kvm->arch.vm_type == KVM_X86_TDX_VM)
+ maxpa = cpuid_query_maxguestphyaddr(vcpu);
+ else
+ maxpa = cpuid_maxphyaddr(vcpu);
+
/* tdp_root_level is architecture forced level, use it if nonzero */
if (tdp_root_level)
return tdp_root_level;
/* Use 5-level TDP if and only if it's useful/necessary. */
- if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
+ if (max_tdp_level == 5 && maxpa <= 48)
return 4;
return max_tdp_level;
@@ -5913,6 +5913,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
out:
return r;
}
+EXPORT_SYMBOL_GPL(kvm_mmu_load);
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
@@ -5974,6 +5975,7 @@ void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
}
+EXPORT_SYMBOL_GPL(kvm_mmu_free_obsolete_roots);
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
int *bytes)
@@ -7238,6 +7240,7 @@ static void kvm_mmu_zap_memslot(struct kvm *kvm,
.start = slot->base_gfn,
.end = slot->base_gfn + slot->npages,
.may_block = true,
+ .attr_filter = KVM_FILTER_PRIVATE | KVM_FILTER_SHARED,
};
bool flush;
@@ -7669,9 +7672,30 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
}
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+ int level)
+{
+ return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+ int level)
+{
+ lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+ int level)
+{
+ lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
+}
+
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range)
{
+ struct kvm_memory_slot *slot = range->slot;
+ int level;
+
/*
* Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
* supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
@@ -7686,6 +7710,38 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
return false;
+ if (WARN_ON_ONCE(range->end <= range->start))
+ return false;
+
+ /*
+ * If the head and tail pages of the range currently allow a hugepage,
+ * i.e. reside fully in the slot and don't have mixed attributes, then
+ * add each corresponding hugepage range to the ongoing invalidation,
+ * e.g. to prevent KVM from creating a hugepage in response to a fault
+ * for a gfn whose attributes aren't changing. Note, only the range
+ * of gfns whose attributes are being modified needs to be explicitly
+ * unmapped, as that will unmap any existing hugepages.
+ */
+ for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+ gfn_t start = gfn_round_for_level(range->start, level);
+ gfn_t end = gfn_round_for_level(range->end - 1, level);
+ gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
+
+ if ((start != range->start || start + nr_pages > range->end) &&
+ start >= slot->base_gfn &&
+ start + nr_pages <= slot->base_gfn + slot->npages &&
+ !hugepage_test_mixed(slot, start, level))
+ kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
+
+ if (end == start)
+ continue;
+
+ if ((end + nr_pages) > range->end &&
+ (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
+ !hugepage_test_mixed(slot, end, level))
+ kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
+ }
+
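	/*
	 * Worked example (illustrative numbers, 2MiB level where
	 * KVM_PAGES_PER_HPAGE == 512): for range [0x7ff, 0xa01), the head
	 * hugepage is [0x600, 0x800) and the tail hugepage is [0xa00, 0xc00).
	 * Both straddle a range boundary, so both are added to the ongoing
	 * invalidation provided they sit fully inside the slot and aren't
	 * already marked mixed.
	 */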
/* Unmap the old attribute page. */
if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
range->attr_filter = KVM_FILTER_SHARED;
@@ -7695,23 +7751,7 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
return kvm_unmap_gfn_range(kvm, range);
}
-static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
- int level)
-{
- return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
-}
-
-static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
- int level)
-{
- lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
-}
-static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
- int level)
-{
- lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
-}
static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn, int level, unsigned long attrs)
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 75f00598289d..db8f33e4de62 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -187,7 +187,8 @@ static inline gfn_t kvm_gfn_root_bits(const struct kvm *kvm, const struct kvm_mm
return kvm_gfn_direct_bits(kvm);
}
-static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
+static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm *kvm,
+ struct kvm_mmu_page *sp)
{
/*
* When using the EPT page-modification log, the GPAs in the CPU dirty
@@ -197,7 +198,7 @@ static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
* being enabled is mandatory as the bits used to denote WP-only SPTEs
* are reserved for PAE paging (32-bit KVM).
*/
- return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
+ return kvm->arch.cpu_dirty_log_size && sp->role.guest_mode;
}
static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 561c331fd6ec..1b17b12393a8 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -172,6 +172,9 @@ static int kvm_enable_external_write_tracking(struct kvm *kvm)
struct kvm_memory_slot *slot;
int r = 0, i, bkt;
+ if (kvm->arch.vm_type == KVM_X86_TDX_VM)
+ return -EOPNOTSUPP;
+
mutex_lock(&kvm->slots_arch_lock);
/*
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 0f9f47b4ab0e..cfce03d8f123 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -37,7 +37,6 @@ u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
-u64 __read_mostly shadow_memtype_mask;
u64 __read_mostly shadow_me_value;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;
@@ -96,8 +95,6 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
u64 spte = generation_mmio_spte_mask(gen);
u64 gpa = gfn << PAGE_SHIFT;
- WARN_ON_ONCE(!vcpu->kvm->arch.shadow_mmio_value);
-
access &= shadow_mmio_access_mask;
spte |= vcpu->kvm->arch.shadow_mmio_value | access;
spte |= gpa | shadow_nonpresent_or_rsvd_mask;
@@ -177,7 +174,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (sp->role.ad_disabled)
spte |= SPTE_TDP_AD_DISABLED;
- else if (kvm_mmu_page_ad_need_write_protect(sp))
+ else if (kvm_mmu_page_ad_need_write_protect(vcpu->kvm, sp))
spte |= SPTE_TDP_AD_WRPROT_ONLY;
spte |= shadow_present_mask;
@@ -212,9 +209,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (level > PG_LEVEL_4K)
spte |= PT_PAGE_SIZE_MASK;
- if (shadow_memtype_mask)
- spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
- kvm_is_mmio_pfn(pfn));
+ spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn));
if (host_writable)
spte |= shadow_host_writable_mask;
else
@@ -440,6 +435,12 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
+void kvm_mmu_set_mmio_spte_value(struct kvm *kvm, u64 mmio_value)
+{
+ kvm->arch.shadow_mmio_value = mmio_value;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_value);
+
void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
{
/* shadow_me_value must be a subset of shadow_me_mask */
@@ -463,13 +464,7 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
/* VMX_EPT_SUPPRESS_VE_BIT is needed for W or X violation. */
shadow_present_mask =
(has_exec_only ? 0ull : VMX_EPT_READABLE_MASK) | VMX_EPT_SUPPRESS_VE_BIT;
- /*
- * EPT overrides the host MTRRs, and so KVM must program the desired
- * memtype directly into the SPTEs. Note, this mask is just the mask
- * of all bits that factor into the memtype, the actual memtype must be
- * dynamically calculated, e.g. to ensure host MMIO is mapped UC.
- */
- shadow_memtype_mask = VMX_EPT_MT_MASK | VMX_EPT_IPAT_BIT;
+
shadow_acc_track_mask = VMX_EPT_RWX_MASK;
shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;
@@ -521,12 +516,6 @@ void kvm_mmu_reset_all_pte_masks(void)
shadow_x_mask = 0;
shadow_present_mask = PT_PRESENT_MASK;
- /*
- * For shadow paging and NPT, KVM uses PAT entry '0' to encode WB
- * memtype in the SPTEs, i.e. relies on host MTRRs to provide the
- * correct memtype (WB is the "weakest" memtype).
- */
- shadow_memtype_mask = 0;
shadow_acc_track_mask = 0;
shadow_me_mask = 0;
shadow_me_value = 0;
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 79cdceba9857..1e94f081bdaf 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -187,7 +187,6 @@ extern u64 __read_mostly shadow_mmio_value;
extern u64 __read_mostly shadow_mmio_mask;
extern u64 __read_mostly shadow_mmio_access_mask;
extern u64 __read_mostly shadow_present_mask;
-extern u64 __read_mostly shadow_memtype_mask;
extern u64 __read_mostly shadow_me_value;
extern u64 __read_mostly shadow_me_mask;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 7cc0564f5f97..7f3d7229b2c1 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -40,7 +40,9 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
kvm_tdp_mmu_invalidate_roots(kvm, KVM_VALID_ROOTS);
kvm_tdp_mmu_zap_invalidated_roots(kvm, false);
- WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
+#ifdef CONFIG_KVM_PROVE_MMU
+ KVM_MMU_WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
+#endif
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
/*
@@ -325,13 +327,17 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
kvm_account_pgtable_pages((void *)sp->spt, +1);
+#ifdef CONFIG_KVM_PROVE_MMU
atomic64_inc(&kvm->arch.tdp_mmu_pages);
+#endif
}
static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
kvm_account_pgtable_pages((void *)sp->spt, -1);
+#ifdef CONFIG_KVM_PROVE_MMU
atomic64_dec(&kvm->arch.tdp_mmu_pages);
+#endif
}
/**
@@ -372,7 +378,7 @@ static void remove_external_spte(struct kvm *kvm, gfn_t gfn, u64 old_spte,
/* Zapping leaf spte is allowed only when write lock is held. */
lockdep_assert_held_write(&kvm->mmu_lock);
	/* Because the write lock is held, the operation should succeed. */
- ret = static_call(kvm_x86_remove_external_spte)(kvm, gfn, level, old_pfn);
+ ret = kvm_x86_call(remove_external_spte)(kvm, gfn, level, old_pfn);
KVM_BUG_ON(ret, kvm);
}
@@ -479,8 +485,8 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
}
if (is_mirror_sp(sp) &&
- WARN_ON(static_call(kvm_x86_free_external_spt)(kvm, base_gfn, sp->role.level,
- sp->external_spt))) {
+ WARN_ON(kvm_x86_call(free_external_spt)(kvm, base_gfn, sp->role.level,
+ sp->external_spt))) {
/*
		 * Failed to free the page table page in the mirror page table and
		 * there is nothing further to do.
@@ -532,12 +538,12 @@ static int __must_check set_external_spte_present(struct kvm *kvm, tdp_ptep_t sp
* external page table, or leaf.
*/
if (is_leaf) {
- ret = static_call(kvm_x86_set_external_spte)(kvm, gfn, level, new_pfn);
+ ret = kvm_x86_call(set_external_spte)(kvm, gfn, level, new_pfn);
} else {
void *external_spt = get_external_spt(gfn, new_spte, level);
KVM_BUG_ON(!external_spt, kvm);
- ret = static_call(kvm_x86_link_external_spt)(kvm, gfn, level, external_spt);
+ ret = kvm_x86_call(link_external_spt)(kvm, gfn, level, external_spt);
}
if (ret)
__kvm_tdp_mmu_write_spte(sptep, old_spte);
@@ -1147,13 +1153,12 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
return RET_PF_RETRY;
- if (fault->prefetch && is_shadow_present_pte(iter->old_spte))
- return RET_PF_SPURIOUS;
-
if (is_shadow_present_pte(iter->old_spte) &&
- is_access_allowed(fault, iter->old_spte) &&
- is_last_spte(iter->old_spte, iter->level))
+ (fault->prefetch || is_access_allowed(fault, iter->old_spte)) &&
+ is_last_spte(iter->old_spte, iter->level)) {
+ WARN_ON_ONCE(fault->pfn != spte_to_pfn(iter->old_spte));
return RET_PF_SPURIOUS;
+ }
if (unlikely(!fault->slot))
new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
@@ -1624,21 +1629,21 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
}
}
-static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
+static bool tdp_mmu_need_write_protect(struct kvm *kvm, struct kvm_mmu_page *sp)
{
/*
* All TDP MMU shadow pages share the same role as their root, aside
* from level, so it is valid to key off any shadow page to determine if
* write protection is needed for an entire tree.
*/
- return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled;
+ return kvm_mmu_page_ad_need_write_protect(kvm, sp) || !kvm_ad_enabled;
}
static void clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end)
{
- const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
- shadow_dirty_mask;
+ const u64 dbit = tdp_mmu_need_write_protect(kvm, root) ?
+ PT_WRITABLE_MASK : shadow_dirty_mask;
struct tdp_iter iter;
rcu_read_lock();
@@ -1683,8 +1688,8 @@ void kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t gfn, unsigned long mask, bool wrprot)
{
- const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
- shadow_dirty_mask;
+ const u64 dbit = (wrprot || tdp_mmu_need_write_protect(kvm, root)) ?
+ PT_WRITABLE_MASK : shadow_dirty_mask;
struct tdp_iter iter;
lockdep_assert_held_write(&kvm->mmu_lock);
@@ -1905,16 +1910,13 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
*
* Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
*/
-int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
- int *root_level)
+static int __kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+ struct kvm_mmu_page *root)
{
- struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
struct tdp_iter iter;
gfn_t gfn = addr >> PAGE_SHIFT;
int leaf = -1;
- *root_level = vcpu->arch.mmu->root_role.level;
-
for_each_tdp_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
leaf = iter.level;
sptes[leaf] = iter.old_spte;
@@ -1923,6 +1925,36 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
return leaf;
}
+int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+ int *root_level)
+{
+ struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
+ *root_level = vcpu->arch.mmu->root_role.level;
+
+ return __kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root);
+}
+
+bool kvm_tdp_mmu_gpa_is_mapped(struct kvm_vcpu *vcpu, u64 gpa)
+{
+ struct kvm *kvm = vcpu->kvm;
+ bool is_direct = kvm_is_addr_direct(kvm, gpa);
+ hpa_t root = is_direct ? vcpu->arch.mmu->root.hpa :
+ vcpu->arch.mmu->mirror_root_hpa;
+ u64 sptes[PT64_ROOT_MAX_LEVEL + 1], spte;
+ int leaf;
+
+ lockdep_assert_held(&kvm->mmu_lock);
+ rcu_read_lock();
+ leaf = __kvm_tdp_mmu_get_walk(vcpu, gpa, sptes, root_to_sp(root));
+ rcu_read_unlock();
+ if (leaf < 0)
+ return false;
+
+ spte = sptes[leaf];
+ return is_shadow_present_pte(spte) && is_last_spte(spte, leaf);
+}
+EXPORT_SYMBOL_GPL(kvm_tdp_mmu_gpa_is_mapped);
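/*
 * Minimal usage sketch (hypothetical caller): the walk asserts that mmu_lock
 * is held, so a typical query is wrapped in the read lock, e.g.
 *
 *	read_lock(&vcpu->kvm->mmu_lock);
 *	mapped = kvm_tdp_mmu_gpa_is_mapped(vcpu, gpa);
 *	read_unlock(&vcpu->kvm->mmu_lock);
 */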
+
/*
* Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 699e551ec93b..9864c057187d 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -131,6 +131,7 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
kvm_mmu_reset_context(vcpu);
}
+EXPORT_SYMBOL_GPL(kvm_smm_changed);
void process_smi(struct kvm_vcpu *vcpu)
{
diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
index a1cf2ac5bd78..551703fbe200 100644
--- a/arch/x86/kvm/smm.h
+++ b/arch/x86/kvm/smm.h
@@ -142,6 +142,9 @@ union kvm_smram {
static inline int kvm_inject_smi(struct kvm_vcpu *vcpu)
{
+ if (!kvm_x86_call(has_emulated_msr)(vcpu->kvm, MSR_IA32_SMBASE))
+ return -ENOTTY;
+
kvm_make_request(KVM_REQ_SMI, vcpu);
return 0;
}
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 65fd245a9953..067f8e3f5a0d 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -20,6 +20,7 @@
#include <linux/kvm_host.h>
#include <asm/irq_remapping.h>
+#include <asm/msr.h>
#include "trace.h"
#include "lapic.h"
@@ -330,7 +331,7 @@ void avic_ring_doorbell(struct kvm_vcpu *vcpu)
int cpu = READ_ONCE(vcpu->cpu);
if (cpu != get_cpu()) {
- wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
+ wrmsrq(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
trace_kvm_avic_doorbell(vcpu->vcpu_id, kvm_cpu_get_apicid(cpu));
}
put_cpu();
@@ -796,12 +797,15 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
struct amd_svm_iommu_ir *ir;
u64 entry;
+ if (WARN_ON_ONCE(!pi->ir_data))
+ return -EINVAL;
+
/**
* In some cases, the existing irte is updated and re-set,
	 * so we need to check here if it's already been added
* to the ir_list.
*/
- if (pi->ir_data && (pi->prev_ga_tag != 0)) {
+ if (pi->prev_ga_tag) {
struct kvm *kvm = svm->vcpu.kvm;
u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
@@ -820,7 +824,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
* Allocating new amd_iommu_pi_data, which will get
* add to the per-vcpu ir_list.
*/
- ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
+ ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT);
if (!ir) {
ret = -ENOMEM;
goto out;
@@ -896,10 +900,10 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_irq_routing_table *irq_rt;
+ bool enable_remapped_mode = true;
int idx, ret = 0;
- if (!kvm_arch_has_assigned_device(kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ if (!kvm_arch_has_assigned_device(kvm) || !kvm_arch_has_irq_bypass())
return 0;
pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
@@ -933,6 +937,8 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
kvm_vcpu_apicv_active(&svm->vcpu)) {
struct amd_iommu_pi_data pi;
+ enable_remapped_mode = false;
+
/* Try to enable guest_mode in IRTE */
pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
AVIC_HPA_MASK);
@@ -951,33 +957,6 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
*/
if (!ret && pi.is_guest_mode)
svm_ir_list_add(svm, &pi);
- } else {
- /* Use legacy mode in IRTE */
- struct amd_iommu_pi_data pi;
-
- /**
- * Here, pi is used to:
- * - Tell IOMMU to use legacy mode for this interrupt.
- * - Retrieve ga_tag of prior interrupt remapping data.
- */
- pi.prev_ga_tag = 0;
- pi.is_guest_mode = false;
- ret = irq_set_vcpu_affinity(host_irq, &pi);
-
- /**
- * Check if the posted interrupt was previously
- * setup with the guest_mode by checking if the ga_tag
- * was cached. If so, we need to clean up the per-vcpu
- * ir_list.
- */
- if (!ret && pi.prev_ga_tag) {
- int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
- struct kvm_vcpu *vcpu;
-
- vcpu = kvm_get_vcpu_by_id(kvm, id);
- if (vcpu)
- svm_ir_list_del(to_svm(vcpu), &pi);
- }
}
if (!ret && svm) {
@@ -993,6 +972,34 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
}
ret = 0;
+ if (enable_remapped_mode) {
+ /* Use legacy mode in IRTE */
+ struct amd_iommu_pi_data pi;
+
+ /**
+ * Here, pi is used to:
+ * - Tell IOMMU to use legacy mode for this interrupt.
+ * - Retrieve ga_tag of prior interrupt remapping data.
+ */
+ pi.prev_ga_tag = 0;
+ pi.is_guest_mode = false;
+ ret = irq_set_vcpu_affinity(host_irq, &pi);
+
+ /**
+ * Check if the posted interrupt was previously
+	 * set up with the guest_mode by checking if the ga_tag
+ * was cached. If so, we need to clean up the per-vcpu
+ * ir_list.
+ */
+ if (!ret && pi.prev_ga_tag) {
+ int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
+ struct kvm_vcpu *vcpu;
+
+ vcpu = kvm_get_vcpu_by_id(kvm, id);
+ if (vcpu)
+ svm_ir_list_del(to_svm(vcpu), &pi);
+ }
+ }
out:
srcu_read_unlock(&kvm->irq_srcu, idx);
return ret;
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 834b67672d50..8427a48b8b7a 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -678,6 +678,33 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
+ /*
+ * Stash vmcb02's counter if the guest hasn't moved past the guilty
+ * instruction; otherwise, reset the counter to '0'.
+ *
+	 * In order to detect whether L2 has made forward progress, track the
+	 * RIP at which a bus lock has occurred on a per-vmcb12 basis. If RIP
+	 * has changed, the guest has clearly made forward progress even if
+	 * bus_lock_counter remained '1', so reset bus_lock_counter to '0'.
+	 * E.g. in the scenario where a bus lock happened in L1 before VMRUN,
+	 * the bus lock firmly happened on an instruction in the past. Even if
+	 * vmcb01's counter is still '1' (because the guilty instruction got
+	 * patched), the vCPU has clearly made forward progress and so KVM
+	 * should reset vmcb02's counter to '0'.
+	 *
+	 * If the RIP hasn't changed, stash the bus lock counter at nested VMRUN
+	 * to prevent the same guilty instruction from triggering a VM-Exit. E.g.
+	 * if userspace rate-limits the vCPU, then it's entirely possible that
+	 * L1's tick interrupt is pending by the time userspace re-runs the
+	 * vCPU. If KVM unconditionally clears the counter on VMRUN, then when
+	 * L1 re-enters L2, the same instruction will trigger a VM-Exit and the
+	 * entire cycle will start over.
+ */
+ if (vmcb02->save.rip && (svm->nested.ctl.bus_lock_rip == vmcb02->save.rip))
+ vmcb02->control.bus_lock_counter = 1;
+ else
+ vmcb02->control.bus_lock_counter = 0;
+
/* Done at vmrun: asid. */
/* Also overwritten later if necessary. */
@@ -1039,8 +1066,17 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
}
+ /*
+ * Invalidate bus_lock_rip unless KVM is still waiting for the guest
+ * to make forward progress before re-enabling bus lock detection.
+ */
+ if (!vmcb02->control.bus_lock_counter)
+ svm->nested.ctl.bus_lock_rip = INVALID_GPA;
+
nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
+ kvm_nested_vmexit_handle_ibrs(vcpu);
+
svm_switch_vmcb(svm, &svm->vmcb01);
/*
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0bc708ee2788..5a69b657dae9 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -26,6 +26,7 @@
#include <asm/fpu/xcr.h>
#include <asm/fpu/xstate.h>
#include <asm/debugreg.h>
+#include <asm/msr.h>
#include <asm/sev.h>
#include "mmu.h"
@@ -560,6 +561,8 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
return -EFAULT;
+ sev->policy = params.policy;
+
memset(&start, 0, sizeof(start));
dh_blob = NULL;
@@ -1592,11 +1595,11 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* allocate memory for header and transport buffer */
ret = -ENOMEM;
- hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
+ hdr = kzalloc(params.hdr_len, GFP_KERNEL);
if (!hdr)
goto e_unpin;
- trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
+ trans_data = kzalloc(params.trans_len, GFP_KERNEL);
if (!trans_data)
goto e_free_hdr;
@@ -1882,70 +1885,6 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
atomic_set_release(&src_sev->migration_in_progress, 0);
}
-/* vCPU mutex subclasses. */
-enum sev_migration_role {
- SEV_MIGRATION_SOURCE = 0,
- SEV_MIGRATION_TARGET,
- SEV_NR_MIGRATION_ROLES,
-};
-
-static int sev_lock_vcpus_for_migration(struct kvm *kvm,
- enum sev_migration_role role)
-{
- struct kvm_vcpu *vcpu;
- unsigned long i, j;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (mutex_lock_killable_nested(&vcpu->mutex, role))
- goto out_unlock;
-
-#ifdef CONFIG_PROVE_LOCKING
- if (!i)
- /*
- * Reset the role to one that avoids colliding with
- * the role used for the first vcpu mutex.
- */
- role = SEV_NR_MIGRATION_ROLES;
- else
- mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
-#endif
- }
-
- return 0;
-
-out_unlock:
-
- kvm_for_each_vcpu(j, vcpu, kvm) {
- if (i == j)
- break;
-
-#ifdef CONFIG_PROVE_LOCKING
- if (j)
- mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
-#endif
-
- mutex_unlock(&vcpu->mutex);
- }
- return -EINTR;
-}
-
-static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
-{
- struct kvm_vcpu *vcpu;
- unsigned long i;
- bool first = true;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (first)
- first = false;
- else
- mutex_acquire(&vcpu->mutex.dep_map,
- SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
-
- mutex_unlock(&vcpu->mutex);
- }
-}
-
static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
{
struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
@@ -2083,10 +2022,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
charged = true;
}
- ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
+ ret = kvm_lock_all_vcpus(kvm);
if (ret)
goto out_dst_cgroup;
- ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
+ ret = kvm_lock_all_vcpus(source_kvm);
if (ret)
goto out_dst_vcpu;
@@ -2100,9 +2039,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
ret = 0;
out_source_vcpu:
- sev_unlock_vcpus_for_migration(source_kvm);
+ kvm_unlock_all_vcpus(source_kvm);
out_dst_vcpu:
- sev_unlock_vcpus_for_migration(kvm);
+ kvm_unlock_all_vcpus(kvm);
out_dst_cgroup:
/* Operates on the source on success, on the destination on failure. */
if (charged)
@@ -2199,6 +2138,8 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (params.policy & SNP_POLICY_MASK_SINGLE_SOCKET)
return -EINVAL;
+ sev->policy = params.policy;
+
sev->snp_context = snp_context_create(kvm, argp);
if (!sev->snp_context)
return -ENOTTY;
@@ -2933,6 +2874,7 @@ void __init sev_set_cpu_caps(void)
void __init sev_hardware_setup(void)
{
unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
+ struct sev_platform_init_args init_args = {0};
bool sev_snp_supported = false;
bool sev_es_supported = false;
bool sev_supported = false;
@@ -3059,6 +3001,15 @@ out:
sev_supported_vmsa_features = 0;
if (sev_es_debug_swap_enabled)
sev_supported_vmsa_features |= SVM_SEV_FEAT_DEBUG_SWAP;
+
+ if (!sev_enabled)
+ return;
+
+ /*
+ * Do both SNP and SEV initialization at KVM module load.
+ */
+ init_args.probe = true;
+ sev_platform_init(&init_args);
}
void sev_hardware_unsetup(void)
@@ -3074,6 +3025,8 @@ void sev_hardware_unsetup(void)
misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
+
+ sev_platform_shutdown();
}
int sev_cpu_init(struct svm_cpu_data *sd)
@@ -3119,7 +3072,7 @@ static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
* back to WBINVD if this faults so as not to make any problems worse
* by leaving stale encrypted data in the cache.
*/
- if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
+ if (WARN_ON_ONCE(wrmsrq_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
goto do_wbinvd;
return;
@@ -3173,9 +3126,14 @@ skip_vmsa_free:
kvfree(svm->sev_es.ghcb_sa);
}
+static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
+{
+ return (((u64)control->exit_code_hi) << 32) | control->exit_code;
+}
+
static void dump_ghcb(struct vcpu_svm *svm)
{
- struct ghcb *ghcb = svm->sev_es.ghcb;
+ struct vmcb_control_area *control = &svm->vmcb->control;
unsigned int nbits;
/* Re-use the dump_invalid_vmcb module parameter */
@@ -3184,18 +3142,24 @@ static void dump_ghcb(struct vcpu_svm *svm)
return;
}
- nbits = sizeof(ghcb->save.valid_bitmap) * 8;
+ nbits = sizeof(svm->sev_es.valid_bitmap) * 8;
- pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
+ /*
+ * Print KVM's snapshot of the GHCB values that were (unsuccessfully)
+ * used to handle the exit. If the guest has since modified the GHCB
+ * itself, dumping the raw GHCB won't help debug why KVM was unable to
+ * handle the VMGEXIT that KVM observed.
+ */
+ pr_err("GHCB (GPA=%016llx) snapshot:\n", svm->vmcb->control.ghcb_gpa);
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
- ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
+ kvm_ghcb_get_sw_exit_code(control), kvm_ghcb_sw_exit_code_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
- ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
+ control->exit_info_1, kvm_ghcb_sw_exit_info_1_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
- ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
+ control->exit_info_2, kvm_ghcb_sw_exit_info_2_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
- ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
- pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
+ svm->sev_es.sw_scratch, kvm_ghcb_sw_scratch_is_valid(svm));
+ pr_err("%-20s%*pb\n", "valid_bitmap", nbits, svm->sev_es.valid_bitmap);
}
static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
@@ -3266,11 +3230,6 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
-static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
-{
- return (((u64)control->exit_code_hi) << 32) | control->exit_code;
-}
-
static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
@@ -3988,10 +3947,8 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
* Unless Creation is deferred until INIT, signal the vCPU to update
* its state.
*/
- if (request != SVM_VMGEXIT_AP_CREATE_ON_INIT) {
- kvm_make_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
- kvm_vcpu_kick(target_vcpu);
- }
+ if (request != SVM_VMGEXIT_AP_CREATE_ON_INIT)
+ kvm_make_request_and_kick(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
return 0;
}
@@ -4449,6 +4406,7 @@ void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
static void sev_es_init_vmcb(struct vcpu_svm *svm)
{
+ struct kvm_sev_info *sev = to_kvm_sev_info(svm->vcpu.kvm);
struct vmcb *vmcb = svm->vmcb01.ptr;
struct kvm_vcpu *vcpu = &svm->vcpu;
@@ -4464,6 +4422,10 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
if (svm->sev_es.vmsa && !svm->sev_es.snp_has_guest_vmsa)
svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
+ if (cpu_feature_enabled(X86_FEATURE_ALLOWED_SEV_FEATURES))
+ svm->vmcb->control.allowed_sev_features = sev->vmsa_features |
+ VMCB_ALLOWED_SEV_FEATURES_VALID;
+
/* Can't intercept CR register access, HV can't modify CR registers */
svm_clr_intercept(svm, INTERCEPT_CR0_READ);
svm_clr_intercept(svm, INTERCEPT_CR4_READ);
@@ -4924,3 +4886,97 @@ int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
return level;
}
+
+struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb_save_area *vmsa;
+ struct kvm_sev_info *sev;
+ int error = 0;
+ int ret;
+
+ if (!sev_es_guest(vcpu->kvm))
+ return NULL;
+
+ /*
+ * If the VMSA has not yet been encrypted, return a pointer to the
+ * current un-encrypted VMSA.
+ */
+ if (!vcpu->arch.guest_state_protected)
+ return (struct vmcb_save_area *)svm->sev_es.vmsa;
+
+ sev = to_kvm_sev_info(vcpu->kvm);
+
+ /* Check if the SEV policy allows debugging */
+ if (sev_snp_guest(vcpu->kvm)) {
+ if (!(sev->policy & SNP_POLICY_DEBUG))
+ return NULL;
+ } else {
+ if (sev->policy & SEV_POLICY_NODBG)
+ return NULL;
+ }
+
+ if (sev_snp_guest(vcpu->kvm)) {
+ struct sev_data_snp_dbg dbg = {0};
+
+ vmsa = snp_alloc_firmware_page(__GFP_ZERO);
+ if (!vmsa)
+ return NULL;
+
+ dbg.gctx_paddr = __psp_pa(sev->snp_context);
+ dbg.src_addr = svm->vmcb->control.vmsa_pa;
+ dbg.dst_addr = __psp_pa(vmsa);
+
+ ret = sev_do_cmd(SEV_CMD_SNP_DBG_DECRYPT, &dbg, &error);
+
+ /*
+		 * Reclaim the target page as a hypervisor-owned page no matter what.
+ * If this fails, the page can't be used, so leak it and don't
+ * try to use it.
+ */
+ if (snp_page_reclaim(vcpu->kvm, PHYS_PFN(__pa(vmsa))))
+ return NULL;
+
+ if (ret) {
+ pr_err("SEV: SNP_DBG_DECRYPT failed ret=%d, fw_error=%d (%#x)\n",
+ ret, error, error);
+ free_page((unsigned long)vmsa);
+
+ return NULL;
+ }
+ } else {
+ struct sev_data_dbg dbg = {0};
+ struct page *vmsa_page;
+
+ vmsa_page = alloc_page(GFP_KERNEL);
+ if (!vmsa_page)
+ return NULL;
+
+ vmsa = page_address(vmsa_page);
+
+ dbg.handle = sev->handle;
+ dbg.src_addr = svm->vmcb->control.vmsa_pa;
+ dbg.dst_addr = __psp_pa(vmsa);
+ dbg.len = PAGE_SIZE;
+
+ ret = sev_do_cmd(SEV_CMD_DBG_DECRYPT, &dbg, &error);
+ if (ret) {
+ pr_err("SEV: SEV_CMD_DBG_DECRYPT failed ret=%d, fw_error=%d (0x%x)\n",
+ ret, error, error);
+ __free_page(vmsa_page);
+
+ return NULL;
+ }
+ }
+
+ return vmsa;
+}
+
+void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa)
+{
+ /* If the VMSA has not yet been encrypted, nothing was allocated */
+ if (!vcpu->arch.guest_state_protected || !vmsa)
+ return;
+
+ free_page((unsigned long)vmsa);
+}
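/*
 * Usage sketch mirroring the dump_vmcb() caller updated in svm.c later in
 * this diff; the pairing is the important part, error handling is elided:
 *
 *	struct vmcb_save_area *save = sev_decrypt_vmsa(vcpu);
 *
 *	if (save)
 *		... inspect the decrypted register state ...
 *	sev_free_decrypted_vmsa(vcpu, save);
 */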
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d5d0c5c3300b..ab9b947dbf4f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -29,8 +29,10 @@
#include <linux/cc_platform.h>
#include <linux/smp.h>
#include <linux/string_choices.h>
+#include <linux/mutex.h>
#include <asm/apic.h>
+#include <asm/msr.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
@@ -231,6 +233,8 @@ module_param(tsc_scaling, int, 0444);
static bool avic;
module_param(avic, bool, 0444);
+module_param(enable_device_posted_irqs, bool, 0444);
+
bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);
@@ -249,6 +253,8 @@ static unsigned long iopm_base;
DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
+static DEFINE_MUTEX(vmcb_dump_mutex);
+
/*
* Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via
* the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
@@ -475,24 +481,18 @@ static void svm_inject_exception(struct kvm_vcpu *vcpu)
static void svm_init_erratum_383(void)
{
- u32 low, high;
- int err;
u64 val;
if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
return;
/* Use _safe variants to not break nested virtualization */
- val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
- if (err)
+ if (native_read_msr_safe(MSR_AMD64_DC_CFG, &val))
return;
val |= (1ULL << 47);
- low = lower_32_bits(val);
- high = upper_32_bits(val);
-
- native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
+ native_write_msr_safe(MSR_AMD64_DC_CFG, val);
erratum_383_found = true;
}
@@ -566,7 +566,7 @@ static void __svm_write_tsc_multiplier(u64 multiplier)
if (multiplier == __this_cpu_read(current_tsc_ratio))
return;
- wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
+ wrmsrq(MSR_AMD64_TSC_RATIO, multiplier);
__this_cpu_write(current_tsc_ratio, multiplier);
}
@@ -579,15 +579,15 @@ static inline void kvm_cpu_svm_disable(void)
{
uint64_t efer;
- wrmsrl(MSR_VM_HSAVE_PA, 0);
- rdmsrl(MSR_EFER, efer);
+ wrmsrq(MSR_VM_HSAVE_PA, 0);
+ rdmsrq(MSR_EFER, efer);
if (efer & EFER_SVME) {
/*
* Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
* NMI aren't blocked.
*/
stgi();
- wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+ wrmsrq(MSR_EFER, efer & ~EFER_SVME);
}
}
@@ -607,9 +607,6 @@ static void svm_disable_virtualization_cpu(void)
kvm_cpu_svm_disable();
amd_pmu_disable_virt();
-
- if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
- msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
}
static int svm_enable_virtualization_cpu(void)
@@ -619,7 +616,7 @@ static int svm_enable_virtualization_cpu(void)
uint64_t efer;
int me = raw_smp_processor_id();
- rdmsrl(MSR_EFER, efer);
+ rdmsrq(MSR_EFER, efer);
if (efer & EFER_SVME)
return -EBUSY;
@@ -629,9 +626,9 @@ static int svm_enable_virtualization_cpu(void)
sd->next_asid = sd->max_asid + 1;
sd->min_asid = max_sev_asid + 1;
- wrmsrl(MSR_EFER, efer | EFER_SVME);
+ wrmsrq(MSR_EFER, efer | EFER_SVME);
- wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);
+ wrmsrq(MSR_VM_HSAVE_PA, sd->save_area_pa);
if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
/*
@@ -652,13 +649,12 @@ static int svm_enable_virtualization_cpu(void)
* erratum is present everywhere).
*/
if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
- uint64_t len, status = 0;
+ u64 len, status = 0;
int err;
- len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
+ err = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &len);
if (!err)
- status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
- &err);
+ err = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &status);
if (err)
osvw_status = osvw_len = 0;
@@ -687,9 +683,6 @@ static int svm_enable_virtualization_cpu(void)
rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
}
- if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
- msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
-
return 0;
}
@@ -1381,6 +1374,9 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
}
+ if (vcpu->kvm->arch.bus_lock_detection_enabled)
+ svm_set_intercept(svm, INTERCEPT_BUSLOCK);
+
if (sev_guest(vcpu->kvm))
sev_init_vmcb(svm);
@@ -1490,25 +1486,10 @@ out:
return err;
}
-static void svm_clear_current_vmcb(struct vmcb *vmcb)
-{
- int i;
-
- for_each_online_cpu(i)
- cmpxchg(per_cpu_ptr(&svm_data.current_vmcb, i), vmcb, NULL);
-}
-
static void svm_vcpu_free(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- /*
- * The vmcb page can be recycled, causing a false negative in
- * svm_vcpu_load(). So, ensure that no logical CPU has this
- * vmcb page recorded as its current vmcb.
- */
- svm_clear_current_vmcb(svm->vmcb);
-
svm_leave_nested(vcpu);
svm_free_nested(svm);
@@ -1518,6 +1499,63 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
}
+#ifdef CONFIG_CPU_MITIGATIONS
+static DEFINE_SPINLOCK(srso_lock);
+static atomic_t srso_nr_vms;
+
+static void svm_srso_clear_bp_spec_reduce(void *ign)
+{
+ struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);
+
+ if (!sd->bp_spec_reduce_set)
+ return;
+
+ msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+ sd->bp_spec_reduce_set = false;
+}
+
+static void svm_srso_vm_destroy(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
+ return;
+
+ if (atomic_dec_return(&srso_nr_vms))
+ return;
+
+ guard(spinlock)(&srso_lock);
+
+ /*
+ * Verify a new VM didn't come along, acquire the lock, and increment
+ * the count before this task acquired the lock.
+ */
+ if (atomic_read(&srso_nr_vms))
+ return;
+
+ on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1);
+}
+
+static void svm_srso_vm_init(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
+ return;
+
+ /*
+ * Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0
+ * transition, i.e. destroying the last VM, is fully complete, e.g. so
+ * that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs.
+ */
+ if (atomic_inc_not_zero(&srso_nr_vms))
+ return;
+
+ guard(spinlock)(&srso_lock);
+
+ atomic_inc(&srso_nr_vms);
+}
+#else
+static void svm_srso_vm_init(void) { }
+static void svm_srso_vm_destroy(void) { }
+#endif
+
static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1550,6 +1588,11 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
(!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
+ if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) &&
+ !sd->bp_spec_reduce_set) {
+ sd->bp_spec_reduce_set = true;
+ msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+ }
svm->guest_state_loaded = true;
}
@@ -1560,19 +1603,9 @@ static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- struct vcpu_svm *svm = to_svm(vcpu);
- struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
-
if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
shrink_ple_window(vcpu);
- if (sd->current_vmcb != svm->vmcb) {
- sd->current_vmcb = svm->vmcb;
-
- if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT) &&
- static_branch_likely(&switch_vcpu_ibpb))
- indirect_branch_prediction_barrier();
- }
if (kvm_vcpu_apicv_active(vcpu))
avic_vcpu_load(vcpu, cpu);
}
@@ -2149,14 +2182,13 @@ static int ac_interception(struct kvm_vcpu *vcpu)
static bool is_erratum_383(void)
{
- int err, i;
+ int i;
u64 value;
if (!erratum_383_found)
return false;
- value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
- if (err)
+ if (native_read_msr_safe(MSR_IA32_MC0_STATUS, &value))
return false;
/* Bit 62 may or may not be set for this mce */
@@ -2167,17 +2199,11 @@ static bool is_erratum_383(void)
/* Clear MCi_STATUS registers */
for (i = 0; i < 6; ++i)
- native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
-
- value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
- if (!err) {
- u32 low, high;
+ native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0);
+ if (!native_read_msr_safe(MSR_IA32_MCG_STATUS, &value)) {
value &= ~(1ULL << 2);
- low = lower_32_bits(value);
- high = upper_32_bits(value);
-
- native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
+ native_write_msr_safe(MSR_IA32_MCG_STATUS, value);
}
/* Flush tlb to evict multi-match entries */
@@ -2231,6 +2257,10 @@ static int shutdown_interception(struct kvm_vcpu *vcpu)
*/
if (!sev_es_guest(vcpu->kvm)) {
clear_page(svm->vmcb);
+#ifdef CONFIG_KVM_SMM
+ if (is_smm(vcpu))
+ kvm_smm_changed(vcpu, false);
+#endif
kvm_vcpu_reset(vcpu, true);
}
@@ -3174,17 +3204,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
}
/*
- * AMD changed the architectural behavior of bits 5:2. On CPUs
- * without BusLockTrap, bits 5:2 control "external pins", but
- * on CPUs that support BusLockDetect, bit 2 enables BusLockTrap
- * and bits 5:3 are reserved-to-zero. Sadly, old KVM allowed
- * the guest to set bits 5:2 despite not actually virtualizing
- * Performance-Monitoring/Breakpoint external pins. Drop bits
- * 5:2 for backwards compatibility.
- */
- data &= ~GENMASK(5, 2);
-
- /*
* Suppress BTF as KVM doesn't virtualize BTF, but there's no
* way to communicate lack of support to the guest.
*/
@@ -3314,6 +3333,37 @@ static int invpcid_interception(struct kvm_vcpu *vcpu)
return kvm_handle_invpcid(vcpu, type, gva);
}
+static inline int complete_userspace_buslock(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ /*
+ * If userspace has NOT changed RIP, then KVM's ABI is to let the guest
+ * execute the bus-locking instruction. Set the bus lock counter to '1'
+ * to effectively step past the bus lock.
+ */
+ if (kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip))
+ svm->vmcb->control.bus_lock_counter = 1;
+
+ return 1;
+}
+
+static int bus_lock_exit(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
+ vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
+
+ vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
+ vcpu->arch.complete_userspace_io = complete_userspace_buslock;
+
+ if (is_guest_mode(vcpu))
+ svm->nested.ctl.bus_lock_rip = vcpu->arch.cui_linear_rip;
+
+ return 0;
+}
+
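/*
 * Illustrative userspace handling sketch (generic VMM run loop; the helper
 * name is an assumption): on KVM_EXIT_X86_BUS_LOCK the VMM applies whatever
 * rate-limiting policy it wants and re-enters; complete_userspace_buslock()
 * then lets the guest execute the guilty instruction without re-exiting.
 *
 *	case KVM_EXIT_X86_BUS_LOCK:
 *		throttle_bus_lock(vcpu_fd);	// hypothetical policy hook
 *		break;				// loop back into KVM_RUN
 */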
static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[SVM_EXIT_READ_CR0] = cr_interception,
[SVM_EXIT_READ_CR3] = cr_interception,
@@ -3383,6 +3433,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[SVM_EXIT_INVPCID] = invpcid_interception,
[SVM_EXIT_IDLE_HLT] = kvm_emulate_halt,
[SVM_EXIT_NPF] = npf_interception,
+ [SVM_EXIT_BUS_LOCK] = bus_lock_exit,
[SVM_EXIT_RSM] = rsm_interception,
[SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
[SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
@@ -3397,14 +3448,21 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
struct vmcb_control_area *control = &svm->vmcb->control;
struct vmcb_save_area *save = &svm->vmcb->save;
struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;
+ char *vm_type;
if (!dump_invalid_vmcb) {
pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
return;
}
- pr_err("VMCB %p, last attempted VMRUN on CPU %d\n",
- svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
+ guard(mutex)(&vmcb_dump_mutex);
+
+ vm_type = sev_snp_guest(vcpu->kvm) ? "SEV-SNP" :
+ sev_es_guest(vcpu->kvm) ? "SEV-ES" :
+ sev_guest(vcpu->kvm) ? "SEV" : "SVM";
+
+ pr_err("%s vCPU%u VMCB %p, last attempted VMRUN on CPU %d\n",
+ vm_type, vcpu->vcpu_id, svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
pr_err("VMCB Control Area:\n");
pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
@@ -3442,6 +3500,17 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
+ pr_err("%-20s%016llx\n", "allowed_sev_features:", control->allowed_sev_features);
+ pr_err("%-20s%016llx\n", "guest_sev_features:", control->guest_sev_features);
+
+ if (sev_es_guest(vcpu->kvm)) {
+ save = sev_decrypt_vmsa(vcpu);
+ if (!save)
+ goto no_vmsa;
+
+ save01 = save;
+ }
+
pr_err("VMCB State Save Area:\n");
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"es:",
@@ -3512,6 +3581,63 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
pr_err("%-15s %016llx %-13s %016llx\n",
"excp_from:", save->last_excp_from,
"excp_to:", save->last_excp_to);
+
+ if (sev_es_guest(vcpu->kvm)) {
+ struct sev_es_save_area *vmsa = (struct sev_es_save_area *)save;
+
+ pr_err("%-15s %016llx\n",
+ "sev_features", vmsa->sev_features);
+
+ pr_err("%-15s %016llx %-13s %016llx\n",
+ "rax:", vmsa->rax, "rbx:", vmsa->rbx);
+ pr_err("%-15s %016llx %-13s %016llx\n",
+ "rcx:", vmsa->rcx, "rdx:", vmsa->rdx);
+ pr_err("%-15s %016llx %-13s %016llx\n",
+ "rsi:", vmsa->rsi, "rdi:", vmsa->rdi);
+ pr_err("%-15s %016llx %-13s %016llx\n",
+ "rbp:", vmsa->rbp, "rsp:", vmsa->rsp);
+ pr_err("%-15s %016llx %-13s %016llx\n",
+ "r8:", vmsa->r8, "r9:", vmsa->r9);
+ pr_err("%-15s %016llx %-13s %016llx\n",
+ "r10:", vmsa->r10, "r11:", vmsa->r11);
+ pr_err("%-15s %016llx %-13s %016llx\n",
+ "r12:", vmsa->r12, "r13:", vmsa->r13);
+ pr_err("%-15s %016llx %-13s %016llx\n",
+ "r14:", vmsa->r14, "r15:", vmsa->r15);
+ pr_err("%-15s %016llx %-13s %016llx\n",
+ "xcr0:", vmsa->xcr0, "xss:", vmsa->xss);
+ } else {
+ pr_err("%-15s %016llx %-13s %016lx\n",
+ "rax:", save->rax, "rbx:",
+ vcpu->arch.regs[VCPU_REGS_RBX]);
+ pr_err("%-15s %016lx %-13s %016lx\n",
+ "rcx:", vcpu->arch.regs[VCPU_REGS_RCX],
+ "rdx:", vcpu->arch.regs[VCPU_REGS_RDX]);
+ pr_err("%-15s %016lx %-13s %016lx\n",
+ "rsi:", vcpu->arch.regs[VCPU_REGS_RSI],
+ "rdi:", vcpu->arch.regs[VCPU_REGS_RDI]);
+ pr_err("%-15s %016lx %-13s %016llx\n",
+ "rbp:", vcpu->arch.regs[VCPU_REGS_RBP],
+ "rsp:", save->rsp);
+#ifdef CONFIG_X86_64
+ pr_err("%-15s %016lx %-13s %016lx\n",
+ "r8:", vcpu->arch.regs[VCPU_REGS_R8],
+ "r9:", vcpu->arch.regs[VCPU_REGS_R9]);
+ pr_err("%-15s %016lx %-13s %016lx\n",
+ "r10:", vcpu->arch.regs[VCPU_REGS_R10],
+ "r11:", vcpu->arch.regs[VCPU_REGS_R11]);
+ pr_err("%-15s %016lx %-13s %016lx\n",
+ "r12:", vcpu->arch.regs[VCPU_REGS_R12],
+ "r13:", vcpu->arch.regs[VCPU_REGS_R13]);
+ pr_err("%-15s %016lx %-13s %016lx\n",
+ "r14:", vcpu->arch.regs[VCPU_REGS_R14],
+ "r15:", vcpu->arch.regs[VCPU_REGS_R15]);
+#endif
+ }
+
+no_vmsa:
+ if (sev_es_guest(vcpu->kvm))
+ sev_free_decrypted_vmsa(vcpu, save);
}
static bool svm_check_exit_valid(u64 exit_code)
@@ -3548,6 +3674,10 @@ int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
return kvm_emulate_halt(vcpu);
else if (exit_code == SVM_EXIT_NPF)
return npf_interception(vcpu);
+#ifdef CONFIG_KVM_AMD_SEV
+ else if (exit_code == SVM_EXIT_VMGEXIT)
+ return sev_handle_vmgexit(vcpu);
+#endif
#endif
return svm_exit_handlers[exit_code](vcpu);
}
@@ -5036,6 +5166,8 @@ static void svm_vm_destroy(struct kvm *kvm)
{
avic_vm_destroy(kvm);
sev_vm_destroy(kvm);
+
+ svm_srso_vm_destroy();
}
static int svm_vm_init(struct kvm *kvm)
@@ -5061,6 +5193,7 @@ static int svm_vm_init(struct kvm *kvm)
return ret;
}
+ svm_srso_vm_init();
return 0;
}
@@ -5232,7 +5365,7 @@ static __init void svm_adjust_mmio_mask(void)
return;
/* If memory encryption is not enabled, use existing mask */
- rdmsrl(MSR_AMD64_SYSCFG, msr);
+ rdmsrq(MSR_AMD64_SYSCFG, msr);
if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
return;
@@ -5306,6 +5439,9 @@ static __init void svm_set_cpu_caps(void)
kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
}
+ if (cpu_feature_enabled(X86_FEATURE_BUS_LOCK_THRESHOLD))
+ kvm_caps.has_bus_lock_exit = true;
+
/* CPUID 0x80000008 */
if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
boot_cpu_has(X86_FEATURE_AMD_SSBD))
@@ -5501,6 +5637,7 @@ static __init int svm_hardware_setup(void)
*/
allow_smaller_maxphyaddr = !npt_enabled;
+ kvm_caps.inapplicable_quirks &= ~KVM_X86_QUIRK_CD_NW_CLEARED;
return 0;
err:
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index d4490eaed55d..e6f3c6a153a0 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -98,6 +98,7 @@ struct kvm_sev_info {
unsigned int asid; /* ASID used for this guest */
unsigned int handle; /* SEV firmware handle */
int fd; /* SEV device fd */
+ unsigned long policy;
unsigned long pages_locked; /* Number of pages locked */
struct list_head regions_list; /* List of registered regions */
u64 ap_jump_table; /* SEV-ES AP Jump Table address */
@@ -114,6 +115,9 @@ struct kvm_sev_info {
struct mutex guest_req_mutex; /* Must acquire before using bounce buffers */
};
+#define SEV_POLICY_NODBG BIT_ULL(0)
+#define SNP_POLICY_DEBUG BIT_ULL(19)
+
struct kvm_svm {
struct kvm kvm;
@@ -169,6 +173,7 @@ struct vmcb_ctrl_area_cached {
u64 nested_cr3;
u64 virt_ext;
u32 clean;
+ u64 bus_lock_rip;
union {
#if IS_ENABLED(CONFIG_HYPERV) || IS_ENABLED(CONFIG_KVM_HYPERV)
struct hv_vmcb_enlightenments hv_enlightenments;
@@ -335,11 +340,11 @@ struct svm_cpu_data {
u32 next_asid;
u32 min_asid;
+ bool bp_spec_reduce_set;
+
struct vmcb *save_area;
unsigned long save_area_pa;
- struct vmcb *current_vmcb;
-
/* index = sev_asid, value = vmcb pointer */
struct vmcb **sev_vmcbs;
};
@@ -783,6 +788,8 @@ void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
+struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu);
+void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa);
#else
static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{
@@ -814,6 +821,11 @@ static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
return 0;
}
+static inline struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
+{
+ return NULL;
+}
+static inline void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa) {}
#endif
/* vmenter.S */
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index ccda95e53f62..ba736cbb0587 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -11,6 +11,13 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
+#ifdef CREATE_TRACE_POINTS
+#define tracing_kvm_rip_read(vcpu) ({ \
+ typeof(vcpu) __vcpu = vcpu; \
+ __vcpu->arch.guest_state_protected ? 0 : kvm_rip_read(__vcpu); \
+ })
+#endif
+
/*
* Tracepoint for guest mode entry.
*/
@@ -28,7 +35,7 @@ TRACE_EVENT(kvm_entry,
TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id;
- __entry->rip = kvm_rip_read(vcpu);
+ __entry->rip = tracing_kvm_rip_read(vcpu);
__entry->immediate_exit = force_immediate_exit;
kvm_x86_call(get_entry_info)(vcpu, &__entry->intr_info,
@@ -319,7 +326,7 @@ TRACE_EVENT(name, \
), \
\
TP_fast_assign( \
- __entry->guest_rip = kvm_rip_read(vcpu); \
+ __entry->guest_rip = tracing_kvm_rip_read(vcpu); \
__entry->isa = isa; \
__entry->vcpu_id = vcpu->vcpu_id; \
__entry->requests = READ_ONCE(vcpu->requests); \
@@ -423,7 +430,7 @@ TRACE_EVENT(kvm_page_fault,
TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id;
- __entry->guest_rip = kvm_rip_read(vcpu);
+ __entry->guest_rip = tracing_kvm_rip_read(vcpu);
__entry->fault_address = fault_address;
__entry->error_code = error_code;
),
diff --git a/arch/x86/kvm/vmx/common.h b/arch/x86/kvm/vmx/common.h
new file mode 100644
index 000000000000..a0c5e8781c33
--- /dev/null
+++ b/arch/x86/kvm/vmx/common.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __KVM_X86_VMX_COMMON_H
+#define __KVM_X86_VMX_COMMON_H
+
+#include <linux/kvm_host.h>
+#include <asm/posted_intr.h>
+
+#include "mmu.h"
+
+union vmx_exit_reason {
+ struct {
+ u32 basic : 16;
+ u32 reserved16 : 1;
+ u32 reserved17 : 1;
+ u32 reserved18 : 1;
+ u32 reserved19 : 1;
+ u32 reserved20 : 1;
+ u32 reserved21 : 1;
+ u32 reserved22 : 1;
+ u32 reserved23 : 1;
+ u32 reserved24 : 1;
+ u32 reserved25 : 1;
+ u32 bus_lock_detected : 1;
+ u32 enclave_mode : 1;
+ u32 smi_pending_mtf : 1;
+ u32 smi_from_vmx_root : 1;
+ u32 reserved30 : 1;
+ u32 failed_vmentry : 1;
+ };
+ u32 full;
+};
+
+struct vcpu_vt {
+ /* Posted interrupt descriptor */
+ struct pi_desc pi_desc;
+
+ /* Used if this vCPU is waiting for PI notification wakeup. */
+ struct list_head pi_wakeup_list;
+
+ union vmx_exit_reason exit_reason;
+
+ unsigned long exit_qualification;
+ u32 exit_intr_info;
+
+ /*
+ * If true, guest state has been loaded into hardware, and host state
+ * saved into vcpu_{vt,vmx,tdx}. If false, host state is loaded into
+ * hardware.
+ */
+ bool guest_state_loaded;
+ bool emulation_required;
+
+#ifdef CONFIG_X86_64
+ u64 msr_host_kernel_gs_base;
+#endif
+
+ unsigned long host_debugctlmsr;
+};
+
+#ifdef CONFIG_KVM_INTEL_TDX
+
+static __always_inline bool is_td(struct kvm *kvm)
+{
+ return kvm->arch.vm_type == KVM_X86_TDX_VM;
+}
+
+static __always_inline bool is_td_vcpu(struct kvm_vcpu *vcpu)
+{
+ return is_td(vcpu->kvm);
+}
+
+#else
+
+static __always_inline bool is_td(struct kvm *kvm) { return false; }
+static __always_inline bool is_td_vcpu(struct kvm_vcpu *vcpu) { return false; }
+
+#endif
+
+static inline bool vt_is_tdx_private_gpa(struct kvm *kvm, gpa_t gpa)
+{
+ /* For TDX the direct mask is the shared mask. */
+ return !kvm_is_addr_direct(kvm, gpa);
+}
+
+static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
+ unsigned long exit_qualification)
+{
+ u64 error_code;
+
+ /* Is it a read fault? */
+ error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
+ ? PFERR_USER_MASK : 0;
+ /* Is it a write fault? */
+ error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
+ ? PFERR_WRITE_MASK : 0;
+ /* Is it a fetch fault? */
+ error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
+ ? PFERR_FETCH_MASK : 0;
+ /* ept page table entry is present? */
+ error_code |= (exit_qualification & EPT_VIOLATION_PROT_MASK)
+ ? PFERR_PRESENT_MASK : 0;
+
+	if (exit_qualification & EPT_VIOLATION_GVA_IS_VALID)
+ error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
+ PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
+
+ if (vt_is_tdx_private_gpa(vcpu->kvm, gpa))
+ error_code |= PFERR_PRIVATE_ACCESS;
+
+ return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
+}
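/*
 * Worked example (illustrative): a guest write that faults on a not-yet-mapped
 * private TDX GPA yields PFERR_WRITE_MASK | PFERR_PRIVATE_ACCESS;
 * PFERR_PRESENT_MASK would be added only if the EPT entry had some permission
 * bits set in the exit qualification.
 */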
+
+static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
+ int pi_vec)
+{
+#ifdef CONFIG_SMP
+ if (vcpu->mode == IN_GUEST_MODE) {
+ /*
+		 * The vector of the virtual interrupt has already been set in the PIR.
+ * Send a notification event to deliver the virtual interrupt
+ * unless the vCPU is the currently running vCPU, i.e. the
+ * event is being sent from a fastpath VM-Exit handler, in
+ * which case the PIR will be synced to the vIRR before
+ * re-entering the guest.
+ *
+ * When the target is not the running vCPU, the following
+ * possibilities emerge:
+ *
+ * Case 1: vCPU stays in non-root mode. Sending a notification
+ * event posts the interrupt to the vCPU.
+ *
+ * Case 2: vCPU exits to root mode and is still runnable. The
+ * PIR will be synced to the vIRR before re-entering the guest.
+ * Sending a notification event is ok as the host IRQ handler
+ * will ignore the spurious event.
+ *
+ * Case 3: vCPU exits to root mode and is blocked. vcpu_block()
+ * has already synced PIR to vIRR and never blocks the vCPU if
+ * the vIRR is not empty. Therefore, a blocked vCPU here does
+ * not wait for any requested interrupts in PIR, and sending a
+ * notification event also results in a benign, spurious event.
+ */
+
+ if (vcpu != kvm_get_running_vcpu())
+ __apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
+ return;
+ }
+#endif
+ /*
+ * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
+ * otherwise do nothing as KVM will grab the highest priority pending
+ * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
+ */
+ kvm_vcpu_wake_up(vcpu);
+}
+
+/*
+ * Post an interrupt to a vCPU's PIR and trigger the vCPU to process the
+ * interrupt if necessary.
+ */
+static inline void __vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu,
+ struct pi_desc *pi_desc, int vector)
+{
+ if (pi_test_and_set_pir(vector, pi_desc))
+ return;
+
+ /* If a previous notification has sent the IPI, nothing to do. */
+ if (pi_test_and_set_on(pi_desc))
+ return;
+
+ /*
+ * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
+ * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
+ * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
+ * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
+ */
+ kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
+}
+
+noinstr void vmx_handle_nmi(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_X86_VMX_COMMON_H */
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 43ee9ed11291..d1e02e567b57 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -3,9 +3,888 @@
#include "x86_ops.h"
#include "vmx.h"
+#include "mmu.h"
#include "nested.h"
#include "pmu.h"
#include "posted_intr.h"
+#include "tdx.h"
+#include "tdx_arch.h"
+
+#ifdef CONFIG_KVM_INTEL_TDX
+static_assert(offsetof(struct vcpu_vmx, vt) == offsetof(struct vcpu_tdx, vt));
+
+static void vt_disable_virtualization_cpu(void)
+{
+ /* Note, TDX *and* VMX need to be disabled if TDX is enabled. */
+ if (enable_tdx)
+ tdx_disable_virtualization_cpu();
+ vmx_disable_virtualization_cpu();
+}
+
+static __init int vt_hardware_setup(void)
+{
+ int ret;
+
+ ret = vmx_hardware_setup();
+ if (ret)
+ return ret;
+
+ /*
+ * Update vt_x86_ops::vm_size here so it is ready before
+ * kvm_ops_update() is called in kvm_x86_vendor_init().
+ *
+ * Note, the actual bringing up of TDX must be done after
+ * kvm_ops_update() because enabling TDX requires enabling
+ * hardware virtualization first, i.e., all online CPUs must
+ * be in post-VMXON state. This means the @vm_size here
+	 * may be updated to TDX's size, but TDX may still fail to enable
+	 * at a later time.
+ *
+ * The VMX/VT code could update kvm_x86_ops::vm_size again
+ * after bringing up TDX, but this would require exporting
+ * either kvm_x86_ops or kvm_ops_update() from the base KVM
+	 * module, which seems like overkill.  Anyway, the worst case here
+	 * is that KVM may allocate a couple more bytes than needed for
+	 * each VM.
+ */
+ if (enable_tdx) {
+ vt_x86_ops.vm_size = max_t(unsigned int, vt_x86_ops.vm_size,
+ sizeof(struct kvm_tdx));
+ /*
+		 * Note, TDX may still fail to initialize later, in
+		 * vt_init(), in which case it is not necessary to set up
+		 * these callbacks.  But making them valid here even
+		 * when TDX fails to init later is fine because the
+		 * callbacks won't be called if the VM isn't a TDX guest.
+ */
+ vt_x86_ops.link_external_spt = tdx_sept_link_private_spt;
+ vt_x86_ops.set_external_spte = tdx_sept_set_private_spte;
+ vt_x86_ops.free_external_spt = tdx_sept_free_private_spt;
+ vt_x86_ops.remove_external_spte = tdx_sept_remove_private_spte;
+ vt_x86_ops.protected_apic_has_interrupt = tdx_protected_apic_has_interrupt;
+ }
+
+ return 0;
+}
+
+static int vt_vm_init(struct kvm *kvm)
+{
+ if (is_td(kvm))
+ return tdx_vm_init(kvm);
+
+ return vmx_vm_init(kvm);
+}
+
+static void vt_vm_pre_destroy(struct kvm *kvm)
+{
+ if (is_td(kvm))
+ return tdx_mmu_release_hkid(kvm);
+}
+
+static void vt_vm_destroy(struct kvm *kvm)
+{
+ if (is_td(kvm))
+ return tdx_vm_destroy(kvm);
+
+ vmx_vm_destroy(kvm);
+}
+
+static int vt_vcpu_precreate(struct kvm *kvm)
+{
+ if (is_td(kvm))
+ return 0;
+
+ return vmx_vcpu_precreate(kvm);
+}
+
+static int vt_vcpu_create(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return tdx_vcpu_create(vcpu);
+
+ return vmx_vcpu_create(vcpu);
+}
+
+static void vt_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu)) {
+ tdx_vcpu_free(vcpu);
+ return;
+ }
+
+ vmx_vcpu_free(vcpu);
+}
+
+static void vt_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+{
+ if (is_td_vcpu(vcpu)) {
+ tdx_vcpu_reset(vcpu, init_event);
+ return;
+ }
+
+ vmx_vcpu_reset(vcpu, init_event);
+}
+
+static void vt_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ if (is_td_vcpu(vcpu)) {
+ tdx_vcpu_load(vcpu, cpu);
+ return;
+ }
+
+ vmx_vcpu_load(vcpu, cpu);
+}
+
+static void vt_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
+{
+ /*
+	 * Basic TDX does not support the PML feature.  KVM does not enable PML
+	 * in the TD's VMCS, nor does it allocate or flush a PML buffer for TDX.
+ */
+ if (WARN_ON_ONCE(is_td_vcpu(vcpu)))
+ return;
+
+ vmx_update_cpu_dirty_logging(vcpu);
+}
+
+static void vt_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu)) {
+ tdx_prepare_switch_to_guest(vcpu);
+ return;
+ }
+
+ vmx_prepare_switch_to_guest(vcpu);
+}
+
+static void vt_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu)) {
+ tdx_vcpu_put(vcpu);
+ return;
+ }
+
+ vmx_vcpu_put(vcpu);
+}
+
+static int vt_vcpu_pre_run(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return tdx_vcpu_pre_run(vcpu);
+
+ return vmx_vcpu_pre_run(vcpu);
+}
+
+static fastpath_t vt_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+{
+ if (is_td_vcpu(vcpu))
+ return tdx_vcpu_run(vcpu, force_immediate_exit);
+
+ return vmx_vcpu_run(vcpu, force_immediate_exit);
+}
+
+static int vt_handle_exit(struct kvm_vcpu *vcpu,
+ enum exit_fastpath_completion fastpath)
+{
+ if (is_td_vcpu(vcpu))
+ return tdx_handle_exit(vcpu, fastpath);
+
+ return vmx_handle_exit(vcpu, fastpath);
+}
+
+static int vt_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+ if (unlikely(is_td_vcpu(vcpu)))
+ return tdx_set_msr(vcpu, msr_info);
+
+ return vmx_set_msr(vcpu, msr_info);
+}
+
+/*
+ * The kvm parameter can be NULL (module initialization, or invocation before
+ * VM creation). Be sure to check the kvm parameter before using it.
+ */
+static bool vt_has_emulated_msr(struct kvm *kvm, u32 index)
+{
+ if (kvm && is_td(kvm))
+ return tdx_has_emulated_msr(index);
+
+ return vmx_has_emulated_msr(kvm, index);
+}
+
+static int vt_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+ if (unlikely(is_td_vcpu(vcpu)))
+ return tdx_get_msr(vcpu, msr_info);
+
+ return vmx_get_msr(vcpu, msr_info);
+}
+
+static void vt_msr_filter_changed(struct kvm_vcpu *vcpu)
+{
+ /*
+	 * TDX doesn't allow the VMM to configure interception of MSR accesses.
+	 * A TDX guest requests MSR accesses by calling TDVMCALL.  The MSR
+	 * filters will be applied when handling the TDVMCALL for RDMSR/WRMSR
+	 * if userspace has set any.
+ */
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_msr_filter_changed(vcpu);
+}
+
+static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
+{
+ if (is_td_vcpu(vcpu))
+ return tdx_complete_emulated_msr(vcpu, err);
+
+ return vmx_complete_emulated_msr(vcpu, err);
+}
+
+#ifdef CONFIG_KVM_SMM
+static int vt_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+{
+ if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+ return 0;
+
+ return vmx_smi_allowed(vcpu, for_injection);
+}
+
+static int vt_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
+{
+ if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+ return 0;
+
+ return vmx_enter_smm(vcpu, smram);
+}
+
+static int vt_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
+{
+ if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+ return 0;
+
+ return vmx_leave_smm(vcpu, smram);
+}
+
+static void vt_enable_smi_window(struct kvm_vcpu *vcpu)
+{
+ if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+ return;
+
+ /* RSM will cause a vmexit anyway. */
+ vmx_enable_smi_window(vcpu);
+}
+#endif
+
+static int vt_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+ void *insn, int insn_len)
+{
+ /*
+ * For TDX, this can only be triggered for MMIO emulation. Let the
+	 * guest retry after installing the SPTE with the suppress-#VE bit cleared,
+	 * so that the guest will receive a #VE on the retry.  The guest is expected
+	 * to call TDG.VP.VMCALL<MMIO> to request the VMM to do MMIO emulation on
+ * #VE.
+ */
+ if (is_td_vcpu(vcpu))
+ return X86EMUL_RETRY_INSTR;
+
+ return vmx_check_emulate_instruction(vcpu, emul_type, insn, insn_len);
+}
+
+static bool vt_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
+{
+ /*
+ * INIT and SIPI are always blocked for TDX, i.e., INIT handling and
+ * the OP vcpu_deliver_sipi_vector() won't be called.
+ */
+ if (is_td_vcpu(vcpu))
+ return true;
+
+ return vmx_apic_init_signal_blocked(vcpu);
+}
+
+static void vt_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+{
+ /* Only x2APIC mode is supported for TD. */
+ if (is_td_vcpu(vcpu))
+ return;
+
+ return vmx_set_virtual_apic_mode(vcpu);
+}
+
+static void vt_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ return vmx_hwapic_isr_update(vcpu, max_isr);
+}
+
+static int vt_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return -1;
+
+ return vmx_sync_pir_to_irr(vcpu);
+}
+
+static void vt_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector)
+{
+ if (is_td_vcpu(apic->vcpu)) {
+ tdx_deliver_interrupt(apic, delivery_mode, trig_mode,
+ vector);
+ return;
+ }
+
+ vmx_deliver_interrupt(apic, delivery_mode, trig_mode, vector);
+}
+
+static void vt_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_vcpu_after_set_cpuid(vcpu);
+}
+
+static void vt_update_exception_bitmap(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_update_exception_bitmap(vcpu);
+}
+
+static u64 vt_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+{
+ if (is_td_vcpu(vcpu))
+ return 0;
+
+ return vmx_get_segment_base(vcpu, seg);
+}
+
+static void vt_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
+ int seg)
+{
+ if (is_td_vcpu(vcpu)) {
+ memset(var, 0, sizeof(*var));
+ return;
+ }
+
+ vmx_get_segment(vcpu, var, seg);
+}
+
+static void vt_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
+ int seg)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_set_segment(vcpu, var, seg);
+}
+
+static int vt_get_cpl(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return 0;
+
+ return vmx_get_cpl(vcpu);
+}
+
+static int vt_get_cpl_no_cache(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return 0;
+
+ return vmx_get_cpl_no_cache(vcpu);
+}
+
+static void vt_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
+{
+ if (is_td_vcpu(vcpu)) {
+ *db = 0;
+ *l = 0;
+ return;
+ }
+
+ vmx_get_cs_db_l_bits(vcpu, db, l);
+}
+
+static bool vt_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+ if (is_td_vcpu(vcpu))
+ return true;
+
+ return vmx_is_valid_cr0(vcpu, cr0);
+}
+
+static void vt_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_set_cr0(vcpu, cr0);
+}
+
+static bool vt_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+ if (is_td_vcpu(vcpu))
+ return true;
+
+ return vmx_is_valid_cr4(vcpu, cr4);
+}
+
+static void vt_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_set_cr4(vcpu, cr4);
+}
+
+static int vt_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+ if (is_td_vcpu(vcpu))
+ return 0;
+
+ return vmx_set_efer(vcpu, efer);
+}
+
+static void vt_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+ if (is_td_vcpu(vcpu)) {
+ memset(dt, 0, sizeof(*dt));
+ return;
+ }
+
+ vmx_get_idt(vcpu, dt);
+}
+
+static void vt_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_set_idt(vcpu, dt);
+}
+
+static void vt_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+ if (is_td_vcpu(vcpu)) {
+ memset(dt, 0, sizeof(*dt));
+ return;
+ }
+
+ vmx_get_gdt(vcpu, dt);
+}
+
+static void vt_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_set_gdt(vcpu, dt);
+}
+
+static void vt_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_set_dr6(vcpu, val);
+}
+
+static void vt_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_set_dr7(vcpu, val);
+}
+
+static void vt_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
+{
+ /*
+	 * MOV-DR exiting is always cleared for TD guests, even in debug mode.
+	 * Thus KVM_DEBUGREG_WONT_EXIT can never be set, and this path should
+	 * never be reached for a TD vCPU.
+ */
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_sync_dirty_debug_regs(vcpu);
+}
+
+static void vt_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+{
+ if (WARN_ON_ONCE(is_td_vcpu(vcpu)))
+ return;
+
+ vmx_cache_reg(vcpu, reg);
+}
+
+static unsigned long vt_get_rflags(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return 0;
+
+ return vmx_get_rflags(vcpu);
+}
+
+static void vt_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_set_rflags(vcpu, rflags);
+}
+
+static bool vt_get_if_flag(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return false;
+
+ return vmx_get_if_flag(vcpu);
+}
+
+static void vt_flush_tlb_all(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu)) {
+ tdx_flush_tlb_all(vcpu);
+ return;
+ }
+
+ vmx_flush_tlb_all(vcpu);
+}
+
+static void vt_flush_tlb_current(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu)) {
+ tdx_flush_tlb_current(vcpu);
+ return;
+ }
+
+ vmx_flush_tlb_current(vcpu);
+}
+
+static void vt_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_flush_tlb_gva(vcpu, addr);
+}
+
+static void vt_flush_tlb_guest(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_flush_tlb_guest(vcpu);
+}
+
+static void vt_inject_nmi(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu)) {
+ tdx_inject_nmi(vcpu);
+ return;
+ }
+
+ vmx_inject_nmi(vcpu);
+}
+
+static int vt_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+{
+ /*
+ * The TDX module manages NMI windows and NMI reinjection, and hides NMI
+	 * blocking; all KVM can do is throw an NMI over the wall.
+ */
+ if (is_td_vcpu(vcpu))
+ return true;
+
+ return vmx_nmi_allowed(vcpu, for_injection);
+}
+
+static bool vt_get_nmi_mask(struct kvm_vcpu *vcpu)
+{
+ /*
+	 * KVM can't get the NMI blocking status for a TDX guest, so assume NMIs are
+ * always unmasked.
+ */
+ if (is_td_vcpu(vcpu))
+ return false;
+
+ return vmx_get_nmi_mask(vcpu);
+}
+
+static void vt_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_set_nmi_mask(vcpu, masked);
+}
+
+static void vt_enable_nmi_window(struct kvm_vcpu *vcpu)
+{
+ /* Refer to the comments in tdx_inject_nmi(). */
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_enable_nmi_window(vcpu);
+}
+
+static void vt_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
+ int pgd_level)
+{
+ if (is_td_vcpu(vcpu)) {
+ tdx_load_mmu_pgd(vcpu, root_hpa, pgd_level);
+ return;
+ }
+
+ vmx_load_mmu_pgd(vcpu, root_hpa, pgd_level);
+}
+
+static void vt_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_set_interrupt_shadow(vcpu, mask);
+}
+
+static u32 vt_get_interrupt_shadow(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return 0;
+
+ return vmx_get_interrupt_shadow(vcpu);
+}
+
+static void vt_patch_hypercall(struct kvm_vcpu *vcpu,
+ unsigned char *hypercall)
+{
+ /*
+	 * Because guest memory is protected, the guest can't be patched.  The
+	 * TD kernel is modified to use TDG.VP.VMCALL for hypercalls.
+ */
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_patch_hypercall(vcpu, hypercall);
+}
+
+static void vt_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_inject_irq(vcpu, reinjected);
+}
+
+static void vt_inject_exception(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_inject_exception(vcpu);
+}
+
+static void vt_cancel_injection(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_cancel_injection(vcpu);
+}
+
+static int vt_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+{
+ if (is_td_vcpu(vcpu))
+ return tdx_interrupt_allowed(vcpu);
+
+ return vmx_interrupt_allowed(vcpu, for_injection);
+}
+
+static void vt_enable_irq_window(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_enable_irq_window(vcpu);
+}
+
+static void vt_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info, u32 *error_code)
+{
+ *intr_info = 0;
+ *error_code = 0;
+
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_get_entry_info(vcpu, intr_info, error_code);
+}
+
+static void vt_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
+ u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
+{
+ if (is_td_vcpu(vcpu)) {
+ tdx_get_exit_info(vcpu, reason, info1, info2, intr_info,
+ error_code);
+ return;
+ }
+
+ vmx_get_exit_info(vcpu, reason, info1, info2, intr_info, error_code);
+}
+
+static void vt_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_update_cr8_intercept(vcpu, tpr, irr);
+}
+
+static void vt_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_set_apic_access_page_addr(vcpu);
+}
+
+static void vt_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu)) {
+ KVM_BUG_ON(!kvm_vcpu_apicv_active(vcpu), vcpu->kvm);
+ return;
+ }
+
+ vmx_refresh_apicv_exec_ctrl(vcpu);
+}
+
+static void vt_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+}
+
+static int vt_set_tss_addr(struct kvm *kvm, unsigned int addr)
+{
+ if (is_td(kvm))
+ return 0;
+
+ return vmx_set_tss_addr(kvm, addr);
+}
+
+static int vt_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
+{
+ if (is_td(kvm))
+ return 0;
+
+ return vmx_set_identity_map_addr(kvm, ident_addr);
+}
+
+static u64 vt_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	/* TDX doesn't support L2 guests at the moment. */
+ if (is_td_vcpu(vcpu))
+ return 0;
+
+ return vmx_get_l2_tsc_offset(vcpu);
+}
+
+static u64 vt_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+	/* TDX doesn't support L2 guests at the moment. */
+ if (is_td_vcpu(vcpu))
+ return 0;
+
+ return vmx_get_l2_tsc_multiplier(vcpu);
+}
+
+static void vt_write_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	/* In TDX, the TSC offset can't be changed. */
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_write_tsc_offset(vcpu);
+}
+
+static void vt_write_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+	/* In TDX, the TSC multiplier can't be changed. */
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_write_tsc_multiplier(vcpu);
+}
+
+#ifdef CONFIG_X86_64
+static int vt_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
+ bool *expired)
+{
+ /* VMX-preemption timer isn't available for TDX. */
+ if (is_td_vcpu(vcpu))
+ return -EINVAL;
+
+ return vmx_set_hv_timer(vcpu, guest_deadline_tsc, expired);
+}
+
+static void vt_cancel_hv_timer(struct kvm_vcpu *vcpu)
+{
+ /* VMX-preemption timer can't be set. See vt_set_hv_timer(). */
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_cancel_hv_timer(vcpu);
+}
+#endif
+
+static void vt_setup_mce(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_setup_mce(vcpu);
+}
+
+static int vt_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
+{
+ if (!is_td(kvm))
+ return -ENOTTY;
+
+ return tdx_vm_ioctl(kvm, argp);
+}
+
+static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
+{
+ if (!is_td_vcpu(vcpu))
+ return -EINVAL;
+
+ return tdx_vcpu_ioctl(vcpu, argp);
+}
+
+static int vt_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
+{
+ if (is_td(kvm))
+ return tdx_gmem_private_max_mapping_level(kvm, pfn);
+
+ return 0;
+}
+
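+/*
+ * When TDX support is built in, route the kvm_x86_ops callbacks through the
+ * vt_* wrappers above so each op can dispatch to TDX or VMX at runtime;
+ * otherwise bind them directly to the vmx_* implementations and leave the
+ * TDX-only ops NULL.
+ */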
+#define vt_op(name) vt_##name
+#define vt_op_tdx_only(name) vt_##name
+#else /* CONFIG_KVM_INTEL_TDX */
+#define vt_op(name) vmx_##name
+#define vt_op_tdx_only(name) NULL
+#endif /* CONFIG_KVM_INTEL_TDX */
#define VMX_REQUIRED_APICV_INHIBITS \
(BIT(APICV_INHIBIT_REASON_DISABLED) | \
@@ -24,111 +903,113 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.hardware_unsetup = vmx_hardware_unsetup,
.enable_virtualization_cpu = vmx_enable_virtualization_cpu,
- .disable_virtualization_cpu = vmx_disable_virtualization_cpu,
+ .disable_virtualization_cpu = vt_op(disable_virtualization_cpu),
.emergency_disable_virtualization_cpu = vmx_emergency_disable_virtualization_cpu,
- .has_emulated_msr = vmx_has_emulated_msr,
+ .has_emulated_msr = vt_op(has_emulated_msr),
.vm_size = sizeof(struct kvm_vmx),
- .vm_init = vmx_vm_init,
- .vm_destroy = vmx_vm_destroy,
- .vcpu_precreate = vmx_vcpu_precreate,
- .vcpu_create = vmx_vcpu_create,
- .vcpu_free = vmx_vcpu_free,
- .vcpu_reset = vmx_vcpu_reset,
+ .vm_init = vt_op(vm_init),
+ .vm_destroy = vt_op(vm_destroy),
+ .vm_pre_destroy = vt_op_tdx_only(vm_pre_destroy),
+
+ .vcpu_precreate = vt_op(vcpu_precreate),
+ .vcpu_create = vt_op(vcpu_create),
+ .vcpu_free = vt_op(vcpu_free),
+ .vcpu_reset = vt_op(vcpu_reset),
- .prepare_switch_to_guest = vmx_prepare_switch_to_guest,
- .vcpu_load = vmx_vcpu_load,
- .vcpu_put = vmx_vcpu_put,
+ .prepare_switch_to_guest = vt_op(prepare_switch_to_guest),
+ .vcpu_load = vt_op(vcpu_load),
+ .vcpu_put = vt_op(vcpu_put),
- .update_exception_bitmap = vmx_update_exception_bitmap,
+ .update_exception_bitmap = vt_op(update_exception_bitmap),
.get_feature_msr = vmx_get_feature_msr,
- .get_msr = vmx_get_msr,
- .set_msr = vmx_set_msr,
- .get_segment_base = vmx_get_segment_base,
- .get_segment = vmx_get_segment,
- .set_segment = vmx_set_segment,
- .get_cpl = vmx_get_cpl,
- .get_cpl_no_cache = vmx_get_cpl_no_cache,
- .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
- .is_valid_cr0 = vmx_is_valid_cr0,
- .set_cr0 = vmx_set_cr0,
- .is_valid_cr4 = vmx_is_valid_cr4,
- .set_cr4 = vmx_set_cr4,
- .set_efer = vmx_set_efer,
- .get_idt = vmx_get_idt,
- .set_idt = vmx_set_idt,
- .get_gdt = vmx_get_gdt,
- .set_gdt = vmx_set_gdt,
- .set_dr6 = vmx_set_dr6,
- .set_dr7 = vmx_set_dr7,
- .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
- .cache_reg = vmx_cache_reg,
- .get_rflags = vmx_get_rflags,
- .set_rflags = vmx_set_rflags,
- .get_if_flag = vmx_get_if_flag,
-
- .flush_tlb_all = vmx_flush_tlb_all,
- .flush_tlb_current = vmx_flush_tlb_current,
- .flush_tlb_gva = vmx_flush_tlb_gva,
- .flush_tlb_guest = vmx_flush_tlb_guest,
-
- .vcpu_pre_run = vmx_vcpu_pre_run,
- .vcpu_run = vmx_vcpu_run,
- .handle_exit = vmx_handle_exit,
+ .get_msr = vt_op(get_msr),
+ .set_msr = vt_op(set_msr),
+
+ .get_segment_base = vt_op(get_segment_base),
+ .get_segment = vt_op(get_segment),
+ .set_segment = vt_op(set_segment),
+ .get_cpl = vt_op(get_cpl),
+ .get_cpl_no_cache = vt_op(get_cpl_no_cache),
+ .get_cs_db_l_bits = vt_op(get_cs_db_l_bits),
+ .is_valid_cr0 = vt_op(is_valid_cr0),
+ .set_cr0 = vt_op(set_cr0),
+ .is_valid_cr4 = vt_op(is_valid_cr4),
+ .set_cr4 = vt_op(set_cr4),
+ .set_efer = vt_op(set_efer),
+ .get_idt = vt_op(get_idt),
+ .set_idt = vt_op(set_idt),
+ .get_gdt = vt_op(get_gdt),
+ .set_gdt = vt_op(set_gdt),
+ .set_dr6 = vt_op(set_dr6),
+ .set_dr7 = vt_op(set_dr7),
+ .sync_dirty_debug_regs = vt_op(sync_dirty_debug_regs),
+ .cache_reg = vt_op(cache_reg),
+ .get_rflags = vt_op(get_rflags),
+ .set_rflags = vt_op(set_rflags),
+ .get_if_flag = vt_op(get_if_flag),
+
+ .flush_tlb_all = vt_op(flush_tlb_all),
+ .flush_tlb_current = vt_op(flush_tlb_current),
+ .flush_tlb_gva = vt_op(flush_tlb_gva),
+ .flush_tlb_guest = vt_op(flush_tlb_guest),
+
+ .vcpu_pre_run = vt_op(vcpu_pre_run),
+ .vcpu_run = vt_op(vcpu_run),
+ .handle_exit = vt_op(handle_exit),
.skip_emulated_instruction = vmx_skip_emulated_instruction,
.update_emulated_instruction = vmx_update_emulated_instruction,
- .set_interrupt_shadow = vmx_set_interrupt_shadow,
- .get_interrupt_shadow = vmx_get_interrupt_shadow,
- .patch_hypercall = vmx_patch_hypercall,
- .inject_irq = vmx_inject_irq,
- .inject_nmi = vmx_inject_nmi,
- .inject_exception = vmx_inject_exception,
- .cancel_injection = vmx_cancel_injection,
- .interrupt_allowed = vmx_interrupt_allowed,
- .nmi_allowed = vmx_nmi_allowed,
- .get_nmi_mask = vmx_get_nmi_mask,
- .set_nmi_mask = vmx_set_nmi_mask,
- .enable_nmi_window = vmx_enable_nmi_window,
- .enable_irq_window = vmx_enable_irq_window,
- .update_cr8_intercept = vmx_update_cr8_intercept,
+ .set_interrupt_shadow = vt_op(set_interrupt_shadow),
+ .get_interrupt_shadow = vt_op(get_interrupt_shadow),
+ .patch_hypercall = vt_op(patch_hypercall),
+ .inject_irq = vt_op(inject_irq),
+ .inject_nmi = vt_op(inject_nmi),
+ .inject_exception = vt_op(inject_exception),
+ .cancel_injection = vt_op(cancel_injection),
+ .interrupt_allowed = vt_op(interrupt_allowed),
+ .nmi_allowed = vt_op(nmi_allowed),
+ .get_nmi_mask = vt_op(get_nmi_mask),
+ .set_nmi_mask = vt_op(set_nmi_mask),
+ .enable_nmi_window = vt_op(enable_nmi_window),
+ .enable_irq_window = vt_op(enable_irq_window),
+ .update_cr8_intercept = vt_op(update_cr8_intercept),
.x2apic_icr_is_split = false,
- .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
- .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
- .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
- .load_eoi_exitmap = vmx_load_eoi_exitmap,
- .apicv_pre_state_restore = vmx_apicv_pre_state_restore,
+ .set_virtual_apic_mode = vt_op(set_virtual_apic_mode),
+ .set_apic_access_page_addr = vt_op(set_apic_access_page_addr),
+ .refresh_apicv_exec_ctrl = vt_op(refresh_apicv_exec_ctrl),
+ .load_eoi_exitmap = vt_op(load_eoi_exitmap),
+ .apicv_pre_state_restore = pi_apicv_pre_state_restore,
.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
- .hwapic_isr_update = vmx_hwapic_isr_update,
- .sync_pir_to_irr = vmx_sync_pir_to_irr,
- .deliver_interrupt = vmx_deliver_interrupt,
+ .hwapic_isr_update = vt_op(hwapic_isr_update),
+ .sync_pir_to_irr = vt_op(sync_pir_to_irr),
+ .deliver_interrupt = vt_op(deliver_interrupt),
.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
- .set_tss_addr = vmx_set_tss_addr,
- .set_identity_map_addr = vmx_set_identity_map_addr,
+ .set_tss_addr = vt_op(set_tss_addr),
+ .set_identity_map_addr = vt_op(set_identity_map_addr),
.get_mt_mask = vmx_get_mt_mask,
- .get_exit_info = vmx_get_exit_info,
- .get_entry_info = vmx_get_entry_info,
+ .get_exit_info = vt_op(get_exit_info),
+ .get_entry_info = vt_op(get_entry_info),
- .vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,
+ .vcpu_after_set_cpuid = vt_op(vcpu_after_set_cpuid),
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
- .get_l2_tsc_offset = vmx_get_l2_tsc_offset,
- .get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
- .write_tsc_offset = vmx_write_tsc_offset,
- .write_tsc_multiplier = vmx_write_tsc_multiplier,
+ .get_l2_tsc_offset = vt_op(get_l2_tsc_offset),
+ .get_l2_tsc_multiplier = vt_op(get_l2_tsc_multiplier),
+ .write_tsc_offset = vt_op(write_tsc_offset),
+ .write_tsc_multiplier = vt_op(write_tsc_multiplier),
- .load_mmu_pgd = vmx_load_mmu_pgd,
+ .load_mmu_pgd = vt_op(load_mmu_pgd),
.check_intercept = vmx_check_intercept,
.handle_exit_irqoff = vmx_handle_exit_irqoff,
- .cpu_dirty_log_size = PML_LOG_NR_ENTRIES,
- .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
+ .update_cpu_dirty_logging = vt_op(update_cpu_dirty_logging),
.nested_ops = &vmx_nested_ops,
@@ -136,35 +1017,95 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.pi_start_assignment = vmx_pi_start_assignment,
#ifdef CONFIG_X86_64
- .set_hv_timer = vmx_set_hv_timer,
- .cancel_hv_timer = vmx_cancel_hv_timer,
+ .set_hv_timer = vt_op(set_hv_timer),
+ .cancel_hv_timer = vt_op(cancel_hv_timer),
#endif
- .setup_mce = vmx_setup_mce,
+ .setup_mce = vt_op(setup_mce),
#ifdef CONFIG_KVM_SMM
- .smi_allowed = vmx_smi_allowed,
- .enter_smm = vmx_enter_smm,
- .leave_smm = vmx_leave_smm,
- .enable_smi_window = vmx_enable_smi_window,
+ .smi_allowed = vt_op(smi_allowed),
+ .enter_smm = vt_op(enter_smm),
+ .leave_smm = vt_op(leave_smm),
+ .enable_smi_window = vt_op(enable_smi_window),
#endif
- .check_emulate_instruction = vmx_check_emulate_instruction,
- .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
+ .check_emulate_instruction = vt_op(check_emulate_instruction),
+ .apic_init_signal_blocked = vt_op(apic_init_signal_blocked),
.migrate_timers = vmx_migrate_timers,
- .msr_filter_changed = vmx_msr_filter_changed,
- .complete_emulated_msr = kvm_complete_insn_gp,
+ .msr_filter_changed = vt_op(msr_filter_changed),
+ .complete_emulated_msr = vt_op(complete_emulated_msr),
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
.get_untagged_addr = vmx_get_untagged_addr,
+
+ .mem_enc_ioctl = vt_op_tdx_only(mem_enc_ioctl),
+ .vcpu_mem_enc_ioctl = vt_op_tdx_only(vcpu_mem_enc_ioctl),
+
+ .private_max_mapping_level = vt_op_tdx_only(gmem_private_max_mapping_level)
};
struct kvm_x86_init_ops vt_init_ops __initdata = {
- .hardware_setup = vmx_hardware_setup,
+ .hardware_setup = vt_op(hardware_setup),
.handle_intel_pt_intr = NULL,
.runtime_ops = &vt_x86_ops,
.pmu_ops = &intel_pmu_ops,
};
+
+static void __exit vt_exit(void)
+{
+ kvm_exit();
+ tdx_cleanup();
+ vmx_exit();
+}
+module_exit(vt_exit);
+
+static int __init vt_init(void)
+{
+ unsigned vcpu_size, vcpu_align;
+ int r;
+
+ r = vmx_init();
+ if (r)
+ return r;
+
+ /* tdx_init() has been taken */
+ r = tdx_bringup();
+ if (r)
+ goto err_tdx_bringup;
+
+ /*
+ * TDX and VMX have different vCPU structures. Calculate the
+ * maximum size/align so that kvm_init() can use the larger
+ * values to create the kmem_vcpu_cache.
+ */
+ vcpu_size = sizeof(struct vcpu_vmx);
+ vcpu_align = __alignof__(struct vcpu_vmx);
+ if (enable_tdx) {
+ vcpu_size = max_t(unsigned, vcpu_size,
+ sizeof(struct vcpu_tdx));
+ vcpu_align = max_t(unsigned, vcpu_align,
+ __alignof__(struct vcpu_tdx));
+ kvm_caps.supported_vm_types |= BIT(KVM_X86_TDX_VM);
+ }
+
+ /*
+	 * Common KVM initialization _must_ come last; after this, /dev/kvm is
+ * exposed to userspace!
+ */
+ r = kvm_init(vcpu_size, vcpu_align, THIS_MODULE);
+ if (r)
+ goto err_kvm_init;
+
+ return 0;
+
+err_kvm_init:
+ tdx_cleanup();
+err_tdx_bringup:
+ vmx_exit();
+ return r;
+}
+module_init(vt_init);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 5504d9e9fd32..7211c71d4241 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -6,6 +6,7 @@
#include <asm/debugreg.h>
#include <asm/mmu_context.h>
+#include <asm/msr.h>
#include "x86.h"
#include "cpuid.h"
@@ -275,7 +276,7 @@ static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
{
struct vmcs_host_state *dest, *src;
- if (unlikely(!vmx->guest_state_loaded))
+ if (unlikely(!vmx->vt.guest_state_loaded))
return;
src = &prev->host_state;
@@ -301,7 +302,7 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
cpu = get_cpu();
prev = vmx->loaded_vmcs;
vmx->loaded_vmcs = vmcs;
- vmx_vcpu_load_vmcs(vcpu, cpu, prev);
+ vmx_vcpu_load_vmcs(vcpu, cpu);
vmx_sync_vmcs_host_state(vmx, prev);
put_cpu();
@@ -425,7 +426,7 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
* tables also changed, but KVM should not treat EPT Misconfig
* VM-Exits as writes.
*/
- WARN_ON_ONCE(vmx->exit_reason.basic != EXIT_REASON_EPT_VIOLATION);
+ WARN_ON_ONCE(vmx->vt.exit_reason.basic != EXIT_REASON_EPT_VIOLATION);
/*
* PML Full and EPT Violation VM-Exits both use bit 12 to report
@@ -824,12 +825,30 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
return 0;
}
+static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
+ vmx->nested.msrs.misc_high);
+
+ return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
+}
+
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
u32 count, u64 addr)
{
if (count == 0)
return 0;
+ /*
+ * Exceeding the limit results in architecturally _undefined_ behavior,
+ * i.e. KVM is allowed to do literally anything in response to a bad
+ * limit. Immediately generate a consistency check so that code that
+ * consumes the count doesn't need to worry about extreme edge cases.
+ */
+ if (count > nested_vmx_max_atomic_switch_msrs(vcpu))
+ return -EINVAL;
+
if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
!kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
return -EINVAL;
@@ -940,15 +959,6 @@ static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
return 0;
}
-static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
- vmx->nested.msrs.misc_high);
-
- return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
-}
-
/*
* Load guest's/host's msr at nested entry/exit.
* return 0 for success, entry index for failure.
@@ -965,7 +975,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
for (i = 0; i < count; i++) {
- if (unlikely(i >= max_msr_list_size))
+ if (WARN_ON_ONCE(i >= max_msr_list_size))
goto fail;
if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
@@ -1053,7 +1063,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
for (i = 0; i < count; i++) {
- if (unlikely(i >= max_msr_list_size))
+ if (WARN_ON_ONCE(i >= max_msr_list_size))
return -EINVAL;
if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
@@ -4520,12 +4530,12 @@ static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
cpu = get_cpu();
vmx->loaded_vmcs = &vmx->nested.vmcs02;
- vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01);
+ vmx_vcpu_load_vmcs(vcpu, cpu);
sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
vmx->loaded_vmcs = &vmx->vmcs01;
- vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02);
+ vmx_vcpu_load_vmcs(vcpu, cpu);
put_cpu();
}
@@ -4622,7 +4632,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
{
/* update exit information fields: */
vmcs12->vm_exit_reason = vm_exit_reason;
- if (to_vmx(vcpu)->exit_reason.enclave_mode)
+ if (vmx_get_exit_reason(vcpu).enclave_mode)
vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
vmcs12->exit_qualification = exit_qualification;
@@ -4794,7 +4804,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vmcs12->vm_exit_msr_load_count))
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
- to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
+ to_vt(vcpu)->emulation_required = vmx_emulation_required(vcpu);
}
static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
@@ -5020,16 +5030,7 @@ void __nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
- /*
- * If IBRS is advertised to the vCPU, KVM must flush the indirect
- * branch predictors when transitioning from L2 to L1, as L1 expects
- * hardware (KVM in this case) to provide separate predictor modes.
- * Bare metal isolates VMX root (host) from VMX non-root (guest), but
- * doesn't isolate different VMCSs, i.e. in this case, doesn't provide
- * separate modes for L2 vs L1.
- */
- if (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL))
- indirect_branch_prediction_barrier();
+ kvm_nested_vmexit_handle_ibrs(vcpu);
/* Update any VMCS fields that might have changed while L2 ran */
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
@@ -6127,7 +6128,7 @@ fail:
* nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
* EXIT_REASON_VMFUNC as the exit reason.
*/
- nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
+ nested_vmx_vmexit(vcpu, vmx->vt.exit_reason.full,
vmx_get_intr_info(vcpu),
vmx_get_exit_qual(vcpu));
return 1;
@@ -6572,7 +6573,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- union vmx_exit_reason exit_reason = vmx->exit_reason;
+ union vmx_exit_reason exit_reason = vmx->vt.exit_reason;
unsigned long exit_qual;
u32 exit_intr_info;
@@ -7202,8 +7203,8 @@ static void nested_vmx_setup_cr_fixed(struct nested_vmx_msrs *msrs)
msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
/* These MSRs specify bits which the guest must keep fixed off. */
- rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
- rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
+ rdmsrq(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
+ rdmsrq(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
if (vmx_umip_emulated())
msrs->cr4_fixed1 |= X86_CR4_UMIP;
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 77012b2eca0e..bbf4509f32d0 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -13,12 +13,14 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
+#include <asm/msr.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"
+#include "tdx.h"
/*
* Perf's "BASE" is wildly misleading, architectural PMUs use bits 31:16 of ECX
@@ -34,6 +36,24 @@
#define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
+static struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return NULL;
+
+ return &to_vmx(vcpu)->lbr_desc;
+}
+
+static struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return NULL;
+
+ return &to_vmx(vcpu)->lbr_desc.records;
+}
+
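+/*
+ * Past this point to_vmx() must not be used: a TDX vCPU is not a vcpu_vmx,
+ * so LBR state must only be reached via the NULL-checked helpers above.
+ */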
+#pragma GCC poison to_vmx
+
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
struct kvm_pmc *pmc;
@@ -129,6 +149,22 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}
+static bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return false;
+
+ return cpuid_model_is_consistent(vcpu);
+}
+
+bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return false;
+
+ return !!vcpu_to_lbr_records(vcpu)->nr;
+}
+
static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{
struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
@@ -194,6 +230,9 @@ static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
{
struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+ if (!lbr_desc)
+ return;
+
if (lbr_desc->event) {
perf_event_release_kernel(lbr_desc->event);
lbr_desc->event = NULL;
@@ -235,6 +274,9 @@ int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
PERF_SAMPLE_BRANCH_USER,
};
+ if (WARN_ON_ONCE(!lbr_desc))
+ return 0;
+
if (unlikely(lbr_desc->event)) {
__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
return 0;
@@ -279,9 +321,9 @@ static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
local_irq_disable();
if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
if (read)
- rdmsrl(index, msr_info->data);
+ rdmsrq(index, msr_info->data);
else
- wrmsrl(index, msr_info->data);
+ wrmsrq(index, msr_info->data);
__set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
local_irq_enable();
return true;
@@ -466,6 +508,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
u64 perf_capabilities;
u64 counter_rsvd;
+ if (!lbr_desc)
+ return;
+
memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
/*
@@ -542,7 +587,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
perf_capabilities = vcpu_get_perf_capabilities(vcpu);
- if (cpuid_model_is_consistent(vcpu) &&
+ if (intel_pmu_lbr_is_compatible(vcpu) &&
(perf_capabilities & PMU_CAP_LBR_FMT))
memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
else
@@ -570,6 +615,9 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+ if (!lbr_desc)
+ return;
+
for (i = 0; i < KVM_MAX_NR_INTEL_GP_COUNTERS; i++) {
pmu->gp_counters[i].type = KVM_PMC_GP;
pmu->gp_counters[i].vcpu = vcpu;
@@ -677,6 +725,9 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+ if (WARN_ON_ONCE(!lbr_desc))
+ return;
+
if (!lbr_desc->event) {
vmx_disable_lbr_msrs_passthrough(vcpu);
if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
diff --git a/arch/x86/kvm/vmx/pmu_intel.h b/arch/x86/kvm/vmx/pmu_intel.h
new file mode 100644
index 000000000000..5620d0882cdc
--- /dev/null
+++ b/arch/x86/kvm/vmx/pmu_intel.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_PMU_INTEL_H
+#define __KVM_X86_VMX_PMU_INTEL_H
+
+#include <linux/kvm_host.h>
+
+bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);
+int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
+
+struct lbr_desc {
+ /* Basic info about guest LBR records. */
+ struct x86_pmu_lbr records;
+
+ /*
+ * Emulate LBR feature via passthrough LBR registers when the
+ * per-vcpu guest LBR event is scheduled on the current pcpu.
+ *
+ * The records may be inaccurate if the host reclaims the LBR.
+ */
+ struct perf_event *event;
+
+ /* True if LBRs are marked as not intercepted in the MSR bitmap */
+ bool msr_passthrough;
+};
+
+extern struct x86_pmu_lbr vmx_lbr_caps;
+
+#endif /* __KVM_X86_VMX_PMU_INTEL_H */
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index ec08fa3caf43..5c615e5845bf 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -11,6 +11,7 @@
#include "posted_intr.h"
#include "trace.h"
#include "vmx.h"
+#include "tdx.h"
/*
* Maintain a per-CPU list of vCPUs that need to be awakened by wakeup_handler()
@@ -31,9 +32,11 @@ static DEFINE_PER_CPU(struct list_head, wakeup_vcpus_on_cpu);
*/
static DEFINE_PER_CPU(raw_spinlock_t, wakeup_vcpus_on_cpu_lock);
-static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
+#define PI_LOCK_SCHED_OUT SINGLE_DEPTH_NESTING
+
+static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
- return &(to_vmx(vcpu)->pi_desc);
+ return &(to_vt(vcpu)->pi_desc);
}
static int pi_try_set_control(struct pi_desc *pi_desc, u64 *pold, u64 new)
@@ -53,7 +56,7 @@ static int pi_try_set_control(struct pi_desc *pi_desc, u64 *pold, u64 new)
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vcpu_vt *vt = to_vt(vcpu);
struct pi_desc old, new;
unsigned long flags;
unsigned int dest;
@@ -89,9 +92,20 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
* current pCPU if the task was migrated.
*/
if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR) {
- raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
- list_del(&vmx->pi_wakeup_list);
- raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
+ raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu);
+
+ /*
+ * In addition to taking the wakeup lock for the regular/IRQ
+ * context, tell lockdep it is being taken for the "sched out"
+		 * context as well.  vCPU loads happen in task context, and
+ * this is taking the lock of the *previous* CPU, i.e. can race
+ * with both the scheduler and the wakeup handler.
+ */
+ raw_spin_lock(spinlock);
+ spin_acquire(&spinlock->dep_map, PI_LOCK_SCHED_OUT, 0, _RET_IP_);
+ list_del(&vt->pi_wakeup_list);
+ spin_release(&spinlock->dep_map, _RET_IP_);
+ raw_spin_unlock(spinlock);
}
dest = cpu_physical_id(cpu);
@@ -134,9 +148,8 @@ after_clear_sn:
static bool vmx_can_use_vtd_pi(struct kvm *kvm)
{
- return irqchip_in_kernel(kvm) && enable_apicv &&
- kvm_arch_has_assigned_device(kvm) &&
- irq_remapping_cap(IRQ_POSTING_CAP);
+ return irqchip_in_kernel(kvm) && kvm_arch_has_irq_bypass() &&
+ kvm_arch_has_assigned_device(kvm);
}
/*
@@ -146,14 +159,26 @@ static bool vmx_can_use_vtd_pi(struct kvm *kvm)
static void pi_enable_wakeup_handler(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vcpu_vt *vt = to_vt(vcpu);
struct pi_desc old, new;
- unsigned long flags;
- local_irq_save(flags);
+ lockdep_assert_irqs_disabled();
- raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
- list_add_tail(&vmx->pi_wakeup_list,
+ /*
+	 * Acquire the wakeup lock using the "sched out" context to work around
+ * a lockdep false positive. When this is called, schedule() holds
+ * various per-CPU scheduler locks. When the wakeup handler runs, it
+ * holds this CPU's wakeup lock while calling try_to_wake_up(), which
+ * can eventually take the aforementioned scheduler locks, which causes
+	 * lockdep to assume there is a deadlock.
+ *
+ * Deadlock can't actually occur because IRQs are disabled for the
+ * entirety of the sched_out critical section, i.e. the wakeup handler
+ * can't run while the scheduler locks are held.
+ */
+ raw_spin_lock_nested(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu),
+ PI_LOCK_SCHED_OUT);
+ list_add_tail(&vt->pi_wakeup_list,
&per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu));
raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
@@ -176,8 +201,6 @@ static void pi_enable_wakeup_handler(struct kvm_vcpu *vcpu)
*/
if (pi_test_on(&new))
__apic_send_IPI_self(POSTED_INTR_WAKEUP_VECTOR);
-
- local_irq_restore(flags);
}
static bool vmx_needs_pi_wakeup(struct kvm_vcpu *vcpu)
@@ -190,7 +213,8 @@ static bool vmx_needs_pi_wakeup(struct kvm_vcpu *vcpu)
* notification vector is switched to the one that calls
* back to the pi_wakeup_handler() function.
*/
- return vmx_can_use_ipiv(vcpu) || vmx_can_use_vtd_pi(vcpu->kvm);
+ return (vmx_can_use_ipiv(vcpu) && !is_td_vcpu(vcpu)) ||
+ vmx_can_use_vtd_pi(vcpu->kvm);
}
void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
@@ -200,7 +224,9 @@ void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
if (!vmx_needs_pi_wakeup(vcpu))
return;
- if (kvm_vcpu_is_blocking(vcpu) && !vmx_interrupt_blocked(vcpu))
+ if (kvm_vcpu_is_blocking(vcpu) &&
+ ((is_td_vcpu(vcpu) && tdx_interrupt_allowed(vcpu)) ||
+ (!is_td_vcpu(vcpu) && !vmx_interrupt_blocked(vcpu))))
pi_enable_wakeup_handler(vcpu);
/*
@@ -220,13 +246,13 @@ void pi_wakeup_handler(void)
int cpu = smp_processor_id();
struct list_head *wakeup_list = &per_cpu(wakeup_vcpus_on_cpu, cpu);
raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, cpu);
- struct vcpu_vmx *vmx;
+ struct vcpu_vt *vt;
raw_spin_lock(spinlock);
- list_for_each_entry(vmx, wakeup_list, pi_wakeup_list) {
+ list_for_each_entry(vt, wakeup_list, pi_wakeup_list) {
- if (pi_test_on(&vmx->pi_desc))
- kvm_vcpu_wake_up(&vmx->vcpu);
+ if (pi_test_on(&vt->pi_desc))
+ kvm_vcpu_wake_up(vt_to_vcpu(vt));
}
raw_spin_unlock(spinlock);
}
@@ -237,6 +263,14 @@ void __init pi_init_cpu(int cpu)
raw_spin_lock_init(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu));
}
+void pi_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
+{
+ struct pi_desc *pi = vcpu_to_pi_desc(vcpu);
+
+ pi_clear_on(pi);
+ memset(pi->pir, 0, sizeof(pi->pir));
+}
+
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
@@ -254,7 +288,7 @@ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
*/
void vmx_pi_start_assignment(struct kvm *kvm)
{
- if (!irq_remapping_cap(IRQ_POSTING_CAP))
+ if (!kvm_arch_has_irq_bypass())
return;
kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
@@ -274,6 +308,7 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_irq_routing_table *irq_rt;
+ bool enable_remapped_mode = true;
struct kvm_lapic_irq irq;
struct kvm_vcpu *vcpu;
struct vcpu_data vcpu_info;
@@ -312,21 +347,8 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
kvm_set_msi_irq(kvm, e, &irq);
if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
- !kvm_irq_is_postable(&irq)) {
- /*
- * Make sure the IRTE is in remapped mode if
- * we don't handle it in posted mode.
- */
- ret = irq_set_vcpu_affinity(host_irq, NULL);
- if (ret < 0) {
- printk(KERN_INFO
- "failed to back to remapped mode, irq: %u\n",
- host_irq);
- goto out;
- }
-
+ !kvm_irq_is_postable(&irq))
continue;
- }
vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
vcpu_info.vector = irq.vector;
@@ -334,11 +356,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
vcpu_info.vector, vcpu_info.pi_desc_addr, set);
- if (set)
- ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
- else
- ret = irq_set_vcpu_affinity(host_irq, NULL);
+ if (!set)
+ continue;
+
+ enable_remapped_mode = false;
+ ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
if (ret < 0) {
printk(KERN_INFO "%s: failed to update PI IRTE\n",
__func__);
@@ -346,6 +369,9 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
}
}
+ if (enable_remapped_mode)
+ ret = irq_set_vcpu_affinity(host_irq, NULL);
+
ret = 0;
out:
srcu_read_unlock(&kvm->irq_srcu, idx);
diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h
index ad9116a99bcc..80499ea0e674 100644
--- a/arch/x86/kvm/vmx/posted_intr.h
+++ b/arch/x86/kvm/vmx/posted_intr.h
@@ -9,6 +9,7 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu);
void pi_wakeup_handler(void);
void __init pi_init_cpu(int cpu);
+void pi_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);
int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
@@ -18,7 +19,7 @@ static inline int pi_find_highest_vector(struct pi_desc *pi_desc)
{
int vec;
- vec = find_last_bit((unsigned long *)pi_desc->pir, 256);
+ vec = find_last_bit(pi_desc->pir, 256);
return vec < 256 ? vec : -1;
}
diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
index 9961e07cf071..df1d0cf76947 100644
--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2021 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <asm/msr.h>
#include <asm/sgx.h>
#include "x86.h"
@@ -411,16 +412,16 @@ void setup_default_sgx_lepubkeyhash(void)
* MSRs exist but are read-only (locked and not writable).
*/
if (!enable_sgx || boot_cpu_has(X86_FEATURE_SGX_LC) ||
- rdmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) {
+ rdmsrq_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) {
sgx_pubkey_hash[0] = 0xa6053e051270b7acULL;
sgx_pubkey_hash[1] = 0x6cfbe8ba8b3b413dULL;
sgx_pubkey_hash[2] = 0xc4916d99f2b3735dULL;
sgx_pubkey_hash[3] = 0xd4f8c05909f9bb3bULL;
} else {
/* MSR_IA32_SGXLEPUBKEYHASH0 is read above */
- rdmsrl(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]);
- rdmsrl(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]);
- rdmsrl(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]);
+ rdmsrq(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]);
+ rdmsrq(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]);
+ rdmsrq(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]);
}
}
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
new file mode 100644
index 000000000000..b952bc673271
--- /dev/null
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -0,0 +1,3526 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/cleanup.h>
+#include <linux/cpu.h>
+#include <asm/cpufeature.h>
+#include <asm/fpu/xcr.h>
+#include <linux/misc_cgroup.h>
+#include <linux/mmu_context.h>
+#include <asm/tdx.h>
+#include "capabilities.h"
+#include "mmu.h"
+#include "x86_ops.h"
+#include "lapic.h"
+#include "tdx.h"
+#include "vmx.h"
+#include "mmu/spte.h"
+#include "common.h"
+#include "posted_intr.h"
+#include "irq.h"
+#include <trace/events/kvm.h>
+#include "trace.h"
+
+#pragma GCC poison to_vmx
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
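+/*
+ * Ratelimited helpers for logging a failed SEAMCALL together with however
+ * many output operands (RCX/RDX/R8) the particular SEAMCALL returns.
+ */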
+#define pr_tdx_error(__fn, __err) \
+ pr_err_ratelimited("SEAMCALL %s failed: 0x%llx\n", #__fn, __err)
+
+#define __pr_tdx_error_N(__fn_str, __err, __fmt, ...) \
+ pr_err_ratelimited("SEAMCALL " __fn_str " failed: 0x%llx, " __fmt, __err, __VA_ARGS__)
+
+#define pr_tdx_error_1(__fn, __err, __rcx) \
+ __pr_tdx_error_N(#__fn, __err, "rcx 0x%llx\n", __rcx)
+
+#define pr_tdx_error_2(__fn, __err, __rcx, __rdx) \
+ __pr_tdx_error_N(#__fn, __err, "rcx 0x%llx, rdx 0x%llx\n", __rcx, __rdx)
+
+#define pr_tdx_error_3(__fn, __err, __rcx, __rdx, __r8) \
+ __pr_tdx_error_N(#__fn, __err, "rcx 0x%llx, rdx 0x%llx, r8 0x%llx\n", __rcx, __rdx, __r8)
+
+bool enable_tdx __ro_after_init;
+module_param_named(tdx, enable_tdx, bool, 0444);
+
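+/*
+ * The shared-vs-private GPA bit is the highest guest physical address bit:
+ * bit 51 when the Secure EPT uses a 5-level page walk (GPAW = 52), bit 47
+ * when it uses a 4-level walk (GPAW = 48).
+ */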
+#define TDX_SHARED_BIT_PWL_5 gpa_to_gfn(BIT_ULL(51))
+#define TDX_SHARED_BIT_PWL_4 gpa_to_gfn(BIT_ULL(47))
+
+static enum cpuhp_state tdx_cpuhp_state;
+
+static const struct tdx_sys_info *tdx_sysinfo;
+
+void tdh_vp_rd_failed(struct vcpu_tdx *tdx, char *uclass, u32 field, u64 err)
+{
+ KVM_BUG_ON(1, tdx->vcpu.kvm);
+ pr_err("TDH_VP_RD[%s.0x%x] failed 0x%llx\n", uclass, field, err);
+}
+
+void tdh_vp_wr_failed(struct vcpu_tdx *tdx, char *uclass, char *op, u32 field,
+ u64 val, u64 err)
+{
+ KVM_BUG_ON(1, tdx->vcpu.kvm);
+ pr_err("TDH_VP_WR[%s.0x%x]%s0x%llx failed: 0x%llx\n", uclass, field, op, val, err);
+}
+
+#define KVM_SUPPORTED_TD_ATTRS (TDX_TD_ATTR_SEPT_VE_DISABLE)
+
+static __always_inline struct kvm_tdx *to_kvm_tdx(struct kvm *kvm)
+{
+ return container_of(kvm, struct kvm_tdx, kvm);
+}
+
+static __always_inline struct vcpu_tdx *to_tdx(struct kvm_vcpu *vcpu)
+{
+ return container_of(vcpu, struct vcpu_tdx, vcpu);
+}
+
+static u64 tdx_get_supported_attrs(const struct tdx_sys_info_td_conf *td_conf)
+{
+ u64 val = KVM_SUPPORTED_TD_ATTRS;
+
+ if ((val & td_conf->attributes_fixed1) != td_conf->attributes_fixed1)
+ return 0;
+
+ val &= td_conf->attributes_fixed0;
+
+ return val;
+}
+
+static u64 tdx_get_supported_xfam(const struct tdx_sys_info_td_conf *td_conf)
+{
+ u64 val = kvm_caps.supported_xcr0 | kvm_caps.supported_xss;
+
+ if ((val & td_conf->xfam_fixed1) != td_conf->xfam_fixed1)
+ return 0;
+
+ val &= td_conf->xfam_fixed0;
+
+ return val;
+}
+
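+/* CPUID.0x80000008:EAX[23:16] enumerates the guest physical address width. */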
+static int tdx_get_guest_phys_addr_bits(const u32 eax)
+{
+ return (eax & GENMASK(23, 16)) >> 16;
+}
+
+static u32 tdx_set_guest_phys_addr_bits(const u32 eax, int addr_bits)
+{
+ return (eax & ~GENMASK(23, 16)) | (addr_bits & 0xff) << 16;
+}
+
+#define TDX_FEATURE_TSX (__feature_bit(X86_FEATURE_HLE) | __feature_bit(X86_FEATURE_RTM))
+
+static bool has_tsx(const struct kvm_cpuid_entry2 *entry)
+{
+ return entry->function == 7 && entry->index == 0 &&
+ (entry->ebx & TDX_FEATURE_TSX);
+}
+
+static void clear_tsx(struct kvm_cpuid_entry2 *entry)
+{
+ entry->ebx &= ~TDX_FEATURE_TSX;
+}
+
+static bool has_waitpkg(const struct kvm_cpuid_entry2 *entry)
+{
+ return entry->function == 7 && entry->index == 0 &&
+ (entry->ecx & __feature_bit(X86_FEATURE_WAITPKG));
+}
+
+static void clear_waitpkg(struct kvm_cpuid_entry2 *entry)
+{
+ entry->ecx &= ~__feature_bit(X86_FEATURE_WAITPKG);
+}
+
+static void tdx_clear_unsupported_cpuid(struct kvm_cpuid_entry2 *entry)
+{
+ if (has_tsx(entry))
+ clear_tsx(entry);
+
+ if (has_waitpkg(entry))
+ clear_waitpkg(entry);
+}
+
+static bool tdx_unsupported_cpuid(const struct kvm_cpuid_entry2 *entry)
+{
+ return has_tsx(entry) || has_waitpkg(entry);
+}
+
+#define KVM_TDX_CPUID_NO_SUBLEAF ((__u32)-1)
+
+static void td_init_cpuid_entry2(struct kvm_cpuid_entry2 *entry, unsigned char idx)
+{
+ const struct tdx_sys_info_td_conf *td_conf = &tdx_sysinfo->td_conf;
+
+ entry->function = (u32)td_conf->cpuid_config_leaves[idx];
+ entry->index = td_conf->cpuid_config_leaves[idx] >> 32;
+ entry->eax = (u32)td_conf->cpuid_config_values[idx][0];
+ entry->ebx = td_conf->cpuid_config_values[idx][0] >> 32;
+ entry->ecx = (u32)td_conf->cpuid_config_values[idx][1];
+ entry->edx = td_conf->cpuid_config_values[idx][1] >> 32;
+
+ if (entry->index == KVM_TDX_CPUID_NO_SUBLEAF)
+ entry->index = 0;
+
+ /*
+ * The TDX module doesn't allow configuring the guest phys addr bits
+	 * (EAX[23:16]).  However, KVM uses it as an interface for userspace
+ * to configure the GPAW. Report these bits as configurable.
+ */
+ if (entry->function == 0x80000008)
+ entry->eax = tdx_set_guest_phys_addr_bits(entry->eax, 0xff);
+
+ tdx_clear_unsupported_cpuid(entry);
+}
+
+static int init_kvm_tdx_caps(const struct tdx_sys_info_td_conf *td_conf,
+ struct kvm_tdx_capabilities *caps)
+{
+ int i;
+
+ caps->supported_attrs = tdx_get_supported_attrs(td_conf);
+ if (!caps->supported_attrs)
+ return -EIO;
+
+ caps->supported_xfam = tdx_get_supported_xfam(td_conf);
+ if (!caps->supported_xfam)
+ return -EIO;
+
+ caps->cpuid.nent = td_conf->num_cpuid_config;
+
+ for (i = 0; i < td_conf->num_cpuid_config; i++)
+ td_init_cpuid_entry2(&caps->cpuid.entries[i], i);
+
+ return 0;
+}
+
+/*
+ * Some SEAMCALLs acquire the TDX module globally, and can fail with
+ * TDX_OPERAND_BUSY. Use a global mutex to serialize these SEAMCALLs.
+ */
+static DEFINE_MUTEX(tdx_lock);
+
+static atomic_t nr_configured_hkid;
+
+static bool tdx_operand_busy(u64 err)
+{
+ return (err & TDX_SEAMCALL_STATUS_MASK) == TDX_OPERAND_BUSY;
+}
+
+
+/*
+ * A per-CPU list of TD vCPUs associated with a given CPU.
+ * Protected by interrupt mask. Only manipulated by the CPU owning this per-CPU
+ * list.
+ * - When a vCPU is loaded onto a CPU, it is removed from the per-CPU list of
+ * the old CPU during the IPI callback running on the old CPU, and then added
+ * to the per-CPU list of the new CPU.
+ * - When a TD is tearing down, all vCPUs are disassociated from their current
+ * running CPUs and removed from the per-CPU list during the IPI callback
+ * running on those CPUs.
+ * - When a CPU is brought down, traverse the per-CPU list to disassociate all
+ * associated TD vCPUs and remove them from the per-CPU list.
+ */
+static DEFINE_PER_CPU(struct list_head, associated_tdvcpus);
+
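+/*
+ * TDVMCALL arguments live in GPRs: R10 carries the call type on input and the
+ * return code on output, R11 carries the leaf number (and the return value).
+ */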
+static __always_inline unsigned long tdvmcall_exit_type(struct kvm_vcpu *vcpu)
+{
+ return to_tdx(vcpu)->vp_enter_args.r10;
+}
+
+static __always_inline unsigned long tdvmcall_leaf(struct kvm_vcpu *vcpu)
+{
+ return to_tdx(vcpu)->vp_enter_args.r11;
+}
+
+static __always_inline void tdvmcall_set_return_code(struct kvm_vcpu *vcpu,
+ long val)
+{
+ to_tdx(vcpu)->vp_enter_args.r10 = val;
+}
+
+static __always_inline void tdvmcall_set_return_val(struct kvm_vcpu *vcpu,
+ unsigned long val)
+{
+ to_tdx(vcpu)->vp_enter_args.r11 = val;
+}
+
+static inline void tdx_hkid_free(struct kvm_tdx *kvm_tdx)
+{
+ tdx_guest_keyid_free(kvm_tdx->hkid);
+ kvm_tdx->hkid = -1;
+ atomic_dec(&nr_configured_hkid);
+ misc_cg_uncharge(MISC_CG_RES_TDX, kvm_tdx->misc_cg, 1);
+ put_misc_cg(kvm_tdx->misc_cg);
+ kvm_tdx->misc_cg = NULL;
+}
+
+static inline bool is_hkid_assigned(struct kvm_tdx *kvm_tdx)
+{
+ return kvm_tdx->hkid > 0;
+}
+
+static inline void tdx_disassociate_vp(struct kvm_vcpu *vcpu)
+{
+ lockdep_assert_irqs_disabled();
+
+ list_del(&to_tdx(vcpu)->cpu_list);
+
+ /*
+ * Ensure tdx->cpu_list is updated before setting vcpu->cpu to -1,
+ * otherwise, a different CPU can see vcpu->cpu = -1 and add the vCPU
+ * to its list before it's deleted from this CPU's list.
+ */
+ smp_wmb();
+
+ vcpu->cpu = -1;
+}
+
+static void tdx_clear_page(struct page *page)
+{
+ const void *zero_page = (const void *) page_to_virt(ZERO_PAGE(0));
+ void *dest = page_to_virt(page);
+ unsigned long i;
+
+ /*
+ * The page could have been poisoned. MOVDIR64B also clears
+ * the poison bit so the kernel can safely use the page again.
+ */
+ for (i = 0; i < PAGE_SIZE; i += 64)
+ movdir64b(dest + i, zero_page);
+ /*
+ * MOVDIR64B store uses WC buffer. Prevent following memory reads
+ * from seeing potentially poisoned cache.
+ */
+ __mb();
+}
+
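+/*
+ * Temporarily block all vCPUs from entering the guest: tdx_vcpu_run() bails
+ * out while wait_for_sept_zap is set, and the KVM_REQ_OUTSIDE_GUEST_MODE kick
+ * forces any vCPU already in guest mode to exit. Used to retry SEPT-zap
+ * related SEAMCALLs that failed with TDX_OPERAND_BUSY.
+ */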
+static void tdx_no_vcpus_enter_start(struct kvm *kvm)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ WRITE_ONCE(kvm_tdx->wait_for_sept_zap, true);
+
+ kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
+}
+
+static void tdx_no_vcpus_enter_stop(struct kvm *kvm)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ WRITE_ONCE(kvm_tdx->wait_for_sept_zap, false);
+}
+
+/* TDH.PHYMEM.PAGE.RECLAIM is allowed only when destroying the TD. */
+static int __tdx_reclaim_page(struct page *page)
+{
+ u64 err, rcx, rdx, r8;
+
+ err = tdh_phymem_page_reclaim(page, &rcx, &rdx, &r8);
+
+ /*
+ * No need to check for TDX_OPERAND_BUSY; all TD pages are freed
+ * before the HKID is released and control pages have also been
+ * released at this point, so there is no possibility of contention.
+ */
+ if (WARN_ON_ONCE(err)) {
+ pr_tdx_error_3(TDH_PHYMEM_PAGE_RECLAIM, err, rcx, rdx, r8);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int tdx_reclaim_page(struct page *page)
+{
+ int r;
+
+ r = __tdx_reclaim_page(page);
+ if (!r)
+ tdx_clear_page(page);
+ return r;
+}
+
+
+/*
+ * Reclaim the TD control page(s) which are crypto-protected by the TDX guest's
+ * private KeyID. Assume the cache associated with the TDX private KeyID has
+ * been flushed.
+ */
+static void tdx_reclaim_control_page(struct page *ctrl_page)
+{
+ /*
+ * Leak the page if the kernel failed to reclaim the page.
+ * The kernel cannot use it safely anymore.
+ */
+ if (tdx_reclaim_page(ctrl_page))
+ return;
+
+ __free_page(ctrl_page);
+}
+
+struct tdx_flush_vp_arg {
+ struct kvm_vcpu *vcpu;
+ u64 err;
+};
+
+static void tdx_flush_vp(void *_arg)
+{
+ struct tdx_flush_vp_arg *arg = _arg;
+ struct kvm_vcpu *vcpu = arg->vcpu;
+ u64 err;
+
+ arg->err = 0;
+ lockdep_assert_irqs_disabled();
+
+ /* Task migration can race with CPU offlining. */
+ if (unlikely(vcpu->cpu != raw_smp_processor_id()))
+ return;
+
+ /*
+ * No need to do TDH_VP_FLUSH if the vCPU hasn't been initialized. The
+ * list tracking still needs to be updated so that it's correct if/when
+ * the vCPU does get initialized.
+ */
+ if (to_tdx(vcpu)->state != VCPU_TD_STATE_UNINITIALIZED) {
+ /*
+ * No need to retry. TDX resources needed for TDH.VP.FLUSH are:
+ * TDVPR as exclusive, TDR as shared, and TDCS as shared. This
+ * flush function is called when destroying a vCPU/TD or during vCPU
+ * migration. No other thread uses TDVPR in those cases.
+ */
+ err = tdh_vp_flush(&to_tdx(vcpu)->vp);
+ if (unlikely(err && err != TDX_VCPU_NOT_ASSOCIATED)) {
+ /*
+ * This function is called in IPI context. Do not use
+ * printk to avoid console semaphore.
+ * The caller prints out the error message, instead.
+ */
+ if (err)
+ arg->err = err;
+ }
+ }
+
+ tdx_disassociate_vp(vcpu);
+}
+
+static void tdx_flush_vp_on_cpu(struct kvm_vcpu *vcpu)
+{
+ struct tdx_flush_vp_arg arg = {
+ .vcpu = vcpu,
+ };
+ int cpu = vcpu->cpu;
+
+ if (unlikely(cpu == -1))
+ return;
+
+ smp_call_function_single(cpu, tdx_flush_vp, &arg, 1);
+ if (KVM_BUG_ON(arg.err, vcpu->kvm))
+ pr_tdx_error(TDH_VP_FLUSH, arg.err);
+}
+
+void tdx_disable_virtualization_cpu(void)
+{
+ int cpu = raw_smp_processor_id();
+ struct list_head *tdvcpus = &per_cpu(associated_tdvcpus, cpu);
+ struct tdx_flush_vp_arg arg;
+ struct vcpu_tdx *tdx, *tmp;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ /* Safe variant needed as tdx_disassociate_vp() deletes the entry. */
+ list_for_each_entry_safe(tdx, tmp, tdvcpus, cpu_list) {
+ arg.vcpu = &tdx->vcpu;
+ tdx_flush_vp(&arg);
+ }
+ local_irq_restore(flags);
+}
+
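+/*
+ * Retry budget for SEAMCALLs that can return TDX_INTERRUPTED_RESUMABLE,
+ * currently only TDH.PHYMEM.CACHE.WB below; it simply bounds the retry loop.
+ */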
+#define TDX_SEAMCALL_RETRIES 10000
+
+static void smp_func_do_phymem_cache_wb(void *unused)
+{
+ u64 err = 0;
+ bool resume;
+ int i;
+
+ /*
+ * TDH.PHYMEM.CACHE.WB flushes caches associated with any TDX private
+ * KeyID on the package or core. The TDX module may not finish the
+ * cache flush but return TDX_INTERRUPTED_RESUMABLE instead. The
+ * kernel should retry it until it returns success, without rescheduling.
+ */
+ for (i = TDX_SEAMCALL_RETRIES; i > 0; i--) {
+ resume = !!err;
+ err = tdh_phymem_cache_wb(resume);
+ switch (err) {
+ case TDX_INTERRUPTED_RESUMABLE:
+ continue;
+ case TDX_NO_HKID_READY_TO_WBCACHE:
+ err = TDX_SUCCESS; /* Already done by other thread */
+ fallthrough;
+ default:
+ goto out;
+ }
+ }
+
+out:
+ if (WARN_ON_ONCE(err))
+ pr_tdx_error(TDH_PHYMEM_CACHE_WB, err);
+}
+
+void tdx_mmu_release_hkid(struct kvm *kvm)
+{
+ bool packages_allocated, targets_allocated;
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ cpumask_var_t packages, targets;
+ struct kvm_vcpu *vcpu;
+ unsigned long j;
+ int i;
+ u64 err;
+
+ if (!is_hkid_assigned(kvm_tdx))
+ return;
+
+ packages_allocated = zalloc_cpumask_var(&packages, GFP_KERNEL);
+ targets_allocated = zalloc_cpumask_var(&targets, GFP_KERNEL);
+ cpus_read_lock();
+
+ kvm_for_each_vcpu(j, vcpu, kvm)
+ tdx_flush_vp_on_cpu(vcpu);
+
+ /*
+ * TDH.PHYMEM.CACHE.WB tries to acquire the TDX module global lock
+ * and can fail with TDX_OPERAND_BUSY when it fails to get the lock.
+ * Multiple TDX guests can be destroyed simultaneously. Take the
+ * mutex to serialize them and avoid that error.
+ */
+ mutex_lock(&tdx_lock);
+
+ /*
+ * Releasing the HKID is done in vm_destroy().
+ * After the vCPU flushes above, there should be no more vCPU
+ * associations, as all vCPU fds have been released at this stage.
+ */
+ err = tdh_mng_vpflushdone(&kvm_tdx->td);
+ if (err == TDX_FLUSHVP_NOT_DONE)
+ goto out;
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error(TDH_MNG_VPFLUSHDONE, err);
+ pr_err("tdh_mng_vpflushdone() failed. HKID %d is leaked.\n",
+ kvm_tdx->hkid);
+ goto out;
+ }
+
+ for_each_online_cpu(i) {
+ if (packages_allocated &&
+ cpumask_test_and_set_cpu(topology_physical_package_id(i),
+ packages))
+ continue;
+ if (targets_allocated)
+ cpumask_set_cpu(i, targets);
+ }
+ if (targets_allocated)
+ on_each_cpu_mask(targets, smp_func_do_phymem_cache_wb, NULL, true);
+ else
+ on_each_cpu(smp_func_do_phymem_cache_wb, NULL, true);
+ /*
+ * If smp_func_do_phymem_cache_wb() failed, the following
+ * tdh_mng_key_freeid() will fail as well.
+ */
+ err = tdh_mng_key_freeid(&kvm_tdx->td);
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error(TDH_MNG_KEY_FREEID, err);
+ pr_err("tdh_mng_key_freeid() failed. HKID %d is leaked.\n",
+ kvm_tdx->hkid);
+ } else {
+ tdx_hkid_free(kvm_tdx);
+ }
+
+out:
+ mutex_unlock(&tdx_lock);
+ cpus_read_unlock();
+ free_cpumask_var(targets);
+ free_cpumask_var(packages);
+}
+
+static void tdx_reclaim_td_control_pages(struct kvm *kvm)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ u64 err;
+ int i;
+
+ /*
+ * tdx_mmu_release_hkid() failed to reclaim the HKID. Something went
+ * seriously wrong with the TDX module. Give up freeing the TD pages.
+ * That function already warned, so don't warn again here.
+ */
+ if (is_hkid_assigned(kvm_tdx))
+ return;
+
+ if (kvm_tdx->td.tdcs_pages) {
+ for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
+ if (!kvm_tdx->td.tdcs_pages[i])
+ continue;
+
+ tdx_reclaim_control_page(kvm_tdx->td.tdcs_pages[i]);
+ }
+ kfree(kvm_tdx->td.tdcs_pages);
+ kvm_tdx->td.tdcs_pages = NULL;
+ }
+
+ if (!kvm_tdx->td.tdr_page)
+ return;
+
+ if (__tdx_reclaim_page(kvm_tdx->td.tdr_page))
+ return;
+
+ /*
+ * Use a SEAMCALL to ask the TDX module to flush the cache based on the
+ * KeyID. The TDX module may access the TDR while operating on the TD
+ * (especially when it is reclaiming the TDCS).
+ */
+ err = tdh_phymem_page_wbinvd_tdr(&kvm_tdx->td);
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
+ return;
+ }
+ tdx_clear_page(kvm_tdx->td.tdr_page);
+
+ __free_page(kvm_tdx->td.tdr_page);
+ kvm_tdx->td.tdr_page = NULL;
+}
+
+void tdx_vm_destroy(struct kvm *kvm)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+
+ tdx_reclaim_td_control_pages(kvm);
+
+ kvm_tdx->state = TD_STATE_UNINITIALIZED;
+}
+
+static int tdx_do_tdh_mng_key_config(void *param)
+{
+ struct kvm_tdx *kvm_tdx = param;
+ u64 err;
+
+ /* TDX_RND_NO_ENTROPY related retries are handled by sc_retry() */
+ err = tdh_mng_key_config(&kvm_tdx->td);
+
+ if (KVM_BUG_ON(err, &kvm_tdx->kvm)) {
+ pr_tdx_error(TDH_MNG_KEY_CONFIG, err);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int tdx_vm_init(struct kvm *kvm)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+
+ kvm->arch.has_protected_state = true;
+ kvm->arch.has_private_mem = true;
+ kvm->arch.disabled_quirks |= KVM_X86_QUIRK_IGNORE_GUEST_PAT;
+
+ /*
+ * Because the guest TD is protected, the VMM can't parse instructions
+ * executed in the TD. Instead, the guest uses the MMIO hypercall. For
+ * unmodified device drivers, a #VE needs to be injected for MMIO and the
+ * #VE handler in the TD converts the MMIO instruction into an MMIO
+ * hypercall.
+ *
+ * The SPTE value for MMIO needs to be set up so that #VE is injected
+ * into the TD instead of triggering EPT MISCONFIG:
+ * - RWX=0 so that an EPT violation is triggered.
+ * - the suppress-#VE bit is cleared to inject #VE.
+ */
+ kvm_mmu_set_mmio_spte_value(kvm, 0);
+
+ /*
+ * TDX has its own limit on the maximum number of vCPUs it can support
+ * for all TDX guests, in addition to KVM_MAX_VCPUS. The TDX module
+ * reports this limit via the MAX_VCPU_PER_TD global metadata. In
+ * practice, it reflects the number of logical CPUs that ALL
+ * platforms that the TDX module supports can possibly have.
+ *
+ * Limit TDX guest's maximum vCPUs to the number of logical CPUs
+ * the platform has. Simply forwarding the MAX_VCPU_PER_TD to
+ * userspace would result in an unpredictable ABI.
+ */
+ kvm->max_vcpus = min_t(int, kvm->max_vcpus, num_present_cpus());
+
+ kvm_tdx->state = TD_STATE_UNINITIALIZED;
+
+ return 0;
+}
+
+int tdx_vcpu_create(struct kvm_vcpu *vcpu)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+
+ if (kvm_tdx->state != TD_STATE_INITIALIZED)
+ return -EIO;
+
+ /*
+ * TDX module mandates APICv, which requires an in-kernel local APIC.
+ * Disallow an in-kernel I/O APIC, because level-triggered interrupts
+ * and thus the I/O APIC as a whole can't be faithfully emulated in KVM.
+ */
+ if (!irqchip_split(vcpu->kvm))
+ return -EINVAL;
+
+ fpstate_set_confidential(&vcpu->arch.guest_fpu);
+ vcpu->arch.apic->guest_apic_protected = true;
+ INIT_LIST_HEAD(&tdx->vt.pi_wakeup_list);
+
+ vcpu->arch.efer = EFER_SCE | EFER_LME | EFER_LMA | EFER_NX;
+
+ vcpu->arch.switch_db_regs = KVM_DEBUGREG_AUTO_SWITCH;
+ vcpu->arch.cr0_guest_owned_bits = -1ul;
+ vcpu->arch.cr4_guest_owned_bits = -1ul;
+
+ /* KVM can't change TSC offset/multiplier as TDX module manages them. */
+ vcpu->arch.guest_tsc_protected = true;
+ vcpu->arch.tsc_offset = kvm_tdx->tsc_offset;
+ vcpu->arch.l1_tsc_offset = vcpu->arch.tsc_offset;
+ vcpu->arch.tsc_scaling_ratio = kvm_tdx->tsc_multiplier;
+ vcpu->arch.l1_tsc_scaling_ratio = kvm_tdx->tsc_multiplier;
+
+ vcpu->arch.guest_state_protected =
+ !(to_kvm_tdx(vcpu->kvm)->attributes & TDX_TD_ATTR_DEBUG);
+
+ if ((kvm_tdx->xfam & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE)
+ vcpu->arch.xfd_no_write_intercept = true;
+
+ tdx->vt.pi_desc.nv = POSTED_INTR_VECTOR;
+ __pi_set_sn(&tdx->vt.pi_desc);
+
+ tdx->state = VCPU_TD_STATE_UNINITIALIZED;
+
+ return 0;
+}
+
+void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+
+ vmx_vcpu_pi_load(vcpu, cpu);
+ if (vcpu->cpu == cpu || !is_hkid_assigned(to_kvm_tdx(vcpu->kvm)))
+ return;
+
+ tdx_flush_vp_on_cpu(vcpu);
+
+ KVM_BUG_ON(cpu != raw_smp_processor_id(), vcpu->kvm);
+ local_irq_disable();
+ /*
+ * Pairs with the smp_wmb() in tdx_disassociate_vp() to ensure
+ * vcpu->cpu is read before tdx->cpu_list.
+ */
+ smp_rmb();
+
+ list_add(&tdx->cpu_list, &per_cpu(associated_tdvcpus, cpu));
+ local_irq_enable();
+}
+
+bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+ /*
+ * KVM can't get the interrupt status of a TDX guest, so it assumes an
+ * interrupt is always allowed unless the TDX guest calls TDVMCALL with
+ * HLT, which passes the interrupt-blocked flag.
+ */
+ return vmx_get_exit_reason(vcpu).basic != EXIT_REASON_HLT ||
+ !to_tdx(vcpu)->vp_enter_args.r12;
+}
+
+bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ u64 vcpu_state_details;
+
+ if (pi_has_pending_interrupt(vcpu))
+ return true;
+
+ /*
+ * Only check RVI pending for HALTED case with IRQ enabled.
+ * For non-HLT cases, KVM doesn't care about STI/SS shadows. And if the
+ * interrupt was pending before TD exit, then it _must_ be blocked,
+ * otherwise the interrupt would have been serviced at the instruction
+ * boundary.
+ */
+ if (vmx_get_exit_reason(vcpu).basic != EXIT_REASON_HLT ||
+ to_tdx(vcpu)->vp_enter_args.r12)
+ return false;
+
+ vcpu_state_details =
+ td_state_non_arch_read64(to_tdx(vcpu), TD_VCPU_STATE_DETAILS_NON_ARCH);
+
+ return tdx_vcpu_state_details_intr_pending(vcpu_state_details);
+}
+
+/*
+ * Compared to vmx_prepare_switch_to_guest(), there is not much to do
+ * as SEAMCALL/SEAMRET calls take care of most of the save and restore.
+ */
+void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vt *vt = to_vt(vcpu);
+
+ if (vt->guest_state_loaded)
+ return;
+
+ if (likely(is_64bit_mm(current->mm)))
+ vt->msr_host_kernel_gs_base = current->thread.gsbase;
+ else
+ vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+
+ vt->host_debugctlmsr = get_debugctlmsr();
+
+ vt->guest_state_loaded = true;
+}
+
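+/*
+ * User-return MSRs whose cached values are refreshed with .defval after a TD
+ * entry (see tdx_user_return_msr_update_cache()), so the user-return MSR
+ * machinery lazily restores the userspace values on return to userspace.
+ */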
+struct tdx_uret_msr {
+ u32 msr;
+ unsigned int slot;
+ u64 defval;
+};
+
+static struct tdx_uret_msr tdx_uret_msrs[] = {
+ {.msr = MSR_SYSCALL_MASK, .defval = 0x20200 },
+ {.msr = MSR_STAR,},
+ {.msr = MSR_LSTAR,},
+ {.msr = MSR_TSC_AUX,},
+};
+
+static void tdx_user_return_msr_update_cache(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++)
+ kvm_user_return_msr_update_cache(tdx_uret_msrs[i].slot,
+ tdx_uret_msrs[i].defval);
+}
+
+static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vt *vt = to_vt(vcpu);
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+
+ if (!vt->guest_state_loaded)
+ return;
+
+ ++vcpu->stat.host_state_reload;
+ wrmsrl(MSR_KERNEL_GS_BASE, vt->msr_host_kernel_gs_base);
+
+ if (tdx->guest_entered) {
+ tdx_user_return_msr_update_cache();
+ tdx->guest_entered = false;
+ }
+
+ vt->guest_state_loaded = false;
+}
+
+void tdx_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ vmx_vcpu_pi_put(vcpu);
+ tdx_prepare_switch_to_host(vcpu);
+}
+
+void tdx_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+ int i;
+
+ /*
+ * It is not possible to reclaim pages while hkid is assigned. It might
+ * be assigned if:
+ * 1. the TD VM is being destroyed but freeing hkid failed, in which
+ * case the pages are leaked
+ * 2. TD vCPU creation failed and this is the error path, in which case
+ * there is nothing to do anyway
+ */
+ if (is_hkid_assigned(kvm_tdx))
+ return;
+
+ if (tdx->vp.tdcx_pages) {
+ for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
+ if (tdx->vp.tdcx_pages[i])
+ tdx_reclaim_control_page(tdx->vp.tdcx_pages[i]);
+ }
+ kfree(tdx->vp.tdcx_pages);
+ tdx->vp.tdcx_pages = NULL;
+ }
+ if (tdx->vp.tdvpr_page) {
+ tdx_reclaim_control_page(tdx->vp.tdvpr_page);
+ tdx->vp.tdvpr_page = NULL;
+ }
+
+ tdx->state = VCPU_TD_STATE_UNINITIALIZED;
+}
+
+int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu)
+{
+ if (unlikely(to_tdx(vcpu)->state != VCPU_TD_STATE_INITIALIZED ||
+ to_kvm_tdx(vcpu->kvm)->state != TD_STATE_RUNNABLE))
+ return -EINVAL;
+
+ return 1;
+}
+
+static __always_inline u32 tdcall_to_vmx_exit_reason(struct kvm_vcpu *vcpu)
+{
+ switch (tdvmcall_leaf(vcpu)) {
+ case EXIT_REASON_CPUID:
+ case EXIT_REASON_HLT:
+ case EXIT_REASON_IO_INSTRUCTION:
+ case EXIT_REASON_MSR_READ:
+ case EXIT_REASON_MSR_WRITE:
+ return tdvmcall_leaf(vcpu);
+ case EXIT_REASON_EPT_VIOLATION:
+ return EXIT_REASON_EPT_MISCONFIG;
+ default:
+ break;
+ }
+
+ return EXIT_REASON_TDCALL;
+}
+
+static __always_inline u32 tdx_to_vmx_exit_reason(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+ u32 exit_reason;
+
+ switch (tdx->vp_enter_ret & TDX_SEAMCALL_STATUS_MASK) {
+ case TDX_SUCCESS:
+ case TDX_NON_RECOVERABLE_VCPU:
+ case TDX_NON_RECOVERABLE_TD:
+ case TDX_NON_RECOVERABLE_TD_NON_ACCESSIBLE:
+ case TDX_NON_RECOVERABLE_TD_WRONG_APIC_MODE:
+ break;
+ default:
+ return -1u;
+ }
+
+ exit_reason = tdx->vp_enter_ret;
+
+ switch (exit_reason) {
+ case EXIT_REASON_TDCALL:
+ if (tdvmcall_exit_type(vcpu))
+ return EXIT_REASON_VMCALL;
+
+ return tdcall_to_vmx_exit_reason(vcpu);
+ case EXIT_REASON_EPT_MISCONFIG:
+ /*
+ * Defer KVM_BUG_ON() until tdx_handle_exit() because this is in
+ * non-instrumentable code with interrupts disabled.
+ */
+ return -1u;
+ default:
+ break;
+ }
+
+ return exit_reason;
+}
+
+static noinstr void tdx_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+ struct vcpu_vt *vt = to_vt(vcpu);
+
+ guest_state_enter_irqoff();
+
+ tdx->vp_enter_ret = tdh_vp_enter(&tdx->vp, &tdx->vp_enter_args);
+
+ vt->exit_reason.full = tdx_to_vmx_exit_reason(vcpu);
+
+ vt->exit_qualification = tdx->vp_enter_args.rcx;
+ tdx->ext_exit_qualification = tdx->vp_enter_args.rdx;
+ tdx->exit_gpa = tdx->vp_enter_args.r8;
+ vt->exit_intr_info = tdx->vp_enter_args.r9;
+
+ vmx_handle_nmi(vcpu);
+
+ guest_state_exit_irqoff();
+}
+
+static bool tdx_failed_vmentry(struct kvm_vcpu *vcpu)
+{
+ return vmx_get_exit_reason(vcpu).failed_vmentry &&
+ vmx_get_exit_reason(vcpu).full != -1u;
+}
+
+static fastpath_t tdx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+{
+ u64 vp_enter_ret = to_tdx(vcpu)->vp_enter_ret;
+
+ /*
+ * TDX_OPERAND_BUSY could be returned for SEPT due to 0-step mitigation
+ * or for TD EPOCH due to contention with TDH.MEM.TRACK on TDH.VP.ENTER.
+ *
+ * When KVM requests KVM_REQ_OUTSIDE_GUEST_MODE, which has both
+ * KVM_REQUEST_WAIT and KVM_REQUEST_NO_ACTION set, it requires the target
+ * vCPUs to leave the fastpath so that interrupts can be enabled and the
+ * IPIs can be delivered. Return EXIT_FASTPATH_EXIT_HANDLED instead of
+ * EXIT_FASTPATH_REENTER_GUEST to leave the fastpath, otherwise the
+ * requester may be blocked endlessly.
+ */
+ if (unlikely(tdx_operand_busy(vp_enter_ret)))
+ return EXIT_FASTPATH_EXIT_HANDLED;
+
+ return EXIT_FASTPATH_NONE;
+}
+
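+/*
+ * Register-availability mask applied after a TD entry: tdx_vcpu_run() clears
+ * availability of anything outside this set (notably RIP and RSP, which are
+ * not accessible to KVM for a protected guest).
+ */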
+#define TDX_REGS_AVAIL_SET (BIT_ULL(VCPU_EXREG_EXIT_INFO_1) | \
+ BIT_ULL(VCPU_EXREG_EXIT_INFO_2) | \
+ BIT_ULL(VCPU_REGS_RAX) | \
+ BIT_ULL(VCPU_REGS_RBX) | \
+ BIT_ULL(VCPU_REGS_RCX) | \
+ BIT_ULL(VCPU_REGS_RDX) | \
+ BIT_ULL(VCPU_REGS_RBP) | \
+ BIT_ULL(VCPU_REGS_RSI) | \
+ BIT_ULL(VCPU_REGS_RDI) | \
+ BIT_ULL(VCPU_REGS_R8) | \
+ BIT_ULL(VCPU_REGS_R9) | \
+ BIT_ULL(VCPU_REGS_R10) | \
+ BIT_ULL(VCPU_REGS_R11) | \
+ BIT_ULL(VCPU_REGS_R12) | \
+ BIT_ULL(VCPU_REGS_R13) | \
+ BIT_ULL(VCPU_REGS_R14) | \
+ BIT_ULL(VCPU_REGS_R15))
+
+static void tdx_load_host_xsave_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
+
+ /*
+ * All TDX hosts support PKRU; but even if they didn't,
+ * vcpu->arch.host_pkru would be 0 and the wrpkru would be
+ * skipped.
+ */
+ if (vcpu->arch.host_pkru != 0)
+ wrpkru(vcpu->arch.host_pkru);
+
+ if (kvm_host.xcr0 != (kvm_tdx->xfam & kvm_caps.supported_xcr0))
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
+
+ /*
+ * Likewise, even if a TDX host didn't support XSS, both arms of
+ * the comparison would be 0 and the wrmsrl would be skipped.
+ */
+ if (kvm_host.xss != (kvm_tdx->xfam & kvm_caps.supported_xss))
+ wrmsrl(MSR_IA32_XSS, kvm_host.xss);
+}
+
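+/*
+ * DEBUGCTL bits assumed to be preserved across TD entry; tdx_vcpu_run()
+ * rewrites the host value after exit only if the host had bits set outside
+ * this set.
+ */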
+#define TDX_DEBUGCTL_PRESERVED (DEBUGCTLMSR_BTF | \
+ DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI | \
+ DEBUGCTLMSR_FREEZE_IN_SMM)
+
+fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+ struct vcpu_vt *vt = to_vt(vcpu);
+
+ /*
+ * force_immediate_exit requires entering the vCPU to inject events and
+ * then exiting immediately. But the TDX module doesn't guarantee entry;
+ * it's possible for KVM to _think_ it completely entered the guest
+ * without actually having done so.
+ * Since KVM never needs to force an immediate exit for TDX, and can't
+ * do direct injection, just warn on force_immediate_exit.
+ */
+ WARN_ON_ONCE(force_immediate_exit);
+
+ /*
+ * Wait until retry of SEPT-zap-related SEAMCALL completes before
+ * allowing vCPU entry to avoid contention with tdh_vp_enter() and
+ * TDCALLs.
+ */
+ if (unlikely(READ_ONCE(to_kvm_tdx(vcpu->kvm)->wait_for_sept_zap)))
+ return EXIT_FASTPATH_EXIT_HANDLED;
+
+ trace_kvm_entry(vcpu, force_immediate_exit);
+
+ if (pi_test_on(&vt->pi_desc)) {
+ apic->send_IPI_self(POSTED_INTR_VECTOR);
+
+ if (pi_test_pir(kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVTT) &
+ APIC_VECTOR_MASK, &vt->pi_desc))
+ kvm_wait_lapic_expire(vcpu);
+ }
+
+ tdx_vcpu_enter_exit(vcpu);
+
+ if (vt->host_debugctlmsr & ~TDX_DEBUGCTL_PRESERVED)
+ update_debugctlmsr(vt->host_debugctlmsr);
+
+ tdx_load_host_xsave_state(vcpu);
+ tdx->guest_entered = true;
+
+ vcpu->arch.regs_avail &= TDX_REGS_AVAIL_SET;
+
+ if (unlikely(tdx->vp_enter_ret == EXIT_REASON_EPT_MISCONFIG))
+ return EXIT_FASTPATH_NONE;
+
+ if (unlikely((tdx->vp_enter_ret & TDX_SW_ERROR) == TDX_SW_ERROR))
+ return EXIT_FASTPATH_NONE;
+
+ if (unlikely(vmx_get_exit_reason(vcpu).basic == EXIT_REASON_MCE_DURING_VMENTRY))
+ kvm_machine_check();
+
+ trace_kvm_exit(vcpu, KVM_ISA_VMX);
+
+ if (unlikely(tdx_failed_vmentry(vcpu)))
+ return EXIT_FASTPATH_NONE;
+
+ return tdx_exit_handlers_fastpath(vcpu);
+}
+
+void tdx_inject_nmi(struct kvm_vcpu *vcpu)
+{
+ ++vcpu->stat.nmi_injections;
+ td_management_write8(to_tdx(vcpu), TD_VCPU_PEND_NMI, 1);
+ /*
+ * From KVM's perspective, NMI injection is completed right after
+ * writing to PEND_NMI. KVM doesn't care whether an NMI is injected by
+ * the TDX module or not.
+ */
+ vcpu->arch.nmi_injected = false;
+ /*
+ * TDX doesn't allow KVM to request an NMI-window exit. If there is
+ * still a pending vNMI, KVM is not able to inject it back-to-back with
+ * the one pending in the TDX module. Since the previous vNMI is still
+ * pending in the TDX module, i.e. it has not been delivered to the
+ * TDX guest yet, it's OK to collapse the pending vNMI into the
+ * previous one. The guest is expected to handle all the NMI sources
+ * when handling the first vNMI.
+ */
+ vcpu->arch.nmi_pending = 0;
+}
+
+static int tdx_handle_exception_nmi(struct kvm_vcpu *vcpu)
+{
+ u32 intr_info = vmx_get_intr_info(vcpu);
+
+ /*
+ * Machine checks are handled by handle_exception_irqoff(), or by
+ * tdx_handle_exit() with TDX_NON_RECOVERABLE set if a #MC occurs on
+ * VM-Entry. NMIs are handled by tdx_vcpu_enter_exit().
+ */
+ if (is_nmi(intr_info) || is_machine_check(intr_info))
+ return 1;
+
+ vcpu->run->exit_reason = KVM_EXIT_EXCEPTION;
+ vcpu->run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
+ vcpu->run->ex.error_code = 0;
+
+ return 0;
+}
+
+static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
+{
+ tdvmcall_set_return_code(vcpu, vcpu->run->hypercall.ret);
+ return 1;
+}
+
+static int tdx_emulate_vmcall(struct kvm_vcpu *vcpu)
+{
+ kvm_rax_write(vcpu, to_tdx(vcpu)->vp_enter_args.r10);
+ kvm_rbx_write(vcpu, to_tdx(vcpu)->vp_enter_args.r11);
+ kvm_rcx_write(vcpu, to_tdx(vcpu)->vp_enter_args.r12);
+ kvm_rdx_write(vcpu, to_tdx(vcpu)->vp_enter_args.r13);
+ kvm_rsi_write(vcpu, to_tdx(vcpu)->vp_enter_args.r14);
+
+ return __kvm_emulate_hypercall(vcpu, 0, complete_hypercall_exit);
+}
+
+/*
+ * Split into chunks and check for pending interrupts between chunks. This allows
+ * for timely injection of interrupts to prevent issues with guest lockup
+ * detection.
+ */
+#define TDX_MAP_GPA_MAX_LEN (2 * 1024 * 1024)
+static void __tdx_map_gpa(struct vcpu_tdx *tdx);
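+/*
+ * For example, a 16 MB TDVMCALL_MAP_GPA request is serviced as eight 2 MB
+ * KVM_HC_MAP_GPA_RANGE exits to userspace; map_gpa_next advances by
+ * TDX_MAP_GPA_MAX_LEN after each completed chunk, and the guest is asked to
+ * retry from map_gpa_next if an interrupt becomes pending in between.
+ */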
+
+static int tdx_complete_vmcall_map_gpa(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+
+ if (vcpu->run->hypercall.ret) {
+ tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
+ tdx->vp_enter_args.r11 = tdx->map_gpa_next;
+ return 1;
+ }
+
+ tdx->map_gpa_next += TDX_MAP_GPA_MAX_LEN;
+ if (tdx->map_gpa_next >= tdx->map_gpa_end)
+ return 1;
+
+ /*
+ * Stop processing the remaining part if there is a pending interrupt,
+ * which could qualify for delivery. Skip checking pending RVI for
+ * TDVMCALL_MAP_GPA, see comments in tdx_protected_apic_has_interrupt().
+ */
+ if (kvm_vcpu_has_events(vcpu)) {
+ tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_RETRY);
+ tdx->vp_enter_args.r11 = tdx->map_gpa_next;
+ return 1;
+ }
+
+ __tdx_map_gpa(tdx);
+ return 0;
+}
+
+static void __tdx_map_gpa(struct vcpu_tdx *tdx)
+{
+ u64 gpa = tdx->map_gpa_next;
+ u64 size = tdx->map_gpa_end - tdx->map_gpa_next;
+
+ if (size > TDX_MAP_GPA_MAX_LEN)
+ size = TDX_MAP_GPA_MAX_LEN;
+
+ tdx->vcpu.run->exit_reason = KVM_EXIT_HYPERCALL;
+ tdx->vcpu.run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
+ /*
+ * In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2)
+ * assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that
+ * it was always zero on KVM_EXIT_HYPERCALL. Since KVM is now overwriting
+ * vcpu->run->hypercall.ret, ensure that it is zero so as not to break QEMU.
+ */
+ tdx->vcpu.run->hypercall.ret = 0;
+ tdx->vcpu.run->hypercall.args[0] = gpa & ~gfn_to_gpa(kvm_gfn_direct_bits(tdx->vcpu.kvm));
+ tdx->vcpu.run->hypercall.args[1] = size / PAGE_SIZE;
+ tdx->vcpu.run->hypercall.args[2] = vt_is_tdx_private_gpa(tdx->vcpu.kvm, gpa) ?
+ KVM_MAP_GPA_RANGE_ENCRYPTED :
+ KVM_MAP_GPA_RANGE_DECRYPTED;
+ tdx->vcpu.run->hypercall.flags = KVM_EXIT_HYPERCALL_LONG_MODE;
+
+ tdx->vcpu.arch.complete_userspace_io = tdx_complete_vmcall_map_gpa;
+}
+
+static int tdx_map_gpa(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+ u64 gpa = tdx->vp_enter_args.r12;
+ u64 size = tdx->vp_enter_args.r13;
+ u64 ret;
+
+ /*
+ * Converting TDVMCALL_MAP_GPA to KVM_HC_MAP_GPA_RANGE requires
+ * userspace to enable KVM_CAP_EXIT_HYPERCALL with KVM_HC_MAP_GPA_RANGE
+ * bit set. If not, the error code is not defined in GHCI for TDX, use
+ * TDVMCALL_STATUS_INVALID_OPERAND for this case.
+ */
+ if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) {
+ ret = TDVMCALL_STATUS_INVALID_OPERAND;
+ goto error;
+ }
+
+ if (gpa + size <= gpa || !kvm_vcpu_is_legal_gpa(vcpu, gpa) ||
+ !kvm_vcpu_is_legal_gpa(vcpu, gpa + size - 1) ||
+ (vt_is_tdx_private_gpa(vcpu->kvm, gpa) !=
+ vt_is_tdx_private_gpa(vcpu->kvm, gpa + size - 1))) {
+ ret = TDVMCALL_STATUS_INVALID_OPERAND;
+ goto error;
+ }
+
+ if (!PAGE_ALIGNED(gpa) || !PAGE_ALIGNED(size)) {
+ ret = TDVMCALL_STATUS_ALIGN_ERROR;
+ goto error;
+ }
+
+ tdx->map_gpa_end = gpa + size;
+ tdx->map_gpa_next = gpa;
+
+ __tdx_map_gpa(tdx);
+ return 0;
+
+error:
+ tdvmcall_set_return_code(vcpu, ret);
+ tdx->vp_enter_args.r11 = gpa;
+ return 1;
+}
+
+static int tdx_report_fatal_error(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+ u64 *regs = vcpu->run->system_event.data;
+ u64 *module_regs = &tdx->vp_enter_args.r8;
+ int index = VCPU_REGS_RAX;
+
+ vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+ vcpu->run->system_event.type = KVM_SYSTEM_EVENT_TDX_FATAL;
+ vcpu->run->system_event.ndata = 16;
+
+ /* Dump 16 general-purpose registers to userspace in ascending order. */
+ regs[index++] = tdx->vp_enter_ret;
+ regs[index++] = tdx->vp_enter_args.rcx;
+ regs[index++] = tdx->vp_enter_args.rdx;
+ regs[index++] = tdx->vp_enter_args.rbx;
+ regs[index++] = 0;
+ regs[index++] = 0;
+ regs[index++] = tdx->vp_enter_args.rsi;
+ regs[index] = tdx->vp_enter_args.rdi;
+ for (index = 0; index < 8; index++)
+ regs[VCPU_REGS_R8 + index] = module_regs[index];
+
+ return 0;
+}
+
+static int tdx_emulate_cpuid(struct kvm_vcpu *vcpu)
+{
+ u32 eax, ebx, ecx, edx;
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+
+ /* EAX and ECX for CPUID are stored in R12 and R13. */
+ eax = tdx->vp_enter_args.r12;
+ ecx = tdx->vp_enter_args.r13;
+
+ kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
+
+ tdx->vp_enter_args.r12 = eax;
+ tdx->vp_enter_args.r13 = ebx;
+ tdx->vp_enter_args.r14 = ecx;
+ tdx->vp_enter_args.r15 = edx;
+
+ return 1;
+}
+
+static int tdx_complete_pio_out(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.pio.count = 0;
+ return 1;
+}
+
+static int tdx_complete_pio_in(struct kvm_vcpu *vcpu)
+{
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+ unsigned long val = 0;
+ int ret;
+
+ ret = ctxt->ops->pio_in_emulated(ctxt, vcpu->arch.pio.size,
+ vcpu->arch.pio.port, &val, 1);
+
+ WARN_ON_ONCE(!ret);
+
+ tdvmcall_set_return_val(vcpu, val);
+
+ return 1;
+}
+
+static int tdx_emulate_io(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+ unsigned long val = 0;
+ unsigned int port;
+ u64 size, write;
+ int ret;
+
+ ++vcpu->stat.io_exits;
+
+ size = tdx->vp_enter_args.r12;
+ write = tdx->vp_enter_args.r13;
+ port = tdx->vp_enter_args.r14;
+
+ if ((write != 0 && write != 1) || (size != 1 && size != 2 && size != 4)) {
+ tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
+ return 1;
+ }
+
+ if (write) {
+ val = tdx->vp_enter_args.r15;
+ ret = ctxt->ops->pio_out_emulated(ctxt, size, port, &val, 1);
+ } else {
+ ret = ctxt->ops->pio_in_emulated(ctxt, size, port, &val, 1);
+ }
+
+ if (!ret)
+ vcpu->arch.complete_userspace_io = write ? tdx_complete_pio_out :
+ tdx_complete_pio_in;
+ else if (!write)
+ tdvmcall_set_return_val(vcpu, val);
+
+ return ret;
+}
+
+static int tdx_complete_mmio_read(struct kvm_vcpu *vcpu)
+{
+ unsigned long val = 0;
+ gpa_t gpa;
+ int size;
+
+ gpa = vcpu->mmio_fragments[0].gpa;
+ size = vcpu->mmio_fragments[0].len;
+
+ memcpy(&val, vcpu->run->mmio.data, size);
+ tdvmcall_set_return_val(vcpu, val);
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ, size, gpa, &val);
+ return 1;
+}
+
+static inline int tdx_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, int size,
+ unsigned long val)
+{
+ if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
+ trace_kvm_fast_mmio(gpa);
+ return 0;
+ }
+
+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, size, gpa, &val);
+ if (kvm_io_bus_write(vcpu, KVM_MMIO_BUS, gpa, size, &val))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static inline int tdx_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, int size)
+{
+ unsigned long val;
+
+ if (kvm_io_bus_read(vcpu, KVM_MMIO_BUS, gpa, size, &val))
+ return -EOPNOTSUPP;
+
+ tdvmcall_set_return_val(vcpu, val);
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ, size, gpa, &val);
+ return 0;
+}
+
+static int tdx_emulate_mmio(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+ int size, write, r;
+ unsigned long val;
+ gpa_t gpa;
+
+ size = tdx->vp_enter_args.r12;
+ write = tdx->vp_enter_args.r13;
+ gpa = tdx->vp_enter_args.r14;
+ val = write ? tdx->vp_enter_args.r15 : 0;
+
+ if (size != 1 && size != 2 && size != 4 && size != 8)
+ goto error;
+ if (write != 0 && write != 1)
+ goto error;
+
+ /*
+ * TDG.VP.VMCALL<MMIO> allows only shared GPAs; it makes no sense to
+ * do MMIO emulation for a private GPA.
+ */
+ if (vt_is_tdx_private_gpa(vcpu->kvm, gpa) ||
+ vt_is_tdx_private_gpa(vcpu->kvm, gpa + size - 1))
+ goto error;
+
+ gpa = gpa & ~gfn_to_gpa(kvm_gfn_direct_bits(vcpu->kvm));
+
+ if (write)
+ r = tdx_mmio_write(vcpu, gpa, size, val);
+ else
+ r = tdx_mmio_read(vcpu, gpa, size);
+ if (!r)
+ /* Kernel completed device emulation. */
+ return 1;
+
+ /* Request the device emulation to userspace device model. */
+ vcpu->mmio_is_write = write;
+ if (!write)
+ vcpu->arch.complete_userspace_io = tdx_complete_mmio_read;
+
+ vcpu->run->mmio.phys_addr = gpa;
+ vcpu->run->mmio.len = size;
+ vcpu->run->mmio.is_write = write;
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
+
+ if (write) {
+ memcpy(vcpu->run->mmio.data, &val, size);
+ } else {
+ vcpu->mmio_fragments[0].gpa = gpa;
+ vcpu->mmio_fragments[0].len = size;
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, size, gpa, NULL);
+ }
+ return 0;
+
+error:
+ tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
+ return 1;
+}
+
+static int tdx_get_td_vm_call_info(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+
+ if (tdx->vp_enter_args.r12) {
+ tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
+ } else {
+ tdx->vp_enter_args.r11 = 0;
+ tdx->vp_enter_args.r13 = 0;
+ tdx->vp_enter_args.r14 = 0;
+ }
+ return 1;
+}
+
+static int handle_tdvmcall(struct kvm_vcpu *vcpu)
+{
+ switch (tdvmcall_leaf(vcpu)) {
+ case TDVMCALL_MAP_GPA:
+ return tdx_map_gpa(vcpu);
+ case TDVMCALL_REPORT_FATAL_ERROR:
+ return tdx_report_fatal_error(vcpu);
+ case TDVMCALL_GET_TD_VM_CALL_INFO:
+ return tdx_get_td_vm_call_info(vcpu);
+ default:
+ break;
+ }
+
+ tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
+ return 1;
+}
+
+void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level)
+{
+ u64 shared_bit = (pgd_level == 5) ? TDX_SHARED_BIT_PWL_5 :
+ TDX_SHARED_BIT_PWL_4;
+
+ if (KVM_BUG_ON(shared_bit != kvm_gfn_direct_bits(vcpu->kvm), vcpu->kvm))
+ return;
+
+ td_vmcs_write64(to_tdx(vcpu), SHARED_EPT_POINTER, root_hpa);
+}
+
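+/* Drop the reference taken in tdx_sept_set_private_spte() to pin the page. */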
+static void tdx_unpin(struct kvm *kvm, struct page *page)
+{
+ put_page(page);
+}
+
+static int tdx_mem_page_aug(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, struct page *page)
+{
+ int tdx_level = pg_level_to_tdx_sept_level(level);
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ gpa_t gpa = gfn_to_gpa(gfn);
+ u64 entry, level_state;
+ u64 err;
+
+ err = tdh_mem_page_aug(&kvm_tdx->td, gpa, tdx_level, page, &entry, &level_state);
+ if (unlikely(tdx_operand_busy(err))) {
+ tdx_unpin(kvm, page);
+ return -EBUSY;
+ }
+
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error_2(TDH_MEM_PAGE_AUG, err, entry, level_state);
+ tdx_unpin(kvm, page);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * KVM_TDX_INIT_MEM_REGION calls kvm_gmem_populate() to map guest pages; the
+ * callback tdx_gmem_post_populate() then maps pages into private memory
+ * through the SEAMCALL TDH.MEM.PAGE.ADD(). The SEAMCALL also requires the
+ * private EPT structures for the page to have been built before, which is
+ * done via kvm_tdp_map_page(). nr_premapped counts the number of pages that
+ * were added to the EPT structures but not added with TDH.MEM.PAGE.ADD().
+ * The counter has to be zero on KVM_TDX_FINALIZE_VM, to ensure that there
+ * are no half-initialized shared EPT pages.
+ */
+static int tdx_mem_page_record_premap_cnt(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, kvm_pfn_t pfn)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+
+ if (KVM_BUG_ON(kvm->arch.pre_fault_allowed, kvm))
+ return -EINVAL;
+
+ /* nr_premapped will be decreased when tdh_mem_page_add() is called. */
+ atomic64_inc(&kvm_tdx->nr_premapped);
+ return 0;
+}
+
+int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, kvm_pfn_t pfn)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ struct page *page = pfn_to_page(pfn);
+
+ /* TODO: handle large pages. */
+ if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
+ return -EINVAL;
+
+ /*
+ * Because guest_memfd doesn't support page migration with
+ * a_ops->migrate_folio (yet), no callback is triggered for KVM on page
+ * migration. Until guest_memfd supports page migration, prevent page
+ * migration.
+ * TODO: Once guest_memfd introduces callback on page migration,
+ * implement it and remove get_page/put_page().
+ */
+ get_page(page);
+
+ /*
+ * Read 'pre_fault_allowed' before 'kvm_tdx->state'; see matching
+ * barrier in tdx_td_finalize().
+ */
+ smp_rmb();
+ if (likely(kvm_tdx->state == TD_STATE_RUNNABLE))
+ return tdx_mem_page_aug(kvm, gfn, level, page);
+
+ return tdx_mem_page_record_premap_cnt(kvm, gfn, level, pfn);
+}
+
+static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, struct page *page)
+{
+ int tdx_level = pg_level_to_tdx_sept_level(level);
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ gpa_t gpa = gfn_to_gpa(gfn);
+ u64 err, entry, level_state;
+
+ /* TODO: handle large pages. */
+ if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
+ return -EINVAL;
+
+ if (KVM_BUG_ON(!is_hkid_assigned(kvm_tdx), kvm))
+ return -EINVAL;
+
+ /*
+ * When zapping a private page, the write lock is held, so there is no
+ * race with other vCPU SEPT operations. There can still be contention
+ * with TDH.VP.ENTER (due to 0-step mitigation) and guest TDCALLs.
+ */
+ err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
+ &level_state);
+
+ if (unlikely(tdx_operand_busy(err))) {
+ /*
+ * The second retry is expected to succeed after kicking off all
+ * other vCPUs and preventing them from invoking TDH.VP.ENTER.
+ */
+ tdx_no_vcpus_enter_start(kvm);
+ err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
+ &level_state);
+ tdx_no_vcpus_enter_stop(kvm);
+ }
+
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error_2(TDH_MEM_PAGE_REMOVE, err, entry, level_state);
+ return -EIO;
+ }
+
+ err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
+
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
+ return -EIO;
+ }
+ tdx_clear_page(page);
+ tdx_unpin(kvm, page);
+ return 0;
+}
+
+int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, void *private_spt)
+{
+ int tdx_level = pg_level_to_tdx_sept_level(level);
+ gpa_t gpa = gfn_to_gpa(gfn);
+ struct page *page = virt_to_page(private_spt);
+ u64 err, entry, level_state;
+
+ err = tdh_mem_sept_add(&to_kvm_tdx(kvm)->td, gpa, tdx_level, page, &entry,
+ &level_state);
+ if (unlikely(tdx_operand_busy(err)))
+ return -EBUSY;
+
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error_2(TDH_MEM_SEPT_ADD, err, entry, level_state);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Check if the error returned from a SEPT zap SEAMCALL indicates that a page
+ * was mapped by KVM_TDX_INIT_MEM_REGION without tdh_mem_page_add() having been
+ * called successfully.
+ *
+ * Since tdh_mem_sept_add() must have been invoked successfully before a
+ * non-leaf entry can be present in the mirrored page table, the SEPT-zap
+ * related SEAMCALLs should not encounter TDX_EPT_WALK_FAILED. They should
+ * instead find TDX_EPT_ENTRY_STATE_INCORRECT due to an empty leaf entry in
+ * the SEPT.
+ *
+ * Further check whether the entry returned by the SEPT walk has RWX
+ * permissions, to filter out anything unexpected.
+ *
+ * Note: @level is pg_level, not the tdx_level. The tdx_level extracted from
+ * level_state returned from a SEAMCALL error is the same as that passed into
+ * the SEAMCALL.
+ */
+static int tdx_is_sept_zap_err_due_to_premap(struct kvm_tdx *kvm_tdx, u64 err,
+ u64 entry, int level)
+{
+ if (!err || kvm_tdx->state == TD_STATE_RUNNABLE)
+ return false;
+
+ if (err != (TDX_EPT_ENTRY_STATE_INCORRECT | TDX_OPERAND_ID_RCX))
+ return false;
+
+ if ((is_last_spte(entry, level) && (entry & VMX_EPT_RWX_MASK)))
+ return false;
+
+ return true;
+}
+
+static int tdx_sept_zap_private_spte(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, struct page *page)
+{
+ int tdx_level = pg_level_to_tdx_sept_level(level);
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ gpa_t gpa = gfn_to_gpa(gfn) & KVM_HPAGE_MASK(level);
+ u64 err, entry, level_state;
+
+ /* For now, large pages aren't supported yet. */
+ WARN_ON_ONCE(level != PG_LEVEL_4K);
+
+ err = tdh_mem_range_block(&kvm_tdx->td, gpa, tdx_level, &entry, &level_state);
+
+ if (unlikely(tdx_operand_busy(err))) {
+ /* After no vCPUs enter, the second retry is expected to succeed */
+ tdx_no_vcpus_enter_start(kvm);
+ err = tdh_mem_range_block(&kvm_tdx->td, gpa, tdx_level, &entry, &level_state);
+ tdx_no_vcpus_enter_stop(kvm);
+ }
+ if (tdx_is_sept_zap_err_due_to_premap(kvm_tdx, err, entry, level) &&
+ !KVM_BUG_ON(!atomic64_read(&kvm_tdx->nr_premapped), kvm)) {
+ atomic64_dec(&kvm_tdx->nr_premapped);
+ tdx_unpin(kvm, page);
+ return 0;
+ }
+
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error_2(TDH_MEM_RANGE_BLOCK, err, entry, level_state);
+ return -EIO;
+ }
+ return 1;
+}
+
+/*
+ * Ensure that shared and private EPTs are flushed on all vCPUs.
+ * tdh_mem_track() is the only caller that increases the TD epoch. An increase
+ * in the TD epoch (e.g., to value "N + 1") is successful only if no vCPU is
+ * running in guest mode with the value "N - 1".
+ *
+ * A successful execution of tdh_mem_track() ensures that vCPUs can only run in
+ * guest mode with TD epoch value "N" if no TD exit occurs after the TD epoch
+ * is increased to "N + 1".
+ *
+ * Kicking off all vCPUs after that further ensures that no vCPU can run in
+ * guest mode with TD epoch value "N", which unblocks the next tdh_mem_track()
+ * (e.g. to increase the TD epoch to "N + 2").
+ *
+ * The TDX module will flush the EPT on the next TD enter and make vCPUs run in
+ * guest mode with TD epoch value "N + 1".
+ *
+ * kvm_make_all_cpus_request() guarantees all vCPUs are out of guest mode by
+ * waiting for the empty IPI handler ack_kick().
+ *
+ * No action is required for the vCPUs being kicked off, since the kick-off
+ * certainly occurs after the TD epoch increment and before the next
+ * tdh_mem_track().
+ */
+static void tdx_track(struct kvm *kvm)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ u64 err;
+
+ /* If the TD isn't finalized, no vCPU has run yet. */
+ if (unlikely(kvm_tdx->state != TD_STATE_RUNNABLE))
+ return;
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ err = tdh_mem_track(&kvm_tdx->td);
+ if (unlikely(tdx_operand_busy(err))) {
+ /* After no vCPUs enter, the second retry is expected to succeed */
+ tdx_no_vcpus_enter_start(kvm);
+ err = tdh_mem_track(&kvm_tdx->td);
+ tdx_no_vcpus_enter_stop(kvm);
+ }
+
+ if (KVM_BUG_ON(err, kvm))
+ pr_tdx_error(TDH_MEM_TRACK, err);
+
+ kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
+}
+
+int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, void *private_spt)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+
+ /*
+ * free_external_spt() is only called after the hkid is freed, when the
+ * TD is being torn down.
+ * KVM doesn't (yet) zap page table pages in the mirror page table while
+ * the TD is active, though guest pages mapped in the mirror page table
+ * could be zapped while the TD is active, e.g. for shared <-> private
+ * conversion and slot move/deletion.
+ */
+ if (KVM_BUG_ON(is_hkid_assigned(kvm_tdx), kvm))
+ return -EINVAL;
+
+ /*
+ * The HKID assigned to this TD was already freed and cache was
+ * already flushed. We don't have to flush again.
+ */
+ return tdx_reclaim_page(virt_to_page(private_spt));
+}
+
+int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, kvm_pfn_t pfn)
+{
+ struct page *page = pfn_to_page(pfn);
+ int ret;
+
+ /*
+ * HKID is released after all private pages have been removed, and set
+ * before any might be populated. Warn if zapping is attempted when
+ * there can't be anything populated in the private EPT.
+ */
+ if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
+ return -EINVAL;
+
+ ret = tdx_sept_zap_private_spte(kvm, gfn, level, page);
+ if (ret <= 0)
+ return ret;
+
+ /*
+ * TDX requires TLB tracking before dropping private page. Do
+ * it here, although it is also done later.
+ */
+ tdx_track(kvm);
+
+ return tdx_sept_drop_private_spte(kvm, gfn, level, page);
+}
+
+void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector)
+{
+ struct kvm_vcpu *vcpu = apic->vcpu;
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+
+ /* TDX supports only posted interrupts; no LAPIC emulation. */
+ __vmx_deliver_posted_interrupt(vcpu, &tdx->vt.pi_desc, vector);
+
+ trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
+}
+
+static inline bool tdx_is_sept_violation_unexpected_pending(struct kvm_vcpu *vcpu)
+{
+ u64 eeq_type = to_tdx(vcpu)->ext_exit_qualification & TDX_EXT_EXIT_QUAL_TYPE_MASK;
+ u64 eq = vmx_get_exit_qual(vcpu);
+
+ if (eeq_type != TDX_EXT_EXIT_QUAL_TYPE_PENDING_EPT_VIOLATION)
+ return false;
+
+ return !(eq & EPT_VIOLATION_PROT_MASK) && !(eq & EPT_VIOLATION_EXEC_FOR_RING3_LIN);
+}
+
+static int tdx_handle_ept_violation(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qual;
+ gpa_t gpa = to_tdx(vcpu)->exit_gpa;
+ bool local_retry = false;
+ int ret;
+
+ if (vt_is_tdx_private_gpa(vcpu->kvm, gpa)) {
+ if (tdx_is_sept_violation_unexpected_pending(vcpu)) {
+ pr_warn("Guest access before accepting 0x%llx on vCPU %d\n",
+ gpa, vcpu->vcpu_id);
+ kvm_vm_dead(vcpu->kvm);
+ return -EIO;
+ }
+ /*
+ * Always treat SEPT violations as write faults. Ignore the
+ * EXIT_QUALIFICATION reported by TDX-SEAM for SEPT violations.
+ * TD private pages are always RWX in the SEPT tables,
+ * i.e. they're always mapped writable. Just as importantly,
+ * treating SEPT violations as write faults is necessary to
+ * avoid COW allocations, which will cause TDAUGPAGE failures
+ * due to aliasing a single HPA to multiple GPAs.
+ */
+ exit_qual = EPT_VIOLATION_ACC_WRITE;
+
+ /* Only private GPA triggers zero-step mitigation */
+ local_retry = true;
+ } else {
+ exit_qual = vmx_get_exit_qual(vcpu);
+ /*
+ * EPT violation due to instruction fetch should never be
+ * triggered from shared memory in TDX guest. If such EPT
+ * violation occurs, treat it as broken hardware.
+ */
+ if (KVM_BUG_ON(exit_qual & EPT_VIOLATION_ACC_INSTR, vcpu->kvm))
+ return -EIO;
+ }
+
+ trace_kvm_page_fault(vcpu, gpa, exit_qual);
+
+ /*
+ * To minimize TDH.VP.ENTER invocations, retry locally for private GPA
+ * mapping in TDX.
+ *
+ * KVM may return RET_PF_RETRY for private GPA due to
+ * - contentions when atomically updating SPTEs of the mirror page table
+ * - in-progress GFN invalidation or memslot removal.
+ * - TDX_OPERAND_BUSY error from TDH.MEM.PAGE.AUG or TDH.MEM.SEPT.ADD,
+ * caused by contentions with TDH.VP.ENTER (with zero-step mitigation)
+ * or certain TDCALLs.
+ *
+ * If TDH.VP.ENTER is invoked more times than the threshold set by the
+ * TDX module before KVM resolves the private GPA mapping, the TDX
+ * module will activate zero-step mitigation during TDH.VP.ENTER. This
+ * process acquires an SEPT tree lock in the TDX module, leading to
+ * further contentions with TDH.MEM.PAGE.AUG or TDH.MEM.SEPT.ADD
+ * operations on other vCPUs.
+ *
+ * Breaking out of local retries for kvm_vcpu_has_events() is for
+ * interrupt injection. kvm_vcpu_has_events() should not see pending
+ * events for TDX. Since KVM can't determine if IRQs (or NMIs) are
+ * blocked by TDs, false positives are inevitable i.e., KVM may re-enter
+ * the guest even if the IRQ/NMI can't be delivered.
+ *
+ * Note: even without breaking out of local retries, zero-step
+ * mitigation may still occur due to
+ * - invoking of TDH.VP.ENTER after KVM_EXIT_MEMORY_FAULT,
+ * - a single RIP causing EPT violations for more GFNs than the
+ * threshold count.
+ * This is safe, as triggering zero-step mitigation only introduces
+ * contentions to page installation SEAMCALLs on other vCPUs, which will
+ * handle retries locally in their EPT violation handlers.
+ */
+ while (1) {
+ ret = __vmx_handle_ept_violation(vcpu, gpa, exit_qual);
+
+ if (ret != RET_PF_RETRY || !local_retry)
+ break;
+
+ if (kvm_vcpu_has_events(vcpu) || signal_pending(current))
+ break;
+
+ if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
+ ret = -EIO;
+ break;
+ }
+
+ cond_resched();
+ }
+ return ret;
+}
+
+int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
+{
+ if (err) {
+ tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
+ return 1;
+ }
+
+ if (vmx_get_exit_reason(vcpu).basic == EXIT_REASON_MSR_READ)
+ tdvmcall_set_return_val(vcpu, kvm_read_edx_eax(vcpu));
+
+ return 1;
+}
+
+
+int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+ u64 vp_enter_ret = tdx->vp_enter_ret;
+ union vmx_exit_reason exit_reason = vmx_get_exit_reason(vcpu);
+
+ if (fastpath != EXIT_FASTPATH_NONE)
+ return 1;
+
+ if (unlikely(vp_enter_ret == EXIT_REASON_EPT_MISCONFIG)) {
+ KVM_BUG_ON(1, vcpu->kvm);
+ return -EIO;
+ }
+
+ /*
+ * Handle TDX SW errors, including TDX_SEAMCALL_UD, TDX_SEAMCALL_GP and
+ * TDX_SEAMCALL_VMFAILINVALID.
+ */
+ if (unlikely((vp_enter_ret & TDX_SW_ERROR) == TDX_SW_ERROR)) {
+ KVM_BUG_ON(!kvm_rebooting, vcpu->kvm);
+ goto unhandled_exit;
+ }
+
+ if (unlikely(tdx_failed_vmentry(vcpu))) {
+ /*
+ * If the guest state is protected, that means off-TD debug is
+ * not enabled, so TDX_NON_RECOVERABLE must be set.
+ */
+ WARN_ON_ONCE(vcpu->arch.guest_state_protected &&
+ !(vp_enter_ret & TDX_NON_RECOVERABLE));
+ vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ vcpu->run->fail_entry.hardware_entry_failure_reason = exit_reason.full;
+ vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
+ return 0;
+ }
+
+ if (unlikely(vp_enter_ret & (TDX_ERROR | TDX_NON_RECOVERABLE)) &&
+ exit_reason.basic != EXIT_REASON_TRIPLE_FAULT) {
+ kvm_pr_unimpl("TD vp_enter_ret 0x%llx\n", vp_enter_ret);
+ goto unhandled_exit;
+ }
+
+ WARN_ON_ONCE(exit_reason.basic != EXIT_REASON_TRIPLE_FAULT &&
+ (vp_enter_ret & TDX_SEAMCALL_STATUS_MASK) != TDX_SUCCESS);
+
+ switch (exit_reason.basic) {
+ case EXIT_REASON_TRIPLE_FAULT:
+ vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
+ vcpu->mmio_needed = 0;
+ return 0;
+ case EXIT_REASON_EXCEPTION_NMI:
+ return tdx_handle_exception_nmi(vcpu);
+ case EXIT_REASON_EXTERNAL_INTERRUPT:
+ ++vcpu->stat.irq_exits;
+ return 1;
+ case EXIT_REASON_CPUID:
+ return tdx_emulate_cpuid(vcpu);
+ case EXIT_REASON_HLT:
+ return kvm_emulate_halt_noskip(vcpu);
+ case EXIT_REASON_TDCALL:
+ return handle_tdvmcall(vcpu);
+ case EXIT_REASON_VMCALL:
+ return tdx_emulate_vmcall(vcpu);
+ case EXIT_REASON_IO_INSTRUCTION:
+ return tdx_emulate_io(vcpu);
+ case EXIT_REASON_MSR_READ:
+ kvm_rcx_write(vcpu, tdx->vp_enter_args.r12);
+ return kvm_emulate_rdmsr(vcpu);
+ case EXIT_REASON_MSR_WRITE:
+ kvm_rcx_write(vcpu, tdx->vp_enter_args.r12);
+ kvm_rax_write(vcpu, tdx->vp_enter_args.r13 & -1u);
+ kvm_rdx_write(vcpu, tdx->vp_enter_args.r13 >> 32);
+ return kvm_emulate_wrmsr(vcpu);
+ case EXIT_REASON_EPT_MISCONFIG:
+ return tdx_emulate_mmio(vcpu);
+ case EXIT_REASON_EPT_VIOLATION:
+ return tdx_handle_ept_violation(vcpu);
+ case EXIT_REASON_OTHER_SMI:
+ /*
+ * Unlike VMX, an SMI in SEAM non-root mode (i.e. when a
+ * TD guest vCPU is running) causes a VM exit to the TDX module,
+ * then SEAMRET to KVM. Once it exits to KVM, the SMI is delivered
+ * and handled by the kernel handler right away.
+ *
+ * The Other SMI exit can also be caused by a SEAM non-root
+ * machine check delivered via a Machine Check System Management
+ * Interrupt (MSMI), but it has already been handled by the
+ * kernel machine check handler, i.e., the memory page has been
+ * marked as poisoned and it won't be freed to the free list
+ * when the TDX guest is terminated (the TDX module marks the
+ * guest as dead and prevents it from running further when a
+ * machine check happens in SEAM non-root).
+ *
+ * - An MSMI will not reach here; it's handled as the
+ * non_recoverable case above.
+ * - If it's not an MSMI, no need to do anything here.
+ */
+ return 1;
+ default:
+ break;
+ }
+
+unhandled_exit:
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
+ vcpu->run->internal.ndata = 2;
+ vcpu->run->internal.data[0] = vp_enter_ret;
+ vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
+ return 0;
+}
+
+void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
+ u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+
+ *reason = tdx->vt.exit_reason.full;
+ if (*reason != -1u) {
+ *info1 = vmx_get_exit_qual(vcpu);
+ *info2 = tdx->ext_exit_qualification;
+ *intr_info = vmx_get_intr_info(vcpu);
+ } else {
+ *info1 = 0;
+ *info2 = 0;
+ *intr_info = 0;
+ }
+
+ *error_code = 0;
+}
+
+bool tdx_has_emulated_msr(u32 index)
+{
+ switch (index) {
+ case MSR_IA32_UCODE_REV:
+ case MSR_IA32_ARCH_CAPABILITIES:
+ case MSR_IA32_POWER_CTL:
+ case MSR_IA32_CR_PAT:
+ case MSR_MTRRcap:
+ case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
+ case MSR_MTRRdefType:
+ case MSR_IA32_TSC_DEADLINE:
+ case MSR_IA32_MISC_ENABLE:
+ case MSR_PLATFORM_INFO:
+ case MSR_MISC_FEATURES_ENABLES:
+ case MSR_IA32_APICBASE:
+ case MSR_EFER:
+ case MSR_IA32_FEAT_CTL:
+ case MSR_IA32_MCG_CAP:
+ case MSR_IA32_MCG_STATUS:
+ case MSR_IA32_MCG_CTL:
+ case MSR_IA32_MCG_EXT_CTL:
+ case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
+ case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
+ /* MSR_IA32_MCx_{CTL, STATUS, ADDR, MISC, CTL2} */
+ case MSR_KVM_POLL_CONTROL:
+ return true;
+ case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
+ /*
+ * x2APIC registers that are virtualized by the CPU can't be
+ * emulated; KVM doesn't have access to the virtual APIC page.
+ */
+ switch (index) {
+ case X2APIC_MSR(APIC_TASKPRI):
+ case X2APIC_MSR(APIC_PROCPRI):
+ case X2APIC_MSR(APIC_EOI):
+ case X2APIC_MSR(APIC_ISR) ... X2APIC_MSR(APIC_ISR + APIC_ISR_NR):
+ case X2APIC_MSR(APIC_TMR) ... X2APIC_MSR(APIC_TMR + APIC_ISR_NR):
+ case X2APIC_MSR(APIC_IRR) ... X2APIC_MSR(APIC_IRR + APIC_ISR_NR):
+ return false;
+ default:
+ return true;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool tdx_is_read_only_msr(u32 index)
+{
+ return index == MSR_IA32_APICBASE || index == MSR_EFER ||
+ index == MSR_IA32_FEAT_CTL;
+}
+
+int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+{
+ switch (msr->index) {
+ case MSR_IA32_FEAT_CTL:
+ /*
+ * MCE and MCA are advertised via CPUID. The guest kernel can
+ * check whether LMCE is enabled.
+ */
+ msr->data = FEAT_CTL_LOCKED;
+ if (vcpu->arch.mcg_cap & MCG_LMCE_P)
+ msr->data |= FEAT_CTL_LMCE_ENABLED;
+ return 0;
+ case MSR_IA32_MCG_EXT_CTL:
+ if (!msr->host_initiated && !(vcpu->arch.mcg_cap & MCG_LMCE_P))
+ return 1;
+ msr->data = vcpu->arch.mcg_ext_ctl;
+ return 0;
+ default:
+ if (!tdx_has_emulated_msr(msr->index))
+ return 1;
+
+ return kvm_get_msr_common(vcpu, msr);
+ }
+}
+
+int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+{
+ switch (msr->index) {
+ case MSR_IA32_MCG_EXT_CTL:
+ if ((!msr->host_initiated && !(vcpu->arch.mcg_cap & MCG_LMCE_P)) ||
+ (msr->data & ~MCG_EXT_CTL_LMCE_EN))
+ return 1;
+ vcpu->arch.mcg_ext_ctl = msr->data;
+ return 0;
+ default:
+ if (tdx_is_read_only_msr(msr->index))
+ return 1;
+
+ if (!tdx_has_emulated_msr(msr->index))
+ return 1;
+
+ return kvm_set_msr_common(vcpu, msr);
+ }
+}
+
+static int tdx_get_capabilities(struct kvm_tdx_cmd *cmd)
+{
+ const struct tdx_sys_info_td_conf *td_conf = &tdx_sysinfo->td_conf;
+ struct kvm_tdx_capabilities __user *user_caps;
+ struct kvm_tdx_capabilities *caps = NULL;
+ int ret = 0;
+
+ /* flags is reserved for future use */
+ if (cmd->flags)
+ return -EINVAL;
+
+ caps = kmalloc(sizeof(*caps) +
+ sizeof(struct kvm_cpuid_entry2) * td_conf->num_cpuid_config,
+ GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ user_caps = u64_to_user_ptr(cmd->data);
+ if (copy_from_user(caps, user_caps, sizeof(*caps))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (caps->cpuid.nent < td_conf->num_cpuid_config) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ ret = init_kvm_tdx_caps(td_conf, caps);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(user_caps, caps, sizeof(*caps))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (copy_to_user(user_caps->cpuid.entries, caps->cpuid.entries,
+ caps->cpuid.nent *
+ sizeof(caps->cpuid.entries[0])))
+ ret = -EFAULT;
+
+out:
+ /* kfree() accepts NULL. */
+ kfree(caps);
+ return ret;
+}
+
+/*
+ * KVM reports the guest physical address width in CPUID.0x80000008.EAX[23:16],
+ * which is similar to TDX's GPAW. Use this field as the interface for userspace
+ * to configure the GPAW and EPT level for TDs.
+ *
+ * Only values 48 and 52 are supported. Value 52 means GPAW-52 and EPT level 5;
+ * value 48 means GPAW-48 and EPT level 4. For value 48, GPAW-48 is always
+ * supported. Value 52 is only supported when the platform supports 5-level EPT.
+ */
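+/*
+ * For example, userspace setting CPUID.0x80000008.EAX[23:16] = 52 in the
+ * init_vm CPUID entries selects GPAW-52, i.e. VMX_EPTP_PWL_5 plus
+ * TDX_CONFIG_FLAGS_MAX_GPAW below; a value of 48 selects GPAW-48 and
+ * VMX_EPTP_PWL_4.
+ */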
+static int setup_tdparams_eptp_controls(struct kvm_cpuid2 *cpuid,
+ struct td_params *td_params)
+{
+ const struct kvm_cpuid_entry2 *entry;
+ int guest_pa;
+
+ entry = kvm_find_cpuid_entry2(cpuid->entries, cpuid->nent, 0x80000008, 0);
+ if (!entry)
+ return -EINVAL;
+
+ guest_pa = tdx_get_guest_phys_addr_bits(entry->eax);
+
+ if (guest_pa != 48 && guest_pa != 52)
+ return -EINVAL;
+
+ if (guest_pa == 52 && !cpu_has_vmx_ept_5levels())
+ return -EINVAL;
+
+ td_params->eptp_controls = VMX_EPTP_MT_WB;
+ if (guest_pa == 52) {
+ td_params->eptp_controls |= VMX_EPTP_PWL_5;
+ td_params->config_flags |= TDX_CONFIG_FLAGS_MAX_GPAW;
+ } else {
+ td_params->eptp_controls |= VMX_EPTP_PWL_4;
+ }
+
+ return 0;
+}
+
+static int setup_tdparams_cpuids(struct kvm_cpuid2 *cpuid,
+ struct td_params *td_params)
+{
+ const struct tdx_sys_info_td_conf *td_conf = &tdx_sysinfo->td_conf;
+ const struct kvm_cpuid_entry2 *entry;
+ struct tdx_cpuid_value *value;
+ int i, copy_cnt = 0;
+
+ /*
+ * td_params.cpuid_values: the number and order of cpuid_values must
+ * match struct tdsysinfo.{num_cpuid_config, cpuid_configs}.
+ * It's assumed that td_params was zeroed.
+ */
+ for (i = 0; i < td_conf->num_cpuid_config; i++) {
+ struct kvm_cpuid_entry2 tmp;
+
+ td_init_cpuid_entry2(&tmp, i);
+
+ entry = kvm_find_cpuid_entry2(cpuid->entries, cpuid->nent,
+ tmp.function, tmp.index);
+ if (!entry)
+ continue;
+
+ if (tdx_unsupported_cpuid(entry))
+ return -EINVAL;
+
+ copy_cnt++;
+
+ value = &td_params->cpuid_values[i];
+ value->eax = entry->eax;
+ value->ebx = entry->ebx;
+ value->ecx = entry->ecx;
+ value->edx = entry->edx;
+
+ /*
+ * TDX module does not accept nonzero bits 16..23 for the
+ * CPUID[0x80000008].EAX, see setup_tdparams_eptp_controls().
+ */
+ if (tmp.function == 0x80000008)
+ value->eax = tdx_set_guest_phys_addr_bits(value->eax, 0);
+ }
+
+ /*
+ * Rely on the TDX module to reject invalid configuration, but it can't
+ * check leaves that don't have a proper slot in td_params->cpuid_values
+ * to stick them in. So fail if there were entries that didn't get copied
+ * to td_params.
+ */
+ if (copy_cnt != cpuid->nent)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
+ struct kvm_tdx_init_vm *init_vm)
+{
+ const struct tdx_sys_info_td_conf *td_conf = &tdx_sysinfo->td_conf;
+ struct kvm_cpuid2 *cpuid = &init_vm->cpuid;
+ int ret;
+
+ if (kvm->created_vcpus)
+ return -EBUSY;
+
+ if (init_vm->attributes & ~tdx_get_supported_attrs(td_conf))
+ return -EINVAL;
+
+ if (init_vm->xfam & ~tdx_get_supported_xfam(td_conf))
+ return -EINVAL;
+
+ td_params->max_vcpus = kvm->max_vcpus;
+ td_params->attributes = init_vm->attributes | td_conf->attributes_fixed1;
+ td_params->xfam = init_vm->xfam | td_conf->xfam_fixed1;
+
+ td_params->config_flags = TDX_CONFIG_FLAGS_NO_RBP_MOD;
+ td_params->tsc_frequency = TDX_TSC_KHZ_TO_25MHZ(kvm->arch.default_tsc_khz);
+
+ ret = setup_tdparams_eptp_controls(cpuid, td_params);
+ if (ret)
+ return ret;
+
+ ret = setup_tdparams_cpuids(cpuid, td_params);
+ if (ret)
+ return ret;
+
+#define MEMCPY_SAME_SIZE(dst, src) \
+ do { \
+ BUILD_BUG_ON(sizeof(dst) != sizeof(src)); \
+ memcpy((dst), (src), sizeof(dst)); \
+ } while (0)
+
+ MEMCPY_SAME_SIZE(td_params->mrconfigid, init_vm->mrconfigid);
+ MEMCPY_SAME_SIZE(td_params->mrowner, init_vm->mrowner);
+ MEMCPY_SAME_SIZE(td_params->mrownerconfig, init_vm->mrownerconfig);
+
+ return 0;
+}
+
+static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
+ u64 *seamcall_err)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ cpumask_var_t packages;
+ struct page **tdcs_pages = NULL;
+ struct page *tdr_page;
+ int ret, i;
+ u64 err, rcx;
+
+ *seamcall_err = 0;
+ ret = tdx_guest_keyid_alloc();
+ if (ret < 0)
+ return ret;
+ kvm_tdx->hkid = ret;
+ kvm_tdx->misc_cg = get_current_misc_cg();
+ ret = misc_cg_try_charge(MISC_CG_RES_TDX, kvm_tdx->misc_cg, 1);
+ if (ret)
+ goto free_hkid;
+
+ ret = -ENOMEM;
+
+ atomic_inc(&nr_configured_hkid);
+
+ tdr_page = alloc_page(GFP_KERNEL);
+ if (!tdr_page)
+ goto free_hkid;
+
+ kvm_tdx->td.tdcs_nr_pages = tdx_sysinfo->td_ctrl.tdcs_base_size / PAGE_SIZE;
+ /* TDVPS = TDVPR(4K page) + TDCX(multiple 4K pages), -1 for TDVPR. */
+ kvm_tdx->td.tdcx_nr_pages = tdx_sysinfo->td_ctrl.tdvps_base_size / PAGE_SIZE - 1;
+ tdcs_pages = kcalloc(kvm_tdx->td.tdcs_nr_pages, sizeof(*kvm_tdx->td.tdcs_pages),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!tdcs_pages)
+ goto free_tdr;
+
+ for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
+ tdcs_pages[i] = alloc_page(GFP_KERNEL);
+ if (!tdcs_pages[i])
+ goto free_tdcs;
+ }
+
+ if (!zalloc_cpumask_var(&packages, GFP_KERNEL))
+ goto free_tdcs;
+
+ cpus_read_lock();
+
+ /*
+ * At least one CPU of each package must be online in order to
+ * program all packages for the host key id. Check it.
+ */
+ for_each_present_cpu(i)
+ cpumask_set_cpu(topology_physical_package_id(i), packages);
+ for_each_online_cpu(i)
+ cpumask_clear_cpu(topology_physical_package_id(i), packages);
+ if (!cpumask_empty(packages)) {
+ ret = -EIO;
+ /*
+ * Because it's hard for a human operator to figure out the
+ * reason, warn about it.
+ */
+#define MSG_ALLPKG "All packages need to have online CPU to create TD. Online CPU and retry.\n"
+ pr_warn_ratelimited(MSG_ALLPKG);
+ goto free_packages;
+ }
+
+ /*
+ * TDH.MNG.CREATE tries to grab the TDX module's global lock and fails
+ * with TDX_OPERAND_BUSY when it can't get it. Take the global
+ * lock to prevent that failure.
+ */
+ mutex_lock(&tdx_lock);
+ kvm_tdx->td.tdr_page = tdr_page;
+ err = tdh_mng_create(&kvm_tdx->td, kvm_tdx->hkid);
+ mutex_unlock(&tdx_lock);
+
+ if (err == TDX_RND_NO_ENTROPY) {
+ ret = -EAGAIN;
+ goto free_packages;
+ }
+
+ if (WARN_ON_ONCE(err)) {
+ pr_tdx_error(TDH_MNG_CREATE, err);
+ ret = -EIO;
+ goto free_packages;
+ }
+
+ for_each_online_cpu(i) {
+ int pkg = topology_physical_package_id(i);
+
+ if (cpumask_test_and_set_cpu(pkg, packages))
+ continue;
+
+ /*
+ * Program the memory controller in the package with an
+ * encryption key associated with the TDX private host key id
+ * assigned to this TDR. Concurrent operations on the same memory
+ * controller result in TDX_OPERAND_BUSY. No locking needed
+ * beyond the cpus_read_lock() above as it serializes against
+ * hotplug and the first online CPU of the package is always
+ * used. We never have two CPUs in the same socket trying to
+ * program the key.
+ */
+ ret = smp_call_on_cpu(i, tdx_do_tdh_mng_key_config,
+ kvm_tdx, true);
+ if (ret)
+ break;
+ }
+ cpus_read_unlock();
+ free_cpumask_var(packages);
+ if (ret) {
+ i = 0;
+ goto teardown;
+ }
+
+ kvm_tdx->td.tdcs_pages = tdcs_pages;
+ for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
+ err = tdh_mng_addcx(&kvm_tdx->td, tdcs_pages[i]);
+ if (err == TDX_RND_NO_ENTROPY) {
+ /* Here it's hard to allow userspace to retry. */
+ ret = -EAGAIN;
+ goto teardown;
+ }
+ if (WARN_ON_ONCE(err)) {
+ pr_tdx_error(TDH_MNG_ADDCX, err);
+ ret = -EIO;
+ goto teardown;
+ }
+ }
+
+ err = tdh_mng_init(&kvm_tdx->td, __pa(td_params), &rcx);
+ if ((err & TDX_SEAMCALL_STATUS_MASK) == TDX_OPERAND_INVALID) {
+ /*
+ * Because the operands are user-supplied, don't warn.
+ * Return a hint to the user because it's sometimes hard for the
+ * user to figure out which operand is invalid. The SEAMCALL status
+ * code indicates which operand caused the invalid operand error.
+ */
+ *seamcall_err = err;
+ ret = -EINVAL;
+ goto teardown;
+ } else if (WARN_ON_ONCE(err)) {
+ pr_tdx_error_1(TDH_MNG_INIT, err, rcx);
+ ret = -EIO;
+ goto teardown;
+ }
+
+ return 0;
+
+ /*
+ * The sequence for freeing resources from a partially initialized TD
+ * varies based on where in the initialization flow failure occurred.
+ * Simply use the full teardown and destroy, which naturally play nice
+ * with partial initialization.
+ */
+teardown:
+ /* Only free pages not yet added, so start at 'i' */
+ for (; i < kvm_tdx->td.tdcs_nr_pages; i++) {
+ if (tdcs_pages[i]) {
+ __free_page(tdcs_pages[i]);
+ tdcs_pages[i] = NULL;
+ }
+ }
+ if (!kvm_tdx->td.tdcs_pages)
+ kfree(tdcs_pages);
+
+ tdx_mmu_release_hkid(kvm);
+ tdx_reclaim_td_control_pages(kvm);
+
+ return ret;
+
+free_packages:
+ cpus_read_unlock();
+ free_cpumask_var(packages);
+
+free_tdcs:
+ for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
+ if (tdcs_pages[i])
+ __free_page(tdcs_pages[i]);
+ }
+ kfree(tdcs_pages);
+ kvm_tdx->td.tdcs_pages = NULL;
+
+free_tdr:
+ if (tdr_page)
+ __free_page(tdr_page);
+ kvm_tdx->td.tdr_page = 0;
+
+free_hkid:
+ tdx_hkid_free(kvm_tdx);
+
+ return ret;
+}
+
+static u64 tdx_td_metadata_field_read(struct kvm_tdx *tdx, u64 field_id,
+ u64 *data)
+{
+ u64 err;
+
+ err = tdh_mng_rd(&tdx->td, field_id, data);
+
+ return err;
+}
+
+#define TDX_MD_UNREADABLE_LEAF_MASK GENMASK(30, 7)
+#define TDX_MD_UNREADABLE_SUBLEAF_MASK GENMASK(31, 7)
+
+static int tdx_read_cpuid(struct kvm_vcpu *vcpu, u32 leaf, u32 sub_leaf,
+ bool sub_leaf_set, int *entry_index,
+ struct kvm_cpuid_entry2 *out)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
+ u64 field_id = TD_MD_FIELD_ID_CPUID_VALUES;
+ u64 ebx_eax, edx_ecx;
+ u64 err = 0;
+
+ if (sub_leaf > 0b1111111)
+ return -EINVAL;
+
+ if (*entry_index >= KVM_MAX_CPUID_ENTRIES)
+ return -EINVAL;
+
+ if (leaf & TDX_MD_UNREADABLE_LEAF_MASK ||
+ sub_leaf & TDX_MD_UNREADABLE_SUBLEAF_MASK)
+ return -EINVAL;
+
+ /*
+ * bit 23:17, RESERVED: reserved, must be 0;
+ * bit 16, LEAF_31: leaf number bit 31;
+ * bit 15:9, LEAF_6_0: leaf number bits 6:0, leaf bits 30:7 are
+ * implicitly 0;
+ * bit 8, SUBLEAF_NA: sub-leaf not applicable flag;
+ * bit 7:1, SUBLEAF_6_0: sub-leaf number bits 6:0. If SUBLEAF_NA is 1,
+ * the SUBLEAF_6_0 is all-1.
+ * sub-leaf bits 31:7 are implicitly 0;
+ * bit 0, ELEMENT_I: Element index within field;
+ */
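+ /*
+ * Worked example (illustrative only, not from the layout description
+ * above): for leaf 0x80000008 with no sub-leaf, LEAF_31 contributes
+ * 1 << 16, LEAF_6_0 contributes 0x08 << 9 and the no-sub-leaf encoding
+ * contributes 0x1fe, so the low bits of field_id become 0x111fe.
+ */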
+ field_id |= ((leaf & 0x80000000) ? 1 : 0) << 16;
+ field_id |= (leaf & 0x7f) << 9;
+ if (sub_leaf_set)
+ field_id |= (sub_leaf & 0x7f) << 1;
+ else
+ field_id |= 0x1fe;
+
+ err = tdx_td_metadata_field_read(kvm_tdx, field_id, &ebx_eax);
+ if (err) //TODO check for specific errors
+ goto err_out;
+
+ out->eax = (u32) ebx_eax;
+ out->ebx = (u32) (ebx_eax >> 32);
+
+ field_id++;
+ err = tdx_td_metadata_field_read(kvm_tdx, field_id, &edx_ecx);
+ /*
+ * It would be weird for reading edx_ecx to fail when reading ebx_eax
+ * succeeded.
+ */
+ if (WARN_ON_ONCE(err))
+ goto err_out;
+
+ out->ecx = (u32) edx_ecx;
+ out->edx = (u32) (edx_ecx >> 32);
+
+ out->function = leaf;
+ out->index = sub_leaf;
+ out->flags |= sub_leaf_set ? KVM_CPUID_FLAG_SIGNIFCANT_INDEX : 0;
+
+ /*
+ * To work around missing support on old TDX modules, fetch the
+ * guest maxpa from gfn_direct_bits.
+ */
+ if (leaf == 0x80000008) {
+ gpa_t gpa_bits = gfn_to_gpa(kvm_gfn_direct_bits(vcpu->kvm));
+ unsigned int g_maxpa = __ffs(gpa_bits) + 1;
+
+ out->eax = tdx_set_guest_phys_addr_bits(out->eax, g_maxpa);
+ }
+
+ (*entry_index)++;
+
+ return 0;
+
+err_out:
+ out->eax = 0;
+ out->ebx = 0;
+ out->ecx = 0;
+ out->edx = 0;
+
+ return -EIO;
+}
+
+static int tdx_td_init(struct kvm *kvm, struct kvm_tdx_cmd *cmd)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ struct kvm_tdx_init_vm *init_vm;
+ struct td_params *td_params = NULL;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(*init_vm) != 256 + sizeof_field(struct kvm_tdx_init_vm, cpuid));
+ BUILD_BUG_ON(sizeof(struct td_params) != 1024);
+
+ if (kvm_tdx->state != TD_STATE_UNINITIALIZED)
+ return -EINVAL;
+
+ if (cmd->flags)
+ return -EINVAL;
+
+ init_vm = kmalloc(sizeof(*init_vm) +
+ sizeof(init_vm->cpuid.entries[0]) * KVM_MAX_CPUID_ENTRIES,
+ GFP_KERNEL);
+ if (!init_vm)
+ return -ENOMEM;
+
+ if (copy_from_user(init_vm, u64_to_user_ptr(cmd->data), sizeof(*init_vm))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (init_vm->cpuid.nent > KVM_MAX_CPUID_ENTRIES) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ if (copy_from_user(init_vm->cpuid.entries,
+ u64_to_user_ptr(cmd->data) + sizeof(*init_vm),
+ flex_array_size(init_vm, cpuid.entries, init_vm->cpuid.nent))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (memchr_inv(init_vm->reserved, 0, sizeof(init_vm->reserved))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (init_vm->cpuid.padding) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ td_params = kzalloc(sizeof(struct td_params), GFP_KERNEL);
+ if (!td_params) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = setup_tdparams(kvm, td_params, init_vm);
+ if (ret)
+ goto out;
+
+ ret = __tdx_td_init(kvm, td_params, &cmd->hw_error);
+ if (ret)
+ goto out;
+
+ kvm_tdx->tsc_offset = td_tdcs_exec_read64(kvm_tdx, TD_TDCS_EXEC_TSC_OFFSET);
+ kvm_tdx->tsc_multiplier = td_tdcs_exec_read64(kvm_tdx, TD_TDCS_EXEC_TSC_MULTIPLIER);
+ kvm_tdx->attributes = td_params->attributes;
+ kvm_tdx->xfam = td_params->xfam;
+
+ if (td_params->config_flags & TDX_CONFIG_FLAGS_MAX_GPAW)
+ kvm->arch.gfn_direct_bits = TDX_SHARED_BIT_PWL_5;
+ else
+ kvm->arch.gfn_direct_bits = TDX_SHARED_BIT_PWL_4;
+
+ kvm_tdx->state = TD_STATE_INITIALIZED;
+out:
+ /* kfree() accepts NULL. */
+ kfree(init_vm);
+ kfree(td_params);
+
+ return ret;
+}
+
+void tdx_flush_tlb_current(struct kvm_vcpu *vcpu)
+{
+ /*
+ * flush_tlb_current() is invoked the first time the vCPU runs or when
+ * the root of the shared EPT is invalidated.
+ * KVM only needs to flush the shared EPT because the TDX module handles
+ * TLB invalidation for the private EPT in tdh_vp_enter().
+ *
+ * A single context invalidation for shared EPT can be performed here.
+ * However, this single context invalidation requires the private EPTP
+ * rather than the shared EPTP to flush shared EPT, as shared EPT uses
+ * private EPTP as its ASID for TLB invalidation.
+ *
+ * To avoid reading back private EPTP, perform a global invalidation for
+ * shared EPT instead to keep this function simple.
+ */
+ ept_sync_global();
+}
+
+void tdx_flush_tlb_all(struct kvm_vcpu *vcpu)
+{
+ /*
+ * TDX has called tdx_track() in tdx_sept_remove_private_spte() to
+ * ensure that private EPT will be flushed on the next TD enter. No need
+ * to call tdx_track() here again even when this callback is a result of
+ * zapping private EPT.
+ *
+ * Due to the lack of the context to determine which EPT has been
+ * affected by zapping, invoke invept() directly here for both shared
+ * EPT and private EPT for simplicity, though it's not necessary for
+ * private EPT.
+ */
+ ept_sync_global();
+}
+
+static int tdx_td_finalize(struct kvm *kvm, struct kvm_tdx_cmd *cmd)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+
+ guard(mutex)(&kvm->slots_lock);
+
+ if (!is_hkid_assigned(kvm_tdx) || kvm_tdx->state == TD_STATE_RUNNABLE)
+ return -EINVAL;
+ /*
+ * Pages are pending for KVM_TDX_INIT_MEM_REGION to issue
+ * TDH.MEM.PAGE.ADD().
+ */
+ if (atomic64_read(&kvm_tdx->nr_premapped))
+ return -EINVAL;
+
+ cmd->hw_error = tdh_mr_finalize(&kvm_tdx->td);
+ if (tdx_operand_busy(cmd->hw_error))
+ return -EBUSY;
+ if (KVM_BUG_ON(cmd->hw_error, kvm)) {
+ pr_tdx_error(TDH_MR_FINALIZE, cmd->hw_error);
+ return -EIO;
+ }
+
+ kvm_tdx->state = TD_STATE_RUNNABLE;
+ /* TD_STATE_RUNNABLE must be set before 'pre_fault_allowed' */
+ smp_wmb();
+ kvm->arch.pre_fault_allowed = true;
+ return 0;
+}
+
+int tdx_vm_ioctl(struct kvm *kvm, void __user *argp)
+{
+ struct kvm_tdx_cmd tdx_cmd;
+ int r;
+
+ if (copy_from_user(&tdx_cmd, argp, sizeof(struct kvm_tdx_cmd)))
+ return -EFAULT;
+
+ /*
+ * Userspace should never set hw_error. It is used to fill
+ * hardware-defined error by the kernel.
+ */
+ if (tdx_cmd.hw_error)
+ return -EINVAL;
+
+ mutex_lock(&kvm->lock);
+
+ switch (tdx_cmd.id) {
+ case KVM_TDX_CAPABILITIES:
+ r = tdx_get_capabilities(&tdx_cmd);
+ break;
+ case KVM_TDX_INIT_VM:
+ r = tdx_td_init(kvm, &tdx_cmd);
+ break;
+ case KVM_TDX_FINALIZE_VM:
+ r = tdx_td_finalize(kvm, &tdx_cmd);
+ break;
+ default:
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (copy_to_user(argp, &tdx_cmd, sizeof(struct kvm_tdx_cmd)))
+ r = -EFAULT;
+
+out:
+ mutex_unlock(&kvm->lock);
+ return r;
+}
+
+/* The VMM can pass one 64-bit auxiliary data value to the vCPU via RCX for the guest BIOS. */
+static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+ struct page *page;
+ int ret, i;
+ u64 err;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ tdx->vp.tdvpr_page = page;
+
+ tdx->vp.tdcx_pages = kcalloc(kvm_tdx->td.tdcx_nr_pages, sizeof(*tdx->vp.tdcx_pages),
+ GFP_KERNEL);
+ if (!tdx->vp.tdcx_pages) {
+ ret = -ENOMEM;
+ goto free_tdvpr;
+ }
+
+ for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ goto free_tdcx;
+ }
+ tdx->vp.tdcx_pages[i] = page;
+ }
+
+ err = tdh_vp_create(&kvm_tdx->td, &tdx->vp);
+ if (KVM_BUG_ON(err, vcpu->kvm)) {
+ ret = -EIO;
+ pr_tdx_error(TDH_VP_CREATE, err);
+ goto free_tdcx;
+ }
+
+ for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
+ err = tdh_vp_addcx(&tdx->vp, tdx->vp.tdcx_pages[i]);
+ if (KVM_BUG_ON(err, vcpu->kvm)) {
+ pr_tdx_error(TDH_VP_ADDCX, err);
+ /*
+ * Pages already added are reclaimed by the vcpu_free
+ * method, but the rest are freed here.
+ */
+ for (; i < kvm_tdx->td.tdcx_nr_pages; i++) {
+ __free_page(tdx->vp.tdcx_pages[i]);
+ tdx->vp.tdcx_pages[i] = NULL;
+ }
+ return -EIO;
+ }
+ }
+
+ err = tdh_vp_init(&tdx->vp, vcpu_rcx, vcpu->vcpu_id);
+ if (KVM_BUG_ON(err, vcpu->kvm)) {
+ pr_tdx_error(TDH_VP_INIT, err);
+ return -EIO;
+ }
+
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+
+ return 0;
+
+free_tdcx:
+ for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
+ if (tdx->vp.tdcx_pages[i])
+ __free_page(tdx->vp.tdcx_pages[i]);
+ tdx->vp.tdcx_pages[i] = NULL;
+ }
+ kfree(tdx->vp.tdcx_pages);
+ tdx->vp.tdcx_pages = NULL;
+
+free_tdvpr:
+ if (tdx->vp.tdvpr_page)
+ __free_page(tdx->vp.tdvpr_page);
+ tdx->vp.tdvpr_page = 0;
+
+ return ret;
+}
+
+/* Sometimes reads multiple subleafs. Return how many entries were written. */
+static int tdx_vcpu_get_cpuid_leaf(struct kvm_vcpu *vcpu, u32 leaf, int *entry_index,
+ struct kvm_cpuid_entry2 *output_e)
+{
+ int sub_leaf = 0;
+ int ret;
+
+ /* First try without a subleaf */
+ ret = tdx_read_cpuid(vcpu, leaf, 0, false, entry_index, output_e);
+
+ /* If success, or invalid leaf, just give up */
+ if (ret != -EIO)
+ return ret;
+
+ /*
+ * If the attempt without a subleaf failed, try reading subleafs until
+ * failure. The TDX module only supports 6 bits of subleaf index.
+ */
+ while (1) {
+ /* Keep reading subleafs until there is a failure. */
+ if (tdx_read_cpuid(vcpu, leaf, sub_leaf, true, entry_index, output_e))
+ return !sub_leaf;
+
+ sub_leaf++;
+ output_e++;
+ }
+
+ return 0;
+}
+
+static int tdx_vcpu_get_cpuid(struct kvm_vcpu *vcpu, struct kvm_tdx_cmd *cmd)
+{
+ struct kvm_cpuid2 __user *output, *td_cpuid;
+ int r = 0, i = 0, leaf;
+ u32 level;
+
+ output = u64_to_user_ptr(cmd->data);
+ td_cpuid = kzalloc(sizeof(*td_cpuid) +
+ sizeof(output->entries[0]) * KVM_MAX_CPUID_ENTRIES,
+ GFP_KERNEL);
+ if (!td_cpuid)
+ return -ENOMEM;
+
+ if (copy_from_user(td_cpuid, output, sizeof(*output))) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ /* Read max CPUID for normal range */
+ if (tdx_vcpu_get_cpuid_leaf(vcpu, 0, &i, &td_cpuid->entries[i])) {
+ r = -EIO;
+ goto out;
+ }
+ level = td_cpuid->entries[0].eax;
+
+ for (leaf = 1; leaf <= level; leaf++)
+ tdx_vcpu_get_cpuid_leaf(vcpu, leaf, &i, &td_cpuid->entries[i]);
+
+ /* Read max CPUID for extended range */
+ if (tdx_vcpu_get_cpuid_leaf(vcpu, 0x80000000, &i, &td_cpuid->entries[i])) {
+ r = -EIO;
+ goto out;
+ }
+ level = td_cpuid->entries[i - 1].eax;
+
+ for (leaf = 0x80000001; leaf <= level; leaf++)
+ tdx_vcpu_get_cpuid_leaf(vcpu, leaf, &i, &td_cpuid->entries[i]);
+
+ if (td_cpuid->nent < i)
+ r = -E2BIG;
+ td_cpuid->nent = i;
+
+ if (copy_to_user(output, td_cpuid, sizeof(*output))) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ if (r == -E2BIG)
+ goto out;
+
+ if (copy_to_user(output->entries, td_cpuid->entries,
+ td_cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
+ r = -EFAULT;
+
+out:
+ kfree(td_cpuid);
+
+ return r;
+}
+
+static int tdx_vcpu_init(struct kvm_vcpu *vcpu, struct kvm_tdx_cmd *cmd)
+{
+ u64 apic_base;
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+ int ret;
+
+ if (cmd->flags)
+ return -EINVAL;
+
+ if (tdx->state != VCPU_TD_STATE_UNINITIALIZED)
+ return -EINVAL;
+
+ /*
+ * TDX requires X2APIC; userspace is responsible for configuring guest
+ * CPUID accordingly.
+ */
+ apic_base = APIC_DEFAULT_PHYS_BASE | LAPIC_MODE_X2APIC |
+ (kvm_vcpu_is_reset_bsp(vcpu) ? MSR_IA32_APICBASE_BSP : 0);
+ if (kvm_apic_set_base(vcpu, apic_base, true))
+ return -EINVAL;
+
+ ret = tdx_td_vcpu_init(vcpu, (u64)cmd->data);
+ if (ret)
+ return ret;
+
+ td_vmcs_write16(tdx, POSTED_INTR_NV, POSTED_INTR_VECTOR);
+ td_vmcs_write64(tdx, POSTED_INTR_DESC_ADDR, __pa(&tdx->vt.pi_desc));
+ td_vmcs_setbit32(tdx, PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_POSTED_INTR);
+
+ tdx->state = VCPU_TD_STATE_INITIALIZED;
+
+ return 0;
+}
+
+void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+{
+ /*
+ * Yell on INIT, as TDX doesn't support INIT, i.e. KVM should drop all
+ * INIT events.
+ *
+ * Defer initializing vCPU for RESET state until KVM_TDX_INIT_VCPU, as
+ * userspace needs to define the vCPU model before KVM can initialize
+ * vCPU state, e.g. to enable x2APIC.
+ */
+ WARN_ON_ONCE(init_event);
+}
+
+struct tdx_gmem_post_populate_arg {
+ struct kvm_vcpu *vcpu;
+ __u32 flags;
+};
+
+static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
+ void __user *src, int order, void *_arg)
+{
+ u64 error_code = PFERR_GUEST_FINAL_MASK | PFERR_PRIVATE_ACCESS;
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ struct tdx_gmem_post_populate_arg *arg = _arg;
+ struct kvm_vcpu *vcpu = arg->vcpu;
+ gpa_t gpa = gfn_to_gpa(gfn);
+ u8 level = PG_LEVEL_4K;
+ struct page *src_page;
+ int ret, i;
+ u64 err, entry, level_state;
+
+ /*
+ * Get the source page if it has been faulted in. Return failure if the
+ * source page has been swapped out or unmapped in primary memory.
+ */
+ ret = get_user_pages_fast((unsigned long)src, 1, 0, &src_page);
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -ENOMEM;
+
+ ret = kvm_tdp_map_page(vcpu, gpa, error_code, &level);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * The private mem cannot be zapped after kvm_tdp_map_page()
+ * because all paths are covered by slots_lock and the
+ * filemap invalidate lock. Check that they are indeed enough.
+ */
+ if (IS_ENABLED(CONFIG_KVM_PROVE_MMU)) {
+ scoped_guard(read_lock, &kvm->mmu_lock) {
+ if (KVM_BUG_ON(!kvm_tdp_mmu_gpa_is_mapped(vcpu, gpa), kvm)) {
+ ret = -EIO;
+ goto out;
+ }
+ }
+ }
+
+ ret = 0;
+ err = tdh_mem_page_add(&kvm_tdx->td, gpa, pfn_to_page(pfn),
+ src_page, &entry, &level_state);
+ if (err) {
+ ret = unlikely(tdx_operand_busy(err)) ? -EBUSY : -EIO;
+ goto out;
+ }
+
+ if (!KVM_BUG_ON(!atomic64_read(&kvm_tdx->nr_premapped), kvm))
+ atomic64_dec(&kvm_tdx->nr_premapped);
+
+ if (arg->flags & KVM_TDX_MEASURE_MEMORY_REGION) {
+ for (i = 0; i < PAGE_SIZE; i += TDX_EXTENDMR_CHUNKSIZE) {
+ err = tdh_mr_extend(&kvm_tdx->td, gpa + i, &entry,
+ &level_state);
+ if (err) {
+ ret = -EIO;
+ break;
+ }
+ }
+ }
+
+out:
+ put_page(src_page);
+ return ret;
+}
+
+static int tdx_vcpu_init_mem_region(struct kvm_vcpu *vcpu, struct kvm_tdx_cmd *cmd)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ struct kvm_tdx_init_mem_region region;
+ struct tdx_gmem_post_populate_arg arg;
+ long gmem_ret;
+ int ret;
+
+ if (tdx->state != VCPU_TD_STATE_INITIALIZED)
+ return -EINVAL;
+
+ guard(mutex)(&kvm->slots_lock);
+
+ /* Once TD is finalized, the initial guest memory is fixed. */
+ if (kvm_tdx->state == TD_STATE_RUNNABLE)
+ return -EINVAL;
+
+ if (cmd->flags & ~KVM_TDX_MEASURE_MEMORY_REGION)
+ return -EINVAL;
+
+ if (copy_from_user(&region, u64_to_user_ptr(cmd->data), sizeof(region)))
+ return -EFAULT;
+
+ if (!PAGE_ALIGNED(region.source_addr) || !PAGE_ALIGNED(region.gpa) ||
+ !region.nr_pages ||
+ region.gpa + (region.nr_pages << PAGE_SHIFT) <= region.gpa ||
+ !vt_is_tdx_private_gpa(kvm, region.gpa) ||
+ !vt_is_tdx_private_gpa(kvm, region.gpa + (region.nr_pages << PAGE_SHIFT) - 1))
+ return -EINVAL;
+
+ kvm_mmu_reload(vcpu);
+ ret = 0;
+ while (region.nr_pages) {
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ arg = (struct tdx_gmem_post_populate_arg) {
+ .vcpu = vcpu,
+ .flags = cmd->flags,
+ };
+ gmem_ret = kvm_gmem_populate(kvm, gpa_to_gfn(region.gpa),
+ u64_to_user_ptr(region.source_addr),
+ 1, tdx_gmem_post_populate, &arg);
+ if (gmem_ret < 0) {
+ ret = gmem_ret;
+ break;
+ }
+
+ if (gmem_ret != 1) {
+ ret = -EIO;
+ break;
+ }
+
+ region.source_addr += PAGE_SIZE;
+ region.gpa += PAGE_SIZE;
+ region.nr_pages--;
+
+ cond_resched();
+ }
+
+ if (copy_to_user(u64_to_user_ptr(cmd->data), &region, sizeof(region)))
+ ret = -EFAULT;
+ return ret;
+}
+
+int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
+{
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
+ struct kvm_tdx_cmd cmd;
+ int ret;
+
+ if (!is_hkid_assigned(kvm_tdx) || kvm_tdx->state == TD_STATE_RUNNABLE)
+ return -EINVAL;
+
+ if (copy_from_user(&cmd, argp, sizeof(cmd)))
+ return -EFAULT;
+
+ if (cmd.hw_error)
+ return -EINVAL;
+
+ switch (cmd.id) {
+ case KVM_TDX_INIT_VCPU:
+ ret = tdx_vcpu_init(vcpu, &cmd);
+ break;
+ case KVM_TDX_INIT_MEM_REGION:
+ ret = tdx_vcpu_init_mem_region(vcpu, &cmd);
+ break;
+ case KVM_TDX_GET_CPUID:
+ ret = tdx_vcpu_get_cpuid(vcpu, &cmd);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
+{
+ return PG_LEVEL_4K;
+}
+
+static int tdx_online_cpu(unsigned int cpu)
+{
+ unsigned long flags;
+ int r;
+
+ /* Sanity check CPU is already in post-VMXON */
+ WARN_ON_ONCE(!(cr4_read_shadow() & X86_CR4_VMXE));
+
+ local_irq_save(flags);
+ r = tdx_cpu_enable();
+ local_irq_restore(flags);
+
+ return r;
+}
+
+static int tdx_offline_cpu(unsigned int cpu)
+{
+ int i;
+
+ /* No TD is running. Allow any cpu to be offline. */
+ if (!atomic_read(&nr_configured_hkid))
+ return 0;
+
+ /*
+ * In order to reclaim a TDX HKID (i.e. when deleting a guest TD),
+ * TDH.PHYMEM.PAGE.WBINVD needs to be called on all packages to program
+ * every memory controller with pconfig. If there is an active TDX HKID,
+ * refuse to offline the last online cpu of a package.
+ */
+ for_each_online_cpu(i) {
+ /*
+ * Found another online cpu on the same package.
+ * Allow to offline.
+ */
+ if (i != cpu && topology_physical_package_id(i) ==
+ topology_physical_package_id(cpu))
+ return 0;
+ }
+
+ /*
+ * This is the last cpu of this package. Don't offline it.
+ *
+ * Because it's hard for a human operator to understand the
+ * reason, warn about it.
+ */
+#define MSG_ALLPKG_ONLINE \
+ "TDX requires all packages to have an online CPU. Delete all TDs in order to offline all CPUs of a package.\n"
+ pr_warn_ratelimited(MSG_ALLPKG_ONLINE);
+ return -EBUSY;
+}
+
+static void __do_tdx_cleanup(void)
+{
+ /*
+ * Once the TDX module is initialized, it cannot be disabled and
+ * re-initialized without a runtime update (which isn't supported
+ * by the kernel). Only the cpuhp state needs to be removed here.
+ * The TDX host core code tracks TDX status and can handle the
+ * 'multiple enabling' scenario.
+ */
+ WARN_ON_ONCE(!tdx_cpuhp_state);
+ cpuhp_remove_state_nocalls_cpuslocked(tdx_cpuhp_state);
+ tdx_cpuhp_state = 0;
+}
+
+static void __tdx_cleanup(void)
+{
+ cpus_read_lock();
+ __do_tdx_cleanup();
+ cpus_read_unlock();
+}
+
+static int __init __do_tdx_bringup(void)
+{
+ int r;
+
+ /*
+ * TDX-specific cpuhp callback to call tdx_cpu_enable() on all
+ * online CPUs before calling tdx_enable(), and on any new
+ * going-online CPU to make sure it is ready for TDX guest.
+ */
+ r = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
+ "kvm/cpu/tdx:online",
+ tdx_online_cpu, tdx_offline_cpu);
+ if (r < 0)
+ return r;
+
+ tdx_cpuhp_state = r;
+
+ r = tdx_enable();
+ if (r)
+ __do_tdx_cleanup();
+
+ return r;
+}
+
+static int __init __tdx_bringup(void)
+{
+ const struct tdx_sys_info_td_conf *td_conf;
+ int r, i;
+
+ for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++) {
+ /*
+ * Check if MSRs (tdx_uret_msrs) can be saved/restored
+ * before returning to user space.
+ *
+ * this_cpu_ptr(user_return_msrs)->registered isn't checked
+ * because the registration is done at vcpu runtime by
+ * tdx_user_return_msr_update_cache().
+ */
+ tdx_uret_msrs[i].slot = kvm_find_user_return_msr(tdx_uret_msrs[i].msr);
+ if (tdx_uret_msrs[i].slot == -1) {
+ /* If any MSR isn't supported, it is a KVM bug */
+ pr_err("MSR %x isn't included by kvm_find_user_return_msr\n",
+ tdx_uret_msrs[i].msr);
+ return -EIO;
+ }
+ }
+
+ /*
+ * Enabling TDX requires enabling hardware virtualization first,
+ * as making SEAMCALLs requires CPU being in post-VMXON state.
+ */
+ r = kvm_enable_virtualization();
+ if (r)
+ return r;
+
+ cpus_read_lock();
+ r = __do_tdx_bringup();
+ cpus_read_unlock();
+
+ if (r)
+ goto tdx_bringup_err;
+
+ /* Get TDX global information for later use */
+ tdx_sysinfo = tdx_get_sysinfo();
+ if (WARN_ON_ONCE(!tdx_sysinfo)) {
+ r = -EINVAL;
+ goto get_sysinfo_err;
+ }
+
+ /* Check TDX module and KVM capabilities */
+ if (!tdx_get_supported_attrs(&tdx_sysinfo->td_conf) ||
+ !tdx_get_supported_xfam(&tdx_sysinfo->td_conf))
+ goto get_sysinfo_err;
+
+ if (!(tdx_sysinfo->features.tdx_features0 & MD_FIELD_ID_FEATURES0_TOPOLOGY_ENUM))
+ goto get_sysinfo_err;
+
+ /*
+ * TDX has its own limit of maximum vCPUs it can support for all
+ * TDX guests in addition to KVM_MAX_VCPUS. Userspace needs to
+ * query TDX guest's maximum vCPUs by checking KVM_CAP_MAX_VCPU
+ * extension on per-VM basis.
+ *
+ * TDX module reports such limit via the MAX_VCPU_PER_TD global
+ * metadata. Different modules may report different values.
+ * Some old modules may also not support this metadata (in which
+ * case this limit is U16_MAX).
+ *
+ * In practice, the reported value reflects the maximum logical
+ * CPUs that ALL the platforms that the module supports can
+ * possibly have.
+ *
+ * Simply forwarding the MAX_VCPU_PER_TD to userspace could
+ * result in an unpredictable ABI. KVM instead always advertises
+ * the number of logical CPUs the platform has as the maximum
+ * vCPUs for TDX guests.
+ *
+ * Make sure MAX_VCPU_PER_TD reported by TDX module is not
+ * smaller than the number of logical CPUs, otherwise KVM will
+ * report an unsupported value to userspace.
+ *
+ * Note, a platform with TDX enabled in the BIOS cannot support
+ * physical CPU hotplug, and TDX requires the BIOS has marked
+ * all logical CPUs in MADT table as enabled. Just use
+ * num_present_cpus() for the number of logical CPUs.
+ */
+ td_conf = &tdx_sysinfo->td_conf;
+ if (td_conf->max_vcpus_per_td < num_present_cpus()) {
+ pr_err("Disable TDX: MAX_VCPU_PER_TD (%u) smaller than number of logical CPUs (%u).\n",
+ td_conf->max_vcpus_per_td, num_present_cpus());
+ r = -EINVAL;
+ goto get_sysinfo_err;
+ }
+
+ if (misc_cg_set_capacity(MISC_CG_RES_TDX, tdx_get_nr_guest_keyids())) {
+ r = -EINVAL;
+ goto get_sysinfo_err;
+ }
+
+ /*
+ * Leave hardware virtualization enabled after TDX is enabled
+ * successfully. TDX CPU hotplug depends on this.
+ */
+ return 0;
+
+get_sysinfo_err:
+ __tdx_cleanup();
+tdx_bringup_err:
+ kvm_disable_virtualization();
+ return r;
+}
+
+void tdx_cleanup(void)
+{
+ if (enable_tdx) {
+ misc_cg_set_capacity(MISC_CG_RES_TDX, 0);
+ __tdx_cleanup();
+ kvm_disable_virtualization();
+ }
+}
+
+int __init tdx_bringup(void)
+{
+ int r, i;
+
+ /* tdx_disable_virtualization_cpu() uses associated_tdvcpus. */
+ for_each_possible_cpu(i)
+ INIT_LIST_HEAD(&per_cpu(associated_tdvcpus, i));
+
+ if (!enable_tdx)
+ return 0;
+
+ if (!enable_ept) {
+ pr_err("EPT is required for TDX\n");
+ goto success_disable_tdx;
+ }
+
+ if (!tdp_mmu_enabled || !enable_mmio_caching || !enable_ept_ad_bits) {
+ pr_err("TDP MMU and MMIO caching and EPT A/D bit is required for TDX\n");
+ goto success_disable_tdx;
+ }
+
+ if (!enable_apicv) {
+ pr_err("APICv is required for TDX\n");
+ goto success_disable_tdx;
+ }
+
+ if (!cpu_feature_enabled(X86_FEATURE_OSXSAVE)) {
+ pr_err("tdx: OSXSAVE is required for TDX\n");
+ goto success_disable_tdx;
+ }
+
+ if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
+ pr_err("tdx: MOVDIR64B is required for TDX\n");
+ goto success_disable_tdx;
+ }
+
+ if (!cpu_feature_enabled(X86_FEATURE_SELFSNOOP)) {
+ pr_err("Self-snoop is required for TDX\n");
+ goto success_disable_tdx;
+ }
+
+ if (!cpu_feature_enabled(X86_FEATURE_TDX_HOST_PLATFORM)) {
+ pr_err("tdx: no TDX private KeyIDs available\n");
+ goto success_disable_tdx;
+ }
+
+ if (!enable_virt_at_load) {
+ pr_err("tdx: tdx requires kvm.enable_virt_at_load=1\n");
+ goto success_disable_tdx;
+ }
+
+ /*
+ * Ideally KVM should probe whether the TDX module has been loaded
+ * first and then try to bring it up. But TDX needs to use SEAMCALL
+ * to probe whether the module is loaded (there is no CPUID or MSR
+ * for that), and making a SEAMCALL requires enabling virtualization
+ * first, just like the rest of the steps for bringing up the TDX module.
+ *
+ * So, for simplicity do everything in __tdx_bringup(); the first
+ * SEAMCALL will return -ENODEV when the module is not loaded. The
+ * only complication is having to make sure that initialization
+ * SEAMCALLs don't return TDX_SEAMCALL_VMFAILINVALID in other
+ * cases.
+ */
+ r = __tdx_bringup();
+ if (r) {
+ /*
+ * Only disable TDX; don't fail loading the KVM module if
+ * the TDX module could not be loaded. No need to print a
+ * message saying "module is not loaded" because it was
+ * already printed when the first SEAMCALL failed.
+ */
+ if (r == -ENODEV)
+ goto success_disable_tdx;
+
+ enable_tdx = 0;
+ }
+
+ return r;
+
+success_disable_tdx:
+ enable_tdx = 0;
+ return 0;
+}
diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
new file mode 100644
index 000000000000..51f98443e8a2
--- /dev/null
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_TDX_H
+#define __KVM_X86_VMX_TDX_H
+
+#include "tdx_arch.h"
+#include "tdx_errno.h"
+
+#ifdef CONFIG_KVM_INTEL_TDX
+#include "common.h"
+
+int tdx_bringup(void);
+void tdx_cleanup(void);
+
+extern bool enable_tdx;
+
+/* TDX module hardware states. These follow the TDX module OP_STATEs. */
+enum kvm_tdx_state {
+ TD_STATE_UNINITIALIZED = 0,
+ TD_STATE_INITIALIZED,
+ TD_STATE_RUNNABLE,
+};
+
+struct kvm_tdx {
+ struct kvm kvm;
+
+ struct misc_cg *misc_cg;
+ int hkid;
+ enum kvm_tdx_state state;
+
+ u64 attributes;
+ u64 xfam;
+
+ u64 tsc_offset;
+ u64 tsc_multiplier;
+
+ struct tdx_td td;
+
+ /* For KVM_TDX_INIT_MEM_REGION. */
+ atomic64_t nr_premapped;
+
+ /*
+ * Prevent vCPUs from entering the TD to ensure SEPT-zap-related SEAMCALLs do
+ * not contend with tdh_vp_enter() and TDCALLs.
+ * Set/unset is protected with kvm->mmu_lock.
+ */
+ bool wait_for_sept_zap;
+};
+
+/* TDX module vCPU states */
+enum vcpu_tdx_state {
+ VCPU_TD_STATE_UNINITIALIZED = 0,
+ VCPU_TD_STATE_INITIALIZED,
+};
+
+struct vcpu_tdx {
+ struct kvm_vcpu vcpu;
+ struct vcpu_vt vt;
+ u64 ext_exit_qualification;
+ gpa_t exit_gpa;
+ struct tdx_module_args vp_enter_args;
+
+ struct tdx_vp vp;
+
+ struct list_head cpu_list;
+
+ u64 vp_enter_ret;
+
+ enum vcpu_tdx_state state;
+ bool guest_entered;
+
+ u64 map_gpa_next;
+ u64 map_gpa_end;
+};
+
+void tdh_vp_rd_failed(struct vcpu_tdx *tdx, char *uclass, u32 field, u64 err);
+void tdh_vp_wr_failed(struct vcpu_tdx *tdx, char *uclass, char *op, u32 field,
+ u64 val, u64 err);
+
+static __always_inline u64 td_tdcs_exec_read64(struct kvm_tdx *kvm_tdx, u32 field)
+{
+ u64 err, data;
+
+ err = tdh_mng_rd(&kvm_tdx->td, TDCS_EXEC(field), &data);
+ if (unlikely(err)) {
+ pr_err("TDH_MNG_RD[EXEC.0x%x] failed: 0x%llx\n", field, err);
+ return 0;
+ }
+ return data;
+}
+
+static __always_inline void tdvps_vmcs_check(u32 field, u8 bits)
+{
+#define VMCS_ENC_ACCESS_TYPE_MASK 0x1UL
+#define VMCS_ENC_ACCESS_TYPE_FULL 0x0UL
+#define VMCS_ENC_ACCESS_TYPE_HIGH 0x1UL
+#define VMCS_ENC_ACCESS_TYPE(field) ((field) & VMCS_ENC_ACCESS_TYPE_MASK)
+
+ /* TDX is 64bit only. HIGH field isn't supported. */
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) &&
+ VMCS_ENC_ACCESS_TYPE(field) == VMCS_ENC_ACCESS_TYPE_HIGH,
+ "Read/Write to TD VMCS *_HIGH fields not supported");
+
+ BUILD_BUG_ON(bits != 16 && bits != 32 && bits != 64);
+
+#define VMCS_ENC_WIDTH_MASK GENMASK(14, 13)
+#define VMCS_ENC_WIDTH_16BIT (0UL << 13)
+#define VMCS_ENC_WIDTH_64BIT (1UL << 13)
+#define VMCS_ENC_WIDTH_32BIT (2UL << 13)
+#define VMCS_ENC_WIDTH_NATURAL (3UL << 13)
+#define VMCS_ENC_WIDTH(field) ((field) & VMCS_ENC_WIDTH_MASK)
+
+ /* TDX is 64bit only. i.e. natural width = 64bit. */
+ BUILD_BUG_ON_MSG(bits != 64 && __builtin_constant_p(field) &&
+ (VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_64BIT ||
+ VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_NATURAL),
+ "Invalid TD VMCS access for 64-bit field");
+ BUILD_BUG_ON_MSG(bits != 32 && __builtin_constant_p(field) &&
+ VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_32BIT,
+ "Invalid TD VMCS access for 32-bit field");
+ BUILD_BUG_ON_MSG(bits != 16 && __builtin_constant_p(field) &&
+ VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_16BIT,
+ "Invalid TD VMCS access for 16-bit field");
+}
+
+static __always_inline void tdvps_management_check(u64 field, u8 bits) {}
+static __always_inline void tdvps_state_non_arch_check(u64 field, u8 bits) {}
+
+#define TDX_BUILD_TDVPS_ACCESSORS(bits, uclass, lclass) \
+static __always_inline u##bits td_##lclass##_read##bits(struct vcpu_tdx *tdx, \
+ u32 field) \
+{ \
+ u64 err, data; \
+ \
+ tdvps_##lclass##_check(field, bits); \
+ err = tdh_vp_rd(&tdx->vp, TDVPS_##uclass(field), &data); \
+ if (unlikely(err)) { \
+ tdh_vp_rd_failed(tdx, #uclass, field, err); \
+ return 0; \
+ } \
+ return (u##bits)data; \
+} \
+static __always_inline void td_##lclass##_write##bits(struct vcpu_tdx *tdx, \
+ u32 field, u##bits val) \
+{ \
+ u64 err; \
+ \
+ tdvps_##lclass##_check(field, bits); \
+ err = tdh_vp_wr(&tdx->vp, TDVPS_##uclass(field), val, \
+ GENMASK_ULL(bits - 1, 0)); \
+ if (unlikely(err)) \
+ tdh_vp_wr_failed(tdx, #uclass, " = ", field, (u64)val, err); \
+} \
+static __always_inline void td_##lclass##_setbit##bits(struct vcpu_tdx *tdx, \
+ u32 field, u64 bit) \
+{ \
+ u64 err; \
+ \
+ tdvps_##lclass##_check(field, bits); \
+ err = tdh_vp_wr(&tdx->vp, TDVPS_##uclass(field), bit, bit); \
+ if (unlikely(err)) \
+ tdh_vp_wr_failed(tdx, #uclass, " |= ", field, bit, err); \
+} \
+static __always_inline void td_##lclass##_clearbit##bits(struct vcpu_tdx *tdx, \
+ u32 field, u64 bit) \
+{ \
+ u64 err; \
+ \
+ tdvps_##lclass##_check(field, bits); \
+ err = tdh_vp_wr(&tdx->vp, TDVPS_##uclass(field), 0, bit); \
+ if (unlikely(err)) \
+ tdh_vp_wr_failed(tdx, #uclass, " &= ~", field, bit, err);\
+}
+
+
+bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu);
+int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err);
+
+TDX_BUILD_TDVPS_ACCESSORS(16, VMCS, vmcs);
+TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs);
+TDX_BUILD_TDVPS_ACCESSORS(64, VMCS, vmcs);
+
+TDX_BUILD_TDVPS_ACCESSORS(8, MANAGEMENT, management);
+TDX_BUILD_TDVPS_ACCESSORS(64, STATE_NON_ARCH, state_non_arch);
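+
+/*
+ * For reference: each TDX_BUILD_TDVPS_ACCESSORS(bits, uclass, lclass)
+ * instantiation above generates td_<lclass>_read<bits>(),
+ * td_<lclass>_write<bits>(), td_<lclass>_setbit<bits>() and
+ * td_<lclass>_clearbit<bits>(), e.g. td_vmcs_write16() as used when
+ * initializing a TDX vCPU.
+ */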
+
+#else
+static inline int tdx_bringup(void) { return 0; }
+static inline void tdx_cleanup(void) {}
+
+#define enable_tdx 0
+
+struct kvm_tdx {
+ struct kvm kvm;
+};
+
+struct vcpu_tdx {
+ struct kvm_vcpu vcpu;
+};
+
+static inline bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu) { return false; }
+static inline int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) { return 0; }
+
+#endif
+
+#endif
diff --git a/arch/x86/kvm/vmx/tdx_arch.h b/arch/x86/kvm/vmx/tdx_arch.h
new file mode 100644
index 000000000000..a30e880849e3
--- /dev/null
+++ b/arch/x86/kvm/vmx/tdx_arch.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* architectural constants/data definitions for TDX SEAMCALLs */
+
+#ifndef __KVM_X86_TDX_ARCH_H
+#define __KVM_X86_TDX_ARCH_H
+
+#include <linux/types.h>
+
+/* TDX control structure (TDR/TDCS/TDVPS) field access codes */
+#define TDX_NON_ARCH BIT_ULL(63)
+#define TDX_CLASS_SHIFT 56
+#define TDX_FIELD_MASK GENMASK_ULL(31, 0)
+
+#define __BUILD_TDX_FIELD(non_arch, class, field) \
+ (((non_arch) ? TDX_NON_ARCH : 0) | \
+ ((u64)(class) << TDX_CLASS_SHIFT) | \
+ ((u64)(field) & TDX_FIELD_MASK))
+
+#define BUILD_TDX_FIELD(class, field) \
+ __BUILD_TDX_FIELD(false, (class), (field))
+
+#define BUILD_TDX_FIELD_NON_ARCH(class, field) \
+ __BUILD_TDX_FIELD(true, (class), (field))
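+
+/*
+ * Example (illustrative): BUILD_TDX_FIELD(TD_CLASS_EXECUTION_CONTROLS,
+ * TD_TDCS_EXEC_TSC_OFFSET), i.e. class 17 and field 10 as defined below,
+ * encodes to (17ULL << 56) | 10 = 0x110000000000000aULL.
+ */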
+
+
+/* Class code for TD */
+#define TD_CLASS_EXECUTION_CONTROLS 17ULL
+
+/* Class code for TDVPS */
+#define TDVPS_CLASS_VMCS 0ULL
+#define TDVPS_CLASS_GUEST_GPR 16ULL
+#define TDVPS_CLASS_OTHER_GUEST 17ULL
+#define TDVPS_CLASS_MANAGEMENT 32ULL
+
+enum tdx_tdcs_execution_control {
+ TD_TDCS_EXEC_TSC_OFFSET = 10,
+ TD_TDCS_EXEC_TSC_MULTIPLIER = 11,
+};
+
+enum tdx_vcpu_guest_other_state {
+ TD_VCPU_STATE_DETAILS_NON_ARCH = 0x100,
+};
+
+#define TDX_VCPU_STATE_DETAILS_INTR_PENDING BIT_ULL(0)
+
+static inline bool tdx_vcpu_state_details_intr_pending(u64 vcpu_state_details)
+{
+ return !!(vcpu_state_details & TDX_VCPU_STATE_DETAILS_INTR_PENDING);
+}
+
+/* @field is any of enum tdx_tdcs_execution_control */
+#define TDCS_EXEC(field) BUILD_TDX_FIELD(TD_CLASS_EXECUTION_CONTROLS, (field))
+
+/* @field is the VMCS field encoding */
+#define TDVPS_VMCS(field) BUILD_TDX_FIELD(TDVPS_CLASS_VMCS, (field))
+
+/* @field is any of enum tdx_guest_other_state */
+#define TDVPS_STATE(field) BUILD_TDX_FIELD(TDVPS_CLASS_OTHER_GUEST, (field))
+#define TDVPS_STATE_NON_ARCH(field) BUILD_TDX_FIELD_NON_ARCH(TDVPS_CLASS_OTHER_GUEST, (field))
+
+/* Management class fields */
+enum tdx_vcpu_guest_management {
+ TD_VCPU_PEND_NMI = 11,
+};
+
+/* @field is any of enum tdx_vcpu_guest_management */
+#define TDVPS_MANAGEMENT(field) BUILD_TDX_FIELD(TDVPS_CLASS_MANAGEMENT, (field))
+
+#define TDX_EXTENDMR_CHUNKSIZE 256
+
+struct tdx_cpuid_value {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+} __packed;
+
+#define TDX_TD_ATTR_DEBUG BIT_ULL(0)
+#define TDX_TD_ATTR_SEPT_VE_DISABLE BIT_ULL(28)
+#define TDX_TD_ATTR_PKS BIT_ULL(30)
+#define TDX_TD_ATTR_KL BIT_ULL(31)
+#define TDX_TD_ATTR_PERFMON BIT_ULL(63)
+
+#define TDX_EXT_EXIT_QUAL_TYPE_MASK GENMASK(3, 0)
+#define TDX_EXT_EXIT_QUAL_TYPE_PENDING_EPT_VIOLATION 6
+/*
+ * TD_PARAMS is provided as an input to TDH_MNG_INIT, the size of which is 1024B.
+ */
+struct td_params {
+ u64 attributes;
+ u64 xfam;
+ u16 max_vcpus;
+ u8 reserved0[6];
+
+ u64 eptp_controls;
+ u64 config_flags;
+ u16 tsc_frequency;
+ u8 reserved1[38];
+
+ u64 mrconfigid[6];
+ u64 mrowner[6];
+ u64 mrownerconfig[6];
+ u64 reserved2[4];
+
+ union {
+ DECLARE_FLEX_ARRAY(struct tdx_cpuid_value, cpuid_values);
+ u8 reserved3[768];
+ };
+} __packed __aligned(1024);
+
+/*
+ * Guest uses MAX_PA for GPAW when set.
+ * 0: GPA.SHARED bit is GPA[47]
+ * 1: GPA.SHARED bit is GPA[51]
+ */
+#define TDX_CONFIG_FLAGS_MAX_GPAW BIT_ULL(0)
+
+/*
+ * TDH.VP.ENTER, TDG.VP.VMCALL preserves RBP
+ * 0: RBP can be used for TDG.VP.VMCALL input. RBP is clobbered.
+ * 1: RBP can't be used for TDG.VP.VMCALL input. RBP is preserved.
+ */
+#define TDX_CONFIG_FLAGS_NO_RBP_MOD BIT_ULL(2)
+
+
+/*
+ * TDX requires the frequency to be defined in units of 25MHz, which is the
+ * frequency of the core crystal clock on TDX-capable platforms, i.e. the TDX
+ * module can only program frequencies that are multiples of 25MHz. The
+ * frequency must be between 100 MHz and 10 GHz (inclusive).
+ */
+#define TDX_TSC_KHZ_TO_25MHZ(tsc_in_khz) ((tsc_in_khz) / (25 * 1000))
+#define TDX_TSC_25MHZ_TO_KHZ(tsc_in_25mhz) ((tsc_in_25mhz) * (25 * 1000))
+#define TDX_MIN_TSC_FREQUENCY_KHZ (100 * 1000)
+#define TDX_MAX_TSC_FREQUENCY_KHZ (10 * 1000 * 1000)
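+
+/*
+ * Example (illustrative): a 2995200 kHz TSC is encoded as
+ * TDX_TSC_KHZ_TO_25MHZ(2995200) = 119, i.e. the programmed frequency is
+ * rounded down to 119 * 25 MHz = 2975 MHz.
+ */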
+
+/* Additional Secure EPT entry information */
+#define TDX_SEPT_LEVEL_MASK GENMASK_ULL(2, 0)
+#define TDX_SEPT_STATE_MASK GENMASK_ULL(15, 8)
+#define TDX_SEPT_STATE_SHIFT 8
+
+enum tdx_sept_entry_state {
+ TDX_SEPT_FREE = 0,
+ TDX_SEPT_BLOCKED = 1,
+ TDX_SEPT_PENDING = 2,
+ TDX_SEPT_PENDING_BLOCKED = 3,
+ TDX_SEPT_PRESENT = 4,
+};
+
+static inline u8 tdx_get_sept_level(u64 sept_entry_info)
+{
+ return sept_entry_info & TDX_SEPT_LEVEL_MASK;
+}
+
+static inline u8 tdx_get_sept_state(u64 sept_entry_info)
+{
+ return (sept_entry_info & TDX_SEPT_STATE_MASK) >> TDX_SEPT_STATE_SHIFT;
+}
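+
+/*
+ * Example (illustrative): sept_entry_info of 0x0402 decodes to SEPT
+ * level 2 and state 4, i.e. TDX_SEPT_PRESENT.
+ */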
+
+#define MD_FIELD_ID_FEATURES0_TOPOLOGY_ENUM BIT_ULL(20)
+
+/*
+ * TD scope metadata field ID.
+ */
+#define TD_MD_FIELD_ID_CPUID_VALUES 0x9410000300000000ULL
+
+#endif /* __KVM_X86_TDX_ARCH_H */
diff --git a/arch/x86/kvm/vmx/tdx_errno.h b/arch/x86/kvm/vmx/tdx_errno.h
new file mode 100644
index 000000000000..6ff4672c4181
--- /dev/null
+++ b/arch/x86/kvm/vmx/tdx_errno.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* architectural status code for SEAMCALL */
+
+#ifndef __KVM_X86_TDX_ERRNO_H
+#define __KVM_X86_TDX_ERRNO_H
+
+#define TDX_SEAMCALL_STATUS_MASK 0xFFFFFFFF00000000ULL
+
+/*
+ * TDX SEAMCALL Status Codes (returned in RAX)
+ */
+#define TDX_NON_RECOVERABLE_VCPU 0x4000000100000000ULL
+#define TDX_NON_RECOVERABLE_TD 0x4000000200000000ULL
+#define TDX_NON_RECOVERABLE_TD_NON_ACCESSIBLE 0x6000000500000000ULL
+#define TDX_NON_RECOVERABLE_TD_WRONG_APIC_MODE 0x6000000700000000ULL
+#define TDX_INTERRUPTED_RESUMABLE 0x8000000300000000ULL
+#define TDX_OPERAND_INVALID 0xC000010000000000ULL
+#define TDX_OPERAND_BUSY 0x8000020000000000ULL
+#define TDX_PREVIOUS_TLB_EPOCH_BUSY 0x8000020100000000ULL
+#define TDX_PAGE_METADATA_INCORRECT 0xC000030000000000ULL
+#define TDX_VCPU_NOT_ASSOCIATED 0x8000070200000000ULL
+#define TDX_KEY_GENERATION_FAILED 0x8000080000000000ULL
+#define TDX_KEY_STATE_INCORRECT 0xC000081100000000ULL
+#define TDX_KEY_CONFIGURED 0x0000081500000000ULL
+#define TDX_NO_HKID_READY_TO_WBCACHE 0x0000082100000000ULL
+#define TDX_FLUSHVP_NOT_DONE 0x8000082400000000ULL
+#define TDX_EPT_WALK_FAILED 0xC0000B0000000000ULL
+#define TDX_EPT_ENTRY_STATE_INCORRECT 0xC0000B0D00000000ULL
+#define TDX_METADATA_FIELD_NOT_READABLE 0xC0000C0200000000ULL
+
+/*
+ * TDX module operand ID, appears in 31:0 part of error code as
+ * detail information
+ */
+#define TDX_OPERAND_ID_RCX 0x01
+#define TDX_OPERAND_ID_TDR 0x80
+#define TDX_OPERAND_ID_SEPT 0x92
+#define TDX_OPERAND_ID_TD_EPOCH 0xa9
+
+#endif /* __KVM_X86_TDX_ERRNO_H */
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index f6986dee6f8c..0a6cf5bff2aa 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -59,8 +59,7 @@
* without the explicit restore, thinks the stack is getting walloped.
* Using an unwind hint is problematic due to x86-64's dynamic alignment.
*/
- mov %_ASM_BP, %_ASM_SP
- pop %_ASM_BP
+ leave
RET
.endm
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5c5766467a61..4953846cb30d 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -46,6 +46,7 @@
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/mshyperv.h>
+#include <asm/msr.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
#include <asm/vmx.h>
@@ -53,6 +54,7 @@
#include <trace/events/ipi.h>
#include "capabilities.h"
+#include "common.h"
#include "cpuid.h"
#include "hyperv.h"
#include "kvm_onhyperv.h"
@@ -115,6 +117,8 @@ module_param(enable_apicv, bool, 0444);
bool __read_mostly enable_ipiv = true;
module_param(enable_ipiv, bool, 0444);
+module_param(enable_device_posted_irqs, bool, 0444);
+
/*
* If nested=1, nested virtualization is supported, i.e., guests may use
* VMX and be a hypervisor for its own guests. If nested=0, guests may not
@@ -273,6 +277,7 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
case L1TF_MITIGATION_OFF:
l1tf = VMENTER_L1D_FLUSH_NEVER;
break;
+ case L1TF_MITIGATION_AUTO:
case L1TF_MITIGATION_FLUSH_NOWARN:
case L1TF_MITIGATION_FLUSH:
case L1TF_MITIGATION_FLUSH_NOSMT:
@@ -380,9 +385,9 @@ static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
if (!vmx->disable_fb_clear)
return;
- msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
+ msr = native_rdmsrq(MSR_IA32_MCU_OPT_CTRL);
msr |= FB_CLEAR_DIS;
- native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
+ native_wrmsrq(MSR_IA32_MCU_OPT_CTRL, msr);
/* Cache the MSR value to avoid reading it later */
vmx->msr_ia32_mcu_opt_ctrl = msr;
}
@@ -393,7 +398,7 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
return;
vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
- native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
+ native_wrmsrq(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
}
static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
@@ -769,8 +774,11 @@ void vmx_emergency_disable_virtualization_cpu(void)
return;
list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
- loaded_vmcss_on_cpu_link)
+ loaded_vmcss_on_cpu_link) {
vmcs_clear(v->vmcs);
+ if (v->shadow_vmcs)
+ vmcs_clear(v->shadow_vmcs);
+ }
kvm_cpu_vmxoff();
}
@@ -1063,7 +1071,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
* provide that period, so a CPU could write host's record into
* guest's memory.
*/
- wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+ wrmsrq(MSR_IA32_PEBS_ENABLE, 0);
}
i = vmx_find_loadstore_msr_slot(&m->guest, msr);
@@ -1192,13 +1200,13 @@ static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
{
u32 i;
- wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
- wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
- wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
- wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
+ wrmsrq(MSR_IA32_RTIT_STATUS, ctx->status);
+ wrmsrq(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
+ wrmsrq(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
+ wrmsrq(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
for (i = 0; i < addr_range; i++) {
- wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
- wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
+ wrmsrq(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
+ wrmsrq(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
}
}
@@ -1206,13 +1214,13 @@ static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
{
u32 i;
- rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
- rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
- rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
- rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
+ rdmsrq(MSR_IA32_RTIT_STATUS, ctx->status);
+ rdmsrq(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
+ rdmsrq(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
+ rdmsrq(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
for (i = 0; i < addr_range; i++) {
- rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
- rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
+ rdmsrq(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
+ rdmsrq(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
}
}
@@ -1225,9 +1233,9 @@ static void pt_guest_enter(struct vcpu_vmx *vmx)
* GUEST_IA32_RTIT_CTL is already set in the VMCS.
* Save host state before VM entry.
*/
- rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
+ rdmsrq(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
- wrmsrl(MSR_IA32_RTIT_CTL, 0);
+ wrmsrq(MSR_IA32_RTIT_CTL, 0);
pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
}
@@ -1248,7 +1256,7 @@ static void pt_guest_exit(struct vcpu_vmx *vmx)
* i.e. RTIT_CTL is always cleared on VM-Exit. Restore it if necessary.
*/
if (vmx->pt_desc.host.ctl)
- wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
+ wrmsrq(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
}
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
@@ -1281,6 +1289,7 @@ void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vcpu_vt *vt = to_vt(vcpu);
struct vmcs_host_state *host_state;
#ifdef CONFIG_X86_64
int cpu = raw_smp_processor_id();
@@ -1309,7 +1318,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
if (vmx->nested.need_vmcs12_to_shadow_sync)
nested_sync_vmcs12_to_shadow(vcpu);
- if (vmx->guest_state_loaded)
+ if (vt->guest_state_loaded)
return;
host_state = &vmx->loaded_vmcs->host_state;
@@ -1330,15 +1339,15 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
fs_sel = current->thread.fsindex;
gs_sel = current->thread.gsindex;
fs_base = current->thread.fsbase;
- vmx->msr_host_kernel_gs_base = current->thread.gsbase;
+ vt->msr_host_kernel_gs_base = current->thread.gsbase;
} else {
savesegment(fs, fs_sel);
savesegment(gs, gs_sel);
fs_base = read_msr(MSR_FS_BASE);
- vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+ vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
}
- wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+ wrmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#else
savesegment(fs, fs_sel);
savesegment(gs, gs_sel);
@@ -1347,14 +1356,14 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
#endif
vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
- vmx->guest_state_loaded = true;
+ vt->guest_state_loaded = true;
}
static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
{
struct vmcs_host_state *host_state;
- if (!vmx->guest_state_loaded)
+ if (!vmx->vt.guest_state_loaded)
return;
host_state = &vmx->loaded_vmcs->host_state;
@@ -1362,7 +1371,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
++vmx->vcpu.stat.host_state_reload;
#ifdef CONFIG_X86_64
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+ rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
kvm_load_ldt(host_state->ldt_sel);
@@ -1382,10 +1391,10 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
#endif
invalidate_tss_limit();
#ifdef CONFIG_X86_64
- wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+ wrmsrq(MSR_KERNEL_GS_BASE, vmx->vt.msr_host_kernel_gs_base);
#endif
load_fixmap_gdt(raw_smp_processor_id());
- vmx->guest_state_loaded = false;
+ vmx->vt.guest_state_loaded = false;
vmx->guest_uret_msrs_loaded = false;
}
@@ -1393,8 +1402,8 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
{
preempt_disable();
- if (vmx->guest_state_loaded)
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+ if (vmx->vt.guest_state_loaded)
+ rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
preempt_enable();
return vmx->msr_guest_kernel_gs_base;
}
@@ -1402,8 +1411,8 @@ static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
{
preempt_disable();
- if (vmx->guest_state_loaded)
- wrmsrl(MSR_KERNEL_GS_BASE, data);
+ if (vmx->vt.guest_state_loaded)
+ wrmsrq(MSR_KERNEL_GS_BASE, data);
preempt_enable();
vmx->msr_guest_kernel_gs_base = data;
}
@@ -1441,8 +1450,7 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
}
}
-void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
- struct loaded_vmcs *buddy)
+void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
@@ -1469,17 +1477,6 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
if (prev != vmx->loaded_vmcs->vmcs) {
per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
vmcs_load(vmx->loaded_vmcs->vmcs);
-
- /*
- * No indirect branch prediction barrier needed when switching
- * the active VMCS within a vCPU, unless IBRS is advertised to
- * the vCPU. To minimize the number of IBPBs executed, KVM
- * performs IBPB on nested VM-Exit (a single nested transition
- * may switch the active VMCS multiple times).
- */
- if (static_branch_likely(&switch_vcpu_ibpb) &&
- (!buddy || WARN_ON_ONCE(buddy->vmcs != prev)))
- indirect_branch_prediction_barrier();
}
if (!already_loaded) {
@@ -1518,7 +1515,7 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
shrink_ple_window(vcpu);
- vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
+ vmx_vcpu_load_vmcs(vcpu, cpu);
vmx_vcpu_pi_load(vcpu, cpu);
}
@@ -1579,7 +1576,7 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
vmcs_writel(GUEST_RFLAGS, rflags);
if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
- vmx->emulation_required = vmx_emulation_required(vcpu);
+ vmx->vt.emulation_required = vmx_emulation_required(vcpu);
}
bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
@@ -1699,7 +1696,7 @@ int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
* so that guest userspace can't DoS the guest simply by triggering
* emulation (enclaves are CPL3 only).
*/
- if (to_vmx(vcpu)->exit_reason.enclave_mode) {
+ if (vmx_get_exit_reason(vcpu).enclave_mode) {
kvm_queue_exception(vcpu, UD_VECTOR);
return X86EMUL_PROPAGATE_FAULT;
}
@@ -1714,7 +1711,7 @@ int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
- union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason;
+ union vmx_exit_reason exit_reason = vmx_get_exit_reason(vcpu);
unsigned long rip, orig_rip;
u32 instr_len;
@@ -1861,7 +1858,7 @@ void vmx_inject_exception(struct kvm_vcpu *vcpu)
return;
}
- WARN_ON_ONCE(vmx->emulation_required);
+ WARN_ON_ONCE(vmx->vt.emulation_required);
if (kvm_exception_is_soft(ex->vector)) {
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
@@ -2574,7 +2571,7 @@ static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
{
u64 allowed;
- rdmsrl(msr, allowed);
+ rdmsrq(msr, allowed);
return ctl_opt & allowed;
}
@@ -2746,7 +2743,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
break;
}
- rdmsrl(MSR_IA32_VMX_BASIC, basic_msr);
+ rdmsrq(MSR_IA32_VMX_BASIC, basic_msr);
/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
if (vmx_basic_vmcs_size(basic_msr) > PAGE_SIZE)
@@ -2766,7 +2763,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
if (vmx_basic_vmcs_mem_type(basic_msr) != X86_MEMTYPE_WB)
return -EIO;
- rdmsrl(MSR_IA32_VMX_MISC, misc_msr);
+ rdmsrq(MSR_IA32_VMX_MISC, misc_msr);
vmcs_conf->basic = basic_msr;
vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
@@ -2850,7 +2847,7 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer)
fault:
WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
- rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
+ rdmsrq_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
cr4_clear_bits(X86_CR4_VMXE);
return -EFAULT;
@@ -3404,7 +3401,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
}
/* depends on vcpu->arch.cr0 to be set to a new value */
- vmx->emulation_required = vmx_emulation_required(vcpu);
+ vmx->vt.emulation_required = vmx_emulation_required(vcpu);
}
static int vmx_get_max_ept_level(void)
@@ -3667,7 +3664,7 @@ void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
{
__vmx_set_segment(vcpu, var, seg);
- to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
+ to_vmx(vcpu)->vt.emulation_required = vmx_emulation_required(vcpu);
}
void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -4195,50 +4192,6 @@ void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
pt_update_intercept_for_msr(vcpu);
}
-static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
- int pi_vec)
-{
-#ifdef CONFIG_SMP
- if (vcpu->mode == IN_GUEST_MODE) {
- /*
- * The vector of the virtual has already been set in the PIR.
- * Send a notification event to deliver the virtual interrupt
- * unless the vCPU is the currently running vCPU, i.e. the
- * event is being sent from a fastpath VM-Exit handler, in
- * which case the PIR will be synced to the vIRR before
- * re-entering the guest.
- *
- * When the target is not the running vCPU, the following
- * possibilities emerge:
- *
- * Case 1: vCPU stays in non-root mode. Sending a notification
- * event posts the interrupt to the vCPU.
- *
- * Case 2: vCPU exits to root mode and is still runnable. The
- * PIR will be synced to the vIRR before re-entering the guest.
- * Sending a notification event is ok as the host IRQ handler
- * will ignore the spurious event.
- *
- * Case 3: vCPU exits to root mode and is blocked. vcpu_block()
- * has already synced PIR to vIRR and never blocks the vCPU if
- * the vIRR is not empty. Therefore, a blocked vCPU here does
- * not wait for any requested interrupts in PIR, and sending a
- * notification event also results in a benign, spurious event.
- */
-
- if (vcpu != kvm_get_running_vcpu())
- __apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
- return;
- }
-#endif
- /*
- * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
- * otherwise do nothing as KVM will grab the highest priority pending
- * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
- */
- kvm_vcpu_wake_up(vcpu);
-}
-
static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
int vector)
{
@@ -4287,7 +4240,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
*/
static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vcpu_vt *vt = to_vt(vcpu);
int r;
r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
@@ -4298,20 +4251,7 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
if (!vcpu->arch.apic->apicv_active)
return -1;
- if (pi_test_and_set_pir(vector, &vmx->pi_desc))
- return 0;
-
- /* If a previous notification has sent the IPI, nothing to do. */
- if (pi_test_and_set_on(&vmx->pi_desc))
- return 0;
-
- /*
- * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
- * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
- * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
- * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
- */
- kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
+ __vmx_deliver_posted_interrupt(vcpu, &vt->pi_desc, vector);
return 0;
}
@@ -4391,7 +4331,7 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32))
vmcs_writel(HOST_IA32_SYSENTER_ESP, 0);
- rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
+ rdmsrq(MSR_IA32_SYSENTER_EIP, tmpl);
vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
@@ -4778,7 +4718,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
vmcs_write16(GUEST_INTR_STATUS, 0);
vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
- vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
+ vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->vt.pi_desc)));
}
if (vmx_can_use_ipiv(&vmx->vcpu)) {
@@ -4891,8 +4831,8 @@ static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu)
* Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
* or POSTED_INTR_WAKEUP_VECTOR.
*/
- vmx->pi_desc.nv = POSTED_INTR_VECTOR;
- __pi_set_sn(&vmx->pi_desc);
+ vmx->vt.pi_desc.nv = POSTED_INTR_VECTOR;
+ __pi_set_sn(&vmx->vt.pi_desc);
}
void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -5809,11 +5749,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
static int handle_ept_violation(struct kvm_vcpu *vcpu)
{
- unsigned long exit_qualification;
+ unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
gpa_t gpa;
- u64 error_code;
-
- exit_qualification = vmx_get_exit_qual(vcpu);
/*
* EPT violation happened while executing iret from NMI,
@@ -5829,23 +5766,6 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
trace_kvm_page_fault(vcpu, gpa, exit_qualification);
- /* Is it a read fault? */
- error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
- ? PFERR_USER_MASK : 0;
- /* Is it a write fault? */
- error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
- ? PFERR_WRITE_MASK : 0;
- /* Is it a fetch fault? */
- error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
- ? PFERR_FETCH_MASK : 0;
- /* ept page table entry is present? */
- error_code |= (exit_qualification & EPT_VIOLATION_PROT_MASK)
- ? PFERR_PRESENT_MASK : 0;
-
- if (error_code & EPT_VIOLATION_GVA_IS_VALID)
- error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
- PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
-
/*
* Check that the GPA doesn't exceed physical memory limits, as that is
* a guest page fault. We have to emulate the instruction here, because
@@ -5857,7 +5777,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
if (unlikely(allow_smaller_maxphyaddr && !kvm_vcpu_is_legal_gpa(vcpu, gpa)))
return kvm_emulate_instruction(vcpu, 0);
- return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
+ return __vmx_handle_ept_violation(vcpu, gpa, exit_qualification);
}
static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
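The error-code translation removed above presumably moves into the shared helper named by the new call site; a plausible sketch of __vmx_handle_ept_violation(), assuming it lives in the new common.h and does nothing more than re-package the deleted lines around kvm_mmu_page_fault():

	static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
						     unsigned long exit_qualification)
	{
		u64 error_code;

		/* Is it a read fault? */
		error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
			     ? PFERR_USER_MASK : 0;
		/* Is it a write fault? */
		error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
			      ? PFERR_WRITE_MASK : 0;
		/* Is it a fetch fault? */
		error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
			      ? PFERR_FETCH_MASK : 0;
		/* Is the EPT page table entry present? */
		error_code |= (exit_qualification & EPT_VIOLATION_PROT_MASK)
			      ? PFERR_PRESENT_MASK : 0;

		if (error_code & EPT_VIOLATION_GVA_IS_VALID)
			error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
				      PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;

		return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
	}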
@@ -5902,7 +5822,7 @@ static bool vmx_unhandleable_emulation_required(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (!vmx->emulation_required)
+ if (!vmx->vt.emulation_required)
return false;
/*
@@ -5934,7 +5854,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
intr_window_requested = exec_controls_get(vmx) &
CPU_BASED_INTR_WINDOW_EXITING;
- while (vmx->emulation_required && count-- != 0) {
+ while (vmx->vt.emulation_required && count-- != 0) {
if (intr_window_requested && !vmx_interrupt_blocked(vcpu))
return handle_interrupt_window(&vmx->vcpu);
@@ -6129,7 +6049,7 @@ static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
* VM-Exits. Unconditionally set the flag here and leave the handling to
* vmx_handle_exit().
*/
- to_vmx(vcpu)->exit_reason.bus_lock_detected = true;
+ to_vt(vcpu)->exit_reason.bus_lock_detected = true;
return 1;
}
@@ -6227,9 +6147,9 @@ void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- *reason = vmx->exit_reason.full;
+ *reason = vmx->vt.exit_reason.full;
*info1 = vmx_get_exit_qual(vcpu);
- if (!(vmx->exit_reason.failed_vmentry)) {
+ if (!(vmx->vt.exit_reason.failed_vmentry)) {
*info2 = vmx->idt_vectoring_info;
*intr_info = vmx_get_intr_info(vcpu);
if (is_exception_with_error_code(*intr_info))
@@ -6525,7 +6445,7 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- union vmx_exit_reason exit_reason = vmx->exit_reason;
+ union vmx_exit_reason exit_reason = vmx_get_exit_reason(vcpu);
u32 vectoring_info = vmx->idt_vectoring_info;
u16 exit_handler_index;
@@ -6581,7 +6501,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
* the least awful solution for the userspace case without
* risking false positives.
*/
- if (vmx->emulation_required) {
+ if (vmx->vt.emulation_required) {
nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
return 1;
}
@@ -6591,7 +6511,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
}
/* If guest state is invalid, start emulating. L2 is handled above. */
- if (vmx->emulation_required)
+ if (vmx->vt.emulation_required)
return handle_invalid_guest_state(vcpu);
if (exit_reason.failed_vmentry) {
@@ -6691,7 +6611,7 @@ int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
* Exit to user space when bus lock detected to inform that there is
* a bus lock in guest.
*/
- if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
+ if (vmx_get_exit_reason(vcpu).bus_lock_detected) {
if (ret > 0)
vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
@@ -6745,7 +6665,7 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
vcpu->stat.l1d_flush++;
if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
- native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+ native_wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
return;
}
@@ -6970,22 +6890,22 @@ static void vmx_set_rvi(int vector)
int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vcpu_vt *vt = to_vt(vcpu);
int max_irr;
bool got_posted_interrupt;
if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
return -EIO;
- if (pi_test_on(&vmx->pi_desc)) {
- pi_clear_on(&vmx->pi_desc);
+ if (pi_test_on(&vt->pi_desc)) {
+ pi_clear_on(&vt->pi_desc);
/*
* IOMMU can write to PID.ON, so the barrier matters even on UP.
* But on x86 this is just a compiler barrier anyway.
*/
smp_mb__after_atomic();
got_posted_interrupt =
- kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
+ kvm_apic_update_irr(vcpu, vt->pi_desc.pir, &max_irr);
} else {
max_irr = kvm_lapic_find_highest_irr(vcpu);
got_posted_interrupt = false;
@@ -7025,14 +6945,6 @@ void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
}
-void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- pi_clear_on(&vmx->pi_desc);
- memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
-}
-
void vmx_do_interrupt_irqoff(unsigned long entry);
void vmx_do_nmi_irqoff(void);
@@ -7052,7 +6964,7 @@ static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
* the #NM exception.
*/
if (is_xfd_nm_fault(vcpu))
- rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
+ rdmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
}
static void handle_exception_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
@@ -7089,14 +7001,12 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (vmx->emulation_required)
+ if (to_vt(vcpu)->emulation_required)
return;
- if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
+ if (vmx_get_exit_reason(vcpu).basic == EXIT_REASON_EXTERNAL_INTERRUPT)
handle_external_interrupt_irqoff(vcpu, vmx_get_intr_info(vcpu));
- else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
+ else if (vmx_get_exit_reason(vcpu).basic == EXIT_REASON_EXCEPTION_NMI)
handle_exception_irqoff(vcpu, vmx_get_intr_info(vcpu));
}
@@ -7307,7 +7217,7 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
return;
if (flags & VMX_RUN_SAVE_SPEC_CTRL)
- vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
+ vmx->spec_ctrl = native_rdmsrq(MSR_IA32_SPEC_CTRL);
/*
* If the guest/host SPEC_CTRL values differ, restore the host value.
@@ -7318,7 +7228,7 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
*/
if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
vmx->spec_ctrl != hostval)
- native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
+ native_wrmsrq(MSR_IA32_SPEC_CTRL, hostval);
barrier_nospec();
}
@@ -7331,10 +7241,10 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
* the fastpath even, all other exits must use the slow path.
*/
if (is_guest_mode(vcpu) &&
- to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_PREEMPTION_TIMER)
+ vmx_get_exit_reason(vcpu).basic != EXIT_REASON_PREEMPTION_TIMER)
return EXIT_FASTPATH_NONE;
- switch (to_vmx(vcpu)->exit_reason.basic) {
+ switch (vmx_get_exit_reason(vcpu).basic) {
case EXIT_REASON_MSR_WRITE:
return handle_fastpath_set_msr_irqoff(vcpu);
case EXIT_REASON_PREEMPTION_TIMER:
@@ -7346,6 +7256,20 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
}
}
+noinstr void vmx_handle_nmi(struct kvm_vcpu *vcpu)
+{
+ if ((u16)vmx_get_exit_reason(vcpu).basic != EXIT_REASON_EXCEPTION_NMI ||
+ !is_nmi(vmx_get_intr_info(vcpu)))
+ return;
+
+ kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
+ if (cpu_feature_enabled(X86_FEATURE_FRED))
+ fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
+ else
+ vmx_do_nmi_irqoff();
+ kvm_after_interrupt(vcpu);
+}
+
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
unsigned int flags)
{
@@ -7358,10 +7282,14 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
* mitigation for MDS is done late in VMentry and is still
* executed in spite of L1D Flush. This is because an extra VERW
* should not matter much after the big hammer L1D Flush.
+ *
+ * cpu_buf_vm_clear is used when the system is not vulnerable to MDS/TAA,
+ * but is affected by MMIO Stale Data. In such cases the mitigation is
+ * only needed against an MMIO-capable guest.
*/
if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu);
- else if (static_branch_unlikely(&mmio_stale_data_clear) &&
+ else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm))
mds_clear_cpu_buffers();
@@ -7381,23 +7309,15 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
vmx_enable_fb_clear(vmx);
if (unlikely(vmx->fail)) {
- vmx->exit_reason.full = 0xdead;
+ vmx->vt.exit_reason.full = 0xdead;
goto out;
}
- vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
- if (likely(!vmx->exit_reason.failed_vmentry))
+ vmx->vt.exit_reason.full = vmcs_read32(VM_EXIT_REASON);
+ if (likely(!vmx_get_exit_reason(vcpu).failed_vmentry))
vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
- if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
- is_nmi(vmx_get_intr_info(vcpu))) {
- kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
- if (cpu_feature_enabled(X86_FEATURE_FRED))
- fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
- else
- vmx_do_nmi_irqoff();
- kvm_after_interrupt(vcpu);
- }
+ vmx_handle_nmi(vcpu);
out:
guest_state_exit_irqoff();
@@ -7418,15 +7338,15 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
* start emulation until we arrive back to a valid state. Synthesize a
* consistency check VM-Exit due to invalid guest state and bail.
*/
- if (unlikely(vmx->emulation_required)) {
+ if (unlikely(vmx->vt.emulation_required)) {
vmx->fail = 0;
- vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
- vmx->exit_reason.failed_vmentry = 1;
+ vmx->vt.exit_reason.full = EXIT_REASON_INVALID_STATE;
+ vmx->vt.exit_reason.failed_vmentry = 1;
kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
- vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
+ vmx->vt.exit_qualification = ENTRY_FAIL_DEFAULT;
kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
- vmx->exit_intr_info = 0;
+ vmx->vt.exit_intr_info = 0;
return EXIT_FASTPATH_NONE;
}
@@ -7529,7 +7449,7 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
* checking.
*/
if (vmx->nested.nested_run_pending &&
- !vmx->exit_reason.failed_vmentry)
+ !vmx_get_exit_reason(vcpu).failed_vmentry)
++vcpu->stat.nested_run;
vmx->nested.nested_run_pending = 0;
@@ -7538,12 +7458,12 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
if (unlikely(vmx->fail))
return EXIT_FASTPATH_NONE;
- if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
+ if (unlikely((u16)vmx_get_exit_reason(vcpu).basic == EXIT_REASON_MCE_DURING_VMENTRY))
kvm_machine_check();
trace_kvm_exit(vcpu, KVM_ISA_VMX);
- if (unlikely(vmx->exit_reason.failed_vmentry))
+ if (unlikely(vmx_get_exit_reason(vcpu).failed_vmentry))
return EXIT_FASTPATH_NONE;
vmx->loaded_vmcs->launched = 1;
@@ -7575,7 +7495,7 @@ int vmx_vcpu_create(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
vmx = to_vmx(vcpu);
- INIT_LIST_HEAD(&vmx->pi_wakeup_list);
+ INIT_LIST_HEAD(&vmx->vt.pi_wakeup_list);
err = -ENOMEM;
@@ -7673,7 +7593,7 @@ int vmx_vcpu_create(struct kvm_vcpu *vcpu)
if (vmx_can_use_ipiv(vcpu))
WRITE_ONCE(to_kvm_vmx(vcpu->kvm)->pid_table[vcpu->vcpu_id],
- __pa(&vmx->pi_desc) | PID_TABLE_ENTRY_VALID);
+ __pa(&vmx->vt.pi_desc) | PID_TABLE_ENTRY_VALID);
return 0;
@@ -7700,6 +7620,7 @@ int vmx_vm_init(struct kvm *kvm)
case L1TF_MITIGATION_FLUSH_NOWARN:
/* 'I explicitly don't care' is set */
break;
+ case L1TF_MITIGATION_AUTO:
case L1TF_MITIGATION_FLUSH:
case L1TF_MITIGATION_FLUSH_NOSMT:
case L1TF_MITIGATION_FULL:
@@ -7717,9 +7638,23 @@ int vmx_vm_init(struct kvm *kvm)
break;
}
}
+
+ if (enable_pml)
+ kvm->arch.cpu_dirty_log_size = PML_LOG_NR_ENTRIES;
return 0;
}
+static inline bool vmx_ignore_guest_pat(struct kvm *kvm)
+{
+ /*
+ * Non-coherent DMA devices need the guest to flush CPU caches properly.
+ * In that case it is not possible to map all guest RAM as WB, so
+ * always trust guest PAT.
+ */
+ return !kvm_arch_has_noncoherent_dma(kvm) &&
+ kvm_check_has_quirk(kvm, KVM_X86_QUIRK_IGNORE_GUEST_PAT);
+}
+
u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
/*
@@ -7729,13 +7664,8 @@ u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
if (is_mmio)
return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
- /*
- * Force WB and ignore guest PAT if the VM does NOT have a non-coherent
- * device attached. Letting the guest control memory types on Intel
- * CPUs may result in unexpected behavior, and so KVM's ABI is to trust
- * the guest to behave only as a last resort.
- */
- if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
+ /* Force WB if ignoring guest PAT */
+ if (vmx_ignore_guest_pat(vcpu->kvm))
return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT);
@@ -7959,7 +7889,7 @@ static __init u64 vmx_get_perf_capabilities(void)
return 0;
if (boot_cpu_has(X86_FEATURE_PDCM))
- rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
+ rdmsrq(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) {
x86_perf_get_lbr(&vmx_lbr_caps);
@@ -8508,7 +8438,7 @@ __init int vmx_hardware_setup(void)
kvm_enable_efer_bits(EFER_NX);
if (boot_cpu_has(X86_FEATURE_MPX)) {
- rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
+ rdmsrq(MSR_IA32_BNDCFGS, host_bndcfgs);
WARN_ONCE(host_bndcfgs, "BNDCFGS in host will be lost");
}
@@ -8597,6 +8527,8 @@ __init int vmx_hardware_setup(void)
if (enable_ept)
kvm_mmu_set_ept_masks(enable_ept_ad_bits,
cpu_has_vmx_ept_execute_only());
+ else
+ vt_x86_ops.get_mt_mask = NULL;
/*
* Setup shadow_me_value/shadow_me_mask to include MKTME KeyID
@@ -8614,9 +8546,6 @@ __init int vmx_hardware_setup(void)
if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
enable_pml = 0;
- if (!enable_pml)
- vt_x86_ops.cpu_dirty_log_size = 0;
-
if (!cpu_has_vmx_preemption_timer())
enable_preemption_timer = false;
@@ -8674,6 +8603,27 @@ __init int vmx_hardware_setup(void)
kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);
+ /*
+ * On Intel CPUs that lack self-snoop feature, letting the guest control
+ * memory types may result in unexpected behavior. So always ignore guest
+ * PAT on those CPUs and map VM as writeback, not allowing userspace to
+ * disable the quirk.
+ *
+ * On certain Intel CPUs (e.g. SPR, ICX), though self-snoop feature is
+ * supported, UC is slow enough to cause issues with some older guests (e.g.
+ * an old version of bochs driver uses ioremap() instead of ioremap_wc() to
+ * map the video RAM, causing wayland desktop to fail to get started
+ * correctly). To avoid breaking those older guests that rely on KVM to force
+ * memory type to WB, provide KVM_X86_QUIRK_IGNORE_GUEST_PAT to preserve the
+ * safer (for performance) default behavior.
+ *
+ * On top of this, non-coherent DMA devices need the guest to flush CPU
+ * caches properly. This also requires honoring guest PAT, and is forced
+ * independent of the quirk in vmx_ignore_guest_pat().
+ */
+ if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+ kvm_caps.supported_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
+ kvm_caps.inapplicable_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
return r;
}
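Since the quirk machinery is the only knob here, a userspace VMM that wants KVM to honor guest PAT even without a non-coherent device can disable the new quirk per VM. A minimal sketch, assuming the uapi header already exports KVM_X86_QUIRK_IGNORE_GUEST_PAT (added elsewhere in this series) and that vm_fd came from KVM_CREATE_VM:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int honor_guest_pat(int vm_fd)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_DISABLE_QUIRKS2,
			.args[0] = KVM_X86_QUIRK_IGNORE_GUEST_PAT,
		};
		/* KVM_CHECK_EXTENSION reports the quirks this kernel supports. */
		int supported = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DISABLE_QUIRKS2);

		if (supported < 0 || !(supported & KVM_X86_QUIRK_IGNORE_GUEST_PAT))
			return -1;

		/* Disabling the quirk makes KVM honor guest PAT in the EPT memtype. */
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}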
@@ -8687,23 +8637,16 @@ static void vmx_cleanup_l1d_flush(void)
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
}
-static void __vmx_exit(void)
+void vmx_exit(void)
{
allow_smaller_maxphyaddr = false;
vmx_cleanup_l1d_flush();
-}
-static void __exit vmx_exit(void)
-{
- kvm_exit();
- __vmx_exit();
kvm_x86_vendor_exit();
-
}
-module_exit(vmx_exit);
-static int __init vmx_init(void)
+int __init vmx_init(void)
{
int r, cpu;
@@ -8747,21 +8690,9 @@ static int __init vmx_init(void)
if (!enable_ept)
allow_smaller_maxphyaddr = true;
- /*
- * Common KVM initialization _must_ come last, after this, /dev/kvm is
- * exposed to userspace!
- */
- r = kvm_init(sizeof(struct vcpu_vmx), __alignof__(struct vcpu_vmx),
- THIS_MODULE);
- if (r)
- goto err_kvm_init;
-
return 0;
-err_kvm_init:
- __vmx_exit();
err_l1d_flush:
kvm_x86_vendor_exit();
return r;
}
-module_init(vmx_init);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 951e44dc9d0e..b5758c33c60f 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -11,11 +11,13 @@
#include "capabilities.h"
#include "../kvm_cache_regs.h"
+#include "pmu_intel.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"
#include "../mmu.h"
+#include "common.h"
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
@@ -67,47 +69,6 @@ struct pt_desc {
struct pt_ctx guest;
};
-union vmx_exit_reason {
- struct {
- u32 basic : 16;
- u32 reserved16 : 1;
- u32 reserved17 : 1;
- u32 reserved18 : 1;
- u32 reserved19 : 1;
- u32 reserved20 : 1;
- u32 reserved21 : 1;
- u32 reserved22 : 1;
- u32 reserved23 : 1;
- u32 reserved24 : 1;
- u32 reserved25 : 1;
- u32 bus_lock_detected : 1;
- u32 enclave_mode : 1;
- u32 smi_pending_mtf : 1;
- u32 smi_from_vmx_root : 1;
- u32 reserved30 : 1;
- u32 failed_vmentry : 1;
- };
- u32 full;
-};
-
-struct lbr_desc {
- /* Basic info about guest LBR records. */
- struct x86_pmu_lbr records;
-
- /*
- * Emulate LBR feature via passthrough LBR registers when the
- * per-vcpu guest LBR event is scheduled on the current pcpu.
- *
- * The records may be inaccurate if the host reclaims the LBR.
- */
- struct perf_event *event;
-
- /* True if LBRs are marked as not intercepted in the MSR bitmap */
- bool msr_passthrough;
-};
-
-extern struct x86_pmu_lbr vmx_lbr_caps;
-
/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -248,20 +209,10 @@ struct nested_vmx {
struct vcpu_vmx {
struct kvm_vcpu vcpu;
+ struct vcpu_vt vt;
u8 fail;
u8 x2apic_msr_bitmap_mode;
- /*
- * If true, host state has been stored in vmx->loaded_vmcs for
- * the CPU registers that only need to be switched when transitioning
- * to/from the kernel, and the registers have been loaded with guest
- * values. If false, host state is loaded in the CPU registers
- * and vmx->loaded_vmcs->host_state is invalid.
- */
- bool guest_state_loaded;
-
- unsigned long exit_qualification;
- u32 exit_intr_info;
u32 idt_vectoring_info;
ulong rflags;
@@ -274,7 +225,6 @@ struct vcpu_vmx {
struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
- u64 msr_host_kernel_gs_base;
u64 msr_guest_kernel_gs_base;
#endif
@@ -313,15 +263,6 @@ struct vcpu_vmx {
} seg[8];
} segment_cache;
int vpid;
- bool emulation_required;
-
- union vmx_exit_reason exit_reason;
-
- /* Posted interrupt descriptor */
- struct pi_desc pi_desc;
-
- /* Used if this vCPU is waiting for PI notification wakeup. */
- struct list_head pi_wakeup_list;
/* Support for a guest hypervisor (nested VMX) */
struct nested_vmx nested;
@@ -376,8 +317,44 @@ struct kvm_vmx {
u64 *pid_table;
};
-void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
- struct loaded_vmcs *buddy);
+static __always_inline struct vcpu_vt *to_vt(struct kvm_vcpu *vcpu)
+{
+ return &(container_of(vcpu, struct vcpu_vmx, vcpu)->vt);
+}
+
+static __always_inline struct kvm_vcpu *vt_to_vcpu(struct vcpu_vt *vt)
+{
+ return &(container_of(vt, struct vcpu_vmx, vt)->vcpu);
+}
+
+static __always_inline union vmx_exit_reason vmx_get_exit_reason(struct kvm_vcpu *vcpu)
+{
+ return to_vt(vcpu)->exit_reason;
+}
+
+static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vt *vt = to_vt(vcpu);
+
+ if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1) &&
+ !WARN_ON_ONCE(is_td_vcpu(vcpu)))
+ vt->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+ return vt->exit_qualification;
+}
+
+static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vt *vt = to_vt(vcpu);
+
+ if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2) &&
+ !WARN_ON_ONCE(is_td_vcpu(vcpu)))
+ vt->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+ return vt->exit_intr_info;
+}
+
+void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
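The accessors above only work because struct vcpu_vmx now embeds both the generic struct kvm_vcpu and the VMX/TDX-common struct vcpu_vt, so each direction is a single container_of() walk. A standalone illustration of that round trip, using deliberately simplified stand-in structures (the real layouts carry far more state):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct kvm_vcpu { int id; };		/* stand-in for the real struct */
	struct vcpu_vt  { int exit_reason; };	/* stand-in for the real struct */

	struct vcpu_vmx {
		struct kvm_vcpu vcpu;	/* must stay first, see the BUILD_BUG_ON in vmx_vcpu_create() */
		struct vcpu_vt vt;
	};

	int main(void)
	{
		struct vcpu_vmx vmx = { .vcpu = { .id = 1 }, .vt = { .exit_reason = 48 } };
		struct kvm_vcpu *vcpu = &vmx.vcpu;

		/* to_vt(): hop to the enclosing vcpu_vmx, then take its vt member. */
		struct vcpu_vt *vt = &container_of(vcpu, struct vcpu_vmx, vcpu)->vt;

		/* vt_to_vcpu(): the inverse walk through the same container. */
		struct kvm_vcpu *back = &container_of(vt, struct vcpu_vmx, vt)->vcpu;

		printf("%d %d %d\n", vt->exit_reason, back->id, back == vcpu);
		return 0;
	}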
@@ -662,45 +639,10 @@ static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct vcpu_vmx, vcpu);
}
-static inline struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
-{
- return &to_vmx(vcpu)->lbr_desc;
-}
-
-static inline struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
-{
- return &vcpu_to_lbr_desc(vcpu)->records;
-}
-
-static inline bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
-{
- return !!vcpu_to_lbr_records(vcpu)->nr;
-}
-
void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
-static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1))
- vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-
- return vmx->exit_qualification;
-}
-
-static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2))
- vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-
- return vmx->exit_intr_info;
-}
-
struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
@@ -758,4 +700,7 @@ static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
vmx->segment_cache.bitmask = 0;
}
+int vmx_init(void);
+void vmx_exit(void);
+
#endif /* __KVM_X86_VMX_H */
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 430773a5ef8e..b4596f651232 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -46,7 +46,6 @@ int vmx_check_intercept(struct kvm_vcpu *vcpu,
bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu);
void vmx_migrate_timers(struct kvm_vcpu *vcpu);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
-void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
@@ -58,6 +57,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
int vmx_get_feature_msr(u32 msr, u64 *data);
int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+#define vmx_complete_emulated_msr kvm_complete_insn_gp
u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
@@ -121,4 +121,49 @@ void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu);
#endif
void vmx_setup_mce(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_KVM_INTEL_TDX
+void tdx_disable_virtualization_cpu(void);
+int tdx_vm_init(struct kvm *kvm);
+void tdx_mmu_release_hkid(struct kvm *kvm);
+void tdx_vm_destroy(struct kvm *kvm);
+int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
+
+int tdx_vcpu_create(struct kvm_vcpu *vcpu);
+void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
+void tdx_vcpu_free(struct kvm_vcpu *vcpu);
+void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu);
+fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
+void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
+void tdx_vcpu_put(struct kvm_vcpu *vcpu);
+bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu);
+int tdx_handle_exit(struct kvm_vcpu *vcpu,
+ enum exit_fastpath_completion fastpath);
+
+void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector);
+void tdx_inject_nmi(struct kvm_vcpu *vcpu);
+void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
+ u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
+bool tdx_has_emulated_msr(u32 index);
+int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
+int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
+
+int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
+
+int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, void *private_spt);
+int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, void *private_spt);
+int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, kvm_pfn_t pfn);
+int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, kvm_pfn_t pfn);
+
+void tdx_flush_tlb_current(struct kvm_vcpu *vcpu);
+void tdx_flush_tlb_all(struct kvm_vcpu *vcpu);
+void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
+int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
+#endif
+
#endif /* __KVM_X86_VMX_X86_OPS_H */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c841817a914a..b58a74c1722d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -90,7 +90,6 @@
#include "trace.h"
#define MAX_IO_MSRS 256
-#define KVM_MAX_MCE_BANKS 32
/*
* Note, kvm_caps fields should *never* have default values, all fields must be
@@ -227,6 +226,9 @@ EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
bool __read_mostly enable_apicv = true;
EXPORT_SYMBOL_GPL(enable_apicv);
+bool __read_mostly enable_device_posted_irqs = true;
+EXPORT_SYMBOL_GPL(enable_device_posted_irqs);
+
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS(),
STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
@@ -578,7 +580,7 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
values = &msrs->values[slot];
if (values->host != values->curr) {
- wrmsrl(kvm_uret_msrs_list[slot], values->host);
+ wrmsrq(kvm_uret_msrs_list[slot], values->host);
values->curr = values->host;
}
}
@@ -590,10 +592,10 @@ static int kvm_probe_user_return_msr(u32 msr)
int ret;
preempt_disable();
- ret = rdmsrl_safe(msr, &val);
+ ret = rdmsrq_safe(msr, &val);
if (ret)
goto out;
- ret = wrmsrl_safe(msr, val);
+ ret = wrmsrq_safe(msr, val);
out:
preempt_enable();
return ret;
@@ -630,12 +632,21 @@ static void kvm_user_return_msr_cpu_online(void)
int i;
for (i = 0; i < kvm_nr_uret_msrs; ++i) {
- rdmsrl_safe(kvm_uret_msrs_list[i], &value);
+ rdmsrq_safe(kvm_uret_msrs_list[i], &value);
msrs->values[i].host = value;
msrs->values[i].curr = value;
}
}
+static void kvm_user_return_register_notifier(struct kvm_user_return_msrs *msrs)
+{
+ if (!msrs->registered) {
+ msrs->urn.on_user_return = kvm_on_user_return;
+ user_return_notifier_register(&msrs->urn);
+ msrs->registered = true;
+ }
+}
+
int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
{
struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
@@ -644,20 +655,25 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
value = (value & mask) | (msrs->values[slot].host & ~mask);
if (value == msrs->values[slot].curr)
return 0;
- err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
+ err = wrmsrq_safe(kvm_uret_msrs_list[slot], value);
if (err)
return 1;
msrs->values[slot].curr = value;
- if (!msrs->registered) {
- msrs->urn.on_user_return = kvm_on_user_return;
- user_return_notifier_register(&msrs->urn);
- msrs->registered = true;
- }
+ kvm_user_return_register_notifier(msrs);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);
+void kvm_user_return_msr_update_cache(unsigned int slot, u64 value)
+{
+ struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
+
+ msrs->values[slot].curr = value;
+ kvm_user_return_register_notifier(msrs);
+}
+EXPORT_SYMBOL_GPL(kvm_user_return_msr_update_cache);
+
static void drop_user_return_notifiers(void)
{
struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
@@ -1174,7 +1190,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
vcpu->arch.ia32_xss != kvm_host.xss)
- wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
+ wrmsrq(MSR_IA32_XSS, vcpu->arch.ia32_xss);
}
if (cpu_feature_enabled(X86_FEATURE_PKU) &&
@@ -1205,7 +1221,7 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
vcpu->arch.ia32_xss != kvm_host.xss)
- wrmsrl(MSR_IA32_XSS, kvm_host.xss);
+ wrmsrq(MSR_IA32_XSS, kvm_host.xss);
}
}
@@ -1584,7 +1600,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
- ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO)
+ ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | ARCH_CAP_ITS_NO)
static u64 kvm_get_arch_capabilities(void)
{
@@ -1618,6 +1634,8 @@ static u64 kvm_get_arch_capabilities(void)
data |= ARCH_CAP_MDS_NO;
if (!boot_cpu_has_bug(X86_BUG_RFDS))
data |= ARCH_CAP_RFDS_NO;
+ if (!boot_cpu_has_bug(X86_BUG_ITS))
+ data |= ARCH_CAP_ITS_NO;
if (!boot_cpu_has(X86_FEATURE_RTM)) {
/*
@@ -1660,7 +1678,7 @@ static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
*data = MSR_PLATFORM_INFO_CPUID_FAULT;
break;
case MSR_IA32_UCODE_REV:
- rdmsrl_safe(index, data);
+ rdmsrq_safe(index, data);
break;
default:
return kvm_x86_call(get_feature_msr)(index, data);
@@ -3827,7 +3845,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!data)
break;
- wrmsrl(MSR_IA32_PRED_CMD, data);
+ wrmsrq(MSR_IA32_PRED_CMD, data);
break;
}
case MSR_IA32_FLUSH_CMD:
@@ -3840,7 +3858,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!data)
break;
- wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+ wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
break;
case MSR_EFER:
return set_efer(vcpu, msr_info);
@@ -4597,7 +4615,7 @@ static bool kvm_is_vm_type_supported(unsigned long type)
return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
}
-static inline u32 kvm_sync_valid_fields(struct kvm *kvm)
+static inline u64 kvm_sync_valid_fields(struct kvm *kvm)
{
return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
}
@@ -4737,6 +4755,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
+ if (kvm)
+ r = kvm->max_vcpus;
break;
case KVM_CAP_MAX_VCPU_ID:
r = KVM_MAX_VCPU_IDS;
@@ -4792,7 +4812,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0;
break;
case KVM_CAP_DISABLE_QUIRKS2:
- r = KVM_X86_VALID_QUIRKS;
+ r = kvm_caps.supported_quirks;
break;
case KVM_CAP_X86_NOTIFY_VMEXIT:
r = kvm_caps.has_notify_vmexit;
@@ -4973,6 +4993,8 @@ static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
return kvm_arch_has_noncoherent_dma(vcpu->kvm);
}
+static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
+
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -4995,6 +5017,19 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_x86_call(vcpu_load)(vcpu, cpu);
+ if (vcpu != per_cpu(last_vcpu, cpu)) {
+ /*
+ * Flush the branch predictor when switching vCPUs on the same
+ * physical CPU, as each vCPU needs its own branch prediction
+ * domain. No IBPB is needed when switching between L1 and L2
+ * on the same vCPU unless IBRS is advertised to the vCPU; that
+ * is handled on the nested VM-Exit path.
+ */
+ if (static_branch_likely(&switch_vcpu_ibpb))
+ indirect_branch_prediction_barrier();
+ per_cpu(last_vcpu, cpu) = vcpu;
+ }
+
/* Save host pkru register if supported */
vcpu->arch.host_pkru = read_pkru();
@@ -5115,6 +5150,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
+ if (vcpu->arch.apic->guest_apic_protected)
+ return -EINVAL;
+
kvm_x86_call(sync_pir_to_irr)(vcpu);
return kvm_apic_get_state(vcpu, s);
@@ -5125,6 +5163,9 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
{
int r;
+ if (vcpu->arch.apic->guest_apic_protected)
+ return -EINVAL;
+
r = kvm_apic_set_state(vcpu, s);
if (r)
return r;
@@ -6302,6 +6343,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_SET_DEVICE_ATTR:
r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp);
break;
+ case KVM_MEMORY_ENCRYPT_OP:
+ r = -ENOTTY;
+ if (!kvm_x86_ops.vcpu_mem_enc_ioctl)
+ goto out;
+ r = kvm_x86_ops.vcpu_mem_enc_ioctl(vcpu, argp);
+ break;
default:
r = -EINVAL;
}
@@ -6489,7 +6536,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
struct kvm_vcpu *vcpu;
unsigned long i;
- if (!kvm_x86_ops.cpu_dirty_log_size)
+ if (!kvm->arch.cpu_dirty_log_size)
return;
kvm_for_each_vcpu(i, vcpu, kvm)
@@ -6519,11 +6566,11 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
switch (cap->cap) {
case KVM_CAP_DISABLE_QUIRKS2:
r = -EINVAL;
- if (cap->args[0] & ~KVM_X86_VALID_QUIRKS)
+ if (cap->args[0] & ~kvm_caps.supported_quirks)
break;
fallthrough;
case KVM_CAP_DISABLE_QUIRKS:
- kvm->arch.disabled_quirks = cap->args[0];
+ kvm->arch.disabled_quirks |= cap->args[0] & kvm_caps.supported_quirks;
r = 0;
break;
case KVM_CAP_SPLIT_IRQCHIP: {
@@ -7297,14 +7344,13 @@ set_pit2_out:
r = READ_ONCE(kvm->arch.default_tsc_khz);
goto out;
}
- case KVM_MEMORY_ENCRYPT_OP: {
+ case KVM_MEMORY_ENCRYPT_OP:
r = -ENOTTY;
if (!kvm_x86_ops.mem_enc_ioctl)
goto out;
r = kvm_x86_call(mem_enc_ioctl)(kvm, argp);
break;
- }
case KVM_MEMORY_ENCRYPT_REG_REGION: {
struct kvm_enc_region region;
@@ -7998,7 +8044,7 @@ static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
return rc;
if (!vcpu->mmio_nr_fragments)
- return rc;
+ return X86EMUL_CONTINUE;
gpa = vcpu->mmio_fragments[0].gpa;
@@ -9336,7 +9382,7 @@ static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
{
vcpu->arch.pio.count = 0;
- if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
+ if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip)))
return 1;
return kvm_skip_emulated_instruction(vcpu);
@@ -9361,7 +9407,7 @@ static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
complete_fast_pio_out_port_0x7e;
kvm_skip_emulated_instruction(vcpu);
} else {
- vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
+ vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
vcpu->arch.complete_userspace_io = complete_fast_pio_out;
}
return 0;
@@ -9374,7 +9420,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
/* We should only ever be called with arch.pio.count equal to 1 */
BUG_ON(vcpu->arch.pio.count != 1);
- if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
+ if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip))) {
vcpu->arch.pio.count = 0;
return 1;
}
@@ -9403,7 +9449,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
return ret;
}
- vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
+ vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
vcpu->arch.complete_userspace_io = complete_fast_pio_in;
return 0;
@@ -9736,7 +9782,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
* with an exception. PAT[0] is set to WB on RESET and also by the
* kernel, i.e. failure indicates a kernel bug or broken firmware.
*/
- if (rdmsrl_safe(MSR_IA32_CR_PAT, &host_pat) ||
+ if (rdmsrq_safe(MSR_IA32_CR_PAT, &host_pat) ||
(host_pat & GENMASK(2, 0)) != 6) {
pr_err("host PAT[0] is not WB\n");
return -EIO;
@@ -9769,21 +9815,26 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
kvm_host.xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
kvm_caps.supported_xcr0 = kvm_host.xcr0 & KVM_SUPPORTED_XCR0;
}
+ kvm_caps.supported_quirks = KVM_X86_VALID_QUIRKS;
+ kvm_caps.inapplicable_quirks = KVM_X86_CONDITIONAL_QUIRKS;
- rdmsrl_safe(MSR_EFER, &kvm_host.efer);
+ rdmsrq_safe(MSR_EFER, &kvm_host.efer);
if (boot_cpu_has(X86_FEATURE_XSAVES))
- rdmsrl(MSR_IA32_XSS, kvm_host.xss);
+ rdmsrq(MSR_IA32_XSS, kvm_host.xss);
kvm_init_pmu_capability(ops->pmu_ops);
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities);
+ rdmsrq(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities);
r = ops->hardware_setup();
if (r != 0)
goto out_mmu_exit;
+ enable_device_posted_irqs &= enable_apicv &&
+ irq_remapping_cap(IRQ_POSTING_CAP);
+
kvm_ops_update(ops);
for_each_online_cpu(cpu) {
@@ -9813,6 +9864,10 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_mmu_enabled)
kvm_caps.supported_vm_types |= BIT(KVM_X86_SW_PROTECTED_VM);
+ /* KVM always ignores guest PAT for shadow paging. */
+ if (!tdp_enabled)
+ kvm_caps.supported_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
+
if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
kvm_caps.supported_xss = 0;
@@ -10021,13 +10076,16 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
}
-int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
- unsigned long a0, unsigned long a1,
- unsigned long a2, unsigned long a3,
- int op_64_bit, int cpl,
+int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl,
int (*complete_hypercall)(struct kvm_vcpu *))
{
unsigned long ret;
+ unsigned long nr = kvm_rax_read(vcpu);
+ unsigned long a0 = kvm_rbx_read(vcpu);
+ unsigned long a1 = kvm_rcx_read(vcpu);
+ unsigned long a2 = kvm_rdx_read(vcpu);
+ unsigned long a3 = kvm_rsi_read(vcpu);
+ int op_64_bit = is_64_bit_hypercall(vcpu);
++vcpu->stat.hypercalls;
@@ -10130,9 +10188,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
if (kvm_hv_hypercall_enabled(vcpu))
return kvm_hv_hypercall(vcpu);
- return __kvm_emulate_hypercall(vcpu, rax, rbx, rcx, rdx, rsi,
- is_64_bit_hypercall(vcpu),
- kvm_x86_call(get_cpl)(vcpu),
+ return __kvm_emulate_hypercall(vcpu, kvm_x86_call(get_cpl)(vcpu),
complete_hypercall_exit);
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
@@ -10662,6 +10718,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
return;
bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
+ vcpu->arch.highest_stale_pending_ioapic_eoi = -1;
kvm_x86_call(sync_pir_to_irr)(vcpu);
@@ -10974,9 +11031,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
switch_fpu_return();
if (vcpu->arch.guest_fpu.xfd_err)
- wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
+ wrmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
- if (unlikely(vcpu->arch.switch_db_regs)) {
+ if (unlikely(vcpu->arch.switch_db_regs &&
+ !(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH))) {
set_debugreg(0, 7);
set_debugreg(vcpu->arch.eff_db[0], 0);
set_debugreg(vcpu->arch.eff_db[1], 1);
@@ -11028,6 +11086,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
*/
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
+ WARN_ON(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH);
kvm_x86_call(sync_dirty_debug_regs)(vcpu);
kvm_update_dr0123(vcpu);
kvm_update_dr7(vcpu);
@@ -11060,7 +11119,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_x86_call(handle_exit_irqoff)(vcpu);
if (vcpu->arch.guest_fpu.xfd_err)
- wrmsrl(MSR_IA32_XFD_ERR, 0);
+ wrmsrq(MSR_IA32_XFD_ERR, 0);
/*
* Consume any pending interrupts, including the possible source of
@@ -11098,7 +11157,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
/*
* Profile KVM exit RIPs:
*/
- if (unlikely(prof_on == KVM_PROFILING)) {
+ if (unlikely(prof_on == KVM_PROFILING &&
+ !vcpu->arch.guest_state_protected)) {
unsigned long rip = kvm_rip_read(vcpu);
profile_hit(KVM_PROFILING, (void *)rip);
}
@@ -11131,7 +11191,7 @@ static bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
!vcpu->arch.apf.halted);
}
-static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
if (!list_empty_careful(&vcpu->async_pf.done))
return true;
@@ -11140,9 +11200,6 @@ static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
kvm_apic_init_sipi_allowed(vcpu))
return true;
- if (vcpu->arch.pv.pv_unhalted)
- return true;
-
if (kvm_is_exception_pending(vcpu))
return true;
@@ -11180,10 +11237,12 @@ static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
return false;
}
+EXPORT_SYMBOL_GPL(kvm_vcpu_has_events);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
+ return kvm_vcpu_running(vcpu) || vcpu->arch.pv.pv_unhalted ||
+ kvm_vcpu_has_events(vcpu);
}
/* Called within kvm->srcu read side. */
@@ -11317,7 +11376,7 @@ static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
*/
++vcpu->stat.halt_exits;
if (lapic_in_kernel(vcpu)) {
- if (kvm_vcpu_has_events(vcpu))
+ if (kvm_vcpu_has_events(vcpu) || vcpu->arch.pv.pv_unhalted)
state = KVM_MP_STATE_RUNNABLE;
kvm_set_mp_state(vcpu, state);
return 1;
@@ -11492,7 +11551,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
struct kvm_queued_exception *ex = &vcpu->arch.exception;
struct kvm_run *kvm_run = vcpu->run;
- u32 sync_valid_fields;
+ u64 sync_valid_fields;
int r;
r = kvm_mmu_post_init_vm(vcpu->kvm);
@@ -11786,6 +11845,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
if (kvm_mpx_supported())
kvm_load_guest_fpu(vcpu);
+ kvm_vcpu_srcu_read_lock(vcpu);
+
r = kvm_apic_accept_events(vcpu);
if (r < 0)
goto out;
@@ -11799,6 +11860,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
mp_state->mp_state = vcpu->arch.mp_state;
out:
+ kvm_vcpu_srcu_read_unlock(vcpu);
+
if (kvm_mpx_supported())
kvm_put_guest_fpu(vcpu);
vcpu_put(vcpu);
@@ -12381,13 +12444,16 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
- int idx;
+ int idx, cpu;
kvm_clear_async_pf_completion_queue(vcpu);
kvm_mmu_unload(vcpu);
kvmclock_reset(vcpu);
+ for_each_possible_cpu(cpu)
+ cmpxchg(per_cpu_ptr(&last_vcpu, cpu), vcpu, NULL);
+
kvm_x86_call(vcpu_free)(vcpu);
kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
@@ -12687,6 +12753,7 @@ bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
}
+EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
@@ -12716,6 +12783,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* Decided by the vendor code for other VM types. */
kvm->arch.pre_fault_allowed =
type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM;
+ kvm->arch.disabled_quirks = kvm_caps.inapplicable_quirks & kvm_caps.supported_quirks;
ret = kvm_page_track_init(kvm);
if (ret)
@@ -12869,6 +12937,7 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm)
kvm_free_pit(kvm);
kvm_mmu_pre_destroy_vm(kvm);
+ static_call_cond(kvm_x86_vm_pre_destroy)(kvm);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
@@ -13066,7 +13135,7 @@ static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
{
int nr_slots;
- if (!kvm_x86_ops.cpu_dirty_log_size)
+ if (!kvm->arch.cpu_dirty_log_size)
return;
nr_slots = atomic_read(&kvm->nr_memslots_dirty_logging);
@@ -13138,7 +13207,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
if (READ_ONCE(eager_page_split))
kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K);
- if (kvm_x86_ops.cpu_dirty_log_size) {
+ if (kvm->arch.cpu_dirty_log_size) {
kvm_mmu_slot_leaf_clear_dirty(kvm, new);
kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
} else {
@@ -13527,8 +13596,10 @@ static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
* due to toggling the "ignore PAT" bit. Zap all SPTEs when the first
* (or last) non-coherent device is (un)registered so that new SPTEs
* with the correct "ignore guest PAT" setting are created.
+ *
+ * If KVM always honors guest PAT, however, there is nothing to do.
*/
- if (kvm_mmu_may_ignore_guest_pat())
+ if (kvm_check_has_quirk(kvm, KVM_X86_QUIRK_IGNORE_GUEST_PAT))
kvm_zap_gfn_range(kvm, gpa_to_gfn(0), gpa_to_gfn(~0ULL));
}
@@ -13552,25 +13623,27 @@ bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
-bool kvm_arch_has_irq_bypass(void)
-{
- return enable_apicv && irq_remapping_cap(IRQ_POSTING_CAP);
-}
-
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
struct irq_bypass_producer *prod)
{
struct kvm_kernel_irqfd *irqfd =
container_of(cons, struct kvm_kernel_irqfd, consumer);
+ struct kvm *kvm = irqfd->kvm;
int ret;
- irqfd->producer = prod;
kvm_arch_start_assignment(irqfd->kvm);
+
+ spin_lock_irq(&kvm->irqfds.lock);
+ irqfd->producer = prod;
+
ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
prod->irq, irqfd->gsi, 1);
if (ret)
kvm_arch_end_assignment(irqfd->kvm);
+ spin_unlock_irq(&kvm->irqfds.lock);
+
+
return ret;
}
@@ -13580,9 +13653,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
int ret;
struct kvm_kernel_irqfd *irqfd =
container_of(cons, struct kvm_kernel_irqfd, consumer);
+ struct kvm *kvm = irqfd->kvm;
WARN_ON(irqfd->producer != prod);
- irqfd->producer = NULL;
/*
* When producer of consumer is unregistered, we change back to
@@ -13590,12 +13663,18 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
* when the irq is masked/disabled or the consumer side (KVM
* int this case doesn't want to receive the interrupts.
*/
+ spin_lock_irq(&kvm->irqfds.lock);
+ irqfd->producer = NULL;
+
ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
prod->irq, irqfd->gsi, 0);
if (ret)
printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
" fails: %d\n", irqfd->consumer.token, ret);
+ spin_unlock_irq(&kvm->irqfds.lock);
+
+
kvm_arch_end_assignment(irqfd->kvm);
}
@@ -13608,7 +13687,8 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
struct kvm_kernel_irq_routing_entry *new)
{
- if (new->type != KVM_IRQ_ROUTING_MSI)
+ if (old->type != KVM_IRQ_ROUTING_MSI ||
+ new->type != KVM_IRQ_ROUTING_MSI)
return true;
return !!memcmp(&old->msi, &new->msi, sizeof(new->msi));
@@ -13652,12 +13732,12 @@ int kvm_spec_ctrl_test_value(u64 value)
local_irq_save(flags);
- if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
+ if (rdmsrq_safe(MSR_IA32_SPEC_CTRL, &saved_value))
ret = 1;
- else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
+ else if (wrmsrq_safe(MSR_IA32_SPEC_CTRL, value))
ret = 1;
else
- wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);
+ wrmsrq(MSR_IA32_SPEC_CTRL, saved_value);
local_irq_restore(flags);
@@ -13996,6 +14076,7 @@ EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 9dc32a409076..832f0faf4779 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -10,6 +10,8 @@
#include "kvm_emulate.h"
#include "cpuid.h"
+#define KVM_MAX_MCE_BANKS 32
+
struct kvm_caps {
/* control of guest tsc rate supported? */
bool has_tsc_control;
@@ -32,6 +34,9 @@ struct kvm_caps {
u64 supported_xcr0;
u64 supported_xss;
u64 supported_perf_cap;
+
+ u64 supported_quirks;
+ u64 inapplicable_quirks;
};
struct kvm_host_values {
@@ -116,6 +121,24 @@ static inline void kvm_leave_nested(struct kvm_vcpu *vcpu)
kvm_x86_ops.nested_ops->leave_nested(vcpu);
}
+/*
+ * If IBRS is advertised to the vCPU, KVM must flush the indirect branch
+ * predictors when transitioning from L2 to L1, as L1 expects hardware (KVM in
+ * this case) to provide separate predictor modes. Bare metal isolates the host
+ * from the guest, but doesn't isolate different guests from one another (in
+ * this case L1 and L2). The exception is if bare metal supports same mode IBRS,
+ * which offers protection within the same mode, and hence protects L1 from L2.
+ */
+static inline void kvm_nested_vmexit_handle_ibrs(struct kvm_vcpu *vcpu)
+{
+ if (cpu_feature_enabled(X86_FEATURE_AMD_IBRS_SAME_MODE))
+ return;
+
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
+ guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBRS))
+ indirect_branch_prediction_barrier();
+}
+
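This helper centralizes the policy that used to be expressed as an unconditional IBPB in vmx_vcpu_load_vmcs() (removed earlier in this diff); the expected caller is the nested VM-Exit path, which is not part of the hunks shown here. A hedged sketch of such a call site (the function name is hypothetical):

	/* Hypothetical L2 -> L1 transition helper, for illustration only. */
	static void example_nested_vmexit_ibrs(struct kvm_vcpu *vcpu)
	{
		/*
		 * Flush the indirect branch predictors only when L1 was promised
		 * its own predictor mode, i.e. IBRS is advertised to the vCPU and
		 * the CPU lacks same-mode IBRS.
		 */
		kvm_nested_vmexit_handle_ibrs(vcpu);
	}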
static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
return vcpu->arch.last_vmentry_cpu != -1;
@@ -629,25 +652,17 @@ static inline bool user_exit_on_hypercall(struct kvm *kvm, unsigned long hc_nr)
return kvm->arch.hypercall_exit_enabled & BIT(hc_nr);
}
-int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
- unsigned long a0, unsigned long a1,
- unsigned long a2, unsigned long a3,
- int op_64_bit, int cpl,
+int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl,
int (*complete_hypercall)(struct kvm_vcpu *));
-#define __kvm_emulate_hypercall(_vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl, complete_hypercall) \
-({ \
- int __ret; \
- \
- __ret = ____kvm_emulate_hypercall(_vcpu, \
- kvm_##nr##_read(_vcpu), kvm_##a0##_read(_vcpu), \
- kvm_##a1##_read(_vcpu), kvm_##a2##_read(_vcpu), \
- kvm_##a3##_read(_vcpu), op_64_bit, cpl, \
- complete_hypercall); \
- \
- if (__ret > 0) \
- __ret = complete_hypercall(_vcpu); \
- __ret; \
+#define __kvm_emulate_hypercall(_vcpu, cpl, complete_hypercall) \
+({ \
+ int __ret; \
+ __ret = ____kvm_emulate_hypercall(_vcpu, cpl, complete_hypercall); \
+ \
+ if (__ret > 0) \
+ __ret = complete_hypercall(_vcpu); \
+ __ret; \
})
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 1c50352eb49f..4fa5c4e1ba8a 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -3,6 +3,8 @@
# Makefile for x86 specific library files.
#
+obj-y += crypto/
+
# Produces uninteresting flaky coverage.
KCOV_INSTRUMENT_delay.o := n
@@ -39,14 +41,14 @@ lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_MITIGATION_RETPOLINE) += retpoline.o
obj-$(CONFIG_CRC32_ARCH) += crc32-x86.o
-crc32-x86-y := crc32-glue.o crc32-pclmul.o
+crc32-x86-y := crc32.o crc32-pclmul.o
crc32-x86-$(CONFIG_64BIT) += crc32c-3way.o
obj-$(CONFIG_CRC64_ARCH) += crc64-x86.o
-crc64-x86-y := crc64-glue.o crc64-pclmul.o
+crc64-x86-y := crc64.o crc64-pclmul.o
obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-x86.o
-crc-t10dif-x86-y := crc-t10dif-glue.o crc16-msb-pclmul.o
+crc-t10dif-x86-y := crc-t10dif.o crc16-msb-pclmul.o
obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
obj-y += iomem.o
diff --git a/arch/x86/lib/crc-t10dif-glue.c b/arch/x86/lib/crc-t10dif.c
index f89c335cde3c..db7ce59c31ac 100644
--- a/arch/x86/lib/crc-t10dif-glue.c
+++ b/arch/x86/lib/crc-t10dif.c
@@ -9,7 +9,7 @@
#include <linux/module.h>
#include "crc-pclmul-template.h"
-static DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);
DECLARE_CRC_PCLMUL_FUNCS(crc16_msb, u16);
@@ -29,7 +29,7 @@ static int __init crc_t10dif_x86_init(void)
}
return 0;
}
-arch_initcall(crc_t10dif_x86_init);
+subsys_initcall(crc_t10dif_x86_init);
static void __exit crc_t10dif_x86_exit(void)
{
diff --git a/arch/x86/lib/crc32-glue.c b/arch/x86/lib/crc32.c
index e3f93b17ac3f..d09343e2cea9 100644
--- a/arch/x86/lib/crc32-glue.c
+++ b/arch/x86/lib/crc32.c
@@ -11,8 +11,8 @@
#include <linux/module.h>
#include "crc-pclmul-template.h"
-static DEFINE_STATIC_KEY_FALSE(have_crc32);
-static DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);
DECLARE_CRC_PCLMUL_FUNCS(crc32_lsb, u32);
@@ -88,7 +88,7 @@ static int __init crc32_x86_init(void)
}
return 0;
}
-arch_initcall(crc32_x86_init);
+subsys_initcall(crc32_x86_init);
static void __exit crc32_x86_exit(void)
{
diff --git a/arch/x86/lib/crc64-glue.c b/arch/x86/lib/crc64.c
index b0e1b719ecbf..351a09f5813e 100644
--- a/arch/x86/lib/crc64-glue.c
+++ b/arch/x86/lib/crc64.c
@@ -9,7 +9,7 @@
#include <linux/module.h>
#include "crc-pclmul-template.h"
-static DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);
DECLARE_CRC_PCLMUL_FUNCS(crc64_msb, u64);
DECLARE_CRC_PCLMUL_FUNCS(crc64_lsb, u64);
@@ -39,7 +39,7 @@ static int __init crc64_x86_init(void)
}
return 0;
}
-arch_initcall(crc64_x86_init);
+subsys_initcall(crc64_x86_init);
static void __exit crc64_x86_exit(void)
{
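Beyond the file renames, the CRC glue now marks its static keys __ro_after_init and registers at subsys_initcall time, so the capability decision is fixed before the bulk of users run. A userspace model of the resulting one-time dispatch is sketched below; the boolean stands in for the static key, and the PCLMULQDQ path is a placeholder that simply reuses the bitwise implementation.

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the static key: decided once at init, read-only afterwards. */
static bool have_pclmulqdq;

static uint32_t crc32_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xEDB88320u : crc >> 1;
	}
	return crc;
}

static uint32_t crc32_pclmul_stub(uint32_t crc, const uint8_t *p, size_t len)
{
	/* Placeholder for the carry-less-multiply routine. */
	return crc32_bitwise(crc, p, len);
}

static uint32_t crc32_dispatch(uint32_t crc, const uint8_t *p, size_t len)
{
	return have_pclmulqdq ? crc32_pclmul_stub(crc, p, len)
			      : crc32_bitwise(crc, p, len);
}

int main(void)
{
	const uint8_t msg[] = "123456789";

	have_pclmulqdq = false;			/* imagine a CPUID probe here */
	printf("%08x\n", ~crc32_dispatch(~0u, msg, 9));	/* cbf43926 */
	return 0;
}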
diff --git a/arch/x86/lib/crypto/.gitignore b/arch/x86/lib/crypto/.gitignore
new file mode 100644
index 000000000000..580c839bb177
--- /dev/null
+++ b/arch/x86/lib/crypto/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+poly1305-x86_64-cryptogams.S
diff --git a/arch/x86/lib/crypto/Kconfig b/arch/x86/lib/crypto/Kconfig
new file mode 100644
index 000000000000..5e94cdee492c
--- /dev/null
+++ b/arch/x86/lib/crypto/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CRYPTO_BLAKE2S_X86
+ bool "Hash functions: BLAKE2s (SSSE3/AVX-512)"
+ depends on 64BIT
+ select CRYPTO_LIB_BLAKE2S_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+ help
+ BLAKE2s cryptographic hash function (RFC 7693)
+
+ Architecture: x86_64 using:
+ - SSSE3 (Supplemental SSE3)
+ - AVX-512 (Advanced Vector Extensions-512)
+
+config CRYPTO_CHACHA20_X86_64
+ tristate
+ depends on 64BIT
+ default CRYPTO_LIB_CHACHA
+ select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+
+config CRYPTO_POLY1305_X86_64
+ tristate
+ depends on 64BIT
+ default CRYPTO_LIB_POLY1305
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
+
+config CRYPTO_SHA256_X86_64
+ tristate
+ depends on 64BIT
+ default CRYPTO_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
+ select CRYPTO_LIB_SHA256_GENERIC
diff --git a/arch/x86/lib/crypto/Makefile b/arch/x86/lib/crypto/Makefile
new file mode 100644
index 000000000000..abceca3d31c0
--- /dev/null
+++ b/arch/x86/lib/crypto/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += libblake2s-x86_64.o
+libblake2s-x86_64-y := blake2s-core.o blake2s-glue.o
+
+obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha-x86_64.o
+chacha-x86_64-y := chacha-avx2-x86_64.o chacha-ssse3-x86_64.o chacha-avx512vl-x86_64.o chacha_glue.o
+
+obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o
+poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o
+targets += poly1305-x86_64-cryptogams.S
+
+obj-$(CONFIG_CRYPTO_SHA256_X86_64) += sha256-x86_64.o
+sha256-x86_64-y := sha256.o sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256-ni-asm.o
+
+quiet_cmd_perlasm = PERLASM $@
+ cmd_perlasm = $(PERL) $< > $@
+
+$(obj)/%.S: $(src)/%.pl FORCE
+ $(call if_changed,perlasm)
diff --git a/arch/x86/crypto/blake2s-core.S b/arch/x86/lib/crypto/blake2s-core.S
index b50b35ff1fdb..ac1c845445a4 100644
--- a/arch/x86/crypto/blake2s-core.S
+++ b/arch/x86/lib/crypto/blake2s-core.S
@@ -29,7 +29,6 @@ SIGMA:
.byte 13, 7, 12, 3, 11, 14, 1, 9, 2, 5, 15, 8, 10, 0, 4, 6
.byte 6, 14, 11, 0, 15, 9, 3, 8, 10, 12, 13, 1, 5, 2, 7, 4
.byte 10, 8, 7, 1, 2, 4, 6, 5, 13, 15, 9, 3, 0, 11, 14, 12
-#ifdef CONFIG_AS_AVX512
.section .rodata.cst64.BLAKE2S_SIGMA2, "aM", @progbits, 640
.align 64
SIGMA2:
@@ -43,7 +42,6 @@ SIGMA2:
.long 6, 13, 0, 14, 12, 2, 1, 11, 15, 4, 5, 8, 7, 9, 3, 10
.long 15, 5, 4, 13, 10, 7, 3, 11, 12, 2, 0, 6, 9, 8, 1, 14
.long 8, 7, 14, 11, 13, 15, 0, 12, 10, 4, 5, 6, 3, 2, 1, 9
-#endif /* CONFIG_AS_AVX512 */
.text
SYM_FUNC_START(blake2s_compress_ssse3)
@@ -174,7 +172,6 @@ SYM_FUNC_START(blake2s_compress_ssse3)
RET
SYM_FUNC_END(blake2s_compress_ssse3)
-#ifdef CONFIG_AS_AVX512
SYM_FUNC_START(blake2s_compress_avx512)
vmovdqu (%rdi),%xmm0
vmovdqu 0x10(%rdi),%xmm1
@@ -253,4 +250,3 @@ SYM_FUNC_START(blake2s_compress_avx512)
vzeroupper
RET
SYM_FUNC_END(blake2s_compress_avx512)
-#endif /* CONFIG_AS_AVX512 */
diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/lib/crypto/blake2s-glue.c
index 0313f9673f56..adc296cd17c9 100644
--- a/arch/x86/crypto/blake2s-glue.c
+++ b/arch/x86/lib/crypto/blake2s-glue.c
@@ -3,17 +3,15 @@
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
-#include <crypto/internal/blake2s.h>
-
-#include <linux/types.h>
-#include <linux/jump_label.h>
-#include <linux/kernel.h>
-#include <linux/sizes.h>
-
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
#include <asm/processor.h>
#include <asm/simd.h>
+#include <crypto/internal/blake2s.h>
+#include <linux/init.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
asmlinkage void blake2s_compress_ssse3(struct blake2s_state *state,
const u8 *block, const size_t nblocks,
@@ -41,8 +39,7 @@ void blake2s_compress(struct blake2s_state *state, const u8 *block,
SZ_4K / BLAKE2S_BLOCK_SIZE);
kernel_fpu_begin();
- if (IS_ENABLED(CONFIG_AS_AVX512) &&
- static_branch_likely(&blake2s_use_avx512))
+ if (static_branch_likely(&blake2s_use_avx512))
blake2s_compress_avx512(state, block, blocks, inc);
else
blake2s_compress_ssse3(state, block, blocks, inc);
@@ -59,8 +56,7 @@ static int __init blake2s_mod_init(void)
if (boot_cpu_has(X86_FEATURE_SSSE3))
static_branch_enable(&blake2s_use_ssse3);
- if (IS_ENABLED(CONFIG_AS_AVX512) &&
- boot_cpu_has(X86_FEATURE_AVX) &&
+ if (boot_cpu_has(X86_FEATURE_AVX) &&
boot_cpu_has(X86_FEATURE_AVX2) &&
boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512VL) &&
diff --git a/arch/x86/crypto/chacha-avx2-x86_64.S b/arch/x86/lib/crypto/chacha-avx2-x86_64.S
index f3d8fc018249..f3d8fc018249 100644
--- a/arch/x86/crypto/chacha-avx2-x86_64.S
+++ b/arch/x86/lib/crypto/chacha-avx2-x86_64.S
diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/lib/crypto/chacha-avx512vl-x86_64.S
index 259383e1ad44..259383e1ad44 100644
--- a/arch/x86/crypto/chacha-avx512vl-x86_64.S
+++ b/arch/x86/lib/crypto/chacha-avx512vl-x86_64.S
diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/lib/crypto/chacha-ssse3-x86_64.S
index 7111949cd5b9..7111949cd5b9 100644
--- a/arch/x86/crypto/chacha-ssse3-x86_64.S
+++ b/arch/x86/lib/crypto/chacha-ssse3-x86_64.S
diff --git a/arch/x86/lib/crypto/chacha_glue.c b/arch/x86/lib/crypto/chacha_glue.c
new file mode 100644
index 000000000000..10b2c945f541
--- /dev/null
+++ b/arch/x86/lib/crypto/chacha_glue.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ChaCha and HChaCha functions (x86_64 optimized)
+ *
+ * Copyright (C) 2015 Martin Willi
+ */
+
+#include <asm/simd.h>
+#include <crypto/chacha.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+
+asmlinkage void chacha_block_xor_ssse3(const struct chacha_state *state,
+ u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+asmlinkage void chacha_4block_xor_ssse3(const struct chacha_state *state,
+ u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+asmlinkage void hchacha_block_ssse3(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds);
+
+asmlinkage void chacha_2block_xor_avx2(const struct chacha_state *state,
+ u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+asmlinkage void chacha_4block_xor_avx2(const struct chacha_state *state,
+ u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+asmlinkage void chacha_8block_xor_avx2(const struct chacha_state *state,
+ u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+
+asmlinkage void chacha_2block_xor_avx512vl(const struct chacha_state *state,
+ u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+asmlinkage void chacha_4block_xor_avx512vl(const struct chacha_state *state,
+ u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+asmlinkage void chacha_8block_xor_avx512vl(const struct chacha_state *state,
+ u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_simd);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx2);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx512vl);
+
+static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
+{
+ len = min(len, maxblocks * CHACHA_BLOCK_SIZE);
+ return round_up(len, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE;
+}
+
+static void chacha_dosimd(struct chacha_state *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ if (static_branch_likely(&chacha_use_avx512vl)) {
+ while (bytes >= CHACHA_BLOCK_SIZE * 8) {
+ chacha_8block_xor_avx512vl(state, dst, src, bytes,
+ nrounds);
+ bytes -= CHACHA_BLOCK_SIZE * 8;
+ src += CHACHA_BLOCK_SIZE * 8;
+ dst += CHACHA_BLOCK_SIZE * 8;
+ state->x[12] += 8;
+ }
+ if (bytes > CHACHA_BLOCK_SIZE * 4) {
+ chacha_8block_xor_avx512vl(state, dst, src, bytes,
+ nrounds);
+ state->x[12] += chacha_advance(bytes, 8);
+ return;
+ }
+ if (bytes > CHACHA_BLOCK_SIZE * 2) {
+ chacha_4block_xor_avx512vl(state, dst, src, bytes,
+ nrounds);
+ state->x[12] += chacha_advance(bytes, 4);
+ return;
+ }
+ if (bytes) {
+ chacha_2block_xor_avx512vl(state, dst, src, bytes,
+ nrounds);
+ state->x[12] += chacha_advance(bytes, 2);
+ return;
+ }
+ }
+
+ if (static_branch_likely(&chacha_use_avx2)) {
+ while (bytes >= CHACHA_BLOCK_SIZE * 8) {
+ chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
+ bytes -= CHACHA_BLOCK_SIZE * 8;
+ src += CHACHA_BLOCK_SIZE * 8;
+ dst += CHACHA_BLOCK_SIZE * 8;
+ state->x[12] += 8;
+ }
+ if (bytes > CHACHA_BLOCK_SIZE * 4) {
+ chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
+ state->x[12] += chacha_advance(bytes, 8);
+ return;
+ }
+ if (bytes > CHACHA_BLOCK_SIZE * 2) {
+ chacha_4block_xor_avx2(state, dst, src, bytes, nrounds);
+ state->x[12] += chacha_advance(bytes, 4);
+ return;
+ }
+ if (bytes > CHACHA_BLOCK_SIZE) {
+ chacha_2block_xor_avx2(state, dst, src, bytes, nrounds);
+ state->x[12] += chacha_advance(bytes, 2);
+ return;
+ }
+ }
+
+ while (bytes >= CHACHA_BLOCK_SIZE * 4) {
+ chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
+ bytes -= CHACHA_BLOCK_SIZE * 4;
+ src += CHACHA_BLOCK_SIZE * 4;
+ dst += CHACHA_BLOCK_SIZE * 4;
+ state->x[12] += 4;
+ }
+ if (bytes > CHACHA_BLOCK_SIZE) {
+ chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
+ state->x[12] += chacha_advance(bytes, 4);
+ return;
+ }
+ if (bytes) {
+ chacha_block_xor_ssse3(state, dst, src, bytes, nrounds);
+ state->x[12]++;
+ }
+}
+
+void hchacha_block_arch(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds)
+{
+ if (!static_branch_likely(&chacha_use_simd)) {
+ hchacha_block_generic(state, out, nrounds);
+ } else {
+ kernel_fpu_begin();
+ hchacha_block_ssse3(state, out, nrounds);
+ kernel_fpu_end();
+ }
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ if (!static_branch_likely(&chacha_use_simd) ||
+ bytes <= CHACHA_BLOCK_SIZE)
+ return chacha_crypt_generic(state, dst, src, bytes, nrounds);
+
+ do {
+ unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+ kernel_fpu_begin();
+ chacha_dosimd(state, dst, src, todo, nrounds);
+ kernel_fpu_end();
+
+ bytes -= todo;
+ src += todo;
+ dst += todo;
+ } while (bytes);
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+bool chacha_is_arch_optimized(void)
+{
+ return static_key_enabled(&chacha_use_simd);
+}
+EXPORT_SYMBOL(chacha_is_arch_optimized);
+
+static int __init chacha_simd_mod_init(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_SSSE3))
+ return 0;
+
+ static_branch_enable(&chacha_use_simd);
+
+ if (boot_cpu_has(X86_FEATURE_AVX) &&
+ boot_cpu_has(X86_FEATURE_AVX2) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+ static_branch_enable(&chacha_use_avx2);
+
+ if (boot_cpu_has(X86_FEATURE_AVX512VL) &&
+ boot_cpu_has(X86_FEATURE_AVX512BW)) /* kmovq */
+ static_branch_enable(&chacha_use_avx512vl);
+ }
+ return 0;
+}
+subsys_initcall(chacha_simd_mod_init);
+
+static void __exit chacha_simd_mod_exit(void)
+{
+}
+module_exit(chacha_simd_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("ChaCha and HChaCha functions (x86_64 optimized)");
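chacha_advance() above converts a partial tail into a block-counter increment: the byte count is capped at what the N-block routine can touch, then rounded up to whole blocks. A small userspace model of the same arithmetic, with the constants written out:

#include <stdio.h>

#define CHACHA_BLOCK_SIZE 64u

/* Model of chacha_advance(): how far the block counter (state->x[12])
 * moves after an N-block SIMD routine handled a partial tail of `len`
 * bytes.  The routine may touch up to `maxblocks` blocks, and a partial
 * final block still consumes one counter step. */
static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
{
	unsigned int cap = maxblocks * CHACHA_BLOCK_SIZE;

	if (len > cap)
		len = cap;
	return (len + CHACHA_BLOCK_SIZE - 1) / CHACHA_BLOCK_SIZE;
}

int main(void)
{
	printf("%u\n", chacha_advance(65, 2));	/* 2: one full + one partial block */
	printf("%u\n", chacha_advance(64, 2));	/* 1 */
	printf("%u\n", chacha_advance(300, 4));	/* 4: capped at maxblocks */
	return 0;
}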
diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/lib/crypto/poly1305-x86_64-cryptogams.pl
index b9abcd79c1f4..501827254fed 100644
--- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
+++ b/arch/x86/lib/crypto/poly1305-x86_64-cryptogams.pl
@@ -118,6 +118,19 @@ sub declare_function() {
}
}
+sub declare_typed_function() {
+ my ($name, $align, $nargs) = @_;
+ if($kernel) {
+ $code .= "SYM_TYPED_FUNC_START($name)\n";
+ $code .= ".L$name:\n";
+ } else {
+ $code .= ".globl $name\n";
+ $code .= ".type $name,\@function,$nargs\n";
+ $code .= ".align $align\n";
+ $code .= "$name:\n";
+ }
+}
+
sub end_function() {
my ($name) = @_;
if($kernel) {
@@ -128,7 +141,7 @@ sub end_function() {
}
$code.=<<___ if $kernel;
-#include <linux/linkage.h>
+#include <linux/cfi_types.h>
___
if ($avx) {
@@ -236,14 +249,14 @@ ___
$code.=<<___ if (!$kernel);
.extern OPENSSL_ia32cap_P
-.globl poly1305_init_x86_64
-.hidden poly1305_init_x86_64
+.globl poly1305_block_init_arch
+.hidden poly1305_block_init_arch
.globl poly1305_blocks_x86_64
.hidden poly1305_blocks_x86_64
.globl poly1305_emit_x86_64
.hidden poly1305_emit_x86_64
___
-&declare_function("poly1305_init_x86_64", 32, 3);
+&declare_typed_function("poly1305_block_init_arch", 32, 3);
$code.=<<___;
xor %eax,%eax
mov %rax,0($ctx) # initialize hash value
@@ -298,7 +311,7 @@ $code.=<<___;
.Lno_key:
RET
___
-&end_function("poly1305_init_x86_64");
+&end_function("poly1305_block_init_arch");
&declare_function("poly1305_blocks_x86_64", 32, 4);
$code.=<<___;
@@ -2811,18 +2824,10 @@ if ($avx>2) {
# reason stack layout is kept identical to poly1305_blocks_avx2. If not
# for this tail, we wouldn't have to even allocate stack frame...
-if($kernel) {
- $code .= "#ifdef CONFIG_AS_AVX512\n";
-}
-
&declare_function("poly1305_blocks_avx512", 32, 4);
poly1305_blocks_avxN(1);
&end_function("poly1305_blocks_avx512");
-if ($kernel) {
- $code .= "#endif\n";
-}
-
if (!$kernel && $avx>3) {
########################################################################
# VPMADD52 version using 2^44 radix.
@@ -4113,9 +4118,9 @@ avx_handler:
.section .pdata
.align 4
- .rva .LSEH_begin_poly1305_init_x86_64
- .rva .LSEH_end_poly1305_init_x86_64
- .rva .LSEH_info_poly1305_init_x86_64
+ .rva .LSEH_begin_poly1305_block_init_arch
+ .rva .LSEH_end_poly1305_block_init_arch
+ .rva .LSEH_info_poly1305_block_init_arch
.rva .LSEH_begin_poly1305_blocks_x86_64
.rva .LSEH_end_poly1305_blocks_x86_64
@@ -4163,10 +4168,10 @@ ___
$code.=<<___;
.section .xdata
.align 8
-.LSEH_info_poly1305_init_x86_64:
+.LSEH_info_poly1305_block_init_arch:
.byte 9,0,0,0
.rva se_handler
- .rva .LSEH_begin_poly1305_init_x86_64,.LSEH_begin_poly1305_init_x86_64
+ .rva .LSEH_begin_poly1305_block_init_arch,.LSEH_begin_poly1305_block_init_arch
.LSEH_info_poly1305_blocks_x86_64:
.byte 9,0,0,0
diff --git a/arch/x86/lib/crypto/poly1305_glue.c b/arch/x86/lib/crypto/poly1305_glue.c
new file mode 100644
index 000000000000..b7e78a583e07
--- /dev/null
+++ b/arch/x86/lib/crypto/poly1305_glue.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <asm/cpu_device_id.h>
+#include <asm/fpu/api.h>
+#include <crypto/internal/poly1305.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/unaligned.h>
+
+struct poly1305_arch_internal {
+ union {
+ struct {
+ u32 h[5];
+ u32 is_base2_26;
+ };
+ u64 hs[3];
+ };
+ u64 r[2];
+ u64 pad;
+ struct { u32 r2, r1, r4, r3; } rn[9];
+};
+
+asmlinkage void poly1305_block_init_arch(
+ struct poly1305_block_state *state,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+EXPORT_SYMBOL_GPL(poly1305_block_init_arch);
+asmlinkage void poly1305_blocks_x86_64(struct poly1305_arch_internal *ctx,
+ const u8 *inp,
+ const size_t len, const u32 padbit);
+asmlinkage void poly1305_emit_x86_64(const struct poly1305_state *ctx,
+ u8 mac[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4]);
+asmlinkage void poly1305_emit_avx(const struct poly1305_state *ctx,
+ u8 mac[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4]);
+asmlinkage void poly1305_blocks_avx(struct poly1305_arch_internal *ctx,
+ const u8 *inp, const size_t len,
+ const u32 padbit);
+asmlinkage void poly1305_blocks_avx2(struct poly1305_arch_internal *ctx,
+ const u8 *inp, const size_t len,
+ const u32 padbit);
+asmlinkage void poly1305_blocks_avx512(struct poly1305_arch_internal *ctx,
+ const u8 *inp,
+ const size_t len, const u32 padbit);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx512);
+
+void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *inp,
+ unsigned int len, u32 padbit)
+{
+ struct poly1305_arch_internal *ctx =
+ container_of(&state->h.h, struct poly1305_arch_internal, h);
+
+ /* SIMD disables preemption, so relax after processing each page. */
+ BUILD_BUG_ON(SZ_4K < POLY1305_BLOCK_SIZE ||
+ SZ_4K % POLY1305_BLOCK_SIZE);
+
+ if (!static_branch_likely(&poly1305_use_avx)) {
+ poly1305_blocks_x86_64(ctx, inp, len, padbit);
+ return;
+ }
+
+ do {
+ const unsigned int bytes = min(len, SZ_4K);
+
+ kernel_fpu_begin();
+ if (static_branch_likely(&poly1305_use_avx512))
+ poly1305_blocks_avx512(ctx, inp, bytes, padbit);
+ else if (static_branch_likely(&poly1305_use_avx2))
+ poly1305_blocks_avx2(ctx, inp, bytes, padbit);
+ else
+ poly1305_blocks_avx(ctx, inp, bytes, padbit);
+ kernel_fpu_end();
+
+ len -= bytes;
+ inp += bytes;
+ } while (len);
+}
+EXPORT_SYMBOL_GPL(poly1305_blocks_arch);
+
+void poly1305_emit_arch(const struct poly1305_state *ctx,
+ u8 mac[POLY1305_DIGEST_SIZE], const u32 nonce[4])
+{
+ if (!static_branch_likely(&poly1305_use_avx))
+ poly1305_emit_x86_64(ctx, mac, nonce);
+ else
+ poly1305_emit_avx(ctx, mac, nonce);
+}
+EXPORT_SYMBOL_GPL(poly1305_emit_arch);
+
+bool poly1305_is_arch_optimized(void)
+{
+ return static_key_enabled(&poly1305_use_avx);
+}
+EXPORT_SYMBOL(poly1305_is_arch_optimized);
+
+static int __init poly1305_simd_mod_init(void)
+{
+ if (boot_cpu_has(X86_FEATURE_AVX) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
+ static_branch_enable(&poly1305_use_avx);
+ if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
+ static_branch_enable(&poly1305_use_avx2);
+ if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) &&
+ boot_cpu_has(X86_FEATURE_AVX512F) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL) &&
+ /* Skylake downclocks unacceptably much when using zmm, but later generations are fast. */
+ boot_cpu_data.x86_vfm != INTEL_SKYLAKE_X)
+ static_branch_enable(&poly1305_use_avx512);
+ return 0;
+}
+subsys_initcall(poly1305_simd_mod_init);
+
+static void __exit poly1305_simd_mod_exit(void)
+{
+}
+module_exit(poly1305_simd_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
+MODULE_DESCRIPTION("Poly1305 authenticator");
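chacha_crypt_arch() and poly1305_blocks_arch() above both bound how long the FPU stays claimed by splitting input at SZ_4K and re-opening a preemption point between chunks. A userspace model of that chunking loop; the fpu_begin/fpu_end/simd_core helpers are stand-ins, not kernel functions.

#include <stddef.h>
#include <stdio.h>

#define CHUNK 4096u	/* SZ_4K: FPU sections are kept to one page of input */

/* Stand-ins for kernel_fpu_begin()/kernel_fpu_end() and the SIMD core. */
static void fpu_begin(void) { }
static void fpu_end(void)   { }
static void simd_core(const unsigned char *p, size_t n) { (void)p; (void)n; }

/* Mirrors the chunking loops above: SIMD work runs with preemption
 * constraints, so large inputs are split and the FPU released between
 * page-sized chunks. */
static void process(const unsigned char *p, size_t len)
{
	size_t chunks = 0;

	while (len) {
		size_t todo = len < CHUNK ? len : CHUNK;

		fpu_begin();
		simd_core(p, todo);
		fpu_end();

		p += todo;
		len -= todo;
		chunks++;
	}
	printf("processed in %zu chunk(s)\n", chunks);
}

int main(void)
{
	static unsigned char buf[10000];

	process(buf, sizeof(buf));	/* 3 chunk(s): 4096 + 4096 + 1808 */
	return 0;
}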
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/lib/crypto/sha256-avx-asm.S
index 53de72bdd851..0d7b2c3e45d9 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/lib/crypto/sha256-avx-asm.S
@@ -48,7 +48,7 @@
########################################################################
#include <linux/linkage.h>
-#include <linux/cfi_types.h>
+#include <linux/objtool.h>
## assume buffers not aligned
#define VMOVDQ vmovdqu
@@ -341,13 +341,13 @@ a = TMP_
.endm
########################################################################
-## void sha256_transform_avx(state sha256_state *state, const u8 *data, int blocks)
-## arg 1 : pointer to state
-## arg 2 : pointer to input data
-## arg 3 : Num blocks
+## void sha256_transform_avx(u32 state[SHA256_STATE_WORDS],
+## const u8 *data, size_t nblocks);
########################################################################
.text
-SYM_TYPED_FUNC_START(sha256_transform_avx)
+SYM_FUNC_START(sha256_transform_avx)
+ ANNOTATE_NOENDBR # since this is called only via static_call
+
pushq %rbx
pushq %r12
pushq %r13
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/lib/crypto/sha256-avx2-asm.S
index 0bbec1c75cd0..25d3380321ec 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/lib/crypto/sha256-avx2-asm.S
@@ -49,7 +49,7 @@
########################################################################
#include <linux/linkage.h>
-#include <linux/cfi_types.h>
+#include <linux/objtool.h>
## assume buffers not aligned
#define VMOVDQ vmovdqu
@@ -518,13 +518,13 @@ STACK_SIZE = _CTX + _CTX_SIZE
.endm
########################################################################
-## void sha256_transform_rorx(struct sha256_state *state, const u8 *data, int blocks)
-## arg 1 : pointer to state
-## arg 2 : pointer to input data
-## arg 3 : Num blocks
+## void sha256_transform_rorx(u32 state[SHA256_STATE_WORDS],
+## const u8 *data, size_t nblocks);
########################################################################
.text
-SYM_TYPED_FUNC_START(sha256_transform_rorx)
+SYM_FUNC_START(sha256_transform_rorx)
+ ANNOTATE_NOENDBR # since this is called only via static_call
+
pushq %rbx
pushq %r12
pushq %r13
diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/lib/crypto/sha256-ni-asm.S
index d515a55a3bc1..d3548206cf3d 100644
--- a/arch/x86/crypto/sha256_ni_asm.S
+++ b/arch/x86/lib/crypto/sha256-ni-asm.S
@@ -54,9 +54,9 @@
*/
#include <linux/linkage.h>
-#include <linux/cfi_types.h>
+#include <linux/objtool.h>
-#define DIGEST_PTR %rdi /* 1st arg */
+#define STATE_PTR %rdi /* 1st arg */
#define DATA_PTR %rsi /* 2nd arg */
#define NUM_BLKS %rdx /* 3rd arg */
@@ -98,24 +98,20 @@
.endm
/*
- * Intel SHA Extensions optimized implementation of a SHA-256 update function
+ * Intel SHA Extensions optimized implementation of a SHA-256 block function
*
- * The function takes a pointer to the current hash values, a pointer to the
- * input data, and a number of 64 byte blocks to process. Once all blocks have
- * been processed, the digest pointer is updated with the resulting hash value.
- * The function only processes complete blocks, there is no functionality to
- * store partial blocks. All message padding and hash value initialization must
- * be done outside the update function.
+ * This function takes a pointer to the current SHA-256 state, a pointer to the
+ * input data, and the number of 64-byte blocks to process. Once all blocks
+ * have been processed, the state is updated with the new state. This function
+ * only processes complete blocks. State initialization, buffering of partial
+ * blocks, and digest finalization is expected to be handled elsewhere.
*
- * void sha256_ni_transform(uint32_t *digest, const void *data,
- uint32_t numBlocks);
- * digest : pointer to digest
- * data: pointer to input data
- * numBlocks: Number of blocks to process
+ * void sha256_ni_transform(u32 state[SHA256_STATE_WORDS],
+ * const u8 *data, size_t nblocks);
*/
-
.text
-SYM_TYPED_FUNC_START(sha256_ni_transform)
+SYM_FUNC_START(sha256_ni_transform)
+ ANNOTATE_NOENDBR # since this is called only via static_call
shl $6, NUM_BLKS /* convert to bytes */
jz .Ldone_hash
@@ -126,8 +122,8 @@ SYM_TYPED_FUNC_START(sha256_ni_transform)
* Need to reorder these appropriately
* DCBA, HGFE -> ABEF, CDGH
*/
- movdqu 0*16(DIGEST_PTR), STATE0 /* DCBA */
- movdqu 1*16(DIGEST_PTR), STATE1 /* HGFE */
+ movdqu 0*16(STATE_PTR), STATE0 /* DCBA */
+ movdqu 1*16(STATE_PTR), STATE1 /* HGFE */
movdqa STATE0, TMP
punpcklqdq STATE1, STATE0 /* FEBA */
@@ -166,8 +162,8 @@ SYM_TYPED_FUNC_START(sha256_ni_transform)
pshufd $0xB1, STATE0, STATE0 /* HGFE */
pshufd $0x1B, STATE1, STATE1 /* DCBA */
- movdqu STATE1, 0*16(DIGEST_PTR)
- movdqu STATE0, 1*16(DIGEST_PTR)
+ movdqu STATE1, 0*16(STATE_PTR)
+ movdqu STATE0, 1*16(STATE_PTR)
.Ldone_hash:
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/lib/crypto/sha256-ssse3-asm.S
index 93264ee44543..7f24a4cdcb25 100644
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/lib/crypto/sha256-ssse3-asm.S
@@ -47,7 +47,7 @@
########################################################################
#include <linux/linkage.h>
-#include <linux/cfi_types.h>
+#include <linux/objtool.h>
## assume buffers not aligned
#define MOVDQ movdqu
@@ -348,15 +348,13 @@ a = TMP_
.endm
########################################################################
-## void sha256_transform_ssse3(struct sha256_state *state, const u8 *data,
-## int blocks);
-## arg 1 : pointer to state
-## (struct sha256_state is assumed to begin with u32 state[8])
-## arg 2 : pointer to input data
-## arg 3 : Num blocks
+## void sha256_transform_ssse3(u32 state[SHA256_STATE_WORDS],
+## const u8 *data, size_t nblocks);
########################################################################
.text
-SYM_TYPED_FUNC_START(sha256_transform_ssse3)
+SYM_FUNC_START(sha256_transform_ssse3)
+ ANNOTATE_NOENDBR # since this is called only via static_call
+
pushq %rbx
pushq %r12
pushq %r13
diff --git a/arch/x86/lib/crypto/sha256.c b/arch/x86/lib/crypto/sha256.c
new file mode 100644
index 000000000000..80380f8fdcee
--- /dev/null
+++ b/arch/x86/lib/crypto/sha256.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-256 optimized for x86_64
+ *
+ * Copyright 2025 Google LLC
+ */
+#include <asm/fpu/api.h>
+#include <crypto/internal/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/static_call.h>
+
+asmlinkage void sha256_transform_ssse3(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+asmlinkage void sha256_transform_avx(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+asmlinkage void sha256_transform_rorx(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+asmlinkage void sha256_ni_transform(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha256_x86);
+
+DEFINE_STATIC_CALL(sha256_blocks_x86, sha256_transform_ssse3);
+
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks)
+{
+ if (static_branch_likely(&have_sha256_x86)) {
+ kernel_fpu_begin();
+ static_call(sha256_blocks_x86)(state, data, nblocks);
+ kernel_fpu_end();
+ } else {
+ sha256_blocks_generic(state, data, nblocks);
+ }
+}
+EXPORT_SYMBOL_GPL(sha256_blocks_simd);
+
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks)
+{
+ sha256_blocks_generic(state, data, nblocks);
+}
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+
+bool sha256_is_arch_optimized(void)
+{
+ return static_key_enabled(&have_sha256_x86);
+}
+EXPORT_SYMBOL_GPL(sha256_is_arch_optimized);
+
+static int __init sha256_x86_mod_init(void)
+{
+ if (boot_cpu_has(X86_FEATURE_SHA_NI)) {
+ static_call_update(sha256_blocks_x86, sha256_ni_transform);
+ } else if (cpu_has_xfeatures(XFEATURE_MASK_SSE |
+ XFEATURE_MASK_YMM, NULL) &&
+ boot_cpu_has(X86_FEATURE_AVX)) {
+ if (boot_cpu_has(X86_FEATURE_AVX2) &&
+ boot_cpu_has(X86_FEATURE_BMI2))
+ static_call_update(sha256_blocks_x86,
+ sha256_transform_rorx);
+ else
+ static_call_update(sha256_blocks_x86,
+ sha256_transform_avx);
+ } else if (!boot_cpu_has(X86_FEATURE_SSSE3)) {
+ return 0;
+ }
+ static_branch_enable(&have_sha256_x86);
+ return 0;
+}
+subsys_initcall(sha256_x86_mod_init);
+
+static void __exit sha256_x86_mod_exit(void)
+{
+}
+module_exit(sha256_x86_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-256 optimized for x86_64");
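sha256_x86_mod_init() above resolves the static call once, in strict preference order. That selection can be modelled in plain C as follows; the feature-flag struct is a stand-in for the boot_cpu_has()/cpu_has_xfeatures() checks, and NULL maps to the generic fallback.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative CPU feature flags (stand-ins for the boot-time probes). */
struct cpu { bool sha_ni, avx, avx2, bmi2, ssse3, os_xsave_ymm; };

/* Same priority order as the init routine: SHA-NI, then AVX2+BMI2 (the
 * RORX variant), then AVX, then the SSSE3 baseline; NULL means only the
 * generic C implementation can be used. */
static const char *pick_sha256_impl(const struct cpu *c)
{
	if (c->sha_ni)
		return "sha256_ni_transform";
	if (c->avx && c->os_xsave_ymm) {
		if (c->avx2 && c->bmi2)
			return "sha256_transform_rorx";
		return "sha256_transform_avx";
	}
	if (c->ssse3)
		return "sha256_transform_ssse3";
	return NULL;	/* fall back to sha256_blocks_generic() */
}

int main(void)
{
	struct cpu c = { .avx = true, .avx2 = true, .bmi2 = true,
			 .ssse3 = true, .os_xsave_ymm = true };

	printf("%s\n", pick_sha256_impl(&c));	/* sha256_transform_rorx */
	return 0;
}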
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index e86eda2c0b04..eb2d2e1cbddd 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -75,7 +75,7 @@ static void delay_tsc(u64 cycles)
/* Allow RT tasks to run */
preempt_enable();
- rep_nop();
+ native_pause();
preempt_disable();
/*
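For reference, the loop being touched is a TSC-bounded spin that yields the pipeline with PAUSE on each iteration; the hunk only renames the wrapper from rep_nop() to native_pause(). A rough userspace analogue, x86-only and without the kernel's preemption handling, is sketched here as an assumption-laden illustration rather than a faithful copy of delay_tsc():

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>	/* __rdtsc(), x86 only */

/* Spin until roughly `cycles` TSC cycles have elapsed, issuing PAUSE in
 * the loop body so the sibling thread and power management get a hint. */
static void delay_cycles(uint64_t cycles)
{
	uint64_t start = __rdtsc();

	while (__rdtsc() - start < cycles)
		__builtin_ia32_pause();
}

int main(void)
{
	delay_cycles(1000000);
	puts("done");
	return 0;
}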
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 98631c0e7a11..4e385cbfd444 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -13,6 +13,7 @@
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/ldt.h>
+#include <asm/msr.h>
#include <asm/vm86.h>
#undef pr_fmt
@@ -631,14 +632,21 @@ static bool get_desc(struct desc_struct *out, unsigned short sel)
/* Bits [15:3] contain the index of the desired entry. */
sel >>= 3;
- mutex_lock(&current->active_mm->context.lock);
- ldt = current->active_mm->context.ldt;
+ /*
+ * If we're not in a valid context with a real (not just lazy)
+ * user mm, then don't even try.
+ */
+ if (!nmi_uaccess_okay())
+ return false;
+
+ mutex_lock(&current->mm->context.lock);
+ ldt = current->mm->context.ldt;
if (ldt && sel < ldt->nr_entries) {
*out = ldt->entries[sel];
success = true;
}
- mutex_unlock(&current->active_mm->context.lock);
+ mutex_unlock(&current->mm->context.lock);
return success;
}
@@ -702,16 +710,16 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
unsigned long base;
if (seg_reg_idx == INAT_SEG_REG_FS) {
- rdmsrl(MSR_FS_BASE, base);
+ rdmsrq(MSR_FS_BASE, base);
} else if (seg_reg_idx == INAT_SEG_REG_GS) {
/*
* swapgs was called at the kernel entry point. Thus,
* MSR_KERNEL_GS_BASE will have the user-space GS base.
*/
if (user_mode(regs))
- rdmsrl(MSR_KERNEL_GS_BASE, base);
+ rdmsrq(MSR_KERNEL_GS_BASE, base);
else
- rdmsrl(MSR_GS_BASE, base);
+ rdmsrq(MSR_GS_BASE, base);
} else {
base = 0;
}
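The MSR selection in insn_get_seg_base() is easy to get backwards: after the entry-path SWAPGS, the user GS base is the one sitting in MSR_KERNEL_GS_BASE. A tiny model of that selection, with plain variables standing in for the three MSRs (names and the enum are illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum seg { SEG_FS, SEG_GS };

/* Stand-ins for the three MSR-backed base registers. */
static uint64_t fs_base, gs_base, kernel_gs_base;

/* Mirrors the choice above: a fault taken from user mode must read the
 * user GS base from MSR_KERNEL_GS_BASE, because SWAPGS already swapped
 * the kernel value into MSR_GS_BASE. */
static uint64_t seg_base(enum seg s, bool from_user_mode)
{
	if (s == SEG_FS)
		return fs_base;
	return from_user_mode ? kernel_gs_base : gs_base;
}

int main(void)
{
	fs_base = 0x1000; gs_base = 0x2000; kernel_gs_base = 0x3000;
	printf("%#llx\n", (unsigned long long)seg_base(SEG_GS, true));	/* 0x3000 */
	printf("%#llx\n", (unsigned long long)seg_base(SEG_GS, false));	/* 0x2000 */
	return 0;
}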
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 6ffb931b9fb1..149a57e334ab 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -324,6 +324,11 @@ int insn_get_opcode(struct insn *insn)
}
insn->attr = inat_get_opcode_attribute(op);
+ if (insn->x86_64 && inat_is_invalid64(insn->attr)) {
+ /* This instruction is invalid, like UD2. Stop decoding. */
+ insn->attr &= INAT_INV64;
+ }
+
while (inat_is_escape(insn->attr)) {
/* Get escaped opcode */
op = get_next(insn_byte_t, insn);
@@ -337,6 +342,7 @@ int insn_get_opcode(struct insn *insn)
insn->attr = 0;
return -EINVAL;
}
+
end:
opcode->got = 1;
return 0;
@@ -658,7 +664,6 @@ int insn_get_immediate(struct insn *insn)
}
if (!inat_has_immediate(insn->attr))
- /* no immediates */
goto done;
switch (inat_immediate_size(insn->attr)) {
diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
index 5eecb45d05d5..c20e04764edc 100644
--- a/arch/x86/lib/iomem.c
+++ b/arch/x86/lib/iomem.c
@@ -10,7 +10,7 @@
static __always_inline void rep_movs(void *to, const void *from, size_t n)
{
unsigned long d0, d1, d2;
- asm volatile("rep ; movsl\n\t"
+ asm volatile("rep movsl\n\t"
"testb $2,%b4\n\t"
"je 1f\n\t"
"movsw\n"
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index a58f451a7dd3..b5893928d55c 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -8,7 +8,7 @@
*/
#include <asm/asm.h>
#include <asm/kaslr.h>
-#include <asm/msr.h>
+#include <asm/tsc.h>
#include <asm/archrandom.h>
#include <asm/e820/api.h>
#include <asm/shared/io.h>
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 0ae2e1712e2e..12a23fa7c44c 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -41,6 +41,7 @@ SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)
SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy)
+SYM_PIC_ALIAS(memcpy)
EXPORT_SYMBOL(memcpy)
SYM_FUNC_START_LOCAL(memcpy_orig)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index d66b710d628f..fb5a03cf5ab7 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -42,6 +42,7 @@ SYM_FUNC_END(__memset)
EXPORT_SYMBOL(__memset)
SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
+SYM_PIC_ALIAS(memset)
EXPORT_SYMBOL(memset)
SYM_FUNC_START_LOCAL(memset_orig)
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index acd463d887e1..b8f63419e6ae 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -47,7 +47,7 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
}
EXPORT_SYMBOL(rdmsr_on_cpu);
-int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
int err;
struct msr_info rv;
@@ -60,7 +60,7 @@ int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
return err;
}
-EXPORT_SYMBOL(rdmsrl_on_cpu);
+EXPORT_SYMBOL(rdmsrq_on_cpu);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
@@ -78,7 +78,7 @@ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
}
EXPORT_SYMBOL(wrmsr_on_cpu);
-int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
int err;
struct msr_info rv;
@@ -92,7 +92,7 @@ int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
return err;
}
-EXPORT_SYMBOL(wrmsrl_on_cpu);
+EXPORT_SYMBOL(wrmsrq_on_cpu);
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
struct msr __percpu *msrs,
@@ -204,7 +204,7 @@ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);
-int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
int err;
struct msr_info rv;
@@ -218,9 +218,9 @@ int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
return err ? err : rv.err;
}
-EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
+EXPORT_SYMBOL(wrmsrq_safe_on_cpu);
-int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
u32 low, high;
int err;
@@ -230,7 +230,7 @@ int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
return err;
}
-EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
+EXPORT_SYMBOL(rdmsrq_safe_on_cpu);
/*
* These variants are significantly slower, but allows control over
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 5a18ecc04a6c..4ef7c6dcbea6 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -41,7 +41,7 @@ static int msr_read(u32 msr, struct msr *m)
int err;
u64 val;
- err = rdmsrl_safe(msr, &val);
+ err = rdmsrq_safe(msr, &val);
if (!err)
m->q = val;
@@ -58,7 +58,7 @@ static int msr_read(u32 msr, struct msr *m)
*/
static int msr_write(u32 msr, struct msr *m)
{
- return wrmsrl_safe(msr, m->q);
+ return wrmsrq_safe(msr, m->q);
}
static inline int __flip_bit(u32 msr, u8 bit, bool set)
@@ -122,23 +122,23 @@ int msr_clear_bit(u32 msr, u8 bit)
EXPORT_SYMBOL_GPL(msr_clear_bit);
#ifdef CONFIG_TRACEPOINTS
-void do_trace_write_msr(unsigned int msr, u64 val, int failed)
+void do_trace_write_msr(u32 msr, u64 val, int failed)
{
trace_write_msr(msr, val, failed);
}
EXPORT_SYMBOL(do_trace_write_msr);
EXPORT_TRACEPOINT_SYMBOL(write_msr);
-void do_trace_read_msr(unsigned int msr, u64 val, int failed)
+void do_trace_read_msr(u32 msr, u64 val, int failed)
{
trace_read_msr(msr, val, failed);
}
EXPORT_SYMBOL(do_trace_read_msr);
EXPORT_TRACEPOINT_SYMBOL(read_msr);
-void do_trace_rdpmc(unsigned counter, u64 val, int failed)
+void do_trace_rdpmc(u32 msr, u64 val, int failed)
{
- trace_rdpmc(counter, val, failed);
+ trace_rdpmc(msr, val, failed);
}
EXPORT_SYMBOL(do_trace_rdpmc);
EXPORT_TRACEPOINT_SYMBOL(rdpmc);
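The renames follow the convention used throughout this series: a q suffix for helpers that move a full 64-bit value, with _safe variants that report failure rather than faulting. A hypothetical caller, kernel context assumed (not a standalone program; the helper name is made up):

/* Hypothetical helper: read this CPU's FS base, tolerating a missing MSR. */
static int read_fs_base(u64 *val)
{
	return rdmsrq_safe(MSR_FS_BASE, val);	/* 0 on success, non-zero on failure */
}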
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index a26c43abd47d..d78d769a02bd 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -40,6 +40,7 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
ALTERNATIVE_2 __stringify(RETPOLINE \reg), \
__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \
__stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE)
+SYM_PIC_ALIAS(__x86_indirect_thunk_\reg)
.endm
@@ -367,6 +368,54 @@ SYM_FUNC_END(call_depth_return_thunk)
#endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */
+#ifdef CONFIG_MITIGATION_ITS
+
+.macro ITS_THUNK reg
+
+/*
+ * If CFI paranoid is used then the ITS thunk starts with opcodes (0xea; jne 1b)
+ * that complete the fineibt_paranoid caller sequence.
+ */
+1: .byte 0xea
+SYM_INNER_LABEL(__x86_indirect_paranoid_thunk_\reg, SYM_L_GLOBAL)
+ UNWIND_HINT_UNDEFINED
+ ANNOTATE_NOENDBR
+ jne 1b
+SYM_INNER_LABEL(__x86_indirect_its_thunk_\reg, SYM_L_GLOBAL)
+ UNWIND_HINT_UNDEFINED
+ ANNOTATE_NOENDBR
+ ANNOTATE_RETPOLINE_SAFE
+ jmp *%\reg
+ int3
+ .align 32, 0xcc /* fill to the end of the line */
+ .skip 32 - (__x86_indirect_its_thunk_\reg - 1b), 0xcc /* skip to the next upper half */
+.endm
+
+/* ITS mitigation requires thunks be aligned to upper half of cacheline */
+.align 64, 0xcc
+.skip 29, 0xcc
+
+#define GEN(reg) ITS_THUNK reg
+#include <asm/GEN-for-each-reg.h>
+#undef GEN
+
+ .align 64, 0xcc
+SYM_FUNC_ALIAS(__x86_indirect_its_thunk_array, __x86_indirect_its_thunk_rax)
+SYM_CODE_END(__x86_indirect_its_thunk_array)
+
+.align 64, 0xcc
+.skip 32, 0xcc
+SYM_CODE_START(its_return_thunk)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+SYM_CODE_END(its_return_thunk)
+EXPORT_SYMBOL(its_return_thunk)
+
+#endif /* CONFIG_MITIGATION_ITS */
+
/*
* This function name is magical and is used by -mfunction-return=thunk-extern
* for the compiler to generate JMPs to it.
@@ -394,6 +443,7 @@ SYM_CODE_START(__x86_return_thunk)
#endif
int3
SYM_CODE_END(__x86_return_thunk)
+SYM_PIC_ALIAS(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)
#endif /* CONFIG_MITIGATION_RETHUNK */
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index 53b3f202267c..f87ec24fa579 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -40,8 +40,7 @@ char *strncpy(char *dest, const char *src, size_t count)
"stosb\n\t"
"testb %%al,%%al\n\t"
"jne 1b\n\t"
- "rep\n\t"
- "stosb\n"
+ "rep stosb\n"
"2:"
: "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
: "0" (src), "1" (dest), "2" (count) : "memory");
@@ -54,8 +53,7 @@ EXPORT_SYMBOL(strncpy);
char *strcat(char *dest, const char *src)
{
int d0, d1, d2, d3;
- asm volatile("repne\n\t"
- "scasb\n\t"
+ asm volatile("repne scasb\n\t"
"decl %1\n"
"1:\tlodsb\n\t"
"stosb\n\t"
@@ -72,8 +70,7 @@ EXPORT_SYMBOL(strcat);
char *strncat(char *dest, const char *src, size_t count)
{
int d0, d1, d2, d3;
- asm volatile("repne\n\t"
- "scasb\n\t"
+ asm volatile("repne scasb\n\t"
"decl %1\n\t"
"movl %8,%3\n"
"1:\tdecl %3\n\t"
@@ -167,8 +164,7 @@ size_t strlen(const char *s)
{
int d0;
size_t res;
- asm volatile("repne\n\t"
- "scasb"
+ asm volatile("repne scasb"
: "=c" (res), "=&D" (d0)
: "1" (s), "a" (0), "0" (0xffffffffu)
: "memory");
@@ -184,8 +180,7 @@ void *memchr(const void *cs, int c, size_t count)
void *res;
if (!count)
return NULL;
- asm volatile("repne\n\t"
- "scasb\n\t"
+ asm volatile("repne scasb\n\t"
"je 1f\n\t"
"movl $1,%0\n"
"1:\tdecl %0"
@@ -202,7 +197,7 @@ void *memscan(void *addr, int c, size_t size)
{
if (!size)
return addr;
- asm volatile("repnz; scasb\n\t"
+ asm volatile("repnz scasb\n\t"
"jnz 1f\n\t"
"dec %%edi\n"
"1:"
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c
index 38f37df056f7..28267985e85f 100644
--- a/arch/x86/lib/strstr_32.c
+++ b/arch/x86/lib/strstr_32.c
@@ -8,16 +8,14 @@ int d0, d1;
register char *__res;
__asm__ __volatile__(
"movl %6,%%edi\n\t"
- "repne\n\t"
- "scasb\n\t"
+ "repne scasb\n\t"
"notl %%ecx\n\t"
"decl %%ecx\n\t" /* NOTE! This also sets Z if searchstring='' */
"movl %%ecx,%%edx\n"
"1:\tmovl %6,%%edi\n\t"
"movl %%esi,%%eax\n\t"
"movl %%edx,%%ecx\n\t"
- "repe\n\t"
- "cmpsb\n\t"
+ "repe cmpsb\n\t"
"je 2f\n\t" /* also works for empty string, see above */
"xchgl %%eax,%%esi\n\t"
"incl %%esi\n\t"
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 422257c350c6..f6f436f1d573 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -38,9 +38,9 @@ do { \
might_fault(); \
__asm__ __volatile__( \
ASM_STAC "\n" \
- "0: rep; stosl\n" \
+ "0: rep stosl\n" \
" movl %2,%0\n" \
- "1: rep; stosb\n" \
+ "1: rep stosb\n" \
"2: " ASM_CLAC "\n" \
_ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN4, %2) \
_ASM_EXTABLE_UA(1b, 2b) \
@@ -140,9 +140,9 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
- "99: rep; movsl\n"
+ "99: rep movsl\n"
"36: movl %%eax, %0\n"
- "37: rep; movsb\n"
+ "37: rep movsb\n"
"100:\n"
_ASM_EXTABLE_UA(1b, 100b)
_ASM_EXTABLE_UA(2b, 100b)
@@ -242,9 +242,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
- "6: rep; movsl\n"
+ "6: rep movsl\n"
" movl %%eax,%0\n"
- "7: rep; movsb\n"
+ "7: rep movsb\n"
"8:\n"
_ASM_EXTABLE_UA(0b, 8b)
_ASM_EXTABLE_UA(1b, 8b)
@@ -293,14 +293,14 @@ do { \
" negl %0\n" \
" andl $7,%0\n" \
" subl %0,%3\n" \
- "4: rep; movsb\n" \
+ "4: rep movsb\n" \
" movl %3,%0\n" \
" shrl $2,%0\n" \
" andl $3,%3\n" \
" .align 2,0x90\n" \
- "0: rep; movsl\n" \
+ "0: rep movsl\n" \
" movl %3,%0\n" \
- "1: rep; movsb\n" \
+ "1: rep movsb\n" \
"2:\n" \
_ASM_EXTABLE_TYPE_REG(4b, 2b, EX_TYPE_UCOPY_LEN1, %3) \
_ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN4, %3) \
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index caedb3ef6688..262f7ca1fb95 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -35,7 +35,7 @@
# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
#
-# REX2 Prefix
+# REX2 Prefix Superscripts
# - (!REX2): REX2 is not allowed
# - (REX2): REX2 variant e.g. JMPABS
@@ -147,7 +147,7 @@ AVXcode:
# 0x60 - 0x6f
60: PUSHA/PUSHAD (i64)
61: POPA/POPAD (i64)
-62: BOUND Gv,Ma (i64) | EVEX (Prefix)
+62: BOUND Gv,Ma (i64) | EVEX (Prefix),(o64)
63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
64: SEG=FS (Prefix)
65: SEG=GS (Prefix)
@@ -253,8 +253,8 @@ c0: Grp2 Eb,Ib (1A)
c1: Grp2 Ev,Ib (1A)
c2: RETN Iw (f64)
c3: RETN
-c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
-c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
+c4: LES Gz,Mp (i64) | VEX+2byte (Prefix),(o64)
+c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix),(o64)
c6: Grp11A Eb,Ib (1A)
c7: Grp11B Ev,Iz (1A)
c8: ENTER Iw,Ib
@@ -286,10 +286,10 @@ df: ESC
# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
# in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation
# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
-e0: LOOPNE/LOOPNZ Jb (f64) (!REX2)
-e1: LOOPE/LOOPZ Jb (f64) (!REX2)
-e2: LOOP Jb (f64) (!REX2)
-e3: JrCXZ Jb (f64) (!REX2)
+e0: LOOPNE/LOOPNZ Jb (f64),(!REX2)
+e1: LOOPE/LOOPZ Jb (f64),(!REX2)
+e2: LOOP Jb (f64),(!REX2)
+e3: JrCXZ Jb (f64),(!REX2)
e4: IN AL,Ib (!REX2)
e5: IN eAX,Ib (!REX2)
e6: OUT Ib,AL (!REX2)
@@ -298,10 +298,10 @@ e7: OUT Ib,eAX (!REX2)
# in "near" jumps and calls is 16-bit. For CALL,
# push of return address is 16-bit wide, RSP is decremented by 2
# but is not truncated to 16 bits, unlike RIP.
-e8: CALL Jz (f64) (!REX2)
-e9: JMP-near Jz (f64) (!REX2)
-ea: JMP-far Ap (i64) (!REX2)
-eb: JMP-short Jb (f64) (!REX2)
+e8: CALL Jz (f64),(!REX2)
+e9: JMP-near Jz (f64),(!REX2)
+ea: JMP-far Ap (i64),(!REX2)
+eb: JMP-short Jb (f64),(!REX2)
ec: IN AL,DX (!REX2)
ed: IN eAX,DX (!REX2)
ee: OUT DX,AL (!REX2)
@@ -478,22 +478,22 @@ AVXcode: 1
7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
# 0x0f 0x80-0x8f
# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
-80: JO Jz (f64) (!REX2)
-81: JNO Jz (f64) (!REX2)
-82: JB/JC/JNAE Jz (f64) (!REX2)
-83: JAE/JNB/JNC Jz (f64) (!REX2)
-84: JE/JZ Jz (f64) (!REX2)
-85: JNE/JNZ Jz (f64) (!REX2)
-86: JBE/JNA Jz (f64) (!REX2)
-87: JA/JNBE Jz (f64) (!REX2)
-88: JS Jz (f64) (!REX2)
-89: JNS Jz (f64) (!REX2)
-8a: JP/JPE Jz (f64) (!REX2)
-8b: JNP/JPO Jz (f64) (!REX2)
-8c: JL/JNGE Jz (f64) (!REX2)
-8d: JNL/JGE Jz (f64) (!REX2)
-8e: JLE/JNG Jz (f64) (!REX2)
-8f: JNLE/JG Jz (f64) (!REX2)
+80: JO Jz (f64),(!REX2)
+81: JNO Jz (f64),(!REX2)
+82: JB/JC/JNAE Jz (f64),(!REX2)
+83: JAE/JNB/JNC Jz (f64),(!REX2)
+84: JE/JZ Jz (f64),(!REX2)
+85: JNE/JNZ Jz (f64),(!REX2)
+86: JBE/JNA Jz (f64),(!REX2)
+87: JA/JNBE Jz (f64),(!REX2)
+88: JS Jz (f64),(!REX2)
+89: JNS Jz (f64),(!REX2)
+8a: JP/JPE Jz (f64),(!REX2)
+8b: JNP/JPO Jz (f64),(!REX2)
+8c: JL/JNGE Jz (f64),(!REX2)
+8d: JNL/JGE Jz (f64),(!REX2)
+8e: JLE/JNG Jz (f64),(!REX2)
+8f: JNLE/JG Jz (f64),(!REX2)
# 0x0f 0x90-0x9f
90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
@@ -996,8 +996,8 @@ AVXcode: 4
83: Grp1 Ev,Ib (1A),(es)
# CTESTSCC instructions are: CTESTB, CTESTBE, CTESTF, CTESTL, CTESTLE, CTESTNB, CTESTNBE, CTESTNL,
# CTESTNLE, CTESTNO, CTESTNS, CTESTNZ, CTESTO, CTESTS, CTESTT, CTESTZ
-84: CTESTSCC (ev)
-85: CTESTSCC (es) | CTESTSCC (66),(es)
+84: CTESTSCC Eb,Gb (ev)
+85: CTESTSCC Ev,Gv (es) | CTESTSCC Ev,Gv (66),(es)
88: POPCNT Gv,Ev (es) | POPCNT Gv,Ev (66),(es)
8f: POP2 Bq,Rq (000),(11B),(ev)
a5: SHLD Ev,Gv,CL (es) | SHLD Ev,Gv,CL (66),(es)
diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c
index d62662bdd460..5f253ae406b6 100644
--- a/arch/x86/math-emu/fpu_aux.c
+++ b/arch/x86/math-emu/fpu_aux.c
@@ -53,7 +53,7 @@ void fpstate_init_soft(struct swregs_state *soft)
void finit(void)
{
- fpstate_init_soft(&current->thread.fpu.fpstate->regs.soft);
+ fpstate_init_soft(&x86_task_fpu(current)->fpstate->regs.soft);
}
/*
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 91c52ead1226..5034df617740 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -641,7 +641,7 @@ int fpregs_soft_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct swregs_state *s387 = &target->thread.fpu.fpstate->regs.soft;
+ struct swregs_state *s387 = &x86_task_fpu(target)->fpstate->regs.soft;
void *space = s387->st_space;
int ret;
int offset, other, i, tags, regnr, tag, newtop;
@@ -692,7 +692,7 @@ int fpregs_soft_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
- struct swregs_state *s387 = &target->thread.fpu.fpstate->regs.soft;
+ struct swregs_state *s387 = &x86_task_fpu(target)->fpstate->regs.soft;
const void *space = s387->st_space;
int offset = (S387->ftop & 7) * 10, other = 80 - offset;
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index eec3e4805c75..5e238e930fe3 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -73,7 +73,7 @@ static inline bool seg_writable(struct desc_struct *d)
return (d->type & SEG_TYPE_EXECUTE_MASK) == SEG_TYPE_WRITABLE;
}
-#define I387 (&current->thread.fpu.fpstate->regs)
+#define I387 (&x86_task_fpu(current)->fpstate->regs)
#define FPU_info (I387->soft.info)
#define FPU_CS (*(unsigned short *) &(FPU_info->regs->cs))
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 32035d5be5a0..5b9908f13dcf 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -3,12 +3,10 @@
KCOV_INSTRUMENT_tlb.o := n
KCOV_INSTRUMENT_mem_encrypt.o := n
KCOV_INSTRUMENT_mem_encrypt_amd.o := n
-KCOV_INSTRUMENT_mem_encrypt_identity.o := n
KCOV_INSTRUMENT_pgprot.o := n
KASAN_SANITIZE_mem_encrypt.o := n
KASAN_SANITIZE_mem_encrypt_amd.o := n
-KASAN_SANITIZE_mem_encrypt_identity.o := n
KASAN_SANITIZE_pgprot.o := n
# Disable KCSAN entirely, because otherwise we get warnings that some functions
@@ -16,12 +14,10 @@ KASAN_SANITIZE_pgprot.o := n
KCSAN_SANITIZE := n
# Avoid recursion by not calling KMSAN hooks for CEA code.
KMSAN_SANITIZE_cpu_entry_area.o := n
-KMSAN_SANITIZE_mem_encrypt_identity.o := n
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_mem_encrypt.o = -pg
CFLAGS_REMOVE_mem_encrypt_amd.o = -pg
-CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
CFLAGS_REMOVE_pgprot.o = -pg
endif
@@ -32,9 +28,6 @@ obj-y += pat/
# Make sure __phys_addr has no stackprotector
CFLAGS_physaddr.o := -fno-stack-protector
-CFLAGS_mem_encrypt_identity.o := -fno-stack-protector
-
-CFLAGS_fault.o := -I $(src)/../include/asm/trace
obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
@@ -52,7 +45,7 @@ obj-$(CONFIG_MMIOTRACE) += mmiotrace.o
mmiotrace-y := kmmio.o pf_in.o mmio-mod.o
obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
-obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o
+obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_AMD_NUMA) += amdtopology.o
obj-$(CONFIG_ACPI_NUMA) += srat.o
@@ -63,5 +56,4 @@ obj-$(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION) += pti.o
obj-$(CONFIG_X86_MEM_ENCRYPT) += mem_encrypt.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o
-obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 628833afee37..f980b0eb0105 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -25,7 +25,7 @@
#include <asm/numa.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
static unsigned char __initdata nodeids[8];
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 89079ea73e65..a4700ef6eb64 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -266,6 +266,32 @@ static void effective_prot(struct ptdump_state *pt_st, int level, u64 val)
st->prot_levels[level] = effective;
}
+static void effective_prot_pte(struct ptdump_state *st, pte_t pte)
+{
+ effective_prot(st, 4, pte_val(pte));
+}
+
+static void effective_prot_pmd(struct ptdump_state *st, pmd_t pmd)
+{
+ effective_prot(st, 3, pmd_val(pmd));
+}
+
+static void effective_prot_pud(struct ptdump_state *st, pud_t pud)
+{
+ effective_prot(st, 2, pud_val(pud));
+}
+
+static void effective_prot_p4d(struct ptdump_state *st, p4d_t p4d)
+{
+ effective_prot(st, 1, p4d_val(p4d));
+}
+
+static void effective_prot_pgd(struct ptdump_state *st, pgd_t pgd)
+{
+ effective_prot(st, 0, pgd_val(pgd));
+}
+
+
/*
* This function gets called on a break in a continuous series
* of PTE entries; the next one is different so we need to
@@ -362,6 +388,38 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
}
}
+static void note_page_pte(struct ptdump_state *pt_st, unsigned long addr, pte_t pte)
+{
+ note_page(pt_st, addr, 4, pte_val(pte));
+}
+
+static void note_page_pmd(struct ptdump_state *pt_st, unsigned long addr, pmd_t pmd)
+{
+ note_page(pt_st, addr, 3, pmd_val(pmd));
+}
+
+static void note_page_pud(struct ptdump_state *pt_st, unsigned long addr, pud_t pud)
+{
+ note_page(pt_st, addr, 2, pud_val(pud));
+}
+
+static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d)
+{
+ note_page(pt_st, addr, 1, p4d_val(p4d));
+}
+
+static void note_page_pgd(struct ptdump_state *pt_st, unsigned long addr, pgd_t pgd)
+{
+ note_page(pt_st, addr, 0, pgd_val(pgd));
+}
+
+static void note_page_flush(struct ptdump_state *pt_st)
+{
+ pte_t pte_zero = {0};
+
+ note_page(pt_st, 0, -1, pte_val(pte_zero));
+}
+
bool ptdump_walk_pgd_level_core(struct seq_file *m,
struct mm_struct *mm, pgd_t *pgd,
bool checkwx, bool dmesg)
@@ -378,8 +436,17 @@ bool ptdump_walk_pgd_level_core(struct seq_file *m,
struct pg_state st = {
.ptdump = {
- .note_page = note_page,
- .effective_prot = effective_prot,
+ .note_page_pte = note_page_pte,
+ .note_page_pmd = note_page_pmd,
+ .note_page_pud = note_page_pud,
+ .note_page_p4d = note_page_p4d,
+ .note_page_pgd = note_page_pgd,
+ .note_page_flush = note_page_flush,
+ .effective_prot_pte = effective_prot_pte,
+ .effective_prot_pmd = effective_prot_pmd,
+ .effective_prot_pud = effective_prot_pud,
+ .effective_prot_p4d = effective_prot_p4d,
+ .effective_prot_pgd = effective_prot_pgd,
.range = ptdump_ranges
},
.level = -1,
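The generic ptdump interface now hands out typed, per-level entries, and the x86 code adapts by keeping its level-indexed workers and registering thin wrappers. The adapter shape, modelled in standalone C with made-up entry types and callback names:

#include <stdio.h>

/* Stand-in entry types; the kernel uses pte_t/pmd_t/pud_t/p4d_t/pgd_t. */
typedef unsigned long pteval;
typedef unsigned long pmdval;

/* One typed callback per level instead of a single note_page(level, val);
 * the wrappers funnel everything back into the existing worker. */
struct walker_ops {
	void (*note_pte)(unsigned long addr, pteval pte);
	void (*note_pmd)(unsigned long addr, pmdval pmd);
	void (*flush)(void);
};

static void worker(unsigned long addr, int level, unsigned long val)
{
	printf("addr %#lx level %d val %#lx\n", addr, level, val);
}

static void note_pte(unsigned long addr, pteval pte) { worker(addr, 4, pte); }
static void note_pmd(unsigned long addr, pmdval pmd) { worker(addr, 3, pmd); }
static void flush(void) { worker(0, -1, 0); }

int main(void)
{
	struct walker_ops ops = {
		.note_pte = note_pte,
		.note_pmd = note_pmd,
		.flush = flush,
	};

	ops.note_pmd(0x400000, 0x83);	/* pretend page-table walk */
	ops.note_pte(0x401000, 0x101);
	ops.flush();			/* terminates the final run */
	return 0;
}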
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 51986e8a9d35..bf8dab18be97 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -111,7 +111,7 @@ static bool ex_handler_sgx(const struct exception_table_entry *fixup,
/*
* Handler for when we fail to restore a task's FPU state. We should never get
- * here because the FPU state of a task using the FPU (task->thread.fpu.state)
+ * here because the FPU state of a task using the FPU (struct fpu::fpstate)
* should always be valid. However, past bugs have allowed userspace to set
* reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
* These caused XRSTOR to fail when switching to the task, leaking the FPU
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 296d294142c8..998bd807fc7b 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -13,7 +13,6 @@
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
#include <linux/hugetlb.h> /* hstate_index_to_shift */
-#include <linux/prefetch.h> /* prefetchw */
#include <linux/context_tracking.h> /* exception_enter(), ... */
#include <linux/uaccess.h> /* faulthandler_disabled() */
#include <linux/efi.h> /* efi_crash_gracefully_on_page_fault()*/
@@ -38,7 +37,7 @@
#include <asm/sev.h> /* snp_dump_hva_rmpentry() */
#define CREATE_TRACE_POINTS
-#include <asm/trace/exceptions.h>
+#include <trace/events/exceptions.h>
/*
* Returns 0 if mmiotrace is disabled, or if the fault is not
@@ -1455,9 +1454,6 @@ static __always_inline void
trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
- if (!trace_pagefault_enabled())
- return;
-
if (user_mode(regs))
trace_page_fault_user(address, regs, error_code);
else
@@ -1496,8 +1492,6 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
address = cpu_feature_enabled(X86_FEATURE_FRED) ? fred_event_data(regs) : read_cr2();
- prefetchw(&current->mm->mmap_lock);
-
/*
* KVM uses #PF vector to deliver 'page not present' events to guests
* (asynchronous page fault mechanism). The event happens when a
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index bfa444a7dbb0..7456df985d96 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -28,6 +28,7 @@
#include <asm/text-patching.h>
#include <asm/memtype.h>
#include <asm/paravirt.h>
+#include <asm/mmu_context.h>
/*
* We need to define the tracepoints somewhere, and tlb.c
@@ -173,11 +174,7 @@ __ref void *alloc_low_pages(unsigned int num)
* randomization is enabled.
*/
-#ifndef CONFIG_X86_5LEVEL
-#define INIT_PGD_PAGE_TABLES 3
-#else
#define INIT_PGD_PAGE_TABLES 4
-#endif
#ifndef CONFIG_RANDOMIZE_MEMORY
#define INIT_PGD_PAGE_COUNT (2 * INIT_PGD_PAGE_TABLES)
@@ -824,31 +821,33 @@ void __init poking_init(void)
spinlock_t *ptl;
pte_t *ptep;
- poking_mm = mm_alloc();
- BUG_ON(!poking_mm);
+ text_poke_mm = mm_alloc();
+ BUG_ON(!text_poke_mm);
/* Xen PV guests need the PGD to be pinned. */
- paravirt_enter_mmap(poking_mm);
+ paravirt_enter_mmap(text_poke_mm);
+
+ set_notrack_mm(text_poke_mm);
/*
* Randomize the poking address, but make sure that the following page
* will be mapped at the same PMD. We need 2 pages, so find space for 3,
* and adjust the address if the PMD ends after the first one.
*/
- poking_addr = TASK_UNMAPPED_BASE;
+ text_poke_mm_addr = TASK_UNMAPPED_BASE;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
+ text_poke_mm_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
(TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);
- if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
- poking_addr += PAGE_SIZE;
+ if (((text_poke_mm_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
+ text_poke_mm_addr += PAGE_SIZE;
/*
* We need to trigger the allocation of the page-tables that will be
* needed for poking now. Later, poking may be performed in an atomic
* section, which might cause allocation to fail.
*/
- ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+ ptep = get_locked_pte(text_poke_mm, text_poke_mm_addr, &ptl);
BUG_ON(!ptep);
pte_unmap_unlock(ptep, ptl);
}
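The randomized poking base must keep two consecutive pages under one PMD, hence the bump by PAGE_SIZE when the second page would start exactly on a PMD boundary. A userspace check of that adjustment with x86-64 constants (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ull
#define PMD_SIZE  (512 * PAGE_SIZE)	/* 2 MiB on x86-64 */
#define PMD_MASK  (~(PMD_SIZE - 1))

/* Model of the adjustment above: the temporary text-poking mapping needs
 * two consecutive pages covered by the same PMD, so if the second page
 * would open a new PMD, slide the whole window up by one page. */
static uint64_t fit_two_pages_in_one_pmd(uint64_t addr)
{
	if (((addr + PAGE_SIZE) & ~PMD_MASK) == 0)
		addr += PAGE_SIZE;
	return addr;
}

int main(void)
{
	uint64_t straddles = PMD_SIZE - PAGE_SIZE;	/* last page of a PMD */
	uint64_t fine      = PMD_SIZE + 16 * PAGE_SIZE;	/* well inside a PMD */

	printf("%#llx -> %#llx\n", (unsigned long long)straddles,
	       (unsigned long long)fit_two_pages_in_one_pmd(straddles));
	printf("%#llx -> %#llx\n", (unsigned long long)fine,
	       (unsigned long long)fit_two_pages_in_one_pmd(fine));
	return 0;
}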
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index ad662cc4605c..607d6a2e66e2 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -30,6 +30,7 @@
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
+#include <linux/execmem.h>
#include <asm/asm.h>
#include <asm/bios_ebda.h>
@@ -565,7 +566,7 @@ static void __init lowmem_pfn_init(void)
"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"
#define MSG_HIGHMEM_TRIMMED \
- "Warning: only 4GB will be used. Support for for CONFIG_HIGHMEM64G was removed!\n"
+ "Warning: only 4GB will be used. Support for CONFIG_HIGHMEM64G was removed!\n"
/*
* We have more RAM than fits into lowmem - we try to put it into
* highmem, also taking the highmem=x boot parameter into account:
@@ -612,7 +613,6 @@ void __init find_low_pfn_range(void)
highmem_pfn_init();
}
-#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
@@ -633,12 +633,6 @@ void __init initmem_init(void)
printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
pages_to_mb(max_low_pfn));
- setup_bootmem_allocator();
-}
-#endif /* !CONFIG_NUMA */
-
-void __init setup_bootmem_allocator(void)
-{
printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
max_pfn_mapped<<PAGE_SHIFT);
printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
@@ -755,6 +749,8 @@ void mark_rodata_ro(void)
pr_info("Write protecting kernel text and read-only data: %luk\n",
size >> 10);
+ execmem_cache_make_ro();
+
kernel_set_to_readonly = 1;
#ifdef CONFIG_CPA_DEBUG
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 7c4f6f591f2b..ee66fae9ebcc 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -34,6 +34,7 @@
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/bootmem_info.h>
+#include <linux/execmem.h>
#include <asm/processor.h>
#include <asm/bios_ebda.h>
@@ -805,12 +806,17 @@ kernel_physical_mapping_change(unsigned long paddr_start,
}
#ifndef CONFIG_NUMA
-void __init initmem_init(void)
+static inline void x86_numa_init(void)
{
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
#endif
+void __init initmem_init(void)
+{
+ x86_numa_init();
+}
+
void __init paging_init(void)
{
sparse_init();
@@ -827,7 +833,6 @@ void __init paging_init(void)
zone_sizes_init();
}
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
#define PAGE_UNUSED 0xFD
/*
@@ -926,7 +931,6 @@ static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long
if (!IS_ALIGNED(end, PMD_SIZE))
unused_pmd_start = end;
}
-#endif
/*
* Memory hotplug specific functions
@@ -1146,16 +1150,13 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
pmd_clear(pmd);
spin_unlock(&init_mm.page_table_lock);
pages++;
- }
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
- else if (vmemmap_pmd_is_unused(addr, next)) {
+ } else if (vmemmap_pmd_is_unused(addr, next)) {
free_hugepage_table(pmd_page(*pmd),
altmap);
spin_lock(&init_mm.page_table_lock);
pmd_clear(pmd);
spin_unlock(&init_mm.page_table_lock);
}
-#endif
continue;
}
@@ -1391,6 +1392,8 @@ void mark_rodata_ro(void)
(end - start) >> 10);
set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+ execmem_cache_make_ro();
+
kernel_set_to_readonly = 1;
/*
@@ -1464,16 +1467,21 @@ static unsigned long probe_memory_block_size(void)
}
/*
- * Use max block size to minimize overhead on bare metal, where
- * alignment for memory hotplug isn't a concern.
+ * When hotplug alignment is not a concern, maximize the block size
+ * to minimize overhead. Otherwise, align to the lesser of the advised
+ * maximum and the end-of-memory alignment.
*/
- if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+ bz = memory_block_advised_max_size();
+ if (!bz) {
bz = MAX_BLOCK_SIZE;
- goto done;
+ if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+ goto done;
+ } else {
+ bz = max(min(bz, MAX_BLOCK_SIZE), MIN_MEMORY_BLOCK_SIZE);
}
/* Find the largest allowed block size that aligns to memory end */
- for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
+ for (; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
if (IS_ALIGNED(boot_mem_end, bz))
break;
}
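
The new probe_memory_block_size() flow above starts from an advised maximum when one is provided, clamps it to the supported range, and then halves the size until it divides the end of memory. A simplified standalone sketch of that selection (the bare-metal shortcut that skips the alignment loop is omitted; MIN_BLOCK_SIZE and MAX_BLOCK_SIZE are assumed values):

#include <stdint.h>
#include <stdio.h>

#define MIN_BLOCK_SIZE	(128UL << 20)	/* assumed: 128 MiB */
#define MAX_BLOCK_SIZE	(2UL << 30)	/* assumed: 2 GiB */

static uint64_t pick_block_size(uint64_t advised, uint64_t boot_mem_end)
{
	uint64_t bz = advised ? advised : MAX_BLOCK_SIZE;

	if (bz > MAX_BLOCK_SIZE)
		bz = MAX_BLOCK_SIZE;
	if (bz < MIN_BLOCK_SIZE)
		bz = MIN_BLOCK_SIZE;

	/* Largest candidate that still aligns with the end of memory. */
	for (; bz > MIN_BLOCK_SIZE; bz >>= 1)
		if (boot_mem_end % bz == 0)
			break;
	return bz;
}

int main(void)
{
	/* 6 GiB of memory, no advice: 2 GiB fails, 1 GiB aligns. */
	printf("%#lx\n", (unsigned long)pick_block_size(0, 0x180000000UL));
	return 0;
}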
@@ -1492,7 +1500,6 @@ unsigned long memory_block_size_bytes(void)
return memory_block_size_probed;
}
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
* Initialise the sparsemem vmemmap using huge-pages at the PMD level.
*/
@@ -1639,4 +1646,3 @@ void __meminit vmemmap_populate_print_last(void)
node_start = 0;
}
}
-#endif
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 331e101bf801..12c8180ca1ba 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -71,7 +71,7 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
static unsigned int __ioremap_check_ram(struct resource *res)
{
unsigned long start_pfn, stop_pfn;
- unsigned long i;
+ unsigned long pfn;
if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
return 0;
@@ -79,9 +79,8 @@ static unsigned int __ioremap_check_ram(struct resource *res)
start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
stop_pfn = (res->end + 1) >> PAGE_SHIFT;
if (stop_pfn > start_pfn) {
- for (i = 0; i < (stop_pfn - start_pfn); ++i)
- if (pfn_valid(start_pfn + i) &&
- !PageReserved(pfn_to_page(start_pfn + i)))
+ for_each_valid_pfn(pfn, start_pfn, stop_pfn)
+ if (!PageReserved(pfn_to_page(pfn)))
return IORES_MAP_SYSTEM_RAM;
}
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 7490ff6d83b1..faf3a13fb6ba 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -40,7 +40,9 @@
* section is later cleared.
*/
u64 sme_me_mask __section(".data") = 0;
+SYM_PIC_ALIAS(sme_me_mask);
u64 sev_status __section(".data") = 0;
+SYM_PIC_ALIAS(sev_status);
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);
diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 3f37b5c80bb3..097aadc250f7 100644
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -25,4 +25,8 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
extern unsigned long tlb_single_page_flush_ceiling;
+#ifdef CONFIG_NUMA
+void __init x86_numa_init(void);
+#endif
+
#endif /* __X86_MM_INTERNAL_H */
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 64e5cdb2460a..c24890c40138 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -18,9 +18,10 @@
#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
-#include <asm/amd_nb.h>
+#include <asm/numa.h>
+#include <asm/amd/nb.h>
-#include "numa_internal.h"
+#include "mm_internal.h"
int numa_off;
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
deleted file mode 100644
index 65fda406e6f2..000000000000
--- a/arch/x86/mm/numa_32.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation
- * August 2002: added remote node KVA remap - Martin J. Bligh
- *
- * Copyright (C) 2002, IBM Corp.
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/memblock.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <asm/pgtable_areas.h>
-
-#include "numa_internal.h"
-
-extern unsigned long highend_pfn, highstart_pfn;
-
-void __init initmem_init(void)
-{
- x86_numa_init();
-
-#ifdef CONFIG_HIGHMEM
- highstart_pfn = highend_pfn = max_pfn;
- if (max_pfn > max_low_pfn)
- highstart_pfn = max_low_pfn;
- printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
- pages_to_mb(highend_pfn - highstart_pfn));
- high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
-#else
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
-#endif
- printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
- pages_to_mb(max_low_pfn));
- printk(KERN_DEBUG "max_low_pfn = %lx, highstart_pfn = %lx\n",
- max_low_pfn, highstart_pfn);
-
- printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n",
- (ulong) pfn_to_kaddr(max_low_pfn));
-
- printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
- (ulong) pfn_to_kaddr(highstart_pfn));
-
- __vmalloc_start_set = true;
- setup_bootmem_allocator();
-}
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
deleted file mode 100644
index 59d80160fa5a..000000000000
--- a/arch/x86/mm/numa_64.c
+++ /dev/null
@@ -1,13 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Generic VM initialization for x86-64 NUMA setups.
- * Copyright 2002,2003 Andi Kleen, SuSE Labs.
- */
-#include <linux/memblock.h>
-
-#include "numa_internal.h"
-
-void __init initmem_init(void)
-{
- x86_numa_init();
-}
diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h
deleted file mode 100644
index 11e1ff370c10..000000000000
--- a/arch/x86/mm/numa_internal.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __X86_MM_NUMA_INTERNAL_H
-#define __X86_MM_NUMA_INTERNAL_H
-
-#include <linux/types.h>
-#include <asm/numa.h>
-
-void __init x86_numa_init(void);
-
-#endif /* __X86_MM_NUMA_INTERNAL_H */
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 72d8cbc61158..2e7923844afe 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -38,6 +38,7 @@
#include <linux/kernel.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/fs.h>
@@ -232,7 +233,7 @@ void pat_cpu_init(void)
panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
}
- wrmsrl(MSR_IA32_CR_PAT, pat_msr_val);
+ wrmsrq(MSR_IA32_CR_PAT, pat_msr_val);
__flush_tlb_all();
}
@@ -256,7 +257,7 @@ void __init pat_bp_init(void)
if (!cpu_feature_enabled(X86_FEATURE_PAT))
pat_disable("PAT not supported by the CPU.");
else
- rdmsrl(MSR_IA32_CR_PAT, pat_msr_val);
+ rdmsrq(MSR_IA32_CR_PAT, pat_msr_val);
if (!pat_msr_val) {
pat_disable("PAT support disabled by the firmware.");
@@ -682,6 +683,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
/**
* pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type
* of @pfn cannot be overridden by UC MTRR memory type.
+ * @pfn: The page frame number to check.
*
* Only to be called when PAT is enabled.
*
@@ -773,46 +775,27 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
return vma_prot;
}
-#ifdef CONFIG_STRICT_DEVMEM
-/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
-static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+static inline void pgprot_set_cachemode(pgprot_t *prot, enum page_cache_mode pcm)
{
- return 1;
-}
-#else
-/* This check is needed to avoid cache aliasing when PAT is enabled */
-static inline int range_is_allowed(unsigned long pfn, unsigned long size)
-{
- u64 from = ((u64)pfn) << PAGE_SHIFT;
- u64 to = from + size;
- u64 cursor = from;
-
- if (!pat_enabled())
- return 1;
-
- while (cursor < to) {
- if (!devmem_is_allowed(pfn))
- return 0;
- cursor += PAGE_SIZE;
- pfn++;
- }
- return 1;
+ *prot = __pgprot((pgprot_val(*prot) & ~_PAGE_CACHE_MASK) |
+ cachemode2protval(pcm));
}
-#endif /* CONFIG_STRICT_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t *vma_prot)
{
enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;
+ if (!pat_enabled())
+ return 1;
+
if (!range_is_allowed(pfn, size))
return 0;
if (file->f_flags & O_DSYNC)
pcm = _PAGE_CACHE_MODE_UC_MINUS;
- *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
- cachemode2protval(pcm));
+ pgprot_set_cachemode(vma_prot, pcm);
return 1;
}
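
pgprot_set_cachemode(), introduced above, replaces the repeated open-coded masking: clear the cache-mode bits of a protection value and OR in the encoding of the requested mode. A tiny standalone model of that bit manipulation (CACHE_MASK, the mode values, and the shift are illustrative assumptions, not the real _PAGE_CACHE_MASK layout):

#include <stdint.h>
#include <stdio.h>

#define CACHE_MASK	0x18UL	/* assumed: bits 3-4 hold the cache mode */

enum cache_mode { MODE_WB = 0, MODE_WC = 1, MODE_UC_MINUS = 2, MODE_UC = 3 };

static uint64_t set_cachemode(uint64_t prot, enum cache_mode mode)
{
	/* Drop the old cache-mode field, then insert the new one. */
	return (prot & ~CACHE_MASK) | ((uint64_t)mode << 3);
}

int main(void)
{
	uint64_t prot = 0x67;	/* arbitrary starting protection bits */

	printf("%#lx\n", (unsigned long)set_cachemode(prot, MODE_WC));
	return 0;
}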
@@ -853,8 +836,7 @@ int memtype_kernel_map_sync(u64 base, unsigned long size,
* Reserved non RAM regions only and after successful memtype_reserve,
* this func also keeps identity mapping (if any) in sync with this new prot.
*/
-static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
- int strict_prot)
+static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot)
{
int is_ram = 0;
int ret;
@@ -880,9 +862,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
(unsigned long long)paddr,
(unsigned long long)(paddr + size - 1),
cattr_name(pcm));
- *vma_prot = __pgprot((pgprot_val(*vma_prot) &
- (~_PAGE_CACHE_MASK)) |
- cachemode2protval(pcm));
+ pgprot_set_cachemode(vma_prot, pcm);
}
return 0;
}
@@ -892,8 +872,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
return ret;
if (pcm != want_pcm) {
- if (strict_prot ||
- !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
+ if (!is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
memtype_free(paddr, paddr + size);
pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
current->comm, current->pid,
@@ -903,13 +882,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
cattr_name(pcm));
return -EINVAL;
}
- /*
- * We allow returning different type than the one requested in
- * non strict case.
- */
- *vma_prot = __pgprot((pgprot_val(*vma_prot) &
- (~_PAGE_CACHE_MASK)) |
- cachemode2protval(pcm));
+ pgprot_set_cachemode(vma_prot, pcm);
}
if (memtype_kernel_map_sync(paddr, size, pcm) < 0) {
@@ -932,124 +905,14 @@ static void free_pfn_range(u64 paddr, unsigned long size)
memtype_free(paddr, paddr + size);
}
-static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
- resource_size_t *phys)
-{
- struct follow_pfnmap_args args = { .vma = vma, .address = vma->vm_start };
-
- if (follow_pfnmap_start(&args))
- return -EINVAL;
-
- /* Never return PFNs of anon folios in COW mappings. */
- if (!args.special) {
- follow_pfnmap_end(&args);
- return -EINVAL;
- }
-
- *prot = pgprot_val(args.pgprot);
- *phys = (resource_size_t)args.pfn << PAGE_SHIFT;
- follow_pfnmap_end(&args);
- return 0;
-}
-
-static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
- pgprot_t *pgprot)
-{
- unsigned long prot;
-
- VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT));
-
- /*
- * We need the starting PFN and cachemode used for track_pfn_remap()
- * that covered the whole VMA. For most mappings, we can obtain that
- * information from the page tables. For COW mappings, we might now
- * suddenly have anon folios mapped and follow_phys() will fail.
- *
- * Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to
- * detect the PFN. If we need the cachemode as well, we're out of luck
- * for now and have to fail fork().
- */
- if (!follow_phys(vma, &prot, paddr)) {
- if (pgprot)
- *pgprot = __pgprot(prot);
- return 0;
- }
- if (is_cow_mapping(vma->vm_flags)) {
- if (pgprot)
- return -EINVAL;
- *paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
- return 0;
- }
- WARN_ON_ONCE(1);
- return -EINVAL;
-}
-
-int track_pfn_copy(struct vm_area_struct *dst_vma,
- struct vm_area_struct *src_vma, unsigned long *pfn)
-{
- const unsigned long vma_size = src_vma->vm_end - src_vma->vm_start;
- resource_size_t paddr;
- pgprot_t pgprot;
- int rc;
-
- if (!(src_vma->vm_flags & VM_PAT))
- return 0;
-
- /*
- * Duplicate the PAT information for the dst VMA based on the src
- * VMA.
- */
- if (get_pat_info(src_vma, &paddr, &pgprot))
- return -EINVAL;
- rc = reserve_pfn_range(paddr, vma_size, &pgprot, 1);
- if (rc)
- return rc;
-
- /* Reservation for the destination VMA succeeded. */
- vm_flags_set(dst_vma, VM_PAT);
- *pfn = PHYS_PFN(paddr);
- return 0;
-}
-
-void untrack_pfn_copy(struct vm_area_struct *dst_vma, unsigned long pfn)
-{
- untrack_pfn(dst_vma, pfn, dst_vma->vm_end - dst_vma->vm_start, true);
- /*
- * Reservation was freed, any copied page tables will get cleaned
- * up later, but without getting PAT involved again.
- */
-}
-
-/*
- * prot is passed in as a parameter for the new mapping. If the vma has
- * a linear pfn mapping for the entire range, or no vma is provided,
- * reserve the entire pfn + size range with single reserve_pfn_range
- * call.
- */
-int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
- unsigned long pfn, unsigned long addr, unsigned long size)
+int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, pgprot_t *prot)
{
resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
enum page_cache_mode pcm;
- /* reserve the whole chunk starting from paddr */
- if (!vma || (addr == vma->vm_start
- && size == (vma->vm_end - vma->vm_start))) {
- int ret;
-
- ret = reserve_pfn_range(paddr, size, prot, 0);
- if (ret == 0 && vma)
- vm_flags_set(vma, VM_PAT);
- return ret;
- }
-
if (!pat_enabled())
return 0;
- /*
- * For anything smaller than the vma size we set prot based on the
- * lookup.
- */
pcm = lookup_memtype(paddr);
/* Check memtype for the remaining pages */
@@ -1060,70 +923,35 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
return -EINVAL;
}
- *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
- cachemode2protval(pcm));
-
+ pgprot_set_cachemode(prot, pcm);
return 0;
}
-void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
+int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot)
{
- enum page_cache_mode pcm;
-
- if (!pat_enabled())
- return;
+ const resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
- /* Set prot based on lookup */
- pcm = lookup_memtype(pfn_t_to_phys(pfn));
- *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
- cachemode2protval(pcm));
+ return reserve_pfn_range(paddr, size, prot);
}
-/*
- * untrack_pfn is called while unmapping a pfnmap for a region.
- * untrack can be called for a specific region indicated by pfn and size or
- * can be for the entire vma (in which case pfn, size are zero).
- */
-void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
- unsigned long size, bool mm_wr_locked)
+void pfnmap_untrack(unsigned long pfn, unsigned long size)
{
- resource_size_t paddr;
-
- if (vma && !(vma->vm_flags & VM_PAT))
- return;
+ const resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
- /* free the chunk starting from pfn or the whole chunk */
- paddr = (resource_size_t)pfn << PAGE_SHIFT;
- if (!paddr && !size) {
- if (get_pat_info(vma, &paddr, NULL))
- return;
- size = vma->vm_end - vma->vm_start;
- }
free_pfn_range(paddr, size);
- if (vma) {
- if (mm_wr_locked)
- vm_flags_clear(vma, VM_PAT);
- else
- __vm_flags_mod(vma, 0, VM_PAT);
- }
-}
-
-void untrack_pfn_clear(struct vm_area_struct *vma)
-{
- vm_flags_clear(vma, VM_PAT);
}
pgprot_t pgprot_writecombine(pgprot_t prot)
{
- return __pgprot(pgprot_val(prot) |
- cachemode2protval(_PAGE_CACHE_MODE_WC));
+ pgprot_set_cachemode(&prot, _PAGE_CACHE_MODE_WC);
+ return prot;
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
pgprot_t pgprot_writethrough(pgprot_t prot)
{
- return __pgprot(pgprot_val(prot) |
- cachemode2protval(_PAGE_CACHE_MODE_WT));
+ pgprot_set_cachemode(&prot, _PAGE_CACHE_MODE_WT);
+ return prot;
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);
diff --git a/arch/x86/mm/pat/memtype_interval.c b/arch/x86/mm/pat/memtype_interval.c
index 645613d59942..e5844ed1311e 100644
--- a/arch/x86/mm/pat/memtype_interval.c
+++ b/arch/x86/mm/pat/memtype_interval.c
@@ -49,32 +49,6 @@ INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end,
static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED;
-enum {
- MEMTYPE_EXACT_MATCH = 0,
- MEMTYPE_END_MATCH = 1
-};
-
-static struct memtype *memtype_match(u64 start, u64 end, int match_type)
-{
- struct memtype *entry_match;
-
- entry_match = interval_iter_first(&memtype_rbroot, start, end-1);
-
- while (entry_match != NULL && entry_match->start < end) {
- if ((match_type == MEMTYPE_EXACT_MATCH) &&
- (entry_match->start == start) && (entry_match->end == end))
- return entry_match;
-
- if ((match_type == MEMTYPE_END_MATCH) &&
- (entry_match->start < start) && (entry_match->end == end))
- return entry_match;
-
- entry_match = interval_iter_next(entry_match, start, end-1);
- }
-
- return NULL; /* Returns NULL if there is no match */
-}
-
static int memtype_check_conflict(u64 start, u64 end,
enum page_cache_mode reqtype,
enum page_cache_mode *newtype)
@@ -130,35 +104,16 @@ int memtype_check_insert(struct memtype *entry_new, enum page_cache_mode *ret_ty
struct memtype *memtype_erase(u64 start, u64 end)
{
- struct memtype *entry_old;
-
- /*
- * Since the memtype_rbroot tree allows overlapping ranges,
- * memtype_erase() checks with EXACT_MATCH first, i.e. free
- * a whole node for the munmap case. If no such entry is found,
- * it then checks with END_MATCH, i.e. shrink the size of a node
- * from the end for the mremap case.
- */
- entry_old = memtype_match(start, end, MEMTYPE_EXACT_MATCH);
- if (!entry_old) {
- entry_old = memtype_match(start, end, MEMTYPE_END_MATCH);
- if (!entry_old)
- return ERR_PTR(-EINVAL);
+ struct memtype *entry = interval_iter_first(&memtype_rbroot, start, end - 1);
+
+ while (entry && entry->start < end) {
+ if (entry->start == start && entry->end == end) {
+ interval_remove(entry, &memtype_rbroot);
+ return entry;
+ }
+ entry = interval_iter_next(entry, start, end - 1);
}
-
- if (entry_old->start == start) {
- /* munmap: erase this node */
- interval_remove(entry_old, &memtype_rbroot);
- } else {
- /* mremap: update the end value of this node */
- interval_remove(entry_old, &memtype_rbroot);
- entry_old->end = start;
- interval_insert(entry_old, &memtype_rbroot);
-
- return NULL;
- }
-
- return entry_old;
+ return ERR_PTR(-EINVAL);
}
struct memtype *memtype_lookup(u64 addr)
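
The rewritten memtype_erase() above drops the mremap end-match shrink and keeps only the exact-match case: walk the overlapping entries and remove the one whose bounds match the request exactly, otherwise report an error. A standalone model with a flat array standing in for the interval tree (ranges are treated as half-open [start, end)):

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; int live; };

static struct range *erase_exact(struct range *tbl, int n,
				 uint64_t start, uint64_t end)
{
	for (int i = 0; i < n; i++) {
		if (!tbl[i].live)
			continue;
		/* Skip entries that do not overlap the request. */
		if (tbl[i].end <= start || tbl[i].start >= end)
			continue;
		if (tbl[i].start == start && tbl[i].end == end) {
			tbl[i].live = 0;	/* models interval_remove() */
			return &tbl[i];
		}
	}
	return NULL;			/* no exact match: caller sees an error */
}

int main(void)
{
	struct range tbl[] = { { 0x1000, 0x2000, 1 }, { 0x2000, 0x4000, 1 } };

	printf("%s\n", erase_exact(tbl, 2, 0x2000, 0x4000) ? "erased" : "none");
	return 0;
}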
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index def3d9284254..46edc11726b7 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -889,7 +889,7 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
/* change init_mm */
set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
- if (!SHARED_KERNEL_PMD) {
+ {
struct page *page;
list_for_each_entry(page, &pgd_list, lru) {
@@ -1293,7 +1293,7 @@ static int collapse_pmd_page(pmd_t *pmd, unsigned long addr,
/* Queue the page table to be freed after TLB flush */
list_add(&page_ptdesc(pmd_page(old_pmd))->pt_list, pgtables);
- if (IS_ENABLED(CONFIG_X86_32) && !SHARED_KERNEL_PMD) {
+ if (IS_ENABLED(CONFIG_X86_32)) {
struct page *page;
/* Update all PGD tables to use the same large page */
@@ -2148,6 +2148,19 @@ static inline int cpa_clear_pages_array(struct page **pages, int numpages,
CPA_PAGES_ARRAY, pages);
}
+/*
+ * __set_memory_prot is an internal helper for callers that have been passed
+ * a pgprot_t value from upper layers and a reservation has already been taken.
+ * If you want to set the pgprot to a specific page protection, use the
+ * set_memory_xx() functions.
+ */
+int __set_memory_prot(unsigned long addr, int numpages, pgprot_t prot)
+{
+ return change_page_attr_set_clr(&addr, numpages, prot,
+ __pgprot(~pgprot_val(prot)), 0, 0,
+ NULL);
+}
+
int _set_memory_uc(unsigned long addr, int numpages)
{
/*
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index a05fcddfc811..ddf248c3ee7d 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -10,6 +10,7 @@
#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
+SYM_PIC_ALIAS(physical_mask);
#endif
pgtable_t pte_alloc_one(struct mm_struct *mm)
@@ -68,12 +69,6 @@ static inline void pgd_list_del(pgd_t *pgd)
list_del(&ptdesc->pt_list);
}
-#define UNSHARED_PTRS_PER_PGD \
- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
-#define MAX_UNSHARED_PTRS_PER_PGD \
- MAX_T(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
-
-
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
virt_to_ptdesc(pgd)->pt_mm = mm;
@@ -86,29 +81,19 @@ struct mm_struct *pgd_page_get_mm(struct page *page)
static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
- /* If the pgd points to a shared pagetable level (either the
- ptes in non-PAE, or shared PMD in PAE), then just copy the
- references from swapper_pg_dir. */
- if (CONFIG_PGTABLE_LEVELS == 2 ||
- (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
- CONFIG_PGTABLE_LEVELS >= 4) {
+ /* PAE preallocates all its PMDs. No cloning needed. */
+ if (!IS_ENABLED(CONFIG_X86_PAE))
clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
swapper_pg_dir + KERNEL_PGD_BOUNDARY,
KERNEL_PGD_PTRS);
- }
- /* list required to sync kernel mapping updates */
- if (!SHARED_KERNEL_PMD) {
- pgd_set_mm(pgd, mm);
- pgd_list_add(pgd);
- }
+ /* List used to sync kernel mapping updates */
+ pgd_set_mm(pgd, mm);
+ pgd_list_add(pgd);
}
static void pgd_dtor(pgd_t *pgd)
{
- if (SHARED_KERNEL_PMD)
- return;
-
spin_lock(&pgd_lock);
pgd_list_del(pgd);
spin_unlock(&pgd_lock);
@@ -132,15 +117,15 @@ static void pgd_dtor(pgd_t *pgd)
* processor notices the update. Since this is expensive, and
* all 4 top-level entries are used almost immediately in a
* new process's life, we just pre-populate them here.
- *
- * Also, if we're in a paravirt environment where the kernel pmd is
- * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
- * and initialize the kernel pmds here.
*/
-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
-#define MAX_PREALLOCATED_PMDS MAX_UNSHARED_PTRS_PER_PGD
+#define PREALLOCATED_PMDS PTRS_PER_PGD
/*
+ * "USER_PMDS" are the PMDs for the user copy of the page tables when
+ * PTI is enabled. They do not exist when PTI is disabled. Note that
+ * this is distinct from the user _portion_ of the kernel page tables
+ * which always exists.
+ *
* We allocate separate PMDs for the kernel part of the user page-table
* when PTI is enabled. We need them to map the per-process LDT into the
* user-space page-table.
@@ -169,7 +154,6 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS 0
-#define MAX_PREALLOCATED_PMDS 0
#define PREALLOCATED_USER_PMDS 0
#define MAX_PREALLOCATED_USER_PMDS 0
#endif /* CONFIG_X86_PAE */
@@ -205,7 +189,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
if (!ptdesc)
failed = true;
- if (ptdesc && !pagetable_pmd_ctor(ptdesc)) {
+ if (ptdesc && !pagetable_pmd_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
ptdesc = NULL;
failed = true;
@@ -318,82 +302,28 @@ static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
{
}
#endif
-/*
- * Xen paravirt assumes pgd table should be in one page. 64 bit kernel also
- * assumes that pgd should be in one page.
- *
- * But kernel with PAE paging that is not running as a Xen domain
- * only needs to allocate 32 bytes for pgd instead of one page.
- */
-#ifdef CONFIG_X86_PAE
-
-#include <linux/slab.h>
-
-#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
-#define PGD_ALIGN 32
-
-static struct kmem_cache *pgd_cache;
-
-void __init pgtable_cache_init(void)
-{
- /*
- * When PAE kernel is running as a Xen domain, it does not use
- * shared kernel pmd. And this requires a whole page for pgd.
- */
- if (!SHARED_KERNEL_PMD)
- return;
-
- /*
- * when PAE kernel is not running as a Xen domain, it uses
- * shared kernel pmd. Shared kernel pmd does not require a whole
- * page for pgd. We are able to just allocate a 32-byte for pgd.
- * During boot time, we create a 32-byte slab for pgd table allocation.
- */
- pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
- SLAB_PANIC, NULL);
-}
static inline pgd_t *_pgd_alloc(struct mm_struct *mm)
{
/*
- * If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen domain.
- * We allocate one page for pgd.
- */
- if (!SHARED_KERNEL_PMD)
- return __pgd_alloc(mm, PGD_ALLOCATION_ORDER);
-
- /*
- * Now PAE kernel is not running as a Xen domain. We can allocate
- * a 32-byte slab for pgd to save memory space.
+ * PTI and Xen need a whole page for the PAE PGD
+ * even though the hardware only needs 32 bytes.
+ *
+ * For simplicity, allocate a page for all users.
*/
- return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
-}
-
-static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
- if (!SHARED_KERNEL_PMD)
- __pgd_free(mm, pgd);
- else
- kmem_cache_free(pgd_cache, pgd);
-}
-#else
-
-static inline pgd_t *_pgd_alloc(struct mm_struct *mm)
-{
- return __pgd_alloc(mm, PGD_ALLOCATION_ORDER);
+ return __pgd_alloc(mm, pgd_allocation_order());
}
static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
__pgd_free(mm, pgd);
}
-#endif /* CONFIG_X86_PAE */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
- pmd_t *pmds[MAX_PREALLOCATED_PMDS];
+ pmd_t *pmds[PREALLOCATED_PMDS];
pgd = _pgd_alloc(mm);
@@ -613,11 +543,11 @@ pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
#endif
/**
- * reserve_top_address - reserves a hole in the top of kernel address space
- * @reserve - size of hole to reserve
+ * reserve_top_address - Reserve a hole in the top of the kernel address space
+ * @reserve: Size of hole to reserve
*
* Can be used to relocate the fixmap area and poke a hole in the top
- * of kernel address space to make room for a hypervisor.
+ * of the kernel address space to make room for a hypervisor.
*/
void __init reserve_top_address(unsigned long reserve)
{
@@ -662,9 +592,12 @@ void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
}
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-#ifdef CONFIG_X86_5LEVEL
+#if CONFIG_PGTABLE_LEVELS > 4
/**
- * p4d_set_huge - setup kernel P4D mapping
+ * p4d_set_huge - Set up kernel P4D mapping
+ * @p4d: Pointer to the P4D entry
+ * @addr: Virtual address associated with the P4D entry
+ * @prot: Protection bits to use
*
* No 512GB pages yet -- always return 0
*/
@@ -674,9 +607,10 @@ int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
}
/**
- * p4d_clear_huge - clear kernel P4D mapping when it is set
+ * p4d_clear_huge - Clear kernel P4D mapping when it is set
+ * @p4d: Pointer to the P4D entry to clear
*
- * No 512GB pages yet -- always return 0
+ * No 512GB pages yet -- do nothing
*/
void p4d_clear_huge(p4d_t *p4d)
{
@@ -684,7 +618,10 @@ void p4d_clear_huge(p4d_t *p4d)
#endif
/**
- * pud_set_huge - setup kernel PUD mapping
+ * pud_set_huge - Set up kernel PUD mapping
+ * @pud: Pointer to the PUD entry
+ * @addr: Virtual address associated with the PUD entry
+ * @prot: Protection bits to use
*
* MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
* function sets up a huge page only if the complete range has the same MTRR
@@ -715,7 +652,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
}
/**
- * pmd_set_huge - setup kernel PMD mapping
+ * pmd_set_huge - Set up kernel PMD mapping
+ * @pmd: Pointer to the PMD entry
+ * @addr: Virtual address associated with the PMD entry
+ * @prot: Protection bits to use
*
* See text over pud_set_huge() above.
*
@@ -744,7 +684,8 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
}
/**
- * pud_clear_huge - clear kernel PUD mapping when it is set
+ * pud_clear_huge - Clear kernel PUD mapping when it is set
+ * @pud: Pointer to the PUD entry to clear.
*
* Returns 1 on success and 0 on failure (no PUD map is found).
*/
@@ -759,7 +700,8 @@ int pud_clear_huge(pud_t *pud)
}
/**
- * pmd_clear_huge - clear kernel PMD mapping when it is set
+ * pmd_clear_huge - Clear kernel PMD mapping when it is set
+ * @pmd: Pointer to the PMD entry to clear.
*
* Returns 1 on success and 0 on failure (no PMD map is found).
*/
@@ -775,11 +717,11 @@ int pmd_clear_huge(pmd_t *pmd)
#ifdef CONFIG_X86_64
/**
- * pud_free_pmd_page - Clear pud entry and free pmd page.
- * @pud: Pointer to a PUD.
- * @addr: Virtual address associated with pud.
+ * pud_free_pmd_page - Clear PUD entry and free PMD page
+ * @pud: Pointer to a PUD
+ * @addr: Virtual address associated with PUD
*
- * Context: The pud range has been unmapped and TLB purged.
+ * Context: The PUD range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
*
* NOTE: Callers must allow a single page allocation.
@@ -809,24 +751,23 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
for (i = 0; i < PTRS_PER_PMD; i++) {
if (!pmd_none(pmd_sv[i])) {
pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
- free_page((unsigned long)pte);
+ pte_free_kernel(&init_mm, pte);
}
}
free_page((unsigned long)pmd_sv);
- pagetable_dtor(virt_to_ptdesc(pmd));
- free_page((unsigned long)pmd);
+ pmd_free(&init_mm, pmd);
return 1;
}
/**
- * pmd_free_pte_page - Clear pmd entry and free pte page.
- * @pmd: Pointer to a PMD.
- * @addr: Virtual address associated with pmd.
+ * pmd_free_pte_page - Clear PMD entry and free PTE page.
+ * @pmd: Pointer to the PMD
+ * @addr: Virtual address associated with PMD
*
- * Context: The pmd range has been unmapped and TLB purged.
+ * Context: The PMD range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
*/
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
@@ -839,7 +780,7 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
/* INVLPG to clear all paging-structure caches */
flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
- free_page((unsigned long)pte);
+ pte_free_kernel(&init_mm, pte);
return 1;
}
@@ -848,7 +789,7 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
/*
* Disable free page handling on x86-PAE. This assures that ioremap()
- * does not update sync'd pmd entries. See vmalloc_sync_one().
+ * does not update sync'd PMD entries. See vmalloc_sync_one().
*/
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 5f0d579932c6..190299834011 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -185,7 +185,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
}
- BUILD_BUG_ON(pgd_leaf(*pgd) != 0);
+ BUILD_BUG_ON(pgd_leaf(*pgd));
return p4d_offset(pgd, address);
}
@@ -206,7 +206,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
if (!p4d)
return NULL;
- BUILD_BUG_ON(p4d_leaf(*p4d) != 0);
+ BUILD_BUG_ON(p4d_leaf(*p4d));
if (p4d_none(*p4d)) {
unsigned long new_pud_page = __get_free_page(gfp);
if (WARN_ON_ONCE(!new_pud_page))
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index e459d97ef397..39f80111e6f1 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -19,6 +19,7 @@
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/apic.h>
+#include <asm/msr.h>
#include <asm/perf_event.h>
#include <asm/tlb.h>
@@ -215,16 +216,20 @@ static void clear_asid_other(void)
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
+struct new_asid {
+ unsigned int asid : 16;
+ unsigned int need_flush : 1;
+};
-static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
- u16 *new_asid, bool *need_flush)
+static struct new_asid choose_new_asid(struct mm_struct *next, u64 next_tlb_gen)
{
+ struct new_asid ns;
u16 asid;
if (!static_cpu_has(X86_FEATURE_PCID)) {
- *new_asid = 0;
- *need_flush = true;
- return;
+ ns.asid = 0;
+ ns.need_flush = 1;
+ return ns;
}
/*
@@ -235,9 +240,9 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
u16 global_asid = mm_global_asid(next);
if (global_asid) {
- *new_asid = global_asid;
- *need_flush = false;
- return;
+ ns.asid = global_asid;
+ ns.need_flush = 0;
+ return ns;
}
}
@@ -249,22 +254,23 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
next->context.ctx_id)
continue;
- *new_asid = asid;
- *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
- next_tlb_gen);
- return;
+ ns.asid = asid;
+ ns.need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < next_tlb_gen);
+ return ns;
}
/*
* We don't currently own an ASID slot on this CPU.
* Allocate a slot.
*/
- *new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
- if (*new_asid >= TLB_NR_DYN_ASIDS) {
- *new_asid = 0;
+ ns.asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
+ if (ns.asid >= TLB_NR_DYN_ASIDS) {
+ ns.asid = 0;
this_cpu_write(cpu_tlbstate.next_asid, 1);
}
- *need_flush = true;
+ ns.need_flush = true;
+
+ return ns;
}
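
choose_new_asid() now returns a small struct instead of filling two output parameters. A standalone sketch of that shape with the same bitfield widths; the selection policy here is a stand-in, not the kernel's ASID scan:

#include <stdio.h>

struct new_asid {
	unsigned int asid : 16;
	unsigned int need_flush : 1;
};

static struct new_asid choose_asid(int have_pcid, unsigned int free_slot)
{
	struct new_asid ns;

	if (!have_pcid) {		/* no PCID: always ASID 0, always flush */
		ns.asid = 0;
		ns.need_flush = 1;
		return ns;
	}
	ns.asid = free_slot;		/* pretend this slot already matched */
	ns.need_flush = 0;
	return ns;
}

int main(void)
{
	struct new_asid ns = choose_asid(1, 3);

	printf("asid=%u flush=%u\n", ns.asid, ns.need_flush);
	return 0;
}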
/*
@@ -623,7 +629,7 @@ static void l1d_flush_evaluate(unsigned long prev_mm, unsigned long next_mm,
{
/* Flush L1D if the outgoing task requests it */
if (prev_mm & LAST_USER_MM_L1D_FLUSH)
- wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+ wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
/* Check whether the incoming task opted in for L1D flush */
if (likely(!(next_mm & LAST_USER_MM_L1D_FLUSH)))
@@ -667,9 +673,9 @@ static void cond_mitigation(struct task_struct *next)
prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
/*
- * Avoid user/user BTB poisoning by flushing the branch predictor
- * when switching between processes. This stops one process from
- * doing Spectre-v2 attacks on another.
+ * Avoid user->user BTB/RSB poisoning by flushing them when switching
+ * between processes. This stops one process from doing Spectre-v2
+ * attacks on another.
*
* Both, the conditional and the always IBPB mode use the mm
* pointer to avoid the IBPB when switching between tasks of the
@@ -781,9 +787,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy);
unsigned cpu = smp_processor_id();
unsigned long new_lam;
+ struct new_asid ns;
u64 next_tlb_gen;
- bool need_flush;
- u16 new_asid;
+
/* We don't want flush_tlb_func() to run concurrently with us. */
if (IS_ENABLED(CONFIG_PROVE_LOCKING))
@@ -847,14 +853,15 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
* mm_cpumask. The TLB shootdown code can figure out from
* cpu_tlbstate_shared.is_lazy whether or not to send an IPI.
*/
- if (IS_ENABLED(CONFIG_DEBUG_VM) && WARN_ON_ONCE(prev != &init_mm &&
+ if (IS_ENABLED(CONFIG_DEBUG_VM) &&
+ WARN_ON_ONCE(prev != &init_mm && !is_notrack_mm(prev) &&
!cpumask_test_cpu(cpu, mm_cpumask(next))))
cpumask_set_cpu(cpu, mm_cpumask(next));
/* Check if the current mm is transitioning to a global ASID */
if (mm_needs_global_asid(next, prev_asid)) {
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
- choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+ ns = choose_new_asid(next, next_tlb_gen);
goto reload_tlb;
}
@@ -889,8 +896,8 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
* TLB contents went out of date while we were in lazy
* mode. Fall through to the TLB switching code below.
*/
- new_asid = prev_asid;
- need_flush = true;
+ ns.asid = prev_asid;
+ ns.need_flush = true;
} else {
/*
* Apply process to process speculation vulnerability
@@ -899,40 +906,33 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
cond_mitigation(tsk);
/*
- * Let nmi_uaccess_okay() and finish_asid_transition()
- * know that CR3 is changing.
+ * Indicate that CR3 is about to change. nmi_uaccess_okay()
+ * and others are sensitive to the window where mm_cpumask(),
+ * CR3 and cpu_tlbstate.loaded_mm are not all in sync.
*/
this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
barrier();
- /*
- * Leave this CPU in prev's mm_cpumask. Atomic writes to
- * mm_cpumask can be expensive under contention. The CPU
- * will be removed lazily at TLB flush time.
- */
- VM_WARN_ON_ONCE(prev != &init_mm && !cpumask_test_cpu(cpu,
- mm_cpumask(prev)));
-
/* Start receiving IPIs and then read tlb_gen (and LAM below) */
if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))
cpumask_set_cpu(cpu, mm_cpumask(next));
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
- choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+ ns = choose_new_asid(next, next_tlb_gen);
}
reload_tlb:
new_lam = mm_lam_cr3_mask(next);
- if (need_flush) {
- VM_WARN_ON_ONCE(is_global_asid(new_asid));
- this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
- this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
- load_new_mm_cr3(next->pgd, new_asid, new_lam, true);
+ if (ns.need_flush) {
+ VM_WARN_ON_ONCE(is_global_asid(ns.asid));
+ this_cpu_write(cpu_tlbstate.ctxs[ns.asid].ctx_id, next->context.ctx_id);
+ this_cpu_write(cpu_tlbstate.ctxs[ns.asid].tlb_gen, next_tlb_gen);
+ load_new_mm_cr3(next->pgd, ns.asid, new_lam, true);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
} else {
/* The new ASID is already up to date. */
- load_new_mm_cr3(next->pgd, new_asid, new_lam, false);
+ load_new_mm_cr3(next->pgd, ns.asid, new_lam, false);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
@@ -941,7 +941,7 @@ reload_tlb:
barrier();
this_cpu_write(cpu_tlbstate.loaded_mm, next);
- this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+ this_cpu_write(cpu_tlbstate.loaded_mm_asid, ns.asid);
cpu_tlbstate_update_lam(new_lam, mm_untag_mask(next));
if (next != prev) {
@@ -972,6 +972,77 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
}
/*
+ * Using a temporary mm allows setting temporary mappings that are not accessible
+ * by other CPUs. Such mappings are needed to perform sensitive memory writes
+ * that override the kernel memory protections (e.g., W^X), without exposing the
+ * temporary page-table mappings that are required for these write operations to
+ * other CPUs. Using a temporary mm also avoids TLB shootdowns when the
+ * mapping is torn down. Temporary mms can also be used for EFI runtime service
+ * calls or similar functionality.
+ *
+ * It is illegal to schedule while using a temporary mm -- the context switch
+ * code is unaware of the temporary mm and does not know how to context switch.
+ * Use a real (non-temporary) mm in a kernel thread if you need to sleep.
+ *
+ * Note: For sensitive memory writes, the temporary mm needs to be used
+ * exclusively by a single core, and IRQs should be disabled while the
+ * temporary mm is loaded, thereby preventing interrupt handler bugs from
+ * overriding the kernel memory protection.
+ */
+struct mm_struct *use_temporary_mm(struct mm_struct *temp_mm)
+{
+ struct mm_struct *prev_mm;
+
+ lockdep_assert_preemption_disabled();
+ guard(irqsave)();
+
+ /*
+ * Make sure not to be in TLB lazy mode, as otherwise we'll end up
+ * with a stale address space WITHOUT being in lazy mode after
+ * restoring the previous mm.
+ */
+ if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
+ leave_mm();
+
+ prev_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+ switch_mm_irqs_off(NULL, temp_mm, current);
+
+ /*
+ * If breakpoints are enabled, disable them while the temporary mm is
+ * used. Userspace might set up watchpoints on addresses that are used
+ * in the temporary mm, which would lead to wrong signals being sent or
+ * crashes.
+ *
+ * Note that breakpoints are not disabled selectively, which also causes
+ * kernel breakpoints (e.g., perf's) to be disabled. This might be
+ * undesirable, but still seems reasonable as the code that runs in the
+ * temporary mm should be short.
+ */
+ if (hw_breakpoint_active())
+ hw_breakpoint_disable();
+
+ return prev_mm;
+}
+
+void unuse_temporary_mm(struct mm_struct *prev_mm)
+{
+ lockdep_assert_preemption_disabled();
+ guard(irqsave)();
+
+ /* Clear the cpumask, to indicate no TLB flushing is needed anywhere */
+ cpumask_clear_cpu(smp_processor_id(), mm_cpumask(this_cpu_read(cpu_tlbstate.loaded_mm)));
+
+ switch_mm_irqs_off(NULL, prev_mm, current);
+
+ /*
+ * Restore the breakpoints if they were disabled before the temporary mm
+ * was loaded.
+ */
+ if (hw_breakpoint_active())
+ hw_breakpoint_restore();
+}
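
The comment block above spells out the discipline for temporary mms: switch to the private mm with preemption disabled, do the sensitive work, then restore the previous mm (re-enabling breakpoints on the way out). A userspace model of that use/unuse pairing, purely illustrative and not kernel code; the 'breakpoints' flag stands in for hw_breakpoint_disable()/hw_breakpoint_restore():

#include <stdio.h>

struct ctx { const char *name; };

static struct ctx *loaded_ctx;
static int breakpoints_enabled = 1;

static struct ctx *use_temporary(struct ctx *temp)
{
	struct ctx *prev = loaded_ctx;

	loaded_ctx = temp;		/* models switching to the temporary mm */
	breakpoints_enabled = 0;	/* models hw_breakpoint_disable() */
	return prev;
}

static void unuse_temporary(struct ctx *prev)
{
	loaded_ctx = prev;		/* models restoring the previous mm */
	breakpoints_enabled = 1;	/* models hw_breakpoint_restore() */
}

int main(void)
{
	struct ctx kernel = { "kernel" }, poking = { "poking" };
	struct ctx *prev;

	loaded_ctx = &kernel;
	prev = use_temporary(&poking);
	printf("loaded=%s\n", loaded_ctx->name);	/* poking */
	unuse_temporary(prev);
	printf("loaded=%s\n", loaded_ctx->name);	/* kernel */
	return 0;
}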
+
+/*
* Call this when reinitializing a CPU. It fixes the following potential
* problems:
*
@@ -1204,8 +1275,16 @@ done:
static bool should_flush_tlb(int cpu, void *data)
{
+ struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu);
struct flush_tlb_info *info = data;
+ /*
+ * Order the reads of 'loaded_mm' and 'is_lazy' against their
+ * write ordering in switch_mm_irqs_off(). Ensure that
+ * 'is_lazy' is at least as new as 'loaded_mm'.
+ */
+ smp_rmb();
+
/* Lazy TLB will get flushed at the next context switch. */
if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
return false;
@@ -1214,8 +1293,15 @@ static bool should_flush_tlb(int cpu, void *data)
if (!info->mm)
return true;
+ /*
+ * While switching, the remote CPU could have state from
+ * either the prev or next mm. Assume the worst and flush.
+ */
+ if (loaded_mm == LOADED_MM_SWITCHING)
+ return true;
+
/* The target mm is loaded, and the CPU is not lazy. */
- if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
+ if (loaded_mm == info->mm)
return true;
/* In cpumask, but not the loaded mm? Periodically remove by flushing. */
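
should_flush_tlb() above now reads loaded_mm once, treats a CPU caught mid-switch as needing a flush, and skips lazy CPUs. A standalone model of that decision (the trailing cpumask-trim case and the smp_rmb() ordering are simplified away; MM_SWITCHING is a stand-in for LOADED_MM_SWITCHING):

#include <stdbool.h>
#include <stdio.h>

#define MM_SWITCHING	((void *)1)	/* stand-in for LOADED_MM_SWITCHING */

static bool should_flush(void *loaded_mm, bool is_lazy, void *target_mm)
{
	if (is_lazy)
		return false;		/* lazy CPUs flush at the next switch */
	if (!target_mm)
		return true;		/* flush-everything request */
	if (loaded_mm == MM_SWITCHING)
		return true;		/* mid-switch: assume the worst */
	return loaded_mm == target_mm;	/* flush only if the target mm is loaded */
}

int main(void)
{
	static int dummy;

	printf("%d\n", should_flush(MM_SWITCHING, false, &dummy));
	return 0;
}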
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 9e5fe2ba858f..15672cb926fc 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -41,6 +41,8 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
+#define EMIT5(b1, b2, b3, b4, b5) \
+ do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0)
#define EMIT1_off32(b1, off) \
do { EMIT1(b1); EMIT(off, 4); } while (0)
@@ -629,7 +631,7 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
goto out;
ret = 1;
if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
- text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
+ smp_text_poke_single(ip, new_insn, X86_PATCH_SIZE, NULL);
ret = 0;
}
out:
@@ -661,7 +663,10 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
u8 *prog = *pprog;
- if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
+ if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
+ OPTIMIZER_HIDE_VAR(reg);
+ emit_jump(&prog, its_static_thunk(reg), ip);
+ } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
EMIT_LFENCE();
EMIT2(0xFF, 0xE0 + reg);
} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
@@ -683,7 +688,7 @@ static void emit_return(u8 **pprog, u8 *ip)
{
u8 *prog = *pprog;
- if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+ if (cpu_wants_rethunk()) {
emit_jump(&prog, x86_return_thunk, ip);
} else {
EMIT1(0xC3); /* ret */
@@ -1502,6 +1507,48 @@ static void emit_priv_frame_ptr(u8 **pprog, void __percpu *priv_frame_ptr)
#define PRIV_STACK_GUARD_SZ 8
#define PRIV_STACK_GUARD_VAL 0xEB9F12345678eb9fULL
+static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip,
+ struct bpf_prog *bpf_prog)
+{
+ u8 *prog = *pprog;
+ u8 *func;
+
+ if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) {
+ /* The clearing sequence clobbers eax and ecx. */
+ EMIT1(0x50); /* push rax */
+ EMIT1(0x51); /* push rcx */
+ ip += 2;
+
+ func = (u8 *)clear_bhb_loop;
+ ip += x86_call_depth_emit_accounting(&prog, func, ip);
+
+ if (emit_call(&prog, func, ip))
+ return -EINVAL;
+ EMIT1(0x59); /* pop rcx */
+ EMIT1(0x58); /* pop rax */
+ }
+ /* Insert IBHF instruction */
+ if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) &&
+ cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) ||
+ cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) {
+ /*
+ * Add an Indirect Branch History Fence (IBHF). IBHF acts as a
+ * fence preventing branch history from before the fence from
+ * affecting indirect branches after the fence. This is
+ * specifically used in cBPF jitted code to prevent Intra-mode
+ * BHI attacks. The IBHF instruction is designed to be a NOP on
+ * hardware that doesn't need or support it. The REP and REX.W
+ * prefixes are required by the microcode, and they also ensure
+ * that the NOP is unlikely to be used in existing code.
+ *
+ * IBHF is not a valid instruction in 32-bit mode.
+ */
+ EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */
+ }
+ *pprog = prog;
+ return 0;
+}
+
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
@@ -2544,6 +2591,13 @@ emit_jmp:
seen_exit = true;
/* Update cleanup_addr */
ctx->cleanup_addr = proglen;
+ if (bpf_prog_was_classic(bpf_prog) &&
+ !capable(CAP_SYS_ADMIN)) {
+ u8 *ip = image + addrs[i - 1];
+
+ if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
+ return -EINVAL;
+ }
if (bpf_prog->aux->exception_boundary) {
pop_callee_regs(&prog, all_callee_regs_used);
pop_r12(&prog);
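
emit_spectre_bhb_barrier() above appends a fixed 5-byte sequence (REP and REX.W prefixes on 0F 1E F8) through the new EMIT5() macro. A standalone sketch of that kind of byte emission into a JIT image buffer (the buffer handling here is illustrative, not the JIT's EMIT machinery; the bytes are taken from the hunk above):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t emit_bytes(uint8_t *buf, size_t pos, const uint8_t *bytes, size_t n)
{
	for (size_t i = 0; i < n; i++)
		buf[pos + i] = bytes[i];	/* append the encoding verbatim */
	return pos + n;
}

int main(void)
{
	uint8_t image[16];
	const uint8_t ibhf[5] = { 0xF3, 0x48, 0x0F, 0x1E, 0xF8 };
	size_t len = emit_bytes(image, 0, ibhf, sizeof(ibhf));

	for (size_t i = 0; i < len; i++)
		printf("%02x ", image[i]);
	printf("\n");
	return 0;
}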
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 4933fb337983..c1efd5b0d198 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -8,13 +8,13 @@ obj-$(CONFIG_PCI_OLPC) += olpc.o
obj-$(CONFIG_PCI_XEN) += xen.o
obj-y += fixup.o
-obj-$(CONFIG_X86_INTEL_CE) += ce4100.o
obj-$(CONFIG_ACPI) += acpi.o
obj-y += legacy.o irq.o
-obj-$(CONFIG_X86_NUMACHIP) += numachip.o
+obj-$(CONFIG_X86_INTEL_CE) += ce4100.o
+obj-$(CONFIG_X86_INTEL_MID) += intel_mid.o
-obj-$(CONFIG_X86_INTEL_MID) += intel_mid_pci.o
+obj-$(CONFIG_X86_NUMACHIP) += numachip.o
obj-y += common.o early.o
obj-y += bus_numa.o
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 631512f7ec85..99b1727136c1 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -5,7 +5,7 @@
#include <linux/cpu.h>
#include <linux/range.h>
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
#include <asm/pci_x86.h>
#include <asm/pci-direct.h>
@@ -202,7 +202,7 @@ static int __init early_root_info_init(void)
/* need to take out [0, TOM) for RAM*/
address = MSR_K8_TOP_MEM1;
- rdmsrl(address, val);
+ rdmsrq(address, val);
end = (val & 0xffffff800000ULL);
printk(KERN_INFO "TOM: %016llx aka %lldM\n", end, end>>20);
if (end < (1ULL<<32))
@@ -293,12 +293,12 @@ static int __init early_root_info_init(void)
/* need to take out [4G, TOM2) for RAM*/
/* SYS_CFG */
address = MSR_AMD64_SYSCFG;
- rdmsrl(address, val);
+ rdmsrq(address, val);
/* TOP_MEM2 is enabled? */
if (val & (1<<21)) {
/* TOP_MEM2 */
address = MSR_K8_TOP_MEM2;
- rdmsrl(address, val);
+ rdmsrq(address, val);
end = (val & 0xffffff800000ULL);
printk(KERN_INFO "TOM2: %016llx aka %lldM\n", end, end>>20);
subtract_range(range, RANGE_NUM, 1ULL<<32, end);
@@ -341,10 +341,10 @@ static int amd_bus_cpu_online(unsigned int cpu)
{
u64 reg;
- rdmsrl(MSR_AMD64_NB_CFG, reg);
+ rdmsrq(MSR_AMD64_NB_CFG, reg);
if (!(reg & ENABLE_CF8_EXT_CFG)) {
reg |= ENABLE_CF8_EXT_CFG;
- wrmsrl(MSR_AMD64_NB_CFG, reg);
+ wrmsrq(MSR_AMD64_NB_CFG, reg);
}
return 0;
}
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index efefeb82ab61..e7e71490bd25 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -9,7 +9,7 @@
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/vgaarb.h>
-#include <asm/amd_node.h>
+#include <asm/amd/node.h>
#include <asm/hpet.h>
#include <asm/pci_x86.h>
@@ -970,13 +970,13 @@ static void amd_rp_pme_suspend(struct pci_dev *dev)
struct pci_dev *rp;
/*
- * PM_SUSPEND_ON means we're doing runtime suspend, which means
+ * If system suspend is not in progress, we're doing runtime suspend, so
* amd-pmc will not be involved so PMEs during D3 work as advertised.
*
* The PMEs *do* work if amd-pmc doesn't put the SoC in the hardware
* sleep state, but we assume amd-pmc is always present.
*/
- if (pm_suspend_target_state == PM_SUSPEND_ON)
+ if (!pm_suspend_in_progress())
return;
rp = pcie_find_root_port(dev);
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid.c
index b433b1753016..b433b1753016 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid.c
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 39255f0eb14d..1f4522325920 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -22,9 +22,10 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
+#include <asm/acpi.h>
#include <asm/e820/api.h>
+#include <asm/msr.h>
#include <asm/pci_x86.h>
-#include <asm/acpi.h>
/* Indicate if the ECAM resources have been placed into the resource table */
static bool pci_mmcfg_running_state;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ac57259a432b..e7e8f77f77f8 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -73,7 +73,7 @@ int __init efi_alloc_page_tables(void)
gfp_t gfp_mask;
gfp_mask = GFP_KERNEL | __GFP_ZERO;
- efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
+ efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, pgd_allocation_order());
if (!efi_pgd)
goto fail;
@@ -89,6 +89,7 @@ int __init efi_alloc_page_tables(void)
efi_mm.pgd = efi_pgd;
mm_init_cpumask(&efi_mm);
init_new_context(NULL, &efi_mm);
+ set_notrack_mm(&efi_mm);
return 0;
@@ -96,7 +97,7 @@ free_p4d:
if (pgtable_l5_enabled())
free_page((unsigned long)pgd_page_vaddr(*pgd));
free_pgd:
- free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
+ free_pages((unsigned long)efi_pgd, pgd_allocation_order());
fail:
return -ENOMEM;
}
@@ -434,15 +435,12 @@ void __init efi_dump_pagetable(void)
*/
static void efi_enter_mm(void)
{
- efi_prev_mm = current->active_mm;
- current->active_mm = &efi_mm;
- switch_mm(efi_prev_mm, &efi_mm, NULL);
+ efi_prev_mm = use_temporary_mm(&efi_mm);
}
static void efi_leave_mm(void)
{
- current->active_mm = efi_prev_mm;
- switch_mm(&efi_mm, efi_prev_mm, NULL);
+ unuse_temporary_mm(efi_prev_mm);
}
void arch_efi_call_virt_setup(void)
diff --git a/arch/x86/platform/olpc/olpc-xo1-rtc.c b/arch/x86/platform/olpc/olpc-xo1-rtc.c
index 57f210cda761..ee77d57bcab7 100644
--- a/arch/x86/platform/olpc/olpc-xo1-rtc.c
+++ b/arch/x86/platform/olpc/olpc-xo1-rtc.c
@@ -64,9 +64,9 @@ static int __init xo1_rtc_init(void)
of_node_put(node);
pr_info("olpc-xo1-rtc: Initializing OLPC XO-1 RTC\n");
- rdmsrl(MSR_RTC_DOMA_OFFSET, rtc_info.rtc_day_alarm);
- rdmsrl(MSR_RTC_MONA_OFFSET, rtc_info.rtc_mon_alarm);
- rdmsrl(MSR_RTC_CEN_OFFSET, rtc_info.rtc_century);
+ rdmsrq(MSR_RTC_DOMA_OFFSET, rtc_info.rtc_day_alarm);
+ rdmsrq(MSR_RTC_MONA_OFFSET, rtc_info.rtc_mon_alarm);
+ rdmsrq(MSR_RTC_CEN_OFFSET, rtc_info.rtc_century);
r = platform_device_register(&xo1_rtc_device);
if (r)
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index 63066e7c8517..30751b42d54e 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -325,7 +325,7 @@ static int setup_sci_interrupt(struct platform_device *pdev)
dev_info(&pdev->dev, "SCI unmapped. Mapping to IRQ 3\n");
sci_irq = 3;
lo |= 0x00300000;
- wrmsrl(0x51400020, lo);
+ wrmsrq(0x51400020, lo);
}
/* Select level triggered in PIC */
diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index cfa18ec7d55f..1d78e5631bb8 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -87,8 +87,7 @@ SYM_CODE_START(pvh_start_xen)
mov %ebx, %esi
movl rva(pvh_start_info_sz)(%ebp), %ecx
shr $2,%ecx
- rep
- movsl
+ rep movsl
leal rva(early_stack_end)(%ebp), %esp
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 08e76a5ca155..916441f5e85c 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -27,6 +27,7 @@
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode.h>
+#include <asm/msr.h>
#include <asm/fred.h>
#ifdef CONFIG_X86_32
@@ -44,7 +45,7 @@ static void msr_save_context(struct saved_context *ctxt)
while (msr < end) {
if (msr->valid)
- rdmsrl(msr->info.msr_no, msr->info.reg.q);
+ rdmsrq(msr->info.msr_no, msr->info.reg.q);
msr++;
}
}
@@ -56,7 +57,7 @@ static void msr_restore_context(struct saved_context *ctxt)
while (msr < end) {
if (msr->valid)
- wrmsrl(msr->info.msr_no, msr->info.reg.q);
+ wrmsrq(msr->info.msr_no, msr->info.reg.q);
msr++;
}
}
@@ -110,12 +111,12 @@ static void __save_processor_state(struct saved_context *ctxt)
savesegment(ds, ctxt->ds);
savesegment(es, ctxt->es);
- rdmsrl(MSR_FS_BASE, ctxt->fs_base);
- rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
- rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+ rdmsrq(MSR_FS_BASE, ctxt->fs_base);
+ rdmsrq(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+ rdmsrq(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
mtrr_save_fixed_ranges(NULL);
- rdmsrl(MSR_EFER, ctxt->efer);
+ rdmsrq(MSR_EFER, ctxt->efer);
#endif
/*
@@ -125,7 +126,7 @@ static void __save_processor_state(struct saved_context *ctxt)
ctxt->cr2 = read_cr2();
ctxt->cr3 = __read_cr3();
ctxt->cr4 = __read_cr4();
- ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
+ ctxt->misc_enable_saved = !rdmsrq_safe(MSR_IA32_MISC_ENABLE,
&ctxt->misc_enable);
msr_save_context(ctxt);
}
@@ -198,7 +199,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
struct cpuinfo_x86 *c;
if (ctxt->misc_enable_saved)
- wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
+ wrmsrq(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
/*
* control registers
*/
@@ -208,7 +209,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
__write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
- wrmsrl(MSR_EFER, ctxt->efer);
+ wrmsrq(MSR_EFER, ctxt->efer);
__write_cr4(ctxt->cr4);
#endif
write_cr3(ctxt->cr3);
@@ -231,7 +232,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
* handlers or in complicated helpers like load_gs_index().
*/
#ifdef CONFIG_X86_64
- wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+ wrmsrq(MSR_GS_BASE, ctxt->kernelmode_gs_base);
/*
* Reinitialize FRED to ensure the FRED MSRs contain the same values
@@ -267,8 +268,8 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
* restoring the selectors clobbers the bases. Keep in mind
* that MSR_KERNEL_GS_BASE is horribly misnamed.
*/
- wrmsrl(MSR_FS_BASE, ctxt->fs_base);
- wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+ wrmsrq(MSR_FS_BASE, ctxt->fs_base);
+ wrmsrq(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#else
loadsegment(gs, ctxt->gs);
#endif
@@ -414,7 +415,7 @@ static int msr_build_context(const u32 *msr_id, const int num)
u64 dummy;
msr_array[i].info.msr_no = msr_id[j];
- msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy);
+ msr_array[i].valid = !rdmsrq_safe(msr_id[j], &dummy);
msr_array[i].info.reg.q = 0;
}
saved_msrs->num = total_num;
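
The hunks above are part of the tree-wide rename of the 64-bit MSR accessors from rdmsrl()/wrmsrl() to rdmsrq()/wrmsrq(). A minimal sketch of the calling convention as used in this diff follows; the MSRs named here are just convenient examples, and the _safe variant returns non-zero if the access faults.

#include <linux/printk.h>
#include <asm/msr.h>

/* Sketch of the renamed 64-bit MSR accessors used throughout this diff. */
static void msr_accessor_sketch(void)
{
	u64 val;

	rdmsrq(MSR_EFER, val);		/* plain read, no fault handling  */
	wrmsrq(MSR_EFER, val);		/* plain write of a 64-bit value  */

	/* _safe variant: returns non-zero if the RDMSR faults */
	if (rdmsrq_safe(MSR_IA32_MISC_ENABLE, &val))
		pr_warn("MSR_IA32_MISC_ENABLE not readable on this CPU\n");
}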
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index 5b81d19cd114..a7c23f2a58c9 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -42,6 +42,7 @@ unsigned long relocated_restore_code __visible;
/**
* pfn_is_nosave - check if given pfn is in the 'nosave' section
+ * @pfn: the page frame number to check.
*/
int pfn_is_nosave(unsigned long pfn)
{
@@ -86,7 +87,10 @@ static inline u32 compute_e820_crc32(struct e820_table *table)
/**
* arch_hibernation_header_save - populate the architecture specific part
* of a hibernation image header
- * @addr: address to save the data at
+ * @addr: address where architecture specific header data will be saved.
+ * @max_size: maximum size of architecture specific data in hibernation header.
+ *
+ * Return: 0 on success, -EOVERFLOW if max_size is insufficient.
*/
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index 5606a15cf9a1..fb910d9f8471 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -69,8 +69,7 @@ copy_loop:
movl pbe_orig_address(%edx), %edi
movl $(PAGE_SIZE >> 2), %ecx
- rep
- movsl
+ rep movsl
movl pbe_next(%edx), %edx
jmp copy_loop
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 8c534c36adfa..c73be0a02a6c 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -26,7 +26,7 @@
/* code below belongs to the image kernel */
.align PAGE_SIZE
SYM_FUNC_START(restore_registers)
- ANNOTATE_NOENDBR
+ ENDBR
/* go back to the original page tables */
movq %r9, %cr3
@@ -120,7 +120,7 @@ SYM_FUNC_END(restore_image)
/* code below has been relocated to a safe page */
SYM_FUNC_START(core_restore_code)
- ANNOTATE_NOENDBR
+ ENDBR
/* switch to temporary page tables */
movq %rax, %cr3
/* flush TLB */
@@ -138,8 +138,7 @@ SYM_FUNC_START(core_restore_code)
movq pbe_address(%rdx), %rsi
movq pbe_orig_address(%rdx), %rdi
movq $(PAGE_SIZE >> 3), %rcx
- rep
- movsq
+ rep movsq
/* progress to the next pbe */
movq pbe_next(%rdx), %rdx
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index f9bc444a3064..88be32026768 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -9,6 +9,7 @@
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
+#include <asm/msr.h>
#include <asm/sev.h>
struct real_mode_header *real_mode_header;
@@ -65,6 +66,8 @@ void __init reserve_real_mode(void)
* setup_arch().
*/
memblock_reserve(0, SZ_1M);
+
+ memblock_clear_kho_scratch(0, SZ_1M);
}
static void __init sme_sev_setup_real_mode(struct trampoline_header *th)
@@ -145,7 +148,7 @@ static void __init setup_real_mode(void)
* Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
* so we need to mask it out.
*/
- rdmsrl(MSR_EFER, efer);
+ rdmsrq(MSR_EFER, efer);
trampoline_header->efer = efer & ~EFER_LMA;
trampoline_header->start = (u64) secondary_startup_64;
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index 5770c8097f32..2c19d7fc8a85 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -64,6 +64,8 @@ BEGIN {
modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
force64_expr = "\\([df]64\\)"
+ invalid64_expr = "\\(i64\\)"
+ only64_expr = "\\(o64\\)"
rex_expr = "^((REX(\\.[XRWB]+)+)|(REX$))"
rex2_expr = "\\(REX2\\)"
no_rex2_expr = "\\(!REX2\\)"
@@ -319,6 +321,11 @@ function convert_operands(count,opnd, i,j,imm,mod)
if (match(ext, force64_expr))
flags = add_flags(flags, "INAT_FORCE64")
+ # check invalid in 64-bit (and no only64)
+ if (match(ext, invalid64_expr) &&
+ !match($0, only64_expr))
+ flags = add_flags(flags, "INAT_INV64")
+
# check REX2 not allowed
if (match(ext, no_rex2_expr))
flags = add_flags(flags, "INAT_NO_REX2")
diff --git a/arch/x86/um/shared/sysdep/faultinfo_32.h b/arch/x86/um/shared/sysdep/faultinfo_32.h
index ab5c8e47049c..9193a7790a71 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_32.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_32.h
@@ -31,8 +31,8 @@ struct faultinfo {
#define ___backtrack_faulted(_faulted) \
asm volatile ( \
- "mov $0, %0\n" \
"movl $__get_kernel_nofault_faulted_%=,%1\n" \
+ "mov $0, %0\n" \
"jmp _end_%=\n" \
"__get_kernel_nofault_faulted_%=:\n" \
"mov $1, %0;" \
diff --git a/arch/x86/um/shared/sysdep/faultinfo_64.h b/arch/x86/um/shared/sysdep/faultinfo_64.h
index 26fb4835d3e9..61e4ca1e0ab5 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_64.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_64.h
@@ -31,8 +31,8 @@ struct faultinfo {
#define ___backtrack_faulted(_faulted) \
asm volatile ( \
- "mov $0, %0\n" \
"movq $__get_kernel_nofault_faulted_%=,%1\n" \
+ "mov $0, %0\n" \
"jmp _end_%=\n" \
"__get_kernel_nofault_faulted_%=:\n" \
"mov $1, %0;" \
diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index fc473ca12c44..942372e69b4d 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -27,9 +27,10 @@
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/cmdline.h>
#include <asm/iommu.h>
+#include <asm/msr.h>
/*
* The RMP entry information as returned by the RMPREAD instruction.
@@ -136,11 +137,11 @@ static int __mfd_enable(unsigned int cpu)
if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return 0;
- rdmsrl(MSR_AMD64_SYSCFG, val);
+ rdmsrq(MSR_AMD64_SYSCFG, val);
val |= MSR_AMD64_SYSCFG_MFDM;
- wrmsrl(MSR_AMD64_SYSCFG, val);
+ wrmsrq(MSR_AMD64_SYSCFG, val);
return 0;
}
@@ -157,12 +158,12 @@ static int __snp_enable(unsigned int cpu)
if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return 0;
- rdmsrl(MSR_AMD64_SYSCFG, val);
+ rdmsrq(MSR_AMD64_SYSCFG, val);
val |= MSR_AMD64_SYSCFG_SNP_EN;
val |= MSR_AMD64_SYSCFG_SNP_VMPL_EN;
- wrmsrl(MSR_AMD64_SYSCFG, val);
+ wrmsrq(MSR_AMD64_SYSCFG, val);
return 0;
}
@@ -522,7 +523,7 @@ int __init snp_rmptable_init(void)
* Check if SEV-SNP is already enabled, this can happen in case of
* kexec boot.
*/
- rdmsrl(MSR_AMD64_SYSCFG, val);
+ rdmsrq(MSR_AMD64_SYSCFG, val);
if (val & MSR_AMD64_SYSCFG_SNP_EN)
goto skip_enable;
@@ -576,8 +577,8 @@ static bool probe_contiguous_rmptable_info(void)
{
u64 rmp_sz, rmp_base, rmp_end;
- rdmsrl(MSR_AMD64_RMP_BASE, rmp_base);
- rdmsrl(MSR_AMD64_RMP_END, rmp_end);
+ rdmsrq(MSR_AMD64_RMP_BASE, rmp_base);
+ rdmsrq(MSR_AMD64_RMP_END, rmp_end);
if (!(rmp_base & RMP_ADDR_MASK) || !(rmp_end & RMP_ADDR_MASK)) {
pr_err("Memory for the RMP table has not been reserved by BIOS\n");
@@ -610,13 +611,13 @@ static bool probe_segmented_rmptable_info(void)
unsigned int eax, ebx, segment_shift, segment_shift_min, segment_shift_max;
u64 rmp_base, rmp_end;
- rdmsrl(MSR_AMD64_RMP_BASE, rmp_base);
+ rdmsrq(MSR_AMD64_RMP_BASE, rmp_base);
if (!(rmp_base & RMP_ADDR_MASK)) {
pr_err("Memory for the RMP table has not been reserved by BIOS\n");
return false;
}
- rdmsrl(MSR_AMD64_RMP_END, rmp_end);
+ rdmsrq(MSR_AMD64_RMP_END, rmp_end);
WARN_ONCE(rmp_end & RMP_ADDR_MASK,
"Segmented RMP enabled but RMP_END MSR is non-zero\n");
@@ -652,7 +653,7 @@ static bool probe_segmented_rmptable_info(void)
bool snp_probe_rmptable_info(void)
{
if (cpu_feature_enabled(X86_FEATURE_SEGMENTED_RMP))
- rdmsrl(MSR_AMD64_RMP_CFG, rmp_cfg);
+ rdmsrq(MSR_AMD64_RMP_CFG, rmp_cfg);
if (rmp_cfg & MSR_AMD64_SEG_RMP_ENABLED)
return probe_segmented_rmptable_info();
diff --git a/arch/x86/virt/vmx/tdx/seamcall.S b/arch/x86/virt/vmx/tdx/seamcall.S
index 5b1f2286aea9..6854c52c374b 100644
--- a/arch/x86/virt/vmx/tdx/seamcall.S
+++ b/arch/x86/virt/vmx/tdx/seamcall.S
@@ -41,6 +41,9 @@ SYM_FUNC_START(__seamcall_ret)
TDX_MODULE_CALL host=1 ret=1
SYM_FUNC_END(__seamcall_ret)
+/* KVM requires non-instrumentable __seamcall_saved_ret() for TDH.VP.ENTER */
+.section .noinstr.text, "ax"
+
/*
* __seamcall_saved_ret() - Host-side interface functions to SEAM software
* (the P-SEAMLDR or the TDX module), with saving output registers to the
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index 7fdb37387886..2457d13c3f9e 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -5,6 +5,7 @@
* Intel Trusted Domain Extensions (TDX) support
*/
+#include "asm/page_types.h"
#define pr_fmt(fmt) "virt/tdx: " fmt
#include <linux/types.h>
@@ -27,6 +28,7 @@
#include <linux/log2.h>
#include <linux/acpi.h>
#include <linux/suspend.h>
+#include <linux/idr.h>
#include <asm/page.h>
#include <asm/special_insns.h>
#include <asm/msr-index.h>
@@ -42,6 +44,8 @@ static u32 tdx_global_keyid __ro_after_init;
static u32 tdx_guest_keyid_start __ro_after_init;
static u32 tdx_nr_guest_keyids __ro_after_init;
+static DEFINE_IDA(tdx_guest_keyid_pool);
+
static DEFINE_PER_CPU(bool, tdx_lp_initialized);
static struct tdmr_info_list tdx_tdmr_list;
@@ -52,6 +56,8 @@ static DEFINE_MUTEX(tdx_module_lock);
/* All TDX-usable memory regions. Protected by mem_hotplug_lock. */
static LIST_HEAD(tdx_memlist);
+static struct tdx_sys_info tdx_sysinfo;
+
typedef void (*sc_err_func_t)(u64 fn, u64 err, struct tdx_module_args *args);
static inline void seamcall_err(u64 fn, u64 err, struct tdx_module_args *args)
@@ -1060,15 +1066,14 @@ static int init_tdmrs(struct tdmr_info_list *tdmr_list)
static int init_tdx_module(void)
{
- struct tdx_sys_info sysinfo;
int ret;
- ret = get_tdx_sys_info(&sysinfo);
+ ret = get_tdx_sys_info(&tdx_sysinfo);
if (ret)
return ret;
/* Check whether the kernel can support this module */
- ret = check_features(&sysinfo);
+ ret = check_features(&tdx_sysinfo);
if (ret)
return ret;
@@ -1089,12 +1094,12 @@ static int init_tdx_module(void)
goto out_put_tdxmem;
/* Allocate enough space for constructing TDMRs */
- ret = alloc_tdmr_list(&tdx_tdmr_list, &sysinfo.tdmr);
+ ret = alloc_tdmr_list(&tdx_tdmr_list, &tdx_sysinfo.tdmr);
if (ret)
goto err_free_tdxmem;
/* Cover all TDX-usable memory regions in TDMRs */
- ret = construct_tdmrs(&tdx_memlist, &tdx_tdmr_list, &sysinfo.tdmr);
+ ret = construct_tdmrs(&tdx_memlist, &tdx_tdmr_list, &tdx_sysinfo.tdmr);
if (ret)
goto err_free_tdmrs;
@@ -1456,3 +1461,411 @@ void __init tdx_init(void)
check_tdx_erratum();
}
+
+const struct tdx_sys_info *tdx_get_sysinfo(void)
+{
+ const struct tdx_sys_info *p = NULL;
+
+ /* Make sure all fields in @tdx_sysinfo have been populated */
+ mutex_lock(&tdx_module_lock);
+ if (tdx_module_status == TDX_MODULE_INITIALIZED)
+ p = (const struct tdx_sys_info *)&tdx_sysinfo;
+ mutex_unlock(&tdx_module_lock);
+
+ return p;
+}
+EXPORT_SYMBOL_GPL(tdx_get_sysinfo);
+
+u32 tdx_get_nr_guest_keyids(void)
+{
+ return tdx_nr_guest_keyids;
+}
+EXPORT_SYMBOL_GPL(tdx_get_nr_guest_keyids);
+
+int tdx_guest_keyid_alloc(void)
+{
+ return ida_alloc_range(&tdx_guest_keyid_pool, tdx_guest_keyid_start,
+ tdx_guest_keyid_start + tdx_nr_guest_keyids - 1,
+ GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(tdx_guest_keyid_alloc);
+
+void tdx_guest_keyid_free(unsigned int keyid)
+{
+ ida_free(&tdx_guest_keyid_pool, keyid);
+}
+EXPORT_SYMBOL_GPL(tdx_guest_keyid_free);
+
+static inline u64 tdx_tdr_pa(struct tdx_td *td)
+{
+ return page_to_phys(td->tdr_page);
+}
+
+static inline u64 tdx_tdvpr_pa(struct tdx_vp *td)
+{
+ return page_to_phys(td->tdvpr_page);
+}
+
+/*
+ * The TDX module exposes a CLFLUSH_BEFORE_ALLOC bit to specify whether
+ * a CLFLUSH of pages is required before handing them to the TDX module.
+ * Be conservative and make the code simpler by doing the CLFLUSH
+ * unconditionally.
+ */
+static void tdx_clflush_page(struct page *page)
+{
+ clflush_cache_range(page_to_virt(page), PAGE_SIZE);
+}
+
+noinstr __flatten u64 tdh_vp_enter(struct tdx_vp *td, struct tdx_module_args *args)
+{
+ args->rcx = tdx_tdvpr_pa(td);
+
+ return __seamcall_saved_ret(TDH_VP_ENTER, args);
+}
+EXPORT_SYMBOL_GPL(tdh_vp_enter);
+
+u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page)
+{
+ struct tdx_module_args args = {
+ .rcx = page_to_phys(tdcs_page),
+ .rdx = tdx_tdr_pa(td),
+ };
+
+ tdx_clflush_page(tdcs_page);
+ return seamcall(TDH_MNG_ADDCX, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mng_addcx);
+
+u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page *source, u64 *ext_err1, u64 *ext_err2)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa,
+ .rdx = tdx_tdr_pa(td),
+ .r8 = page_to_phys(page),
+ .r9 = page_to_phys(source),
+ };
+ u64 ret;
+
+ tdx_clflush_page(page);
+ ret = seamcall_ret(TDH_MEM_PAGE_ADD, &args);
+
+ *ext_err1 = args.rcx;
+ *ext_err2 = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mem_page_add);
+
+u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdx_tdr_pa(td),
+ .r8 = page_to_phys(page),
+ };
+ u64 ret;
+
+ tdx_clflush_page(page);
+ ret = seamcall_ret(TDH_MEM_SEPT_ADD, &args);
+
+ *ext_err1 = args.rcx;
+ *ext_err2 = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mem_sept_add);
+
+u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page)
+{
+ struct tdx_module_args args = {
+ .rcx = page_to_phys(tdcx_page),
+ .rdx = tdx_tdvpr_pa(vp),
+ };
+
+ tdx_clflush_page(tdcx_page);
+ return seamcall(TDH_VP_ADDCX, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_vp_addcx);
+
+u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdx_tdr_pa(td),
+ .r8 = page_to_phys(page),
+ };
+ u64 ret;
+
+ tdx_clflush_page(page);
+ ret = seamcall_ret(TDH_MEM_PAGE_AUG, &args);
+
+ *ext_err1 = args.rcx;
+ *ext_err2 = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mem_page_aug);
+
+u64 tdh_mem_range_block(struct tdx_td *td, u64 gpa, int level, u64 *ext_err1, u64 *ext_err2)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdx_tdr_pa(td),
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_MEM_RANGE_BLOCK, &args);
+
+ *ext_err1 = args.rcx;
+ *ext_err2 = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mem_range_block);
+
+u64 tdh_mng_key_config(struct tdx_td *td)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ };
+
+ return seamcall(TDH_MNG_KEY_CONFIG, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mng_key_config);
+
+u64 tdh_mng_create(struct tdx_td *td, u16 hkid)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ .rdx = hkid,
+ };
+
+ tdx_clflush_page(td->tdr_page);
+ return seamcall(TDH_MNG_CREATE, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mng_create);
+
+u64 tdh_vp_create(struct tdx_td *td, struct tdx_vp *vp)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdvpr_pa(vp),
+ .rdx = tdx_tdr_pa(td),
+ };
+
+ tdx_clflush_page(vp->tdvpr_page);
+ return seamcall(TDH_VP_CREATE, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_vp_create);
+
+u64 tdh_mng_rd(struct tdx_td *td, u64 field, u64 *data)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ .rdx = field,
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_MNG_RD, &args);
+
+ /* R8: Content of the field, or 0 in case of error. */
+ *data = args.r8;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mng_rd);
+
+u64 tdh_mr_extend(struct tdx_td *td, u64 gpa, u64 *ext_err1, u64 *ext_err2)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa,
+ .rdx = tdx_tdr_pa(td),
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_MR_EXTEND, &args);
+
+ *ext_err1 = args.rcx;
+ *ext_err2 = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mr_extend);
+
+u64 tdh_mr_finalize(struct tdx_td *td)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ };
+
+ return seamcall(TDH_MR_FINALIZE, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mr_finalize);
+
+u64 tdh_vp_flush(struct tdx_vp *vp)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdvpr_pa(vp),
+ };
+
+ return seamcall(TDH_VP_FLUSH, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_vp_flush);
+
+u64 tdh_mng_vpflushdone(struct tdx_td *td)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ };
+
+ return seamcall(TDH_MNG_VPFLUSHDONE, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mng_vpflushdone);
+
+u64 tdh_mng_key_freeid(struct tdx_td *td)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ };
+
+ return seamcall(TDH_MNG_KEY_FREEID, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mng_key_freeid);
+
+u64 tdh_mng_init(struct tdx_td *td, u64 td_params, u64 *extended_err)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ .rdx = td_params,
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_MNG_INIT, &args);
+
+ *extended_err = args.rcx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mng_init);
+
+u64 tdh_vp_rd(struct tdx_vp *vp, u64 field, u64 *data)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdvpr_pa(vp),
+ .rdx = field,
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_VP_RD, &args);
+
+ /* R8: Content of the field, or 0 in case of error. */
+ *data = args.r8;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_vp_rd);
+
+u64 tdh_vp_wr(struct tdx_vp *vp, u64 field, u64 data, u64 mask)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdvpr_pa(vp),
+ .rdx = field,
+ .r8 = data,
+ .r9 = mask,
+ };
+
+ return seamcall(TDH_VP_WR, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_vp_wr);
+
+u64 tdh_vp_init(struct tdx_vp *vp, u64 initial_rcx, u32 x2apicid)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdvpr_pa(vp),
+ .rdx = initial_rcx,
+ .r8 = x2apicid,
+ };
+
+ /* apicid requires version == 1. */
+ return seamcall(TDH_VP_INIT | (1ULL << TDX_VERSION_SHIFT), &args);
+}
+EXPORT_SYMBOL_GPL(tdh_vp_init);
+
+/*
+ * TDX ABI defines output operands as PT, OWNER and SIZE. These are TDX-defined formats.
+ * So despite the names, they must be interpreted specially as described by the spec. Return
+ * them only for error reporting purposes.
+ */
+u64 tdh_phymem_page_reclaim(struct page *page, u64 *tdx_pt, u64 *tdx_owner, u64 *tdx_size)
+{
+ struct tdx_module_args args = {
+ .rcx = page_to_phys(page),
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_PHYMEM_PAGE_RECLAIM, &args);
+
+ *tdx_pt = args.rcx;
+ *tdx_owner = args.rdx;
+ *tdx_size = args.r8;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_phymem_page_reclaim);
+
+u64 tdh_mem_track(struct tdx_td *td)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ };
+
+ return seamcall(TDH_MEM_TRACK, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mem_track);
+
+u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u64 *ext_err2)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdx_tdr_pa(td),
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_MEM_PAGE_REMOVE, &args);
+
+ *ext_err1 = args.rcx;
+ *ext_err2 = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mem_page_remove);
+
+u64 tdh_phymem_cache_wb(bool resume)
+{
+ struct tdx_module_args args = {
+ .rcx = resume ? 1 : 0,
+ };
+
+ return seamcall(TDH_PHYMEM_CACHE_WB, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_phymem_cache_wb);
+
+u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td)
+{
+ struct tdx_module_args args = {};
+
+ args.rcx = mk_keyed_paddr(tdx_global_keyid, td->tdr_page);
+
+ return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_tdr);
+
+u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page)
+{
+ struct tdx_module_args args = {};
+
+ args.rcx = mk_keyed_paddr(hkid, page);
+
+ return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_hkid);
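
All of the tdh_*() helpers added above follow one pattern: marshal physical addresses into a struct tdx_module_args, issue the SEAMCALL, and copy any extended error words back out. A hedged sketch of how a caller such as KVM might combine the new keyid allocator with TDH_MNG_CREATE; the function name and error handling are illustrative only, not the actual KVM code.

/* Illustrative only: assumes @td->tdr_page has already been allocated. */
static int example_td_create(struct tdx_td *td)
{
	int keyid = tdx_guest_keyid_alloc();	/* from the IDA added above */
	u64 err;

	if (keyid < 0)
		return keyid;

	err = tdh_mng_create(td, keyid);	/* wraps SEAMCALL TDH_MNG_CREATE */
	if (err) {
		tdx_guest_keyid_free(keyid);
		return -EIO;
	}

	return keyid;				/* caller keeps the HKID */
}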
diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h
index 4e3d533cdd61..82bb82be8567 100644
--- a/arch/x86/virt/vmx/tdx/tdx.h
+++ b/arch/x86/virt/vmx/tdx/tdx.h
@@ -3,7 +3,6 @@
#define _X86_VIRT_TDX_H
#include <linux/bits.h>
-#include "tdx_global_metadata.h"
/*
* This file contains both macros and data structures defined by the TDX
@@ -15,13 +14,46 @@
/*
* TDX module SEAMCALL leaf functions
*/
-#define TDH_PHYMEM_PAGE_RDMD 24
-#define TDH_SYS_KEY_CONFIG 31
-#define TDH_SYS_INIT 33
-#define TDH_SYS_RD 34
-#define TDH_SYS_LP_INIT 35
-#define TDH_SYS_TDMR_INIT 36
-#define TDH_SYS_CONFIG 45
+#define TDH_VP_ENTER 0
+#define TDH_MNG_ADDCX 1
+#define TDH_MEM_PAGE_ADD 2
+#define TDH_MEM_SEPT_ADD 3
+#define TDH_VP_ADDCX 4
+#define TDH_MEM_PAGE_AUG 6
+#define TDH_MEM_RANGE_BLOCK 7
+#define TDH_MNG_KEY_CONFIG 8
+#define TDH_MNG_CREATE 9
+#define TDH_MNG_RD 11
+#define TDH_MR_EXTEND 16
+#define TDH_MR_FINALIZE 17
+#define TDH_VP_FLUSH 18
+#define TDH_MNG_VPFLUSHDONE 19
+#define TDH_VP_CREATE 10
+#define TDH_MNG_KEY_FREEID 20
+#define TDH_MNG_INIT 21
+#define TDH_VP_INIT 22
+#define TDH_PHYMEM_PAGE_RDMD 24
+#define TDH_VP_RD 26
+#define TDH_PHYMEM_PAGE_RECLAIM 28
+#define TDH_MEM_PAGE_REMOVE 29
+#define TDH_SYS_KEY_CONFIG 31
+#define TDH_SYS_INIT 33
+#define TDH_SYS_RD 34
+#define TDH_SYS_LP_INIT 35
+#define TDH_SYS_TDMR_INIT 36
+#define TDH_MEM_TRACK 38
+#define TDH_PHYMEM_CACHE_WB 40
+#define TDH_PHYMEM_PAGE_WBINVD 41
+#define TDH_VP_WR 43
+#define TDH_SYS_CONFIG 45
+
+/*
+ * SEAMCALL leaf:
+ *
+ * Bit 15:0 Leaf number
+ * Bit 23:16 Version number
+ */
+#define TDX_VERSION_SHIFT 16
/* TDX page types */
#define PT_NDA 0x0
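
The comment above documents how a SEAMCALL function number packs the leaf in bits 15:0 and the interface version in bits 23:16; tdh_vp_init() in the previous file uses exactly this to request version 1 of TDH.VP.INIT. A small sketch of the encoding — the TDX_SEAMCALL_VERSIONED helper name is made up for illustration:

/* Hypothetical helper; the encoding matches the comment above. */
#define TDX_SEAMCALL_VERSIONED(leaf, version) \
	((u64)(leaf) | ((u64)(version) << TDX_VERSION_SHIFT))

/* e.g. TDH.VP.INIT version 1: (22 | (1 << 16)) == 0x10016 */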
diff --git a/arch/x86/virt/vmx/tdx/tdx_global_metadata.c b/arch/x86/virt/vmx/tdx/tdx_global_metadata.c
index 8027a24d1c6e..13ad2663488b 100644
--- a/arch/x86/virt/vmx/tdx/tdx_global_metadata.c
+++ b/arch/x86/virt/vmx/tdx/tdx_global_metadata.c
@@ -37,12 +37,62 @@ static int get_tdx_sys_info_tdmr(struct tdx_sys_info_tdmr *sysinfo_tdmr)
return ret;
}
+static int get_tdx_sys_info_td_ctrl(struct tdx_sys_info_td_ctrl *sysinfo_td_ctrl)
+{
+ int ret = 0;
+ u64 val;
+
+ if (!ret && !(ret = read_sys_metadata_field(0x9800000100000000, &val)))
+ sysinfo_td_ctrl->tdr_base_size = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x9800000100000100, &val)))
+ sysinfo_td_ctrl->tdcs_base_size = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x9800000100000200, &val)))
+ sysinfo_td_ctrl->tdvps_base_size = val;
+
+ return ret;
+}
+
+static int get_tdx_sys_info_td_conf(struct tdx_sys_info_td_conf *sysinfo_td_conf)
+{
+ int ret = 0;
+ u64 val;
+ int i, j;
+
+ if (!ret && !(ret = read_sys_metadata_field(0x1900000300000000, &val)))
+ sysinfo_td_conf->attributes_fixed0 = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x1900000300000001, &val)))
+ sysinfo_td_conf->attributes_fixed1 = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x1900000300000002, &val)))
+ sysinfo_td_conf->xfam_fixed0 = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x1900000300000003, &val)))
+ sysinfo_td_conf->xfam_fixed1 = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x9900000100000004, &val)))
+ sysinfo_td_conf->num_cpuid_config = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x9900000100000008, &val)))
+ sysinfo_td_conf->max_vcpus_per_td = val;
+ if (sysinfo_td_conf->num_cpuid_config > ARRAY_SIZE(sysinfo_td_conf->cpuid_config_leaves))
+ return -EINVAL;
+ for (i = 0; i < sysinfo_td_conf->num_cpuid_config; i++)
+ if (!ret && !(ret = read_sys_metadata_field(0x9900000300000400 + i, &val)))
+ sysinfo_td_conf->cpuid_config_leaves[i] = val;
+ if (sysinfo_td_conf->num_cpuid_config > ARRAY_SIZE(sysinfo_td_conf->cpuid_config_values))
+ return -EINVAL;
+ for (i = 0; i < sysinfo_td_conf->num_cpuid_config; i++)
+ for (j = 0; j < 2; j++)
+ if (!ret && !(ret = read_sys_metadata_field(0x9900000300000500 + i * 2 + j, &val)))
+ sysinfo_td_conf->cpuid_config_values[i][j] = val;
+
+ return ret;
+}
+
static int get_tdx_sys_info(struct tdx_sys_info *sysinfo)
{
int ret = 0;
ret = ret ?: get_tdx_sys_info_features(&sysinfo->features);
ret = ret ?: get_tdx_sys_info_tdmr(&sysinfo->tdmr);
+ ret = ret ?: get_tdx_sys_info_td_ctrl(&sysinfo->td_ctrl);
+ ret = ret ?: get_tdx_sys_info_td_conf(&sysinfo->td_conf);
return ret;
}
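
get_tdx_sys_info() chains the metadata readers with the GNU C "ret = ret ?: f()" idiom: once any reader fails, the remaining calls are skipped and the first error is preserved. A tiny standalone sketch of the same short-circuit behaviour, with placeholder helper names:

/* Placeholder helpers: each returns 0 on success, -errno on failure. */
static int step_a(void), step_b(void), step_c(void);

static int run_all_steps(void)
{
	int ret = 0;

	ret = ret ?: step_a();	/* runs: ret is still 0              */
	ret = ret ?: step_b();	/* skipped if step_a() failed        */
	ret = ret ?: step_c();	/* skipped if any earlier step failed */

	return ret;		/* first error wins, 0 if all succeed */
}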
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 43dcd8c7badc..53282dc7d5ac 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -70,6 +70,9 @@ EXPORT_SYMBOL(xen_start_flags);
*/
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
+/* Number of pages released from the initial allocation. */
+unsigned long xen_released_pages;
+
static __ref void xen_get_vendor(void)
{
init_cpu_devs();
@@ -100,10 +103,6 @@ noinstr void *__xen_hypercall_setfunc(void)
void (*func)(void);
/*
- * Xen is supported only on CPUs with CPUID, so testing for
- * X86_FEATURE_CPUID is a test for early_cpu_init() having been
- * run.
- *
* Note that __xen_hypercall_setfunc() is noinstr only due to a nasty
* dependency chain: it is being called via the xen_hypercall static
* call when running as a PVH or HVM guest. Hypercalls need to be
@@ -115,8 +114,7 @@ noinstr void *__xen_hypercall_setfunc(void)
*/
instrumentation_begin();
- if (!boot_cpu_has(X86_FEATURE_CPUID))
- xen_get_vendor();
+ xen_get_vendor();
if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
@@ -466,6 +464,13 @@ int __init arch_xen_unpopulated_init(struct resource **res)
xen_free_unpopulated_pages(1, &pg);
}
+ /*
+ * Account for the region being in the physmap but unpopulated.
+ * The value in xen_released_pages is used by the balloon
+ * driver to know how much of the physmap is unpopulated and
+ * set an accurate initial memory target.
+ */
+ xen_released_pages += xen_extra_mem[i].n_pfns;
/* Zero so region is not also added to the balloon driver. */
xen_extra_mem[i].n_pfns = 0;
}
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 846b5737d320..26bbaf4b7330 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -49,7 +49,7 @@
#include <xen/hvc-console.h>
#include <xen/acpi.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
@@ -61,6 +61,7 @@
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
+#include <asm/msr.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
@@ -1086,15 +1087,15 @@ static void xen_write_cr4(unsigned long cr4)
native_write_cr4(cr4);
}
-static u64 xen_do_read_msr(unsigned int msr, int *err)
+static u64 xen_do_read_msr(u32 msr, int *err)
{
u64 val = 0; /* Avoid uninitialized value for safe variant. */
- if (pmu_msr_read(msr, &val, err))
+ if (pmu_msr_chk_emulated(msr, &val, true))
return val;
if (err)
- val = native_read_msr_safe(msr, err);
+ *err = native_read_msr_safe(msr, &val);
else
val = native_read_msr(msr);
@@ -1110,17 +1111,9 @@ static u64 xen_do_read_msr(unsigned int msr, int *err)
return val;
}
-static void set_seg(unsigned int which, unsigned int low, unsigned int high,
- int *err)
+static void set_seg(u32 which, u64 base)
{
- u64 base = ((u64)high << 32) | low;
-
- if (HYPERVISOR_set_segment_base(which, base) == 0)
- return;
-
- if (err)
- *err = -EIO;
- else
+ if (HYPERVISOR_set_segment_base(which, base))
WARN(1, "Xen set_segment_base(%u, %llx) failed\n", which, base);
}
@@ -1129,20 +1122,19 @@ static void set_seg(unsigned int which, unsigned int low, unsigned int high,
* With err == NULL write_msr() semantics are selected.
* Supplying an err pointer requires err to be pre-initialized with 0.
*/
-static void xen_do_write_msr(unsigned int msr, unsigned int low,
- unsigned int high, int *err)
+static void xen_do_write_msr(u32 msr, u64 val, int *err)
{
switch (msr) {
case MSR_FS_BASE:
- set_seg(SEGBASE_FS, low, high, err);
+ set_seg(SEGBASE_FS, val);
break;
case MSR_KERNEL_GS_BASE:
- set_seg(SEGBASE_GS_USER, low, high, err);
+ set_seg(SEGBASE_GS_USER, val);
break;
case MSR_GS_BASE:
- set_seg(SEGBASE_GS_KERNEL, low, high, err);
+ set_seg(SEGBASE_GS_KERNEL, val);
break;
case MSR_STAR:
@@ -1158,42 +1150,45 @@ static void xen_do_write_msr(unsigned int msr, unsigned int low,
break;
default:
- if (!pmu_msr_write(msr, low, high, err)) {
- if (err)
- *err = native_write_msr_safe(msr, low, high);
- else
- native_write_msr(msr, low, high);
- }
+ if (pmu_msr_chk_emulated(msr, &val, false))
+ return;
+
+ if (err)
+ *err = native_write_msr_safe(msr, val);
+ else
+ native_write_msr(msr, val);
}
}
-static u64 xen_read_msr_safe(unsigned int msr, int *err)
+static int xen_read_msr_safe(u32 msr, u64 *val)
{
- return xen_do_read_msr(msr, err);
+ int err = 0;
+
+ *val = xen_do_read_msr(msr, &err);
+ return err;
}
-static int xen_write_msr_safe(unsigned int msr, unsigned int low,
- unsigned int high)
+static int xen_write_msr_safe(u32 msr, u64 val)
{
int err = 0;
- xen_do_write_msr(msr, low, high, &err);
+ xen_do_write_msr(msr, val, &err);
return err;
}
-static u64 xen_read_msr(unsigned int msr)
+static u64 xen_read_msr(u32 msr)
{
- int err;
+ int err = 0;
return xen_do_read_msr(msr, xen_msr_safe ? &err : NULL);
}
-static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
+static void xen_write_msr(u32 msr, u64 val)
{
int err;
- xen_do_write_msr(msr, low, high, xen_msr_safe ? &err : NULL);
+ xen_do_write_msr(msr, val, xen_msr_safe ? &err : NULL);
}
/* This is called once we have the cpu_possible_mask */
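
The Xen PV changes above track the x86 tree's switch of the MSR hooks to full 64-bit values: the safe read now fills a u64 and returns an error code, and writes take a single u64 instead of a low/high pair. A hedged sketch of the converted shapes as they appear in this diff, not of the pv_ops plumbing itself:

/*
 * Converted hook shapes (matching the diff above):
 *   u64  xen_read_msr(u32 msr);
 *   int  xen_read_msr_safe(u32 msr, u64 *val);	returns 0 on success
 *   void xen_write_msr(u32 msr, u64 val);
 *   int  xen_write_msr_safe(u32 msr, u64 val);
 */
static void example_spec_ctrl_read(void)
{
	u64 val;

	if (xen_read_msr_safe(MSR_IA32_SPEC_CTRL, &val))
		val = 0;	/* MSR not readable under this hypervisor */
}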
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 0e3d930bcb89..9d25d9373945 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
+#include <linux/cpufreq.h>
+#include <linux/cpuidle.h>
#include <linux/export.h>
#include <linux/mm.h>
@@ -123,8 +125,23 @@ static void __init pvh_arch_setup(void)
{
pvh_reserve_extra_memory();
- if (xen_initial_domain())
+ if (xen_initial_domain()) {
xen_add_preferred_consoles();
+
+ /*
+ * Disable usage of CPU idle and frequency drivers: when
+ * running as hardware domain the exposed native ACPI tables
+ * cause idle and/or frequency drivers to attach and
+ * malfunction. Xen is the entity that controls the idle
+ * and frequency states.
+ *
+ * For unprivileged domains the exposed ACPI tables are
+ * fabricated and don't contain such data.
+ */
+ disable_cpuidle();
+ disable_cpufreq();
+ WARN_ON(xen_set_default_idle());
+ }
}
void __init xen_pvh_init(struct boot_params *boot_params)
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 38971c6dcd4b..2a4a8deaf612 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -578,7 +578,6 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val)
xen_mc_issue(XEN_LAZY_MMU);
}
-#if CONFIG_PGTABLE_LEVELS >= 5
__visible p4dval_t xen_p4d_val(p4d_t p4d)
{
return pte_mfn_to_pfn(p4d.p4d);
@@ -592,7 +591,6 @@ __visible p4d_t xen_make_p4d(p4dval_t p4d)
return native_make_p4d(p4d);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
-#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
void (*func)(struct mm_struct *mm, struct page *,
@@ -2222,10 +2220,8 @@ static const typeof(pv_ops) xen_mmu_ops __initconst = {
.alloc_pud = xen_alloc_pmd_init,
.release_pud = xen_release_pmd_init,
-#if CONFIG_PGTABLE_LEVELS >= 5
.p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
.make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
-#endif
.enter_mmap = xen_enter_mmap,
.exit_mmap = xen_exit_mmap,
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 10c660fae8b3..7237d56a9d3f 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -54,14 +54,20 @@ struct mc_debug_data {
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
static struct mc_debug_data mc_debug_data_early __initdata;
-static DEFINE_PER_CPU(struct mc_debug_data *, mc_debug_data) =
- &mc_debug_data_early;
static struct mc_debug_data __percpu *mc_debug_data_ptr;
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
static struct static_key mc_debug __ro_after_init;
static bool mc_debug_enabled __initdata;
+static struct mc_debug_data * __ref get_mc_debug(void)
+{
+ if (!mc_debug_data_ptr)
+ return &mc_debug_data_early;
+
+ return this_cpu_ptr(mc_debug_data_ptr);
+}
+
static int __init xen_parse_mc_debug(char *arg)
{
mc_debug_enabled = true;
@@ -71,20 +77,16 @@ static int __init xen_parse_mc_debug(char *arg)
}
early_param("xen_mc_debug", xen_parse_mc_debug);
-void mc_percpu_init(unsigned int cpu)
-{
- per_cpu(mc_debug_data, cpu) = per_cpu_ptr(mc_debug_data_ptr, cpu);
-}
-
static int __init mc_debug_enable(void)
{
unsigned long flags;
+ struct mc_debug_data __percpu *mcdb;
if (!mc_debug_enabled)
return 0;
- mc_debug_data_ptr = alloc_percpu(struct mc_debug_data);
- if (!mc_debug_data_ptr) {
+ mcdb = alloc_percpu(struct mc_debug_data);
+ if (!mcdb) {
pr_err("xen_mc_debug inactive\n");
static_key_slow_dec(&mc_debug);
return -ENOMEM;
@@ -93,7 +95,7 @@ static int __init mc_debug_enable(void)
/* Be careful when switching to percpu debug data. */
local_irq_save(flags);
xen_mc_flush();
- mc_percpu_init(0);
+ mc_debug_data_ptr = mcdb;
local_irq_restore(flags);
pr_info("xen_mc_debug active\n");
@@ -155,7 +157,7 @@ void xen_mc_flush(void)
trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
if (static_key_false(&mc_debug)) {
- mcdb = __this_cpu_read(mc_debug_data);
+ mcdb = get_mc_debug();
memcpy(mcdb->entries, b->entries,
b->mcidx * sizeof(struct multicall_entry));
}
@@ -235,7 +237,7 @@ struct multicall_space __xen_mc_entry(size_t args)
ret.mc = &b->entries[b->mcidx];
if (static_key_false(&mc_debug)) {
- struct mc_debug_data *mcdb = __this_cpu_read(mc_debug_data);
+ struct mc_debug_data *mcdb = get_mc_debug();
mcdb->caller[b->mcidx] = __builtin_return_address(0);
mcdb->argsz[b->mcidx] = args;
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index f06987b0efc3..8f89ce0b67e3 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -2,6 +2,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
+#include <asm/msr.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/page.h>
@@ -128,7 +129,7 @@ static inline uint32_t get_fam15h_addr(u32 addr)
return addr;
}
-static inline bool is_amd_pmu_msr(unsigned int msr)
+static bool is_amd_pmu_msr(u32 msr)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
@@ -194,8 +195,7 @@ static bool is_intel_pmu_msr(u32 msr_index, int *type, int *index)
}
}
-static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type,
- int index, bool is_read)
+static bool xen_intel_pmu_emulate(u32 msr, u64 *val, int type, int index, bool is_read)
{
uint64_t *reg = NULL;
struct xen_pmu_intel_ctxt *ctxt;
@@ -257,7 +257,7 @@ static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type,
return false;
}
-static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
+static bool xen_amd_pmu_emulate(u32 msr, u64 *val, bool is_read)
{
uint64_t *reg = NULL;
int i, off = 0;
@@ -298,55 +298,20 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
return false;
}
-static bool pmu_msr_chk_emulated(unsigned int msr, uint64_t *val, bool is_read,
- bool *emul)
+bool pmu_msr_chk_emulated(u32 msr, u64 *val, bool is_read)
{
int type, index = 0;
if (is_amd_pmu_msr(msr))
- *emul = xen_amd_pmu_emulate(msr, val, is_read);
- else if (is_intel_pmu_msr(msr, &type, &index))
- *emul = xen_intel_pmu_emulate(msr, val, type, index, is_read);
- else
- return false;
-
- return true;
-}
-
-bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
-{
- bool emulated;
+ return xen_amd_pmu_emulate(msr, val, is_read);
- if (!pmu_msr_chk_emulated(msr, val, true, &emulated))
- return false;
+ if (is_intel_pmu_msr(msr, &type, &index))
+ return xen_intel_pmu_emulate(msr, val, type, index, is_read);
- if (!emulated) {
- *val = err ? native_read_msr_safe(msr, err)
- : native_read_msr(msr);
- }
-
- return true;
-}
-
-bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
-{
- uint64_t val = ((uint64_t)high << 32) | low;
- bool emulated;
-
- if (!pmu_msr_chk_emulated(msr, &val, false, &emulated))
- return false;
-
- if (!emulated) {
- if (err)
- *err = native_write_msr_safe(msr, low, high);
- else
- native_write_msr(msr, low, high);
- }
-
- return true;
+ return false;
}
-static unsigned long long xen_amd_read_pmc(int counter)
+static u64 xen_amd_read_pmc(int counter)
{
struct xen_pmu_amd_ctxt *ctxt;
uint64_t *counter_regs;
@@ -354,11 +319,12 @@ static unsigned long long xen_amd_read_pmc(int counter)
uint8_t xenpmu_flags = get_xenpmu_flags();
if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
- uint32_t msr;
- int err;
+ u32 msr;
+ u64 val;
msr = amd_counters_base + (counter * amd_msr_step);
- return native_read_msr_safe(msr, &err);
+ native_read_msr_safe(msr, &val);
+ return val;
}
ctxt = &xenpmu_data->pmu.c.amd;
@@ -366,7 +332,7 @@ static unsigned long long xen_amd_read_pmc(int counter)
return counter_regs[counter];
}
-static unsigned long long xen_intel_read_pmc(int counter)
+static u64 xen_intel_read_pmc(int counter)
{
struct xen_pmu_intel_ctxt *ctxt;
uint64_t *fixed_counters;
@@ -375,15 +341,16 @@ static unsigned long long xen_intel_read_pmc(int counter)
uint8_t xenpmu_flags = get_xenpmu_flags();
if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
- uint32_t msr;
- int err;
+ u32 msr;
+ u64 val;
if (counter & (1 << INTEL_PMC_TYPE_SHIFT))
msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
else
msr = MSR_IA32_PERFCTR0 + counter;
- return native_read_msr_safe(msr, &err);
+ native_read_msr_safe(msr, &val);
+ return val;
}
ctxt = &xenpmu_data->pmu.c.intel;
@@ -396,7 +363,7 @@ static unsigned long long xen_intel_read_pmc(int counter)
return arch_cntr_pair[counter].counter;
}
-unsigned long long xen_read_pmc(int counter)
+u64 xen_read_pmc(int counter)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return xen_amd_read_pmc(counter);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index c3db71d96c43..3823e52aef52 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -37,9 +37,6 @@
#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
-/* Number of pages released from the initial allocation. */
-unsigned long xen_released_pages;
-
/* Memory map would allow PCI passthrough. */
bool xen_pv_pci_possible;
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 688ff59318ae..9bb8ff8bff30 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -305,7 +305,6 @@ static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
return rc;
xen_pmu_init(cpu);
- mc_percpu_init(cpu);
/*
* Why is this a BUG? If the hypercall fails then everything can be
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 77a6ea1c60e4..ba2f17e64321 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -13,6 +13,7 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/fixmap.h>
+#include <asm/msr.h>
#include "xen-ops.h"
@@ -39,7 +40,7 @@ void xen_arch_post_suspend(int cancelled)
static void xen_vcpu_notify_restore(void *data)
{
if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
- wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
+ wrmsrq(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
/* Boot processor notified via generic timekeeping_resume() */
if (smp_processor_id() == 0)
@@ -55,9 +56,9 @@ static void xen_vcpu_notify_suspend(void *data)
tick_suspend_local();
if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
- rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
+ rdmsrq(MSR_IA32_SPEC_CTRL, tmp);
this_cpu_write(spec_ctrl, tmp);
- wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ wrmsrq(MSR_IA32_SPEC_CTRL, 0);
}
}
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 109af12f7647..461bb1526502 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -226,9 +226,7 @@ SYM_CODE_END(xen_early_idt_handler_array)
push %rax
mov $__HYPERVISOR_iret, %eax
syscall /* Do the IRET. */
-#ifdef CONFIG_MITIGATION_SLS
- int3
-#endif
+ ud2 /* The SYSCALL should never return. */
.endm
SYM_CODE_START(xen_iret)
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 63c13a2ccf55..090349baec09 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -261,9 +261,6 @@ void xen_mc_callback(void (*fn)(void *), void *data);
*/
struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);
-/* Do percpu data initialization for multicalls. */
-void mc_percpu_init(unsigned int cpu);
-
extern bool is_xen_pmu;
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
@@ -274,10 +271,9 @@ void xen_pmu_finish(int cpu);
static inline void xen_pmu_init(int cpu) {}
static inline void xen_pmu_finish(int cpu) {}
#endif
-bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
-bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
+bool pmu_msr_chk_emulated(u32 msr, u64 *val, bool is_read);
int pmu_apic_update(uint32_t reg);
-unsigned long long xen_read_pmc(int counter);
+u64 xen_read_pmc(int counter);
#ifdef CONFIG_SMP
diff --git a/arch/xtensa/Kbuild b/arch/xtensa/Kbuild
index fd12f61745ba..015baeb765b9 100644
--- a/arch/xtensa/Kbuild
+++ b/arch/xtensa/Kbuild
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += kernel/ mm/ platforms/ boot/dts/
+obj-y += kernel/ mm/ platforms/
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index d3db28f2f811..f2f9cd9cde50 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -20,6 +20,7 @@ config XTENSA
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_TABLE_SORT
+ select GENERIC_BUILTIN_DTB
select CLONE_BACKWARDS
select COMMON_CLK
select DMA_NONCOHERENT_MMAP if MMU
@@ -462,7 +463,7 @@ config USE_OF
help
Include support for flattened device tree machine descriptions.
-config BUILTIN_DTB_SOURCE
+config BUILTIN_DTB_NAME
string "DTB to build into the kernel image"
depends on OF
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile
index d6408c16d74e..7271294ce523 100644
--- a/arch/xtensa/boot/dts/Makefile
+++ b/arch/xtensa/boot/dts/Makefile
@@ -7,7 +7,7 @@
#
#
-obj-$(CONFIG_OF) += $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_SOURCE))
+dtb-$(CONFIG_OF) += $(addsuffix .dtb, $(CONFIG_BUILTIN_DTB_NAME))
# for CONFIG_OF_ALL_DTBS test
dtb- := $(patsubst $(src)/%.dts,%.dtb, $(wildcard $(src)/*.dts))
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index 436b7cac9694..f2af1a32c9c7 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -30,7 +30,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB_SOURCE="kc705"
+CONFIG_BUILTIN_DTB_NAME="kc705"
# CONFIG_COMPACTION is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_PM=y
diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig
index 91c4c4cae8a7..88ed5284e21c 100644
--- a/arch/xtensa/configs/cadence_csp_defconfig
+++ b/arch/xtensa/configs/cadence_csp_defconfig
@@ -1,6 +1,5 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_USELIB=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
@@ -35,7 +34,7 @@ CONFIG_HIGHMEM=y
# CONFIG_PCI is not set
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB_SOURCE="csp"
+CONFIG_BUILTIN_DTB_NAME="csp"
# CONFIG_COMPACTION is not set
CONFIG_XTFPGA_LCD=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig
index fa9389869154..09e4a1d9d1f3 100644
--- a/arch/xtensa/configs/common_defconfig
+++ b/arch/xtensa/configs/common_defconfig
@@ -32,7 +32,6 @@ CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
-CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index e376238bc5ca..4427907becca 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -29,7 +29,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB_SOURCE="kc705"
+CONFIG_BUILTIN_DTB_NAME="kc705"
# CONFIG_COMPACTION is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
index c2ab4306ee20..5828228522ba 100644
--- a/arch/xtensa/configs/nommu_kc705_defconfig
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -36,7 +36,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000"
CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB_SOURCE="kc705_nommu"
+CONFIG_BUILTIN_DTB_NAME="kc705_nommu"
CONFIG_BINFMT_FLAT=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 63b56ce79f83..326966ca7831 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -33,7 +33,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB_SOURCE="lx200mx"
+CONFIG_BUILTIN_DTB_NAME="lx200mx"
# CONFIG_COMPACTION is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
diff --git a/arch/xtensa/configs/virt_defconfig b/arch/xtensa/configs/virt_defconfig
index 98acb7191cb7..e37048985b47 100644
--- a/arch/xtensa/configs/virt_defconfig
+++ b/arch/xtensa/configs/virt_defconfig
@@ -24,7 +24,7 @@ CONFIG_HIGHMEM=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x80000000@0"
CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB_SOURCE="virt"
+CONFIG_BUILTIN_DTB_NAME="virt"
# CONFIG_PARSE_BOOTPARAM is not set
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig
index 165652c45b85..ee47438f9b51 100644
--- a/arch/xtensa/configs/xip_kc705_defconfig
+++ b/arch/xtensa/configs/xip_kc705_defconfig
@@ -29,7 +29,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB_SOURCE="kc705"
+CONFIG_BUILTIN_DTB_NAME="kc705"
# CONFIG_PARSE_BOOTPARAM is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 1647a7cc3fbf..cb1725c40e36 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -269,17 +269,11 @@ static inline pte_t pte_mkwrite_novma(pte_t pte)
((__pgprot((pgprot_val(prot) & ~_PAGE_CA_MASK) | \
_PAGE_CA_BYPASS)))
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-
#define PFN_PTE_SHIFT PAGE_SHIFT
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b) (pte_val(a) == pte_val(b))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index 86c70117371b..4871e5a4d6fb 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -72,13 +72,10 @@ struct pt_regs {
/* Additional configurable registers that are used by the compiler. */
xtregs_opt_t xtregs_opt;
- /* Make sure the areg field is 16 bytes aligned. */
- int align[0] __attribute__ ((aligned(16)));
-
/* current register frame.
* Note: The ESF for kernel exceptions ends after 16 registers!
*/
- unsigned long areg[XCHAL_NUM_AREGS];
+ unsigned long areg[XCHAL_NUM_AREGS] __aligned(16);
};
# define arch_has_single_step() (1)
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index 5ee974bf8330..7db3b489c8ad 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -28,6 +28,13 @@ static inline long syscall_get_nr(struct task_struct *task,
return regs->syscall;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->syscall = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -68,6 +75,17 @@ static inline void syscall_get_arguments(struct task_struct *task,
args[i] = regs->areg[reg[i]];
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
+ unsigned int i;
+
+ for (i = 0; i < 6; ++i)
+ regs->areg[reg[i]] = args[i];
+}
+
asmlinkage long xtensa_rt_sigreturn(void);
asmlinkage long xtensa_shmat(int, char __user *, int);
asmlinkage long xtensa_fadvise64_64(int, int,
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index 183618090d05..223f1d452310 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -388,8 +388,7 @@ irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
struct pt_regs *regs = get_irq_regs();
perf_sample_data_init(&data, 0, last_period);
- if (perf_event_overflow(event, &data, regs))
- xtensa_pmu_stop(event, 0);
+ perf_event_overflow(event, &data, regs);
}
rc = IRQ_HANDLED;
diff --git a/block/Kconfig b/block/Kconfig
index df8973bc0539..15027963472d 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -211,14 +211,6 @@ config BLK_INLINE_ENCRYPTION_FALLBACK
source "block/partitions/Kconfig"
-config BLK_MQ_PCI
- def_bool PCI
-
-config BLK_MQ_VIRTIO
- bool
- depends on VIRTIO
- default y
-
config BLK_PM
def_bool PM
diff --git a/block/Makefile b/block/Makefile
index 3a941dc0d27f..c65f4da93702 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,13 +5,12 @@
obj-y := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
- blk-merge.o blk-timeout.o \
- blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
+ blk-merge.o blk-timeout.o blk-lib.o blk-mq.o \
+ blk-mq-tag.o blk-mq-dma.o blk-stat.o \
blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
genhd.o ioprio.o badblocks.o partitions/ blk-rq-qos.o \
disk-events.o blk-ia-ranges.o early-lookup.o
-obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_BLK_DEV_BSG_COMMON) += bsg.o
obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
diff --git a/block/bdev.c b/block/bdev.c
index 4844d1e27b6f..b77ddd12dc06 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -152,27 +152,65 @@ static void set_init_blocksize(struct block_device *bdev)
get_order(bsize));
}
-int set_blocksize(struct file *file, int size)
+/**
+ * bdev_validate_blocksize - check that this block size is acceptable
+ * @bdev: blockdevice to check
+ * @block_size: block size to check
+ *
+ * For block device users that do not use buffer heads or the block device
+ * page cache, make sure that this block size can be used with the device.
+ *
+ * Return: On success zero is returned, negative error code on failure.
+ */
+int bdev_validate_blocksize(struct block_device *bdev, int block_size)
{
- struct inode *inode = file->f_mapping->host;
- struct block_device *bdev = I_BDEV(inode);
-
- if (blk_validate_block_size(size))
+ if (blk_validate_block_size(block_size))
return -EINVAL;
/* Size cannot be smaller than the size supported by the device */
- if (size < bdev_logical_block_size(bdev))
+ if (block_size < bdev_logical_block_size(bdev))
return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bdev_validate_blocksize);
+
+int set_blocksize(struct file *file, int size)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct block_device *bdev = I_BDEV(inode);
+ int ret;
+
+ ret = bdev_validate_blocksize(bdev, size);
+ if (ret)
+ return ret;
+
if (!file->private_data)
return -EINVAL;
/* Don't change the size if it is same as current */
if (inode->i_blkbits != blksize_bits(size)) {
+ /*
+ * Flush and truncate the pagecache before we reconfigure the
+ * mapping geometry because folio sizes are variable now. If a
+ * reader has already allocated a folio whose size is smaller
+ * than the new min_order but invokes readahead after the new
+ * min_order becomes visible, readahead will think there are
+ * "zero" blocks per folio and crash. Take the inode and
+ * invalidation locks to avoid racing with
+ * read/write/fallocate.
+ */
+ inode_lock(inode);
+ filemap_invalidate_lock(inode->i_mapping);
+
sync_blockdev(bdev);
+ kill_bdev(bdev);
+
inode->i_blkbits = blksize_bits(size);
mapping_set_folio_min_order(inode->i_mapping, get_order(size));
kill_bdev(bdev);
+ filemap_invalidate_unlock(inode->i_mapping);
+ inode_unlock(inode);
}
return 0;
}
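
bdev_validate_blocksize() splits the pure validity check out of set_blocksize(), so callers that do not use the block-device page cache can check a block size without changing it. A hedged usage sketch; the 4096 and the error handling are illustrative:

/* Illustrative caller: validate before issuing I/O in 4 KiB units. */
static int example_check_bs(struct block_device *bdev)
{
	int ret = bdev_validate_blocksize(bdev, 4096);

	if (ret)	/* rejected by the generic or per-device checks */
		return ret;

	/* 4 KiB-sized I/O is acceptable for @bdev */
	return 0;
}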
@@ -777,13 +815,13 @@ static void blkdev_put_part(struct block_device *part)
blkdev_put_whole(whole);
}
-struct block_device *blkdev_get_no_open(dev_t dev)
+struct block_device *blkdev_get_no_open(dev_t dev, bool autoload)
{
struct block_device *bdev;
struct inode *inode;
inode = ilookup(blockdev_superblock, dev);
- if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
+ if (!inode && autoload && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
blk_request_module(dev);
inode = ilookup(blockdev_superblock, dev);
if (inode)
@@ -1005,7 +1043,7 @@ struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
if (ret)
return ERR_PTR(ret);
- bdev = blkdev_get_no_open(dev);
+ bdev = blkdev_get_no_open(dev, true);
if (!bdev)
return ERR_PTR(-ENXIO);
@@ -1272,21 +1310,17 @@ void sync_bdevs(bool wait)
/*
* Handle STATX_{DIOALIGN, WRITE_ATOMIC} for block devices.
*/
-void bdev_statx(struct path *path, struct kstat *stat,
- u32 request_mask)
+void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask)
{
- struct inode *backing_inode;
struct block_device *bdev;
- backing_inode = d_backing_inode(path->dentry);
-
/*
- * Note that backing_inode is the inode of a block device node file,
- * not the block device's internal inode. Therefore it is *not* valid
- * to use I_BDEV() here; the block device has to be looked up by i_rdev
+ * Note that d_backing_inode() returns the block device node inode, not
+ * the block device's internal inode. Therefore it is *not* valid to
+ * use I_BDEV() here; the block device has to be looked up by i_rdev
* instead.
*/
- bdev = blkdev_get_no_open(backing_inode->i_rdev);
+ bdev = blkdev_get_no_open(d_backing_inode(path->dentry)->i_rdev, false);
if (!bdev)
return;
@@ -1301,7 +1335,8 @@ void bdev_statx(struct path *path, struct kstat *stat,
generic_fill_statx_atomic_writes(stat,
queue_atomic_write_unit_min_bytes(bd_queue),
- queue_atomic_write_unit_max_bytes(bd_queue));
+ queue_atomic_write_unit_max_bytes(bd_queue),
+ 0);
}
stat->blksize = bdev_io_min(bdev);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index abd80dc13562..0cb1e9873aab 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -7210,8 +7210,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
#endif
blk_stat_disable_accounting(bfqd->queue);
- clear_bit(ELEVATOR_FLAG_DISABLE_WBT, &e->flags);
- wbt_enable_default(bfqd->queue->disk);
+ blk_queue_flag_clear(QUEUE_FLAG_DISABLE_WBT_DEF, bfqd->queue);
+ set_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT, &e->flags);
kfree(bfqd);
}
@@ -7397,7 +7397,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
/* We dispatch from request queue wide instead of hw queue */
blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
- set_bit(ELEVATOR_FLAG_DISABLE_WBT, &eq->flags);
+ blk_queue_flag_set(QUEUE_FLAG_DISABLE_WBT_DEF, q);
wbt_disable_default(q->disk);
blk_stat_enable_accounting(q);
diff --git a/block/bio-integrity-auto.c b/block/bio-integrity-auto.c
index e524c609be50..9c6657664792 100644
--- a/block/bio-integrity-auto.c
+++ b/block/bio-integrity-auto.c
@@ -9,6 +9,7 @@
* not aware of PI.
*/
#include <linux/blk-integrity.h>
+#include <linux/t10-pi.h>
#include <linux/workqueue.h>
#include "blk.h"
@@ -43,6 +44,29 @@ static void bio_integrity_verify_fn(struct work_struct *work)
bio_endio(bio);
}
+#define BIP_CHECK_FLAGS (BIP_CHECK_GUARD | BIP_CHECK_REFTAG | BIP_CHECK_APPTAG)
+static bool bip_should_check(struct bio_integrity_payload *bip)
+{
+ return bip->bip_flags & BIP_CHECK_FLAGS;
+}
+
+static bool bi_offload_capable(struct blk_integrity *bi)
+{
+ switch (bi->csum_type) {
+ case BLK_INTEGRITY_CSUM_CRC64:
+ return bi->tuple_size == sizeof(struct crc64_pi_tuple);
+ case BLK_INTEGRITY_CSUM_CRC:
+ case BLK_INTEGRITY_CSUM_IP:
+ return bi->tuple_size == sizeof(struct t10_pi_tuple);
+ default:
+ pr_warn_once("%s: unknown integrity checksum type:%d\n",
+ __func__, bi->csum_type);
+ fallthrough;
+ case BLK_INTEGRITY_CSUM_NONE:
+ return false;
+ }
+}
+
/**
* __bio_integrity_endio - Integrity I/O completion function
* @bio: Protected bio
@@ -54,12 +78,12 @@ static void bio_integrity_verify_fn(struct work_struct *work)
*/
bool __bio_integrity_endio(struct bio *bio)
{
- struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_integrity_data *bid =
container_of(bip, struct bio_integrity_data, bip);
- if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && bi->csum_type) {
+ if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
+ bip_should_check(bip)) {
INIT_WORK(&bid->work, bio_integrity_verify_fn);
queue_work(kintegrityd_wq, &bid->work);
return false;
@@ -84,6 +108,7 @@ bool bio_integrity_prep(struct bio *bio)
{
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
struct bio_integrity_data *bid;
+ bool set_flags = true;
gfp_t gfp = GFP_NOIO;
unsigned int len;
void *buf;
@@ -100,19 +125,24 @@ bool bio_integrity_prep(struct bio *bio)
switch (bio_op(bio)) {
case REQ_OP_READ:
- if (bi->flags & BLK_INTEGRITY_NOVERIFY)
- return true;
+ if (bi->flags & BLK_INTEGRITY_NOVERIFY) {
+ if (bi_offload_capable(bi))
+ return true;
+ set_flags = false;
+ }
break;
case REQ_OP_WRITE:
- if (bi->flags & BLK_INTEGRITY_NOGENERATE)
- return true;
-
/*
* Zero the memory allocated to not leak uninitialized kernel
* memory to disk for non-integrity metadata where nothing else
* initializes the memory.
*/
- if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
+ if (bi->flags & BLK_INTEGRITY_NOGENERATE) {
+ if (bi_offload_capable(bi))
+ return true;
+ set_flags = false;
+ gfp |= __GFP_ZERO;
+ } else if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
gfp |= __GFP_ZERO;
break;
default:
@@ -137,19 +167,21 @@ bool bio_integrity_prep(struct bio *bio)
bid->bip.bip_flags |= BIP_BLOCK_INTEGRITY;
bip_set_seed(&bid->bip, bio->bi_iter.bi_sector);
- if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
- bid->bip.bip_flags |= BIP_IP_CHECKSUM;
- if (bi->csum_type)
- bid->bip.bip_flags |= BIP_CHECK_GUARD;
- if (bi->flags & BLK_INTEGRITY_REF_TAG)
- bid->bip.bip_flags |= BIP_CHECK_REFTAG;
+ if (set_flags) {
+ if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
+ bid->bip.bip_flags |= BIP_IP_CHECKSUM;
+ if (bi->csum_type)
+ bid->bip.bip_flags |= BIP_CHECK_GUARD;
+ if (bi->flags & BLK_INTEGRITY_REF_TAG)
+ bid->bip.bip_flags |= BIP_CHECK_REFTAG;
+ }
if (bio_integrity_add_page(bio, virt_to_page(buf), len,
offset_in_page(buf)) < len)
goto err_end_io;
/* Auto-generate integrity metadata if this is a write */
- if (bio_data_dir(bio) == WRITE)
+ if (bio_data_dir(bio) == WRITE && bip_should_check(&bid->bip))
blk_integrity_generate(bio);
else
bid->saved_bio_iter = bio->bi_iter;
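Annotation: the NOVERIFY/NOGENERATE outcome matrix implied by the hunks above, for readers following the control flow.

	/*
	 * READ  + NOVERIFY,   offload-capable format -> no integrity payload at all
	 * READ  + NOVERIFY,   other format           -> payload allocated, no BIP_CHECK_* set
	 * WRITE + NOGENERATE, offload-capable format -> no integrity payload at all
	 * WRITE + NOGENERATE, other format           -> zeroed payload, no BIP_CHECK_* set
	 *
	 * With no BIP_CHECK_* flags set, bip_should_check() is false, so neither
	 * blk_integrity_generate() nor the read-completion verify work runs.
	 */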
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 608594a154a5..cb94e9be26dc 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -66,16 +66,12 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
}
EXPORT_SYMBOL(bio_integrity_alloc);
-static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,
- bool dirty)
+static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs)
{
int i;
- for (i = 0; i < nr_vecs; i++) {
- if (dirty && !PageCompound(bv[i].bv_page))
- set_page_dirty_lock(bv[i].bv_page);
+ for (i = 0; i < nr_vecs; i++)
unpin_user_page(bv[i].bv_page);
- }
}
static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
@@ -91,7 +87,7 @@ static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
ret = copy_to_iter(bvec_virt(bounce_bvec), bytes, &orig_iter);
WARN_ON_ONCE(ret != bytes);
- bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs, true);
+ bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs);
}
/**
@@ -111,8 +107,7 @@ void bio_integrity_unmap_user(struct bio *bio)
return;
}
- bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt,
- bio_data_dir(bio) == READ);
+ bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt);
}
/**
@@ -132,10 +127,8 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
if (bip->bip_vcnt > 0) {
struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
- bool same_page = false;
- if (bvec_try_merge_hw_page(q, bv, page, len, offset,
- &same_page)) {
+ if (bvec_try_merge_hw_page(q, bv, page, len, offset)) {
bip->bip_iter.bi_size += len;
return len;
}
@@ -198,7 +191,7 @@ static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
}
if (write)
- bio_integrity_unpin_bvec(bvec, nr_vecs, false);
+ bio_integrity_unpin_bvec(bvec, nr_vecs);
else
memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec));
@@ -319,7 +312,7 @@ int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
return 0;
release_pages:
- bio_integrity_unpin_bvec(bvec, nr_bvecs, false);
+ bio_integrity_unpin_bvec(bvec, nr_bvecs);
free_bvec:
if (bvec != stack_vec)
kfree(bvec);
diff --git a/block/bio.c b/block/bio.c
index 4e6c85a33d74..3c0a558c90f5 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -251,6 +251,7 @@ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
bio->bi_flags = 0;
bio->bi_ioprio = 0;
bio->bi_write_hint = 0;
+ bio->bi_write_stream = 0;
bio->bi_status = 0;
bio->bi_iter.bi_sector = 0;
bio->bi_iter.bi_size = 0;
@@ -611,7 +612,7 @@ struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
{
struct bio *bio;
- if (nr_vecs > UIO_MAXIOV)
+ if (nr_vecs > BIO_MAX_INLINE_VECS)
return NULL;
return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
}
@@ -827,6 +828,7 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
bio_set_flag(bio, BIO_CLONED);
bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_write_hint = bio_src->bi_write_hint;
+ bio->bi_write_stream = bio_src->bi_write_stream;
bio->bi_iter = bio_src->bi_iter;
if (bio->bi_bdev) {
@@ -918,7 +920,7 @@ static inline bool bio_full(struct bio *bio, unsigned len)
}
static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
- unsigned int len, unsigned int off, bool *same_page)
+ unsigned int len, unsigned int off)
{
size_t bv_end = bv->bv_offset + bv->bv_len;
phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
@@ -931,9 +933,7 @@ static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
return false;
- *same_page = ((vec_end_addr & PAGE_MASK) == ((page_addr + off) &
- PAGE_MASK));
- if (!*same_page) {
+ if ((vec_end_addr & PAGE_MASK) != ((page_addr + off) & PAGE_MASK)) {
if (IS_ENABLED(CONFIG_KMSAN))
return false;
if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
@@ -953,8 +953,7 @@ static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
* helpers to split. Hopefully this will go away soon.
*/
bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
- struct page *page, unsigned len, unsigned offset,
- bool *same_page)
+ struct page *page, unsigned len, unsigned offset)
{
unsigned long mask = queue_segment_boundary(q);
phys_addr_t addr1 = bvec_phys(bv);
@@ -964,7 +963,7 @@ bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
return false;
if (len > queue_max_segment_size(q) - bv->bv_len)
return false;
- return bvec_try_merge_page(bv, page, len, offset, same_page);
+ return bvec_try_merge_page(bv, page, len, offset);
}
/**
@@ -990,6 +989,22 @@ void __bio_add_page(struct bio *bio, struct page *page,
EXPORT_SYMBOL_GPL(__bio_add_page);
/**
+ * bio_add_virt_nofail - add data in the direct kernel mapping to a bio
+ * @bio: destination bio
+ * @vaddr: data to add
+ * @len: length of the data to add, may cross pages
+ *
+ * Add the data at @vaddr to @bio. The caller must have ensured a segment
+ * is available for the added data. No merging into an existing segment
+ * will be performed.
+ */
+void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len)
+{
+ __bio_add_page(bio, virt_to_page(vaddr), len, offset_in_page(vaddr));
+}
+EXPORT_SYMBOL_GPL(bio_add_virt_nofail);
+
+/**
* bio_add_page - attempt to add page(s) to bio
* @bio: destination bio
* @page: start page to add
@@ -1002,8 +1017,6 @@ EXPORT_SYMBOL_GPL(__bio_add_page);
int bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
- bool same_page = false;
-
if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
return 0;
if (bio->bi_iter.bi_size > UINT_MAX - len)
@@ -1011,7 +1024,7 @@ int bio_add_page(struct bio *bio, struct page *page,
if (bio->bi_vcnt > 0 &&
bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
- page, len, offset, &same_page)) {
+ page, len, offset)) {
bio->bi_iter.bi_size += len;
return len;
}
@@ -1058,6 +1071,61 @@ bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
}
EXPORT_SYMBOL(bio_add_folio);
+/**
+ * bio_add_vmalloc_chunk - add a vmalloc chunk to a bio
+ * @bio: destination bio
+ * @vaddr: vmalloc address to add
+ * @len: total length in bytes of the data to add
+ *
+ * Add data starting at @vaddr to @bio and return how many bytes were added.
+ * This may be less than the amount originally asked for. Returns 0 if no data
+ * could be added to @bio.
+ *
+ * This helper calls flush_kernel_vmap_range() for the range added. For reads
+ * the caller still needs to manually call invalidate_kernel_vmap_range() in
+ * the completion handler.
+ */
+unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len)
+{
+ unsigned int offset = offset_in_page(vaddr);
+
+ len = min(len, PAGE_SIZE - offset);
+ if (bio_add_page(bio, vmalloc_to_page(vaddr), len, offset) < len)
+ return 0;
+ if (op_is_write(bio_op(bio)))
+ flush_kernel_vmap_range(vaddr, len);
+ return len;
+}
+EXPORT_SYMBOL_GPL(bio_add_vmalloc_chunk);
+
+/**
+ * bio_add_vmalloc - add a vmalloc region to a bio
+ * @bio: destination bio
+ * @vaddr: vmalloc address to add
+ * @len: total length in bytes of the data to add
+ *
+ * Add data starting at @vaddr to @bio. Return %true on success or %false if
+ * @bio does not have enough space for the payload.
+ *
+ * This helper calls flush_kernel_vmap_range() for the range added. For reads
+ * the caller still needs to manually call invalidate_kernel_vmap_range() in
+ * the completion handler.
+ */
+bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len)
+{
+ do {
+ unsigned int added = bio_add_vmalloc_chunk(bio, vaddr, len);
+
+ if (!added)
+ return false;
+ vaddr += added;
+ len -= added;
+ } while (len);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(bio_add_vmalloc);
+
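Annotation: a usage sketch for bio_add_vmalloc() (hypothetical synchronous read into a vmalloc buffer; the nr_vecs sizing, which assumes the payload fits in BIO_MAX_VECS pages, and the error handling are illustrative, not from the patch).

	unsigned int nr_vecs = DIV_ROUND_UP(offset_in_page(buf) + len, PAGE_SIZE);
	struct bio *bio;
	int ret;

	bio = bio_alloc(bdev, nr_vecs, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = sector;
	if (!bio_add_vmalloc(bio, buf, len))
		ret = -EIO;				/* bio too small for the payload */
	else
		ret = submit_bio_wait(bio);
	if (!ret)
		invalidate_kernel_vmap_range(buf, len);	/* required after reads */
	bio_put(bio);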
void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
struct folio_iter fi;
@@ -1088,27 +1156,6 @@ void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter)
bio_set_flag(bio, BIO_CLONED);
}
-static int bio_iov_add_folio(struct bio *bio, struct folio *folio, size_t len,
- size_t offset)
-{
- bool same_page = false;
-
- if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len))
- return -EIO;
-
- if (bio->bi_vcnt > 0 &&
- bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
- folio_page(folio, 0), len, offset,
- &same_page)) {
- bio->bi_iter.bi_size += len;
- if (same_page && bio_flagged(bio, BIO_PAGE_PINNED))
- unpin_user_folio(folio, 1);
- return 0;
- }
- bio_add_folio_nofail(bio, folio, len, offset);
- return 0;
-}
-
static unsigned int get_contig_folio_len(unsigned int *num_pages,
struct page **pages, unsigned int i,
struct folio *folio, size_t left,
@@ -1203,6 +1250,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
for (left = size, i = 0; left > 0; left -= len, i += num_pages) {
struct page *page = pages[i];
struct folio *folio = page_folio(page);
+ unsigned int old_vcnt = bio->bi_vcnt;
folio_offset = ((size_t)folio_page_idx(folio, page) <<
PAGE_SHIFT) + offset;
@@ -1215,7 +1263,23 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
len = get_contig_folio_len(&num_pages, pages, i,
folio, left, offset);
- bio_iov_add_folio(bio, folio, len, folio_offset);
+ if (!bio_add_folio(bio, folio, len, folio_offset)) {
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (bio_flagged(bio, BIO_PAGE_PINNED)) {
+ /*
+ * We're adding another fragment of a page that already
+ * was part of the last segment. Undo our pin as the
+ * page was pinned when an earlier fragment of it was
+ * added to the bio and __bio_release_pages expects a
+ * single pin per page.
+ */
+ if (offset && bio->bi_vcnt == old_vcnt)
+ unpin_user_folio(folio, 1);
+ }
offset = 0;
}
@@ -1301,6 +1365,36 @@ int submit_bio_wait(struct bio *bio)
}
EXPORT_SYMBOL(submit_bio_wait);
+/**
+ * bdev_rw_virt - synchronously read into / write from kernel mapping
+ * @bdev: block device to access
+ * @sector: sector to access
+ * @data: data to read/write
+ * @len: length in byte to read/write
+ * @op: operation (e.g. REQ_OP_READ/REQ_OP_WRITE)
+ *
+ * Performs synchronous I/O to @bdev for @data/@len. @data must be in
+ * the kernel direct mapping and not a vmalloc address.
+ */
+int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
+ size_t len, enum req_op op)
+{
+ struct bio_vec bv;
+ struct bio bio;
+ int error;
+
+ if (WARN_ON_ONCE(is_vmalloc_addr(data)))
+ return -EIO;
+
+ bio_init(&bio, bdev, &bv, 1, op);
+ bio.bi_iter.bi_sector = sector;
+ bio_add_virt_nofail(&bio, data, len);
+ error = submit_bio_wait(&bio);
+ bio_uninit(&bio);
+ return error;
+}
+EXPORT_SYMBOL_GPL(bdev_rw_virt);
+
static void bio_wait_end_io(struct bio *bio)
{
complete(bio->bi_private);
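Annotation: a minimal caller sketch for the new bdev_rw_virt() helper (hypothetical caller; the buffer must come from the direct mapping, e.g. kmalloc()).

	void *buf = kmalloc(SECTOR_SIZE, GFP_KERNEL);
	int error;

	if (!buf)
		return -ENOMEM;
	/* synchronously read the first 512-byte sector of the device */
	error = bdev_rw_virt(bdev, 0, buf, SECTOR_SIZE, REQ_OP_READ);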
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5905f277057b..5936db7f8475 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -797,7 +797,7 @@ int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
return -EINVAL;
input = skip_spaces(input);
- bdev = blkdev_get_no_open(MKDEV(major, minor));
+ bdev = blkdev_get_no_open(MKDEV(major, minor), false);
if (!bdev)
return -ENODEV;
if (bdev_is_partition(bdev)) {
@@ -1074,8 +1074,8 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
/*
* For covering concurrent parent blkg update from blkg_release().
*
- * When flushing from cgroup, cgroup_rstat_lock is always held, so
- * this lock won't cause contention most of time.
+ * When flushing from cgroup, the subsystem rstat lock is always held,
+ * so this lock won't cause contention most of the time.
*/
raw_spin_lock_irqsave(&blkg_stat_lock, flags);
@@ -1144,7 +1144,7 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
/*
* We source root cgroup stats from the system-wide stats to avoid
* tracking the same information twice and incurring overhead when no
- * cgroups are defined. For that reason, cgroup_rstat_flush in
+ * cgroups are defined. For that reason, css_rstat_flush in
* blkcg_print_stat does not actually fill out the iostat in the root
* cgroup's blkcg_gq.
*
@@ -1253,7 +1253,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
if (!seq_css(sf)->parent)
blkcg_fill_root_iostats();
else
- cgroup_rstat_flush(blkcg->css.cgroup);
+ css_rstat_flush(&blkcg->css);
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
@@ -2243,7 +2243,7 @@ void blk_cgroup_bio_start(struct bio *bio)
}
u64_stats_update_end_irqrestore(&bis->sync, flags);
- cgroup_rstat_updated(blkcg->css.cgroup, cpu);
+ css_rstat_updated(&blkcg->css, cpu);
put_cpu();
}
diff --git a/block/blk-core.c b/block/blk-core.c
index e8cc270a453f..b862c66018f2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1018,7 +1018,7 @@ again:
stamp = READ_ONCE(part->bd_stamp);
if (unlikely(time_after(now, stamp)) &&
likely(try_cmpxchg(&part->bd_stamp, &stamp, now)) &&
- (end || part_in_flight(part)))
+ (end || bdev_count_inflight(part)))
__part_stat_add(part, io_ticks, now - stamp);
if (bdev_is_partition(part)) {
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index f154be0b575a..005c9157ffb3 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -173,6 +173,7 @@ static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
bio_set_flag(bio, BIO_REMAPPED);
bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_write_hint = bio_src->bi_write_hint;
+ bio->bi_write_stream = bio_src->bi_write_stream;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c
index 94a155912bf1..81918f6e0cae 100644
--- a/block/blk-crypto-profile.c
+++ b/block/blk-crypto-profile.c
@@ -501,6 +501,7 @@ int blk_crypto_derive_sw_secret(struct block_device *bdev,
blk_crypto_hw_exit(profile);
return err;
}
+EXPORT_SYMBOL_GPL(blk_crypto_derive_sw_secret);
int blk_crypto_import_key(struct blk_crypto_profile *profile,
const u8 *raw_key, size_t raw_key_size,
@@ -520,6 +521,7 @@ int blk_crypto_import_key(struct blk_crypto_profile *profile,
blk_crypto_hw_exit(profile);
return ret;
}
+EXPORT_SYMBOL_GPL(blk_crypto_import_key);
int blk_crypto_generate_key(struct blk_crypto_profile *profile,
u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
@@ -537,6 +539,7 @@ int blk_crypto_generate_key(struct blk_crypto_profile *profile,
blk_crypto_hw_exit(profile);
return ret;
}
+EXPORT_SYMBOL_GPL(blk_crypto_generate_key);
int blk_crypto_prepare_key(struct blk_crypto_profile *profile,
const u8 *lt_key, size_t lt_key_size,
@@ -556,6 +559,7 @@ int blk_crypto_prepare_key(struct blk_crypto_profile *profile,
blk_crypto_hw_exit(profile);
return ret;
}
+EXPORT_SYMBOL_GPL(blk_crypto_prepare_key);
/**
* blk_crypto_intersect_capabilities() - restrict supported crypto capabilities
diff --git a/block/blk-map.c b/block/blk-map.c
index d2f22744b3d1..23e5d5ebe59e 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -317,64 +317,26 @@ static void bio_map_kern_endio(struct bio *bio)
kfree(bio);
}
-/**
- * bio_map_kern - map kernel address into bio
- * @q: the struct request_queue for the bio
- * @data: pointer to buffer to map
- * @len: length in bytes
- * @gfp_mask: allocation flags for bio allocation
- *
- * Map the kernel address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
- */
-static struct bio *bio_map_kern(struct request_queue *q, void *data,
- unsigned int len, gfp_t gfp_mask)
+static struct bio *bio_map_kern(void *data, unsigned int len, enum req_op op,
+ gfp_t gfp_mask)
{
- unsigned long kaddr = (unsigned long)data;
- unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = kaddr >> PAGE_SHIFT;
- const int nr_pages = end - start;
- bool is_vmalloc = is_vmalloc_addr(data);
- struct page *page;
- int offset, i;
+ unsigned int nr_vecs = bio_add_max_vecs(data, len);
struct bio *bio;
- bio = bio_kmalloc(nr_pages, gfp_mask);
+ bio = bio_kmalloc(nr_vecs, gfp_mask);
if (!bio)
return ERR_PTR(-ENOMEM);
- bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
-
- if (is_vmalloc) {
- flush_kernel_vmap_range(data, len);
+ bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, op);
+ if (is_vmalloc_addr(data)) {
bio->bi_private = data;
- }
-
- offset = offset_in_page(kaddr);
- for (i = 0; i < nr_pages; i++) {
- unsigned int bytes = PAGE_SIZE - offset;
-
- if (len <= 0)
- break;
-
- if (bytes > len)
- bytes = len;
-
- if (!is_vmalloc)
- page = virt_to_page(data);
- else
- page = vmalloc_to_page(data);
- if (bio_add_page(bio, page, bytes, offset) < bytes) {
- /* we don't support partial mappings */
+ if (!bio_add_vmalloc(bio, data, len)) {
bio_uninit(bio);
kfree(bio);
return ERR_PTR(-EINVAL);
}
-
- data += bytes;
- len -= bytes;
- offset = 0;
+ } else {
+ bio_add_virt_nofail(bio, data, len);
}
-
bio->bi_end_io = bio_map_kern_endio;
return bio;
}
@@ -402,17 +364,16 @@ static void bio_copy_kern_endio_read(struct bio *bio)
/**
* bio_copy_kern - copy kernel address into bio
- * @q: the struct request_queue for the bio
* @data: pointer to buffer to copy
* @len: length in bytes
+ * @op: bio/request operation
* @gfp_mask: allocation flags for bio and page allocation
- * @reading: data direction is READ
*
* copy the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-static struct bio *bio_copy_kern(struct request_queue *q, void *data,
- unsigned int len, gfp_t gfp_mask, int reading)
+static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op,
+ gfp_t gfp_mask)
{
unsigned long kaddr = (unsigned long)data;
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -431,7 +392,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
bio = bio_kmalloc(nr_pages, gfp_mask);
if (!bio)
return ERR_PTR(-ENOMEM);
- bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
+ bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, op);
while (len) {
struct page *page;
@@ -444,7 +405,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
if (!page)
goto cleanup;
- if (!reading)
+ if (op_is_write(op))
memcpy(page_address(page), p, bytes);
if (bio_add_page(bio, page, bytes, 0) < bytes)
@@ -454,11 +415,11 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
p += bytes;
}
- if (reading) {
+ if (op_is_write(op)) {
+ bio->bi_end_io = bio_copy_kern_endio;
+ } else {
bio->bi_end_io = bio_copy_kern_endio_read;
bio->bi_private = data;
- } else {
- bio->bi_end_io = bio_copy_kern_endio;
}
return bio;
@@ -556,8 +517,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
if (map_data)
copy = true;
- else if (blk_queue_may_bounce(q))
- copy = true;
else if (iov_iter_alignment(iter) & align)
copy = true;
else if (iov_iter_is_bvec(iter))
@@ -689,7 +648,6 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
/**
* blk_rq_map_kern - map kernel data to a request, for passthrough requests
- * @q: request queue where request should be inserted
* @rq: request to fill
* @kbuf: the kernel buffer
* @len: length of user data
@@ -700,31 +658,26 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
* buffer is used. Can be called multiple times to append multiple
* buffers.
*/
-int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
- unsigned int len, gfp_t gfp_mask)
+int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
+ gfp_t gfp_mask)
{
- int reading = rq_data_dir(rq) == READ;
unsigned long addr = (unsigned long) kbuf;
struct bio *bio;
int ret;
- if (len > (queue_max_hw_sectors(q) << 9))
+ if (len > (queue_max_hw_sectors(rq->q) << SECTOR_SHIFT))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
- if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
- blk_queue_may_bounce(q))
- bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+ if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf))
+ bio = bio_copy_kern(kbuf, len, req_op(rq), gfp_mask);
else
- bio = bio_map_kern(q, kbuf, len, gfp_mask);
+ bio = bio_map_kern(kbuf, len, req_op(rq), gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
- bio->bi_opf &= ~REQ_OP_MASK;
- bio->bi_opf |= req_op(rq);
-
ret = blk_rq_append_bio(rq, bio);
if (unlikely(ret)) {
bio_uninit(bio);
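Annotation: after this change callers no longer pass the request_queue; a caller sketch (hypothetical passthrough setup, label and buffer names are made up).

	/* rq->q supplies the limits; data direction comes from req_op(rq) */
	ret = blk_rq_map_kern(rq, cmd_buf, cmd_len, GFP_NOIO);
	if (ret)
		goto out_put_request;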
diff --git a/block/blk-merge.c b/block/blk-merge.c
index fdd4efb54c6c..3af1d284add5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -7,7 +7,6 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
-#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include <linux/blk-cgroup.h>
@@ -226,27 +225,6 @@ static inline unsigned get_max_io_size(struct bio *bio,
}
/**
- * get_max_segment_size() - maximum number of bytes to add as a single segment
- * @lim: Request queue limits.
- * @paddr: address of the range to add
- * @len: maximum length available to add at @paddr
- *
- * Returns the maximum number of bytes of the range starting at @paddr that can
- * be added to a single segment.
- */
-static inline unsigned get_max_segment_size(const struct queue_limits *lim,
- phys_addr_t paddr, unsigned int len)
-{
- /*
- * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
- * after having calculated the minimum.
- */
- return min_t(unsigned long, len,
- min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
- (unsigned long)lim->max_segment_size - 1) + 1);
-}
-
-/**
* bvec_split_segs - verify whether or not a bvec should be split in the middle
* @lim: [in] queue limits to split based on
* @bv: [in] bvec to examine
@@ -473,117 +451,6 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
return nr_phys_segs;
}
-struct phys_vec {
- phys_addr_t paddr;
- u32 len;
-};
-
-static bool blk_map_iter_next(struct request *req,
- struct req_iterator *iter, struct phys_vec *vec)
-{
- unsigned int max_size;
- struct bio_vec bv;
-
- if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
- if (!iter->bio)
- return false;
- vec->paddr = bvec_phys(&req->special_vec);
- vec->len = req->special_vec.bv_len;
- iter->bio = NULL;
- return true;
- }
-
- if (!iter->iter.bi_size)
- return false;
-
- bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
- vec->paddr = bvec_phys(&bv);
- max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
- bv.bv_len = min(bv.bv_len, max_size);
- bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);
-
- /*
- * If we are entirely done with this bi_io_vec entry, check if the next
- * one could be merged into it. This typically happens when moving to
- * the next bio, but some callers also don't pack bvecs tight.
- */
- while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) {
- struct bio_vec next;
-
- if (!iter->iter.bi_size) {
- if (!iter->bio->bi_next)
- break;
- iter->bio = iter->bio->bi_next;
- iter->iter = iter->bio->bi_iter;
- }
-
- next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
- if (bv.bv_len + next.bv_len > max_size ||
- !biovec_phys_mergeable(req->q, &bv, &next))
- break;
-
- bv.bv_len += next.bv_len;
- bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
- }
-
- vec->len = bv.bv_len;
- return true;
-}
-
-static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
- struct scatterlist *sglist)
-{
- if (!*sg)
- return sglist;
-
- /*
- * If the driver previously mapped a shorter list, we could see a
- * termination bit prematurely unless it fully inits the sg table
- * on each mapping. We KNOW that there must be more entries here
- * or the driver would be buggy, so force clear the termination bit
- * to avoid doing a full sg_init_table() in drivers for each command.
- */
- sg_unmark_end(*sg);
- return sg_next(*sg);
-}
-
-/*
- * Map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_phys_segments entries.
- */
-int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
- struct scatterlist **last_sg)
-{
- struct req_iterator iter = {
- .bio = rq->bio,
- };
- struct phys_vec vec;
- int nsegs = 0;
-
- /* the internal flush request may not have bio attached */
- if (iter.bio)
- iter.iter = iter.bio->bi_iter;
-
- while (blk_map_iter_next(rq, &iter, &vec)) {
- *last_sg = blk_next_sg(last_sg, sglist);
- sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
- offset_in_page(vec.paddr));
- nsegs++;
- }
-
- if (*last_sg)
- sg_mark_end(*last_sg);
-
- /*
- * Something must have been wrong if the figured number of
- * segment is bigger than number of req's physical segments
- */
- WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
-
- return nsegs;
-}
-EXPORT_SYMBOL(__blk_rq_map_sg);
-
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
sector_t offset)
{
@@ -832,6 +699,8 @@ static struct request *attempt_merge(struct request_queue *q,
if (req->bio->bi_write_hint != next->bio->bi_write_hint)
return NULL;
+ if (req->bio->bi_write_stream != next->bio->bi_write_stream)
+ return NULL;
if (req->bio->bi_ioprio != next->bio->bi_ioprio)
return NULL;
if (!blk_atomic_write_mergeable_rqs(req, next))
@@ -953,6 +822,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
return false;
if (rq->bio->bi_write_hint != bio->bi_write_hint)
return false;
+ if (rq->bio->bi_write_stream != bio->bi_write_stream)
+ return false;
if (rq->bio->bi_ioprio != bio->bi_ioprio)
return false;
if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 3421b5521fe2..29b3540dd180 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -93,6 +93,8 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
QUEUE_FLAG_NAME(HCTX_ACTIVE),
QUEUE_FLAG_NAME(SQ_SCHED),
+ QUEUE_FLAG_NAME(DISABLE_WBT_DEF),
+ QUEUE_FLAG_NAME(NO_ELV_SWITCH),
};
#undef QUEUE_FLAG_NAME
@@ -624,20 +626,9 @@ void blk_mq_debugfs_register(struct request_queue *q)
debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
- /*
- * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
- * didn't exist yet (because we don't know what to name the directory
- * until the queue is registered to a gendisk).
- */
- if (q->elevator && !q->sched_debugfs_dir)
- blk_mq_debugfs_register_sched(q);
-
- /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
queue_for_each_hw_ctx(q, hctx, i) {
if (!hctx->debugfs_dir)
blk_mq_debugfs_register_hctx(q, hctx);
- if (q->elevator && !hctx->sched_debugfs_dir)
- blk_mq_debugfs_register_sched_hctx(q, hctx);
}
if (q->rq_qos) {
diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
new file mode 100644
index 000000000000..82bae475dfa4
--- /dev/null
+++ b/block/blk-mq-dma.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Christoph Hellwig
+ */
+#include "blk.h"
+
+struct phys_vec {
+ phys_addr_t paddr;
+ u32 len;
+};
+
+static bool blk_map_iter_next(struct request *req, struct req_iterator *iter,
+ struct phys_vec *vec)
+{
+ unsigned int max_size;
+ struct bio_vec bv;
+
+ if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ if (!iter->bio)
+ return false;
+ vec->paddr = bvec_phys(&req->special_vec);
+ vec->len = req->special_vec.bv_len;
+ iter->bio = NULL;
+ return true;
+ }
+
+ if (!iter->iter.bi_size)
+ return false;
+
+ bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+ vec->paddr = bvec_phys(&bv);
+ max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
+ bv.bv_len = min(bv.bv_len, max_size);
+ bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);
+
+ /*
+ * If we are entirely done with this bi_io_vec entry, check if the next
+ * one could be merged into it. This typically happens when moving to
+ * the next bio, but some callers also don't pack bvecs tight.
+ */
+ while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) {
+ struct bio_vec next;
+
+ if (!iter->iter.bi_size) {
+ if (!iter->bio->bi_next)
+ break;
+ iter->bio = iter->bio->bi_next;
+ iter->iter = iter->bio->bi_iter;
+ }
+
+ next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+ if (bv.bv_len + next.bv_len > max_size ||
+ !biovec_phys_mergeable(req->q, &bv, &next))
+ break;
+
+ bv.bv_len += next.bv_len;
+ bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
+ }
+
+ vec->len = bv.bv_len;
+ return true;
+}
+
+static inline struct scatterlist *
+blk_next_sg(struct scatterlist **sg, struct scatterlist *sglist)
+{
+ if (!*sg)
+ return sglist;
+
+ /*
+ * If the driver previously mapped a shorter list, we could see a
+ * termination bit prematurely unless it fully inits the sg table
+ * on each mapping. We KNOW that there must be more entries here
+ * or the driver would be buggy, so force clear the termination bit
+ * to avoid doing a full sg_init_table() in drivers for each command.
+ */
+ sg_unmark_end(*sg);
+ return sg_next(*sg);
+}
+
+/*
+ * Map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries.
+ */
+int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
+ struct scatterlist **last_sg)
+{
+ struct req_iterator iter = {
+ .bio = rq->bio,
+ };
+ struct phys_vec vec;
+ int nsegs = 0;
+
+ /* the internal flush request may not have bio attached */
+ if (iter.bio)
+ iter.iter = iter.bio->bi_iter;
+
+ while (blk_map_iter_next(rq, &iter, &vec)) {
+ *last_sg = blk_next_sg(last_sg, sglist);
+ sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
+ offset_in_page(vec.paddr));
+ nsegs++;
+ }
+
+ if (*last_sg)
+ sg_mark_end(*last_sg);
+
+ /*
+	 * Something must have been wrong if the computed number of
+	 * segments is bigger than the number of req's physical segments
+ */
+ WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
+
+ return nsegs;
+}
+EXPORT_SYMBOL(__blk_rq_map_sg);
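Annotation: these helpers move here from blk-merge.c; a driver-side usage sketch for reference (assumed caller, not part of the new file).

	struct scatterlist *last_sg = NULL;
	int nsegs;

	/* the sg table must hold at least blk_rq_nr_phys_segments(rq) entries */
	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
	nsegs = __blk_rq_map_sg(rq, sgl, &last_sg);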
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 109611445d40..55a0fd105147 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -59,19 +59,17 @@ static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
struct request *rq;
LIST_HEAD(hctx_list);
- unsigned int count = 0;
list_for_each_entry(rq, rq_list, queuelist) {
if (rq->mq_hctx != hctx) {
list_cut_before(&hctx_list, rq_list, &rq->queuelist);
goto dispatch;
}
- count++;
}
list_splice_tail_init(rq_list, &hctx_list);
dispatch:
- return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
+ return blk_mq_dispatch_rq_list(hctx, &hctx_list, false);
}
#define BLK_MQ_BUDGET_DELAY 3 /* ms units */
@@ -167,7 +165,7 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
} while (!list_empty(&rq_list));
} else {
- dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
+ dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, false);
}
if (busy)
@@ -261,7 +259,7 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
/* round robin for fair dispatch */
ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
- } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));
+ } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, false));
WRITE_ONCE(hctx->dispatch_from, ctx);
return ret;
@@ -298,7 +296,7 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
*/
if (!list_empty(&rq_list)) {
blk_mq_sched_mark_restart_hctx(hctx);
- if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0))
+ if (!blk_mq_dispatch_rq_list(hctx, &rq_list, true))
return 0;
need_dispatch = true;
} else {
@@ -312,7 +310,7 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
if (need_dispatch)
return blk_mq_do_dispatch_ctx(hctx);
blk_mq_flush_busy_ctxs(hctx, &rq_list);
- blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
+ blk_mq_dispatch_rq_list(hctx, &rq_list, true);
return 0;
}
@@ -436,6 +434,30 @@ static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
return 0;
}
+void blk_mq_sched_reg_debugfs(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long i;
+
+ mutex_lock(&q->debugfs_mutex);
+ blk_mq_debugfs_register_sched(q);
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_debugfs_register_sched_hctx(q, hctx);
+ mutex_unlock(&q->debugfs_mutex);
+}
+
+void blk_mq_sched_unreg_debugfs(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long i;
+
+ mutex_lock(&q->debugfs_mutex);
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_debugfs_unregister_sched_hctx(hctx);
+ blk_mq_debugfs_unregister_sched(q);
+ mutex_unlock(&q->debugfs_mutex);
+}
+
/* caller must have a reference to @e, will grab another one if successful */
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
@@ -469,10 +491,6 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
if (ret)
goto err_free_map_and_rqs;
- mutex_lock(&q->debugfs_mutex);
- blk_mq_debugfs_register_sched(q);
- mutex_unlock(&q->debugfs_mutex);
-
queue_for_each_hw_ctx(q, hctx, i) {
if (e->ops.init_hctx) {
ret = e->ops.init_hctx(hctx, i);
@@ -484,11 +502,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
return ret;
}
}
- mutex_lock(&q->debugfs_mutex);
- blk_mq_debugfs_register_sched_hctx(q, hctx);
- mutex_unlock(&q->debugfs_mutex);
}
-
return 0;
err_free_map_and_rqs:
@@ -527,10 +541,6 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
unsigned int flags = 0;
queue_for_each_hw_ctx(q, hctx, i) {
- mutex_lock(&q->debugfs_mutex);
- blk_mq_debugfs_unregister_sched_hctx(hctx);
- mutex_unlock(&q->debugfs_mutex);
-
if (e->type->ops.exit_hctx && hctx->sched_data) {
e->type->ops.exit_hctx(hctx, i);
hctx->sched_data = NULL;
@@ -538,12 +548,9 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
flags = hctx->flags;
}
- mutex_lock(&q->debugfs_mutex);
- blk_mq_debugfs_unregister_sched(q);
- mutex_unlock(&q->debugfs_mutex);
-
if (e->type->ops.exit_sched)
e->type->ops.exit_sched(e);
blk_mq_sched_tags_teardown(q, flags);
+ set_bit(ELEVATOR_FLAG_DYING, &q->elevator->flags);
q->elevator = NULL;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c2697db59109..4806b867e37d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -89,7 +89,7 @@ struct mq_inflight {
unsigned int inflight[2];
};
-static bool blk_mq_check_inflight(struct request *rq, void *priv)
+static bool blk_mq_check_in_driver(struct request *rq, void *priv)
{
struct mq_inflight *mi = priv;
@@ -101,24 +101,14 @@ static bool blk_mq_check_inflight(struct request *rq, void *priv)
return true;
}
-unsigned int blk_mq_in_flight(struct request_queue *q,
- struct block_device *part)
+void blk_mq_in_driver_rw(struct block_device *part, unsigned int inflight[2])
{
struct mq_inflight mi = { .part = part };
- blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
-
- return mi.inflight[0] + mi.inflight[1];
-}
-
-void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
- unsigned int inflight[2])
-{
- struct mq_inflight mi = { .part = part };
-
- blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
- inflight[0] = mi.inflight[0];
- inflight[1] = mi.inflight[1];
+ blk_mq_queue_tag_busy_iter(bdev_get_queue(part), blk_mq_check_in_driver,
+ &mi);
+ inflight[READ] = mi.inflight[READ];
+ inflight[WRITE] = mi.inflight[WRITE];
}
#ifdef CONFIG_LOCKDEP
@@ -584,9 +574,13 @@ static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
struct blk_mq_alloc_data data = {
.q = q,
.flags = flags,
+ .shallow_depth = 0,
.cmd_flags = opf,
+ .rq_flags = 0,
.nr_tags = plug->nr_ios,
.cached_rqs = &plug->cached_rqs,
+ .ctx = NULL,
+ .hctx = NULL
};
struct request *rq;
@@ -646,8 +640,13 @@ struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
struct blk_mq_alloc_data data = {
.q = q,
.flags = flags,
+ .shallow_depth = 0,
.cmd_flags = opf,
+ .rq_flags = 0,
.nr_tags = 1,
+ .cached_rqs = NULL,
+ .ctx = NULL,
+ .hctx = NULL
};
int ret;
@@ -675,8 +674,13 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
struct blk_mq_alloc_data data = {
.q = q,
.flags = flags,
+ .shallow_depth = 0,
.cmd_flags = opf,
+ .rq_flags = 0,
.nr_tags = 1,
+ .cached_rqs = NULL,
+ .ctx = NULL,
+ .hctx = NULL
};
u64 alloc_time_ns = 0;
struct request *rq;
@@ -2080,7 +2084,7 @@ static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
* Returns true if we did some work AND can potentially do more.
*/
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
- unsigned int nr_budgets)
+ bool get_budget)
{
enum prep_dispatch prep;
struct request_queue *q = hctx->queue;
@@ -2102,7 +2106,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
rq = list_first_entry(list, struct request, queuelist);
WARN_ON_ONCE(hctx != rq->mq_hctx);
- prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
+ prep = blk_mq_prep_dispatch_rq(rq, get_budget);
if (prep != PREP_DISPATCH_OK)
break;
@@ -2111,12 +2115,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
bd.rq = rq;
bd.last = list_empty(list);
- /*
- * once the request is queued to lld, no need to cover the
- * budget any more
- */
- if (nr_budgets)
- nr_budgets--;
ret = q->mq_ops->queue_rq(hctx, &bd);
switch (ret) {
case BLK_STS_OK:
@@ -2150,7 +2148,11 @@ out:
((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
blk_mq_is_shared_tags(hctx->flags));
- if (nr_budgets)
+ /*
+ * If the caller allocated budgets, free the budgets of the
+ * requests that have not yet been passed to the block driver.
+ */
+ if (!get_budget)
blk_mq_release_budgets(q, list);
spin_lock(&hctx->lock);
@@ -2778,15 +2780,15 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
return __blk_mq_issue_directly(hctx, rq, last);
}
-static void blk_mq_plug_issue_direct(struct blk_plug *plug)
+static void blk_mq_issue_direct(struct rq_list *rqs)
{
struct blk_mq_hw_ctx *hctx = NULL;
struct request *rq;
int queued = 0;
blk_status_t ret = BLK_STS_OK;
- while ((rq = rq_list_pop(&plug->mq_list))) {
- bool last = rq_list_empty(&plug->mq_list);
+ while ((rq = rq_list_pop(rqs))) {
+ bool last = rq_list_empty(rqs);
if (hctx != rq->mq_hctx) {
if (hctx) {
@@ -2817,15 +2819,64 @@ out:
blk_mq_commit_rqs(hctx, queued, false);
}
-static void __blk_mq_flush_plug_list(struct request_queue *q,
- struct blk_plug *plug)
+static void __blk_mq_flush_list(struct request_queue *q, struct rq_list *rqs)
{
if (blk_queue_quiesced(q))
return;
- q->mq_ops->queue_rqs(&plug->mq_list);
+ q->mq_ops->queue_rqs(rqs);
+}
+
+static unsigned blk_mq_extract_queue_requests(struct rq_list *rqs,
+ struct rq_list *queue_rqs)
+{
+ struct request *rq = rq_list_pop(rqs);
+ struct request_queue *this_q = rq->q;
+ struct request **prev = &rqs->head;
+ struct rq_list matched_rqs = {};
+ struct request *last = NULL;
+ unsigned depth = 1;
+
+ rq_list_add_tail(&matched_rqs, rq);
+ while ((rq = *prev)) {
+ if (rq->q == this_q) {
+ /* move rq from rqs to matched_rqs */
+ *prev = rq->rq_next;
+ rq_list_add_tail(&matched_rqs, rq);
+ depth++;
+ } else {
+ /* leave rq in rqs */
+ prev = &rq->rq_next;
+ last = rq;
+ }
+ }
+
+ rqs->tail = last;
+ *queue_rqs = matched_rqs;
+ return depth;
+}
+
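	/*
	 * Worked example (annotation, not part of the patch): if rqs holds
	 * r1(qA), r2(qB), r3(qA), r4(qC), one call moves r1 and r3 into
	 * queue_rqs and returns depth = 2, leaving rqs as r2, r4 with
	 * rqs->tail updated to r4.  Repeated calls thus peel off one
	 * queue's requests at a time, preserving order within each queue.
	 */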
+static void blk_mq_dispatch_queue_requests(struct rq_list *rqs, unsigned depth)
+{
+ struct request_queue *q = rq_list_peek(rqs)->q;
+
+ trace_block_unplug(q, depth, true);
+
+ /*
+ * Peek first request and see if we have a ->queue_rqs() hook.
+ * If we do, we can dispatch the whole list in one go.
+ * We already know at this point that all requests belong to the
+ * same queue, caller must ensure that's the case.
+ */
+ if (q->mq_ops->queue_rqs) {
+ blk_mq_run_dispatch_ops(q, __blk_mq_flush_list(q, rqs));
+ if (rq_list_empty(rqs))
+ return;
+ }
+
+ blk_mq_run_dispatch_ops(q, blk_mq_issue_direct(rqs));
}
-static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
+static void blk_mq_dispatch_list(struct rq_list *rqs, bool from_sched)
{
struct blk_mq_hw_ctx *this_hctx = NULL;
struct blk_mq_ctx *this_ctx = NULL;
@@ -2835,7 +2886,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
LIST_HEAD(list);
do {
- struct request *rq = rq_list_pop(&plug->mq_list);
+ struct request *rq = rq_list_pop(rqs);
if (!this_hctx) {
this_hctx = rq->mq_hctx;
@@ -2848,9 +2899,9 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
}
list_add_tail(&rq->queuelist, &list);
depth++;
- } while (!rq_list_empty(&plug->mq_list));
+ } while (!rq_list_empty(rqs));
- plug->mq_list = requeue_list;
+ *rqs = requeue_list;
trace_block_unplug(this_hctx->queue, depth, !from_sched);
percpu_ref_get(&this_hctx->queue->q_usage_counter);
@@ -2870,9 +2921,21 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
percpu_ref_put(&this_hctx->queue->q_usage_counter);
}
+static void blk_mq_dispatch_multiple_queue_requests(struct rq_list *rqs)
+{
+ do {
+ struct rq_list queue_rqs;
+ unsigned depth;
+
+ depth = blk_mq_extract_queue_requests(rqs, &queue_rqs);
+ blk_mq_dispatch_queue_requests(&queue_rqs, depth);
+ while (!rq_list_empty(&queue_rqs))
+ blk_mq_dispatch_list(&queue_rqs, false);
+ } while (!rq_list_empty(rqs));
+}
+
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
- struct request *rq;
unsigned int depth;
/*
@@ -2887,34 +2950,19 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
depth = plug->rq_count;
plug->rq_count = 0;
- if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
- struct request_queue *q;
-
- rq = rq_list_peek(&plug->mq_list);
- q = rq->q;
- trace_block_unplug(q, depth, true);
-
- /*
- * Peek first request and see if we have a ->queue_rqs() hook.
- * If we do, we can dispatch the whole plug list in one go. We
- * already know at this point that all requests belong to the
- * same queue, caller must ensure that's the case.
- */
- if (q->mq_ops->queue_rqs) {
- blk_mq_run_dispatch_ops(q,
- __blk_mq_flush_plug_list(q, plug));
- if (rq_list_empty(&plug->mq_list))
- return;
+ if (!plug->has_elevator && !from_schedule) {
+ if (plug->multiple_queues) {
+ blk_mq_dispatch_multiple_queue_requests(&plug->mq_list);
+ return;
}
- blk_mq_run_dispatch_ops(q,
- blk_mq_plug_issue_direct(plug));
+ blk_mq_dispatch_queue_requests(&plug->mq_list, depth);
if (rq_list_empty(&plug->mq_list))
return;
}
do {
- blk_mq_dispatch_plug_list(plug, from_schedule);
+ blk_mq_dispatch_list(&plug->mq_list, from_schedule);
} while (!rq_list_empty(&plug->mq_list));
}
@@ -2969,8 +3017,14 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
{
struct blk_mq_alloc_data data = {
.q = q,
- .nr_tags = 1,
+ .flags = 0,
+ .shallow_depth = 0,
.cmd_flags = bio->bi_opf,
+ .rq_flags = 0,
+ .nr_tags = 1,
+ .cached_rqs = NULL,
+ .ctx = NULL,
+ .hctx = NULL
};
struct request *rq;
@@ -3080,8 +3134,6 @@ void blk_mq_submit_bio(struct bio *bio)
goto new_request;
}
- bio = blk_queue_bounce(bio, q);
-
/*
* The cached request already holds a q_usage_counter reference and we
* don't have to acquire a new one if we use it.
@@ -4094,8 +4146,6 @@ static void blk_mq_map_swqueue(struct request_queue *q)
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
- mutex_lock(&q->elevator_lock);
-
queue_for_each_hw_ctx(q, hctx, i) {
cpumask_clear(hctx->cpumask);
hctx->nr_ctx = 0;
@@ -4200,8 +4250,6 @@ static void blk_mq_map_swqueue(struct request_queue *q)
hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
}
-
- mutex_unlock(&q->elevator_lock);
}
/*
@@ -4505,16 +4553,9 @@ static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
}
static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
- struct request_queue *q, bool lock)
+ struct request_queue *q)
{
- if (lock) {
- /* protect against switching io scheduler */
- mutex_lock(&q->elevator_lock);
- __blk_mq_realloc_hw_ctxs(set, q);
- mutex_unlock(&q->elevator_lock);
- } else {
- __blk_mq_realloc_hw_ctxs(set, q);
- }
+ __blk_mq_realloc_hw_ctxs(set, q);
/* unregister cpuhp callbacks for exited hctxs */
blk_mq_remove_hw_queues_cpuhp(q);
@@ -4546,7 +4587,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
xa_init(&q->hctx_table);
- blk_mq_realloc_hw_ctxs(set, q, false);
+ blk_mq_realloc_hw_ctxs(set, q);
if (!q->nr_hw_queues)
goto err_hctxs;
@@ -4563,8 +4604,8 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
q->nr_requests = set->queue_depth;
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
- blk_mq_add_queue_tag_set(set, q);
blk_mq_map_swqueue(q);
+ blk_mq_add_queue_tag_set(set, q);
return 0;
err_hctxs:
@@ -4784,6 +4825,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
goto out_free_srcu;
}
+ init_rwsem(&set->update_nr_hwq_lock);
+
ret = -ENOMEM;
set->tags = kcalloc_node(set->nr_hw_queues,
sizeof(struct blk_mq_tags *), GFP_KERNEL,
@@ -4923,88 +4966,10 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
return ret;
}
-/*
- * request_queue and elevator_type pair.
- * It is just used by __blk_mq_update_nr_hw_queues to cache
- * the elevator_type associated with a request_queue.
- */
-struct blk_mq_qe_pair {
- struct list_head node;
- struct request_queue *q;
- struct elevator_type *type;
-};
-
-/*
- * Cache the elevator_type in qe pair list and switch the
- * io scheduler to 'none'
- */
-static bool blk_mq_elv_switch_none(struct list_head *head,
- struct request_queue *q)
-{
- struct blk_mq_qe_pair *qe;
-
- qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
- if (!qe)
- return false;
-
- /* Accessing q->elevator needs protection from ->elevator_lock. */
- mutex_lock(&q->elevator_lock);
-
- if (!q->elevator) {
- kfree(qe);
- goto unlock;
- }
-
- INIT_LIST_HEAD(&qe->node);
- qe->q = q;
- qe->type = q->elevator->type;
- /* keep a reference to the elevator module as we'll switch back */
- __elevator_get(qe->type);
- list_add(&qe->node, head);
- elevator_disable(q);
-unlock:
- mutex_unlock(&q->elevator_lock);
-
- return true;
-}
-
-static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
- struct request_queue *q)
-{
- struct blk_mq_qe_pair *qe;
-
- list_for_each_entry(qe, head, node)
- if (qe->q == q)
- return qe;
-
- return NULL;
-}
-
-static void blk_mq_elv_switch_back(struct list_head *head,
- struct request_queue *q)
-{
- struct blk_mq_qe_pair *qe;
- struct elevator_type *t;
-
- qe = blk_lookup_qe_pair(head, q);
- if (!qe)
- return;
- t = qe->type;
- list_del(&qe->node);
- kfree(qe);
-
- mutex_lock(&q->elevator_lock);
- elevator_switch(q, t);
- /* drop the reference acquired in blk_mq_elv_switch_none */
- elevator_put(t);
- mutex_unlock(&q->elevator_lock);
-}
-
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
int nr_hw_queues)
{
struct request_queue *q;
- LIST_HEAD(head);
int prev_nr_hw_queues = set->nr_hw_queues;
unsigned int memflags;
int i;
@@ -5019,30 +4984,24 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
return;
memflags = memalloc_noio_save();
- list_for_each_entry(q, &set->tag_list, tag_set_list)
- blk_mq_freeze_queue_nomemsave(q);
-
- /*
- * Switch IO scheduler to 'none', cleaning up the data associated
- * with the previous scheduler. We will switch back once we are done
- * updating the new sw to hw queue mappings.
- */
- list_for_each_entry(q, &set->tag_list, tag_set_list)
- if (!blk_mq_elv_switch_none(&head, q))
- goto switch_back;
-
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_debugfs_unregister_hctxs(q);
blk_mq_sysfs_unregister_hctxs(q);
}
- if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+ blk_mq_freeze_queue_nomemsave(q);
+
+ if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0) {
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+ blk_mq_unfreeze_queue_nomemrestore(q);
goto reregister;
+ }
fallback:
blk_mq_update_queue_map(set);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
- blk_mq_realloc_hw_ctxs(set, q, true);
+ __blk_mq_realloc_hw_ctxs(set, q);
if (q->nr_hw_queues != set->nr_hw_queues) {
int i = prev_nr_hw_queues;
@@ -5058,18 +5017,18 @@ fallback:
blk_mq_map_swqueue(q);
}
+	/* elv_update_nr_hw_queues() unfreezes the queue for us */
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+ elv_update_nr_hw_queues(q);
+
reregister:
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_sysfs_register_hctxs(q);
blk_mq_debugfs_register_hctxs(q);
- }
-
-switch_back:
- list_for_each_entry(q, &set->tag_list, tag_set_list)
- blk_mq_elv_switch_back(&head, q);
- list_for_each_entry(q, &set->tag_list, tag_set_list)
- blk_mq_unfreeze_queue_nomemrestore(q);
+ blk_mq_remove_hw_queues_cpuhp(q);
+ blk_mq_add_hw_queues_cpuhp(q);
+ }
memalloc_noio_restore(memflags);
/* Free the excess tags when nr_hw_queues shrink. */
@@ -5079,9 +5038,11 @@ switch_back:
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
+ down_write(&set->update_nr_hwq_lock);
mutex_lock(&set->tag_list_lock);
__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
mutex_unlock(&set->tag_list_lock);
+ up_write(&set->update_nr_hwq_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 3011a78cf16a..affb2e14b56e 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -48,7 +48,7 @@ void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
- unsigned int);
+ bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *start);
@@ -246,10 +246,7 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
return hctx->nr_ctx && hctx->tags;
}
-unsigned int blk_mq_in_flight(struct request_queue *q,
- struct block_device *part);
-void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
- unsigned int inflight[2]);
+void blk_mq_in_driver_rw(struct block_device *part, unsigned int inflight[2]);
static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
int budget_token)
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 95982bc46ba1..848591fb3c57 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -2,6 +2,8 @@
#include "blk-rq-qos.h"
+__read_mostly DEFINE_STATIC_KEY_FALSE(block_rq_qos);
+
/*
* Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
* false if 'v' + 1 would be bigger than 'below'.
@@ -317,6 +319,7 @@ void rq_qos_exit(struct request_queue *q)
struct rq_qos *rqos = q->rq_qos;
q->rq_qos = rqos->next;
rqos->ops->exit(rqos);
+ static_branch_dec(&block_rq_qos);
}
mutex_unlock(&q->rq_qos_mutex);
}
@@ -343,6 +346,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
goto ebusy;
rqos->next = q->rq_qos;
q->rq_qos = rqos;
+ static_branch_inc(&block_rq_qos);
blk_mq_unfreeze_queue(q, memflags);
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 37245c97ee61..39749f4066fb 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -12,6 +12,7 @@
#include "blk-mq-debugfs.h"
struct blk_mq_debugfs_attr;
+extern struct static_key_false block_rq_qos;
enum rq_qos_id {
RQ_QOS_WBT,
@@ -112,31 +113,33 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
- if (q->rq_qos)
+ if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
__rq_qos_cleanup(q->rq_qos, bio);
}
static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
- if (q->rq_qos && !blk_rq_is_passthrough(rq))
+ if (static_branch_unlikely(&block_rq_qos) && q->rq_qos &&
+ !blk_rq_is_passthrough(rq))
__rq_qos_done(q->rq_qos, rq);
}
static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
- if (q->rq_qos)
+ if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
__rq_qos_issue(q->rq_qos, rq);
}
static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
- if (q->rq_qos)
+ if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
__rq_qos_requeue(q->rq_qos, rq);
}
static inline void rq_qos_done_bio(struct bio *bio)
{
- if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
+ if (static_branch_unlikely(&block_rq_qos) &&
+ bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
bio_flagged(bio, BIO_QOS_MERGED))) {
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
if (q->rq_qos)
@@ -146,7 +149,7 @@ static inline void rq_qos_done_bio(struct bio *bio)
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
- if (q->rq_qos) {
+ if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
bio_set_flag(bio, BIO_QOS_THROTTLED);
__rq_qos_throttle(q->rq_qos, bio);
}
@@ -155,14 +158,14 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
static inline void rq_qos_track(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- if (q->rq_qos)
+ if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
__rq_qos_track(q->rq_qos, rq, bio);
}
static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- if (q->rq_qos) {
+ if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
bio_set_flag(bio, BIO_QOS_MERGED);
__rq_qos_merge(q->rq_qos, rq, bio);
}
@@ -170,7 +173,7 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
- if (q->rq_qos)
+ if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
__rq_qos_queue_depth_changed(q->rq_qos);
}
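The rq_qos helpers above are now gated by a global static key on top of the q->rq_qos pointer check, so queues with no rq_qos policy attached skip these hooks through a patched-out branch. A minimal sketch of the same pattern, assuming a kernel context with <linux/jump_label.h>; the names below are illustrative, not part of this patch:

	struct my_policy;
	struct my_queue { struct my_policy *policy; };
	void __feature_hook(struct my_policy *pol);

	DEFINE_STATIC_KEY_FALSE(feature_enabled);

	/* pair every attach with an increment and every detach with a decrement */
	void feature_attach(struct my_queue *q, struct my_policy *pol)
	{
		q->policy = pol;
		static_branch_inc(&feature_enabled);
	}

	void feature_detach(struct my_queue *q)
	{
		q->policy = NULL;
		static_branch_dec(&feature_enabled);
	}

	static inline void feature_hook(struct my_queue *q)
	{
		/* resolves to a no-op branch while no policy is attached anywhere */
		if (static_branch_unlikely(&feature_enabled) && q->policy)
			__feature_hook(q->policy);
	}

Because the key is shared, the branch only becomes live once at least one queue in the system has an rq_qos policy registered, matching the static_branch_inc()/static_branch_dec() pairing in rq_qos_add() and rq_qos_exit() above.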
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 6b2dbe645d23..a000daafbfb4 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -61,8 +61,14 @@ void blk_apply_bdi_limits(struct backing_dev_info *bdi,
/*
* For read-ahead of large files to be effective, we need to read ahead
* at least twice the optimal I/O size.
+ *
+ * There is no hardware limitation for the read-ahead size and the user
+ * might have increased the read-ahead size through sysfs, so don't ever
+ * decrease it.
*/
- bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
+ bdi->ra_pages = max3(bdi->ra_pages,
+ lim->io_opt * 2 / PAGE_SIZE,
+ VM_READAHEAD_PAGES);
bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
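As a worked example of the max3() above, with illustrative numbers: on 4 KiB pages with lim->io_opt = 512 KiB, lim->io_opt * 2 / PAGE_SIZE is 256 pages while VM_READAHEAD_PAGES (128 KiB worth of pages) is 32, so a freshly created queue gets ra_pages = 256. If the administrator had previously raised read_ahead_kb so that bdi->ra_pages was 1024, max3() now preserves 1024 instead of shrinking it back to 256 on the next limits update.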
@@ -118,11 +124,6 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
return 0;
}
- if (lim->features & BLK_FEAT_BOUNCE_HIGH) {
- pr_warn("no bounce buffer support for integrity metadata\n");
- return -EINVAL;
- }
-
if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
pr_warn("integrity support disabled.\n");
return -EINVAL;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index a2882751f0d2..b2b9b89d6967 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -134,6 +134,8 @@ QUEUE_SYSFS_LIMIT_SHOW(max_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
+QUEUE_SYSFS_LIMIT_SHOW(max_write_streams)
+QUEUE_SYSFS_LIMIT_SHOW(write_stream_granularity)
QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
@@ -488,6 +490,8 @@ QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_LIM_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_LIM_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_LIM_RO_ENTRY(queue_max_segment_size, "max_segment_size");
+QUEUE_LIM_RO_ENTRY(queue_max_write_streams, "max_write_streams");
+QUEUE_LIM_RO_ENTRY(queue_write_stream_granularity, "write_stream_granularity");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");
QUEUE_LIM_RO_ENTRY(queue_logical_block_size, "logical_block_size");
@@ -560,7 +564,7 @@ static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
ssize_t ret;
struct request_queue *q = disk->queue;
- mutex_lock(&q->elevator_lock);
+ mutex_lock(&disk->rqos_state_mutex);
if (!wbt_rq_qos(q)) {
ret = -EINVAL;
goto out;
@@ -573,7 +577,7 @@ static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
ret = sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
out:
- mutex_unlock(&q->elevator_lock);
+ mutex_unlock(&disk->rqos_state_mutex);
return ret;
}
@@ -593,7 +597,6 @@ static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
return -EINVAL;
memflags = blk_mq_freeze_queue(q);
- mutex_lock(&q->elevator_lock);
rqos = wbt_rq_qos(q);
if (!rqos) {
@@ -618,11 +621,12 @@ static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
*/
blk_mq_quiesce_queue(q);
+ mutex_lock(&disk->rqos_state_mutex);
wbt_set_min_lat(q, val);
+ mutex_unlock(&disk->rqos_state_mutex);
blk_mq_unquiesce_queue(q);
out:
- mutex_unlock(&q->elevator_lock);
blk_mq_unfreeze_queue(q, memflags);
return ret;
@@ -642,6 +646,8 @@ static struct attribute *queue_attrs[] = {
&queue_max_discard_segments_entry.attr,
&queue_max_integrity_segments_entry.attr,
&queue_max_segment_size_entry.attr,
+ &queue_max_write_streams_entry.attr,
+ &queue_write_stream_granularity_entry.attr,
&queue_hw_sector_size_entry.attr,
&queue_logical_block_size_entry.attr,
&queue_physical_block_size_entry.attr,
@@ -869,16 +875,9 @@ int blk_register_queue(struct gendisk *disk)
if (ret)
goto out_unregister_ia_ranges;
- mutex_lock(&q->elevator_lock);
- if (q->elevator) {
- ret = elv_register_queue(q, false);
- if (ret) {
- mutex_unlock(&q->elevator_lock);
- goto out_crypto_sysfs_unregister;
- }
- }
+ if (queue_is_mq(q))
+ elevator_set_default(q);
wbt_enable_default(disk);
- mutex_unlock(&q->elevator_lock);
blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
@@ -902,13 +901,13 @@ int blk_register_queue(struct gendisk *disk)
return ret;
-out_crypto_sysfs_unregister:
- blk_crypto_sysfs_unregister(disk);
out_unregister_ia_ranges:
disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
blk_debugfs_remove(disk);
mutex_unlock(&q->sysfs_lock);
+ if (queue_is_mq(q))
+ blk_mq_sysfs_unregister(disk);
out_put_queue_kobj:
kobject_put(&disk->queue_kobj);
return ret;
@@ -949,10 +948,6 @@ void blk_unregister_queue(struct gendisk *disk)
blk_mq_sysfs_unregister(disk);
blk_crypto_sysfs_unregister(disk);
- mutex_lock(&q->elevator_lock);
- elv_unregister_queue(q);
- mutex_unlock(&q->elevator_lock);
-
mutex_lock(&q->sysfs_lock);
disk_unregister_independent_access_ranges(disk);
mutex_unlock(&q->sysfs_lock);
@@ -961,5 +956,8 @@ void blk_unregister_queue(struct gendisk *disk)
kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
kobject_del(&disk->queue_kobj);
+ if (queue_is_mq(q))
+ elevator_set_none(q);
+
blk_debugfs_remove(disk);
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index d6dd2e047874..bd15357f23bd 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -143,7 +143,8 @@ static inline unsigned int throtl_bio_data_size(struct bio *bio)
static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
INIT_LIST_HEAD(&qn->node);
- bio_list_init(&qn->bios);
+ bio_list_init(&qn->bios_bps);
+ bio_list_init(&qn->bios_iops);
qn->tg = tg;
}
@@ -151,18 +152,32 @@ static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
* throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
* @bio: bio being added
* @qn: qnode to add bio to
- * @queued: the service_queue->queued[] list @qn belongs to
+ * @sq: the service_queue @qn belongs to
*
- * Add @bio to @qn and put @qn on @queued if it's not already on.
+ * Add @bio to @qn and put @qn on @sq->queued if it's not already on.
* @qn->tg's reference count is bumped when @qn is activated. See the
* comment on top of throtl_qnode definition for details.
*/
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
- struct list_head *queued)
+ struct throtl_service_queue *sq)
{
- bio_list_add(&qn->bios, bio);
+ bool rw = bio_data_dir(bio);
+
+ /*
+ * Split bios have already been throttled by bps, so they are
+ * directly queued into the iops path.
+ */
+ if (bio_flagged(bio, BIO_TG_BPS_THROTTLED) ||
+ bio_flagged(bio, BIO_BPS_THROTTLED)) {
+ bio_list_add(&qn->bios_iops, bio);
+ sq->nr_queued_iops[rw]++;
+ } else {
+ bio_list_add(&qn->bios_bps, bio);
+ sq->nr_queued_bps[rw]++;
+ }
+
if (list_empty(&qn->node)) {
- list_add_tail(&qn->node, queued);
+ list_add_tail(&qn->node, &sq->queued[rw]);
blkg_get(tg_to_blkg(qn->tg));
}
}
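In short, the enqueue side classifies a bio by whether its bps budget has already been charged, and the dequeue side (throtl_peek_queued()/throtl_pop_queued() below) always drains the iops list first. A condensed, illustrative view of the two decisions, where bps_done() is shorthand for the two flag tests above and not a real helper:

	/* enqueue: bios that already passed the bps stage go straight to iops */
	if (bps_done(bio))
		bio_list_add(&qn->bios_iops, bio);
	else
		bio_list_add(&qn->bios_bps, bio);

	/* dequeue: prefer the iops list so bios keep leaving from the head */
	bio = bio_list_peek(&qn->bios_iops);
	if (!bio)
		bio = bio_list_peek(&qn->bios_bps);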
@@ -170,6 +185,10 @@ static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
/**
* throtl_peek_queued - peek the first bio on a qnode list
* @queued: the qnode list to peek
+ *
+ * Always take a bio from the head of the iops queue first. If the queue is
+ * empty, take one from the bps queue instead, so that bios are still
+ * fetched from the head overall.
*/
static struct bio *throtl_peek_queued(struct list_head *queued)
{
@@ -180,28 +199,33 @@ static struct bio *throtl_peek_queued(struct list_head *queued)
return NULL;
qn = list_first_entry(queued, struct throtl_qnode, node);
- bio = bio_list_peek(&qn->bios);
+ bio = bio_list_peek(&qn->bios_iops);
+ if (!bio)
+ bio = bio_list_peek(&qn->bios_bps);
WARN_ON_ONCE(!bio);
return bio;
}
/**
 * throtl_pop_queued - pop the first bio from a qnode list
- * @queued: the qnode list to pop a bio from
+ * @sq: the service_queue to pop a bio from
* @tg_to_put: optional out argument for throtl_grp to put
+ * @rw: read/write
*
- * Pop the first bio from the qnode list @queued. After popping, the first
- * qnode is removed from @queued if empty or moved to the end of @queued so
- * that the popping order is round-robin.
+ * Pop the first bio from the qnode list @sq->queued. Note that we look at
+ * the iops list first because bios are ultimately dispatched from it.
+ * After popping, the first qnode is removed from @sq->queued if empty or moved
+ * to the end of @sq->queued so that the popping order is round-robin.
*
* When the first qnode is removed, its associated throtl_grp should be put
* too. If @tg_to_put is NULL, this function automatically puts it;
* otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
* responsible for putting it.
*/
-static struct bio *throtl_pop_queued(struct list_head *queued,
- struct throtl_grp **tg_to_put)
+static struct bio *throtl_pop_queued(struct throtl_service_queue *sq,
+ struct throtl_grp **tg_to_put, bool rw)
{
+ struct list_head *queued = &sq->queued[rw];
struct throtl_qnode *qn;
struct bio *bio;
@@ -209,10 +233,17 @@ static struct bio *throtl_pop_queued(struct list_head *queued,
return NULL;
qn = list_first_entry(queued, struct throtl_qnode, node);
- bio = bio_list_pop(&qn->bios);
+ bio = bio_list_pop(&qn->bios_iops);
+ if (bio) {
+ sq->nr_queued_iops[rw]--;
+ } else {
+ bio = bio_list_pop(&qn->bios_bps);
+ if (bio)
+ sq->nr_queued_bps[rw]--;
+ }
WARN_ON_ONCE(!bio);
- if (bio_list_empty(&qn->bios)) {
+ if (bio_list_empty(&qn->bios_bps) && bio_list_empty(&qn->bios_iops)) {
list_del_init(&qn->node);
if (tg_to_put)
*tg_to_put = qn->tg;
@@ -520,6 +551,9 @@ static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
unsigned long jiffy_end)
{
+ if (!time_before(tg->slice_end[rw], jiffy_end))
+ return;
+
throtl_set_slice_end(tg, rw, jiffy_end);
throtl_log(&tg->service_queue,
"[%c] extend slice start=%lu end=%lu jiffies=%lu",
@@ -536,6 +570,11 @@ static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
return true;
}
+static unsigned int sq_queued(struct throtl_service_queue *sq, int type)
+{
+ return sq->nr_queued_bps[type] + sq->nr_queued_iops[type];
+}
+
static unsigned int calculate_io_allowed(u32 iops_limit,
unsigned long jiffy_elapsed)
{
@@ -571,6 +610,48 @@ static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
}
+static long long throtl_trim_bps(struct throtl_grp *tg, bool rw,
+ unsigned long time_elapsed)
+{
+ u64 bps_limit = tg_bps_limit(tg, rw);
+ long long bytes_trim;
+
+ if (bps_limit == U64_MAX)
+ return 0;
+
+ /* Need to consider the case of bytes_allowed overflow. */
+ bytes_trim = calculate_bytes_allowed(bps_limit, time_elapsed);
+ if (bytes_trim <= 0 || tg->bytes_disp[rw] < bytes_trim) {
+ bytes_trim = tg->bytes_disp[rw];
+ tg->bytes_disp[rw] = 0;
+ } else {
+ tg->bytes_disp[rw] -= bytes_trim;
+ }
+
+ return bytes_trim;
+}
+
+static int throtl_trim_iops(struct throtl_grp *tg, bool rw,
+ unsigned long time_elapsed)
+{
+ u32 iops_limit = tg_iops_limit(tg, rw);
+ int io_trim;
+
+ if (iops_limit == UINT_MAX)
+ return 0;
+
+ /* Need to consider the case of io_allowed overflow. */
+ io_trim = calculate_io_allowed(iops_limit, time_elapsed);
+ if (io_trim <= 0 || tg->io_disp[rw] < io_trim) {
+ io_trim = tg->io_disp[rw];
+ tg->io_disp[rw] = 0;
+ } else {
+ tg->io_disp[rw] -= io_trim;
+ }
+
+ return io_trim;
+}
+
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
@@ -612,22 +693,11 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
* one extra slice is preserved for deviation.
*/
time_elapsed -= tg->td->throtl_slice;
- bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
- time_elapsed);
- io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed);
- if (bytes_trim <= 0 && io_trim <= 0)
+ bytes_trim = throtl_trim_bps(tg, rw, time_elapsed);
+ io_trim = throtl_trim_iops(tg, rw, time_elapsed);
+ if (!bytes_trim && !io_trim)
return;
- if ((long long)tg->bytes_disp[rw] >= bytes_trim)
- tg->bytes_disp[rw] -= bytes_trim;
- else
- tg->bytes_disp[rw] = 0;
-
- if ((int)tg->io_disp[rw] >= io_trim)
- tg->io_disp[rw] -= io_trim;
- else
- tg->io_disp[rw] = 0;
-
tg->slice_start[rw] += time_elapsed;
throtl_log(&tg->service_queue,
@@ -643,21 +713,41 @@ static void __tg_update_carryover(struct throtl_grp *tg, bool rw,
unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
u64 bps_limit = tg_bps_limit(tg, rw);
u32 iops_limit = tg_iops_limit(tg, rw);
+ long long bytes_allowed;
+ int io_allowed;
+
+ /*
+ * If the queue is empty, carryover handling is not needed. In such cases,
+ * tg->[bytes/io]_disp should be reset to 0 to avoid impacting the dispatch
+ * of subsequent bios. The same handling applies when the previous BPS/IOPS
+ * limit was set to max.
+ */
+ if (sq_queued(&tg->service_queue, rw) == 0) {
+ tg->bytes_disp[rw] = 0;
+ tg->io_disp[rw] = 0;
+ return;
+ }
/*
* If config is updated while bios are still throttled, calculate and
- * accumulate how many bytes/ios are waited across changes. And
- * carryover_bytes/ios will be used to calculate new wait time under new
- * configuration.
+ * accumulate how many bytes/ios have been waited for across changes. Use the
+ * calculated carryover (@bytes/@ios) to update [bytes/io]_disp, which
+ * will be used to calculate the new wait time under the new configuration.
+ * Note that bytes_allowed/io_allowed may overflow and must be accounted for.
*/
- if (bps_limit != U64_MAX)
- *bytes = calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
- tg->bytes_disp[rw];
- if (iops_limit != UINT_MAX)
- *ios = calculate_io_allowed(iops_limit, jiffy_elapsed) -
- tg->io_disp[rw];
- tg->bytes_disp[rw] -= *bytes;
- tg->io_disp[rw] -= *ios;
+ if (bps_limit != U64_MAX) {
+ bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed);
+ if (bytes_allowed > 0)
+ *bytes = bytes_allowed - tg->bytes_disp[rw];
+ }
+ if (iops_limit != UINT_MAX) {
+ io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed);
+ if (io_allowed > 0)
+ *ios = io_allowed - tg->io_disp[rw];
+ }
+
+ tg->bytes_disp[rw] = -*bytes;
+ tg->io_disp[rw] = -*ios;
}
static void tg_update_carryover(struct throtl_grp *tg)
@@ -665,12 +755,10 @@ static void tg_update_carryover(struct throtl_grp *tg)
long long bytes[2] = {0};
int ios[2] = {0};
- if (tg->service_queue.nr_queued[READ])
- __tg_update_carryover(tg, READ, &bytes[READ], &ios[READ]);
- if (tg->service_queue.nr_queued[WRITE])
- __tg_update_carryover(tg, WRITE, &bytes[WRITE], &ios[WRITE]);
+ __tg_update_carryover(tg, READ, &bytes[READ], &ios[READ]);
+ __tg_update_carryover(tg, WRITE, &bytes[WRITE], &ios[WRITE]);
- /* see comments in struct throtl_grp for meaning of these fields. */
+ /* see comments in struct throtl_grp for meaning of carryover. */
throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
bytes[READ], bytes[WRITE], ios[READ], ios[WRITE]);
}
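A worked example of the carryover handling above, with illustrative numbers: suppose the old bps limit allowed 2 MiB over the elapsed part of the slice but only 1.5 MiB had been dispatched when the limits were changed. Then bytes_allowed - bytes_disp is 512 KiB, so *bytes becomes 512 KiB and bytes_disp[rw] is set to -512 KiB; under the new configuration the group starts with 512 KiB of already-earned budget, and the wait time for the head bio is computed against that negative dispatch count. If nothing is queued for that direction, both counters are simply reset to 0 instead.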
@@ -682,10 +770,6 @@ static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio
int io_allowed;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
- if (iops_limit == UINT_MAX) {
- return 0;
- }
-
jiffy_elapsed = jiffies - tg->slice_start[rw];
/* Round up to the next throttle slice, wait time must be nonzero */
@@ -711,11 +795,6 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
unsigned int bio_size = throtl_bio_data_size(bio);
- /* no need to throttle if this bio's bytes have been accounted */
- if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED)) {
- return 0;
- }
-
jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
/* Slice has just started. Consider one slice interval */
@@ -724,7 +803,9 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd);
- if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
+ /* Need to consider the case of bytes_allowed overflow. */
+ if ((bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
+ || bytes_allowed < 0)
return 0;
/* Calc approx time to dispatch */
@@ -742,17 +823,82 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
return jiffy_wait;
}
+static void throtl_charge_bps_bio(struct throtl_grp *tg, struct bio *bio)
+{
+ unsigned int bio_size = throtl_bio_data_size(bio);
+
+ /* Charge the bio to the group */
+ if (!bio_flagged(bio, BIO_BPS_THROTTLED) &&
+ !bio_flagged(bio, BIO_TG_BPS_THROTTLED)) {
+ bio_set_flag(bio, BIO_TG_BPS_THROTTLED);
+ tg->bytes_disp[bio_data_dir(bio)] += bio_size;
+ }
+}
+
+static void throtl_charge_iops_bio(struct throtl_grp *tg, struct bio *bio)
+{
+ bio_clear_flag(bio, BIO_TG_BPS_THROTTLED);
+ tg->io_disp[bio_data_dir(bio)]++;
+}
+
/*
- * Returns whether one can dispatch a bio or not. Also returns approx number
- * of jiffies to wait before this bio is with-in IO rate and can be dispatched
+ * If the previous slice expired, start a new one; otherwise renew/extend the
+ * existing slice to make sure it is at least throtl_slice interval long from
+ * now. A new slice is started only for an empty throttle group. If there are
+ * queued bios, there should be an active slice and it should be extended
+ * instead.
*/
-static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
- unsigned long *wait)
+static void tg_update_slice(struct throtl_grp *tg, bool rw)
+{
+ if (throtl_slice_used(tg, rw) &&
+ sq_queued(&tg->service_queue, rw) == 0)
+ throtl_start_new_slice(tg, rw, true);
+ else
+ throtl_extend_slice(tg, rw, jiffies + tg->td->throtl_slice);
+}
+
+static unsigned long tg_dispatch_bps_time(struct throtl_grp *tg, struct bio *bio)
{
bool rw = bio_data_dir(bio);
- unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
u64 bps_limit = tg_bps_limit(tg, rw);
+ unsigned long bps_wait;
+
+ /* no need to throttle if this bio's bytes have been accounted */
+ if (bps_limit == U64_MAX || tg->flags & THROTL_TG_CANCELING ||
+ bio_flagged(bio, BIO_BPS_THROTTLED) ||
+ bio_flagged(bio, BIO_TG_BPS_THROTTLED))
+ return 0;
+
+ tg_update_slice(tg, rw);
+ bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
+ throtl_extend_slice(tg, rw, jiffies + bps_wait);
+
+ return bps_wait;
+}
+
+static unsigned long tg_dispatch_iops_time(struct throtl_grp *tg, struct bio *bio)
+{
+ bool rw = bio_data_dir(bio);
u32 iops_limit = tg_iops_limit(tg, rw);
+ unsigned long iops_wait;
+
+ if (iops_limit == UINT_MAX || tg->flags & THROTL_TG_CANCELING)
+ return 0;
+
+ tg_update_slice(tg, rw);
+ iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
+ throtl_extend_slice(tg, rw, jiffies + iops_wait);
+
+ return iops_wait;
+}
+
+/*
+ * Returns the approximate number of jiffies to wait before this bio is within
+ * the IO rate and can be moved to another queue or dispatched.
+ */
+static unsigned long tg_dispatch_time(struct throtl_grp *tg, struct bio *bio)
+{
+ bool rw = bio_data_dir(bio);
+ unsigned long wait;
/*
* Currently whole state machine of group depends on first bio
@@ -760,62 +906,20 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
* this function with a different bio if there are other bios
* queued.
*/
- BUG_ON(tg->service_queue.nr_queued[rw] &&
+ BUG_ON(sq_queued(&tg->service_queue, rw) &&
bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
- /* If tg->bps = -1, then BW is unlimited */
- if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
- tg->flags & THROTL_TG_CANCELING) {
- if (wait)
- *wait = 0;
- return true;
- }
+ wait = tg_dispatch_bps_time(tg, bio);
+ if (wait != 0)
+ return wait;
/*
- * If previous slice expired, start a new one otherwise renew/extend
- * existing slice to make sure it is at least throtl_slice interval
- * long since now. New slice is started only for empty throttle group.
- * If there is queued bio, that means there should be an active
- * slice and it should be extended instead.
+ * Charge bps here because @bio will be directly placed into the
+ * iops queue afterward.
*/
- if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
- throtl_start_new_slice(tg, rw, true);
- else {
- if (time_before(tg->slice_end[rw],
- jiffies + tg->td->throtl_slice))
- throtl_extend_slice(tg, rw,
- jiffies + tg->td->throtl_slice);
- }
-
- bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
- iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
- if (bps_wait + iops_wait == 0) {
- if (wait)
- *wait = 0;
- return true;
- }
-
- max_wait = max(bps_wait, iops_wait);
-
- if (wait)
- *wait = max_wait;
-
- if (time_before(tg->slice_end[rw], jiffies + max_wait))
- throtl_extend_slice(tg, rw, jiffies + max_wait);
-
- return false;
-}
-
-static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
-{
- bool rw = bio_data_dir(bio);
- unsigned int bio_size = throtl_bio_data_size(bio);
+ throtl_charge_bps_bio(tg, bio);
- /* Charge the bio to the group */
- if (!bio_flagged(bio, BIO_BPS_THROTTLED))
- tg->bytes_disp[rw] += bio_size;
-
- tg->io_disp[rw]++;
+ return tg_dispatch_iops_time(tg, bio);
}
/**
@@ -842,28 +946,36 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
* dispatched. Mark that @tg was empty. This is automatically
* cleared on the next tg_update_disptime().
*/
- if (!sq->nr_queued[rw])
+ if (sq_queued(sq, rw) == 0)
tg->flags |= THROTL_TG_WAS_EMPTY;
- throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
+ throtl_qnode_add_bio(bio, qn, sq);
+
+ /*
+ * Since the queues have been split, when the iops queue was
+ * previously empty and a new @bio is added to the first @qn,
+ * we also need to update @tg->disptime.
+ */
+ if (bio_flagged(bio, BIO_BPS_THROTTLED) &&
+ bio == throtl_peek_queued(&sq->queued[rw]))
+ tg->flags |= THROTL_TG_IOPS_WAS_EMPTY;
- sq->nr_queued[rw]++;
throtl_enqueue_tg(tg);
}
static void tg_update_disptime(struct throtl_grp *tg)
{
struct throtl_service_queue *sq = &tg->service_queue;
- unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
+ unsigned long read_wait = -1, write_wait = -1, min_wait, disptime;
struct bio *bio;
bio = throtl_peek_queued(&sq->queued[READ]);
if (bio)
- tg_may_dispatch(tg, bio, &read_wait);
+ read_wait = tg_dispatch_time(tg, bio);
bio = throtl_peek_queued(&sq->queued[WRITE]);
if (bio)
- tg_may_dispatch(tg, bio, &write_wait);
+ write_wait = tg_dispatch_time(tg, bio);
min_wait = min(read_wait, write_wait);
disptime = jiffies + min_wait;
@@ -875,6 +987,7 @@ static void tg_update_disptime(struct throtl_grp *tg)
/* see throtl_add_bio_tg() */
tg->flags &= ~THROTL_TG_WAS_EMPTY;
+ tg->flags &= ~THROTL_TG_IOPS_WAS_EMPTY;
}
static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
@@ -901,10 +1014,9 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
* getting released prematurely. Remember the tg to put and put it
* after @bio is transferred to @parent_sq.
*/
- bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
- sq->nr_queued[rw]--;
+ bio = throtl_pop_queued(sq, &tg_to_put, rw);
- throtl_charge_bio(tg, bio);
+ throtl_charge_iops_bio(tg, bio);
/*
* If our parent is another tg, we just need to transfer @bio to
@@ -919,7 +1031,7 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
} else {
bio_set_flag(bio, BIO_BPS_THROTTLED);
throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
- &parent_sq->queued[rw]);
+ parent_sq);
BUG_ON(tg->td->nr_queued[rw] <= 0);
tg->td->nr_queued[rw]--;
}
@@ -941,7 +1053,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg)
/* Try to dispatch 75% READS and 25% WRITES */
while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
- tg_may_dispatch(tg, bio, NULL)) {
+ tg_dispatch_time(tg, bio) == 0) {
tg_dispatch_one_bio(tg, READ);
nr_reads++;
@@ -951,7 +1063,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg)
}
while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
- tg_may_dispatch(tg, bio, NULL)) {
+ tg_dispatch_time(tg, bio) == 0) {
tg_dispatch_one_bio(tg, WRITE);
nr_writes++;
@@ -984,7 +1096,7 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
nr_disp += throtl_dispatch_tg(tg);
sq = &tg->service_queue;
- if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
+ if (sq_queued(sq, READ) || sq_queued(sq, WRITE))
tg_update_disptime(tg);
else
throtl_dequeue_tg(tg);
@@ -1037,9 +1149,11 @@ again:
dispatched = false;
while (true) {
+ unsigned int __maybe_unused bio_cnt_r = sq_queued(sq, READ);
+ unsigned int __maybe_unused bio_cnt_w = sq_queued(sq, WRITE);
+
throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
- sq->nr_queued[READ] + sq->nr_queued[WRITE],
- sq->nr_queued[READ], sq->nr_queued[WRITE]);
+ bio_cnt_r + bio_cnt_w, bio_cnt_r, bio_cnt_w);
ret = throtl_select_dispatch(sq);
if (ret) {
@@ -1061,7 +1175,8 @@ again:
if (parent_sq) {
/* @parent_sq is another throtl_grp, propagate dispatch */
- if (tg->flags & THROTL_TG_WAS_EMPTY) {
+ if (tg->flags & THROTL_TG_WAS_EMPTY ||
+ tg->flags & THROTL_TG_IOPS_WAS_EMPTY) {
tg_update_disptime(tg);
if (!throtl_schedule_next_dispatch(parent_sq, false)) {
/* window is already open, repeat dispatching */
@@ -1101,7 +1216,7 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
spin_lock_irq(&q->queue_lock);
for (rw = READ; rw <= WRITE; rw++)
- while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
+ while ((bio = throtl_pop_queued(td_sq, NULL, rw)))
bio_list_add(&bio_list_on_stack, bio);
spin_unlock_irq(&q->queue_lock);
@@ -1606,11 +1721,30 @@ void blk_throtl_cancel_bios(struct gendisk *disk)
static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
{
- /* throtl is FIFO - if bios are already queued, should queue */
- if (tg->service_queue.nr_queued[rw])
+ struct throtl_service_queue *sq = &tg->service_queue;
+
+ /*
+ * For a split bio, we need to specifically distinguish whether the
+ * iops queue is empty.
+ */
+ if (bio_flagged(bio, BIO_BPS_THROTTLED))
+ return sq->nr_queued_iops[rw] == 0 &&
+ tg_dispatch_iops_time(tg, bio) == 0;
+
+ /*
+ * Throtl is FIFO - if bios are already queued, this bio should be queued too.
+ * If the bps queue is empty and @bio is within the bps limit, charge
+ * bps here for direct placement into the iops queue.
+ */
+ if (sq_queued(&tg->service_queue, rw)) {
+ if (sq->nr_queued_bps[rw] == 0 &&
+ tg_dispatch_bps_time(tg, bio) == 0)
+ throtl_charge_bps_bio(tg, bio);
+
return false;
+ }
- return tg_may_dispatch(tg, bio, NULL);
+ return tg_dispatch_time(tg, bio) == 0;
}
bool __blk_throtl_bio(struct bio *bio)
@@ -1631,7 +1765,7 @@ bool __blk_throtl_bio(struct bio *bio)
while (true) {
if (tg_within_limit(tg, bio, rw)) {
/* within limits, let's charge and dispatch directly */
- throtl_charge_bio(tg, bio);
+ throtl_charge_iops_bio(tg, bio);
/*
* We need to trim slice even when bios are not being
@@ -1654,7 +1788,8 @@ bool __blk_throtl_bio(struct bio *bio)
* control algorithm is adaptive, and extra IO bytes
* will be throttled for paying the debt
*/
- throtl_charge_bio(tg, bio);
+ throtl_charge_bps_bio(tg, bio);
+ throtl_charge_iops_bio(tg, bio);
} else {
/* if above limits, break to queue */
break;
@@ -1680,7 +1815,7 @@ bool __blk_throtl_bio(struct bio *bio)
tg->bytes_disp[rw], bio->bi_iter.bi_size,
tg_bps_limit(tg, rw),
tg->io_disp[rw], tg_iops_limit(tg, rw),
- sq->nr_queued[READ], sq->nr_queued[WRITE]);
+ sq_queued(sq, READ), sq_queued(sq, WRITE));
td->nr_queued[rw]++;
throtl_add_bio_tg(bio, qn, tg);
@@ -1688,11 +1823,13 @@ bool __blk_throtl_bio(struct bio *bio)
/*
* Update @tg's dispatch time and force schedule dispatch if @tg
- * was empty before @bio. The forced scheduling isn't likely to
- * cause undue delay as @bio is likely to be dispatched directly if
- * its @tg's disptime is not in the future.
+ * was empty before @bio, or if the iops queue that @bio will be
+ * added to was empty. The forced scheduling isn't likely to cause
+ * undue delay as @bio is likely to be dispatched directly if its
+ * @tg's disptime is not in the future.
*/
- if (tg->flags & THROTL_TG_WAS_EMPTY) {
+ if (tg->flags & THROTL_TG_WAS_EMPTY ||
+ tg->flags & THROTL_TG_IOPS_WAS_EMPTY) {
tg_update_disptime(tg);
throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
}
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index 7964cc041e06..3b27755bfbff 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_THROTTLE_H
#define BLK_THROTTLE_H
@@ -28,7 +29,8 @@
*/
struct throtl_qnode {
struct list_head node; /* service_queue->queued[] */
- struct bio_list bios; /* queued bios */
+ struct bio_list bios_bps; /* queued bios for bps limit */
+ struct bio_list bios_iops; /* queued bios for iops limit */
struct throtl_grp *tg; /* tg this qnode belongs to */
};
@@ -40,7 +42,8 @@ struct throtl_service_queue {
* children throtl_grp's.
*/
struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */
- unsigned int nr_queued[2]; /* number of queued bios */
+ unsigned int nr_queued_bps[2]; /* number of queued bps bios */
+ unsigned int nr_queued_iops[2]; /* number of queued iops bios */
/*
* RB tree of active children throtl_grp's, which are sorted by
@@ -53,9 +56,14 @@ struct throtl_service_queue {
};
enum tg_state_flags {
- THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */
- THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */
- THROTL_TG_CANCELING = 1 << 2, /* starts to cancel bio */
+ THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */
+ THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */
+ /*
+ * The sq's iops queue is empty, and a bio is about to be enqueued
+ * to the first qnode's bios_iops list.
+ */
+ THROTL_TG_IOPS_WAS_EMPTY = 1 << 2,
+ THROTL_TG_CANCELING = 1 << 3, /* starts to cancel bio */
};
struct throtl_grp {
@@ -101,19 +109,16 @@ struct throtl_grp {
/* IOPS limits */
unsigned int iops[2];
- /* Number of bytes dispatched in current slice */
- int64_t bytes_disp[2];
- /* Number of bio's dispatched in current slice */
- int io_disp[2];
-
/*
- * The following two fields are updated when new configuration is
- * submitted while some bios are still throttled, they record how many
- * bytes/ios are waited already in previous configuration, and they will
- * be used to calculate wait time under new configuration.
+ * Number of bytes/bios dispatched in the current slice.
+ * When a new configuration is submitted while some bios are still throttled,
+ * first calculate the carryover: the amount of bytes/IOs already waited
+ * for under the previous configuration. Then, [bytes/io]_disp are represented
+ * as the negative of the carryover, and they will be used to calculate the
+ * wait time under the new configuration.
*/
- long long carryover_bytes[2];
- int carryover_ios[2];
+ int64_t bytes_disp[2];
+ int io_disp[2];
unsigned long last_check_time;
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index f1754d07f7e0..a50d4cd55f41 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -37,7 +37,7 @@
enum wbt_flags {
WBT_TRACKED = 1, /* write, tracked for throttling */
WBT_READ = 2, /* read */
- WBT_SWAP = 4, /* write, from swap_writepage() */
+ WBT_SWAP = 4, /* write, from swap_writeout() */
WBT_DISCARD = 8, /* discard */
WBT_NR_BITS = 4, /* number of bits */
@@ -704,8 +704,9 @@ void wbt_enable_default(struct gendisk *disk)
struct rq_qos *rqos;
bool enable = IS_ENABLED(CONFIG_BLK_WBT_MQ);
- if (q->elevator &&
- test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags))
+ mutex_lock(&disk->rqos_state_mutex);
+
+ if (blk_queue_disable_wbt(q))
enable = false;
/* Throttling already enabled? */
@@ -713,8 +714,10 @@ void wbt_enable_default(struct gendisk *disk)
if (rqos) {
if (enable && RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
+ mutex_unlock(&disk->rqos_state_mutex);
return;
}
+ mutex_unlock(&disk->rqos_state_mutex);
/* Queue not registered? Maybe shutting down... */
if (!blk_queue_registered(q))
@@ -774,11 +777,13 @@ void wbt_disable_default(struct gendisk *disk)
struct rq_wb *rwb;
if (!rqos)
return;
+ mutex_lock(&disk->rqos_state_mutex);
rwb = RQWB(rqos);
if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
blk_stat_deactivate(rwb->cb);
rwb->enable_state = WBT_STATE_OFF_DEFAULT;
}
+ mutex_unlock(&disk->rqos_state_mutex);
}
EXPORT_SYMBOL_GPL(wbt_disable_default);
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 0c77244a35c9..8f15d1aa6eb8 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -343,6 +343,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
op = REQ_OP_ZONE_RESET;
/* Invalidate the page cache, including dirty pages. */
+ inode_lock(bdev->bd_mapping->host);
filemap_invalidate_lock(bdev->bd_mapping);
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
if (ret)
@@ -364,8 +365,10 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
fail:
- if (cmd == BLKRESETZONE)
+ if (cmd == BLKRESETZONE) {
filemap_invalidate_unlock(bdev->bd_mapping);
+ inode_unlock(bdev->bd_mapping->host);
+ }
return ret;
}
diff --git a/block/blk.h b/block/blk.h
index 006e3be433d2..37ec459fe656 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -94,14 +94,16 @@ static inline void blk_wait_io(struct completion *done)
wait_for_completion_io(done);
}
+struct block_device *blkdev_get_no_open(dev_t dev, bool autoload);
+void blkdev_put_no_open(struct block_device *bdev);
+
#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);
bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
- struct page *page, unsigned len, unsigned offset,
- bool *same_page);
+ struct page *page, unsigned len, unsigned offset);
static inline bool biovec_phys_mergeable(struct request_queue *q,
struct bio_vec *vec1, struct bio_vec *vec2)
@@ -319,11 +321,9 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
bool blk_insert_flush(struct request *rq);
-int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
-void elevator_disable(struct request_queue *q);
-void elevator_exit(struct request_queue *q);
-int elv_register_queue(struct request_queue *q, bool uevent);
-void elv_unregister_queue(struct request_queue *q);
+void elv_update_nr_hw_queues(struct request_queue *q);
+void elevator_set_default(struct request_queue *q);
+void elevator_set_none(struct request_queue *q);
ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
char *buf);
@@ -404,6 +404,27 @@ static inline struct bio *__bio_split_to_limits(struct bio *bio,
}
}
+/**
+ * get_max_segment_size() - maximum number of bytes to add as a single segment
+ * @lim: Request queue limits.
+ * @paddr: address of the range to add
+ * @len: maximum length available to add at @paddr
+ *
+ * Returns the maximum number of bytes of the range starting at @paddr that can
+ * be added to a single segment.
+ */
+static inline unsigned get_max_segment_size(const struct queue_limits *lim,
+ phys_addr_t paddr, unsigned int len)
+{
+ /*
+ * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
+ * after having calculated the minimum.
+ */
+ return min_t(unsigned long, len,
+ min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
+ (unsigned long)lim->max_segment_size - 1) + 1);
+}
+
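A quick illustration of the overflow note in get_max_segment_size() above: with lim->seg_boundary_mask == ULONG_MAX (i.e. no segment boundary) and (seg_boundary_mask & paddr) == 0, the distance to the boundary is already ULONG_MAX, so a naive "distance + 1" would wrap to 0 and every segment would look zero-sized. By taking min() against max_segment_size - 1 first and adding 1 only afterwards, the intermediate value is at most max_segment_size - 1 and the addition cannot wrap; the outer min_t() against @len then caps the result to the caller's remaining length.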
int ll_back_merge_fn(struct request *req, struct bio *bio,
unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
@@ -418,7 +439,6 @@ void blk_apply_bdi_limits(struct backing_dev_info *bdi,
int blk_dev_init(void);
void update_io_ticks(struct block_device *part, unsigned long now, bool end);
-unsigned int part_in_flight(struct block_device *part);
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
@@ -440,23 +460,6 @@ static inline void ioc_clear_queue(struct request_queue *q)
}
#endif /* CONFIG_BLK_ICQ */
-struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);
-
-static inline bool blk_queue_may_bounce(struct request_queue *q)
-{
- return IS_ENABLED(CONFIG_BOUNCE) &&
- (q->limits.features & BLK_FEAT_BOUNCE_HIGH) &&
- max_low_pfn >= max_pfn;
-}
-
-static inline struct bio *blk_queue_bounce(struct bio *bio,
- struct request_queue *q)
-{
- if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
- return __blk_queue_bounce(bio, q);
- return bio;
-}
-
#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
@@ -477,7 +480,8 @@ static inline void blk_zone_update_request_bio(struct request *rq,
* the original BIO sector so that blk_zone_write_plug_bio_endio() can
* lookup the zone write plug.
*/
- if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
+ if (req_op(rq) == REQ_OP_ZONE_APPEND ||
+ bio_flagged(bio, BIO_EMULATES_ZONE_APPEND))
bio->bi_iter.bi_sector = rq->__sector;
}
void blk_zone_write_plug_bio_endio(struct bio *bio);
diff --git a/block/bounce.c b/block/bounce.c
deleted file mode 100644
index 09a9616cf209..000000000000
--- a/block/bounce.c
+++ /dev/null
@@ -1,267 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* bounce buffer handling for block devices
- *
- * - Split from highmem.c
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/swap.h>
-#include <linux/gfp.h>
-#include <linux/bio-integrity.h>
-#include <linux/pagemap.h>
-#include <linux/mempool.h>
-#include <linux/blkdev.h>
-#include <linux/backing-dev.h>
-#include <linux/init.h>
-#include <linux/hash.h>
-#include <linux/highmem.h>
-#include <linux/printk.h>
-#include <asm/tlbflush.h>
-
-#include <trace/events/block.h>
-#include "blk.h"
-#include "blk-cgroup.h"
-
-#define POOL_SIZE 64
-#define ISA_POOL_SIZE 16
-
-static struct bio_set bounce_bio_set, bounce_bio_split;
-static mempool_t page_pool;
-
-static void init_bounce_bioset(void)
-{
- static bool bounce_bs_setup;
- int ret;
-
- if (bounce_bs_setup)
- return;
-
- ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
- BUG_ON(ret);
-
- ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
- BUG_ON(ret);
- bounce_bs_setup = true;
-}
-
-static __init int init_emergency_pool(void)
-{
- int ret;
-
-#ifndef CONFIG_MEMORY_HOTPLUG
- if (max_pfn <= max_low_pfn)
- return 0;
-#endif
-
- ret = mempool_init_page_pool(&page_pool, POOL_SIZE, 0);
- BUG_ON(ret);
- pr_info("pool size: %d pages\n", POOL_SIZE);
-
- init_bounce_bioset();
- return 0;
-}
-
-__initcall(init_emergency_pool);
-
-/*
- * Simple bounce buffer support for highmem pages. Depending on the
- * queue gfp mask set, *to may or may not be a highmem page. kmap it
- * always, it will do the Right Thing
- */
-static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
-{
- struct bio_vec tovec, fromvec;
- struct bvec_iter iter;
- /*
- * The bio of @from is created by bounce, so we can iterate
- * its bvec from start to end, but the @from->bi_iter can't be
- * trusted because it might be changed by splitting.
- */
- struct bvec_iter from_iter = BVEC_ITER_ALL_INIT;
-
- bio_for_each_segment(tovec, to, iter) {
- fromvec = bio_iter_iovec(from, from_iter);
- if (tovec.bv_page != fromvec.bv_page) {
- /*
- * fromvec->bv_offset and fromvec->bv_len might have
- * been modified by the block layer, so use the original
- * copy, bounce_copy_vec already uses tovec->bv_len
- */
- memcpy_to_bvec(&tovec, page_address(fromvec.bv_page) +
- tovec.bv_offset);
- }
- bio_advance_iter(from, &from_iter, tovec.bv_len);
- }
-}
-
-static void bounce_end_io(struct bio *bio)
-{
- struct bio *bio_orig = bio->bi_private;
- struct bio_vec *bvec, orig_vec;
- struct bvec_iter orig_iter = bio_orig->bi_iter;
- struct bvec_iter_all iter_all;
-
- /*
- * free up bounce indirect pages used
- */
- bio_for_each_segment_all(bvec, bio, iter_all) {
- orig_vec = bio_iter_iovec(bio_orig, orig_iter);
- if (bvec->bv_page != orig_vec.bv_page) {
- dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
- mempool_free(bvec->bv_page, &page_pool);
- }
- bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
- }
-
- bio_orig->bi_status = bio->bi_status;
- bio_endio(bio_orig);
- bio_put(bio);
-}
-
-static void bounce_end_io_write(struct bio *bio)
-{
- bounce_end_io(bio);
-}
-
-static void bounce_end_io_read(struct bio *bio)
-{
- struct bio *bio_orig = bio->bi_private;
-
- if (!bio->bi_status)
- copy_to_high_bio_irq(bio_orig, bio);
-
- bounce_end_io(bio);
-}
-
-static struct bio *bounce_clone_bio(struct bio *bio_src)
-{
- struct bvec_iter iter;
- struct bio_vec bv;
- struct bio *bio;
-
- /*
- * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
- * bio_src->bi_io_vec to bio->bi_io_vec.
- *
- * We can't do that anymore, because:
- *
- * - The point of cloning the biovec is to produce a bio with a biovec
- * the caller can modify: bi_idx and bi_bvec_done should be 0.
- *
- * - The original bio could've had more than BIO_MAX_VECS biovecs; if
- * we tried to clone the whole thing bio_alloc_bioset() would fail.
- * But the clone should succeed as long as the number of biovecs we
- * actually need to allocate is fewer than BIO_MAX_VECS.
- *
- * - Lastly, bi_vcnt should not be looked at or relied upon by code
- * that does not own the bio - reason being drivers don't use it for
- * iterating over the biovec anymore, so expecting it to be kept up
- * to date (i.e. for clones that share the parent biovec) is just
- * asking for trouble and would force extra work.
- */
- bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src),
- bio_src->bi_opf, GFP_NOIO, &bounce_bio_set);
- if (bio_flagged(bio_src, BIO_REMAPPED))
- bio_set_flag(bio, BIO_REMAPPED);
- bio->bi_ioprio = bio_src->bi_ioprio;
- bio->bi_write_hint = bio_src->bi_write_hint;
- bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
- bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
-
- switch (bio_op(bio)) {
- case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
- case REQ_OP_WRITE_ZEROES:
- break;
- default:
- bio_for_each_segment(bv, bio_src, iter)
- bio->bi_io_vec[bio->bi_vcnt++] = bv;
- break;
- }
-
- if (bio_crypt_clone(bio, bio_src, GFP_NOIO) < 0)
- goto err_put;
-
- if (bio_integrity(bio_src) &&
- bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0)
- goto err_put;
-
- bio_clone_blkg_association(bio, bio_src);
-
- return bio;
-
-err_put:
- bio_put(bio);
- return NULL;
-}
-
-struct bio *__blk_queue_bounce(struct bio *bio_orig, struct request_queue *q)
-{
- struct bio *bio;
- int rw = bio_data_dir(bio_orig);
- struct bio_vec *to, from;
- struct bvec_iter iter;
- unsigned i = 0, bytes = 0;
- bool bounce = false;
- int sectors;
-
- bio_for_each_segment(from, bio_orig, iter) {
- if (i++ < BIO_MAX_VECS)
- bytes += from.bv_len;
- if (PageHighMem(from.bv_page))
- bounce = true;
- }
- if (!bounce)
- return bio_orig;
-
- /*
- * Individual bvecs might not be logical block aligned. Round down
- * the split size so that each bio is properly block size aligned,
- * even if we do not use the full hardware limits.
- */
- sectors = ALIGN_DOWN(bytes, queue_logical_block_size(q)) >>
- SECTOR_SHIFT;
- if (sectors < bio_sectors(bio_orig)) {
- bio = bio_split(bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
- bio_chain(bio, bio_orig);
- submit_bio_noacct(bio_orig);
- bio_orig = bio;
- }
- bio = bounce_clone_bio(bio_orig);
-
- /*
- * Bvec table can't be updated by bio_for_each_segment_all(),
- * so retrieve bvec from the table directly. This way is safe
- * because the 'bio' is single-page bvec.
- */
- for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
- struct page *bounce_page;
-
- if (!PageHighMem(to->bv_page))
- continue;
-
- bounce_page = mempool_alloc(&page_pool, GFP_NOIO);
- inc_zone_page_state(bounce_page, NR_BOUNCE);
-
- if (rw == WRITE) {
- flush_dcache_page(to->bv_page);
- memcpy_from_bvec(page_address(bounce_page), to);
- }
- to->bv_page = bounce_page;
- }
-
- trace_block_bio_bounce(bio_orig);
-
- bio->bi_flags |= (1 << BIO_BOUNCED);
-
- if (rw == READ)
- bio->bi_end_io = bounce_end_io_read;
- else
- bio->bi_end_io = bounce_end_io_write;
-
- bio->bi_private = bio_orig;
- return bio;
-}
diff --git a/block/elevator.c b/block/elevator.c
index b4d08026b02c..ab22542e6cf0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -45,6 +45,17 @@
#include "blk-wbt.h"
#include "blk-cgroup.h"
+/* Holding context data for changing elevator */
+struct elv_change_ctx {
+ const char *name;
+ bool no_uevent;
+
+ /* for unregistering old elevator */
+ struct elevator_queue *old;
+ /* for registering new elevator */
+ struct elevator_queue *new;
+};
+
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
@@ -148,18 +159,18 @@ static void elevator_release(struct kobject *kobj)
kfree(e);
}
-void elevator_exit(struct request_queue *q)
+static void elevator_exit(struct request_queue *q)
{
struct elevator_queue *e = q->elevator;
+ lockdep_assert_held(&q->elevator_lock);
+
ioc_clear_queue(q);
blk_mq_sched_free_rqs(q);
mutex_lock(&e->sysfs_lock);
blk_mq_exit_sched(q, e);
mutex_unlock(&e->sysfs_lock);
-
- kobject_put(&e->kobj);
}
static inline void __elv_rqhash_del(struct request *rq)
@@ -412,14 +423,15 @@ elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
const struct elv_fs_entry *entry = to_elv(attr);
struct elevator_queue *e;
- ssize_t error;
+ ssize_t error = -ENODEV;
if (!entry->show)
return -EIO;
e = container_of(kobj, struct elevator_queue, kobj);
mutex_lock(&e->sysfs_lock);
- error = e->type ? entry->show(e, page) : -ENOENT;
+ if (!test_bit(ELEVATOR_FLAG_DYING, &e->flags))
+ error = entry->show(e, page);
mutex_unlock(&e->sysfs_lock);
return error;
}
@@ -430,14 +442,15 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
{
const struct elv_fs_entry *entry = to_elv(attr);
struct elevator_queue *e;
- ssize_t error;
+ ssize_t error = -ENODEV;
if (!entry->store)
return -EIO;
e = container_of(kobj, struct elevator_queue, kobj);
mutex_lock(&e->sysfs_lock);
- error = e->type ? entry->store(e, page, length) : -ENOENT;
+ if (!test_bit(ELEVATOR_FLAG_DYING, &e->flags))
+ error = entry->store(e, page, length);
mutex_unlock(&e->sysfs_lock);
return error;
}
@@ -452,13 +465,12 @@ static const struct kobj_type elv_ktype = {
.release = elevator_release,
};
-int elv_register_queue(struct request_queue *q, bool uevent)
+static int elv_register_queue(struct request_queue *q,
+ struct elevator_queue *e,
+ bool uevent)
{
- struct elevator_queue *e = q->elevator;
int error;
- lockdep_assert_held(&q->elevator_lock);
-
error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
if (!error) {
const struct elv_fs_entry *attr = e->type->elevator_attrs;
@@ -472,20 +484,25 @@ int elv_register_queue(struct request_queue *q, bool uevent)
if (uevent)
kobject_uevent(&e->kobj, KOBJ_ADD);
+ /*
+ * The scheduler is initialized; it is now ready to be
+ * exported via debugfs.
+ */
+ blk_mq_sched_reg_debugfs(q);
set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
}
return error;
}
-void elv_unregister_queue(struct request_queue *q)
+static void elv_unregister_queue(struct request_queue *q,
+ struct elevator_queue *e)
{
- struct elevator_queue *e = q->elevator;
-
- lockdep_assert_held(&q->elevator_lock);
-
if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
kobject_uevent(&e->kobj, KOBJ_REMOVE);
kobject_del(&e->kobj);
+
+ /* unexport via debugfs before exiting sched */
+ blk_mq_sched_unreg_debugfs(q);
}
}
@@ -548,159 +565,194 @@ void elv_unregister(struct elevator_type *e)
EXPORT_SYMBOL_GPL(elv_unregister);
/*
- * For single queue devices, default to using mq-deadline. If we have multiple
- * queues or mq-deadline is not available, default to "none".
- */
-static struct elevator_type *elevator_get_default(struct request_queue *q)
-{
- if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
- return NULL;
-
- if (q->nr_hw_queues != 1 &&
- !blk_mq_is_shared_tags(q->tag_set->flags))
- return NULL;
-
- return elevator_find_get("mq-deadline");
-}
-
-/*
- * Use the default elevator settings. If the chosen elevator initialization
- * fails, fall back to the "none" elevator (no elevator).
- */
-void elevator_init_mq(struct request_queue *q)
-{
- struct elevator_type *e;
- unsigned int memflags;
- int err;
-
- WARN_ON_ONCE(blk_queue_registered(q));
-
- if (unlikely(q->elevator))
- return;
-
- e = elevator_get_default(q);
- if (!e)
- return;
-
- /*
- * We are called before adding disk, when there isn't any FS I/O,
- * so freezing queue plus canceling dispatch work is enough to
- * drain any dispatch activities originated from passthrough
- * requests, then no need to quiesce queue which may add long boot
- * latency, especially when lots of disks are involved.
- *
- * Disk isn't added yet, so verifying queue lock only manually.
- */
- memflags = blk_mq_freeze_queue(q);
-
- blk_mq_cancel_work_sync(q);
-
- err = blk_mq_init_sched(q, e);
-
- blk_mq_unfreeze_queue(q, memflags);
-
- if (err) {
- pr_warn("\"%s\" elevator initialization failed, "
- "falling back to \"none\"\n", e->elevator_name);
- }
-
- elevator_put(e);
-}
-
-/*
* Switch to new_e io scheduler.
*
* If switching fails, we are most likely running out of memory and not able
* to restore the old io scheduler, so leaving the io scheduler being none.
*/
-int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
+static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
{
- unsigned int memflags;
- int ret;
+ struct elevator_type *new_e = NULL;
+ int ret = 0;
+ WARN_ON_ONCE(q->mq_freeze_depth == 0);
lockdep_assert_held(&q->elevator_lock);
- memflags = blk_mq_freeze_queue(q);
+ if (strncmp(ctx->name, "none", 4)) {
+ new_e = elevator_find_get(ctx->name);
+ if (!new_e)
+ return -EINVAL;
+ }
+
blk_mq_quiesce_queue(q);
if (q->elevator) {
- elv_unregister_queue(q);
+ ctx->old = q->elevator;
elevator_exit(q);
}
- ret = blk_mq_init_sched(q, new_e);
- if (ret)
- goto out_unfreeze;
-
- ret = elv_register_queue(q, true);
- if (ret) {
- elevator_exit(q);
- goto out_unfreeze;
+ if (new_e) {
+ ret = blk_mq_init_sched(q, new_e);
+ if (ret)
+ goto out_unfreeze;
+ ctx->new = q->elevator;
+ } else {
+ blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
+ q->elevator = NULL;
+ q->nr_requests = q->tag_set->queue_depth;
}
- blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
+ blk_add_trace_msg(q, "elv switch: %s", ctx->name);
out_unfreeze:
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q, memflags);
if (ret) {
pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
new_e->elevator_name);
}
+ if (new_e)
+ elevator_put(new_e);
return ret;
}
-void elevator_disable(struct request_queue *q)
+static void elv_exit_and_release(struct request_queue *q)
{
- unsigned int memflags;
-
- lockdep_assert_held(&q->elevator_lock);
+ struct elevator_queue *e;
+ unsigned memflags;
memflags = blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
-
- elv_unregister_queue(q);
+ mutex_lock(&q->elevator_lock);
+ e = q->elevator;
elevator_exit(q);
- blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
- q->elevator = NULL;
- q->nr_requests = q->tag_set->queue_depth;
- blk_add_trace_msg(q, "elv switch: none");
-
- blk_mq_unquiesce_queue(q);
+ mutex_unlock(&q->elevator_lock);
blk_mq_unfreeze_queue(q, memflags);
+ if (e)
+ kobject_put(&e->kobj);
+}
+
+static int elevator_change_done(struct request_queue *q,
+ struct elv_change_ctx *ctx)
+{
+ int ret = 0;
+
+ if (ctx->old) {
+ bool enable_wbt = test_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT,
+ &ctx->old->flags);
+
+ elv_unregister_queue(q, ctx->old);
+ kobject_put(&ctx->old->kobj);
+ if (enable_wbt)
+ wbt_enable_default(q->disk);
+ }
+ if (ctx->new) {
+ ret = elv_register_queue(q, ctx->new, !ctx->no_uevent);
+ if (ret)
+ elv_exit_and_release(q);
+ }
+ return ret;
}
/*
* Switch this queue to the given IO scheduler.
*/
-static int elevator_change(struct request_queue *q, const char *elevator_name)
+static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
{
- struct elevator_type *e;
- int ret;
+ unsigned int memflags;
+ int ret = 0;
- /* Make sure queue is not in the middle of being removed */
- if (!blk_queue_registered(q))
- return -ENOENT;
+ lockdep_assert_held(&q->tag_set->update_nr_hwq_lock);
+
+ memflags = blk_mq_freeze_queue(q);
+ /*
+ * May be called before the disk is added, when there isn't any FS I/O,
+ * so freezing the queue plus canceling dispatch work is enough to
+ * drain any dispatch activity originating from passthrough
+ * requests; there is then no need to quiesce the queue, which could add
+ * long boot latency, especially when lots of disks are involved.
+ *
+ * Disk isn't added yet, so verifying queue lock only manually.
+ */
+ blk_mq_cancel_work_sync(q);
+ mutex_lock(&q->elevator_lock);
+ if (!(q->elevator && elevator_match(q->elevator->type, ctx->name)))
+ ret = elevator_switch(q, ctx);
+ mutex_unlock(&q->elevator_lock);
+ blk_mq_unfreeze_queue(q, memflags);
+ if (!ret)
+ ret = elevator_change_done(q, ctx);
+
+ return ret;
+}
+
+/*
+ * The I/O scheduler depends on the number of hardware queues, so force a
+ * reattachment when nr_hw_queues changes.
+ */
+void elv_update_nr_hw_queues(struct request_queue *q)
+{
+ struct elv_change_ctx ctx = {};
+ int ret = -ENODEV;
+
+ WARN_ON_ONCE(q->mq_freeze_depth == 0);
- if (!strncmp(elevator_name, "none", 4)) {
- if (q->elevator)
- elevator_disable(q);
- return 0;
+ mutex_lock(&q->elevator_lock);
+ if (q->elevator && !blk_queue_dying(q) && blk_queue_registered(q)) {
+ ctx.name = q->elevator->type->elevator_name;
+
+ /* force to reattach elevator after nr_hw_queue is updated */
+ ret = elevator_switch(q, &ctx);
}
+ mutex_unlock(&q->elevator_lock);
+ blk_mq_unfreeze_queue_nomemrestore(q);
+ if (!ret)
+ WARN_ON_ONCE(elevator_change_done(q, &ctx));
+}
- if (q->elevator && elevator_match(q->elevator->type, elevator_name))
- return 0;
+/*
+ * Use the default elevator settings. If the chosen elevator initialization
+ * fails, fall back to the "none" elevator (no elevator).
+ */
+void elevator_set_default(struct request_queue *q)
+{
+ struct elv_change_ctx ctx = {
+ .name = "mq-deadline",
+ .no_uevent = true,
+ };
+ int err = 0;
- e = elevator_find_get(elevator_name);
- if (!e)
- return -EINVAL;
- ret = elevator_switch(q, e);
- elevator_put(e);
- return ret;
+	/* now we allow switching the elevator */
+ blk_queue_flag_clear(QUEUE_FLAG_NO_ELV_SWITCH, q);
+
+ if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
+ return;
+
+ /*
+ * For single queue devices, default to using mq-deadline. If we
+ * have multiple queues or mq-deadline is not available, default
+ * to "none".
+ */
+ if (elevator_find_get(ctx.name) && (q->nr_hw_queues == 1 ||
+ blk_mq_is_shared_tags(q->tag_set->flags)))
+ err = elevator_change(q, &ctx);
+ if (err < 0)
+ pr_warn("\"%s\" elevator initialization, failed %d, "
+ "falling back to \"none\"\n", ctx.name, err);
}
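Concretely, under the selection policy above (illustrative cases): a single-queue SATA disk gets "mq-deadline" at registration time; a multi-queue NVMe device whose tag set does not share tags stays on "none"; a multi-queue device with shared tags still defaults to "mq-deadline"; a tag set flagged BLK_MQ_F_NO_SCHED_BY_DEFAULT always keeps "none"; and if mq-deadline is not available at all, elevator_find_get() fails and the queue likewise stays on "none".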
-static void elv_iosched_load_module(char *elevator_name)
+void elevator_set_none(struct request_queue *q)
+{
+ struct elv_change_ctx ctx = {
+ .name = "none",
+ };
+ int err;
+
+ err = elevator_change(q, &ctx);
+ if (err < 0)
+ pr_warn("%s: set none elevator failed %d\n", __func__, err);
+}
+
+static void elv_iosched_load_module(const char *elevator_name)
{
struct elevator_type *found;
@@ -716,10 +768,14 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
size_t count)
{
char elevator_name[ELV_NAME_MAX];
- char *name;
+ struct elv_change_ctx ctx = {};
int ret;
- unsigned int memflags;
struct request_queue *q = disk->queue;
+ struct blk_mq_tag_set *set = q->tag_set;
+
+ /* Make sure queue is not in the middle of being removed */
+ if (!blk_queue_registered(q))
+ return -ENOENT;
/*
* If the attribute needs to load a module, do it before freezing the
@@ -727,24 +783,25 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
* queue is the one for the device storing the module file.
*/
strscpy(elevator_name, buf, sizeof(elevator_name));
- name = strstrip(elevator_name);
+ ctx.name = strstrip(elevator_name);
- elv_iosched_load_module(name);
+ elv_iosched_load_module(ctx.name);
- memflags = blk_mq_freeze_queue(q);
- mutex_lock(&q->elevator_lock);
- ret = elevator_change(q, name);
- if (!ret)
- ret = count;
- mutex_unlock(&q->elevator_lock);
- blk_mq_unfreeze_queue(q, memflags);
+ down_read(&set->update_nr_hwq_lock);
+ if (!blk_queue_no_elv_switch(q)) {
+ ret = elevator_change(q, &ctx);
+ if (!ret)
+ ret = count;
+ } else {
+ ret = -ENOENT;
+ }
+ up_read(&set->update_nr_hwq_lock);
return ret;
}
ssize_t elv_iosched_show(struct gendisk *disk, char *name)
{
struct request_queue *q = disk->queue;
- struct elevator_queue *eq = q->elevator;
struct elevator_type *cur = NULL, *e;
int len = 0;
@@ -753,7 +810,7 @@ ssize_t elv_iosched_show(struct gendisk *disk, char *name)
len += sprintf(name+len, "[none] ");
} else {
len += sprintf(name+len, "none ");
- cur = eq->type;
+ cur = q->elevator->type;
}
spin_lock(&elv_list_lock);
diff --git a/block/elevator.h b/block/elevator.h
index e4e44dfac503..a07ce773a38f 100644
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -121,7 +121,8 @@ struct elevator_queue
};
#define ELEVATOR_FLAG_REGISTERED 0
-#define ELEVATOR_FLAG_DISABLE_WBT 1
+#define ELEVATOR_FLAG_DYING 1
+#define ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT 2
/*
* block elevator interface
@@ -182,4 +183,7 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t);
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
+void blk_mq_sched_reg_debugfs(struct request_queue *q);
+void blk_mq_sched_unreg_debugfs(struct request_queue *q);
+
#endif /* _ELEVATOR_H */
diff --git a/block/fops.c b/block/fops.c
index be9f1dbea9ce..1309861d4c2c 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -73,6 +73,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
}
bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
+ bio.bi_write_stream = iocb->ki_write_stream;
bio.bi_ioprio = iocb->ki_ioprio;
if (iocb->ki_flags & IOCB_ATOMIC)
bio.bi_opf |= REQ_ATOMIC;
@@ -206,6 +207,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
for (;;) {
bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
+ bio->bi_write_stream = iocb->ki_write_stream;
bio->bi_private = dio;
bio->bi_end_io = blkdev_bio_end_io;
bio->bi_ioprio = iocb->ki_ioprio;
@@ -333,6 +335,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
dio->iocb = iocb;
bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
+ bio->bi_write_stream = iocb->ki_write_stream;
bio->bi_end_io = blkdev_bio_end_io_async;
bio->bi_ioprio = iocb->ki_ioprio;
@@ -398,6 +401,26 @@ static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
if (blkdev_dio_invalid(bdev, iocb, iter))
return -EINVAL;
+ if (iov_iter_rw(iter) == WRITE) {
+ u16 max_write_streams = bdev_max_write_streams(bdev);
+
+ if (iocb->ki_write_stream) {
+ if (iocb->ki_write_stream > max_write_streams)
+ return -EINVAL;
+ } else if (max_write_streams) {
+ enum rw_hint write_hint =
+ file_inode(iocb->ki_filp)->i_write_hint;
+
+ /*
+ * Just use the write hint as write stream for block
+ * device writes. This assumes no file system is
+ * mounted that would use the streams differently.
+ */
+ if (write_hint <= max_write_streams)
+ iocb->ki_write_stream = write_hint;
+ }
+ }
+
nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
if (likely(nr_pages <= BIO_MAX_VECS)) {
if (is_sync_kiocb(iocb))
@@ -451,12 +474,13 @@ static int blkdev_get_block(struct inode *inode, sector_t iblock,
static int blkdev_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
+ struct folio *folio = NULL;
struct blk_plug plug;
int err;
blk_start_plug(&plug);
- err = write_cache_pages(mapping, wbc, block_write_full_folio,
- blkdev_get_block);
+ while ((folio = writeback_iter(mapping, wbc, folio, &err)))
+ err = block_write_full_folio(folio, wbc, blkdev_get_block);
blk_finish_plug(&plug);
return err;
@@ -642,7 +666,7 @@ static int blkdev_open(struct inode *inode, struct file *filp)
if (ret)
return ret;
- bdev = blkdev_get_no_open(inode->i_rdev);
+ bdev = blkdev_get_no_open(inode->i_rdev, true);
if (!bdev)
return -ENXIO;
@@ -746,7 +770,14 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
ret = direct_write_fallback(iocb, from, ret,
blkdev_buffered_write(iocb, from));
} else {
+ /*
+ * Take i_rwsem and invalidate_lock to avoid racing with
+ * set_blocksize changing i_blkbits/folio order and punching
+ * out the pagecache.
+ */
+ inode_lock_shared(bd_inode);
ret = blkdev_buffered_write(iocb, from);
+ inode_unlock_shared(bd_inode);
}
if (ret > 0)
@@ -757,6 +788,7 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
+ struct inode *bd_inode = bdev_file_inode(iocb->ki_filp);
struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
loff_t size = bdev_nr_bytes(bdev);
loff_t pos = iocb->ki_pos;
@@ -793,7 +825,13 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
goto reexpand;
}
+ /*
+ * Take i_rwsem and invalidate_lock to avoid racing with set_blocksize
+ * changing i_blkbits/folio order and punching out the pagecache.
+ */
+ inode_lock_shared(bd_inode);
ret = filemap_read(iocb, to, ret);
+ inode_unlock_shared(bd_inode);
reexpand:
if (unlikely(shorted))
@@ -836,6 +874,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
if ((start | len) & (bdev_logical_block_size(bdev) - 1))
return -EINVAL;
+ inode_lock(inode);
filemap_invalidate_lock(inode->i_mapping);
/*
@@ -868,6 +907,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
fail:
filemap_invalidate_unlock(inode->i_mapping);
+ inode_unlock(inode);
return error;
}
diff --git a/block/genhd.c b/block/genhd.c
index c2bd86cd09de..8171a6bc3210 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -125,37 +125,46 @@ static void part_stat_read_all(struct block_device *part,
}
}
-unsigned int part_in_flight(struct block_device *part)
+static void bdev_count_inflight_rw(struct block_device *part,
+ unsigned int inflight[2], bool mq_driver)
{
- unsigned int inflight = 0;
int cpu;
- for_each_possible_cpu(cpu) {
- inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
- part_stat_local_read_cpu(part, in_flight[1], cpu);
+ if (mq_driver) {
+ blk_mq_in_driver_rw(part, inflight);
+ } else {
+ for_each_possible_cpu(cpu) {
+ inflight[READ] += part_stat_local_read_cpu(
+ part, in_flight[READ], cpu);
+ inflight[WRITE] += part_stat_local_read_cpu(
+ part, in_flight[WRITE], cpu);
+ }
}
- if ((int)inflight < 0)
- inflight = 0;
- return inflight;
+ if (WARN_ON_ONCE((int)inflight[READ] < 0))
+ inflight[READ] = 0;
+ if (WARN_ON_ONCE((int)inflight[WRITE] < 0))
+ inflight[WRITE] = 0;
}
-static void part_in_flight_rw(struct block_device *part,
- unsigned int inflight[2])
+/**
+ * bdev_count_inflight - get the number of inflight IOs for a block device.
+ *
+ * @part: the block device.
+ *
+ * Inflight here means IO for which accounting has started: from
+ * bdev_start_io_acct() for bio-based block devices, and from
+ * blk_account_io_start() for rq-based block devices.
+ */
+unsigned int bdev_count_inflight(struct block_device *part)
{
- int cpu;
+ unsigned int inflight[2] = {0};
- inflight[0] = 0;
- inflight[1] = 0;
- for_each_possible_cpu(cpu) {
- inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);
- inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);
- }
- if ((int)inflight[0] < 0)
- inflight[0] = 0;
- if ((int)inflight[1] < 0)
- inflight[1] = 0;
+ bdev_count_inflight_rw(part, inflight, false);
+
+ return inflight[READ] + inflight[WRITE];
}
+EXPORT_SYMBOL_GPL(bdev_count_inflight);
/*
* Can be deleted altogether. Later.
@@ -389,19 +398,35 @@ int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
return ret;
}
-/**
- * add_disk_fwnode - add disk information to kernel list with fwnode
- * @parent: parent device for the disk
- * @disk: per-device partitioning information
- * @groups: Additional per-device sysfs groups
- * @fwnode: attached disk fwnode
- *
- * This function registers the partitioning information in @disk
- * with the kernel. Also attach a fwnode to the disk device.
- */
-int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
- const struct attribute_group **groups,
- struct fwnode_handle *fwnode)
+static void add_disk_final(struct gendisk *disk)
+{
+ struct device *ddev = disk_to_dev(disk);
+
+ if (!(disk->flags & GENHD_FL_HIDDEN)) {
+		/* Make sure the first partition scan can proceed */
+ if (get_capacity(disk) && disk_has_partscan(disk))
+ set_bit(GD_NEED_PART_SCAN, &disk->state);
+
+ bdev_add(disk->part0, ddev->devt);
+ if (get_capacity(disk))
+ disk_scan_partitions(disk, BLK_OPEN_READ);
+
+ /*
+ * Announce the disk and partitions after all partitions are
+ * created. (for hidden disks uevents remain suppressed forever)
+ */
+ dev_set_uevent_suppress(ddev, 0);
+ disk_uevent(disk, KOBJ_ADD);
+ }
+
+ blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
+ disk_add_events(disk);
+ set_bit(GD_ADDED, &disk->state);
+}
+
+static int __add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups,
+ struct fwnode_handle *fwnode)
{
struct device *ddev = disk_to_dev(disk);
@@ -416,12 +441,6 @@ int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
*/
if (disk->fops->submit_bio || disk->fops->poll_bio)
return -EINVAL;
-
- /*
- * Initialize the I/O scheduler code and pick a default one if
- * needed.
- */
- elevator_init_mq(disk->queue);
} else {
if (!disk->fops->submit_bio)
return -EINVAL;
@@ -438,7 +457,7 @@ int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
ret = -EINVAL;
if (disk->major) {
if (WARN_ON(!disk->minors))
- goto out_exit_elevator;
+ goto out;
if (disk->minors > DISK_MAX_PARTS) {
pr_err("block: can't allocate more than %d partitions\n",
@@ -448,14 +467,14 @@ int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
if (disk->first_minor > MINORMASK ||
disk->minors > MINORMASK + 1 ||
disk->first_minor + disk->minors > MINORMASK + 1)
- goto out_exit_elevator;
+ goto out;
} else {
if (WARN_ON(disk->minors))
- goto out_exit_elevator;
+ goto out;
ret = blk_alloc_ext_minor();
if (ret < 0)
- goto out_exit_elevator;
+ goto out;
disk->major = BLOCK_EXT_MAJOR;
disk->first_minor = ret;
}
@@ -516,21 +535,6 @@ int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
&disk->bdi->dev->kobj, "bdi");
if (ret)
goto out_unregister_bdi;
-
- /* Make sure the first partition scan will be proceed */
- if (get_capacity(disk) && disk_has_partscan(disk))
- set_bit(GD_NEED_PART_SCAN, &disk->state);
-
- bdev_add(disk->part0, ddev->devt);
- if (get_capacity(disk))
- disk_scan_partitions(disk, BLK_OPEN_READ);
-
- /*
- * Announce the disk and partitions after all partitions are
- * created. (for hidden disks uevents remain suppressed forever)
- */
- dev_set_uevent_suppress(ddev, 0);
- disk_uevent(disk, KOBJ_ADD);
} else {
/*
* Even if the block_device for a hidden gendisk is not
@@ -539,10 +543,6 @@ int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
*/
disk->part0->bd_dev = MKDEV(disk->major, disk->first_minor);
}
-
- blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
- disk_add_events(disk);
- set_bit(GD_ADDED, &disk->state);
return 0;
out_unregister_bdi:
@@ -564,12 +564,46 @@ out_device_del:
out_free_ext_minor:
if (disk->major == BLOCK_EXT_MAJOR)
blk_free_ext_minor(disk->first_minor);
-out_exit_elevator:
- if (disk->queue->elevator) {
- mutex_lock(&disk->queue->elevator_lock);
- elevator_exit(disk->queue);
- mutex_unlock(&disk->queue->elevator_lock);
+out:
+ return ret;
+}
+
+/**
+ * add_disk_fwnode - add disk information to kernel list with fwnode
+ * @parent: parent device for the disk
+ * @disk: per-device partitioning information
+ * @groups: Additional per-device sysfs groups
+ * @fwnode: attached disk fwnode
+ *
+ * This function registers the partitioning information in @disk
+ * with the kernel. Also attach a fwnode to the disk device.
+ */
+int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups,
+ struct fwnode_handle *fwnode)
+{
+ struct blk_mq_tag_set *set;
+ unsigned int memflags;
+ int ret;
+
+ if (queue_is_mq(disk->queue)) {
+ set = disk->queue->tag_set;
+ memflags = memalloc_noio_save();
+ down_read(&set->update_nr_hwq_lock);
+ ret = __add_disk(parent, disk, groups, fwnode);
+ up_read(&set->update_nr_hwq_lock);
+ memalloc_noio_restore(memflags);
+ } else {
+ ret = __add_disk(parent, disk, groups, fwnode);
}
+
+	/*
+	 * add_disk_final() does not need to read `nr_hw_queues`, so call it
+	 * outside the read lock `set->update_nr_hwq_lock` to avoid an
+	 * unnecessary lock dependency on `disk->open_mutex` from partition
+	 * scanning.
+	 */
+ if (!ret)
+ add_disk_final(disk);
return ret;
}
EXPORT_SYMBOL_GPL(add_disk_fwnode);
@@ -652,26 +686,7 @@ void blk_mark_disk_dead(struct gendisk *disk)
}
EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
-/**
- * del_gendisk - remove the gendisk
- * @disk: the struct gendisk to remove
- *
- * Removes the gendisk and all its associated resources. This deletes the
- * partitions associated with the gendisk, and unregisters the associated
- * request_queue.
- *
- * This is the counter to the respective __device_add_disk() call.
- *
- * The final removal of the struct gendisk happens when its refcount reaches 0
- * with put_disk(), which should be called after del_gendisk(), if
- * __device_add_disk() was used.
- *
- * Drivers exist which depend on the release of the gendisk to be synchronous,
- * it should not be deferred.
- *
- * Context: can sleep
- */
-void del_gendisk(struct gendisk *disk)
+static void __del_gendisk(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
struct block_device *part;
@@ -743,14 +758,7 @@ void del_gendisk(struct gendisk *disk)
if (queue_is_mq(q))
blk_mq_cancel_work_sync(q);
- blk_mq_quiesce_queue(q);
- if (q->elevator) {
- mutex_lock(&q->elevator_lock);
- elevator_exit(q);
- mutex_unlock(&q->elevator_lock);
- }
rq_qos_exit(q);
- blk_mq_unquiesce_queue(q);
/*
* If the disk does not own the queue, allow using passthrough requests
@@ -764,6 +772,55 @@ void del_gendisk(struct gendisk *disk)
if (start_drain)
blk_unfreeze_release_lock(q);
}
+
+static void disable_elv_switch(struct request_queue *q)
+{
+	struct blk_mq_tag_set *set = q->tag_set;
+
+	WARN_ON_ONCE(!queue_is_mq(q));
+
+ down_write(&set->update_nr_hwq_lock);
+ blk_queue_flag_set(QUEUE_FLAG_NO_ELV_SWITCH, q);
+ up_write(&set->update_nr_hwq_lock);
+}
+
+/**
+ * del_gendisk - remove the gendisk
+ * @disk: the struct gendisk to remove
+ *
+ * Removes the gendisk and all its associated resources. This deletes the
+ * partitions associated with the gendisk, and unregisters the associated
+ * request_queue.
+ *
+ * This is the counter to the respective __device_add_disk() call.
+ *
+ * The final removal of the struct gendisk happens when its refcount reaches 0
+ * with put_disk(), which should be called after del_gendisk(), if
+ * __device_add_disk() was used.
+ *
+ * Drivers exist which depend on the release of the gendisk being synchronous;
+ * it should not be deferred.
+ *
+ * Context: can sleep
+ */
+void del_gendisk(struct gendisk *disk)
+{
+ struct blk_mq_tag_set *set;
+ unsigned int memflags;
+
+ if (!queue_is_mq(disk->queue)) {
+ __del_gendisk(disk);
+ } else {
+ set = disk->queue->tag_set;
+
+ disable_elv_switch(disk->queue);
+
+ memflags = memalloc_noio_save();
+ down_read(&set->update_nr_hwq_lock);
+ __del_gendisk(disk);
+ up_read(&set->update_nr_hwq_lock);
+ memalloc_noio_restore(memflags);
+ }
+}
EXPORT_SYMBOL(del_gendisk);
/**
@@ -1005,7 +1062,7 @@ ssize_t part_stat_show(struct device *dev,
struct disk_stats stat;
unsigned int inflight;
- inflight = part_in_flight(bdev);
+ inflight = bdev_count_inflight(bdev);
if (inflight) {
part_stat_lock();
update_io_ticks(bdev, jiffies, true);
@@ -1042,19 +1099,21 @@ ssize_t part_stat_show(struct device *dev,
(unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC));
}
+/*
+ * Show the number of IOs issued to the driver.
+ * For bio-based devices, accounting starts in bdev_start_io_acct();
+ * for rq-based devices, it starts in blk_mq_start_request().
+ */
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct block_device *bdev = dev_to_bdev(dev);
struct request_queue *q = bdev_get_queue(bdev);
- unsigned int inflight[2];
+ unsigned int inflight[2] = {0};
- if (queue_is_mq(q))
- blk_mq_in_flight_rw(q, bdev, inflight);
- else
- part_in_flight_rw(bdev, inflight);
+ bdev_count_inflight_rw(bdev, inflight, queue_is_mq(q));
- return sysfs_emit(buf, "%8u %8u\n", inflight[0], inflight[1]);
+ return sysfs_emit(buf, "%8u %8u\n", inflight[READ], inflight[WRITE]);
}
static ssize_t disk_capability_show(struct device *dev,
@@ -1307,7 +1366,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
if (bdev_is_partition(hd) && !bdev_nr_sectors(hd))
continue;
- inflight = part_in_flight(hd);
+ inflight = bdev_count_inflight(hd);
if (inflight) {
part_stat_lock();
update_io_ticks(hd, jiffies, true);
@@ -1422,6 +1481,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
INIT_LIST_HEAD(&disk->slave_bdevs);
#endif
+ mutex_init(&disk->rqos_state_mutex);
return disk;
out_erase_part0:
diff --git a/block/ioctl.c b/block/ioctl.c
index faa40f383e27..e472cc1030c6 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -142,6 +142,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
if (err)
return err;
+ inode_lock(bdev->bd_mapping->host);
filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, start + len - 1);
if (err)
@@ -174,6 +175,7 @@ out_unplug:
blk_finish_plug(&plug);
fail:
filemap_invalidate_unlock(bdev->bd_mapping);
+ inode_unlock(bdev->bd_mapping->host);
return err;
}
@@ -199,12 +201,14 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
end > bdev_nr_bytes(bdev))
return -EINVAL;
+ inode_lock(bdev->bd_mapping->host);
filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, end - 1);
if (!err)
err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
GFP_KERNEL);
filemap_invalidate_unlock(bdev->bd_mapping);
+ inode_unlock(bdev->bd_mapping->host);
return err;
}
@@ -236,6 +240,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
return -EINVAL;
/* Invalidate the page cache, including dirty pages */
+ inode_lock(bdev->bd_mapping->host);
filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, end);
if (err)
@@ -246,6 +251,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
fail:
filemap_invalidate_unlock(bdev->bd_mapping);
+ inode_unlock(bdev->bd_mapping->host);
return err;
}
diff --git a/block/ioprio.c b/block/ioprio.c
index 73301a261429..f0ee2798539c 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -46,12 +46,8 @@ int ioprio_check_cap(int ioprio)
*/
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
return -EPERM;
- fallthrough;
- /* rt has prio field too */
- case IOPRIO_CLASS_BE:
- if (level >= IOPRIO_NR_LEVELS)
- return -EINVAL;
break;
+ case IOPRIO_CLASS_BE:
case IOPRIO_CLASS_IDLE:
break;
case IOPRIO_CLASS_NONE:
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 754f6b7415cd..2edf1cac06d5 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -715,7 +715,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
}
/*
- * Called from blk_mq_insert_request() or blk_mq_dispatch_plug_list().
+ * Called from blk_mq_insert_request() or blk_mq_dispatch_list().
*/
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
struct list_head *list,
diff --git a/crypto/842.c b/crypto/842.c
index 5fb37a925989..8c257c40e2b9 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -23,10 +23,6 @@
#include <linux/module.h>
#include <linux/sw842.h>
-struct crypto842_ctx {
- void *wmem; /* working memory for compress */
-};
-
static void *crypto842_alloc_ctx(void)
{
void *ctx;
@@ -74,7 +70,7 @@ static int __init crypto842_mod_init(void)
{
return crypto_register_scomp(&scomp);
}
-subsys_initcall(crypto842_mod_init);
+module_init(crypto842_mod_init);
static void __exit crypto842_mod_exit(void)
{
diff --git a/crypto/Kconfig b/crypto/Kconfig
index dbf97c4e7c59..e9fee7818e27 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -25,7 +25,7 @@ menu "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
- depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
+ depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && CRYPTO_SELFTESTS
depends on (MODULE_SIG || !MODULES)
help
This option enables the fips boot option which is
@@ -143,16 +143,17 @@ config CRYPTO_ACOMP
config CRYPTO_HKDF
tristate
- select CRYPTO_SHA256 if !CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
- select CRYPTO_SHA512 if !CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_SHA256 if CRYPTO_SELFTESTS
+ select CRYPTO_SHA512 if CRYPTO_SELFTESTS
select CRYPTO_HASH2
config CRYPTO_MANAGER
- tristate "Cryptographic algorithm manager"
+ tristate
+ default CRYPTO_ALGAPI if CRYPTO_SELFTESTS
select CRYPTO_MANAGER2
help
- Create default cryptographic template instantiations such as
- cbc(aes).
+	  This provides support for instantiating templates such as
+	  cbc(aes), and for the crypto self-tests.
config CRYPTO_MANAGER2
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
@@ -173,35 +174,27 @@ config CRYPTO_USER
Userspace configuration for cryptographic instantiations such as
cbc(aes).
-config CRYPTO_MANAGER_DISABLE_TESTS
- bool "Disable run-time self tests"
- default y
+config CRYPTO_SELFTESTS
+ bool "Enable cryptographic self-tests"
+ depends on DEBUG_KERNEL
help
- Disable run-time self tests that normally take place at
- algorithm registration.
+ Enable the cryptographic self-tests.
-config CRYPTO_MANAGER_EXTRA_TESTS
- bool "Enable extra run-time crypto self tests"
- depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS && CRYPTO_MANAGER
- help
- Enable extra run-time self tests of registered crypto algorithms,
- including randomized fuzz tests.
+ The cryptographic self-tests run at boot time, or at algorithm
+ registration time if algorithms are dynamically loaded later.
- This is intended for developer use only, as these tests take much
- longer to run than the normal self tests.
+ This is primarily intended for developer use. It should not be
+ enabled in production kernels, unless you are trying to use these
+ tests to fulfill a FIPS testing requirement.
config CRYPTO_NULL
tristate "Null algorithms"
- select CRYPTO_NULL2
+ select CRYPTO_ALGAPI
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
help
These are 'Null' algorithms, used by IPsec, which do nothing.
-config CRYPTO_NULL2
- tristate
- select CRYPTO_ALGAPI2
- select CRYPTO_SKCIPHER2
- select CRYPTO_HASH2
-
config CRYPTO_PCRYPT
tristate "Parallel crypto engine"
depends on SMP
@@ -228,7 +221,6 @@ config CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
select CRYPTO_HASH
- select CRYPTO_NULL
help
Authenc: Combined mode wrapper for IPsec.
@@ -240,18 +232,21 @@ config CRYPTO_KRB5ENC
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
select CRYPTO_HASH
- select CRYPTO_NULL
help
Combined hash and cipher support for Kerberos 5 RFC3961 simplified
profile. This is required for Kerberos 5-style encryption, used by
sunrpc/NFS and rxrpc/AFS.
-config CRYPTO_TEST
- tristate "Testing module"
+config CRYPTO_BENCHMARK
+ tristate "Crypto benchmarking module"
depends on m || EXPERT
select CRYPTO_MANAGER
help
- Quick & dirty crypto test module.
+ Quick & dirty crypto benchmarking module.
+
+ This is mainly intended for use by people developing cryptographic
+ algorithms in the kernel. It should not be enabled in production
+ kernels.
config CRYPTO_SIMD
tristate
@@ -634,8 +629,8 @@ config CRYPTO_ARC4
config CRYPTO_CHACHA20
tristate "ChaCha"
+ select CRYPTO_LIB_CHACHA
select CRYPTO_LIB_CHACHA_GENERIC
- select CRYPTO_LIB_CHACHA_INTERNAL
select CRYPTO_SKCIPHER
help
The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms
@@ -784,8 +779,8 @@ config CRYPTO_AEGIS128_SIMD
config CRYPTO_CHACHA20POLY1305
tristate "ChaCha20-Poly1305"
select CRYPTO_CHACHA20
- select CRYPTO_POLY1305
select CRYPTO_AEAD
+ select CRYPTO_LIB_POLY1305
select CRYPTO_MANAGER
help
ChaCha20 stream cipher and Poly1305 authenticator combined
@@ -806,7 +801,6 @@ config CRYPTO_GCM
select CRYPTO_CTR
select CRYPTO_AEAD
select CRYPTO_GHASH
- select CRYPTO_NULL
select CRYPTO_MANAGER
help
GCM (Galois/Counter Mode) authenticated encryption mode and GMAC
@@ -817,7 +811,6 @@ config CRYPTO_GCM
config CRYPTO_GENIV
tristate
select CRYPTO_AEAD
- select CRYPTO_NULL
select CRYPTO_MANAGER
select CRYPTO_RNG_DEFAULT
@@ -953,18 +946,6 @@ config CRYPTO_POLYVAL
This is used in HCTR2. It is not a general-purpose
cryptographic hash function.
-config CRYPTO_POLY1305
- tristate "Poly1305"
- select CRYPTO_HASH
- select CRYPTO_LIB_POLY1305_GENERIC
- select CRYPTO_LIB_POLY1305_INTERNAL
- help
- Poly1305 authenticator algorithm (RFC7539)
-
- Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
- It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
- in IETF protocols. This is the portable C implementation of Poly1305.
-
config CRYPTO_RMD160
tristate "RIPEMD-160"
select CRYPTO_HASH
@@ -994,6 +975,7 @@ config CRYPTO_SHA256
tristate "SHA-224 and SHA-256"
select CRYPTO_HASH
select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA256_GENERIC
help
SHA-224 and SHA-256 secure hash algorithms (FIPS 180, ISO/IEC 10118-3)
@@ -1012,13 +994,10 @@ config CRYPTO_SHA3
help
SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3)
-config CRYPTO_SM3
- tristate
-
config CRYPTO_SM3_GENERIC
tristate "SM3 (ShangMi 3)"
select CRYPTO_HASH
- select CRYPTO_SM3
+ select CRYPTO_LIB_SM3
help
SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012, ISO/IEC 10118-3)
@@ -1406,7 +1385,6 @@ config CRYPTO_USER_API_AEAD
depends on NET
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
- select CRYPTO_NULL
select CRYPTO_USER_API
help
Enable the userspace interface for AEAD cipher algorithms.
diff --git a/crypto/Makefile b/crypto/Makefile
index 0e6ab5ffd3f7..017df3a2e4bb 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -71,15 +71,15 @@ obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
-obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
+obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o
obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
-obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
+obj-$(CONFIG_CRYPTO_SHA256) += sha256.o
+CFLAGS_sha256.o += -DARCH=$(ARCH)
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
-obj-$(CONFIG_CRYPTO_SM3) += sm3.o
obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o
obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
@@ -148,14 +148,16 @@ obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
obj-$(CONFIG_CRYPTO_ARIA) += aria_generic.o
-obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
-obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
+obj-$(CONFIG_CRYPTO_CHACHA20) += chacha.o
+CFLAGS_chacha.o += -DARCH=$(ARCH)
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
-obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
-obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
-CFLAGS_crc32c_generic.o += -DARCH=$(ARCH)
-CFLAGS_crc32_generic.o += -DARCH=$(ARCH)
+obj-$(CONFIG_CRYPTO_CRC32C) += crc32c-cryptoapi.o
+crc32c-cryptoapi-y := crc32c.o
+CFLAGS_crc32c.o += -DARCH=$(ARCH)
+obj-$(CONFIG_CRYPTO_CRC32) += crc32-cryptoapi.o
+crc32-cryptoapi-y := crc32.o
+CFLAGS_crc32.o += -DARCH=$(ARCH)
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
obj-$(CONFIG_CRYPTO_KRB5ENC) += krb5enc.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o
@@ -172,7 +174,7 @@ KASAN_SANITIZE_jitterentropy.o = n
UBSAN_SANITIZE_jitterentropy.o = n
jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o
obj-$(CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE) += jitterentropy-testing.o
-obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
+obj-$(CONFIG_CRYPTO_BENCHMARK) += tcrypt.o
obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o
obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
diff --git a/crypto/acompress.c b/crypto/acompress.c
index f7a3fbe5447e..be28cbfd22e3 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -8,20 +8,32 @@
*/
#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
-#include <linux/errno.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/page-flags.h>
+#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include <net/netlink.h>
#include "compress.h"
struct crypto_scomp;
+enum {
+ ACOMP_WALK_SLEEP = 1 << 0,
+ ACOMP_WALK_SRC_LINEAR = 1 << 1,
+ ACOMP_WALK_DST_LINEAR = 1 << 2,
+};
+
static const struct crypto_type crypto_acomp_type;
static void acomp_reqchain_done(void *data, int err);
@@ -65,7 +77,7 @@ static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
alg->exit(acomp);
if (acomp_is_async(acomp))
- crypto_free_acomp(acomp->fb);
+ crypto_free_acomp(crypto_acomp_fb(acomp));
}
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
@@ -75,8 +87,6 @@ static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
struct crypto_acomp *fb = NULL;
int err;
- acomp->fb = acomp;
-
if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
return crypto_init_scomp_ops_async(tfm);
@@ -90,12 +100,12 @@ static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
goto out_free_fb;
- acomp->fb = fb;
+ tfm->fb = crypto_acomp_tfm(fb);
}
acomp->compress = alg->compress;
acomp->decompress = alg->decompress;
- acomp->reqsize = alg->reqsize;
+ acomp->reqsize = alg->base.cra_reqsize;
acomp->base.exit = crypto_acomp_exit_tfm;
@@ -136,6 +146,7 @@ static const struct crypto_type crypto_acomp_type = {
.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
.tfmsize = offsetof(struct crypto_acomp, base),
+ .algsize = offsetof(struct acomp_alg, base),
};
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
@@ -161,7 +172,6 @@ static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
state->data = req->base.data;
req->base.complete = cplt;
req->base.data = state;
- state->req0 = req;
}
static void acomp_restore_req(struct acomp_req *req)
@@ -172,23 +182,16 @@ static void acomp_restore_req(struct acomp_req *req)
req->base.data = state->data;
}
-static void acomp_reqchain_virt(struct acomp_req_chain *state, int err)
+static void acomp_reqchain_virt(struct acomp_req *req)
{
- struct acomp_req *req = state->cur;
+ struct acomp_req_chain *state = &req->chain;
unsigned int slen = req->slen;
unsigned int dlen = req->dlen;
- req->base.err = err;
- state = &req->chain;
-
if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
acomp_request_set_src_dma(req, state->src, slen);
- else if (state->flags & CRYPTO_ACOMP_REQ_SRC_FOLIO)
- acomp_request_set_src_folio(req, state->sfolio, state->soff, slen);
if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
acomp_request_set_dst_dma(req, state->dst, dlen);
- else if (state->flags & CRYPTO_ACOMP_REQ_DST_FOLIO)
- acomp_request_set_dst_folio(req, state->dfolio, state->doff, dlen);
}
static void acomp_virt_to_sg(struct acomp_req *req)
@@ -196,9 +199,7 @@ static void acomp_virt_to_sg(struct acomp_req *req)
struct acomp_req_chain *state = &req->chain;
state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
- CRYPTO_ACOMP_REQ_DST_VIRT |
- CRYPTO_ACOMP_REQ_SRC_FOLIO |
- CRYPTO_ACOMP_REQ_DST_FOLIO);
+ CRYPTO_ACOMP_REQ_DST_VIRT);
if (acomp_request_src_isvirt(req)) {
unsigned int slen = req->slen;
@@ -207,17 +208,6 @@ static void acomp_virt_to_sg(struct acomp_req *req)
state->src = svirt;
sg_init_one(&state->ssg, svirt, slen);
acomp_request_set_src_sg(req, &state->ssg, slen);
- } else if (acomp_request_src_isfolio(req)) {
- struct folio *folio = req->sfolio;
- unsigned int slen = req->slen;
- size_t off = req->soff;
-
- state->sfolio = folio;
- state->soff = off;
- sg_init_table(&state->ssg, 1);
- sg_set_page(&state->ssg, folio_page(folio, off / PAGE_SIZE),
- slen, off % PAGE_SIZE);
- acomp_request_set_src_sg(req, &state->ssg, slen);
}
if (acomp_request_dst_isvirt(req)) {
@@ -227,39 +217,15 @@ static void acomp_virt_to_sg(struct acomp_req *req)
state->dst = dvirt;
sg_init_one(&state->dsg, dvirt, dlen);
acomp_request_set_dst_sg(req, &state->dsg, dlen);
- } else if (acomp_request_dst_isfolio(req)) {
- struct folio *folio = req->dfolio;
- unsigned int dlen = req->dlen;
- size_t off = req->doff;
-
- state->dfolio = folio;
- state->doff = off;
- sg_init_table(&state->dsg, 1);
- sg_set_page(&state->dsg, folio_page(folio, off / PAGE_SIZE),
- dlen, off % PAGE_SIZE);
- acomp_request_set_src_sg(req, &state->dsg, dlen);
}
}
-static int acomp_do_nondma(struct acomp_req_chain *state,
- struct acomp_req *req)
+static int acomp_do_nondma(struct acomp_req *req, bool comp)
{
- u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT |
- CRYPTO_ACOMP_REQ_SRC_NONDMA |
- CRYPTO_ACOMP_REQ_DST_VIRT |
- CRYPTO_ACOMP_REQ_DST_NONDMA;
- ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req));
+ ACOMP_FBREQ_ON_STACK(fbreq, req);
int err;
- acomp_request_set_callback(fbreq, req->base.flags, NULL, NULL);
- fbreq->base.flags &= ~keep;
- fbreq->base.flags |= req->base.flags & keep;
- fbreq->src = req->src;
- fbreq->dst = req->dst;
- fbreq->slen = req->slen;
- fbreq->dlen = req->dlen;
-
- if (state->op == crypto_acomp_reqtfm(req)->compress)
+ if (comp)
err = crypto_acomp_compress(fbreq);
else
err = crypto_acomp_decompress(fbreq);
@@ -268,114 +234,74 @@ static int acomp_do_nondma(struct acomp_req_chain *state,
return err;
}
-static int acomp_do_one_req(struct acomp_req_chain *state,
- struct acomp_req *req)
+static int acomp_do_one_req(struct acomp_req *req, bool comp)
{
- state->cur = req;
-
if (acomp_request_isnondma(req))
- return acomp_do_nondma(state, req);
+ return acomp_do_nondma(req, comp);
acomp_virt_to_sg(req);
- return state->op(req);
+ return comp ? crypto_acomp_reqtfm(req)->compress(req) :
+ crypto_acomp_reqtfm(req)->decompress(req);
}
-static int acomp_reqchain_finish(struct acomp_req *req0, int err, u32 mask)
+static int acomp_reqchain_finish(struct acomp_req *req, int err)
{
- struct acomp_req_chain *state = req0->base.data;
- struct acomp_req *req = state->cur;
- struct acomp_req *n;
-
- acomp_reqchain_virt(state, err);
-
- if (req != req0)
- list_add_tail(&req->base.list, &req0->base.list);
-
- list_for_each_entry_safe(req, n, &state->head, base.list) {
- list_del_init(&req->base.list);
-
- req->base.flags &= mask;
- req->base.complete = acomp_reqchain_done;
- req->base.data = state;
-
- err = acomp_do_one_req(state, req);
-
- if (err == -EINPROGRESS) {
- if (!list_empty(&state->head))
- err = -EBUSY;
- goto out;
- }
-
- if (err == -EBUSY)
- goto out;
-
- acomp_reqchain_virt(state, err);
- list_add_tail(&req->base.list, &req0->base.list);
- }
-
- acomp_restore_req(req0);
-
-out:
+ acomp_reqchain_virt(req);
+ acomp_restore_req(req);
return err;
}
static void acomp_reqchain_done(void *data, int err)
{
- struct acomp_req_chain *state = data;
- crypto_completion_t compl = state->compl;
+ struct acomp_req *req = data;
+ crypto_completion_t compl;
- data = state->data;
+ compl = req->chain.compl;
+ data = req->chain.data;
- if (err == -EINPROGRESS) {
- if (!list_empty(&state->head))
- return;
+ if (err == -EINPROGRESS)
goto notify;
- }
- err = acomp_reqchain_finish(state->req0, err,
- CRYPTO_TFM_REQ_MAY_BACKLOG);
- if (err == -EBUSY)
- return;
+ err = acomp_reqchain_finish(req, err);
notify:
compl(data, err);
}
-static int acomp_do_req_chain(struct acomp_req *req,
- int (*op)(struct acomp_req *req))
+static int acomp_do_req_chain(struct acomp_req *req, bool comp)
{
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct acomp_req_chain *state;
int err;
- if (crypto_acomp_req_chain(tfm) ||
- (!acomp_request_chained(req) && acomp_request_issg(req)))
- return op(req);
-
acomp_save_req(req, acomp_reqchain_done);
- state = req->base.data;
-
- state->op = op;
- state->src = NULL;
- INIT_LIST_HEAD(&state->head);
- list_splice_init(&req->base.list, &state->head);
- err = acomp_do_one_req(state, req);
+ err = acomp_do_one_req(req, comp);
if (err == -EBUSY || err == -EINPROGRESS)
- return -EBUSY;
+ return err;
- return acomp_reqchain_finish(req, err, ~0);
+ return acomp_reqchain_finish(req, err);
}
int crypto_acomp_compress(struct acomp_req *req)
{
- return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress);
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (acomp_req_on_stack(req) && acomp_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
+ return crypto_acomp_reqtfm(req)->compress(req);
+ return acomp_do_req_chain(req, true);
}
EXPORT_SYMBOL_GPL(crypto_acomp_compress);
int crypto_acomp_decompress(struct acomp_req *req)
{
- return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress);
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (acomp_req_on_stack(req) && acomp_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
+ return crypto_acomp_reqtfm(req)->decompress(req);
+ return acomp_do_req_chain(req, false);
}
EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
@@ -434,5 +360,229 @@ void crypto_unregister_acomps(struct acomp_alg *algs, int count)
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);
+static void acomp_stream_workfn(struct work_struct *work)
+{
+ struct crypto_acomp_streams *s =
+ container_of(work, struct crypto_acomp_streams, stream_work);
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu;
+
+ for_each_cpu(cpu, &s->stream_want) {
+ struct crypto_acomp_stream *ps;
+ void *ctx;
+
+ ps = per_cpu_ptr(streams, cpu);
+ if (ps->ctx)
+ continue;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx))
+ break;
+
+ spin_lock_bh(&ps->lock);
+ ps->ctx = ctx;
+ spin_unlock_bh(&ps->lock);
+
+ cpumask_clear_cpu(cpu, &s->stream_want);
+ }
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ void (*free_ctx)(void *);
+ int i;
+
+ s->streams = NULL;
+ if (!streams)
+ return;
+
+ cancel_work_sync(&s->stream_work);
+ free_ctx = s->free_ctx;
+
+ for_each_possible_cpu(i) {
+ struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);
+
+ if (!ps->ctx)
+ continue;
+
+ free_ctx(ps->ctx);
+ }
+
+ free_percpu(streams);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);
+
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams;
+ struct crypto_acomp_stream *ps;
+ unsigned int i;
+ void *ctx;
+
+ if (s->streams)
+ return 0;
+
+ streams = alloc_percpu(struct crypto_acomp_stream);
+ if (!streams)
+ return -ENOMEM;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx)) {
+ free_percpu(streams);
+ return PTR_ERR(ctx);
+ }
+
+ i = cpumask_first(cpu_possible_mask);
+ ps = per_cpu_ptr(streams, i);
+ ps->ctx = ctx;
+
+ for_each_possible_cpu(i) {
+ ps = per_cpu_ptr(streams, i);
+ spin_lock_init(&ps->lock);
+ }
+
+ s->streams = streams;
+
+ INIT_WORK(&s->stream_work, acomp_stream_workfn);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu = raw_smp_processor_id();
+ struct crypto_acomp_stream *ps;
+
+ ps = per_cpu_ptr(streams, cpu);
+ spin_lock_bh(&ps->lock);
+ if (likely(ps->ctx))
+ return ps;
+ spin_unlock(&ps->lock);
+
+ cpumask_set_cpu(cpu, &s->stream_want);
+ schedule_work(&s->stream_work);
+
+ ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
+ spin_lock(&ps->lock);
+ return ps;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh);
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used)
+{
+ walk->slen -= used;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
+ scatterwalk_advance(&walk->in, used);
+ else
+ scatterwalk_done_src(&walk->in, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_src);
+
+void acomp_walk_done_dst(struct acomp_walk *walk, int used)
+{
+ walk->dlen -= used;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR))
+ scatterwalk_advance(&walk->out, used);
+ else
+ scatterwalk_done_dst(&walk->out, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_dst);
+
+int acomp_walk_next_src(struct acomp_walk *walk)
+{
+ unsigned int slen = walk->slen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
+ walk->in.offset);
+ return min(slen, max);
+ }
+
+ return slen ? scatterwalk_next(&walk->in, slen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_src);
+
+int acomp_walk_next_dst(struct acomp_walk *walk)
+{
+ unsigned int dlen = walk->dlen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
+ walk->out.offset);
+ return min(dlen, max);
+ }
+
+ return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_dst);
+
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req, bool atomic)
+{
+ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+
+ walk->slen = req->slen;
+ walk->dlen = req->dlen;
+
+ if (!walk->slen || !walk->dlen)
+ return -EINVAL;
+
+ walk->flags = 0;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags |= ACOMP_WALK_SLEEP;
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
+ walk->flags |= ACOMP_WALK_SRC_LINEAR;
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
+ walk->flags |= ACOMP_WALK_DST_LINEAR;
+
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.sg = (void *)req->svirt;
+ walk->in.offset = 0;
+ } else
+ scatterwalk_start(&walk->in, src);
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.sg = (void *)req->dvirt;
+ walk->out.offset = 0;
+ } else
+ scatterwalk_start(&walk->out, dst);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_virt);
+
+struct acomp_req *acomp_request_clone(struct acomp_req *req,
+ size_t total, gfp_t gfp)
+{
+ struct acomp_req *nreq;
+
+ nreq = container_of(crypto_request_clone(&req->base, total, gfp),
+ struct acomp_req, base);
+ if (nreq == req)
+ return req;
+
+ if (req->src == &req->chain.ssg)
+ nreq->src = &nreq->chain.ssg;
+ if (req->dst == &req->chain.dsg)
+ nreq->dst = &nreq->chain.dsg;
+ return nreq;
+}
+EXPORT_SYMBOL_GPL(acomp_request_clone);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index c3ef583598b4..a6bca877c3c7 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -639,7 +639,7 @@ static void __exit adiantum_module_exit(void)
crypto_unregister_template(&adiantum_tmpl);
}
-subsys_initcall(adiantum_module_init);
+module_init(adiantum_module_init);
module_exit(adiantum_module_exit);
MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
diff --git a/crypto/aead.c b/crypto/aead.c
index 12f5b42171af..5d14b775036e 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -186,6 +186,7 @@ static const struct crypto_type crypto_aead_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AEAD,
.tfmsize = offsetof(struct crypto_aead, base),
+ .algsize = offsetof(struct aead_alg, base),
};
int crypto_grab_aead(struct crypto_aead_spawn *spawn,
diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c
index 72f6ee1345ef..ca80d861345d 100644
--- a/crypto/aegis128-core.c
+++ b/crypto/aegis128-core.c
@@ -566,7 +566,7 @@ static void __exit crypto_aegis128_module_exit(void)
crypto_unregister_aead(&crypto_aegis128_alg_generic);
}
-subsys_initcall(crypto_aegis128_module_init);
+module_init(crypto_aegis128_module_init);
module_exit(crypto_aegis128_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 3c66d425c97b..85d2e78c8ef2 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1311,7 +1311,7 @@ static void __exit aes_fini(void)
crypto_unregister_alg(&aes_alg);
}
-subsys_initcall(aes_init);
+module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 9f57b925b116..e10bc2659ae4 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -18,7 +18,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/sched.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
@@ -42,26 +42,46 @@ struct crypto_hash_walk {
struct scatterlist *sg;
};
-struct ahash_save_req_state {
- struct list_head head;
- struct ahash_request *req0;
- struct ahash_request *cur;
- int (*op)(struct ahash_request *req);
+static int ahash_def_finup(struct ahash_request *req);
+
+static inline bool crypto_ahash_block_only(struct crypto_ahash *tfm)
+{
+ return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+ CRYPTO_AHASH_ALG_BLOCK_ONLY;
+}
+
+static inline bool crypto_ahash_final_nonzero(struct crypto_ahash *tfm)
+{
+ return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
+}
+
+static inline bool crypto_ahash_need_fallback(struct crypto_ahash *tfm)
+{
+ return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+ CRYPTO_ALG_NEED_FALLBACK;
+}
+
+static inline void ahash_op_done(void *data, int err,
+ int (*finish)(struct ahash_request *, int))
+{
+ struct ahash_request *areq = data;
crypto_completion_t compl;
- void *data;
- struct scatterlist sg;
- const u8 *src;
- u8 *page;
- unsigned int offset;
- unsigned int nbytes;
-};
-static void ahash_reqchain_done(void *data, int err);
-static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt);
-static void ahash_restore_req(struct ahash_request *req);
-static void ahash_def_finup_done1(void *data, int err);
-static int ahash_def_finup_finish1(struct ahash_request *req, int err);
-static int ahash_def_finup(struct ahash_request *req);
+ compl = areq->saved_complete;
+ data = areq->saved_data;
+ if (err == -EINPROGRESS)
+ goto out;
+
+ areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = finish(areq, err);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+
+out:
+ compl(data, err);
+}
static int hash_walk_next(struct crypto_hash_walk *walk)
{
@@ -266,7 +286,6 @@ static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
CRYPTO_TFM_NEED_KEY);
- crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
return 0;
}
@@ -303,6 +322,9 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
int err;
err = alg->setkey(tfm, key, keylen);
+ if (!err && crypto_ahash_need_fallback(tfm))
+ err = crypto_ahash_setkey(crypto_ahash_fb(tfm),
+ key, keylen);
if (unlikely(err)) {
ahash_set_needkey(tfm, alg);
return err;
@@ -313,480 +335,261 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
-static bool ahash_request_hasvirt(struct ahash_request *req)
-{
- struct ahash_request *r2;
-
- if (ahash_request_isvirt(req))
- return true;
-
- list_for_each_entry(r2, &req->base.list, base.list)
- if (ahash_request_isvirt(r2))
- return true;
-
- return false;
-}
-
-static int ahash_reqchain_virt(struct ahash_save_req_state *state,
- int err, u32 mask)
-{
- struct ahash_request *req = state->cur;
-
- for (;;) {
- unsigned len = state->nbytes;
-
- req->base.err = err;
-
- if (!state->offset)
- break;
-
- if (state->offset == len || err) {
- u8 *result = req->result;
-
- ahash_request_set_virt(req, state->src, result, len);
- state->offset = 0;
- break;
- }
-
- len -= state->offset;
-
- len = min(PAGE_SIZE, len);
- memcpy(state->page, state->src + state->offset, len);
- state->offset += len;
- req->nbytes = len;
-
- err = state->op(req);
- if (err == -EINPROGRESS) {
- if (!list_empty(&state->head) ||
- state->offset < state->nbytes)
- err = -EBUSY;
- break;
- }
-
- if (err == -EBUSY)
- break;
- }
-
- return err;
-}
-
-static int ahash_reqchain_finish(struct ahash_request *req0,
- struct ahash_save_req_state *state,
- int err, u32 mask)
-{
- struct ahash_request *req = state->cur;
- struct crypto_ahash *tfm;
- struct ahash_request *n;
- bool update;
- u8 *page;
-
- err = ahash_reqchain_virt(state, err, mask);
- if (err == -EINPROGRESS || err == -EBUSY)
- goto out;
-
- if (req != req0)
- list_add_tail(&req->base.list, &req0->base.list);
-
- tfm = crypto_ahash_reqtfm(req);
- update = state->op == crypto_ahash_alg(tfm)->update;
-
- list_for_each_entry_safe(req, n, &state->head, base.list) {
- list_del_init(&req->base.list);
-
- req->base.flags &= mask;
- req->base.complete = ahash_reqchain_done;
- req->base.data = state;
- state->cur = req;
-
- if (update && ahash_request_isvirt(req) && req->nbytes) {
- unsigned len = req->nbytes;
- u8 *result = req->result;
-
- state->src = req->svirt;
- state->nbytes = len;
-
- len = min(PAGE_SIZE, len);
-
- memcpy(state->page, req->svirt, len);
- state->offset = len;
-
- ahash_request_set_crypt(req, &state->sg, result, len);
- }
-
- err = state->op(req);
-
- if (err == -EINPROGRESS) {
- if (!list_empty(&state->head) ||
- state->offset < state->nbytes)
- err = -EBUSY;
- goto out;
- }
-
- if (err == -EBUSY)
- goto out;
-
- err = ahash_reqchain_virt(state, err, mask);
- if (err == -EINPROGRESS || err == -EBUSY)
- goto out;
-
- list_add_tail(&req->base.list, &req0->base.list);
- }
-
- page = state->page;
- if (page) {
- memset(page, 0, PAGE_SIZE);
- free_page((unsigned long)page);
- }
- ahash_restore_req(req0);
-
-out:
- return err;
-}
-
-static void ahash_reqchain_done(void *data, int err)
-{
- struct ahash_save_req_state *state = data;
- crypto_completion_t compl = state->compl;
-
- data = state->data;
-
- if (err == -EINPROGRESS) {
- if (!list_empty(&state->head) || state->offset < state->nbytes)
- return;
- goto notify;
- }
-
- err = ahash_reqchain_finish(state->req0, state, err,
- CRYPTO_TFM_REQ_MAY_BACKLOG);
- if (err == -EBUSY)
- return;
-
-notify:
- compl(data, err);
-}
-
static int ahash_do_req_chain(struct ahash_request *req,
- int (*op)(struct ahash_request *req))
+ int (*const *op)(struct ahash_request *req))
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- bool update = op == crypto_ahash_alg(tfm)->update;
- struct ahash_save_req_state *state;
- struct ahash_save_req_state state0;
- struct ahash_request *r2;
- u8 *page = NULL;
int err;
- if (crypto_ahash_req_chain(tfm) ||
- (!ahash_request_chained(req) &&
- (!update || !ahash_request_isvirt(req))))
- return op(req);
-
- if (update && ahash_request_hasvirt(req)) {
- gfp_t gfp;
- u32 flags;
-
- flags = ahash_request_flags(req);
- gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- page = (void *)__get_free_page(gfp);
- err = -ENOMEM;
- if (!page)
- goto out_set_chain;
- }
+ if (crypto_ahash_req_virt(tfm) || !ahash_request_isvirt(req))
+ return (*op)(req);
- state = &state0;
- if (ahash_is_async(tfm)) {
- err = ahash_save_req(req, ahash_reqchain_done);
- if (err)
- goto out_free_page;
+ if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE)
+ return -ENOSYS;
- state = req->base.data;
- }
-
- state->op = op;
- state->cur = req;
- state->page = page;
- state->offset = 0;
- state->nbytes = 0;
- INIT_LIST_HEAD(&state->head);
- list_splice_init(&req->base.list, &state->head);
+ {
+ u8 state[HASH_MAX_STATESIZE];
- if (page)
- sg_init_one(&state->sg, page, PAGE_SIZE);
+ if (op == &crypto_ahash_alg(tfm)->digest) {
+ ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+ err = crypto_ahash_digest(req);
+ goto out_no_state;
+ }
- if (update && ahash_request_isvirt(req) && req->nbytes) {
- unsigned len = req->nbytes;
- u8 *result = req->result;
+ err = crypto_ahash_export(req, state);
+ ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+ err = err ?: crypto_ahash_import(req, state);
- state->src = req->svirt;
- state->nbytes = len;
+ if (op == &crypto_ahash_alg(tfm)->finup) {
+ err = err ?: crypto_ahash_finup(req);
+ goto out_no_state;
+ }
- len = min(PAGE_SIZE, len);
+ err = err ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export(req, state);
- memcpy(page, req->svirt, len);
- state->offset = len;
+ ahash_request_set_tfm(req, tfm);
+ return err ?: crypto_ahash_import(req, state);
- ahash_request_set_crypt(req, &state->sg, result, len);
+out_no_state:
+ ahash_request_set_tfm(req, tfm);
+ return err;
}
-
- err = op(req);
- if (err == -EBUSY || err == -EINPROGRESS)
- return -EBUSY;
-
- return ahash_reqchain_finish(req, state, err, ~0);
-
-out_free_page:
- free_page((unsigned long)page);
-
-out_set_chain:
- req->base.err = err;
- list_for_each_entry(r2, &req->base.list, base.list)
- r2->base.err = err;
-
- return err;
}
int crypto_ahash_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- if (likely(tfm->using_shash)) {
- struct ahash_request *r2;
- int err;
-
- err = crypto_shash_init(prepare_shash_desc(req, tfm));
- req->base.err = err;
-
- list_for_each_entry(r2, &req->base.list, base.list) {
- struct shash_desc *desc;
-
- desc = prepare_shash_desc(r2, tfm);
- r2->base.err = crypto_shash_init(desc);
- }
-
- return err;
- }
-
+ if (likely(tfm->using_shash))
+ return crypto_shash_init(prepare_shash_desc(req, tfm));
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_ahash_block_only(tfm)) {
+ u8 *buf = ahash_request_ctx(req);
- return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init);
+ buf += crypto_ahash_reqsize(tfm) - 1;
+ *buf = 0;
+ }
+ return crypto_ahash_alg(tfm)->init(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_init);
-static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
+static void ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ahash_save_req_state *state;
- gfp_t gfp;
- u32 flags;
-
- if (!ahash_is_async(tfm))
- return 0;
-
- flags = ahash_request_flags(req);
- gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
- state = kmalloc(sizeof(*state), gfp);
- if (!state)
- return -ENOMEM;
-
- state->compl = req->base.complete;
- state->data = req->base.data;
+ req->saved_complete = req->base.complete;
+ req->saved_data = req->base.data;
req->base.complete = cplt;
- req->base.data = state;
- state->req0 = req;
-
- return 0;
+ req->base.data = req;
}
static void ahash_restore_req(struct ahash_request *req)
{
- struct ahash_save_req_state *state;
- struct crypto_ahash *tfm;
-
- tfm = crypto_ahash_reqtfm(req);
- if (!ahash_is_async(tfm))
- return;
-
- state = req->base.data;
-
- req->base.complete = state->compl;
- req->base.data = state->data;
- kfree(state);
+ req->base.complete = req->saved_complete;
+ req->base.data = req->saved_data;
}
-int crypto_ahash_update(struct ahash_request *req)
+static int ahash_update_finish(struct ahash_request *req, int err)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ bool nonzero = crypto_ahash_final_nonzero(tfm);
+ int bs = crypto_ahash_blocksize(tfm);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen;
+ u8 *buf;
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+ buf = blenp - bs;
+
+ if (blen) {
+ req->src = req->sg_head + 1;
+ if (sg_is_chain(req->src))
+ req->src = sg_chain_ptr(req->src);
+ }
- if (likely(tfm->using_shash)) {
- struct ahash_request *r2;
- int err;
-
- err = shash_ahash_update(req, ahash_request_ctx(req));
- req->base.err = err;
-
- list_for_each_entry(r2, &req->base.list, base.list) {
- struct shash_desc *desc;
+ req->nbytes += nonzero - blen;
- desc = ahash_request_ctx(r2);
- r2->base.err = shash_ahash_update(r2, desc);
- }
+ blen = err < 0 ? 0 : err + nonzero;
+ if (ahash_request_isvirt(req))
+ memcpy(buf, req->svirt + req->nbytes - blen, blen);
+ else
+ memcpy_from_sglist(buf, req->src, req->nbytes - blen, blen);
+ *blenp = blen;
- return err;
- }
+ ahash_restore_req(req);
- return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update);
+ return err;
}
-EXPORT_SYMBOL_GPL(crypto_ahash_update);
-int crypto_ahash_final(struct ahash_request *req)
+static void ahash_update_done(void *data, int err)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (likely(tfm->using_shash)) {
- struct ahash_request *r2;
- int err;
-
- err = crypto_shash_final(ahash_request_ctx(req), req->result);
- req->base.err = err;
-
- list_for_each_entry(r2, &req->base.list, base.list) {
- struct shash_desc *desc;
-
- desc = ahash_request_ctx(r2);
- r2->base.err = crypto_shash_final(desc, r2->result);
- }
-
- return err;
- }
-
- return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final);
+ ahash_op_done(data, err, ahash_update_finish);
}
-EXPORT_SYMBOL_GPL(crypto_ahash_final);
-int crypto_ahash_finup(struct ahash_request *req)
+int crypto_ahash_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ bool nonzero = crypto_ahash_final_nonzero(tfm);
+ int bs = crypto_ahash_blocksize(tfm);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen, err;
+ u8 *buf;
- if (likely(tfm->using_shash)) {
- struct ahash_request *r2;
- int err;
-
- err = shash_ahash_finup(req, ahash_request_ctx(req));
- req->base.err = err;
+ if (likely(tfm->using_shash))
+ return shash_ahash_update(req, ahash_request_ctx(req));
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
+ if (!crypto_ahash_block_only(tfm))
+ return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
- list_for_each_entry(r2, &req->base.list, base.list) {
- struct shash_desc *desc;
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+ buf = blenp - bs;
- desc = ahash_request_ctx(r2);
- r2->base.err = shash_ahash_finup(r2, desc);
- }
+ if (blen + req->nbytes < bs + nonzero) {
+ if (ahash_request_isvirt(req))
+ memcpy(buf + blen, req->svirt, req->nbytes);
+ else
+ memcpy_from_sglist(buf + blen, req->src, 0,
+ req->nbytes);
- return err;
+ *blenp += req->nbytes;
+ return 0;
}
- if (!crypto_ahash_alg(tfm)->finup ||
- (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req)))
- return ahash_def_finup(req);
-
- return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup);
-}
-EXPORT_SYMBOL_GPL(crypto_ahash_finup);
-
-static int ahash_def_digest_finish(struct ahash_request *req, int err)
-{
- struct crypto_ahash *tfm;
-
- if (err)
- goto out;
+ if (blen) {
+ memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+ sg_set_buf(req->sg_head, buf, blen);
+ if (req->src != req->sg_head + 1)
+ sg_chain(req->sg_head, 2, req->src);
+ req->src = req->sg_head;
+ req->nbytes += blen;
+ }
+ req->nbytes -= nonzero;
- tfm = crypto_ahash_reqtfm(req);
- if (ahash_is_async(tfm))
- req->base.complete = ahash_def_finup_done1;
+ ahash_save_req(req, ahash_update_done);
- err = crypto_ahash_update(req);
+ err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
- return ahash_def_finup_finish1(req, err);
-
-out:
- ahash_restore_req(req);
- return err;
+ return ahash_update_finish(req, err);
}
+EXPORT_SYMBOL_GPL(crypto_ahash_update);
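
The block-only path above spills any trailing partial block into the tail of the request context: ahash_prepare_alg() further down grows cra_reqsize and statesize by cra_blocksize + 1 for CRYPTO_AHASH_ALG_BLOCK_ONLY algorithms. A minimal sketch of that layout; the helper below is purely illustrative and not part of the patch:

        /*
         * Tail of the ahash request context for a BLOCK_ONLY algorithm
         * (bs = crypto_ahash_blocksize()):
         *
         *   ctx[0 .. reqsize-bs-2]           algorithm state
         *   ctx[reqsize-1-bs .. reqsize-2]   buffered partial block
         *   ctx[reqsize-1]                   number of buffered bytes (blen)
         */
        static u8 *example_partial_block(struct ahash_request *req, u8 **blenp)
        {
                struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
                u8 *ctx = ahash_request_ctx(req);

                *blenp = ctx + crypto_ahash_reqsize(tfm) - 1;   /* length byte */
                return *blenp - crypto_ahash_blocksize(tfm);    /* block buffer */
        }
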
-static void ahash_def_digest_done(void *data, int err)
+static int ahash_finup_finish(struct ahash_request *req, int err)
{
- struct ahash_save_req_state *state0 = data;
- struct ahash_save_req_state state;
- struct ahash_request *areq;
-
- state = *state0;
- areq = state.req0;
- if (err == -EINPROGRESS)
- goto out;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen;
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+
+ if (blen) {
+ if (sg_is_last(req->src))
+ req->src = NULL;
+ else {
+ req->src = req->sg_head + 1;
+ if (sg_is_chain(req->src))
+ req->src = sg_chain_ptr(req->src);
+ }
+ req->nbytes -= blen;
+ }
- areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_restore_req(req);
- err = ahash_def_digest_finish(areq, err);
- if (err == -EINPROGRESS || err == -EBUSY)
- return;
+ return err;
+}
-out:
- state.compl(state.data, err);
+static void ahash_finup_done(void *data, int err)
+{
+ ahash_op_done(data, err, ahash_finup_finish);
}
-static int ahash_def_digest(struct ahash_request *req)
+int crypto_ahash_finup(struct ahash_request *req)
{
- int err;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int bs = crypto_ahash_blocksize(tfm);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen, err;
+ u8 *buf;
- err = ahash_save_req(req, ahash_def_digest_done);
- if (err)
- return err;
+ if (likely(tfm->using_shash))
+ return shash_ahash_finup(req, ahash_request_ctx(req));
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
+ if (!crypto_ahash_alg(tfm)->finup)
+ return ahash_def_finup(req);
+ if (!crypto_ahash_block_only(tfm))
+ return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+ buf = blenp - bs;
+
+ if (blen) {
+ memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+ sg_set_buf(req->sg_head, buf, blen);
+ if (!req->src)
+ sg_mark_end(req->sg_head);
+ else if (req->src != req->sg_head + 1)
+ sg_chain(req->sg_head, 2, req->src);
+ req->src = req->sg_head;
+ req->nbytes += blen;
+ }
+
+ ahash_save_req(req, ahash_finup_done);
- err = crypto_ahash_init(req);
+ err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
- return ahash_def_digest_finish(req, err);
+ return ahash_finup_finish(req, err);
}
+EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- if (likely(tfm->using_shash)) {
- struct ahash_request *r2;
- int err;
-
- err = shash_ahash_digest(req, prepare_shash_desc(req, tfm));
- req->base.err = err;
-
- list_for_each_entry(r2, &req->base.list, base.list) {
- struct shash_desc *desc;
-
- desc = prepare_shash_desc(r2, tfm);
- r2->base.err = shash_ahash_digest(r2, desc);
- }
-
- return err;
- }
-
- if (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req))
- return ahash_def_digest(req);
-
+ if (likely(tfm->using_shash))
+ return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
-
- return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest);
+ return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
static void ahash_def_finup_done2(void *data, int err)
{
- struct ahash_save_req_state *state = data;
- struct ahash_request *areq = state->req0;
+ struct ahash_request *areq = data;
if (err == -EINPROGRESS)
return;
@@ -797,14 +600,10 @@ static void ahash_def_finup_done2(void *data, int err)
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
- struct crypto_ahash *tfm;
-
if (err)
goto out;
- tfm = crypto_ahash_reqtfm(req);
- if (ahash_is_async(tfm))
- req->base.complete = ahash_def_finup_done2;
+ req->base.complete = ahash_def_finup_done2;
err = crypto_ahash_final(req);
if (err == -EINPROGRESS || err == -EBUSY)
@@ -817,32 +616,14 @@ out:
static void ahash_def_finup_done1(void *data, int err)
{
- struct ahash_save_req_state *state0 = data;
- struct ahash_save_req_state state;
- struct ahash_request *areq;
-
- state = *state0;
- areq = state.req0;
- if (err == -EINPROGRESS)
- goto out;
-
- areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = ahash_def_finup_finish1(areq, err);
- if (err == -EINPROGRESS || err == -EBUSY)
- return;
-
-out:
- state.compl(state.data, err);
+ ahash_op_done(data, err, ahash_def_finup_finish1);
}
static int ahash_def_finup(struct ahash_request *req)
{
int err;
- err = ahash_save_req(req, ahash_def_finup_done1);
- if (err)
- return err;
+ ahash_save_req(req, ahash_def_finup_done1);
err = crypto_ahash_update(req);
if (err == -EINPROGRESS || err == -EBUSY)
@@ -851,16 +632,47 @@ static int ahash_def_finup(struct ahash_request *req)
return ahash_def_finup_finish1(req, err);
}
+int crypto_ahash_export_core(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (likely(tfm->using_shash))
+ return crypto_shash_export_core(ahash_request_ctx(req), out);
+ return crypto_ahash_alg(tfm)->export_core(req, out);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_export_core);
+
int crypto_ahash_export(struct ahash_request *req, void *out)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
if (likely(tfm->using_shash))
return crypto_shash_export(ahash_request_ctx(req), out);
+ if (crypto_ahash_block_only(tfm)) {
+ unsigned int plen = crypto_ahash_blocksize(tfm) + 1;
+ unsigned int reqsize = crypto_ahash_reqsize(tfm);
+ unsigned int ss = crypto_ahash_statesize(tfm);
+ u8 *buf = ahash_request_ctx(req);
+
+ memcpy(out + ss - plen, buf + reqsize - plen, plen);
+ }
return crypto_ahash_alg(tfm)->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_ahash_export);
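
For block-only algorithms the exported state was likewise grown by blocksize + 1 bytes (see ahash_prepare_alg() below), so the copy above serializes the buffered partial block together with its length byte at the very end of the output. A worked example, assuming a 128-byte block as in blake2b, which is converted to BLOCK_ONLY later in this patch:

        /*
         * plen = 128 + 1 = 129
         * memcpy(out + statesize - 129, ctx + reqsize - 129, 129);
         *
         * i.e. the last 129 bytes of the request context (partial block
         * plus blen byte) land in the last 129 bytes of the exported state.
         */
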
+int crypto_ahash_import_core(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (likely(tfm->using_shash))
+ return crypto_shash_import_core(prepare_shash_desc(req, tfm),
+ in);
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+ return crypto_ahash_alg(tfm)->import_core(req, in);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_import_core);
+
int crypto_ahash_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -869,6 +681,12 @@ int crypto_ahash_import(struct ahash_request *req, const void *in)
return crypto_shash_import(prepare_shash_desc(req, tfm), in);
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
+ if (crypto_ahash_block_only(tfm)) {
+ unsigned int reqsize = crypto_ahash_reqsize(tfm);
+ u8 *buf = ahash_request_ctx(req);
+
+ buf[reqsize - 1] = 0;
+ }
return crypto_ahash_alg(tfm)->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import);
@@ -878,26 +696,73 @@ static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
- alg->exit_tfm(hash);
+ if (alg->exit_tfm)
+ alg->exit_tfm(hash);
+ else if (tfm->__crt_alg->cra_exit)
+ tfm->__crt_alg->cra_exit(tfm);
+
+ if (crypto_ahash_need_fallback(hash))
+ crypto_free_ahash(crypto_ahash_fb(hash));
}
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
+ struct crypto_ahash *fb = NULL;
+ int err;
crypto_ahash_set_statesize(hash, alg->halg.statesize);
- crypto_ahash_set_reqsize(hash, alg->reqsize);
+ crypto_ahash_set_reqsize(hash, crypto_tfm_alg_reqsize(tfm));
if (tfm->__crt_alg->cra_type == &crypto_shash_type)
return crypto_init_ahash_using_shash(tfm);
+ if (crypto_ahash_need_fallback(hash)) {
+ fb = crypto_alloc_ahash(crypto_ahash_alg_name(hash),
+ CRYPTO_ALG_REQ_VIRT,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_REQ_VIRT |
+ CRYPTO_AHASH_ALG_NO_EXPORT_CORE);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
+ tfm->fb = crypto_ahash_tfm(fb);
+ }
+
ahash_set_needkey(hash, alg);
- if (alg->exit_tfm)
- tfm->exit = crypto_ahash_exit_tfm;
+ tfm->exit = crypto_ahash_exit_tfm;
+
+ if (alg->init_tfm)
+ err = alg->init_tfm(hash);
+ else if (tfm->__crt_alg->cra_init)
+ err = tfm->__crt_alg->cra_init(tfm);
+ else
+ return 0;
+
+ if (err)
+ goto out_free_sync_hash;
+
+ if (!ahash_is_async(hash) && crypto_ahash_reqsize(hash) >
+ MAX_SYNC_HASH_REQSIZE)
+ goto out_exit_tfm;
+
+ BUILD_BUG_ON(HASH_MAX_DESCSIZE > MAX_SYNC_HASH_REQSIZE);
+ if (crypto_ahash_reqsize(hash) < HASH_MAX_DESCSIZE)
+ crypto_ahash_set_reqsize(hash, HASH_MAX_DESCSIZE);
- return alg->init_tfm ? alg->init_tfm(hash) : 0;
+ return 0;
+
+out_exit_tfm:
+ if (alg->exit_tfm)
+ alg->exit_tfm(hash);
+ else if (tfm->__crt_alg->cra_exit)
+ tfm->__crt_alg->cra_exit(tfm);
+ err = -EINVAL;
+out_free_sync_hash:
+ crypto_free_ahash(fb);
+ return err;
}
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
@@ -956,6 +821,7 @@ static const struct crypto_type crypto_ahash_type = {
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
.tfmsize = offsetof(struct crypto_ahash, base),
+ .algsize = offsetof(struct ahash_alg, halg.base),
};
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
@@ -980,7 +846,7 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
-static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
struct crypto_alg *alg = &halg->base;
@@ -989,11 +855,13 @@ static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
}
+EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
struct hash_alg_common *halg = crypto_hash_alg_common(hash);
struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+ struct crypto_ahash *fb = NULL;
struct crypto_ahash *nhash;
struct ahash_alg *alg;
int err;
@@ -1023,28 +891,52 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
err = PTR_ERR(shash);
goto out_free_nhash;
}
+ crypto_ahash_tfm(nhash)->exit = crypto_exit_ahash_using_shash;
nhash->using_shash = true;
*nctx = shash;
return nhash;
}
+ if (crypto_ahash_need_fallback(hash)) {
+ fb = crypto_clone_ahash(crypto_ahash_fb(hash));
+ err = PTR_ERR(fb);
+ if (IS_ERR(fb))
+ goto out_free_nhash;
+
+ crypto_ahash_tfm(nhash)->fb = crypto_ahash_tfm(fb);
+ }
+
err = -ENOSYS;
alg = crypto_ahash_alg(hash);
if (!alg->clone_tfm)
- goto out_free_nhash;
+ goto out_free_fb;
err = alg->clone_tfm(nhash, hash);
if (err)
- goto out_free_nhash;
+ goto out_free_fb;
+
+ crypto_ahash_tfm(nhash)->exit = crypto_ahash_exit_tfm;
return nhash;
+out_free_fb:
+ crypto_free_ahash(fb);
out_free_nhash:
crypto_free_ahash(nhash);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);
+static int ahash_default_export_core(struct ahash_request *req, void *out)
+{
+ return -ENOSYS;
+}
+
+static int ahash_default_import_core(struct ahash_request *req, const void *in)
+{
+ return -ENOSYS;
+}
+
static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
@@ -1053,7 +945,11 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
if (alg->halg.statesize == 0)
return -EINVAL;
- if (alg->reqsize && alg->reqsize < alg->halg.statesize)
+ if (base->cra_reqsize && base->cra_reqsize < alg->halg.statesize)
+ return -EINVAL;
+
+ if (!(base->cra_flags & CRYPTO_ALG_ASYNC) &&
+ base->cra_reqsize > MAX_SYNC_HASH_REQSIZE)
return -EINVAL;
err = hash_prepare_alg(&alg->halg);
@@ -1063,9 +959,28 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
base->cra_type = &crypto_ahash_type;
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
+ if ((base->cra_flags ^ CRYPTO_ALG_REQ_VIRT) &
+ (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT))
+ base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
+
if (!alg->setkey)
alg->setkey = ahash_nosetkey;
+ if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) {
+ BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256);
+ if (!alg->finup)
+ return -EINVAL;
+
+ base->cra_reqsize += base->cra_blocksize + 1;
+ alg->halg.statesize += base->cra_blocksize + 1;
+ alg->export_core = alg->export;
+ alg->import_core = alg->import;
+ } else if (!alg->export_core || !alg->import_core) {
+ alg->export_core = ahash_default_export_core;
+ alg->import_core = ahash_default_import_core;
+ base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+ }
+
return 0;
}
@@ -1135,18 +1050,40 @@ EXPORT_SYMBOL_GPL(ahash_register_instance);
void ahash_request_free(struct ahash_request *req)
{
- struct ahash_request *tmp;
- struct ahash_request *r2;
-
if (unlikely(!req))
return;
- list_for_each_entry_safe(r2, tmp, &req->base.list, base.list)
- kfree_sensitive(r2);
+ if (!ahash_req_on_stack(req)) {
+ kfree(req);
+ return;
+ }
- kfree_sensitive(req);
+ ahash_request_zero(req);
}
EXPORT_SYMBOL_GPL(ahash_request_free);
+int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ HASH_REQUEST_ON_STACK(req, crypto_ahash_fb(tfm));
+ int err;
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_virt(req, data, out, len);
+ err = crypto_ahash_digest(req);
+
+ ahash_request_zero(req);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(crypto_hash_digest);
+
+void ahash_free_singlespawn_instance(struct ahash_instance *inst)
+{
+ crypto_drop_spawn(ahash_instance_ctx(inst));
+ kfree(inst);
+}
+EXPORT_SYMBOL_GPL(ahash_free_singlespawn_instance);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 72c82d9aa077..a36f50c83827 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -97,6 +97,7 @@ static const struct crypto_type crypto_akcipher_type = {
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AKCIPHER,
.tfmsize = offsetof(struct crypto_akcipher, base),
+ .algsize = offsetof(struct akcipher_alg, base),
};
int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn,
diff --git a/crypto/algapi.c b/crypto/algapi.c
index ea9ed9580aa8..e604d0d8b7b4 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -71,12 +71,23 @@ static void crypto_free_instance(struct crypto_instance *inst)
static void crypto_destroy_instance_workfn(struct work_struct *w)
{
- struct crypto_instance *inst = container_of(w, struct crypto_instance,
+ struct crypto_template *tmpl = container_of(w, struct crypto_template,
free_work);
- struct crypto_template *tmpl = inst->tmpl;
+ struct crypto_instance *inst;
+ struct hlist_node *n;
+ HLIST_HEAD(list);
+
+ down_write(&crypto_alg_sem);
+ hlist_for_each_entry_safe(inst, n, &tmpl->dead, list) {
+ if (refcount_read(&inst->alg.cra_refcnt) != -1)
+ continue;
+ hlist_del(&inst->list);
+ hlist_add_head(&inst->list, &list);
+ }
+ up_write(&crypto_alg_sem);
- crypto_free_instance(inst);
- crypto_tmpl_put(tmpl);
+ hlist_for_each_entry_safe(inst, n, &list, list)
+ crypto_free_instance(inst);
}
static void crypto_destroy_instance(struct crypto_alg *alg)
@@ -84,9 +95,10 @@ static void crypto_destroy_instance(struct crypto_alg *alg)
struct crypto_instance *inst = container_of(alg,
struct crypto_instance,
alg);
+ struct crypto_template *tmpl = inst->tmpl;
- INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn);
- schedule_work(&inst->free_work);
+ refcount_set(&alg->cra_refcnt, -1);
+ schedule_work(&tmpl->free_work);
}
/*
@@ -132,14 +144,16 @@ static void crypto_remove_instance(struct crypto_instance *inst,
inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
- if (!tmpl || !crypto_tmpl_get(tmpl))
+ if (!tmpl)
return;
- list_move(&inst->alg.cra_list, list);
+ list_del_init(&inst->alg.cra_list);
hlist_del(&inst->list);
- inst->alg.cra_destroy = crypto_destroy_instance;
+ hlist_add_head(&inst->list, &tmpl->dead);
BUG_ON(!list_empty(&inst->alg.cra_users));
+
+ crypto_alg_put(&inst->alg);
}
/*
@@ -260,8 +274,7 @@ static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg)
{
struct crypto_larval *larval;
- if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER) ||
- IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) ||
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) ||
(alg->cra_flags & CRYPTO_ALG_INTERNAL))
return NULL; /* No self-test needed */
@@ -404,6 +417,15 @@ void crypto_remove_final(struct list_head *list)
}
EXPORT_SYMBOL_GPL(crypto_remove_final);
+static void crypto_free_alg(struct crypto_alg *alg)
+{
+ unsigned int algsize = alg->cra_type->algsize;
+ u8 *p = (u8 *)alg - algsize;
+
+ crypto_destroy_alg(alg);
+ kfree(p);
+}
+
int crypto_register_alg(struct crypto_alg *alg)
{
struct crypto_larval *larval;
@@ -416,6 +438,19 @@ int crypto_register_alg(struct crypto_alg *alg)
if (err)
return err;
+ if (alg->cra_flags & CRYPTO_ALG_DUP_FIRST &&
+ !WARN_ON_ONCE(alg->cra_destroy)) {
+ unsigned int algsize = alg->cra_type->algsize;
+ u8 *p = (u8 *)alg - algsize;
+
+ p = kmemdup(p, algsize + sizeof(*alg), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ alg = (void *)(p + algsize);
+ alg->cra_destroy = crypto_free_alg;
+ }
+
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(alg, &algs_to_put);
if (!IS_ERR_OR_NULL(larval)) {
@@ -424,8 +459,10 @@ int crypto_register_alg(struct crypto_alg *alg)
}
up_write(&crypto_alg_sem);
- if (IS_ERR(larval))
+ if (IS_ERR(larval)) {
+ crypto_alg_put(alg);
return PTR_ERR(larval);
+ }
if (test_started)
crypto_schedule_test(larval);
@@ -461,11 +498,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
return;
- if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
- return;
-
- crypto_alg_put(alg);
+ WARN_ON(!alg->cra_destroy && refcount_read(&alg->cra_refcnt) != 1);
+ list_add(&alg->cra_list, &list);
crypto_remove_final(&list);
}
EXPORT_SYMBOL_GPL(crypto_unregister_alg);
@@ -504,6 +539,8 @@ int crypto_register_template(struct crypto_template *tmpl)
struct crypto_template *q;
int err = -EEXIST;
+ INIT_WORK(&tmpl->free_work, crypto_destroy_instance_workfn);
+
down_write(&crypto_alg_sem);
crypto_check_module_sig(tmpl->module);
@@ -565,6 +602,8 @@ void crypto_unregister_template(struct crypto_template *tmpl)
crypto_free_instance(inst);
}
crypto_remove_final(&users);
+
+ flush_work(&tmpl->free_work);
}
EXPORT_SYMBOL_GPL(crypto_unregister_template);
@@ -618,6 +657,7 @@ int crypto_register_instance(struct crypto_template *tmpl,
inst->alg.cra_module = tmpl->module;
inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
+ inst->alg.cra_destroy = crypto_destroy_instance;
down_write(&crypto_alg_sem);
@@ -883,20 +923,20 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
}
EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
-int crypto_inst_setname(struct crypto_instance *inst, const char *name,
- struct crypto_alg *alg)
+int __crypto_inst_setname(struct crypto_instance *inst, const char *name,
+ const char *driver, struct crypto_alg *alg)
{
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
- name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ driver, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
return 0;
}
-EXPORT_SYMBOL_GPL(crypto_inst_setname);
+EXPORT_SYMBOL_GPL(__crypto_inst_setname);
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)
{
@@ -1018,7 +1058,7 @@ static void __init crypto_start_tests(void)
if (!IS_BUILTIN(CONFIG_CRYPTO_ALGAPI))
return;
- if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
return;
set_crypto_boot_test_finished();
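
The CRYPTO_ALG_DUP_FIRST handling added to crypto_register_alg() duplicates the whole frontend object before registration, using the new cra_type->algsize to find its start. A sketch of the pointer arithmetic involved, taking the ahash case (algsize = offsetof(struct ahash_alg, halg.base)) as the example:

        /*
         *   p                              alg
         *   |                               |
         *   v                               v
         *   [ struct ahash_alg ........... [ struct crypto_alg ] ]
         *   |<--------- algsize ---------->|<-- sizeof(*alg) -->|
         *
         * p = (u8 *)alg - algsize;
         * p = kmemdup(p, algsize + sizeof(*alg), GFP_KERNEL);
         * alg = (void *)(p + algsize);
         * alg->cra_destroy = crypto_free_alg;  /* undoes the same offset */
         */
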
diff --git a/crypto/algboss.c b/crypto/algboss.c
index a20926bfd34e..846f586889ee 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -189,7 +189,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
struct task_struct *thread;
struct crypto_test_param *param;
- if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
return NOTIFY_DONE;
if (!try_module_get(THIS_MODULE))
@@ -247,13 +247,7 @@ static void __exit cryptomgr_exit(void)
BUG_ON(err);
}
-/*
- * This is arch_initcall() so that the crypto self-tests are run on algorithms
- * registered early by subsys_initcall(). subsys_initcall() is needed for
- * generic implementations so that they're available for comparison tests when
- * other implementations are registered later by module_init().
- */
-arch_initcall(cryptomgr_init);
+module_init(cryptomgr_init);
module_exit(cryptomgr_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 7d58cbbce4af..79b016a899a1 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -27,7 +27,6 @@
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
-#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
@@ -36,19 +35,13 @@
#include <linux/net.h>
#include <net/sock.h>
-struct aead_tfm {
- struct crypto_aead *aead;
- struct crypto_sync_skcipher *null_tfm;
-};
-
static inline bool aead_sufficient_data(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_aead *tfm = pask->private;
unsigned int as = crypto_aead_authsize(tfm);
/*
@@ -64,27 +57,12 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_aead *tfm = pask->private;
unsigned int ivsize = crypto_aead_ivsize(tfm);
return af_alg_sendmsg(sock, msg, size, ivsize);
}
-static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
- struct scatterlist *src,
- struct scatterlist *dst, unsigned int len)
-{
- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
-
- skcipher_request_set_sync_tfm(skreq, null_tfm);
- skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
- NULL, NULL);
- skcipher_request_set_crypt(skreq, src, dst, len, NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-
static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@@ -93,9 +71,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
- struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
+ struct crypto_aead *tfm = pask->private;
unsigned int i, as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
struct af_alg_tsgl *tsgl, *tmp;
@@ -223,11 +199,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* v v
* RX SGL: AAD || PT || Tag
*/
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sgt.sgl,
- processed);
- if (err)
- goto free;
+ memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src,
+ processed);
af_alg_pull_tsgl(sk, processed, NULL, 0);
} else {
/*
@@ -241,12 +214,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* RX SGL: AAD || CT ----+
*/
- /* Copy AAD || CT to RX SGL buffer for in-place operation. */
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sgt.sgl,
- outlen);
- if (err)
- goto free;
+ /* Copy AAD || CT to RX SGL buffer for in-place operation. */
+ memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, outlen);
/* Create TX SGL for tag and chain it to RX SGL. */
areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
@@ -379,7 +348,7 @@ static int aead_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
- struct aead_tfm *tfm;
+ struct crypto_aead *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -393,7 +362,7 @@ static int aead_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
+ if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
atomic_dec(&pask->nokey_refcnt);
@@ -454,54 +423,22 @@ static struct proto_ops algif_aead_ops_nokey = {
static void *aead_bind(const char *name, u32 type, u32 mask)
{
- struct aead_tfm *tfm;
- struct crypto_aead *aead;
- struct crypto_sync_skcipher *null_tfm;
-
- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
- if (!tfm)
- return ERR_PTR(-ENOMEM);
-
- aead = crypto_alloc_aead(name, type, mask);
- if (IS_ERR(aead)) {
- kfree(tfm);
- return ERR_CAST(aead);
- }
-
- null_tfm = crypto_get_default_null_skcipher();
- if (IS_ERR(null_tfm)) {
- crypto_free_aead(aead);
- kfree(tfm);
- return ERR_CAST(null_tfm);
- }
-
- tfm->aead = aead;
- tfm->null_tfm = null_tfm;
-
- return tfm;
+ return crypto_alloc_aead(name, type, mask);
}
static void aead_release(void *private)
{
- struct aead_tfm *tfm = private;
-
- crypto_free_aead(tfm->aead);
- crypto_put_default_null_skcipher();
- kfree(tfm);
+ crypto_free_aead(private);
}
static int aead_setauthsize(void *private, unsigned int authsize)
{
- struct aead_tfm *tfm = private;
-
- return crypto_aead_setauthsize(tfm->aead, authsize);
+ return crypto_aead_setauthsize(private, authsize);
}
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
- struct aead_tfm *tfm = private;
-
- return crypto_aead_setkey(tfm->aead, key, keylen);
+ return crypto_aead_setkey(private, key, keylen);
}
static void aead_sock_destruct(struct sock *sk)
@@ -510,8 +447,7 @@ static void aead_sock_destruct(struct sock *sk)
struct af_alg_ctx *ctx = ask->private;
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_aead *tfm = pask->private;
unsigned int ivlen = crypto_aead_ivsize(tfm);
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
@@ -524,10 +460,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- struct aead_tfm *tfm = private;
- struct crypto_aead *aead = tfm->aead;
+ struct crypto_aead *tfm = private;
unsigned int len = sizeof(*ctx);
- unsigned int ivlen = crypto_aead_ivsize(aead);
+ unsigned int ivlen = crypto_aead_ivsize(tfm);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -554,9 +489,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
static int aead_accept_parent(void *private, struct sock *sk)
{
- struct aead_tfm *tfm = private;
+ struct crypto_aead *tfm = private;
- if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
+ if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return aead_accept_parent_nokey(private, sk);
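
The pattern here (and in authenc/authencesn further below) is the same: the private "null" skcipher existed only to copy scatterlist data, and is replaced by the scatterwalk helper memcpy_sglist(). Before and after, using the names from the recvmsg hunk above:

        /* Before: copy by "encrypting" through ecb(cipher_null). */
        err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
                                   areq->first_rsgl.sgl.sgt.sgl, processed);
        if (err)
                goto free;

        /* After: a plain scatterlist-to-scatterlist copy that cannot fail. */
        memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, processed);
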
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 5498a87249d3..e3f1a4852737 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -265,10 +265,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock,
goto out_free_state;
err = crypto_ahash_import(&ctx2->req, state);
- if (err) {
- sock_orphan(sk2);
- sock_put(sk2);
- }
out_free_state:
kfree_sensitive(state);
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 64f57c4c4b06..153523ce6076 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -467,7 +467,7 @@ MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
module_param(dbg, int, 0);
MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
-subsys_initcall(prng_mod_init);
+module_init(prng_mod_init);
module_exit(prng_mod_fini);
MODULE_ALIAS_CRYPTO("stdrng");
MODULE_ALIAS_CRYPTO("ansi_cprng");
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 886e7c913688..4268c3833baa 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -694,7 +694,7 @@ static void __exit anubis_mod_fini(void)
crypto_unregister_alg(&anubis_alg);
}
-subsys_initcall(anubis_mod_init);
+module_init(anubis_mod_init);
module_exit(anubis_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/api.c b/crypto/api.c
index 3416e98128a0..5724d62e9d07 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -31,8 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);
-#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && \
- !IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)
+#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
#endif
@@ -220,10 +219,19 @@ again:
if (crypto_is_test_larval(larval))
crypto_larval_kill(larval);
alg = ERR_PTR(-ETIMEDOUT);
- } else if (!alg) {
+ } else if (!alg || PTR_ERR(alg) == -EEXIST) {
+ int err = alg ? -EEXIST : -EAGAIN;
+
+ /*
+ * EEXIST is expected because two probes can be scheduled
+ * at the same time with one using alg_name and the other
+ * using driver_name. Do a re-lookup but do not retry in
+ * case we hit a quirk like gcm_base(ctr(aes),...) which
+ * will never match.
+ */
alg = &larval->alg;
alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
- ERR_PTR(-EAGAIN);
+ ERR_PTR(err);
} else if (IS_ERR(alg))
;
else if (crypto_is_test_larval(larval) &&
@@ -528,6 +536,7 @@ void *crypto_create_tfm_node(struct crypto_alg *alg,
goto out;
tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
+ tfm->fb = tfm;
err = frontend->init_tfm(tfm);
if (err)
@@ -569,7 +578,7 @@ void *crypto_clone_tfm(const struct crypto_type *frontend,
tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
tfm->crt_flags = otfm->crt_flags;
- tfm->exit = otfm->exit;
+ tfm->fb = tfm;
out:
return mem;
@@ -707,11 +716,27 @@ void crypto_destroy_alg(struct crypto_alg *alg)
{
if (alg->cra_type && alg->cra_type->destroy)
alg->cra_type->destroy(alg);
-
if (alg->cra_destroy)
alg->cra_destroy(alg);
}
EXPORT_SYMBOL_GPL(crypto_destroy_alg);
+struct crypto_async_request *crypto_request_clone(
+ struct crypto_async_request *req, size_t total, gfp_t gfp)
+{
+ struct crypto_tfm *tfm = req->tfm;
+ struct crypto_async_request *nreq;
+
+ nreq = kmemdup(req, total, gfp);
+ if (!nreq) {
+ req->tfm = tfm->fb;
+ return req;
+ }
+
+ nreq->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
+ return nreq;
+}
+EXPORT_SYMBOL_GPL(crypto_request_clone);
+
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 1a4825c97c5a..1608018111d0 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -73,7 +73,7 @@ static void __exit arc4_exit(void)
crypto_unregister_lskcipher(&arc4_alg);
}
-subsys_initcall(arc4_init);
+module_init(arc4_init);
module_exit(arc4_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c
index bd359d3313c2..faa7900383f6 100644
--- a/crypto/aria_generic.c
+++ b/crypto/aria_generic.c
@@ -304,7 +304,7 @@ static void __exit aria_fini(void)
crypto_unregister_alg(&aria_alg);
}
-subsys_initcall(aria_init);
+module_init(aria_init);
module_exit(aria_fini);
MODULE_DESCRIPTION("ARIA Cipher Algorithm");
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index bf165d321440..e5b177c8e842 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -163,10 +163,8 @@ static u8 *pkey_pack_u32(u8 *dst, u32 val)
static int software_key_query(const struct kernel_pkey_params *params,
struct kernel_pkey_query *info)
{
- struct crypto_akcipher *tfm;
struct public_key *pkey = params->key->payload.data[asym_crypto];
char alg_name[CRYPTO_MAX_ALG_NAME];
- struct crypto_sig *sig;
u8 *key, *ptr;
int ret, len;
bool issig;
@@ -188,7 +186,11 @@ static int software_key_query(const struct kernel_pkey_params *params,
ptr = pkey_pack_u32(ptr, pkey->paramlen);
memcpy(ptr, pkey->params, pkey->paramlen);
+ memset(info, 0, sizeof(*info));
+
if (issig) {
+ struct crypto_sig *sig;
+
sig = crypto_alloc_sig(alg_name, 0, 0);
if (IS_ERR(sig)) {
ret = PTR_ERR(sig);
@@ -200,9 +202,10 @@ static int software_key_query(const struct kernel_pkey_params *params,
else
ret = crypto_sig_set_pubkey(sig, key, pkey->keylen);
if (ret < 0)
- goto error_free_tfm;
+ goto error_free_sig;
len = crypto_sig_keysize(sig);
+ info->key_size = len;
info->max_sig_size = crypto_sig_maxsize(sig);
info->max_data_size = crypto_sig_digestsize(sig);
@@ -211,11 +214,19 @@ static int software_key_query(const struct kernel_pkey_params *params,
info->supported_ops |= KEYCTL_SUPPORTS_SIGN;
if (strcmp(params->encoding, "pkcs1") == 0) {
+ info->max_enc_size = len / BITS_PER_BYTE;
+ info->max_dec_size = len / BITS_PER_BYTE;
+
info->supported_ops |= KEYCTL_SUPPORTS_ENCRYPT;
if (pkey->key_is_private)
info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT;
}
+
+error_free_sig:
+ crypto_free_sig(sig);
} else {
+ struct crypto_akcipher *tfm;
+
tfm = crypto_alloc_akcipher(alg_name, 0, 0);
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
@@ -227,28 +238,23 @@ static int software_key_query(const struct kernel_pkey_params *params,
else
ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
if (ret < 0)
- goto error_free_tfm;
+ goto error_free_akcipher;
len = crypto_akcipher_maxsize(tfm);
+ info->key_size = len * BITS_PER_BYTE;
info->max_sig_size = len;
info->max_data_size = len;
+ info->max_enc_size = len;
+ info->max_dec_size = len;
info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT;
if (pkey->key_is_private)
info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT;
- }
-
- info->key_size = len * 8;
- info->max_enc_size = len;
- info->max_dec_size = len;
-
- ret = 0;
-error_free_tfm:
- if (issig)
- crypto_free_sig(sig);
- else
+error_free_akcipher:
crypto_free_akcipher(tfm);
+ }
+
error_free_key:
kfree_sensitive(key);
pr_devel("<==%s() = %d\n", __func__, ret);
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index 2863984b6700..1f3b227ba7f2 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -40,13 +40,13 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
} while (0)
chkaddr(0, 0, sizeof(*mz));
- if (mz->magic != MZ_MAGIC)
+ if (mz->magic != IMAGE_DOS_SIGNATURE)
return -ELIBBAD;
cursor = sizeof(*mz);
chkaddr(cursor, mz->peaddr, sizeof(*pe));
pe = pebuf + mz->peaddr;
- if (pe->magic != PE_MAGIC)
+ if (pe->magic != IMAGE_NT_SIGNATURE)
return -ELIBBAD;
cursor = mz->peaddr + sizeof(*pe);
@@ -55,7 +55,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
pe64 = pebuf + cursor;
switch (pe32->magic) {
- case PE_OPT_MAGIC_PE32:
+ case IMAGE_NT_OPTIONAL_HDR32_MAGIC:
chkaddr(0, cursor, sizeof(*pe32));
ctx->image_checksum_offset =
(unsigned long)&pe32->csum - (unsigned long)pebuf;
@@ -64,7 +64,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
ctx->n_data_dirents = pe32->data_dirs;
break;
- case PE_OPT_MAGIC_PE32PLUS:
+ case IMAGE_NT_OPTIONAL_HDR64_MAGIC:
chkaddr(0, cursor, sizeof(*pe64));
ctx->image_checksum_offset =
(unsigned long)&pe64->csum - (unsigned long)pebuf;
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index ee2fdab42334..2ffe4ae90bea 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -372,10 +372,9 @@ static int x509_fabricate_name(struct x509_parse_context *ctx, size_t hdrlen,
/* Empty name string if no material */
if (!ctx->cn_size && !ctx->o_size && !ctx->email_size) {
- buffer = kmalloc(1, GFP_KERNEL);
+ buffer = kzalloc(1, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
- buffer[0] = 0;
goto done;
}
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 3aaf3ab4e360..a723769c8777 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -9,7 +9,6 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
-#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -28,7 +27,6 @@ struct authenc_instance_ctx {
struct crypto_authenc_ctx {
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
};
struct authenc_request_ctx {
@@ -170,21 +168,6 @@ out:
authenc_request_complete(areq, err);
}
-static int crypto_authenc_copy_assoc(struct aead_request *req)
-{
- struct crypto_aead *authenc = crypto_aead_reqtfm(req);
- struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
-
- skcipher_request_set_sync_tfm(skreq, ctx->null);
- skcipher_request_set_callback(skreq, aead_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
- NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-
static int crypto_authenc_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
@@ -203,10 +186,7 @@ static int crypto_authenc_encrypt(struct aead_request *req)
dst = src;
if (req->src != req->dst) {
- err = crypto_authenc_copy_assoc(req);
- if (err)
- return err;
-
+ memcpy_sglist(req->dst, req->src, req->assoclen);
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
}
@@ -303,7 +283,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
@@ -315,14 +294,8 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher();
- err = PTR_ERR(null);
- if (IS_ERR(null))
- goto err_free_skcipher;
-
ctx->auth = auth;
ctx->enc = enc;
- ctx->null = null;
crypto_aead_set_reqsize(
tfm,
@@ -336,8 +309,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
return 0;
-err_free_skcipher:
- crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -349,7 +320,6 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher();
}
static void crypto_authenc_free(struct aead_instance *inst)
@@ -451,7 +421,7 @@ static void __exit crypto_authenc_module_exit(void)
crypto_unregister_template(&crypto_authenc_tmpl);
}
-subsys_initcall(crypto_authenc_module_init);
+module_init(crypto_authenc_module_init);
module_exit(crypto_authenc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 2cc933e2f790..d1bf0fda3f2e 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -12,7 +12,6 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
-#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -31,7 +30,6 @@ struct crypto_authenc_esn_ctx {
unsigned int reqoff;
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
};
struct authenc_esn_request_ctx {
@@ -158,20 +156,6 @@ static void crypto_authenc_esn_encrypt_done(void *data, int err)
authenc_esn_request_complete(areq, err);
}
-static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
-{
- struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
- struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
-
- skcipher_request_set_sync_tfm(skreq, ctx->null);
- skcipher_request_set_callback(skreq, aead_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-
static int crypto_authenc_esn_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
@@ -190,10 +174,7 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
dst = src;
if (req->src != req->dst) {
- err = crypto_authenc_esn_copy(req, assoclen);
- if (err)
- return err;
-
+ memcpy_sglist(req->dst, req->src, assoclen);
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
}
@@ -277,11 +258,8 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req)
cryptlen -= authsize;
- if (req->src != dst) {
- err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
- if (err)
- return err;
- }
+ if (req->src != dst)
+ memcpy_sglist(dst, req->src, assoclen + cryptlen);
scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
authsize, 0);
@@ -317,7 +295,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
@@ -329,14 +306,8 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher();
- err = PTR_ERR(null);
- if (IS_ERR(null))
- goto err_free_skcipher;
-
ctx->auth = auth;
ctx->enc = enc;
- ctx->null = null;
ctx->reqoff = 2 * crypto_ahash_digestsize(auth);
@@ -352,8 +323,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
return 0;
-err_free_skcipher:
- crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -365,7 +334,6 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher();
}
static void crypto_authenc_esn_free(struct aead_instance *inst)
@@ -465,7 +433,7 @@ static void __exit crypto_authenc_esn_module_exit(void)
crypto_unregister_template(&crypto_authenc_esn_tmpl);
}
-subsys_initcall(crypto_authenc_esn_module_init);
+module_init(crypto_authenc_esn_module_init);
module_exit(crypto_authenc_esn_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c
index 04a712ddfb43..60f056217510 100644
--- a/crypto/blake2b_generic.c
+++ b/crypto/blake2b_generic.c
@@ -15,12 +15,12 @@
* More information about BLAKE2 can be found at https://blake2.net.
*/
-#include <linux/unaligned.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
#include <crypto/internal/blake2b.h>
#include <crypto/internal/hash.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
static const u8 blake2b_sigma[12][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
@@ -111,8 +111,8 @@ static void blake2b_compress_one_generic(struct blake2b_state *S,
#undef G
#undef ROUND
-void blake2b_compress_generic(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc)
+static void blake2b_compress_generic(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc)
{
do {
blake2b_increment_counter(state, inc);
@@ -120,17 +120,19 @@ void blake2b_compress_generic(struct blake2b_state *state,
block += BLAKE2B_BLOCK_SIZE;
} while (--nblocks);
}
-EXPORT_SYMBOL(blake2b_compress_generic);
static int crypto_blake2b_update_generic(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2b_update(desc, in, inlen, blake2b_compress_generic);
+ return crypto_blake2b_update_bo(desc, in, inlen,
+ blake2b_compress_generic);
}
-static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out)
+static int crypto_blake2b_finup_generic(struct shash_desc *desc, const u8 *in,
+ unsigned int inlen, u8 *out)
{
- return crypto_blake2b_final(desc, out, blake2b_compress_generic);
+ return crypto_blake2b_finup(desc, in, inlen, out,
+ blake2b_compress_generic);
}
#define BLAKE2B_ALG(name, driver_name, digest_size) \
@@ -138,7 +140,9 @@ static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out)
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 100, \
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY | \
+ CRYPTO_AHASH_ALG_BLOCK_ONLY | \
+ CRYPTO_AHASH_ALG_FINAL_NONZERO, \
.base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
@@ -146,8 +150,9 @@ static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out)
.setkey = crypto_blake2b_setkey, \
.init = crypto_blake2b_init, \
.update = crypto_blake2b_update_generic, \
- .final = crypto_blake2b_final_generic, \
- .descsize = sizeof(struct blake2b_state), \
+ .finup = crypto_blake2b_finup_generic, \
+ .descsize = BLAKE2B_DESC_SIZE, \
+ .statesize = BLAKE2B_STATE_SIZE, \
}
static struct shash_alg blake2b_algs[] = {
@@ -171,7 +176,7 @@ static void __exit blake2b_mod_fini(void)
crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs));
}
-subsys_initcall(blake2b_mod_init);
+module_init(blake2b_mod_init);
module_exit(blake2b_mod_fini);
MODULE_AUTHOR("David Sterba <kdave@kernel.org>");
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 0146bc762c09..f3c5f9b09850 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -124,7 +124,7 @@ static void __exit blowfish_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(blowfish_mod_init);
+module_init(blowfish_mod_init);
module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 197fcf3abc89..ee4336a04b93 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -1064,7 +1064,7 @@ static void __exit camellia_fini(void)
crypto_unregister_alg(&camellia_alg);
}
-subsys_initcall(camellia_init);
+module_init(camellia_init);
module_exit(camellia_fini);
MODULE_DESCRIPTION("Camellia Cipher Algorithm");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index f3e57775fa02..f68330793e0c 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -531,7 +531,7 @@ static void __exit cast5_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(cast5_mod_init);
+module_init(cast5_mod_init);
module_exit(cast5_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index 11b725b12f27..4c08c42646f0 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -271,7 +271,7 @@ static void __exit cast6_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(cast6_mod_init);
+module_init(cast6_mod_init);
module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/cbc.c b/crypto/cbc.c
index e81918ca68b7..ed3df6246765 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -179,7 +179,7 @@ static void __exit crypto_cbc_module_exit(void)
crypto_unregister_template(&crypto_cbc_tmpl);
}
-subsys_initcall(crypto_cbc_module_init);
+module_init(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 06476b53b491..2ae929ffdef8 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -10,11 +10,12 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
+#include <crypto/utils.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string.h>
struct ccm_instance_ctx {
struct crypto_skcipher_spawn ctr;
@@ -54,11 +55,6 @@ struct cbcmac_tfm_ctx {
struct crypto_cipher *child;
};
-struct cbcmac_desc_ctx {
- unsigned int len;
- u8 dg[];
-};
-
static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
struct aead_request *req)
{
@@ -783,12 +779,10 @@ static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent,
static int crypto_cbcmac_digest_init(struct shash_desc *pdesc)
{
- struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_digestsize(pdesc->tfm);
+ u8 *dg = shash_desc_ctx(pdesc);
- ctx->len = 0;
- memset(ctx->dg, 0, bs);
-
+ memset(dg, 0, bs);
return 0;
}
@@ -797,39 +791,34 @@ static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p,
{
struct crypto_shash *parent = pdesc->tfm;
struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_digestsize(parent);
-
- while (len > 0) {
- unsigned int l = min(len, bs - ctx->len);
-
- crypto_xor(&ctx->dg[ctx->len], p, l);
- ctx->len +=l;
- len -= l;
- p += l;
-
- if (ctx->len == bs) {
- crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg);
- ctx->len = 0;
- }
- }
-
- return 0;
+ u8 *dg = shash_desc_ctx(pdesc);
+
+ do {
+ crypto_xor(dg, p, bs);
+ crypto_cipher_encrypt_one(tfm, dg, dg);
+ p += bs;
+ len -= bs;
+ } while (len >= bs);
+ return len;
}
-static int crypto_cbcmac_digest_final(struct shash_desc *pdesc, u8 *out)
+static int crypto_cbcmac_digest_finup(struct shash_desc *pdesc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_digestsize(parent);
+ u8 *dg = shash_desc_ctx(pdesc);
- if (ctx->len)
- crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg);
-
- memcpy(out, ctx->dg, bs);
+ if (len) {
+ crypto_xor(dg, src, len);
+ crypto_cipher_encrypt_one(tfm, out, dg);
+ return 0;
+ }
+ memcpy(out, dg, bs);
return 0;
}
@@ -883,19 +872,19 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
goto err_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
- inst->alg.base.cra_blocksize = 1;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize = sizeof(struct cbcmac_desc_ctx) +
- alg->cra_blocksize;
+ inst->alg.descsize = alg->cra_blocksize;
+ inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY;
inst->alg.base.cra_ctxsize = sizeof(struct cbcmac_tfm_ctx);
inst->alg.base.cra_init = cbcmac_init_tfm;
inst->alg.base.cra_exit = cbcmac_exit_tfm;
inst->alg.init = crypto_cbcmac_digest_init;
inst->alg.update = crypto_cbcmac_digest_update;
- inst->alg.final = crypto_cbcmac_digest_final;
+ inst->alg.finup = crypto_cbcmac_digest_finup;
inst->alg.setkey = crypto_cbcmac_digest_setkey;
inst->free = shash_free_singlespawn_instance;
@@ -940,7 +929,7 @@ static void __exit crypto_ccm_module_exit(void)
ARRAY_SIZE(crypto_ccm_tmpls));
}
-subsys_initcall(crypto_ccm_module_init);
+module_init(crypto_ccm_module_init);
module_exit(crypto_ccm_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/chacha.c b/crypto/chacha.c
new file mode 100644
index 000000000000..c3a11f4e2d13
--- /dev/null
+++ b/crypto/chacha.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers
+ *
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2018 Google LLC
+ */
+
+#include <linux/unaligned.h>
+#include <crypto/algapi.h>
+#include <crypto/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/module.h>
+
+struct chacha_ctx {
+ u32 key[8];
+ int nrounds;
+};
+
+static int chacha_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize, int nrounds)
+{
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int i;
+
+ if (keysize != CHACHA_KEY_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
+
+ ctx->nrounds = nrounds;
+ return 0;
+}
+
+static int chacha20_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 20);
+}
+
+static int chacha12_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 12);
+}
+
+static int chacha_stream_xor(struct skcipher_request *req,
+ const struct chacha_ctx *ctx,
+ const u8 iv[CHACHA_IV_SIZE], bool arch)
+{
+ struct skcipher_walk walk;
+ struct chacha_state state;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ chacha_init(&state, ctx->key, iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
+
+ if (arch)
+ chacha_crypt(&state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes, ctx->nrounds);
+ else
+ chacha_crypt_generic(&state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes,
+ ctx->nrounds);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+}
+
+static int crypto_chacha_crypt_generic(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_stream_xor(req, ctx, req->iv, false);
+}
+
+static int crypto_chacha_crypt_arch(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_stream_xor(req, ctx, req->iv, true);
+}
+
+static int crypto_xchacha_crypt(struct skcipher_request *req, bool arch)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha_ctx subctx;
+ struct chacha_state state;
+ u8 real_iv[16];
+
+ /* Compute the subkey given the original key and first 128 nonce bits */
+ chacha_init(&state, ctx->key, req->iv);
+ if (arch)
+ hchacha_block(&state, subctx.key, ctx->nrounds);
+ else
+ hchacha_block_generic(&state, subctx.key, ctx->nrounds);
+ subctx.nrounds = ctx->nrounds;
+
+ /* Build the real IV */
+ memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
+ memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */
+
+ /* Generate the stream and XOR it with the data */
+ return chacha_stream_xor(req, &subctx, real_iv, arch);
+}
+
+static int crypto_xchacha_crypt_generic(struct skcipher_request *req)
+{
+ return crypto_xchacha_crypt(req, false);
+}
+
+static int crypto_xchacha_crypt_arch(struct skcipher_request *req)
+{
+ return crypto_xchacha_crypt(req, true);
+}
+
+static struct skcipher_alg algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_chacha_crypt_generic,
+ .decrypt = crypto_chacha_crypt_generic,
+ },
+ {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_xchacha_crypt_generic,
+ .decrypt = crypto_xchacha_crypt_generic,
+ },
+ {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = crypto_xchacha_crypt_generic,
+ .decrypt = crypto_xchacha_crypt_generic,
+ },
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_chacha_crypt_arch,
+ .decrypt = crypto_chacha_crypt_arch,
+ },
+ {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_xchacha_crypt_arch,
+ .decrypt = crypto_xchacha_crypt_arch,
+ },
+ {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = crypto_xchacha_crypt_arch,
+ .decrypt = crypto_xchacha_crypt_arch,
+ }
+};
+
+static unsigned int num_algs;
+
+static int __init crypto_chacha_mod_init(void)
+{
+ /* register the arch flavours only if they differ from generic */
+ num_algs = ARRAY_SIZE(algs);
+ BUILD_BUG_ON(ARRAY_SIZE(algs) % 2 != 0);
+ if (!chacha_is_arch_optimized())
+ num_algs /= 2;
+
+ return crypto_register_skciphers(algs, num_algs);
+}
+
+static void __exit crypto_chacha_mod_fini(void)
+{
+ crypto_unregister_skciphers(algs, num_algs);
+}
+
+module_init(crypto_chacha_mod_init);
+module_exit(crypto_chacha_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-generic");
+MODULE_ALIAS_CRYPTO("chacha20-" __stringify(ARCH));
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-generic");
+MODULE_ALIAS_CRYPTO("xchacha20-" __stringify(ARCH));
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-generic");
+MODULE_ALIAS_CRYPTO("xchacha12-" __stringify(ARCH));
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index d740849f1c19..b4b5a7198d84 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -12,36 +12,23 @@
#include <crypto/chacha.h>
#include <crypto/poly1305.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/string.h>
struct chachapoly_instance_ctx {
struct crypto_skcipher_spawn chacha;
- struct crypto_ahash_spawn poly;
unsigned int saltlen;
};
struct chachapoly_ctx {
struct crypto_skcipher *chacha;
- struct crypto_ahash *poly;
/* key bytes we use for the ChaCha20 IV */
unsigned int saltlen;
u8 salt[] __counted_by(saltlen);
};
-struct poly_req {
- /* zero byte padding for AD/ciphertext, as needed */
- u8 pad[POLY1305_BLOCK_SIZE];
- /* tail data with AD/ciphertext lengths */
- struct {
- __le64 assoclen;
- __le64 cryptlen;
- } tail;
- struct scatterlist src[1];
- struct ahash_request req; /* must be last member */
-};
-
struct chacha_req {
u8 iv[CHACHA_IV_SIZE];
struct scatterlist src[1];
@@ -62,7 +49,6 @@ struct chachapoly_req_ctx {
/* request flags, with MAY_SLEEP cleared if needed */
u32 flags;
union {
- struct poly_req poly;
struct chacha_req chacha;
} u;
};
@@ -105,16 +91,6 @@ static int poly_verify_tag(struct aead_request *req)
return 0;
}
-static int poly_copy_tag(struct aead_request *req)
-{
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
-
- scatterwalk_map_and_copy(rctx->tag, req->dst,
- req->assoclen + rctx->cryptlen,
- sizeof(rctx->tag), 1);
- return 0;
-}
-
static void chacha_decrypt_done(void *data, int err)
{
async_done_continue(data, err, poly_verify_tag);
@@ -151,210 +127,76 @@ skip:
return poly_verify_tag(req);
}
-static int poly_tail_continue(struct aead_request *req)
-{
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
-
- if (rctx->cryptlen == req->cryptlen) /* encrypting */
- return poly_copy_tag(req);
-
- return chacha_decrypt(req);
-}
-
-static void poly_tail_done(void *data, int err)
-{
- async_done_continue(data, err, poly_tail_continue);
-}
-
-static int poly_tail(struct aead_request *req)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
-
- preq->tail.assoclen = cpu_to_le64(rctx->assoclen);
- preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen);
- sg_init_one(preq->src, &preq->tail, sizeof(preq->tail));
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_tail_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src,
- rctx->tag, sizeof(preq->tail));
-
- err = crypto_ahash_finup(&preq->req);
- if (err)
- return err;
-
- return poly_tail_continue(req);
-}
-
-static void poly_cipherpad_done(void *data, int err)
-{
- async_done_continue(data, err, poly_tail);
-}
-
-static int poly_cipherpad(struct aead_request *req)
+static int poly_hash(struct aead_request *req)
{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
+ const void *zp = page_address(ZERO_PAGE(0));
+ struct scatterlist *sg = req->src;
+ struct poly1305_desc_ctx desc;
+ struct scatter_walk walk;
+ struct {
+ union {
+ struct {
+ __le64 assoclen;
+ __le64 cryptlen;
+ };
+ u8 u8[16];
+ };
+ } tail;
unsigned int padlen;
- int err;
-
- padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE;
- memset(preq->pad, 0, sizeof(preq->pad));
- sg_init_one(preq->src, preq->pad, padlen);
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_cipherpad_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
+ unsigned int total;
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_tail(req);
-}
-
-static void poly_cipher_done(void *data, int err)
-{
- async_done_continue(data, err, poly_cipherpad);
-}
-
-static int poly_cipher(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- struct scatterlist *crypt = req->src;
- int err;
+ if (sg != req->dst)
+ memcpy_sglist(req->dst, sg, req->assoclen);
if (rctx->cryptlen == req->cryptlen) /* encrypting */
- crypt = req->dst;
-
- crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_cipher_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
+ sg = req->dst;
- return poly_cipherpad(req);
-}
+ poly1305_init(&desc, rctx->key);
+ scatterwalk_start(&walk, sg);
-static void poly_adpad_done(void *data, int err)
-{
- async_done_continue(data, err, poly_cipher);
-}
+ total = rctx->assoclen;
+ while (total) {
+ unsigned int n = scatterwalk_next(&walk, total);
-static int poly_adpad(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- unsigned int padlen;
- int err;
+ poly1305_update(&desc, walk.addr, n);
+ scatterwalk_done_src(&walk, n);
+ total -= n;
+ }
padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE;
- memset(preq->pad, 0, sizeof(preq->pad));
- sg_init_one(preq->src, preq->pad, padlen);
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_adpad_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_cipher(req);
-}
-
-static void poly_ad_done(void *data, int err)
-{
- async_done_continue(data, err, poly_adpad);
-}
-
-static int poly_ad(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_ad_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_adpad(req);
-}
-
-static void poly_setkey_done(void *data, int err)
-{
- async_done_continue(data, err, poly_ad);
-}
+ poly1305_update(&desc, zp, padlen);
-static int poly_setkey(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
+ scatterwalk_skip(&walk, req->assoclen - rctx->assoclen);
- sg_init_one(preq->src, rctx->key, sizeof(rctx->key));
+ total = rctx->cryptlen;
+ while (total) {
+ unsigned int n = scatterwalk_next(&walk, total);
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_setkey_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_ad(req);
-}
-
-static void poly_init_done(void *data, int err)
-{
- async_done_continue(data, err, poly_setkey);
-}
+ poly1305_update(&desc, walk.addr, n);
+ scatterwalk_done_src(&walk, n);
+ total -= n;
+ }
-static int poly_init(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
+ padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE;
+ poly1305_update(&desc, zp, padlen);
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_init_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
+ tail.assoclen = cpu_to_le64(rctx->assoclen);
+ tail.cryptlen = cpu_to_le64(rctx->cryptlen);
+ poly1305_update(&desc, tail.u8, sizeof(tail));
+ memzero_explicit(&tail, sizeof(tail));
+ poly1305_final(&desc, rctx->tag);
- err = crypto_ahash_init(&preq->req);
- if (err)
- return err;
+ if (rctx->cryptlen != req->cryptlen)
+ return chacha_decrypt(req);
- return poly_setkey(req);
+ memcpy_to_scatterwalk(&walk, rctx->tag, sizeof(rctx->tag));
+ return 0;
}
static void poly_genkey_done(void *data, int err)
{
- async_done_continue(data, err, poly_init);
+ async_done_continue(data, err, poly_hash);
}
static int poly_genkey(struct aead_request *req)
@@ -388,7 +230,7 @@ static int poly_genkey(struct aead_request *req)
if (err)
return err;
- return poly_init(req);
+ return poly_hash(req);
}
static void chacha_encrypt_done(void *data, int err)
@@ -437,14 +279,7 @@ static int chachapoly_encrypt(struct aead_request *req)
/* encrypt call chain:
* - chacha_encrypt/done()
* - poly_genkey/done()
- * - poly_init/done()
- * - poly_setkey/done()
- * - poly_ad/done()
- * - poly_adpad/done()
- * - poly_cipher/done()
- * - poly_cipherpad/done()
- * - poly_tail/done/continue()
- * - poly_copy_tag()
+ * - poly_hash()
*/
return chacha_encrypt(req);
}
@@ -458,13 +293,7 @@ static int chachapoly_decrypt(struct aead_request *req)
/* decrypt call chain:
* - poly_genkey/done()
- * - poly_init/done()
- * - poly_setkey/done()
- * - poly_ad/done()
- * - poly_adpad/done()
- * - poly_cipher/done()
- * - poly_cipherpad/done()
- * - poly_tail/done/continue()
+ * - poly_hash()
* - chacha_decrypt/done()
* - poly_verify_tag()
*/
@@ -503,21 +332,13 @@ static int chachapoly_init(struct crypto_aead *tfm)
struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_skcipher *chacha;
- struct crypto_ahash *poly;
unsigned long align;
- poly = crypto_spawn_ahash(&ictx->poly);
- if (IS_ERR(poly))
- return PTR_ERR(poly);
-
chacha = crypto_spawn_skcipher(&ictx->chacha);
- if (IS_ERR(chacha)) {
- crypto_free_ahash(poly);
+ if (IS_ERR(chacha))
return PTR_ERR(chacha);
- }
ctx->chacha = chacha;
- ctx->poly = poly;
ctx->saltlen = ictx->saltlen;
align = crypto_aead_alignmask(tfm);
@@ -525,12 +346,9 @@ static int chachapoly_init(struct crypto_aead *tfm)
crypto_aead_set_reqsize(
tfm,
align + offsetof(struct chachapoly_req_ctx, u) +
- max(offsetof(struct chacha_req, req) +
- sizeof(struct skcipher_request) +
- crypto_skcipher_reqsize(chacha),
- offsetof(struct poly_req, req) +
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(poly)));
+ offsetof(struct chacha_req, req) +
+ sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(chacha));
return 0;
}
@@ -539,7 +357,6 @@ static void chachapoly_exit(struct crypto_aead *tfm)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
- crypto_free_ahash(ctx->poly);
crypto_free_skcipher(ctx->chacha);
}
@@ -548,7 +365,6 @@ static void chachapoly_free(struct aead_instance *inst)
struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_skcipher(&ctx->chacha);
- crypto_drop_ahash(&ctx->poly);
kfree(inst);
}
@@ -559,7 +375,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
struct aead_instance *inst;
struct chachapoly_instance_ctx *ctx;
struct skcipher_alg_common *chacha;
- struct hash_alg_common *poly;
int err;
if (ivsize > CHACHAPOLY_IV_SIZE)
@@ -581,14 +396,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
goto err_free_inst;
chacha = crypto_spawn_skcipher_alg_common(&ctx->chacha);
- err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst),
- crypto_attr_alg_name(tb[2]), 0, mask);
- if (err)
- goto err_free_inst;
- poly = crypto_spawn_ahash_alg(&ctx->poly);
-
err = -EINVAL;
- if (poly->digestsize != POLY1305_DIGEST_SIZE)
+ if (strcmp(crypto_attr_alg_name(tb[2]), "poly1305") &&
+ strcmp(crypto_attr_alg_name(tb[2]), "poly1305-generic"))
goto err_free_inst;
/* Need 16-byte IV size, including Initial Block Counter value */
if (chacha->ivsize != CHACHA_IV_SIZE)
@@ -599,16 +409,15 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s,%s)", name, chacha->base.cra_name,
- poly->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
+ "%s(%s,poly1305)", name,
+ chacha->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s,%s)", name, chacha->base.cra_driver_name,
- poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ "%s(%s,poly1305-generic)", name,
+ chacha->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_priority = (chacha->base.cra_priority +
- poly->base.cra_priority) / 2;
+ inst->alg.base.cra_priority = chacha->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = chacha->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
@@ -667,7 +476,7 @@ static void __exit chacha20poly1305_module_exit(void)
ARRAY_SIZE(rfc7539_tmpls));
}
-subsys_initcall(chacha20poly1305_module_init);
+module_init(chacha20poly1305_module_init);
module_exit(chacha20poly1305_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c
deleted file mode 100644
index 1fb9fbd302c6..000000000000
--- a/crypto/chacha_generic.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539)
- *
- * Copyright (C) 2015 Martin Willi
- * Copyright (C) 2018 Google LLC
- */
-
-#include <linux/unaligned.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/module.h>
-
-static int chacha_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
-{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
-
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes, ctx->nrounds);
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int crypto_chacha_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_stream_xor(req, ctx, req->iv);
-}
-
-static int crypto_xchacha_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- /* Compute the subkey given the original key and first 128 nonce bits */
- chacha_init(state, ctx->key, req->iv);
- hchacha_block_generic(state, subctx.key, ctx->nrounds);
- subctx.nrounds = ctx->nrounds;
-
- /* Build the real IV */
- memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
- memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */
-
- /* Generate the stream and XOR it with the data */
- return chacha_stream_xor(req, &subctx, real_iv);
-}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = crypto_chacha_crypt,
- .decrypt = crypto_chacha_crypt,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = crypto_xchacha_crypt,
- .decrypt = crypto_xchacha_crypt,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = crypto_xchacha_crypt,
- .decrypt = crypto_xchacha_crypt,
- }
-};
-
-static int __init chacha_generic_mod_init(void)
-{
- return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit chacha_generic_mod_fini(void)
-{
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-subsys_initcall(chacha_generic_mod_init);
-module_exit(chacha_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (generic)");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-generic");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-generic");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-generic");
diff --git a/crypto/cmac.c b/crypto/cmac.c
index c66a0f4d8808..1b03964abe00 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -13,9 +13,12 @@
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
+#include <crypto/utils.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
/*
* +------------------------
@@ -31,22 +34,6 @@ struct cmac_tfm_ctx {
__be64 consts[];
};
-/*
- * +------------------------
- * | <shash desc>
- * +------------------------
- * | cmac_desc_ctx
- * +------------------------
- * | odds (block size)
- * +------------------------
- * | prev (block size)
- * +------------------------
- */
-struct cmac_desc_ctx {
- unsigned int len;
- u8 odds[];
-};
-
static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
@@ -102,13 +89,10 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
static int crypto_cmac_digest_init(struct shash_desc *pdesc)
{
- struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_blocksize(pdesc->tfm);
- u8 *prev = &ctx->odds[bs];
+ u8 *prev = shash_desc_ctx(pdesc);
- ctx->len = 0;
memset(prev, 0, bs);
-
return 0;
}
@@ -117,77 +101,36 @@ static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p,
{
struct crypto_shash *parent = pdesc->tfm;
struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = ctx->odds;
- u8 *prev = odds + bs;
-
- /* checking the data can fill the block */
- if ((ctx->len + len) <= bs) {
- memcpy(odds + ctx->len, p, len);
- ctx->len += len;
- return 0;
- }
-
- /* filling odds with new data and encrypting it */
- memcpy(odds + ctx->len, p, bs - ctx->len);
- len -= bs - ctx->len;
- p += bs - ctx->len;
-
- crypto_xor(prev, odds, bs);
- crypto_cipher_encrypt_one(tfm, prev, prev);
+ u8 *prev = shash_desc_ctx(pdesc);
- /* clearing the length */
- ctx->len = 0;
-
- /* encrypting the rest of data */
- while (len > bs) {
+ do {
crypto_xor(prev, p, bs);
crypto_cipher_encrypt_one(tfm, prev, prev);
p += bs;
len -= bs;
- }
-
- /* keeping the surplus of blocksize */
- if (len) {
- memcpy(odds, p, len);
- ctx->len = len;
- }
-
- return 0;
+ } while (len >= bs);
+ return len;
}
-static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out)
+static int crypto_cmac_digest_finup(struct shash_desc *pdesc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = ctx->odds;
- u8 *prev = odds + bs;
+ u8 *prev = shash_desc_ctx(pdesc);
unsigned int offset = 0;
- if (ctx->len != bs) {
- unsigned int rlen;
- u8 *p = odds + ctx->len;
-
- *p = 0x80;
- p++;
-
- rlen = bs - ctx->len - 1;
- if (rlen)
- memset(p, 0, rlen);
-
+ crypto_xor(prev, src, len);
+ if (len != bs) {
+ prev[len] ^= 0x80;
offset += bs;
}
-
- crypto_xor(prev, odds, bs);
crypto_xor(prev, (const u8 *)tctx->consts + offset, bs);
-
crypto_cipher_encrypt_one(tfm, out, prev);
-
return 0;
}
@@ -269,13 +212,14 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_ctxsize = sizeof(struct cmac_tfm_ctx) +
alg->cra_blocksize * 2;
+ inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize = sizeof(struct cmac_desc_ctx) +
- alg->cra_blocksize * 2;
+ inst->alg.descsize = alg->cra_blocksize;
inst->alg.init = crypto_cmac_digest_init;
inst->alg.update = crypto_cmac_digest_update;
- inst->alg.final = crypto_cmac_digest_final;
+ inst->alg.finup = crypto_cmac_digest_finup;
inst->alg.setkey = crypto_cmac_digest_setkey;
inst->alg.init_tfm = cmac_init_tfm;
inst->alg.clone_tfm = cmac_clone_tfm;
@@ -307,7 +251,7 @@ static void __exit crypto_cmac_module_exit(void)
crypto_unregister_template(&crypto_cmac_tmpl);
}
-subsys_initcall(crypto_cmac_module_init);
+module_init(crypto_cmac_module_init);
module_exit(crypto_cmac_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/crc32_generic.c b/crypto/crc32.c
index 783a30b27398..cc371d42601f 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32.c
@@ -172,7 +172,7 @@ static void __exit crc32_mod_fini(void)
crypto_unregister_shashes(algs, num_algs);
}
-subsys_initcall(crc32_mod_init);
+module_init(crc32_mod_init);
module_exit(crc32_mod_fini);
MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c.c
index b1a36d32dc50..e5377898414a 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c.c
@@ -212,7 +212,7 @@ static void __exit crc32c_mod_fini(void)
crypto_unregister_shashes(algs, num_algs);
}
-subsys_initcall(crc32c_mod_init);
+module_init(crc32c_mod_init);
module_exit(crc32c_mod_fini);
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 31d022d47f7a..5bb6f8d88cc2 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -1138,7 +1138,7 @@ static void __exit cryptd_exit(void)
crypto_unregister_template(&cryptd_tmpl);
}
-subsys_initcall(cryptd_init);
+module_init(cryptd_init);
module_exit(cryptd_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index c7c16da5e649..445d3c113ee1 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -23,9 +23,6 @@
#define CRYPTO_ENGINE_MAX_QLEN 10
-/* Temporary algorithm flag used to indicate an updated driver. */
-#define CRYPTO_ALG_ENGINE 0x200
-
struct crypto_engine_alg {
struct crypto_alg base;
struct crypto_engine_op op;
@@ -148,16 +145,9 @@ start_request:
}
}
- if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
- alg = container_of(async_req->tfm->__crt_alg,
- struct crypto_engine_alg, base);
- op = &alg->op;
- } else {
- dev_err(engine->dev, "failed to do request\n");
- ret = -EINVAL;
- goto req_err_1;
- }
-
+ alg = container_of(async_req->tfm->__crt_alg,
+ struct crypto_engine_alg, base);
+ op = &alg->op;
ret = op->do_one_request(engine, async_req);
/* Request unsuccessfully executed by hardware */
@@ -569,9 +559,6 @@ int crypto_engine_register_aead(struct aead_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
@@ -614,9 +601,6 @@ int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);
@@ -660,9 +644,6 @@ int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);
@@ -677,9 +658,6 @@ int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);
@@ -694,9 +672,6 @@ int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index ced90f88ee07..34588f39fdfc 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -15,15 +15,11 @@
#include <crypto/null.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/spinlock.h>
#include <linux/string.h>
-static DEFINE_SPINLOCK(crypto_default_null_skcipher_lock);
-static struct crypto_sync_skcipher *crypto_default_null_skcipher;
-static int crypto_default_null_skcipher_refcnt;
-
static int null_init(struct shash_desc *desc)
{
return 0;
@@ -65,19 +61,9 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
static int null_skcipher_crypt(struct skcipher_request *req)
{
- struct skcipher_walk walk;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes) {
- if (walk.src.virt.addr != walk.dst.virt.addr)
- memcpy(walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes);
- err = skcipher_walk_done(&walk, 0);
- }
-
- return err;
+ if (req->src != req->dst)
+ memcpy_sglist(req->dst, req->src, req->cryptlen);
+ return 0;
}
static struct shash_alg digest_null = {
@@ -129,54 +115,6 @@ static struct crypto_alg cipher_null = {
MODULE_ALIAS_CRYPTO("digest_null");
MODULE_ALIAS_CRYPTO("cipher_null");
-struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
-{
- struct crypto_sync_skcipher *ntfm = NULL;
- struct crypto_sync_skcipher *tfm;
-
- spin_lock_bh(&crypto_default_null_skcipher_lock);
- tfm = crypto_default_null_skcipher;
-
- if (!tfm) {
- spin_unlock_bh(&crypto_default_null_skcipher_lock);
-
- ntfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
- if (IS_ERR(ntfm))
- return ntfm;
-
- spin_lock_bh(&crypto_default_null_skcipher_lock);
- tfm = crypto_default_null_skcipher;
- if (!tfm) {
- tfm = ntfm;
- ntfm = NULL;
- crypto_default_null_skcipher = tfm;
- }
- }
-
- crypto_default_null_skcipher_refcnt++;
- spin_unlock_bh(&crypto_default_null_skcipher_lock);
-
- crypto_free_sync_skcipher(ntfm);
-
- return tfm;
-}
-EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher);
-
-void crypto_put_default_null_skcipher(void)
-{
- struct crypto_sync_skcipher *tfm = NULL;
-
- spin_lock_bh(&crypto_default_null_skcipher_lock);
- if (!--crypto_default_null_skcipher_refcnt) {
- tfm = crypto_default_null_skcipher;
- crypto_default_null_skcipher = NULL;
- }
- spin_unlock_bh(&crypto_default_null_skcipher_lock);
-
- crypto_free_sync_skcipher(tfm);
-}
-EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher);
-
static int __init crypto_null_mod_init(void)
{
int ret = 0;
@@ -210,7 +148,7 @@ static void __exit crypto_null_mod_fini(void)
crypto_unregister_skcipher(&skcipher_null);
}
-subsys_initcall(crypto_null_mod_init);
+module_init(crypto_null_mod_init);
module_exit(crypto_null_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 97a947b0a876..a388f0ceb3a0 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -350,7 +350,7 @@ static void __exit crypto_ctr_module_exit(void)
ARRAY_SIZE(crypto_ctr_tmpls));
}
-subsys_initcall(crypto_ctr_module_init);
+module_init(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/cts.c b/crypto/cts.c
index f5b42156b6c7..48898d5e24ff 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -402,7 +402,7 @@ static void __exit crypto_cts_module_exit(void)
crypto_unregister_template(&crypto_cts_tmpl);
}
-subsys_initcall(crypto_cts_module_init);
+module_init(crypto_cts_module_init);
module_exit(crypto_cts_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/curve25519-generic.c b/crypto/curve25519-generic.c
index 68a673262e04..f3e56e73c66c 100644
--- a/crypto/curve25519-generic.c
+++ b/crypto/curve25519-generic.c
@@ -82,7 +82,7 @@ static void __exit curve25519_exit(void)
crypto_unregister_kpp(&curve25519_alg);
}
-subsys_initcall(curve25519_init);
+module_init(curve25519_init);
module_exit(curve25519_exit);
MODULE_ALIAS_CRYPTO("curve25519");
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 5c346c544093..fe8e4ad0fee1 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -6,253 +6,250 @@
* by IPCOMP (RFC 3173 & RFC 2394).
*
* Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
- *
- * FIXME: deflate transforms will require up to a total of about 436k of kernel
- * memory on i386 (390k for compression, the rest for decompression), as the
- * current zlib kernel code uses a worst case pre-allocation system by default.
- * This needs to be fixed so that the amount of memory required is properly
- * related to the winbits and memlevel parameters.
- *
- * The default winbits of 11 should suit most packets, and it may be something
- * to configure on a per-tfm basis in the future.
- *
- * Currently, compression history is not maintained between tfm calls, as
- * it is not needed for IPCOMP and keeps the code simpler. It can be
- * implemented if someone wants it.
+ * Copyright (c) 2023 Google, LLC. <ardb@kernel.org>
+ * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au>
*/
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/crypto.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/zlib.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <crypto/internal/scompress.h>
#define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION
#define DEFLATE_DEF_WINBITS 11
#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
-struct deflate_ctx {
- struct z_stream_s comp_stream;
- struct z_stream_s decomp_stream;
+struct deflate_stream {
+ struct z_stream_s stream;
+ u8 workspace[];
};
-static int deflate_comp_init(struct deflate_ctx *ctx)
-{
- int ret = 0;
- struct z_stream_s *stream = &ctx->comp_stream;
-
- stream->workspace = vzalloc(zlib_deflate_workspacesize(
- -DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL));
- if (!stream->workspace) {
- ret = -ENOMEM;
- goto out;
- }
- ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
- -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
- Z_DEFAULT_STRATEGY);
- if (ret != Z_OK) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(stream->workspace);
- goto out;
-}
+static DEFINE_MUTEX(deflate_stream_lock);
-static int deflate_decomp_init(struct deflate_ctx *ctx)
+static void *deflate_alloc_stream(void)
{
- int ret = 0;
- struct z_stream_s *stream = &ctx->decomp_stream;
+ size_t size = max(zlib_inflate_workspacesize(),
+ zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS,
+ DEFLATE_DEF_MEMLEVEL));
+ struct deflate_stream *ctx;
- stream->workspace = vzalloc(zlib_inflate_workspacesize());
- if (!stream->workspace) {
- ret = -ENOMEM;
- goto out;
- }
- ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS);
- if (ret != Z_OK) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(stream->workspace);
- goto out;
-}
+ ctx = kvmalloc(sizeof(*ctx) + size, GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
-static void deflate_comp_exit(struct deflate_ctx *ctx)
-{
- zlib_deflateEnd(&ctx->comp_stream);
- vfree(ctx->comp_stream.workspace);
-}
+ ctx->stream.workspace = ctx->workspace;
-static void deflate_decomp_exit(struct deflate_ctx *ctx)
-{
- zlib_inflateEnd(&ctx->decomp_stream);
- vfree(ctx->decomp_stream.workspace);
+ return ctx;
}
-static int __deflate_init(void *ctx)
+static struct crypto_acomp_streams deflate_streams = {
+ .alloc_ctx = deflate_alloc_stream,
+ .cfree_ctx = kvfree,
+};
+
+static int deflate_compress_one(struct acomp_req *req,
+ struct deflate_stream *ds)
{
+ struct z_stream_s *stream = &ds->stream;
+ struct acomp_walk walk;
int ret;
- ret = deflate_comp_init(ctx);
+ ret = acomp_walk_virt(&walk, req, true);
if (ret)
- goto out;
- ret = deflate_decomp_init(ctx);
- if (ret)
- deflate_comp_exit(ctx);
-out:
- return ret;
-}
+ return ret;
-static void *deflate_alloc_ctx(void)
-{
- struct deflate_ctx *ctx;
- int ret;
+ do {
+ unsigned int dcur;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur)
+ return -ENOSPC;
- ret = __deflate_init(ctx);
- if (ret) {
- kfree(ctx);
- return ERR_PTR(ret);
- }
+ stream->avail_out = dcur;
+ stream->next_out = walk.dst.virt.addr;
- return ctx;
-}
+ do {
+ int flush = Z_FINISH;
+ unsigned int scur;
-static void __deflate_exit(void *ctx)
-{
- deflate_comp_exit(ctx);
- deflate_decomp_exit(ctx);
-}
+ stream->avail_in = 0;
+ stream->next_in = NULL;
-static void deflate_free_ctx(void *ctx)
-{
- __deflate_exit(ctx);
- kfree_sensitive(ctx);
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ if (acomp_walk_more_src(&walk, scur))
+ flush = Z_NO_FLUSH;
+ stream->avail_in = scur;
+ stream->next_in = walk.src.virt.addr;
+ }
+
+ ret = zlib_deflate(stream, flush);
+
+ if (scur) {
+ scur -= stream->avail_in;
+ acomp_walk_done_src(&walk, scur);
+ }
+ } while (ret == Z_OK && stream->avail_out);
+
+ acomp_walk_done_dst(&walk, dcur);
+ } while (ret == Z_OK);
+
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dlen = stream->total_out;
+ return 0;
}
-static int __deflate_compress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
+static int deflate_compress(struct acomp_req *req)
{
- int ret = 0;
- struct deflate_ctx *dctx = ctx;
- struct z_stream_s *stream = &dctx->comp_stream;
+ struct crypto_acomp_stream *s;
+ struct deflate_stream *ds;
+ int err;
+
+ s = crypto_acomp_lock_stream_bh(&deflate_streams);
+ ds = s->ctx;
- ret = zlib_deflateReset(stream);
- if (ret != Z_OK) {
- ret = -EINVAL;
+ err = zlib_deflateInit2(&ds->stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
+ -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ Z_DEFAULT_STRATEGY);
+ if (err != Z_OK) {
+ err = -EINVAL;
goto out;
}
- stream->next_in = (u8 *)src;
- stream->avail_in = slen;
- stream->next_out = (u8 *)dst;
- stream->avail_out = *dlen;
+ err = deflate_compress_one(req, ds);
- ret = zlib_deflate(stream, Z_FINISH);
- if (ret != Z_STREAM_END) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *dlen = stream->total_out;
out:
- return ret;
+ crypto_acomp_unlock_stream_bh(s);
+
+ return err;
}
-static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
+static int deflate_decompress_one(struct acomp_req *req,
+ struct deflate_stream *ds)
{
- return __deflate_compress(src, slen, dst, dlen, ctx);
+ struct z_stream_s *stream = &ds->stream;
+ bool out_of_space = false;
+ struct acomp_walk walk;
+ int ret;
+
+ ret = acomp_walk_virt(&walk, req, true);
+ if (ret)
+ return ret;
+
+ do {
+ unsigned int scur;
+
+ stream->avail_in = 0;
+ stream->next_in = NULL;
+
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ stream->avail_in = scur;
+ stream->next_in = walk.src.virt.addr;
+ }
+
+ do {
+ unsigned int dcur;
+
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur) {
+ out_of_space = true;
+ break;
+ }
+
+ stream->avail_out = dcur;
+ stream->next_out = walk.dst.virt.addr;
+
+ ret = zlib_inflate(stream, Z_NO_FLUSH);
+
+ dcur -= stream->avail_out;
+ acomp_walk_done_dst(&walk, dcur);
+ } while (ret == Z_OK && stream->avail_in);
+
+ if (scur)
+ acomp_walk_done_src(&walk, scur);
+
+ if (out_of_space)
+ return -ENOSPC;
+ } while (ret == Z_OK);
+
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dlen = stream->total_out;
+ return 0;
}
-static int __deflate_decompress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
+static int deflate_decompress(struct acomp_req *req)
{
+ struct crypto_acomp_stream *s;
+ struct deflate_stream *ds;
+ int err;
- int ret = 0;
- struct deflate_ctx *dctx = ctx;
- struct z_stream_s *stream = &dctx->decomp_stream;
+ s = crypto_acomp_lock_stream_bh(&deflate_streams);
+ ds = s->ctx;
- ret = zlib_inflateReset(stream);
- if (ret != Z_OK) {
- ret = -EINVAL;
+ err = zlib_inflateInit2(&ds->stream, -DEFLATE_DEF_WINBITS);
+ if (err != Z_OK) {
+ err = -EINVAL;
goto out;
}
- stream->next_in = (u8 *)src;
- stream->avail_in = slen;
- stream->next_out = (u8 *)dst;
- stream->avail_out = *dlen;
-
- ret = zlib_inflate(stream, Z_SYNC_FLUSH);
- /*
- * Work around a bug in zlib, which sometimes wants to taste an extra
- * byte when being used in the (undocumented) raw deflate mode.
- * (From USAGI).
- */
- if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
- u8 zerostuff = 0;
- stream->next_in = &zerostuff;
- stream->avail_in = 1;
- ret = zlib_inflate(stream, Z_FINISH);
- }
- if (ret != Z_STREAM_END) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *dlen = stream->total_out;
+ err = deflate_decompress_one(req, ds);
+
out:
- return ret;
+ crypto_acomp_unlock_stream_bh(s);
+
+ return err;
}
-static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
+static int deflate_init(struct crypto_acomp *tfm)
{
- return __deflate_decompress(src, slen, dst, dlen, ctx);
+ int ret;
+
+ mutex_lock(&deflate_stream_lock);
+ ret = crypto_acomp_alloc_streams(&deflate_streams);
+ mutex_unlock(&deflate_stream_lock);
+
+ return ret;
}
-static struct scomp_alg scomp = {
- .alloc_ctx = deflate_alloc_ctx,
- .free_ctx = deflate_free_ctx,
- .compress = deflate_scompress,
- .decompress = deflate_sdecompress,
- .base = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-scomp",
- .cra_module = THIS_MODULE,
- }
+static struct acomp_alg acomp = {
+ .compress = deflate_compress,
+ .decompress = deflate_decompress,
+ .init = deflate_init,
+ .base.cra_name = "deflate",
+ .base.cra_driver_name = "deflate-generic",
+ .base.cra_flags = CRYPTO_ALG_REQ_VIRT,
+ .base.cra_module = THIS_MODULE,
};
static int __init deflate_mod_init(void)
{
- return crypto_register_scomp(&scomp);
+ return crypto_register_acomp(&acomp);
}
static void __exit deflate_mod_fini(void)
{
- crypto_unregister_scomp(&scomp);
+ crypto_unregister_acomp(&acomp);
+ crypto_acomp_free_streams(&deflate_streams);
}
-subsys_initcall(deflate_mod_init);
+module_init(deflate_mod_init);
module_exit(deflate_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
+MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
+MODULE_AUTHOR("Herbert Xu <herbert@gondor.apana.org.au>");
MODULE_ALIAS_CRYPTO("deflate");
MODULE_ALIAS_CRYPTO("deflate-generic");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 1274e18d3eb9..fce341400914 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -122,7 +122,7 @@ static void __exit des_generic_mod_fini(void)
crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs));
}
-subsys_initcall(des_generic_mod_init);
+module_init(des_generic_mod_init);
module_exit(des_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/dh.c b/crypto/dh.c
index afc0fd847761..8250eeeebd0f 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -920,7 +920,7 @@ static void __exit dh_exit(void)
crypto_unregister_kpp(&dh);
}
-subsys_initcall(dh_init);
+module_init(dh_init);
module_exit(dh_exit);
MODULE_ALIAS_CRYPTO("dh");
MODULE_LICENSE("GPL");
diff --git a/crypto/drbg.c b/crypto/drbg.c
index f28dfc2511a2..dbe4c8bb5ceb 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -2132,7 +2132,7 @@ static void __exit drbg_exit(void)
crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
-subsys_initcall(drbg_init);
+module_init(drbg_init);
module_exit(drbg_exit);
#ifndef CRYPTO_DRBG_HASH_STRING
#define CRYPTO_DRBG_HASH_STRING ""
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 95d7e972865a..cd1b20456dad 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -219,7 +219,7 @@ static void __exit crypto_ecb_module_exit(void)
crypto_unregister_template(&crypto_ecb_tmpl);
}
-subsys_initcall(crypto_ecb_module_init);
+module_init(crypto_ecb_module_init);
module_exit(crypto_ecb_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 72cfd1590156..9f0b93b3166d 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -240,7 +240,7 @@ static void __exit ecdh_exit(void)
crypto_unregister_kpp(&ecdh_nist_p384);
}
-subsys_initcall(ecdh_init);
+module_init(ecdh_init);
module_exit(ecdh_exit);
MODULE_ALIAS_CRYPTO("ecdh");
MODULE_LICENSE("GPL");
diff --git a/crypto/ecdsa-p1363.c b/crypto/ecdsa-p1363.c
index 4454f1f8f33f..e0c55c64711c 100644
--- a/crypto/ecdsa-p1363.c
+++ b/crypto/ecdsa-p1363.c
@@ -21,7 +21,8 @@ static int ecdsa_p1363_verify(struct crypto_sig *tfm,
const void *digest, unsigned int dlen)
{
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
- unsigned int keylen = crypto_sig_keysize(ctx->child);
+ unsigned int keylen = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ BITS_PER_BYTE);
unsigned int ndigits = DIV_ROUND_UP_POW2(keylen, sizeof(u64));
struct ecdsa_raw_sig sig;
@@ -45,7 +46,8 @@ static unsigned int ecdsa_p1363_max_size(struct crypto_sig *tfm)
{
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
- return 2 * crypto_sig_keysize(ctx->child);
+ return 2 * DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ BITS_PER_BYTE);
}
static unsigned int ecdsa_p1363_digest_size(struct crypto_sig *tfm)
diff --git a/crypto/ecdsa-x962.c b/crypto/ecdsa-x962.c
index 90a04f4b9a2f..ee71594d10a0 100644
--- a/crypto/ecdsa-x962.c
+++ b/crypto/ecdsa-x962.c
@@ -82,7 +82,7 @@ static int ecdsa_x962_verify(struct crypto_sig *tfm,
int err;
sig_ctx.ndigits = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
- sizeof(u64));
+ sizeof(u64) * BITS_PER_BYTE);
err = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, src, slen);
if (err < 0)
@@ -103,7 +103,8 @@ static unsigned int ecdsa_x962_max_size(struct crypto_sig *tfm)
{
struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
struct sig_alg *alg = crypto_sig_alg(ctx->child);
- int slen = crypto_sig_keysize(ctx->child);
+ int slen = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ BITS_PER_BYTE);
/*
* Verify takes ECDSA-Sig-Value (described in RFC 5480) as input,
diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
index 117526d15dde..ce8e4364842f 100644
--- a/crypto/ecdsa.c
+++ b/crypto/ecdsa.c
@@ -167,7 +167,7 @@ static unsigned int ecdsa_key_size(struct crypto_sig *tfm)
{
struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
- return DIV_ROUND_UP(ctx->curve->nbits, 8);
+ return ctx->curve->nbits;
}
static unsigned int ecdsa_digest_size(struct crypto_sig *tfm)
@@ -334,7 +334,7 @@ static void __exit ecdsa_exit(void)
crypto_unregister_sig(&ecdsa_nist_p521);
}
-subsys_initcall(ecdsa_init);
+module_init(ecdsa_init);
module_exit(ecdsa_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 69686668625e..e0a2d3209938 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -32,7 +32,6 @@ static int echainiv_encrypt(struct aead_request *req)
u64 seqno;
u8 *info;
unsigned int ivsize = crypto_aead_ivsize(geniv);
- int err;
if (req->cryptlen < ivsize)
return -EINVAL;
@@ -41,20 +40,9 @@ static int echainiv_encrypt(struct aead_request *req)
info = req->iv;
- if (req->src != req->dst) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
-
- skcipher_request_set_sync_tfm(nreq, ctx->sknull);
- skcipher_request_set_callback(nreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(nreq, req->src, req->dst,
- req->assoclen + req->cryptlen,
- NULL);
-
- err = crypto_skcipher_encrypt(nreq);
- if (err)
- return err;
- }
+ if (req->src != req->dst)
+ memcpy_sglist(req->dst, req->src,
+ req->assoclen + req->cryptlen);
aead_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
@@ -157,7 +145,7 @@ static void __exit echainiv_module_exit(void)
crypto_unregister_template(&echainiv_tmpl);
}
-subsys_initcall(echainiv_module_init);
+module_init(echainiv_module_init);
module_exit(echainiv_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
index b3dd8a3ddeb7..2c0602f0cd40 100644
--- a/crypto/ecrdsa.c
+++ b/crypto/ecrdsa.c
@@ -249,7 +249,7 @@ static unsigned int ecrdsa_key_size(struct crypto_sig *tfm)
* Verify doesn't need any output, so it's just informational
* for keyctl to determine the key bit size.
*/
- return ctx->pub_key.ndigits * sizeof(u64);
+ return ctx->pub_key.ndigits * sizeof(u64) * BITS_PER_BYTE;
}
static unsigned int ecrdsa_max_size(struct crypto_sig *tfm)
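As the hunks above show, crypto_sig_keysize() now reports bits rather than bytes, so the ECDSA/ECRDSA wrappers convert back with DIV_ROUND_UP(..., BITS_PER_BYTE). A standalone worked example of that conversion for a few common curve sizes (plain userspace arithmetic; DIV_ROUND_UP and BITS_PER_BYTE are defined locally rather than taken from kernel headers):

#include <stdio.h>

#define BITS_PER_BYTE	8
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	static const unsigned int curve_bits[] = { 192, 256, 384, 521 };
	unsigned int i;

	for (i = 0; i < sizeof(curve_bits) / sizeof(curve_bits[0]); i++) {
		unsigned int bits = curve_bits[i];
		unsigned int bytes = DIV_ROUND_UP(bits, BITS_PER_BYTE);

		printf("nbits=%3u -> key %2u bytes, raw r||s signature %3u bytes\n",
		       bits, bytes, 2 * bytes);
	}
	return 0;
}
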
diff --git a/crypto/essiv.c b/crypto/essiv.c
index ec0ec8992c2d..d003b78fcd85 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -548,8 +548,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
}
/* record the driver name so we can instantiate this exact algo later */
- strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name,
- CRYPTO_MAX_ALG_NAME);
+ strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name);
/* Instance fields */
@@ -642,7 +641,7 @@ static void __exit essiv_module_exit(void)
crypto_unregister_template(&essiv_tmpl);
}
-subsys_initcall(essiv_module_init);
+module_init(essiv_module_init);
module_exit(essiv_module_exit);
MODULE_DESCRIPTION("ESSIV skcipher/aead wrapper for block encryption");
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 95a16e88899b..80036835cec5 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -411,7 +411,7 @@ static void __exit fcrypt_mod_fini(void)
crypto_unregister_alg(&fcrypt_alg);
}
-subsys_initcall(fcrypt_mod_init);
+module_init(fcrypt_mod_init);
module_exit(fcrypt_mod_fini);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/fips.c b/crypto/fips.c
index 2fa3a9ee61a1..e88a604cb42b 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -95,5 +95,5 @@ static void __exit fips_exit(void)
crypto_proc_fips_exit();
}
-subsys_initcall(fips_init);
+module_init(fips_init);
module_exit(fips_exit);
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 84f7c23d14e4..97716482bed0 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -9,7 +9,6 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
-#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <crypto/gcm.h>
#include <crypto/hash.h>
@@ -46,7 +45,6 @@ struct crypto_rfc4543_instance_ctx {
struct crypto_rfc4543_ctx {
struct crypto_aead *child;
- struct crypto_sync_skcipher *null;
u8 nonce[4];
};
@@ -79,8 +77,6 @@ static struct {
struct scatterlist sg;
} *gcm_zeroes;
-static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc);
-
static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
struct aead_request *req)
{
@@ -930,12 +926,12 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc)
unsigned int authsize = crypto_aead_authsize(aead);
u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
crypto_aead_alignmask(ctx->child) + 1);
- int err;
if (req->src != req->dst) {
- err = crypto_rfc4543_copy_src_to_dst(req, enc);
- if (err)
- return err;
+ unsigned int nbytes = req->assoclen + req->cryptlen -
+ (enc ? 0 : authsize);
+
+ memcpy_sglist(req->dst, req->src, nbytes);
}
memcpy(iv, ctx->nonce, 4);
@@ -952,22 +948,6 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc)
return enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq);
}
-static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
- unsigned int authsize = crypto_aead_authsize(aead);
- unsigned int nbytes = req->assoclen + req->cryptlen -
- (enc ? 0 : authsize);
- SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
-
- skcipher_request_set_sync_tfm(nreq, ctx->null);
- skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
- skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
-
- return crypto_skcipher_encrypt(nreq);
-}
-
static int crypto_rfc4543_encrypt(struct aead_request *req)
{
return crypto_ipsec_check_assoclen(req->assoclen) ?:
@@ -987,21 +967,13 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
struct crypto_aead_spawn *spawn = &ictx->aead;
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *aead;
- struct crypto_sync_skcipher *null;
unsigned long align;
- int err = 0;
aead = crypto_spawn_aead(spawn);
if (IS_ERR(aead))
return PTR_ERR(aead);
- null = crypto_get_default_null_skcipher();
- err = PTR_ERR(null);
- if (IS_ERR(null))
- goto err_free_aead;
-
ctx->child = aead;
- ctx->null = null;
align = crypto_aead_alignmask(aead);
align &= ~(crypto_tfm_ctx_alignment() - 1);
@@ -1012,10 +984,6 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
align + GCM_AES_IV_SIZE);
return 0;
-
-err_free_aead:
- crypto_free_aead(aead);
- return err;
}
static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
@@ -1023,7 +991,6 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher();
}
static void crypto_rfc4543_free(struct aead_instance *inst)
@@ -1152,7 +1119,7 @@ static void __exit crypto_gcm_module_exit(void)
ARRAY_SIZE(crypto_gcm_tmpls));
}
-subsys_initcall(crypto_gcm_module_init);
+module_init(crypto_gcm_module_init);
module_exit(crypto_gcm_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/geniv.c b/crypto/geniv.c
index bee4621b4f12..42eff6a7387c 100644
--- a/crypto/geniv.c
+++ b/crypto/geniv.c
@@ -9,7 +9,6 @@
#include <crypto/internal/geniv.h>
#include <crypto/internal/rng.h>
-#include <crypto/null.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -125,15 +124,10 @@ int aead_init_geniv(struct crypto_aead *aead)
if (err)
goto out;
- ctx->sknull = crypto_get_default_null_skcipher();
- err = PTR_ERR(ctx->sknull);
- if (IS_ERR(ctx->sknull))
- goto out;
-
child = crypto_spawn_aead(aead_instance_ctx(inst));
err = PTR_ERR(child);
if (IS_ERR(child))
- goto drop_null;
+ goto out;
ctx->child = child;
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) +
@@ -143,10 +137,6 @@ int aead_init_geniv(struct crypto_aead *aead)
out:
return err;
-
-drop_null:
- crypto_put_default_null_skcipher();
- goto out;
}
EXPORT_SYMBOL_GPL(aead_init_geniv);
@@ -155,7 +145,6 @@ void aead_exit_geniv(struct crypto_aead *tfm)
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher();
}
EXPORT_SYMBOL_GPL(aead_exit_geniv);
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index c70d163c1ac9..e5803c249c12 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -34,14 +34,14 @@
* (https://csrc.nist.gov/publications/detail/sp/800-38d/final)
*/
-#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
#include <crypto/ghash.h>
#include <crypto/internal/hash.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
+#include <crypto/utils.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
static int ghash_init(struct shash_desc *desc)
{
@@ -82,59 +82,36 @@ static int ghash_update(struct shash_desc *desc,
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
- if (dctx->bytes) {
- int n = min(srclen, dctx->bytes);
- u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
-
- dctx->bytes -= n;
- srclen -= n;
-
- while (n--)
- *pos++ ^= *src++;
-
- if (!dctx->bytes)
- gf128mul_4k_lle((be128 *)dst, ctx->gf128);
- }
-
- while (srclen >= GHASH_BLOCK_SIZE) {
+ do {
crypto_xor(dst, src, GHASH_BLOCK_SIZE);
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
src += GHASH_BLOCK_SIZE;
srclen -= GHASH_BLOCK_SIZE;
- }
-
- if (srclen) {
- dctx->bytes = GHASH_BLOCK_SIZE - srclen;
- while (srclen--)
- *dst++ ^= *src++;
- }
+ } while (srclen >= GHASH_BLOCK_SIZE);
- return 0;
+ return srclen;
}
-static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static void ghash_flush(struct shash_desc *desc, const u8 *src,
+ unsigned int len)
{
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
u8 *dst = dctx->buffer;
- if (dctx->bytes) {
- u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
-
- while (dctx->bytes--)
- *tmp++ ^= 0;
-
+ if (len) {
+ crypto_xor(dst, src, len);
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
}
-
- dctx->bytes = 0;
}
-static int ghash_final(struct shash_desc *desc, u8 *dst)
+static int ghash_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
- ghash_flush(ctx, dctx);
+ ghash_flush(desc, src, len);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
return 0;
@@ -151,13 +128,14 @@ static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
- .final = ghash_final,
+ .finup = ghash_finup,
.setkey = ghash_setkey,
.descsize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
@@ -175,7 +153,7 @@ static void __exit ghash_mod_exit(void)
crypto_unregister_shash(&ghash_alg);
}
-subsys_initcall(ghash_mod_init);
+module_init(ghash_mod_init);
module_exit(ghash_mod_exit);
MODULE_LICENSE("GPL");
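
The ghash conversion above follows the block-only hash contract (CRYPTO_AHASH_ALG_BLOCK_ONLY): ->update() consumes only whole blocks and returns the number of leftover bytes, while ->finup() receives the buffered partial tail, so the per-request dctx->bytes bookkeeping disappears. A minimal standalone sketch of that calling convention, using hypothetical toy_* names and a stand-in compression step rather than the kernel code:

#include <linux/string.h>
#include <linux/types.h>

#define TOY_BLOCK_SIZE 16

/* Stand-in compression step: just XOR the block into the state. */
static void toy_compress(u8 *state, const u8 *block)
{
	int i;

	for (i = 0; i < TOY_BLOCK_SIZE; i++)
		state[i] ^= block[i];
}

static int toy_update(u8 *state, const u8 *src, unsigned int len)
{
	/* The core only calls this with len >= one block; whole blocks only. */
	do {
		toy_compress(state, src);
		src += TOY_BLOCK_SIZE;
		len -= TOY_BLOCK_SIZE;
	} while (len >= TOY_BLOCK_SIZE);

	return len;	/* leftover bytes are buffered by the core for finup */
}

static int toy_finup(u8 *state, const u8 *src, unsigned int len, u8 *out)
{
	u8 last[TOY_BLOCK_SIZE] = {};

	if (len) {			/* len < TOY_BLOCK_SIZE here */
		memcpy(last, src, len);
		toy_compress(state, last);
	}
	memcpy(out, state, TOY_BLOCK_SIZE);
	return 0;
}
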
diff --git a/crypto/hctr2.c b/crypto/hctr2.c
index cbcd673be481..c8932777bba8 100644
--- a/crypto/hctr2.c
+++ b/crypto/hctr2.c
@@ -570,7 +570,7 @@ static void __exit hctr2_module_exit(void)
ARRAY_SIZE(hctr2_tmpls));
}
-subsys_initcall(hctr2_module_init);
+module_init(hctr2_module_init);
module_exit(hctr2_module_exit);
MODULE_DESCRIPTION("HCTR2 length-preserving encryption mode");
diff --git a/crypto/hkdf.c b/crypto/hkdf.c
index 2434c5c42545..f24c2a8d4df9 100644
--- a/crypto/hkdf.c
+++ b/crypto/hkdf.c
@@ -543,7 +543,7 @@ static int __init crypto_hkdf_module_init(void)
{
int ret = 0, i;
- if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
return 0;
for (i = 0; i < ARRAY_SIZE(hkdf_sha256_tv); i++) {
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 7cec25ff9889..148af460ae97 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -13,13 +13,11 @@
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
-#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/fips.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/string.h>
struct hmac_ctx {
@@ -28,6 +26,12 @@ struct hmac_ctx {
u8 pads[];
};
+struct ahash_hmac_ctx {
+ struct crypto_ahash *hash;
+ /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */
+ u8 pads[];
+};
+
static int hmac_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
@@ -39,7 +43,7 @@ static int hmac_setkey(struct crypto_shash *parent,
u8 *ipad = &tctx->pads[0];
u8 *opad = &tctx->pads[ss];
SHASH_DESC_ON_STACK(shash, hash);
- unsigned int i;
+ int err, i;
if (fips_enabled && (keylen < 112 / 8))
return -EINVAL;
@@ -65,12 +69,14 @@ static int hmac_setkey(struct crypto_shash *parent,
opad[i] ^= HMAC_OPAD_VALUE;
}
- return crypto_shash_init(shash) ?:
- crypto_shash_update(shash, ipad, bs) ?:
- crypto_shash_export(shash, ipad) ?:
- crypto_shash_init(shash) ?:
- crypto_shash_update(shash, opad, bs) ?:
- crypto_shash_export(shash, opad);
+ err = crypto_shash_init(shash) ?:
+ crypto_shash_update(shash, ipad, bs) ?:
+ crypto_shash_export(shash, ipad) ?:
+ crypto_shash_init(shash) ?:
+ crypto_shash_update(shash, opad, bs) ?:
+ crypto_shash_export(shash, opad);
+ shash_desc_zero(shash);
+ return err;
}
static int hmac_export(struct shash_desc *pdesc, void *out)
@@ -90,6 +96,22 @@ static int hmac_import(struct shash_desc *pdesc, const void *in)
return crypto_shash_import(desc, in);
}
+static int hmac_export_core(struct shash_desc *pdesc, void *out)
+{
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
+
+ return crypto_shash_export_core(desc, out);
+}
+
+static int hmac_import_core(struct shash_desc *pdesc, const void *in)
+{
+ const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm);
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
+
+ desc->tfm = tctx->hash;
+ return crypto_shash_import_core(desc, in);
+}
+
static int hmac_init(struct shash_desc *pdesc)
{
const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm);
@@ -105,20 +127,6 @@ static int hmac_update(struct shash_desc *pdesc,
return crypto_shash_update(desc, data, nbytes);
}
-static int hmac_final(struct shash_desc *pdesc, u8 *out)
-{
- struct crypto_shash *parent = pdesc->tfm;
- int ds = crypto_shash_digestsize(parent);
- int ss = crypto_shash_statesize(parent);
- const struct hmac_ctx *tctx = crypto_shash_ctx(parent);
- const u8 *opad = &tctx->pads[ss];
- struct shash_desc *desc = shash_desc_ctx(pdesc);
-
- return crypto_shash_final(desc, out) ?:
- crypto_shash_import(desc, opad) ?:
- crypto_shash_finup(desc, out, ds, out);
-}
-
static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
unsigned int nbytes, u8 *out)
{
@@ -146,9 +154,6 @@ static int hmac_init_tfm(struct crypto_shash *parent)
if (IS_ERR(hash))
return PTR_ERR(hash);
- parent->descsize = sizeof(struct shash_desc) +
- crypto_shash_descsize(hash);
-
tctx->hash = hash;
return 0;
}
@@ -174,26 +179,23 @@ static void hmac_exit_tfm(struct crypto_shash *parent)
crypto_free_shash(tctx->hash);
}
-static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+static int __hmac_create_shash(struct crypto_template *tmpl,
+ struct rtattr **tb, u32 mask)
{
struct shash_instance *inst;
struct crypto_shash_spawn *spawn;
struct crypto_alg *alg;
struct shash_alg *salg;
- u32 mask;
int err;
int ds;
int ss;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
- if (err)
- return err;
-
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = shash_instance_ctx(inst);
+ mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
err = crypto_grab_shash(spawn, shash_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
@@ -212,7 +214,8 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
ss < alg->cra_blocksize)
goto err_free_inst;
- err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
+ err = crypto_inst_setname(shash_crypto_instance(inst), "hmac",
+ "hmac-shash", alg);
if (err)
goto err_free_inst;
@@ -222,12 +225,14 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.digestsize = ds;
inst->alg.statesize = ss;
+ inst->alg.descsize = sizeof(struct shash_desc) + salg->descsize;
inst->alg.init = hmac_init;
inst->alg.update = hmac_update;
- inst->alg.final = hmac_final;
inst->alg.finup = hmac_finup;
inst->alg.export = hmac_export;
inst->alg.import = hmac_import;
+ inst->alg.export_core = hmac_export_core;
+ inst->alg.import_core = hmac_import_core;
inst->alg.setkey = hmac_setkey;
inst->alg.init_tfm = hmac_init_tfm;
inst->alg.clone_tfm = hmac_clone_tfm;
@@ -243,23 +248,332 @@ err_free_inst:
return err;
}
-static struct crypto_template hmac_tmpl = {
- .name = "hmac",
- .create = hmac_create,
- .module = THIS_MODULE,
+static int hmac_setkey_ahash(struct crypto_ahash *parent,
+ const u8 *inkey, unsigned int keylen)
+{
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+ struct crypto_ahash *fb = crypto_ahash_fb(tctx->hash);
+ int ds = crypto_ahash_digestsize(parent);
+ int bs = crypto_ahash_blocksize(parent);
+ int ss = crypto_ahash_statesize(parent);
+ HASH_REQUEST_ON_STACK(req, fb);
+ u8 *opad = &tctx->pads[ss];
+ u8 *ipad = &tctx->pads[0];
+ int err, i;
+
+ if (fips_enabled && (keylen < 112 / 8))
+ return -EINVAL;
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+
+ if (keylen > bs) {
+ ahash_request_set_virt(req, inkey, ipad, keylen);
+ err = crypto_ahash_digest(req);
+ if (err)
+ goto out_zero_req;
+
+ keylen = ds;
+ } else
+ memcpy(ipad, inkey, keylen);
+
+ memset(ipad + keylen, 0, bs - keylen);
+ memcpy(opad, ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ ipad[i] ^= HMAC_IPAD_VALUE;
+ opad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ ahash_request_set_virt(req, ipad, NULL, bs);
+ err = crypto_ahash_init(req) ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export(req, ipad);
+
+ ahash_request_set_virt(req, opad, NULL, bs);
+ err = err ?:
+ crypto_ahash_init(req) ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export(req, opad);
+
+out_zero_req:
+ HASH_REQUEST_ZERO(req);
+ return err;
+}
+
+static int hmac_export_ahash(struct ahash_request *preq, void *out)
+{
+ return crypto_ahash_export(ahash_request_ctx(preq), out);
+}
+
+static int hmac_import_ahash(struct ahash_request *preq, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_tfm(req, tctx->hash);
+ return crypto_ahash_import(req, in);
+}
+
+static int hmac_export_core_ahash(struct ahash_request *preq, void *out)
+{
+ return crypto_ahash_export_core(ahash_request_ctx(preq), out);
+}
+
+static int hmac_import_core_ahash(struct ahash_request *preq, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_tfm(req, tctx->hash);
+ return crypto_ahash_import_core(req, in);
+}
+
+static int hmac_init_ahash(struct ahash_request *preq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ return hmac_import_ahash(preq, &tctx->pads[0]);
+}
+
+static int hmac_update_ahash(struct ahash_request *preq)
+{
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_callback(req, ahash_request_flags(preq),
+ preq->base.complete, preq->base.data);
+ if (ahash_request_isvirt(preq))
+ ahash_request_set_virt(req, preq->svirt, NULL, preq->nbytes);
+ else
+ ahash_request_set_crypt(req, preq->src, NULL, preq->nbytes);
+ return crypto_ahash_update(req);
+}
+
+static int hmac_finup_finish(struct ahash_request *preq, unsigned int mask)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_request *req = ahash_request_ctx(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+ int ds = crypto_ahash_digestsize(tfm);
+ int ss = crypto_ahash_statesize(tfm);
+ const u8 *opad = &tctx->pads[ss];
+
+ ahash_request_set_callback(req, ahash_request_flags(preq) & ~mask,
+ preq->base.complete, preq->base.data);
+ ahash_request_set_virt(req, preq->result, preq->result, ds);
+ return crypto_ahash_import(req, opad) ?:
+ crypto_ahash_finup(req);
+
+}
+
+static void hmac_finup_done(void *data, int err)
+{
+ struct ahash_request *preq = data;
+
+ if (err)
+ goto out;
+
+ err = hmac_finup_finish(preq, CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+
+out:
+ ahash_request_complete(preq, err);
+}
+
+static int hmac_finup_ahash(struct ahash_request *preq)
+{
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_callback(req, ahash_request_flags(preq),
+ hmac_finup_done, preq);
+ if (ahash_request_isvirt(preq))
+ ahash_request_set_virt(req, preq->svirt, preq->result,
+ preq->nbytes);
+ else
+ ahash_request_set_crypt(req, preq->src, preq->result,
+ preq->nbytes);
+ return crypto_ahash_finup(req) ?:
+ hmac_finup_finish(preq, 0);
+}
+
+static int hmac_digest_ahash(struct ahash_request *preq)
+{
+ return hmac_init_ahash(preq) ?:
+ hmac_finup_ahash(preq);
+}
+
+static int hmac_init_ahash_tfm(struct crypto_ahash *parent)
+{
+ struct ahash_instance *inst = ahash_alg_instance(parent);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+ struct crypto_ahash *hash;
+
+ hash = crypto_spawn_ahash(ahash_instance_ctx(inst));
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ if (crypto_ahash_reqsize(parent) < sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(hash))
+ return -EINVAL;
+
+ tctx->hash = hash;
+ return 0;
+}
+
+static int hmac_clone_ahash_tfm(struct crypto_ahash *dst,
+ struct crypto_ahash *src)
+{
+ struct ahash_hmac_ctx *sctx = crypto_ahash_ctx(src);
+ struct ahash_hmac_ctx *dctx = crypto_ahash_ctx(dst);
+ struct crypto_ahash *hash;
+
+ hash = crypto_clone_ahash(sctx->hash);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ dctx->hash = hash;
+ return 0;
+}
+
+static void hmac_exit_ahash_tfm(struct crypto_ahash *parent)
+{
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+
+ crypto_free_ahash(tctx->hash);
+}
+
+static int hmac_create_ahash(struct crypto_template *tmpl, struct rtattr **tb,
+ u32 mask)
+{
+ struct crypto_ahash_spawn *spawn;
+ struct ahash_instance *inst;
+ struct crypto_alg *alg;
+ struct hash_alg_common *halg;
+ int ds, ss, err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ spawn = ahash_instance_ctx(inst);
+
+ mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+ err = crypto_grab_ahash(spawn, ahash_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
+ if (err)
+ goto err_free_inst;
+ halg = crypto_spawn_ahash_alg(spawn);
+ alg = &halg->base;
+
+ /* The underlying hash algorithm must not require a key */
+ err = -EINVAL;
+ if (crypto_hash_alg_needs_key(halg))
+ goto err_free_inst;
+
+ ds = halg->digestsize;
+ ss = halg->statesize;
+ if (ds > alg->cra_blocksize || ss < alg->cra_blocksize)
+ goto err_free_inst;
+
+ err = crypto_inst_setname(ahash_crypto_instance(inst), tmpl->name, alg);
+ if (err)
+ goto err_free_inst;
+
+ inst->alg.halg.base.cra_flags = alg->cra_flags &
+ CRYPTO_ALG_INHERITED_FLAGS;
+ inst->alg.halg.base.cra_flags |= CRYPTO_ALG_REQ_VIRT;
+ inst->alg.halg.base.cra_priority = alg->cra_priority + 100;
+ inst->alg.halg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.halg.base.cra_ctxsize = sizeof(struct ahash_hmac_ctx) +
+ (ss * 2);
+ inst->alg.halg.base.cra_reqsize = sizeof(struct ahash_request) +
+ alg->cra_reqsize;
+
+ inst->alg.halg.digestsize = ds;
+ inst->alg.halg.statesize = ss;
+ inst->alg.init = hmac_init_ahash;
+ inst->alg.update = hmac_update_ahash;
+ inst->alg.finup = hmac_finup_ahash;
+ inst->alg.digest = hmac_digest_ahash;
+ inst->alg.export = hmac_export_ahash;
+ inst->alg.import = hmac_import_ahash;
+ inst->alg.export_core = hmac_export_core_ahash;
+ inst->alg.import_core = hmac_import_core_ahash;
+ inst->alg.setkey = hmac_setkey_ahash;
+ inst->alg.init_tfm = hmac_init_ahash_tfm;
+ inst->alg.clone_tfm = hmac_clone_ahash_tfm;
+ inst->alg.exit_tfm = hmac_exit_ahash_tfm;
+
+ inst->free = ahash_free_singlespawn_instance;
+
+ err = ahash_register_instance(tmpl, inst);
+ if (err) {
+err_free_inst:
+ ahash_free_singlespawn_instance(inst);
+ }
+ return err;
+}
+
+static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+ u32 mask;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ mask = crypto_algt_inherited_mask(algt);
+
+ if (!((algt->type ^ CRYPTO_ALG_TYPE_AHASH) &
+ algt->mask & CRYPTO_ALG_TYPE_MASK))
+ return hmac_create_ahash(tmpl, tb, mask);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_SHASH) &
+ algt->mask & CRYPTO_ALG_TYPE_MASK)
+ return -EINVAL;
+
+ return __hmac_create_shash(tmpl, tb, mask);
+}
+
+static int hmac_create_shash(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
+ if (err)
+ return err == -EINVAL ? -ENOENT : err;
+
+ return __hmac_create_shash(tmpl, tb, mask);
+}
+
+static struct crypto_template hmac_tmpls[] = {
+ {
+ .name = "hmac",
+ .create = hmac_create,
+ .module = THIS_MODULE,
+ },
+ {
+ .name = "hmac-shash",
+ .create = hmac_create_shash,
+ .module = THIS_MODULE,
+ },
};
static int __init hmac_module_init(void)
{
- return crypto_register_template(&hmac_tmpl);
+ return crypto_register_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls));
}
static void __exit hmac_module_exit(void)
{
- crypto_unregister_template(&hmac_tmpl);
+ crypto_unregister_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls));
}
-subsys_initcall(hmac_module_init);
+module_init(hmac_module_init);
module_exit(hmac_module_exit);
MODULE_LICENSE("GPL");
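
Both the shash and the new ahash hmac paths above precompute the ipad/opad partial-hash states in setkey and import them at init/finup time; the underlying construction is plain RFC 2104 HMAC. A hedged, self-contained sketch of that construction with hypothetical toy_* names and a placeholder digest (not the kernel implementation):

#include <linux/string.h>
#include <linux/types.h>

#define TOY_HMAC_BS	64	/* block size of the underlying hash */
#define TOY_HMAC_DS	32	/* digest size of the underlying hash */

/* Placeholder digest over a || b: XOR-folds input (NOT a real hash). */
static void toy_hash2(const u8 *a, size_t alen, const u8 *b, size_t blen,
		      u8 *out)
{
	size_t i;

	memset(out, 0, TOY_HMAC_DS);
	for (i = 0; i < alen; i++)
		out[i % TOY_HMAC_DS] ^= a[i];
	for (i = 0; i < blen; i++)
		out[i % TOY_HMAC_DS] ^= b[i];
}

/* HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)) per RFC 2104. */
static void toy_hmac(const u8 *key, size_t keylen,
		     const u8 *msg, size_t msglen, u8 *out)
{
	u8 kprime[TOY_HMAC_BS] = {}, ipad[TOY_HMAC_BS], opad[TOY_HMAC_BS];
	u8 inner[TOY_HMAC_DS];
	int i;

	if (keylen > TOY_HMAC_BS)
		toy_hash2(key, keylen, NULL, 0, kprime);	/* K' = H(K) */
	else
		memcpy(kprime, key, keylen);

	for (i = 0; i < TOY_HMAC_BS; i++) {
		ipad[i] = kprime[i] ^ 0x36;	/* HMAC_IPAD_VALUE */
		opad[i] = kprime[i] ^ 0x5c;	/* HMAC_OPAD_VALUE */
	}

	toy_hash2(ipad, TOY_HMAC_BS, msg, msglen, inner);
	toy_hash2(opad, TOY_HMAC_BS, inner, TOY_HMAC_DS, out);
}
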
diff --git a/crypto/internal.h b/crypto/internal.h
index 11567ea24fc3..b9afd68767c1 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -46,6 +46,7 @@ struct crypto_type {
unsigned int maskclear;
unsigned int maskset;
unsigned int tfmsize;
+ unsigned int algsize;
};
enum {
@@ -66,8 +67,7 @@ extern struct blocking_notifier_head crypto_chain;
int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
-#if !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || \
- IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)
+#if !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
static inline bool crypto_boot_test_finished(void)
{
return true;
@@ -86,7 +86,7 @@ static inline void set_crypto_boot_test_finished(void)
static_branch_enable(&__crypto_boot_test_finished);
}
#endif /* !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) ||
- * IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)
+ * !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
*/
#ifdef CONFIG_PROC_FS
@@ -128,7 +128,6 @@ void *crypto_create_tfm_node(struct crypto_alg *alg,
const struct crypto_type *frontend, int node);
void *crypto_clone_tfm(const struct crypto_type *frontend,
struct crypto_tfm *otfm);
-void crypto_destroy_alg(struct crypto_alg *alg);
static inline void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend)
@@ -163,6 +162,8 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
return alg;
}
+void crypto_destroy_alg(struct crypto_alg *alg);
+
static inline void crypto_alg_put(struct crypto_alg *alg)
{
if (refcount_dec_and_test(&alg->cra_refcnt))
diff --git a/crypto/kdf_sp800108.c b/crypto/kdf_sp800108.c
index c3f9938e1ad2..b7a6bf9da773 100644
--- a/crypto/kdf_sp800108.c
+++ b/crypto/kdf_sp800108.c
@@ -127,7 +127,7 @@ static int __init crypto_kdf108_init(void)
{
int ret;
- if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
return 0;
ret = kdf_test(&kdf_ctr_hmac_sha256_tv_template[0], "hmac(sha256)",
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 7ad338ca2c18..024264ee9cd1 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -871,7 +871,7 @@ static void __exit khazad_mod_fini(void)
}
-subsys_initcall(khazad_mod_init);
+module_init(khazad_mod_init);
module_exit(khazad_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/kpp.c b/crypto/kpp.c
index ecc63a1a948d..2e0cefe7a25f 100644
--- a/crypto/kpp.c
+++ b/crypto/kpp.c
@@ -80,6 +80,7 @@ static const struct crypto_type crypto_kpp_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_KPP,
.tfmsize = offsetof(struct crypto_kpp, base),
+ .algsize = offsetof(struct kpp_alg, base),
};
struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask)
diff --git a/crypto/krb5/rfc3961_simplified.c b/crypto/krb5/rfc3961_simplified.c
index 79180d28baa9..e49cbdec7c40 100644
--- a/crypto/krb5/rfc3961_simplified.c
+++ b/crypto/krb5/rfc3961_simplified.c
@@ -89,6 +89,7 @@ int crypto_shash_update_sg(struct shash_desc *desc, struct scatterlist *sg,
sg_miter_start(&miter, sg, sg_nents(sg),
SG_MITER_FROM_SG | SG_MITER_LOCAL);
+ sg_miter_skip(&miter, offset);
for (i = 0; i < len; i += n) {
sg_miter_next(&miter);
n = min(miter.length, len - i);
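
The one-line fix above makes the sg walk honour the caller's starting offset by skipping it before the first mapping. A minimal sketch of iterating a scatterlist from an offset with the sg_mapping_iter API, where consume() is a hypothetical per-chunk callback:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

static void walk_sg_from_offset(struct scatterlist *sg, unsigned int offset,
				unsigned int len,
				void (*consume)(const void *p, size_t n))
{
	struct sg_mapping_iter miter;
	unsigned int i, n;

	sg_miter_start(&miter, sg, sg_nents(sg),
		       SG_MITER_FROM_SG | SG_MITER_LOCAL);
	sg_miter_skip(&miter, offset);		/* the step the fix adds */
	for (i = 0; i < len; i += n) {
		sg_miter_next(&miter);
		n = min_t(unsigned int, miter.length, len - i);
		consume(miter.addr, n);		/* mapped chunk of the sg */
	}
	sg_miter_stop(&miter);
}
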
diff --git a/crypto/krb5enc.c b/crypto/krb5enc.c
index d07769bf149e..a1de55994d92 100644
--- a/crypto/krb5enc.c
+++ b/crypto/krb5enc.c
@@ -496,7 +496,7 @@ static void __exit crypto_krb5enc_module_exit(void)
crypto_unregister_template(&crypto_krb5enc_tmpl);
}
-subsys_initcall(crypto_krb5enc_module_init);
+module_init(crypto_krb5enc_module_init);
module_exit(crypto_krb5enc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 391ae0f7641f..dd403b800513 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -322,7 +322,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
cipher_name, 0, mask);
- if (err == -ENOENT) {
+ if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) {
err = -ENAMETOOLONG;
if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
@@ -356,7 +356,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
/* Alas we screwed up the naming so we have to mangle the
* cipher name.
*/
- if (!strncmp(cipher_name, "ecb(", 4)) {
+ if (!memcmp(cipher_name, "ecb(", 4)) {
int len;
len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
@@ -420,7 +420,7 @@ static void __exit lrw_module_exit(void)
crypto_unregister_template(&lrw_tmpl);
}
-subsys_initcall(lrw_module_init);
+module_init(lrw_module_init);
module_exit(lrw_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index cdb4897c63e6..c2e2c38b5aa8 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -294,6 +294,7 @@ static const struct crypto_type crypto_lskcipher_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_LSKCIPHER,
.tfmsize = offsetof(struct crypto_lskcipher, base),
+ .algsize = offsetof(struct lskcipher_alg, co.base),
};
static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm)
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 82588607fb2e..7a984ae5ae52 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -12,10 +12,6 @@
#include <linux/lz4.h>
#include <crypto/internal/scompress.h>
-struct lz4_ctx {
- void *lz4_comp_mem;
-};
-
static void *lz4_alloc_ctx(void)
{
void *ctx;
@@ -93,7 +89,7 @@ static void __exit lz4_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lz4_mod_init);
+module_init(lz4_mod_init);
module_exit(lz4_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 997e76c0183a..9c61d05b6214 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -10,10 +10,6 @@
#include <linux/vmalloc.h>
#include <linux/lz4.h>
-struct lz4hc_ctx {
- void *lz4hc_comp_mem;
-};
-
static void *lz4hc_alloc_ctx(void)
{
void *ctx;
@@ -91,7 +87,7 @@ static void __exit lz4hc_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lz4hc_mod_init);
+module_init(lz4hc_mod_init);
module_exit(lz4hc_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c
index b1350ae278b8..ba013f2d5090 100644
--- a/crypto/lzo-rle.c
+++ b/crypto/lzo-rle.c
@@ -9,10 +9,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-struct lzorle_ctx {
- void *lzorle_comp_mem;
-};
-
static void *lzorle_alloc_ctx(void)
{
void *ctx;
@@ -95,7 +91,7 @@ static void __exit lzorle_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lzorle_mod_init);
+module_init(lzorle_mod_init);
module_exit(lzorle_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lzo.c b/crypto/lzo.c
index dfe5a07ca35f..7867e2c67c4e 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -9,10 +9,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-struct lzo_ctx {
- void *lzo_comp_mem;
-};
-
static void *lzo_alloc_ctx(void)
{
void *ctx;
@@ -95,7 +91,7 @@ static void __exit lzo_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lzo_mod_init);
+module_init(lzo_mod_init);
module_exit(lzo_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/md4.c b/crypto/md4.c
index 2e7f2f319f95..55bf47e23c13 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -233,7 +233,7 @@ static void __exit md4_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(md4_mod_init);
+module_init(md4_mod_init);
module_exit(md4_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/md5.c b/crypto/md5.c
index 72c0c46fb5ee..32c0819f5118 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -17,11 +17,9 @@
*/
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
-#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
@@ -120,10 +118,11 @@ static void md5_transform(__u32 *hash, __u32 const *in)
hash[3] += d;
}
-static inline void md5_transform_helper(struct md5_state *ctx)
+static inline void md5_transform_helper(struct md5_state *ctx,
+ u32 block[MD5_BLOCK_WORDS])
{
- le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
- md5_transform(ctx->hash, ctx->block);
+ le32_to_cpu_array(block, MD5_BLOCK_WORDS);
+ md5_transform(ctx->hash, block);
}
static int md5_init(struct shash_desc *desc)
@@ -142,76 +141,53 @@ static int md5_init(struct shash_desc *desc)
static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
struct md5_state *mctx = shash_desc_ctx(desc);
- const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
+ u32 block[MD5_BLOCK_WORDS];
mctx->byte_count += len;
-
- if (avail > len) {
- memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
- data, len);
- return 0;
- }
-
- memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
- data, avail);
-
- md5_transform_helper(mctx);
- data += avail;
- len -= avail;
-
- while (len >= sizeof(mctx->block)) {
- memcpy(mctx->block, data, sizeof(mctx->block));
- md5_transform_helper(mctx);
- data += sizeof(mctx->block);
- len -= sizeof(mctx->block);
- }
-
- memcpy(mctx->block, data, len);
-
- return 0;
+ do {
+ memcpy(block, data, sizeof(block));
+ md5_transform_helper(mctx, block);
+ data += sizeof(block);
+ len -= sizeof(block);
+ } while (len >= sizeof(block));
+ memzero_explicit(block, sizeof(block));
+ mctx->byte_count -= len;
+ return len;
}
-static int md5_final(struct shash_desc *desc, u8 *out)
+static int md5_finup(struct shash_desc *desc, const u8 *data, unsigned int len,
+ u8 *out)
{
struct md5_state *mctx = shash_desc_ctx(desc);
- const unsigned int offset = mctx->byte_count & 0x3f;
- char *p = (char *)mctx->block + offset;
- int padding = 56 - (offset + 1);
+ u32 block[MD5_BLOCK_WORDS];
+ unsigned int offset;
+ int padding;
+ char *p;
+
+ memcpy(block, data, len);
+
+ offset = len;
+ p = (char *)block + offset;
+ padding = 56 - (offset + 1);
*p++ = 0x80;
if (padding < 0) {
memset(p, 0x00, padding + sizeof (u64));
- md5_transform_helper(mctx);
- p = (char *)mctx->block;
+ md5_transform_helper(mctx, block);
+ p = (char *)block;
padding = 56;
}
memset(p, 0, padding);
- mctx->block[14] = mctx->byte_count << 3;
- mctx->block[15] = mctx->byte_count >> 29;
- le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
- sizeof(u64)) / sizeof(u32));
- md5_transform(mctx->hash, mctx->block);
+ mctx->byte_count += len;
+ block[14] = mctx->byte_count << 3;
+ block[15] = mctx->byte_count >> 29;
+ le32_to_cpu_array(block, (sizeof(block) - sizeof(u64)) / sizeof(u32));
+ md5_transform(mctx->hash, block);
+ memzero_explicit(block, sizeof(block));
cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32));
memcpy(out, mctx->hash, sizeof(mctx->hash));
- memset(mctx, 0, sizeof(*mctx));
-
- return 0;
-}
-
-static int md5_export(struct shash_desc *desc, void *out)
-{
- struct md5_state *ctx = shash_desc_ctx(desc);
-
- memcpy(out, ctx, sizeof(*ctx));
- return 0;
-}
-
-static int md5_import(struct shash_desc *desc, const void *in)
-{
- struct md5_state *ctx = shash_desc_ctx(desc);
- memcpy(ctx, in, sizeof(*ctx));
return 0;
}
@@ -219,14 +195,12 @@ static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = md5_init,
.update = md5_update,
- .final = md5_final,
- .export = md5_export,
- .import = md5_import,
- .descsize = sizeof(struct md5_state),
- .statesize = sizeof(struct md5_state),
+ .finup = md5_finup,
+ .descsize = MD5_STATE_SIZE,
.base = {
.cra_name = "md5",
.cra_driver_name = "md5-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -242,7 +216,7 @@ static void __exit md5_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(md5_mod_init);
+module_init(md5_mod_init);
module_exit(md5_mod_fini);
MODULE_LICENSE("GPL");
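
The md5_finup() path above keeps the classic Merkle-Damgard padding rule: the 0x80 terminator and the 64-bit bit count must still fit in the final 64-byte block, otherwise a second block is emitted (that is what the "padding < 0" branch handles). A tiny illustrative helper, not taken from the kernel, that computes how many final blocks a given tail length needs:

/* tail_len is the final partial fragment, 0..63 bytes */
static unsigned int md5_pad_blocks(unsigned int tail_len)
{
	/* 1 byte of 0x80 plus an 8-byte length must fit in the 64-byte block */
	return (tail_len + 1 + 8 > 64) ? 2 : 1;
}

So a 55-byte tail fits exactly in one block (55 + 1 + 8 = 64), while a 56-byte tail spills into a second, mostly-zero block.
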
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 0d14e980d4d6..69ad35f524d7 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -167,7 +167,7 @@ static void __exit michael_mic_exit(void)
}
-subsys_initcall(michael_mic_init);
+module_init(michael_mic_init);
module_exit(michael_mic_exit);
MODULE_LICENSE("GPL v2");
diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c
index a661d4f667cd..2b648615b5ec 100644
--- a/crypto/nhpoly1305.c
+++ b/crypto/nhpoly1305.c
@@ -245,7 +245,7 @@ static void __exit nhpoly1305_mod_exit(void)
crypto_unregister_shash(&nhpoly1305_alg);
}
-subsys_initcall(nhpoly1305_mod_init);
+module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function");
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 9d2e56d6744a..d092717ea4fc 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -186,7 +186,7 @@ static void __exit crypto_pcbc_module_exit(void)
crypto_unregister_template(&crypto_pcbc_tmpl);
}
-subsys_initcall(crypto_pcbc_module_init);
+module_init(crypto_pcbc_module_init);
module_exit(crypto_pcbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 7fc79e7dce44..c33d29a523e0 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -381,7 +381,7 @@ static void __exit pcrypt_exit(void)
kset_unregister(pcrypt_kset);
}
-subsys_initcall(pcrypt_init);
+module_init(pcrypt_init);
module_exit(pcrypt_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
deleted file mode 100644
index e6f29a98725a..000000000000
--- a/crypto/poly1305_generic.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Poly1305 authenticator algorithm, RFC7539
- *
- * Copyright (C) 2015 Martin Willi
- *
- * Based on public domain code by Andrew Moon and Daniel J. Bernstein.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/poly1305.h>
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/unaligned.h>
-
-static int crypto_poly1305_init(struct shash_desc *desc)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- poly1305_core_init(&dctx->h);
- dctx->buflen = 0;
- dctx->rset = 0;
- dctx->sset = false;
-
- return 0;
-}
-
-static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen)
-{
- if (!dctx->sset) {
- if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
- poly1305_core_setkey(&dctx->core_r, src);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->rset = 2;
- }
- if (srclen >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(src + 0);
- dctx->s[1] = get_unaligned_le32(src + 4);
- dctx->s[2] = get_unaligned_le32(src + 8);
- dctx->s[3] = get_unaligned_le32(src + 12);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- }
- return srclen;
-}
-
-static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
- unsigned int srclen)
-{
- unsigned int datalen;
-
- if (unlikely(!dctx->sset)) {
- datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
- src += srclen - datalen;
- srclen = datalen;
- }
-
- poly1305_core_blocks(&dctx->h, &dctx->core_r, src,
- srclen / POLY1305_BLOCK_SIZE, 1);
-}
-
-static int crypto_poly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- unsigned int bytes;
-
- if (unlikely(dctx->buflen)) {
- bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- srclen -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- poly1305_blocks(dctx, dctx->buf,
- POLY1305_BLOCK_SIZE);
- dctx->buflen = 0;
- }
- }
-
- if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
- poly1305_blocks(dctx, src, srclen);
- src += srclen - (srclen % POLY1305_BLOCK_SIZE);
- srclen %= POLY1305_BLOCK_SIZE;
- }
-
- if (unlikely(srclen)) {
- dctx->buflen = srclen;
- memcpy(dctx->buf, src, srclen);
- }
-
- return 0;
-}
-
-static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (unlikely(!dctx->sset))
- return -ENOKEY;
-
- poly1305_final_generic(dctx, dst);
- return 0;
-}
-
-static struct shash_alg poly1305_alg = {
- .digestsize = POLY1305_DIGEST_SIZE,
- .init = crypto_poly1305_init,
- .update = crypto_poly1305_update,
- .final = crypto_poly1305_final,
- .descsize = sizeof(struct poly1305_desc_ctx),
- .base = {
- .cra_name = "poly1305",
- .cra_driver_name = "poly1305-generic",
- .cra_priority = 100,
- .cra_blocksize = POLY1305_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- },
-};
-
-static int __init poly1305_mod_init(void)
-{
- return crypto_register_shash(&poly1305_alg);
-}
-
-static void __exit poly1305_mod_exit(void)
-{
- crypto_unregister_shash(&poly1305_alg);
-}
-
-subsys_initcall(poly1305_mod_init);
-module_exit(poly1305_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
-MODULE_DESCRIPTION("Poly1305 authenticator");
-MODULE_ALIAS_CRYPTO("poly1305");
-MODULE_ALIAS_CRYPTO("poly1305-generic");
diff --git a/crypto/polyval-generic.c b/crypto/polyval-generic.c
index 4f98910bcdb5..db8adb56e4ca 100644
--- a/crypto/polyval-generic.c
+++ b/crypto/polyval-generic.c
@@ -44,15 +44,15 @@
*
*/
-#include <linux/unaligned.h>
-#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
-#include <crypto/polyval.h>
#include <crypto/internal/hash.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
+#include <crypto/polyval.h>
+#include <crypto/utils.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
struct polyval_tfm_ctx {
struct gf128mul_4k *gf128;
@@ -63,7 +63,6 @@ struct polyval_desc_ctx {
u8 buffer[POLYVAL_BLOCK_SIZE];
be128 buffer128;
};
- u32 bytes;
};
static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE],
@@ -76,46 +75,6 @@ static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE],
put_unaligned(swab64(b), (u64 *)&dst[0]);
}
-/*
- * Performs multiplication in the POLYVAL field using the GHASH field as a
- * subroutine. This function is used as a fallback for hardware accelerated
- * implementations when simd registers are unavailable.
- *
- * Note: This function is not used for polyval-generic, instead we use the 4k
- * lookup table implementation for finite field multiplication.
- */
-void polyval_mul_non4k(u8 *op1, const u8 *op2)
-{
- be128 a, b;
-
- // Assume one argument is in Montgomery form and one is not.
- copy_and_reverse((u8 *)&a, op1);
- copy_and_reverse((u8 *)&b, op2);
- gf128mul_x_lle(&a, &a);
- gf128mul_lle(&a, &b);
- copy_and_reverse(op1, (u8 *)&a);
-}
-EXPORT_SYMBOL_GPL(polyval_mul_non4k);
-
-/*
- * Perform a POLYVAL update using non4k multiplication. This function is used
- * as a fallback for hardware accelerated implementations when simd registers
- * are unavailable.
- *
- * Note: This function is not used for polyval-generic, instead we use the 4k
- * lookup table implementation of finite field multiplication.
- */
-void polyval_update_non4k(const u8 *key, const u8 *in,
- size_t nblocks, u8 *accumulator)
-{
- while (nblocks--) {
- crypto_xor(accumulator, in, POLYVAL_BLOCK_SIZE);
- polyval_mul_non4k(accumulator, key);
- in += POLYVAL_BLOCK_SIZE;
- }
-}
-EXPORT_SYMBOL_GPL(polyval_update_non4k);
-
static int polyval_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
@@ -154,56 +113,53 @@ static int polyval_update(struct shash_desc *desc,
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm);
- u8 *pos;
u8 tmp[POLYVAL_BLOCK_SIZE];
- int n;
-
- if (dctx->bytes) {
- n = min(srclen, dctx->bytes);
- pos = dctx->buffer + dctx->bytes - 1;
-
- dctx->bytes -= n;
- srclen -= n;
-
- while (n--)
- *pos-- ^= *src++;
- if (!dctx->bytes)
- gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
- }
-
- while (srclen >= POLYVAL_BLOCK_SIZE) {
+ do {
copy_and_reverse(tmp, src);
crypto_xor(dctx->buffer, tmp, POLYVAL_BLOCK_SIZE);
gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
src += POLYVAL_BLOCK_SIZE;
srclen -= POLYVAL_BLOCK_SIZE;
- }
+ } while (srclen >= POLYVAL_BLOCK_SIZE);
+
+ return srclen;
+}
- if (srclen) {
- dctx->bytes = POLYVAL_BLOCK_SIZE - srclen;
- pos = dctx->buffer + POLYVAL_BLOCK_SIZE - 1;
- while (srclen--)
- *pos-- ^= *src++;
+static int polyval_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
+{
+ struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (len) {
+ u8 tmp[POLYVAL_BLOCK_SIZE] = {};
+
+ memcpy(tmp, src, len);
+ polyval_update(desc, tmp, POLYVAL_BLOCK_SIZE);
}
+ copy_and_reverse(dst, dctx->buffer);
+ return 0;
+}
+
+static int polyval_export(struct shash_desc *desc, void *out)
+{
+ struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
+ copy_and_reverse(out, dctx->buffer);
return 0;
}
-static int polyval_final(struct shash_desc *desc, u8 *dst)
+static int polyval_import(struct shash_desc *desc, const void *in)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
- const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm);
- if (dctx->bytes)
- gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
- copy_and_reverse(dst, dctx->buffer);
+ copy_and_reverse(dctx->buffer, in);
return 0;
}
-static void polyval_exit_tfm(struct crypto_tfm *tfm)
+static void polyval_exit_tfm(struct crypto_shash *tfm)
{
- struct polyval_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct polyval_tfm_ctx *ctx = crypto_shash_ctx(tfm);
gf128mul_free_4k(ctx->gf128);
}
@@ -212,17 +168,21 @@ static struct shash_alg polyval_alg = {
.digestsize = POLYVAL_DIGEST_SIZE,
.init = polyval_init,
.update = polyval_update,
- .final = polyval_final,
+ .finup = polyval_finup,
.setkey = polyval_setkey,
+ .export = polyval_export,
+ .import = polyval_import,
+ .exit_tfm = polyval_exit_tfm,
+ .statesize = sizeof(struct polyval_desc_ctx),
.descsize = sizeof(struct polyval_desc_ctx),
.base = {
.cra_name = "polyval",
.cra_driver_name = "polyval-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = POLYVAL_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct polyval_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_exit = polyval_exit_tfm,
},
};
@@ -236,7 +196,7 @@ static void __exit polyval_mod_exit(void)
crypto_unregister_shash(&polyval_alg);
}
-subsys_initcall(polyval_mod_init);
+module_init(polyval_mod_init);
module_exit(polyval_mod_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index c5fe4034b153..9860b60c9be4 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -9,18 +9,14 @@
* Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
+#include <linux/string.h>
#include "ripemd.h"
struct rmd160_ctx {
u64 byte_count;
u32 state[5];
- __le32 buffer[16];
};
#define K1 RMD_K1
@@ -265,72 +261,59 @@ static int rmd160_init(struct shash_desc *desc)
rctx->state[3] = RMD_H3;
rctx->state[4] = RMD_H4;
- memset(rctx->buffer, 0, sizeof(rctx->buffer));
-
return 0;
}
static int rmd160_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ int remain = len - round_down(len, RMD160_BLOCK_SIZE);
struct rmd160_ctx *rctx = shash_desc_ctx(desc);
- const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
-
- rctx->byte_count += len;
+ __le32 buffer[RMD160_BLOCK_SIZE / 4];
- /* Enough space in buffer? If so copy and we're done */
- if (avail > len) {
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, len);
- goto out;
- }
-
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, avail);
+ rctx->byte_count += len - remain;
- rmd160_transform(rctx->state, rctx->buffer);
- data += avail;
- len -= avail;
-
- while (len >= sizeof(rctx->buffer)) {
- memcpy(rctx->buffer, data, sizeof(rctx->buffer));
- rmd160_transform(rctx->state, rctx->buffer);
- data += sizeof(rctx->buffer);
- len -= sizeof(rctx->buffer);
- }
+ do {
+ memcpy(buffer, data, sizeof(buffer));
+ rmd160_transform(rctx->state, buffer);
+ data += sizeof(buffer);
+ len -= sizeof(buffer);
+ } while (len >= sizeof(buffer));
- memcpy(rctx->buffer, data, len);
-
-out:
- return 0;
+ memzero_explicit(buffer, sizeof(buffer));
+ return remain;
}
/* Add padding and return the message digest. */
-static int rmd160_final(struct shash_desc *desc, u8 *out)
+static int rmd160_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
+ unsigned int bit_offset = RMD160_BLOCK_SIZE / 8 - 1;
struct rmd160_ctx *rctx = shash_desc_ctx(desc);
- u32 i, index, padlen;
- __le64 bits;
+ union {
+ __le64 l64[RMD160_BLOCK_SIZE / 4];
+ __le32 l32[RMD160_BLOCK_SIZE / 2];
+ u8 u8[RMD160_BLOCK_SIZE * 2];
+ } block = {};
__le32 *dst = (__le32 *)out;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_le64(rctx->byte_count << 3);
-
- /* Pad out to 56 mod 64 */
- index = rctx->byte_count & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- rmd160_update(desc, padding, padlen);
+ u32 i;
- /* Append length */
- rmd160_update(desc, (const u8 *)&bits, sizeof(bits));
+ rctx->byte_count += len;
+ if (len >= bit_offset * 8)
+ bit_offset += RMD160_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ block.l64[bit_offset] = cpu_to_le64(rctx->byte_count << 3);
+
+ rmd160_transform(rctx->state, block.l32);
+ if (bit_offset > RMD160_BLOCK_SIZE / 8)
+ rmd160_transform(rctx->state,
+ block.l32 + RMD160_BLOCK_SIZE / 4);
+ memzero_explicit(&block, sizeof(block));
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_le32p(&rctx->state[i]);
-
- /* Wipe context */
- memset(rctx, 0, sizeof(*rctx));
-
return 0;
}
@@ -338,11 +321,12 @@ static struct shash_alg alg = {
.digestsize = RMD160_DIGEST_SIZE,
.init = rmd160_init,
.update = rmd160_update,
- .final = rmd160_final,
+ .finup = rmd160_finup,
.descsize = sizeof(struct rmd160_ctx),
.base = {
.cra_name = "rmd160",
.cra_driver_name = "rmd160-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = RMD160_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -358,7 +342,7 @@ static void __exit rmd160_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(rmd160_mod_init);
+module_init(rmd160_mod_init);
module_exit(rmd160_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/rng.c b/crypto/rng.c
index 9d8804e46422..b8ae6ebc091d 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -98,6 +98,7 @@ static const struct crypto_type crypto_rng_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_RNG,
.tfmsize = offsetof(struct crypto_rng, base),
+ .algsize = offsetof(struct rng_alg, base),
};
struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask)
diff --git a/crypto/rsa.c b/crypto/rsa.c
index b7d21529c552..6c7734083c98 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -430,7 +430,7 @@ static void __exit rsa_exit(void)
crypto_unregister_akcipher(&rsa);
}
-subsys_initcall(rsa_init);
+module_init(rsa_init);
module_exit(rsa_exit);
MODULE_ALIAS_CRYPTO("rsa");
MODULE_LICENSE("GPL");
diff --git a/crypto/rsassa-pkcs1.c b/crypto/rsassa-pkcs1.c
index d01ac75635e0..94fa5e9600e7 100644
--- a/crypto/rsassa-pkcs1.c
+++ b/crypto/rsassa-pkcs1.c
@@ -301,7 +301,7 @@ static unsigned int rsassa_pkcs1_key_size(struct crypto_sig *tfm)
{
struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
- return ctx->key_size;
+ return ctx->key_size * BITS_PER_BYTE;
}
static int rsassa_pkcs1_set_pub_key(struct crypto_sig *tfm,
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 8225801488d5..1d010e2a1b1a 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -10,10 +10,25 @@
*/
#include <crypto/scatterwalk.h>
+#include <linux/crypto.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+enum {
+ SKCIPHER_WALK_SLOW = 1 << 0,
+ SKCIPHER_WALK_COPY = 1 << 1,
+ SKCIPHER_WALK_DIFF = 1 << 2,
+ SKCIPHER_WALK_SLEEP = 1 << 3,
+};
+
+static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
+{
+ return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes)
{
@@ -89,27 +104,23 @@ EXPORT_SYMBOL_GPL(memcpy_to_sglist);
void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct scatter_walk swalk;
- struct scatter_walk dwalk;
+ struct skcipher_walk walk = {};
if (unlikely(nbytes == 0)) /* in case sg == NULL */
return;
- scatterwalk_start(&swalk, src);
- scatterwalk_start(&dwalk, dst);
+ walk.total = nbytes;
+
+ scatterwalk_start(&walk.in, src);
+ scatterwalk_start(&walk.out, dst);
+ skcipher_walk_first(&walk, true);
do {
- unsigned int slen, dlen;
- unsigned int len;
-
- slen = scatterwalk_next(&swalk, nbytes);
- dlen = scatterwalk_next(&dwalk, nbytes);
- len = min(slen, dlen);
- memcpy(dwalk.addr, swalk.addr, len);
- scatterwalk_done_dst(&dwalk, len);
- scatterwalk_done_src(&swalk, len);
- nbytes -= len;
- } while (nbytes);
+ if (walk.src.virt.addr != walk.dst.virt.addr)
+ memcpy(walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes);
+ skcipher_walk_done(&walk, 0);
+ } while (walk.nbytes);
}
EXPORT_SYMBOL_GPL(memcpy_sglist);
@@ -135,3 +146,236 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
return dst;
}
EXPORT_SYMBOL_GPL(scatterwalk_ffwd);
+
+static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+ unsigned alignmask = walk->alignmask;
+ unsigned n;
+ void *buffer;
+
+ if (!walk->buffer)
+ walk->buffer = walk->page;
+ buffer = walk->buffer;
+ if (!buffer) {
+ /* Min size for a buffer of bsize bytes aligned to alignmask */
+ n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ buffer = kzalloc(n, skcipher_walk_gfp(walk));
+ if (!buffer)
+ return skcipher_walk_done(walk, -ENOMEM);
+ walk->buffer = buffer;
+ }
+
+ buffer = PTR_ALIGN(buffer, alignmask + 1);
+ memcpy_from_scatterwalk(buffer, &walk->in, bsize);
+ walk->out.__addr = buffer;
+ walk->in.__addr = walk->out.addr;
+
+ walk->nbytes = bsize;
+ walk->flags |= SKCIPHER_WALK_SLOW;
+
+ return 0;
+}
+
+static int skcipher_next_copy(struct skcipher_walk *walk)
+{
+ void *tmp = walk->page;
+
+ scatterwalk_map(&walk->in);
+ memcpy(tmp, walk->in.addr, walk->nbytes);
+ scatterwalk_unmap(&walk->in);
+ /*
+ * walk->in is advanced later when the number of bytes actually
+ * processed (which might be less than walk->nbytes) is known.
+ */
+
+ walk->in.__addr = tmp;
+ walk->out.__addr = tmp;
+ return 0;
+}
+
+static int skcipher_next_fast(struct skcipher_walk *walk)
+{
+ unsigned long diff;
+
+ diff = offset_in_page(walk->in.offset) -
+ offset_in_page(walk->out.offset);
+ diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
+ (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
+
+ scatterwalk_map(&walk->out);
+ walk->in.__addr = walk->out.__addr;
+
+ if (diff) {
+ walk->flags |= SKCIPHER_WALK_DIFF;
+ scatterwalk_map(&walk->in);
+ }
+
+ return 0;
+}
+
+static int skcipher_walk_next(struct skcipher_walk *walk)
+{
+ unsigned int bsize;
+ unsigned int n;
+
+ n = walk->total;
+ bsize = min(walk->stride, max(n, walk->blocksize));
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+ if (unlikely(n < bsize)) {
+ if (unlikely(walk->total < walk->blocksize))
+ return skcipher_walk_done(walk, -EINVAL);
+
+slow_path:
+ return skcipher_next_slow(walk, bsize);
+ }
+ walk->nbytes = n;
+
+ if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
+ if (!walk->page) {
+ gfp_t gfp = skcipher_walk_gfp(walk);
+
+ walk->page = (void *)__get_free_page(gfp);
+ if (!walk->page)
+ goto slow_path;
+ }
+ walk->flags |= SKCIPHER_WALK_COPY;
+ return skcipher_next_copy(walk);
+ }
+
+ return skcipher_next_fast(walk);
+}
+
+static int skcipher_copy_iv(struct skcipher_walk *walk)
+{
+ unsigned alignmask = walk->alignmask;
+ unsigned ivsize = walk->ivsize;
+ unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
+ unsigned size;
+ u8 *iv;
+
+ /* Min size for a buffer of stride + ivsize, aligned to alignmask */
+ size = aligned_stride + ivsize +
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
+ if (!walk->buffer)
+ return -ENOMEM;
+
+ iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
+
+ walk->iv = memcpy(iv, walk->iv, walk->ivsize);
+ return 0;
+}
+
+int skcipher_walk_first(struct skcipher_walk *walk, bool atomic)
+{
+ if (WARN_ON_ONCE(in_hardirq()))
+ return -EDEADLK;
+
+ walk->flags = atomic ? 0 : SKCIPHER_WALK_SLEEP;
+
+ walk->buffer = NULL;
+ if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+ int err = skcipher_copy_iv(walk);
+ if (err)
+ return err;
+ }
+
+ walk->page = NULL;
+
+ return skcipher_walk_next(walk);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_first);
+
+/**
+ * skcipher_walk_done() - finish one step of a skcipher_walk
+ * @walk: the skcipher_walk
+ * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
+ * or a -errno value to terminate the walk due to an error
+ *
+ * This function cleans up after one step of walking through the source and
+ * destination scatterlists, and advances to the next step if applicable.
+ * walk->nbytes is set to the number of bytes available in the next step,
+ * walk->total is set to the new total number of bytes remaining, and
+ * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
+ * is no more data, or if an error occurred (i.e. -errno return), then
+ * walk->nbytes and walk->total are set to 0 and all resources owned by the
+ * skcipher_walk are freed.
+ *
+ * Return: 0 or a -errno value. If @res was a -errno value then it will be
+ * returned, but other errors may occur too.
+ */
+int skcipher_walk_done(struct skcipher_walk *walk, int res)
+{
+ unsigned int n = walk->nbytes; /* num bytes processed this step */
+ unsigned int total = 0; /* new total remaining */
+
+ if (!n)
+ goto finish;
+
+ if (likely(res >= 0)) {
+ n -= res; /* subtract num bytes *not* processed */
+ total = walk->total - n;
+ }
+
+ if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
+ SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF)))) {
+ scatterwalk_advance(&walk->in, n);
+ } else if (walk->flags & SKCIPHER_WALK_DIFF) {
+ scatterwalk_done_src(&walk->in, n);
+ } else if (walk->flags & SKCIPHER_WALK_COPY) {
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_map(&walk->out);
+ memcpy(walk->out.addr, walk->page, n);
+ } else { /* SKCIPHER_WALK_SLOW */
+ if (res > 0) {
+ /*
+ * Didn't process all bytes. Either the algorithm is
+ * broken, or this was the last step and it turned out
+ * the message wasn't evenly divisible into blocks but
+ * the algorithm requires it.
+ */
+ res = -EINVAL;
+ total = 0;
+ } else
+ memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
+ goto dst_done;
+ }
+
+ scatterwalk_done_dst(&walk->out, n);
+dst_done:
+
+ if (res > 0)
+ res = 0;
+
+ walk->total = total;
+ walk->nbytes = 0;
+
+ if (total) {
+ if (walk->flags & SKCIPHER_WALK_SLEEP)
+ cond_resched();
+ walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF);
+ return skcipher_walk_next(walk);
+ }
+
+finish:
+ /* Short-circuit for the common/fast path. */
+ if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
+ goto out;
+
+ if (walk->iv != walk->oiv)
+ memcpy(walk->oiv, walk->iv, walk->ivsize);
+ if (walk->buffer != walk->page)
+ kfree(walk->buffer);
+ if (walk->page)
+ free_page((unsigned long)walk->page);
+
+out:
+ return res;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_done);
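
The walk machinery relocated above is driven from cipher implementations in the usual way: map the next matching source/destination chunk, transform it, and report any unprocessed remainder to skcipher_walk_done(). A hedged consumer-side sketch with a hypothetical toy_xor_encrypt() and a placeholder byte transform, not an actual kernel cipher:

#include <crypto/internal/skcipher.h>
#include <linux/types.h>

static int toy_xor_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		unsigned int n = walk.nbytes, i;
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		for (i = 0; i < n; i++)
			dst[i] = src[i] ^ 0xaa;	/* placeholder transform */

		/* second argument: bytes of this step left unprocessed */
		err = skcipher_walk_done(&walk, walk.nbytes - n);
	}
	return err;
}
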
diff --git a/crypto/scompress.c b/crypto/scompress.c
index d435d4b24469..c651e7f2197a 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -7,9 +7,9 @@
* Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
-#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
+#include <linux/cpumask.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/highmem.h>
@@ -20,20 +20,17 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
#include <net/netlink.h>
#include "compress.h"
-#define SCOMP_SCRATCH_SIZE 65400
-
struct scomp_scratch {
spinlock_t lock;
union {
void *src;
unsigned long saddr;
};
- void *dst;
};
static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
@@ -44,6 +41,10 @@ static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
+static cpumask_t scomp_scratch_want;
+static void scomp_scratch_workfn(struct work_struct *work);
+static DECLARE_WORK(scomp_scratch_work, scomp_scratch_workfn);
+
static int __maybe_unused crypto_scomp_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
@@ -74,78 +75,48 @@ static void crypto_scomp_free_scratches(void)
scratch = per_cpu_ptr(&scomp_scratch, i);
free_page(scratch->saddr);
- vfree(scratch->dst);
scratch->src = NULL;
- scratch->dst = NULL;
}
}
-static int crypto_scomp_alloc_scratches(void)
+static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu)
{
- struct scomp_scratch *scratch;
- int i;
-
- for_each_possible_cpu(i) {
- struct page *page;
- void *mem;
-
- scratch = per_cpu_ptr(&scomp_scratch, i);
+ int node = cpu_to_node(cpu);
+ struct page *page;
- page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, 0);
- if (!page)
- goto error;
- scratch->src = page_address(page);
- mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
- if (!mem)
- goto error;
- scratch->dst = mem;
- }
+ page = alloc_pages_node(node, GFP_KERNEL, 0);
+ if (!page)
+ return -ENOMEM;
+ spin_lock_bh(&scratch->lock);
+ scratch->src = page_address(page);
+ spin_unlock_bh(&scratch->lock);
return 0;
-error:
- crypto_scomp_free_scratches();
- return -ENOMEM;
}
-static void scomp_free_streams(struct scomp_alg *alg)
+static void scomp_scratch_workfn(struct work_struct *work)
{
- struct crypto_acomp_stream __percpu *stream = alg->stream;
- int i;
+ int cpu;
- for_each_possible_cpu(i) {
- struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);
+ for_each_cpu(cpu, &scomp_scratch_want) {
+ struct scomp_scratch *scratch;
- if (!ps->ctx)
+ scratch = per_cpu_ptr(&scomp_scratch, cpu);
+ if (scratch->src)
+ continue;
+ if (scomp_alloc_scratch(scratch, cpu))
break;
- alg->free_ctx(ps->ctx);
+ cpumask_clear_cpu(cpu, &scomp_scratch_want);
}
-
- free_percpu(stream);
}
-static int scomp_alloc_streams(struct scomp_alg *alg)
+static int crypto_scomp_alloc_scratches(void)
{
- struct crypto_acomp_stream __percpu *stream;
- int i;
-
- stream = alloc_percpu(struct crypto_acomp_stream);
- if (!stream)
- return -ENOMEM;
-
- for_each_possible_cpu(i) {
- struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);
-
- ps->ctx = alg->alloc_ctx();
- if (IS_ERR(ps->ctx)) {
- scomp_free_streams(alg);
- return PTR_ERR(ps->ctx);
- }
-
- spin_lock_init(&ps->lock);
- }
+ unsigned int i = cpumask_first(cpu_possible_mask);
+ struct scomp_scratch *scratch;
- alg->stream = stream;
- return 0;
+ scratch = per_cpu_ptr(&scomp_scratch, i);
+ return scomp_alloc_scratch(scratch, i);
}
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
@@ -154,16 +125,13 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
int ret = 0;
mutex_lock(&scomp_lock);
- if (!alg->stream) {
- ret = scomp_alloc_streams(alg);
- if (ret)
- goto unlock;
- }
- if (!scomp_scratch_users) {
+ ret = crypto_acomp_alloc_streams(&alg->streams);
+ if (ret)
+ goto unlock;
+ if (!scomp_scratch_users++) {
ret = crypto_scomp_alloc_scratches();
if (ret)
- goto unlock;
- scomp_scratch_users++;
+ scomp_scratch_users--;
}
unlock:
mutex_unlock(&scomp_lock);
@@ -171,13 +139,40 @@ unlock:
return ret;
}
+static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch)
+{
+ int cpu = raw_smp_processor_id();
+ struct scomp_scratch *scratch;
+
+ scratch = per_cpu_ptr(&scomp_scratch, cpu);
+ spin_lock(&scratch->lock);
+ if (likely(scratch->src))
+ return scratch;
+ spin_unlock(&scratch->lock);
+
+ cpumask_set_cpu(cpu, &scomp_scratch_want);
+ schedule_work(&scomp_scratch_work);
+
+ scratch = per_cpu_ptr(&scomp_scratch, cpumask_first(cpu_possible_mask));
+ spin_lock(&scratch->lock);
+ return scratch;
+}
+
+static inline void scomp_unlock_scratch(struct scomp_scratch *scratch)
+ __releases(scratch)
+{
+ spin_unlock(&scratch->lock);
+}
+
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
- struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch);
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
+ bool src_isvirt = acomp_request_src_isvirt(req);
+ bool dst_isvirt = acomp_request_dst_isvirt(req);
struct crypto_scomp *scomp = *tfm_ctx;
struct crypto_acomp_stream *stream;
+ struct scomp_scratch *scratch;
unsigned int slen = req->slen;
unsigned int dlen = req->dlen;
struct page *spage, *dpage;
@@ -194,15 +189,32 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
if (!req->dst || !dlen)
return -EINVAL;
- if (acomp_request_src_isvirt(req))
+ if (dst_isvirt)
+ dst = req->dvirt;
+ else {
+ if (dlen <= req->dst->length) {
+ dpage = sg_page(req->dst);
+ doff = req->dst->offset;
+ } else
+ return -ENOSYS;
+
+ dpage = nth_page(dpage, doff / PAGE_SIZE);
+ doff = offset_in_page(doff);
+
+ n = (dlen - 1) / PAGE_SIZE;
+ n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
+ if (PageHighMem(dpage + n) &&
+ size_add(doff, dlen) > PAGE_SIZE)
+ return -ENOSYS;
+ dst = kmap_local_page(dpage) + doff;
+ }
+
+ if (src_isvirt)
src = req->svirt;
else {
- src = scratch->src;
+ src = NULL;
do {
- if (acomp_request_src_isfolio(req)) {
- spage = folio_page(req->sfolio, 0);
- soff = req->soff;
- } else if (slen <= req->src->length) {
+ if (slen <= req->src->length) {
spage = sg_page(req->src);
soff = req->src->offset;
} else
@@ -211,8 +223,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
spage = nth_page(spage, soff / PAGE_SIZE);
soff = offset_in_page(soff);
- n = slen / PAGE_SIZE;
- n += (offset_in_page(slen) + soff - 1) / PAGE_SIZE;
+ n = (slen - 1) / PAGE_SIZE;
+ n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
if (PageHighMem(nth_page(spage, n)) &&
size_add(soff, slen) > PAGE_SIZE)
break;
@@ -220,59 +232,37 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
} while (0);
}
- if (acomp_request_dst_isvirt(req))
- dst = req->dvirt;
- else {
- unsigned int max = SCOMP_SCRATCH_SIZE;
+ stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams);
- dst = scratch->dst;
- do {
- if (acomp_request_dst_isfolio(req)) {
- dpage = folio_page(req->dfolio, 0);
- doff = req->doff;
- } else if (dlen <= req->dst->length) {
- dpage = sg_page(req->dst);
- doff = req->dst->offset;
- } else
- break;
-
- dpage = nth_page(dpage, doff / PAGE_SIZE);
- doff = offset_in_page(doff);
+ if (!src_isvirt && !src) {
+ const u8 *src;
- n = dlen / PAGE_SIZE;
- n += (offset_in_page(dlen) + doff - 1) / PAGE_SIZE;
- if (PageHighMem(dpage + n) &&
- size_add(doff, dlen) > PAGE_SIZE)
- break;
- dst = kmap_local_page(dpage) + doff;
- max = dlen;
- } while (0);
- dlen = min(dlen, max);
- }
-
- spin_lock_bh(&scratch->lock);
-
- if (src == scratch->src)
+ scratch = scomp_lock_scratch();
+ src = scratch->src;
memcpy_from_sglist(scratch->src, req->src, 0, slen);
- stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream);
- spin_lock(&stream->lock);
- if (dir)
+ if (dir)
+ ret = crypto_scomp_compress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
+ else
+ ret = crypto_scomp_decompress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
+
+ scomp_unlock_scratch(scratch);
+ } else if (dir)
ret = crypto_scomp_compress(scomp, src, slen,
dst, &dlen, stream->ctx);
else
ret = crypto_scomp_decompress(scomp, src, slen,
dst, &dlen, stream->ctx);
- if (dst == scratch->dst)
- memcpy_to_sglist(req->dst, 0, dst, dlen);
-
- spin_unlock(&stream->lock);
- spin_unlock_bh(&scratch->lock);
+ crypto_acomp_unlock_stream_bh(stream);
req->dlen = dlen;
- if (!acomp_request_dst_isvirt(req) && dst != scratch->dst) {
+ if (!src_isvirt && src)
+ kunmap_local(src);
+ if (!dst_isvirt) {
kunmap_local(dst);
dlen += doff;
for (;;) {
@@ -283,34 +273,18 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
dpage = nth_page(dpage, 1);
}
}
- if (!acomp_request_src_isvirt(req) && src != scratch->src)
- kunmap_local(src);
return ret;
}
-static int scomp_acomp_chain(struct acomp_req *req, int dir)
-{
- struct acomp_req *r2;
- int err;
-
- err = scomp_acomp_comp_decomp(req, dir);
- req->base.err = err;
-
- list_for_each_entry(r2, &req->base.list, base.list)
- r2->base.err = scomp_acomp_comp_decomp(r2, dir);
-
- return err;
-}
-
static int scomp_acomp_compress(struct acomp_req *req)
{
- return scomp_acomp_chain(req, 1);
+ return scomp_acomp_comp_decomp(req, 1);
}
static int scomp_acomp_decompress(struct acomp_req *req)
{
- return scomp_acomp_chain(req, 0);
+ return scomp_acomp_comp_decomp(req, 0);
}
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
@@ -319,6 +293,7 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
crypto_free_scomp(*ctx);
+ flush_work(&scomp_scratch_work);
mutex_lock(&scomp_lock);
if (!--scomp_scratch_users)
crypto_scomp_free_scratches();
@@ -352,7 +327,9 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
static void crypto_scomp_destroy(struct crypto_alg *alg)
{
- scomp_free_streams(__crypto_scomp_alg(alg));
+ struct scomp_alg *scomp = __crypto_scomp_alg(alg);
+
+ crypto_acomp_free_streams(&scomp->streams);
}
static const struct crypto_type crypto_scomp_type = {
@@ -369,6 +346,7 @@ static const struct crypto_type crypto_scomp_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SCOMPRESS,
.tfmsize = offsetof(struct crypto_scomp, base),
+ .algsize = offsetof(struct scomp_alg, base),
};
static void scomp_prepare_alg(struct scomp_alg *alg)
@@ -377,7 +355,7 @@ static void scomp_prepare_alg(struct scomp_alg *alg)
comp_prepare_alg(&alg->calg);
- base->cra_flags |= CRYPTO_ALG_REQ_CHAIN;
+ base->cra_flags |= CRYPTO_ALG_REQ_VIRT;
}
int crypto_register_scomp(struct scomp_alg *alg)
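
The rewritten scratch handling above is a lock-or-fall-back pattern: only the first possible CPU gets a scratch page up front, other CPUs flag themselves in scomp_scratch_want and borrow that page until the workqueue has allocated theirs. A minimal caller-side sketch using the helpers introduced in this hunk (illustrative only; the real flow is in scomp_acomp_comp_decomp()):

	struct scomp_scratch *scratch;

	scratch = scomp_lock_scratch();		/* local CPU, or first-CPU fallback */
	memcpy_from_sglist(scratch->src, req->src, 0, slen);
	/* run the (de)compressor on scratch->src while the lock is held */
	scomp_unlock_scratch(scratch);
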
diff --git a/crypto/seed.c b/crypto/seed.c
index d05d8ed909fa..815391f213de 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -460,7 +460,7 @@ static void __exit seed_fini(void)
crypto_unregister_alg(&seed_alg);
}
-subsys_initcall(seed_init);
+module_init(seed_init);
module_exit(seed_fini);
MODULE_DESCRIPTION("SEED Cipher Algorithm");
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 17e11d51ddc3..2bae99e33526 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -64,20 +64,9 @@ static int seqiv_aead_encrypt(struct aead_request *req)
data = req->base.data;
info = req->iv;
- if (req->src != req->dst) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
-
- skcipher_request_set_sync_tfm(nreq, ctx->sknull);
- skcipher_request_set_callback(nreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(nreq, req->src, req->dst,
- req->assoclen + req->cryptlen,
- NULL);
-
- err = crypto_skcipher_encrypt(nreq);
- if (err)
- return err;
- }
+ if (req->src != req->dst)
+ memcpy_sglist(req->dst, req->src,
+ req->assoclen + req->cryptlen);
if (unlikely(!IS_ALIGNED((unsigned long)info,
crypto_aead_alignmask(geniv) + 1))) {
@@ -179,7 +168,7 @@ static void __exit seqiv_module_exit(void)
crypto_unregister_template(&seqiv_tmpl);
}
-subsys_initcall(seqiv_module_init);
+module_init(seqiv_module_init);
module_exit(seqiv_module_exit);
MODULE_LICENSE("GPL");
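
The seqiv change above replaces the null-skcipher round trip with a plain scatterlist copy. A minimal sketch of memcpy_sglist() on two single-entry scatterlists (src_buf, dst_buf and len are assumed local variables, not part of the patch):

	struct scatterlist src_sg, dst_sg;

	sg_init_one(&src_sg, src_buf, len);
	sg_init_one(&dst_sg, dst_buf, len);
	memcpy_sglist(&dst_sg, &src_sg, len);	/* no crypto_skcipher needed for a copy */
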
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index f6ef187be6fe..b21e7606c652 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -599,7 +599,7 @@ static void __exit serpent_mod_fini(void)
crypto_unregister_alg(&srp_alg);
}
-subsys_initcall(serpent_mod_init);
+module_init(serpent_mod_init);
module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 325b57fe28dc..024e8043bab0 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -12,13 +12,11 @@
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
-#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
@@ -39,38 +37,31 @@ static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
memzero_explicit(temp, sizeof(temp));
}
-int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- return sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
+ return sha1_base_do_update_blocks(desc, data, len,
+ sha1_generic_block_fn);
}
-EXPORT_SYMBOL(crypto_sha1_update);
-static int sha1_final(struct shash_desc *desc, u8 *out)
+static int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- sha1_base_do_finalize(desc, sha1_generic_block_fn);
+ sha1_base_do_finup(desc, data, len, sha1_generic_block_fn);
return sha1_base_finish(desc, out);
}
-int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
- return sha1_final(desc, out);
-}
-EXPORT_SYMBOL(crypto_sha1_finup);
-
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = crypto_sha1_update,
- .final = sha1_final,
.finup = crypto_sha1_finup,
- .descsize = sizeof(struct sha1_state),
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -86,7 +77,7 @@ static void __exit sha1_generic_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(sha1_generic_mod_init);
+module_init(sha1_generic_mod_init);
module_exit(sha1_generic_mod_fini);
MODULE_LICENSE("GPL");
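
The CRYPTO_AHASH_ALG_BLOCK_ONLY conversion changes the ->update() contract: the shash core now buffers partial blocks itself, so the callback consumes whole blocks and returns the number of trailing bytes it left untouched. A hedged sketch with hypothetical names (EXAMPLE_BLOCK_SIZE and example_compress_blocks() are not real kernel symbols):

	static int example_update(struct shash_desc *desc, const u8 *data,
				  unsigned int len)
	{
		unsigned int remain = len % EXAMPLE_BLOCK_SIZE;

		/* consume only whole blocks; the core keeps the remainder */
		example_compress_blocks(shash_desc_ctx(desc), data, len - remain);
		return remain;
	}
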
diff --git a/crypto/sha256.c b/crypto/sha256.c
new file mode 100644
index 000000000000..4aeb213bab11
--- /dev/null
+++ b/crypto/sha256.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API wrapper for the SHA-256 and SHA-224 library functions
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
+ */
+#include <crypto/internal/hash.h>
+#include <crypto/internal/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+ 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+ 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+ 0x2f
+};
+EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
+
+const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
+
+static int crypto_sha256_init(struct shash_desc *desc)
+{
+ sha256_block_init(shash_desc_ctx(desc));
+ return 0;
+}
+
+static inline int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len, bool force_generic)
+{
+ struct crypto_sha256_state *sctx = shash_desc_ctx(desc);
+ int remain = len % SHA256_BLOCK_SIZE;
+
+ sctx->count += len - remain;
+ sha256_choose_blocks(sctx->state, data, len / SHA256_BLOCK_SIZE,
+ force_generic, !force_generic);
+ return remain;
+}
+
+static int crypto_sha256_update_generic(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return crypto_sha256_update(desc, data, len, true);
+}
+
+static int crypto_sha256_update_lib(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ sha256_update(shash_desc_ctx(desc), data, len);
+ return 0;
+}
+
+static int crypto_sha256_update_arch(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return crypto_sha256_update(desc, data, len, false);
+}
+
+static int crypto_sha256_final_lib(struct shash_desc *desc, u8 *out)
+{
+ sha256_final(shash_desc_ctx(desc), out);
+ return 0;
+}
+
+static __always_inline int crypto_sha256_finup(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len, u8 *out,
+ bool force_generic)
+{
+ struct crypto_sha256_state *sctx = shash_desc_ctx(desc);
+ unsigned int remain = len;
+ u8 *buf;
+
+ if (len >= SHA256_BLOCK_SIZE)
+ remain = crypto_sha256_update(desc, data, len, force_generic);
+ sctx->count += remain;
+ buf = memcpy(sctx + 1, data + len - remain, remain);
+ sha256_finup(sctx, buf, remain, out,
+ crypto_shash_digestsize(desc->tfm), force_generic,
+ !force_generic);
+ return 0;
+}
+
+static int crypto_sha256_finup_generic(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return crypto_sha256_finup(desc, data, len, out, true);
+}
+
+static int crypto_sha256_finup_arch(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return crypto_sha256_finup(desc, data, len, out, false);
+}
+
+static int crypto_sha256_digest_generic(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ crypto_sha256_init(desc);
+ return crypto_sha256_finup_generic(desc, data, len, out);
+}
+
+static int crypto_sha256_digest_lib(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ sha256(data, len, out);
+ return 0;
+}
+
+static int crypto_sha256_digest_arch(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ crypto_sha256_init(desc);
+ return crypto_sha256_finup_arch(desc, data, len, out);
+}
+
+static int crypto_sha224_init(struct shash_desc *desc)
+{
+ sha224_block_init(shash_desc_ctx(desc));
+ return 0;
+}
+
+static int crypto_sha224_final_lib(struct shash_desc *desc, u8 *out)
+{
+ sha224_final(shash_desc_ctx(desc), out);
+ return 0;
+}
+
+static int crypto_sha256_import_lib(struct shash_desc *desc, const void *in)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ const u8 *p = in;
+
+ memcpy(sctx, p, sizeof(*sctx));
+ p += sizeof(*sctx);
+ sctx->count += *p;
+ return 0;
+}
+
+static int crypto_sha256_export_lib(struct shash_desc *desc, void *out)
+{
+ struct sha256_state *sctx0 = shash_desc_ctx(desc);
+ struct sha256_state sctx = *sctx0;
+ unsigned int partial;
+ u8 *p = out;
+
+ partial = sctx.count % SHA256_BLOCK_SIZE;
+ sctx.count -= partial;
+ memcpy(p, &sctx, sizeof(sctx));
+ p += sizeof(sctx);
+ *p = partial;
+ return 0;
+}
+
+static struct shash_alg algs[] = {
+ {
+ .base.cra_name = "sha256",
+ .base.cra_driver_name = "sha256-generic",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
+ .base.cra_blocksize = SHA256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = crypto_sha256_init,
+ .update = crypto_sha256_update_generic,
+ .finup = crypto_sha256_finup_generic,
+ .digest = crypto_sha256_digest_generic,
+ .descsize = sizeof(struct crypto_sha256_state),
+ },
+ {
+ .base.cra_name = "sha224",
+ .base.cra_driver_name = "sha224-generic",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
+ .base.cra_blocksize = SHA224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = crypto_sha224_init,
+ .update = crypto_sha256_update_generic,
+ .finup = crypto_sha256_finup_generic,
+ .descsize = sizeof(struct crypto_sha256_state),
+ },
+ {
+ .base.cra_name = "sha256",
+ .base.cra_driver_name = "sha256-lib",
+ .base.cra_blocksize = SHA256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = crypto_sha256_init,
+ .update = crypto_sha256_update_lib,
+ .final = crypto_sha256_final_lib,
+ .digest = crypto_sha256_digest_lib,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct crypto_sha256_state) +
+ SHA256_BLOCK_SIZE + 1,
+ .import = crypto_sha256_import_lib,
+ .export = crypto_sha256_export_lib,
+ },
+ {
+ .base.cra_name = "sha224",
+ .base.cra_driver_name = "sha224-lib",
+ .base.cra_blocksize = SHA224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = crypto_sha224_init,
+ .update = crypto_sha256_update_lib,
+ .final = crypto_sha224_final_lib,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct crypto_sha256_state) +
+ SHA256_BLOCK_SIZE + 1,
+ .import = crypto_sha256_import_lib,
+ .export = crypto_sha256_export_lib,
+ },
+ {
+ .base.cra_name = "sha256",
+ .base.cra_driver_name = "sha256-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
+ .base.cra_blocksize = SHA256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = crypto_sha256_init,
+ .update = crypto_sha256_update_arch,
+ .finup = crypto_sha256_finup_arch,
+ .digest = crypto_sha256_digest_arch,
+ .descsize = sizeof(struct crypto_sha256_state),
+ },
+ {
+ .base.cra_name = "sha224",
+ .base.cra_driver_name = "sha224-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
+ .base.cra_blocksize = SHA224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = crypto_sha224_init,
+ .update = crypto_sha256_update_arch,
+ .finup = crypto_sha256_finup_arch,
+ .descsize = sizeof(struct crypto_sha256_state),
+ },
+};
+
+static unsigned int num_algs;
+
+static int __init crypto_sha256_mod_init(void)
+{
+ /* register the arch flavours only if they differ from generic */
+ num_algs = ARRAY_SIZE(algs);
+ BUILD_BUG_ON(ARRAY_SIZE(algs) <= 2);
+ if (!sha256_is_arch_optimized())
+ num_algs -= 2;
+	return crypto_register_shashes(algs, num_algs);
+}
+module_init(crypto_sha256_mod_init);
+
+static void __exit crypto_sha256_mod_exit(void)
+{
+ crypto_unregister_shashes(algs, num_algs);
+}
+module_exit(crypto_sha256_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto API wrapper for the SHA-256 and SHA-224 library functions");
+
+MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha256-generic");
+MODULE_ALIAS_CRYPTO("sha256-" __stringify(ARCH));
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha224-generic");
+MODULE_ALIAS_CRYPTO("sha224-" __stringify(ARCH));
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
deleted file mode 100644
index b00521f1a6d4..000000000000
--- a/crypto/sha256_generic.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Crypto API wrapper for the generic SHA256 code from lib/crypto/sha256.c
- *
- * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
- * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
- */
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <asm/byteorder.h>
-#include <linux/unaligned.h>
-
-const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
- 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
- 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
- 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
- 0x2f
-};
-EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
-
-const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
- 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
- 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
- 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
- 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
-};
-EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
-
-int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- sha256_update(shash_desc_ctx(desc), data, len);
- return 0;
-}
-EXPORT_SYMBOL(crypto_sha256_update);
-
-static int crypto_sha256_final(struct shash_desc *desc, u8 *out)
-{
- if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE)
- sha224_final(shash_desc_ctx(desc), out);
- else
- sha256_final(shash_desc_ctx(desc), out);
- return 0;
-}
-
-int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash)
-{
- sha256_update(shash_desc_ctx(desc), data, len);
- return crypto_sha256_final(desc, hash);
-}
-EXPORT_SYMBOL(crypto_sha256_finup);
-
-static struct shash_alg sha256_algs[2] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = crypto_sha256_update,
- .final = crypto_sha256_final,
- .finup = crypto_sha256_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name= "sha256-generic",
- .cra_priority = 100,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = crypto_sha256_update,
- .final = crypto_sha256_final,
- .finup = crypto_sha256_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name= "sha224-generic",
- .cra_priority = 100,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init sha256_generic_mod_init(void)
-{
- return crypto_register_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
-}
-
-static void __exit sha256_generic_mod_fini(void)
-{
- crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
-}
-
-subsys_initcall(sha256_generic_mod_init);
-module_exit(sha256_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
-
-MODULE_ALIAS_CRYPTO("sha224");
-MODULE_ALIAS_CRYPTO("sha224-generic");
-MODULE_ALIAS_CRYPTO("sha256");
-MODULE_ALIAS_CRYPTO("sha256-generic");
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index b103642b56ea..41d1e506e6de 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -9,10 +9,10 @@
* Ard Biesheuvel <ard.biesheuvel@linaro.org>
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
#include <crypto/sha3.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include <linux/unaligned.h>
/*
@@ -161,68 +161,51 @@ static void keccakf(u64 st[25])
int crypto_sha3_init(struct shash_desc *desc)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
-
- sctx->rsiz = 200 - 2 * digest_size;
- sctx->rsizw = sctx->rsiz / 8;
- sctx->partial = 0;
memset(sctx->st, 0, sizeof(sctx->st));
return 0;
}
EXPORT_SYMBOL(crypto_sha3_init);
-int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
+ unsigned int rsiz = crypto_shash_blocksize(desc->tfm);
struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int done;
- const u8 *src;
-
- done = 0;
- src = data;
-
- if ((sctx->partial + len) > (sctx->rsiz - 1)) {
- if (sctx->partial) {
- done = -sctx->partial;
- memcpy(sctx->buf + sctx->partial, data,
- done + sctx->rsiz);
- src = sctx->buf;
- }
+ unsigned int rsizw = rsiz / 8;
- do {
- unsigned int i;
+ do {
+ int i;
- for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
- keccakf(sctx->st);
+ for (i = 0; i < rsizw; i++)
+ sctx->st[i] ^= get_unaligned_le64(data + 8 * i);
+ keccakf(sctx->st);
- done += sctx->rsiz;
- src = data + done;
- } while (done + (sctx->rsiz - 1) < len);
-
- sctx->partial = 0;
- }
- memcpy(sctx->buf + sctx->partial, src, len - done);
- sctx->partial += (len - done);
-
- return 0;
+ data += rsiz;
+ len -= rsiz;
+ } while (len >= rsiz);
+ return len;
}
-EXPORT_SYMBOL(crypto_sha3_update);
-int crypto_sha3_final(struct shash_desc *desc, u8 *out)
+static int crypto_sha3_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
- struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int i, inlen = sctx->partial;
unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ unsigned int rsiz = crypto_shash_blocksize(desc->tfm);
+ struct sha3_state *sctx = shash_desc_ctx(desc);
+ __le64 block[SHA3_224_BLOCK_SIZE / 8] = {};
__le64 *digest = (__le64 *)out;
+ unsigned int rsizw = rsiz / 8;
+ u8 *p;
+ int i;
- sctx->buf[inlen++] = 0x06;
- memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
- sctx->buf[sctx->rsiz - 1] |= 0x80;
+ p = memcpy(block, src, len);
+ p[len++] = 0x06;
+ p[rsiz - 1] |= 0x80;
- for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
+ for (i = 0; i < rsizw; i++)
+ sctx->st[i] ^= le64_to_cpu(block[i]);
+ memzero_explicit(block, sizeof(block));
keccakf(sctx->st);
@@ -232,49 +215,51 @@ int crypto_sha3_final(struct shash_desc *desc, u8 *out)
if (digest_size & 4)
put_unaligned_le32(sctx->st[i], (__le32 *)digest);
- memset(sctx, 0, sizeof(*sctx));
return 0;
}
-EXPORT_SYMBOL(crypto_sha3_final);
static struct shash_alg algs[] = { {
.digestsize = SHA3_224_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = crypto_sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-224",
.base.cra_driver_name = "sha3-224-generic",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = SHA3_256_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = crypto_sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-256",
.base.cra_driver_name = "sha3-256-generic",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = SHA3_384_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = crypto_sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-384",
.base.cra_driver_name = "sha3-384-generic",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = SHA3_512_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = crypto_sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-512",
.base.cra_driver_name = "sha3-512-generic",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
@@ -289,7 +274,7 @@ static void __exit sha3_generic_mod_fini(void)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
-subsys_initcall(sha3_generic_mod_init);
+module_init(sha3_generic_mod_init);
module_exit(sha3_generic_mod_fini);
MODULE_LICENSE("GPL");
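
With the cached rsiz/rsizw fields gone, the code above recomputes the rate from the block size: rate = 200 - 2 * digest_size bytes, i.e. 136 for sha3-256 and 72 for sha3-512. A short sketch of the final padding step in crypto_sha3_finup(), assuming block is a zeroed u8 buffer of rsiz bytes:

	memcpy(block, src, len);	/* len < rsiz trailing bytes */
	block[len] = 0x06;		/* SHA-3 domain bits + first pad bit */
	block[rsiz - 1] |= 0x80;	/* closing bit of the pad10*1 padding */
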
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index ed81813bd420..7368173f545e 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -6,16 +6,10 @@
* Copyright (c) 2003 Kyle McMartin <kyle@debian.org>
*/
#include <crypto/internal/hash.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/crypto.h>
-#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
-#include <linux/percpu.h>
-#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/unaligned.h>
const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = {
@@ -145,47 +139,42 @@ sha512_transform(u64 *state, const u8 *input)
state[4] += e; state[5] += f; state[6] += g; state[7] += h;
}
-static void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
- int blocks)
+void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
+ int blocks)
{
- while (blocks--) {
+ do {
sha512_transform(sst->state, src);
src += SHA512_BLOCK_SIZE;
- }
+ } while (--blocks);
}
+EXPORT_SYMBOL_GPL(sha512_generic_block_fn);
-int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- return sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
+ return sha512_base_do_update_blocks(desc, data, len,
+ sha512_generic_block_fn);
}
-EXPORT_SYMBOL(crypto_sha512_update);
-static int sha512_final(struct shash_desc *desc, u8 *hash)
+static int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash)
{
- sha512_base_do_finalize(desc, sha512_generic_block_fn);
+ sha512_base_do_finup(desc, data, len, sha512_generic_block_fn);
return sha512_base_finish(desc, hash);
}
-int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash)
-{
- sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
- return sha512_final(desc, hash);
-}
-EXPORT_SYMBOL(crypto_sha512_finup);
-
static struct shash_alg sha512_algs[2] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = crypto_sha512_update,
- .final = sha512_final,
.finup = crypto_sha512_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -193,13 +182,14 @@ static struct shash_alg sha512_algs[2] = { {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = crypto_sha512_update,
- .final = sha512_final,
.finup = crypto_sha512_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -215,7 +205,7 @@ static void __exit sha512_generic_mod_fini(void)
crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs));
}
-subsys_initcall(sha512_generic_mod_init);
+module_init(sha512_generic_mod_init);
module_exit(sha512_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/shash.c b/crypto/shash.c
index 301ab42bf849..4721f5f134f4 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -16,6 +16,24 @@
#include "hash.h"
+static inline bool crypto_shash_block_only(struct crypto_shash *tfm)
+{
+ return crypto_shash_alg(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_BLOCK_ONLY;
+}
+
+static inline bool crypto_shash_final_nonzero(struct crypto_shash *tfm)
+{
+ return crypto_shash_alg(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
+}
+
+static inline bool crypto_shash_finup_max(struct crypto_shash *tfm)
+{
+ return crypto_shash_alg(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_FINUP_MAX;
+}
+
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -46,18 +64,27 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);
-int crypto_shash_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int __crypto_shash_init(struct shash_desc *desc)
{
- return crypto_shash_alg(desc->tfm)->update(desc, data, len);
+ struct crypto_shash *tfm = desc->tfm;
+
+ if (crypto_shash_block_only(tfm)) {
+ u8 *buf = shash_desc_ctx(desc);
+
+ buf += crypto_shash_descsize(tfm) - 1;
+ *buf = 0;
+ }
+
+ return crypto_shash_alg(tfm)->init(desc);
}
-EXPORT_SYMBOL_GPL(crypto_shash_update);
-int crypto_shash_final(struct shash_desc *desc, u8 *out)
+int crypto_shash_init(struct shash_desc *desc)
{
- return crypto_shash_alg(desc->tfm)->final(desc, out);
+ if (crypto_shash_get_flags(desc->tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+ return __crypto_shash_init(desc);
}
-EXPORT_SYMBOL_GPL(crypto_shash_final);
+EXPORT_SYMBOL_GPL(crypto_shash_init);
static int shash_default_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
@@ -68,20 +95,89 @@ static int shash_default_finup(struct shash_desc *desc, const u8 *data,
shash->final(desc, out);
}
-int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int crypto_shash_op_and_zero(
+ int (*op)(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out),
+ struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
{
- return crypto_shash_alg(desc->tfm)->finup(desc, data, len, out);
+ int err;
+
+ err = op(desc, data, len, out);
+ memset(shash_desc_ctx(desc), 0, crypto_shash_descsize(desc->tfm));
+ return err;
+}
+
+int crypto_shash_finup(struct shash_desc *restrict desc, const u8 *data,
+ unsigned int len, u8 *restrict out)
+{
+ struct crypto_shash *tfm = desc->tfm;
+ u8 *blenp = shash_desc_ctx(desc);
+ bool finup_max, nonzero;
+ unsigned int bs;
+ int err;
+ u8 *buf;
+
+ if (!crypto_shash_block_only(tfm)) {
+ if (out)
+ goto finup;
+ return crypto_shash_alg(tfm)->update(desc, data, len);
+ }
+
+ finup_max = out && crypto_shash_finup_max(tfm);
+
+ /* Retain extra block for final nonzero algorithms. */
+ nonzero = crypto_shash_final_nonzero(tfm);
+
+ /*
+ * The partial block buffer follows the algorithm desc context.
+ * The byte following that contains the length.
+ */
+ blenp += crypto_shash_descsize(tfm) - 1;
+ bs = crypto_shash_blocksize(tfm);
+ buf = blenp - bs;
+
+ if (likely(!*blenp && finup_max))
+ goto finup;
+
+ while ((*blenp + len) >= bs + nonzero) {
+ unsigned int nbytes = len - nonzero;
+ const u8 *src = data;
+
+ if (*blenp) {
+ memcpy(buf + *blenp, data, bs - *blenp);
+ nbytes = bs;
+ src = buf;
+ }
+
+ err = crypto_shash_alg(tfm)->update(desc, src, nbytes);
+ if (err < 0)
+ return err;
+
+ data += nbytes - err - *blenp;
+ len -= nbytes - err - *blenp;
+ *blenp = 0;
+ }
+
+ if (*blenp || !out) {
+ memcpy(buf + *blenp, data, len);
+ *blenp += len;
+ if (!out)
+ return 0;
+ data = buf;
+ len = *blenp;
+ }
+
+finup:
+ return crypto_shash_op_and_zero(crypto_shash_alg(tfm)->finup, desc,
+ data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);
static int shash_default_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- struct shash_alg *shash = crypto_shash_alg(desc->tfm);
-
- return shash->init(desc) ?:
- shash->finup(desc, data, len, out);
+ return __crypto_shash_init(desc) ?:
+ crypto_shash_finup(desc, data, len, out);
}
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
@@ -92,7 +188,8 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
- return crypto_shash_alg(tfm)->digest(desc, data, len, out);
+ return crypto_shash_op_and_zero(crypto_shash_alg(tfm)->digest, desc,
+ data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
@@ -100,44 +197,105 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
unsigned int len, u8 *out)
{
SHASH_DESC_ON_STACK(desc, tfm);
- int err;
desc->tfm = tfm;
+ return crypto_shash_digest(desc, data, len, out);
+}
+EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest);
- err = crypto_shash_digest(desc, data, len, out);
+static int __crypto_shash_export(struct shash_desc *desc, void *out,
+ int (*export)(struct shash_desc *desc,
+ void *out))
+{
+ struct crypto_shash *tfm = desc->tfm;
+ u8 *buf = shash_desc_ctx(desc);
+ unsigned int plen, ss;
+
+ plen = crypto_shash_blocksize(tfm) + 1;
+ ss = crypto_shash_statesize(tfm);
+ if (crypto_shash_block_only(tfm))
+ ss -= plen;
+ if (!export) {
+ memcpy(out, buf, ss);
+ return 0;
+ }
- shash_desc_zero(desc);
+ return export(desc, out);
+}
- return err;
+int crypto_shash_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_shash_export(desc, out,
+ crypto_shash_alg(desc->tfm)->export_core);
}
-EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest);
+EXPORT_SYMBOL_GPL(crypto_shash_export_core);
int crypto_shash_export(struct shash_desc *desc, void *out)
{
struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
- if (shash->export)
- return shash->export(desc, out);
+ if (crypto_shash_block_only(tfm)) {
+ unsigned int plen = crypto_shash_blocksize(tfm) + 1;
+ unsigned int descsize = crypto_shash_descsize(tfm);
+ unsigned int ss = crypto_shash_statesize(tfm);
+ u8 *buf = shash_desc_ctx(desc);
- memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(tfm));
- return 0;
+ memcpy(out + ss - plen, buf + descsize - plen, plen);
+ }
+ return __crypto_shash_export(desc, out, crypto_shash_alg(tfm)->export);
}
EXPORT_SYMBOL_GPL(crypto_shash_export);
-int crypto_shash_import(struct shash_desc *desc, const void *in)
+static int __crypto_shash_import(struct shash_desc *desc, const void *in,
+ int (*import)(struct shash_desc *desc,
+ const void *in))
{
struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
+ unsigned int descsize, plen, ss;
+ u8 *buf = shash_desc_ctx(desc);
if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
- if (shash->import)
- return shash->import(desc, in);
+ ss = crypto_shash_statesize(tfm);
+ if (crypto_shash_block_only(tfm)) {
+ plen = crypto_shash_blocksize(tfm) + 1;
+ ss -= plen;
+ descsize = crypto_shash_descsize(tfm);
+ buf[descsize - 1] = 0;
+ }
+ if (!import) {
+ memcpy(buf, in, ss);
+ return 0;
+ }
- memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm));
- return 0;
+ return import(desc, in);
+}
+
+int crypto_shash_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_shash_import(desc, in,
+ crypto_shash_alg(desc->tfm)->import_core);
+}
+EXPORT_SYMBOL_GPL(crypto_shash_import_core);
+
+int crypto_shash_import(struct shash_desc *desc, const void *in)
+{
+ struct crypto_shash *tfm = desc->tfm;
+ int err;
+
+ err = __crypto_shash_import(desc, in, crypto_shash_alg(tfm)->import);
+ if (crypto_shash_block_only(tfm)) {
+ unsigned int plen = crypto_shash_blocksize(tfm) + 1;
+ unsigned int descsize = crypto_shash_descsize(tfm);
+ unsigned int ss = crypto_shash_statesize(tfm);
+ u8 *buf = shash_desc_ctx(desc);
+
+ memcpy(buf + descsize - plen, in + ss - plen, plen);
+ if (buf[descsize - 1] >= plen)
+ err = -EOVERFLOW;
+ }
+ return err;
}
EXPORT_SYMBOL_GPL(crypto_shash_import);
@@ -153,9 +311,6 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
struct shash_alg *alg = crypto_shash_alg(hash);
- int err;
-
- hash->descsize = alg->descsize;
shash_set_needkey(hash, alg);
@@ -165,18 +320,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
if (!alg->init_tfm)
return 0;
- err = alg->init_tfm(hash);
- if (err)
- return err;
-
- /* ->init_tfm() may have increased the descsize. */
- if (WARN_ON_ONCE(hash->descsize > HASH_MAX_DESCSIZE)) {
- if (alg->exit_tfm)
- alg->exit_tfm(hash);
- return -EINVAL;
- }
-
- return 0;
+ return alg->init_tfm(hash);
}
static void crypto_shash_free_instance(struct crypto_instance *inst)
@@ -227,6 +371,7 @@ const struct crypto_type crypto_shash_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SHASH,
.tfmsize = offsetof(struct crypto_shash, base),
+ .algsize = offsetof(struct shash_alg, base),
};
int crypto_grab_shash(struct crypto_shash_spawn *spawn,
@@ -273,8 +418,6 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
if (IS_ERR(nhash))
return nhash;
- nhash->descsize = hash->descsize;
-
if (alg->clone_tfm) {
err = alg->clone_tfm(nhash, hash);
if (err) {
@@ -283,6 +426,9 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
}
}
+ if (alg->exit_tfm)
+ crypto_shash_tfm(nhash)->exit = crypto_shash_exit_tfm;
+
return nhash;
}
EXPORT_SYMBOL_GPL(crypto_clone_shash);
@@ -303,14 +449,21 @@ int hash_prepare_alg(struct hash_alg_common *alg)
return 0;
}
+static int shash_default_export_core(struct shash_desc *desc, void *out)
+{
+ return -ENOSYS;
+}
+
+static int shash_default_import_core(struct shash_desc *desc, const void *in)
+{
+ return -ENOSYS;
+}
+
static int shash_prepare_alg(struct shash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
int err;
- if (alg->descsize > HASH_MAX_DESCSIZE)
- return -EINVAL;
-
if ((alg->export && !alg->import) || (alg->import && !alg->export))
return -EINVAL;
@@ -320,6 +473,7 @@ static int shash_prepare_alg(struct shash_alg *alg)
base->cra_type = &crypto_shash_type;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
+ base->cra_flags |= CRYPTO_ALG_REQ_VIRT;
/*
* Handle missing optional functions. For each one we can either
@@ -336,11 +490,30 @@ static int shash_prepare_alg(struct shash_alg *alg)
alg->finup = shash_default_finup;
if (!alg->digest)
alg->digest = shash_default_digest;
- if (!alg->export)
+ if (!alg->export && !alg->halg.statesize)
alg->halg.statesize = alg->descsize;
if (!alg->setkey)
alg->setkey = shash_no_setkey;
+ if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) {
+ BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256);
+ alg->descsize += base->cra_blocksize + 1;
+ alg->statesize += base->cra_blocksize + 1;
+ alg->export_core = alg->export;
+ alg->import_core = alg->import;
+ } else if (!alg->export_core || !alg->import_core) {
+ alg->export_core = shash_default_export_core;
+ alg->import_core = shash_default_import_core;
+ base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+ }
+
+ if (alg->descsize > HASH_MAX_DESCSIZE)
+ return -EINVAL;
+ if (alg->statesize > HASH_MAX_STATESIZE)
+ return -EINVAL;
+
+ base->cra_reqsize = sizeof(struct shash_desc) + alg->descsize;
+
return 0;
}
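
For CRYPTO_AHASH_ALG_BLOCK_ONLY algorithms, shash_prepare_alg() grows descsize by cra_blocksize + 1 and crypto_shash_finup() treats that tail as its partial-block state. A sketch of the resulting descriptor-context layout, inferred from the blenp/buf arithmetic above:

	/*
	 *  shash_desc_ctx(desc)
	 *  +---------------------+------------------------+-------------+
	 *  | algorithm state     | partial block buffer   | length byte |
	 *  | (original descsize) | (cra_blocksize bytes)  | (1 byte)    |
	 *  +---------------------+------------------------+-------------+
	 *                                        blenp = ctx + descsize - 1
	 */
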
diff --git a/crypto/sig.c b/crypto/sig.c
index dfc7cae90802..beba745b6405 100644
--- a/crypto/sig.c
+++ b/crypto/sig.c
@@ -74,6 +74,7 @@ static const struct crypto_type crypto_sig_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SIG,
.tfmsize = offsetof(struct crypto_sig, base),
+ .algsize = offsetof(struct sig_alg, base),
};
struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask)
@@ -102,6 +103,11 @@ static int sig_default_set_key(struct crypto_sig *tfm,
return -ENOSYS;
}
+static unsigned int sig_default_size(struct crypto_sig *tfm)
+{
+ return DIV_ROUND_UP_POW2(crypto_sig_keysize(tfm), BITS_PER_BYTE);
+}
+
static int sig_prepare_alg(struct sig_alg *alg)
{
struct crypto_alg *base = &alg->base;
@@ -117,9 +123,9 @@ static int sig_prepare_alg(struct sig_alg *alg)
if (!alg->key_size)
return -EINVAL;
if (!alg->max_size)
- alg->max_size = alg->key_size;
+ alg->max_size = sig_default_size;
if (!alg->digest_size)
- alg->digest_size = alg->key_size;
+ alg->digest_size = sig_default_size;
base->cra_type = &crypto_sig_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
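
sig_default_size() converts the key size into an output-buffer size in bytes; the division by BITS_PER_BYTE implies crypto_sig_keysize() reports bits, and the round-up keeps non-byte-aligned key sizes from being truncated. Worked examples (plain arithmetic, not from the patch):

	/* RSA-2048:   DIV_ROUND_UP_POW2(2048, 8) = 256 bytes                */
	/* NIST P-521: DIV_ROUND_UP_POW2(521, 8)  = 66 bytes (round-up case) */
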
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 132075a905d9..de5fc91bba26 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -17,7 +17,6 @@
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -28,258 +27,14 @@
#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e
-enum {
- SKCIPHER_WALK_SLOW = 1 << 0,
- SKCIPHER_WALK_COPY = 1 << 1,
- SKCIPHER_WALK_DIFF = 1 << 2,
- SKCIPHER_WALK_SLEEP = 1 << 3,
-};
-
static const struct crypto_type crypto_skcipher_type;
-static int skcipher_walk_next(struct skcipher_walk *walk);
-
-static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
-{
- return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-}
-
static inline struct skcipher_alg *__crypto_skcipher_alg(
struct crypto_alg *alg)
{
return container_of(alg, struct skcipher_alg, base);
}
-/**
- * skcipher_walk_done() - finish one step of a skcipher_walk
- * @walk: the skcipher_walk
- * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
- * or a -errno value to terminate the walk due to an error
- *
- * This function cleans up after one step of walking through the source and
- * destination scatterlists, and advances to the next step if applicable.
- * walk->nbytes is set to the number of bytes available in the next step,
- * walk->total is set to the new total number of bytes remaining, and
- * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
- * is no more data, or if an error occurred (i.e. -errno return), then
- * walk->nbytes and walk->total are set to 0 and all resources owned by the
- * skcipher_walk are freed.
- *
- * Return: 0 or a -errno value. If @res was a -errno value then it will be
- * returned, but other errors may occur too.
- */
-int skcipher_walk_done(struct skcipher_walk *walk, int res)
-{
- unsigned int n = walk->nbytes; /* num bytes processed this step */
- unsigned int total = 0; /* new total remaining */
-
- if (!n)
- goto finish;
-
- if (likely(res >= 0)) {
- n -= res; /* subtract num bytes *not* processed */
- total = walk->total - n;
- }
-
- if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
- SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF)))) {
- scatterwalk_advance(&walk->in, n);
- } else if (walk->flags & SKCIPHER_WALK_DIFF) {
- scatterwalk_done_src(&walk->in, n);
- } else if (walk->flags & SKCIPHER_WALK_COPY) {
- scatterwalk_advance(&walk->in, n);
- scatterwalk_map(&walk->out);
- memcpy(walk->out.addr, walk->page, n);
- } else { /* SKCIPHER_WALK_SLOW */
- if (res > 0) {
- /*
- * Didn't process all bytes. Either the algorithm is
- * broken, or this was the last step and it turned out
- * the message wasn't evenly divisible into blocks but
- * the algorithm requires it.
- */
- res = -EINVAL;
- total = 0;
- } else
- memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
- goto dst_done;
- }
-
- scatterwalk_done_dst(&walk->out, n);
-dst_done:
-
- if (res > 0)
- res = 0;
-
- walk->total = total;
- walk->nbytes = 0;
-
- if (total) {
- if (walk->flags & SKCIPHER_WALK_SLEEP)
- cond_resched();
- walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF);
- return skcipher_walk_next(walk);
- }
-
-finish:
- /* Short-circuit for the common/fast path. */
- if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
- goto out;
-
- if (walk->iv != walk->oiv)
- memcpy(walk->oiv, walk->iv, walk->ivsize);
- if (walk->buffer != walk->page)
- kfree(walk->buffer);
- if (walk->page)
- free_page((unsigned long)walk->page);
-
-out:
- return res;
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_done);
-
-static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
-{
- unsigned alignmask = walk->alignmask;
- unsigned n;
- void *buffer;
-
- if (!walk->buffer)
- walk->buffer = walk->page;
- buffer = walk->buffer;
- if (!buffer) {
- /* Min size for a buffer of bsize bytes aligned to alignmask */
- n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-
- buffer = kzalloc(n, skcipher_walk_gfp(walk));
- if (!buffer)
- return skcipher_walk_done(walk, -ENOMEM);
- walk->buffer = buffer;
- }
-
- buffer = PTR_ALIGN(buffer, alignmask + 1);
- memcpy_from_scatterwalk(buffer, &walk->in, bsize);
- walk->out.__addr = buffer;
- walk->in.__addr = walk->out.addr;
-
- walk->nbytes = bsize;
- walk->flags |= SKCIPHER_WALK_SLOW;
-
- return 0;
-}
-
-static int skcipher_next_copy(struct skcipher_walk *walk)
-{
- void *tmp = walk->page;
-
- scatterwalk_map(&walk->in);
- memcpy(tmp, walk->in.addr, walk->nbytes);
- scatterwalk_unmap(&walk->in);
- /*
- * walk->in is advanced later when the number of bytes actually
- * processed (which might be less than walk->nbytes) is known.
- */
-
- walk->in.__addr = tmp;
- walk->out.__addr = tmp;
- return 0;
-}
-
-static int skcipher_next_fast(struct skcipher_walk *walk)
-{
- unsigned long diff;
-
- diff = offset_in_page(walk->in.offset) -
- offset_in_page(walk->out.offset);
- diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
- (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
-
- scatterwalk_map(&walk->out);
- walk->in.__addr = walk->out.__addr;
-
- if (diff) {
- walk->flags |= SKCIPHER_WALK_DIFF;
- scatterwalk_map(&walk->in);
- }
-
- return 0;
-}
-
-static int skcipher_walk_next(struct skcipher_walk *walk)
-{
- unsigned int bsize;
- unsigned int n;
-
- n = walk->total;
- bsize = min(walk->stride, max(n, walk->blocksize));
- n = scatterwalk_clamp(&walk->in, n);
- n = scatterwalk_clamp(&walk->out, n);
-
- if (unlikely(n < bsize)) {
- if (unlikely(walk->total < walk->blocksize))
- return skcipher_walk_done(walk, -EINVAL);
-
-slow_path:
- return skcipher_next_slow(walk, bsize);
- }
- walk->nbytes = n;
-
- if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
- if (!walk->page) {
- gfp_t gfp = skcipher_walk_gfp(walk);
-
- walk->page = (void *)__get_free_page(gfp);
- if (!walk->page)
- goto slow_path;
- }
- walk->flags |= SKCIPHER_WALK_COPY;
- return skcipher_next_copy(walk);
- }
-
- return skcipher_next_fast(walk);
-}
-
-static int skcipher_copy_iv(struct skcipher_walk *walk)
-{
- unsigned alignmask = walk->alignmask;
- unsigned ivsize = walk->ivsize;
- unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
- unsigned size;
- u8 *iv;
-
- /* Min size for a buffer of stride + ivsize, aligned to alignmask */
- size = aligned_stride + ivsize +
- (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-
- walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
- if (!walk->buffer)
- return -ENOMEM;
-
- iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
-
- walk->iv = memcpy(iv, walk->iv, walk->ivsize);
- return 0;
-}
-
-static int skcipher_walk_first(struct skcipher_walk *walk)
-{
- if (WARN_ON_ONCE(in_hardirq()))
- return -EDEADLK;
-
- walk->buffer = NULL;
- if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
- int err = skcipher_copy_iv(walk);
- if (err)
- return err;
- }
-
- walk->page = NULL;
-
- return skcipher_walk_next(walk);
-}
-
int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
struct skcipher_request *__restrict req, bool atomic)
{
@@ -294,10 +49,8 @@ int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
- if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
- walk->flags = SKCIPHER_WALK_SLEEP;
- else
- walk->flags = 0;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
+ atomic = true;
if (unlikely(!walk->total))
return 0;
@@ -314,7 +67,7 @@ int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
else
walk->stride = alg->walksize;
- return skcipher_walk_first(walk);
+ return skcipher_walk_first(walk, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
@@ -327,10 +80,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
- if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
- walk->flags = SKCIPHER_WALK_SLEEP;
- else
- walk->flags = 0;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
+ atomic = true;
if (unlikely(!walk->total))
return 0;
@@ -343,7 +94,7 @@ static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
walk->ivsize = crypto_aead_ivsize(tfm);
walk->alignmask = crypto_aead_alignmask(tfm);
- return skcipher_walk_first(walk);
+ return skcipher_walk_first(walk, atomic);
}
int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
@@ -620,6 +371,7 @@ static const struct crypto_type crypto_skcipher_type = {
.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.tfmsize = offsetof(struct crypto_skcipher, base),
+ .algsize = offsetof(struct skcipher_alg, base),
};
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
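
The walk implementation leaves this file, but the driver-facing pattern stays the same; only the atomic flag is now folded in before skcipher_walk_first(). A rough sketch of the usual caller loop, from memory rather than from this patch (bsize stands for the cipher's block size):

	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		unsigned int n = walk.nbytes - (walk.nbytes % bsize);

		/* process n bytes: walk.src.virt.addr -> walk.dst.virt.addr */
		err = skcipher_walk_done(&walk, walk.nbytes - n);
	}
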
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index a2d23a46924e..7529139fcc96 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -9,15 +9,10 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
-#include <linux/bitops.h>
-#include <asm/byteorder.h>
-#include <linux/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
@@ -30,38 +25,28 @@ EXPORT_SYMBOL_GPL(sm3_zero_message_hash);
static int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- sm3_update(shash_desc_ctx(desc), data, len);
- return 0;
-}
-
-static int crypto_sm3_final(struct shash_desc *desc, u8 *out)
-{
- sm3_final(shash_desc_ctx(desc), out);
- return 0;
+ return sm3_base_do_update_blocks(desc, data, len, sm3_block_generic);
}
static int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash)
{
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- if (len)
- sm3_update(sctx, data, len);
- sm3_final(sctx, hash);
- return 0;
+ sm3_base_do_finup(desc, data, len, sm3_block_generic);
+ return sm3_base_finish(desc, hash);
}
static struct shash_alg sm3_alg = {
.digestsize = SM3_DIGEST_SIZE,
.init = sm3_base_init,
.update = crypto_sm3_update,
- .final = crypto_sm3_final,
.finup = crypto_sm3_finup,
- .descsize = sizeof(struct sm3_state),
+ .descsize = SM3_STATE_SIZE,
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -77,7 +62,7 @@ static void __exit sm3_generic_mod_fini(void)
crypto_unregister_shash(&sm3_alg);
}
-subsys_initcall(sm3_generic_mod_init);
+module_init(sm3_generic_mod_init);
module_exit(sm3_generic_mod_fini);
MODULE_LICENSE("GPL v2");
diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c
index 7df86369ac00..d57444e8428c 100644
--- a/crypto/sm4_generic.c
+++ b/crypto/sm4_generic.c
@@ -83,7 +83,7 @@ static void __exit sm4_fini(void)
crypto_unregister_alg(&sm4_alg);
}
-subsys_initcall(sm4_init);
+module_init(sm4_init);
module_exit(sm4_fini);
MODULE_DESCRIPTION("SM4 Cipher Algorithm");
diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c
index dc625ffc54ad..57bbf70f4c22 100644
--- a/crypto/streebog_generic.c
+++ b/crypto/streebog_generic.c
@@ -13,9 +13,10 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
#include <crypto/streebog.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
static const struct streebog_uint512 buffer0 = { {
0, 0, 0, 0, 0, 0, 0, 0
@@ -919,17 +920,6 @@ static int streebog_init(struct shash_desc *desc)
return 0;
}
-static void streebog_pad(struct streebog_state *ctx)
-{
- if (ctx->fillsize >= STREEBOG_BLOCK_SIZE)
- return;
-
- memset(ctx->buffer + ctx->fillsize, 0,
- sizeof(ctx->buffer) - ctx->fillsize);
-
- ctx->buffer[ctx->fillsize] = 1;
-}
-
static void streebog_add512(const struct streebog_uint512 *x,
const struct streebog_uint512 *y,
struct streebog_uint512 *r)
@@ -984,16 +974,23 @@ static void streebog_stage2(struct streebog_state *ctx, const u8 *data)
streebog_add512(&ctx->Sigma, &m, &ctx->Sigma);
}
-static void streebog_stage3(struct streebog_state *ctx)
+static void streebog_stage3(struct streebog_state *ctx, const u8 *src,
+ unsigned int len)
{
struct streebog_uint512 buf = { { 0 } };
+ union {
+ u8 buffer[STREEBOG_BLOCK_SIZE];
+ struct streebog_uint512 m;
+ } u = {};
- buf.qword[0] = cpu_to_le64(ctx->fillsize << 3);
- streebog_pad(ctx);
+ buf.qword[0] = cpu_to_le64(len << 3);
+ memcpy(u.buffer, src, len);
+ u.buffer[len] = 1;
- streebog_g(&ctx->h, &ctx->N, &ctx->m);
+ streebog_g(&ctx->h, &ctx->N, &u.m);
streebog_add512(&ctx->N, &buf, &ctx->N);
- streebog_add512(&ctx->Sigma, &ctx->m, &ctx->Sigma);
+ streebog_add512(&ctx->Sigma, &u.m, &ctx->Sigma);
+ memzero_explicit(&u, sizeof(u));
streebog_g(&ctx->h, &buffer0, &ctx->N);
streebog_g(&ctx->h, &buffer0, &ctx->Sigma);
memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512));
@@ -1003,42 +1000,22 @@ static int streebog_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct streebog_state *ctx = shash_desc_ctx(desc);
- size_t chunksize;
- if (ctx->fillsize) {
- chunksize = STREEBOG_BLOCK_SIZE - ctx->fillsize;
- if (chunksize > len)
- chunksize = len;
- memcpy(&ctx->buffer[ctx->fillsize], data, chunksize);
- ctx->fillsize += chunksize;
- len -= chunksize;
- data += chunksize;
-
- if (ctx->fillsize == STREEBOG_BLOCK_SIZE) {
- streebog_stage2(ctx, ctx->buffer);
- ctx->fillsize = 0;
- }
- }
-
- while (len >= STREEBOG_BLOCK_SIZE) {
+ do {
streebog_stage2(ctx, data);
data += STREEBOG_BLOCK_SIZE;
len -= STREEBOG_BLOCK_SIZE;
- }
+ } while (len >= STREEBOG_BLOCK_SIZE);
- if (len) {
- memcpy(&ctx->buffer, data, len);
- ctx->fillsize = len;
- }
- return 0;
+ return len;
}
-static int streebog_final(struct shash_desc *desc, u8 *digest)
+static int streebog_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *digest)
{
struct streebog_state *ctx = shash_desc_ctx(desc);
- streebog_stage3(ctx);
- ctx->fillsize = 0;
+ streebog_stage3(ctx, src, len);
if (crypto_shash_digestsize(desc->tfm) == STREEBOG256_DIGEST_SIZE)
memcpy(digest, &ctx->hash.qword[4], STREEBOG256_DIGEST_SIZE);
else
@@ -1050,11 +1027,12 @@ static struct shash_alg algs[2] = { {
.digestsize = STREEBOG256_DIGEST_SIZE,
.init = streebog_init,
.update = streebog_update,
- .final = streebog_final,
+ .finup = streebog_finup,
.descsize = sizeof(struct streebog_state),
.base = {
.cra_name = "streebog256",
.cra_driver_name = "streebog256-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = STREEBOG_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
@@ -1062,11 +1040,12 @@ static struct shash_alg algs[2] = { {
.digestsize = STREEBOG512_DIGEST_SIZE,
.init = streebog_init,
.update = streebog_update,
- .final = streebog_final,
+ .finup = streebog_finup,
.descsize = sizeof(struct streebog_state),
.base = {
.cra_name = "streebog512",
.cra_driver_name = "streebog512-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = STREEBOG_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1082,7 +1061,7 @@ static void __exit streebog_mod_fini(void)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
-subsys_initcall(streebog_mod_init);
+module_init(streebog_mod_init);
module_exit(streebog_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 96f4a66be14c..d1d88debbd71 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Quick & dirty crypto testing module.
+ * Quick & dirty crypto benchmarking module.
*
- * This will only exist until we have a better testing mechanism
+ * This will only exist until we have a better benchmarking mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
@@ -39,7 +39,7 @@
#include "tcrypt.h"
/*
- * Need slab memory for testing (size in number of pages).
+ * Need slab memory for benchmarking (size in number of pages).
*/
#define TVMEMSIZE 4
@@ -716,207 +716,6 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
return crypto_wait_req(ret, wait);
}
-struct test_mb_ahash_data {
- struct scatterlist sg[XBUFSIZE];
- char result[64];
- struct ahash_request *req;
- struct crypto_wait wait;
- char *xbuf[XBUFSIZE];
-};
-
-static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb,
- int *rc)
-{
- int i, err;
-
- /* Fire up a bunch of concurrent requests */
- err = crypto_ahash_digest(data[0].req);
-
- /* Wait for all requests to finish */
- err = crypto_wait_req(err, &data[0].wait);
- if (num_mb < 2)
- return err;
-
- for (i = 0; i < num_mb; i++) {
- rc[i] = ahash_request_err(data[i].req);
- if (rc[i]) {
- pr_info("concurrent request %d error %d\n", i, rc[i]);
- err = rc[i];
- }
- }
-
- return err;
-}
-
-static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
- int secs, u32 num_mb)
-{
- unsigned long start, end;
- int bcount;
- int ret = 0;
- int *rc;
-
- rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
- if (!rc)
- return -ENOMEM;
-
- for (start = jiffies, end = start + secs * HZ, bcount = 0;
- time_before(jiffies, end); bcount++) {
- ret = do_mult_ahash_op(data, num_mb, rc);
- if (ret)
- goto out;
- }
-
- pr_cont("%d operations in %d seconds (%llu bytes)\n",
- bcount * num_mb, secs, (u64)bcount * blen * num_mb);
-
-out:
- kfree(rc);
- return ret;
-}
-
-static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
- u32 num_mb)
-{
- unsigned long cycles = 0;
- int ret = 0;
- int i;
- int *rc;
-
- rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
- if (!rc)
- return -ENOMEM;
-
- /* Warm-up run. */
- for (i = 0; i < 4; i++) {
- ret = do_mult_ahash_op(data, num_mb, rc);
- if (ret)
- goto out;
- }
-
- /* The real thing. */
- for (i = 0; i < 8; i++) {
- cycles_t start, end;
-
- start = get_cycles();
- ret = do_mult_ahash_op(data, num_mb, rc);
- end = get_cycles();
-
- if (ret)
- goto out;
-
- cycles += end - start;
- }
-
- pr_cont("1 operation in %lu cycles (%d bytes)\n",
- (cycles + 4) / (8 * num_mb), blen);
-
-out:
- kfree(rc);
- return ret;
-}
-
-static void test_mb_ahash_speed(const char *algo, unsigned int secs,
- struct hash_speed *speed, u32 num_mb)
-{
- struct test_mb_ahash_data *data;
- struct crypto_ahash *tfm;
- unsigned int i, j, k;
- int ret;
-
- data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
- if (!data)
- return;
-
- tfm = crypto_alloc_ahash(algo, 0, 0);
- if (IS_ERR(tfm)) {
- pr_err("failed to load transform for %s: %ld\n",
- algo, PTR_ERR(tfm));
- goto free_data;
- }
-
- for (i = 0; i < num_mb; ++i) {
- if (testmgr_alloc_buf(data[i].xbuf))
- goto out;
-
- crypto_init_wait(&data[i].wait);
-
- data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!data[i].req) {
- pr_err("alg: hash: Failed to allocate request for %s\n",
- algo);
- goto out;
- }
-
-
- if (i) {
- ahash_request_set_callback(data[i].req, 0, NULL, NULL);
- ahash_request_chain(data[i].req, data[0].req);
- } else
- ahash_request_set_callback(data[0].req, 0,
- crypto_req_done,
- &data[0].wait);
-
- sg_init_table(data[i].sg, XBUFSIZE);
- for (j = 0; j < XBUFSIZE; j++) {
- sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE);
- memset(data[i].xbuf[j], 0xff, PAGE_SIZE);
- }
- }
-
- pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
- get_driver_name(crypto_ahash, tfm));
-
- for (i = 0; speed[i].blen != 0; i++) {
- /* For some reason this only tests digests. */
- if (speed[i].blen != speed[i].plen)
- continue;
-
- if (speed[i].blen > XBUFSIZE * PAGE_SIZE) {
- pr_err("template (%u) too big for tvmem (%lu)\n",
- speed[i].blen, XBUFSIZE * PAGE_SIZE);
- goto out;
- }
-
- if (klen)
- crypto_ahash_setkey(tfm, tvmem[0], klen);
-
- for (k = 0; k < num_mb; k++)
- ahash_request_set_crypt(data[k].req, data[k].sg,
- data[k].result, speed[i].blen);
-
- pr_info("test%3u "
- "(%5u byte blocks,%5u bytes per update,%4u updates): ",
- i, speed[i].blen, speed[i].plen,
- speed[i].blen / speed[i].plen);
-
- if (secs) {
- ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
- num_mb);
- cond_resched();
- } else {
- ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
- }
-
-
- if (ret) {
- pr_err("At least one hashing failed ret=%d\n", ret);
- break;
- }
- }
-
-out:
- ahash_request_free(data[0].req);
-
- for (k = 0; k < num_mb; ++k)
- testmgr_free_buf(data[k].xbuf);
-
- crypto_free_ahash(tfm);
-
-free_data:
- kfree(data);
-}
-
static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
char *out, int secs)
{
@@ -2584,36 +2383,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_ahash_speed("sm3", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
- case 450:
- test_mb_ahash_speed("sha1", sec, generic_hash_speed_template,
- num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 451:
- test_mb_ahash_speed("sha256", sec, generic_hash_speed_template,
- num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 452:
- test_mb_ahash_speed("sha512", sec, generic_hash_speed_template,
- num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 453:
- test_mb_ahash_speed("sm3", sec, generic_hash_speed_template,
- num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 454:
- test_mb_ahash_speed("streebog256", sec,
- generic_hash_speed_template, num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 455:
- test_mb_ahash_speed("streebog512", sec,
- generic_hash_speed_template, num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
case 499:
break;
@@ -3099,5 +2868,5 @@ module_param(klen, uint, 0);
MODULE_PARM_DESC(klen, "Key length (defaults to 0)");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Quick & dirty crypto testing module");
+MODULE_DESCRIPTION("Quick & dirty crypto benchmarking module");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 96c843a24607..7f938ac93e58 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Quick & dirty crypto testing module.
+ * Quick & dirty crypto benchmarking module.
*
- * This will only exist until we have a better testing mechanism
+ * This will only exist until we have a better benchmarking mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
diff --git a/crypto/tea.c b/crypto/tea.c
index b315da8c89eb..cb05140e3470 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -255,7 +255,7 @@ MODULE_ALIAS_CRYPTO("tea");
MODULE_ALIAS_CRYPTO("xtea");
MODULE_ALIAS_CRYPTO("xeta");
-subsys_initcall(tea_mod_init);
+module_init(tea_mod_init);
module_exit(tea_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index abd609d4c8ef..72005074a5c2 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -43,25 +43,17 @@ MODULE_IMPORT_NS("CRYPTO_INTERNAL");
static bool notests;
module_param(notests, bool, 0644);
-MODULE_PARM_DESC(notests, "disable crypto self-tests");
+MODULE_PARM_DESC(notests, "disable all crypto self-tests");
-static bool panic_on_fail;
-module_param(panic_on_fail, bool, 0444);
-
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
-static bool noextratests;
-module_param(noextratests, bool, 0644);
-MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests");
+static bool noslowtests;
+module_param(noslowtests, bool, 0644);
+MODULE_PARM_DESC(noslowtests, "disable slow crypto self-tests");
static unsigned int fuzz_iterations = 100;
module_param(fuzz_iterations, uint, 0644);
MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
-#endif
-
-/* Multibuffer is unlimited. Set arbitrary limit for testing. */
-#define MAX_MB_MSGS 16
-#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+#ifndef CONFIG_CRYPTO_SELFTESTS
/* a perfect nop */
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
@@ -327,10 +319,9 @@ struct testvec_config {
/*
* The following are the lists of testvec_configs to test for each algorithm
- * type when the basic crypto self-tests are enabled, i.e. when
- * CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is unset. They aim to provide good test
- * coverage, while keeping the test time much shorter than the full fuzz tests
- * so that the basic tests can be enabled in a wider range of circumstances.
+ * type when the fast crypto self-tests are enabled. They aim to provide good
+ * test coverage, while keeping the test time much shorter than the full tests
+ * so that the fast tests can be used to fulfill FIPS 140 testing requirements.
*/
/* Configs for skciphers and aeads */
@@ -879,8 +870,6 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
err; \
})
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
-
/*
* The fuzz tests use prandom instead of the normal Linux RNG since they don't
* need cryptographically secure random numbers. This greatly improves the
@@ -1245,15 +1234,6 @@ too_long:
algname);
return -ENAMETOOLONG;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static void crypto_disable_simd_for_test(void)
-{
-}
-
-static void crypto_reenable_simd_for_test(void)
-{
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int build_hash_sglist(struct test_sglist *tsgl,
const struct hash_testvec *vec,
@@ -1694,8 +1674,7 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
return err;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- if (!noextratests) {
+ if (!noslowtests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
@@ -1712,17 +1691,15 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
cond_resched();
}
}
-#endif
return 0;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/*
* Generate a hash test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
*/
static void generate_random_hash_testvec(struct rnd_state *rng,
- struct shash_desc *desc,
+ struct ahash_request *req,
struct hash_testvec *vec,
unsigned int maxkeysize,
unsigned int maxdatasize,
@@ -1744,16 +1721,17 @@ static void generate_random_hash_testvec(struct rnd_state *rng,
vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize);
generate_random_bytes(rng, (u8 *)vec->key, vec->ksize);
- vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
- vec->ksize);
+ vec->setkey_error = crypto_ahash_setkey(
+ crypto_ahash_reqtfm(req), vec->key, vec->ksize);
/* If the key couldn't be set, no need to continue to digest. */
if (vec->setkey_error)
goto done;
}
/* Digest */
- vec->digest_error = crypto_shash_digest(desc, vec->plaintext,
- vec->psize, (u8 *)vec->digest);
+ vec->digest_error = crypto_hash_digest(
+ crypto_ahash_reqtfm(req), vec->plaintext,
+ vec->psize, (u8 *)vec->digest);
done:
snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"",
vec->psize, vec->ksize);
@@ -1778,8 +1756,8 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
const char *driver = crypto_ahash_driver_name(tfm);
struct rnd_state rng;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
- struct crypto_shash *generic_tfm = NULL;
- struct shash_desc *generic_desc = NULL;
+ struct ahash_request *generic_req = NULL;
+ struct crypto_ahash *generic_tfm = NULL;
unsigned int i;
struct hash_testvec vec = { 0 };
char vec_name[64];
@@ -1787,7 +1765,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
char cfgname[TESTVEC_CONFIG_NAMELEN];
int err;
- if (noextratests)
+ if (noslowtests)
return 0;
init_rnd_state(&rng);
@@ -1802,7 +1780,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
return 0;
- generic_tfm = crypto_alloc_shash(generic_driver, 0, 0);
+ generic_tfm = crypto_alloc_ahash(generic_driver, 0, 0);
if (IS_ERR(generic_tfm)) {
err = PTR_ERR(generic_tfm);
if (err == -ENOENT) {
@@ -1821,27 +1799,25 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
goto out;
}
- generic_desc = kzalloc(sizeof(*desc) +
- crypto_shash_descsize(generic_tfm), GFP_KERNEL);
- if (!generic_desc) {
+ generic_req = ahash_request_alloc(generic_tfm, GFP_KERNEL);
+ if (!generic_req) {
err = -ENOMEM;
goto out;
}
- generic_desc->tfm = generic_tfm;
/* Check the algorithm properties for consistency. */
- if (digestsize != crypto_shash_digestsize(generic_tfm)) {
+ if (digestsize != crypto_ahash_digestsize(generic_tfm)) {
pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n",
driver, digestsize,
- crypto_shash_digestsize(generic_tfm));
+ crypto_ahash_digestsize(generic_tfm));
err = -EINVAL;
goto out;
}
- if (blocksize != crypto_shash_blocksize(generic_tfm)) {
+ if (blocksize != crypto_ahash_blocksize(generic_tfm)) {
pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n",
- driver, blocksize, crypto_shash_blocksize(generic_tfm));
+ driver, blocksize, crypto_ahash_blocksize(generic_tfm));
err = -EINVAL;
goto out;
}
@@ -1860,7 +1836,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
}
for (i = 0; i < fuzz_iterations * 8; i++) {
- generate_random_hash_testvec(&rng, generic_desc, &vec,
+ generate_random_hash_testvec(&rng, generic_req, &vec,
maxkeysize, maxdatasize,
vec_name, sizeof(vec_name));
generate_random_testvec_config(&rng, cfg, cfgname,
@@ -1878,21 +1854,10 @@ out:
kfree(vec.key);
kfree(vec.plaintext);
kfree(vec.digest);
- crypto_free_shash(generic_tfm);
- kfree_sensitive(generic_desc);
+ ahash_request_free(generic_req);
+ crypto_free_ahash(generic_tfm);
return err;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static int test_hash_vs_generic_impl(const char *generic_driver,
- unsigned int maxkeysize,
- struct ahash_request *req,
- struct shash_desc *desc,
- struct test_sglist *tsgl,
- u8 *hashstate)
-{
- return 0;
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int alloc_shash(const char *driver, u32 type, u32 mask,
struct crypto_shash **tfm_ret,
@@ -1903,7 +1868,7 @@ static int alloc_shash(const char *driver, u32 type, u32 mask,
tfm = crypto_alloc_shash(driver, type, mask);
if (IS_ERR(tfm)) {
- if (PTR_ERR(tfm) == -ENOENT) {
+ if (PTR_ERR(tfm) == -ENOENT || PTR_ERR(tfm) == -EEXIST) {
/*
* This algorithm is only available through the ahash
* API, not the shash API, so skip the shash tests.
@@ -2266,8 +2231,7 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
return err;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- if (!noextratests) {
+ if (!noslowtests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
@@ -2284,13 +2248,10 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
cond_resched();
}
}
-#endif
return 0;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
-
-struct aead_extra_tests_ctx {
+struct aead_slow_tests_ctx {
struct rnd_state rng;
struct aead_request *req;
struct crypto_aead *tfm;
@@ -2465,8 +2426,7 @@ static void generate_random_aead_testvec(struct rnd_state *rng,
vec->alen, vec->plen, authsize, vec->klen, vec->novrfy);
}
-static void try_to_generate_inauthentic_testvec(
- struct aead_extra_tests_ctx *ctx)
+static void try_to_generate_inauthentic_testvec(struct aead_slow_tests_ctx *ctx)
{
int i;
@@ -2485,7 +2445,7 @@ static void try_to_generate_inauthentic_testvec(
* Generate inauthentic test vectors (i.e. ciphertext, AAD pairs that aren't the
* result of an encryption with the key) and verify that decryption fails.
*/
-static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
+static int test_aead_inauthentic_inputs(struct aead_slow_tests_ctx *ctx)
{
unsigned int i;
int err;
@@ -2520,7 +2480,7 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
* Test the AEAD algorithm against the corresponding generic implementation, if
* one is available.
*/
-static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
+static int test_aead_vs_generic_impl(struct aead_slow_tests_ctx *ctx)
{
struct crypto_aead *tfm = ctx->tfm;
const char *algname = crypto_aead_alg(tfm)->base.cra_name;
@@ -2624,15 +2584,15 @@ out:
return err;
}
-static int test_aead_extra(const struct alg_test_desc *test_desc,
- struct aead_request *req,
- struct cipher_test_sglists *tsgls)
+static int test_aead_slow(const struct alg_test_desc *test_desc,
+ struct aead_request *req,
+ struct cipher_test_sglists *tsgls)
{
- struct aead_extra_tests_ctx *ctx;
+ struct aead_slow_tests_ctx *ctx;
unsigned int i;
int err;
- if (noextratests)
+ if (noslowtests)
return 0;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -2674,14 +2634,6 @@ out:
kfree(ctx);
return err;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static int test_aead_extra(const struct alg_test_desc *test_desc,
- struct aead_request *req,
- struct cipher_test_sglists *tsgls)
-{
- return 0;
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_aead(int enc, const struct aead_test_suite *suite,
struct aead_request *req,
@@ -2747,7 +2699,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
if (err)
goto out;
- err = test_aead_extra(desc, req, tsgls);
+ err = test_aead_slow(desc, req, tsgls);
out:
free_cipher_test_sglists(tsgls);
aead_request_free(req);
@@ -3021,8 +2973,7 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
return err;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- if (!noextratests) {
+ if (!noslowtests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
@@ -3039,11 +2990,9 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
cond_resched();
}
}
-#endif
return 0;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/*
* Generate a symmetric cipher test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
@@ -3126,7 +3075,7 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
char cfgname[TESTVEC_CONFIG_NAMELEN];
int err;
- if (noextratests)
+ if (noslowtests)
return 0;
init_rnd_state(&rng);
@@ -3242,14 +3191,6 @@ out:
skcipher_request_free(generic_req);
return err;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static int test_skcipher_vs_generic_impl(const char *generic_driver,
- struct skcipher_request *req,
- struct cipher_test_sglists *tsgls)
-{
- return 0;
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_skcipher(int enc, const struct cipher_test_suite *suite,
struct skcipher_request *req,
@@ -3329,48 +3270,27 @@ static int test_acomp(struct crypto_acomp *tfm,
int ctcount, int dtcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
- struct scatterlist *src = NULL, *dst = NULL;
- struct acomp_req *reqs[MAX_MB_MSGS] = {};
- char *decomp_out[MAX_MB_MSGS] = {};
- char *output[MAX_MB_MSGS] = {};
- struct crypto_wait wait;
- struct acomp_req *req;
- int ret = -ENOMEM;
unsigned int i;
+ char *output, *decomp_out;
+ int ret;
+ struct scatterlist src, dst;
+ struct acomp_req *req;
+ struct crypto_wait wait;
- src = kmalloc_array(MAX_MB_MSGS, sizeof(*src), GFP_KERNEL);
- if (!src)
- goto out;
- dst = kmalloc_array(MAX_MB_MSGS, sizeof(*dst), GFP_KERNEL);
- if (!dst)
- goto out;
-
- for (i = 0; i < MAX_MB_MSGS; i++) {
- reqs[i] = acomp_request_alloc(tfm);
- if (!reqs[i])
- goto out;
-
- acomp_request_set_callback(reqs[i],
- CRYPTO_TFM_REQ_MAY_SLEEP |
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &wait);
- if (i)
- acomp_request_chain(reqs[i], reqs[0]);
-
- output[i] = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!output[i])
- goto out;
+ output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
+ if (!output)
+ return -ENOMEM;
- decomp_out[i] = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!decomp_out[i])
- goto out;
+ decomp_out = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
+ if (!decomp_out) {
+ kfree(output);
+ return -ENOMEM;
}
for (i = 0; i < ctcount; i++) {
unsigned int dlen = COMP_BUF_SIZE;
int ilen = ctemplate[i].inlen;
void *input_vec;
- int j;
input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
if (!input_vec) {
@@ -3378,61 +3298,70 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
+ memset(output, 0, dlen);
crypto_init_wait(&wait);
- sg_init_one(src, input_vec, ilen);
+ sg_init_one(&src, input_vec, ilen);
+ sg_init_one(&dst, output, dlen);
- for (j = 0; j < MAX_MB_MSGS; j++) {
- sg_init_one(dst + j, output[j], dlen);
- acomp_request_set_params(reqs[j], src, dst + j, ilen, dlen);
+ req = acomp_request_alloc(tfm);
+ if (!req) {
+ pr_err("alg: acomp: request alloc failed for %s\n",
+ algo);
+ kfree(input_vec);
+ ret = -ENOMEM;
+ goto out;
}
- req = reqs[0];
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
+ acomp_request_free(req);
goto out;
}
ilen = req->dlen;
dlen = COMP_BUF_SIZE;
+ sg_init_one(&src, output, ilen);
+ sg_init_one(&dst, decomp_out, dlen);
crypto_init_wait(&wait);
- for (j = 0; j < MAX_MB_MSGS; j++) {
- sg_init_one(src + j, output[j], ilen);
- sg_init_one(dst + j, decomp_out[j], dlen);
- acomp_request_set_params(reqs[j], src + j, dst + j, ilen, dlen);
- }
-
- crypto_wait_req(crypto_acomp_decompress(req), &wait);
- for (j = 0; j < MAX_MB_MSGS; j++) {
- ret = reqs[j]->base.err;
- if (ret) {
- pr_err("alg: acomp: compression failed on test %d (%d) for %s: ret=%d\n",
- i + 1, j, algo, -ret);
- kfree(input_vec);
- goto out;
- }
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
- if (reqs[j]->dlen != ctemplate[i].inlen) {
- pr_err("alg: acomp: Compression test %d (%d) failed for %s: output len = %d\n",
- i + 1, j, algo, reqs[j]->dlen);
- ret = -EINVAL;
- kfree(input_vec);
- goto out;
- }
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
+ if (ret) {
+ pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
+ i + 1, algo, -ret);
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
- if (memcmp(input_vec, decomp_out[j], reqs[j]->dlen)) {
- pr_err("alg: acomp: Compression test %d (%d) failed for %s\n",
- i + 1, j, algo);
- hexdump(output[j], reqs[j]->dlen);
- ret = -EINVAL;
- kfree(input_vec);
- goto out;
- }
+ if (req->dlen != ctemplate[i].inlen) {
+ pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
+ i + 1, algo, req->dlen);
+ ret = -EINVAL;
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (memcmp(input_vec, decomp_out, req->dlen)) {
+ pr_err("alg: acomp: Compression test %d failed for %s\n",
+ i + 1, algo);
+ hexdump(output, req->dlen);
+ ret = -EINVAL;
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
}
kfree(input_vec);
+ acomp_request_free(req);
}
for (i = 0; i < dtcount; i++) {
@@ -3446,9 +3375,10 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
+ memset(output, 0, dlen);
crypto_init_wait(&wait);
- sg_init_one(src, input_vec, ilen);
- sg_init_one(dst, output[0], dlen);
+ sg_init_one(&src, input_vec, ilen);
+ sg_init_one(&dst, output, dlen);
req = acomp_request_alloc(tfm);
if (!req) {
@@ -3459,7 +3389,7 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
- acomp_request_set_params(req, src, dst, ilen, dlen);
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
@@ -3481,10 +3411,10 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
- if (memcmp(output[0], dtemplate[i].output, req->dlen)) {
+ if (memcmp(output, dtemplate[i].output, req->dlen)) {
pr_err("alg: acomp: Decompression test %d failed for %s\n",
i + 1, algo);
- hexdump(output[0], req->dlen);
+ hexdump(output, req->dlen);
ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req);
@@ -3498,13 +3428,8 @@ static int test_acomp(struct crypto_acomp *tfm,
ret = 0;
out:
- acomp_request_free(reqs[0]);
- for (i = 0; i < MAX_MB_MSGS; i++) {
- kfree(output[i]);
- kfree(decomp_out[i]);
- }
- kfree(dst);
- kfree(src);
+ kfree(decomp_out);
+ kfree(output);
return ret;
}
@@ -5426,12 +5351,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
- .alg = "poly1305",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(poly1305_tv_template)
- }
- }, {
.alg = "polyval",
.test = alg_test_hash,
.suite = {
@@ -5788,9 +5707,8 @@ static void testmgr_onetime_init(void)
alg_check_test_descs_order();
alg_check_testvec_configs();
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- pr_warn("alg: extra crypto tests enabled. This is intended for developer use only.\n");
-#endif
+ if (!noslowtests)
+ pr_warn("alg: full crypto tests enabled. This is intended for developer use only.\n");
}
static int alg_find_test(const char *alg)
@@ -5879,11 +5797,10 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
test_done:
if (rc) {
- if (fips_enabled || panic_on_fail) {
+ if (fips_enabled) {
fips_fail_notify();
- panic("alg: self-tests for %s (%s) failed in %s mode!\n",
- driver, alg,
- fips_enabled ? "fips" : "panic_on_fail");
+ panic("alg: self-tests for %s (%s) failed in fips mode!\n",
+ driver, alg);
}
pr_warn("alg: self-tests for %s using %s failed (rc=%d)",
alg, driver, rc);
@@ -5928,6 +5845,6 @@ non_fips_alg:
return alg_fips_disabled(driver, alg);
}
-#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
+#endif /* CONFIG_CRYPTO_SELFTESTS */
EXPORT_SYMBOL_GPL(alg_test);
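The rewritten test_acomp() above returns to a single-request round trip per vector: compress the input, decompress the result, and require both the output length and the bytes to match the original. A hedged, standalone sketch of that verification shape, using an identity transform as a stand-in codec (stub_compress() and round_trip() are illustrative names, not kernel API):

#include <stdio.h>
#include <string.h>

#define BUF 4096

/* Identity transform standing in for crypto_acomp_{compress,decompress}. */
static int stub_compress(const unsigned char *in, unsigned int ilen,
			 unsigned char *out, unsigned int *olen)
{
	if (ilen > *olen)
		return -1;
	memcpy(out, in, ilen);
	*olen = ilen;
	return 0;
}

#define stub_decompress stub_compress

/* Round-trip check in the shape of the rewritten test_acomp(). */
static int round_trip(const unsigned char *input, unsigned int ilen)
{
	unsigned char output[BUF], decomp[BUF];
	unsigned int clen = BUF, dlen = BUF;

	if (stub_compress(input, ilen, output, &clen))
		return -1;
	if (stub_decompress(output, clen, decomp, &dlen))
		return -1;
	if (dlen != ilen || memcmp(input, decomp, dlen))
		return -1;	/* corruption detected */
	return 0;
}

int main(void)
{
	unsigned char vec[100] = "hello";

	printf("round trip: %s\n", round_trip(vec, sizeof(vec)) ? "FAIL" : "ok");
	return 0;
}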
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index afc10af59b0a..32d099ac9e73 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -8836,294 +8836,6 @@ static const struct hash_testvec hmac_sha3_512_tv_template[] = {
},
};
-/*
- * Poly1305 test vectors from RFC7539 A.3.
- */
-
-static const struct hash_testvec poly1305_tv_template[] = {
- { /* Test Vector #1 */
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 96,
- .digest = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #2 */
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70"
- "\xf0\xef\xca\x96\x22\x7a\x86\x3e"
- "\x41\x6e\x79\x20\x73\x75\x62\x6d"
- "\x69\x73\x73\x69\x6f\x6e\x20\x74"
- "\x6f\x20\x74\x68\x65\x20\x49\x45"
- "\x54\x46\x20\x69\x6e\x74\x65\x6e"
- "\x64\x65\x64\x20\x62\x79\x20\x74"
- "\x68\x65\x20\x43\x6f\x6e\x74\x72"
- "\x69\x62\x75\x74\x6f\x72\x20\x66"
- "\x6f\x72\x20\x70\x75\x62\x6c\x69"
- "\x63\x61\x74\x69\x6f\x6e\x20\x61"
- "\x73\x20\x61\x6c\x6c\x20\x6f\x72"
- "\x20\x70\x61\x72\x74\x20\x6f\x66"
- "\x20\x61\x6e\x20\x49\x45\x54\x46"
- "\x20\x49\x6e\x74\x65\x72\x6e\x65"
- "\x74\x2d\x44\x72\x61\x66\x74\x20"
- "\x6f\x72\x20\x52\x46\x43\x20\x61"
- "\x6e\x64\x20\x61\x6e\x79\x20\x73"
- "\x74\x61\x74\x65\x6d\x65\x6e\x74"
- "\x20\x6d\x61\x64\x65\x20\x77\x69"
- "\x74\x68\x69\x6e\x20\x74\x68\x65"
- "\x20\x63\x6f\x6e\x74\x65\x78\x74"
- "\x20\x6f\x66\x20\x61\x6e\x20\x49"
- "\x45\x54\x46\x20\x61\x63\x74\x69"
- "\x76\x69\x74\x79\x20\x69\x73\x20"
- "\x63\x6f\x6e\x73\x69\x64\x65\x72"
- "\x65\x64\x20\x61\x6e\x20\x22\x49"
- "\x45\x54\x46\x20\x43\x6f\x6e\x74"
- "\x72\x69\x62\x75\x74\x69\x6f\x6e"
- "\x22\x2e\x20\x53\x75\x63\x68\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x63\x6c\x75"
- "\x64\x65\x20\x6f\x72\x61\x6c\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x20\x49\x45"
- "\x54\x46\x20\x73\x65\x73\x73\x69"
- "\x6f\x6e\x73\x2c\x20\x61\x73\x20"
- "\x77\x65\x6c\x6c\x20\x61\x73\x20"
- "\x77\x72\x69\x74\x74\x65\x6e\x20"
- "\x61\x6e\x64\x20\x65\x6c\x65\x63"
- "\x74\x72\x6f\x6e\x69\x63\x20\x63"
- "\x6f\x6d\x6d\x75\x6e\x69\x63\x61"
- "\x74\x69\x6f\x6e\x73\x20\x6d\x61"
- "\x64\x65\x20\x61\x74\x20\x61\x6e"
- "\x79\x20\x74\x69\x6d\x65\x20\x6f"
- "\x72\x20\x70\x6c\x61\x63\x65\x2c"
- "\x20\x77\x68\x69\x63\x68\x20\x61"
- "\x72\x65\x20\x61\x64\x64\x72\x65"
- "\x73\x73\x65\x64\x20\x74\x6f",
- .psize = 407,
- .digest = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70"
- "\xf0\xef\xca\x96\x22\x7a\x86\x3e",
- }, { /* Test Vector #3 */
- .plaintext = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70"
- "\xf0\xef\xca\x96\x22\x7a\x86\x3e"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x41\x6e\x79\x20\x73\x75\x62\x6d"
- "\x69\x73\x73\x69\x6f\x6e\x20\x74"
- "\x6f\x20\x74\x68\x65\x20\x49\x45"
- "\x54\x46\x20\x69\x6e\x74\x65\x6e"
- "\x64\x65\x64\x20\x62\x79\x20\x74"
- "\x68\x65\x20\x43\x6f\x6e\x74\x72"
- "\x69\x62\x75\x74\x6f\x72\x20\x66"
- "\x6f\x72\x20\x70\x75\x62\x6c\x69"
- "\x63\x61\x74\x69\x6f\x6e\x20\x61"
- "\x73\x20\x61\x6c\x6c\x20\x6f\x72"
- "\x20\x70\x61\x72\x74\x20\x6f\x66"
- "\x20\x61\x6e\x20\x49\x45\x54\x46"
- "\x20\x49\x6e\x74\x65\x72\x6e\x65"
- "\x74\x2d\x44\x72\x61\x66\x74\x20"
- "\x6f\x72\x20\x52\x46\x43\x20\x61"
- "\x6e\x64\x20\x61\x6e\x79\x20\x73"
- "\x74\x61\x74\x65\x6d\x65\x6e\x74"
- "\x20\x6d\x61\x64\x65\x20\x77\x69"
- "\x74\x68\x69\x6e\x20\x74\x68\x65"
- "\x20\x63\x6f\x6e\x74\x65\x78\x74"
- "\x20\x6f\x66\x20\x61\x6e\x20\x49"
- "\x45\x54\x46\x20\x61\x63\x74\x69"
- "\x76\x69\x74\x79\x20\x69\x73\x20"
- "\x63\x6f\x6e\x73\x69\x64\x65\x72"
- "\x65\x64\x20\x61\x6e\x20\x22\x49"
- "\x45\x54\x46\x20\x43\x6f\x6e\x74"
- "\x72\x69\x62\x75\x74\x69\x6f\x6e"
- "\x22\x2e\x20\x53\x75\x63\x68\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x63\x6c\x75"
- "\x64\x65\x20\x6f\x72\x61\x6c\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x20\x49\x45"
- "\x54\x46\x20\x73\x65\x73\x73\x69"
- "\x6f\x6e\x73\x2c\x20\x61\x73\x20"
- "\x77\x65\x6c\x6c\x20\x61\x73\x20"
- "\x77\x72\x69\x74\x74\x65\x6e\x20"
- "\x61\x6e\x64\x20\x65\x6c\x65\x63"
- "\x74\x72\x6f\x6e\x69\x63\x20\x63"
- "\x6f\x6d\x6d\x75\x6e\x69\x63\x61"
- "\x74\x69\x6f\x6e\x73\x20\x6d\x61"
- "\x64\x65\x20\x61\x74\x20\x61\x6e"
- "\x79\x20\x74\x69\x6d\x65\x20\x6f"
- "\x72\x20\x70\x6c\x61\x63\x65\x2c"
- "\x20\x77\x68\x69\x63\x68\x20\x61"
- "\x72\x65\x20\x61\x64\x64\x72\x65"
- "\x73\x73\x65\x64\x20\x74\x6f",
- .psize = 407,
- .digest = "\xf3\x47\x7e\x7c\xd9\x54\x17\xaf"
- "\x89\xa6\xb8\x79\x4c\x31\x0c\xf0",
- }, { /* Test Vector #4 */
- .plaintext = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
- "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
- "\x47\x39\x17\xc1\x40\x2b\x80\x09"
- "\x9d\xca\x5c\xbc\x20\x70\x75\xc0"
- "\x27\x54\x77\x61\x73\x20\x62\x72"
- "\x69\x6c\x6c\x69\x67\x2c\x20\x61"
- "\x6e\x64\x20\x74\x68\x65\x20\x73"
- "\x6c\x69\x74\x68\x79\x20\x74\x6f"
- "\x76\x65\x73\x0a\x44\x69\x64\x20"
- "\x67\x79\x72\x65\x20\x61\x6e\x64"
- "\x20\x67\x69\x6d\x62\x6c\x65\x20"
- "\x69\x6e\x20\x74\x68\x65\x20\x77"
- "\x61\x62\x65\x3a\x0a\x41\x6c\x6c"
- "\x20\x6d\x69\x6d\x73\x79\x20\x77"
- "\x65\x72\x65\x20\x74\x68\x65\x20"
- "\x62\x6f\x72\x6f\x67\x6f\x76\x65"
- "\x73\x2c\x0a\x41\x6e\x64\x20\x74"
- "\x68\x65\x20\x6d\x6f\x6d\x65\x20"
- "\x72\x61\x74\x68\x73\x20\x6f\x75"
- "\x74\x67\x72\x61\x62\x65\x2e",
- .psize = 159,
- .digest = "\x45\x41\x66\x9a\x7e\xaa\xee\x61"
- "\xe7\x08\xdc\x7c\xbc\xc5\xeb\x62",
- }, { /* Test Vector #5 */
- .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .psize = 48,
- .digest = "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #6 */
- .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 48,
- .digest = "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #7 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xf0\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\x11\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 80,
- .digest = "\x05\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #8 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xfb\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
- "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01",
- .psize = 80,
- .digest = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #9 */
- .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xfd\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .psize = 48,
- .digest = "\xfa\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- }, { /* Test Vector #10 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x04\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xe3\x35\x94\xd7\x50\x5e\x43\xb9"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x33\x94\xd7\x50\x5e\x43\x79\xcd"
- "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 96,
- .digest = "\x14\x00\x00\x00\x00\x00\x00\x00"
- "\x55\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #11 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x04\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xe3\x35\x94\xd7\x50\x5e\x43\xb9"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x33\x94\xd7\x50\x5e\x43\x79\xcd"
- "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 80,
- .digest = "\x13\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Regression test for overflow in AVX2 implementation */
- .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff",
- .psize = 300,
- .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
- "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
- }
-};
-
/* NHPoly1305 test vectors from https://github.com/google/adiantum */
static const struct hash_testvec nhpoly1305_tv_template[] = {
{
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 19f2b365e140..368018cfa9bf 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -187,7 +187,7 @@ static void __exit twofish_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(twofish_mod_init);
+module_init(twofish_mod_init);
module_exit(twofish_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 07994e5ebf4e..41f13d490333 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1169,7 +1169,7 @@ MODULE_ALIAS_CRYPTO("wp512");
MODULE_ALIAS_CRYPTO("wp384");
MODULE_ALIAS_CRYPTO("wp256");
-subsys_initcall(wp512_mod_init);
+module_init(wp512_mod_init);
module_exit(wp512_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index fc785667b134..6c5f6766fdd6 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -8,9 +8,12 @@
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
+#include <crypto/utils.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
0x02020202, 0x02020202, 0x02020202, 0x02020202,
@@ -30,22 +33,6 @@ struct xcbc_tfm_ctx {
u8 consts[];
};
-/*
- * +------------------------
- * | <shash desc>
- * +------------------------
- * | xcbc_desc_ctx
- * +------------------------
- * | odds (block size)
- * +------------------------
- * | prev (block size)
- * +------------------------
- */
-struct xcbc_desc_ctx {
- unsigned int len;
- u8 odds[];
-};
-
#define XCBC_BLOCKSIZE 16
static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
@@ -70,13 +57,10 @@ static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
static int crypto_xcbc_digest_init(struct shash_desc *pdesc)
{
- struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_blocksize(pdesc->tfm);
- u8 *prev = &ctx->odds[bs];
+ u8 *prev = shash_desc_ctx(pdesc);
- ctx->len = 0;
memset(prev, 0, bs);
-
return 0;
}
@@ -85,77 +69,36 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
{
struct crypto_shash *parent = pdesc->tfm;
struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = ctx->odds;
- u8 *prev = odds + bs;
-
- /* checking the data can fill the block */
- if ((ctx->len + len) <= bs) {
- memcpy(odds + ctx->len, p, len);
- ctx->len += len;
- return 0;
- }
-
- /* filling odds with new data and encrypting it */
- memcpy(odds + ctx->len, p, bs - ctx->len);
- len -= bs - ctx->len;
- p += bs - ctx->len;
-
- crypto_xor(prev, odds, bs);
- crypto_cipher_encrypt_one(tfm, prev, prev);
+ u8 *prev = shash_desc_ctx(pdesc);
- /* clearing the length */
- ctx->len = 0;
-
- /* encrypting the rest of data */
- while (len > bs) {
+ do {
crypto_xor(prev, p, bs);
crypto_cipher_encrypt_one(tfm, prev, prev);
p += bs;
len -= bs;
- }
-
- /* keeping the surplus of blocksize */
- if (len) {
- memcpy(odds, p, len);
- ctx->len = len;
- }
-
- return 0;
+ } while (len >= bs);
+ return len;
}
-static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out)
+static int crypto_xcbc_digest_finup(struct shash_desc *pdesc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = ctx->odds;
- u8 *prev = odds + bs;
+ u8 *prev = shash_desc_ctx(pdesc);
unsigned int offset = 0;
- if (ctx->len != bs) {
- unsigned int rlen;
- u8 *p = odds + ctx->len;
-
- *p = 0x80;
- p++;
-
- rlen = bs - ctx->len -1;
- if (rlen)
- memset(p, 0, rlen);
-
+ crypto_xor(prev, src, len);
+ if (len != bs) {
+ prev[len] ^= 0x80;
offset += bs;
}
-
- crypto_xor(prev, odds, bs);
crypto_xor(prev, &tctx->consts[offset], bs);
-
crypto_cipher_encrypt_one(tfm, out, prev);
-
return 0;
}
@@ -216,17 +159,18 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_ctxsize = sizeof(struct xcbc_tfm_ctx) +
alg->cra_blocksize * 2;
+ inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize = sizeof(struct xcbc_desc_ctx) +
- alg->cra_blocksize * 2;
+ inst->alg.descsize = alg->cra_blocksize;
inst->alg.base.cra_init = xcbc_init_tfm;
inst->alg.base.cra_exit = xcbc_exit_tfm;
inst->alg.init = crypto_xcbc_digest_init;
inst->alg.update = crypto_xcbc_digest_update;
- inst->alg.final = crypto_xcbc_digest_final;
+ inst->alg.finup = crypto_xcbc_digest_finup;
inst->alg.setkey = crypto_xcbc_digest_setkey;
inst->free = shash_free_singlespawn_instance;
@@ -255,7 +199,7 @@ static void __exit crypto_xcbc_module_exit(void)
crypto_unregister_template(&crypto_xcbc_tmpl);
}
-subsys_initcall(crypto_xcbc_module_init);
+module_init(crypto_xcbc_module_init);
module_exit(crypto_xcbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/xctr.c b/crypto/xctr.c
index 9c536ab6d2e5..607ab82cb19b 100644
--- a/crypto/xctr.c
+++ b/crypto/xctr.c
@@ -182,7 +182,7 @@ static void __exit crypto_xctr_module_exit(void)
crypto_unregister_template(&crypto_xctr_tmpl);
}
-subsys_initcall(crypto_xctr_module_init);
+module_init(crypto_xctr_module_init);
module_exit(crypto_xctr_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/xts.c b/crypto/xts.c
index 31529c9ef08f..3da8f5e053d6 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -363,7 +363,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
cipher_name, 0, mask);
- if (err == -ENOENT) {
+ if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) {
err = -ENAMETOOLONG;
if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
@@ -397,7 +397,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
/* Alas we screwed up the naming so we have to mangle the
* cipher name.
*/
- if (!strncmp(cipher_name, "ecb(", 4)) {
+ if (!memcmp(cipher_name, "ecb(", 4)) {
int len;
len = strscpy(name, cipher_name + 4, sizeof(name));
@@ -466,7 +466,7 @@ static void __exit xts_module_exit(void)
crypto_unregister_template(&xts_tmpl);
}
-subsys_initcall(xts_module_init);
+module_init(xts_module_init);
module_exit(xts_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/xxhash_generic.c b/crypto/xxhash_generic.c
index ac206ad4184d..175bb7ae0fcd 100644
--- a/crypto/xxhash_generic.c
+++ b/crypto/xxhash_generic.c
@@ -96,7 +96,7 @@ static void __exit xxhash_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(xxhash_mod_init);
+module_init(xxhash_mod_init);
module_exit(xxhash_mod_fini);
MODULE_AUTHOR("Nikolay Borisov <nborisov@suse.com>");
diff --git a/crypto/zstd.c b/crypto/zstd.c
index 90bb4f36f846..7570e11b4ee6 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -196,7 +196,7 @@ static void __exit zstd_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(zstd_mod_init);
+module_init(zstd_mod_init);
module_exit(zstd_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO
index 5119bccd1917..ad8ac6e315b6 100644
--- a/drivers/accel/amdxdna/TODO
+++ b/drivers/accel/amdxdna/TODO
@@ -1,3 +1,2 @@
-- Add import and export BO support
- Add debugfs support
- Add debug BO support
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index 00d215ac866e..e04549f64d69 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -758,27 +758,42 @@ int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *bu
static int aie2_populate_range(struct amdxdna_gem_obj *abo)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
- struct mm_struct *mm = abo->mem.notifier.mm;
- struct hmm_range range = { 0 };
+ struct amdxdna_umap *mapp;
unsigned long timeout;
+ struct mm_struct *mm;
+ bool found;
int ret;
- XDNA_INFO_ONCE(xdna, "populate memory range %llx size %lx",
- abo->mem.userptr, abo->mem.size);
- range.notifier = &abo->mem.notifier;
- range.start = abo->mem.userptr;
- range.end = abo->mem.userptr + abo->mem.size;
- range.hmm_pfns = abo->mem.pfns;
- range.default_flags = HMM_PFN_REQ_FAULT;
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+again:
+ found = false;
+ down_write(&xdna->notifier_lock);
+ list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+ if (mapp->invalid) {
+ found = true;
+ break;
+ }
+ }
- if (!mmget_not_zero(mm))
+ if (!found) {
+ abo->mem.map_invalid = false;
+ up_write(&xdna->notifier_lock);
+ return 0;
+ }
+ kref_get(&mapp->refcnt);
+ up_write(&xdna->notifier_lock);
+
+ XDNA_DBG(xdna, "populate memory range %lx %lx",
+ mapp->vma->vm_start, mapp->vma->vm_end);
+ mm = mapp->notifier.mm;
+ if (!mmget_not_zero(mm)) {
+ amdxdna_umap_put(mapp);
return -EFAULT;
+ }
- timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
-again:
- range.notifier_seq = mmu_interval_read_begin(&abo->mem.notifier);
+ mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier);
mmap_read_lock(mm);
- ret = hmm_range_fault(&range);
+ ret = hmm_range_fault(&mapp->range);
mmap_read_unlock(mm);
if (ret) {
if (time_after(jiffies, timeout)) {
@@ -786,21 +801,27 @@ again:
goto put_mm;
}
- if (ret == -EBUSY)
+ if (ret == -EBUSY) {
+ amdxdna_umap_put(mapp);
goto again;
+ }
goto put_mm;
}
- down_read(&xdna->notifier_lock);
- if (mmu_interval_read_retry(&abo->mem.notifier, range.notifier_seq)) {
- up_read(&xdna->notifier_lock);
+ down_write(&xdna->notifier_lock);
+ if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) {
+ up_write(&xdna->notifier_lock);
+ amdxdna_umap_put(mapp);
goto again;
}
- abo->mem.map_invalid = false;
- up_read(&xdna->notifier_lock);
+ mapp->invalid = false;
+ up_write(&xdna->notifier_lock);
+ amdxdna_umap_put(mapp);
+ goto again;
put_mm:
+ amdxdna_umap_put(mapp);
mmput(mm);
return ret;
}
@@ -908,10 +929,6 @@ void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
struct drm_gem_object *gobj = to_gobj(abo);
long ret;
- down_write(&xdna->notifier_lock);
- abo->mem.map_invalid = true;
- mmu_interval_set_seq(&abo->mem.notifier, cur_seq);
- up_write(&xdna->notifier_lock);
ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
true, MAX_SCHEDULE_TIMEOUT);
if (!ret || ret == -ERESTARTSYS)
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
index bf4219e32cc1..82412eec9a4b 100644
--- a/drivers/accel/amdxdna/aie2_message.c
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -525,7 +525,7 @@ aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset,
if (!payload)
return -EINVAL;
- if (!slot_cf_has_space(offset, payload_len))
+ if (!slot_has_space(*buf, offset, payload_len))
return -ENOSPC;
buf->cu_idx = cu_idx;
@@ -558,7 +558,7 @@ aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
return -EINVAL;
- if (!slot_dpu_has_space(offset, arg_sz))
+ if (!slot_has_space(*buf, offset, arg_sz))
return -ENOSPC;
buf->inst_buf_addr = sn->buffer;
@@ -569,7 +569,7 @@ aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
memcpy(buf->args, sn->prop_args, arg_sz);
/* Accurate buf size to hint firmware to do necessary copy */
- *size += sizeof(*buf) + arg_sz;
+ *size = sizeof(*buf) + arg_sz;
return 0;
}
diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h
index 4e02e744b470..6df9065b13f6 100644
--- a/drivers/accel/amdxdna/aie2_msg_priv.h
+++ b/drivers/accel/amdxdna/aie2_msg_priv.h
@@ -319,18 +319,16 @@ struct async_event_msg_resp {
} __packed;
#define MAX_CHAIN_CMDBUF_SIZE SZ_4K
-#define slot_cf_has_space(offset, payload_size) \
- (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
- offsetof(struct cmd_chain_slot_execbuf_cf, args[0]))
+#define slot_has_space(slot, offset, payload_size) \
+ (MAX_CHAIN_CMDBUF_SIZE >= (offset) + (payload_size) + \
+ sizeof(typeof(slot)))
+
struct cmd_chain_slot_execbuf_cf {
__u32 cu_idx;
__u32 arg_cnt;
__u32 args[] __counted_by(arg_cnt);
};
-#define slot_dpu_has_space(offset, payload_size) \
- (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
- offsetof(struct cmd_chain_slot_dpu, args[0]))
struct cmd_chain_slot_dpu {
__u64 inst_buf_addr;
__u32 inst_size;
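Both aie2_cmdlist_fill_one_slot_*() callers now share the single slot_has_space() macro above, which is written as "capacity >= offset + payload + slot header" instead of the old "capacity - (offset + payload) > header" form; with unsigned operands the old subtraction can wrap around when offset plus payload already exceeds the buffer and report space that is not there. A small standalone sketch of the two forms (the 4 KiB capacity matches MAX_CHAIN_CMDBUF_SIZE, the 16-byte header size is made up for illustration):

#include <stdbool.h>
#include <stdio.h>

#define BUF_SIZE	4096u
#define HDR_SIZE	16u	/* stand-in for the slot header size */

/* Old style: subtract first - wraps around for oversized requests. */
static bool has_space_sub(unsigned int offset, unsigned int payload)
{
	return BUF_SIZE - (offset + payload) > HDR_SIZE;
}

/* New style: sum on the right, compared against the capacity. */
static bool has_space_add(unsigned int offset, unsigned int payload)
{
	return BUF_SIZE >= offset + payload + HDR_SIZE;
}

int main(void)
{
	/* A request far larger than the buffer. */
	unsigned int offset = 4000, payload = 500;

	printf("subtract form says: %d\n", has_space_sub(offset, payload));
	printf("add form says:      %d\n", has_space_add(offset, payload));
	return 0;
}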
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index 5a058e565b01..c6cf7068d23c 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -512,12 +512,6 @@ static int aie2_init(struct amdxdna_dev *xdna)
goto release_fw;
}
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- if (ret) {
- XDNA_ERR(xdna, "Enable PASID failed, ret %d", ret);
- goto free_irq;
- }
-
psp_conf.fw_size = fw->size;
psp_conf.fw_buf = fw->data;
for (i = 0; i < PSP_MAX_REGS; i++)
@@ -526,14 +520,14 @@ static int aie2_init(struct amdxdna_dev *xdna)
if (!ndev->psp_hdl) {
XDNA_ERR(xdna, "failed to create psp");
ret = -ENOMEM;
- goto disable_sva;
+ goto free_irq;
}
xdna->dev_handle = ndev;
ret = aie2_hw_start(xdna);
if (ret) {
XDNA_ERR(xdna, "start npu failed, ret %d", ret);
- goto disable_sva;
+ goto free_irq;
}
ret = aie2_mgmt_fw_query(ndev);
@@ -584,8 +578,6 @@ async_event_free:
aie2_error_async_events_free(ndev);
stop_hw:
aie2_hw_stop(xdna);
-disable_sva:
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
free_irq:
pci_free_irq_vectors(pdev);
release_fw:
@@ -601,7 +593,6 @@ static void aie2_fini(struct amdxdna_dev *xdna)
aie2_hw_stop(xdna);
aie2_error_async_events_free(ndev);
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
pci_free_irq_vectors(pdev);
}
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
index 43442b9e273b..be073224bd69 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.c
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -496,11 +496,11 @@ static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
struct amdxdna_drm_exec_cmd *args)
{
struct amdxdna_dev *xdna = client->xdna;
- u32 *arg_bo_hdls;
+ u32 *arg_bo_hdls = NULL;
u32 cmd_bo_hdl;
int ret;
- if (!args->arg_count || args->arg_count > MAX_ARG_COUNT) {
+ if (args->arg_count > MAX_ARG_COUNT) {
XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
return -EINVAL;
}
@@ -512,14 +512,16 @@ static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
}
cmd_bo_hdl = (u32)args->cmd_handles;
- arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
- if (!arg_bo_hdls)
- return -ENOMEM;
- ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
- args->arg_count * sizeof(u32));
- if (ret) {
- ret = -EFAULT;
- goto free_cmd_bo_hdls;
+ if (args->arg_count) {
+ arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
+ if (!arg_bo_hdls)
+ return -ENOMEM;
+ ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
+ args->arg_count * sizeof(u32));
+ if (ret) {
+ ret = -EFAULT;
+ goto free_cmd_bo_hdls;
+ }
}
ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls,
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index 606433d73236..26831ec69f89 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -9,7 +9,10 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
#include <linux/iosys-map.h>
+#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include "amdxdna_ctx.h"
@@ -18,6 +21,8 @@
#define XDNA_MAX_CMD_BO_SIZE SZ_32K
+MODULE_IMPORT_NS("DMA_BUF");
+
static int
amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
{
@@ -55,57 +60,38 @@ amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
return 0;
}
-static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
-{
- struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
- struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
-
- XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
- if (abo->pinned)
- amdxdna_gem_unpin(abo);
-
- if (abo->type == AMDXDNA_BO_DEV) {
- mutex_lock(&abo->client->mm_lock);
- drm_mm_remove_node(&abo->mm_node);
- mutex_unlock(&abo->client->mm_lock);
-
- vunmap(abo->mem.kva);
- drm_gem_object_put(to_gobj(abo->dev_heap));
- drm_gem_object_release(gobj);
- mutex_destroy(&abo->lock);
- kfree(abo);
- return;
- }
-
- if (abo->type == AMDXDNA_BO_DEV_HEAP)
- drm_mm_takedown(&abo->mm);
-
- drm_gem_vunmap_unlocked(gobj, &map);
- mutex_destroy(&abo->lock);
- drm_gem_shmem_free(&abo->base);
-}
-
-static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
- .free = amdxdna_gem_obj_free,
-};
-
static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
- struct amdxdna_gem_obj *abo = container_of(mni, struct amdxdna_gem_obj,
- mem.notifier);
- struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier);
+ struct amdxdna_gem_obj *abo = mapp->abo;
+ struct amdxdna_dev *xdna;
- XDNA_DBG(xdna, "Invalid range 0x%llx, 0x%lx, type %d",
- abo->mem.userptr, abo->mem.size, abo->type);
+ xdna = to_xdna_dev(to_gobj(abo)->dev);
+ XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d",
+ mapp->vma->vm_start, mapp->vma->vm_end, abo->type);
if (!mmu_notifier_range_blockable(range))
return false;
+ down_write(&xdna->notifier_lock);
+ abo->mem.map_invalid = true;
+ mapp->invalid = true;
+ mmu_interval_set_seq(&mapp->notifier, cur_seq);
+ up_write(&xdna->notifier_lock);
+
xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);
+ if (range->event == MMU_NOTIFY_UNMAP) {
+ down_write(&xdna->notifier_lock);
+ if (!mapp->unmapped) {
+ queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+ mapp->unmapped = true;
+ }
+ up_write(&xdna->notifier_lock);
+ }
+
return true;
}
@@ -113,102 +99,310 @@ static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
.invalidate = amdxdna_hmm_invalidate,
};
-static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo)
+static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct amdxdna_umap *mapp;
+
+ down_read(&xdna->notifier_lock);
+ list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+ if (!vma || mapp->vma == vma) {
+ if (!mapp->unmapped) {
+ queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+ mapp->unmapped = true;
+ }
+ if (vma)
+ break;
+ }
+ }
+ up_read(&xdna->notifier_lock);
+}
- if (!xdna->dev_info->ops->hmm_invalidate)
- return;
+static void amdxdna_umap_release(struct kref *ref)
+{
+ struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt);
+ struct vm_area_struct *vma = mapp->vma;
+ struct amdxdna_dev *xdna;
+
+ mmu_interval_notifier_remove(&mapp->notifier);
+ if (is_import_bo(mapp->abo) && vma->vm_file && vma->vm_file->f_mapping)
+ mapping_clear_unevictable(vma->vm_file->f_mapping);
+
+ xdna = to_xdna_dev(to_gobj(mapp->abo)->dev);
+ down_write(&xdna->notifier_lock);
+ list_del(&mapp->node);
+ up_write(&xdna->notifier_lock);
+
+ kvfree(mapp->range.hmm_pfns);
+ kfree(mapp);
+}
+
+void amdxdna_umap_put(struct amdxdna_umap *mapp)
+{
+ kref_put(&mapp->refcnt, amdxdna_umap_release);
+}
+
+static void amdxdna_hmm_unreg_work(struct work_struct *work)
+{
+ struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap,
+ hmm_unreg_work);
- mmu_interval_notifier_remove(&abo->mem.notifier);
- kvfree(abo->mem.pfns);
- abo->mem.pfns = NULL;
+ amdxdna_umap_put(mapp);
}
-static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, unsigned long addr,
- size_t len)
+static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ unsigned long len = vma->vm_end - vma->vm_start;
+ unsigned long addr = vma->vm_start;
+ struct amdxdna_umap *mapp;
u32 nr_pages;
int ret;
if (!xdna->dev_info->ops->hmm_invalidate)
return 0;
- if (abo->mem.pfns)
- return -EEXIST;
+ mapp = kzalloc(sizeof(*mapp), GFP_KERNEL);
+ if (!mapp)
+ return -ENOMEM;
nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
- abo->mem.pfns = kvcalloc(nr_pages, sizeof(*abo->mem.pfns),
- GFP_KERNEL);
- if (!abo->mem.pfns)
- return -ENOMEM;
+ mapp->range.hmm_pfns = kvcalloc(nr_pages, sizeof(*mapp->range.hmm_pfns),
+ GFP_KERNEL);
+ if (!mapp->range.hmm_pfns) {
+ ret = -ENOMEM;
+ goto free_map;
+ }
- ret = mmu_interval_notifier_insert_locked(&abo->mem.notifier,
+ ret = mmu_interval_notifier_insert_locked(&mapp->notifier,
current->mm,
addr,
len,
&amdxdna_hmm_ops);
if (ret) {
XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
- kvfree(abo->mem.pfns);
+ goto free_pfns;
}
- abo->mem.userptr = addr;
+ mapp->range.notifier = &mapp->notifier;
+ mapp->range.start = vma->vm_start;
+ mapp->range.end = vma->vm_end;
+ mapp->range.default_flags = HMM_PFN_REQ_FAULT;
+ mapp->vma = vma;
+ mapp->abo = abo;
+ kref_init(&mapp->refcnt);
+
+ if (abo->mem.userptr == AMDXDNA_INVALID_ADDR)
+ abo->mem.userptr = addr;
+ INIT_WORK(&mapp->hmm_unreg_work, amdxdna_hmm_unreg_work);
+ if (is_import_bo(abo) && vma->vm_file && vma->vm_file->f_mapping)
+ mapping_set_unevictable(vma->vm_file->f_mapping);
+
+ down_write(&xdna->notifier_lock);
+ list_add_tail(&mapp->node, &abo->mem.umap_list);
+ up_write(&xdna->notifier_lock);
+
+ return 0;
+
+free_pfns:
+ kvfree(mapp->range.hmm_pfns);
+free_map:
+ kfree(mapp);
return ret;
}
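As a worked example of the nr_pages computation used by amdxdna_hmm_register() above, assuming 4 KiB pages (PAGE_SHIFT = 12); the range values are made up for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12                               /* assuming 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long addr = 0x1000f00UL;           /* range start, not page aligned */
        unsigned long len  = 0x200UL;               /* 512 bytes crossing one page boundary */
        unsigned long nr_pages =
                (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;

        printf("nr_pages = %lu\n", nr_pages);       /* prints 2: both touched pages get a PFN slot */
        return 0;
}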
+static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ unsigned long num_pages = vma_pages(vma);
+ unsigned long offset = 0;
+ int ret;
+
+ if (!is_import_bo(abo)) {
+ ret = drm_gem_shmem_mmap(&abo->base, vma);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed shmem mmap %d", ret);
+ return ret;
+ }
+
+ /* The buffer is based on memory pages. Fix the flag. */
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+ &num_pages);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed insert pages %d", ret);
+ vma->vm_ops->close(vma);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ vma->vm_private_data = NULL;
+ vma->vm_ops = NULL;
+ ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
+ return ret;
+ }
+
+ do {
+ vm_fault_t fault_ret;
+
+ fault_ret = handle_mm_fault(vma, vma->vm_start + offset,
+ FAULT_FLAG_WRITE, NULL);
+ if (fault_ret & VM_FAULT_ERROR) {
+ vma->vm_ops->close(vma);
+ XDNA_ERR(xdna, "Fault in page failed");
+ return -EFAULT;
+ }
+
+ offset += PAGE_SIZE;
+ } while (--num_pages);
+
+ /* Drop the reference drm_gem_mmap_obj() acquired.
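+	 * The get was taken when drm_gem_shmem_vm_ops was bound to this VMA;
+	 * after faulting in via dma_buf_mmap() the extra count is not needed. */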
+ drm_gem_object_put(to_gobj(abo));
+
+ return 0;
+}
+
static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
struct vm_area_struct *vma)
{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
- unsigned long num_pages;
int ret;
- ret = amdxdna_hmm_register(abo, vma->vm_start, gobj->size);
+ ret = amdxdna_hmm_register(abo, vma);
if (ret)
return ret;
+ ret = amdxdna_insert_pages(abo, vma);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed insert pages, ret %d", ret);
+ goto hmm_unreg;
+ }
+
+ XDNA_DBG(xdna, "BO map_offset 0x%llx type %d userptr 0x%lx size 0x%lx",
+ drm_vma_node_offset_addr(&gobj->vma_node), abo->type,
+ vma->vm_start, gobj->size);
+ return 0;
+
+hmm_unreg:
+ amdxdna_hmm_unregister(abo, vma);
+ return ret;
+}
+
+static int amdxdna_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gobj = dma_buf->priv;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ unsigned long num_pages = vma_pages(vma);
+ int ret;
+
+ vma->vm_ops = &drm_gem_shmem_vm_ops;
+ vma->vm_private_data = gobj;
+
+ drm_gem_object_get(gobj);
ret = drm_gem_shmem_mmap(&abo->base, vma);
if (ret)
- goto hmm_unreg;
+ goto put_obj;
- num_pages = gobj->size >> PAGE_SHIFT;
- /* Try to insert the pages */
+ /* The buffer is based on memory pages. Fix the flag. */
vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
- ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, &num_pages);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+ &num_pages);
if (ret)
- XDNA_ERR(abo->client->xdna, "Failed insert pages, ret %d", ret);
+ goto close_vma;
return 0;
-hmm_unreg:
- amdxdna_hmm_unregister(abo);
+close_vma:
+ vma->vm_ops->close(vma);
+put_obj:
+ drm_gem_object_put(gobj);
return ret;
}
-static vm_fault_t amdxdna_gem_vm_fault(struct vm_fault *vmf)
+static const struct dma_buf_ops amdxdna_dmabuf_ops = {
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .mmap = amdxdna_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
{
- return drm_gem_shmem_vm_ops.fault(vmf);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &amdxdna_dmabuf_ops;
+ exp_info.size = gobj->size;
+ exp_info.flags = flags;
+ exp_info.priv = gobj;
+ exp_info.resv = gobj->resv;
+
+ return drm_gem_dmabuf_export(gobj->dev, &exp_info);
}
-static void amdxdna_gem_vm_open(struct vm_area_struct *vma)
+static void amdxdna_imported_obj_free(struct amdxdna_gem_obj *abo)
{
- drm_gem_shmem_vm_ops.open(vma);
+ dma_buf_unmap_attachment_unlocked(abo->attach, abo->base.sgt, DMA_BIDIRECTIONAL);
+ dma_buf_detach(abo->dma_buf, abo->attach);
+ dma_buf_put(abo->dma_buf);
+ drm_gem_object_release(to_gobj(abo));
+ kfree(abo);
}
-static void amdxdna_gem_vm_close(struct vm_area_struct *vma)
+static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
{
- struct drm_gem_object *gobj = vma->vm_private_data;
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+
+ amdxdna_hmm_unregister(abo, NULL);
+ flush_workqueue(xdna->notifier_wq);
+
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
- amdxdna_hmm_unregister(to_xdna_obj(gobj));
- drm_gem_shmem_vm_ops.close(vma);
+ if (abo->type == AMDXDNA_BO_DEV) {
+ mutex_lock(&abo->client->mm_lock);
+ drm_mm_remove_node(&abo->mm_node);
+ mutex_unlock(&abo->client->mm_lock);
+
+ vunmap(abo->mem.kva);
+ drm_gem_object_put(to_gobj(abo->dev_heap));
+ drm_gem_object_release(gobj);
+ mutex_destroy(&abo->lock);
+ kfree(abo);
+ return;
+ }
+
+ if (abo->type == AMDXDNA_BO_DEV_HEAP)
+ drm_mm_takedown(&abo->mm);
+
+ drm_gem_vunmap(gobj, &map);
+ mutex_destroy(&abo->lock);
+
+ if (is_import_bo(abo)) {
+ amdxdna_imported_obj_free(abo);
+ return;
+ }
+
+ drm_gem_shmem_free(&abo->base);
}
-static const struct vm_operations_struct amdxdna_gem_vm_ops = {
- .fault = amdxdna_gem_vm_fault,
- .open = amdxdna_gem_vm_open,
- .close = amdxdna_gem_vm_close,
+static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
+ .free = amdxdna_gem_obj_free,
};
static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
@@ -220,7 +414,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = amdxdna_gem_obj_mmap,
- .vm_ops = &amdxdna_gem_vm_ops,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+ .export = amdxdna_gem_prime_export,
};
static struct amdxdna_gem_obj *
@@ -239,6 +434,7 @@ amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
abo->mem.userptr = AMDXDNA_INVALID_ADDR;
abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
abo->mem.size = size;
+ INIT_LIST_HEAD(&abo->mem.umap_list);
return abo;
}
@@ -258,6 +454,51 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
return to_gobj(abo);
}
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ struct sg_table *sgt;
+ int ret;
+
+ get_dma_buf(dma_buf);
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach)) {
+ ret = PTR_ERR(attach);
+ goto put_buf;
+ }
+
+ sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ gobj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(gobj)) {
+ ret = PTR_ERR(gobj);
+ goto fail_unmap;
+ }
+
+ abo = to_xdna_obj(gobj);
+ abo->attach = attach;
+ abo->dma_buf = dma_buf;
+
+ return gobj;
+
+fail_unmap:
+ dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+put_buf:
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+
static struct amdxdna_gem_obj *
amdxdna_drm_alloc_shmem(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
@@ -417,7 +658,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
abo->type = AMDXDNA_BO_CMD;
abo->client = filp->driver_priv;
- ret = drm_gem_vmap_unlocked(to_gobj(abo), &map);
+ ret = drm_gem_vmap(to_gobj(abo), &map);
if (ret) {
XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
goto release_obj;
@@ -483,6 +724,9 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
int ret;
+ if (is_import_bo(abo))
+ return 0;
+
switch (abo->type) {
case AMDXDNA_BO_SHMEM:
case AMDXDNA_BO_DEV_HEAP:
@@ -515,6 +759,9 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
{
+ if (is_import_bo(abo))
+ return;
+
if (abo->type == AMDXDNA_BO_DEV)
abo = abo->dev_heap;
@@ -606,7 +853,9 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
goto put_obj;
}
- if (abo->type == AMDXDNA_BO_DEV)
+ if (is_import_bo(abo))
+ drm_clflush_sg(abo->base.sgt);
+ else if (abo->type == AMDXDNA_BO_DEV)
drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
else
drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
index 8ccc0375dd9d..aee97e971d6d 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.h
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -6,6 +6,20 @@
#ifndef _AMDXDNA_GEM_H_
#define _AMDXDNA_GEM_H_
+#include <linux/hmm.h>
+
+struct amdxdna_umap {
+ struct vm_area_struct *vma;
+ struct mmu_interval_notifier notifier;
+ struct hmm_range range;
+ struct work_struct hmm_unreg_work;
+ struct amdxdna_gem_obj *abo;
+ struct list_head node;
+ struct kref refcnt;
+ bool invalid;
+ bool unmapped;
+};
+
struct amdxdna_mem {
u64 userptr;
void *kva;
@@ -13,8 +27,7 @@ struct amdxdna_mem {
size_t size;
struct page **pages;
u32 nr_pages;
- struct mmu_interval_notifier notifier;
- unsigned long *pfns;
+ struct list_head umap_list;
bool map_invalid;
};
@@ -31,9 +44,12 @@ struct amdxdna_gem_obj {
struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */
struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
u32 assigned_hwctx;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *attach;
};
#define to_gobj(obj) (&(obj)->base.base)
+#define is_import_bo(obj) ((obj)->attach)
static inline struct amdxdna_gem_obj *to_xdna_obj(struct drm_gem_object *gobj)
{
@@ -47,8 +63,12 @@ static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
drm_gem_object_put(to_gobj(abo));
}
+void amdxdna_umap_put(struct amdxdna_umap *mapp);
+
struct drm_gem_object *
amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size);
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index f5b8497cf5ad..f2bf1d374cc7 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -226,6 +226,7 @@ const struct drm_driver amdxdna_drm_drv = {
.num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),
.gem_create_object = amdxdna_gem_create_object_cb,
+ .gem_prime_import = amdxdna_gem_prime_import,
};
static const struct amdxdna_dev_info *
@@ -266,12 +267,16 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
fs_reclaim_release(GFP_KERNEL);
}
+ xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", 0);
+ if (!xdna->notifier_wq)
+ return -ENOMEM;
+
mutex_lock(&xdna->dev_lock);
ret = xdna->dev_info->ops->init(xdna);
mutex_unlock(&xdna->dev_lock);
if (ret) {
XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
- return ret;
+ goto destroy_notifier_wq;
}
ret = amdxdna_sysfs_init(xdna);
@@ -301,6 +306,8 @@ failed_dev_fini:
mutex_lock(&xdna->dev_lock);
xdna->dev_info->ops->fini(xdna);
mutex_unlock(&xdna->dev_lock);
+destroy_notifier_wq:
+ destroy_workqueue(xdna->notifier_wq);
return ret;
}
@@ -310,6 +317,8 @@ static void amdxdna_remove(struct pci_dev *pdev)
struct device *dev = &pdev->dev;
struct amdxdna_client *client;
+ destroy_workqueue(xdna->notifier_wq);
+
pm_runtime_get_noresume(dev);
pm_runtime_forbid(dev);
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
index 37848a8d8031..ab79600911aa 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.h
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -6,6 +6,7 @@
#ifndef _AMDXDNA_PCI_DRV_H_
#define _AMDXDNA_PCI_DRV_H_
+#include <linux/workqueue.h>
#include <linux/xarray.h>
#define XDNA_INFO(xdna, fmt, args...) drm_info(&(xdna)->ddev, fmt, ##args)
@@ -98,6 +99,7 @@ struct amdxdna_dev {
struct list_head client_list;
struct amdxdna_fw_ver fw_ver;
struct rw_semaphore notifier_lock; /* for mmu notifier*/
+ struct workqueue_struct *notifier_wq;
};
/*
diff --git a/drivers/accel/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig
index be85336107f9..1919fbb169c7 100644
--- a/drivers/accel/habanalabs/Kconfig
+++ b/drivers/accel/habanalabs/Kconfig
@@ -6,7 +6,7 @@
config DRM_ACCEL_HABANALABS
tristate "HabanaLabs AI accelerators"
depends on DRM_ACCEL
- depends on X86_64
+ depends on X86 && X86_64
depends on PCI && HAS_IOMEM
select GENERIC_ALLOCATOR
select HWMON
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index 8729a0c57d78..dc80ca921d90 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -17,8 +17,6 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
-#include <asm/msr.h>
-
/* make sure there is space for all the signed info */
static_assert(sizeof(struct cpucp_info) <= SEC_DEV_INFO_BUF_SZ);
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index 0825851656a2..cd24ccd20ba6 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -332,7 +332,7 @@ ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t si
return -EINVAL;
ret = ivpu_rpm_get(vdev);
- if (ret)
+ if (ret < 0)
return ret;
ivpu_pm_trigger_recovery(vdev, "debugfs");
@@ -383,7 +383,7 @@ static int dct_active_set(void *data, u64 active_percent)
return -EINVAL;
ret = ivpu_rpm_get(vdev);
- if (ret)
+ if (ret < 0)
return ret;
if (active_percent)
@@ -455,7 +455,7 @@ priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t
if (ret < 0)
return ret;
- buf[size] = '\0';
+ buf[ret] = '\0';
ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period,
&process_quantum);
if (ret != 4)
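A small standalone sketch of the termination rule behind the buf[ret] fix above: terminate at the number of bytes actually copied in, not at the buffer capacity, so short or truncated writes never leave unterminated data. bounded_copy() is a hypothetical helper, not kernel code.

#include <stddef.h>
#include <string.h>

/* dst_size must be >= 1; returns the number of payload bytes stored. */
static size_t bounded_copy(char *dst, size_t dst_size, const char *src, size_t src_len)
{
        size_t n = src_len < dst_size - 1 ? src_len : dst_size - 1;

        memcpy(dst, src, n);
        dst[n] = '\0';          /* terminate at bytes copied, like buf[ret] = '\0' */
        return n;
}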
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 4fa73189502e..0e7748c5e117 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#include <linux/firmware.h>
@@ -164,7 +164,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
- args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
+ args->value = ivpu_hw_dpu_max_freq_get(vdev);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);
@@ -374,6 +374,9 @@ int ivpu_boot(struct ivpu_device *vdev)
{
int ret;
+ drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
+
/* Update boot params located at first 4KB of FW memory */
ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
@@ -421,9 +424,9 @@ void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
ivpu_hw_irq_disable(vdev);
disable_irq(vdev->irq);
- cancel_work_sync(&vdev->irq_ipc_work);
- cancel_work_sync(&vdev->irq_dct_work);
- cancel_work_sync(&vdev->context_abort_work);
+ flush_work(&vdev->irq_ipc_work);
+ flush_work(&vdev->irq_dct_work);
+ flush_work(&vdev->context_abort_work);
ivpu_ipc_disable(vdev);
ivpu_mmu_disable(vdev);
}
@@ -573,6 +576,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0);
+ atomic_set(&vdev->job_timeout_counter, 0);
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 92753effb1c9..5497e7030e91 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -154,6 +154,7 @@ struct ivpu_device {
struct mutex submitted_jobs_lock; /* Protects submitted_jobs */
struct xarray submitted_jobs_xa;
struct ivpu_ipc_consumer job_done_consumer;
+ atomic_t job_timeout_counter;
atomic64_t unique_id_counter;
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 7a1bb92d8c81..ccaaf6c100c0 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#include <linux/firmware.h>
@@ -233,10 +233,20 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->dvfs_mode = 0;
fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
- fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
- fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");
+ if (fw_hdr->preemption_buffer_1_max_size)
+ fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
+ else
+ fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
+
+ if (fw_hdr->preemption_buffer_2_max_size)
+ fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
+ else
+ fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
+ ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n",
+ fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size);
+
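A sketch of the size-selection rule introduced above: prefer the firmware-declared maximum when it is non-zero, otherwise fall back to the per-boot requested size. preempt_buf_size() is an illustrative helper, not part of the driver.

#include <stdint.h>

static uint32_t preempt_buf_size(uint32_t requested, uint32_t declared_max)
{
        /* A maximum of 0 means the firmware declares no limit, so keep the
         * per-boot requested size; otherwise size the buffer for the maximum
         * once and reuse it across reconfigurations. */
        return declared_max ? declared_max : requested;
}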
if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
fw_hdr->ro_section_size,
fw_hdr->image_load_address,
@@ -534,7 +544,7 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->d0i3_entry_vpu_ts);
ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
boot_params->system_time_us);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = %u\n",
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n",
boot_params->power_profile);
}
@@ -566,7 +576,6 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
- boot_params->frequency = ivpu_hw_pll_freq_get(vdev);
/*
* This param is a debug firmware feature. It switches default clock
@@ -637,7 +646,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->d0i3_residency_time_us = 0;
boot_params->d0i3_entry_vpu_ts = 0;
if (IVPU_WA(disable_d0i2))
- boot_params->power_profile = 1;
+ boot_params->power_profile |= BIT(1);
boot_params->system_time_us = ktime_to_us(ktime_get_real());
wmb(); /* Flush WC buffers after writing bootparams */
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 1d0b2bd9d65c..9a3935be1c05 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -39,6 +39,7 @@ struct ivpu_fw_info {
u64 read_only_addr;
u32 read_only_size;
u32 sched_mode;
+ u64 last_heartbeat;
};
int ivpu_fw_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 8741c73b92ce..e0d242d9f3e5 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -30,7 +30,7 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
"%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
(bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
- (bool)bo->base.base.import_attach);
+ (bool)drm_gem_is_imported(&bo->base.base));
}
/*
@@ -122,7 +122,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
bo->ctx = NULL;
}
- if (bo->base.base.import_attach)
+ if (drm_gem_is_imported(&bo->base.base))
return;
dma_resv_lock(bo->base.base.resv, NULL);
@@ -282,7 +282,7 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
ivpu_bo_unbind_locked(bo);
mutex_destroy(&bo->lock);
- drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
+ drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
drm_gem_shmem_free(&bo->base);
}
@@ -362,7 +362,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
if (flags & DRM_IVPU_BO_MAPPABLE) {
dma_resv_lock(bo->base.base.resv, NULL);
- ret = drm_gem_shmem_vmap(&bo->base, &map);
+ ret = drm_gem_shmem_vmap_locked(&bo->base, &map);
dma_resv_unlock(bo->base.base.resv);
if (ret)
@@ -387,7 +387,7 @@ void ivpu_bo_free(struct ivpu_bo *bo)
if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
dma_resv_lock(bo->base.base.resv, NULL);
- drm_gem_shmem_vunmap(&bo->base, &map);
+ drm_gem_shmem_vunmap_locked(&bo->base, &map);
dma_resv_unlock(bo->base.base.resv);
}
@@ -461,7 +461,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
if (bo->mmu_mapped)
drm_printf(p, " mmu_mapped");
- if (bo->base.base.import_attach)
+ if (drm_gem_is_imported(&bo->base.base))
drm_printf(p, " imported");
drm_printf(p, "\n");
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index ec9a3629da3a..633160470c93 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -119,7 +119,7 @@ static void timeouts_init(struct ivpu_device *vdev)
else
vdev->timeout.autosuspend = 100;
vdev->timeout.d0i3_entry_msg = 5;
- vdev->timeout.state_dump_msg = 10;
+ vdev->timeout.state_dump_msg = 100;
}
}
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index 16435f2756d0..d79668fe1609 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_HW_H__
@@ -82,19 +82,19 @@ static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
return range->end - range->start;
}
-static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+static inline u32 ivpu_hw_dpu_max_freq_get(struct ivpu_device *vdev)
{
- return ivpu_hw_btrs_ratio_to_freq(vdev, ratio);
+ return ivpu_hw_btrs_dpu_max_freq_get(vdev);
}
-static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
+static inline u32 ivpu_hw_dpu_freq_get(struct ivpu_device *vdev)
{
- ivpu_hw_ip_irq_clear(vdev);
+ return ivpu_hw_btrs_dpu_freq_get(vdev);
}
-static inline u32 ivpu_hw_pll_freq_get(struct ivpu_device *vdev)
+static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
{
- return ivpu_hw_btrs_pll_freq_get(vdev);
+ ivpu_hw_ip_irq_clear(vdev);
}
static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
index 56c56012b980..b236c7234daa 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
+#include <linux/units.h>
+
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_btrs.h"
@@ -28,17 +30,13 @@
#define BTRS_LNL_ALL_IRQ_MASK ((u32)-1)
-#define BTRS_MTL_WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_5_3)
-#define BTRS_MTL_WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_4_3)
-#define BTRS_MTL_WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_5_3)
-#define BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3)
-#define BTRS_MTL_WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0)
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_CONFIG_DEFAULT 0x0
-#define PLL_SIMULATION_FREQ 10000000
-#define PLL_REF_CLK_FREQ 50000000
+#define PLL_REF_CLK_FREQ 50000000ull
+#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
+
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
#define TIMEOUT_US (150 * USEC_PER_MSEC)
@@ -62,6 +60,8 @@
#define DCT_ENABLE 0x1
#define DCT_DISABLE 0x0
+static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio);
+
int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev)
{
REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK);
@@ -156,7 +156,7 @@ static int info_init_mtl(struct ivpu_device *vdev)
hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
hw->sku = BTRS_MTL_TILE_SKU_BOTH;
- hw->config = BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO;
+ hw->config = WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3);
return 0;
}
@@ -334,8 +334,8 @@ int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
prepare_wp_request(vdev, &wp, enable);
- ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
- PLL_RATIO_TO_FREQ(wp.target), wp.cfg, wp.epp, wp.cdyn);
+ ivpu_dbg(vdev, PM, "PLL workpoint request: %lu MHz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
+ pll_ratio_to_dpu_freq(vdev, wp.target) / HZ_PER_MHZ, wp.cfg, wp.epp, wp.cdyn);
ret = wp_request_send(vdev, &wp);
if (ret) {
@@ -573,6 +573,47 @@ int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev)
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
}
+static u32 pll_config_get_mtl(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
+}
+
+static u32 pll_config_get_lnl(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
+}
+
+static u32 pll_ratio_to_dpu_freq_mtl(u16 ratio)
+{
+ return (PLL_RATIO_TO_FREQ(ratio) * 2) / 3;
+}
+
+static u32 pll_ratio_to_dpu_freq_lnl(u16 ratio)
+{
+ return PLL_RATIO_TO_FREQ(ratio) / 2;
+}
+
+static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return pll_ratio_to_dpu_freq_mtl(ratio);
+ else
+ return pll_ratio_to_dpu_freq_lnl(ratio);
+}
+
+u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev)
+{
+ return pll_ratio_to_dpu_freq(vdev, vdev->hw->pll.max_ratio);
+}
+
+u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return pll_ratio_to_dpu_freq_mtl(pll_config_get_mtl(vdev));
+ else
+ return pll_ratio_to_dpu_freq_lnl(pll_config_get_lnl(vdev));
+}
+
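The new helpers boil down to a fixed scaling of the PLL ratio; a standalone sketch of that arithmetic follows (the 50 MHz reference clock and the MTL/LNL factors mirror the hunk, the example ratio and everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define PLL_REF_CLK_FREQ   50000000ull              /* 50 MHz reference */
#define HZ_PER_MHZ         1000000ull

static uint64_t dpu_freq_mtl(uint16_t ratio)
{
        return (ratio * PLL_REF_CLK_FREQ * 2) / 3;  /* MTL: 2/3 of the PLL clock */
}

static uint64_t dpu_freq_lnl(uint16_t ratio)
{
        return (ratio * PLL_REF_CLK_FREQ) / 2;      /* LNL: half of the PLL clock */
}

int main(void)
{
        uint16_t ratio = 32;                        /* example workpoint ratio */

        printf("MTL: %llu MHz\n", (unsigned long long)(dpu_freq_mtl(ratio) / HZ_PER_MHZ));
        printf("LNL: %llu MHz\n", (unsigned long long)(dpu_freq_lnl(ratio) / HZ_PER_MHZ));
        return 0;
}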
/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
{
@@ -582,9 +623,12 @@ bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
if (!status)
return false;
- if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status))
- ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
- REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL));
+ if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
+ u32 pll = pll_config_get_mtl(vdev);
+
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
+ pll, pll_ratio_to_dpu_freq_mtl(pll) / HZ_PER_MHZ);
+ }
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
@@ -633,8 +677,12 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
queue_work(system_wq, &vdev->irq_dct_work);
}
- if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status))
- ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ));
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
+ u32 pll = pll_config_get_lnl(vdev);
+
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
+ pll, pll_ratio_to_dpu_freq_lnl(pll) / HZ_PER_MHZ);
+ }
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
@@ -717,60 +765,6 @@ void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 acti
REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val);
}
-static u32 pll_ratio_to_freq_mtl(u32 ratio, u32 config)
-{
- u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
- u32 cpu_clock;
-
- if ((config & 0xff) == MTL_PLL_RATIO_4_3)
- cpu_clock = pll_clock * 2 / 4;
- else
- cpu_clock = pll_clock * 2 / 5;
-
- return cpu_clock;
-}
-
-u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
-{
- struct ivpu_hw_info *hw = vdev->hw;
-
- if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
- return pll_ratio_to_freq_mtl(ratio, hw->config);
- else
- return PLL_RATIO_TO_FREQ(ratio);
-}
-
-static u32 pll_freq_get_mtl(struct ivpu_device *vdev)
-{
- u32 pll_curr_ratio;
-
- pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
- pll_curr_ratio &= VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK;
-
- if (!ivpu_is_silicon(vdev))
- return PLL_SIMULATION_FREQ;
-
- return pll_ratio_to_freq_mtl(pll_curr_ratio, vdev->hw->config);
-}
-
-static u32 pll_freq_get_lnl(struct ivpu_device *vdev)
-{
- u32 pll_curr_ratio;
-
- pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
- pll_curr_ratio &= VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK;
-
- return PLL_RATIO_TO_FREQ(pll_curr_ratio);
-}
-
-u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev)
-{
- if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
- return pll_freq_get_mtl(vdev);
- else
- return pll_freq_get_lnl(vdev);
-}
-
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
index 1fd71b4d4ab0..d2d82651976d 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_HW_BTRS_H__
@@ -13,9 +13,8 @@
#define PLL_PROFILING_FREQ_DEFAULT 38400000
#define PLL_PROFILING_FREQ_HIGH 400000000
-#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
-#define DCT_DEFAULT_ACTIVE_PERCENT 15u
+#define DCT_DEFAULT_ACTIVE_PERCENT 30u
#define DCT_PERIOD_US 35300u
int ivpu_hw_btrs_info_init(struct ivpu_device *vdev);
@@ -32,12 +31,12 @@ int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev);
void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev);
void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev);
void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev);
+u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev);
+u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev);
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent);
-u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev);
-u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio);
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 0e096fd9b95d..39f83225c181 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -302,7 +302,8 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req
struct ivpu_ipc_consumer cons;
int ret;
- drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
+ drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev) &&
+ pm_runtime_enabled(vdev->drm.dev));
ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 004059e4f1e8..b28da35c30b6 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -470,8 +470,8 @@ static void ivpu_job_destroy(struct ivpu_job *job)
struct ivpu_device *vdev = job->vdev;
u32 i;
- ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
- job->job_id, job->file_priv->ctx.id, job->engine_idx);
+ ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d cmdq_id %u engine %d",
+ job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx);
for (i = 0; i < job->bo_count; i++)
if (job->bos[i])
@@ -564,8 +564,8 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
dma_fence_signal(job->done_fence);
trace_job("done", job);
- ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
- job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
+ ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d cmdq_id %u engine %d status 0x%x\n",
+ job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, job_status);
ivpu_job_destroy(job);
ivpu_stop_job_timeout_detection(vdev);
@@ -664,8 +664,8 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
}
trace_job("submit", job);
- ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n",
- job->job_id, file_priv->ctx.id, job->engine_idx, cmdq->priority,
+ ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d cmdq_id %u engine %d prio %d addr 0x%llx next %d\n",
+ job->job_id, file_priv->ctx.id, cmdq->id, job->engine_idx, cmdq->priority,
job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
mutex_unlock(&file_priv->lock);
@@ -681,8 +681,8 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
err_erase_xa:
xa_erase(&vdev->submitted_jobs_xa, job->job_id);
err_unlock:
- mutex_unlock(&vdev->submitted_jobs_lock);
mutex_unlock(&file_priv->lock);
+ mutex_unlock(&vdev->submitted_jobs_lock);
ivpu_rpm_put(vdev);
return ret;
}
@@ -777,7 +777,8 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv,
goto err_free_handles;
}
- ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n", file_priv->ctx.id, buffer_count);
+ ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u cmdq_id %u buf_count %u\n",
+ file_priv->ctx.id, cmdq_id, buffer_count);
job = ivpu_job_create(file_priv, engine, buffer_count);
if (!job) {
@@ -873,15 +874,21 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_cmdq_create *args = data;
struct ivpu_cmdq *cmdq;
+ int ret;
- if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
return -ENODEV;
if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->lock);
cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false);
@@ -890,6 +897,8 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
mutex_unlock(&file_priv->lock);
+ ivpu_rpm_put(vdev);
+
return cmdq ? 0 : -ENOMEM;
}
@@ -899,28 +908,35 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_cmdq_destroy *args = data;
struct ivpu_cmdq *cmdq;
- u32 cmdq_id;
+ u32 cmdq_id = 0;
int ret;
if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
return -ENODEV;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->lock);
cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
if (!cmdq || cmdq->is_legacy) {
ret = -ENOENT;
- goto err_unlock;
+ } else {
+ cmdq_id = cmdq->id;
+ ivpu_cmdq_destroy(file_priv, cmdq);
+ ret = 0;
}
- cmdq_id = cmdq->id;
- ivpu_cmdq_destroy(file_priv, cmdq);
mutex_unlock(&file_priv->lock);
- ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
- return 0;
-err_unlock:
- mutex_unlock(&file_priv->lock);
+ /* Abort any pending jobs only if cmdq was destroyed */
+ if (!ret)
+ ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
+
+ ivpu_rpm_put(vdev);
+
return ret;
}
diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c
index ffe7b10f8a76..2a043baf10ca 100644
--- a/drivers/accel/ivpu/ivpu_ms.c
+++ b/drivers/accel/ivpu/ivpu_ms.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_file.h>
+#include <linux/pm_runtime.h>
#include "ivpu_drv.h"
#include "ivpu_gem.h"
@@ -44,6 +45,10 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->ms_lock);
if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
@@ -96,6 +101,8 @@ err_free_ms:
kfree(ms);
unlock:
mutex_unlock(&file_priv->ms_lock);
+
+ ivpu_rpm_put(vdev);
return ret;
}
@@ -160,6 +167,10 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
if (!args->metric_group_mask)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->ms_lock);
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
@@ -187,6 +198,7 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
unlock:
mutex_unlock(&file_priv->ms_lock);
+ ivpu_rpm_put(vdev);
return ret;
}
@@ -204,11 +216,17 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct drm_ivpu_metric_streamer_stop *args = data;
+ struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_ms_instance *ms;
+ int ret;
if (!args->metric_group_mask)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->ms_lock);
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
@@ -217,6 +235,7 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file
mutex_unlock(&file_priv->ms_lock);
+ ivpu_rpm_put(vdev);
return ms ? 0 : -EINVAL;
}
@@ -281,6 +300,9 @@ unlock:
void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
{
struct ivpu_ms_instance *ms, *tmp;
+ struct ivpu_device *vdev = file_priv->vdev;
+
+ pm_runtime_get_sync(vdev->drm.dev);
mutex_lock(&file_priv->ms_lock);
@@ -293,6 +315,8 @@ void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
free_instance(file_priv, ms);
mutex_unlock(&file_priv->ms_lock);
+
+ pm_runtime_put_autosuspend(vdev->drm.dev);
}
void ivpu_ms_cleanup_all(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index b5891e91f7ab..ea30db181cd7 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -34,6 +34,7 @@ module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
#define PM_RESCHEDULE_LIMIT 5
+#define PM_TDR_HEARTBEAT_LIMIT 30
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
{
@@ -44,6 +45,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
ivpu_fw_log_reset(vdev);
ivpu_fw_load(vdev);
fw->entry_point = fw->cold_boot_entry_point;
+ fw->last_heartbeat = 0;
}
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
@@ -189,7 +191,24 @@ static void ivpu_job_timeout_work(struct work_struct *work)
{
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
struct ivpu_device *vdev = pm->vdev;
+ u64 heartbeat;
+ if (ivpu_jsm_get_heartbeat(vdev, 0, &heartbeat) || heartbeat <= vdev->fw->last_heartbeat) {
+ ivpu_err(vdev, "Job timeout detected, heartbeat not progressed\n");
+ goto recovery;
+ }
+
+ if (atomic_fetch_inc(&vdev->job_timeout_counter) > PM_TDR_HEARTBEAT_LIMIT) {
+ ivpu_err(vdev, "Job timeout detected, heartbeat limit exceeded\n");
+ goto recovery;
+ }
+
+ vdev->fw->last_heartbeat = heartbeat;
+ ivpu_start_job_timeout_detection(vdev);
+ return;
+
+recovery:
+ atomic_set(&vdev->job_timeout_counter, 0);
ivpu_pm_trigger_recovery(vdev, "TDR");
}
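A compact sketch of the escalation policy the reworked timeout handler implements: keep re-arming the timeout while the firmware heartbeat advances, and only recover when it stalls or the re-check budget is exhausted. PM_TDR_HEARTBEAT_LIMIT mirrors the value in the hunk; the struct and function below are illustrative, not the driver code.

#include <stdbool.h>
#include <stdint.h>

#define PM_TDR_HEARTBEAT_LIMIT 30

struct tdr_state {
        uint64_t last_heartbeat;
        int      timeout_counter;
};

/* Returns true when recovery should be triggered. */
static bool job_timeout_tick(struct tdr_state *s, bool hb_read_ok, uint64_t heartbeat)
{
        if (!hb_read_ok || heartbeat <= s->last_heartbeat)
                return true;                    /* heartbeat stalled: recover  */

        if (s->timeout_counter++ > PM_TDR_HEARTBEAT_LIMIT)
                return true;                    /* waited too long: recover    */

        s->last_heartbeat = heartbeat;          /* progress seen: re-arm TDR   */
        return false;
}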
@@ -204,6 +223,7 @@ void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev)
{
cancel_delayed_work_sync(&vdev->pm->job_timeout_work);
+ atomic_set(&vdev->job_timeout_counter, 0);
}
int ivpu_pm_suspend_cb(struct device *dev)
@@ -428,16 +448,17 @@ int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent)
active_us = (DCT_PERIOD_US * active_percent) / 100;
inactive_us = DCT_PERIOD_US - active_us;
+ vdev->pm->dct_active_percent = active_percent;
+
+ ivpu_dbg(vdev, PM, "DCT requested %u%% (D0: %uus, D0i2: %uus)\n",
+ active_percent, active_us, inactive_us);
+
ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us);
if (ret) {
ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret);
return ret;
}
- vdev->pm->dct_active_percent = active_percent;
-
- ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n",
- active_percent, active_us, inactive_us);
return 0;
}
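For a concrete feel of the duty-cycle split computed in ivpu_pm_dct_enable(), here is a standalone sketch using the 35300 us period and the new 30% default from this series; the program itself is only illustrative.

#include <stdio.h>

#define DCT_PERIOD_US              35300u
#define DCT_DEFAULT_ACTIVE_PERCENT 30u

int main(void)
{
        unsigned int active_us   = (DCT_PERIOD_US * DCT_DEFAULT_ACTIVE_PERCENT) / 100;
        unsigned int inactive_us = DCT_PERIOD_US - active_us;

        /* 30% of a 35300 us period -> 10590 us active (D0), 24710 us inactive (D0i2) */
        printf("D0: %u us, D0i2: %u us\n", active_us, inactive_us);
        return 0;
}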
@@ -445,15 +466,16 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev)
{
int ret;
+ vdev->pm->dct_active_percent = 0;
+
+ ivpu_dbg(vdev, PM, "DCT requested to be disabled\n");
+
ret = ivpu_jsm_dct_disable(vdev);
if (ret) {
ivpu_err_ratelimited(vdev, "Failed to disable DCT: %d\n", ret);
return ret;
}
- vdev->pm->dct_active_percent = 0;
-
- ivpu_dbg(vdev, PM, "DCT disabled\n");
return 0;
}
@@ -466,7 +488,7 @@ void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
if (ivpu_hw_btrs_dct_get_request(vdev, &enable))
return;
- if (vdev->pm->dct_active_percent)
+ if (enable)
ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT);
else
ret = ivpu_pm_dct_disable(vdev);
diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c
index 97102feaf8dd..268ab7744a8b 100644
--- a/drivers/accel/ivpu/ivpu_sysfs.c
+++ b/drivers/accel/ivpu/ivpu_sysfs.c
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/units.h>
#include "ivpu_drv.h"
#include "ivpu_gem.h"
@@ -90,10 +92,55 @@ sched_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR_RO(sched_mode);
+/**
+ * DOC: npu_max_frequency_mhz
+ *
+ * The npu_max_frequency_mhz attribute shows the maximum frequency, in MHz, of
+ * the NPU's data processing unit.
+ */
+static ssize_t
+npu_max_frequency_mhz_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ u32 freq = ivpu_hw_dpu_max_freq_get(vdev);
+
+ return sysfs_emit(buf, "%lu\n", freq / HZ_PER_MHZ);
+}
+
+static DEVICE_ATTR_RO(npu_max_frequency_mhz);
+
+/**
+ * DOC: npu_current_frequency_mhz
+ *
+ * The npu_current_frequency_mhz attribute shows the current frequency, in MHz,
+ * of the NPU's data processing unit.
+ */
+static ssize_t
+npu_current_frequency_mhz_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ u32 freq = 0;
+
+ /* Read frequency only if device is active, otherwise frequency is 0 */
+ if (pm_runtime_get_if_active(vdev->drm.dev) > 0) {
+ freq = ivpu_hw_dpu_freq_get(vdev);
+
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+ }
+
+ return sysfs_emit(buf, "%lu\n", freq / HZ_PER_MHZ);
+}
+
+static DEVICE_ATTR_RO(npu_current_frequency_mhz);
+
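A sketch of the reporting policy used by npu_current_frequency_mhz_show(): read the hardware only when the device is already runtime-active and report 0 otherwise, so a sysfs read never wakes the NPU. try_get_active(), put() and read_hw_freq_hz() are hypothetical stand-ins for pm_runtime_get_if_active(), pm_runtime_put_autosuspend() and ivpu_hw_dpu_freq_get().

#include <stdbool.h>
#include <stdint.h>

static uint32_t current_freq_mhz(bool (*try_get_active)(void),
                                 void (*put)(void),
                                 uint32_t (*read_hw_freq_hz)(void))
{
        uint32_t freq_hz = 0;

        if (try_get_active()) {         /* device already powered: safe to read */
                freq_hz = read_hw_freq_hz();
                put();                  /* drop the usage count we just took    */
        }

        return freq_hz / 1000000;       /* sysfs reports MHz, 0 while suspended */
}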
static struct attribute *ivpu_dev_attrs[] = {
&dev_attr_npu_busy_time_us.attr,
&dev_attr_npu_memory_utilization.attr,
&dev_attr_sched_mode.attr,
+ &dev_attr_npu_max_frequency_mhz.attr,
+ &dev_attr_npu_current_frequency_mhz.attr,
NULL,
};
diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h
index 908e68ea1c39..218468bbbcad 100644
--- a/drivers/accel/ivpu/vpu_boot_api.h
+++ b/drivers/accel/ivpu/vpu_boot_api.h
@@ -26,7 +26,7 @@
* Minor version changes when API backward compatibility is preserved.
* Resets to 0 if Major version is incremented.
*/
-#define VPU_BOOT_API_VER_MINOR 26
+#define VPU_BOOT_API_VER_MINOR 28
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
@@ -76,8 +76,15 @@ struct vpu_firmware_header {
* submission queue size and device capabilities.
*/
u32 preemption_buffer_2_size;
+ /*
+ * Maximum preemption buffer size that the FW can use: no need for the host
+ * driver to allocate more space than that specified by these fields.
+ * A value of 0 means no declared limit.
+ */
+ u32 preemption_buffer_1_max_size;
+ u32 preemption_buffer_2_max_size;
/* Space reserved for future preemption-related fields. */
- u32 preemption_reserved[6];
+ u32 preemption_reserved[4];
/* FW image read only section start address, 4KB aligned */
u64 ro_section_start_address;
/* FW image read only section size, 4KB aligned */
@@ -134,7 +141,7 @@ enum vpu_trace_destination {
/*
* Processor bit shifts (for loggable HW components).
*/
-#define VPU_TRACE_PROC_BIT_ARM 0
+#define VPU_TRACE_PROC_BIT_RESERVED 0
#define VPU_TRACE_PROC_BIT_LRT 1
#define VPU_TRACE_PROC_BIT_LNN 2
#define VPU_TRACE_PROC_BIT_SHV_0 3
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
index 7215c144158c..4b6b2b3d2583 100644
--- a/drivers/accel/ivpu/vpu_jsm_api.h
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -22,7 +22,7 @@
/*
* Minor version changes when API backward compatibility is preserved.
*/
-#define VPU_JSM_API_VER_MINOR 25
+#define VPU_JSM_API_VER_MINOR 29
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
@@ -53,8 +53,7 @@
* Engine indexes.
*/
#define VPU_ENGINE_COMPUTE 0
-#define VPU_ENGINE_COPY 1
-#define VPU_ENGINE_NB 2
+#define VPU_ENGINE_NB 1
/*
* VPU status values.
@@ -126,11 +125,13 @@ enum {
* When set, indicates that job queue uses native fences (as inline commands
* in job queue). Such queues may also use legacy fences (as commands in batch buffers).
* When cleared, indicates the job queue only uses legacy fences.
- * NOTE: For queues using native fences, VPU expects that all jobs in the queue
- * are immediately followed by an inline command object. This object is expected
- * to be a fence signal command in most cases, but can also be a NOP in case the host
- * does not need per-job fence signalling. Other inline commands objects can be
- * inserted between "job and inline command" pairs.
+ * NOTES:
+ * 1. For queues using native fences, VPU expects that all jobs in the queue
+ * are immediately followed by an inline command object. This object is expected
+ * to be a fence signal command in most cases, but can also be a NOP in case the host
+ * does not need per-job fence signalling. Other inline commands objects can be
+ * inserted between "job and inline command" pairs.
+ * 2. Native fence queues are only supported on VPU 40xx onwards.
*/
VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U),
@@ -275,6 +276,8 @@ struct vpu_inline_cmd {
u64 value;
/* User VA of the log buffer in which to add log entry on completion. */
u64 log_buffer_va;
+ /* NPU private data. */
+ u64 npu_private_data;
} fence;
/* Other commands do not have a payload. */
/* Payload definition for future inline commands can be inserted here. */
@@ -791,12 +794,22 @@ struct vpu_jsm_metric_streamer_update {
/** Metric group mask that identifies metric streamer instance. */
u64 metric_group_mask;
/**
- * Address and size of the buffer where the VPU will write metric data. If
- * the buffer address is 0 or same as the currently used buffer the VPU will
- * continue writing metric data to the current buffer. In this case the
- * buffer size is ignored and the size of the current buffer is unchanged.
- * If the address is non-zero and differs from the current buffer address the
- * VPU will immediately switch data collection to the new buffer.
+ * Address and size of the buffer where the VPU will write metric data.
+ * This member dictates how the update operation is performed:
+ * 1. client needs information about the number of collected samples and the
+ * amount of data written to the current buffer
+ * 2. client wants to switch to a new buffer
+ *
+ * Case 1. is identified by the buffer address being 0 or the same as the
+ * currently used buffer address. In this case the buffer size is ignored and
+ * the size of the current buffer is unchanged. The VPU will return an update
+ * in the vpu_jsm_metric_streamer_done structure. The internal writing position
+ * into the buffer is not changed.
+ *
+ * Case 2. is identified by the address being non-zero and differs from the
+ * current buffer address. The VPU will immediately switch data collection to
+ * the new buffer. Then the VPU will return an update in the
+ * vpu_jsm_metric_streamer_done structure.
*/
u64 buffer_addr;
u64 buffer_size;
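A small sketch of the two cases the comment above distinguishes: a zero or unchanged buffer address is a pure status query, while a new address switches collection to that buffer. The struct and names are illustrative only.

#include <stdbool.h>
#include <stdint.h>

struct streamer {
        uint64_t cur_addr;
        uint64_t cur_size;
};

/* Returns true when the VPU would switch to the new buffer. */
static bool streamer_update(struct streamer *s, uint64_t addr, uint64_t size)
{
        if (!addr || addr == s->cur_addr)
                return false;           /* case 1: report status, keep current buffer */

        s->cur_addr = addr;             /* case 2: switch collection to the new buffer */
        s->cur_size = size;
        return true;
}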
@@ -934,6 +947,7 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
/*
* Default quantum in 100ns units for scheduling across processes
* within a priority band
+ * Minimum value supported by NPU is 1ms (10000 in 100ns units).
*/
u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
/*
@@ -946,8 +960,10 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
* in situations when it's starved by the focus band.
*/
u32 normal_band_percentage;
- /* Reserved */
- u32 reserved_0;
+ /*
+ * TDR timeout value in milliseconds. A value of 0 (the default) means no timeout.
+ */
+ u32 tdr_timeout;
};
/*
@@ -1024,7 +1040,10 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
s32 in_process_priority;
/* Zero padding / Reserved */
u32 reserved_1;
- /* Context quantum relative to other contexts of same priority in the same process */
+ /*
+ * Context quantum relative to other contexts of same priority in the same process
+ * Minimum value supported by NPU is 1ms (10000 in 100ns units).
+ */
u64 context_quantum;
/* Grace period when preempting context of the same priority within the same process */
u64 grace_period_same_priority;
diff --git a/drivers/accel/qaic/Kconfig b/drivers/accel/qaic/Kconfig
index a9f866230058..5e405a19c157 100644
--- a/drivers/accel/qaic/Kconfig
+++ b/drivers/accel/qaic/Kconfig
@@ -8,7 +8,6 @@ config DRM_ACCEL_QAIC
depends on DRM_ACCEL
depends on PCI && HAS_IOMEM
depends on MHI_BUS
- depends on MMU
select CRC32
help
Enables driver for Qualcomm's Cloud AI accelerator PCIe cards that are
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 43aba57b48f0..1bce1af7c72c 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -609,7 +609,7 @@ static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struc
struct scatterlist *sg;
int ret = 0;
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return -EINVAL;
for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
@@ -630,7 +630,7 @@ static void qaic_free_object(struct drm_gem_object *obj)
{
struct qaic_bo *bo = to_qaic_bo(obj);
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
/* DMABUF/PRIME Path */
drm_prime_gem_destroy(obj, NULL);
} else {
@@ -870,7 +870,7 @@ static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
{
int ret;
- if (bo->base.import_attach)
+ if (drm_gem_is_imported(&bo->base))
ret = qaic_prepare_import_bo(bo, hdr);
else
ret = qaic_prepare_export_bo(qdev, bo, hdr);
@@ -894,7 +894,7 @@ static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *b
static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
- if (bo->base.import_attach)
+ if (drm_gem_is_imported(&bo->base))
qaic_unprepare_import_bo(bo);
else
qaic_unprepare_export_bo(qdev, bo);
diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
index ba0cf2f94732..a991b8198dc4 100644
--- a/drivers/accel/qaic/qaic_debugfs.c
+++ b/drivers/accel/qaic/qaic_debugfs.c
@@ -240,7 +240,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
mhi_unprepare:
mhi_unprepare_from_transfer(mhi_dev);
destroy_workqueue:
- flush_workqueue(qdev->bootlog_wq);
destroy_workqueue(qdev->bootlog_wq);
out:
return ret;
@@ -253,7 +252,6 @@ static void qaic_bootlog_mhi_remove(struct mhi_device *mhi_dev)
qdev = dev_get_drvdata(&mhi_dev->dev);
mhi_unprepare_from_transfer(qdev->bootlog_ch);
- flush_workqueue(qdev->bootlog_wq);
destroy_workqueue(qdev->bootlog_wq);
qdev->bootlog_ch = NULL;
}
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7f10aa38269d..7bc40c2735ac 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -576,6 +576,9 @@ config ACPI_FFH
Enable this feature if you want to set up and install the FFH Address
Space handler to handle FFH OpRegion in the firmware.
+config ACPI_MRRM
+ bool
+
source "drivers/acpi/pmic/Kconfig"
config ACPI_VIOT
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 797070fc9a3f..d1b0affb844f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -66,6 +66,7 @@ acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
acpi-$(CONFIG_ACPI_PRMT) += prmt.o
acpi-$(CONFIG_ACPI_PCC) += acpi_pcc.o
acpi-$(CONFIG_ACPI_FFH) += acpi_ffh.o
+acpi-$(CONFIG_ACPI_MRRM) += acpi_mrrm.o
# Address translation
acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index f7fb7205028d..f6b9562779de 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -15,6 +15,7 @@
#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>
+#include <asm/msr.h>
#include "apei/apei-internal.h"
#include <ras/ras_event.h>
@@ -234,7 +235,7 @@ static int __init extlog_init(void)
u64 cap;
int rc;
- if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) ||
+ if (rdmsrq_safe(MSR_IA32_MCG_CAP, &cap) ||
!(cap & MCG_ELOG_P) ||
!extlog_get_l1addr())
return -ENODEV;
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 794962c5c88e..b8d98b1b48ae 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -39,7 +39,7 @@ static int lpit_read_residency_counter_us(u64 *counter, bool io_mem)
return 0;
}
- err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter);
+ err = rdmsrq_safe(residency_info_ffh.gaddr.address, counter);
if (!err) {
u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset +
residency_info_ffh.gaddr. bit_width - 1,
diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c
new file mode 100644
index 000000000000..47ea3ccc2142
--- /dev/null
+++ b/drivers/acpi/acpi_mrrm.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025, Intel Corporation.
+ *
+ * Memory Range and Region Mapping (MRRM) structure
+ *
+ * Parse and report the platform's MRRM table in /sys.
+ */
+
+#define pr_fmt(fmt) "acpi/mrrm: " fmt
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+
+/* By default, assume one memory region covering all system memory, per the spec */
+static int max_mem_region = 1;
+
+/* Access for use by resctrl file system */
+int acpi_mrrm_max_mem_region(void)
+{
+ return max_mem_region;
+}
+
+struct mrrm_mem_range_entry {
+ u64 base;
+ u64 length;
+ int node;
+ u8 local_region_id;
+ u8 remote_region_id;
+};
+
+static struct mrrm_mem_range_entry *mrrm_mem_range_entry;
+static u32 mrrm_mem_entry_num;
+
+static int get_node_num(struct mrrm_mem_range_entry *e)
+{
+ unsigned int nid;
+
+ for_each_online_node(nid) {
+ for (int z = 0; z < MAX_NR_ZONES; z++) {
+ struct zone *zone = NODE_DATA(nid)->node_zones + z;
+
+ if (!populated_zone(zone))
+ continue;
+ if (zone_intersects(zone, PHYS_PFN(e->base), PHYS_PFN(e->length)))
+ return zone_to_nid(zone);
+ }
+ }
+
+ return -ENOENT;
+}
+
+static __init int acpi_parse_mrrm(struct acpi_table_header *table)
+{
+ struct acpi_mrrm_mem_range_entry *mre_entry;
+ struct acpi_table_mrrm *mrrm;
+ void *mre, *mrrm_end;
+ int mre_count = 0;
+
+ mrrm = (struct acpi_table_mrrm *)table;
+ if (!mrrm)
+ return -ENODEV;
+
+ if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)
+ return -EOPNOTSUPP;
+
+ mrrm_end = (void *)mrrm + mrrm->header.length - 1;
+ mre = (void *)mrrm + sizeof(struct acpi_table_mrrm);
+ while (mre < mrrm_end) {
+ mre_entry = mre;
+ mre_count++;
+ mre += mre_entry->header.length;
+ }
+ if (!mre_count) {
+ pr_info(FW_BUG "No ranges listed in MRRM table\n");
+ return -EINVAL;
+ }
+
+ mrrm_mem_range_entry = kmalloc_array(mre_count, sizeof(*mrrm_mem_range_entry),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!mrrm_mem_range_entry)
+ return -ENOMEM;
+
+ mre = (void *)mrrm + sizeof(struct acpi_table_mrrm);
+ while (mre < mrrm_end) {
+ struct mrrm_mem_range_entry *e;
+
+ mre_entry = mre;
+ e = mrrm_mem_range_entry + mrrm_mem_entry_num;
+
+ e->base = mre_entry->addr_base;
+ e->length = mre_entry->addr_len;
+ e->node = get_node_num(e);
+
+ if (mre_entry->region_id_flags & ACPI_MRRM_VALID_REGION_ID_FLAGS_LOCAL)
+ e->local_region_id = mre_entry->local_region_id;
+ else
+ e->local_region_id = -1;
+ if (mre_entry->region_id_flags & ACPI_MRRM_VALID_REGION_ID_FLAGS_REMOTE)
+ e->remote_region_id = mre_entry->remote_region_id;
+ else
+ e->remote_region_id = -1;
+
+ mrrm_mem_entry_num++;
+ mre += mre_entry->header.length;
+ }
+
+ max_mem_region = mrrm->max_mem_region;
+
+ return 0;
+}
+
+#define RANGE_ATTR(name, fmt) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ struct mrrm_mem_range_entry *mre; \
+ const char *kname = kobject_name(kobj); \
+ int n, ret; \
+ \
+ ret = kstrtoint(kname + 5, 10, &n); \
+ if (ret) \
+ return ret; \
+ \
+ mre = mrrm_mem_range_entry + n; \
+ \
+ return sysfs_emit(buf, fmt, mre->name); \
+} \
+static struct kobj_attribute name##_attr = __ATTR_RO(name)
+
+RANGE_ATTR(base, "0x%llx\n");
+RANGE_ATTR(length, "0x%llx\n");
+RANGE_ATTR(node, "%d\n");
+RANGE_ATTR(local_region_id, "%d\n");
+RANGE_ATTR(remote_region_id, "%d\n");
+
+static struct attribute *memory_range_attrs[] = {
+ &base_attr.attr,
+ &length_attr.attr,
+ &node_attr.attr,
+ &local_region_id_attr.attr,
+ &remote_region_id_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(memory_range);
+
+static __init int add_boot_memory_ranges(void)
+{
+ struct kobject *pkobj, *kobj;
+ int ret = -EINVAL;
+ char *name;
+
+ pkobj = kobject_create_and_add("memory_ranges", acpi_kobj);
+
+ for (int i = 0; i < mrrm_mem_entry_num; i++) {
+ name = kasprintf(GFP_KERNEL, "range%d", i);
+ if (!name) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ kobj = kobject_create_and_add(name, pkobj);
+
+ ret = sysfs_create_groups(kobj, memory_range_groups);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static __init int mrrm_init(void)
+{
+ int ret;
+
+ ret = acpi_table_parse(ACPI_SIG_MRRM, acpi_parse_mrrm);
+ if (ret < 0)
+ return ret;
+
+ return add_boot_memory_ranges();
+}
+device_initcall(mrrm_init);
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 3fde4496f8a2..6f8bbe1247a5 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -19,7 +19,7 @@
#include <linux/acpi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/mwait.h>
#include <xen/xen.h>
diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c
index 07a034a53aca..97064e943768 100644
--- a/drivers/acpi/acpi_pcc.c
+++ b/drivers/acpi/acpi_pcc.c
@@ -31,7 +31,6 @@
struct pcc_data {
struct pcc_mbox_chan *pcc_chan;
- void __iomem *pcc_comm_addr;
struct completion done;
struct mbox_client cl;
struct acpi_pcc_info ctx;
@@ -81,14 +80,6 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
ret = AE_SUPPORT;
goto err_free_channel;
}
- data->pcc_comm_addr = acpi_os_ioremap(pcc_chan->shmem_base_addr,
- pcc_chan->shmem_size);
- if (!data->pcc_comm_addr) {
- pr_err("Failed to ioremap PCC comm region mem for %d\n",
- ctx->subspace_id);
- ret = AE_NO_MEMORY;
- goto err_free_channel;
- }
*region_context = data;
return AE_OK;
@@ -113,7 +104,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
reinit_completion(&data->done);
/* Write to Shared Memory */
- memcpy_toio(data->pcc_comm_addr, (void *)value, data->ctx.length);
+ memcpy_toio(data->pcc_chan->shmem, (void *)value, data->ctx.length);
ret = mbox_send_message(data->pcc_chan->mchan, NULL);
if (ret < 0)
@@ -134,7 +125,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
mbox_chan_txdone(data->pcc_chan->mchan, ret);
- memcpy_fromio(value, data->pcc_comm_addr, data->ctx.length);
+ memcpy_fromio(value, data->pcc_chan->shmem, data->ctx.length);
return AE_OK;
}
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 9d4cbd956627..d7d4649ce66f 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -17,7 +17,7 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2022 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2025 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH " (64-bit version)"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 4536dc9d3979..662231f4f881 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -3,7 +3,7 @@
*
* Name: accommon.h - Common include files for generation of ACPICA source
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index c6ba6a36cfb5..24998f2d7539 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 911875c5a5f1..fe6d38b43c9a 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -3,7 +3,7 @@
*
* Name: acdebug.h - ACPI/AML debugger
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -37,7 +37,7 @@ struct acpi_db_argument_info {
struct acpi_db_execute_walk {
u32 count;
u32 max_count;
- char name_seg[ACPI_NAMESEG_SIZE + 1];
+ char name_seg[ACPI_NAMESEG_SIZE + 1] ACPI_NONSTRING;
};
#define PARAM_LIST(pl) pl
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 73eecbf62f06..5d48a344b35f 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -3,7 +3,7 @@
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 1c5218b79fc2..b40fb3a5ac8a 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -3,7 +3,7 @@
*
* Name: acevents.h - Event subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 309ce8efb4f6..c8a750d2674c 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -3,7 +3,7 @@
*
* Name: acglobal.h - Declarations for global variables
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index b8543a34caea..6aec56c65fa0 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -3,7 +3,7 @@
*
* Name: achware.h -- hardware specific interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 955114c926bd..1ee6ac9b2baf 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -3,7 +3,7 @@
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -120,6 +120,9 @@ void
acpi_ex_trace_point(acpi_trace_event_type type,
u8 begin, u8 *aml, char *pathname);
+void
+acpi_ex_trace_args(union acpi_operand_object **params, u32 count);
+
/*
* exfield - ACPI AML (p-code) execution - field manipulation
*/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 6f4fe47c955b..0c41f0097e8d 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -3,7 +3,7 @@
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -293,7 +293,7 @@ acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state);
* expected_return_btypes - Allowed type(s) for the return value
*/
struct acpi_name_info {
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u16 argument_list;
u8 expected_btypes;
};
@@ -370,7 +370,7 @@ typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node *
converted_object);
struct acpi_simple_repair_info {
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u32 unexpected_btypes;
u32 package_index;
acpi_object_converter object_converter;
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index de83dd22292b..4e9402c02410 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -3,7 +3,7 @@
*
* Name: acmacros.h - C macros for the entire subsystem.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 9448bc026b9b..13f050fecb49 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -3,7 +3,7 @@
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 8fc02946d3cd..6ffcc7a0a0c2 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -3,7 +3,7 @@
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index da96d80e6b3a..a2a9e51d7ac6 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -3,7 +3,7 @@
*
* Name: acopcode.h - AML opcode information for the AML parser and interpreter
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 6dad786a382c..65a15dee092b 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -3,7 +3,7 @@
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index ef068f4c864a..76c5ed02e916 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -3,7 +3,7 @@
*
* Name: acpredef - Information table for ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index d772ff9ca07d..e8a92be5adae 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -3,7 +3,7 @@
*
* Name: acresrc.h - Resource Manager function prototypes
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index f8fee94ba708..e690f604cfa0 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -3,7 +3,7 @@
*
* Name: acstruct.h - Internal structs
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index b6ae979b01b6..ebef72bf58d0 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -3,7 +3,7 @@
*
* Name: actables.h - ACPI table management
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index edfdbbef81c1..3990d509bbab 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -3,7 +3,7 @@
*
* Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index effe52b40dce..c5b544a006c5 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -5,7 +5,7 @@
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 4e88f9fc2a28..54d6e51e0b9a 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -3,7 +3,7 @@
*
* Module Name: amlresrc.h - AML resource descriptors
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -504,10 +504,6 @@ struct aml_resource_pin_group_config {
#define AML_RESOURCE_PIN_GROUP_CONFIG_REVISION 1 /* ACPI 6.2 */
-/* restore default alignment */
-
-#pragma pack()
-
/* Union of all resource descriptors, so we can allocate the worst case */
union aml_resource {
@@ -562,6 +558,10 @@ union aml_resource {
u8 byte_item;
};
+/* restore default alignment */
+
+#pragma pack()
+
/* Interfaces used by both the disassembler and compiler */
void
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index e874c1dddefa..554ae35108bd 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -3,7 +3,7 @@
*
* Module Name: dbhistry - debugger HISTORY command
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 4354c175e12e..e2f00c54cb36 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -4,7 +4,7 @@
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 80c69af06948..c1f79d7a2026 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -4,7 +4,7 @@
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index c5c8380a3114..274b74255551 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: dsdebug - Parser/Interpreter interface - debugging
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 532401ecdab0..df132c9089c7 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -3,7 +3,7 @@
*
* Module Name: dsfield - Dispatcher field routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 6e0e362e461f..57cd9e2d1109 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: dsinit - Object initialization namespace walk
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index e809c2aed78a..c8f37f4e6626 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -3,7 +3,7 @@
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index eca50517ad82..5393de4dbc4c 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -188,6 +188,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
index++;
}
+ acpi_ex_trace_args(params, index);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%u args passed to method\n", index));
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 555f148d666b..1bf7eec49899 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -3,7 +3,7 @@
*
* Module Name: dsobject - Dispatcher object management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index dd3059000885..5699b0872848 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: dsopcode - Dispatcher support for regions and fields
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index ecf793fe9919..1ed2386fab82 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
*
* Module Name: dspkginit - Completion of deferred package initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index fb9ed5e1da89..baf6a1f27605 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -668,6 +668,8 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS];
u32 arg_count = 0;
u32 index = walk_state->num_operands;
+ u32 prev_num_operands = walk_state->num_operands;
+ u32 new_num_operands;
u32 i;
ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
@@ -696,6 +698,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
/* Create the interpreter arguments, in reverse order */
+ new_num_operands = index;
index--;
for (i = 0; i < arg_count; i++) {
arg = arguments[index];
@@ -720,7 +723,11 @@ cleanup:
* pop everything off of the operand stack and delete those
* objects
*/
- acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
+ walk_state->num_operands = (u8)(i);
+ acpi_ds_obj_stack_pop_and_delete(new_num_operands, walk_state);
+
+ /* Restore operand count */
+ walk_state->num_operands = (u8)(prev_num_operands);
ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %u", index));
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index a43336f05206..5c5c6d8a4e48 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index f7b8496c8bdd..666419b6a5c6 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 541235f498c2..bfc54c914757 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 1fdd07ae862c..375a8fa43d9d 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -3,7 +3,7 @@
*
* Module Name: dswscope - Scope stack manipulation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 75338a13c802..02aaddb89df9 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -3,7 +3,7 @@
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 9e78c5b9ad52..6cdd39c987b8 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -3,7 +3,7 @@
*
* Module Name: evevent - Fixed Event handling and dispatch
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 989dc01af03f..fa3e0d00d1ca 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -3,7 +3,7 @@
*
* Module Name: evglock - Global Lock support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 934b201d3820..ba65b2ea49b2 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 58e1890ab25b..fadd93caf1d5 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeblk - GPE block creation and initialization.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 38f408cf13ce..eb769739420e 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeinit - System GPE initialization and update
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index ee3b1ea656d4..d15b1d75c8ec 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeutil - GPE utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 1c8cb6d924df..5a35dae945e2 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -3,7 +3,7 @@
*
* Module Name: evhandler - Support for Address Space handlers
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index e68e876d3b84..04a23a6c3bb1 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: evmisc - Miscellaneous event manager support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index cf53b9535f18..fa3475da7ea9 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -3,7 +3,7 @@
*
* Module Name: evregion - Operation Region support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 46d1b3f5582d..b03952798af5 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -3,7 +3,7 @@
*
* Module Name: evrgnini- ACPI address_space (op_region) init
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 24fa6433d562..86a8d41c079c 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -3,7 +3,7 @@
*
* Module Name: evxface - External interfaces for ACPI events
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 48bf845191d2..4b052908d2e7 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 4eeeb3b7ab7e..60dacec1b121 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index bff2d099f469..bccc672c934c 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -4,7 +4,7 @@
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index 2fb78b35565b..c248c9b162fa 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -3,7 +3,7 @@
*
* Module Name: exconcat - Concatenate-type AML operators
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 473115309860..4d7dd0fc6b07 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -3,7 +3,7 @@
*
* Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index bb1be42daee1..fded9bfc2436 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -3,7 +3,7 @@
*
* Module Name: exconvrt - Object conversion routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -226,8 +226,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
/* Copy the string to the buffer */
new_buf = return_desc->buffer.pointer;
- strncpy((char *)new_buf, (char *)obj_desc->string.pointer,
- obj_desc->string.length);
+ memcpy((char *)new_buf, (char *)obj_desc->string.pointer,
+ obj_desc->string.length);
break;
default:
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 1bea9d97652c..052c69567997 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -3,7 +3,7 @@
*
* Module Name: excreate - Named object creation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 3f86bfada510..81a07a52b73c 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: exdebug - Support for stores to the AML Debug Object
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 2e2da8790224..d8aeebaab70a 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
*
* Module Name: exdump - Interpreter debug output routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 61ff36189ace..ced3ff9d0a86 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
*
* Module Name: exfield - AML execution - field_unit read/write
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index cf6c812a8b6d..0771934c0455 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
*
* Module Name: exfldio - Aml Field I/O
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index c6f2a9166ac0..07cbac58ed21 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 65c487facdda..1fa013197fcf 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 9a448165bfeb..76ab73c37e90 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -3,7 +3,7 @@
*
* Module Name: exnames - interpreter/scanner name load/execute
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 20fb34b68bee..6ac7e0ca5c9d 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 743c258bf2e8..a94fa4d70e99 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index d3091f619909..bf08110ed6d2 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 1af35e143ba9..cb078e39abf7 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 82b1fa2d201f..1b1a006e82de 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -3,7 +3,7 @@
*
* Module Name: exprep - ACPI AML field prep utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index c49b9f8de723..a390a1c2b0ab 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -3,7 +3,7 @@
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 873de01b8ad2..dd83631090fc 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -3,7 +3,7 @@
*
* Module Name: exresnte - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 24a78b5e266c..4589de3f3012 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -3,7 +3,7 @@
*
* Module Name: exresolv - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 3a437e6ace5c..782ee353a709 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -3,7 +3,7 @@
*
* Module Name: exresop - AML Interpreter operand/object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index 5241f4c01c76..6d2581ec22ad 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -3,7 +3,7 @@
*
* Module Name: exserial - field_unit support for serial address spaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -201,6 +201,12 @@ acpi_ex_read_serial_bus(union acpi_operand_object *obj_desc,
function = ACPI_READ;
break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+
+ buffer_length = ACPI_FFH_INPUT_BUFFER_SIZE;
+ function = ACPI_READ;
+ break;
+
default:
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 575c7a39f1aa..cbc42207496d 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -3,7 +3,7 @@
*
* Module Name: exstore - AML Interpreter object store support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index b01ae015e1b5..0470b2639831 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -4,7 +4,7 @@
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 37c3131a82fa..5b168fbc03e8 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -3,7 +3,7 @@
*
* Module Name: exstorob - AML object store support, store to object
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 2c384bd52b9c..7f843c9d8a06 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -3,7 +3,7 @@
*
* Module Name: exsystem - Interface to OS services
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index f1730221ff13..d34497f3576a 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -3,7 +3,7 @@
*
* Module Name: extrace - Support for interpreter execution tracing
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -149,6 +149,57 @@ acpi_ex_trace_point(acpi_trace_event_type type,
/*******************************************************************************
*
+ * FUNCTION: acpi_ex_trace_args
+ *
+ * PARAMETERS: params - AML method arguments
+ *              count               - number of method arguments
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Trace any arguments
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_trace_args(union acpi_operand_object **params, u32 count)
+{
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ex_trace_args);
+
+ for (i = 0; i < count; i++) {
+ union acpi_operand_object *obj_desc = params[i];
+
+ if (!i) {
+ ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, " "));
+ }
+
+ switch (obj_desc->common.type) {
+ case ACPI_TYPE_INTEGER:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "%llx", obj_desc->integer.value));
+ break;
+ case ACPI_TYPE_STRING:
+ if (!obj_desc->string.length) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "NULL"));
+ continue;
+ }
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_TRACE_POINT, _COMPONENT))
+ acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX);
+ break;
+ default:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "Unknown"));
+ break;
+ }
+ if (i+1 == count) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "\n"));
+ } else {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, ", "));
+ }
+ }
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ex_start_trace_method
*
* PARAMETERS: method_node - Node of the method
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index f4d4a033f166..cc10c0732218 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -3,7 +3,7 @@
*
* Module Name: exutils - interpreter/scanner utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 790f342dcd25..a1e1fa787566 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -3,7 +3,7 @@
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index a9ba9190408b..631fd8e2b774 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -4,7 +4,7 @@
* Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
* extended FADT-V5 sleep registers.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index e0c847ab8324..386f4759c317 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index e0921f08b71a..87d78bef6323 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -4,7 +4,7 @@
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
* original/legacy sleep/PM registers.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 192c04b5a599..a5e0bccae6a4 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -3,7 +3,7 @@
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index b8de458f0368..496fd9e49f0b 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -3,7 +3,7 @@
*
* Module Name: hwvalid - I/O request validation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index c31f803995c6..847cd1b2493d 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -3,7 +3,7 @@
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 8dbf83aeb455..9aabe30416da 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -3,7 +3,7 @@
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 3efb46f0dc54..366d54a1d157 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -3,7 +3,7 @@
*
* Module Name: nsarguments - Validation of args for ACPI predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 7e5a683ae957..f05a92b88642 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -4,7 +4,7 @@
* Module Name: nsconvert - Object conversions for objects returned by
* predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 90a26cb0c472..6dc20486ad51 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index fa116ebe49a3..d5b16aaec233 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 86d126fdb27d..03373e7f7978 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: nsinit - namespace initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index fcb9de0f77a2..6ec4c646fff7 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -3,7 +3,7 @@
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d91153f65700..22aeeeb56cff 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -194,7 +194,7 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *full_path, u32 path_size, u8 no_trailing)
{
u32 length = 0, i;
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u8 do_no_trailing;
char c, *left, *right;
struct acpi_namespace_node *next_node;
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 31e551cf4ea6..959e6379bc4c 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -3,7 +3,7 @@
*
* Module Name: nsparse - namespace interface to AML parser
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index cf57bd69616d..81995ee48c49 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -3,7 +3,7 @@
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index dd37fc108fce..ca137ce5674f 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -3,7 +3,7 @@
*
* Module Name: nsprepkg - Validation of package objects for predefined names
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index b8657004190d..accfdcfb7e62 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -3,7 +3,7 @@
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 1bb7b71f07f1..8dbb870f40d2 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -4,7 +4,7 @@
* Module Name: nsrepair2 - Repair for objects returned by specific
* predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -25,7 +25,7 @@ acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info,
return_object_ptr);
typedef struct acpi_repair_info {
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
acpi_repair_function repair_function;
} acpi_repair_info;
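The two ACPI_NONSTRING annotations above mark fixed-width 4-byte name-segment arrays that are intentionally not NUL-terminated. A minimal illustration of the idea, assuming ACPI_NONSTRING expands to the GCC/Clang nonstring attribute (the real definition lives in the ACPICA headers):

/* Illustrative only: assumes ACPI_NONSTRING maps to __attribute__((nonstring)). */
#include <stdio.h>
#include <string.h>

struct example_node {
	/* 4-byte ACPI name segment such as "_SB_", stored without a trailing NUL */
	char name[4] __attribute__((nonstring));
};

int main(void)
{
	struct example_node node;

	memcpy(node.name, "_SB_", sizeof(node.name));	/* fills the whole array, no NUL */
	printf("%.4s\n", node.name);			/* print with an explicit width */
	return 0;
}

The attribute tells the compiler the array is a byte field rather than a C string, so filling all four bytes does not trigger string-truncation warnings, while passing it to functions that expect a NUL-terminated string still does.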
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 06ffdb6808f5..49cc07e2ac5a 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -4,7 +4,7 @@
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index eee396a77bae..a2ac06a26e92 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: nswalk - Functions for walking the ACPI namespace
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 5d5bcf165298..1db831545ec8 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -4,7 +4,7 @@
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 28582adfc0ac..6f6ae38ec044 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -3,7 +3,7 @@
*
* Module Name: psargs - Parse AML opcode arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index d0fd55636129..c989cadf271c 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -3,7 +3,7 @@
*
* Module Name: psloop - Main AML parse loop
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 54471083ba54..496a1c1d5b0b 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -3,7 +3,7 @@
*
* Module Name: psobject - Support for parse objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -636,7 +636,8 @@ acpi_status
acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op, acpi_status status)
{
- acpi_status status2;
+ acpi_status return_status = status;
+ u8 ascending = TRUE;
ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
@@ -650,7 +651,7 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
op));
do {
if (op) {
- if (walk_state->ascending_callback != NULL) {
+ if (ascending && walk_state->ascending_callback != NULL) {
walk_state->op = op;
walk_state->op_info =
acpi_ps_get_opcode_info(op->common.
@@ -672,49 +673,26 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
}
if (status == AE_CTRL_TERMINATE) {
- status = AE_OK;
-
- /* Clean up */
- do {
- if (op) {
- status2 =
- acpi_ps_complete_this_op
- (walk_state, op);
- if (ACPI_FAILURE
- (status2)) {
- return_ACPI_STATUS
- (status2);
- }
- }
-
- acpi_ps_pop_scope(&
- (walk_state->
- parser_state),
- &op,
- &walk_state->
- arg_types,
- &walk_state->
- arg_count);
-
- } while (op);
-
- return_ACPI_STATUS(status);
+ ascending = FALSE;
+ return_status = AE_CTRL_TERMINATE;
}
else if (ACPI_FAILURE(status)) {
/* First error is most important */
- (void)
- acpi_ps_complete_this_op(walk_state,
- op);
- return_ACPI_STATUS(status);
+ ascending = FALSE;
+ return_status = status;
}
}
- status2 = acpi_ps_complete_this_op(walk_state, op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
+ status = acpi_ps_complete_this_op(walk_state, op);
+ if (ACPI_FAILURE(status)) {
+ ascending = FALSE;
+ if (ACPI_SUCCESS(return_status) ||
+ return_status == AE_CTRL_TERMINATE) {
+ return_status = status;
+ }
}
}
@@ -724,5 +702,5 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
} while (op);
- return_ACPI_STATUS(status);
+ return_ACPI_STATUS(return_status);
}
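The psobject.c hunk above folds the old early-return cleanup loop into the main loop: the first failure (or AE_CTRL_TERMINATE) is latched in return_status, the ascending flag stops further ascending callbacks, and every remaining op is still completed and popped before the single final return. A compact sketch of that control-flow shape, with hypothetical helper names rather than the ACPICA ones:

/* Sketch of the single-exit cleanup pattern; ascend()/complete_one() are stand-ins. */
#include <stdio.h>

static int ascend(int op)       { return op == 3 ? -1 : 0; }	/* stub: op 3 "fails" */
static int complete_one(int op) { (void)op; return 0; }		/* stub: always succeeds */

static int complete_all(int op)
{
	int return_status = 0;
	int ascending = 1;

	do {
		if (ascending) {
			int status = ascend(op);

			if (status) {
				ascending = 0;		/* no more callbacks... */
				return_status = status;	/* ...but remember the first error */
			}
		}

		if (complete_one(op) && !return_status)
			return_status = -2;

		op--;					/* "pop" the next op off the stack */
	} while (op > 0);

	return return_status;			/* cleanup always runs to completion */
}

int main(void)
{
	printf("%d\n", complete_all(5));	/* -1: op 3 failed, ops 2 and 1 still completed */
	return 0;
}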
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 39e31030e5f4..bf6103986f48 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index bccf606e08b4..1c8044ffcb97 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -3,7 +3,7 @@
*
* Module Name: psopinfo - AML opcode information functions and dispatch tables
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 10a072953d78..55a416e56fd8 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -3,7 +3,7 @@
*
* Module Name: psparse - Parser top level AML parse routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index a0035bde7556..c4e4483f0a0b 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -3,7 +3,7 @@
*
* Module Name: psscope - Parser scope stack management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 7f7f5ecd4011..5a285d3f2cdb 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -3,7 +3,7 @@
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index d550c4af4702..ada1dc304d25 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -3,7 +3,7 @@
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index d92817c72b8d..2f3ebcd8aebe 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 6f4eace0ba69..d480de075a90 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -3,7 +3,7 @@
*
* Module Name: psxface - Parser external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 27384ee245f0..f92010e667cd 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -272,18 +272,13 @@ u8
acpi_rs_get_address_common(struct acpi_resource *resource,
union aml_resource *aml)
{
- struct aml_resource_address address;
-
ACPI_FUNCTION_ENTRY();
- /* Avoid undefined behavior: member access within misaligned address */
-
- memcpy(&address, aml, sizeof(address));
-
/* Validate the Resource Type */
- if ((address.resource_type > 2) &&
- (address.resource_type < 0xC0) && (address.resource_type != 0x0A)) {
+ if ((aml->address.resource_type > 2) &&
+ (aml->address.resource_type < 0xC0) &&
+ (aml->address.resource_type != 0x0A)) {
return (FALSE);
}
@@ -304,7 +299,7 @@ acpi_rs_get_address_common(struct acpi_resource *resource,
/* Generic resource type, just grab the type_specific byte */
resource->data.address.info.type_specific =
- address.specific_flags;
+ aml->address.specific_flags;
}
return (TRUE);
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 6e7a152d6459..242daf45e20e 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -608,18 +608,12 @@ acpi_rs_get_list_length(u8 *aml_buffer,
case ACPI_RESOURCE_NAME_SERIAL_BUS:{
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus
- common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
minimum_aml_resource_length =
acpi_gbl_resource_aml_serial_bus_sizes
- [common_serial_bus.type];
+ [aml_resource->common_serial_bus.type];
extra_struct_bytes +=
- common_serial_bus.resource_length -
+ aml_resource->common_serial_bus.
+ resource_length -
minimum_aml_resource_length;
break;
}
@@ -688,16 +682,10 @@ acpi_rs_get_list_length(u8 *aml_buffer,
*/
if (acpi_ut_get_resource_type(aml_buffer) ==
ACPI_RESOURCE_NAME_SERIAL_BUS) {
-
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
buffer_size =
acpi_gbl_resource_struct_serial_bus_sizes
- [common_serial_bus.type] + extra_struct_bytes;
+ [aml_resource->common_serial_bus.type] +
+ extra_struct_bytes;
} else {
buffer_size =
acpi_gbl_resource_struct_sizes[resource_index] +
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 164c96e063c6..e46efaa889cd 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -55,21 +55,15 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
-
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
- if (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE) {
+ if (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE) {
conversion_table = NULL;
} else {
/* This is an I2C, SPI, UART, or CSI2 serial_bus descriptor */
conversion_table =
acpi_gbl_convert_resource_serial_bus_dispatch
- [common_serial_bus.type];
+ [aml_resource->common_serial_bus.type];
}
} else {
conversion_table =
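The rsaddr.c, rscalc.c and rslist.c hunks drop the memcpy-into-a-local workaround and read the union aml_resource members in place again. Both idioms are shown below against a hypothetical packed descriptor: copying into an aligned local is the portable way to read a field at an arbitrary offset, while direct access is fine when the type's declaration (for example a packed layout) already makes unaligned member access well defined:

/* Illustrative only; the descriptor layout and byte stream are made up. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct __attribute__((packed)) descriptor {
	uint8_t  type;
	uint16_t resource_length;
};

int main(void)
{
	uint8_t raw[] = { 0xff, 0x8e, 0x20, 0x00 };		/* field starts at odd offset 1 */
	const struct descriptor *view = (const void *)&raw[1];	/* possibly misaligned view */
	struct descriptor local;

	/* Option 1: copy to an aligned local, then access normally. */
	memcpy(&local, &raw[1], sizeof(local));

	/* Option 2: access through the packed type; the compiler emits alignment-safe loads. */
	printf("%u %u\n", local.resource_length, view->resource_length);
	return 0;
}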
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index a1f10e4409a3..5b98e09fff76 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -3,7 +3,7 @@
*
* Module Name: tbdata - Table manager data structure functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 3c126c6d306b..c6658b2f3027 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfadt - FADT table utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 1c1b2e284bd9..d71a73216380 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfind - find table
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -57,8 +57,8 @@ acpi_tb_find_table(char *signature,
memset(&header, 0, sizeof(struct acpi_table_header));
ACPI_COPY_NAMESEG(header.signature, signature);
- strncpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
- strncpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+ memcpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
+ memcpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
/* Search for the table */
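The tbfind.c change swaps strncpy() for memcpy() when filling the fixed-width OEM ID fields. strncpy() stops at a NUL and zero-pads the remainder, and newer compilers warn when its destination is a character array that is never NUL-terminated; memcpy() simply copies the field-sized block the caller promises to provide. A hedged sketch with field sizes mirroring the 6- and 8-byte ACPI OEM ID fields:

/* Illustrative only; the struct is a stand-in for the ACPI table header fields. */
#include <stdio.h>
#include <string.h>

struct fake_header {
	char oem_id[6];		/* fixed width, not NUL-terminated */
	char oem_table_id[8];
};

int main(void)
{
	struct fake_header h = { 0 };

	/* Copies exactly sizeof(field) bytes; the caller guarantees the source is that long. */
	memcpy(h.oem_id, "LINUX ", sizeof(h.oem_id));
	memcpy(h.oem_table_id, "EXAMPLE0", sizeof(h.oem_table_id));

	printf("%.6s %.8s\n", h.oem_id, h.oem_table_id);
	return 0;
}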
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 0dc003c20e4d..ee9b85bc238b 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -3,7 +3,7 @@
*
* Module Name: tbinstal - ACPI table installation and removal
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 58b02e4b254b..fd64460a2e26 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -3,7 +3,7 @@
*
* Module Name: tbprint - Table output utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index dad7425fce3f..fa64851c7b62 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -3,7 +3,7 @@
*
* Module Name: tbutils - ACPI Table utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 275b52dc42e9..a8f07d2641b6 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxface - ACPI table-oriented external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 0f2a7343de3a..2a17c60a9a39 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfload - Table load/unload external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 5b413bbab338..961577ba9486 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index be94d2fd99a7..c673d6c95e0a 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -3,7 +3,7 @@
*
* Module Name: utaddress - op_region address range check
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index c1fb70457e20..2418a312733a 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -3,7 +3,7 @@
*
* Module Name: utalloc - local memory allocation routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index 2be37676edd7..259c28d3fecd 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -3,7 +3,7 @@
*
* Module Name: utascii - Utility ascii functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index b054bb5eeaf0..f6e6e98e9523 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -3,7 +3,7 @@
*
* Module Name: utbuffer - Buffer dump routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 85a85f7cf750..cabec193febb 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -3,7 +3,7 @@
*
* Module Name: utcache - local cache allocation routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -251,9 +251,9 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
} else {
/* The cache is empty, create a new object */
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
ACPI_MEM_TRACKING(cache->total_allocated++);
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
if ((cache->total_allocated - cache->total_freed) >
cache->max_occupied) {
cache->max_occupied =
diff --git a/drivers/acpi/acpica/utcksum.c b/drivers/acpi/acpica/utcksum.c
index b483894c3629..e6f6030b3a3f 100644
--- a/drivers/acpi/acpica/utcksum.c
+++ b/drivers/acpi/acpica/utcksum.c
@@ -3,7 +3,7 @@
*
* Module Name: utcksum - Support generating table checksums
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 2e17e657dfa4..80458e70ac2b 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -3,7 +3,7 @@
*
* Module Name: utcopy - Internal to external object translation utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 3d71bd9245c7..9f197e293c7e 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: utdebug - Debug print/trace routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 95a4b7509e01..b82130d1a8bc 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -3,7 +3,7 @@
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index c85bfa13ac1e..e8180099d01f 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -404,7 +404,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
object, object->common.type,
acpi_ut_get_object_type_name(object),
new_count));
- message = "Incremement";
+ message = "Increment";
break;
case REF_DECREMENT:
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 3e5173d03953..abc6583ed369 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -3,7 +3,7 @@
*
* Module Name: uteval - Object evaluation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 820820ea8119..97c55a113bae 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -3,7 +3,7 @@
*
* Module Name: utglobal - Global variables for the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index e62802791dcf..8cd050e9cad5 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -3,7 +3,7 @@
*
* Module Name: uthex -- Hex/ASCII support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 15c2ce91d403..eb88335dea2c 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -3,7 +3,7 @@
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 6d78504e9fbc..4bef97e8223a 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utinit - Common ACPI subsystem initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index ee6d72385c5c..123dbcbc60bc 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -3,7 +3,7 @@
*
* Module Name: utlock - Reader/Writer lock interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index f4aae8f0d3a8..272e46208263 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -3,7 +3,7 @@
*
* Module Name: utobject - ACPI object create/delete/size/cache routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 99b85fd6eccf..f6ac16729e42 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -3,7 +3,7 @@
*
* Module Name: utosi - Support for the _OSI predefined control method
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 29d2977d0746..d9bd80e2d32a 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -3,7 +3,7 @@
*
* Module Name: utpredef - support functions for predefined names
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 42b30b9f9312..423d10569736 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -3,7 +3,7 @@
*
* Module Name: utprint - Formatted printing routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -333,11 +333,8 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
pos = string;
- if (size != ACPI_UINT32_MAX) {
- end = string + size;
- } else {
- end = ACPI_CAST_PTR(char, ACPI_UINT32_MAX);
- }
+ size = ACPI_MIN(size, ACPI_PTR_DIFF(ACPI_MAX_PTR, string));
+ end = string + size;
for (; *format; ++format) {
if (*format != '%') {
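The utprint.c hunk replaces the ACPI_UINT32_MAX special case with a clamp: size is first limited to the distance between string and the top of the address space, so string + size can never wrap before the end pointer is formed. The same idea with standard types (UINTPTR_MAX stands in for ACPI_MAX_PTR here):

/* Sketch: clamp a length so buf + len cannot wrap around the address space. */
#include <stdint.h>
#include <stdio.h>

static char *buffer_end(char *buf, size_t len)
{
	size_t room = (size_t)(UINTPTR_MAX - (uintptr_t)buf);

	if (len > room)
		len = room;	/* analogous to ACPI_MIN(size, ACPI_PTR_DIFF(ACPI_MAX_PTR, string)) */

	return buf + len;
}

int main(void)
{
	char buf[16];

	/* Even a bogus "unbounded" length yields a sane, non-wrapping end pointer. */
	printf("%p .. %p\n", (void *)buf, (void *)buffer_end(buf, SIZE_MAX));
	return 0;
}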
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index cff7901f7866..e1cc3d348750 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -361,20 +361,16 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
/* Validate the bus_type field */
- if ((common_serial_bus.type == 0) ||
- (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE)) {
+ if ((aml_resource->common_serial_bus.type == 0) ||
+ (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE)) {
if (walk_state) {
ACPI_ERROR((AE_INFO,
"Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
- common_serial_bus.type));
+ aml_resource->common_serial_bus.
+ type));
}
return (AE_AML_INVALID_RESOURCE_TYPE);
}
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index f5f5da441458..a99c4c9e3d39 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -3,7 +3,7 @@
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 8f10b413e928..0682554934ca 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -3,7 +3,7 @@
*
* Module Name: utuuid -- UUID support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index aa2e923462b7..56942b5f026b 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -3,7 +3,7 @@
*
* Module Name: utxface - External interfaces, miscellaneous utility functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 70ae0afa7939..c1702f8fba67 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utxfinit - External interfaces for ACPICA initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 3cfe7e7475f2..070c07d68dfb 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -23,6 +23,7 @@ config ACPI_APEI_GHES
select ACPI_HED
select IRQ_WORK
select GENERIC_ALLOCATOR
+ select ARM_SDE_INTERFACE if ARM64
help
Generic Hardware Error Source provides a way to report
platform hardware errors (such as that from chipset). It
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 04731a5b01fa..fea11a35eea3 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -21,7 +21,7 @@
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/mm.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/unaligned.h>
#include "apei-internal.h"
@@ -83,6 +83,8 @@ static struct debugfs_blob_wrapper vendor_blob;
static struct debugfs_blob_wrapper vendor_errors;
static char vendor_dev[64];
+static u32 available_error_type;
+
/*
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
@@ -648,14 +650,9 @@ static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
static int available_error_type_show(struct seq_file *m, void *v)
{
- int rc;
- u32 error_type = 0;
- rc = einj_get_available_error_type(&error_type);
- if (rc)
- return rc;
for (int pos = 0; pos < ARRAY_SIZE(einj_error_type_string); pos++)
- if (error_type & einj_error_type_string[pos].mask)
+ if (available_error_type & einj_error_type_string[pos].mask)
seq_printf(m, "0x%08x\t%s\n", einj_error_type_string[pos].mask,
einj_error_type_string[pos].str);
@@ -678,8 +675,7 @@ bool einj_is_cxl_error_type(u64 type)
int einj_validate_error_type(u64 type)
{
- u32 tval, vendor, available_error_type = 0;
- int rc;
+ u32 tval, vendor;
/* Only low 32 bits for error type are valid */
if (type & GENMASK_ULL(63, 32))
@@ -695,13 +691,9 @@ int einj_validate_error_type(u64 type)
/* Only one error type can be specified */
if (tval & (tval - 1))
return -EINVAL;
- if (!vendor) {
- rc = einj_get_available_error_type(&available_error_type);
- if (rc)
- return rc;
+ if (!vendor)
if (!(type & available_error_type))
return -EINVAL;
- }
return 0;
}
@@ -749,17 +741,12 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
return 0;
}
-static int __init einj_probe(struct platform_device *pdev)
+static int __init einj_probe(struct faux_device *fdev)
{
int rc;
acpi_status status;
struct apei_exec_context ctx;
- if (acpi_disabled) {
- pr_debug("ACPI disabled.\n");
- return -ENODEV;
- }
-
status = acpi_get_table(ACPI_SIG_EINJ, 0,
(struct acpi_table_header **)&einj_tab);
if (status == AE_NOT_FOUND) {
@@ -777,6 +764,10 @@ static int __init einj_probe(struct platform_device *pdev)
goto err_put_table;
}
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ goto err_put_table;
+
rc = -ENOMEM;
einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
@@ -851,7 +842,7 @@ err_put_table:
return rc;
}
-static void __exit einj_remove(struct platform_device *pdev)
+static void __exit einj_remove(struct faux_device *fdev)
{
struct apei_exec_context ctx;
@@ -872,34 +863,30 @@ static void __exit einj_remove(struct platform_device *pdev)
acpi_put_table((struct acpi_table_header *)einj_tab);
}
-static struct platform_device *einj_dev;
+static struct faux_device *einj_dev;
/*
* einj_remove() lives in .exit.text. For drivers registered via
* platform_driver_probe() this is ok because they cannot get unbound at
* runtime. So mark the driver struct with __refdata to prevent modpost
* triggering a section mismatch warning.
*/
-static struct platform_driver einj_driver __refdata = {
+static struct faux_device_ops einj_device_ops __refdata = {
+ .probe = einj_probe,
.remove = __exit_p(einj_remove),
- .driver = {
- .name = "acpi-einj",
- },
};
static int __init einj_init(void)
{
- struct platform_device_info einj_dev_info = {
- .name = "acpi-einj",
- .id = -1,
- };
- int rc;
+ if (acpi_disabled) {
+ pr_debug("ACPI disabled.\n");
+ return -ENODEV;
+ }
- einj_dev = platform_device_register_full(&einj_dev_info);
- if (IS_ERR(einj_dev))
- return PTR_ERR(einj_dev);
+ einj_dev = faux_device_create("acpi-einj", NULL, &einj_device_ops);
+ if (!einj_dev)
+ return -ENODEV;
- rc = platform_driver_probe(&einj_driver, einj_probe);
- einj_initialized = rc == 0;
+ einj_initialized = true;
return 0;
}
@@ -907,9 +894,8 @@ static int __init einj_init(void)
static void __exit einj_exit(void)
{
if (einj_initialized)
- platform_driver_unregister(&einj_driver);
+ faux_device_destroy(einj_dev);
- platform_device_unregister(einj_dev);
}
module_init(einj_init);
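The einj-core.c conversion above replaces the self-registering platform device with the lighter faux device API: a faux_device_ops carrying probe/remove is handed to faux_device_create(), and faux_device_destroy() tears the device down on exit. A minimal sketch of that registration shape, using placeholder names and only the calls already visible in this diff:

/* Sketch of the faux device pattern used above; the "demo" names are placeholders. */
#include <linux/device/faux.h>
#include <linux/module.h>
#include <linux/printk.h>

static struct faux_device *demo_dev;

static int demo_probe(struct faux_device *fdev)
{
	pr_info("demo-faux: probed\n");
	return 0;
}

static void demo_remove(struct faux_device *fdev)
{
	pr_info("demo-faux: removed\n");
}

static struct faux_device_ops demo_ops = {
	.probe  = demo_probe,
	.remove = demo_remove,
};

static int __init demo_init(void)
{
	demo_dev = faux_device_create("demo-faux", NULL, &demo_ops);
	return demo_dev ? 0 : -ENODEV;	/* NULL on failure, as checked in einj_init() above */
}

static void __exit demo_exit(void)
{
	faux_device_destroy(demo_dev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");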
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 289e365f84b2..0f3c663c1b0a 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1715,7 +1715,7 @@ void __init acpi_ghes_init(void)
{
int rc;
- sdei_init();
+ acpi_sdei_init();
if (acpi_disabled)
return;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 6760330a8af5..45593612a4db 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -243,10 +243,23 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_POWER_NOW:
- if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) {
ret = -ENODEV;
- else
- val->intval = battery->rate_now * 1000;
+ break;
+ }
+
+ val->intval = battery->rate_now * 1000;
+ /*
+ * When discharging, the current should be reported as a
+ * negative number as per the power supply class interface
+ * definition.
+ */
+ if (psp == POWER_SUPPLY_PROP_CURRENT_NOW &&
+ (battery->state & ACPI_BATTERY_STATE_DISCHARGING) &&
+ acpi_battery_handle_discharging(battery)
+ == POWER_SUPPLY_STATUS_DISCHARGING)
+ val->intval = -val->intval;
+
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
@@ -279,8 +292,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
- val->intval = battery->capacity_now * 100/
- full_capacity;
+ val->intval = DIV_ROUND_CLOSEST_ULL(battery->capacity_now * 100ULL,
+ full_capacity);
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
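The battery.c capacity hunk switches the percentage from truncating division to DIV_ROUND_CLOSEST_ULL, so a nearly full battery reads 100% instead of 99%. A tiny illustration of the difference (the helper below mirrors round-to-nearest for positive values; it is not the kernel macro):

/* Truncating vs round-to-nearest percentage; illustrative user-space code. */
#include <stdio.h>

static unsigned long long div_round_closest(unsigned long long n, unsigned long long d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	unsigned long long capacity_now = 2999, full_capacity = 3000;

	printf("truncating: %llu%%  rounded: %llu%%\n",
	       capacity_now * 100 / full_capacity,
	       div_round_closest(capacity_now * 100, full_capacity));
	/* prints: truncating: 99%  rounded: 100% */
	return 0;
}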
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 058910af82bc..c2ab2783303f 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1446,8 +1446,10 @@ static int __init acpi_init(void)
}
acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
- if (!acpi_kobj)
- pr_debug("%s: kset create error\n", __func__);
+ if (!acpi_kobj) {
+ pr_err("Failed to register kobject\n");
+ return -ENOMEM;
+ }
init_prmt();
acpi_init_pcc();
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 90b09840536d..0a7026040188 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -458,7 +458,7 @@ static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
acpi_pm_wakeup_event(&device->dev);
button = acpi_driver_data(device);
- if (button->suspended)
+ if (button->suspended || event == ACPI_BUTTON_NOTIFY_WAKE)
return;
input = button->input;
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index f193e713825a..a9ae2fd62863 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -47,7 +47,6 @@
struct cppc_pcc_data {
struct pcc_mbox_chan *pcc_channel;
- void __iomem *pcc_comm_addr;
bool pcc_channel_acquired;
unsigned int deadline_us;
unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
@@ -95,7 +94,7 @@ static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
/* pcc mapped address + header size + offset within PCC subspace */
-#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
+#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_channel->shmem + \
0x8 + (offs))
/* Check if a CPC register is in PCC */
@@ -129,6 +128,20 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
!!(cpc)->cpc_entry.int_value : \
!IS_NULL_REG(&(cpc)->cpc_entry.reg))
+
+/*
+ * Each bit indicates the optionality of the register in per-cpu
+ * cpc_regs[] with the corresponding index. 0 means mandatory and 1
+ * means optional.
+ */
+#define REG_OPTIONAL (0x1FC7D0)
+
+/*
+ * Use the index of the register in per-cpu cpc_regs[] to check if
+ * it's an optional one.
+ */
+#define IS_OPTIONAL_CPC_REG(reg_idx) (REG_OPTIONAL & (1U << (reg_idx)))
+
/*
* Arbitrary Retries in case the remote processor is slow to respond
* to PCC commands. Keeping it high enough to cover emulators where
@@ -223,7 +236,7 @@ static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
int ret, status;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory __iomem *generic_comm_base =
- pcc_ss_data->pcc_comm_addr;
+ pcc_ss_data->pcc_channel->shmem;
if (!pcc_ss_data->platform_owns_pcc)
return 0;
@@ -258,7 +271,7 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
int ret = -EIO, i;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory __iomem *generic_comm_base =
- pcc_ss_data->pcc_comm_addr;
+ pcc_ss_data->pcc_channel->shmem;
unsigned int time_delta;
/*
@@ -571,15 +584,6 @@ static int register_pcc_channel(int pcc_ss_idx)
pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
- pcc_data[pcc_ss_idx]->pcc_comm_addr =
- acpi_os_ioremap(pcc_chan->shmem_base_addr,
- pcc_chan->shmem_size);
- if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
- pr_err("Failed to ioremap PCC comm region mem for %d\n",
- pcc_ss_idx);
- return -ENOMEM;
- }
-
/* Set flag so that we don't come here for each CPU. */
pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
}
@@ -1175,43 +1179,106 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return ret_val;
}
-static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
+static int cppc_get_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 *val)
{
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret;
+
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id\n");
+ return -ENODEV;
+ }
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
+ ret = cpc_read(cpu, reg, val);
+ else
+ ret = -EIO;
+
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+}
+
+static int cppc_get_reg_val(int cpu, enum cppc_regs reg_idx, u64 *val)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *reg;
+ if (val == NULL)
+ return -EINVAL;
+
if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
return -ENODEV;
}
reg = &cpc_desc->cpc_regs[reg_idx];
- if (CPC_IN_PCC(reg)) {
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = 0;
+ if ((reg->type == ACPI_TYPE_INTEGER && IS_OPTIONAL_CPC_REG(reg_idx) &&
+ !reg->cpc_entry.int_value) || (reg->type != ACPI_TYPE_INTEGER &&
+ IS_NULL_REG(&reg->cpc_entry.reg))) {
+ pr_debug("CPC register is not supported\n");
+ return -EOPNOTSUPP;
+ }
- if (pcc_ss_id < 0)
- return -EIO;
+ if (CPC_IN_PCC(reg))
+ return cppc_get_reg_val_in_pcc(cpu, reg, val);
- pcc_ss_data = pcc_data[pcc_ss_id];
-
- down_write(&pcc_ss_data->pcc_lock);
+ return cpc_read(cpu, reg, val);
+}
- if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
- cpc_read(cpunum, reg, perf);
- else
- ret = -EIO;
+static int cppc_set_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 val)
+{
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret;
- up_write(&pcc_ss_data->pcc_lock);
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id\n");
+ return -ENODEV;
+ }
+ ret = cpc_write(cpu, reg, val);
+ if (ret)
return ret;
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+ /* after writing CPC, transfer the ownership of PCC to platform */
+ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+}
+
+static int cppc_set_reg_val(int cpu, enum cppc_regs reg_idx, u64 val)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+ struct cpc_register_resource *reg;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+ return -ENODEV;
}
- cpc_read(cpunum, reg, perf);
+ reg = &cpc_desc->cpc_regs[reg_idx];
- return 0;
+ /* if a register is writeable, it must be a buffer and not null */
+ if ((reg->type != ACPI_TYPE_BUFFER) || IS_NULL_REG(&reg->cpc_entry.reg)) {
+ pr_debug("CPC register is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (CPC_IN_PCC(reg))
+ return cppc_set_reg_val_in_pcc(cpu, reg, val);
+
+ return cpc_write(cpu, reg, val);
}
/**
@@ -1223,7 +1290,7 @@ static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
*/
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
- return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
+ return cppc_get_reg_val(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
@@ -1236,7 +1303,7 @@ EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
*/
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
- return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
+ return cppc_get_reg_val(cpunum, NOMINAL_PERF, nominal_perf);
}
/**
@@ -1248,7 +1315,7 @@ int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
*/
int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
- return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
+ return cppc_get_reg_val(cpunum, HIGHEST_PERF, highest_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
@@ -1261,7 +1328,7 @@ EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
*/
int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
- return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
+ return cppc_get_reg_val(cpunum, ENERGY_PERF, epp_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_epp_perf);
@@ -1535,53 +1602,110 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
/**
- * cppc_get_auto_sel_caps - Read autonomous selection register.
- * @cpunum : CPU from which to read register.
- * @perf_caps : struct where autonomous selection register value is updated.
+ * cppc_set_epp() - Write the EPP register.
+ * @cpu: CPU on which to write register.
+ * @epp_val: Value to write to the EPP register.
*/
-int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
+int cppc_set_epp(int cpu, u64 epp_val)
{
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
- struct cpc_register_resource *auto_sel_reg;
- u64 auto_sel;
+ if (epp_val > CPPC_ENERGY_PERF_MAX)
+ return -EINVAL;
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
- return -ENODEV;
- }
+ return cppc_set_reg_val(cpu, ENERGY_PERF, epp_val);
+}
+EXPORT_SYMBOL_GPL(cppc_set_epp);
- auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
+/**
+ * cppc_get_auto_act_window() - Read autonomous activity window register.
+ * @cpu: CPU from which to read register.
+ * @auto_act_window: Return address.
+ *
+ * According to ACPI 6.5, s8.4.6.1.6, the value read from the autonomous
+ * activity window register consists of two parts: a 7-bit significand and
+ * a 3-bit exponent.
+ */
+int cppc_get_auto_act_window(int cpu, u64 *auto_act_window)
+{
+ unsigned int exp;
+ u64 val, sig;
+ int ret;
- if (!CPC_SUPPORTED(auto_sel_reg))
- pr_warn_once("Autonomous mode is not unsupported!\n");
+ if (auto_act_window == NULL)
+ return -EINVAL;
- if (CPC_IN_PCC(auto_sel_reg)) {
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = 0;
+ ret = cppc_get_reg_val(cpu, AUTO_ACT_WINDOW, &val);
+ if (ret)
+ return ret;
- if (pcc_ss_id < 0)
- return -ENODEV;
+ sig = val & CPPC_AUTO_ACT_WINDOW_MAX_SIG;
+ exp = (val >> CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) & CPPC_AUTO_ACT_WINDOW_MAX_EXP;
+ *auto_act_window = sig * int_pow(10, exp);
- pcc_ss_data = pcc_data[pcc_ss_id];
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_get_auto_act_window);
- down_write(&pcc_ss_data->pcc_lock);
+/**
+ * cppc_set_auto_act_window() - Write autonomous activity window register.
+ * @cpu: CPU on which to write register.
+ * @auto_act_window: usec value to write to the autonomous activity window register.
+ *
+ * According to ACPI 6.5, s8.4.6.1.6, the value to write to the autonomous
+ * activity window register consists of two parts: a 7-bit significand and
+ * a 3-bit exponent.
+ */
+int cppc_set_auto_act_window(int cpu, u64 auto_act_window)
+{
+ /* The max value to store is 1270000000 */
+ u64 max_val = CPPC_AUTO_ACT_WINDOW_MAX_SIG * int_pow(10, CPPC_AUTO_ACT_WINDOW_MAX_EXP);
+ int exp = 0;
+ u64 val;
- if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
- cpc_read(cpunum, auto_sel_reg, &auto_sel);
- perf_caps->auto_sel = (bool)auto_sel;
- } else {
- ret = -EIO;
- }
+ if (auto_act_window > max_val)
+ return -EINVAL;
- up_write(&pcc_ss_data->pcc_lock);
+ /*
+ * The max significand is 127; when auto_act_window is larger than
+ * 129, discard the least significant digit and increase the
+ * exponent by 1.
+ */
+ while (auto_act_window > CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH) {
+ auto_act_window /= 10;
+ exp += 1;
+ }
+
+ /* For 128 and 129, cut it to 127. */
+ if (auto_act_window > CPPC_AUTO_ACT_WINDOW_MAX_SIG)
+ auto_act_window = CPPC_AUTO_ACT_WINDOW_MAX_SIG;
+
+ val = (exp << CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) + auto_act_window;
+ return cppc_set_reg_val(cpu, AUTO_ACT_WINDOW, val);
+}
+EXPORT_SYMBOL_GPL(cppc_set_auto_act_window);
+
+/**
+ * cppc_get_auto_sel() - Read autonomous selection register.
+ * @cpu: CPU from which to read register.
+ * @enable: Return address.
+ */
+int cppc_get_auto_sel(int cpu, bool *enable)
+{
+ u64 auto_sel;
+ int ret;
+
+ if (enable == NULL)
+ return -EINVAL;
+
+ ret = cppc_get_reg_val(cpu, AUTO_SEL_ENABLE, &auto_sel);
+ if (ret)
return ret;
- }
+
+ *enable = (bool)auto_sel;
return 0;
}
-EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
+EXPORT_SYMBOL_GPL(cppc_get_auto_sel);
/**
* cppc_set_auto_sel - Write autonomous selection register.
@@ -1590,43 +1714,7 @@ EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
*/
int cppc_set_auto_sel(int cpu, bool enable)
{
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
- struct cpc_register_resource *auto_sel_reg;
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = -EINVAL;
-
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpu);
- return -ENODEV;
- }
-
- auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
-
- if (CPC_IN_PCC(auto_sel_reg)) {
- if (pcc_ss_id < 0) {
- pr_debug("Invalid pcc_ss_id\n");
- return -ENODEV;
- }
-
- if (CPC_SUPPORTED(auto_sel_reg)) {
- ret = cpc_write(cpu, auto_sel_reg, enable);
- if (ret)
- return ret;
- }
-
- pcc_ss_data = pcc_data[pcc_ss_id];
-
- down_write(&pcc_ss_data->pcc_lock);
- /* after writing CPC, transfer the ownership of PCC to platform */
- ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
- up_write(&pcc_ss_data->pcc_lock);
- } else {
- ret = -ENOTSUPP;
- pr_debug("_CPC in PCC is not supported\n");
- }
-
- return ret;
+ return cppc_set_reg_val(cpu, AUTO_SEL_ENABLE, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
@@ -1640,38 +1728,7 @@ EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
*/
int cppc_set_enable(int cpu, bool enable)
{
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
- struct cpc_register_resource *enable_reg;
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = -EINVAL;
-
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpu);
- return -EINVAL;
- }
-
- enable_reg = &cpc_desc->cpc_regs[ENABLE];
-
- if (CPC_IN_PCC(enable_reg)) {
-
- if (pcc_ss_id < 0)
- return -EIO;
-
- ret = cpc_write(cpu, enable_reg, enable);
- if (ret)
- return ret;
-
- pcc_ss_data = pcc_data[pcc_ss_id];
-
- down_write(&pcc_ss_data->pcc_lock);
- /* after writing CPC, transfer the ownership of PCC to platfrom */
- ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
- up_write(&pcc_ss_data->pcc_lock);
- return ret;
- }
-
- return cpc_write(cpu, enable_reg, enable);
+ return cppc_set_reg_val(cpu, ENABLE, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);
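The new cppc_get_auto_act_window()/cppc_set_auto_act_window() helpers above pack a microsecond value into a 7-bit decimal significand and a 3-bit power-of-ten exponent (the constants 127, 129 and 7 come from the comments in the hunk). A small user-space sketch of the round trip, with local names standing in for the CPPC_* macros:

/* Round-trips a usec value through the 7-bit significand / 3-bit exponent format. */
#include <stdio.h>
#include <stdint.h>

#define MAX_SIG      127	/* 7-bit significand */
#define CARRY_THRESH 129	/* above this, drop a digit and bump the exponent */
#define SIG_BITS     7

static uint64_t encode(uint64_t usec)
{
	unsigned int exp = 0;

	while (usec > CARRY_THRESH) {
		usec /= 10;
		exp++;
	}
	if (usec > MAX_SIG)
		usec = MAX_SIG;

	return ((uint64_t)exp << SIG_BITS) | usec;
}

static uint64_t decode(uint64_t reg)
{
	uint64_t val = reg & MAX_SIG;
	unsigned int exp = reg >> SIG_BITS;

	while (exp--)
		val *= 10;
	return val;
}

int main(void)
{
	uint64_t reg = encode(15000);

	/* 15000 usec -> sig 15, exp 3 -> register 0x18f, which decodes back to 15000 */
	printf("reg=0x%llx decoded=%llu\n",
	       (unsigned long long)reg, (unsigned long long)decode(reg));
	return 0;
}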
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 8db09d81918f..6f4203716b53 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2301,6 +2301,40 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
},
},
+ /*
+ * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3929
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+ }
+ },
+ {
+ // TUXEDO InfinityBook Pro AMD Gen9
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
+ },
+ },
{ },
};
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 1687483ff319..76a856c32c4d 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -12,7 +12,7 @@
enum acpi_irq_model_id acpi_irq_model;
-static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi);
+static acpi_gsi_domain_disp_fn acpi_get_gsi_domain_id;
static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi);
/**
@@ -307,12 +307,24 @@ EXPORT_SYMBOL_GPL(acpi_irq_get);
* for a given GSI
*/
void __init acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *(*fn)(u32))
+ acpi_gsi_domain_disp_fn fn)
{
acpi_irq_model = model;
acpi_get_gsi_domain_id = fn;
}
+/*
+ * acpi_get_gsi_dispatcher() - Get the GSI dispatcher function
+ *
+ * Return the dispatcher function that computes the domain fwnode for
+ * a given GSI.
+ */
+acpi_gsi_domain_disp_fn acpi_get_gsi_dispatcher(void)
+{
+ return acpi_get_gsi_domain_id;
+}
+EXPORT_SYMBOL_GPL(acpi_get_gsi_dispatcher);
+
/**
* acpi_set_gsi_to_irq_fallback - Register a GSI transfer
* callback to fallback to arch specified implementation.
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 0a725e46d017..53816dfab645 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
+#include <linux/memory.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>
@@ -429,13 +430,23 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
{
struct acpi_cedt_cfmws *cfmws;
int *fake_pxm = arg;
- u64 start, end;
+ u64 start, end, align;
int node;
+ int err;
cfmws = (struct acpi_cedt_cfmws *)header;
start = cfmws->base_hpa;
end = cfmws->base_hpa + cfmws->window_size;
+ /* Align memblock size to CFMW regions if possible */
+ align = 1UL << __ffs(start | end);
+ if (align >= SZ_256M) {
+ err = memory_block_advise_max_size(align);
+ if (err)
+ pr_warn("CFMWS: memblock size advise failed (%d)\n", err);
+ } else
+ pr_err("CFMWS: [BIOS BUG] base/size alignment violates spec\n");
+
/*
* The SRAT may have already described NUMA details for all,
* or a portion of, this CFMWS HPA range. Extend the memblks
@@ -453,7 +464,7 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
return -EINVAL;
}
- if (numa_add_memblk(node, start, end) < 0) {
+ if (numa_add_reserved_memblk(node, start, end) < 0) {
/* CXL driver must handle the NUMA_NO_NODE case */
pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
node, start, end);
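The srat.c hunk derives the largest power-of-two alignment shared by the window's base and end with 1UL << __ffs(start | end). For example, a CFMWS at base 0x100000000 with size 0x40000000 gives start | end = 0x140000000, whose lowest set bit is bit 30, i.e. a 1 GiB alignment, which clears the 256 MiB threshold. An equivalent user-space check with hypothetical values:

/* Computes the common power-of-two alignment of a window's base and end. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t start = 0x100000000ULL;			/* 4 GiB base   */
	uint64_t end   = start + 0x40000000ULL;			/* + 1 GiB size */
	uint64_t align = 1ULL << __builtin_ctzll(start | end);	/* same idea as __ffs() */

	printf("align = %llu MiB (%s)\n",
	       (unsigned long long)(align >> 20),
	       align >= (256ULL << 20) ? "ok" : "below 256 MiB");
	return 0;
}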
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index df9328c850bd..f2c943b934be 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -42,7 +42,6 @@ static struct acpi_osi_entry
osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
{"Module Device", true},
{"Processor Device", true},
- {"3.0 _SCP Extensions", true},
{"Processor Aggregator Device", true},
};
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d0b6a024daae..74ade4160314 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -858,7 +858,7 @@ next:
}
}
-static void acpi_pci_root_remap_iospace(struct fwnode_handle *fwnode,
+static void acpi_pci_root_remap_iospace(const struct fwnode_handle *fwnode,
struct resource_entry *entry)
{
#ifdef PCI_IOBASE
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index ffbfd32f4cf1..b43f4459a4f6 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -688,6 +688,9 @@ static int __init platform_profile_init(void)
{
int err;
+ if (acpi_disabled)
+ return -EOPNOTSUPP;
+
err = class_register(&platform_profile_class);
if (err)
return err;
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index a35dd0e41c27..54676e3d82dd 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -229,18 +229,20 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
node_entry = ACPI_PTR_DIFF(node, table_hdr);
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
- while ((unsigned long)entry + proc_sz < table_end) {
+ /* ignore subtable types that are smaller than a processor node */
+ while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
+
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
cpu_node->parent == node_entry)
return 0;
if (entry->length == 0)
return 0;
+
entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
entry->length);
-
}
return 1;
}
@@ -270,18 +272,21 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
table_end = (unsigned long)table_hdr + table_hdr->length;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
/* find the processor structure associated with this cpuid */
- while ((unsigned long)entry + proc_sz < table_end) {
+ while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
if (entry->length == 0) {
pr_warn("Invalid zero length subtable\n");
break;
}
+ /* entry->length may not equal proc_sz; revalidate the processor structure length */
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
acpi_cpu_id == cpu_node->acpi_processor_id &&
+ (unsigned long)entry + entry->length <= table_end &&
+ entry->length == proc_sz + cpu_node->number_of_priv_resources * sizeof(u32) &&
acpi_pptt_leaf_node(table_hdr, cpu_node)) {
return (struct acpi_pptt_processor *)entry;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index b181f7fc2090..e2febca2ec13 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -461,10 +461,8 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
- unsigned int i;
int result;
-
/* NOTE: the idle thread may not be running while calling
* this function */
@@ -481,17 +479,7 @@ static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
acpi_processor_get_power_info_default(pr);
pr->power.count = acpi_processor_power_verify(pr);
-
- /*
- * if one state of type C2 or C3 is available, mark this
- * CPU as being "idle manageable"
- */
- for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
- if (pr->power.states[i].valid) {
- pr->power.count = i;
- pr->flags.power = 1;
- }
- }
+ pr->flags.power = 1;
return 0;
}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 53996f1a2d80..64b8d1e19594 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -20,6 +20,7 @@
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
+#include <asm/msr.h>
#endif
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 00d045e5f524..d1541a386fbc 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -18,9 +18,12 @@
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
+#include <linux/uaccess.h>
#include <acpi/processor.h>
#include <asm/io.h>
-#include <linux/uaccess.h>
+#ifdef CONFIG_X86
+#include <asm/msr.h>
+#endif
/* ignore_tpc:
* 0 -> acpi processor driver doesn't ignore _TPC values
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 14c7bac4100b..7d59c6c9185f 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -534,7 +534,7 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
*/
static const struct dmi_system_id irq1_edge_low_force_override[] = {
{
- /* MECHREV Jiaolong17KS Series GM7XG0M */
+ /* MECHREVO Jiaolong17KS Series GM7XG0M */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM7XG0M"),
},
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 2295abbecd14..fa9bb8c8ce95 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -396,7 +396,7 @@ static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
}
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
-static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
+static const char table_sigs[][ACPI_NAMESEG_SIZE] __nonstring_array __initconst = {
ACPI_SIG_BERT, ACPI_SIG_BGRT, ACPI_SIG_CPEP, ACPI_SIG_ECDT,
ACPI_SIG_EINJ, ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT,
ACPI_SIG_MSCT, ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT,
@@ -719,8 +719,12 @@ int __init acpi_locate_initial_tables(void)
}
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_warn("Failed to initialize tables, status=0x%x (%s)", status, msg);
return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 0c874186f8ae..5c2defe55898 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -803,6 +803,12 @@ static int acpi_thermal_add(struct acpi_device *device)
acpi_thermal_aml_dependency_fix(tz);
+ /*
+ * Set the cooling mode [_SCP] to active cooling. This needs to happen before
+ * we retrieve the trip point values.
+ */
+ acpi_execute_simple_method(tz->device->handle, "_SCP", ACPI_THERMAL_MODE_ACTIVE);
+
/* Get trip points [_ACi, _PSV, etc.] (required). */
acpi_thermal_get_trip_points(tz);
@@ -814,10 +820,6 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
- /* Set the cooling mode [_SCP] to active cooling. */
- acpi_execute_simple_method(tz->device->handle, "_SCP",
- ACPI_THERMAL_MODE_ACTIVE);
-
/* Determine the default polling frequency [_TZP]. */
if (tzp)
tz->polling_frequency = tzp;
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index 2aa69a2fba73..c13a20365c2c 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -19,11 +19,11 @@
#define pr_fmt(fmt) "ACPI: VIOT: " fmt
#include <linux/acpi_viot.h>
-#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
struct viot_iommu {
/* Node offset within the table */
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 76052006bd87..5fc2c8ee61b1 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -6373,7 +6373,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd offset %lx\n",
buffer->data_size, buffer->offsets_size,
- proc->alloc.vm_start - buffer->user_data);
+ buffer->user_data - proc->alloc.vm_start);
}
static void print_binder_work_ilocked(struct seq_file *m,
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 94c6446604fc..98da8c4eea59 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -187,7 +187,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
inode_lock(d_inode(root));
/* look it up */
- dentry = lookup_one_len(name, root, name_len);
+ dentry = lookup_noperm(&QSTR(name), root);
if (IS_ERR(dentry)) {
inode_unlock(d_inode(root));
ret = PTR_ERR(dentry);
@@ -487,7 +487,7 @@ static struct dentry *binderfs_create_dentry(struct dentry *parent,
{
struct dentry *dentry;
- dentry = lookup_one_len(name, parent, strlen(name));
+ dentry = lookup_noperm(&QSTR(name), parent);
if (IS_ERR(dentry))
return dentry;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 773799cfd443..79b20da0a256 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6682,12 +6682,6 @@ const struct ata_port_info ata_dummy_port_info = {
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
-void ata_print_version(const struct device *dev, const char *version)
-{
- dev_printk(KERN_DEBUG, dev, "version %s\n", version);
-}
-EXPORT_SYMBOL(ata_print_version);
-
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b990c1ee0b12..c11d8e634bf7 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3432,7 +3432,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
enum ata_lpm_policy old_policy = link->lpm_policy;
- bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
+ bool host_has_dipm = !(link->ap->flags & ATA_FLAG_NO_DIPM);
unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
unsigned int err_mask;
int rc;
@@ -3443,28 +3443,35 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
return 0;
/*
- * DIPM is enabled only for MIN_POWER as some devices
- * misbehave when the host NACKs transition to SLUMBER. Order
- * device and link configurations such that the host always
- * allows DIPM requests.
+ * This function currently assumes that it will never be supplied policy
+ * ATA_LPM_UNKNOWN.
+ */
+ if (WARN_ON_ONCE(policy == ATA_LPM_UNKNOWN))
+ return 0;
+
+ /*
+ * DIPM is enabled only for ATA_LPM_MIN_POWER,
+ * ATA_LPM_MIN_POWER_WITH_PARTIAL, and ATA_LPM_MED_POWER_WITH_DIPM, as
+ * some devices misbehave when the host NACKs transition to SLUMBER.
*/
ata_for_each_dev(dev, link, ENABLED) {
- bool hipm = ata_id_has_hipm(dev->id);
- bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
+ bool dev_has_hipm = ata_id_has_hipm(dev->id);
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
/* find the first enabled and LPM enabled devices */
if (!link_dev)
link_dev = dev;
- if (!lpm_dev && (hipm || dipm))
+ if (!lpm_dev &&
+ (dev_has_hipm || (dev_has_dipm && host_has_dipm)))
lpm_dev = dev;
hints &= ~ATA_LPM_EMPTY;
- if (!hipm)
+ if (!dev_has_hipm)
hints &= ~ATA_LPM_HIPM;
/* disable DIPM before changing link config */
- if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
+ if (dev_has_dipm) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_DISABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
@@ -3505,10 +3512,16 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
if (ap && ap->slave_link)
ap->slave_link->lpm_policy = policy;
- /* host config updated, enable DIPM if transitioning to MIN_POWER */
+ /*
+ * Host config updated, enable DIPM if transitioning to
+ * ATA_LPM_MIN_POWER, ATA_LPM_MIN_POWER_WITH_PARTIAL, or
+ * ATA_LPM_MED_POWER_WITH_DIPM.
+ */
ata_for_each_dev(dev, link, ENABLED) {
- if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
- ata_id_has_dipm(dev->id)) {
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
+
+ if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && host_has_dipm &&
+ dev_has_dipm) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index ba300cc0a3a3..cb46ce276bb1 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -1509,7 +1509,10 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
struct ata_queued_cmd *qc;
unsigned int err_mask, tag;
u8 *sense, sk = 0, asc = 0, ascq = 0;
- u64 sense_valid, val;
+ u16 extended_sense;
+ bool aux_icc_valid;
+ u32 sense_valid;
+ u64 val;
int ret = 0;
err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
@@ -1527,8 +1530,9 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
return -EIO;
}
- sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
- ((u64)buf[10] << 16) | ((u64)buf[11] << 24);
+ sense_valid = get_unaligned_le32(&buf[8]);
+ extended_sense = get_unaligned_le16(&buf[14]);
+ aux_icc_valid = extended_sense & BIT(15);
ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_EH) ||
@@ -1541,7 +1545,7 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
* If the command does not have any sense data, clear ATA_SENSE.
* Keep ATA_QCFLAG_EH_SUCCESS_CMD so that command is finished.
*/
- if (!(sense_valid & (1ULL << tag))) {
+ if (!(sense_valid & BIT(tag))) {
qc->result_tf.status &= ~ATA_SENSE;
continue;
}
@@ -1556,6 +1560,17 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
continue;
}
+ qc->result_tf.nsect = sense[6];
+ qc->result_tf.hob_nsect = sense[7];
+ qc->result_tf.lbal = sense[8];
+ qc->result_tf.lbam = sense[9];
+ qc->result_tf.lbah = sense[10];
+ qc->result_tf.hob_lbal = sense[11];
+ qc->result_tf.hob_lbam = sense[12];
+ qc->result_tf.hob_lbah = sense[13];
+ if (aux_icc_valid)
+ qc->result_tf.auxiliary = get_unaligned_le32(&sense[16]);
+
/* Set sense without also setting scsicmd->result */
scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
qc->scsicmd->sense_buffer, sk,
@@ -1619,7 +1634,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
return;
}
- if (!(link->sactive & (1 << tag))) {
+ if (!(link->sactive & BIT(tag))) {
ata_link_err(link, "log page 10h reported inactive tag %d\n",
tag);
return;
@@ -1644,8 +1659,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
if (ata_scsi_sense_is_valid(sense_key, asc, ascq)) {
ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc,
ascq);
- ata_scsi_set_sense_information(dev, qc->scsicmd,
- &qc->result_tf);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
}
}
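The conversions above replace open-coded byte shifting with get_unaligned_le32()/get_unaligned_le16() and bare shifts with the BIT() macro. A rough userspace approximation of what the unaligned accessor does, for illustration only (not the kernel helpers themselves):

/* Illustrative only: userspace stand-ins for get_unaligned_le32() and BIT(). */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define BIT(n) (1U << (n))

static uint32_t get_unaligned_le32_example(const void *p)
{
	uint8_t b[4];

	memcpy(b, p, sizeof(b));	/* safe for any alignment of @p */
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	uint8_t buf[16] = { [8] = 0x0d };	/* sense valid for tags 0, 2 and 3 */
	uint32_t sense_valid = get_unaligned_le32_example(&buf[8]);

	assert(sense_valid & BIT(0));
	assert(!(sense_valid & BIT(1)));
	assert(sense_valid & BIT(2));
	return 0;
}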
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2796c0da8257..a21c9895408d 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -216,17 +216,21 @@ void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
scsi_build_sense(cmd, d_sense, sk, asc, ascq);
}
-void ata_scsi_set_sense_information(struct ata_device *dev,
- struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf)
+static void ata_scsi_set_sense_information(struct ata_queued_cmd *qc)
{
u64 information;
- information = ata_tf_read_block(tf, dev);
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(qc->dev,
+ "missing result TF: can't set INFORMATION sense field\n");
+ return;
+ }
+
+ information = ata_tf_read_block(&qc->result_tf, qc->dev);
if (information == U64_MAX)
return;
- scsi_set_sense_information(cmd->sense_buffer,
+ scsi_set_sense_information(qc->scsicmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, information);
}
@@ -971,8 +975,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
* ata_gen_ata_sense - generate a SCSI fixed sense block
* @qc: Command that we are erroring out
*
- * Generate sense block for a failed ATA command @qc. Descriptor
- * format is used to accommodate LBA48 block address.
+ * Generate sense block for a failed ATA command @qc.
*
* LOCKING:
* None.
@@ -982,8 +985,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
struct ata_device *dev = qc->dev;
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
- unsigned char *sb = cmd->sense_buffer;
- u64 block;
u8 sense_key, asc, ascq;
if (ata_dev_disabled(dev)) {
@@ -1014,12 +1015,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
-
- block = ata_tf_read_block(&qc->result_tf, dev);
- if (block == U64_MAX)
- return;
-
- scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
}
void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -1679,8 +1674,10 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
ata_scsi_set_passthru_sense_fields(qc);
if (is_ck_cond_request)
set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
- } else if (is_error && !have_sense) {
- ata_gen_ata_sense(qc);
+ } else if (is_error) {
+ if (!have_sense)
+ ata_gen_ata_sense(qc);
+ ata_scsi_set_sense_information(qc);
}
ata_qc_done(qc);
@@ -2453,8 +2450,8 @@ static unsigned int ata_msense_control_ata_feature(struct ata_device *dev,
*/
put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]);
- if (dev->flags & ATA_DFLAG_CDL)
- buf[4] = 0x02; /* Support T2A and T2B pages */
+ if (dev->flags & ATA_DFLAG_CDL_ENABLED)
+ buf[4] = 0x02; /* T2A and T2B pages enabled */
else
buf[4] = 0;
@@ -3886,12 +3883,11 @@ static int ata_mselect_control_spg0(struct ata_queued_cmd *qc,
}
/*
- * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
+ * Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode
* page) into a SET FEATURES command.
*/
-static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
- const u8 *buf, int len,
- u16 *fp)
+static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
+ const u8 *buf, int len, u16 *fp)
{
struct ata_device *dev = qc->dev;
struct ata_taskfile *tf = &qc->tf;
@@ -3909,17 +3905,27 @@ static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
/* Check cdl_ctrl */
switch (buf[0] & 0x03) {
case 0:
- /* Disable CDL */
+ /* Disable CDL if it is enabled */
+ if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
+ return 0;
+ ata_dev_dbg(dev, "Disabling CDL\n");
cdl_action = 0;
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
break;
case 0x02:
- /* Enable CDL T2A/T2B: NCQ priority must be disabled */
+ /*
+ * Enable CDL if not already enabled. Since this is mutually
+ * exclusive with NCQ priority, allow this only if NCQ priority
+ * is disabled.
+ */
+ if (dev->flags & ATA_DFLAG_CDL_ENABLED)
+ return 0;
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
ata_dev_err(dev,
"NCQ priority must be disabled to enable CDL\n");
return -EINVAL;
}
+ ata_dev_dbg(dev, "Enabling CDL\n");
cdl_action = 1;
dev->flags |= ATA_DFLAG_CDL_ENABLED;
break;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 0337be4faec7..ce5c628fa6fd 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -141,9 +141,6 @@ extern int ata_scsi_offline_dev(struct ata_device *dev);
extern bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq);
extern void ata_scsi_set_sense(struct ata_device *dev,
struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct ata_device *dev,
- struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_scsi_dev_rescan(struct work_struct *work);
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 434f380114af..03dbaf4a13a7 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -223,10 +223,16 @@ static int pxa_ata_probe(struct platform_device *pdev)
ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
resource_size(cmd_res));
+ if (!ap->ioaddr.cmd_addr)
+ return -ENOMEM;
ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
resource_size(ctl_res));
+ if (!ap->ioaddr.ctl_addr)
+ return -ENOMEM;
ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
resource_size(dma_res));
+ if (!ap->ioaddr.bmdma_addr)
+ return -ENOMEM;
/*
* Adjust register offsets
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index a482741eb181..f7f5131af937 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1117,9 +1117,14 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
mmio += PDC_CHIP0_OFS;
for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- pdc_i2c_read_data[i].reg,
- &spd0[pdc_i2c_read_data[i].ofs]);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ pdc_i2c_read_data[i].reg,
+ &spd0[pdc_i2c_read_data[i].ofs])) {
+ dev_err(host->dev,
+ "Failed in i2c read at index %d: device=%#x, reg=%#x\n",
+ i, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg);
+ return -EIO;
+ }
data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
@@ -1284,6 +1289,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
/* Programming DIMM0 Module Control Register (index_CID0:80h) */
size = pdc20621_prog_dimm0(host);
+ if (size < 0)
+ return size;
dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
/* Programming DIMM Module Global Control Register (index_CID0:88h) */
@@ -1294,32 +1301,32 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
}
if (dimm_test) {
- u8 test_parttern1[40] =
+ u8 test_pattern1[40] =
{0x55,0xAA,'P','r','o','m','i','s','e',' ',
'N','o','t',' ','Y','e','t',' ',
'D','e','f','i','n','e','d',' ',
'1','.','1','0',
'9','8','0','3','1','6','1','2',0,0};
- u8 test_parttern2[40] = {0};
+ u8 test_pattern2[40] = {0};
- pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
- pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
+ pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);
- pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
- pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
- dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
- pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
+ pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
+ pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
+ dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
+ pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
40);
dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
- test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
- pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
- pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
+ pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
- test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
}
/* ECC initialization. */
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index af0029d30dbe..1037169abb45 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -154,14 +154,6 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
per_cpu(arch_freq_scale, i) = scale;
}
-DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
-
-void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
-{
- per_cpu(cpu_scale, cpu) = capacity;
-}
-
DEFINE_PER_CPU(unsigned long, hw_pressure);
/**
@@ -207,53 +199,9 @@ void topology_update_hw_pressure(const struct cpumask *cpus,
}
EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
-static ssize_t cpu_capacity_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
-
- return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
-}
-
static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
-static DEVICE_ATTR_RO(cpu_capacity);
-
-static int cpu_capacity_sysctl_add(unsigned int cpu)
-{
- struct device *cpu_dev = get_cpu_device(cpu);
-
- if (!cpu_dev)
- return -ENOENT;
-
- device_create_file(cpu_dev, &dev_attr_cpu_capacity);
-
- return 0;
-}
-
-static int cpu_capacity_sysctl_remove(unsigned int cpu)
-{
- struct device *cpu_dev = get_cpu_device(cpu);
-
- if (!cpu_dev)
- return -ENOENT;
-
- device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
-
- return 0;
-}
-
-static int register_cpu_capacity_sysctl(void)
-{
- cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
- cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
-
- return 0;
-}
-subsys_initcall(register_cpu_capacity_sysctl);
-
static int update_topology;
int topology_update_cpu_topology(void)
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index afa4df4c5a3f..dba7c8e13a53 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -156,6 +156,16 @@
* },
* .ops = my_custom_ops,
* };
+ *
+ * Please note that such a custom ops approach is valid, but it is hard to get
+ * right without per-device global locks to protect against auxiliary_drv removal
+ * while those ops are being called. In addition, this implementation lacks proper
+ * module dependencies, which leads to load/unload races between the auxiliary
+ * parent and device modules.
+ *
+ * The easiest way to provide these ops reliably without needing a lock is to
+ * EXPORT_SYMBOL*() them and rely on the existing module infrastructure for
+ * validity and correct dependency chains.
*/
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
@@ -385,6 +395,114 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
}
EXPORT_SYMBOL_GPL(auxiliary_driver_unregister);
+static void auxiliary_device_release(struct device *dev)
+{
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+
+ kfree(auxdev);
+}
+
+/**
+ * auxiliary_device_create - create a device on the auxiliary bus
+ * @dev: parent device
+ * @modname: module name used to create the auxiliary driver name.
+ * @devname: auxiliary bus device name
+ * @platform_data: auxiliary bus device platform data
+ * @id: auxiliary bus device id
+ *
+ * Helper to create an auxiliary bus device.
+ * The device created matches driver 'modname.devname' on the auxiliary bus.
+ */
+struct auxiliary_device *auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id)
+{
+ struct auxiliary_device *auxdev;
+ int ret;
+
+ auxdev = kzalloc(sizeof(*auxdev), GFP_KERNEL);
+ if (!auxdev)
+ return NULL;
+
+ auxdev->id = id;
+ auxdev->name = devname;
+ auxdev->dev.parent = dev;
+ auxdev->dev.platform_data = platform_data;
+ auxdev->dev.release = auxiliary_device_release;
+ device_set_of_node_from_dev(&auxdev->dev, dev);
+
+ ret = auxiliary_device_init(auxdev);
+ if (ret) {
+ kfree(auxdev);
+ return NULL;
+ }
+
+ ret = __auxiliary_device_add(auxdev, modname);
+ if (ret) {
+ /*
+ * It may look odd but auxdev should not be freed here.
+ * auxiliary_device_uninit() calls device_put(), which calls
+ * the device release function, freeing auxdev.
+ */
+ auxiliary_device_uninit(auxdev);
+ return NULL;
+ }
+
+ return auxdev;
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_create);
+
+/**
+ * auxiliary_device_destroy - remove an auxiliary device
+ * @auxdev: pointer to the auxdev to be removed
+ *
+ * Helper to remove an auxiliary device created with
+ * auxiliary_device_create()
+ */
+void auxiliary_device_destroy(void *auxdev)
+{
+ struct auxiliary_device *_auxdev = auxdev;
+
+ auxiliary_device_delete(_auxdev);
+ auxiliary_device_uninit(_auxdev);
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_destroy);
+
+/**
+ * __devm_auxiliary_device_create - create a managed device on the auxiliary bus
+ * @dev: parent device
+ * @modname: module name used to create the auxiliary driver name.
+ * @devname: auxiliary bus device name
+ * @platform_data: auxiliary bus device platform data
+ * @id: auxiliary bus device id
+ *
+ * Device managed helper to create an auxiliary bus device.
+ * The device created matches driver 'modname.devname' on the auxiliary bus.
+ */
+struct auxiliary_device *__devm_auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id)
+{
+ struct auxiliary_device *auxdev;
+ int ret;
+
+ auxdev = auxiliary_device_create(dev, modname, devname, platform_data, id);
+ if (!auxdev)
+ return NULL;
+
+ ret = devm_add_action_or_reset(dev, auxiliary_device_destroy,
+ auxdev);
+ if (ret)
+ return NULL;
+
+ return auxdev;
+}
+EXPORT_SYMBOL_GPL(__devm_auxiliary_device_create);
+
void __init auxiliary_bus_init(void)
{
WARN_ON(bus_register(&auxiliary_bus_type));
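For illustration, a minimal sketch of how a parent driver might use the new managed helper; the "foo"/"rpc" names and the assumption that the declarations are exposed via <linux/auxiliary_bus.h> are illustrative, not taken from this patch:

/* Hypothetical consumer of __devm_auxiliary_device_create(); "foo"/"rpc" are made-up names. */
#include <linux/auxiliary_bus.h>
#include <linux/device.h>

static int foo_create_rpc_child(struct device *parent, void *pdata)
{
	struct auxiliary_device *auxdev;

	/* Matches an auxiliary driver named "foo.rpc"; torn down when @parent unbinds. */
	auxdev = __devm_auxiliary_device_create(parent, "foo", "rpc", pdata, 0);
	if (!auxdev)
		return -ENODEV;

	return 0;
}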
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 0042e4774b0c..123031a757d9 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -73,6 +73,7 @@ static inline void subsys_put(struct subsys_private *sp)
kset_put(&sp->subsys);
}
+struct subsys_private *bus_to_subsys(const struct bus_type *bus);
struct subsys_private *class_to_subsys(const struct class *class);
struct driver_private {
@@ -180,6 +181,22 @@ int driver_add_groups(const struct device_driver *drv, const struct attribute_gr
void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups);
void device_driver_detach(struct device *dev);
+static inline void device_set_driver(struct device *dev, const struct device_driver *drv)
+{
+ /*
+ * The majority of (all?) read accesses to dev->driver happen either
+ * while holding the device lock or in bus/driver code that is only
+ * invoked when the device is bound to a driver, so there is no
+ * concern about the pointer changing while it is being read.
+ * However, when reading the device's uevent file we read the driver
+ * pointer without taking the device lock (so that we do not block
+ * there for an arbitrary amount of time). We use WRITE_ONCE() here to
+ * prevent tearing so that READ_ONCE() can safely be used in uevent code.
+ */
+ // FIXME - this cast should not be needed "soon"
+ WRITE_ONCE(dev->driver, (struct device_driver *)drv);
+}
+
int devres_release_all(struct device *dev);
void device_block_probing(void);
void device_unblock_probing(void);
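The device_set_driver() comment describes a plain load/store-tearing concern: WRITE_ONCE() on the bind/unbind side is paired with READ_ONCE() in the lockless uevent reader (see the core.c hunk further down). A reduced sketch of that pairing with a stand-in type, illustrative only:

/* Illustrative WRITE_ONCE()/READ_ONCE() pairing; driver_like and obj are stand-in types. */
#include <linux/compiler.h>

struct driver_like {
	const char *name;
};

struct obj {
	struct driver_like *driver;
};

static void obj_bind(struct obj *o, struct driver_like *drv)
{
	WRITE_ONCE(o->driver, drv);	/* writer: no store tearing */
}

static const char *obj_bound_name(struct obj *o)
{
	struct driver_like *drv = READ_ONCE(o->driver);	/* lockless reader: single snapshot */

	return drv ? drv->name : NULL;	/* never re-read o->driver after the check */
}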
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 5ea3b03af9ba..5e75e1bce551 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -57,7 +57,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
* NULL. A call to subsys_put() must be done when finished with the pointer in
* order for it to be properly freed.
*/
-static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
+struct subsys_private *bus_to_subsys(const struct bus_type *bus)
{
struct subsys_private *sp = NULL;
struct kobject *kobj;
diff --git a/drivers/base/component.c b/drivers/base/component.c
index abe60eb45c55..024ad9471b8a 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -586,7 +586,8 @@ EXPORT_SYMBOL_GPL(component_master_is_bound);
static void component_unbind(struct component *component,
struct aggregate_device *adev, void *data)
{
- WARN_ON(!component->bound);
+ if (WARN_ON(!component->bound))
+ return;
dev_dbg(adev->parent, "unbinding %s component %p (ops %ps)\n",
dev_name(component->dev), component, component->ops);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d2f9d3a59d6b..cbc0099d8ef2 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2624,6 +2624,35 @@ static const char *dev_uevent_name(const struct kobject *kobj)
return NULL;
}
+/*
+ * Try filling the "DRIVER=<name>" uevent variable for a device. Because this
+ * function may race with binding and unbinding the device from a driver,
+ * we need to be careful. Binding is generally safe: at worst we miss the
+ * fact that the device is already bound to a driver (but the driver
+ * information delivered through uevents is best-effort anyway, and it may
+ * become obsolete as soon as it is generated). Unbinding is riskier, as the
+ * driver pointer is transitioning to NULL, so READ_ONCE() should be used to
+ * make sure we are dealing with the same pointer, and to ensure that the
+ * driver structure does not disappear from under us we take the bus's
+ * drivers klist lock. We rely on the assumption that only a registered
+ * driver can be bound to a device, and that the bus code takes the same
+ * lock to unregister a driver.
+ */
+static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+ if (sp) {
+ scoped_guard(spinlock, &sp->klist_drivers.k_lock) {
+ struct device_driver *drv = READ_ONCE(dev->driver);
+ if (drv)
+ add_uevent_var(env, "DRIVER=%s", drv->name);
+ }
+
+ subsys_put(sp);
+ }
+}
+
static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
const struct device *dev = kobj_to_dev(kobj);
@@ -2655,8 +2684,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
if (dev->type && dev->type->name)
add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
- if (dev->driver)
- add_uevent_var(env, "DRIVER=%s", dev->driver->name);
+ /* Add "DRIVER=%s" variable if the device is bound to a driver */
+ dev_driver_uevent(dev, env);
/* Add common DT information about the device */
of_device_uevent(dev, env);
@@ -2726,11 +2755,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env)
return -ENOMEM;
- /* Synchronize with really_probe() */
- device_lock(dev);
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
- device_unlock(dev);
if (retval)
goto out;
@@ -3700,7 +3726,7 @@ done:
device_pm_remove(dev);
dpm_sysfs_remove(dev);
DPMError:
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
bus_remove_device(dev);
BusError:
device_remove_attrs(dev);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index a7e511849875..7779ab0ca7ce 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -600,6 +600,8 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
CPU_SHOW_VULN_FALLBACK(ghostwrite);
+CPU_SHOW_VULN_FALLBACK(old_microcode);
+CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -616,6 +618,8 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
+static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
+static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -633,6 +637,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_gather_data_sampling.attr,
&dev_attr_reg_file_data_sampling.attr,
&dev_attr_ghostwrite.attr,
+ &dev_attr_old_microcode.attr,
+ &dev_attr_indirect_target_selection.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index f0e4b4aba885..b526e0e0f52d 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -550,7 +550,7 @@ static void device_unbind_cleanup(struct device *dev)
arch_teardown_dma_ops(dev);
kfree(dev->dma_range_map);
dev->dma_range_map = NULL;
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
@@ -629,8 +629,7 @@ static int really_probe(struct device *dev, const struct device_driver *drv)
}
re_probe:
- // FIXME - this cast should not be needed "soon"
- dev->driver = (struct device_driver *)drv;
+ device_set_driver(dev, drv);
/* If using pinctrl, bind pins now before probing */
ret = pinctrl_bind_pins(dev);
@@ -1014,7 +1013,7 @@ static int __device_attach(struct device *dev, bool allow_async)
if (ret == 0)
ret = 1;
else {
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
ret = 0;
}
} else {
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index d8a733ea5e1a..ff55e1bcfa30 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -759,6 +759,17 @@ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, co
}
EXPORT_SYMBOL_GPL(__devm_add_action);
+bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data)
+{
+ struct action_devres devres = {
+ .data = data,
+ .action = action,
+ };
+
+ return devres_find(dev, devm_action_release, devm_action_match, &devres);
+}
+EXPORT_SYMBOL_GPL(devm_is_action_added);
+
/**
* devm_remove_action_nowarn() - removes previously added custom action
* @dev: Device that owns the action
@@ -976,17 +987,10 @@ EXPORT_SYMBOL_GPL(devm_krealloc);
*/
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
- size_t size;
- char *buf;
-
if (!s)
return NULL;
- size = strlen(s) + 1;
- buf = devm_kmalloc(dev, size, gfp);
- if (buf)
- memcpy(buf, s, size);
- return buf;
+ return devm_kmemdup(dev, s, strlen(s) + 1, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup);
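A hedged sketch of how the new devm_is_action_added() helper might be used to avoid double-registering a release action; foo_disable() and the calling convention are hypothetical, not from this patch:

/* Hypothetical caller of devm_is_action_added(); foo_disable()/foo are made-up names. */
#include <linux/device.h>

static void foo_disable(void *data)
{
	/* undo whatever the corresponding enable step did; details omitted */
}

static int foo_ensure_disable_action(struct device *dev, void *foo)
{
	if (devm_is_action_added(dev, foo_disable, foo))
		return 0;

	return devm_add_action_or_reset(dev, foo_disable, foo);
}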
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 6dd1a8860f1c..31bfb3194b4c 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -296,7 +296,7 @@ static int delete_path(const char *nodepath)
return err;
}
-static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
+static int dev_mynode(struct device *dev, struct inode *inode)
{
/* did we create it */
if (inode->i_private != &thread)
@@ -304,13 +304,13 @@ static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *sta
/* does the dev_t match */
if (is_blockdev(dev)) {
- if (!S_ISBLK(stat->mode))
+ if (!S_ISBLK(inode->i_mode))
return 0;
} else {
- if (!S_ISCHR(stat->mode))
+ if (!S_ISCHR(inode->i_mode))
return 0;
}
- if (stat->rdev != dev->devt)
+ if (inode->i_rdev != dev->devt)
return 0;
/* ours */
@@ -321,20 +321,16 @@ static int handle_remove(const char *nodename, struct device *dev)
{
struct path parent;
struct dentry *dentry;
- struct kstat stat;
- struct path p;
+ struct inode *inode;
int deleted = 0;
- int err;
+ int err = 0;
dentry = kern_path_locked(nodename, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- p.mnt = parent.mnt;
- p.dentry = dentry;
- err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
- AT_STATX_SYNC_AS_STAT);
- if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
+ inode = d_inode(dentry);
+ if (dev_mynode(dev, inode)) {
struct iattr newattrs;
/*
* before unlinking this node, reset permissions
@@ -342,7 +338,7 @@ static int handle_remove(const char *nodename, struct device *dev)
*/
newattrs.ia_uid = GLOBAL_ROOT_UID;
newattrs.ia_gid = GLOBAL_ROOT_GID;
- newattrs.ia_mode = stat.mode & ~0777;
+ newattrs.ia_mode = inode->i_mode & ~0777;
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
inode_lock(d_inode(dentry));
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
index 407c1d1aad50..9054d346bd7f 100644
--- a/drivers/base/faux.c
+++ b/drivers/base/faux.c
@@ -25,6 +25,7 @@
struct faux_object {
struct faux_device faux_dev;
const struct faux_device_ops *faux_ops;
+ const struct attribute_group **groups;
};
#define to_faux_object(dev) container_of_const(dev, struct faux_object, faux_dev.dev)
@@ -43,10 +44,21 @@ static int faux_probe(struct device *dev)
struct faux_object *faux_obj = to_faux_object(dev);
struct faux_device *faux_dev = &faux_obj->faux_dev;
const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
- int ret = 0;
+ int ret;
- if (faux_ops && faux_ops->probe)
+ if (faux_ops && faux_ops->probe) {
ret = faux_ops->probe(faux_dev);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Add groups after the probe succeeds to ensure resources are
+ * initialized correctly
+ */
+ ret = device_add_groups(dev, faux_obj->groups);
+ if (ret && faux_ops && faux_ops->remove)
+ faux_ops->remove(faux_dev);
return ret;
}
@@ -57,6 +69,8 @@ static void faux_remove(struct device *dev)
struct faux_device *faux_dev = &faux_obj->faux_dev;
const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+ device_remove_groups(dev, faux_obj->groups);
+
if (faux_ops && faux_ops->remove)
faux_ops->remove(faux_dev);
}
@@ -124,8 +138,9 @@ struct faux_device *faux_device_create_with_groups(const char *name,
if (!faux_obj)
return NULL;
- /* Save off the callbacks so we can use them in the future */
+ /* Save off the callbacks and groups so we can use them in the future */
faux_obj->faux_ops = faux_ops;
+ faux_obj->groups = groups;
/* Initialize the device portion and register it with the driver core */
faux_dev = &faux_obj->faux_dev;
@@ -138,7 +153,6 @@ struct faux_device *faux_device_create_with_groups(const char *name,
else
dev->parent = &faux_bus_root;
dev->bus = &faux_bus_type;
- dev->groups = groups;
dev_set_name(dev, "%s", name);
ret = device_add(dev);
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index a03701674265..752b9a9bea03 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -3,8 +3,7 @@ menu "Firmware loader"
config FW_LOADER
tristate "Firmware loading facility" if EXPERT
- select CRYPTO_HASH if FW_LOADER_DEBUG
- select CRYPTO_SHA256 if FW_LOADER_DEBUG
+ select CRYPTO_LIB_SHA256 if FW_LOADER_DEBUG
default y
help
This enables the firmware loading facility in the kernel. The kernel
@@ -28,7 +27,6 @@ config FW_LOADER
config FW_LOADER_DEBUG
bool "Log filenames and checksums for loaded firmware"
- depends on CRYPTO = FW_LOADER || CRYPTO=y
depends on DYNAMIC_DEBUG
depends on FW_LOADER
default FW_LOADER
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index cb0912ea3e62..44486b2c7172 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -806,41 +806,15 @@ static void fw_abort_batch_reqs(struct firmware *fw)
}
#if defined(CONFIG_FW_LOADER_DEBUG)
-#include <crypto/hash.h>
#include <crypto/sha2.h>
static void fw_log_firmware_info(const struct firmware *fw, const char *name, struct device *device)
{
- struct shash_desc *shash;
- struct crypto_shash *alg;
- u8 *sha256buf;
- char *outbuf;
+ u8 digest[SHA256_DIGEST_SIZE];
- alg = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(alg))
- return;
-
- sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
- outbuf = kmalloc(SHA256_BLOCK_SIZE + 1, GFP_KERNEL);
- shash = kmalloc(sizeof(*shash) + crypto_shash_descsize(alg), GFP_KERNEL);
- if (!sha256buf || !outbuf || !shash)
- goto out_free;
-
- shash->tfm = alg;
-
- if (crypto_shash_digest(shash, fw->data, fw->size, sha256buf) < 0)
- goto out_free;
-
- for (int i = 0; i < SHA256_DIGEST_SIZE; i++)
- sprintf(&outbuf[i * 2], "%02x", sha256buf[i]);
- outbuf[SHA256_BLOCK_SIZE] = 0;
- dev_dbg(device, "Loaded FW: %s, sha256: %s\n", name, outbuf);
-
-out_free:
- kfree(shash);
- kfree(outbuf);
- kfree(sha256buf);
- crypto_free_shash(alg);
+ sha256(fw->data, fw->size, digest);
+ dev_dbg(device, "Loaded FW: %s, sha256: %*phN\n",
+ name, SHA256_DIGEST_SIZE, digest);
}
#else
static void fw_log_firmware_info(const struct firmware *fw, const char *name,
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8f3a41d9bfaa..ed3e69dc785c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -110,6 +110,57 @@ static void memory_block_release(struct device *dev)
kfree(mem);
}
+
+/* Max block size to be set by memory_block_advise_max_size */
+static unsigned long memory_block_advised_size;
+static bool memory_block_advised_size_queried;
+
+/**
+ * memory_block_advise_max_size() - advise memory hotplug on the max suggested
+ * block size, usually for alignment.
+ * @size: suggested maximum block size; must be a power of 2.
+ *
+ * Early boot software (pre-allocator init) may advise archs on the max block
+ * size. This value can only decrease after initialization, as the intent is
+ * to identify the largest supported alignment for all sources.
+ *
+ * Use of this value is arch-defined, as is min/max block size.
+ *
+ * Return: 0 on success
+ * -EINVAL if size is 0 or not pow2 aligned
+ * -EBUSY if value has already been probed
+ */
+int __init memory_block_advise_max_size(unsigned long size)
+{
+ if (!size || !is_power_of_2(size))
+ return -EINVAL;
+
+ if (memory_block_advised_size_queried)
+ return -EBUSY;
+
+ if (memory_block_advised_size)
+ memory_block_advised_size = min(memory_block_advised_size, size);
+ else
+ memory_block_advised_size = size;
+
+ return 0;
+}
+
+/**
+ * memory_block_advised_max_size() - query advised max hotplug block size.
+ *
+ * After the first call, the value can never change. Callers looking for the
+ * actual block size should use memory_block_size_bytes. This interface is
+ * intended for use by arch-init when initializing the hotplug block size.
+ *
+ * Return: advised size in bytes, or 0 if never set.
+ */
+unsigned long memory_block_advised_max_size(void)
+{
+ memory_block_advised_size_queried = true;
+ return memory_block_advised_size;
+}
+
unsigned long __weak memory_block_size_bytes(void)
{
return MIN_MEMORY_BLOCK_SIZE;
@@ -816,21 +867,6 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
return 0;
}
-static int __init add_boot_memory_block(unsigned long base_section_nr)
-{
- unsigned long nr;
-
- for_each_present_section_nr(base_section_nr, nr) {
- if (nr >= (base_section_nr + sections_per_block))
- break;
-
- return add_memory_block(memory_block_id(base_section_nr),
- MEM_ONLINE, NULL, NULL);
- }
-
- return 0;
-}
-
static int add_hotplug_memory_block(unsigned long block_id,
struct vmem_altmap *altmap,
struct memory_group *group)
@@ -957,7 +993,7 @@ static const struct attribute_group *memory_root_attr_groups[] = {
void __init memory_dev_init(void)
{
int ret;
- unsigned long block_sz, nr;
+ unsigned long block_sz, block_id, nr;
/* Validate the configured memory block size */
block_sz = memory_block_size_bytes();
@@ -970,15 +1006,23 @@ void __init memory_dev_init(void)
panic("%s() failed to register subsystem: %d\n", __func__, ret);
/*
- * Create entries for memory sections that were found
- * during boot and have been initialized
+ * Create entries for memory sections that were found during boot
+ * and have been initialized. Use @block_id to track the last
+ * handled block and initialize it to an invalid value (ULONG_MAX)
+ * to bypass the block ID matching check for the first present
+ * block so that it can be covered.
*/
- for (nr = 0; nr <= __highest_present_section_nr;
- nr += sections_per_block) {
- ret = add_boot_memory_block(nr);
- if (ret)
- panic("%s() failed to add memory block: %d\n", __func__,
- ret);
+ block_id = ULONG_MAX;
+ for_each_present_section_nr(0, nr) {
+ if (block_id != ULONG_MAX && memory_block_id(nr) == block_id)
+ continue;
+
+ block_id = memory_block_id(nr);
+ ret = add_memory_block(block_id, MEM_ONLINE, NULL, NULL);
+ if (ret) {
+ panic("%s() failed to add memory block: %d\n",
+ __func__, ret);
+ }
}
}
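The advise/query pair added above is meant to be used in a fixed order: early boot code (pre-allocator) may lower the advised maximum, and arch init then queries it once, after which the value is frozen. A sketch of that order with made-up function names, assuming the declarations are exposed via <linux/memory.h>:

/* Illustrative call order only; the example_* names are made up. */
#include <linux/init.h>
#include <linux/memory.h>
#include <linux/minmax.h>
#include <linux/sizes.h>

/* Early boot code suggests an upper bound; the size must be a power of two. */
static void __init example_early_advise(void)
{
	memory_block_advise_max_size(SZ_2G);
}

/* Arch init picks a block size; after this query the advised value can no longer change. */
static unsigned long __init example_pick_block_size(unsigned long arch_default)
{
	unsigned long advised = memory_block_advised_max_size();

	return advised ? min(advised, arch_default) : arch_default;
}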
diff --git a/drivers/base/module.c b/drivers/base/module.c
index 5bc71bea883a..218aaa096455 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -42,16 +42,13 @@ int module_add_driver(struct module *mod, const struct device_driver *drv)
if (mod)
mk = &mod->mkobj;
else if (drv->mod_name) {
- struct kobject *mkobj;
-
- /* Lookup built-in module entry in /sys/modules */
- mkobj = kset_find_obj(module_kset, drv->mod_name);
- if (mkobj) {
- mk = container_of(mkobj, struct module_kobject, kobj);
+ /* Lookup or create built-in module entry in /sys/modules */
+ mk = lookup_or_create_module_kobject(drv->mod_name);
+ if (mk) {
/* remember our module structure */
drv->p->mkobj = mk;
- /* kset_find_obj took a reference */
- kobject_put(mkobj);
+ /* lookup_or_create_module_kobject took a reference */
+ kobject_put(&mk->kobj);
}
}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index cd13ef287011..c19094481630 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
+#include <linux/mempolicy.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
@@ -214,6 +215,14 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
break;
}
}
+
+ /* When setting CPU access coordinates, update mempolicy */
+ if (access == ACCESS_COORDINATE_CPU) {
+ if (mempolicy_set_node_perf(nid, coord)) {
+ pr_info("failed to set mempolicy attrs for node %d\n",
+ nid);
+ }
+ }
}
EXPORT_SYMBOL_GPL(node_set_perf_attrs);
@@ -468,7 +477,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_PAGETABLE)),
nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
nid, 0UL,
- nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
+ nid, 0UL,
nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
nid, K(sreclaimable +
node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 0e60dd650b5e..70db08f3ac6f 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -95,5 +95,6 @@ EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs);
void platform_device_msi_free_irqs_all(struct device *dev)
{
msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
+ msi_remove_device_irq_domain(dev, MSI_DEFAULT_DOMAIN);
}
EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 1813cfd0c4bd..075ec1d1b73a 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -982,7 +982,7 @@ struct platform_device * __init_or_module __platform_create_bundle(
struct platform_device *pdev;
int error;
- pdev = platform_device_alloc(driver->driver.name, -1);
+ pdev = platform_device_alloc(driver->driver.name, PLATFORM_DEVID_NONE);
if (!pdev) {
error = -ENOMEM;
goto err_out;
@@ -1440,7 +1440,7 @@ static void platform_shutdown(struct device *_dev)
static int platform_dma_configure(struct device *dev)
{
- struct platform_driver *drv = to_platform_driver(dev->driver);
+ struct device_driver *drv = READ_ONCE(dev->driver);
struct fwnode_handle *fwnode = dev_fwnode(dev);
enum dev_dma_attr attr;
int ret = 0;
@@ -1451,8 +1451,8 @@ static int platform_dma_configure(struct device *dev)
attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
ret = acpi_dma_configure(dev, attr);
}
- /* @drv may not be valid when we're called from the IOMMU layer */
- if (ret || !dev->driver || drv->driver_managed_dma)
+ /* @dev->driver may not be valid when we're called from the IOMMU layer */
+ if (ret || !drv || to_platform_driver(drv)->driver_managed_dma)
return ret;
ret = iommu_device_use_default_domain(dev);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c8b0a9e29ed8..19fd55b8ac77 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -63,6 +63,7 @@ static LIST_HEAD(dpm_noirq_list);
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
+static DEFINE_MUTEX(async_wip_mtx);
static int async_error;
static const char *pm_verb(int event)
@@ -560,7 +561,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
struct timer_list *timer = &wd->timer;
timer_delete_sync(timer);
- destroy_timer_on_stack(timer);
+ timer_destroy_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
@@ -597,8 +598,11 @@ static bool is_async(struct device *dev)
&& !pm_trace_is_enabled();
}
-static bool dpm_async_fn(struct device *dev, async_func_t func)
+static bool __dpm_async(struct device *dev, async_func_t func)
{
+ if (dev->power.work_in_progress)
+ return true;
+
if (!is_async(dev))
return false;
@@ -611,14 +615,37 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
put_device(dev);
+ return false;
+}
+
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+ guard(mutex)(&async_wip_mtx);
+
+ return __dpm_async(dev, func);
+}
+
+static int dpm_async_with_cleanup(struct device *dev, void *fn)
+{
+ guard(mutex)(&async_wip_mtx);
+
+ if (!__dpm_async(dev, fn))
+ dev->power.work_in_progress = false;
+
+ return 0;
+}
+
+static void dpm_async_resume_children(struct device *dev, async_func_t func)
+{
/*
- * async_schedule_dev_nocall() above has returned false, so func() is
- * not running and it is safe to update power.work_in_progress without
- * extra synchronization.
+ * Start processing "async" children of the device unless it's been
+ * started already for them.
+ *
+ * This could have been done for the device's "async" consumers too, but
+ * they either need to wait for their parents or the processing has
+ * already started for them after their parents were processed.
*/
- dev->power.work_in_progress = false;
-
- return false;
+ device_for_each_child(dev, func, dpm_async_with_cleanup);
}
static void dpm_clear_async_state(struct device *dev)
@@ -627,6 +654,13 @@ static void dpm_clear_async_state(struct device *dev)
dev->power.work_in_progress = false;
}
+static bool dpm_root_device(struct device *dev)
+{
+ return !dev->parent;
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie);
+
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@@ -710,6 +744,8 @@ Out:
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
+
+ dpm_async_resume_children(dev, async_resume_noirq);
}
static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -733,19 +769,20 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
dpm_clear_async_state(dev);
- dpm_async_fn(dev, async_resume_noirq);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_noirq);
}
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
- if (!dev->power.work_in_progress) {
+ if (!dpm_async_fn(dev, async_resume_noirq)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -781,6 +818,8 @@ void dpm_resume_noirq(pm_message_t state)
device_wakeup_disarm_wake_irqs();
}
+static void async_resume_early(void *data, async_cookie_t cookie);
+
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -848,6 +887,8 @@ Out:
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
+
+ dpm_async_resume_children(dev, async_resume_early);
}
static void async_resume_early(void *data, async_cookie_t cookie)
@@ -875,19 +916,20 @@ void dpm_resume_early(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
dpm_clear_async_state(dev);
- dpm_async_fn(dev, async_resume_early);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_early);
}
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
- if (!dev->power.work_in_progress) {
+ if (!dpm_async_fn(dev, async_resume_early)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -919,6 +961,8 @@ void dpm_resume_start(pm_message_t state)
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
+static void async_resume(void *data, async_cookie_t cookie);
+
/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
@@ -1018,6 +1062,8 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
+
+ dpm_async_resume_children(dev, async_resume);
}
static void async_resume(void *data, async_cookie_t cookie)
@@ -1049,19 +1095,20 @@ void dpm_resume(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
dpm_clear_async_state(dev);
- dpm_async_fn(dev, async_resume);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume);
}
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
list_move_tail(&dev->power.entry, &dpm_prepared_list);
- if (!dev->power.work_in_progress) {
+ if (!dpm_async_fn(dev, async_resume)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -1189,6 +1236,41 @@ EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
+static bool dpm_leaf_device(struct device *dev)
+{
+ struct device *child;
+
+ lockdep_assert_held(&dpm_list_mtx);
+
+ child = device_find_any_child(dev);
+ if (child) {
+ put_device(child);
+
+ return false;
+ }
+
+ return true;
+}
+
+static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
+{
+ guard(mutex)(&dpm_list_mtx);
+
+ /*
+ * If the device is suspended asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by checking
+ * if the device has been deleted already as the parent cannot be
+ * deleted before it.
+ */
+ if (!device_pm_initialized(dev))
+ return;
+
+ /* Start processing the device's parent if it is "async". */
+ if (dev->parent)
+ dpm_async_with_cleanup(dev->parent, func);
+}
+
/**
* resume_event - Return a "resume" message for given "suspend" sleep state.
* @sleep_state: PM message representing a sleep state.
@@ -1226,6 +1308,8 @@ static void dpm_superior_set_must_resume(struct device *dev)
device_links_read_unlock(idx);
}
+static void async_suspend_noirq(void *data, async_cookie_t cookie);
+
/**
* device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1304,7 +1388,13 @@ Skip:
Complete:
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
+
+ if (error || async_error)
+ return error;
+
+ dpm_async_suspend_parent(dev, async_suspend_noirq);
+
+ return 0;
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
@@ -1318,6 +1408,7 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
+ struct device *dev;
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
@@ -1327,12 +1418,21 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_noirq);
+ }
+
while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.prev);
+ dev = to_device(dpm_late_early_list.prev);
list_move(&dev->power.entry, &dpm_noirq_list);
- dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend_noirq))
continue;
@@ -1346,8 +1446,14 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (error || async_error) {
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice(&dpm_late_early_list, &dpm_noirq_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
@@ -1400,6 +1506,8 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
spin_unlock_irq(&parent->power.lock);
}
+static void async_suspend_late(void *data, async_cookie_t cookie);
+
/**
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@@ -1476,7 +1584,13 @@ Skip:
Complete:
TRACE_SUSPEND(error);
complete_all(&dev->power.completion);
- return error;
+
+ if (error || async_error)
+ return error;
+
+ dpm_async_suspend_parent(dev, async_suspend_late);
+
+ return 0;
}
static void async_suspend_late(void *data, async_cookie_t cookie)
@@ -1494,6 +1608,7 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
+ struct device *dev;
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
@@ -1505,12 +1620,21 @@ int dpm_suspend_late(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_late);
+ }
+
while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
+ dev = to_device(dpm_suspended_list.prev);
list_move(&dev->power.entry, &dpm_late_early_list);
- dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend_late))
continue;
@@ -1524,8 +1648,14 @@ int dpm_suspend_late(pm_message_t state)
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (error || async_error) {
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice(&dpm_suspended_list, &dpm_late_early_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
@@ -1614,6 +1744,8 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
device_links_read_unlock(idx);
}
+static void async_suspend(void *data, async_cookie_t cookie);
+
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
@@ -1743,7 +1875,13 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
+
+ if (error || async_error)
+ return error;
+
+ dpm_async_suspend_parent(dev, async_suspend);
+
+ return 0;
}
static void async_suspend(void *data, async_cookie_t cookie)
@@ -1761,6 +1899,7 @@ static void async_suspend(void *data, async_cookie_t cookie)
int dpm_suspend(pm_message_t state)
{
ktime_t starttime = ktime_get();
+ struct device *dev;
int error = 0;
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
@@ -1774,12 +1913,21 @@ int dpm_suspend(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend);
+ }
+
while (!list_empty(&dpm_prepared_list)) {
- struct device *dev = to_device(dpm_prepared_list.prev);
+ dev = to_device(dpm_prepared_list.prev);
list_move(&dev->power.entry, &dpm_suspended_list);
- dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend))
continue;
@@ -1793,8 +1941,14 @@ int dpm_suspend(pm_message_t state)
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (error || async_error) {
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice(&dpm_prepared_list, &dpm_suspended_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0e127b0329c0..c55a7c70bc1a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1011,7 +1011,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
* If 'expires' is after the current time, we've been called
* too early.
*/
- if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
+ if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -1568,6 +1568,32 @@ out:
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
+static void pm_runtime_set_suspended_action(void *data)
+{
+ pm_runtime_set_suspended(data);
+}
+
+/**
+ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_set_active_enabled(struct device *dev)
+{
+ int err;
+
+ err = pm_runtime_set_active(dev);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
+ if (err)
+ return err;
+
+ return devm_pm_runtime_enable(dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
+
static void pm_runtime_disable_action(void *data)
{
pm_runtime_dont_use_autosuspend(data);
@@ -1590,6 +1616,24 @@ int devm_pm_runtime_enable(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
+static void pm_runtime_put_noidle_action(void *data)
+{
+ pm_runtime_put_noidle(data);
+}
+
+/**
+ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_get_noresume(struct device *dev)
+{
+ pm_runtime_get_noresume(dev);
+
+ return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
+
/**
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
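For reference, a minimal probe sketch of how the two new devm helpers above are meant to be used. The driver and function names are hypothetical; only devm_pm_runtime_set_active_enabled() and devm_pm_runtime_get_noresume() are taken from this patch:

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int foo_probe(struct platform_device *pdev)
    {
            int ret;

            /* Mark the device active and enable runtime PM; both steps are
             * undone automatically (set_suspended + disable) on detach. */
            ret = devm_pm_runtime_set_active_enabled(&pdev->dev);
            if (ret)
                    return ret;

            /* Hold a usage count for the driver's lifetime; it is dropped
             * again via devres with pm_runtime_put_noidle(). */
            return devm_pm_runtime_get_noresume(&pdev->dev);
    }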
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index f84018125b46..13b31a3adc77 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -611,15 +611,9 @@ static DEVICE_ATTR_RW(async);
#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute *power_attrs[] = {
-#ifdef CONFIG_PM_ADVANCED_DEBUG
-#ifdef CONFIG_PM_SLEEP
+#if defined(CONFIG_PM_ADVANCED_DEBUG) && defined(CONFIG_PM_SLEEP)
&dev_attr_async.attr,
#endif
- &dev_attr_runtime_status.attr,
- &dev_attr_runtime_usage.attr,
- &dev_attr_runtime_active_kids.attr,
- &dev_attr_runtime_enabled.attr,
-#endif /* CONFIG_PM_ADVANCED_DEBUG */
NULL,
};
static const struct attribute_group pm_attr_group = {
@@ -650,13 +644,16 @@ static const struct attribute_group pm_wakeup_attr_group = {
};
static struct attribute *runtime_attrs[] = {
-#ifndef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_runtime_status.attr,
-#endif
&dev_attr_control.attr,
&dev_attr_runtime_suspended_time.attr,
&dev_attr_runtime_active_time.attr,
&dev_attr_autosuspend_delay_ms.attr,
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+ &dev_attr_runtime_usage.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_enabled.attr,
+#endif
NULL,
};
static const struct attribute_group pm_runtime_attr_group = {
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 63bf914a4d44..f7c96a3bf719 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -77,7 +77,7 @@ static DEFINE_IDA(wakeup_ida);
* wakeup_source_create - Create a struct wakeup_source object.
* @name: Name of the new wakeup source.
*/
-struct wakeup_source *wakeup_source_create(const char *name)
+static struct wakeup_source *wakeup_source_create(const char *name)
{
struct wakeup_source *ws;
const char *ws_name;
@@ -106,7 +106,6 @@ err_name:
err_ws:
return NULL;
}
-EXPORT_SYMBOL_GPL(wakeup_source_create);
/*
* Record wakeup_source statistics being deleted into a dummy wakeup_source.
@@ -149,7 +148,7 @@ static void wakeup_source_free(struct wakeup_source *ws)
*
* Use only for wakeup source objects created with wakeup_source_create().
*/
-void wakeup_source_destroy(struct wakeup_source *ws)
+static void wakeup_source_destroy(struct wakeup_source *ws)
{
if (!ws)
return;
@@ -158,13 +157,12 @@ void wakeup_source_destroy(struct wakeup_source *ws)
wakeup_source_record(ws);
wakeup_source_free(ws);
}
-EXPORT_SYMBOL_GPL(wakeup_source_destroy);
/**
* wakeup_source_add - Add given object to the list of wakeup sources.
* @ws: Wakeup source object to add to the list.
*/
-void wakeup_source_add(struct wakeup_source *ws)
+static void wakeup_source_add(struct wakeup_source *ws)
{
unsigned long flags;
@@ -179,13 +177,12 @@ void wakeup_source_add(struct wakeup_source *ws)
list_add_rcu(&ws->entry, &wakeup_sources);
raw_spin_unlock_irqrestore(&events_lock, flags);
}
-EXPORT_SYMBOL_GPL(wakeup_source_add);
/**
* wakeup_source_remove - Remove given object from the wakeup sources list.
* @ws: Wakeup source object to remove from the list.
*/
-void wakeup_source_remove(struct wakeup_source *ws)
+static void wakeup_source_remove(struct wakeup_source *ws)
{
unsigned long flags;
@@ -204,7 +201,6 @@ void wakeup_source_remove(struct wakeup_source *ws)
*/
ws->timer.function = NULL;
}
-EXPORT_SYMBOL_GPL(wakeup_source_remove);
/**
* wakeup_source_register - Create wakeup source and add it to the list.
@@ -337,7 +333,7 @@ int device_wakeup_enable(struct device *dev)
if (!dev || !dev->power.can_wakeup)
return -EINVAL;
- if (pm_suspend_target_state != PM_SUSPEND_ON)
+ if (pm_sleep_transition_in_progress())
dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
ws = wakeup_source_register(dev, dev_name(dev));
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
index 6732ed2869f9..3ffd427248e8 100644
--- a/drivers/base/power/wakeup_stats.c
+++ b/drivers/base/power/wakeup_stats.c
@@ -34,6 +34,7 @@ wakeup_attr(active_count);
wakeup_attr(event_count);
wakeup_attr(wakeup_count);
wakeup_attr(expire_count);
+wakeup_attr(relax_count);
static ssize_t active_time_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -119,6 +120,7 @@ static struct attribute *wakeup_source_attrs[] = {
&dev_attr_event_count.attr,
&dev_attr_wakeup_count.attr,
&dev_attr_expire_count.attr,
+ &dev_attr_relax_count.attr,
&dev_attr_active_time_ms.attr,
&dev_attr_total_time_ms.attr,
&dev_attr_max_time_ms.attr,
diff --git a/drivers/base/property.c b/drivers/base/property.c
index c1392743df9c..805f75b35115 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -928,22 +928,22 @@ bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_device_is_available);
/**
- * device_get_child_node_count - return the number of child nodes for device
- * @dev: Device to count the child nodes for
+ * fwnode_get_child_node_count - return the number of child nodes for a given firmware node
+ * @fwnode: Pointer to the parent firmware node
*
- * Return: the number of child nodes for a given device.
+ * Return: the number of child nodes for a given firmware node.
*/
-unsigned int device_get_child_node_count(const struct device *dev)
+unsigned int fwnode_get_child_node_count(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *child;
unsigned int count = 0;
- device_for_each_child_node(dev, child)
+ fwnode_for_each_child_node(fwnode, child)
count++;
return count;
}
-EXPORT_SYMBOL_GPL(device_get_child_node_count);
+EXPORT_SYMBOL_GPL(fwnode_get_child_node_count);
bool device_dma_supported(const struct device *dev)
{
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index b1affac70d5d..ffb2ef488298 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,8 +6,6 @@
config REGMAP
bool
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI)
- select IRQ_DOMAIN if REGMAP_IRQ
- select MDIO_BUS if REGMAP_MDIO
help
Enable support for the Register Map (regmap) access API.
@@ -58,12 +56,14 @@ config REGMAP_W1
config REGMAP_MDIO
tristate
+ select MDIO_BUS
config REGMAP_MMIO
tristate
config REGMAP_IRQ
bool
+ select IRQ_DOMAIN
config REGMAP_RAM
tristate
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index f7fcf2de1301..c7650fa434ad 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -34,21 +34,10 @@ static int regcache_defaults_cmp(const void *a, const void *b)
return 0;
}
-static void regcache_defaults_swap(void *a, void *b, int size)
-{
- struct reg_default *x = a;
- struct reg_default *y = b;
- struct reg_default tmp;
-
- tmp = *x;
- *x = *y;
- *y = tmp;
-}
-
void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults)
{
sort(defaults, ndefaults, sizeof(*defaults),
- regcache_defaults_cmp, regcache_defaults_swap);
+ regcache_defaults_cmp, NULL);
}
EXPORT_SYMBOL_GPL(regcache_sort_defaults);
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 6c6869188c31..d1585f073776 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -6,11 +6,13 @@
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/overflow.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -33,6 +35,7 @@ struct regmap_irq_chip_data {
void *status_reg_buf;
unsigned int *main_status_buf;
unsigned int *status_buf;
+ unsigned int *prev_status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
unsigned int *wake_buf;
@@ -193,10 +196,10 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
/* If we've changed our wakeup count propagate it to the parent */
if (d->wake_count < 0)
for (i = d->wake_count; i < 0; i++)
- irq_set_irq_wake(d->irq, 0);
+ disable_irq_wake(d->irq);
else if (d->wake_count > 0)
for (i = 0; i < d->wake_count; i++)
- irq_set_irq_wake(d->irq, 1);
+ enable_irq_wake(d->irq);
d->wake_count = 0;
@@ -332,27 +335,13 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
return ret;
}
-static irqreturn_t regmap_irq_thread(int irq, void *d)
+static int read_irq_data(struct regmap_irq_chip_data *data)
{
- struct regmap_irq_chip_data *data = d;
const struct regmap_irq_chip *chip = data->chip;
struct regmap *map = data->map;
int ret, i;
- bool handled = false;
u32 reg;
- if (chip->handle_pre_irq)
- chip->handle_pre_irq(chip->irq_drv_data);
-
- if (chip->runtime_pm) {
- ret = pm_runtime_get_sync(map->dev);
- if (ret < 0) {
- dev_err(map->dev, "IRQ thread failed to resume: %d\n",
- ret);
- goto exit;
- }
- }
-
/*
* Read only registers with active IRQs if the chip has 'main status
* register'. Else read in the statuses, using a single bulk read if
@@ -379,10 +368,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
reg = data->get_irq_reg(data, chip->main_status, i);
ret = regmap_read(map, reg, &data->main_status_buf[i]);
if (ret) {
- dev_err(map->dev,
- "Failed to read IRQ status %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
+ return ret;
}
}
@@ -398,10 +385,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
ret = read_sub_irq_data(data, b);
if (ret != 0) {
- dev_err(map->dev,
- "Failed to read IRQ status %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
+ return ret;
}
}
@@ -418,9 +403,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
data->status_reg_buf,
chip->num_regs);
if (ret != 0) {
- dev_err(map->dev, "Failed to read IRQ status: %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+ return ret;
}
for (i = 0; i < data->chip->num_regs; i++) {
@@ -436,7 +420,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
break;
default:
BUG();
- goto exit;
+ return -EIO;
}
}
@@ -447,10 +431,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
ret = regmap_read(map, reg, &data->status_buf[i]);
if (ret != 0) {
- dev_err(map->dev,
- "Failed to read IRQ status: %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+ return ret;
}
}
}
@@ -459,6 +441,42 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
for (i = 0; i < data->chip->num_regs; i++)
data->status_buf[i] = ~data->status_buf[i];
+ return 0;
+}
+
+static irqreturn_t regmap_irq_thread(int irq, void *d)
+{
+ struct regmap_irq_chip_data *data = d;
+ const struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+ int ret, i;
+ bool handled = false;
+ u32 reg;
+
+ if (chip->handle_pre_irq)
+ chip->handle_pre_irq(chip->irq_drv_data);
+
+ if (chip->runtime_pm) {
+ ret = pm_runtime_get_sync(map->dev);
+ if (ret < 0) {
+ dev_err(map->dev, "IRQ thread failed to resume: %d\n", ret);
+ goto exit;
+ }
+ }
+
+ ret = read_irq_data(data);
+ if (ret < 0)
+ goto exit;
+
+ if (chip->status_is_level) {
+ for (i = 0; i < data->chip->num_regs; i++) {
+ unsigned int val = data->status_buf[i];
+
+ data->status_buf[i] ^= data->prev_status_buf[i];
+ data->prev_status_buf[i] = val;
+ }
+ }
+
/*
* Ignore masked IRQs and ack if we need to; we ack early so
* there is no race between handling and acknowledging the
@@ -705,6 +723,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (!d->status_buf)
goto err_alloc;
+ if (chip->status_is_level) {
+ d->prev_status_buf = kcalloc(chip->num_regs, sizeof(*d->prev_status_buf),
+ GFP_KERNEL);
+ if (!d->prev_status_buf)
+ goto err_alloc;
+ }
+
d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
GFP_KERNEL);
if (!d->mask_buf)
@@ -881,6 +906,16 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
}
}
+ /* Store current levels */
+ if (chip->status_is_level) {
+ ret = read_irq_data(d);
+ if (ret < 0)
+ goto err_alloc;
+
+ memcpy(d->prev_status_buf, d->status_buf,
+ array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0])));
+ }
+
ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
if (ret)
goto err_alloc;
@@ -908,6 +943,7 @@ err_alloc:
kfree(d->mask_buf);
kfree(d->main_status_buf);
kfree(d->status_buf);
+ kfree(d->prev_status_buf);
kfree(d->status_reg_buf);
if (d->config_buf) {
for (i = 0; i < chip->num_config_bases; i++)
@@ -985,6 +1021,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->main_status_buf);
kfree(d->status_reg_buf);
kfree(d->status_buf);
+ kfree(d->prev_status_buf);
if (d->config_buf) {
for (i = 0; i < d->chip->num_config_bases; i++)
kfree(d->config_buf[i]);
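For level-status interrupt controllers, a driver opts in with the new status_is_level flag; the core then keeps prev_status_buf and XORs each fresh read against it so only bits that changed are dispatched as interrupts. A minimal sketch, with made-up register addresses and IRQ layout:

    #include <linux/array_size.h>
    #include <linux/bits.h>
    #include <linux/regmap.h>

    static const struct regmap_irq foo_irqs[] = {
            REGMAP_IRQ_REG(0, 0, BIT(0)),   /* IRQ 0: status reg 0, bit 0 */
            REGMAP_IRQ_REG(1, 0, BIT(1)),   /* IRQ 1: status reg 0, bit 1 */
    };

    static const struct regmap_irq_chip foo_irq_chip = {
            .name            = "foo",
            .status_base     = 0x10,        /* registers report current levels */
            .mask_base       = 0x20,
            .num_regs        = 1,
            .status_is_level = true,        /* new flag handled by this change */
            .irqs            = foo_irqs,
            .num_irqs        = ARRAY_SIZE(foo_irqs),
    };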
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index b1726a3515f6..deda7f35a059 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -529,7 +529,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (prop->is_inline)
return -EINVAL;
- if (index * sizeof(*ref) >= prop->length)
+ if ((index + 1) * sizeof(*ref) > prop->length)
return -ENOENT;
ref_array = prop->pointer;
@@ -1080,6 +1080,7 @@ void software_node_notify(struct device *dev)
if (!swnode)
return;
+ kobject_get(&swnode->kobj);
ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node");
if (ret)
return;
@@ -1089,8 +1090,6 @@ void software_node_notify(struct device *dev)
sysfs_remove_link(&dev->kobj, "software_node");
return;
}
-
- kobject_get(&swnode->kobj);
}
void software_node_notify_remove(struct device *dev)
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index b962da263eee..8b42df05feff 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -208,3 +208,55 @@ static int __init topology_sysfs_init(void)
}
device_initcall(topology_sysfs_init);
+
+DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
+{
+ per_cpu(cpu_scale, cpu) = capacity;
+}
+
+static ssize_t cpu_capacity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+ return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
+}
+
+static DEVICE_ATTR_RO(cpu_capacity);
+
+static int cpu_capacity_sysctl_add(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev)
+ return -ENOENT;
+
+ device_create_file(cpu_dev, &dev_attr_cpu_capacity);
+
+ return 0;
+}
+
+static int cpu_capacity_sysctl_remove(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev)
+ return -ENOENT;
+
+ device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
+
+ return 0;
+}
+
+static int register_cpu_capacity_sysctl(void)
+{
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
+ cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
+
+ return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);
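The cpu_scale values set here are exposed read-only as /sys/devices/system/cpu/cpuN/cpu_capacity through the hotplug callbacks above. A rough sketch of the producer side, assuming the capacities have already been normalised elsewhere (the helper name and the header are assumptions):

    #include <linux/arch_topology.h>
    #include <linux/cpumask.h>

    static void foo_apply_capacities(const unsigned long *capacity)
    {
            unsigned int cpu;

            /* capacity[] is expected to be scaled so that the fastest CPU
             * reaches SCHED_CAPACITY_SCALE (1024). */
            for_each_possible_cpu(cpu)
                    topology_set_cpu_scale(cpu, capacity[cpu]);
    }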
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 5f90bac6bb09..f021e27644e0 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -26,12 +26,14 @@ static int bcma_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
return !!bcma_chipco_gpio_in(cc, 1 << gpio);
}
-static void bcma_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
- int value)
+static int bcma_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct bcma_drv_cc *cc = gpiochip_get_data(chip);
bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
+
+ return 0;
}
static int bcma_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
@@ -184,7 +186,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->request = bcma_gpio_request;
chip->free = bcma_gpio_free;
chip->get = bcma_gpio_get_value;
- chip->set = bcma_gpio_set_value;
+ chip->set_rv = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
chip->parent = bus->dev;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index a97f2c40c640..0f70e2374e7f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -367,7 +367,7 @@ config BLK_DEV_RBD
tristate "Rados block device (RBD)"
depends on INET && BLOCK
select CEPH_LIB
- select LIBCRC32C
+ select CRC32
select CRYPTO_AES
select CRYPTO
help
@@ -388,12 +388,6 @@ config BLK_DEV_UBLK
definition isn't finalized yet, and might change according to future
requirements, so mark it as experimental now.
- Say Y if you want to get better performance because task_work_add()
- can be used in IO path for replacing io_uring cmd, which will become
- shared between IO tasks and ubq daemon, meantime task_work_add() can
- can handle batch more effectively, but task_work_add() isn't exported
- for module, so ublk has to be built to kernel.
-
config BLKDEV_UBLK_LEGACY_OPCODES
bool "Support legacy command opcode"
depends on BLK_DEV_UBLK
@@ -413,4 +407,23 @@ config BLKDEV_UBLK_LEGACY_OPCODES
source "drivers/block/rnbd/Kconfig"
+config BLK_DEV_ZONED_LOOP
+ tristate "Zoned loopback device support"
+ depends on BLK_DEV_ZONED
+ help
+	  Saying Y here will allow you to create a zoned block device using
+	  regular files for zones (one file per zone). This is useful to test
+	  file systems, device mapper and applications that support zoned block
+	  devices. No user-space utility is needed to create a zoned loop
+	  device; it can be created (or re-started) using a command like:
+
+ echo "add id=0,zone_size_mb=256,capacity_mb=16384,conv_zones=11" > \
+ /dev/zloop-control
+
+ See Documentation/admin-guide/blockdev/zoned_loop.rst for usage
+ details.
+
+ If unsure, say N.
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 1105a2d4fdcb..097707aca725 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -41,5 +41,6 @@ obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o
+obj-$(CONFIG_BLK_DEV_ZONED_LOOP) += zloop.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 292f127cae0a..b1be6c510372 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -54,32 +54,33 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
/*
* Insert a new page for a given sector, if one does not already exist.
*/
-static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
+static struct page *brd_insert_page(struct brd_device *brd, sector_t sector,
+ blk_opf_t opf)
+ __releases(rcu)
+ __acquires(rcu)
{
- pgoff_t idx = sector >> PAGE_SECTORS_SHIFT;
- struct page *page;
- int ret = 0;
-
- page = brd_lookup_page(brd, sector);
- if (page)
- return 0;
+ gfp_t gfp = (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO;
+ struct page *page, *ret;
+ rcu_read_unlock();
page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
+ rcu_read_lock();
if (!page)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
xa_lock(&brd->brd_pages);
- ret = __xa_insert(&brd->brd_pages, idx, page, gfp);
- if (!ret)
- brd->brd_nr_pages++;
- xa_unlock(&brd->brd_pages);
-
- if (ret < 0) {
+ ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL,
+ page, gfp);
+ if (ret) {
+ xa_unlock(&brd->brd_pages);
__free_page(page);
- if (ret == -EBUSY)
- ret = 0;
+ if (xa_is_err(ret))
+ return ERR_PTR(xa_err(ret));
+ return ret;
}
- return ret;
+ brd->brd_nr_pages++;
+ xa_unlock(&brd->brd_pages);
+ return page;
}
/*
@@ -100,143 +101,77 @@ static void brd_free_pages(struct brd_device *brd)
}
/*
- * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
+ * Process a single segment. The segment is capped to not cross page boundaries
+ * in both the bio and the brd backing memory.
*/
-static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
- gfp_t gfp)
-{
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
- int ret;
-
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- ret = brd_insert_page(brd, sector, gfp);
- if (ret)
- return ret;
- if (copy < n) {
- sector += copy >> SECTOR_SHIFT;
- ret = brd_insert_page(brd, sector, gfp);
- }
- return ret;
-}
-
-/*
- * Copy n bytes from src to the brd starting at sector. Does not sleep.
- */
-static void copy_to_brd(struct brd_device *brd, const void *src,
- sector_t sector, size_t n)
+static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
{
+ struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
+ sector_t sector = bio->bi_iter.bi_sector;
+ u32 offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
+ blk_opf_t opf = bio->bi_opf;
struct page *page;
- void *dst;
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
+ void *kaddr;
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- page = brd_lookup_page(brd, sector);
- BUG_ON(!page);
-
- dst = kmap_atomic(page);
- memcpy(dst + offset, src, copy);
- kunmap_atomic(dst);
-
- if (copy < n) {
- src += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- BUG_ON(!page);
-
- dst = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(dst);
- }
-}
+ bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
-/*
- * Copy n bytes to dst from the brd starting at sector. Does not sleep.
- */
-static void copy_from_brd(void *dst, struct brd_device *brd,
- sector_t sector, size_t n)
-{
- struct page *page;
- void *src;
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
-
- copy = min_t(size_t, n, PAGE_SIZE - offset);
+ rcu_read_lock();
page = brd_lookup_page(brd, sector);
- if (page) {
- src = kmap_atomic(page);
- memcpy(dst, src + offset, copy);
- kunmap_atomic(src);
- } else
- memset(dst, 0, copy);
-
- if (copy < n) {
- dst += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- if (page) {
- src = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(src);
- } else
- memset(dst, 0, copy);
+ if (!page && op_is_write(opf)) {
+ page = brd_insert_page(brd, sector, opf);
+ if (IS_ERR(page))
+ goto out_error;
}
-}
-
-/*
- * Process a single bvec of a bio.
- */
-static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, blk_opf_t opf,
- sector_t sector)
-{
- void *mem;
- int err = 0;
+ kaddr = bvec_kmap_local(&bv);
if (op_is_write(opf)) {
- /*
- * Must use NOIO because we don't want to recurse back into the
- * block or filesystem layers from page reclaim.
- */
- gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
-
- err = copy_to_brd_setup(brd, sector, len, gfp);
- if (err)
- goto out;
- }
-
- mem = kmap_atomic(page);
- if (!op_is_write(opf)) {
- copy_from_brd(mem + off, brd, sector, len);
- flush_dcache_page(page);
+ memcpy_to_page(page, offset, kaddr, bv.bv_len);
} else {
- flush_dcache_page(page);
- copy_to_brd(brd, mem + off, sector, len);
+ if (page)
+ memcpy_from_page(kaddr, page, offset, bv.bv_len);
+ else
+ memset(kaddr, 0, bv.bv_len);
}
- kunmap_atomic(mem);
+ kunmap_local(kaddr);
+ rcu_read_unlock();
+
+ bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
+ return true;
+
+out_error:
+ rcu_read_unlock();
+ if (PTR_ERR(page) == -ENOMEM && (opf & REQ_NOWAIT))
+ bio_wouldblock_error(bio);
+ else
+ bio_io_error(bio);
+ return false;
+}
-out:
- return err;
+static void brd_free_one_page(struct rcu_head *head)
+{
+ struct page *page = container_of(head, struct page, rcu_head);
+
+ __free_page(page);
}
static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
{
- sector_t aligned_sector = (sector + PAGE_SECTORS) & ~PAGE_SECTORS;
+ sector_t aligned_sector = round_up(sector, PAGE_SECTORS);
+ sector_t aligned_end = round_down(
+ sector + (size >> SECTOR_SHIFT), PAGE_SECTORS);
struct page *page;
- size -= (aligned_sector - sector) * SECTOR_SIZE;
+ if (aligned_end <= aligned_sector)
+ return;
+
xa_lock(&brd->brd_pages);
- while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) {
+ while (aligned_sector < aligned_end && aligned_sector < rd_size * 2) {
page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
if (page) {
- __free_page(page);
+ call_rcu(&page->rcu_head, brd_free_one_page);
brd->brd_nr_pages--;
}
aligned_sector += PAGE_SECTORS;
- size -= PAGE_SIZE;
}
xa_unlock(&brd->brd_pages);
}
@@ -244,36 +179,18 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
- sector_t sector = bio->bi_iter.bi_sector;
- struct bio_vec bvec;
- struct bvec_iter iter;
if (unlikely(op_is_discard(bio->bi_opf))) {
- brd_do_discard(brd, sector, bio->bi_iter.bi_size);
+ brd_do_discard(brd, bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size);
bio_endio(bio);
return;
}
- bio_for_each_segment(bvec, bio, iter) {
- unsigned int len = bvec.bv_len;
- int err;
-
- /* Don't support un-aligned buffer */
- WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||
- (len & (SECTOR_SIZE - 1)));
-
- err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
- bio->bi_opf, sector);
- if (err) {
- if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
- bio_wouldblock_error(bio);
- return;
- }
- bio_io_error(bio);
+ do {
+ if (!brd_rw_bvec(brd, bio))
return;
- }
- sector += len >> SECTOR_SHIFT;
- }
+ } while (bio->bi_iter.bi_size);
bio_endio(bio);
}
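brd_insert_page() now relies on the xarray compare-and-exchange idiom, so a racing writer that populates the slot first simply wins. A condensed sketch of that insert-if-absent pattern follows; foo_insert() and the locked-free xa_cmpxchg() variant are illustrative only, whereas the brd code holds xa_lock and also updates brd_nr_pages:

    #include <linux/err.h>
    #include <linux/gfp.h>
    #include <linux/xarray.h>

    static struct page *foo_insert(struct xarray *pages, unsigned long idx,
                                   gfp_t gfp)
    {
            struct page *page, *old;

            page = alloc_page(gfp | __GFP_ZERO);
            if (!page)
                    return ERR_PTR(-ENOMEM);

            /* Install only if the slot is still empty. */
            old = xa_cmpxchg(pages, idx, NULL, page, gfp);
            if (old) {
                    __free_page(page);
                    if (xa_is_err(old))
                            return ERR_PTR(xa_err(old));
                    return old;     /* somebody else inserted first */
            }
            return page;
    }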
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index 6fb4e38fca88..495a72da04c6 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -10,7 +10,7 @@ config BLK_DEV_DRBD
tristate "DRBD Distributed Replicated Block Device support"
depends on PROC_FS && INET
select LRU_CACHE
- select LIBCRC32C
+ select CRC32
help
NOTE: In order to authenticate connections you have to select
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 674527d770dc..e2b1f377f585 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -211,72 +211,6 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}
-static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
-{
- struct iov_iter i;
- ssize_t bw;
-
- iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);
-
- bw = vfs_iter_write(file, &i, ppos, 0);
-
- if (likely(bw == bvec->bv_len))
- return 0;
-
- printk_ratelimited(KERN_ERR
- "loop: Write error at byte offset %llu, length %i.\n",
- (unsigned long long)*ppos, bvec->bv_len);
- if (bw >= 0)
- bw = -EIO;
- return bw;
-}
-
-static int lo_write_simple(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec;
- struct req_iterator iter;
- int ret = 0;
-
- rq_for_each_segment(bvec, rq, iter) {
- ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
- if (ret < 0)
- break;
- cond_resched();
- }
-
- return ret;
-}
-
-static int lo_read_simple(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec;
- struct req_iterator iter;
- struct iov_iter i;
- ssize_t len;
-
- rq_for_each_segment(bvec, rq, iter) {
- iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len);
- len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
- if (len < 0)
- return len;
-
- flush_dcache_page(bvec.bv_page);
-
- if (len != bvec.bv_len) {
- struct bio *bio;
-
- __rq_for_each_bio(bio, rq)
- zero_fill_bio(bio);
- break;
- }
- cond_resched();
- }
-
- return 0;
-}
-
static void loop_clear_limits(struct loop_device *lo, int mode)
{
struct queue_limits lim = queue_limits_start_update(lo->lo_queue);
@@ -342,7 +276,7 @@ static void lo_complete_rq(struct request *rq)
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
blk_status_t ret = BLK_STS_OK;
- if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
+ if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
req_op(rq) != REQ_OP_READ) {
if (cmd->ret < 0)
ret = errno_to_blk_status(cmd->ret);
@@ -358,14 +292,13 @@ static void lo_complete_rq(struct request *rq)
cmd->ret = 0;
blk_mq_requeue_request(rq, true);
} else {
- if (cmd->use_aio) {
- struct bio *bio = rq->bio;
+ struct bio *bio = rq->bio;
- while (bio) {
- zero_fill_bio(bio);
- bio = bio->bi_next;
- }
+ while (bio) {
+ zero_fill_bio(bio);
+ bio = bio->bi_next;
}
+
ret = BLK_STS_IOERR;
end_io:
blk_mq_end_request(rq, ret);
@@ -445,9 +378,14 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
- cmd->iocb.ki_complete = lo_rw_aio_complete;
- cmd->iocb.ki_flags = IOCB_DIRECT;
- cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+ cmd->iocb.ki_ioprio = req_get_ioprio(rq);
+ if (cmd->use_aio) {
+ cmd->iocb.ki_complete = lo_rw_aio_complete;
+ cmd->iocb.ki_flags = IOCB_DIRECT;
+ } else {
+ cmd->iocb.ki_complete = NULL;
+ cmd->iocb.ki_flags = 0;
+ }
if (rw == ITER_SOURCE)
ret = file->f_op->write_iter(&cmd->iocb, &iter);
@@ -458,7 +396,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
if (ret != -EIOCBQUEUED)
lo_rw_aio_complete(&cmd->iocb, ret);
- return 0;
+ return -EIOCBQUEUED;
}
static int do_req_filebacked(struct loop_device *lo, struct request *rq)
@@ -466,15 +404,6 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
- /*
- * lo_write_simple and lo_read_simple should have been covered
- * by io submit style function like lo_rw_aio(), one blocker
- * is that lo_read_simple() need to call flush_dcache_page after
- * the page is written from kernel, and it isn't easy to handle
- * this in io submit style function which submits all segments
- * of the req at one time. And direct read IO doesn't need to
- * run flush_dcache_page().
- */
switch (req_op(rq)) {
case REQ_OP_FLUSH:
return lo_req_flush(lo, rq);
@@ -490,15 +419,9 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
case REQ_OP_DISCARD:
return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
- if (cmd->use_aio)
- return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
- else
- return lo_write_simple(lo, rq, pos);
+ return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
case REQ_OP_READ:
- if (cmd->use_aio)
- return lo_rw_aio(lo, cmd, pos, ITER_DEST);
- else
- return lo_read_simple(lo, rq, pos);
+ return lo_rw_aio(lo, cmd, pos, ITER_DEST);
default:
WARN_ON_ONCE(1);
return -EIO;
@@ -582,6 +505,17 @@ static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
lo->lo_min_dio_size = loop_query_min_dio_size(lo);
}
+static int loop_check_backing_file(struct file *file)
+{
+ if (!file->f_op->read_iter)
+ return -EINVAL;
+
+ if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter)
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* loop_change_fd switched the backing store of a loopback device to
* a new file. This is useful for operating system installers to free up
@@ -603,6 +537,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!file)
return -EBADF;
+ error = loop_check_backing_file(file);
+ if (error)
+ return error;
+
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
@@ -662,19 +600,20 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
* dependency.
*/
fput(old_file);
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
if (partscan)
loop_reread_partitions(lo);
error = 0;
done:
- /* enable and uncork uevent now that we are done */
- dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
return error;
out_err:
loop_global_unlock(lo, is_loop);
out_putf:
fput(file);
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
goto done;
}
@@ -1039,6 +978,11 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (!file)
return -EBADF;
+
+ error = loop_check_backing_file(file);
+ if (error)
+ return error;
+
is_loop = is_loop_device(file);
/* This is safe, since we have a reference from open(). */
@@ -1129,8 +1073,8 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
- /* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
loop_global_unlock(lo, is_loop);
if (partscan)
@@ -1921,7 +1865,6 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
struct loop_device *lo = rq->q->queuedata;
int ret = 0;
struct mem_cgroup *old_memcg = NULL;
- const bool use_aio = cmd->use_aio;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO;
@@ -1951,7 +1894,7 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
}
failed:
/* complete non-aio request */
- if (!use_aio || ret) {
+ if (ret != -EIOCBQUEUED) {
if (ret == -EOPNOTSUPP)
cmd->ret = ret;
else
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 0d619df03fa9..66ce6b81c7d9 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3717,7 +3717,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rv) {
dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
- goto setmask_err;
+ goto iomap_err;
}
/* Copy the info we may need later into the private data structure. */
@@ -3733,7 +3733,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
if (!dd->isr_workq) {
dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
rv = -ENOMEM;
- goto setmask_err;
+ goto iomap_err;
}
memset(cpu_list, 0, sizeof(cpu_list));
@@ -3830,8 +3830,6 @@ msi_initialize_err:
drop_cpu(dd->work[1].cpu_binding);
drop_cpu(dd->work[2].cpu_binding);
}
-setmask_err:
- pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
iomap_err:
kfree(dd);
@@ -3907,7 +3905,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
pci_disable_msi(pdev);
- pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
pci_set_drvdata(pdev, NULL);
put_disk(dd->disk);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 3bb9cee0a9b5..aa163ae9b2aa 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -2031,7 +2031,7 @@ static int null_add_dev(struct nullb_device *dev)
nullb->disk->minors = 1;
nullb->disk->fops = &null_ops;
nullb->disk->private_data = nullb;
- strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+ strscpy(nullb->disk->disk_name, nullb->disk_name);
if (nullb->dev->zoned) {
rv = null_register_zoned_dev(nullb);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 65b96c083b3c..d5cc7bd2875c 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -725,7 +725,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
scmd = blk_mq_rq_to_pdu(rq);
if (cgc->buflen) {
- ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
+ ret = blk_rq_map_kern(rq, cgc->buffer, cgc->buflen,
GFP_NOIO);
if (ret)
goto out;
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 2ee6e9bd4e28..2df8941a6b14 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -147,12 +147,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
bio = bio_alloc(file_bdev(sess_dev->bdev_file), 1,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
- if (bio_add_page(bio, virt_to_page(data), datalen,
- offset_in_page(data)) != datalen) {
- rnbd_srv_err_rl(sess_dev, "Failed to map data to bio\n");
- err = -EINVAL;
- goto bio_put;
- }
+ bio_add_virt_nofail(bio, data, datalen);
bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
if (bio_has_data(bio) &&
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 2fd05c1bd30b..6f51072776f1 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -50,6 +50,8 @@
/* private ioctl command mirror */
#define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)
+#define UBLK_CMD_UPDATE_SIZE _IOC_NR(UBLK_U_CMD_UPDATE_SIZE)
+#define UBLK_CMD_QUIESCE_DEV _IOC_NR(UBLK_U_CMD_QUIESCE_DEV)
#define UBLK_IO_REGISTER_IO_BUF _IOC_NR(UBLK_U_IO_REGISTER_IO_BUF)
#define UBLK_IO_UNREGISTER_IO_BUF _IOC_NR(UBLK_U_IO_UNREGISTER_IO_BUF)
@@ -64,7 +66,10 @@
| UBLK_F_CMD_IOCTL_ENCODE \
| UBLK_F_USER_COPY \
| UBLK_F_ZONED \
- | UBLK_F_USER_RECOVERY_FAIL_IO)
+ | UBLK_F_USER_RECOVERY_FAIL_IO \
+ | UBLK_F_UPDATE_SIZE \
+ | UBLK_F_AUTO_BUF_REG \
+ | UBLK_F_QUIESCE)
#define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
| UBLK_F_USER_RECOVERY_REISSUE \
@@ -77,7 +82,11 @@
UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT)
struct ublk_rq_data {
- struct kref ref;
+ refcount_t ref;
+
+ /* for auto-unregister buffer in case of UBLK_F_AUTO_BUF_REG */
+ u16 buf_index;
+ void *buf_ctx_handle;
};
struct ublk_uring_cmd_pdu {
@@ -99,6 +108,9 @@ struct ublk_uring_cmd_pdu {
* setup in ublk uring_cmd handler
*/
struct ublk_queue *ubq;
+
+ struct ublk_auto_buf_reg buf;
+
u16 tag;
};
@@ -123,15 +135,6 @@ struct ublk_uring_cmd_pdu {
#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
/*
- * IO command is aborted, so this flag is set in case of
- * !UBLK_IO_FLAG_ACTIVE.
- *
- * After this flag is observed, any pending or new incoming request
- * associated with this io command will be failed immediately
- */
-#define UBLK_IO_FLAG_ABORTED 0x04
-
-/*
* UBLK_IO_FLAG_NEED_GET_DATA is set because IO command requires
* get data buffer address from ublksrv.
*
@@ -140,6 +143,14 @@ struct ublk_uring_cmd_pdu {
*/
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
+/*
+ * The request buffer is registered automatically, so we have to
+ * unregister it before completing this request.
+ *
+ * io_uring will unregister the buffer automatically for us on exit.
+ */
+#define UBLK_IO_FLAG_AUTO_BUF_REG 0x10
+
/* atomic RW with ubq->cancel_lock */
#define UBLK_IO_FLAG_CANCELED 0x80000000
@@ -149,7 +160,12 @@ struct ublk_io {
unsigned int flags;
int res;
- struct io_uring_cmd *cmd;
+ union {
+ /* valid if UBLK_IO_FLAG_ACTIVE is set */
+ struct io_uring_cmd *cmd;
+ /* valid if UBLK_IO_FLAG_OWNED_BY_SRV is set */
+ struct request *req;
+ };
};
struct ublk_queue {
@@ -199,8 +215,6 @@ struct ublk_device {
struct completion completion;
unsigned int nr_queues_ready;
unsigned int nr_privileged_daemon;
-
- struct work_struct nosrv_work;
};
/* header of ublk_params */
@@ -209,16 +223,17 @@ struct ublk_params_header {
__u32 types;
};
-static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq);
-
+static void ublk_io_release(void *priv);
+static void ublk_stop_dev_unlocked(struct ublk_device *ub);
+static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- struct ublk_queue *ubq, int tag, size_t offset);
+ const struct ublk_queue *ubq, int tag, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
-static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
- int tag);
-static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
+
+static inline struct ublksrv_io_desc *
+ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
{
- return ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
+ return &ubq->io_cmd_buf[tag];
}
static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
@@ -372,8 +387,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
if (ret)
goto free_req;
- ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
- GFP_KERNEL);
+ ret = blk_rq_map_kern(req, buffer, buffer_length, GFP_KERNEL);
if (ret)
goto erase_desc;
@@ -493,7 +507,6 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
#endif
static inline void __ublk_complete_rq(struct request *req);
-static void ublk_complete_rq(struct kref *ref);
static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
@@ -620,14 +633,25 @@ static void ublk_apply_params(struct ublk_device *ub)
ublk_dev_param_zoned_apply(ub);
}
+static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
+{
+ return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
+}
+
+static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq)
+{
+ return ubq->flags & UBLK_F_AUTO_BUF_REG;
+}
+
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
- return ubq->flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
+ return ubq->flags & UBLK_F_USER_COPY;
}
static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
{
- return !ublk_support_user_copy(ubq);
+ return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) &&
+ !ublk_support_auto_buf_reg(ubq);
}
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
@@ -635,8 +659,16 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
/*
* read()/write() is involved in user copy, so request reference
* has to be grabbed
+ *
+ * for zero copy, the request buffer needs to be registered in the
+ * io_uring buffer table, so a reference is needed
+ *
+ * For auto buffer register, the ublk server may still issue
+ * UBLK_IO_COMMIT_AND_FETCH_REQ before the registered buffer is used up,
+ * so a reference is required too.
*/
- return ublk_support_user_copy(ubq);
+ return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq) ||
+ ublk_support_auto_buf_reg(ubq);
}
static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
@@ -645,7 +677,7 @@ static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
if (ublk_need_req_ref(ubq)) {
struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
- kref_init(&data->ref);
+ refcount_set(&data->ref, 1);
}
}
@@ -655,7 +687,7 @@ static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
if (ublk_need_req_ref(ubq)) {
struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
- return kref_get_unless_zero(&data->ref);
+ return refcount_inc_not_zero(&data->ref);
}
return true;
@@ -667,7 +699,8 @@ static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
if (ublk_need_req_ref(ubq)) {
struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
- kref_put(&data->ref, ublk_complete_rq);
+ if (refcount_dec_and_test(&data->ref))
+ __ublk_complete_rq(req);
} else {
__ublk_complete_rq(req);
}
@@ -703,12 +736,6 @@ static inline bool ublk_rq_has_data(const struct request *rq)
return bio_has_data(rq->bio);
}
-static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
- int tag)
-{
- return &ubq->io_cmd_buf[tag];
-}
-
static inline struct ublksrv_io_desc *
ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
@@ -1074,7 +1101,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
- return ubq->ubq_daemon->flags & PF_EXITING;
+ return !ubq->ubq_daemon || ubq->ubq_daemon->flags & PF_EXITING;
}
/* todo: handle partial completion */
@@ -1085,12 +1112,6 @@ static inline void __ublk_complete_rq(struct request *req)
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
- /* called from ublk_abort_queue() code path */
- if (io->flags & UBLK_IO_FLAG_ABORTED) {
- res = BLK_STS_IOERR;
- goto exit;
- }
-
/* failed read IO if nothing is read */
if (!io->res && req_op(req) == REQ_OP_READ)
io->res = -EIO;
@@ -1131,37 +1152,12 @@ exit:
blk_mq_end_request(req, res);
}
-static void ublk_complete_rq(struct kref *ref)
-{
- struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
- ref);
- struct request *req = blk_mq_rq_from_pdu(data);
-
- __ublk_complete_rq(req);
-}
-
-/*
- * Since ublk_rq_task_work_cb always fails requests immediately during
- * exiting, __ublk_fail_req() is only called from abort context during
- * exiting. So lock is unnecessary.
- *
- * Also aborting may not be started yet, keep in mind that one failed
- * request may be issued by block layer again.
- */
-static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
- struct request *req)
+static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
+ int res, unsigned issue_flags)
{
- WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
+ /* read cmd first because req will overwrite it */
+ struct io_uring_cmd *cmd = io->cmd;
- if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
- blk_mq_requeue_request(req, false);
- else
- ublk_put_req_ref(ubq, req);
-}
-
-static void ubq_complete_io_cmd(struct ublk_io *io, int res,
- unsigned issue_flags)
-{
/* mark this cmd owned by ublksrv */
io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
@@ -1171,8 +1167,10 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res,
*/
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+ io->req = req;
+
/* tell ublksrv one io request is coming */
- io_uring_cmd_done(io->cmd, res, 0, issue_flags);
+ io_uring_cmd_done(cmd, res, 0, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
@@ -1187,16 +1185,91 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
blk_mq_end_request(rq, BLK_STS_IOERR);
}
+static void ublk_auto_buf_reg_fallback(struct request *req)
+{
+ const struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ iod->op_flags |= UBLK_IO_F_NEED_REG_BUF;
+ refcount_set(&data->ref, 1);
+}
+
+static bool ublk_auto_buf_reg(struct request *req, struct ublk_io *io,
+ unsigned int issue_flags)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(io->cmd);
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ int ret;
+
+ ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release,
+ pdu->buf.index, issue_flags);
+ if (ret) {
+ if (pdu->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
+ ublk_auto_buf_reg_fallback(req);
+ return true;
+ }
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ return false;
+ }
+ /* one extra reference is dropped by ublk_io_release */
+ refcount_set(&data->ref, 2);
+
+ data->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd);
+ /* store buffer index in request payload */
+ data->buf_index = pdu->buf.index;
+ io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
+ return true;
+}
+
+static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq,
+ struct request *req, struct ublk_io *io,
+ unsigned int issue_flags)
+{
+ if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req))
+ return ublk_auto_buf_reg(req, io, issue_flags);
+
+ ublk_init_req_ref(ubq, req);
+ return true;
+}
+
+static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
+ struct ublk_io *io)
+{
+ unsigned mapped_bytes = ublk_map_io(ubq, req, io);
+
+ /* partially mapped, update io descriptor */
+ if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
+ /*
+ * Nothing mapped, retry until we succeed.
+ *
+ * We may never succeed in mapping any bytes here because
+ * of OOM. TODO: reserve one buffer with single page pinned
+ * for providing forward progress guarantee.
+ */
+ if (unlikely(!mapped_bytes)) {
+ blk_mq_requeue_request(req, false);
+ blk_mq_delay_kick_requeue_list(req->q,
+ UBLK_REQUEUE_DELAY_MS);
+ return false;
+ }
+
+ ublk_get_iod(ubq, req->tag)->nr_sectors =
+ mapped_bytes >> 9;
+ }
+
+ return true;
+}
+
static void ublk_dispatch_req(struct ublk_queue *ubq,
struct request *req,
unsigned int issue_flags)
{
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
- unsigned int mapped_bytes;
- pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
- __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+ pr_devel("%s: complete: qid %d tag %d io_flags %x addr %llx\n",
+ __func__, ubq->q_id, req->tag, io->flags,
ublk_get_iod(ubq, req->tag)->addr);
/*
@@ -1216,54 +1289,22 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
/*
* We have not handled UBLK_IO_NEED_GET_DATA command yet,
- * so immepdately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
+ * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
* and notify it.
*/
- if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
- io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
- pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
- __func__, io->cmd->cmd_op, ubq->q_id,
- req->tag, io->flags);
- ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
- return;
- }
- /*
- * We have handled UBLK_IO_NEED_GET_DATA command,
- * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
- * do the copy work.
- */
- io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
- /* update iod->addr because ublksrv may have passed a new io buffer */
- ublk_get_iod(ubq, req->tag)->addr = io->addr;
- pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
- __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
- ublk_get_iod(ubq, req->tag)->addr);
+ io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
+ pr_devel("%s: need get data. qid %d tag %d io_flags %x\n",
+ __func__, ubq->q_id, req->tag, io->flags);
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_NEED_GET_DATA,
+ issue_flags);
+ return;
}
- mapped_bytes = ublk_map_io(ubq, req, io);
-
- /* partially mapped, update io descriptor */
- if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
- /*
- * Nothing mapped, retry until we succeed.
- *
- * We may never succeed in mapping any bytes here because
- * of OOM. TODO: reserve one buffer with single page pinned
- * for providing forward progress guarantee.
- */
- if (unlikely(!mapped_bytes)) {
- blk_mq_requeue_request(req, false);
- blk_mq_delay_kick_requeue_list(req->q,
- UBLK_REQUEUE_DELAY_MS);
- return;
- }
-
- ublk_get_iod(ubq, req->tag)->nr_sectors =
- mapped_bytes >> 9;
- }
+ if (!ublk_start_io(ubq, req, io))
+ return;
- ublk_init_req_ref(ubq, req);
- ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
+ if (ublk_prep_auto_buf_reg(ubq, req, io, issue_flags))
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
}
static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
@@ -1314,8 +1355,6 @@ static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l)
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
{
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
- unsigned int nr_inflight = 0;
- int i;
if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
if (!ubq->timeout) {
@@ -1326,30 +1365,11 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
return BLK_EH_DONE;
}
- if (!ubq_daemon_is_dying(ubq))
- return BLK_EH_RESET_TIMER;
-
- for (i = 0; i < ubq->q_depth; i++) {
- struct ublk_io *io = &ubq->ios[i];
-
- if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
- nr_inflight++;
- }
-
- /* cancelable uring_cmd can't help us if all commands are in-flight */
- if (nr_inflight == ubq->q_depth) {
- struct ublk_device *ub = ubq->dev;
-
- if (ublk_abort_requests(ub, ubq)) {
- schedule_work(&ub->nosrv_work);
- }
- return BLK_EH_DONE;
- }
-
return BLK_EH_RESET_TIMER;
}
-static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq)
+static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
+ bool check_cancel)
{
blk_status_t res;
@@ -1368,7 +1388,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq)
if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
return BLK_STS_IOERR;
- if (unlikely(ubq->canceling))
+ if (check_cancel && unlikely(ubq->canceling))
return BLK_STS_IOERR;
/* fill iod to slot in io cmd buffer */
@@ -1387,7 +1407,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *rq = bd->rq;
blk_status_t res;
- res = ublk_prep_req(ubq, rq);
+ res = ublk_prep_req(ubq, rq, false);
if (res != BLK_STS_OK)
return res;
@@ -1419,7 +1439,7 @@ static void ublk_queue_rqs(struct rq_list *rqlist)
ublk_queue_cmd_list(ubq, &submit_list);
ubq = this_q;
- if (ublk_prep_req(ubq, req) == BLK_STS_OK)
+ if (ublk_prep_req(ubq, req, true) == BLK_STS_OK)
rq_list_add_tail(&submit_list, req);
else
rq_list_add_tail(&requeue_list, req);
@@ -1447,6 +1467,37 @@ static const struct blk_mq_ops ublk_mq_ops = {
.timeout = ublk_timeout,
};
+static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+ int i;
+
+ /* All old ioucmds have to be completed */
+ ubq->nr_io_ready = 0;
+
+ /*
+ * old daemon is PF_EXITING, put it now
+ *
+	 * It could be NULL in case of closing a quiesced device.
+ */
+ if (ubq->ubq_daemon)
+ put_task_struct(ubq->ubq_daemon);
+ /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
+ ubq->ubq_daemon = NULL;
+ ubq->timeout = false;
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+ /*
+		 * UBLK_IO_FLAG_CANCELED is kept to avoid touching
+		 * io->cmd
+ */
+ io->flags &= UBLK_IO_FLAG_CANCELED;
+ io->cmd = NULL;
+ io->addr = 0;
+ }
+}
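As an aside on the flag reset in ublk_queue_reinit() above: `io->flags &= UBLK_IO_FLAG_CANCELED` keeps only the CANCELED bit and clears everything else, so a canceled slot stays marked until recovery clears it later (see ublk_reset_io_flags()). A minimal stand-alone sketch of the same masking idiom; the bit values below are illustrative, not the driver's definitions:

	#include <stdio.h>

	/* illustrative bit values only, not the ublk driver's flags */
	#define FLAG_CANCELED (1u << 0)
	#define FLAG_ACTIVE   (1u << 1)
	#define FLAG_OWNED    (1u << 2)

	int main(void)
	{
		unsigned int flags = FLAG_CANCELED | FLAG_ACTIVE | FLAG_OWNED;

		flags &= FLAG_CANCELED;          /* keep only the CANCELED bit */

		printf("flags = %#x\n", flags);  /* prints 0x1 */
		return 0;
	}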
+
static int ublk_ch_open(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = container_of(inode->i_cdev,
@@ -1458,10 +1509,119 @@ static int ublk_ch_open(struct inode *inode, struct file *filp)
return 0;
}
+static void ublk_reset_ch_dev(struct ublk_device *ub)
+{
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_queue_reinit(ub, ublk_get_queue(ub, i));
+
+ /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
+ ub->mm = NULL;
+ ub->nr_queues_ready = 0;
+ ub->nr_privileged_daemon = 0;
+}
+
+static struct gendisk *ublk_get_disk(struct ublk_device *ub)
+{
+ struct gendisk *disk;
+
+ spin_lock(&ub->lock);
+ disk = ub->ub_disk;
+ if (disk)
+ get_device(disk_to_dev(disk));
+ spin_unlock(&ub->lock);
+
+ return disk;
+}
+
+static void ublk_put_disk(struct gendisk *disk)
+{
+ if (disk)
+ put_device(disk_to_dev(disk));
+}
+
static int ublk_ch_release(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = filp->private_data;
+ struct gendisk *disk;
+ int i;
+
+ /*
+	 * The disk isn't attached: either the device isn't live yet, or it
+	 * has already been removed, so there is nothing to do here
+ */
+ disk = ublk_get_disk(ub);
+ if (!disk)
+ goto out;
+
+ /*
+	 * All uring_cmds are done now, so abort any request outstanding to
+	 * the ublk server
+	 *
+	 * This can be done locklessly because the ublk server is gone
+	 *
+	 * More importantly, we have to provide a forward progress guarantee
+	 * without holding ub->mutex, otherwise a control task grabbing
+	 * ub->mutex triggers deadlock
+	 *
+	 * All requests may be inflight, so ->canceling may not be set yet;
+	 * set it now.
+ */
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ ubq->canceling = true;
+ ublk_abort_queue(ub, ubq);
+ }
+ blk_mq_kick_requeue_list(disk->queue);
+ /*
+	 * All inflight requests have been completed or requeued, and any new
+	 * request will be failed or requeued via `->canceling`, so it is
+	 * fine to grab ub->mutex now.
+ */
+ mutex_lock(&ub->mutex);
+
+ /* double check after grabbing lock */
+ if (!ub->ub_disk)
+ goto unlock;
+
+ /*
+ * Transition the device to the nosrv state. What exactly this
+ * means depends on the recovery flags
+ */
+ blk_mq_quiesce_queue(disk->queue);
+ if (ublk_nosrv_should_stop_dev(ub)) {
+ /*
+ * Allow any pending/future I/O to pass through quickly
+ * with an error. This is needed because del_gendisk
+ * waits for all pending I/O to complete
+ */
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_get_queue(ub, i)->force_abort = true;
+ blk_mq_unquiesce_queue(disk->queue);
+
+ ublk_stop_dev_unlocked(ub);
+ } else {
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
+ /* ->canceling is set and all requests are aborted */
+ ub->dev_info.state = UBLK_S_DEV_QUIESCED;
+ } else {
+ ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_get_queue(ub, i)->fail_io = true;
+ }
+ blk_mq_unquiesce_queue(disk->queue);
+ }
+unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_put_disk(disk);
+
+	/* all uring_cmds are done now, reset device & ubq */
+ ublk_reset_ch_dev(ub);
+out:
clear_bit(UB_STATE_OPEN, &ub->state);
return 0;
}
@@ -1504,34 +1664,26 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
-static void ublk_commit_completion(struct ublk_device *ub,
- const struct ublksrv_io_cmd *ub_cmd)
+static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+ struct request *req)
{
- u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
- struct ublk_queue *ubq = ublk_get_queue(ub, qid);
- struct ublk_io *io = &ubq->ios[tag];
- struct request *req;
-
- /* now this cmd slot is owned by nbd driver */
- io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
- io->res = ub_cmd->result;
-
- /* find the io request and complete */
- req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
- if (WARN_ON_ONCE(unlikely(!req)))
- return;
-
- if (req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = ub_cmd->zone_append_lba;
+ WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
- if (likely(!blk_should_fake_timeout(req->q)))
- ublk_put_req_ref(ubq, req);
+ if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+ blk_mq_requeue_request(req, false);
+ else {
+ io->res = -EIO;
+ __ublk_complete_rq(req);
+ }
}
/*
- * Called from ubq_daemon context via cancel fn, meantime quiesce ublk
- * blk-mq queue, so we are called exclusively with blk-mq and ubq_daemon
- * context, so everything is serialized.
+ * Called from the ublk char device release handler when any uring_cmd is
+ * done; meantime the request queue is "quiesced" since all inflight requests
+ * can't be completed because the ublk server is dead.
+ *
+ * So no one can hold our request IO reference any more; simply ignore the
+ * reference and complete the request immediately
*/
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
{
@@ -1540,54 +1692,28 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
- if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
- struct request *rq;
-
- /*
- * Either we fail the request or ublk_rq_task_work_cb
- * will do it
- */
- rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
- if (rq && blk_mq_request_started(rq)) {
- io->flags |= UBLK_IO_FLAG_ABORTED;
- __ublk_fail_req(ubq, io, rq);
- }
- }
+ if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
+ __ublk_fail_req(ubq, io, io->req);
}
}
/* Must be called when queue is frozen */
-static bool ublk_mark_queue_canceling(struct ublk_queue *ubq)
+static void ublk_mark_queue_canceling(struct ublk_queue *ubq)
{
- bool canceled;
-
spin_lock(&ubq->cancel_lock);
- canceled = ubq->canceling;
- if (!canceled)
+ if (!ubq->canceling)
ubq->canceling = true;
spin_unlock(&ubq->cancel_lock);
-
- return canceled;
}
-static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
+static void ublk_start_cancel(struct ublk_queue *ubq)
{
- bool was_canceled = ubq->canceling;
- struct gendisk *disk;
-
- if (was_canceled)
- return false;
-
- spin_lock(&ub->lock);
- disk = ub->ub_disk;
- if (disk)
- get_device(disk_to_dev(disk));
- spin_unlock(&ub->lock);
+ struct ublk_device *ub = ubq->dev;
+ struct gendisk *disk = ublk_get_disk(ub);
/* Our disk has been dead */
if (!disk)
- return false;
-
+ return;
/*
* Now we are serialized with ublk_queue_rq()
*
@@ -1596,25 +1722,36 @@ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
* touch completed uring_cmd
*/
blk_mq_quiesce_queue(disk->queue);
- was_canceled = ublk_mark_queue_canceling(ubq);
- if (!was_canceled) {
- /* abort queue is for making forward progress */
- ublk_abort_queue(ub, ubq);
- }
+ ublk_mark_queue_canceling(ubq);
blk_mq_unquiesce_queue(disk->queue);
- put_device(disk_to_dev(disk));
-
- return !was_canceled;
+ ublk_put_disk(disk);
}
-static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
+static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
unsigned int issue_flags)
{
+ struct ublk_io *io = &ubq->ios[tag];
+ struct ublk_device *ub = ubq->dev;
+ struct request *req;
bool done;
if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
return;
+ /*
+	 * Don't try to cancel this command if the request is started, to
+	 * avoid a race between io_uring_cmd_done() and
+	 * io_uring_cmd_complete_in_task().
+	 *
+	 * Either the started request will be aborted via __ublk_abort_rq()
+	 * and this uring_cmd is canceled next time, or it will be done in
+	 * the task work function ublk_dispatch_req(), because io_uring
+	 * guarantees that ublk_dispatch_req() is always called
+ */
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
+ if (req && blk_mq_request_started(req) && req->tag == tag)
+ return;
+
spin_lock(&ubq->cancel_lock);
done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
if (!done)
@@ -1628,6 +1765,17 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
/*
* The ublk char device won't be closed when calling cancel fn, so both
* ublk device and queue are guaranteed to be live
+ *
+ * Two-stage cancel:
+ *
+ * - make every active uring_cmd done in ->cancel_fn()
+ *
+ * - abort inflight ublk IO requests in the ublk char device release handler,
+ *   which depends on the 1st stage because the device can only be closed
+ *   after all uring_cmds are done
+ *
+ * Do _not_ try to acquire ub->mutex before all inflight requests are
+ * aborted, otherwise deadlock may be caused.
*/
static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
unsigned int issue_flags)
@@ -1635,9 +1783,6 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
struct task_struct *task;
- struct ublk_device *ub;
- bool need_schedule;
- struct ublk_io *io;
if (WARN_ON_ONCE(!ubq))
return;
@@ -1649,16 +1794,11 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
if (WARN_ON_ONCE(task && task != ubq->ubq_daemon))
return;
- ub = ubq->dev;
- need_schedule = ublk_abort_requests(ub, ubq);
-
- io = &ubq->ios[pdu->tag];
- WARN_ON_ONCE(io->cmd != cmd);
- ublk_cancel_cmd(ubq, io, issue_flags);
+ if (!ubq->canceling)
+ ublk_start_cancel(ubq);
- if (need_schedule) {
- schedule_work(&ub->nosrv_work);
- }
+ WARN_ON_ONCE(ubq->ios[pdu->tag].cmd != cmd);
+ ublk_cancel_cmd(ubq, pdu->tag, issue_flags);
}
static inline bool ublk_queue_ready(struct ublk_queue *ubq)
@@ -1671,7 +1811,7 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
int i;
for (i = 0; i < ubq->q_depth; i++)
- ublk_cancel_cmd(ubq, &ubq->ios[i], IO_URING_F_UNLOCKED);
+ ublk_cancel_cmd(ubq, i, IO_URING_F_UNLOCKED);
}
/* Cancel all pending commands, must be called after del_gendisk() returns */
@@ -1709,33 +1849,20 @@ static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
}
}
-static void __ublk_quiesce_dev(struct ublk_device *ub)
-{
- pr_devel("%s: quiesce ub: dev_id %d state %s\n",
- __func__, ub->dev_info.dev_id,
- ub->dev_info.state == UBLK_S_DEV_LIVE ?
- "LIVE" : "QUIESCED");
- blk_mq_quiesce_queue(ub->ub_disk->queue);
- ublk_wait_tagset_rqs_idle(ub);
- ub->dev_info.state = UBLK_S_DEV_QUIESCED;
-}
-
-static void ublk_unquiesce_dev(struct ublk_device *ub)
+static void ublk_force_abort_dev(struct ublk_device *ub)
{
int i;
- pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
+ pr_devel("%s: force abort ub: dev_id %d state %s\n",
__func__, ub->dev_info.dev_id,
ub->dev_info.state == UBLK_S_DEV_LIVE ?
"LIVE" : "QUIESCED");
- /* quiesce_work has run. We let requeued rqs be aborted
- * before running fallback_wq. "force_abort" must be seen
- * after request queue is unqiuesced. Then del_gendisk()
- * can move on.
- */
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE)
+ ublk_wait_tagset_rqs_idle(ub);
+
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_get_queue(ub, i)->force_abort = true;
-
blk_mq_unquiesce_queue(ub->ub_disk->queue);
/* We may have requeued some rqs in ublk_quiesce_queue() */
blk_mq_kick_requeue_list(ub->ub_disk->queue);
@@ -1756,61 +1883,51 @@ static struct gendisk *ublk_detach_disk(struct ublk_device *ub)
return disk;
}
-static void ublk_stop_dev(struct ublk_device *ub)
+static void ublk_stop_dev_unlocked(struct ublk_device *ub)
+ __must_hold(&ub->mutex)
{
struct gendisk *disk;
- mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_DEAD)
- goto unlock;
- if (ublk_nosrv_dev_should_queue_io(ub)) {
- if (ub->dev_info.state == UBLK_S_DEV_LIVE)
- __ublk_quiesce_dev(ub);
- ublk_unquiesce_dev(ub);
- }
+ return;
+
+ if (ublk_nosrv_dev_should_queue_io(ub))
+ ublk_force_abort_dev(ub);
del_gendisk(ub->ub_disk);
disk = ublk_detach_disk(ub);
put_disk(disk);
- unlock:
+}
+
+static void ublk_stop_dev(struct ublk_device *ub)
+{
+ mutex_lock(&ub->mutex);
+ ublk_stop_dev_unlocked(ub);
mutex_unlock(&ub->mutex);
ublk_cancel_dev(ub);
}
-static void ublk_nosrv_work(struct work_struct *work)
+/* reset ublk io_uring queue & io flags */
+static void ublk_reset_io_flags(struct ublk_device *ub)
{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, nosrv_work);
- int i;
+ int i, j;
- if (ublk_nosrv_should_stop_dev(ub)) {
- ublk_stop_dev(ub);
- return;
- }
-
- mutex_lock(&ub->mutex);
- if (ub->dev_info.state != UBLK_S_DEV_LIVE)
- goto unlock;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
- if (ublk_nosrv_dev_should_queue_io(ub)) {
- __ublk_quiesce_dev(ub);
- } else {
- blk_mq_quiesce_queue(ub->ub_disk->queue);
- ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
- ublk_get_queue(ub, i)->fail_io = true;
- }
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ /* UBLK_IO_FLAG_CANCELED can be cleared now */
+ spin_lock(&ubq->cancel_lock);
+ for (j = 0; j < ubq->q_depth; j++)
+ ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
+ spin_unlock(&ubq->cancel_lock);
+ ubq->canceling = false;
+ ubq->fail_io = false;
}
-
- unlock:
- mutex_unlock(&ub->mutex);
- ublk_cancel_dev(ub);
}
/* device can only be started after all IOs are ready */
static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+ __must_hold(&ub->mutex)
{
- mutex_lock(&ub->mutex);
ubq->nr_io_ready++;
if (ublk_queue_ready(ubq)) {
ubq->ubq_daemon = current;
@@ -1820,18 +1937,12 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
if (capable(CAP_SYS_ADMIN))
ub->nr_privileged_daemon++;
}
- if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
- complete_all(&ub->completion);
- mutex_unlock(&ub->mutex);
-}
-static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
- int tag)
-{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
- struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
-
- ublk_queue_cmd(ubq, req);
+ if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
+ /* now we are ready for handling ublk io request */
+ ublk_reset_io_flags(ub);
+ complete_all(&ub->completion);
+ }
}
static inline int ublk_check_cmd_op(u32 cmd_op)
@@ -1870,6 +1981,20 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
io_uring_cmd_mark_cancelable(cmd, issue_flags);
}
+static inline int ublk_set_auto_buf_reg(struct io_uring_cmd *cmd)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+ pdu->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
+
+ if (pdu->buf.reserved0 || pdu->buf.reserved1)
+ return -EINVAL;
+
+ if (pdu->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
+ return -EINVAL;
+ return 0;
+}
+
static void ublk_io_release(void *priv)
{
struct request *rq = priv;
@@ -1879,13 +2004,16 @@ static void ublk_io_release(void *priv)
}
static int ublk_register_io_buf(struct io_uring_cmd *cmd,
- struct ublk_queue *ubq, unsigned int tag,
+ const struct ublk_queue *ubq, unsigned int tag,
unsigned int index, unsigned int issue_flags)
{
struct ublk_device *ub = cmd->file->private_data;
struct request *req;
int ret;
+ if (!ublk_support_zero_copy(ubq))
+ return -EINVAL;
+
req = __ublk_check_and_get_req(ub, ubq, tag, 0);
if (!req)
return -EINVAL;
@@ -1901,11 +2029,151 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
}
static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
+ const struct ublk_queue *ubq,
unsigned int index, unsigned int issue_flags)
{
+ if (!ublk_support_zero_copy(ubq))
+ return -EINVAL;
+
return io_buffer_unregister_bvec(cmd, index, issue_flags);
}
+static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
+ struct ublk_io *io, __u64 buf_addr)
+{
+ struct ublk_device *ub = ubq->dev;
+ int ret = 0;
+
+ /*
+	 * When handling the FETCH command for setting up the ublk uring
+	 * queue, ub->mutex is the innermost lock, and we won't block while
+	 * handling FETCH, so it is fine even for IO_URING_F_NONBLOCK.
+ */
+ mutex_lock(&ub->mutex);
+ /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
+ if (ublk_queue_ready(ubq)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* allow each command to be FETCHed at most once */
+ if (io->flags & UBLK_IO_FLAG_ACTIVE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
+
+ if (ublk_need_map_io(ubq)) {
+ /*
+ * FETCH_RQ has to provide IO buffer if NEED GET
+ * DATA is not enabled
+ */
+ if (!buf_addr && !ublk_need_get_data(ubq))
+ goto out;
+ } else if (buf_addr) {
+ /* User copy requires addr to be unset */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ublk_support_auto_buf_reg(ubq)) {
+ ret = ublk_set_auto_buf_reg(cmd);
+ if (ret)
+ goto out;
+ }
+
+ ublk_fill_io_cmd(io, cmd, buf_addr);
+ ublk_mark_io_ready(ub, ubq);
+out:
+ mutex_unlock(&ub->mutex);
+ return ret;
+}
+
+static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
+ struct ublk_io *io, struct io_uring_cmd *cmd,
+ const struct ublksrv_io_cmd *ub_cmd,
+ unsigned int issue_flags)
+{
+ struct request *req = io->req;
+
+ if (ublk_need_map_io(ubq)) {
+ /*
+ * COMMIT_AND_FETCH_REQ has to provide IO buffer if
+ * NEED GET DATA is not enabled or it is Read IO.
+ */
+ if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
+ req_op(req) == REQ_OP_READ))
+ return -EINVAL;
+ } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
+ /*
+ * User copy requires addr to be unset when command is
+ * not zone append
+ */
+ return -EINVAL;
+ }
+
+ if (ublk_support_auto_buf_reg(ubq)) {
+ int ret;
+
+ /*
+ * `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ`
+ * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from same
+ * `io_ring_ctx`.
+ *
+	 * If this uring_cmd's io_ring_ctx isn't the same as the
+	 * one used for registering the buffer, it is the ublk server's
+	 * responsibility to unregister the buffer, otherwise
+	 * this ublk request gets stuck.
+ */
+ if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ if (data->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
+ io_buffer_unregister_bvec(cmd, data->buf_index,
+ issue_flags);
+ io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
+ }
+
+ ret = ublk_set_auto_buf_reg(cmd);
+ if (ret)
+ return ret;
+ }
+
+ ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
+
+ /* now this cmd slot is owned by ublk driver */
+ io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
+ io->res = ub_cmd->result;
+
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ req->__sector = ub_cmd->zone_append_lba;
+
+ if (likely(!blk_should_fake_timeout(req->q)))
+ ublk_put_req_ref(ubq, req);
+
+ return 0;
+}
+
+static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io)
+{
+ struct request *req = io->req;
+
+ /*
+ * We have handled UBLK_IO_NEED_GET_DATA command,
+ * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
+ * do the copy work.
+ */
+ io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+ /* update iod->addr because ublksrv may have passed a new io buffer */
+ ublk_get_iod(ubq, req->tag)->addr = io->addr;
+ pr_devel("%s: update iod->addr: qid %d tag %d io_flags %x addr %llx\n",
+ __func__, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
+
+ return ublk_start_io(ubq, req, io);
+}
+
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
const struct ublksrv_io_cmd *ub_cmd)
@@ -1916,7 +2184,6 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
u32 cmd_op = cmd->cmd_op;
unsigned tag = ub_cmd->tag;
int ret = -EINVAL;
- struct request *req;
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
@@ -1926,9 +2193,6 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
goto out;
ubq = ublk_get_queue(ub, ub_cmd->q_id);
- if (!ubq || ub_cmd->q_id != ubq->q_id)
- goto out;
-
if (ubq->ubq_daemon && ubq->ubq_daemon != current)
goto out;
@@ -1943,6 +2207,11 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
goto out;
}
+ /* only UBLK_IO_FETCH_REQ is allowed if io is not OWNED_BY_SRV */
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) &&
+ _IOC_NR(cmd_op) != UBLK_IO_FETCH_REQ)
+ goto out;
+
/*
* ensure that the user issues UBLK_IO_NEED_GET_DATA
 	 * iff the driver has set the UBLK_IO_FLAG_NEED_GET_DATA.
@@ -1960,68 +2229,23 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
case UBLK_IO_REGISTER_IO_BUF:
return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
case UBLK_IO_UNREGISTER_IO_BUF:
- return ublk_unregister_io_buf(cmd, ub_cmd->addr, issue_flags);
+ return ublk_unregister_io_buf(cmd, ubq, ub_cmd->addr, issue_flags);
case UBLK_IO_FETCH_REQ:
- /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
- if (ublk_queue_ready(ubq)) {
- ret = -EBUSY;
- goto out;
- }
- /*
- * The io is being handled by server, so COMMIT_RQ is expected
- * instead of FETCH_REQ
- */
- if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
- goto out;
-
- if (ublk_need_map_io(ubq)) {
- /*
- * FETCH_RQ has to provide IO buffer if NEED GET
- * DATA is not enabled
- */
- if (!ub_cmd->addr && !ublk_need_get_data(ubq))
- goto out;
- } else if (ub_cmd->addr) {
- /* User copy requires addr to be unset */
- ret = -EINVAL;
+ ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
+ if (ret)
goto out;
- }
-
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_mark_io_ready(ub, ubq);
break;
case UBLK_IO_COMMIT_AND_FETCH_REQ:
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
-
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
- goto out;
-
- if (ublk_need_map_io(ubq)) {
- /*
- * COMMIT_AND_FETCH_REQ has to provide IO buffer if
- * NEED GET DATA is not enabled or it is Read IO.
- */
- if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
- req_op(req) == REQ_OP_READ))
- goto out;
- } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
- /*
- * User copy requires addr to be unset when command is
- * not zone append
- */
- ret = -EINVAL;
+ ret = ublk_commit_and_fetch(ubq, io, cmd, ub_cmd, issue_flags);
+ if (ret)
goto out;
- }
-
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_commit_completion(ub, ub_cmd);
break;
case UBLK_IO_NEED_GET_DATA:
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
- goto out;
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
- break;
+ io->addr = ub_cmd->addr;
+ if (!ublk_get_data(ubq, io))
+ return -EIOCBQUEUED;
+
+ return UBLK_IO_RES_OK;
default:
goto out;
}
@@ -2035,13 +2259,10 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- struct ublk_queue *ubq, int tag, size_t offset)
+ const struct ublk_queue *ubq, int tag, size_t offset)
{
struct request *req;
- if (!ublk_need_req_ref(ubq))
- return NULL;
-
req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
if (!req)
return NULL;
@@ -2155,6 +2376,9 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
if (!ubq)
return ERR_PTR(-EINVAL);
+ if (!ublk_support_user_copy(ubq))
+ return ERR_PTR(-EACCES);
+
if (tag >= ubq->q_depth)
return ERR_PTR(-EINVAL);
@@ -2388,7 +2612,6 @@ static void ublk_remove(struct ublk_device *ub)
bool unprivileged;
ublk_stop_dev(ub);
- cancel_work_sync(&ub->nosrv_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
ublk_put_device(ub);
@@ -2413,9 +2636,9 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
return ub;
}
-static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
+static int ublk_ctrl_start_dev(struct ublk_device *ub,
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
const struct ublk_param_basic *p = &ub->params.basic;
int ublksrv_pid = (int)header->data[0];
struct queue_limits lim = {
@@ -2534,9 +2757,8 @@ out_unlock:
}
static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
cpumask_var_t cpumask;
unsigned long queue;
@@ -2585,9 +2807,8 @@ static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
info->nr_hw_queues, info->queue_depth);
}
-static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
+static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublksrv_ctrl_dev_info info;
struct ublk_device *ub;
@@ -2622,6 +2843,11 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
return -EINVAL;
}
+ if ((info.flags & UBLK_F_QUIESCE) && !(info.flags & UBLK_F_USER_RECOVERY)) {
+ pr_warn("UBLK_F_QUIESCE requires UBLK_F_USER_RECOVERY\n");
+ return -EINVAL;
+ }
+
/*
* unprivileged device can't be trusted, but RECOVERY and
* RECOVERY_REISSUE still may hang error handling, so can't
@@ -2638,8 +2864,11 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
 	 * For USER_COPY, we depend on userspace to fill the request
* buffer by pwrite() to ublk char device, which can't be
* used for unprivileged device
+ *
+	 * The same applies to zero copy and auto buffer registration.
*/
- if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY))
+ if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG))
return -EINVAL;
}
@@ -2675,7 +2904,6 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
goto out_unlock;
mutex_init(&ub->mutex);
spin_lock_init(&ub->lock);
- INIT_WORK(&ub->nosrv_work, ublk_nosrv_work);
ret = ublk_alloc_dev_number(ub, header->dev_id);
if (ret < 0)
@@ -2697,13 +2925,19 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
UBLK_F_URING_CMD_COMP_IN_TASK;
- /* GET_DATA isn't needed any more with USER_COPY */
- if (ublk_dev_is_user_copy(ub))
+ /* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */
+ if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG))
ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
- /* Zoned storage support requires user copy feature */
+ /*
+	 * Zoned storage support requires reusing `ublksrv_io_cmd->addr` for
+ * returning write_append_lba, which is only allowed in case of
+ * user copy or zero copy
+ */
if (ublk_dev_is_zoned(ub) &&
- (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
+ (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !(ub->dev_info.flags &
+ (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)))) {
ret = -EINVAL;
goto out_free_dev_number;
}
@@ -2807,14 +3041,12 @@ static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
ublk_stop_dev(ub);
- cancel_work_sync(&ub->nosrv_work);
return 0;
}
static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
@@ -2843,9 +3075,8 @@ static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
}
static int ublk_ctrl_get_params(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret;
@@ -2874,9 +3105,8 @@ static int ublk_ctrl_get_params(struct ublk_device *ub,
}
static int ublk_ctrl_set_params(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret = -EFAULT;
@@ -2914,43 +3144,14 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
return ret;
}
-static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
-{
- int i;
-
- WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
-
- /* All old ioucmds have to be completed */
- ubq->nr_io_ready = 0;
- /* old daemon is PF_EXITING, put it now */
- put_task_struct(ubq->ubq_daemon);
- /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
- ubq->ubq_daemon = NULL;
- ubq->timeout = false;
- ubq->canceling = false;
-
- for (i = 0; i < ubq->q_depth; i++) {
- struct ublk_io *io = &ubq->ios[i];
-
- /* forget everything now and be ready for new FETCH_REQ */
- io->flags = 0;
- io->cmd = NULL;
- io->addr = 0;
- }
-}
-
static int ublk_ctrl_start_recovery(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ret = -EINVAL;
- int i;
mutex_lock(&ub->mutex);
if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
- if (!ub->nr_queues_ready)
- goto out_unlock;
/*
 	 * START_RECOVERY is only allowed after:
*
@@ -2974,12 +3175,6 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
goto out_unlock;
}
pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_queue_reinit(ub, ublk_get_queue(ub, i));
- /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
- ub->mm = NULL;
- ub->nr_queues_ready = 0;
- ub->nr_privileged_daemon = 0;
init_completion(&ub->completion);
ret = 0;
out_unlock:
@@ -2988,12 +3183,10 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
}
static int ublk_ctrl_end_recovery(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
int ret = -EINVAL;
- int i;
pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
@@ -3013,33 +3206,18 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
goto out_unlock;
}
ub->dev_info.ublksrv_pid = ublksrv_pid;
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
__func__, ublksrv_pid, header->dev_id);
-
- if (ublk_nosrv_dev_should_queue_io(ub)) {
- ub->dev_info.state = UBLK_S_DEV_LIVE;
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
- pr_devel("%s: queue unquiesced, dev id %d.\n",
- __func__, header->dev_id);
- blk_mq_kick_requeue_list(ub->ub_disk->queue);
- } else {
- blk_mq_quiesce_queue(ub->ub_disk->queue);
- ub->dev_info.state = UBLK_S_DEV_LIVE;
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
- ublk_get_queue(ub, i)->fail_io = false;
- }
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
- }
-
+ blk_mq_kick_requeue_list(ub->ub_disk->queue);
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
return ret;
}
-static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_features(const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
u64 features = UBLK_F_ALL;
@@ -3052,6 +3230,127 @@ static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
return 0;
}
+static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header)
+{
+ struct ublk_param_basic *p = &ub->params.basic;
+ u64 new_size = header->data[0];
+
+ mutex_lock(&ub->mutex);
+ p->dev_sectors = new_size;
+ set_capacity_and_notify(ub->ub_disk, p->dev_sectors);
+ mutex_unlock(&ub->mutex);
+}
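For reference, the new size in header->data[0] is interpreted as a sector count: it is stored directly in params.basic.dev_sectors and handed to set_capacity_and_notify(). A stand-alone sketch of the byte-to-sector conversion a caller would do, assuming the usual 512-byte block-layer sectors:

	#include <stdint.h>
	#include <stdio.h>

	#define SECTOR_SHIFT 9   /* block-layer sectors are 512 bytes */

	int main(void)
	{
		uint64_t bytes = 10ULL << 30;               /* e.g. a 10 GiB backing store */
		uint64_t sectors = bytes >> SECTOR_SHIFT;   /* value to place in data[0] */

		printf("%llu sectors\n", (unsigned long long)sectors);
		return 0;
	}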
+
+struct count_busy {
+ const struct ublk_queue *ubq;
+ unsigned int nr_busy;
+};
+
+static bool ublk_count_busy_req(struct request *rq, void *data)
+{
+ struct count_busy *idle = data;
+
+ if (!blk_mq_request_started(rq) && rq->mq_hctx->driver_data == idle->ubq)
+ idle->nr_busy += 1;
+ return true;
+}
+
+/* uring_cmd is guaranteed to be active if the associated request is idle */
+static bool ubq_has_idle_io(const struct ublk_queue *ubq)
+{
+ struct count_busy data = {
+ .ubq = ubq,
+ };
+
+ blk_mq_tagset_busy_iter(&ubq->dev->tag_set, ublk_count_busy_req, &data);
+ return data.nr_busy < ubq->q_depth;
+}
+
+/* Wait until each hw queue has at least one idle IO */
+static int ublk_wait_for_idle_io(struct ublk_device *ub,
+ unsigned int timeout_ms)
+{
+ unsigned int elapsed = 0;
+ int ret;
+
+ while (elapsed < timeout_ms && !signal_pending(current)) {
+ unsigned int queues_cancelable = 0;
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ queues_cancelable += !!ubq_has_idle_io(ubq);
+ }
+
+ /*
+ * Each queue needs at least one active command for
+ * notifying ublk server
+ */
+ if (queues_cancelable == ub->dev_info.nr_hw_queues)
+ break;
+
+ msleep(UBLK_REQUEUE_DELAY_MS);
+ elapsed += UBLK_REQUEUE_DELAY_MS;
+ }
+
+ if (signal_pending(current))
+ ret = -EINTR;
+ else if (elapsed >= timeout_ms)
+ ret = -EBUSY;
+ else
+ ret = 0;
+
+ return ret;
+}
+
+static int ublk_ctrl_quiesce_dev(struct ublk_device *ub,
+ const struct ublksrv_ctrl_cmd *header)
+{
+ /* zero means wait forever */
+ u64 timeout_ms = header->data[0];
+ struct gendisk *disk;
+ int i, ret = -ENODEV;
+
+ if (!(ub->dev_info.flags & UBLK_F_QUIESCE))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ub->mutex);
+ disk = ublk_get_disk(ub);
+ if (!disk)
+ goto unlock;
+ if (ub->dev_info.state == UBLK_S_DEV_DEAD)
+ goto put_disk;
+
+ ret = 0;
+ /* already in expected state */
+ if (ub->dev_info.state != UBLK_S_DEV_LIVE)
+ goto put_disk;
+
+ /* Mark all queues as canceling */
+ blk_mq_quiesce_queue(disk->queue);
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ ubq->canceling = true;
+ }
+ blk_mq_unquiesce_queue(disk->queue);
+
+ if (!timeout_ms)
+ timeout_ms = UINT_MAX;
+ ret = ublk_wait_for_idle_io(ub, timeout_ms);
+
+put_disk:
+ ublk_put_disk(disk);
+unlock:
+ mutex_unlock(&ub->mutex);
+
+ /* Cancel pending uring_cmd */
+ if (!ret)
+ ublk_cancel_dev(ub);
+ return ret;
+}
+
/*
* All control commands are sent via /dev/ublk-control, so we have to check
* the destination device's permission
@@ -3137,6 +3436,8 @@ static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
case UBLK_CMD_SET_PARAMS:
case UBLK_CMD_START_USER_RECOVERY:
case UBLK_CMD_END_USER_RECOVERY:
+ case UBLK_CMD_UPDATE_SIZE:
+ case UBLK_CMD_QUIESCE_DEV:
mask = MAY_READ | MAY_WRITE;
break;
default:
@@ -3178,7 +3479,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
goto out;
if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
- ret = ublk_ctrl_get_features(cmd);
+ ret = ublk_ctrl_get_features(header);
goto out;
}
@@ -3195,17 +3496,17 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
switch (_IOC_NR(cmd_op)) {
case UBLK_CMD_START_DEV:
- ret = ublk_ctrl_start_dev(ub, cmd);
+ ret = ublk_ctrl_start_dev(ub, header);
break;
case UBLK_CMD_STOP_DEV:
ret = ublk_ctrl_stop_dev(ub);
break;
case UBLK_CMD_GET_DEV_INFO:
case UBLK_CMD_GET_DEV_INFO2:
- ret = ublk_ctrl_get_dev_info(ub, cmd);
+ ret = ublk_ctrl_get_dev_info(ub, header);
break;
case UBLK_CMD_ADD_DEV:
- ret = ublk_ctrl_add_dev(cmd);
+ ret = ublk_ctrl_add_dev(header);
break;
case UBLK_CMD_DEL_DEV:
ret = ublk_ctrl_del_dev(&ub, true);
@@ -3214,19 +3515,26 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
ret = ublk_ctrl_del_dev(&ub, false);
break;
case UBLK_CMD_GET_QUEUE_AFFINITY:
- ret = ublk_ctrl_get_queue_affinity(ub, cmd);
+ ret = ublk_ctrl_get_queue_affinity(ub, header);
break;
case UBLK_CMD_GET_PARAMS:
- ret = ublk_ctrl_get_params(ub, cmd);
+ ret = ublk_ctrl_get_params(ub, header);
break;
case UBLK_CMD_SET_PARAMS:
- ret = ublk_ctrl_set_params(ub, cmd);
+ ret = ublk_ctrl_set_params(ub, header);
break;
case UBLK_CMD_START_USER_RECOVERY:
- ret = ublk_ctrl_start_recovery(ub, cmd);
+ ret = ublk_ctrl_start_recovery(ub, header);
break;
case UBLK_CMD_END_USER_RECOVERY:
- ret = ublk_ctrl_end_recovery(ub, cmd);
+ ret = ublk_ctrl_end_recovery(ub, header);
+ break;
+ case UBLK_CMD_UPDATE_SIZE:
+ ublk_ctrl_set_size(ub, header);
+ ret = 0;
+ break;
+ case UBLK_CMD_QUIESCE_DEV:
+ ret = ublk_ctrl_quiesce_dev(ub, header);
break;
default:
ret = -EOPNOTSUPP;
@@ -3261,6 +3569,7 @@ static int __init ublk_init(void)
BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
+ BUILD_BUG_ON(sizeof(struct ublk_auto_buf_reg) != 8);
init_waitqueue_head(&ublk_idr_wq);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7cffea01d868..30bca8cb7106 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -571,7 +571,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
- err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
+ err = blk_rq_map_kern(req, report_buf, report_len, GFP_KERNEL);
if (err)
goto out;
@@ -817,7 +817,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;
- err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+ err = blk_rq_map_kern(req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
if (err)
goto out;
diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
new file mode 100644
index 000000000000..553b1a713ab9
--- /dev/null
+++ b/drivers/block/zloop.c
@@ -0,0 +1,1385 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Christoph Hellwig.
+ * Copyright (c) 2025, Western Digital Corporation or its affiliates.
+ *
+ * Zoned Loop Device driver - exports a zoned block device using one file per
+ * zone as backing storage.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/blk-mq.h>
+#include <linux/blkzoned.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/falloc.h>
+#include <linux/mutex.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+
+/*
+ * Options for adding (and removing) a device.
+ */
+enum {
+ ZLOOP_OPT_ERR = 0,
+ ZLOOP_OPT_ID = (1 << 0),
+ ZLOOP_OPT_CAPACITY = (1 << 1),
+ ZLOOP_OPT_ZONE_SIZE = (1 << 2),
+ ZLOOP_OPT_ZONE_CAPACITY = (1 << 3),
+ ZLOOP_OPT_NR_CONV_ZONES = (1 << 4),
+ ZLOOP_OPT_BASE_DIR = (1 << 5),
+ ZLOOP_OPT_NR_QUEUES = (1 << 6),
+ ZLOOP_OPT_QUEUE_DEPTH = (1 << 7),
+ ZLOOP_OPT_BUFFERED_IO = (1 << 8),
+};
+
+static const match_table_t zloop_opt_tokens = {
+ { ZLOOP_OPT_ID, "id=%d" },
+ { ZLOOP_OPT_CAPACITY, "capacity_mb=%u" },
+ { ZLOOP_OPT_ZONE_SIZE, "zone_size_mb=%u" },
+ { ZLOOP_OPT_ZONE_CAPACITY, "zone_capacity_mb=%u" },
+ { ZLOOP_OPT_NR_CONV_ZONES, "conv_zones=%u" },
+ { ZLOOP_OPT_BASE_DIR, "base_dir=%s" },
+ { ZLOOP_OPT_NR_QUEUES, "nr_queues=%u" },
+ { ZLOOP_OPT_QUEUE_DEPTH, "queue_depth=%u" },
+ { ZLOOP_OPT_BUFFERED_IO, "buffered_io" },
+ { ZLOOP_OPT_ERR, NULL }
+};
+
+/* Default values for the "add" operation. */
+#define ZLOOP_DEF_ID -1
+#define ZLOOP_DEF_ZONE_SIZE ((256ULL * SZ_1M) >> SECTOR_SHIFT)
+#define ZLOOP_DEF_NR_ZONES 64
+#define ZLOOP_DEF_NR_CONV_ZONES 8
+#define ZLOOP_DEF_BASE_DIR "/var/local/zloop"
+#define ZLOOP_DEF_NR_QUEUES 1
+#define ZLOOP_DEF_QUEUE_DEPTH 128
+#define ZLOOP_DEF_BUFFERED_IO false
+
+/* Arbitrary limit on the zone size (16GB). */
+#define ZLOOP_MAX_ZONE_SIZE_MB 16384
+
+struct zloop_options {
+ unsigned int mask;
+ int id;
+ sector_t capacity;
+ sector_t zone_size;
+ sector_t zone_capacity;
+ unsigned int nr_conv_zones;
+ char *base_dir;
+ unsigned int nr_queues;
+ unsigned int queue_depth;
+ bool buffered_io;
+};
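For orientation, the token table and option struct above map one-to-one onto a space-separated option string. The control-device invocation itself is not shown in this hunk, so the following line is purely illustrative of the accepted syntax and values:

	id=0 capacity_mb=16384 zone_size_mb=256 zone_capacity_mb=224 conv_zones=8 base_dir=/var/local/zloop nr_queues=2 queue_depth=128 buffered_io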
+
+/*
+ * Device states.
+ */
+enum {
+ Zlo_creating = 0,
+ Zlo_live,
+ Zlo_deleting,
+};
+
+enum zloop_zone_flags {
+ ZLOOP_ZONE_CONV = 0,
+ ZLOOP_ZONE_SEQ_ERROR,
+};
+
+struct zloop_zone {
+ struct file *file;
+
+ unsigned long flags;
+ struct mutex lock;
+ enum blk_zone_cond cond;
+ sector_t start;
+ sector_t wp;
+
+ gfp_t old_gfp_mask;
+};
+
+struct zloop_device {
+ unsigned int id;
+ unsigned int state;
+
+ struct blk_mq_tag_set tag_set;
+ struct gendisk *disk;
+
+ struct workqueue_struct *workqueue;
+ bool buffered_io;
+
+ const char *base_dir;
+ struct file *data_dir;
+
+ unsigned int zone_shift;
+ sector_t zone_size;
+ sector_t zone_capacity;
+ unsigned int nr_zones;
+ unsigned int nr_conv_zones;
+ unsigned int block_size;
+
+ struct zloop_zone zones[] __counted_by(nr_zones);
+};
+
+struct zloop_cmd {
+ struct work_struct work;
+ atomic_t ref;
+ sector_t sector;
+ sector_t nr_sectors;
+ long ret;
+ struct kiocb iocb;
+ struct bio_vec *bvec;
+};
+
+static DEFINE_IDR(zloop_index_idr);
+static DEFINE_MUTEX(zloop_ctl_mutex);
+
+static unsigned int rq_zone_no(struct request *rq)
+{
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ return blk_rq_pos(rq) >> zlo->zone_shift;
+}
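rq_zone_no() works because the zone size is a power of two: zone_shift is the log2 of the zone size in sectors (its assignment is outside this hunk), so shifting the request's start sector right yields the zone index. A stand-alone illustration, assuming the default 256 MiB zone (2^19 sectors):

	#include <stdio.h>

	int main(void)
	{
		unsigned int zone_shift = 19;               /* 256 MiB zone = 2^19 sectors */
		unsigned long long sector = 1500000ULL;     /* request start sector */

		printf("zone %llu\n", sector >> zone_shift);  /* -> zone 2 */
		return 0;
	}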
+
+static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ struct kstat stat;
+ sector_t file_sectors;
+ int ret;
+
+ lockdep_assert_held(&zone->lock);
+
+ ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0);
+ if (ret < 0) {
+ pr_err("Failed to get zone %u file stat (err=%d)\n",
+ zone_no, ret);
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ return ret;
+ }
+
+ file_sectors = stat.size >> SECTOR_SHIFT;
+ if (file_sectors > zlo->zone_capacity) {
+ pr_err("Zone %u file too large (%llu sectors > %llu)\n",
+ zone_no, file_sectors, zlo->zone_capacity);
+ return -EINVAL;
+ }
+
+ if (file_sectors & ((zlo->block_size >> SECTOR_SHIFT) - 1)) {
+ pr_err("Zone %u file size not aligned to block size %u\n",
+ zone_no, zlo->block_size);
+ return -EINVAL;
+ }
+
+ if (!file_sectors) {
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+ } else if (file_sectors == zlo->zone_capacity) {
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zlo->zone_size;
+ } else {
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ zone->wp = zone->start + file_sectors;
+ }
+
+ return 0;
+}
+
+static int zloop_open_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret)
+ goto unlock;
+ }
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_EXP_OPEN:
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_CLOSED:
+ case BLK_ZONE_COND_IMP_OPEN:
+ zone->cond = BLK_ZONE_COND_EXP_OPEN;
+ break;
+ case BLK_ZONE_COND_FULL:
+ default:
+ ret = -EIO;
+ break;
+ }
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret)
+ goto unlock;
+ }
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_CLOSED:
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ if (zone->wp == zone->start)
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ else
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_FULL:
+ default:
+ ret = -EIO;
+ break;
+ }
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) &&
+ zone->cond == BLK_ZONE_COND_EMPTY)
+ goto unlock;
+
+ if (vfs_truncate(&zone->file->f_path, 0)) {
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+ clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_reset_all_zones(struct zloop_device *zlo)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = zlo->nr_conv_zones; i < zlo->nr_zones; i++) {
+ ret = zloop_reset_zone(zlo, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) &&
+ zone->cond == BLK_ZONE_COND_FULL)
+ goto unlock;
+
+ if (vfs_truncate(&zone->file->f_path, zlo->zone_size << SECTOR_SHIFT)) {
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zlo->zone_size;
+ clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+
+ unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static void zloop_put_cmd(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+
+ if (!atomic_dec_and_test(&cmd->ref))
+ return;
+ kfree(cmd->bvec);
+ cmd->bvec = NULL;
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
+}
+
+static void zloop_rw_complete(struct kiocb *iocb, long ret)
+{
+ struct zloop_cmd *cmd = container_of(iocb, struct zloop_cmd, iocb);
+
+ cmd->ret = ret;
+ zloop_put_cmd(cmd);
+}
+
+static void zloop_rw(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ struct zloop_device *zlo = rq->q->queuedata;
+ unsigned int zone_no = rq_zone_no(rq);
+ sector_t sector = blk_rq_pos(rq);
+ sector_t nr_sectors = blk_rq_sectors(rq);
+ bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND;
+ bool is_write = req_op(rq) == REQ_OP_WRITE || is_append;
+ int rw = is_write ? ITER_SOURCE : ITER_DEST;
+ struct req_iterator rq_iter;
+ struct zloop_zone *zone;
+ struct iov_iter iter;
+ struct bio_vec tmp;
+ sector_t zone_end;
+ int nr_bvec = 0;
+ int ret;
+
+ atomic_set(&cmd->ref, 2);
+ cmd->sector = sector;
+ cmd->nr_sectors = nr_sectors;
+ cmd->ret = 0;
+
+ /* We should never get an I/O beyond the device capacity. */
+ if (WARN_ON_ONCE(zone_no >= zlo->nr_zones)) {
+ ret = -EIO;
+ goto out;
+ }
+ zone = &zlo->zones[zone_no];
+ zone_end = zone->start + zlo->zone_capacity;
+
+ /*
+ * The block layer should never send requests that are not fully
+ * contained within the zone.
+ */
+ if (WARN_ON_ONCE(sector + nr_sectors > zone->start + zlo->zone_size)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ mutex_lock(&zone->lock);
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ mutex_unlock(&zone->lock);
+ if (ret)
+ goto out;
+ }
+
+ if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) {
+ mutex_lock(&zone->lock);
+
+ if (is_append) {
+ sector = zone->wp;
+ cmd->sector = sector;
+ }
+
+ /*
+ * Write operations must be aligned to the write pointer and
+ * fully contained within the zone capacity.
+ */
+ if (sector != zone->wp || zone->wp + nr_sectors > zone_end) {
+ pr_err("Zone %u: unaligned write: sect %llu, wp %llu\n",
+ zone_no, sector, zone->wp);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* Implicitly open the target zone. */
+ if (zone->cond == BLK_ZONE_COND_CLOSED ||
+ zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+ /*
+ * Advance the write pointer of sequential zones. If the write
+ * fails, the wp position will be corrected when the next I/O
+		 * completes.
+ */
+ zone->wp += nr_sectors;
+ if (zone->wp == zone_end)
+ zone->cond = BLK_ZONE_COND_FULL;
+ }
+
+ rq_for_each_bvec(tmp, rq, rq_iter)
+ nr_bvec++;
+
+ if (rq->bio != rq->biotail) {
+ struct bio_vec *bvec;
+
+ cmd->bvec = kmalloc_array(nr_bvec, sizeof(*cmd->bvec), GFP_NOIO);
+ if (!cmd->bvec) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /*
+ * The bios of the request may be started from the middle of
+ * the 'bvec' because of bio splitting, so we can't directly
+		 * copy bio->bi_io_vec to the new bvec. The rq_for_each_bvec
+ * API will take care of all details for us.
+ */
+ bvec = cmd->bvec;
+ rq_for_each_bvec(tmp, rq, rq_iter) {
+ *bvec = tmp;
+ bvec++;
+ }
+ iov_iter_bvec(&iter, rw, cmd->bvec, nr_bvec, blk_rq_bytes(rq));
+ } else {
+ /*
+ * Same here, this bio may be started from the middle of the
+		 * 'bvec' because of bio splitting, so the offset from the bvec
+		 * must be passed to the iov iterator
+ */
+ iov_iter_bvec(&iter, rw,
+ __bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter),
+ nr_bvec, blk_rq_bytes(rq));
+ iter.iov_offset = rq->bio->bi_iter.bi_bvec_done;
+ }
+
+ cmd->iocb.ki_pos = (sector - zone->start) << SECTOR_SHIFT;
+ cmd->iocb.ki_filp = zone->file;
+ cmd->iocb.ki_complete = zloop_rw_complete;
+ if (!zlo->buffered_io)
+ cmd->iocb.ki_flags = IOCB_DIRECT;
+ cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+
+ if (rw == ITER_SOURCE)
+ ret = zone->file->f_op->write_iter(&cmd->iocb, &iter);
+ else
+ ret = zone->file->f_op->read_iter(&cmd->iocb, &iter);
+unlock:
+ if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write)
+ mutex_unlock(&zone->lock);
+out:
+ if (ret != -EIOCBQUEUED)
+ zloop_rw_complete(&cmd->iocb, ret);
+ zloop_put_cmd(cmd);
+}
+
+static void zloop_handle_cmd(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ switch (req_op(rq)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ case REQ_OP_ZONE_APPEND:
+ /*
+ * zloop_rw() always executes asynchronously or completes
+ * directly.
+ */
+ zloop_rw(cmd);
+ return;
+ case REQ_OP_FLUSH:
+ /*
+ * Sync the entire FS containing the zone files instead of
+ * walking all files
+ */
+ cmd->ret = sync_filesystem(file_inode(zlo->data_dir)->i_sb);
+ break;
+ case REQ_OP_ZONE_RESET:
+ cmd->ret = zloop_reset_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_RESET_ALL:
+ cmd->ret = zloop_reset_all_zones(zlo);
+ break;
+ case REQ_OP_ZONE_FINISH:
+ cmd->ret = zloop_finish_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_OPEN:
+ cmd->ret = zloop_open_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ cmd->ret = zloop_close_zone(zlo, rq_zone_no(rq));
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ pr_err("Unsupported operation %d\n", req_op(rq));
+ cmd->ret = -EOPNOTSUPP;
+ break;
+ }
+
+ blk_mq_complete_request(rq);
+}
+
+static void zloop_cmd_workfn(struct work_struct *work)
+{
+ struct zloop_cmd *cmd = container_of(work, struct zloop_cmd, work);
+ int orig_flags = current->flags;
+
+ current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
+ zloop_handle_cmd(cmd);
+ current->flags = orig_flags;
+}
+
+static void zloop_complete_rq(struct request *rq)
+{
+ struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct zloop_device *zlo = rq->q->queuedata;
+ unsigned int zone_no = cmd->sector >> zlo->zone_shift;
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ blk_status_t sts = BLK_STS_OK;
+
+ switch (req_op(rq)) {
+ case REQ_OP_READ:
+ if (cmd->ret < 0)
+ pr_err("Zone %u: failed read sector %llu, %llu sectors\n",
+ zone_no, cmd->sector, cmd->nr_sectors);
+
+ if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) {
+ /* short read */
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ zero_fill_bio(bio);
+ }
+ break;
+ case REQ_OP_WRITE:
+ case REQ_OP_ZONE_APPEND:
+ if (cmd->ret < 0)
+ pr_err("Zone %u: failed %swrite sector %llu, %llu sectors\n",
+ zone_no,
+ req_op(rq) == REQ_OP_WRITE ? "" : "append ",
+ cmd->sector, cmd->nr_sectors);
+
+ if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) {
+ pr_err("Zone %u: partial write %ld/%u B\n",
+ zone_no, cmd->ret, blk_rq_bytes(rq));
+ cmd->ret = -EIO;
+ }
+
+ if (cmd->ret < 0 && !test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
+ /*
+ * A write to a sequential zone file failed: mark the
+ * zone as having an error. This will be corrected and
+ * cleared when the next IO is submitted.
+ */
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ break;
+ }
+ if (req_op(rq) == REQ_OP_ZONE_APPEND)
+ rq->__sector = cmd->sector;
+
+ break;
+ default:
+ break;
+ }
+
+ if (cmd->ret < 0)
+ sts = errno_to_blk_status(cmd->ret);
+ blk_mq_end_request(rq, sts);
+}
+
+static blk_status_t zloop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *rq = bd->rq;
+ struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ if (zlo->state == Zlo_deleting)
+ return BLK_STS_IOERR;
+
+ blk_mq_start_request(rq);
+
+ INIT_WORK(&cmd->work, zloop_cmd_workfn);
+ queue_work(zlo->workqueue, &cmd->work);
+
+ return BLK_STS_OK;
+}
+
+static const struct blk_mq_ops zloop_mq_ops = {
+ .queue_rq = zloop_queue_rq,
+ .complete = zloop_complete_rq,
+};
+
+static int zloop_open(struct gendisk *disk, blk_mode_t mode)
+{
+ struct zloop_device *zlo = disk->private_data;
+ int ret;
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ if (zlo->state != Zlo_live)
+ ret = -ENXIO;
+ mutex_unlock(&zloop_ctl_mutex);
+ return ret;
+}
+
+static int zloop_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct zloop_device *zlo = disk->private_data;
+ struct blk_zone blkz = {};
+ unsigned int first, i;
+ int ret;
+
+ first = disk_zone_no(disk, sector);
+ if (first >= zlo->nr_zones)
+ return 0;
+ nr_zones = min(nr_zones, zlo->nr_zones - first);
+
+ for (i = 0; i < nr_zones; i++) {
+ unsigned int zone_no = first + i;
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret) {
+ mutex_unlock(&zone->lock);
+ return ret;
+ }
+ }
+
+ blkz.start = zone->start;
+ blkz.len = zlo->zone_size;
+ blkz.wp = zone->wp;
+ blkz.cond = zone->cond;
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
+ blkz.type = BLK_ZONE_TYPE_CONVENTIONAL;
+ blkz.capacity = zlo->zone_size;
+ } else {
+ blkz.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ blkz.capacity = zlo->zone_capacity;
+ }
+
+ mutex_unlock(&zone->lock);
+
+ ret = cb(&blkz, i, data);
+ if (ret)
+ return ret;
+ }
+
+ return nr_zones;
+}
+
+static void zloop_free_disk(struct gendisk *disk)
+{
+ struct zloop_device *zlo = disk->private_data;
+ unsigned int i;
+
+ for (i = 0; i < zlo->nr_zones; i++) {
+ struct zloop_zone *zone = &zlo->zones[i];
+
+ mapping_set_gfp_mask(zone->file->f_mapping,
+ zone->old_gfp_mask);
+ fput(zone->file);
+ }
+
+ fput(zlo->data_dir);
+ destroy_workqueue(zlo->workqueue);
+ kfree(zlo->base_dir);
+ kvfree(zlo);
+}
+
+static const struct block_device_operations zloop_fops = {
+ .owner = THIS_MODULE,
+ .open = zloop_open,
+ .report_zones = zloop_report_zones,
+ .free_disk = zloop_free_disk,
+};
+
+__printf(3, 4)
+static struct file *zloop_filp_open_fmt(int oflags, umode_t mode,
+ const char *fmt, ...)
+{
+ struct file *file;
+ va_list ap;
+ char *p;
+
+ va_start(ap, fmt);
+ p = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ file = filp_open(p, oflags, mode);
+ kfree(p);
+ return file;
+}
+
+static int zloop_get_block_size(struct zloop_device *zlo,
+ struct zloop_zone *zone)
+{
+ struct block_device *sb_bdev = zone->file->f_mapping->host->i_sb->s_bdev;
+ struct kstat st;
+
+ /*
+ * If the FS block size is lower than or equal to 4K, use that as the
+	 * device block size. Otherwise, fall back to the FS direct IO alignment
+ * constraint if that is provided, and to the FS underlying device
+ * physical block size if the direct IO alignment is unknown.
+ */
+ if (file_inode(zone->file)->i_sb->s_blocksize <= SZ_4K)
+ zlo->block_size = file_inode(zone->file)->i_sb->s_blocksize;
+ else if (!vfs_getattr(&zone->file->f_path, &st, STATX_DIOALIGN, 0) &&
+ (st.result_mask & STATX_DIOALIGN))
+ zlo->block_size = st.dio_offset_align;
+ else if (sb_bdev)
+ zlo->block_size = bdev_physical_block_size(sb_bdev);
+ else
+ zlo->block_size = SECTOR_SIZE;
+
+ if (zlo->zone_capacity & ((zlo->block_size >> SECTOR_SHIFT) - 1)) {
+ pr_err("Zone capacity is not aligned to block size %u\n",
+ zlo->block_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
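+/*
+ * Open (or create) the backing file for a zone and set up its in-memory
+ * state. Conventional zone files are truncated to the zone size, while
+ * sequential zones are refreshed via zloop_update_seq_zone().
+ */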
+static int zloop_init_zone(struct zloop_device *zlo, struct zloop_options *opts,
+ unsigned int zone_no, bool restore)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int oflags = O_RDWR;
+ struct kstat stat;
+ sector_t file_sectors;
+ int ret;
+
+ mutex_init(&zone->lock);
+ zone->start = (sector_t)zone_no << zlo->zone_shift;
+
+ if (!restore)
+ oflags |= O_CREAT;
+
+ if (!opts->buffered_io)
+ oflags |= O_DIRECT;
+
+ if (zone_no < zlo->nr_conv_zones) {
+ /* Conventional zone file. */
+ set_bit(ZLOOP_ZONE_CONV, &zone->flags);
+ zone->cond = BLK_ZONE_COND_NOT_WP;
+ zone->wp = U64_MAX;
+
+ zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/cnv-%06u",
+ zlo->base_dir, zlo->id, zone_no);
+ if (IS_ERR(zone->file)) {
+ pr_err("Failed to open zone %u file %s/%u/cnv-%06u (err=%ld)",
+ zone_no, zlo->base_dir, zlo->id, zone_no,
+ PTR_ERR(zone->file));
+ return PTR_ERR(zone->file);
+ }
+
+ if (!zlo->block_size) {
+ ret = zloop_get_block_size(zlo, zone);
+ if (ret)
+ return ret;
+ }
+
+ ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0);
+ if (ret < 0) {
+ pr_err("Failed to get zone %u file stat\n", zone_no);
+ return ret;
+ }
+ file_sectors = stat.size >> SECTOR_SHIFT;
+
+ if (restore && file_sectors != zlo->zone_size) {
+ pr_err("Invalid conventional zone %u file size (%llu sectors != %llu)\n",
+				zone_no, file_sectors, zlo->zone_size);
+			return -EINVAL;
+ }
+
+ ret = vfs_truncate(&zone->file->f_path,
+ zlo->zone_size << SECTOR_SHIFT);
+ if (ret < 0) {
+ pr_err("Failed to truncate zone %u file (err=%d)\n",
+ zone_no, ret);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* Sequential zone file. */
+ zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/seq-%06u",
+ zlo->base_dir, zlo->id, zone_no);
+ if (IS_ERR(zone->file)) {
+ pr_err("Failed to open zone %u file %s/%u/seq-%06u (err=%ld)",
+ zone_no, zlo->base_dir, zlo->id, zone_no,
+ PTR_ERR(zone->file));
+ return PTR_ERR(zone->file);
+ }
+
+ if (!zlo->block_size) {
+ ret = zloop_get_block_size(zlo, zone);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&zone->lock);
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
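+/*
+ * Check whether zone files already exist for this device ID, in which case
+ * the add operation restores an existing device instead of creating one.
+ */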
+static bool zloop_dev_exists(struct zloop_device *zlo)
+{
+ struct file *cnv, *seq;
+ bool exists;
+
+ cnv = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/cnv-%06u",
+ zlo->base_dir, zlo->id, 0);
+ seq = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/seq-%06u",
+ zlo->base_dir, zlo->id, 0);
+ exists = !IS_ERR(cnv) || !IS_ERR(seq);
+
+ if (!IS_ERR(cnv))
+ fput(cnv);
+ if (!IS_ERR(seq))
+ fput(seq);
+
+ return exists;
+}
+
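+/*
+ * Handle the "add" control command: create a new zoned loop device, or
+ * restore one from existing zone files.
+ */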
+static int zloop_ctl_add(struct zloop_options *opts)
+{
+ struct queue_limits lim = {
+ .max_hw_sectors = SZ_1M >> SECTOR_SHIFT,
+ .max_hw_zone_append_sectors = SZ_1M >> SECTOR_SHIFT,
+ .chunk_sectors = opts->zone_size,
+ .features = BLK_FEAT_ZONED,
+ };
+ unsigned int nr_zones, i, j;
+ struct zloop_device *zlo;
+ int ret = -EINVAL;
+ bool restore;
+
+ __module_get(THIS_MODULE);
+
+ nr_zones = opts->capacity >> ilog2(opts->zone_size);
+ if (opts->nr_conv_zones >= nr_zones) {
+ pr_err("Invalid number of conventional zones %u\n",
+ opts->nr_conv_zones);
+ goto out;
+ }
+
+ zlo = kvzalloc(struct_size(zlo, zones, nr_zones), GFP_KERNEL);
+ if (!zlo) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ zlo->state = Zlo_creating;
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ goto out_free_dev;
+
+ /* Allocate id, if @opts->id >= 0, we're requesting that specific id */
+ if (opts->id >= 0) {
+ ret = idr_alloc(&zloop_index_idr, zlo,
+ opts->id, opts->id + 1, GFP_KERNEL);
+ if (ret == -ENOSPC)
+ ret = -EEXIST;
+ } else {
+ ret = idr_alloc(&zloop_index_idr, zlo, 0, 0, GFP_KERNEL);
+ }
+ mutex_unlock(&zloop_ctl_mutex);
+ if (ret < 0)
+ goto out_free_dev;
+
+ zlo->id = ret;
+ zlo->zone_shift = ilog2(opts->zone_size);
+ zlo->zone_size = opts->zone_size;
+ if (opts->zone_capacity)
+ zlo->zone_capacity = opts->zone_capacity;
+ else
+ zlo->zone_capacity = zlo->zone_size;
+ zlo->nr_zones = nr_zones;
+ zlo->nr_conv_zones = opts->nr_conv_zones;
+ zlo->buffered_io = opts->buffered_io;
+
+ zlo->workqueue = alloc_workqueue("zloop%d", WQ_UNBOUND | WQ_FREEZABLE,
+ opts->nr_queues * opts->queue_depth, zlo->id);
+ if (!zlo->workqueue) {
+ ret = -ENOMEM;
+ goto out_free_idr;
+ }
+
+ if (opts->base_dir)
+ zlo->base_dir = kstrdup(opts->base_dir, GFP_KERNEL);
+ else
+ zlo->base_dir = kstrdup(ZLOOP_DEF_BASE_DIR, GFP_KERNEL);
+ if (!zlo->base_dir) {
+ ret = -ENOMEM;
+ goto out_destroy_workqueue;
+ }
+
+ zlo->data_dir = zloop_filp_open_fmt(O_RDONLY | O_DIRECTORY, 0, "%s/%u",
+ zlo->base_dir, zlo->id);
+ if (IS_ERR(zlo->data_dir)) {
+ ret = PTR_ERR(zlo->data_dir);
+ pr_warn("Failed to open directory %s/%u (err=%d)\n",
+ zlo->base_dir, zlo->id, ret);
+ goto out_free_base_dir;
+ }
+
+ /*
+ * If we already have zone files, we are restoring a device created by a
+ * previous add operation. In this case, zloop_init_zone() will check
+ * that the zone files are consistent with the zone configuration given.
+ */
+ restore = zloop_dev_exists(zlo);
+ for (i = 0; i < nr_zones; i++) {
+ ret = zloop_init_zone(zlo, opts, i, restore);
+ if (ret)
+ goto out_close_files;
+ }
+
+ lim.physical_block_size = zlo->block_size;
+ lim.logical_block_size = zlo->block_size;
+
+ zlo->tag_set.ops = &zloop_mq_ops;
+ zlo->tag_set.nr_hw_queues = opts->nr_queues;
+ zlo->tag_set.queue_depth = opts->queue_depth;
+ zlo->tag_set.numa_node = NUMA_NO_NODE;
+ zlo->tag_set.cmd_size = sizeof(struct zloop_cmd);
+ zlo->tag_set.driver_data = zlo;
+
+ ret = blk_mq_alloc_tag_set(&zlo->tag_set);
+ if (ret) {
+ pr_err("blk_mq_alloc_tag_set failed (err=%d)\n", ret);
+ goto out_close_files;
+ }
+
+ zlo->disk = blk_mq_alloc_disk(&zlo->tag_set, &lim, zlo);
+ if (IS_ERR(zlo->disk)) {
+ pr_err("blk_mq_alloc_disk failed (err=%d)\n", ret);
+ ret = PTR_ERR(zlo->disk);
+ goto out_cleanup_tags;
+ }
+ zlo->disk->flags = GENHD_FL_NO_PART;
+ zlo->disk->fops = &zloop_fops;
+ zlo->disk->private_data = zlo;
+ sprintf(zlo->disk->disk_name, "zloop%d", zlo->id);
+ set_capacity(zlo->disk, (u64)lim.chunk_sectors * zlo->nr_zones);
+
+ ret = blk_revalidate_disk_zones(zlo->disk);
+ if (ret)
+ goto out_cleanup_disk;
+
+ ret = add_disk(zlo->disk);
+ if (ret) {
+ pr_err("add_disk failed (err=%d)\n", ret);
+ goto out_cleanup_disk;
+ }
+
+ mutex_lock(&zloop_ctl_mutex);
+ zlo->state = Zlo_live;
+ mutex_unlock(&zloop_ctl_mutex);
+
+ pr_info("Added device %d: %u zones of %llu MB, %u B block size\n",
+ zlo->id, zlo->nr_zones,
+ ((sector_t)zlo->zone_size << SECTOR_SHIFT) >> 20,
+ zlo->block_size);
+
+ return 0;
+
+out_cleanup_disk:
+ put_disk(zlo->disk);
+out_cleanup_tags:
+ blk_mq_free_tag_set(&zlo->tag_set);
+out_close_files:
+ for (j = 0; j < i; j++) {
+ struct zloop_zone *zone = &zlo->zones[j];
+
+ if (!IS_ERR_OR_NULL(zone->file))
+ fput(zone->file);
+ }
+ fput(zlo->data_dir);
+out_free_base_dir:
+ kfree(zlo->base_dir);
+out_destroy_workqueue:
+ destroy_workqueue(zlo->workqueue);
+out_free_idr:
+ mutex_lock(&zloop_ctl_mutex);
+ idr_remove(&zloop_index_idr, zlo->id);
+ mutex_unlock(&zloop_ctl_mutex);
+out_free_dev:
+ kvfree(zlo);
+out:
+ module_put(THIS_MODULE);
+ if (ret == -ENOENT)
+ ret = -EINVAL;
+ return ret;
+}
+
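+/* Handle the "remove" control command: tear down the device with the given ID. */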
+static int zloop_ctl_remove(struct zloop_options *opts)
+{
+ struct zloop_device *zlo;
+ int ret;
+
+ if (!(opts->mask & ZLOOP_OPT_ID)) {
+ pr_err("No ID specified\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ zlo = idr_find(&zloop_index_idr, opts->id);
+ if (!zlo || zlo->state == Zlo_creating) {
+ ret = -ENODEV;
+ } else if (zlo->state == Zlo_deleting) {
+ ret = -EINVAL;
+ } else {
+ idr_remove(&zloop_index_idr, zlo->id);
+ zlo->state = Zlo_deleting;
+ }
+
+ mutex_unlock(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ del_gendisk(zlo->disk);
+ put_disk(zlo->disk);
+ blk_mq_free_tag_set(&zlo->tag_set);
+
+ pr_info("Removed device %d\n", opts->id);
+
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
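+/* Parse the comma-separated options following an "add" or "remove" command. */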
+static int zloop_parse_options(struct zloop_options *opts, const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ unsigned int token;
+ int ret = 0;
+
+ /* Set defaults. */
+ opts->mask = 0;
+ opts->id = ZLOOP_DEF_ID;
+ opts->capacity = ZLOOP_DEF_ZONE_SIZE * ZLOOP_DEF_NR_ZONES;
+ opts->zone_size = ZLOOP_DEF_ZONE_SIZE;
+ opts->nr_conv_zones = ZLOOP_DEF_NR_CONV_ZONES;
+ opts->nr_queues = ZLOOP_DEF_NR_QUEUES;
+ opts->queue_depth = ZLOOP_DEF_QUEUE_DEPTH;
+ opts->buffered_io = ZLOOP_DEF_BUFFERED_IO;
+
+ if (!buf)
+ return 0;
+
+ /* Skip leading spaces before the options. */
+ while (isspace(*buf))
+ buf++;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ /* Parse the options, doing only some light invalid value checks. */
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, zloop_opt_tokens, args);
+ opts->mask |= token;
+ switch (token) {
+ case ZLOOP_OPT_ID:
+ if (match_int(args, &opts->id)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case ZLOOP_OPT_CAPACITY:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid capacity\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->capacity =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_ZONE_SIZE:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token || token > ZLOOP_MAX_ZONE_SIZE_MB ||
+ !is_power_of_2(token)) {
+ pr_err("Invalid zone size %u\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->zone_size =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_ZONE_CAPACITY:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid zone capacity\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->zone_capacity =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_NR_CONV_ZONES:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_conv_zones = token;
+ break;
+ case ZLOOP_OPT_BASE_DIR:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->base_dir);
+ opts->base_dir = p;
+ break;
+ case ZLOOP_OPT_NR_QUEUES:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid number of queues\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_queues = min(token, num_online_cpus());
+ break;
+ case ZLOOP_OPT_QUEUE_DEPTH:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid queue depth\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->queue_depth = token;
+ break;
+ case ZLOOP_OPT_BUFFERED_IO:
+ opts->buffered_io = true;
+ break;
+ case ZLOOP_OPT_ERR:
+ default:
+ pr_warn("unknown parameter or missing value '%s'\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ ret = -EINVAL;
+ if (opts->capacity <= opts->zone_size) {
+ pr_err("Invalid capacity\n");
+ goto out;
+ }
+
+ if (opts->zone_capacity > opts->zone_size) {
+ pr_err("Invalid zone capacity\n");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ kfree(options);
+ return ret;
+}
+
+enum {
+ ZLOOP_CTL_ADD,
+ ZLOOP_CTL_REMOVE,
+};
+
+static struct zloop_ctl_op {
+ int code;
+ const char *name;
+} zloop_ctl_ops[] = {
+ { ZLOOP_CTL_ADD, "add" },
+ { ZLOOP_CTL_REMOVE, "remove" },
+ { -1, NULL },
+};
+
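+/*
+ * Control device write handler: a single write carries one command, either
+ * "add [options]" or "remove id=<N>".
+ */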
+static ssize_t zloop_ctl_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ struct zloop_options opts = { };
+ struct zloop_ctl_op *op;
+ const char *buf, *opts_buf;
+ int i, ret;
+
+ if (count > PAGE_SIZE)
+ return -ENOMEM;
+
+ buf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ for (i = 0; i < ARRAY_SIZE(zloop_ctl_ops); i++) {
+ op = &zloop_ctl_ops[i];
+ if (!op->name) {
+ pr_err("Invalid operation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!strncmp(buf, op->name, strlen(op->name)))
+ break;
+ }
+
+ if (count <= strlen(op->name))
+ opts_buf = NULL;
+ else
+ opts_buf = buf + strlen(op->name);
+
+ ret = zloop_parse_options(&opts, opts_buf);
+ if (ret) {
+ pr_err("Failed to parse options\n");
+ goto out;
+ }
+
+ switch (op->code) {
+ case ZLOOP_CTL_ADD:
+ ret = zloop_ctl_add(&opts);
+ break;
+ case ZLOOP_CTL_REMOVE:
+ ret = zloop_ctl_remove(&opts);
+ break;
+ default:
+ pr_err("Invalid operation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(opts.base_dir);
+ kfree(buf);
+ return ret ? ret : count;
+}
+
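+/* Reading the control device shows the supported command syntax. */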
+static int zloop_ctl_show(struct seq_file *seq_file, void *private)
+{
+ const struct match_token *tok;
+ int i;
+
+ /* Add operation */
+ seq_printf(seq_file, "%s ", zloop_ctl_ops[0].name);
+ for (i = 0; i < ARRAY_SIZE(zloop_opt_tokens); i++) {
+ tok = &zloop_opt_tokens[i];
+ if (!tok->pattern)
+ break;
+ if (i)
+ seq_putc(seq_file, ',');
+ seq_puts(seq_file, tok->pattern);
+ }
+ seq_putc(seq_file, '\n');
+
+ /* Remove operation */
+ seq_puts(seq_file, zloop_ctl_ops[1].name);
+ seq_puts(seq_file, " id=%d\n");
+
+ return 0;
+}
+
+static int zloop_ctl_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return single_open(file, zloop_ctl_show, NULL);
+}
+
+static int zloop_ctl_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations zloop_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = zloop_ctl_open,
+ .release = zloop_ctl_release,
+ .write = zloop_ctl_write,
+ .read = seq_read,
+};
+
+static struct miscdevice zloop_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "zloop-control",
+ .fops = &zloop_ctl_fops,
+};
+
+static int __init zloop_init(void)
+{
+ int ret;
+
+ ret = misc_register(&zloop_misc);
+ if (ret) {
+ pr_err("Failed to register misc device: %d\n", ret);
+ return ret;
+ }
+ pr_info("Module loaded\n");
+
+ return 0;
+}
+
+static void __exit zloop_exit(void)
+{
+ misc_deregister(&zloop_misc);
+ idr_destroy(&zloop_index_idr);
+}
+
+module_init(zloop_init);
+module_exit(zloop_exit);
+
+MODULE_DESCRIPTION("Zoned loopback device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/zram/backend_deflate.c b/drivers/block/zram/backend_deflate.c
index 0f7f252c12f4..b75016e0e654 100644
--- a/drivers/block/zram/backend_deflate.c
+++ b/drivers/block/zram/backend_deflate.c
@@ -8,7 +8,7 @@
#include "backend_deflate.h"
/* Use the same value as crypto API */
-#define DEFLATE_DEF_WINBITS 11
+#define DEFLATE_DEF_WINBITS (-11)
#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
struct deflate_ctx {
@@ -22,8 +22,10 @@ static void deflate_release_params(struct zcomp_params *params)
static int deflate_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = Z_DEFAULT_COMPRESSION;
+ if (params->deflate.winbits == ZCOMP_PARAM_NOT_SET)
+ params->deflate.winbits = DEFLATE_DEF_WINBITS;
return 0;
}
@@ -57,13 +59,13 @@ static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
return -ENOMEM;
ctx->context = zctx;
- sz = zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL);
+ sz = zlib_deflate_workspacesize(params->deflate.winbits, MAX_MEM_LEVEL);
zctx->cctx.workspace = vzalloc(sz);
if (!zctx->cctx.workspace)
goto error;
ret = zlib_deflateInit2(&zctx->cctx, params->level, Z_DEFLATED,
- -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ params->deflate.winbits, DEFLATE_DEF_MEMLEVEL,
Z_DEFAULT_STRATEGY);
if (ret != Z_OK)
goto error;
@@ -73,7 +75,7 @@ static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
if (!zctx->dctx.workspace)
goto error;
- ret = zlib_inflateInit2(&zctx->dctx, -DEFLATE_DEF_WINBITS);
+ ret = zlib_inflateInit2(&zctx->dctx, params->deflate.winbits);
if (ret != Z_OK)
goto error;
diff --git a/drivers/block/zram/backend_lz4.c b/drivers/block/zram/backend_lz4.c
index 847f3334eb38..daccd60857eb 100644
--- a/drivers/block/zram/backend_lz4.c
+++ b/drivers/block/zram/backend_lz4.c
@@ -18,7 +18,7 @@ static void lz4_release_params(struct zcomp_params *params)
static int lz4_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = LZ4_ACCELERATION_DEFAULT;
return 0;
diff --git a/drivers/block/zram/backend_lz4hc.c b/drivers/block/zram/backend_lz4hc.c
index 5f37d5abcaeb..9e8a35dfa56d 100644
--- a/drivers/block/zram/backend_lz4hc.c
+++ b/drivers/block/zram/backend_lz4hc.c
@@ -18,7 +18,7 @@ static void lz4hc_release_params(struct zcomp_params *params)
static int lz4hc_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = LZ4HC_DEFAULT_CLEVEL;
return 0;
diff --git a/drivers/block/zram/backend_zstd.c b/drivers/block/zram/backend_zstd.c
index 22c8067536f3..81defb98ed09 100644
--- a/drivers/block/zram/backend_zstd.c
+++ b/drivers/block/zram/backend_zstd.c
@@ -58,7 +58,7 @@ static int zstd_setup_params(struct zcomp_params *params)
return -ENOMEM;
params->drv_data = zp;
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = zstd_default_clevel();
zp->cprm = zstd_get_params(params->level, PAGE_SIZE);
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 25339ed1e07e..4acffe671a5e 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -5,7 +5,11 @@
#include <linux/mutex.h>
-#define ZCOMP_PARAM_NO_LEVEL INT_MIN
+#define ZCOMP_PARAM_NOT_SET INT_MIN
+
+struct deflate_params {
+ s32 winbits;
+};
/*
* Immutable driver (backend) parameters. The driver may attach private
@@ -17,6 +21,9 @@ struct zcomp_params {
void *dict;
size_t dict_sz;
s32 level;
+ union {
+ struct deflate_params deflate;
+ };
void *drv_data;
};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fda7d8624889..54c57103715f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -734,114 +734,19 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
submit_bio(bio);
}
-#define PAGE_WB_SIG "page_index="
-
-#define PAGE_WRITEBACK 0
-#define HUGE_WRITEBACK (1<<0)
-#define IDLE_WRITEBACK (1<<1)
-#define INCOMPRESSIBLE_WRITEBACK (1<<2)
-
-static int scan_slots_for_writeback(struct zram *zram, u32 mode,
- unsigned long nr_pages,
- unsigned long index,
- struct zram_pp_ctl *ctl)
-{
- for (; nr_pages != 0; index++, nr_pages--) {
- bool ok = true;
-
- zram_slot_lock(zram, index);
- if (!zram_allocated(zram, index))
- goto next;
-
- if (zram_test_flag(zram, index, ZRAM_WB) ||
- zram_test_flag(zram, index, ZRAM_SAME))
- goto next;
-
- if (mode & IDLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_IDLE))
- goto next;
- if (mode & HUGE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_HUGE))
- goto next;
- if (mode & INCOMPRESSIBLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
- goto next;
-
- ok = place_pp_slot(zram, ctl, index);
-next:
- zram_slot_unlock(zram, index);
- if (!ok)
- break;
- }
-
- return 0;
-}
-
-static ssize_t writeback_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
{
- struct zram *zram = dev_to_zram(dev);
- unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
- struct zram_pp_ctl *ctl = NULL;
+ unsigned long blk_idx = 0;
+ struct page *page = NULL;
struct zram_pp_slot *pps;
- unsigned long index = 0;
- struct bio bio;
struct bio_vec bio_vec;
- struct page *page = NULL;
- ssize_t ret = len;
- int mode, err;
- unsigned long blk_idx = 0;
-
- if (sysfs_streq(buf, "idle"))
- mode = IDLE_WRITEBACK;
- else if (sysfs_streq(buf, "huge"))
- mode = HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "huge_idle"))
- mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "incompressible"))
- mode = INCOMPRESSIBLE_WRITEBACK;
- else {
- if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
- return -EINVAL;
-
- if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
- index >= nr_pages)
- return -EINVAL;
-
- nr_pages = 1;
- mode = PAGE_WRITEBACK;
- }
-
- down_read(&zram->init_lock);
- if (!init_done(zram)) {
- ret = -EINVAL;
- goto release_init_lock;
- }
-
- /* Do not permit concurrent post-processing actions. */
- if (atomic_xchg(&zram->pp_in_progress, 1)) {
- up_read(&zram->init_lock);
- return -EAGAIN;
- }
-
- if (!zram->backing_dev) {
- ret = -ENODEV;
- goto release_init_lock;
- }
+ struct bio bio;
+ int ret = 0, err;
+ u32 index;
page = alloc_page(GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- goto release_init_lock;
- }
-
- ctl = init_pp_ctl();
- if (!ctl) {
- ret = -ENOMEM;
- goto release_init_lock;
- }
-
- scan_slots_for_writeback(zram, mode, nr_pages, index, ctl);
+ if (!page)
+ return -ENOMEM;
while ((pps = select_pp_slot(ctl))) {
spin_lock(&zram->wb_limit_lock);
@@ -929,10 +834,215 @@ next:
if (blk_idx)
free_block_bdev(zram, blk_idx);
-
-release_init_lock:
if (page)
__free_page(page);
+
+ return ret;
+}
+
+#define PAGE_WRITEBACK 0
+#define HUGE_WRITEBACK (1 << 0)
+#define IDLE_WRITEBACK (1 << 1)
+#define INCOMPRESSIBLE_WRITEBACK (1 << 2)
+
+static int parse_page_index(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ int ret;
+
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+ *hi = *lo + 1;
+ return 0;
+}
+
+static int parse_page_indexes(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ char *delim;
+ int ret;
+
+ delim = strchr(val, '-');
+ if (!delim)
+ return -EINVAL;
+
+ *delim = 0x00;
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+
+ ret = kstrtoul(delim + 1, 10, hi);
+ if (ret)
+ return ret;
+ if (*hi >= nr_pages || *lo > *hi)
+ return -ERANGE;
+ *hi += 1;
+ return 0;
+}
+
+static int parse_mode(char *val, u32 *mode)
+{
+ *mode = 0;
+
+ if (!strcmp(val, "idle"))
+ *mode = IDLE_WRITEBACK;
+ if (!strcmp(val, "huge"))
+ *mode = HUGE_WRITEBACK;
+ if (!strcmp(val, "huge_idle"))
+ *mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
+ if (!strcmp(val, "incompressible"))
+ *mode = INCOMPRESSIBLE_WRITEBACK;
+
+ if (*mode == 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int scan_slots_for_writeback(struct zram *zram, u32 mode,
+ unsigned long lo, unsigned long hi,
+ struct zram_pp_ctl *ctl)
+{
+ u32 index = lo;
+
+ while (index < hi) {
+ bool ok = true;
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ if (zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME))
+ goto next;
+
+ if (mode & IDLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_IDLE))
+ goto next;
+ if (mode & HUGE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_HUGE))
+ goto next;
+ if (mode & INCOMPRESSIBLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+ goto next;
+
+ ok = place_pp_slot(zram, ctl, index);
+next:
+ zram_slot_unlock(zram, index);
+ if (!ok)
+ break;
+ index++;
+ }
+
+ return 0;
+}
+
+static ssize_t writeback_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u64 nr_pages = zram->disksize >> PAGE_SHIFT;
+ unsigned long lo = 0, hi = nr_pages;
+ struct zram_pp_ctl *ctl = NULL;
+ char *args, *param, *val;
+ ssize_t ret = len;
+	u32 mode = 0;
+	int err;
+
+ down_read(&zram->init_lock);
+ if (!init_done(zram)) {
+ up_read(&zram->init_lock);
+ return -EINVAL;
+ }
+
+ /* Do not permit concurrent post-processing actions. */
+ if (atomic_xchg(&zram->pp_in_progress, 1)) {
+ up_read(&zram->init_lock);
+ return -EAGAIN;
+ }
+
+ if (!zram->backing_dev) {
+ ret = -ENODEV;
+ goto release_init_lock;
+ }
+
+ ctl = init_pp_ctl();
+ if (!ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ args = skip_spaces(buf);
+ while (*args) {
+ args = next_arg(args, &param, &val);
+
+ /*
+ * Workaround to support the old writeback interface.
+ *
+		 * The old writeback interface has a minor inconsistency: it
+		 * requires key=value only for the page_index parameter, while
+		 * the writeback mode is a valueless parameter.
+		 *
+		 * This is no longer the case and all parameters are now
+		 * required to have values. However, to keep supporting the
+		 * legacy writeback interface format, we check whether a
+		 * valueless parameter can be recognized as the (legacy)
+		 * writeback mode.
+ */
+ if (!val || !*val) {
+ err = parse_mode(param, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ break;
+ }
+
+ if (!strcmp(param, "type")) {
+ err = parse_mode(val, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ break;
+ }
+
+ if (!strcmp(param, "page_index")) {
+ err = parse_page_index(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ continue;
+ }
+
+ if (!strcmp(param, "page_indexes")) {
+ err = parse_page_indexes(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ continue;
+ }
+ }
+
+ err = zram_writeback_slots(zram, ctl);
+ if (err)
+ ret = err;
+
+release_init_lock:
release_pp_ctl(zram, ctl);
atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
@@ -1166,13 +1276,15 @@ static void comp_params_reset(struct zram *zram, u32 prio)
struct zcomp_params *params = &zram->params[prio];
vfree(params->dict);
- params->level = ZCOMP_PARAM_NO_LEVEL;
+ params->level = ZCOMP_PARAM_NOT_SET;
+ params->deflate.winbits = ZCOMP_PARAM_NOT_SET;
params->dict_sz = 0;
params->dict = NULL;
}
static int comp_params_store(struct zram *zram, u32 prio, s32 level,
- const char *dict_path)
+ const char *dict_path,
+ struct deflate_params *deflate_params)
{
ssize_t sz = 0;
@@ -1190,6 +1302,7 @@ static int comp_params_store(struct zram *zram, u32 prio, s32 level,
zram->params[prio].dict_sz = sz;
zram->params[prio].level = level;
+ zram->params[prio].deflate.winbits = deflate_params->winbits;
return 0;
}
@@ -1198,11 +1311,14 @@ static ssize_t algorithm_params_store(struct device *dev,
const char *buf,
size_t len)
{
- s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NO_LEVEL;
+ s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NOT_SET;
char *args, *param, *val, *algo = NULL, *dict_path = NULL;
+ struct deflate_params deflate_params;
struct zram *zram = dev_to_zram(dev);
int ret;
+ deflate_params.winbits = ZCOMP_PARAM_NOT_SET;
+
args = skip_spaces(buf);
while (*args) {
args = next_arg(args, &param, &val);
@@ -1233,6 +1349,13 @@ static ssize_t algorithm_params_store(struct device *dev,
dict_path = val;
continue;
}
+
+ if (!strcmp(param, "deflate.winbits")) {
+ ret = kstrtoint(val, 10, &deflate_params.winbits);
+ if (ret)
+ return ret;
+ continue;
+ }
}
/* Lookup priority by algorithm name */
@@ -1254,7 +1377,7 @@ static ssize_t algorithm_params_store(struct device *dev,
if (prio < ZRAM_PRIMARY_COMP || prio >= ZRAM_MAX_COMPS)
return -EINVAL;
- ret = comp_params_store(zram, prio, level, dict_path);
+ ret = comp_params_store(zram, prio, level, dict_path, &deflate_params);
return ret ? ret : len;
}
@@ -1694,7 +1817,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
*/
handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle))
return PTR_ERR((void *)handle);
@@ -1761,7 +1884,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle)) {
zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle);
@@ -1981,10 +2104,15 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
* We are holding per-CPU stream mutex and entry lock so better
* avoid direct reclaim. Allocation error is not fatal since
* we still have the old object in the mem_pool.
+ *
+ * XXX: technically, the node we really want here is the node that holds
+ * the original compressed data. But that would require us to modify
+ * zsmalloc API to return this information. For now, we will make do with
+ * the node of the page allocated for recompression.
*/
handle_new = zs_malloc(zram->mem_pool, comp_len_new,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle_new)) {
zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle_new);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 7771edf54fb3..4ab32abf0f48 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -56,18 +56,6 @@ config BT_HCIBTUSB_POLL_SYNC
Say Y here to enable USB poll_sync for Bluetooth USB devices by
default.
-config BT_HCIBTUSB_AUTO_ISOC_ALT
- bool "Automatically adjust alternate setting for Isoc endpoints"
- depends on BT_HCIBTUSB
- default y if CHROME_PLATFORMS
- help
- Say Y here to automatically adjusting the alternate setting for
- HCI_USER_CHANNEL whenever a SCO link is established.
-
- When enabled, btusb intercepts the HCI_EV_SYNC_CONN_COMPLETE packets
- and configures isoc endpoint alternate setting automatically when
- HCI_USER_CHANNEL is in use.
-
config BT_HCIBTUSB_BCM
bool "Broadcom protocol support"
depends on BT_HCIBTUSB
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 48e2f400957b..55cc1652bfe4 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -2719,7 +2719,7 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var)
} __packed data;
efi_status_t status;
- unsigned long data_size = 0;
+ unsigned long data_size = sizeof(data);
efi_guid_t guid = EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, 0x8d, 0x03,
0x77, 0x2e, 0xcc, 0x3d, 0xa5, 0x31);
@@ -2730,15 +2730,9 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var)
return -EOPNOTSUPP;
status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
- NULL);
-
- if (status != EFI_BUFFER_TOO_SMALL || !data_size)
- return -EIO;
-
- status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
&data);
- if (status != EFI_SUCCESS)
+ if (status != EFI_SUCCESS || data_size != sizeof(data))
return -ENXIO;
*dsbr_var = data.dsbr;
@@ -3688,7 +3682,7 @@ int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name)
}
EXPORT_SYMBOL_GPL(btintel_configure_setup);
-int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
+static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
{
struct intel_tlv *tlv = (void *)&skb->data[5];
@@ -3716,7 +3710,6 @@ int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
recv_frame:
return hci_recv_frame(hdev, skb);
}
-EXPORT_SYMBOL_GPL(btintel_diagnostics);
int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 2aece3effa4e..1d12c4113c66 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -277,7 +277,6 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
int btintel_shutdown_combined(struct hci_dev *hdev);
void btintel_hw_error(struct hci_dev *hdev, u8 code);
void btintel_print_fseq_info(struct hci_dev *hdev);
-int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -411,9 +410,4 @@ static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
static inline void btintel_print_fseq_info(struct hci_dev *hdev)
{
}
-
-static inline int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
-{
- return -EOPNOTSUPP;
-}
#endif
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index c1e69fcc9c4f..50fe17f1e1d1 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -208,6 +208,96 @@ static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
memcpy(buf->data, skb->data, tfd->size);
}
+static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ u16 cr_hia, cr_tia;
+ u32 reg, mbox_reg;
+ struct sk_buff *skb;
+ u8 buf[80];
+
+ skb = alloc_skb(1024, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ snprintf(buf, sizeof(buf), "%s", "---- Dump of debug registers ---");
+ bt_dev_dbg(hdev, "%s", buf);
+ skb_put_data(skb, buf, strlen(buf));
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
+ snprintf(buf, sizeof(buf), "boot stage: 0x%8.8x", reg);
+ bt_dev_dbg(hdev, "%s", buf);
+ skb_put_data(skb, buf, strlen(buf));
+ data->boot_stage_cache = reg;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_STATUS_REG);
+ snprintf(buf, sizeof(buf), "ipc status: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_CONTROL_REG);
+ snprintf(buf, sizeof(buf), "ipc control: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG);
+ snprintf(buf, sizeof(buf), "ipc sleep control: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+	/* Read the mailbox status and registers */
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MBOX_STATUS_REG);
+ snprintf(buf, sizeof(buf), "mbox status: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_1_REG);
+ snprintf(buf, sizeof(buf), "mbox_1: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_2_REG);
+ snprintf(buf, sizeof(buf), "mbox_2: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_3_REG);
+ snprintf(buf, sizeof(buf), "mbox_3: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_4_REG);
+ snprintf(buf, sizeof(buf), "mbox_4: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
+ cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
+ snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
+ cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
+ snprintf(buf, sizeof(buf), "txq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ snprintf(buf, sizeof(buf), "--------------------------------");
+ bt_dev_dbg(hdev, "%s", buf);
+
+ hci_recv_diag(hdev, skb);
+}
+
static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
struct sk_buff *skb)
{
@@ -237,8 +327,11 @@ static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
/* Wait for the complete interrupt - URBD0 */
ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
- if (!ret)
+ if (!ret) {
+ bt_dev_err(data->hdev, "tx completion timeout");
+ btintel_pcie_dump_debug_registers(data->hdev);
return -ETIME;
+ }
return 0;
}
@@ -756,6 +849,26 @@ static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
return 0;
}
+static inline bool btintel_pcie_in_lockdown(struct btintel_pcie_data *data)
+{
+ return (data->boot_stage_cache &
+ BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN) ||
+ (data->boot_stage_cache &
+ BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN);
+}
+
+static inline bool btintel_pcie_in_error(struct btintel_pcie_data *data)
+{
+ return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) ||
+ (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER);
+}
+
+static void btintel_pcie_msix_gp1_handler(struct btintel_pcie_data *data)
+{
+ bt_dev_err(data->hdev, "Received gp1 mailbox interrupt");
+ btintel_pcie_dump_debug_registers(data->hdev);
+}
+
/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
* BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
*/
@@ -779,6 +892,18 @@ static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
if (reg != data->img_resp_cache)
data->img_resp_cache = reg;
+ if (btintel_pcie_in_error(data)) {
+ bt_dev_err(data->hdev, "Controller in error state");
+ btintel_pcie_dump_debug_registers(data->hdev);
+ return;
+ }
+
+ if (btintel_pcie_in_lockdown(data)) {
+ bt_dev_err(data->hdev, "Controller in lockdown state");
+ btintel_pcie_dump_debug_registers(data->hdev);
+ return;
+ }
+
data->gp0_received = true;
old_ctxt = data->alive_intr_ctxt;
@@ -889,7 +1014,6 @@ static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *)skb->data;
- const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 };
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
@@ -945,20 +1069,13 @@ static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
}
}
- /* Handle all diagnostics events separately. May still call
- * hci_recv_frame.
- */
- if (len >= sizeof(diagnostics_hdr) &&
- memcmp(&skb->data[2], diagnostics_hdr,
- sizeof(diagnostics_hdr)) == 0) {
- return btintel_diagnostics(hdev, skb);
- }
-
/* This is a debug event that comes from IML and OP image when it
* starts execution. There is no need pass this event to stack.
*/
- if (skb->data[2] == 0x97)
+ if (skb->data[2] == 0x97) {
+ hci_recv_diag(hdev, skb);
return 0;
+ }
}
return hci_recv_frame(hdev, skb);
@@ -974,7 +1091,6 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
u8 pkt_type;
u16 plen;
u32 pcie_pkt_type;
- struct sk_buff *new_skb;
void *pdata;
struct hci_dev *hdev = data->hdev;
@@ -1051,24 +1167,20 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
- new_skb = bt_skb_alloc(plen, GFP_ATOMIC);
- if (!new_skb) {
- bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u",
- skb->len);
- ret = -ENOMEM;
- goto exit_error;
- }
-
- hci_skb_pkt_type(new_skb) = pkt_type;
- skb_put_data(new_skb, skb->data, plen);
+ hci_skb_pkt_type(skb) = pkt_type;
hdev->stat.byte_rx += plen;
+ skb_trim(skb, plen);
if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
- ret = btintel_pcie_recv_event(hdev, new_skb);
+ ret = btintel_pcie_recv_event(hdev, skb);
else
- ret = hci_recv_frame(hdev, new_skb);
+ ret = hci_recv_frame(hdev, skb);
+ skb = NULL; /* skb is freed in the callee */
exit_error:
+ if (skb)
+ kfree_skb(skb);
+
if (ret)
hdev->stat.err_rx++;
@@ -1202,8 +1314,6 @@ static void btintel_pcie_rx_work(struct work_struct *work)
struct btintel_pcie_data *data = container_of(work,
struct btintel_pcie_data, rx_work);
struct sk_buff *skb;
- int err;
- struct hci_dev *hdev = data->hdev;
if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
/* Unlike usb products, controller will not send hardware
@@ -1224,11 +1334,7 @@ static void btintel_pcie_rx_work(struct work_struct *work)
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
- err = btintel_pcie_recv_frame(data, skb);
- if (err)
- bt_dev_err(hdev, "Failed to send received frame: %d",
- err);
- kfree_skb(skb);
+ btintel_pcie_recv_frame(data, skb);
}
}
@@ -1281,10 +1387,8 @@ static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
/* Check CR_TIA and CR_HIA for change */
- if (cr_tia == cr_hia) {
- bt_dev_warn(hdev, "RXQ: no new CD found");
+ if (cr_tia == cr_hia)
return;
- }
rxq = &data->rxq;
@@ -1320,6 +1424,16 @@ static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
return IRQ_WAKE_THREAD;
}
+static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
+{
+ return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
+}
+
+static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
+{
+ return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
+}
+
static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
{
struct msix_entry *entry = dev_id;
@@ -1344,6 +1458,9 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
btintel_pcie_msix_hw_exp_handler(data);
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
+ btintel_pcie_msix_gp1_handler(data);
+
/* This interrupt is triggered by the firmware after updating
* boot_stage register and image_response register
*/
@@ -1351,12 +1468,18 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
btintel_pcie_msix_gp0_handler(data);
/* For TX */
- if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0)
+ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
btintel_pcie_msix_tx_handle(data);
+ if (!btintel_pcie_is_rxq_empty(data))
+ btintel_pcie_msix_rx_handle(data);
+ }
/* For RX */
- if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1)
+ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
btintel_pcie_msix_rx_handle(data);
+ if (!btintel_pcie_is_txackq_empty(data))
+ btintel_pcie_msix_tx_handle(data);
+ }
/*
* Before sending the interrupt the HW disables it to prevent a nested
@@ -2023,6 +2146,7 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
bt_dev_err(hdev, "Firmware download retry count: %d",
fw_dl_retry);
+ btintel_pcie_dump_debug_registers(hdev);
err = btintel_pcie_reset_bt(data);
if (err) {
bt_dev_err(hdev, "Failed to do shr reset: %d", err);
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index 873178019cad..21b964b15c1c 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -12,10 +12,17 @@
#define BTINTEL_PCIE_CSR_HW_REV_REG (BTINTEL_PCIE_CSR_BASE + 0x028)
#define BTINTEL_PCIE_CSR_RF_ID_REG (BTINTEL_PCIE_CSR_BASE + 0x09C)
#define BTINTEL_PCIE_CSR_BOOT_STAGE_REG (BTINTEL_PCIE_CSR_BASE + 0x108)
+#define BTINTEL_PCIE_CSR_IPC_CONTROL_REG (BTINTEL_PCIE_CSR_BASE + 0x10C)
+#define BTINTEL_PCIE_CSR_IPC_STATUS_REG (BTINTEL_PCIE_CSR_BASE + 0x110)
#define BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG (BTINTEL_PCIE_CSR_BASE + 0x114)
#define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG (BTINTEL_PCIE_CSR_BASE + 0x118)
#define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG (BTINTEL_PCIE_CSR_BASE + 0x11C)
#define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG (BTINTEL_PCIE_CSR_BASE + 0x12C)
+#define BTINTEL_PCIE_CSR_MBOX_1_REG (BTINTEL_PCIE_CSR_BASE + 0x170)
+#define BTINTEL_PCIE_CSR_MBOX_2_REG (BTINTEL_PCIE_CSR_BASE + 0x174)
+#define BTINTEL_PCIE_CSR_MBOX_3_REG (BTINTEL_PCIE_CSR_BASE + 0x178)
+#define BTINTEL_PCIE_CSR_MBOX_4_REG (BTINTEL_PCIE_CSR_BASE + 0x17C)
+#define BTINTEL_PCIE_CSR_MBOX_STATUS_REG (BTINTEL_PCIE_CSR_BASE + 0x180)
#define BTINTEL_PCIE_PRPH_DEV_ADDR_REG (BTINTEL_PCIE_CSR_BASE + 0x440)
#define BTINTEL_PCIE_PRPH_DEV_RD_REG (BTINTEL_PCIE_CSR_BASE + 0x458)
#define BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR (BTINTEL_PCIE_CSR_BASE + 0x460)
@@ -41,6 +48,9 @@
#define BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW (BIT(2))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN (BIT(10))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN (BIT(11))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR (BIT(12))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER (BIT(13))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED (BIT(14))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_MAC_ACCESS_ON (BIT(16))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ALIVE (BIT(23))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY (BIT(24))
@@ -89,6 +99,7 @@ enum msix_fh_int_causes {
/* Causes for the HW register interrupts */
enum msix_hw_int_causes {
BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0 = BIT(0), /* cause 32 */
+ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1 = BIT(1), /* cause 33 */
BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP = BIT(3), /* cause 35 */
};
@@ -121,6 +132,14 @@ enum btintel_pcie_tlv_type {
BTINTEL_FW_BUILD,
};
+/* causes for the MBOX interrupts */
+enum msix_mbox_int_causes {
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1 = BIT(0), /* cause MBOX1 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2 = BIT(1), /* cause MBOX2 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3 = BIT(2), /* cause MBOX3 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4 = BIT(3), /* cause MBOX4 */
+};
+
#define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE BIT(7)
/* Minimum and Maximum number of MSI-X Vector
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 07cd308f7abf..93932a0d8625 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -100,7 +100,9 @@ static int btmrvl_sdio_probe_of(struct device *dev,
}
/* Configure wakeup (enabled by default) */
- device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
}
}
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index edd5eead1e93..c16a3518b8ff 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -723,6 +723,10 @@ static int btmtksdio_close(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ /* Skip btmtksdio_close if BTMTKSDIO_FUNC_ENABLED isn't set */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+ return 0;
+
sdio_claim_host(bdev->func);
/* Disable interrupt */
@@ -1410,7 +1414,7 @@ static int btmtksdio_probe(struct sdio_func *func,
*/
pm_runtime_put_noidle(bdev->dev);
- err = device_init_wakeup(bdev->dev, true);
+ err = devm_device_init_wakeup(bdev->dev);
if (err)
bt_dev_err(hdev, "failed to initialize device wakeup");
@@ -1443,11 +1447,15 @@ static void btmtksdio_remove(struct sdio_func *func)
if (!bdev)
return;
+ hdev = bdev->hdev;
+
+ /* Make sure to call btmtksdio_close before removing sdio card */
+ if (test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+ btmtksdio_close(hdev);
+
/* Be consistent the state in btmtksdio_probe */
pm_runtime_get_noresume(bdev->dev);
- hdev = bdev->hdev;
-
sdio_set_drvdata(func, NULL);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 5091dea762a0..b34623a69b8a 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -17,6 +17,7 @@
#include <linux/crc32.h>
#include <linux/string_helpers.h>
#include <linux/gpio/consumer.h>
+#include <linux/of_irq.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -143,7 +144,9 @@ struct ps_data {
bool driver_sent_cmd;
u16 h2c_ps_interval;
u16 c2h_ps_interval;
+ bool wakeup_source;
struct gpio_desc *h2c_ps_gpio;
+ s32 irq_handler;
struct hci_dev *hdev;
struct work_struct work;
struct timer_list ps_timer;
@@ -476,12 +479,21 @@ static void ps_timeout_func(struct timer_list *t)
}
}
+static irqreturn_t ps_host_wakeup_irq_handler(int irq, void *priv)
+{
+ struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)priv;
+
+ bt_dev_dbg(nxpdev->hdev, "Host wakeup interrupt");
+ return IRQ_HANDLED;
+}
static int ps_setup(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct serdev_device *serdev = nxpdev->serdev;
struct ps_data *psdata = &nxpdev->psdata;
+ int ret;
+ /* Out-Of-Band Device Wakeup */
psdata->h2c_ps_gpio = devm_gpiod_get_optional(&serdev->dev, "device-wakeup",
GPIOD_OUT_LOW);
if (IS_ERR(psdata->h2c_ps_gpio)) {
@@ -493,11 +505,37 @@ static int ps_setup(struct hci_dev *hdev)
if (device_property_read_u8(&serdev->dev, "nxp,wakein-pin", &psdata->h2c_wakeup_gpio)) {
psdata->h2c_wakeup_gpio = 0xff; /* 0xff: use default pin/gpio */
} else if (!psdata->h2c_ps_gpio) {
- bt_dev_warn(hdev, "nxp,wakein-pin property without device-wakeup GPIO");
+ bt_dev_warn(hdev, "nxp,wakein-pin property without device-wakeup-gpios");
psdata->h2c_wakeup_gpio = 0xff;
}
- device_property_read_u8(&serdev->dev, "nxp,wakeout-pin", &psdata->c2h_wakeup_gpio);
+ /* Out-Of-Band Host Wakeup */
+ if (of_property_read_bool(serdev->dev.of_node, "wakeup-source")) {
+ psdata->irq_handler = of_irq_get_byname(serdev->dev.of_node, "wakeup");
+ bt_dev_info(nxpdev->hdev, "irq_handler: %d", psdata->irq_handler);
+ if (psdata->irq_handler > 0)
+ psdata->wakeup_source = true;
+ }
+
+ if (device_property_read_u8(&serdev->dev, "nxp,wakeout-pin", &psdata->c2h_wakeup_gpio)) {
+ psdata->c2h_wakeup_gpio = 0xff;
+ if (psdata->wakeup_source) {
+ bt_dev_warn(hdev, "host wakeup interrupt without nxp,wakeout-pin");
+ psdata->wakeup_source = false;
+ }
+ } else if (!psdata->wakeup_source) {
+ bt_dev_warn(hdev, "nxp,wakeout-pin property without host wakeup interrupt");
+ psdata->c2h_wakeup_gpio = 0xff;
+ }
+
+ if (psdata->wakeup_source) {
+ ret = devm_request_irq(&serdev->dev, psdata->irq_handler,
+ ps_host_wakeup_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ dev_name(&serdev->dev), nxpdev);
+ disable_irq(psdata->irq_handler);
+ device_init_wakeup(&serdev->dev, true);
+ }
psdata->hdev = hdev;
INIT_WORK(&psdata->work, ps_work_func);
@@ -637,12 +675,10 @@ static void ps_init(struct hci_dev *hdev)
psdata->ps_state = PS_STATE_AWAKE;
- if (psdata->c2h_wakeup_gpio) {
+ if (psdata->c2h_wakeup_gpio != 0xff)
psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_GPIO;
- } else {
+ else
psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE;
- psdata->c2h_wakeup_gpio = 0xff;
- }
psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID;
if (psdata->h2c_ps_gpio)
@@ -1286,7 +1322,9 @@ static void nxp_coredump(struct hci_dev *hdev)
u8 pcmd = 2;
skb = nxp_drv_send_cmd(hdev, HCI_NXP_TRIGGER_DUMP, 1, &pcmd);
- if (!IS_ERR(skb))
+ if (IS_ERR(skb))
+ bt_dev_err(hdev, "Failed to trigger FW Dump. (%ld)", PTR_ERR(skb));
+ else
kfree_skb(skb);
}
@@ -1445,9 +1483,6 @@ static int nxp_shutdown(struct hci_dev *hdev)
/* HCI_NXP_IND_RESET command may not returns any response */
if (!IS_ERR(skb))
kfree_skb(skb);
- } else if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
- nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
- nxp_set_baudrate_cmd(hdev, NULL);
}
return 0;
@@ -1799,13 +1834,15 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
wake_up_interruptible(&nxpdev->check_boot_sign_wait_q);
wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
- }
-
- if (test_bit(HCI_RUNNING, &hdev->flags)) {
- /* Ensure shutdown callback is executed before unregistering, so
- * that baudrate is reset to initial value.
+ } else {
+ /* Restore FW baudrate to fw_init_baudrate if changed.
+ * This will ensure FW baudrate is in sync with
+ * driver baudrate in case this driver is re-inserted.
*/
- nxp_shutdown(hdev);
+ if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
+ nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
+ nxp_set_baudrate_cmd(hdev, NULL);
+ }
}
ps_cleanup(nxpdev);
@@ -1820,6 +1857,11 @@ static int nxp_serdev_suspend(struct device *dev)
struct ps_data *psdata = &nxpdev->psdata;
ps_control(psdata->hdev, PS_STATE_SLEEP);
+
+ if (psdata->wakeup_source) {
+ enable_irq_wake(psdata->irq_handler);
+ enable_irq(psdata->irq_handler);
+ }
return 0;
}
@@ -1828,6 +1870,11 @@ static int nxp_serdev_resume(struct device *dev)
struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
struct ps_data *psdata = &nxpdev->psdata;
+ if (psdata->wakeup_source) {
+ disable_irq(psdata->irq_handler);
+ disable_irq_wake(psdata->irq_handler);
+ }
+
ps_control(psdata->hdev, PS_STATE_AWAKE);
return 0;
}
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 3d6778b95e00..edefb9dc76aa 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -889,7 +889,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_T)
variant = "t";
else if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_S)
- variant = "u";
+ variant = "s";
snprintf(config.fwname, sizeof(config.fwname),
"qca/cmnv%02x%s.bin", rom_ver, variant);
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index d3eba0d4a57d..7838c89e529e 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -1215,6 +1215,8 @@ next:
rtl_dev_err(hdev, "mandatory config file %s not found",
btrtl_dev->ic_info->cfg_name);
ret = btrtl_dev->cfg_len;
+ if (!ret)
+ ret = -EINVAL;
goto err_free;
}
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 5012b5ff92c8..9ab661d2d1e6 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,6 +21,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_drv.h>
#include "btintel.h"
#include "btbcm.h"
@@ -34,7 +35,6 @@ static bool force_scofix;
static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
static bool enable_poll_sync = IS_ENABLED(CONFIG_BT_HCIBTUSB_POLL_SYNC);
static bool reset = true;
-static bool auto_isoc_alt = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT);
static struct usb_driver btusb_driver;
@@ -513,6 +513,7 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
/* Realtek 8851BE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0xb850), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3600), .driver_info = BTUSB_REALTEK },
/* Realtek 8852AE Bluetooth devices */
@@ -678,6 +679,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3584), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK |
@@ -716,8 +719,12 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3608), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3613), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3630), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -1118,42 +1125,6 @@ static inline void btusb_free_frags(struct btusb_data *data)
spin_unlock_irqrestore(&data->rxlock, flags);
}
-static void btusb_sco_connected(struct btusb_data *data, struct sk_buff *skb)
-{
- struct hci_event_hdr *hdr = (void *) skb->data;
- struct hci_ev_sync_conn_complete *ev =
- (void *) skb->data + sizeof(*hdr);
- struct hci_dev *hdev = data->hdev;
- unsigned int notify_air_mode;
-
- if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
- return;
-
- if (skb->len < sizeof(*hdr) || hdr->evt != HCI_EV_SYNC_CONN_COMPLETE)
- return;
-
- if (skb->len != sizeof(*hdr) + sizeof(*ev) || ev->status)
- return;
-
- switch (ev->air_mode) {
- case BT_CODEC_CVSD:
- notify_air_mode = HCI_NOTIFY_ENABLE_SCO_CVSD;
- break;
-
- case BT_CODEC_TRANSPARENT:
- notify_air_mode = HCI_NOTIFY_ENABLE_SCO_TRANSP;
- break;
-
- default:
- return;
- }
-
- bt_dev_info(hdev, "enabling SCO with air mode %u", ev->air_mode);
- data->sco_num = 1;
- data->air_mode = notify_air_mode;
- schedule_work(&data->work);
-}
-
static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
if (data->intr_interval) {
@@ -1161,10 +1132,6 @@ static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
schedule_delayed_work(&data->rx_work, 0);
}
- /* Configure altsetting for HCI_USER_CHANNEL on SCO connected */
- if (auto_isoc_alt && hci_dev_test_flag(data->hdev, HCI_USER_CHANNEL))
- btusb_sco_connected(data, skb);
-
return data->recv_event(data->hdev, skb);
}
@@ -3010,55 +2977,27 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
}
-/*
- * ==0: not a dump pkt.
- * < 0: fails to handle a dump pkt
- * > 0: otherwise.
- */
+/* Return: 0 on success, negative errno on failure. */
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- int ret = 1;
+ int ret = 0;
+ unsigned int skip = 0;
u8 pkt_type;
- u8 *sk_ptr;
- unsigned int sk_len;
u16 seqno;
u32 dump_size;
- struct hci_event_hdr *event_hdr;
- struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
struct btusb_data *btdata = hci_get_drvdata(hdev);
struct usb_device *udev = btdata->udev;
pkt_type = hci_skb_pkt_type(skb);
- sk_ptr = skb->data;
- sk_len = skb->len;
+ skip = sizeof(struct hci_event_hdr);
+ if (pkt_type == HCI_ACLDATA_PKT)
+ skip += sizeof(struct hci_acl_hdr);
- if (pkt_type == HCI_ACLDATA_PKT) {
- acl_hdr = hci_acl_hdr(skb);
- if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
- return 0;
- sk_ptr += HCI_ACL_HDR_SIZE;
- sk_len -= HCI_ACL_HDR_SIZE;
- event_hdr = (struct hci_event_hdr *)sk_ptr;
- } else {
- event_hdr = hci_event_hdr(skb);
- }
-
- if ((event_hdr->evt != HCI_VENDOR_PKT)
- || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
- return 0;
-
- sk_ptr += HCI_EVENT_HDR_SIZE;
- sk_len -= HCI_EVENT_HDR_SIZE;
-
- dump_hdr = (struct qca_dump_hdr *)sk_ptr;
- if ((sk_len < offsetof(struct qca_dump_hdr, data))
- || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
- || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
- return 0;
+ skb_pull(skb, skip);
+ dump_hdr = (struct qca_dump_hdr *)skb->data;
- /*it is dump pkt now*/
seqno = le16_to_cpu(dump_hdr->seqno);
if (seqno == 0) {
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@@ -3078,16 +3017,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
btdata->qca_dump.ram_dump_size = dump_size;
btdata->qca_dump.ram_dump_seqno = 0;
- sk_ptr += offsetof(struct qca_dump_hdr, data0);
- sk_len -= offsetof(struct qca_dump_hdr, data0);
+
+ skb_pull(skb, offsetof(struct qca_dump_hdr, data0));
usb_disable_autosuspend(udev);
bt_dev_info(hdev, "%s memdump size(%u)\n",
(pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event",
dump_size);
} else {
- sk_ptr += offsetof(struct qca_dump_hdr, data);
- sk_len -= offsetof(struct qca_dump_hdr, data);
+ skb_pull(skb, offsetof(struct qca_dump_hdr, data));
}
if (!btdata->qca_dump.ram_dump_size) {
@@ -3107,7 +3045,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
return ret;
}
- skb_pull(skb, skb->len - sk_len);
hci_devcd_append(hdev, skb);
btdata->qca_dump.ram_dump_seqno++;
if (seqno == QCA_LAST_SEQUENCE_NUM) {
@@ -3132,17 +3069,74 @@ out:
return ret;
}
+/* Return: true if the ACL packet is a dump packet, false otherwise. */
+static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_event_hdr *event_hdr;
+ struct hci_acl_hdr *acl_hdr;
+ struct qca_dump_hdr *dump_hdr;
+ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+ bool is_dump = false;
+
+ if (!clone)
+ return false;
+
+ acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr));
+ if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE))
+ goto out;
+
+ event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
+ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
+ goto out;
+
+ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
+ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+ goto out;
+
+ is_dump = true;
+out:
+ consume_skb(clone);
+ return is_dump;
+}
+
+/* Return: true if the event packet is a dump packet, false otherwise. */
+static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_event_hdr *event_hdr;
+ struct qca_dump_hdr *dump_hdr;
+ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+ bool is_dump = false;
+
+ if (!clone)
+ return false;
+
+ event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
+ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
+ goto out;
+
+ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
+ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+ goto out;
+
+ is_dump = true;
+out:
+ consume_skb(clone);
+ return is_dump;
+}
+
static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- if (handle_dump_pkt_qca(hdev, skb))
- return 0;
+ if (acl_pkt_is_dump_qca(hdev, skb))
+ return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- if (handle_dump_pkt_qca(hdev, skb))
- return 0;
+ if (evt_pkt_is_dump_qca(hdev, skb))
+ return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
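
The two *_pkt_is_dump_qca() helpers above classify a frame without consuming it: all header checks run on a clone, so a frame that turns out not to be a memdump packet reaches hci_recv_frame() untouched, and only confirmed dump packets are handed to handle_dump_pkt_qca(), which then skb_pull()s the real buffer. A minimal sketch of that peek-without-consuming idiom, with an illustrative helper name (not part of the patch):

#include <linux/skbuff.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>

/* Hypothetical helper: true if skb starts with an HCI vendor event.
 * The clone shares the data buffer, so skb_pull_data() on the clone
 * leaves the original skb intact; it returns NULL if the buffer is
 * shorter than the requested header.
 */
static bool skb_starts_with_vendor_evt(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
	struct hci_event_hdr *hdr;
	bool match = false;

	if (!clone)
		return false;

	hdr = skb_pull_data(clone, sizeof(*hdr));
	if (hdr && hdr->evt == HCI_VENDOR_PKT)
		match = true;

	consume_skb(clone);		/* only the clone is dropped */
	return match;
}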
@@ -3726,31 +3720,133 @@ static const struct file_operations force_poll_sync_fops = {
.llseek = default_llseek,
};
-static ssize_t isoc_alt_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+#define BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS \
+ hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0000)
+#define BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE 0
+struct btusb_hci_drv_rp_supported_altsettings {
+ __u8 num;
+ __u8 altsettings[];
+} __packed;
+
+#define BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING \
+ hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0001)
+#define BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE 1
+struct btusb_hci_drv_cmd_switch_altsetting {
+ __u8 altsetting;
+} __packed;
+
+static const struct {
+ u16 opcode;
+ const char *desc;
+} btusb_hci_drv_supported_commands[] = {
+ /* Common commands */
+ { HCI_DRV_OP_READ_INFO, "Read Info" },
+
+ /* Driver specific commands */
+ { BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS, "Supported Altsettings" },
+ { BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING, "Switch Altsetting" },
+};
+static int btusb_hci_drv_read_info(struct hci_dev *hdev, void *data,
+ u16 data_len)
{
- struct btusb_data *data = dev_get_drvdata(dev);
+ struct hci_drv_rp_read_info *rp;
+ size_t rp_size;
+ int err, i;
+ u16 opcode, num_supported_commands =
+ ARRAY_SIZE(btusb_hci_drv_supported_commands);
+
+ rp_size = sizeof(*rp) + num_supported_commands * 2;
+
+ rp = kmalloc(rp_size, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
- return sysfs_emit(buf, "%d\n", data->isoc_altsetting);
+ strscpy_pad(rp->driver_name, btusb_driver.name);
+
+ rp->num_supported_commands = cpu_to_le16(num_supported_commands);
+ for (i = 0; i < num_supported_commands; i++) {
+ opcode = btusb_hci_drv_supported_commands[i].opcode;
+ bt_dev_info(hdev,
+ "Supported HCI Drv command (0x%02x|0x%04x): %s",
+ hci_opcode_ogf(opcode),
+ hci_opcode_ocf(opcode),
+ btusb_hci_drv_supported_commands[i].desc);
+ rp->supported_commands[i] = cpu_to_le16(opcode);
+ }
+
+ err = hci_drv_cmd_complete(hdev, HCI_DRV_OP_READ_INFO,
+ HCI_DRV_STATUS_SUCCESS, rp, rp_size);
+
+ kfree(rp);
+ return err;
}
-static ssize_t isoc_alt_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int btusb_hci_drv_supported_altsettings(struct hci_dev *hdev, void *data,
+ u16 data_len)
{
- struct btusb_data *data = dev_get_drvdata(dev);
- int alt;
- int ret;
+ struct btusb_data *drvdata = hci_get_drvdata(hdev);
+ struct btusb_hci_drv_rp_supported_altsettings *rp;
+ size_t rp_size;
+ int err;
+ u8 i;
+
+	/* There are at most 7 altsettings (0 - 6) */
+	rp = kmalloc(sizeof(*rp) + 7, GFP_KERNEL);
+	if (!rp)
+		return -ENOMEM;
+
+	rp->num = 0;
+ if (!drvdata->isoc)
+ goto done;
+
+ for (i = 0; i <= 6; i++) {
+ if (btusb_find_altsetting(drvdata, i))
+ rp->altsettings[rp->num++] = i;
+ }
+
+done:
+ rp_size = sizeof(*rp) + rp->num;
+
+ err = hci_drv_cmd_complete(hdev, BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS,
+ HCI_DRV_STATUS_SUCCESS, rp, rp_size);
+ kfree(rp);
+ return err;
+}
+
+static int btusb_hci_drv_switch_altsetting(struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct btusb_hci_drv_cmd_switch_altsetting *cmd = data;
+ u8 status;
- if (kstrtoint(buf, 10, &alt))
- return -EINVAL;
+ if (cmd->altsetting > 6) {
+ status = HCI_DRV_STATUS_INVALID_PARAMETERS;
+ } else {
+ if (btusb_switch_alt_setting(hdev, cmd->altsetting))
+ status = HCI_DRV_STATUS_UNSPECIFIED_ERROR;
+ else
+ status = HCI_DRV_STATUS_SUCCESS;
+ }
- ret = btusb_switch_alt_setting(data->hdev, alt);
- return ret < 0 ? ret : count;
+ return hci_drv_cmd_status(hdev, BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING,
+ status);
}
-static DEVICE_ATTR_RW(isoc_alt);
+static const struct hci_drv_handler btusb_hci_drv_common_handlers[] = {
+ { btusb_hci_drv_read_info, HCI_DRV_READ_INFO_SIZE },
+};
+
+static const struct hci_drv_handler btusb_hci_drv_specific_handlers[] = {
+ { btusb_hci_drv_supported_altsettings,
+ BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE },
+ { btusb_hci_drv_switch_altsetting,
+ BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE },
+};
+
+static struct hci_drv btusb_hci_drv = {
+ .common_handler_count = ARRAY_SIZE(btusb_hci_drv_common_handlers),
+ .common_handlers = btusb_hci_drv_common_handlers,
+ .specific_handler_count = ARRAY_SIZE(btusb_hci_drv_specific_handlers),
+ .specific_handlers = btusb_hci_drv_specific_handlers,
+};
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
@@ -3891,12 +3987,13 @@ static int btusb_probe(struct usb_interface *intf,
data->reset_gpio = reset_gpio;
}
- hdev->open = btusb_open;
- hdev->close = btusb_close;
- hdev->flush = btusb_flush;
- hdev->send = btusb_send_frame;
- hdev->notify = btusb_notify;
- hdev->wakeup = btusb_wakeup;
+ hdev->open = btusb_open;
+ hdev->close = btusb_close;
+ hdev->flush = btusb_flush;
+ hdev->send = btusb_send_frame;
+ hdev->notify = btusb_notify;
+ hdev->wakeup = btusb_wakeup;
+ hdev->hci_drv = &btusb_hci_drv;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
@@ -4115,10 +4212,6 @@ static int btusb_probe(struct usb_interface *intf,
data->isoc, data);
if (err < 0)
goto out_free_dev;
-
- err = device_create_file(&intf->dev, &dev_attr_isoc_alt);
- if (err)
- goto out_free_dev;
}
if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && data->diag) {
@@ -4165,10 +4258,8 @@ static void btusb_disconnect(struct usb_interface *intf)
hdev = data->hdev;
usb_set_intfdata(data->intf, NULL);
- if (data->isoc) {
- device_remove_file(&intf->dev, &dev_attr_isoc_alt);
+ if (data->isoc)
usb_set_intfdata(data->isoc, NULL);
- }
if (data->diag)
usb_set_intfdata(data->diag, NULL);
diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c
index dc9541e76d81..1394c575aa6d 100644
--- a/drivers/bluetooth/hci_aml.c
+++ b/drivers/bluetooth/hci_aml.c
@@ -313,8 +313,7 @@ static int aml_download_firmware(struct hci_dev *hdev, const char *fw_name)
goto exit;
exit:
- if (firmware)
- release_firmware(firmware);
+ release_firmware(firmware);
return ret;
}
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index a51935d37e5d..59f4d7bdffdc 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -289,18 +289,18 @@ static void vhci_coredump(struct hci_dev *hdev)
static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
- char buf[80];
+ const char *buf;
- snprintf(buf, sizeof(buf), "Controller Name: vhci_ctrl\n");
+ buf = "Controller Name: vhci_ctrl\n";
skb_put_data(skb, buf, strlen(buf));
- snprintf(buf, sizeof(buf), "Firmware Version: vhci_fw\n");
+ buf = "Firmware Version: vhci_fw\n";
skb_put_data(skb, buf, strlen(buf));
- snprintf(buf, sizeof(buf), "Driver: vhci_drv\n");
+ buf = "Driver: vhci_drv\n";
skb_put_data(skb, buf, strlen(buf));
- snprintf(buf, sizeof(buf), "Vendor: vhci\n");
+ buf = "Vendor: vhci\n";
skb_put_data(skb, buf, strlen(buf));
}
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index ee29162da4ee..91ef99c42344 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -395,10 +395,7 @@ static struct attribute *gisb_arb_sysfs_attrs[] = {
&dev_attr_gisb_arb_timeout.attr,
NULL,
};
-
-static struct attribute_group gisb_arb_sysfs_attr_group = {
- .attrs = gisb_arb_sysfs_attrs,
-};
+ATTRIBUTE_GROUPS(gisb_arb_sysfs);
static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
{ .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 },
@@ -490,10 +487,6 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
- if (err)
- return err;
-
platform_set_drvdata(pdev, gdev);
list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
@@ -550,6 +543,7 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
.name = "brcm-gisb-arb",
.of_match_table = brcmstb_gisb_arb_of_match,
.pm = &brcmstb_gisb_arb_pm_ops,
+ .dev_groups = gisb_arb_sysfs_groups,
},
};
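
The brcmstb_gisb conversion above is the standard way to hand sysfs attribute lifetime to the driver core: declare the attributes, let ATTRIBUTE_GROUPS() generate the <name>_groups array, and point .dev_groups at it so the core creates and removes the files around probe and remove. A self-contained sketch of the pattern with illustrative names:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);		/* emits example_groups */

static struct platform_driver example_driver = {
	.driver = {
		.name		= "example",
		/* no sysfs_create_group()/sysfs_remove_group() calls needed */
		.dev_groups	= example_groups,
	},
};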
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 52053f7c6d9a..c63a7e688db6 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -806,8 +806,6 @@ int dprc_cleanup(struct fsl_mc_device *mc_dev)
dev_set_msi_domain(&mc_dev->dev, NULL);
}
- fsl_mc_cleanup_all_resource_pools(mc_dev);
-
/* if this step fails we cannot go further with cleanup as there is no way of
* communicating with the firmware
*/
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index dd1b5c0fb7e2..38d40c09b719 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -489,7 +489,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
cmd_params->obj_id = cpu_to_le32(obj_id);
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -561,7 +561,7 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
cmd_params->obj_id = cpu_to_le32(obj_id);
cmd_params->region_index = region_index;
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index 6c3beb82dd1b..d2ea59471323 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -555,27 +555,6 @@ void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
}
}
-static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
- enum fsl_mc_pool_type pool_type)
-{
- struct fsl_mc_resource *resource;
- struct fsl_mc_resource *next;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
- struct fsl_mc_resource_pool *res_pool =
- &mc_bus->resource_pools[pool_type];
-
- list_for_each_entry_safe(resource, next, &res_pool->free_list, node)
- devm_kfree(&mc_bus_dev->dev, resource);
-}
-
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-{
- int pool_type;
-
- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
- fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
-}
-
/*
* fsl_mc_allocator_probe - callback invoked when an allocatable device is
* being added to the system
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index a8be8cf246fb..7671bd158545 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -139,9 +139,9 @@ static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *e
static int fsl_mc_dma_configure(struct device *dev)
{
+ const struct device_driver *drv = READ_ONCE(dev->driver);
struct device *dma_dev = dev;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
u32 input_id = mc_dev->icid;
int ret;
@@ -153,8 +153,8 @@ static int fsl_mc_dma_configure(struct device *dev)
else
ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
- /* @mc_drv may not be valid when we're called from the IOMMU layer */
- if (!ret && dev->driver && !mc_drv->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && drv && !to_fsl_mc_driver(drv)->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
@@ -906,8 +906,10 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
error_cleanup_dev:
kfree(mc_dev->regions);
- kfree(mc_bus);
- kfree(mc_dev);
+ if (mc_bus)
+ kfree(mc_bus);
+ else
+ kfree(mc_dev);
return error;
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index e1b7ec3ed1a7..beed4c53533d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -629,8 +629,6 @@ int __init fsl_mc_allocator_driver_init(void);
void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-
int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
enum fsl_mc_pool_type pool_type,
struct fsl_mc_resource
diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
index 9c4c1395fcdb..823969e4159c 100644
--- a/drivers/bus/fsl-mc/fsl-mc-uapi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
@@ -48,6 +48,7 @@ enum fsl_mc_cmd_index {
DPRC_GET_POOL,
DPRC_GET_POOL_COUNT,
DPRC_GET_CONNECTION,
+ DPRC_GET_MEM,
DPCI_GET_LINK_STATE,
DPCI_GET_PEER_ATTR,
DPAIOP_GET_SL_VERSION,
@@ -194,6 +195,12 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.token = true,
.size = 32,
},
+ [DPRC_GET_MEM] = {
+ .cmdid_value = 0x16D0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
[DPCI_GET_LINK_STATE] = {
.cmdid_value = 0x0E10,
@@ -275,13 +282,13 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.size = 8,
},
[DPSW_GET_TAILDROP] = {
- .cmdid_value = 0x0A80,
+ .cmdid_value = 0x0A90,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 14,
},
[DPSW_SET_TAILDROP] = {
- .cmdid_value = 0x0A90,
+ .cmdid_value = 0x0A80,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 24,
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index a0ad7866cbfc..cd8754763f40 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -214,12 +214,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
if (error < 0)
goto error_cleanup_resource;
- dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
- &dpmcp_dev->dev,
- DL_FLAG_AUTOREMOVE_CONSUMER);
- if (!dpmcp_dev->consumer_link) {
- error = -EINVAL;
- goto error_cleanup_mc_io;
+ /* If the DPRC device itself tries to allocate a portal (usually for
+ * UAPI interaction), don't add a device link between them since the
+ * DPMCP device is an actual child device of the DPRC and a reverse
+ * dependency is not allowed.
+ */
+ if (mc_dev != mc_bus_dev) {
+ dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+ &dpmcp_dev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dpmcp_dev->consumer_link) {
+ error = -EINVAL;
+ goto error_cleanup_mc_io;
+ }
}
*new_mc_io = mc_io;
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index f2052cd0a051..b22c59d57c8f 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -19,7 +19,7 @@
/*
* Timeout in milliseconds to wait for the completion of an MC command
*/
-#define MC_CMD_COMPLETION_TIMEOUT_MS 500
+#define MC_CMD_COMPLETION_TIMEOUT_MS 15000
/*
* usleep_range() min and max values used to throttle down polling
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 1e57ebfb7622..6c3e5c5dae10 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -737,9 +737,9 @@ static int moxtet_irq_setup(struct moxtet *moxtet)
{
int i, ret;
- moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node,
- MOXTET_NIRQS, 0,
- &moxtet_irq_domain, moxtet);
+ moxtet->irq.domain = irq_domain_create_simple(of_fwnode_handle(moxtet->dev->of_node),
+ MOXTET_NIRQS, 0,
+ &moxtet_irq_domain, moxtet);
if (moxtet->irq.domain == NULL) {
dev_err(moxtet->dev, "Could not add IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index f67b927ae4ca..9f624e5da991 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -677,51 +677,6 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
return 0;
}
-/* Interconnect instances to probe before l4_per instances */
-static struct resource early_bus_ranges[] = {
- /* am3/4 l4_wkup */
- { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
- /* omap4/5 and dra7 l4_cfg */
- { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
- /* omap4 l4_wkup */
- { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
- /* omap5 and dra7 l4_wkup without dra7 dcan segment */
- { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
-};
-
-static atomic_t sysc_defer = ATOMIC_INIT(10);
-
-/**
- * sysc_defer_non_critical - defer non_critical interconnect probing
- * @ddata: device driver data
- *
- * We want to probe l4_cfg and l4_wkup interconnect instances before any
- * l4_per instances as l4_per instances depend on resources on l4_cfg and
- * l4_wkup interconnects.
- */
-static int sysc_defer_non_critical(struct sysc *ddata)
-{
- struct resource *res;
- int i;
-
- if (!atomic_read(&sysc_defer))
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
- res = &early_bus_ranges[i];
- if (ddata->module_pa >= res->start &&
- ddata->module_pa <= res->end) {
- atomic_set(&sysc_defer, 0);
-
- return 0;
- }
- }
-
- atomic_dec_if_positive(&sysc_defer);
-
- return -EPROBE_DEFER;
-}
-
static struct device_node *stdout_path;
static void sysc_init_stdout_path(struct sysc *ddata)
@@ -947,10 +902,6 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
if (error)
return error;
- error = sysc_defer_non_critical(ddata);
- if (error)
- return error;
-
sysc_check_children(ddata);
if (!of_property_present(np, "reg"))
@@ -2036,6 +1987,21 @@ static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
}
+static void sysc_module_enable_quirk_pruss(struct sysc *ddata)
+{
+ u32 reg;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /*
+	 * Clearing the SYSC_PRUSS_STANDBY_INIT bit updates the OCP master
+	 * port configuration to enable memory access outside of the
+	 * PRU-ICSS subsystem.
+ */
+ reg &= (~SYSC_PRUSS_STANDBY_INIT);
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+}
+
static void sysc_init_module_quirks(struct sysc *ddata)
{
if (ddata->legacy_mode || !ddata->name)
@@ -2088,8 +2054,10 @@ static void sysc_init_module_quirks(struct sysc *ddata)
ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
}
- if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) {
+ ddata->module_enable_quirk = sysc_module_enable_quirk_pruss;
ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
+ }
}
static int sysc_clockdomain_init(struct sysc *ddata)
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index 6874b72ec59d..e1a283805ea7 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -118,6 +118,8 @@ static void ccache_config_read(void)
}
static const struct of_device_id sifive_ccache_ids[] = {
+ { .compatible = "eswin,eic7700-l3-cache",
+ .data = (void *)(QUIRK_NONSTANDARD_CACHE_OPS) },
{ .compatible = "sifive,fu540-c000-ccache" },
{ .compatible = "sifive,fu740-c000-ccache" },
{ .compatible = "starfive,jh7100-ccache",
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index b163e043c687..21a10552da61 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3677,8 +3677,7 @@ static void cdrom_sysctl_register(void)
static void cdrom_sysctl_unregister(void)
{
- if (cdrom_sysctl_header)
- unregister_sysctl_table(cdrom_sysctl_header);
+ unregister_sysctl_table(cdrom_sysctl_header);
}
#else /* CONFIG_SYSCTL */
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 8e41731d3642..bf490967241a 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -16,7 +16,7 @@
#include <linux/mmzone.h>
#include <asm/page.h> /* PAGE_SIZE */
#include <asm/e820/api.h>
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
#include <asm/gart.h>
#include "agp.h"
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index ef30445527a2..bcc26785175d 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -53,6 +53,7 @@ struct intel_gtt_driver {
* of the mmio register file, that's done in the generic code. */
void (*cleanup)(void);
void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+ dma_addr_t (*read_entry)(unsigned int entry, bool *is_present, bool *is_local);
/* Flags is a more or less chipset specific opaque value.
* For chipsets that need to support old ums (non-gem) code, this
* needs to be identical to the various supported agp memory types! */
@@ -336,6 +337,19 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i810_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u32 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = val & I810_PTE_LOCAL;
+
+ return val & ~0xfff;
+}
+
static resource_size_t intel_gtt_stolen_size(void)
{
u16 gmch_ctrl;
@@ -741,6 +755,19 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i830_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u32 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = false;
+
+ return val & ~0xfff;
+}
+
bool intel_gmch_enable_gtt(void)
{
u8 __iomem *reg;
@@ -878,6 +905,13 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
}
EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries);
+dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
+ bool *is_present, bool *is_local)
+{
+ return intel_private.driver->read_entry(pg, is_present, is_local);
+}
+EXPORT_SYMBOL(intel_gmch_gtt_read_entry);
+
#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gmch_gtt_insert_pages(unsigned int first_entry,
unsigned int num_entries,
@@ -1126,6 +1160,19 @@ static void i965_write_entry(dma_addr_t addr,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i965_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u64 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = false;
+
+ return ((val & 0xf0) << 28) | (val & ~0xfff);
+}
+
static int i9xx_setup(void)
{
phys_addr_t reg_addr;
@@ -1187,6 +1234,7 @@ static const struct intel_gtt_driver i81x_gtt_driver = {
.cleanup = i810_cleanup,
.check_flags = i830_check_flags,
.write_entry = i810_write_entry,
+ .read_entry = i810_read_entry,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
.gen = 2,
@@ -1194,6 +1242,7 @@ static const struct intel_gtt_driver i8xx_gtt_driver = {
.setup = i830_setup,
.cleanup = i830_cleanup,
.write_entry = i830_write_entry,
+ .read_entry = i830_read_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i830_chipset_flush,
@@ -1205,6 +1254,7 @@ static const struct intel_gtt_driver i915_gtt_driver = {
.cleanup = i9xx_cleanup,
/* i945 is the last gpu to need phys mem (for overlay and cursors). */
.write_entry = i830_write_entry,
+ .read_entry = i830_read_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1215,6 +1265,7 @@ static const struct intel_gtt_driver g33_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1225,6 +1276,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1235,6 +1287,7 @@ static const struct intel_gtt_driver i965_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1244,6 +1297,7 @@ static const struct intel_gtt_driver g4x_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1254,6 +1308,7 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
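
Until now intel-gtt only offered write_entry(); the new read_entry hook and the exported intel_gmch_gtt_read_entry() let the GMCH user (i915) read a PTE back and decode its state. A hedged caller-side sketch (the wrapper name is illustrative):

#include <drm/intel/intel-gtt.h>	/* prototype location may differ per tree */
#include <linux/printk.h>

/* Illustrative: dump one GTT entry using the new read-back helper. */
static void example_dump_gtt_entry(unsigned int pg)
{
	bool present, local;
	dma_addr_t addr;

	addr = intel_gmch_gtt_read_entry(pg, &present, &local);
	pr_info("GTT[%u]: %pad%s%s\n", pg, &addr,
		present ? " (present)" : " (scratch)",
		local ? " (local)" : "");
}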
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index e424360fb4a1..4787391bb6b4 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -11,6 +11,7 @@
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
+#include <asm/msr.h>
#include "agp.h"
/* NVIDIA registers */
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 143406bc6939..d2b00458761e 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -37,6 +37,7 @@ struct atmel_trng {
struct clk *clk;
void __iomem *base;
struct hwrng rng;
+ struct device *dev;
bool has_half_rate;
};
@@ -59,9 +60,9 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
u32 *data = buf;
int ret;
- ret = pm_runtime_get_sync((struct device *)trng->rng.priv);
+ ret = pm_runtime_get_sync(trng->dev);
if (ret < 0) {
- pm_runtime_put_sync((struct device *)trng->rng.priv);
+ pm_runtime_put_sync(trng->dev);
return ret;
}
@@ -79,8 +80,8 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
ret = 4;
out:
- pm_runtime_mark_last_busy((struct device *)trng->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv);
+ pm_runtime_mark_last_busy(trng->dev);
+ pm_runtime_put_sync_autosuspend(trng->dev);
return ret;
}
@@ -134,9 +135,9 @@ static int atmel_trng_probe(struct platform_device *pdev)
return -ENODEV;
trng->has_half_rate = data->has_half_rate;
+ trng->dev = &pdev->dev;
trng->rng.name = pdev->name;
trng->rng.read = atmel_trng_read;
- trng->rng.priv = (unsigned long)&pdev->dev;
platform_set_drvdata(pdev, trng);
#ifndef CONFIG_PM
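
The atmel change above and the mtk/npcm changes below make the same cleanup: keep a struct device pointer in the driver's private data and recover that private data from the struct hwrng with container_of(), instead of stashing the device in rng->priv as an unsigned long and casting it back. A minimal sketch of the resulting shape, with illustrative names:

#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

struct example_trng {
	void __iomem *base;
	struct hwrng rng;
	struct device *dev;	/* replaces the cast of rng.priv */
};

static int example_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct example_trng *trng = container_of(rng, struct example_trng, rng);
	int ret;

	ret = pm_runtime_resume_and_get(trng->dev);
	if (ret < 0)
		return ret;

	/* ... copy up to max bytes from trng->base into buf ... */
	ret = 0;	/* bytes actually produced */

	pm_runtime_put_sync_autosuspend(trng->dev);
	return ret;
}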
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index 1e3048f2bb38..b7fa1bc1122b 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -36,6 +36,7 @@ struct mtk_rng {
void __iomem *base;
struct clk *clk;
struct hwrng rng;
+ struct device *dev;
};
static int mtk_rng_init(struct hwrng *rng)
@@ -85,7 +86,7 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
struct mtk_rng *priv = to_mtk_rng(rng);
int retval = 0;
- pm_runtime_get_sync((struct device *)priv->rng.priv);
+ pm_runtime_get_sync(priv->dev);
while (max >= sizeof(u32)) {
if (!mtk_rng_wait_ready(rng, wait))
@@ -97,8 +98,8 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max -= sizeof(u32);
}
- pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
}
@@ -112,13 +113,13 @@ static int mtk_rng_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->dev = &pdev->dev;
priv->rng.name = pdev->name;
#ifndef CONFIG_PM
priv->rng.init = mtk_rng_init;
priv->rng.cleanup = mtk_rng_cleanup;
#endif
priv->rng.read = mtk_rng_read;
- priv->rng.priv = (unsigned long)&pdev->dev;
priv->rng.quality = 900;
priv->clk = devm_clk_get(&pdev->dev, "rng");
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index 9ff00f096f38..3e308c890bd2 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -32,6 +32,7 @@
struct npcm_rng {
void __iomem *base;
struct hwrng rng;
+ struct device *dev;
u32 clkp;
};
@@ -57,7 +58,7 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
int retval = 0;
int ready;
- pm_runtime_get_sync((struct device *)priv->rng.priv);
+ pm_runtime_get_sync(priv->dev);
while (max) {
if (wait) {
@@ -79,8 +80,8 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max--;
}
- pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
}
@@ -109,7 +110,7 @@ static int npcm_rng_probe(struct platform_device *pdev)
#endif
priv->rng.name = pdev->name;
priv->rng.read = npcm_rng_read;
- priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->dev = &pdev->dev;
priv->clkp = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev);
writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG);
diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c
index 161050591663..fb4a30b95507 100644
--- a/drivers/char/hw_random/rockchip-rng.c
+++ b/drivers/char/hw_random/rockchip-rng.c
@@ -93,6 +93,30 @@
#define TRNG_v1_VERSION_CODE 0x46bc
/* end of TRNG_V1 register definitions */
+/*
+ * RKRNG register definitions
+ * The RKRNG IP is a stand-alone TRNG implementation (not part of a crypto IP)
+ * and can be found in the Rockchip RK3576, Rockchip RK3562 and Rockchip RK3528
+ * SoCs. It can either output true randomness (TRNG) or "deterministic"
+ * randomness derived from hashing the true entropy (DRNG). This driver
+ * implementation uses just the true entropy, and leaves stretching the entropy
+ * up to Linux.
+ */
+#define RKRNG_CFG 0x0000
+#define RKRNG_CTRL 0x0010
+#define RKRNG_CTRL_REQ_TRNG BIT(4)
+#define RKRNG_STATE 0x0014
+#define RKRNG_STATE_TRNG_RDY BIT(4)
+#define RKRNG_TRNG_DATA0 0x0050
+#define RKRNG_TRNG_DATA1 0x0054
+#define RKRNG_TRNG_DATA2 0x0058
+#define RKRNG_TRNG_DATA3 0x005C
+#define RKRNG_TRNG_DATA4 0x0060
+#define RKRNG_TRNG_DATA5 0x0064
+#define RKRNG_TRNG_DATA6 0x0068
+#define RKRNG_TRNG_DATA7 0x006C
+#define RKRNG_READ_LEN 32
+
/* Before removing this assert, give rk3588_rng_read an upper bound of 32 */
static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0),
"You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats.");
@@ -205,6 +229,46 @@ out:
return (ret < 0) ? ret : to_read;
}
+static int rk3576_rng_init(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+
+ return rk_rng_enable_clks(rk_rng);
+}
+
+static int rk3576_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ size_t to_read = min_t(size_t, max, RKRNG_READ_LEN);
+ int ret = 0;
+ u32 val;
+
+ ret = pm_runtime_resume_and_get(rk_rng->dev);
+ if (ret < 0)
+ return ret;
+
+ rk_rng_writel(rk_rng, RKRNG_CTRL_REQ_TRNG | (RKRNG_CTRL_REQ_TRNG << 16),
+ RKRNG_CTRL);
+
+ if (readl_poll_timeout(rk_rng->base + RKRNG_STATE, val,
+ (val & RKRNG_STATE_TRNG_RDY), RK_RNG_POLL_PERIOD_US,
+ RK_RNG_POLL_TIMEOUT_US)) {
+ dev_err(rk_rng->dev, "timed out waiting for data\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ rk_rng_writel(rk_rng, RKRNG_STATE_TRNG_RDY, RKRNG_STATE);
+
+ memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read);
+
+out:
+ pm_runtime_mark_last_busy(rk_rng->dev);
+ pm_runtime_put_sync_autosuspend(rk_rng->dev);
+
+ return (ret < 0) ? ret : to_read;
+}
+
static int rk3588_rng_init(struct hwrng *rng)
{
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
@@ -305,6 +369,14 @@ static const struct rk_rng_soc_data rk3568_soc_data = {
.reset_optional = false,
};
+static const struct rk_rng_soc_data rk3576_soc_data = {
+ .rk_rng_init = rk3576_rng_init,
+ .rk_rng_read = rk3576_rng_read,
+ .rk_rng_cleanup = rk3588_rng_cleanup,
+ .quality = 999, /* as determined by actual testing */
+ .reset_optional = true,
+};
+
static const struct rk_rng_soc_data rk3588_soc_data = {
.rk_rng_init = rk3588_rng_init,
.rk_rng_read = rk3588_rng_read,
@@ -397,6 +469,7 @@ static const struct dev_pm_ops rk_rng_pm_ops = {
static const struct of_device_id rk_rng_dt_match[] = {
{ .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data },
+ { .compatible = "rockchip,rk3576-rng", .data = (void *)&rk3576_soc_data },
{ .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data },
{ /* sentinel */ },
};
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 3ba9d7e9a6c7..064944ae9fdc 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -27,7 +27,6 @@
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
-#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
@@ -41,11 +40,12 @@
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
-static void smi_recv_work(struct work_struct *t);
+static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg);
+static void intf_free(struct kref *ref);
static bool initialized;
static bool drvregistered;
@@ -180,14 +180,8 @@ MODULE_PARM_DESC(max_msgs_per_user,
struct ipmi_user {
struct list_head link;
- /*
- * Set to NULL when the user is destroyed, a pointer to myself
- * so srcu_dereference can be used on it.
- */
- struct ipmi_user *self;
- struct srcu_struct release_barrier;
-
struct kref refcount;
+ refcount_t destroyed;
/* The upper layer that handles receive messages. */
const struct ipmi_user_hndl *handler;
@@ -200,30 +194,8 @@ struct ipmi_user {
bool gets_events;
atomic_t nr_msgs;
-
- /* Free must run in process context for RCU cleanup. */
- struct work_struct remove_work;
};
-static struct workqueue_struct *remove_work_wq;
-
-static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
- __acquires(user->release_barrier)
-{
- struct ipmi_user *ruser;
-
- *index = srcu_read_lock(&user->release_barrier);
- ruser = srcu_dereference(user->self, &user->release_barrier);
- if (!ruser)
- srcu_read_unlock(&user->release_barrier, *index);
- return ruser;
-}
-
-static void release_ipmi_user(struct ipmi_user *user, int index)
-{
- srcu_read_unlock(&user->release_barrier, index);
-}
-
struct cmd_rcvr {
struct list_head link;
@@ -327,6 +299,8 @@ struct bmc_device {
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
+static struct workqueue_struct *bmc_remove_work_wq;
+
static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
struct ipmi_device_id *id,
bool *guid_set, guid_t *guid);
@@ -451,11 +425,10 @@ struct ipmi_smi {
struct list_head link;
/*
- * The list of upper layers that are using me. seq_lock write
- * protects this. Read protection is with srcu.
+ * The list of upper layers that are using me.
*/
struct list_head users;
- struct srcu_struct users_srcu;
+ struct mutex users_mutex;
atomic_t nr_users;
struct device_attribute nr_users_devattr;
struct device_attribute nr_msgs_devattr;
@@ -496,15 +469,22 @@ struct ipmi_smi {
int curr_seq;
/*
- * Messages queued for delivery. If delivery fails (out of memory
- * for instance), They will stay in here to be processed later in a
- * periodic timer interrupt. The workqueue is for handling received
- * messages directly from the handler.
+	 * Messages queued for delivery to the user.
+ */
+ struct mutex user_msgs_mutex;
+ struct list_head user_msgs;
+
+ /*
+ * Messages queued for processing. If processing fails (out
+	 * of memory for instance), they will stay in here to be
+ * processed later in a periodic timer interrupt. The
+ * workqueue is for handling received messages directly from
+ * the handler.
*/
spinlock_t waiting_rcv_msgs_lock;
struct list_head waiting_rcv_msgs;
atomic_t watchdog_pretimeouts_to_deliver;
- struct work_struct recv_work;
+ struct work_struct smi_work;
spinlock_t xmit_msgs_lock;
struct list_head xmit_msgs;
@@ -522,10 +502,9 @@ struct ipmi_smi {
* Events that were queues because no one was there to receive
* them.
*/
- spinlock_t events_lock; /* For dealing with event stuff. */
+ struct mutex events_mutex; /* For dealing with event stuff. */
struct list_head waiting_events;
unsigned int waiting_events_count; /* How many events in queue? */
- char delivering_events;
char event_msg_printed;
/* How many users are waiting for events? */
@@ -613,6 +592,28 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
+static void free_ipmi_user(struct kref *ref)
+{
+ struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+ struct module *owner;
+
+ owner = user->intf->owner;
+ kref_put(&user->intf->refcount, intf_free);
+ module_put(owner);
+ vfree(user);
+}
+
+static void release_ipmi_user(struct ipmi_user *user)
+{
+ kref_put(&user->refcount, free_ipmi_user);
+}
+
+static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user)
+{
+ if (!kref_get_unless_zero(&user->refcount))
+ return NULL;
+ return user;
+}
/*
* The driver model view of the IPMI messaging driver.
@@ -630,9 +631,6 @@ static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
-#define ipmi_interfaces_mutex_held() \
- lockdep_is_held(&ipmi_interfaces_mutex)
-static struct srcu_struct ipmi_interfaces_srcu;
/*
* List of watchers that want to know when smi's are added and deleted.
@@ -698,27 +696,20 @@ static void free_smi_msg_list(struct list_head *q)
}
}
-static void clean_up_interface_data(struct ipmi_smi *intf)
+static void intf_free(struct kref *ref)
{
+ struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
int i;
struct cmd_rcvr *rcvr, *rcvr2;
- struct list_head list;
-
- cancel_work_sync(&intf->recv_work);
free_smi_msg_list(&intf->waiting_rcv_msgs);
free_recv_msg_list(&intf->waiting_events);
/*
* Wholesale remove all the entries from the list in the
- * interface and wait for RCU to know that none are in use.
+ * interface. No need for locks, this is single-threaded.
*/
- mutex_lock(&intf->cmd_rcvrs_mutex);
- INIT_LIST_HEAD(&list);
- list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
- mutex_unlock(&intf->cmd_rcvrs_mutex);
-
- list_for_each_entry_safe(rcvr, rcvr2, &list, link)
+ list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link)
kfree(rcvr);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
@@ -726,20 +717,17 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
&& (intf->seq_table[i].recv_msg))
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
-}
-
-static void intf_free(struct kref *ref)
-{
- struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
- clean_up_interface_data(intf);
kfree(intf);
}
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
struct ipmi_smi *intf;
- int index, rv;
+ unsigned int count = 0, i;
+ int *interfaces = NULL;
+ struct device **devices = NULL;
+ int rv = 0;
/*
* Make sure the driver is actually initialized, this handles
@@ -753,20 +741,53 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
list_add(&watcher->link, &smi_watchers);
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
- lockdep_is_held(&smi_watchers_mutex)) {
- int intf_num = READ_ONCE(intf->intf_num);
+ /*
+ * Build an array of ipmi interfaces and fill it in, and
+ * another array of the devices. We can't call the callback
+ * with ipmi_interfaces_mutex held. smi_watchers_mutex will
+ * keep things in order for the user.
+ */
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link)
+ count++;
+ if (count > 0) {
+ interfaces = kmalloc_array(count, sizeof(*interfaces),
+ GFP_KERNEL);
+ if (!interfaces) {
+ rv = -ENOMEM;
+ } else {
+ devices = kmalloc_array(count, sizeof(*devices),
+ GFP_KERNEL);
+ if (!devices) {
+ kfree(interfaces);
+ interfaces = NULL;
+ rv = -ENOMEM;
+ }
+ }
+ count = 0;
+ }
+ if (interfaces) {
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
+ int intf_num = READ_ONCE(intf->intf_num);
- if (intf_num == -1)
- continue;
- watcher->new_smi(intf_num, intf->si_dev);
+ if (intf_num == -1)
+ continue;
+ devices[count] = intf->si_dev;
+ interfaces[count++] = intf_num;
+ }
+ }
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ if (interfaces) {
+ for (i = 0; i < count; i++)
+ watcher->new_smi(interfaces[i], devices[i]);
+ kfree(interfaces);
+ kfree(devices);
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
mutex_unlock(&smi_watchers_mutex);
- return 0;
+ return rv;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);
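
The reworked registration snapshots the interface numbers and devices while ipmi_interfaces_mutex is held and only calls watcher->new_smi() after dropping it, so the callback is free to sleep or take interface locks itself. A generic, self-contained sketch of that snapshot-then-callback idiom (names are illustrative, not from the driver):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_obj {
	struct list_head link;
	int id;
};

static LIST_HEAD(example_list);
static DEFINE_MUTEX(example_mutex);

/* Invoke cb(id) for every object without holding example_mutex across cb(). */
static int example_for_each_snapshot(void (*cb)(int id))
{
	struct example_obj *obj;
	int *ids, count = 0, i;

	mutex_lock(&example_mutex);
	list_for_each_entry(obj, &example_list, link)
		count++;
	ids = kmalloc_array(count, sizeof(*ids), GFP_KERNEL);
	if (!ids) {
		mutex_unlock(&example_mutex);
		return -ENOMEM;
	}
	count = 0;
	list_for_each_entry(obj, &example_list, link)
		ids[count++] = obj->id;
	mutex_unlock(&example_mutex);

	for (i = 0; i < count; i++)
		cb(ids[i]);

	kfree(ids);
	return 0;
}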
@@ -779,22 +800,17 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
-/*
- * Must be called with smi_watchers_mutex held.
- */
static void
call_smi_watchers(int i, struct device *dev)
{
struct ipmi_smi_watcher *w;
- mutex_lock(&smi_watchers_mutex);
list_for_each_entry(w, &smi_watchers, link) {
if (try_module_get(w->owner)) {
w->new_smi(i, dev);
module_put(w->owner);
}
}
- mutex_unlock(&smi_watchers_mutex);
}
static int
@@ -941,18 +957,14 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
ipmi_free_recv_msg(msg);
atomic_dec(&msg->user->nr_msgs);
} else {
- int index;
- struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
-
- if (user) {
- atomic_dec(&user->nr_msgs);
- user->handler->ipmi_recv_hndl(msg, user->handler_data);
- release_ipmi_user(user, index);
- } else {
- /* User went away, give up. */
- ipmi_free_recv_msg(msg);
- rv = -EINVAL;
- }
+ /*
+ * Deliver it in smi_work. The message will hold a
+ * refcount to the user.
+ */
+ mutex_lock(&intf->user_msgs_mutex);
+ list_add_tail(&msg->link, &intf->user_msgs);
+ mutex_unlock(&intf->user_msgs_mutex);
+ queue_work(system_wq, &intf->smi_work);
}
return rv;
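
deliver_response() no longer calls the user handler directly from this context; the message goes onto the new intf->user_msgs list and smi_work runs the delivery later, with the message pinning the user via its refcount. The consuming side is outside this hunk; a rough, illustrative sketch of the splice-then-deliver idiom it relies on (refcount handling omitted):

/* Illustrative only, not the driver's smi_work(). */
static void example_deliver_queued(struct ipmi_smi *intf)
{
	struct ipmi_recv_msg *msg, *msg2;
	LIST_HEAD(todo);

	mutex_lock(&intf->user_msgs_mutex);
	list_splice_init(&intf->user_msgs, &todo);
	mutex_unlock(&intf->user_msgs_mutex);

	list_for_each_entry_safe(msg, msg2, &todo, link) {
		list_del(&msg->link);
		msg->user->handler->ipmi_recv_hndl(msg, msg->user->handler_data);
	}
}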
@@ -1192,23 +1204,14 @@ static int intf_err_seq(struct ipmi_smi *intf,
return rv;
}
-static void free_user_work(struct work_struct *work)
-{
- struct ipmi_user *user = container_of(work, struct ipmi_user,
- remove_work);
-
- cleanup_srcu_struct(&user->release_barrier);
- vfree(user);
-}
-
int ipmi_create_user(unsigned int if_num,
const struct ipmi_user_hndl *handler,
void *handler_data,
struct ipmi_user **user)
{
unsigned long flags;
- struct ipmi_user *new_user;
- int rv, index;
+ struct ipmi_user *new_user = NULL;
+ int rv = 0;
struct ipmi_smi *intf;
/*
@@ -1230,30 +1233,31 @@ int ipmi_create_user(unsigned int if_num,
if (rv)
return rv;
- new_user = vzalloc(sizeof(*new_user));
- if (!new_user)
- return -ENOMEM;
-
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (intf->intf_num == if_num)
goto found;
}
/* Not found, return an error */
rv = -EINVAL;
- goto out_kfree;
+ goto out_unlock;
found:
+ if (intf->in_shutdown) {
+ rv = -ENODEV;
+ goto out_unlock;
+ }
+
if (atomic_add_return(1, &intf->nr_users) > max_users) {
rv = -EBUSY;
goto out_kfree;
}
- INIT_WORK(&new_user->remove_work, free_user_work);
-
- rv = init_srcu_struct(&new_user->release_barrier);
- if (rv)
+ new_user = vzalloc(sizeof(*new_user));
+ if (!new_user) {
+ rv = -ENOMEM;
goto out_kfree;
+ }
if (!try_module_get(intf->owner)) {
rv = -ENODEV;
@@ -1265,64 +1269,58 @@ int ipmi_create_user(unsigned int if_num,
atomic_set(&new_user->nr_msgs, 0);
kref_init(&new_user->refcount);
+ refcount_set(&new_user->destroyed, 1);
+ kref_get(&new_user->refcount); /* Destroy owns a refcount. */
new_user->handler = handler;
new_user->handler_data = handler_data;
new_user->intf = intf;
new_user->gets_events = false;
- rcu_assign_pointer(new_user->self, new_user);
+ mutex_lock(&intf->users_mutex);
spin_lock_irqsave(&intf->seq_lock, flags);
- list_add_rcu(&new_user->link, &intf->users);
+ list_add(&new_user->link, &intf->users);
spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->users_mutex);
+
if (handler->ipmi_watchdog_pretimeout)
/* User wants pretimeouts, so make sure to watch for them. */
smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
- *user = new_user;
- return 0;
out_kfree:
- atomic_dec(&intf->nr_users);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
- vfree(new_user);
+ if (rv) {
+ atomic_dec(&intf->nr_users);
+ vfree(new_user);
+ } else {
+ *user = new_user;
+ }
+out_unlock:
+ mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
- int rv, index;
+ int rv = -EINVAL;
struct ipmi_smi *intf;
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- if (intf->intf_num == if_num)
- goto found;
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num) {
+ if (!intf->handlers->get_smi_info)
+ rv = -ENOTTY;
+ else
+ rv = intf->handlers->get_smi_info(intf->send_info, data);
+ break;
+ }
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
-
- /* Not found, return an error */
- return -EINVAL;
-
-found:
- if (!intf->handlers->get_smi_info)
- rv = -ENOTTY;
- else
- rv = intf->handlers->get_smi_info(intf->send_info, data);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);
-static void free_user(struct kref *ref)
-{
- struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
-
- /* SRCU cleanup must happen in workqueue context. */
- queue_work(remove_work_wq, &user->remove_work);
-}
-
+/* Must be called with intf->users_mutex held. */
static void _ipmi_destroy_user(struct ipmi_user *user)
{
struct ipmi_smi *intf = user->intf;
@@ -1330,21 +1328,10 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- struct module *owner;
+ struct ipmi_recv_msg *msg, *msg2;
- if (!acquire_ipmi_user(user, &i)) {
- /*
- * The user has already been cleaned up, just make sure
- * nothing is using it and return.
- */
- synchronize_srcu(&user->release_barrier);
+ if (!refcount_dec_if_one(&user->destroyed))
return;
- }
-
- rcu_assign_pointer(user->self, NULL);
- release_ipmi_user(user, i);
-
- synchronize_srcu(&user->release_barrier);
if (user->handler->shutdown)
user->handler->shutdown(user->handler_data);
@@ -1355,11 +1342,11 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
if (user->gets_events)
atomic_dec(&intf->event_waiters);
- /* Remove the user from the interface's sequence table. */
- spin_lock_irqsave(&intf->seq_lock, flags);
- list_del_rcu(&user->link);
+ /* Remove the user from the interface's list and sequence table. */
+ list_del(&user->link);
atomic_dec(&intf->nr_users);
+ spin_lock_irqsave(&intf->seq_lock, flags);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if (intf->seq_table[i].inuse
&& (intf->seq_table[i].recv_msg->user == user)) {
@@ -1374,7 +1361,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
* Remove the user from the command receiver's table. First
* we build a list of everything (not using the standard link,
* since other things may be using it till we do
- * synchronize_srcu()) then free everything in that list.
+ * synchronize_rcu()) then free everything in that list.
*/
mutex_lock(&intf->cmd_rcvrs_mutex);
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
@@ -1386,23 +1373,33 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
}
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
- synchronize_rcu();
while (rcvrs) {
rcvr = rcvrs;
rcvrs = rcvr->next;
kfree(rcvr);
}
- owner = intf->owner;
- kref_put(&intf->refcount, intf_free);
- module_put(owner);
+ mutex_lock(&intf->user_msgs_mutex);
+ list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
+ if (msg->user != user)
+ continue;
+ list_del(&msg->link);
+ ipmi_free_recv_msg(msg);
+ }
+ mutex_unlock(&intf->user_msgs_mutex);
+
+ release_ipmi_user(user);
}
void ipmi_destroy_user(struct ipmi_user *user)
{
+ struct ipmi_smi *intf = user->intf;
+
+ mutex_lock(&intf->users_mutex);
_ipmi_destroy_user(user);
+ mutex_unlock(&intf->users_mutex);
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
}
EXPORT_SYMBOL(ipmi_destroy_user);
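
With SRCU gone, _ipmi_destroy_user() relies on a dedicated refcount as a destroy-exactly-once gate: it is initialised to 1, only the caller that wins the 1 -> 0 transition performs teardown, and the ordinary kref still decides when the memory is finally freed. A minimal illustrative sketch of that gate (not the driver's code):

#include <linux/kref.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct example_obj {
	struct kref refcount;	/* lifetime: last put frees the object */
	refcount_t destroyed;	/* gate: starts at 1, may drop to 0 once */
};

static void example_free(struct kref *ref)
{
	kfree(container_of(ref, struct example_obj, refcount));
}

static void example_destroy(struct example_obj *obj)
{
	/* Only one caller can move destroyed from 1 to 0. */
	if (!refcount_dec_if_one(&obj->destroyed))
		return;		/* teardown already ran */

	/* ... detach from lists, flush pending messages ... */

	kref_put(&obj->refcount, example_free);	/* drop the destroy reference */
}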
@@ -1411,9 +1408,9 @@ int ipmi_get_version(struct ipmi_user *user,
unsigned char *minor)
{
struct ipmi_device_id id;
- int rv, index;
+ int rv;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1422,7 +1419,7 @@ int ipmi_get_version(struct ipmi_user *user,
*major = ipmi_version_major(&id);
*minor = ipmi_version_minor(&id);
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1432,9 +1429,9 @@ int ipmi_set_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1444,7 +1441,7 @@ int ipmi_set_my_address(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].address = address;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1454,9 +1451,9 @@ int ipmi_get_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1466,7 +1463,7 @@ int ipmi_get_my_address(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].address;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1476,9 +1473,9 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char LUN)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1488,7 +1485,7 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].lun = LUN & 0x3;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1498,9 +1495,9 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1510,7 +1507,7 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].lun;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1518,17 +1515,17 @@ EXPORT_SYMBOL(ipmi_get_my_LUN);
int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
- int mode, index;
+ int mode;
unsigned long flags;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
mode = user->intf->maintenance_mode;
spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return mode;
}
@@ -1543,11 +1540,11 @@ static void maintenance_mode_update(struct ipmi_smi *intf)
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
- int rv = 0, index;
+ int rv = 0;
unsigned long flags;
struct ipmi_smi *intf = user->intf;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1577,7 +1574,7 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
}
out_unlock:
spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1585,19 +1582,17 @@ EXPORT_SYMBOL(ipmi_set_maintenance_mode);
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
- unsigned long flags;
struct ipmi_smi *intf = user->intf;
struct ipmi_recv_msg *msg, *msg2;
struct list_head msgs;
- int index;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
INIT_LIST_HEAD(&msgs);
- spin_lock_irqsave(&intf->events_lock, flags);
+ mutex_lock(&intf->events_mutex);
if (user->gets_events == val)
goto out;
@@ -1610,13 +1605,6 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
atomic_dec(&intf->event_waiters);
}
- if (intf->delivering_events)
- /*
- * Another thread is delivering events for this, so
- * let it handle any new events.
- */
- goto out;
-
/* Deliver any queued events. */
while (user->gets_events && !list_empty(&intf->waiting_events)) {
list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
@@ -1627,22 +1615,16 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
intf->event_msg_printed = 0;
}
- intf->delivering_events = 1;
- spin_unlock_irqrestore(&intf->events_lock, flags);
-
list_for_each_entry_safe(msg, msg2, &msgs, link) {
msg->user = user;
kref_get(&user->refcount);
deliver_local_response(intf, msg);
}
-
- spin_lock_irqsave(&intf->events_lock, flags);
- intf->delivering_events = 0;
}
out:
- spin_unlock_irqrestore(&intf->events_lock, flags);
- release_ipmi_user(user, index);
+ mutex_unlock(&intf->events_mutex);
+ release_ipmi_user(user);
return 0;
}
@@ -1687,9 +1669,9 @@ int ipmi_register_for_cmd(struct ipmi_user *user,
{
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
- int rv = 0, index;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1719,7 +1701,7 @@ out_unlock:
if (rv)
kfree(rcvr);
out_release:
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1733,9 +1715,9 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- int i, rv = -ENOENT, index;
+ int i, rv = -ENOENT;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1758,7 +1740,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
while (rcvrs) {
smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
rcvr = rcvrs;
@@ -1882,13 +1864,12 @@ static void smi_send(struct ipmi_smi *intf,
const struct ipmi_smi_handlers *handlers,
struct ipmi_smi_msg *smi_msg, int priority)
{
- int run_to_completion = intf->run_to_completion;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
unsigned long flags = 0;
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
smi_msg = smi_add_send_msg(intf, smi_msg, priority);
-
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
@@ -2304,6 +2285,7 @@ static int i_ipmi_request(struct ipmi_user *user,
{
struct ipmi_smi_msg *smi_msg;
struct ipmi_recv_msg *recv_msg;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
int rv = 0;
if (user) {
@@ -2337,7 +2319,8 @@ static int i_ipmi_request(struct ipmi_user *user,
}
}
- rcu_read_lock();
+ if (!run_to_completion)
+ mutex_lock(&intf->users_mutex);
if (intf->in_shutdown) {
rv = -ENODEV;
goto out_err;
@@ -2383,7 +2366,8 @@ out_err:
smi_send(intf, intf->handlers, smi_msg, priority);
}
- rcu_read_unlock();
+ if (!run_to_completion)
+ mutex_unlock(&intf->users_mutex);
out:
if (rv && user)
@@ -2414,12 +2398,12 @@ int ipmi_request_settime(struct ipmi_user *user,
unsigned int retry_time_ms)
{
unsigned char saddr = 0, lun = 0;
- int rv, index;
+ int rv;
if (!user)
return -EINVAL;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -2438,7 +2422,7 @@ int ipmi_request_settime(struct ipmi_user *user,
retries,
retry_time_ms);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
EXPORT_SYMBOL(ipmi_request_settime);
@@ -2453,12 +2437,12 @@ int ipmi_request_supply_msgs(struct ipmi_user *user,
int priority)
{
unsigned char saddr = 0, lun = 0;
- int rv, index;
+ int rv;
if (!user)
return -EINVAL;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -2477,7 +2461,7 @@ int ipmi_request_supply_msgs(struct ipmi_user *user,
lun,
-1, 0);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);
@@ -3064,7 +3048,7 @@ cleanup_bmc_device(struct kref *ref)
* with removing the device attributes while reading a device
* attribute.
*/
- queue_work(remove_work_wq, &bmc->remove_work);
+ queue_work(bmc_remove_work_wq, &bmc->remove_work);
}
/*
@@ -3520,15 +3504,14 @@ static ssize_t nr_msgs_show(struct device *dev,
char *buf)
{
struct ipmi_smi *intf = container_of(attr,
- struct ipmi_smi, nr_msgs_devattr);
+ struct ipmi_smi, nr_msgs_devattr);
struct ipmi_user *user;
- int index;
unsigned int count = 0;
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link)
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link)
count += atomic_read(&user->nr_msgs);
- srcu_read_unlock(&intf->users_srcu, index);
+ mutex_unlock(&intf->users_mutex);
return sysfs_emit(buf, "%u\n", count);
}
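As an aside on the pattern this hunk (and the many hunks like it below) converges on: the SRCU read side around intf->users becomes a plain mutex plus an ordinary list walk. The sketch below is not taken from the patch; every demo_* identifier is invented, and only the list/mutex API is the real kernel one.

	/* Minimal sketch of the users-list locking after the conversion above. */
	#include <linux/list.h>
	#include <linux/mutex.h>

	struct demo_user {
		struct list_head link;
		unsigned int nr_msgs;
	};

	struct demo_intf {
		struct mutex users_mutex;	/* protects @users */
		struct list_head users;
	};

	static void demo_intf_init(struct demo_intf *intf)
	{
		mutex_init(&intf->users_mutex);
		INIT_LIST_HEAD(&intf->users);
	}

	static void demo_add_user(struct demo_intf *intf, struct demo_user *user)
	{
		mutex_lock(&intf->users_mutex);
		list_add_tail(&user->link, &intf->users);
		mutex_unlock(&intf->users_mutex);
	}

	/* Reader: may sleep now, unlike the old srcu_read_lock() section. */
	static unsigned int demo_count_msgs(struct demo_intf *intf)
	{
		struct demo_user *user;
		unsigned int count = 0;

		mutex_lock(&intf->users_mutex);
		list_for_each_entry(user, &intf->users, link)
			count += user->nr_msgs;
		mutex_unlock(&intf->users_mutex);
		return count;
	}

Sleeping readers are what allows the rest of the series to move this processing off the BH workqueue (system_bh_wq) and onto system_wq, as the later hunks show.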
@@ -3569,12 +3552,6 @@ int ipmi_add_smi(struct module *owner,
if (!intf)
return -ENOMEM;
- rv = init_srcu_struct(&intf->users_srcu);
- if (rv) {
- kfree(intf);
- return rv;
- }
-
intf->owner = owner;
intf->bmc = &intf->tmp_bmc;
INIT_LIST_HEAD(&intf->bmc->intfs);
@@ -3591,7 +3568,10 @@ int ipmi_add_smi(struct module *owner,
}
if (slave_addr != 0)
intf->addrinfo[0].address = slave_addr;
+ INIT_LIST_HEAD(&intf->user_msgs);
+ mutex_init(&intf->user_msgs_mutex);
INIT_LIST_HEAD(&intf->users);
+ mutex_init(&intf->users_mutex);
atomic_set(&intf->nr_users, 0);
intf->handlers = handlers;
intf->send_info = send_info;
@@ -3603,12 +3583,12 @@ int ipmi_add_smi(struct module *owner,
intf->curr_seq = 0;
spin_lock_init(&intf->waiting_rcv_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
- INIT_WORK(&intf->recv_work, smi_recv_work);
+ INIT_WORK(&intf->smi_work, smi_work);
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->xmit_msgs_lock);
INIT_LIST_HEAD(&intf->xmit_msgs);
INIT_LIST_HEAD(&intf->hp_xmit_msgs);
- spin_lock_init(&intf->events_lock);
+ mutex_init(&intf->events_mutex);
spin_lock_init(&intf->watch_lock);
atomic_set(&intf->event_waiters, 0);
intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
@@ -3621,12 +3601,16 @@ int ipmi_add_smi(struct module *owner,
for (i = 0; i < IPMI_NUM_STATS; i++)
atomic_set(&intf->stats[i], 0);
+ /*
+ * Grab the watchers mutex so we can deliver the new interface
+ * without races.
+ */
+ mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
/* Look for a hole in the numbers. */
i = 0;
link = &ipmi_interfaces;
- list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
- ipmi_interfaces_mutex_held()) {
+ list_for_each_entry(tintf, &ipmi_interfaces, link) {
if (tintf->intf_num != i) {
link = &tintf->link;
break;
@@ -3635,9 +3619,9 @@ int ipmi_add_smi(struct module *owner,
}
/* Add the new interface in numeric order. */
if (i == 0)
- list_add_rcu(&intf->link, &ipmi_interfaces);
+ list_add(&intf->link, &ipmi_interfaces);
else
- list_add_tail_rcu(&intf->link, link);
+ list_add_tail(&intf->link, link);
rv = handlers->start_processing(send_info, intf);
if (rv)
@@ -3669,18 +3653,14 @@ int ipmi_add_smi(struct module *owner,
goto out_err_bmc_reg;
}
- /*
- * Keep memory order straight for RCU readers. Make
- * sure everything else is committed to memory before
- * setting intf_num to mark the interface valid.
- */
- smp_wmb();
intf->intf_num = i;
mutex_unlock(&ipmi_interfaces_mutex);
/* After this point the interface is legal to use. */
call_smi_watchers(i, intf->si_dev);
+ mutex_unlock(&smi_watchers_mutex);
+
return 0;
out_err_bmc_reg:
@@ -3689,10 +3669,9 @@ int ipmi_add_smi(struct module *owner,
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
out_err:
- list_del_rcu(&intf->link);
+ list_del(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
- synchronize_srcu(&ipmi_interfaces_srcu);
- cleanup_srcu_struct(&intf->users_srcu);
+ mutex_unlock(&smi_watchers_mutex);
kref_put(&intf->refcount, intf_free);
return rv;
@@ -3758,19 +3737,28 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
void ipmi_unregister_smi(struct ipmi_smi *intf)
{
struct ipmi_smi_watcher *w;
- int intf_num, index;
+ int intf_num;
if (!intf)
return;
+
intf_num = intf->intf_num;
mutex_lock(&ipmi_interfaces_mutex);
+ cancel_work_sync(&intf->smi_work);
+ /* smi_work() can no longer be in progress after this. */
+
intf->intf_num = -1;
intf->in_shutdown = true;
- list_del_rcu(&intf->link);
+ list_del(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
- synchronize_srcu(&ipmi_interfaces_srcu);
- /* At this point no users can be added to the interface. */
+ /*
+ * At this point no users can be added to the interface and no
+ * new messages can be sent.
+ */
+
+ if (intf->handlers->shutdown)
+ intf->handlers->shutdown(intf->send_info);
device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
device_remove_file(intf->si_dev, &intf->nr_users_devattr);
@@ -3784,24 +3772,19 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
w->smi_gone(intf_num);
mutex_unlock(&smi_watchers_mutex);
- index = srcu_read_lock(&intf->users_srcu);
+ mutex_lock(&intf->users_mutex);
while (!list_empty(&intf->users)) {
- struct ipmi_user *user =
- container_of(list_next_rcu(&intf->users),
- struct ipmi_user, link);
+ struct ipmi_user *user = list_first_entry(&intf->users,
+ struct ipmi_user, link);
_ipmi_destroy_user(user);
}
- srcu_read_unlock(&intf->users_srcu, index);
-
- if (intf->handlers->shutdown)
- intf->handlers->shutdown(intf->send_info);
+ mutex_unlock(&intf->users_mutex);
cleanup_smi_msgs(intf);
ipmi_bmc_unregister(intf);
- cleanup_srcu_struct(&intf->users_srcu);
kref_put(&intf->refcount, intf_free);
}
EXPORT_SYMBOL(ipmi_unregister_smi);
@@ -3926,17 +3909,12 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
msg->data_size, msg->data);
- rcu_read_lock();
- if (!intf->in_shutdown) {
- smi_send(intf, intf->handlers, msg, 0);
- /*
- * We used the message, so return the value
- * that causes it to not be freed or
- * queued.
- */
- rv = -1;
- }
- rcu_read_unlock();
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
+ */
+ rv = -1;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
@@ -3946,7 +3924,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
* later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/* Extract the source address from the data. */
ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
@@ -4017,17 +3995,12 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
msg->data_size = 5;
- rcu_read_lock();
- if (!intf->in_shutdown) {
- smi_send(intf, intf->handlers, msg, 0);
- /*
- * We used the message, so return the value
- * that causes it to not be freed or
- * queued.
- */
- rv = -1;
- }
- rcu_read_unlock();
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
+ */
+ rv = -1;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
@@ -4037,7 +4010,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
* later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/* Extract the source address from the data. */
daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
@@ -4206,14 +4179,33 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
rcu_read_unlock();
if (user == NULL) {
- /* We didn't find a user, just give up. */
+ /* We didn't find a user, just give up and return an error. */
ipmi_inc_stat(intf, unhandled_commands);
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_SEND_MSG_CMD;
+ msg->data[2] = chan;
+ msg->data[3] = msg->rsp[4]; /* handle */
+ msg->data[4] = msg->rsp[8]; /* rsSWID */
+ msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3);
+ msg->data[6] = ipmb_checksum(&msg->data[3], 3);
+ msg->data[7] = msg->rsp[5]; /* rqSWID */
+ /* rqseq/lun */
+ msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3);
+ msg->data[9] = cmd;
+ msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data[11] = ipmb_checksum(&msg->data[7], 4);
+ msg->data_size = 12;
+
+ dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
+ msg->data_size, msg->data);
+
+ smi_send(intf, intf->handlers, msg, 0);
/*
- * Don't do anything with these messages, just allow
- * them to be freed.
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
*/
- rv = 0;
+ rv = -1;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
@@ -4222,7 +4214,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
* message, so requeue it for handling later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/* Extract the source address from the data. */
lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
@@ -4331,7 +4323,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
* later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/*
* OEM Messages are expected to be delivered via
@@ -4393,8 +4385,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg, *recv_msg2;
struct list_head msgs;
struct ipmi_user *user;
- int rv = 0, deliver_count = 0, index;
- unsigned long flags;
+ int rv = 0, deliver_count = 0;
if (msg->rsp_size < 19) {
/* Message is too small to be an IPMB event. */
@@ -4409,7 +4400,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
INIT_LIST_HEAD(&msgs);
- spin_lock_irqsave(&intf->events_lock, flags);
+ mutex_lock(&intf->events_mutex);
ipmi_inc_stat(intf, events);
@@ -4417,18 +4408,20 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
* Allocate and fill in one message for every user that is
* getting events.
*/
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link) {
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link) {
if (!user->gets_events)
continue;
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
- rcu_read_unlock();
+ mutex_unlock(&intf->users_mutex);
list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
link) {
+ user = recv_msg->user;
list_del(&recv_msg->link);
ipmi_free_recv_msg(recv_msg);
+ kref_put(&user->refcount, free_ipmi_user);
}
/*
* We couldn't allocate memory for the
@@ -4446,7 +4439,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
kref_get(&user->refcount);
list_add_tail(&recv_msg->link, &msgs);
}
- srcu_read_unlock(&intf->users_srcu, index);
+ mutex_unlock(&intf->users_mutex);
if (deliver_count) {
/* Now deliver all the messages. */
@@ -4484,7 +4477,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
}
out:
- spin_unlock_irqrestore(&intf->events_lock, flags);
+ mutex_unlock(&intf->events_mutex);
return rv;
}
@@ -4570,7 +4563,7 @@ return_unspecified:
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
&& (msg->user_data == NULL)) {
- if (intf->in_shutdown)
+ if (intf->in_shutdown || intf->run_to_completion)
goto out;
/*
@@ -4642,6 +4635,9 @@ return_unspecified:
*/
struct ipmi_recv_msg *recv_msg;
+ if (intf->run_to_completion)
+ goto out;
+
chan = msg->data[2] & 0x0f;
if (chan >= IPMI_MAX_CHANNELS)
/* Invalid channel number */
@@ -4664,6 +4660,9 @@ process_response_response:
&& (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
struct ipmi_channel *chans;
+ if (intf->run_to_completion)
+ goto out;
+
/* It's from the receive queue. */
chan = msg->rsp[3] & 0xf;
if (chan >= IPMI_MAX_CHANNELS) {
@@ -4738,6 +4737,9 @@ process_response_response:
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
/* It's an asynchronous event. */
+ if (intf->run_to_completion)
+ goto out;
+
requeue = handle_read_event_rsp(intf, msg);
} else {
/* It's a response from the local BMC. */
@@ -4753,10 +4755,10 @@ process_response_response:
*/
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
- struct ipmi_smi_msg *smi_msg;
- unsigned long flags = 0;
- int rv;
- int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi_msg *smi_msg;
+ unsigned long flags = 0;
+ int rv;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
/* See if any waiting messages need to be processed. */
if (!run_to_completion)
@@ -4790,31 +4792,15 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
}
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
-
- /*
- * If the pretimout count is non-zero, decrement one from it and
- * deliver pretimeouts to all the users.
- */
- if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
- struct ipmi_user *user;
- int index;
-
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link) {
- if (user->handler->ipmi_watchdog_pretimeout)
- user->handler->ipmi_watchdog_pretimeout(
- user->handler_data);
- }
- srcu_read_unlock(&intf->users_srcu, index);
- }
}
-static void smi_recv_work(struct work_struct *t)
+static void smi_work(struct work_struct *t)
{
unsigned long flags = 0; /* keep us warning-free. */
- struct ipmi_smi *intf = from_work(intf, t, recv_work);
- int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi *intf = from_work(intf, t, smi_work);
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
struct ipmi_smi_msg *newmsg = NULL;
+ struct ipmi_recv_msg *msg, *msg2;
/*
* Start the next message if available.
@@ -4824,8 +4810,6 @@ static void smi_recv_work(struct work_struct *t)
* message delivery.
*/
- rcu_read_lock();
-
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -4843,15 +4827,57 @@ static void smi_recv_work(struct work_struct *t)
intf->curr_msg = newmsg;
}
}
-
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+
if (newmsg)
intf->handlers->sender(intf->send_info, newmsg);
- rcu_read_unlock();
-
handle_new_recv_msgs(intf);
+
+ /* Nothing below applies during panic time. */
+ if (run_to_completion)
+ return;
+
+ /*
+	 * If the pretimeout count is non-zero, decrement one from it and
+ * deliver pretimeouts to all the users.
+ */
+ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+ struct ipmi_user *user;
+
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link) {
+ if (user->handler->ipmi_watchdog_pretimeout)
+ user->handler->ipmi_watchdog_pretimeout(
+ user->handler_data);
+ }
+ mutex_unlock(&intf->users_mutex);
+ }
+
+ /*
+ * Freeing the message can cause a user to be released, which
+ * can then cause the interface to be freed. Make sure that
+ * doesn't happen until we are ready.
+ */
+ kref_get(&intf->refcount);
+
+ mutex_lock(&intf->user_msgs_mutex);
+ list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
+ struct ipmi_user *user = msg->user;
+
+ list_del(&msg->link);
+
+ if (refcount_read(&user->destroyed) == 0) {
+ ipmi_free_recv_msg(msg);
+ } else {
+ atomic_dec(&user->nr_msgs);
+ user->handler->ipmi_recv_hndl(msg, user->handler_data);
+ }
+ }
+ mutex_unlock(&intf->user_msgs_mutex);
+
+ kref_put(&intf->refcount, intf_free);
}
/* Handle a new message from the lower layer. */
@@ -4859,7 +4885,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
unsigned long flags = 0; /* keep us warning-free. */
- int run_to_completion = intf->run_to_completion;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
/*
* To preserve message order, we keep a queue and deliver from
@@ -4884,9 +4910,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
if (run_to_completion)
- smi_recv_work(&intf->recv_work);
+ smi_work(&intf->smi_work);
else
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
@@ -4896,7 +4922,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
return;
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -5065,7 +5091,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
flags);
}
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
return need_timer;
}
@@ -5084,17 +5110,19 @@ static struct timer_list ipmi_timer;
static atomic_t stop_operation;
-static void ipmi_timeout(struct timer_list *unused)
+static void ipmi_timeout_work(struct work_struct *work)
{
struct ipmi_smi *intf;
bool need_timer = false;
- int index;
if (atomic_read(&stop_operation))
return;
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (atomic_read(&intf->event_waiters)) {
intf->ticks_to_req_ev--;
if (intf->ticks_to_req_ev == 0) {
@@ -5106,12 +5134,22 @@ static void ipmi_timeout(struct timer_list *unused)
need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ mutex_unlock(&ipmi_interfaces_mutex);
if (need_timer)
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
+static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work);
+
+static void ipmi_timeout(struct timer_list *unused)
+{
+ if (atomic_read(&stop_operation))
+ return;
+
+ queue_work(system_wq, &ipmi_timer_work);
+}
+
static void need_waiter(struct ipmi_smi *intf)
{
/* Racy, but worst case we start the timer twice. */
@@ -5168,7 +5206,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
if (msg->user && !oops_in_progress)
- kref_put(&msg->user->refcount, free_user);
+ kref_put(&msg->user->refcount, free_ipmi_user);
msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
@@ -5188,9 +5226,9 @@ static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
/*
* Inside a panic, send a message and wait for a response.
*/
-static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
- struct ipmi_addr *addr,
- struct kernel_ipmi_msg *msg)
+static void _ipmi_panic_request_and_wait(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
{
struct ipmi_smi_msg smi_msg;
struct ipmi_recv_msg recv_msg;
@@ -5220,6 +5258,15 @@ static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
ipmi_poll(intf);
}
+void ipmi_panic_request_and_wait(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
+{
+ user->intf->run_to_completion = 1;
+ _ipmi_panic_request_and_wait(user->intf, addr, msg);
+}
+EXPORT_SYMBOL(ipmi_panic_request_and_wait);
+
static void event_receiver_fetcher(struct ipmi_smi *intf,
struct ipmi_recv_msg *msg)
{
@@ -5288,7 +5335,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
}
/* Send the event announcing the panic. */
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
/*
* On every interface, dump a bunch of OEM event holding the
@@ -5324,7 +5371,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = device_id_fetcher;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
if (intf->local_event_generator) {
/* Request the event receiver from the local MC. */
@@ -5333,7 +5380,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = event_receiver_fetcher;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
}
intf->null_user_handler = NULL;
@@ -5385,7 +5432,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
memcpy_and_pad(data+5, 11, p, size, '\0');
p += size;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
@@ -5403,7 +5450,7 @@ static int panic_event(struct notifier_block *this,
has_panicked = 1;
/* For every registered interface, set it to run to completion. */
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (!intf->handlers || intf->intf_num == -1)
/* Interface is not ready. */
continue;
@@ -5433,7 +5480,7 @@ static int panic_event(struct notifier_block *this,
intf->handlers->set_run_to_completion(intf->send_info,
1);
- list_for_each_entry_rcu(user, &intf->users, link) {
+ list_for_each_entry(user, &intf->users, link) {
if (user->handler->ipmi_panic_handler)
user->handler->ipmi_panic_handler(
user->handler_data);
@@ -5478,15 +5525,11 @@ static int ipmi_init_msghandler(void)
if (initialized)
goto out;
- rv = init_srcu_struct(&ipmi_interfaces_srcu);
- if (rv)
- goto out;
-
- remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
- if (!remove_work_wq) {
+ bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+ if (!bmc_remove_work_wq) {
pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
rv = -ENOMEM;
- goto out_wq;
+ goto out;
}
timer_setup(&ipmi_timer, ipmi_timeout, 0);
@@ -5496,9 +5539,6 @@ static int ipmi_init_msghandler(void)
initialized = true;
-out_wq:
- if (rv)
- cleanup_srcu_struct(&ipmi_interfaces_srcu);
out:
mutex_unlock(&ipmi_interfaces_mutex);
return rv;
@@ -5522,7 +5562,7 @@ static void __exit cleanup_ipmi(void)
int count;
if (initialized) {
- destroy_workqueue(remove_work_wq);
+ destroy_workqueue(bmc_remove_work_wq);
atomic_notifier_chain_unregister(&panic_notifier_list,
&panic_block);
@@ -5539,6 +5579,7 @@ static void __exit cleanup_ipmi(void)
*/
atomic_set(&stop_operation, 1);
timer_delete_sync(&ipmi_timer);
+ cancel_work_sync(&ipmi_timer_work);
initialized = false;
@@ -5549,8 +5590,6 @@ static void __exit cleanup_ipmi(void)
count = atomic_read(&recv_msg_inuse_count);
if (count != 0)
pr_warn("recv message count %d at exit\n", count);
-
- cleanup_srcu_struct(&ipmi_interfaces_srcu);
}
if (drvregistered)
driver_unregister(&ipmidriver.driver);
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
index a7ead2a4c753..508c3fd45877 100644
--- a/drivers/char/ipmi/ipmi_si.h
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -26,6 +26,14 @@ enum si_type {
/* Array is defined in the ipmi_si_intf.c */
extern const char *const si_to_str[];
+struct ipmi_match_info {
+ enum si_type type;
+};
+
+extern const struct ipmi_match_info ipmi_kcs_si_info;
+extern const struct ipmi_match_info ipmi_smic_si_info;
+extern const struct ipmi_match_info ipmi_bt_si_info;
+
enum ipmi_addr_space {
IPMI_IO_ADDR_SPACE, IPMI_MEM_ADDR_SPACE
};
@@ -64,7 +72,7 @@ struct si_sm_io {
void (*irq_cleanup)(struct si_sm_io *io);
u8 slave_addr;
- enum si_type si_type;
+ const struct ipmi_match_info *si_info;
struct device *dev;
};
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 12b0b77eb1cc..7fe891783a37 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -73,6 +73,10 @@ enum si_intf_state {
/* 'invalid' to allow a firmware-specified interface to be disabled */
const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL };
+const struct ipmi_match_info ipmi_kcs_si_info = { .type = SI_KCS };
+const struct ipmi_match_info ipmi_smic_si_info = { .type = SI_SMIC };
+const struct ipmi_match_info ipmi_bt_si_info = { .type = SI_BT };
+
static bool initialized;
/*
@@ -692,7 +696,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
break;
}
enables = current_global_enables(smi_info, 0, &irq_on);
- if (smi_info->io.si_type == SI_BT)
+ if (smi_info->io.si_info->type == SI_BT)
/* BT has its own interrupt enable bit. */
check_bt_irq(smi_info, irq_on);
if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
@@ -1119,7 +1123,7 @@ irqreturn_t ipmi_si_irq_handler(int irq, void *data)
struct smi_info *smi_info = data;
unsigned long flags;
- if (smi_info->io.si_type == SI_BT)
+ if (smi_info->io.si_info->type == SI_BT)
/* We need to clear the IRQ flag for the BT interface. */
smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_CLEAR_IRQ_BIT
@@ -1164,7 +1168,7 @@ static int smi_start_processing(void *send_info,
* The BT interface is efficient enough to not need a thread,
* and there is no need for a thread if we have interrupts.
*/
- else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
+ else if (new_smi->io.si_info->type != SI_BT && !new_smi->io.irq)
enable = 1;
if (enable) {
@@ -1235,7 +1239,7 @@ MODULE_PARM_DESC(kipmid_max_busy_us,
void ipmi_irq_finish_setup(struct si_sm_io *io)
{
- if (io->si_type == SI_BT)
+ if (io->si_info->type == SI_BT)
/* Enable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
@@ -1243,7 +1247,7 @@ void ipmi_irq_finish_setup(struct si_sm_io *io)
void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
- if (io->si_type == SI_BT)
+ if (io->si_info->type == SI_BT)
/* Disable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}
@@ -1614,7 +1618,7 @@ static ssize_t type_show(struct device *dev,
{
struct smi_info *smi_info = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]);
+ return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_info->type]);
}
static DEVICE_ATTR_RO(type);
@@ -1649,7 +1653,7 @@ static ssize_t params_show(struct device *dev,
return sysfs_emit(buf,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
- si_to_str[smi_info->io.si_type],
+ si_to_str[smi_info->io.si_info->type],
addr_space_to_str[smi_info->io.addr_space],
smi_info->io.addr_data,
smi_info->io.regspacing,
@@ -1803,7 +1807,7 @@ setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID &&
- smi_info->io.si_type == SI_BT)
+ smi_info->io.si_info->type == SI_BT)
register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
@@ -1907,13 +1911,13 @@ int ipmi_si_add_smi(struct si_sm_io *io)
/* We prefer ACPI over SMBIOS. */
dev_info(dup->io.dev,
"Removing SMBIOS-specified %s state machine in favor of ACPI\n",
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
cleanup_one_si(dup);
} else {
dev_info(new_smi->io.dev,
"%s-specified %s state machine: duplicate\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
rv = -EBUSY;
kfree(new_smi);
goto out_err;
@@ -1922,7 +1926,7 @@ int ipmi_si_add_smi(struct si_sm_io *io)
pr_info("Adding %s-specified %s state machine\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
list_add_tail(&new_smi->link, &smi_infos);
@@ -1945,12 +1949,12 @@ static int try_smi_init(struct smi_info *new_smi)
pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type],
+ si_to_str[new_smi->io.si_info->type],
addr_space_to_str[new_smi->io.addr_space],
new_smi->io.addr_data,
new_smi->io.slave_addr, new_smi->io.irq);
- switch (new_smi->io.si_type) {
+ switch (new_smi->io.si_info->type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
break;
@@ -2073,7 +2077,7 @@ static int try_smi_init(struct smi_info *new_smi)
smi_num++;
dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
WARN_ON(new_smi->io.dev->init_name != NULL);
@@ -2091,9 +2095,18 @@ static int try_smi_init(struct smi_info *new_smi)
return rv;
}
+/*
+ * Devices in the same address space at the same address are the same.
+ */
+static bool __init ipmi_smi_info_same(struct smi_info *e1, struct smi_info *e2)
+{
+ return (e1->io.addr_space == e2->io.addr_space &&
+ e1->io.addr_data == e2->io.addr_data);
+}
+
static int __init init_ipmi_si(void)
{
- struct smi_info *e;
+ struct smi_info *e, *e2;
enum ipmi_addr_src type = SI_INVALID;
if (initialized)
@@ -2109,37 +2122,70 @@ static int __init init_ipmi_si(void)
ipmi_si_parisc_init();
- /* We prefer devices with interrupts, but in the case of a machine
- with multiple BMCs we assume that there will be several instances
- of a given type so if we succeed in registering a type then also
- try to register everything else of the same type */
mutex_lock(&smi_infos_lock);
+
+ /*
+ * Scan through all the devices. We prefer devices with
+ * interrupts, so go through those first in case there are any
+ * duplicates that don't have the interrupt set.
+ */
list_for_each_entry(e, &smi_infos, link) {
- /* Try to register a device if it has an IRQ and we either
- haven't successfully registered a device yet or this
- device has the same type as one we successfully registered */
- if (e->io.irq && (!type || e->io.addr_source == type)) {
- if (!try_smi_init(e)) {
- type = e->io.addr_source;
+ bool dup = false;
+
+ /* Register ones with interrupts first. */
+ if (!e->io.irq)
+ continue;
+
+ /*
+ * Go through the ones we have already seen to see if this
+ * is a dup.
+ */
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (e2 == e)
+ break;
+ if (e2->io.irq && ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
}
}
+ if (!dup)
+ try_smi_init(e);
}
- /* type will only have been set if we successfully registered an si */
- if (type)
- goto skip_fallback_noirq;
+ /*
+ * Now try devices without interrupts.
+ */
+ list_for_each_entry(e, &smi_infos, link) {
+ bool dup = false;
- /* Fall back to the preferred device */
+ if (e->io.irq)
+ continue;
- list_for_each_entry(e, &smi_infos, link) {
- if (!e->io.irq && (!type || e->io.addr_source == type)) {
- if (!try_smi_init(e)) {
- type = e->io.addr_source;
+ /*
+ * Go through the ones we have already seen to see if
+ * this is a dup. We have already looked at the ones
+ * with interrupts.
+ */
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (!e2->io.irq)
+ continue;
+ if (ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
+ }
+ }
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (e2 == e)
+ break;
+ if (ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
}
}
+ if (!dup)
+ try_smi_init(e);
}
-skip_fallback_noirq:
initialized = true;
mutex_unlock(&smi_infos_lock);
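The rewritten init_ipmi_si() above replaces the old "register everything of the first successful address source" logic with two passes and an explicit duplicate check on (address space, address). Below is a compressed sketch of that flow; it is not the ipmi_si code, and demo_dev, demo_same_device() and demo_try_init() are invented stand-ins for smi_info, ipmi_smi_info_same() and try_smi_init().

	#include <linux/list.h>
	#include <linux/types.h>

	struct demo_dev {
		struct list_head link;
		int addr_space;
		unsigned long addr;
		bool has_irq;
	};

	static bool demo_same_device(const struct demo_dev *a, const struct demo_dev *b)
	{
		return a->addr_space == b->addr_space && a->addr == b->addr;
	}

	static void demo_try_init(struct demo_dev *d)
	{
		/* stand-in for try_smi_init() */
	}

	static void demo_register_all(struct list_head *devs)
	{
		struct demo_dev *d, *other;
		bool dup;

		/* Pass 1: devices with interrupts, skipping earlier duplicates. */
		list_for_each_entry(d, devs, link) {
			if (!d->has_irq)
				continue;
			dup = false;
			list_for_each_entry(other, devs, link) {
				if (other == d)
					break;
				if (other->has_irq && demo_same_device(d, other))
					dup = true;
			}
			if (!dup)
				demo_try_init(d);
		}

		/* Pass 2: interrupt-less devices. */
		list_for_each_entry(d, devs, link) {
			if (d->has_irq)
				continue;
			dup = false;
			/* Covered by any interrupt-driven device at the same address? */
			list_for_each_entry(other, devs, link) {
				if (other->has_irq && demo_same_device(d, other)) {
					dup = true;
					break;
				}
			}
			/* ...or by an earlier interrupt-less entry? */
			list_for_each_entry(other, devs, link) {
				if (other == d)
					break;
				if (!other->has_irq && demo_same_device(d, other)) {
					dup = true;
					break;
				}
			}
			if (!dup)
				demo_try_init(d);
		}
	}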
@@ -2267,7 +2313,7 @@ struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
if (e->io.addr_space != addr_space)
continue;
- if (e->io.si_type != si_type)
+ if (e->io.si_info->type != si_type)
continue;
if (e->io.addr_data == addr) {
dev = get_device(e->io.dev);
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
index 2be2967f6b5f..3b0a70d9adbb 100644
--- a/drivers/char/ipmi/ipmi_si_parisc.c
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -13,7 +13,7 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
memset(&io, 0, sizeof(io));
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
io.addr_source = SI_DEVICETREE;
io.addr_space = IPMI_MEM_ADDR_SPACE;
io.addr_data = dev->hpa.start;
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index 8c0ea637aba0..17f72763322d 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -23,30 +23,32 @@ MODULE_PARM_DESC(trypci,
static int ipmi_pci_probe_regspacing(struct si_sm_io *io)
{
- if (io->si_type == SI_KCS) {
- unsigned char status;
- int regspacing;
-
- io->regsize = DEFAULT_REGSIZE;
- io->regshift = 0;
-
- /* detect 1, 4, 16byte spacing */
- for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
- io->regspacing = regspacing;
- if (io->io_setup(io)) {
- dev_err(io->dev, "Could not setup I/O space\n");
- return DEFAULT_REGSPACING;
- }
- /* write invalid cmd */
- io->outputb(io, 1, 0x10);
- /* read status back */
- status = io->inputb(io, 1);
- io->io_cleanup(io);
- if (status)
- return regspacing;
- regspacing *= 4;
+ unsigned char status;
+ int regspacing;
+
+ if (io->si_info->type != SI_KCS)
+ return DEFAULT_REGSPACING;
+
+ io->regsize = DEFAULT_REGSIZE;
+ io->regshift = 0;
+
+ /* detect 1, 4, 16byte spacing */
+ for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
+ io->regspacing = regspacing;
+ if (io->io_setup(io)) {
+ dev_err(io->dev, "Could not setup I/O space\n");
+ return DEFAULT_REGSPACING;
}
+ /* write invalid cmd */
+ io->outputb(io, 1, 0x10);
+ /* read status back */
+ status = io->inputb(io, 1);
+ io->io_cleanup(io);
+ if (status)
+ return regspacing;
+ regspacing *= 4;
}
+
return DEFAULT_REGSPACING;
}
@@ -74,15 +76,15 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
switch (pdev->class) {
case PCI_CLASS_SERIAL_IPMI_SMIC:
- io.si_type = SI_SMIC;
+ io.si_info = &ipmi_smic_si_info;
break;
case PCI_CLASS_SERIAL_IPMI_KCS:
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
break;
case PCI_CLASS_SERIAL_IPMI_BT:
- io.si_type = SI_BT;
+ io.si_info = &ipmi_bt_si_info;
break;
default:
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 550cabd43ae6..fb6e359ae494 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -163,9 +163,13 @@ static int platform_ipmi_probe(struct platform_device *pdev)
switch (type) {
case SI_KCS:
+ io.si_info = &ipmi_kcs_si_info;
+ break;
case SI_SMIC:
+ io.si_info = &ipmi_smic_si_info;
+ break;
case SI_BT:
- io.si_type = type;
+ io.si_info = &ipmi_bt_si_info;
break;
case SI_TYPE_INVALID: /* User disabled this in hardcode. */
return -ENODEV;
@@ -213,13 +217,10 @@ static int platform_ipmi_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id of_ipmi_match[] = {
- { .type = "ipmi", .compatible = "ipmi-kcs",
- .data = (void *)(unsigned long) SI_KCS },
- { .type = "ipmi", .compatible = "ipmi-smic",
- .data = (void *)(unsigned long) SI_SMIC },
- { .type = "ipmi", .compatible = "ipmi-bt",
- .data = (void *)(unsigned long) SI_BT },
- {},
+ { .type = "ipmi", .compatible = "ipmi-kcs", .data = &ipmi_kcs_si_info },
+ { .type = "ipmi", .compatible = "ipmi-smic", .data = &ipmi_smic_si_info },
+ { .type = "ipmi", .compatible = "ipmi-bt", .data = &ipmi_bt_si_info },
+ {}
};
MODULE_DEVICE_TABLE(of, of_ipmi_match);
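For reference, this is the general shape of the pattern the table above now uses: each .data entry points at a shared const descriptor, and the probe routine pulls it back out with device_get_match_data(). A minimal, self-contained sketch follows; it is not the ipmi_si code, and the demo_* names and compatible strings are invented.

	#include <linux/errno.h>
	#include <linux/mod_devicetable.h>
	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>
	#include <linux/property.h>

	struct demo_match_info {
		const char *name;	/* per-type data shared by all users */
	};

	static const struct demo_match_info demo_kcs_info = { .name = "kcs" };
	static const struct demo_match_info demo_bt_info  = { .name = "bt" };

	static const struct of_device_id demo_of_match[] = {
		{ .compatible = "vendor,demo-kcs", .data = &demo_kcs_info },
		{ .compatible = "vendor,demo-bt",  .data = &demo_bt_info },
		{ }
	};
	MODULE_DEVICE_TABLE(of, demo_of_match);

	static int demo_probe(struct platform_device *pdev)
	{
		/* Returns the .data pointer of whichever table entry matched. */
		const struct demo_match_info *info = device_get_match_data(&pdev->dev);

		if (!info)
			return -ENODEV;

		dev_info(&pdev->dev, "probing a %s interface\n", info->name);
		return 0;
	}

	static struct platform_driver demo_driver = {
		.driver = {
			.name = "demo-ipmi-like",
			.of_match_table = demo_of_match,
		},
		.probe = demo_probe,
	};
	module_platform_driver(demo_driver);
	MODULE_LICENSE("GPL");

Compared with casting an enum through an unsigned long, the pointer-to-const form is type-safe and leaves room to attach more per-type data (like the struct ipmi_match_info introduced in ipmi_si.h above) without touching the users of the match tables.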
@@ -265,7 +266,7 @@ static int of_ipmi_probe(struct platform_device *pdev)
}
memset(&io, 0, sizeof(io));
- io.si_type = (enum si_type)device_get_match_data(&pdev->dev);
+ io.si_info = device_get_match_data(&pdev->dev);
io.addr_source = SI_DEVICETREE;
io.irq_setup = ipmi_std_irq_setup;
@@ -296,7 +297,7 @@ static int find_slave_address(struct si_sm_io *io, int slave_addr)
{
#ifdef CONFIG_IPMI_DMI_DECODE
if (!slave_addr)
- slave_addr = ipmi_dmi_get_slave_addr(io->si_type,
+ slave_addr = ipmi_dmi_get_slave_addr(io->si_info->type,
io->addr_space,
io->addr_data);
#endif
@@ -335,13 +336,13 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
switch (tmp) {
case 1:
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
break;
case 2:
- io.si_type = SI_SMIC;
+ io.si_info = &ipmi_smic_si_info;
break;
case 3:
- io.si_type = SI_BT;
+ io.si_info = &ipmi_bt_si_info;
break;
case 4: /* SSIF, just ignore */
return -ENODEV;
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 0b45b07dec22..5bf038e620c7 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -481,8 +481,6 @@ static int ipmi_ssif_thread(void *data)
/* Wait for something to do */
result = wait_for_completion_interruptible(
&ssif_info->wake_thread);
- if (ssif_info->stopping)
- break;
if (result == -ERESTARTSYS)
continue;
init_completion(&ssif_info->wake_thread);
@@ -1270,10 +1268,8 @@ static void shutdown_ssif(void *send_info)
ssif_info->stopping = true;
timer_delete_sync(&ssif_info->watch_timer);
timer_delete_sync(&ssif_info->retry_timer);
- if (ssif_info->thread) {
- complete(&ssif_info->wake_thread);
+ if (ssif_info->thread)
kthread_stop(ssif_info->thread);
- }
}
static void ssif_remove(struct i2c_client *client)
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index f1875b2bebbc..ab759b492fdd 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -150,7 +150,7 @@ static char preaction[16] = "pre_none";
static unsigned char preop_val = WDOG_PREOP_NONE;
static char preop[16] = "preop_none";
-static DEFINE_SPINLOCK(ipmi_read_lock);
+static DEFINE_MUTEX(ipmi_read_mutex);
static char data_to_read;
static DECLARE_WAIT_QUEUE_HEAD(read_q);
static struct fasync_struct *fasync_q;
@@ -363,7 +363,7 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
{
struct kernel_ipmi_msg msg;
unsigned char data[6];
- int rv;
+ int rv = 0;
struct ipmi_system_interface_addr addr;
int hbnow = 0;
@@ -405,14 +405,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
msg.cmd = IPMI_WDOG_SET_TIMER;
msg.data = data;
msg.data_len = sizeof(data);
- rv = ipmi_request_supply_msgs(watchdog_user,
- (struct ipmi_addr *) &addr,
- 0,
- &msg,
- NULL,
- smi_msg,
- recv_msg,
- 1);
+ if (smi_msg)
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ smi_msg,
+ recv_msg,
+ 1);
+ else
+ ipmi_panic_request_and_wait(watchdog_user,
+ (struct ipmi_addr *) &addr, &msg);
if (rv)
pr_warn("set timeout error: %d\n", rv);
else if (send_heartbeat_now)
@@ -431,9 +435,7 @@ static int _ipmi_set_timeout(int do_heartbeat)
atomic_set(&msg_tofree, 2);
- rv = __ipmi_set_timeout(&smi_msg,
- &recv_msg,
- &send_heartbeat_now);
+ rv = __ipmi_set_timeout(&smi_msg, &recv_msg, &send_heartbeat_now);
if (rv) {
atomic_set(&msg_tofree, 0);
return rv;
@@ -460,27 +462,10 @@ static int ipmi_set_timeout(int do_heartbeat)
return rv;
}
-static atomic_t panic_done_count = ATOMIC_INIT(0);
-
-static void panic_smi_free(struct ipmi_smi_msg *msg)
-{
- atomic_dec(&panic_done_count);
-}
-static void panic_recv_free(struct ipmi_recv_msg *msg)
-{
- atomic_dec(&panic_done_count);
-}
-
-static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
- INIT_IPMI_SMI_MSG(panic_smi_free);
-static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
- INIT_IPMI_RECV_MSG(panic_recv_free);
-
static void panic_halt_ipmi_heartbeat(void)
{
struct kernel_ipmi_msg msg;
struct ipmi_system_interface_addr addr;
- int rv;
/*
* Don't reset the timer if we have the timer turned off, that
@@ -497,24 +482,10 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
- atomic_add(2, &panic_done_count);
- rv = ipmi_request_supply_msgs(watchdog_user,
- (struct ipmi_addr *) &addr,
- 0,
- &msg,
- NULL,
- &panic_halt_heartbeat_smi_msg,
- &panic_halt_heartbeat_recv_msg,
- 1);
- if (rv)
- atomic_sub(2, &panic_done_count);
+ ipmi_panic_request_and_wait(watchdog_user, (struct ipmi_addr *) &addr,
+ &msg);
}
-static struct ipmi_smi_msg panic_halt_smi_msg =
- INIT_IPMI_SMI_MSG(panic_smi_free);
-static struct ipmi_recv_msg panic_halt_recv_msg =
- INIT_IPMI_RECV_MSG(panic_recv_free);
-
/*
* Special call, doesn't claim any locks. This is only to be called
* at panic or halt time, in run-to-completion mode, when the caller
@@ -526,22 +497,13 @@ static void panic_halt_ipmi_set_timeout(void)
int send_heartbeat_now;
int rv;
- /* Wait for the messages to be free. */
- while (atomic_read(&panic_done_count) != 0)
- ipmi_poll_interface(watchdog_user);
- atomic_add(2, &panic_done_count);
- rv = __ipmi_set_timeout(&panic_halt_smi_msg,
- &panic_halt_recv_msg,
- &send_heartbeat_now);
+ rv = __ipmi_set_timeout(NULL, NULL, &send_heartbeat_now);
if (rv) {
- atomic_sub(2, &panic_done_count);
pr_warn("Unable to extend the watchdog timeout\n");
} else {
if (send_heartbeat_now)
panic_halt_ipmi_heartbeat();
}
- while (atomic_read(&panic_done_count) != 0)
- ipmi_poll_interface(watchdog_user);
}
static int __ipmi_heartbeat(void)
@@ -793,7 +755,7 @@ static ssize_t ipmi_read(struct file *file,
* Reading returns if the pretimeout has gone off, and it only does
* it once per pretimeout.
*/
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
if (!data_to_read) {
if (file->f_flags & O_NONBLOCK) {
rv = -EAGAIN;
@@ -804,9 +766,9 @@ static ssize_t ipmi_read(struct file *file,
add_wait_queue(&read_q, &wait);
while (!data_to_read && !signal_pending(current)) {
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
schedule();
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
}
remove_wait_queue(&read_q, &wait);
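The hunk above keeps the classic "sleep without holding the lock" dance while swapping the spinlock for a mutex: the flag is only examined under ipmi_read_mutex, but the mutex is dropped around schedule() so the pretimeout handler can take it to set data_to_read. A stand-alone sketch of that pattern (not from the patch; all demo_* identifiers are invented) looks like this:

	#include <linux/errno.h>
	#include <linux/mutex.h>
	#include <linux/sched.h>
	#include <linux/sched/signal.h>
	#include <linux/wait.h>

	static DEFINE_MUTEX(demo_lock);
	static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
	static int demo_data_ready;

	/* Producer: mark data available and wake any sleeping reader. */
	static void demo_signal(void)
	{
		mutex_lock(&demo_lock);
		demo_data_ready = 1;
		wake_up_interruptible(&demo_waitq);
		mutex_unlock(&demo_lock);
	}

	/* Consumer: sleep until data is ready or a signal arrives. */
	static int demo_wait_for_data(void)
	{
		DEFINE_WAIT(wait);
		int rv = 0;

		mutex_lock(&demo_lock);
		while (!demo_data_ready && !signal_pending(current)) {
			prepare_to_wait(&demo_waitq, &wait, TASK_INTERRUPTIBLE);
			mutex_unlock(&demo_lock);	/* never sleep holding the mutex */
			schedule();
			mutex_lock(&demo_lock);
		}
		finish_wait(&demo_waitq, &wait);

		if (demo_data_ready)
			demo_data_ready = 0;
		else
			rv = -ERESTARTSYS;		/* interrupted by a signal */
		mutex_unlock(&demo_lock);
		return rv;
	}

A mutex works here because both sides of the handshake now run in sleepable context; as the earlier hunks show, pretimeout delivery has moved into the smi_work workqueue rather than BH context.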
@@ -818,7 +780,7 @@ static ssize_t ipmi_read(struct file *file,
data_to_read = 0;
out:
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
if (rv == 0) {
if (copy_to_user(buf, &data_to_read, 1))
@@ -856,10 +818,10 @@ static __poll_t ipmi_poll(struct file *file, poll_table *wait)
poll_wait(file, &read_q, wait);
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
if (data_to_read)
mask |= (EPOLLIN | EPOLLRDNORM);
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
return mask;
}
@@ -932,13 +894,11 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data)
if (atomic_inc_and_test(&preop_panic_excl))
panic("Watchdog pre-timeout");
} else if (preop_val == WDOG_PREOP_GIVE_DATA) {
- unsigned long flags;
-
- spin_lock_irqsave(&ipmi_read_lock, flags);
+ mutex_lock(&ipmi_read_mutex);
data_to_read = 1;
wake_up_interruptible(&read_q);
kill_fasync(&fasync_q, SIGIO, POLL_IN);
- spin_unlock_irqrestore(&ipmi_read_lock, flags);
+ mutex_unlock(&ipmi_read_mutex);
}
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 169eed162a7f..48839958b0b1 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -61,29 +61,11 @@ static inline int page_is_allowed(unsigned long pfn)
{
return devmem_is_allowed(pfn);
}
-static inline int range_is_allowed(unsigned long pfn, unsigned long size)
-{
- u64 from = ((u64)pfn) << PAGE_SHIFT;
- u64 to = from + size;
- u64 cursor = from;
-
- while (cursor < to) {
- if (!devmem_is_allowed(pfn))
- return 0;
- cursor += PAGE_SIZE;
- pfn++;
- }
- return 1;
-}
#else
static inline int page_is_allowed(unsigned long pfn)
{
return 1;
}
-static inline int range_is_allowed(unsigned long pfn, unsigned long size)
-{
- return 1;
-}
#endif
static inline bool should_stop_iteration(void)
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index f7dd455dd0dd..dda466f9181a 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -315,7 +315,7 @@ static int __init misc_init(void)
goto fail_remove;
err = -EIO;
- if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
+ if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops))
goto fail_printk;
return 0;
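A quick aside on the helper swap above: register_chrdev() implicitly claims minors 0-255 on the given major, while __register_chrdev() takes an explicit base minor and count, which is how misc_init() now claims the whole minor range. A hedged sketch follows; the demo_fops structure and the major number are invented, only the helper's signature is the real one.

	#include <linux/fs.h>
	#include <linux/kdev_t.h>
	#include <linux/module.h>

	static const struct file_operations demo_fops = {
		.owner = THIS_MODULE,
	};

	static int __init demo_chrdev_init(void)
	{
		/* Old style, equivalent to __register_chrdev(242, 0, 256, ...):
		 *	register_chrdev(242, "demo", &demo_fops);
		 * New style: claim every minor on the major explicitly. */
		return __register_chrdev(242, 0, MINORMASK + 1, "demo", &demo_fops);
	}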
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 38f2fab29c56..b8b24b6ed3fe 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -309,11 +309,11 @@ static void crng_reseed(struct work_struct *work)
* key value, at index 4, so the state should always be zeroed out
* immediately after using in order to maintain forward secrecy.
* If the state cannot be erased in a timely manner, then it is
- * safer to set the random_data parameter to &chacha_state[4] so
- * that this function overwrites it before returning.
+ * safer to set the random_data parameter to &chacha_state->x[4]
+ * so that this function overwrites it before returning.
*/
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
- u32 chacha_state[CHACHA_STATE_WORDS],
+ struct chacha_state *chacha_state,
u8 *random_data, size_t random_data_len)
{
u8 first_block[CHACHA_BLOCK_SIZE];
@@ -321,8 +321,8 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
BUG_ON(random_data_len > 32);
chacha_init_consts(chacha_state);
- memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
- memset(&chacha_state[12], 0, sizeof(u32) * 4);
+ memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE);
+ memset(&chacha_state->x[12], 0, sizeof(u32) * 4);
chacha20_block(chacha_state, first_block);
memcpy(key, first_block, CHACHA_KEY_SIZE);
@@ -335,7 +335,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
* random data. It also returns up to 32 bytes on its own of random data
* that may be used; random_data_len may not be greater than 32.
*/
-static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+static void crng_make_state(struct chacha_state *chacha_state,
u8 *random_data, size_t random_data_len)
{
unsigned long flags;
@@ -395,7 +395,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
static void _get_random_bytes(void *buf, size_t len)
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
u8 tmp[CHACHA_BLOCK_SIZE];
size_t first_block_len;
@@ -403,26 +403,26 @@ static void _get_random_bytes(void *buf, size_t len)
return;
first_block_len = min_t(size_t, 32, len);
- crng_make_state(chacha_state, buf, first_block_len);
+ crng_make_state(&chacha_state, buf, first_block_len);
len -= first_block_len;
buf += first_block_len;
while (len) {
if (len < CHACHA_BLOCK_SIZE) {
- chacha20_block(chacha_state, tmp);
+ chacha20_block(&chacha_state, tmp);
memcpy(buf, tmp, len);
memzero_explicit(tmp, sizeof(tmp));
break;
}
- chacha20_block(chacha_state, buf);
- if (unlikely(chacha_state[12] == 0))
- ++chacha_state[13];
+ chacha20_block(&chacha_state, buf);
+ if (unlikely(chacha_state.x[12] == 0))
+ ++chacha_state.x[13];
len -= CHACHA_BLOCK_SIZE;
buf += CHACHA_BLOCK_SIZE;
}
- memzero_explicit(chacha_state, sizeof(chacha_state));
+ chacha_zeroize_state(&chacha_state);
}
/*
@@ -441,7 +441,7 @@ EXPORT_SYMBOL(get_random_bytes);
static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
u8 block[CHACHA_BLOCK_SIZE];
size_t ret = 0, copied;
@@ -453,21 +453,22 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
* bytes, in case userspace causes copy_to_iter() below to sleep
* forever, so that we still retain forward secrecy in that case.
*/
- crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+ crng_make_state(&chacha_state, (u8 *)&chacha_state.x[4],
+ CHACHA_KEY_SIZE);
/*
* However, if we're doing a read of len <= 32, we don't need to
* use chacha_state after, so we can simply return those bytes to
* the user directly.
*/
if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
- ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
+ ret = copy_to_iter(&chacha_state.x[4], CHACHA_KEY_SIZE, iter);
goto out_zero_chacha;
}
for (;;) {
- chacha20_block(chacha_state, block);
- if (unlikely(chacha_state[12] == 0))
- ++chacha_state[13];
+ chacha20_block(&chacha_state, block);
+ if (unlikely(chacha_state.x[12] == 0))
+ ++chacha_state.x[13];
copied = copy_to_iter(block, sizeof(block), iter);
ret += copied;
@@ -484,7 +485,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
memzero_explicit(block, sizeof(block));
out_zero_chacha:
- memzero_explicit(chacha_state, sizeof(chacha_state));
+ chacha_zeroize_state(&chacha_state);
return ret ? ret : -EFAULT;
}
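The random.c hunks above switch from a bare u32 array to a struct chacha_state and a dedicated zeroize helper. The sketch below shows the shape that usage implies (key words at x[4..11], block counter at x[12..13]); it is an assumption inferred from the code above, not a copy of the real definition in the chacha headers, and the demo_ prefix marks the invented names.

	#include <linux/string.h>
	#include <linux/types.h>

	#define DEMO_CHACHA_STATE_WORDS	16

	/* Assumed layout: x[0..3] constants, x[4..11] key, x[12..13] counter,
	 * x[14..15] nonce -- consistent with how the hunks above index it. */
	struct demo_chacha_state {
		u32 x[DEMO_CHACHA_STATE_WORDS];
	};

	/* Wipe key material in a way the compiler is not allowed to elide. */
	static inline void demo_chacha_zeroize_state(struct demo_chacha_state *state)
	{
		memzero_explicit(state, sizeof(*state));
	}

Wrapping the array in a struct also lets the compiler type-check call sites that previously passed a raw u32 pointer around.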
@@ -726,6 +727,7 @@ static void __cold _credit_init_bits(size_t bits)
static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
+ int m;
if (!bits)
return;
@@ -748,9 +750,9 @@ static void __cold _credit_init_bits(size_t bits)
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
- if (urandom_warning.missed)
- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
- urandom_warning.missed);
+ m = ratelimit_state_get_miss(&urandom_warning);
+ if (m)
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n", m);
} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
spin_lock_irqsave(&base_crng.lock, flags);
/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
@@ -1311,9 +1313,9 @@ static void __cold try_to_generate_entropy(void)
while (!crng_ready() && !signal_pending(current)) {
/*
* Check !timer_pending() and then ensure that any previous callback has finished
- * executing by checking try_to_del_timer_sync(), before queueing the next one.
+ * executing by checking timer_delete_sync_try(), before queueing the next one.
*/
- if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
+ if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) {
struct cpumask timer_cpus;
unsigned int num_cpus;
@@ -1353,7 +1355,7 @@ static void __cold try_to_generate_entropy(void)
mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
timer_delete_sync(&stack->timer);
- destroy_timer_on_stack(&stack->timer);
+ timer_destroy_on_stack(&stack->timer);
}
@@ -1466,7 +1468,7 @@ static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
if (!crng_ready()) {
if (!ratelimit_disable && maxwarn <= 0)
- ++urandom_warning.missed;
+ ratelimit_state_inc_miss(&urandom_warning);
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
--maxwarn;
pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index fe4f3a609934..dddd702b2454 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -234,5 +234,15 @@ config TCG_FTPM_TEE
help
This driver proxies for firmware TPM running in TEE.
+config TCG_SVSM
+ tristate "SNP SVSM vTPM interface"
+ depends on AMD_MEM_ENCRYPT
+ help
+ This is a driver for the AMD SVSM vTPM protocol that a SEV-SNP guest
+ OS can use to discover and talk to a vTPM emulated by the Secure VM
+ Service Module (SVSM) in the guest context, but at a more privileged
+ level (usually VMPL0). To compile this driver as a module, choose M
+ here; the module will be called tpm_svsm.
+
source "drivers/char/tpm/st33zp24/Kconfig"
endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 2b004df8c04b..9de1b3ea34a9 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -45,3 +45,4 @@ obj-$(CONFIG_TCG_CRB) += tpm_crb.o
obj-$(CONFIG_TCG_ARM_CRB_FFA) += tpm_crb_ffa.o
obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
+obj-$(CONFIG_TCG_SVSM) += tpm_svsm.o
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
index 12ee42a31c71..e7913b2853d5 100644
--- a/drivers/char/tpm/eventlog/tpm1.c
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -257,11 +257,8 @@ static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v)
(unsigned char *)(v + sizeof(struct tcpa_event));
eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
- if (!eventname) {
- printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
- __func__);
- return -EFAULT;
- }
+ if (!eventname)
+ return -ENOMEM;
/* 1st: PCR */
seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index));
diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c
index e49a19fea3bd..dc882fc9fa9e 100644
--- a/drivers/char/tpm/tpm-buf.c
+++ b/drivers/char/tpm/tpm-buf.c
@@ -201,7 +201,7 @@ static void tpm_buf_read(struct tpm_buf *buf, off_t *offset, size_t count, void
*/
u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset)
{
- u8 value;
+ u8 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
@@ -218,7 +218,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u8);
*/
u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset)
{
- u16 value;
+ u16 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
@@ -235,7 +235,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u16);
*/
u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset)
{
- u32 value;
+ u32 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
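The three small hunks above all make the same defensive change: the out value is pre-set to 0 so a failed or short tpm_buf_read() yields a defined result instead of uninitialized stack data. A generic sketch of the idea, with invented demo_* names:

	#include <linux/string.h>
	#include <linux/types.h>

	/* Copy @count bytes out of a bounded buffer; on failure, leave @out alone. */
	static bool demo_read_bytes(const u8 *buf, size_t len, size_t *off,
				    void *out, size_t count)
	{
		if (*off > len || count > len - *off)
			return false;
		memcpy(out, buf + *off, count);
		*off += count;
		return true;
	}

	static u16 demo_read_u16(const u8 *buf, size_t len, size_t *off)
	{
		u16 value = 0;	/* defined result even if the read fails */

		demo_read_bytes(buf, len, off, &value, sizeof(value));
		return value;
	}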
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index 3f89635ba5e8..7b5049b3d476 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -40,11 +40,6 @@
*
* These are the usage functions:
*
- * tpm2_start_auth_session() which allocates the opaque auth structure
- * and gets a session from the TPM. This must be called before
- * any of the following functions. The session is protected by a
- * session_key which is derived from a random salt value
- * encrypted to the NULL seed.
* tpm2_end_auth_session() kills the session and frees the resources.
* Under normal operation this function is done by
* tpm_buf_check_hmac_response(), so this is only to be used on
@@ -963,16 +958,13 @@ err:
}
/**
- * tpm2_start_auth_session() - create a HMAC authentication session with the TPM
- * @chip: the TPM chip structure to create the session with
+ * tpm2_start_auth_session() - Create an HMAC authentication session
+ * @chip: A TPM chip
*
- * This function loads the NULL seed from its saved context and starts
- * an authentication session on the null seed, fills in the
- * @chip->auth structure to contain all the session details necessary
- * for performing the HMAC, encrypt and decrypt operations and
- * returns. The NULL seed is flushed before this function returns.
+ * Loads the ephemeral key (null seed), and starts an HMAC authenticated
+ * session. The null seed is flushed before the return.
*
- * Return: zero on success or actual error encountered.
+ * Returns zero on success, or a POSIX error code.
*/
int tpm2_start_auth_session(struct tpm_chip *chip)
{
@@ -1024,7 +1016,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
/* hash algorithm for session */
tpm_buf_append_u16(&buf, TPM_ALG_SHA256);
- rc = tpm_transmit_cmd(chip, &buf, 0, "start auth session");
+ rc = tpm_ret_to_err(tpm_transmit_cmd(chip, &buf, 0, "StartAuthSession"));
tpm2_flush_context(chip, null_key);
if (rc == TPM2_RC_SUCCESS)
diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c
index 3169a87a56b6..4ead61f01299 100644
--- a/drivers/char/tpm/tpm_crb_ffa.c
+++ b/drivers/char/tpm/tpm_crb_ffa.c
@@ -38,9 +38,11 @@
* messages.
*
* All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP
- * are using the AArch32 SMC calling convention with register usage as
- * defined in FF-A specification:
- * w0: Function ID (0x8400006F or 0x84000070)
+ * are using the AArch32 or AArch64 SMC calling convention with register usage
+ * as defined in FF-A specification:
+ * w0: Function ID
+ * - for 32-bit: 0x8400006F or 0x84000070
+ * - for 64-bit: 0xC400006F or 0xC4000070
* w1: Source/Destination IDs
* w2: Reserved (MBZ)
* w3-w7: Implementation defined, free to be used below
@@ -68,7 +70,8 @@
#define CRB_FFA_GET_INTERFACE_VERSION 0x0f000001
/*
- * Return information on a given feature of the TPM service
+ * Notifies the TPM service that a TPM command or TPM locality request is
+ * ready to be processed, and allows the TPM service to process it.
* Call register usage:
* w3: Not used (MBZ)
* w4: TPM service function ID, CRB_FFA_START
@@ -105,7 +108,10 @@ struct tpm_crb_ffa {
u16 minor_version;
/* lock to protect sending of FF-A messages: */
struct mutex msg_data_lock;
- struct ffa_send_direct_data direct_msg_data;
+ union {
+ struct ffa_send_direct_data direct_msg_data;
+ struct ffa_send_direct_data2 direct_msg_data2;
+ };
};
static struct tpm_crb_ffa *tpm_crb_ffa;
@@ -185,18 +191,34 @@ static int __tpm_crb_ffa_send_recieve(unsigned long func_id,
msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops;
- memset(&tpm_crb_ffa->direct_msg_data, 0x00,
- sizeof(struct ffa_send_direct_data));
-
- tpm_crb_ffa->direct_msg_data.data1 = func_id;
- tpm_crb_ffa->direct_msg_data.data2 = a0;
- tpm_crb_ffa->direct_msg_data.data3 = a1;
- tpm_crb_ffa->direct_msg_data.data4 = a2;
+ if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
+ memset(&tpm_crb_ffa->direct_msg_data2, 0x00,
+ sizeof(struct ffa_send_direct_data2));
+
+ tpm_crb_ffa->direct_msg_data2.data[0] = func_id;
+ tpm_crb_ffa->direct_msg_data2.data[1] = a0;
+ tpm_crb_ffa->direct_msg_data2.data[2] = a1;
+ tpm_crb_ffa->direct_msg_data2.data[3] = a2;
+
+ ret = msg_ops->sync_send_receive2(tpm_crb_ffa->ffa_dev,
+ &tpm_crb_ffa->direct_msg_data2);
+ if (!ret)
+ ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data2.data[0]);
+ } else {
+ memset(&tpm_crb_ffa->direct_msg_data, 0x00,
+ sizeof(struct ffa_send_direct_data));
+
+ tpm_crb_ffa->direct_msg_data.data1 = func_id;
+ tpm_crb_ffa->direct_msg_data.data2 = a0;
+ tpm_crb_ffa->direct_msg_data.data3 = a1;
+ tpm_crb_ffa->direct_msg_data.data4 = a2;
+
+ ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev,
+ &tpm_crb_ffa->direct_msg_data);
+ if (!ret)
+ ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1);
+ }
- ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev,
- &tpm_crb_ffa->direct_msg_data);
- if (!ret)
- ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1);
return ret;
}
@@ -231,8 +253,13 @@ int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
rc = __tpm_crb_ffa_send_recieve(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00);
if (!rc) {
- *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
- *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
+ *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
+ *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
+ } else {
+ *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ }
}
return rc;
@@ -277,8 +304,9 @@ static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev)
tpm_crb_ffa = ERR_PTR(-ENODEV); // set tpm_crb_ffa so we can detect probe failure
- if (!ffa_partition_supports_direct_recv(ffa_dev)) {
- pr_err("TPM partition doesn't support direct message receive.\n");
+ if (!ffa_partition_supports_direct_recv(ffa_dev) &&
+ !ffa_partition_supports_direct_req2_recv(ffa_dev)) {
+ dev_warn(&ffa_dev->dev, "partition doesn't support direct message receive.\n");
return -EINVAL;
}
@@ -299,17 +327,17 @@ static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev)
rc = tpm_crb_ffa_get_interface_version(&tpm_crb_ffa->major_version,
&tpm_crb_ffa->minor_version);
if (rc) {
- pr_err("failed to get crb interface version. rc:%d", rc);
+ dev_err(&ffa_dev->dev, "failed to get crb interface version. rc:%d\n", rc);
goto out;
}
- pr_info("ABI version %u.%u", tpm_crb_ffa->major_version,
+ dev_info(&ffa_dev->dev, "ABI version %u.%u\n", tpm_crb_ffa->major_version,
tpm_crb_ffa->minor_version);
if (tpm_crb_ffa->major_version != CRB_FFA_VERSION_MAJOR ||
(tpm_crb_ffa->minor_version > 0 &&
tpm_crb_ffa->minor_version < CRB_FFA_VERSION_MINOR)) {
- pr_err("Incompatible ABI version");
+ dev_warn(&ffa_dev->dev, "Incompatible ABI version\n");
goto out;
}
diff --git a/drivers/char/tpm/tpm_svsm.c b/drivers/char/tpm/tpm_svsm.c
new file mode 100644
index 000000000000..4280edf427d6
--- /dev/null
+++ b/drivers/char/tpm/tpm_svsm.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ *
+ * Driver for the vTPM defined by the AMD SVSM spec [1].
+ *
+ * The specification defines a protocol that a SEV-SNP guest OS can use to
+ * discover and talk to a vTPM emulated by the Secure VM Service Module (SVSM)
+ * in the guest context, but at a more privileged level (usually VMPL0).
+ *
+ * [1] "Secure VM Service Module for SEV-SNP Guests"
+ * Publication # 58019 Revision: 1.00
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/tpm_svsm.h>
+
+#include <asm/sev.h>
+
+#include "tpm.h"
+
+struct tpm_svsm_priv {
+ void *buffer;
+};
+
+static int tpm_svsm_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev);
+ int ret;
+
+ ret = svsm_vtpm_cmd_request_fill(priv->buffer, 0, buf, len);
+ if (ret)
+ return ret;
+
+ /*
+ * The SVSM call uses the same buffer for the command and for the
+ * response, so after this call, the buffer will contain the response
+ * that can be used by .recv() op.
+ */
+ return snp_svsm_vtpm_send_command(priv->buffer);
+}
+
+static int tpm_svsm_recv(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev);
+
+ /*
+ * The internal buffer contains the response after we send the command
+ * to SVSM.
+ */
+ return svsm_vtpm_cmd_response_parse(priv->buffer, buf, len);
+}
+
+static struct tpm_class_ops tpm_chip_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = tpm_svsm_recv,
+ .send = tpm_svsm_send,
+};
+
+static int __init tpm_svsm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tpm_svsm_priv *priv;
+ struct tpm_chip *chip;
+ int err;
+
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * The maximum buffer supported is one page (see SVSM_VTPM_MAX_BUFFER
+ * in tpm_svsm.h).
+ */
+ priv->buffer = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+ if (!priv->buffer)
+ return -ENOMEM;
+
+ chip = tpmm_chip_alloc(dev, &tpm_chip_ops);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ err = tpm2_probe(chip);
+ if (err)
+ return err;
+
+ err = tpm_chip_register(chip);
+ if (err)
+ return err;
+
+ dev_info(dev, "SNP SVSM vTPM %s device\n",
+ (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2");
+
+ return 0;
+}
+
+static void __exit tpm_svsm_remove(struct platform_device *pdev)
+{
+ struct tpm_chip *chip = platform_get_drvdata(pdev);
+
+ tpm_chip_unregister(chip);
+}
+
+/*
+ * tpm_svsm_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound
+ * at runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver tpm_svsm_driver __refdata = {
+ .remove = __exit_p(tpm_svsm_remove),
+ .driver = {
+ .name = "tpm-svsm",
+ },
+};
+
+module_platform_driver_probe(tpm_svsm_driver, tpm_svsm_probe);
+
+MODULE_DESCRIPTION("SNP SVSM vTPM Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:tpm-svsm");
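A minimal sketch of how the two ops above are expected to be driven, assuming only what the driver's comments state: .send() fills the shared page and issues the SVSM call, and .recv() parses the response left in the same page, returning the response length or a negative errno. The wrapper name below is illustrative and not part of the patch; it assumes the same headers as the driver above.

/* Illustrative only: one command/response round trip through the shared page. */
static int tpm_svsm_roundtrip(struct tpm_chip *chip,
			      u8 *cmd, size_t cmd_len,
			      u8 *rsp, size_t rsp_len)
{
	int rc;

	rc = chip->ops->send(chip, cmd, cmd_len);	/* fill the buffer and issue the SVSM call */
	if (rc)
		return rc;

	return chip->ops->recv(chip, rsp, rsp_len);	/* parse the response left in the buffer */
}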
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 970d02c337c7..6c3aa480396b 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -54,7 +54,7 @@ enum tis_int_flags {
enum tis_defaults {
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
- TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+ TIS_LONG_TIMEOUT = 4000, /* 4 secs */
TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */
TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */
};
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 5f04951d0dd4..088182e54deb 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1576,8 +1576,8 @@ static void handle_control_message(struct virtio_device *vdev,
break;
case VIRTIO_CONSOLE_RESIZE: {
struct {
- __u16 rows;
- __u16 cols;
+ __virtio16 cols;
+ __virtio16 rows;
} size;
if (!is_console_port(port))
@@ -1585,7 +1585,8 @@ static void handle_control_message(struct virtio_device *vdev,
memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
sizeof(size));
- set_console_size(port, size.rows, size.cols);
+ set_console_size(port, virtio16_to_cpu(vdev, size.rows),
+ virtio16_to_cpu(vdev, size.cols));
port->cons.hvc->irq_requested = 1;
resize_console(port);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 713573b6c86c..19c1ed280fd7 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -517,6 +517,7 @@ source "drivers/clk/samsung/Kconfig"
source "drivers/clk/sifive/Kconfig"
source "drivers/clk/socfpga/Kconfig"
source "drivers/clk/sophgo/Kconfig"
+source "drivers/clk/spacemit/Kconfig"
source "drivers/clk/sprd/Kconfig"
source "drivers/clk/starfive/Kconfig"
source "drivers/clk/sunxi/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index bf4bd45adc3a..42867cd37c33 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -145,6 +145,7 @@ obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_CLK_SIFIVE) += sifive/
obj-y += socfpga/
obj-y += sophgo/
+obj-y += spacemit/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 2b0ea882f1e4..0171e6b2bfca 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -53,24 +53,6 @@ static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}
-/*
- * Build a scaled divider value as close as possible to the
- * given whole part (div_value) and fractional part (expressed
- * in billionths).
- */
-u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
-{
- u64 combined;
-
- BUG_ON(!div_value);
- BUG_ON(billionths >= BILLION);
-
- combined = (u64)div_value * BILLION + billionths;
- combined <<= div->u.s.frac_width;
-
- return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
-}
-
/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index e09655024ac2..348a3454ce40 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -492,8 +492,6 @@ extern struct clk_ops kona_peri_clk_ops;
/* Externally visible functions */
extern u64 scaled_div_max(struct bcm_clk_div *div);
-extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
- u32 billionths);
extern void __init kona_dt_ccu_setup(struct ccu_data *ccu,
struct device_node *node);
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 0e1fe3759530..8e4fde03ed23 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -286,6 +286,8 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
init.name = devm_kasprintf(rpi->dev, GFP_KERNEL,
"fw-clk-%s",
rpi_firmware_clk_names[id]);
+ if (!init.name)
+ return ERR_PTR(-ENOMEM);
init.ops = &raspberrypi_firmware_clk_ops;
init.flags = CLK_GET_RATE_NOCACHE;
@@ -480,4 +482,3 @@ module_platform_driver(raspberrypi_clk_driver);
MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>");
MODULE_DESCRIPTION("Raspberry Pi firmware clock driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:raspberrypi-clk");
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 014db6386624..8ddf3a9a53df 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -137,6 +137,8 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
+ clk_data->num = S2MPS11_CLKS_NUM;
+
switch (hwid) {
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
@@ -186,7 +188,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
clk_data->hws[i] = &s2mps11_clks[i].hw;
}
- clk_data->num = S2MPS11_CLKS_NUM;
of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get,
clk_data);
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 6807a2efa93b..bfb6bbdc036c 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -763,13 +763,14 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
return PTR_ERR(clk);
}
- child = of_get_child_by_name(node, "pllout");
- if (of_device_is_available(child))
+ child = of_get_available_child_by_name(node, "pllout");
+ if (child) {
of_clk_add_provider(child, of_clk_src_simple_get, clk);
- of_node_put(child);
+ of_node_put(child);
+ }
- child = of_get_child_by_name(node, "sysclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "sysclk");
+ if (child) {
struct clk_onecell_data *clk_data;
struct clk **clks;
int n_clks = max_sysclk_id + 1;
@@ -803,11 +804,11 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
clks[(*div_info)->id] = clk;
}
of_clk_add_provider(child, of_clk_src_onecell_get, clk_data);
+ of_node_put(child);
}
- of_node_put(child);
- child = of_get_child_by_name(node, "auxclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "auxclk");
+ if (child) {
char child_name[MAX_NAME_SIZE];
snprintf(child_name, MAX_NAME_SIZE, "%s_auxclk", info->name);
@@ -818,11 +819,12 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
child_name, PTR_ERR(clk));
else
of_clk_add_provider(child, of_clk_src_simple_get, clk);
+
+ of_node_put(child);
}
- of_node_put(child);
- child = of_get_child_by_name(node, "obsclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "obsclk");
+ if (child) {
if (obsclk_info)
clk = davinci_pll_obsclk_register(dev, obsclk_info, base);
else
@@ -833,8 +835,8 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
PTR_ERR(clk));
else
of_clk_add_provider(child, of_clk_src_simple_get, clk);
+ of_node_put(child);
}
- of_node_put(child);
return 0;
}
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index be2e3a5f8336..ff003dc5ab20 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -55,7 +55,7 @@ config COMMON_CLK_MESON_CPU_DYNDIV
config COMMON_CLK_MESON8B
bool "Meson8 SoC Clock controller support"
depends on ARM
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_MPLL
@@ -70,7 +70,7 @@ config COMMON_CLK_MESON8B
config COMMON_CLK_GXBB
tristate "GXBB and GXL SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_VID_PLL_DIV
@@ -86,7 +86,7 @@ config COMMON_CLK_GXBB
config COMMON_CLK_AXG
tristate "AXG SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
@@ -136,7 +136,7 @@ config COMMON_CLK_A1_PERIPHERALS
config COMMON_CLK_C3_PLL
tristate "Amlogic C3 PLL clock controller"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_CLKC_UTILS
@@ -149,7 +149,7 @@ config COMMON_CLK_C3_PLL
config COMMON_CLK_C3_PERIPHERALS
tristate "Amlogic C3 peripherals clock controller"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_CLKC_UTILS
@@ -163,7 +163,7 @@ config COMMON_CLK_C3_PERIPHERALS
config COMMON_CLK_G12A
tristate "G12 and SM1 SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
@@ -181,7 +181,7 @@ config COMMON_CLK_G12A
config COMMON_CLK_S4_PLL
tristate "S4 SoC PLL clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
@@ -194,7 +194,7 @@ config COMMON_CLK_S4_PLL
config COMMON_CLK_S4_PERIPHERALS
tristate "S4 SoC peripherals clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index ceabebb1863d..d9e546e006d7 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -4093,6 +4093,7 @@ static const struct clk_parent_data spicc_sclk_parent_data[] = {
{ .hw = &g12a_clk81.hw },
{ .hw = &g12a_fclk_div4.hw },
{ .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_fclk_div2.hw },
{ .hw = &g12a_fclk_div5.hw },
{ .hw = &g12a_fclk_div7.hw },
};
diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c
index 76ece6c4a969..3ba01622d8f0 100644
--- a/drivers/clk/qcom/apcs-sdx55.c
+++ b/drivers/clk/qcom/apcs-sdx55.c
@@ -111,7 +111,11 @@ static int qcom_apcs_sdx55_clk_probe(struct platform_device *pdev)
* driver, there seems to be no better place to do this. So do it here!
*/
cpu_dev = get_cpu_device(0);
- dev_pm_domain_attach(cpu_dev, true);
+ ret = dev_pm_domain_attach(cpu_dev, true);
+ if (ret) {
+ dev_err_probe(dev, ret, "can't get PM domain: %d\n", ret);
+ goto err;
+ }
return 0;
diff --git a/drivers/clk/qcom/camcc-sa8775p.c b/drivers/clk/qcom/camcc-sa8775p.c
index 11bd2e234811..50e5a131261b 100644
--- a/drivers/clk/qcom/camcc-sa8775p.c
+++ b/drivers/clk/qcom/camcc-sa8775p.c
@@ -10,7 +10,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <dt-bindings/clock/qcom,sa8775p-camcc.h>
+#include <dt-bindings/clock/qcom,qcs8300-camcc.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
@@ -1681,6 +1681,24 @@ static struct clk_branch cam_cc_sm_obs_clk = {
},
};
+static struct clk_branch cam_cc_titan_top_accu_shift_clk = {
+ .halt_reg = 0x131f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x131f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_titan_top_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc cam_cc_titan_top_gdsc = {
.gdscr = 0x131bc,
.en_rest_wait_val = 0x2,
@@ -1775,6 +1793,7 @@ static struct clk_regmap *cam_cc_sa8775p_clocks[] = {
[CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
[CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
[CAM_CC_SM_OBS_CLK] = &cam_cc_sm_obs_clk.clkr,
+ [CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] = NULL,
[CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
[CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr,
};
@@ -1811,6 +1830,7 @@ static const struct qcom_cc_desc cam_cc_sa8775p_desc = {
};
static const struct of_device_id cam_cc_sa8775p_match_table[] = {
+ { .compatible = "qcom,qcs8300-camcc" },
{ .compatible = "qcom,sa8775p-camcc" },
{ }
};
@@ -1841,10 +1861,83 @@ static int cam_cc_sa8775p_probe(struct platform_device *pdev)
clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- /* Keep some clocks always enabled */
- qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */
- qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */
+ if (device_is_compatible(&pdev->dev, "qcom,qcs8300-camcc")) {
+ cam_cc_camnoc_axi_clk_src.cmd_rcgr = 0x13154;
+ cam_cc_camnoc_axi_clk.halt_reg = 0x1316c;
+ cam_cc_camnoc_axi_clk.clkr.enable_reg = 0x1316c;
+ cam_cc_camnoc_dcd_xo_clk.halt_reg = 0x13174;
+ cam_cc_camnoc_dcd_xo_clk.clkr.enable_reg = 0x13174;
+
+ cam_cc_csi0phytimer_clk_src.cmd_rcgr = 0x15054;
+ cam_cc_csi1phytimer_clk_src.cmd_rcgr = 0x15078;
+ cam_cc_csi2phytimer_clk_src.cmd_rcgr = 0x15098;
+ cam_cc_csid_clk_src.cmd_rcgr = 0x13134;
+
+ cam_cc_mclk0_clk_src.cmd_rcgr = 0x15000;
+ cam_cc_mclk1_clk_src.cmd_rcgr = 0x1501c;
+ cam_cc_mclk2_clk_src.cmd_rcgr = 0x15038;
+
+ cam_cc_fast_ahb_clk_src.cmd_rcgr = 0x13104;
+ cam_cc_slow_ahb_clk_src.cmd_rcgr = 0x1311c;
+ cam_cc_xo_clk_src.cmd_rcgr = 0x131b8;
+ cam_cc_sleep_clk_src.cmd_rcgr = 0x131d4;
+
+ cam_cc_core_ahb_clk.halt_reg = 0x131b4;
+ cam_cc_core_ahb_clk.clkr.enable_reg = 0x131b4;
+
+ cam_cc_cpas_ahb_clk.halt_reg = 0x130f4;
+ cam_cc_cpas_ahb_clk.clkr.enable_reg = 0x130f4;
+ cam_cc_cpas_fast_ahb_clk.halt_reg = 0x130fc;
+ cam_cc_cpas_fast_ahb_clk.clkr.enable_reg = 0x130fc;
+
+ cam_cc_csi0phytimer_clk.halt_reg = 0x1506c;
+ cam_cc_csi0phytimer_clk.clkr.enable_reg = 0x1506c;
+ cam_cc_csi1phytimer_clk.halt_reg = 0x15090;
+ cam_cc_csi1phytimer_clk.clkr.enable_reg = 0x15090;
+ cam_cc_csi2phytimer_clk.halt_reg = 0x150b0;
+ cam_cc_csi2phytimer_clk.clkr.enable_reg = 0x150b0;
+ cam_cc_csid_clk.halt_reg = 0x1314c;
+ cam_cc_csid_clk.clkr.enable_reg = 0x1314c;
+ cam_cc_csid_csiphy_rx_clk.halt_reg = 0x15074;
+ cam_cc_csid_csiphy_rx_clk.clkr.enable_reg = 0x15074;
+ cam_cc_csiphy0_clk.halt_reg = 0x15070;
+ cam_cc_csiphy0_clk.clkr.enable_reg = 0x15070;
+ cam_cc_csiphy1_clk.halt_reg = 0x15094;
+ cam_cc_csiphy1_clk.clkr.enable_reg = 0x15094;
+ cam_cc_csiphy2_clk.halt_reg = 0x150b4;
+ cam_cc_csiphy2_clk.clkr.enable_reg = 0x150b4;
+
+ cam_cc_mclk0_clk.halt_reg = 0x15018;
+ cam_cc_mclk0_clk.clkr.enable_reg = 0x15018;
+ cam_cc_mclk1_clk.halt_reg = 0x15034;
+ cam_cc_mclk1_clk.clkr.enable_reg = 0x15034;
+ cam_cc_mclk2_clk.halt_reg = 0x15050;
+ cam_cc_mclk2_clk.clkr.enable_reg = 0x15050;
+ cam_cc_qdss_debug_xo_clk.halt_reg = 0x1319c;
+ cam_cc_qdss_debug_xo_clk.clkr.enable_reg = 0x1319c;
+
+ cam_cc_titan_top_gdsc.gdscr = 0x131a0;
+
+ cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSIPHY3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] =
+ &cam_cc_titan_top_accu_shift_clk.clkr;
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13178); /* CAM_CC_CAMNOC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131d0); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_SLEEP_CLK */
+ } else {
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */
+ }
ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sa8775p_desc, regmap);
diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c
index 1871970fb046..8aac97d29ce3 100644
--- a/drivers/clk/qcom/camcc-sm6350.c
+++ b/drivers/clk/qcom/camcc-sm6350.c
@@ -1695,6 +1695,9 @@ static struct clk_branch camcc_sys_tmr_clk = {
static struct gdsc bps_gdsc = {
.gdscr = 0x6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "bps_gdsc",
},
@@ -1704,6 +1707,9 @@ static struct gdsc bps_gdsc = {
static struct gdsc ipe_0_gdsc = {
.gdscr = 0x7004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ipe_0_gdsc",
},
@@ -1713,6 +1719,9 @@ static struct gdsc ipe_0_gdsc = {
static struct gdsc ife_0_gdsc = {
.gdscr = 0x9004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_0_gdsc",
},
@@ -1721,6 +1730,9 @@ static struct gdsc ife_0_gdsc = {
static struct gdsc ife_1_gdsc = {
.gdscr = 0xa004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_1_gdsc",
},
@@ -1729,6 +1741,9 @@ static struct gdsc ife_1_gdsc = {
static struct gdsc ife_2_gdsc = {
.gdscr = 0xb004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_2_gdsc",
},
@@ -1737,6 +1752,9 @@ static struct gdsc ife_2_gdsc = {
static struct gdsc titan_top_gdsc = {
.gdscr = 0x14004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "titan_top_gdsc",
},
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index c7675930fde1..00fb3e53a388 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -66,6 +66,8 @@ struct clk_rpmh {
struct clk_rpmh_desc {
struct clk_hw **clks;
size_t num_clks;
+ /* RPMh clkaN clocks are optional for this platform */
+ bool clka_optional;
};
static DEFINE_MUTEX(rpmh_clk_lock);
@@ -648,6 +650,7 @@ static struct clk_hw *sm8550_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8550 = {
.clks = sm8550_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8550_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *sm8650_rpmh_clocks[] = {
@@ -679,6 +682,7 @@ static struct clk_hw *sm8650_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8650 = {
.clks = sm8650_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8650_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *sc7280_rpmh_clocks[] = {
@@ -847,6 +851,7 @@ static struct clk_hw *sm8750_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8750 = {
.clks = sm8750_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8750_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
@@ -890,6 +895,12 @@ static int clk_rpmh_probe(struct platform_device *pdev)
rpmh_clk = to_clk_rpmh(hw_clks[i]);
res_addr = cmd_db_read_addr(rpmh_clk->res_name);
if (!res_addr) {
+ hw_clks[i] = NULL;
+
+ if (desc->clka_optional &&
+ !strncmp(rpmh_clk->res_name, "clka", sizeof("clka") - 1))
+ continue;
+
dev_err(&pdev->dev, "missing RPMh resource address for %s\n",
rpmh_clk->res_name);
return -ENODEV;
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index e703ecf00e44..b0bd163a449c 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -681,6 +681,9 @@ static struct clk_branch disp_cc_xo_clk = {
static struct gdsc mdss_gdsc = {
.gdscr = 0x1004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "mdss_gdsc",
},
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
index 7431c9a65044..45193b3d714b 100644
--- a/drivers/clk/qcom/gcc-msm8939.c
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -432,7 +432,7 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_gpll6_sleep_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 },
{ P_GPLL1_AUX, 2 },
- { P_GPLL6, 2 },
+ { P_GPLL6, 3 },
{ P_SLEEP_CLK, 6 },
};
@@ -1113,7 +1113,7 @@ static struct clk_rcg2 jpeg0_clk_src = {
};
static const struct freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = {
- F(24000000, P_GPLL0, 1, 1, 45),
+ F(24000000, P_GPLL6, 1, 1, 45),
F(66670000, P_GPLL0, 12, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index 74346dc02606..a4d6dff9d0f7 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -2320,6 +2320,9 @@ static struct clk_branch gcc_video_xo_clk = {
static struct gdsc usb30_prim_gdsc = {
.gdscr = 0x1a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "usb30_prim_gdsc",
},
@@ -2328,6 +2331,9 @@ static struct gdsc usb30_prim_gdsc = {
static struct gdsc ufs_phy_gdsc = {
.gdscr = 0x3a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ufs_phy_gdsc",
},
diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
index fa1672c4e7d8..24f98062b9dd 100644
--- a/drivers/clk/qcom/gcc-sm8650.c
+++ b/drivers/clk/qcom/gcc-sm8650.c
@@ -3817,7 +3817,9 @@ static int gcc_sm8650_probe(struct platform_device *pdev)
qcom_branch_set_clk_en(regmap, 0x32004); /* GCC_VIDEO_AHB_CLK */
qcom_branch_set_clk_en(regmap, 0x32030); /* GCC_VIDEO_XO_CLK */
+ /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */
qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
/* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
regmap_write(regmap, 0x52150, 0x0);
diff --git a/drivers/clk/qcom/gcc-sm8750.c b/drivers/clk/qcom/gcc-sm8750.c
index b36d70976095..8092dd6b37b5 100644
--- a/drivers/clk/qcom/gcc-sm8750.c
+++ b/drivers/clk/qcom/gcc-sm8750.c
@@ -3244,8 +3244,9 @@ static int gcc_sm8750_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x52010, BIT(20), BIT(20));
regmap_update_bits(regmap, 0x52010, BIT(21), BIT(21));
- /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
+ /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */
qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
return qcom_cc_really_probe(&pdev->dev, &gcc_sm8750_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 009f39139b64..3e44757e25d3 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -6753,6 +6753,10 @@ static int gcc_x1e80100_probe(struct platform_device *pdev)
/* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
regmap_write(regmap, 0x52224, 0x0);
+ /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
+
return qcom_cc_really_probe(&pdev->dev, &gcc_x1e80100_desc, regmap);
}
diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
index 35ed0500bc59..ee89c42413f8 100644
--- a/drivers/clk/qcom/gpucc-sm6350.c
+++ b/drivers/clk/qcom/gpucc-sm6350.c
@@ -413,6 +413,9 @@ static struct clk_branch gpu_cc_gx_vsense_clk = {
static struct gdsc gpu_cx_gdsc = {
.gdscr = 0x106c,
.gds_hw_ctrl = 0x1540,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x8,
.pd = {
.name = "gpu_cx_gdsc",
},
@@ -423,6 +426,9 @@ static struct gdsc gpu_cx_gdsc = {
static struct gdsc gpu_gx_gdsc = {
.gdscr = 0x100c,
.clamp_io_ctrl = 0x1508,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
.pd = {
.name = "gpu_gx_gdsc",
.power_on = gdsc_gx_do_nothing_enable,
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 5a4bc3f94d49..50c20119d12a 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -41,6 +41,7 @@ config CLK_RENESAS
select CLK_R9A08G045 if ARCH_R9A08G045
select CLK_R9A09G011 if ARCH_R9A09G011
select CLK_R9A09G047 if ARCH_R9A09G047
+ select CLK_R9A09G056 if ARCH_R9A09G056
select CLK_R9A09G057 if ARCH_R9A09G057
select CLK_SH73A0 if ARCH_SH73A0
@@ -199,6 +200,10 @@ config CLK_R9A09G047
bool "RZ/G3E clock support" if COMPILE_TEST
select CLK_RZV2H
+config CLK_R9A09G056
+ bool "RZ/V2N clock support" if COMPILE_TEST
+ select CLK_RZV2H
+
config CLK_R9A09G057
bool "RZ/V2H(P) clock support" if COMPILE_TEST
select CLK_RZV2H
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 2d6e746939c4..f9075bca6e95 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A08G045) += r9a08g045-cpg.o
obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o
obj-$(CONFIG_CLK_R9A09G047) += r9a09g047-cpg.o
+obj-$(CONFIG_CLK_R9A09G056) += r9a09g056-cpg.o
obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c
index e9cf4342d0cf..21699999cedd 100644
--- a/drivers/clk/renesas/r9a09g047-cpg.c
+++ b/drivers/clk/renesas/r9a09g047-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G047_IOTOP_0_SHCLK,
+ LAST_DT_CORE_CLK = R9A09G047_GBETH_1_CLK_PTP_REF_I,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -31,7 +31,14 @@ enum clk_ids {
CLK_PLLVDO,
/* Internal Core Clocks */
+ CLK_PLLCM33_DIV3,
+ CLK_PLLCM33_DIV4,
+ CLK_PLLCM33_DIV5,
CLK_PLLCM33_DIV16,
+ CLK_PLLCM33_GEAR,
+ CLK_SMUX2_XSPI_CLK0,
+ CLK_SMUX2_XSPI_CLK1,
+ CLK_PLLCM33_XSPI,
CLK_PLLCLN_DIV2,
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
@@ -41,6 +48,7 @@ enum clk_ids {
CLK_PLLDTY_ACPU_DIV4,
CLK_PLLDTY_DIV16,
CLK_PLLVDO_CRU0,
+ CLK_PLLVDO_GPU,
/* Module Clocks */
MOD_CLK_BASE,
@@ -60,6 +68,14 @@ static const struct clk_div_table dtable_2_4[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_16[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -69,6 +85,10 @@ static const struct clk_div_table dtable_2_64[] = {
{0, 0},
};
+/* Mux clock tables */
+static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" };
+static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" };
+
static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
@@ -79,12 +99,21 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
- DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
/* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3),
+ DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
+ DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
+
+ DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
+ DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
+ DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
+ dtable_2_16),
DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
@@ -96,6 +125,7 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
+ DEF_DDIV(".pllvdo_gpu", CLK_PLLVDO_GPU, CLK_PLLVDO, CDDIV3_DIVCTL1, dtable_2_64),
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G047_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
@@ -108,6 +138,7 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_DDIV("ca55_0_coreclk3", R9A09G047_CA55_0_CORECLK3, CLK_PLLCA55,
CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G047_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+ DEF_FIXED("spi_clk_spi", R9A09G047_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2),
};
static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
@@ -153,6 +184,12 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(10, BIT(14))),
DEF_MOD("canfd_0_clkc", CLK_PLLCLN_DIV20, 9, 14, 4, 30,
BUS_MSTOP(10, BIT(14))),
+ DEF_MOD("spi_hclk", CLK_PLLCM33_GEAR, 9, 15, 4, 31,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_aclk", CLK_PLLCM33_GEAR, 10, 0, 5, 0,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD_NO_PM("spi_clk_spix2", CLK_PLLCM33_XSPI, 10, 1, 5, 2,
+ BUS_MSTOP(4, BIT(5))),
DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
BUS_MSTOP(8, BIT(2))),
DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
@@ -183,6 +220,12 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(9, BIT(4))),
DEF_MOD("cru_0_pclk", CLK_PLLDTY_DIV16, 13, 4, 6, 20,
BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("ge3d_clk", CLK_PLLVDO_GPU, 15, 0, 7, 16,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("ge3d_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("ge3d_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
+ BUS_MSTOP(3, BIT(4))),
DEF_MOD("tsu_1_pclk", CLK_QEXTAL, 16, 10, 8, 10,
BUS_MSTOP(2, BIT(15))),
};
@@ -207,12 +250,17 @@ static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
DEF_RST(10, 1, 4, 18), /* CANFD_0_RSTP_N */
DEF_RST(10, 2, 4, 19), /* CANFD_0_RSTC_N */
+ DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
+ DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
+ DEF_RST(13, 13, 6, 14), /* GE3D_RESETN */
+ DEF_RST(13, 14, 6, 15), /* GE3D_AXI_RESETN */
+ DEF_RST(13, 15, 6, 16), /* GE3D_ACE_RESETN */
DEF_RST(15, 8, 7, 9), /* TSU_1_PRESETN */
};
diff --git a/drivers/clk/renesas/r9a09g056-cpg.c b/drivers/clk/renesas/r9a09g056-cpg.c
new file mode 100644
index 000000000000..e2712a25c43a
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g056-cpg.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2N CPG driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g056-cpg.h>
+
+#include "rzv2h-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G056_GBETH_1_CLK_PTP_REF_I,
+
+ /* External Input Clocks */
+ CLK_AUDIO_EXTAL,
+ CLK_RTXIN,
+ CLK_QEXTAL,
+
+ /* PLL Clocks */
+ CLK_PLLCM33,
+ CLK_PLLCLN,
+ CLK_PLLDTY,
+ CLK_PLLCA55,
+
+ /* Internal Core Clocks */
+ CLK_PLLCM33_DIV16,
+ CLK_PLLCLN_DIV2,
+ CLK_PLLCLN_DIV8,
+ CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV4,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_64[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {4, 64},
+ {0, 0},
+};
+
+static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
+ DEF_INPUT("rtxin", CLK_RTXIN),
+ DEF_INPUT("qextal", CLK_QEXTAL),
+
+ /* PLL Clocks */
+ DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
+
+ /* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+
+ DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
+ DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
+
+ DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+
+ /* Core Clocks */
+ DEF_FIXED("sys_0_pclk", R9A09G056_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+ DEF_DDIV("ca55_0_coreclk0", R9A09G056_CA55_0_CORE_CLK0, CLK_PLLCA55,
+ CDDIV1_DIVCTL0, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk1", R9A09G056_CA55_0_CORE_CLK1, CLK_PLLCA55,
+ CDDIV1_DIVCTL1, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk2", R9A09G056_CA55_0_CORE_CLK2, CLK_PLLCA55,
+ CDDIV1_DIVCTL2, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk3", R9A09G056_CA55_0_CORE_CLK3, CLK_PLLCA55,
+ CDDIV1_DIVCTL3, dtable_1_8),
+ DEF_FIXED("iotop_0_shclk", R9A09G056_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+};
+
+static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
+ DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
+ BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
+ BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
+ BUS_MSTOP(8, BIT(4))),
+};
+
+static const struct rzv2h_reset r9a09g056_resets[] __initconst = {
+ DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
+ DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
+ DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
+ DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+};
+
+const struct rzv2h_cpg_info r9a09g056_cpg_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r9a09g056_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g056_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g056_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g056_mod_clks),
+ .num_hw_mod_clks = 25 * 16,
+
+ /* Resets */
+ .resets = r9a09g056_resets,
+ .num_resets = ARRAY_SIZE(r9a09g056_resets),
+
+ .num_mstop_bits = 192,
+};
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
index d63eafbca780..3c40e36259fe 100644
--- a/drivers/clk/renesas/r9a09g057-cpg.c
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G057_IOTOP_0_SHCLK,
+ LAST_DT_CORE_CLK = R9A09G057_GBETH_1_CLK_PTP_REF_I,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -29,6 +29,7 @@ enum clk_ids {
CLK_PLLDTY,
CLK_PLLCA55,
CLK_PLLVDO,
+ CLK_PLLGPU,
/* Internal Core Clocks */
CLK_PLLCM33_DIV4,
@@ -40,6 +41,7 @@ enum clk_ids {
CLK_PLLDTY_ACPU,
CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV8,
CLK_PLLDTY_DIV16,
CLK_PLLDTY_RCPU,
CLK_PLLDTY_RCPU_DIV4,
@@ -47,6 +49,7 @@ enum clk_ids {
CLK_PLLVDO_CRU1,
CLK_PLLVDO_CRU2,
CLK_PLLVDO_CRU3,
+ CLK_PLLGPU_GEAR,
/* Module Clocks */
MOD_CLK_BASE,
@@ -85,8 +88,9 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
- DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
+ DEF_PLL(".pllgpu", CLK_PLLGPU, CLK_QEXTAL, PLLGPU),
/* Internal Core Clocks */
DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
@@ -101,6 +105,7 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div8", CLK_PLLDTY_DIV8, CLK_PLLDTY, 1, 8),
DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
DEF_DDIV(".plldty_rcpu", CLK_PLLDTY_RCPU, CLK_PLLDTY, CDDIV3_DIVCTL2, dtable_2_64),
DEF_FIXED(".plldty_rcpu_div4", CLK_PLLDTY_RCPU_DIV4, CLK_PLLDTY_RCPU, 1, 4),
@@ -110,6 +115,8 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV(".pllvdo_cru2", CLK_PLLVDO_CRU2, CLK_PLLVDO, CDDIV4_DIVCTL1, dtable_2_4),
DEF_DDIV(".pllvdo_cru3", CLK_PLLVDO_CRU3, CLK_PLLVDO, CDDIV4_DIVCTL2, dtable_2_4),
+ DEF_DDIV(".pllgpu_gear", CLK_PLLGPU_GEAR, CLK_PLLGPU, CDDIV3_DIVCTL1, dtable_2_64),
+
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G057_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
DEF_DDIV("ca55_0_coreclk0", R9A09G057_CA55_0_CORE_CLK0, CLK_PLLCA55,
@@ -121,6 +128,8 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV("ca55_0_coreclk3", R9A09G057_CA55_0_CORE_CLK3, CLK_PLLCA55,
CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G057_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+ DEF_FIXED("usb2_0_clk_core0", R9A09G057_USB2_0_CLK_CORE0, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb2_0_clk_core1", R9A09G057_USB2_0_CLK_CORE1, CLK_QEXTAL, 1, 1),
};
static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
@@ -214,6 +223,16 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(4))),
DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19,
+ BUS_MSTOP(7, BIT(7))),
+ DEF_MOD("usb2_0_u2h1_hclk", CLK_PLLDTY_DIV8, 11, 4, 5, 20,
+ BUS_MSTOP(7, BIT(8))),
+ DEF_MOD("usb2_0_u2p_exr_cpuclk", CLK_PLLDTY_ACPU_DIV4, 11, 5, 5, 21,
+ BUS_MSTOP(7, BIT(9))),
+ DEF_MOD("usb2_0_pclk_usbtst0", CLK_PLLDTY_ACPU_DIV4, 11, 6, 5, 22,
+ BUS_MSTOP(7, BIT(10))),
+ DEF_MOD("usb2_0_pclk_usbtst1", CLK_PLLDTY_ACPU_DIV4, 11, 7, 5, 23,
+ BUS_MSTOP(7, BIT(11))),
DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
BUS_MSTOP(9, BIT(4))),
DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
@@ -238,6 +257,12 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(9, BIT(7))),
DEF_MOD("cru_3_pclk", CLK_PLLDTY_DIV16, 13, 13, 6, 29,
BUS_MSTOP(9, BIT(7))),
+ DEF_MOD("gpu_0_clk", CLK_PLLGPU_GEAR, 15, 0, 7, 16,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
+ BUS_MSTOP(3, BIT(4))),
};
static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
@@ -275,6 +300,10 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */
+ DEF_RST(10, 13, 4, 30), /* USB2_0_U2H1_HRESETN */
+ DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
+ DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
@@ -287,6 +316,9 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(12, 14, 5, 31), /* CRU_3_PRESETN */
DEF_RST(12, 15, 6, 0), /* CRU_3_ARESETN */
DEF_RST(13, 0, 6, 1), /* CRU_3_S_RESETN */
+ DEF_RST(13, 13, 6, 14), /* GPU_0_RESETN */
+ DEF_RST(13, 14, 6, 15), /* GPU_0_AXI_RESETN */
+ DEF_RST(13, 15, 6, 16), /* GPU_0_ACE_RESETN */
};
const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index da021ee446ec..71431970d6e6 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -27,6 +27,7 @@
#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -204,7 +205,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
int error;
dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
spin_lock_irqsave(&priv->rmw_lock, flags);
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index b91dfbfb01e3..a8628f64a03b 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -27,6 +27,7 @@
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/units.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -1217,7 +1218,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
}
dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
value = bitmask << 16;
if (enable)
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index 2b9771ab2b3f..bcc496e8cbcd 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -25,6 +25,7 @@
#include <linux/pm_domain.h>
#include <linux/refcount.h>
#include <linux/reset-controller.h>
+#include <linux/string_choices.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -44,10 +45,18 @@
#define CPG_BUS_1_MSTOP (0xd00)
#define CPG_BUS_MSTOP(m) (CPG_BUS_1_MSTOP + ((m) - 1) * 4)
-#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), (val)))
-#define MDIV(val) FIELD_GET(GENMASK(15, 6), (val))
-#define PDIV(val) FIELD_GET(GENMASK(5, 0), (val))
-#define SDIV(val) FIELD_GET(GENMASK(2, 0), (val))
+#define CPG_PLL_STBY(x) ((x))
+#define CPG_PLL_STBY_RESETB BIT(0)
+#define CPG_PLL_STBY_RESETB_WEN BIT(16)
+#define CPG_PLL_CLK1(x) ((x) + 0x004)
+#define CPG_PLL_CLK1_KDIV(x) ((s16)FIELD_GET(GENMASK(31, 16), (x)))
+#define CPG_PLL_CLK1_MDIV(x) FIELD_GET(GENMASK(15, 6), (x))
+#define CPG_PLL_CLK1_PDIV(x) FIELD_GET(GENMASK(5, 0), (x))
+#define CPG_PLL_CLK2(x) ((x) + 0x008)
+#define CPG_PLL_CLK2_SDIV(x) FIELD_GET(GENMASK(2, 0), (x))
+#define CPG_PLL_MON(x) ((x) + 0x010)
+#define CPG_PLL_MON_RESETB BIT(0)
+#define CPG_PLL_MON_LOCK BIT(4)
#define DDIV_DIVCTL_WEN(shift) BIT((shift) + 16)
@@ -94,8 +103,7 @@ struct pll_clk {
struct rzv2h_cpg_priv *priv;
void __iomem *base;
struct clk_hw hw;
- unsigned int conf;
- unsigned int type;
+ struct pll pll;
};
#define to_pll(_hw) container_of(_hw, struct pll_clk, hw)
@@ -110,7 +118,7 @@ struct pll_clk {
* @on_index: register offset
* @on_bit: ON/MON bit
* @mon_index: monitor register offset
- * @mon_bit: montor bit
+ * @mon_bit: monitor bit
*/
struct mod_clock {
struct rzv2h_cpg_priv *priv;
@@ -140,27 +148,78 @@ struct ddiv_clk {
#define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div)
+static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ u32 val = readl(priv->base + CPG_PLL_MON(pll_clk->pll.offset));
+
+ /* Ensure both RESETB and LOCK bits are set */
+ return (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
+ (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK);
+}
+
+static int rzv2h_cpg_pll_clk_enable(struct clk_hw *hw)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ struct pll pll = pll_clk->pll;
+ u32 stby_offset;
+ u32 mon_offset;
+ u32 val;
+ int ret;
+
+ if (rzv2h_cpg_pll_clk_is_enabled(hw))
+ return 0;
+
+ stby_offset = CPG_PLL_STBY(pll.offset);
+ mon_offset = CPG_PLL_MON(pll.offset);
+
+ writel(CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB,
+ priv->base + stby_offset);
+
+ /*
+ * Ensure PLL enters into normal mode
+ *
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Since this latency might depend on external crystal or PLL rate,
+ * use a "super" safe timeout value.
+ */
+ ret = readl_poll_timeout_atomic(priv->base + mon_offset, val,
+ (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
+ (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK), 200, 2000);
+ if (ret)
+ dev_err(priv->dev, "Failed to enable PLL 0x%x/%pC\n",
+ stby_offset, hw->clk);
+
+ return ret;
+}
+
static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct pll_clk *pll_clk = to_pll(hw);
struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ struct pll pll = pll_clk->pll;
unsigned int clk1, clk2;
u64 rate;
- if (!PLL_CLK_ACCESS(pll_clk->conf))
+ if (!pll.has_clkn)
return 0;
- clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf));
- clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf));
+ clk1 = readl(priv->base + CPG_PLL_CLK1(pll.offset));
+ clk2 = readl(priv->base + CPG_PLL_CLK2(pll.offset));
- rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1),
- 16 + SDIV(clk2));
+ rate = mul_u64_u32_shr(parent_rate, (CPG_PLL_CLK1_MDIV(clk1) << 16) +
+ CPG_PLL_CLK1_KDIV(clk1), 16 + CPG_PLL_CLK2_SDIV(clk2));
- return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1));
+ return DIV_ROUND_CLOSEST_ULL(rate, CPG_PLL_CLK1_PDIV(clk1));
}
static const struct clk_ops rzv2h_cpg_pll_ops = {
+ .is_enabled = rzv2h_cpg_pll_clk_is_enabled,
+ .enable = rzv2h_cpg_pll_clk_enable,
.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
};
@@ -193,10 +252,9 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
init.num_parents = 1;
pll_clk->hw.init = &init;
- pll_clk->conf = core->cfg.conf;
+ pll_clk->pll = core->cfg.pll;
pll_clk->base = base;
pll_clk->priv = priv;
- pll_clk->type = core->type;
ret = devm_clk_hw_register(dev, &pll_clk->hw);
if (ret)
@@ -241,6 +299,9 @@ static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon
u32 bitmask = BIT(mon);
u32 val;
+ if (mon == CSDIV_NO_MON)
+ return 0;
+
return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
}
@@ -272,12 +333,6 @@ static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
writel(val, divider->reg);
ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
- if (ret)
- goto ddiv_timeout;
-
- spin_unlock_irqrestore(divider->lock, flags);
-
- return 0;
ddiv_timeout:
spin_unlock_irqrestore(divider->lock, flags);
@@ -320,7 +375,10 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
return ERR_PTR(-ENOMEM);
init.name = core->name;
- init.ops = &rzv2h_ddiv_clk_divider_ops;
+ if (cfg_ddiv.no_rmw)
+ init.ops = &clk_divider_ops;
+ else
+ init.ops = &rzv2h_ddiv_clk_divider_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -342,6 +400,24 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
return div->hw.clk;
}
+static struct clk * __init
+rzv2h_cpg_mux_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct smuxed mux = core->cfg.smux;
+ const struct clk_hw *clk_hw;
+
+ clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
+ core->parent_names, core->num_parents,
+ core->flag, priv->base + mux.offset,
+ mux.shift, mux.width,
+ core->mux_flags, &priv->rmw_lock);
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+
static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
void *data)
@@ -426,6 +502,9 @@ rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
case CLK_TYPE_DDIV:
clk = rzv2h_cpg_ddiv_clk_register(core, priv);
break;
+ case CLK_TYPE_SMUX:
+ clk = rzv2h_cpg_mux_clk_register(core, priv);
+ break;
default:
goto fail;
}
@@ -494,11 +573,14 @@ static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
if (clock->mon_index >= 0) {
offset = GET_CLK_MON_OFFSET(clock->mon_index);
bitmask = BIT(clock->mon_bit);
- } else {
- offset = GET_CLK_ON_OFFSET(clock->on_index);
- bitmask = BIT(clock->on_bit);
+
+ if (!(readl(priv->base + offset) & bitmask))
+ return 0;
}
+ offset = GET_CLK_ON_OFFSET(clock->on_index);
+ bitmask = BIT(clock->on_bit);
+
return readl(priv->base + offset) & bitmask;
}
@@ -514,7 +596,7 @@ static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
int error;
dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
if (enabled == enable)
return 0;
@@ -658,8 +740,8 @@ fail:
mod->name, PTR_ERR(clk));
}
-static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
{
struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
@@ -667,35 +749,31 @@ static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
u8 monbit = priv->resets[id].mon_bit;
u32 value = mask << 16;
- dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg);
+ dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
+ assert ? "assert" : "deassert", id, reg);
+ if (!assert)
+ value |= mask;
writel(value, priv->base + reg);
reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
mask = BIT(monbit);
return readl_poll_timeout_atomic(priv->base + reg, value,
- value & mask, 10, 200);
+ assert ? (value & mask) : !(value & mask),
+ 10, 200);
+}
+
+static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return __rzv2h_cpg_assert(rcdev, id, true);
}
static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
- unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
- u32 mask = BIT(priv->resets[id].reset_bit);
- u8 monbit = priv->resets[id].mon_bit;
- u32 value = (mask << 16) | mask;
-
- dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg);
-
- writel(value, priv->base + reg);
-
- reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
- mask = BIT(monbit);
-
- return readl_poll_timeout_atomic(priv->base + reg, value,
- !(value & mask), 10, 200);
+ return __rzv2h_cpg_assert(rcdev, id, false);
}
static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
@@ -967,18 +1045,24 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
}
static const struct of_device_id rzv2h_cpg_match[] = {
-#ifdef CONFIG_CLK_R9A09G057
- {
- .compatible = "renesas,r9a09g057-cpg",
- .data = &r9a09g057_cpg_info,
- },
-#endif
#ifdef CONFIG_CLK_R9A09G047
{
.compatible = "renesas,r9a09g047-cpg",
.data = &r9a09g047_cpg_info,
},
#endif
+#ifdef CONFIG_CLK_R9A09G056
+ {
+ .compatible = "renesas,r9a09g056-cpg",
+ .data = &r9a09g056_cpg_info,
+ },
+#endif
+#ifdef CONFIG_CLK_R9A09G057
+ {
+ .compatible = "renesas,r9a09g057-cpg",
+ .data = &r9a09g057_cpg_info,
+ },
+#endif
{ /* sentinel */ }
};
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
index 576a070763cb..9104b1cd276c 100644
--- a/drivers/clk/renesas/rzv2h-cpg.h
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -11,20 +11,51 @@
#include <linux/bitfield.h>
/**
+ * struct pll - Structure for PLL configuration
+ *
+ * @offset: STBY register offset
+ * @has_clkn: Flag to indicate if CLK1/2 are accessible or not
+ */
+struct pll {
+ unsigned int offset:9;
+ unsigned int has_clkn:1;
+};
+
+#define PLL_PACK(_offset, _has_clkn) \
+ ((struct pll){ \
+ .offset = _offset, \
+ .has_clkn = _has_clkn \
+ })
+
+#define PLLCA55 PLL_PACK(0x60, 1)
+#define PLLGPU PLL_PACK(0x120, 1)
+
+/**
* struct ddiv - Structure for dynamic switching divider
*
* @offset: register offset
* @shift: position of the divider bit
* @width: width of the divider
* @monbit: monitor bit in CPG_CLKSTATUS0 register
+ * @no_rmw: flag to indicate whether the register is written without
+ * read-modify-write (1: no RMW, 0: RMW)
*/
struct ddiv {
unsigned int offset:11;
unsigned int shift:4;
unsigned int width:4;
unsigned int monbit:5;
+ unsigned int no_rmw:1;
};
+/*
+ * On RZ/V2H(P), the dynamic divider clock supports up to 19 monitor bits,
+ * while on RZ/G3E, it supports up to 16 monitor bits. Use the maximum value
+ * `0x1f` to indicate that monitor bits are not supported for static divider
+ * clocks.
+ */
+#define CSDIV_NO_MON (0x1f)
+
#define DDIV_PACK(_offset, _shift, _width, _monbit) \
((struct ddiv){ \
.offset = _offset, \
@@ -33,10 +64,41 @@ struct ddiv {
.monbit = _monbit \
})
+#define DDIV_PACK_NO_RMW(_offset, _shift, _width, _monbit) \
+ ((struct ddiv){ \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .monbit = (_monbit), \
+ .no_rmw = 1 \
+ })
+
+/**
+ * struct smuxed - Structure for static muxed clocks
+ *
+ * @offset: register offset
+ * @shift: position of the mux selector field
+ * @width: width of the mux selector field
+ */
+struct smuxed {
+ unsigned int offset:11;
+ unsigned int shift:4;
+ unsigned int width:4;
+};
+
+#define SMUX_PACK(_offset, _shift, _width) \
+ ((struct smuxed){ \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ })
+
+#define CPG_SSEL1 (0x304)
#define CPG_CDDIV0 (0x400)
#define CPG_CDDIV1 (0x404)
#define CPG_CDDIV3 (0x40C)
#define CPG_CDDIV4 (0x410)
+#define CPG_CSDIV0 (0x500)
#define CDDIV0_DIVCTL1 DDIV_PACK(CPG_CDDIV0, 4, 3, 1)
#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2)
@@ -44,12 +106,18 @@ struct ddiv {
#define CDDIV1_DIVCTL1 DDIV_PACK(CPG_CDDIV1, 4, 2, 5)
#define CDDIV1_DIVCTL2 DDIV_PACK(CPG_CDDIV1, 8, 2, 6)
#define CDDIV1_DIVCTL3 DDIV_PACK(CPG_CDDIV1, 12, 2, 7)
+#define CDDIV3_DIVCTL1 DDIV_PACK(CPG_CDDIV3, 4, 3, 13)
#define CDDIV3_DIVCTL2 DDIV_PACK(CPG_CDDIV3, 8, 3, 14)
#define CDDIV3_DIVCTL3 DDIV_PACK(CPG_CDDIV3, 12, 1, 15)
#define CDDIV4_DIVCTL0 DDIV_PACK(CPG_CDDIV4, 0, 1, 16)
#define CDDIV4_DIVCTL1 DDIV_PACK(CPG_CDDIV4, 4, 1, 17)
#define CDDIV4_DIVCTL2 DDIV_PACK(CPG_CDDIV4, 8, 1, 18)
+#define CSDIV0_DIVCTL3 DDIV_PACK_NO_RMW(CPG_CSDIV0, 12, 2, CSDIV_NO_MON)
+
+#define SSEL1_SELCTL2 SMUX_PACK(CPG_SSEL1, 8, 1)
+#define SSEL1_SELCTL3 SMUX_PACK(CPG_SSEL1, 12, 1)
+
#define BUS_MSTOP_IDX_MASK GENMASK(31, 16)
#define BUS_MSTOP_BITS_MASK GENMASK(15, 0)
#define BUS_MSTOP(idx, mask) (FIELD_PREP_CONST(BUS_MSTOP_IDX_MASK, (idx)) | \
@@ -74,8 +142,13 @@ struct cpg_core_clk {
union {
unsigned int conf;
struct ddiv ddiv;
+ struct pll pll;
+ struct smuxed smux;
} cfg;
const struct clk_div_table *dtable;
+ const char * const *parent_names;
+ unsigned int num_parents;
+ u8 mux_flags;
u32 flag;
};
@@ -85,20 +158,15 @@ enum clk_types {
CLK_TYPE_FF, /* Fixed Factor Clock */
CLK_TYPE_PLL,
CLK_TYPE_DDIV, /* Dynamic Switching Divider */
+ CLK_TYPE_SMUX, /* Static Mux */
};
-/* BIT(31) indicates if CLK1/2 are accessible or not */
-#define PLL_CONF(n) (BIT(31) | ((n) & ~GENMASK(31, 16)))
-#define PLL_CLK_ACCESS(n) ((n) & BIT(31) ? 1 : 0)
-#define PLL_CLK1_OFFSET(n) ((n) & ~GENMASK(31, 16))
-#define PLL_CLK2_OFFSET(n) (((n) & ~GENMASK(31, 16)) + (0x4))
-
#define DEF_TYPE(_name, _id, _type...) \
{ .name = _name, .id = _id, .type = _type }
#define DEF_BASE(_name, _id, _type, _parent...) \
DEF_TYPE(_name, _id, _type, .parent = _parent)
-#define DEF_PLL(_name, _id, _parent, _conf) \
- DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.conf = _conf)
+#define DEF_PLL(_name, _id, _parent, _pll_packed) \
+ DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.pll = _pll_packed)
#define DEF_INPUT(_name, _id) \
DEF_TYPE(_name, _id, CLK_TYPE_IN)
#define DEF_FIXED(_name, _id, _parent, _mult, _div) \
@@ -109,6 +177,15 @@ enum clk_types {
.parent = _parent, \
.dtable = _dtable, \
.flag = CLK_DIVIDER_HIWORD_MASK)
+#define DEF_CSDIV(_name, _id, _parent, _ddiv_packed, _dtable) \
+ DEF_DDIV(_name, _id, _parent, _ddiv_packed, _dtable)
+#define DEF_SMUX(_name, _id, _smux_packed, _parent_names) \
+ DEF_TYPE(_name, _id, CLK_TYPE_SMUX, \
+ .cfg.smux = _smux_packed, \
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names), \
+ .flag = CLK_SET_RATE_PARENT, \
+ .mux_flags = CLK_MUX_HIWORD_MASK)
/**
* struct rzv2h_mod_clk - Module Clocks definitions
@@ -221,6 +298,7 @@ struct rzv2h_cpg_info {
};
extern const struct rzv2h_cpg_info r9a09g047_cpg_info;
+extern const struct rzv2h_cpg_info r9a09g056_cpg_info;
extern const struct rzv2h_cpg_info r9a09g057_cpg_info;
#endif /* __RENESAS_RZV2H_CPG_H__ */
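For orientation only (not part of this diff): the PLL_PACK/SMUX_PACK configs and the DEF_PLL/DEF_SMUX/DEF_CSDIV wrappers introduced above are meant to be consumed by per-SoC core-clock tables. A minimal sketch, assuming hypothetical clock IDs, parent names and divider table:

        /* Hypothetical excerpt of a per-SoC table (names and IDs are illustrative) */
        static const struct clk_div_table dtable_1_2_4_8[] = {
                { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 }, { 0, 0 },
        };

        static const char * const smux_sd_parents[] = { ".pll1_div2", ".pll2_div2" };

        static const struct cpg_core_clk example_core_clks[] __initconst = {
                /* PLL whose CLK1/CLK2 registers are accessible (has_clkn = 1) */
                DEF_PLL(".pllca55", CLK_PLL_CA55, CLK_EXTAL, PLLCA55),
                /* Static mux in CPG_SSEL1, written with hi-word write enable */
                DEF_SMUX(".smux_sd", CLK_SMUX_SD, SSEL1_SELCTL2, smux_sd_parents),
                /* Static divider without a CLKSTATUS0 monitor bit (CSDIV_NO_MON) */
                DEF_CSDIV(".csdiv_sd", CLK_CSDIV_SD, ".smux_sd", CSDIV0_DIVCTL3,
                          dtable_1_2_4_8),
        };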
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index e8ece20aebfd..c281a9738d9f 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_COMMON_CLK_ROCKCHIP) += clk-rockchip.o
clk-rockchip-y += clk.o
clk-rockchip-y += clk-pll.o
clk-rockchip-y += clk-cpu.o
+clk-rockchip-y += clk-gate-grf.o
clk-rockchip-y += clk-half-divider.o
clk-rockchip-y += clk-inverter.o
clk-rockchip-y += clk-mmc-phase.o
diff --git a/drivers/clk/rockchip/clk-gate-grf.c b/drivers/clk/rockchip/clk-gate-grf.c
new file mode 100644
index 000000000000..8122f471f391
--- /dev/null
+++ b/drivers/clk/rockchip/clk-gate-grf.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2025 Collabora Ltd.
+ * Author: Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+ *
+ * Certain clocks on Rockchip SoCs are "gated" behind an additional bit
+ * write in a GRF register, such as the SAI MCLKs on RK3576. This code
+ * implements a clock driver for these types of gates, based on regmaps.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+struct rockchip_gate_grf {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int reg;
+ unsigned int shift;
+ u8 flags;
+};
+
+#define to_gate_grf(_hw) container_of(_hw, struct rockchip_gate_grf, hw)
+
+static int rockchip_gate_grf_enable(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ u32 val = !(gate->flags & CLK_GATE_SET_TO_DISABLE) ? BIT(gate->shift) : 0;
+ u32 hiword = ((gate->flags & CLK_GATE_HIWORD_MASK) ? 1 : 0) << (gate->shift + 16);
+ int ret;
+
+ ret = regmap_update_bits(gate->regmap, gate->reg,
+ hiword | BIT(gate->shift), hiword | val);
+
+ return ret;
+}
+
+static void rockchip_gate_grf_disable(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ u32 val = !(gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : BIT(gate->shift);
+ u32 hiword = ((gate->flags & CLK_GATE_HIWORD_MASK) ? 1 : 0) << (gate->shift + 16);
+
+ regmap_update_bits(gate->regmap, gate->reg,
+ hiword | BIT(gate->shift), hiword | val);
+}
+
+static int rockchip_gate_grf_is_enabled(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ bool invert = !!(gate->flags & CLK_GATE_SET_TO_DISABLE);
+ int ret;
+
+ ret = regmap_test_bits(gate->regmap, gate->reg, BIT(gate->shift));
+ if (ret < 0)
+ ret = 0;
+
+ return invert ? 1 - ret : ret;
+
+}
+
+static const struct clk_ops rockchip_gate_grf_ops = {
+ .enable = rockchip_gate_grf_enable,
+ .disable = rockchip_gate_grf_disable,
+ .is_enabled = rockchip_gate_grf_is_enabled,
+};
+
+struct clk *rockchip_clk_register_gate_grf(const char *name,
+ const char *parent_name, unsigned long flags,
+ struct regmap *regmap, unsigned int reg, unsigned int shift,
+ u8 gate_flags)
+{
+ struct rockchip_gate_grf *gate;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ if (IS_ERR(regmap)) {
+ pr_err("%s: regmap not available\n", __func__);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = flags;
+ init.num_parents = parent_name ? 1 : 0;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.ops = &rockchip_gate_grf_ops;
+
+ gate->hw.init = &init;
+ gate->regmap = regmap;
+ gate->reg = reg;
+ gate->shift = shift;
+ gate->flags = gate_flags;
+
+ clk = clk_register(NULL, &gate->hw);
+ if (IS_ERR(clk))
+ kfree(gate);
+
+ return clk;
+}
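As a usage illustration (not part of this file): rockchip_clk_register_gate_grf() is normally reached through the GATE_GRF() branch macro added later in this series, but a direct call would look roughly like the sketch below, assuming a previously looked-up GRF regmap and the RK3576 IOC offset used further down in this series:

        /* Hypothetical direct call; real users go through GATE_GRF() branch entries */
        struct regmap *ioc_grf;
        struct clk *clk;

        ioc_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-ioc-grf");
        if (!IS_ERR(ioc_grf)) {
                clk = rockchip_clk_register_gate_grf("mclk_sai0_to_io",
                                                     "clk_sai0_mclkout", 0, ioc_grf,
                                                     0x6400 /* VCCIO_IOC_MISC_CON0 */,
                                                     0, CLK_GATE_HIWORD_MASK);
                if (IS_ERR(clk))
                        pr_warn("could not register GRF gate: %ld\n", PTR_ERR(clk));
        }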
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 91012078681b..b3ed8e7523e5 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -9,11 +9,14 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/regmap.h>
#include "clk.h"
struct rockchip_mmc_clock {
struct clk_hw hw;
void __iomem *reg;
+ struct regmap *grf;
+ int grf_reg;
int shift;
int cached_phase;
struct notifier_block clk_rate_change_nb;
@@ -54,7 +57,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
if (!rate)
return 0;
- raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
+ if (mmc_clock->grf)
+ regmap_read(mmc_clock->grf, mmc_clock->grf_reg, &raw_value);
+ else
+ raw_value = readl(mmc_clock->reg);
+
+ raw_value >>= mmc_clock->shift;
degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
@@ -134,8 +142,12 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
raw_value |= nineties;
- writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift),
- mmc_clock->reg);
+ raw_value = HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift);
+
+ if (mmc_clock->grf)
+ regmap_write(mmc_clock->grf, mmc_clock->grf_reg, raw_value);
+ else
+ writel(raw_value, mmc_clock->reg);
pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
clk_hw_get_name(hw), degrees, delay_num,
@@ -189,7 +201,9 @@ static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
- void __iomem *reg, int shift)
+ void __iomem *reg,
+ struct regmap *grf, int grf_reg,
+ int shift)
{
struct clk_init_data init;
struct rockchip_mmc_clock *mmc_clock;
@@ -208,6 +222,8 @@ struct clk *rockchip_clk_register_mmc(const char *name,
mmc_clock->hw.init = &init;
mmc_clock->reg = reg;
+ mmc_clock->grf = grf;
+ mmc_clock->grf_reg = grf_reg;
mmc_clock->shift = shift;
clk = clk_register(NULL, &mmc_clock->hw);
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 2c2abb3b4210..af74439a7457 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -1027,16 +1027,6 @@ static int rockchip_rk3588_pll_is_enabled(struct clk_hw *hw)
return !(pllcon & RK3588_PLLCON1_PWRDOWN);
}
-static int rockchip_rk3588_pll_init(struct clk_hw *hw)
-{
- struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
-
- if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
- return 0;
-
- return 0;
-}
-
static const struct clk_ops rockchip_rk3588_pll_clk_norate_ops = {
.recalc_rate = rockchip_rk3588_pll_recalc_rate,
.enable = rockchip_rk3588_pll_enable,
@@ -1051,7 +1041,6 @@ static const struct clk_ops rockchip_rk3588_pll_clk_ops = {
.enable = rockchip_rk3588_pll_enable,
.disable = rockchip_rk3588_pll_disable,
.is_enabled = rockchip_rk3588_pll_is_enabled,
- .init = rockchip_rk3588_pll_init,
};
/*
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index d341ce0708aa..df9330958c83 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -123,6 +123,7 @@ PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" };
PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" };
PNAME(mux_pll_src_dmyapll_dpll_gpll_xin24_p) = { "dummy_apll", "dpll", "gpll", "xin24m" };
+PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" };
PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" };
PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
PNAME(mux_i2s_clkout_p) = { "i2s_pre", "xin12m" };
@@ -423,6 +424,9 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS),
GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS),
GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS),
+
+ MUX(SCLK_USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT,
+ RK2928_MISC_CON, 15, 1, MFLAGS),
};
static const char *const rk3036_critical_clocks[] __initconst = {
@@ -431,6 +435,7 @@ static const char *const rk3036_critical_clocks[] __initconst = {
"hclk_peri",
"pclk_peri",
"pclk_ddrupctl",
+ "ddrphy",
};
static void __init rk3036_clk_init(struct device_node *np)
@@ -438,7 +443,6 @@ static void __init rk3036_clk_init(struct device_node *np)
struct rockchip_clk_provider *ctx;
unsigned long clk_nr_clks;
void __iomem *reg_base;
- struct clk *clk;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -462,11 +466,6 @@ static void __init rk3036_clk_init(struct device_node *np)
return;
}
- clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock usb480m: %ld\n",
- __func__, PTR_ERR(clk));
-
rockchip_clk_register_plls(ctx, rk3036_pll_clks,
ARRAY_SIZE(rk3036_pll_clks),
RK3036_GRF_SOC_STATUS0);
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 90d329216064..0a1e017df7c6 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -418,7 +418,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(3), 11, GFLAGS),
MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
- RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
+ RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS, grf_type_sys),
GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
RK3288_CLKGATE_CON(9), 0, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index cf60fcf2fa5c..cd5f65b6cdf5 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -677,9 +677,9 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKSEL_CON(27), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK3328_CLKGATE_CON(3), 5, GFLAGS),
MUXGRF(SCLK_MAC2IO, "clk_mac2io", mux_mac2io_src_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_MAC_CON1, 10, 1, MFLAGS),
+ RK3328_GRF_MAC_CON1, 10, 1, MFLAGS, grf_type_sys),
MUXGRF(SCLK_MAC2IO_EXT, "clk_mac2io_ext", mux_mac2io_ext_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_SOC_CON4, 14, 1, MFLAGS),
+ RK3328_GRF_SOC_CON4, 14, 1, MFLAGS, grf_type_sys),
COMPOSITE(SCLK_MAC2PHY_SRC, "clk_mac2phy_src", mux_2plls_p, 0,
RK3328_CLKSEL_CON(26), 7, 1, MFLAGS, 0, 5, DFLAGS,
@@ -692,7 +692,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKSEL_CON(26), 8, 2, DFLAGS,
RK3328_CLKGATE_CON(9), 2, GFLAGS),
MUXGRF(SCLK_MAC2PHY, "clk_mac2phy", mux_mac2phy_src_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_MAC_CON2, 10, 1, MFLAGS),
+ RK3328_GRF_MAC_CON2, 10, 1, MFLAGS, grf_type_sys),
FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
diff --git a/drivers/clk/rockchip/clk-rk3528.c b/drivers/clk/rockchip/clk-rk3528.c
index b8b577b902a0..a5ff64b93f8f 100644
--- a/drivers/clk/rockchip/clk-rk3528.c
+++ b/drivers/clk/rockchip/clk-rk3528.c
@@ -10,6 +10,9 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/minmax.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/rockchip,rk3528-cru.h>
@@ -1061,23 +1064,65 @@ static struct rockchip_clk_branch rk3528_clk_branches[] __initdata = {
0, 1, 1),
};
+static struct rockchip_clk_branch rk3528_vo_clk_branches[] __initdata = {
+ MMC_GRF(SCLK_SDMMC_DRV, "sdmmc_drv", "cclk_src_sdmmc0",
+ RK3528_SDMMC_CON(0), 1, grf_type_vo),
+ MMC_GRF(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "cclk_src_sdmmc0",
+ RK3528_SDMMC_CON(1), 1, grf_type_vo),
+};
+
+static struct rockchip_clk_branch rk3528_vpu_clk_branches[] __initdata = {
+ MMC_GRF(SCLK_SDIO0_DRV, "sdio0_drv", "cclk_src_sdio0",
+ RK3528_SDIO0_CON(0), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO0_SAMPLE, "sdio0_sample", "cclk_src_sdio0",
+ RK3528_SDIO0_CON(1), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO1_DRV, "sdio1_drv", "cclk_src_sdio1",
+ RK3528_SDIO1_CON(0), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO1_SAMPLE, "sdio1_sample", "cclk_src_sdio1",
+ RK3528_SDIO1_CON(1), 1, grf_type_vpu),
+};
+
static int __init clk_rk3528_probe(struct platform_device *pdev)
{
- struct rockchip_clk_provider *ctx;
+ unsigned long nr_vpu_branches = ARRAY_SIZE(rk3528_vpu_clk_branches);
+ unsigned long nr_vo_branches = ARRAY_SIZE(rk3528_vo_clk_branches);
+ unsigned long nr_branches = ARRAY_SIZE(rk3528_clk_branches);
+ unsigned long nr_clks, nr_vo_clks, nr_vpu_clks;
+ struct rockchip_aux_grf *vo_grf_e, *vpu_grf_e;
+ struct regmap *vo_grf, *vpu_grf;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- unsigned long nr_branches = ARRAY_SIZE(rk3528_clk_branches);
- unsigned long nr_clks;
+ struct rockchip_clk_provider *ctx;
void __iomem *reg_base;
- nr_clks = rockchip_clk_find_max_clk_id(rk3528_clk_branches,
- nr_branches) + 1;
-
reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return dev_err_probe(dev, PTR_ERR(reg_base),
"could not map cru region");
+ nr_clks = rockchip_clk_find_max_clk_id(rk3528_clk_branches,
+ nr_branches) + 1;
+
+ vo_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3528-vo-grf");
+ if (!IS_ERR(vo_grf)) {
+ nr_vo_clks = rockchip_clk_find_max_clk_id(rk3528_vo_clk_branches,
+ nr_vo_branches) + 1;
+ nr_clks = max(nr_clks, nr_vo_clks);
+ } else if (PTR_ERR(vo_grf) != -ENODEV) {
+ return dev_err_probe(dev, PTR_ERR(vo_grf),
+ "failed to look up VO GRF\n");
+ }
+
+ vpu_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3528-vpu-grf");
+ if (!IS_ERR(vpu_grf)) {
+ nr_vpu_clks = rockchip_clk_find_max_clk_id(rk3528_vpu_clk_branches,
+ nr_vpu_branches) + 1;
+ nr_clks = max(nr_clks, nr_vpu_clks);
+ } else if (PTR_ERR(vpu_grf) != -ENODEV) {
+ return dev_err_probe(dev, PTR_ERR(vpu_grf),
+ "failed to look up VPU GRF\n");
+ }
+
ctx = rockchip_clk_init(np, reg_base, nr_clks);
if (IS_ERR(ctx))
return dev_err_probe(dev, PTR_ERR(ctx),
@@ -1092,6 +1137,32 @@ static int __init clk_rk3528_probe(struct platform_device *pdev)
ARRAY_SIZE(rk3528_cpuclk_rates));
rockchip_clk_register_branches(ctx, rk3528_clk_branches, nr_branches);
+ if (!IS_ERR(vo_grf)) {
+ vo_grf_e = devm_kzalloc(dev, sizeof(*vo_grf_e), GFP_KERNEL);
+ if (!vo_grf_e)
+ return -ENOMEM;
+
+ vo_grf_e->grf = vo_grf;
+ vo_grf_e->type = grf_type_vo;
+ hash_add(ctx->aux_grf_table, &vo_grf_e->node, grf_type_vo);
+
+ rockchip_clk_register_branches(ctx, rk3528_vo_clk_branches,
+ nr_vo_branches);
+ }
+
+ if (!IS_ERR(vpu_grf)) {
+ vpu_grf_e = devm_kzalloc(dev, sizeof(*vpu_grf_e), GFP_KERNEL);
+ if (!vpu_grf_e)
+ return -ENOMEM;
+
+ vpu_grf_e->grf = vpu_grf;
+ vpu_grf_e->type = grf_type_vpu;
+ hash_add(ctx->aux_grf_table, &vpu_grf_e->node, grf_type_vpu);
+
+ rockchip_clk_register_branches(ctx, rk3528_vpu_clk_branches,
+ nr_vpu_branches);
+ }
+
rk3528_rst_init(np, reg_base);
rockchip_register_restart_notifier(ctx, RK3528_GLB_SRST_FST, NULL);
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 7d9279291e76..d48ab9d6c064 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -89,6 +89,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(96000000, 1, 96, 6, 4, 1, 0),
RK3036_PLL_RATE(78750000, 4, 315, 6, 4, 1, 0),
RK3036_PLL_RATE(74250000, 2, 99, 4, 4, 1, 0),
+ RK3036_PLL_RATE(33300000, 4, 111, 5, 4, 1, 0),
{ /* sentinel */ },
};
@@ -590,7 +591,7 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = {
RK3568_CLKSEL_CON(9), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3568_CLKGATE_CON(4), 0, GFLAGS),
MUXGRF(CLK_DDR1X, "clk_ddr1x", clk_ddr1x_p, CLK_SET_RATE_PARENT,
- RK3568_CLKSEL_CON(9), 15, 1, MFLAGS),
+ RK3568_CLKSEL_CON(9), 15, 1, MFLAGS, grf_type_sys),
COMPOSITE_NOMUX(CLK_MSCH, "clk_msch", "clk_ddr1x", CLK_IGNORE_UNUSED,
RK3568_CLKSEL_CON(10), 0, 2, DFLAGS,
diff --git a/drivers/clk/rockchip/clk-rk3576.c b/drivers/clk/rockchip/clk-rk3576.c
index 595e010341f7..9bc0ef51ef68 100644
--- a/drivers/clk/rockchip/clk-rk3576.c
+++ b/drivers/clk/rockchip/clk-rk3576.c
@@ -10,11 +10,13 @@
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/rockchip,rk3576-cru.h>
#include "clk.h"
#define RK3576_GRF_SOC_STATUS0 0x600
#define RK3576_PMU0_GRF_OSC_CON6 0x18
+#define RK3576_VCCIO_IOC_MISC_CON0 0x6400
enum rk3576_plls {
bpll, lpll, vpll, aupll, cpll, gpll, ppll,
@@ -541,6 +543,8 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
RK3576_CLKGATE_CON(5), 14, GFLAGS),
GATE(CLK_OTPC_AUTO_RD_G, "clk_otpc_auto_rd_g", "xin24m", 0,
RK3576_CLKGATE_CON(5), 15, GFLAGS),
+ GATE(CLK_OTP_PHY_G, "clk_otp_phy_g", "xin24m", 0,
+ RK3576_CLKGATE_CON(6), 0, GFLAGS),
COMPOSITE(CLK_MIPI_CAMERAOUT_M0, "clk_mipi_cameraout_m0", mux_24m_spll_gpll_cpll_p, 0,
RK3576_CLKSEL_CON(38), 8, 2, MFLAGS, 0, 8, DFLAGS,
RK3576_CLKGATE_CON(6), 3, GFLAGS),
@@ -1479,6 +1483,14 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
RK3576_CLKGATE_CON(10), 0, GFLAGS),
GATE(CLK_SAI0_MCLKOUT, "clk_sai0_mclkout", "mclk_sai0_8ch", 0,
RK3576_CLKGATE_CON(10), 1, GFLAGS),
+ GATE_GRF(CLK_SAI0_MCLKOUT_TO_IO, "mclk_sai0_to_io", "clk_sai0_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 0, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI1_MCLKOUT_TO_IO, "mclk_sai1_to_io", "clk_sai1_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 1, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI2_MCLKOUT_TO_IO, "mclk_sai2_to_io", "clk_sai2_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 2, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI3_MCLKOUT_TO_IO, "mclk_sai3_to_io", "clk_sai3_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 3, GFLAGS, grf_type_ioc),
/* sdgmac */
COMPOSITE_NODIV(HCLK_SDGMAC_ROOT, "hclk_sdgmac_root", mux_200m_100m_50m_24m_p, 0,
@@ -1676,13 +1688,13 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
/* phy ref */
MUXGRF(CLK_PHY_REF_SRC, "clk_phy_ref_src", clk_phy_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_USBPHY_REF_SRC, "clk_usbphy_ref_src", clk_usbphy_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_CPLL_REF_SRC, "clk_cpll_ref_src", clk_cpll_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_AUPLL_REF_SRC, "clk_aupll_ref_src", clk_aupll_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS, grf_type_pmu0),
/* secure ns */
COMPOSITE_NODIV(ACLK_SECURE_NS, "aclk_secure_ns", mux_350m_175m_116m_24m_p, CLK_IS_CRITICAL,
@@ -1725,17 +1737,26 @@ static void __init rk3576_clk_init(struct device_node *np)
struct rockchip_clk_provider *ctx;
unsigned long clk_nr_clks;
void __iomem *reg_base;
- struct regmap *grf;
+ struct rockchip_aux_grf *ioc_grf_e;
+ struct rockchip_aux_grf *pmu0_grf_e;
+ struct regmap *ioc_grf;
+ struct regmap *pmu0_grf;
clk_nr_clks = rockchip_clk_find_max_clk_id(rk3576_clk_branches,
ARRAY_SIZE(rk3576_clk_branches)) + 1;
- grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf");
- if (IS_ERR(grf)) {
+ pmu0_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf");
+ if (IS_ERR(pmu0_grf)) {
pr_err("%s: could not get PMU0 GRF syscon\n", __func__);
return;
}
+ ioc_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-ioc-grf");
+ if (IS_ERR(ioc_grf)) {
+ pr_err("%s: could not get IOC GRF syscon\n", __func__);
+ return;
+ }
+
reg_base = of_iomap(np, 0);
if (!reg_base) {
pr_err("%s: could not map cru region\n", __func__);
@@ -1745,11 +1766,24 @@ static void __init rk3576_clk_init(struct device_node *np)
ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
- iounmap(reg_base);
- return;
+ goto err_unmap;
}
- ctx->grf = grf;
+ pmu0_grf_e = kzalloc(sizeof(*pmu0_grf_e), GFP_KERNEL);
+ if (!pmu0_grf_e)
+ goto err_unmap;
+
+ pmu0_grf_e->grf = pmu0_grf;
+ pmu0_grf_e->type = grf_type_pmu0;
+ hash_add(ctx->aux_grf_table, &pmu0_grf_e->node, grf_type_pmu0);
+
+ ioc_grf_e = kzalloc(sizeof(*ioc_grf_e), GFP_KERNEL);
+ if (!ioc_grf_e)
+ goto err_free_pmu0;
+
+ ioc_grf_e->grf = ioc_grf;
+ ioc_grf_e->type = grf_type_ioc;
+ hash_add(ctx->aux_grf_table, &ioc_grf_e->node, grf_type_ioc);
rockchip_clk_register_plls(ctx, rk3576_pll_clks,
ARRAY_SIZE(rk3576_pll_clks),
@@ -1772,6 +1806,14 @@ static void __init rk3576_clk_init(struct device_node *np)
rockchip_register_restart_notifier(ctx, RK3576_GLB_SRST_FST, NULL);
rockchip_clk_of_add_provider(np, ctx);
+
+ return;
+
+err_free_pmu0:
+ kfree(pmu0_grf_e);
+err_unmap:
+ iounmap(reg_base);
+ return;
}
CLK_OF_DECLARE(rk3576_cru, "rockchip,rk3576-cru", rk3576_clk_init);
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index 4031733def4e..1694223f4f84 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -64,6 +64,7 @@ static struct rockchip_pll_rate_table rk3588_pll_rates[] = {
RK3588_PLL_RATE(1560000000, 2, 260, 1, 0),
RK3588_PLL_RATE(1536000000, 2, 256, 1, 0),
RK3588_PLL_RATE(1512000000, 2, 252, 1, 0),
+ RK3588_PLL_RATE(1500000000, 2, 250, 1, 0),
RK3588_PLL_RATE(1488000000, 2, 248, 1, 0),
RK3588_PLL_RATE(1464000000, 2, 244, 1, 0),
RK3588_PLL_RATE(1440000000, 2, 240, 1, 0),
diff --git a/drivers/clk/rockchip/clk-rv1126.c b/drivers/clk/rockchip/clk-rv1126.c
index fc19c5522490..15e7bfe84506 100644
--- a/drivers/clk/rockchip/clk-rv1126.c
+++ b/drivers/clk/rockchip/clk-rv1126.c
@@ -857,7 +857,7 @@ static struct rockchip_clk_branch rv1126_clk_branches[] __initdata = {
RV1126_GMAC_CON, 5, 1, MFLAGS),
MUXGRF(CLK_GMAC_SRC, "clk_gmac_src", mux_clk_gmac_src_p, CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT,
- RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS),
+ RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS, grf_type_sys),
GATE(CLK_GMAC_REF, "clk_gmac_ref", "clk_gmac_src", 0,
RV1126_CLKGATE_CON(20), 7, GFLAGS),
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index cbf93ea119a9..19caf26c991b 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -382,6 +382,8 @@ static struct rockchip_clk_provider *rockchip_clk_init_base(
ctx->cru_node = np;
spin_lock_init(&ctx->lock);
+ hash_init(ctx->aux_grf_table);
+
ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
"rockchip,grf");
@@ -496,6 +498,8 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
{
+ struct regmap *grf = ctx->grf;
+ struct rockchip_aux_grf *agrf;
struct clk *clk;
unsigned int idx;
unsigned long flags;
@@ -504,6 +508,19 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
flags = list->flags;
clk = NULL;
+ /* for GRF-dependent branches, choose the right grf first */
+ if ((list->branch_type == branch_grf_mux ||
+ list->branch_type == branch_grf_gate ||
+ list->branch_type == branch_grf_mmc) &&
+ list->grf_type != grf_type_sys) {
+ hash_for_each_possible(ctx->aux_grf_table, agrf, node, list->grf_type) {
+ if (agrf->type == list->grf_type) {
+ grf = agrf->grf;
+ break;
+ }
+ }
+ }
+
/* catch simple muxes */
switch (list->branch_type) {
case branch_mux:
@@ -523,10 +540,10 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->mux_shift, list->mux_width,
list->mux_flags, &ctx->lock);
break;
- case branch_muxgrf:
+ case branch_grf_mux:
clk = rockchip_clk_register_muxgrf(list->name,
list->parent_names, list->num_parents,
- flags, ctx->grf, list->muxdiv_offset,
+ flags, grf, list->muxdiv_offset,
list->mux_shift, list->mux_width,
list->mux_flags);
break;
@@ -573,6 +590,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
ctx->reg_base + list->gate_offset,
list->gate_shift, list->gate_flags, &ctx->lock);
break;
+ case branch_grf_gate:
+ flags |= CLK_SET_RATE_PARENT;
+ clk = rockchip_clk_register_gate_grf(list->name,
+ list->parent_names[0], flags, grf,
+ list->gate_offset, list->gate_shift,
+ list->gate_flags);
+ break;
case branch_composite:
clk = rockchip_clk_register_branch(list->name,
list->parent_names, list->num_parents,
@@ -590,6 +614,16 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->name,
list->parent_names, list->num_parents,
ctx->reg_base + list->muxdiv_offset,
+ NULL, 0,
+ list->div_shift
+ );
+ break;
+ case branch_grf_mmc:
+ clk = rockchip_clk_register_mmc(
+ list->name,
+ list->parent_names, list->num_parents,
+ NULL,
+ grf, list->muxdiv_offset,
list->div_shift
);
break;
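A condensed sketch of the auxiliary-GRF pattern used above (hypothetical helpers, not part of this patch): providers add one hash entry per GRF type, and branch registration falls back to the system GRF when no entry matches.

        #include <linux/hashtable.h>
        #include <linux/slab.h>

        /* Provider side: remember an auxiliary GRF under its grf_type key */
        static int example_add_aux_grf(struct rockchip_clk_provider *ctx,
                                       struct regmap *grf, enum rockchip_grf_type type)
        {
                struct rockchip_aux_grf *e = kzalloc(sizeof(*e), GFP_KERNEL);

                if (!e)
                        return -ENOMEM;
                e->grf = grf;
                e->type = type;
                hash_add(ctx->aux_grf_table, &e->node, type);
                return 0;
        }

        /* Branch side: pick the matching auxiliary GRF, else use the sys GRF */
        static struct regmap *example_get_grf(struct rockchip_clk_provider *ctx,
                                              enum rockchip_grf_type type)
        {
                struct rockchip_aux_grf *e;

                hash_for_each_possible(ctx->aux_grf_table, e, node, type)
                        if (e->type == type)
                                return e->grf;
                return ctx->grf;
        }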
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index df2b2d706450..1e9c3c0d31e3 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/clk-provider.h>
+#include <linux/hashtable.h>
struct clk;
@@ -217,6 +218,9 @@ struct clk;
#define RK3528_CLKSEL_CON(x) ((x) * 0x4 + 0x300)
#define RK3528_CLKGATE_CON(x) ((x) * 0x4 + 0x800)
#define RK3528_SOFTRST_CON(x) ((x) * 0x4 + 0xa00)
+#define RK3528_SDMMC_CON(x) ((x) * 0x4 + 0x24)
+#define RK3528_SDIO0_CON(x) ((x) * 0x4 + 0x4)
+#define RK3528_SDIO1_CON(x) ((x) * 0x4 + 0xc)
#define RK3528_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PMU_CRU_BASE)
#define RK3528_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RK3528_PMU_CRU_BASE)
#define RK3528_PCIE_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PCIE_CRU_BASE)
@@ -440,12 +444,37 @@ enum rockchip_pll_type {
.k = _k, \
}
+enum rockchip_grf_type {
+ grf_type_sys = 0,
+ grf_type_pmu0,
+ grf_type_pmu1,
+ grf_type_ioc,
+ grf_type_vo,
+ grf_type_vpu,
+};
+
+/* hash table order: 2^GRF_HASH_ORDER buckets for the auxiliary GRF types */
+#define GRF_HASH_ORDER 2
+
+/**
+ * struct rockchip_aux_grf - entry for the aux_grf_table hashtable
+ * @grf: pointer to the grf this entry references
+ * @type: what type of GRF this is
+ * @node: hlist node
+ */
+struct rockchip_aux_grf {
+ struct regmap *grf;
+ enum rockchip_grf_type type;
+ struct hlist_node node;
+};
+
/**
* struct rockchip_clk_provider - information about clock provider
* @reg_base: virtual address for the register base.
* @clk_data: holds clock related data like clk* and number of clocks.
* @cru_node: device-node of the clock-provider
* @grf: regmap of the general-register-files syscon
+ * @aux_grf_table: hashtable of auxiliary GRF regmaps, indexed by grf_type
* @lock: maintains exclusion between callbacks for a given clock-provider.
*/
struct rockchip_clk_provider {
@@ -453,6 +482,7 @@ struct rockchip_clk_provider {
struct clk_onecell_data clk_data;
struct device_node *cru_node;
struct regmap *grf;
+ DECLARE_HASHTABLE(aux_grf_table, GRF_HASH_ORDER);
spinlock_t lock;
};
@@ -594,7 +624,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
- void __iomem *reg, int shift);
+ void __iomem *reg,
+ struct regmap *grf, int grf_reg,
+ int shift);
/*
* DDRCLK flags, including method of setting the rate
@@ -622,17 +654,24 @@ struct clk *rockchip_clk_register_muxgrf(const char *name,
int flags, struct regmap *grf, int reg,
int shift, int width, int mux_flags);
+struct clk *rockchip_clk_register_gate_grf(const char *name,
+ const char *parent_name, unsigned long flags,
+ struct regmap *regmap, unsigned int reg,
+ unsigned int shift, u8 gate_flags);
+
#define PNAME(x) static const char *const x[] __initconst
enum rockchip_clk_branch_type {
branch_composite,
branch_mux,
- branch_muxgrf,
+ branch_grf_mux,
branch_divider,
branch_fraction_divider,
branch_gate,
+ branch_grf_gate,
branch_linked_gate,
branch_mmc,
+ branch_grf_mmc,
branch_inverter,
branch_factor,
branch_ddrclk,
@@ -660,6 +699,7 @@ struct rockchip_clk_branch {
u8 gate_shift;
u8 gate_flags;
unsigned int linked_clk_id;
+ enum rockchip_grf_type grf_type;
struct rockchip_clk_branch *child;
};
@@ -900,10 +940,10 @@ struct rockchip_clk_branch {
.mux_table = mt, \
}
-#define MUXGRF(_id, cname, pnames, f, o, s, w, mf) \
+#define MUXGRF(_id, cname, pnames, f, o, s, w, mf, gt) \
{ \
.id = _id, \
- .branch_type = branch_muxgrf, \
+ .branch_type = branch_grf_mux, \
.name = cname, \
.parent_names = pnames, \
.num_parents = ARRAY_SIZE(pnames), \
@@ -913,6 +953,7 @@ struct rockchip_clk_branch {
.mux_width = w, \
.mux_flags = mf, \
.gate_offset = -1, \
+ .grf_type = gt, \
}
#define DIV(_id, cname, pname, f, o, s, w, df) \
@@ -958,6 +999,20 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
+#define GATE_GRF(_id, cname, pname, f, o, b, gf, gt) \
+ { \
+ .id = _id, \
+ .branch_type = branch_grf_gate, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .gate_offset = o, \
+ .gate_shift = b, \
+ .gate_flags = gf, \
+ .grf_type = gt, \
+ }
+
#define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \
{ \
.id = _id, \
@@ -983,6 +1038,18 @@ struct rockchip_clk_branch {
.div_shift = shift, \
}
+#define MMC_GRF(_id, cname, pname, offset, shift, grftype) \
+ { \
+ .id = _id, \
+ .branch_type = branch_grf_mmc, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .muxdiv_offset = offset, \
+ .div_shift = shift, \
+ .grf_type = grftype, \
+ }
+
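Purely for illustration: the GRF-aware branch macros (MUXGRF(), GATE_GRF(), MMC_GRF()) are consumed exactly like the entries already visible in the rk3528/rk3576 tables earlier in this diff; a mixed excerpt borrowed from those tables (not a real table) would look like:

        /* Illustrative only - entries copied from the RK3576/RK3528 tables above */
        static struct rockchip_clk_branch example_grf_branches[] __initdata = {
                MUXGRF(CLK_PHY_REF_SRC, "clk_phy_ref_src", clk_phy_ref_src_p, 0,
                       RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS, grf_type_pmu0),
                GATE_GRF(CLK_SAI0_MCLKOUT_TO_IO, "mclk_sai0_to_io", "clk_sai0_mclkout",
                         0, RK3576_VCCIO_IOC_MISC_CON0, 0, GFLAGS, grf_type_ioc),
                MMC_GRF(SCLK_SDMMC_DRV, "sdmmc_drv", "cclk_src_sdmmc0",
                        RK3528_SDMMC_CON(0), 1, grf_type_vo),
        };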
#define INVERTER(_id, cname, pname, io, is, if) \
{ \
.id = _id, \
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 374c26e5d9fd..cc5c1644c41c 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1269,6 +1269,45 @@ static const struct samsung_cpu_clock exynos4412_cpu_clks[] __initconst = {
CPUCLK_LAYOUT_E4210, e4412_armclk_d),
};
+static const struct samsung_cmu_info cmu_info_exynos4 __initconst = {
+ .mux_clks = exynos4_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4_mux_clks),
+ .div_clks = exynos4_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4_div_clks),
+ .gate_clks = exynos4_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4_gate_clks),
+ .fixed_factor_clks = exynos4_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4_fixed_factor_clks),
+ .fixed_clks = exynos4_fixed_rate_clks,
+ .nr_fixed_clks = ARRAY_SIZE(exynos4_fixed_rate_clks),
+};
+
+static const struct samsung_cmu_info cmu_info_exynos4210 __initconst = {
+ .mux_clks = exynos4210_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4210_mux_clks),
+ .div_clks = exynos4210_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4210_div_clks),
+ .gate_clks = exynos4210_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4210_gate_clks),
+ .fixed_factor_clks = exynos4210_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4210_fixed_factor_clks),
+ .fixed_clks = exynos4210_fixed_rate_clks,
+ .nr_fixed_clks = ARRAY_SIZE(exynos4210_fixed_rate_clks),
+ .cpu_clks = exynos4210_cpu_clks,
+ .nr_cpu_clks = ARRAY_SIZE(exynos4210_cpu_clks),
+};
+
+static const struct samsung_cmu_info cmu_info_exynos4x12 __initconst = {
+ .mux_clks = exynos4x12_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4x12_mux_clks),
+ .div_clks = exynos4x12_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4x12_div_clks),
+ .gate_clks = exynos4x12_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4x12_gate_clks),
+ .fixed_factor_clks = exynos4x12_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4x12_fixed_factor_clks),
+};
+
/* register exynos4 clocks */
static void __init exynos4_clk_init(struct device_node *np,
enum exynos4_soc soc)
@@ -1322,41 +1361,12 @@ static void __init exynos4_clk_init(struct device_node *np,
ARRAY_SIZE(exynos4x12_plls));
}
- samsung_clk_register_fixed_rate(ctx, exynos4_fixed_rate_clks,
- ARRAY_SIZE(exynos4_fixed_rate_clks));
- samsung_clk_register_mux(ctx, exynos4_mux_clks,
- ARRAY_SIZE(exynos4_mux_clks));
- samsung_clk_register_div(ctx, exynos4_div_clks,
- ARRAY_SIZE(exynos4_div_clks));
- samsung_clk_register_gate(ctx, exynos4_gate_clks,
- ARRAY_SIZE(exynos4_gate_clks));
- samsung_clk_register_fixed_factor(ctx, exynos4_fixed_factor_clks,
- ARRAY_SIZE(exynos4_fixed_factor_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4);
if (exynos4_soc == EXYNOS4210) {
- samsung_clk_register_fixed_rate(ctx, exynos4210_fixed_rate_clks,
- ARRAY_SIZE(exynos4210_fixed_rate_clks));
- samsung_clk_register_mux(ctx, exynos4210_mux_clks,
- ARRAY_SIZE(exynos4210_mux_clks));
- samsung_clk_register_div(ctx, exynos4210_div_clks,
- ARRAY_SIZE(exynos4210_div_clks));
- samsung_clk_register_gate(ctx, exynos4210_gate_clks,
- ARRAY_SIZE(exynos4210_gate_clks));
- samsung_clk_register_fixed_factor(ctx,
- exynos4210_fixed_factor_clks,
- ARRAY_SIZE(exynos4210_fixed_factor_clks));
- samsung_clk_register_cpu(ctx, exynos4210_cpu_clks,
- ARRAY_SIZE(exynos4210_cpu_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4210);
} else {
- samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
- ARRAY_SIZE(exynos4x12_mux_clks));
- samsung_clk_register_div(ctx, exynos4x12_div_clks,
- ARRAY_SIZE(exynos4x12_div_clks));
- samsung_clk_register_gate(ctx, exynos4x12_gate_clks,
- ARRAY_SIZE(exynos4x12_gate_clks));
- samsung_clk_register_fixed_factor(ctx,
- exynos4x12_fixed_factor_clks,
- ARRAY_SIZE(exynos4x12_fixed_factor_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4x12);
if (soc == EXYNOS4412)
samsung_clk_register_cpu(ctx, exynos4412_cpu_clks,
ARRAY_SIZE(exynos4412_cpu_clks));
diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c
index dc8d4240f6de..da4afe8ac2ab 100644
--- a/drivers/clk/samsung/clk-exynosautov920.c
+++ b/drivers/clk/samsung/clk-exynosautov920.c
@@ -18,6 +18,9 @@
/* NOTE: Must be equal to the last clock ID increased by one */
#define CLKS_NR_TOP (DOUT_CLKCMU_TAA_NOC + 1)
+#define CLKS_NR_CPUCL0 (CLK_DOUT_CPUCL0_NOCP + 1)
+#define CLKS_NR_CPUCL1 (CLK_DOUT_CPUCL1_NOCP + 1)
+#define CLKS_NR_CPUCL2 (CLK_DOUT_CPUCL2_NOCP + 1)
#define CLKS_NR_PERIC0 (CLK_DOUT_PERIC0_I3C + 1)
#define CLKS_NR_PERIC1 (CLK_DOUT_PERIC1_I3C + 1)
#define CLKS_NR_MISC (CLK_DOUT_MISC_OSC_DIV2 + 1)
@@ -1005,6 +1008,339 @@ static void __init exynosautov920_cmu_top_init(struct device_node *np)
CLK_OF_DECLARE(exynosautov920_cmu_top, "samsung,exynosautov920-cmu-top",
exynosautov920_cmu_top_init);
+/* ---- CMU_CPUCL0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL0 (0x1EC00000) */
+#define PLL_LOCKTIME_PLL_CPUCL0 0x0000
+#define PLL_CON0_PLL_CPUCL0 0x0100
+#define PLL_CON1_PLL_CPUCL0 0x0104
+#define PLL_CON3_PLL_CPUCL0 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER 0x0620
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL0_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC 0x181c
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG 0x1820
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP 0x1824
+
+static const unsigned long cpucl0_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL0,
+ PLL_CON0_PLL_CPUCL0,
+ PLL_CON1_PLL_CPUCL0,
+ PLL_CON3_PLL_CPUCL0,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL0 */
+PNAME(mout_pll_cpucl0_p) = { "oscclk", "fout_cpucl0_pll" };
+PNAME(mout_cpucl0_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl0_cluster" };
+PNAME(mout_cpucl0_dbg_user_p) = { "oscclk", "dout_clkcmu_cpucl0_dbg" };
+PNAME(mout_cpucl0_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl0_switch" };
+PNAME(mout_cpucl0_cluster_p) = { "oscclk", "mout_cpucl0_cluster_user",
+ "mout_cpucl0_switch_user"};
+PNAME(mout_cpucl0_core_p) = { "oscclk", "mout_pll_cpucl0",
+ "mout_cpucl0_switch_user"};
+
+static const struct samsung_pll_rate_table cpu_pll_rates[] __initconst = {
+ PLL_35XX_RATE(38400000U, 2400000000U, 250, 4, 0),
+ PLL_35XX_RATE(38400000U, 2304000000U, 240, 4, 0),
+ PLL_35XX_RATE(38400000U, 2208000000U, 230, 4, 0),
+ PLL_35XX_RATE(38400000U, 2112000000U, 220, 4, 0),
+ PLL_35XX_RATE(38400000U, 2016000000U, 210, 4, 0),
+ PLL_35XX_RATE(38400000U, 1824000000U, 190, 4, 0),
+ PLL_35XX_RATE(38400000U, 1680000000U, 175, 4, 0),
+ PLL_35XX_RATE(38400000U, 1344000000U, 140, 4, 0),
+ PLL_35XX_RATE(38400000U, 1152000000U, 120, 4, 0),
+ PLL_35XX_RATE(38400000U, 576000000U, 120, 4, 1),
+ PLL_35XX_RATE(38400000U, 288000000U, 120, 4, 2),
+};
+
+static const struct samsung_pll_clock cpucl0_pll_clks[] __initconst = {
+ /* CMU_CPUCL0_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL0_PLL, "fout_cpucl0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL0, PLL_CON3_PLL_CPUCL0, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL0, "mout_pll_cpucl0", mout_pll_cpucl0_p,
+ PLL_CON0_PLL_CPUCL0, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_CLUSTER_USER, "mout_cpucl0_cluster_user", mout_cpucl0_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_DBG_USER, "mout_cpucl0_dbg_user", mout_cpucl0_dbg_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_SWITCH_USER, "mout_cpucl0_switch_user", mout_cpucl0_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_CLUSTER, "mout_cpucl0_cluster", mout_cpucl0_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL0_CORE, "mout_cpucl0_core", mout_cpucl0_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER0_ACLK, "dout_cluster0_aclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_ATCLK, "dout_cluster0_atclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_MPCLK, "dout_cluster0_mpclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_PCLK, "dout_cluster0_pclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_PERIPHCLK, "dout_cluster0_periphclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL0_DBG_NOC, "dout_cpucl0_dbg_noc",
+ "mout_cpucl0_dbg_user", CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC, 0, 3),
+ DIV(CLK_DOUT_CPUCL0_DBG_PCLKDBG, "dout_cpucl0_dbg_pclkdbg",
+ "mout_cpucl0_dbg_user", CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG, 0, 3),
+ DIV(CLK_DOUT_CPUCL0_NOCP, "dout_cpucl0_nocp",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl0_cmu_info __initconst = {
+ .pll_clks = cpucl0_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl0_pll_clks),
+ .mux_clks = cpucl0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl0_mux_clks),
+ .div_clks = cpucl0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl0_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL0,
+ .clk_regs = cpucl0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl0_clk_regs),
+ .clk_name = "cpucl0",
+};
+
+static void __init exynosautov920_cmu_cpucl0_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl0_cmu_info);
+}
+
+/* Register CMU_CPUCL0 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl0, "samsung,exynosautov920-cmu-cpucl0",
+ exynosautov920_cmu_cpucl0_init);
+
+/* ---- CMU_CPUCL1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL1 (0x1ED00000) */
+#define PLL_LOCKTIME_PLL_CPUCL1 0x0000
+#define PLL_CON0_PLL_CPUCL1 0x0100
+#define PLL_CON1_PLL_CPUCL1 0x0104
+#define PLL_CON3_PLL_CPUCL1 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER 0x0610
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL1_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP 0x181c
+
+static const unsigned long cpucl1_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL1,
+ PLL_CON0_PLL_CPUCL1,
+ PLL_CON1_PLL_CPUCL1,
+ PLL_CON3_PLL_CPUCL1,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL1 */
+PNAME(mout_pll_cpucl1_p) = { "oscclk", "fout_cpucl1_pll" };
+PNAME(mout_cpucl1_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl1_cluster" };
+PNAME(mout_cpucl1_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl1_switch" };
+PNAME(mout_cpucl1_cluster_p) = { "oscclk", "mout_cpucl1_cluster_user",
+ "mout_cpucl1_switch_user"};
+PNAME(mout_cpucl1_core_p) = { "oscclk", "mout_pll_cpucl1",
+ "mout_cpucl1_switch_user"};
+
+static const struct samsung_pll_clock cpucl1_pll_clks[] __initconst = {
+ /* CMU_CPUCL1_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL1_PLL, "fout_cpucl1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL1, PLL_CON3_PLL_CPUCL1, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL1, "mout_pll_cpucl1", mout_pll_cpucl1_p,
+ PLL_CON0_PLL_CPUCL1, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_CLUSTER_USER, "mout_cpucl1_cluster_user", mout_cpucl1_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_SWITCH_USER, "mout_cpucl1_switch_user", mout_cpucl1_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_CLUSTER, "mout_cpucl1_cluster", mout_cpucl1_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL1_CORE, "mout_cpucl1_core", mout_cpucl1_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER1_ACLK, "dout_cluster1_aclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_ATCLK, "dout_cluster1_atclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_MPCLK, "dout_cluster1_mpclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_PCLK, "dout_cluster1_pclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_PERIPHCLK, "dout_cluster1_periphclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL1_NOCP, "dout_cpucl1_nocp",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl1_cmu_info __initconst = {
+ .pll_clks = cpucl1_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl1_pll_clks),
+ .mux_clks = cpucl1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl1_mux_clks),
+ .div_clks = cpucl1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl1_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL1,
+ .clk_regs = cpucl1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl1_clk_regs),
+ .clk_name = "cpucl1",
+};
+
+static void __init exynosautov920_cmu_cpucl1_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl1_cmu_info);
+}
+
+/* Register CMU_CPUCL1 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl1, "samsung,exynosautov920-cmu-cpucl1",
+ exynosautov920_cmu_cpucl1_init);
+
+/* ---- CMU_CPUCL2 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL2 (0x1EE00000) */
+#define PLL_LOCKTIME_PLL_CPUCL2 0x0000
+#define PLL_CON0_PLL_CPUCL2 0x0100
+#define PLL_CON1_PLL_CPUCL2 0x0104
+#define PLL_CON3_PLL_CPUCL2 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER 0x0610
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL2_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP 0x181c
+
+static const unsigned long cpucl2_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL2,
+ PLL_CON0_PLL_CPUCL2,
+ PLL_CON1_PLL_CPUCL2,
+ PLL_CON3_PLL_CPUCL2,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL2 */
+PNAME(mout_pll_cpucl2_p) = { "oscclk", "fout_cpucl2_pll" };
+PNAME(mout_cpucl2_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl2_cluster" };
+PNAME(mout_cpucl2_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl2_switch" };
+PNAME(mout_cpucl2_cluster_p) = { "oscclk", "mout_cpucl2_cluster_user",
+ "mout_cpucl2_switch_user"};
+PNAME(mout_cpucl2_core_p) = { "oscclk", "mout_pll_cpucl2",
+ "mout_cpucl2_switch_user"};
+
+static const struct samsung_pll_clock cpucl2_pll_clks[] __initconst = {
+ /* CMU_CPUCL2_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL2_PLL, "fout_cpucl2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL2, PLL_CON3_PLL_CPUCL2, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl2_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL2, "mout_pll_cpucl2", mout_pll_cpucl2_p,
+ PLL_CON0_PLL_CPUCL2, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_CLUSTER_USER, "mout_cpucl2_cluster_user", mout_cpucl2_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_SWITCH_USER, "mout_cpucl2_switch_user", mout_cpucl2_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_CLUSTER, "mout_cpucl2_cluster", mout_cpucl2_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL2_CORE, "mout_cpucl2_core", mout_cpucl2_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl2_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER2_ACLK, "dout_cluster2_aclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_ATCLK, "dout_cluster2_atclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_MPCLK, "dout_cluster2_mpclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_PCLK, "dout_cluster2_pclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_PERIPHCLK, "dout_cluster2_periphclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL2_NOCP, "dout_cpucl2_nocp",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl2_cmu_info __initconst = {
+ .pll_clks = cpucl2_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl2_pll_clks),
+ .mux_clks = cpucl2_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl2_mux_clks),
+ .div_clks = cpucl2_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl2_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL2,
+ .clk_regs = cpucl2_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl2_clk_regs),
+ .clk_name = "cpucl2",
+};
+
+static void __init exynosautov920_cmu_cpucl2_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl2_cmu_info);
+}
+
+/* Register CMU_CPUCL2 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl2, "samsung,exynosautov920-cmu-cpucl2",
+ exynosautov920_cmu_cpucl2_init);
+
/* ---- CMU_PERIC0 --------------------------------------------------------- */
/* Register Offset definitions for CMU_PERIC0 (0x10800000) */
@@ -1393,7 +1729,7 @@ static const unsigned long hsi1_clk_regs[] __initconst = {
/* List of parent clocks for Muxes in CMU_HSI1 */
PNAME(mout_hsi1_mmc_card_user_p) = {"oscclk", "dout_clkcmu_hsi1_mmc_card"};
PNAME(mout_hsi1_noc_user_p) = { "oscclk", "dout_clkcmu_hsi1_noc" };
-PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "mout_clkcmu_hsi1_usbdrd" };
+PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "dout_clkcmu_hsi1_usbdrd" };
PNAME(mout_hsi1_usbdrd_p) = { "dout_tcxo_div2", "mout_hsi1_usbdrd_user" };
static const struct samsung_mux_clock hsi1_mux_clks[] __initconst = {
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 1d82737befd3..a88c212bda12 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -83,9 +83,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long mdiv;
- unsigned long refdiv;
- unsigned long reg;
+ u32 mdiv;
+ u32 refdiv;
+ u32 reg;
unsigned long long vco_freq;
/* read VCO1 reg for numerator and denominator */
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index 9dcc1b2d2cc0..03a96139a576 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -39,9 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long divf, divq, reg;
+ u32 divf, divq, reg;
unsigned long long vco_freq;
- unsigned long bypass;
+ u32 bypass;
reg = readl(socfpgaclk->hw.reg);
bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS);
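The two SoCFPGA hunks above narrow the divider/bypass locals from unsigned long to u32, which matches the width readl() actually returns before the result is widened to a 64-bit VCO frequency. A minimal sketch of that pattern, with placeholder masks rather than the real SoCFPGA bitfield layout:

#include <linux/io.h>
#include <linux/math64.h>
#include <linux/types.h>

/* Sketch only: the masks/shifts below are placeholders, not the SoCFPGA fields. */
static unsigned long example_pll_recalc(void __iomem *vco_reg,
					unsigned long parent_rate)
{
	u32 reg = readl(vco_reg);		 /* 32-bit MMIO read          */
	u32 mdiv = reg & 0xfff;			 /* placeholder numerator     */
	u32 refdiv = (reg >> 12) & 0x3f;	 /* placeholder denominator   */
	u64 vco = (u64)parent_rate * (mdiv + 1); /* widen before the multiply */

	return (unsigned long)div_u64(vco, refdiv + 1);
}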
diff --git a/drivers/clk/sophgo/Kconfig b/drivers/clk/sophgo/Kconfig
index 8b1367e3a95e..e14e802f28bf 100644
--- a/drivers/clk/sophgo/Kconfig
+++ b/drivers/clk/sophgo/Kconfig
@@ -37,3 +37,22 @@ config CLK_SOPHGO_SG2042_RPGATE
This clock IP depends on SG2042 Clock Generator because it uses
clock from Clock Generator IP as input.
This driver provides Gate function for RP.
+
+config CLK_SOPHGO_SG2044
+ tristate "Sophgo SG2044 clock controller support"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ This driver supports the clock controller on the Sophgo SG2044
+ SoC. This controller requires multiple PLL clocks as input.
+ This clock controller provides PLL clocks and common clock functions
+ for various IPs on the SoC.
+
+config CLK_SOPHGO_SG2044_PLL
+ tristate "Sophgo SG2044 PLL clock controller support"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ select MFD_SYSCON
+ select REGMAP_MMIO
+ help
+ This driver supports the PLL clock controller on the Sophgo
+ SG2044 SoC. This controller requires a 25 MHz oscillator as input.
+ This clock controller provides PLL clocks on the SoC.
diff --git a/drivers/clk/sophgo/Makefile b/drivers/clk/sophgo/Makefile
index 53506845a044..26b2fd121582 100644
--- a/drivers/clk/sophgo/Makefile
+++ b/drivers/clk/sophgo/Makefile
@@ -9,3 +9,5 @@ clk-sophgo-cv1800-y += clk-cv18xx-pll.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_CLKGEN) += clk-sg2042-clkgen.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_PLL) += clk-sg2042-pll.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_RPGATE) += clk-sg2042-rpgate.o
+obj-$(CONFIG_CLK_SOPHGO_SG2044) += clk-sg2044.o
+obj-$(CONFIG_CLK_SOPHGO_SG2044_PLL) += clk-sg2044-pll.o
diff --git a/drivers/clk/sophgo/clk-cv1800.c b/drivers/clk/sophgo/clk-cv1800.c
index e0c4dc347579..a4116ac1adcb 100644
--- a/drivers/clk/sophgo/clk-cv1800.c
+++ b/drivers/clk/sophgo/clk-cv1800.c
@@ -1519,7 +1519,9 @@ static int cv1800_clk_probe(struct platform_device *pdev)
static const struct of_device_id cv1800_clk_ids[] = {
{ .compatible = "sophgo,cv1800-clk", .data = &cv1800_desc },
+ { .compatible = "sophgo,cv1800b-clk", .data = &cv1800_desc },
{ .compatible = "sophgo,cv1810-clk", .data = &cv1810_desc },
+ { .compatible = "sophgo,cv1812h-clk", .data = &cv1810_desc },
{ .compatible = "sophgo,sg2000-clk", .data = &sg2000_desc },
{ }
};
diff --git a/drivers/clk/sophgo/clk-sg2044-pll.c b/drivers/clk/sophgo/clk-sg2044-pll.c
new file mode 100644
index 000000000000..94c0f519ba6d
--- /dev/null
+++ b/drivers/clk/sophgo/clk-sg2044-pll.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 PLL clock controller driver
+ *
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/sophgo,sg2044-pll.h>
+
+/* Low Control part */
+#define PLL_VCOSEL_MASK GENMASK(17, 16)
+
+/* High Control part */
+#define PLL_FBDIV_MASK GENMASK(11, 0)
+#define PLL_REFDIV_MASK GENMASK(17, 12)
+#define PLL_POSTDIV1_MASK GENMASK(20, 18)
+#define PLL_POSTDIV2_MASK GENMASK(23, 21)
+
+#define PLL_CALIBRATE_EN BIT(24)
+#define PLL_CALIBRATE_MASK GENMASK(29, 27)
+#define PLL_CALIBRATE_DEFAULT FIELD_PREP(PLL_CALIBRATE_MASK, 2)
+#define PLL_UPDATE_EN BIT(30)
+
+#define PLL_HIGH_CTRL_MASK \
+ (PLL_FBDIV_MASK | PLL_REFDIV_MASK | \
+ PLL_POSTDIV1_MASK | PLL_POSTDIV2_MASK | \
+ PLL_CALIBRATE_EN | PLL_CALIBRATE_MASK | \
+ PLL_UPDATE_EN)
+
+#define PLL_HIGH_CTRL_OFFSET 4
+
+#define PLL_VCOSEL_1G6 0x2
+#define PLL_VCOSEL_2G4 0x3
+
+#define PLL_LIMIT_FOUTVCO 0
+#define PLL_LIMIT_FOUT 1
+#define PLL_LIMIT_REFDIV 2
+#define PLL_LIMIT_FBDIV 3
+#define PLL_LIMIT_POSTDIV1 4
+#define PLL_LIMIT_POSTDIV2 5
+
+#define for_each_pll_limit_range(_var, _limit) \
+ for (_var = (_limit)->min; _var <= (_limit)->max; _var++)
+
+struct sg2044_pll_limit {
+ u64 min;
+ u64 max;
+};
+
+struct sg2044_pll_internal {
+ u32 ctrl_offset;
+ u32 status_offset;
+ u32 enable_offset;
+
+ u8 status_lock_bit;
+ u8 status_updating_bit;
+ u8 enable_bit;
+
+ const struct sg2044_pll_limit *limits;
+};
+
+struct sg2044_clk_common {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ spinlock_t *lock;
+ unsigned int id;
+};
+
+struct sg2044_pll {
+ struct sg2044_clk_common common;
+ struct sg2044_pll_internal pll;
+ unsigned int syscon_offset;
+};
+
+struct sg2044_pll_desc_data {
+ struct sg2044_clk_common * const *pll;
+ u16 num_pll;
+};
+
+#define SG2044_SYSCON_PLL_OFFSET 0x98
+
+struct sg2044_pll_ctrl {
+ spinlock_t lock;
+ struct clk_hw_onecell_data data;
+};
+
+#define hw_to_sg2044_clk_common(_hw) \
+ container_of((_hw), struct sg2044_clk_common, hw)
+
+static inline bool sg2044_clk_fit_limit(u64 value,
+ const struct sg2044_pll_limit *limit)
+{
+ return value >= limit->min && value <= limit->max;
+}
+
+static inline struct sg2044_pll *hw_to_sg2044_pll(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_pll, common);
+}
+
+static unsigned long sg2044_pll_calc_vco_rate(unsigned long parent_rate,
+ unsigned long refdiv,
+ unsigned long fbdiv)
+{
+ u64 numerator = parent_rate * fbdiv;
+
+ return div64_ul(numerator, refdiv);
+}
+
+static unsigned long sg2044_pll_calc_rate(unsigned long parent_rate,
+ unsigned long refdiv,
+ unsigned long fbdiv,
+ unsigned long postdiv1,
+ unsigned long postdiv2)
+{
+ u64 numerator, denominator;
+
+ numerator = parent_rate * fbdiv;
+ denominator = refdiv * (postdiv1 + 1) * (postdiv2 + 1);
+
+ return div64_u64(numerator, denominator);
+}
+
+static unsigned long sg2044_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ u32 value;
+ int ret;
+
+ ret = regmap_read(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset + PLL_HIGH_CTRL_OFFSET,
+ &value);
+ if (ret < 0)
+ return 0;
+
+ return sg2044_pll_calc_rate(parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value),
+ FIELD_GET(PLL_POSTDIV1_MASK, value),
+ FIELD_GET(PLL_POSTDIV2_MASK, value));
+}
+
+static bool pll_is_better_rate(unsigned long target, unsigned long now,
+ unsigned long best)
+{
+ return abs_diff(target, now) < abs_diff(target, best);
+}
+
+static int sg2042_pll_compute_postdiv(const struct sg2044_pll_limit *limits,
+ unsigned long target,
+ unsigned long parent_rate,
+ unsigned int refdiv,
+ unsigned int fbdiv,
+ unsigned int *postdiv1,
+ unsigned int *postdiv2)
+{
+ unsigned int div1, div2;
+ unsigned long tmp, best_rate = 0;
+ unsigned int best_div1 = 0, best_div2 = 0;
+
+ for_each_pll_limit_range(div2, &limits[PLL_LIMIT_POSTDIV2]) {
+ for_each_pll_limit_range(div1, &limits[PLL_LIMIT_POSTDIV1]) {
+ tmp = sg2044_pll_calc_rate(parent_rate,
+ refdiv, fbdiv,
+ div1, div2);
+
+ if (tmp > target)
+ continue;
+
+ if (pll_is_better_rate(target, tmp, best_rate)) {
+ best_div1 = div1;
+ best_div2 = div2;
+ best_rate = tmp;
+
+ if (tmp == target)
+ goto find;
+ }
+ }
+ }
+
+find:
+ if (best_rate) {
+ *postdiv1 = best_div1;
+ *postdiv2 = best_div2;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int sg2044_compute_pll_setting(const struct sg2044_pll_limit *limits,
+ unsigned long req_rate,
+ unsigned long parent_rate,
+ unsigned int *value)
+{
+ unsigned int refdiv, fbdiv, postdiv1, postdiv2;
+ unsigned int best_refdiv, best_fbdiv, best_postdiv1, best_postdiv2;
+ unsigned long tmp, best_rate = 0;
+ int ret;
+
+ for_each_pll_limit_range(fbdiv, &limits[PLL_LIMIT_FBDIV]) {
+ for_each_pll_limit_range(refdiv, &limits[PLL_LIMIT_REFDIV]) {
+ u64 vco = sg2044_pll_calc_vco_rate(parent_rate,
+ refdiv, fbdiv);
+ if (!sg2044_clk_fit_limit(vco, &limits[PLL_LIMIT_FOUTVCO]))
+ continue;
+
+ ret = sg2042_pll_compute_postdiv(limits,
+ req_rate, parent_rate,
+ refdiv, fbdiv,
+ &postdiv1, &postdiv2);
+ if (ret)
+ continue;
+
+ tmp = sg2044_pll_calc_rate(parent_rate,
+ refdiv, fbdiv,
+ postdiv1, postdiv2);
+
+ if (pll_is_better_rate(req_rate, tmp, best_rate)) {
+ best_refdiv = refdiv;
+ best_fbdiv = fbdiv;
+ best_postdiv1 = postdiv1;
+ best_postdiv2 = postdiv2;
+ best_rate = tmp;
+
+ if (tmp == req_rate)
+ goto find;
+ }
+ }
+ }
+
+find:
+ if (best_rate) {
+ *value = FIELD_PREP(PLL_REFDIV_MASK, best_refdiv) |
+ FIELD_PREP(PLL_FBDIV_MASK, best_fbdiv) |
+ FIELD_PREP(PLL_POSTDIV1_MASK, best_postdiv1) |
+ FIELD_PREP(PLL_POSTDIV2_MASK, best_postdiv2);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int sg2044_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ unsigned int value;
+ u64 target;
+ int ret;
+
+ target = clamp(req->rate, pll->pll.limits[PLL_LIMIT_FOUT].min,
+ pll->pll.limits[PLL_LIMIT_FOUT].max);
+
+ ret = sg2044_compute_pll_setting(pll->pll.limits, target,
+ req->best_parent_rate, &value);
+ if (ret < 0)
+ return ret;
+
+ req->rate = sg2044_pll_calc_rate(req->best_parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value),
+ FIELD_GET(PLL_POSTDIV1_MASK, value),
+ FIELD_GET(PLL_POSTDIV2_MASK, value));
+
+ return 0;
+}
+
+static int sg2044_pll_poll_update(struct sg2044_pll *pll)
+{
+ int ret;
+ unsigned int value;
+
+ ret = regmap_read_poll_timeout_atomic(pll->common.regmap,
+ pll->syscon_offset + pll->pll.status_offset,
+ value,
+ (value & BIT(pll->pll.status_lock_bit)),
+ 1, 100000);
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout_atomic(pll->common.regmap,
+ pll->syscon_offset + pll->pll.status_offset,
+ value,
+ (!(value & BIT(pll->pll.status_updating_bit))),
+ 1, 100000);
+}
+
+static int sg2044_pll_enable(struct sg2044_pll *pll, bool en)
+{
+ if (en) {
+ if (sg2044_pll_poll_update(pll) < 0)
+ pr_warn("%s: failed to lock PLL\n", clk_hw_get_name(&pll->common.hw));
+
+ return regmap_set_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.enable_offset,
+ BIT(pll->pll.enable_bit));
+ }
+
+ return regmap_clear_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.enable_offset,
+ BIT(pll->pll.enable_bit));
+}
+
+static int sg2044_pll_update_vcosel(struct sg2044_pll *pll, u64 rate)
+{
+ unsigned int sel;
+
+ if (rate < U64_C(2400000000))
+ sel = PLL_VCOSEL_1G6;
+ else
+ sel = PLL_VCOSEL_2G4;
+
+ return regmap_write_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset,
+ PLL_VCOSEL_MASK,
+ FIELD_PREP(PLL_VCOSEL_MASK, sel));
+}
+
+static int sg2044_pll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ unsigned int value;
+ u64 vco;
+ int ret;
+
+ ret = sg2044_compute_pll_setting(pll->pll.limits, rate,
+ parent_rate, &value);
+ if (ret < 0)
+ return ret;
+
+ vco = sg2044_pll_calc_vco_rate(parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value));
+
+ value |= PLL_CALIBRATE_EN;
+ value |= PLL_CALIBRATE_DEFAULT;
+ value |= PLL_UPDATE_EN;
+
+ guard(spinlock_irqsave)(pll->common.lock);
+
+ ret = sg2044_pll_enable(pll, false);
+ if (ret)
+ return ret;
+
+ sg2044_pll_update_vcosel(pll, vco);
+
+ regmap_write_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset +
+ PLL_HIGH_CTRL_OFFSET,
+ PLL_HIGH_CTRL_MASK, value);
+
+ sg2044_pll_enable(pll, true);
+
+ return ret;
+}
+
+static const struct clk_ops sg2044_pll_ops = {
+ .recalc_rate = sg2044_pll_recalc_rate,
+ .determine_rate = sg2044_pll_determine_rate,
+ .set_rate = sg2044_pll_set_rate,
+};
+
+static const struct clk_ops sg2044_pll_ro_ops = {
+ .recalc_rate = sg2044_pll_recalc_rate,
+};
+
+#define SG2044_CLK_COMMON_PDATA(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define DEFINE_SG2044_PLL(_id, _name, _parent, _flags, \
+ _ctrl_offset, \
+ _status_offset, _status_lock_bit, \
+ _status_updating_bit, \
+ _enable_offset, _enable_bit, \
+ _limits) \
+ struct sg2044_pll _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_pll_ops, \
+ (_flags)), \
+ .pll = { \
+ .ctrl_offset = (_ctrl_offset), \
+ .status_offset = (_status_offset), \
+ .enable_offset = (_enable_offset), \
+ .status_lock_bit = (_status_lock_bit), \
+ .status_updating_bit = (_status_updating_bit), \
+ .enable_bit = (_enable_bit), \
+ .limits = (_limits), \
+ }, \
+ }
+
+#define DEFINE_SG2044_PLL_RO(_id, _name, _parent, _flags, \
+ _ctrl_offset, \
+ _status_offset, _status_lock_bit, \
+ _status_updating_bit, \
+ _enable_offset, _enable_bit, \
+ _limits) \
+ struct sg2044_pll _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_pll_ro_ops, \
+ (_flags)), \
+ .pll = { \
+ .ctrl_offset = (_ctrl_offset), \
+ .status_offset = (_status_offset), \
+ .enable_offset = (_enable_offset), \
+ .status_lock_bit = (_status_lock_bit), \
+ .status_updating_bit = (_status_updating_bit), \
+ .enable_bit = (_enable_bit), \
+ .limits = (_limits), \
+ }, \
+ }
+
+static const struct clk_parent_data osc_parents[] = {
+ { .index = 0 },
+};
+
+static const struct sg2044_pll_limit pll_limits[] = {
+ [PLL_LIMIT_FOUTVCO] = {
+ .min = U64_C(1600000000),
+ .max = U64_C(3200000000),
+ },
+ [PLL_LIMIT_FOUT] = {
+ .min = U64_C(25000),
+ .max = U64_C(3200000000),
+ },
+ [PLL_LIMIT_REFDIV] = {
+ .min = U64_C(1),
+ .max = U64_C(63),
+ },
+ [PLL_LIMIT_FBDIV] = {
+ .min = U64_C(8),
+ .max = U64_C(1066),
+ },
+ [PLL_LIMIT_POSTDIV1] = {
+ .min = U64_C(0),
+ .max = U64_C(7),
+ },
+ [PLL_LIMIT_POSTDIV2] = {
+ .min = U64_C(0),
+ .max = U64_C(7),
+ },
+};
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL0, clk_fpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x58, 0x00, 22, 6,
+ 0x04, 6, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL1, clk_fpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x60, 0x00, 23, 7,
+ 0x04, 7, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL2, clk_fpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x20, 0x08, 16, 0,
+ 0x0c, 0, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL0, clk_dpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x68, 0x00, 24, 8,
+ 0x04, 8, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL1, clk_dpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x70, 0x00, 25, 9,
+ 0x04, 9, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL2, clk_dpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x78, 0x00, 26, 10,
+ 0x04, 10, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL3, clk_dpll3, osc_parents, CLK_IS_CRITICAL,
+ 0x80, 0x00, 27, 11,
+ 0x04, 11, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL4, clk_dpll4, osc_parents, CLK_IS_CRITICAL,
+ 0x88, 0x00, 28, 12,
+ 0x04, 12, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL5, clk_dpll5, osc_parents, CLK_IS_CRITICAL,
+ 0x90, 0x00, 29, 13,
+ 0x04, 13, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL6, clk_dpll6, osc_parents, CLK_IS_CRITICAL,
+ 0x98, 0x00, 30, 14,
+ 0x04, 14, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL7, clk_dpll7, osc_parents, CLK_IS_CRITICAL,
+ 0xa0, 0x00, 31, 15,
+ 0x04, 15, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL0, clk_mpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x28, 0x00, 16, 0,
+ 0x04, 0, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL1, clk_mpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x30, 0x00, 17, 1,
+ 0x04, 1, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL2, clk_mpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x38, 0x00, 18, 2,
+ 0x04, 2, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL3, clk_mpll3, osc_parents, CLK_IS_CRITICAL,
+ 0x40, 0x00, 19, 3,
+ 0x04, 3, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL4, clk_mpll4, osc_parents, CLK_IS_CRITICAL,
+ 0x48, 0x00, 20, 4,
+ 0x04, 4, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL5, clk_mpll5, osc_parents, CLK_IS_CRITICAL,
+ 0x50, 0x00, 21, 5,
+ 0x04, 5, pll_limits);
+
+static struct sg2044_clk_common * const sg2044_pll_commons[] = {
+ &clk_fpll0.common,
+ &clk_fpll1.common,
+ &clk_fpll2.common,
+ &clk_dpll0.common,
+ &clk_dpll1.common,
+ &clk_dpll2.common,
+ &clk_dpll3.common,
+ &clk_dpll4.common,
+ &clk_dpll5.common,
+ &clk_dpll6.common,
+ &clk_dpll7.common,
+ &clk_mpll0.common,
+ &clk_mpll1.common,
+ &clk_mpll2.common,
+ &clk_mpll3.common,
+ &clk_mpll4.common,
+ &clk_mpll5.common,
+};
+
+static int sg2044_pll_init_ctrl(struct device *dev, struct regmap *regmap,
+ struct sg2044_pll_ctrl *ctrl,
+ const struct sg2044_pll_desc_data *desc)
+{
+ int ret, i;
+
+ spin_lock_init(&ctrl->lock);
+
+ for (i = 0; i < desc->num_pll; i++) {
+ struct sg2044_clk_common *common = desc->pll[i];
+ struct sg2044_pll *pll = hw_to_sg2044_pll(&common->hw);
+
+ common->lock = &ctrl->lock;
+ common->regmap = regmap;
+ pll->syscon_offset = SG2044_SYSCON_PLL_OFFSET;
+
+ ret = devm_clk_hw_register(dev, &common->hw);
+ if (ret)
+ return ret;
+
+ ctrl->data.hws[common->id] = &common->hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &ctrl->data);
+}
+
+static int sg2044_pll_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sg2044_pll_ctrl *ctrl;
+ const struct sg2044_pll_desc_data *desc;
+ struct regmap *regmap;
+
+ regmap = device_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "fail to get the regmap for PLL\n");
+
+ desc = (const struct sg2044_pll_desc_data *)platform_get_device_id(pdev)->driver_data;
+ if (!desc)
+ return dev_err_probe(dev, -EINVAL, "no match data for platform\n");
+
+ ctrl = devm_kzalloc(dev, struct_size(ctrl, data.hws, desc->num_pll), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->data.num = desc->num_pll;
+
+ return sg2044_pll_init_ctrl(dev, regmap, ctrl, desc);
+}
+
+static const struct sg2044_pll_desc_data sg2044_pll_desc_data = {
+ .pll = sg2044_pll_commons,
+ .num_pll = ARRAY_SIZE(sg2044_pll_commons),
+};
+
+static const struct platform_device_id sg2044_pll_match[] = {
+ { .name = "sg2044-pll",
+ .driver_data = (unsigned long)&sg2044_pll_desc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, sg2044_pll_match);
+
+static struct platform_driver sg2044_clk_driver = {
+ .probe = sg2044_pll_probe,
+ .driver = {
+ .name = "sg2044-pll",
+ },
+ .id_table = sg2044_pll_match,
+};
+module_platform_driver(sg2044_clk_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo SG2044 PLL clock driver");
+MODULE_LICENSE("GPL");
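For reference, the rate that sg2044_pll_recalc_rate() derives from the high control word reduces to fout = fref * fbdiv / (refdiv * (postdiv1 + 1) * (postdiv2 + 1)), with the intermediate VCO (fref * fbdiv / refdiv) constrained to 1.6 to 3.2 GHz by pll_limits. A stand-alone sketch of that arithmetic, using the 25 MHz reference named in the Kconfig help and arbitrary example dividers (not values the driver programs):

#include <stdint.h>
#include <stdio.h>

/* Illustration of the SG2044 PLL rate formula; the divider values are examples. */
static uint64_t pll_rate(uint64_t fref, unsigned int refdiv, unsigned int fbdiv,
			 unsigned int postdiv1, unsigned int postdiv2)
{
	return (fref * fbdiv) / (refdiv * (postdiv1 + 1) * (postdiv2 + 1));
}

int main(void)
{
	/* fref = 25 MHz, fbdiv = 128, refdiv = 1  -> VCO  = 3.2 GHz */
	/* postdiv1 = 1, postdiv2 = 0              -> fout = 1.6 GHz */
	printf("%llu Hz\n",
	       (unsigned long long)pll_rate(25000000ULL, 1, 128, 1, 0));
	return 0;
}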
diff --git a/drivers/clk/sophgo/clk-sg2044.c b/drivers/clk/sophgo/clk-sg2044.c
new file mode 100644
index 000000000000..f67f99c926b6
--- /dev/null
+++ b/drivers/clk/sophgo/clk-sg2044.c
@@ -0,0 +1,1812 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 clock controller driver
+ *
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/sophgo,sg2044-clk.h>
+
+#define DIV_ASSERT BIT(0)
+#define DIV_FACTOR_REG_SOURCE BIT(3)
+#define DIV_BRANCH_EN BIT(4)
+
+#define DIV_ASSERT_TIME 2
+
+struct sg2044_div_internal {
+ u32 offset;
+ u32 initval;
+ u8 shift;
+ u8 width;
+ u16 flags;
+};
+
+struct sg2044_mux_internal {
+ const u32 *table;
+ u32 offset;
+ u16 shift;
+ u16 flags;
+};
+
+struct sg2044_gate_internal {
+ u32 offset;
+ u16 shift;
+ u16 flags;
+};
+
+struct sg2044_clk_common {
+ struct clk_hw hw;
+ void __iomem *base;
+ spinlock_t *lock;
+ unsigned int id;
+};
+
+struct sg2044_div {
+ struct sg2044_clk_common common;
+ struct sg2044_div_internal div;
+};
+
+struct sg2044_mux {
+ struct sg2044_clk_common common;
+ struct sg2044_mux_internal mux;
+ struct notifier_block nb;
+ u8 saved_parent;
+};
+
+struct sg2044_gate {
+ struct sg2044_clk_common common;
+ struct sg2044_gate_internal gate;
+};
+
+struct sg2044_clk_ctrl {
+ spinlock_t lock;
+ struct clk_hw_onecell_data data;
+};
+
+struct sg2044_clk_desc_data {
+ struct sg2044_clk_common * const *pll;
+ struct sg2044_clk_common * const *div;
+ struct sg2044_clk_common * const *mux;
+ struct sg2044_clk_common * const *gate;
+ u16 num_pll;
+ u16 num_div;
+ u16 num_mux;
+ u16 num_gate;
+};
+
+#define hw_to_sg2044_clk_common(_hw) \
+ container_of((_hw), struct sg2044_clk_common, hw)
+
+static inline struct sg2044_div *hw_to_sg2044_div(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_div, common);
+}
+
+static u32 sg2044_div_get_reg_div(u32 reg, struct sg2044_div_internal *div)
+{
+ if ((reg & DIV_FACTOR_REG_SOURCE))
+ return (reg >> div->shift) & clk_div_mask(div->width);
+
+ return div->initval == 0 ? 1 : div->initval;
+}
+
+static unsigned long _sg2044_div_recalc_rate(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ unsigned long parent_rate)
+{
+ u32 reg = readl(common->base + div->offset);
+ u32 val = sg2044_div_get_reg_div(reg, div);
+
+ return divider_recalc_rate(&common->hw, parent_rate, val, NULL,
+ div->flags, div->width);
+}
+
+static unsigned long sg2044_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return _sg2044_div_recalc_rate(&div->common, &div->div,
+ parent_rate);
+}
+
+static int _sg2044_div_determine_rate(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ struct clk_rate_request *req)
+{
+ if (div->flags & CLK_DIVIDER_READ_ONLY) {
+ u32 reg = readl(common->base + div->offset);
+ u32 val = sg2044_div_get_reg_div(reg, div);
+
+ return divider_ro_determine_rate(&common->hw, req, NULL,
+ div->width, div->flags,
+ val);
+ }
+
+ return divider_determine_rate(&common->hw, req, NULL,
+ div->width, div->flags);
+}
+
+static int sg2044_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return _sg2044_div_determine_rate(&div->common, &div->div, req);
+}
+
+static void sg2044_div_set_reg_div(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ u32 value)
+{
+ void __iomem *addr = common->base + div->offset;
+ u32 reg;
+
+ reg = readl(addr);
+
+ /* assert */
+ reg &= ~DIV_ASSERT;
+ writel(reg, addr);
+
+ /* set value */
+ reg = readl(addr);
+ reg &= ~(clk_div_mask(div->width) << div->shift);
+ reg |= (value << div->shift) | DIV_FACTOR_REG_SOURCE;
+ writel(reg, addr);
+
+ /* de-assert */
+ reg |= DIV_ASSERT;
+ writel(reg, addr);
+}
+
+static int sg2044_div_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ u32 value;
+
+ value = divider_get_val(rate, parent_rate, NULL,
+ div->div.width, div->div.flags);
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ sg2044_div_set_reg_div(&div->common, &div->div, value);
+
+ return 0;
+}
+
+static int sg2044_div_enable(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ void __iomem *addr = div->common.base + div->div.offset;
+ u32 value;
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ value = readl(addr);
+ value |= DIV_BRANCH_EN;
+ writel(value, addr);
+
+ return 0;
+}
+
+static void sg2044_div_disable(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ void __iomem *addr = div->common.base + div->div.offset;
+ u32 value;
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ value = readl(addr);
+ value &= ~DIV_BRANCH_EN;
+ writel(value, addr);
+}
+
+static int sg2044_div_is_enabled(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return readl(div->common.base + div->div.offset) & DIV_BRANCH_EN;
+}
+
+static const struct clk_ops sg2044_gateable_div_ops = {
+ .enable = sg2044_div_enable,
+ .disable = sg2044_div_disable,
+ .is_enabled = sg2044_div_is_enabled,
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+ .set_rate = sg2044_div_set_rate,
+};
+
+static const struct clk_ops sg2044_div_ops = {
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+ .set_rate = sg2044_div_set_rate,
+};
+
+static const struct clk_ops sg2044_div_ro_ops = {
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+};
+
+static inline struct sg2044_mux *hw_to_sg2044_mux(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_mux, common);
+}
+
+static inline struct sg2044_mux *nb_to_sg2044_mux(struct notifier_block *nb)
+{
+ return container_of(nb, struct sg2044_mux, nb);
+}
+
+static const u32 sg2044_mux_table[] = {0, 1};
+
+static int sg2044_mux_notifier_cb(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ struct sg2044_mux *mux = nb_to_sg2044_mux(nb);
+ const struct clk_ops *ops = &clk_mux_ops;
+ struct clk_notifier_data *ndata = data;
+ struct clk_hw *hw = __clk_get_hw(ndata->clk);
+ int ret = 0;
+
+ if (event == PRE_RATE_CHANGE) {
+ mux->saved_parent = ops->get_parent(hw);
+ if (mux->saved_parent)
+ ret = ops->set_parent(hw, 0);
+ } else if (event == POST_RATE_CHANGE) {
+ ret = ops->set_parent(hw, mux->saved_parent);
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static inline struct sg2044_gate *hw_to_sg2044_gate(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_gate, common);
+}
+
+#define SG2044_CLK_COMMON_PDATA(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define SG2044_CLK_COMMON_PHWS(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_HW(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define DEFINE_SG2044_GATEABLE_DIV(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_gateable_div_ops,\
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PHWS(_id, #_name, _parent, \
+ &sg2044_div_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV_PDATA(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_div_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV_RO(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_div_ro_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags) | CLK_DIVIDER_READ_ONLY,\
+ }, \
+ }
+
+#define DEFINE_SG2044_MUX(_id, _name, _parent, _flags, \
+ _mux_offset, _mux_shift, \
+ _mux_table, _mux_flags) \
+ struct sg2044_mux _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &clk_mux_ops, (_flags)),\
+ .mux = { \
+ .table = (_mux_table), \
+ .offset = (_mux_offset), \
+ .shift = (_mux_shift), \
+ .flags = (_mux_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_GATE(_id, _name, _parent, _flags, \
+ _gate_offset, _gate_shift, _gate_flags) \
+ struct sg2044_gate _name = { \
+ .common = SG2044_CLK_COMMON_PHWS(_id, #_name, _parent, \
+ &clk_gate_ops, (_flags)),\
+ .gate = { \
+ .offset = (_gate_offset), \
+ .shift = (_gate_shift), \
+ .flags = (_gate_flags), \
+ }, \
+ }
+
+static const struct clk_parent_data clk_fpll0_parent[] = {
+ { .fw_name = "fpll0" },
+};
+
+static const struct clk_parent_data clk_fpll1_parent[] = {
+ { .fw_name = "fpll1" },
+};
+
+static const struct clk_parent_data clk_fpll2_parent[] = {
+ { .fw_name = "fpll2" },
+};
+
+static const struct clk_parent_data clk_dpll0_parent[] = {
+ { .fw_name = "dpll0" },
+};
+
+static const struct clk_parent_data clk_dpll1_parent[] = {
+ { .fw_name = "dpll1" },
+};
+
+static const struct clk_parent_data clk_dpll2_parent[] = {
+ { .fw_name = "dpll2" },
+};
+
+static const struct clk_parent_data clk_dpll3_parent[] = {
+ { .fw_name = "dpll3" },
+};
+
+static const struct clk_parent_data clk_dpll4_parent[] = {
+ { .fw_name = "dpll4" },
+};
+
+static const struct clk_parent_data clk_dpll5_parent[] = {
+ { .fw_name = "dpll5" },
+};
+
+static const struct clk_parent_data clk_dpll6_parent[] = {
+ { .fw_name = "dpll6" },
+};
+
+static const struct clk_parent_data clk_dpll7_parent[] = {
+ { .fw_name = "dpll7" },
+};
+
+static const struct clk_parent_data clk_mpll0_parent[] = {
+ { .fw_name = "mpll0" },
+};
+
+static const struct clk_parent_data clk_mpll1_parent[] = {
+ { .fw_name = "mpll1" },
+};
+
+static const struct clk_parent_data clk_mpll2_parent[] = {
+ { .fw_name = "mpll2" },
+};
+
+static const struct clk_parent_data clk_mpll3_parent[] = {
+ { .fw_name = "mpll3" },
+};
+
+static const struct clk_parent_data clk_mpll4_parent[] = {
+ { .fw_name = "mpll4" },
+};
+
+static const struct clk_parent_data clk_mpll5_parent[] = {
+ { .fw_name = "mpll5" },
+};
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_AP_SYS_FIXED, clk_div_ap_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x044, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_AP_SYS_MAIN, clk_div_ap_sys_main,
+ clk_mpll0_parent, 0,
+ 0x040, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_RP_SYS_FIXED, clk_div_rp_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x050, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_RP_SYS_MAIN, clk_div_rp_sys_main,
+ clk_mpll1_parent, 0,
+ 0x04c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_TPU_SYS_FIXED, clk_div_tpu_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x058, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_TPU_SYS_MAIN, clk_div_tpu_sys_main,
+ clk_mpll2_parent, 0,
+ 0x054, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_NOC_SYS_FIXED, clk_div_noc_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x070, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_NOC_SYS_MAIN, clk_div_noc_sys_main,
+ clk_mpll3_parent, 0,
+ 0x06c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC0_FIXED, clk_div_vc_src0_fixed,
+ clk_fpll0_parent, 0,
+ 0x078, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC0_MAIN, clk_div_vc_src0_main,
+ clk_mpll4_parent, 0,
+ 0x074, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC1_FIXED, clk_div_vc_src1_fixed,
+ clk_fpll0_parent, 0,
+ 0x080, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 3);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC1_MAIN, clk_div_vc_src1_main,
+ clk_mpll5_parent, 0,
+ 0x07c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_CXP_MAC_FIXED, clk_div_cxp_mac_fixed,
+ clk_fpll0_parent, 0,
+ 0x088, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_CXP_MAC_MAIN, clk_div_cxp_mac_main,
+ clk_fpll1_parent, 0,
+ 0x084, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR0_FIXED, clk_div_ddr0_fixed,
+ clk_fpll0_parent, 0,
+ 0x124, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR0_MAIN, clk_div_ddr0_main,
+ clk_dpll0_parent, 0,
+ 0x120, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR1_FIXED, clk_div_ddr1_fixed,
+ clk_fpll0_parent, 0,
+ 0x12c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR1_MAIN, clk_div_ddr1_main,
+ clk_dpll1_parent, 0,
+ 0x128, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR2_FIXED, clk_div_ddr2_fixed,
+ clk_fpll0_parent, 0,
+ 0x134, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR2_MAIN, clk_div_ddr2_main,
+ clk_dpll2_parent, 0,
+ 0x130, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR3_FIXED, clk_div_ddr3_fixed,
+ clk_fpll0_parent, 0,
+ 0x13c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR3_MAIN, clk_div_ddr3_main,
+ clk_dpll3_parent, 0,
+ 0x138, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR4_FIXED, clk_div_ddr4_fixed,
+ clk_fpll0_parent, 0,
+ 0x144, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR4_MAIN, clk_div_ddr4_main,
+ clk_dpll4_parent, 0,
+ 0x140, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR5_FIXED, clk_div_ddr5_fixed,
+ clk_fpll0_parent, 0,
+ 0x14c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR5_MAIN, clk_div_ddr5_main,
+ clk_dpll5_parent, 0,
+ 0x148, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR6_FIXED, clk_div_ddr6_fixed,
+ clk_fpll0_parent, 0,
+ 0x154, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR6_MAIN, clk_div_ddr6_main,
+ clk_dpll6_parent, 0,
+ 0x150, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR7_FIXED, clk_div_ddr7_fixed,
+ clk_fpll0_parent, 0,
+ 0x15c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR7_MAIN, clk_div_ddr7_main,
+ clk_dpll7_parent, 0,
+ 0x158, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TOP_50M, clk_div_top_50m,
+ clk_fpll0_parent, 0,
+ 0x048, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 40);
+
+static const struct clk_hw *clk_div_top_50m_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_TOP_AXI0, clk_div_top_axi0,
+ clk_fpll0_parent, 0,
+ 0x118, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 20);
+
+static const struct clk_hw *clk_div_top_axi0_parent[] = {
+ &clk_div_top_axi0.common.hw,
+};
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TOP_AXI_HSPERI, clk_div_top_axi_hsperi,
+ clk_fpll0_parent, 0,
+ 0x11c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 8);
+
+static const struct clk_hw *clk_div_top_axi_hsperi_parent[] = {
+ &clk_div_top_axi_hsperi.common.hw,
+};
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER0, clk_div_timer0,
+ clk_div_top_50m_parent, 0,
+ 0x0d0, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER1, clk_div_timer1,
+ clk_div_top_50m_parent, 0,
+ 0x0d4, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER2, clk_div_timer2,
+ clk_div_top_50m_parent, 0,
+ 0x0d8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER3, clk_div_timer3,
+ clk_div_top_50m_parent, 0,
+ 0x0dc, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER4, clk_div_timer4,
+ clk_div_top_50m_parent, 0,
+ 0x0e0, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER5, clk_div_timer5,
+ clk_div_top_50m_parent, 0,
+ 0x0e4, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER6, clk_div_timer6,
+ clk_div_top_50m_parent, 0,
+ 0x0e8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER7, clk_div_timer7,
+ clk_div_top_50m_parent, 0,
+ 0x0ec, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_CXP_TEST_PHY, clk_div_cxp_test_phy,
+ clk_fpll0_parent, 0,
+ 0x064, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_CXP_TEST_ETH_PHY, clk_div_cxp_test_eth_phy,
+ clk_fpll2_parent, 0,
+ 0x068, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_C2C0_TEST_PHY, clk_div_c2c0_test_phy,
+ clk_fpll0_parent, 0,
+ 0x05c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_C2C1_TEST_PHY, clk_div_c2c1_test_phy,
+ clk_fpll0_parent, 0,
+ 0x060, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PCIE_1G, clk_div_pcie_1g,
+ clk_fpll1_parent, 0,
+ 0x160, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_UART_500M, clk_div_uart_500m,
+ clk_fpll0_parent, 0,
+ 0x0cc, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 4);
+
+static DEFINE_SG2044_DIV(CLK_DIV_GPIO_DB, clk_div_gpio_db,
+ clk_div_top_axi0_parent, 0,
+ 0x0f8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_SD, clk_div_sd,
+ clk_fpll0_parent, 0,
+ 0x110, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 5);
+
+static DEFINE_SG2044_DIV(CLK_DIV_SD_100K, clk_div_sd_100k,
+ clk_div_top_axi0_parent, 0,
+ 0x114, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_EMMC, clk_div_emmc,
+ clk_fpll0_parent, 0,
+ 0x108, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 5);
+
+static DEFINE_SG2044_DIV(CLK_DIV_EMMC_100K, clk_div_emmc_100k,
+ clk_div_top_axi0_parent, 0,
+ 0x10c, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_EFUSE, clk_div_efuse,
+ clk_fpll0_parent, 0,
+ 0x0f4, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 80);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TX_ETH0, clk_div_tx_eth0,
+ clk_fpll0_parent, 0,
+ 0x0fc, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 16);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PTP_REF_I_ETH0, clk_div_ptp_ref_i_eth0,
+ clk_fpll0_parent, 0,
+ 0x100, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 40);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_REF_ETH0, clk_div_ref_eth0,
+ clk_fpll0_parent, 0,
+ 0x104, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 80);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PKA, clk_div_pka,
+ clk_fpll0_parent, 0,
+ 0x0f0, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static const struct clk_parent_data clk_mux_ddr0_parents[] = {
+ { .hw = &clk_div_ddr0_fixed.common.hw },
+ { .hw = &clk_div_ddr0_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR0, clk_mux_ddr0,
+ clk_mux_ddr0_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 7, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr1_parents[] = {
+ { .hw = &clk_div_ddr1_fixed.common.hw },
+ { .hw = &clk_div_ddr1_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR1, clk_mux_ddr1,
+ clk_mux_ddr1_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 8, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr2_parents[] = {
+ { .hw = &clk_div_ddr2_fixed.common.hw },
+ { .hw = &clk_div_ddr2_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR2, clk_mux_ddr2,
+ clk_mux_ddr2_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 9, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr3_parents[] = {
+ { .hw = &clk_div_ddr3_fixed.common.hw },
+ { .hw = &clk_div_ddr3_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR3, clk_mux_ddr3,
+ clk_mux_ddr3_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 10, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr4_parents[] = {
+ { .hw = &clk_div_ddr4_fixed.common.hw },
+ { .hw = &clk_div_ddr4_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR4, clk_mux_ddr4,
+ clk_mux_ddr4_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 11, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr5_parents[] = {
+ { .hw = &clk_div_ddr5_fixed.common.hw },
+ { .hw = &clk_div_ddr5_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR5, clk_mux_ddr5,
+ clk_mux_ddr5_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 12, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr6_parents[] = {
+ { .hw = &clk_div_ddr6_fixed.common.hw },
+ { .hw = &clk_div_ddr6_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR6, clk_mux_ddr6,
+ clk_mux_ddr6_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 13, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr7_parents[] = {
+ { .hw = &clk_div_ddr7_fixed.common.hw },
+ { .hw = &clk_div_ddr7_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR7, clk_mux_ddr7,
+ clk_mux_ddr7_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 14, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_noc_sys_parents[] = {
+ { .hw = &clk_div_noc_sys_fixed.common.hw },
+ { .hw = &clk_div_noc_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_NOC_SYS, clk_mux_noc_sys,
+ clk_mux_noc_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 3, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_tpu_sys_parents[] = {
+ { .hw = &clk_div_tpu_sys_fixed.common.hw },
+ { .hw = &clk_div_tpu_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_TPU_SYS, clk_mux_tpu_sys,
+ clk_mux_tpu_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 2, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_rp_sys_parents[] = {
+ { .hw = &clk_div_rp_sys_fixed.common.hw },
+ { .hw = &clk_div_rp_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_RP_SYS, clk_mux_rp_sys,
+ clk_mux_rp_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 1, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_ap_sys_parents[] = {
+ { .hw = &clk_div_ap_sys_fixed.common.hw },
+ { .hw = &clk_div_ap_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_AP_SYS, clk_mux_ap_sys,
+ clk_mux_ap_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 0, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_vc_src0_parents[] = {
+ { .hw = &clk_div_vc_src0_fixed.common.hw },
+ { .hw = &clk_div_vc_src0_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_VC_SRC0, clk_mux_vc_src0,
+ clk_mux_vc_src0_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 4, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_vc_src1_parents[] = {
+ { .hw = &clk_div_vc_src1_fixed.common.hw },
+ { .hw = &clk_div_vc_src1_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_VC_SRC1, clk_mux_vc_src1,
+ clk_mux_vc_src1_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 5, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_cxp_mac_parents[] = {
+ { .hw = &clk_div_cxp_mac_fixed.common.hw },
+ { .hw = &clk_div_cxp_mac_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_CXP_MAC, clk_mux_cxp_mac,
+ clk_mux_cxp_mac_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 6, sg2044_mux_table, 0);
+
+static const struct clk_hw *clk_gate_ap_sys_parent[] = {
+ &clk_mux_ap_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_AP_SYS, clk_gate_ap_sys,
+ clk_gate_ap_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 0, 0);
+
+static const struct clk_hw *clk_gate_rp_sys_parent[] = {
+ &clk_mux_rp_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_RP_SYS, clk_gate_rp_sys,
+ clk_gate_rp_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 2, 0);
+
+static const struct clk_hw *clk_gate_tpu_sys_parent[] = {
+ &clk_mux_tpu_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TPU_SYS, clk_gate_tpu_sys,
+ clk_gate_tpu_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 3, 0);
+
+static const struct clk_hw *clk_gate_noc_sys_parent[] = {
+ &clk_mux_noc_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_NOC_SYS, clk_gate_noc_sys,
+ clk_gate_noc_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 8, 0);
+
+static const struct clk_hw *clk_gate_vc_src0_parent[] = {
+ &clk_mux_vc_src0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_VC_SRC0, clk_gate_vc_src0,
+ clk_gate_vc_src0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 9, 0);
+
+static const struct clk_hw *clk_gate_vc_src1_parent[] = {
+ &clk_mux_vc_src1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_VC_SRC1, clk_gate_vc_src1,
+ clk_gate_vc_src1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 10, 0);
+
+static const struct clk_hw *clk_gate_ddr0_parent[] = {
+ &clk_mux_ddr0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR0, clk_gate_ddr0,
+ clk_gate_ddr0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 7, 0);
+
+static const struct clk_hw *clk_gate_ddr1_parent[] = {
+ &clk_mux_ddr1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR1, clk_gate_ddr1,
+ clk_gate_ddr1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 8, 0);
+
+static const struct clk_hw *clk_gate_ddr2_parent[] = {
+ &clk_mux_ddr2.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR2, clk_gate_ddr2,
+ clk_gate_ddr2_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 9, 0);
+
+static const struct clk_hw *clk_gate_ddr3_parent[] = {
+ &clk_mux_ddr3.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR3, clk_gate_ddr3,
+ clk_gate_ddr3_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 10, 0);
+
+static const struct clk_hw *clk_gate_ddr4_parent[] = {
+ &clk_mux_ddr4.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR4, clk_gate_ddr4,
+ clk_gate_ddr4_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 11, 0);
+
+static const struct clk_hw *clk_gate_ddr5_parent[] = {
+ &clk_mux_ddr5.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR5, clk_gate_ddr5,
+ clk_gate_ddr5_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 12, 0);
+
+static const struct clk_hw *clk_gate_ddr6_parent[] = {
+ &clk_mux_ddr6.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR6, clk_gate_ddr6,
+ clk_gate_ddr6_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 13, 0);
+
+static const struct clk_hw *clk_gate_ddr7_parent[] = {
+ &clk_mux_ddr7.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR7, clk_gate_ddr7,
+ clk_gate_ddr7_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 14, 0);
+
+static const struct clk_hw *clk_gate_top_50m_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_50M, clk_gate_top_50m,
+ clk_gate_top_50m_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 1, 0);
+
+static const struct clk_hw *clk_gate_sc_rx_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SC_RX, clk_gate_sc_rx,
+ clk_gate_sc_rx_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 12, 0);
+
+static const struct clk_hw *clk_gate_sc_rx_x0y1_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SC_RX_X0Y1, clk_gate_sc_rx_x0y1,
+ clk_gate_sc_rx_x0y1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 13, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_AXI0, clk_gate_top_axi0,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 5, 0);
+
+static const struct clk_hw *clk_gate_mailbox_intc_parent[] = {
+ &clk_gate_top_axi0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC0, clk_gate_intc0,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 20, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC1, clk_gate_intc1,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 21, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC2, clk_gate_intc2,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 22, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC3, clk_gate_intc3,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 23, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX0, clk_gate_mailbox0,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 16, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX1, clk_gate_mailbox1,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 17, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX2, clk_gate_mailbox2,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 18, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX3, clk_gate_mailbox3,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 19, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_AXI_HSPERI, clk_gate_top_axi_hsperi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 6, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_TIMER, clk_gate_apb_timer,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 7, 0);
+
+static const struct clk_hw *clk_gate_timer0_parent[] = {
+ &clk_div_timer0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER0, clk_gate_timer0,
+ clk_gate_timer0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 8, 0);
+
+static const struct clk_hw *clk_gate_timer1_parent[] = {
+ &clk_div_timer1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER1, clk_gate_timer1,
+ clk_gate_timer1_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 9, 0);
+
+static const struct clk_hw *clk_gate_timer2_parent[] = {
+ &clk_div_timer2.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER2, clk_gate_timer2,
+ clk_gate_timer2_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 10, 0);
+
+static const struct clk_hw *clk_gate_timer3_parent[] = {
+ &clk_div_timer3.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER3, clk_gate_timer3,
+ clk_gate_timer3_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 11, 0);
+
+static const struct clk_hw *clk_gate_timer4_parent[] = {
+ &clk_div_timer4.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER4, clk_gate_timer4,
+ clk_gate_timer4_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 12, 0);
+
+static const struct clk_hw *clk_gate_timer5_parent[] = {
+ &clk_div_timer5.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER5, clk_gate_timer5,
+ clk_gate_timer5_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 13, 0);
+
+static const struct clk_hw *clk_gate_timer6_parent[] = {
+ &clk_div_timer6.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER6, clk_gate_timer6,
+ clk_gate_timer6_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 14, 0);
+
+static const struct clk_hw *clk_gate_timer7_parent[] = {
+ &clk_div_timer7.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER7, clk_gate_timer7,
+ clk_gate_timer7_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 15, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_CFG, clk_gate_cxp_cfg,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 15, 0);
+
+static const struct clk_hw *clk_gate_cxp_mac_parent[] = {
+ &clk_mux_cxp_mac.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_MAC, clk_gate_cxp_mac,
+ clk_gate_cxp_mac_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 14, 0);
+
+static const struct clk_hw *clk_gate_cxp_test_phy_parent[] = {
+ &clk_div_cxp_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_TEST_PHY, clk_gate_cxp_test_phy,
+ clk_gate_cxp_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 6, 0);
+
+static const struct clk_hw *clk_gate_cxp_test_eth_phy_parent[] = {
+ &clk_div_cxp_test_eth_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_TEST_ETH_PHY, clk_gate_cxp_test_eth_phy,
+ clk_gate_cxp_test_eth_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 7, 0);
+
+static const struct clk_hw *clk_gate_pcie_1g_parent[] = {
+ &clk_div_pcie_1g.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PCIE_1G, clk_gate_pcie_1g,
+ clk_gate_pcie_1g_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 15, 0);
+
+static const struct clk_hw *clk_gate_c2c0_test_phy_parent[] = {
+ &clk_div_c2c0_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_C2C0_TEST_PHY, clk_gate_c2c0_test_phy,
+ clk_gate_c2c0_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 4, 0);
+
+static const struct clk_hw *clk_gate_c2c1_test_phy_parent[] = {
+ &clk_div_c2c1_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_C2C1_TEST_PHY, clk_gate_c2c1_test_phy,
+ clk_gate_c2c1_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 5, 0);
+
+static const struct clk_hw *clk_gate_uart_500m_parent[] = {
+ &clk_div_uart_500m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_UART_500M, clk_gate_uart_500m,
+ clk_gate_uart_500m_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 1, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_UART, clk_gate_apb_uart,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 2, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_SPI, clk_gate_apb_spi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 22, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AHB_SPIFMC, clk_gate_ahb_spifmc,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 5, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_I2C, clk_gate_apb_i2c,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 23, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_DBG_I2C, clk_gate_axi_dbg_i2c,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 3, 0);
+
+static const struct clk_hw *clk_gate_gpio_db_parent[] = {
+ &clk_div_gpio_db.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_GPIO_DB, clk_gate_gpio_db,
+ clk_gate_gpio_db_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 21, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_GPIO_INTR, clk_gate_apb_gpio_intr,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 20, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_GPIO, clk_gate_apb_gpio,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 19, 0);
+
+static const struct clk_hw *clk_gate_sd_parent[] = {
+ &clk_div_sd.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SD, clk_gate_sd,
+ clk_gate_sd_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 3, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_SD, clk_gate_axi_sd,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 2, 0);
+
+static const struct clk_hw *clk_gate_sd_100k_parent[] = {
+ &clk_div_sd_100k.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SD_100K, clk_gate_sd_100k,
+ clk_gate_sd_100k_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 4, 0);
+
+static const struct clk_hw *clk_gate_emmc_parent[] = {
+ &clk_div_emmc.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EMMC, clk_gate_emmc,
+ clk_gate_emmc_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 0, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_EMMC, clk_gate_axi_emmc,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 31, 0);
+
+static const struct clk_hw *clk_gate_emmc_100k_parent[] = {
+ &clk_div_emmc_100k.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EMMC_100K, clk_gate_emmc_100k,
+ clk_gate_emmc_100k_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 1, 0);
+
+static const struct clk_hw *clk_gate_efuse_parent[] = {
+ &clk_div_efuse.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EFUSE, clk_gate_efuse,
+ clk_gate_efuse_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 17, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_EFUSE, clk_gate_apb_efuse,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 18, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_SYSDMA_AXI, clk_gate_sysdma_axi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 0, 0);
+
+static const struct clk_hw *clk_gate_tx_eth0_parent[] = {
+ &clk_div_tx_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TX_ETH0, clk_gate_tx_eth0,
+ clk_gate_tx_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 27, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_ETH0, clk_gate_axi_eth0,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 28, 0);
+
+static const struct clk_hw *clk_gate_ptp_ref_i_eth0_parent[] = {
+ &clk_div_ptp_ref_i_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PTP_REF_I_ETH0, clk_gate_ptp_ref_i_eth0,
+ clk_gate_ptp_ref_i_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 29, 0);
+
+static const struct clk_hw *clk_gate_ref_eth0_parent[] = {
+ &clk_div_ref_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_REF_ETH0, clk_gate_ref_eth0,
+ clk_gate_ref_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 30, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_RTC, clk_gate_apb_rtc,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 26, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_PWM, clk_gate_apb_pwm,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 25, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_WDT, clk_gate_apb_wdt,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 24, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_SRAM, clk_gate_axi_sram,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 6, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AHB_ROM, clk_gate_ahb_rom,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 4, 0);
+
+static const struct clk_hw *clk_gate_pka_parent[] = {
+ &clk_div_pka.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PKA, clk_gate_pka,
+ clk_gate_pka_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 16, 0);
+
+static struct sg2044_clk_common * const sg2044_div_commons[] = {
+ &clk_div_ap_sys_fixed.common,
+ &clk_div_ap_sys_main.common,
+ &clk_div_rp_sys_fixed.common,
+ &clk_div_rp_sys_main.common,
+ &clk_div_tpu_sys_fixed.common,
+ &clk_div_tpu_sys_main.common,
+ &clk_div_noc_sys_fixed.common,
+ &clk_div_noc_sys_main.common,
+ &clk_div_vc_src0_fixed.common,
+ &clk_div_vc_src0_main.common,
+ &clk_div_vc_src1_fixed.common,
+ &clk_div_vc_src1_main.common,
+ &clk_div_cxp_mac_fixed.common,
+ &clk_div_cxp_mac_main.common,
+ &clk_div_ddr0_fixed.common,
+ &clk_div_ddr0_main.common,
+ &clk_div_ddr1_fixed.common,
+ &clk_div_ddr1_main.common,
+ &clk_div_ddr2_fixed.common,
+ &clk_div_ddr2_main.common,
+ &clk_div_ddr3_fixed.common,
+ &clk_div_ddr3_main.common,
+ &clk_div_ddr4_fixed.common,
+ &clk_div_ddr4_main.common,
+ &clk_div_ddr5_fixed.common,
+ &clk_div_ddr5_main.common,
+ &clk_div_ddr6_fixed.common,
+ &clk_div_ddr6_main.common,
+ &clk_div_ddr7_fixed.common,
+ &clk_div_ddr7_main.common,
+ &clk_div_top_50m.common,
+ &clk_div_top_axi0.common,
+ &clk_div_top_axi_hsperi.common,
+ &clk_div_timer0.common,
+ &clk_div_timer1.common,
+ &clk_div_timer2.common,
+ &clk_div_timer3.common,
+ &clk_div_timer4.common,
+ &clk_div_timer5.common,
+ &clk_div_timer6.common,
+ &clk_div_timer7.common,
+ &clk_div_cxp_test_phy.common,
+ &clk_div_cxp_test_eth_phy.common,
+ &clk_div_c2c0_test_phy.common,
+ &clk_div_c2c1_test_phy.common,
+ &clk_div_pcie_1g.common,
+ &clk_div_uart_500m.common,
+ &clk_div_gpio_db.common,
+ &clk_div_sd.common,
+ &clk_div_sd_100k.common,
+ &clk_div_emmc.common,
+ &clk_div_emmc_100k.common,
+ &clk_div_efuse.common,
+ &clk_div_tx_eth0.common,
+ &clk_div_ptp_ref_i_eth0.common,
+ &clk_div_ref_eth0.common,
+ &clk_div_pka.common,
+};
+
+static struct sg2044_clk_common * const sg2044_mux_commons[] = {
+ &clk_mux_ddr0.common,
+ &clk_mux_ddr1.common,
+ &clk_mux_ddr2.common,
+ &clk_mux_ddr3.common,
+ &clk_mux_ddr4.common,
+ &clk_mux_ddr5.common,
+ &clk_mux_ddr6.common,
+ &clk_mux_ddr7.common,
+ &clk_mux_noc_sys.common,
+ &clk_mux_tpu_sys.common,
+ &clk_mux_rp_sys.common,
+ &clk_mux_ap_sys.common,
+ &clk_mux_vc_src0.common,
+ &clk_mux_vc_src1.common,
+ &clk_mux_cxp_mac.common,
+};
+
+static struct sg2044_clk_common * const sg2044_gate_commons[] = {
+ &clk_gate_ap_sys.common,
+ &clk_gate_rp_sys.common,
+ &clk_gate_tpu_sys.common,
+ &clk_gate_noc_sys.common,
+ &clk_gate_vc_src0.common,
+ &clk_gate_vc_src1.common,
+ &clk_gate_ddr0.common,
+ &clk_gate_ddr1.common,
+ &clk_gate_ddr2.common,
+ &clk_gate_ddr3.common,
+ &clk_gate_ddr4.common,
+ &clk_gate_ddr5.common,
+ &clk_gate_ddr6.common,
+ &clk_gate_ddr7.common,
+ &clk_gate_top_50m.common,
+ &clk_gate_sc_rx.common,
+ &clk_gate_sc_rx_x0y1.common,
+ &clk_gate_top_axi0.common,
+ &clk_gate_intc0.common,
+ &clk_gate_intc1.common,
+ &clk_gate_intc2.common,
+ &clk_gate_intc3.common,
+ &clk_gate_mailbox0.common,
+ &clk_gate_mailbox1.common,
+ &clk_gate_mailbox2.common,
+ &clk_gate_mailbox3.common,
+ &clk_gate_top_axi_hsperi.common,
+ &clk_gate_apb_timer.common,
+ &clk_gate_timer0.common,
+ &clk_gate_timer1.common,
+ &clk_gate_timer2.common,
+ &clk_gate_timer3.common,
+ &clk_gate_timer4.common,
+ &clk_gate_timer5.common,
+ &clk_gate_timer6.common,
+ &clk_gate_timer7.common,
+ &clk_gate_cxp_cfg.common,
+ &clk_gate_cxp_mac.common,
+ &clk_gate_cxp_test_phy.common,
+ &clk_gate_cxp_test_eth_phy.common,
+ &clk_gate_pcie_1g.common,
+ &clk_gate_c2c0_test_phy.common,
+ &clk_gate_c2c1_test_phy.common,
+ &clk_gate_uart_500m.common,
+ &clk_gate_apb_uart.common,
+ &clk_gate_apb_spi.common,
+ &clk_gate_ahb_spifmc.common,
+ &clk_gate_apb_i2c.common,
+ &clk_gate_axi_dbg_i2c.common,
+ &clk_gate_gpio_db.common,
+ &clk_gate_apb_gpio_intr.common,
+ &clk_gate_apb_gpio.common,
+ &clk_gate_sd.common,
+ &clk_gate_axi_sd.common,
+ &clk_gate_sd_100k.common,
+ &clk_gate_emmc.common,
+ &clk_gate_axi_emmc.common,
+ &clk_gate_emmc_100k.common,
+ &clk_gate_efuse.common,
+ &clk_gate_apb_efuse.common,
+ &clk_gate_sysdma_axi.common,
+ &clk_gate_tx_eth0.common,
+ &clk_gate_axi_eth0.common,
+ &clk_gate_ptp_ref_i_eth0.common,
+ &clk_gate_ref_eth0.common,
+ &clk_gate_apb_rtc.common,
+ &clk_gate_apb_pwm.common,
+ &clk_gate_apb_wdt.common,
+ &clk_gate_axi_sram.common,
+ &clk_gate_ahb_rom.common,
+ &clk_gate_pka.common,
+};
+
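+/*
+ * The gate definitions reference the statically declared parent hw structs,
+ * but the clk_hw actually registered for a parent (notably for muxes, which
+ * are re-registered through the generic mux helper) is the one stored in
+ * data->hws, indexed by the parent's clock id. Remap the init parents to the
+ * registered hw pointers before a gate is registered.
+ */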
+static void sg2044_clk_fix_init_parent(struct clk_hw **pdata,
+ const struct clk_init_data *init,
+ struct clk_hw_onecell_data *data)
+{
+ u8 i;
+ const struct clk_hw *hw;
+ const struct sg2044_clk_common *common;
+
+ for (i = 0; i < init->num_parents; i++) {
+ hw = init->parent_hws[i];
+ common = hw_to_sg2044_clk_common(hw);
+
+		WARN(!data->hws[common->id], "clk %u is not registered\n",
+		     common->id);
+ pdata[i] = data->hws[common->id];
+ }
+}
+
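+/*
+ * Register all clocks of one controller: dividers first, then muxes (with a
+ * rate-change notifier attached to every mux that is not read-only), and
+ * finally the gates, whose parents are remapped to the already registered
+ * divider/mux hw pointers.
+ */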
+static int sg2044_clk_init_ctrl(struct device *dev, void __iomem *reg,
+ struct sg2044_clk_ctrl *ctrl,
+ const struct sg2044_clk_desc_data *desc)
+{
+ int ret, i;
+ struct clk_hw *hw;
+
+ spin_lock_init(&ctrl->lock);
+
+ for (i = 0; i < desc->num_div; i++) {
+ struct sg2044_clk_common *common = desc->div[i];
+
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ ret = devm_clk_hw_register(dev, &common->hw);
+ if (ret)
+ return ret;
+
+ ctrl->data.hws[common->id] = &common->hw;
+ }
+
+ for (i = 0; i < desc->num_mux; i++) {
+ struct sg2044_clk_common *common = desc->mux[i];
+ struct sg2044_mux *mux = hw_to_sg2044_mux(&common->hw);
+ const struct clk_init_data *init = common->hw.init;
+
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ hw = devm_clk_hw_register_mux_parent_data_table(dev,
+ init->name,
+ init->parent_data,
+ init->num_parents,
+ init->flags,
+ reg + mux->mux.offset,
+ mux->mux.shift,
+ 1,
+ mux->mux.flags,
+ mux->mux.table,
+ &ctrl->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ if (!(mux->mux.flags & CLK_MUX_READ_ONLY)) {
+ mux->nb.notifier_call = sg2044_mux_notifier_cb;
+ ret = devm_clk_notifier_register(dev, hw->clk,
+ &mux->nb);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "%s: failed to register notifier\n",
+ clk_hw_get_name(hw));
+ }
+
+ ctrl->data.hws[common->id] = hw;
+ }
+
+ for (i = 0; i < desc->num_gate; i++) {
+ struct sg2044_clk_common *common = desc->gate[i];
+ struct sg2044_gate *gate = hw_to_sg2044_gate(&common->hw);
+ const struct clk_init_data *init = common->hw.init;
+ struct clk_hw *parent_hws[1] = { };
+
+ sg2044_clk_fix_init_parent(parent_hws, init, &ctrl->data);
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ hw = devm_clk_hw_register_gate_parent_hw(dev, init->name,
+ parent_hws[0],
+ init->flags,
+ reg + gate->gate.offset,
+ gate->gate.shift,
+ gate->gate.flags,
+ &ctrl->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ ctrl->data.hws[common->id] = hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &ctrl->data);
+}
+
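+/*
+ * Map the register window, look up the per-SoC clock descriptor from the
+ * match data, allocate the onecell table sized for all divider, mux and gate
+ * clocks, and register them through sg2044_clk_init_ctrl().
+ */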
+static int sg2044_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sg2044_clk_ctrl *ctrl;
+ const struct sg2044_clk_desc_data *desc;
+ void __iomem *reg;
+ u32 num_clks;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ desc = device_get_match_data(dev);
+ if (!desc)
+ return dev_err_probe(dev, -EINVAL, "no match data for platform\n");
+
+ num_clks = desc->num_div + desc->num_gate + desc->num_mux;
+
+ ctrl = devm_kzalloc(dev, struct_size(ctrl, data.hws, num_clks), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->data.num = num_clks;
+
+ return sg2044_clk_init_ctrl(dev, reg, ctrl, desc);
+}
+
+static const struct sg2044_clk_desc_data sg2044_clk_desc_data = {
+ .div = sg2044_div_commons,
+ .mux = sg2044_mux_commons,
+ .gate = sg2044_gate_commons,
+ .num_div = ARRAY_SIZE(sg2044_div_commons),
+ .num_mux = ARRAY_SIZE(sg2044_mux_commons),
+ .num_gate = ARRAY_SIZE(sg2044_gate_commons),
+};
+
+static const struct of_device_id sg2044_clk_match[] = {
+ { .compatible = "sophgo,sg2044-clk", .data = &sg2044_clk_desc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sg2044_clk_match);
+
+static struct platform_driver sg2044_clk_driver = {
+ .probe = sg2044_clk_probe,
+ .driver = {
+ .name = "sg2044-clk",
+ .of_match_table = sg2044_clk_match,
+ },
+};
+module_platform_driver(sg2044_clk_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo SG2044 clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/spacemit/Kconfig b/drivers/clk/spacemit/Kconfig
new file mode 100644
index 000000000000..4c4df845b3cb
--- /dev/null
+++ b/drivers/clk/spacemit/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config SPACEMIT_CCU
+ tristate "Clock support for SpacemiT SoCs"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Say Y to enable clock controller unit support for SpacemiT SoCs.
+
+if SPACEMIT_CCU
+
+config SPACEMIT_K1_CCU
+ tristate "Support for SpacemiT K1 SoC"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ help
+	  Support for the clock controller unit in the SpacemiT K1 SoC.
+
+endif
diff --git a/drivers/clk/spacemit/Makefile b/drivers/clk/spacemit/Makefile
new file mode 100644
index 000000000000..5ec6da61db98
--- /dev/null
+++ b/drivers/clk/spacemit/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SPACEMIT_K1_CCU) += spacemit-ccu-k1.o
+spacemit-ccu-k1-y = ccu_pll.o ccu_mix.o ccu_ddn.o
+spacemit-ccu-k1-y += ccu-k1.o
diff --git a/drivers/clk/spacemit/ccu-k1.c b/drivers/clk/spacemit/ccu-k1.c
new file mode 100644
index 000000000000..cdde37a05235
--- /dev/null
+++ b/drivers/clk/spacemit/ccu-k1.c
@@ -0,0 +1,1164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#include <linux/array_size.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_pll.h"
+#include "ccu_mix.h"
+#include "ccu_ddn.h"
+
+#include <dt-bindings/clock/spacemit,k1-syscon.h>
+
+/* APBS register offset */
+#define APBS_PLL1_SWCR1 0x100
+#define APBS_PLL1_SWCR2 0x104
+#define APBS_PLL1_SWCR3 0x108
+#define APBS_PLL2_SWCR1 0x118
+#define APBS_PLL2_SWCR2 0x11c
+#define APBS_PLL2_SWCR3 0x120
+#define APBS_PLL3_SWCR1 0x124
+#define APBS_PLL3_SWCR2 0x128
+#define APBS_PLL3_SWCR3 0x12c
+
+/* MPMU register offset */
+#define MPMU_POSR 0x0010
+#define POSR_PLL1_LOCK BIT(27)
+#define POSR_PLL2_LOCK BIT(28)
+#define POSR_PLL3_LOCK BIT(29)
+#define MPMU_SUCCR 0x0014
+#define MPMU_ISCCR 0x0044
+#define MPMU_WDTPCR 0x0200
+#define MPMU_RIPCCR 0x0210
+#define MPMU_ACGR 0x1024
+#define MPMU_APBCSCR 0x1050
+#define MPMU_SUCCR_1 0x10b0
+
+/* APBC register offset */
+#define APBC_UART1_CLK_RST 0x00
+#define APBC_UART2_CLK_RST 0x04
+#define APBC_GPIO_CLK_RST 0x08
+#define APBC_PWM0_CLK_RST 0x0c
+#define APBC_PWM1_CLK_RST 0x10
+#define APBC_PWM2_CLK_RST 0x14
+#define APBC_PWM3_CLK_RST 0x18
+#define APBC_TWSI8_CLK_RST 0x20
+#define APBC_UART3_CLK_RST 0x24
+#define APBC_RTC_CLK_RST 0x28
+#define APBC_TWSI0_CLK_RST 0x2c
+#define APBC_TWSI1_CLK_RST 0x30
+#define APBC_TIMERS1_CLK_RST 0x34
+#define APBC_TWSI2_CLK_RST 0x38
+#define APBC_AIB_CLK_RST 0x3c
+#define APBC_TWSI4_CLK_RST 0x40
+#define APBC_TIMERS2_CLK_RST 0x44
+#define APBC_ONEWIRE_CLK_RST 0x48
+#define APBC_TWSI5_CLK_RST 0x4c
+#define APBC_DRO_CLK_RST 0x58
+#define APBC_IR_CLK_RST 0x5c
+#define APBC_TWSI6_CLK_RST 0x60
+#define APBC_COUNTER_CLK_SEL 0x64
+#define APBC_TWSI7_CLK_RST 0x68
+#define APBC_TSEN_CLK_RST 0x6c
+#define APBC_UART4_CLK_RST 0x70
+#define APBC_UART5_CLK_RST 0x74
+#define APBC_UART6_CLK_RST 0x78
+#define APBC_SSP3_CLK_RST 0x7c
+#define APBC_SSPA0_CLK_RST 0x80
+#define APBC_SSPA1_CLK_RST 0x84
+#define APBC_IPC_AP2AUD_CLK_RST 0x90
+#define APBC_UART7_CLK_RST 0x94
+#define APBC_UART8_CLK_RST 0x98
+#define APBC_UART9_CLK_RST 0x9c
+#define APBC_CAN0_CLK_RST 0xa0
+#define APBC_PWM4_CLK_RST 0xa8
+#define APBC_PWM5_CLK_RST 0xac
+#define APBC_PWM6_CLK_RST 0xb0
+#define APBC_PWM7_CLK_RST 0xb4
+#define APBC_PWM8_CLK_RST 0xb8
+#define APBC_PWM9_CLK_RST 0xbc
+#define APBC_PWM10_CLK_RST 0xc0
+#define APBC_PWM11_CLK_RST 0xc4
+#define APBC_PWM12_CLK_RST 0xc8
+#define APBC_PWM13_CLK_RST 0xcc
+#define APBC_PWM14_CLK_RST 0xd0
+#define APBC_PWM15_CLK_RST 0xd4
+#define APBC_PWM16_CLK_RST 0xd8
+#define APBC_PWM17_CLK_RST 0xdc
+#define APBC_PWM18_CLK_RST 0xe0
+#define APBC_PWM19_CLK_RST 0xe4
+
+/* APMU register offset */
+#define APMU_JPG_CLK_RES_CTRL 0x020
+#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x024
+#define APMU_ISP_CLK_RES_CTRL 0x038
+#define APMU_LCD_CLK_RES_CTRL1 0x044
+#define APMU_LCD_SPI_CLK_RES_CTRL 0x048
+#define APMU_LCD_CLK_RES_CTRL2 0x04c
+#define APMU_CCIC_CLK_RES_CTRL 0x050
+#define APMU_SDH0_CLK_RES_CTRL 0x054
+#define APMU_SDH1_CLK_RES_CTRL 0x058
+#define APMU_USB_CLK_RES_CTRL 0x05c
+#define APMU_QSPI_CLK_RES_CTRL 0x060
+#define APMU_DMA_CLK_RES_CTRL 0x064
+#define APMU_AES_CLK_RES_CTRL 0x068
+#define APMU_VPU_CLK_RES_CTRL 0x0a4
+#define APMU_GPU_CLK_RES_CTRL 0x0cc
+#define APMU_SDH2_CLK_RES_CTRL 0x0e0
+#define APMU_PMUA_MC_CTRL 0x0e8
+#define APMU_PMU_CC2_AP 0x100
+#define APMU_PMUA_EM_CLK_RES_CTRL 0x104
+#define APMU_AUDIO_CLK_RES_CTRL 0x14c
+#define APMU_HDMI_CLK_RES_CTRL 0x1b8
+#define APMU_CCI550_CLK_CTRL 0x300
+#define APMU_ACLK_CLK_CTRL 0x388
+#define APMU_CPU_C0_CLK_CTRL 0x38C
+#define APMU_CPU_C1_CLK_CTRL 0x390
+#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc
+#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4
+#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc
+#define APMU_EMAC0_CLK_RES_CTRL 0x3e4
+#define APMU_EMAC1_CLK_RES_CTRL 0x3ec
+
+struct spacemit_ccu_data {
+ struct clk_hw **hws;
+ size_t num;
+};
+
+/* APBS clocks start. The APBS region contains all PLL clocks and nothing else. */
+
+/*
+ * PLL{1,2} must run at fixed frequencies to provide clocks at the correct
+ * rates for peripherals.
+ */
+static const struct ccu_pll_rate_tbl pll1_rate_tbl[] = {
+ CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd),
+};
+
+static const struct ccu_pll_rate_tbl pll2_rate_tbl[] = {
+ CCU_PLL_RATE(3000000000UL, 0x0050dd66, 0x3fe00000),
+};
+
+static const struct ccu_pll_rate_tbl pll3_rate_tbl[] = {
+ CCU_PLL_RATE(1600000000UL, 0x0050cd61, 0x43eaaaab),
+ CCU_PLL_RATE(1800000000UL, 0x0050cd61, 0x4b000000),
+ CCU_PLL_RATE(2000000000UL, 0x0050dd62, 0x2aeaaaab),
+ CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd),
+ CCU_PLL_RATE(3000000000UL, 0x0050dd66, 0x3fe00000),
+ CCU_PLL_RATE(3200000000UL, 0x0050dd67, 0x43eaaaab),
+};
+
+CCU_PLL_DEFINE(pll1, pll1_rate_tbl, APBS_PLL1_SWCR1, APBS_PLL1_SWCR3, MPMU_POSR, POSR_PLL1_LOCK,
+ CLK_SET_RATE_GATE);
+CCU_PLL_DEFINE(pll2, pll2_rate_tbl, APBS_PLL2_SWCR1, APBS_PLL2_SWCR3, MPMU_POSR, POSR_PLL2_LOCK,
+ CLK_SET_RATE_GATE);
+CCU_PLL_DEFINE(pll3, pll3_rate_tbl, APBS_PLL3_SWCR1, APBS_PLL3_SWCR3, MPMU_POSR, POSR_PLL3_LOCK,
+ CLK_SET_RATE_GATE);
+
+CCU_FACTOR_GATE_DEFINE(pll1_d2, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d3, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d6, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(7), 8, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d11_223p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(15), 11, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d13_189, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(16), 13, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d23_106p8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(20), 23, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d64_38p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(0), 64, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_aud_245p7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(10), 10, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_aud_24p5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(11), 100, 1);
+
+CCU_FACTOR_GATE_DEFINE(pll2_d1, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(0), 1, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d2, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d3, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d4, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d5, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d6, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d7, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d8, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(7), 8, 1);
+
+CCU_FACTOR_GATE_DEFINE(pll3_d1, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(0), 1, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d2, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d3, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d4, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d5, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d6, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d7, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d8, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(7), 8, 1);
+
+CCU_FACTOR_DEFINE(pll3_20, CCU_PARENT_HW(pll3_d8), 20, 1);
+CCU_FACTOR_DEFINE(pll3_40, CCU_PARENT_HW(pll3_d8), 10, 1);
+CCU_FACTOR_DEFINE(pll3_80, CCU_PARENT_HW(pll3_d8), 5, 1);
+
+/* APBS clocks end */
+
+/* MPMU clocks start */
+CCU_GATE_DEFINE(pll1_d8_307p2, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(13), 0);
+
+CCU_FACTOR_DEFINE(pll1_d32_76p8, CCU_PARENT_HW(pll1_d8_307p2), 4, 1);
+
+CCU_FACTOR_DEFINE(pll1_d40_61p44, CCU_PARENT_HW(pll1_d8_307p2), 5, 1);
+
+CCU_FACTOR_DEFINE(pll1_d16_153p6, CCU_PARENT_HW(pll1_d8), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d24_102p4, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(12), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d48_51p2, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(7), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d48_51p2_ap, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(11), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_m3d128_57p6, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(8), 16, 3);
+CCU_FACTOR_GATE_DEFINE(pll1_d96_25p6, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(4), 12, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d192_12p8, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(3), 24, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d192_12p8_wdt, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(19), 24, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d384_6p4, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(2), 48, 1);
+
+CCU_FACTOR_DEFINE(pll1_d768_3p2, CCU_PARENT_HW(pll1_d384_6p4), 2, 1);
+CCU_FACTOR_DEFINE(pll1_d1536_1p6, CCU_PARENT_HW(pll1_d384_6p4), 4, 1);
+CCU_FACTOR_DEFINE(pll1_d3072_0p8, CCU_PARENT_HW(pll1_d384_6p4), 8, 1);
+
+CCU_GATE_DEFINE(pll1_d6_409p6, CCU_PARENT_HW(pll1_d6), MPMU_ACGR, BIT(0), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d12_204p8, CCU_PARENT_HW(pll1_d6), MPMU_ACGR, BIT(5), 2, 1);
+
+CCU_GATE_DEFINE(pll1_d5_491p52, CCU_PARENT_HW(pll1_d5), MPMU_ACGR, BIT(21), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d10_245p76, CCU_PARENT_HW(pll1_d5), MPMU_ACGR, BIT(18), 2, 1);
+
+CCU_GATE_DEFINE(pll1_d4_614p4, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(15), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d52_47p26, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(10), 13, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d78_31p5, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(6), 39, 2);
+
+CCU_GATE_DEFINE(pll1_d3_819p2, CCU_PARENT_HW(pll1_d3), MPMU_ACGR, BIT(14), 0);
+
+CCU_GATE_DEFINE(pll1_d2_1228p8, CCU_PARENT_HW(pll1_d2), MPMU_ACGR, BIT(16), 0);
+
+CCU_GATE_DEFINE(slow_uart, CCU_PARENT_NAME(osc), MPMU_ACGR, BIT(1), CLK_IGNORE_UNUSED);
+CCU_DDN_DEFINE(slow_uart1_14p74, pll1_d16_153p6, MPMU_SUCCR, 16, 13, 0, 13, 0);
+CCU_DDN_DEFINE(slow_uart2_48, pll1_d4_614p4, MPMU_SUCCR_1, 16, 13, 0, 13, 0);
+
+CCU_GATE_DEFINE(wdt_clk, CCU_PARENT_HW(pll1_d96_25p6), MPMU_WDTPCR, BIT(1), 0);
+
+CCU_FACTOR_GATE_DEFINE(i2s_sysclk, CCU_PARENT_HW(pll1_d16_153p6), MPMU_ISCCR, BIT(31), 50, 1);
+CCU_FACTOR_GATE_DEFINE(i2s_bclk, CCU_PARENT_HW(i2s_sysclk), MPMU_ISCCR, BIT(29), 1, 1);
+
+static const struct clk_parent_data apb_parents[] = {
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d24_102p4),
+};
+CCU_MUX_DEFINE(apb_clk, apb_parents, MPMU_APBCSCR, 0, 2, 0);
+
+CCU_GATE_DEFINE(wdt_bus_clk, CCU_PARENT_HW(apb_clk), MPMU_WDTPCR, BIT(0), 0);
+
+CCU_GATE_DEFINE(ripc_clk, CCU_PARENT_HW(apb_clk), MPMU_RIPCCR, 0x1, 0);
+/* MPMU clocks end */
+
+/* APBC clocks start */
+static const struct clk_parent_data uart_clk_parents[] = {
+ CCU_PARENT_HW(pll1_m3d128_57p6),
+ CCU_PARENT_HW(slow_uart1_14p74),
+ CCU_PARENT_HW(slow_uart2_48),
+};
+CCU_MUX_GATE_DEFINE(uart0_clk, uart_clk_parents, APBC_UART1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart2_clk, uart_clk_parents, APBC_UART2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart3_clk, uart_clk_parents, APBC_UART3_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart4_clk, uart_clk_parents, APBC_UART4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart5_clk, uart_clk_parents, APBC_UART5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart6_clk, uart_clk_parents, APBC_UART6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart7_clk, uart_clk_parents, APBC_UART7_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart8_clk, uart_clk_parents, APBC_UART8_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart9_clk, uart_clk_parents, APBC_UART9_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(gpio_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_GPIO_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data pwm_parents[] = {
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_NAME(osc),
+};
+CCU_MUX_GATE_DEFINE(pwm0_clk, pwm_parents, APBC_PWM0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm1_clk, pwm_parents, APBC_PWM1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm2_clk, pwm_parents, APBC_PWM2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm3_clk, pwm_parents, APBC_PWM3_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm4_clk, pwm_parents, APBC_PWM4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm5_clk, pwm_parents, APBC_PWM5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm6_clk, pwm_parents, APBC_PWM6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm7_clk, pwm_parents, APBC_PWM7_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm8_clk, pwm_parents, APBC_PWM8_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm9_clk, pwm_parents, APBC_PWM9_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm10_clk, pwm_parents, APBC_PWM10_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm11_clk, pwm_parents, APBC_PWM11_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm12_clk, pwm_parents, APBC_PWM12_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm13_clk, pwm_parents, APBC_PWM13_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm14_clk, pwm_parents, APBC_PWM14_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm15_clk, pwm_parents, APBC_PWM15_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm16_clk, pwm_parents, APBC_PWM16_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm17_clk, pwm_parents, APBC_PWM17_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm18_clk, pwm_parents, APBC_PWM18_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm19_clk, pwm_parents, APBC_PWM19_CLK_RST, 4, 3, BIT(1), 0);
+
+static const struct clk_parent_data ssp_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+};
+CCU_MUX_GATE_DEFINE(ssp3_clk, ssp_parents, APBC_SSP3_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(rtc_clk, CCU_PARENT_NAME(osc), APBC_RTC_CLK_RST,
+ BIT(7) | BIT(1), 0);
+
+static const struct clk_parent_data twsi_parents[] = {
+ CCU_PARENT_HW(pll1_d78_31p5),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d40_61p44),
+};
+CCU_MUX_GATE_DEFINE(twsi0_clk, twsi_parents, APBC_TWSI0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi1_clk, twsi_parents, APBC_TWSI1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi2_clk, twsi_parents, APBC_TWSI2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi4_clk, twsi_parents, APBC_TWSI4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi5_clk, twsi_parents, APBC_TWSI5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi6_clk, twsi_parents, APBC_TWSI6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi7_clk, twsi_parents, APBC_TWSI7_CLK_RST, 4, 3, BIT(1), 0);
+/*
+ * APBC_TWSI8_CLK_RST has a quirk: reads always return zero. Combine the
+ * functional and bus bits into a single gate to avoid sharing the
+ * write-only register between different clk_hw instances.
+ */
+CCU_GATE_DEFINE(twsi8_clk, CCU_PARENT_HW(pll1_d78_31p5), APBC_TWSI8_CLK_RST, BIT(1) | BIT(0), 0);
+
+static const struct clk_parent_data timer_parents[] = {
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_NAME(osc),
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_NAME(vctcxo_3m),
+ CCU_PARENT_NAME(vctcxo_1m),
+};
+CCU_MUX_GATE_DEFINE(timers1_clk, timer_parents, APBC_TIMERS1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(timers2_clk, timer_parents, APBC_TIMERS2_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(aib_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_AIB_CLK_RST, BIT(1), 0);
+
+CCU_GATE_DEFINE(onewire_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_ONEWIRE_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data sspa_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+ CCU_PARENT_HW(i2s_bclk),
+};
+CCU_MUX_GATE_DEFINE(sspa0_clk, sspa_parents, APBC_SSPA0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(sspa1_clk, sspa_parents, APBC_SSPA1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_GATE_DEFINE(dro_clk, CCU_PARENT_HW(apb_clk), APBC_DRO_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(ir_clk, CCU_PARENT_HW(apb_clk), APBC_IR_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(tsen_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(ipc_ap2aud_clk, CCU_PARENT_HW(apb_clk), APBC_IPC_AP2AUD_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data can_parents[] = {
+ CCU_PARENT_HW(pll3_20),
+ CCU_PARENT_HW(pll3_40),
+ CCU_PARENT_HW(pll3_80),
+};
+CCU_MUX_GATE_DEFINE(can0_clk, can_parents, APBC_CAN0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_GATE_DEFINE(can0_bus_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_CAN0_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(uart0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART3_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART7_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart8_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART8_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart9_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART9_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(gpio_bus_clk, CCU_PARENT_HW(apb_clk), APBC_GPIO_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(pwm0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM3_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM7_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm8_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM8_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm9_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM9_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm10_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM10_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm11_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM11_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm12_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM12_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm13_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM13_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm14_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM14_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm15_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM15_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm16_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM16_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm17_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM17_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm18_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM18_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm19_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM19_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(ssp3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSP3_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(rtc_bus_clk, CCU_PARENT_HW(apb_clk), APBC_RTC_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(twsi0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI7_CLK_RST, BIT(0), 0);
+/* Placeholder to work around the quirk of the register */
+CCU_FACTOR_DEFINE(twsi8_bus_clk, CCU_PARENT_HW(apb_clk), 1, 1);
+
+CCU_GATE_DEFINE(timers1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TIMERS1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(timers2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TIMERS2_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(aib_bus_clk, CCU_PARENT_HW(apb_clk), APBC_AIB_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(onewire_bus_clk, CCU_PARENT_HW(apb_clk), APBC_ONEWIRE_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(sspa0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSPA0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(sspa1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSPA1_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(tsen_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(ipc_ap2aud_bus_clk, CCU_PARENT_HW(apb_clk), APBC_IPC_AP2AUD_CLK_RST, BIT(0), 0);
+/* APBC clocks end */
+
+/* APMU clocks start */
+static const struct clk_parent_data pmua_aclk_parents[] = {
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_FC_DEFINE(pmua_aclk, pmua_aclk_parents, APMU_ACLK_CLK_CTRL, 1, 2, BIT(4), 0, 1, 0);
+
+static const struct clk_parent_data cci550_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d3),
+};
+CCU_MUX_DIV_FC_DEFINE(cci550_clk, cci550_clk_parents, APMU_CCI550_CLK_CTRL, 8, 3, BIT(12), 0, 2,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data cpu_c0_hi_clk_parents[] = {
+ CCU_PARENT_HW(pll3_d2),
+ CCU_PARENT_HW(pll3_d1),
+};
+CCU_MUX_DEFINE(cpu_c0_hi_clk, cpu_c0_hi_clk_parents, APMU_CPU_C0_CLK_CTRL, 13, 1, 0);
+static const struct clk_parent_data cpu_c0_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll3_d3),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(cpu_c0_hi_clk),
+};
+CCU_MUX_FC_DEFINE(cpu_c0_core_clk, cpu_c0_clk_parents, APMU_CPU_C0_CLK_CTRL, BIT(12), 0, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c0_ace_clk, CCU_PARENT_HW(cpu_c0_core_clk), APMU_CPU_C0_CLK_CTRL, 6, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c0_tcm_clk, CCU_PARENT_HW(cpu_c0_core_clk), APMU_CPU_C0_CLK_CTRL, 9, 3,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data cpu_c1_hi_clk_parents[] = {
+ CCU_PARENT_HW(pll3_d2),
+ CCU_PARENT_HW(pll3_d1),
+};
+CCU_MUX_DEFINE(cpu_c1_hi_clk, cpu_c1_hi_clk_parents, APMU_CPU_C1_CLK_CTRL, 13, 1, 0);
+static const struct clk_parent_data cpu_c1_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll3_d3),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(cpu_c1_hi_clk),
+};
+CCU_MUX_FC_DEFINE(cpu_c1_core_clk, cpu_c1_clk_parents, APMU_CPU_C1_CLK_CTRL, BIT(12), 0, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c1_ace_clk, CCU_PARENT_HW(cpu_c1_core_clk), APMU_CPU_C1_CLK_CTRL, 6, 3,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data jpg_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d3),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(jpg_clk, jpg_parents, APMU_JPG_CLK_RES_CTRL, 5, 3, BIT(15), 2, 3,
+ BIT(1), 0);
+
+static const struct clk_parent_data ccic2phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic2phy_clk, ccic2phy_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 7, 1, BIT(5), 0);
+
+static const struct clk_parent_data ccic3phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic3phy_clk, ccic3phy_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 31, 1, BIT(30), 0);
+
+static const struct clk_parent_data csi_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(csi_clk, csi_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 20, 3, BIT(15),
+ 16, 3, BIT(4), 0);
+
+static const struct clk_parent_data camm_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_NAME(vctcxo_24m),
+};
+CCU_MUX_DIV_GATE_DEFINE(camm0_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(28), 0);
+CCU_MUX_DIV_GATE_DEFINE(camm1_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(6), 0);
+CCU_MUX_DIV_GATE_DEFINE(camm2_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(3), 0);
+
+static const struct clk_parent_data isp_cpp_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+};
+CCU_MUX_DIV_GATE_DEFINE(isp_cpp_clk, isp_cpp_parents, APMU_ISP_CLK_RES_CTRL, 24, 2, 26, 1,
+ BIT(28), 0);
+static const struct clk_parent_data isp_bus_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d10_245p76),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(isp_bus_clk, isp_bus_parents, APMU_ISP_CLK_RES_CTRL, 18, 3, BIT(23),
+ 21, 2, BIT(17), 0);
+static const struct clk_parent_data isp_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(isp_clk, isp_parents, APMU_ISP_CLK_RES_CTRL, 4, 3, BIT(7), 8, 2,
+ BIT(1), 0);
+
+static const struct clk_parent_data dpumclk_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(dpu_mclk, dpumclk_parents, APMU_LCD_CLK_RES_CTRL2,
+ APMU_LCD_CLK_RES_CTRL1, 1, 4, BIT(29), 5, 3, BIT(0), 0);
+
+static const struct clk_parent_data dpuesc_parents[] = {
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+ CCU_PARENT_HW(pll1_d52_47p26),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d32_76p8),
+};
+CCU_MUX_GATE_DEFINE(dpu_esc_clk, dpuesc_parents, APMU_LCD_CLK_RES_CTRL1, 0, 2, BIT(2), 0);
+
+static const struct clk_parent_data dpubit_parents[] = {
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll2_d7),
+ CCU_PARENT_HW(pll2_d8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(dpu_bit_clk, dpubit_parents, APMU_LCD_CLK_RES_CTRL1, 17, 3, BIT(31),
+ 20, 3, BIT(16), 0);
+
+static const struct clk_parent_data dpupx_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll2_d7),
+ CCU_PARENT_HW(pll2_d8),
+};
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(dpu_pxclk, dpupx_parents, APMU_LCD_CLK_RES_CTRL2,
+ APMU_LCD_CLK_RES_CTRL1, 17, 4, BIT(30), 21, 3, BIT(16), 0);
+
+CCU_GATE_DEFINE(dpu_hclk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_CLK_RES_CTRL1,
+ BIT(5), 0);
+
+static const struct clk_parent_data dpu_spi_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(dpu_spi_clk, dpu_spi_parents, APMU_LCD_SPI_CLK_RES_CTRL, 8, 3,
+ BIT(7), 12, 3, BIT(1), 0);
+CCU_GATE_DEFINE(dpu_spi_hbus_clk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(3), 0);
+CCU_GATE_DEFINE(dpu_spi_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(5), 0);
+CCU_GATE_DEFINE(dpu_spi_aclk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(6), 0);
+
+static const struct clk_parent_data v2d_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d4_614p4),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(v2d_clk, v2d_parents, APMU_LCD_CLK_RES_CTRL1, 9, 3, BIT(28), 12, 2,
+ BIT(8), 0);
+
+static const struct clk_parent_data ccic_4x_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(ccic_4x_clk, ccic_4x_parents, APMU_CCIC_CLK_RES_CTRL, 18, 3,
+ BIT(15), 23, 2, BIT(4), 0);
+
+static const struct clk_parent_data ccic1phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic1phy_clk, ccic1phy_parents, APMU_CCIC_CLK_RES_CTRL, 7, 1, BIT(5), 0);
+
+CCU_GATE_DEFINE(sdh_axi_aclk, CCU_PARENT_HW(pmua_aclk), APMU_SDH0_CLK_RES_CTRL, BIT(3), 0);
+static const struct clk_parent_data sdh01_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh0_clk, sdh01_parents, APMU_SDH0_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh1_clk, sdh01_parents, APMU_SDH1_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+static const struct clk_parent_data sdh2_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh2_clk, sdh2_parents, APMU_SDH2_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+
+CCU_GATE_DEFINE(usb_axi_clk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(1), 0);
+CCU_GATE_DEFINE(usb_p1_aclk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(5), 0);
+CCU_GATE_DEFINE(usb30_clk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(8), 0);
+
+static const struct clk_parent_data qspi_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d23_106p8),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d13_189),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(qspi_clk, qspi_parents, APMU_QSPI_CLK_RES_CTRL, 9, 3, BIT(12), 6, 3,
+ BIT(4), 0);
+CCU_GATE_DEFINE(qspi_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_QSPI_CLK_RES_CTRL, BIT(3), 0);
+CCU_GATE_DEFINE(dma_clk, CCU_PARENT_HW(pmua_aclk), APMU_DMA_CLK_RES_CTRL, BIT(3), 0);
+
+static const struct clk_parent_data aes_parents[] = {
+ CCU_PARENT_HW(pll1_d12_204p8),
+ CCU_PARENT_HW(pll1_d24_102p4),
+};
+CCU_MUX_GATE_DEFINE(aes_clk, aes_parents, APMU_AES_CLK_RES_CTRL, 6, 1, BIT(5), 0);
+
+static const struct clk_parent_data vpu_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll3_d6),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(vpu_clk, vpu_parents, APMU_VPU_CLK_RES_CTRL, 13, 3, BIT(21), 10, 3,
+ BIT(3), 0);
+
+static const struct clk_parent_data gpu_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll3_d6),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(gpu_clk, gpu_parents, APMU_GPU_CLK_RES_CTRL, 12, 3, BIT(15), 18, 3,
+ BIT(4), 0);
+
+static const struct clk_parent_data emmc_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d52_47p26),
+ CCU_PARENT_HW(pll1_d3_819p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(emmc_clk, emmc_parents, APMU_PMUA_EM_CLK_RES_CTRL, 8, 3, BIT(11),
+ 6, 2, BIT(4), 0);
+CCU_DIV_GATE_DEFINE(emmc_x_clk, CCU_PARENT_HW(pll1_d2_1228p8), APMU_PMUA_EM_CLK_RES_CTRL, 12,
+ 3, BIT(15), 0);
+
+static const struct clk_parent_data audio_parents[] = {
+ CCU_PARENT_HW(pll1_aud_245p7),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(audio_clk, audio_parents, APMU_AUDIO_CLK_RES_CTRL, 4, 3, BIT(15),
+ 7, 3, BIT(12), 0);
+
+static const struct clk_parent_data hdmi_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(hdmi_mclk, hdmi_parents, APMU_HDMI_CLK_RES_CTRL, 1, 4, BIT(29), 5,
+ 3, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie0_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(2), 0);
+CCU_GATE_DEFINE(pcie0_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(1), 0);
+CCU_GATE_DEFINE(pcie0_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie1_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(2), 0);
+CCU_GATE_DEFINE(pcie1_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(1), 0);
+CCU_GATE_DEFINE(pcie1_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie2_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(2), 0);
+CCU_GATE_DEFINE(pcie2_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(1), 0);
+CCU_GATE_DEFINE(pcie2_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(0), 0);
+
+CCU_GATE_DEFINE(emac0_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_EMAC0_CLK_RES_CTRL, BIT(0), 0);
+CCU_GATE_DEFINE(emac0_ptp_clk, CCU_PARENT_HW(pll2_d6), APMU_EMAC0_CLK_RES_CTRL, BIT(15), 0);
+CCU_GATE_DEFINE(emac1_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_EMAC1_CLK_RES_CTRL, BIT(0), 0);
+CCU_GATE_DEFINE(emac1_ptp_clk, CCU_PARENT_HW(pll2_d6), APMU_EMAC1_CLK_RES_CTRL, BIT(15), 0);
+
+CCU_GATE_DEFINE(emmc_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_PMUA_EM_CLK_RES_CTRL, BIT(3), 0);
+/* APMU clocks end */
+
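+/*
+ * Per-controller clk_hw tables, indexed by the clock ids from
+ * dt-bindings/clock/spacemit,k1-syscon.h. Any unused id is left as a hole
+ * and reported as -ENOENT by spacemit_ccu_register().
+ */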
+static struct clk_hw *k1_ccu_pll_hws[] = {
+ [CLK_PLL1] = &pll1.common.hw,
+ [CLK_PLL2] = &pll2.common.hw,
+ [CLK_PLL3] = &pll3.common.hw,
+ [CLK_PLL1_D2] = &pll1_d2.common.hw,
+ [CLK_PLL1_D3] = &pll1_d3.common.hw,
+ [CLK_PLL1_D4] = &pll1_d4.common.hw,
+ [CLK_PLL1_D5] = &pll1_d5.common.hw,
+ [CLK_PLL1_D6] = &pll1_d6.common.hw,
+ [CLK_PLL1_D7] = &pll1_d7.common.hw,
+ [CLK_PLL1_D8] = &pll1_d8.common.hw,
+ [CLK_PLL1_D11] = &pll1_d11_223p4.common.hw,
+ [CLK_PLL1_D13] = &pll1_d13_189.common.hw,
+ [CLK_PLL1_D23] = &pll1_d23_106p8.common.hw,
+ [CLK_PLL1_D64] = &pll1_d64_38p4.common.hw,
+ [CLK_PLL1_D10_AUD] = &pll1_aud_245p7.common.hw,
+ [CLK_PLL1_D100_AUD] = &pll1_aud_24p5.common.hw,
+ [CLK_PLL2_D1] = &pll2_d1.common.hw,
+ [CLK_PLL2_D2] = &pll2_d2.common.hw,
+ [CLK_PLL2_D3] = &pll2_d3.common.hw,
+ [CLK_PLL2_D4] = &pll2_d4.common.hw,
+ [CLK_PLL2_D5] = &pll2_d5.common.hw,
+ [CLK_PLL2_D6] = &pll2_d6.common.hw,
+ [CLK_PLL2_D7] = &pll2_d7.common.hw,
+ [CLK_PLL2_D8] = &pll2_d8.common.hw,
+ [CLK_PLL3_D1] = &pll3_d1.common.hw,
+ [CLK_PLL3_D2] = &pll3_d2.common.hw,
+ [CLK_PLL3_D3] = &pll3_d3.common.hw,
+ [CLK_PLL3_D4] = &pll3_d4.common.hw,
+ [CLK_PLL3_D5] = &pll3_d5.common.hw,
+ [CLK_PLL3_D6] = &pll3_d6.common.hw,
+ [CLK_PLL3_D7] = &pll3_d7.common.hw,
+ [CLK_PLL3_D8] = &pll3_d8.common.hw,
+ [CLK_PLL3_80] = &pll3_80.common.hw,
+ [CLK_PLL3_40] = &pll3_40.common.hw,
+ [CLK_PLL3_20] = &pll3_20.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_pll_data = {
+ .hws = k1_ccu_pll_hws,
+ .num = ARRAY_SIZE(k1_ccu_pll_hws),
+};
+
+static struct clk_hw *k1_ccu_mpmu_hws[] = {
+ [CLK_PLL1_307P2] = &pll1_d8_307p2.common.hw,
+ [CLK_PLL1_76P8] = &pll1_d32_76p8.common.hw,
+ [CLK_PLL1_61P44] = &pll1_d40_61p44.common.hw,
+ [CLK_PLL1_153P6] = &pll1_d16_153p6.common.hw,
+ [CLK_PLL1_102P4] = &pll1_d24_102p4.common.hw,
+ [CLK_PLL1_51P2] = &pll1_d48_51p2.common.hw,
+ [CLK_PLL1_51P2_AP] = &pll1_d48_51p2_ap.common.hw,
+ [CLK_PLL1_57P6] = &pll1_m3d128_57p6.common.hw,
+ [CLK_PLL1_25P6] = &pll1_d96_25p6.common.hw,
+ [CLK_PLL1_12P8] = &pll1_d192_12p8.common.hw,
+ [CLK_PLL1_12P8_WDT] = &pll1_d192_12p8_wdt.common.hw,
+ [CLK_PLL1_6P4] = &pll1_d384_6p4.common.hw,
+ [CLK_PLL1_3P2] = &pll1_d768_3p2.common.hw,
+ [CLK_PLL1_1P6] = &pll1_d1536_1p6.common.hw,
+ [CLK_PLL1_0P8] = &pll1_d3072_0p8.common.hw,
+ [CLK_PLL1_409P6] = &pll1_d6_409p6.common.hw,
+ [CLK_PLL1_204P8] = &pll1_d12_204p8.common.hw,
+ [CLK_PLL1_491] = &pll1_d5_491p52.common.hw,
+ [CLK_PLL1_245P76] = &pll1_d10_245p76.common.hw,
+ [CLK_PLL1_614] = &pll1_d4_614p4.common.hw,
+ [CLK_PLL1_47P26] = &pll1_d52_47p26.common.hw,
+ [CLK_PLL1_31P5] = &pll1_d78_31p5.common.hw,
+ [CLK_PLL1_819] = &pll1_d3_819p2.common.hw,
+ [CLK_PLL1_1228] = &pll1_d2_1228p8.common.hw,
+ [CLK_SLOW_UART] = &slow_uart.common.hw,
+ [CLK_SLOW_UART1] = &slow_uart1_14p74.common.hw,
+ [CLK_SLOW_UART2] = &slow_uart2_48.common.hw,
+ [CLK_WDT] = &wdt_clk.common.hw,
+ [CLK_RIPC] = &ripc_clk.common.hw,
+ [CLK_I2S_SYSCLK] = &i2s_sysclk.common.hw,
+ [CLK_I2S_BCLK] = &i2s_bclk.common.hw,
+ [CLK_APB] = &apb_clk.common.hw,
+ [CLK_WDT_BUS] = &wdt_bus_clk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_mpmu_data = {
+ .hws = k1_ccu_mpmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_mpmu_hws),
+};
+
+static struct clk_hw *k1_ccu_apbc_hws[] = {
+ [CLK_UART0] = &uart0_clk.common.hw,
+ [CLK_UART2] = &uart2_clk.common.hw,
+ [CLK_UART3] = &uart3_clk.common.hw,
+ [CLK_UART4] = &uart4_clk.common.hw,
+ [CLK_UART5] = &uart5_clk.common.hw,
+ [CLK_UART6] = &uart6_clk.common.hw,
+ [CLK_UART7] = &uart7_clk.common.hw,
+ [CLK_UART8] = &uart8_clk.common.hw,
+ [CLK_UART9] = &uart9_clk.common.hw,
+ [CLK_GPIO] = &gpio_clk.common.hw,
+ [CLK_PWM0] = &pwm0_clk.common.hw,
+ [CLK_PWM1] = &pwm1_clk.common.hw,
+ [CLK_PWM2] = &pwm2_clk.common.hw,
+ [CLK_PWM3] = &pwm3_clk.common.hw,
+ [CLK_PWM4] = &pwm4_clk.common.hw,
+ [CLK_PWM5] = &pwm5_clk.common.hw,
+ [CLK_PWM6] = &pwm6_clk.common.hw,
+ [CLK_PWM7] = &pwm7_clk.common.hw,
+ [CLK_PWM8] = &pwm8_clk.common.hw,
+ [CLK_PWM9] = &pwm9_clk.common.hw,
+ [CLK_PWM10] = &pwm10_clk.common.hw,
+ [CLK_PWM11] = &pwm11_clk.common.hw,
+ [CLK_PWM12] = &pwm12_clk.common.hw,
+ [CLK_PWM13] = &pwm13_clk.common.hw,
+ [CLK_PWM14] = &pwm14_clk.common.hw,
+ [CLK_PWM15] = &pwm15_clk.common.hw,
+ [CLK_PWM16] = &pwm16_clk.common.hw,
+ [CLK_PWM17] = &pwm17_clk.common.hw,
+ [CLK_PWM18] = &pwm18_clk.common.hw,
+ [CLK_PWM19] = &pwm19_clk.common.hw,
+ [CLK_SSP3] = &ssp3_clk.common.hw,
+ [CLK_RTC] = &rtc_clk.common.hw,
+ [CLK_TWSI0] = &twsi0_clk.common.hw,
+ [CLK_TWSI1] = &twsi1_clk.common.hw,
+ [CLK_TWSI2] = &twsi2_clk.common.hw,
+ [CLK_TWSI4] = &twsi4_clk.common.hw,
+ [CLK_TWSI5] = &twsi5_clk.common.hw,
+ [CLK_TWSI6] = &twsi6_clk.common.hw,
+ [CLK_TWSI7] = &twsi7_clk.common.hw,
+ [CLK_TWSI8] = &twsi8_clk.common.hw,
+ [CLK_TIMERS1] = &timers1_clk.common.hw,
+ [CLK_TIMERS2] = &timers2_clk.common.hw,
+ [CLK_AIB] = &aib_clk.common.hw,
+ [CLK_ONEWIRE] = &onewire_clk.common.hw,
+ [CLK_SSPA0] = &sspa0_clk.common.hw,
+ [CLK_SSPA1] = &sspa1_clk.common.hw,
+ [CLK_DRO] = &dro_clk.common.hw,
+ [CLK_IR] = &ir_clk.common.hw,
+ [CLK_TSEN] = &tsen_clk.common.hw,
+ [CLK_IPC_AP2AUD] = &ipc_ap2aud_clk.common.hw,
+ [CLK_CAN0] = &can0_clk.common.hw,
+ [CLK_CAN0_BUS] = &can0_bus_clk.common.hw,
+ [CLK_UART0_BUS] = &uart0_bus_clk.common.hw,
+ [CLK_UART2_BUS] = &uart2_bus_clk.common.hw,
+ [CLK_UART3_BUS] = &uart3_bus_clk.common.hw,
+ [CLK_UART4_BUS] = &uart4_bus_clk.common.hw,
+ [CLK_UART5_BUS] = &uart5_bus_clk.common.hw,
+ [CLK_UART6_BUS] = &uart6_bus_clk.common.hw,
+ [CLK_UART7_BUS] = &uart7_bus_clk.common.hw,
+ [CLK_UART8_BUS] = &uart8_bus_clk.common.hw,
+ [CLK_UART9_BUS] = &uart9_bus_clk.common.hw,
+ [CLK_GPIO_BUS] = &gpio_bus_clk.common.hw,
+ [CLK_PWM0_BUS] = &pwm0_bus_clk.common.hw,
+ [CLK_PWM1_BUS] = &pwm1_bus_clk.common.hw,
+ [CLK_PWM2_BUS] = &pwm2_bus_clk.common.hw,
+ [CLK_PWM3_BUS] = &pwm3_bus_clk.common.hw,
+ [CLK_PWM4_BUS] = &pwm4_bus_clk.common.hw,
+ [CLK_PWM5_BUS] = &pwm5_bus_clk.common.hw,
+ [CLK_PWM6_BUS] = &pwm6_bus_clk.common.hw,
+ [CLK_PWM7_BUS] = &pwm7_bus_clk.common.hw,
+ [CLK_PWM8_BUS] = &pwm8_bus_clk.common.hw,
+ [CLK_PWM9_BUS] = &pwm9_bus_clk.common.hw,
+ [CLK_PWM10_BUS] = &pwm10_bus_clk.common.hw,
+ [CLK_PWM11_BUS] = &pwm11_bus_clk.common.hw,
+ [CLK_PWM12_BUS] = &pwm12_bus_clk.common.hw,
+ [CLK_PWM13_BUS] = &pwm13_bus_clk.common.hw,
+ [CLK_PWM14_BUS] = &pwm14_bus_clk.common.hw,
+ [CLK_PWM15_BUS] = &pwm15_bus_clk.common.hw,
+ [CLK_PWM16_BUS] = &pwm16_bus_clk.common.hw,
+ [CLK_PWM17_BUS] = &pwm17_bus_clk.common.hw,
+ [CLK_PWM18_BUS] = &pwm18_bus_clk.common.hw,
+ [CLK_PWM19_BUS] = &pwm19_bus_clk.common.hw,
+ [CLK_SSP3_BUS] = &ssp3_bus_clk.common.hw,
+ [CLK_RTC_BUS] = &rtc_bus_clk.common.hw,
+ [CLK_TWSI0_BUS] = &twsi0_bus_clk.common.hw,
+ [CLK_TWSI1_BUS] = &twsi1_bus_clk.common.hw,
+ [CLK_TWSI2_BUS] = &twsi2_bus_clk.common.hw,
+ [CLK_TWSI4_BUS] = &twsi4_bus_clk.common.hw,
+ [CLK_TWSI5_BUS] = &twsi5_bus_clk.common.hw,
+ [CLK_TWSI6_BUS] = &twsi6_bus_clk.common.hw,
+ [CLK_TWSI7_BUS] = &twsi7_bus_clk.common.hw,
+ [CLK_TWSI8_BUS] = &twsi8_bus_clk.common.hw,
+ [CLK_TIMERS1_BUS] = &timers1_bus_clk.common.hw,
+ [CLK_TIMERS2_BUS] = &timers2_bus_clk.common.hw,
+ [CLK_AIB_BUS] = &aib_bus_clk.common.hw,
+ [CLK_ONEWIRE_BUS] = &onewire_bus_clk.common.hw,
+ [CLK_SSPA0_BUS] = &sspa0_bus_clk.common.hw,
+ [CLK_SSPA1_BUS] = &sspa1_bus_clk.common.hw,
+ [CLK_TSEN_BUS] = &tsen_bus_clk.common.hw,
+ [CLK_IPC_AP2AUD_BUS] = &ipc_ap2aud_bus_clk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_apbc_data = {
+ .hws = k1_ccu_apbc_hws,
+ .num = ARRAY_SIZE(k1_ccu_apbc_hws),
+};
+
+static struct clk_hw *k1_ccu_apmu_hws[] = {
+ [CLK_CCI550] = &cci550_clk.common.hw,
+ [CLK_CPU_C0_HI] = &cpu_c0_hi_clk.common.hw,
+ [CLK_CPU_C0_CORE] = &cpu_c0_core_clk.common.hw,
+ [CLK_CPU_C0_ACE] = &cpu_c0_ace_clk.common.hw,
+ [CLK_CPU_C0_TCM] = &cpu_c0_tcm_clk.common.hw,
+ [CLK_CPU_C1_HI] = &cpu_c1_hi_clk.common.hw,
+ [CLK_CPU_C1_CORE] = &cpu_c1_core_clk.common.hw,
+ [CLK_CPU_C1_ACE] = &cpu_c1_ace_clk.common.hw,
+ [CLK_CCIC_4X] = &ccic_4x_clk.common.hw,
+ [CLK_CCIC1PHY] = &ccic1phy_clk.common.hw,
+ [CLK_SDH_AXI] = &sdh_axi_aclk.common.hw,
+ [CLK_SDH0] = &sdh0_clk.common.hw,
+ [CLK_SDH1] = &sdh1_clk.common.hw,
+ [CLK_SDH2] = &sdh2_clk.common.hw,
+ [CLK_USB_P1] = &usb_p1_aclk.common.hw,
+ [CLK_USB_AXI] = &usb_axi_clk.common.hw,
+ [CLK_USB30] = &usb30_clk.common.hw,
+ [CLK_QSPI] = &qspi_clk.common.hw,
+ [CLK_QSPI_BUS] = &qspi_bus_clk.common.hw,
+ [CLK_DMA] = &dma_clk.common.hw,
+ [CLK_AES] = &aes_clk.common.hw,
+ [CLK_VPU] = &vpu_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_EMMC] = &emmc_clk.common.hw,
+ [CLK_EMMC_X] = &emmc_x_clk.common.hw,
+ [CLK_AUDIO] = &audio_clk.common.hw,
+ [CLK_HDMI] = &hdmi_mclk.common.hw,
+ [CLK_PMUA_ACLK] = &pmua_aclk.common.hw,
+ [CLK_PCIE0_MASTER] = &pcie0_master_clk.common.hw,
+ [CLK_PCIE0_SLAVE] = &pcie0_slave_clk.common.hw,
+ [CLK_PCIE0_DBI] = &pcie0_dbi_clk.common.hw,
+ [CLK_PCIE1_MASTER] = &pcie1_master_clk.common.hw,
+ [CLK_PCIE1_SLAVE] = &pcie1_slave_clk.common.hw,
+ [CLK_PCIE1_DBI] = &pcie1_dbi_clk.common.hw,
+ [CLK_PCIE2_MASTER] = &pcie2_master_clk.common.hw,
+ [CLK_PCIE2_SLAVE] = &pcie2_slave_clk.common.hw,
+ [CLK_PCIE2_DBI] = &pcie2_dbi_clk.common.hw,
+ [CLK_EMAC0_BUS] = &emac0_bus_clk.common.hw,
+ [CLK_EMAC0_PTP] = &emac0_ptp_clk.common.hw,
+ [CLK_EMAC1_BUS] = &emac1_bus_clk.common.hw,
+ [CLK_EMAC1_PTP] = &emac1_ptp_clk.common.hw,
+ [CLK_JPG] = &jpg_clk.common.hw,
+ [CLK_CCIC2PHY] = &ccic2phy_clk.common.hw,
+ [CLK_CCIC3PHY] = &ccic3phy_clk.common.hw,
+ [CLK_CSI] = &csi_clk.common.hw,
+ [CLK_CAMM0] = &camm0_clk.common.hw,
+ [CLK_CAMM1] = &camm1_clk.common.hw,
+ [CLK_CAMM2] = &camm2_clk.common.hw,
+ [CLK_ISP_CPP] = &isp_cpp_clk.common.hw,
+ [CLK_ISP_BUS] = &isp_bus_clk.common.hw,
+ [CLK_ISP] = &isp_clk.common.hw,
+ [CLK_DPU_MCLK] = &dpu_mclk.common.hw,
+ [CLK_DPU_ESC] = &dpu_esc_clk.common.hw,
+ [CLK_DPU_BIT] = &dpu_bit_clk.common.hw,
+ [CLK_DPU_PXCLK] = &dpu_pxclk.common.hw,
+ [CLK_DPU_HCLK] = &dpu_hclk.common.hw,
+ [CLK_DPU_SPI] = &dpu_spi_clk.common.hw,
+ [CLK_DPU_SPI_HBUS] = &dpu_spi_hbus_clk.common.hw,
+ [CLK_DPU_SPIBUS] = &dpu_spi_bus_clk.common.hw,
+ [CLK_DPU_SPI_ACLK] = &dpu_spi_aclk.common.hw,
+ [CLK_V2D] = &v2d_clk.common.hw,
+ [CLK_EMMC_BUS] = &emmc_bus_clk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_apmu_data = {
+ .hws = k1_ccu_apmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_apmu_hws),
+};
+
+static int spacemit_ccu_register(struct device *dev,
+ struct regmap *regmap,
+ struct regmap *lock_regmap,
+ const struct spacemit_ccu_data *data)
+{
+ struct clk_hw_onecell_data *clk_data;
+ int i, ret;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, data->num),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num; i++) {
+ struct clk_hw *hw = data->hws[i];
+ struct ccu_common *common;
+ const char *name;
+
+ if (!hw) {
+ clk_data->hws[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ name = hw->init->name;
+
+ common = hw_to_ccu_common(hw);
+ common->regmap = regmap;
+ common->lock_regmap = lock_regmap;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "Cannot register clock %d - %s\n",
+ i, name);
+ return ret;
+ }
+
+ clk_data->hws[i] = hw;
+ }
+
+ clk_data->num = data->num;
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ dev_err(dev, "failed to add clock hardware provider (%d)\n", ret);
+
+ return ret;
+}
+
+static int k1_ccu_probe(struct platform_device *pdev)
+{
+ struct regmap *base_regmap, *lock_regmap = NULL;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ base_regmap = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(base_regmap))
+ return dev_err_probe(dev, PTR_ERR(base_regmap),
+ "failed to get regmap\n");
+
+ /*
+	 * The lock status of the PLLs lives in the MPMU region, while the PLLs
+	 * themselves are in the APBS region, so a reference to the MPMU syscon
+	 * is required to check PLL lock status.
+ */
+ if (of_device_is_compatible(dev->of_node, "spacemit,k1-pll")) {
+ struct device_node *mpmu = of_parse_phandle(dev->of_node,
+ "spacemit,mpmu", 0);
+ if (!mpmu)
+ return dev_err_probe(dev, -ENODEV,
+ "Cannot parse MPMU region\n");
+
+ lock_regmap = device_node_to_regmap(mpmu);
+ of_node_put(mpmu);
+
+ if (IS_ERR(lock_regmap))
+ return dev_err_probe(dev, PTR_ERR(lock_regmap),
+ "failed to get lock regmap\n");
+ }
+
+ ret = spacemit_ccu_register(dev, base_regmap, lock_regmap,
+ of_device_get_match_data(dev));
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register clocks\n");
+
+ return 0;
+}
+
+static const struct of_device_id of_k1_ccu_match[] = {
+ {
+ .compatible = "spacemit,k1-pll",
+ .data = &k1_ccu_pll_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-mpmu",
+ .data = &k1_ccu_mpmu_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-apbc",
+ .data = &k1_ccu_apbc_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-apmu",
+ .data = &k1_ccu_apmu_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_k1_ccu_match);
+
+static struct platform_driver k1_ccu_driver = {
+ .driver = {
+ .name = "spacemit,k1-ccu",
+ .of_match_table = of_k1_ccu_match,
+ },
+ .probe = k1_ccu_probe,
+};
+module_platform_driver(k1_ccu_driver);
+
+MODULE_DESCRIPTION("SpacemiT K1 CCU driver");
+MODULE_AUTHOR("Haylen Chu <heylenay@4d2.org>");
+MODULE_LICENSE("GPL");
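
For context, the probe above exposes every entry of the hw tables through of_clk_hw_onecell_get(), so a consumer only needs the usual phandle-plus-index reference in its node and a plain clk_get() in its driver. A minimal consumer-side sketch (hypothetical device and clock-names entry, not part of the patch):

/* Hypothetical consumer of one of the K1 CCU clocks (illustration only). */
#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *bus_clk;

	/* "bus" is an assumed clock-names entry pointing at e.g. <&ccu CLK_UART0_BUS> */
	bus_clk = devm_clk_get_enabled(&pdev->dev, "bus");
	if (IS_ERR(bus_clk))
		return PTR_ERR(bus_clk);

	return 0;
}
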
diff --git a/drivers/clk/spacemit/ccu_common.h b/drivers/clk/spacemit/ccu_common.h
new file mode 100644
index 000000000000..da72f3836e0b
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_common.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_COMMON_H_
+#define _CCU_COMMON_H_
+
+#include <linux/regmap.h>
+
+struct ccu_common {
+ struct regmap *regmap;
+ struct regmap *lock_regmap;
+
+ union {
+ /* For DDN and MIX */
+ struct {
+ u32 reg_ctrl;
+ u32 reg_fc;
+ u32 mask_fc;
+ };
+
+ /* For PLL */
+ struct {
+ u32 reg_swcr1;
+ u32 reg_swcr3;
+ };
+ };
+
+ struct clk_hw hw;
+};
+
+static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw)
+{
+ return container_of(hw, struct ccu_common, hw);
+}
+
+#define ccu_read(c, reg) \
+ ({ \
+ u32 tmp; \
+ regmap_read((c)->regmap, (c)->reg_##reg, &tmp); \
+ tmp; \
+ })
+#define ccu_update(c, reg, mask, val) \
+ regmap_update_bits((c)->regmap, (c)->reg_##reg, mask, val)
+
+#endif /* _CCU_COMMON_H_ */
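
The ccu_read()/ccu_update() helpers rely on token pasting: the second argument names a reg_* member of struct ccu_common. Roughly, they expand as in this sketch (illustration only, not part of the header):

/* Rough expansion of the helpers for a field named "ctrl" (illustration). */
static u32 example_read_ctrl(struct ccu_common *c)
{
	u32 tmp;

	/* ccu_read(c, ctrl) */
	regmap_read(c->regmap, c->reg_ctrl, &tmp);
	return tmp;
}

static void example_update_ctrl(struct ccu_common *c, u32 mask, u32 val)
{
	/* ccu_update(c, ctrl, mask, val) */
	regmap_update_bits(c->regmap, c->reg_ctrl, mask, val);
}
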
diff --git a/drivers/clk/spacemit/ccu_ddn.c b/drivers/clk/spacemit/ccu_ddn.c
new file mode 100644
index 000000000000..be311b045698
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_ddn.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ *
+ * DDN stands for "Divider Denominator Numerator"; it is an M/N clock with a
+ * constant x2 factor. This clock hardware follows the equation below,
+ *
+ * numerator Fin
+ * 2 * ------------- = -------
+ * denominator Fout
+ *
+ * Thus, Fout could be calculated with,
+ *
+ * Fin denominator
+ * Fout = ----- * -------------
+ * 2 numerator
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/rational.h>
+
+#include "ccu_ddn.h"
+
+static unsigned long ccu_ddn_calc_rate(unsigned long prate,
+ unsigned long num, unsigned long den)
+{
+ return prate * den / 2 / num;
+}
+
+static unsigned long ccu_ddn_calc_best_rate(struct ccu_ddn *ddn,
+ unsigned long rate, unsigned long prate,
+ unsigned long *num, unsigned long *den)
+{
+ rational_best_approximation(rate, prate / 2,
+ ddn->den_mask >> ddn->den_shift,
+ ddn->num_mask >> ddn->num_shift,
+ den, num);
+ return ccu_ddn_calc_rate(prate, *num, *den);
+}
+
+static long ccu_ddn_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned long num, den;
+
+ return ccu_ddn_calc_best_rate(ddn, rate, *prate, &num, &den);
+}
+
+static unsigned long ccu_ddn_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned int val, num, den;
+
+ val = ccu_read(&ddn->common, ctrl);
+
+ num = (val & ddn->num_mask) >> ddn->num_shift;
+ den = (val & ddn->den_mask) >> ddn->den_shift;
+
+ return ccu_ddn_calc_rate(prate, num, den);
+}
+
+static int ccu_ddn_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned long num, den;
+
+ ccu_ddn_calc_best_rate(ddn, rate, prate, &num, &den);
+
+ ccu_update(&ddn->common, ctrl,
+ ddn->num_mask | ddn->den_mask,
+ (num << ddn->num_shift) | (den << ddn->den_shift));
+
+ return 0;
+}
+
+const struct clk_ops spacemit_ccu_ddn_ops = {
+ .recalc_rate = ccu_ddn_recalc_rate,
+ .round_rate = ccu_ddn_round_rate,
+ .set_rate = ccu_ddn_set_rate,
+};
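
A quick numerical check of the formula above (values chosen only for illustration): with a 2400 MHz parent, num = 25 and den = 24, Fout = 2400/2 * 24/25 = 1152 MHz, which is what ccu_ddn_calc_rate() returns:

/* Standalone check of the DDN arithmetic (illustrative values, 64-bit host). */
#include <stdio.h>

static unsigned long ddn_calc_rate(unsigned long prate,
				   unsigned long num, unsigned long den)
{
	return prate * den / 2 / num;	/* same expression as ccu_ddn_calc_rate() */
}

int main(void)
{
	printf("%lu\n", ddn_calc_rate(2400000000UL, 25, 24));	/* prints 1152000000 */
	return 0;
}
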
diff --git a/drivers/clk/spacemit/ccu_ddn.h b/drivers/clk/spacemit/ccu_ddn.h
new file mode 100644
index 000000000000..a52fabe77d62
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_ddn.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_DDN_H_
+#define _CCU_DDN_H_
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+struct ccu_ddn {
+ struct ccu_common common;
+ unsigned int num_mask;
+ unsigned int num_shift;
+ unsigned int den_mask;
+ unsigned int den_shift;
+};
+
+#define CCU_DDN_INIT(_name, _parent, _flags) \
+ CLK_HW_INIT_HW(#_name, &_parent.common.hw, &spacemit_ccu_ddn_ops, _flags)
+
+#define CCU_DDN_DEFINE(_name, _parent, _reg_ctrl, _num_shift, _num_width, \
+ _den_shift, _den_width, _flags) \
+static struct ccu_ddn _name = { \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .hw.init = CCU_DDN_INIT(_name, _parent, _flags), \
+ }, \
+ .num_mask = GENMASK(_num_shift + _num_width - 1, _num_shift), \
+ .num_shift = _num_shift, \
+ .den_mask = GENMASK(_den_shift + _den_width - 1, _den_shift), \
+ .den_shift = _den_shift, \
+}
+
+static inline struct ccu_ddn *hw_to_ccu_ddn(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_ddn, common);
+}
+
+extern const struct clk_ops spacemit_ccu_ddn_ops;
+
+#endif
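
For reference, CCU_DDN_DEFINE() would be used along these lines (clock name, parent and field layout invented purely for illustration):

/*
 * Hypothetical DDN clock: control register at 0x60, 16-bit numerator at
 * bit 16, 13-bit denominator at bit 0, with "example_parent" being another
 * ccu clock defined elsewhere.
 */
CCU_DDN_DEFINE(example_ddn_clk, example_parent, 0x60, 16, 16, 0, 13, 0);
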
diff --git a/drivers/clk/spacemit/ccu_mix.c b/drivers/clk/spacemit/ccu_mix.c
new file mode 100644
index 000000000000..9b852aa61f78
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_mix.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ *
+ * The MIX clock type is a combination of a mux, a factor or divider, and a gate.
+ */
+
+#include <linux/clk-provider.h>
+
+#include "ccu_mix.h"
+
+#define MIX_FC_TIMEOUT_US 10000
+#define MIX_FC_DELAY_US 5
+
+static void ccu_gate_disable(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+
+ ccu_update(&mix->common, ctrl, mix->gate.mask, 0);
+}
+
+static int ccu_gate_enable(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_gate_config *gate = &mix->gate;
+
+ ccu_update(&mix->common, ctrl, gate->mask, gate->mask);
+
+ return 0;
+}
+
+static int ccu_gate_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_gate_config *gate = &mix->gate;
+
+ return (ccu_read(&mix->common, ctrl) & gate->mask) == gate->mask;
+}
+
+static unsigned long ccu_factor_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+
+ return parent_rate * mix->factor.mul / mix->factor.div;
+}
+
+static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_div_config *div = &mix->div;
+ unsigned long val;
+
+ val = ccu_read(&mix->common, ctrl) >> div->shift;
+ val &= (1 << div->width) - 1;
+
+ return divider_recalc_rate(hw, parent_rate, val, NULL, 0, div->width);
+}
+
+/*
+ * Some clocks require an "FC" (frequency change) bit to be set after changing
+ * their rate or parent. The hardware clears the bit automatically once the
+ * operation has completed; the driver polls it for up to MIX_FC_TIMEOUT_US.
+ */
+static int ccu_mix_trigger_fc(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+ unsigned int val;
+
+	if (!common->reg_fc)
+ return 0;
+
+ ccu_update(common, fc, common->mask_fc, common->mask_fc);
+
+ return regmap_read_poll_timeout_atomic(common->regmap, common->reg_fc,
+ val, !(val & common->mask_fc),
+ MIX_FC_DELAY_US,
+ MIX_FC_TIMEOUT_US);
+}
+
+static long ccu_factor_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return ccu_factor_recalc_rate(hw, *prate);
+}
+
+static int ccu_factor_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+static unsigned long
+ccu_mix_calc_best_rate(struct clk_hw *hw, unsigned long rate,
+ struct clk_hw **best_parent,
+ unsigned long *best_parent_rate,
+ u32 *div_val)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ unsigned int parent_num = clk_hw_get_num_parents(hw);
+ struct ccu_div_config *div = &mix->div;
+ u32 div_max = 1 << div->width;
+ unsigned long best_rate = 0;
+
+ for (int i = 0; i < parent_num; i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate;
+
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+
+ for (int j = 1; j <= div_max; j++) {
+ unsigned long tmp = DIV_ROUND_CLOSEST_ULL(parent_rate, j);
+
+ if (abs(tmp - rate) < abs(best_rate - rate)) {
+ best_rate = tmp;
+
+ if (div_val)
+ *div_val = j - 1;
+
+ if (best_parent) {
+ *best_parent = parent;
+ *best_parent_rate = parent_rate;
+ }
+ }
+ }
+ }
+
+ return best_rate;
+}
+
+static int ccu_mix_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ req->rate = ccu_mix_calc_best_rate(hw, req->rate,
+ &req->best_parent_hw,
+ &req->best_parent_rate,
+ NULL);
+ return 0;
+}
+
+static int ccu_mix_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_common *common = &mix->common;
+ struct ccu_div_config *div = &mix->div;
+ u32 current_div, target_div, mask;
+
+ ccu_mix_calc_best_rate(hw, rate, NULL, NULL, &target_div);
+
+ current_div = ccu_read(common, ctrl) >> div->shift;
+ current_div &= (1 << div->width) - 1;
+
+ if (current_div == target_div)
+ return 0;
+
+ mask = GENMASK(div->width + div->shift - 1, div->shift);
+
+ ccu_update(common, ctrl, mask, target_div << div->shift);
+
+ return ccu_mix_trigger_fc(hw);
+}
+
+static u8 ccu_mux_get_parent(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_mux_config *mux = &mix->mux;
+ u8 parent;
+
+ parent = ccu_read(&mix->common, ctrl) >> mux->shift;
+ parent &= (1 << mux->width) - 1;
+
+ return parent;
+}
+
+static int ccu_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_mux_config *mux = &mix->mux;
+ u32 mask;
+
+ mask = GENMASK(mux->width + mux->shift - 1, mux->shift);
+
+ ccu_update(&mix->common, ctrl, mask, index << mux->shift);
+
+ return ccu_mix_trigger_fc(hw);
+}
+
+const struct clk_ops spacemit_ccu_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+};
+
+const struct clk_ops spacemit_ccu_factor_ops = {
+ .round_rate = ccu_factor_round_rate,
+ .recalc_rate = ccu_factor_recalc_rate,
+ .set_rate = ccu_factor_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_ops = {
+ .determine_rate = ccu_mix_determine_rate,
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+};
+
+const struct clk_ops spacemit_ccu_div_ops = {
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_factor_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .round_rate = ccu_factor_round_rate,
+ .recalc_rate = ccu_factor_recalc_rate,
+ .set_rate = ccu_factor_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+};
+
+const struct clk_ops spacemit_ccu_div_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_div_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_div_ops = {
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
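
The rate selection above is a brute-force search: for every reachable parent and every divider value from 1 to 2^width it computes the resulting rate and keeps the candidate closest to the request (the hardware field then stores div - 1). A condensed, single-parent restatement of that loop (illustration only, no register access):

#include <stdlib.h>

/* Pick the divider whose output is closest to the requested rate. */
static unsigned long example_best_div_rate(unsigned long parent_rate,
					   unsigned long want,
					   unsigned int width,
					   unsigned int *best_div)
{
	unsigned long best = 0;

	for (unsigned int div = 1; div <= (1U << width); div++) {
		unsigned long rate = (parent_rate + div / 2) / div;

		if (labs((long)rate - (long)want) < labs((long)best - (long)want)) {
			best = rate;
			*best_div = div;	/* register field would hold div - 1 */
		}
	}

	return best;
}
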
diff --git a/drivers/clk/spacemit/ccu_mix.h b/drivers/clk/spacemit/ccu_mix.h
new file mode 100644
index 000000000000..51d19f5d6aac
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_mix.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_MIX_H_
+#define _CCU_MIX_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+/**
+ * struct ccu_gate_config - Gate configuration
+ *
+ * @mask: Mask to enable the gate. Some clocks may have more than one bit
+ * set in this field.
+ */
+struct ccu_gate_config {
+ u32 mask;
+};
+
+struct ccu_factor_config {
+ u32 div;
+ u32 mul;
+};
+
+struct ccu_mux_config {
+ u8 shift;
+ u8 width;
+};
+
+struct ccu_div_config {
+ u8 shift;
+ u8 width;
+};
+
+struct ccu_mix {
+ struct ccu_factor_config factor;
+ struct ccu_gate_config gate;
+ struct ccu_div_config div;
+ struct ccu_mux_config mux;
+ struct ccu_common common;
+};
+
+#define CCU_GATE_INIT(_mask) { .mask = _mask }
+#define CCU_FACTOR_INIT(_div, _mul) { .div = _div, .mul = _mul }
+#define CCU_MUX_INIT(_shift, _width) { .shift = _shift, .width = _width }
+#define CCU_DIV_INIT(_shift, _width) { .shift = _shift, .width = _width }
+
+#define CCU_PARENT_HW(_parent) { .hw = &_parent.common.hw }
+#define CCU_PARENT_NAME(_name) { .fw_name = #_name }
+
+#define CCU_MIX_INITHW(_name, _parent, _ops, _flags) \
+ .hw.init = &(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = #_name, \
+ .parent_data = (const struct clk_parent_data[]) \
+ { _parent }, \
+ .num_parents = 1, \
+ .ops = &_ops, \
+ }
+
+#define CCU_MIX_INITHW_PARENTS(_name, _parents, _ops, _flags) \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(#_name, _parents, &_ops, _flags)
+
+#define CCU_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_gate_ops, _flags), \
+ } \
+}
+
+#define CCU_FACTOR_DEFINE(_name, _parent, _div, _mul) \
+static struct ccu_mix _name = { \
+ .factor = CCU_FACTOR_INIT(_div, _mul), \
+ .common = { \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_ops, 0), \
+ } \
+}
+
+#define CCU_MUX_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, _flags) \
+static struct ccu_mix _name = { \
+ .mux = CCU_MUX_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, spacemit_ccu_mux_ops, \
+ _flags), \
+ } \
+}
+
+#define CCU_DIV_DEFINE(_name, _parent, _reg_ctrl, _shift, _width, _flags) \
+static struct ccu_mix _name = { \
+ .div = CCU_DIV_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_div_ops, _flags) \
+ } \
+}
+
+#define CCU_FACTOR_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
+ _mul) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .factor = CCU_FACTOR_INIT(_div, _mul), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_gate_ops, 0) \
+ } \
+}
+
+#define CCU_MUX_GATE_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, \
+ _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .mux = CCU_MUX_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_gate_ops, _flags), \
+ } \
+}
+
+#define CCU_DIV_GATE_DEFINE(_name, _parent, _reg_ctrl, _shift, _width, \
+ _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_div_gate_ops, \
+ _flags), \
+ } \
+}
+
+#define CCU_MUX_DIV_GATE_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth, \
+ _muxshift, _muxwidth, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_gate_ops, _flags), \
+ }, \
+}
+
+#define CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(_name, _parents, _reg_ctrl, _reg_fc, \
+ _mshift, _mwidth, _mask_fc, _muxshift, \
+ _muxwidth, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_fc, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_gate_ops, _flags), \
+ }, \
+}
+
+#define CCU_MUX_DIV_GATE_FC_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth,\
+ _mask_fc, _muxshift, _muxwidth, _mask_gate, \
+ _flags) \
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(_name, _parents, _reg_ctrl, _reg_ctrl, _mshift,\
+ _mwidth, _mask_fc, _muxshift, _muxwidth, \
+ _mask_gate, _flags)
+
+#define CCU_MUX_DIV_FC_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth, \
+ _mask_fc, _muxshift, _muxwidth, _flags) \
+static struct ccu_mix _name = { \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_ctrl, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_ops, _flags), \
+ }, \
+}
+
+#define CCU_MUX_FC_DEFINE(_name, _parents, _reg_ctrl, _mask_fc, _muxshift, \
+ _muxwidth, _flags) \
+static struct ccu_mix _name = { \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_ctrl, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, spacemit_ccu_mux_ops, \
+ _flags) \
+ }, \
+}
+
+static inline struct ccu_mix *hw_to_ccu_mix(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_mix, common);
+}
+
+extern const struct clk_ops spacemit_ccu_gate_ops;
+extern const struct clk_ops spacemit_ccu_factor_ops;
+extern const struct clk_ops spacemit_ccu_mux_ops;
+extern const struct clk_ops spacemit_ccu_div_ops;
+extern const struct clk_ops spacemit_ccu_factor_gate_ops;
+extern const struct clk_ops spacemit_ccu_div_gate_ops;
+extern const struct clk_ops spacemit_ccu_mux_gate_ops;
+extern const struct clk_ops spacemit_ccu_mux_div_ops;
+extern const struct clk_ops spacemit_ccu_mux_div_gate_ops;
+#endif /* _CCU_MIX_H_ */
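
As a usage illustration of the macros above (names, offsets and bit positions invented, not taken from the K1 driver), a gated divider clock fed from a firmware-named oscillator could be declared as:

/* Hypothetical gated divider: 3-bit divider at bit 4, gate at bit 1, offset 0x2c. */
CCU_DIV_GATE_DEFINE(example_periph_clk, CCU_PARENT_NAME(osc), 0x2c, 4, 3,
		    BIT(1), 0);
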
diff --git a/drivers/clk/spacemit/ccu_pll.c b/drivers/clk/spacemit/ccu_pll.c
new file mode 100644
index 000000000000..4427dcfbbb97
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_pll.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/math.h>
+#include <linux/regmap.h>
+
+#include "ccu_common.h"
+#include "ccu_pll.h"
+
+#define PLL_TIMEOUT_US 3000
+#define PLL_DELAY_US 5
+
+#define PLL_SWCR3_EN ((u32)BIT(31))
+#define PLL_SWCR3_MASK GENMASK(30, 0)
+
+static const struct ccu_pll_rate_tbl *ccu_pll_lookup_best_rate(struct ccu_pll *pll,
+ unsigned long rate)
+{
+ struct ccu_pll_config *config = &pll->config;
+ const struct ccu_pll_rate_tbl *best_entry;
+ unsigned long best_delta = ULONG_MAX;
+ int i;
+
+ for (i = 0; i < config->tbl_num; i++) {
+ const struct ccu_pll_rate_tbl *entry = &config->rate_tbl[i];
+ unsigned long delta = abs_diff(entry->rate, rate);
+
+ if (delta < best_delta) {
+ best_delta = delta;
+ best_entry = entry;
+ }
+ }
+
+ return best_entry;
+}
+
+static const struct ccu_pll_rate_tbl *ccu_pll_lookup_matched_entry(struct ccu_pll *pll)
+{
+ struct ccu_pll_config *config = &pll->config;
+ u32 swcr1, swcr3;
+ int i;
+
+ swcr1 = ccu_read(&pll->common, swcr1);
+ swcr3 = ccu_read(&pll->common, swcr3);
+ swcr3 &= PLL_SWCR3_MASK;
+
+ for (i = 0; i < config->tbl_num; i++) {
+ const struct ccu_pll_rate_tbl *entry = &config->rate_tbl[i];
+
+ if (swcr1 == entry->swcr1 && swcr3 == entry->swcr3)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void ccu_pll_update_param(struct ccu_pll *pll, const struct ccu_pll_rate_tbl *entry)
+{
+ struct ccu_common *common = &pll->common;
+
+ regmap_write(common->regmap, common->reg_swcr1, entry->swcr1);
+ ccu_update(common, swcr3, PLL_SWCR3_MASK, entry->swcr3);
+}
+
+static int ccu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return ccu_read(common, swcr3) & PLL_SWCR3_EN;
+}
+
+static int ccu_pll_enable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ struct ccu_common *common = &pll->common;
+ unsigned int tmp;
+
+ ccu_update(common, swcr3, PLL_SWCR3_EN, PLL_SWCR3_EN);
+
+ /* check lock status */
+ return regmap_read_poll_timeout_atomic(common->lock_regmap,
+ pll->config.reg_lock,
+ tmp,
+ tmp & pll->config.mask_lock,
+ PLL_DELAY_US, PLL_TIMEOUT_US);
+}
+
+static void ccu_pll_disable(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ ccu_update(common, swcr3, PLL_SWCR3_EN, 0);
+}
+
+/*
+ * PLLs must be gated before changing rate, which is ensured by
+ * flag CLK_SET_RATE_GATE.
+ */
+static int ccu_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ const struct ccu_pll_rate_tbl *entry;
+
+ entry = ccu_pll_lookup_best_rate(pll, rate);
+ ccu_pll_update_param(pll, entry);
+
+ return 0;
+}
+
+static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ const struct ccu_pll_rate_tbl *entry;
+
+ entry = ccu_pll_lookup_matched_entry(pll);
+
+ WARN_ON_ONCE(!entry);
+
+ return entry ? entry->rate : -EINVAL;
+}
+
+static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ return ccu_pll_lookup_best_rate(pll, rate)->rate;
+}
+
+static int ccu_pll_init(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ if (ccu_pll_lookup_matched_entry(pll))
+ return 0;
+
+ ccu_pll_disable(hw);
+ ccu_pll_update_param(pll, &pll->config.rate_tbl[0]);
+
+ return 0;
+}
+
+const struct clk_ops spacemit_ccu_pll_ops = {
+ .init = ccu_pll_init,
+ .enable = ccu_pll_enable,
+ .disable = ccu_pll_disable,
+ .set_rate = ccu_pll_set_rate,
+ .recalc_rate = ccu_pll_recalc_rate,
+ .round_rate = ccu_pll_round_rate,
+ .is_enabled = ccu_pll_is_enabled,
+};
diff --git a/drivers/clk/spacemit/ccu_pll.h b/drivers/clk/spacemit/ccu_pll.h
new file mode 100644
index 000000000000..0592f4c3068c
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_pll.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_PLL_H_
+#define _CCU_PLL_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+/**
+ * struct ccu_pll_rate_tbl - Structure mapping between PLL rate and register
+ * configuration.
+ *
+ * @rate: PLL rate
+ * @swcr1:	Register value of PLLx_SW1_CTRL (PLLx_SWCR1).
+ * @swcr3:	Register value of the lowest 31 bits of PLLx_SW3_CTRL
+ *		(PLLx_SWCR3). The highest bit enables the PLL and is not
+ *		contained in this field.
+ */
+struct ccu_pll_rate_tbl {
+ unsigned long rate;
+ u32 swcr1;
+ u32 swcr3;
+};
+
+struct ccu_pll_config {
+ const struct ccu_pll_rate_tbl *rate_tbl;
+ u32 tbl_num;
+ u32 reg_lock;
+ u32 mask_lock;
+};
+
+#define CCU_PLL_RATE(_rate, _swcr1, _swcr3) \
+ { \
+ .rate = _rate, \
+ .swcr1 = _swcr1, \
+ .swcr3 = _swcr3, \
+ }
+
+struct ccu_pll {
+ struct ccu_common common;
+ struct ccu_pll_config config;
+};
+
+#define CCU_PLL_CONFIG(_table, _reg_lock, _mask_lock) \
+ { \
+ .rate_tbl = _table, \
+ .tbl_num = ARRAY_SIZE(_table), \
+ .reg_lock = (_reg_lock), \
+ .mask_lock = (_mask_lock), \
+ }
+
+#define CCU_PLL_HWINIT(_name, _flags) \
+ (&(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &spacemit_ccu_pll_ops, \
+ .parent_data = &(struct clk_parent_data) { .index = 0 }, \
+ .num_parents = 1, \
+ .flags = _flags, \
+ })
+
+#define CCU_PLL_DEFINE(_name, _table, _reg_swcr1, _reg_swcr3, _reg_lock, \
+ _mask_lock, _flags) \
+static struct ccu_pll _name = { \
+ .config = CCU_PLL_CONFIG(_table, _reg_lock, _mask_lock), \
+ .common = { \
+ .reg_swcr1 = _reg_swcr1, \
+ .reg_swcr3 = _reg_swcr3, \
+ .hw.init = CCU_PLL_HWINIT(_name, _flags) \
+ } \
+}
+
+static inline struct ccu_pll *hw_to_ccu_pll(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_pll, common);
+}
+
+extern const struct clk_ops spacemit_ccu_pll_ops;
+
+#endif
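
The PLL type is purely table-driven: ccu_pll_set_rate() picks the ccu_pll_rate_tbl entry with the smallest distance from the requested rate and writes its swcr1/swcr3 values, while recalc_rate() works backwards by matching the current register contents against the same table. A hypothetical two-entry table shows the shape of the data (register values invented for illustration):

/*
 * Hypothetical rate table; a request for 2000 MHz selects the 1800 MHz
 * entry because |2000 - 1800| < |2457.6 - 2000|.
 */
static const struct ccu_pll_rate_tbl example_pll_rates[] = {
	CCU_PLL_RATE(1800000000UL, 0x12345678, 0x0abcdef0),
	CCU_PLL_RATE(2457600000UL, 0x23456789, 0x0bcdef01),
};
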
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 5830a9d87bf2..8896fd052ef1 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -9,123 +9,123 @@ if SUNXI_CCU
config SUNIV_F1C100S_CCU
tristate "Support for the Allwinner newer F1C100s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUNIV || COMPILE_TEST
config SUN20I_D1_CCU
tristate "Support for the Allwinner D1/R528/T113 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || RISCV || COMPILE_TEST
config SUN20I_D1_R_CCU
tristate "Support for the Allwinner D1/R528/T113 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || RISCV || COMPILE_TEST
config SUN50I_A64_CCU
tristate "Support for the Allwinner A64 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_A100_CCU
tristate "Support for the Allwinner A100 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_A100_R_CCU
tristate "Support for the Allwinner A100 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H6_CCU
tristate "Support for the Allwinner H6 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H616_CCU
tristate "Support for the Allwinner H616 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H6_R_CCU
tristate "Support for the Allwinner H6 and H616 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN55I_A523_CCU
tristate "Support for the Allwinner A523/T527 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN55I_A523_R_CCU
tristate "Support for the Allwinner A523/T527 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN4I_A10_CCU
tristate "Support for the Allwinner A10/A20 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
config SUN5I_CCU
bool "Support for the Allwinner sun5i family CCM"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN5I || COMPILE_TEST
depends on SUNXI_CCU=y
config SUN6I_A31_CCU
tristate "Support for the Allwinner A31/A31s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN6I || COMPILE_TEST
config SUN6I_RTC_CCU
tristate "Support for the Allwinner H616/R329 RTC CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST
config SUN8I_A23_CCU
tristate "Support for the Allwinner A23 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A33_CCU
tristate "Support for the Allwinner A33 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A83T_CCU
tristate "Support for the Allwinner A83T CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_H3_CCU
tristate "Support for the Allwinner H3 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || COMPILE_TEST
config SUN8I_V3S_CCU
tristate "Support for the Allwinner V3s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_DE2_CCU
tristate "Support for the Allwinner SoCs DE2 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST
config SUN8I_R40_CCU
tristate "Support for the Allwinner R40 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN9I_A80_CCU
tristate "Support for the Allwinner A80 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN9I || COMPILE_TEST
config SUN8I_R_CCU
tristate "Support for Allwinner SoCs' PRCM CCUs"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || COMPILE_TEST
endif
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
index bb66c906ebbb..e83d4fd40240 100644
--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
@@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = {
{ .hw = &pll_periph0_2x_clk.common.hw },
{ .hw = &pll_audio1_div2_clk.common.hw },
};
-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830,
- 0, 4, /* M */
- 8, 2, /* P */
- 24, 3, /* mux */
- BIT(31), /* gate */
- 0);
-
-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834,
- 0, 4, /* M */
- 8, 2, /* P */
- 24, 3, /* mux */
- BIT(31), /* gate */
- 0);
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0",
+ mmc0_mmc1_parents, 0x830,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1",
+ mmc0_mmc1_parents, 0x834,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
static const struct clk_parent_data mmc2_parents[] = {
{ .fw_name = "hosc" },
@@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = {
{ .hw = &pll_periph0_800M_clk.common.hw },
{ .hw = &pll_audio1_div2_clk.common.hw },
};
-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838,
- 0, 4, /* M */
- 8, 2, /* P */
- 24, 3, /* mux */
- BIT(31), /* gate */
- 0);
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents,
+ 0x838,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws,
0x84c, BIT(0), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index daa462c7d477..955c614830fa 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -1094,6 +1094,7 @@ static const struct ccu_reset_map sun50i_h616_ccu_resets[] = {
[RST_BUS_TCON_LCD1] = { 0xb7c, BIT(17) },
[RST_BUS_TCON_TV0] = { 0xb9c, BIT(16) },
[RST_BUS_TCON_TV1] = { 0xb9c, BIT(17) },
+ [RST_BUS_LVDS] = { 0xbac, BIT(16) },
[RST_BUS_TVE_TOP] = { 0xbbc, BIT(16) },
[RST_BUS_TVE0] = { 0xbbc, BIT(17) },
[RST_BUS_HDCP] = { 0xc4c, BIT(16) },
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index f2aa71206bc2..a6cd0f988859 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -239,6 +240,16 @@ static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = {
.num_resets = ARRAY_SIZE(sun50i_h5_de2_resets),
};
+static const struct sunxi_ccu_desc sun50i_h616_de33_clk_desc = {
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
+
+ .hw_clks = &sun8i_h3_de2_hw_clks,
+
+ .resets = sun50i_h5_de2_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h5_de2_resets),
+};
+
static int sunxi_de2_clk_probe(struct platform_device *pdev)
{
struct clk *bus_clk, *mod_clk;
@@ -291,6 +302,16 @@ static int sunxi_de2_clk_probe(struct platform_device *pdev)
goto err_disable_mod_clk;
}
+ /*
+ * The DE33 requires these additional (unknown) registers set
+ * during initialisation.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun50i-h616-de33-clk")) {
+ writel(0, reg + 0x24);
+ writel(0x0000a980, reg + 0x28);
+ }
+
ret = devm_sunxi_ccu_probe(&pdev->dev, reg, ccu_desc);
if (ret)
goto err_assert_reset;
@@ -335,6 +356,10 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
.compatible = "allwinner,sun50i-h6-de3-clk",
.data = &sun50i_h5_de2_clk_desc,
},
+ {
+ .compatible = "allwinner,sun50i-h616-de33-clk",
+ .data = &sun50i_h616_de33_clk_desc,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sunxi_de2_clk_ids);
diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
index b35aeec70484..bb09c649bfa3 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.h
+++ b/drivers/clk/sunxi-ng/ccu_mp.h
@@ -52,6 +52,28 @@ struct ccu_mp {
} \
}
+#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \
+ _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _postdiv, _flags)\
+ struct ccu_mp _struct = { \
+ .enable = _gate, \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .fixed_post_div = _postdiv, \
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_FIXED_POSTDIV, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_mp_ops, \
+ _flags), \
+ } \
+ }
+
#define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
_mshift, _mwidth, \
_pshift, _pwidth, \
@@ -109,8 +131,7 @@ struct ccu_mp {
_mshift, _mwidth, \
_pshift, _pwidth, \
_muxshift, _muxwidth, \
- _gate, _features, \
- _flags) \
+ _gate, _flags, _features) \
struct ccu_mp _struct = { \
.enable = _gate, \
.m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
diff --git a/drivers/clk/sunxi/Kconfig b/drivers/clk/sunxi/Kconfig
index 1c4e543366dd..5e2f92bfe412 100644
--- a/drivers/clk/sunxi/Kconfig
+++ b/drivers/clk/sunxi/Kconfig
@@ -2,13 +2,13 @@
menuconfig CLK_SUNXI
bool "Legacy clock support for Allwinner SoCs"
depends on (ARM && ARCH_SUNXI) || COMPILE_TEST
- default y
+ default (ARM && ARCH_SUNXI)
if CLK_SUNXI
config CLK_SUNXI_CLOCKS
bool "Legacy clock drivers"
- default y
+ default ARCH_SUNXI
help
Legacy clock drivers being used on older (A10, A13, A20,
A23, A31, A80) SoCs. These drivers are kept around for
@@ -19,14 +19,14 @@ config CLK_SUNXI_CLOCKS
config CLK_SUNXI_PRCM_SUN6I
bool "Legacy A31 PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the A31 PRCM clocks. Those are
usually needed for the PMIC communication, mostly.
config CLK_SUNXI_PRCM_SUN8I
bool "Legacy sun8i PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the sun8i family PRCM clocks.
Those are usually needed for the PMIC communication,
@@ -34,7 +34,7 @@ config CLK_SUNXI_PRCM_SUN8I
config CLK_SUNXI_PRCM_SUN9I
bool "Legacy A80 PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the A80 PRCM clocks. Those are
usually needed for the PMIC communication, mostly.
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index 4c9555fc6184..ebfb1d59401d 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -847,6 +847,67 @@ static CCU_GATE(CLK_SRAM1, sram1_clk, "sram1", axi_aclk_pd, 0x20c, BIT(3), 0);
static CCU_GATE(CLK_SRAM2, sram2_clk, "sram2", axi_aclk_pd, 0x20c, BIT(2), 0);
static CCU_GATE(CLK_SRAM3, sram3_clk, "sram3", axi_aclk_pd, 0x20c, BIT(1), 0);
+static CCU_GATE(CLK_AXI4_VO_ACLK, axi4_vo_aclk, "axi4-vo-aclk",
+ video_pll_clk_pd, 0x0, BIT(0), 0);
+static CCU_GATE(CLK_GPU_CORE, gpu_core_clk, "gpu-core-clk", video_pll_clk_pd,
+ 0x0, BIT(3), 0);
+static CCU_GATE(CLK_GPU_CFG_ACLK, gpu_cfg_aclk, "gpu-cfg-aclk",
+ video_pll_clk_pd, 0x0, BIT(4), 0);
+static CCU_GATE(CLK_DPU_PIXELCLK0, dpu0_pixelclk, "dpu0-pixelclk",
+ video_pll_clk_pd, 0x0, BIT(5), 0);
+static CCU_GATE(CLK_DPU_PIXELCLK1, dpu1_pixelclk, "dpu1-pixelclk",
+ video_pll_clk_pd, 0x0, BIT(6), 0);
+static CCU_GATE(CLK_DPU_HCLK, dpu_hclk, "dpu-hclk", video_pll_clk_pd, 0x0,
+ BIT(7), 0);
+static CCU_GATE(CLK_DPU_ACLK, dpu_aclk, "dpu-aclk", video_pll_clk_pd, 0x0,
+ BIT(8), 0);
+static CCU_GATE(CLK_DPU_CCLK, dpu_cclk, "dpu-cclk", video_pll_clk_pd, 0x0,
+ BIT(9), 0);
+static CCU_GATE(CLK_HDMI_SFR, hdmi_sfr_clk, "hdmi-sfr-clk", video_pll_clk_pd,
+ 0x0, BIT(10), 0);
+static CCU_GATE(CLK_HDMI_PCLK, hdmi_pclk, "hdmi-pclk", video_pll_clk_pd, 0x0,
+ BIT(11), 0);
+static CCU_GATE(CLK_HDMI_CEC, hdmi_cec_clk, "hdmi-cec-clk", video_pll_clk_pd,
+ 0x0, BIT(12), 0);
+static CCU_GATE(CLK_MIPI_DSI0_PCLK, mipi_dsi0_pclk, "mipi-dsi0-pclk",
+ video_pll_clk_pd, 0x0, BIT(13), 0);
+static CCU_GATE(CLK_MIPI_DSI1_PCLK, mipi_dsi1_pclk, "mipi-dsi1-pclk",
+ video_pll_clk_pd, 0x0, BIT(14), 0);
+static CCU_GATE(CLK_MIPI_DSI0_CFG, mipi_dsi0_cfg_clk, "mipi-dsi0-cfg-clk",
+ video_pll_clk_pd, 0x0, BIT(15), 0);
+static CCU_GATE(CLK_MIPI_DSI1_CFG, mipi_dsi1_cfg_clk, "mipi-dsi1-cfg-clk",
+ video_pll_clk_pd, 0x0, BIT(16), 0);
+static CCU_GATE(CLK_MIPI_DSI0_REFCLK, mipi_dsi0_refclk, "mipi-dsi0-refclk",
+ video_pll_clk_pd, 0x0, BIT(17), 0);
+static CCU_GATE(CLK_MIPI_DSI1_REFCLK, mipi_dsi1_refclk, "mipi-dsi1-refclk",
+ video_pll_clk_pd, 0x0, BIT(18), 0);
+static CCU_GATE(CLK_HDMI_I2S, hdmi_i2s_clk, "hdmi-i2s-clk", video_pll_clk_pd,
+ 0x0, BIT(19), 0);
+static CCU_GATE(CLK_X2H_DPU1_ACLK, x2h_dpu1_aclk, "x2h-dpu1-aclk",
+ video_pll_clk_pd, 0x0, BIT(20), 0);
+static CCU_GATE(CLK_X2H_DPU_ACLK, x2h_dpu_aclk, "x2h-dpu-aclk",
+ video_pll_clk_pd, 0x0, BIT(21), 0);
+static CCU_GATE(CLK_AXI4_VO_PCLK, axi4_vo_pclk, "axi4-vo-pclk",
+ video_pll_clk_pd, 0x0, BIT(22), 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_DPU_PCLK, iopmp_vosys_dpu_pclk,
+ "iopmp-vosys-dpu-pclk", video_pll_clk_pd, 0x0, BIT(23), 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_DPU1_PCLK, iopmp_vosys_dpu1_pclk,
+ "iopmp-vosys-dpu1-pclk", video_pll_clk_pd, 0x0, BIT(24), 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_GPU_PCLK, iopmp_vosys_gpu_pclk,
+ "iopmp-vosys-gpu-pclk", video_pll_clk_pd, 0x0, BIT(25), 0);
+static CCU_GATE(CLK_IOPMP_DPU1_ACLK, iopmp_dpu1_aclk, "iopmp-dpu1-aclk",
+ video_pll_clk_pd, 0x0, BIT(27), 0);
+static CCU_GATE(CLK_IOPMP_DPU_ACLK, iopmp_dpu_aclk, "iopmp-dpu-aclk",
+ video_pll_clk_pd, 0x0, BIT(28), 0);
+static CCU_GATE(CLK_IOPMP_GPU_ACLK, iopmp_gpu_aclk, "iopmp-gpu-aclk",
+ video_pll_clk_pd, 0x0, BIT(29), 0);
+static CCU_GATE(CLK_MIPIDSI0_PIXCLK, mipi_dsi0_pixclk, "mipi-dsi0-pixclk",
+ video_pll_clk_pd, 0x0, BIT(30), 0);
+static CCU_GATE(CLK_MIPIDSI1_PIXCLK, mipi_dsi1_pixclk, "mipi-dsi1-pixclk",
+ video_pll_clk_pd, 0x0, BIT(31), 0);
+static CCU_GATE(CLK_HDMI_PIXCLK, hdmi_pixclk, "hdmi-pixclk", video_pll_clk_pd,
+ 0x4, BIT(0), 0);
+
static CLK_FIXED_FACTOR_HW(gmac_pll_clk_100m, "gmac-pll-clk-100m",
&gmac_pll_clk.common.hw, 10, 1, 0);
@@ -963,7 +1024,38 @@ static struct ccu_common *th1520_gate_clks[] = {
&sram3_clk.common,
};
-#define NR_CLKS (CLK_UART_SCLK + 1)
+static struct ccu_common *th1520_vo_gate_clks[] = {
+ &axi4_vo_aclk.common,
+ &gpu_core_clk.common,
+ &gpu_cfg_aclk.common,
+ &dpu0_pixelclk.common,
+ &dpu1_pixelclk.common,
+ &dpu_hclk.common,
+ &dpu_aclk.common,
+ &dpu_cclk.common,
+ &hdmi_sfr_clk.common,
+ &hdmi_pclk.common,
+ &hdmi_cec_clk.common,
+ &mipi_dsi0_pclk.common,
+ &mipi_dsi1_pclk.common,
+ &mipi_dsi0_cfg_clk.common,
+ &mipi_dsi1_cfg_clk.common,
+ &mipi_dsi0_refclk.common,
+ &mipi_dsi1_refclk.common,
+ &hdmi_i2s_clk.common,
+ &x2h_dpu1_aclk.common,
+ &x2h_dpu_aclk.common,
+ &axi4_vo_pclk.common,
+ &iopmp_vosys_dpu_pclk.common,
+ &iopmp_vosys_dpu1_pclk.common,
+ &iopmp_vosys_gpu_pclk.common,
+ &iopmp_dpu1_aclk.common,
+ &iopmp_dpu_aclk.common,
+ &iopmp_gpu_aclk.common,
+ &mipi_dsi0_pixclk.common,
+ &mipi_dsi1_pixclk.common,
+ &hdmi_pixclk.common
+};
static const struct regmap_config th1520_clk_regmap_config = {
.reg_bits = 32,
@@ -972,8 +1064,44 @@ static const struct regmap_config th1520_clk_regmap_config = {
.fast_io = true,
};
+struct th1520_plat_data {
+ struct ccu_common **th1520_pll_clks;
+ struct ccu_common **th1520_div_clks;
+ struct ccu_common **th1520_mux_clks;
+ struct ccu_common **th1520_gate_clks;
+
+ int nr_clks;
+ int nr_pll_clks;
+ int nr_div_clks;
+ int nr_mux_clks;
+ int nr_gate_clks;
+};
+
+static const struct th1520_plat_data th1520_ap_platdata = {
+ .th1520_pll_clks = th1520_pll_clks,
+ .th1520_div_clks = th1520_div_clks,
+ .th1520_mux_clks = th1520_mux_clks,
+ .th1520_gate_clks = th1520_gate_clks,
+
+ .nr_clks = CLK_UART_SCLK + 1,
+
+ .nr_pll_clks = ARRAY_SIZE(th1520_pll_clks),
+ .nr_div_clks = ARRAY_SIZE(th1520_div_clks),
+ .nr_mux_clks = ARRAY_SIZE(th1520_mux_clks),
+ .nr_gate_clks = ARRAY_SIZE(th1520_gate_clks),
+};
+
+static const struct th1520_plat_data th1520_vo_platdata = {
+ .th1520_gate_clks = th1520_vo_gate_clks,
+
+ .nr_clks = CLK_HDMI_PIXCLK + 1,
+
+ .nr_gate_clks = ARRAY_SIZE(th1520_vo_gate_clks),
+};
+
static int th1520_clk_probe(struct platform_device *pdev)
{
+ const struct th1520_plat_data *plat_data;
struct device *dev = &pdev->dev;
struct clk_hw_onecell_data *priv;
@@ -982,11 +1110,16 @@ static int th1520_clk_probe(struct platform_device *pdev)
struct clk_hw *hw;
int ret, i;
- priv = devm_kzalloc(dev, struct_size(priv, hws, NR_CLKS), GFP_KERNEL);
+ plat_data = device_get_match_data(&pdev->dev);
+ if (!plat_data)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "No device match data found\n");
+
+ priv = devm_kzalloc(dev, struct_size(priv, hws, plat_data->nr_clks), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->num = NR_CLKS;
+ priv->num = plat_data->nr_clks;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -996,35 +1129,35 @@ static int th1520_clk_probe(struct platform_device *pdev)
if (IS_ERR(map))
return PTR_ERR(map);
- for (i = 0; i < ARRAY_SIZE(th1520_pll_clks); i++) {
- struct ccu_pll *cp = hw_to_ccu_pll(&th1520_pll_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_pll_clks; i++) {
+ struct ccu_pll *cp = hw_to_ccu_pll(&plat_data->th1520_pll_clks[i]->hw);
- th1520_pll_clks[i]->map = map;
+ plat_data->th1520_pll_clks[i]->map = map;
- ret = devm_clk_hw_register(dev, &th1520_pll_clks[i]->hw);
+ ret = devm_clk_hw_register(dev, &plat_data->th1520_pll_clks[i]->hw);
if (ret)
return ret;
priv->hws[cp->common.clkid] = &cp->common.hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_div_clks); i++) {
- struct ccu_div *cd = hw_to_ccu_div(&th1520_div_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_div_clks; i++) {
+ struct ccu_div *cd = hw_to_ccu_div(&plat_data->th1520_div_clks[i]->hw);
- th1520_div_clks[i]->map = map;
+ plat_data->th1520_div_clks[i]->map = map;
- ret = devm_clk_hw_register(dev, &th1520_div_clks[i]->hw);
+ ret = devm_clk_hw_register(dev, &plat_data->th1520_div_clks[i]->hw);
if (ret)
return ret;
priv->hws[cd->common.clkid] = &cd->common.hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_mux_clks); i++) {
- struct ccu_mux *cm = hw_to_ccu_mux(&th1520_mux_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_mux_clks; i++) {
+ struct ccu_mux *cm = hw_to_ccu_mux(&plat_data->th1520_mux_clks[i]->hw);
const struct clk_init_data *init = cm->common.hw.init;
- th1520_mux_clks[i]->map = map;
+ plat_data->th1520_mux_clks[i]->map = map;
hw = devm_clk_hw_register_mux_parent_data_table(dev,
init->name,
init->parent_data,
@@ -1040,10 +1173,10 @@ static int th1520_clk_probe(struct platform_device *pdev)
priv->hws[cm->common.clkid] = hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_gate_clks); i++) {
- struct ccu_gate *cg = hw_to_ccu_gate(&th1520_gate_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_gate_clks; i++) {
+ struct ccu_gate *cg = hw_to_ccu_gate(&plat_data->th1520_gate_clks[i]->hw);
- th1520_gate_clks[i]->map = map;
+ plat_data->th1520_gate_clks[i]->map = map;
hw = devm_clk_hw_register_gate_parent_data(dev,
cg->common.hw.init->name,
@@ -1057,19 +1190,21 @@ static int th1520_clk_probe(struct platform_device *pdev)
priv->hws[cg->common.clkid] = hw;
}
- ret = devm_clk_hw_register(dev, &osc12m_clk.hw);
- if (ret)
- return ret;
- priv->hws[CLK_OSC12M] = &osc12m_clk.hw;
+ if (plat_data == &th1520_ap_platdata) {
+ ret = devm_clk_hw_register(dev, &osc12m_clk.hw);
+ if (ret)
+ return ret;
+ priv->hws[CLK_OSC12M] = &osc12m_clk.hw;
- ret = devm_clk_hw_register(dev, &gmac_pll_clk_100m.hw);
- if (ret)
- return ret;
- priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
+ ret = devm_clk_hw_register(dev, &gmac_pll_clk_100m.hw);
+ if (ret)
+ return ret;
+ priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
- ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
- if (ret)
- return ret;
+ ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
+ if (ret)
+ return ret;
+ }
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, priv);
if (ret)
@@ -1081,6 +1216,11 @@ static int th1520_clk_probe(struct platform_device *pdev)
static const struct of_device_id th1520_clk_match[] = {
{
.compatible = "thead,th1520-clk-ap",
+ .data = &th1520_ap_platdata,
+ },
+ {
+ .compatible = "thead,th1520-clk-vo",
+ .data = &th1520_vo_platdata,
},
{ /* sentinel */ },
};
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 487c85259967..645f517a1ac2 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -73,6 +73,14 @@ config DW_APB_TIMER_OF
select DW_APB_TIMER
select TIMER_OF
+config ECONET_EN751221_TIMER
+ bool "EcoNet EN751221 High Precision Timer" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ select TIMER_OF
+ help
+ Support for CPU timer found on EcoNet MIPS based SoCs.
+
config FTTMR010_TIMER
bool "Faraday Technology timer driver" if COMPILE_TEST
depends on HAS_IOMEM
@@ -437,8 +445,8 @@ config ATMEL_ST
config ATMEL_TCB_CLKSRC
bool "Atmel TC Block timer driver" if COMPILE_TEST
- depends on ARM && HAS_IOMEM
- select TIMER_OF if OF
+ depends on ARM && OF && HAS_IOMEM
+ select TIMER_OF
help
Support for Timer Counter Blocks on Atmel SoCs.
@@ -763,4 +771,12 @@ config RALINK_TIMER
Enables support for system tick counter present on
Ralink SoCs RT3352 and MT7620.
+config NXP_STM_TIMER
+ bool "NXP System Timer Module driver"
+ depends on ARCH_S32 || COMPILE_TEST
+ select CLKSRC_MMIO
+ help
+ Enables the support for NXP System Timer Module found in the
+ s32g NXP platform series.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 43ef16a4efa6..205bf3b0a8f3 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o
obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
+obj-$(CONFIG_ECONET_EN751221_TIMER) += timer-econet-en751221.o
obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_OMAP_DM_SYSTIMER) += timer-ti-dm-systimer.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
@@ -92,3 +93,4 @@ obj-$(CONFIG_GXP_TIMER) += timer-gxp.o
obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o
obj-$(CONFIG_EP93XX_TIMER) += timer-ep93xx.o
obj-$(CONFIG_RALINK_TIMER) += timer-ralink.o
+obj-$(CONFIG_NXP_STM_TIMER) += timer-nxp-stm.o
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 39f7c2d736d1..b603c25f3dfa 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -103,7 +103,7 @@ int __init clocksource_i8253_init(void)
#ifdef CONFIG_CLKEVT_I8253
void clockevent_i8253_disable(void)
{
- raw_spin_lock(&i8253_lock);
+ guard(raw_spinlock_irqsave)(&i8253_lock);
/*
* Writing the MODE register should stop the counter, according to
@@ -132,8 +132,6 @@ void clockevent_i8253_disable(void)
outb_p(0, PIT_CH0);
outb_p(0x30, PIT_MODE);
-
- raw_spin_unlock(&i8253_lock);
}
static int pit_shutdown(struct clock_event_device *evt)
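
The hunk above replaces the manual raw_spin_lock()/raw_spin_unlock() pair with the scoped guard helper from <linux/cleanup.h>: the lock (taken with interrupts disabled) is released automatically when the guard variable goes out of scope, which is why the explicit unlock at the end of clockevent_i8253_disable() is dropped. A minimal sketch of the pattern (hypothetical lock and data, for illustration):

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static unsigned int example_counter;

static void example_bump(void)
{
	guard(raw_spinlock_irqsave)(&example_lock);

	example_counter++;
	/* lock dropped and IRQ state restored automatically on return */
}
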
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 3fcbd02b2483..2089aeaae225 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -225,7 +225,6 @@ err_free:
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
-#if defined(CONFIG_ARCH_RZG2L) || defined(CONFIG_ARCH_R9A09G057)
static int __init ostm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -233,7 +232,7 @@ static int __init ostm_probe(struct platform_device *pdev)
return ostm_init(dev->of_node);
}
-static const struct of_device_id ostm_of_table[] = {
+static const struct of_device_id __maybe_unused ostm_of_table[] = {
{ .compatible = "renesas,ostm", },
{ /* sentinel */ }
};
@@ -246,4 +245,3 @@ static struct platform_driver ostm_device_driver = {
},
};
builtin_platform_driver_probe(ostm_device_driver, ostm_probe);
-#endif
diff --git a/drivers/clocksource/timer-econet-en751221.c b/drivers/clocksource/timer-econet-en751221.c
new file mode 100644
index 000000000000..3b449fdaafee
--- /dev/null
+++ b/drivers/clocksource/timer-econet-en751221.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Timer present on EcoNet EN75xx MIPS based SoCs.
+ *
+ * Copyright (C) 2025 by Caleb James DeLisle <cjd@cjdns.fr>
+ */
+
+#include <linux/io.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/sched_clock.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/cpuhotplug.h>
+#include <linux/clk.h>
+
+#define ECONET_BITS 32
+#define ECONET_MIN_DELTA 0x00001000
+#define ECONET_MAX_DELTA GENMASK(ECONET_BITS - 2, 0)
+/* 34Kc hardware has 1 block and 1004Kc has 2. */
+#define ECONET_NUM_BLOCKS DIV_ROUND_UP(NR_CPUS, 2)
+
+static struct {
+ void __iomem *membase[ECONET_NUM_BLOCKS];
+ u32 freq_hz;
+} econet_timer __ro_after_init;
+
+static DEFINE_PER_CPU(struct clock_event_device, econet_timer_pcpu);
+
+/*
+ * Each memory block has two timers; the register order is:
+ * CTL, CMR0, CNT0, CMR1, CNT1
+ */
+static inline void __iomem *reg_ctl(u32 timer_n)
+{
+ return econet_timer.membase[timer_n >> 1];
+}
+
+static inline void __iomem *reg_compare(u32 timer_n)
+{
+ return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x04;
+}
+
+static inline void __iomem *reg_count(u32 timer_n)
+{
+ return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x08;
+}
+
+static inline u32 ctl_bit_enabled(u32 timer_n)
+{
+ return 1U << (timer_n & 1);
+}
+
+static inline u32 ctl_bit_pending(u32 timer_n)
+{
+ return 1U << ((timer_n & 1) + 16);
+}
+
+static bool cevt_is_pending(int cpu_id)
+{
+ return ioread32(reg_ctl(cpu_id)) & ctl_bit_pending(cpu_id);
+}
+
+static irqreturn_t cevt_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *dev = this_cpu_ptr(&econet_timer_pcpu);
+ int cpu = cpumask_first(dev->cpumask);
+
+	/*
+	 * Each VPE has its own events, so this will only happen on a
+	 * spurious interrupt.
+ */
+ if (!cevt_is_pending(cpu))
+ return IRQ_NONE;
+
+ iowrite32(ioread32(reg_count(cpu)), reg_compare(cpu));
+ dev->event_handler(dev);
+ return IRQ_HANDLED;
+}
+
+static int cevt_set_next_event(ulong delta, struct clock_event_device *dev)
+{
+ u32 next;
+ int cpu;
+
+ cpu = cpumask_first(dev->cpumask);
+ next = ioread32(reg_count(cpu)) + delta;
+ iowrite32(next, reg_compare(cpu));
+
+ if ((s32)(next - ioread32(reg_count(cpu))) < ECONET_MIN_DELTA / 2)
+ return -ETIME;
+
+ return 0;
+}
+
+static int cevt_init_cpu(uint cpu)
+{
+ struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, cpu);
+ u32 reg;
+
+ pr_debug("%s: Setting up clockevent for CPU %d\n", cd->name, cpu);
+
+ reg = ioread32(reg_ctl(cpu)) | ctl_bit_enabled(cpu);
+ iowrite32(reg, reg_ctl(cpu));
+
+ enable_percpu_irq(cd->irq, IRQ_TYPE_NONE);
+
+ /* Do this last because it synchronously configures the timer */
+ clockevents_config_and_register(cd, econet_timer.freq_hz,
+ ECONET_MIN_DELTA, ECONET_MAX_DELTA);
+
+ return 0;
+}
+
+static u64 notrace sched_clock_read(void)
+{
+ /* Always read from clock zero no matter the CPU */
+ return (u64)ioread32(reg_count(0));
+}
+
+/* Init */
+
+static void __init cevt_dev_init(uint cpu)
+{
+ iowrite32(0, reg_count(cpu));
+ iowrite32(U32_MAX, reg_compare(cpu));
+}
+
+static int __init cevt_init(struct device_node *np)
+{
+ int i, irq, ret;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+		pr_err("%pOFn: irq_of_parse_and_map failed\n", np);
+ return -EINVAL;
+ }
+
+ ret = request_percpu_irq(irq, cevt_interrupt, np->name, &econet_timer_pcpu);
+
+ if (ret < 0) {
+ pr_err("%pOFn: IRQ %d setup failed (%d)\n", np, irq, ret);
+ goto err_unmap_irq;
+ }
+
+ for_each_possible_cpu(i) {
+ struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, i);
+
+ cd->rating = 310;
+ cd->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_C3STOP |
+ CLOCK_EVT_FEAT_PERCPU;
+ cd->set_next_event = cevt_set_next_event;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(i);
+ cd->name = np->name;
+
+ cevt_dev_init(i);
+ }
+
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "clockevents/econet/timer:starting",
+ cevt_init_cpu, NULL);
+ return 0;
+
+err_unmap_irq:
+ irq_dispose_mapping(irq);
+ return ret;
+}
+
+static int __init timer_init(struct device_node *np)
+{
+ int num_blocks = DIV_ROUND_UP(num_possible_cpus(), 2);
+ struct clk *clk;
+ int ret;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("%pOFn: Failed to get CPU clock from DT %ld\n", np, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+
+ econet_timer.freq_hz = clk_get_rate(clk);
+
+ for (int i = 0; i < num_blocks; i++) {
+ econet_timer.membase[i] = of_iomap(np, i);
+ if (!econet_timer.membase[i]) {
+ pr_err("%pOFn: failed to map register [%d]\n", np, i);
+ return -ENXIO;
+ }
+ }
+
+ /* For clocksource purposes always read clock zero, whatever the CPU */
+ ret = clocksource_mmio_init(reg_count(0), np->name,
+ econet_timer.freq_hz, 301, ECONET_BITS,
+ clocksource_mmio_readl_up);
+ if (ret) {
+ pr_err("%pOFn: clocksource_mmio_init failed: %d", np, ret);
+ return ret;
+ }
+
+ ret = cevt_init(np);
+ if (ret < 0)
+ return ret;
+
+ sched_clock_register(sched_clock_read, ECONET_BITS,
+ econet_timer.freq_hz);
+
+ pr_info("%pOFn: using %u.%03u MHz high precision timer\n", np,
+ econet_timer.freq_hz / 1000000,
+ (econet_timer.freq_hz / 1000) % 1000);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(econet_timer_hpt, "econet,en751221-timer", timer_init);
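
A minimal standalone sketch (plain userspace C, not part of the patch) of the block layout described above: it only reproduces the offset arithmetic used by reg_compare() and reg_count(), assuming the CTL, CMR0, CNT0, CMR1, CNT1 order at offsets 0x00/0x04/0x08/0x0c/0x10.

#include <stdio.h>

static unsigned int compare_off(unsigned int timer_n)
{
        return (timer_n & 1) * 0x08 + 0x04;     /* CMR0 at +0x04, CMR1 at +0x0c */
}

static unsigned int count_off(unsigned int timer_n)
{
        return (timer_n & 1) * 0x08 + 0x08;     /* CNT0 at +0x08, CNT1 at +0x10 */
}

int main(void)
{
        for (unsigned int t = 0; t < 4; t++)
                printf("timer %u: block %u, CMR at +0x%02x, CNT at +0x%02x\n",
                       t, t >> 1, compare_off(t), count_off(t));
        return 0;
}
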
diff --git a/drivers/clocksource/timer-nxp-stm.c b/drivers/clocksource/timer-nxp-stm.c
new file mode 100644
index 000000000000..d7ccf9001729
--- /dev/null
+++ b/drivers/clocksource/timer-nxp-stm.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2018,2021-2025 NXP
+ *
+ * NXP System Timer Module:
+ *
+ * STM supports commonly required system and application software
+ * timing functions. STM includes a 32-bit count-up timer and four
+ * 32-bit compare channels with a separate interrupt source for each
+ * channel. The timer is driven by the STM module clock divided by an
+ * 8-bit prescale value (1 to 256). It has the ability to stop the
+ * timer in Debug mode.
+ */
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched_clock.h>
+#include <linux/units.h>
+
+#define STM_CR(__base) (__base)
+
+#define STM_CR_TEN BIT(0)
+#define STM_CR_FRZ BIT(1)
+#define STM_CR_CPS_OFFSET 8u
+#define STM_CR_CPS_MASK GENMASK(15, STM_CR_CPS_OFFSET)
+
+#define STM_CNT(__base) ((__base) + 0x04)
+
+#define STM_CCR0(__base) ((__base) + 0x10)
+#define STM_CCR1(__base) ((__base) + 0x20)
+#define STM_CCR2(__base) ((__base) + 0x30)
+#define STM_CCR3(__base) ((__base) + 0x40)
+
+#define STM_CCR_CEN BIT(0)
+
+#define STM_CIR0(__base) ((__base) + 0x14)
+#define STM_CIR1(__base) ((__base) + 0x24)
+#define STM_CIR2(__base) ((__base) + 0x34)
+#define STM_CIR3(__base) ((__base) + 0x44)
+
+#define STM_CIR_CIF BIT(0)
+
+#define STM_CMP0(__base) ((__base) + 0x18)
+#define STM_CMP1(__base) ((__base) + 0x28)
+#define STM_CMP2(__base) ((__base) + 0x38)
+#define STM_CMP3(__base) ((__base) + 0x48)
+
+#define STM_ENABLE_MASK (STM_CR_FRZ | STM_CR_TEN)
+
+struct stm_timer {
+ void __iomem *base;
+ unsigned long rate;
+ unsigned long delta;
+ unsigned long counter;
+ struct clock_event_device ced;
+ struct clocksource cs;
+ atomic_t refcnt;
+};
+
+static DEFINE_PER_CPU(struct stm_timer *, stm_timers);
+
+static struct stm_timer *stm_sched_clock;
+
+/*
+ * Global structure for multiple STMs initialization
+ */
+static int stm_instances;
+
+/*
+ * This global lock is used to prevent race conditions on
+ * stm_instances in case the driver is using the ASYNC probe option.
+ */
+static DEFINE_MUTEX(stm_instances_lock);
+
+DEFINE_GUARD(stm_instances, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
+
+static struct stm_timer *cs_to_stm(struct clocksource *cs)
+{
+ return container_of(cs, struct stm_timer, cs);
+}
+
+static struct stm_timer *ced_to_stm(struct clock_event_device *ced)
+{
+ return container_of(ced, struct stm_timer, ced);
+}
+
+static u64 notrace nxp_stm_read_sched_clock(void)
+{
+ return readl(STM_CNT(stm_sched_clock->base));
+}
+
+static u32 nxp_stm_clocksource_getcnt(struct stm_timer *stm_timer)
+{
+ return readl(STM_CNT(stm_timer->base));
+}
+
+static void nxp_stm_clocksource_setcnt(struct stm_timer *stm_timer, u32 cnt)
+{
+ writel(cnt, STM_CNT(stm_timer->base));
+}
+
+static u64 nxp_stm_clocksource_read(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ return (u64)nxp_stm_clocksource_getcnt(stm_timer);
+}
+
+static void nxp_stm_module_enable(struct stm_timer *stm_timer)
+{
+ u32 reg;
+
+ reg = readl(STM_CR(stm_timer->base));
+
+ reg |= STM_ENABLE_MASK;
+
+ writel(reg, STM_CR(stm_timer->base));
+}
+
+static void nxp_stm_module_disable(struct stm_timer *stm_timer)
+{
+ u32 reg;
+
+ reg = readl(STM_CR(stm_timer->base));
+
+ reg &= ~STM_ENABLE_MASK;
+
+ writel(reg, STM_CR(stm_timer->base));
+}
+
+static void nxp_stm_module_put(struct stm_timer *stm_timer)
+{
+ if (atomic_dec_and_test(&stm_timer->refcnt))
+ nxp_stm_module_disable(stm_timer);
+}
+
+static void nxp_stm_module_get(struct stm_timer *stm_timer)
+{
+ if (atomic_inc_return(&stm_timer->refcnt) == 1)
+ nxp_stm_module_enable(stm_timer);
+}
+
+static int nxp_stm_clocksource_enable(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_module_get(stm_timer);
+
+ return 0;
+}
+
+static void nxp_stm_clocksource_disable(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_module_put(stm_timer);
+}
+
+static void nxp_stm_clocksource_suspend(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_clocksource_disable(cs);
+ stm_timer->counter = nxp_stm_clocksource_getcnt(stm_timer);
+}
+
+static void nxp_stm_clocksource_resume(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_clocksource_setcnt(stm_timer, stm_timer->counter);
+ nxp_stm_clocksource_enable(cs);
+}
+
+static void __init devm_clocksource_unregister(void *data)
+{
+ struct stm_timer *stm_timer = data;
+
+ clocksource_unregister(&stm_timer->cs);
+}
+
+static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, struct clk *clk)
+{
+ int ret;
+
+ stm_timer->base = base;
+ stm_timer->rate = clk_get_rate(clk);
+
+ stm_timer->cs.name = name;
+ stm_timer->cs.rating = 460;
+ stm_timer->cs.read = nxp_stm_clocksource_read;
+ stm_timer->cs.enable = nxp_stm_clocksource_enable;
+ stm_timer->cs.disable = nxp_stm_clocksource_disable;
+ stm_timer->cs.suspend = nxp_stm_clocksource_suspend;
+ stm_timer->cs.resume = nxp_stm_clocksource_resume;
+ stm_timer->cs.mask = CLOCKSOURCE_MASK(32);
+ stm_timer->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ ret = clocksource_register_hz(&stm_timer->cs, stm_timer->rate);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, devm_clocksource_unregister, stm_timer);
+ if (ret) {
+ clocksource_unregister(&stm_timer->cs);
+ return ret;
+ }
+
+ stm_sched_clock = stm_timer;
+
+ sched_clock_register(nxp_stm_read_sched_clock, 32, stm_timer->rate);
+
+ dev_dbg(dev, "Registered clocksource %s\n", name);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_read_counter(struct stm_timer *stm_timer)
+{
+ return readl(STM_CNT(stm_timer->base));
+}
+
+static void nxp_stm_clockevent_disable(struct stm_timer *stm_timer)
+{
+ writel(0, STM_CCR0(stm_timer->base));
+}
+
+static void nxp_stm_clockevent_enable(struct stm_timer *stm_timer)
+{
+ writel(STM_CCR_CEN, STM_CCR0(stm_timer->base));
+}
+
+static int nxp_stm_clockevent_shutdown(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ nxp_stm_clockevent_disable(stm_timer);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_set_next_event(unsigned long delta, struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+ u32 val;
+
+ nxp_stm_clockevent_disable(stm_timer);
+
+ stm_timer->delta = delta;
+
+ val = nxp_stm_clockevent_read_counter(stm_timer) + delta;
+
+ writel(val, STM_CMP0(stm_timer->base));
+
+ /*
+ * The counter is shared across the channels and can not be
+ * stopped while we are setting the next event. If the delta
+ * is very small it is possible the counter increases above
+ * the computed 'val'. The min_delta value specified when
+ * registering the clockevent will prevent that. The second
+ * case is if the counter wraps while we compute the 'val' and
+ * before writing the comparator register. We read the counter,
+ * check if we are back in time and abort the timer with -ETIME.
+ */
+ if (val > nxp_stm_clockevent_read_counter(stm_timer) + delta)
+ return -ETIME;
+
+ nxp_stm_clockevent_enable(stm_timer);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_set_periodic(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ return nxp_stm_clockevent_set_next_event(stm_timer->rate, ced);
+}
+
+static void nxp_stm_clockevent_suspend(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ nxp_stm_module_put(stm_timer);
+}
+
+static void nxp_stm_clockevent_resume(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ nxp_stm_module_get(stm_timer);
+}
+
+static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, int irq,
+ struct clk *clk, int cpu)
+{
+ stm_timer->base = base;
+ stm_timer->rate = clk_get_rate(clk);
+
+ stm_timer->ced.name = name;
+ stm_timer->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ stm_timer->ced.set_state_shutdown = nxp_stm_clockevent_shutdown;
+ stm_timer->ced.set_state_periodic = nxp_stm_clockevent_set_periodic;
+ stm_timer->ced.set_next_event = nxp_stm_clockevent_set_next_event;
+ stm_timer->ced.suspend = nxp_stm_clockevent_suspend;
+ stm_timer->ced.resume = nxp_stm_clockevent_resume;
+ stm_timer->ced.cpumask = cpumask_of(cpu);
+ stm_timer->ced.rating = 460;
+ stm_timer->ced.irq = irq;
+
+ per_cpu(stm_timers, cpu) = stm_timer;
+
+ nxp_stm_module_get(stm_timer);
+
+ dev_dbg(dev, "Initialized per cpu clockevent name=%s, irq=%d, cpu=%d\n", name, irq, cpu);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_starting_cpu(unsigned int cpu)
+{
+ struct stm_timer *stm_timer = per_cpu(stm_timers, cpu);
+ int ret;
+
+ if (WARN_ON(!stm_timer))
+ return -EFAULT;
+
+ ret = irq_force_affinity(stm_timer->ced.irq, cpumask_of(cpu));
+ if (ret)
+ return ret;
+
+ /*
+ * Timing measurements show that reading the counter register
+ * and writing the comparator register takes at most 1100 ns
+ * at a 133 MHz clock rate. The timer must be set above this
+ * value; to be safe we set the minimum to 2000 ns, i.e. 2 us.
+ *
+ * minimum ticks = (rate / MICRO) * 2
+ */
+ clockevents_config_and_register(&stm_timer->ced, stm_timer->rate,
+ (stm_timer->rate / MICRO) * 2, ULONG_MAX);
+
+ return 0;
+}
+
+static irqreturn_t nxp_stm_module_interrupt(int irq, void *dev_id)
+{
+ struct stm_timer *stm_timer = dev_id;
+ struct clock_event_device *ced = &stm_timer->ced;
+ u32 val;
+
+ /*
+ * The interrupt is shared across the channels in the
+ * module. Since this one is configured to run only one channel,
+ * it is pointless to test the interrupt flags beforehand and we
+ * can directly reset the channel 0 irq flag register.
+ */
+ writel(STM_CIR_CIF, STM_CIR0(stm_timer->base));
+
+ /*
+ * Update STM_CMP value using the counter value
+ */
+ val = nxp_stm_clockevent_read_counter(stm_timer) + stm_timer->delta;
+
+ writel(val, STM_CMP0(stm_timer->base));
+
+ /*
+ * The STM hardware doesn't support oneshot: it generates an
+ * interrupt and restarts the counter, so software needs to
+ * disable the timer to stop the counter loop in ONESHOT mode.
+ */
+ if (likely(clockevent_state_oneshot(ced)))
+ nxp_stm_clockevent_disable(stm_timer);
+
+ ced->event_handler(ced);
+
+ return IRQ_HANDLED;
+}
+
+static int __init nxp_stm_timer_probe(struct platform_device *pdev)
+{
+ struct stm_timer *stm_timer;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const char *name = of_node_full_name(np);
+ struct clk *clk;
+ void __iomem *base;
+ int irq, ret;
+
+ /*
+ * The device tree can have multiple STM nodes described, so
+ * it makes this driver a good candidate for the async probe.
+ * It is still unclear whether the time framework correctly
+ * handles parallel loading of the timers, but at least this
+ * driver is ready to support the option.
+ */
+ guard(stm_instances)(&stm_instances_lock);
+
+ /*
+ * The S32Gx are SoCs featuring a diverse set of cores. Linux
+ * is expected to run on Cortex-A53 cores, while other
+ * software stacks will operate on Cortex-M cores. The number
+ * of STM instances has been sized to include at most one
+ * instance per core.
+ *
+ * As we need a clocksource and a clockevent per cpu, we
+ * simply initialize a clocksource per cpu along with the
+ * clockevent which makes the resulting code simpler.
+ *
+ * However if the device tree is describing more STM instances
+ * than the number of cores, then we ignore them.
+ */
+ if (stm_instances >= num_possible_cpus())
+ return 0;
+
+ base = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "Failed to iomap %pOFn\n", np);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get IRQ\n");
+
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Clock not found\n");
+
+ stm_timer = devm_kzalloc(dev, sizeof(*stm_timer), GFP_KERNEL);
+ if (!stm_timer)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, irq, nxp_stm_module_interrupt,
+ IRQF_TIMER | IRQF_NOBALANCING, name, stm_timer);
+ if (ret)
+ return dev_err_probe(dev, ret, "Unable to allocate interrupt line\n");
+
+ ret = nxp_stm_clocksource_init(dev, stm_timer, name, base, clk);
+ if (ret)
+ return ret;
+
+ /*
+ * Each probed STM becomes a per-CPU clockevent. Until we have
+ * probed as many instances as there are CPUs available on the
+ * system, we only do a partial initialization here.
+ */
+ ret = nxp_stm_clockevent_per_cpu_init(dev, stm_timer, name,
+ base, irq, clk,
+ stm_instances);
+ if (ret)
+ return ret;
+
+ stm_instances++;
+
+ /*
+ * Once the number of probed STMs for per-CPU clockevents
+ * equals the number of available CPUs on the system, install
+ * the CPU hotplug callback to finish the initialization by
+ * registering the clockevents.
+ */
+ if (stm_instances == num_possible_cpus()) {
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "STM timer:starting",
+ nxp_stm_clockevent_starting_cpu, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id nxp_stm_of_match[] = {
+ { .compatible = "nxp,s32g2-stm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nxp_stm_of_match);
+
+static struct platform_driver nxp_stm_probe = {
+ .probe = nxp_stm_timer_probe,
+ .driver = {
+ .name = "nxp-stm",
+ .of_match_table = nxp_stm_of_match,
+ },
+};
+module_platform_driver(nxp_stm_probe);
+
+MODULE_DESCRIPTION("NXP System Timer Module driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index 928da2f6de69..6e7944ffd7c0 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -5,6 +5,7 @@
* Pascal Paillet <p.paillet@st.com> for STMicroelectronics.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@@ -27,6 +28,7 @@ struct stm32_lp_private {
u32 psc;
struct device *dev;
struct clk *clk;
+ u32 version;
};
static struct stm32_lp_private*
@@ -47,12 +49,46 @@ static int stm32_clkevent_lp_shutdown(struct clock_event_device *clkevt)
return 0;
}
-static int stm32_clkevent_lp_set_timer(unsigned long evt,
- struct clock_event_device *clkevt,
- int is_periodic)
+static int stm32mp25_clkevent_lp_set_evt(struct stm32_lp_private *priv, unsigned long evt)
{
- struct stm32_lp_private *priv = to_priv(clkevt);
+ int ret;
+ u32 val;
+
+ regmap_read(priv->reg, STM32_LPTIM_CR, &val);
+ if (!FIELD_GET(STM32_LPTIM_ENABLE, val)) {
+ /* Enable LPTIMER to be able to write into IER and ARR registers */
+ regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE);
+ /*
+ * After setting the ENABLE bit, a delay of two counter clock cycles is needed
+ * before the LPTIM is actually enabled. For a 32 kHz rate this is approximately
+ * 62.5 microseconds, so round it up.
+ */
+ udelay(63);
+ }
+ /* set next event counter */
+ regmap_write(priv->reg, STM32_LPTIM_ARR, evt);
+ /* enable ARR interrupt */
+ regmap_write(priv->reg, STM32_LPTIM_IER, STM32_LPTIM_ARRMIE);
+
+ /* Poll DIEROK and ARROK to ensure register access has completed */
+ ret = regmap_read_poll_timeout_atomic(priv->reg, STM32_LPTIM_ISR, val,
+ (val & STM32_LPTIM_DIEROK_ARROK) ==
+ STM32_LPTIM_DIEROK_ARROK,
+ 10, 500);
+ if (ret) {
+ dev_err(priv->dev, "access to LPTIM timed out\n");
+ /* Disable LPTIMER */
+ regmap_write(priv->reg, STM32_LPTIM_CR, 0);
+ return ret;
+ }
+ /* Clear DIEROK and ARROK flags */
+ regmap_write(priv->reg, STM32_LPTIM_ICR, STM32_LPTIM_DIEROKCF_ARROKCF);
+ return 0;
+}
+
+static void stm32_clkevent_lp_set_evt(struct stm32_lp_private *priv, unsigned long evt)
+{
/* disable LPTIMER to be able to write into IER register*/
regmap_write(priv->reg, STM32_LPTIM_CR, 0);
/* enable ARR interrupt */
@@ -61,6 +97,22 @@ static int stm32_clkevent_lp_set_timer(unsigned long evt,
regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE);
/* set next event counter */
regmap_write(priv->reg, STM32_LPTIM_ARR, evt);
+}
+
+static int stm32_clkevent_lp_set_timer(unsigned long evt,
+ struct clock_event_device *clkevt,
+ int is_periodic)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+ int ret;
+
+ if (priv->version == STM32_LPTIM_VERR_23) {
+ ret = stm32mp25_clkevent_lp_set_evt(priv, evt);
+ if (ret)
+ return ret;
+ } else {
+ stm32_clkevent_lp_set_evt(priv, evt);
+ }
/* start counter */
if (is_periodic)
@@ -176,6 +228,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
return -ENOMEM;
priv->reg = ddata->regmap;
+ priv->version = ddata->version;
priv->clk = ddata->clk;
ret = clk_prepare_enable(priv->clk);
if (ret)
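
A quick standalone sketch (userspace C, not part of the patch) of the enable-delay arithmetic behind the udelay(63) above: two LPTIM counter cycles converted to microseconds and rounded up.

#include <stdio.h>

static unsigned int enable_delay_us(unsigned long rate_hz)
{
        /* round up: (2 cycles * 1e6 us/s) / rate */
        return (2UL * 1000000 + rate_hz - 1) / rate_hz;
}

int main(void)
{
        printf("32 kHz   -> %u us\n", enable_delay_us(32000));  /* 63 */
        printf("32768 Hz -> %u us\n", enable_delay_us(32768));  /* 62 */
        return 0;
}
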
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
index 5d4cf5237a11..e5394f98a02e 100644
--- a/drivers/clocksource/timer-tegra186.c
+++ b/drivers/clocksource/timer-tegra186.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2019-2020 NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2019-2025 NVIDIA Corporation. All rights reserved.
*/
+#include <linux/bitfield.h>
#include <linux/clocksource.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -29,6 +30,7 @@
#define TMRSR 0x004
#define TMRSR_INTR_CLR BIT(30)
+#define TMRSR_PCV GENMASK(28, 0)
#define TMRCSSR 0x008
#define TMRCSSR_SRC_USEC (0 << 0)
@@ -45,6 +47,9 @@
#define WDTCR_TIMER_SOURCE_MASK 0xf
#define WDTCR_TIMER_SOURCE(x) ((x) & 0xf)
+#define WDTSR 0x004
+#define WDTSR_CURRENT_EXPIRATION_COUNT GENMASK(14, 12)
+
#define WDTCMDR 0x008
#define WDTCMDR_DISABLE_COUNTER BIT(1)
#define WDTCMDR_START_COUNTER BIT(0)
@@ -169,18 +174,6 @@ static void tegra186_wdt_enable(struct tegra186_wdt *wdt)
value &= ~WDTCR_PERIOD_MASK;
value |= WDTCR_PERIOD(1);
- /* enable local interrupt for WDT petting */
- value |= WDTCR_LOCAL_INT_ENABLE;
-
- /* enable local FIQ and remote interrupt for debug dump */
- if (0)
- value |= WDTCR_REMOTE_INT_ENABLE |
- WDTCR_LOCAL_FIQ_ENABLE;
-
- /* enable system debug reset (doesn't properly reboot) */
- if (0)
- value |= WDTCR_SYSTEM_DEBUG_RESET_ENABLE;
-
/* enable system POR reset */
value |= WDTCR_SYSTEM_POR_RESET_ENABLE;
@@ -234,12 +227,69 @@ static int tegra186_wdt_set_timeout(struct watchdog_device *wdd,
return 0;
}
+static unsigned int tegra186_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
+ u32 expiration, val;
+ u64 timeleft;
+
+ if (!watchdog_active(&wdt->base)) {
+ /* return zero if the watchdog timer is not activated. */
+ return 0;
+ }
+
+ /*
+ * Reset occurs on the fifth expiration of the
+ * watchdog timer and so when the watchdog timer is configured,
+ * the actual value programmed into the counter is 1/5 of the
+ * timeout value. Once the counter reaches 0, expiration count
+ * will be increased by 1 and the down counter restarts.
+ * Hence to get the time left before system reset we must
+ * combine 2 parts:
+ * 1. value of the current down counter
+ * 2. (number of counter expirations remaining) * (timeout/5)
+ */
+
+ /* Get the current number of counter expirations. Should be a
+ * value between 0 and 4
+ */
+ val = readl_relaxed(wdt->regs + WDTSR);
+ expiration = FIELD_GET(WDTSR_CURRENT_EXPIRATION_COUNT, val);
+ if (WARN_ON_ONCE(expiration > 4))
+ return 0;
+
+ /* Get the current counter value in microsecond. */
+ val = readl_relaxed(wdt->tmr->regs + TMRSR);
+ timeleft = FIELD_GET(TMRSR_PCV, val);
+
+ /*
+ * Calculate the time remaining by adding the time for the
+ * counter value to the time of the counter expirations that
+ * remain.
+ */
+ timeleft += (((u64)wdt->base.timeout * USEC_PER_SEC) / 5) * (4 - expiration);
+
+ /*
+ * Convert the current counter value to seconds,
+ * rounding to the nearest second. Cast u64 to
+ * u32 under the assumption that no overflow happens
+ * when converting to seconds.
+ */
+ timeleft = DIV_ROUND_CLOSEST_ULL(timeleft, USEC_PER_SEC);
+
+ if (WARN_ON_ONCE(timeleft > U32_MAX))
+ return U32_MAX;
+
+ return lower_32_bits(timeleft);
+}
+
static const struct watchdog_ops tegra186_wdt_ops = {
.owner = THIS_MODULE,
.start = tegra186_wdt_start,
.stop = tegra186_wdt_stop,
.ping = tegra186_wdt_ping,
.set_timeout = tegra186_wdt_set_timeout,
+ .get_timeleft = tegra186_wdt_get_timeleft,
};
static struct tegra186_wdt *tegra186_wdt_create(struct tegra186_timer *tegra,
@@ -365,23 +415,10 @@ static int tegra186_timer_usec_init(struct tegra186_timer *tegra)
return clocksource_register_hz(&tegra->usec, USEC_PER_SEC);
}
-static irqreturn_t tegra186_timer_irq(int irq, void *data)
-{
- struct tegra186_timer *tegra = data;
-
- if (watchdog_active(&tegra->wdt->base)) {
- tegra186_wdt_disable(tegra->wdt);
- tegra186_wdt_enable(tegra->wdt);
- }
-
- return IRQ_HANDLED;
-}
-
static int tegra186_timer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra186_timer *tegra;
- unsigned int irq;
int err;
tegra = devm_kzalloc(dev, sizeof(*tegra), GFP_KERNEL);
@@ -400,8 +437,6 @@ static int tegra186_timer_probe(struct platform_device *pdev)
if (err < 0)
return err;
- irq = err;
-
/* create a watchdog using a preconfigured timer */
tegra->wdt = tegra186_wdt_create(tegra, 0);
if (IS_ERR(tegra->wdt)) {
@@ -428,17 +463,8 @@ static int tegra186_timer_probe(struct platform_device *pdev)
goto unregister_osc;
}
- err = devm_request_irq(dev, irq, tegra186_timer_irq, 0,
- "tegra186-timer", tegra);
- if (err < 0) {
- dev_err(dev, "failed to request IRQ#%u: %d\n", irq, err);
- goto unregister_usec;
- }
-
return 0;
-unregister_usec:
- clocksource_unregister(&tegra->usec);
unregister_osc:
clocksource_unregister(&tegra->osc);
unregister_tsc:
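
A standalone sketch (userspace C, not part of the patch) of the time-left calculation tegra186_wdt_get_timeleft() performs: the remaining down-counter value plus the remaining expirations, each worth timeout/5, rounded to the nearest second.

#include <stdint.h>
#include <stdio.h>

static uint32_t timeleft_sec(uint32_t timeout_sec, uint32_t expiration,
                             uint64_t counter_us)
{
        uint64_t left = counter_us;

        /* reset happens on the fifth expiration, so 4 - expiration remain */
        left += ((uint64_t)timeout_sec * 1000000 / 5) * (4 - expiration);
        return (uint32_t)((left + 500000) / 1000000);   /* nearest second */
}

int main(void)
{
        /* 30 s timeout, one expiration consumed, 4.2 s left on the counter */
        printf("%u s\n", timeleft_sec(30, 1, 4200000));  /* ~22 s */
        return 0;
}
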
diff --git a/drivers/comedi/drivers/jr3_pci.c b/drivers/comedi/drivers/jr3_pci.c
index cdc842b32bab..75dce1ff2419 100644
--- a/drivers/comedi/drivers/jr3_pci.c
+++ b/drivers/comedi/drivers/jr3_pci.c
@@ -758,7 +758,7 @@ static void jr3_pci_detach(struct comedi_device *dev)
struct jr3_pci_dev_private *devpriv = dev->private;
if (devpriv)
- timer_delete_sync(&devpriv->timer);
+ timer_shutdown_sync(&devpriv->timer);
comedi_pci_detach(dev);
}
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index d64b07ec48e5..78702a08364f 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -217,6 +217,18 @@ config CPUFREQ_DT
If in doubt, say N.
+config CPUFREQ_DT_RUST
+ tristate "Rust based Generic DT based cpufreq driver"
+ depends on HAVE_CLK && OF && RUST
+ select CPUFREQ_DT_PLATDEV
+ select PM_OPP
+ help
+ This adds a Rust based generic DT based cpufreq driver for frequency
+ management. It supports both uniprocessor (UP) and symmetric
+ multiprocessor (SMP) systems.
+
+ If in doubt, say N.
+
config CPUFREQ_VIRT
tristate "Virtual cpufreq driver"
depends on GENERIC_ARCH_TOPOLOGY
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 4f9cb943d945..0d46402e3094 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -76,7 +76,7 @@ config ARM_VEXPRESS_SPC_CPUFREQ
config ARM_BRCMSTB_AVS_CPUFREQ
tristate "Broadcom STB AVS CPUfreq driver"
depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST
- default y
+ default y if ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ
help
Some Broadcom STB SoCs use a co-processor running proprietary firmware
("AVS") to handle voltage and frequency scaling. This driver provides
@@ -88,7 +88,7 @@ config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
depends on ARCH_HIGHBANK || COMPILE_TEST
depends on CPUFREQ_DT && REGULATOR && PL320_MBOX
- default m
+ default m if ARCH_HIGHBANK
help
This adds the CPUFreq driver for Calxeda Highbank SoC
based boards.
@@ -133,7 +133,7 @@ config ARM_MEDIATEK_CPUFREQ
config ARM_MEDIATEK_CPUFREQ_HW
tristate "MediaTek CPUFreq HW driver"
depends on ARCH_MEDIATEK || COMPILE_TEST
- default m
+ default m if ARCH_MEDIATEK
help
Support for the CPUFreq HW driver.
Some MediaTek chipsets have a HW engine to offload the steps
@@ -181,7 +181,7 @@ config ARM_RASPBERRYPI_CPUFREQ
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
depends on CPU_S3C6410 || COMPILE_TEST
- default y
+ default CPU_S3C6410
help
This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -190,7 +190,7 @@ config ARM_S3C64XX_CPUFREQ
config ARM_S5PV210_CPUFREQ
bool "Samsung S5PV210 and S5PC110"
depends on CPU_S5PV210 || COMPILE_TEST
- default y
+ default CPU_S5PV210
help
This adds the CPUFreq driver for Samsung S5PV210 and
S5PC110 SoCs.
@@ -214,7 +214,7 @@ config ARM_SCMI_CPUFREQ
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR || COMPILE_TEST
- default y
+ default PLAT_SPEAR
help
This adds the CPUFreq driver support for SPEAr SOCs.
@@ -233,7 +233,7 @@ config ARM_TEGRA20_CPUFREQ
tristate "Tegra20/30 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
- default y
+ default ARCH_TEGRA
help
This adds the CPUFreq driver support for Tegra20/30 SOCs.
@@ -241,7 +241,7 @@ config ARM_TEGRA124_CPUFREQ
bool "Tegra124 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
- default y
+ default ARCH_TEGRA
help
This adds the CPUFreq driver support for Tegra124 SOCs.
@@ -256,14 +256,14 @@ config ARM_TEGRA194_CPUFREQ
tristate "Tegra194 CPUFreq support"
depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST)
depends on TEGRA_BPMP
- default y
+ default ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC
help
This adds CPU frequency driver support for Tegra194 SOCs.
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
- default y
+ default ARCH_OMAP2PLUS || ARCH_K3
help
This driver enables valid OPPs on the running platform based on
values contained within the SoC in use. Enable this in order to
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 22ab45209f9b..d38526b8e063 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
+obj-$(CONFIG_CPUFREQ_DT_RUST) += rcpufreq_dt.o
obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 924314cdeebc..4f7f9201598d 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -79,11 +79,11 @@ static bool boost_state(unsigned int cpu)
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
- rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
+ rdmsrq_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
- rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr);
+ rdmsrq_on_cpu(cpu, MSR_K7_HWCR, &msr);
return !(msr & MSR_K7_HWCR_CPB_DIS);
}
return false;
@@ -110,14 +110,14 @@ static int boost_set_msr(bool enable)
return -EINVAL;
}
- rdmsrl(msr_addr, val);
+ rdmsrq(msr_addr, val);
if (enable)
val &= ~msr_mask;
else
val |= msr_mask;
- wrmsrl(msr_addr, val);
+ wrmsrq(msr_addr, val);
return 0;
}
@@ -660,7 +660,7 @@ static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
nominal_perf = perf_caps.nominal_perf;
if (nominal_freq)
- *nominal_freq = perf_caps.nominal_freq;
+ *nominal_freq = perf_caps.nominal_freq * 1000;
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
@@ -909,8 +909,19 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
- if (acpi_cpufreq_driver.set_boost)
- policy->boost_supported = true;
+ if (acpi_cpufreq_driver.set_boost) {
+ if (policy->boost_supported) {
+ /*
+ * The firmware may have altered boost state while the
+ * CPU was offline (for example during a suspend-resume
+ * cycle).
+ */
+ if (policy->boost_enabled != boost_state(cpu))
+ set_boost(policy, policy->boost_enabled);
+ } else {
+ policy->boost_supported = true;
+ }
+ }
return result;
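
A rough sketch (userspace C, not part of the patch; read_hw_boost()/write_hw_boost() are hypothetical stand-ins for the MSR accessors) of the boost re-sync added to acpi_cpufreq_cpu_init() above:

#include <stdbool.h>
#include <stdio.h>

static bool fw_boost_state;     /* what firmware left behind */
static bool read_hw_boost(void)     { return fw_boost_state; }
static void write_hw_boost(bool on) { fw_boost_state = on; }

struct policy {
        bool boost_supported;
        bool boost_enabled;     /* cached software state */
};

/* If boost was already advertised for this policy, make the hardware match
 * the cached state, since firmware may have changed it while the CPU was
 * offline (e.g. across a suspend-resume cycle).
 */
static void boost_init(struct policy *p)
{
        if (!p->boost_supported) {
                p->boost_supported = true;
                return;
        }
        if (p->boost_enabled != read_hw_boost())
                write_hw_boost(p->boost_enabled);
}

int main(void)
{
        struct policy p = { .boost_supported = true, .boost_enabled = true };

        boost_init(&p);         /* rewrites HW boost to match the cached state */
        printf("hw boost now: %d\n", fw_boost_state);
        return 0;
}
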
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index e671bc7d1550..447b9aa5ce40 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -31,6 +31,8 @@
#include <acpi/cppc_acpi.h>
+#include <asm/msr.h>
+
#include "amd-pstate.h"
@@ -90,9 +92,9 @@ static int amd_pstate_ut_check_enabled(u32 index)
if (get_shared_mem())
return 0;
- ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
+ ret = rdmsrq_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
if (ret) {
- pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
+ pr_err("%s rdmsrq_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
return ret;
}
@@ -137,7 +139,7 @@ static int amd_pstate_ut_check_perf(u32 index)
lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
lowest_perf = cppc_perf.lowest_perf;
} else {
- ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+ ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
if (ret) {
pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
return ret;
@@ -242,25 +244,30 @@ static int amd_pstate_set_mode(enum amd_pstate_mode mode)
static int amd_pstate_ut_check_driver(u32 index)
{
enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
+ enum amd_pstate_mode orig_mode = amd_pstate_get_status();
+ int ret;
for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
- int ret = amd_pstate_set_mode(mode1);
+ ret = amd_pstate_set_mode(mode1);
if (ret)
return ret;
for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
if (mode1 == mode2)
continue;
ret = amd_pstate_set_mode(mode2);
- if (ret) {
- pr_err("%s: failed to update status for %s->%s\n", __func__,
- amd_pstate_get_mode_string(mode1),
- amd_pstate_get_mode_string(mode2));
- return ret;
- }
+ if (ret)
+ goto out;
}
}
- return 0;
+out:
+ if (ret)
+ pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
+ amd_pstate_get_mode_string(mode1),
+ amd_pstate_get_mode_string(mode2), ret);
+
+ amd_pstate_set_mode(orig_mode);
+ return ret;
}
static int __init amd_pstate_ut_init(void)
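
A standalone sketch (userspace C, not part of the patch; set_mode() is a hypothetical stand-in) of the test pattern above: walk every mode transition but always restore the original mode on the way out, whether or not a transition failed.

#include <stdio.h>

static int set_mode(int mode) { printf("mode -> %d\n", mode); return 0; }

static int check_all_transitions(int orig_mode, int nr_modes)
{
        int ret = 0;

        for (int m1 = 0; m1 < nr_modes && !ret; m1++) {
                ret = set_mode(m1);
                for (int m2 = 0; m2 < nr_modes && !ret; m2++) {
                        if (m1 == m2)
                                continue;
                        ret = set_mode(m2);
                }
        }

        set_mode(orig_mode);    /* restore regardless of the result */
        return ret;
}

int main(void)
{
        return check_all_transitions(0, 3);
}
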
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 6789eed1bb5b..f3477ab37742 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -197,7 +197,7 @@ static u8 msr_get_epp(struct amd_cpudata *cpudata)
u64 value;
int ret;
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
if (ret < 0) {
pr_debug("Could not retrieve energy perf value (%d)\n", ret);
return ret;
@@ -258,10 +258,10 @@ static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
return 0;
if (fast_switch) {
- wrmsrl(MSR_AMD_CPPC_REQ, value);
+ wrmsrq(MSR_AMD_CPPC_REQ, value);
return 0;
} else {
- int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ int ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
if (ret)
return ret;
@@ -309,7 +309,7 @@ static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
if (value == prev)
return 0;
- ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
if (ret) {
pr_err("failed to set energy perf value (%d)\n", ret);
return ret;
@@ -371,7 +371,7 @@ static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
static inline int msr_cppc_enable(struct cpufreq_policy *policy)
{
- return wrmsrl_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
+ return wrmsrq_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
}
static int shmem_cppc_enable(struct cpufreq_policy *policy)
@@ -389,9 +389,10 @@ static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy)
static int msr_init_perf(struct amd_cpudata *cpudata)
{
union perf_cached perf = READ_ONCE(cpudata->perf);
- u64 cap1, numerator;
+ u64 cap1, numerator, cppc_req;
+ u8 min_perf;
- int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
+ int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
if (ret)
return ret;
@@ -400,6 +401,22 @@ static int msr_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->cppc_req_cached, cppc_req);
+ min_perf = FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cppc_req);
+
+ /*
+ * Clear out the min_perf part to check whether the rest of the MSR is 0; if so, this is an
+ * indication that the min_perf value is the one specified through the BIOS option.
+ */
+ cppc_req &= ~(AMD_CPPC_MIN_PERF_MASK);
+
+ if (!cppc_req)
+ perf.bios_min_perf = min_perf;
+
perf.highest_perf = numerator;
perf.max_limit_perf = numerator;
perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
@@ -417,6 +434,7 @@ static int shmem_init_perf(struct amd_cpudata *cpudata)
struct cppc_perf_caps cppc_perf;
union perf_cached perf = READ_ONCE(cpudata->perf);
u64 numerator;
+ bool auto_sel;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
@@ -438,7 +456,7 @@ static int shmem_init_perf(struct amd_cpudata *cpudata)
if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
- ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf);
+ ret = cppc_get_auto_sel(cpudata->cpu, &auto_sel);
if (ret) {
pr_warn("failed to get auto_sel, ret: %d\n", ret);
return 0;
@@ -518,8 +536,8 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
unsigned long flags;
local_irq_save(flags);
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
+ rdmsrq(MSR_IA32_APERF, aperf);
+ rdmsrq(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
@@ -554,6 +572,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
if (!policy)
return;
+ /* limit the max perf when core performance boost feature is disabled */
+ if (!cpudata->boost_supported)
+ max_perf = min_t(u8, perf.nominal_perf, max_perf);
+
des_perf = clamp_t(u8, des_perf, min_perf, max_perf);
policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf);
@@ -563,10 +585,6 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
des_perf = 0;
}
- /* limit the max perf when core performance boost feature is disabled */
- if (!cpudata->boost_supported)
- max_perf = min_t(u8, perf.nominal_perf, max_perf);
-
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
@@ -580,20 +598,26 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
{
/*
* Initialize lower frequency limit (i.e.policy->min) with
- * lowest_nonlinear_frequency which is the most energy efficient
- * frequency. Override the initial value set by cpufreq core and
- * amd-pstate qos_requests.
+ * lowest_nonlinear_frequency or, if specified, the min frequency from the BIOS.
+ * Override the initial value set by the cpufreq core and amd-pstate qos_requests.
*/
if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
struct cpufreq_policy *policy __free(put_cpufreq_policy) =
cpufreq_cpu_get(policy_data->cpu);
struct amd_cpudata *cpudata;
+ union perf_cached perf;
if (!policy)
return -EINVAL;
cpudata = policy->driver_data;
- policy_data->min = cpudata->lowest_nonlinear_freq;
+ perf = READ_ONCE(cpudata->perf);
+
+ if (perf.bios_min_perf)
+ policy_data->min = perf_to_freq(perf, cpudata->nominal_freq,
+ perf.bios_min_perf);
+ else
+ policy_data->min = cpudata->lowest_nonlinear_freq;
}
cpufreq_verify_within_cpu_limits(policy_data);
@@ -607,13 +631,16 @@ static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
union perf_cached perf = READ_ONCE(cpudata->perf);
perf.max_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->max);
- perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
+ WRITE_ONCE(cpudata->max_limit_freq, policy->max);
- if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf);
+ WRITE_ONCE(cpudata->min_limit_freq, min(cpudata->nominal_freq, cpudata->max_limit_freq));
+ } else {
+ perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
+ WRITE_ONCE(cpudata->min_limit_freq, policy->min);
+ }
- WRITE_ONCE(cpudata->max_limit_freq, policy->max);
- WRITE_ONCE(cpudata->min_limit_freq, policy->min);
WRITE_ONCE(cpudata->perf, perf);
}
@@ -769,7 +796,7 @@ static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
goto exit_err;
}
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
+ ret = rdmsrq_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
if (ret) {
pr_err_once("failed to read initial CPU boost state!\n");
ret = -EIO;
@@ -788,18 +815,8 @@ exit_err:
static void amd_perf_ctl_reset(unsigned int cpu)
{
- wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
-}
-
-/*
- * Set amd-pstate preferred core enable can't be done directly from cpufreq callbacks
- * due to locking, so queue the work for later.
- */
-static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
-{
- sched_set_itmt_support();
+ wrmsrq_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
}
-static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
#define CPPC_MAX_PERF U8_MAX
@@ -811,29 +828,20 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
cpudata->hw_prefcore = true;
- /*
- * The priorities can be set regardless of whether or not
- * sched_set_itmt_support(true) has been called and it is valid to
- * update them at any time after it has been called.
- */
+ /* Priorities must be initialized before ITMT support can be toggled on. */
sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
-
- schedule_work(&sched_prefcore_work);
}
-static void amd_pstate_update_limits(unsigned int cpu)
+static void amd_pstate_update_limits(struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
bool highest_perf_changed = false;
+ unsigned int cpu = policy->cpu;
if (!amd_pstate_prefcore)
return;
- if (!policy)
- return;
-
if (amd_get_highest_perf(cpu, &cur_high))
return;
@@ -844,8 +852,10 @@ static void amd_pstate_update_limits(unsigned int cpu)
if (highest_perf_changed) {
WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
- if (cur_high < CPPC_MAX_PERF)
+ if (cur_high < CPPC_MAX_PERF) {
sched_set_itmt_core_prio((int)cur_high, cpu);
+ sched_update_asym_prefer_cpu(cpu, prev_high, cur_high);
+ }
}
}
@@ -1037,6 +1047,10 @@ free_cpudata1:
static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /* Reset CPPC_REQ MSR to the BIOS value */
+ amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
freq_qos_remove_request(&cpudata->req[1]);
freq_qos_remove_request(&cpudata->req[0]);
@@ -1193,6 +1207,9 @@ static ssize_t show_energy_performance_preference(
static void amd_pstate_driver_cleanup(void)
{
+ if (amd_pstate_prefcore)
+ sched_clear_itmt_support();
+
cppc_state = AMD_PSTATE_DISABLE;
current_pstate_driver = NULL;
}
@@ -1235,6 +1252,10 @@ static int amd_pstate_register_driver(int mode)
return ret;
}
+ /* Enable ITMT support once all CPUs have initialized their asym priorities. */
+ if (amd_pstate_prefcore)
+ sched_set_itmt_support();
+
return 0;
}
@@ -1311,6 +1332,12 @@ static ssize_t amd_pstate_show_status(char *buf)
return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
}
+int amd_pstate_get_status(void)
+{
+ return cppc_state;
+}
+EXPORT_SYMBOL_GPL(amd_pstate_get_status);
+
int amd_pstate_update_status(const char *buf, size_t size)
{
int mode_idx;
@@ -1425,7 +1452,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata;
union perf_cached perf;
struct device *dev;
- u64 value;
int ret;
/*
@@ -1490,12 +1516,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
}
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
- if (ret)
- return ret;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
- }
ret = amd_pstate_set_epp(policy, cpudata->epp_default);
if (ret)
return ret;
@@ -1515,6 +1535,11 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /* Reset CPPC_REQ MSR to the BIOS value */
+ amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+
kfree(cpudata);
policy->driver_data = NULL;
}
@@ -1565,21 +1590,38 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
return 0;
}
-static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_online(struct cpufreq_policy *policy)
{
- pr_debug("AMD CPU Core %d going online\n", policy->cpu);
-
return amd_pstate_cppc_enable(policy);
}
-static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
{
- return 0;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /*
+ * Reset the CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified
+ * min_perf value across kexec reboots. If this CPU is just onlined normally afterwards, the
+ * limits, epp and desired perf will get reset to the cached values in the cpudata struct.
+ */
+ return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
}
-static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
+static int amd_pstate_suspend(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ int ret;
+
+ /*
+ * Reset the CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified
+ * min_perf value across kexec reboots. If this CPU is simply resumed without kexec,
+ * the limits, epp and desired perf will get reset to the cached values in the cpudata struct.
+ */
+ ret = amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+ if (ret)
+ return ret;
/* invalidate to ensure it's rewritten during resume */
cpudata->cppc_req_cached = 0;
@@ -1590,6 +1632,17 @@ static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
return 0;
}
+static int amd_pstate_resume(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ int cur_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->cur);
+
+ /* Set CPPC_REQ to last sane value until the governor updates it */
+ return amd_pstate_update_perf(policy, perf.min_limit_perf, cur_perf, perf.max_limit_perf,
+ 0U, false);
+}
+
static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
@@ -1615,6 +1668,10 @@ static struct cpufreq_driver amd_pstate_driver = {
.fast_switch = amd_pstate_fast_switch,
.init = amd_pstate_cpu_init,
.exit = amd_pstate_cpu_exit,
+ .online = amd_pstate_cpu_online,
+ .offline = amd_pstate_cpu_offline,
+ .suspend = amd_pstate_suspend,
+ .resume = amd_pstate_resume,
.set_boost = amd_pstate_set_boost,
.update_limits = amd_pstate_update_limits,
.name = "amd-pstate",
@@ -1627,9 +1684,9 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.setpolicy = amd_pstate_epp_set_policy,
.init = amd_pstate_epp_cpu_init,
.exit = amd_pstate_epp_cpu_exit,
- .offline = amd_pstate_epp_cpu_offline,
- .online = amd_pstate_epp_cpu_online,
- .suspend = amd_pstate_epp_suspend,
+ .offline = amd_pstate_cpu_offline,
+ .online = amd_pstate_cpu_online,
+ .suspend = amd_pstate_suspend,
.resume = amd_pstate_epp_resume,
.update_limits = amd_pstate_update_limits,
.set_boost = amd_pstate_set_boost,
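
A standalone sketch (userspace C, not part of the patch; the mask position is assumed for illustration only, the driver uses AMD_CPPC_MIN_PERF_MASK with FIELD_GET()) of the BIOS min_perf detection added to msr_init_perf() above:

#include <stdint.h>
#include <stdio.h>

#define MIN_PERF_MASK   0xffULL         /* assumed field position for this sketch */

static uint8_t bios_min_perf(uint64_t cppc_req)
{
        uint8_t min_perf = cppc_req & MIN_PERF_MASK;

        /* Only trust min_perf as the BIOS floor when nothing else is programmed */
        if (cppc_req & ~MIN_PERF_MASK)
                return 0;
        return min_perf;
}

int main(void)
{
        printf("%u\n", bios_min_perf(0x2a));            /* 42: BIOS-provided floor   */
        printf("%u\n", bios_min_perf(0x00ff002aULL));   /* 0: MSR already programmed */
        return 0;
}
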
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index fbe1c08d3f06..cb45fdca27a6 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -30,6 +30,7 @@
* @lowest_perf: the absolute lowest performance level of the processor
* @min_limit_perf: Cached value of the performance corresponding to policy->min
* @max_limit_perf: Cached value of the performance corresponding to policy->max
+ * @bios_min_perf: Cached perf value corresponding to the "Requested CPU Min Frequency" BIOS option
*/
union perf_cached {
struct {
@@ -39,6 +40,7 @@ union perf_cached {
u8 lowest_perf;
u8 min_limit_perf;
u8 max_limit_perf;
+ u8 bios_min_perf;
};
u64 val;
};
@@ -119,6 +121,7 @@ enum amd_pstate_mode {
AMD_PSTATE_MAX,
};
const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode);
+int amd_pstate_get_status(void);
int amd_pstate_update_status(const char *buf, size_t size);
#endif /* _LINUX_AMD_PSTATE_H */
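
A standalone sketch (userspace C, not part of the patch; only a subset of the real fields) of the perf_cached idea the new bios_min_perf member extends: several u8 values overlaid on one u64 so a consistent snapshot can be copied in a single access.

#include <stdint.h>
#include <stdio.h>

union perf_cached_sketch {
        struct {
                uint8_t highest_perf;
                uint8_t nominal_perf;
                uint8_t lowest_perf;
                uint8_t min_limit_perf;
                uint8_t max_limit_perf;
                uint8_t bios_min_perf;
        };
        uint64_t val;
};

int main(void)
{
        union perf_cached_sketch a = { .val = 0 }, b;

        a.nominal_perf = 120;
        a.bios_min_perf = 30;

        b.val = a.val;          /* one 64-bit copy snapshots every field */
        printf("nominal=%u bios_min=%u\n", b.nominal_perf, b.bios_min_perf);
        return 0;
}
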
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 59b19b9975e8..13fed4b9e02b 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -129,7 +129,7 @@ static int __init amd_freq_sensitivity_init(void)
pci_dev_put(pcidev);
}
- if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+ if (rdmsrq_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
return -ENODEV;
if (!(val >> CLASS_CODE_SHIFT))
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index 4994c86feb57..b1d29b7af232 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -134,11 +134,17 @@ static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct apple_cpu_priv *priv = policy->driver_data;
+ struct cpufreq_policy *policy;
+ struct apple_cpu_priv *priv;
struct cpufreq_frequency_table *p;
unsigned int pstate;
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+
if (priv->info->cur_pstate_mask) {
u32 reg = readl_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index b3d74f9adcf0..b7c688a5659c 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -747,7 +747,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
int ret;
if (!policy)
- return -ENODEV;
+ return 0;
cpu_data = policy->driver_data;
@@ -808,10 +808,119 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
+
+static ssize_t show_auto_select(struct cpufreq_policy *policy, char *buf)
+{
+ bool val;
+ int ret;
+
+ ret = cppc_get_auto_sel(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", val);
+}
+
+static ssize_t store_auto_select(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ bool val;
+ int ret;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_auto_sel(policy->cpu, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_auto_act_window(struct cpufreq_policy *policy, char *buf)
+{
+ u64 val;
+ int ret;
+
+ ret = cppc_get_auto_act_window(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_auto_act_window(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ u64 usec;
+ int ret;
+
+ ret = kstrtou64(buf, 0, &usec);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_auto_act_window(policy->cpu, usec);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_energy_performance_preference_val(struct cpufreq_policy *policy, char *buf)
+{
+ u64 val;
+ int ret;
+
+ ret = cppc_get_epp_perf(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_energy_performance_preference_val(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ u64 val;
+ int ret;
+
+ ret = kstrtou64(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_epp(policy->cpu, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
cpufreq_freq_attr_ro(freqdomain_cpus);
+cpufreq_freq_attr_rw(auto_select);
+cpufreq_freq_attr_rw(auto_act_window);
+cpufreq_freq_attr_rw(energy_performance_preference_val);
static struct freq_attr *cppc_cpufreq_attr[] = {
&freqdomain_cpus,
+ &auto_select,
+ &auto_act_window,
+ &energy_performance_preference_val,
NULL,
};
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 2aa00769cf09..a010da0f6337 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -175,6 +175,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sm8350", },
{ .compatible = "qcom,sm8450", },
{ .compatible = "qcom,sm8550", },
+ { .compatible = "qcom,sm8650", },
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 3841c9da6cac..d7426e1d8bdd 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -255,51 +255,6 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
-/**
- * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
- * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
- */
-void cpufreq_cpu_release(struct cpufreq_policy *policy)
-{
- if (WARN_ON(!policy))
- return;
-
- lockdep_assert_held(&policy->rwsem);
-
- up_write(&policy->rwsem);
-
- cpufreq_cpu_put(policy);
-}
-
-/**
- * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
- * @cpu: CPU to find the policy for.
- *
- * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
- * if the policy returned by it is not NULL, acquire its rwsem for writing.
- * Return the policy if it is active or release it and return NULL otherwise.
- *
- * The policy returned by this function has to be released with the help of
- * cpufreq_cpu_release() in order to release its rwsem and balance its usage
- * counter properly.
- */
-struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
- if (!policy)
- return NULL;
-
- down_write(&policy->rwsem);
-
- if (policy_is_inactive(policy)) {
- cpufreq_cpu_release(policy);
- return NULL;
- }
-
- return policy;
-}
-
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -536,16 +491,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ unsigned int relation)
{
unsigned int idx;
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (!policy->freq_table)
return target_freq;
- idx = cpufreq_frequency_table_target(policy, target_freq, relation);
+ idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
policy->cached_resolved_idx = idx;
policy->cached_target_freq = target_freq;
return policy->freq_table[idx].frequency;
@@ -565,7 +522,21 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
+ unsigned int min = READ_ONCE(policy->min);
+ unsigned int max = READ_ONCE(policy->max);
+
+ /*
+ * If this function runs in parallel with cpufreq_set_policy(), it may
+ * read policy->min before the update and policy->max after the update
+ * or the other way around, so there is no ordering guarantee.
+ *
+ * Resolve this by always honoring the max (in case it comes from
+ * thermal throttling or similar).
+ */
+ if (unlikely(min > max))
+ min = max;
+
+ return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
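
A standalone sketch (userspace C, not part of the patch) of the clamping rule in cpufreq_driver_resolve_freq() above: if a racy, unlocked read leaves min above max, the max limit wins before the usual clamp.

#include <stdio.h>

static unsigned int resolve(unsigned int target, unsigned int min, unsigned int max)
{
        if (min > max)          /* inconsistent snapshot: honor the max (thermal) limit */
                min = max;
        if (target < min)
                target = min;
        if (target > max)
                target = max;
        return target;
}

int main(void)
{
        /* racy snapshot: stale min 2000000 kHz, new thermal max 1500000 kHz */
        printf("%u\n", resolve(1800000, 2000000, 1500000));     /* 1500000 */
        return 0;
}
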
@@ -620,6 +591,22 @@ static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
return sysfs_emit(buf, "%d\n", policy->boost_enabled);
}
+static int policy_set_boost(struct cpufreq_policy *policy, bool enable)
+{
+ int ret;
+
+ if (policy->boost_enabled == enable)
+ return 0;
+
+ policy->boost_enabled = enable;
+
+ ret = cpufreq_driver->set_boost(policy, enable);
+ if (ret)
+ policy->boost_enabled = !policy->boost_enabled;
+
+ return ret;
+}
+
static ssize_t store_local_boost(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
@@ -635,21 +622,11 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
if (!policy->boost_supported)
return -EINVAL;
- if (policy->boost_enabled == enable)
+ ret = policy_set_boost(policy, enable);
+ if (!ret)
return count;
- policy->boost_enabled = enable;
-
- cpus_read_lock();
- ret = cpufreq_driver->set_boost(policy, enable);
- cpus_read_unlock();
-
- if (ret) {
- policy->boost_enabled = !policy->boost_enabled;
- return ret;
- }
-
- return count;
+ return ret;
}
static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
@@ -829,7 +806,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
- char str_governor[16];
+ char str_governor[CPUFREQ_NAME_LEN];
int ret;
ret = sscanf(buf, "%15s", str_governor);
@@ -940,9 +917,9 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
if (!policy->governor || !policy->governor->store_setspeed)
return -EINVAL;
- ret = sscanf(buf, "%u", &freq);
- if (ret != 1)
- return -EINVAL;
+ ret = kstrtouint(buf, 0, &freq);
+ if (ret)
+ return ret;
policy->governor->store_setspeed(policy, freq);
@@ -1009,17 +986,16 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EBUSY;
if (!fattr->show)
return -EIO;
- down_read(&policy->rwsem);
+ guard(cpufreq_policy_read)(policy);
+
if (likely(!policy_is_inactive(policy)))
- ret = fattr->show(policy, buf);
- up_read(&policy->rwsem);
+ return fattr->show(policy, buf);
- return ret;
+ return -EBUSY;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
@@ -1027,17 +1003,16 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EBUSY;
if (!fattr->store)
return -EIO;
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
+
if (likely(!policy_is_inactive(policy)))
- ret = fattr->store(policy, buf, count);
- up_write(&policy->rwsem);
+ return fattr->store(policy, buf, count);
- return ret;
+ return -EBUSY;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
@@ -1195,7 +1170,8 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (cpumask_test_cpu(cpu, policy->cpus))
return 0;
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
+
if (has_target())
cpufreq_stop_governor(policy);
@@ -1206,7 +1182,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
- up_write(&policy->rwsem);
+
return ret;
}
@@ -1226,9 +1202,10 @@ static void handle_update(struct work_struct *work)
container_of(work, struct cpufreq_policy, update);
pr_debug("handle_update for cpu %u called\n", policy->cpu);
- down_write(&policy->rwsem);
+
+ guard(cpufreq_policy_write)(policy);
+
refresh_frequency_limits(policy);
- up_write(&policy->rwsem);
}
static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
@@ -1254,11 +1231,11 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
struct kobject *kobj;
struct completion *cmp;
- down_write(&policy->rwsem);
- cpufreq_stats_free_table(policy);
- kobj = &policy->kobj;
- cmp = &policy->kobj_unregister;
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ cpufreq_stats_free_table(policy);
+ kobj = &policy->kobj;
+ cmp = &policy->kobj_unregister;
+ }
kobject_put(kobj);
/*
@@ -1334,7 +1311,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
init_waitqueue_head(&policy->transition_wait);
INIT_WORK(&policy->update, handle_update);
- policy->cpu = cpu;
return policy;
err_min_qos_notifier:
@@ -1403,35 +1379,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
kfree(policy);
}
-static int cpufreq_online(unsigned int cpu)
+static int cpufreq_policy_online(struct cpufreq_policy *policy,
+ unsigned int cpu, bool new_policy)
{
- struct cpufreq_policy *policy;
- bool new_policy;
unsigned long flags;
unsigned int j;
int ret;
- pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
-
- /* Check if this CPU already has a policy to manage it */
- policy = per_cpu(cpufreq_cpu_data, cpu);
- if (policy) {
- WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
- if (!policy_is_inactive(policy))
- return cpufreq_add_policy_cpu(policy, cpu);
+ guard(cpufreq_policy_write)(policy);
- /* This is the only online CPU for the policy. Start over. */
- new_policy = false;
- down_write(&policy->rwsem);
- policy->cpu = cpu;
- policy->governor = NULL;
- } else {
- new_policy = true;
- policy = cpufreq_policy_alloc(cpu);
- if (!policy)
- return -ENOMEM;
- down_write(&policy->rwsem);
- }
+ policy->cpu = cpu;
+ policy->governor = NULL;
if (!new_policy && cpufreq_driver->online) {
/* Recover policy->cpus using related_cpus */
@@ -1454,7 +1412,7 @@ static int cpufreq_online(unsigned int cpu)
if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__);
- goto out_free_policy;
+ goto out_clear_policy;
}
/*
@@ -1605,7 +1563,55 @@ static int cpufreq_online(unsigned int cpu)
goto out_destroy_policy;
}
- up_write(&policy->rwsem);
+ return 0;
+
+out_destroy_policy:
+ for_each_cpu(j, policy->real_cpus)
+ remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
+
+out_offline_policy:
+ if (cpufreq_driver->offline)
+ cpufreq_driver->offline(policy);
+
+out_exit_policy:
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+
+out_clear_policy:
+ cpumask_clear(policy->cpus);
+
+ return ret;
+}
+
+static int cpufreq_online(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ bool new_policy;
+ int ret;
+
+ pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
+
+ /* Check if this CPU already has a policy to manage it */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (policy) {
+ WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+ if (!policy_is_inactive(policy))
+ return cpufreq_add_policy_cpu(policy, cpu);
+
+ /* This is the only online CPU for the policy. Start over. */
+ new_policy = false;
+ } else {
+ new_policy = true;
+ policy = cpufreq_policy_alloc(cpu);
+ if (!policy)
+ return -ENOMEM;
+ }
+
+ ret = cpufreq_policy_online(policy, cpu, new_policy);
+ if (ret) {
+ cpufreq_policy_free(policy);
+ return ret;
+ }
kobject_uevent(&policy->kobj, KOBJ_ADD);
@@ -1617,41 +1623,24 @@ static int cpufreq_online(unsigned int cpu)
if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
- /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+ /*
+ * Let the per-policy boost flag mirror the cpufreq_driver boost during
+ * initialization for a new policy. For an existing policy, maintain the
+ * previous boost value unless global boost is disabled.
+ */
if (cpufreq_driver->set_boost && policy->boost_supported &&
- policy->boost_enabled != cpufreq_boost_enabled()) {
- policy->boost_enabled = cpufreq_boost_enabled();
- ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
+ (new_policy || !cpufreq_boost_enabled())) {
+ ret = policy_set_boost(policy, cpufreq_boost_enabled());
if (ret) {
/* If the set_boost fails, the online operation is not affected */
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
- str_enable_disable(policy->boost_enabled));
- policy->boost_enabled = !policy->boost_enabled;
+ str_enable_disable(cpufreq_boost_enabled()));
}
}
pr_debug("initialization complete\n");
return 0;
-
-out_destroy_policy:
- for_each_cpu(j, policy->real_cpus)
- remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
-
-out_offline_policy:
- if (cpufreq_driver->offline)
- cpufreq_driver->offline(policy);
-
-out_exit_policy:
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
-
-out_free_policy:
- cpumask_clear(policy->cpus);
- up_write(&policy->rwsem);
-
- cpufreq_policy_free(policy);
- return ret;
}
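The boost-mirroring comment in this hunk reduces to a single condition; restated as a tiny standalone helper (hypothetical name, for illustration only):

#include <stdbool.h>

/* Sync the per-policy boost flag with the global flag only for a new
 * policy, or when global boost has been disabled. */
static bool should_sync_boost(bool new_policy, bool global_boost_enabled)
{
	return new_policy || !global_boost_enabled;
}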
/**
@@ -1741,11 +1730,10 @@ static int cpufreq_offline(unsigned int cpu)
return 0;
}
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
__cpufreq_offline(cpu, policy);
- up_write(&policy->rwsem);
return 0;
}
@@ -1762,33 +1750,29 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (!policy)
return;
- down_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ if (cpu_online(cpu))
+ __cpufreq_offline(cpu, policy);
- if (cpu_online(cpu))
- __cpufreq_offline(cpu, policy);
+ remove_cpu_dev_symlink(policy, cpu, dev);
- remove_cpu_dev_symlink(policy, cpu, dev);
+ if (!cpumask_empty(policy->real_cpus))
+ return;
- if (!cpumask_empty(policy->real_cpus)) {
- up_write(&policy->rwsem);
- return;
- }
+ /*
+ * Unregister cpufreq cooling once all the CPUs of the policy
+ * are removed.
+ */
+ if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+ cpufreq_cooling_unregister(policy->cdev);
+ policy->cdev = NULL;
+ }
- /*
- * Unregister cpufreq cooling once all the CPUs of the policy are
- * removed.
- */
- if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
- cpufreq_cooling_unregister(policy->cdev);
- policy->cdev = NULL;
+ /* We did light-weight exit earlier, do full tear down now */
+ if (cpufreq_driver->offline && cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
}
- /* We did light-weight exit earlier, do full tear down now */
- if (cpufreq_driver->offline && cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
-
- up_write(&policy->rwsem);
-
cpufreq_policy_free(policy);
}
@@ -1858,27 +1842,26 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy;
- unsigned int ret_freq = 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
- ret_freq = cpufreq_driver->get(cpu);
+ unsigned int ret_freq = cpufreq_driver->get(cpu);
+
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
return ret_freq;
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
policy = cpufreq_cpu_get(cpu);
- if (policy) {
- ret_freq = policy->cur;
- cpufreq_cpu_put(policy);
- }
+ if (policy)
+ return policy->cur;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get);
@@ -1890,15 +1873,13 @@ EXPORT_SYMBOL(cpufreq_quick_get);
*/
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
- if (policy) {
- ret_freq = policy->max;
- cpufreq_cpu_put(policy);
- }
+ policy = cpufreq_cpu_get(cpu);
+ if (policy)
+ return policy->max;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
@@ -1910,15 +1891,13 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
*/
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
- if (policy) {
- ret_freq = policy->cpuinfo.max_freq;
- cpufreq_cpu_put(policy);
- }
+ policy = cpufreq_cpu_get(cpu);
+ if (policy)
+ return policy->cpuinfo.max_freq;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
@@ -1938,19 +1917,18 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
- if (policy) {
- down_read(&policy->rwsem);
- if (cpufreq_driver->get)
- ret_freq = __cpufreq_get(policy);
- up_read(&policy->rwsem);
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return 0;
- cpufreq_cpu_put(policy);
- }
+ guard(cpufreq_policy_read)(policy);
+
+ if (cpufreq_driver->get)
+ return __cpufreq_get(policy);
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_get);
@@ -2009,9 +1987,9 @@ void cpufreq_suspend(void)
for_each_active_policy(policy) {
if (has_target()) {
- down_write(&policy->rwsem);
- cpufreq_stop_governor(policy);
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ cpufreq_stop_governor(policy);
+ }
}
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
@@ -2052,9 +2030,9 @@ void cpufreq_resume(void)
pr_err("%s: Failed to resume driver: %s\n", __func__,
cpufreq_driver->name);
} else if (has_target()) {
- down_write(&policy->rwsem);
- ret = cpufreq_start_governor(policy);
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ ret = cpufreq_start_governor(policy);
+ }
if (ret)
pr_err("%s: Failed to start governor for CPU%u's policy\n",
@@ -2384,7 +2362,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
- target_freq = __resolve_freq(policy, target_freq, relation);
+ target_freq = __resolve_freq(policy, target_freq, policy->min,
+ policy->max, relation);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
@@ -2421,15 +2400,9 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
- int ret;
-
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
- ret = __cpufreq_driver_target(policy, target_freq, relation);
-
- up_write(&policy->rwsem);
-
- return ret;
+ return __cpufreq_driver_target(policy, target_freq, relation);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
@@ -2601,31 +2574,6 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
* POLICY INTERFACE *
*********************************************************************/
-/**
- * cpufreq_get_policy - get the current cpufreq_policy
- * @policy: struct cpufreq_policy into which the current cpufreq_policy
- * is written
- * @cpu: CPU to find the policy for
- *
- * Reads the current cpufreq policy.
- */
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
-{
- struct cpufreq_policy *cpu_policy;
- if (!policy)
- return -EINVAL;
-
- cpu_policy = cpufreq_cpu_get(cpu);
- if (!cpu_policy)
- return -EINVAL;
-
- memcpy(policy, cpu_policy, sizeof(*policy));
-
- cpufreq_cpu_put(cpu_policy);
- return 0;
-}
-EXPORT_SYMBOL(cpufreq_get_policy);
-
DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
/**
@@ -2708,11 +2656,18 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
	 * Resolve policy min/max to available frequencies, so that the
	 * resolved frequency will neither overshoot the requested maximum
	 * nor undershoot the requested minimum.
+ *
+ * Avoid storing intermediate values in policy->max or policy->min and
+ * compiler optimizations around them because they may be accessed
+ * concurrently by cpufreq_driver_resolve_freq() during the update.
*/
- policy->min = new_data.min;
- policy->max = new_data.max;
- policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
- policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
+ WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
+ new_data.min, new_data.max,
+ CPUFREQ_RELATION_H));
+ new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
+ new_data.max, CPUFREQ_RELATION_L);
+ WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
+
trace_cpu_frequency_limits(policy);
cpufreq_update_pressure(policy);
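On the writer side, the comment above asks that only final, resolved values ever reach policy->min and policy->max. A hedged sketch of that publish pattern, with C11 atomic stores standing in for WRITE_ONCE() (illustration only; a concurrent reader can still see a mixed old/new pair, which is why the reader clamps min to max as shown earlier):

#include <stdatomic.h>

/* Sketch: compute the resolved limits in locals first, then publish
 * them with single stores, so no intermediate value is ever visible. */
static void publish_limits_sketch(_Atomic unsigned int *pmin,
				  _Atomic unsigned int *pmax,
				  unsigned int new_min, unsigned int new_max)
{
	if (new_min > new_max)
		new_min = new_max;

	atomic_store_explicit(pmax, new_max, memory_order_relaxed);
	atomic_store_explicit(pmin, new_min, memory_order_relaxed);
}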
@@ -2769,6 +2724,21 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
return ret;
}
+static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
+{
+ guard(cpufreq_policy_write)(policy);
+
+ /*
+ * BIOS might change freq behind our back
+ * -> ask driver for current freq and notify governors about a change
+ */
+ if (cpufreq_driver->get && has_target() &&
+ (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
+ return;
+
+ refresh_frequency_limits(policy);
+}
+
/**
* cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
* @cpu: CPU to re-evaluate the policy for.
@@ -2780,23 +2750,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
+ policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
- /*
- * BIOS might change freq behind our back
- * -> ask driver for current freq and notify governors about a change
- */
- if (cpufreq_driver->get && has_target() &&
- (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
- goto unlock;
-
- refresh_frequency_limits(policy);
-
-unlock:
- cpufreq_cpu_release(policy);
+ cpufreq_policy_refresh(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
@@ -2805,7 +2765,7 @@ EXPORT_SYMBOL(cpufreq_update_policy);
* @cpu: CPU to update the policy limits for.
*
* Invoke the driver's ->update_limits callback if present or call
- * cpufreq_update_policy() for @cpu.
+ * cpufreq_policy_refresh() for @cpu.
*/
void cpufreq_update_limits(unsigned int cpu)
{
@@ -2816,9 +2776,9 @@ void cpufreq_update_limits(unsigned int cpu)
return;
if (cpufreq_driver->update_limits)
- cpufreq_driver->update_limits(cpu);
+ cpufreq_driver->update_limits(policy);
else
- cpufreq_update_policy(cpu);
+ cpufreq_policy_refresh(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
@@ -2852,8 +2812,10 @@ static int cpufreq_boost_trigger_state(int state)
unsigned long flags;
int ret = 0;
- if (cpufreq_driver->boost_enabled == state)
- return 0;
+ /*
+ * Don't compare 'cpufreq_driver->boost_enabled' with 'state' here to
+	 * make sure all policies are in sync with the global boost flag.
+ */
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver->boost_enabled = state;
@@ -2864,12 +2826,9 @@ static int cpufreq_boost_trigger_state(int state)
if (!policy->boost_supported)
continue;
- policy->boost_enabled = state;
- ret = cpufreq_driver->set_boost(policy, state);
- if (ret) {
- policy->boost_enabled = !policy->boost_enabled;
+ ret = policy_set_boost(policy, state);
+ if (ret)
goto err_reset_state;
- }
}
cpus_read_unlock();
@@ -3094,6 +3053,36 @@ static int __init cpufreq_core_init(void)
return 0;
}
+
+static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
+{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy) {
+ pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
+ return false;
+ }
+
+ return sugov_is_governor(policy);
+}
+
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask)
+{
+ unsigned int cpu;
+
+ /* Do not attempt EAS if schedutil is not being used. */
+ for_each_cpu(cpu, cpu_mask) {
+ if (!cpufreq_policy_is_good_for_eas(cpu)) {
+ pr_debug("rd %*pbl: schedutil is mandatory for EAS\n",
+ cpumask_pr_args(cpu_mask));
+ return false;
+ }
+ }
+
+ return true;
+}
+
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index a7c38b8b3e78..0e65d37c9231 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -76,7 +76,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
return freq_next;
}
- index = cpufreq_frequency_table_target(policy, freq_next, relation);
+ index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
+ policy->max, relation);
freq_req = freq_table[index].frequency;
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index d23a97ba6478..320a0af2266a 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -225,12 +225,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
/* Enable Enhanced PowerSaver */
- rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ rdmsrq(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
- wrmsrl(MSR_IA32_MISC_ENABLE, val);
+ wrmsrq(MSR_IA32_MISC_ENABLE, val);
/* Can be locked at 0 */
- rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ rdmsrq(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
pr_info("Can't enable Enhanced PowerSaver\n");
return -ENODEV;
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 36494b855e41..fc5a58088b35 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -21,7 +21,6 @@
#include <linux/cpufreq.h>
#include <asm/cpu_device_id.h>
-#include <asm/msr.h>
#include <linux/timex.h>
#include <linux/io.h>
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index c03a91502f84..35de513af6c9 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -115,8 +115,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int target_freq, unsigned int min,
+ unsigned int max, unsigned int relation)
{
struct cpufreq_frequency_table optimal = {
.driver_data = ~0,
@@ -147,7 +147,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
- if ((freq < policy->min) || (freq > policy->max))
+ if (freq < min || freq > max)
continue;
if (freq == target_freq) {
optimal.driver_data = i;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4aad79d26c64..64587d318267 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -221,6 +221,7 @@ struct global_params {
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
* @suspended: Whether or not the driver has been suspended.
+ * @pd_registered: Set when a perf domain is registered for this CPU.
* @hwp_notify_work: workqueue for HWP notifications.
*
* This structure stores per CPU instance data for all CPUs.
@@ -260,6 +261,9 @@ struct cpudata {
unsigned int sched_flags;
u32 hwp_boost_min;
bool suspended;
+#ifdef CONFIG_ENERGY_MODEL
+ bool pd_registered;
+#endif
struct delayed_work hwp_notify_work;
};
@@ -303,6 +307,7 @@ static bool hwp_is_hybrid;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
+#define INTEL_PSTATE_CORE_SCALING 100000
#define HYBRID_SCALING_FACTOR_ADL 78741
#define HYBRID_SCALING_FACTOR_MTL 80000
#define HYBRID_SCALING_FACTOR_LNL 86957
@@ -311,7 +316,7 @@ static int hybrid_scaling_factor;
static inline int core_get_scaling(void)
{
- return 100000;
+ return INTEL_PSTATE_CORE_SCALING;
}
#ifdef CONFIG_ACPI
@@ -598,7 +603,10 @@ static bool turbo_is_disabled(void)
{
u64 misc_en;
- rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+ if (!cpu_feature_enabled(X86_FEATURE_IDA))
+ return true;
+
+ rdmsrq(MSR_IA32_MISC_ENABLE, misc_en);
return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}
@@ -620,7 +628,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
- ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ ret = rdmsrq_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret)
return (s16)ret;
@@ -637,7 +645,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
* MSR_HWP_REQUEST, so need to read and get EPP.
*/
if (!hwp_req_data) {
- epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
+ epp = rdmsrq_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
&hwp_req_data);
if (epp)
return epp;
@@ -659,12 +667,12 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
- ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret)
return ret;
epb = (epb & ~0x0f) | pref;
- wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
+ wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
return 0;
}
@@ -762,7 +770,7 @@ static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
* function, so it cannot run in parallel with the update below.
*/
WRITE_ONCE(cpu->hwp_req_cached, value);
- ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ ret = wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
if (!ret)
cpu->epp_cached = epp;
@@ -916,7 +924,7 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
if (ratio <= 0) {
u64 cap;
- rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
+ rdmsrq_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
ratio = HWP_GUARANTEED_PERF(cap);
}
@@ -945,12 +953,124 @@ static struct cpudata *hybrid_max_perf_cpu __read_mostly;
*/
static DEFINE_MUTEX(hybrid_capacity_lock);
+#ifdef CONFIG_ENERGY_MODEL
+#define HYBRID_EM_STATE_COUNT 4
+
+static int hybrid_active_power(struct device *dev, unsigned long *power,
+ unsigned long *freq)
+{
+ /*
+ * Create "utilization bins" of 0-40%, 40%-60%, 60%-80%, and 80%-100%
+ * of the maximum capacity such that two CPUs of the same type will be
+ * regarded as equally attractive if the utilization of each of them
+ * falls into the same bin, which should prevent tasks from being
+ * migrated between them too often.
+ *
+ * For this purpose, return the "frequency" of 2 for the first
+ * performance level and otherwise leave the value set by the caller.
+ */
+ if (!*freq)
+ *freq = 2;
+
+ /* No power information. */
+ *power = EM_MAX_POWER;
+
+ return 0;
+}
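To ground the binning comment above: with HYBRID_EM_STATE_COUNT set to 4, utilization is effectively sorted into the 0-40%, 40-60%, 60-80% and 80-100% ranges of maximum capacity. A small standalone sketch of that mapping (an assumption for illustration; not how the energy model computes it internally):

/* Map a utilization/capacity pair to one of the four bins named in the
 * comment above. Assumes util <= max_cap and max_cap > 0. */
static int util_bin_sketch(unsigned int util, unsigned int max_cap)
{
	unsigned int pct = 100U * util / max_cap;

	if (pct < 40)
		return 0;
	if (pct < 60)
		return 1;
	if (pct < 80)
		return 2;
	return 3;
}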
+
+static int hybrid_get_cost(struct device *dev, unsigned long freq,
+ unsigned long *cost)
+{
+ struct pstate_data *pstate = &all_cpu_data[dev->id]->pstate;
+ struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(dev->id);
+
+ /*
+ * The smaller the perf-to-frequency scaling factor, the larger the IPC
+ * ratio between the given CPU and the least capable CPU in the system.
+ * Regard that IPC ratio as the primary cost component and assume that
+ * the scaling factors for different CPU types will differ by at least
+ * 5% and they will not be above INTEL_PSTATE_CORE_SCALING.
+ *
+ * Add the freq value to the cost, so that the cost of running on CPUs
+ * of the same type in different "utilization bins" is different.
+ */
+ *cost = div_u64(100ULL * INTEL_PSTATE_CORE_SCALING, pstate->scaling) + freq;
+ /*
+ * Increase the cost slightly for CPUs able to access L3 to avoid
+ * touching it in case some other CPUs of the same type can do the work
+ * without it.
+ */
+ if (cacheinfo) {
+ unsigned int i;
+
+ /* Check if L3 cache is there. */
+ for (i = 0; i < cacheinfo->num_leaves; i++) {
+ if (cacheinfo->info_list[i].level == 3) {
+ *cost += 2;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
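To make the cost formula above concrete, a short standalone program evaluating only its IPC term for two scaling factors taken from the constants defined earlier in this file (the "+ freq" bin term and the L3 adjustment are omitted; illustration only):

#include <stdio.h>

int main(void)
{
	/* INTEL_PSTATE_CORE_SCALING and the ADL hybrid scaling factor
	 * from above; a smaller scaling factor yields a larger cost. */
	unsigned long long core_scaling = 100000;
	unsigned long long scalings[] = { 100000, 78741 };

	for (int i = 0; i < 2; i++)
		printf("scaling %llu -> IPC cost term %llu\n", scalings[i],
		       100ULL * core_scaling / scalings[i]);

	return 0;	/* prints 100 and 127 */
}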
+
+static bool hybrid_register_perf_domain(unsigned int cpu)
+{
+ static const struct em_data_callback cb
+ = EM_ADV_DATA_CB(hybrid_active_power, hybrid_get_cost);
+ struct cpudata *cpudata = all_cpu_data[cpu];
+ struct device *cpu_dev;
+
+ /*
+ * Registering EM perf domains without enabling asymmetric CPU capacity
+ * support is not really useful and one domain should not be registered
+ * more than once.
+ */
+ if (!hybrid_max_perf_cpu || cpudata->pd_registered)
+ return false;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return false;
+
+ if (em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
+ cpumask_of(cpu), false))
+ return false;
+
+ cpudata->pd_registered = true;
+
+ return true;
+}
+
+static void hybrid_register_all_perf_domains(void)
+{
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu)
+ hybrid_register_perf_domain(cpu);
+}
+
+static void hybrid_update_perf_domain(struct cpudata *cpu)
+{
+ if (cpu->pd_registered)
+ em_adjust_cpu_capacity(cpu->cpu);
+}
+#else /* !CONFIG_ENERGY_MODEL */
+static inline bool hybrid_register_perf_domain(unsigned int cpu) { return false; }
+static inline void hybrid_register_all_perf_domains(void) {}
+static inline void hybrid_update_perf_domain(struct cpudata *cpu) {}
+#endif /* CONFIG_ENERGY_MODEL */
+
static void hybrid_set_cpu_capacity(struct cpudata *cpu)
{
arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf,
hybrid_max_perf_cpu->capacity_perf,
cpu->capacity_perf,
cpu->pstate.max_pstate_physical);
+ hybrid_update_perf_domain(cpu);
+
+ topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu));
pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu,
cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf,
@@ -1039,6 +1159,11 @@ static void hybrid_refresh_cpu_capacity_scaling(void)
guard(mutex)(&hybrid_capacity_lock);
__hybrid_refresh_cpu_capacity_scaling();
+ /*
+ * Perf domains are not registered before setting hybrid_max_perf_cpu,
+ * so register them all after setting up CPU capacity scaling.
+ */
+ hybrid_register_all_perf_domains();
}
static void hybrid_init_cpu_capacity_scaling(bool refresh)
@@ -1066,7 +1191,7 @@ static void hybrid_init_cpu_capacity_scaling(bool refresh)
hybrid_refresh_cpu_capacity_scaling();
/*
* Disabling ITMT causes sched domains to be rebuilt to disable asym
- * packing and enable asym capacity.
+ * packing and enable asym capacity and EAS.
*/
sched_clear_itmt_support();
}
@@ -1088,7 +1213,7 @@ static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
u64 cap;
- rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
+ rdmsrq_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
WRITE_ONCE(cpu->hwp_cap_cached, cap);
cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
@@ -1144,6 +1269,14 @@ static void hybrid_update_capacity(struct cpudata *cpu)
}
hybrid_set_cpu_capacity(cpu);
+ /*
+ * If the CPU was offline to start with and it is going online for the
+ * first time, a perf domain needs to be registered for it if hybrid
+ * capacity scaling has been enabled already. In that case, sched
+ * domains need to be rebuilt to take the new perf domain into account.
+ */
+ if (hybrid_register_perf_domain(cpu->cpu))
+ em_rebuild_sched_domains();
unlock:
mutex_unlock(&hybrid_capacity_lock);
@@ -1162,7 +1295,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
min = max;
- rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+ rdmsrq_on_cpu(cpu, MSR_HWP_REQUEST, &value);
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
@@ -1209,7 +1342,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
}
skip_epp:
WRITE_ONCE(cpu_data->hwp_req_cached, value);
- wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
@@ -1256,7 +1389,7 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP))
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
mutex_lock(&hybrid_capacity_lock);
@@ -1285,7 +1418,7 @@ static void set_power_ctl_ee_state(bool input)
u64 power_ctl;
mutex_lock(&intel_pstate_driver_lock);
- rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
if (input) {
power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
power_ctl_ee_state = POWER_CTL_EE_ENABLE;
@@ -1293,7 +1426,7 @@ static void set_power_ctl_ee_state(bool input)
power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
power_ctl_ee_state = POWER_CTL_EE_DISABLE;
}
- wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ wrmsrq(MSR_IA32_POWER_CTL, power_ctl);
mutex_unlock(&intel_pstate_driver_lock);
}
@@ -1302,7 +1435,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata);
static void intel_pstate_hwp_reenable(struct cpudata *cpu)
{
intel_pstate_hwp_enable(cpu);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
}
static int intel_pstate_suspend(struct cpufreq_policy *policy)
@@ -1353,9 +1486,11 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}
-static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
- struct cpufreq_policy *policy)
+static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
+ struct cpudata *cpudata)
{
+ guard(cpufreq_policy_write)(policy);
+
if (hwp_active)
intel_pstate_get_hwp_cap(cpudata);
@@ -1365,42 +1500,34 @@ static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
refresh_frequency_limits(policy);
}
-static void intel_pstate_update_limits(unsigned int cpu)
+static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
{
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
- struct cpudata *cpudata;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
+ policy = cpufreq_cpu_get(cpudata->cpu);
if (!policy)
- return;
+ return false;
- cpudata = all_cpu_data[cpu];
+ __intel_pstate_update_max_freq(policy, cpudata);
- __intel_pstate_update_max_freq(cpudata, policy);
+ return true;
+}
- /* Prevent the driver from being unregistered now. */
- mutex_lock(&intel_pstate_driver_lock);
+static void intel_pstate_update_limits(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpudata = all_cpu_data[policy->cpu];
- cpufreq_cpu_release(policy);
+ __intel_pstate_update_max_freq(policy, cpudata);
hybrid_update_capacity(cpudata);
-
- mutex_unlock(&intel_pstate_driver_lock);
}
static void intel_pstate_update_limits_for_all(void)
{
int cpu;
- for_each_possible_cpu(cpu) {
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
-
- if (!policy)
- continue;
-
- __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
-
- cpufreq_cpu_release(policy);
- }
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_max_freq(all_cpu_data[cpu]);
mutex_lock(&hybrid_capacity_lock);
@@ -1703,7 +1830,7 @@ static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribut
u64 power_ctl;
int enable;
- rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
return sprintf(buf, "%d\n", !enable);
}
@@ -1840,13 +1967,8 @@ static void intel_pstate_notify_work(struct work_struct *work)
{
struct cpudata *cpudata =
container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
-
- if (policy) {
- __intel_pstate_update_max_freq(cpudata, policy);
-
- cpufreq_cpu_release(policy);
+ if (intel_pstate_update_max_freq(cpudata)) {
/*
* The driver will not be unregistered while this function is
* running, so update the capacity without acquiring the driver
@@ -1855,7 +1977,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
hybrid_update_capacity(cpudata);
}
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
@@ -1877,7 +1999,7 @@ void notify_hwp_interrupt(void)
if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS;
- rdmsrl_safe(MSR_HWP_STATUS, &value);
+ rdmsrq_safe(MSR_HWP_STATUS, &value);
if (!(value & status_mask))
return;
@@ -1894,7 +2016,7 @@ void notify_hwp_interrupt(void)
return;
ack_intr:
- wrmsrl_safe(MSR_HWP_STATUS, 0);
+ wrmsrq_safe(MSR_HWP_STATUS, 0);
raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
}
@@ -1905,8 +2027,8 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
return;
- /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+ /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
raw_spin_lock_irq(&hwp_notify_lock);
cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
@@ -1933,9 +2055,9 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
- /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+ /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
}
@@ -1974,9 +2096,9 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt till we activate again */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
- wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
intel_pstate_enable_hwp_interrupt(cpudata);
@@ -1990,7 +2112,7 @@ static int atom_get_min_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_RATIOS, value);
return (value >> 8) & 0x7F;
}
@@ -1998,7 +2120,7 @@ static int atom_get_max_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_RATIOS, value);
return (value >> 16) & 0x7F;
}
@@ -2006,7 +2128,7 @@ static int atom_get_turbo_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_TURBO_RATIOS, value);
return value & 0x7F;
}
@@ -2041,7 +2163,7 @@ static int silvermont_get_scaling(void)
static int silvermont_freq_table[] = {
83300, 100000, 133300, 116700, 80000};
- rdmsrl(MSR_FSB_FREQ, value);
+ rdmsrq(MSR_FSB_FREQ, value);
i = value & 0x7;
WARN_ON(i > 4);
@@ -2057,7 +2179,7 @@ static int airmont_get_scaling(void)
83300, 100000, 133300, 116700, 80000,
93300, 90000, 88900, 87500};
- rdmsrl(MSR_FSB_FREQ, value);
+ rdmsrq(MSR_FSB_FREQ, value);
i = value & 0xF;
WARN_ON(i > 8);
@@ -2068,7 +2190,7 @@ static void atom_get_vid(struct cpudata *cpudata)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_VIDS, value);
+ rdmsrq(MSR_ATOM_CORE_VIDS, value);
cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
cpudata->vid.ratio = div_fp(
@@ -2076,7 +2198,7 @@ static void atom_get_vid(struct cpudata *cpudata)
int_tofp(cpudata->pstate.max_pstate -
cpudata->pstate.min_pstate));
- rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
+ rdmsrq(MSR_ATOM_CORE_TURBO_VIDS, value);
cpudata->vid.turbo = value & 0x7f;
}
@@ -2084,7 +2206,7 @@ static int core_get_min_pstate(int cpu)
{
u64 value;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 40) & 0xFF;
}
@@ -2092,7 +2214,7 @@ static int core_get_max_pstate_physical(int cpu)
{
u64 value;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 8) & 0xFF;
}
@@ -2106,13 +2228,13 @@ static int core_get_tdp_ratio(int cpu, u64 plat_info)
int err;
/* Get the TDP level (0, 1, 2) to get ratios */
- err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+ err = rdmsrq_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
if (err)
return err;
/* TDP MSR are continuous starting at 0x648 */
tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
- err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
+ err = rdmsrq_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
if (err)
return err;
@@ -2137,7 +2259,7 @@ static int core_get_max_pstate(int cpu)
int tdp_ratio;
int err;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
max_pstate = (plat_info >> 8) & 0xFF;
tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
@@ -2149,7 +2271,7 @@ static int core_get_max_pstate(int cpu)
return tdp_ratio;
}
- err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
+ err = rdmsrq_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
if (!err) {
int tar_levels;
@@ -2169,7 +2291,7 @@ static int core_get_turbo_pstate(int cpu)
u64 value;
int nont, ret;
- rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (value) & 255;
if (ret <= nont)
@@ -2198,7 +2320,7 @@ static int knl_get_turbo_pstate(int cpu)
u64 value;
int nont, ret;
- rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (((value) >> 8) & 0xFF);
if (ret <= nont)
@@ -2209,7 +2331,7 @@ static int knl_get_turbo_pstate(int cpu)
static int hwp_get_cpu_scaling(int cpu)
{
if (hybrid_scaling_factor) {
- struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
u8 cpu_type = c->topo.intel_type;
/*
@@ -2244,7 +2366,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
* the CPU being updated, so force the register update to run on the
* right CPU.
*/
- wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, pstate));
}
@@ -2351,7 +2473,7 @@ static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
return;
hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
- wrmsrl(MSR_HWP_REQUEST, hwp_req);
+ wrmsrq(MSR_HWP_REQUEST, hwp_req);
cpu->last_update = cpu->sample.time;
}
@@ -2364,7 +2486,7 @@ static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
expired = time_after64(cpu->sample.time, cpu->last_update +
hwp_boost_hold_time_ns);
if (expired) {
- wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
+ wrmsrq(MSR_HWP_REQUEST, cpu->hwp_req_cached);
cpu->hwp_boost_min = 0;
}
}
@@ -2425,8 +2547,8 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
u64 tsc;
local_irq_save(flags);
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
+ rdmsrq(MSR_IA32_APERF, aperf);
+ rdmsrq(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
local_irq_restore(flags);
@@ -2520,7 +2642,7 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
return;
cpu->pstate.current_pstate = pstate;
- wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
+ wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}
static void intel_pstate_adjust_pstate(struct cpudata *cpu)
@@ -3100,19 +3222,19 @@ static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
WRITE_ONCE(cpu->hwp_req_cached, value);
if (fast_switch)
- wrmsrl(MSR_HWP_REQUEST, value);
+ wrmsrq(MSR_HWP_REQUEST, value);
else
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
u32 target_pstate, bool fast_switch)
{
if (fast_switch)
- wrmsrl(MSR_IA32_PERF_CTL,
+ wrmsrq(MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
else
- wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
}
@@ -3256,7 +3378,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
intel_pstate_get_hwp_cap(cpu);
- rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
+ rdmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
WRITE_ONCE(cpu->hwp_req_cached, value);
cpu->epp_cached = intel_pstate_get_epp(cpu, value);
@@ -3323,7 +3445,7 @@ static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
* written by it may not be suitable.
*/
value &= ~HWP_DESIRED_PERF(~0L);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
WRITE_ONCE(cpu->hwp_req_cached, value);
}
@@ -3573,7 +3695,7 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
if (id) {
- rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
+ rdmsrq(MSR_MISC_PWR_MGMT, misc_pwr);
if (misc_pwr & BITMASK_OOB) {
pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
@@ -3629,7 +3751,7 @@ static bool intel_pstate_hwp_is_enabled(void)
{
u64 value;
- rdmsrl(MSR_PM_ENABLE, value);
+ rdmsrq(MSR_PM_ENABLE, value);
return !!(value & 0x1);
}
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 68ccd73c8129..ba0e08c8486a 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -136,7 +136,7 @@ static void do_longhaul1(unsigned int mults_index)
{
union msr_bcr2 bcr2;
- rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ rdmsrq(MSR_VIA_BCR2, bcr2.val);
/* Enable software clock multiplier */
bcr2.bits.ESOFTBF = 1;
bcr2.bits.CLOCKMUL = mults_index & 0xff;
@@ -144,16 +144,16 @@ static void do_longhaul1(unsigned int mults_index)
/* Sync to timer tick */
safe_halt();
/* Change frequency on next halt or sleep */
- wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ wrmsrq(MSR_VIA_BCR2, bcr2.val);
/* Invoke transition */
ACPI_FLUSH_CPU_CACHE();
halt();
/* Disable software clock multiplier */
local_irq_disable();
- rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ rdmsrq(MSR_VIA_BCR2, bcr2.val);
bcr2.bits.ESOFTBF = 0;
- wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ wrmsrq(MSR_VIA_BCR2, bcr2.val);
}
/* For processor with Longhaul MSR */
@@ -164,7 +164,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
union msr_longhaul longhaul;
u32 t;
- rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ rdmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Setup new frequency */
if (!revid_errata)
longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
@@ -180,7 +180,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
/* Raise voltage if necessary */
if (can_scale_voltage && dir) {
longhaul.bits.EnableSoftVID = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
@@ -194,12 +194,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
longhaul.bits.EnableSoftVID = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
}
/* Change frequency on next halt or sleep */
longhaul.bits.EnableSoftBusRatio = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
halt();
@@ -212,12 +212,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
}
/* Disable bus ratio bit */
longhaul.bits.EnableSoftBusRatio = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Reduce voltage if necessary */
if (can_scale_voltage && !dir) {
longhaul.bits.EnableSoftVID = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
@@ -231,7 +231,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
longhaul.bits.EnableSoftVID = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
}
}
@@ -534,7 +534,7 @@ static void longhaul_setup_voltagescaling(void)
unsigned int j, speed, pos, kHz_step, numvscales;
int min_vid_speed;
- rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ rdmsrq(MSR_VIA_LONGHAUL, longhaul.val);
if (!(longhaul.bits.RevisionID & 1)) {
pr_info("Voltage scaling not supported by CPU\n");
return;
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index fb2197dc170f..31039330a3ba 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -219,13 +219,13 @@ static void change_FID(int fid)
{
union msr_fidvidctl fidvidctl;
- rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.FID != fid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.FID = fid;
fidvidctl.bits.VIDC = 0;
fidvidctl.bits.FIDC = 1;
- wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
@@ -234,13 +234,13 @@ static void change_VID(int vid)
{
union msr_fidvidctl fidvidctl;
- rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.VID != vid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.VID = vid;
fidvidctl.bits.FIDC = 0;
fidvidctl.bits.VIDC = 1;
- wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
@@ -260,7 +260,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
fid = powernow_table[index].driver_data & 0xFF;
vid = (powernow_table[index].driver_data & 0xFF00) >> 8;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
freqs.old = fsb * fid_codes[cfid] / 10;
@@ -557,7 +557,7 @@ static unsigned int powernow_get(unsigned int cpu)
if (cpu)
return 0;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
return fsb * fid_codes[cfid] / 10;
@@ -598,7 +598,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
if (policy->cpu != 0)
return -ENODEV;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
recalibrate_cpu_khz();
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 4e3ba6e68c32..f7512b4e923e 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -482,7 +482,7 @@ static void check_supported_cpu(void *_rc)
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
if ((edx & P_STATE_TRANSITION_CAPABLE)
!= P_STATE_TRANSITION_CAPABLE) {
- pr_info("Power state transitions not supported\n");
+ pr_info_once("Power state transitions not supported\n");
return;
}
*rc = 0;
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
new file mode 100644
index 000000000000..94ed81644fe1
--- /dev/null
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust based implementation of the cpufreq-dt driver.
+
+use kernel::{
+ c_str,
+ clk::Clk,
+ cpu, cpufreq,
+ cpumask::CpumaskVar,
+ device::{Core, Device},
+ error::code::*,
+ fmt,
+ macros::vtable,
+ module_platform_driver, of, opp, platform,
+ prelude::*,
+ str::CString,
+ sync::Arc,
+};
+
+/// Finds exact supply name from the OF node.
+fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
+ let prop_name = CString::try_from_fmt(fmt!("{}-supply", name)).ok()?;
+ dev.property_present(&prop_name)
+ .then(|| CString::try_from_fmt(fmt!("{name}")).ok())
+ .flatten()
+}
+
+/// Finds supply name for the CPU from DT.
+fn find_supply_names(dev: &Device, cpu: u32) -> Option<KVec<CString>> {
+ // Try "cpu0" for older DTs, fallback to "cpu".
+ let name = (cpu == 0)
+ .then(|| find_supply_name_exact(dev, "cpu0"))
+ .flatten()
+ .or_else(|| find_supply_name_exact(dev, "cpu"))?;
+
+ let mut list = KVec::with_capacity(1, GFP_KERNEL).ok()?;
+ list.push(name, GFP_KERNEL).ok()?;
+
+ Some(list)
+}
+
+/// Represents the cpufreq dt device.
+struct CPUFreqDTDevice {
+ opp_table: opp::Table,
+ freq_table: opp::FreqTable,
+ _mask: CpumaskVar,
+ _token: Option<opp::ConfigToken>,
+ _clk: Clk,
+}
+
+#[derive(Default)]
+struct CPUFreqDTDriver;
+
+#[vtable]
+impl opp::ConfigOps for CPUFreqDTDriver {}
+
+#[vtable]
+impl cpufreq::Driver for CPUFreqDTDriver {
+ const NAME: &'static CStr = c_str!("cpufreq-dt");
+ const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV;
+ const BOOST_ENABLED: bool = true;
+
+ type PData = Arc<CPUFreqDTDevice>;
+
+ fn init(policy: &mut cpufreq::Policy) -> Result<Self::PData> {
+ let cpu = policy.cpu();
+ // SAFETY: The CPU device is only used during init; it won't get hot-unplugged. The cpufreq
+ // core registers with CPU notifiers and the cpufreq core/driver won't use the CPU device
+ // once the CPU is hot-unplugged.
+ let dev = unsafe { cpu::from_cpu(cpu)? };
+ let mut mask = CpumaskVar::new_zero(GFP_KERNEL)?;
+
+ mask.set(cpu);
+
+ let token = find_supply_names(dev, cpu)
+ .map(|names| {
+ opp::Config::<Self>::new()
+ .set_regulator_names(names)?
+ .set(dev)
+ })
+ .transpose()?;
+
+ // Get OPP-sharing information from "operating-points-v2" bindings.
+ let fallback = match opp::Table::of_sharing_cpus(dev, &mut mask) {
+ Ok(()) => false,
+ Err(e) if e == ENOENT => {
+ // "operating-points-v2" not supported. If the platform hasn't
+ // set sharing CPUs, fallback to all CPUs share the `Policy`
+ // for backward compatibility.
+ opp::Table::sharing_cpus(dev, &mut mask).is_err()
+ }
+ Err(e) => return Err(e),
+ };
+
+ // Initialize OPP tables for all policy cpus.
+ //
+ // For platforms not using "operating-points-v2" bindings, we do this
+ // before updating policy cpus. Otherwise, we will end up creating
+ // duplicate OPPs for the CPUs.
+ //
+ // OPPs might be populated at runtime, don't fail for error here unless
+ // it is -EPROBE_DEFER.
+ let mut opp_table = match opp::Table::from_of_cpumask(dev, &mut mask) {
+ Ok(table) => table,
+ Err(e) => {
+ if e == EPROBE_DEFER {
+ return Err(e);
+ }
+
+ // The table might have been added dynamically.
+ opp::Table::from_dev(dev)?
+ }
+ };
+
+ // The OPP table must be initialized, statically or dynamically, by this point.
+ opp_table.opp_count()?;
+
+ // Set sharing cpus for fallback scenario.
+ if fallback {
+ mask.setall();
+ opp_table.set_sharing_cpus(&mut mask)?;
+ }
+
+ let mut transition_latency = opp_table.max_transition_latency_ns() as u32;
+ if transition_latency == 0 {
+ transition_latency = cpufreq::ETERNAL_LATENCY_NS;
+ }
+
+ policy
+ .set_dvfs_possible_from_any_cpu(true)
+ .set_suspend_freq(opp_table.suspend_freq())
+ .set_transition_latency_ns(transition_latency);
+
+ let freq_table = opp_table.cpufreq_table()?;
+ // SAFETY: The `freq_table` is not dropped while it is getting used by the C code.
+ unsafe { policy.set_freq_table(&freq_table) };
+
+ // SAFETY: The returned `clk` is not dropped while it is getting used by the C code.
+ let clk = unsafe { policy.set_clk(dev, None)? };
+
+ mask.copy(policy.cpus());
+
+ Ok(Arc::new(
+ CPUFreqDTDevice {
+ opp_table,
+ freq_table,
+ _mask: mask,
+ _token: token,
+ _clk: clk,
+ },
+ GFP_KERNEL,
+ )?)
+ }
+
+ fn exit(_policy: &mut cpufreq::Policy, _data: Option<Self::PData>) -> Result {
+ Ok(())
+ }
+
+ fn online(_policy: &mut cpufreq::Policy) -> Result {
+ // We did light-weight tear down earlier, nothing to do here.
+ Ok(())
+ }
+
+ fn offline(_policy: &mut cpufreq::Policy) -> Result {
+ // Preserve policy->data and don't free resources on light-weight
+ // tear down.
+ Ok(())
+ }
+
+ fn suspend(policy: &mut cpufreq::Policy) -> Result {
+ policy.generic_suspend()
+ }
+
+ fn verify(data: &mut cpufreq::PolicyData) -> Result {
+ data.generic_verify()
+ }
+
+ fn target_index(policy: &mut cpufreq::Policy, index: cpufreq::TableIndex) -> Result {
+ let Some(data) = policy.data::<Self::PData>() else {
+ return Err(ENOENT);
+ };
+
+ let freq = data.freq_table.freq(index)?;
+ data.opp_table.set_rate(freq)
+ }
+
+ fn get(policy: &mut cpufreq::Policy) -> Result<u32> {
+ policy.generic_get()
+ }
+
+ fn set_boost(_policy: &mut cpufreq::Policy, _state: i32) -> Result {
+ Ok(())
+ }
+
+ fn register_em(policy: &mut cpufreq::Policy) {
+ policy.register_em_opp()
+ }
+}
+
+kernel::of_device_table!(
+ OF_TABLE,
+ MODULE_OF_TABLE,
+ <CPUFreqDTDriver as platform::Driver>::IdInfo,
+ [(of::DeviceId::new(c_str!("operating-points-v2")), ())]
+);
+
+impl platform::Driver for CPUFreqDTDriver {
+ type IdInfo = ();
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+ fn probe(
+ pdev: &platform::Device<Core>,
+ _id_info: Option<&Self::IdInfo>,
+ ) -> Result<Pin<KBox<Self>>> {
+ cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?;
+ Ok(KBox::new(Self {}, GFP_KERNEL)?.into())
+ }
+}
+
+module_platform_driver! {
+ type: CPUFreqDTDriver,
+ name: "cpufreq-dt",
+ author: "Viresh Kumar <viresh.kumar@linaro.org>",
+ description: "Generic CPUFreq DT driver",
+ license: "GPL v2",
+}
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 103d2519dff7..b360f03a116f 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -21,7 +21,6 @@
#include <linux/io.h>
#include <asm/cpu_device_id.h>
-#include <asm/msr.h>
#define MMCR_BASE 0xfffef000 /* The default base address */
#define OFFS_CPUCTL 0x2 /* CPU Control Register */
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index c310aeebc8f3..ef078426bfd5 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -37,11 +37,17 @@ static struct cpufreq_driver scmi_cpufreq_driver;
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct scmi_data *priv = policy->driver_data;
+ struct cpufreq_policy *policy;
+ struct scmi_data *priv;
unsigned long rate;
int ret;
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+
ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
if (ret)
return 0;
@@ -387,6 +393,40 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
.set_boost = cpufreq_boost_set_sw,
};
+static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
+{
+ struct device_node *scmi_np = dev_of_node(scmi_dev);
+ struct device_node *cpu_np, *np;
+ struct device *cpu_dev;
+ int cpu, idx;
+
+ if (!scmi_np)
+ return false;
+
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ continue;
+
+ cpu_np = dev_of_node(cpu_dev);
+
+ np = of_parse_phandle(cpu_np, "clocks", 0);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+
+ idx = of_property_match_string(cpu_np, "power-domain-names", "perf");
+ np = of_parse_phandle(cpu_np, "power-domains", idx);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+ }
+
+ return false;
+}
+
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
int ret;
@@ -395,7 +435,7 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
handle = sdev->handle;
- if (!handle)
+ if (!handle || !scmi_dev_used_by_cpus(dev))
return -ENODEV;
scmi_cpufreq_driver.driver_data = sdev;
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 17cda84f00df..dcbb0ae7dd47 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -29,9 +29,16 @@ static struct scpi_ops *scpi_ops;
static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct scpi_data *priv = policy->driver_data;
- unsigned long rate = clk_get_rate(priv->clk);
+ struct cpufreq_policy *policy;
+ struct scpi_data *priv;
+ unsigned long rate;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+ rate = clk_get_rate(priv->clk);
return rate / 1000;
}
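
Both the scmi and scpi ->get() fixes above follow the same defensive pattern: the per-CPU policy lookup can return NULL while a policy is being created or torn down, so the callback has to bail out (reporting 0 for "unknown") instead of dereferencing driver_data. A minimal standalone C sketch of that guard, using hypothetical lookup and driver-data types rather than the real cpufreq API:

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for the real cpufreq structures. */
struct policy {
	void *driver_data;
};

struct drv_data {
	unsigned long cached_khz;
};

/* May return NULL while the policy is being created or torn down. */
static struct policy *lookup_policy(unsigned int cpu)
{
	(void)cpu;
	return NULL;		/* simulate the race window */
}

static unsigned int get_rate(unsigned int cpu)
{
	struct policy *policy = lookup_policy(cpu);
	struct drv_data *priv;

	if (!policy)		/* the guard added by the fix */
		return 0;	/* report "unknown" rather than crash */

	priv = policy->driver_data;
	return priv->cached_khz;
}

int main(void)
{
	printf("rate for cpu0: %u kHz\n", get_rate(0));
	return 0;
}
```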
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 47d6840b3489..744312a44279 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -194,7 +194,9 @@ static int sun50i_cpufreq_get_efuse(void)
struct nvmem_cell *speedbin_nvmem;
const struct of_device_id *match;
struct device *cpu_dev;
- u32 *speedbin;
+ void *speedbin_ptr;
+ u32 speedbin = 0;
+ size_t len;
int ret;
cpu_dev = get_cpu_device(0);
@@ -217,14 +219,18 @@ static int sun50i_cpufreq_get_efuse(void)
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
- speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
+ speedbin_ptr = nvmem_cell_read(speedbin_nvmem, &len);
nvmem_cell_put(speedbin_nvmem);
- if (IS_ERR(speedbin))
- return PTR_ERR(speedbin);
+ if (IS_ERR(speedbin_ptr))
+ return PTR_ERR(speedbin_ptr);
- ret = opp_data->efuse_xlate(*speedbin);
+ if (len <= 4)
+ memcpy(&speedbin, speedbin_ptr, len);
+ speedbin = le32_to_cpu(speedbin);
- kfree(speedbin);
+ ret = opp_data->efuse_xlate(speedbin);
+
+ kfree(speedbin_ptr);
return ret;
};
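
The sun50i change above reads the speed-bin efuse through a void pointer plus the cell length, copies at most four bytes into a zero-initialised u32 and only then converts from little-endian, so cells narrower than 32 bits no longer read past the buffer. A userspace sketch of the same widening, with le32toh standing in for le32_to_cpu (this is an illustration of the pattern, not the kernel code itself):

```c
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Widen a little-endian cell of 1..4 bytes into a host-order u32. */
static uint32_t cell_to_u32(const void *cell, size_t len)
{
	uint32_t v = 0;

	if (len <= sizeof(v))
		memcpy(&v, cell, len);	/* shorter cells leave high bytes 0 */

	return le32toh(v);
}

int main(void)
{
	const uint8_t two_byte_cell[] = { 0x34, 0x12 };	/* 0x1234 stored LE */

	printf("speedbin = 0x%x\n", cell_to_u32(two_byte_cell, sizeof(two_byte_cell)));
	return 0;
}
```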
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 5fb5228f6bf1..2041f59116ce 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -43,7 +43,7 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
/* OSI mode is enabled, set the corresponding domain state. */
pd_state = state->data;
- psci_set_domain_state(*pd_state);
+ psci_set_domain_state(pd, pd->state_idx, *pd_state);
return 0;
}
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index b46a83f5ffe4..4e1ba35deda9 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -16,7 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/psci.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -36,19 +36,30 @@ struct psci_cpuidle_data {
struct device *dev;
};
+struct psci_cpuidle_domain_state {
+ struct generic_pm_domain *pd;
+ unsigned int state_idx;
+ u32 state;
+};
+
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
-static DEFINE_PER_CPU(u32, domain_state);
+static DEFINE_PER_CPU(struct psci_cpuidle_domain_state, psci_domain_state);
static bool psci_cpuidle_use_syscore;
static bool psci_cpuidle_use_cpuhp;
-void psci_set_domain_state(u32 state)
+void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx,
+ u32 state)
{
- __this_cpu_write(domain_state, state);
+ struct psci_cpuidle_domain_state *ds = this_cpu_ptr(&psci_domain_state);
+
+ ds->pd = pd;
+ ds->state_idx = state_idx;
+ ds->state = state;
}
-static inline u32 psci_get_domain_state(void)
+static inline void psci_clear_domain_state(void)
{
- return __this_cpu_read(domain_state);
+ __this_cpu_write(psci_domain_state.state, 0);
}
static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
@@ -58,7 +69,8 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
u32 *states = data->psci_states;
struct device *pd_dev = data->dev;
- u32 state;
+ struct psci_cpuidle_domain_state *ds;
+ u32 state = states[idx];
int ret;
ret = cpu_pm_enter();
@@ -71,9 +83,9 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
else
pm_runtime_put_sync_suspend(pd_dev);
- state = psci_get_domain_state();
- if (!state)
- state = states[idx];
+ ds = this_cpu_ptr(&psci_domain_state);
+ if (ds->state)
+ state = ds->state;
trace_psci_domain_idle_enter(dev->cpu, state, s2idle);
ret = psci_cpu_suspend_enter(state) ? -1 : idx;
@@ -86,8 +98,12 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
cpu_pm_exit();
+ /* Correct domain-idlestate statistics if we failed to enter. */
+ if (ret == -1 && ds->state)
+ pm_genpd_inc_rejected(ds->pd, ds->state_idx);
+
/* Clear the domain state to start fresh when back from idle. */
- psci_set_domain_state(0);
+ psci_clear_domain_state();
return ret;
}
@@ -121,7 +137,7 @@ static int psci_idle_cpuhp_down(unsigned int cpu)
if (pd_dev) {
pm_runtime_put_sync(pd_dev);
/* Clear domain state to start fresh at next online. */
- psci_set_domain_state(0);
+ psci_clear_domain_state();
}
return 0;
@@ -147,7 +163,7 @@ static void psci_idle_syscore_switch(bool suspend)
/* Clear domain state to re-start fresh. */
if (!cleared) {
- psci_set_domain_state(0);
+ psci_clear_domain_state();
cleared = true;
}
}
@@ -407,14 +423,14 @@ deinit:
* to register cpuidle driver then rollback to cancel all CPUs
* registration.
*/
-static int psci_cpuidle_probe(struct platform_device *pdev)
+static int psci_cpuidle_probe(struct faux_device *fdev)
{
int cpu, ret;
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
for_each_present_cpu(cpu) {
- ret = psci_idle_init_cpu(&pdev->dev, cpu);
+ ret = psci_idle_init_cpu(&fdev->dev, cpu);
if (ret)
goto out_fail;
}
@@ -434,26 +450,36 @@ out_fail:
return ret;
}
-static struct platform_driver psci_cpuidle_driver = {
+static struct faux_device_ops psci_cpuidle_ops = {
.probe = psci_cpuidle_probe,
- .driver = {
- .name = "psci-cpuidle",
- },
};
+static bool __init dt_idle_state_present(void)
+{
+ struct device_node *cpu_node __free(device_node) =
+ of_cpu_device_node_get(cpumask_first(cpu_possible_mask));
+ if (!cpu_node)
+ return false;
+
+ struct device_node *state_node __free(device_node) =
+ of_get_cpu_state_node(cpu_node, 0);
+ if (!state_node)
+ return false;
+
+ return !!of_match_node(psci_idle_state_match, state_node);
+}
+
static int __init psci_idle_init(void)
{
- struct platform_device *pdev;
- int ret;
+ struct faux_device *fdev;
- ret = platform_driver_register(&psci_cpuidle_driver);
- if (ret)
- return ret;
+ if (!dt_idle_state_present())
+ return 0;
- pdev = platform_device_register_simple("psci-cpuidle", -1, NULL, 0);
- if (IS_ERR(pdev)) {
- platform_driver_unregister(&psci_cpuidle_driver);
- return PTR_ERR(pdev);
+ fdev = faux_device_create("psci-cpuidle", NULL, &psci_cpuidle_ops);
+ if (!fdev) {
+ pr_err("Failed to create psci-cpuidle device\n");
+ return -ENODEV;
}
return 0;
diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h
index ef004ec7a7c5..d29cbd796cd5 100644
--- a/drivers/cpuidle/cpuidle-psci.h
+++ b/drivers/cpuidle/cpuidle-psci.h
@@ -4,8 +4,10 @@
#define __CPUIDLE_PSCI_H
struct device_node;
+struct generic_pm_domain;
-void psci_set_domain_state(u32 state);
+void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx,
+ u32 state);
int psci_dt_parse_state_node(struct device_node *np, u32 *state);
#endif /* __CPUIDLE_PSCI_H */
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 39aa0aea61c6..52d5d26fc7c6 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -255,7 +255,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
data->next_timer_ns = KTIME_MAX;
delta_tick = TICK_NSEC / 2;
- data->bucket = which_bucket(KTIME_MAX);
+ data->bucket = BUCKETS - 1;
}
if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index 8fe5e1b47ef9..bfa55c1eab5b 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -19,7 +19,7 @@
*
* Of course, non-timer wakeup sources are more important in some use cases,
* but even then it is generally unnecessary to consider idle duration values
- * greater than the time time till the next timer event, referred as the sleep
+ * greater than the time till the next timer event, referred as the sleep
* length in what follows, because the closest timer will ultimately wake up the
* CPU anyway unless it is woken up earlier.
*
@@ -311,7 +311,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct cpuidle_state *s = &drv->states[i];
/*
- * Update the sums of idle state mertics for all of the states
+ * Update the sums of idle state metrics for all of the states
* shallower than the current one.
*/
intercept_sum += prev_bin->intercepts;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 47082782008a..5686369779be 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -530,13 +530,6 @@ source "drivers/crypto/cavium/nitrox/Kconfig"
source "drivers/crypto/marvell/Kconfig"
source "drivers/crypto/intel/Kconfig"
-config CRYPTO_DEV_CAVIUM_ZIP
- tristate "Cavium ZIP driver"
- depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
- help
- Select this option if you want to enable compression/decompression
- acceleration on Cavium's ARM based SoCs
-
config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c97f0ebc55ec..22eadcc8f4a2 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -8,12 +8,9 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
-obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
-obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
@@ -50,3 +47,4 @@ obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += intel/
obj-y += starfive/
+obj-y += cavium/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 19b7fb4a93e8..f9cf00d690e2 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -33,22 +33,30 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
- algt->stat_fb_maxsg++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_maxsg++;
+
return true;
}
if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) {
- algt->stat_fb_leniv++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_leniv++;
+
return true;
}
if (areq->cryptlen == 0) {
- algt->stat_fb_len0++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_len0++;
+
return true;
}
if (areq->cryptlen % 16) {
- algt->stat_fb_mod16++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_mod16++;
+
return true;
}
@@ -56,12 +64,16 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
sg = areq->src;
while (sg) {
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- algt->stat_fb_srcali++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srcali++;
+
return true;
}
todo = min(len, sg->length);
if (todo % 4) {
- algt->stat_fb_srclen++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srclen++;
+
return true;
}
len -= todo;
@@ -72,12 +84,16 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
sg = areq->dst;
while (sg) {
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- algt->stat_fb_dstali++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_dstali++;
+
return true;
}
todo = min(len, sg->length);
if (todo % 4) {
- algt->stat_fb_dstlen++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_dstlen++;
+
return true;
}
len -= todo;
@@ -100,9 +116,7 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
algt = container_of(alg, struct sun8i_ce_alg_template,
alg.skcipher.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
algt->stat_fb++;
-#endif
}
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
@@ -146,9 +160,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
op->keylen);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_req++;
-#endif
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_req++;
flow = rctx->flow;
@@ -275,13 +288,16 @@ theend_sgs:
} else {
if (nr_sgs > 0)
dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
+
+ if (nr_sgd > 0)
+ dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
}
theend_iv:
if (areq->iv && ivsize > 0) {
- if (rctx->addr_iv)
+ if (!dma_mapping_error(ce->dev, rctx->addr_iv))
dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
memcpy(areq->iv, chan->backup_iv, ivsize);
@@ -434,17 +450,17 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm));
- memcpy(algt->fbname,
- crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
- CRYPTO_MAX_ALG_NAME);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ memcpy(algt->fbname,
+ crypto_skcipher_driver_name(op->fallback_tfm),
+ CRYPTO_MAX_ALG_NAME);
- err = pm_runtime_get_sync(op->ce->dev);
+ err = pm_runtime_resume_and_get(op->ce->dev);
if (err < 0)
goto error_pm;
return 0;
error_pm:
- pm_runtime_put_noidle(op->ce->dev);
crypto_free_skcipher(op->fallback_tfm);
return err;
}
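
The sun8i-ce hunks above replace #ifdef'd statistics code with `if (IS_ENABLED(CONFIG_...))` blocks, which keeps the counters visible to the compiler in every configuration (so they are always parsed and type-checked) while still letting the optimiser drop the dead branch. A simplified sketch of that idea with a plain constant macro standing in for the kernel's IS_ENABLED():

```c
#include <stdio.h>

/* Stand-in for IS_ENABLED(): a compile-time constant, so the disabled
 * branch is removed by the optimiser but still compiled. */
#define DEBUG_STATS_ENABLED 0

struct stats {
	unsigned long fallbacks;
};

static struct stats stats;

static int need_fallback(unsigned int len)
{
	if (len % 16) {
		if (DEBUG_STATS_ENABLED)	/* compiled, then folded away */
			stats.fallbacks++;
		return 1;
	}
	return 0;
}

int main(void)
{
	printf("fallback for len=5: %d\n", need_fallback(5));
	return 0;
}
```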
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index ec1ffda9ea32..658f520cee0c 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -832,13 +832,12 @@ static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce)
err = pm_runtime_set_suspended(ce->dev);
if (err)
return err;
- pm_runtime_enable(ce->dev);
- return err;
-}
-static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce)
-{
- pm_runtime_disable(ce->dev);
+ err = devm_pm_runtime_enable(ce->dev);
+ if (err)
+ return err;
+
+ return 0;
}
static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
@@ -1041,7 +1040,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
"sun8i-ce-ns", ce);
if (err) {
dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err);
- goto error_irq;
+ goto error_pm;
}
err = sun8i_ce_register_algs(ce);
@@ -1082,8 +1081,6 @@ static int sun8i_ce_probe(struct platform_device *pdev)
return 0;
error_alg:
sun8i_ce_unregister_algs(ce);
-error_irq:
- sun8i_ce_pm_exit(ce);
error_pm:
sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
return err;
@@ -1104,8 +1101,6 @@ static void sun8i_ce_remove(struct platform_device *pdev)
#endif
sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
-
- sun8i_ce_pm_exit(ce);
}
static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 6072dd9f390b..bef44f350167 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -23,6 +23,18 @@
#include <linux/string.h>
#include "sun8i-ce.h"
+static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+ algt = container_of(alg, struct sun8i_ce_alg_template,
+ alg.hash.base);
+ algt->stat_fb++;
+ }
+}
+
int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
{
struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
@@ -48,15 +60,16 @@ int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
sizeof(struct sun8i_ce_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
- memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
- CRYPTO_MAX_ALG_NAME);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ memcpy(algt->fbname,
+ crypto_ahash_driver_name(op->fallback_tfm),
+ CRYPTO_MAX_ALG_NAME);
- err = pm_runtime_get_sync(op->ce->dev);
+ err = pm_runtime_resume_and_get(op->ce->dev);
if (err < 0)
goto error_pm;
return 0;
error_pm:
- pm_runtime_put_noidle(op->ce->dev);
crypto_free_ahash(op->fallback_tfm);
return err;
}
@@ -78,7 +91,9 @@ int sun8i_ce_hash_init(struct ahash_request *areq)
memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -90,7 +105,9 @@ int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
@@ -102,7 +119,9 @@ int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -113,21 +132,13 @@ int sun8i_ce_hash_final(struct ahash_request *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = areq->result;
-
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ sun8i_ce_hash_stat_fb_inc(tfm);
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -139,10 +150,10 @@ int sun8i_ce_hash_update(struct ahash_request *areq)
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -153,24 +164,14 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ sun8i_ce_hash_stat_fb_inc(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
-
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -181,24 +182,14 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
-
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ sun8i_ce_hash_stat_fb_inc(tfm);
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -213,22 +204,30 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
if (areq->nbytes == 0) {
- algt->stat_fb_len0++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_len0++;
+
return true;
}
/* we need to reserve one SG for padding one */
if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
- algt->stat_fb_maxsg++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_maxsg++;
+
return true;
}
sg = areq->src;
while (sg) {
if (sg->length % 4) {
- algt->stat_fb_srclen++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srclen++;
+
return true;
}
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- algt->stat_fb_srcali++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srcali++;
+
return true;
}
sg = sg_next(sg);
@@ -244,21 +243,11 @@ int sun8i_ce_hash_digest(struct ahash_request *areq)
struct sun8i_ce_alg_template *algt;
struct sun8i_ce_dev *ce;
struct crypto_engine *engine;
- struct scatterlist *sg;
- int nr_sgs, e, i;
+ int e;
if (sun8i_ce_hash_need_fallback(areq))
return sun8i_ce_hash_digest_fb(areq);
- nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
- if (nr_sgs > MAX_SG - 1)
- return sun8i_ce_hash_digest_fb(areq);
-
- for_each_sg(areq->src, sg, nr_sgs, i) {
- if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
- return sun8i_ce_hash_digest_fb(areq);
- }
-
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
@@ -343,9 +332,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
u32 common;
u64 byte_count;
__le32 *bf;
- void *buf = NULL;
+ void *buf, *result;
int j, i, todo;
- void *result = NULL;
u64 bs;
int digestsize;
dma_addr_t addr_res, addr_pad;
@@ -365,22 +353,22 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
if (!buf) {
err = -ENOMEM;
- goto theend;
+ goto err_out;
}
bf = (__le32 *)buf;
result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
if (!result) {
err = -ENOMEM;
- goto theend;
+ goto err_free_buf;
}
flow = rctx->flow;
chan = &ce->chanlist[flow];
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_req++;
-#endif
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_req++;
+
dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
cet = chan->tl;
@@ -398,7 +386,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
- goto theend;
+ goto err_free_result;
}
len = areq->nbytes;
@@ -411,7 +399,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (len > 0) {
dev_err(ce->dev, "remaining len %d\n", len);
err = -EINVAL;
- goto theend;
+ goto err_unmap_src;
}
addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
@@ -419,7 +407,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (dma_mapping_error(ce->dev, addr_res)) {
dev_err(ce->dev, "DMA map dest\n");
err = -EINVAL;
- goto theend;
+ goto err_unmap_src;
}
byte_count = areq->nbytes;
@@ -441,7 +429,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
}
if (!j) {
err = -EINVAL;
- goto theend;
+ goto err_unmap_result;
}
addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
@@ -450,7 +438,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (dma_mapping_error(ce->dev, addr_pad)) {
dev_err(ce->dev, "DMA error on padding SG\n");
err = -EINVAL;
- goto theend;
+ goto err_unmap_result;
}
if (ce->variant->hash_t_dlen_in_bits)
@@ -463,16 +451,25 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+
+err_unmap_result:
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+ if (!err)
+ memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
+err_unmap_src:
+ dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
- memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
-theend:
- kfree(buf);
+err_free_result:
kfree(result);
+
+err_free_buf:
+ kfree(buf);
+
+err_out:
local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
local_bh_enable();
+
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 3b5c2af013d0..83df4d719053 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -308,8 +308,8 @@ struct sun8i_ce_hash_tfm_ctx {
* @flow: the flow to use for this request
*/
struct sun8i_ce_hash_reqctx {
- struct ahash_request fallback_req;
int flow;
+ struct ahash_request fallback_req; // keep at the end
};
/*
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 9b9605ce8ee6..8831bcb230c2 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -141,7 +141,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
/* we need to copy all IVs from source in case DMA is bi-directionnal */
while (sg && len) {
- if (sg_dma_len(sg) == 0) {
+ if (sg->length == 0) {
sg = sg_next(sg);
continue;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 753f67a36dc5..8bc08089f044 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -150,7 +150,9 @@ int sun8i_ss_hash_init(struct ahash_request *areq)
memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -162,7 +164,9 @@ int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
@@ -174,7 +178,9 @@ int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -186,9 +192,10 @@ int sun8i_ss_hash_final(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
@@ -212,10 +219,10 @@ int sun8i_ss_hash_update(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -227,12 +234,11 @@ int sun8i_ss_hash_finup(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
@@ -256,12 +262,11 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index e0af611a95d8..38e8a61e9166 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -12,9 +12,6 @@
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <linux/hash.h>
-#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
@@ -72,7 +69,7 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
- __le32 iv[AES_IV_SIZE];
+ __le32 iv[AES_IV_SIZE / 4];
if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
return -EINVAL;
@@ -429,7 +426,7 @@ static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- __le32 iv[16];
+ __le32 iv[4];
u32 tmp_sa[SA_AES128_CCM_LEN + 4];
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
unsigned int len = req->cryptlen;
@@ -602,106 +599,3 @@ int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
{
return crypto4xx_crypt_aes_gcm(req, true);
}
-
-/*
- * HASH SHA1 Functions
- */
-static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
- unsigned int sa_len,
- unsigned char ha,
- unsigned char hm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *my_alg;
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
- struct dynamic_sa_hash160 *sa;
- int rc;
-
- my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
- alg.u.hash);
- ctx->dev = my_alg->dev;
-
- /* Create SA */
- if (ctx->sa_in || ctx->sa_out)
- crypto4xx_free_sa(ctx);
-
- rc = crypto4xx_alloc_sa(ctx, sa_len);
- if (rc)
- return rc;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct crypto4xx_ctx));
- sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
- set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
- SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
- SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
- SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
- SA_OPCODE_HASH, DIR_INBOUND);
- set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
- CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
- SA_SEQ_MASK_OFF, SA_MC_ENABLE,
- SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
- SA_NOT_COPY_HDR);
- /* Need to zero hash digest in SA */
- memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
- memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
-
- return 0;
-}
-
-int crypto4xx_hash_init(struct ahash_request *req)
-{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- int ds;
- struct dynamic_sa_ctl *sa;
-
- sa = ctx->sa_in;
- ds = crypto_ahash_digestsize(
- __crypto_ahash_cast(req->base.tfm));
- sa->sa_command_0.bf.digest_len = ds >> 2;
- sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
-
- return 0;
-}
-
-int crypto4xx_hash_update(struct ahash_request *req)
-{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct scatterlist dst;
- unsigned int ds = crypto_ahash_digestsize(ahash);
-
- sg_init_one(&dst, req->result, ds);
-
- return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
- req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len, 0, NULL);
-}
-
-int crypto4xx_hash_final(struct ahash_request *req)
-{
- return 0;
-}
-
-int crypto4xx_hash_digest(struct ahash_request *req)
-{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct scatterlist dst;
- unsigned int ds = crypto_ahash_digestsize(ahash);
-
- sg_init_one(&dst, req->result, ds);
-
- return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
- req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len, 0, NULL);
-}
-
-/*
- * SHA1 Algorithm
- */
-int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
-{
- return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
- SA_HASH_MODE_HASH);
-}
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index ec3ccfa60445..8cdc66d520c9 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -485,18 +485,6 @@ static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
}
}
-static void crypto4xx_copy_digest_to_dst(void *dst,
- struct pd_uinfo *pd_uinfo,
- struct crypto4xx_ctx *ctx)
-{
- struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
-
- if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
- memcpy(dst, pd_uinfo->sr_va->save_digest,
- SA_HASH_ALG_SHA1_DIGEST_SIZE);
- }
-}
-
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo)
{
@@ -549,23 +537,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
skcipher_request_complete(req, 0);
}
-static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
- struct pd_uinfo *pd_uinfo)
-{
- struct crypto4xx_ctx *ctx;
- struct ahash_request *ahash_req;
-
- ahash_req = ahash_request_cast(pd_uinfo->async_req);
- ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
-
- crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
- crypto4xx_ret_sg_desc(dev, pd_uinfo);
-
- if (pd_uinfo->state & PD_ENTRY_BUSY)
- ahash_request_complete(ahash_req, -EINPROGRESS);
- ahash_request_complete(ahash_req, 0);
-}
-
static void crypto4xx_aead_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
@@ -642,9 +613,6 @@ static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
case CRYPTO_ALG_TYPE_AEAD:
crypto4xx_aead_done(dev, pd_uinfo, pd);
break;
- case CRYPTO_ALG_TYPE_AHASH:
- crypto4xx_ahash_done(dev, pd_uinfo);
- break;
}
}
@@ -676,7 +644,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *src,
struct scatterlist *dst,
const unsigned int datalen,
- const __le32 *iv, const u32 iv_len,
+ const void *iv, const u32 iv_len,
const struct dynamic_sa_ctl *req_sa,
const unsigned int sa_len,
const unsigned int assoclen,
@@ -912,8 +880,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
}
pd->pd_ctl.w = PD_CTL_HOST_READY |
- ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
- (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
+ ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
PD_CTL_HASH_FINAL : 0);
pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
@@ -1019,10 +986,6 @@ static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
rc = crypto_register_aead(&alg->alg.u.aead);
break;
- case CRYPTO_ALG_TYPE_AHASH:
- rc = crypto_register_ahash(&alg->alg.u.hash);
- break;
-
case CRYPTO_ALG_TYPE_RNG:
rc = crypto_register_rng(&alg->alg.u.rng);
break;
@@ -1048,10 +1011,6 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
list_del(&alg->entry);
switch (alg->alg.type) {
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_unregister_ahash(&alg->alg.u.hash);
- break;
-
case CRYPTO_ALG_TYPE_AEAD:
crypto_unregister_aead(&alg->alg.u.aead);
break;
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 3adcc5e65694..ee36630c670f 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -16,7 +16,6 @@
#include <linux/ratelimit.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
@@ -135,7 +134,6 @@ struct crypto4xx_alg_common {
u32 type;
union {
struct skcipher_alg cipher;
- struct ahash_alg hash;
struct aead_alg aead;
struct rng_alg rng;
} u;
@@ -147,6 +145,12 @@ struct crypto4xx_alg {
struct crypto4xx_device *dev;
};
+#if IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION >= 120000
+#define BUILD_PD_ACCESS __attribute__((access(read_only, 6, 7)))
+#else
+#define BUILD_PD_ACCESS
+#endif
+
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
int crypto4xx_build_pd(struct crypto_async_request *req,
@@ -154,11 +158,11 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *src,
struct scatterlist *dst,
const unsigned int datalen,
- const __le32 *iv, const u32 iv_len,
+ const void *iv, const u32 iv_len,
const struct dynamic_sa_ctl *sa,
const unsigned int sa_len,
const unsigned int assoclen,
- struct scatterlist *dst_tmp);
+ struct scatterlist *dst_tmp) BUILD_PD_ACCESS;
int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
@@ -177,11 +181,6 @@ int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
-int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
-int crypto4xx_hash_digest(struct ahash_request *req);
-int crypto4xx_hash_final(struct ahash_request *req);
-int crypto4xx_hash_update(struct ahash_request *req);
-int crypto4xx_hash_init(struct ahash_request *req);
/*
* Note: Only use this function to copy items that is word aligned.
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 14bf86957d31..27c5d000b4b2 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1743,7 +1743,8 @@ static struct skcipher_alg aes_xts_alg = {
.base.cra_driver_name = "atmel-xts-aes",
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
- .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -2220,7 +2221,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
{
- alg->cra_flags |= CRYPTO_ALG_ASYNC;
+ alg->cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->cra_alignmask = 0xf;
alg->cra_priority = ATMEL_AES_PRIORITY;
alg->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 67a170608566..2cc36da163e8 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1254,7 +1254,8 @@ static int atmel_sha_cra_init(struct crypto_tfm *tfm)
static void atmel_sha_alg_init(struct ahash_alg *alg)
{
alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
- alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx);
alg->halg.base.cra_module = THIS_MODULE;
alg->halg.base.cra_init = atmel_sha_cra_init;
@@ -2041,7 +2042,8 @@ static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
static void atmel_sha_hmac_alg_init(struct ahash_alg *alg)
{
alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
- alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx);
alg->halg.base.cra_module = THIS_MODULE;
alg->halg.base.cra_init = atmel_sha_hmac_cra_init;
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index 75bebec2c757..0fcf4a39de27 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -163,6 +163,12 @@ static int atmel_sha204a_probe(struct i2c_client *client)
i2c_priv->hwrng.name = dev_name(&client->dev);
i2c_priv->hwrng.read = atmel_sha204a_rng_read;
+ /*
+ * According to review by Bill Cox [1], this HWRNG has very low entropy.
+ * [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html
+ */
+ i2c_priv->hwrng.quality = 1;
+
ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
if (ret)
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index de9717e221e4..098f5532f389 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -785,7 +785,7 @@ static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
{
alg->base.cra_priority = ATMEL_TDES_PRIORITY;
- alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
alg->base.cra_module = THIS_MODULE;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index d4b39184dbdb..38ff931059b4 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -573,6 +573,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8ULP", .data = &caam_imx8ulp_data },
+ { .soc_id = "i.MX8QM", .data = &caam_imx8ulp_data },
{ .soc_id = "VF*", .data = &caam_vf610_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 7701d00bcb3a..b6e7c0b29d4e 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -122,12 +122,12 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
qm_fd_addr_set64(&fd, addr);
do {
+ refcount_inc(&req->drv_ctx->refcnt);
ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
- if (likely(!ret)) {
- refcount_inc(&req->drv_ctx->refcnt);
+ if (likely(!ret))
return 0;
- }
+ refcount_dec(&req->drv_ctx->refcnt);
if (ret != -EBUSY)
break;
num_retries++;
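
The caam/qi.c hunk above moves the refcount increment in front of qman_enqueue() and drops it again on failure, closing the window in which the completion path could run before the submitter had taken its reference. A small sketch of that inc-before-publish / dec-on-error ordering using C11 atomics (the enqueue function is a hypothetical stand-in):

```c
#include <stdatomic.h>
#include <stdio.h>

struct ctx {
	atomic_int refcnt;
};

/* Hypothetical enqueue: once this returns 0, a completion may already
 * be running elsewhere and dropping a reference. */
static int enqueue(struct ctx *ctx)
{
	(void)ctx;
	return 0;
}

static int submit(struct ctx *ctx)
{
	/* Take the reference BEFORE the request becomes visible... */
	atomic_fetch_add(&ctx->refcnt, 1);

	if (enqueue(ctx) == 0)
		return 0;	/* completion path now owns the extra ref */

	/* ...and give it back only if the submit never happened. */
	atomic_fetch_sub(&ctx->refcnt, 1);
	return -1;
}

int main(void)
{
	struct ctx ctx = { .refcnt = 1 };

	submit(&ctx);
	printf("refcnt now %d\n", atomic_load(&ctx.refcnt));
	return 0;
}
```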
diff --git a/drivers/crypto/cavium/Makefile b/drivers/crypto/cavium/Makefile
index 4679c06b611f..75227c587ed0 100644
--- a/drivers/crypto/cavium/Makefile
+++ b/drivers/crypto/cavium/Makefile
@@ -2,4 +2,5 @@
#
# Makefile for Cavium crypto device drivers
#
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += zip/
+obj-$(CONFIG_CRYPTO_DEV_CPT) += cpt/
+obj-$(CONFIG_CRYPTO_DEV_NITROX) += nitrox/
diff --git a/drivers/crypto/cavium/zip/Makefile b/drivers/crypto/cavium/zip/Makefile
deleted file mode 100644
index 020d189d793d..000000000000
--- a/drivers/crypto/cavium/zip/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Cavium's ZIP Driver.
-#
-
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += thunderx_zip.o
-thunderx_zip-y := zip_main.o \
- zip_device.o \
- zip_crypto.o \
- zip_mem.o \
- zip_deflate.o \
- zip_inflate.o
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
deleted file mode 100644
index 54f6fb054119..000000000000
--- a/drivers/crypto/cavium/zip/common.h
+++ /dev/null
@@ -1,222 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __COMMON_H__
-#define __COMMON_H__
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-/* Device specific zlib function definitions */
-#include "zip_device.h"
-
-/* ZIP device definitions */
-#include "zip_main.h"
-
-/* ZIP memory allocation/deallocation related definitions */
-#include "zip_mem.h"
-
-/* Device specific structure definitions */
-#include "zip_regs.h"
-
-#define ZIP_ERROR -1
-
-#define ZIP_FLUSH_FINISH 4
-
-#define RAW_FORMAT 0 /* for rawpipe */
-#define ZLIB_FORMAT 1 /* for zpipe */
-#define GZIP_FORMAT 2 /* for gzpipe */
-#define LZS_FORMAT 3 /* for lzspipe */
-
-/* Max number of ZIP devices supported */
-#define MAX_ZIP_DEVICES 2
-
-/* Configures the number of zip queues to be used */
-#define ZIP_NUM_QUEUES 2
-
-#define DYNAMIC_STOP_EXCESS 1024
-
-/* Maximum buffer sizes in direct mode */
-#define MAX_INPUT_BUFFER_SIZE (64 * 1024)
-#define MAX_OUTPUT_BUFFER_SIZE (64 * 1024)
-
-/**
- * struct zip_operation - common data structure for comp and decomp operations
- * @input: Next input byte is read from here
- * @output: Next output byte written here
- * @ctx_addr: Inflate context buffer address
- * @history: Pointer to the history buffer
- * @input_len: Number of bytes available at next_in
- * @input_total_len: Total number of input bytes read
- * @output_len: Remaining free space at next_out
- * @output_total_len: Total number of bytes output so far
- * @csum: Checksum value of the uncompressed data
- * @flush: Flush flag
- * @format: Format (depends on stream's wrap)
- * @speed: Speed depends on stream's level
- * @ccode: Compression code ( stream's strategy)
- * @lzs_flag: Flag for LZS support
- * @begin_file: Beginning of file indication for inflate
- * @history_len: Size of the history data
- * @end_file: Ending of the file indication for inflate
- * @compcode: Completion status of the ZIP invocation
- * @bytes_read: Input bytes read in current instruction
- * @bits_processed: Total bits processed for entire file
- * @sizeofptr: To distinguish between ILP32 and LP64
- * @sizeofzops: Optional just for padding
- *
- * This structure is used to maintain the required meta data for the
- * comp and decomp operations.
- */
-struct zip_operation {
- u8 *input;
- u8 *output;
- u64 ctx_addr;
- u64 history;
-
- u32 input_len;
- u32 input_total_len;
-
- u32 output_len;
- u32 output_total_len;
-
- u32 csum;
- u32 flush;
-
- u32 format;
- u32 speed;
- u32 ccode;
- u32 lzs_flag;
-
- u32 begin_file;
- u32 history_len;
-
- u32 end_file;
- u32 compcode;
- u32 bytes_read;
- u32 bits_processed;
-
- u32 sizeofptr;
- u32 sizeofzops;
-};
-
-static inline int zip_poll_result(union zip_zres_s *result)
-{
- int retries = 1000;
-
- while (!result->s.compcode) {
- if (!--retries) {
- pr_err("ZIP ERR: request timed out");
- return -ETIMEDOUT;
- }
- udelay(10);
- /*
- * Force re-reading of compcode which is updated
- * by the ZIP coprocessor.
- */
- rmb();
- }
- return 0;
-}
-
-/* error messages */
-#define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
- fmt "\n", __func__, __LINE__, ## args)
-
-#ifdef MSG_ENABLE
-/* Enable all messages */
-#define zip_msg(fmt, args...) pr_info("ZIP_MSG:" fmt "\n", ## args)
-#else
-#define zip_msg(fmt, args...)
-#endif
-
-#if defined(ZIP_DEBUG_ENABLE) && defined(MSG_ENABLE)
-
-#ifdef DEBUG_LEVEL
-
-#define FILE_NAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : \
- strrchr(__FILE__, '\\') ? strrchr(__FILE__, '\\') + 1 : __FILE__)
-
-#if DEBUG_LEVEL >= 4
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
- fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
-
-#elif DEBUG_LEVEL >= 3
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
- fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
-
-#elif DEBUG_LEVEL >= 2
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s() : %d: " \
- fmt "\n", __func__, __LINE__, ## args)
-
-#else
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
-
-#endif /* DEBUG LEVEL >=4 */
-
-#else
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
-
-#endif /* DEBUG_LEVEL */
-#else
-
-#define zip_dbg(fmt, args...)
-
-#endif /* ZIP_DEBUG_ENABLE && MSG_ENABLE*/
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
deleted file mode 100644
index 02e87f2d50db..000000000000
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "zip_crypto.h"
-
-static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
- int lzs_flag)
-{
- zip_ops->flush = ZIP_FLUSH_FINISH;
-
- /* equivalent to level 6 of opensource zlib */
- zip_ops->speed = 1;
-
- if (!lzs_flag) {
- zip_ops->ccode = 0; /* Auto Huffman */
- zip_ops->lzs_flag = 0;
- zip_ops->format = ZLIB_FORMAT;
- } else {
- zip_ops->ccode = 3; /* LZS Encoding */
- zip_ops->lzs_flag = 1;
- zip_ops->format = LZS_FORMAT;
- }
- zip_ops->begin_file = 1;
- zip_ops->history_len = 0;
- zip_ops->end_file = 1;
- zip_ops->compcode = 0;
- zip_ops->csum = 1; /* Adler checksum desired */
-}
-
-static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
-{
- struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
- struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
-
- zip_static_init_zip_ops(comp_ctx, lzs_flag);
- zip_static_init_zip_ops(decomp_ctx, lzs_flag);
-
- comp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
- if (!comp_ctx->input)
- return -ENOMEM;
-
- comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
- if (!comp_ctx->output)
- goto err_comp_input;
-
- decomp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
- if (!decomp_ctx->input)
- goto err_comp_output;
-
- decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
- if (!decomp_ctx->output)
- goto err_decomp_input;
-
- return 0;
-
-err_decomp_input:
- zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE);
-
-err_comp_output:
- zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-
-err_comp_input:
- zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
-
- return -ENOMEM;
-}
-
-static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
- struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
-
- zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
- zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-
- zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE);
- zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-}
-
-static int zip_compress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen,
- struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *zip_ops = NULL;
- struct zip_state *zip_state;
- struct zip_device *zip = NULL;
- int ret;
-
- if (!zip_ctx || !src || !dst || !dlen)
- return -ENOMEM;
-
- zip = zip_get_device(zip_get_node_id());
- if (!zip)
- return -ENODEV;
-
- zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
- if (!zip_state)
- return -ENOMEM;
-
- zip_ops = &zip_ctx->zip_comp;
-
- zip_ops->input_len = slen;
- zip_ops->output_len = *dlen;
- memcpy(zip_ops->input, src, slen);
-
- ret = zip_deflate(zip_ops, zip_state, zip);
-
- if (!ret) {
- *dlen = zip_ops->output_len;
- memcpy(dst, zip_ops->output, *dlen);
- }
- kfree(zip_state);
- return ret;
-}
-
-static int zip_decompress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen,
- struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *zip_ops = NULL;
- struct zip_state *zip_state;
- struct zip_device *zip = NULL;
- int ret;
-
- if (!zip_ctx || !src || !dst || !dlen)
- return -ENOMEM;
-
- zip = zip_get_device(zip_get_node_id());
- if (!zip)
- return -ENODEV;
-
- zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
- if (!zip_state)
- return -ENOMEM;
-
- zip_ops = &zip_ctx->zip_decomp;
- memcpy(zip_ops->input, src, slen);
-
-	/* Work around for a bug in zlib which needs an extra byte sometimes */
- if (zip_ops->ccode != 3) /* Not LZS Encoding */
- zip_ops->input[slen++] = 0;
-
- zip_ops->input_len = slen;
- zip_ops->output_len = *dlen;
-
- ret = zip_inflate(zip_ops, zip_state, zip);
-
- if (!ret) {
- *dlen = zip_ops->output_len;
- memcpy(dst, zip_ops->output, *dlen);
- }
- kfree(zip_state);
- return ret;
-}
-
-/* SCOMP framework start */
-void *zip_alloc_scomp_ctx_deflate(void)
-{
- int ret;
- struct zip_kernel_ctx *zip_ctx;
-
- zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
- if (!zip_ctx)
- return ERR_PTR(-ENOMEM);
-
- ret = zip_ctx_init(zip_ctx, 0);
-
- if (ret) {
- kfree_sensitive(zip_ctx);
- return ERR_PTR(ret);
- }
-
- return zip_ctx;
-}
-
-void *zip_alloc_scomp_ctx_lzs(void)
-{
- int ret;
- struct zip_kernel_ctx *zip_ctx;
-
- zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
- if (!zip_ctx)
- return ERR_PTR(-ENOMEM);
-
- ret = zip_ctx_init(zip_ctx, 1);
-
- if (ret) {
- kfree_sensitive(zip_ctx);
- return ERR_PTR(ret);
- }
-
- return zip_ctx;
-}
-
-void zip_free_scomp_ctx(void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- zip_ctx_exit(zip_ctx);
- kfree_sensitive(zip_ctx);
-}
-
-int zip_scomp_compress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- return zip_compress(src, slen, dst, dlen, zip_ctx);
-}
-
-int zip_scomp_decompress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- return zip_decompress(src, slen, dst, dlen, zip_ctx);
-} /* SCOMP framework end */
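
The four zip_scomp_*() wrappers above are what the crypto core invokes once the driver registers its scomp_alg entries (see zip_main.c further down in this diff). For context, here is a minimal sketch of how a kernel caller would reach such a "deflate" algorithm through the acomp interface that fronts scomp implementations; this assumes <crypto/acompress.h> and the generic crypto_wait helpers, is not part of this driver, and abbreviates error handling:

static int deflate_one_buffer(struct scatterlist *src, unsigned int slen,
			      struct scatterlist *dst, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	acomp_request_set_params(req, src, dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* Synchronously wait for the (possibly asynchronous) compression. */
	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return ret;
}
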
diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h
deleted file mode 100644
index 10899ece2d1f..000000000000
--- a/drivers/crypto/cavium/zip/zip_crypto.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_CRYPTO_H__
-#define __ZIP_CRYPTO_H__
-
-#include <crypto/internal/scompress.h>
-#include "common.h"
-#include "zip_deflate.h"
-#include "zip_inflate.h"
-
-struct zip_kernel_ctx {
- struct zip_operation zip_comp;
- struct zip_operation zip_decomp;
-};
-
-void *zip_alloc_scomp_ctx_deflate(void);
-void *zip_alloc_scomp_ctx_lzs(void);
-void zip_free_scomp_ctx(void *zip_ctx);
-int zip_scomp_compress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx);
-int zip_scomp_decompress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
deleted file mode 100644
index d7133f857d67..000000000000
--- a/drivers/crypto/cavium/zip/zip_deflate.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "common.h"
-#include "zip_deflate.h"
-
-/* Prepares the deflate zip command */
-static int prepare_zip_command(struct zip_operation *zip_ops,
- struct zip_state *s, union zip_inst_s *zip_cmd)
-{
- union zip_zres_s *result_ptr = &s->result;
-
- memset(zip_cmd, 0, sizeof(s->zip_cmd));
- memset(result_ptr, 0, sizeof(s->result));
-
- /* IWORD #0 */
- /* History gather */
- zip_cmd->s.hg = 0;
- /* compression enable = 1 for deflate */
- zip_cmd->s.ce = 1;
- /* sf (sync flush) */
- zip_cmd->s.sf = 1;
- /* ef (end of file) */
- if (zip_ops->flush == ZIP_FLUSH_FINISH) {
- zip_cmd->s.ef = 1;
- zip_cmd->s.sf = 0;
- }
-
- zip_cmd->s.cc = zip_ops->ccode;
- /* ss (compression speed/storage) */
- zip_cmd->s.ss = zip_ops->speed;
-
- /* IWORD #1 */
- /* adler checksum */
- zip_cmd->s.adlercrc32 = zip_ops->csum;
- zip_cmd->s.historylength = zip_ops->history_len;
- zip_cmd->s.dg = 0;
-
- /* IWORD # 6 and 7 - compression input/history pointer */
- zip_cmd->s.inp_ptr_addr.s.addr = __pa(zip_ops->input);
- zip_cmd->s.inp_ptr_ctl.s.length = (zip_ops->input_len +
- zip_ops->history_len);
- zip_cmd->s.ds = 0;
-
- /* IWORD # 8 and 9 - Output pointer */
- zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
- zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
- /* maximum number of output-stream bytes that can be written */
- zip_cmd->s.totaloutputlength = zip_ops->output_len;
-
- /* IWORD # 10 and 11 - Result pointer */
- zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
- /* Clearing completion code */
- result_ptr->s.compcode = 0;
-
- return 0;
-}
-
-/**
- * zip_deflate - API to offload deflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to zip device structure
- *
- * This function prepares the zip deflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev)
-{
- union zip_inst_s *zip_cmd = &s->zip_cmd;
- union zip_zres_s *result_ptr = &s->result;
- u32 queue;
-
- /* Prepares zip command based on the input parameters */
- prepare_zip_command(zip_ops, s, zip_cmd);
-
- atomic64_add(zip_ops->input_len, &zip_dev->stats.comp_in_bytes);
- /* Loads zip command into command queues and rings door bell */
- queue = zip_load_instr(zip_cmd, zip_dev);
-
- /* Stats update for compression requests submitted */
- atomic64_inc(&zip_dev->stats.comp_req_submit);
-
- /* Wait for completion or error */
- zip_poll_result(result_ptr);
-
- /* Stats update for compression requests completed */
- atomic64_inc(&zip_dev->stats.comp_req_complete);
-
- zip_ops->compcode = result_ptr->s.compcode;
- switch (zip_ops->compcode) {
- case ZIP_CMD_NOTDONE:
- zip_dbg("Zip instruction not yet completed");
- return ZIP_ERROR;
-
- case ZIP_CMD_SUCCESS:
- zip_dbg("Zip instruction completed successfully");
- zip_update_cmd_bufs(zip_dev, queue);
- break;
-
- case ZIP_CMD_DTRUNC:
- zip_dbg("Output Truncate error");
- /* Returning ZIP_ERROR to avoid copy to user */
- return ZIP_ERROR;
-
- default:
- zip_err("Zip instruction failed. Code:%d", zip_ops->compcode);
- return ZIP_ERROR;
- }
-
- /* Update the CRC depending on the format */
- switch (zip_ops->format) {
- case RAW_FORMAT:
- zip_dbg("RAW Format: %d ", zip_ops->format);
- /* Get checksum from engine, need to feed it again */
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case ZLIB_FORMAT:
- zip_dbg("ZLIB Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case GZIP_FORMAT:
- zip_dbg("GZIP Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.crc32;
- break;
-
- case LZS_FORMAT:
- zip_dbg("LZS Format: %d ", zip_ops->format);
- break;
-
- default:
- zip_err("Unknown Format:%d\n", zip_ops->format);
- }
-
- atomic64_add(result_ptr->s.totalbyteswritten,
- &zip_dev->stats.comp_out_bytes);
-
- /* Update output_len */
- if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
- /* Dynamic stop && strm->output_len < zipconstants[onfsize] */
- zip_err("output_len (%d) < total bytes written(%d)\n",
- zip_ops->output_len, result_ptr->s.totalbyteswritten);
- zip_ops->output_len = 0;
-
- } else {
- zip_ops->output_len = result_ptr->s.totalbyteswritten;
- }
-
- return 0;
-}
diff --git a/drivers/crypto/cavium/zip/zip_deflate.h b/drivers/crypto/cavium/zip/zip_deflate.h
deleted file mode 100644
index 1d32e76edc4d..000000000000
--- a/drivers/crypto/cavium/zip/zip_deflate.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_DEFLATE_H__
-#define __ZIP_DEFLATE_H__
-
-/**
- * zip_deflate - API to offload deflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to the structure representing zip device
- *
- * This function prepares the zip deflate command and submits it to the zip
- * engine by ringing the doorbell.
- *
- * Return: 0 if successful or error code
- */
-int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_device.c b/drivers/crypto/cavium/zip/zip_device.c
deleted file mode 100644
index f174ec29ed69..000000000000
--- a/drivers/crypto/cavium/zip/zip_device.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "common.h"
-#include "zip_deflate.h"
-
-/**
- * zip_cmd_queue_consumed - Calculates the space consumed in the command queue.
- *
- * @zip_dev: Pointer to zip device structure
- * @queue: Queue number
- *
- * Return: Bytes consumed in the command queue buffer.
- */
-static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue)
-{
- return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *
- sizeof(u64 *));
-}
-
-/**
- * zip_load_instr - Submits the instruction into the ZIP command queue
- * @instr: Pointer to the instruction to be submitted
- * @zip_dev: Pointer to ZIP device structure to which the instruction is to
- * be submitted
- *
- * This function copies the ZIP instruction to the command queue and rings the
- * doorbell to notify the engine of the instruction submission. The command
- * queue is maintained in a circular fashion. When there is space for exactly
- * one instruction in the queue, next chunk pointer of the queue is made to
- * point to the head of the queue, thus maintaining a circular queue.
- *
- * Return: Queue number to which the instruction was submitted
- */
-u32 zip_load_instr(union zip_inst_s *instr,
- struct zip_device *zip_dev)
-{
- union zip_quex_doorbell dbell;
- u32 queue = 0;
- u32 consumed = 0;
- u64 *ncb_ptr = NULL;
- union zip_nptr_s ncp;
-
- /*
- * Distribute the instructions between the enabled queues based on
- * the CPU id.
- */
- if (raw_smp_processor_id() % 2 == 0)
- queue = 0;
- else
- queue = 1;
-
- zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue);
-
- /* Take cmd buffer lock */
- spin_lock(&zip_dev->iq[queue].lock);
-
- /*
- * Command Queue implementation
- * 1. If there is place for new instructions, push the cmd at sw_head.
- * 2. If there is place for exactly one instruction, push the new cmd
- * at the sw_head. Make sw_head point to the sw_tail to make it
- * circular. Write sw_head's physical address to the "Next-Chunk
- * Buffer Ptr" to make it cmd_hw_tail.
- * 3. Ring the door bell.
- */
- zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
- zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
-
- consumed = zip_cmd_queue_consumed(zip_dev, queue);
- /* Check if there is space to push just one cmd */
- if ((consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)) {
- zip_dbg("Cmd queue space available for single command");
-		/* Space for one cmd; push it and make the queue circular */
- memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
- sizeof(union zip_inst_s));
- zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
-
- /* Now, point the "Next-Chunk Buffer Ptr" to sw_head */
- ncb_ptr = zip_dev->iq[queue].sw_head;
-
- zip_dbg("ncb addr :0x%lx sw_head addr :0x%lx",
- ncb_ptr, zip_dev->iq[queue].sw_head - 16);
-
- /* Using Circular command queue */
- zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail;
- /* Mark this buffer for free */
- zip_dev->iq[queue].free_flag = 1;
-
- /* Write new chunk buffer address at "Next-Chunk Buffer Ptr" */
- ncp.u_reg64 = 0ull;
- ncp.s.addr = __pa(zip_dev->iq[queue].sw_head);
- *ncb_ptr = ncp.u_reg64;
- zip_dbg("*ncb_ptr :0x%lx sw_head[phys] :0x%lx",
- *ncb_ptr, __pa(zip_dev->iq[queue].sw_head));
-
- zip_dev->iq[queue].pend_cnt++;
-
- } else {
- zip_dbg("Enough space is available for commands");
- /* Push this cmd to cmd queue buffer */
- memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
- sizeof(union zip_inst_s));
- zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
-
- zip_dev->iq[queue].pend_cnt++;
- }
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
- zip_dev->iq[queue].hw_tail);
-
- zip_dbg(" Pushed the new cmd : pend_cnt : %d",
- zip_dev->iq[queue].pend_cnt);
-
- /* Ring the doorbell */
- dbell.u_reg64 = 0ull;
- dbell.s.dbell_cnt = 1;
- zip_reg_write(dbell.u_reg64,
- (zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue)));
-
- /* Unlock cmd buffer lock */
- spin_unlock(&zip_dev->iq[queue].lock);
-
- return queue;
-}
-
-/**
- * zip_update_cmd_bufs - Updates the queue statistics after posting the
- * instruction
- * @zip_dev: Pointer to zip device structure
- * @queue: Queue number
- */
-void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue)
-{
- /* Take cmd buffer lock */
- spin_lock(&zip_dev->iq[queue].lock);
-
- /* Check if the previous buffer can be freed */
- if (zip_dev->iq[queue].free_flag == 1) {
- zip_dbg("Free flag. Free cmd buffer, adjust sw head and tail");
- /* Reset the free flag */
- zip_dev->iq[queue].free_flag = 0;
-
- /* Point the hw_tail to start of the new chunk buffer */
- zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head;
- } else {
- zip_dbg("Free flag not set. increment hw tail");
- zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */
- }
-
- zip_dev->iq[queue].done_cnt++;
- zip_dev->iq[queue].pend_cnt--;
-
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
- zip_dev->iq[queue].hw_tail);
- zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt);
-
- spin_unlock(&zip_dev->iq[queue].lock);
-}
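
zip_load_instr() above keeps each instruction queue circular in software: every command occupies 128 bytes (16 64-bit words), the final 8 bytes of the buffer hold the next-chunk pointer, and when exactly one command slot remains the driver fills it and points that trailing pointer back at the start of the buffer. A small sketch of just that wrap decision, with hypothetical names and constants mirroring the removed code:

#define CMD_SIZE	128U	/* one union zip_inst_s: 16 x 64-bit words */
#define NCB_SIZE	8U	/* trailing next-chunk buffer pointer */

static bool queue_must_wrap(u64 *sw_head, u64 *sw_tail, unsigned int qbuf_size)
{
	/* Bytes already consumed between the software tail and head. */
	unsigned int consumed = (sw_head - sw_tail) * sizeof(u64 *);

	/* True when the command being pushed takes the last free slot. */
	return consumed + CMD_SIZE == qbuf_size - NCB_SIZE;
}
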
diff --git a/drivers/crypto/cavium/zip/zip_device.h b/drivers/crypto/cavium/zip/zip_device.h
deleted file mode 100644
index 9e18b3b93d38..000000000000
--- a/drivers/crypto/cavium/zip/zip_device.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_DEVICE_H__
-#define __ZIP_DEVICE_H__
-
-#include <linux/types.h>
-#include "zip_main.h"
-
-struct sg_info {
- /*
- * Pointer to the input data when scatter_gather == 0 and
- * pointer to the input gather list buffer when scatter_gather == 1
- */
- union zip_zptr_s *gather;
-
- /*
- * Pointer to the output data when scatter_gather == 0 and
- * pointer to the output scatter list buffer when scatter_gather == 1
- */
- union zip_zptr_s *scatter;
-
- /*
- * Holds size of the output buffer pointed by scatter list
- * when scatter_gather == 1
- */
- u64 scatter_buf_size;
-
- /* for gather data */
- u64 gather_enable;
-
- /* for scatter data */
- u64 scatter_enable;
-
- /* Number of gather list pointers for gather data */
- u32 gbuf_cnt;
-
- /* Number of scatter list pointers for scatter data */
- u32 sbuf_cnt;
-
- /* Buffers allocation state */
- u8 alloc_state;
-};
-
-/**
- * struct zip_state - Structure representing the required information related
- * to a command
- * @zip_cmd: Pointer to zip instruction structure
- * @result: Pointer to zip result structure
- * @ctx: Context pointer for inflate
- * @history: Decompression history pointer
- * @sginfo: Scatter-gather info structure
- */
-struct zip_state {
- union zip_inst_s zip_cmd;
- union zip_zres_s result;
- union zip_zptr_s *ctx;
- union zip_zptr_s *history;
- struct sg_info sginfo;
-};
-
-#define ZIP_CONTEXT_SIZE 2048
-#define ZIP_INFLATE_HISTORY_SIZE 32768
-#define ZIP_DEFLATE_HISTORY_SIZE 32768
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
deleted file mode 100644
index 7e0d73e2f89e..000000000000
--- a/drivers/crypto/cavium/zip/zip_inflate.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "common.h"
-#include "zip_inflate.h"
-
-static int prepare_inflate_zcmd(struct zip_operation *zip_ops,
- struct zip_state *s, union zip_inst_s *zip_cmd)
-{
- union zip_zres_s *result_ptr = &s->result;
-
- memset(zip_cmd, 0, sizeof(s->zip_cmd));
- memset(result_ptr, 0, sizeof(s->result));
-
- /* IWORD#0 */
-
- /* Decompression History Gather list - no gather list */
- zip_cmd->s.hg = 0;
- /* For decompression, CE must be 0x0. */
- zip_cmd->s.ce = 0;
- /* For decompression, SS must be 0x0. */
- zip_cmd->s.ss = 0;
- /* For decompression, SF should always be set. */
- zip_cmd->s.sf = 1;
-
- /* Begin File */
- if (zip_ops->begin_file == 0)
- zip_cmd->s.bf = 0;
- else
- zip_cmd->s.bf = 1;
-
- zip_cmd->s.ef = 1;
- /* 0: for Deflate decompression, 3: for LZS decompression */
- zip_cmd->s.cc = zip_ops->ccode;
-
- /* IWORD #1*/
-
- /* adler checksum */
- zip_cmd->s.adlercrc32 = zip_ops->csum;
-
- /*
- * HISTORYLENGTH must be 0x0 for any ZIP decompress operation.
- * History data is added to a decompression operation via IWORD3.
- */
- zip_cmd->s.historylength = 0;
- zip_cmd->s.ds = 0;
-
- /* IWORD # 8 and 9 - Output pointer */
- zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
- zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
-
- /* Maximum number of output-stream bytes that can be written */
- zip_cmd->s.totaloutputlength = zip_ops->output_len;
-
- zip_dbg("Data Direct Input case ");
-
- /* IWORD # 6 and 7 - input pointer */
- zip_cmd->s.dg = 0;
- zip_cmd->s.inp_ptr_addr.s.addr = __pa((u8 *)zip_ops->input);
- zip_cmd->s.inp_ptr_ctl.s.length = zip_ops->input_len;
-
- /* IWORD # 10 and 11 - Result pointer */
- zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
-
- /* Clearing completion code */
- result_ptr->s.compcode = 0;
-
-	/* Returning 0 for the time being. */
- return 0;
-}
-
-/**
- * zip_inflate - API to offload inflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to zip device structure
- *
- * This function prepares the zip inflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev)
-{
- union zip_inst_s *zip_cmd = &s->zip_cmd;
- union zip_zres_s *result_ptr = &s->result;
- u32 queue;
-
- /* Prepare inflate zip command */
- prepare_inflate_zcmd(zip_ops, s, zip_cmd);
-
- atomic64_add(zip_ops->input_len, &zip_dev->stats.decomp_in_bytes);
-
- /* Load inflate command to zip queue and ring the doorbell */
- queue = zip_load_instr(zip_cmd, zip_dev);
-
- /* Decompression requests submitted stats update */
- atomic64_inc(&zip_dev->stats.decomp_req_submit);
-
- /* Wait for completion or error */
- zip_poll_result(result_ptr);
-
- /* Decompression requests completed stats update */
- atomic64_inc(&zip_dev->stats.decomp_req_complete);
-
- zip_ops->compcode = result_ptr->s.compcode;
- switch (zip_ops->compcode) {
- case ZIP_CMD_NOTDONE:
- zip_dbg("Zip Instruction not yet completed\n");
- return ZIP_ERROR;
-
- case ZIP_CMD_SUCCESS:
- zip_dbg("Zip Instruction completed successfully\n");
- break;
-
- case ZIP_CMD_DYNAMIC_STOP:
- zip_dbg(" Dynamic stop Initiated\n");
- break;
-
- default:
- zip_dbg("Instruction failed. Code = %d\n", zip_ops->compcode);
- atomic64_inc(&zip_dev->stats.decomp_bad_reqs);
- zip_update_cmd_bufs(zip_dev, queue);
- return ZIP_ERROR;
- }
-
- zip_update_cmd_bufs(zip_dev, queue);
-
- if ((zip_ops->ccode == 3) && (zip_ops->flush == 4) &&
- (zip_ops->compcode != ZIP_CMD_DYNAMIC_STOP))
- result_ptr->s.ef = 1;
-
- zip_ops->csum = result_ptr->s.adler32;
-
- atomic64_add(result_ptr->s.totalbyteswritten,
- &zip_dev->stats.decomp_out_bytes);
-
- if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
- zip_err("output_len (%d) < total bytes written (%d)\n",
- zip_ops->output_len, result_ptr->s.totalbyteswritten);
- zip_ops->output_len = 0;
- } else {
- zip_ops->output_len = result_ptr->s.totalbyteswritten;
- }
-
- zip_ops->bytes_read = result_ptr->s.totalbytesread;
- zip_ops->bits_processed = result_ptr->s.totalbitsprocessed;
- zip_ops->end_file = result_ptr->s.ef;
- if (zip_ops->end_file) {
- switch (zip_ops->format) {
- case RAW_FORMAT:
- zip_dbg("RAW Format: %d ", zip_ops->format);
- /* Get checksum from engine */
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case ZLIB_FORMAT:
- zip_dbg("ZLIB Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case GZIP_FORMAT:
- zip_dbg("GZIP Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.crc32;
- break;
-
- case LZS_FORMAT:
- zip_dbg("LZS Format: %d ", zip_ops->format);
- break;
-
- default:
- zip_err("Format error:%d\n", zip_ops->format);
- }
- }
-
- return 0;
-}
diff --git a/drivers/crypto/cavium/zip/zip_inflate.h b/drivers/crypto/cavium/zip/zip_inflate.h
deleted file mode 100644
index 6b20f179978e..000000000000
--- a/drivers/crypto/cavium/zip/zip_inflate.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_INFLATE_H__
-#define __ZIP_INFLATE_H__
-
-/**
- * zip_inflate - API to offload inflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to the structure representing zip device
- *
- * This function prepares the zip inflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
deleted file mode 100644
index abd58de4343d..000000000000
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ /dev/null
@@ -1,603 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "common.h"
-#include "zip_crypto.h"
-
-#define DRV_NAME "ThunderX-ZIP"
-
-static struct zip_device *zip_dev[MAX_ZIP_DEVICES];
-
-static const struct pci_device_id zip_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
- { 0, }
-};
-
-static void zip_debugfs_init(void);
-static void zip_debugfs_exit(void);
-static int zip_register_compression_device(void);
-static void zip_unregister_compression_device(void);
-
-void zip_reg_write(u64 val, u64 __iomem *addr)
-{
- writeq(val, addr);
-}
-
-u64 zip_reg_read(u64 __iomem *addr)
-{
- return readq(addr);
-}
-
-/*
- * Allocates a new ZIP device structure.
- * Returns a zip_device pointer, or NULL if memory for it cannot be allocated.
- */
-static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
-{
- struct zip_device *zip = NULL;
- int idx;
-
- for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
- if (!zip_dev[idx])
- break;
- }
-
- /* To ensure that the index is within the limit */
- if (idx < MAX_ZIP_DEVICES)
- zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
-
- if (!zip)
- return NULL;
-
- zip_dev[idx] = zip;
- zip->index = idx;
- return zip;
-}
-
-/**
- * zip_get_device - Get ZIP device based on node id of cpu
- *
- * @node: Node id of the current cpu
- * Return: Pointer to Zip device structure
- */
-struct zip_device *zip_get_device(int node)
-{
- if ((node < MAX_ZIP_DEVICES) && (node >= 0))
- return zip_dev[node];
-
- zip_err("ZIP device not found for node id %d\n", node);
- return NULL;
-}
-
-/**
- * zip_get_node_id - Get the node id of the current cpu
- *
- * Return: Node id of the current cpu
- */
-int zip_get_node_id(void)
-{
- return cpu_to_node(raw_smp_processor_id());
-}
-
-/* Initializes the ZIP h/w sub-system */
-static int zip_init_hw(struct zip_device *zip)
-{
- union zip_cmd_ctl cmd_ctl;
- union zip_constants constants;
- union zip_que_ena que_ena;
- union zip_quex_map que_map;
- union zip_que_pri que_pri;
-
- union zip_quex_sbuf_addr que_sbuf_addr;
- union zip_quex_sbuf_ctl que_sbuf_ctl;
-
- int q = 0;
-
- /* Enable the ZIP Engine(Core) Clock */
- cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
- cmd_ctl.s.forceclk = 1;
- zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));
-
- zip_msg("ZIP_CMD_CTL : 0x%016llx",
- zip_reg_read(zip->reg_base + ZIP_CMD_CTL));
-
- constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
- zip->depth = constants.s.depth;
- zip->onfsize = constants.s.onfsize;
- zip->ctxsize = constants.s.ctxsize;
-
- zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
- zip->depth, zip->onfsize, zip->ctxsize);
-
- /*
- * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
- * have the correct buffer pointer and size configured for each
- * instruction queue.
- */
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- que_sbuf_ctl.u_reg64 = 0ull;
- que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
- que_sbuf_ctl.s.inst_be = 0;
- que_sbuf_ctl.s.stream_id = 0;
- zip_reg_write(que_sbuf_ctl.u_reg64,
- (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
-
- zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
- }
-
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));
-
- spin_lock_init(&zip->iq[q].lock);
-
- if (zip_cmd_qbuf_alloc(zip, q)) {
- while (q != 0) {
- q--;
- zip_cmd_qbuf_free(zip, q);
- }
- return -ENOMEM;
- }
-
- /* Initialize tail ptr to head */
- zip->iq[q].sw_tail = zip->iq[q].sw_head;
- zip->iq[q].hw_tail = zip->iq[q].sw_head;
-
- /* Write the physical addr to register */
- que_sbuf_addr.u_reg64 = 0ull;
- que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
- ZIP_128B_ALIGN);
-
- zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
- (u64)que_sbuf_addr.s.ptr);
-
- zip_reg_write(que_sbuf_addr.u_reg64,
- (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
-
- zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
-
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip->iq[q].sw_head, zip->iq[q].sw_tail,
- zip->iq[q].hw_tail);
- zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
- }
-
- /*
- * Queue-to-ZIP core mapping
- * If a queue is not mapped to a particular core, it is equivalent to
- * the ZIP core being disabled.
- */
- que_ena.u_reg64 = 0x0ull;
- /* Enabling queues based on ZIP_NUM_QUEUES */
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- que_ena.s.ena |= (0x1 << q);
- zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));
-
- zip_msg("QUE_ENA : 0x%016llx",
- zip_reg_read(zip->reg_base + ZIP_QUE_ENA));
-
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- que_map.u_reg64 = 0ull;
- /* Mapping each queue to two ZIP cores */
- que_map.s.zce = 0x3;
- zip_reg_write(que_map.u_reg64,
- (zip->reg_base + ZIP_QUEX_MAP(q)));
-
- zip_msg("QUE_MAP(%d) : 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
- }
-
- que_pri.u_reg64 = 0ull;
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
- zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));
-
- zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));
-
- return 0;
-}
-
-static void zip_reset(struct zip_device *zip)
-{
- union zip_cmd_ctl cmd_ctl;
-
- cmd_ctl.u_reg64 = 0x0ull;
- cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
- zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
-}
-
-static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct device *dev = &pdev->dev;
- struct zip_device *zip = NULL;
- int err;
-
- zip = zip_alloc_device(pdev);
- if (!zip)
- return -ENOMEM;
-
- dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
- pdev->vendor, pdev->device, dev_to_node(dev));
-
- pci_set_drvdata(pdev, zip);
- zip->pdev = pdev;
-
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(dev, "Failed to enable PCI device");
- goto err_free_device;
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err) {
- dev_err(dev, "PCI request regions failed 0x%x", err);
- goto err_disable_device;
- }
-
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
- if (err) {
- dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
- goto err_release_regions;
- }
-
- /* MAP configuration registers */
- zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
- if (!zip->reg_base) {
- dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
- err = -ENOMEM;
- goto err_release_regions;
- }
-
- /* Initialize ZIP Hardware */
- err = zip_init_hw(zip);
- if (err)
- goto err_release_regions;
-
- /* Register with the Kernel Crypto Interface */
- err = zip_register_compression_device();
- if (err < 0) {
- zip_err("ZIP: Kernel Crypto Registration failed\n");
- goto err_register;
- }
-
- /* comp-decomp statistics are handled with debugfs interface */
- zip_debugfs_init();
-
- return 0;
-
-err_register:
- zip_reset(zip);
-
-err_release_regions:
- if (zip->reg_base)
- iounmap(zip->reg_base);
- pci_release_regions(pdev);
-
-err_disable_device:
- pci_disable_device(pdev);
-
-err_free_device:
- pci_set_drvdata(pdev, NULL);
-
- /* Remove zip_dev from zip_device list, free the zip_device memory */
- zip_dev[zip->index] = NULL;
- devm_kfree(dev, zip);
-
- return err;
-}
-
-static void zip_remove(struct pci_dev *pdev)
-{
- struct zip_device *zip = pci_get_drvdata(pdev);
- int q = 0;
-
- if (!zip)
- return;
-
- zip_debugfs_exit();
-
- zip_unregister_compression_device();
-
- if (zip->reg_base) {
- zip_reset(zip);
- iounmap(zip->reg_base);
- }
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
- /*
- * Free Command Queue buffers. This free should be called for all
- * the enabled Queues.
- */
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- zip_cmd_qbuf_free(zip, q);
-
- pci_set_drvdata(pdev, NULL);
- /* remove zip device from zip device list */
- zip_dev[zip->index] = NULL;
-}
-
-/* PCI Sub-System Interface */
-static struct pci_driver zip_driver = {
- .name = DRV_NAME,
- .id_table = zip_id_table,
- .probe = zip_probe,
- .remove = zip_remove,
-};
-
-/* Kernel Crypto Subsystem Interface */
-
-static struct scomp_alg zip_scomp_deflate = {
- .alloc_ctx = zip_alloc_scomp_ctx_deflate,
- .free_ctx = zip_free_scomp_ctx,
- .compress = zip_scomp_compress,
- .decompress = zip_scomp_decompress,
- .base = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-scomp-cavium",
- .cra_module = THIS_MODULE,
- .cra_priority = 300,
- }
-};
-
-static struct scomp_alg zip_scomp_lzs = {
- .alloc_ctx = zip_alloc_scomp_ctx_lzs,
- .free_ctx = zip_free_scomp_ctx,
- .compress = zip_scomp_compress,
- .decompress = zip_scomp_decompress,
- .base = {
- .cra_name = "lzs",
- .cra_driver_name = "lzs-scomp-cavium",
- .cra_module = THIS_MODULE,
- .cra_priority = 300,
- }
-};
-
-static int zip_register_compression_device(void)
-{
- int ret;
-
- ret = crypto_register_scomp(&zip_scomp_deflate);
- if (ret < 0) {
- zip_err("Deflate scomp algorithm registration failed\n");
- return ret;
- }
-
- ret = crypto_register_scomp(&zip_scomp_lzs);
- if (ret < 0) {
- zip_err("LZS scomp algorithm registration failed\n");
- goto err_unregister_scomp_deflate;
- }
-
- return ret;
-
-err_unregister_scomp_deflate:
- crypto_unregister_scomp(&zip_scomp_deflate);
-
- return ret;
-}
-
-static void zip_unregister_compression_device(void)
-{
- crypto_unregister_scomp(&zip_scomp_deflate);
- crypto_unregister_scomp(&zip_scomp_lzs);
-}
-
-/*
- * debugfs functions
- */
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-
-/* Displays ZIP device statistics */
-static int zip_stats_show(struct seq_file *s, void *unused)
-{
- u64 val = 0ull;
- u64 avg_chunk = 0ull, avg_cr = 0ull;
- u32 q = 0;
-
- int index = 0;
- struct zip_device *zip;
- struct zip_stats *st;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- u64 pending = 0;
-
- if (zip_dev[index]) {
- zip = zip_dev[index];
- st = &zip->stats;
-
- /* Get all the pending requests */
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- val = zip_reg_read((zip->reg_base +
- ZIP_DBG_QUEX_STA(q)));
- pending += val >> 32 & 0xffffff;
- }
-
- val = atomic64_read(&st->comp_req_complete);
- avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
-
- val = atomic64_read(&st->comp_out_bytes);
- avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
- seq_printf(s, " ZIP Device %d Stats\n"
- "-----------------------------------\n"
- "Comp Req Submitted : \t%lld\n"
- "Comp Req Completed : \t%lld\n"
- "Compress In Bytes : \t%lld\n"
- "Compressed Out Bytes : \t%lld\n"
- "Average Chunk size : \t%llu\n"
- "Average Compression ratio : \t%llu\n"
- "Decomp Req Submitted : \t%lld\n"
- "Decomp Req Completed : \t%lld\n"
- "Decompress In Bytes : \t%lld\n"
- "Decompressed Out Bytes : \t%lld\n"
- "Decompress Bad requests : \t%lld\n"
- "Pending Req : \t%lld\n"
- "---------------------------------\n",
- index,
- (u64)atomic64_read(&st->comp_req_submit),
- (u64)atomic64_read(&st->comp_req_complete),
- (u64)atomic64_read(&st->comp_in_bytes),
- (u64)atomic64_read(&st->comp_out_bytes),
- avg_chunk,
- avg_cr,
- (u64)atomic64_read(&st->decomp_req_submit),
- (u64)atomic64_read(&st->decomp_req_complete),
- (u64)atomic64_read(&st->decomp_in_bytes),
- (u64)atomic64_read(&st->decomp_out_bytes),
- (u64)atomic64_read(&st->decomp_bad_reqs),
- pending);
- }
- }
- return 0;
-}
-
-/* Clears stats data */
-static int zip_clear_show(struct seq_file *s, void *unused)
-{
- int index = 0;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- if (zip_dev[index]) {
- memset(&zip_dev[index]->stats, 0,
- sizeof(struct zip_stats));
- seq_printf(s, "Cleared stats for zip %d\n", index);
- }
- }
-
- return 0;
-}
-
-static struct zip_registers zipregs[64] = {
- {"ZIP_CMD_CTL ", 0x0000ull},
- {"ZIP_THROTTLE ", 0x0010ull},
- {"ZIP_CONSTANTS ", 0x00A0ull},
- {"ZIP_QUE0_MAP ", 0x1400ull},
- {"ZIP_QUE1_MAP ", 0x1408ull},
- {"ZIP_QUE_ENA ", 0x0500ull},
- {"ZIP_QUE_PRI ", 0x0508ull},
- {"ZIP_QUE0_DONE ", 0x2000ull},
- {"ZIP_QUE1_DONE ", 0x2008ull},
- {"ZIP_QUE0_DOORBELL ", 0x4000ull},
- {"ZIP_QUE1_DOORBELL ", 0x4008ull},
- {"ZIP_QUE0_SBUF_ADDR ", 0x1000ull},
- {"ZIP_QUE1_SBUF_ADDR ", 0x1008ull},
- {"ZIP_QUE0_SBUF_CTL ", 0x1200ull},
- {"ZIP_QUE1_SBUF_CTL ", 0x1208ull},
- { NULL, 0}
-};
-
-/* Prints registers' contents */
-static int zip_regs_show(struct seq_file *s, void *unused)
-{
- u64 val = 0;
- int i = 0, index = 0;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- if (zip_dev[index]) {
- seq_printf(s, "--------------------------------\n"
- " ZIP Device %d Registers\n"
- "--------------------------------\n",
- index);
-
- i = 0;
-
- while (zipregs[i].reg_name) {
- val = zip_reg_read((zip_dev[index]->reg_base +
- zipregs[i].reg_offset));
- seq_printf(s, "%s: 0x%016llx\n",
- zipregs[i].reg_name, val);
- i++;
- }
- }
- }
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(zip_stats);
-DEFINE_SHOW_ATTRIBUTE(zip_clear);
-DEFINE_SHOW_ATTRIBUTE(zip_regs);
-
-/* Root directory for thunderx_zip debugfs entry */
-static struct dentry *zip_debugfs_root;
-
-static void zip_debugfs_init(void)
-{
- if (!debugfs_initialized())
- return;
-
- zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
-
- /* Creating files for entries inside thunderx_zip directory */
- debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL,
- &zip_stats_fops);
-
- debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL,
- &zip_clear_fops);
-
- debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL,
- &zip_regs_fops);
-
-}
-
-static void zip_debugfs_exit(void)
-{
- debugfs_remove_recursive(zip_debugfs_root);
-}
-
-#else
-static void __init zip_debugfs_init(void) { }
-static void __exit zip_debugfs_exit(void) { }
-#endif
-/* debugfs - end */
-
-module_pci_driver(zip_driver);
-
-MODULE_AUTHOR("Cavium Inc");
-MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, zip_id_table);
diff --git a/drivers/crypto/cavium/zip/zip_main.h b/drivers/crypto/cavium/zip/zip_main.h
deleted file mode 100644
index e1e4fa92ce80..000000000000
--- a/drivers/crypto/cavium/zip/zip_main.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_MAIN_H__
-#define __ZIP_MAIN_H__
-
-#include "zip_device.h"
-#include "zip_regs.h"
-
-/* PCI device IDs */
-#define PCI_DEVICE_ID_THUNDERX_ZIP 0xA01A
-
-/* ZIP device BARs */
-#define PCI_CFG_ZIP_PF_BAR0 0 /* Base addr for normal regs */
-
-/* Maximum available zip queues */
-#define ZIP_MAX_NUM_QUEUES 8
-
-#define ZIP_128B_ALIGN 7
-
-/* Command queue buffer size */
-#define ZIP_CMD_QBUF_SIZE (8064 + 8)
-
-struct zip_registers {
- char *reg_name;
- u64 reg_offset;
-};
-
-/* ZIP Compression - Decompression stats */
-struct zip_stats {
- atomic64_t comp_req_submit;
- atomic64_t comp_req_complete;
- atomic64_t decomp_req_submit;
- atomic64_t decomp_req_complete;
- atomic64_t comp_in_bytes;
- atomic64_t comp_out_bytes;
- atomic64_t decomp_in_bytes;
- atomic64_t decomp_out_bytes;
- atomic64_t decomp_bad_reqs;
-};
-
-/* ZIP Instruction Queue */
-struct zip_iq {
- u64 *sw_head;
- u64 *sw_tail;
- u64 *hw_tail;
- u64 done_cnt;
- u64 pend_cnt;
- u64 free_flag;
-
- /* ZIP IQ lock */
- spinlock_t lock;
-};
-
-/* ZIP Device */
-struct zip_device {
- u32 index;
- void __iomem *reg_base;
- struct pci_dev *pdev;
-
- /* Different ZIP Constants */
- u64 depth;
- u64 onfsize;
- u64 ctxsize;
-
- struct zip_iq iq[ZIP_MAX_NUM_QUEUES];
- struct zip_stats stats;
-};
-
-/* Prototypes */
-struct zip_device *zip_get_device(int node_id);
-int zip_get_node_id(void);
-void zip_reg_write(u64 val, u64 __iomem *addr);
-u64 zip_reg_read(u64 __iomem *addr);
-void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue);
-u32 zip_load_instr(union zip_inst_s *instr, struct zip_device *zip_dev);
-
-#endif /* ZIP_MAIN_H */
diff --git a/drivers/crypto/cavium/zip/zip_mem.c b/drivers/crypto/cavium/zip/zip_mem.c
deleted file mode 100644
index b3e0843a9169..000000000000
--- a/drivers/crypto/cavium/zip/zip_mem.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-
-#include "common.h"
-
-/**
- * zip_cmd_qbuf_alloc - Allocates a cmd buffer for ZIP Instruction Queue
- * @zip: Pointer to zip device structure
- * @q: Queue number to allocate buffer to
- * Return: 0 if successful, -ENOMEM otherwise
- */
-int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
-{
- zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
- get_order(ZIP_CMD_QBUF_SIZE));
-
- if (!zip->iq[q].sw_head)
- return -ENOMEM;
-
- memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE);
-
- zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head);
- return 0;
-}
-
-/**
- * zip_cmd_qbuf_free - Frees the cmd Queue buffer
- * @zip: Pointer to zip device structure
- * @q: Queue number to free buffer of
- */
-void zip_cmd_qbuf_free(struct zip_device *zip, int q)
-{
- zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail);
-
- free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE));
-}
-
-/**
- * zip_data_buf_alloc - Allocates memory for a data buffer
- * @size: Size of the buffer to allocate
- * Returns: Pointer to the buffer allocated
- */
-u8 *zip_data_buf_alloc(u64 size)
-{
- u8 *ptr;
-
- ptr = (u8 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
- get_order(size));
-
- if (!ptr)
- return NULL;
-
- memset(ptr, 0, size);
-
- zip_dbg("Data buffer allocation success\n");
- return ptr;
-}
-
-/**
- * zip_data_buf_free - Frees the memory of a data buffer
- * @ptr: Pointer to the buffer
- * @size: Buffer size
- */
-void zip_data_buf_free(u8 *ptr, u64 size)
-{
- zip_dbg("Freeing data buffer 0x%lx\n", ptr);
-
- free_pages((u64)ptr, get_order(size));
-}
diff --git a/drivers/crypto/cavium/zip/zip_mem.h b/drivers/crypto/cavium/zip/zip_mem.h
deleted file mode 100644
index f8f2f08c4a5c..000000000000
--- a/drivers/crypto/cavium/zip/zip_mem.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_MEM_H__
-#define __ZIP_MEM_H__
-
-/**
- * zip_cmd_qbuf_free - Frees the cmd Queue buffer
- * @zip: Pointer to zip device structure
- * @q: Queue number to free buffer of
- */
-void zip_cmd_qbuf_free(struct zip_device *zip, int q);
-
-/**
- * zip_cmd_qbuf_alloc - Allocates a Chunk/cmd buffer for ZIP Inst(cmd) Queue
- * @zip: Pointer to zip device structure
- * @q: Queue number to allocate buffer to
- * Return: 0 if successful, -ENOMEM otherwise
- */
-int zip_cmd_qbuf_alloc(struct zip_device *zip, int q);
-
-/**
- * zip_data_buf_alloc - Allocates memory for a data buffer
- * @size: Size of the buffer to allocate
- * Returns: Pointer to the buffer allocated
- */
-u8 *zip_data_buf_alloc(u64 size);
-
-/**
- * zip_data_buf_free - Frees the memory of a data buffer
- * @ptr: Pointer to the buffer
- * @size: Buffer size
- */
-void zip_data_buf_free(u8 *ptr, u64 size);
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_regs.h b/drivers/crypto/cavium/zip/zip_regs.h
deleted file mode 100644
index 874e0236c87e..000000000000
--- a/drivers/crypto/cavium/zip/zip_regs.h
+++ /dev/null
@@ -1,1347 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_REGS_H__
-#define __ZIP_REGS_H__
-
-/*
- * Configuration and status register (CSR) address and type definitions for
- * Cavium ZIP.
- */
-
-#include <linux/kern_levels.h>
-
-/* ZIP invocation result completion status codes */
-#define ZIP_CMD_NOTDONE 0x0
-
-/* Successful completion. */
-#define ZIP_CMD_SUCCESS 0x1
-
-/* Output truncated */
-#define ZIP_CMD_DTRUNC 0x2
-
-/* Dynamic Stop */
-#define ZIP_CMD_DYNAMIC_STOP 0x3
-
-/* Uncompress ran out of input data when IWORD0[EF] was set */
-#define ZIP_CMD_ITRUNC 0x4
-
-/* Uncompress found the reserved block type 3 */
-#define ZIP_CMD_RBLOCK 0x5
-
-/*
- * Uncompress found LEN != ZIP_CMD_NLEN in an uncompressed block in the input.
- */
-#define ZIP_CMD_NLEN 0x6
-
-/* Uncompress found a bad code in the main Huffman codes. */
-#define ZIP_CMD_BADCODE 0x7
-
-/* Uncompress found a bad code in the 19 Huffman codes encoding lengths. */
-#define ZIP_CMD_BADCODE2 0x8
-
-/* Compress found a zero-length input. */
-#define ZIP_CMD_ZERO_LEN 0x9
-
-/* The compress or decompress encountered an internal parity error. */
-#define ZIP_CMD_PARITY 0xA
-
-/*
- * Uncompress found a string identifier that precedes the uncompressed data and
- * decompression history.
- */
-#define ZIP_CMD_FATAL 0xB
-
-/**
- * enum zip_int_vec_e - ZIP MSI-X Vector Enumeration, enumerates the MSI-X
- * interrupt vectors.
- */
-enum zip_int_vec_e {
- ZIP_INT_VEC_E_ECCE = 0x10,
- ZIP_INT_VEC_E_FIFE = 0x11,
- ZIP_INT_VEC_E_QUE0_DONE = 0x0,
- ZIP_INT_VEC_E_QUE0_ERR = 0x8,
- ZIP_INT_VEC_E_QUE1_DONE = 0x1,
- ZIP_INT_VEC_E_QUE1_ERR = 0x9,
- ZIP_INT_VEC_E_QUE2_DONE = 0x2,
- ZIP_INT_VEC_E_QUE2_ERR = 0xa,
- ZIP_INT_VEC_E_QUE3_DONE = 0x3,
- ZIP_INT_VEC_E_QUE3_ERR = 0xb,
- ZIP_INT_VEC_E_QUE4_DONE = 0x4,
- ZIP_INT_VEC_E_QUE4_ERR = 0xc,
- ZIP_INT_VEC_E_QUE5_DONE = 0x5,
- ZIP_INT_VEC_E_QUE5_ERR = 0xd,
- ZIP_INT_VEC_E_QUE6_DONE = 0x6,
- ZIP_INT_VEC_E_QUE6_ERR = 0xe,
- ZIP_INT_VEC_E_QUE7_DONE = 0x7,
- ZIP_INT_VEC_E_QUE7_ERR = 0xf,
- ZIP_INT_VEC_E_ENUM_LAST = 0x12,
-};
-
-/**
- * union zip_zptr_addr_s - ZIP Generic Pointer Structure for ADDR.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_addr_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-
-};
-
-/**
- * union zip_zptr_ctl_s - ZIP Generic Pointer Structure for CTL.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_ctl_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_112_127 : 16;
- u64 length : 16;
- u64 reserved_67_95 : 29;
- u64 fw : 1;
- u64 nc : 1;
- u64 data_be : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data_be : 1;
- u64 nc : 1;
- u64 fw : 1;
- u64 reserved_67_95 : 29;
- u64 length : 16;
- u64 reserved_112_127 : 16;
-#endif
- } s;
-};
-
-/**
- * union zip_inst_s - ZIP Instruction Structure.
- * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15 within
- * the structure).
- */
-union zip_inst_s {
- u64 u_reg64[16];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 doneint : 1;
- u64 reserved_56_62 : 7;
- u64 totaloutputlength : 24;
- u64 reserved_27_31 : 5;
- u64 exn : 3;
- u64 reserved_23_23 : 1;
- u64 exbits : 7;
- u64 reserved_12_15 : 4;
- u64 sf : 1;
- u64 ss : 2;
- u64 cc : 2;
- u64 ef : 1;
- u64 bf : 1;
- u64 ce : 1;
- u64 reserved_3_3 : 1;
- u64 ds : 1;
- u64 dg : 1;
- u64 hg : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 hg : 1;
- u64 dg : 1;
- u64 ds : 1;
- u64 reserved_3_3 : 1;
- u64 ce : 1;
- u64 bf : 1;
- u64 ef : 1;
- u64 cc : 2;
- u64 ss : 2;
- u64 sf : 1;
- u64 reserved_12_15 : 4;
- u64 exbits : 7;
- u64 reserved_23_23 : 1;
- u64 exn : 3;
- u64 reserved_27_31 : 5;
- u64 totaloutputlength : 24;
- u64 reserved_56_62 : 7;
- u64 doneint : 1;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 historylength : 16;
- u64 reserved_96_111 : 16;
- u64 adlercrc32 : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 adlercrc32 : 32;
- u64 reserved_96_111 : 16;
- u64 historylength : 16;
-#endif
- union zip_zptr_addr_s ctx_ptr_addr;
- union zip_zptr_ctl_s ctx_ptr_ctl;
- union zip_zptr_addr_s his_ptr_addr;
- union zip_zptr_ctl_s his_ptr_ctl;
- union zip_zptr_addr_s inp_ptr_addr;
- union zip_zptr_ctl_s inp_ptr_ctl;
- union zip_zptr_addr_s out_ptr_addr;
- union zip_zptr_ctl_s out_ptr_ctl;
- union zip_zptr_addr_s res_ptr_addr;
- union zip_zptr_ctl_s res_ptr_ctl;
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_817_831 : 15;
- u64 wq_ptr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 wq_ptr : 49;
- u64 reserved_817_831 : 15;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_882_895 : 14;
- u64 tt : 2;
- u64 reserved_874_879 : 6;
- u64 grp : 10;
- u64 tag : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 tag : 32;
- u64 grp : 10;
- u64 reserved_874_879 : 6;
- u64 tt : 2;
- u64 reserved_882_895 : 14;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_896_959 : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_896_959 : 64;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_960_1023 : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_960_1023 : 64;
-#endif
- } s;
-};
-
-/**
- * union zip_nptr_s - ZIP Instruction Next-Chunk-Buffer Pointer (NPTR)
- * Structure
- *
- * ZIP_NPTR structure is used to chain all the zip instruction buffers
- * together. ZIP instruction buffers are managed (allocated and released) by
- * the software.
- */
-union zip_nptr_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-/**
- * union zip_zptr_s - ZIP Generic Pointer Structure.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_s {
- u64 u_reg64[2];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_112_127 : 16;
- u64 length : 16;
- u64 reserved_67_95 : 29;
- u64 fw : 1;
- u64 nc : 1;
- u64 data_be : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data_be : 1;
- u64 nc : 1;
- u64 fw : 1;
- u64 reserved_67_95 : 29;
- u64 length : 16;
- u64 reserved_112_127 : 16;
-#endif
- } s;
-};
-
-/**
- * union zip_zres_s - ZIP Result Structure
- *
- * The ZIP coprocessor writes the result structure after it completes the
- * invocation. The result structure is exactly 24 bytes, and each invocation of
- * the ZIP coprocessor produces exactly one result structure.
- */
-union zip_zres_s {
- u64 u_reg64[3];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 crc32 : 32;
- u64 adler32 : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 adler32 : 32;
- u64 crc32 : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 totalbyteswritten : 32;
- u64 totalbytesread : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 totalbytesread : 32;
- u64 totalbyteswritten : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 totalbitsprocessed : 32;
- u64 doneint : 1;
- u64 reserved_155_158 : 4;
- u64 exn : 3;
- u64 reserved_151_151 : 1;
- u64 exbits : 7;
- u64 reserved_137_143 : 7;
- u64 ef : 1;
-
- volatile u64 compcode : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-
- volatile u64 compcode : 8;
- u64 ef : 1;
- u64 reserved_137_143 : 7;
- u64 exbits : 7;
- u64 reserved_151_151 : 1;
- u64 exn : 3;
- u64 reserved_155_158 : 4;
- u64 doneint : 1;
- u64 totalbitsprocessed : 32;
-#endif
- } s;
-};
-
-/**
- * union zip_cmd_ctl - Structure representing the register that controls
- * clock and reset.
- */
-union zip_cmd_ctl {
- u64 u_reg64;
- struct zip_cmd_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_2_63 : 62;
- u64 forceclk : 1;
- u64 reset : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reset : 1;
- u64 forceclk : 1;
- u64 reserved_2_63 : 62;
-#endif
- } s;
-};
-
-#define ZIP_CMD_CTL 0x0ull
-
-/**
- * union zip_constants - Data structure representing the register that contains
- * all of the current implementation-related parameters of the zip core in this
- * chip.
- */
-union zip_constants {
- u64 u_reg64;
- struct zip_constants_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 nexec : 8;
- u64 reserved_49_55 : 7;
- u64 syncflush_capable : 1;
- u64 depth : 16;
- u64 onfsize : 12;
- u64 ctxsize : 12;
- u64 reserved_1_7 : 7;
- u64 disabled : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 disabled : 1;
- u64 reserved_1_7 : 7;
- u64 ctxsize : 12;
- u64 onfsize : 12;
- u64 depth : 16;
- u64 syncflush_capable : 1;
- u64 reserved_49_55 : 7;
- u64 nexec : 8;
-#endif
- } s;
-};
-
-#define ZIP_CONSTANTS 0x00A0ull
-
-/**
- * union zip_corex_bist_status - Represents registers which have the BIST
- * status of memories in zip cores.
- *
- * Each bit is the BIST result of an individual memory
- * (per bit, 0 = pass and 1 = fail).
- */
-union zip_corex_bist_status {
- u64 u_reg64;
- struct zip_corex_bist_status_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_53_63 : 11;
- u64 bstatus : 53;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 bstatus : 53;
- u64 reserved_53_63 : 11;
-#endif
- } s;
-};
-
-static inline u64 ZIP_COREX_BIST_STATUS(u64 param1)
-{
- if (param1 <= 1)
- return 0x0520ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_COREX_BIST_STATUS: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_ctl_bist_status - Represents register that has the BIST status of
- * memories in ZIP_CTL (instruction buffer, G/S pointer FIFO, input data
- * buffer, output data buffers).
- *
- * Each bit is the BIST result of an individual memory
- * (per bit, 0 = pass and 1 = fail).
- */
-union zip_ctl_bist_status {
- u64 u_reg64;
- struct zip_ctl_bist_status_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_9_63 : 55;
- u64 bstatus : 9;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 bstatus : 9;
- u64 reserved_9_63 : 55;
-#endif
- } s;
-};
-
-#define ZIP_CTL_BIST_STATUS 0x0510ull
-
-/**
- * union zip_ctl_cfg - Represents the register that controls the behavior of
- * the ZIP DMA engines.
- *
- * It is recommended to keep default values for normal operation. Changing the
- * values of the fields may be useful for diagnostics.
- */
-union zip_ctl_cfg {
- u64 u_reg64;
- struct zip_ctl_cfg_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_52_63 : 12;
- u64 ildf : 4;
- u64 reserved_36_47 : 12;
- u64 drtf : 4;
- u64 reserved_27_31 : 5;
- u64 stcf : 3;
- u64 reserved_19_23 : 5;
- u64 ldf : 3;
- u64 reserved_2_15 : 14;
- u64 busy : 1;
- u64 reserved_0_0 : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_0_0 : 1;
- u64 busy : 1;
- u64 reserved_2_15 : 14;
- u64 ldf : 3;
- u64 reserved_19_23 : 5;
- u64 stcf : 3;
- u64 reserved_27_31 : 5;
- u64 drtf : 4;
- u64 reserved_36_47 : 12;
- u64 ildf : 4;
- u64 reserved_52_63 : 12;
-#endif
- } s;
-};
-
-#define ZIP_CTL_CFG 0x0560ull
-
-/**
- * union zip_dbg_corex_inst - Represents the registers that reflect the status
- * of the current instruction that the ZIP core is executing or has executed.
- *
- * These registers are only for debug use.
- */
-union zip_dbg_corex_inst {
- u64 u_reg64;
- struct zip_dbg_corex_inst_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_35_62 : 28;
- u64 qid : 3;
- u64 iid : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 iid : 32;
- u64 qid : 3;
- u64 reserved_35_62 : 28;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_COREX_INST(u64 param1)
-{
- if (param1 <= 1)
- return 0x0640ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_DBG_COREX_INST: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_dbg_corex_sta - Represents registers that reflect the status of
- * the zip cores.
- *
- * They are for debug use only.
- */
-union zip_dbg_corex_sta {
- u64 u_reg64;
- struct zip_dbg_corex_sta_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_37_62 : 26;
- u64 ist : 5;
- u64 nie : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 nie : 32;
- u64 ist : 5;
- u64 reserved_37_62 : 26;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_COREX_STA(u64 param1)
-{
- if (param1 <= 1)
- return 0x0680ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_DBG_COREX_STA: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_dbg_quex_sta - Represents registers that reflect the status of the zip
- * instruction queues.
- *
- * They are for debug use only.
- */
-union zip_dbg_quex_sta {
- u64 u_reg64;
- struct zip_dbg_quex_sta_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_56_62 : 7;
- u64 rqwc : 24;
- u64 nii : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 nii : 32;
- u64 rqwc : 24;
- u64 reserved_56_62 : 7;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_QUEX_STA(u64 param1)
-{
- if (param1 <= 7)
- return 0x1800ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_DBG_QUEX_STA: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_ecc_ctl - Represents the register that enables ECC for each
- * individual internal memory that requires ECC.
- *
- * For debug purpose, it can also flip one or two bits in the ECC data.
- */
-union zip_ecc_ctl {
- u64 u_reg64;
- struct zip_ecc_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_19_63 : 45;
- u64 vmem_cdis : 1;
- u64 vmem_fs : 2;
- u64 reserved_15_15 : 1;
- u64 idf1_cdis : 1;
- u64 idf1_fs : 2;
- u64 reserved_11_11 : 1;
- u64 idf0_cdis : 1;
- u64 idf0_fs : 2;
- u64 reserved_7_7 : 1;
- u64 gspf_cdis : 1;
- u64 gspf_fs : 2;
- u64 reserved_3_3 : 1;
- u64 iqf_cdis : 1;
- u64 iqf_fs : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 iqf_fs : 2;
- u64 iqf_cdis : 1;
- u64 reserved_3_3 : 1;
- u64 gspf_fs : 2;
- u64 gspf_cdis : 1;
- u64 reserved_7_7 : 1;
- u64 idf0_fs : 2;
- u64 idf0_cdis : 1;
- u64 reserved_11_11 : 1;
- u64 idf1_fs : 2;
- u64 idf1_cdis : 1;
- u64 reserved_15_15 : 1;
- u64 vmem_fs : 2;
- u64 vmem_cdis : 1;
- u64 reserved_19_63 : 45;
-#endif
- } s;
-};
-
-#define ZIP_ECC_CTL 0x0568ull
-
-/* NCB - zip_ecce_ena_w1c */
-union zip_ecce_ena_w1c {
- u64 u_reg64;
- struct zip_ecce_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_ENA_W1C 0x0598ull
-
-/* NCB - zip_ecce_ena_w1s */
-union zip_ecce_ena_w1s {
- u64 u_reg64;
- struct zip_ecce_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_ENA_W1S 0x0590ull
-
-/**
- * union zip_ecce_int - Represents the register that contains the status of the
- * ECC interrupt sources.
- */
-union zip_ecce_int {
- u64 u_reg64;
- struct zip_ecce_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_INT 0x0580ull
-
-/* NCB - zip_ecce_int_w1s */
-union zip_ecce_int_w1s {
- u64 u_reg64;
- struct zip_ecce_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_INT_W1S 0x0588ull
-
-/* NCB - zip_fife_ena_w1c */
-union zip_fife_ena_w1c {
- u64 u_reg64;
- struct zip_fife_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_ENA_W1C 0x0090ull
-
-/* NCB - zip_fife_ena_w1s */
-union zip_fife_ena_w1s {
- u64 u_reg64;
- struct zip_fife_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_ENA_W1S 0x0088ull
-
-/* NCB - zip_fife_int */
-union zip_fife_int {
- u64 u_reg64;
- struct zip_fife_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_INT 0x0078ull
-
-/* NCB - zip_fife_int_w1s */
-union zip_fife_int_w1s {
- u64 u_reg64;
- struct zip_fife_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_INT_W1S 0x0080ull
-
-/**
- * union zip_msix_pbax - Represents the register that is the MSI-X PBA table
- *
- * The bit number is indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_pbax {
- u64 u_reg64;
- struct zip_msix_pbax_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 pend : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 pend : 64;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_PBAX(u64 param1)
-{
- if (param1 == 0)
- return 0x0000838000FF0000ull;
- pr_err("ZIP_MSIX_PBAX: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_msix_vecx_addr - Represents the register that is the MSI-X vector
- * table, indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_vecx_addr {
- u64 u_reg64;
- struct zip_msix_vecx_addr_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 47;
- u64 reserved_1_1 : 1;
- u64 secvec : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 secvec : 1;
- u64 reserved_1_1 : 1;
- u64 addr : 47;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_VECX_ADDR(u64 param1)
-{
- if (param1 <= 17)
- return 0x0000838000F00000ull + (param1 & 31) * 0x10ull;
- pr_err("ZIP_MSIX_VECX_ADDR: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_msix_vecx_ctl - Represents the register that is the MSI-X vector
- * table, indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_vecx_ctl {
- u64 u_reg64;
- struct zip_msix_vecx_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_33_63 : 31;
- u64 mask : 1;
- u64 reserved_20_31 : 12;
- u64 data : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data : 20;
- u64 reserved_20_31 : 12;
- u64 mask : 1;
- u64 reserved_33_63 : 31;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_VECX_CTL(u64 param1)
-{
- if (param1 <= 17)
- return 0x0000838000F00008ull + (param1 & 31) * 0x10ull;
- pr_err("ZIP_MSIX_VECX_CTL: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done - Represents the registers that contain the per-queue
- * instruction done count.
- */
-union zip_quex_done {
- u64 u_reg64;
- struct zip_quex_done_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 done : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE(u64 param1)
-{
- if (param1 <= 7)
- return 0x2000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_ack - Represents the registers on write to which will
- * decrement the per-queue instruction done count.
- */
-union zip_quex_done_ack {
- u64 u_reg64;
- struct zip_quex_done_ack_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 done_ack : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ack : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ACK(u64 param1)
-{
- if (param1 <= 7)
- return 0x2200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ACK: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_ena_w1c - Represents the register which when written
- * 1 to will disable the DONEINT interrupt for the queue.
- */
-union zip_quex_done_ena_w1c {
- u64 u_reg64;
- struct zip_quex_done_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_1_63 : 63;
- u64 done_ena : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ena : 1;
- u64 reserved_1_63 : 63;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ENA_W1C(u64 param1)
-{
- if (param1 <= 7)
- return 0x2600ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ENA_W1C: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_ena_w1s - Represents the register that when written 1 to
- * will enable the DONEINT interrupt for the queue.
- */
-union zip_quex_done_ena_w1s {
- u64 u_reg64;
- struct zip_quex_done_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_1_63 : 63;
- u64 done_ena : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ena : 1;
- u64 reserved_1_63 : 63;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ENA_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x2400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ENA_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_wait - Represents the register that specifies the per
- * queue interrupt coalescing settings.
- */
-union zip_quex_done_wait {
- u64 u_reg64;
- struct zip_quex_done_wait_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_48_63 : 16;
- u64 time_wait : 16;
- u64 reserved_20_31 : 12;
- u64 num_wait : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 num_wait : 20;
- u64 reserved_20_31 : 12;
- u64 time_wait : 16;
- u64 reserved_48_63 : 16;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_WAIT(u64 param1)
-{
- if (param1 <= 7)
- return 0x2800ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_WAIT: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_doorbell - Represents doorbell registers for the ZIP
- * instruction queues.
- */
-union zip_quex_doorbell {
- u64 u_reg64;
- struct zip_quex_doorbell_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 dbell_cnt : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dbell_cnt : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DOORBELL(u64 param1)
-{
- if (param1 <= 7)
- return 0x4000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DOORBELL: %llu\n", param1);
- return 0;
-}
-
-union zip_quex_err_ena_w1c {
- u64 u_reg64;
- struct zip_quex_err_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_ENA_W1C(u64 param1)
-{
- if (param1 <= 7)
- return 0x3600ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_ENA_W1C: %llu\n", param1);
- return 0;
-}
-
-union zip_quex_err_ena_w1s {
- u64 u_reg64;
- struct zip_quex_err_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_ENA_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x3400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_ENA_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_err_int - Represents registers that contain the per-queue
- * error interrupts.
- */
-union zip_quex_err_int {
- u64 u_reg64;
- struct zip_quex_err_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_INT(u64 param1)
-{
- if (param1 <= 7)
- return 0x3000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_INT: %llu\n", param1);
- return 0;
-}
-
-/* NCB - zip_que#_err_int_w1s */
-union zip_quex_err_int_w1s {
- u64 u_reg64;
- struct zip_quex_err_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_INT_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x3200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_INT_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_gcfg - Represents the registers that reflect status of the
- * zip instruction queues, debug use only.
- */
-union zip_quex_gcfg {
- u64 u_reg64;
- struct zip_quex_gcfg_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_4_63 : 60;
- u64 iqb_ldwb : 1;
- u64 cbw_sty : 1;
- u64 l2ld_cmd : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 l2ld_cmd : 2;
- u64 cbw_sty : 1;
- u64 iqb_ldwb : 1;
- u64 reserved_4_63 : 60;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_GCFG(u64 param1)
-{
- if (param1 <= 7)
- return 0x1A00ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_GCFG: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_map - Represents the registers that control how each
- * instruction queue maps to zip cores.
- */
-union zip_quex_map {
- u64 u_reg64;
- struct zip_quex_map_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_2_63 : 62;
- u64 zce : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 zce : 2;
- u64 reserved_2_63 : 62;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_MAP(u64 param1)
-{
- if (param1 <= 7)
- return 0x1400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_MAP: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_sbuf_addr - Represents the registers that set the buffer
- * parameters for the instruction queues.
- *
- * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
- * this register to effectively reset the command buffer state machine.
- * These registers must be programmed after SW programs the corresponding
- * ZIP_QUE(0..7)_SBUF_CTL.
- */
-union zip_quex_sbuf_addr {
- u64 u_reg64;
- struct zip_quex_sbuf_addr_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 ptr : 42;
- u64 off : 7;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 off : 7;
- u64 ptr : 42;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_SBUF_ADDR(u64 param1)
-{
- if (param1 <= 7)
- return 0x1000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_SBUF_ADDR: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_sbuf_ctl - Represents the registers that set the buffer
- * parameters for the instruction queues.
- *
- * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
- * this register to effectively reset the command buffer state machine.
- * These registers must be programmed before SW programs the corresponding
- * ZIP_QUE(0..7)_SBUF_ADDR.
- */
-union zip_quex_sbuf_ctl {
- u64 u_reg64;
- struct zip_quex_sbuf_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_45_63 : 19;
- u64 size : 13;
- u64 inst_be : 1;
- u64 reserved_24_30 : 7;
- u64 stream_id : 8;
- u64 reserved_12_15 : 4;
- u64 aura : 12;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 aura : 12;
- u64 reserved_12_15 : 4;
- u64 stream_id : 8;
- u64 reserved_24_30 : 7;
- u64 inst_be : 1;
- u64 size : 13;
- u64 reserved_45_63 : 19;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_SBUF_CTL(u64 param1)
-{
- if (param1 <= 7)
- return 0x1200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_SBUF_CTL: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_que_ena - Represents queue enable register
- *
- * If a queue is disabled, ZIP_CTL stops fetching instructions from the queue.
- */
-union zip_que_ena {
- u64 u_reg64;
- struct zip_que_ena_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_8_63 : 56;
- u64 ena : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 ena : 8;
- u64 reserved_8_63 : 56;
-#endif
- } s;
-};
-
-#define ZIP_QUE_ENA 0x0500ull
-
-/**
- * union zip_que_pri - Represents the register that defines the priority
- * between instruction queues.
- */
-union zip_que_pri {
- u64 u_reg64;
- struct zip_que_pri_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_8_63 : 56;
- u64 pri : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 pri : 8;
- u64 reserved_8_63 : 56;
-#endif
- } s;
-};
-
-#define ZIP_QUE_PRI 0x0508ull
-
-/**
- * union zip_throttle - Represents the register that controls the maximum
- * number of in-flight X2I data fetch transactions.
- *
- * Writing 0 to this register causes the ZIP module to temporarily suspend NCB
- * accesses; it is not recommended for normal operation, but may be useful for
- * diagnostics.
- */
-union zip_throttle {
- u64 u_reg64;
- struct zip_throttle_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_6_63 : 58;
- u64 ld_infl : 6;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 ld_infl : 6;
- u64 reserved_6_63 : 58;
-#endif
- } s;
-};
-
-#define ZIP_THROTTLE 0x0010ull
-
-#endif /* __ZIP_REGS_H__ */
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index d11daaf47f06..685d42ec7ade 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -7,15 +7,16 @@
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index afae30adb703..91b1189c47de 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -7,14 +7,15 @@
* Author: Gary R Hook <ghook@amd.com>
*/
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/des.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index ecd58b38c46e..bc90aba5162a 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -7,14 +7,17 @@
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/ccp.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/ccp.h>
+#include <linux/module.h>
#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/akcipher.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index cb8e99936abb..109b5aef4034 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -8,13 +8,14 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <crypto/scatterwalk.h>
#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/utils.h>
#include <linux/ccp.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include "ccp-dev.h"
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 2e87ca0e292a..3451bada884e 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -33,6 +33,7 @@
#include <asm/cacheflush.h>
#include <asm/e820/types.h>
#include <asm/sev.h>
+#include <asm/msr.h>
#include "psp-dev.h"
#include "sev-dev.h"
@@ -109,6 +110,15 @@ static void *sev_init_ex_buffer;
*/
static struct sev_data_range_list *snp_range_list;
+static void __sev_firmware_shutdown(struct sev_device *sev, bool panic);
+
+static int snp_shutdown_on_panic(struct notifier_block *nb,
+ unsigned long reason, void *arg);
+
+static struct notifier_block snp_panic_notifier = {
+ .notifier_call = snp_shutdown_on_panic,
+};
+
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
@@ -1060,7 +1070,7 @@ static inline int __sev_do_init_locked(int *psp_ret)
static void snp_set_hsave_pa(void *arg)
{
- wrmsrl(MSR_VM_HSAVE_PA, 0);
+ wrmsrq(MSR_VM_HSAVE_PA, 0);
}
static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
@@ -1112,7 +1122,7 @@ static int __sev_snp_init_locked(int *error)
if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) {
dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n",
SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR);
- return 0;
+ return -EOPNOTSUPP;
}
/* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */
@@ -1176,21 +1186,34 @@ static int __sev_snp_init_locked(int *error)
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(cmd, arg, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV-SNP: %s failed rc %d, error %#x\n",
+ cmd == SEV_CMD_SNP_INIT_EX ? "SNP_INIT_EX" : "SNP_INIT",
+ rc, *error);
return rc;
+ }
/* Prepare for first SNP guest launch after INIT. */
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV-SNP: SNP_DF_FLUSH failed rc %d, error %#x\n",
+ rc, *error);
return rc;
+ }
sev->snp_initialized = true;
dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
+ dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
+ sev->api_minor, sev->build);
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &snp_panic_notifier);
+
sev_es_tmr_size = SNP_TMR_SIZE;
- return rc;
+ return 0;
}
static void __sev_platform_init_handle_tmr(struct sev_device *sev)
@@ -1287,16 +1310,22 @@ static int __sev_platform_init_locked(int *error)
if (error)
*error = psp_ret;
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV: %s failed %#x, rc %d\n",
+ sev_init_ex_buffer ? "INIT_EX" : "INIT", psp_ret, rc);
return rc;
+ }
sev->state = SEV_STATE_INIT;
/* Prepare for first SEV guest launch after INIT */
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n",
+ *error, rc);
return rc;
+ }
dev_dbg(sev->dev, "SEV firmware initialized\n");
@@ -1319,19 +1348,9 @@ static int _sev_platform_init_locked(struct sev_platform_init_args *args)
if (sev->state == SEV_STATE_INIT)
return 0;
- /*
- * Legacy guests cannot be running while SNP_INIT(_EX) is executing,
- * so perform SEV-SNP initialization at probe time.
- */
rc = __sev_snp_init_locked(&args->error);
- if (rc && rc != -ENODEV) {
- /*
- * Don't abort the probe if SNP INIT failed,
- * continue to initialize the legacy SEV firmware.
- */
- dev_err(sev->dev, "SEV-SNP: failed to INIT rc %d, error %#x\n",
- rc, args->error);
- }
+ if (rc && rc != -ENODEV)
+ return rc;
/* Defer legacy SEV/SEV-ES support if allowed by caller/module. */
if (args->probe && !psp_init_on_probe)
@@ -1367,8 +1386,11 @@ static int __sev_platform_shutdown_locked(int *error)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
- if (ret)
+ if (ret) {
+ dev_err(sev->dev, "SEV: failed to SHUTDOWN error %#x, rc %d\n",
+ *error, ret);
return ret;
+ }
sev->state = SEV_STATE_UNINIT;
dev_dbg(sev->dev, "SEV firmware shutdown\n");
@@ -1389,6 +1411,37 @@ static int sev_get_platform_state(int *state, int *error)
return rc;
}
+static int sev_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
+{
+ struct sev_platform_init_args init_args = {0};
+ int rc;
+
+ rc = _sev_platform_init_locked(&init_args);
+ if (rc) {
+ argp->error = SEV_RET_INVALID_PLATFORM_STATE;
+ return rc;
+ }
+
+ *shutdown_required = true;
+
+ return 0;
+}
+
+static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
+{
+ int error, rc;
+
+ rc = __sev_snp_init_locked(&error);
+ if (rc) {
+ argp->error = SEV_RET_INVALID_PLATFORM_STATE;
+ return rc;
+ }
+
+ *shutdown_required = true;
+
+ return 0;
+}
+
static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{
int state, rc;
@@ -1441,24 +1494,31 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
+ bool shutdown_required = false;
int rc;
if (!writable)
return -EPERM;
if (sev->state == SEV_STATE_UNINIT) {
- rc = __sev_platform_init_locked(&argp->error);
+ rc = sev_move_to_init_state(argp, &shutdown_required);
if (rc)
return rc;
}
- return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+ rc = __sev_do_cmd_locked(cmd, NULL, &argp->error);
+
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
+ return rc;
}
static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_csr input;
+ bool shutdown_required = false;
struct sev_data_pek_csr data;
void __user *input_address;
void *blob = NULL;
@@ -1490,7 +1550,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
cmd:
if (sev->state == SEV_STATE_UNINIT) {
- ret = __sev_platform_init_locked(&argp->error);
+ ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_blob;
}
@@ -1511,6 +1571,9 @@ cmd:
}
e_free_blob:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(blob);
return ret;
}
@@ -1682,9 +1745,12 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error);
/* SHUTDOWN may require DF_FLUSH */
if (*error == SEV_RET_DFFLUSH_REQUIRED) {
- ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, NULL);
+ int dfflush_error = SEV_RET_NO_FW_CALL;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, &dfflush_error);
if (ret) {
- dev_err(sev->dev, "SEV-SNP DF_FLUSH failed\n");
+ dev_err(sev->dev, "SEV-SNP DF_FLUSH failed, ret = %d, error = %#x\n",
+ ret, dfflush_error);
return ret;
}
/* reissue the shutdown command */
@@ -1692,7 +1758,8 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
error);
}
if (ret) {
- dev_err(sev->dev, "SEV-SNP firmware shutdown failed\n");
+ dev_err(sev->dev, "SEV-SNP firmware shutdown failed, rc %d, error %#x\n",
+ ret, *error);
return ret;
}
@@ -1718,6 +1785,12 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
sev->snp_initialized = false;
dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &snp_panic_notifier);
+
+ /* Reset TMR size back to default */
+ sev_es_tmr_size = SEV_TMR_SIZE;
+
return ret;
}
@@ -1726,6 +1799,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_cert_import input;
struct sev_data_pek_cert_import data;
+ bool shutdown_required = false;
void *pek_blob, *oca_blob;
int ret;
@@ -1756,7 +1830,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
/* If platform is not in INIT state then transition it to INIT */
if (sev->state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
+ ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_oca;
}
@@ -1764,6 +1838,9 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error);
e_free_oca:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(oca_blob);
e_free_pek:
kfree(pek_blob);
@@ -1880,32 +1957,23 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
struct sev_data_pdh_cert_export data;
void __user *input_cert_chain_address;
void __user *input_pdh_cert_address;
+ bool shutdown_required = false;
int ret;
- /* If platform is not in INIT state then transition it to INIT. */
- if (sev->state != SEV_STATE_INIT) {
- if (!writable)
- return -EPERM;
-
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- return ret;
- }
-
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
memset(&data, 0, sizeof(data));
+ input_pdh_cert_address = (void __user *)input.pdh_cert_address;
+ input_cert_chain_address = (void __user *)input.cert_chain_address;
+
/* Userspace wants to query the certificate length. */
if (!input.pdh_cert_address ||
!input.pdh_cert_len ||
!input.cert_chain_address)
goto cmd;
- input_pdh_cert_address = (void __user *)input.pdh_cert_address;
- input_cert_chain_address = (void __user *)input.cert_chain_address;
-
/* Allocate a physically contiguous buffer to store the PDH blob. */
if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE)
return -EFAULT;
@@ -1931,6 +1999,17 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
data.cert_chain_len = input.cert_chain_len;
cmd:
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (sev->state != SEV_STATE_INIT) {
+ if (!writable) {
+ ret = -EPERM;
+ goto e_free_cert;
+ }
+ ret = sev_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto e_free_cert;
+ }
+
ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error);
/* If we query the length, FW responded with expected data. */
@@ -1957,6 +2036,9 @@ cmd:
}
e_free_cert:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(cert_blob);
e_free_pdh:
kfree(pdh_blob);
@@ -1966,12 +2048,13 @@ e_free_pdh:
static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
{
struct sev_device *sev = psp_master->sev_data;
+ bool shutdown_required = false;
struct sev_data_snp_addr buf;
struct page *status_page;
+ int ret, error;
void *data;
- int ret;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
status_page = alloc_page(GFP_KERNEL_ACCOUNT);
@@ -1980,6 +2063,12 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
data = page_address(status_page);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto cleanup;
+ }
+
/*
* Firmware expects status page to be in firmware-owned state, otherwise
* it will report firmware error code INVALID_PAGE_STATE (0x1A).
@@ -2008,6 +2097,9 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
ret = -EFAULT;
cleanup:
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
__free_pages(status_page, 0);
return ret;
}
@@ -2016,21 +2108,33 @@ static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_data_snp_commit buf;
+ bool shutdown_required = false;
+ int ret, error;
- if (!sev->snp_initialized)
- return -EINVAL;
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ return ret;
+ }
buf.len = sizeof(buf);
- return __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
+
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+ return ret;
}
static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_snp_config config;
+ bool shutdown_required = false;
+ int ret, error;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
if (!writable)
@@ -2039,17 +2143,29 @@ static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable
if (copy_from_user(&config, (void __user *)argp->data, sizeof(config)))
return -EFAULT;
- return __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ return ret;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
+
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+ return ret;
}
static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_snp_vlek_load input;
+ bool shutdown_required = false;
+ int ret, error;
void *blob;
- int ret;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
if (!writable)
@@ -2068,8 +2184,18 @@ static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable)
input.vlek_wrapped_address = __psp_pa(blob);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto cleanup;
+ }
+
ret = __sev_do_cmd_locked(SEV_CMD_SNP_VLEK_LOAD, &input, &argp->error);
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+cleanup:
kfree(blob);
return ret;
@@ -2339,6 +2465,15 @@ static void sev_firmware_shutdown(struct sev_device *sev)
mutex_unlock(&sev_cmd_mutex);
}
+void sev_platform_shutdown(void)
+{
+ if (!psp_master || !psp_master->sev_data)
+ return;
+
+ sev_firmware_shutdown(psp_master->sev_data);
+}
+EXPORT_SYMBOL_GPL(sev_platform_shutdown);
+
void sev_dev_destroy(struct psp_device *psp)
{
struct sev_device *sev = psp->sev_data;
@@ -2373,10 +2508,6 @@ static int snp_shutdown_on_panic(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct notifier_block snp_panic_notifier = {
- .notifier_call = snp_shutdown_on_panic,
-};
-
int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
void *data, int *error)
{
@@ -2390,9 +2521,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
- struct sev_platform_init_args args = {0};
u8 api_major, api_minor, build;
- int rc;
if (!sev)
return;
@@ -2415,18 +2544,6 @@ void sev_pci_init(void)
api_major, api_minor, build,
sev->api_major, sev->api_minor, sev->build);
- /* Initialize the platform */
- args.probe = true;
- rc = sev_platform_init(&args);
- if (rc)
- dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
- args.error, rc);
-
- dev_info(sev->dev, "SEV%s API:%d.%d build:%d\n", sev->snp_initialized ?
- "-SNP" : "", sev->api_major, sev->api_minor, sev->build);
-
- atomic_notifier_chain_register(&panic_notifier_list,
- &snp_panic_notifier);
return;
err:
@@ -2443,7 +2560,4 @@ void sev_pci_exit(void)
return;
sev_firmware_shutdown(sev);
-
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &snp_panic_notifier);
}
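
The recurring shape in the sev-dev.c hunks above is on-demand firmware initialization: an ioctl that needs the platform in INIT state now transitions it itself, remembers that it did so in a local shutdown_required flag, and undoes the transition on its exit path. A standalone sketch of that pattern follows; all names are illustrative stand-ins, not the real driver API.

/* Sketch of the transient-INIT pattern; not the driver's actual functions. */
#include <stdbool.h>
#include <stdio.h>

enum { STATE_UNINIT, STATE_INIT };

static int platform_state = STATE_UNINIT;

static int move_to_init(void)  { platform_state = STATE_INIT;   return 0; }
static void fw_shutdown(void)  { platform_state = STATE_UNINIT; }
static int issue_fw_cmd(void)  { return 0; /* firmware call goes here */ }

static int cmd_with_transient_init(void)
{
	bool shutdown_required = false;
	int ret;

	if (platform_state != STATE_INIT) {
		ret = move_to_init();	/* remember that *we* initialized it */
		if (ret)
			return ret;
		shutdown_required = true;
	}

	ret = issue_fw_cmd();

	if (shutdown_required)
		fw_shutdown();		/* restore the pre-ioctl state */

	return ret;
}

int main(void)
{
	printf("cmd returned %d, state %d\n", cmd_with_transient_init(), platform_state);
	return 0;
}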
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 2ebc878da160..e1be2072d680 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -375,6 +375,7 @@ static const struct tee_vdata teev1 = {
static const struct tee_vdata teev2 = {
.ring_wptr_reg = 0x10950, /* C2PMSG_20 */
.ring_rptr_reg = 0x10954, /* C2PMSG_21 */
+ .info_reg = 0x109e8, /* C2PMSG_58 */
};
static const struct platform_access_vdata pa_v1 = {
@@ -440,6 +441,7 @@ static const struct psp_vdata pspv5 = {
.cmdresp_reg = 0x10944, /* C2PMSG_17 */
.cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
.cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10510, /* P2CMSG_INTEN */
.intsts_reg = 0x10514, /* P2CMSG_INTSTS */
@@ -535,6 +537,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
+ { PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index d3f5d108b898..7c41f9593d03 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -862,7 +862,7 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
return -EINVAL;
}
- algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN, GFP_KERNEL);
if (!algs)
return -ENOMEM;
@@ -5224,7 +5224,7 @@ static int qm_pre_store_caps(struct hisi_qm *qm)
size_t i, size;
size = ARRAY_SIZE(qm_cap_query_info);
- qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
+ qm_cap = devm_kcalloc(&pdev->dev, sizeof(*qm_cap), size, GFP_KERNEL);
if (!qm_cap)
return -ENOMEM;
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 1dc2378aa88b..e050f5ff5efb 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -491,8 +491,9 @@ static int img_hash_init(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -555,10 +556,10 @@ static int img_hash_update(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -570,9 +571,10 @@ static int img_hash_final(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -584,11 +586,12 @@ static int img_hash_finup(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
+
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -600,8 +603,9 @@ static int img_hash_import(struct ahash_request *req, const void *in)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -613,8 +617,9 @@ static int img_hash_export(struct ahash_request *req, void *out)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
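
The img-hash.c hunks replace direct writes to the fallback request's fields with the ahash request helpers. The general shape of forwarding one operation to a software fallback looks roughly like the sketch below, assuming a fallback tfm was allocated elsewhere (e.g. with CRYPTO_ALG_NEED_FALLBACK) and a fallback request sits in the request context.

#include <crypto/hash.h>

/* Sketch: forward a finup() to a previously allocated fallback ahash. */
static int fwd_finup_to_fallback(struct ahash_request *req,
				 struct crypto_ahash *fallback,
				 struct ahash_request *fb_req)
{
	ahash_request_set_tfm(fb_req, fallback);
	/* Propagate only MAY_SLEEP and reuse the caller's completion. */
	ahash_request_set_callback(fb_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);
	/* src/result/nbytes in one call instead of poking struct fields. */
	ahash_request_set_crypt(fb_req, req->src, req->result, req->nbytes);

	return crypto_ahash_finup(fb_req);
}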
diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.c b/drivers/crypto/inside-secure/eip93/eip93-hash.c
index df1b05ac5a57..ac13d90a2b7c 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-hash.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c
@@ -97,12 +97,20 @@ void eip93_hash_handle_result(struct crypto_async_request *async, int err)
static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest)
{
- u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 };
- u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
- SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 };
- u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
- u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 };
+ static const u32 sha256_init[] = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+ };
+ static const u32 sha224_init[] = {
+ SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+ SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7
+ };
+ static const u32 sha1_init[] = {
+ SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4
+ };
+ static const u32 md5_init[] = {
+ MD5_H0, MD5_H1, MD5_H2, MD5_H3
+ };
/* Init HASH constant */
switch (hash) {
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index f44c08f5f5ec..d2b632193beb 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -2043,7 +2043,7 @@ struct safexcel_alg_template safexcel_alg_cbcmac = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 09d9589f2d68..23f585219fb4 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -725,7 +725,7 @@ static int alloc_wq_table(int max_wqs)
for (cpu = 0; cpu < nr_cpus; cpu++) {
entry = per_cpu_ptr(wq_table, cpu);
- entry->wqs = kcalloc(max_wqs, sizeof(struct wq *), GFP_KERNEL);
+ entry->wqs = kcalloc(max_wqs, sizeof(*entry->wqs), GFP_KERNEL);
if (!entry->wqs) {
free_wq_table();
return -ENOMEM;
@@ -894,7 +894,7 @@ out:
static void rebalance_wq_table(void)
{
const struct cpumask *node_cpus;
- int node, cpu, iaa = -1;
+ int node_cpu, node, cpu, iaa = 0;
if (nr_iaa == 0)
return;
@@ -905,36 +905,29 @@ static void rebalance_wq_table(void)
clear_wq_table();
if (nr_iaa == 1) {
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- if (WARN_ON(wq_table_add_wqs(0, cpu))) {
- pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu);
- return;
- }
+ for_each_possible_cpu(cpu) {
+ if (WARN_ON(wq_table_add_wqs(0, cpu)))
+ goto err;
}
return;
}
for_each_node_with_cpus(node) {
+ cpu = 0;
node_cpus = cpumask_of_node(node);
- for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) {
- int node_cpu = cpumask_nth(cpu, node_cpus);
-
- if (WARN_ON(node_cpu >= nr_cpu_ids)) {
- pr_debug("node_cpu %d doesn't exist!\n", node_cpu);
- return;
- }
-
- if ((cpu % cpus_per_iaa) == 0)
- iaa++;
-
- if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) {
- pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
- return;
- }
+ for_each_cpu(node_cpu, node_cpus) {
+ iaa = cpu / cpus_per_iaa;
+ if (WARN_ON(wq_table_add_wqs(iaa, node_cpu)))
+ goto err;
+ cpu++;
}
}
+
+ return;
+err:
+ pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
}
static inline int check_completion(struct device *dev,
@@ -999,12 +992,9 @@ out:
static int deflate_generic_decompress(struct acomp_req *req)
{
- ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req));
+ ACOMP_FBREQ_ON_STACK(fbreq, req);
int ret;
- acomp_request_set_callback(fbreq, 0, NULL, NULL);
- acomp_request_set_params(fbreq, req->src, req->dst, req->slen,
- req->dlen);
ret = crypto_acomp_decompress(fbreq);
req->dlen = fbreq->dlen;
@@ -1020,8 +1010,7 @@ static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 compression_crc);
+ dma_addr_t dst_addr, unsigned int *dlen);
static void iaa_desc_complete(struct idxd_desc *idxd_desc,
enum idxd_complete_type comp_type,
@@ -1087,10 +1076,10 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
}
if (ctx->compress && compression_ctx->verify_compress) {
+ u32 *compression_crc = acomp_request_ctx(ctx->req);
dma_addr_t src_addr, dst_addr;
- u32 compression_crc;
- compression_crc = idxd_desc->iax_completion->crc;
+ *compression_crc = idxd_desc->iax_completion->crc;
ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr);
if (ret) {
@@ -1100,8 +1089,7 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
}
ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr,
- ctx->req->slen, dst_addr, &ctx->req->dlen,
- compression_crc);
+ ctx->req->slen, dst_addr, &ctx->req->dlen);
if (ret) {
dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret);
err = -EIO;
@@ -1130,11 +1118,11 @@ out:
static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 *compression_crc)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *compression_crc = acomp_request_ctx(req);
struct iaa_device *iaa_device;
struct idxd_desc *idxd_desc;
struct iax_hw_desc *desc;
@@ -1187,8 +1175,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
" src_addr %llx, dst_addr %llx\n", __func__,
active_compression_mode->name,
src_addr, dst_addr);
- } else if (ctx->async_mode)
- req->base.data = idxd_desc;
+ }
dev_dbg(dev, "%s: compression mode %s,"
" desc->src1_addr %llx, desc->src1_size %d,"
@@ -1282,11 +1269,11 @@ out:
static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 compression_crc)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *compression_crc = acomp_request_ctx(req);
struct iaa_device *iaa_device;
struct idxd_desc *idxd_desc;
struct iax_hw_desc *desc;
@@ -1346,10 +1333,10 @@ static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
goto err;
}
- if (compression_crc != idxd_desc->iax_completion->crc) {
+ if (*compression_crc != idxd_desc->iax_completion->crc) {
ret = -EINVAL;
dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:"
- " comp=0x%x, decomp=0x%x\n", compression_crc,
+ " comp=0x%x, decomp=0x%x\n", *compression_crc,
idxd_desc->iax_completion->crc);
print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET,
8, 1, idxd_desc->iax_completion, 64, 0);
@@ -1369,8 +1356,7 @@ err:
static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- bool disable_async)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1412,7 +1398,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
desc->src1_size = slen;
desc->completion_addr = idxd_desc->compl_dma;
- if (ctx->use_irq && !disable_async) {
+ if (ctx->use_irq) {
desc->flags |= IDXD_OP_FLAG_RCI;
idxd_desc->crypto.req = req;
@@ -1425,8 +1411,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
" src_addr %llx, dst_addr %llx\n", __func__,
active_compression_mode->name,
src_addr, dst_addr);
- } else if (ctx->async_mode && !disable_async)
- req->base.data = idxd_desc;
+ }
dev_dbg(dev, "%s: decompression mode %s,"
" desc->src1_addr %llx, desc->src1_size %d,"
@@ -1446,7 +1431,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
update_total_decomp_calls();
update_wq_decomp_calls(wq);
- if (ctx->async_mode && !disable_async) {
+ if (ctx->async_mode) {
ret = -EINPROGRESS;
dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
goto out;
@@ -1474,7 +1459,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
*dlen = req->dlen;
- if (!ctx->async_mode || disable_async)
+ if (!ctx->async_mode)
idxd_free_desc(wq, idxd_desc);
/* Update stats */
@@ -1496,7 +1481,6 @@ static int iaa_comp_acompress(struct acomp_req *req)
dma_addr_t src_addr, dst_addr;
int nr_sgs, cpu, ret = 0;
struct iaa_wq *iaa_wq;
- u32 compression_crc;
struct idxd_wq *wq;
struct device *dev;
@@ -1557,7 +1541,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
req->dst, req->dlen, sg_dma_len(req->dst));
ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
- &req->dlen, &compression_crc);
+ &req->dlen);
if (ret == -EINPROGRESS)
return ret;
@@ -1569,7 +1553,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
}
ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen,
- dst_addr, &req->dlen, compression_crc);
+ dst_addr, &req->dlen);
if (ret)
dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret);
@@ -1655,7 +1639,7 @@ static int iaa_comp_adecompress(struct acomp_req *req)
req->dst, req->dlen, sg_dma_len(req->dst));
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
- dst_addr, &req->dlen, false);
+ dst_addr, &req->dlen);
if (ret == -EINPROGRESS)
return ret;
@@ -1699,6 +1683,7 @@ static struct acomp_alg iaa_acomp_fixed_deflate = {
.cra_driver_name = "deflate-iaa",
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_ctxsize = sizeof(struct iaa_compression_ctx),
+ .cra_reqsize = sizeof(u32),
.cra_module = THIS_MODULE,
.cra_priority = IAA_ALG_PRIORITY,
}
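
In the reworked rebalance_wq_table() above, the IAA device serving a CPU is simply the CPU's position within its node divided by cpus_per_iaa. A small userspace sketch of the same arithmetic; the node size and device count are made up, and the real driver derives cpus_per_iaa from the global topology rather than per node.

#include <stdio.h>

int main(void)
{
	/* Hypothetical topology: 8 CPUs in one node, 2 IAA devices. */
	int nr_node_cpus = 8, nr_iaa = 2;
	int cpus_per_iaa = (nr_node_cpus + nr_iaa - 1) / nr_iaa;

	for (int cpu = 0; cpu < nr_node_cpus; cpu++) {
		int iaa = cpu / cpus_per_iaa;	/* same arithmetic as the driver loop */
		printf("cpu %d -> iaa %d\n", cpu, iaa);
	}
	return 0;
}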
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 02fb8abe4e6e..359c61f0c8a1 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -70,6 +70,18 @@ config CRYPTO_DEV_QAT_420XX
To compile this as a module, choose M here: the module
will be called qat_420xx.
+config CRYPTO_DEV_QAT_6XXX
+ tristate "Support for Intel(R) QuickAssist Technology QAT_6XXX"
+ depends on (X86 || COMPILE_TEST)
+ depends on PCI
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) QuickAssist Technology QAT_6xxx
+ for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_6xxx.
+
config CRYPTO_DEV_QAT_DH895xCCVF
tristate "Support for Intel(R) DH895xCC Virtual Function"
depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile
index 235b69f4f3f7..abef14207afa 100644
--- a/drivers/crypto/intel/qat/Makefile
+++ b/drivers/crypto/intel/qat/Makefile
@@ -1,10 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
+subdir-ccflags-y := -I$(src)/qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_6XXX) += qat_6xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile
index 72b24b1804cf..f6df54d2993e 100644
--- a/drivers/crypto/intel/qat/qat_420xx/Makefile
+++ b/drivers/crypto/intel/qat/qat_420xx/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o
qat_420xx-y := adf_drv.o adf_420xx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 4feeef83f7a3..7c3c0f561c95 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -9,15 +9,14 @@
#include <adf_common_drv.h>
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
-#include <adf_gen4_dc.h>
#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include <adf_gen4_ras.h>
-#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
#include <adf_gen4_vf_mig.h>
+#include <adf_timer.h>
#include "adf_420xx_hw_data.h"
#include "icp_qat_hw.h"
@@ -93,7 +92,6 @@ static const struct adf_fw_config adf_fw_dcc_config[] = {
static struct adf_hw_device_class adf_420xx_class = {
.name = ADF_420XX_DEVICE_NAME,
.type = DEV_420XX,
- .instances = 0,
};
static u32 get_ae_mask(struct adf_hw_device_data *self)
@@ -469,8 +467,8 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
- hw_data->start_timer = adf_gen4_timer_start;
- hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->start_timer = adf_timer_start;
+ hw_data->stop_timer = adf_timer_stop;
hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_420XX_AE_FREQ;
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
index 8084aa0f7f41..cfa00daeb4fb 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
@@ -14,7 +14,7 @@
#include "adf_420xx_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_420XX) },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -186,11 +186,19 @@ static void adf_remove(struct pci_dev *pdev)
adf_cleanup_accel(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_420XX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile
index e8480bb80dee..188b611445e6 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o
qat_4xxx-y := adf_drv.o adf_4xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 4eb6ef99efdd..bd0b1b1015c0 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -9,15 +9,14 @@
#include <adf_common_drv.h>
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
-#include <adf_gen4_dc.h>
#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include "adf_gen4_ras.h"
-#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
#include <adf_gen4_vf_mig.h>
+#include <adf_timer.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -96,7 +95,6 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));
static struct adf_hw_device_class adf_4xxx_class = {
.name = ADF_4XXX_DEVICE_NAME,
.type = DEV_4XXX,
- .instances = 0,
};
static u32 get_ae_mask(struct adf_hw_device_data *self)
@@ -422,13 +420,13 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
hw_data->num_rps = ADF_GEN4_MAX_RPS;
switch (dev_id) {
- case ADF_402XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
hw_data->fw_name = ADF_402XX_FW;
hw_data->fw_mmp_name = ADF_402XX_MMP;
hw_data->uof_get_name = uof_get_name_402xx;
hw_data->get_ena_thd_mask = get_ena_thd_mask;
break;
- case ADF_401XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
hw_data->fw_name = ADF_4XXX_FW;
hw_data->fw_mmp_name = ADF_4XXX_MMP;
hw_data->uof_get_name = uof_get_name_4xxx;
@@ -455,8 +453,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
- hw_data->start_timer = adf_gen4_timer_start;
- hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->start_timer = adf_timer_start;
+ hw_data->stop_timer = adf_timer_stop;
hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 5537a9991e4e..c9be5dcddb27 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -14,9 +14,9 @@
#include "adf_4xxx_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
- { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
- { PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_4XXX) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_401XX) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_402XX) },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -188,11 +188,19 @@ static void adf_remove(struct pci_dev *pdev)
adf_cleanup_accel(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_4XXX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
diff --git a/drivers/crypto/intel/qat/qat_6xxx/Makefile b/drivers/crypto/intel/qat/qat_6xxx/Makefile
new file mode 100644
index 000000000000..4b4de67cb0c2
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_QAT_6XXX) += qat_6xxx.o
+qat_6xxx-y := adf_drv.o adf_6xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
new file mode 100644
index 000000000000..359a6447ccb8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include <adf_accel_devices.h>
+#include <adf_admin.h>
+#include <adf_cfg.h>
+#include <adf_cfg_services.h>
+#include <adf_clock.h>
+#include <adf_common_drv.h>
+#include <adf_fw_config.h>
+#include <adf_gen6_pm.h>
+#include <adf_gen6_ras.h>
+#include <adf_gen6_shared.h>
+#include <adf_timer.h>
+#include "adf_6xxx_hw_data.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_hw_51_comp.h"
+
+#define RP_GROUP_0_MASK (BIT(0) | BIT(2))
+#define RP_GROUP_1_MASK (BIT(1) | BIT(3))
+#define RP_GROUP_ALL_MASK (RP_GROUP_0_MASK | RP_GROUP_1_MASK)
+
+#define ADF_AE_GROUP_0 GENMASK(3, 0)
+#define ADF_AE_GROUP_1 GENMASK(7, 4)
+#define ADF_AE_GROUP_2 BIT(8)
+
+struct adf_ring_config {
+ u32 ring_mask;
+ enum adf_cfg_service_type ring_type;
+ const unsigned long *thrd_mask;
+};
+
+static u32 rmask_two_services[] = {
+ RP_GROUP_0_MASK,
+ RP_GROUP_1_MASK,
+};
+
+enum adf_gen6_rps {
+ RP0 = 0,
+ RP1 = 1,
+ RP2 = 2,
+ RP3 = 3,
+ RP_MAX = RP3
+};
+
+/*
+ * thrd_mask_[sym|asym|cpr|dcc]: these static arrays define the thread
+ * configuration for handling requests of specific services across the
+ * accelerator engines. Each element in an array corresponds to an
+ * accelerator engine, with the value being a bitmask that specifies which
+ * threads within that engine are capable of processing the particular service.
+ *
+ * For example, a value of 0x0C means that threads 2 and 3 are enabled for the
+ * service in the respective accelerator engine.
+ */
+static const unsigned long thrd_mask_sym[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x0C, 0x0C, 0x0C, 0x0C, 0x1C, 0x1C, 0x1C, 0x1C, 0x00
+};
+
+static const unsigned long thrd_mask_asym[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x70, 0x70, 0x70, 0x70, 0x60, 0x60, 0x60, 0x60, 0x00
+};
+
+static const unsigned long thrd_mask_cpr[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00
+};
+
+static const unsigned long thrd_mask_dcc[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x03, 0x03, 0x00
+};
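
As the comment above explains, each array element is a per-engine bitmask of threads able to process the service: 0x0C enables threads 2 and 3, 0x1C enables threads 2, 3 and 4. A throwaway sketch that decodes such a mask:

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x1C;	/* example value from thrd_mask_sym */

	for (int thrd = 0; thrd < 8; thrd++)
		if (mask & (1UL << thrd))
			printf("thread %d enabled\n", thrd);	/* prints 2, 3, 4 */
	return 0;
}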
+
+static const char *const adf_6xxx_fw_objs[] = {
+ [ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ,
+ [ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ,
+ [ADF_FW_ADMIN_OBJ] = ADF_6XXX_ADMIN_OBJ,
+};
+
+static const struct adf_fw_config adf_default_fw_config[] = {
+ { ADF_AE_GROUP_1, ADF_FW_DC_OBJ },
+ { ADF_AE_GROUP_0, ADF_FW_CY_OBJ },
+ { ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ },
+};
+
+static struct adf_hw_device_class adf_6xxx_class = {
+ .name = ADF_6XXX_DEVICE_NAME,
+ .type = DEV_6XXX,
+};
+
+static bool services_supported(unsigned long mask)
+{
+ int num_svc;
+
+ if (mask >= BIT(SVC_BASE_COUNT))
+ return false;
+
+ num_svc = hweight_long(mask);
+ switch (num_svc) {
+ case ADF_ONE_SERVICE:
+ return true;
+ case ADF_TWO_SERVICES:
+ case ADF_THREE_SERVICES:
+ return !test_bit(SVC_DCC, &mask);
+ default:
+ return false;
+ }
+}
+
+static int get_service(unsigned long *mask)
+{
+ if (test_and_clear_bit(SVC_ASYM, mask))
+ return SVC_ASYM;
+
+ if (test_and_clear_bit(SVC_SYM, mask))
+ return SVC_SYM;
+
+ if (test_and_clear_bit(SVC_DC, mask))
+ return SVC_DC;
+
+ if (test_and_clear_bit(SVC_DCC, mask))
+ return SVC_DCC;
+
+ return -EINVAL;
+}
+
+static enum adf_cfg_service_type get_ring_type(enum adf_services service)
+{
+ switch (service) {
+ case SVC_SYM:
+ return SYM;
+ case SVC_ASYM:
+ return ASYM;
+ case SVC_DC:
+ case SVC_DCC:
+ return COMP;
+ default:
+ return UNUSED;
+ }
+}
+
+static const unsigned long *get_thrd_mask(enum adf_services service)
+{
+ switch (service) {
+ case SVC_SYM:
+ return thrd_mask_sym;
+ case SVC_ASYM:
+ return thrd_mask_asym;
+ case SVC_DC:
+ return thrd_mask_cpr;
+ case SVC_DCC:
+ return thrd_mask_dcc;
+ default:
+ return NULL;
+ }
+}
+
+static int get_rp_config(struct adf_accel_dev *accel_dev, struct adf_ring_config *rp_config,
+ unsigned int *num_services)
+{
+ unsigned int i, nservices;
+ unsigned long mask;
+ int ret, service;
+
+ ret = adf_get_service_mask(accel_dev, &mask);
+ if (ret)
+ return ret;
+
+ nservices = hweight_long(mask);
+ if (nservices > MAX_NUM_CONCURR_SVC)
+ return -EINVAL;
+
+ for (i = 0; i < nservices; i++) {
+ service = get_service(&mask);
+ if (service < 0)
+ return service;
+
+ rp_config[i].ring_type = get_ring_type(service);
+ rp_config[i].thrd_mask = get_thrd_mask(service);
+
+ /*
+ * If there is only one service enabled, use all ring pairs for
+ * that service.
+ * If there are two services enabled, use ring pairs 0 and 2 for
+ * one service and ring pairs 1 and 3 for the other service.
+ */
+ switch (nservices) {
+ case ADF_ONE_SERVICE:
+ rp_config[i].ring_mask = RP_GROUP_ALL_MASK;
+ break;
+ case ADF_TWO_SERVICES:
+ rp_config[i].ring_mask = rmask_two_services[i];
+ break;
+ case ADF_THREE_SERVICES:
+ rp_config[i].ring_mask = BIT(i);
+
+ /* If ASYM is enabled, use additional ring pair */
+ if (service == SVC_ASYM)
+ rp_config[i].ring_mask |= BIT(RP3);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ *num_services = nservices;
+
+ return 0;
+}
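
For example, with two services enabled (say sym and dc), get_rp_config() above hands one service ring pairs 0 and 2 (RP_GROUP_0_MASK) and the other ring pairs 1 and 3 (RP_GROUP_1_MASK); with three services each gets one ring pair and asym additionally claims RP3. A compact illustration of the two-service split:

#include <stdio.h>

#define RP_GROUP_0_MASK 0x5	/* ring pairs 0 and 2 */
#define RP_GROUP_1_MASK 0xA	/* ring pairs 1 and 3 */

int main(void)
{
	/* Hypothetical two-service configuration: index 0 = sym, index 1 = dc. */
	unsigned int ring_mask[2] = { RP_GROUP_0_MASK, RP_GROUP_1_MASK };

	for (int svc = 0; svc < 2; svc++)
		for (int rp = 0; rp < 4; rp++)
			if (ring_mask[svc] & (1U << rp))
				printf("service %d uses ring pair %d\n", svc, rp);
	return 0;
}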
+
+static u32 adf_gen6_get_arb_mask(struct adf_accel_dev *accel_dev, unsigned int ae)
+{
+ struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC];
+ unsigned int num_services, i, thrd;
+ u32 ring_mask, thd2arb_mask = 0;
+ const unsigned long *p_mask;
+
+ if (get_rp_config(accel_dev, rp_config, &num_services))
+ return 0;
+
+ /*
+ * The thd2arb_mask maps ring pairs to threads within an accelerator engine.
+ * It ensures that jobs submitted to ring pairs are scheduled on threads capable
+ * of handling the specified service type.
+ *
+ * Each group of 4 bits in the mask corresponds to a thread, with each bit
+ * indicating whether a job from a ring pair can be scheduled on that thread.
+ * The use of 4 bits is due to the organization of ring pairs into groups of
+ * four, where each group shares the same configuration.
+ */
+ for (i = 0; i < num_services; i++) {
+ p_mask = &rp_config[i].thrd_mask[ae];
+ ring_mask = rp_config[i].ring_mask;
+
+ for_each_set_bit(thrd, p_mask, ADF_NUM_THREADS_PER_AE)
+ thd2arb_mask |= ring_mask << (thrd * 4);
+ }
+
+ return thd2arb_mask;
+}
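
Putting numbers on the comment above: if a service owns ring pairs 0 and 2 (ring_mask = 0x5) and its thread mask for this engine enables threads 2 and 3, the ring mask lands in the 4-bit nibbles for those threads, giving 0x5500. A worked sketch of that packing:

#include <stdio.h>

int main(void)
{
	unsigned int ring_mask = 0x5;	/* ring pairs 0 and 2 */
	unsigned long thrd_mask = 0xC;	/* threads 2 and 3 on this engine */
	unsigned int thd2arb_mask = 0;

	for (int thrd = 0; thrd < 8; thrd++)
		if (thrd_mask & (1UL << thrd))
			thd2arb_mask |= ring_mask << (thrd * 4);

	printf("thd2arb_mask = %#x\n", thd2arb_mask);	/* 0x5500 */
	return 0;
}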
+
+static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
+{
+ enum adf_cfg_service_type rps[ADF_GEN6_NUM_BANKS_PER_VF] = { };
+ struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC];
+ unsigned int num_services, rp_num, i;
+ unsigned long cfg_mask;
+ u16 ring_to_svc_map;
+
+ if (get_rp_config(accel_dev, rp_config, &num_services))
+ return 0;
+
+ /*
+	 * Loop through the configured services and populate the `rps` array, which
+	 * records the service each ring pair handles (i.e. symmetric crypto,
+	 * asymmetric crypto, data compression or compression chaining).
+ */
+ for (i = 0; i < num_services; i++) {
+ cfg_mask = rp_config[i].ring_mask;
+ for_each_set_bit(rp_num, &cfg_mask, ADF_GEN6_NUM_BANKS_PER_VF)
+ rps[rp_num] = rp_config[i].ring_type;
+ }
+
+ /*
+	 * The ring_to_svc_map is structured into 3-bit segments, with each
+	 * segment representing the service configuration for a specific ring pair.
+	 * Since ring pairs are organized into groups of 4, ring_to_svc_map
+	 * contains 4 such 3-bit segments, each corresponding to one ring pair.
+ *
+ * The device has 64 ring pairs, which are organized in groups of 4, namely
+ * 16 groups. Each group has the same configuration, represented here by
+ * `ring_to_svc_map`.
+ */
+ ring_to_svc_map = rps[RP0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
+ rps[RP1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
+ rps[RP2] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
+ rps[RP3] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
+
+ return ring_to_svc_map;
+}
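
The packing described above puts one 3-bit service code per ring pair into a 16-bit map. Assuming the ring-pair shifts are 0, 3, 6 and 9 (the 3-bit layout the comment describes) and using made-up service codes, the packing reduces to:

#include <stdio.h>

int main(void)
{
	/* Hypothetical service codes for the four ring pairs in a group. */
	unsigned short asym = 2, sym = 1;
	unsigned short rps[4] = { asym, sym, asym, sym };
	unsigned short map = 0;

	for (int rp = 0; rp < 4; rp++)
		map |= rps[rp] << (rp * 3);	/* 3 bits per ring pair */

	printf("ring_to_svc_map = %#x\n", map);	/* 0x28a with these codes */
	return 0;
}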
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_ACCELERATORS_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+ return self ? hweight32(self->ae_mask) : 0;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_1;
+}
+
+static void get_arb_info(struct arb_info *arb_info)
+{
+ arb_info->arb_cfg = ADF_GEN6_ARB_CONFIG;
+ arb_info->arb_offset = ADF_GEN6_ARB_OFFSET;
+ arb_info->wt2sam_offset = ADF_GEN6_ARB_WRK_2_SER_MAP_OFFSET;
+}
+
+static void get_admin_info(struct admin_info *admin_csrs_info)
+{
+ admin_csrs_info->mailbox_offset = ADF_GEN6_MAILBOX_BASE_OFFSET;
+ admin_csrs_info->admin_msg_ur = ADF_GEN6_ADMINMSGUR_OFFSET;
+ admin_csrs_info->admin_msg_lr = ADF_GEN6_ADMINMSGLR_OFFSET;
+}
+
+static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_COUNTER_FREQ;
+}
+
+static void enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ /*
+ * Enable all error notification bits in errsou3 except VFLR
+ * notification on host.
+ */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_VFLNOTIFY);
+}
+
+static void enable_ints(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+
+ /* Enable bundle interrupts */
+ ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X0_MASK_OFFSET, 0);
+ ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X1_MASK_OFFSET, 0);
+
+ /* Enable misc interrupts */
+ ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_MASK_OFFSET, 0);
+}
+
+static void set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+ u64 val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+ u64 val = ADF_SSM_WDT_DEFAULT_VALUE;
+
+ /* Enable watchdog timer for sym and dc */
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTATHL_OFFSET, ADF_SSMWDTATHH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTCNVL_OFFSET, ADF_SSMWDTCNVH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTUCSL_OFFSET, ADF_SSMWDTUCSH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTDCPRL_OFFSET, ADF_SSMWDTDCPRH_OFFSET, val);
+
+ /* Enable watchdog timer for pke */
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, val_pke);
+}
+
+/*
+ * The vector routing table is used to select the MSI-X entry to use for each
+ * interrupt source.
+ * The first ADF_GEN6_ETR_MAX_BANKS entries correspond to ring interrupts.
+ * The final entry corresponds to VF2PF or error interrupts.
+ * This vector table could be used to configure one MSI-X entry to be shared
+ * between multiple interrupt sources.
+ *
+ * The default routing is set to have a one to one correspondence between the
+ * interrupt source and the MSI-X entry used.
+ */
+static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ unsigned int i;
+
+ for (i = 0; i <= ADF_GEN6_ETR_MAX_BANKS; i++)
+ ADF_CSR_WR(csr, ADF_GEN6_MSIX_RTTABLE_OFFSET(i), i);
+}
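
The loop above programs the default one-to-one routing (entry i gets value i). As the comment notes, the same table could in principle funnel several interrupt sources into a single MSI-X entry; purely as an illustration of that reading, and not something this driver does, sharing vector 0 would amount to:

/* Illustration only: collapse every interrupt source onto MSI-X entry 0. */
for (i = 0; i <= ADF_GEN6_ETR_MAX_BANKS; i++)
	ADF_CSR_WR(csr, ADF_GEN6_MSIX_RTTABLE_OFFSET(i), 0);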
+
+static int reset_ring_pair(void __iomem *csr, u32 bank_number)
+{
+ u32 status;
+ int ret;
+
+ /*
+ * Write rpresetctl register BIT(0) as 1.
+ * Since rpresetctl registers have no RW fields, no need to preserve
+ * values for other bits. Just write directly.
+ */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
+ ADF_WQM_CSR_RPRESETCTL_RESET);
+
+ /* Read rpresetsts register and wait for rp reset to complete */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_WQM_CSR_RPRESETSTS_STATUS,
+ ADF_RPRESET_POLL_DELAY_US,
+ ADF_RPRESET_POLL_TIMEOUT_US, true,
+ csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
+ if (ret)
+ return ret;
+
+ /* When ring pair reset is done, clear rpresetsts */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number), ADF_WQM_CSR_RPRESETSTS_STATUS);
+
+ return 0;
+}
+
+static int ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+ int ret;
+
+ if (bank_number >= hw_data->num_banks)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "ring pair reset for bank:%d\n", bank_number);
+
+ ret = reset_ring_pair(csr, bank_number);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "ring pair reset failed (timeout)\n");
+ else
+ dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
+
+ return ret;
+}
+
+static int build_comp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_hw_comp_51_config_csr_lower hw_comp_lower_csr = { };
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ u32 lower_val;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_DISABLED;
+ hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1;
+ lower_val = ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+
+ return 0;
+}
+
+static int build_decomp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = 0;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+
+ return 0;
+}
+
+static void adf_gen6_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_comp_block = build_comp_block;
+ dc_ops->build_decomp_block = build_decomp_block;
+}
+
+static int adf_gen6_init_thd2arb_map(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u32 *thd2arb_map = hw_data->thd_to_arb_map;
+ unsigned int i;
+
+ for (i = 0; i < hw_data->num_engines; i++) {
+ thd2arb_map[i] = adf_gen6_get_arb_mask(accel_dev, i);
+ dev_dbg(&GET_DEV(accel_dev), "ME:%d arb_mask:%#x\n", i, thd2arb_map[i]);
+ }
+
+ return 0;
+}
+
+static void set_vc_csr_for_bank(void __iomem *csr, u32 bank_number)
+{
+ u32 value;
+
+ /*
+ * After each PF FLR, for each of the 64 ring pairs in the PF, the
+ * driver must program the ringmodectl CSRs.
+ */
+ value = ADF_CSR_RD(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number));
+ value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_MASK, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
+ value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_EN_MASK, ADF_GEN6_RINGMODECTL_TC_EN_OP1);
+ ADF_CSR_WR(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number), value);
+}
+
+static int set_vc_config(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ u32 value;
+ int err;
+
+ /*
+ * After each PF FLR, the driver must program the Port Virtual Channel (VC)
+ * Control Registers.
+ * Read PVC0CTL then write the masked values.
+ */
+ pci_read_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, &value);
+ value |= FIELD_PREP(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT);
+ err = pci_write_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, value);
+ if (err) {
+ dev_err(&GET_DEV(accel_dev), "pci write to PVC0CTL failed\n");
+ return pcibios_err_to_errno(err);
+ }
+
+ /* Read PVC1CTL then write masked values */
+ pci_read_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, &value);
+ value |= FIELD_PREP(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT);
+ value |= FIELD_PREP(ADF_GEN6_PVC1CTL_VCEN_MASK, ADF_GEN6_PVC1CTL_VCEN_ON);
+ err = pci_write_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, value);
+ if (err)
+ dev_err(&GET_DEV(accel_dev), "pci write to PVC1CTL failed\n");
+
+ return pcibios_err_to_errno(err);
+}
+
+static int adf_gen6_set_vc(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+ u32 i;
+
+ for (i = 0; i < hw_data->num_banks; i++) {
+ dev_dbg(&GET_DEV(accel_dev), "set virtual channels for bank:%d\n", i);
+ set_vc_csr_for_bank(csr, i);
+ }
+
+ return set_vc_config(accel_dev);
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ unsigned long fuses = self->fuses[ADF_FUSECTL4];
+ u32 mask = ADF_6XXX_ACCELENGINES_MASK;
+
+ /*
+ * If bit 0 is set in the fuses, the first 4 engines are disabled.
+ * If bit 4 is set, the second group of 4 engines (4-7) is disabled.
+ * If bit 8 is set, the admin engine (bit 8) is disabled.
+ */
+ if (test_bit(0, &fuses))
+ mask &= ~ADF_AE_GROUP_0;
+
+ if (test_bit(4, &fuses))
+ mask &= ~ADF_AE_GROUP_1;
+
+ if (test_bit(8, &fuses))
+ mask &= ~ADF_AE_GROUP_2;
+
+ return mask;
+}
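
Following the comment above: the full engine mask is 0x1ff (engines 0-8); if, say, fuse bit 4 is blown, engines 4-7 drop out and the mask becomes 0x10f. A worked sketch with the masks written out numerically:

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x1FF;		/* engines 0-8 present */
	unsigned long fuses = 1UL << 4;		/* example: fuse bit 4 blown */

	if (fuses & (1UL << 0))
		mask &= ~0x00FU;		/* engines 0-3 disabled */
	if (fuses & (1UL << 4))
		mask &= ~0x0F0U;		/* engines 4-7 disabled */
	if (fuses & (1UL << 8))
		mask &= ~0x100U;		/* admin engine disabled */

	printf("ae_mask = %#x\n", mask);	/* 0x10f */
	return 0;
}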
+
+static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+ u32 capabilities_sym, capabilities_asym;
+ u32 capabilities_dc;
+ unsigned long mask;
+ u32 caps = 0;
+ u32 fusectl1;
+
+ fusectl1 = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1];
+
+ /* Read accelerator capabilities mask */
+ capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_SHA3 |
+ ICP_ACCEL_CAPABILITIES_SHA3_EXT |
+ ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+ ICP_ACCEL_CAPABILITIES_AES_V2;
+
+ /* A set bit in fusectl1 means the corresponding feature is OFF in this SKU */
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_UCS_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_AUTH_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ capabilities_asym = 0;
+
+ capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_CPR_SLICE) {
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+ }
+
+ if (adf_get_service_mask(accel_dev, &mask))
+ return 0;
+
+ if (test_bit(SVC_ASYM, &mask))
+ caps |= capabilities_asym;
+ if (test_bit(SVC_SYM, &mask))
+ caps |= capabilities_sym;
+ if (test_bit(SVC_DC, &mask))
+ caps |= capabilities_dc;
+ if (test_bit(SVC_DCC, &mask)) {
+ /*
+ * Sym capabilities are available for chaining operations,
+ * but sym crypto instances cannot be supported
+ */
+ caps = capabilities_dc | capabilities_sym;
+ caps &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ }
+
+ return caps;
+}
+
+static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
+{
+ return ARRAY_SIZE(adf_default_fw_config);
+}
+
+static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ int num_fw_objs = ARRAY_SIZE(adf_6xxx_fw_objs);
+ int id;
+
+ id = adf_default_fw_config[obj_num].obj;
+ if (id >= num_fw_objs)
+ return NULL;
+
+ return adf_6xxx_fw_objs[id];
+}
+
+static const char *uof_get_name_6xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ return uof_get_name(accel_dev, obj_num);
+}
+
+static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return -EINVAL;
+
+ return adf_default_fw_config[obj_num].obj;
+}
+
+static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ return adf_default_fw_config[obj_num].ae_mask;
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+ if (adf_gen6_init_thd2arb_map(accel_dev))
+ dev_warn(&GET_DEV(accel_dev),
+ "Failed to generate thread to arbiter mapping");
+
+ return GET_HW_DATA(accel_dev)->thd_to_arb_map;
+}
+
+static int adf_init_device(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+ u32 status;
+ u32 csr;
+ int ret;
+
+ /* Temporarily mask PM interrupt */
+ csr = ADF_CSR_RD(addr, ADF_GEN6_ERRMSK2);
+ csr |= ADF_GEN6_PM_SOU;
+ ADF_CSR_WR(addr, ADF_GEN6_ERRMSK2, csr);
+
+ /* Set DRV_ACTIVE bit to power up the device */
+ ADF_CSR_WR(addr, ADF_GEN6_PM_INTERRUPT, ADF_GEN6_PM_DRV_ACTIVE);
+
+ /* Poll status register to make sure the device is powered up */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_GEN6_PM_INIT_STATE,
+ ADF_GEN6_PM_POLL_DELAY_US,
+ ADF_GEN6_PM_POLL_TIMEOUT_US, true, addr,
+ ADF_GEN6_PM_STATUS);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
+ return ret;
+ }
+
+ dev_dbg(&GET_DEV(accel_dev), "Setting virtual channels for device qat_dev%d\n",
+ accel_dev->accel_id);
+
+ ret = adf_gen6_set_vc(accel_dev);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to set virtual channels\n");
+
+ return ret;
+}
+
+static int enable_pm(struct adf_accel_dev *accel_dev)
+{
+ return adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER);
+}
+
+static int dev_config(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+ if (ret)
+ return ret;
+
+ ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+ if (ret)
+ return ret;
+
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_DC:
+ case SVC_DCC:
+ ret = adf_gen6_comp_dev_config(accel_dev);
+ break;
+ default:
+ ret = adf_gen6_no_dev_config(accel_dev);
+ break;
+ }
+ if (ret)
+ return ret;
+
+ __set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ return ret;
+}
+
+void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &adf_6xxx_class;
+ hw_data->instance_id = adf_6xxx_class.instances++;
+ hw_data->num_banks = ADF_GEN6_ETR_MAX_BANKS;
+ hw_data->num_banks_per_vf = ADF_GEN6_NUM_BANKS_PER_VF;
+ hw_data->num_rings_per_bank = ADF_GEN6_NUM_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_GEN6_MAX_ACCELERATORS;
+ hw_data->num_engines = ADF_6XXX_MAX_ACCELENGINES;
+ hw_data->num_logical_accel = 1;
+ hw_data->tx_rx_gap = ADF_GEN6_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN6_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = 0;
+ hw_data->alloc_irq = adf_isr_resource_alloc;
+ hw_data->free_irq = adf_isr_resource_free;
+ hw_data->enable_error_correction = enable_error_correction;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+ hw_data->get_num_aes = get_num_aes;
+ hw_data->get_sram_bar_id = get_sram_bar_id;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_arb_info = get_arb_info;
+ hw_data->get_admin_info = get_admin_info;
+ hw_data->get_accel_cap = get_accel_cap;
+ hw_data->get_sku = get_sku;
+ hw_data->init_admin_comms = adf_init_admin_comms;
+ hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->send_admin_init = adf_send_admin_init;
+ hw_data->init_arb = adf_init_arb;
+ hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+ hw_data->enable_ints = enable_ints;
+ hw_data->reset_device = adf_reset_flr;
+ hw_data->admin_ae_mask = ADF_6XXX_ADMIN_AE_MASK;
+ hw_data->fw_name = ADF_6XXX_FW;
+ hw_data->fw_mmp_name = ADF_6XXX_MMP;
+ hw_data->uof_get_name = uof_get_name_6xxx;
+ hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_obj_type = uof_get_obj_type;
+ hw_data->uof_get_ae_mask = uof_get_ae_mask;
+ hw_data->set_msix_rttable = set_msix_default_rttable;
+ hw_data->set_ssm_wdtimer = set_ssm_wdtimer;
+ hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->ring_pair_reset = ring_pair_reset;
+ hw_data->dev_config = dev_config;
+ hw_data->get_hb_clock = get_heartbeat_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->start_timer = adf_timer_start;
+ hw_data->stop_timer = adf_timer_stop;
+ hw_data->init_device = adf_init_device;
+ hw_data->enable_pm = enable_pm;
+ hw_data->services_supported = services_supported;
+
+ adf_gen6_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen6_init_dc_ops(&hw_data->dc_ops);
+ adf_gen6_init_ras_ops(&hw_data->ras_ops);
+}
+
+void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data)
+{
+ if (hw_data->dev_class->instances)
+ hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
new file mode 100644
index 000000000000..78e2e2c5816e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_6XXX_HW_DATA_H_
+#define ADF_6XXX_HW_DATA_H_
+
+#include <linux/bits.h>
+#include <linux/time.h>
+#include <linux/units.h>
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+#include "adf_dc.h"
+
+/* PCIe configuration space */
+#define ADF_GEN6_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
+#define ADF_GEN6_SRAM_BAR 0
+#define ADF_GEN6_PMISC_BAR 1
+#define ADF_GEN6_ETR_BAR 2
+#define ADF_6XXX_MAX_ACCELENGINES 9
+
+/* Clocks frequency */
+#define ADF_GEN6_COUNTER_FREQ (100 * HZ_PER_MHZ)
+
+/* Physical function fuses */
+#define ADF_GEN6_FUSECTL0_OFFSET 0x2C8
+#define ADF_GEN6_FUSECTL1_OFFSET 0x2CC
+#define ADF_GEN6_FUSECTL4_OFFSET 0x2D8
+
+/* Accelerators */
+#define ADF_GEN6_ACCELERATORS_MASK 0x1
+#define ADF_GEN6_MAX_ACCELERATORS 1
+
+/* MSI-X interrupt */
+#define ADF_GEN6_SMIAPF_RP_X0_MASK_OFFSET 0x41A040
+#define ADF_GEN6_SMIAPF_RP_X1_MASK_OFFSET 0x41A044
+#define ADF_GEN6_SMIAPF_MASK_OFFSET 0x41A084
+#define ADF_GEN6_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 4))
+
+/* Bank and ring configuration */
+#define ADF_GEN6_NUM_RINGS_PER_BANK 2
+#define ADF_GEN6_NUM_BANKS_PER_VF 4
+#define ADF_GEN6_ETR_MAX_BANKS 64
+#define ADF_GEN6_RX_RINGS_OFFSET 1
+#define ADF_GEN6_TX_RINGS_MASK 0x1
+
+/* Arbiter configuration */
+#define ADF_GEN6_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
+#define ADF_GEN6_ARB_OFFSET 0x000
+#define ADF_GEN6_ARB_WRK_2_SER_MAP_OFFSET 0x400
+
+/* Admin interface configuration */
+#define ADF_GEN6_ADMINMSGUR_OFFSET 0x500574
+#define ADF_GEN6_ADMINMSGLR_OFFSET 0x500578
+#define ADF_GEN6_MAILBOX_BASE_OFFSET 0x600970
+
+/*
+ * Watchdog timers
+ * Timeout is in cycles. Clock speed may vary across products, but this
+ * value should correspond to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000ULL
+#define ADF_SSMWDTATHL_OFFSET 0x5208
+#define ADF_SSMWDTATHH_OFFSET 0x520C
+#define ADF_SSMWDTCNVL_OFFSET 0x5408
+#define ADF_SSMWDTCNVH_OFFSET 0x540C
+#define ADF_SSMWDTUCSL_OFFSET 0x5808
+#define ADF_SSMWDTUCSH_OFFSET 0x580C
+#define ADF_SSMWDTDCPRL_OFFSET 0x5A08
+#define ADF_SSMWDTDCPRH_OFFSET 0x5A0C
+#define ADF_SSMWDTPKEL_OFFSET 0x5E08
+#define ADF_SSMWDTPKEH_OFFSET 0x5E0C
+
+/* Ring reset */
+#define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
+#define ADF_RPRESET_POLL_DELAY_US 20
+#define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0)
+#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + (bank) * 8)
+#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
+#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+
+/* Controls the mode of operation of the corresponding ring */
+#define ADF_GEN6_CSR_RINGMODECTL(bank) (0x9000 + (bank) * 4)
+
+/* Specifies the traffic class to use for the transactions to/from the ring */
+#define ADF_GEN6_RINGMODECTL_TC_MASK GENMASK(18, 16)
+#define ADF_GEN6_RINGMODECTL_TC_DEFAULT 0x7
+
+/* Specifies how the tc field is used for transactions to/from this ring */
+#define ADF_GEN6_RINGMODECTL_TC_EN_MASK GENMASK(20, 19)
+
+/*
+ * Use the value programmed in the tc field for request descriptor
+ * and metadata read transactions
+ */
+#define ADF_GEN6_RINGMODECTL_TC_EN_OP1 0x1
+
+/* VC0 Resource Control Register */
+#define ADF_GEN6_PVC0CTL_OFFSET 0x204
+#define ADF_GEN6_PVC0CTL_TCVCMAP_OFFSET 1
+#define ADF_GEN6_PVC0CTL_TCVCMAP_MASK GENMASK(7, 1)
+#define ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT 0x7F
+
+/* VC1 Resource Control Register */
+#define ADF_GEN6_PVC1CTL_OFFSET 0x210
+#define ADF_GEN6_PVC1CTL_TCVCMAP_OFFSET 1
+#define ADF_GEN6_PVC1CTL_TCVCMAP_MASK GENMASK(7, 1)
+#define ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT 0x40
+#define ADF_GEN6_PVC1CTL_VCEN_OFFSET 31
+#define ADF_GEN6_PVC1CTL_VCEN_MASK BIT(31)
+/* RW bit: 0x1 - enables a Virtual Channel, 0x0 - disables */
+#define ADF_GEN6_PVC1CTL_VCEN_ON 0x1
+
+/* Error source mask registers */
+#define ADF_GEN6_ERRMSK0 0x41A210
+#define ADF_GEN6_ERRMSK1 0x41A214
+#define ADF_GEN6_ERRMSK2 0x41A218
+#define ADF_GEN6_ERRMSK3 0x41A21C
+
+#define ADF_GEN6_VFLNOTIFY BIT(7)
+
+/* Number of heartbeat counter pairs */
+#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+
+/* Physical function fuses */
+#define ADF_6XXX_ACCELENGINES_MASK GENMASK(8, 0)
+#define ADF_6XXX_ADMIN_AE_MASK GENMASK(8, 8)
+
+/* Firmware binaries */
+#define ADF_6XXX_FW "qat_6xxx.bin"
+#define ADF_6XXX_MMP "qat_6xxx_mmp.bin"
+#define ADF_6XXX_CY_OBJ "qat_6xxx_cy.bin"
+#define ADF_6XXX_DC_OBJ "qat_6xxx_dc.bin"
+#define ADF_6XXX_ADMIN_OBJ "qat_6xxx_admin.bin"
+
+enum icp_qat_gen6_slice_mask {
+ ICP_ACCEL_GEN6_MASK_UCS_SLICE = BIT(0),
+ ICP_ACCEL_GEN6_MASK_AUTH_SLICE = BIT(1),
+ ICP_ACCEL_GEN6_MASK_PKE_SLICE = BIT(2),
+ ICP_ACCEL_GEN6_MASK_CPR_SLICE = BIT(3),
+ ICP_ACCEL_GEN6_MASK_DCPRZ_SLICE = BIT(4),
+ ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE = BIT(6),
+};
+
+void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data);
+
+#endif /* ADF_6XXX_HW_DATA_H_ */
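The fuse masks above are where this header encodes the split between the nine acceleration engines and the admin engine: GENMASK(8, 0) covers engines 0-8, while GENMASK(8, 8) selects engine 8 alone. The following is a small, self-contained userspace check of those two values (not kernel code; GENMASK is redefined locally purely for illustration):

#include <stdio.h>

/* Local stand-in for the kernel's GENMASK(): set bits l..h inclusive. */
#define MY_GENMASK(h, l) \
	(((~0u) >> (31 - (h))) & ((~0u) << (l)))

int main(void)
{
	unsigned int engines_mask = MY_GENMASK(8, 0);	/* ADF_6XXX_ACCELENGINES_MASK */
	unsigned int admin_mask   = MY_GENMASK(8, 8);	/* ADF_6XXX_ADMIN_AE_MASK */

	printf("engines: %#x (9 engines)\n", engines_mask);	/* prints 0x1ff */
	printf("admin:   %#x (engine 8 only)\n", admin_mask);	/* prints 0x100 */
	return 0;
}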
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
new file mode 100644
index 000000000000..c1dc9c56fdf5
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/array_size.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include <adf_accel_devices.h>
+#include <adf_cfg.h>
+#include <adf_common_drv.h>
+#include <adf_dbgfs.h>
+
+#include "adf_gen6_shared.h"
+#include "adf_6xxx_hw_data.h"
+
+static int bar_map[] = {
+ 0, /* SRAM */
+ 2, /* PMISC */
+ 4, /* ETR */
+};
+
+static void adf_device_down(void *accel_dev)
+{
+ adf_dev_down(accel_dev);
+}
+
+static void adf_dbgfs_cleanup(void *accel_dev)
+{
+ adf_dbgfs_exit(accel_dev);
+}
+
+static void adf_cfg_device_remove(void *accel_dev)
+{
+ adf_cfg_dev_remove(accel_dev);
+}
+
+static void adf_cleanup_hw_data(void *accel_dev)
+{
+ struct adf_accel_dev *accel_device = accel_dev;
+
+ if (accel_device->hw_device) {
+ adf_clean_hw_data_6xxx(accel_device->hw_device);
+ accel_device->hw_device = NULL;
+ }
+}
+
+static void adf_devmgr_remove(void *accel_dev)
+{
+ adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ struct device *dev = &pdev->dev;
+ struct adf_accel_dev *accel_dev;
+ struct adf_bar *bar;
+ unsigned int i;
+ int ret;
+
+ if (num_possible_nodes() > 1 && dev_to_node(dev) < 0) {
+ /*
+ * If the accelerator is connected to a node with no memory,
+ * there is no point in using it since remote memory
+ * transactions will be very slow.
+ */
+ return dev_err_probe(dev, -EINVAL, "Invalid NUMA configuration.\n");
+ }
+
+ accel_dev = devm_kzalloc(dev, sizeof(*accel_dev), GFP_KERNEL);
+ if (!accel_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ INIT_LIST_HEAD(&accel_dev->list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+ accel_dev->owner = THIS_MODULE;
+
+ hw_data = devm_kzalloc(dev, sizeof(*hw_data), GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+ pci_read_config_dword(pdev, ADF_GEN6_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]);
+ pci_read_config_dword(pdev, ADF_GEN6_FUSECTL0_OFFSET, &hw_data->fuses[ADF_FUSECTL0]);
+ pci_read_config_dword(pdev, ADF_GEN6_FUSECTL1_OFFSET, &hw_data->fuses[ADF_FUSECTL1]);
+
+ if (!(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE))
+ return dev_err_probe(dev, -EFAULT, "Wireless mode is not supported.\n");
+
+ /* Enable PCI device */
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable PCI device.\n");
+
+ ret = adf_devmgr_add_dev(accel_dev, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add new accelerator device.\n");
+
+ ret = devm_add_action_or_reset(dev, adf_devmgr_remove, accel_dev);
+ if (ret)
+ return ret;
+
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_6xxx(accel_dev->hw_device);
+
+ ret = devm_add_action_or_reset(dev, adf_cleanup_hw_data, accel_dev);
+ if (ret)
+ return ret;
+
+ /* Get Accelerators and Accelerator Engine masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+ /* If the device has no acceleration engines then ignore it */
+ if (!hw_data->accel_mask || !hw_data->ae_mask ||
+ (~hw_data->ae_mask & ADF_GEN6_ACCELERATORS_MASK)) {
+ ret = -EFAULT;
+ return dev_err_probe(dev, ret, "No acceleration units were found.\n");
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adf_cfg_device_remove, accel_dev);
+ if (ret)
+ return ret;
+
+ /* Set DMA identifier */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return dev_err_probe(dev, ret, "No usable DMA configuration.\n");
+
+ ret = adf_gen6_cfg_dev_init(accel_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize configuration.\n");
+
+ /* Get accelerator capability mask */
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ if (!hw_data->accel_capabilities_mask) {
+ ret = -EINVAL;
+ return dev_err_probe(dev, ret, "Failed to get capabilities mask.\n");
+ }
+
+ for (i = 0; i < ARRAY_SIZE(bar_map); i++) {
+ bar = &accel_pci_dev->pci_bars[i];
+
+ /* Map 64-bit PCIe BAR */
+ bar->virt_addr = pcim_iomap_region(pdev, bar_map[i], pci_name(pdev));
+ if (IS_ERR(bar->virt_addr)) {
+ ret = PTR_ERR(bar->virt_addr);
+ return dev_err_probe(dev, ret, "Failed to ioremap PCI region.\n");
+ }
+ }
+
+ pci_set_master(pdev);
+
+ /*
+ * The PCI config space is saved at this point and will be restored
+ * after a Function Level Reset (FLR) as the FLR does not completely
+ * restore it.
+ */
+ ret = pci_save_state(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to save pci state.\n");
+
+ accel_dev->ras_errors.enabled = true;
+
+ adf_dbgfs_init(accel_dev);
+
+ ret = devm_add_action_or_reset(dev, adf_dbgfs_cleanup, accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_dev_up(accel_dev, true);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adf_device_down, accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_sysfs_init(accel_dev);
+
+ return ret;
+}
+
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_6XXX) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_6XXX_DEVICE_NAME,
+ .probe = adf_probe,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+module_pci_driver(adf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_6XXX_FW);
+MODULE_FIRMWARE(ADF_6XXX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology for GEN6 Devices");
+MODULE_SOFTDEP("pre: crypto-intel_qat");
+MODULE_IMPORT_NS("CRYPTO_QAT");
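Unlike the older qat_c3xxx/qat_c62x drivers further down in this series, the probe above has no explicit error-unwind ladder and no .remove callback: each teardown step is registered with devm_add_action_or_reset(), so the actions run in reverse registration order when probe fails part-way or when the device is unbound, and only .shutdown is provided separately. A minimal userspace sketch of that last-in, first-out behaviour (an analogy only, with made-up step names, not the devres implementation):

#include <stdio.h>

typedef void (*cleanup_fn)(void);

static cleanup_fn actions[8];
static int nr_actions;

/* Analogue of devm_add_action_or_reset(): remember a cleanup step. */
static void add_action(cleanup_fn fn)
{
	actions[nr_actions++] = fn;
}

/* Analogue of devres teardown: run the remembered steps in reverse order. */
static void run_actions(void)
{
	while (nr_actions > 0)
		actions[--nr_actions]();
}

static void devmgr_remove(void)  { puts("remove from device manager"); }
static void hw_data_clean(void)  { puts("clean hw_data"); }
static void cfg_remove(void)     { puts("remove config table"); }
static void dbgfs_cleanup(void)  { puts("tear down debugfs"); }
static void device_down(void)    { puts("bring device down"); }

int main(void)
{
	/* Same relative order as the registrations in the probe above. */
	add_action(devmgr_remove);
	add_action(hw_data_clean);
	add_action(cfg_remove);
	add_action(dbgfs_cleanup);
	add_action(device_down);

	run_actions();	/* runs device_down first, devmgr_remove last */
	return 0;
}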
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
index d9e568572da8..43604c025f0c 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
qat_c3xxx-y := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index e78f7bfd30b8..07f2c42a68f5 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -5,7 +5,6 @@
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -22,7 +21,6 @@ static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = {
static struct adf_hw_device_class c3xxx_class = {
.name = ADF_C3XXX_DEVICE_NAME,
.type = DEV_C3XXX,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index b825b35ab4bf..bceb5dd8b148 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_c3xxx_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_C3XXX_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C3XXX_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
index 31a908a211ac..03f6745b4aa2 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
qat_c3xxxvf-y := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index a512ca4efd3f..db3c33fa1881 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -3,7 +3,6 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -13,7 +12,6 @@
static struct adf_hw_device_class c3xxxiov_class = {
.name = ADF_C3XXXVF_DEVICE_NAME,
.type = DEV_C3XXXVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile
index cbdaaa135e84..f3d722bef088 100644
--- a/drivers/crypto/intel/qat/qat_c62x/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62x/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
qat_c62x-y := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index 32ebe09477a8..0b410b41474d 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -5,7 +5,6 @@
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -22,7 +21,6 @@ static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
static struct adf_hw_device_class c62x_class = {
.name = ADF_C62X_DEVICE_NAME,
.type = DEV_C62X,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index 8a7bdec358d6..23ccb72b6ea2 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_c62x_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_C62X_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C62X_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
index 60e499b041ec..ed7f3f722d99 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
qat_c62xvf-y := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 4aaaaf921734..7f00035d3661 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -3,7 +3,6 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -13,7 +12,6 @@
static struct adf_hw_device_class c62xiov_class = {
.name = ADF_C62XVF_DEVICE_NAME,
.type = DEV_C62XVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index af5df29fd2e3..66bb295ace28 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -8,19 +8,19 @@ intel_qat-y := adf_accel_engine.o \
adf_cfg_services.o \
adf_clock.o \
adf_ctl_drv.o \
+ adf_dc.o \
adf_dev_mgr.o \
adf_gen2_config.o \
- adf_gen2_dc.o \
adf_gen2_hw_csr_data.o \
adf_gen2_hw_data.o \
adf_gen4_config.o \
- adf_gen4_dc.o \
adf_gen4_hw_csr_data.o \
adf_gen4_hw_data.o \
adf_gen4_pm.o \
adf_gen4_ras.o \
- adf_gen4_timer.o \
adf_gen4_vf_mig.o \
+ adf_gen6_ras.o \
+ adf_gen6_shared.o \
adf_hw_arbiter.o \
adf_init.o \
adf_isr.o \
@@ -30,6 +30,7 @@ intel_qat-y := adf_accel_engine.o \
adf_sysfs.o \
adf_sysfs_ras_counters.o \
adf_sysfs_rl.o \
+ adf_timer.o \
adf_transport.o \
qat_algs.o \
qat_algs_send.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index dc21551153cb..2ee526063213 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -12,6 +12,7 @@
#include <linux/qat/qat_mig_dev.h>
#include <linux/wordpart.h>
#include "adf_cfg_common.h"
+#include "adf_dc.h"
#include "adf_rl.h"
#include "adf_telemetry.h"
#include "adf_pfvf_msg.h"
@@ -25,14 +26,18 @@
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_420XX_DEVICE_NAME "420xx"
-#define ADF_4XXX_PCI_DEVICE_ID 0x4940
-#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
-#define ADF_401XX_PCI_DEVICE_ID 0x4942
-#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
-#define ADF_402XX_PCI_DEVICE_ID 0x4944
-#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
-#define ADF_420XX_PCI_DEVICE_ID 0x4946
-#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947
+#define ADF_6XXX_DEVICE_NAME "6xxx"
+#define PCI_DEVICE_ID_INTEL_QAT_4XXX 0x4940
+#define PCI_DEVICE_ID_INTEL_QAT_4XXXIOV 0x4941
+#define PCI_DEVICE_ID_INTEL_QAT_401XX 0x4942
+#define PCI_DEVICE_ID_INTEL_QAT_401XXIOV 0x4943
+#define PCI_DEVICE_ID_INTEL_QAT_402XX 0x4944
+#define PCI_DEVICE_ID_INTEL_QAT_402XXIOV 0x4945
+#define PCI_DEVICE_ID_INTEL_QAT_420XX 0x4946
+#define PCI_DEVICE_ID_INTEL_QAT_420XXIOV 0x4947
+#define PCI_DEVICE_ID_INTEL_QAT_6XXX 0x4948
+#define PCI_DEVICE_ID_INTEL_QAT_6XXX_IOV 0x4949
+
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -267,7 +272,8 @@ struct adf_pfvf_ops {
};
struct adf_dc_ops {
- void (*build_deflate_ctx)(void *ctx);
+ int (*build_comp_block)(void *ctx, enum adf_dc_algo algo);
+ int (*build_decomp_block)(void *ctx, enum adf_dc_algo algo);
};
struct qat_migdev_ops {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index acad526eb741..573388c37100 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -449,6 +449,7 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
+EXPORT_SYMBOL_GPL(adf_init_admin_pm);
int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr,
size_t buff_size)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
index 89df3888d7ea..15fdf9854b81 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
@@ -48,6 +48,7 @@ enum adf_device_type {
DEV_C3XXXVF,
DEV_4XXX,
DEV_420XX,
+ DEV_6XXX,
};
struct adf_dev_status_info {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
index 30abcd9e1283..c39871291da7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
@@ -116,7 +116,7 @@ int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in,
return adf_service_mask_to_string(mask, out, out_len);
}
-static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask)
+int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
size_t len;
@@ -138,6 +138,7 @@ static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *
return ret;
}
+EXPORT_SYMBOL_GPL(adf_get_service_mask);
int adf_get_service_enabled(struct adf_accel_dev *accel_dev)
{
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
index f6bafc15cbc6..3742c450878f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
@@ -32,5 +32,6 @@ enum {
int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in,
size_t in_len, char *out, size_t out_len);
int adf_get_service_enabled(struct adf_accel_dev *accel_dev);
+int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/intel/qat/qat_common/adf_dc.c
index 47261b1c1da6..3e8fb4e3ed97 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dc.c
@@ -1,22 +1,21 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include "adf_accel_devices.h"
-#include "adf_gen2_dc.h"
+#include "adf_dc.h"
#include "icp_qat_fw_comp.h"
-static void qat_comp_build_deflate_ctx(void *ctx)
+int qat_comp_build_ctx(struct adf_accel_dev *accel_dev, void *ctx, enum adf_dc_algo algo)
{
- struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx;
- struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
- struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
- struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl;
+ struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ int ret;
memset(req_tmpl, 0, sizeof(*req_tmpl));
header->hdr_flags =
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
header->comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
QAT_COMN_PTR_TYPE_SGL);
@@ -26,12 +25,14 @@ static void qat_comp_build_deflate_ctx(void *ctx)
ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
- cd_pars->u.sl.comp_slice_cfg_word[0] =
- ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
- ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
- ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
- ICP_QAT_HW_COMPRESSION_DEPTH_1,
- ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ /* Build HW config block for compression */
+ ret = GET_DC_OPS(accel_dev)->build_comp_block(ctx, algo);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to build compression block\n");
+ return ret;
+ }
+
req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
req_pars->req_par_flags =
@@ -45,26 +46,19 @@ static void qat_comp_build_deflate_ctx(void *ctx)
ICP_QAT_FW_COMP_NO_XXHASH_ACC,
ICP_QAT_FW_COMP_CNV_ERROR_NONE,
ICP_QAT_FW_COMP_NO_APPEND_CRC,
- ICP_QAT_FW_COMP_NO_DROP_DATA);
+ ICP_QAT_FW_COMP_NO_DROP_DATA,
+ ICP_QAT_FW_COMP_NO_PARTIAL_DECOMPRESS);
ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP);
/* Fill second half of the template for decompression */
memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
req_tmpl++;
- header = &req_tmpl->comn_hdr;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
- cd_pars = &req_tmpl->cd_pars;
- cd_pars->u.sl.comp_slice_cfg_word[0] =
- ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
- ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
- ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
- ICP_QAT_HW_COMPRESSION_DEPTH_1,
- ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
-}
-void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
-{
- dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx;
+ /* Build HW config block for decompression */
+ ret = GET_DC_OPS(accel_dev)->build_decomp_block(req_tmpl, algo);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to build decompression block\n");
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dc.h b/drivers/crypto/intel/qat/qat_common/adf_dc.h
new file mode 100644
index 000000000000..6cb5e09054a6
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_dc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_DC_H
+#define ADF_DC_H
+
+struct adf_accel_dev;
+
+enum adf_dc_algo {
+ QAT_DEFLATE,
+ QAT_LZ4,
+ QAT_LZ4S,
+ QAT_ZSTD,
+};
+
+int qat_comp_build_ctx(struct adf_accel_dev *accel_dev, void *ctx, enum adf_dc_algo algo);
+
+#endif /* ADF_DC_H */
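This header, together with the reworked adf_dc.c above, replaces the single build_deflate_ctx() callback with per-direction builders that take an algorithm: the shared code fills the common request template, then asks the generation-specific ops to fill the compression and decompression config words, and a generation that cannot handle the requested algorithm fails with -EINVAL. A reduced, self-contained sketch of that dispatch (hypothetical simplified types, not the real icp_qat_fw structures):

#include <errno.h>
#include <stdio.h>

enum dc_algo { DC_DEFLATE, DC_LZ4 };

struct dc_ctx { unsigned int comp_cfg; unsigned int decomp_cfg; };

struct dc_ops {
	int (*build_comp_block)(struct dc_ctx *ctx, enum dc_algo algo);
	int (*build_decomp_block)(struct dc_ctx *ctx, enum dc_algo algo);
};

/* A gen2-like backend that only supports deflate. */
static int gen2_comp(struct dc_ctx *ctx, enum dc_algo algo)
{
	if (algo != DC_DEFLATE)
		return -EINVAL;
	ctx->comp_cfg = 0x1;	/* stand-in for the slice config word */
	return 0;
}

static int gen2_decomp(struct dc_ctx *ctx, enum dc_algo algo)
{
	if (algo != DC_DEFLATE)
		return -EINVAL;
	ctx->decomp_cfg = 0x2;
	return 0;
}

static const struct dc_ops gen2_ops = { gen2_comp, gen2_decomp };

/* Analogue of qat_comp_build_ctx(): common part, then per-generation callbacks. */
static int build_ctx(const struct dc_ops *ops, struct dc_ctx *ctx, enum dc_algo algo)
{
	int ret = ops->build_comp_block(ctx, algo);

	if (ret)
		return ret;
	return ops->build_decomp_block(ctx, algo);
}

int main(void)
{
	struct dc_ctx ctx = { 0 };

	printf("deflate: %d\n", build_ctx(&gen2_ops, &ctx, DC_DEFLATE));	/* 0 */
	printf("lz4:     %d\n", build_ctx(&gen2_ops, &ctx, DC_LZ4));		/* -EINVAL */
	return 0;
}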
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
index 4f86696800c9..78957fa900b7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
@@ -8,6 +8,7 @@ enum adf_fw_objs {
ADF_FW_ASYM_OBJ,
ADF_FW_DC_OBJ,
ADF_FW_ADMIN_OBJ,
+ ADF_FW_CY_OBJ,
};
struct adf_fw_config {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
deleted file mode 100644
index 6eae023354d7..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN2_DC_H
-#define ADF_GEN2_DC_H
-
-#include "adf_accel_devices.h"
-
-void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
-
-#endif /* ADF_GEN2_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
index 2b263442c856..6a505e9a5cf9 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include "adf_common_drv.h"
+#include "adf_dc.h"
#include "adf_gen2_hw_data.h"
+#include "icp_qat_fw_comp.h"
#include "icp_qat_hw.h"
#include <linux/pci.h>
@@ -169,3 +171,58 @@ void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
}
}
EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
+
+static int adf_gen2_build_comp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ ICP_QAT_HW_COMPRESSION_DEPTH_1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ return 0;
+}
+
+static int adf_gen2_build_decomp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ ICP_QAT_HW_COMPRESSION_DEPTH_1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ return 0;
+}
+
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_comp_block = adf_gen2_build_comp_block;
+ dc_ops->build_decomp_block = adf_gen2_build_decomp_block;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
index 708e9186127b..59bad368a921 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
@@ -88,5 +88,6 @@ void adf_gen2_get_arb_info(struct arb_info *arb_info);
void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
index a716545a764c..34a63cf40db2 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
index f97e7a880f3a..afcdfdd0a37a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
@@ -11,7 +11,7 @@
#include "qat_compression.h"
#include "qat_crypto.h"
-static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
@@ -117,7 +117,7 @@ err:
return ret;
}
-static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
+int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
@@ -187,7 +187,7 @@ err:
return ret;
}
-static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
+int adf_no_dev_config(struct adf_accel_dev *accel_dev)
{
unsigned long val;
int ret;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
index bb87655f69a8..38a674c27e40 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
@@ -7,5 +7,8 @@
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev);
+int adf_crypto_dev_config(struct adf_accel_dev *accel_dev);
+int adf_comp_dev_config(struct adf_accel_dev *accel_dev);
+int adf_no_dev_config(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
deleted file mode 100644
index 5859238e37de..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "icp_qat_fw_comp.h"
-#include "icp_qat_hw_20_comp.h"
-#include "adf_gen4_dc.h"
-
-static void qat_comp_build_deflate(void *ctx)
-{
- struct icp_qat_fw_comp_req *req_tmpl =
- (struct icp_qat_fw_comp_req *)ctx;
- struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
- struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
- struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
- struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0};
- struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0};
- struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0};
- u32 upper_val;
- u32 lower_val;
-
- memset(req_tmpl, 0, sizeof(*req_tmpl));
- header->hdr_flags =
- ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
- header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
- header->comn_req_flags =
- ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
- QAT_COMN_PTR_TYPE_SGL);
- header->serv_specif_flags =
- ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
- ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
- hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
- hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
- hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
- hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
- hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
- hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
- hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
- hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
-
- upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
- lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
-
- cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
- cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
-
- req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
- req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
- req_pars->req_par_flags =
- ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
- ICP_QAT_FW_COMP_EOP,
- ICP_QAT_FW_COMP_BFINAL,
- ICP_QAT_FW_COMP_CNV,
- ICP_QAT_FW_COMP_CNV_RECOVERY,
- ICP_QAT_FW_COMP_NO_CNV_DFX,
- ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
- ICP_QAT_FW_COMP_NO_XXHASH_ACC,
- ICP_QAT_FW_COMP_CNV_ERROR_NONE,
- ICP_QAT_FW_COMP_NO_APPEND_CRC,
- ICP_QAT_FW_COMP_NO_DROP_DATA);
-
- /* Fill second half of the template for decompression */
- memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
- req_tmpl++;
- header = &req_tmpl->comn_hdr;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
- cd_pars = &req_tmpl->cd_pars;
-
- hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
- lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
-
- cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
- cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
-}
-
-void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
-{
- dc_ops->build_deflate_ctx = qat_comp_build_deflate;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
deleted file mode 100644
index 0b1a6774412e..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN4_DC_H
-#define ADF_GEN4_DC_H
-
-#include "adf_accel_devices.h"
-
-void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
-
-#endif /* ADF_GEN4_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 099949a2421c..0406cb09c5bb 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -9,6 +9,8 @@
#include "adf_fw_config.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_hw_20_comp.h"
u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
{
@@ -663,3 +665,71 @@ int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number
return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);
+
+static int adf_gen4_build_comp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = { };
+ struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = { };
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ u32 upper_val;
+ u32 lower_val;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
+ hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
+ hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
+ hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
+ hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
+ hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
+ hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
+
+ upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
+ lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
+
+ return 0;
+}
+
+static int adf_gen4_build_decomp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = { };
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ u32 lower_val;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
+ lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+
+ return 0;
+}
+
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_comp_block = adf_gen4_build_comp_block;
+ dc_ops->build_decomp_block = adf_gen4_build_decomp_block;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 51fc2eaa263e..e4f4d5fa616d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -7,6 +7,7 @@
#include "adf_accel_devices.h"
#include "adf_cfg_common.h"
+#include "adf_dc.h"
/* PCIe configuration space */
#define ADF_GEN4_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
@@ -180,5 +181,6 @@ int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
u32 bank_number, struct bank_state *state);
bool adf_gen4_services_supported(unsigned long service_mask);
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
index 17d1b774d4a8..2c8708117f70 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
@@ -4,6 +4,7 @@
#define ADF_GEN4_PFVF_H
#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
#ifdef CONFIG_PCI_IOV
void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
new file mode 100644
index 000000000000..9a5b995f7ada
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_GEN6_PM_H
+#define ADF_GEN6_PM_H
+
+#include <linux/bits.h>
+#include <linux/time.h>
+
+struct adf_accel_dev;
+
+/* Power management */
+#define ADF_GEN6_PM_POLL_DELAY_US 20
+#define ADF_GEN6_PM_POLL_TIMEOUT_US USEC_PER_SEC
+#define ADF_GEN6_PM_STATUS 0x50A00C
+#define ADF_GEN6_PM_INTERRUPT 0x50A028
+
+/* Power management source in ERRSOU2 and ERRMSK2 */
+#define ADF_GEN6_PM_SOU BIT(18)
+
+/* cpm_pm_interrupt bitfields */
+#define ADF_GEN6_PM_DRV_ACTIVE BIT(20)
+
+#define ADF_GEN6_PM_DEFAULT_IDLE_FILTER 0x6
+
+/* cpm_pm_status bitfields */
+#define ADF_GEN6_PM_INIT_STATE BIT(21)
+
+#endif /* ADF_GEN6_PM_H */
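ADF_GEN6_PM_POLL_DELAY_US and ADF_GEN6_PM_POLL_TIMEOUT_US presumably bound the polling of the power-management status register: a 20 us delay with a one-second budget allows at most 50,000 iterations. A generic, userspace-style sketch of such a bounded poll loop follows; it only illustrates the intent of the constants and is not the driver's actual polling helper:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define PM_POLL_DELAY_US	20
#define PM_POLL_TIMEOUT_US	1000000	/* USEC_PER_SEC */

/* Hypothetical readiness check standing in for a CSR read. */
static bool pm_ready(int iteration)
{
	return iteration >= 3;	/* pretend the hardware settles quickly */
}

static int poll_pm_status(void)
{
	long waited_us = 0;
	int i = 0;

	while (!pm_ready(i++)) {
		if (waited_us >= PM_POLL_TIMEOUT_US)
			return -1;	/* timed out */
		usleep(PM_POLL_DELAY_US);
		waited_us += PM_POLL_DELAY_US;
	}
	return 0;
}

int main(void)
{
	printf("poll result: %d\n", poll_pm_status());
	return 0;
}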
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c
new file mode 100644
index 000000000000..967253082a98
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/types.h>
+
+#include "adf_common_drv.h"
+#include "adf_gen6_ras.h"
+#include "adf_sysfs_ras_counters.h"
+
+static void enable_errsou_reporting(void __iomem *csr)
+{
+ /* Enable correctable error reporting in ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, 0);
+
+ /* Enable uncorrectable error reporting in ERRSOU1 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, 0);
+
+ /*
+ * Enable uncorrectable error reporting in ERRSOU2
+ * but disable PM interrupt by default
+ */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, ADF_GEN6_ERRSOU2_PM_INT_BIT);
+
+ /* Enable uncorrectable error reporting in ERRSOU3 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, 0);
+}
+
+static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask;
+
+ /* Enable acceleration engine correctable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, ae_mask);
+
+ /* Enable acceleration engine uncorrectable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, ae_mask);
+}
+
+static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ /* Enable HI CPP agents command parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE,
+ ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK);
+
+ ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_MASK);
+}
+
+static void enable_ti_ri_error_reporting(void __iomem *csr)
+{
+ u32 reg, mask;
+
+ /* Enable RI memory error reporting */
+ mask = ADF_GEN6_RIMEM_PARERR_FATAL_MASK | ADF_GEN6_RIMEM_PARERR_CERR_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, mask);
+
+ /* Enable IOSF primary command parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, ADF_GEN6_RIMISCSTS_BIT);
+
+ /* Enable TI internal memory parity error reporting */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_CI_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_CD_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_TRNSB_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, reg);
+
+ /* Enable error handling in RI, TI CPP interface control registers */
+ ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, ADF_GEN6_RICPPINTCTL_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, ADF_GEN6_TICPPINTCTL_MASK);
+
+ /*
+ * Enable error detection and reporting in TIMISCSTS
+ * while preserving the value of bits 1, 2 and 30
+ */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL);
+ reg &= ADF_GEN6_TIMSCCTL_RELAY_MASK;
+ reg |= ADF_GEN6_TIMISCCTL_BIT;
+ ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg);
+}
+
+static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ /* Enable SSM interrupts */
+ ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, 0);
+}
+
+static void adf_gen6_enable_ras(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ enable_errsou_reporting(csr);
+ enable_ae_error_reporting(accel_dev, csr);
+ enable_cpp_error_reporting(accel_dev, csr);
+ enable_ti_ri_error_reporting(csr);
+ enable_ssm_error_reporting(accel_dev, csr);
+}
+
+static void disable_errsou_reporting(void __iomem *csr)
+{
+ u32 val;
+
+ /* Disable correctable error reporting in ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, ADF_GEN6_ERRSOU0_MASK);
+
+ /* Disable uncorrectable error reporting in ERRSOU1 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, ADF_GEN6_ERRMSK1_MASK);
+
+ /* Disable uncorrectable error reporting in ERRSOU2 */
+ val = ADF_CSR_RD(csr, ADF_GEN6_ERRMSK2);
+ val |= ADF_GEN6_ERRSOU2_DIS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, val);
+
+ /* Disable uncorrectable error reporting in ERRSOU3 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_ERRSOU3_DIS_MASK);
+}
+
+static void disable_ae_error_reporting(void __iomem *csr)
+{
+ /* Disable acceleration engine correctable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, 0);
+
+ /* Disable acceleration engine uncorrectable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, 0);
+}
+
+static void disable_cpp_error_reporting(void __iomem *csr)
+{
+ /* Disable HI CPP agents command parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE, 0);
+
+ ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_DIS_MASK);
+}
+
+static void disable_ti_ri_error_reporting(void __iomem *csr)
+{
+ u32 reg;
+
+ /* Disable RI memory error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, 0);
+
+ /* Disable IOSF primary command parity error reporting */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_RIMISCCTL);
+ reg &= ~ADF_GEN6_RIMISCSTS_BIT;
+ ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, reg);
+
+ /* Disable TI internal memory parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, ADF_GEN6_TI_CI_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, ADF_GEN6_TI_CD_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, ADF_GEN6_TI_TRNSB_PAR_STS_MASK);
+
+ /* Disable error handling in RI, TI CPP interface control registers */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTCTL);
+ reg &= ~ADF_GEN6_RICPPINTCTL_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTCTL);
+ reg &= ~ADF_GEN6_TICPPINTCTL_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, reg);
+
+ /*
+ * Disable error detection and reporting in TIMISCSTS
+ * while preserving the value of bits 1, 2 and 30
+ */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL);
+ reg &= ADF_GEN6_TIMSCCTL_RELAY_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg);
+}
+
+static void disable_ssm_error_reporting(void __iomem *csr)
+{
+ /* Disable SSM interrupts */
+ ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, ADF_GEN6_INTMASKSSM_MASK);
+}
+
+static void adf_gen6_disable_ras(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ disable_errsou_reporting(csr);
+ disable_ae_error_reporting(csr);
+ disable_cpp_error_reporting(csr);
+ disable_ti_ri_error_reporting(csr);
+ disable_ssm_error_reporting(csr);
+}
+
+static void adf_gen6_process_errsou0(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ae, errsou;
+
+ ae = ADF_CSR_RD(csr, ADF_GEN6_HIAECORERRLOG_CPP0);
+ ae &= GET_HW_DATA(accel_dev)->ae_mask;
+
+ dev_warn(&GET_DEV(accel_dev), "Correctable error detected: %#x\n", ae);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+
+ /* Clear interrupt from ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOG_CPP0, ae);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0);
+ if (errsou & ADF_GEN6_ERRSOU0_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou0 still set: %#x\n", errsou);
+}
+
+static void adf_handle_cpp_ae_unc(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 ae;
+
+ if (!(errsou & ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT))
+ return;
+
+ ae = ADF_CSR_RD(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0);
+ ae &= GET_HW_DATA(accel_dev)->ae_mask;
+ if (ae) {
+ dev_err(&GET_DEV(accel_dev), "Uncorrectable error detected: %#x\n", ae);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0, ae);
+ }
+}
+
+static void adf_handle_cpp_cmd_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 cmd_par_err;
+
+ if (!(errsou & ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT))
+ return;
+
+ cmd_par_err = ADF_CSR_RD(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG);
+ cmd_par_err &= ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK;
+ if (cmd_par_err) {
+ dev_err(&GET_DEV(accel_dev), "HI CPP agent command parity error: %#x\n",
+ cmd_par_err);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG, cmd_par_err);
+ }
+}
+
+static void adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 rimem_parerr_sts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT))
+ return;
+
+ rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN6_RIMEM_PARERR_STS);
+ rimem_parerr_sts &= ADF_GEN6_RIMEM_PARERR_CERR_MASK |
+ ADF_GEN6_RIMEM_PARERR_FATAL_MASK;
+ if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_CERR_MASK) {
+ dev_err(&GET_DEV(accel_dev), "RI memory parity correctable error: %#x\n",
+ rimem_parerr_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+ }
+
+ if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_FATAL_MASK) {
+ dev_err(&GET_DEV(accel_dev), "RI memory parity fatal error: %#x\n",
+ rimem_parerr_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN6_RIMEM_PARERR_STS, rimem_parerr_sts);
+}
+
+static void adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_ci_par_sts;
+
+ ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_STS);
+ ti_ci_par_sts &= ADF_GEN6_TI_CI_PAR_STS_MASK;
+ if (ti_ci_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI memory parity error: %#x\n", ti_ci_par_sts);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_STS, ti_ci_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+}
+
+static void adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_pullfub_par_sts;
+
+ ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS);
+ ti_pullfub_par_sts &= ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK;
+ if (ti_pullfub_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI pull parity error: %#x\n", ti_pullfub_par_sts);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS, ti_pullfub_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+}
+
+static void adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_pushfub_par_sts;
+
+ ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS);
+ ti_pushfub_par_sts &= ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK;
+ if (ti_pushfub_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI push parity error: %#x\n", ti_pushfub_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS, ti_pushfub_par_sts);
+ }
+}
+
+static void adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_cd_par_sts;
+
+ ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_STS);
+ ti_cd_par_sts &= ADF_GEN6_TI_CD_PAR_STS_MASK;
+ if (ti_cd_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI CD parity error: %#x\n", ti_cd_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_STS, ti_cd_par_sts);
+ }
+}
+
+static void adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_trnsb_par_sts;
+
+ ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_STS);
+ ti_trnsb_par_sts &= ADF_GEN6_TI_TRNSB_PAR_STS_MASK;
+ if (ti_trnsb_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI TRNSB parity error: %#x\n", ti_trnsb_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_STS, ti_trnsb_par_sts);
+ }
+}
+
+static void adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 rimiscsts;
+
+ rimiscsts = ADF_CSR_RD(csr, ADF_GEN6_RIMISCSTS);
+ rimiscsts &= ADF_GEN6_RIMISCSTS_BIT;
+ if (rimiscsts) {
+ dev_err(&GET_DEV(accel_dev), "Command parity error detected on IOSFP: %#x\n",
+ rimiscsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_RIMISCSTS, rimiscsts);
+ }
+}
+
+static void adf_handle_ti_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT))
+ return;
+
+ adf_handle_ti_ci_par_sts(accel_dev, csr);
+ adf_handle_ti_pullfub_par_sts(accel_dev, csr);
+ adf_handle_ti_pushfub_par_sts(accel_dev, csr);
+ adf_handle_ti_cd_par_sts(accel_dev, csr);
+ adf_handle_ti_trnsb_par_sts(accel_dev, csr);
+ adf_handle_iosfp_cmd_parerr(accel_dev, csr);
+}
+
+static void adf_handle_sfi_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+ "Command parity error detected on streaming fabric interface\n");
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_gen6_process_errsou1(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ adf_handle_cpp_ae_unc(accel_dev, csr, errsou);
+ adf_handle_cpp_cmd_par_err(accel_dev, csr, errsou);
+ adf_handle_ri_mem_par_err(accel_dev, csr, errsou);
+ adf_handle_ti_err(accel_dev, csr, errsou);
+ adf_handle_sfi_cmd_parerr(accel_dev, csr, errsou);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1);
+ if (errsou & ADF_GEN6_ERRSOU1_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou1 still set: %#x\n", errsou);
+}
+
+static void adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 reg;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_CERRSSMSH);
+ reg &= ADF_GEN6_CERRSSMSH_ERROR_BIT;
+ if (reg) {
+ dev_warn(&GET_DEV(accel_dev),
+ "Correctable error on ssm shared memory: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+ ADF_CSR_WR(csr, ADF_GEN6_CERRSSMSH, reg);
+ }
+}
+
+static void adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_UERRSSMSH);
+ reg &= ADF_GEN6_UERRSSMSH_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal error on ssm shared memory: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_UERRSSMSH, reg);
+ }
+}
+
+static void adf_handle_pperr_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_PPERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_PPERR);
+ reg &= ADF_GEN6_PPERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal push or pull data error: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_PPERR, reg);
+ }
+}
+
+static void adf_handle_scmpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_SCM_PAR_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error on SCM: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_cpppar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_CPP_PAR_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error on CPP: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_rfpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_RF_PAR_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error on RF Parity: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_unexp_cpl_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_UNEXP_CPL_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal error for AXI unexpected tag/length: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
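+/* Demultiplex the SSM interrupt status into the individual error handlers, then clear it. */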
+static void adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN6_IAINTSTATSSM);
+
+ iastatssm &= ADF_GEN6_IAINTSTATSSM_MASK;
+ if (!iastatssm)
+ return;
+
+ adf_handle_uerrssmsh(accel_dev, csr, iastatssm);
+ adf_handle_pperr_err(accel_dev, csr, iastatssm);
+ adf_handle_scmpar_err(accel_dev, csr, iastatssm);
+ adf_handle_cpppar_err(accel_dev, csr, iastatssm);
+ adf_handle_rfpar_err(accel_dev, csr, iastatssm);
+ adf_handle_unexp_cpl_err(accel_dev, csr, iastatssm);
+
+ ADF_CSR_WR(csr, ADF_GEN6_IAINTSTATSSM, iastatssm);
+}
+
+static void adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU2_SSM_ERR_BIT))
+ return;
+
+ adf_handle_cerrssmsh(accel_dev, csr);
+ adf_handle_iaintstatssm(accel_dev, csr);
+}
+
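+/*
+ * Handle CPP CFC errors: data parity errors are counted as uncorrectable,
+ * command parity and generic CFC errors as fatal; the status is then cleared
+ * through the dedicated clear register.
+ */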
+static void adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 reg;
+
+ if (!(errsou & ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_CPP_CFC_ERR_STATUS);
+ if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT) {
+ dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: data parity: %#x", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+
+ if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT) {
+ dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: command parity: %#x", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+
+ if (reg & ADF_GEN6_CPP_CFC_FATAL_ERR_BIT) {
+ dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: errors: %#x", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_STATUS_CLR,
+ ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK);
+}
+
+static void adf_gen6_process_errsou2(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ adf_handle_ssm(accel_dev, csr, errsou);
+ adf_handle_cpp_cfc_err(accel_dev, csr, errsou);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2);
+ if (errsou & ADF_GEN6_ERRSOU2_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou2 still set: %#x\n", errsou);
+}
+
+static void adf_handle_timiscsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 timiscsts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_TIMISCSTS_BIT))
+ return;
+
+ timiscsts = ADF_CSR_RD(csr, ADF_GEN6_TIMISCSTS);
+ if (timiscsts) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error in transmit interface: %#x\n",
+ timiscsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+}
+
+static void adf_handle_ricppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 ricppintsts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK))
+ return;
+
+ ricppintsts = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTSTS);
+ ricppintsts &= ADF_GEN6_RICPPINTSTS_MASK;
+ if (ricppintsts) {
+ dev_err(&GET_DEV(accel_dev), "RI push pull error: %#x\n", ricppintsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_RICPPINTSTS, ricppintsts);
+ }
+}
+
+static void adf_handle_ticppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 ticppintsts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK))
+ return;
+
+ ticppintsts = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTSTS);
+ ticppintsts &= ADF_GEN6_TICPPINTSTS_MASK;
+ if (ticppintsts) {
+ dev_err(&GET_DEV(accel_dev), "TI push pull error: %#x\n", ticppintsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_TICPPINTSTS, ticppintsts);
+ }
+}
+
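+/* Check the per-ring-pair ATU fault status registers and clear any reported fault. */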
+static void adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks;
+ u32 atufaultstatus;
+ u32 i;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT))
+ return;
+
+ for (i = 0; i < max_rp_num; i++) {
+ atufaultstatus = ADF_CSR_RD(csr, ADF_GEN6_ATUFAULTSTATUS(i));
+
+ atufaultstatus &= ADF_GEN6_ATUFAULTSTATUS_BIT;
+ if (atufaultstatus) {
+ dev_err(&GET_DEV(accel_dev), "Ring pair (%u) ATU detected fault: %#x\n", i,
+ atufaultstatus);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_ATUFAULTSTATUS(i), atufaultstatus);
+ }
+ }
+}
+
+static void adf_handle_rlterror(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 rlterror;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_RLTERROR_BIT))
+ return;
+
+ rlterror = ADF_CSR_RD(csr, ADF_GEN6_RLT_ERRLOG);
+ rlterror &= ADF_GEN6_RLT_ERRLOG_MASK;
+ if (rlterror) {
+ dev_err(&GET_DEV(accel_dev), "Error in rate limiting block: %#x\n", rlterror);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_RLT_ERRLOG, rlterror);
+ }
+}
+
+static void adf_handle_vflr(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "Uncorrectable error in VF\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+}
+
+static void adf_handle_tc_vc_map_error(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "Violation of PCIe TC VC mapping\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_pcie_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+ "DEVHALT due to an error in an incoming transaction\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_pg_req_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+ "Error due to response failure in response to a page request\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_xlt_cpl_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "Error status for a address translation request\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_ti_int_err_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "DEVHALT due to a TI internal memory error\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_gen6_process_errsou3(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ adf_handle_timiscsts(accel_dev, csr, errsou);
+ adf_handle_ricppintsts(accel_dev, csr, errsou);
+ adf_handle_ticppintsts(accel_dev, csr, errsou);
+ adf_handle_atufaultstatus(accel_dev, csr, errsou);
+ adf_handle_rlterror(accel_dev, csr, errsou);
+ adf_handle_vflr(accel_dev, csr, errsou);
+ adf_handle_tc_vc_map_error(accel_dev, csr, errsou);
+ adf_handle_pcie_devhalt(accel_dev, csr, errsou);
+ adf_handle_pg_req_devhalt(accel_dev, csr, errsou);
+ adf_handle_xlt_cpl_devhalt(accel_dev, csr, errsou);
+ adf_handle_ti_int_err_devhalt(accel_dev, csr, errsou);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3);
+ if (errsou & ADF_GEN6_ERRSOU3_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou3 still set: %#x\n", errsou);
+}
+
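+/*
+ * Decide whether a reset is required based on the GENSTS register: a device
+ * halt that is recoverable by a PFLR requests a reset, while a cold-reset
+ * condition is only reported since it cannot be handled from the driver.
+ */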
+static void adf_gen6_is_reset_required(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ bool *reset_required)
+{
+ u8 reset, dev_state;
+ u32 gensts;
+
+ gensts = ADF_CSR_RD(csr, ADF_GEN6_GENSTS);
+ dev_state = FIELD_GET(ADF_GEN6_GENSTS_DEVICE_STATE_MASK, gensts);
+ reset = FIELD_GET(ADF_GEN6_GENSTS_RESET_TYPE_MASK, gensts);
+ if (dev_state == ADF_GEN6_GENSTS_DEVHALT && reset == ADF_GEN6_GENSTS_PFLR) {
+ *reset_required = true;
+ return;
+ }
+
+ if (reset == ADF_GEN6_GENSTS_COLD_RESET)
+ dev_err(&GET_DEV(accel_dev), "Fatal error, cold reset required\n");
+
+ *reset_required = false;
+}
+
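+/*
+ * Top-level RAS interrupt handler: walk the ERRSOU0-3 error source registers,
+ * dispatch to the per-source handlers and report whether a reset is required.
+ */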
+static bool adf_gen6_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ bool handled = false;
+ u32 errsou;
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0);
+ if (errsou & ADF_GEN6_ERRSOU0_MASK) {
+ adf_gen6_process_errsou0(accel_dev, csr);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1);
+ if (errsou & ADF_GEN6_ERRSOU1_MASK) {
+ adf_gen6_process_errsou1(accel_dev, csr, errsou);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2);
+ if (errsou & ADF_GEN6_ERRSOU2_MASK) {
+ adf_gen6_process_errsou2(accel_dev, csr, errsou);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3);
+ if (errsou & ADF_GEN6_ERRSOU3_MASK) {
+ adf_gen6_process_errsou3(accel_dev, csr, errsou);
+ handled = true;
+ }
+
+ adf_gen6_is_reset_required(accel_dev, csr, reset_required);
+
+ return handled;
+}
+
+void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops)
+{
+ ras_ops->enable_ras_errors = adf_gen6_enable_ras;
+ ras_ops->disable_ras_errors = adf_gen6_disable_ras;
+ ras_ops->handle_interrupt = adf_gen6_handle_interrupt;
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_ras_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h
new file mode 100644
index 000000000000..66ced271d173
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h
@@ -0,0 +1,504 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_GEN6_RAS_H_
+#define ADF_GEN6_RAS_H_
+
+#include <linux/bits.h>
+
+struct adf_ras_ops;
+
+/* Error source registers */
+#define ADF_GEN6_ERRSOU0 0x41A200
+#define ADF_GEN6_ERRSOU1 0x41A204
+#define ADF_GEN6_ERRSOU2 0x41A208
+#define ADF_GEN6_ERRSOU3 0x41A20C
+
+/* Error source mask registers */
+#define ADF_GEN6_ERRMSK0 0x41A210
+#define ADF_GEN6_ERRMSK1 0x41A214
+#define ADF_GEN6_ERRMSK2 0x41A218
+#define ADF_GEN6_ERRMSK3 0x41A21C
+
+/* ERRSOU0 Correctable error mask */
+#define ADF_GEN6_ERRSOU0_MASK BIT(0)
+
+#define ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT BIT(0)
+#define ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT BIT(1)
+#define ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT BIT(2)
+#define ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT BIT(3)
+#define ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT BIT(4)
+
+#define ADF_GEN6_ERRSOU1_MASK ( \
+ (ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT) | \
+ (ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT) | \
+ (ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT))
+
+#define ADF_GEN6_ERRMSK1_CPP0_MEUNC_BIT BIT(0)
+#define ADF_GEN6_ERRMSK1_CPP_CMDPARERR_BIT BIT(1)
+#define ADF_GEN6_ERRMSK1_RIMEM_PARERR_STS_BIT BIT(2)
+#define ADF_GEN6_ERRMSK1_TIMEM_PARERR_STS_BIT BIT(3)
+#define ADF_GEN6_ERRMSK1_IOSFCMD_PARERR_BIT BIT(4)
+
+#define ADF_GEN6_ERRMSK1_MASK ( \
+ (ADF_GEN6_ERRMSK1_CPP0_MEUNC_BIT) | \
+ (ADF_GEN6_ERRMSK1_CPP_CMDPARERR_BIT) | \
+ (ADF_GEN6_ERRMSK1_RIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRMSK1_TIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRMSK1_IOSFCMD_PARERR_BIT))
+
+/* HI AE Uncorrectable error log */
+#define ADF_GEN6_HIAEUNCERRLOG_CPP0 0x41A300
+
+/* HI AE Uncorrectable error log enable */
+#define ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0 0x41A320
+
+/* HI AE Correctable error log */
+#define ADF_GEN6_HIAECORERRLOG_CPP0 0x41A308
+
+/* HI AE Correctable error log enable */
+#define ADF_GEN6_HIAECORERRLOGENABLE_CPP0 0x41A318
+
+/* HI CPP Agent Command parity error log */
+#define ADF_GEN6_HICPPAGENTCMDPARERRLOG 0x41A310
+
+/* HI CPP Agent command parity error logging enable */
+#define ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE 0x41A314
+
+#define ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK 0x1B
+
+/* RI Memory parity error status register */
+#define ADF_GEN6_RIMEM_PARERR_STS 0x41B128
+
+/* RI Memory parity error reporting enable */
+#define ADF_GEN6_RI_MEM_PAR_ERR_EN0 0x41B12C
+
+/*
+ * RI Memory parity error mask
+ * BIT(4) - ri_tlq_phdr parity error
+ * BIT(5) - ri_tlq_pdata parity error
+ * BIT(6) - ri_tlq_nphdr parity error
+ * BIT(7) - ri_tlq_npdata parity error
+ * BIT(8) - ri_tlq_cplhdr parity error
+ * BIT(10) - BIT(13) - ri_tlq_cpldata[0:3] parity error
+ * BIT(19) - ri_cds_cmd_fifo parity error
+ * BIT(20) - ri_obc_ricpl_fifo parity error
+ * BIT(21) - ri_obc_tiricpl_fifo parity error
+ * BIT(22) - ri_obc_cppcpl_fifo parity error
+ * BIT(23) - ri_obc_pendcpl_fifo parity error
+ * BIT(24) - ri_cpp_cmd_fifo parity error
+ * BIT(25) - ri_cds_ticmd_fifo parity error
+ * BIT(26) - riti_cmd_fifo parity error
+ * BIT(27) - ri_int_msixtbl parity error
+ * BIT(28) - ri_int_imstbl parity error
+ * BIT(30) - ri_kpt_fuses parity error
+ */
+#define ADF_GEN6_RIMEM_PARERR_FATAL_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | \
+ BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27) | \
+ BIT(28) | BIT(30))
+
+#define ADF_GEN6_RIMEM_PARERR_CERR_MASK \
+ (BIT(10) | BIT(11) | BIT(12) | BIT(13))
+
+/* TI CI parity status */
+#define ADF_GEN6_TI_CI_PAR_STS 0x50060C
+
+/* TI CI parity reporting mask */
+#define ADF_GEN6_TI_CI_PAR_ERR_MASK 0x500608
+
+/*
+ * TI CI parity status mask
+ * BIT(0) - CdCmdQ_sts parity error status
+ * BIT(1) - CdDataQ_sts parity error status
+ * BIT(3) - CPP_SkidQ_sts parity error status
+ */
+#define ADF_GEN6_TI_CI_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(3))
+
+/* TI PULLFUB parity status */
+#define ADF_GEN6_TI_PULL0FUB_PAR_STS 0x500618
+
+/* TI PULLFUB parity error reporting mask */
+#define ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK 0x500614
+
+/*
+ * TI PULLFUB parity status mask
+ * BIT(0) - TrnPullReqQ_sts parity status
+ * BIT(1) - TrnSharedDataQ_sts parity status
+ * BIT(2) - TrnPullReqDataQ_sts parity status
+ * BIT(4) - CPP_CiPullReqQ_sts parity status
+ * BIT(5) - CPP_TrnPullReqQ_sts parity status
+ * BIT(6) - CPP_PullidQ_sts parity status
+ * BIT(7) - CPP_WaitDataQ_sts parity status
+ * BIT(8) - CPP_CdDataQ_sts parity status
+ * BIT(9) - CPP_TrnDataQP0_sts parity status
+ * BIT(10) - BIT(11) - CPP_TrnDataQRF[00:01]_sts parity status
+ * BIT(12) - CPP_TrnDataQP1_sts parity status
+ * BIT(13) - BIT(14) - CPP_TrnDataQRF[10:11]_sts parity status
+ */
+#define ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | \
+ BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14))
+
+/* TI PUSHFUB parity status */
+#define ADF_GEN6_TI_PUSHFUB_PAR_STS 0x500630
+
+/* TI PUSHFUB parity error reporting mask */
+#define ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK 0x50062C
+
+/*
+ * TI PUSHFUB parity status mask
+ * BIT(0) - SbPushReqQ_sts parity status
+ * BIT(1) - BIT(2) - SbPushDataQ[0:1]_sts parity status
+ * BIT(4) - CPP_CdPushReqQ_sts parity status
+ * BIT(5) - BIT(6) - CPP_CdPushDataQ[0:1]_sts parity status
+ * BIT(7) - CPP_SbPushReqQ_sts parity status
+ * BIT(8) - CPP_SbPushDataQP_sts parity status
+ * BIT(9) - BIT(10) - CPP_SbPushDataQRF[0:1]_sts parity status
+ */
+#define ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | \
+ BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10))
+
+/* TI CD parity status */
+#define ADF_GEN6_TI_CD_PAR_STS 0x50063C
+
+/* TI CD parity error mask */
+#define ADF_GEN6_TI_CD_PAR_ERR_MASK 0x500638
+
+/*
+ * TI CD parity status mask
+ * BIT(0) - BIT(15) - CtxMdRam[0:15]_sts parity status
+ * BIT(16) - Leaf2ClusterRam_sts parity status
+ * BIT(17) - BIT(18) - Ring2LeafRam[0:1]_sts parity status
+ * BIT(19) - VirtualQ_sts parity status
+ * BIT(20) - DtRdQ_sts parity status
+ * BIT(21) - DtWrQ_sts parity status
+ * BIT(22) - RiCmdQ_sts parity status
+ * BIT(23) - BypassQ_sts parity status
+ * BIT(24) - DtRdQ_sc_sts parity status
+ * BIT(25) - DtWrQ_sc_sts parity status
+ */
+#define ADF_GEN6_TI_CD_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \
+ BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \
+ BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25))
+
+/* TI TRNSB parity status */
+#define ADF_GEN6_TI_TRNSB_PAR_STS 0x500648
+
+/* TI TRNSB parity error reporting mask */
+#define ADF_GEN6_TI_TRNSB_PAR_ERR_MASK 0x500644
+
+/*
+ * TI TRNSB parity status mask
+ * BIT(0) - TrnPHdrQP_sts parity status
+ * BIT(1) - TrnPHdrQRF_sts parity status
+ * BIT(2) - TrnPDataQP_sts parity status
+ * BIT(3) - BIT(6) - TrnPDataQRF[0:3]_sts parity status
+ * BIT(7) - TrnNpHdrQP_sts parity status
+ * BIT(8) - BIT(9) - TrnNpHdrQRF[0:1]_sts parity status
+ * BIT(10) - TrnCplHdrQ_sts parity status
+ * BIT(11) - TrnPutObsReqQ_sts parity status
+ * BIT(12) - TrnPushReqQ_sts parity status
+ * BIT(13) - SbSplitIdRam_sts parity status
+ * BIT(14) - SbReqCountQ_sts parity status
+ * BIT(15) - SbCplTrkRam_sts parity status
+ * BIT(16) - SbGetObsReqQ_sts parity status
+ * BIT(17) - SbEpochIdQ_sts parity status
+ * BIT(18) - SbAtCplHdrQ_sts parity status
+ * BIT(19) - SbAtCplDataQ_sts parity status
+ * BIT(20) - SbReqCountRam_sts parity status
+ * BIT(21) - SbAtCplHdrQ_sc_sts parity status
+ */
+#define ADF_GEN6_TI_TRNSB_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | \
+ BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | \
+ BIT(19) | BIT(20) | BIT(21))
+
+/* Status register to log misc error on RI */
+#define ADF_GEN6_RIMISCSTS 0x41B1B8
+
+/* Status control register to log misc RI error */
+#define ADF_GEN6_RIMISCCTL 0x41B1BC
+
+/*
+ * ERRSOU2 bit mask
+ * BIT(0) - SSM Interrupt Mask
+ * BIT(1) - CFC on CPP. Logical OR of the CFC push and pull errors
+ * BIT(2) - BIT(4) - CPP attention interrupts
+ * BIT(18) - PM interrupt
+ */
+#define ADF_GEN6_ERRSOU2_SSM_ERR_BIT BIT(0)
+#define ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT BIT(1)
+#define ADF_GEN6_ERRSOU2_CPP_CFC_ATT_INT_MASK \
+ (BIT(2) | BIT(3) | BIT(4))
+
+#define ADF_GEN6_ERRSOU2_PM_INT_BIT BIT(18)
+
+#define ADF_GEN6_ERRSOU2_MASK \
+ (ADF_GEN6_ERRSOU2_SSM_ERR_BIT | \
+ ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT)
+
+#define ADF_GEN6_ERRSOU2_DIS_MASK \
+ (ADF_GEN6_ERRSOU2_SSM_ERR_BIT | \
+ ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT | \
+ ADF_GEN6_ERRSOU2_CPP_CFC_ATT_INT_MASK)
+
+#define ADF_GEN6_IAINTSTATSSM 0x28
+
+/* IAINTSTATSSM error bit mask definitions */
+#define ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT BIT(0)
+#define ADF_GEN6_IAINTSTATSSM_PPERR_BIT BIT(2)
+#define ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT BIT(4)
+#define ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT BIT(5)
+#define ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT BIT(6)
+#define ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT BIT(7)
+
+#define ADF_GEN6_IAINTSTATSSM_MASK \
+ (ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_PPERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT)
+
+#define ADF_GEN6_UERRSSMSH 0x18
+
+/*
+ * UERRSSMSH error bit mask definitions
+ *
+ * BIT(0) - Indicates one uncorrectable error
+ * BIT(15) - Indicates multiple uncorrectable errors
+ * in device shared memory
+ */
+#define ADF_GEN6_UERRSSMSH_MASK (BIT(0) | BIT(15))
+
+/*
+ * CERRSSMSH error bit
+ * BIT(0) - Indicates one correctable error
+ */
+#define ADF_GEN6_CERRSSMSH_ERROR_BIT (BIT(0) | BIT(15) | BIT(24))
+#define ADF_GEN6_CERRSSMSH 0x10
+
+#define ADF_GEN6_INTMASKSSM 0x0
+
+/*
+ * Error reporting mask in INTMASKSSM
+ * BIT(0) - Shared memory uncorrectable interrupt mask
+ * BIT(2) - PPERR interrupt mask
+ * BIT(4) - SCM parity error interrupt mask
+ * BIT(5) - CPP parity error interrupt mask
+ * BIT(6) - SHRAM RF parity error interrupt mask
+ * BIT(7) - AXI unexpected completion error mask
+ */
+#define ADF_GEN6_INTMASKSSM_MASK \
+ (BIT(0) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7))
+
+/* CPP push or pull error */
+#define ADF_GEN6_PPERR 0x8
+
+#define ADF_GEN6_PPERR_MASK (BIT(0) | BIT(1))
+
+/*
+ * SSM_FERR_STATUS error bit mask definitions
+ */
+#define ADF_GEN6_SCM_PAR_ERR_MASK BIT(5)
+#define ADF_GEN6_CPP_PAR_ERR_MASK (BIT(0) | BIT(1) | BIT(2))
+#define ADF_GEN6_UNEXP_CPL_ERR_MASK (BIT(3) | BIT(4) | BIT(10) | BIT(11))
+#define ADF_GEN6_RF_PAR_ERR_MASK BIT(16)
+
+#define ADF_GEN6_SSM_FERR_STATUS 0x9C
+
+#define ADF_GEN6_CPP_CFC_ERR_STATUS 0x640C04
+
+/*
+ * BIT(0) - Indicates one or more CPP CFC errors
+ * BIT(1) - Indicates multiple CPP CFC errors
+ * BIT(7) - Indicates CPP CFC command parity error type
+ * BIT(8) - Indicates CPP CFC data parity error type
+ */
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_ERR_BIT BIT(0)
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_MERR_BIT BIT(1)
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT BIT(7)
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT BIT(8)
+#define ADF_GEN6_CPP_CFC_FATAL_ERR_BIT \
+ (ADF_GEN6_CPP_CFC_ERR_STATUS_ERR_BIT | \
+ ADF_GEN6_CPP_CFC_ERR_STATUS_MERR_BIT)
+
+/*
+ * BIT(0) - Enables CFC to detect and log a push/pull data error
+ * BIT(1) - Enables CFC to generate interrupt to PCIEP for a CPP error
+ * BIT(4) - When 1, parity detection is disabled
+ * BIT(5) - When 1, parity detection is disabled on the CPP command bus
+ * BIT(6) - When 1, parity detection is disabled on the CPP push/pull bus
+ * BIT(9) - When 1, RF parity error detection is disabled
+ */
+#define ADF_GEN6_CPP_CFC_ERR_CTRL_MASK (BIT(0) | BIT(1))
+
+#define ADF_GEN6_CPP_CFC_ERR_CTRL_DIS_MASK \
+ (BIT(4) | BIT(5) | BIT(6) | BIT(9) | BIT(10))
+
+#define ADF_GEN6_CPP_CFC_ERR_CTRL 0x640C00
+
+/*
+ * BIT(0) - Clears bit(0) of ADF_GEN6_CPP_CFC_ERR_STATUS
+ * when an error is reported on CPP
+ * BIT(1) - Clears bit(1) of ADF_GEN6_CPP_CFC_ERR_STATUS
+ * when multiple errors are reported on CPP
+ * BIT(2) - Clears bit(2) of ADF_GEN6_CPP_CFC_ERR_STATUS
+ * when attention interrupt is reported
+ */
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK (BIT(0) | BIT(1) | BIT(2))
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_CLR 0x640C08
+
+/*
+ * ERRSOU3 bit masks
+ * BIT(0) - indicates error response order overflow and/or BME error
+ * BIT(1) - indicates RI push/pull error
+ * BIT(2) - indicates TI push/pull error
+ * BIT(5) - indicates TI pull parity error
+ * BIT(6) - indicates RI push parity error
+ * BIT(7) - indicates VFLR interrupt
+ * BIT(8) - indicates ring pair interrupts for ATU detected fault
+ * BIT(9) - indicates rate limiting error
+ */
+#define ADF_GEN6_ERRSOU3_TIMISCSTS_BIT BIT(0)
+#define ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK (BIT(1) | BIT(6))
+#define ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK (BIT(2) | BIT(5))
+#define ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT BIT(7)
+#define ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT BIT(8)
+#define ADF_GEN6_ERRSOU3_RLTERROR_BIT BIT(9)
+#define ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT BIT(16)
+#define ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT BIT(17)
+#define ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT BIT(18)
+#define ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT BIT(19)
+#define ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT BIT(20)
+
+#define ADF_GEN6_ERRSOU3_MASK ( \
+ (ADF_GEN6_ERRSOU3_TIMISCSTS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT) | \
+ (ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RLTERROR_BIT) | \
+ (ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT) | \
+ (ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT) | \
+ (ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT) | \
+ (ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT) | \
+ (ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT))
+
+#define ADF_GEN6_ERRSOU3_DIS_MASK ( \
+ (ADF_GEN6_ERRSOU3_TIMISCSTS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT) | \
+ (ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RLTERROR_BIT) | \
+ (ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT))
+
+/* Rate limiting error log register */
+#define ADF_GEN6_RLT_ERRLOG 0x508814
+
+#define ADF_GEN6_RLT_ERRLOG_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* TI misc status register */
+#define ADF_GEN6_TIMISCSTS 0x50054C
+
+/* TI misc error reporting mask */
+#define ADF_GEN6_TIMISCCTL 0x500548
+
+/*
+ * TI Misc error reporting control mask
+ * BIT(0) - Enables error detection and logging in TIMISCSTS register
+ * BIT(1) - Takes effect only when SR-IOV is enabled; this bit is 0 by default
+ * BIT(2) - Enables the D-F-x counter within the dispatch arbiter
+ * to start based on the command triggered from
+ * BIT(30) - Disables VFLR functionality
+ * The values of bits 1, 2 and 30 should be preserved and are not meant to be
+ * changed within RAS.
+ */
+#define ADF_GEN6_TIMISCCTL_BIT BIT(0)
+#define ADF_GEN6_TIMSCCTL_RELAY_MASK (BIT(1) | BIT(2) | BIT(30))
+
+/* RI CPP interface status register */
+#define ADF_GEN6_RICPPINTSTS 0x41A330
+
+/*
+ * Uncorrectable error mask in RICPPINTSTS register
+ * BIT(0) - RI asserted the CPP error signal during a push
+ * BIT(1) - RI detected the CPP error signal asserted during a pull
+ * BIT(2) - RI detected a push data parity error
+ * BIT(3) - RI detected a push valid parity error
+ */
+#define ADF_GEN6_RICPPINTSTS_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* RI CPP interface register control */
+#define ADF_GEN6_RICPPINTCTL 0x41A32C
+
+/*
+ * Control bit mask for RICPPINTCTL register
+ * BIT(0) - value of 1 enables error detection and reporting
+ * on the RI CPP Push interface
+ * BIT(1) - value of 1 enables error detection and reporting
+ * on the RI CPP Pull interface
+ * BIT(2) - value of 1 enables error detection and reporting
+ * on the RI Parity
+ * BIT(3) - value of 1 enables parity checking on CPP
+ */
+#define ADF_GEN6_RICPPINTCTL_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4))
+
+/* TI CPP interface status register */
+#define ADF_GEN6_TICPPINTSTS 0x50053C
+
+/*
+ * Uncorrectable error mask in TICPPINTSTS register
+ * BIT(0) - value of 1 indicates that the TI asserted
+ * the CPP error signal during a push
+ * BIT(1) - value of 1 indicates that the TI detected
+ * the CPP error signal asserted during a pull
+ * BIT(2) - value of 1 indicates that the TI detected
+ * a pull data parity error
+ */
+#define ADF_GEN6_TICPPINTSTS_MASK (BIT(0) | BIT(1) | BIT(2))
+
+/* TI CPP interface status register control */
+#define ADF_GEN6_TICPPINTCTL 0x500538
+
+/*
+ * Control bit mask for TICPPINTCTL register
+ * BIT(0) - value of 1 enables error detection and reporting on
+ * the TI CPP Push interface
+ * BIT(1) - value of 1 enables error detection and reporting on
+ * the TI CPP Pull interface
+ * BIT(2) - value of 1 enables parity error detection and logging on
+ * the TI CPP Pull interface
+ * BIT(3) - value of 1 enables CPP CMD and Pull Data parity checking
+ */
+#define ADF_GEN6_TICPPINTCTL_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4))
+
+/* ATU fault status register */
+#define ADF_GEN6_ATUFAULTSTATUS(i) (0x506000 + ((i) * 0x4))
+
+#define ADF_GEN6_ATUFAULTSTATUS_BIT BIT(0)
+
+/* Command parity error detected on IOSFP command to QAT */
+#define ADF_GEN6_RIMISCSTS_BIT BIT(0)
+
+#define ADF_GEN6_GENSTS 0x41A220
+#define ADF_GEN6_GENSTS_DEVICE_STATE_MASK GENMASK(1, 0)
+#define ADF_GEN6_GENSTS_RESET_TYPE_MASK GENMASK(3, 2)
+#define ADF_GEN6_GENSTS_PFLR 0x1
+#define ADF_GEN6_GENSTS_COLD_RESET 0x3
+#define ADF_GEN6_GENSTS_DEVHALT 0x1
+
+void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops);
+
+#endif /* ADF_GEN6_RAS_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
new file mode 100644
index 000000000000..58a072e2f936
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/export.h>
+
+#include "adf_gen4_config.h"
+#include "adf_gen4_hw_csr_data.h"
+#include "adf_gen4_pfvf.h"
+#include "adf_gen6_shared.h"
+
+struct adf_accel_dev;
+struct adf_pfvf_ops;
+struct adf_hw_csr_ops;
+
+/*
+ * QAT GEN4 and GEN6 devices often differ in terms of supported features,
+ * options and internal logic. However, some of the mechanisms and register
+ * layout are shared between those two GENs. This file serves as an abstraction
+ * layer that allows the existing GEN4 implementation to be reused for GEN6
+ * without additional overhead or complexity.
+ */
+void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ adf_gen4_init_pf_pfvf_ops(pfvf_ops);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_pf_pfvf_ops);
+
+void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+ return adf_gen4_init_hw_csr_ops(csr_ops);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_hw_csr_ops);
+
+int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+ return adf_gen4_cfg_dev_init(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_cfg_dev_init);
+
+int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+ return adf_comp_dev_config(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_comp_dev_config);
+
+int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev)
+{
+ return adf_no_dev_config(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_no_dev_config);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
new file mode 100644
index 000000000000..bc8e71e984fc
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_GEN6_SHARED_H_
+#define ADF_GEN6_SHARED_H_
+
+struct adf_hw_csr_ops;
+struct adf_accel_dev;
+struct adf_pfvf_ops;
+
+void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev);
+int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev);
+int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev);
+#endif /* ADF_GEN6_SHARED_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_timer.c
index 35ccb91d6ec1..8962a49f145a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_timer.c
@@ -12,9 +12,9 @@
#include "adf_admin.h"
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
-#include "adf_gen4_timer.h"
+#include "adf_timer.h"
-#define ADF_GEN4_TIMER_PERIOD_MS 200
+#define ADF_DEFAULT_TIMER_PERIOD_MS 200
/* This periodic update is used to trigger HB, RL & TL fw events */
static void work_handler(struct work_struct *work)
@@ -27,16 +27,16 @@ static void work_handler(struct work_struct *work)
accel_dev = timer_ctx->accel_dev;
adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
- msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+ msecs_to_jiffies(ADF_DEFAULT_TIMER_PERIOD_MS));
time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime),
- ADF_GEN4_TIMER_PERIOD_MS);
+ ADF_DEFAULT_TIMER_PERIOD_MS);
if (adf_send_admin_tim_sync(accel_dev, time_periods))
dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n");
}
-int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
+int adf_timer_start(struct adf_accel_dev *accel_dev)
{
struct adf_timer *timer_ctx;
@@ -50,13 +50,13 @@ int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler);
adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
- msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+ msecs_to_jiffies(ADF_DEFAULT_TIMER_PERIOD_MS));
return 0;
}
-EXPORT_SYMBOL_GPL(adf_gen4_timer_start);
+EXPORT_SYMBOL_GPL(adf_timer_start);
-void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
+void adf_timer_stop(struct adf_accel_dev *accel_dev)
{
struct adf_timer *timer_ctx = accel_dev->timer;
@@ -68,4 +68,4 @@ void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
kfree(timer_ctx);
accel_dev->timer = NULL;
}
-EXPORT_SYMBOL_GPL(adf_gen4_timer_stop);
+EXPORT_SYMBOL_GPL(adf_timer_stop);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h b/drivers/crypto/intel/qat/qat_common/adf_timer.h
index 66a709e7b358..68e5136d6ba1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_timer.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2023 Intel Corporation */
-#ifndef ADF_GEN4_TIMER_H_
-#define ADF_GEN4_TIMER_H_
+#ifndef ADF_TIMER_H_
+#define ADF_TIMER_H_
#include <linux/ktime.h>
#include <linux/workqueue.h>
@@ -15,7 +15,7 @@ struct adf_timer {
ktime_t initial_ktime;
};
-int adf_gen4_timer_start(struct adf_accel_dev *accel_dev);
-void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev);
+int adf_timer_start(struct adf_accel_dev *accel_dev);
+void adf_timer_stop(struct adf_accel_dev *accel_dev);
-#endif /* ADF_GEN4_TIMER_H_ */
+#endif /* ADF_TIMER_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
index 04f645957e28..81969c515a17 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
@@ -44,6 +44,7 @@ enum icp_qat_fw_comp_20_cmd_id {
#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MAX_VALUE 0xFFFFFFFF
#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
ret_uncomp, secure_ram) \
@@ -117,7 +118,7 @@ struct icp_qat_fw_comp_req_params {
#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr, \
cnvdfx, crc, xxhash_acc, \
cnv_error_type, append_crc, \
- drop_data) \
+ drop_data, partial_decomp) \
((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << \
ICP_QAT_FW_COMP_SOP_BITPOS) | \
(((eop) & ICP_QAT_FW_COMP_EOP_MASK) << \
@@ -139,7 +140,9 @@ struct icp_qat_fw_comp_req_params {
(((append_crc) & ICP_QAT_FW_COMP_APPEND_CRC_MASK) \
<< ICP_QAT_FW_COMP_APPEND_CRC_BITPOS) | \
(((drop_data) & ICP_QAT_FW_COMP_DROP_DATA_MASK) \
- << ICP_QAT_FW_COMP_DROP_DATA_BITPOS))
+ << ICP_QAT_FW_COMP_DROP_DATA_BITPOS) | \
+ (((partial_decomp) & ICP_QAT_FW_COMP_PARTIAL_DECOMP_MASK) \
+ << ICP_QAT_FW_COMP_PARTIAL_DECOMP_BITPOS))
#define ICP_QAT_FW_COMP_NOT_SOP 0
#define ICP_QAT_FW_COMP_SOP 1
@@ -161,6 +164,8 @@ struct icp_qat_fw_comp_req_params {
#define ICP_QAT_FW_COMP_NO_APPEND_CRC 0
#define ICP_QAT_FW_COMP_DROP_DATA 1
#define ICP_QAT_FW_COMP_NO_DROP_DATA 0
+#define ICP_QAT_FW_COMP_PARTIAL_DECOMPRESS 1
+#define ICP_QAT_FW_COMP_NO_PARTIAL_DECOMPRESS 0
#define ICP_QAT_FW_COMP_SOP_BITPOS 0
#define ICP_QAT_FW_COMP_SOP_MASK 0x1
#define ICP_QAT_FW_COMP_EOP_BITPOS 1
@@ -189,6 +194,8 @@ struct icp_qat_fw_comp_req_params {
#define ICP_QAT_FW_COMP_APPEND_CRC_MASK 0x1
#define ICP_QAT_FW_COMP_DROP_DATA_BITPOS 25
#define ICP_QAT_FW_COMP_DROP_DATA_MASK 0x1
+#define ICP_QAT_FW_COMP_PARTIAL_DECOMP_BITPOS 27
+#define ICP_QAT_FW_COMP_PARTIAL_DECOMP_MASK 0x1
#define ICP_QAT_FW_COMP_SOP_GET(flags) \
QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SOP_BITPOS, \
@@ -281,8 +288,18 @@ struct icp_qat_fw_comp_req {
union {
struct icp_qat_fw_xlt_req_params xlt_pars;
__u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
+ struct {
+ __u32 partial_decompress_length;
+ __u32 partial_decompress_offset;
+ } partial_decompress;
} u1;
- __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ union {
+ __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ struct {
+ __u32 asb_value;
+ __u32 reserved;
+ } asb_threshold;
+ } u3;
struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
union {
struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
index 7eb5daef4f88..6887930c7995 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -35,6 +35,7 @@ struct icp_qat_fw_loader_chip_info {
u32 wakeup_event_val;
bool fw_auth;
bool css_3k;
+ bool dual_sign;
bool tgroup_share_ustore;
u32 fcu_ctl_csr;
u32 fcu_sts_csr;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h
new file mode 100644
index 000000000000..dce639152345
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ICP_QAT_HW_51_COMP_H_
+#define ICP_QAT_HW_51_COMP_H_
+
+#include <linux/types.h>
+
+#include "icp_qat_fw.h"
+#include "icp_qat_hw_51_comp_defs.h"
+
+struct icp_qat_hw_comp_51_config_csr_lower {
+ enum icp_qat_hw_comp_51_abd abd;
+ enum icp_qat_hw_comp_51_lllbd_ctrl lllbd;
+ enum icp_qat_hw_comp_51_search_depth sd;
+ enum icp_qat_hw_comp_51_min_match_control mmctrl;
+ enum icp_qat_hw_comp_51_lz4_block_checksum lbc;
+};
+
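+/*
+ * Pack the lower half of the compression configuration CSR from the individual
+ * fields; callers would typically start from the *_DEFAULT_VAL definitions in
+ * icp_qat_hw_51_comp_defs.h and override only the fields they need.
+ */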
+static inline u32
+ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_51_config_csr_lower csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.abd,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_MASK);
+ QAT_FIELD_SET(val32, csr.lllbd,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_MASK);
+ QAT_FIELD_SET(val32, csr.sd,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_MASK);
+ QAT_FIELD_SET(val32, csr.mmctrl,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.lbc,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK);
+
+ return val32;
+}
+
+struct icp_qat_hw_comp_51_config_csr_upper {
+ enum icp_qat_hw_comp_51_dmm_algorithm edmm;
+ enum icp_qat_hw_comp_51_bms bms;
+ enum icp_qat_hw_comp_51_scb_mode_reset_mask scb_mode_reset;
+};
+
+static inline u32
+ICP_QAT_FW_COMP_51_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_51_config_csr_upper csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.edmm,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_MASK);
+ QAT_FIELD_SET(val32, csr.bms,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_MASK);
+ QAT_FIELD_SET(val32, csr.scb_mode_reset,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
+
+ return val32;
+}
+
+struct icp_qat_hw_decomp_51_config_csr_lower {
+ enum icp_qat_hw_decomp_51_lz4_block_checksum lbc;
+};
+
+static inline u32
+ICP_QAT_FW_DECOMP_51_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_51_config_csr_lower csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.lbc,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK);
+
+ return val32;
+}
+
+struct icp_qat_hw_decomp_51_config_csr_upper {
+ enum icp_qat_hw_decomp_51_bms bms;
+};
+
+static inline u32
+ICP_QAT_FW_DECOMP_51_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_51_config_csr_upper csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.bms,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_BITPOS,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_MASK);
+
+ return val32;
+}
+
+#endif /* ICP_QAT_HW_51_COMP_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h
new file mode 100644
index 000000000000..e745688c5da4
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ICP_QAT_HW_51_COMP_DEFS_H_
+#define ICP_QAT_HW_51_COMP_DEFS_H_
+
+#include <linux/bits.h>
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_BITPOS 28
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_MASK GENMASK(1, 0)
+enum icp_qat_hw_comp_51_som_control {
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_NORMAL_MODE = 0x0,
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_DICTIONARY_MODE = 0x1,
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_INPUT_CRC = 0x2,
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_RESERVED_MODE = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_NORMAL_MODE
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_rd_control {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_NO_SKIP
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_BITPOS 25
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_bypass_compression {
+ ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_DISABLED = 0x0,
+ ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_ENABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_DISABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_BITPOS 22
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_dmm_algorithm {
+ ICP_QAT_HW_COMP_51_DMM_ALGORITHM_EDMM_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_DMM_ALGORITHM_ZSTD_DMM_LITE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_DMM_ALGORITHM_EDMM_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_BITPOS 21
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_token_fusion_internal_only {
+ ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_BITPOS 19
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_MASK GENMASK(1, 0)
+enum icp_qat_hw_comp_51_bms {
+ ICP_QAT_HW_COMP_51_BMS_BMS_64KB = 0x0,
+ ICP_QAT_HW_COMP_51_BMS_BMS_256KB = 0x1,
+ ICP_QAT_HW_COMP_51_BMS_BMS_1MB = 0x2,
+ ICP_QAT_HW_COMP_51_BMS_BMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_BMS_BMS_64KB
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_scb_mode_reset_mask {
+ ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_DO_NOT_RESET_HB_HT = 0x0,
+ ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_RESET_HB_HT = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_DO_NOT_RESET_HB_HT
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_BITPOS 2
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_zstd_frame_gen_dec_en {
+ ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_DISABLE = 0x0,
+ ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_BITPOS 1
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_cnv_disable {
+ ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_BITPOS 0
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_asb_disable {
+ ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_BITPOS 21
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_spec_decoder_internal_only {
+ ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_BITPOS 20
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_mini_xcam_internal_only {
+ ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_BITPOS 19
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_rep_off_enc_internal_only {
+ ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_BITPOS 18
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_prog_block_drop_internal_only {
+ ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_DISABLE = 0x0,
+ ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_ENABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_DISABLE
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_BITPOS 17
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_override_internal_only {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DETERMINE_HASH_PARAMS = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_OVERRIDE_HASH_PARAMS = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DETERMINE_HASH_PARAMS
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_BITPOS 14
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_MASK GENMASK(2, 0)
+enum icp_qat_hw_comp_51_hbs {
+ ICP_QAT_HW_COMP_51_HBS_32KB = 0x0,
+ ICP_QAT_HW_COMP_51_HBS_64KB = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_HBS_32KB
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_BITPOS 13
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_abd {
+ ICP_QAT_HW_COMP_51_ABD_ABD_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_ABD_ABD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_ABD_ABD_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_BITPOS 12
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_lllbd_ctrl {
+ ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_MASK GENMASK(3, 0)
+enum icp_qat_hw_comp_51_search_depth {
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1 = 0x1,
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_6 = 0x3,
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_9 = 0x4,
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_10 = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_BITPOS 5
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_MASK GENMASK(2, 0)
+enum icp_qat_hw_comp_51_format {
+ ICP_QAT_HW_COMP_51_FORMAT_ILZ77 = 0x1,
+ ICP_QAT_HW_COMP_51_FORMAT_LZ4 = 0x2,
+ ICP_QAT_HW_COMP_51_FORMAT_LZ4s = 0x3,
+ ICP_QAT_HW_COMP_51_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_FORMAT_ILZ77
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_min_match_control {
+ ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
+ ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_3B
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_collision {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_ALLOW = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_ALLOW
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_update {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_ALLOW = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_ALLOW
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_BITPOS 1
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_byte_skip {
+ ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_TOKEN = 0x0,
+ ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_LITERAL = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_TOKEN
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS 0
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_lz4_block_checksum {
+ ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_ABSENT = 0x0,
+ ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_PRESENT = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_ABSENT
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_BITPOS 26
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_discard_data {
+ ICP_QAT_HW_DECOMP_51_DISCARD_DATA_DISABLED = 0x0,
+ ICP_QAT_HW_DECOMP_51_DISCARD_DATA_ENABLED = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_DISCARD_DATA_DISABLED
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_BITPOS 19
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_MASK GENMASK(1, 0)
+enum icp_qat_hw_decomp_51_bms {
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_64KB = 0x0,
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_256KB = 0x1,
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_1MB = 0x2,
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_64KB
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_BITPOS 2
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_zstd_frame_gen_dec_en {
+ ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_DISABLE = 0x0,
+ ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_BITPOS 21
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_spec_decoder_internal_only {
+ ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_BITPOS 20
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_mini_xcam_internal_only {
+ ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_BITPOS 14
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_MASK GENMASK(2, 0)
+enum icp_qat_hw_decomp_51_hbs {
+ ICP_QAT_HW_DECOMP_51_HBS_32KB = 0x0,
+ ICP_QAT_HW_DECOMP_51_HBS_64KB = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_HBS_32KB
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_BITPOS 5
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_MASK GENMASK(2, 0)
+enum icp_qat_hw_decomp_51_format {
+ ICP_QAT_HW_DECOMP_51_FORMAT_ILZ77 = 0x1,
+ ICP_QAT_HW_DECOMP_51_FORMAT_LZ4 = 0x2,
+ ICP_QAT_HW_DECOMP_51_FORMAT_RESERVED = 0x3,
+ ICP_QAT_HW_DECOMP_51_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_FORMAT_ILZ77
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS 0
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_lz4_block_checksum {
+ ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_ABSENT = 0x0,
+ ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_PRESENT = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_ABSENT
+
+#endif /* ICP_QAT_HW_51_COMP_DEFS_H_ */
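[Editorial illustration, not part of the patch] The BITPOS/MASK pairs above describe unshifted field masks plus their bit positions in the GEN6 (de)compression config CSRs. A minimal sketch of how such fields are typically packed into a CSR word; the QAT_51_SET_FIELD() helper below is hypothetical and only uses the macros defined in this header:

/* Hypothetical helper: mask a field value and shift it into place. */
#define QAT_51_SET_FIELD(csr, name, val) \
	((csr) | (((val) & ICP_QAT_HW_DECOMP_51_CONFIG_CSR_##name##_MASK) << \
		  ICP_QAT_HW_DECOMP_51_CONFIG_CSR_##name##_BITPOS))

/* Build a decompression CSR for an LZ4 stream with block checksums and
 * the default 32KB history buffer (illustrative values only).
 */
static u32 build_lz4_decomp_csr(void)
{
	u32 csr = 0;

	csr = QAT_51_SET_FIELD(csr, FORMAT, ICP_QAT_HW_DECOMP_51_FORMAT_LZ4);
	csr = QAT_51_SET_FIELD(csr, HBS,
			       ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_DEFAULT_VAL);
	csr = QAT_51_SET_FIELD(csr, LZ4_BLOCK_CHECKSUM,
			       ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_PRESENT);
	return csr;
}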
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
index 1c7bcd8e4055..6313c35eff0c 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
@@ -7,6 +7,7 @@
#define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000
#define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000
#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000
+#define ICP_QAT_AC_6XXX_DEV_TYPE 0x80000000
#define ICP_QAT_UCLO_MAX_AE 17
#define ICP_QAT_UCLO_MAX_CTX 8
#define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
@@ -81,6 +82,21 @@
#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000
#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000
+/* All lengths below are in bytes */
+#define ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN 12
+#define ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN 16
+#define ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN 3540
+#define ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN 64
+#define ICP_QAT_DUALSIGN_XMSS_SIG_LEN 2692
+#define ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN 2696
+#define ICP_QAT_DUALSIGN_MISC_INFO_LEN 16
+#define ICP_QAT_DUALSIGN_FW_TYPE_LEN 7
+#define ICP_QAT_DUALSIGN_MODULE_TYPE 0x14
+#define ICP_QAT_DUALSIGN_HDR_LEN 0x375
+#define ICP_QAT_DUALSIGN_HDR_VER 0x40001
+#define ICP_QAT_DUALSIGN_HDR_LEN_OFFSET 4
+#define ICP_QAT_DUALSIGN_HDR_VER_OFFSET 8
+
#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
@@ -440,6 +456,13 @@ struct icp_qat_fw_auth_desc {
unsigned int img_ae_init_data_low;
unsigned int img_ae_insts_high;
unsigned int img_ae_insts_low;
+ unsigned int cpp_mask;
+ unsigned int reserved;
+ unsigned int xmss_pubkey_high;
+ unsigned int xmss_pubkey_low;
+ unsigned int xmss_sig_high;
+ unsigned int xmss_sig_low;
+ unsigned int reserved2[2];
};
struct icp_qat_auth_chunk {
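[Editorial illustration, not part of the patch] The ICP_QAT_DUALSIGN_HDR_* constants above suggest the opaque dual-sign header carries a module type word followed by length and version words at byte offsets 4 and 8. A hedged sketch of a sanity check built only from these constants — the offset-0 location of the module type and the little-endian layout are assumptions, and get_unaligned_le32() comes from <linux/unaligned.h>:

static bool dualsign_hdr_looks_valid(const u8 *img)
{
	u32 module_type = get_unaligned_le32(img);	/* assumed at offset 0 */
	u32 hdr_len = get_unaligned_le32(img + ICP_QAT_DUALSIGN_HDR_LEN_OFFSET);
	u32 hdr_ver = get_unaligned_le32(img + ICP_QAT_DUALSIGN_HDR_VER_OFFSET);

	return module_type == ICP_QAT_DUALSIGN_MODULE_TYPE &&
	       hdr_len == ICP_QAT_DUALSIGN_HDR_LEN &&
	       hdr_ver == ICP_QAT_DUALSIGN_HDR_VER;
}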
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
index a6e02405d402..8b123472b71c 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
@@ -8,6 +8,7 @@
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
+#include "adf_dc.h"
#include "qat_bl.h"
#include "qat_comp_req.h"
#include "qat_compression.h"
@@ -145,9 +146,7 @@ static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
return -EINVAL;
ctx->inst = inst;
- ctx->inst->build_deflate_ctx(ctx->comp_ctx);
-
- return 0;
+ return qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, QAT_DEFLATE);
}
static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
@@ -241,13 +240,13 @@ static struct acomp_alg qat_acomp[] = { {
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct qat_compression_ctx),
+ .cra_reqsize = sizeof(struct qat_compression_req),
.cra_module = THIS_MODULE,
},
.init = qat_comp_alg_init_tfm,
.exit = qat_comp_alg_exit_tfm,
.compress = qat_comp_alg_compress,
.decompress = qat_comp_alg_decompress,
- .reqsize = sizeof(struct qat_compression_req),
}};
int qat_comp_algs_register(void)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index 7842a9f22178..c285b45b8679 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -144,7 +144,6 @@ static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
inst->id = i;
atomic_set(&inst->refctr, 0);
inst->accel_dev = accel_dev;
- inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.h b/drivers/crypto/intel/qat/qat_common/qat_compression.h
index aebac2302dcf..5ced3ed0e5ea 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.h
@@ -20,7 +20,6 @@ struct qat_compression_instance {
atomic_t refctr;
struct qat_instance_backlog backlog;
struct adf_dc_data *dc_data;
- void (*build_deflate_ctx)(void *ctx);
};
static inline bool adf_hw_dev_has_compression(struct adf_accel_dev *accel_dev)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index ef8a9cf74f0c..da4eca6e1633 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -694,16 +694,17 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->pci_dev = pci_info->pci_dev;
switch (handle->pci_dev->device) {
- case ADF_4XXX_PCI_DEVICE_ID:
- case ADF_401XX_PCI_DEVICE_ID:
- case ADF_402XX_PCI_DEVICE_ID:
- case ADF_420XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_4XXX:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
+ case PCI_DEVICE_ID_INTEL_QAT_420XX:
+ case PCI_DEVICE_ID_INTEL_QAT_6XXX:
handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
- if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID)
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_420XX)
handle->chip_info->icp_rst_mask = 0x100155;
else
handle->chip_info->icp_rst_mask = 0x100015;
@@ -712,6 +713,8 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->chip_info->wakeup_event_val = 0x80000000;
handle->chip_info->fw_auth = true;
handle->chip_info->css_3k = true;
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX)
+ handle->chip_info->dual_sign = true;
handle->chip_info->tgroup_share_ustore = true;
handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 7678a93c6853..21d652a1c8ef 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -1,11 +1,16 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
+
+#define pr_fmt(fmt) "QAT: " fmt
+
#include <linux/align.h>
+#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci_ids.h>
+#include <linux/wordpart.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
@@ -59,7 +64,7 @@ static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
unsigned int i;
if (!ae_data) {
- pr_err("QAT: bad argument, ae_data is NULL\n");
+ pr_err("bad argument, ae_data is NULL\n");
return -EINVAL;
}
@@ -86,12 +91,11 @@ static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
int min = hdr->min_ver & 0xff;
if (hdr->file_id != ICP_QAT_UOF_FID) {
- pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
+ pr_err("Invalid header 0x%x\n", hdr->file_id);
return -EINVAL;
}
if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
- pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
- maj, min);
+ pr_err("bad UOF version, major 0x%x, minor 0x%x\n", maj, min);
return -EINVAL;
}
return 0;
@@ -103,20 +107,19 @@ static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
int min = suof_hdr->min_ver & 0xff;
if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
- pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
+ pr_err("invalid header 0x%x\n", suof_hdr->file_id);
return -EINVAL;
}
if (suof_hdr->fw_type != 0) {
- pr_err("QAT: unsupported firmware type\n");
+ pr_err("unsupported firmware type\n");
return -EINVAL;
}
if (suof_hdr->num_chunks <= 0x1) {
- pr_err("QAT: SUOF chunk amount is incorrect\n");
+ pr_err("SUOF chunk amount is incorrect\n");
return -EINVAL;
}
if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
- pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
- maj, min);
+ pr_err("bad SUOF version, major 0x%x, minor 0x%x\n", maj, min);
return -EINVAL;
}
return 0;
@@ -223,24 +226,24 @@ static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
char *str;
if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
- pr_err("QAT: initmem is out of range");
+ pr_err("initmem is out of range");
return -EINVAL;
}
if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
- pr_err("QAT: Memory scope for init_mem error\n");
+ pr_err("Memory scope for init_mem error\n");
return -EINVAL;
}
str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
if (!str) {
- pr_err("QAT: AE name assigned in UOF init table is NULL\n");
+ pr_err("AE name assigned in UOF init table is NULL\n");
return -EINVAL;
}
if (qat_uclo_parse_num(str, ae)) {
- pr_err("QAT: Parse num for AE number failed\n");
+ pr_err("Parse num for AE number failed\n");
return -EINVAL;
}
if (*ae >= ICP_QAT_UCLO_MAX_AE) {
- pr_err("QAT: ae %d out of range\n", *ae);
+ pr_err("ae %d out of range\n", *ae);
return -EINVAL;
}
return 0;
@@ -356,8 +359,7 @@ static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
return -EINVAL;
break;
default:
- pr_err("QAT: initmem region error. region type=0x%x\n",
- init_mem->region);
+ pr_err("initmem region error. region type=0x%x\n", init_mem->region);
return -EINVAL;
}
return 0;
@@ -431,7 +433,7 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
if (qat_hal_batch_wr_lm(handle, ae,
obj_handle->lm_init_tab[ae])) {
- pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
+ pr_err("fail to batch init lmem for AE %d\n", ae);
return -EINVAL;
}
qat_uclo_cleanup_batch_init_list(handle,
@@ -539,26 +541,26 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
code_page->imp_expr_tab_offset);
if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
imp_expr_tab->entry_num) {
- pr_err("QAT: UOF can't contain imported variable to be parsed\n");
+ pr_err("UOF can't contain imported variable to be parsed\n");
return -EINVAL;
}
neigh_reg_tab = (struct icp_qat_uof_objtable *)
(encap_uof_obj->beg_uof +
code_page->neigh_reg_tab_offset);
if (neigh_reg_tab->entry_num) {
- pr_err("QAT: UOF can't contain neighbor register table\n");
+ pr_err("UOF can't contain neighbor register table\n");
return -EINVAL;
}
if (image->numpages > 1) {
- pr_err("QAT: UOF can't contain multiple pages\n");
+ pr_err("UOF can't contain multiple pages\n");
return -EINVAL;
}
if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use shared control store feature\n");
+ pr_err("UOF can't use shared control store feature\n");
return -EFAULT;
}
if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use reloadable feature\n");
+ pr_err("UOF can't use reloadable feature\n");
return -EFAULT;
}
return 0;
@@ -677,7 +679,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
}
}
if (!mflag) {
- pr_err("QAT: uimage uses AE not set\n");
+ pr_err("uimage uses AE not set\n");
return -EINVAL;
}
return 0;
@@ -731,14 +733,15 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
return ICP_QAT_AC_C62X_DEV_TYPE;
case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
return ICP_QAT_AC_C3XXX_DEV_TYPE;
- case ADF_4XXX_PCI_DEVICE_ID:
- case ADF_401XX_PCI_DEVICE_ID:
- case ADF_402XX_PCI_DEVICE_ID:
- case ADF_420XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_4XXX:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
+ case PCI_DEVICE_ID_INTEL_QAT_420XX:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
+ case PCI_DEVICE_ID_INTEL_QAT_6XXX:
+ return ICP_QAT_AC_6XXX_DEV_TYPE;
default:
- pr_err("QAT: unsupported device 0x%x\n",
- handle->pci_dev->device);
+ pr_err("unsupported device 0x%x\n", handle->pci_dev->device);
return 0;
}
}
@@ -748,7 +751,7 @@ static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
unsigned int maj_ver, prod_type = obj_handle->prod_type;
if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
- pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
+ pr_err("UOF type 0x%x doesn't match with platform 0x%x\n",
obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
prod_type);
return -EINVAL;
@@ -756,7 +759,7 @@ static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
maj_ver = obj_handle->prod_rev & 0xff;
if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
- pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
+ pr_err("UOF majVer 0x%x out of range\n", maj_ver);
return -EINVAL;
}
return 0;
@@ -799,7 +802,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_NEIGH_REL:
return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
default:
- pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
+ pr_err("UOF uses not supported reg type 0x%x\n", reg_type);
return -EFAULT;
}
return 0;
@@ -835,8 +838,7 @@ static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
case ICP_QAT_UOF_INIT_REG_CTX:
/* check if ctx is appropriate for the ctxMode */
if (!((1 << init_regsym->ctx) & ctx_mask)) {
- pr_err("QAT: invalid ctx num = 0x%x\n",
- init_regsym->ctx);
+ pr_err("invalid ctx num = 0x%x\n", init_regsym->ctx);
return -EINVAL;
}
qat_uclo_init_reg(handle, ae,
@@ -848,10 +850,10 @@ static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
exp_res);
break;
case ICP_QAT_UOF_INIT_EXPR:
- pr_err("QAT: INIT_EXPR feature not supported\n");
+ pr_err("INIT_EXPR feature not supported\n");
return -EINVAL;
case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
- pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
+ pr_err("INIT_EXPR_ENDIAN_SWAP feature not supported\n");
return -EINVAL;
default:
break;
@@ -871,7 +873,7 @@ static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
return 0;
if (obj_handle->init_mem_tab.entry_num) {
if (qat_uclo_init_memory(handle)) {
- pr_err("QAT: initialize memory failed\n");
+ pr_err("initialize memory failed\n");
return -EINVAL;
}
}
@@ -900,40 +902,40 @@ static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
+ pr_err("qat_hal_set_ae_ctx_mode error\n");
return ret;
}
if (handle->chip_info->nn) {
mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
+ pr_err("qat_hal_set_ae_nn_mode error\n");
return ret;
}
}
mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM0 error\n");
return ret;
}
mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM1 error\n");
return ret;
}
if (handle->chip_info->lm2lm3) {
mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM2 error\n");
return ret;
}
mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM3 error\n");
return ret;
}
mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
@@ -997,7 +999,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
obj_handle->prod_rev = PID_MAJOR_REV |
(PID_MINOR_REV & handle->hal_handle->revision_id);
if (qat_uclo_check_uof_compat(obj_handle)) {
- pr_err("QAT: UOF incompatible\n");
+ pr_err("UOF incompatible\n");
return -EINVAL;
}
obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
@@ -1008,7 +1010,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
if (!obj_handle->obj_hdr->file_buff ||
!qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
&obj_handle->str_table)) {
- pr_err("QAT: UOF doesn't have effective images\n");
+ pr_err("UOF doesn't have effective images\n");
goto out_err;
}
obj_handle->uimage_num =
@@ -1017,7 +1019,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
if (!obj_handle->uimage_num)
goto out_err;
if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
- pr_err("QAT: Bad object\n");
+ pr_err("Bad object\n");
goto out_check_uof_aemask_err;
}
qat_uclo_init_uword_num(handle);
@@ -1034,6 +1036,36 @@ out_err:
return -EFAULT;
}
+static unsigned int qat_uclo_simg_hdr2sign_len(struct icp_qat_fw_loader_handle *handle)
+{
+ if (handle->chip_info->dual_sign)
+ return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN;
+
+ return ICP_QAT_AE_IMG_OFFSET(handle);
+}
+
+static unsigned int qat_uclo_simg_hdr2cont_len(struct icp_qat_fw_loader_handle *handle)
+{
+ if (handle->chip_info->dual_sign)
+ return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN + ICP_QAT_DUALSIGN_MISC_INFO_LEN;
+
+ return ICP_QAT_AE_IMG_OFFSET(handle);
+}
+
+static unsigned int qat_uclo_simg_fw_type(struct icp_qat_fw_loader_handle *handle, void *img_ptr)
+{
+ struct icp_qat_css_hdr *hdr = img_ptr;
+ char *fw_hdr = img_ptr;
+ unsigned int offset;
+
+ if (handle->chip_info->dual_sign) {
+ offset = qat_uclo_simg_hdr2sign_len(handle) + ICP_QAT_DUALSIGN_FW_TYPE_LEN;
+ return *(fw_hdr + offset);
+ }
+
+ return hdr->fw_type;
+}
+
static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_filehdr *suof_ptr,
int suof_size)
@@ -1050,7 +1082,7 @@ static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
min_ver_offset);
if (check_sum != suof_ptr->check_sum) {
- pr_err("QAT: incorrect SUOF checksum\n");
+ pr_err("incorrect SUOF checksum\n");
return -EINVAL;
}
suof_handle->check_sum = suof_ptr->check_sum;
@@ -1065,9 +1097,9 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
- unsigned int offset = ICP_QAT_AE_IMG_OFFSET(handle);
- struct icp_qat_simg_ae_mode *ae_mode;
+ unsigned int offset = qat_uclo_simg_hdr2cont_len(handle);
struct icp_qat_suof_objhdr *suof_objhdr;
+ struct icp_qat_simg_ae_mode *ae_mode;
suof_img_hdr->simg_buf = (suof_handle->suof_buf +
suof_chunk_hdr->offset +
@@ -1112,14 +1144,13 @@ static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
prod_rev = PID_MAJOR_REV |
(PID_MINOR_REV & handle->hal_handle->revision_id);
if (img_ae_mode->dev_type != prod_type) {
- pr_err("QAT: incompatible product type %x\n",
- img_ae_mode->dev_type);
+ pr_err("incompatible product type %x\n", img_ae_mode->dev_type);
return -EINVAL;
}
maj_ver = prod_rev & 0xff;
if (maj_ver > img_ae_mode->devmax_ver ||
maj_ver < img_ae_mode->devmin_ver) {
- pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
+ pr_err("incompatible device majver 0x%x\n", maj_ver);
return -EINVAL;
}
return 0;
@@ -1162,7 +1193,7 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_img_hdr img_header;
if (!suof_ptr || suof_size == 0) {
- pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
+ pr_err("input parameter SUOF pointer/size is NULL\n");
return -EINVAL;
}
if (qat_uclo_check_suof_format(suof_ptr))
@@ -1205,7 +1236,6 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
}
#define ADD_ADDR(high, low) ((((u64)high) << 32) + low)
-#define BITS_IN_DWORD 32
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
@@ -1223,7 +1253,7 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;
- SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
+ SET_CAP_CSR(handle, fcu_dram_hi_csr, bus_addr >> BITS_PER_TYPE(u32));
SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
@@ -1237,7 +1267,7 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
return 0;
} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
- pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
+ pr_err("authentication error (FCU_STATUS = 0x%x),retry = %d\n",
fcu_sts & FCU_AUTH_STS_MASK, retry);
return -EINVAL;
}
@@ -1273,14 +1303,13 @@ static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
fcu_sts_csr = handle->chip_info->fcu_sts_csr;
fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
} else {
- pr_err("Chip 0x%x doesn't support broadcast load\n",
- handle->pci_dev->device);
+ pr_err("Chip 0x%x doesn't support broadcast load\n", handle->pci_dev->device);
return -EINVAL;
}
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
- pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
+ pr_err("Broadcast load failed. AE is not enabled or active.\n");
return -EINVAL;
}
@@ -1312,7 +1341,7 @@ static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
} while (retry++ < FW_AUTH_MAX_RETRY);
if (retry > FW_AUTH_MAX_RETRY) {
- pr_err("QAT: broadcast load failed timeout %d\n", retry);
+ pr_err("broadcast load failed timeout %d\n", retry);
return -EINVAL;
}
}
@@ -1366,24 +1395,38 @@ static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
}
static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
- char *image, unsigned int size,
+ void *image, unsigned int size,
unsigned int fw_type)
{
char *fw_type_name = fw_type ? "MMP" : "AE";
unsigned int css_dword_size = sizeof(u32);
+ unsigned int header_len, simg_type;
+ struct icp_qat_css_hdr *css_hdr;
if (handle->chip_info->fw_auth) {
- struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
- unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
+ header_len = qat_uclo_simg_hdr2sign_len(handle);
+ simg_type = qat_uclo_simg_fw_type(handle, image);
+ css_hdr = image;
+
+ if (handle->chip_info->dual_sign) {
+ if (css_hdr->module_type != ICP_QAT_DUALSIGN_MODULE_TYPE)
+ goto err;
+ if (css_hdr->header_len != ICP_QAT_DUALSIGN_HDR_LEN)
+ goto err;
+ if (css_hdr->header_ver != ICP_QAT_DUALSIGN_HDR_VER)
+ goto err;
+ } else {
+ if (css_hdr->header_len * css_dword_size != header_len)
+ goto err;
+ if (css_hdr->size * css_dword_size != size)
+ goto err;
+ if (size <= header_len)
+ goto err;
+ }
- if ((css_hdr->header_len * css_dword_size) != header_len)
- goto err;
- if ((css_hdr->size * css_dword_size) != size)
- goto err;
- if (fw_type != css_hdr->fw_type)
- goto err;
- if (size <= header_len)
+ if (fw_type != simg_type)
goto err;
+
size -= header_len;
}
@@ -1397,123 +1440,95 @@ static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
goto err;
} else {
- pr_err("QAT: Unsupported firmware type\n");
+ pr_err("Unsupported firmware type\n");
return -EINVAL;
}
return 0;
err:
- pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
+ pr_err("Invalid %s firmware image\n", fw_type_name);
return -EINVAL;
}
-static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
- char *image, unsigned int size,
- struct icp_qat_fw_auth_desc **desc)
+static int qat_uclo_build_auth_desc_RSA(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ struct icp_firml_dram_desc *dram_desc,
+ unsigned int fw_type, struct icp_qat_fw_auth_desc **desc)
{
struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
- struct icp_qat_fw_auth_desc *auth_desc;
- struct icp_qat_auth_chunk *auth_chunk;
- u64 virt_addr, bus_addr, virt_base;
- unsigned int simg_offset = sizeof(*auth_chunk);
struct icp_qat_simg_ae_mode *simg_ae_mode;
- struct icp_firml_dram_desc img_desc;
- int ret;
-
- ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN);
- if (ret) {
- pr_err("QAT: error, allocate continuous dram fail\n");
- return ret;
- }
-
- if (!IS_ALIGNED(img_desc.dram_size, 8) || !img_desc.dram_bus_addr) {
- pr_debug("QAT: invalid address\n");
- qat_uclo_simg_free(handle, &img_desc);
- return -EINVAL;
- }
+ struct icp_qat_fw_auth_desc *auth_desc;
+ char *virt_addr, *virt_base;
+ u64 bus_addr;
- auth_chunk = img_desc.dram_base_addr_v;
- auth_chunk->chunk_size = img_desc.dram_size;
- auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
- virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
- bus_addr = img_desc.dram_bus_addr + simg_offset;
- auth_desc = img_desc.dram_base_addr_v;
- auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->css_hdr_low = (unsigned int)bus_addr;
+ virt_base = dram_desc->dram_base_addr_v;
+ virt_base += sizeof(struct icp_qat_auth_chunk);
+ bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk);
+ auth_desc = dram_desc->dram_base_addr_v;
+ auth_desc->css_hdr_high = upper_32_bits(bus_addr);
+ auth_desc->css_hdr_low = lower_32_bits(bus_addr);
virt_addr = virt_base;
- memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
+ memcpy(virt_addr, image, sizeof(*css_hdr));
/* pub key */
bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
sizeof(*css_hdr);
virt_addr = virt_addr + sizeof(*css_hdr);
- auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
+ auth_desc->fwsk_pub_high = upper_32_bits(bus_addr);
+ auth_desc->fwsk_pub_low = lower_32_bits(bus_addr);
- memcpy((void *)(uintptr_t)virt_addr,
- (void *)(image + sizeof(*css_hdr)),
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
+ memcpy(virt_addr, image + sizeof(*css_hdr), ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
/* padding */
memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
/* exponent */
- memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
- ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
- (void *)(image + sizeof(*css_hdr) +
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
- sizeof(unsigned int));
+ memcpy(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+ ICP_QAT_CSS_FWSK_PAD_LEN(handle), image + sizeof(*css_hdr) +
+ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle), sizeof(unsigned int));
/* signature */
bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
auth_desc->fwsk_pub_low) +
ICP_QAT_CSS_FWSK_PUB_LEN(handle);
virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
- auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->signature_low = (unsigned int)bus_addr;
+ auth_desc->signature_high = upper_32_bits(bus_addr);
+ auth_desc->signature_low = lower_32_bits(bus_addr);
- memcpy((void *)(uintptr_t)virt_addr,
- (void *)(image + sizeof(*css_hdr) +
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
- ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
- ICP_QAT_CSS_SIGNATURE_LEN(handle));
+ memcpy(virt_addr, image + sizeof(*css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+ ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle), ICP_QAT_CSS_SIGNATURE_LEN(handle));
bus_addr = ADD_ADDR(auth_desc->signature_high,
auth_desc->signature_low) +
ICP_QAT_CSS_SIGNATURE_LEN(handle);
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
- auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->img_low = (unsigned int)bus_addr;
- auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
- if (bus_addr + auth_desc->img_len > img_desc.dram_bus_addr +
- ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) {
- pr_err("QAT: insufficient memory size for authentication data\n");
- qat_uclo_simg_free(handle, &img_desc);
+ auth_desc->img_high = upper_32_bits(bus_addr);
+ auth_desc->img_low = lower_32_bits(bus_addr);
+ auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle);
+ if (bus_addr + auth_desc->img_len >
+ dram_desc->dram_bus_addr + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) {
+ pr_err("insufficient memory size for authentication data\n");
+ qat_uclo_simg_free(handle, dram_desc);
return -ENOMEM;
}
- memcpy((void *)(uintptr_t)virt_addr,
- (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
- auth_desc->img_len);
+ memcpy(virt_addr, image + qat_uclo_simg_hdr2sign_len(handle), auth_desc->img_len);
virt_addr = virt_base;
/* AE firmware */
- if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
- CSS_AE_FIRMWARE) {
+ if (fw_type == CSS_AE_FIRMWARE) {
auth_desc->img_ae_mode_data_high = auth_desc->img_high;
auth_desc->img_ae_mode_data_low = auth_desc->img_low;
bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
auth_desc->img_ae_mode_data_low) +
sizeof(struct icp_qat_simg_ae_mode);
- auth_desc->img_ae_init_data_high = (unsigned int)
- (bus_addr >> BITS_IN_DWORD);
- auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
+ auth_desc->img_ae_init_data_high = upper_32_bits(bus_addr);
+ auth_desc->img_ae_init_data_low = lower_32_bits(bus_addr);
bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
- auth_desc->img_ae_insts_high = (unsigned int)
- (bus_addr >> BITS_IN_DWORD);
- auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
+ auth_desc->img_ae_insts_high = upper_32_bits(bus_addr);
+ auth_desc->img_ae_insts_low = lower_32_bits(bus_addr);
virt_addr += sizeof(struct icp_qat_css_hdr);
virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
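[Editorial illustration, not part of the patch] The rework above replaces the open-coded ">> BITS_IN_DWORD" casts with upper_32_bits()/lower_32_bits(), which pairs with the ADD_ADDR() macro already defined in this file for reassembly. A tiny sketch of the idiom (upper_32_bits()/lower_32_bits() are provided by <linux/wordpart.h>, included earlier in this patch):

static u64 split_and_rejoin(u64 bus_addr)
{
	u32 hi = upper_32_bits(bus_addr);	/* stored in the *_high descriptor word */
	u32 lo = lower_32_bits(bus_addr);	/* stored in the *_low descriptor word */

	return ADD_ADDR(hi, lo);		/* reassembles the original bus address */
}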
@@ -1527,6 +1542,141 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
return 0;
}
+static int qat_uclo_build_auth_desc_dualsign(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ struct icp_firml_dram_desc *dram_desc,
+ unsigned int fw_type,
+ struct icp_qat_fw_auth_desc **desc)
+{
+ struct icp_qat_simg_ae_mode *simg_ae_mode;
+ struct icp_qat_fw_auth_desc *auth_desc;
+ unsigned int chunk_offset, img_offset;
+ u64 bus_addr, addr;
+ char *virt_addr;
+
+ virt_addr = dram_desc->dram_base_addr_v;
+ virt_addr += sizeof(struct icp_qat_auth_chunk);
+ bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk);
+
+ auth_desc = dram_desc->dram_base_addr_v;
+ auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle);
+ auth_desc->css_hdr_high = upper_32_bits(bus_addr);
+ auth_desc->css_hdr_low = lower_32_bits(bus_addr);
+ memcpy(virt_addr, image, ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN);
+
+ img_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN;
+ chunk_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN;
+
+ /* RSA pub key */
+ addr = bus_addr + chunk_offset;
+ auth_desc->fwsk_pub_high = upper_32_bits(addr);
+ auth_desc->fwsk_pub_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
+
+ img_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle);
+ chunk_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle);
+ /* RSA padding */
+ memset(virt_addr + chunk_offset, 0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
+
+ chunk_offset += ICP_QAT_CSS_FWSK_PAD_LEN(handle);
+ /* RSA exponent */
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle));
+
+ img_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
+ chunk_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
+ /* RSA signature */
+ addr = bus_addr + chunk_offset;
+ auth_desc->signature_high = upper_32_bits(addr);
+ auth_desc->signature_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_SIGNATURE_LEN(handle));
+
+ img_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+ chunk_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+ /* XMSS pubkey */
+ addr = bus_addr + chunk_offset;
+ auth_desc->xmss_pubkey_high = upper_32_bits(addr);
+ auth_desc->xmss_pubkey_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN);
+
+ img_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN;
+ chunk_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN;
+ /* XMSS signature */
+ addr = bus_addr + chunk_offset;
+ auth_desc->xmss_sig_high = upper_32_bits(addr);
+ auth_desc->xmss_sig_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_SIG_LEN);
+
+ img_offset += ICP_QAT_DUALSIGN_XMSS_SIG_LEN;
+ chunk_offset += ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN;
+
+ if (dram_desc->dram_size < (chunk_offset + auth_desc->img_len)) {
+ pr_err("auth chunk memory size is not enough to store data\n");
+ return -ENOMEM;
+ }
+
+ /* Signed data */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_high = upper_32_bits(addr);
+ auth_desc->img_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, auth_desc->img_len);
+
+ chunk_offset += ICP_QAT_DUALSIGN_MISC_INFO_LEN;
+ /* AE firmware */
+ if (fw_type == CSS_AE_FIRMWARE) {
+ /* AE mode data */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_mode_data_high = upper_32_bits(addr);
+ auth_desc->img_ae_mode_data_low = lower_32_bits(addr);
+ simg_ae_mode =
+ (struct icp_qat_simg_ae_mode *)(virt_addr + chunk_offset);
+ auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
+
+ chunk_offset += sizeof(struct icp_qat_simg_ae_mode);
+ /* AE init seq */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_init_data_high = upper_32_bits(addr);
+ auth_desc->img_ae_init_data_low = lower_32_bits(addr);
+
+ chunk_offset += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
+ /* AE instructions */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_insts_high = upper_32_bits(addr);
+ auth_desc->img_ae_insts_low = lower_32_bits(addr);
+ } else {
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_insts_high = upper_32_bits(addr);
+ auth_desc->img_ae_insts_low = lower_32_bits(addr);
+ }
+ *desc = auth_desc;
+ return 0;
+}
+
+static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ struct icp_qat_fw_auth_desc **desc)
+{
+ struct icp_qat_auth_chunk *auth_chunk;
+ struct icp_firml_dram_desc img_desc;
+ unsigned int simg_fw_type;
+ int ret;
+
+ ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN);
+ if (ret)
+ return ret;
+
+ simg_fw_type = qat_uclo_simg_fw_type(handle, image);
+ auth_chunk = img_desc.dram_base_addr_v;
+ auth_chunk->chunk_size = img_desc.dram_size;
+ auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
+
+ if (handle->chip_info->dual_sign)
+ return qat_uclo_build_auth_desc_dualsign(handle, image, size, &img_desc,
+ simg_fw_type, desc);
+
+ return qat_uclo_build_auth_desc_RSA(handle, image, size, &img_desc,
+ simg_fw_type, desc);
+}
+
static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
{
@@ -1546,7 +1696,7 @@ static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
if (!((desc->ae_mask >> i) & 0x1))
continue;
if (qat_hal_check_ae_active(handle, i)) {
- pr_err("QAT: AE %d is active\n", i);
+ pr_err("AE %d is active\n", i);
return -EINVAL;
}
SET_CAP_CSR(handle, fcu_ctl_csr,
@@ -1566,7 +1716,7 @@ static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
}
} while (retry++ < FW_AUTH_MAX_RETRY);
if (retry > FW_AUTH_MAX_RETRY) {
- pr_err("QAT: firmware load failed timeout %x\n", retry);
+ pr_err("firmware load failed timeout %x\n", retry);
return -EINVAL;
}
}
@@ -1584,7 +1734,7 @@ static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
handle->sobj_handle = suof_handle;
if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
qat_uclo_del_suof(handle);
- pr_err("QAT: map SUOF failed\n");
+ pr_err("map SUOF failed\n");
return -EINVAL;
}
return 0;
@@ -1608,7 +1758,7 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
qat_uclo_ummap_auth_fw(handle, &desc);
} else {
if (handle->chip_info->mmp_sram_size < mem_size) {
- pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
+ pr_err("MMP size is too large: 0x%x\n", mem_size);
return -EFBIG;
}
qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
@@ -1634,7 +1784,7 @@ static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
ICP_QAT_UOF_OBJS);
if (!objhdl->obj_hdr) {
- pr_err("QAT: object file chunk is null\n");
+ pr_err("object file chunk is null\n");
goto out_objhdr_err;
}
handle->obj_handle = objhdl;
@@ -1669,7 +1819,7 @@ static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
min_ver_offset);
if (checksum != mof_ptr->checksum) {
- pr_err("QAT: incorrect MOF checksum\n");
+ pr_err("incorrect MOF checksum\n");
return -EINVAL;
}
@@ -1705,7 +1855,7 @@ static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
}
}
- pr_err("QAT: object %s is not found inside MOF\n", obj_name);
+ pr_err("object %s is not found inside MOF\n", obj_name);
return -EINVAL;
}
@@ -1722,7 +1872,7 @@ static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
} else {
- pr_err("QAT: unsupported chunk id\n");
+ pr_err("unsupported chunk id\n");
return -EINVAL;
}
mobj_hdr->obj_buf = obj;
@@ -1783,7 +1933,7 @@ static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
}
if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
- pr_err("QAT: inconsistent UOF/SUOF chunk amount\n");
+ pr_err("inconsistent UOF/SUOF chunk amount\n");
return -EINVAL;
}
return 0;
@@ -1824,17 +1974,16 @@ static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
int min = mof_hdr->min_ver & 0xff;
if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
- pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
+ pr_err("invalid header 0x%x\n", mof_hdr->file_id);
return -EINVAL;
}
if (mof_hdr->num_chunks <= 0x1) {
- pr_err("QAT: MOF chunk amount is incorrect\n");
+ pr_err("MOF chunk amount is incorrect\n");
return -EINVAL;
}
if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
- pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
- maj, min);
+ pr_err("bad MOF version, major 0x%x, minor 0x%x\n", maj, min);
return -EINVAL;
}
return 0;
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
index 5bf5c890c362..1427fe76f171 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
qat_dh895xcc-y := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index e48bcf1818cd..5b4bd0ba1ccb 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -4,7 +4,6 @@
#include <adf_admin.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -24,7 +23,6 @@ static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
static struct adf_hw_device_class dh895xcc_class = {
.name = ADF_DH895XCC_DEVICE_NAME,
.type = DEV_DH895XCC,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index 07e9d7e52861..b59e0cc49e52 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_dh895xcc_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_DH895XCC_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_DH895XCC_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
index 93f9c81edf09..c2fdb6e0f68f 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
qat_dh895xccvf-y := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index f4ee4c2e00da..828456c43b76 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -3,7 +3,6 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -13,7 +12,6 @@
static struct adf_hw_device_class dh895xcciov_class = {
.name = ADF_DH895XCCVF_DEVICE_NAME,
.type = DEV_DH895XCCVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index fa08f10e6f3f..9c21f5d835d2 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -94,7 +94,7 @@ static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
- if (engine->chain.first && engine->chain.last)
+ if (engine->chain_hw.first && engine->chain_hw.last)
return mv_cesa_tdma_process(engine, status);
return mv_cesa_std_process(engine, status);
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index d215a6bed6bc..50ca1039fdaa 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -440,8 +440,10 @@ struct mv_cesa_dev {
* SRAM
* @queue: fifo of the pending crypto requests
* @load: engine load counter, useful for load balancing
- * @chain: list of the current tdma descriptors being processed
- * by this engine.
+ * @chain_hw: list of the current tdma descriptors being processed
+ * by the hardware.
+ * @chain_sw: list of the current tdma descriptors that will be
+ * submitted to the hardware.
* @complete_queue: fifo of the processed requests by the engine
*
* Structure storing CESA engine information.
@@ -463,7 +465,8 @@ struct mv_cesa_engine {
struct gen_pool *pool;
struct crypto_queue queue;
atomic_t load;
- struct mv_cesa_tdma_chain chain;
+ struct mv_cesa_tdma_chain chain_hw;
+ struct mv_cesa_tdma_chain chain_sw;
struct list_head complete_queue;
int irq;
};
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index cf62db50f958..48c5c8ea8c43 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -459,6 +459,9 @@ static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_engine *engine;
+ if (!req->cryptlen)
+ return 0;
+
ret = mv_cesa_skcipher_req_init(req, tmpl);
if (ret)
return ret;
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index f150861ceaf6..6815eddc9068 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -663,7 +663,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (ret)
goto err_free_tdma;
- if (iter.src.sg) {
+ if (iter.base.len > iter.src.op_offset) {
/*
* Add all the new data, inserting an operation block and
* launch command between each full SRAM block-worth of
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index 388a06e180d6..243305354420 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -38,6 +38,15 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
struct mv_cesa_engine *engine = dreq->engine;
+ spin_lock_bh(&engine->lock);
+ if (engine->chain_sw.first == dreq->chain.first) {
+ engine->chain_sw.first = NULL;
+ engine->chain_sw.last = NULL;
+ }
+ engine->chain_hw.first = dreq->chain.first;
+ engine->chain_hw.last = dreq->chain.last;
+ spin_unlock_bh(&engine->lock);
+
writel_relaxed(0, engine->regs + CESA_SA_CFG);
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
@@ -96,25 +105,27 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
struct mv_cesa_req *dreq)
{
- if (engine->chain.first == NULL && engine->chain.last == NULL) {
- engine->chain.first = dreq->chain.first;
- engine->chain.last = dreq->chain.last;
- } else {
- struct mv_cesa_tdma_desc *last;
+ struct mv_cesa_tdma_desc *last = engine->chain_sw.last;
- last = engine->chain.last;
+ /*
+ * Break the DMA chain if the request being queued needs the IV
+ * regs to be set before lauching the request.
+ */
+ if (!last || dreq->chain.first->flags & CESA_TDMA_SET_STATE)
+ engine->chain_sw.first = dreq->chain.first;
+ else {
last->next = dreq->chain.first;
- engine->chain.last = dreq->chain.last;
-
- /*
- * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
- * the last element of the current chain, or if the request
- * being queued needs the IV regs to be set before lauching
- * the request.
- */
- if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
- !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
- last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+ last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+ }
+ last = dreq->chain.last;
+ engine->chain_sw.last = last;
+ /*
+ * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+ * the last element of the current chain.
+ */
+ if (last->flags & CESA_TDMA_BREAK_CHAIN) {
+ engine->chain_sw.first = NULL;
+ engine->chain_sw.last = NULL;
}
}
@@ -127,7 +138,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
- for (tdma = engine->chain.first; tdma; tdma = next) {
+ for (tdma = engine->chain_hw.first; tdma; tdma = next) {
spin_lock_bh(&engine->lock);
next = tdma->next;
spin_unlock_bh(&engine->lock);
@@ -149,12 +160,12 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
&backlog);
/* Re-chaining to the next request */
- engine->chain.first = tdma->next;
+ engine->chain_hw.first = tdma->next;
tdma->next = NULL;
/* If this is the last request, clear the chain */
- if (engine->chain.first == NULL)
- engine->chain.last = NULL;
+ if (engine->chain_hw.first == NULL)
+ engine->chain_hw.last = NULL;
spin_unlock_bh(&engine->lock);
ctx = crypto_tfm_ctx(req->tfm);
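[Editorial summary, not driver code] The tdma.c changes above split the single descriptor chain into a software chain (requests queued but not yet handed to the engine) and a hardware chain (what the engine is currently walking). A hedged sketch of the software-chaining rule they implement: a new request is appended to the pending software chain only when a tail already exists and the request does not need the IV registers programmed first, while a tail descriptor flagged CESA_TDMA_BREAK_CHAIN always terminates the software chain.

static bool can_append_to_sw_chain(const struct mv_cesa_tdma_desc *sw_last,
				   const struct mv_cesa_tdma_desc *new_first)
{
	/* No existing tail, or the new request must load the IV registers
	 * before launch: start a fresh software chain instead of linking.
	 */
	return sw_last && !(new_first->flags & CESA_TDMA_SET_STATE);
}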
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
index 5cae8fafa151..d4aab9e20f2a 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -6,6 +6,7 @@
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "cn10k_cpt.h"
+#include "otx2_cpt_common.h"
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
struct otx2_cptlf_info *lf);
@@ -27,7 +28,7 @@ static struct cpt_hw_ops cn10k_hw_ops = {
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
struct otx2_cptlf_info *lf)
{
- void __iomem *lmtline = lf->lmtline;
+ void *lmtline = lf->lfs->lmt_info.base + (lf->slot * LMTLINE_SIZE);
u64 val = (lf->slot & 0x7FF);
u64 tar_addr = 0;
@@ -41,15 +42,49 @@ static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
dma_wmb();
/* Copy CPT command to LMTLINE */
- memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
+ memcpy(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
cn10k_lmt_flush(val, tar_addr);
}
+void cn10k_cpt_lmtst_free(struct pci_dev *pdev, struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_lmt_info *lmt_info = &lfs->lmt_info;
+
+ if (!lmt_info->base)
+ return;
+
+ dma_free_attrs(&pdev->dev, lmt_info->size,
+ lmt_info->base - lmt_info->align,
+ lmt_info->iova - lmt_info->align,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+}
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_lmtst_free, "CRYPTO_DEV_OCTEONTX2_CPT");
+
+static int cn10k_cpt_lmtst_alloc(struct pci_dev *pdev,
+ struct otx2_cptlfs_info *lfs, u32 size)
+{
+ struct otx2_lmt_info *lmt_info = &lfs->lmt_info;
+ dma_addr_t align_iova;
+ dma_addr_t iova;
+
+ lmt_info->base = dma_alloc_attrs(&pdev->dev, size, &iova, GFP_KERNEL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+ if (!lmt_info->base)
+ return -ENOMEM;
+
+ align_iova = ALIGN((u64)iova, LMTLINE_ALIGN);
+ lmt_info->iova = align_iova;
+ lmt_info->align = align_iova - iova;
+ lmt_info->size = size;
+ lmt_info->base += lmt_info->align;
+ return 0;
+}
+
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
{
struct pci_dev *pdev = cptpf->pdev;
- resource_size_t size;
- u64 lmt_base;
+ u32 size;
+ int ret;
if (!test_bit(CN10K_LMTST, &cptpf->cap_flag)) {
cptpf->lfs.ops = &otx2_hw_ops;
@@ -57,18 +92,19 @@ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
}
cptpf->lfs.ops = &cn10k_hw_ops;
- lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR);
- if (!lmt_base) {
- dev_err(&pdev->dev, "PF LMTLINE address not configured\n");
- return -ENOMEM;
+ size = OTX2_CPT_MAX_VFS_NUM * LMTLINE_SIZE + LMTLINE_ALIGN;
+ ret = cn10k_cpt_lmtst_alloc(pdev, &cptpf->lfs, size);
+ if (ret) {
+ dev_err(&pdev->dev, "PF-%d LMTLINE memory allocation failed\n",
+ cptpf->pf_id);
+ return ret;
}
- size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
- size -= ((1 + cptpf->max_vfs) * MBOX_SIZE);
- cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size);
- if (!cptpf->lfs.lmt_base) {
- dev_err(&pdev->dev,
- "Mapping of PF LMTLINE address failed\n");
- return -ENOMEM;
+
+ ret = otx2_cpt_lmtst_tbl_setup_msg(&cptpf->lfs);
+ if (ret) {
+ dev_err(&pdev->dev, "PF-%d: LMTST Table setup failed\n",
+ cptpf->pf_id);
+ cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
}
return 0;
@@ -78,18 +114,25 @@ EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, "CRYPTO_DEV_OCTEONTX2_CPT");
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
{
struct pci_dev *pdev = cptvf->pdev;
- resource_size_t offset, size;
+ u32 size;
+ int ret;
if (!test_bit(CN10K_LMTST, &cptvf->cap_flag))
return 0;
- offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
- size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
- /* Map VF LMILINE region */
- cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size);
- if (!cptvf->lfs.lmt_base) {
- dev_err(&pdev->dev, "Unable to map BAR4\n");
- return -ENOMEM;
+ size = cptvf->lfs.lfs_num * LMTLINE_SIZE + LMTLINE_ALIGN;
+ ret = cn10k_cpt_lmtst_alloc(pdev, &cptvf->lfs, size);
+ if (ret) {
+ dev_err(&pdev->dev, "VF-%d LMTLINE memory allocation failed\n",
+ cptvf->vf_id);
+ return ret;
+ }
+
+ ret = otx2_cpt_lmtst_tbl_setup_msg(&cptvf->lfs);
+ if (ret) {
+ dev_err(&pdev->dev, "VF-%d: LMTST Table setup failed\n",
+ cptvf->vf_id);
+ cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
}
return 0;
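[Editorial illustration, not part of the patch] cn10k_cpt_lmtst_alloc()/cn10k_cpt_lmtst_free() above over-allocate the LMT region by LMTLINE_ALIGN, round the returned IOVA up to the alignment, and remember how many bytes were skipped so the original addresses can be handed back to dma_free_attrs(). A generalized sketch of that pattern (struct and helper names are hypothetical; requires <linux/dma-mapping.h>):

struct aligned_dma_buf {
	void *va;		/* aligned CPU address handed out to users */
	dma_addr_t iova;	/* aligned device address */
	u32 size;		/* size originally passed to dma_alloc_attrs() */
	u8 align;		/* bytes skipped to reach the aligned address */
};

static int aligned_dma_alloc(struct device *dev, struct aligned_dma_buf *buf,
			     u32 size, u32 align)
{
	dma_addr_t iova;

	buf->va = dma_alloc_attrs(dev, size, &iova, GFP_KERNEL,
				  DMA_ATTR_FORCE_CONTIGUOUS);
	if (!buf->va)
		return -ENOMEM;

	buf->iova = ALIGN(iova, align);		/* round the device address up */
	buf->align = buf->iova - iova;		/* remember the skipped bytes */
	buf->size = size;
	buf->va += buf->align;			/* hand out the aligned CPU address */
	return 0;
}

static void aligned_dma_free(struct device *dev, struct aligned_dma_buf *buf)
{
	if (!buf->va)
		return;

	/* Undo the alignment offset so the allocator sees its own addresses. */
	dma_free_attrs(dev, buf->size, buf->va - buf->align,
		       buf->iova - buf->align, DMA_ATTR_FORCE_CONTIGUOUS);
}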
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
index 92be3ecf570f..ea5990048c21 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
@@ -50,6 +50,7 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf);
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf);
+void cn10k_cpt_lmtst_free(struct pci_dev *pdev, struct otx2_cptlfs_info *lfs);
void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval);
int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
struct cn10k_cpt_errata_ctx *er_ctx);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index c5b7c57574ef..d529bcb03775 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -145,11 +145,8 @@ static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
static inline bool is_dev_otx2(struct pci_dev *pdev)
{
- if (pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
- pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID)
- return true;
-
- return false;
+ return pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
+ pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID;
}
static inline bool is_dev_cn10ka(struct pci_dev *pdev)
@@ -159,12 +156,10 @@ static inline bool is_dev_cn10ka(struct pci_dev *pdev)
static inline bool is_dev_cn10ka_ax(struct pci_dev *pdev)
{
- if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
- ((pdev->revision & 0xFF) == 4 || (pdev->revision & 0xFF) == 0x50 ||
- (pdev->revision & 0xff) == 0x51))
- return true;
-
- return false;
+ return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
+ ((pdev->revision & 0xFF) == 4 ||
+ (pdev->revision & 0xFF) == 0x50 ||
+ (pdev->revision & 0xFF) == 0x51);
}
static inline bool is_dev_cn10kb(struct pci_dev *pdev)
@@ -174,11 +169,8 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
{
- if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
- (pdev->revision & 0xFF) == 0x54)
- return true;
-
- return false;
+ return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
+ (pdev->revision & 0xFF) == 0x54;
}
static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
@@ -192,18 +184,12 @@ static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
static inline bool cpt_is_errata_38550_exists(struct pci_dev *pdev)
{
- if (is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev))
- return true;
-
- return false;
+ return is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev);
}
static inline bool cpt_feature_sgv2(struct pci_dev *pdev)
{
- if (!is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev))
- return true;
-
- return false;
+ return !is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev);
}
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
@@ -223,5 +209,6 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox);
int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot);
+int otx2_cpt_lmtst_tbl_setup_msg(struct otx2_cptlfs_info *lfs);
#endif /* __OTX2_CPT_COMMON_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index b8b7c8a3c0ca..95f3de3a34eb 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -255,3 +255,28 @@ int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot)
return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
+
+int otx2_cpt_lmtst_tbl_setup_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct pci_dev *pdev = lfs->pdev;
+ struct lmtst_tbl_setup_req *req;
+
+ req = (struct lmtst_tbl_setup_req *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ dev_err(&pdev->dev, "RVU MBOX failed to alloc message.\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_LMTST_TBL_SETUP;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+
+ req->use_local_lmt_region = true;
+ req->lmt_iova = lfs->lmt_info.iova;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_lmtst_tbl_setup_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index b5d66afcc030..dc7c7a2650a5 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -433,10 +433,7 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
for (slot = 0; slot < lfs->lfs_num; slot++) {
lfs->lf[slot].lfs = lfs;
lfs->lf[slot].slot = slot;
- if (lfs->lmt_base)
- lfs->lf[slot].lmtline = lfs->lmt_base +
- (slot * LMTLINE_SIZE);
- else
+ if (!lfs->lmt_info.base)
lfs->lf[slot].lmtline = lfs->reg_base +
OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
OTX2_CPT_LMT_LF_LMTLINEX(0));
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index bd8604be2952..6e004a5568d8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -105,11 +105,19 @@ struct cpt_hw_ops {
gfp_t gfp);
};
+#define LMTLINE_SIZE 128
+#define LMTLINE_ALIGN 128
+struct otx2_lmt_info {
+ void *base;
+ dma_addr_t iova;
+ u32 size;
+ u8 align;
+};
+
struct otx2_cptlfs_info {
/* Registers start address of VF/PF LFs are attached to */
void __iomem *reg_base;
-#define LMTLINE_SIZE 128
- void __iomem *lmt_base;
+ struct otx2_lmt_info lmt_info;
struct pci_dev *pdev; /* Device LFs are attached to */
struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM];
struct otx2_mbox *mbox;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 12971300296d..1c5c262af48d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -639,6 +639,12 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
/* Disable all cores */
ret = otx2_cpt_disable_all_cores(cptpf);
+ otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
+ &cptpf->afpf_mbox, BLKADDR_CPT0);
+ if (cptpf->has_cpt1)
+ otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
+ cptpf->reg_base, &cptpf->afpf_mbox,
+ BLKADDR_CPT1);
return ret;
}
@@ -786,19 +792,19 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
cptpf->kvf_limits = 1;
- err = cn10k_cptpf_lmtst_init(cptpf);
+ /* Initialize CPT PF device */
+ err = cptpf_device_init(cptpf);
if (err)
goto unregister_intr;
- /* Initialize CPT PF device */
- err = cptpf_device_init(cptpf);
+ err = cn10k_cptpf_lmtst_init(cptpf);
if (err)
goto unregister_intr;
/* Initialize engine groups */
err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
if (err)
- goto unregister_intr;
+ goto free_lmtst;
err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
if (err)
@@ -814,6 +820,8 @@ sysfs_grp_del:
sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
+free_lmtst:
+ cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
unregister_intr:
cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
@@ -848,6 +856,8 @@ static void otx2_cptpf_remove(struct pci_dev *pdev)
cptpf_disable_afpf_mbox_intr(cptpf);
/* Destroy AF-PF mbox */
cptpf_afpf_mbox_destroy(cptpf);
+ /* Free LMTST memory */
+ cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index ec1ac7e836a3..12c0e966fa65 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -264,8 +264,6 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
return -ENOENT;
}
- otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
- &cptpf->afpf_mbox, BLKADDR_CPT0);
cptpf->lfs.global_slot = 0;
cptpf->lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
cptpf->lfs.ctx_ilen = cfg_req->ctx_ilen;
@@ -278,9 +276,6 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
if (cptpf->has_cpt1) {
cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
- otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
- cptpf->reg_base, &cptpf->afpf_mbox,
- BLKADDR_CPT1);
cptpf->cpt1_lfs.global_slot = num_lfs;
cptpf->cpt1_lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
cptpf->cpt1_lfs.ctx_ilen = cfg_req->ctx_ilen;
@@ -507,6 +502,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
case MBOX_MSG_CPT_LF_RESET:
+ case MBOX_MSG_LMTST_TBL_SETUP:
break;
default:
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 42c5484ce66a..78367849c3d5 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1513,8 +1513,6 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
if (ret)
goto delete_grps;
- otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base,
- &cptpf->afpf_mbox, BLKADDR_CPT0);
ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
OTX2_CPT_QUEUE_HI_PRIO, 1);
if (ret)
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index d84eebdf2fa8..56904bdfd6e8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -283,8 +283,6 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
lfs_num = cptvf->lfs.kvf_limits;
- otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
- &cptvf->pfvf_mbox, cptvf->blkaddr);
ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
lfs_num);
if (ret)
@@ -378,10 +376,6 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);
- ret = cn10k_cptvf_lmtst_init(cptvf);
- if (ret)
- goto clear_drvdata;
-
/* Initialize PF<=>VF mailbox */
ret = cptvf_pfvf_mbox_init(cptvf);
if (ret)
@@ -396,6 +390,9 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
cptvf_hw_ops_get(cptvf);
+ otx2_cptlf_set_dev_info(&cptvf->lfs, cptvf->pdev, cptvf->reg_base,
+ &cptvf->pfvf_mbox, cptvf->blkaddr);
+
ret = otx2_cptvf_send_caps_msg(cptvf);
if (ret) {
dev_err(&pdev->dev, "Couldn't get CPT engine capabilities.\n");
@@ -404,13 +401,19 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35))
cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create;
+ ret = cn10k_cptvf_lmtst_init(cptvf);
+ if (ret)
+ goto unregister_interrupts;
+
/* Initialize CPT LFs */
ret = cptvf_lf_init(cptvf);
if (ret)
- goto unregister_interrupts;
+ goto free_lmtst;
return 0;
+free_lmtst:
+ cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
unregister_interrupts:
cptvf_disable_pfvf_mbox_intrs(cptvf);
destroy_pfvf_mbox:
@@ -434,6 +437,8 @@ static void otx2_cptvf_remove(struct pci_dev *pdev)
cptvf_disable_pfvf_mbox_intrs(cptvf);
/* Destroy PF-VF mbox */
cptvf_pfvf_mbox_destroy(cptvf);
+ /* Free LMTST memory */
+ cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index d9fa5f6e204d..931b72580fd9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -134,6 +134,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
sizeof(cptvf->eng_caps));
break;
case MBOX_MSG_CPT_LF_RESET:
+ case MBOX_MSG_LMTST_TBL_SETUP:
break;
default:
dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n",
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index 0e440f704a8f..35fa5bad1d9f 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -8,10 +8,12 @@
*/
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index dfa3ad1a12f2..709b3ee74657 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -9,10 +9,12 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 502a565074e9..4039cf3b22d4 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -8,10 +8,12 @@
*/
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index eb5c8f689360..bf465d824e2c 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -7,13 +7,14 @@
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
-#include <crypto/internal/hash.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <asm/vio.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -21,8 +22,6 @@
struct xcbc_state {
u8 state[AES_BLOCK_SIZE];
- unsigned int count;
- u8 buffer[AES_BLOCK_SIZE];
};
static int nx_xcbc_set_key(struct crypto_shash *desc,
@@ -58,7 +57,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
*/
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
u8 keys[2][AES_BLOCK_SIZE];
@@ -135,9 +134,9 @@ out:
return rc;
}
-static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
int err;
@@ -166,31 +165,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg;
struct nx_sg *out_sg;
- u32 to_process = 0, leftover, total;
unsigned int max_sg_len;
unsigned long irq_flags;
+ u32 to_process, total;
int rc = 0;
int data_len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+ memcpy(csbcpb->cpb.aes_xcbc.out_cv_mac, sctx->state, AES_BLOCK_SIZE);
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- total = sctx->count + len;
-
- /* 2 cases for total data len:
- * 1: <= AES_BLOCK_SIZE: copy into state, return 0
- * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
- */
- if (total <= AES_BLOCK_SIZE) {
- memcpy(sctx->buffer + sctx->count, data, len);
- sctx->count += len;
- goto out;
- }
+ total = len;
in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
@@ -200,7 +192,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
data_len = AES_BLOCK_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, nx_ctx->ap->sglen);
+ &data_len, nx_ctx->ap->sglen);
if (data_len != AES_BLOCK_SIZE) {
rc = -EINVAL;
@@ -210,56 +202,21 @@ static int nx_xcbc_update(struct shash_desc *desc,
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
do {
- to_process = total - to_process;
- to_process = to_process & ~(AES_BLOCK_SIZE - 1);
-
- leftover = total - to_process;
-
- /* the hardware will not accept a 0 byte operation for this
- * algorithm and the operation MUST be finalized to be correct.
- * So if we happen to get an update that falls on a block sized
- * boundary, we must save off the last block to finalize with
- * later. */
- if (!leftover) {
- to_process -= AES_BLOCK_SIZE;
- leftover = AES_BLOCK_SIZE;
- }
-
- if (sctx->count) {
- data_len = sctx->count;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
- (u8 *) sctx->buffer,
- &data_len,
- max_sg_len);
- if (data_len != sctx->count) {
- rc = -EINVAL;
- goto out;
- }
- }
+ to_process = total & ~(AES_BLOCK_SIZE - 1);
- data_len = to_process - sctx->count;
in_sg = nx_build_sg_list(in_sg,
(u8 *) data,
- &data_len,
+ &to_process,
max_sg_len);
- if (data_len != to_process - sctx->count) {
- rc = -EINVAL;
- goto out;
- }
-
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
/* we've hit the nx chip previously and we're updating again,
* so copy over the partial digest */
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- memcpy(csbcpb->cpb.aes_xcbc.cv,
- csbcpb->cpb.aes_xcbc.out_cv_mac,
- AES_BLOCK_SIZE);
- }
+ memcpy(csbcpb->cpb.aes_xcbc.cv,
+ csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
@@ -271,28 +228,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
atomic_inc(&(nx_ctx->stats->aes_ops));
- /* everything after the first update is continuation */
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
-
total -= to_process;
- data += to_process - sctx->count;
- sctx->count = 0;
+ data += to_process;
in_sg = nx_ctx->in_sg;
- } while (leftover > AES_BLOCK_SIZE);
+ } while (total >= AES_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- memcpy(sctx->buffer, data, leftover);
- sctx->count = leftover;
+ rc = total;
+ memcpy(sctx->state, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
+static int nx_xcbc_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags;
@@ -301,12 +254,10 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.aes_xcbc.cv,
- csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
- } else if (sctx->count == 0) {
+ if (nbytes) {
+ /* non-zero final, so copy over the partial digest */
+ memcpy(csbcpb->cpb.aes_xcbc.cv, sctx->state, AES_BLOCK_SIZE);
+ } else {
/*
* we've never seen an update, so this is a 0 byte op. The
* hardware cannot handle a 0 byte op, so just ECB to
@@ -320,11 +271,11 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
* this is not an intermediate operation */
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- len = sctx->count;
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
- &len, nx_ctx->ap->sglen);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len,
+ nx_ctx->ap->sglen);
- if (len != sctx->count) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -362,18 +313,19 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.digestsize = AES_BLOCK_SIZE,
.init = nx_xcbc_init,
.update = nx_xcbc_update,
- .final = nx_xcbc_final,
+ .finup = nx_xcbc_finup,
.setkey = nx_xcbc_set_key,
.descsize = sizeof(struct xcbc_state),
- .statesize = sizeof(struct xcbc_state),
+ .init_tfm = nx_crypto_ctx_aes_xcbc_init2,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.base = {
.cra_name = "xcbc(aes)",
.cra_driver_name = "xcbc-aes-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_aes_xcbc_init2,
- .cra_exit = nx_crypto_ctx_exit,
}
};
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index c3bebf0feabe..5b29dd026df2 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -9,9 +9,12 @@
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/vio.h>
-#include <asm/byteorder.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -19,12 +22,11 @@
struct sha256_state_be {
__be32 state[SHA256_DIGEST_SIZE / 4];
u64 count;
- u8 buf[SHA256_BLOCK_SIZE];
};
-static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_sha256_init(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
int err;
err = nx_crypto_ctx_sha_init(tfm);
@@ -40,11 +42,10 @@ static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
return 0;
}
-static int nx_sha256_init(struct shash_desc *desc) {
+static int nx_sha256_init(struct shash_desc *desc)
+{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- memset(sctx, 0, sizeof *sctx);
-
sctx->state[0] = __cpu_to_be32(SHA256_H0);
sctx->state[1] = __cpu_to_be32(SHA256_H1);
sctx->state[2] = __cpu_to_be32(SHA256_H2);
@@ -61,30 +62,18 @@ static int nx_sha256_init(struct shash_desc *desc) {
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ u64 to_process, leftover, total = len;
struct nx_sg *out_sg;
- u64 to_process = 0, leftover, total;
unsigned long irq_flags;
int rc = 0;
int data_len;
u32 max_sg_len;
- u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- /* 2 cases for total data len:
- * 1: < SHA256_BLOCK_SIZE: copy into state, return 0
- * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
- */
- total = (sctx->count % SHA256_BLOCK_SIZE) + len;
- if (total < SHA256_BLOCK_SIZE) {
- memcpy(sctx->buf + buf_len, data, len);
- sctx->count += len;
- goto out;
- }
-
memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
@@ -105,41 +94,17 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
}
do {
- int used_sgs = 0;
struct nx_sg *in_sg = nx_ctx->in_sg;
- if (buf_len) {
- data_len = buf_len;
- in_sg = nx_build_sg_list(in_sg,
- (u8 *) sctx->buf,
- &data_len,
- max_sg_len);
-
- if (data_len != buf_len) {
- rc = -EINVAL;
- goto out;
- }
- used_sgs = in_sg - nx_ctx->in_sg;
- }
+ to_process = total & ~(SHA256_BLOCK_SIZE - 1);
- /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
- * processed in this iteration. This value is restricted
- * by sg list limits and number of sgs we already used
- * for leftover data. (see above)
- * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
- * but because data may not be aligned, we need to account
- * for that too. */
- to_process = min_t(u64, total,
- (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
- to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
-
- data_len = to_process - buf_len;
+ data_len = to_process;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- to_process = data_len + buf_len;
+ to_process = data_len;
leftover = total - to_process;
/*
@@ -162,26 +127,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
atomic_inc(&(nx_ctx->stats->sha256_ops));
total -= to_process;
- data += to_process - buf_len;
- buf_len = 0;
-
+ data += to_process;
+ sctx->count += to_process;
} while (leftover >= SHA256_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- if (leftover)
- memcpy(sctx->buf, data, leftover);
-
- sctx->count += len;
+ rc = leftover;
memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_sha256_final(struct shash_desc *desc, u8 *out)
+static int nx_sha256_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags;
@@ -197,25 +158,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
- if (sctx->count >= SHA256_BLOCK_SIZE) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- } else {
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
- }
+ * this is not an intermediate operation
+ * copy over the partial digest */
+ memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ sctx->count += nbytes;
csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
- len = sctx->count & (SHA256_BLOCK_SIZE - 1);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
- &len, max_sg_len);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len);
- if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -251,18 +206,34 @@ out:
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ u8 *u8;
+ u32 *u32;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
- memcpy(out, sctx, sizeof(*sctx));
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++)
+ put_unaligned(be32_to_cpu(sctx->state[i]), p.u32++);
+ put_unaligned(sctx->count, p.u64++);
return 0;
}
static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ const u8 *u8;
+ const u32 *u32;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
- memcpy(sctx, in, sizeof(*sctx));
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++)
+ sctx->state[i] = cpu_to_be32(get_unaligned(p.u32++));
+ sctx->count = get_unaligned(p.u64++);
return 0;
}
@@ -270,19 +241,20 @@ struct shash_alg nx_shash_sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
.init = nx_sha256_init,
.update = nx_sha256_update,
- .final = nx_sha256_final,
+ .finup = nx_sha256_finup,
.export = nx_sha256_export,
.import = nx_sha256_import,
+ .init_tfm = nx_crypto_ctx_sha256_init,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.descsize = sizeof(struct sha256_state_be),
.statesize = sizeof(struct sha256_state_be),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha256_init,
- .cra_exit = nx_crypto_ctx_exit,
}
};
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 1ffb40d2c324..f74776b7d7d7 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -9,8 +9,12 @@
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/vio.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -18,12 +22,11 @@
struct sha512_state_be {
__be64 state[SHA512_DIGEST_SIZE / 8];
u64 count[2];
- u8 buf[SHA512_BLOCK_SIZE];
};
-static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_sha512_init(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
int err;
err = nx_crypto_ctx_sha_init(tfm);
@@ -43,8 +46,6 @@ static int nx_sha512_init(struct shash_desc *desc)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- memset(sctx, 0, sizeof *sctx);
-
sctx->state[0] = __cpu_to_be64(SHA512_H0);
sctx->state[1] = __cpu_to_be64(SHA512_H1);
sctx->state[2] = __cpu_to_be64(SHA512_H2);
@@ -54,6 +55,7 @@ static int nx_sha512_init(struct shash_desc *desc)
sctx->state[6] = __cpu_to_be64(SHA512_H6);
sctx->state[7] = __cpu_to_be64(SHA512_H7);
sctx->count[0] = 0;
+ sctx->count[1] = 0;
return 0;
}
@@ -61,30 +63,18 @@ static int nx_sha512_init(struct shash_desc *desc)
static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ u64 to_process, leftover, total = len;
struct nx_sg *out_sg;
- u64 to_process, leftover = 0, total;
unsigned long irq_flags;
int rc = 0;
int data_len;
u32 max_sg_len;
- u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- /* 2 cases for total data len:
- * 1: < SHA512_BLOCK_SIZE: copy into state, return 0
- * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
- */
- total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
- if (total < SHA512_BLOCK_SIZE) {
- memcpy(sctx->buf + buf_len, data, len);
- sctx->count[0] += len;
- goto out;
- }
-
memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
@@ -105,45 +95,17 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
}
do {
- int used_sgs = 0;
struct nx_sg *in_sg = nx_ctx->in_sg;
- if (buf_len) {
- data_len = buf_len;
- in_sg = nx_build_sg_list(in_sg,
- (u8 *) sctx->buf,
- &data_len, max_sg_len);
-
- if (data_len != buf_len) {
- rc = -EINVAL;
- goto out;
- }
- used_sgs = in_sg - nx_ctx->in_sg;
- }
+ to_process = total & ~(SHA512_BLOCK_SIZE - 1);
- /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
- * processed in this iteration. This value is restricted
- * by sg list limits and number of sgs we already used
- * for leftover data. (see above)
- * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
- * but because data may not be aligned, we need to account
- * for that too. */
- to_process = min_t(u64, total,
- (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
- to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-
- data_len = to_process - buf_len;
+ data_len = to_process;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- if (data_len != (to_process - buf_len)) {
- rc = -EINVAL;
- goto out;
- }
-
- to_process = data_len + buf_len;
+ to_process = data_len;
leftover = total - to_process;
/*
@@ -166,30 +128,29 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
atomic_inc(&(nx_ctx->stats->sha512_ops));
total -= to_process;
- data += to_process - buf_len;
- buf_len = 0;
-
+ data += to_process;
+ sctx->count[0] += to_process;
+ if (sctx->count[0] < to_process)
+ sctx->count[1]++;
} while (leftover >= SHA512_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- if (leftover)
- memcpy(sctx->buf, data, leftover);
- sctx->count[0] += len;
+ rc = leftover;
memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_sha512_final(struct shash_desc *desc, u8 *out)
+static int nx_sha512_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
u32 max_sg_len;
- u64 count0;
unsigned long irq_flags;
+ u64 count0, count1;
int rc = 0;
int len;
@@ -201,30 +162,23 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
- if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
- SHA512_DIGEST_SIZE);
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- } else {
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
- }
-
+ * this is not an intermediate operation
+ * copy over the partial digest */
+ memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state, SHA512_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- count0 = sctx->count[0] * 8;
+ count0 = sctx->count[0] + nbytes;
+ count1 = sctx->count[1];
- csbcpb->cpb.sha512.message_bit_length_lo = count0;
+ csbcpb->cpb.sha512.message_bit_length_lo = count0 << 3;
+ csbcpb->cpb.sha512.message_bit_length_hi = (count1 << 3) |
+ (count0 >> 61);
- len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
- max_sg_len);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len);
- if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -246,7 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
- atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
+ atomic64_add(count0, &(nx_ctx->stats->sha512_bytes));
memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
@@ -257,18 +211,34 @@ out:
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ u8 *u8;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
- memcpy(out, sctx, sizeof(*sctx));
+ for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++)
+ put_unaligned(be64_to_cpu(sctx->state[i]), p.u64++);
+ put_unaligned(sctx->count[0], p.u64++);
+ put_unaligned(sctx->count[1], p.u64++);
return 0;
}
static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ const u8 *u8;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
- memcpy(sctx, in, sizeof(*sctx));
+ for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++)
+ sctx->state[i] = cpu_to_be64(get_unaligned(p.u64++));
+ sctx->count[0] = get_unaligned(p.u64++);
+ sctx->count[1] = get_unaligned(p.u64++);
return 0;
}
@@ -276,19 +246,20 @@ struct shash_alg nx_shash_sha512_alg = {
.digestsize = SHA512_DIGEST_SIZE,
.init = nx_sha512_init,
.update = nx_sha512_update,
- .final = nx_sha512_final,
+ .finup = nx_sha512_finup,
.export = nx_sha512_export,
.import = nx_sha512_import,
+ .init_tfm = nx_crypto_ctx_sha512_init,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.descsize = sizeof(struct sha512_state_be),
.statesize = sizeof(struct sha512_state_be),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha512_init,
- .cra_exit = nx_crypto_ctx_exit,
}
};
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index a3b979193d9b..78135fb13f5c 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -7,11 +7,11 @@
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
+#include <crypto/aes.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
-#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/sha2.h>
-#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -124,8 +124,6 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
}
if ((sg - sg_head) == sgmax) {
- pr_err("nx: scatter/gather list overflow, pid: %d\n",
- current->pid);
sg++;
break;
}
@@ -702,14 +700,14 @@ int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
NX_MODE_AES_ECB);
}
-int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_sha_init(struct crypto_shash *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
+ return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}
-int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_AES,
NX_MODE_AES_XCBC_MAC);
}
@@ -744,6 +742,11 @@ void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
kfree_sensitive(nx_ctx->kmem);
}
+void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm)
+{
+ nx_crypto_ctx_exit(crypto_shash_ctx(tfm));
+}
+
static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index e1b4b6927bec..36974f08490a 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -3,7 +3,11 @@
#ifndef __NX_H__
#define __NX_H__
+#include <asm/vio.h>
#include <crypto/ctr.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#define NX_NAME "nx-crypto"
#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
@@ -139,19 +143,20 @@ struct nx_crypto_ctx {
} priv;
};
-struct crypto_aead;
+struct scatterlist;
/* prototypes */
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
-int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm);
int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm);
-int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_sha_init(struct crypto_shash *tfm);
void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm);
void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm);
+void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 551dd32a8db0..1ecf5f6ac04e 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1086,10 +1086,7 @@ static struct attribute *omap_aes_attrs[] = {
&dev_attr_fallback.attr,
NULL,
};
-
-static const struct attribute_group omap_aes_attr_group = {
- .attrs = omap_aes_attrs,
-};
+ATTRIBUTE_GROUPS(omap_aes);
static int omap_aes_probe(struct platform_device *pdev)
{
@@ -1215,12 +1212,6 @@ static int omap_aes_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
- if (err) {
- dev_err(dev, "could not create sysfs device attrs\n");
- goto err_aead_algs;
- }
-
return 0;
err_aead_algs:
for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
@@ -1277,8 +1268,6 @@ static void omap_aes_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
-
- sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);
}
#ifdef CONFIG_PM_SLEEP
@@ -1304,6 +1293,7 @@ static struct platform_driver omap_aes_driver = {
.name = "omap-aes",
.pm = &omap_aes_pm_ops,
.of_match_table = omap_aes_of_match,
+ .dev_groups = omap_aes_groups,
},
};
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 7021481bf027..56f192cb976d 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2039,10 +2039,7 @@ static struct attribute *omap_sham_attrs[] = {
&dev_attr_fallback.attr,
NULL,
};
-
-static const struct attribute_group omap_sham_attr_group = {
- .attrs = omap_sham_attrs,
-};
+ATTRIBUTE_GROUPS(omap_sham);
static int omap_sham_probe(struct platform_device *pdev)
{
@@ -2158,12 +2155,6 @@ static int omap_sham_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
- if (err) {
- dev_err(dev, "could not create sysfs device attrs\n");
- goto err_algs;
- }
-
return 0;
err_algs:
@@ -2210,8 +2201,6 @@ static void omap_sham_remove(struct platform_device *pdev)
if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
-
- sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
}
static struct platform_driver omap_sham_driver = {
@@ -2220,6 +2209,7 @@ static struct platform_driver omap_sham_driver = {
.driver = {
.name = "omap-sham",
.of_match_table = omap_sham_of_match,
+ .dev_groups = omap_sham_groups,
},
};
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index db9e84c0c9fb..329f60ad422e 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -7,59 +7,89 @@
* Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
*/
+#include <asm/cpu_device_id.h>
#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
+#include <linux/cpufeature.h>
#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-#include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
+#include <linux/module.h>
-struct padlock_sha_desc {
- struct shash_desc fallback;
-};
+#define PADLOCK_SHA_DESCSIZE (128 + ((PADLOCK_ALIGNMENT - 1) & \
+ ~(CRYPTO_MINALIGN - 1)))
struct padlock_sha_ctx {
- struct crypto_shash *fallback;
+ struct crypto_ahash *fallback;
};
-static int padlock_sha_init(struct shash_desc *desc)
+static inline void *padlock_shash_desc_ctx(struct shash_desc *desc)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ return PTR_ALIGN(shash_desc_ctx(desc), PADLOCK_ALIGNMENT);
+}
+
+static int padlock_sha1_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = padlock_shash_desc_ctx(desc);
+
+ *sctx = (struct sha1_state){
+ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+ };
+
+ return 0;
+}
+
+static int padlock_sha256_init(struct shash_desc *desc)
+{
+ struct crypto_sha256_state *sctx = padlock_shash_desc_ctx(desc);
- dctx->fallback.tfm = ctx->fallback;
- return crypto_shash_init(&dctx->fallback);
+ sha256_block_init(sctx);
+ return 0;
}
static int padlock_sha_update(struct shash_desc *desc,
const u8 *data, unsigned int length)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ u8 *state = padlock_shash_desc_ctx(desc);
+ struct crypto_shash *tfm = desc->tfm;
+ int err, remain;
+
+ remain = length - round_down(length, crypto_shash_blocksize(tfm));
+ {
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(tfm);
+ HASH_REQUEST_ON_STACK(req, ctx->fallback);
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_virt(req, data, NULL, length - remain);
+ err = crypto_ahash_import_core(req, state) ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export_core(req, state);
+ HASH_REQUEST_ZERO(req);
+ }
- return crypto_shash_update(&dctx->fallback, data, length);
+ return err ?: remain;
}
static int padlock_sha_export(struct shash_desc *desc, void *out)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_export(&dctx->fallback, out);
+ memcpy(out, padlock_shash_desc_ctx(desc),
+ crypto_shash_coresize(desc->tfm));
+ return 0;
}
static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ unsigned int bs = crypto_shash_blocksize(desc->tfm);
+ unsigned int ss = crypto_shash_coresize(desc->tfm);
+ u64 *state = padlock_shash_desc_ctx(desc);
+
+ memcpy(state, in, ss);
+
+ /* Stop evil imports from generating a fault. */
+ state[ss / 8 - 1] &= ~(bs - 1);
- dctx->fallback.tfm = ctx->fallback;
- return crypto_shash_import(&dctx->fallback, in);
+ return 0;
}
static inline void padlock_output_block(uint32_t *src,
@@ -69,65 +99,38 @@ static inline void padlock_output_block(uint32_t *src,
*dst++ = swab32(*src++);
}
+static int padlock_sha_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int count, u8 *out)
+{
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ HASH_REQUEST_ON_STACK(req, ctx->fallback);
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_virt(req, in, out, count);
+ return crypto_ahash_import_core(req, padlock_shash_desc_ctx(desc)) ?:
+ crypto_ahash_finup(req);
+}
+
static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
unsigned int count, u8 *out)
{
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct sha1_state state;
- unsigned int space;
- unsigned int leftover;
- int err;
-
- err = crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
+ struct sha1_state *state = padlock_shash_desc_ctx(desc);
+ u64 start = state->count;
- if (state.count + count > ULONG_MAX)
- return crypto_shash_finup(&dctx->fallback, in, count, out);
-
- leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
- space = SHA1_BLOCK_SIZE - leftover;
- if (space) {
- if (count > space) {
- err = crypto_shash_update(&dctx->fallback, in, space) ?:
- crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
- count -= space;
- in += space;
- } else {
- memcpy(state.buffer + leftover, in, count);
- in = state.buffer;
- count += leftover;
- state.count &= ~(SHA1_BLOCK_SIZE - 1);
- }
- }
-
- memcpy(result, &state.state, SHA1_DIGEST_SIZE);
+ if (start + count > ULONG_MAX)
+ return padlock_sha_finup(desc, in, count, out);
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
: \
- : "c"((unsigned long)state.count + count), \
- "a"((unsigned long)state.count), \
- "S"(in), "D"(result));
-
- padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+ : "c"((unsigned long)start + count), \
+ "a"((unsigned long)start), \
+ "S"(in), "D"(state));
-out:
- return err;
-}
-
-static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
-{
- const u8 *buf = (void *)desc;
-
- return padlock_sha1_finup(desc, buf, 0, out);
+ padlock_output_block(state->state, (uint32_t *)out, 5);
+ return 0;
}
static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
@@ -136,78 +139,46 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct sha256_state state;
- unsigned int space;
- unsigned int leftover;
- int err;
-
- err = crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
+ struct sha256_state *state = padlock_shash_desc_ctx(desc);
+ u64 start = state->count;
- if (state.count + count > ULONG_MAX)
- return crypto_shash_finup(&dctx->fallback, in, count, out);
-
- leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
- space = SHA256_BLOCK_SIZE - leftover;
- if (space) {
- if (count > space) {
- err = crypto_shash_update(&dctx->fallback, in, space) ?:
- crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
- count -= space;
- in += space;
- } else {
- memcpy(state.buf + leftover, in, count);
- in = state.buf;
- count += leftover;
- state.count &= ~(SHA1_BLOCK_SIZE - 1);
- }
- }
-
- memcpy(result, &state.state, SHA256_DIGEST_SIZE);
+ if (start + count > ULONG_MAX)
+ return padlock_sha_finup(desc, in, count, out);
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
: \
- : "c"((unsigned long)state.count + count), \
- "a"((unsigned long)state.count), \
- "S"(in), "D"(result));
+ : "c"((unsigned long)start + count), \
+ "a"((unsigned long)start), \
+ "S"(in), "D"(state));
- padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
-
-out:
- return err;
-}
-
-static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
-{
- const u8 *buf = (void *)desc;
-
- return padlock_sha256_finup(desc, buf, 0, out);
+ padlock_output_block(state->state, (uint32_t *)out, 8);
+ return 0;
}
static int padlock_init_tfm(struct crypto_shash *hash)
{
const char *fallback_driver_name = crypto_shash_alg_name(hash);
struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
- struct crypto_shash *fallback_tfm;
+ struct crypto_ahash *fallback_tfm;
/* Allocate a fallback and abort if it failed. */
- fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
return PTR_ERR(fallback_tfm);
}
+ if (crypto_shash_statesize(hash) !=
+ crypto_ahash_statesize(fallback_tfm)) {
+ crypto_free_ahash(fallback_tfm);
+ return -EINVAL;
+ }
+
ctx->fallback = fallback_tfm;
- hash->descsize += crypto_shash_descsize(fallback_tfm);
+
return 0;
}
@@ -215,26 +186,27 @@ static void padlock_exit_tfm(struct crypto_shash *hash)
{
struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
- crypto_free_shash(ctx->fallback);
+ crypto_free_ahash(ctx->fallback);
}
static struct shash_alg sha1_alg = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = padlock_sha_init,
+ .init = padlock_sha1_init,
.update = padlock_sha_update,
.finup = padlock_sha1_finup,
- .final = padlock_sha1_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
.init_tfm = padlock_init_tfm,
.exit_tfm = padlock_exit_tfm,
- .descsize = sizeof(struct padlock_sha_desc),
- .statesize = sizeof(struct sha1_state),
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -243,21 +215,22 @@ static struct shash_alg sha1_alg = {
static struct shash_alg sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
- .init = padlock_sha_init,
+ .init = padlock_sha256_init,
.update = padlock_sha_update,
.finup = padlock_sha256_finup,
- .final = padlock_sha256_final,
+ .init_tfm = padlock_init_tfm,
.export = padlock_sha_export,
.import = padlock_sha_import,
- .init_tfm = padlock_init_tfm,
.exit_tfm = padlock_exit_tfm,
- .descsize = sizeof(struct padlock_sha_desc),
- .statesize = sizeof(struct sha256_state),
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = sizeof(struct crypto_sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -266,207 +239,58 @@ static struct shash_alg sha256_alg = {
/* Add two shash_alg instance for hardware-implemented *
* multiple-parts hash supported by VIA Nano Processor.*/
-static int padlock_sha1_init_nano(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha1_state){
- .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- };
-
- return 0;
-}
static int padlock_sha1_update_nano(struct shash_desc *desc,
- const u8 *data, unsigned int len)
+ const u8 *src, unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
/*The PHE require the out buffer must 128 bytes and 16-bytes aligned*/
- u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
- memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);
-
- if ((partial + len) >= SHA1_BLOCK_SIZE) {
-
- /* Append the bytes in state's buffer to a block to handle */
- if (partial) {
- done = -partial;
- memcpy(sctx->buffer + partial, data,
- done + SHA1_BLOCK_SIZE);
- src = sctx->buffer;
- asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
- : "+S"(src), "+D"(dst) \
- : "a"((long)-1), "c"((unsigned long)1));
- done += SHA1_BLOCK_SIZE;
- src = data + done;
- }
-
- /* Process the left bytes from the input data */
- if (len - done >= SHA1_BLOCK_SIZE) {
- asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
- : "+S"(src), "+D"(dst)
- : "a"((long)-1),
- "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
- done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
- src = data + done;
- }
- partial = 0;
- }
- memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
- memcpy(sctx->buffer + partial, src, len - done);
-
- return 0;
-}
-
-static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
-{
- struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
- unsigned int partial, padlen;
- __be64 bits;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_be64(state->count << 3);
-
- /* Pad out to 56 mod 64 */
- partial = state->count & 0x3f;
- padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
- padlock_sha1_update_nano(desc, padding, padlen);
-
- /* Append length field bytes */
- padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Swap to output */
- padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);
-
- return 0;
-}
-
-static int padlock_sha256_init_nano(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha256_state){
- .state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7},
- };
-
- return 0;
+ struct sha1_state *state = padlock_shash_desc_ctx(desc);
+ int blocks = len / SHA1_BLOCK_SIZE;
+
+ len -= blocks * SHA1_BLOCK_SIZE;
+ state->count += blocks * SHA1_BLOCK_SIZE;
+
+ /* Process the left bytes from the input data */
+ asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+ : "+S"(src), "+D"(state)
+ : "a"((long)-1),
+ "c"((unsigned long)blocks));
+ return len;
}
-static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
+static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *src,
unsigned int len)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
/*The PHE require the out buffer must 128 bytes and 16-bytes aligned*/
- u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
- memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);
-
- if ((partial + len) >= SHA256_BLOCK_SIZE) {
-
- /* Append the bytes in state's buffer to a block to handle */
- if (partial) {
- done = -partial;
- memcpy(sctx->buf + partial, data,
- done + SHA256_BLOCK_SIZE);
- src = sctx->buf;
- asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
- : "+S"(src), "+D"(dst)
- : "a"((long)-1), "c"((unsigned long)1));
- done += SHA256_BLOCK_SIZE;
- src = data + done;
- }
-
- /* Process the left bytes from input data*/
- if (len - done >= SHA256_BLOCK_SIZE) {
- asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
- : "+S"(src), "+D"(dst)
- : "a"((long)-1),
- "c"((unsigned long)((len - done) / 64)));
- done += ((len - done) - (len - done) % 64);
- src = data + done;
- }
- partial = 0;
- }
- memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
- memcpy(sctx->buf + partial, src, len - done);
-
- return 0;
-}
-
-static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
-{
- struct sha256_state *state =
- (struct sha256_state *)shash_desc_ctx(desc);
- unsigned int partial, padlen;
- __be64 bits;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_be64(state->count << 3);
-
- /* Pad out to 56 mod 64 */
- partial = state->count & 0x3f;
- padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
- padlock_sha256_update_nano(desc, padding, padlen);
-
- /* Append length field bytes */
- padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Swap to output */
- padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);
-
- return 0;
-}
-
-static int padlock_sha_export_nano(struct shash_desc *desc,
- void *out)
-{
- int statesize = crypto_shash_statesize(desc->tfm);
- void *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, statesize);
- return 0;
-}
-
-static int padlock_sha_import_nano(struct shash_desc *desc,
- const void *in)
-{
- int statesize = crypto_shash_statesize(desc->tfm);
- void *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, statesize);
- return 0;
+ struct crypto_sha256_state *state = padlock_shash_desc_ctx(desc);
+ int blocks = len / SHA256_BLOCK_SIZE;
+
+ len -= blocks * SHA256_BLOCK_SIZE;
+ state->count += blocks * SHA256_BLOCK_SIZE;
+
+ /* Process the left bytes from input data*/
+ asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+ : "+S"(src), "+D"(state)
+ : "a"((long)-1),
+ "c"((unsigned long)blocks));
+ return len;
}
static struct shash_alg sha1_alg_nano = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = padlock_sha1_init_nano,
+ .init = padlock_sha1_init,
.update = padlock_sha1_update_nano,
- .final = padlock_sha1_final_nano,
- .export = padlock_sha_export_nano,
- .import = padlock_sha_import_nano,
- .descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
+ .finup = padlock_sha1_finup,
+ .export = padlock_sha_export,
+ .import = padlock_sha_import,
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -474,17 +298,19 @@ static struct shash_alg sha1_alg_nano = {
static struct shash_alg sha256_alg_nano = {
.digestsize = SHA256_DIGEST_SIZE,
- .init = padlock_sha256_init_nano,
+ .init = padlock_sha256_init,
.update = padlock_sha256_update_nano,
- .final = padlock_sha256_final_nano,
- .export = padlock_sha_export_nano,
- .import = padlock_sha_import_nano,
- .descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
+ .finup = padlock_sha256_finup,
+ .export = padlock_sha_export,
+ .import = padlock_sha_import,
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = sizeof(struct crypto_sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
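
The removed padlock_sha256_final_nano() did the usual MD-style finalization by hand: pad the buffered data out to 56 mod 64 bytes and append the message length in bits as a big-endian 64-bit value. A small self-contained C sketch of that padding arithmetic (names here are illustrative, not from the driver):

/* Sketch of the tail padding the removed padlock_sha256_final_nano()
 * performed: pad to 56 mod 64, then an 8-byte big-endian bit length
 * makes every message a whole number of 64-byte blocks.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int sha256_padlen(uint64_t count)
{
	unsigned int partial = count & 0x3f;	/* bytes in the last, partial block */

	return (partial < 56) ? (56 - partial) : ((64 + 56) - partial);
}

int main(void)
{
	uint64_t counts[] = { 0, 3, 55, 56, 63, 64, 119 };

	for (unsigned int i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		unsigned int pad = sha256_padlen(counts[i]);

		/* message + padding + 8-byte length is always block aligned */
		printf("count=%llu padlen=%u total%%64=%llu\n",
		       (unsigned long long)counts[i], pad,
		       (unsigned long long)((counts[i] + pad + 8) % 64));
	}
	return 0;
}
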
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 69d6019d8abc..d6928ebe9526 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -52,12 +52,11 @@ static int rk_ahash_digest_fb(struct ahash_request *areq)
algt->stat_fb++;
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -124,8 +123,9 @@ static int rk_ahash_init(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -137,10 +137,10 @@ static int rk_ahash_update(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -152,9 +152,10 @@ static int rk_ahash_final(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -166,12 +167,11 @@ static int rk_ahash_finup(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -183,8 +183,9 @@ static int rk_ahash_import(struct ahash_request *req, const void *in)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -196,8 +197,9 @@ static int rk_ahash_export(struct ahash_request *req, void *out)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
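
The rk3288 hunks above, and the s5p-sss, sa2ul and tegra hunks that follow, apply one recurring conversion: the fallback ahash_request is no longer filled in field by field but prepared through ahash_request_set_callback() and ahash_request_set_crypt(). A condensed sketch of the resulting shape, assuming the kernel's crypto ahash headers; the wrapper name and parameter list are invented for illustration:

/* Condensed sketch of the fallback-preparation pattern these hunks
 * introduce; only the helpers visible in the diff are used.
 */
static int example_fallback_digest(struct ahash_request *req,
				   struct crypto_ahash *fallback_tfm,
				   struct ahash_request *fallback_req)
{
	ahash_request_set_tfm(fallback_req, fallback_tfm);
	/* propagate only MAY_SLEEP plus the caller's completion callback */
	ahash_request_set_callback(fallback_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);
	/* src/result/nbytes replace the old open-coded field assignments */
	ahash_request_set_crypt(fallback_req, req->src, req->result,
				req->nbytes);
	return crypto_ahash_digest(fallback_req);
}
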
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index b4c3c14dafd5..b829c84f60f2 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -9,11 +9,17 @@
//
// Hash part based on omap-sham.c driver.
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/md5.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -22,17 +28,9 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
-
-#include <crypto/ctr.h>
-#include <crypto/aes.h>
-#include <crypto/algapi.h>
-#include <crypto/scatterwalk.h>
-
-#include <crypto/hash.h>
-#include <crypto/md5.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/internal/hash.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#define _SBF(s, v) ((v) << (s))
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 091612b066f1..fdc0b2486069 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -1415,22 +1415,13 @@ static int sa_sha_run(struct ahash_request *req)
(auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
struct ahash_request *subreq = &rctx->fallback_req;
- int ret = 0;
+ int ret;
ahash_request_set_tfm(subreq, ctx->fallback.ahash);
- subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- crypto_ahash_init(subreq);
-
- subreq->nbytes = auth_len;
- subreq->src = req->src;
- subreq->result = req->result;
-
- ret |= crypto_ahash_update(subreq);
-
- subreq->nbytes = 0;
+ ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(subreq, req->src, req->result, auth_len);
- ret |= crypto_ahash_final(subreq);
+ ret = crypto_ahash_digest(subreq);
return ret;
}
@@ -1502,8 +1493,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
return ret;
if (alg_base) {
- ctx->shash = crypto_alloc_shash(alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->shash = crypto_alloc_shash(alg_base, 0, 0);
if (IS_ERR(ctx->shash)) {
dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
alg_base);
@@ -1511,8 +1501,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
/* for fallback */
ctx->fallback.ahash =
- crypto_alloc_ahash(alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ crypto_alloc_ahash(alg_base, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->fallback.ahash)) {
dev_err(ctx->dev_data->dev,
"Could not load fallback driver\n");
@@ -1546,54 +1535,38 @@ static int sa_sha_init(struct ahash_request *req)
crypto_ahash_digestsize(tfm), rctx);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, NULL, 0);
return crypto_ahash_init(&rctx->fallback_req);
}
static int sa_sha_update(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
static int sa_sha_final(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
static int sa_sha_finup(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, req->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -1605,8 +1578,7 @@ static int sa_sha_import(struct ahash_request *req, const void *in)
struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -1614,12 +1586,9 @@ static int sa_sha_import(struct ahash_request *req, const void *in)
static int sa_sha_export(struct ahash_request *req, void *out)
{
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *subreq = &rctx->fallback_req;
- ahash_request_set_tfm(subreq, ctx->fallback.ahash);
- subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
return crypto_ahash_export(subreq, out);
}
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index ca9d0cca1f74..0e07d0523291 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -269,7 +269,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
unsigned int cmdlen, key1_id, key2_id;
int ret;
- rctx->iv = (u32 *)req->iv;
+ rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
rctx->len = req->cryptlen;
key1_id = ctx->key1_id;
key2_id = ctx->key2_id;
@@ -498,9 +498,6 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
if (!req->cryptlen)
return 0;
- if (ctx->alg == SE_ALG_ECB)
- req->iv = NULL;
-
rctx->encrypt = encrypt;
return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 42d007b7af45..d09b4aaeecef 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -117,8 +117,9 @@ static int tegra_sha_fallback_init(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -130,10 +131,10 @@ static int tegra_sha_fallback_update(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -145,9 +146,10 @@ static int tegra_sha_fallback_final(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -159,12 +161,11 @@ static int tegra_sha_fallback_finup(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -176,12 +177,11 @@ static int tegra_sha_fallback_digest(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -193,8 +193,9 @@ static int tegra_sha_fallback_import(struct ahash_request *req, const void *in)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -206,8 +207,9 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 580649f9bff8..5813017b6b79 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -3,18 +3,18 @@
* Xilinx ZynqMP SHA Driver.
* Copyright (c) 2022 Xilinx Inc.
*/
-#include <linux/cacheflush.h>
-#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha3.h>
-#include <linux/crypto.h>
+#include <linux/cacheflush.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/firmware/xlnx-zynqmp.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/spinlock.h>
#include <linux/platform_device.h>
#define ZYNQMP_DMA_BIT_MASK 32U
@@ -36,13 +36,11 @@ struct zynqmp_sha_tfm_ctx {
struct crypto_shash *fbk_tfm;
};
-struct zynqmp_sha_desc_ctx {
- struct shash_desc fbk_req;
-};
-
static dma_addr_t update_dma_addr, final_dma_addr;
static char *ubuf, *fbuf;
+static DEFINE_SPINLOCK(zynqmp_sha_lock);
+
static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
{
const char *fallback_driver_name = crypto_shash_alg_name(hash);
@@ -60,8 +58,13 @@ static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
if (IS_ERR(fallback_tfm))
return PTR_ERR(fallback_tfm);
+ if (crypto_shash_descsize(hash) <
+ crypto_shash_statesize(fallback_tfm)) {
+ crypto_free_shash(fallback_tfm);
+ return -EINVAL;
+ }
+
tfm_ctx->fbk_tfm = fallback_tfm;
- hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm);
return 0;
}
@@ -70,61 +73,55 @@ static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
{
struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
- if (tfm_ctx->fbk_tfm) {
- crypto_free_shash(tfm_ctx->fbk_tfm);
- tfm_ctx->fbk_tfm = NULL;
- }
+ crypto_free_shash(tfm_ctx->fbk_tfm);
+}
- memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
+static int zynqmp_sha_continue(struct shash_desc *desc,
+ struct shash_desc *fbdesc, int err)
+{
+ err = err ?: crypto_shash_export(fbdesc, shash_desc_ctx(desc));
+ shash_desc_zero(fbdesc);
+ return err;
}
static int zynqmp_sha_init(struct shash_desc *desc)
{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct crypto_shash *fbtfm = tctx->fbk_tfm;
+ SHASH_DESC_ON_STACK(fbdesc, fbtfm);
+ int err;
- dctx->fbk_req.tfm = tctx->fbk_tfm;
- return crypto_shash_init(&dctx->fbk_req);
+ fbdesc->tfm = fbtfm;
+ err = crypto_shash_init(fbdesc);
+ return zynqmp_sha_continue(desc, fbdesc, err);
}
static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_update(&dctx->fbk_req, data, length);
-}
-
-static int zynqmp_sha_final(struct shash_desc *desc, u8 *out)
-{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct crypto_shash *fbtfm = tctx->fbk_tfm;
+ SHASH_DESC_ON_STACK(fbdesc, fbtfm);
+ int err;
- return crypto_shash_final(&dctx->fbk_req, out);
+ fbdesc->tfm = fbtfm;
+ err = crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
+ crypto_shash_update(fbdesc, data, length);
+ return zynqmp_sha_continue(desc, fbdesc, err);
}
static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_finup(&dctx->fbk_req, data, length, out);
-}
-
-static int zynqmp_sha_import(struct shash_desc *desc, const void *in)
-{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct crypto_shash *fbtfm = tctx->fbk_tfm;
+ SHASH_DESC_ON_STACK(fbdesc, fbtfm);
- dctx->fbk_req.tfm = tctx->fbk_tfm;
- return crypto_shash_import(&dctx->fbk_req, in);
+ fbdesc->tfm = fbtfm;
+ return crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
+ crypto_shash_finup(fbdesc, data, length, out);
}
-static int zynqmp_sha_export(struct shash_desc *desc, void *out)
-{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_export(&dctx->fbk_req, out);
-}
-
-static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+static int __zynqmp_sha_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
unsigned int remaining_len = len;
int update_size;
@@ -159,26 +156,27 @@ static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned i
return ret;
}
+static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+{
+ scoped_guard(spinlock_bh, &zynqmp_sha_lock)
+ return __zynqmp_sha_digest(desc, data, len, out);
+}
+
static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
.sha3_384 = {
.init = zynqmp_sha_init,
.update = zynqmp_sha_update,
- .final = zynqmp_sha_final,
.finup = zynqmp_sha_finup,
.digest = zynqmp_sha_digest,
- .export = zynqmp_sha_export,
- .import = zynqmp_sha_import,
.init_tfm = zynqmp_sha_init_tfm,
.exit_tfm = zynqmp_sha_exit_tfm,
- .descsize = sizeof(struct zynqmp_sha_desc_ctx),
- .statesize = sizeof(struct sha3_state),
+ .descsize = SHA3_384_EXPORT_SIZE,
.digestsize = SHA3_384_DIGEST_SIZE,
.base = {
.cra_name = "sha3-384",
.cra_driver_name = "zynqmp-sha3-384",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index cf1ba673b8c2..48b7314afdb8 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -114,6 +114,77 @@ config CXL_FEATURES
If unsure say 'n'
+config CXL_EDAC_MEM_FEATURES
+ bool "CXL: EDAC Memory Features"
+ depends on EXPERT
+ depends on CXL_MEM
+ depends on CXL_FEATURES
+ depends on EDAC >= CXL_BUS
+ help
+ The CXL EDAC memory feature is optional and allows the host to
+ control the EDAC memory feature configuration of CXL memory
+ expander devices.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory RAS feature established by the platform/device.
+ Otherwise say 'n'.
+
+config CXL_EDAC_SCRUB
+ bool "Enable CXL Patrol Scrub Control (Patrol Read)"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_SCRUB
+ help
+ The CXL EDAC scrub control is optional and allows the host to
+ control the scrub feature configuration of CXL memory expander
+ devices.
+
+ When enabled, 'cxl_mem' and 'cxl_region' EDAC devices are
+ published with memory scrub control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-scrub.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory scrub feature established by the platform/device
+ (e.g. scrub rates for the patrol scrub feature).
+ Otherwise say 'n'.
+
+config CXL_EDAC_ECS
+ bool "Enable CXL Error Check Scrub (Repair)"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_ECS
+ help
+ The CXL EDAC ECS control is optional and allows the host to
+ control the ECS feature configuration of CXL memory expander
+ devices.
+
+ When enabled, 'cxl_mem' EDAC devices are published with memory
+ ECS control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-ecs.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory ECS feature established by the platform/device.
+ Otherwise say 'n'.
+
+config CXL_EDAC_MEM_REPAIR
+ bool "Enable CXL Memory Repair"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_MEM_REPAIR
+ help
+ The CXL EDAC memory repair control is optional and allows the
+ host to control the configuration of memory repair features
+ (e.g. sparing, PPR) of CXL memory expander devices.
+
+ When enabled, the memory repair feature requires approximately
+ 43KB of additional memory to store CXL DRAM and CXL general
+ media event records.
+
+ When enabled, 'cxl_mem' EDAC devices are published with memory
+ repair control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-memory-repair.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory repair feature established by the platform/device.
+ Otherwise say 'n'.
+
config CXL_PORT
default CXL_BUS
tristate
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index cb14829bb9be..a1a99ec3f12c 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -11,8 +11,6 @@
#include "cxlpci.h"
#include "cxl.h"
-#define CXL_RCRB_SIZE SZ_8K
-
struct cxl_cxims_data {
int nr_maps;
u64 xormaps[] __counted_by(nr_maps);
@@ -421,7 +419,15 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
rc = cxl_decoder_add(cxld, target_map);
if (rc)
return rc;
- return cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
+
+ rc = cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
+ if (rc)
+ return rc;
+
+ dev_dbg(root_port->dev.parent, "%s added to %s\n",
+ dev_name(&cxld->dev), dev_name(&root_port->dev));
+
+ return 0;
}
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
@@ -479,7 +485,11 @@ static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
chbs = (struct acpi_cedt_chbs *) header;
if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
- chbs->length != CXL_RCRB_SIZE)
+ chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL11)
+ return 0;
+
+ if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20 &&
+ chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL20)
return 0;
if (!chbs->base)
@@ -739,10 +749,10 @@ static void remove_cxl_resources(void *data)
* expanding its boundaries to ensure that any conflicting resources become
* children. If a window is expanded it may then conflict with a another window
* entry and require the window to be truncated or trimmed. Consider this
- * situation:
+ * situation::
*
- * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
- * |--------------- "System RAM" -------------|
+ * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
+ * |--------------- "System RAM" -------------|
*
* ...where platform firmware has established as System RAM resource across 2
* windows, but has left some portion of window 1 for dynamic CXL region
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 086df97a0fcf..79e2ef81fde8 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -20,3 +20,4 @@ cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
cxl_core-$(CONFIG_CXL_MCE) += mce.o
cxl_core-$(CONFIG_CXL_FEATURES) += features.o
+cxl_core-$(CONFIG_CXL_EDAC_MEM_FEATURES) += edac.o
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index edb4f41eeacc..0ccef2f2a26a 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -28,7 +28,7 @@ static u32 cdat_normalize(u16 entry, u64 base, u8 type)
*/
if (entry == 0xffff || !entry)
return 0;
- else if (base > (UINT_MAX / (entry)))
+ if (base > (UINT_MAX / (entry)))
return 0;
/*
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 15699299dc11..29b61828a847 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -76,7 +76,7 @@ void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
struct dentry *cxl_debugfs_create_dir(const char *dir);
int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
enum cxl_partition_mode mode);
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size);
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
@@ -119,11 +119,13 @@ int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
int cxl_ras_init(void);
void cxl_ras_exit(void);
-int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port);
+int cxl_gpf_port_setup(struct cxl_dport *dport);
int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
int nid, resource_size_t *size);
#ifdef CONFIG_CXL_FEATURES
+struct cxl_feat_entry *
+cxl_feature_info(struct cxl_features_state *cxlfs, const uuid_t *uuid);
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
enum cxl_get_feat_selection selection,
void *feat_out, size_t feat_out_size, u16 offset,
diff --git a/drivers/cxl/core/edac.c b/drivers/cxl/core/edac.c
new file mode 100644
index 000000000000..2cbc664e5d62
--- /dev/null
+++ b/drivers/cxl/core/edac.c
@@ -0,0 +1,2102 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CXL EDAC memory feature driver.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ *
+ * - Supports functions to configure the EDAC features of
+ * CXL memory devices.
+ * - Registers with the EDAC device subsystem driver to expose
+ * the feature sysfs attributes to the user for configuring
+ * CXL memory RAS features.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/edac.h>
+#include <linux/limits.h>
+#include <linux/unaligned.h>
+#include <linux/xarray.h>
+#include <cxl/features.h>
+#include <cxl.h>
+#include <cxlmem.h>
+#include "core.h"
+#include "trace.h"
+
+#define CXL_NR_EDAC_DEV_FEATURES 7
+
+#define CXL_SCRUB_NO_REGION -1
+
+struct cxl_patrol_scrub_context {
+ u8 instance;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+};
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-222 Device Patrol Scrub Control
+ * Feature Readable Attributes.
+ */
+struct cxl_scrub_rd_attrbs {
+ u8 scrub_cycle_cap;
+ __le16 scrub_cycle_hours;
+ u8 scrub_flags;
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-223 Device Patrol Scrub Control
+ * Feature Writable Attributes.
+ */
+struct cxl_scrub_wr_attrbs {
+ u8 scrub_cycle_hours;
+ u8 scrub_flags;
+} __packed;
+
+#define CXL_SCRUB_CONTROL_CHANGEABLE BIT(0)
+#define CXL_SCRUB_CONTROL_REALTIME BIT(1)
+#define CXL_SCRUB_CONTROL_CYCLE_MASK GENMASK(7, 0)
+#define CXL_SCRUB_CONTROL_MIN_CYCLE_MASK GENMASK(15, 8)
+#define CXL_SCRUB_CONTROL_ENABLE BIT(0)
+
+#define CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap) \
+ FIELD_GET(CXL_SCRUB_CONTROL_CHANGEABLE, cap)
+#define CXL_GET_SCRUB_CYCLE(cycle) \
+ FIELD_GET(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle)
+#define CXL_GET_SCRUB_MIN_CYCLE(cycle) \
+ FIELD_GET(CXL_SCRUB_CONTROL_MIN_CYCLE_MASK, cycle)
+#define CXL_GET_SCRUB_EN_STS(flags) FIELD_GET(CXL_SCRUB_CONTROL_ENABLE, flags)
+
+#define CXL_SET_SCRUB_CYCLE(cycle) \
+ FIELD_PREP(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle)
+#define CXL_SET_SCRUB_EN(en) FIELD_PREP(CXL_SCRUB_CONTROL_ENABLE, en)
+
+static int cxl_mem_scrub_get_attrbs(struct cxl_mailbox *cxl_mbox, u8 *cap,
+ u16 *cycle, u8 *flags, u8 *min_cycle)
+{
+ size_t rd_data_size = sizeof(struct cxl_scrub_rd_attrbs);
+ size_t data_size;
+ struct cxl_scrub_rd_attrbs *rd_attrbs __free(kfree) =
+ kzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ *cap = rd_attrbs->scrub_cycle_cap;
+ *cycle = le16_to_cpu(rd_attrbs->scrub_cycle_hours);
+ *flags = rd_attrbs->scrub_flags;
+ if (min_cycle)
+ *min_cycle = CXL_GET_SCRUB_MIN_CYCLE(*cycle);
+
+ return 0;
+}
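
As the CXL_SCRUB_CONTROL_*_MASK definitions above encode, the 16-bit scrub cycle value packs the current cycle in hours into bits 7:0 and the minimum supported cycle into bits 15:8. A small standalone illustration of that decode, with plain shifts in place of FIELD_GET() and a made-up register value:

/* Decode of the packed scrub cycle field read above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t scrub_cycle_hours = 0x010C;	/* min = 1 h, current = 12 h */
	uint8_t cycle = scrub_cycle_hours & 0xff;		/* bits 7:0 */
	uint8_t min_cycle = (scrub_cycle_hours >> 8) & 0xff;	/* bits 15:8 */

	printf("current cycle: %u hours, minimum: %u hours\n", cycle, min_cycle);
	return 0;
}
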
+
+static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 *cap, u16 *cycle, u8 *flags, u8 *min_cycle)
+{
+ struct cxl_mailbox *cxl_mbox;
+ u8 min_scrub_cycle = U8_MAX;
+ struct cxl_region_params *p;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+ int i, ret;
+
+ if (!cxl_ps_ctx->cxlr) {
+ cxl_mbox = &cxl_ps_ctx->cxlmd->cxlds->cxl_mbox;
+ return cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle,
+ flags, min_cycle);
+ }
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ cxlr = cxl_ps_ctx->cxlr;
+ p = &cxlr->params;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle, flags,
+ min_cycle);
+ if (ret)
+ return ret;
+
+ if (min_cycle)
+ min_scrub_cycle = min(*min_cycle, min_scrub_cycle);
+ }
+
+ if (min_cycle)
+ *min_cycle = min_scrub_cycle;
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs_region(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ struct cxl_scrub_wr_attrbs wr_attrbs;
+ struct cxl_mailbox *cxl_mbox;
+ struct cxl_region_params *p;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+ int ret, i;
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ cxlr = cxl_ps_ctx->cxlr;
+ p = &cxlr->params;
+ wr_attrbs.scrub_cycle_hours = cycle;
+ wr_attrbs.scrub_flags = flags;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ cxl_ps_ctx->set_version, &wr_attrbs,
+ sizeof(wr_attrbs),
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET,
+ 0, NULL);
+ if (ret)
+ return ret;
+
+ if (cycle != cxlmd->scrub_cycle) {
+ if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION)
+ dev_info(dev,
+ "Device scrub rate(%d hours) set by region%d rate overwritten by region%d scrub rate(%d hours)\n",
+ cxlmd->scrub_cycle,
+ cxlmd->scrub_region_id, cxlr->id,
+ cycle);
+
+ cxlmd->scrub_cycle = cycle;
+ cxlmd->scrub_region_id = cxlr->id;
+ }
+ }
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs_device(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ struct cxl_scrub_wr_attrbs wr_attrbs;
+ struct cxl_mailbox *cxl_mbox;
+ struct cxl_memdev *cxlmd;
+ int ret;
+
+ wr_attrbs.scrub_cycle_hours = cycle;
+ wr_attrbs.scrub_flags = flags;
+
+ cxlmd = cxl_ps_ctx->cxlmd;
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ cxl_ps_ctx->set_version, &wr_attrbs,
+ sizeof(wr_attrbs),
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET, 0,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (cycle != cxlmd->scrub_cycle) {
+ if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION)
+ dev_info(dev,
+ "Device scrub rate(%d hours) set by region%d rate overwritten with device local scrub rate(%d hours)\n",
+ cxlmd->scrub_cycle, cxlmd->scrub_region_id,
+ cycle);
+
+ cxlmd->scrub_cycle = cycle;
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+ }
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ if (cxl_ps_ctx->cxlr)
+ return cxl_scrub_set_attrbs_region(dev, cxl_ps_ctx, cycle, flags);
+
+ return cxl_scrub_set_attrbs_device(dev, cxl_ps_ctx, cycle, flags);
+}
+
+static int cxl_patrol_scrub_get_enabled_bg(struct device *dev, void *drv_data,
+ bool *enabled)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ *enabled = CXL_GET_SCRUB_EN_STS(flags);
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_set_enabled_bg(struct device *dev, void *drv_data,
+ bool enable)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags, wr_cycle;
+ u16 rd_cycle;
+ int ret;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ wr_cycle = CXL_GET_SCRUB_CYCLE(rd_cycle);
+ flags = CXL_SET_SCRUB_EN(enable);
+
+ return cxl_scrub_set_attrbs(dev, ctx, wr_cycle, flags);
+}
+
+static int cxl_patrol_scrub_get_min_scrub_cycle(struct device *dev,
+ void *drv_data, u32 *min)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags, min_cycle;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, &min_cycle);
+ if (ret)
+ return ret;
+
+ *min = min_cycle * 3600;
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_get_max_scrub_cycle(struct device *dev,
+ void *drv_data, u32 *max)
+{
+ *max = U8_MAX * 3600; /* Max set by register size */
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_get_scrub_cycle(struct device *dev, void *drv_data,
+ u32 *scrub_cycle_secs)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ *scrub_cycle_secs = CXL_GET_SCRUB_CYCLE(cycle) * 3600;
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_set_scrub_cycle(struct device *dev, void *drv_data,
+ u32 scrub_cycle_secs)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 scrub_cycle_hours = scrub_cycle_secs / 3600;
+ u8 cap, wr_cycle, flags, min_cycle;
+ u16 rd_cycle;
+ int ret;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, &min_cycle);
+ if (ret)
+ return ret;
+
+ if (!CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap))
+ return -EOPNOTSUPP;
+
+ if (scrub_cycle_hours < min_cycle) {
+ dev_dbg(dev, "Invalid CXL patrol scrub cycle(%d) to set\n",
+ scrub_cycle_hours);
+ dev_dbg(dev,
+ "Minimum supported CXL patrol scrub cycle in hour %d\n",
+ min_cycle);
+ return -EINVAL;
+ }
+ wr_cycle = CXL_SET_SCRUB_CYCLE(scrub_cycle_hours);
+
+ return cxl_scrub_set_attrbs(dev, ctx, wr_cycle, flags);
+}
+
+static const struct edac_scrub_ops cxl_ps_scrub_ops = {
+ .get_enabled_bg = cxl_patrol_scrub_get_enabled_bg,
+ .set_enabled_bg = cxl_patrol_scrub_set_enabled_bg,
+ .get_min_cycle = cxl_patrol_scrub_get_min_scrub_cycle,
+ .get_max_cycle = cxl_patrol_scrub_get_max_scrub_cycle,
+ .get_cycle_duration = cxl_patrol_scrub_get_scrub_cycle,
+ .set_cycle_duration = cxl_patrol_scrub_set_scrub_cycle,
+};
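
The scrub callbacks above also translate between units: the EDAC core deals in seconds, while the device feature stores whole hours in an 8-bit field, so the getters multiply by 3600 and the setter divides by 3600 and rejects values below the device minimum. A self-contained sketch of that conversion and range check (function and variable names are illustrative):

/* Seconds-to-hours handling mirroring the scrub callbacks above. */
#include <stdint.h>
#include <stdio.h>

static int scrub_secs_to_hours(uint32_t secs, uint8_t min_cycle_hours,
			       uint8_t *out_hours)
{
	uint32_t hours = secs / 3600;	/* truncating division, as in the driver */

	/* below the device minimum or beyond the 8-bit field: rejected */
	if (hours < min_cycle_hours || hours > UINT8_MAX)
		return -1;
	*out_hours = hours;
	return 0;
}

int main(void)
{
	uint8_t hours;

	/* 12 h requested, device minimum 1 h: accepted */
	if (!scrub_secs_to_hours(12 * 3600, 1, &hours))
		printf("accepted: %u hours (%u seconds back)\n",
		       hours, hours * 3600);

	/* 30 min requested: truncates to 0 hours and is rejected */
	if (scrub_secs_to_hours(1800, 1, &hours))
		printf("rejected: below the device minimum\n");
	return 0;
}
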
+
+static int cxl_memdev_scrub_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ u8 scrub_inst)
+{
+ struct cxl_patrol_scrub_context *cxl_ps_ctx;
+ struct cxl_feat_entry *feat_entry;
+ u8 cap, flags;
+ u16 cycle;
+ int rc;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_PATROL_SCRUB_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_ps_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL);
+ if (!cxl_ps_ctx)
+ return -ENOMEM;
+
+ *cxl_ps_ctx = (struct cxl_patrol_scrub_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .instance = scrub_inst,
+ .cxlmd = cxlmd,
+ };
+
+ rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap, &cycle,
+ &flags, NULL);
+ if (rc)
+ return rc;
+
+ cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle);
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+
+ ras_feature->ft_type = RAS_FEAT_SCRUB;
+ ras_feature->instance = cxl_ps_ctx->instance;
+ ras_feature->scrub_ops = &cxl_ps_scrub_ops;
+ ras_feature->ctx = cxl_ps_ctx;
+
+ return 0;
+}
+
+static int cxl_region_scrub_init(struct cxl_region *cxlr,
+ struct edac_dev_feature *ras_feature,
+ u8 scrub_inst)
+{
+ struct cxl_patrol_scrub_context *cxl_ps_ctx;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_feat_entry *feat_entry = NULL;
+ struct cxl_memdev *cxlmd;
+ u8 cap, flags;
+ u16 cycle;
+ int i, rc;
+
+ /*
+ * The cxl_region_rwsem must be held if the code below is used in a context
+ * other than when the region is in the probe state, as shown here.
+ */
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_PATROL_SCRUB_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) &
+ CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap,
+ &cycle, &flags, NULL);
+ if (rc)
+ return rc;
+
+ cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle);
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+ }
+
+ cxl_ps_ctx = devm_kzalloc(&cxlr->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL);
+ if (!cxl_ps_ctx)
+ return -ENOMEM;
+
+ *cxl_ps_ctx = (struct cxl_patrol_scrub_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .instance = scrub_inst,
+ .cxlr = cxlr,
+ };
+
+ ras_feature->ft_type = RAS_FEAT_SCRUB;
+ ras_feature->instance = cxl_ps_ctx->instance;
+ ras_feature->scrub_ops = &cxl_ps_scrub_ops;
+ ras_feature->ctx = cxl_ps_ctx;
+
+ return 0;
+}
+
+struct cxl_ecs_context {
+ u16 num_media_frus;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ struct cxl_memdev *cxlmd;
+};
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-225 DDR5 ECS Control Feature
+ * Readable Attributes.
+ */
+struct cxl_ecs_fru_rd_attrbs {
+ u8 ecs_cap;
+ __le16 ecs_config;
+ u8 ecs_flags;
+} __packed;
+
+struct cxl_ecs_rd_attrbs {
+ u8 ecs_log_cap;
+ struct cxl_ecs_fru_rd_attrbs fru_attrbs[];
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-226 DDR5 ECS Control Feature
+ * Writable Attributes.
+ */
+struct cxl_ecs_fru_wr_attrbs {
+ __le16 ecs_config;
+} __packed;
+
+struct cxl_ecs_wr_attrbs {
+ u8 ecs_log_cap;
+ struct cxl_ecs_fru_wr_attrbs fru_attrbs[];
+} __packed;
+
+#define CXL_ECS_LOG_ENTRY_TYPE_MASK GENMASK(1, 0)
+#define CXL_ECS_REALTIME_REPORT_CAP_MASK BIT(0)
+#define CXL_ECS_THRESHOLD_COUNT_MASK GENMASK(2, 0)
+#define CXL_ECS_COUNT_MODE_MASK BIT(3)
+#define CXL_ECS_RESET_COUNTER_MASK BIT(4)
+#define CXL_ECS_RESET_COUNTER 1
+
+enum {
+ ECS_THRESHOLD_256 = 256,
+ ECS_THRESHOLD_1024 = 1024,
+ ECS_THRESHOLD_4096 = 4096,
+};
+
+enum {
+ ECS_THRESHOLD_IDX_256 = 3,
+ ECS_THRESHOLD_IDX_1024 = 4,
+ ECS_THRESHOLD_IDX_4096 = 5,
+};
+
+static const u16 ecs_supp_threshold[] = {
+ [ECS_THRESHOLD_IDX_256] = 256,
+ [ECS_THRESHOLD_IDX_1024] = 1024,
+ [ECS_THRESHOLD_IDX_4096] = 4096,
+};
+
+enum {
+ ECS_LOG_ENTRY_TYPE_DRAM = 0x0,
+ ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU = 0x1,
+};
+
+enum cxl_ecs_count_mode {
+ ECS_MODE_COUNTS_ROWS = 0,
+ ECS_MODE_COUNTS_CODEWORDS = 1,
+};
+
+static int cxl_mem_ecs_get_attrbs(struct device *dev,
+ struct cxl_ecs_context *cxl_ecs_ctx,
+ int fru_id, u8 *log_cap, u16 *config)
+{
+ struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs;
+ size_t rd_data_size;
+ size_t data_size;
+
+ rd_data_size = cxl_ecs_ctx->get_feat_size;
+
+ struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) =
+ kvzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ fru_rd_attrbs = rd_attrbs->fru_attrbs;
+ *log_cap = rd_attrbs->ecs_log_cap;
+ *config = le16_to_cpu(fru_rd_attrbs[fru_id].ecs_config);
+
+ return 0;
+}
+
+static int cxl_mem_ecs_set_attrbs(struct device *dev,
+ struct cxl_ecs_context *cxl_ecs_ctx,
+ int fru_id, u8 log_cap, u16 config)
+{
+ struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs;
+ struct cxl_ecs_fru_wr_attrbs *fru_wr_attrbs;
+ size_t rd_data_size, wr_data_size;
+ u16 num_media_frus, count;
+ size_t data_size;
+
+ num_media_frus = cxl_ecs_ctx->num_media_frus;
+ rd_data_size = cxl_ecs_ctx->get_feat_size;
+ wr_data_size = cxl_ecs_ctx->set_feat_size;
+ struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) =
+ kvzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ struct cxl_ecs_wr_attrbs *wr_attrbs __free(kvfree) =
+ kvzalloc(wr_data_size, GFP_KERNEL);
+ if (!wr_attrbs)
+ return -ENOMEM;
+
+ /*
+ * Fill writable attributes from the current attributes read
+ * for all the media FRUs.
+ */
+ fru_rd_attrbs = rd_attrbs->fru_attrbs;
+ fru_wr_attrbs = wr_attrbs->fru_attrbs;
+ wr_attrbs->ecs_log_cap = log_cap;
+ for (count = 0; count < num_media_frus; count++)
+ fru_wr_attrbs[count].ecs_config =
+ fru_rd_attrbs[count].ecs_config;
+
+ fru_wr_attrbs[fru_id].ecs_config = cpu_to_le16(config);
+
+ return cxl_set_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ cxl_ecs_ctx->set_version, wr_attrbs,
+ wr_data_size,
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET,
+ 0, NULL);
+}
+
+static u8 cxl_get_ecs_log_entry_type(u8 log_cap, u16 config)
+{
+ return FIELD_GET(CXL_ECS_LOG_ENTRY_TYPE_MASK, log_cap);
+}
+
+static u16 cxl_get_ecs_threshold(u8 log_cap, u16 config)
+{
+ u8 index = FIELD_GET(CXL_ECS_THRESHOLD_COUNT_MASK, config);
+
+ return ecs_supp_threshold[index];
+}
+
+static u8 cxl_get_ecs_count_mode(u8 log_cap, u16 config)
+{
+ return FIELD_GET(CXL_ECS_COUNT_MODE_MASK, config);
+}
+
+#define CXL_ECS_GET_ATTR(attrb) \
+ static int cxl_ecs_get_##attrb(struct device *dev, void *drv_data, \
+ int fru_id, u32 *val) \
+ { \
+ struct cxl_ecs_context *ctx = drv_data; \
+ u8 log_cap; \
+ u16 config; \
+ int ret; \
+ \
+ ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, \
+ &config); \
+ if (ret) \
+ return ret; \
+ \
+ *val = cxl_get_ecs_##attrb(log_cap, config); \
+ \
+ return 0; \
+ }
+
+CXL_ECS_GET_ATTR(log_entry_type)
+CXL_ECS_GET_ATTR(count_mode)
+CXL_ECS_GET_ATTR(threshold)
+
+static int cxl_set_ecs_log_entry_type(struct device *dev, u8 *log_cap,
+ u16 *config, u32 val)
+{
+ if (val != ECS_LOG_ENTRY_TYPE_DRAM &&
+ val != ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU)
+ return -EINVAL;
+
+ *log_cap = FIELD_PREP(CXL_ECS_LOG_ENTRY_TYPE_MASK, val);
+
+ return 0;
+}
+
+static int cxl_set_ecs_threshold(struct device *dev, u8 *log_cap, u16 *config,
+ u32 val)
+{
+ *config &= ~CXL_ECS_THRESHOLD_COUNT_MASK;
+
+ switch (val) {
+ case ECS_THRESHOLD_256:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_256);
+ break;
+ case ECS_THRESHOLD_1024:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_1024);
+ break;
+ case ECS_THRESHOLD_4096:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_4096);
+ break;
+ default:
+ dev_dbg(dev, "Invalid CXL ECS threshold count(%d) to set\n",
+ val);
+ dev_dbg(dev, "Supported ECS threshold counts: %u, %u, %u\n",
+ ECS_THRESHOLD_256, ECS_THRESHOLD_1024,
+ ECS_THRESHOLD_4096);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxl_set_ecs_count_mode(struct device *dev, u8 *log_cap, u16 *config,
+ u32 val)
+{
+ if (val != ECS_MODE_COUNTS_ROWS && val != ECS_MODE_COUNTS_CODEWORDS) {
+ dev_dbg(dev, "Invalid CXL ECS scrub mode(%d) to set\n", val);
+ dev_dbg(dev,
+ "Supported ECS Modes: 0: ECS counts rows with errors,"
+ " 1: ECS counts codewords with errors\n");
+ return -EINVAL;
+ }
+
+ *config &= ~CXL_ECS_COUNT_MODE_MASK;
+ *config |= FIELD_PREP(CXL_ECS_COUNT_MODE_MASK, val);
+
+ return 0;
+}
+
+static int cxl_set_ecs_reset_counter(struct device *dev, u8 *log_cap,
+ u16 *config, u32 val)
+{
+ if (val != CXL_ECS_RESET_COUNTER)
+ return -EINVAL;
+
+ *config &= ~CXL_ECS_RESET_COUNTER_MASK;
+ *config |= FIELD_PREP(CXL_ECS_RESET_COUNTER_MASK, val);
+
+ return 0;
+}
+
+#define CXL_ECS_SET_ATTR(attrb) \
+ static int cxl_ecs_set_##attrb(struct device *dev, void *drv_data, \
+ int fru_id, u32 val) \
+ { \
+ struct cxl_ecs_context *ctx = drv_data; \
+ u8 log_cap; \
+ u16 config; \
+ int ret; \
+ \
+ if (!capable(CAP_SYS_RAWIO)) \
+ return -EPERM; \
+ \
+ ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, \
+ &config); \
+ if (ret) \
+ return ret; \
+ \
+ ret = cxl_set_ecs_##attrb(dev, &log_cap, &config, val); \
+ if (ret) \
+ return ret; \
+ \
+ return cxl_mem_ecs_set_attrbs(dev, ctx, fru_id, log_cap, \
+ config); \
+ }
+CXL_ECS_SET_ATTR(log_entry_type)
+CXL_ECS_SET_ATTR(count_mode)
+CXL_ECS_SET_ATTR(reset_counter)
+CXL_ECS_SET_ATTR(threshold)
+
+static const struct edac_ecs_ops cxl_ecs_ops = {
+ .get_log_entry_type = cxl_ecs_get_log_entry_type,
+ .set_log_entry_type = cxl_ecs_set_log_entry_type,
+ .get_mode = cxl_ecs_get_count_mode,
+ .set_mode = cxl_ecs_set_count_mode,
+ .reset = cxl_ecs_set_reset_counter,
+ .get_threshold = cxl_ecs_get_threshold,
+ .set_threshold = cxl_ecs_set_threshold,
+};
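
The ECS threshold attribute is stored as an index rather than a count: the 3-bit CXL_ECS_THRESHOLD_COUNT_MASK field holds 3, 4 or 5 for thresholds of 256, 1024 or 4096, per ecs_supp_threshold[] above. A compact standalone illustration with plain masks standing in for FIELD_PREP()/FIELD_GET():

/* Encode/decode of the 3-bit ECS threshold index. */
#include <stdint.h>
#include <stdio.h>

#define ECS_THRESHOLD_COUNT_MASK 0x7u	/* bits 2:0 of ecs_config */

static const uint16_t ecs_supp_threshold[] = {
	[3] = 256, [4] = 1024, [5] = 4096,
};

static uint16_t ecs_encode_threshold(uint16_t config, unsigned int idx)
{
	return (config & ~ECS_THRESHOLD_COUNT_MASK) |
	       (idx & ECS_THRESHOLD_COUNT_MASK);
}

int main(void)
{
	uint16_t config = 0;

	config = ecs_encode_threshold(config, 4);	/* request 1024 */
	printf("decoded threshold: %u\n",
	       ecs_supp_threshold[config & ECS_THRESHOLD_COUNT_MASK]);
	return 0;
}
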
+
+static int cxl_memdev_ecs_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature)
+{
+ struct cxl_ecs_context *cxl_ecs_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int num_media_frus;
+
+ feat_entry =
+ cxl_feature_info(to_cxlfs(cxlmd->cxlds), &CXL_FEAT_ECS_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ num_media_frus = (le16_to_cpu(feat_entry->get_feat_size) -
+ sizeof(struct cxl_ecs_rd_attrbs)) /
+ sizeof(struct cxl_ecs_fru_rd_attrbs);
+ if (!num_media_frus)
+ return -EOPNOTSUPP;
+
+ cxl_ecs_ctx =
+ devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ecs_ctx), GFP_KERNEL);
+ if (!cxl_ecs_ctx)
+ return -ENOMEM;
+
+ *cxl_ecs_ctx = (struct cxl_ecs_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .num_media_frus = num_media_frus,
+ .cxlmd = cxlmd,
+ };
+
+ ras_feature->ft_type = RAS_FEAT_ECS;
+ ras_feature->ecs_ops = &cxl_ecs_ops;
+ ras_feature->ctx = cxl_ecs_ctx;
+ ras_feature->ecs_info.num_media_frus = num_media_frus;
+
+ return 0;
+}
+
+/*
+ * Perform Maintenance CXL 3.2 Spec 8.2.10.7.1
+ */
+
+/*
+ * Perform Maintenance input payload
+ * CXL rev 3.2 section 8.2.10.7.1 Table 8-117
+ */
+struct cxl_mbox_maintenance_hdr {
+ u8 op_class;
+ u8 op_subclass;
+} __packed;
+
+static int cxl_perform_maintenance(struct cxl_mailbox *cxl_mbox, u8 class,
+ u8 subclass, void *data_in,
+ size_t data_in_size)
+{
+ struct cxl_memdev_maintenance_pi {
+ struct cxl_mbox_maintenance_hdr hdr;
+ u8 data[];
+ } __packed;
+ struct cxl_mbox_cmd mbox_cmd;
+ size_t hdr_size;
+
+ struct cxl_memdev_maintenance_pi *pi __free(kvfree) =
+ kvzalloc(cxl_mbox->payload_size, GFP_KERNEL);
+ if (!pi)
+ return -ENOMEM;
+
+ pi->hdr.op_class = class;
+ pi->hdr.op_subclass = subclass;
+ hdr_size = sizeof(pi->hdr);
+ /*
+ * Check that the mailbox payload buffer is large enough for
+ * the maintenance data transfer.
+ */
+ if (hdr_size + data_in_size > cxl_mbox->payload_size)
+ return -ENOMEM;
+
+ memcpy(pi->data, data_in, data_in_size);
+ mbox_cmd = (struct cxl_mbox_cmd){
+ .opcode = CXL_MBOX_OP_DO_MAINTENANCE,
+ .size_in = hdr_size + data_in_size,
+ .payload_in = pi,
+ };
+
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+}
+
+/*
+ * Support for determining whether a memory operation's attributes
+ * are from the current boot or not.
+ */
+
+struct cxl_mem_err_rec {
+ struct xarray rec_gen_media;
+ struct xarray rec_dram;
+};
+
+enum cxl_mem_repair_type {
+ CXL_PPR,
+ CXL_CACHELINE_SPARING,
+ CXL_ROW_SPARING,
+ CXL_BANK_SPARING,
+ CXL_RANK_SPARING,
+ CXL_REPAIR_MAX,
+};
+
+/**
+ * struct cxl_mem_repair_attrbs - CXL memory repair attributes
+ * @dpa: DPA of memory to repair
+ * @nibble_mask: nibble mask, identifies one or more nibbles on the memory bus
+ * @row: row of memory to repair
+ * @column: column of memory to repair
+ * @channel: channel of memory to repair
+ * @sub_channel: sub channel of memory to repair
+ * @rank: rank of memory to repair
+ * @bank_group: bank group of memory to repair
+ * @bank: bank of memory to repair
+ * @repair_type: repair type, e.g. PPR, memory sparing, etc.
+ */
+struct cxl_mem_repair_attrbs {
+ u64 dpa;
+ u32 nibble_mask;
+ u32 row;
+ u16 column;
+ u8 channel;
+ u8 sub_channel;
+ u8 rank;
+ u8 bank_group;
+ u8 bank;
+ enum cxl_mem_repair_type repair_type;
+};
+
+static struct cxl_event_gen_media *
+cxl_find_rec_gen_media(struct cxl_memdev *cxlmd,
+ struct cxl_mem_repair_attrbs *attrbs)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec;
+
+ if (!array_rec)
+ return NULL;
+
+ rec = xa_load(&array_rec->rec_gen_media, attrbs->dpa);
+ if (!rec)
+ return NULL;
+
+ if (attrbs->repair_type == CXL_PPR)
+ return rec;
+
+ return NULL;
+}
+
+static struct cxl_event_dram *
+cxl_find_rec_dram(struct cxl_memdev *cxlmd,
+ struct cxl_mem_repair_attrbs *attrbs)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_dram *rec;
+ u16 validity_flags;
+
+ if (!array_rec)
+ return NULL;
+
+ rec = xa_load(&array_rec->rec_dram, attrbs->dpa);
+ if (!rec)
+ return NULL;
+
+ validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
+ if (!(validity_flags & CXL_DER_VALID_CHANNEL) ||
+ !(validity_flags & CXL_DER_VALID_RANK))
+ return NULL;
+
+ switch (attrbs->repair_type) {
+ case CXL_PPR:
+ if (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) == attrbs->nibble_mask)
+ return rec;
+ break;
+ case CXL_CACHELINE_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK) ||
+ !(validity_flags & CXL_DER_VALID_ROW) ||
+ !(validity_flags & CXL_DER_VALID_COLUMN))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ get_unaligned_le24(rec->row) == attrbs->row &&
+ get_unaligned_le16(rec->column) == attrbs->column &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask) &&
+ (!(validity_flags & CXL_DER_VALID_SUB_CHANNEL) ||
+ rec->sub_channel == attrbs->sub_channel))
+ return rec;
+ break;
+ case CXL_ROW_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK) ||
+ !(validity_flags & CXL_DER_VALID_ROW))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ get_unaligned_le24(rec->row) == attrbs->row &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ case CXL_BANK_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ case CXL_RANK_SPARING:
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
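
cxl_find_rec_dram() matches optional fields with a validity gate: a field only constrains the match when its validity flag is set, so each test is "flag clear OR values equal". A tiny standalone example of that predicate (VALID_NIBBLE is a stand-in for CXL_DER_VALID_NIBBLE):

/* Validity-gated field comparison as used in the record matching above. */
#include <stdint.h>
#include <stdio.h>

#define VALID_NIBBLE	(1u << 0)

static int nibble_matches(uint16_t validity_flags,
			  uint32_t rec_nibble, uint32_t want_nibble)
{
	return !(validity_flags & VALID_NIBBLE) || rec_nibble == want_nibble;
}

int main(void)
{
	/* flag clear: any requested nibble mask matches */
	printf("%d\n", nibble_matches(0, 0x12, 0x34));
	/* flag set: the values must be equal */
	printf("%d\n", nibble_matches(VALID_NIBBLE, 0x12, 0x34));
	printf("%d\n", nibble_matches(VALID_NIBBLE, 0x12, 0x12));
	return 0;
}
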
+
+#define CXL_MAX_STORAGE_DAYS 10
+#define CXL_MAX_STORAGE_TIME_SECS (CXL_MAX_STORAGE_DAYS * 24 * 60 * 60)
+
+static void cxl_del_expired_gmedia_recs(struct xarray *rec_xarray,
+ struct cxl_event_gen_media *cur_rec)
+{
+ u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp);
+ struct cxl_event_gen_media *rec;
+ unsigned long index;
+ u64 delta_ts_secs;
+
+ xa_for_each(rec_xarray, index, rec) {
+ delta_ts_secs = (cur_ts -
+ le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL;
+ if (delta_ts_secs >= CXL_MAX_STORAGE_TIME_SECS) {
+ xa_erase(rec_xarray, index);
+ kfree(rec);
+ }
+ }
+}
+
+static void cxl_del_expired_dram_recs(struct xarray *rec_xarray,
+ struct cxl_event_dram *cur_rec)
+{
+ u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp);
+ struct cxl_event_dram *rec;
+ unsigned long index;
+ u64 delta_secs;
+
+ xa_for_each(rec_xarray, index, rec) {
+ delta_secs = (cur_ts -
+ le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL;
+ if (delta_secs >= CXL_MAX_STORAGE_TIME_SECS) {
+ xa_erase(rec_xarray, index);
+ kfree(rec);
+ }
+ }
+}
+
+#define CXL_MAX_REC_STORAGE_COUNT 200
+
+static void cxl_del_overflow_old_recs(struct xarray *rec_xarray)
+{
+ void *err_rec;
+ unsigned long index, count = 0;
+
+ xa_for_each(rec_xarray, index, err_rec)
+ count++;
+
+ if (count <= CXL_MAX_REC_STORAGE_COUNT)
+ return;
+
+ count -= CXL_MAX_REC_STORAGE_COUNT;
+ xa_for_each(rec_xarray, index, err_rec) {
+ xa_erase(rec_xarray, index);
+ kfree(err_rec);
+ count--;
+ if (!count)
+ break;
+ }
+}
+
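+/*
+ * Cache a general media event record, keyed by DPA, so that a later memory
+ * repair request can be checked against error records seen in the current
+ * boot. Expired and overflowing entries are pruned on each insert.
+ */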
+int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec;
+ void *old_rec;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return 0;
+
+ rec = kmemdup(&evt->gen_media, sizeof(*rec), GFP_KERNEL);
+ if (!rec)
+ return -ENOMEM;
+
+ old_rec = xa_store(&array_rec->rec_gen_media,
+ le64_to_cpu(rec->media_hdr.phys_addr), rec,
+ GFP_KERNEL);
+ if (xa_is_err(old_rec))
+ return xa_err(old_rec);
+
+ kfree(old_rec);
+
+ cxl_del_expired_gmedia_recs(&array_rec->rec_gen_media, rec);
+ cxl_del_overflow_old_recs(&array_rec->rec_gen_media);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_store_rec_gen_media, "CXL");
+
+int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_dram *rec;
+ void *old_rec;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return 0;
+
+ rec = kmemdup(&evt->dram, sizeof(*rec), GFP_KERNEL);
+ if (!rec)
+ return -ENOMEM;
+
+ old_rec = xa_store(&array_rec->rec_dram,
+ le64_to_cpu(rec->media_hdr.phys_addr), rec,
+ GFP_KERNEL);
+ if (xa_is_err(old_rec))
+ return xa_err(old_rec);
+
+ kfree(old_rec);
+
+ cxl_del_expired_dram_recs(&array_rec->rec_dram, rec);
+ cxl_del_overflow_old_recs(&array_rec->rec_dram);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_store_rec_dram, "CXL");
+
+static bool cxl_is_memdev_memory_online(const struct cxl_memdev *cxlmd)
+{
+ struct cxl_port *port = cxlmd->endpoint;
+
+ if (port && cxl_num_decoders_committed(port))
+ return true;
+
+ return false;
+}
+
+/*
+ * CXL memory sparing control
+ */
+enum cxl_mem_sparing_granularity {
+ CXL_MEM_SPARING_CACHELINE,
+ CXL_MEM_SPARING_ROW,
+ CXL_MEM_SPARING_BANK,
+ CXL_MEM_SPARING_RANK,
+ CXL_MEM_SPARING_MAX
+};
+
+struct cxl_mem_sparing_context {
+ struct cxl_memdev *cxlmd;
+ uuid_t repair_uuid;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u16 effects;
+ u8 instance;
+ u8 get_version;
+ u8 set_version;
+ u8 op_class;
+ u8 op_subclass;
+ bool cap_safe_when_in_use;
+ bool cap_hard_sparing;
+ bool cap_soft_sparing;
+ u8 channel;
+ u8 rank;
+ u8 bank_group;
+ u32 nibble_mask;
+ u64 dpa;
+ u32 row;
+ u16 column;
+ u8 bank;
+ u8 sub_channel;
+ enum edac_mem_repair_type repair_type;
+ bool persist_mode;
+};
+
+#define CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK BIT(0)
+#define CXL_SPARING_RD_CAP_HARD_SPARING_MASK BIT(1)
+#define CXL_SPARING_RD_CAP_SOFT_SPARING_MASK BIT(2)
+
+#define CXL_SPARING_WR_DEVICE_INITIATED_MASK BIT(0)
+
+#define CXL_SPARING_QUERY_RESOURCE_FLAG BIT(0)
+#define CXL_SET_HARD_SPARING_FLAG BIT(1)
+#define CXL_SPARING_SUB_CHNL_VALID_FLAG BIT(2)
+#define CXL_SPARING_NIB_MASK_VALID_FLAG BIT(3)
+
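+/*
+ * CXL_GET_SPARING_SAFE_IN_USE() inverts restriction_flags bit 0 so that a
+ * set restriction bit reads back as "not safe while memory is in use".
+ */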
+#define CXL_GET_SPARING_SAFE_IN_USE(flags) \
+ (FIELD_GET(CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK, \
+ flags) ^ 1)
+#define CXL_GET_CAP_HARD_SPARING(flags) \
+ FIELD_GET(CXL_SPARING_RD_CAP_HARD_SPARING_MASK, \
+ flags)
+#define CXL_GET_CAP_SOFT_SPARING(flags) \
+ FIELD_GET(CXL_SPARING_RD_CAP_SOFT_SPARING_MASK, \
+ flags)
+
+#define CXL_SET_SPARING_QUERY_RESOURCE(val) \
+ FIELD_PREP(CXL_SPARING_QUERY_RESOURCE_FLAG, val)
+#define CXL_SET_HARD_SPARING(val) \
+ FIELD_PREP(CXL_SET_HARD_SPARING_FLAG, val)
+#define CXL_SET_SPARING_SUB_CHNL_VALID(val) \
+ FIELD_PREP(CXL_SPARING_SUB_CHNL_VALID_FLAG, val)
+#define CXL_SET_SPARING_NIB_MASK_VALID(val) \
+ FIELD_PREP(CXL_SPARING_NIB_MASK_VALID_FLAG, val)
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.7.2.3 Table 8-134 Memory Sparing Feature
+ * Readable Attributes.
+ */
+struct cxl_memdev_repair_rd_attrbs_hdr {
+ u8 max_op_latency;
+ __le16 op_cap;
+ __le16 op_mode;
+ u8 op_class;
+ u8 op_subclass;
+ u8 rsvd[9];
+} __packed;
+
+struct cxl_memdev_sparing_rd_attrbs {
+ struct cxl_memdev_repair_rd_attrbs_hdr hdr;
+ u8 rsvd;
+ __le16 restriction_flags;
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.7.1.4 Table 8-120 Memory Sparing Input Payload.
+ */
+struct cxl_memdev_sparing_in_payload {
+ u8 flags;
+ u8 channel;
+ u8 rank;
+ u8 nibble_mask[3];
+ u8 bank_group;
+ u8 bank;
+ u8 row[3];
+ __le16 column;
+ u8 sub_channel;
+} __packed;
+
+static int
+cxl_mem_sparing_get_attrbs(struct cxl_mem_sparing_context *cxl_sparing_ctx)
+{
+ size_t rd_data_size = sizeof(struct cxl_memdev_sparing_rd_attrbs);
+ struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ u16 restriction_flags;
+ size_t data_size;
+ u16 return_code;
+ struct cxl_memdev_sparing_rd_attrbs *rd_attrbs __free(kfree) =
+ kzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &cxl_sparing_ctx->repair_uuid,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, &return_code);
+ if (!data_size)
+ return -EIO;
+
+ cxl_sparing_ctx->op_class = rd_attrbs->hdr.op_class;
+ cxl_sparing_ctx->op_subclass = rd_attrbs->hdr.op_subclass;
+ restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags);
+ cxl_sparing_ctx->cap_safe_when_in_use =
+ CXL_GET_SPARING_SAFE_IN_USE(restriction_flags);
+ cxl_sparing_ctx->cap_hard_sparing =
+ CXL_GET_CAP_HARD_SPARING(restriction_flags);
+ cxl_sparing_ctx->cap_soft_sparing =
+ CXL_GET_CAP_SOFT_SPARING(restriction_flags);
+
+ return 0;
+}
+
+static struct cxl_event_dram *
+cxl_mem_get_rec_dram(struct cxl_memdev *cxlmd,
+ struct cxl_mem_sparing_context *ctx)
+{
+ struct cxl_mem_repair_attrbs attrbs = { 0 };
+
+ attrbs.dpa = ctx->dpa;
+ attrbs.channel = ctx->channel;
+ attrbs.rank = ctx->rank;
+ attrbs.nibble_mask = ctx->nibble_mask;
+ switch (ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ attrbs.repair_type = CXL_CACHELINE_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ attrbs.row = ctx->row;
+ attrbs.column = ctx->column;
+ attrbs.sub_channel = ctx->sub_channel;
+ break;
+ case EDAC_REPAIR_ROW_SPARING:
+ attrbs.repair_type = CXL_ROW_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ attrbs.row = ctx->row;
+ break;
+ case EDAC_REPAIR_BANK_SPARING:
+ attrbs.repair_type = CXL_BANK_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ break;
+ case EDAC_REPAIR_RANK_SPARING:
+		attrbs.repair_type = CXL_RANK_SPARING;
+ break;
+ default:
+ return NULL;
+ }
+
+ return cxl_find_rec_dram(cxlmd, &attrbs);
+}
+
+static int
+cxl_mem_perform_sparing(struct device *dev,
+ struct cxl_mem_sparing_context *cxl_sparing_ctx)
+{
+ struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd;
+ struct cxl_memdev_sparing_in_payload sparing_pi;
+ struct cxl_event_dram *rec = NULL;
+ u16 validity_flags = 0;
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_dpa_rwsem);
+ if (!dpa_lock)
+ return -EINTR;
+
+ if (!cxl_sparing_ctx->cap_safe_when_in_use) {
+ /* Memory to repair must be offline */
+ if (cxl_is_memdev_memory_online(cxlmd))
+ return -EBUSY;
+ } else {
+ if (cxl_is_memdev_memory_online(cxlmd)) {
+ rec = cxl_mem_get_rec_dram(cxlmd, cxl_sparing_ctx);
+ if (!rec)
+ return -EINVAL;
+
+ if (!get_unaligned_le16(rec->media_hdr.validity_flags))
+ return -EINVAL;
+ }
+ }
+
+ memset(&sparing_pi, 0, sizeof(sparing_pi));
+ sparing_pi.flags = CXL_SET_SPARING_QUERY_RESOURCE(0);
+ if (cxl_sparing_ctx->persist_mode)
+ sparing_pi.flags |= CXL_SET_HARD_SPARING(1);
+
+ if (rec)
+ validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
+
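+	/*
+	 * The cases below intentionally fall through: a finer-grained repair
+	 * type (e.g. cacheline sparing) also fills in all of the fields
+	 * required by the coarser types (row, bank, rank, channel).
+	 */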
+ switch (cxl_sparing_ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ sparing_pi.column = cpu_to_le16(cxl_sparing_ctx->column);
+ if (!rec || (validity_flags & CXL_DER_VALID_SUB_CHANNEL)) {
+ sparing_pi.flags |= CXL_SET_SPARING_SUB_CHNL_VALID(1);
+ sparing_pi.sub_channel = cxl_sparing_ctx->sub_channel;
+ }
+ fallthrough;
+ case EDAC_REPAIR_ROW_SPARING:
+ put_unaligned_le24(cxl_sparing_ctx->row, sparing_pi.row);
+ fallthrough;
+ case EDAC_REPAIR_BANK_SPARING:
+ sparing_pi.bank_group = cxl_sparing_ctx->bank_group;
+ sparing_pi.bank = cxl_sparing_ctx->bank;
+ fallthrough;
+ case EDAC_REPAIR_RANK_SPARING:
+ sparing_pi.rank = cxl_sparing_ctx->rank;
+ fallthrough;
+ default:
+ sparing_pi.channel = cxl_sparing_ctx->channel;
+ if ((rec && (validity_flags & CXL_DER_VALID_NIBBLE)) ||
+ (!rec && (!cxl_sparing_ctx->nibble_mask ||
+ (cxl_sparing_ctx->nibble_mask & 0xFFFFFF)))) {
+ sparing_pi.flags |= CXL_SET_SPARING_NIB_MASK_VALID(1);
+ put_unaligned_le24(cxl_sparing_ctx->nibble_mask,
+ sparing_pi.nibble_mask);
+ }
+ break;
+ }
+
+ return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox,
+ cxl_sparing_ctx->op_class,
+ cxl_sparing_ctx->op_subclass,
+ &sparing_pi, sizeof(sparing_pi));
+}
+
+static int cxl_mem_sparing_get_repair_type(struct device *dev, void *drv_data,
+ const char **repair_type)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ switch (ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ case EDAC_REPAIR_ROW_SPARING:
+ case EDAC_REPAIR_BANK_SPARING:
+ case EDAC_REPAIR_RANK_SPARING:
+ *repair_type = edac_repair_type[ctx->repair_type];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define CXL_SPARING_GET_ATTR(attrb, data_type) \
+ static int cxl_mem_sparing_get_##attrb( \
+ struct device *dev, void *drv_data, data_type *val) \
+ { \
+ struct cxl_mem_sparing_context *ctx = drv_data; \
+ \
+ *val = ctx->attrb; \
+ \
+ return 0; \
+ }
+CXL_SPARING_GET_ATTR(persist_mode, bool)
+CXL_SPARING_GET_ATTR(dpa, u64)
+CXL_SPARING_GET_ATTR(nibble_mask, u32)
+CXL_SPARING_GET_ATTR(bank_group, u32)
+CXL_SPARING_GET_ATTR(bank, u32)
+CXL_SPARING_GET_ATTR(rank, u32)
+CXL_SPARING_GET_ATTR(row, u32)
+CXL_SPARING_GET_ATTR(column, u32)
+CXL_SPARING_GET_ATTR(channel, u32)
+CXL_SPARING_GET_ATTR(sub_channel, u32)
+
+#define CXL_SPARING_SET_ATTR(attrb, data_type) \
+ static int cxl_mem_sparing_set_##attrb(struct device *dev, \
+ void *drv_data, data_type val) \
+ { \
+ struct cxl_mem_sparing_context *ctx = drv_data; \
+ \
+ ctx->attrb = val; \
+ \
+ return 0; \
+ }
+CXL_SPARING_SET_ATTR(nibble_mask, u32)
+CXL_SPARING_SET_ATTR(bank_group, u32)
+CXL_SPARING_SET_ATTR(bank, u32)
+CXL_SPARING_SET_ATTR(rank, u32)
+CXL_SPARING_SET_ATTR(row, u32)
+CXL_SPARING_SET_ATTR(column, u32)
+CXL_SPARING_SET_ATTR(channel, u32)
+CXL_SPARING_SET_ATTR(sub_channel, u32)
+
+static int cxl_mem_sparing_set_persist_mode(struct device *dev, void *drv_data,
+ bool persist_mode)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ if ((persist_mode && ctx->cap_hard_sparing) ||
+ (!persist_mode && ctx->cap_soft_sparing))
+ ctx->persist_mode = persist_mode;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int cxl_get_mem_sparing_safe_when_in_use(struct device *dev,
+ void *drv_data, bool *safe)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ *safe = ctx->cap_safe_when_in_use;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_get_min_dpa(struct device *dev, void *drv_data,
+ u64 *min_dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *min_dpa = cxlds->dpa_res.start;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_get_max_dpa(struct device *dev, void *drv_data,
+ u64 *max_dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *max_dpa = cxlds->dpa_res.end;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_set_dpa(struct device *dev, void *drv_data, u64 dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ return -EINVAL;
+
+ ctx->dpa = dpa;
+
+ return 0;
+}
+
+static int cxl_do_mem_sparing(struct device *dev, void *drv_data, u32 val)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ if (val != EDAC_DO_MEM_REPAIR)
+ return -EINVAL;
+
+ return cxl_mem_perform_sparing(dev, ctx);
+}
+
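+/*
+ * The edac_mem_repair_ops below are built as supersets: each finer-grained
+ * sparing granularity reuses the coarser one's callbacks and adds accessors
+ * for its additional address fields.
+ */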
+#define RANK_OPS \
+ .get_repair_type = cxl_mem_sparing_get_repair_type, \
+ .get_persist_mode = cxl_mem_sparing_get_persist_mode, \
+ .set_persist_mode = cxl_mem_sparing_set_persist_mode, \
+ .get_repair_safe_when_in_use = cxl_get_mem_sparing_safe_when_in_use, \
+ .get_min_dpa = cxl_mem_sparing_get_min_dpa, \
+ .get_max_dpa = cxl_mem_sparing_get_max_dpa, \
+ .get_dpa = cxl_mem_sparing_get_dpa, \
+ .set_dpa = cxl_mem_sparing_set_dpa, \
+ .get_nibble_mask = cxl_mem_sparing_get_nibble_mask, \
+ .set_nibble_mask = cxl_mem_sparing_set_nibble_mask, \
+ .get_rank = cxl_mem_sparing_get_rank, \
+ .set_rank = cxl_mem_sparing_set_rank, \
+ .get_channel = cxl_mem_sparing_get_channel, \
+ .set_channel = cxl_mem_sparing_set_channel, \
+ .do_repair = cxl_do_mem_sparing
+
+#define BANK_OPS \
+ RANK_OPS, .get_bank_group = cxl_mem_sparing_get_bank_group, \
+ .set_bank_group = cxl_mem_sparing_set_bank_group, \
+ .get_bank = cxl_mem_sparing_get_bank, \
+ .set_bank = cxl_mem_sparing_set_bank
+
+#define ROW_OPS \
+ BANK_OPS, .get_row = cxl_mem_sparing_get_row, \
+ .set_row = cxl_mem_sparing_set_row
+
+#define CACHELINE_OPS \
+ ROW_OPS, .get_column = cxl_mem_sparing_get_column, \
+ .set_column = cxl_mem_sparing_set_column, \
+ .get_sub_channel = cxl_mem_sparing_get_sub_channel, \
+ .set_sub_channel = cxl_mem_sparing_set_sub_channel
+
+static const struct edac_mem_repair_ops cxl_rank_sparing_ops = {
+ RANK_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_bank_sparing_ops = {
+ BANK_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_row_sparing_ops = {
+ ROW_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_cacheline_sparing_ops = {
+ CACHELINE_OPS,
+};
+
+struct cxl_mem_sparing_desc {
+ const uuid_t repair_uuid;
+ enum edac_mem_repair_type repair_type;
+ const struct edac_mem_repair_ops *repair_ops;
+};
+
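+/* Map each CXL memory sparing feature UUID to its EDAC repair type and ops. */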
+static const struct cxl_mem_sparing_desc mem_sparing_desc[] = {
+ {
+ .repair_uuid = CXL_FEAT_CACHELINE_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_CACHELINE_SPARING,
+ .repair_ops = &cxl_cacheline_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_ROW_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_ROW_SPARING,
+ .repair_ops = &cxl_row_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_BANK_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_BANK_SPARING,
+ .repair_ops = &cxl_bank_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_RANK_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_RANK_SPARING,
+ .repair_ops = &cxl_rank_sparing_ops,
+ },
+};
+
+static int cxl_memdev_sparing_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ const struct cxl_mem_sparing_desc *desc,
+ u8 repair_inst)
+{
+ struct cxl_mem_sparing_context *cxl_sparing_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int ret;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &desc->repair_uuid);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_sparing_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sparing_ctx),
+ GFP_KERNEL);
+ if (!cxl_sparing_ctx)
+ return -ENOMEM;
+
+ *cxl_sparing_ctx = (struct cxl_mem_sparing_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .cxlmd = cxlmd,
+ .repair_type = desc->repair_type,
+ .instance = repair_inst++,
+ };
+ uuid_copy(&cxl_sparing_ctx->repair_uuid, &desc->repair_uuid);
+
+ ret = cxl_mem_sparing_get_attrbs(cxl_sparing_ctx);
+ if (ret)
+ return ret;
+
+ if ((cxl_sparing_ctx->cap_soft_sparing &&
+ cxl_sparing_ctx->cap_hard_sparing) ||
+ cxl_sparing_ctx->cap_soft_sparing)
+ cxl_sparing_ctx->persist_mode = 0;
+ else if (cxl_sparing_ctx->cap_hard_sparing)
+ cxl_sparing_ctx->persist_mode = 1;
+ else
+ return -EOPNOTSUPP;
+
+ ras_feature->ft_type = RAS_FEAT_MEM_REPAIR;
+ ras_feature->instance = cxl_sparing_ctx->instance;
+ ras_feature->mem_repair_ops = desc->repair_ops;
+ ras_feature->ctx = cxl_sparing_ctx;
+
+ return 0;
+}
+
+/*
+ * CXL memory soft PPR & hard PPR control
+ */
+struct cxl_ppr_context {
+ uuid_t repair_uuid;
+ u8 instance;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ u8 op_class;
+ u8 op_subclass;
+ bool cap_dpa;
+ bool cap_nib_mask;
+ bool media_accessible;
+ bool data_retained;
+ struct cxl_memdev *cxlmd;
+ enum edac_mem_repair_type repair_type;
+ bool persist_mode;
+ u64 dpa;
+ u32 nibble_mask;
+};
+
+/*
+ * See CXL rev 3.2 @8.2.10.7.2.1 Table 8-128 sPPR Feature Readable Attributes
+ *
+ * See CXL rev 3.2 @8.2.10.7.2.2 Table 8-131 hPPR Feature Readable Attributes
+ */
+
+#define CXL_PPR_OP_CAP_DEVICE_INITIATED BIT(0)
+#define CXL_PPR_OP_MODE_DEV_INITIATED BIT(0)
+
+#define CXL_PPR_FLAG_DPA_SUPPORT_MASK BIT(0)
+#define CXL_PPR_FLAG_NIB_SUPPORT_MASK BIT(1)
+#define CXL_PPR_FLAG_MEM_SPARING_EV_REC_SUPPORT_MASK BIT(2)
+#define CXL_PPR_FLAG_DEV_INITED_PPR_AT_BOOT_CAP_MASK BIT(3)
+
+#define CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK BIT(0)
+#define CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK BIT(2)
+
+#define CXL_PPR_SPARING_EV_REC_EN_MASK BIT(0)
+#define CXL_PPR_DEV_INITED_PPR_AT_BOOT_EN_MASK BIT(1)
+
+#define CXL_PPR_GET_CAP_DPA(flags) \
+ FIELD_GET(CXL_PPR_FLAG_DPA_SUPPORT_MASK, flags)
+#define CXL_PPR_GET_CAP_NIB_MASK(flags) \
+ FIELD_GET(CXL_PPR_FLAG_NIB_SUPPORT_MASK, flags)
+#define CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags) \
+ (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK, \
+ restriction_flags) ^ 1)
+#define CXL_PPR_GET_DATA_RETAINED(restriction_flags) \
+ (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK, \
+ restriction_flags) ^ 1)
+
+struct cxl_memdev_ppr_rd_attrbs {
+ struct cxl_memdev_repair_rd_attrbs_hdr hdr;
+ u8 ppr_flags;
+ __le16 restriction_flags;
+ u8 ppr_op_mode;
+} __packed;
+
+/*
+ * See CXL rev 3.2 @8.2.10.7.1.2 Table 8-118 sPPR Maintenance Input Payload
+ *
+ * See CXL rev 3.2 @8.2.10.7.1.3 Table 8-119 hPPR Maintenance Input Payload
+ */
+struct cxl_memdev_ppr_maintenance_attrbs {
+ u8 flags;
+ __le64 dpa;
+ u8 nibble_mask[3];
+} __packed;
+
+static int cxl_mem_ppr_get_attrbs(struct cxl_ppr_context *cxl_ppr_ctx)
+{
+ size_t rd_data_size = sizeof(struct cxl_memdev_ppr_rd_attrbs);
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ u16 restriction_flags;
+ size_t data_size;
+ u16 return_code;
+
+ struct cxl_memdev_ppr_rd_attrbs *rd_attrbs __free(kfree) =
+ kmalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &cxl_ppr_ctx->repair_uuid,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, &return_code);
+ if (!data_size)
+ return -EIO;
+
+ cxl_ppr_ctx->op_class = rd_attrbs->hdr.op_class;
+ cxl_ppr_ctx->op_subclass = rd_attrbs->hdr.op_subclass;
+ cxl_ppr_ctx->cap_dpa = CXL_PPR_GET_CAP_DPA(rd_attrbs->ppr_flags);
+ cxl_ppr_ctx->cap_nib_mask =
+ CXL_PPR_GET_CAP_NIB_MASK(rd_attrbs->ppr_flags);
+
+ restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags);
+ cxl_ppr_ctx->media_accessible =
+ CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags);
+ cxl_ppr_ctx->data_retained =
+ CXL_PPR_GET_DATA_RETAINED(restriction_flags);
+
+ return 0;
+}
+
+static int cxl_mem_perform_ppr(struct cxl_ppr_context *cxl_ppr_ctx)
+{
+ struct cxl_memdev_ppr_maintenance_attrbs maintenance_attrbs;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_mem_repair_attrbs attrbs = { 0 };
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_dpa_rwsem);
+ if (!dpa_lock)
+ return -EINTR;
+
+ if (!cxl_ppr_ctx->media_accessible || !cxl_ppr_ctx->data_retained) {
+ /* Memory to repair must be offline */
+ if (cxl_is_memdev_memory_online(cxlmd))
+ return -EBUSY;
+ } else {
+ if (cxl_is_memdev_memory_online(cxlmd)) {
+ /* Check memory to repair is from the current boot */
+ attrbs.repair_type = CXL_PPR;
+ attrbs.dpa = cxl_ppr_ctx->dpa;
+ attrbs.nibble_mask = cxl_ppr_ctx->nibble_mask;
+ if (!cxl_find_rec_dram(cxlmd, &attrbs) &&
+ !cxl_find_rec_gen_media(cxlmd, &attrbs))
+ return -EINVAL;
+ }
+ }
+
+ memset(&maintenance_attrbs, 0, sizeof(maintenance_attrbs));
+ maintenance_attrbs.flags = 0;
+ maintenance_attrbs.dpa = cpu_to_le64(cxl_ppr_ctx->dpa);
+ put_unaligned_le24(cxl_ppr_ctx->nibble_mask,
+ maintenance_attrbs.nibble_mask);
+
+ return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox,
+ cxl_ppr_ctx->op_class,
+ cxl_ppr_ctx->op_subclass,
+ &maintenance_attrbs,
+ sizeof(maintenance_attrbs));
+}
+
+static int cxl_ppr_get_repair_type(struct device *dev, void *drv_data,
+ const char **repair_type)
+{
+ *repair_type = edac_repair_type[EDAC_REPAIR_PPR];
+
+ return 0;
+}
+
+static int cxl_ppr_get_persist_mode(struct device *dev, void *drv_data,
+ bool *persist_mode)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *persist_mode = cxl_ppr_ctx->persist_mode;
+
+ return 0;
+}
+
+static int cxl_get_ppr_safe_when_in_use(struct device *dev, void *drv_data,
+ bool *safe)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *safe = cxl_ppr_ctx->media_accessible & cxl_ppr_ctx->data_retained;
+
+ return 0;
+}
+
+static int cxl_ppr_get_min_dpa(struct device *dev, void *drv_data, u64 *min_dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *min_dpa = cxlds->dpa_res.start;
+
+ return 0;
+}
+
+static int cxl_ppr_get_max_dpa(struct device *dev, void *drv_data, u64 *max_dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *max_dpa = cxlds->dpa_res.end;
+
+ return 0;
+}
+
+static int cxl_ppr_get_dpa(struct device *dev, void *drv_data, u64 *dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *dpa = cxl_ppr_ctx->dpa;
+
+ return 0;
+}
+
+static int cxl_ppr_set_dpa(struct device *dev, void *drv_data, u64 dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ return -EINVAL;
+
+ cxl_ppr_ctx->dpa = dpa;
+
+ return 0;
+}
+
+static int cxl_ppr_get_nibble_mask(struct device *dev, void *drv_data,
+ u32 *nibble_mask)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *nibble_mask = cxl_ppr_ctx->nibble_mask;
+
+ return 0;
+}
+
+static int cxl_ppr_set_nibble_mask(struct device *dev, void *drv_data,
+ u32 nibble_mask)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ cxl_ppr_ctx->nibble_mask = nibble_mask;
+
+ return 0;
+}
+
+static int cxl_do_ppr(struct device *dev, void *drv_data, u32 val)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ if (!cxl_ppr_ctx->dpa || val != EDAC_DO_MEM_REPAIR)
+ return -EINVAL;
+
+ return cxl_mem_perform_ppr(cxl_ppr_ctx);
+}
+
+static const struct edac_mem_repair_ops cxl_sppr_ops = {
+ .get_repair_type = cxl_ppr_get_repair_type,
+ .get_persist_mode = cxl_ppr_get_persist_mode,
+ .get_repair_safe_when_in_use = cxl_get_ppr_safe_when_in_use,
+ .get_min_dpa = cxl_ppr_get_min_dpa,
+ .get_max_dpa = cxl_ppr_get_max_dpa,
+ .get_dpa = cxl_ppr_get_dpa,
+ .set_dpa = cxl_ppr_set_dpa,
+ .get_nibble_mask = cxl_ppr_get_nibble_mask,
+ .set_nibble_mask = cxl_ppr_set_nibble_mask,
+ .do_repair = cxl_do_ppr,
+};
+
+static int cxl_memdev_soft_ppr_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ u8 repair_inst)
+{
+ struct cxl_ppr_context *cxl_sppr_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int ret;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_SPPR_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_sppr_ctx =
+ devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sppr_ctx), GFP_KERNEL);
+ if (!cxl_sppr_ctx)
+ return -ENOMEM;
+
+ *cxl_sppr_ctx = (struct cxl_ppr_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .cxlmd = cxlmd,
+ .repair_type = EDAC_REPAIR_PPR,
+ .persist_mode = 0,
+ .instance = repair_inst,
+ };
+ uuid_copy(&cxl_sppr_ctx->repair_uuid, &CXL_FEAT_SPPR_UUID);
+
+ ret = cxl_mem_ppr_get_attrbs(cxl_sppr_ctx);
+ if (ret)
+ return ret;
+
+ ras_feature->ft_type = RAS_FEAT_MEM_REPAIR;
+ ras_feature->instance = cxl_sppr_ctx->instance;
+ ras_feature->mem_repair_ops = &cxl_sppr_ops;
+ ras_feature->ctx = cxl_sppr_ctx;
+
+ return 0;
+}
+
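+/*
+ * Register the optional scrub, ECS, memory sparing and soft PPR features of
+ * a memdev with the EDAC device framework. Features the device does not
+ * support (-EOPNOTSUPP) are skipped rather than treated as fatal.
+ */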
+int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd)
+{
+ struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES];
+ int num_ras_features = 0;
+ u8 repair_inst = 0;
+ int rc;
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_SCRUB)) {
+ rc = cxl_memdev_scrub_init(cxlmd, &ras_features[num_ras_features], 0);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP)
+ num_ras_features++;
+ }
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_ECS)) {
+ rc = cxl_memdev_ecs_init(cxlmd, &ras_features[num_ras_features]);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP)
+ num_ras_features++;
+ }
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR)) {
+ for (int i = 0; i < CXL_MEM_SPARING_MAX; i++) {
+ rc = cxl_memdev_sparing_init(cxlmd,
+ &ras_features[num_ras_features],
+ &mem_sparing_desc[i], repair_inst);
+ if (rc == -EOPNOTSUPP)
+ continue;
+ if (rc < 0)
+ return rc;
+
+ repair_inst++;
+ num_ras_features++;
+ }
+
+ rc = cxl_memdev_soft_ppr_init(cxlmd, &ras_features[num_ras_features],
+ repair_inst);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP) {
+ repair_inst++;
+ num_ras_features++;
+ }
+
+ if (repair_inst) {
+ struct cxl_mem_err_rec *array_rec =
+ devm_kzalloc(&cxlmd->dev, sizeof(*array_rec),
+ GFP_KERNEL);
+ if (!array_rec)
+ return -ENOMEM;
+
+ xa_init(&array_rec->rec_gen_media);
+ xa_init(&array_rec->rec_dram);
+ cxlmd->err_rec_array = array_rec;
+ }
+ }
+
+ if (!num_ras_features)
+ return -EINVAL;
+
+ char *cxl_dev_name __free(kfree) =
+ kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlmd->dev));
+ if (!cxl_dev_name)
+ return -ENOMEM;
+
+ return edac_dev_register(&cxlmd->dev, cxl_dev_name, NULL,
+ num_ras_features, ras_features);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_register, "CXL");
+
+int devm_cxl_region_edac_register(struct cxl_region *cxlr)
+{
+ struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES];
+ int num_ras_features = 0;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_SCRUB))
+ return 0;
+
+ rc = cxl_region_scrub_init(cxlr, &ras_features[num_ras_features], 0);
+ if (rc < 0)
+ return rc;
+
+ num_ras_features++;
+
+ char *cxl_dev_name __free(kfree) =
+ kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlr->dev));
+ if (!cxl_dev_name)
+ return -ENOMEM;
+
+ return edac_dev_register(&cxlr->dev, cxl_dev_name, NULL,
+ num_ras_features, ras_features);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_region_edac_register, "CXL");
+
+void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec_gen_media;
+ struct cxl_event_dram *rec_dram;
+ unsigned long index;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return;
+
+ xa_for_each(&array_rec->rec_dram, index, rec_dram)
+ kfree(rec_dram);
+ xa_destroy(&array_rec->rec_dram);
+
+ xa_for_each(&array_rec->rec_gen_media, index, rec_gen_media)
+ kfree(rec_gen_media);
+ xa_destroy(&array_rec->rec_gen_media);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_release, "CXL");
diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c
index f4daefe3180e..6f2eae1eb126 100644
--- a/drivers/cxl/core/features.c
+++ b/drivers/cxl/core/features.c
@@ -9,6 +9,16 @@
#include "core.h"
#include "cxlmem.h"
+/**
+ * DOC: cxl features
+ *
+ * CXL Features:
+ * A CXL device that includes a mailbox supports commands that allow
+ * listing, getting, and setting of optionally defined features such
+ * as memory sparing or post package repair (PPR). Vendors may define
+ * custom features for the device.
+ */
+
/* All the features below are exclusive to the kernel */
static const uuid_t cxl_exclusive_feats[] = {
CXL_FEAT_PATROL_SCRUB_UUID,
@@ -36,7 +46,7 @@ static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
return is_cxl_feature_exclusive_by_uuid(&entry->uuid);
}
-inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
+struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
{
return cxlds->cxlfs;
}
@@ -355,17 +365,11 @@ static void cxlctl_close_uctx(struct fwctl_uctx *uctx)
{
}
-static struct cxl_feat_entry *
-get_support_feature_info(struct cxl_features_state *cxlfs,
- const struct fwctl_rpc_cxl *rpc_in)
+struct cxl_feat_entry *
+cxl_feature_info(struct cxl_features_state *cxlfs,
+ const uuid_t *uuid)
{
struct cxl_feat_entry *feat;
- const uuid_t *uuid;
-
- if (rpc_in->op_size < sizeof(uuid))
- return ERR_PTR(-EINVAL);
-
- uuid = &rpc_in->set_feat_in.uuid;
for (int i = 0; i < cxlfs->entries->num_features; i++) {
feat = &cxlfs->entries->ent[i];
@@ -416,14 +420,6 @@ static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs,
rpc_out->size = struct_size(feat_out, ents, requested);
feat_out = &rpc_out->get_sup_feats_out;
- if (requested == 0) {
- feat_out->num_entries = cpu_to_le16(requested);
- feat_out->supported_feats =
- cpu_to_le16(cxlfs->entries->num_features);
- rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
- *out_len = out_size;
- return no_free_ptr(rpc_out);
- }
for (i = start, pos = &feat_out->ents[0];
i < cxlfs->entries->num_features; i++, pos++) {
@@ -528,13 +524,13 @@ static void *cxlctl_set_feature(struct cxl_features_state *cxlfs,
rc = cxl_set_feature(cxl_mbox, &feat_in->uuid,
feat_in->version, feat_in->feat_data,
data_size, flags, offset, &return_code);
+ *out_len = sizeof(*rpc_out);
if (rc) {
rpc_out->retval = return_code;
return no_free_ptr(rpc_out);
}
rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
- *out_len = sizeof(*rpc_out);
return no_free_ptr(rpc_out);
}
@@ -547,7 +543,10 @@ static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
struct cxl_feat_entry *feat;
u32 flags;
- feat = get_support_feature_info(cxlfs, rpc_in);
+ if (rpc_in->op_size < sizeof(uuid_t))
+		return false;
+
+ feat = cxl_feature_info(cxlfs, &rpc_in->set_feat_in.uuid);
if (IS_ERR(feat))
return false;
@@ -614,11 +613,7 @@ static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs,
switch (opcode) {
case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
case CXL_MBOX_OP_GET_FEATURE:
- if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
- return false;
- if (scope >= FWCTL_RPC_CONFIGURATION)
- return true;
- return false;
+ return cxl_mbox->feat_cap >= CXL_FEATURES_RO;
case CXL_MBOX_OP_SET_FEATURE:
if (cxl_mbox->feat_cap < CXL_FEATURES_RW)
return false;
@@ -677,7 +672,7 @@ static void free_memdev_fwctl(void *_fwctl_dev)
fwctl_put(fwctl_dev);
}
-int devm_cxl_setup_fwctl(struct cxl_memdev *cxlmd)
+int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_features_state *cxlfs;
@@ -700,7 +695,7 @@ int devm_cxl_setup_fwctl(struct cxl_memdev *cxlmd)
if (rc)
return rc;
- return devm_add_action_or_reset(&cxlmd->dev, free_memdev_fwctl,
+ return devm_add_action_or_reset(host, free_memdev_fwctl,
no_free_ptr(fwctl_dev));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fwctl, "CXL");
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 70cae4ebf8a4..ab1007495f6b 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -34,7 +34,8 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
if (rc)
return rc;
- dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
+ dev_dbg(port->uport_dev, "%s added to %s\n",
+ dev_name(&cxld->dev), dev_name(&port->dev));
return 0;
}
@@ -603,7 +604,7 @@ int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
return 0;
}
-static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@ -666,15 +667,15 @@ static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long lon
skip = res->start - skip_start;
if (size > avail) {
- dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
- res->name, &avail);
+ dev_dbg(dev, "%llu exceeds available %s capacity: %llu\n", size,
+ res->name, (u64)avail);
return -ENOSPC;
}
return __cxl_dpa_reserve(cxled, start, size, skip);
}
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
struct cxl_port *port = cxled_to_port(cxled);
int rc;
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index d72764056ce6..2689e6453c5a 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -922,12 +922,19 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
hpa_alias = hpa - cache_size;
}
- if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
+ if (event_type == CXL_CPER_EVENT_GEN_MEDIA) {
+ if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt))
+ dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n");
+
trace_cxl_general_media(cxlmd, type, cxlr, hpa,
hpa_alias, &evt->gen_media);
- else if (event_type == CXL_CPER_EVENT_DRAM)
+ } else if (event_type == CXL_CPER_EVENT_DRAM) {
+ if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt))
+ dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n");
+
trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
&evt->dram);
+ }
}
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index a16a5886d40a..f88a13adf7fa 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -27,6 +27,7 @@ static void cxl_memdev_release(struct device *dev)
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
ida_free(&cxl_memdev_ida, cxlmd->id);
+ devm_cxl_memdev_edac_release(cxlmd);
kfree(cxlmd);
}
@@ -153,8 +154,8 @@ static ssize_t security_state_show(struct device *dev,
return sysfs_emit(buf, "frozen\n");
if (state & CXL_PMEM_SEC_STATE_LOCKED)
return sysfs_emit(buf, "locked\n");
- else
- return sysfs_emit(buf, "unlocked\n");
+
+ return sysfs_emit(buf, "unlocked\n");
}
static struct device_attribute dev_attr_security_state =
__ATTR(state, 0444, security_state_show, NULL);
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 96fecb799cbc..b50551601c2e 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -415,17 +415,20 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
*/
if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
return devm_cxl_enable_mem(&port->dev, cxlds);
- else if (!hdm)
- return -ENODEV;
- root = to_cxl_port(port->dev.parent);
- while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
- root = to_cxl_port(root->dev.parent);
- if (!is_cxl_root(root)) {
- dev_err(dev, "Failed to acquire root port for HDM enable\n");
+ /*
+ * If the HDM Decoder Capability does not exist and DVSEC was
+ * not setup, the DVSEC based emulation cannot be used.
+ */
+ if (!hdm)
return -ENODEV;
- }
+ /* The HDM Decoder Capability exists but is globally disabled. */
+
+ /*
+ * If the DVSEC CXL Range registers are not enabled, just
+ * enable and use the HDM Decoder Capability registers.
+ */
if (!info->mem_enabled) {
rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
if (rc)
@@ -434,6 +437,26 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
return devm_cxl_enable_mem(&port->dev, cxlds);
}
+ /*
+ * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
+ * [High,Low] when HDM operation is enabled the range register values
+ * are ignored by the device, but the spec also recommends matching the
+ * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
+ * are expected even though Linux does not require or maintain that
+ * match. Check if at least one DVSEC range is enabled and allowed by
+ * the platform. That is, the DVSEC range must be covered by a locked
+ * platform window (CFMWS). Fail otherwise as the endpoint's decoders
+ * cannot be used.
+ */
+
+ root = to_cxl_port(port->dev.parent);
+ while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
+ root = to_cxl_port(root->dev.parent);
+ if (!is_cxl_root(root)) {
+ dev_err(dev, "Failed to acquire root port for HDM enable\n");
+ return -ENODEV;
+ }
+
for (i = 0, allowed = 0; i < info->ranges; i++) {
struct device *cxld_dev;
@@ -453,15 +476,6 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
return -ENXIO;
}
- /*
- * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
- * [High,Low] when HDM operation is enabled the range register values
- * are ignored by the device, but the spec also recommends matching the
- * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
- * are expected even though Linux does not require or maintain that
- * match. If at least one DVSEC range is enabled and allowed, skip HDM
- * Decoder Capability Enable.
- */
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, "CXL");
@@ -1072,14 +1086,20 @@ int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
#define GPF_TIMEOUT_BASE_MAX 2
#define GPF_TIMEOUT_SCALE_MAX 7 /* 10 seconds */
-u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port)
+u16 cxl_gpf_get_dvsec(struct device *dev)
{
+ struct pci_dev *pdev;
+ bool is_port = true;
u16 dvsec;
if (!dev_is_pci(dev))
return 0;
- dvsec = pci_find_dvsec_capability(to_pci_dev(dev), PCI_VENDOR_ID_CXL,
+ pdev = to_pci_dev(dev);
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT)
+ is_port = false;
+
+ dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
is_port ? CXL_DVSEC_PORT_GPF : CXL_DVSEC_DEVICE_GPF);
if (!dvsec)
dev_warn(dev, "%s GPF DVSEC not present\n",
@@ -1128,26 +1148,24 @@ static int update_gpf_port_dvsec(struct pci_dev *pdev, int dvsec, int phase)
return rc;
}
-int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port)
+int cxl_gpf_port_setup(struct cxl_dport *dport)
{
- struct pci_dev *pdev;
-
- if (!port)
+ if (!dport)
return -EINVAL;
- if (!port->gpf_dvsec) {
+ if (!dport->gpf_dvsec) {
+ struct pci_dev *pdev;
int dvsec;
- dvsec = cxl_gpf_get_dvsec(dport_dev, true);
+ dvsec = cxl_gpf_get_dvsec(dport->dport_dev);
if (!dvsec)
return -EINVAL;
- port->gpf_dvsec = dvsec;
+ dport->gpf_dvsec = dvsec;
+ pdev = to_pci_dev(dport->dport_dev);
+ update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 1);
+ update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 2);
}
- pdev = to_pci_dev(dport_dev);
- update_gpf_port_dvsec(pdev, port->gpf_dvsec, 1);
- update_gpf_port_dvsec(pdev, port->gpf_dvsec, 2);
-
return 0;
}
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 0fd6646c1a2e..eb46c6764d20 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -602,17 +602,19 @@ struct cxl_port *to_cxl_port(const struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, "CXL");
+struct cxl_port *parent_port_of(struct cxl_port *port)
+{
+ if (!port || !port->parent_dport)
+ return NULL;
+ return port->parent_dport->port;
+}
+
static void unregister_port(void *_port)
{
struct cxl_port *port = _port;
- struct cxl_port *parent;
+ struct cxl_port *parent = parent_port_of(port);
struct device *lock_dev;
- if (is_cxl_root(port))
- parent = NULL;
- else
- parent = to_cxl_port(port->dev.parent);
-
/*
* CXL root port's and the first level of ports are unregistered
* under the platform firmware device lock, all other ports are
@@ -1035,15 +1037,6 @@ struct cxl_root *find_cxl_root(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, "CXL");
-void put_cxl_root(struct cxl_root *cxl_root)
-{
- if (!cxl_root)
- return;
-
- put_device(&cxl_root->port.dev);
-}
-EXPORT_SYMBOL_NS_GPL(put_cxl_root, "CXL");
-
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
struct cxl_dport *dport;
@@ -1678,7 +1671,7 @@ retry:
if (rc && rc != -EBUSY)
return rc;
- cxl_gpf_port_setup(dport_dev, port);
+ cxl_gpf_port_setup(dport);
/* Any more ports to add between this one and the root? */
if (!dev_is_cxl_root_child(&port->dev))
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index c3f4dc244df7..6e5e1460068d 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -231,11 +231,10 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
&cxlr->dev,
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
return 0;
- } else {
- dev_WARN(&cxlr->dev,
- "Failed to synchronize CPU cache state\n");
- return -ENXIO;
}
+ dev_WARN(&cxlr->dev,
+ "Failed to synchronize CPU cache state\n");
+ return -ENXIO;
}
cpu_cache_invalidate_memregion(IORES_DESC_CXL);
@@ -865,10 +864,23 @@ static int match_auto_decoder(struct device *dev, const void *data)
return 0;
}
+/**
+ * cxl_port_pick_region_decoder() - assign or lookup a decoder for a region
+ * @port: a port in the ancestry of the endpoint implied by @cxled
+ * @cxled: endpoint decoder to be, or currently, mapped by @port
+ * @cxlr: region whose decode at @port is being established, or validated
+ *
+ * In the region creation path cxl_port_pick_region_decoder() is an
+ * allocator to find a free decoder. In the region assembly path, it is
+ * recalling the decoder that platform firmware picked for validation
+ * purposes.
+ *
+ * The result is recorded in a 'struct cxl_region_ref' in @port.
+ */
static struct cxl_decoder *
-cxl_region_find_decoder(struct cxl_port *port,
- struct cxl_endpoint_decoder *cxled,
- struct cxl_region *cxlr)
+cxl_port_pick_region_decoder(struct cxl_port *port,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region *cxlr)
{
struct device *dev;
@@ -916,7 +928,8 @@ static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
static struct cxl_region_ref *
alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled)
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_decoder *cxld)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_region_ref *cxl_rr, *iter;
@@ -930,9 +943,6 @@ alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
continue;
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
- struct cxl_decoder *cxld;
-
- cxld = cxl_region_find_decoder(port, cxled, cxlr);
if (auto_order_ok(port, iter->region, cxld))
continue;
}
@@ -1014,19 +1024,11 @@ static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
return 0;
}
-static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled,
- struct cxl_region_ref *cxl_rr)
+static int cxl_rr_assign_decoder(struct cxl_port *port, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region_ref *cxl_rr,
+ struct cxl_decoder *cxld)
{
- struct cxl_decoder *cxld;
-
- cxld = cxl_region_find_decoder(port, cxled, cxlr);
- if (!cxld) {
- dev_dbg(&cxlr->dev, "%s: no decoder available\n",
- dev_name(&port->dev));
- return -EBUSY;
- }
-
if (cxld->region) {
dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
dev_name(&port->dev), dev_name(&cxld->dev),
@@ -1117,7 +1119,16 @@ static int cxl_port_attach_region(struct cxl_port *port,
nr_targets_inc = true;
}
} else {
- cxl_rr = alloc_region_ref(port, cxlr, cxled);
+ struct cxl_decoder *cxld;
+
+ cxld = cxl_port_pick_region_decoder(port, cxled, cxlr);
+ if (!cxld) {
+ dev_dbg(&cxlr->dev, "%s: no decoder available\n",
+ dev_name(&port->dev));
+ return -EBUSY;
+ }
+
+ cxl_rr = alloc_region_ref(port, cxlr, cxled, cxld);
if (IS_ERR(cxl_rr)) {
dev_dbg(&cxlr->dev,
"%s: failed to allocate region reference\n",
@@ -1126,7 +1137,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
}
nr_targets_inc = true;
- rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
+ rc = cxl_rr_assign_decoder(port, cxlr, cxled, cxl_rr, cxld);
if (rc)
goto out_erase;
}
@@ -1446,7 +1457,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
if (cxld->interleave_ways != iw ||
- cxld->interleave_granularity != ig ||
+ (iw > 1 && cxld->interleave_granularity != ig) ||
!region_res_match_cxl_range(p, &cxld->hpa_range) ||
((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
dev_err(&cxlr->dev,
@@ -1748,13 +1759,6 @@ static int cmp_interleave_pos(const void *a, const void *b)
return cxled_a->pos - cxled_b->pos;
}
-static struct cxl_port *next_port(struct cxl_port *port)
-{
- if (!port->parent_dport)
- return NULL;
- return port->parent_dport->port;
-}
-
static int match_switch_decoder_by_range(struct device *dev,
const void *data)
{
@@ -1781,7 +1785,7 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
struct device *dev;
int rc = -ENXIO;
- parent = next_port(port);
+ parent = parent_port_of(port);
if (!parent)
return rc;
@@ -1805,6 +1809,13 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
}
put_device(dev);
+ if (rc)
+ dev_err(port->uport_dev,
+ "failed to find %s:%s in target list of %s\n",
+ dev_name(&port->dev),
+ dev_name(port->parent_dport->dport_dev),
+ dev_name(&cxlsd->cxld.dev));
+
return rc;
}
@@ -1861,7 +1872,7 @@ static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
*/
/* Iterate from endpoint to root_port refining the position */
- for (iter = port; iter; iter = next_port(iter)) {
+ for (iter = port; iter; iter = parent_port_of(iter)) {
if (is_cxl_root(iter))
break;
@@ -1940,7 +1951,9 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "region already active\n");
return -EBUSY;
- } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ }
+
+ if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "interleave config missing\n");
return -ENXIO;
}
@@ -2160,6 +2173,12 @@ static int attach_target(struct cxl_region *cxlr,
rc = cxl_region_attach(cxlr, cxled, pos);
up_read(&cxl_dpa_rwsem);
up_write(&cxl_region_rwsem);
+
+ if (rc)
+ dev_warn(cxled->cxld.dev.parent,
+ "failed to attach %s to %s: %d\n",
+ dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc);
+
return rc;
}
@@ -3196,20 +3215,49 @@ err:
return rc;
}
-static int match_root_decoder_by_range(struct device *dev,
- const void *data)
+static int match_decoder_by_range(struct device *dev, const void *data)
{
const struct range *r1, *r2 = data;
- struct cxl_root_decoder *cxlrd;
+ struct cxl_decoder *cxld;
- if (!is_root_decoder(dev))
+ if (!is_switch_decoder(dev))
return 0;
- cxlrd = to_cxl_root_decoder(dev);
- r1 = &cxlrd->cxlsd.cxld.hpa_range;
+ cxld = to_cxl_decoder(dev);
+ r1 = &cxld->hpa_range;
return range_contains(r1, r2);
}
+static struct cxl_decoder *
+cxl_port_find_switch_decoder(struct cxl_port *port, struct range *hpa)
+{
+ struct device *cxld_dev = device_find_child(&port->dev, hpa,
+ match_decoder_by_range);
+
+ return cxld_dev ? to_cxl_decoder(cxld_dev) : NULL;
+}
+
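+/*
+ * Walk up from the endpoint's port to the CXL root and find the root
+ * decoder whose HPA window contains the endpoint decoder's range.
+ */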
+static struct cxl_root_decoder *
+cxl_find_root_decoder(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
+ struct cxl_decoder *root, *cxld = &cxled->cxld;
+ struct range *hpa = &cxld->hpa_range;
+
+ root = cxl_port_find_switch_decoder(&cxl_root->port, hpa);
+ if (!root) {
+ dev_err(cxlmd->dev.parent,
+ "%s:%s no CXL window for range %#llx:%#llx\n",
+ dev_name(&cxlmd->dev), dev_name(&cxld->dev),
+ cxld->hpa_range.start, cxld->hpa_range.end);
+ return NULL;
+ }
+
+ return to_cxl_root_decoder(&root->dev);
+}
+
static int match_region_by_range(struct device *dev, const void *data)
{
struct cxl_region_params *p;
@@ -3376,47 +3424,45 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
return cxlr;
}
-int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
+static struct cxl_region *
+cxl_find_region_by_range(struct cxl_root_decoder *cxlrd, struct range *hpa)
+{
+ struct device *region_dev;
+
+ region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
+ match_region_by_range);
+ if (!region_dev)
+ return NULL;
+
+ return to_cxl_region(region_dev);
+}
+
+int cxl_add_to_region(struct cxl_endpoint_decoder *cxled)
{
- struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct range *hpa = &cxled->cxld.hpa_range;
- struct cxl_decoder *cxld = &cxled->cxld;
- struct device *cxlrd_dev, *region_dev;
- struct cxl_root_decoder *cxlrd;
struct cxl_region_params *p;
- struct cxl_region *cxlr;
bool attach = false;
int rc;
- cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
- match_root_decoder_by_range);
- if (!cxlrd_dev) {
- dev_err(cxlmd->dev.parent,
- "%s:%s no CXL window for range %#llx:%#llx\n",
- dev_name(&cxlmd->dev), dev_name(&cxld->dev),
- cxld->hpa_range.start, cxld->hpa_range.end);
+ struct cxl_root_decoder *cxlrd __free(put_cxl_root_decoder) =
+ cxl_find_root_decoder(cxled);
+ if (!cxlrd)
return -ENXIO;
- }
-
- cxlrd = to_cxl_root_decoder(cxlrd_dev);
/*
* Ensure that if multiple threads race to construct_region() for @hpa
* one does the construction and the others add to that.
*/
mutex_lock(&cxlrd->range_lock);
- region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
- match_region_by_range);
- if (!region_dev) {
+ struct cxl_region *cxlr __free(put_cxl_region) =
+ cxl_find_region_by_range(cxlrd, hpa);
+ if (!cxlr)
cxlr = construct_region(cxlrd, cxled);
- region_dev = &cxlr->dev;
- } else
- cxlr = to_cxl_region(region_dev);
mutex_unlock(&cxlrd->range_lock);
rc = PTR_ERR_OR_ZERO(cxlr);
if (rc)
- goto out;
+ return rc;
attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
@@ -3436,9 +3482,6 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
p->res);
}
- put_device(region_dev);
-out:
- put_device(cxlrd_dev);
return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, "CXL");
@@ -3537,8 +3580,18 @@ out:
switch (cxlr->mode) {
case CXL_PARTMODE_PMEM:
+ rc = devm_cxl_region_edac_register(cxlr);
+ if (rc)
+ dev_dbg(&cxlr->dev, "CXL EDAC registration for region_id=%d failed\n",
+ cxlr->id);
+
return devm_cxl_add_pmem_region(cxlr);
case CXL_PARTMODE_RAM:
+ rc = devm_cxl_region_edac_register(cxlr);
+ if (rc)
+ dev_dbg(&cxlr->dev, "CXL EDAC registration for region_id=%d failed\n",
+ cxlr->id);
+
/*
		 * The region cannot be managed by CXL if any portion of
* it is already online as 'System RAM'
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 117c2e94c761..5ca7b0eed568 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -581,7 +581,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
resource_size_t rcrb = ri->base;
void __iomem *addr;
u32 bar0, bar1;
- u16 cmd;
u32 id;
if (which == CXL_RCRB_UPSTREAM)
@@ -603,7 +602,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
}
id = readl(addr + PCI_VENDOR_ID);
- cmd = readw(addr + PCI_COMMAND);
bar0 = readl(addr + PCI_BASE_ADDRESS_0);
bar1 = readl(addr + PCI_BASE_ADDRESS_1);
iounmap(addr);
@@ -618,8 +616,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
dev_err(dev, "Failed to access Downstream Port RCRB\n");
return CXL_RESOURCE_NONE;
}
- if (!(cmd & PCI_COMMAND_MEMORY))
- return CXL_RESOURCE_NONE;
/* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */
if (bar0 & (PCI_BASE_ADDRESS_MEM_TYPE_1M | PCI_BASE_ADDRESS_SPACE_IO))
return CXL_RESOURCE_NONE;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index be8a7dc77719..3f1695c96abc 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -592,7 +592,6 @@ struct cxl_dax_region {
* @cdat: Cached CDAT data
* @cdat_available: Should a CDAT attribute be available in sysfs
* @pci_latency: Upstream latency in picoseconds
- * @gpf_dvsec: Cached GPF port DVSEC
*/
struct cxl_port {
struct device dev;
@@ -616,7 +615,6 @@ struct cxl_port {
} cdat;
bool cdat_available;
long pci_latency;
- int gpf_dvsec;
};
/**
@@ -664,6 +662,7 @@ struct cxl_rcrb_info {
* @regs: Dport parsed register blocks
* @coord: access coordinates (bandwidth and latency performance attributes)
* @link_latency: calculated PCIe downstream latency
+ * @gpf_dvsec: Cached GPF port DVSEC
*/
struct cxl_dport {
struct device *dport_dev;
@@ -675,6 +674,7 @@ struct cxl_dport {
struct cxl_regs regs;
struct access_coordinate coord[ACCESS_COORDINATE_MAX];
long link_latency;
+ int gpf_dvsec;
};
/**
@@ -724,6 +724,7 @@ static inline bool is_cxl_root(struct cxl_port *port)
int cxl_num_decoders_committed(struct cxl_port *port);
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
+struct cxl_port *parent_port_of(struct cxl_port *port);
void cxl_port_commit_reap(struct cxl_decoder *cxld);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
@@ -736,10 +737,12 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
struct cxl_root *devm_cxl_add_root(struct device *host,
const struct cxl_root_ops *ops);
struct cxl_root *find_cxl_root(struct cxl_port *port);
-void put_cxl_root(struct cxl_root *cxl_root);
-DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T))
+DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_device(&_T->port.dev))
DEFINE_FREE(put_cxl_port, struct cxl_port *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
+DEFINE_FREE(put_cxl_root_decoder, struct cxl_root_decoder *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
+DEFINE_FREE(put_cxl_region, struct cxl_region *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
+
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
@@ -856,8 +859,7 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port);
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
-int cxl_add_to_region(struct cxl_port *root,
- struct cxl_endpoint_decoder *cxled);
+int cxl_add_to_region(struct cxl_endpoint_decoder *cxled);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
#else
@@ -869,8 +871,7 @@ static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
return NULL;
}
-static inline int cxl_add_to_region(struct cxl_port *root,
- struct cxl_endpoint_decoder *cxled)
+static inline int cxl_add_to_region(struct cxl_endpoint_decoder *cxled)
{
return 0;
}
@@ -910,6 +911,16 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
#define __mock static
#endif
-u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port);
+u16 cxl_gpf_get_dvsec(struct device *dev);
+
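+/*
+ * Interruptible read-lock helper for scope-based cleanup: returns the rwsem
+ * on success so it can be released via __free(rwsem_read_release), or NULL
+ * if the acquisition was interrupted by a signal.
+ */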
+static inline struct rw_semaphore *rwsem_read_intr_acquire(struct rw_semaphore *rwsem)
+{
+ if (down_read_interruptible(rwsem))
+ return NULL;
+
+ return rwsem;
+}
+
+DEFINE_FREE(rwsem_read_release, struct rw_semaphore *, if (_T) up_read(_T))
#endif /* __CXL_H__ */
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 3ec6b906371b..551b0ba2caa1 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -45,6 +45,11 @@
* @endpoint: connection to the CXL port topology for this memory device
* @id: id number of this memdev instance.
* @depth: endpoint port depth
+ * @scrub_cycle: current scrub cycle set for this device
+ * @scrub_region_id: id number of the region backed by this device (if any)
+ *	for which the current scrub cycle is set
+ * @err_rec_array: List of xarrays that store the memdev error records used
+ *	to check that the attributes for a memory repair operation are from
+ *	the current boot.
*/
struct cxl_memdev {
struct device dev;
@@ -56,6 +61,9 @@ struct cxl_memdev {
struct cxl_port *endpoint;
int id;
int depth;
+ u8 scrub_cycle;
+ int scrub_region_id;
+ void *err_rec_array;
};
static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
@@ -527,6 +535,7 @@ enum cxl_opcode {
CXL_MBOX_OP_GET_SUPPORTED_FEATURES = 0x0500,
CXL_MBOX_OP_GET_FEATURE = 0x0501,
CXL_MBOX_OP_SET_FEATURE = 0x0502,
+ CXL_MBOX_OP_DO_MAINTENANCE = 0x0600,
CXL_MBOX_OP_IDENTIFY = 0x4000,
CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100,
CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
@@ -853,6 +862,27 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa);
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa);
+#ifdef CONFIG_CXL_EDAC_MEM_FEATURES
+int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd);
+int devm_cxl_region_edac_register(struct cxl_region *cxlr);
+int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt);
+int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt);
+void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd);
+#else
+static inline int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd)
+{ return 0; }
+static inline int devm_cxl_region_edac_register(struct cxl_region *cxlr)
+{ return 0; }
+static inline int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd,
+ union cxl_event *evt)
+{ return 0; }
+static inline int cxl_store_rec_dram(struct cxl_memdev *cxlmd,
+ union cxl_event *evt)
+{ return 0; }
+static inline void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd)
+{ return; }
+#endif
+
#ifdef CONFIG_CXL_SUSPEND
void cxl_mem_active_inc(void);
void cxl_mem_active_dec(void);
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 9675243bd05b..6e6777b7bafb 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -180,6 +180,10 @@ static int cxl_mem_probe(struct device *dev)
return rc;
}
+ rc = devm_cxl_memdev_edac_register(cxlmd);
+ if (rc)
+ dev_dbg(dev, "CXL memdev EDAC registration failed rc=%d\n", rc);
+
/*
* The kernel may be operating out of CXL memory on this device,
* there is no spec defined way to determine whether this device
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 7b14a154463c..785aa2af5eaa 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -1018,7 +1018,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- rc = devm_cxl_setup_fwctl(cxlmd);
+ rc = devm_cxl_setup_fwctl(&pdev->dev, cxlmd);
if (rc)
dev_dbg(&pdev->dev, "No CXL FWCTL setup\n");
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index d061fe3d2b86..e197883690ef 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -108,7 +108,7 @@ static void cxl_nvdimm_arm_dirty_shutdown_tracking(struct cxl_nvdimm *cxl_nvd)
return;
}
- if (!cxl_gpf_get_dvsec(cxlds->dev, false))
+ if (!cxl_gpf_get_dvsec(cxlds->dev))
return;
if (cxl_get_dirty_count(mds, &count)) {
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index a35fc5552845..fe4b593331da 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -30,7 +30,7 @@ static void schedule_detach(void *cxlmd)
schedule_cxl_memdev_detach(cxlmd);
}
-static int discover_region(struct device *dev, void *root)
+static int discover_region(struct device *dev, void *unused)
{
struct cxl_endpoint_decoder *cxled;
int rc;
@@ -49,7 +49,7 @@ static int discover_region(struct device *dev, void *root)
* Region enumeration is opportunistic, if this add-event fails,
* continue to the next endpoint decoder.
*/
- rc = cxl_add_to_region(root, cxled);
+ rc = cxl_add_to_region(cxled);
if (rc)
dev_dbg(dev, "failed to add to region: %#llx-%#llx\n",
cxled->cxld.hpa_range.start, cxled->cxld.hpa_range.end);
@@ -95,7 +95,6 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_hdm *cxlhdm;
- struct cxl_port *root;
int rc;
rc = cxl_dvsec_rr_decode(cxlds, &info);
@@ -127,18 +126,10 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
return rc;
/*
- * This can't fail in practice as CXL root exit unregisters all
- * descendant ports and that in turn synchronizes with cxl_port_probe()
- */
- struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
-
- root = &cxl_root->port;
-
- /*
* Now that all endpoint decoders are successfully enumerated, try to
* assemble regions from committed decoders
*/
- device_for_each_child(&port->dev, root, discover_region);
+ device_for_each_child(&port->dev, NULL, discover_region);
return 0;
}
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index e97d47f42ee2..584c70a34b52 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -13,6 +13,7 @@
#include <linux/mman.h>
#include <linux/memory-tiers.h>
#include <linux/memory_hotplug.h>
+#include <linux/string_helpers.h>
#include "dax-private.h"
#include "bus.h"
@@ -68,7 +69,7 @@ static void kmem_put_memory_types(void)
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
- unsigned long total_len = 0;
+ unsigned long total_len = 0, orig_len = 0;
struct dax_kmem_data *data;
struct memory_dev_type *mtype;
int i, rc, mapped = 0;
@@ -97,6 +98,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
for (i = 0; i < dev_dax->nr_range; i++) {
struct range range;
+ orig_len += range_len(&dev_dax->ranges[i].range);
rc = dax_kmem_range(dev_dax, i, &range);
if (rc) {
dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
@@ -109,6 +111,12 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
if (!total_len) {
dev_warn(dev, "rejecting DAX region without any memory after alignment\n");
return -EINVAL;
+ } else if (total_len != orig_len) {
+ char buf[16];
+
+ string_get_size(orig_len - total_len, 1, STRING_UNITS_2,
+ buf, sizeof(buf));
+ dev_warn(dev, "DAX region truncated by %s due to alignment\n", buf);
}
init_node_memory_type(numa_node, mtype);
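
For reference, a small sketch (illustrative values, not from the patch) of how string_get_size() from <linux/string_helpers.h> formats the truncated byte count reported above:

static void example_truncation_warning(struct device *dev, u64 truncated_bytes)
{
	char buf[16];

	/* Block size 1, binary units: 3 << 20 bytes formats as roughly "3.00 MiB". */
	string_get_size(truncated_bytes, 1, STRING_UNITS_2, buf, sizeof(buf));
	dev_warn(dev, "DAX region truncated by %s due to alignment\n", buf);
}
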
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 5baa83b85515..890ecac04dac 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -19,7 +19,9 @@
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
@@ -35,35 +37,91 @@
static inline int is_dma_buf_file(struct file *);
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-static DEFINE_MUTEX(debugfs_list_mutex);
-static LIST_HEAD(debugfs_list);
+static DEFINE_MUTEX(dmabuf_list_mutex);
+static LIST_HEAD(dmabuf_list);
-static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+static void __dma_buf_list_add(struct dma_buf *dmabuf)
{
- mutex_lock(&debugfs_list_mutex);
- list_add(&dmabuf->list_node, &debugfs_list);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_lock(&dmabuf_list_mutex);
+ list_add(&dmabuf->list_node, &dmabuf_list);
+ mutex_unlock(&dmabuf_list_mutex);
}
-static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+static void __dma_buf_list_del(struct dma_buf *dmabuf)
{
if (!dmabuf)
return;
- mutex_lock(&debugfs_list_mutex);
+ mutex_lock(&dmabuf_list_mutex);
list_del(&dmabuf->list_node);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
}
-#else
-static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+
+/**
+ * dma_buf_iter_begin - begin iteration through global list of all DMA buffers
+ *
+ * Returns the first buffer in the global list of DMA-bufs that's not in the
+ * process of being destroyed. Increments that buffer's reference count to
+ * prevent buffer destruction. Callers must release the reference, either by
+ * continuing iteration with dma_buf_iter_next(), or with dma_buf_put().
+ *
+ * Return:
+ * * First buffer from global list, with refcount elevated
+ * * NULL if no active buffers are present
+ */
+struct dma_buf *dma_buf_iter_begin(void)
{
+ struct dma_buf *ret = NULL, *dmabuf;
+
+ /*
+ * The list mutex does not protect a dmabuf's refcount, so it can be
+ * zeroed while we are iterating. We cannot call get_dma_buf() since the
+ * caller may not already own a reference to the buffer.
+ */
+ mutex_lock(&dmabuf_list_mutex);
+ list_for_each_entry(dmabuf, &dmabuf_list, list_node) {
+ if (file_ref_get(&dmabuf->file->f_ref)) {
+ ret = dmabuf;
+ break;
+ }
+ }
+ mutex_unlock(&dmabuf_list_mutex);
+ return ret;
}
-static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+/**
+ * dma_buf_iter_next - continue iteration through global list of all DMA buffers
+ * @dmabuf: [in] pointer to dma_buf
+ *
+ * Decrements the reference count on the provided buffer. Returns the next
+ * buffer from the remainder of the global list of DMA-bufs with its reference
+ * count incremented. Callers must release the reference, either by continuing
+ * iteration with dma_buf_iter_next(), or with dma_buf_put().
+ *
+ * Return:
+ * * Next buffer from global list, with refcount elevated
+ * * NULL if no additional active buffers are present
+ */
+struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf)
{
+ struct dma_buf *ret = NULL;
+
+ /*
+ * The list mutex does not protect a dmabuf's refcount, so it can be
+ * zeroed while we are iterating. We cannot call get_dma_buf() since the
+ * caller may not already own a reference to the buffer.
+ */
+ mutex_lock(&dmabuf_list_mutex);
+ dma_buf_put(dmabuf);
+ list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) {
+ if (file_ref_get(&dmabuf->file->f_ref)) {
+ ret = dmabuf;
+ break;
+ }
+ }
+ mutex_unlock(&dmabuf_list_mutex);
+ return ret;
}
-#endif
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
@@ -115,7 +173,7 @@ static int dma_buf_file_release(struct inode *inode, struct file *file)
if (!is_dma_buf_file(file))
return -EINVAL;
- __dma_buf_debugfs_list_del(file->private_data);
+ __dma_buf_list_del(file->private_data);
return 0;
}
@@ -636,10 +694,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|| !exp_info->ops->release))
return ERR_PTR(-EINVAL);
- if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
- (exp_info->ops->pin || exp_info->ops->unpin)))
- return ERR_PTR(-EINVAL);
-
if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
return ERR_PTR(-EINVAL);
@@ -689,7 +743,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file->f_path.dentry->d_fsdata = dmabuf;
dmabuf->file = file;
- __dma_buf_debugfs_list_add(dmabuf);
+ __dma_buf_list_add(dmabuf);
return dmabuf;
@@ -782,7 +836,7 @@ static void mangle_sg_table(struct sg_table *sg_table)
/* To catch abuse of the underlying struct page by importers mix
* up the bits, but take care to preserve the low SG_ bits to
- * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+ * not corrupt the sgt. The mixing is undone on unmap
* before passing the sgt back to the exporter.
*/
for_each_sgtable_sg(sg_table, sg, i)
@@ -790,29 +844,19 @@ static void mangle_sg_table(struct sg_table *sg_table)
#endif
}
-static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction direction)
-{
- struct sg_table *sg_table;
- signed long ret;
- sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
- if (IS_ERR_OR_NULL(sg_table))
- return sg_table;
-
- if (!dma_buf_attachment_is_dynamic(attach)) {
- ret = dma_resv_wait_timeout(attach->dmabuf->resv,
- DMA_RESV_USAGE_KERNEL, true,
- MAX_SCHEDULE_TIMEOUT);
- if (ret < 0) {
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
- direction);
- return ERR_PTR(ret);
- }
- }
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+ return !!attach->importer_ops;
+}
- mangle_sg_table(sg_table);
- return sg_table;
+static bool
+dma_buf_pin_on_map(struct dma_buf_attachment *attach)
+{
+ return attach->dmabuf->ops->pin &&
+ (!dma_buf_attachment_is_dynamic(attach) ||
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
}
/**
@@ -935,48 +979,11 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
list_add(&attach->node, &dmabuf->attachments);
dma_resv_unlock(dmabuf->resv);
- /* When either the importer or the exporter can't handle dynamic
- * mappings we cache the mapping here to avoid issues with the
- * reservation object lock.
- */
- if (dma_buf_attachment_is_dynamic(attach) !=
- dma_buf_is_dynamic(dmabuf)) {
- struct sg_table *sgt;
-
- dma_resv_lock(attach->dmabuf->resv, NULL);
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- ret = dmabuf->ops->pin(attach);
- if (ret)
- goto err_unlock;
- }
-
- sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
- if (!sgt)
- sgt = ERR_PTR(-ENOMEM);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto err_unpin;
- }
- dma_resv_unlock(attach->dmabuf->resv);
- attach->sgt = sgt;
- attach->dir = DMA_BIDIRECTIONAL;
- }
-
return attach;
err_attach:
kfree(attach);
return ERR_PTR(ret);
-
-err_unpin:
- if (dma_buf_is_dynamic(attach->dmabuf))
- dmabuf->ops->unpin(attach);
-
-err_unlock:
- dma_resv_unlock(attach->dmabuf->resv);
-
- dma_buf_detach(dmabuf, attach);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
@@ -995,16 +1002,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
-static void __unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sg_table,
- enum dma_data_direction direction)
-{
- /* uses XOR, hence this unmangles */
- mangle_sg_table(sg_table);
-
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
-}
-
/**
* dma_buf_detach - Remove the given attachment from dmabuf's attachments list
* @dmabuf: [in] buffer to detach from.
@@ -1020,16 +1017,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
return;
dma_resv_lock(dmabuf->resv, NULL);
-
- if (attach->sgt) {
-
- __unmap_dma_buf(attach, attach->sgt, attach->dir);
-
- if (dma_buf_is_dynamic(attach->dmabuf))
- dmabuf->ops->unpin(attach);
- }
list_del(&attach->node);
-
dma_resv_unlock(dmabuf->resv);
if (dmabuf->ops->detach)
@@ -1058,7 +1046,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
struct dma_buf *dmabuf = attach->dmabuf;
int ret = 0;
- WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+ WARN_ON(!attach->importer_ops);
dma_resv_assert_held(dmabuf->resv);
@@ -1081,7 +1069,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
{
struct dma_buf *dmabuf = attach->dmabuf;
- WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+ WARN_ON(!attach->importer_ops);
dma_resv_assert_held(dmabuf->resv);
@@ -1115,7 +1103,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
- int r;
+ signed long ret;
might_sleep();
@@ -1124,41 +1112,37 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt) {
+ if (dma_buf_pin_on_map(attach)) {
+ ret = attach->dmabuf->ops->pin(attach);
/*
- * Two mappings with different directions for the same
- * attachment are not allowed.
+ * Catch exporters making buffers inaccessible even when
+ * attachments preventing that exist.
*/
- if (attach->dir != direction &&
- attach->dir != DMA_BIDIRECTIONAL)
- return ERR_PTR(-EBUSY);
-
- return attach->sgt;
- }
-
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
- r = attach->dmabuf->ops->pin(attach);
- if (r)
- return ERR_PTR(r);
- }
+	WARN_ON_ONCE(ret == -EBUSY);
+ if (ret)
+ return ERR_PTR(ret);
}
- sg_table = __map_dma_buf(attach, direction);
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sg_table))
+ goto error_unpin;
- if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- attach->dmabuf->ops->unpin(attach);
-
- if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
- attach->sgt = sg_table;
- attach->dir = direction;
+ /*
+ * Importers with static attachments don't wait for fences.
+ */
+ if (!dma_buf_attachment_is_dynamic(attach)) {
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ goto error_unmap;
}
+ mangle_sg_table(sg_table);
#ifdef CONFIG_DMA_API_DEBUG
- if (!IS_ERR(sg_table)) {
+ {
struct scatterlist *sg;
u64 addr;
int len;
@@ -1175,6 +1159,16 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
#endif /* CONFIG_DMA_API_DEBUG */
return sg_table;
+
+error_unmap:
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+ sg_table = ERR_PTR(ret);
+
+error_unpin:
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
+
+ return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
@@ -1227,14 +1221,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt == sg_table)
- return;
-
- __unmap_dma_buf(attach, sg_table, direction);
+ mangle_sg_table(sg_table);
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
- if (dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- dma_buf_unpin(attach);
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
@@ -1630,7 +1621,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
size_t size = 0;
int ret;
- ret = mutex_lock_interruptible(&debugfs_list_mutex);
+ ret = mutex_lock_interruptible(&dmabuf_list_mutex);
if (ret)
return ret;
@@ -1639,7 +1630,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
"size", "flags", "mode", "count", "ino");
- list_for_each_entry(buf_obj, &debugfs_list, list_node) {
+ list_for_each_entry(buf_obj, &dmabuf_list, list_node) {
ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
if (ret)
@@ -1676,11 +1667,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
return 0;
error_unlock:
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
return ret;
}
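
A hypothetical consumer sketch of the new iteration API documented above; the function name is made up, but the reference-count discipline follows the kernel-doc: every non-NULL return carries an elevated refcount that dma_buf_iter_next() or dma_buf_put() releases.

static size_t example_total_dmabuf_bytes(void)
{
	struct dma_buf *buf;
	size_t total = 0;

	/*
	 * Walk every live DMA-buf; running the loop to NULL leaves no
	 * reference held. A caller that stops early must dma_buf_put()
	 * the buffer it is currently holding.
	 */
	for (buf = dma_buf_iter_begin(); buf; buf = dma_buf_iter_next(buf))
		total += buf->size;

	return total;
}
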
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
index 2a059ac0ed27..a495d8a6c2e3 100644
--- a/drivers/dma-buf/dma-fence-unwrap.c
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -79,6 +79,41 @@ static int fence_cmp(const void *_a, const void *_b)
return 0;
}
+/**
+ * dma_fence_dedup_array - Sort and deduplicate an array of dma_fence pointers
+ * @fences: Array of dma_fence pointers to be deduplicated
+ * @num_fences: Number of entries in the @fences array
+ *
+ * Sorts the input array by context, then removes duplicate
+ * fences with the same context, keeping only the most recent one.
+ *
+ * The array is modified in-place and unreferenced duplicate fences are released
+ * via dma_fence_put(). The function returns the new number of fences after
+ * deduplication.
+ *
+ * Return: Number of unique fences remaining in the array.
+ */
+int dma_fence_dedup_array(struct dma_fence **fences, int num_fences)
+{
+ int i, j;
+
+ sort(fences, num_fences, sizeof(*fences), fence_cmp, NULL);
+
+ /*
+ * Only keep the most recent fence for each context.
+ */
+ j = 0;
+ for (i = 1; i < num_fences; i++) {
+ if (fences[i]->context == fences[j]->context)
+ dma_fence_put(fences[i]);
+ else
+ fences[++j] = fences[i];
+ }
+
+ return ++j;
+}
+EXPORT_SYMBOL_GPL(dma_fence_dedup_array);
+
/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
@@ -87,7 +122,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence *tmp, *unsignaled = NULL, **array;
struct dma_fence_array *result;
ktime_t timestamp;
- int i, j, count;
+ int i, count;
count = 0;
timestamp = ns_to_ktime(0);
@@ -141,19 +176,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
if (count == 0 || count == 1)
goto return_fastpath;
- sort(array, count, sizeof(*array), fence_cmp, NULL);
-
- /*
- * Only keep the most recent fence for each context.
- */
- j = 0;
- for (i = 1; i < count; i++) {
- if (array[i]->context == array[j]->context)
- dma_fence_put(array[i]);
- else
- array[++j] = array[i];
- }
- count = ++j;
+ count = dma_fence_dedup_array(array, count);
if (count > 1) {
result = dma_fence_array_create(count, array,
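
A short caller sketch, assuming an already-collected fence array, of the newly exported dma_fence_dedup_array() helper; it mirrors what __dma_fence_unwrap_merge() now does internally:

static int example_collapse_fences(struct dma_fence **fences, int num_fences)
{
	/*
	 * Sort by context and keep only the most recent fence per context;
	 * references to the dropped duplicates are released by the helper.
	 */
	num_fences = dma_fence_dedup_array(fences, num_fences);

	return num_fences;	/* entries [0, num_fences) are unique per context */
}
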
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 5f8d010516f0..b1ef4546346d 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -320,8 +320,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
count++;
dma_resv_list_set(fobj, i, fence, usage);
- /* pointer update must be visible before we extend the num_fences */
- smp_store_mb(fobj->num_fences, count);
+ /* fence update must be visible before we extend the num_fences */
+ smp_wmb();
+ fobj->num_fences = count;
}
EXPORT_SYMBOL(dma_resv_add_fence);
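
As a generic illustration of the ordering the smp_wmb() above provides (a publish/consume sketch, not dma-resv code): the store to the fence slot must become visible before the enlarged count, and a reader pairs that with a read-side barrier before dereferencing the slot.

struct example_list {
	unsigned int num;
	void *slots[16];
};

static void example_publish(struct example_list *l, void *item)
{
	l->slots[l->num] = item;	/* fill the new slot first */
	smp_wmb();			/* order the slot store before ... */
	WRITE_ONCE(l->num, l->num + 1);	/* ... advertising it via num */
}

static void *example_peek_last(struct example_list *l)
{
	unsigned int n = READ_ONCE(l->num);

	smp_rmb();			/* pairs with smp_wmb() in example_publish() */
	return n ? READ_ONCE(l->slots[n - 1]) : NULL;
}
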
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 26d5dc89ea16..82b1b714300d 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -21,8 +21,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-static struct dma_heap *sys_heap;
-
struct system_heap_buffer {
struct dma_heap *heap;
struct list_head attachments;
@@ -424,6 +422,7 @@ static const struct dma_heap_ops system_heap_ops = {
static int __init system_heap_create(void)
{
struct dma_heap_export_info exp_info;
+ struct dma_heap *sys_heap;
exp_info.name = "system";
exp_info.ops = &system_heap_ops;
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index 9f80a45498f0..261b38816226 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -413,7 +413,7 @@ static int test_wait_timeout(void *arg)
err = 0;
err_free:
timer_delete_sync(&wt.timer);
- destroy_timer_on_stack(&wt.timer);
+ timer_destroy_on_stack(&wt.timer);
dma_fence_signal(wt.f);
dma_fence_put(wt.f);
return err;
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index f5905d67dedb..4f27ee93a00c 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -173,20 +173,6 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
}
-static void timeline_fence_value_str(struct dma_fence *fence,
- char *str, int size)
-{
- snprintf(str, size, "%lld", fence->seqno);
-}
-
-static void timeline_fence_timeline_value_str(struct dma_fence *fence,
- char *str, int size)
-{
- struct sync_timeline *parent = dma_fence_parent(fence);
-
- snprintf(str, size, "%d", parent->value);
-}
-
static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
@@ -208,8 +194,6 @@ static const struct dma_fence_ops timeline_fence_ops = {
.get_timeline_name = timeline_fence_get_timeline_name,
.signaled = timeline_fence_signaled,
.release = timeline_fence_release,
- .fence_value_str = timeline_fence_value_str,
- .timeline_value_str = timeline_fence_timeline_value_str,
.set_deadline = timeline_fence_set_deadline,
};
@@ -438,15 +422,17 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
return -EINVAL;
pt = dma_fence_to_sync_pt(fence);
- if (!pt)
- return -EINVAL;
+ if (!pt) {
+ ret = -EINVAL;
+ goto put_fence;
+ }
spin_lock_irqsave(fence->lock, flags);
- if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
- data.deadline_ns = ktime_to_ns(pt->deadline);
- } else {
+ if (!test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
ret = -ENOENT;
+ goto unlock;
}
+ data.deadline_ns = ktime_to_ns(pt->deadline);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_put(fence);
@@ -458,6 +444,13 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
return -EFAULT;
return 0;
+
+unlock:
+ spin_unlock_irqrestore(fence->lock, flags);
+put_fence:
+ dma_fence_put(fence);
+
+ return ret;
}
static long sw_sync_ioctl(struct file *file, unsigned int cmd,
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 237bce21d1e7..67cd69551e42 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -12,8 +12,6 @@ static struct dentry *dbgfs;
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);
-static LIST_HEAD(sync_file_list_head);
-static DEFINE_SPINLOCK(sync_file_list_lock);
void sync_timeline_debug_add(struct sync_timeline *obj)
{
@@ -33,24 +31,6 @@ void sync_timeline_debug_remove(struct sync_timeline *obj)
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}
-void sync_file_debug_add(struct sync_file *sync_file)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_file_list_lock, flags);
- list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
-}
-
-void sync_file_debug_remove(struct sync_file *sync_file)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_file_list_lock, flags);
- list_del(&sync_file->sync_file_list);
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
-}
-
static const char *sync_status_str(int status)
{
if (status < 0)
@@ -82,25 +62,8 @@ static void sync_print_fence(struct seq_file *s,
seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
}
- if (fence->ops->timeline_value_str &&
- fence->ops->fence_value_str) {
- char value[64];
- bool success;
-
- fence->ops->fence_value_str(fence, value, sizeof(value));
- success = strlen(value);
-
- if (success) {
- seq_printf(s, ": %s", value);
-
- fence->ops->timeline_value_str(fence, value,
- sizeof(value));
-
- if (strlen(value))
- seq_printf(s, " / %s", value);
- }
- }
-
+ seq_printf(s, ": %lld", fence->seqno);
+ seq_printf(s, " / %d", parent->value);
seq_putc(s, '\n');
}
@@ -118,26 +81,6 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
spin_unlock(&obj->lock);
}
-static void sync_print_sync_file(struct seq_file *s,
- struct sync_file *sync_file)
-{
- char buf[128];
- int i;
-
- seq_printf(s, "[%p] %s: %s\n", sync_file,
- sync_file_get_name(sync_file, buf, sizeof(buf)),
- sync_status_str(dma_fence_get_status(sync_file->fence)));
-
- if (dma_fence_is_array(sync_file->fence)) {
- struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
-
- for (i = 0; i < array->num_fences; ++i)
- sync_print_fence(s, array->fences[i], true);
- } else {
- sync_print_fence(s, sync_file->fence, true);
- }
-}
-
static int sync_info_debugfs_show(struct seq_file *s, void *unused)
{
struct list_head *pos;
@@ -157,15 +100,6 @@ static int sync_info_debugfs_show(struct seq_file *s, void *unused)
seq_puts(s, "fences:\n--------------\n");
- spin_lock_irq(&sync_file_list_lock);
- list_for_each(pos, &sync_file_list_head) {
- struct sync_file *sync_file =
- container_of(pos, struct sync_file, sync_file_list);
-
- sync_print_sync_file(s, sync_file);
- seq_putc(s, '\n');
- }
- spin_unlock_irq(&sync_file_list_lock);
return 0;
}
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index a1bdd62efccd..02af347293d0 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -68,7 +68,5 @@ extern const struct file_operations sw_sync_debugfs_fops;
void sync_timeline_debug_add(struct sync_timeline *obj);
void sync_timeline_debug_remove(struct sync_timeline *obj);
-void sync_file_debug_add(struct sync_file *fence);
-void sync_file_debug_remove(struct sync_file *fence);
#endif /* _LINUX_SYNC_H */
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index cc7398cc17d6..7eee3eb47a8e 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -285,7 +285,6 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
}
static const struct dma_buf_ops udmabuf_ops = {
- .cache_sgt_mapping = true,
.map_dma_buf = map_udmabuf,
.unmap_dma_buf = unmap_udmabuf,
.release = release_udmabuf,
@@ -393,7 +392,7 @@ static long udmabuf_create(struct miscdevice *device,
if (!ubuf)
return -ENOMEM;
- pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
+ pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
for (i = 0; i < head->count; i++) {
pgoff_t subpgcnt;
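
To see why the (u64) cast matters: size_limit_mb is a 32-bit module parameter, so the multiplication previously wrapped once the configured limit reached 2 GiB. Illustrative arithmetic:

/*
 *   size_limit_mb = 4096                       (a 4 GiB limit, expressed in MiB)
 *   4096 * 1024 * 1024       -> 0x100000000    overflows 32-bit arithmetic to 0
 *   (u64)4096 * 1024 * 1024  -> 0x100000000    kept intact, then >> PAGE_SHIFT
 */
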
diff --git a/drivers/dma/amd/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
index 715ac3ae067b..81339664036f 100644
--- a/drivers/dma/amd/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
@@ -342,6 +342,9 @@ static void pt_cmd_callback_work(void *data, int err)
struct pt_dma_chan *chan;
unsigned long flags;
+ if (!desc)
+ return;
+
dma_chan = desc->vd.tx.chan;
chan = to_pt_chan(dma_chan);
@@ -355,16 +358,14 @@ static void pt_cmd_callback_work(void *data, int err)
desc->status = DMA_ERROR;
spin_lock_irqsave(&chan->vc.lock, flags);
- if (desc) {
- if (desc->status != DMA_COMPLETE) {
- if (desc->status != DMA_ERROR)
- desc->status = DMA_COMPLETE;
+ if (desc->status != DMA_COMPLETE) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
- dma_cookie_complete(tx_desc);
- dma_descriptor_unmap(tx_desc);
- } else {
- tx_desc = NULL;
- }
+ dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
+ } else {
+ tx_desc = NULL;
}
spin_unlock_irqrestore(&chan->vc.lock, flags);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index d891dfca358e..91b2fbc0b864 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -841,9 +841,9 @@ static int dmatest_func(void *data)
} else {
dma_async_issue_pending(chan);
- wait_event_timeout(thread->done_wait,
- done->done,
- msecs_to_jiffies(params->timeout));
+ wait_event_freezable_timeout(thread->done_wait,
+ done->done,
+ msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL,
NULL);
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 756d67325db5..66bfa28d984e 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -57,7 +57,7 @@ static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
intr = edma_readl_chreg(fsl_chan, ch_int);
if (!intr)
- return IRQ_HANDLED;
+ return IRQ_NONE;
edma_writel_chreg(fsl_chan, 1, ch_int);
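
The IRQ_NONE change above follows the usual handler contract: claim the interrupt only when the device actually raised it, so the core can detect stuck or misrouted lines. A generic sketch (the register accessors and types are hypothetical):

static irqreturn_t example_tx_handler(int irq, void *dev_id)
{
	struct example_chan *chan = dev_id;		/* hypothetical driver data */
	u32 intr = example_read_int_status(chan);	/* hypothetical register read */

	if (!intr)
		return IRQ_NONE;	/* not ours/spurious: let the core account for it */

	example_ack_int(chan, intr);			/* hypothetical acknowledge */
	return IRQ_HANDLED;
}
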
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index ff94ee892339..6d12033649f8 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -222,7 +222,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
struct idxd_wq *wq;
struct device *dev, *fdev;
int rc = 0;
- struct iommu_sva *sva;
+ struct iommu_sva *sva = NULL;
unsigned int pasid;
struct idxd_cdev *idxd_cdev;
@@ -317,7 +317,7 @@ failed_set_pasid:
if (device_user_pasid_enabled(idxd))
idxd_xa_pasid_remove(ctx);
failed_get_pasid:
- if (device_user_pasid_enabled(idxd))
+ if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva))
iommu_sva_unbind_device(sva);
failed:
mutex_unlock(&wq->wq_lock);
@@ -407,6 +407,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (current->mm != ctx->mm)
+ return -EPERM;
+
rc = check_vma(wq, vma, __func__);
if (rc < 0)
return rc;
@@ -473,6 +476,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t
ssize_t written = 0;
int i;
+ if (current->mm != ctx->mm)
+ return -EPERM;
+
for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
int rc = idxd_submit_user_descriptor(ctx, udesc + i);
@@ -493,6 +499,9 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_device *idxd = wq->idxd;
__poll_t out = 0;
+ if (current->mm != ctx->mm)
+ return POLLNVAL;
+
poll_wait(filp, &wq->err_queue, wait);
spin_lock(&idxd->dev_lock);
if (idxd->sw_err.valid)
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index fca1d2924999..80355d03004d 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -155,6 +155,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd)
pci_free_irq_vectors(pdev);
}
+static void idxd_clean_wqs(struct idxd_device *idxd)
+{
+ struct idxd_wq *wq;
+ struct device *conf_dev;
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = idxd->wqs[i];
+ if (idxd->hw.wq_cap.op_config)
+ bitmap_free(wq->opcap_bmap);
+ kfree(wq->wqcfg);
+ conf_dev = wq_confdev(wq);
+ put_device(conf_dev);
+ kfree(wq);
+ }
+ bitmap_free(idxd->wq_enable_map);
+ kfree(idxd->wqs);
+}
+
static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -169,8 +188,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
if (!idxd->wq_enable_map) {
- kfree(idxd->wqs);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_bitmap;
}
for (i = 0; i < idxd->max_wqs; i++) {
@@ -189,10 +208,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_wq_device_type;
rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
- if (rc < 0) {
- put_device(conf_dev);
+ if (rc < 0)
goto err;
- }
mutex_init(&wq->wq_lock);
init_waitqueue_head(&wq->err_queue);
@@ -203,7 +220,6 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
- put_device(conf_dev);
rc = -ENOMEM;
goto err;
}
@@ -211,9 +227,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
if (idxd->hw.wq_cap.op_config) {
wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
if (!wq->opcap_bmap) {
- put_device(conf_dev);
rc = -ENOMEM;
- goto err;
+ goto err_opcap_bmap;
}
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
@@ -224,15 +239,46 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
return 0;
- err:
+err_opcap_bmap:
+ kfree(wq->wqcfg);
+
+err:
+ put_device(conf_dev);
+ kfree(wq);
+
while (--i >= 0) {
wq = idxd->wqs[i];
+ if (idxd->hw.wq_cap.op_config)
+ bitmap_free(wq->opcap_bmap);
+ kfree(wq->wqcfg);
conf_dev = wq_confdev(wq);
put_device(conf_dev);
+ kfree(wq);
+
}
+ bitmap_free(idxd->wq_enable_map);
+
+err_bitmap:
+ kfree(idxd->wqs);
+
return rc;
}
+static void idxd_clean_engines(struct idxd_device *idxd)
+{
+ struct idxd_engine *engine;
+ struct device *conf_dev;
+ int i;
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ engine = idxd->engines[i];
+ conf_dev = engine_confdev(engine);
+ put_device(conf_dev);
+ kfree(engine);
+ }
+ kfree(idxd->engines);
+}
+
static int idxd_setup_engines(struct idxd_device *idxd)
{
struct idxd_engine *engine;
@@ -263,6 +309,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
put_device(conf_dev);
+ kfree(engine);
goto err;
}
@@ -276,10 +323,26 @@ static int idxd_setup_engines(struct idxd_device *idxd)
engine = idxd->engines[i];
conf_dev = engine_confdev(engine);
put_device(conf_dev);
+ kfree(engine);
}
+ kfree(idxd->engines);
+
return rc;
}
+static void idxd_clean_groups(struct idxd_device *idxd)
+{
+ struct idxd_group *group;
+ int i;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = idxd->groups[i];
+ put_device(group_confdev(group));
+ kfree(group);
+ }
+ kfree(idxd->groups);
+}
+
static int idxd_setup_groups(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -310,6 +373,7 @@ static int idxd_setup_groups(struct idxd_device *idxd)
rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
if (rc < 0) {
put_device(conf_dev);
+ kfree(group);
goto err;
}
@@ -334,20 +398,18 @@ static int idxd_setup_groups(struct idxd_device *idxd)
while (--i >= 0) {
group = idxd->groups[i];
put_device(group_confdev(group));
+ kfree(group);
}
+ kfree(idxd->groups);
+
return rc;
}
static void idxd_cleanup_internals(struct idxd_device *idxd)
{
- int i;
-
- for (i = 0; i < idxd->max_groups; i++)
- put_device(group_confdev(idxd->groups[i]));
- for (i = 0; i < idxd->max_engines; i++)
- put_device(engine_confdev(idxd->engines[i]));
- for (i = 0; i < idxd->max_wqs; i++)
- put_device(wq_confdev(idxd->wqs[i]));
+ idxd_clean_groups(idxd);
+ idxd_clean_engines(idxd);
+ idxd_clean_wqs(idxd);
destroy_workqueue(idxd->wq);
}
@@ -390,7 +452,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
static int idxd_setup_internals(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
- int rc, i;
+ int rc;
init_waitqueue_head(&idxd->cmd_waitq);
@@ -421,14 +483,11 @@ static int idxd_setup_internals(struct idxd_device *idxd)
err_evl:
destroy_workqueue(idxd->wq);
err_wkq_create:
- for (i = 0; i < idxd->max_groups; i++)
- put_device(group_confdev(idxd->groups[i]));
+ idxd_clean_groups(idxd);
err_group:
- for (i = 0; i < idxd->max_engines; i++)
- put_device(engine_confdev(idxd->engines[i]));
+ idxd_clean_engines(idxd);
err_engine:
- for (i = 0; i < idxd->max_wqs; i++)
- put_device(wq_confdev(idxd->wqs[i]));
+ idxd_clean_wqs(idxd);
err_wqs:
return rc;
}
@@ -528,6 +587,17 @@ static void idxd_read_caps(struct idxd_device *idxd)
idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
}
+static void idxd_free(struct idxd_device *idxd)
+{
+ if (!idxd)
+ return;
+
+ put_device(idxd_confdev(idxd));
+ bitmap_free(idxd->opcap_bmap);
+ ida_free(&idxd_ida, idxd->id);
+ kfree(idxd);
+}
+
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
struct device *dev = &pdev->dev;
@@ -545,28 +615,34 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
if (idxd->id < 0)
- return NULL;
+ goto err_ida;
idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
- if (!idxd->opcap_bmap) {
- ida_free(&idxd_ida, idxd->id);
- return NULL;
- }
+ if (!idxd->opcap_bmap)
+ goto err_opcap;
device_initialize(conf_dev);
conf_dev->parent = dev;
conf_dev->bus = &dsa_bus_type;
conf_dev->type = idxd->data->dev_type;
rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
- if (rc < 0) {
- put_device(conf_dev);
- return NULL;
- }
+ if (rc < 0)
+ goto err_name;
spin_lock_init(&idxd->dev_lock);
spin_lock_init(&idxd->cmd_lock);
return idxd;
+
+err_name:
+ put_device(conf_dev);
+ bitmap_free(idxd->opcap_bmap);
+err_opcap:
+ ida_free(&idxd_ida, idxd->id);
+err_ida:
+ kfree(idxd);
+
+ return NULL;
}
static int idxd_enable_system_pasid(struct idxd_device *idxd)
@@ -626,27 +702,6 @@ static void idxd_disable_system_pasid(struct idxd_device *idxd)
idxd->pasid = IOMMU_PASID_INVALID;
}
-static int idxd_enable_sva(struct pci_dev *pdev)
-{
- int ret;
-
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
- if (ret)
- return ret;
-
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- if (ret)
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-
- return ret;
-}
-
-static void idxd_disable_sva(struct pci_dev *pdev)
-{
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-}
-
static int idxd_probe(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
@@ -661,17 +716,13 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
- if (idxd_enable_sva(pdev)) {
- dev_warn(dev, "Unable to turn on user SVA feature.\n");
- } else {
- set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+ set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
- rc = idxd_enable_system_pasid(idxd);
- if (rc)
- dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
- else
- set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
- }
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc)
+ dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
+ else
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
} else if (!sva) {
dev_warn(dev, "User forced SVA off via module param.\n");
}
@@ -709,8 +760,6 @@ static int idxd_probe(struct idxd_device *idxd)
err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(pdev);
return rc;
}
@@ -721,8 +770,6 @@ static void idxd_cleanup(struct idxd_device *idxd)
idxd_cleanup_internals(idxd);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(idxd->pdev);
}
/*
@@ -1190,7 +1237,7 @@ int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
- put_device(idxd_confdev(idxd));
+ idxd_free(idxd);
err_idxd_alloc:
pci_disable_device(pdev);
return rc;
@@ -1232,7 +1279,6 @@ static void idxd_shutdown(struct pci_dev *pdev)
static void idxd_remove(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
- struct idxd_irq_entry *irq_entry;
idxd_unregister_devices(idxd);
/*
@@ -1245,20 +1291,12 @@ static void idxd_remove(struct pci_dev *pdev)
get_device(idxd_confdev(idxd));
device_unregister(idxd_confdev(idxd));
idxd_shutdown(pdev);
- if (device_pasid_enabled(idxd))
- idxd_disable_system_pasid(idxd);
idxd_device_remove_debugfs(idxd);
-
- irq_entry = idxd_get_ie(idxd, 0);
- free_irq(irq_entry->vector, irq_entry);
- pci_free_irq_vectors(pdev);
+ idxd_cleanup(idxd);
pci_iounmap(pdev, idxd->reg_base);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(pdev);
- pci_disable_device(pdev);
- destroy_workqueue(idxd->wq);
- perfmon_pmu_remove(idxd);
put_device(idxd_confdev(idxd));
+ idxd_free(idxd);
+ pci_disable_device(pdev);
}
static struct pci_driver idxd_pci_driver = {
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index c9aba2304de7..5d3c0ae6b342 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -10,7 +10,7 @@
#include <linux/interrupt.h>
#include <linux/dca.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index d5ddb4e30e71..47c8adfdc155 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -420,15 +420,11 @@ static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c,
{
struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
struct virt_dma_desc *vd;
- unsigned long flags;
- spin_lock_irqsave(&cvc->pc->lock, flags);
list_for_each_entry(vd, &cvc->pc->queue, node)
if (vd->tx.cookie == cookie) {
- spin_unlock_irqrestore(&cvc->pc->lock, flags);
return vd;
}
- spin_unlock_irqrestore(&cvc->pc->lock, flags);
list_for_each_entry(vd, &cvc->vc.desc_issued, node)
if (vd->tx.cookie == cookie)
@@ -452,9 +448,11 @@ static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c,
if (ret == DMA_COMPLETE || !txstate)
return ret;
+ spin_lock_irqsave(&cvc->pc->lock, flags);
spin_lock_irqsave(&cvc->vc.lock, flags);
vd = mtk_cqdma_find_active_desc(c, cookie);
spin_unlock_irqrestore(&cvc->vc.lock, flags);
+ spin_unlock_irqrestore(&cvc->pc->lock, flags);
if (vd) {
cvd = to_cqdma_vdesc(vd);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index b223a7aacb0c..b6255c0601bb 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1091,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work)
u32 residue_diff;
ktime_t time_diff;
unsigned long delay;
+ unsigned long flags;
while (1) {
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
if (uc->desc) {
/* Get previous residue and time stamp */
residue_diff = uc->tx_drain.residue;
@@ -1127,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
usleep_range(ktime_to_us(delay),
ktime_to_us(delay) + 10);
continue;
@@ -1143,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
}
static irqreturn_t udma_ring_irq_handler(int irq, void *data)
@@ -4246,7 +4253,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct udma_dev *ud = ofdma->of_dma_data;
- dma_cap_mask_t mask = ud->ddev.cap_mask;
struct udma_filter_param filter_param;
struct dma_chan *chan;
@@ -4278,7 +4284,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
}
}
- chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
+ chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param,
ofdma->of_node);
if (!chan) {
dev_err(ud->dev, "get channel fail in %s.\n", __func__);
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 3e971f902363..cae52c654a15 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -99,7 +99,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
if (status & priv->ecc_stat_ce_mask) {
regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset,
&err_addr);
- if (priv->ecc_uecnt_offset)
+ if (priv->ecc_cecnt_offset)
regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset,
&err_count);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
@@ -1005,9 +1005,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
}
}
- /* Interrupt mode set to every SBERR */
- regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST,
- ALTR_A10_ECC_INTMODE);
/* Enable ECC */
ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base +
ALTR_A10_ECC_CTRL_OFST));
@@ -1749,9 +1746,9 @@ altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR)
- writel(priv->ue_set_mask, set_addr);
+ writew(priv->ue_set_mask, set_addr);
else
- writel(priv->ce_set_mask, set_addr);
+ writew(priv->ce_set_mask, set_addr);
/* Ensure the interrupt test bits are set */
wmb();
@@ -1781,7 +1778,7 @@ altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR) {
- writel(priv->ue_set_mask, set_addr);
+ writew(priv->ue_set_mask, set_addr);
} else {
/* Setup read/write of 4 bytes */
writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);
@@ -2127,11 +2124,15 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
return PTR_ERR(edac->ecc_mgr_map);
}
+ /* Set irq mask for DDR SBE to avoid any pending irq before registration */
+ regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST,
+ (A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0));
+
edac->irq_chip.name = pdev->dev.of_node->name;
edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
- edac->domain = irq_domain_add_linear(pdev->dev.of_node, 64,
- &a10_eccmgr_ic_ops, edac);
+ edac->domain = irq_domain_create_linear(of_fwnode_handle(pdev->dev.of_node),
+ 64, &a10_eccmgr_ic_ops, edac);
if (!edac->domain) {
dev_err(&pdev->dev, "Error adding IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 3727e72c8c2e..7248d24c4908 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -249,6 +249,8 @@ struct altr_sdram_mc_data {
#define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94
#define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
#define A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1)
+#define A10_SYSMGR_ECC_INTMASK_SDMMCB BIT(16)
+#define A10_SYSMGR_ECC_INTMASK_DDR0 BIT(17)
#define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C
#define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 90f0eb7cc5b9..58b1482a0fbb 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2,8 +2,8 @@
#include <linux/ras.h>
#include <linux/string_choices.h>
#include "amd64_edac.h"
-#include <asm/amd_nb.h>
-#include <asm/amd_node.h>
+#include <asm/amd/nb.h>
+#include <asm/amd/node.h>
static struct edac_pci_ctl_info *pci_ctl;
@@ -2942,13 +2942,13 @@ static void dct_read_mc_regs(struct amd64_pvt *pvt)
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
* those are Read-As-Zero.
*/
- rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
+ rdmsrq(MSR_K8_TOP_MEM1, pvt->top_mem);
edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
/* Check first whether TOP_MEM2 is enabled: */
- rdmsrl(MSR_AMD64_SYSCFG, msr_val);
+ rdmsrq(MSR_AMD64_SYSCFG, msr_val);
if (msr_val & BIT(21)) {
- rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
+ rdmsrq(MSR_K8_TOP_MEM2, pvt->top_mem2);
edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
} else {
edac_dbg(0, " TOP_MEM2 disabled\n");
diff --git a/drivers/edac/bluefield_edac.c b/drivers/edac/bluefield_edac.c
index 4942a240c30f..ae3bb7afa103 100644
--- a/drivers/edac/bluefield_edac.c
+++ b/drivers/edac/bluefield_edac.c
@@ -199,8 +199,10 @@ static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
* error without the detailed information.
*/
err = bluefield_edac_readl(priv, MLXBF_SYNDROM, &dram_syndrom);
- if (err)
+ if (err) {
dev_err(priv->dev, "DRAM syndrom read failed.\n");
+ return;
+ }
serr = FIELD_GET(MLXBF_SYNDROM__SERR, dram_syndrom);
derr = FIELD_GET(MLXBF_SYNDROM__DERR, dram_syndrom);
@@ -213,20 +215,26 @@ static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
}
err = bluefield_edac_readl(priv, MLXBF_ADD_INFO, &dram_additional_info);
- if (err)
+ if (err) {
dev_err(priv->dev, "DRAM additional info read failed.\n");
+ return;
+ }
err_prank = FIELD_GET(MLXBF_ADD_INFO__ERR_PRANK, dram_additional_info);
ecc_dimm = (err_prank >= 2 && priv->dimm_ranks[0] <= 2) ? 1 : 0;
err = bluefield_edac_readl(priv, MLXBF_ERR_ADDR_0, &edea0);
- if (err)
+ if (err) {
dev_err(priv->dev, "Error addr 0 read failed.\n");
+ return;
+ }
err = bluefield_edac_readl(priv, MLXBF_ERR_ADDR_1, &edea1);
- if (err)
+ if (err) {
dev_err(priv->dev, "Error addr 1 read failed.\n");
+ return;
+ }
ecc_dimm_addr = ((u64)edea1 << 32) | edea0;
@@ -250,8 +258,10 @@ static void bluefield_edac_check(struct mem_ctl_info *mci)
return;
err = bluefield_edac_readl(priv, MLXBF_ECC_CNT, &ecc_count);
- if (err)
+ if (err) {
dev_err(priv->dev, "ECC count read failed.\n");
+ return;
+ }
single_error_count = FIELD_GET(MLXBF_ECC_CNT__SERR_CNT, ecc_count);
double_error_count = FIELD_GET(MLXBF_ECC_CNT__DERR_CNT, ecc_count);
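
For context on the FIELD_GET() accessors used throughout this handler, a small sketch with a made-up register layout:

#include <linux/bitfield.h>

#define EXAMPLE_SYNDROME_SERR	GENMASK(3, 0)	/* hypothetical 4-bit field */
#define EXAMPLE_SYNDROME_DERR	GENMASK(7, 4)	/* hypothetical 4-bit field */

static void example_decode_syndrome(u32 syndrome)
{
	/* FIELD_GET() masks and right-shifts according to the GENMASK() definition. */
	u32 serr = FIELD_GET(EXAMPLE_SYNDROME_SERR, syndrome);
	u32 derr = FIELD_GET(EXAMPLE_SYNDROME_DERR, syndrome);

	pr_debug("serr=%u derr=%u\n", serr, derr);
}
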
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 355a977019e9..a3fca2567752 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -72,12 +72,6 @@
#define I10NM_SAD_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
#define I10NM_SAD_NM_CACHEABLE(reg) GET_BITFIELD(reg, 5, 5)
-#define RETRY_RD_ERR_LOG_UC BIT(1)
-#define RETRY_RD_ERR_LOG_NOOVER BIT(14)
-#define RETRY_RD_ERR_LOG_EN BIT(15)
-#define RETRY_RD_ERR_LOG_NOOVER_UC (BIT(14) | BIT(1))
-#define RETRY_RD_ERR_LOG_OVER_UC_V (BIT(2) | BIT(1) | BIT(0))
-
static struct list_head *i10nm_edac_list;
static struct res_config *res_cfg;
@@ -85,227 +79,319 @@ static int retry_rd_err_log;
static int decoding_via_mca;
static bool mem_cfg_2lm;
-static u32 offsets_scrub_icx[] = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
-static u32 offsets_scrub_spr[] = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
-static u32 offsets_scrub_spr_hbm0[] = {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8};
-static u32 offsets_scrub_spr_hbm1[] = {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8};
-static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
-static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
-static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10};
-static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0};
-static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0};
-
-static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable,
- u32 *offsets_scrub, u32 *offsets_demand,
- u32 *offsets_demand2)
+static struct reg_rrl icx_reg_rrl_ddr = {
+ .set_num = 2,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8},
+ {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0},
+ },
+ .widths = {4, 4, 4, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x22c18, 0x22c1c, 0x22c20, 0x22c24},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl spr_reg_rrl_ddr = {
+ .set_num = 3,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND, FRE_DEMAND},
+ .offsets = {
+ {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8},
+ {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0},
+ {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x22c18, 0x22c1c, 0x22c20, 0x22c24},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl spr_reg_rrl_hbm_pch0 = {
+ .set_num = 2,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8},
+ {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x2818, 0x281c, 0x2820, 0x2824},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl spr_reg_rrl_hbm_pch1 = {
+ .set_num = 2,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8},
+ {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x2c18, 0x2c1c, 0x2c20, 0x2c24},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl gnr_reg_rrl_ddr = {
+ .set_num = 4,
+ .reg_num = 6,
+ .modes = {FRE_SCRUB, FRE_DEMAND, LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x2f10, 0x2f20, 0x2f30, 0x2f50, 0x2f60, 0xba0},
+ {0x2f14, 0x2f24, 0x2f38, 0x2f54, 0x2f64, 0xba8},
+ {0x2f18, 0x2f28, 0x2f40, 0x2f58, 0x2f68, 0xbb0},
+ {0x2f1c, 0x2f2c, 0x2f48, 0x2f5c, 0x2f6c, 0xbb8},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(14),
+ .noover_mask = BIT(15),
+ .en_mask = BIT(12),
+
+ .cecnt_num = 8,
+ .cecnt_offsets = {0x2c10, 0x2c14, 0x2c18, 0x2c1c, 0x2c20, 0x2c24, 0x2c28, 0x2c2c},
+ .cecnt_widths = {4, 4, 4, 4, 4, 4, 4, 4},
+};
+
+static u64 read_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width)
{
- u32 s, d, d2;
+ switch (width) {
+ case 4:
+ return I10NM_GET_REG32(imc, chan, offset);
+ case 8:
+ return I10NM_GET_REG64(imc, chan, offset);
+ default:
+		i10nm_printk(KERN_ERR, "Invalid read RRL 0x%x width %d\n", offset, width);
+ return 0;
+ }
+}
+
+static void write_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width, u64 val)
+{
+ switch (width) {
+ case 4:
+ return I10NM_SET_REG32(imc, chan, offset, (u32)val);
+ default:
+ i10nm_printk(KERN_ERR, "Invalid write RRL 0x%x width %d\n", offset, width);
+ }
+}
- s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
- d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
- if (offsets_demand2)
- d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);
+static void enable_rrl(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
+ int rrl_set, bool enable, u32 *rrl_ctl)
+{
+ enum rrl_mode mode = rrl->modes[rrl_set];
+ u32 offset = rrl->offsets[rrl_set][0], v;
+ u8 width = rrl->widths[0];
+ bool first, scrub;
+
+ /* First or last read error. */
+ first = (mode == FRE_SCRUB || mode == FRE_DEMAND);
+ /* Patrol scrub or on-demand read error. */
+ scrub = (mode == FRE_SCRUB || mode == LRE_SCRUB);
+
+ v = read_imc_reg(imc, chan, offset, width);
if (enable) {
- /* Save default configurations */
- imc->chan[chan].retry_rd_err_log_s = s;
- imc->chan[chan].retry_rd_err_log_d = d;
- if (offsets_demand2)
- imc->chan[chan].retry_rd_err_log_d2 = d2;
-
- s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
- s |= RETRY_RD_ERR_LOG_EN;
- d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
- d |= RETRY_RD_ERR_LOG_EN;
-
- if (offsets_demand2) {
- d2 &= ~RETRY_RD_ERR_LOG_UC;
- d2 |= RETRY_RD_ERR_LOG_NOOVER;
- d2 |= RETRY_RD_ERR_LOG_EN;
- }
+ /* Save default configurations. */
+ *rrl_ctl = v;
+ v &= ~rrl->uc_mask;
+
+ if (first)
+ v |= rrl->noover_mask;
+ else
+ v &= ~rrl->noover_mask;
+
+ if (scrub)
+ v |= rrl->en_patspr_mask;
+ else
+ v &= ~rrl->en_patspr_mask;
+
+ v |= rrl->en_mask;
} else {
- /* Restore default configurations */
- if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
- s |= RETRY_RD_ERR_LOG_UC;
- if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER)
- s |= RETRY_RD_ERR_LOG_NOOVER;
- if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN))
- s &= ~RETRY_RD_ERR_LOG_EN;
- if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC)
- d |= RETRY_RD_ERR_LOG_UC;
- if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER)
- d |= RETRY_RD_ERR_LOG_NOOVER;
- if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
- d &= ~RETRY_RD_ERR_LOG_EN;
-
- if (offsets_demand2) {
- if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC)
- d2 |= RETRY_RD_ERR_LOG_UC;
- if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER))
- d2 &= ~RETRY_RD_ERR_LOG_NOOVER;
- if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN))
- d2 &= ~RETRY_RD_ERR_LOG_EN;
+ /* Restore default configurations. */
+ if (*rrl_ctl & rrl->uc_mask)
+ v |= rrl->uc_mask;
+
+ if (first) {
+ if (!(*rrl_ctl & rrl->noover_mask))
+ v &= ~rrl->noover_mask;
+ } else {
+ if (*rrl_ctl & rrl->noover_mask)
+ v |= rrl->noover_mask;
}
+
+ if (scrub) {
+ if (!(*rrl_ctl & rrl->en_patspr_mask))
+ v &= ~rrl->en_patspr_mask;
+ } else {
+ if (*rrl_ctl & rrl->en_patspr_mask)
+ v |= rrl->en_patspr_mask;
+ }
+
+ if (!(*rrl_ctl & rrl->en_mask))
+ v &= ~rrl->en_mask;
}
- I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
- I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
- if (offsets_demand2)
- I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
+ write_imc_reg(imc, chan, offset, width, v);
+}
+
+static void enable_rrls(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
+ bool enable, u32 *rrl_ctl)
+{
+ for (int i = 0; i < rrl->set_num; i++)
+ enable_rrl(imc, chan, rrl, i, enable, rrl_ctl + i);
+}
+
+static void enable_rrls_ddr(struct skx_imc *imc, bool enable)
+{
+ struct reg_rrl *rrl_ddr = res_cfg->reg_rrl_ddr;
+ int i, chan_num = res_cfg->ddr_chan_num;
+ struct skx_channel *chan = imc->chan;
+
+ if (!imc->mbase)
+ return;
+
+ for (i = 0; i < chan_num; i++)
+ enable_rrls(imc, i, rrl_ddr, enable, chan[i].rrl_ctl[0]);
+}
+
+static void enable_rrls_hbm(struct skx_imc *imc, bool enable)
+{
+ struct reg_rrl **rrl_hbm = res_cfg->reg_rrl_hbm;
+ int i, chan_num = res_cfg->hbm_chan_num;
+ struct skx_channel *chan = imc->chan;
+
+ if (!imc->mbase || !imc->hbm_mc || !rrl_hbm[0] || !rrl_hbm[1])
+ return;
+
+ for (i = 0; i < chan_num; i++) {
+ enable_rrls(imc, i, rrl_hbm[0], enable, chan[i].rrl_ctl[0]);
+ enable_rrls(imc, i, rrl_hbm[1], enable, chan[i].rrl_ctl[1]);
+ }
}
static void enable_retry_rd_err_log(bool enable)
{
- int i, j, imc_num, chan_num;
- struct skx_imc *imc;
struct skx_dev *d;
+ int i, imc_num;
edac_dbg(2, "\n");
list_for_each_entry(d, i10nm_edac_list, list) {
imc_num = res_cfg->ddr_imc_num;
- chan_num = res_cfg->ddr_chan_num;
-
- for (i = 0; i < imc_num; i++) {
- imc = &d->imc[i];
- if (!imc->mbase)
- continue;
-
- for (j = 0; j < chan_num; j++)
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub,
- res_cfg->offsets_demand,
- res_cfg->offsets_demand2);
- }
+ for (i = 0; i < imc_num; i++)
+ enable_rrls_ddr(&d->imc[i], enable);
imc_num += res_cfg->hbm_imc_num;
- chan_num = res_cfg->hbm_chan_num;
-
- for (; i < imc_num; i++) {
- imc = &d->imc[i];
- if (!imc->mbase || !imc->hbm_mc)
- continue;
-
- for (j = 0; j < chan_num; j++) {
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub_hbm0,
- res_cfg->offsets_demand_hbm0,
- NULL);
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub_hbm1,
- res_cfg->offsets_demand_hbm1,
- NULL);
- }
- }
+ for (; i < imc_num; i++)
+ enable_rrls_hbm(&d->imc[i], enable);
}
}
static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
int len, bool scrub_err)
{
+ int i, j, n, ch = res->channel, pch = res->cs & 1;
struct skx_imc *imc = &res->dev->imc[res->imc];
- u32 log0, log1, log2, log3, log4;
- u32 corr0, corr1, corr2, corr3;
- u32 lxg0, lxg1, lxg3, lxg4;
- u32 *xffsets = NULL;
- u64 log2a, log5;
- u64 lxg2a, lxg5;
- u32 *offsets;
- int n, pch;
+ u64 log, corr, status_mask;
+ struct reg_rrl *rrl;
+ bool scrub;
+ u32 offset;
+ u8 width;
if (!imc->mbase)
return;
- if (imc->hbm_mc) {
- pch = res->cs & 1;
+ rrl = imc->hbm_mc ? res_cfg->reg_rrl_hbm[pch] : res_cfg->reg_rrl_ddr;
- if (pch)
- offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 :
- res_cfg->offsets_demand_hbm1;
- else
- offsets = scrub_err ? res_cfg->offsets_scrub_hbm0 :
- res_cfg->offsets_demand_hbm0;
- } else {
- if (scrub_err) {
- offsets = res_cfg->offsets_scrub;
- } else {
- offsets = res_cfg->offsets_demand;
- xffsets = res_cfg->offsets_demand2;
- }
- }
+ if (!rrl)
+ return;
- log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
- log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
- log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]);
- log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
- log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);
-
- if (xffsets) {
- lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]);
- lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]);
- lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]);
- lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]);
- lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]);
- }
+ status_mask = rrl->over_mask | rrl->uc_mask | rrl->v_mask;
- if (res_cfg->type == SPR) {
- log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
- n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx",
- log0, log1, log2a, log3, log4, log5);
+ n = snprintf(msg, len, " retry_rd_err_log[");
+ for (i = 0; i < rrl->set_num; i++) {
+ scrub = (rrl->modes[i] == FRE_SCRUB || rrl->modes[i] == LRE_SCRUB);
+ if (scrub_err != scrub)
+ continue;
- if (len - n > 0) {
- if (xffsets) {
- lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]);
- n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]",
- lxg0, lxg1, lxg2a, lxg3, lxg4, lxg5);
- } else {
- n += snprintf(msg + n, len - n, "]");
- }
- }
- } else {
- log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
- n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
- log0, log1, log2, log3, log4, log5);
- }
+ for (j = 0; j < rrl->reg_num && len - n > 0; j++) {
+ offset = rrl->offsets[i][j];
+ width = rrl->widths[j];
+ log = read_imc_reg(imc, ch, offset, width);
- if (imc->hbm_mc) {
- if (pch) {
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24);
- } else {
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824);
+ if (width == 4)
+ n += snprintf(msg + n, len - n, "%.8llx ", log);
+ else
+ n += snprintf(msg + n, len - n, "%.16llx ", log);
+
+ /* Clear RRL status if RRL in Linux control mode. */
+ if (retry_rd_err_log == 2 && !j && (log & status_mask))
+ write_imc_reg(imc, ch, offset, width, log & ~status_mask);
}
- } else {
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
}
- if (len - n > 0)
- snprintf(msg + n, len - n,
- " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
- corr0 & 0xffff, corr0 >> 16,
- corr1 & 0xffff, corr1 >> 16,
- corr2 & 0xffff, corr2 >> 16,
- corr3 & 0xffff, corr3 >> 16);
-
- /* Clear status bits */
- if (retry_rd_err_log == 2) {
- if (log0 & RETRY_RD_ERR_LOG_OVER_UC_V) {
- log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
- I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
+ /* Move back one space. */
+ n--;
+ n += snprintf(msg + n, len - n, "]");
+
+ if (len - n > 0) {
+ n += snprintf(msg + n, len - n, " correrrcnt[");
+ for (i = 0; i < rrl->cecnt_num && len - n > 0; i++) {
+ offset = rrl->cecnt_offsets[i];
+ width = rrl->cecnt_widths[i];
+ corr = read_imc_reg(imc, ch, offset, width);
+
+ /* CPUs {ICX,SPR} encode two counters per 4-byte CORRERRCNT register. */
+ if (res_cfg->type <= SPR) {
+ n += snprintf(msg + n, len - n, "%.4llx %.4llx ",
+ corr & 0xffff, corr >> 16);
+ } else {
+ /* CPUs {GNR} encode one counter per CORRERRCNT register. */
+ if (width == 4)
+ n += snprintf(msg + n, len - n, "%.8llx ", corr);
+ else
+ n += snprintf(msg + n, len - n, "%.16llx ", corr);
+ }
}
- if (xffsets && (lxg0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
- lxg0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
- I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0);
- }
+ /* Move back one space. */
+ n--;
+ n += snprintf(msg + n, len - n, "]");
}
}
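
[Editorial sketch, not part of the patch] The rework above replaces hard-coded per-platform register reads with a table-driven walk of struct reg_rrl: widths[] selects 32- vs 64-bit formatting, and on ICX/SPR each 4-byte CORRERRCNT register packs two 16-bit counters. The following minimal standalone userspace sketch (fabricated register values, no kernel APIs) shows the shape of the resulting log message:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Fabricated RRL register contents and widths for one register set. */
	uint64_t logs[6] = { 0x80000001, 0x1234, 0xdeadbeefcafef00dULL, 0x0, 0x1, 0x55aa };
	uint8_t widths[6] = { 4, 4, 8, 4, 4, 8 };
	uint32_t corr[4] = { 0x00020001, 0, 0, 0 };
	char msg[256];
	int n = 0, i;

	n += snprintf(msg + n, sizeof(msg) - n, " retry_rd_err_log[");
	for (i = 0; i < 6; i++)
		n += snprintf(msg + n, sizeof(msg) - n,
			      widths[i] == 4 ? "%.8llx " : "%.16llx ",
			      (unsigned long long)logs[i]);
	n--;				/* drop the trailing space, as the driver does */
	n += snprintf(msg + n, sizeof(msg) - n, "]");

	n += snprintf(msg + n, sizeof(msg) - n, " correrrcnt[");
	for (i = 0; i < 4; i++)		/* ICX/SPR: two 16-bit counters per register */
		n += snprintf(msg + n, sizeof(msg) - n, "%.4x %.4x ",
			      (unsigned)(corr[i] & 0xffff), (unsigned)(corr[i] >> 16));
	n--;
	snprintf(msg + n, sizeof(msg) - n, "]");

	puts(msg);
	return 0;
}

Compiled and run, this prints a line of the same shape as the driver's decoded-error output, i.e. " retry_rd_err_log[...] correrrcnt[...]".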
@@ -870,8 +956,7 @@ static struct res_config i10nm_cfg0 = {
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
- .offsets_scrub = offsets_scrub_icx,
- .offsets_demand = offsets_demand_icx,
+ .reg_rrl_ddr = &icx_reg_rrl_ddr,
};
static struct res_config i10nm_cfg1 = {
@@ -889,8 +974,7 @@ static struct res_config i10nm_cfg1 = {
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
- .offsets_scrub = offsets_scrub_icx,
- .offsets_demand = offsets_demand_icx,
+ .reg_rrl_ddr = &icx_reg_rrl_ddr,
};
static struct res_config spr_cfg = {
@@ -913,13 +997,9 @@ static struct res_config spr_cfg = {
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x300,
- .offsets_scrub = offsets_scrub_spr,
- .offsets_scrub_hbm0 = offsets_scrub_spr_hbm0,
- .offsets_scrub_hbm1 = offsets_scrub_spr_hbm1,
- .offsets_demand = offsets_demand_spr,
- .offsets_demand2 = offsets_demand2_spr,
- .offsets_demand_hbm0 = offsets_demand_spr_hbm0,
- .offsets_demand_hbm1 = offsets_demand_spr_hbm1,
+ .reg_rrl_ddr = &spr_reg_rrl_ddr,
+ .reg_rrl_hbm[0] = &spr_reg_rrl_hbm_pch0,
+ .reg_rrl_hbm[1] = &spr_reg_rrl_hbm_pch1,
};
static struct res_config gnr_cfg = {
@@ -937,6 +1017,7 @@ static struct res_config gnr_cfg = {
.uracu_bdf = {0, 0, 1},
.ddr_mdev_bdf = {0, 5, 1},
.sad_all_offset = 0x300,
+ .reg_rrl_ddr = &gnr_reg_rrl_ddr,
};
static const struct x86_cpu_id i10nm_cpuids[] = {
@@ -1108,7 +1189,7 @@ static int __init i10nm_init(void)
mce_register_decode_chain(&i10nm_mce_dec);
skx_setup_debug("i10nm_test");
- if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+ if (retry_rd_err_log && res_cfg->reg_rrl_ddr) {
skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log);
if (retry_rd_err_log == 2)
enable_retry_rd_err_log(true);
@@ -1128,7 +1209,7 @@ static void __exit i10nm_exit(void)
{
edac_dbg(2, "\n");
- if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+ if (retry_rd_err_log && res_cfg->reg_rrl_ddr) {
skx_set_decode(NULL, NULL);
if (retry_rd_err_log == 2)
enable_retry_rd_err_log(false);
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 204834149579..a53612be4b2f 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -52,6 +52,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/mce.h>
+#include <asm/msr.h>
#include "edac_module.h"
#define EDAC_MOD_STR "ie31200_edac"
@@ -89,6 +90,10 @@
#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1 0xa703
#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2 0x4640
#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3 0x4630
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4 0xa700
+
+/* Alder Lake-S */
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1 0x4660
#define IE31200_RANKS_PER_CHANNEL 8
#define IE31200_DIMMS_PER_CHANNEL 2
@@ -734,6 +739,8 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1), (kernel_ulong_t)&rpl_s_cfg},
{ 0, } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 5807517ee32d..1930dc00c791 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -127,6 +127,7 @@
static const struct res_config {
bool machine_check;
+ /* The number of present memory controllers. */
int num_imc;
u32 imc_base;
u32 cmf_base;
@@ -240,6 +241,12 @@ static struct work_struct ecclog_work;
#define DID_ADL_N_SKU11 0x467c
#define DID_ADL_N_SKU12 0x4632
+/* Compute die IDs for Arizona Beach with IBECC */
+#define DID_AZB_SKU1 0x4676
+
+/* Compute die IDs for Amston Lake with IBECC */
+#define DID_ASL_SKU1 0x464a
+
/* Compute die IDs for Raptor Lake-P with IBECC */
#define DID_RPL_P_SKU1 0xa706
#define DID_RPL_P_SKU2 0xa707
@@ -595,6 +602,8 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU10), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU11), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU12), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_AZB_SKU1), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ASL_SKU1), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU1), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU2), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU3), (kernel_ulong_t)&rpl_p_cfg },
@@ -1201,23 +1210,21 @@ static void igen6_check(struct mem_ctl_info *mci)
irq_work_queue(&ecclog_irq_work);
}
-static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
+/* Check whether the memory controller is absent. */
+static bool igen6_imc_absent(void __iomem *window)
+{
+ return readl(window + MAD_INTER_CHANNEL_OFFSET) == ~0;
+}
+
+static int igen6_register_mci(int mc, void __iomem *window, struct pci_dev *pdev)
{
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
struct igen6_imc *imc;
- void __iomem *window;
int rc;
edac_dbg(2, "\n");
- mchbar += mc * MCHBAR_SIZE;
- window = ioremap(mchbar, MCHBAR_SIZE);
- if (!window) {
- igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", mchbar);
- return -ENODEV;
- }
-
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = NUM_CHANNELS;
layers[0].is_virt_csrow = false;
@@ -1283,7 +1290,6 @@ fail3:
fail2:
edac_mc_free(mci);
fail:
- iounmap(window);
return rc;
}
@@ -1309,6 +1315,56 @@ static void igen6_unregister_mcis(void)
}
}
+static int igen6_register_mcis(struct pci_dev *pdev, u64 mchbar)
+{
+ void __iomem *window;
+ int lmc, pmc, rc;
+ u64 base;
+
+ for (lmc = 0, pmc = 0; pmc < NUM_IMC; pmc++) {
+ base = mchbar + pmc * MCHBAR_SIZE;
+ window = ioremap(base, MCHBAR_SIZE);
+ if (!window) {
+ igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx for mc%d\n", base, pmc);
+ rc = -ENOMEM;
+ goto out_unregister_mcis;
+ }
+
+ if (igen6_imc_absent(window)) {
+ iounmap(window);
+ edac_dbg(2, "Skip absent mc%d\n", pmc);
+ continue;
+ }
+
+ rc = igen6_register_mci(lmc, window, pdev);
+ if (rc)
+ goto out_iounmap;
+
+ /* Done, if all present MCs are detected and registered. */
+ if (++lmc >= res_cfg->num_imc)
+ break;
+ }
+
+ if (!lmc) {
+ igen6_printk(KERN_ERR, "No mc found.\n");
+ return -ENODEV;
+ }
+
+ if (lmc < res_cfg->num_imc)
+ igen6_printk(KERN_WARNING, "Expected %d mcs, but only %d detected.",
+ res_cfg->num_imc, lmc);
+
+ return 0;
+
+out_iounmap:
+ iounmap(window);
+
+out_unregister_mcis:
+ igen6_unregister_mcis();
+
+ return rc;
+}
+
static int igen6_mem_slice_setup(u64 mchbar)
{
struct igen6_imc *imc = &igen6_pvt->imc[0];
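
[Editorial sketch, not part of the patch] The new igen6_register_mcis() above scans physical MCHBAR slots, skips absent controllers (whose MAD_INTER_CHANNEL register reads all ones), and assigns consecutive logical MC indices to the present ones. A minimal standalone userspace sketch of that enumeration, with hypothetical probe results standing in for ioremap()/readl():

#include <stdbool.h>
#include <stdio.h>

#define NUM_IMC		2	/* physical slots scanned */
#define EXPECTED_IMC	2	/* analogue of res_cfg->num_imc */

static bool imc_absent(int pmc)
{
	/* Stand-in for readl(window + MAD_INTER_CHANNEL_OFFSET) == ~0. */
	return pmc == 0;	/* pretend physical slot 0 is fused off */
}

int main(void)
{
	int lmc = 0, pmc;

	for (pmc = 0; pmc < NUM_IMC; pmc++) {
		if (imc_absent(pmc)) {
			printf("skip absent mc%d\n", pmc);
			continue;
		}
		printf("register physical mc%d as logical mc%d\n", pmc, lmc);
		if (++lmc >= EXPECTED_IMC)
			break;
	}

	if (!lmc)
		printf("no mc found\n");
	else if (lmc < EXPECTED_IMC)
		printf("expected %d mcs, only %d detected\n", EXPECTED_IMC, lmc);
	return 0;
}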
@@ -1405,7 +1461,7 @@ static void opstate_set(const struct res_config *cfg, const struct pci_device_id
static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u64 mchbar;
- int i, rc;
+ int rc;
edac_dbg(2, "\n");
@@ -1421,11 +1477,9 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
opstate_set(res_cfg, ent);
- for (i = 0; i < res_cfg->num_imc; i++) {
- rc = igen6_register_mci(i, mchbar, pdev);
- if (rc)
- goto fail2;
- }
+ rc = igen6_register_mcis(pdev, mchbar);
+ if (rc)
+ goto fail;
if (res_cfg->num_imc > 1) {
rc = igen6_mem_slice_setup(mchbar);
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 50d74d3bf0f5..af3c12284a1e 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include <asm/cpu.h>
+#include <asm/msr.h>
#include "mce_amd.h"
diff --git a/drivers/edac/mem_repair.c b/drivers/edac/mem_repair.c
index 3b1a845457b0..d1a8caa85369 100755
--- a/drivers/edac/mem_repair.c
+++ b/drivers/edac/mem_repair.c
@@ -45,6 +45,15 @@ struct edac_mem_repair_context {
struct attribute_group group;
};
+const char * const edac_repair_type[] = {
+ [EDAC_REPAIR_PPR] = "ppr",
+ [EDAC_REPAIR_CACHELINE_SPARING] = "cacheline-sparing",
+ [EDAC_REPAIR_ROW_SPARING] = "row-sparing",
+ [EDAC_REPAIR_BANK_SPARING] = "bank-sparing",
+ [EDAC_REPAIR_RANK_SPARING] = "rank-sparing",
+};
+EXPORT_SYMBOL_GPL(edac_repair_type);
+
#define TO_MR_DEV_ATTR(_dev_attr) \
container_of(_dev_attr, struct edac_mem_repair_dev_attr, dev_attr)
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index fa5b442b1844..c9ade45c1a99 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -116,6 +116,7 @@ EXPORT_SYMBOL_GPL(skx_adxl_get);
void skx_adxl_put(void)
{
+ adxl_component_count = 0;
kfree(adxl_values);
kfree(adxl_msg);
}
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index ca5408803f87..ec4966f7ea40 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -79,6 +79,47 @@
*/
#define MCACOD_EXT_MEM_ERR 0x280
+/* Max RRL register sets per {,sub-,pseudo-}channel. */
+#define NUM_RRL_SET 4
+/* Max RRL registers per set. */
+#define NUM_RRL_REG 6
+/* Max correctable error count registers. */
+#define NUM_CECNT_REG 8
+
+/* Modes of RRL register set. */
+enum rrl_mode {
+ /* Last read error from patrol scrub. */
+ LRE_SCRUB,
+ /* Last read error from demand. */
+ LRE_DEMAND,
+ /* First read error from patrol scrub. */
+ FRE_SCRUB,
+ /* First read error from demand. */
+ FRE_DEMAND,
+};
+
+/* RRL registers per {,sub-,pseudo-}channel. */
+struct reg_rrl {
+ /* RRL register parts. */
+ int set_num, reg_num;
+ enum rrl_mode modes[NUM_RRL_SET];
+ u32 offsets[NUM_RRL_SET][NUM_RRL_REG];
+ /* RRL register widths in bytes per set. */
+ u8 widths[NUM_RRL_REG];
+ /* RRL control bits of the first register per set. */
+ u32 v_mask;
+ u32 uc_mask;
+ u32 over_mask;
+ u32 en_patspr_mask;
+ u32 noover_mask;
+ u32 en_mask;
+
+ /* CORRERRCNT register parts. */
+ int cecnt_num;
+ u32 cecnt_offsets[NUM_CECNT_REG];
+ u8 cecnt_widths[NUM_CECNT_REG];
+};
+
/*
* Each cpu socket contains some pci devices that provide global
* information, and also some that are local to each of the two
@@ -117,9 +158,11 @@ struct skx_dev {
struct skx_channel {
struct pci_dev *cdev;
struct pci_dev *edev;
- u32 retry_rd_err_log_s;
- u32 retry_rd_err_log_d;
- u32 retry_rd_err_log_d2;
+ /*
+ * Two groups of RRL control registers per channel to save default RRL
+ * settings of two {sub-,pseudo-}channels in Linux RRL control mode.
+ */
+ u32 rrl_ctl[2][NUM_RRL_SET];
struct skx_dimm {
u8 close_pg;
u8 bank_xor_enable;
@@ -232,14 +275,10 @@ struct res_config {
/* HBM mdev device BDF */
struct pci_bdf hbm_mdev_bdf;
int sad_all_offset;
- /* Offsets of retry_rd_err_log registers */
- u32 *offsets_scrub;
- u32 *offsets_scrub_hbm0;
- u32 *offsets_scrub_hbm1;
- u32 *offsets_demand;
- u32 *offsets_demand2;
- u32 *offsets_demand_hbm0;
- u32 *offsets_demand_hbm1;
+ /* RRL register sets per DDR channel */
+ struct reg_rrl *reg_rrl_ddr;
+ /* RRL register sets per HBM channel */
+ struct reg_rrl *reg_rrl_hbm[2];
};
typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 905c82e26ce7..a5f5e250223a 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -83,7 +83,7 @@ config FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
- depends on PCI && FIREWIRE && MMU
+ depends on PCI && FIREWIRE
help
Enable this driver if you have a FireWire controller based
on the OHCI specification. For all practical purposes, this
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index b0f9ef6ac6df..18cacb9edbbc 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -431,7 +431,7 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
fw_send_request(card, &t, tcode, destination_id, generation, speed,
offset, payload, length, transaction_callback, &d);
wait_for_completion(&d.done);
- destroy_timer_on_stack(&t.split_timeout_timer);
+ timer_destroy_on_stack(&t.split_timeout_timer);
return d.rcode;
}
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index aadc395ee168..bbd2155d8483 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -31,7 +31,6 @@ config ARM_SCPI_PROTOCOL
config ARM_SDE_INTERFACE
bool "ARM Software Delegated Exception Interface (SDEI)"
depends on ARM64
- depends on ACPI_APEI_GHES
help
The Software Delegated Exception Interface (SDEI) is an ARM
standard for registering callbacks from the platform firmware
@@ -268,6 +267,23 @@ config TURRIS_MOX_RWTM
other manufacturing data and also utilize the Entropy Bit Generator
for hardware random number generation.
+if TURRIS_MOX_RWTM
+
+config TURRIS_MOX_RWTM_KEYCTL
+ bool "Turris Mox rWTM ECDSA message signing"
+ default y
+ depends on KEYS
+ depends on ASYMMETRIC_KEY_TYPE
+ select CZNIC_PLATFORMS
+ select TURRIS_SIGNING_KEY
+ help
+ Say Y here to add support for ECDSA message signing with board private
+ key (each Turris Mox has an ECDSA private key generated in the secure
+ coprocessor when manufactured). This functionality is exposed via the
+ keyctl() syscall.
+
+endif # TURRIS_MOX_RWTM
+
source "drivers/firmware/arm_ffa/Kconfig"
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/cirrus/Kconfig"
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 19295282de24..fe55613a8ea9 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -299,7 +299,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid);
}
- ffa_rx_release();
+ if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY))
+ ffa_rx_release();
mutex_unlock(&drv_info->rx_lock);
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index dabd874641d0..e3fb36825978 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -69,6 +69,19 @@ config ARM_SCMI_DEBUG_COUNTERS
such useful debug counters. This can be helpful for debugging and
SCMI monitoring.
+config ARM_SCMI_QUIRKS
+ bool "Enable SCMI Quirks framework"
+ depends on JUMP_LABEL || COMPILE_TEST
+ default y
+ help
+ Enables support for the SCMI Quirks framework to work around SCMI platform
+ firmware bugs on systems already deployed in the wild.
+
+ The framework allows the definition of platform-specific code quirks
+ that will be associated and enabled only on the desired platforms
+ depending on the SCMI firmware advertised versions and/or machine
+ compatibles.
+
source "drivers/firmware/arm_scmi/transports/Kconfig"
source "drivers/firmware/arm_scmi/vendors/imx/Kconfig"
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 9ac81adff567..780cd62b2f78 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -3,6 +3,7 @@ scmi-bus-y = bus.o
scmi-core-objs := $(scmi-bus-y)
scmi-driver-y = driver.o notify.o
+scmi-driver-$(CONFIG_ARM_SCMI_QUIRKS) += quirks.o
scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 7af01664ce7e..1adef0389475 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -201,60 +201,59 @@ scmi_protocol_table_unregister(const struct scmi_device_id *id_table)
scmi_protocol_device_unrequest(entry);
}
-static const struct scmi_device_id *
-scmi_dev_match_id(struct scmi_device *scmi_dev, const struct scmi_driver *scmi_drv)
+static int scmi_dev_match_by_id_table(struct scmi_device *scmi_dev,
+ const struct scmi_device_id *id_table)
{
- const struct scmi_device_id *id = scmi_drv->id_table;
-
- if (!id)
- return NULL;
-
- for (; id->protocol_id; id++)
- if (id->protocol_id == scmi_dev->protocol_id) {
- if (!id->name)
- return id;
- else if (!strcmp(id->name, scmi_dev->name))
- return id;
- }
+ if (!id_table || !id_table->name)
+ return 0;
+
+ /* Always skip transport devices from matching */
+ for (; id_table->protocol_id && id_table->name; id_table++)
+ if (id_table->protocol_id == scmi_dev->protocol_id &&
+ strncmp(scmi_dev->name, "__scmi_transport_device", 23) &&
+ !strcmp(id_table->name, scmi_dev->name))
+ return 1;
+ return 0;
+}
- return NULL;
+static int scmi_dev_match_id(struct scmi_device *scmi_dev,
+ const struct scmi_driver *scmi_drv)
+{
+ return scmi_dev_match_by_id_table(scmi_dev, scmi_drv->id_table);
}
static int scmi_dev_match(struct device *dev, const struct device_driver *drv)
{
const struct scmi_driver *scmi_drv = to_scmi_driver(drv);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
- const struct scmi_device_id *id;
- id = scmi_dev_match_id(scmi_dev, scmi_drv);
- if (id)
- return 1;
-
- return 0;
+ return scmi_dev_match_id(scmi_dev, scmi_drv);
}
static int scmi_match_by_id_table(struct device *dev, const void *data)
{
- struct scmi_device *sdev = to_scmi_dev(dev);
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
const struct scmi_device_id *id_table = data;
- return sdev->protocol_id == id_table->protocol_id &&
- (id_table->name && !strcmp(sdev->name, id_table->name));
+ return scmi_dev_match_by_id_table(scmi_dev, id_table);
}
static struct scmi_device *scmi_child_dev_find(struct device *parent,
int prot_id, const char *name)
{
- struct scmi_device_id id_table;
+ struct scmi_device_id id_table[2] = { 0 };
struct device *dev;
- id_table.protocol_id = prot_id;
- id_table.name = name;
+ id_table[0].protocol_id = prot_id;
+ id_table[0].name = name;
dev = device_find_child(parent, &id_table, scmi_match_by_id_table);
if (!dev)
return NULL;
+ /* Drop the refcnt bumped implicitly by device_find_child */
+ put_device(dev);
+
return to_scmi_dev(dev);
}
@@ -460,6 +459,20 @@ put_dev:
return NULL;
}
+static struct scmi_device *
+_scmi_device_create(struct device_node *np, struct device *parent,
+ int protocol, const char *name)
+{
+ struct scmi_device *sdev;
+
+ sdev = __scmi_device_create(np, parent, protocol, name);
+ if (!sdev)
+ pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
+ of_node_full_name(parent->of_node), protocol, name);
+
+ return sdev;
+}
+
/**
* scmi_device_create - A method to create one or more SCMI devices
*
@@ -492,7 +505,7 @@ struct scmi_device *scmi_device_create(struct device_node *np,
struct scmi_device *scmi_dev = NULL;
if (name)
- return __scmi_device_create(np, parent, protocol, name);
+ return _scmi_device_create(np, parent, protocol, name);
mutex_lock(&scmi_requested_devices_mtx);
phead = idr_find(&scmi_requested_devices, protocol);
@@ -506,18 +519,13 @@ struct scmi_device *scmi_device_create(struct device_node *np,
list_for_each_entry(rdev, phead, node) {
struct scmi_device *sdev;
- sdev = __scmi_device_create(np, parent,
- rdev->id_table->protocol_id,
- rdev->id_table->name);
- /* Report errors and carry on... */
+ sdev = _scmi_device_create(np, parent,
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
if (sdev)
scmi_dev = sdev;
- else
- pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node),
- rdev->id_table->protocol_id,
- rdev->id_table->name);
}
+
mutex_unlock(&scmi_requested_devices_mtx);
return scmi_dev;
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 2ed2279388f0..afa7981efe82 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -11,6 +11,7 @@
#include "protocols.h"
#include "notify.h"
+#include "quirks.h"
/* Updated only after ALL the mandatory features for that version are merged */
#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000
@@ -429,6 +430,23 @@ static void iter_clk_describe_prepare_message(void *message,
msg->rate_index = cpu_to_le32(desc_index);
}
+#define QUIRK_OUT_OF_SPEC_TRIPLET \
+ ({ \
+ /* \
+ * A known quirk: a triplet is returned but num_returned != 3 \
+ * Check for a safe payload size and fix. \
+ */ \
+ if (st->num_returned != 3 && st->num_remaining == 0 && \
+ st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { \
+ st->num_returned = 3; \
+ st->num_remaining = 0; \
+ } else { \
+ dev_err(p->dev, \
+ "Cannot fix out-of-spec reply !\n"); \
+ return -EPROTO; \
+ } \
+ })
+
static int
iter_clk_describe_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
@@ -450,19 +468,8 @@ iter_clk_describe_update_state(struct scmi_iterator_state *st,
p->clk->name, st->num_returned, st->num_remaining,
st->rx_len);
- /*
- * A known quirk: a triplet is returned but num_returned != 3
- * Check for a safe payload size and fix.
- */
- if (st->num_returned != 3 && st->num_remaining == 0 &&
- st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
- st->num_returned = 3;
- st->num_remaining = 0;
- } else {
- dev_err(p->dev,
- "Cannot fix out-of-spec reply !\n");
- return -EPROTO;
- }
+ SCMI_QUIRK(clock_rates_triplet_out_of_spec,
+ QUIRK_OUT_OF_SPEC_TRIPLET);
}
return 0;
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 10ea7962323e..dab758c5fdea 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -475,6 +475,7 @@ static int __tag##_probe(struct platform_device *pdev) \
if (ret) \
goto err; \
\
+ spdev->dev.parent = dev; \
ret = platform_device_add(spdev); \
if (ret) \
goto err; \
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 1c75a4c9c371..395fe9289035 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -11,7 +11,7 @@
* various power domain DVFS including the core/cluster, certain system
* clocks configuration, thermal sensors and many others.
*
- * Copyright (C) 2018-2024 ARM Ltd.
+ * Copyright (C) 2018-2025 ARM Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -38,6 +38,7 @@
#include "common.h"
#include "notify.h"
+#include "quirks.h"
#include "raw_mode.h"
@@ -439,14 +440,8 @@ static void scmi_create_protocol_devices(struct device_node *np,
struct scmi_info *info,
int prot_id, const char *name)
{
- struct scmi_device *sdev;
-
mutex_lock(&info->devreq_mtx);
- sdev = scmi_device_create(np, info->dev, prot_id, name);
- if (name && !sdev)
- dev_err(info->dev,
- "failed to create device for protocol 0x%X (%s)\n",
- prot_id, name);
+ scmi_device_create(np, info->dev, prot_id, name);
mutex_unlock(&info->devreq_mtx);
}
@@ -1190,7 +1185,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
* RX path since it will be already queued at the end of the TX
* poll loop.
*/
- if (!xfer->hdr.poll_completion)
+ if (!xfer->hdr.poll_completion ||
+ xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
scmi_raw_message_report(info->raw, xfer,
SCMI_RAW_REPLY_QUEUE,
cinfo->id);
@@ -1248,7 +1244,8 @@ static void xfer_put(const struct scmi_protocol_handle *ph,
}
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
- struct scmi_xfer *xfer, ktime_t stop)
+ struct scmi_xfer *xfer, ktime_t stop,
+ bool *ooo)
{
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
@@ -1257,7 +1254,7 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
* in case of out-of-order receptions of delayed responses
*/
return info->desc->ops->poll_done(cinfo, xfer) ||
- try_wait_for_completion(&xfer->done) ||
+ (*ooo = try_wait_for_completion(&xfer->done)) ||
ktime_after(ktime_get(), stop);
}
@@ -1274,15 +1271,17 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
* itself to support synchronous commands replies.
*/
if (!desc->sync_cmds_completed_on_ret) {
+ bool ooo = false;
+
/*
* Poll on xfer using transport provided .poll_done();
* assumes no completion interrupt was available.
*/
ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
- spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
- xfer, stop));
- if (ktime_after(ktime_get(), stop)) {
+ spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer,
+ stop, &ooo));
+ if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) {
dev_err(dev,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
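
[Editorial sketch, not part of the patch] The hunk above records whether the completion fired out of order and, on loop exit, re-checks the actual completion conditions instead of comparing against the deadline, so a reply landing on the final iteration is not misreported as a timeout. A generic userspace sketch of the same pattern, with plain booleans and CLOCK_MONOTONIC standing in for the transport's poll_done() and the xfer completion:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool poll_done(void)        { return false; }  /* transport-level check */
static bool completion_fired(void) { return true;  }  /* out-of-order delayed resp */

int main(void)
{
	struct timespec ts;
	bool ooo = false;
	double stop;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	stop = ts.tv_sec + ts.tv_nsec / 1e9 + 0.01;	/* 10ms budget */

	do {
		clock_gettime(CLOCK_MONOTONIC, &ts);
	} while (!poll_done() &&
		 !(ooo = completion_fired()) &&
		 ts.tv_sec + ts.tv_nsec / 1e9 < stop);

	/* Old scheme: "if (now > stop) -> timeout" could still fire even though
	 * one of the conditions above became true on the final pass. */
	if (!ooo && !poll_done())
		puts("timed out");
	else
		puts("completed");
	return 0;
}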
@@ -1735,6 +1734,39 @@ static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
}
/**
+ * scmi_protocol_msg_check - Check protocol message attributes
+ *
+ * @ph: A reference to the protocol handle.
+ * @message_id: The ID of the message to check.
+ * @attributes: A parameter to optionally return the retrieved message
+ * attributes, in case of Success.
+ *
+ * A helper to check protocol message attributes for a specific protocol
+ * and message pair.
+ *
+ * Return: 0 on SUCCESS
+ */
+static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
+ u32 message_id, u32 *attributes)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
+ sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(message_id, t->tx.buf);
+ ret = do_xfer(ph, t);
+ if (!ret && attributes)
+ *attributes = get_unaligned_le32(t->rx.buf);
+ xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
* struct scmi_iterator - Iterator descriptor
* @msg: A reference to the message TX buffer; filled by @prepare_message with
* a proper custom command payload for each multi-part command request.
@@ -1866,6 +1898,13 @@ struct scmi_msg_resp_desc_fc {
__le32 db_preserve_hmask;
};
+#define QUIRK_PERF_FC_FORCE \
+ ({ \
+ if (pi->proto->id == SCMI_PROTOCOL_PERF && \
+ message_id == 0x8 /* PERF_LEVEL_GET */) \
+ attributes |= BIT(0); \
+ })
+
static void
scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
u8 describe_id, u32 message_id, u32 valid_size,
@@ -1875,6 +1914,7 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
int ret;
u32 flags;
u64 phys_addr;
+ u32 attributes;
u8 size;
void __iomem *addr;
struct scmi_xfer *t;
@@ -1883,6 +1923,16 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
struct scmi_msg_resp_desc_fc *resp;
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ /* Check if the MSG_ID supports fastchannel */
+ ret = scmi_protocol_msg_check(ph, message_id, &attributes);
+ SCMI_QUIRK(perf_level_get_fc_force, QUIRK_PERF_FC_FORCE);
+ if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) {
+ dev_dbg(ph->dev,
+ "Skip FC init for 0x%02X/%d domain:%d - ret:%d\n",
+ pi->proto->id, message_id, domain, ret);
+ return;
+ }
+
if (!p_addr) {
ret = -EINVAL;
goto err_out;
@@ -2000,39 +2050,6 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
SCMI_PROTO_FC_RING_DB(64);
}
-/**
- * scmi_protocol_msg_check - Check protocol message attributes
- *
- * @ph: A reference to the protocol handle.
- * @message_id: The ID of the message to check.
- * @attributes: A parameter to optionally return the retrieved message
- * attributes, in case of Success.
- *
- * An helper to check protocol message attributes for a specific protocol
- * and message pair.
- *
- * Return: 0 on SUCCESS
- */
-static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
- u32 message_id, u32 *attributes)
-{
- int ret;
- struct scmi_xfer *t;
-
- ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
- sizeof(__le32), 0, &t);
- if (ret)
- return ret;
-
- put_unaligned_le32(message_id, t->tx.buf);
- ret = do_xfer(ph, t);
- if (!ret && attributes)
- *attributes = get_unaligned_le32(t->rx.buf);
- xfer_put(ph, t);
-
- return ret;
-}
-
static const struct scmi_proto_helpers_ops helpers_ops = {
.extended_name_get = scmi_common_extended_name_get,
.get_max_msg_size = scmi_common_get_max_msg_size,
@@ -2825,9 +2842,8 @@ static int scmi_bus_notifier(struct notifier_block *nb,
struct scmi_info *info = bus_nb_to_scmi_info(nb);
struct scmi_device *sdev = to_scmi_dev(data);
- /* Skip transport devices and devices of different SCMI instances */
- if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
- sdev->dev.parent != info->dev)
+ /* Skip devices of different SCMI instances */
+ if (sdev->dev.parent != info->dev)
return NOTIFY_DONE;
switch (action) {
@@ -3098,6 +3114,18 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
return &trans->desc;
}
+static void scmi_enable_matching_quirks(struct scmi_info *info)
+{
+ struct scmi_revision_info *rev = &info->version;
+
+ dev_dbg(info->dev, "Looking for quirks matching: %s/%s/0x%08X\n",
+ rev->vendor_id, rev->sub_vendor_id, rev->impl_ver);
+
+ /* Enable applicable quirks */
+ scmi_quirks_enable(info->dev, rev->vendor_id,
+ rev->sub_vendor_id, rev->impl_ver);
+}
+
static int scmi_probe(struct platform_device *pdev)
{
int ret;
@@ -3219,6 +3247,8 @@ static int scmi_probe(struct platform_device *pdev)
list_add_tail(&info->node, &scmi_list);
mutex_unlock(&scmi_list_mutex);
+ scmi_enable_matching_quirks(info);
+
for_each_available_child_of_node(np, child) {
u32 prot_id;
@@ -3377,6 +3407,8 @@ static struct dentry *scmi_debugfs_init(void)
static int __init scmi_driver_init(void)
{
+ scmi_quirks_initialize();
+
/* Bail out if no SCMI transport was configured */
if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
return -EINVAL;
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index aaee57cdcd55..d62c4469d1fd 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -31,6 +31,8 @@
#define SCMI_PROTOCOL_VENDOR_BASE 0x80
+#define MSG_SUPPORTS_FASTCHANNEL(x) ((x) & BIT(0))
+
enum scmi_common_cmd {
PROTOCOL_VERSION = 0x0,
PROTOCOL_ATTRIBUTES = 0x1,
diff --git a/drivers/firmware/arm_scmi/quirks.c b/drivers/firmware/arm_scmi/quirks.c
new file mode 100644
index 000000000000..03960aca3610
--- /dev/null
+++ b/drivers/firmware/arm_scmi/quirks.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol Quirks
+ *
+ * Copyright (C) 2025 ARM Ltd.
+ */
+
+/**
+ * DOC: Theory of operation
+ *
+ * A framework to define SCMI quirks and their activation conditions based on
+ * existing static_keys kernel facilities.
+ *
+ * Quirks are named and their activation conditions defined using the macro
+ * DEFINE_SCMI_QUIRK() in this file.
+ *
+ * After a quirk is defined, a corresponding entry must also be added to the
+ * global @scmi_quirks_table in this file using __DECLARE_SCMI_QUIRK_ENTRY().
+ *
+ * Additionally a corresponding quirk declaration must be added also to the
+ * quirk.h file using DECLARE_SCMI_QUIRK().
+ *
+ * The needed quirk code-snippet itself will be defined locally to the SCMI
+ * code that it is meant to fix, and will be associated to the previously
+ * defined quirk and related activation conditions using the macro SCMI_QUIRK().
+ *
+ * At runtime, during the SCMI stack probe sequence, once the SCMI Server has
+ * advertised the running platform Vendor, SubVendor and Implementation Version
+ * data, all the defined quirks matching the activation conditions will be
+ * enabled.
+ *
+ * Example
+ *
+ * quirk.c
+ * -------
+ * DEFINE_SCMI_QUIRK(fix_me, "vendor", "subvend", "0x12000-0x30000",
+ * "someone,plat_A", "another,plat_b", "vend,sku");
+ *
+ * static struct scmi_quirk *scmi_quirks_table[] = {
+ * ...
+ * __DECLARE_SCMI_QUIRK_ENTRY(fix_me),
+ * NULL
+ * };
+ *
+ * quirk.h
+ * -------
+ * DECLARE_SCMI_QUIRK(fix_me);
+ *
+ * <somewhere_in_the_scmi_stack.c>
+ * ------------------------------
+ *
+ * #define QUIRK_CODE_SNIPPET_FIX_ME() \
+ * ({ \
+ * if (p->condition) \
+ * a_ptr->calculated_val = 123; \
+ * })
+ *
+ *
+ * int some_function_to_fix(int param, struct something *p)
+ * {
+ * struct some_strut *a_ptr;
+ *
+ * a_ptr = some_load_func(p);
+ * SCMI_QUIRK(fix_me, QUIRK_CODE_SNIPPET_FIX_ME);
+ * some_more_func(a_ptr);
+ * ...
+ *
+ * return 0;
+ * }
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/hashtable.h>
+#include <linux/kstrtox.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/static_key.h>
+#include <linux/string.h>
+#include <linux/stringhash.h>
+#include <linux/types.h>
+
+#include "quirks.h"
+
+#define SCMI_QUIRKS_HT_SZ 4
+
+struct scmi_quirk {
+ bool enabled;
+ const char *name;
+ char *vendor;
+ char *sub_vendor_id;
+ char *impl_ver_range;
+ u32 start_range;
+ u32 end_range;
+ struct static_key_false *key;
+ struct hlist_node hash;
+ unsigned int hkey;
+ const char *const compats[];
+};
+
+#define __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ...) \
+ static struct scmi_quirk scmi_quirk_entry_ ## _qn = { \
+ .name = __stringify(quirk_ ## _qn), \
+ .vendor = _ven, \
+ .sub_vendor_id = _sub, \
+ .impl_ver_range = _impl, \
+ .key = &(scmi_quirk_ ## _qn), \
+ .compats = { __VA_ARGS__ __VA_OPT__(,) NULL }, \
+ }
+
+#define __DECLARE_SCMI_QUIRK_ENTRY(_qn) (&(scmi_quirk_entry_ ## _qn))
+
+/*
+ * Define a quirk by name and provide the matching tokens where:
+ *
+ * _qn: A string which will be used to build the quirk and the global
+ * static_key names.
+ * _ven : SCMI Vendor ID string match, NULL means any.
+ * _sub : SCMI SubVendor ID string match, NULL means any.
+ * _impl : SCMI Implementation Version string match, NULL means any.
+ * This string can be used to express version ranges which will be
+ * interpreted as follows:
+ *
+ * NULL [0, 0xFFFFFFFF]
+ * "X" [X, X]
+ * "X-" [X, 0xFFFFFFFF]
+ * "-X" [0, X]
+ * "X-Y" [X, Y]
+ *
+ * with X <= Y and <v> in [X, Y] meaning X <= <v> <= Y
+ *
+ * ... : An optional variadic macro argument used to provide a comma-separated
+ * list of compatible string matches; when no variadic argument is
+ * provided, ANY compatible will match this quirk.
+ *
+ * This also implicitly defines a properly named global static-key that
+ * will be used to dynamically enable the quirk at initialization time.
+ *
+ * Note that it is possible to associate multiple quirks to the same
+ * matching pattern, if your firmware quality is really astounding :P
+ *
+ * Example:
+ *
+ * Compatibles list NOT provided, so ANY compatible will match:
+ *
+ * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000");
+ *
+ *
+ * A few compatibles provided to match against:
+ *
+ * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000",
+ * "xvend,plat_a", "xvend,plat_b", "xvend,sku_name");
+ */
+#define DEFINE_SCMI_QUIRK(_qn, _ven, _sub, _impl, ...) \
+ DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \
+ __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__)
+
+/*
+ * Same as DEFINE_SCMI_QUIRK but EXPORTED: this is meant to address quirks
+ * that possibly reside in code that is included in loadable kernel modules
+ * that needs to be able to access the global static keys at runtime to
+ * determine if enabled or not. (see SCMI_QUIRK to understand usage)
+ */
+#define DEFINE_SCMI_QUIRK_EXPORTED(_qn, _ven, _sub, _impl, ...) \
+ DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \
+ EXPORT_SYMBOL_GPL(scmi_quirk_ ## _qn); \
+ __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__)
+
+/* Global Quirks Definitions */
+DEFINE_SCMI_QUIRK(clock_rates_triplet_out_of_spec, NULL, NULL, NULL);
+DEFINE_SCMI_QUIRK(perf_level_get_fc_force, "Qualcomm", NULL, "0x20000-");
+
+/*
+ * Quirks Pointers Array
+ *
+ * This is filled at compile-time with the list of pointers to all the currently
+ * defined quirk descriptors.
+ */
+static struct scmi_quirk *scmi_quirks_table[] = {
+ __DECLARE_SCMI_QUIRK_ENTRY(clock_rates_triplet_out_of_spec),
+ __DECLARE_SCMI_QUIRK_ENTRY(perf_level_get_fc_force),
+ NULL
+};
+
+/*
+ * Quirks HashTable
+ *
+ * A run-time populated hashtable containing all the defined quirk descriptors
+ * hashed by matching pattern.
+ */
+static DEFINE_READ_MOSTLY_HASHTABLE(scmi_quirks_ht, SCMI_QUIRKS_HT_SZ);
+
+static unsigned int scmi_quirk_signature(const char *vend, const char *sub_vend)
+{
+ char *signature, *p;
+ unsigned int hash32;
+ unsigned long hash = 0;
+
+ /* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */
+ signature = kasprintf(GFP_KERNEL, "|%s|%s|", vend ?: "", sub_vend ?: "");
+ if (!signature)
+ return 0;
+
+ pr_debug("SCMI Quirk Signature >>>%s<<<\n", signature);
+
+ p = signature;
+ while (*p)
+ hash = partial_name_hash(tolower(*p++), hash);
+ hash32 = end_name_hash(hash);
+
+ kfree(signature);
+
+ return hash32;
+}
+
+static int scmi_quirk_range_parse(struct scmi_quirk *quirk)
+{
+ const char *last, *first = quirk->impl_ver_range;
+ size_t len;
+ char *sep;
+ int ret;
+
+ quirk->start_range = 0;
+ quirk->end_range = 0xFFFFFFFF;
+ len = quirk->impl_ver_range ? strlen(quirk->impl_ver_range) : 0;
+ if (!len)
+ return 0;
+
+ last = first + len - 1;
+ sep = strchr(quirk->impl_ver_range, '-');
+ if (sep)
+ *sep = '\0';
+
+ if (sep == first) /* -X */
+ ret = kstrtouint(first + 1, 0, &quirk->end_range);
+ else /* X OR X- OR X-y */
+ ret = kstrtouint(first, 0, &quirk->start_range);
+ if (ret)
+ return ret;
+
+ if (!sep)
+ quirk->end_range = quirk->start_range;
+ else if (sep != last) /* x-Y */
+ ret = kstrtouint(sep + 1, 0, &quirk->end_range);
+
+ if (quirk->start_range > quirk->end_range)
+ return -EINVAL;
+
+ return ret;
+}
+
+void scmi_quirks_initialize(void)
+{
+ struct scmi_quirk *quirk;
+ int i;
+
+ for (i = 0, quirk = scmi_quirks_table[0]; quirk;
+ i++, quirk = scmi_quirks_table[i]) {
+ int ret;
+
+ ret = scmi_quirk_range_parse(quirk);
+ if (ret) {
+ pr_err("SCMI skip QUIRK [%s] - BAD RANGE - |%s|\n",
+ quirk->name, quirk->impl_ver_range);
+ continue;
+ }
+ quirk->hkey = scmi_quirk_signature(quirk->vendor,
+ quirk->sub_vendor_id);
+
+ hash_add(scmi_quirks_ht, &quirk->hash, quirk->hkey);
+
+ pr_debug("Registered SCMI QUIRK [%s] -- %p - Key [0x%08X] - %s/%s/[0x%08X-0x%08X]\n",
+ quirk->name, quirk, quirk->hkey,
+ quirk->vendor, quirk->sub_vendor_id,
+ quirk->start_range, quirk->end_range);
+ }
+
+ pr_debug("SCMI Quirks initialized\n");
+}
+
+void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *subv, const u32 impl)
+{
+ for (int i = 3; i >= 0; i--) {
+ struct scmi_quirk *quirk;
+ unsigned int hkey;
+
+ hkey = scmi_quirk_signature(i > 1 ? vend : NULL,
+ i > 2 ? subv : NULL);
+
+ /*
+ * Note that there could be multiple matches so we
+ * will enable multiple quirks that are part of a hash collision
+ * domain...BUT we cannot assume that ALL quirks on the
+ * same collision domain are a full match.
+ */
+ hash_for_each_possible(scmi_quirks_ht, quirk, hash, hkey) {
+ if (quirk->enabled || quirk->hkey != hkey ||
+ impl < quirk->start_range ||
+ impl > quirk->end_range)
+ continue;
+
+ if (quirk->compats[0] &&
+ !of_machine_compatible_match(quirk->compats))
+ continue;
+
+ dev_info(dev, "Enabling SCMI Quirk [%s]\n",
+ quirk->name);
+
+ dev_dbg(dev,
+ "Quirk matched on: %s/%s/%s/[0x%08X-0x%08X]\n",
+ quirk->compats[0], quirk->vendor,
+ quirk->sub_vendor_id,
+ quirk->start_range, quirk->end_range);
+
+ static_branch_enable(quirk->key);
+ quirk->enabled = true;
+ }
+ }
+}
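
[Editorial sketch, not part of the patch] For illustration, a standalone userspace re-implementation of the implementation-version range grammar parsed by scmi_quirk_range_parse() above; strtoul() stands in for kstrtouint(), and the same NULL / "X" / "X-" / "-X" / "X-Y" forms are accepted:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_range(const char *spec, uint32_t *lo, uint32_t *hi)
{
	char buf[32], *sep;

	*lo = 0;
	*hi = UINT32_MAX;
	if (!spec || !*spec)
		return 0;			/* NULL -> [0, 0xFFFFFFFF] */

	snprintf(buf, sizeof(buf), "%s", spec);
	sep = strchr(buf, '-');
	if (sep)
		*sep = '\0';

	if (sep == buf)				/* "-X" */
		*hi = strtoul(buf + 1, NULL, 0);
	else					/* "X", "X-", "X-Y" */
		*lo = strtoul(buf, NULL, 0);

	if (!sep)
		*hi = *lo;			/* "X" -> [X, X] */
	else if (sep[1])
		*hi = strtoul(sep + 1, NULL, 0);	/* "X-Y" */

	return (*lo <= *hi) ? 0 : -1;
}

int main(void)
{
	uint32_t lo, hi;

	parse_range("0x12000-0x30000", &lo, &hi);
	printf("[0x%x, 0x%x]\n", lo, hi);	/* prints [0x12000, 0x30000] */
	return 0;
}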
diff --git a/drivers/firmware/arm_scmi/quirks.h b/drivers/firmware/arm_scmi/quirks.h
new file mode 100644
index 000000000000..a71fde85a527
--- /dev/null
+++ b/drivers/firmware/arm_scmi/quirks.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol Quirks
+ *
+ * Copyright (C) 2025 ARM Ltd.
+ */
+#ifndef _SCMI_QUIRKS_H
+#define _SCMI_QUIRKS_H
+
+#include <linux/static_key.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_ARM_SCMI_QUIRKS
+
+#define DECLARE_SCMI_QUIRK(_qn) \
+ DECLARE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn)
+
+/*
+ * A helper to associate the actual code snippet to use as a quirk
+ * named as _qn.
+ */
+#define SCMI_QUIRK(_qn, _blk) \
+ do { \
+ if (static_branch_unlikely(&(scmi_quirk_ ## _qn))) \
+ (_blk); \
+ } while (0)
+
+void scmi_quirks_initialize(void);
+void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *subv, const u32 impl);
+
+#else
+
+#define DECLARE_SCMI_QUIRK(_qn)
+/* Force quirks compilation even when SCMI Quirks are disabled */
+#define SCMI_QUIRK(_qn, _blk) \
+ do { \
+ if (0) \
+ (_blk); \
+ } while (0)
+
+static inline void scmi_quirks_initialize(void) { }
+static inline void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *sub_vend, const u32 impl) { }
+
+#endif /* CONFIG_ARM_SCMI_QUIRKS */
+
+/* Quirk declarations */
+DECLARE_SCMI_QUIRK(clock_rates_triplet_out_of_spec);
+DECLARE_SCMI_QUIRK(perf_level_get_fc_force);
+
+#endif /* _SCMI_QUIRKS_H */
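
[Editorial sketch, not part of the patch] A small userspace sketch (hashing omitted, plain printf) of the match order used by scmi_quirks_enable(): candidate "|vendor|sub_vendor|" signatures are tried from most to least specific, mirroring the "|%s|%s|" format built by scmi_quirk_signature():

#include <stdio.h>

static void print_candidates(const char *vend, const char *subv)
{
	for (int i = 3; i >= 0; i--) {
		const char *v = (i > 1) ? vend : NULL;
		const char *s = (i > 2) ? subv : NULL;

		/* i == 1 and i == 0 both yield the fully wildcarded signature,
		 * exactly as in the driver loop above. */
		printf("try %d: |%s|%s|\n", i, v ? v : "", s ? s : "");
	}
}

int main(void)
{
	/* "Qualcomm" matches the perf_level_get_fc_force quirk entry above;
	 * the sub-vendor string here is purely illustrative. */
	print_candidates("Qualcomm", "SubVend");
	return 0;
}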
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 7cc0d616b8de..3d543b1d8947 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -671,11 +671,13 @@ static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
* @len: Length of the message in @buf.
* @chan_id: The channel ID to use.
* @async: A flag stating if an asynchronous command is required.
+ * @poll: A flag stating if a polling transmission is required.
*
* Return: 0 on Success
*/
static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
- void *buf, size_t len, u8 chan_id, bool async)
+ void *buf, size_t len, u8 chan_id,
+ bool async, bool poll)
{
int ret;
struct scmi_xfer *xfer;
@@ -684,6 +686,16 @@ static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
if (ret)
return ret;
+ if (poll) {
+ if (is_transport_polling_capable(raw->desc)) {
+ xfer->hdr.poll_completion = true;
+ } else {
+ dev_err(raw->handle->dev,
+ "Failed to send RAW message - Polling NOT supported\n");
+ return -EINVAL;
+ }
+ }
+
ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
if (ret)
scmi_xfer_raw_put(raw->handle, xfer);
@@ -801,7 +813,7 @@ static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos,
- bool async)
+ bool async, bool poll)
{
int ret;
struct scmi_dbg_raw_data *rd = filp->private_data;
@@ -831,7 +843,7 @@ static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
}
ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
- rd->chan_id, async);
+ rd->chan_id, async, poll);
/* Reset ppos for next message ... */
rd->tx_size = 0;
@@ -875,7 +887,8 @@ static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
- return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ false, false);
}
static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
@@ -964,7 +977,8 @@ static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
- return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ true, false);
}
static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
@@ -976,6 +990,40 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.owner = THIS_MODULE,
};
+static ssize_t scmi_dbg_raw_mode_message_poll_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ false, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_poll_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_poll_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_dbg_raw_mode_message_poll_async_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ true, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_poll_async_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_poll_async_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos)
@@ -1199,6 +1247,12 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle,
debugfs_create_file("message_async", 0600, raw->dentry, raw,
&scmi_dbg_raw_mode_message_async_fops);
+ debugfs_create_file("message_poll", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_poll_fops);
+
+ debugfs_create_file("message_poll_async", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_poll_async_fops);
+
debugfs_create_file("notification", 0400, raw->dentry, raw,
&scmi_dbg_raw_mode_notification_fops);
@@ -1230,6 +1284,14 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle,
debugfs_create_file_aux_num("message_async", 0600, chd,
raw, channels[i],
&scmi_dbg_raw_mode_message_async_fops);
+
+ debugfs_create_file_aux_num("message_poll", 0600, chd,
+ raw, channels[i],
+ &scmi_dbg_raw_mode_message_poll_fops);
+
+ debugfs_create_file_aux_num("message_poll_async", 0600,
+ chd, raw, channels[i],
+ &scmi_dbg_raw_mode_message_poll_async_fops);
}
}
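
[Editorial sketch, not part of the patch] A hedged userspace example of driving the new "message_poll" debugfs entry, which behaves like "message" but forces a polled transmission; the debugfs path and the raw message contents below are assumptions for illustration only, not taken from this patch:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed location of the per-instance SCMI raw-mode directory. */
	const char *path = "/sys/kernel/debug/scmi/0/raw/message_poll";
	unsigned char req[8] = { 0 };	/* prebuilt raw SCMI message: header + payload */
	unsigned char rsp[128];
	struct pollfd pfd;
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (write(fd, req, sizeof(req)) < 0)	/* inject the command, transmitted polled */
		perror("write");

	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 1000) > 0) {		/* wait for the reply to be queued */
		n = read(fd, rsp, sizeof(rsp));
		printf("got %zd bytes of reply\n", n);
	}

	close(fd);
	return 0;
}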
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Kconfig b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
index a01bf5e47301..c34c8c837441 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/Kconfig
+++ b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
@@ -12,6 +12,30 @@ config IMX_SCMI_BBM_EXT
To compile this driver as a module, choose M here: the
module will be called imx-sm-bbm.
+config IMX_SCMI_CPU_EXT
+ tristate "i.MX SCMI CPU EXTENSION"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_CPU_DRV
+ default y if ARCH_MXC
+ help
+ This enables the i.MX System CPU Protocol to manage CPU
+ start, stop, etc.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-sm-cpu.
+
+config IMX_SCMI_LMM_EXT
+ tristate "i.MX SCMI LMM EXTENSION"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_LMM_DRV
+ default y if ARCH_MXC
+ help
+ This enables the i.MX System Logical Machine Protocol to
+ manage Logical Machine boot, shutdown, etc.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-sm-lmm.
+
config IMX_SCMI_MISC_EXT
tristate "i.MX SCMI MISC EXTENSION"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Makefile b/drivers/firmware/arm_scmi/vendors/imx/Makefile
index d3ee6d544924..e3a5ea46345c 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/Makefile
+++ b/drivers/firmware/arm_scmi/vendors/imx/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_IMX_SCMI_BBM_EXT) += imx-sm-bbm.o
+obj-$(CONFIG_IMX_SCMI_CPU_EXT) += imx-sm-cpu.o
+obj-$(CONFIG_IMX_SCMI_LMM_EXT) += imx-sm-lmm.o
obj-$(CONFIG_IMX_SCMI_MISC_EXT) += imx-sm-misc.o
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c
new file mode 100644
index 000000000000..66f47f5371e5
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System control and Management Interface (SCMI) NXP CPU Protocol
+ *
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+#include "../../protocols.h"
+#include "../../notify.h"
+
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+enum scmi_imx_cpu_protocol_cmd {
+ SCMI_IMX_CPU_ATTRIBUTES = 0x3,
+ SCMI_IMX_CPU_START = 0x4,
+ SCMI_IMX_CPU_STOP = 0x5,
+ SCMI_IMX_CPU_RESET_VECTOR_SET = 0x6,
+ SCMI_IMX_CPU_INFO_GET = 0xC,
+};
+
+struct scmi_imx_cpu_info {
+ u32 nr_cpu;
+};
+
+#define SCMI_IMX_CPU_NR_CPU_MASK GENMASK(15, 0)
+struct scmi_msg_imx_cpu_protocol_attributes {
+ __le32 attributes;
+};
+
+struct scmi_msg_imx_cpu_attributes_out {
+ __le32 attributes;
+#define CPU_MAX_NAME 16
+ u8 name[CPU_MAX_NAME];
+};
+
+struct scmi_imx_cpu_reset_vector_set_in {
+ __le32 cpuid;
+#define CPU_VEC_FLAGS_RESUME BIT(31)
+#define CPU_VEC_FLAGS_START BIT(30)
+#define CPU_VEC_FLAGS_BOOT BIT(29)
+ __le32 flags;
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+struct scmi_imx_cpu_info_get_out {
+#define CPU_RUN_MODE_START 0
+#define CPU_RUN_MODE_HOLD 1
+#define CPU_RUN_MODE_STOP 2
+#define CPU_RUN_MODE_SLEEP 3
+ __le32 runmode;
+ __le32 sleepmode;
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+static int scmi_imx_cpu_validate_cpuid(const struct scmi_protocol_handle *ph,
+ u32 cpuid)
+{
+ struct scmi_imx_cpu_info *info = ph->get_priv(ph);
+
+ if (cpuid >= info->nr_cpu)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int scmi_imx_cpu_start(const struct scmi_protocol_handle *ph,
+ u32 cpuid, bool start)
+{
+ struct scmi_xfer *t;
+ u8 msg_id;
+ int ret;
+
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ if (start)
+ msg_id = SCMI_IMX_CPU_START;
+ else
+ msg_id = SCMI_IMX_CPU_STOP;
+
+ ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_reset_vector_set(const struct scmi_protocol_handle *ph,
+ u32 cpuid, u64 vector, bool start,
+ bool boot, bool resume)
+{
+ struct scmi_imx_cpu_reset_vector_set_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_RESET_VECTOR_SET, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->cpuid = cpu_to_le32(cpuid);
+ in->flags = cpu_to_le32(0);
+ if (start)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_START);
+ if (boot)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_BOOT);
+ if (resume)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_RESUME);
+ in->resetvectorlow = cpu_to_le32(lower_32_bits(vector));
+ in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector));
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_started(const struct scmi_protocol_handle *ph, u32 cpuid,
+ bool *started)
+{
+ struct scmi_imx_cpu_info_get_out *out;
+ struct scmi_xfer *t;
+ u32 mode;
+ int ret;
+
+ if (!started)
+ return -EINVAL;
+
+ *started = false;
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_INFO_GET, sizeof(u32),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ mode = le32_to_cpu(out->runmode);
+ if (mode == CPU_RUN_MODE_START || mode == CPU_RUN_MODE_SLEEP)
+ *started = true;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static const struct scmi_imx_cpu_proto_ops scmi_imx_cpu_proto_ops = {
+ .cpu_reset_vector_set = scmi_imx_cpu_reset_vector_set,
+ .cpu_start = scmi_imx_cpu_start,
+ .cpu_started = scmi_imx_cpu_started,
+};
+
+static int scmi_imx_cpu_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_imx_cpu_info *info)
+{
+ struct scmi_msg_imx_cpu_protocol_attributes *attr;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ info->nr_cpu = le32_get_bits(attr->attributes, SCMI_IMX_CPU_NR_CPU_MASK);
+ dev_info(ph->dev, "i.MX SM CPU: %d cpus\n",
+ info->nr_cpu);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 cpuid)
+{
+ struct scmi_msg_imx_cpu_attributes_out *out;
+ char name[SCMI_SHORT_NAME_MAX_SIZE] = {'\0'};
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_ATTRIBUTES, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->name, SCMI_SHORT_NAME_MAX_SIZE);
+ dev_info(ph->dev, "i.MX CPU: name: %s\n", name);
+ } else {
+ dev_err(ph->dev, "i.MX cpu: Failed to get info of cpu(%u)\n", cpuid);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_cpu_info *info;
+ u32 version;
+ int ret, i;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_info(ph->dev, "NXP SM CPU Protocol Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = scmi_imx_cpu_protocol_attributes_get(ph, info);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < info->nr_cpu; i++) {
+ ret = scmi_imx_cpu_attributes_get(ph, i);
+ if (ret)
+ return ret;
+ }
+
+ return ph->set_priv(ph, info, version);
+}
+
+static const struct scmi_protocol scmi_imx_cpu = {
+ .id = SCMI_PROTOCOL_IMX_CPU,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_imx_cpu_protocol_init,
+ .ops = &scmi_imx_cpu_proto_ops,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
+};
+module_scmi_protocol(scmi_imx_cpu);
+
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_CPU) "-" SCMI_IMX_VENDOR);
+MODULE_DESCRIPTION("i.MX SCMI CPU driver");
+MODULE_LICENSE("GPL");
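As a rough orientation for how these ops get consumed, here is a hedged in-kernel sketch; the probe-style function, the CPU id (1) and the vector address are invented example values, and only the ops themselves plus the generic devm_protocol_get() pattern come from code visible in this series or the common SCMI driver model.

#include <linux/err.h>
#include <linux/scmi_protocol.h>
#include <linux/scmi_imx_protocol.h>

/* Illustrative sketch only; not part of this series. */
static int example_m7_bringup(struct scmi_device *sdev)
{
	const struct scmi_imx_cpu_proto_ops *cpu_ops;
	struct scmi_protocol_handle *ph;
	bool started;
	int ret;

	cpu_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_CPU, &ph);
	if (IS_ERR(cpu_ops))
		return PTR_ERR(cpu_ops);

	/* Point the boot vector at an example firmware load address ... */
	ret = cpu_ops->cpu_reset_vector_set(ph, 1, 0x80000000, false, true, false);
	if (ret)
		return ret;

	/* ... start the CPU, then confirm it reports a running state. */
	ret = cpu_ops->cpu_start(ph, 1, true);
	if (ret)
		return ret;

	return cpu_ops->cpu_started(ph, 1, &started);
}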
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c
new file mode 100644
index 000000000000..b519c67fe920
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System control and Management Interface (SCMI) NXP LMM Protocol
+ *
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+#include "../../protocols.h"
+#include "../../notify.h"
+
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+enum scmi_imx_lmm_protocol_cmd {
+ SCMI_IMX_LMM_ATTRIBUTES = 0x3,
+ SCMI_IMX_LMM_BOOT = 0x4,
+ SCMI_IMX_LMM_RESET = 0x5,
+ SCMI_IMX_LMM_SHUTDOWN = 0x6,
+ SCMI_IMX_LMM_WAKE = 0x7,
+ SCMI_IMX_LMM_SUSPEND = 0x8,
+ SCMI_IMX_LMM_NOTIFY = 0x9,
+ SCMI_IMX_LMM_RESET_REASON = 0xA,
+ SCMI_IMX_LMM_POWER_ON = 0xB,
+ SCMI_IMX_LMM_RESET_VECTOR_SET = 0xC,
+};
+
+struct scmi_imx_lmm_priv {
+ u32 nr_lmm;
+};
+
+#define SCMI_IMX_LMM_NR_LM_MASK GENMASK(5, 0)
+#define SCMI_IMX_LMM_NR_MAX 16
+struct scmi_msg_imx_lmm_protocol_attributes {
+ __le32 attributes;
+};
+
+struct scmi_msg_imx_lmm_attributes_out {
+ __le32 lmid;
+ __le32 attributes;
+ __le32 state;
+ __le32 errstatus;
+ u8 name[LMM_MAX_NAME];
+};
+
+struct scmi_imx_lmm_reset_vector_set_in {
+ __le32 lmid;
+ __le32 cpuid;
+ __le32 flags; /* reserved for future extension */
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+struct scmi_imx_lmm_shutdown_in {
+ __le32 lmid;
+#define SCMI_IMX_LMM_SHUTDOWN_GRACEFUL BIT(0)
+ __le32 flags;
+};
+
+static int scmi_imx_lmm_validate_lmid(const struct scmi_protocol_handle *ph, u32 lmid)
+{
+ struct scmi_imx_lmm_priv *priv = ph->get_priv(ph);
+
+ if (lmid >= priv->nr_lmm)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int scmi_imx_lmm_attributes(const struct scmi_protocol_handle *ph,
+ u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ struct scmi_msg_imx_lmm_attributes_out *out;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_ATTRIBUTES, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(lmid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ info->lmid = le32_to_cpu(out->lmid);
+ info->state = le32_to_cpu(out->state);
+ info->errstatus = le32_to_cpu(out->errstatus);
+ strscpy(info->name, out->name);
+ dev_dbg(ph->dev, "i.MX LMM: Logical Machine(%d), name: %s\n",
+ info->lmid, info->name);
+ } else {
+ dev_err(ph->dev, "i.MX LMM: Failed to get info of Logical Machine(%u)\n", lmid);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int
+scmi_imx_lmm_power_boot(const struct scmi_protocol_handle *ph, u32 lmid, bool boot)
+{
+ struct scmi_xfer *t;
+ u8 msg_id;
+ int ret;
+
+ ret = scmi_imx_lmm_validate_lmid(ph, lmid);
+ if (ret)
+ return ret;
+
+ if (boot)
+ msg_id = SCMI_IMX_LMM_BOOT;
+ else
+ msg_id = SCMI_IMX_LMM_POWER_ON;
+
+ ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(lmid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_reset_vector_set(const struct scmi_protocol_handle *ph,
+ u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ struct scmi_imx_lmm_reset_vector_set_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_RESET_VECTOR_SET, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->lmid = cpu_to_le32(lmid);
+ in->cpuid = cpu_to_le32(cpuid);
+ in->flags = cpu_to_le32(0);
+ in->resetvectorlow = cpu_to_le32(lower_32_bits(vector));
+ in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector));
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_shutdown(const struct scmi_protocol_handle *ph, u32 lmid,
+ u32 flags)
+{
+ struct scmi_imx_lmm_shutdown_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = scmi_imx_lmm_validate_lmid(ph, lmid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_SHUTDOWN, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->lmid = cpu_to_le32(lmid);
+ if (flags & SCMI_IMX_LMM_SHUTDOWN_GRACEFUL)
+ in->flags = cpu_to_le32(SCMI_IMX_LMM_SHUTDOWN_GRACEFUL);
+ else
+ in->flags = cpu_to_le32(0);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static const struct scmi_imx_lmm_proto_ops scmi_imx_lmm_proto_ops = {
+ .lmm_power_boot = scmi_imx_lmm_power_boot,
+ .lmm_info = scmi_imx_lmm_attributes,
+ .lmm_reset_vector_set = scmi_imx_lmm_reset_vector_set,
+ .lmm_shutdown = scmi_imx_lmm_shutdown,
+};
+
+static int scmi_imx_lmm_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_imx_lmm_priv *priv)
+{
+ struct scmi_msg_imx_lmm_protocol_attributes *attr;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ priv->nr_lmm = le32_get_bits(attr->attributes, SCMI_IMX_LMM_NR_LM_MASK);
+ if (priv->nr_lmm > SCMI_IMX_LMM_NR_MAX) {
+ dev_err(ph->dev, "i.MX LMM: %d:Exceed max supported Logical Machines\n",
+ priv->nr_lmm);
+ ret = -EINVAL;
+ } else {
+ dev_info(ph->dev, "i.MX LMM: %d Logical Machines\n", priv->nr_lmm);
+ }
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_lmm_priv *info;
+ u32 version;
+ int ret;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_info(ph->dev, "NXP SM LMM Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = scmi_imx_lmm_protocol_attributes_get(ph, info);
+ if (ret)
+ return ret;
+
+ return ph->set_priv(ph, info, version);
+}
+
+static const struct scmi_protocol scmi_imx_lmm = {
+ .id = SCMI_PROTOCOL_IMX_LMM,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_imx_lmm_protocol_init,
+ .ops = &scmi_imx_lmm_proto_ops,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
+};
+module_scmi_protocol(scmi_imx_lmm);
+
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_LMM) "-" SCMI_IMX_VENDOR);
+MODULE_DESCRIPTION("i.MX SCMI LMM driver");
+MODULE_LICENSE("GPL");
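A similar hedged sketch for the LMM side; the lmid (1) is an invented example, and the graceful flag is written as BIT(0) because SCMI_IMX_LMM_SHUTDOWN_GRACEFUL above is local to this file — whether a consumer-visible define exists is not shown in this diff.

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/scmi_protocol.h>
#include <linux/scmi_imx_protocol.h>

/* Illustrative sketch only; not part of this series. */
static int example_lm_graceful_shutdown(struct scmi_device *sdev)
{
	const struct scmi_imx_lmm_proto_ops *lmm_ops;
	struct scmi_protocol_handle *ph;
	struct scmi_imx_lmm_info info;
	int ret;

	lmm_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_LMM, &ph);
	if (IS_ERR(lmm_ops))
		return PTR_ERR(lmm_ops);

	/* Describe LM 1 before touching it. */
	ret = lmm_ops->lmm_info(ph, 1, &info);
	if (ret)
		return ret;

	/*
	 * Ask LM 1 to shut itself down: graceful, so SUCCESS only means the
	 * request was delivered, not that the LM is already down.
	 */
	return lmm_ops->lmm_shutdown(ph, 1, BIT(0));
}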
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
index b2dfd6c46ca2..4e246a78a042 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
@@ -32,6 +32,518 @@ port, and deploy the SM on supported processors.
The SM implements an interface compliant with the Arm SCMI Specification
with additional vendor specific extensions.
+System Control and Management Logical Machine Management Vendor Protocol
+========================================================================
+
+The SM adds the concept of logical machines (LMs). These are analogous to
+VMs and each has its own instance of SCMI. All normal SCMI calls apply only
+to the LM running the calling agent. That includes boot, shutdown, reset,
+suspend, wake, etc. If a caller makes the SCMI base call to get a list
+of agents, it will only get those on that LM. Each LM is completely isolated
+from the others, which is mandatory for them to operate independently.
+
+This protocol is intended to support boot, shutdown, and reset of other
+logical machines (LMs). It is usually used to allow one LM (e.g. the OSPM) to
+manage another LM, which is typically an offload or accelerator engine.
+Notifications from this protocol can also be used to manage a communication
+link to another LM. The LMM protocol provides commands to:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Discover all the LMs defined in the system.
+- Boot a target LM.
+- Shutdown a target LM (gracefully or forcibly).
+- Reset a target LM (gracefully or forcibly).
+- Wake a target LM from suspend.
+- Suspend a target LM (gracefully).
+- Read boot/shutdown/reset information for a target LM.
+- Get notifications when a target LM boots or shuts down (e.g. if LM 'X'
+  requested notifications for LM 'Y', the SCMI firmware sends a notification
+  to LM 'X' whenever LM 'Y' boots or shuts down).
+
+'Graceful' means the LM itself is asked to shut down/reset/etc. (e.g. a
+notification is sent to Linux, and Linux then reboots or powers itself down).
+It is an asynchronous command: SUCCESS only means the command returned
+successfully, not that the reboot/reset has completed.
+
+'Forceful' means the SM forcibly shuts down/resets/etc. the LM. It is a
+synchronous command: SUCCESS means the LM has actually been shut down, reset,
+etc.
+Commands without a Graceful/Forceful flag, such as WAKE and SUSPEND, are
+graceful commands.
+
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x80
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000. |
++---------------+--------------------------------------------------------------+
+
+PROTOCOL_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Protocol attributes: |
+| |Bits[31:5] Reserved, must be zero. |
+| |Bits[4:0] Number of Logical Machines |
+| |Note that due to both hardware limitations and reset reason|
+| |field limitations, the max number of LM is 16. The minimum |
+| |is 1. |
++------------------+-----------------------------------------------------------+
+
+PROTOCOL_MESSAGE_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x2
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: in case the message is implemented and available |
+| |to use. |
+| |NOT_FOUND: if the message identified by message_id is |
+| |invalid or not implemented |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Flags that are associated with a specific command in the |
+| |protocol. For all commands in this protocol, this |
+| |parameter has a value of 0 |
++------------------+-----------------------------------------------------------+
+
+LMM_ATTRIBUTES
+~~~~~~~~~~~~~~
+
+message_id: 0x3
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |DENIED: if the agent does not have permission to get info |
+| |for the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |Identifier of the LM whose identification is requested. |
+| |This field is: Populated with the lmid of the calling |
+| |agent, when the lmid parameter passed via the command is |
+| |0xFFFFFFFF. Identical to the lmid field passed via the |
+| |calling parameters, in all other cases |
++------------------+-----------------------------------------------------------+
+|uint32 attributes | Bits[31:0] reserved. must be zero |
++------------------+-----------------------------------------------------------+
+|uint32 state | Current state of the LM |
++------------------+-----------------------------------------------------------+
+|uint32 errStatus | Last error status recorded |
++------------------+-----------------------------------------------------------+
+|char name[16] | A NULL terminated ASCII string with the LM name, of up |
+| | to 16 bytes |
++------------------+-----------------------------------------------------------+
+
+LMM_BOOT
+~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the LM boot was started successfully. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if lmid is same as the caller. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_RESET
+~~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Reset flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Graceful request: |
+| |Set to 1 if the request is a graceful request. |
+| |Set to 0 if the request is a forceful request. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: The LMM RESET command finished successfully in |
+| |graceful reset or LM successfully resets in forceful reset.|
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if lmid is same as the caller. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_SHUTDOWN
+~~~~~~~~~~~~
+
+message_id: 0x6
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Reset flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Graceful request: |
+| |Set to 1 if the request is a graceful request. |
+| |Set to 0 if the request is a forceful request. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: The LMM shutdown command finished successfully in |
+| |graceful request or LM successfully shutdown in forceful |
+| |request. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if lmid is same as the caller. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_WAKE
+~~~~~~~~
+
+message_id: 0x7
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if LM wake command successfully returns. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if lmid is same as the caller. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_SUSPEND
+~~~~~~~~~~~
+
+message_id: 0x8
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if LM suspend command successfully returns. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if lmid is same as the caller. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_NOTIFY
+~~~~~~~~~~
+
+message_id: 0x9
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Notification flags: |
+| |Bits[31:4] Reserved, must be zero. |
+| |Bit[3] Wake (resume) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[2] Suspend (sleep) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[1] Shutdown (off) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[0] Boot (on) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the notification state was updated. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if input attributes flag specifies |
+| |unsupported or invalid configurations. |
+| |DENIED: if the agent does not have permission to request |
+| |the notification. |
++------------------+-----------------------------------------------------------+
+
+LMM_RESET_REASON
+~~~~~~~~~~~~~~~~
+
+message_id: 0xA
+protocol_id: 0x80
+This command is mandatory.
+
+This command returns the reset reason that caused the last reset (POR, WDOG,
+JTAG, etc.); a decoding sketch for the flag words follows the table below.
+
++---------------------+--------------------------------------------------------+
+|Parameters |
++---------------------+--------------------------------------------------------+
+|Name |Description |
++---------------------+--------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++---------------------+--------------------------------------------------------+
+|Return values |
++---------------------+--------------------------------------------------------+
+|Name |Description |
++---------------------+--------------------------------------------------------+
+|int32 status |SUCCESS: if the reset reason of the LM was returned |
+| |successfully. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |DENIED: if the agent does not have permission to request|
+| |the reset reason. |
++---------------------+--------------------------------------------------------+
+|uint32 bootflags |Boot reason flags. This parameter has the format: |
+| |Bits[31] Valid. |
+| |Set to 1 if the entire reason is valid. |
+| |Set to 0 if the entire reason is not valid. |
+| |Bits[30:29] Reserved, must be zero. |
+| |Bit[28] Valid origin: |
+| |Set to 1 if the origin field is valid. |
+| |Set to 0 if the origin field is not valid. |
+| |Bits[27:24] Origin. |
+| |Logical Machine(LM) ID that causes the BOOT of this LM |
+| |Bit[23] Valid err ID: |
+| |Set to 1 if the error ID field is valid. |
+| |Set to 0 if the error ID field is not valid. |
+| |Bits[22:8] Error ID(Agent ID of the system). |
+| |Bit[7:0] Reason(WDOG, POR, FCCU and etc): |
+| |See the SRESR register description in the System |
+| |Reset Controller (SRC) section in the SoC reference manual |
+| |One reason maps to BIT(reason) in SRESR |
++---------------------+--------------------------------------------------------+
+|uint32 shutdownflags |Shutdown reason flags. This parameter has the format: |
+| |Bits[31] Valid. |
+| |Set to 1 if the entire reason is valid. |
+| |Set to 0 if the entire reason is not valid. |
+| |Bits[30:29] Number of valid extended info words. |
+| |Bit[28] Valid origin: |
+| |Set to 1 if the origin field is valid. |
+| |Set to 0 if the origin field is not valid. |
+| |Bits[27:24] Origin. |
+| |Logical Machine(LM) ID that causes the BOOT of this LM |
+| |Bit[23] Valid err ID: |
+| |Set to 1 if the error ID field is valid. |
+| |Set to 0 if the error ID field is not valid. |
+| |Bits[22:8] Error ID(Agent ID of the System). |
+| |Bit[7:0] Reason |
+| |See the SRESR register description in the System |
+| |Reset Controller (SRC) section in the SoC reference manual |
+| |One reason maps to BIT(reason) in SRESR |
++---------------------+--------------------------------------------------------+
+|uint32 extinfo[3] |Array of extended info words(e.g. fault pc) |
++---------------------+--------------------------------------------------------+
+
+LMM_POWER_ON
+~~~~~~~~~~~~
+
+message_id: 0xB
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if LM successfully powers on. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if lmid is same as the caller. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_RESET_VECTOR_SET
+~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0xC
+protocol_id: 0x80
+ This enables the i.MX System Logical Machine Protocol, which is used
+ to manage Logical Machine boot, shutdown, etc.
++-----------------------+------------------------------------------------------+
+|Parameters |
++-----------------------+------------------------------------------------------+
+|Name |Description |
++-----------------------+------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++-----------------------+------------------------------------------------------+
+|uint32 cpuid |ID of the CPU inside the LM |
++-----------------------+------------------------------------------------------+
+|uint32 flags |Reset vector flags |
+| |Bits[31:0] Reserved, must be zero. |
++-----------------------+------------------------------------------------------+
+|uint32 resetVectorLow |Lower vector |
++-----------------------+------------------------------------------------------+
+|uint32 resetVectorHigh |Higher vector |
++-----------------------+------------------------------------------------------+
+|Return values |
++-----------------------+------------------------------------------------------+
+|Name |Description |
++-----------------------+------------------------------------------------------+
+|int32 status |SUCCESS: If reset vector is set successfully. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine, or cpuId is not valid. |
+| |INVALID_PARAMETERS: if reset vector is invalid. |
+| |DENIED: if the agent does not have permission to set |
+| |the reset vector for the CPU in the LM. |
++-----------------------+------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x80
+This command is mandatory.
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications post successful return of this command must|
+| |comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
+Notifications
+_____________
+
+LMM_EVENT
+~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x80
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |Identifier for the LM that caused the transition. |
++------------------+-----------------------------------------------------------+
+|uint32 eventlm |Identifier of the LM this event refers to. |
++------------------+-----------------------------------------------------------+
+|uint32 flags |LM events: |
+| |Bits[31:4] Reserved, must be zero. |
+| |Bit[3] Wake (resume) event: |
+| |1 LM has awakened. |
+| |0 not a wake event. |
+| |Bit[2] Suspend (sleep) event: |
+| |1 LM has suspended. |
+| |0 not a suspend event. |
+| |Bit[1] Shutdown (off) event: |
+| |1 LM has shutdown. |
+| |0 not a shutdown event. |
+| |Bit[0] Boot (on) event: |
+| |1 LM has booted. |
+| |0 not a boot event. |
++------------------+-----------------------------------------------------------+
+
SCMI_BBM: System Control and Management BBM Vendor Protocol
==============================================================
@@ -436,6 +948,322 @@ protocol_id: 0x81
| |0 no button change detected. |
+------------------+-----------------------------------------------------------+
+System Control and Management CPU Vendor Protocol
+=================================================
+
+This protocol allows an agent to start or stop a CPU. It is used to manage
+auxiliary CPUs in a target LM (e.g. additional cores in an AP cluster or
+Cortex-M cores).
+Note:
+ - For cores in the AP cluster, PSCI should be used; the PSCI firmware uses the
+   CPU protocol to handle them. For cores outside the AP cluster, the OS (e.g.
+   Linux) can use the CPU protocol to control Cortex-M7 cores.
+ - CPU here means the core plus its auxiliary peripherals (e.g. TCM) inside
+   the i.MX SoC.
+
+There are cases where giving an agent full control of a CPU via the CPU
+protocol is not desired. The LMM protocol is restricted to just boot,
+shutdown, etc., so an agent might be allowed to boot another logical machine
+without being able to directly interfere with the state of its CPUs. This is
+also the reason there is an LMM power-on command even though that could have
+been done through the power protocol.
+
+The CPU protocol provides commands to:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Discover the CPUs defined in the system.
+- Start a CPU.
+- Stop a CPU.
+- Set the boot and resume addresses for a CPU.
+- Set the sleep mode of a CPU.
+- Configure wake-up sources for a CPU.
+- Configure power domain reactions (LPM mode and retention mask) for a CPU.
+- The CPU IDs can be found in the CPU section of the SoC DEVICE: SM Device
+  Interface document. They can also be found in the SoC RM; see the CPU Mode
+  Control (CMC) list in the General Power Controller (GPC) section.
+
+CPU settings are not aggregated and setting their state is normally exclusive
+to one client.
+
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x82
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000. |
++---------------+--------------------------------------------------------------+
+
+PROTOCOL_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x82
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Protocol attributes: |
+| |Bits[31:16] Reserved, must be zero. |
+| |Bits[15:0] Number of CPUs |
++------------------+-----------------------------------------------------------+
+
+PROTOCOL_MESSAGE_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x2
+protocol_id: 0x82
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: in case the message is implemented and available |
+| |to use. |
+| |NOT_FOUND: if the message identified by message_id is |
+| |invalid or not implemented |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Flags that are associated with a specific command in the |
+| |protocol. For all commands in this protocol, this |
+| |parameter has a value of 0 |
++------------------+-----------------------------------------------------------+
+
+CPU_ATTRIBUTES
+~~~~~~~~~~~~~~
+
+message_id: 0x3
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned successfully. |
+| |NOT_FOUND: if the cpuid is not valid. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Bits[31:0] Reserved, must be zero |
++------------------+-----------------------------------------------------------+
+|char name[16] |NULL terminated ASCII string with CPU name up to 16 bytes |
++------------------+-----------------------------------------------------------+
+
+CPU_START
+~~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the cpu is started successfully. |
+| |NOT_FOUND: if cpuid is not valid. |
+| |DENIED: the calling agent is not allowed to start this CPU.|
++------------------+-----------------------------------------------------------+
+
+CPU_STOP
+~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU is stopped successfully. |
+| |NOT_FOUND: if cpuid is not valid. |
+| |DENIED: the calling agent is not allowed to stop this CPU. |
++------------------+-----------------------------------------------------------+
+
+CPU_RESET_VECTOR_SET
+~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x6
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 flags |Reset vector flags: |
+| |Bit[31] Resume flag. |
+| |Set to 1 to update the reset vector used on resume. |
+| |Bit[30] Boot flag. |
+| |Set to 1 to update the reset vector used for boot. |
+| |Bits[29:1] Reserved, must be zero. |
+| |Bit[0] Table flag. |
+| |Set to 1 if vector is the vector table base address. |
++----------------------+-------------------------------------------------------+
+|uint32 resetVectorLow |Lower vector: |
+| |If bit[0] of flags is 0, the lower 32 bits of the |
+| |physical address where the CPU should execute from on |
+| |reset. If bit[0] of flags is 1, the lower 32 bits of |
+| |the vector table base address |
++----------------------+-------------------------------------------------------+
+|uint32 resetVectorHigh|Upper vector: |
+| |If bit[0] of flags is 0, the upper 32 bits of the |
+| |physical address where the CPU should execute from on |
+| |reset. If bit[0] of flags is 1, the upper 32 bits of |
+| |the vector table base address |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU reset vector is set successfully. |
+| |NOT_FOUND: if cpuId does not point to a valid CPU. |
+| |INVALID_PARAMETERS: the requested vector type is not |
+| |supported by this CPU. |
+| |DENIED: the calling agent is not allowed to set the |
+| |reset vector of this CPU |
++----------------------+-------------------------------------------------------+
+
+CPU_SLEEP_MODE_SET
+~~~~~~~~~~~~~~~~~~
+
+message_id: 0x7
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 flags |Sleep mode flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] IRQ mux: |
+| |If set to 1 the wakeup mux source is the GIC, else if 0|
+| |then the GPC |
++----------------------+-------------------------------------------------------+
+|uint32 sleepmode |Target sleep mode. When the CPU enters WFI, the GPC is |
+| |put into one of the modes below: |
+| |RUN: (0) |
+| |WAIT: (1) |
+| |STOP: (2) |
+| |SUSPEND: (3) |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU sleep mode is set successfully. |
+| |NOT_FOUND: if cpuId does not point to a valid CPU. |
+| |INVALID_PARAMETERS: the sleepmode or flags is invalid. |
+| |DENIED: the calling agent is not allowed to configure |
+| |the CPU |
++----------------------+-------------------------------------------------------+
+
+CPU_INFO_GET
+~~~~~~~~~~~~
+
+message_id: 0xC
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned successfully.|
+| |NOT_FOUND: if the cpuid is not valid. |
++----------------------+-------------------------------------------------------+
+|uint32 runmode |Run mode for the CPU |
+| |RUN(0):cpu started |
+| |HOLD(1):cpu powered up and reset asserted |
+| |STOP(2):cpu held in reset |
+| |SUSPEND(3):in cpuidle state |
++----------------------+-------------------------------------------------------+
+|uint32 sleepmode |Sleep mode for the CPU, see CPU_SLEEP_MODE_SET |
++----------------------+-------------------------------------------------------+
+|uint32 resetvectorlow |Reset vector low 32 bits for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 resetvectorhigh|Reset vector high 32 bits for the CPU |
++----------------------+-------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x82
+This command is mandatory.
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications post successful return of this command must|
+| |comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
SCMI_MISC: System Control and Management MISC Vendor Protocol
================================================================
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 3e8051fe8296..71e2a9a89f6a 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -1062,13 +1062,12 @@ static bool __init sdei_present_acpi(void)
return true;
}
-void __init sdei_init(void)
+void __init acpi_sdei_init(void)
{
struct platform_device *pdev;
int ret;
- ret = platform_driver_register(&sdei_driver);
- if (ret || !sdei_present_acpi())
+ if (!sdei_present_acpi())
return;
pdev = platform_device_register_simple(sdei_driver.driver.name,
@@ -1081,6 +1080,12 @@ void __init sdei_init(void)
}
}
+static int __init sdei_init(void)
+{
+ return platform_driver_register(&sdei_driver);
+}
+arch_initcall(sdei_init);
+
int sdei_event_handler(struct pt_regs *regs,
struct sdei_registered_event *arg)
{
diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig
index 0a883091259a..e3c2e38b746d 100644
--- a/drivers/firmware/cirrus/Kconfig
+++ b/drivers/firmware/cirrus/Kconfig
@@ -6,14 +6,11 @@ config FW_CS_DSP
config FW_CS_DSP_KUNIT_TEST_UTILS
tristate
- depends on KUNIT && REGMAP
- select FW_CS_DSP
config FW_CS_DSP_KUNIT_TEST
tristate "KUnit tests for Cirrus Logic cs_dsp" if !KUNIT_ALL_TESTS
- depends on KUNIT && REGMAP
+ depends on KUNIT && REGMAP && FW_CS_DSP
default KUNIT_ALL_TESTS
- select FW_CS_DSP
select FW_CS_DSP_KUNIT_TEST_UTILS
help
This builds KUnit tests for cs_dsp.
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
index 49d84f7e59e6..3f8777ee4dc0 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
@@ -96,10 +96,11 @@ static void cs_dsp_mock_bin_add_name_or_info(struct cs_dsp_mock_bin_builder *bui
if (info_len % 4) {
/* Create a padded string with length a multiple of 4 */
+ size_t copy_len = info_len;
info_len = round_up(info_len, 4);
tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
- memcpy(tmp, info, info_len);
+ memcpy(tmp, info, copy_len);
info = tmp;
}
@@ -176,6 +177,9 @@ struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
struct cs_dsp_mock_bin_builder *builder;
struct wmfw_coeff_hdr *hdr;
+ KUNIT_ASSERT_LE(priv->test, format_version, 0xff);
+ KUNIT_ASSERT_LE(priv->test, fw_version, 0xffffff);
+
builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
builder->test_priv = priv;
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
index 161272e47bda..95946fac5563 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
@@ -462,36 +462,6 @@ unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *pri
EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_alg_base_in_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
/**
- * cs_dsp_mock_xm_header_get_fw_version_from_regmap() - Firmware version.
- *
- * @priv: Pointer to struct cs_dsp_test.
- *
- * Return: Firmware version word value.
- */
-unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv)
-{
- unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
- union {
- struct wmfw_id_hdr adsp2;
- struct wmfw_v3_id_hdr halo;
- } hdr;
-
- switch (priv->dsp->type) {
- case WMFW_ADSP2:
- regmap_raw_read(priv->dsp->regmap, xm, &hdr.adsp2, sizeof(hdr.adsp2));
- return be32_to_cpu(hdr.adsp2.ver);
- case WMFW_HALO:
- regmap_raw_read(priv->dsp->regmap, xm, &hdr.halo, sizeof(hdr.halo));
- return be32_to_cpu(hdr.halo.ver);
- default:
- KUNIT_FAIL(priv->test, NULL);
- return 0;
- }
-}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version_from_regmap,
- "FW_CS_DSP_KUNIT_TEST_UTILS");
-
-/**
* cs_dsp_mock_xm_header_get_fw_version() - Firmware version.
*
* @header: Pointer to struct cs_dsp_mock_xm_header.
@@ -535,9 +505,11 @@ void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv)
* Could be one 32-bit register or two 16-bit registers.
* A raw read will read the requested number of bytes.
*/
- regmap_raw_read(priv->dsp->regmap,
- xm + (offsetof(struct wmfw_adsp2_id_hdr, n_algs) / 2),
- &num_algs_be32, sizeof(num_algs_be32));
+ KUNIT_ASSERT_GE(priv->test, 0,
+ regmap_raw_read(priv->dsp->regmap,
+ xm +
+ (offsetof(struct wmfw_adsp2_id_hdr, n_algs) / 2),
+ &num_algs_be32, sizeof(num_algs_be32)));
num_algs = be32_to_cpu(num_algs_be32);
bytes = sizeof(struct wmfw_adsp2_id_hdr) +
(num_algs * sizeof(struct wmfw_adsp2_alg_hdr)) +
@@ -546,9 +518,10 @@ void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv)
regcache_drop_region(priv->dsp->regmap, xm, xm + (bytes / 2) - 1);
break;
case WMFW_HALO:
- regmap_read(priv->dsp->regmap,
- xm + offsetof(struct wmfw_halo_id_hdr, n_algs),
- &num_algs);
+ KUNIT_ASSERT_GE(priv->test, 0,
+ regmap_read(priv->dsp->regmap,
+ xm + offsetof(struct wmfw_halo_id_hdr, n_algs),
+ &num_algs));
bytes = sizeof(struct wmfw_halo_id_hdr) +
(num_algs * sizeof(struct wmfw_halo_alg_hdr)) +
4 /* terminator word */;
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
index 5a3ac03ac37f..934d40a4d709 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
@@ -178,6 +178,8 @@ void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *buil
size_t bytes_needed, name_len, description_len;
int offset;
+ KUNIT_ASSERT_LE(builder->test_priv->test, alg_id, 0xffffff);
+
/* Bytes needed for region header */
bytes_needed = offsetof(struct wmfw_region, data);
@@ -435,6 +437,8 @@ struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
{
struct cs_dsp_mock_wmfw_builder *builder;
+ KUNIT_ASSERT_LE(priv->test, format_version, 0xff);
+
/* If format version isn't given use the default for the target core */
if (format_version < 0) {
switch (priv->dsp->type) {
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
index 1e161bbc5b4a..163b7faecff4 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
@@ -2198,7 +2198,7 @@ static int cs_dsp_bin_test_common_init(struct kunit *test, struct cs_dsp *dsp)
priv->local->bin_builder =
cs_dsp_mock_bin_init(priv, 1,
- cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ cs_dsp_mock_xm_header_get_fw_version(xm_hdr));
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->local->bin_builder);
/* We must provide a dummy wmfw to load */
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
index 8748874f0552..a7ec956d2724 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
@@ -451,7 +451,7 @@ static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *ds
local->bin_builder =
cs_dsp_mock_bin_init(priv, 1,
- cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ cs_dsp_mock_xm_header_get_fw_version(local->xm_header));
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->bin_builder);
/* Init cs_dsp */
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 5fe61b9ab5f9..db8c5c03d3a2 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -281,6 +281,30 @@ config EFI_EMBEDDED_FIRMWARE
bool
select CRYPTO_LIB_SHA256
+config EFI_SBAT
+ def_bool y if EFI_SBAT_FILE!=""
+
+config EFI_SBAT_FILE
+ string "Embedded SBAT section file path"
+ depends on EFI_ZBOOT
+ help
+	  The SBAT section provides a way to improve SecureBoot revocation of UEFI
+	  binaries by introducing a generation-based mechanism. With SBAT, older
+	  UEFI binaries can be prevented from booting by bumping the minimum
+	  required generation for the specific component in the bootloader.
+
+ Note: SBAT information is distribution specific, i.e. the owner of the
+ signing SecureBoot certificate must define the SBAT policy. Linux
+ kernel upstream does not define SBAT components and their generations.
+
+	  See https://github.com/rhboot/shim/blob/main/SBAT.md for additional
+	  details.
+
+	  Specify a file with SBAT data which will be embedded as a '.sbat'
+	  section in the kernel.
+
+ If unsure, leave blank.
+
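For illustration only: SBAT data is CSV, one entry per line in the form
component,generation,vendor_name,vendor_package_name,vendor_version,vendor_url.
The first line below is the format declaration from SBAT.md; the second is a
hypothetical, distribution-chosen entry (names, version and URL are placeholders):

	sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md
	linux,1,Example Distro,linux,6.16,https://example.org/kernel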
endmenu
config UEFI_CPER
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 7309394b8fc9..e57bff702b5f 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -558,6 +558,7 @@ int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
__weak __alias(__efi_mem_desc_lookup);
+EXPORT_SYMBOL_GPL(efi_mem_desc_lookup);
/*
* Calculate the highest address of an efi memory descriptor.
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d23a1b9fed75..2f173391b63d 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -85,7 +85,6 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o smbios.o
-lib-$(CONFIG_EFI_MIXED) += x86-mixed.o
lib-$(CONFIG_X86_64) += x86-5lvl.o
lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 48842b5c106b..92e3c73502ba 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -44,6 +44,10 @@ AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE
$(obj)/zboot-header.o: $(srctree)/drivers/firmware/efi/libstub/zboot-header.S FORCE
$(call if_changed_rule,as_o_S)
+ifneq ($(CONFIG_EFI_SBAT_FILE),)
+$(obj)/zboot-header.o: $(CONFIG_EFI_SBAT_FILE)
+endif
+
ZBOOT_DEPS := $(obj)/zboot-header.o $(objtree)/drivers/firmware/efi/libstub/lib.a
LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index fd6dc790c5a8..7aa2f9ad2935 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -601,6 +601,7 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
* @image: EFI loaded image protocol
* @soft_limit: preferred address for loading the initrd
* @hard_limit: upper limit address for loading the initrd
+ * @out: pointer to store the address of the initrd table
*
* Return: status code
*/
diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c
index 77359e802181..f1c5fb45d5f7 100644
--- a/drivers/firmware/efi/libstub/x86-5lvl.c
+++ b/drivers/firmware/efi/libstub/x86-5lvl.c
@@ -62,7 +62,7 @@ efi_status_t efi_setup_5level_paging(void)
void efi_5level_switch(void)
{
- bool want_la57 = IS_ENABLED(CONFIG_X86_5LEVEL) && !efi_no5lvl;
+ bool want_la57 = !efi_no5lvl;
bool have_la57 = native_read_cr4() & X86_CR4_LA57;
bool need_toggle = want_la57 ^ have_la57;
u64 *pgt = (void *)la57_toggle + PAGE_SIZE;
diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S
index fb676ded47fa..b6431edd0fc9 100644
--- a/drivers/firmware/efi/libstub/zboot-header.S
+++ b/drivers/firmware/efi/libstub/zboot-header.S
@@ -4,17 +4,17 @@
#ifdef CONFIG_64BIT
.set .Lextra_characteristics, 0x0
- .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32PLUS
+ .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR64_MAGIC
#else
.set .Lextra_characteristics, IMAGE_FILE_32BIT_MACHINE
- .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32
+ .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR32_MAGIC
#endif
.section ".head", "a"
.globl __efistub_efi_zboot_header
__efistub_efi_zboot_header:
.Ldoshdr:
- .long MZ_MAGIC
+ .long IMAGE_DOS_SIGNATURE
.ascii "zimg" // image type
.long __efistub__gzdata_start - .Ldoshdr // payload offset
.long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size
@@ -25,7 +25,7 @@ __efistub_efi_zboot_header:
.long .Lpehdr - .Ldoshdr // PE header offset
.Lpehdr:
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
.short MACHINE_TYPE
.short .Lsection_count
.long 0
@@ -63,7 +63,7 @@ __efistub_efi_zboot_header:
.long .Lefi_header_end - .Ldoshdr
.long 0
.short IMAGE_SUBSYSTEM_EFI_APPLICATION
- .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
+ .short IMAGE_DLLCHARACTERISTICS_NX_COMPAT
#ifdef CONFIG_64BIT
.quad 0, 0, 0, 0
#else
@@ -123,11 +123,29 @@ __efistub_efi_zboot_header:
IMAGE_SCN_MEM_READ | \
IMAGE_SCN_MEM_EXECUTE
+#ifdef CONFIG_EFI_SBAT
+ .ascii ".sbat\0\0\0"
+ .long __sbat_size
+ .long _sbat - .Ldoshdr
+ .long __sbat_size
+ .long _sbat - .Ldoshdr
+
+ .long 0, 0
+ .short 0, 0
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_DISCARDABLE
+
+ .pushsection ".sbat", "a", @progbits
+ .incbin CONFIG_EFI_SBAT_FILE
+ .popsection
+#endif
+
.ascii ".data\0\0\0"
.long __data_size
- .long _etext - .Ldoshdr
+ .long _data - .Ldoshdr
.long __data_rawsize
- .long _etext - .Ldoshdr
+ .long _data - .Ldoshdr
.long 0, 0
.short 0, 0
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
index 9ecc57ff5b45..c3a166675450 100644
--- a/drivers/firmware/efi/libstub/zboot.lds
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -29,7 +29,17 @@ SECTIONS
. = _etext;
}
+#ifdef CONFIG_EFI_SBAT
+ .sbat : ALIGN(4096) {
+ _sbat = .;
+ *(.sbat)
+ _esbat = ALIGN(4096);
+ . = _esbat;
+ }
+#endif
+
.data : ALIGN(4096) {
+ _data = .;
*(.data* .init.data*)
_edata = ALIGN(512);
. = _edata;
@@ -52,3 +62,4 @@ PROVIDE(__efistub__gzdata_size =
PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext));
PROVIDE(__data_size = ABSOLUTE(_end - _etext));
+PROVIDE(__sbat_size = ABSOLUTE(_esbat - _sbat));
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 34109fd86c55..f1c04d7cfd71 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -43,7 +43,8 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
map.map = early_memremap(phys_map, data->size);
if (!map.map) {
- pr_err("Could not map the memory map!\n");
+ pr_err("Could not map the memory map! phys_map=%pa, size=0x%lx\n",
+ &phys_map, data->size);
return -ENOMEM;
}
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 9e2628728aad..77b5f7ac3e20 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -361,6 +361,10 @@ static long efi_runtime_get_waketime(unsigned long arg)
getwakeuptime.enabled))
return -EFAULT;
+ if (getwakeuptime.pending && put_user(pending,
+ getwakeuptime.pending))
+ return -EFAULT;
+
if (getwakeuptime.time) {
if (copy_to_user(getwakeuptime.time, &efi_time,
sizeof(efi_time_t)))
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index c964f4924359..127ad752acf8 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -23,6 +23,28 @@ config IMX_SCU
This driver manages the IPC interface between host CPU and the
SCU firmware running on M4.
+config IMX_SCMI_CPU_DRV
+ tristate "IMX SCMI CPU Protocol driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ default y if ARCH_MXC
+ help
+ The System Controller Management Interface firmware (SCMI FW) is
+ a low-level system function which runs on a dedicated Cortex-M
+	  core and provides CPU management features.
+
+ This driver can also be built as a module.
+
+config IMX_SCMI_LMM_DRV
+ tristate "IMX SCMI LMM Protocol driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ default y if ARCH_MXC
+ help
+ The System Controller Management Interface firmware (SCMI FW) is
+ a low-level system function which runs on a dedicated Cortex-M
+	  core and provides Logical Machine management features.
+
+ This driver can also be built as a module.
+
config IMX_SCMI_MISC_DRV
tristate "IMX SCMI MISC Protocol driver"
depends on ARCH_MXC || COMPILE_TEST
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
index 8d046c341be8..3bbaffa6e347 100644
--- a/drivers/firmware/imx/Makefile
+++ b/drivers/firmware/imx/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IMX_DSP) += imx-dsp.o
obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o
+obj-${CONFIG_IMX_SCMI_CPU_DRV} += sm-cpu.o
obj-${CONFIG_IMX_SCMI_MISC_DRV} += sm-misc.o
+obj-${CONFIG_IMX_SCMI_LMM_DRV} += sm-lmm.o
diff --git a/drivers/firmware/imx/sm-cpu.c b/drivers/firmware/imx/sm-cpu.c
new file mode 100644
index 000000000000..091b014f739f
--- /dev/null
+++ b/drivers/firmware/imx/sm-cpu.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/firmware/imx/sm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+static const struct scmi_imx_cpu_proto_ops *imx_cpu_ops;
+static struct scmi_protocol_handle *ph;
+
+int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start, bool boot,
+ bool resume)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ return imx_cpu_ops->cpu_reset_vector_set(ph, cpuid, vector, start,
+ boot, resume);
+}
+EXPORT_SYMBOL(scmi_imx_cpu_reset_vector_set);
+
+int scmi_imx_cpu_start(u32 cpuid, bool start)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (start)
+ return imx_cpu_ops->cpu_start(ph, cpuid, true);
+
+ return imx_cpu_ops->cpu_start(ph, cpuid, false);
+};
+EXPORT_SYMBOL(scmi_imx_cpu_start);
+
+int scmi_imx_cpu_started(u32 cpuid, bool *started)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (!started)
+ return -EINVAL;
+
+ return imx_cpu_ops->cpu_started(ph, cpuid, started);
+};
+EXPORT_SYMBOL(scmi_imx_cpu_started);
+
+static int scmi_imx_cpu_probe(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ if (imx_cpu_ops) {
+ dev_err(&sdev->dev, "sm cpu already initialized\n");
+ return -EEXIST;
+ }
+
+ imx_cpu_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_CPU, &ph);
+ if (IS_ERR(imx_cpu_ops))
+ return PTR_ERR(imx_cpu_ops);
+
+ return 0;
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_IMX_CPU, "imx-cpu" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_imx_cpu_driver = {
+ .name = "scmi-imx-cpu",
+ .probe = scmi_imx_cpu_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_imx_cpu_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("IMX SM CPU driver");
+MODULE_LICENSE("GPL");
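A minimal consumer sketch of the helpers exported above. The caller, CPU id and
reset vector (boot_m_core(), M_CORE_CPUID, vector) are hypothetical; until this
SCMI CPU driver has probed, the helpers return -EPROBE_DEFER and the caller is
expected to retry via deferred probe:

	#include <linux/errno.h>
	#include <linux/firmware/imx/sm.h>

	#define M_CORE_CPUID	1	/* hypothetical CPU identifier */

	static int boot_m_core(u64 vector)
	{
		int ret;

		/* program the vector used for both boot and start */
		ret = scmi_imx_cpu_reset_vector_set(M_CORE_CPUID, vector,
						    true, true, false);
		if (ret)
			return ret;	/* -EPROBE_DEFER until the driver binds */

		return scmi_imx_cpu_start(M_CORE_CPUID, true);
	}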
diff --git a/drivers/firmware/imx/sm-lmm.c b/drivers/firmware/imx/sm-lmm.c
new file mode 100644
index 000000000000..6807bf563c03
--- /dev/null
+++ b/drivers/firmware/imx/sm-lmm.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/firmware/imx/sm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+static const struct scmi_imx_lmm_proto_ops *imx_lmm_ops;
+static struct scmi_protocol_handle *ph;
+
+int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (!info)
+ return -EINVAL;
+
+ return imx_lmm_ops->lmm_info(ph, lmid, info);
+};
+EXPORT_SYMBOL(scmi_imx_lmm_info);
+
+int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ return imx_lmm_ops->lmm_reset_vector_set(ph, lmid, cpuid, flags, vector);
+}
+EXPORT_SYMBOL(scmi_imx_lmm_reset_vector_set);
+
+int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ switch (op) {
+ case SCMI_IMX_LMM_BOOT:
+ return imx_lmm_ops->lmm_power_boot(ph, lmid, true);
+ case SCMI_IMX_LMM_POWER_ON:
+ return imx_lmm_ops->lmm_power_boot(ph, lmid, false);
+ case SCMI_IMX_LMM_SHUTDOWN:
+ return imx_lmm_ops->lmm_shutdown(ph, lmid, flags);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(scmi_imx_lmm_operation);
+
+static int scmi_imx_lmm_probe(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ if (imx_lmm_ops) {
+ dev_err(&sdev->dev, "lmm already initialized\n");
+ return -EEXIST;
+ }
+
+ imx_lmm_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_LMM, &ph);
+ if (IS_ERR(imx_lmm_ops))
+ return PTR_ERR(imx_lmm_ops);
+
+ return 0;
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_IMX_LMM, "imx-lmm" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_imx_lmm_driver = {
+ .name = "scmi-imx-lmm",
+ .probe = scmi_imx_lmm_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_imx_lmm_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("IMX SM LMM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index a1ebbe9b73b1..38ca190d4a22 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -804,8 +804,10 @@ int __init psci_dt_init(void)
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
- if (!np || !of_device_is_available(np))
+ if (!np || !of_device_is_available(np)) {
+ of_node_put(np);
return -ENODEV;
+ }
init_fn = (psci_initcall_t)matched_np->data;
ret = init_fn(np);
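A minimal sketch of the refcounting rule behind this fix: of_find_*() returns a
node with its refcount elevated, so the early-return path must drop it too;
of_node_put(NULL) is a no-op, which keeps the error path simple. The helper name
and match table are hypothetical:

	#include <linux/of.h>
	#include <linux/errno.h>

	static int find_and_check(const struct of_device_id *matches)
	{
		struct device_node *np;

		np = of_find_matching_node(NULL, matches);
		if (!np || !of_device_is_available(np)) {
			of_node_put(np);	/* safe even when np is NULL */
			return -ENODEV;
		}

		/* ... use np ... */
		of_node_put(np);
		return 0;
	}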
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index b662b7e28b80..df02a4ec3398 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -343,7 +343,7 @@ static int suspend_test_thread(void *arg)
* later.
*/
timer_delete(&wakeup_timer);
- destroy_timer_on_stack(&wakeup_timer);
+ timer_destroy_on_stack(&wakeup_timer);
if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
complete(&suspend_threads_done);
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index fc4d67e4c4a6..f63b716be5b0 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -1986,7 +1986,10 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
*/
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "asus,vivobook-s15" },
+ { .compatible = "asus,zenbook-a14-ux3407qa" },
+ { .compatible = "asus,zenbook-a14-ux3407ra" },
{ .compatible = "dell,xps13-9345" },
+ { .compatible = "hp,elitebook-ultra-g1q" },
{ .compatible = "hp,omnibook-x14" },
{ .compatible = "huawei,gaokun3" },
{ .compatible = "lenovo,flex-5g" },
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index 097369d38b84..3133d826f5fa 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -44,8 +44,11 @@ enum qcom_scm_arg_types {
/**
* struct qcom_scm_desc
+ * @svc: Service identifier
+ * @cmd: Command identifier
* @arginfo: Metadata describing the arguments in args[]
* @args: The array of arguments for the secure syscall
+ * @owner: Owner identifier
*/
struct qcom_scm_desc {
u32 svc;
diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c
index 92b365178235..94196ad87105 100644
--- a/drivers/firmware/qcom/qcom_tzmem.c
+++ b/drivers/firmware/qcom/qcom_tzmem.c
@@ -79,6 +79,7 @@ static const char *const qcom_tzmem_blacklist[] = {
"qcom,sc8180x",
"qcom,sdm670", /* failure in GPU firmware loading */
"qcom,sdm845", /* reset in rmtfs memory assignment */
+ "qcom,sm7150", /* reset in rmtfs memory assignment */
"qcom,sm8150", /* reset in rmtfs memory assignment */
NULL
};
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c
index 85e90d236da2..39b33a356ebd 100644
--- a/drivers/firmware/samsung/exynos-acpm-pmic.c
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
@@ -43,13 +43,13 @@ static inline u32 acpm_pmic_get_bulk(u32 data, unsigned int i)
return (data >> (ACPM_PMIC_BULK_SHIFT * i)) & ACPM_PMIC_BULK_MASK;
}
-static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd,
+static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd, size_t cmdlen,
unsigned int acpm_chan_id)
{
xfer->txd = cmd;
xfer->rxd = cmd;
- xfer->txlen = sizeof(cmd);
- xfer->rxlen = sizeof(cmd);
+ xfer->txlen = cmdlen;
+ xfer->rxlen = cmdlen;
xfer->acpm_chan_id = acpm_chan_id;
}
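The bug being fixed is the classic sizeof-on-a-pointer mistake: inside the
callee the array argument has decayed to u32 *, so sizeof(cmd) yields the
pointer size (4 or 8 bytes), not the size of the caller's command buffer. A
standalone sketch (all names hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	static size_t len_wrong(uint32_t *cmd)
	{
		return sizeof(cmd);	/* size of the pointer, typically 8 */
	}

	static size_t len_right(uint32_t *cmd, size_t cmdlen)
	{
		(void)cmd;
		return cmdlen;		/* real size passed in by the caller */
	}

	int main(void)
	{
		uint32_t cmd[4];	/* 16 bytes */

		printf("wrong: %zu\n", len_wrong(cmd));			/* 8 on LP64 */
		printf("right: %zu\n", len_right(cmd, sizeof(cmd)));	/* 16 */
		return 0;
	}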
@@ -71,7 +71,7 @@ int acpm_pmic_read_reg(const struct acpm_handle *handle,
int ret;
acpm_pmic_init_read_cmd(cmd, type, reg, chan);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -104,7 +104,7 @@ int acpm_pmic_bulk_read(const struct acpm_handle *handle,
return -EINVAL;
acpm_pmic_init_bulk_read_cmd(cmd, type, reg, chan, count);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -144,7 +144,7 @@ int acpm_pmic_write_reg(const struct acpm_handle *handle,
int ret;
acpm_pmic_init_write_cmd(cmd, type, reg, chan, value);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -184,7 +184,7 @@ int acpm_pmic_bulk_write(const struct acpm_handle *handle,
return -EINVAL;
acpm_pmic_init_bulk_write_cmd(cmd, type, reg, chan, count, buf);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -214,7 +214,7 @@ int acpm_pmic_update_reg(const struct acpm_handle *handle,
int ret;
acpm_pmic_init_update_cmd(cmd, type, reg, chan, value, mask);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c
index a85b2dbdd9f0..e02f14f4bd7c 100644
--- a/drivers/firmware/samsung/exynos-acpm.c
+++ b/drivers/firmware/samsung/exynos-acpm.c
@@ -15,6 +15,7 @@
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/ktime.h>
#include <linux/mailbox/exynos-message.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
@@ -32,8 +33,7 @@
#define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16)
-/* The unit of counter is 20 us. 5000 * 20 = 100 ms */
-#define ACPM_POLL_TIMEOUT 5000
+#define ACPM_POLL_TIMEOUT_US (100 * USEC_PER_MSEC)
#define ACPM_TX_TIMEOUT_US 500000
#define ACPM_GS101_INITDATA_BASE 0xa000
@@ -185,6 +185,29 @@ struct acpm_match_data {
#define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle)
/**
+ * acpm_get_saved_rx() - get the response if it was already saved.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer to get response for.
+ * @tx_seqnum: xfer TX sequence number.
+ */
+static void acpm_get_saved_rx(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer, u32 tx_seqnum)
+{
+ const struct acpm_rx_data *rx_data = &achan->rx_data[tx_seqnum - 1];
+ u32 rx_seqnum;
+
+ if (!rx_data->response)
+ return;
+
+ rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, rx_data->cmd[0]);
+
+ if (rx_seqnum == tx_seqnum) {
+ memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
+ clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
+ }
+}
+
+/**
* acpm_get_rx() - get response from RX queue.
* @achan: ACPM channel info.
* @xfer: reference to the transfer to get response for.
@@ -204,15 +227,16 @@ static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
rx_front = readl(achan->rx.front);
i = readl(achan->rx.rear);
- /* Bail out if RX is empty. */
- if (i == rx_front)
+ tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+
+ if (i == rx_front) {
+ acpm_get_saved_rx(achan, xfer, tx_seqnum);
return 0;
+ }
base = achan->rx.base;
mlen = achan->mlen;
- tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
-
/* Drain RX queue. */
do {
/* Read RX seqnum. */
@@ -259,16 +283,8 @@ static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
* If the response was not in this iteration of the queue, check if the
* RX data was previously saved.
*/
- rx_data = &achan->rx_data[tx_seqnum - 1];
- if (!rx_set && rx_data->response) {
- rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM,
- rx_data->cmd[0]);
-
- if (rx_seqnum == tx_seqnum) {
- memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
- clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
- }
- }
+ if (!rx_set)
+ acpm_get_saved_rx(achan, xfer, tx_seqnum);
return 0;
}
@@ -284,12 +300,13 @@ static int acpm_dequeue_by_polling(struct acpm_chan *achan,
const struct acpm_xfer *xfer)
{
struct device *dev = achan->acpm->dev;
- unsigned int cnt_20us = 0;
+ ktime_t timeout;
u32 seqnum;
int ret;
seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+ timeout = ktime_add_us(ktime_get(), ACPM_POLL_TIMEOUT_US);
do {
ret = acpm_get_rx(achan, xfer);
if (ret)
@@ -299,12 +316,11 @@ static int acpm_dequeue_by_polling(struct acpm_chan *achan,
return 0;
/* Determined experimentally. */
- usleep_range(20, 30);
- cnt_20us++;
- } while (cnt_20us < ACPM_POLL_TIMEOUT);
+ udelay(20);
+ } while (ktime_before(ktime_get(), timeout));
- dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx, cnt_20us = %d.\n",
- achan->id, seqnum, achan->bitmap_seqnum[0], cnt_20us);
+ dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx.\n",
+ achan->id, seqnum, achan->bitmap_seqnum[0]);
return -ETIME;
}
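A minimal sketch of the polling pattern this change switches to: bound the loop
by wall-clock time with ktime instead of counting fixed-delay iterations, so
the timeout no longer depends on how much delay udelay() actually grants. The
helper and callback names are hypothetical:

	#include <linux/ktime.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	#define POLL_TIMEOUT_US	(100 * USEC_PER_MSEC)

	static int poll_until_done(bool (*done)(void *ctx), void *ctx)
	{
		ktime_t timeout = ktime_add_us(ktime_get(), POLL_TIMEOUT_US);

		do {
			if (done(ctx))
				return 0;
			udelay(20);	/* poll interval, as in the driver */
		} while (ktime_before(ktime_get(), timeout));

		return -ETIME;
	}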
@@ -633,7 +649,7 @@ static int acpm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, acpm);
- return 0;
+ return devm_of_platform_populate(dev);
}
/**
@@ -661,43 +677,30 @@ static void devm_acpm_release(struct device *dev, void *res)
}
/**
- * acpm_get_by_phandle() - get the ACPM handle using DT phandle.
- * @dev: device pointer requesting ACPM handle.
- * @property: property name containing phandle on ACPM node.
+ * acpm_get_by_node() - get the ACPM handle using node pointer.
+ * @dev: device pointer requesting ACPM handle.
+ * @np: ACPM device tree node.
*
* Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
*/
-static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
- const char *property)
+static const struct acpm_handle *acpm_get_by_node(struct device *dev,
+ struct device_node *np)
{
struct platform_device *pdev;
- struct device_node *acpm_np;
struct device_link *link;
struct acpm_info *acpm;
- acpm_np = of_parse_phandle(dev->of_node, property, 0);
- if (!acpm_np)
- return ERR_PTR(-ENODEV);
-
- pdev = of_find_device_by_node(acpm_np);
- if (!pdev) {
- dev_err(dev, "Cannot find device node %s\n", acpm_np->name);
- of_node_put(acpm_np);
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
- }
-
- of_node_put(acpm_np);
acpm = platform_get_drvdata(pdev);
if (!acpm) {
- dev_err(dev, "Cannot get drvdata from %s\n",
- dev_name(&pdev->dev));
platform_device_put(pdev);
return ERR_PTR(-EPROBE_DEFER);
}
if (!try_module_get(pdev->dev.driver->owner)) {
- dev_err(dev, "Cannot get module reference.\n");
platform_device_put(pdev);
return ERR_PTR(-EPROBE_DEFER);
}
@@ -716,14 +719,14 @@ static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
}
/**
- * devm_acpm_get_by_phandle() - managed get handle using phandle.
- * @dev: device pointer requesting ACPM handle.
- * @property: property name containing phandle on ACPM node.
+ * devm_acpm_get_by_node() - managed get handle using node pointer.
+ * @dev: device pointer requesting ACPM handle.
+ * @np: ACPM device tree node.
*
* Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
*/
-const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
- const char *property)
+const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+ struct device_node *np)
{
const struct acpm_handle **ptr, *handle;
@@ -731,7 +734,7 @@ const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
- handle = acpm_get_by_phandle(dev, property);
+ handle = acpm_get_by_node(dev, np);
if (!IS_ERR(handle)) {
*ptr = handle;
devres_add(dev, ptr);
@@ -741,6 +744,7 @@ const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
return handle;
}
+EXPORT_SYMBOL_GPL(devm_acpm_get_by_node);
static const struct acpm_match_data acpm_gs101 = {
.initdata_base = ACPM_GS101_INITDATA_BASE,
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index 5767aed25cdc..49e1de83d2e8 100644
--- a/drivers/firmware/smccc/kvm_guest.c
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -17,17 +17,11 @@ static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_afte
void __init kvm_init_hyp_services(void)
{
+ uuid_t kvm_uuid = ARM_SMCCC_VENDOR_HYP_UID_KVM;
struct arm_smccc_res res;
u32 val[4];
- if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
- return;
-
- arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
- if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 ||
- res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 ||
- res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 ||
- res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3)
+ if (!arm_smccc_hypervisor_has_uuid(&kvm_uuid))
return;
memset(&res, 0, sizeof(res));
@@ -95,7 +89,7 @@ void __init kvm_arm_target_impl_cpu_init(void)
for (i = 0; i < max_cpus; i++) {
arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
- i, &res);
+ i, 0, 0, &res);
if (res.a0 != SMCCC_RET_SUCCESS) {
pr_warn("Discovering target implementation CPUs failed\n");
goto mem_free;
@@ -103,7 +97,7 @@ void __init kvm_arm_target_impl_cpu_init(void)
target[i].midr = res.a1;
target[i].revidr = res.a2;
target[i].aidr = res.a3;
- };
+ }
if (!cpu_errata_set_target_impl(max_cpus, target)) {
pr_warn("Failed to set target implementation CPUs\n");
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index a74600d9f2d7..cd65b434dc6e 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -67,6 +67,23 @@ s32 arm_smccc_get_soc_id_revision(void)
}
EXPORT_SYMBOL_GPL(arm_smccc_get_soc_id_revision);
+bool arm_smccc_hypervisor_has_uuid(const uuid_t *hyp_uuid)
+{
+ struct arm_smccc_res res = {};
+ uuid_t uuid;
+
+ if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
+ return false;
+
+ arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ return false;
+
+ uuid = smccc_res_to_uuid(res.a0, res.a1, res.a2, res.a3);
+ return uuid_equal(&uuid, hyp_uuid);
+}
+EXPORT_SYMBOL_GPL(arm_smccc_hypervisor_has_uuid);
+
static int __init smccc_devices_init(void)
{
struct platform_device *pdev;
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 3c52cb73237a..e3f990d888d7 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -1224,22 +1224,28 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
if (!svc->intel_svc_fcs) {
dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
ret = -ENOMEM;
- goto err_unregister_dev;
+ goto err_unregister_rsu_dev;
}
ret = platform_device_add(svc->intel_svc_fcs);
if (ret) {
platform_device_put(svc->intel_svc_fcs);
- goto err_unregister_dev;
+ goto err_unregister_rsu_dev;
}
+ ret = of_platform_default_populate(dev_of_node(dev), NULL, dev);
+ if (ret)
+ goto err_unregister_fcs_dev;
+
dev_set_drvdata(dev, svc);
pr_info("Intel Service Layer Driver Initialized\n");
return 0;
-err_unregister_dev:
+err_unregister_fcs_dev:
+ platform_device_unregister(svc->intel_svc_fcs);
+err_unregister_rsu_dev:
platform_device_unregister(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
@@ -1253,6 +1259,8 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev)
struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+ of_platform_depopulate(ctrl->dev);
+
platform_device_unregister(svc->intel_svc_fcs);
platform_device_unregister(svc->stratix10_svc_rsu);
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index 75a186bf8f8e..592d8a644619 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -35,36 +35,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
return false;
- /*
- * The meaning of depth and bpp for direct-color formats is
- * inconsistent:
- *
- * - DRM format info specifies depth as the number of color
- * bits; including alpha, but not including filler bits.
- * - Linux' EFI platform code computes lfb_depth from the
- * individual color channels, including the reserved bits.
- * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
- * versions use 15.
- * - On the kernel command line, 'bpp' of 32 is usually
- * XRGB8888 including the filler bits, but 15 is XRGB1555
- * not including the filler bit.
- *
- * It's not easily possible to fix this in struct screen_info,
- * as this could break UAPI. The best solution is to compute
- * bits_per_pixel from the color bits, reserved bits and
- * reported lfb_depth, whichever is highest. In the loop below,
- * ignore simplefb formats with alpha bits, as EFI and VESA
- * don't specify alpha channels.
- */
- if (si->lfb_depth > 8) {
- bits_per_pixel = max(max3(si->red_size + si->red_pos,
- si->green_size + si->green_pos,
- si->blue_size + si->blue_pos),
- si->rsvd_size + si->rsvd_pos);
- bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
- } else {
- bits_per_pixel = si->lfb_depth;
- }
+ bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
const struct simplefb_format *f = &formats[i];
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 806a975fff22..ae5fd1936ad3 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol Driver
*
- * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2025 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
@@ -3670,6 +3670,7 @@ static int __maybe_unused ti_sci_suspend(struct device *dev)
struct ti_sci_info *info = dev_get_drvdata(dev);
struct device *cpu_dev, *cpu_dev_max = NULL;
s32 val, cpu_lat = 0;
+ u16 cpu_lat_ms;
int i, ret;
if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
@@ -3682,9 +3683,16 @@ static int __maybe_unused ti_sci_suspend(struct device *dev)
}
}
if (cpu_dev_max) {
- dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u\n", __func__, cpu_lat);
+ /*
+ * PM QoS latency unit is usecs, device manager uses msecs.
+ * Convert to msecs and round down for device manager.
+ */
+ cpu_lat_ms = cpu_lat / USEC_PER_MSEC;
+ dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u ms\n", __func__,
+ cpu_lat_ms);
ret = ti_sci_cmd_set_latency_constraint(&info->handle,
- cpu_lat, TISCI_MSG_CONSTRAINT_SET);
+ cpu_lat_ms,
+ TISCI_MSG_CONSTRAINT_SET);
if (ret)
return ret;
}
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 47fe6261f5a3..1eac9948148f 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -2,29 +2,31 @@
/*
* Turris Mox rWTM firmware driver
*
- * Copyright (C) 2019, 2024 Marek Behún <kabel@kernel.org>
+ * Copyright (C) 2019, 2024, 2025 Marek Behún <kabel@kernel.org>
*/
#include <crypto/sha2.h>
#include <linux/align.h>
#include <linux/armada-37xx-rwtm-mailbox.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/container_of.h>
-#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/if_ether.h>
+#include <linux/key.h>
#include <linux/kobject.h>
#include <linux/mailbox_client.h>
+#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/sysfs.h>
+#include <linux/turris-signing-key.h>
#include <linux/types.h>
#define DRIVER_NAME "turris-mox-rwtm"
@@ -37,10 +39,13 @@
* https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi.
*/
-#define MOX_ECC_NUMBER_WORDS 17
-#define MOX_ECC_NUMBER_LEN (MOX_ECC_NUMBER_WORDS * sizeof(u32))
-
-#define MOX_ECC_SIGNATURE_WORDS (2 * MOX_ECC_NUMBER_WORDS)
+enum {
+ MOX_ECC_NUM_BITS = 521,
+ MOX_ECC_NUM_LEN = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 8),
+ MOX_ECC_NUM_WORDS = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 32),
+ MOX_ECC_SIG_LEN = 2 * MOX_ECC_NUM_LEN,
+ MOX_ECC_PUBKEY_LEN = 1 + MOX_ECC_NUM_LEN,
+};
#define MBOX_STS_SUCCESS (0 << 30)
#define MBOX_STS_FAIL (1 << 30)
@@ -77,10 +82,7 @@ enum mbox_cmd {
* @ram_size: RAM size of the device
* @mac_address1: first MAC address of the device
* @mac_address2: second MAC address of the device
- * @has_pubkey: whether board ECDSA public key is present
* @pubkey: board ECDSA public key
- * @last_sig: last ECDSA signature generated with board ECDSA private key
- * @last_sig_done: whether the last ECDSA signing is complete
*/
struct mox_rwtm {
struct mbox_client mbox_client;
@@ -100,18 +102,8 @@ struct mox_rwtm {
int board_version, ram_size;
u8 mac_address1[ETH_ALEN], mac_address2[ETH_ALEN];
- bool has_pubkey;
- u8 pubkey[135];
-
-#ifdef CONFIG_DEBUG_FS
- /*
- * Signature process. This is currently done via debugfs, because it
- * does not conform to the sysfs standard "one file per attribute".
- * It should be rewritten via crypto API once akcipher API is available
- * from userspace.
- */
- u32 last_sig[MOX_ECC_SIGNATURE_WORDS];
- bool last_sig_done;
+#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL
+ u8 pubkey[MOX_ECC_PUBKEY_LEN];
#endif
};
@@ -120,24 +112,23 @@ static inline struct device *rwtm_dev(struct mox_rwtm *rwtm)
return rwtm->mbox_client.dev;
}
-#define MOX_ATTR_RO(name, format, cat) \
+#define MOX_ATTR_RO(name, format) \
static ssize_t \
name##_show(struct device *dev, struct device_attribute *a, \
char *buf) \
{ \
struct mox_rwtm *rwtm = dev_get_drvdata(dev); \
- if (!rwtm->has_##cat) \
+ if (!rwtm->has_board_info) \
return -ENODATA; \
return sysfs_emit(buf, format, rwtm->name); \
} \
static DEVICE_ATTR_RO(name)
-MOX_ATTR_RO(serial_number, "%016llX\n", board_info);
-MOX_ATTR_RO(board_version, "%i\n", board_info);
-MOX_ATTR_RO(ram_size, "%i\n", board_info);
-MOX_ATTR_RO(mac_address1, "%pM\n", board_info);
-MOX_ATTR_RO(mac_address2, "%pM\n", board_info);
-MOX_ATTR_RO(pubkey, "%s\n", pubkey);
+MOX_ATTR_RO(serial_number, "%016llX\n");
+MOX_ATTR_RO(board_version, "%i\n");
+MOX_ATTR_RO(ram_size, "%i\n");
+MOX_ATTR_RO(mac_address1, "%pM\n");
+MOX_ATTR_RO(mac_address2, "%pM\n");
static struct attribute *turris_mox_rwtm_attrs[] = {
&dev_attr_serial_number.attr,
@@ -145,7 +136,6 @@ static struct attribute *turris_mox_rwtm_attrs[] = {
&dev_attr_ram_size.attr,
&dev_attr_mac_address1.attr,
&dev_attr_mac_address2.attr,
- &dev_attr_pubkey.attr,
NULL
};
ATTRIBUTE_GROUPS(turris_mox_rwtm);
@@ -247,24 +237,6 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
pr_info(" burned RAM size %i MiB\n", rwtm->ram_size);
}
- ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false);
- if (ret == -ENODATA) {
- dev_warn(dev, "Board has no public key burned!\n");
- } else if (ret == -EOPNOTSUPP) {
- dev_notice(dev,
- "Firmware does not support the ECDSA_PUB_KEY command\n");
- } else if (ret < 0) {
- return ret;
- } else {
- u32 *s = reply->status;
-
- rwtm->has_pubkey = true;
- sprintf(rwtm->pubkey,
- "%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
- ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
- s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]);
- }
-
return 0;
}
@@ -306,127 +278,139 @@ unlock_mutex:
return ret;
}
-#ifdef CONFIG_DEBUG_FS
-static int rwtm_debug_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
+#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL
- return nonseekable_open(inode, file);
-}
-
-static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len,
- loff_t *ppos)
+static void mox_ecc_number_to_bin(void *dst, const u32 *src)
{
- struct mox_rwtm *rwtm = file->private_data;
- ssize_t ret;
+ __be32 tmp[MOX_ECC_NUM_WORDS];
- /* only allow one read, of whole signature, from position 0 */
- if (*ppos != 0)
- return 0;
+ cpu_to_be32_array(tmp, src, MOX_ECC_NUM_WORDS);
- if (len < sizeof(rwtm->last_sig))
- return -EINVAL;
+ memcpy(dst, (void *)tmp + 2, MOX_ECC_NUM_LEN);
+}
- if (!rwtm->last_sig_done)
- return -ENODATA;
+static void mox_ecc_public_key_to_bin(void *dst, u32 src_first,
+ const u32 *src_rest)
+{
+ __be32 tmp[MOX_ECC_NUM_WORDS - 1];
+ u8 *p = dst;
- ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig,
- sizeof(rwtm->last_sig));
- rwtm->last_sig_done = false;
+ /* take 3 bytes from the first word */
+ *p++ = src_first >> 16;
+ *p++ = src_first >> 8;
+ *p++ = src_first;
- return ret;
+ /* take the rest of the words */
+ cpu_to_be32_array(tmp, src_rest, MOX_ECC_NUM_WORDS - 1);
+ memcpy(p, tmp, sizeof(tmp));
}
-static ssize_t do_sign_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
+static int mox_rwtm_sign(const struct key *key, const void *data, void *signature)
{
- struct mox_rwtm *rwtm = file->private_data;
- struct armada_37xx_rwtm_tx_msg msg;
- loff_t dummy = 0;
- ssize_t ret;
-
- if (len != SHA512_DIGEST_SIZE)
- return -EINVAL;
-
- /* if last result is not zero user has not read that information yet */
- if (rwtm->last_sig_done)
- return -EBUSY;
+ struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key));
+ struct armada_37xx_rwtm_tx_msg msg = {};
+ u32 offset_r, offset_s;
+ int ret;
- if (!mutex_trylock(&rwtm->busy))
- return -EBUSY;
+ guard(mutex)(&rwtm->busy);
/*
- * Here we have to send:
- * 1. Address of the input to sign.
- * The input is an array of 17 32-bit words, the first (most
- * significat) is 0, the rest 16 words are copied from the SHA-512
- * hash given by the user and converted from BE to LE.
- * 2. Address of the buffer where ECDSA signature value R shall be
- * stored by the rWTM firmware.
- * 3. Address of the buffer where ECDSA signature value S shall be
- * stored by the rWTM firmware.
+ * For MBOX_CMD_SIGN command:
+ * args[0] - must be 1
+ * args[1] - address of message M to sign; message is a 521-bit number
+ * args[2] - address where the R part of the signature will be stored
+ * args[3] - address where the S part of the signature will be stored
+ *
+ * M, R and S are 521-bit numbers encoded as seventeen 32-bit words,
+ * most significat word first.
+ * Since the message in @data is a sha512 digest, the most significat
+ * word is always zero.
*/
+
+ offset_r = MOX_ECC_NUM_WORDS * sizeof(u32);
+ offset_s = 2 * MOX_ECC_NUM_WORDS * sizeof(u32);
+
memset(rwtm->buf, 0, sizeof(u32));
- ret = simple_write_to_buffer(rwtm->buf + sizeof(u32),
- SHA512_DIGEST_SIZE, &dummy, buf, len);
- if (ret < 0)
- goto unlock_mutex;
- be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUMBER_WORDS);
+ memcpy(rwtm->buf + sizeof(u32), data, SHA512_DIGEST_SIZE);
+ be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUM_WORDS);
msg.args[0] = 1;
msg.args[1] = rwtm->buf_phys;
- msg.args[2] = rwtm->buf_phys + MOX_ECC_NUMBER_LEN;
- msg.args[3] = rwtm->buf_phys + 2 * MOX_ECC_NUMBER_LEN;
+ msg.args[2] = rwtm->buf_phys + offset_r;
+ msg.args[3] = rwtm->buf_phys + offset_s;
ret = mox_rwtm_exec(rwtm, MBOX_CMD_SIGN, &msg, true);
if (ret < 0)
- goto unlock_mutex;
+ return ret;
- /*
- * Here we read the R and S values of the ECDSA signature
- * computed by the rWTM firmware and convert their words from
- * LE to BE.
- */
- memcpy(rwtm->last_sig, rwtm->buf + MOX_ECC_NUMBER_LEN,
- sizeof(rwtm->last_sig));
- cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig,
- MOX_ECC_SIGNATURE_WORDS);
- rwtm->last_sig_done = true;
+ /* convert R and S parts of the signature */
+ mox_ecc_number_to_bin(signature, rwtm->buf + offset_r);
+ mox_ecc_number_to_bin(signature + MOX_ECC_NUM_LEN, rwtm->buf + offset_s);
- mutex_unlock(&rwtm->busy);
- return len;
-unlock_mutex:
- mutex_unlock(&rwtm->busy);
- return ret;
+ return 0;
}
-static const struct file_operations do_sign_fops = {
- .owner = THIS_MODULE,
- .open = rwtm_debug_open,
- .read = do_sign_read,
- .write = do_sign_write,
-};
-
-static void rwtm_debugfs_release(void *root)
+static const void *mox_rwtm_get_public_key(const struct key *key)
{
- debugfs_remove_recursive(root);
+ struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key));
+
+ return rwtm->pubkey;
}
-static void rwtm_register_debugfs(struct mox_rwtm *rwtm)
+static const struct turris_signing_key_subtype mox_signing_key_subtype = {
+ .key_size = MOX_ECC_NUM_BITS,
+ .data_size = SHA512_DIGEST_SIZE,
+ .sig_size = MOX_ECC_SIG_LEN,
+ .public_key_size = MOX_ECC_PUBKEY_LEN,
+ .hash_algo = "sha512",
+ .get_public_key = mox_rwtm_get_public_key,
+ .sign = mox_rwtm_sign,
+};
+
+static int mox_register_signing_key(struct mox_rwtm *rwtm)
{
- struct dentry *root;
+ struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
+ struct device *dev = rwtm_dev(rwtm);
+ int ret;
- root = debugfs_create_dir("turris-mox-rwtm", NULL);
+ ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false);
+ if (ret == -ENODATA) {
+ dev_warn(dev, "Board has no public key burned!\n");
+ } else if (ret == -EOPNOTSUPP) {
+ dev_notice(dev,
+ "Firmware does not support the ECDSA_PUB_KEY command\n");
+ } else if (ret < 0) {
+ return ret;
+ } else {
+ char sn[17] = "unknown";
+ char desc[46];
+
+ if (rwtm->has_board_info)
+ sprintf(sn, "%016llX", rwtm->serial_number);
+
+ sprintf(desc, "Turris MOX SN %s rWTM ECDSA key", sn);
- debugfs_create_file_unsafe("do_sign", 0600, root, rwtm, &do_sign_fops);
+ mox_ecc_public_key_to_bin(rwtm->pubkey, ret, reply->status);
- devm_add_action_or_reset(rwtm_dev(rwtm), rwtm_debugfs_release, root);
+ ret = devm_turris_signing_key_create(dev,
+ &mox_signing_key_subtype,
+ desc);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot create signing key\n");
+ }
+
+ return 0;
}
-#else
-static inline void rwtm_register_debugfs(struct mox_rwtm *rwtm)
+
+#else /* CONFIG_TURRIS_MOX_RWTM_KEYCTL */
+
+static inline int mox_register_signing_key(struct mox_rwtm *rwtm)
{
+ return 0;
}
-#endif
+
+#endif /* !CONFIG_TURRIS_MOX_RWTM_KEYCTL */
static void rwtm_devm_mbox_release(void *mbox)
{
@@ -477,6 +461,10 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
if (ret < 0)
dev_warn(dev, "Cannot read board information: %i\n", ret);
+ ret = mox_register_signing_key(rwtm);
+ if (ret < 0)
+ return ret;
+
ret = check_get_random_support(rwtm);
if (ret < 0) {
dev_notice(dev,
@@ -491,8 +479,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "Cannot register HWRNG!\n");
- rwtm_register_debugfs(rwtm);
-
dev_info(dev, "HWRNG successfully registered\n");
/*
diff --git a/drivers/fpga/tests/fpga-bridge-test.c b/drivers/fpga/tests/fpga-bridge-test.c
index b9ab29809e96..124ba40e32b1 100644
--- a/drivers/fpga/tests/fpga-bridge-test.c
+++ b/drivers/fpga/tests/fpga-bridge-test.c
@@ -170,4 +170,5 @@ static struct kunit_suite fpga_bridge_suite = {
kunit_test_suite(fpga_bridge_suite);
+MODULE_DESCRIPTION("KUnit test for the FPGA Bridge");
MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c
index 9cb37aefbac4..8748babb0504 100644
--- a/drivers/fpga/tests/fpga-mgr-test.c
+++ b/drivers/fpga/tests/fpga-mgr-test.c
@@ -330,4 +330,5 @@ static struct kunit_suite fpga_mgr_suite = {
kunit_test_suite(fpga_mgr_suite);
+MODULE_DESCRIPTION("KUnit test for the FPGA Manager");
MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c
index 6a108cafded8..020ceac48509 100644
--- a/drivers/fpga/tests/fpga-region-test.c
+++ b/drivers/fpga/tests/fpga-region-test.c
@@ -214,4 +214,5 @@ static struct kunit_suite fpga_region_suite = {
kunit_test_suite(fpga_region_suite);
+MODULE_DESCRIPTION("KUnit test for the FPGA Region");
MODULE_LICENSE("GPL");
diff --git a/drivers/fwctl/main.c b/drivers/fwctl/main.c
index cb1ac9c40239..bc6378506296 100644
--- a/drivers/fwctl/main.c
+++ b/drivers/fwctl/main.c
@@ -105,7 +105,7 @@ static int fwctl_cmd_rpc(struct fwctl_ucmd *ucmd)
if (!test_and_set_bit(0, &fwctl_tainted)) {
dev_warn(
&fwctl->dev,
- "%s(%d): has requested full access to the physical device device",
+ "%s(%d): has requested full access to the physical device",
current->comm, task_pid_nr(current));
add_taint(TAINT_FWCTL, LOCKDEP_STILL_OK);
}
diff --git a/drivers/fwctl/pds/main.c b/drivers/fwctl/pds/main.c
index 284c4165fdd4..9b9d1f6b5556 100644
--- a/drivers/fwctl/pds/main.c
+++ b/drivers/fwctl/pds/main.c
@@ -105,12 +105,14 @@ static int pdsfc_identify(struct pdsfc_dev *pdsfc)
static void pdsfc_free_endpoints(struct pdsfc_dev *pdsfc)
{
struct device *dev = &pdsfc->fwctl.dev;
+ u32 num_endpoints;
int i;
if (!pdsfc->endpoints)
return;
- for (i = 0; pdsfc->endpoint_info && i < pdsfc->endpoints->num_entries; i++)
+ num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
+ for (i = 0; pdsfc->endpoint_info && i < num_endpoints; i++)
mutex_destroy(&pdsfc->endpoint_info[i].lock);
vfree(pdsfc->endpoint_info);
pdsfc->endpoint_info = NULL;
@@ -199,7 +201,7 @@ static int pdsfc_init_endpoints(struct pdsfc_dev *pdsfc)
ep_entry = (struct pds_fwctl_query_data_endpoint *)pdsfc->endpoints->entries;
for (i = 0; i < num_endpoints; i++) {
mutex_init(&pdsfc->endpoint_info[i].lock);
- pdsfc->endpoint_info[i].endpoint = ep_entry[i].id;
+ pdsfc->endpoint_info[i].endpoint = le32_to_cpu(ep_entry[i].id);
}
return 0;
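A minimal sketch of the endianness rule these fixes apply: fields declared
__le32 in firmware/adminq structures must go through le32_to_cpu() before being
used as loop bounds or compared against host-endian values. The structure and
helper below are hypothetical:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct fw_table {
		__le32 num_entries;
		__le32 ids[];
	};

	static bool fw_table_has_id(const struct fw_table *t, u32 id)
	{
		u32 n = le32_to_cpu(t->num_entries);	/* convert once */
		u32 i;

		for (i = 0; i < n; i++)
			if (le32_to_cpu(t->ids[i]) == id)
				return true;

		return false;
	}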
@@ -214,6 +216,7 @@ static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc
struct pds_fwctl_query_data *data;
union pds_core_adminq_cmd cmd;
dma_addr_t data_pa;
+ u32 num_entries;
int err;
int i;
@@ -246,8 +249,9 @@ static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc
*pa = data_pa;
entries = (struct pds_fwctl_query_data_operation *)data->entries;
- dev_dbg(dev, "num_entries %d\n", data->num_entries);
- for (i = 0; i < data->num_entries; i++) {
+ num_entries = le32_to_cpu(data->num_entries);
+ dev_dbg(dev, "num_entries %d\n", num_entries);
+ for (i = 0; i < num_entries; i++) {
/* Translate FW command attribute to fwctl scope */
switch (entries[i].scope) {
@@ -267,7 +271,7 @@ static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc
break;
}
dev_dbg(dev, "endpoint %d operation: id %x scope %d\n",
- ep, entries[i].id, entries[i].scope);
+ ep, le32_to_cpu(entries[i].id), entries[i].scope);
}
return data;
@@ -280,24 +284,26 @@ static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc,
struct pds_fwctl_query_data_operation *op_entry;
struct pdsfc_rpc_endpoint_info *ep_info = NULL;
struct device *dev = &pdsfc->fwctl.dev;
+ u32 num_entries;
int i;
/* validate rpc in_len & out_len based
* on ident.max_req_sz & max_resp_sz
*/
- if (rpc->in.len > pdsfc->ident.max_req_sz) {
+ if (rpc->in.len > le32_to_cpu(pdsfc->ident.max_req_sz)) {
dev_dbg(dev, "Invalid request size %u, max %u\n",
- rpc->in.len, pdsfc->ident.max_req_sz);
+ rpc->in.len, le32_to_cpu(pdsfc->ident.max_req_sz));
return -EINVAL;
}
- if (rpc->out.len > pdsfc->ident.max_resp_sz) {
+ if (rpc->out.len > le32_to_cpu(pdsfc->ident.max_resp_sz)) {
dev_dbg(dev, "Invalid response size %u, max %u\n",
- rpc->out.len, pdsfc->ident.max_resp_sz);
+ rpc->out.len, le32_to_cpu(pdsfc->ident.max_resp_sz));
return -EINVAL;
}
- for (i = 0; i < pdsfc->endpoints->num_entries; i++) {
+ num_entries = le32_to_cpu(pdsfc->endpoints->num_entries);
+ for (i = 0; i < num_entries; i++) {
if (pdsfc->endpoint_info[i].endpoint == rpc->in.ep) {
ep_info = &pdsfc->endpoint_info[i];
break;
@@ -326,8 +332,9 @@ static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc,
/* reject unsupported and/or out of scope commands */
op_entry = (struct pds_fwctl_query_data_operation *)ep_info->operations->entries;
- for (i = 0; i < ep_info->operations->num_entries; i++) {
- if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, op_entry[i].id)) {
+ num_entries = le32_to_cpu(ep_info->operations->num_entries);
+ for (i = 0; i < num_entries; i++) {
+ if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, le32_to_cpu(op_entry[i].id))) {
if (scope < op_entry[i].scope)
return -EPERM;
return 0;
@@ -402,7 +409,7 @@ static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
cmd = (union pds_core_adminq_cmd) {
.fwctl_rpc = {
.opcode = PDS_FWCTL_CMD_RPC,
- .flags = PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP,
+ .flags = cpu_to_le16(PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP),
.ep = cpu_to_le32(rpc->in.ep),
.op = cpu_to_le32(rpc->in.op),
.req_pa = cpu_to_le64(in_payload_dma_addr),
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index f2c39bbff83a..44f922e10db2 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -201,6 +201,7 @@ config GPIO_RASPBERRYPI_EXP
config GPIO_BCM_KONA
bool "Broadcom Kona GPIO"
depends on ARCH_BCM_MOBILE || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
help
Turn on GPIO support for Broadcom "Kona" chips.
@@ -213,6 +214,18 @@ config GPIO_BCM_XGS_IPROC
help
Say yes here to enable GPIO support for Broadcom XGS iProc SoCs.
+config GPIO_BLZP1600
+ tristate "Blaize BLZP1600 GPIO support"
+ default y if ARCH_BLAIZE
+ depends on ARCH_BLAIZE || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y or M here to add support for the Blaize BLZP1600 GPIO device.
+ The controller is based on the Verisilicon Microelectronics GPIO APB v0.2
+ IP block.
+
config GPIO_BRCMSTB
tristate "BRCMSTB GPIO support"
default y if (ARCH_BRCMSTB || BMIPS_GENERIC)
@@ -241,6 +254,7 @@ config GPIO_DAVINCI
tristate "TI Davinci/Keystone GPIO support"
default y if ARCH_DAVINCI
depends on ((ARM || ARM64) && (ARCH_DAVINCI || ARCH_KEYSTONE || ARCH_K3)) || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
help
Say yes here to enable GPIO support for TI Davinci/Keystone SoCs.
@@ -340,7 +354,7 @@ config GPIO_GRGPIO
tristate "Aeroflex Gaisler GRGPIO support"
depends on OF || COMPILE_TEST
select GPIO_GENERIC
- select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
help
Select this to support Aeroflex Gaisler GRGPIO cores from the GRLIB
VHDL IP core library.
@@ -368,8 +382,7 @@ config GPIO_HLWD
config GPIO_ICH
tristate "Intel ICH GPIO"
- depends on X86
- depends on LPC_ICH
+ depends on (X86 && LPC_ICH) || (COMPILE_TEST && HAS_IOPORT)
help
Say yes here to support the GPIO functionality of a number of Intel
ICH-based chipsets. Currently supported devices: ICH6, ICH7, ICH8
@@ -425,6 +438,7 @@ config GPIO_LPC18XX
default y if ARCH_LPC18XX
depends on OF_GPIO && (ARCH_LPC18XX || COMPILE_TEST)
select IRQ_DOMAIN_HIERARCHY
+ select GPIOLIB_IRQCHIP
help
Select this option to enable GPIO driver for
NXP LPC18XX/43XX devices.
@@ -468,7 +482,7 @@ config GPIO_MPC8XXX
FSL_SOC_BOOKE || PPC_86xx || ARCH_LAYERSCAPE || ARM || \
COMPILE_TEST
select GPIO_GENERIC
- select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
help
Say Y here if you're going to use hardware that connects to the
MPC512x/831x/834x/837x/8572/8610/QorIQ GPIOs.
@@ -540,7 +554,7 @@ config GPIO_OMAP
config GPIO_PL061
tristate "PrimeCell PL061 GPIO support"
- depends on ARM_AMBA
+ depends on ARM_AMBA || COMPILE_TEST
select IRQ_DOMAIN
select GPIOLIB_IRQCHIP
help
@@ -555,6 +569,7 @@ config GPIO_POLARFIRE_SOC
config GPIO_PXA
bool "PXA GPIO support"
depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
help
Say yes here to support the PXA GPIO device.
@@ -604,7 +619,7 @@ config GPIO_ROCKCHIP
config GPIO_RTD
tristate "Realtek DHC GPIO support"
- depends on ARCH_REALTEK
+ depends on ARCH_REALTEK || COMPILE_TEST
default y
select GPIOLIB_IRQCHIP
help
@@ -656,6 +671,15 @@ config GPIO_SNPS_CREG
where only several fields in register belong to GPIO lines and
each GPIO line owns a field with different length and on/off value.
+config GPIO_SPACEMIT_K1
+ tristate "SPACEMIT K1 GPIO support"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+	  Say yes here to support SpacemiT's K1 GPIO device.
+
config GPIO_SPEAR_SPICS
bool "ST SPEAr13xx SPI Chip Select as GPIO support"
depends on PLAT_SPEAR
@@ -753,7 +777,7 @@ config GPIO_UNIPHIER
Say yes here to support UniPhier GPIOs.
config GPIO_VF610
- bool "VF610 GPIO support"
+ tristate "VF610 GPIO support"
default y if SOC_VF610
depends on ARCH_MXC || COMPILE_TEST
select GPIOLIB_IRQCHIP
@@ -830,14 +854,14 @@ config GPIO_ZEVIO
config GPIO_ZYNQ
tristate "Xilinx Zynq GPIO support"
- depends on ARCH_ZYNQ || ARCH_ZYNQMP
+ depends on ARCH_ZYNQ || ARCH_ZYNQMP || COMPILE_TEST
select GPIOLIB_IRQCHIP
help
Say yes here to support Xilinx Zynq GPIO controller.
config GPIO_ZYNQMP_MODEPIN
tristate "ZynqMP ps-mode pin GPIO configuration driver"
- depends on ZYNQMP_FIRMWARE
+ depends on ZYNQMP_FIRMWARE || COMPILE_TEST
default ZYNQMP_FIRMWARE
help
Say yes here to support the ZynqMP ps-mode pin GPIO configuration
@@ -866,7 +890,7 @@ config GPIO_AMD_FCH
config GPIO_MSC313
bool "MStar MSC313 GPIO support"
- depends on ARCH_MSTARV7
+ depends on ARCH_MSTARV7 || COMPILE_TEST
default ARCH_MSTARV7
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
@@ -1365,7 +1389,7 @@ config GPIO_DLN2
config HTC_EGPIO
bool "HTC EGPIO support"
- depends on ARM
+ depends on ARM || COMPILE_TEST
help
This driver supports the CPLD egpio chip present on
several HTC phones. It provides basic support for input
@@ -1463,6 +1487,19 @@ config GPIO_MAX77650
GPIO driver for MAX77650/77651 PMIC from Maxim Semiconductor.
These chips have a single pin that can be configured as GPIO.
+config GPIO_MAX77759
+ tristate "Maxim Integrated MAX77759 GPIO support"
+ depends on MFD_MAX77759
+ default MFD_MAX77759
+ select GPIOLIB_IRQCHIP
+ help
+ GPIO driver for MAX77759 PMIC from Maxim Integrated.
+ There are two GPIOs available on these chips in total, both of
+ which can also generate interrupts.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-max77759.
+
config GPIO_PALMAS
bool "TI PALMAS series PMICs GPIO"
depends on MFD_PALMAS
@@ -1520,12 +1557,13 @@ config GPIO_TC3589X
config GPIO_TIMBERDALE
bool "Support for timberdale GPIO IP"
depends on MFD_TIMBERDALE
+ select GPIOLIB_IRQCHIP
help
Add support for the GPIO IP in the timberdale FPGA.
config GPIO_TN48M_CPLD
tristate "Delta Networks TN48M switch CPLD GPIO driver"
- depends on MFD_TN48M_CPLD
+ depends on MFD_TN48M_CPLD || COMPILE_TEST
select GPIO_REGMAP
help
This enables support for the GPIOs found on the Delta
@@ -1869,6 +1907,8 @@ menu "Virtual GPIO drivers"
config GPIO_AGGREGATOR
tristate "GPIO Aggregator"
+ select CONFIGFS_FS
+ select DEV_SYNC_PROBE
help
Say yes here to enable the GPIO Aggregator, which provides a way to
aggregate existing GPIO lines into a new virtual GPIO chip.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index af130882ffee..88dedd298256 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
obj-$(CONFIG_GPIO_CDEV) += gpiolib-cdev.o
obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
+gpiolib-acpi-y := gpiolib-acpi-core.o gpiolib-acpi-quirks.o
obj-$(CONFIG_GPIOLIB) += gpiolib-swnode.o
# Device drivers. Generally keep list sorted alphabetically
@@ -45,6 +46,7 @@ obj-$(CONFIG_GPIO_BCM_XGS_IPROC) += gpio-xgs-iproc.o
obj-$(CONFIG_GPIO_BD71815) += gpio-bd71815.o
obj-$(CONFIG_GPIO_BD71828) += gpio-bd71828.o
obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
+obj-$(CONFIG_GPIO_BLZP1600) += gpio-blzp1600.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
obj-$(CONFIG_GPIO_CADENCE) += gpio-cadence.o
@@ -105,6 +107,7 @@ obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
obj-$(CONFIG_GPIO_MAX77650) += gpio-max77650.o
+obj-$(CONFIG_GPIO_MAX77759) += gpio-max77759.o
obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
@@ -159,6 +162,7 @@ obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
obj-$(CONFIG_GPIO_SL28CPLD) += gpio-sl28cpld.o
obj-$(CONFIG_GPIO_SLOPPY_LOGIC_ANALYZER) += gpio-sloppy-logic-analyzer.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
+obj-$(CONFIG_GPIO_SPACEMIT_K1) += gpio-spacemit-k1.o
obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
obj-$(CONFIG_GPIO_SPRD) += gpio-sprd.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index b5f0a7a2e1bf..4a8b349f2483 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -44,6 +44,13 @@ Work items:
to a machine description such as device tree, ACPI or fwnode that
implicitly does not use global GPIO numbers.
+- Fix drivers to not read back struct gpio_chip::base. Some drivers do
+ that and would be broken by attempts to poison it or make it dynamic.
+ Example in AT91 pinctrl driver:
+ https://lore.kernel.org/all/1d00c056-3d61-4c22-bedd-3bae0bf1ddc4@pengutronix.de/
+ This particular driver is also DT-only, so with the above fixed, the
+ base can be made dynamic (set to -1) if CONFIG_GPIO_SYSFS is disabled.
+
- When this work is complete (will require some of the items in the
following ongoing work as well) we can delete the old global
numberspace accessors from <linux/gpio.h> and eventually delete
@@ -186,3 +193,37 @@ their hardware offsets within the chip.
Encourage users to switch to using them and eventually remove the existing
global export/unexport attributes.
+
+-------------------------------------------------------------------------------
+
+Remove GPIOD_FLAGS_BIT_NONEXCLUSIVE
+
+GPIOs in the linux kernel are meant to be an exclusive resource. This means
+that the GPIO descriptors (the software representation of the hardware concept)
+are not reference counted and - in general - only one user at a time can
+request a GPIO line and control its settings. The consumer API is designed
+around full control of the line's state as evidenced by the fact that, for
+instance, gpiod_set_value() does indeed drive the line as requested, instead
+of bumping an enable counter of some sort.
+
+A problematic use-case for GPIOs is when two consumers want to use the same
+descriptor independently. An example of such a user is the regulator subsystem
+which may instantiate several struct regulator_dev instances containing
+a struct device but using the same enable GPIO line.
+
+A workaround was introduced in the form of the GPIOD_FLAGS_BIT_NONEXCLUSIVE
+flag, but its implementation is problematic: it does not provide any
+synchronization of usage nor does it introduce any enable count, meaning the
+non-exclusive users of the same descriptor will in fact "fight" for
+control over it. This flag should be removed and replaced with a better
+solution, possibly based on the new power sequencing subsystem.
+
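For illustration, a minimal consumer-side sketch of the pattern the flag
permits; the probe function and the "enable" con_id are hypothetical, while
devm_gpiod_get(), GPIOD_FLAGS_BIT_NONEXCLUSIVE and gpiod_set_value() are the
existing consumer API. Two requests succeed for the same physical line and,
with no arbitration between them, the last gpiod_set_value() call wins:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static int example_probe(struct device *dev)	/* hypothetical */
	{
		struct gpio_desc *ena_a, *ena_b;

		ena_a = devm_gpiod_get(dev, "enable",
				       GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
		if (IS_ERR(ena_a))
			return PTR_ERR(ena_a);

		/* Second request for the very same line - only legal with the flag. */
		ena_b = devm_gpiod_get(dev, "enable",
				       GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
		if (IS_ERR(ena_b))
			return PTR_ERR(ena_b);

		gpiod_set_value(ena_a, 1);	/* consumer A drives the line high */
		gpiod_set_value(ena_b, 0);	/* consumer B immediately undoes it */

		return 0;
	}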
+-------------------------------------------------------------------------------
+
+Remove devm_gpiod_unhinge()
+
+devm_gpiod_unhinge() is provided as a way to transfer the ownership of managed
+enable GPIOs to the regulator core. Rather than doing that, however, we should
+make it possible for the regulator subsystem to deal with GPIO resources whose
+lifetime it doesn't control, as, logically, a GPIO obtained by a caller should
+also be freed by it.
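As a sketch of the hand-over this refers to: a driver requests a managed
enable GPIO, hands it to the regulator core, then detaches it from devres so
the descriptor is not released twice. The registration helper below is
hypothetical; devm_gpiod_get_optional(), devm_regulator_register() and
devm_gpiod_unhinge() are the existing APIs, and the exact point at which the
regulator core assumes ownership follows its own rules.

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>
	#include <linux/regulator/driver.h>

	/* Hypothetical helper illustrating the ownership transfer. */
	static int example_register(struct device *dev,
				    const struct regulator_desc *desc,
				    struct regulator_config *cfg)
	{
		struct regulator_dev *rdev;

		cfg->ena_gpiod = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
		if (IS_ERR(cfg->ena_gpiod))
			return PTR_ERR(cfg->ena_gpiod);

		rdev = devm_regulator_register(dev, desc, cfg);
		if (IS_ERR(rdev))
			return PTR_ERR(rdev);

		/*
		 * The regulator core now manages the enable GPIO; drop the
		 * devres entry so the descriptor is not put a second time
		 * when this driver detaches.
		 */
		if (cfg->ena_gpiod)
			devm_gpiod_unhinge(dev, cfg->ena_gpiod);

		return 0;
	}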
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index d232ea865356..6f941db02c04 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -9,10 +9,13 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -27,226 +30,200 @@
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
+#include "dev-sync-probe.h"
+
#define AGGREGATOR_MAX_GPIOS 512
+#define AGGREGATOR_LEGACY_PREFIX "_sysfs"
/*
* GPIO Aggregator sysfs interface
*/
struct gpio_aggregator {
+ struct dev_sync_probe_data probe_data;
+ struct config_group group;
struct gpiod_lookup_table *lookups;
- struct platform_device *pdev;
+ struct mutex lock;
+ int id;
+
+ /* List of gpio_aggregator_line. Always added in order */
+ struct list_head list_head;
+
+ /* used by legacy sysfs interface only */
+ bool init_via_sysfs;
char args[];
};
+struct gpio_aggregator_line {
+ struct config_group group;
+ struct gpio_aggregator *parent;
+ struct list_head entry;
+
+ /* Line index within the aggregator device */
+ unsigned int idx;
+
+ /* Custom name for the virtual line */
+ const char *name;
+ /* GPIO chip label or line name */
+ const char *key;
+ /* Can be negative to indicate lookup by line name */
+ int offset;
+
+ enum gpio_lookup_flags flags;
+};
+
+struct gpio_aggregator_pdev_meta {
+ bool init_via_sysfs;
+};
+
static DEFINE_MUTEX(gpio_aggregator_lock); /* protects idr */
static DEFINE_IDR(gpio_aggregator_idr);
-static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
- int hwnum, unsigned int *n)
+static int gpio_aggregator_alloc(struct gpio_aggregator **aggr, size_t arg_size)
{
- struct gpiod_lookup_table *lookups;
+ int ret;
- lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
- GFP_KERNEL);
- if (!lookups)
+ struct gpio_aggregator *new __free(kfree) = kzalloc(
+ sizeof(*new) + arg_size, GFP_KERNEL);
+ if (!new)
return -ENOMEM;
- lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
+ scoped_guard(mutex, &gpio_aggregator_lock)
+ ret = idr_alloc(&gpio_aggregator_idr, new, 0, 0, GFP_KERNEL);
- (*n)++;
- memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
+ if (ret < 0)
+ return ret;
- aggr->lookups = lookups;
+ new->id = ret;
+ INIT_LIST_HEAD(&new->list_head);
+ mutex_init(&new->lock);
+ *aggr = no_free_ptr(new);
return 0;
}
-static int aggr_parse(struct gpio_aggregator *aggr)
+static void gpio_aggregator_free(struct gpio_aggregator *aggr)
{
- char *args = skip_spaces(aggr->args);
- char *name, *offsets, *p;
- unsigned int i, n = 0;
- int error = 0;
-
- unsigned long *bitmap __free(bitmap) =
- bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
- if (!bitmap)
- return -ENOMEM;
-
- args = next_arg(args, &name, &p);
- while (*args) {
- args = next_arg(args, &offsets, &p);
-
- p = get_options(offsets, 0, &error);
- if (error == 0 || *p) {
- /* Named GPIO line */
- error = aggr_add_gpio(aggr, name, U16_MAX, &n);
- if (error)
- return error;
+ scoped_guard(mutex, &gpio_aggregator_lock)
+ idr_remove(&gpio_aggregator_idr, aggr->id);
- name = offsets;
- continue;
- }
+ mutex_destroy(&aggr->lock);
+ kfree(aggr);
+}
- /* GPIO chip + offset(s) */
- error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
- if (error) {
- pr_err("Cannot parse %s: %d\n", offsets, error);
- return error;
- }
+static int gpio_aggregator_add_gpio(struct gpio_aggregator *aggr,
+ const char *key, int hwnum, unsigned int *n)
+{
+ struct gpiod_lookup_table *lookups;
- for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
- error = aggr_add_gpio(aggr, name, i, &n);
- if (error)
- return error;
- }
+ lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
+ GFP_KERNEL);
+ if (!lookups)
+ return -ENOMEM;
- args = next_arg(args, &name, &p);
- }
+ lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
- if (!n) {
- pr_err("No GPIOs specified\n");
- return -EINVAL;
- }
+ (*n)++;
+ memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
+ aggr->lookups = lookups;
return 0;
}
-static ssize_t new_device_store(struct device_driver *driver, const char *buf,
- size_t count)
+static bool gpio_aggregator_is_active(struct gpio_aggregator *aggr)
{
- struct gpio_aggregator *aggr;
- struct platform_device *pdev;
- int res, id;
+ lockdep_assert_held(&aggr->lock);
- if (!try_module_get(THIS_MODULE))
- return -ENOENT;
-
- /* kernfs guarantees string termination, so count + 1 is safe */
- aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
- if (!aggr) {
- res = -ENOMEM;
- goto put_module;
- }
-
- memcpy(aggr->args, buf, count + 1);
+ return aggr->probe_data.pdev && platform_get_drvdata(aggr->probe_data.pdev);
+}
- aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
- GFP_KERNEL);
- if (!aggr->lookups) {
- res = -ENOMEM;
- goto free_ga;
- }
+/* Only aggregators created via legacy sysfs can be "activating". */
+static bool gpio_aggregator_is_activating(struct gpio_aggregator *aggr)
+{
+ lockdep_assert_held(&aggr->lock);
- mutex_lock(&gpio_aggregator_lock);
- id = idr_alloc(&gpio_aggregator_idr, aggr, 0, 0, GFP_KERNEL);
- mutex_unlock(&gpio_aggregator_lock);
+ return aggr->probe_data.pdev && !platform_get_drvdata(aggr->probe_data.pdev);
+}
- if (id < 0) {
- res = id;
- goto free_table;
- }
+static size_t gpio_aggregator_count_lines(struct gpio_aggregator *aggr)
+{
+ lockdep_assert_held(&aggr->lock);
- aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, id);
- if (!aggr->lookups->dev_id) {
- res = -ENOMEM;
- goto remove_idr;
- }
+ return list_count_nodes(&aggr->list_head);
+}
- res = aggr_parse(aggr);
- if (res)
- goto free_dev_id;
+static struct gpio_aggregator_line *
+gpio_aggregator_line_alloc(struct gpio_aggregator *parent, unsigned int idx,
+ char *key, int offset)
+{
+ struct gpio_aggregator_line *line;
- gpiod_add_lookup_table(aggr->lookups);
+ line = kzalloc(sizeof(*line), GFP_KERNEL);
+ if (!line)
+ return ERR_PTR(-ENOMEM);
- pdev = platform_device_register_simple(DRV_NAME, id, NULL, 0);
- if (IS_ERR(pdev)) {
- res = PTR_ERR(pdev);
- goto remove_table;
+ if (key) {
+ line->key = kstrdup(key, GFP_KERNEL);
+ if (!line->key) {
+ kfree(line);
+ return ERR_PTR(-ENOMEM);
+ }
}
- aggr->pdev = pdev;
- module_put(THIS_MODULE);
- return count;
+ line->flags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ line->parent = parent;
+ line->idx = idx;
+ line->offset = offset;
+ INIT_LIST_HEAD(&line->entry);
-remove_table:
- gpiod_remove_lookup_table(aggr->lookups);
-free_dev_id:
- kfree(aggr->lookups->dev_id);
-remove_idr:
- mutex_lock(&gpio_aggregator_lock);
- idr_remove(&gpio_aggregator_idr, id);
- mutex_unlock(&gpio_aggregator_lock);
-free_table:
- kfree(aggr->lookups);
-free_ga:
- kfree(aggr);
-put_module:
- module_put(THIS_MODULE);
- return res;
-}
-
-static DRIVER_ATTR_WO(new_device);
-
-static void gpio_aggregator_free(struct gpio_aggregator *aggr)
-{
- platform_device_unregister(aggr->pdev);
- gpiod_remove_lookup_table(aggr->lookups);
- kfree(aggr->lookups->dev_id);
- kfree(aggr->lookups);
- kfree(aggr);
+ return line;
}
-static ssize_t delete_device_store(struct device_driver *driver,
- const char *buf, size_t count)
+static void gpio_aggregator_line_add(struct gpio_aggregator *aggr,
+ struct gpio_aggregator_line *line)
{
- struct gpio_aggregator *aggr;
- unsigned int id;
- int error;
+ struct gpio_aggregator_line *tmp;
- if (!str_has_prefix(buf, DRV_NAME "."))
- return -EINVAL;
+ lockdep_assert_held(&aggr->lock);
- error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
- if (error)
- return error;
-
- if (!try_module_get(THIS_MODULE))
- return -ENOENT;
-
- mutex_lock(&gpio_aggregator_lock);
- aggr = idr_remove(&gpio_aggregator_idr, id);
- mutex_unlock(&gpio_aggregator_lock);
- if (!aggr) {
- module_put(THIS_MODULE);
- return -ENOENT;
+ list_for_each_entry(tmp, &aggr->list_head, entry) {
+ if (tmp->idx > line->idx) {
+ list_add_tail(&line->entry, &tmp->entry);
+ return;
+ }
}
-
- gpio_aggregator_free(aggr);
- module_put(THIS_MODULE);
- return count;
+ list_add_tail(&line->entry, &aggr->list_head);
}
-static DRIVER_ATTR_WO(delete_device);
-
-static struct attribute *gpio_aggregator_attrs[] = {
- &driver_attr_new_device.attr,
- &driver_attr_delete_device.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(gpio_aggregator);
-static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
+static void gpio_aggregator_line_del(struct gpio_aggregator *aggr,
+ struct gpio_aggregator_line *line)
{
- gpio_aggregator_free(p);
- return 0;
+ lockdep_assert_held(&aggr->lock);
+
+ list_del(&line->entry);
}
-static void __exit gpio_aggregator_remove_all(void)
+static void gpio_aggregator_free_lines(struct gpio_aggregator *aggr)
{
- mutex_lock(&gpio_aggregator_lock);
- idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
- idr_destroy(&gpio_aggregator_idr);
- mutex_unlock(&gpio_aggregator_lock);
+ struct gpio_aggregator_line *line, *tmp;
+
+ list_for_each_entry_safe(line, tmp, &aggr->list_head, entry) {
+ configfs_unregister_group(&line->group);
+ /*
+ * Normally, we acquire aggr->lock within the configfs
+ * callback. However, in the legacy sysfs interface case,
+ * calling configfs_(un)register_group while holding
+ * aggr->lock could cause a deadlock. Fortunately, this is
+ * unnecessary because the new_device/delete_device path
+ * and the module unload path are mutually exclusive,
+ * thanks to an explicit try_module_get. That's why this
+ * minimal scoped_guard suffices.
+ */
+ scoped_guard(mutex, &aggr->lock)
+ gpio_aggregator_line_del(aggr, line);
+ kfree(line->key);
+ kfree(line->name);
+ kfree(line);
+ }
}
@@ -582,6 +559,728 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
return fwd;
}
+/*
+ * Configfs interface
+ */
+
+static struct gpio_aggregator *
+to_gpio_aggregator(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_aggregator, group);
+}
+
+static struct gpio_aggregator_line *
+to_gpio_aggregator_line(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_aggregator_line, group);
+}
+
+static struct fwnode_handle *
+gpio_aggregator_make_device_sw_node(struct gpio_aggregator *aggr)
+{
+ struct property_entry properties[2];
+ struct gpio_aggregator_line *line;
+ size_t num_lines;
+ int n = 0;
+
+ memset(properties, 0, sizeof(properties));
+
+ num_lines = gpio_aggregator_count_lines(aggr);
+ if (num_lines == 0)
+ return NULL;
+
+ const char **line_names __free(kfree) = kcalloc(
+ num_lines, sizeof(*line_names), GFP_KERNEL);
+ if (!line_names)
+ return ERR_PTR(-ENOMEM);
+
+ /* The list is always sorted as new elements are inserted in order. */
+ list_for_each_entry(line, &aggr->list_head, entry)
+ line_names[n++] = line->name ?: "";
+
+ properties[0] = PROPERTY_ENTRY_STRING_ARRAY_LEN(
+ "gpio-line-names",
+ line_names, num_lines);
+
+ return fwnode_create_software_node(properties, NULL);
+}
+
+static int gpio_aggregator_activate(struct gpio_aggregator *aggr)
+{
+ struct platform_device_info pdevinfo;
+ struct gpio_aggregator_line *line;
+ struct fwnode_handle *swnode;
+ unsigned int n = 0;
+ int ret = 0;
+
+ if (gpio_aggregator_count_lines(aggr) == 0)
+ return -EINVAL;
+
+ aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
+ GFP_KERNEL);
+ if (!aggr->lookups)
+ return -ENOMEM;
+
+ swnode = gpio_aggregator_make_device_sw_node(aggr);
+ if (IS_ERR(swnode)) {
+ ret = PTR_ERR(swnode);
+ goto err_remove_lookups;
+ }
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo.name = DRV_NAME;
+ pdevinfo.id = aggr->id;
+ pdevinfo.fwnode = swnode;
+
+ /* The list is always sorted as new elements are inserted in order. */
+ list_for_each_entry(line, &aggr->list_head, entry) {
+ /*
+ * - Either GPIO chip label or line name must be configured
+ * (i.e. line->key must be non-NULL)
+ * - Line directories must be named with sequential numeric
+ * suffixes starting from 0. (i.e. ./line0, ./line1, ...)
+ */
+ if (!line->key || line->idx != n) {
+ ret = -EINVAL;
+ goto err_remove_swnode;
+ }
+
+ if (line->offset < 0)
+ ret = gpio_aggregator_add_gpio(aggr, line->key,
+ U16_MAX, &n);
+ else
+ ret = gpio_aggregator_add_gpio(aggr, line->key,
+ line->offset, &n);
+ if (ret)
+ goto err_remove_swnode;
+ }
+
+ aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
+ if (!aggr->lookups->dev_id) {
+ ret = -ENOMEM;
+ goto err_remove_swnode;
+ }
+
+ gpiod_add_lookup_table(aggr->lookups);
+
+ ret = dev_sync_probe_register(&aggr->probe_data, &pdevinfo);
+ if (ret)
+ goto err_remove_lookup_table;
+
+ return 0;
+
+err_remove_lookup_table:
+ kfree(aggr->lookups->dev_id);
+ gpiod_remove_lookup_table(aggr->lookups);
+err_remove_swnode:
+ fwnode_remove_software_node(swnode);
+err_remove_lookups:
+ kfree(aggr->lookups);
+
+ return ret;
+}
+
+static void gpio_aggregator_deactivate(struct gpio_aggregator *aggr)
+{
+ dev_sync_probe_unregister(&aggr->probe_data);
+ gpiod_remove_lookup_table(aggr->lookups);
+ kfree(aggr->lookups->dev_id);
+ kfree(aggr->lookups);
+}
+
+static void gpio_aggregator_lockup_configfs(struct gpio_aggregator *aggr,
+ bool lock)
+{
+ struct configfs_subsystem *subsys = aggr->group.cg_subsys;
+ struct gpio_aggregator_line *line;
+
+ /*
+ * The device only needs to depend on leaf lines. This is
+ * sufficient to lock up all the configfs entries that the
+ * instantiated, alive device depends on.
+ */
+ list_for_each_entry(line, &aggr->list_head, entry) {
+ if (lock)
+ configfs_depend_item_unlocked(
+ subsys, &line->group.cg_item);
+ else
+ configfs_undepend_item_unlocked(
+ &line->group.cg_item);
+ }
+}
+
+static ssize_t
+gpio_aggregator_line_key_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%s\n", line->key ?: "");
+}
+
+static ssize_t
+gpio_aggregator_line_key_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ char *key __free(kfree) = kstrndup(skip_spaces(page), count,
+ GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
+ strim(key);
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ return -EBUSY;
+
+ kfree(line->key);
+ line->key = no_free_ptr(key);
+
+ return count;
+}
+CONFIGFS_ATTR(gpio_aggregator_line_, key);
+
+static ssize_t
+gpio_aggregator_line_name_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%s\n", line->name ?: "");
+}
+
+static ssize_t
+gpio_aggregator_line_name_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ char *name __free(kfree) = kstrndup(skip_spaces(page), count,
+ GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ strim(name);
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ return -EBUSY;
+
+ kfree(line->name);
+ line->name = no_free_ptr(name);
+
+ return count;
+}
+CONFIGFS_ATTR(gpio_aggregator_line_, name);
+
+static ssize_t
+gpio_aggregator_line_offset_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%d\n", line->offset);
+}
+
+static ssize_t
+gpio_aggregator_line_offset_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+ int offset, ret;
+
+ ret = kstrtoint(page, 0, &offset);
+ if (ret)
+ return ret;
+
+ /*
+ * When offset == -1, 'key' represents a line name to lookup.
+ * When 0 <= offset < 65535, 'key' represents the label of the chip with
+ * the 'offset' value representing the line within that chip.
+ *
+ * GPIOLIB uses the U16_MAX value to indicate lookup by line name so
+ * the greatest offset we can accept is (U16_MAX - 1).
+ */
+ if (offset > (U16_MAX - 1) || offset < -1)
+ return -EINVAL;
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ return -EBUSY;
+
+ line->offset = offset;
+
+ return count;
+}
+CONFIGFS_ATTR(gpio_aggregator_line_, offset);
+
+static struct configfs_attribute *gpio_aggregator_line_attrs[] = {
+ &gpio_aggregator_line_attr_key,
+ &gpio_aggregator_line_attr_name,
+ &gpio_aggregator_line_attr_offset,
+ NULL
+};
+
+static ssize_t
+gpio_aggregator_device_dev_name_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+ struct platform_device *pdev;
+
+ guard(mutex)(&aggr->lock);
+
+ pdev = aggr->probe_data.pdev;
+ if (pdev)
+ return sysfs_emit(page, "%s\n", dev_name(&pdev->dev));
+
+ return sysfs_emit(page, "%s.%d\n", DRV_NAME, aggr->id);
+}
+CONFIGFS_ATTR_RO(gpio_aggregator_device_, dev_name);
+
+static ssize_t
+gpio_aggregator_device_live_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%c\n",
+ gpio_aggregator_is_active(aggr) ? '1' : '0');
+}
+
+static ssize_t
+gpio_aggregator_device_live_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+ int ret = 0;
+ bool live;
+
+ ret = kstrtobool(page, &live);
+ if (ret)
+ return ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ if (live && !aggr->init_via_sysfs)
+ gpio_aggregator_lockup_configfs(aggr, true);
+
+ scoped_guard(mutex, &aggr->lock) {
+ if (gpio_aggregator_is_activating(aggr) ||
+ (live == gpio_aggregator_is_active(aggr)))
+ ret = -EPERM;
+ else if (live)
+ ret = gpio_aggregator_activate(aggr);
+ else
+ gpio_aggregator_deactivate(aggr);
+ }
+
+ /*
+ * Undepend is required only if device disablement (live == 0)
+ * succeeds or if device enablement (live == 1) fails.
+ */
+ if (live == !!ret && !aggr->init_via_sysfs)
+ gpio_aggregator_lockup_configfs(aggr, false);
+
+ module_put(THIS_MODULE);
+
+ return ret ?: count;
+}
+CONFIGFS_ATTR(gpio_aggregator_device_, live);
+
+static struct configfs_attribute *gpio_aggregator_device_attrs[] = {
+ &gpio_aggregator_device_attr_dev_name,
+ &gpio_aggregator_device_attr_live,
+ NULL
+};
+
+static void
+gpio_aggregator_line_release(struct config_item *item)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ gpio_aggregator_line_del(aggr, line);
+ kfree(line->key);
+ kfree(line->name);
+ kfree(line);
+}
+
+static struct configfs_item_operations gpio_aggregator_line_item_ops = {
+ .release = gpio_aggregator_line_release,
+};
+
+static const struct config_item_type gpio_aggregator_line_type = {
+ .ct_item_ops = &gpio_aggregator_line_item_ops,
+ .ct_attrs = gpio_aggregator_line_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void gpio_aggregator_device_release(struct config_item *item)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+
+ /*
+ * At this point, aggr is neither active nor activating,
+ * so calling gpio_aggregator_deactivate() is always unnecessary.
+ */
+ gpio_aggregator_free(aggr);
+}
+
+static struct configfs_item_operations gpio_aggregator_device_item_ops = {
+ .release = gpio_aggregator_device_release,
+};
+
+static struct config_group *
+gpio_aggregator_device_make_group(struct config_group *group, const char *name)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(&group->cg_item);
+ struct gpio_aggregator_line *line;
+ unsigned int idx;
+ int ret, nchar;
+
+ ret = sscanf(name, "line%u%n", &idx, &nchar);
+ if (ret != 1 || nchar != strlen(name))
+ return ERR_PTR(-EINVAL);
+
+ if (aggr->init_via_sysfs)
+ /*
+ * Aggregators created via the legacy sysfs interface are exposed as
+ * default groups, which means rmdir(2) is prohibited for them.
+ * For simplicity, and to avoid confusion, we also prohibit
+ * mkdir(2).
+ */
+ return ERR_PTR(-EPERM);
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_active(aggr))
+ return ERR_PTR(-EBUSY);
+
+ list_for_each_entry(line, &aggr->list_head, entry)
+ if (line->idx == idx)
+ return ERR_PTR(-EINVAL);
+
+ line = gpio_aggregator_line_alloc(aggr, idx, NULL, -1);
+ if (IS_ERR(line))
+ return ERR_CAST(line);
+
+ config_group_init_type_name(&line->group, name, &gpio_aggregator_line_type);
+
+ gpio_aggregator_line_add(aggr, line);
+
+ return &line->group;
+}
+
+static struct configfs_group_operations gpio_aggregator_device_group_ops = {
+ .make_group = gpio_aggregator_device_make_group,
+};
+
+static const struct config_item_type gpio_aggregator_device_type = {
+ .ct_group_ops = &gpio_aggregator_device_group_ops,
+ .ct_item_ops = &gpio_aggregator_device_item_ops,
+ .ct_attrs = gpio_aggregator_device_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *
+gpio_aggregator_make_group(struct config_group *group, const char *name)
+{
+ struct gpio_aggregator *aggr;
+ int ret;
+
+ /*
+ * "_sysfs" prefix is reserved for auto-generated config group
+ * for devices create via legacy sysfs interface.
+ */
+ if (strncmp(name, AGGREGATOR_LEGACY_PREFIX,
+ sizeof(AGGREGATOR_LEGACY_PREFIX) - 1) == 0)
+ return ERR_PTR(-EINVAL);
+
+ /* arg space is unneeded */
+ ret = gpio_aggregator_alloc(&aggr, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);
+ dev_sync_probe_init(&aggr->probe_data);
+
+ return &aggr->group;
+}
+
+static struct configfs_group_operations gpio_aggregator_group_ops = {
+ .make_group = gpio_aggregator_make_group,
+};
+
+static const struct config_item_type gpio_aggregator_type = {
+ .ct_group_ops = &gpio_aggregator_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem gpio_aggregator_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = DRV_NAME,
+ .ci_type = &gpio_aggregator_type,
+ },
+ },
+};
+
+/*
+ * Sysfs interface
+ */
+static int gpio_aggregator_parse(struct gpio_aggregator *aggr)
+{
+ char *args = skip_spaces(aggr->args);
+ struct gpio_aggregator_line *line;
+ char name[CONFIGFS_ITEM_NAME_LEN];
+ char *key, *offsets, *p;
+ unsigned int i, n = 0;
+ int error = 0;
+
+ unsigned long *bitmap __free(bitmap) =
+ bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
+ if (!bitmap)
+ return -ENOMEM;
+
+ args = next_arg(args, &key, &p);
+ while (*args) {
+ args = next_arg(args, &offsets, &p);
+
+ p = get_options(offsets, 0, &error);
+ if (error == 0 || *p) {
+ /* Named GPIO line */
+ scnprintf(name, sizeof(name), "line%u", n);
+ line = gpio_aggregator_line_alloc(aggr, n, key, -1);
+ if (IS_ERR(line)) {
+ error = PTR_ERR(line);
+ goto err;
+ }
+ config_group_init_type_name(&line->group, name,
+ &gpio_aggregator_line_type);
+ error = configfs_register_group(&aggr->group,
+ &line->group);
+ if (error)
+ goto err;
+ scoped_guard(mutex, &aggr->lock)
+ gpio_aggregator_line_add(aggr, line);
+
+ error = gpio_aggregator_add_gpio(aggr, key, U16_MAX, &n);
+ if (error)
+ goto err;
+
+ key = offsets;
+ continue;
+ }
+
+ /* GPIO chip + offset(s) */
+ error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
+ if (error) {
+ pr_err("Cannot parse %s: %d\n", offsets, error);
+ goto err;
+ }
+
+ for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
+ scnprintf(name, sizeof(name), "line%u", n);
+ line = gpio_aggregator_line_alloc(aggr, n, key, i);
+ if (IS_ERR(line)) {
+ error = PTR_ERR(line);
+ goto err;
+ }
+ config_group_init_type_name(&line->group, name,
+ &gpio_aggregator_line_type);
+ error = configfs_register_group(&aggr->group,
+ &line->group);
+ if (error)
+ goto err;
+ scoped_guard(mutex, &aggr->lock)
+ gpio_aggregator_line_add(aggr, line);
+
+ error = gpio_aggregator_add_gpio(aggr, key, i, &n);
+ if (error)
+ goto err;
+ }
+
+ args = next_arg(args, &key, &p);
+ }
+
+ if (!n) {
+ pr_err("No GPIOs specified\n");
+ error = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ gpio_aggregator_free_lines(aggr);
+ return error;
+}
+
+static ssize_t gpio_aggregator_new_device_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ struct gpio_aggregator_pdev_meta meta = { .init_via_sysfs = true };
+ char name[CONFIGFS_ITEM_NAME_LEN];
+ struct gpio_aggregator *aggr;
+ struct platform_device *pdev;
+ int res;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ /* kernfs guarantees string termination, so count + 1 is safe */
+ res = gpio_aggregator_alloc(&aggr, count + 1);
+ if (res)
+ goto put_module;
+
+ memcpy(aggr->args, buf, count + 1);
+
+ aggr->init_via_sysfs = true;
+ aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
+ GFP_KERNEL);
+ if (!aggr->lookups) {
+ res = -ENOMEM;
+ goto free_ga;
+ }
+
+ aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
+ if (!aggr->lookups->dev_id) {
+ res = -ENOMEM;
+ goto free_table;
+ }
+
+ scnprintf(name, sizeof(name), "%s.%d", AGGREGATOR_LEGACY_PREFIX, aggr->id);
+ config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);
+
+ /*
+ * Since the device created via sysfs might be toggled via the configfs
+ * 'live' attribute later, this initialization is needed.
+ */
+ dev_sync_probe_init(&aggr->probe_data);
+
+ /* Expose to configfs */
+ res = configfs_register_group(&gpio_aggregator_subsys.su_group,
+ &aggr->group);
+ if (res)
+ goto free_dev_id;
+
+ res = gpio_aggregator_parse(aggr);
+ if (res)
+ goto unregister_group;
+
+ gpiod_add_lookup_table(aggr->lookups);
+
+ pdev = platform_device_register_data(NULL, DRV_NAME, aggr->id, &meta, sizeof(meta));
+ if (IS_ERR(pdev)) {
+ res = PTR_ERR(pdev);
+ goto remove_table;
+ }
+
+ aggr->probe_data.pdev = pdev;
+ module_put(THIS_MODULE);
+ return count;
+
+remove_table:
+ gpiod_remove_lookup_table(aggr->lookups);
+unregister_group:
+ configfs_unregister_group(&aggr->group);
+free_dev_id:
+ kfree(aggr->lookups->dev_id);
+free_table:
+ kfree(aggr->lookups);
+free_ga:
+ gpio_aggregator_free(aggr);
+put_module:
+ module_put(THIS_MODULE);
+ return res;
+}
+
+static struct driver_attribute driver_attr_gpio_aggregator_new_device =
+ __ATTR(new_device, 0200, NULL, gpio_aggregator_new_device_store);
+
+static void gpio_aggregator_destroy(struct gpio_aggregator *aggr)
+{
+ scoped_guard(mutex, &aggr->lock) {
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ gpio_aggregator_deactivate(aggr);
+ }
+ gpio_aggregator_free_lines(aggr);
+ configfs_unregister_group(&aggr->group);
+ kfree(aggr);
+}
+
+static ssize_t gpio_aggregator_delete_device_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ struct gpio_aggregator *aggr;
+ unsigned int id;
+ int error;
+
+ if (!str_has_prefix(buf, DRV_NAME "."))
+ return -EINVAL;
+
+ error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
+ if (error)
+ return error;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ mutex_lock(&gpio_aggregator_lock);
+ aggr = idr_find(&gpio_aggregator_idr, id);
+ /*
+ * For simplicity, devices created via configfs cannot be deleted
+ * via sysfs.
+ */
+ if (aggr && aggr->init_via_sysfs)
+ idr_remove(&gpio_aggregator_idr, id);
+ else {
+ mutex_unlock(&gpio_aggregator_lock);
+ module_put(THIS_MODULE);
+ return -ENOENT;
+ }
+ mutex_unlock(&gpio_aggregator_lock);
+
+ gpio_aggregator_destroy(aggr);
+ module_put(THIS_MODULE);
+ return count;
+}
+
+static struct driver_attribute driver_attr_gpio_aggregator_delete_device =
+ __ATTR(delete_device, 0200, NULL, gpio_aggregator_delete_device_store);
+
+static struct attribute *gpio_aggregator_attrs[] = {
+ &driver_attr_gpio_aggregator_new_device.attr,
+ &driver_attr_gpio_aggregator_delete_device.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(gpio_aggregator);
/*
* GPIO Aggregator platform device
@@ -589,7 +1288,9 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
static int gpio_aggregator_probe(struct platform_device *pdev)
{
+ struct gpio_aggregator_pdev_meta *meta;
struct device *dev = &pdev->dev;
+ bool init_via_sysfs = false;
struct gpio_desc **descs;
struct gpiochip_fwd *fwd;
unsigned long features;
@@ -603,10 +1304,28 @@ static int gpio_aggregator_probe(struct platform_device *pdev)
if (!descs)
return -ENOMEM;
+ meta = dev_get_platdata(&pdev->dev);
+ if (meta && meta->init_via_sysfs)
+ init_via_sysfs = true;
+
for (i = 0; i < n; i++) {
descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
- if (IS_ERR(descs[i]))
+ if (IS_ERR(descs[i])) {
+ /*
+ * Deferred probing is not suitable when the aggregator
+ * is created via configfs; users should simply retry later
+ * whenever they like. For device creation via sysfs, the
+ * error is propagated without being overridden, for backward
+ * compatibility. .prevent_deferred_probe is kept unset
+ * for other cases.
+ */
+ if (!init_via_sysfs && !dev_of_node(dev) &&
+ descs[i] == ERR_PTR(-EPROBE_DEFER)) {
+ pr_warn("Deferred probe canceled for creation via configfs.\n");
+ return -ENODEV;
+ }
return PTR_ERR(descs[i]);
+ }
}
features = (uintptr_t)device_get_match_data(dev);
@@ -640,9 +1359,63 @@ static struct platform_driver gpio_aggregator_driver = {
},
};
+static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
+{
+ /*
+ * There should be no aggregators created via configfs, as their
+ * presence would prevent module unloading.
+ */
+ gpio_aggregator_destroy(p);
+ return 0;
+}
+
+static void __exit gpio_aggregator_remove_all(void)
+{
+ /*
+ * Configfs callbacks acquire gpio_aggregator_lock when accessing
+ * gpio_aggregator_idr, so to prevent lock inversion deadlock, we
+ * cannot protect idr_for_each invocation here with
+ * gpio_aggregator_lock, as gpio_aggregator_idr_remove() accesses
+ * configfs groups. Fortunately, the new_device/delete_device path
+ * and the module unload path are mutually exclusive, thanks to an
+ * explicit try_module_get inside of those driver attr handlers.
+ * Also, when we reach here, no configfs entries are present or being
+ * created. Therefore, there is no need to protect with gpio_aggregator_lock
+ * below.
+ */
+ idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
+ idr_destroy(&gpio_aggregator_idr);
+}
+
static int __init gpio_aggregator_init(void)
{
- return platform_driver_register(&gpio_aggregator_driver);
+ int ret = 0;
+
+ config_group_init(&gpio_aggregator_subsys.su_group);
+ mutex_init(&gpio_aggregator_subsys.su_mutex);
+ ret = configfs_register_subsystem(&gpio_aggregator_subsys);
+ if (ret) {
+ pr_err("Failed to register the '%s' configfs subsystem: %d\n",
+ gpio_aggregator_subsys.su_group.cg_item.ci_namebuf, ret);
+ mutex_destroy(&gpio_aggregator_subsys.su_mutex);
+ return ret;
+ }
+
+ /*
+ * CAVEAT: This must occur after configfs registration. Otherwise,
+ * a race condition could arise: driver attribute groups might be
+ * exposed and accessed by users before configfs registration
+ * completes. new_device_store() does not expect a partially
+ * initialized configfs state.
+ */
+ ret = platform_driver_register(&gpio_aggregator_driver);
+ if (ret) {
+ pr_err("Failed to register the platform driver: %d\n", ret);
+ mutex_destroy(&gpio_aggregator_subsys.su_mutex);
+ configfs_unregister_subsystem(&gpio_aggregator_subsys);
+ }
+
+ return ret;
}
module_init(gpio_aggregator_init);
@@ -650,6 +1423,7 @@ static void __exit gpio_aggregator_exit(void)
{
gpio_aggregator_remove_all();
platform_driver_unregister(&gpio_aggregator_driver);
+ configfs_unregister_subsystem(&gpio_aggregator_subsys);
}
module_exit(gpio_aggregator_exit);
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 17c287dc7471..8f22cb36004d 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -516,6 +516,7 @@ static struct irq_chip bcm_gpio_irq_chip = {
.irq_set_type = bcm_kona_gpio_irq_set_type,
.irq_request_resources = bcm_kona_gpio_irq_reqres,
.irq_release_resources = bcm_kona_gpio_irq_relres,
+ .flags = IRQCHIP_IMMUTABLE,
};
static struct of_device_id const bcm_kona_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-blzp1600.c b/drivers/gpio/gpio-blzp1600.c
new file mode 100644
index 000000000000..055cb296ae54
--- /dev/null
+++ b/drivers/gpio/gpio-blzp1600.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 VeriSilicon Limited.
+ * Copyright (C) 2025 Blaize, Inc.
+ */
+
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define GPIO_DIR_REG 0x00
+#define GPIO_CTRL_REG 0x04
+#define GPIO_SET_REG 0x08
+#define GPIO_CLR_REG 0x0C
+#define GPIO_ODATA_REG 0x10
+#define GPIO_IDATA_REG 0x14
+#define GPIO_IEN_REG 0x18
+#define GPIO_IS_REG 0x1C
+#define GPIO_IBE_REG 0x20
+#define GPIO_IEV_REG 0x24
+#define GPIO_RIS_REG 0x28
+#define GPIO_IM_REG 0x2C
+#define GPIO_MIS_REG 0x30
+#define GPIO_IC_REG 0x34
+#define GPIO_DB_REG 0x38
+#define GPIO_DFG_REG 0x3C
+
+#define DRIVER_NAME "blzp1600-gpio"
+
+struct blzp1600_gpio {
+ void __iomem *base;
+ struct gpio_chip gc;
+ int irq;
+};
+
+static inline struct blzp1600_gpio *get_blzp1600_gpio_from_irq_data(struct irq_data *d)
+{
+ return gpiochip_get_data(irq_data_get_irq_chip_data(d));
+}
+
+static inline struct blzp1600_gpio *get_blzp1600_gpio_from_irq_desc(struct irq_desc *d)
+{
+ return gpiochip_get_data(irq_desc_get_handler_data(d));
+}
+
+static inline u32 blzp1600_gpio_read(struct blzp1600_gpio *chip, unsigned int offset)
+{
+ return readl_relaxed(chip->base + offset);
+}
+
+static inline void blzp1600_gpio_write(struct blzp1600_gpio *chip, unsigned int offset, u32 val)
+{
+ writel_relaxed(val, chip->base + offset);
+}
+
+static inline void blzp1600_gpio_rmw(void __iomem *reg, u32 mask, bool set)
+{
+ u32 val = readl_relaxed(reg);
+
+ if (set)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ writel_relaxed(val, reg);
+}
+
+static void blzp1600_gpio_irq_mask(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 1);
+}
+
+static void blzp1600_gpio_irq_unmask(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 0);
+}
+
+static void blzp1600_gpio_irq_ack(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ blzp1600_gpio_write(chip, GPIO_IC_REG, BIT(d->hwirq));
+}
+
+static void blzp1600_gpio_irq_enable(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ gpiochip_enable_irq(&chip->gc, irqd_to_hwirq(d));
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_DIR_REG, BIT(d->hwirq), 0);
+ blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 1);
+}
+
+static void blzp1600_gpio_irq_disable(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 0);
+ gpiochip_disable_irq(&chip->gc, irqd_to_hwirq(d));
+}
+
+static int blzp1600_gpio_irq_set_type(struct irq_data *d, u32 type)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+ u32 edge_level, single_both, fall_rise;
+ int mask = BIT(d->hwirq);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ edge_level = blzp1600_gpio_read(chip, GPIO_IS_REG);
+ single_both = blzp1600_gpio_read(chip, GPIO_IBE_REG);
+ fall_rise = blzp1600_gpio_read(chip, GPIO_IEV_REG);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ edge_level &= ~mask;
+ single_both |= mask;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ edge_level &= ~mask;
+ single_both &= ~mask;
+ fall_rise |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge_level &= ~mask;
+ single_both &= ~mask;
+ fall_rise &= ~mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ edge_level |= mask;
+ fall_rise |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ edge_level |= mask;
+ fall_rise &= ~mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ blzp1600_gpio_write(chip, GPIO_IS_REG, edge_level);
+ blzp1600_gpio_write(chip, GPIO_IBE_REG, single_both);
+ blzp1600_gpio_write(chip, GPIO_IEV_REG, fall_rise);
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+ else
+ irq_set_handler_locked(d, handle_edge_irq);
+
+ return 0;
+}
+
+static const struct irq_chip blzp1600_gpio_irqchip = {
+ .name = DRIVER_NAME,
+ .irq_ack = blzp1600_gpio_irq_ack,
+ .irq_mask = blzp1600_gpio_irq_mask,
+ .irq_unmask = blzp1600_gpio_irq_unmask,
+ .irq_set_type = blzp1600_gpio_irq_set_type,
+ .irq_enable = blzp1600_gpio_irq_enable,
+ .irq_disable = blzp1600_gpio_irq_disable,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void blzp1600_gpio_irqhandler(struct irq_desc *desc)
+{
+ struct blzp1600_gpio *gpio = get_blzp1600_gpio_from_irq_desc(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned long irq_status;
+ int hwirq = 0;
+
+ chained_irq_enter(irqchip, desc);
+ irq_status = blzp1600_gpio_read(gpio, GPIO_RIS_REG);
+ for_each_set_bit(hwirq, &irq_status, gpio->gc.ngpio)
+ generic_handle_domain_irq(gpio->gc.irq.domain, hwirq);
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int blzp1600_gpio_set_debounce(struct gpio_chip *gc, unsigned int offset,
+ unsigned int debounce)
+{
+ struct blzp1600_gpio *chip = gpiochip_get_data(gc);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_DB_REG, BIT(offset), debounce);
+
+ return 0;
+}
+
+static int blzp1600_gpio_set_config(struct gpio_chip *gc, unsigned int offset, unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return blzp1600_gpio_set_debounce(gc, offset, debounce);
+}
+
+static int blzp1600_gpio_probe(struct platform_device *pdev)
+{
+ struct blzp1600_gpio *chip;
+ struct gpio_chip *gc;
+ int ret;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(chip->base))
+ return PTR_ERR(chip->base);
+
+ ret = bgpio_init(&chip->gc, &pdev->dev, 4, chip->base + GPIO_IDATA_REG,
+ chip->base + GPIO_SET_REG, chip->base + GPIO_CLR_REG,
+ chip->base + GPIO_DIR_REG, NULL, 0);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register generic gpio\n");
+
+ /* configure the gpio chip */
+ gc = &chip->gc;
+ gc->set_config = blzp1600_gpio_set_config;
+
+ if (device_property_present(&pdev->dev, "interrupt-controller")) {
+ struct gpio_irq_chip *girq;
+
+ chip->irq = platform_get_irq(pdev, 0);
+ if (chip->irq < 0)
+ return chip->irq;
+
+ girq = &gc->irq;
+ gpio_irq_chip_set_chip(girq, &blzp1600_gpio_irqchip);
+ girq->parent_handler = blzp1600_gpio_irqhandler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+
+ girq->parents[0] = chip->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ }
+
+ return devm_gpiochip_add_data(&pdev->dev, gc, chip);
+}
+
+static const struct of_device_id blzp1600_gpio_of_match[] = {
+ { .compatible = "blaize,blzp1600-gpio", },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, blzp1600_gpio_of_match);
+
+static struct platform_driver blzp1600_gpio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = blzp1600_gpio_of_match,
+ },
+ .probe = blzp1600_gpio_probe,
+};
+
+module_platform_driver(blzp1600_gpio_driver);
+
+MODULE_AUTHOR("Nikolaos Pasaloukos <nikolaos.pasaloukos@blaize.com>");
+MODULE_DESCRIPTION("Blaize BLZP1600 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index ca3472977431..e7671bcd5c07 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -437,7 +437,7 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
int err;
priv->irq_domain =
- irq_domain_add_linear(np, priv->num_gpios,
+ irq_domain_create_linear(of_fwnode_handle(np), priv->num_gpios,
&brcmstb_gpio_irq_domain_ops,
priv);
if (!priv->irq_domain) {
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 63fc7888c1d4..80a82492171e 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -68,15 +68,6 @@ static inline u32 __gpio_mask(unsigned gpio)
return 1 << (gpio % 32);
}
-static inline struct davinci_gpio_regs __iomem *irq2regs(struct irq_data *d)
-{
- struct davinci_gpio_regs __iomem *g;
-
- g = (__force struct davinci_gpio_regs __iomem *)irq_data_get_irq_chip_data(d);
-
- return g;
-}
-
static int davinci_gpio_irq_setup(struct platform_device *pdev);
/*--------------------------------------------------------------------------*/
@@ -255,19 +246,27 @@ static int davinci_gpio_probe(struct platform_device *pdev)
static void gpio_irq_mask(struct irq_data *d)
{
- struct davinci_gpio_regs __iomem *g = irq2regs(d);
+ struct davinci_gpio_controller *chips = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct davinci_gpio_regs __iomem *g = chips->regs[hwirq / 32];
uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
writel_relaxed(mask, &g->clr_falling);
writel_relaxed(mask, &g->clr_rising);
+
+ gpiochip_disable_irq(&chips->chip, hwirq);
}
static void gpio_irq_unmask(struct irq_data *d)
{
- struct davinci_gpio_regs __iomem *g = irq2regs(d);
+ struct davinci_gpio_controller *chips = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct davinci_gpio_regs __iomem *g = chips->regs[hwirq / 32];
uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
unsigned status = irqd_get_trigger_type(d);
+ gpiochip_enable_irq(&chips->chip, hwirq);
+
status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
if (!status)
status = IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
@@ -286,12 +285,13 @@ static int gpio_irq_type(struct irq_data *d, unsigned trigger)
return 0;
}
-static struct irq_chip gpio_irqchip = {
+static const struct irq_chip gpio_irqchip = {
.name = "GPIO",
.irq_unmask = gpio_irq_unmask,
.irq_mask = gpio_irq_mask,
.irq_set_type = gpio_irq_type,
- .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void gpio_irq_handler(struct irq_desc *desc)
@@ -399,12 +399,11 @@ davinci_gpio_irq_map(struct irq_domain *d, unsigned int irq,
{
struct davinci_gpio_controller *chips =
(struct davinci_gpio_controller *)d->host_data;
- struct davinci_gpio_regs __iomem *g = chips->regs[hw / 32];
irq_set_chip_and_handler_name(irq, &gpio_irqchip, handle_simple_irq,
"davinci_gpio");
irq_set_irq_type(irq, IRQ_TYPE_NONE);
- irq_set_chip_data(irq, (__force void *)g);
+ irq_set_chip_data(irq, (__force void *)chips);
irq_set_handler_data(irq, (void *)(uintptr_t)__gpio_mask(hw));
return 0;
@@ -479,9 +478,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
return irq;
}
- irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
- &davinci_gpio_irq_ops,
- chips);
+ irq_domain = irq_domain_create_legacy(of_fwnode_handle(dev->of_node), ngpio, irq, 0,
+ &davinci_gpio_irq_ops, chips);
if (!irq_domain) {
dev_err(dev, "Couldn't register an IRQ domain\n");
return -ENODEV;
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 596da59d4b13..4bd3c47eaf93 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -220,11 +220,12 @@ static int dln2_gpio_get(struct gpio_chip *chip, unsigned int offset)
return dln2_gpio_pin_get_out_val(dln2, offset);
}
-static void dln2_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int dln2_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct dln2_gpio *dln2 = gpiochip_get_data(chip);
- dln2_gpio_pin_set_out_val(dln2, offset, value);
+ return dln2_gpio_pin_set_out_val(dln2, offset, value);
}
static int dln2_gpio_set_direction(struct gpio_chip *chip, unsigned offset,
@@ -468,7 +469,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.base = -1;
dln2->gpio.ngpio = pins;
dln2->gpio.can_sleep = true;
- dln2->gpio.set = dln2_gpio_set;
+ dln2->gpio.set_rv = dln2_gpio_set;
dln2->gpio.get = dln2_gpio_get;
dln2->gpio.request = dln2_gpio_request;
dln2->gpio.free = dln2_gpio_free;
diff --git a/drivers/gpio/gpio-ds4520.c b/drivers/gpio/gpio-ds4520.c
index 1903deaef3e9..f52ecae382a4 100644
--- a/drivers/gpio/gpio-ds4520.c
+++ b/drivers/gpio/gpio-ds4520.c
@@ -25,7 +25,6 @@ static int ds4520_gpio_probe(struct i2c_client *client)
struct gpio_regmap_config config = { };
struct device *dev = &client->dev;
struct regmap *regmap;
- u32 ngpio;
u32 base;
int ret;
@@ -33,10 +32,6 @@ static int ds4520_gpio_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "Missing 'reg' property.\n");
- ret = device_property_read_u32(dev, "ngpios", &ngpio);
- if (ret)
- return dev_err_probe(dev, ret, "Missing 'ngpios' property.\n");
-
regmap = devm_regmap_init_i2c(client, &ds4520_regmap_config);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap),
@@ -44,7 +39,6 @@ static int ds4520_gpio_probe(struct i2c_client *client)
config.regmap = regmap;
config.parent = dev;
- config.ngpio = ngpio;
config.reg_dat_base = base + DS4520_IO_STATUS0;
config.reg_set_base = base + DS4520_PULLUP0;
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index d4bf8d187e16..f2973d0b7138 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -203,9 +203,10 @@ static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
return 0;
}
-static void sprd_eic_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int sprd_eic_set(struct gpio_chip *chip, unsigned int offset, int value)
{
/* EICs are always input, nothing needs to be done here. */
+ return 0;
}
static int sprd_eic_set_debounce(struct gpio_chip *chip, unsigned int offset,
@@ -662,7 +663,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
sprd_eic->chip.request = sprd_eic_request;
sprd_eic->chip.free = sprd_eic_free;
sprd_eic->chip.set_config = sprd_eic_set_config;
- sprd_eic->chip.set = sprd_eic_set;
+ sprd_eic->chip.set_rv = sprd_eic_set;
fallthrough;
case SPRD_EIC_ASYNC:
case SPRD_EIC_SYNC:
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 6c862c572322..a5e6e446f39c 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -204,13 +204,15 @@ static void __em_gio_set(struct gpio_chip *chip, unsigned int reg,
(BIT(shift + 16)) | (value << shift));
}
-static void em_gio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int em_gio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
/* output is split into two registers */
if (offset < 16)
__em_gio_set(chip, GIO_OL, offset, value);
else
__em_gio_set(chip, GIO_OH, offset - 16, value);
+
+ return 0;
}
static int em_gio_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -304,7 +306,7 @@ static int em_gio_probe(struct platform_device *pdev)
gpio_chip->direction_input = em_gio_direction_input;
gpio_chip->get = em_gio_get;
gpio_chip->direction_output = em_gio_direction_output;
- gpio_chip->set = em_gio_set;
+ gpio_chip->set_rv = em_gio_set;
gpio_chip->to_irq = em_gio_to_irq;
gpio_chip->request = pinctrl_gpio_request;
gpio_chip->free = em_gio_free;
@@ -323,8 +325,9 @@ static int em_gio_probe(struct platform_device *pdev)
irq_chip->irq_release_resources = em_gio_irq_relres;
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
- p->irq_domain = irq_domain_add_simple(dev->of_node, ngpios, 0,
- &em_gio_irq_domain_ops, p);
+ p->irq_domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node),
+ ngpios, 0,
+ &em_gio_irq_domain_ops, p);
if (!p->irq_domain) {
dev_err(dev, "cannot initialize irq domain\n");
return -ENXIO;
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index d5909a4f0433..beb98286d13e 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -93,8 +93,8 @@ static int exar_get_value(struct gpio_chip *chip, unsigned int offset)
return !!(regmap_test_bits(exar_gpio->regmap, addr, BIT(bit)));
}
-static void exar_set_value(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int exar_set_value(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
unsigned int addr = exar_offset_to_lvl_addr(exar_gpio, offset);
@@ -105,7 +105,7 @@ static void exar_set_value(struct gpio_chip *chip, unsigned int offset,
* regmap_write_bits() forces value to be written when an external
* pull up/down might otherwise indicate value was already set.
*/
- regmap_write_bits(exar_gpio->regmap, addr, BIT(bit), bit_value);
+ return regmap_write_bits(exar_gpio->regmap, addr, BIT(bit), bit_value);
}
static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
@@ -114,11 +114,13 @@ static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
unsigned int addr = exar_offset_to_sel_addr(exar_gpio, offset);
unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
+ int ret;
- exar_set_value(chip, offset, value);
- regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
+ ret = exar_set_value(chip, offset, value);
+ if (ret)
+ return ret;
- return 0;
+ return regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
}
static int exar_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -209,7 +211,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
exar_gpio->gpio_chip.direction_input = exar_direction_input;
exar_gpio->gpio_chip.get_direction = exar_get_direction;
exar_gpio->gpio_chip.get = exar_get_value;
- exar_gpio->gpio_chip.set = exar_set_value;
+ exar_gpio->gpio_chip.set_rv = exar_set_value;
exar_gpio->gpio_chip.base = -1;
exar_gpio->gpio_chip.ngpio = ngpios;
exar_gpio->index = index;
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index 3875fd940ccb..dfcd3634f279 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -159,7 +159,8 @@ static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_direction_out(struct gpio_chip *chip,
unsigned offset, int value);
-static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);
+static int f7188x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value);
static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
unsigned long config);
@@ -172,7 +173,7 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
.direction_input = f7188x_gpio_direction_in, \
.get = f7188x_gpio_get, \
.direction_output = f7188x_gpio_direction_out, \
- .set = f7188x_gpio_set, \
+ .set_rv = f7188x_gpio_set, \
.set_config = f7188x_gpio_set_config, \
.base = -1, \
.ngpio = _ngpio, \
@@ -391,7 +392,8 @@ static int f7188x_gpio_direction_out(struct gpio_chip *chip,
return 0;
}
-static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int f7188x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
int err;
struct f7188x_gpio_bank *bank = gpiochip_get_data(chip);
@@ -400,7 +402,8 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
err = superio_enter(sio->addr);
if (err)
- return;
+ return err;
+
superio_select(sio->addr, sio->device);
data_out = superio_inb(sio->addr, f7188x_gpio_data_out(bank->regbase));
@@ -411,6 +414,8 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
superio_outb(sio->addr, f7188x_gpio_data_out(bank->regbase), data_out);
superio_exit(sio->addr);
+
+ return 0;
}
static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
index ad6a045fd3d2..f25283e5239d 100644
--- a/drivers/gpio/gpio-graniterapids.c
+++ b/drivers/gpio/gpio-graniterapids.c
@@ -116,7 +116,7 @@ static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio)
return !!(dw & GNR_CFG_DW_RXSTATE);
}
-static void gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+static int gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
u32 clear = 0;
u32 set = 0;
@@ -126,7 +126,7 @@ static void gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
else
clear = GNR_CFG_DW_TXSTATE;
- gnr_gpio_configure_line(gc, gpio, clear, set);
+ return gnr_gpio_configure_line(gc, gpio, clear, set);
}
static int gnr_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
@@ -159,7 +159,7 @@ static const struct gpio_chip gnr_gpio_chip = {
.owner = THIS_MODULE,
.request = gnr_gpio_request,
.get = gnr_gpio_get,
- .set = gnr_gpio_set,
+ .set_rv = gnr_gpio_set,
.get_direction = gnr_gpio_get_direction,
.direction_input = gnr_gpio_direction_input,
.direction_output = gnr_gpio_direction_output,
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 30a0522ae735..d38a2d9854ca 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -170,6 +170,8 @@ static void grgpio_irq_mask(struct irq_data *d)
grgpio_set_imask(priv, offset, 0);
raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+
+ gpiochip_disable_irq(&priv->gc, d->hwirq);
}
static void grgpio_irq_unmask(struct irq_data *d)
@@ -178,6 +180,7 @@ static void grgpio_irq_unmask(struct irq_data *d)
int offset = d->hwirq;
unsigned long flags;
+ gpiochip_enable_irq(&priv->gc, d->hwirq);
raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
grgpio_set_imask(priv, offset, 1);
@@ -185,11 +188,13 @@ static void grgpio_irq_unmask(struct irq_data *d)
raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
}
-static struct irq_chip grgpio_irq_chip = {
+static const struct irq_chip grgpio_irq_chip = {
.name = "grgpio",
.irq_mask = grgpio_irq_mask,
.irq_unmask = grgpio_irq_unmask,
.irq_set_type = grgpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t grgpio_irq_handler(int irq, void *dev)
@@ -397,7 +402,7 @@ static int grgpio_probe(struct platform_device *ofdev)
return -EINVAL;
}
- priv->domain = irq_domain_add_linear(np, gc->ngpio,
+ priv->domain = irq_domain_create_linear(of_fwnode_handle(np), gc->ngpio,
&grgpio_irq_domain_ops,
priv);
if (!priv->domain) {
diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c
index 7e29a2d8de1a..a40ba99a3aea 100644
--- a/drivers/gpio/gpio-gw-pld.c
+++ b/drivers/gpio/gpio-gw-pld.c
@@ -62,9 +62,9 @@ static int gw_pld_output8(struct gpio_chip *gc, unsigned offset, int value)
return i2c_smbus_write_byte(gw->client, gw->out);
}
-static void gw_pld_set8(struct gpio_chip *gc, unsigned offset, int value)
+static int gw_pld_set8(struct gpio_chip *gc, unsigned int offset, int value)
{
- gw_pld_output8(gc, offset, value);
+ return gw_pld_output8(gc, offset, value);
}
static int gw_pld_probe(struct i2c_client *client)
@@ -86,7 +86,7 @@ static int gw_pld_probe(struct i2c_client *client)
gw->chip.direction_input = gw_pld_input8;
gw->chip.get = gw_pld_get8;
gw->chip.direction_output = gw_pld_output8;
- gw->chip.set = gw_pld_set8;
+ gw->chip.set_rv = gw_pld_set8;
gw->client = client;
/*
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index a40bd56673fe..b1844a676c7c 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -170,7 +170,7 @@ static int egpio_direction_input(struct gpio_chip *chip, unsigned offset)
* Output pins
*/
-static void egpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int egpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
unsigned long flag;
struct egpio_chip *egpio;
@@ -198,6 +198,8 @@ static void egpio_set(struct gpio_chip *chip, unsigned offset, int value)
egpio->cached_values &= ~(1 << offset);
egpio_writew((egpio->cached_values >> shift) & ei->reg_mask, ei, reg);
spin_unlock_irqrestore(&ei->lock, flag);
+
+ return 0;
}
static int egpio_direction_output(struct gpio_chip *chip,
@@ -206,12 +208,10 @@ static int egpio_direction_output(struct gpio_chip *chip,
struct egpio_chip *egpio;
egpio = gpiochip_get_data(chip);
- if (test_bit(offset, &egpio->is_out)) {
- egpio_set(chip, offset, value);
- return 0;
- } else {
- return -EINVAL;
- }
+ if (test_bit(offset, &egpio->is_out))
+ return egpio_set(chip, offset, value);
+
+ return -EINVAL;
}
static int egpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -324,7 +324,7 @@ static int __init egpio_probe(struct platform_device *pdev)
chip->parent = &pdev->dev;
chip->owner = THIS_MODULE;
chip->get = egpio_get;
- chip->set = egpio_set;
+ chip->set_rv = egpio_set;
chip->direction_input = egpio_direction_input;
chip->direction_output = egpio_direction_output;
chip->get_direction = egpio_get_direction;
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 0be9285efebc..67089b2423d8 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -175,12 +175,16 @@ static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned int nr)
static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned int nr,
int val)
{
+ int ret;
+
/* Disable blink hardware which is available for GPIOs from 0 to 31. */
if (nr < 32 && ichx_priv.desc->have_blink)
ichx_write_bit(GPO_BLINK, nr, 0, 0);
/* Set GPIO output value. */
- ichx_write_bit(GPIO_LVL, nr, val, 0);
+ ret = ichx_write_bit(GPIO_LVL, nr, val, 0);
+ if (ret)
+ return ret;
/*
* Try setting pin as an output and verify it worked since many pins
@@ -252,9 +256,9 @@ static int ich6_gpio_request(struct gpio_chip *chip, unsigned int nr)
return ichx_gpio_request(chip, nr);
}
-static void ichx_gpio_set(struct gpio_chip *chip, unsigned int nr, int val)
+static int ichx_gpio_set(struct gpio_chip *chip, unsigned int nr, int val)
{
- ichx_write_bit(GPIO_LVL, nr, val, 0);
+ return ichx_write_bit(GPIO_LVL, nr, val, 0);
}
static void ichx_gpiolib_setup(struct gpio_chip *chip)
@@ -269,7 +273,7 @@ static void ichx_gpiolib_setup(struct gpio_chip *chip)
chip->get = ichx_priv.desc->get ?
ichx_priv.desc->get : ichx_gpio_get;
- chip->set = ichx_gpio_set;
+ chip->set_rv = ichx_gpio_set;
chip->get_direction = ichx_gpio_get_direction;
chip->direction_input = ichx_gpio_direction_input;
chip->direction_output = ichx_gpio_direction_output;
diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c
index 00f547d26254..535f25514455 100644
--- a/drivers/gpio/gpio-idt3243x.c
+++ b/drivers/gpio/gpio-idt3243x.c
@@ -37,7 +37,7 @@ static void idt_gpio_dispatch(struct irq_desc *desc)
pending = readl(ctrl->pic + IDT_PIC_IRQ_PEND);
pending &= ~ctrl->mask_cache;
for_each_set_bit(bit, &pending, gc->ngpio) {
- virq = irq_linear_revmap(gc->irq.domain, bit);
+ virq = irq_find_mapping(gc->irq.domain, bit);
if (virq)
generic_handle_irq(virq);
}
diff --git a/drivers/gpio/gpio-imx-scu.c b/drivers/gpio/gpio-imx-scu.c
index 13baf465aedf..1693dbf1b777 100644
--- a/drivers/gpio/gpio-imx-scu.c
+++ b/drivers/gpio/gpio-imx-scu.c
@@ -6,8 +6,10 @@
* to control the PIN resources on SCU domain.
*/
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/firmware/imx/svc/rm.h>
@@ -37,16 +39,11 @@ static int imx_scu_gpio_get(struct gpio_chip *chip, unsigned int offset)
int level;
int err;
- if (offset >= chip->ngpio)
- return -EINVAL;
-
- mutex_lock(&priv->lock);
-
- /* to read PIN state via scu api */
- err = imx_sc_misc_get_control(priv->handle,
- scu_rsrc_arr[offset], 0, &level);
- mutex_unlock(&priv->lock);
-
+ scoped_guard(mutex, &priv->lock) {
+ /* to read PIN state via scu api */
+ err = imx_sc_misc_get_control(priv->handle,
+ scu_rsrc_arr[offset], 0, &level);
+ }
if (err) {
dev_err(priv->dev, "SCU get failed: %d\n", err);
return err;
@@ -55,31 +52,26 @@ static int imx_scu_gpio_get(struct gpio_chip *chip, unsigned int offset)
return level;
}
-static void imx_scu_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int imx_scu_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct scu_gpio_priv *priv = gpiochip_get_data(chip);
int err;
- if (offset >= chip->ngpio)
- return;
-
- mutex_lock(&priv->lock);
-
- /* to set PIN output level via scu api */
- err = imx_sc_misc_set_control(priv->handle,
- scu_rsrc_arr[offset], 0, value);
- mutex_unlock(&priv->lock);
-
+ scoped_guard(mutex, &priv->lock) {
+ /* to set PIN output level via scu api */
+ err = imx_sc_misc_set_control(priv->handle,
+ scu_rsrc_arr[offset], 0, value);
+ }
if (err)
dev_err(priv->dev, "SCU set (%d) failed: %d\n",
scu_rsrc_arr[offset], err);
+
+ return err;
}
static int imx_scu_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
- if (offset >= chip->ngpio)
- return -EINVAL;
-
return GPIO_LINE_DIRECTION_OUT;
}
@@ -99,7 +91,10 @@ static int imx_scu_gpio_probe(struct platform_device *pdev)
return ret;
priv->dev = dev;
- mutex_init(&priv->lock);
+
+ ret = devm_mutex_init(&pdev->dev, &priv->lock);
+ if (ret)
+ return ret;
gc = &priv->chip;
gc->base = -1;
@@ -107,7 +102,7 @@ static int imx_scu_gpio_probe(struct platform_device *pdev)
gc->ngpio = ARRAY_SIZE(scu_rsrc_arr);
gc->label = dev_name(dev);
gc->get = imx_scu_gpio_get;
- gc->set = imx_scu_gpio_set;
+ gc->set_rv = imx_scu_gpio_set;
gc->get_direction = imx_scu_gpio_get_direction;
platform_set_drvdata(pdev, priv);
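[Editor's note: the gpio-imx-scu hunk above, and the new gpio-max77759 driver further down, replace open-coded mutex_lock()/mutex_unlock() pairs with the cleanup helpers from <linux/cleanup.h>. Roughly, with a hypothetical foo_priv structure and foo_hw_write() helper standing in for the real accessors:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	struct foo_priv {
		struct mutex lock;
		int cached;
	};

	static int foo_hw_write(struct foo_priv *priv, int value)
	{
		priv->cached = value;	/* stand-in for the real hardware access */
		return 0;
	}

	/* guard(): the mutex is dropped automatically when the function returns. */
	static int foo_read_cached(struct foo_priv *priv)
	{
		guard(mutex)(&priv->lock);
		return priv->cached;
	}

	/* scoped_guard(): the lock covers only the block, as in imx_scu_gpio_set(). */
	static int foo_update(struct foo_priv *priv, int value)
	{
		int err;

		scoped_guard(mutex, &priv->lock) {
			err = foo_hw_write(priv, value);
		}

		return err;
	}

The error is still checked and logged outside the scoped block, which is why the imx-scu hunks keep the dev_err() calls after the guard.]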
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index f332341fd4c8..d8184b527bac 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -213,8 +213,7 @@ exit:
return rc;
}
-static void it87_gpio_set(struct gpio_chip *chip,
- unsigned gpio_num, int val)
+static int it87_gpio_set(struct gpio_chip *chip, unsigned int gpio_num, int val)
{
u8 mask, curr_vals;
u16 reg;
@@ -228,6 +227,8 @@ static void it87_gpio_set(struct gpio_chip *chip,
outb(curr_vals | mask, reg);
else
outb(curr_vals & ~mask, reg);
+
+ return 0;
}
static int it87_gpio_direction_out(struct gpio_chip *chip,
@@ -249,7 +250,9 @@ static int it87_gpio_direction_out(struct gpio_chip *chip,
/* set the output enable bit */
superio_set_mask(mask, group + it87_gpio->output_base);
- it87_gpio_set(chip, gpio_num, val);
+ rc = it87_gpio_set(chip, gpio_num, val);
+ if (rc)
+ goto exit;
superio_exit();
@@ -264,7 +267,7 @@ static const struct gpio_chip it87_template_chip = {
.request = it87_gpio_request,
.get = it87_gpio_get,
.direction_input = it87_gpio_direction_in,
- .set = it87_gpio_set,
+ .set_rv = it87_gpio_set,
.direction_output = it87_gpio_direction_out,
.base = -1
};
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index cdf50e4ea165..9f548eda3888 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -76,7 +76,7 @@ static int ttl_get_value(struct gpio_chip *gpio, unsigned offset)
return !!ret;
}
-static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
+static int ttl_set_value(struct gpio_chip *gpio, unsigned int offset, int value)
{
struct ttl_module *mod = dev_get_drvdata(gpio->parent);
void __iomem *port;
@@ -103,6 +103,8 @@ static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
iowrite16be(*shadow, port);
spin_unlock(&mod->lock);
+
+ return 0;
}
static void ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val)
@@ -169,7 +171,7 @@ static int ttl_probe(struct platform_device *pdev)
gpio->parent = &pdev->dev;
gpio->label = pdev->name;
gpio->get = ttl_get_value;
- gpio->set = ttl_set_value;
+ gpio->set_rv = ttl_set_value;
gpio->owner = THIS_MODULE;
/* request dynamic allocation */
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index 4ea15f08e0f4..e38e604baa22 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -63,7 +63,8 @@ static int kempld_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!kempld_gpio_get_bit(pld, KEMPLD_GPIO_LVL_NUM(offset), offset);
}
-static void kempld_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int kempld_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct kempld_gpio_data *gpio = gpiochip_get_data(chip);
struct kempld_device_data *pld = gpio->pld;
@@ -71,6 +72,8 @@ static void kempld_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
kempld_get_mutex(pld);
kempld_gpio_bitop(pld, KEMPLD_GPIO_LVL_NUM(offset), offset, value);
kempld_release_mutex(pld);
+
+ return 0;
}
static int kempld_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -166,7 +169,7 @@ static int kempld_gpio_probe(struct platform_device *pdev)
chip->direction_output = kempld_gpio_direction_output;
chip->get_direction = kempld_gpio_get_direction;
chip->get = kempld_gpio_get;
- chip->set = kempld_gpio_set;
+ chip->set_rv = kempld_gpio_set;
chip->ngpio = kempld_gpio_pincount(pld);
if (chip->ngpio == 0) {
dev_err(dev, "No GPIO pins detected\n");
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index 817ecb12d550..61524a9ba765 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -144,8 +144,8 @@ static int ljca_gpio_get_value(struct gpio_chip *chip, unsigned int offset)
return ljca_gpio_read(ljca_gpio, offset);
}
-static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
int ret;
@@ -155,6 +155,8 @@ static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
dev_err(chip->parent,
"set value failed offset: %u val: %d ret: %d\n",
offset, val, ret);
+
+ return ret;
}
static int ljca_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -183,7 +185,10 @@ static int ljca_gpio_direction_output(struct gpio_chip *chip,
if (ret)
return ret;
- ljca_gpio_set_value(chip, offset, val);
+ ret = ljca_gpio_set_value(chip, offset, val);
+ if (ret)
+ return ret;
+
set_bit(offset, ljca_gpio->output_enabled);
return 0;
@@ -432,7 +437,7 @@ static int ljca_gpio_probe(struct auxiliary_device *auxdev,
ljca_gpio->gc.direction_output = ljca_gpio_direction_output;
ljca_gpio->gc.get_direction = ljca_gpio_get_direction;
ljca_gpio->gc.get = ljca_gpio_get_value;
- ljca_gpio->gc.set = ljca_gpio_set_value;
+ ljca_gpio->gc.set_rv = ljca_gpio_set_value;
ljca_gpio->gc.set_config = ljca_gpio_set_config;
ljca_gpio->gc.init_valid_mask = ljca_gpio_init_valid_mask;
ljca_gpio->gc.can_sleep = true;
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
index 05d62011f335..19cd2847467c 100644
--- a/drivers/gpio/gpio-logicvc.c
+++ b/drivers/gpio/gpio-logicvc.c
@@ -61,23 +61,22 @@ static int logicvc_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(value & bit);
}
-static void logicvc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int logicvc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct logicvc_gpio *logicvc = gpiochip_get_data(chip);
unsigned int reg, bit;
logicvc_gpio_offset(logicvc, offset, &reg, &bit);
- regmap_update_bits(logicvc->regmap, reg, bit, value ? bit : 0);
+ return regmap_update_bits(logicvc->regmap, reg, bit, value ? bit : 0);
}
static int logicvc_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
/* Pins are always configured as output, so just set the value. */
- logicvc_gpio_set(chip, offset, value);
-
- return 0;
+ return logicvc_gpio_set(chip, offset, value);
}
static struct regmap_config logicvc_gpio_regmap_config = {
@@ -135,7 +134,7 @@ static int logicvc_gpio_probe(struct platform_device *pdev)
logicvc->chip.ngpio = LOGICVC_CTRL_GPIO_BITS +
LOGICVC_POWER_CTRL_GPIO_BITS;
logicvc->chip.get = logicvc_gpio_get;
- logicvc->chip.set = logicvc_gpio_set;
+ logicvc->chip.set_rv = logicvc_gpio_set;
logicvc->chip.direction_output = logicvc_gpio_direction_output;
return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc);
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index a9a93036f08f..26227669f026 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -105,7 +105,7 @@ static int loongson_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
return GPIO_LINE_DIRECTION_OUT;
}
-static void loongson_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
+static int loongson_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
unsigned long flags;
struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
@@ -113,6 +113,8 @@ static void loongson_gpio_set(struct gpio_chip *chip, unsigned int pin, int valu
spin_lock_irqsave(&lgpio->lock, flags);
loongson_commit_level(lgpio, pin, value);
spin_unlock_irqrestore(&lgpio->lock, flags);
+
+ return 0;
}
static int loongson_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
@@ -155,7 +157,7 @@ static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgp
lgpio->chip.get = loongson_gpio_get;
lgpio->chip.get_direction = loongson_gpio_get_direction;
lgpio->chip.direction_output = loongson_gpio_direction_output;
- lgpio->chip.set = loongson_gpio_set;
+ lgpio->chip.set_rv = loongson_gpio_set;
lgpio->chip.parent = dev;
spin_lock_init(&lgpio->lock);
}
diff --git a/drivers/gpio/gpio-loongson.c b/drivers/gpio/gpio-loongson.c
index a42145873cc9..8f3668169ebf 100644
--- a/drivers/gpio/gpio-loongson.c
+++ b/drivers/gpio/gpio-loongson.c
@@ -48,8 +48,8 @@ static int loongson_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
return !!(val & BIT(gpio + LOONGSON_GPIO_IN_OFFSET));
}
-static void loongson_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
+static int loongson_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
u32 val;
@@ -61,6 +61,8 @@ static void loongson_gpio_set_value(struct gpio_chip *chip,
val &= ~BIT(gpio);
LOONGSON_GPIODATA = val;
spin_unlock(&gpio_lock);
+
+ return 0;
}
static int loongson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
@@ -104,7 +106,7 @@ static int loongson_gpio_probe(struct platform_device *pdev)
gc->base = 0;
gc->ngpio = LOONGSON_N_GPIO;
gc->get = loongson_gpio_get_value;
- gc->set = loongson_gpio_set_value;
+ gc->set_rv = loongson_gpio_set_value;
gc->direction_input = loongson_gpio_direction_input;
gc->direction_output = loongson_gpio_direction_output;
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 8e58242f5123..52ab3ac4844c 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -147,7 +147,8 @@ static int lp3943_gpio_get(struct gpio_chip *chip, unsigned int offset)
return lp3943_get_gpio_out_status(lp3943_gpio, chip, offset);
}
-static void lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
u8 data;
@@ -157,15 +158,19 @@ static void lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset, int val
else
data = LP3943_GPIO_OUT_LOW;
- lp3943_gpio_set_mode(lp3943_gpio, offset, data);
+ return lp3943_gpio_set_mode(lp3943_gpio, offset, data);
}
static int lp3943_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
+ int ret;
+
+ ret = lp3943_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
- lp3943_gpio_set(chip, offset, value);
lp3943_gpio->input_mask &= ~BIT(offset);
return 0;
@@ -179,7 +184,7 @@ static const struct gpio_chip lp3943_gpio_chip = {
.direction_input = lp3943_gpio_direction_input,
.get = lp3943_gpio_get,
.direction_output = lp3943_gpio_direction_output,
- .set = lp3943_gpio_set,
+ .set_rv = lp3943_gpio_set,
.base = -1,
.ngpio = LP3943_MAX_GPIO,
.can_sleep = 1,
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index 5c79ba1f229c..1908ed302e92 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -58,14 +58,14 @@ static int lp873x_gpio_get(struct gpio_chip *chip, unsigned int offset)
return val & BIT(offset * BITS_PER_GPO);
}
-static void lp873x_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int lp873x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lp873x_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL,
- BIT(offset * BITS_PER_GPO),
- value ? BIT(offset * BITS_PER_GPO) : 0);
+ return regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL,
+ BIT(offset * BITS_PER_GPO),
+ value ? BIT(offset * BITS_PER_GPO) : 0);
}
static int lp873x_gpio_request(struct gpio_chip *gc, unsigned int offset)
@@ -124,7 +124,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp873x_gpio_direction_input,
.direction_output = lp873x_gpio_direction_output,
.get = lp873x_gpio_get,
- .set = lp873x_gpio_set,
+ .set_rv = lp873x_gpio_set,
.set_config = lp873x_gpio_set_config,
.base = -1,
.ngpio = 2,
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index d3ce027de081..8ea687d5d028 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -30,13 +30,13 @@ static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset));
}
-static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lp87565_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
- BIT(offset), value ? BIT(offset) : 0);
+ return regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
+ BIT(offset), value ? BIT(offset) : 0);
}
static int lp87565_gpio_get_direction(struct gpio_chip *chip,
@@ -69,8 +69,11 @@ static int lp87565_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+ int ret;
- lp87565_gpio_set(chip, offset, value);
+ ret = lp87565_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
return regmap_update_bits(gpio->map,
LP87565_REG_GPIO_CONFIG,
@@ -136,7 +139,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp87565_gpio_direction_input,
.direction_output = lp87565_gpio_direction_output,
.get = lp87565_gpio_get,
- .set = lp87565_gpio_set,
+ .set_rv = lp87565_gpio_set,
.set_config = lp87565_gpio_set_config,
.base = -1,
.ngpio = 3,
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index 2cf9fb4637a2..b0a8da5c058d 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -42,6 +42,7 @@ struct lpc18xx_gpio_pin_ic {
void __iomem *base;
struct irq_domain *domain;
struct raw_spinlock lock;
+ struct gpio_chip *gpio;
};
struct lpc18xx_gpio_chip {
@@ -74,6 +75,7 @@ static void lpc18xx_gpio_pin_ic_mask(struct irq_data *d)
{
struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
u32 type = irqd_get_trigger_type(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
raw_spin_lock(&ic->lock);
@@ -88,12 +90,17 @@ static void lpc18xx_gpio_pin_ic_mask(struct irq_data *d)
raw_spin_unlock(&ic->lock);
irq_chip_mask_parent(d);
+
+ gpiochip_disable_irq(ic->gpio, hwirq);
}
static void lpc18xx_gpio_pin_ic_unmask(struct irq_data *d)
{
struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
u32 type = irqd_get_trigger_type(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(ic->gpio, hwirq);
raw_spin_lock(&ic->lock);
@@ -149,13 +156,14 @@ static int lpc18xx_gpio_pin_ic_set_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip lpc18xx_gpio_pin_ic = {
+static const struct irq_chip lpc18xx_gpio_pin_ic = {
.name = "LPC18xx GPIO pin",
.irq_mask = lpc18xx_gpio_pin_ic_mask,
.irq_unmask = lpc18xx_gpio_pin_ic_unmask,
.irq_eoi = lpc18xx_gpio_pin_ic_eoi,
.irq_set_type = lpc18xx_gpio_pin_ic_set_type,
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SET_TYPE_MASKED,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int lpc18xx_gpio_pin_ic_domain_alloc(struct irq_domain *domain,
@@ -240,17 +248,16 @@ static int lpc18xx_gpio_pin_ic_probe(struct lpc18xx_gpio_chip *gc)
raw_spin_lock_init(&ic->lock);
- ic->domain = irq_domain_add_hierarchy(parent_domain, 0,
- NR_LPC18XX_GPIO_PIN_IC_IRQS,
- dev->of_node,
- &lpc18xx_gpio_pin_ic_domain_ops,
- ic);
+ ic->domain = irq_domain_create_hierarchy(parent_domain, 0, NR_LPC18XX_GPIO_PIN_IC_IRQS,
+ of_fwnode_handle(dev->of_node),
+ &lpc18xx_gpio_pin_ic_domain_ops, ic);
if (!ic->domain) {
pr_err("unable to add irq domain\n");
ret = -ENODEV;
goto free_iomap;
}
+ ic->gpio = &gc->gpio;
gc->pin_ic = ic;
return 0;
@@ -263,10 +270,14 @@ free_ic:
return ret;
}
-static void lpc18xx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int lpc18xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lpc18xx_gpio_chip *gc = gpiochip_get_data(chip);
+
writeb(value ? 1 : 0, gc->base + offset);
+
+ return 0;
}
static int lpc18xx_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -316,7 +327,7 @@ static const struct gpio_chip lpc18xx_chip = {
.free = gpiochip_generic_free,
.direction_input = lpc18xx_gpio_direction_input,
.direction_output = lpc18xx_gpio_direction_output,
- .set = lpc18xx_gpio_set,
+ .set_rv = lpc18xx_gpio_set,
.get = lpc18xx_gpio_get,
.ngpio = LPC18XX_MAX_PORTS * LPC18XX_PINS_PER_PORT,
.owner = THIS_MODULE,
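[Editor's note: the grgpio and lpc18xx hunks above, and the mpc8xxx and new max77759 drivers further down, convert to immutable irq_chips: the structure becomes const, IRQCHIP_IMMUTABLE is set, GPIOCHIP_IRQ_RESOURCE_HELPERS supplies the request/release resource callbacks, and mask/unmask bracket the hardware access with gpiochip_disable_irq()/gpiochip_enable_irq(). A condensed sketch, with hypothetical foo_hw_* helpers for the register writes:

	#include <linux/gpio/driver.h>
	#include <linux/irq.h>

	static void foo_hw_mask(struct gpio_chip *gc, irq_hw_number_t hwirq) { /* ... */ }
	static void foo_hw_unmask(struct gpio_chip *gc, irq_hw_number_t hwirq) { /* ... */ }

	static void foo_irq_mask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
		irq_hw_number_t hwirq = irqd_to_hwirq(d);

		foo_hw_mask(gc, hwirq);
		gpiochip_disable_irq(gc, hwirq);	/* after masking in hardware */
	}

	static void foo_irq_unmask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
		irq_hw_number_t hwirq = irqd_to_hwirq(d);

		gpiochip_enable_irq(gc, hwirq);		/* before unmasking in hardware */
		foo_hw_unmask(gc, hwirq);
	}

	static const struct irq_chip foo_irq_chip = {
		.name		= "foo",
		.irq_mask	= foo_irq_mask,
		.irq_unmask	= foo_irq_unmask,
		.flags		= IRQCHIP_IMMUTABLE,
		GPIOCHIP_IRQ_RESOURCE_HELPERS,
	};
]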
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index c097e310c9e8..6668b8bd9f1e 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -340,28 +340,34 @@ static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
return 0;
}
-static void lpc32xx_gpio_set_value_p012(struct gpio_chip *chip, unsigned pin,
- int value)
+static int lpc32xx_gpio_set_value_p012(struct gpio_chip *chip,
+ unsigned int pin, int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_level_p012(group, pin, value);
+
+ return 0;
}
-static void lpc32xx_gpio_set_value_p3(struct gpio_chip *chip, unsigned pin,
- int value)
+static int lpc32xx_gpio_set_value_p3(struct gpio_chip *chip,
+ unsigned int pin, int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_level_p3(group, pin, value);
+
+ return 0;
}
-static void lpc32xx_gpo_set_value(struct gpio_chip *chip, unsigned pin,
- int value)
+static int lpc32xx_gpo_set_value(struct gpio_chip *chip, unsigned int pin,
+ int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpo_level_p3(group, pin, value);
+
+ return 0;
}
static int lpc32xx_gpo_get_value(struct gpio_chip *chip, unsigned pin)
@@ -401,7 +407,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set = lpc32xx_gpio_set_value_p012,
+ .set_rv = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P0_GRP,
@@ -417,7 +423,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set = lpc32xx_gpio_set_value_p012,
+ .set_rv = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P1_GRP,
@@ -433,7 +439,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set = lpc32xx_gpio_set_value_p012,
+ .set_rv = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPIO_P2_GRP,
.ngpio = LPC32XX_GPIO_P2_MAX,
@@ -448,7 +454,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p3,
.get = lpc32xx_gpio_get_value_p3,
.direction_output = lpc32xx_gpio_dir_output_p3,
- .set = lpc32xx_gpio_set_value_p3,
+ .set_rv = lpc32xx_gpio_set_value_p3,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_gpio_p3,
.base = LPC32XX_GPIO_P3_GRP,
@@ -476,7 +482,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.chip = {
.label = "gpo_p3",
.direction_output = lpc32xx_gpio_dir_out_always,
- .set = lpc32xx_gpo_set_value,
+ .set_rv = lpc32xx_gpo_set_value,
.get = lpc32xx_gpo_get_value,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPO_P3_GRP,
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
index 8f38303fcbc4..e73e72d62bc8 100644
--- a/drivers/gpio/gpio-madera.c
+++ b/drivers/gpio/gpio-madera.c
@@ -87,23 +87,17 @@ static int madera_gpio_direction_out(struct gpio_chip *chip,
MADERA_GP1_LVL_MASK, reg_val);
}
-static void madera_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int madera_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
struct madera *madera = madera_gpio->madera;
unsigned int reg_offset = 2 * offset;
unsigned int reg_val = value ? MADERA_GP1_LVL : 0;
- int ret;
-
- ret = regmap_update_bits(madera->regmap,
- MADERA_GPIO1_CTRL_1 + reg_offset,
- MADERA_GP1_LVL_MASK, reg_val);
- /* set() doesn't return an error so log a warning */
- if (ret)
- dev_warn(madera->dev, "Failed to write to 0x%x (%d)\n",
- MADERA_GPIO1_CTRL_1 + reg_offset, ret);
+ return regmap_update_bits(madera->regmap,
+ MADERA_GPIO1_CTRL_1 + reg_offset,
+ MADERA_GP1_LVL_MASK, reg_val);
}
static const struct gpio_chip madera_gpio_chip = {
@@ -115,7 +109,7 @@ static const struct gpio_chip madera_gpio_chip = {
.direction_input = madera_gpio_direction_in,
.get = madera_gpio_get,
.direction_output = madera_gpio_direction_out,
- .set = madera_gpio_set,
+ .set_rv = madera_gpio_set,
.set_config = gpiochip_generic_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index fc0708ab5192..6e6504ab740a 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -103,19 +103,6 @@ static int max3191x_direction_input(struct gpio_chip *gpio, unsigned int offset)
return 0;
}
-static int max3191x_direction_output(struct gpio_chip *gpio,
- unsigned int offset, int value)
-{
- return -EINVAL;
-}
-
-static void max3191x_set(struct gpio_chip *gpio, unsigned int offset, int value)
-{ }
-
-static void max3191x_set_multiple(struct gpio_chip *gpio, unsigned long *mask,
- unsigned long *bits)
-{ }
-
static unsigned int max3191x_wordlen(struct max3191x_chip *max3191x)
{
return max3191x->mode == STATUS_BYTE_ENABLED ? 2 : 1;
@@ -421,9 +408,6 @@ static int max3191x_probe(struct spi_device *spi)
max3191x->gpio.get_direction = max3191x_get_direction;
max3191x->gpio.direction_input = max3191x_direction_input;
- max3191x->gpio.direction_output = max3191x_direction_output;
- max3191x->gpio.set = max3191x_set;
- max3191x->gpio.set_multiple = max3191x_set_multiple;
max3191x->gpio.get = max3191x_get;
max3191x->gpio.get_multiple = max3191x_get_multiple;
max3191x->gpio.set_config = max3191x_set_config;
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index e688c13c8cc3..75d414d8c992 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -143,18 +143,21 @@ static int max7301_get(struct gpio_chip *chip, unsigned offset)
return level;
}
-static void max7301_set(struct gpio_chip *chip, unsigned offset, int value)
+static int max7301_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct max7301 *ts = gpiochip_get_data(chip);
+ int ret;
/* First 4 pins are unused in the controller */
offset += 4;
mutex_lock(&ts->lock);
- __max7301_set(ts, offset, value);
+ ret = __max7301_set(ts, offset, value);
mutex_unlock(&ts->lock);
+
+ return ret;
}
int __max730x_probe(struct max7301 *ts)
@@ -185,7 +188,7 @@ int __max730x_probe(struct max7301 *ts)
ts->chip.direction_input = max7301_direction_input;
ts->chip.get = max7301_get;
ts->chip.direction_output = max7301_direction_output;
- ts->chip.set = max7301_set;
+ ts->chip.set_rv = max7301_set;
ts->chip.ngpio = PIN_NUMBER;
ts->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 49d362907bc7..d5ffedb086af 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -225,16 +225,19 @@ out:
mutex_unlock(&chip->lock);
}
-static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
+static int max732x_gpio_set_value(struct gpio_chip *gc, unsigned int off,
+ int val)
{
unsigned base = off & ~0x7;
uint8_t mask = 1u << (off & 0x7);
max732x_gpio_set_mask(gc, base, mask, val << (off & 0x7));
+
+ return 0;
}
-static void max732x_gpio_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int max732x_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
unsigned mask_lo = mask[0] & 0xff;
unsigned mask_hi = (mask[0] >> 8) & 0xff;
@@ -243,6 +246,8 @@ static void max732x_gpio_set_multiple(struct gpio_chip *gc,
max732x_gpio_set_mask(gc, 0, mask_lo, bits[0] & 0xff);
if (mask_hi)
max732x_gpio_set_mask(gc, 8, mask_hi, (bits[0] >> 8) & 0xff);
+
+ return 0;
}
static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
@@ -580,8 +585,8 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
gc->direction_input = max732x_gpio_direction_input;
if (chip->dir_output) {
gc->direction_output = max732x_gpio_direction_output;
- gc->set = max732x_gpio_set_value;
- gc->set_multiple = max732x_gpio_set_multiple;
+ gc->set_rv = max732x_gpio_set_value;
+ gc->set_multiple_rv = max732x_gpio_set_multiple;
}
gc->get = max732x_gpio_get_value;
gc->can_sleep = true;
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index 8c2a5609161f..af7af8e40afe 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -223,20 +223,17 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
return ret;
}
-static void max77620_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int max77620_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct max77620_gpio *mgpio = gpiochip_get_data(gc);
u8 val;
- int ret;
val = (value) ? MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH :
MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW;
- ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
- MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val);
- if (ret < 0)
- dev_err(mgpio->dev, "CNFG_GPIO_OUT update failed: %d\n", ret);
+ return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
+ MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val);
}
static int max77620_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
@@ -314,7 +311,7 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->gpio_chip.direction_input = max77620_gpio_dir_input;
mgpio->gpio_chip.get = max77620_gpio_get;
mgpio->gpio_chip.direction_output = max77620_gpio_dir_output;
- mgpio->gpio_chip.set = max77620_gpio_set;
+ mgpio->gpio_chip.set_rv = max77620_gpio_set;
mgpio->gpio_chip.set_config = max77620_gpio_set_config;
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
diff --git a/drivers/gpio/gpio-max77759.c b/drivers/gpio/gpio-max77759.c
new file mode 100644
index 000000000000..7fe8e6f697d0
--- /dev/null
+++ b/drivers/gpio/gpio-max77759.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright 2020 Google Inc
+// Copyright 2025 Linaro Ltd.
+//
+// GPIO driver for Maxim MAX77759
+
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/device/driver.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/lockdep.h>
+#include <linux/mfd/max77759.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+
+#define MAX77759_N_GPIOS ARRAY_SIZE(max77759_gpio_line_names)
+static const char * const max77759_gpio_line_names[] = { "GPIO5", "GPIO6" };
+
+struct max77759_gpio_chip {
+ struct regmap *map;
+ struct max77759 *max77759;
+ struct gpio_chip gc;
+ struct mutex maxq_lock; /* protect MaxQ r/m/w operations */
+
+ struct mutex irq_lock; /* protect irq bus */
+ int irq_mask;
+ int irq_mask_changed;
+ int irq_trig;
+ int irq_trig_changed;
+};
+
+#define MAX77759_GPIOx_TRIGGER(offs, val) (((val) & 1) << (offs))
+#define MAX77759_GPIOx_TRIGGER_MASK(offs) MAX77759_GPIOx_TRIGGER(offs, ~0)
+enum max77759_trigger_gpio_type {
+ MAX77759_GPIO_TRIGGER_RISING = 0,
+ MAX77759_GPIO_TRIGGER_FALLING = 1
+};
+
+#define MAX77759_GPIOx_DIR(offs, dir) (((dir) & 1) << (2 + (3 * (offs))))
+#define MAX77759_GPIOx_DIR_MASK(offs) MAX77759_GPIOx_DIR(offs, ~0)
+enum max77759_control_gpio_dir {
+ MAX77759_GPIO_DIR_IN = 0,
+ MAX77759_GPIO_DIR_OUT = 1
+};
+
+#define MAX77759_GPIOx_OUTVAL(offs, val) (((val) & 1) << (3 + (3 * (offs))))
+#define MAX77759_GPIOx_OUTVAL_MASK(offs) MAX77759_GPIOx_OUTVAL(offs, ~0)
+
+#define MAX77759_GPIOx_INVAL_MASK(offs) (BIT(4) << (3 * (offs)))
+
+static int max77759_gpio_maxq_gpio_trigger_read(struct max77759_gpio_chip *chip)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 1);
+ DEFINE_FLEX(struct max77759_maxq_response, rsp, rsp, length, 2);
+ int ret;
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_READ;
+
+ ret = max77759_maxq_command(chip->max77759, cmd, rsp);
+ if (ret < 0)
+ return ret;
+
+ return rsp->rsp[1];
+}
+
+static int max77759_gpio_maxq_gpio_trigger_write(struct max77759_gpio_chip *chip,
+ u8 trigger)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 2);
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_WRITE;
+ cmd->cmd[1] = trigger;
+
+ return max77759_maxq_command(chip->max77759, cmd, NULL);
+}
+
+static int max77759_gpio_maxq_gpio_control_read(struct max77759_gpio_chip *chip)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 1);
+ DEFINE_FLEX(struct max77759_maxq_response, rsp, rsp, length, 2);
+ int ret;
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ;
+
+ ret = max77759_maxq_command(chip->max77759, cmd, rsp);
+ if (ret < 0)
+ return ret;
+
+ return rsp->rsp[1];
+}
+
+static int max77759_gpio_maxq_gpio_control_write(struct max77759_gpio_chip *chip,
+ u8 ctrl)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 2);
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_CONTROL_WRITE;
+ cmd->cmd[1] = ctrl;
+
+ return max77759_maxq_command(chip->max77759, cmd, NULL);
+}
+
+static int
+max77759_gpio_direction_from_control(int ctrl, unsigned int offset)
+{
+ enum max77759_control_gpio_dir dir;
+
+ dir = !!(ctrl & MAX77759_GPIOx_DIR_MASK(offset));
+ return ((dir == MAX77759_GPIO_DIR_OUT)
+ ? GPIO_LINE_DIRECTION_OUT
+ : GPIO_LINE_DIRECTION_IN);
+}
+
+static int max77759_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl;
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ return max77759_gpio_direction_from_control(ctrl, offset);
+}
+
+static int max77759_gpio_direction_helper(struct gpio_chip *gc,
+ unsigned int offset,
+ enum max77759_control_gpio_dir dir,
+ int value)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl, new_ctrl;
+
+ guard(mutex)(&chip->maxq_lock);
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ new_ctrl = ctrl & ~MAX77759_GPIOx_DIR_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_DIR(offset, dir);
+
+ if (dir == MAX77759_GPIO_DIR_OUT) {
+ new_ctrl &= ~MAX77759_GPIOx_OUTVAL_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_OUTVAL(offset, value);
+ }
+
+ if (new_ctrl == ctrl)
+ return 0;
+
+ return max77759_gpio_maxq_gpio_control_write(chip, new_ctrl);
+}
+
+static int max77759_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ return max77759_gpio_direction_helper(gc, offset,
+ MAX77759_GPIO_DIR_IN, -1);
+}
+
+static int max77759_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ return max77759_gpio_direction_helper(gc, offset,
+ MAX77759_GPIO_DIR_OUT, value);
+}
+
+static int max77759_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl, mask;
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ /*
+ * The input status bit doesn't reflect the pin state when the GPIO is
+ * configured as an output. Check the direction, and inspect the input
+ * or output bit accordingly.
+ */
+ mask = ((max77759_gpio_direction_from_control(ctrl, offset)
+ == GPIO_LINE_DIRECTION_IN)
+ ? MAX77759_GPIOx_INVAL_MASK(offset)
+ : MAX77759_GPIOx_OUTVAL_MASK(offset));
+
+ return !!(ctrl & mask);
+}
+
+static int max77759_gpio_set_value(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl, new_ctrl;
+
+ guard(mutex)(&chip->maxq_lock);
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ new_ctrl = ctrl & ~MAX77759_GPIOx_OUTVAL_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_OUTVAL(offset, value);
+
+ if (new_ctrl == ctrl)
+ return 0;
+
+ return max77759_gpio_maxq_gpio_control_write(chip, new_ctrl);
+}
+
+static void max77759_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ chip->irq_mask &= ~MAX77759_MAXQ_REG_UIC_INT1_GPIOxI_MASK(hwirq);
+ chip->irq_mask |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 1);
+ chip->irq_mask_changed |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 1);
+
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void max77759_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
+
+ chip->irq_mask &= ~MAX77759_MAXQ_REG_UIC_INT1_GPIOxI_MASK(hwirq);
+ chip->irq_mask |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 0);
+ chip->irq_mask_changed |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 1);
+}
+
+static int max77759_gpio_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ chip->irq_trig &= ~MAX77759_GPIOx_TRIGGER_MASK(hwirq);
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ chip->irq_trig |= MAX77759_GPIOx_TRIGGER(hwirq,
+ MAX77759_GPIO_TRIGGER_RISING);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ chip->irq_trig |= MAX77759_GPIOx_TRIGGER(hwirq,
+ MAX77759_GPIO_TRIGGER_FALLING);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ chip->irq_trig_changed |= MAX77759_GPIOx_TRIGGER(hwirq, 1);
+
+ return 0;
+}
+
+static void max77759_gpio_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+
+ mutex_lock(&chip->irq_lock);
+}
+
+static int max77759_gpio_bus_sync_unlock_helper(struct gpio_chip *gc,
+ struct max77759_gpio_chip *chip)
+ __must_hold(&chip->maxq_lock)
+{
+ int ctrl, trigger, new_trigger, new_ctrl;
+ unsigned long irq_trig_changed;
+ int offset;
+ int ret;
+
+ lockdep_assert_held(&chip->maxq_lock);
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ trigger = max77759_gpio_maxq_gpio_trigger_read(chip);
+ if (ctrl < 0 || trigger < 0) {
+ dev_err(gc->parent, "failed to read current state: %d / %d\n",
+ ctrl, trigger);
+ return (ctrl < 0) ? ctrl : trigger;
+ }
+
+ new_trigger = trigger & ~chip->irq_trig_changed;
+ new_trigger |= (chip->irq_trig & chip->irq_trig_changed);
+
+ /* change GPIO direction if required */
+ new_ctrl = ctrl;
+ irq_trig_changed = chip->irq_trig_changed;
+ for_each_set_bit(offset, &irq_trig_changed, MAX77759_N_GPIOS) {
+ new_ctrl &= ~MAX77759_GPIOx_DIR_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_DIR(offset, MAX77759_GPIO_DIR_IN);
+ }
+
+ if (new_trigger != trigger) {
+ ret = max77759_gpio_maxq_gpio_trigger_write(chip, new_trigger);
+ if (ret) {
+ dev_err(gc->parent,
+ "failed to write new trigger: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (new_ctrl != ctrl) {
+ ret = max77759_gpio_maxq_gpio_control_write(chip, new_ctrl);
+ if (ret) {
+ dev_err(gc->parent,
+ "failed to write new control: %d\n", ret);
+ return ret;
+ }
+ }
+
+ chip->irq_trig_changed = 0;
+
+ return 0;
+}
+
+static void max77759_gpio_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ret;
+
+ scoped_guard(mutex, &chip->maxq_lock) {
+ ret = max77759_gpio_bus_sync_unlock_helper(gc, chip);
+ if (ret)
+ goto out_unlock;
+ }
+
+ ret = regmap_update_bits(chip->map,
+ MAX77759_MAXQ_REG_UIC_INT1_M,
+ chip->irq_mask_changed, chip->irq_mask);
+ if (ret) {
+ dev_err(gc->parent,
+ "failed to update UIC_INT1 irq mask: %d\n", ret);
+ goto out_unlock;
+ }
+
+ chip->irq_mask_changed = 0;
+
+out_unlock:
+ mutex_unlock(&chip->irq_lock);
+}
+
+static void max77759_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ seq_puts(p, dev_name(gc->parent));
+}
+
+static const struct irq_chip max77759_gpio_irq_chip = {
+ .irq_mask = max77759_gpio_irq_mask,
+ .irq_unmask = max77759_gpio_irq_unmask,
+ .irq_set_type = max77759_gpio_set_irq_type,
+ .irq_bus_lock = max77759_gpio_bus_lock,
+ .irq_bus_sync_unlock = max77759_gpio_bus_sync_unlock,
+ .irq_print_chip = max77759_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static irqreturn_t max77759_gpio_irqhandler(int irq, void *data)
+{
+ struct max77759_gpio_chip *chip = data;
+ struct gpio_chip *gc = &chip->gc;
+ bool handled = false;
+
+ /* iterate until no interrupt is pending */
+ while (true) {
+ unsigned int uic_int1;
+ int ret;
+ unsigned long pending;
+ int offset;
+
+ ret = regmap_read(chip->map, MAX77759_MAXQ_REG_UIC_INT1,
+ &uic_int1);
+ if (ret < 0) {
+ dev_err_ratelimited(gc->parent,
+ "failed to read IRQ status: %d\n",
+ ret);
+ /*
+ * If !handled, we have looped not even once, which
+ * means we should return IRQ_NONE in that case (and
+ * of course IRQ_HANDLED otherwise).
+ */
+ return IRQ_RETVAL(handled);
+ }
+
+ pending = uic_int1;
+ pending &= (MAX77759_MAXQ_REG_UIC_INT1_GPIO6I
+ | MAX77759_MAXQ_REG_UIC_INT1_GPIO5I);
+ if (!pending)
+ break;
+
+ for_each_set_bit(offset, &pending, MAX77759_N_GPIOS) {
+ /*
+ * ACK interrupt by writing 1 to bit 'offset', all
+ * others need to be written as 0. This needs to be
+ * done unconditionally hence regmap_set_bits() is
+ * inappropriate here.
+ */
+ regmap_write(chip->map, MAX77759_MAXQ_REG_UIC_INT1,
+ BIT(offset));
+
+ handle_nested_irq(irq_find_mapping(gc->irq.domain,
+ offset));
+
+ handled = true;
+ }
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static int max77759_gpio_probe(struct platform_device *pdev)
+{
+ struct max77759_gpio_chip *chip;
+ int irq;
+ struct gpio_irq_chip *girq;
+ int ret;
+ unsigned long irq_flags;
+ struct irq_data *irqd;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->map = dev_get_regmap(pdev->dev.parent, "maxq");
+ if (!chip->map)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Missing regmap\n");
+
+ irq = platform_get_irq_byname(pdev, "GPI");
+ if (irq < 0)
+ return dev_err_probe(&pdev->dev, irq, "Failed to get IRQ\n");
+
+ chip->max77759 = dev_get_drvdata(pdev->dev.parent);
+ ret = devm_mutex_init(&pdev->dev, &chip->maxq_lock);
+ if (ret)
+ return ret;
+ ret = devm_mutex_init(&pdev->dev, &chip->irq_lock);
+ if (ret)
+ return ret;
+
+ chip->gc.base = -1;
+ chip->gc.label = dev_name(&pdev->dev);
+ chip->gc.parent = &pdev->dev;
+ chip->gc.can_sleep = true;
+
+ chip->gc.names = max77759_gpio_line_names;
+ chip->gc.ngpio = MAX77759_N_GPIOS;
+ chip->gc.get_direction = max77759_gpio_get_direction;
+ chip->gc.direction_input = max77759_gpio_direction_input;
+ chip->gc.direction_output = max77759_gpio_direction_output;
+ chip->gc.get = max77759_gpio_get_value;
+ chip->gc.set_rv = max77759_gpio_set_value;
+
+ girq = &chip->gc.irq;
+ gpio_irq_chip_set_chip(girq, &max77759_gpio_irq_chip);
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ girq->threaded = true;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to add GPIO chip\n");
+
+ irq_flags = IRQF_ONESHOT | IRQF_SHARED;
+ irqd = irq_get_irq_data(irq);
+ if (irqd)
+ irq_flags |= irqd_get_trigger_type(irqd);
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ max77759_gpio_irqhandler, irq_flags,
+ dev_name(&pdev->dev), chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to request IRQ\n");
+
+ return ret;
+}
+
+static const struct of_device_id max77759_gpio_of_id[] = {
+ { .compatible = "maxim,max77759-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77759_gpio_of_id);
+
+static const struct platform_device_id max77759_gpio_platform_id[] = {
+ { "max77759-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, max77759_gpio_platform_id);
+
+static struct platform_driver max77759_gpio_driver = {
+ .driver = {
+ .name = "max77759-gpio",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = max77759_gpio_of_id,
+ },
+ .probe = max77759_gpio_probe,
+ .id_table = max77759_gpio_platform_id,
+};
+
+module_platform_driver(max77759_gpio_driver);
+
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("GPIO driver for Maxim MAX77759");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 7ee891ef6905..5ee2991ecdfd 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -119,7 +119,7 @@ static int mb86s70_gpio_get(struct gpio_chip *gc, unsigned gpio)
return !!(readl(gchip->base + PDR(gpio)) & OFFSET(gpio));
}
-static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
+static int mb86s70_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct mb86s70_gpio_chip *gchip = gpiochip_get_data(gc);
unsigned long flags;
@@ -135,6 +135,8 @@ static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
writel(val, gchip->base + PDR(gpio));
spin_unlock_irqrestore(&gchip->lock, flags);
+
+ return 0;
}
static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
@@ -178,7 +180,7 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.request = mb86s70_gpio_request;
gchip->gc.free = mb86s70_gpio_free;
gchip->gc.get = mb86s70_gpio_get;
- gchip->gc.set = mb86s70_gpio_set;
+ gchip->gc.set_rv = mb86s70_gpio_set;
gchip->gc.to_irq = mb86s70_gpio_to_irq;
gchip->gc.label = dev_name(&pdev->dev);
gchip->gc.ngpio = 32;
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index 5fb357d7b78a..e68956104161 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -57,15 +57,18 @@ static int __mc33880_set(struct mc33880 *mc, unsigned offset, int value)
}
-static void mc33880_set(struct gpio_chip *chip, unsigned offset, int value)
+static int mc33880_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct mc33880 *mc = gpiochip_get_data(chip);
+ int ret;
mutex_lock(&mc->lock);
- __mc33880_set(mc, offset, value);
+ ret = __mc33880_set(mc, offset, value);
mutex_unlock(&mc->lock);
+
+ return ret;
}
static int mc33880_probe(struct spi_device *spi)
@@ -100,7 +103,7 @@ static int mc33880_probe(struct spi_device *spi)
mc->spi = spi;
mc->chip.label = DRIVER_NAME;
- mc->chip.set = mc33880_set;
+ mc->chip.set_rv = mc33880_set;
mc->chip.base = pdata->base;
mc->chip.ngpio = PIN_NUMBER;
mc->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 48e3768a830e..12cf36f9ca63 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -89,7 +89,7 @@ struct ioh_gpio {
static const int num_ports[] = {6, 12, 16, 16, 15, 16, 16, 12};
-static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+static int ioh_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
u32 reg_val;
struct ioh_gpio *chip = gpiochip_get_data(gpio);
@@ -104,6 +104,8 @@ static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
spin_unlock_irqrestore(&chip->spinlock, flags);
+
+ return 0;
}
static int ioh_gpio_get(struct gpio_chip *gpio, unsigned nr)
@@ -222,7 +224,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
gpio->direction_input = ioh_gpio_direction_input;
gpio->get = ioh_gpio_get;
gpio->direction_output = ioh_gpio_direction_output;
- gpio->set = ioh_gpio_set;
+ gpio->set_rv = ioh_gpio_set;
gpio->dbg_show = NULL;
gpio->base = -1;
gpio->ngpio = num_port;
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 0cd4c36ae8aa..121efdd71e45 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -123,9 +123,12 @@ static irqreturn_t mpc8xxx_gpio_irq_cascade(int irq, void *data)
static void mpc8xxx_irq_unmask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
+ gpiochip_enable_irq(gc, hwirq);
+
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
@@ -138,6 +141,7 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
static void mpc8xxx_irq_mask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
@@ -148,6 +152,8 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
& ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+
+ gpiochip_disable_irq(gc, hwirq);
}
static void mpc8xxx_irq_ack(struct irq_data *d)
@@ -244,6 +250,8 @@ static struct irq_chip mpc8xxx_irq_chip = {
.irq_ack = mpc8xxx_irq_ack,
/* this might get overwritten in mpc8xxx_probe() */
.irq_set_type = mpc8xxx_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
@@ -410,7 +418,9 @@ static int mpc8xxx_probe(struct platform_device *pdev)
goto err;
}
- device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
return 0;
err:
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 3604abcb6fec..57633a7b4270 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -408,9 +408,8 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
struct mvebu_gpio_chip *mvchip = gc->private;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
mvebu_gpio_write_edge_cause(mvchip, ~mask);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
@@ -420,10 +419,9 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv &= ~mask;
mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
@@ -433,11 +431,10 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
mvebu_gpio_write_edge_cause(mvchip, ~mask);
ct->mask_cache_priv |= mask;
mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_level_irq_mask(struct irq_data *d)
@@ -447,10 +444,9 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv &= ~mask;
mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
@@ -460,10 +456,9 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv |= mask;
mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
/*****************************************************************************
@@ -1242,7 +1237,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
return 0;
mvchip->domain =
- irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL);
+ irq_domain_create_linear(of_fwnode_handle(np), ngpios, &irq_generic_chip_ops, NULL);
if (!mvchip->domain) {
dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
mvchip->chip.label);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 619b6fb9d833..fae1a30f8ae6 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -490,7 +490,14 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->gc.request = mxc_gpio_request;
port->gc.free = mxc_gpio_free;
port->gc.to_irq = mxc_gpio_to_irq;
- port->gc.base = of_alias_get_id(np, "gpio") * 32;
+ /*

+ * The driver is DT-only, so a fixed base only needs to be maintained for
+ * legacy userspace using the sysfs interface.
+ */
+ if (IS_ENABLED(CONFIG_GPIO_SYSFS))
+ port->gc.base = of_alias_get_id(np, "gpio") * 32;
+ else /* silence boot time warning */
+ port->gc.base = -1;
err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port);
if (err)
@@ -502,7 +509,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
goto out_bgio;
}
- port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
+ port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0,
&irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 024ad077e98d..b418fbccb26c 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -303,8 +303,8 @@ static int mxs_gpio_probe(struct platform_device *pdev)
goto out_iounmap;
}
- port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
goto out_iounmap;
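mxc and mxs (along with several other drivers in this series) switch from the device_node based irq_domain_add_*() helpers to the fwnode based irq_domain_create_*() variants, with of_fwnode_handle() providing the conversion. A minimal sketch of the new call, assuming a hypothetical foo_create_domain() helper:

#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *foo_create_domain(struct device_node *np,
					    unsigned int irq_base)
{
	/*
	 * Old style took the device_node directly:
	 *   irq_domain_add_legacy(np, 32, irq_base, 0,
	 *                         &irq_domain_simple_ops, NULL);
	 * New style goes through the generic fwnode handle instead.
	 */
	return irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0,
					&irq_domain_simple_ops, NULL);
}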
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 442435ded020..b852e4997629 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -215,6 +215,8 @@ struct pca953x_chip {
DECLARE_BITMAP(irq_stat, MAX_LINE);
DECLARE_BITMAP(irq_trig_raise, MAX_LINE);
DECLARE_BITMAP(irq_trig_fall, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_level_high, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_level_low, MAX_LINE);
#endif
atomic_t wakeup_path;
@@ -774,6 +776,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio);
+ bitmap_or(irq_mask, irq_mask, chip->irq_trig_level_high, gc->ngpio);
+ bitmap_or(irq_mask, irq_mask, chip->irq_trig_level_low, gc->ngpio);
bitmap_complement(reg_direction, reg_direction, gc->ngpio);
bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio);
@@ -791,13 +795,15 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
struct device *dev = &chip->client->dev;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (!(type & IRQ_TYPE_EDGE_BOTH)) {
+ if (!(type & IRQ_TYPE_SENSE_MASK)) {
dev_err(dev, "irq %d: unsupported type %d\n", d->irq, type);
return -EINVAL;
}
assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING);
assign_bit(hwirq, chip->irq_trig_raise, type & IRQ_TYPE_EDGE_RISING);
+ assign_bit(hwirq, chip->irq_trig_level_low, type & IRQ_TYPE_LEVEL_LOW);
+ assign_bit(hwirq, chip->irq_trig_level_high, type & IRQ_TYPE_LEVEL_HIGH);
return 0;
}
@@ -810,6 +816,8 @@ static void pca953x_irq_shutdown(struct irq_data *d)
clear_bit(hwirq, chip->irq_trig_raise);
clear_bit(hwirq, chip->irq_trig_fall);
+ clear_bit(hwirq, chip->irq_trig_level_low);
+ clear_bit(hwirq, chip->irq_trig_level_high);
}
static void pca953x_irq_print_chip(struct irq_data *data, struct seq_file *p)
@@ -840,6 +848,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
DECLARE_BITMAP(cur_stat, MAX_LINE);
DECLARE_BITMAP(new_stat, MAX_LINE);
DECLARE_BITMAP(trigger, MAX_LINE);
+ DECLARE_BITMAP(edges, MAX_LINE);
int ret;
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
@@ -857,13 +866,26 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
- if (bitmap_empty(trigger, gc->ngpio))
- return false;
+ if (bitmap_empty(chip->irq_trig_level_high, gc->ngpio) &&
+ bitmap_empty(chip->irq_trig_level_low, gc->ngpio)) {
+ if (bitmap_empty(trigger, gc->ngpio))
+ return false;
+ }
bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio);
bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
- bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio);
- bitmap_and(pending, new_stat, trigger, gc->ngpio);
+ bitmap_or(edges, old_stat, cur_stat, gc->ngpio);
+ bitmap_and(pending, edges, trigger, gc->ngpio);
+
+ bitmap_and(cur_stat, new_stat, chip->irq_trig_level_high, gc->ngpio);
+ bitmap_and(cur_stat, cur_stat, chip->irq_mask, gc->ngpio);
+ bitmap_or(pending, pending, cur_stat, gc->ngpio);
+
+ bitmap_complement(cur_stat, new_stat, gc->ngpio);
+ bitmap_and(cur_stat, cur_stat, reg_direction, gc->ngpio);
+ bitmap_and(old_stat, cur_stat, chip->irq_trig_level_low, gc->ngpio);
+ bitmap_and(old_stat, old_stat, chip->irq_mask, gc->ngpio);
+ bitmap_or(pending, pending, old_stat, gc->ngpio);
return !bitmap_empty(pending, gc->ngpio);
}
@@ -1204,6 +1226,8 @@ static int pca953x_restore_context(struct pca953x_chip *chip)
guard(mutex)(&chip->i2c_lock);
+ if (chip->client->irq > 0)
+ enable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(chip);
@@ -1216,6 +1240,10 @@ static int pca953x_restore_context(struct pca953x_chip *chip)
static void pca953x_save_context(struct pca953x_chip *chip)
{
guard(mutex)(&chip->i2c_lock);
+
+ /* Disable IRQ to prevent early triggering while regmap "cache only" is on */
+ if (chip->client->irq > 0)
+ disable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, true);
}
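The pca953x changes emulate level-triggered interrupts on an expander that only latches input changes: two new bitmaps record the requested level sense, and pca953x_irq_pending() additionally reports lines whose current input state matches that sense even when no edge was latched (the suspend path also gates the client IRQ around the regcache cache-only window). A simplified sketch of the level part of the pending computation, with the bitmap width fixed at 64 for illustration:

#include <linux/bitmap.h>

/*
 * Minimal model of the level-IRQ part of the pending check: a line is
 * pending when it is configured for level-high and currently reads high,
 * or configured for level-low, is an input, and currently reads low.
 */
static void foo_level_pending(unsigned long *pending,
			      const unsigned long *input,     /* current input state */
			      const unsigned long *direction, /* 1 = input */
			      const unsigned long *trig_high,
			      const unsigned long *trig_low,
			      const unsigned long *irq_mask,
			      unsigned int ngpio)
{
	DECLARE_BITMAP(tmp, 64);

	/* Level-high: line currently high and unmasked. */
	bitmap_and(tmp, input, trig_high, ngpio);
	bitmap_and(tmp, tmp, irq_mask, ngpio);
	bitmap_or(pending, pending, tmp, ngpio);

	/* Level-low: line is an input, currently low, and unmasked. */
	bitmap_complement(tmp, input, ngpio);
	bitmap_and(tmp, tmp, direction, ngpio);
	bitmap_and(tmp, tmp, trig_low, ngpio);
	bitmap_and(tmp, tmp, irq_mask, ngpio);
	bitmap_or(pending, pending, tmp, ngpio);
}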
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 91cea97255fa..aead35ea090e 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -497,6 +497,8 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
gfer = readl_relaxed(base + GFER_OFFSET) & ~GPIO_bit(gpio);
writel_relaxed(grer, base + GRER_OFFSET);
writel_relaxed(gfer, base + GFER_OFFSET);
+
+ gpiochip_disable_irq(&pchip->chip, gpio);
}
static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
@@ -516,17 +518,21 @@ static void pxa_unmask_muxed_gpio(struct irq_data *d)
unsigned int gpio = irqd_to_hwirq(d);
struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio);
+ gpiochip_enable_irq(&pchip->chip, gpio);
+
c->irq_mask |= GPIO_bit(gpio);
update_edge_detect(c);
}
-static struct irq_chip pxa_muxed_gpio_chip = {
+static const struct irq_chip pxa_muxed_gpio_chip = {
.name = "GPIO",
.irq_ack = pxa_ack_muxed_gpio,
.irq_mask = pxa_mask_muxed_gpio,
.irq_unmask = pxa_unmask_muxed_gpio,
.irq_set_type = pxa_gpio_irq_type,
.irq_set_wake = pxa_gpio_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int pxa_gpio_nums(struct platform_device *pdev)
@@ -636,9 +642,9 @@ static int pxa_gpio_probe(struct platform_device *pdev)
if (!pxa_last_gpio)
return -EINVAL;
- pchip->irqdomain = irq_domain_add_legacy(pdev->dev.of_node,
- pxa_last_gpio + 1, irq_base,
- 0, &pxa_irq_domain_ops, pchip);
+ pchip->irqdomain = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node),
+ pxa_last_gpio + 1, irq_base, 0,
+ &pxa_irq_domain_ops, pchip);
if (!pchip->irqdomain)
return -ENOMEM;
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 01a3b3dac58b..c63352f2f1ec 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -521,7 +521,7 @@ static int rockchip_interrupts_register(struct rockchip_pin_bank *bank)
struct irq_chip_generic *gc;
int ret;
- bank->domain = irq_domain_add_linear(bank->of_node, 32,
+ bank->domain = irq_domain_create_linear(of_fwnode_handle(bank->of_node), 32,
&irq_generic_chip_ops, NULL);
if (!bank->domain) {
dev_warn(bank->dev, "could not init irq domain for bank %s\n",
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 242dad763ac4..3f3ee36bc3cb 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -319,7 +319,7 @@ void __init sa1100_init_gpio(void)
gpiochip_add_data(&sa1100_gpio_chip.chip, NULL);
- sa1100_gpio_irqdomain = irq_domain_add_simple(NULL,
+ sa1100_gpio_irqdomain = irq_domain_create_simple(NULL,
28, IRQ_GPIO0,
&sa1100_gpio_irqdomain_ops, sgc);
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index c2a2c76c1652..6a3c4c625138 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -169,7 +169,7 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST,
IRQ_LEVEL | IRQ_NOPROBE);
- sd->id = irq_domain_add_legacy(pdev->dev.of_node, SDV_NUM_PUB_GPIOS,
+ sd->id = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), SDV_NUM_PUB_GPIOS,
sd->irq_base, 0, &irq_domain_sdv_ops, sd);
if (!sd->id)
return -ENODEV;
diff --git a/drivers/gpio/gpio-spacemit-k1.c b/drivers/gpio/gpio-spacemit-k1.c
new file mode 100644
index 000000000000..f027066365ff
--- /dev/null
+++ b/drivers/gpio/gpio-spacemit-k1.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (C) 2025 Yixun Lan <dlan@gentoo.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+/* register offset */
+#define SPACEMIT_GPLR 0x00 /* port level - R */
+#define SPACEMIT_GPDR 0x0c /* port direction - R/W */
+#define SPACEMIT_GPSR 0x18 /* port set - W */
+#define SPACEMIT_GPCR 0x24 /* port clear - W */
+#define SPACEMIT_GRER 0x30 /* port rising edge R/W */
+#define SPACEMIT_GFER 0x3c /* port falling edge R/W */
+#define SPACEMIT_GEDR 0x48 /* edge detect status - R/W1C */
+#define SPACEMIT_GSDR 0x54 /* (set) direction - W */
+#define SPACEMIT_GCDR 0x60 /* (clear) direction - W */
+#define SPACEMIT_GSRER 0x6c /* (set) rising edge detect enable - W */
+#define SPACEMIT_GCRER 0x78 /* (clear) rising edge detect enable - W */
+#define SPACEMIT_GSFER 0x84 /* (set) falling edge detect enable - W */
+#define SPACEMIT_GCFER 0x90 /* (clear) falling edge detect enable - W */
+#define SPACEMIT_GAPMASK 0x9c /* interrupt mask, 0 disable, 1 enable - R/W */
+
+#define SPACEMIT_NR_BANKS 4
+#define SPACEMIT_NR_GPIOS_PER_BANK 32
+
+#define to_spacemit_gpio_bank(x) container_of((x), struct spacemit_gpio_bank, gc)
+
+struct spacemit_gpio;
+
+struct spacemit_gpio_bank {
+ struct gpio_chip gc;
+ struct spacemit_gpio *sg;
+ void __iomem *base;
+ u32 irq_mask;
+ u32 irq_rising_edge;
+ u32 irq_falling_edge;
+};
+
+struct spacemit_gpio {
+ struct device *dev;
+ struct spacemit_gpio_bank sgb[SPACEMIT_NR_BANKS];
+};
+
+static u32 spacemit_gpio_bank_index(struct spacemit_gpio_bank *gb)
+{
+ return (u32)(gb - gb->sg->sgb);
+}
+
+static irqreturn_t spacemit_gpio_irq_handler(int irq, void *dev_id)
+{
+ struct spacemit_gpio_bank *gb = dev_id;
+ unsigned long pending;
+ u32 n, gedr;
+
+ gedr = readl(gb->base + SPACEMIT_GEDR);
+ if (!gedr)
+ return IRQ_NONE;
+ writel(gedr, gb->base + SPACEMIT_GEDR);
+
+ pending = gedr & gb->irq_mask;
+ if (!pending)
+ return IRQ_NONE;
+
+ for_each_set_bit(n, &pending, BITS_PER_LONG)
+ handle_nested_irq(irq_find_mapping(gb->gc.irq.domain, n));
+
+ return IRQ_HANDLED;
+}
+
+static void spacemit_gpio_irq_ack(struct irq_data *d)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(irqd_to_hwirq(d)), gb->base + SPACEMIT_GEDR);
+}
+
+static void spacemit_gpio_irq_mask(struct irq_data *d)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+ u32 bit = BIT(irqd_to_hwirq(d));
+
+ gb->irq_mask &= ~bit;
+ writel(gb->irq_mask, gb->base + SPACEMIT_GAPMASK);
+
+ if (bit & gb->irq_rising_edge)
+ writel(bit, gb->base + SPACEMIT_GCRER);
+
+ if (bit & gb->irq_falling_edge)
+ writel(bit, gb->base + SPACEMIT_GCFER);
+}
+
+static void spacemit_gpio_irq_unmask(struct irq_data *d)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+ u32 bit = BIT(irqd_to_hwirq(d));
+
+ gb->irq_mask |= bit;
+
+ if (bit & gb->irq_rising_edge)
+ writel(bit, gb->base + SPACEMIT_GSRER);
+
+ if (bit & gb->irq_falling_edge)
+ writel(bit, gb->base + SPACEMIT_GSFER);
+
+ writel(gb->irq_mask, gb->base + SPACEMIT_GAPMASK);
+}
+
+static int spacemit_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+ u32 bit = BIT(irqd_to_hwirq(d));
+
+ if (type & IRQ_TYPE_EDGE_RISING) {
+ gb->irq_rising_edge |= bit;
+ writel(bit, gb->base + SPACEMIT_GSRER);
+ } else {
+ gb->irq_rising_edge &= ~bit;
+ writel(bit, gb->base + SPACEMIT_GCRER);
+ }
+
+ if (type & IRQ_TYPE_EDGE_FALLING) {
+ gb->irq_falling_edge |= bit;
+ writel(bit, gb->base + SPACEMIT_GSFER);
+ } else {
+ gb->irq_falling_edge &= ~bit;
+ writel(bit, gb->base + SPACEMIT_GCFER);
+ }
+
+ return 0;
+}
+
+static void spacemit_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(data);
+
+ seq_printf(p, "%s-%d", dev_name(gb->gc.parent), spacemit_gpio_bank_index(gb));
+}
+
+static struct irq_chip spacemit_gpio_chip = {
+ .name = "k1-gpio-irqchip",
+ .irq_ack = spacemit_gpio_irq_ack,
+ .irq_mask = spacemit_gpio_irq_mask,
+ .irq_unmask = spacemit_gpio_irq_unmask,
+ .irq_set_type = spacemit_gpio_irq_set_type,
+ .irq_print_chip = spacemit_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SKIP_SET_WAKE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static bool spacemit_of_node_instance_match(struct gpio_chip *gc, unsigned int i)
+{
+ struct spacemit_gpio_bank *gb = gpiochip_get_data(gc);
+ struct spacemit_gpio *sg = gb->sg;
+
+ if (i >= SPACEMIT_NR_BANKS)
+ return false;
+
+ return (gc == &sg->sgb[i].gc);
+}
+
+static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
+ void __iomem *regs,
+ int index, int irq)
+{
+ struct spacemit_gpio_bank *gb = &sg->sgb[index];
+ struct gpio_chip *gc = &gb->gc;
+ struct device *dev = sg->dev;
+ struct gpio_irq_chip *girq;
+ void __iomem *dat, *set, *clr, *dirin, *dirout;
+ int ret, bank_base[] = { 0x0, 0x4, 0x8, 0x100 };
+
+ gb->base = regs + bank_base[index];
+
+ dat = gb->base + SPACEMIT_GPLR;
+ set = gb->base + SPACEMIT_GPSR;
+ clr = gb->base + SPACEMIT_GPCR;
+ dirin = gb->base + SPACEMIT_GCDR;
+ dirout = gb->base + SPACEMIT_GSDR;
+
+ /* This registers 32 GPIO lines per bank */
+ ret = bgpio_init(gc, dev, 4, dat, set, clr, dirout, dirin,
+ BGPIOF_UNREADABLE_REG_SET | BGPIOF_UNREADABLE_REG_DIR);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init gpio chip\n");
+
+ gb->sg = sg;
+
+ gc->label = dev_name(dev);
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
+ gc->ngpio = SPACEMIT_NR_GPIOS_PER_BANK;
+ gc->base = -1;
+ gc->of_gpio_n_cells = 3;
+ gc->of_node_instance_match = spacemit_of_node_instance_match;
+
+ girq = &gc->irq;
+ girq->threaded = true;
+ girq->handler = handle_simple_irq;
+
+ gpio_irq_chip_set_chip(girq, &spacemit_gpio_chip);
+
+ /* Disable Interrupt */
+ writel(0, gb->base + SPACEMIT_GAPMASK);
+ /* Disable Edge Detection Settings */
+ writel(0x0, gb->base + SPACEMIT_GRER);
+ writel(0x0, gb->base + SPACEMIT_GFER);
+ /* Clear Interrupt */
+ writel(0xffffffff, gb->base + SPACEMIT_GCRER);
+ writel(0xffffffff, gb->base + SPACEMIT_GCFER);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ spacemit_gpio_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ gb->gc.label, gb);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to register IRQ\n");
+
+ ret = devm_gpiochip_add_data(dev, gc, gb);
+ if (ret)
+ return ret;
+
+ /* Distinguish the IRQ domain, for selecting three-cell mode */
+ irq_domain_update_bus_token(girq->domain, DOMAIN_BUS_WIRED);
+
+ return 0;
+}
+
+static int spacemit_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spacemit_gpio *sg;
+ struct clk *core_clk, *bus_clk;
+ void __iomem *regs;
+ int i, irq, ret;
+
+ sg = devm_kzalloc(dev, sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return -ENOMEM;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ sg->dev = dev;
+
+ core_clk = devm_clk_get_enabled(dev, "core");
+ if (IS_ERR(core_clk))
+ return dev_err_probe(dev, PTR_ERR(core_clk), "failed to get clock\n");
+
+ bus_clk = devm_clk_get_enabled(dev, "bus");
+ if (IS_ERR(bus_clk))
+ return dev_err_probe(dev, PTR_ERR(bus_clk), "failed to get bus clock\n");
+
+ for (i = 0; i < SPACEMIT_NR_BANKS; i++) {
+ ret = spacemit_gpio_add_bank(sg, regs, i, irq);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id spacemit_gpio_dt_ids[] = {
+ { .compatible = "spacemit,k1-gpio" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver spacemit_gpio_driver = {
+ .probe = spacemit_gpio_probe,
+ .driver = {
+ .name = "k1-gpio",
+ .of_match_table = spacemit_gpio_dt_ids,
+ },
+};
+module_platform_driver(spacemit_gpio_driver);
+
+MODULE_AUTHOR("Yixun Lan <dlan@gentoo.org>");
+MODULE_DESCRIPTION("GPIO driver for SpacemiT K1 SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index b6335cde455f..8cf676fd0a0b 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -183,7 +183,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (ret != 0)
return ret;
- tb10x_gpio->domain = irq_domain_add_linear(np,
+ tb10x_gpio->domain = irq_domain_create_linear(of_fwnode_handle(np),
tb10x_gpio->gc.ngpio,
&irq_generic_chip_ops, NULL);
if (!tb10x_gpio->domain) {
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 6895b65c86af..d27bfac6c9f5 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -823,6 +823,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *irq;
struct tegra_gpio *gpio;
struct device_node *np;
+ struct resource *res;
char **names;
int err;
@@ -842,19 +843,19 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->num_banks++;
/* get register apertures */
- gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security");
- if (IS_ERR(gpio->secure)) {
- gpio->secure = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(gpio->secure))
- return PTR_ERR(gpio->secure);
- }
-
- gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio");
- if (IS_ERR(gpio->base)) {
- gpio->base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(gpio->base))
- return PTR_ERR(gpio->base);
- }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "security");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gpio->secure = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpio->secure))
+ return PTR_ERR(gpio->secure);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
err = platform_irq_count(pdev);
if (err < 0)
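The tegra186 rework avoids devm_platform_ioremap_resource_byname(), which logs an error whenever the named resource is absent even though the fallback by index would succeed; the resource is now looked up first and only mapped afterwards. A hedged sketch of the lookup-by-name-with-index-fallback pattern:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/*
 * Map an MMIO aperture by name, falling back to a fixed index when the
 * firmware description does not provide reg-names.
 */
static void __iomem *foo_map_aperture(struct platform_device *pdev,
				      const char *name, unsigned int index)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (!res)
		res = platform_get_resource(pdev, IORESOURCE_MEM, index);

	/* devm_ioremap_resource() rejects a NULL resource with -EINVAL. */
	return devm_ioremap_resource(&pdev->dev, res);
}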
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index fad979797486..cb303a26f4d3 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -103,20 +103,26 @@ static void timbgpio_irq_disable(struct irq_data *d)
{
struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
int offset = d->irq - tgpio->irq_base;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
spin_lock_irqsave(&tgpio->lock, flags);
tgpio->last_ier &= ~(1UL << offset);
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
spin_unlock_irqrestore(&tgpio->lock, flags);
+
+ gpiochip_disable_irq(&tgpio->gpio, hwirq);
}
static void timbgpio_irq_enable(struct irq_data *d)
{
struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
int offset = d->irq - tgpio->irq_base;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
+ gpiochip_enable_irq(&tgpio->gpio, hwirq);
+
spin_lock_irqsave(&tgpio->lock, flags);
tgpio->last_ier |= 1UL << offset;
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
@@ -205,11 +211,13 @@ static void timbgpio_irq(struct irq_desc *desc)
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
}
-static struct irq_chip timbgpio_irqchip = {
+static const struct irq_chip timbgpio_irqchip = {
.name = "GPIO",
.irq_enable = timbgpio_irq_enable,
.irq_disable = timbgpio_irq_disable,
.irq_set_type = timbgpio_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int timbgpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index bcd692229c7c..0d17985a5fdc 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -502,7 +502,6 @@ static void gpio_twl4030_power_off_action(void *data)
static int gpio_twl4030_probe(struct platform_device *pdev)
{
struct twl4030_gpio_platform_data *pdata;
- struct device_node *node = pdev->dev.of_node;
struct gpio_twl4030_priv *priv;
int ret, irq_base;
@@ -524,8 +523,8 @@ static int gpio_twl4030_probe(struct platform_device *pdev)
return irq_base;
}
- irq_domain_add_legacy(node, TWL4030_GPIO_MAX, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), TWL4030_GPIO_MAX, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
ret = twl4030_sih_setup(&pdev->dev, TWL4030_MODULE_GPIO, irq_base);
if (ret < 0)
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 4dad7ce0c4dc..7de0d5b53d56 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -345,4 +345,6 @@ static struct platform_driver vf610_gpio_driver = {
.probe = vf610_gpio_probe,
};
-builtin_platform_driver(vf610_gpio_driver);
+module_platform_driver(vf610_gpio_driver);
+MODULE_DESCRIPTION("VF610 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index 13407fd4f0eb..eab6726953b4 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -401,10 +401,15 @@ static ssize_t gpio_virtuser_direction_do_write(struct file *file,
char buf[32], *trimmed;
int ret, dir, val = 0;
- ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
if (ret < 0)
return ret;
+ buf[ret] = '\0';
+
trimmed = strim(buf);
if (strcmp(trimmed, "input") == 0) {
@@ -623,12 +628,15 @@ static ssize_t gpio_virtuser_consumer_write(struct file *file,
char buf[GPIO_VIRTUSER_NAME_BUF_LEN + 2];
int ret;
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
ret = simple_write_to_buffer(buf, GPIO_VIRTUSER_NAME_BUF_LEN, ppos,
user_buf, count);
if (ret < 0)
return ret;
- buf[strlen(buf) - 1] = '\0';
+ buf[ret] = '\0';
ret = gpiod_set_consumer_name(data->ad.desc, buf);
if (ret)
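Both gpio-virtuser hunks fix the same bug class: simple_write_to_buffer() never NUL-terminates, and the old code either left the buffer unterminated or ran string operations over it. The fix rejects writes that cannot fit, reserves one byte for the terminator and terminates at the number of bytes actually copied. A hedged sketch of the safe pattern for a debugfs-style write handler:

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t foo_debugfs_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t ret;

	/* Reject anything that cannot fit together with the terminator. */
	if (count >= sizeof(buf))
		return -EINVAL;

	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
	if (ret < 0)
		return ret;

	/* simple_write_to_buffer() copies bytes but never terminates. */
	buf[ret] = '\0';

	/* ... parse strim(buf) here ... */
	return count;
}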
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 48b829733b15..b51b1fa726bb 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -103,12 +103,32 @@ static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
-static struct irq_chip xgene_gpio_sb_irq_chip = {
+static void xgene_gpio_sb_irq_mask(struct irq_data *d)
+{
+ struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d);
+
+ irq_chip_mask_parent(d);
+
+ gpiochip_disable_irq(&priv->gc, d->hwirq);
+}
+
+static void xgene_gpio_sb_irq_unmask(struct irq_data *d)
+{
+ struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d);
+
+ gpiochip_enable_irq(&priv->gc, d->hwirq);
+
+ irq_chip_unmask_parent(d);
+}
+
+static const struct irq_chip xgene_gpio_sb_irq_chip = {
.name = "sbgpio",
.irq_eoi = irq_chip_eoi_parent,
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
+ .irq_mask = xgene_gpio_sb_irq_mask,
+ .irq_unmask = xgene_gpio_sb_irq_unmask,
.irq_set_type = xgene_gpio_sb_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index be81fa2b17ab..3dae63f3ea21 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -1011,6 +1011,7 @@ static void zynq_gpio_remove(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n");
+ device_init_wakeup(&pdev->dev, 0);
gpiochip_remove(&gpio->chip);
device_set_wakeup_capable(&pdev->dev, 0);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi-core.c
index 69caa35c58df..12b24a717e43 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -23,29 +23,6 @@
#include "gpiolib.h"
#include "gpiolib-acpi.h"
-static int run_edge_events_on_boot = -1;
-module_param(run_edge_events_on_boot, int, 0444);
-MODULE_PARM_DESC(run_edge_events_on_boot,
- "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
-
-static char *ignore_wake;
-module_param(ignore_wake, charp, 0444);
-MODULE_PARM_DESC(ignore_wake,
- "controller@pin combos on which to ignore the ACPI wake flag "
- "ignore_wake=controller@pin[,controller@pin[,...]]");
-
-static char *ignore_interrupt;
-module_param(ignore_interrupt, charp, 0444);
-MODULE_PARM_DESC(ignore_interrupt,
- "controller@pin combos on which to ignore interrupt "
- "ignore_interrupt=controller@pin[,controller@pin[,...]]");
-
-struct acpi_gpiolib_dmi_quirk {
- bool no_edge_events_on_boot;
- char *ignore_wake;
- char *ignore_interrupt;
-};
-
/**
* struct acpi_gpio_event - ACPI GPIO event handler data
*
@@ -96,10 +73,10 @@ struct acpi_gpio_chip {
* @adev: reference to ACPI device which consumes GPIO resource
* @flags: GPIO initialization flags
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
+ * @wake_capable: wake capability as provided by ACPI
* @pin_config: pin bias as provided by ACPI
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
- * @wake_capable: wake capability as provided by ACPI
* @debounce: debounce timeout as provided by ACPI
* @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
*/
@@ -107,25 +84,14 @@ struct acpi_gpio_info {
struct acpi_device *adev;
enum gpiod_flags flags;
bool gpioint;
+ bool wake_capable;
int pin_config;
int polarity;
int triggering;
- bool wake_capable;
unsigned int debounce;
unsigned int quirks;
};
-/*
- * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init
- * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
- * late_initcall_sync() handler, so that other builtin drivers can register their
- * OpRegions before the event handlers can run. This list contains GPIO chips
- * for which the acpi_gpiochip_request_irqs() call has been deferred.
- */
-static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
-static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
-static bool acpi_gpio_deferred_req_irqs_done;
-
static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data)
{
/* First check the actual GPIO device */
@@ -268,7 +234,7 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
event->irq_requested = true;
/* Make sure we trigger the initial state of edge-triggered IRQs */
- if (run_edge_events_on_boot &&
+ if (acpi_gpio_need_run_edge_events_on_boot() &&
(event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
value = gpiod_get_raw_value_cansleep(event->desc);
if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
@@ -350,42 +316,6 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
return desc;
}
-static bool acpi_gpio_in_ignore_list(const char *ignore_list, const char *controller_in,
- unsigned int pin_in)
-{
- const char *controller, *pin_str;
- unsigned int pin;
- char *endp;
- int len;
-
- controller = ignore_list;
- while (controller) {
- pin_str = strchr(controller, '@');
- if (!pin_str)
- goto err;
-
- len = pin_str - controller;
- if (len == strlen(controller_in) &&
- strncmp(controller, controller_in, len) == 0) {
- pin = simple_strtoul(pin_str + 1, &endp, 10);
- if (*endp != 0 && *endp != ',')
- goto err;
-
- if (pin == pin_in)
- return true;
- }
-
- controller = strchr(controller, ',');
- if (controller)
- controller++;
- }
-
- return false;
-err:
- pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list);
- return false;
-}
-
static bool acpi_gpio_irq_is_wake(struct device *parent,
const struct acpi_resource_gpio *agpio)
{
@@ -394,7 +324,7 @@ static bool acpi_gpio_irq_is_wake(struct device *parent,
if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
return false;
- if (acpi_gpio_in_ignore_list(ignore_wake, dev_name(parent), pin)) {
+ if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_WAKE, dev_name(parent), pin)) {
dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
return false;
}
@@ -437,7 +367,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
if (!handler)
return AE_OK;
- if (acpi_gpio_in_ignore_list(ignore_interrupt, dev_name(chip->parent), pin)) {
+ if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_INTERRUPT, dev_name(chip->parent), pin)) {
dev_info(chip->parent, "Ignoring interrupt on pin %u\n", pin);
return AE_OK;
}
@@ -525,7 +455,6 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
struct acpi_gpio_chip *acpi_gpio;
acpi_handle handle;
acpi_status status;
- bool defer;
if (!chip->parent || !chip->to_irq)
return;
@@ -544,14 +473,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
acpi_walk_resources(handle, METHOD_NAME__AEI,
acpi_gpiochip_alloc_event, acpi_gpio);
- mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
- defer = !acpi_gpio_deferred_req_irqs_done;
- if (defer)
- list_add(&acpi_gpio->deferred_req_irqs_list_entry,
- &acpi_gpio_deferred_req_irqs_list);
- mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
-
- if (defer)
+ if (acpi_gpio_add_to_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry))
return;
acpi_gpiochip_request_irqs(acpi_gpio);
@@ -583,10 +505,7 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status))
return;
- mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
- if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
- list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
- mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+ acpi_gpio_remove_from_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry);
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
if (event->irq_requested) {
@@ -604,6 +523,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
}
EXPORT_SYMBOL_GPL(acpi_gpiochip_free_interrupts);
+void __init acpi_gpio_process_deferred_list(struct list_head *list)
+{
+ struct acpi_gpio_chip *acpi_gpio, *tmp;
+
+ list_for_each_entry_safe(acpi_gpio, tmp, list, deferred_req_irqs_list_entry)
+ acpi_gpiochip_request_irqs(acpi_gpio);
+}
+
int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios)
{
@@ -653,12 +580,12 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
for (gm = adev->driver_gpios; gm->name; gm++)
if (!strcmp(name, gm->name) && gm->data && index < gm->size) {
- const struct acpi_gpio_params *par = gm->data + index;
+ const struct acpi_gpio_params *params = gm->data + index;
args->fwnode = acpi_fwnode_handle(adev);
- args->args[0] = par->crs_entry_index;
- args->args[1] = par->line_index;
- args->args[2] = par->active_low;
+ args->args[0] = params->crs_entry_index;
+ args->args[1] = params->line_index;
+ args->args[2] = params->active_low;
args->nargs = 3;
*quirks = gm->quirks;
@@ -743,10 +670,8 @@ static int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
}
struct acpi_gpio_lookup {
- struct acpi_gpio_info info;
- int index;
- u16 pin_index;
- bool active_low;
+ struct acpi_gpio_params params;
+ struct acpi_gpio_info *info;
struct gpio_desc *desc;
int n;
};
@@ -754,6 +679,8 @@ struct acpi_gpio_lookup {
static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
{
struct acpi_gpio_lookup *lookup = data;
+ struct acpi_gpio_params *params = &lookup->params;
+ struct acpi_gpio_info *info = lookup->info;
if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
return 1;
@@ -764,26 +691,26 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
struct gpio_desc *desc;
u16 pin_index;
- if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
- lookup->index++;
+ if (info->quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
+ params->crs_entry_index++;
- if (lookup->n++ != lookup->index)
+ if (lookup->n++ != params->crs_entry_index)
return 1;
- pin_index = lookup->pin_index;
+ pin_index = params->line_index;
if (pin_index >= agpio->pin_table_length)
return 1;
- if (lookup->info.quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER)
+ if (info->quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER)
desc = gpio_to_desc(agpio->pin_table[pin_index]);
else
desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
agpio->pin_table[pin_index]);
lookup->desc = desc;
- lookup->info.pin_config = agpio->pin_config;
- lookup->info.debounce = agpio->debounce_timeout;
- lookup->info.gpioint = gpioint;
- lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio);
+ info->pin_config = agpio->pin_config;
+ info->debounce = agpio->debounce_timeout;
+ info->gpioint = gpioint;
+ info->wake_capable = acpi_gpio_irq_is_wake(&info->adev->dev, agpio);
/*
* Polarity and triggering are only specified for GpioInt
@@ -792,23 +719,23 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
* - ACPI_ACTIVE_LOW == GPIO_ACTIVE_LOW
* - ACPI_ACTIVE_HIGH == GPIO_ACTIVE_HIGH
*/
- if (lookup->info.gpioint) {
- lookup->info.polarity = agpio->polarity;
- lookup->info.triggering = agpio->triggering;
+ if (info->gpioint) {
+ info->polarity = agpio->polarity;
+ info->triggering = agpio->triggering;
} else {
- lookup->info.polarity = lookup->active_low;
+ info->polarity = params->active_low;
}
- lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio, lookup->info.polarity);
+ info->flags = acpi_gpio_to_gpiod_flags(agpio, info->polarity);
}
return 1;
}
-static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
- struct acpi_gpio_info *info)
+static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup)
{
- struct acpi_device *adev = lookup->info.adev;
+ struct acpi_gpio_info *info = lookup->info;
+ struct acpi_device *adev = info->adev;
struct list_head res_list;
int ret;
@@ -825,22 +752,22 @@ static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
if (!lookup->desc)
return -ENOENT;
- if (info)
- *info = lookup->info;
return 0;
}
-static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
- const char *propname, int index,
+static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode, const char *propname,
struct acpi_gpio_lookup *lookup)
{
struct fwnode_reference_args args;
+ struct acpi_gpio_params *params = &lookup->params;
+ struct acpi_gpio_info *info = lookup->info;
+ unsigned int index = params->crs_entry_index;
unsigned int quirks = 0;
int ret;
memset(&args, 0, sizeof(args));
- ret = __acpi_node_get_property_reference(fwnode, propname, index, 3,
- &args);
+
+ ret = __acpi_node_get_property_reference(fwnode, propname, index, 3, &args);
if (ret) {
struct acpi_device *adev;
@@ -857,12 +784,12 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
if (args.nargs != 3)
return -EPROTO;
- lookup->index = args.args[0];
- lookup->pin_index = args.args[1];
- lookup->active_low = !!args.args[2];
+ params->crs_entry_index = args.args[0];
+ params->line_index = args.args[1];
+ params->active_low = !!args.args[2];
- lookup->info.adev = to_acpi_device_node(args.fwnode);
- lookup->info.quirks = quirks;
+ info->adev = to_acpi_device_node(args.fwnode);
+ info->quirks = quirks;
return 0;
}
@@ -871,96 +798,83 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
* acpi_get_gpiod_by_index() - get a GPIO descriptor from device resources
* @adev: pointer to a ACPI device to get GPIO from
* @propname: Property name of the GPIO (optional)
- * @index: index of GpioIo/GpioInt resource (starting from %0)
- * @info: info pointer to fill in (optional)
+ * @lookup: pointer to struct acpi_gpio_lookup to fill in
*
- * Function goes through ACPI resources for @adev and based on @index looks
+ * Function goes through ACPI resources for @adev and based on @lookup.index looks
* up a GpioIo/GpioInt resource, translates it to the Linux GPIO descriptor,
- * and returns it. @index matches GpioIo/GpioInt resources only so if there
- * are total %3 GPIO resources, the index goes from %0 to %2.
+ * and returns it. @lookup.index matches GpioIo/GpioInt resources only so if there
+ * are total 3 GPIO resources, the index goes from 0 to 2.
*
* If @propname is specified the GPIO is looked using device property. In
* that case @index is used to select the GPIO entry in the property value
* (in case of multiple).
*
* Returns:
- * GPIO descriptor to use with Linux generic GPIO API.
- * If the GPIO cannot be translated or there is an error an ERR_PTR is
- * returned.
+ * 0 on success, negative errno on failure.
+ *
+ * The @lookup is filled with the GPIO descriptor to use with the Linux generic GPIO API.
+ * If the GPIO cannot be translated, an error will be returned.
*
* Note: if the GPIO resource has multiple entries in the pin list, this
* function only returns the first.
*/
-static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
- const char *propname,
- int index,
- struct acpi_gpio_info *info)
+static int acpi_get_gpiod_by_index(struct acpi_device *adev, const char *propname,
+ struct acpi_gpio_lookup *lookup)
{
- struct acpi_gpio_lookup lookup;
+ struct acpi_gpio_params *params = &lookup->params;
+ struct acpi_gpio_info *info = lookup->info;
int ret;
- memset(&lookup, 0, sizeof(lookup));
- lookup.index = index;
-
if (propname) {
dev_dbg(&adev->dev, "GPIO: looking up %s\n", propname);
- ret = acpi_gpio_property_lookup(acpi_fwnode_handle(adev),
- propname, index, &lookup);
+ ret = acpi_gpio_property_lookup(acpi_fwnode_handle(adev), propname, lookup);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %u %u\n",
- dev_name(&lookup.info.adev->dev), lookup.index,
- lookup.pin_index, lookup.active_low);
+ dev_dbg(&adev->dev, "GPIO: _DSD returned %s %u %u %u\n",
+ dev_name(&info->adev->dev),
+ params->crs_entry_index, params->line_index, params->active_low);
} else {
- dev_dbg(&adev->dev, "GPIO: looking up %d in _CRS\n", index);
- lookup.info.adev = adev;
+ dev_dbg(&adev->dev, "GPIO: looking up %u in _CRS\n", params->crs_entry_index);
+ info->adev = adev;
}
- ret = acpi_gpio_resource_lookup(&lookup, info);
- return ret ? ERR_PTR(ret) : lookup.desc;
+ return acpi_gpio_resource_lookup(lookup);
}
/**
* acpi_get_gpiod_from_data() - get a GPIO descriptor from ACPI data node
* @fwnode: pointer to an ACPI firmware node to get the GPIO information from
* @propname: Property name of the GPIO
- * @index: index of GpioIo/GpioInt resource (starting from %0)
- * @info: info pointer to fill in (optional)
+ * @lookup: pointer to struct acpi_gpio_lookup to fill in
*
* This function uses the property-based GPIO lookup to get to the GPIO
* resource with the relevant information from a data-only ACPI firmware node
* and uses that to obtain the GPIO descriptor to return.
*
* Returns:
- * GPIO descriptor to use with Linux generic GPIO API.
- * If the GPIO cannot be translated or there is an error an ERR_PTR is
- * returned.
+ * 0 on success, negative errno on failure.
+ *
+ * The @lookup is filled with the GPIO descriptor to use with the Linux generic GPIO API.
+ * If the GPIO cannot be translated, an error will be returned.
*/
-static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode,
- const char *propname,
- int index,
- struct acpi_gpio_info *info)
+static int acpi_get_gpiod_from_data(struct fwnode_handle *fwnode, const char *propname,
+ struct acpi_gpio_lookup *lookup)
{
- struct acpi_gpio_lookup lookup;
int ret;
if (!is_acpi_data_node(fwnode))
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
if (!propname)
- return ERR_PTR(-EINVAL);
-
- memset(&lookup, 0, sizeof(lookup));
- lookup.index = index;
+ return -EINVAL;
- ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup);
+ ret = acpi_gpio_property_lookup(fwnode, propname, lookup);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- ret = acpi_gpio_resource_lookup(&lookup, info);
- return ret ? ERR_PTR(ret) : lookup.desc;
+ return acpi_gpio_resource_lookup(lookup);
}
static bool acpi_can_fallback_to_crs(struct acpi_device *adev,
@@ -982,17 +896,25 @@ __acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int
bool can_fallback, struct acpi_gpio_info *info)
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
+ struct acpi_gpio_lookup lookup;
struct gpio_desc *desc;
char propname[32];
+ int ret;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.params.crs_entry_index = idx;
+ lookup.info = info;
/* Try first from _DSD */
for_each_gpio_property_name(propname, con_id) {
if (adev)
- desc = acpi_get_gpiod_by_index(adev,
- propname, idx, info);
+ ret = acpi_get_gpiod_by_index(adev, propname, &lookup);
else
- desc = acpi_get_gpiod_from_data(fwnode,
- propname, idx, info);
+ ret = acpi_get_gpiod_from_data(fwnode, propname, &lookup);
+ if (ret)
+ continue;
+
+ desc = lookup.desc;
if (PTR_ERR(desc) == -EPROBE_DEFER)
return desc;
@@ -1001,8 +923,13 @@ __acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int
}
/* Then from plain _CRS GPIOs */
- if (can_fallback)
- return acpi_get_gpiod_by_index(adev, NULL, idx, info);
+ if (can_fallback) {
+ ret = acpi_get_gpiod_by_index(adev, NULL, &lookup);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return lookup.desc;
+ }
return ERR_PTR(-ENOENT);
}
@@ -1488,248 +1415,3 @@ int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
}
return count ? count : -ENOENT;
}
-
-/* Run deferred acpi_gpiochip_request_irqs() */
-static int __init acpi_gpio_handle_deferred_request_irqs(void)
-{
- struct acpi_gpio_chip *acpi_gpio, *tmp;
-
- mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
- list_for_each_entry_safe(acpi_gpio, tmp,
- &acpi_gpio_deferred_req_irqs_list,
- deferred_req_irqs_list_entry)
- acpi_gpiochip_request_irqs(acpi_gpio);
-
- acpi_gpio_deferred_req_irqs_done = true;
- mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
-
- return 0;
-}
-/* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
-
-static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
- {
- /*
- * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
- * a non existing micro-USB-B connector which puts the HDMI
- * DDC pins in GPIO mode, breaking HDMI support.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .no_edge_events_on_boot = true,
- },
- },
- {
- /*
- * The Terra Pad 1061 has a micro-USB-B id-pin handler, which
- * instead of controlling the actual micro-USB-B turns the 5V
- * boost for its USB-A connector off. The actual micro-USB-B
- * connector is wired for charging only.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .no_edge_events_on_boot = true,
- },
- },
- {
- /*
- * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FFC:02 pin 12, causing spurious wakeups.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FC:02@12",
- },
- },
- {
- /*
- * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FF:01 pin 0, causing spurious wakeups.
- * When suspending by closing the LID, the power to the USB
- * keyboard is turned off, causing INT0002 ACPI events to
- * trigger once the XHCI controller notices the keyboard is
- * gone. So INT0002 events cause spurious wakeups too. Ignoring
- * EC wakes breaks wakeup when opening the lid, the user needs
- * to press the power-button to wakeup the system. The
- * alternative is suspend simply not working, which is worse.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FF:01@0,INT0002:00@2",
- },
- },
- {
- /*
- * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FC:02 pin 28, causing spurious wakeups.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
- DMI_MATCH(DMI_BOARD_NAME, "815D"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FC:02@28",
- },
- },
- {
- /*
- * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FF:01 pin 0, causing spurious wakeups.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
- DMI_MATCH(DMI_BOARD_NAME, "813E"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FF:01@0",
- },
- },
- {
- /*
- * Interrupt storm caused from edge triggered floating pin
- * Found in BIOS UX325UAZ.300
- * https://bugzilla.kernel.org/show_bug.cgi?id=216208
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_interrupt = "AMDI0030:00@18",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 1.7.8
- * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
- */
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "ELAN0415:00@9",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 1.7.8
- * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
- */
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "ELAN0415:00@9",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 1.7.7
- */
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "SYNA1202:00@16",
- },
- },
- {
- /*
- * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
- * a "dolby" button. At the ACPI level an _AEI event-handler
- * is connected which sets an ACPI variable to 1 on both
- * edges. This variable can be polled + cleared to 0 using
- * WMI. But since the variable is set on both edges the WMI
- * interface is pretty useless even when polling.
- * So instead the x86-android-tablets code instantiates
- * a gpio-keys platform device for it.
- * Ignore the _AEI handler for the pin, so that it is not busy.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_interrupt = "INT33FC:00@3",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 0.35
- * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
- DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "PNP0C50:00@8",
- },
- },
- {
- /*
- * Spurious wakeups from GPIO 11
- * Found in BIOS 1.04
- * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_interrupt = "AMDI0030:00@11",
- },
- },
- {} /* Terminating entry */
-};
-
-static int __init acpi_gpio_setup_params(void)
-{
- const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
- const struct dmi_system_id *id;
-
- id = dmi_first_match(gpiolib_acpi_quirks);
- if (id)
- quirk = id->driver_data;
-
- if (run_edge_events_on_boot < 0) {
- if (quirk && quirk->no_edge_events_on_boot)
- run_edge_events_on_boot = 0;
- else
- run_edge_events_on_boot = 1;
- }
-
- if (ignore_wake == NULL && quirk && quirk->ignore_wake)
- ignore_wake = quirk->ignore_wake;
-
- if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt)
- ignore_interrupt = quirk->ignore_interrupt;
-
- return 0;
-}
-
-/* Directly after dmi_setup() which runs as core_initcall() */
-postcore_initcall(acpi_gpio_setup_params);
diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c
new file mode 100644
index 000000000000..219667315b2c
--- /dev/null
+++ b/drivers/gpio/gpiolib-acpi-quirks.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI quirks for GPIO ACPI helpers
+ *
+ * Author: Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/kstrtox.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "gpiolib-acpi.h"
+
+static int run_edge_events_on_boot = -1;
+module_param(run_edge_events_on_boot, int, 0444);
+MODULE_PARM_DESC(run_edge_events_on_boot,
+ "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
+
+static char *ignore_wake;
+module_param(ignore_wake, charp, 0444);
+MODULE_PARM_DESC(ignore_wake,
+ "controller@pin combos on which to ignore the ACPI wake flag "
+ "ignore_wake=controller@pin[,controller@pin[,...]]");
+
+static char *ignore_interrupt;
+module_param(ignore_interrupt, charp, 0444);
+MODULE_PARM_DESC(ignore_interrupt,
+ "controller@pin combos on which to ignore interrupt "
+ "ignore_interrupt=controller@pin[,controller@pin[,...]]");
+
+/*
+ * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init
+ * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
+ * late_initcall_sync() handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run. This list contains GPIO chips
+ * for which the acpi_gpiochip_request_irqs() call has been deferred.
+ */
+static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+static bool acpi_gpio_deferred_req_irqs_done;
+
+bool acpi_gpio_add_to_deferred_list(struct list_head *list)
+{
+ bool defer;
+
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ defer = !acpi_gpio_deferred_req_irqs_done;
+ if (defer)
+ list_add(list, &acpi_gpio_deferred_req_irqs_list);
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+ return defer;
+}
+
+void acpi_gpio_remove_from_deferred_list(struct list_head *list)
+{
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ if (!list_empty(list))
+ list_del_init(list);
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+}
+
+int acpi_gpio_need_run_edge_events_on_boot(void)
+{
+ return run_edge_events_on_boot;
+}
+
+bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list,
+ const char *controller_in, unsigned int pin_in)
+{
+ const char *ignore_list, *controller, *pin_str;
+ unsigned int pin;
+ char *endp;
+ int len;
+
+ switch (list) {
+ case ACPI_GPIO_IGNORE_WAKE:
+ ignore_list = ignore_wake;
+ break;
+ case ACPI_GPIO_IGNORE_INTERRUPT:
+ ignore_list = ignore_interrupt;
+ break;
+ default:
+ return false;
+ }
+
+ controller = ignore_list;
+ while (controller) {
+ pin_str = strchr(controller, '@');
+ if (!pin_str)
+ goto err;
+
+ len = pin_str - controller;
+ if (len == strlen(controller_in) &&
+ strncmp(controller, controller_in, len) == 0) {
+ pin = simple_strtoul(pin_str + 1, &endp, 10);
+ if (*endp != 0 && *endp != ',')
+ goto err;
+
+ if (pin == pin_in)
+ return true;
+ }
+
+ controller = strchr(controller, ',');
+ if (controller)
+ controller++;
+ }
+
+ return false;
+err:
+ pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list);
+ return false;
+}
+
+/* Run deferred acpi_gpiochip_request_irqs() */
+static int __init acpi_gpio_handle_deferred_request_irqs(void)
+{
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ acpi_gpio_process_deferred_list(&acpi_gpio_deferred_req_irqs_list);
+ acpi_gpio_deferred_req_irqs_done = true;
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+ return 0;
+}
+/* We must use _sync so that this runs after the first deferred_probe run */
+late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
+
+struct acpi_gpiolib_dmi_quirk {
+ bool no_edge_events_on_boot;
+ char *ignore_wake;
+ char *ignore_interrupt;
+};
+
+static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ {
+ /*
+ * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
+ * a non existing micro-USB-B connector which puts the HDMI
+ * DDC pins in GPIO mode, breaking HDMI support.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .no_edge_events_on_boot = true,
+ },
+ },
+ {
+ /*
+ * The Terra Pad 1061 has a micro-USB-B id-pin handler, which
+ * instead of controlling the actual micro-USB-B turns the 5V
+ * boost for its USB-A connector off. The actual micro-USB-B
+ * connector is wired for charging only.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .no_edge_events_on_boot = true,
+ },
+ },
+ {
+ /*
+ * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FFC:02 pin 12, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FC:02@12",
+ },
+ },
+ {
+ /*
+ * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+ * When suspending by closing the LID, the power to the USB
+ * keyboard is turned off, causing INT0002 ACPI events to
+ * trigger once the XHCI controller notices the keyboard is
+ * gone. So INT0002 events cause spurious wakeups too. Ignoring
+ * EC wakes breaks wakeup when opening the lid, the user needs
+ * to press the power-button to wakeup the system. The
+ * alternative is suspend simply not working, which is worse.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FF:01@0,INT0002:00@2",
+ },
+ },
+ {
+ /*
+ * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FC:02 pin 28, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_MATCH(DMI_BOARD_NAME, "815D"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FC:02@28",
+ },
+ },
+ {
+ /*
+ * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_MATCH(DMI_BOARD_NAME, "813E"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FF:01@0",
+ },
+ },
+ {
+ /*
+ * Interrupt storm caused from edge triggered floating pin
+ * Found in BIOS UX325UAZ.300
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216208
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@18",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.8
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ELAN0415:00@9",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.8
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ELAN0415:00@9",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.7
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "SYNA1202:00@16",
+ },
+ },
+ {
+ /*
+ * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
+ * a "dolby" button. At the ACPI level an _AEI event-handler
+ * is connected which sets an ACPI variable to 1 on both
+ * edges. This variable can be polled + cleared to 0 using
+ * WMI. But since the variable is set on both edges the WMI
+ * interface is pretty useless even when polling.
+ * So instead the x86-android-tablets code instantiates
+ * a gpio-keys platform device for it.
+	 * Ignore the _AEI handler for the pin, so that it is not marked busy.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "INT33FC:00@3",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 0.35
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "PNP0C50:00@8",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from GPIO 11
+ * Found in BIOS 1.04
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@11",
+ },
+ },
+ {} /* Terminating entry */
+};
+
+static int __init acpi_gpio_setup_params(void)
+{
+ const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
+ const struct dmi_system_id *id;
+
+ id = dmi_first_match(gpiolib_acpi_quirks);
+ if (id)
+ quirk = id->driver_data;
+
+ if (run_edge_events_on_boot < 0) {
+ if (quirk && quirk->no_edge_events_on_boot)
+ run_edge_events_on_boot = 0;
+ else
+ run_edge_events_on_boot = 1;
+ }
+
+ if (ignore_wake == NULL && quirk && quirk->ignore_wake)
+ ignore_wake = quirk->ignore_wake;
+
+ if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt)
+ ignore_interrupt = quirk->ignore_interrupt;
+
+ return 0;
+}
+
+/* Directly after dmi_setup() which runs as core_initcall() */
+postcore_initcall(acpi_gpio_setup_params);
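
The table above pairs a DMI match with the controller@pin strings that
acpi_gpio_setup_params() copies into ignore_wake/ignore_interrupt when the
user has not set them; user-supplied module parameters keep precedence. As a
sketch only (the vendor/product strings and "FOOBAR0001:00@7" below are
placeholders, not part of this series), a new board quirk would be one more
entry ahead of the terminating entry:

	{
		/* Hypothetical board: ignore a spurious wake pin. */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "VENDOR"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PRODUCT"),
		},
		.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
			.ignore_wake = "FOOBAR0001:00@7",
		},
	},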
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index 7e1c51d04040..a90267470a4e 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -58,4 +58,19 @@ static inline int acpi_gpio_count(const struct fwnode_handle *fwnode,
}
#endif
+void acpi_gpio_process_deferred_list(struct list_head *list);
+
+bool acpi_gpio_add_to_deferred_list(struct list_head *list);
+void acpi_gpio_remove_from_deferred_list(struct list_head *list);
+
+int acpi_gpio_need_run_edge_events_on_boot(void);
+
+enum acpi_gpio_ignore_list {
+ ACPI_GPIO_IGNORE_WAKE,
+ ACPI_GPIO_IGNORE_INTERRUPT,
+};
+
+bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list,
+ const char *controller_in, unsigned int pin_in);
+
#endif /* GPIOLIB_ACPI_H */
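
The helpers declared above let the ACPI GPIO code check a controller/pin pair
against the quirk/parameter ignore lists. A minimal caller sketch (the
controller string is reused from the quirk table purely for illustration):

	/* Skip wake setup when this controller/pin pair has been quirked out. */
	if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_WAKE, "INT33FF:01", 0))
		return;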
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 107d75558b5a..e6a289fa0f8f 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1366,9 +1366,6 @@ static long linereq_set_values(struct linereq *lr, void __user *ip)
/* scan requested lines to determine the subset to be set */
for (num_set = 0, i = 0; i < lr->num_lines; i++) {
if (lv.mask & BIT_ULL(i)) {
- /* setting inputs is not allowed */
- if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
- return -EPERM;
/* add to compacted values */
if (lv.bits & BIT_ULL(i))
__set_bit(num_set, vals);
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 08205f355ceb..4d5f83b17624 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -6,7 +6,7 @@
* Copyright (c) 2011 John Crispin <john@phrozen.org>
*/
-#include <linux/device.h>
+#include <linux/device/devres.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/gfp.h>
@@ -19,32 +19,14 @@
struct fwnode_handle;
struct lock_class_key;
-static void devm_gpiod_release(struct device *dev, void *res)
+static void devm_gpiod_release(void *desc)
{
- struct gpio_desc **desc = res;
-
- gpiod_put(*desc);
-}
-
-static int devm_gpiod_match(struct device *dev, void *res, void *data)
-{
- struct gpio_desc **this = res, **gpio = data;
-
- return *this == *gpio;
-}
-
-static void devm_gpiod_release_array(struct device *dev, void *res)
-{
- struct gpio_descs **descs = res;
-
- gpiod_put_array(*descs);
+ gpiod_put(desc);
}
-static int devm_gpiod_match_array(struct device *dev, void *res, void *data)
+static void devm_gpiod_release_array(void *descs)
{
- struct gpio_descs **this = res, **gpios = data;
-
- return *this == *gpios;
+ gpiod_put_array(descs);
}
/**
@@ -114,8 +96,8 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
unsigned int idx,
enum gpiod_flags flags)
{
- struct gpio_desc **dr;
struct gpio_desc *desc;
+ int ret;
desc = gpiod_get_index(dev, con_id, idx, flags);
if (IS_ERR(desc))
@@ -126,23 +108,16 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
* already under resource management by this device.
*/
if (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
- struct devres *dres;
+ bool dres;
- dres = devres_find(dev, devm_gpiod_release,
- devm_gpiod_match, &desc);
+ dres = devm_is_action_added(dev, devm_gpiod_release, desc);
if (dres)
return desc;
}
- dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
- GFP_KERNEL);
- if (!dr) {
- gpiod_put(desc);
- return ERR_PTR(-ENOMEM);
- }
-
- *dr = desc;
- devres_add(dev, dr);
+ ret = devm_add_action_or_reset(dev, devm_gpiod_release, desc);
+ if (ret)
+ return ERR_PTR(ret);
return desc;
}
@@ -171,22 +146,16 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
enum gpiod_flags flags,
const char *label)
{
- struct gpio_desc **dr;
struct gpio_desc *desc;
-
- dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
- GFP_KERNEL);
- if (!dr)
- return ERR_PTR(-ENOMEM);
+ int ret;
desc = gpiod_find_and_request(dev, fwnode, con_id, index, flags, label, false);
- if (IS_ERR(desc)) {
- devres_free(dr);
+ if (IS_ERR(desc))
return desc;
- }
- *dr = desc;
- devres_add(dev, dr);
+ ret = devm_add_action_or_reset(dev, devm_gpiod_release, desc);
+ if (ret)
+ return ERR_PTR(ret);
return desc;
}
@@ -244,22 +213,16 @@ struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
- struct gpio_descs **dr;
struct gpio_descs *descs;
-
- dr = devres_alloc(devm_gpiod_release_array,
- sizeof(struct gpio_descs *), GFP_KERNEL);
- if (!dr)
- return ERR_PTR(-ENOMEM);
+ int ret;
descs = gpiod_get_array(dev, con_id, flags);
- if (IS_ERR(descs)) {
- devres_free(dr);
+ if (IS_ERR(descs))
return descs;
- }
- *dr = descs;
- devres_add(dev, dr);
+ ret = devm_add_action_or_reset(dev, devm_gpiod_release_array, descs);
+ if (ret)
+ return ERR_PTR(ret);
return descs;
}
@@ -307,8 +270,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_array_optional);
*/
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
{
- WARN_ON(devres_release(dev, devm_gpiod_release, devm_gpiod_match,
- &desc));
+ devm_release_action(dev, devm_gpiod_release, desc);
}
EXPORT_SYMBOL_GPL(devm_gpiod_put);
@@ -317,24 +279,28 @@ EXPORT_SYMBOL_GPL(devm_gpiod_put);
* @dev: GPIO consumer
* @desc: GPIO descriptor to remove resource management from
*
+ * *DEPRECATED*
+ * This function should not be used. It's been provided as a workaround for
+ * resource ownership issues in the regulator framework and should be replaced
+ * with a better solution.
+ *
* Remove resource management from a GPIO descriptor. This is needed when
* you want to hand over lifecycle management of a descriptor to another
* mechanism.
*/
-
void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc)
{
int ret;
if (IS_ERR_OR_NULL(desc))
return;
- ret = devres_destroy(dev, devm_gpiod_release,
- devm_gpiod_match, &desc);
+
/*
* If the GPIO descriptor is requested as nonexclusive, we
* may call this function several times on the same descriptor
* so it is OK if devres_destroy() returns -ENOENT.
*/
+ ret = devm_remove_action_nowarn(dev, devm_gpiod_release, desc);
if (ret == -ENOENT)
return;
/* Anything else we should warn about */
@@ -353,8 +319,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_unhinge);
*/
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
{
- WARN_ON(devres_release(dev, devm_gpiod_release_array,
- devm_gpiod_match_array, &descs));
+ devm_remove_action(dev, devm_gpiod_release_array, descs);
}
EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
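
The conversion above drops the devres_alloc()/devres_add()/devres_release()
boilerplate in favour of devm_add_action_or_reset(), which registers a plain
callback to run on driver detach, or immediately when registration fails. A
minimal sketch of the same pattern in a consumer driver (the "reset" GPIO name
and the example_* identifiers are invented):

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static void example_put_gpio(void *data)
{
	gpiod_put(data);		/* runs automatically on unbind */
}

static int example_request_reset_gpio(struct device *dev)
{
	struct gpio_desc *desc;

	desc = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* On failure the action runs right away, releasing the GPIO. */
	return devm_add_action_or_reset(dev, example_put_gpio, desc);
}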
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index eb667f8f1ead..73ba73b31cb1 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -193,6 +193,8 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
*/
{ "himax,hx8357", "gpios-reset", false },
{ "himax,hx8369", "gpios-reset", false },
+#endif
+#if IS_ENABLED(CONFIG_MTD_NAND_JZ4780)
/*
* The rb-gpios semantics was undocumented and qi,lb60 (along with
* the ingenic driver) got it wrong. The active state encodes the
@@ -222,6 +224,15 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
*/
{ "lantiq,pci-xway", "gpio-reset", false },
#endif
+#if IS_ENABLED(CONFIG_REGULATOR_S5M8767)
+ /*
+	 * According to the S5M8767 documentation, the DVS and DS pins
+	 * are active-high signals. However, exynos5250-spring.dts uses
+	 * an active-low setting.
+ */
+ { "samsung,s5m8767-pmic", "s5m8767,pmic-buck-dvs-gpios", true },
+ { "samsung,s5m8767-pmic", "s5m8767,pmic-buck-ds-gpios", true },
+#endif
#if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005)
/*
* DTS for Nokia N900 incorrectly specified "active high"
@@ -266,6 +277,9 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
{ "fsl,imx8qm-fec", "phy-reset-gpios", "phy-reset-active-high" },
{ "fsl,s32v234-fec", "phy-reset-gpios", "phy-reset-active-high" },
#endif
+#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
+ { "atmel,hsmci", "cd-gpios", "cd-inverted" },
+#endif
#if IS_ENABLED(CONFIG_PCI_IMX6)
{ "fsl,imx6q-pcie", "reset-gpio", "reset-gpio-active-high" },
{ "fsl,imx6sx-pcie", "reset-gpio", "reset-gpio-active-high" },
@@ -292,9 +306,6 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
{ "regulator-gpio", "enable-gpio", "enable-active-high" },
{ "regulator-gpio", "enable-gpios", "enable-active-high" },
#endif
-#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
- { "atmel,hsmci", "cd-gpios", "cd-inverted" },
-#endif
};
unsigned int i;
bool active_high;
@@ -1276,3 +1287,11 @@ void of_gpiochip_remove(struct gpio_chip *chip)
{
of_node_put(dev_of_node(&chip->gpiodev->dev));
}
+
+bool of_gpiochip_instance_match(struct gpio_chip *gc, unsigned int index)
+{
+ if (gc->of_node_instance_match)
+ return gc->of_node_instance_match(gc, index);
+
+ return false;
+}
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
index 16d6ac8cb156..3eebfac290c5 100644
--- a/drivers/gpio/gpiolib-of.h
+++ b/drivers/gpio/gpiolib-of.h
@@ -22,6 +22,7 @@ struct gpio_desc *of_find_gpio(struct device_node *np,
unsigned long *lookupflags);
int of_gpiochip_add(struct gpio_chip *gc);
void of_gpiochip_remove(struct gpio_chip *gc);
+bool of_gpiochip_instance_match(struct gpio_chip *gc, unsigned int index);
int of_gpio_count(const struct fwnode_handle *fwnode, const char *con_id);
#else
static inline struct gpio_desc *of_find_gpio(struct device_node *np,
@@ -33,6 +34,11 @@ static inline struct gpio_desc *of_find_gpio(struct device_node *np,
}
static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
+static inline bool of_gpiochip_instance_match(struct gpio_chip *gc,
+ unsigned int index)
+{
+ return false;
+}
static inline int of_gpio_count(const struct fwnode_handle *fwnode,
const char *con_id)
{
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 1acfa43bf1ab..4a3aa09dad9d 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -134,17 +134,15 @@ static ssize_t value_store(struct device *dev,
long value;
status = kstrtol(buf, 0, &value);
+ if (status)
+ return status;
guard(mutex)(&data->mutex);
- if (!test_bit(FLAG_IS_OUT, &desc->flags))
- return -EPERM;
-
+ status = gpiod_set_value_cansleep(desc, value);
if (status)
return status;
- gpiod_set_value_cansleep(desc, value);
-
return size;
}
static DEVICE_ATTR_PREALLOC(value, S_IWUSR | S_IRUGO, value_show, value_store);
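
With the value setters now returning an error code, as the hunk above relies
on, consumers can propagate failures instead of silently dropping them. A
small sketch under that assumption (example_* names are invented):

/* Assert a reset line; returns e.g. -EPERM if the line is an input. */
static int example_assert_reset(struct gpio_desc *reset_gpio)
{
	return gpiod_set_value_cansleep(reset_gpio, 1);
}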
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index b8197502a5ac..fdafa0df1b43 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -266,6 +266,20 @@ struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc)
EXPORT_SYMBOL_GPL(gpiod_to_gpio_device);
/**
+ * gpiod_is_equal() - Check if two GPIO descriptors refer to the same pin.
+ * @desc: Descriptor to compare.
+ * @other: The second descriptor to compare against.
+ *
+ * Returns:
+ * True if the descriptors refer to the same physical pin. False otherwise.
+ */
+bool gpiod_is_equal(struct gpio_desc *desc, struct gpio_desc *other)
+{
+ return desc == other;
+}
+EXPORT_SYMBOL_GPL(gpiod_is_equal);
+
+/**
* gpio_device_get_base() - Get the base GPIO number allocated by this device
* @gdev: GPIO device
*
@@ -342,6 +356,37 @@ static int gpiochip_find_base_unlocked(u16 ngpio)
}
}
+/*
+ * This descriptor validation needs to be inserted verbatim into each
+ * function taking a descriptor, so we need to use a preprocessor
+ * macro to avoid endless duplication. If the desc is NULL it is an
+ * optional GPIO and calls should just bail out.
+ */
+static int validate_desc(const struct gpio_desc *desc, const char *func)
+{
+ if (!desc)
+ return 0;
+
+ if (IS_ERR(desc)) {
+ pr_warn("%s: invalid GPIO (errorpointer: %pe)\n", func, desc);
+ return PTR_ERR(desc);
+ }
+
+ return 1;
+}
+
+#define VALIDATE_DESC(desc) do { \
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
+ return __valid; \
+ } while (0)
+
+#define VALIDATE_DESC_VOID(desc) do { \
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
+ return; \
+ } while (0)
+
static int gpiochip_get_direction(struct gpio_chip *gc, unsigned int offset)
{
int ret;
@@ -376,11 +421,8 @@ int gpiod_get_direction(struct gpio_desc *desc)
unsigned int offset;
int ret;
- /*
- * We cannot use VALIDATE_DESC() as we must not return 0 for a NULL
- * descriptor like we usually do.
- */
- if (IS_ERR_OR_NULL(desc))
+ ret = validate_desc(desc, __func__);
+ if (ret <= 0)
return -EINVAL;
CLASS(gpio_chip_guard, guard)(desc);
@@ -742,6 +784,12 @@ EXPORT_SYMBOL_GPL(gpiochip_query_valid_mask);
bool gpiochip_line_is_valid(const struct gpio_chip *gc,
unsigned int offset)
{
+ /*
+	 * Hog pins are requested before the GPIO chip is registered,
+	 * so gc->gpiodev may not be set yet.
+ */
+ if (!gc->gpiodev)
+ return true;
+
/* No mask means all valid */
if (likely(!gc->gpiodev->valid_mask))
return true;
@@ -874,14 +922,12 @@ static void machine_gpiochip_add(struct gpio_chip *gc)
{
struct gpiod_hog *hog;
- mutex_lock(&gpio_machine_hogs_mutex);
+ guard(mutex)(&gpio_machine_hogs_mutex);
list_for_each_entry(hog, &gpio_machine_hogs, list) {
if (!strcmp(gc->label, hog->chip_label))
gpiochip_machine_hog(gc, hog);
}
-
- mutex_unlock(&gpio_machine_hogs_mutex);
}
static void gpiochip_setup_devs(void)
@@ -975,7 +1021,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct gpio_device *gdev;
unsigned int desc_index;
int base = 0;
- int ret = 0;
+ int ret;
/* Only allow one set() and one set_multiple(). */
if ((gc->set && gc->set_rv) ||
@@ -1000,11 +1046,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
device_set_node(&gdev->dev, gpiochip_choose_fwnode(gc));
- gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
- if (gdev->id < 0) {
- ret = gdev->id;
+ ret = ida_alloc(&gpio_ida, GFP_KERNEL);
+ if (ret < 0)
goto err_free_gdev;
- }
+ gdev->id = ret;
ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
if (ret)
@@ -1507,9 +1552,8 @@ static int gpiochip_hierarchy_irq_domain_translate(struct irq_domain *d,
unsigned int *type)
{
/* We support standard DT translation */
- if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
- return irq_domain_translate_twocell(d, fwspec, hwirq, type);
- }
+ if (is_of_node(fwspec->fwnode))
+ return irq_domain_translate_twothreecell(d, fwspec, hwirq, type);
/* This is for board files and others not using DT */
if (is_fwnode_irqchip(fwspec->fwnode)) {
@@ -1811,11 +1855,26 @@ static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
irq_set_chip_data(irq, NULL);
}
+static int gpiochip_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ struct fwnode_handle *fwnode = fwspec->fwnode;
+ struct gpio_chip *gc = d->host_data;
+ unsigned int index = fwspec->param[0];
+
+ if (fwspec->param_count == 3 && is_of_node(fwnode))
+ return of_gpiochip_instance_match(gc, index);
+
+ /* Fallback for twocells */
+ return (fwnode && (d->fwnode == fwnode) && (d->bus_token == bus_token));
+}
+
static const struct irq_domain_ops gpiochip_domain_ops = {
.map = gpiochip_irq_map,
.unmap = gpiochip_irq_unmap,
+ .select = gpiochip_irq_select,
/* Virtually all GPIO irqchips are twocell:ed */
- .xlate = irq_domain_xlate_twocell,
+ .xlate = irq_domain_xlate_twothreecell,
};
static struct irq_domain *gpiochip_simple_create_domain(struct gpio_chip *gc)
@@ -1835,7 +1894,6 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct irq_domain *domain = gc->irq.domain;
-#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
* Avoid race condition with other code, which tries to lookup
* an IRQ before the irqchip has been properly registered,
@@ -1843,7 +1901,6 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
*/
if (!gc->irq.initialized)
return -EPROBE_DEFER;
-#endif
if (!gpiochip_irqchip_irq_valid(gc, offset))
return -ENXIO;
@@ -2405,37 +2462,6 @@ out_clear_bit:
return ret;
}
-/*
- * This descriptor validation needs to be inserted verbatim into each
- * function taking a descriptor, so we need to use a preprocessor
- * macro to avoid endless duplication. If the desc is NULL it is an
- * optional GPIO and calls should just bail out.
- */
-static int validate_desc(const struct gpio_desc *desc, const char *func)
-{
- if (!desc)
- return 0;
-
- if (IS_ERR(desc)) {
- pr_warn("%s: invalid GPIO (errorpointer)\n", func);
- return PTR_ERR(desc);
- }
-
- return 1;
-}
-
-#define VALIDATE_DESC(desc) do { \
- int __valid = validate_desc(desc, __func__); \
- if (__valid <= 0) \
- return __valid; \
- } while (0)
-
-#define VALIDATE_DESC_VOID(desc) do { \
- int __valid = validate_desc(desc, __func__); \
- if (__valid <= 0) \
- return; \
- } while (0)
-
int gpiod_request(struct gpio_desc *desc, const char *label)
{
int ret = -EPROBE_DEFER;
@@ -2879,7 +2905,7 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
* output-only, but if there is then not even a .set() operation it
* is pretty tricky to drive the output line.
*/
- if (!guard.gc->set && !guard.gc->direction_output) {
+ if (!guard.gc->set && !guard.gc->set_rv && !guard.gc->direction_output) {
gpiod_warn(desc,
"%s: missing set() and direction_output() operations\n",
__func__);
@@ -3045,7 +3071,7 @@ set_output_flag:
*/
int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
{
- int ret = 0;
+ int ret;
VALIDATE_DESC(desc);
@@ -3078,7 +3104,7 @@ EXPORT_SYMBOL_GPL(gpiod_enable_hw_timestamp_ns);
*/
int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
{
- int ret = 0;
+ int ret;
VALIDATE_DESC(desc);
@@ -3593,6 +3619,9 @@ static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
static int gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
+ if (unlikely(!test_bit(FLAG_IS_OUT, &desc->flags)))
+ return -EPERM;
+
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
return -ENODEV;
@@ -3664,6 +3693,12 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
if (!can_sleep)
WARN_ON(array_info->gdev->can_sleep);
+ for (i = 0; i < array_size; i++) {
+ if (unlikely(!test_bit(FLAG_IS_OUT,
+ &desc_array[i]->flags)))
+ return -EPERM;
+ }
+
guard(srcu)(&array_info->gdev->srcu);
gc = srcu_dereference(array_info->gdev->chip,
&array_info->gdev->srcu);
@@ -3723,6 +3758,9 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
int hwgpio = gpio_chip_hwgpio(desc);
int value = test_bit(i, value_bitmap);
+ if (unlikely(!test_bit(FLAG_IS_OUT, &desc->flags)))
+ return -EPERM;
+
/*
* Pins applicable for fast input but not for
* fast output processing may have been already
@@ -3944,13 +3982,10 @@ int gpiod_to_irq(const struct gpio_desc *desc)
struct gpio_device *gdev;
struct gpio_chip *gc;
int offset;
+ int ret;
- /*
- * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics
- * requires this function to not return zero on an invalid descriptor
- * but rather a negative error number.
- */
- if (IS_ERR_OR_NULL(desc))
+ ret = validate_desc(desc, __func__);
+ if (ret <= 0)
return -EINVAL;
gdev = desc->gdev;
@@ -3962,13 +3997,12 @@ int gpiod_to_irq(const struct gpio_desc *desc)
offset = gpio_chip_hwgpio(desc);
if (gc->to_irq) {
- int retirq = gc->to_irq(gc, offset);
+ ret = gc->to_irq(gc, offset);
+ if (ret)
+ return ret;
/* Zero means NO_IRQ */
- if (!retirq)
- return -ENXIO;
-
- return retirq;
+ return -ENXIO;
}
#ifdef CONFIG_GPIOLIB_IRQCHIP
if (gc->irq.chip) {
@@ -4323,12 +4357,10 @@ void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n)
{
unsigned int i;
- mutex_lock(&gpio_lookup_lock);
+ guard(mutex)(&gpio_lookup_lock);
for (i = 0; i < n; i++)
list_add_tail(&tables[i]->list, &gpio_lookup_list);
-
- mutex_unlock(&gpio_lookup_lock);
}
/**
@@ -4387,11 +4419,9 @@ void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
if (!table)
return;
- mutex_lock(&gpio_lookup_lock);
+ guard(mutex)(&gpio_lookup_lock);
list_del(&table->list);
-
- mutex_unlock(&gpio_lookup_lock);
}
EXPORT_SYMBOL_GPL(gpiod_remove_lookup_table);
@@ -4403,7 +4433,7 @@ void gpiod_add_hogs(struct gpiod_hog *hogs)
{
struct gpiod_hog *hog;
- mutex_lock(&gpio_machine_hogs_mutex);
+ guard(mutex)(&gpio_machine_hogs_mutex);
for (hog = &hogs[0]; hog->chip_label; hog++) {
list_add_tail(&hog->list, &gpio_machine_hogs);
@@ -4417,8 +4447,6 @@ void gpiod_add_hogs(struct gpiod_hog *hogs)
if (gdev)
gpiochip_machine_hog(gpio_device_get_chip(gdev), hog);
}
-
- mutex_unlock(&gpio_machine_hogs_mutex);
}
EXPORT_SYMBOL_GPL(gpiod_add_hogs);
@@ -4426,10 +4454,10 @@ void gpiod_remove_hogs(struct gpiod_hog *hogs)
{
struct gpiod_hog *hog;
- mutex_lock(&gpio_machine_hogs_mutex);
+ guard(mutex)(&gpio_machine_hogs_mutex);
+
for (hog = &hogs[0]; hog->chip_label; hog++)
list_del(&hog->list);
- mutex_unlock(&gpio_machine_hogs_mutex);
}
EXPORT_SYMBOL_GPL(gpiod_remove_hogs);
@@ -5108,8 +5136,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_optional);
*/
void gpiod_put(struct gpio_desc *desc)
{
- if (desc)
- gpiod_free(desc);
+ gpiod_free(desc);
}
EXPORT_SYMBOL_GPL(gpiod_put);
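
gpiod_is_equal(), added near the top of this file's changes, gives consumers
a sanctioned way to check whether two lookups resolved to the same physical
line, without comparing the opaque pointers directly. A fragment from a
hypothetical probe path (names invented):

	/* Warn if two lookups unexpectedly resolve to one pin. */
	if (gpiod_is_equal(enable_gpio, reset_gpio))
		dev_warn(dev, "enable and reset share one GPIO line\n");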
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2cba2b6ebe1c..f7ea8e895c0c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -26,6 +26,11 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support if it is available for your platform.
+menu "DRM debugging options"
+depends on DRM
+source "drivers/gpu/drm/Kconfig.debug"
+endmenu
+
if DRM
config DRM_MIPI_DBI
@@ -37,65 +42,6 @@ config DRM_MIPI_DSI
bool
depends on DRM
-config DRM_DEBUG_MM
- bool "Insert extra checks and debug info into the DRM range managers"
- default n
- depends on DRM
- depends on STACKTRACE_SUPPORT
- select STACKDEPOT
- help
- Enable allocation tracking of memory manager and leak detection on
- shutdown.
-
- Recommended for driver developers only.
-
- If in doubt, say "N".
-
-config DRM_USE_DYNAMIC_DEBUG
- bool "use dynamic debug to implement drm.debug"
- default n
- depends on BROKEN
- depends on DRM
- depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
- depends on JUMP_LABEL
- help
- Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
- Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
- bytes per callsite, the .data costs can be substantial, and
- are therefore configurable.
-
-config DRM_KUNIT_TEST_HELPERS
- tristate
- depends on DRM && KUNIT
- select DRM_KMS_HELPER
- help
- KUnit Helpers for KMS drivers.
-
-config DRM_KUNIT_TEST
- tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT && MMU
- select DRM_BUDDY
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDMI_STATE_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_EXEC
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_GEM_SHMEM_HELPER
- select DRM_KUNIT_TEST_HELPERS
- select DRM_LIB_RANDOM
- select PRIME_NUMBERS
- default KUNIT_ALL_TESTS
- help
- This builds unit tests for DRM. This option is not useful for
- distributions or general kernels, but only for kernel
- developers working on DRM and associated drivers.
-
- For more information on KUnit and unit tests in general,
- please refer to the KUnit documentation in
- Documentation/dev-tools/kunit/.
-
- If in doubt, say "N".
-
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -188,7 +134,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
depends on STACKTRACE_SUPPORT
select STACKDEPOT
- depends on DRM_KMS_HELPER
+ select DRM_KMS_HELPER
depends on DEBUG_KERNEL
depends on EXPERT
help
@@ -242,28 +188,12 @@ source "drivers/gpu/drm/display/Kconfig"
config DRM_TTM
tristate
depends on DRM && MMU
+ select SHMEM
help
GPU memory management subsystem for devices with multiple
GPU memory types. Will be enabled automatically if a device driver
uses it.
-config DRM_TTM_KUNIT_TEST
- tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
- default n
- depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
- select DRM_TTM
- select DRM_BUDDY
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_KUNIT_TEST_HELPERS
- default KUNIT_ALL_TESTS
- help
- Enables unit tests for TTM, a GPU memory manager subsystem used
- to manage memory buffers. This option is mostly useful for kernel
- developers. It depends on (UML || COMPILE_TEST) since no other driver
- which uses TTM can be loaded while running the tests.
-
- If in doubt, say "N".
-
config DRM_EXEC
tristate
depends on DRM
@@ -335,6 +265,8 @@ config DRM_SCHED
tristate
depends on DRM
+source "drivers/gpu/drm/sysfb/Kconfig"
+
source "drivers/gpu/drm/arm/Kconfig"
source "drivers/gpu/drm/radeon/Kconfig"
@@ -343,6 +275,8 @@ source "drivers/gpu/drm/amd/amdgpu/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
+source "drivers/gpu/drm/nova/Kconfig"
+
source "drivers/gpu/drm/i915/Kconfig"
source "drivers/gpu/drm/xe/Kconfig"
@@ -454,6 +388,8 @@ source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
+source "drivers/gpu/drm/sitronix/Kconfig"
+
source "drivers/gpu/drm/solomon/Kconfig"
source "drivers/gpu/drm/sprd/Kconfig"
@@ -462,7 +398,7 @@ source "drivers/gpu/drm/imagination/Kconfig"
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
- depends on DRM && PCI && MMU && HYPERV
+ depends on DRM && PCI && HYPERV
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
@@ -474,9 +410,6 @@ config DRM_HYPERV
If M is selected the module will be called hyperv_drm.
-config DRM_EXPORT_FOR_TESTS
- bool
-
# Separate option as not all DRM drivers use it
config DRM_PANEL_BACKLIGHT_QUIRKS
tristate
@@ -489,31 +422,6 @@ config DRM_PRIVACY_SCREEN
bool
default n
-config DRM_WERROR
- bool "Compile the drm subsystem with warnings as errors"
- depends on DRM && EXPERT
- depends on !WERROR
- default n
- help
- A kernel build should not cause any compiler warnings, and this
- enables the '-Werror' flag to enforce that rule in the drm subsystem.
-
- The drm subsystem enables more warnings than the kernel default, so
- this config option is disabled by default.
-
- If in doubt, say N.
-
-config DRM_HEADER_TEST
- bool "Ensure DRM headers are self-contained and pass kernel-doc"
- depends on DRM && EXPERT && BROKEN
- default n
- help
- Ensure the DRM subsystem headers both under drivers/gpu/drm and
- include/drm compile, are self-contained, have header guards, and have
- no kernel-doc warnings.
-
- If in doubt, say N.
-
endif
# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug
new file mode 100644
index 000000000000..fa6ee76f4d3c
--- /dev/null
+++ b/drivers/gpu/drm/Kconfig.debug
@@ -0,0 +1,116 @@
+config DRM_USE_DYNAMIC_DEBUG
+ bool "use dynamic debug to implement drm.debug"
+ default n
+ depends on BROKEN
+ depends on DRM
+ depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
+ depends on JUMP_LABEL
+ help
+ Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
+ Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
+ bytes per callsite, the .data costs can be substantial, and
+ are therefore configurable.
+
+config DRM_WERROR
+ bool "Compile the drm subsystem with warnings as errors"
+ depends on DRM && EXPERT
+ depends on !WERROR
+ default n
+ help
+ A kernel build should not cause any compiler warnings, and this
+ enables the '-Werror' flag to enforce that rule in the drm subsystem.
+
+ The drm subsystem enables more warnings than the kernel default, so
+ this config option is disabled by default.
+
+ If in doubt, say N.
+
+config DRM_HEADER_TEST
+ bool "Ensure DRM headers are self-contained and pass kernel-doc"
+ depends on DRM && EXPERT && BROKEN
+ default n
+ help
+ Ensure the DRM subsystem headers both under drivers/gpu/drm and
+ include/drm compile, are self-contained, have header guards, and have
+ no kernel-doc warnings.
+
+ If in doubt, say N.
+
+config DRM_DEBUG_MM
+ bool "Insert extra checks and debug info into the DRM range managers"
+ default n
+ depends on DRM
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+ help
+ Enable allocation tracking of memory manager and leak detection on
+ shutdown.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_KUNIT_TEST_HELPERS
+ tristate
+ depends on DRM && KUNIT
+ select DRM_KMS_HELPER
+ help
+ KUnit Helpers for KMS drivers.
+
+config DRM_KUNIT_TEST
+ tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
+ depends on DRM && KUNIT && MMU
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_BUDDY
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_EXEC
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KUNIT_TEST_HELPERS
+ select DRM_LIB_RANDOM
+ select PRIME_NUMBERS
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for DRM. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on DRM and associated drivers.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
+
+config DRM_TTM_KUNIT_TEST
+ tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
+ default n
+ depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
+ select DRM_TTM
+ select DRM_BUDDY
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
+ default KUNIT_ALL_TESTS
+ help
+ Enables unit tests for TTM, a GPU memory manager subsystem used
+ to manage memory buffers. This option is mostly useful for kernel
+ developers. It depends on (UML || COMPILE_TEST) since no other driver
+ which uses TTM can be loaded while running the tests.
+
+ If in doubt, say "N".
+
+config DRM_SCHED_KUNIT_TEST
+ tristate "KUnit tests for the DRM scheduler" if !KUNIT_ALL_TESTS
+ select DRM_SCHED
+ depends on DRM && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Choose this option to build unit tests for the DRM scheduler.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_EXPORT_FOR_TESTS
+ bool
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ed54a546bbe2..5050ac32bba2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -134,6 +134,7 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
drm_kms_helper-y := \
drm_atomic_helper.o \
drm_atomic_state_helper.o \
+ drm_bridge_helper.o \
drm_crtc_helper.o \
drm_damage_helper.o \
drm_flip_work.o \
@@ -176,6 +177,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VGEM) += vgem/
obj-$(CONFIG_DRM_VKMS) += vkms/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-$(CONFIG_DRM_NOVA) += nova/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
obj-$(CONFIG_DRM_GMA500) += gma500/
@@ -204,6 +206,7 @@ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-y += hisilicon/
obj-y += mxsfb/
+obj-y += sysfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
@@ -219,6 +222,7 @@ obj-$(CONFIG_DRM_TIDSS) += tidss/
obj-y += xlnx/
obj-y += gud/
obj-$(CONFIG_DRM_HYPERV) += hyperv/
+obj-y += sitronix/
obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
obj-$(CONFIG_DRM_LOONGSON) += loongson/
@@ -236,7 +240,7 @@ always-$(CONFIG_DRM_HEADER_TEST) += \
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = \
$(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
- $(srctree)/scripts/kernel-doc -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
index ad80542b60ed..2b60128e2c69 100644
--- a/drivers/gpu/drm/adp/adp-mipi.c
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -212,12 +212,13 @@ static const struct mipi_dsi_host_ops adp_dsi_host_ops = {
};
static int adp_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct adp_mipi_drv_private *adp =
container_of(bridge, struct adp_mipi_drv_private, bridge);
- return drm_bridge_attach(bridge->encoder, adp->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, adp->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs adp_dsi_bridge_funcs = {
diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c
index c98c647f981d..54cde090c3f4 100644
--- a/drivers/gpu/drm/adp/adp_drv.c
+++ b/drivers/gpu/drm/adp/adp_drv.c
@@ -121,7 +121,6 @@ struct adp_drv_private {
dma_addr_t mask_iova;
int be_irq;
int fe_irq;
- spinlock_t irq_lock;
struct drm_pending_vblank_event *event;
};
@@ -288,6 +287,7 @@ static void adp_crtc_atomic_enable(struct drm_crtc *crtc,
writel(BIT(0), adp->be + ADBE_BLEND_EN3);
writel(BIT(0), adp->be + ADBE_BLEND_BYPASS);
writel(BIT(0), adp->be + ADBE_BLEND_EN4);
+ drm_crtc_vblank_on(crtc);
}
static void adp_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -310,6 +310,7 @@ static void adp_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
u32 frame_num = 1;
+ unsigned long flags;
struct adp_drv_private *adp = crtc_to_adp(crtc);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc);
u64 new_size = ALIGN(new_state->mode.hdisplay *
@@ -330,13 +331,19 @@ static void adp_crtc_atomic_flush(struct drm_crtc *crtc,
}
writel(ADBE_FIFO_SYNC | frame_num, adp->be + ADBE_FIFO);
//FIXME: use adbe flush interrupt
- spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
- drm_crtc_vblank_get(crtc);
- adp->event = crtc->state->event;
+ struct drm_pending_vblank_event *event = crtc->state->event;
+
+ crtc->state->event = NULL;
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, event);
+ else
+ adp->event = event;
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
}
static const struct drm_crtc_funcs adp_crtc_funcs = {
@@ -482,8 +489,6 @@ static irqreturn_t adp_fe_irq(int irq, void *arg)
u32 int_status;
u32 int_ctl;
- spin_lock(&adp->irq_lock);
-
int_status = readl(adp->fe + ADP_INT_STATUS);
if (int_status & ADP_INT_STATUS_VBLANK) {
drm_crtc_handle_vblank(&adp->crtc);
@@ -501,7 +506,6 @@ static irqreturn_t adp_fe_irq(int irq, void *arg)
writel(int_status, adp->fe + ADP_INT_STATUS);
- spin_unlock(&adp->irq_lock);
return IRQ_HANDLED;
}
@@ -512,8 +516,7 @@ static int adp_drm_bind(struct device *dev)
struct adp_drv_private *adp = to_adp(drm);
int err;
- adp_disable_vblank(adp);
- writel(ADP_CTRL_FIFO_ON | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+ writel(ADP_CTRL_FIFO_ON, adp->fe + ADP_CTRL);
adp->next_bridge = drmm_of_get_bridge(&adp->drm, dev->of_node, 0, 0);
if (IS_ERR(adp->next_bridge)) {
@@ -567,8 +570,6 @@ static int adp_probe(struct platform_device *pdev)
if (IS_ERR(adp))
return PTR_ERR(adp);
- spin_lock_init(&adp->irq_lock);
-
dev_set_drvdata(&pdev->dev, &adp->drm);
err = adp_parse_of(pdev, adp);
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 1a11cab741ac..058e3b3ad520 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -2,7 +2,7 @@
config DRM_AMDGPU
tristate "AMD GPU"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on !UML
select FW_LOADER
select DRM_CLIENT
@@ -68,7 +68,6 @@ config DRM_AMDGPU_CIK
config DRM_AMDGPU_USERPTR
bool "Always enable userptr write support"
depends on DRM_AMDGPU
- depends on MMU
select HMM_MIRROR
select MMU_NOTIFIER
help
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index aacc810cabb3..87080c06e5fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -66,7 +66,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
- amdgpu_cper.o
+ amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -174,7 +174,10 @@ amdgpu-y += \
amdgpu-y += \
amdgpu_mes.o \
mes_v11_0.o \
- mes_v12_0.o
+ mes_v12_0.o \
+
+# add GFX userqueue support
+amdgpu-y += mes_userqueue.o
# add UVD block
amdgpu-y += \
@@ -253,6 +256,8 @@ amdgpu-y += \
# add amdkfd interfaces
amdgpu-y += amdgpu_amdkfd.o
+# add gfx usermode queue
+amdgpu-y += amdgpu_userq.o
ifneq ($(CONFIG_HSA_AMD),)
AMDKFD_PATH := ../amdkfd
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6d83ccfa42ee..836ea081088a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -113,6 +113,8 @@
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
#include "amdgpu_reg_state.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_eviction_fence.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif
@@ -228,7 +230,7 @@ extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
extern int amdgpu_use_xgmi_p2p;
extern int amdgpu_mtype_local;
-extern bool enforce_isolation;
+extern int amdgpu_enforce_isolation;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
@@ -266,8 +268,10 @@ extern int amdgpu_umsch_mm_fwlog;
extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;
+extern int amdgpu_rebar;
extern int amdgpu_wbrf;
+extern int amdgpu_user_queue;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
@@ -353,7 +357,6 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
@@ -489,7 +492,6 @@ struct amdgpu_flip_work {
bool async;
};
-
/*
* file private structure
*/
@@ -502,6 +504,11 @@ struct amdgpu_fpriv {
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
+ struct amdgpu_userq_mgr userq_mgr;
+
+ /* Eviction fence infra */
+ struct amdgpu_eviction_fence_mgr evf_mgr;
+
/** GPU partition selection */
uint32_t xcp_id;
};
@@ -513,12 +520,62 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
*/
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+/**
+ * struct amdgpu_wb - This struct is used for small GPU memory allocations.
+ *
+ * This struct is used to allocate small amounts of GPU memory that can be
+ * used to shadow certain state into memory. This is especially useful for
+ * providing easy CPU access to some state without requiring register access
+ * (e.g., if a block is power gated, reading its registers may be problematic).
+ *
+ * Note: the term writeback was initially used because many of the amdgpu
+ * components had some level of writeback memory, and this struct initially
+ * described those components.
+ */
struct amdgpu_wb {
+
+ /**
+ * @wb_obj:
+ *
+ * Buffer Object used for the writeback memory.
+ */
struct amdgpu_bo *wb_obj;
+
+ /**
+ * @wb:
+ *
+ * Pointer to the first writeback slot. In terms of CPU address
+ * this value can be accessed directly by using the offset as an index.
+ * For the GPU address, it is necessary to use gpu_addr and the offset.
+ */
volatile uint32_t *wb;
+
+ /**
+ * @gpu_addr:
+ *
+ * Writeback base address in the GPU.
+ */
uint64_t gpu_addr;
- u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
+
+ /**
+ * @num_wb:
+ *
+ * Number of writeback slots reserved for amdgpu.
+ */
+ u32 num_wb;
+
+ /**
+ * @used:
+ *
+	 * Bitmap tracking which writeback slots are already in use.
+ */
unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
+
+ /**
+ * @lock:
+ *
+ * Protects read and write of the used field array.
+ */
spinlock_t lock;
};
@@ -552,6 +609,7 @@ struct amdgpu_allowed_register_entry {
* are reset depends on the ASIC. Notably doesn't reset IPs
* shared with the CPU on APUs or the memory controllers (so
* VRAM is not lost). Not available on all ASICs.
+ * @AMD_RESET_LINK: Triggers SW-UP link reset on other GPUs
* @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card
* but without powering off the PCI bus. Suitable only for
* discrete GPUs.
@@ -569,6 +627,7 @@ enum amd_reset_method {
AMD_RESET_METHOD_MODE0,
AMD_RESET_METHOD_MODE1,
AMD_RESET_METHOD_MODE2,
+ AMD_RESET_METHOD_LINK,
AMD_RESET_METHOD_BACO,
AMD_RESET_METHOD_PCI,
AMD_RESET_METHOD_ON_INIT,
@@ -822,6 +881,11 @@ struct amdgpu_mqd_prop {
uint32_t hqd_queue_priority;
bool allow_tunneling;
bool hqd_active;
+ uint64_t shadow_addr;
+ uint64_t gds_bkup_addr;
+ uint64_t csa_addr;
+ uint64_t fence_address;
+ bool tmz_queue;
};
struct amdgpu_mqd {
@@ -830,6 +894,12 @@ struct amdgpu_mqd {
struct amdgpu_mqd_prop *p);
};
+struct amdgpu_pcie_reset_ctx {
+ bool in_link_reset;
+ bool occurs_dpc;
+ bool audio_suspended;
+};
+
/*
* Custom Init levels could be defined for different situations where a full
* initialization of all hardware blocks are not expected. Sample cases are
@@ -854,6 +924,14 @@ struct amdgpu_init_level {
struct amdgpu_reset_domain;
struct amdgpu_fru_info;
+enum amdgpu_enforce_isolation_mode {
+ AMDGPU_ENFORCE_ISOLATION_DISABLE = 0,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE = 1,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY = 2,
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
+};
+
+
/*
* Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
*/
@@ -1082,6 +1160,13 @@ struct amdgpu_device {
bool enable_uni_mes;
struct amdgpu_mes mes;
struct amdgpu_mqd mqds[AMDGPU_HW_IP_NUM];
+ const struct amdgpu_userq_funcs *userq_funcs[AMDGPU_HW_IP_NUM];
+
+ /* xarray used to retrieve the user queue fence driver reference
+ * in the EOP interrupt handler to signal the particular user
+ * queue fence.
+ */
+ struct xarray userq_xa;
/* df */
struct amdgpu_df df;
@@ -1124,6 +1209,7 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
+ suspend_state_t last_suspend_state;
enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;
@@ -1160,6 +1246,8 @@ struct amdgpu_device {
struct pci_saved_state *pci_state;
pci_channel_state_t pci_channel_state;
+ struct amdgpu_pcie_reset_ctx pcie_reset_ctx;
+
/* Track auto wait count on s_barrier settings */
bool barrier_has_auto_waitcnt;
@@ -1193,10 +1281,11 @@ struct amdgpu_device {
bool debug_enable_ras_aca;
bool debug_exp_resets;
bool debug_disable_gpu_ring_reset;
+ bool debug_vm_userptr;
/* Protection for the following isolation structure */
struct mutex enforce_isolation_mutex;
- bool enforce_isolation[MAX_XCP];
+ enum amdgpu_enforce_isolation_mode enforce_isolation[MAX_XCP];
struct amdgpu_isolation {
void *owner;
struct dma_fence *spearhead;
@@ -1210,6 +1299,10 @@ struct amdgpu_device {
* in KFD: VRAM or GTT.
*/
bool apu_prefer_gtt;
+
+ struct list_head userq_mgr_list;
+ struct mutex userq_mutex;
+ bool userq_halt_for_enforce_isolation;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1464,6 +1557,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 array_size);
int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
+int amdgpu_device_link_reset(struct amdgpu_device *adev);
bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
@@ -1614,11 +1708,9 @@ static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_cap
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
-static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
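
The struct amdgpu_wb kernel-doc added above describes the slot bookkeeping;
in-tree users reserve and release a slot roughly as follows (a sketch assuming
the long-standing amdgpu_device_wb_get()/amdgpu_device_wb_free() helpers):

	u32 index;
	u64 wb_gpu_addr;
	int r;

	r = amdgpu_device_wb_get(adev, &index);	/* reserve one 32-bit slot */
	if (r)
		return r;

	/* CPU view is adev->wb.wb[index]; the GPU uses gpu_addr plus offset. */
	wb_gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = 0;

	/* ... let the engine write fence/status data to wb_gpu_addr ... */

	amdgpu_device_wb_free(adev, index);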
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index b4ad163f42a7..3835f2592914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -120,6 +120,9 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+
+ if (ACA_REG__STATUS__SCRUB(bank->regs[ACA_REG_IDX_STATUS]))
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
}
static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index b7f8f2ff143d..707e131f89d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1533,22 +1533,4 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
#endif /* CONFIG_AMD_PMC */
}
-/**
- * amdgpu_choose_low_power_state
- *
- * @adev: amdgpu_device_pointer
- *
- * Choose the target low power state for the GPU
- */
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
-{
- if (adev->in_runpm)
- return;
-
- if (amdgpu_acpi_is_s0ix_active(adev))
- adev->in_s0ix = true;
- else if (amdgpu_acpi_is_s3_active(adev))
- adev->in_s3 = true;
-}
-
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d2ec4130a316..260165bbe373 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2559,6 +2559,18 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
if (ret != -EFAULT)
return ret;
+ /* If applications unmap memory before destroying the userptr
+ * from the KFD, trigger a segmentation fault in VM debug mode.
+ */
+ if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) {
+ pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",
+ pid_nr(process_info->pid), mem->va);
+
+			/* Send GPU VM fault to user space */
+ kfd_signal_vm_fault_event_with_userptr(kfd_lookup_process_by_pid(process_info->pid),
+ mem->va);
+ }
+
ret = 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index eb015bdda8a7..c7d32fb216e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -281,6 +281,9 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
case ATOM_DGPU_VRAM_TYPE_GDDR6:
vram_type = AMDGPU_VRAM_TYPE_GDDR6;
break;
+ case ATOM_DGPU_VRAM_TYPE_HBM3E:
+ vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+ break;
default:
vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 68bce6a6d09d..004a6a9d6b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -252,83 +252,22 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
- case CHIP_TAHITI:
- strcpy(fw_name, "radeon/tahiti_smc.bin");
- break;
- case CHIP_PITCAIRN:
- if ((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6810) ||
- (adev->pdev->device == 0x6811))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/pitcairn_smc.bin");
- }
- break;
- case CHIP_VERDE:
- if (((adev->pdev->device == 0x6820) &&
- ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83))) ||
- ((adev->pdev->device == 0x6821) &&
- ((adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87))) ||
- ((adev->pdev->revision == 0x87) &&
- ((adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682b)))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/verde_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/verde_smc.bin");
- }
- break;
- case CHIP_OLAND:
- if (((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6600) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605) ||
- (adev->pdev->device == 0x6610))) ||
- ((adev->pdev->revision == 0x83) &&
- (adev->pdev->device == 0x6610))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/oland_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/oland_smc.bin");
- }
- break;
- case CHIP_HAINAN:
- if (((adev->pdev->revision == 0x81) &&
- (adev->pdev->device == 0x6660)) ||
- ((adev->pdev->revision == 0x83) &&
- ((adev->pdev->device == 0x6660) ||
- (adev->pdev->device == 0x6663) ||
- (adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667)))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/hainan_k_smc.bin");
- } else if ((adev->pdev->revision == 0xc3) &&
- (adev->pdev->device == 0x6665)) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/banks_k_2_smc.bin");
- } else {
- strcpy(fw_name, "radeon/hainan_smc.bin");
- }
- break;
case CHIP_BONAIRE:
if ((adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->device == 0x665f)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
+ strscpy(fw_name, "amdgpu/bonaire_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/bonaire_smc.bin");
+ strscpy(fw_name, "amdgpu/bonaire_smc.bin");
}
break;
case CHIP_HAWAII:
if (adev->pdev->revision == 0x80) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
+ strscpy(fw_name, "amdgpu/hawaii_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/hawaii_smc.bin");
+ strscpy(fw_name, "amdgpu/hawaii_smc.bin");
}
break;
case CHIP_TOPAZ:
@@ -338,76 +277,76 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/topaz_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_smc.bin");
break;
case CHIP_TONGA:
if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/tonga_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
- strcpy(fw_name, "amdgpu/fiji_smc.bin");
+ strscpy(fw_name, "amdgpu/fiji_smc.bin");
break;
case CHIP_POLARIS11:
if (type == CGS_UCODE_ID_SMU) {
if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_k_smc.bin");
} else if (ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
}
break;
case CHIP_POLARIS10:
if (type == CGS_UCODE_ID_SMU) {
if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_k_smc.bin");
} else if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
}
break;
case CHIP_POLARIS12:
if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris12_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris12_smc.bin");
}
break;
case CHIP_VEGAM:
- strcpy(fw_name, "amdgpu/vegam_smc.bin");
+ strscpy(fw_name, "amdgpu/vegam_smc.bin");
break;
case CHIP_VEGA10:
if ((adev->pdev->device == 0x687f) &&
((adev->pdev->revision == 0xc0) ||
(adev->pdev->revision == 0xc1) ||
(adev->pdev->revision == 0xc3)))
- strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
+ strscpy(fw_name, "amdgpu/vega10_acg_smc.bin");
else
- strcpy(fw_name, "amdgpu/vega10_smc.bin");
+ strscpy(fw_name, "amdgpu/vega10_smc.bin");
break;
case CHIP_VEGA12:
- strcpy(fw_name, "amdgpu/vega12_smc.bin");
+ strscpy(fw_name, "amdgpu/vega12_smc.bin");
break;
case CHIP_VEGA20:
- strcpy(fw_name, "amdgpu/vega20_smc.bin");
+ strscpy(fw_name, "amdgpu/vega20_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 360e07a5c7c1..5a234eadae8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -549,7 +549,7 @@ int amdgpu_cper_init(struct amdgpu_device *adev)
{
int r;
- if (!amdgpu_aca_is_enabled(adev))
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
return 0;
r = amdgpu_cper_ring_init(adev);
@@ -568,7 +568,7 @@ int amdgpu_cper_init(struct amdgpu_device *adev)
int amdgpu_cper_fini(struct amdgpu_device *adev)
{
- if (!amdgpu_aca_is_enabled(adev))
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
return 0;
adev->cper.enabled = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82df06a72ee0..9ea0d9b71f48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -296,7 +296,25 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
num_ibs[i], &p->jobs[i]);
if (ret)
goto free_all_kdata;
- p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];
+ switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
+ case AMDGPU_ENFORCE_ISOLATION_DISABLE:
+ default:
+ p->jobs[i]->enforce_isolation = false;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = true;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ }
}
p->gang_leader = p->jobs[p->gang_leader_idx];
@@ -349,6 +367,10 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
ring = amdgpu_job_ring(job);
ib = &job->ibs[job->num_ibs++];
+ /* submissions to kernel queues are disabled */
+ if (ring->no_user_submission)
+ return -EINVAL;
+
/* MM engine doesn't support user fences */
if (p->uf_bo && ring->funcs->no_user_fence)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index cfdf558b48b6..02138aa55793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
int r;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a1450f13d963..8e626f50b362 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2105,6 +2105,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_rap_debugfs_init(adev);
amdgpu_securedisplay_debugfs_init(adev);
amdgpu_fw_attestation_debugfs_init(adev);
+ amdgpu_psp_debugfs_init(adev);
debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
&amdgpu_evict_vram_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a30111d2c3ea..4d1b54f58495 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -85,6 +85,7 @@
#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#endif
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
@@ -1680,6 +1681,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ if (!amdgpu_rebar)
+ return 0;
+
/* resizing on Dell G5 SE platforms causes problems with runtime pm */
if ((amdgpu_runtime_pm != 0) &&
adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
@@ -1870,6 +1874,35 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device
return true;
}
+static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
+{
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
+ return false;
+
+ if (c->x86 == 6 &&
+ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
+ switch (c->x86_model) {
+ case VFM_MODEL(INTEL_ALDERLAKE):
+ case VFM_MODEL(INTEL_ALDERLAKE_L):
+ case VFM_MODEL(INTEL_RAPTORLAKE):
+ case VFM_MODEL(INTEL_RAPTORLAKE_P):
+ case VFM_MODEL(INTEL_RAPTORLAKE_S):
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ return false;
+ }
+#else
+ return false;
+#endif
+}
+
/**
* amdgpu_device_should_use_aspm - check if the device should program ASPM
*
@@ -1894,7 +1927,7 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
}
if (adev->flags & AMD_IS_APU)
return false;
- if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
+ if (amdgpu_device_aspm_support_quirk(adev))
return false;
return pcie_aspm_enabled(adev->pdev);
}
@@ -2112,8 +2145,31 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- for (i = 0; i < MAX_XCP; i++)
- adev->enforce_isolation[i] = !!enforce_isolation;
+ for (i = 0; i < MAX_XCP; i++) {
+ switch (amdgpu_enforce_isolation) {
+ case -1:
+ case 0:
+ default:
+ /* disable */
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ /* enable */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ /* enable legacy mode */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ /* enable only process isolation without submitting cleaner shader */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
+ }
+ }
return 0;
}
@@ -2689,6 +2745,13 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
break;
}
+ /* Check for IP version 9.4.3 with A0 hardware */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ !amdgpu_device_get_rev_id(adev)) {
+ dev_err(adev->dev, "Unsupported A0 hardware\n");
+ return -ENODEV; /* device unsupported - no device error */
+ }
+
if (amdgpu_has_atpx() &&
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
@@ -2701,7 +2764,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
}
-
adev->pm.pp_feature = amdgpu_pp_feature_mask;
if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
@@ -3172,6 +3234,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* always assumed to be lost.
*/
switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_LINK:
case AMD_RESET_METHOD_BACO:
case AMD_RESET_METHOD_MODE1:
return true;
@@ -3455,6 +3518,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_userq_suspend(adev);
/* Workaround for ASICs need to disable SMC first */
amdgpu_device_smu_fini_early(adev);
@@ -3510,6 +3574,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_device_mem_scratch_fini(adev);
amdgpu_ib_pool_fini(adev);
amdgpu_seq64_fini(adev);
+ amdgpu_doorbell_fini(adev);
}
if (adev->ip_blocks[i].version->funcs->sw_fini) {
r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
@@ -3643,6 +3708,13 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev, adev->ip_blocks[i].version->type))
continue;
+ /* Since we skip suspend for S0i3, we need to cancel the delayed
+ * idle work here as the suspend callback never gets called.
+ */
+ if (adev->in_s0ix &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
/* skip suspend of gfx/mes and psp for S0ix
* gfx is in gfxoff state, so on resume it will exit gfxoff just
* like at runtime. PSP is also part of the always on hardware
@@ -4299,9 +4371,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_sync_create(&adev->isolation[i].active);
amdgpu_sync_create(&adev->isolation[i].prev);
}
- mutex_init(&adev->gfx.kfd_sch_mutex);
+ mutex_init(&adev->gfx.userq_sch_mutex);
mutex_init(&adev->gfx.workload_profile_mutex);
mutex_init(&adev->vcn.workload_profile_mutex);
+ mutex_init(&adev->userq_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4321,12 +4394,16 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->virt.rlcg_reg_lock);
spin_lock_init(&adev->wb.lock);
+ xa_init_flags(&adev->userq_xa, XA_FLAGS_LOCK_IRQ);
+
INIT_LIST_HEAD(&adev->reset_list);
INIT_LIST_HEAD(&adev->ras_list);
INIT_LIST_HEAD(&adev->pm.od_kobj_list);
+ INIT_LIST_HEAD(&adev->userq_mgr_list);
+
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -4851,7 +4928,6 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
iounmap(adev->rmmio);
adev->rmmio = NULL;
- amdgpu_doorbell_fini(adev);
drm_dev_exit(idx);
}
@@ -4900,28 +4976,20 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
* @data: data
*
* This function is called when the system is about to suspend or hibernate.
- * It is used to evict resources from the device before the system goes to
- * sleep while there is still access to swap.
+ * It is used to set the appropriate flags so that eviction can be optimized
+ * in the pm prepare callback.
*/
static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
void *data)
{
struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
- int r;
switch (mode) {
case PM_HIBERNATION_PREPARE:
adev->in_s4 = true;
- fallthrough;
- case PM_SUSPEND_PREPARE:
- r = amdgpu_device_evict_resources(adev);
- /*
- * This is considered non-fatal at this time because
- * amdgpu_device_prepare() will also fatally evict resources.
- * See https://gitlab.freedesktop.org/drm/amd/-/issues/3781
- */
- if (r)
- drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r);
+ break;
+ case PM_POST_HIBERNATION:
+ adev->in_s4 = false;
break;
}
@@ -4942,15 +5010,13 @@ int amdgpu_device_prepare(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
int i, r;
- amdgpu_choose_low_power_state(adev);
-
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* Evict the majority of BOs before starting suspend sequence */
r = amdgpu_device_evict_resources(adev);
if (r)
- goto unprepare;
+ return r;
flush_delayed_work(&adev->gfx.gfx_off_delay_work);
@@ -4961,15 +5027,10 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
if (r)
- goto unprepare;
+ return r;
}
return 0;
-
-unprepare:
- adev->in_s0ix = adev->in_s3 = adev->in_s4 = false;
-
- return r;
}
/**
@@ -5011,8 +5072,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_device_ip_suspend_phase1(adev);
- if (!adev->in_s0ix)
+ if (!adev->in_s0ix) {
amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+ amdgpu_userq_suspend(adev);
+ }
r = amdgpu_device_evict_resources(adev);
if (r)
@@ -5079,6 +5142,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
if (r)
goto exit;
+
+ r = amdgpu_userq_resume(adev);
+ if (r)
+ goto exit;
}
r = amdgpu_device_ip_late_init(adev);
@@ -5127,9 +5194,6 @@ exit:
}
adev->in_suspend = false;
- if (adev->enable_mes)
- amdgpu_mes_self_test(adev);
-
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
DRM_WARN("smart shift update failed\n");
@@ -5510,6 +5574,29 @@ mode1_reset_failed:
return ret;
}
+int amdgpu_device_link_reset(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ dev_info(adev->dev, "GPU link reset\n");
+
+ if (!adev->pcie_reset_ctx.occurs_dpc)
+ ret = amdgpu_dpm_link_reset(adev);
+
+ if (ret)
+ goto link_reset_failed;
+
+ ret = amdgpu_psp_wait_for_bootloader(adev);
+ if (ret)
+ goto link_reset_failed;
+
+ return 0;
+
+link_reset_failed:
+ dev_err(adev->dev, "GPU link reset failed\n");
+ return ret;
+}
+
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context)
{
@@ -5814,6 +5901,7 @@ static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_MODE1:
+ case AMD_RESET_METHOD_LINK:
adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
break;
case AMD_RESET_METHOD_MODE2:
@@ -5930,94 +6018,42 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
return ret;
}
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu_device pointer
- * @job: which job trigger hang
- * @reset_context: amdgpu reset context pointer
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Attempt to do soft-reset or full-reset and reinitialize Asic
- * Returns 0 for success or an error on failure.
- */
-
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+static int amdgpu_device_halt_activities(struct amdgpu_device *adev,
struct amdgpu_job *job,
- struct amdgpu_reset_context *reset_context)
+ struct amdgpu_reset_context *reset_context,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive,
+ bool need_emergency_restart)
{
- struct list_head device_list, *device_list_handle = NULL;
- bool job_signaled = false;
- struct amdgpu_hive_info *hive = NULL;
+ struct list_head *device_list_handle = NULL;
struct amdgpu_device *tmp_adev = NULL;
int i, r = 0;
- bool need_emergency_restart = false;
- bool audio_suspended = false;
- int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
-
- /*
- * If it reaches here because of hang/timeout and a RAS error is
- * detected at the same time, let RAS recovery take care of it.
- */
- if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
- !amdgpu_sriov_vf(adev) &&
- reset_context->src != AMDGPU_RESET_SRC_RAS) {
- dev_dbg(adev->dev,
- "Gpu recovery from source: %d yielding to RAS error recovery handling",
- reset_context->src);
- return 0;
- }
- /*
- * Special case: RAS triggered and full reset isn't supported
- */
- need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
-
- /*
- * Flush RAM to disk so that after reboot
- * the user can read log and see why the system rebooted.
- */
- if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
- amdgpu_ras_get_context(adev)->reboot) {
- DRM_WARN("Emergency reboot.");
-
- ksys_sync_helper();
- emergency_restart();
- }
-
- dev_info(adev->dev, "GPU %s begin!\n",
- need_emergency_restart ? "jobs stop":"reset");
-
- if (!amdgpu_sriov_vf(adev))
- hive = amdgpu_get_xgmi_hive(adev);
- if (hive)
- mutex_lock(&hive->hive_lock);
- reset_context->job = job;
- reset_context->hive = hive;
/*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
* to put adev in the 1st position.
*/
- INIT_LIST_HEAD(&device_list);
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- list_add_tail(&tmp_adev->reset_list, &device_list);
+ list_add_tail(&tmp_adev->reset_list, device_list);
if (adev->shutdown)
tmp_adev->shutdown = true;
+ if (adev->pcie_reset_ctx.occurs_dpc)
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
}
- if (!list_is_first(&adev->reset_list, &device_list))
- list_rotate_to_front(&adev->reset_list, &device_list);
- device_list_handle = &device_list;
+ if (!list_is_first(&adev->reset_list, device_list))
+ list_rotate_to_front(&adev->reset_list, device_list);
+ device_list_handle = device_list;
} else {
- list_add_tail(&adev->reset_list, &device_list);
- device_list_handle = &device_list;
+ list_add_tail(&adev->reset_list, device_list);
+ device_list_handle = device_list;
}
- if (!amdgpu_sriov_vf(adev)) {
+ if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) {
r = amdgpu_device_health_check(device_list_handle);
if (r)
- goto end_reset;
+ return r;
}
/* We need to lock reset domain only once both for XGMI and single device */
@@ -6041,7 +6077,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* some audio codec errors.
*/
if (!amdgpu_device_suspend_display_audio(tmp_adev))
- audio_suspended = true;
+ tmp_adev->pcie_reset_ctx.audio_suspended = true;
amdgpu_ras_set_error_query_ready(tmp_adev, false);
@@ -6059,6 +6095,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* disable ras on ALL IPs */
if (!need_emergency_restart &&
+ (!adev->pcie_reset_ctx.occurs_dpc) &&
amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
@@ -6076,24 +6113,24 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
atomic_inc(&tmp_adev->gpu_reset_counter);
}
- if (need_emergency_restart)
- goto skip_sched_resume;
+ return r;
+}
- /*
- * Must check guilty signal here since after this point all old
- * HW fences are force signaled.
- *
- * job->base holds a reference to parent fence
- */
- if (job && dma_fence_is_signaled(&job->hw_fence)) {
- job_signaled = true;
- dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
- goto skip_hw_reset;
- }
+static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
+ int r = 0;
retry: /* Rest of adevs pre asic reset from XGMI hive. */
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ if (adev->pcie_reset_ctx.occurs_dpc)
+ tmp_adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
+ if (adev->pcie_reset_ctx.occurs_dpc)
+ tmp_adev->no_hw_access = false;
/*TODO Should we stop ?*/
if (r) {
dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -6105,6 +6142,11 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
/* Actual ASIC resets if needed.*/
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
+
+ /* Bail out of reset early */
+ if (amdgpu_ras_is_rma(adev))
+ return -ENODEV;
+
if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
amdgpu_ras_set_fed(adev, true);
@@ -6119,12 +6161,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (r)
adev->asic_reset_res = r;
} else {
- r = amdgpu_do_asic_reset(device_list_handle, reset_context);
+ r = amdgpu_do_asic_reset(device_list, reset_context);
if (r && r == -EAGAIN)
goto retry;
}
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
/*
* Drop any pending non scheduler resets queued before reset is done.
* Any reset scheduled after this point would be valid. Scheduler resets
@@ -6134,10 +6176,18 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_device_stop_pending_resets(tmp_adev);
}
-skip_hw_reset:
+ return r;
+}
+
+static int amdgpu_device_sched_resume(struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context,
+ bool job_signaled)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i, r = 0;
/* Post ASIC reset for all devs .*/
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -6173,8 +6223,16 @@ skip_hw_reset:
}
}
-skip_sched_resume:
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ return r;
+}
+
+static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
/* unlock kfd: SRIOV would do it separately */
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
@@ -6185,18 +6243,114 @@ skip_sched_resume:
if (!adev->kfd.init_complete)
amdgpu_amdkfd_device_init(adev);
- if (audio_suspended)
+ if (tmp_adev->pcie_reset_ctx.audio_suspended)
amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unset_mp1_state(tmp_adev);
amdgpu_ras_set_error_query_ready(tmp_adev, true);
+
}
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+ tmp_adev = list_first_entry(device_list, struct amdgpu_device,
reset_list);
amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+}
+
+
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu_device pointer
+ * @job: which job trigger hang
+ * @reset_context: amdgpu reset context pointer
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Attempt to do soft-reset or full-reset and reinitialize Asic
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head device_list;
+ bool job_signaled = false;
+ struct amdgpu_hive_info *hive = NULL;
+ int r = 0;
+ bool need_emergency_restart = false;
+
+ /*
+ * If it reaches here because of hang/timeout and a RAS error is
+ * detected at the same time, let RAS recovery take care of it.
+ */
+ if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
+ !amdgpu_sriov_vf(adev) &&
+ reset_context->src != AMDGPU_RESET_SRC_RAS) {
+ dev_dbg(adev->dev,
+ "Gpu recovery from source: %d yielding to RAS error recovery handling",
+ reset_context->src);
+ return 0;
+ }
+
+ /*
+ * Special case: RAS triggered and full reset isn't supported
+ */
+ need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
+
+ /*
+ * Flush RAM to disk so that after reboot
+ * the user can read log and see why the system rebooted.
+ */
+ if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
+ amdgpu_ras_get_context(adev)->reboot) {
+ DRM_WARN("Emergency reboot.");
+
+ ksys_sync_helper();
+ emergency_restart();
+ }
+
+ dev_info(adev->dev, "GPU %s begin!\n",
+ need_emergency_restart ? "jobs stop":"reset");
+
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+
+ reset_context->job = job;
+ reset_context->hive = hive;
+ INIT_LIST_HEAD(&device_list);
+
+ r = amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
+ hive, need_emergency_restart);
+ if (r)
+ goto end_reset;
+
+ if (need_emergency_restart)
+ goto skip_sched_resume;
+ /*
+ * Must check guilty signal here since after this point all old
+ * HW fences are force signaled.
+ *
+ * job->base holds a reference to parent fence
+ */
+ if (job && dma_fence_is_signaled(&job->hw_fence)) {
+ job_signaled = true;
+ dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
+ goto skip_hw_reset;
+ }
+
+ r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
+ if (r)
+ goto end_reset;
+skip_hw_reset:
+ r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
+ if (r)
+ goto end_reset;
+skip_sched_resume:
+ amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
end_reset:
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -6580,12 +6734,15 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int i;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ struct amdgpu_reset_context reset_context;
+ struct list_head device_list;
+ int r = 0;
- DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
+ dev_info(adev->dev, "PCI error: detected callback!!\n");
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- DRM_WARN("No support for XGMI hive yet...");
+ if (!amdgpu_dpm_is_link_reset_supported(adev)) {
+ dev_warn(adev->dev, "No support for XGMI hive yet...\n");
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -6593,32 +6750,30 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
switch (state) {
case pci_channel_io_normal:
+ dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state);
return PCI_ERS_RESULT_CAN_RECOVER;
- /* Fatal error, prepare for slot reset */
case pci_channel_io_frozen:
- /*
- * Locking adev->reset_domain->sem will prevent any external access
- * to GPU during PCI error recovery
- */
- amdgpu_device_lock_reset_domain(adev->reset_domain);
- amdgpu_device_set_mp1_state(adev);
-
- /*
- * Block any work scheduling as we do for regular GPU reset
- * for the duration of the recovery
- */
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!amdgpu_ring_sched_ready(ring))
- continue;
-
- drm_sched_stop(&ring->sched, NULL);
+ /* Fatal error, prepare for slot reset */
+ dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
+
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+ adev->pcie_reset_ctx.occurs_dpc = true;
+ memset(&reset_context, 0, sizeof(reset_context));
+ INIT_LIST_HEAD(&device_list);
+
+ r = amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
+ hive, false);
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
}
- atomic_inc(&adev->gpu_reset_counter);
+ if (r)
+ return PCI_ERS_RESULT_DISCONNECT;
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
+ dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -6631,8 +6786,10 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
*/
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
- DRM_INFO("PCI error: mmio enabled callback!!\n");
+ dev_info(adev->dev, "PCI error: mmio enabled callback!!\n");
/* TODO - dump whatever for debugging purposes */
@@ -6656,10 +6813,12 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int r, i;
struct amdgpu_reset_context reset_context;
- u32 memsize;
+ struct amdgpu_device *tmp_adev;
+ struct amdgpu_hive_info *hive;
struct list_head device_list;
+ int r = 0, i;
+ u32 memsize;
/* PCI error slot reset should be skipped During RAS recovery */
if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
@@ -6667,15 +6826,12 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
amdgpu_ras_in_recovery(adev))
return PCI_ERS_RESULT_RECOVERED;
- DRM_INFO("PCI error: slot reset callback!!\n");
+ dev_info(adev->dev, "PCI error: slot reset callback!!\n");
memset(&reset_context, 0, sizeof(reset_context));
- INIT_LIST_HEAD(&device_list);
- list_add_tail(&adev->reset_list, &device_list);
-
/* wait for asic to come out of reset */
- msleep(500);
+ msleep(700);
/* Restore PCI confspace */
amdgpu_device_load_pci_state(pdev);
@@ -6696,26 +6852,40 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
-
- adev->no_hw_access = true;
- r = amdgpu_device_pre_asic_reset(adev, &reset_context);
- adev->no_hw_access = false;
- if (r)
- goto out;
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+ INIT_LIST_HEAD(&device_list);
- r = amdgpu_do_asic_reset(&device_list, &reset_context);
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ reset_context.hive = hive;
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
+ } else {
+ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ list_add_tail(&adev->reset_list, &device_list);
+ }
+ r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
out:
if (!r) {
if (amdgpu_device_cache_pci_state(adev->pdev))
pci_restore_state(adev->pdev);
-
- DRM_INFO("PCIe error recovery succeeded\n");
+ dev_info(adev->dev, "PCIe error recovery succeeded\n");
} else {
- DRM_ERROR("PCIe error recovery failed, err:%d", r);
- amdgpu_device_unset_mp1_state(adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r);
+ if (hive) {
+ list_for_each_entry(tmp_adev, &device_list, reset_list)
+ amdgpu_device_unset_mp1_state(tmp_adev);
+ amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ }
+ }
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
}
return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
@@ -6732,26 +6902,36 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int i;
-
+ struct list_head device_list;
+ struct amdgpu_hive_info *hive = NULL;
+ struct amdgpu_device *tmp_adev = NULL;
- DRM_INFO("PCI error: resume callback!!\n");
+ dev_info(adev->dev, "PCI error: resume callback!!\n");
/* Only continue execution for the case of pci_channel_io_frozen */
if (adev->pci_channel_state != pci_channel_io_frozen)
return;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
+ INIT_LIST_HEAD(&device_list);
- if (!amdgpu_ring_sched_ready(ring))
- continue;
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = false;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
+ } else
+ list_add_tail(&adev->reset_list, &device_list);
- drm_sched_start(&ring->sched, 0);
- }
+ amdgpu_device_sched_resume(&device_list, NULL, NULL);
+ amdgpu_device_gpu_resume(adev, &device_list, false);
+ adev->pcie_reset_ctx.occurs_dpc = false;
- amdgpu_device_unset_mp1_state(adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
}
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index dc2713ec95a5..9e738fae2b74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -120,6 +120,8 @@ MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 9f627caedc3f..44e120f9f764 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -43,6 +43,29 @@
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops;
+
+/**
+ * dma_buf_attach_adev - Helper to get adev of an attachment
+ *
+ * @attach: attachment
+ *
+ * Returns:
+ * A struct amdgpu_device * if the attaching device is an amdgpu device or
+ * partition, NULL otherwise.
+ */
+static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach)
+{
+ if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) {
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ return amdgpu_ttm_adev(bo->tbo.bdev);
+ }
+
+ return NULL;
+}
+
/**
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
*
@@ -54,11 +77,13 @@
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attach)
{
+ struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach);
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
+ if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
+ pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
amdgpu_vm_bo_update_shared(bo);
@@ -75,11 +100,35 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
*/
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct dma_buf *dmabuf = attach->dmabuf;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(dmabuf->priv);
+ u32 domains = bo->allowed_domains;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ /* Try pinning into VRAM to allow P2P with RDMA NICs without ODP
+ * support if all attachments can do P2P. If any attachment can't do
+ * P2P just pin into GTT instead.
+ *
+	 * To avoid conflicting pinnings between GPUs and RDMA when move
+	 * notifiers are disabled, only allow pinning in VRAM when move
+	 * notifiers are enabled.
+ */
+ if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+ domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
+ } else {
+ list_for_each_entry(attach, &dmabuf->attachments, node)
+ if (!attach->peer2peer)
+ domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
+ }
- /* pin buffer into GTT */
- return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (domains & AMDGPU_GEM_DOMAIN_VRAM)
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+ if (WARN_ON(!domains))
+ return -EINVAL;
+
+ return amdgpu_bo_pin(bo, domains);
}
/**
@@ -134,9 +183,6 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return ERR_PTR(r);
-
- } else if (bo->tbo.resource->mem_type != TTM_PL_TT) {
- return ERR_PTR(-EBUSY);
}
switch (bo->tbo.resource->mem_type) {
@@ -153,6 +199,11 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
break;
case TTM_PL_VRAM:
+ /* XGMI-accessible memory should never be DMA-mapped */
+ if (WARN_ON(amdgpu_dmabuf_is_xgmi_accessible(
+ dma_buf_attach_adev(attach), bo)))
+ return ERR_PTR(-EINVAL);
+
r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
bo->tbo.base.size, attach->dev,
dir, &sgt);
@@ -184,7 +235,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- if (sgt->sgl->page_link) {
+ if (sg_page(sgt->sgl)) {
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt);
kfree(sgt);
@@ -459,6 +510,9 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
struct drm_gem_object *obj = &bo->tbo.base;
struct drm_gem_object *gobj;
+ if (!adev)
+ return false;
+
if (obj->import_attach) {
struct dma_buf *dma_buf = obj->import_attach->dmabuf;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 26bf896f1444..4ddd08ce8885 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -51,6 +51,8 @@
#include "amdgpu_reset.h"
#include "amdgpu_sched.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
#include "../amdxcp/amdgpu_xcp_drv.h"
/*
@@ -123,9 +125,10 @@
* - 3.61.0 - Contains fix for RV/PCO compute queues
* - 3.62.0 - Add AMDGPU_IDS_FLAGS_MODE_PF, AMDGPU_IDS_FLAGS_MODE_VF & AMDGPU_IDS_FLAGS_MODE_PT
* - 3.63.0 - GFX12 display DCC supports 256B max compressed block size
+ * - 3.64.0 - Userq IP support query
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 63
+#define KMS_DRIVER_MINOR 64
#define KMS_DRIVER_PATCHLEVEL 0
/*
@@ -140,6 +143,7 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
AMDGPU_DEBUG_SMU_POOL = BIT(7),
+ AMDGPU_DEBUG_VM_USERPTR = BIT(8),
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -176,7 +180,7 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu;
char *amdgpu_virtual_display;
-bool enforce_isolation;
+int amdgpu_enforce_isolation = -1;
int amdgpu_modeset = -1;
/* Specifies the default granularity for SVM, used in buffer
@@ -238,6 +242,8 @@ int amdgpu_agp = -1; /* auto */
int amdgpu_wbrf = -1;
int amdgpu_damage_clips = -1; /* auto */
int amdgpu_umsch_mm_fwlog;
+int amdgpu_rebar = -1; /* auto */
+int amdgpu_user_queue = -1;
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
@@ -1033,11 +1039,13 @@ module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
/**
- * DOC: enforce_isolation (bool)
- * enforce process isolation between graphics and compute via using the same reserved vmid.
+ * DOC: enforce_isolation (int)
+ * enforce process isolation between graphics and compute.
+ * (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)
*/
-module_param(enforce_isolation, bool, 0444);
-MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on");
+module_param_named(enforce_isolation, amdgpu_enforce_isolation, int, 0444);
+MODULE_PARM_DESC(enforce_isolation,
+"enforce process isolation between graphics and compute. (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)");
/**
* DOC: modeset (int)
@@ -1096,6 +1104,28 @@ MODULE_PARM_DESC(wbrf,
"Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
module_param_named(wbrf, amdgpu_wbrf, int, 0444);
+/**
+ * DOC: rebar (int)
+ * Allow BAR resizing. Disable this to prevent the driver from attempting
+ * to resize the BAR if the GPU supports it and there is available MMIO space.
+ * Note that this just prevents the driver from resizing the BAR. The BIOS
+ * may have already resized the BAR at boot time.
+ */
+MODULE_PARM_DESC(rebar, "Resizable BAR (-1 = auto (default), 0 = disable, 1 = enable)");
+module_param_named(rebar, amdgpu_rebar, int, 0444);
+
+/**
+ * DOC: user_queue (int)
+ * Enable user queues on systems that support user queues. Possible values:
+ *
+ * - -1 = auto (ASIC specific default)
+ * - 0 = user queues disabled
+ * - 1 = user queues enabled and kernel queues enabled (if supported)
+ * - 2 = user queues enabled and kernel queues disabled
+ */
+MODULE_PARM_DESC(user_queue, "Enable user queues (-1 = auto (default), 0 = disable, 1 = enable, 2 = enable UQs and disable KQs)");
+module_param_named(user_queue, amdgpu_user_queue, int, 0444);
+
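The user_queue parameter above, like rebar, uses -1 to defer the decision to an ASIC-specific default chosen at init time. A minimal sketch of how such an integer option is typically folded into two per-device flags is shown below; the function name, the output flags and the "auto" fallback are illustrative assumptions for this sketch, not code from this patch.

	/* Illustrative sketch only: resolve the user_queue module parameter.
	 * Names are placeholders; the real per-ASIC defaults for the auto
	 * case are applied elsewhere in the driver.
	 */
	static void example_resolve_user_queue(int user_queue_param,
					       bool *user_queues,
					       bool *kernel_queues)
	{
		switch (user_queue_param) {
		case 0:		/* user queues disabled */
			*user_queues = false;
			*kernel_queues = true;
			break;
		case 1:		/* user queues and kernel queues enabled */
			*user_queues = true;
			*kernel_queues = true;
			break;
		case 2:		/* user queues only, kernel queues disabled */
			*user_queues = true;
			*kernel_queues = false;
			break;
		case -1:	/* auto: conservative placeholder default */
		default:
			*user_queues = false;
			*kernel_queues = true;
			break;
		}
	}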
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
@@ -2244,6 +2274,10 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: use vram for smu pool\n");
adev->pm.smu_debug_mask |= SMU_DEBUG_POOL_USE_VRAM;
}
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_VM_USERPTR) {
+ pr_info("debug: VM mode debug for userptr is enabled\n");
+ adev->debug_vm_userptr = true;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2548,8 +2582,20 @@ static int amdgpu_pmops_suspend(struct device *dev)
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
- if (!adev->in_s0ix && !adev->in_s3)
+ if (!adev->in_s0ix && !adev->in_s3) {
+ /* don't allow going deep first time followed by s2idle the next time */
+ if (adev->last_suspend_state != PM_SUSPEND_ON &&
+ adev->last_suspend_state != pm_suspend_target_state) {
+ drm_err_once(drm_dev, "Unsupported suspend state %d\n",
+ pm_suspend_target_state);
+ return -EINVAL;
+ }
return 0;
+ }
+
+ /* cache the state last used for suspend */
+ adev->last_suspend_state = pm_suspend_target_state;
+
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2603,13 +2649,8 @@ static int amdgpu_pmops_freeze(struct device *dev)
static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(drm_dev);
- int r;
-
- r = amdgpu_device_resume(drm_dev, true);
- adev->in_s4 = false;
- return r;
+ return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
@@ -2622,9 +2663,6 @@ static int amdgpu_pmops_poweroff(struct device *dev)
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(drm_dev);
-
- adev->in_s4 = false;
return amdgpu_device_resume(drm_dev, true);
}
@@ -2696,6 +2734,29 @@ static int amdgpu_runtime_idle_check_display(struct device *dev)
return 0;
}
+static int amdgpu_runtime_idle_check_userq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+done:
+ mutex_unlock(&adev->userq_mutex);
+
+ return ret;
+}
+
static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2711,6 +2772,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
ret = amdgpu_runtime_idle_check_display(dev);
if (ret)
return ret;
+ ret = amdgpu_runtime_idle_check_userq(dev);
+ if (ret)
+ return ret;
/* wait for all rings to drain before suspending */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -2832,12 +2896,30 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
}
ret = amdgpu_runtime_idle_check_display(dev);
+ if (ret)
+ goto done;
+ ret = amdgpu_runtime_idle_check_userq(dev);
+done:
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return ret;
}
+static int amdgpu_drm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
+ if (fpriv) {
+ fpriv->evf_mgr.fd_closing = true;
+ amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
+ }
+
+ return drm_release(inode, filp);
+}
+
long amdgpu_drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
@@ -2889,7 +2971,7 @@ static const struct file_operations amdgpu_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.flush = amdgpu_flush,
- .release = drm_release,
+ .release = amdgpu_drm_release,
.unlocked_ioctl = amdgpu_drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
@@ -2936,6 +3018,9 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct drm_driver amdgpu_kms_driver = {
@@ -3026,6 +3111,10 @@ static int __init amdgpu_init(void)
if (r)
goto error_fence;
+ r = amdgpu_userq_fence_slab_init();
+ if (r)
+ goto error_fence;
+
DRM_INFO("amdgpu kernel modesetting enabled.\n");
amdgpu_register_atpx_handler();
amdgpu_acpi_detect();
@@ -3057,6 +3146,7 @@ static void __exit amdgpu_exit(void)
amdgpu_acpi_release();
amdgpu_sync_fini();
amdgpu_fence_slab_fini();
+ amdgpu_userq_fence_slab_fini();
mmu_notifier_synchronize();
amdgpu_xcp_drv_release();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
new file mode 100644
index 000000000000..73b629b5f56f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/sched.h>
+#include <drm/drm_exec.h>
+#include "amdgpu.h"
+
+#define work_to_evf_mgr(w, name) container_of(w, struct amdgpu_eviction_fence_mgr, name)
+#define evf_mgr_to_fpriv(e) container_of(e, struct amdgpu_fpriv, evf_mgr)
+
+static const char *
+amdgpu_eviction_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "amdgpu_eviction_fence";
+}
+
+static const char *
+amdgpu_eviction_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence *ef;
+
+ ef = container_of(f, struct amdgpu_eviction_fence, base);
+ return ef->timeline_name;
+}
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec)
+{
+ struct amdgpu_eviction_fence *old_ef, *new_ef;
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int ret;
+
+ if (evf_mgr->ev_fence &&
+ !dma_fence_is_signaled(&evf_mgr->ev_fence->base))
+ return 0;
+ /*
+ * Steps to replace eviction fence:
+ * * lock all objects in exec (caller)
+ * * create a new eviction fence
+ * * update new eviction fence in evf_mgr
+ * * attach the new eviction fence to BOs
+ * * release the old fence
+ * * unlock the objects (caller)
+ */
+ new_ef = amdgpu_eviction_fence_create(evf_mgr);
+ if (!new_ef) {
+ DRM_ERROR("Failed to create new eviction fence\n");
+ return -ENOMEM;
+ }
+
+ /* Update the eviction fence now */
+ spin_lock(&evf_mgr->ev_fence_lock);
+ old_ef = evf_mgr->ev_fence;
+ evf_mgr->ev_fence = new_ef;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ /* Attach the new fence */
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ if (!bo)
+ continue;
+ ret = amdgpu_eviction_fence_attach(evf_mgr, bo);
+ if (ret) {
+			DRM_ERROR("Failed to attach new eviction fence\n");
+ goto free_err;
+ }
+ }
+
+ /* Free old fence */
+ if (old_ef)
+ dma_fence_put(&old_ef->base);
+ return 0;
+
+free_err:
+ kfree(new_ef);
+ return ret;
+}
+
+static void
+amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr = work_to_evf_mgr(work, suspend_work.work);
+ struct amdgpu_fpriv *fpriv = evf_mgr_to_fpriv(evf_mgr);
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ ev_fence = evf_mgr->ev_fence;
+ if (!ev_fence)
+ goto unlock;
+
+ amdgpu_userq_evict(uq_mgr, ev_fence);
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ if (!f)
+ return true;
+
+ ev_fence = to_ev_fence(f);
+ evf_mgr = ev_fence->evf_mgr;
+
+ schedule_delayed_work(&evf_mgr->suspend_work, 0);
+ return true;
+}
+
+static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
+ .use_64bit_seqno = true,
+ .get_driver_name = amdgpu_eviction_fence_get_driver_name,
+ .get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
+ .enable_signaling = amdgpu_eviction_fence_enable_signaling,
+};
+
+void amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ spin_lock(&evf_mgr->ev_fence_lock);
+ dma_fence_signal(&ev_fence->base);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+}
+
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ ev_fence = kzalloc(sizeof(*ev_fence), GFP_KERNEL);
+ if (!ev_fence)
+ return NULL;
+
+ ev_fence->evf_mgr = evf_mgr;
+ get_task_comm(ev_fence->timeline_name, current);
+ spin_lock_init(&ev_fence->lock);
+ dma_fence_init(&ev_fence->base, &amdgpu_eviction_fence_ops,
+ &ev_fence->lock, evf_mgr->ev_fence_ctx,
+ atomic_inc_return(&evf_mgr->ev_fence_seq));
+ return ev_fence;
+}
+
+void amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ /* Wait for any pending work to execute */
+ flush_delayed_work(&evf_mgr->suspend_work);
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ if (!ev_fence)
+ return;
+
+ dma_fence_wait(&ev_fence->base, false);
+
+ /* Last unref of ev_fence */
+ dma_fence_put(&ev_fence->base);
+}
+
+int amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+ struct dma_resv *resv = bo->tbo.base.resv;
+ int ret;
+
+ if (!resv)
+ return 0;
+
+ ret = dma_resv_reserve_fences(resv, 1);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Failed to resv fence space\n");
+ return ret;
+ }
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ if (ev_fence)
+ dma_resv_add_fence(resv, &ev_fence->base, DMA_RESV_USAGE_BOOKKEEP);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ return 0;
+}
+
+void amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct dma_fence *stub = dma_fence_get_stub();
+
+ dma_resv_replace_fences(bo->tbo.base.resv, evf_mgr->ev_fence_ctx,
+ stub, DMA_RESV_USAGE_BOOKKEEP);
+ dma_fence_put(stub);
+}
+
+int amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ /* This needs to be done one time per open */
+ atomic_set(&evf_mgr->ev_fence_seq, 0);
+ evf_mgr->ev_fence_ctx = dma_fence_context_alloc(1);
+ spin_lock_init(&evf_mgr->ev_fence_lock);
+
+ INIT_DELAYED_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
new file mode 100644
index 000000000000..fcd867b7147d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_EV_FENCE_H_
+#define AMDGPU_EV_FENCE_H_
+
+struct amdgpu_eviction_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+ char timeline_name[TASK_COMM_LEN];
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+};
+
+struct amdgpu_eviction_fence_mgr {
+ u64 ev_fence_ctx;
+ atomic_t ev_fence_seq;
+ spinlock_t ev_fence_lock;
+ struct amdgpu_eviction_fence *ev_fence;
+ struct delayed_work suspend_work;
+ uint8_t fd_closing;
+};
+
+/* Eviction fence helper functions */
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+int
+amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+void
+amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+int
+amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec);
+#endif
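Read together with amdgpu_eviction_fence.c above, the per-fd lifecycle of this manager is: initialize it once when the DRM file is opened (the init call site is outside this hunk), attach the shared fence to each BO opened in that VM (as amdgpu_gem_object_open does below), and set fd_closing and destroy the manager on release (as amdgpu_drm_release does in amdgpu_drv.c). A condensed sketch of that flow, with error handling and the userq teardown trimmed and the open/close wrappers reduced to illustrative stubs:

	/* Condensed lifecycle sketch; only the amdgpu_eviction_fence_* calls
	 * and the fd_closing flag come from the new API, the wrappers are
	 * stand-ins for the driver's open/close paths.
	 */
	static int example_driver_open(struct amdgpu_fpriv *fpriv)
	{
		/* one eviction fence manager per open file descriptor */
		return amdgpu_eviction_fence_init(&fpriv->evf_mgr);
	}

	static int example_gem_open(struct amdgpu_fpriv *fpriv,
				    struct amdgpu_bo *bo)
	{
		/* every BO opened in this VM carries the shared eviction fence */
		return amdgpu_eviction_fence_attach(&fpriv->evf_mgr, bo);
	}

	static void example_driver_release(struct amdgpu_fpriv *fpriv)
	{
		/* flush the suspend worker, wait for and drop the final fence */
		fpriv->evf_mgr.fd_closing = true;
		amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
	}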
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 69429df09477..2c68118fe9fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,6 +36,7 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_tt.h>
+#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
@@ -44,6 +45,114 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_vm.h"
+static int
+amdgpu_gem_add_input_fence(struct drm_file *filp,
+ uint64_t syncobj_handles_array,
+ uint32_t num_syncobj_handles)
+{
+ struct dma_fence *fence;
+ uint32_t *syncobj_handles;
+ int ret, i;
+
+ if (!num_syncobj_handles)
+ return 0;
+
+ syncobj_handles = memdup_user(u64_to_user_ptr(syncobj_handles_array),
+ sizeof(uint32_t) * num_syncobj_handles);
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ for (i = 0; i < num_syncobj_handles; i++) {
+
+ if (!syncobj_handles[i]) {
+ ret = -EINVAL;
+ goto free_memdup;
+ }
+
+ ret = drm_syncobj_find_fence(filp, syncobj_handles[i], 0, 0, &fence);
+ if (ret)
+ goto free_memdup;
+
+ dma_fence_wait(fence, false);
+
+ /* TODO: optimize async handling */
+ dma_fence_put(fence);
+ }
+
+free_memdup:
+ kfree(syncobj_handles);
+ return ret;
+}
+
+static int
+amdgpu_gem_update_timeline_node(struct drm_file *filp,
+ uint32_t syncobj_handle,
+ uint64_t point,
+ struct drm_syncobj **syncobj,
+ struct dma_fence_chain **chain)
+{
+ if (!syncobj_handle)
+ return 0;
+
+ /* Find the sync object */
+ *syncobj = drm_syncobj_find(filp, syncobj_handle);
+ if (!*syncobj)
+ return -ENOENT;
+
+ if (!point)
+ return 0;
+
+ /* Allocate the chain node */
+ *chain = dma_fence_chain_alloc();
+ if (!*chain) {
+ drm_syncobj_put(*syncobj);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+amdgpu_gem_update_bo_mapping(struct drm_file *filp,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation,
+ uint64_t point,
+ struct dma_fence *fence,
+ struct drm_syncobj *syncobj,
+ struct dma_fence_chain *chain)
+{
+ struct amdgpu_bo *bo = bo_va ? bo_va->base.bo : NULL;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct dma_fence *last_update;
+
+ if (!syncobj)
+ return;
+
+ /* Find the last update fence */
+ switch (operation) {
+ case AMDGPU_VA_OP_MAP:
+ case AMDGPU_VA_OP_REPLACE:
+ if (bo && (bo->tbo.base.resv == vm->root.bo->tbo.base.resv))
+ last_update = vm->last_update;
+ else
+ last_update = bo_va->last_pt_update;
+ break;
+ case AMDGPU_VA_OP_UNMAP:
+ case AMDGPU_VA_OP_CLEAR:
+ last_update = fence;
+ break;
+ default:
+ return;
+ }
+
+ /* Add fence to timeline */
+ if (!point)
+ drm_syncobj_replace_fence(syncobj, last_update);
+ else
+ drm_syncobj_add_point(syncobj, chain, last_update, point);
+}
+
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
@@ -184,6 +293,15 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
else
++bo_va->ref_count;
+
+ /* attach gfx eviction fence */
+ r = amdgpu_eviction_fence_attach(&fpriv->evf_mgr, abo);
+ if (r) {
+ DRM_DEBUG_DRIVER("Failed to attach eviction fence to BO\n");
+ amdgpu_bo_unreserve(abo);
+ return r;
+ }
+
amdgpu_bo_unreserve(abo);
/* Validate and add eviction fence to DMABuf imports with dynamic
@@ -247,6 +365,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
goto out_unlock;
}
+ if (!amdgpu_vm_is_bo_always_valid(vm, bo))
+ amdgpu_eviction_fence_detach(&fpriv->evf_mgr, bo);
+
bo_va = amdgpu_vm_bo_find(vm, bo);
if (!bo_va || --bo_va->ref_count)
goto out_unlock;
@@ -321,10 +442,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle, initial_domain;
int r;
- /* reject DOORBELLs until userspace code to use it is available */
- if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
- return -EINVAL;
-
/* reject invalid gem flags */
if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
@@ -638,18 +755,23 @@ out:
*
* Update the bo_va directly after setting its address. Errors are not
* vital here, so they are not reported back to userspace.
+ *
+ * Returns the resulting fence if freed BO(s) were cleared from the PT,
+ * otherwise a stub fence in case of error.
*/
-static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo_va *bo_va,
- uint32_t operation)
+static struct dma_fence *
+amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation)
{
+ struct dma_fence *fence = dma_fence_get_stub();
int r;
if (!amdgpu_vm_ready(vm))
- return;
+ return fence;
- r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ r = amdgpu_vm_clear_freed(adev, vm, &fence);
if (r)
goto error;
@@ -665,6 +787,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
+
+ return fence;
}
/**
@@ -713,6 +837,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
+ struct drm_syncobj *timeline_syncobj = NULL;
+ struct dma_fence_chain *timeline_chain = NULL;
+ struct dma_fence *fence;
struct drm_exec exec;
uint64_t va_flags;
uint64_t vm_size;
@@ -774,6 +901,12 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
abo = NULL;
}
+ r = amdgpu_gem_add_input_fence(filp,
+ args->input_fence_syncobj_handles,
+ args->num_syncobj_handles);
+ if (r)
+ goto error_put_gobj;
+
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
@@ -802,6 +935,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
bo_va = NULL;
}
+ r = amdgpu_gem_update_timeline_node(filp,
+ args->vm_timeline_syncobj_out,
+ args->vm_timeline_point,
+ &timeline_syncobj,
+ &timeline_chain);
+ if (r)
+ goto error;
+
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
@@ -827,12 +968,24 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
default:
break;
}
- if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm)
- amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
- args->operation);
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
+ fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
+ args->operation);
+
+ if (timeline_syncobj)
+ amdgpu_gem_update_bo_mapping(filp, bo_va,
+ args->operation,
+ args->vm_timeline_point,
+ fence, timeline_syncobj,
+ timeline_chain);
+ else
+ dma_fence_put(fence);
+
+ }
error:
drm_exec_fini(&exec);
+error_put_gobj:
drm_gem_object_put(gobj);
return r;
}
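The amdgpu_gem.c hunks above add optional syncobj plumbing to the GEM VA ioctl: any handles passed via input_fence_syncobj_handles are waited on before the mapping is applied, and the resulting page-table update fence can be attached to a timeline syncobj point through vm_timeline_syncobj_out/vm_timeline_point. Below is a hedged user-space sketch of that flow; the pre-existing drm_amdgpu_gem_va fields follow amdgpu_drm.h and the new field names are taken from the kernel code above, but the exact uapi layout is not part of this excerpt, so treat the struct usage as illustrative only.

/* Hypothetical user-space sketch; not part of this patch. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int map_bo_with_timeline(int fd, uint32_t bo_handle, uint64_t va,
				uint64_t size, uint32_t in_syncobj,
				uint32_t timeline_syncobj, uint64_t point)
{
	struct drm_amdgpu_gem_va args;

	memset(&args, 0, sizeof(args));
	args.handle = bo_handle;
	args.operation = AMDGPU_VA_OP_MAP;
	args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
	args.va_address = va;
	args.offset_in_bo = 0;
	args.map_size = size;

	/* New in this series: syncobjs waited on before the mapping is applied. */
	args.input_fence_syncobj_handles = (uintptr_t)&in_syncobj;
	args.num_syncobj_handles = 1;

	/* New in this series: timeline point signalled with the PT update fence. */
	args.vm_timeline_syncobj_out = timeline_syncobj;
	args.vm_timeline_point = point;

	return drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &args);
}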
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 72af5e5a894a..1db1e6ec0184 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -33,6 +33,7 @@
#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"
+#include "nvd.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -74,14 +75,15 @@ bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}
-int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
- int me, int pipe, int queue)
+static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
+ int me, int pipe, int queue)
{
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
int bit = 0;
bit += me * adev->gfx.me.num_pipe_per_me
- * adev->gfx.me.num_queue_per_pipe;
- bit += pipe * adev->gfx.me.num_queue_per_pipe;
+ * num_queue_per_pipe;
+ bit += pipe * num_queue_per_pipe;
bit += queue;
return bit;
@@ -238,8 +240,8 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe;
bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
- int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
- adev->gfx.me.num_queue_per_pipe;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
+ int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe;
if (multipipe_policy) {
/* policy: amdgpu owns the first queue per pipe at this stage
@@ -247,9 +249,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
for (i = 0; i < max_queues_per_me; i++) {
pipe = i % adev->gfx.me.num_pipe_per_me;
queue = (i / adev->gfx.me.num_pipe_per_me) %
- adev->gfx.me.num_queue_per_pipe;
+ num_queue_per_pipe;
- set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
+ set_bit(pipe * num_queue_per_pipe + queue,
adev->gfx.me.queue_bitmap);
}
} else {
@@ -258,8 +260,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
}
/* update the number of active graphics rings */
- adev->gfx.num_gfx_rings =
- bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
+ if (adev->gfx.num_gfx_rings)
+ adev->gfx.num_gfx_rings =
+ bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
@@ -1351,6 +1354,10 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int mode;
+ /* Only minimal precaution taken to reject requests while in reset. */
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
AMDGPU_XCP_FL_NONE);
@@ -1394,8 +1401,14 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
return -EINVAL;
}
+ /* Don't allow a switch while under reset */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -EPERM;
+
ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
+ up_read(&adev->reset_domain->sem);
+
if (ret)
return ret;
@@ -1438,9 +1451,11 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct drm_gpu_scheduler *sched = &ring->sched;
struct drm_sched_entity entity;
+ static atomic_t counter;
struct dma_fence *f;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
+ void *owner;
int i, r;
/* Initialize the scheduler entity */
@@ -1451,13 +1466,21 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
goto err;
}
- r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
- 64, 0,
- &job);
+ /*
+ * Use some unique dummy value as the owner to make sure we execute
+ * the cleaner shader on each submission. The value just needs to change
+ * for each submission and is otherwise meaningless.
+ */
+ owner = (void *)(unsigned long)atomic_inc_return(&counter);
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
+ 64, 0, &job);
if (r)
goto err;
job->enforce_isolation = true;
+ /* always run the cleaner shader */
+ job->run_cleaner_shader = true;
ib = &job->ibs[0];
for (i = 0; i <= ring->funcs->align_mask; ++i)
@@ -1544,6 +1567,9 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
+ if (adev->gfx.disable_kq)
+ return -EPERM;
+
ret = kstrtol(buf, 0, &value);
if (ret)
@@ -1586,7 +1612,8 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
* Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
* feature for each GPU partition. Reading from the 'enforce_isolation'
* sysfs file returns the isolation settings for all partitions, where '0'
- * indicates disabled and '1' indicates enabled.
+ * indicates disabled, '1' indicates enabled, '2' indicates enabled in legacy mode,
+ * and '3' indicates enabled without the cleaner shader.
*
* Return: The number of bytes read from the sysfs file.
*/
@@ -1621,9 +1648,12 @@ static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
* @count: The size of the input data
*
* This function allows control over the 'enforce_isolation' feature, which
- * serializes access to the graphics engine. Writing '1' or '0' to the
- * 'enforce_isolation' sysfs file enables or disables process isolation for
- * each partition. The input should specify the setting for all partitions.
+ * serializes access to the graphics engine. Writing to the 'enforce_isolation'
+ * sysfs file sets the isolation mode for each partition: '0' disables
+ * isolation, '1' enables isolation with the cleaner shader, '2' enables
+ * legacy isolation without the cleaner shader, and '3' enables process
+ * isolation without submitting the cleaner shader. The input should specify
+ * the setting for all partitions.
*
* Return: The number of bytes written to the sysfs file.
*/
@@ -1660,13 +1690,34 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
return -EINVAL;
for (i = 0; i < num_partitions; i++) {
- if (partition_values[i] != 0 && partition_values[i] != 1)
+ if (partition_values[i] != 0 &&
+ partition_values[i] != 1 &&
+ partition_values[i] != 2 &&
+ partition_values[i] != 3)
return -EINVAL;
}
mutex_lock(&adev->enforce_isolation_mutex);
- for (i = 0; i < num_partitions; i++)
- adev->enforce_isolation[i] = partition_values[i];
+ for (i = 0; i < num_partitions; i++) {
+ switch (partition_values[i]) {
+ case 0:
+ default:
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
+ }
+ }
mutex_unlock(&adev->enforce_isolation_mutex);
amdgpu_mes_update_enforce_isolation(adev);
@@ -1915,39 +1966,41 @@ void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
bool enable)
{
- mutex_lock(&adev->gfx.kfd_sch_mutex);
+ mutex_lock(&adev->gfx.userq_sch_mutex);
if (enable) {
/* If the count is already 0, it means there's an imbalance bug somewhere.
* Note that the bug may be in a different caller than the one which triggers the
* WARN_ON_ONCE.
*/
- if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
+ if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) {
dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
goto unlock;
}
- adev->gfx.kfd_sch_req_count[idx]--;
+ adev->gfx.userq_sch_req_count[idx]--;
- if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
- adev->gfx.kfd_sch_inactive[idx]) {
+ if (adev->gfx.userq_sch_req_count[idx] == 0 &&
+ adev->gfx.userq_sch_inactive[idx]) {
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
}
} else {
- if (adev->gfx.kfd_sch_req_count[idx] == 0) {
+ if (adev->gfx.userq_sch_req_count[idx] == 0) {
cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
- if (!adev->gfx.kfd_sch_inactive[idx]) {
- amdgpu_amdkfd_stop_sched(adev, idx);
- adev->gfx.kfd_sch_inactive[idx] = true;
+ if (!adev->gfx.userq_sch_inactive[idx]) {
+ amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
+ amdgpu_amdkfd_stop_sched(adev, idx);
+ adev->gfx.userq_sch_inactive[idx] = true;
}
}
- adev->gfx.kfd_sch_req_count[idx]++;
+ adev->gfx.userq_sch_req_count[idx]++;
}
unlock:
- mutex_unlock(&adev->gfx.kfd_sch_mutex);
+ mutex_unlock(&adev->gfx.userq_sch_mutex);
}
/**
@@ -1992,12 +2045,13 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
msecs_to_jiffies(1));
} else {
/* Tell KFD to resume the runqueue */
- if (adev->kfd.init_complete) {
- WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
- WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
+ WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
+ WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
+
+ amdgpu_userq_start_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
amdgpu_amdkfd_start_sched(adev, idx);
- adev->gfx.kfd_sch_inactive[idx] = false;
- }
+ adev->gfx.userq_sch_inactive[idx] = false;
}
mutex_unlock(&adev->enforce_isolation_mutex);
}
@@ -2021,7 +2075,7 @@ amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
bool wait = false;
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
/* set the initial values if nothing is set */
if (!adev->gfx.enforce_isolation_jiffies[idx]) {
adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
@@ -2088,7 +2142,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
if (adev->kfd.init_complete)
sched_work = true;
}
@@ -2125,7 +2179,7 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
return;
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
if (adev->kfd.init_complete)
sched_work = true;
}
@@ -2209,6 +2263,74 @@ void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
}
+/**
+ * amdgpu_gfx_csb_preamble_start - Set CSB preamble start
+ *
+ * @buffer: This is an output variable that gets the PACKET3 preamble setup.
+ *
+ * Return:
+ * The next free index in @buffer.
+ */
+u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
+{
+ u32 count = 0;
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_data_parser - Parse CS data
+ *
+ * @adev: amdgpu_device pointer used to get the CS data and other gfx info.
+ * @buffer: This is an output variable that gets the PACKET3 CS section data.
+ * @count: Index in @buffer at which to start writing.
+ *
+ * Return:
+ * The next free index in @buffer.
+ */
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count)
+{
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+ u32 i;
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
+ }
+ }
+ }
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_preamble_end - Set CSB preamble end
+ *
+ * @buffer: This is an output variable that gets the PACKET3 preamble end.
+ * @count: Index in @buffer at which to start writing the preamble end.
+ */
+void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count)
+{
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
+}
+
/*
* debugfs for to enable/disable gfx job submission to specific core.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 87e862188766..08f268dab8f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -170,10 +170,46 @@ struct amdgpu_kiq {
#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2
+/**
+ * struct amdgpu_rb_config - Configuration of a single Render Backend (RB)
+ *
+ * Bad RBs are fused off. The driver reads a harvest register to determine
+ * which RB(s) are fused off so that it can configure the hardware state to
+ * send nothing to them. There are also user harvest registers that the
+ * driver can program to disable additional RBs, e.g. for testing
+ * purposes.
+ */
struct amdgpu_rb_config {
+ /**
+ * @rb_backend_disable:
+ *
+ * The value captured from register RB_BACKEND_DISABLE indicates if the
+ * RB backend is disabled or not.
+ */
uint32_t rb_backend_disable;
+
+ /**
+ * @user_rb_backend_disable:
+ *
+ * The value captured from register USER_RB_BACKEND_DISABLE indicates
+ * if the User RB backend is disabled or not.
+ */
uint32_t user_rb_backend_disable;
+
+ /**
+ * @raster_config:
+ *
+ * Two registers are needed to hold the full raster configuration state.
+ * This field holds the first register.
+ */
uint32_t raster_config;
+
+ /**
+ * @raster_config_1:
+ *
+ * Two registers are needed to hold the full raster configuration state.
+ * This field holds the second register.
+ */
uint32_t raster_config_1;
};
@@ -221,6 +257,13 @@ struct amdgpu_gfx_config {
uint32_t macrotile_mode_array[16];
struct gb_addr_config gb_addr_config_fields;
+
+ /**
+ * @rb_config:
+ *
+ * Matrix that keeps all the Render Backend (color and depth buffer
+ * handling) configuration on the 3D engine.
+ */
struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
/* gfx configure feature */
@@ -305,7 +348,8 @@ struct amdgpu_gfx_funcs {
void (*init_spm_golden)(struct amdgpu_device *adev);
void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
int (*get_gfx_shadow_info)(struct amdgpu_device *adev,
- struct amdgpu_gfx_shadow_info *shadow_info);
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check);
enum amdgpu_gfx_partition
(*query_partition_mode)(struct amdgpu_device *adev);
int (*switch_partition_mode)(struct amdgpu_device *adev,
@@ -474,9 +518,9 @@ struct amdgpu_gfx {
bool enable_cleaner_shader;
struct amdgpu_isolation_work enforce_isolation[MAX_XCP];
/* Mutex for synchronizing KFD scheduler operations */
- struct mutex kfd_sch_mutex;
- u64 kfd_sch_req_count[MAX_XCP];
- bool kfd_sch_inactive[MAX_XCP];
+ struct mutex userq_sch_mutex;
+ u64 userq_sch_req_count[MAX_XCP];
+ bool userq_sch_inactive[MAX_XCP];
unsigned long enforce_isolation_jiffies[MAX_XCP];
unsigned long enforce_isolation_time[MAX_XCP];
@@ -484,6 +528,9 @@ struct amdgpu_gfx {
struct delayed_work idle_work;
bool workload_profile_active;
struct mutex workload_profile_mutex;
+
+ bool disable_kq;
+ bool disable_uq;
};
struct amdgpu_gfx_ras_reg_entry {
@@ -503,7 +550,7 @@ struct amdgpu_gfx_ras_mem_id_entry {
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id)))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id)))
#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
-#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si)))
+#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si), false))
/**
* amdgpu_gfx_create_bitmask - create a bitmask
@@ -550,8 +597,6 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
-int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
- int pipe, int queue);
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
@@ -597,6 +642,9 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
+u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer);
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count);
+void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count);
void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
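The three CSB helpers added to amdgpu_gfx.c above (and declared here) factor out the clear-state-buffer construction that the per-IP GFX code previously open-coded. A minimal sketch of how they compose follows, assuming a hypothetical gfx_vN_0_get_csb_buffer() backend routine; only the three amdgpu_gfx_csb_* calls come from this patch.

/* Illustrative only: the surrounding function and its checks are assumed. */
static void gfx_vN_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count;

	if (adev->gfx.rlc.cs_data == NULL || buffer == NULL)
		return;

	/* PREAMBLE_BEGIN_CLEAR_STATE + CONTEXT_CONTROL */
	count = amdgpu_gfx_csb_preamble_start(buffer);

	/* SET_CONTEXT_REG packets built from adev->gfx.rlc.cs_data */
	count = amdgpu_gfx_csb_data_parser(adev, buffer, count);

	/* PREAMBLE_END_CLEAR_STATE + CLEAR_STATE */
	amdgpu_gfx_csb_preamble_end(buffer, count);
}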
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 464625282872..6b0fbbb91e57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -699,12 +699,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
uint32_t flush_type, bool all_hub,
uint32_t inst)
{
- u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT :
- adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
- int r;
+ int r, cnt = 0;
uint32_t seq;
/*
@@ -761,10 +759,21 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
+ !amdgpu_reset_pending(adev->reset_domain)) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY) {
dev_err(adev->dev, "timeout waiting for kiq fence\n");
r = -ETIME;
- }
+ } else
+ r = 0;
}
error_unlock_reset:
@@ -1221,6 +1230,10 @@ static ssize_t current_memory_partition_show(
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amdgpu_memory_partition mode;
+ /* Only minimal precaution taken to reject requests while in reset */
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
if ((mode >= ARRAY_SIZE(nps_desc)) ||
(BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index bd7fc123b8f9..80fa29c26e9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -62,6 +62,9 @@
*/
#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
+/* XNACK flags */
+#define AMDGPU_GMC_XNACK_FLAG_CHAIN BIT(0)
+
struct firmware;
enum amdgpu_memory_partition {
@@ -301,6 +304,7 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
int noretry;
+ uint32_t xnack_flags;
uint32_t vmid0_page_table_block_size;
uint32_t vmid0_page_table_depth;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
index b6cf801939aa..6e02fb9ac2f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -22,6 +22,7 @@
*/
#include "amdgpu.h"
#include "amdgpu_ras.h"
+#include <uapi/linux/kfd_ioctl.h>
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
{
@@ -46,3 +47,22 @@ int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
/* hdp ras follows amdgpu_ras_block_late_init_default for late init */
return 0;
}
+
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32((adev->rmmio_remap.reg_offset +
+ KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >>
+ 2,
+ 0);
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
+ } else {
+ amdgpu_ring_emit_wreg(ring,
+ (adev->rmmio_remap.reg_offset +
+ KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >>
+ 2,
+ 0);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index 7b8a6152dc8d..4cfd932b7e91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -44,4 +44,6 @@ struct amdgpu_hdp {
};
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif /* __AMDGPU_HDP_H__ */
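amdgpu_hdp_generic_flush() has the same signature as the flush_hdp callback in struct amdgpu_hdp_funcs, so HDP IP versions whose flush is just a write to the remapped HDP_MEM_FLUSH_CNTL register can point their callback at the shared helper. A hedged sketch of such a conversion, where the hdp_vN_0_funcs table and the invalidate callback are placeholders rather than part of this patch:

/* Hypothetical per-IP wiring; only amdgpu_hdp_generic_flush() is added above. */
static const struct amdgpu_hdp_funcs hdp_vN_0_funcs = {
	.flush_hdp = amdgpu_hdp_generic_flush,
	.invalidate_hdp = hdp_vN_0_invalidate_hdp, /* placeholder */
};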
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 2ea98ec60220..802743efa3b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -163,12 +163,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
init_shadow = false;
}
- if (!ring->sched.ready && !ring->is_mes_queue) {
+ if (!ring->sched.ready) {
dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
return -EINVAL;
}
- if (vm && !job->vmid && !ring->is_mes_queue) {
+ if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 4c4e087230ac..5dd78a9cb12d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -576,8 +576,16 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
INIT_LIST_HEAD(&id_mgr->ids_lru);
id_mgr->reserved_use_count = 0;
- /* manage only VMIDs not used by KFD */
- id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+ /* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+ else if (AMDGPU_IS_MMHUB0(i) ||
+ AMDGPU_IS_MMHUB1(i))
+ id_mgr->num_ids = 16;
+ else
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
/* skip over VMID 0, since it is the system VM */
for (j = 1; j < id_mgr->num_ids; ++j) {
@@ -588,7 +596,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
}
/* alloc a default reserved vmid to enforce isolation */
for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i])
+ if (adev->enforce_isolation[i] != AMDGPU_ENFORCE_ISOLATION_DISABLE)
amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 901f8b12c672..30f16968b578 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_ih.h"
+#include "amdgpu_reset.h"
/**
* amdgpu_ih_ring_init - initialize the IH state
@@ -227,13 +228,23 @@ restart_ih:
ih->rptr &= ih->ptr_mask;
}
- amdgpu_ih_set_rptr(adev, ih);
+ if (!ih->overflow)
+ amdgpu_ih_set_rptr(adev, ih);
+
wake_up_all(&ih->wait_process);
/* make sure wptr hasn't changed while processing */
wptr = amdgpu_ih_get_wptr(adev, ih);
if (wptr != ih->rptr)
- goto restart_ih;
+ if (!ih->overflow)
+ goto restart_ih;
+
+ if (ih->overflow)
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index b0a88f92cd82..7f7ea046e209 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -72,6 +72,7 @@ struct amdgpu_ih_ring {
/* For waiting on IH processing at checkpoint. */
wait_queue_head_t wait_process;
uint64_t processed_timestamp;
+ bool overflow;
};
/* return true if time stamp t2 is after t1 with 48bit wrap around */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 19ce4da285e8..13c60cac4261 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -619,6 +619,10 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned int type)
{
+ /* When the threshold is reached, the interrupt source may not be enabled; return -EINVAL. */
+ if (amdgpu_ras_is_rma(adev))
+ return -EINVAL;
+
if (!adev->irq.installed)
return -ENOENT;
@@ -725,8 +729,8 @@ static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
*/
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
- adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
- &amdgpu_hw_irqdomain_ops, adev);
+ adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
+ &amdgpu_hw_irqdomain_ops, adev);
if (!adev->irq.domain) {
DRM_ERROR("GPU irq add domain failed\n");
return -ENODEV;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index ce6b9ba967ff..f2c049129661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -78,6 +78,7 @@ struct amdgpu_job {
/* enforce isolation */
bool enforce_isolation;
+ bool run_cleaner_shader;
uint32_t num_ibs;
struct amdgpu_ib ibs[];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 27bfe9c8af06..9fbb04aee97b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -45,6 +45,7 @@
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "amd_pcie.h"
+#include "amdgpu_userq.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
@@ -370,6 +371,26 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
return 0;
}
+static int amdgpu_userq_metadata_info_gfx(struct amdgpu_device *adev,
+ struct drm_amdgpu_info *info,
+ struct drm_amdgpu_info_uq_metadata_gfx *meta)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ struct amdgpu_gfx_shadow_info shadow = {};
+
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow, true);
+ meta->shadow_size = shadow.shadow_size;
+ meta->shadow_alignment = shadow.shadow_alignment;
+ meta->csa_size = shadow.csa_size;
+ meta->csa_alignment = shadow.csa_alignment;
+ ret = 0;
+ }
+
+ return ret;
+}
+
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
struct drm_amdgpu_info *info,
struct drm_amdgpu_info_hw_ip *result)
@@ -387,7 +408,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- if (adev->gfx.gfx_ring[i].sched.ready)
+ if (adev->gfx.gfx_ring[i].sched.ready &&
+ !adev->gfx.gfx_ring[i].no_user_submission)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
@@ -395,7 +417,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- if (adev->gfx.compute_ring[i].sched.ready)
+ if (adev->gfx.compute_ring[i].sched.ready &&
+ !adev->gfx.compute_ring[i].no_user_submission)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
@@ -403,7 +426,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
- if (adev->sdma.instance[i].ring.sched.ready)
+ if (adev->sdma.instance[i].ring.sched.ready &&
+ !adev->sdma.instance[i].ring.no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -414,7 +438,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->uvd.harvest_config & (1 << i))
continue;
- if (adev->uvd.inst[i].ring.sched.ready)
+ if (adev->uvd.inst[i].ring.sched.ready &&
+ !adev->uvd.inst[i].ring.no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -423,7 +448,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
- if (adev->vce.ring[i].sched.ready)
+ if (adev->vce.ring[i].sched.ready &&
+ !adev->vce.ring[i].no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -435,7 +461,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
- if (adev->uvd.inst[i].ring_enc[j].sched.ready)
+ if (adev->uvd.inst[i].ring_enc[j].sched.ready &&
+ !adev->uvd.inst[i].ring_enc[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -447,7 +474,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->vcn.harvest_config & (1 << i))
continue;
- if (adev->vcn.inst[i].ring_dec.sched.ready)
+ if (adev->vcn.inst[i].ring_dec.sched.ready &&
+ !adev->vcn.inst[i].ring_dec.no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -460,7 +488,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++)
- if (adev->vcn.inst[i].ring_enc[j].sched.ready)
+ if (adev->vcn.inst[i].ring_enc[j].sched.ready &&
+ !adev->vcn.inst[i].ring_enc[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -475,7 +504,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
- if (adev->jpeg.inst[i].ring_dec[j].sched.ready)
+ if (adev->jpeg.inst[i].ring_dec[j].sched.ready &&
+ !adev->jpeg.inst[i].ring_dec[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -483,7 +513,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
break;
case AMDGPU_HW_IP_VPE:
type = AMD_IP_BLOCK_TYPE_VPE;
- if (adev->vpe.ring.sched.ready)
+ if (adev->vpe.ring.sched.ready &&
+ !adev->vpe.ring.no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -978,6 +1009,8 @@ out:
}
}
+ dev_info->userq_ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+
ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
@@ -1293,6 +1326,22 @@ out:
return copy_to_user(out, &gpuvm_fault,
min((size_t)size, sizeof(gpuvm_fault))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_UQ_FW_AREAS: {
+ struct drm_amdgpu_info_uq_metadata meta_info = {};
+
+ switch (info->query_hw_ip.type) {
+ case AMDGPU_HW_IP_GFX:
+ ret = amdgpu_userq_metadata_info_gfx(adev, info, &meta_info.gfx);
+ if (ret)
+ return ret;
+
+ ret = copy_to_user(out, &meta_info,
+ min((size_t)size, sizeof(meta_info))) ? -EFAULT : 0;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -1376,6 +1425,14 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
mutex_init(&fpriv->bo_list_lock);
idr_init_base(&fpriv->bo_list_handles, 1);
+ r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
+ if (r)
+ DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
+
+ r = amdgpu_eviction_fence_init(&fpriv->evf_mgr);
+ if (r)
+ goto error_vm;
+
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
file_priv->driver_priv = fpriv;
@@ -1445,6 +1502,11 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_bo_unreserve(pd);
}
+ if (!fpriv->evf_mgr.fd_closing) {
+ fpriv->evf_mgr.fd_closing = true;
+ amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
+ }
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
amdgpu_vm_fini(adev, &fpriv->vm);
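The new AMDGPU_INFO_UQ_FW_AREAS query above reports the shadow/CSA sizes and alignments that user-mode queues need for the GFX IP. A hedged user-space sketch of issuing the query follows; the request struct layout and the drmCommandWrite() path follow the existing DRM_AMDGPU_INFO convention, and only the query id and the drm_amdgpu_info_uq_metadata fields come from this diff.

/* Hypothetical user-space query; not part of this patch. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int query_gfx_uq_metadata(int fd, struct drm_amdgpu_info_uq_metadata *meta)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)meta;
	request.return_size = sizeof(*meta);
	request.query = AMDGPU_INFO_UQ_FW_AREAS;
	request.query_hw_ip.type = AMDGPU_HW_IP_GFX;

	/* Returns 0 on success; meta->gfx.shadow_size etc. are then valid. */
	return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
}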
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index fb212f0a1136..2febb63ab232 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -39,42 +39,6 @@ int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
PAGE_SIZE);
}
-static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
- int ip_type, uint64_t *doorbell_index)
-{
- unsigned int offset, found;
- struct amdgpu_mes *mes = &adev->mes;
-
- if (ip_type == AMDGPU_RING_TYPE_SDMA)
- offset = adev->doorbell_index.sdma_engine[0];
- else
- offset = 0;
-
- found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
- if (found >= mes->num_mes_dbs) {
- DRM_WARN("No doorbell available\n");
- return -ENOSPC;
- }
-
- set_bit(found, mes->doorbell_bitmap);
-
- /* Get the absolute doorbell index on BAR */
- *doorbell_index = mes->db_start_dw_offset + found * 2;
- return 0;
-}
-
-static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
- uint32_t doorbell_index)
-{
- unsigned int old, rel_index;
- struct amdgpu_mes *mes = &adev->mes;
-
- /* Find the relative index of the doorbell in this object */
- rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
- old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
- WARN_ON(!old);
-}
-
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
int i;
@@ -126,7 +90,7 @@ static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
int amdgpu_mes_init(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, num_pipes;
adev->mes.adev = adev;
@@ -142,19 +106,52 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
adev->mes.vmid_mask_mmhub = 0xffffff00;
- adev->mes.vmid_mask_gfxhub = 0xffffff00;
+ adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00;
+
+ num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
+ if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
+ dev_warn(adev->dev, "more gfx pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_GFX_PIPES);
+
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) {
+ if (i >= num_pipes)
+ break;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
+ IP_VERSION(12, 0, 0))
+ /*
+ * GFX V12 has only one GFX pipe, but 8 queues in it.
+ * GFX pipe 0 queue 0 is used by the kernel queue.
+ * Set GFX pipe 0 queues 1-7 for MES scheduling:
+ * mask = 1111 1110b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE;
+ else
+ /*
+ * GFX pipe 0 queue 0 is used by the kernel queue.
+ * Set GFX pipe 0 queue 1 for MES scheduling:
+ * mask = 10b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2;
+ }
+
+ num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec;
+ if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES)
+ dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES);
for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
- if (i >= (adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec))
+ if (i >= num_pipes)
break;
- adev->mes.compute_hqd_mask[i] = 0xc;
+ adev->mes.compute_hqd_mask[i] = adev->gfx.disable_kq ? 0xF : 0xC;
}
- for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
- adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;
+ num_pipes = adev->sdma.num_instances;
+ if (num_pipes > AMDGPU_MES_MAX_SDMA_PIPES)
+ dev_warn(adev->dev, "more SDMA pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_SDMA_PIPES);
for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
- if (i >= adev->sdma.num_instances)
+ if (i >= num_pipes)
break;
adev->mes.sdma_hqd_mask[i] = 0xfc;
}
@@ -240,244 +237,6 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
mutex_destroy(&adev->mes.mutex_hidden);
}
-static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
-{
- amdgpu_bo_free_kernel(&q->mqd_obj,
- &q->mqd_gpu_addr,
- &q->mqd_cpu_ptr);
-}
-
-int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
- struct amdgpu_vm *vm)
-{
- struct amdgpu_mes_process *process;
- int r;
-
- /* allocate the mes process buffer */
- process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
- if (!process) {
- DRM_ERROR("no more memory to create mes process\n");
- return -ENOMEM;
- }
-
- /* allocate the process context bo and map it */
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
- if (r) {
- DRM_ERROR("failed to allocate process context bo\n");
- goto clean_up_memory;
- }
- memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* add the mes process to idr list */
- r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
- GFP_KERNEL);
- if (r < 0) {
- DRM_ERROR("failed to lock pasid=%d\n", pasid);
- goto clean_up_ctx;
- }
-
- INIT_LIST_HEAD(&process->gang_list);
- process->vm = vm;
- process->pasid = pasid;
- process->process_quantum = adev->mes.default_process_quantum;
- process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_ctx:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_bo_free_kernel(&process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
-clean_up_memory:
- kfree(process);
- return r;
-}
-
-void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
-{
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang, *tmp1;
- struct amdgpu_mes_queue *queue, *tmp2;
- struct mes_remove_queue_input queue_input;
- unsigned long flags;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- process = idr_find(&adev->mes.pasid_idr, pasid);
- if (!process) {
- DRM_WARN("pasid %d doesn't exist\n", pasid);
- amdgpu_mes_unlock(&adev->mes);
- return;
- }
-
- /* Remove all queues from hardware */
- list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
- list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->remove_hw_queue(&adev->mes,
- &queue_input);
- if (r)
- DRM_WARN("failed to remove hardware queue\n");
- }
-
- idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
- }
-
- idr_remove(&adev->mes.pasid_idr, pasid);
- amdgpu_mes_unlock(&adev->mes);
-
- /* free all memory allocated by the process */
- list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
- /* free all queues in the gang */
- list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
- amdgpu_mes_queue_free_mqd(queue);
- list_del(&queue->list);
- kfree(queue);
- }
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
- list_del(&gang->list);
- kfree(gang);
-
- }
- amdgpu_bo_free_kernel(&process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
- kfree(process);
-}
-
-int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
- struct amdgpu_mes_gang_properties *gprops,
- int *gang_id)
-{
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang;
- int r;
-
- /* allocate the mes gang buffer */
- gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
- if (!gang) {
- return -ENOMEM;
- }
-
- /* allocate the gang context bo and map it to cpu space */
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
- if (r) {
- DRM_ERROR("failed to allocate process context bo\n");
- goto clean_up_mem;
- }
- memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- process = idr_find(&adev->mes.pasid_idr, pasid);
- if (!process) {
- DRM_ERROR("pasid %d doesn't exist\n", pasid);
- r = -EINVAL;
- goto clean_up_ctx;
- }
-
- /* add the mes gang to idr list */
- r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
- GFP_KERNEL);
- if (r < 0) {
- DRM_ERROR("failed to allocate idr for gang\n");
- goto clean_up_ctx;
- }
-
- gang->gang_id = r;
- *gang_id = r;
-
- INIT_LIST_HEAD(&gang->queue_list);
- gang->process = process;
- gang->priority = gprops->priority;
- gang->gang_quantum = gprops->gang_quantum ?
- gprops->gang_quantum : adev->mes.default_gang_quantum;
- gang->global_priority_level = gprops->global_priority_level;
- gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
- list_add_tail(&gang->list, &process->gang_list);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_ctx:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
-clean_up_mem:
- kfree(gang);
- return r;
-}
-
-int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
-{
- struct amdgpu_mes_gang *gang;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- amdgpu_mes_unlock(&adev->mes);
- return -EINVAL;
- }
-
- if (!list_empty(&gang->queue_list)) {
- DRM_ERROR("queue list is not empty\n");
- amdgpu_mes_unlock(&adev->mes);
- return -EBUSY;
- }
-
- idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
- list_del(&gang->list);
- amdgpu_mes_unlock(&adev->mes);
-
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
-
- kfree(gang);
-
- return 0;
-}
-
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
struct mes_suspend_gang_input input;
@@ -526,304 +285,6 @@ int amdgpu_mes_resume(struct amdgpu_device *adev)
return r;
}
-static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
- struct amdgpu_mes_queue *q,
- struct amdgpu_mes_queue_properties *p)
-{
- struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
- u32 mqd_size = mqd_mgr->mqd_size;
- int r;
-
- r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &q->mqd_obj,
- &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
- if (r) {
- dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
- return r;
- }
- memset(q->mqd_cpu_ptr, 0, mqd_size);
-
- r = amdgpu_bo_reserve(q->mqd_obj, false);
- if (unlikely(r != 0))
- goto clean_up;
-
- return 0;
-
-clean_up:
- amdgpu_bo_free_kernel(&q->mqd_obj,
- &q->mqd_gpu_addr,
- &q->mqd_cpu_ptr);
- return r;
-}
-
-static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
- struct amdgpu_mes_queue *q,
- struct amdgpu_mes_queue_properties *p)
-{
- struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
- struct amdgpu_mqd_prop mqd_prop = {0};
-
- mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
- mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
- mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
- mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
- mqd_prop.queue_size = p->queue_size;
- mqd_prop.use_doorbell = true;
- mqd_prop.doorbell_index = p->doorbell_off;
- mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
- mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
- mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
- mqd_prop.hqd_active = false;
-
- if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
- p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- mutex_lock(&adev->srbm_mutex);
- amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
- }
-
- mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
-
- if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
- p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- }
-
- amdgpu_bo_unreserve(q->mqd_obj);
-}
-
-int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
- struct amdgpu_mes_queue_properties *qprops,
- int *queue_id)
-{
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_add_queue_input queue_input;
- unsigned long flags;
- int r;
-
- memset(&queue_input, 0, sizeof(struct mes_add_queue_input));
-
- /* allocate the mes queue buffer */
- queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
- if (!queue) {
- DRM_ERROR("Failed to allocate memory for queue\n");
- return -ENOMEM;
- }
-
- /* Allocate the queue mqd */
- r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
- if (r)
- goto clean_up_memory;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- r = -EINVAL;
- goto clean_up_mqd;
- }
-
- /* add the mes gang to idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
- GFP_ATOMIC);
- if (r < 0) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- goto clean_up_mqd;
- }
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- *queue_id = queue->queue_id = r;
-
- /* allocate a doorbell index for the queue */
- r = amdgpu_mes_kernel_doorbell_get(adev,
- qprops->queue_type,
- &qprops->doorbell_off);
- if (r)
- goto clean_up_queue_id;
-
- /* initialize the queue mqd */
- amdgpu_mes_queue_init_mqd(adev, queue, qprops);
-
- /* add hw queue to mes */
- queue_input.process_id = gang->process->pasid;
-
- queue_input.page_table_base_addr =
- adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
- adev->gmc.vram_start;
-
- queue_input.process_va_start = 0;
- queue_input.process_va_end =
- (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
- queue_input.process_quantum = gang->process->process_quantum;
- queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
- queue_input.gang_quantum = gang->gang_quantum;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
- queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
- queue_input.gang_global_priority_level = gang->global_priority_level;
- queue_input.doorbell_offset = qprops->doorbell_off;
- queue_input.mqd_addr = queue->mqd_gpu_addr;
- queue_input.wptr_addr = qprops->wptr_gpu_addr;
- queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
- queue_input.queue_type = qprops->queue_type;
- queue_input.paging = qprops->paging;
- queue_input.is_kfd_process = 0;
-
- r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
- if (r) {
- DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
- qprops->doorbell_off);
- goto clean_up_doorbell;
- }
-
- DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
- "queue type=%d, doorbell=0x%llx\n",
- gang->process->pasid, gang_id, qprops->queue_type,
- qprops->doorbell_off);
-
- queue->ring = qprops->ring;
- queue->doorbell_off = qprops->doorbell_off;
- queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
- queue->queue_type = qprops->queue_type;
- queue->paging = qprops->paging;
- queue->gang = gang;
- queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
- list_add_tail(&queue->list, &gang->queue_list);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_doorbell:
- amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
-clean_up_queue_id:
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-clean_up_mqd:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_mes_queue_free_mqd(queue);
-clean_up_memory:
- kfree(queue);
- return r;
-}
-
-int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
-{
- unsigned long flags;
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_remove_queue_input queue_input;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* remove the mes gang from idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
-
- queue = idr_find(&adev->mes.queue_id_idr, queue_id);
- if (!queue) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- amdgpu_mes_unlock(&adev->mes);
- DRM_ERROR("queue id %d doesn't exist\n", queue_id);
- return -EINVAL;
- }
-
- idr_remove(&adev->mes.queue_id_idr, queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
- queue->doorbell_off);
-
- gang = queue->gang;
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
- queue_id);
-
- list_del(&queue->list);
- amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
- amdgpu_mes_unlock(&adev->mes);
-
- amdgpu_mes_queue_free_mqd(queue);
- kfree(queue);
- return 0;
-}
-
-int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
-{
- unsigned long flags;
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_reset_queue_input queue_input;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* remove the mes gang from idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
-
- queue = idr_find(&adev->mes.queue_id_idr, queue_id);
- if (!queue) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- amdgpu_mes_unlock(&adev->mes);
- DRM_ERROR("queue id %d doesn't exist\n", queue_id);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
- queue->doorbell_off);
-
- gang = queue->gang;
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
- queue_id);
-
- amdgpu_mes_unlock(&adev->mes);
-
- return 0;
-}
-
-int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
- int me_id, int pipe_id, int queue_id, int vmid)
-{
- struct mes_reset_queue_input queue_input;
- int r;
-
- queue_input.queue_type = queue_type;
- queue_input.use_mmio = true;
- queue_input.me_id = me_id;
- queue_input.pipe_id = pipe_id;
- queue_input.queue_id = queue_id;
- queue_input.vmid = vmid;
- r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
- queue_id);
- return r;
-}
-
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -874,7 +335,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
unsigned int vmid,
bool use_mmio)
{
- struct mes_reset_legacy_queue_input queue_input;
+ struct mes_reset_queue_input queue_input;
int r;
memset(&queue_input, 0, sizeof(queue_input));
@@ -888,8 +349,11 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
queue_input.wptr_addr = ring->wptr_gpu_addr;
queue_input.vmid = vmid;
queue_input.use_mmio = use_mmio;
+ queue_input.is_kq = true;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
+ queue_input.legacy_gfx = true;
- r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
if (r)
DRM_ERROR("failed to reset legacy queue\n");
@@ -905,7 +369,7 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
uint32_t *read_val_ptr;
if (amdgpu_device_wb_get(adev, &addr_offset)) {
- DRM_ERROR("critical bug! too many mes readers\n");
+ dev_err(adev->dev, "critical bug! too many mes readers\n");
goto error;
}
read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
@@ -915,13 +379,13 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
op_input.read_reg.buffer_addr = read_val_gpu_addr;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes rreg is not supported!\n");
+ dev_err(adev->dev, "mes rreg is not supported!\n");
goto error;
}
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to read reg (0x%x)\n", reg);
+ dev_err(adev->dev, "failed to read reg (0x%x)\n", reg);
else
val = *(read_val_ptr);
@@ -942,14 +406,14 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev,
op_input.write_reg.reg_value = val;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes wreg is not supported!\n");
+ dev_err(adev->dev, "mes wreg is not supported!\n");
r = -EINVAL;
goto error;
}
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to write reg (0x%x)\n", reg);
+ dev_err(adev->dev, "failed to write reg (0x%x)\n", reg);
error:
return r;
@@ -969,14 +433,14 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
op_input.wrm_reg.mask = mask;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
+ dev_err(adev->dev, "mes reg_write_reg_wait is not supported!\n");
r = -EINVAL;
goto error;
}
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to reg_write_reg_wait\n");
+ dev_err(adev->dev, "failed to reg_write_reg_wait\n");
error:
return r;
@@ -994,14 +458,14 @@ int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
op_input.wrm_reg.mask = mask;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes reg wait is not supported!\n");
+ dev_err(adev->dev, "mes reg wait is not supported!\n");
r = -EINVAL;
goto error;
}
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to reg_write_reg_wait\n");
+ dev_err(adev->dev, "failed to reg_write_reg_wait\n");
error:
return r;
@@ -1075,25 +539,6 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
return r;
}
-static void
-amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_mes_queue_properties *props)
-{
- props->queue_type = ring->funcs->type;
- props->hqd_base_gpu_addr = ring->gpu_addr;
- props->rptr_gpu_addr = ring->rptr_gpu_addr;
- props->wptr_gpu_addr = ring->wptr_gpu_addr;
- props->wptr_mc_addr =
- ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
- props->queue_size = ring->ring_size;
- props->eop_gpu_addr = ring->eop_gpu_addr;
- props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
- props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
- props->paging = false;
- props->ring = ring;
-}
-
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng) \
do { \
if (id_offs < AMDGPU_MES_CTX_MAX_OFFS) \
@@ -1130,453 +575,12 @@ int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
return -EINVAL;
}
-int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
- int queue_type, int idx,
- struct amdgpu_mes_ctx_data *ctx_data,
- struct amdgpu_ring **out)
-{
- struct amdgpu_ring *ring;
- struct amdgpu_mes_gang *gang;
- struct amdgpu_mes_queue_properties qprops = {0};
- int r, queue_id, pasid;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- amdgpu_mes_unlock(&adev->mes);
- return -EINVAL;
- }
- pasid = gang->process->pasid;
-
- ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
- if (!ring) {
- amdgpu_mes_unlock(&adev->mes);
- return -ENOMEM;
- }
-
- ring->ring_obj = NULL;
- ring->use_doorbell = true;
- ring->is_mes_queue = true;
- ring->mes_ctx = ctx_data;
- ring->idx = idx;
- ring->no_scheduler = true;
-
- if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- compute[ring->idx].mec_hpd);
- ring->eop_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- }
-
- switch (queue_type) {
- case AMDGPU_RING_TYPE_GFX:
- ring->funcs = adev->gfx.gfx_ring[0].funcs;
- ring->me = adev->gfx.gfx_ring[0].me;
- ring->pipe = adev->gfx.gfx_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- ring->funcs = adev->gfx.compute_ring[0].funcs;
- ring->me = adev->gfx.compute_ring[0].me;
- ring->pipe = adev->gfx.compute_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- ring->funcs = adev->sdma.instance[0].ring.funcs;
- break;
- default:
- BUG();
- }
-
- r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
- if (r) {
- amdgpu_mes_unlock(&adev->mes);
- goto clean_up_memory;
- }
-
- amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
-
- dma_fence_wait(gang->process->vm->last_update, false);
- dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
- amdgpu_mes_unlock(&adev->mes);
-
- r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
- if (r)
- goto clean_up_ring;
-
- ring->hw_queue_id = queue_id;
- ring->doorbell_index = qprops.doorbell_off;
-
- if (queue_type == AMDGPU_RING_TYPE_GFX)
- sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
- else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
- sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
- queue_id);
- else if (queue_type == AMDGPU_RING_TYPE_SDMA)
- sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
- queue_id);
- else
- BUG();
-
- *out = ring;
- return 0;
-
-clean_up_ring:
- amdgpu_ring_fini(ring);
-clean_up_memory:
- kfree(ring);
- return r;
-}
-
-void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring)
- return;
-
- amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
- timer_delete_sync(&ring->fence_drv.fallback_timer);
- amdgpu_ring_fini(ring);
- kfree(ring);
-}
-
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio)
{
return adev->mes.aggregated_doorbells[prio];
}
-int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- int r;
-
- r = amdgpu_bo_create_kernel(adev,
- sizeof(struct amdgpu_mes_ctx_meta_data),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &ctx_data->meta_data_obj,
- &ctx_data->meta_data_mc_addr,
- &ctx_data->meta_data_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
- return r;
- }
-
- if (!ctx_data->meta_data_obj)
- return -ENOMEM;
-
- memset(ctx_data->meta_data_ptr, 0,
- sizeof(struct amdgpu_mes_ctx_meta_data));
-
- return 0;
-}
-
-void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
-{
- if (ctx_data->meta_data_obj)
- amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
- &ctx_data->meta_data_mc_addr,
- &ctx_data->meta_data_ptr);
-}
-
-int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_bo_va *bo_va;
- struct amdgpu_sync sync;
- struct drm_exec exec;
- int r;
-
- amdgpu_sync_create(&sync);
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec,
- &ctx_data->meta_data_obj->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
- }
-
- bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
- if (!bo_va) {
- DRM_ERROR("failed to create bo_va for meta data BO\n");
- r = -ENOMEM;
- goto error_fini_exec;
- }
-
- r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
- sizeof(struct amdgpu_mes_ctx_meta_data),
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
-
- if (r) {
- DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
- goto error_del_bo_va;
- }
-
- r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r) {
- DRM_ERROR("failed to do vm_bo_update on meta data\n");
- goto error_del_bo_va;
- }
- amdgpu_sync_fence(&sync, bo_va->last_pt_update, GFP_KERNEL);
-
- r = amdgpu_vm_update_pdes(adev, vm, false);
- if (r) {
- DRM_ERROR("failed to update pdes on meta data\n");
- goto error_del_bo_va;
- }
- amdgpu_sync_fence(&sync, vm->last_update, GFP_KERNEL);
-
- amdgpu_sync_wait(&sync, false);
- drm_exec_fini(&exec);
-
- amdgpu_sync_free(&sync);
- ctx_data->meta_data_va = bo_va;
- return 0;
-
-error_del_bo_va:
- amdgpu_vm_bo_del(adev, bo_va);
-
-error_fini_exec:
- drm_exec_fini(&exec);
- amdgpu_sync_free(&sync);
- return r;
-}
-
-int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
- struct amdgpu_bo *bo = ctx_data->meta_data_obj;
- struct amdgpu_vm *vm = bo_va->base.vm;
- struct dma_fence *fence;
- struct drm_exec exec;
- long r;
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec,
- &ctx_data->meta_data_obj->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
- }
-
- amdgpu_vm_bo_del(adev, bo_va);
- if (!amdgpu_vm_ready(vm))
- goto out_unlock;
-
- r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
- &fence);
- if (r)
- goto out_unlock;
- if (fence) {
- amdgpu_bo_fence(bo, fence, true);
- fence = NULL;
- }
-
- r = amdgpu_vm_clear_freed(adev, vm, &fence);
- if (r || !fence)
- goto out_unlock;
-
- dma_fence_wait(fence, false);
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
-
-out_unlock:
- if (unlikely(r < 0))
- dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
- drm_exec_fini(&exec);
-
- return r;
-}
-
-static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
- int pasid, int *gang_id,
- int queue_type, int num_queue,
- struct amdgpu_ring **added_rings,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_ring *ring;
- struct amdgpu_mes_gang_properties gprops = {0};
- int r, j;
-
- /* create a gang for the process */
- gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.gang_quantum = adev->mes.default_gang_quantum;
- gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
-
- r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
- if (r) {
- DRM_ERROR("failed to add gang\n");
- return r;
- }
-
- /* create queues for the gang */
- for (j = 0; j < num_queue; j++) {
- r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
- ctx_data, &ring);
- if (r) {
- DRM_ERROR("failed to add ring\n");
- break;
- }
-
- DRM_INFO("ring %s was added\n", ring->name);
- added_rings[j] = ring;
- }
-
- return 0;
-}
-
-static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
-{
- struct amdgpu_ring *ring;
- int i, r;
-
- for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
- ring = added_rings[i];
- if (!ring)
- continue;
-
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
-
- r = amdgpu_ring_test_ib(ring, 1000 * 10);
- if (r) {
- DRM_DEV_ERROR(ring->adev->dev,
- "ring %s ib test failed (%d)\n",
- ring->name, r);
- return r;
- } else
- DRM_INFO("ring %s ib test pass\n", ring->name);
- }
-
- return 0;
-}
-
-int amdgpu_mes_self_test(struct amdgpu_device *adev)
-{
- struct amdgpu_vm *vm = NULL;
- struct amdgpu_mes_ctx_data ctx_data = {0};
- struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
- int gang_ids[3] = {0};
- int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
- { AMDGPU_RING_TYPE_COMPUTE, 1 },
- { AMDGPU_RING_TYPE_SDMA, 1} };
- int i, r, pasid, k = 0;
-
- pasid = amdgpu_pasid_alloc(16);
- if (pasid < 0) {
- dev_warn(adev->dev, "No more PASIDs available!");
- pasid = 0;
- }
-
- vm = kzalloc(sizeof(*vm), GFP_KERNEL);
- if (!vm) {
- r = -ENOMEM;
- goto error_pasid;
- }
-
- r = amdgpu_vm_init(adev, vm, -1);
- if (r) {
- DRM_ERROR("failed to initialize vm\n");
- goto error_pasid;
- }
-
- r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
- if (r) {
- DRM_ERROR("failed to alloc ctx meta data\n");
- goto error_fini;
- }
-
- ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
- r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
- if (r) {
- DRM_ERROR("failed to map ctx meta data\n");
- goto error_vm;
- }
-
- r = amdgpu_mes_create_process(adev, pasid, vm);
- if (r) {
- DRM_ERROR("failed to create MES process\n");
- goto error_vm;
- }
-
- for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
- /* On GFX v10.3, fw hasn't supported to map sdma queue. */
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
- IP_VERSION(10, 3, 0) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) <
- IP_VERSION(11, 0, 0) &&
- queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
- continue;
-
- r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
- &gang_ids[i],
- queue_types[i][0],
- queue_types[i][1],
- &added_rings[k],
- &ctx_data);
- if (r)
- goto error_queues;
-
- k += queue_types[i][1];
- }
-
- /* start ring test and ib test for MES queues */
- amdgpu_mes_test_queues(added_rings);
-
-error_queues:
- /* remove all queues */
- for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
- if (!added_rings[i])
- continue;
- amdgpu_mes_remove_ring(adev, added_rings[i]);
- }
-
- for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
- if (!gang_ids[i])
- continue;
- amdgpu_mes_remove_gang(adev, gang_ids[i]);
- }
-
- amdgpu_mes_destroy_process(adev, pasid);
-
-error_vm:
- amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
-
-error_fini:
- amdgpu_vm_fini(adev, vm);
-
-error_pasid:
- if (pasid)
- amdgpu_pasid_free(pasid);
-
- amdgpu_mes_ctx_free_meta_data(&ctx_data);
- kfree(vm);
- return 0;
-}
-
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -1705,7 +709,7 @@ int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
mutex_lock(&adev->enforce_isolation_mutex);
for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i])
+ if (adev->enforce_isolation[i] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
else
r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index da2c9a8cb3e0..a41f65b4f733 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -111,8 +111,8 @@ struct amdgpu_mes {
uint32_t vmid_mask_gfxhub;
uint32_t vmid_mask_mmhub;
- uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
uint32_t gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
+ uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
uint32_t sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
uint32_t aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
uint32_t sch_ctx_offs[AMDGPU_MAX_MES_PIPES];
@@ -149,19 +149,6 @@ struct amdgpu_mes {
};
-struct amdgpu_mes_process {
- int pasid;
- struct amdgpu_vm *vm;
- uint64_t pd_gpu_addr;
- struct amdgpu_bo *proc_ctx_bo;
- uint64_t proc_ctx_gpu_addr;
- void *proc_ctx_cpu_ptr;
- uint64_t process_quantum;
- struct list_head gang_list;
- uint32_t doorbell_index;
- struct mutex doorbell_lock;
-};
-
struct amdgpu_mes_gang {
int gang_id;
int priority;
@@ -248,18 +235,6 @@ struct mes_remove_queue_input {
uint64_t gang_context_addr;
};
-struct mes_reset_queue_input {
- uint32_t doorbell_offset;
- uint64_t gang_context_addr;
- bool use_mmio;
- uint32_t queue_type;
- uint32_t me_id;
- uint32_t pipe_id;
- uint32_t queue_id;
- uint32_t xcc_id;
- uint32_t vmid;
-};
-
struct mes_map_legacy_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
@@ -291,7 +266,7 @@ struct mes_resume_gang_input {
uint64_t gang_context_addr;
};
-struct mes_reset_legacy_queue_input {
+struct mes_reset_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
bool use_mmio;
@@ -301,6 +276,8 @@ struct mes_reset_legacy_queue_input {
uint64_t mqd_addr;
uint64_t wptr_addr;
uint32_t vmid;
+ bool legacy_gfx;
+ bool is_kq;
};
enum mes_misc_opcode {
@@ -388,9 +365,6 @@ struct amdgpu_mes_funcs {
int (*misc_op)(struct amdgpu_mes *mes,
struct mes_misc_op_input *input);
- int (*reset_legacy_queue)(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input);
-
int (*reset_hw_queue)(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input);
};
@@ -404,26 +378,9 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
int amdgpu_mes_init(struct amdgpu_device *adev);
void amdgpu_mes_fini(struct amdgpu_device *adev);
-int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
- struct amdgpu_vm *vm);
-void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid);
-
-int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
- struct amdgpu_mes_gang_properties *gprops,
- int *gang_id);
-int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id);
-
int amdgpu_mes_suspend(struct amdgpu_device *adev);
int amdgpu_mes_resume(struct amdgpu_device *adev);
-int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
- struct amdgpu_mes_queue_properties *qprops,
- int *queue_id);
-int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
-int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id);
-int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
- int me_id, int pipe_id, int queue_id, int vmid);
-
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
@@ -451,27 +408,10 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
bool trap_en);
int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr);
-int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
- int queue_type, int idx,
- struct amdgpu_mes_ctx_data *ctx_data,
- struct amdgpu_ring **out);
-void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio);
-int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data);
-void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data);
-int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_mes_ctx_data *ctx_data);
-int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data);
-
-int amdgpu_mes_self_test(struct amdgpu_device *adev);
-
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev);
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 80cd6f5273db..73403744331a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -163,8 +163,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
* When GTT is just an alternative to VRAM make sure that we
* only use it as fallback and still try to fill up VRAM first.
*/
- if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
- !(adev->flags & AMD_IS_APU))
+ if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
+ domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
places[c].flags |= TTM_PL_FLAG_FALLBACK;
c++;
}
@@ -1044,7 +1044,8 @@ static const char * const amdgpu_vram_names[] = {
"GDDR6",
"DDR5",
"LPDDR4",
- "LPDDR5"
+ "LPDDR5",
+ "HBM3E"
};
/**
@@ -1644,7 +1645,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
-
+ /* Add the gem obj resv fence dump */
+ if (dma_resv_trylock(bo->tbo.base.resv)) {
+ dma_resv_describe(bo->tbo.base.resv, m);
+ dma_resv_unlock(bo->tbo.base.resv);
+ }
seq_puts(m, "\n");
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index df5d5dbd7f0f..e6f0b035e20b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2214,7 +2214,8 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
!psp->securedisplay_context.context.bin_desc.start_addr) {
- dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
+ dev_info(psp->adev->dev,
+ "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
return 0;
}
@@ -4185,6 +4186,110 @@ const struct attribute_group amdgpu_flash_attr_group = {
.is_visible = amdgpu_flash_attr_is_visible,
};
+#if defined(CONFIG_DEBUG_FS)
+static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet;
+ int ret;
+
+ /* serialize calls to open() */
+ if (!mutex_trylock(&adev->psp.mutex))
+ return -EBUSY;
+
+ /*
+ * make sure only one userspace process is alive for dumping so that
+ * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed,
+ * e.g. the case where one process tries to open the file while another
+ * has proceeded to read or release. This also eliminates the need for
+ * a mutex in the read() or release() callbacks.
+ */
+ if (adev->psp.spirom_dump_trip) {
+ mutex_unlock(&adev->psp.mutex);
+ return -EBUSY;
+ }
+
+ bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
+ if (!bo_triplet) {
+ mutex_unlock(&adev->psp.mutex);
+ return -ENOMEM;
+ }
+
+ ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &bo_triplet->bo,
+ &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ if (ret)
+ goto rel_trip;
+
+ ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
+ if (ret)
+ goto rel_bo;
+
+ adev->psp.spirom_dump_trip = bo_triplet;
+ mutex_unlock(&adev->psp.mutex);
+ return 0;
+rel_bo:
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+rel_trip:
+ kfree(bo_triplet);
+ mutex_unlock(&adev->psp.mutex);
+ dev_err(adev->dev, "Trying IFWI dump fails, err = %d\n", ret);
+ return ret;
+}
+
+static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (!bo_triplet)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf,
+ size,
+ pos, bo_triplet->cpu_addr,
+ AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+}
+
+static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (bo_triplet) {
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ kfree(bo_triplet);
+ }
+
+ adev->psp.spirom_dump_trip = NULL;
+ return 0;
+}
+
+static const struct file_operations psp_dump_spirom_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = psp_read_spirom_debugfs_open,
+ .read = psp_read_spirom_debugfs_read,
+ .release = psp_read_spirom_debugfs_release,
+ .llseek = default_llseek,
+};
+#endif
+
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+ debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
+ adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+#endif
+}
+
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 8d5acc415d38..428adc7f741d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -39,6 +39,18 @@
#define PSP_TMR_ALIGNMENT 0x100000
#define PSP_FW_NAME_LEN 0x24
+/* VBIOS gfl defines */
+#define MBOX_READY_MASK 0x80000000
+#define MBOX_STATUS_MASK 0x0000FFFF
+#define MBOX_COMMAND_MASK 0x00FF0000
+#define MBOX_READY_FLAG 0x80000000
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
+#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO 0xf
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10
+#define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11
+
extern const struct attribute_group amdgpu_flash_attr_group;
enum psp_shared_mem_size {
@@ -107,6 +119,7 @@ enum psp_reg_prog_id {
PSP_REG_IH_RB_CNTL = 0, /* register IH_RB_CNTL */
PSP_REG_IH_RB_CNTL_RING1 = 1, /* register IH_RB_CNTL_RING1 */
PSP_REG_IH_RB_CNTL_RING2 = 2, /* register IH_RB_CNTL_RING2 */
+ PSP_REG_MMHUB_L1_TLB_CNTL = 25,
PSP_REG_LAST
};
@@ -137,11 +150,14 @@ struct psp_funcs {
int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
+ int (*dump_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*vbflash_stat)(struct psp_context *psp);
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
bool (*get_ras_capability)(struct psp_context *psp);
bool (*is_aux_sos_load_required)(struct psp_context *psp);
bool (*is_reload_needed)(struct psp_context *psp);
+ int (*reg_program_no_ring)(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
};
struct ta_funcs {
@@ -319,6 +335,14 @@ struct psp_runtime_scpm_entry {
enum psp_runtime_scpm_authentication scpm_status;
};
+#if defined(CONFIG_DEBUG_FS)
+struct spirom_bo {
+ struct amdgpu_bo *bo;
+ uint64_t mc_addr;
+ void *cpu_addr;
+};
+#endif
+
struct psp_context {
struct amdgpu_device *adev;
struct psp_ring km_ring;
@@ -406,6 +430,9 @@ struct psp_context {
char *vbflash_tmp_buf;
size_t vbflash_image_size;
bool vbflash_done;
+#if defined(CONFIG_DEBUG_FS)
+ struct spirom_bo *spirom_dump_trip;
+#endif
};
struct amdgpu_psp_funcs {
@@ -464,6 +491,10 @@ struct amdgpu_psp_funcs {
((psp)->funcs->update_spirom ? \
(psp)->funcs->update_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+#define psp_dump_spirom(psp, fw_pri_mc_addr) \
+ ((psp)->funcs->dump_spirom ? \
+ (psp)->funcs->dump_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+
#define psp_vbflash_status(psp) \
((psp)->funcs->vbflash_stat ? \
(psp)->funcs->vbflash_stat((psp)) : -EINVAL)
@@ -475,6 +506,10 @@ struct amdgpu_psp_funcs {
#define psp_is_aux_sos_load_required(psp) \
((psp)->funcs->is_aux_sos_load_required ? (psp)->funcs->is_aux_sos_load_required((psp)) : 0)
+#define psp_reg_program_no_ring(psp, val, id) \
+ ((psp)->funcs->reg_program_no_ring ? \
+ (psp)->funcs->reg_program_no_ring((psp), val, id) : -EINVAL)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -569,5 +604,9 @@ bool amdgpu_psp_get_ras_capability(struct psp_context *psp);
int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id,
bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev);
+int amdgpu_psp_reg_program_no_ring(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 443409d4f4b0..dc07936d2fcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1498,6 +1498,9 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
!amdgpu_ras_get_aca_debug_mode(adev))
return -EOPNOTSUPP;
+ if (amdgpu_sriov_vf(adev))
+ return -EOPNOTSUPP;
+
/* skip ras error reset in gpu reset */
if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
((smu_funcs && smu_funcs->set_debug_mode) ||
@@ -2161,7 +2164,7 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
/* Fatal error events are handled on host side */
if (amdgpu_sriov_vf(adev))
return;
- /**
+ /*
* If the current interrupt is caused by a non-fatal RAS error, skip
* check for fatal error. For fatal errors, FED status of all devices
* in XGMI hive gets set when the first device gets fatal error
@@ -2886,6 +2889,7 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
return -EINVAL;
}
+
return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
adev->umc.retire_unit);
}
@@ -2900,7 +2904,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
&adev->psp.ras_context.ras->eeprom_control;
enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
- uint32_t i;
+ uint32_t i = 0;
if (!con || !con->eh_data || !bps || pages <= 0)
return 0;
@@ -2921,34 +2925,36 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
mutex_lock(&con->recovery_lock);
if (from_rom) {
- for (i = 0; i < pages; i++) {
- if (control->ras_num_recs - i >= adev->umc.retire_unit) {
- if ((bps[i].address == bps[i + 1].address) &&
- (bps[i].mem_channel == bps[i + 1].mem_channel)) {
- //deal with retire_unit records a time
- ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
- &bps[i], &err_data, nps);
- if (ret)
- goto free;
- i += (adev->umc.retire_unit - 1);
+ /* there are no pa recs in V3, so skip pa recs processing */
+ if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ for (i = 0; i < pages; i++) {
+ if (control->ras_num_recs - i >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ /* deal with retire_unit records at a time */
+ ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
+ control->ras_num_bad_pages -= adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ break;
+ }
} else {
break;
}
- } else {
- break;
}
}
for (; i < pages; i++) {
ret = __amdgpu_ras_convert_rec_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- goto free;
+ control->ras_num_bad_pages -= adev->umc.retire_unit;
}
} else {
ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
}
-free:
if (from_rom)
kfree(err_data.err_addr);
mutex_unlock(&con->recovery_lock);
@@ -3037,21 +3043,28 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
dev_err(adev->dev, "Failed to load EEPROM table records!");
} else {
if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
- for (i = 0; i < control->ras_num_recs; i++) {
- if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
- if ((bps[i].address == bps[i + 1].address) &&
- (bps[i].mem_channel == bps[i + 1].mem_channel)) {
- control->ras_num_pa_recs += adev->umc.retire_unit;
- i += (adev->umc.retire_unit - 1);
+ /* In V3, there are no pa recs, and some cases (when address == 0) may
+  * be parsed as pa recs, so add a version check to avoid it.
+  */
+ if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ for (i = 0; i < control->ras_num_recs; i++) {
+ if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ control->ras_num_pa_recs += adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ control->ras_num_mca_recs +=
+ (control->ras_num_recs - i);
+ break;
+ }
} else {
- control->ras_num_mca_recs +=
- (control->ras_num_recs - i);
+ control->ras_num_mca_recs += (control->ras_num_recs - i);
break;
}
- } else {
- control->ras_num_mca_recs += (control->ras_num_recs - i);
- break;
}
+ } else {
+ control->ras_num_mca_recs = control->ras_num_recs;
}
}
@@ -3460,6 +3473,10 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
control->ras_num_pa_recs = control->ras_num_recs;
+ if (adev->umc.ras &&
+ adev->umc.ras->get_retire_flip_bits)
+ adev->umc.ras->get_retire_flip_bits(adev);
+
if (control->ras_num_recs) {
ret = amdgpu_ras_load_bad_pages(adev);
if (ret)
@@ -3793,10 +3810,12 @@ init_ras_enabled_flag:
adev->ras_hw_enabled & amdgpu_ras_mask;
/* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
- adev->aca.is_enabled =
- (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
- amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
- amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->aca.is_enabled =
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
+ }
/* bad page feature is not applicable to specific app platform */
if (adev->gmc.is_app_apu &&
@@ -4479,8 +4498,11 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
u64 event_id;
- if (amdgpu_ras_mark_ras_event(adev, type))
+ if (amdgpu_ras_mark_ras_event(adev, type)) {
+ dev_err(adev->dev,
+ "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");
return;
+ }
event_id = amdgpu_ras_acquire_event_id(adev, type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 0ea7cfaf3587..2c58e09e56f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -418,6 +418,7 @@ static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control
hdr->version = RAS_TABLE_VER_V2_1;
return;
case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 5, 0):
hdr->version = RAS_TABLE_VER_V3;
return;
default:
@@ -1392,17 +1393,39 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
__decode_table_header_from_buf(hdr, buf);
- if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ if (hdr->header != RAS_TABLE_HDR_VAL &&
+ hdr->header != RAS_TABLE_HDR_BAD) {
+ dev_info(adev->dev, "Creating a new EEPROM table");
+ return amdgpu_ras_eeprom_reset_table(control);
+ }
+
+ switch (hdr->version) {
+ case RAS_TABLE_VER_V2_1:
+ case RAS_TABLE_VER_V3:
control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
control->ras_record_offset = RAS_RECORD_START_V2_1;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
- } else {
+ break;
+ case RAS_TABLE_VER_V1:
control->ras_num_recs = RAS_NUM_RECS(hdr);
control->ras_record_offset = RAS_RECORD_START;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ break;
+ default:
+ dev_err(adev->dev,
+ "RAS header invalid, unsupported version: %u",
+ hdr->version);
+ return -EINVAL;
}
- control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+ if (control->ras_num_recs > control->ras_max_record_count) {
+ dev_err(adev->dev,
+ "RAS header invalid, records in header: %u max allowed :%u",
+ control->ras_num_recs, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
+ control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
control->ras_num_mca_recs = 0;
control->ras_num_pa_recs = 0;
return 0;
@@ -1413,7 +1436,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_device *adev = to_amdgpu_device(control);
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- int res;
+ int res = 0;
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -1494,10 +1517,6 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
"User defined threshold is set, runtime service will be halt when threshold is reached\n");
}
}
- } else {
- DRM_INFO("Creating a new EEPROM table");
-
- res = amdgpu_ras_eeprom_reset_table(control);
}
return res < 0 ? res : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 59acdbfe28d8..426834806fbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -187,14 +187,10 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
#define amdgpu_ring_get_gpu_addr(ring, offset) \
- (ring->is_mes_queue ? \
- (ring->mes_ctx->meta_data_gpu_addr + offset) : \
- (ring->adev->wb.gpu_addr + offset * 4))
+ (ring->adev->wb.gpu_addr + offset * 4)
#define amdgpu_ring_get_cpu_addr(ring, offset) \
- (ring->is_mes_queue ? \
- (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
- (&ring->adev->wb.wb[offset]))
+ (&ring->adev->wb.wb[offset])
/**
* amdgpu_ring_init - init driver ring struct.
@@ -243,57 +239,42 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->sched_score = sched_score;
ring->vmid_wait = dma_fence_get_stub();
- if (!ring->is_mes_queue) {
- ring->idx = adev->num_rings++;
- adev->rings[ring->idx] = ring;
- }
+ ring->idx = adev->num_rings++;
+ adev->rings[ring->idx] = ring;
r = amdgpu_fence_driver_init_ring(ring);
if (r)
return r;
}
- if (ring->is_mes_queue) {
- ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_RPTR_OFFS);
- ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_WPTR_OFFS);
- ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_FENCE_OFFS);
- ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
- ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_COND_EXE_OFFS);
- } else {
- r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->fence_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
+ return r;
}
ring->fence_gpu_addr =
@@ -353,18 +334,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->cached_rptr = 0;
/* Allocate ring buffer */
- if (ring->is_mes_queue) {
- int offset = 0;
-
- BUG_ON(ring->ring_size > PAGE_SIZE*4);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_RING_OFFS);
- ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- amdgpu_ring_clear_ring(ring);
-
- } else if (ring->ring_obj == NULL) {
+ if (ring->ring_obj == NULL) {
r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
@@ -401,32 +371,26 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
/* Not to finish a ring which is not initialized */
- if (!(ring->adev) ||
- (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
+ if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
ring->sched.ready = false;
- if (!ring->is_mes_queue) {
- amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
- amdgpu_device_wb_free(ring->adev, ring->fence_offs);
+ amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
+ amdgpu_device_wb_free(ring->adev, ring->fence_offs);
- amdgpu_bo_free_kernel(&ring->ring_obj,
- &ring->gpu_addr,
- (void **)&ring->ring);
- } else {
- kfree(ring->fence_drv.fences);
- }
+ amdgpu_bo_free_kernel(&ring->ring_obj,
+ &ring->gpu_addr,
+ (void **)&ring->ring);
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
- if (!ring->is_mes_queue)
- ring->adev->rings[ring->idx] = NULL;
+ ring->adev->rings[ring->idx] = NULL;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index bb2b66385223..b95b47110769 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -164,8 +164,24 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
+ /**
+ * @type:
+ *
+ * GFX, Compute, SDMA, UVD, VCE, VCN, VPE, KIQ, MES, UMSCH, and CPER
+ * use ring buffers. The type field just identifies which component the
+ * ring buffer is associated with.
+ */
enum amdgpu_ring_type type;
uint32_t align_mask;
+
+ /**
+ * @nop:
+ *
+ * Every block in amdgpu has no-op instructions (e.g., GFX 10
+ * uses PACKET3(PACKET3_NOP, 0x3FFF), VCN 5 uses VCN_ENC_CMD_NO_OP,
+ * etc). This field receives the specific no-op for the component
+ * that initializes the ring.
+ */
u32 nop;
bool support_64bit_ptrs;
bool no_user_fence;
@@ -241,6 +257,9 @@ struct amdgpu_ring_funcs {
bool (*is_guilty)(struct amdgpu_ring *ring);
};
+/**
+ * struct amdgpu_ring - Holds ring information
+ */
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
@@ -252,13 +271,61 @@ struct amdgpu_ring {
unsigned rptr_offs;
u64 rptr_gpu_addr;
volatile u32 *rptr_cpu_addr;
+
+ /**
+ * @wptr:
+ *
+ * This is part of the ring buffer implementation and represents the
+ * write pointer, i.e. where the host has written so far.
+ */
u64 wptr;
+
+ /**
+ * @wptr_old:
+ *
+ * Before wptr is updated with a new value, the old value is usually
+ * stored in wptr_old.
+ */
u64 wptr_old;
unsigned ring_size;
+
+ /**
+ * @max_dw:
+ *
+ * Maximum number of DWords for ring allocation. This information is
+ * provided at ring initialization time, and each IP block can specify
+ * its own value. Check the places that invoke amdgpu_ring_init() to
+ * see the maximum size per block.
+ */
unsigned max_dw;
+
+ /**
+ * @count_dw:
+ *
+ * This value starts at the maximum number of DWords supported by the
+ * ring and is updated as the ring is manipulated.
+ */
int count_dw;
uint64_t gpu_addr;
+
+ /**
+ * @ptr_mask:
+ *
+ * Some IPs provide support for 64-bit pointers and others for 32-bit
+ * only; this behavior is component-specific and defined by the field
+ * support_64bit_ptrs. If the IP block supports 64-bit pointers, the
+ * mask 0xffffffffffffffff is set; otherwise, this value falls back to
+ * buf_mask. Note that this field is used to keep wptr in a valid range.
+ */
uint64_t ptr_mask;
+
+ /**
+ * @buf_mask:
+ *
+ * Buffer mask is a value used to keep the wptr count within the ring
+ * bounds. The buffer mask is initialized at ring buffer initialization
+ * time and is defined as (ring_size / 4) - 1.
+ */
uint32_t buf_mask;
u32 idx;
u32 xcc_id;
@@ -276,6 +343,13 @@ struct amdgpu_ring {
bool use_pollmem;
unsigned wptr_offs;
u64 wptr_gpu_addr;
+
+ /**
+ * @wptr_cpu_addr:
+ *
+ * This is the CPU address pointer in the writeback slot. This is used
+ * to commit changes to the GPU.
+ */
volatile u32 *wptr_cpu_addr;
unsigned fence_offs;
u64 fence_gpu_addr;
@@ -297,20 +371,15 @@ struct amdgpu_ring {
struct dma_fence *vmid_wait;
bool has_compute_vm_bug;
bool no_scheduler;
+ bool no_user_submission;
int hw_prio;
unsigned num_hw_submission;
atomic_t *sched_score;
- /* used for mes */
- bool is_mes_queue;
- uint32_t hw_queue_id;
- struct amdgpu_mes_ctx_data *mes_ctx;
-
bool is_sw_ring;
unsigned int entry_index;
/* store the cached rptr to restore after reset */
uint64_t cached_rptr;
-
};
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
@@ -435,15 +504,6 @@ static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
ring->ring[offset] = cur - offset;
}
-#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset) \
- (ring->is_mes_queue && ring->mes_ctx ? \
- (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)
-
-#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset) \
- (ring->is_mes_queue && ring->mes_ctx ? \
- (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
- NULL)
-
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index fce22d3f816b..c210625be220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -237,6 +237,20 @@ struct amdgpu_rlc_funcs {
void (*unset_safe_mode)(struct amdgpu_device *adev, int xcc_id);
int (*init)(struct amdgpu_device *adev);
u32 (*get_csb_size)(struct amdgpu_device *adev);
+
+ /**
+ * @get_csb_buffer: Get the clear state to be put into the hardware.
+ *
+ * The parameter adev is used to get the CS data and other gfx info,
+ * and buffer is the RLC CS pointer
+ *
+ * Sometimes, the user space puts a request to clear the state in the
+ * command buffer; this function provides the clear state that gets put
+ * into the hardware. Note that the driver programs Clear State
+ * Indirect Buffer (CSB) explicitly when it sets up the kernel rings,
+ * and it also provides a pointer to it which is used by the firmware
+ * to load the clear state in some cases.
+ */
void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
int (*get_cp_table_num)(struct amdgpu_device *adev);
int (*resume)(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 529c9696c2f3..6716ac281c49 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -26,6 +26,8 @@
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_3_0_sh_mask.h"
#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA reside in the 3rd page of CSA */
@@ -76,22 +78,14 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
return 0;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
+ r = amdgpu_sdma_get_index_from_ring(ring, &index);
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- sdma[ring->idx].sdma_meta_data);
- csa_mc_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- r = amdgpu_sdma_get_index_from_ring(ring, &index);
-
- if (r || index > 31)
- csa_mc_addr = 0;
- else
- csa_mc_addr = amdgpu_csa_vaddr(adev) +
- AMDGPU_CSA_SDMA_OFFSET +
- index * AMDGPU_CSA_SDMA_SIZE;
- }
+ if (r || index > 31)
+ csa_mc_addr = 0;
+ else
+ csa_mc_addr = amdgpu_csa_vaddr(adev) +
+ AMDGPU_CSA_SDMA_OFFSET +
+ index * AMDGPU_CSA_SDMA_SIZE;
return csa_mc_addr;
}
@@ -537,28 +531,38 @@ bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_rin
return false;
}
-/**
- * amdgpu_sdma_register_on_reset_callbacks - Register SDMA reset callbacks
- * @funcs: Pointer to the callback structure containing pre_reset and post_reset functions
- *
- * This function allows KFD and AMDGPU to register their own callbacks for handling
- * pre-reset and post-reset operations for engine reset. These are needed because engine
- * reset will stop all queues on that engine.
- */
-void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs)
+static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
{
- if (!funcs)
- return;
-
- /* Ensure the reset_callback_list is initialized */
- if (!adev->sdma.reset_callback_list.next) {
- INIT_LIST_HEAD(&adev->sdma.reset_callback_list);
+ struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
+ int r = -EOPNOTSUPP;
+
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(4, 4, 2):
+ case IP_VERSION(4, 4, 4):
+ case IP_VERSION(4, 4, 5):
+ /* For SDMA 4.x, use the existing DPM interface for backward compatibility */
+ r = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 5):
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 2):
+ case IP_VERSION(5, 2, 4):
+ case IP_VERSION(5, 2, 5):
+ case IP_VERSION(5, 2, 6):
+ case IP_VERSION(5, 2, 3):
+ case IP_VERSION(5, 2, 1):
+ case IP_VERSION(5, 2, 7):
+ if (sdma_instance->funcs->soft_reset_kernel_queue)
+ r = sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
+ break;
+ default:
+ break;
}
- /* Initialize the list node in the callback structure */
- INIT_LIST_HEAD(&funcs->list);
- /* Add the callback structure to the global list */
- list_add_tail(&funcs->list, &adev->sdma.reset_callback_list);
+ return r;
}
/**
@@ -566,16 +570,10 @@ void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct
* @adev: Pointer to the AMDGPU device
* @instance_id: ID of the SDMA engine instance to reset
*
- * This function performs the following steps:
- * 1. Calls all registered pre_reset callbacks to allow KFD and AMDGPU to save their state.
- * 2. Resets the specified SDMA engine instance.
- * 3. Calls all registered post_reset callbacks to allow KFD and AMDGPU to restore their state.
- *
* Returns: 0 on success, or a negative error code on failure.
*/
int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
{
- struct sdma_on_reset_funcs *funcs;
int ret = 0;
struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
@@ -597,38 +595,18 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
page_sched_stopped = true;
}
- /* Invoke all registered pre_reset callbacks */
- list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
- if (funcs->pre_reset) {
- ret = funcs->pre_reset(adev, instance_id);
- if (ret) {
- dev_err(adev->dev,
- "beforeReset callback failed for instance %u: %d\n",
- instance_id, ret);
- goto exit;
- }
- }
- }
+ if (sdma_instance->funcs->stop_kernel_queue)
+ sdma_instance->funcs->stop_kernel_queue(gfx_ring);
/* Perform the SDMA reset for the specified instance */
- ret = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
+ ret = amdgpu_sdma_soft_reset(adev, instance_id);
if (ret) {
dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id);
goto exit;
}
- /* Invoke all registered post_reset callbacks */
- list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
- if (funcs->post_reset) {
- ret = funcs->post_reset(adev, instance_id);
- if (ret) {
- dev_err(adev->dev,
- "afterReset callback failed for instance %u: %d\n",
- instance_id, ret);
- goto exit;
- }
- }
- }
+ if (sdma_instance->funcs->start_kernel_queue)
+ sdma_instance->funcs->start_kernel_queue(gfx_ring);
exit:
/* Restart the scheduler's work queue for the GFX and page rings
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 47d56fd0589f..5605921212f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -50,6 +50,12 @@ enum amdgpu_sdma_irq {
#define NUM_SDMA(x) hweight32(x)
+struct amdgpu_sdma_funcs {
+ int (*stop_kernel_queue)(struct amdgpu_ring *ring);
+ int (*start_kernel_queue)(struct amdgpu_ring *ring);
+ int (*soft_reset_kernel_queue)(struct amdgpu_device *adev, u32 instance_id);
+};
+
struct amdgpu_sdma_instance {
/* SDMA firmware */
const struct firmware *fw;
@@ -68,7 +74,7 @@ struct amdgpu_sdma_instance {
/* track guilty state of GFX and PAGE queues */
bool gfx_guilty;
bool page_guilty;
-
+ const struct amdgpu_sdma_funcs *funcs;
};
enum amdgpu_sdma_ras_memory_id {
@@ -103,13 +109,6 @@ struct amdgpu_sdma_ras {
struct amdgpu_ras_block_object ras_block;
};
-struct sdma_on_reset_funcs {
- int (*pre_reset)(struct amdgpu_device *adev, uint32_t instance_id);
- int (*post_reset)(struct amdgpu_device *adev, uint32_t instance_id);
- /* Linked list node to store this structure in a list; */
- struct list_head list;
-};
-
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
@@ -131,6 +130,8 @@ struct amdgpu_sdma {
uint32_t *ip_dump;
uint32_t supported_reset;
struct list_head reset_callback_list;
+ bool no_user_submission;
+ bool disable_uq;
};
/*
@@ -170,7 +171,6 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
-void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs);
int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id);
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index e22cb2b5cd92..3939761be31c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -45,7 +45,11 @@
*/
static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
{
- return AMDGPU_VA_RESERVED_SEQ64_START(adev);
+ u64 addr = AMDGPU_VA_RESERVED_SEQ64_START(adev);
+
+ addr = amdgpu_gmc_sign_extend(addr);
+
+ return addr;
}
/**
@@ -63,9 +67,9 @@ static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va)
{
+ u64 seq64_addr, va_flags;
struct amdgpu_bo *bo;
struct drm_exec exec;
- u64 seq64_addr;
int r;
bo = adev->seq64.sbo;
@@ -88,9 +92,11 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
goto error;
}
- seq64_addr = amdgpu_seq64_get_va_base(adev);
+ seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK;
+
+ va_flags = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
- AMDGPU_PTE_READABLE);
+ va_flags);
if (r) {
DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
@@ -156,6 +162,7 @@ error:
*
* @adev: amdgpu_device pointer
* @va: VA to access the seq in process address space
+ * @gpu_addr: GPU address to access the seq
* @cpu_addr: CPU address to access the seq
*
* Alloc a 64 bit memory from seq64 pool.
@@ -163,7 +170,8 @@ error:
* Returns:
* 0 on success or a negative error code on failure
*/
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va,
+ u64 *gpu_addr, u64 **cpu_addr)
{
unsigned long bit_pos;
@@ -172,7 +180,12 @@ int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
return -ENOSPC;
__set_bit(bit_pos, adev->seq64.used);
+
*va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev);
+
+ if (gpu_addr)
+ *gpu_addr = bit_pos * sizeof(u64) + adev->seq64.gpu_addr;
+
*cpu_addr = bit_pos + adev->seq64.cpu_base_addr;
return 0;
@@ -233,7 +246,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev)
*/
r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &adev->seq64.sbo, NULL,
+ &adev->seq64.sbo, &adev->seq64.gpu_addr,
(void **)&adev->seq64.cpu_base_addr);
if (r) {
dev_warn(adev->dev, "(%d) create seq64 failed\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
index 4203b2ab318d..26a249aaaee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
@@ -32,13 +32,14 @@
struct amdgpu_seq64 {
struct amdgpu_bo *sbo;
u32 num_sem;
+ u64 gpu_addr;
u64 *cpu_base_addr;
DECLARE_BITMAP(used, AMDGPU_MAX_SEQ64_SLOTS);
};
void amdgpu_seq64_fini(struct amdgpu_device *adev);
int amdgpu_seq64_init(struct amdgpu_device *adev);
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr);
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 *gpu_addr, u64 **cpu_addr);
void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr);
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 5576ed0b508f..d6ae9974c952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -249,9 +249,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
if (resv == NULL)
return -EINVAL;
-
- /* TODO: Use DMA_RESV_USAGE_READ here */
- dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
+ /* Implicitly sync only to KERNEL, WRITE and READ */
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) {
dma_fence_chain_for_each(f, f) {
struct dma_fence *tmp = dma_fence_chain_contained(f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 53b71e9d8076..9c5df35f05b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2081,6 +2081,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
+ amdgpu_doorbell_fini(adev);
+
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 3d9e9fdc10b4..4a72c2bbd49e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -767,6 +767,7 @@ FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version);
static struct attribute *fw_attrs[] = {
&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
@@ -781,7 +782,7 @@ static struct attribute *fw_attrs[] = {
&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
&dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
&dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr,
- NULL
+ &dev_attr_pldm_fw_version.attr, NULL
};
#define to_dev_attr(x) container_of(x, struct device_attribute, attr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 4eedd92f000b..9e89c3487be5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -25,6 +25,8 @@
#include "amdgpu_socbb.h"
+#define RS64_FW_UC_START_ADDR_LO 0x3000
+
struct common_firmware_header {
uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
uint32_t header_size_bytes; /* size of just the header in bytes */
@@ -600,6 +602,7 @@ struct amdgpu_firmware {
void *fw_buf_ptr;
uint64_t fw_buf_mc;
+ uint32_t pldm_version;
};
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 0a1ef95b2866..8c6e55b5b967 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -529,6 +529,7 @@ int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
pfns[i] = err_data.err_addr[i].retired_page;
}
ret = i;
+ adev->umc.err_addr_cnt = err_data.err_addr_cnt;
out:
kfree(err_data.err_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 857693bcd8d4..29ce6b1d214a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -78,6 +78,18 @@
#define UMC_NPS_SHIFT 40
#define UMC_NPS_MASK 0xffULL
+/* three column bits and one row bit of the MCA address are flipped
+ * during bad page retirement
+ */
+#define RETIRE_FLIP_BITS_NUM 4
+
+struct amdgpu_umc_flip_bits {
+ uint32_t flip_bits_in_pa[RETIRE_FLIP_BITS_NUM];
+ uint32_t flip_row_bit;
+ uint32_t r13_in_pa;
+ uint32_t bit_num;
+};
+
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
@@ -100,6 +112,7 @@ struct amdgpu_umc_ras {
bool dump_addr);
uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
uint64_t mca_addr, uint64_t retired_page);
+ void (*get_retire_flip_bits)(struct amdgpu_device *adev);
};
struct amdgpu_umc_funcs {
@@ -130,6 +143,10 @@ struct amdgpu_umc {
/* active mask for umc node instance */
unsigned long active_mask;
+
+ struct amdgpu_umc_flip_bits flip_bits;
+
+ unsigned long err_addr_cnt;
};
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
new file mode 100644
index 000000000000..295e7186e156
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -0,0 +1,924 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drm_auth.h>
+#include <drm/drm_exec.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
+{
+ int i;
+ u32 userq_ip_mask = 0;
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ if (adev->userq_funcs[i])
+ userq_ip_mask |= (1 << i);
+ }
+
+ return userq_ip_mask;
+}
+
+static int
+amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ r = userq_funcs->unmap(uq_mgr, queue);
+ if (r)
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ else
+ queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
+ }
+ return r;
+}
+
+static int
+amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
+ r = userq_funcs->map(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+ return r;
+}
+
+static void
+amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct dma_fence *f = queue->last_fence;
+ int ret;
+
+ if (f && !dma_fence_is_signaled(f)) {
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0)
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ }
+}
+
+static void
+amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ int queue_id)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ amdgpu_userq_fence_driver_free(queue);
+ idr_remove(&uq_mgr->userq_idr, queue_id);
+ kfree(queue);
+}
+
+int
+amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0;
+
+ mutex_lock(&uq_mgr->userq_mutex);
+	/* Count the queues currently mapped for this process */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
+ ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
+
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return ret;
+}
+
+static struct amdgpu_usermode_queue *
+amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
+{
+ return idr_find(&uq_mgr->userq_idr, qid);
+}
+
+void
+amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+retry:
+ /* Flush any pending resume work to create ev_fence */
+ flush_delayed_work(&uq_mgr->resume_work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+ if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
+ mutex_unlock(&uq_mgr->userq_mutex);
+ /*
+ * Looks like there was no pending resume work,
+ * add one now to create a valid eviction fence
+ */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+ goto retry;
+ }
+}
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_bo_param bp;
+ int r;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ bp.type = ttm_bo_type_kernel;
+ bp.size = size;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(userq_obj->obj, true);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
+ goto free_obj;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
+ goto unresv;
+ }
+
+ r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
+ goto unresv;
+ }
+
+ userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
+ amdgpu_bo_unreserve(userq_obj->obj);
+ memset(userq_obj->cpu_ptr, 0, size);
+ return 0;
+
+unresv:
+ amdgpu_bo_unreserve(userq_obj->obj);
+
+free_obj:
+ amdgpu_bo_unref(&userq_obj->obj);
+ return r;
+}
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj)
+{
+ amdgpu_bo_kunmap(userq_obj->obj);
+ amdgpu_bo_unref(&userq_obj->obj);
+}
+
+uint64_t
+amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp)
+{
+ uint64_t index;
+ struct drm_gem_object *gobj;
+ struct amdgpu_userq_obj *db_obj = db_info->db_obj;
+ int r, db_size;
+
+ gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
+ if (gobj == NULL) {
+ drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
+ return -EINVAL;
+ }
+
+ db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ drm_gem_object_put(gobj);
+
+ r = amdgpu_bo_reserve(db_obj->obj, true);
+ if (r) {
+		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
+ goto unref_bo;
+ }
+
+ /* Pin the BO before generating the index, unpin in queue destroy */
+ r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
+ if (r) {
+ drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
+ goto unresv_bo;
+ }
+
+ switch (db_info->queue_type) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_HW_IP_COMPUTE:
+ case AMDGPU_HW_IP_DMA:
+ db_size = sizeof(u64);
+ break;
+
+ case AMDGPU_HW_IP_VCN_ENC:
+ db_size = sizeof(u32);
+ db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
+ break;
+
+ case AMDGPU_HW_IP_VPE:
+ db_size = sizeof(u32);
+ db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
+ break;
+
+ default:
+		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
+ db_info->queue_type);
+ r = -EINVAL;
+ goto unpin_bo;
+ }
+
+ index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
+ db_info->doorbell_offset, db_size);
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev),
+ "[Usermode queues] doorbell index=%lld\n", index);
+ amdgpu_bo_unreserve(db_obj->obj);
+ return index;
+
+unpin_bo:
+ amdgpu_bo_unpin(db_obj->obj);
+unresv_bo:
+ amdgpu_bo_unreserve(db_obj->obj);
+unref_bo:
+ amdgpu_bo_unref(&db_obj->obj);
+ return r;
+}
+
+static int
+amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_usermode_queue *queue;
+ int r = 0;
+
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ queue = amdgpu_userq_find(uq_mgr, queue_id);
+ if (!queue) {
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n");
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return -EINVAL;
+ }
+ amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
+ r = amdgpu_bo_reserve(queue->db_obj.obj, true);
+ if (!r) {
+ amdgpu_bo_unpin(queue->db_obj.obj);
+ amdgpu_bo_unreserve(queue->db_obj.obj);
+ }
+ amdgpu_bo_unref(&queue->db_obj.obj);
+ r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
+ mutex_unlock(&uq_mgr->userq_mutex);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+static int amdgpu_userq_priority_permit(struct drm_file *filp,
+ int priority)
+{
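+	/*
+	 * Queues below high priority are open to any client; high priority
+	 * and above require CAP_SYS_NICE or DRM master.
+	 */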
+ if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
+ return 0;
+
+ if (capable(CAP_SYS_NICE))
+ return 0;
+
+ if (drm_is_current_master(filp))
+ return 0;
+
+ return -EACCES;
+}
+
+static int
+amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_db_info db_info;
+ bool skip_map_queue;
+ uint64_t index;
+ int qid, r = 0;
+ int priority =
+ (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
+
+	/* Usermode queues are only supported for GFX, Compute and SDMA IPs as of now */
+ if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
+ args->in.ip_type != AMDGPU_HW_IP_DMA &&
+ args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
+ drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n",
+ args->in.ip_type);
+ return -EINVAL;
+ }
+
+ r = amdgpu_userq_priority_permit(filp, priority);
+ if (r)
+ return r;
+
+ if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
+ (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
+ (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
+ !amdgpu_is_tmz(adev)) {
+ drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n");
+ return -EINVAL;
+ }
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ /*
+	 * We may be creating a new queue while the other queues under this
+	 * uq_mgr are suspended. So if there is any resume work pending,
+	 * wait for it to get done.
+ *
+ * This will also make sure we have a valid eviction fence ready to be used.
+ */
+ mutex_lock(&adev->userq_mutex);
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ uq_funcs = adev->userq_funcs[args->in.ip_type];
+ if (!uq_funcs) {
+ drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
+ args->in.ip_type);
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
+ if (!queue) {
+ drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
+ r = -ENOMEM;
+ goto unlock;
+ }
+ queue->doorbell_handle = args->in.doorbell_handle;
+ queue->queue_type = args->in.ip_type;
+ queue->vm = &fpriv->vm;
+ queue->priority = priority;
+
+ db_info.queue_type = queue->queue_type;
+ db_info.doorbell_handle = queue->doorbell_handle;
+ db_info.db_obj = &queue->db_obj;
+ db_info.doorbell_offset = args->in.doorbell_offset;
+
+ /* Convert relative doorbell offset into absolute doorbell index */
+ index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
+ if (index == (uint64_t)-EINVAL) {
+ drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
+		kfree(queue);
+		r = -EINVAL;
+		goto unlock;
+ }
+
+ queue->doorbell_index = index;
+ xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
+ r = amdgpu_userq_fence_driver_alloc(adev, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
+ goto unlock;
+ }
+
+ r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to create Queue\n");
+ amdgpu_userq_fence_driver_free(queue);
+ kfree(queue);
+ goto unlock;
+ }
+
+
+ qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
+ if (qid < 0) {
+ drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+ /* don't map the queue if scheduling is halted */
+ if (adev->userq_halt_for_enforce_isolation &&
+ ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
+ skip_map_queue = true;
+ else
+ skip_map_queue = false;
+ if (!skip_map_queue) {
+ r = amdgpu_userq_map_helper(uq_mgr, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map Queue\n");
+ idr_remove(&uq_mgr->userq_idr, qid);
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ goto unlock;
+ }
+ }
+
+
+ args->out.queue_id = qid;
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+ mutex_unlock(&adev->userq_mutex);
+
+ return r;
+}
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_userq *args = data;
+ int r;
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
+ return -EINVAL;
+ r = amdgpu_userq_create(filp, args);
+ if (r)
+ drm_file_err(filp, "Failed to create usermode queue\n");
+ break;
+
+ case AMDGPU_USERQ_OP_FREE:
+ if (args->in.ip_type ||
+ args->in.doorbell_handle ||
+ args->in.doorbell_offset ||
+ args->in.flags ||
+ args->in.queue_va ||
+ args->in.queue_size ||
+ args->in.rptr_va ||
+		    args->in.wptr_va ||
+ args->in.mqd ||
+ args->in.mqd_size)
+ return -EINVAL;
+ r = amdgpu_userq_destroy(filp, args->in.queue_id);
+ if (r)
+ drm_file_err(filp, "Failed to destroy usermode queue\n");
+ break;
+
+ default:
+ drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
+ return -EINVAL;
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0, r;
+
+ /* Resume all the queues for this process */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_map_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
+ return ret;
+}
+
+static int
+amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ int ret;
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+		DRM_ERROR("Failed to validate\n");
+
+ return ret;
+}
+
+static int
+amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_bo_va *bo_va;
+ struct ww_acquire_ctx *ticket;
+ struct drm_exec exec;
+ struct amdgpu_bo *bo;
+ struct dma_resv *resv;
+ bool clear, unlock;
+ int ret = 0;
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ ret = amdgpu_vm_lock_pd(vm, &exec, 2);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret)) {
+ drm_file_err(uq_mgr->file, "Failed to lock PD\n");
+ goto unlock_all;
+ }
+
+ /* Lock the done list */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status) {
+ bo = bo_va->base.bo;
+ if (!bo)
+ continue;
+
+ ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+ }
+ }
+
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->moved)) {
+ bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
+
+		/* Per VM BOs never need to be cleared in the page tables */
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (ret)
+ goto unlock_all;
+ spin_lock(&vm->status_lock);
+ }
+
+ ticket = &exec.ticket;
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
+ base.vm_status);
+ resv = bo_va->base.bo->tbo.base.resv;
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_va->base.bo;
+ ret = amdgpu_userq_validate_vm_bo(NULL, bo);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to validate BO\n");
+ goto unlock_all;
+ }
+
+ /* Try to reserve the BO to avoid clearing its ptes */
+ if (!adev->debug_vm && dma_resv_trylock(resv)) {
+ clear = false;
+ unlock = true;
+ /* The caller is already holding the reservation lock */
+ } else if (dma_resv_locking_ctx(resv) == ticket) {
+ clear = false;
+ unlock = false;
+ /* Somebody else is using the BO right now */
+ } else {
+ clear = true;
+ unlock = false;
+ }
+
+ ret = amdgpu_vm_bo_update(adev, bo_va, clear);
+
+ if (unlock)
+ dma_resv_unlock(resv);
+ if (ret)
+ goto unlock_all;
+
+ spin_lock(&vm->status_lock);
+ }
+ spin_unlock(&vm->status_lock);
+
+ ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
+
+unlock_all:
+ drm_exec_fini(&exec);
+ return ret;
+}
+
+static void amdgpu_userq_restore_worker(struct work_struct *work)
+{
+ struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ int ret;
+
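+	/*
+	 * Make sure any pending eviction fence suspend work has finished
+	 * before validating and remapping the queues.
+	 */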
+ flush_work(&fpriv->evf_mgr.suspend_work.work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ ret = amdgpu_userq_validate_bos(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
+ goto unlock;
+ }
+
+ ret = amdgpu_userq_restore_all(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static int
+amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0, r;
+
+ /* Try to unmap all the queues in this process ctx */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+ drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
+ return ret;
+}
+
+static int
+amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id, ret;
+
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ struct dma_fence *f = queue->last_fence;
+
+ if (!f || dma_fence_is_signaled(f))
+ continue;
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0) {
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+void
+amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ int ret;
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
+
+ /* Wait for any pending userqueue fence work to finish */
+ ret = amdgpu_userq_wait_for_signal(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
+ return;
+ }
+
+ ret = amdgpu_userq_evict_all(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
+ return;
+ }
+
+ /* Signal current eviction fence */
+ amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
+
+ if (evf_mgr->fd_closing) {
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ return;
+ }
+
+ /* Schedule a resume work */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+}
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev)
+{
+ mutex_init(&userq_mgr->userq_mutex);
+ idr_init_base(&userq_mgr->userq_idr, 1);
+ userq_mgr->adev = adev;
+ userq_mgr->file = file_priv;
+
+ mutex_lock(&adev->userq_mutex);
+ list_add(&userq_mgr->list, &adev->userq_mgr_list);
+ mutex_unlock(&adev->userq_mutex);
+
+ INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
+ return 0;
+}
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
+{
+ struct amdgpu_device *adev = userq_mgr->adev;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ uint32_t queue_id;
+
+ cancel_delayed_work_sync(&userq_mgr->resume_work);
+
+ mutex_lock(&adev->userq_mutex);
+ mutex_lock(&userq_mgr->userq_mutex);
+ idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+ amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
+ amdgpu_userq_unmap_helper(userq_mgr, queue);
+ amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
+ }
+
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ if (uqm == userq_mgr) {
+ list_del(&uqm->list);
+ break;
+ }
+ }
+ idr_destroy(&userq_mgr->userq_idr);
+ mutex_unlock(&userq_mgr->userq_mutex);
+ mutex_unlock(&adev->userq_mutex);
+ mutex_destroy(&userq_mgr->userq_mutex);
+}
+
+int amdgpu_userq_suspend(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ if (!ip_mask)
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ cancel_delayed_work_sync(&uqm->resume_work);
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_resume(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ if (!ip_mask)
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ /* only need to stop gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ if (adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already stopped!\n");
+ adev->userq_halt_for_enforce_isolation = true;
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ cancel_delayed_work_sync(&uqm->resume_work);
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ /* only need to stop gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ if (!adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already started!\n");
+ adev->userq_halt_for_enforce_isolation = false;
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
new file mode 100644
index 000000000000..ec040c2fd6c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_USERQ_H_
+#define AMDGPU_USERQ_H_
+#include "amdgpu_eviction_fence.h"
+
+#define AMDGPU_MAX_USERQ_COUNT 512
+
+#define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
+#define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
+#define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
+
+enum amdgpu_userq_state {
+ AMDGPU_USERQ_STATE_UNMAPPED = 0,
+ AMDGPU_USERQ_STATE_MAPPED,
+ AMDGPU_USERQ_STATE_PREEMPTED,
+ AMDGPU_USERQ_STATE_HUNG,
+};
+
+struct amdgpu_mqd_prop;
+
+struct amdgpu_userq_obj {
+ void *cpu_ptr;
+ uint64_t gpu_addr;
+ struct amdgpu_bo *obj;
+};
+
+struct amdgpu_usermode_queue {
+ int queue_type;
+ enum amdgpu_userq_state state;
+ uint64_t doorbell_handle;
+ uint64_t doorbell_index;
+ uint64_t flags;
+ struct amdgpu_mqd_prop *userq_prop;
+ struct amdgpu_userq_mgr *userq_mgr;
+ struct amdgpu_vm *vm;
+ struct amdgpu_userq_obj mqd;
+ struct amdgpu_userq_obj db_obj;
+ struct amdgpu_userq_obj fw_obj;
+ struct amdgpu_userq_obj wptr_obj;
+ struct xarray fence_drv_xa;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *last_fence;
+ u32 xcp_id;
+ int priority;
+};
+
+struct amdgpu_userq_funcs {
+ int (*mqd_create)(struct amdgpu_userq_mgr *uq_mgr,
+ struct drm_amdgpu_userq_in *args,
+ struct amdgpu_usermode_queue *queue);
+ void (*mqd_destroy)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *uq);
+ int (*unmap)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*map)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+};
+
+/* Usermode queues for gfx */
+struct amdgpu_userq_mgr {
+ struct idr userq_idr;
+ struct mutex userq_mutex;
+ struct amdgpu_device *adev;
+ struct delayed_work resume_work;
+ struct list_head list;
+ struct drm_file *file;
+};
+
+struct amdgpu_db_info {
+ uint64_t doorbell_handle;
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ struct amdgpu_userq_obj *db_obj;
+};
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev);
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr);
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size);
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj);
+
+void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr);
+
+void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp);
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
+
+int amdgpu_userq_suspend(struct amdgpu_device *adev);
+int amdgpu_userq_resume(struct amdgpu_device *adev);
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
new file mode 100644
index 000000000000..fc4d0d42e223
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -0,0 +1,968 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/dma-fence-unwrap.h>
+
+#include <drm/drm_exec.h>
+#include <drm/drm_syncobj.h>
+
+#include "amdgpu.h"
+#include "amdgpu_userq_fence.h"
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops;
+static struct kmem_cache *amdgpu_userq_fence_slab;
+
+int amdgpu_userq_fence_slab_init(void)
+{
+ amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
+ sizeof(struct amdgpu_userq_fence),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!amdgpu_userq_fence_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void amdgpu_userq_fence_slab_fini(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(amdgpu_userq_fence_slab);
+}
+
+static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
+{
+ if (!f || f->ops != &amdgpu_userq_fence_ops)
+ return NULL;
+
+ return container_of(f, struct amdgpu_userq_fence, base);
+}
+
+static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ return le64_to_cpu(*fence_drv->cpu_addr);
+}
+
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long flags;
+ int r;
+
+ fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
+ if (!fence_drv)
+ return -ENOMEM;
+
+ /* Acquire seq64 memory */
+ r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
+ &fence_drv->cpu_addr);
+ if (r)
+ goto free_fence_drv;
+
+ memset(fence_drv->cpu_addr, 0, sizeof(u64));
+
+ kref_init(&fence_drv->refcount);
+ INIT_LIST_HEAD(&fence_drv->fences);
+ spin_lock_init(&fence_drv->fence_list_lock);
+
+ fence_drv->adev = adev;
+ fence_drv->context = dma_fence_context_alloc(1);
+ get_task_comm(fence_drv->timeline_name, current);
+
+ xa_lock_irqsave(&adev->userq_xa, flags);
+ r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
+ fence_drv, GFP_KERNEL));
+ xa_unlock_irqrestore(&adev->userq_xa, flags);
+ if (r)
+ goto free_seq64;
+
+ userq->fence_drv = fence_drv;
+
+ return 0;
+
+free_seq64:
+ amdgpu_seq64_free(adev, fence_drv->va);
+free_fence_drv:
+ kfree(fence_drv);
+
+ return r;
+}
+
+static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long index;
+
+ if (xa_empty(xa))
+ return;
+
+ xa_lock(xa);
+ xa_for_each(xa, index, fence_drv) {
+ __xa_erase(xa, index);
+ amdgpu_userq_fence_driver_put(fence_drv);
+ }
+
+ xa_unlock(xa);
+}
+
+void
+amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
+{
+ amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
+ xa_destroy(&userq->fence_drv_xa);
+ /* Drop the fence_drv reference held by user queue */
+ amdgpu_userq_fence_driver_put(userq->fence_drv);
+}
+
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ struct amdgpu_userq_fence *userq_fence, *tmp;
+ struct dma_fence *fence;
+ u64 rptr;
+ int i;
+
+ if (!fence_drv)
+ return;
+
+ rptr = amdgpu_userq_fence_read(fence_drv);
+
+ spin_lock(&fence_drv->fence_list_lock);
+ list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
+ fence = &userq_fence->base;
+
+ if (rptr < fence->seqno)
+ break;
+
+ dma_fence_signal(fence);
+
+ for (i = 0; i < userq_fence->fence_drv_array_count; i++)
+ amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
+
+ list_del(&userq_fence->link);
+ dma_fence_put(fence);
+ }
+ spin_unlock(&fence_drv->fence_list_lock);
+}
+
+void amdgpu_userq_fence_driver_destroy(struct kref *ref)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
+ struct amdgpu_userq_fence_driver,
+ refcount);
+ struct amdgpu_userq_fence_driver *xa_fence_drv;
+ struct amdgpu_device *adev = fence_drv->adev;
+ struct amdgpu_userq_fence *fence, *tmp;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long index, flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
+ f = &fence->base;
+
+ if (!dma_fence_is_signaled(f)) {
+ dma_fence_set_error(f, -ECANCELED);
+ dma_fence_signal(f);
+ }
+
+ list_del(&fence->link);
+ dma_fence_put(f);
+ }
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ xa_lock_irqsave(xa, flags);
+ xa_for_each(xa, index, xa_fence_drv)
+ if (xa_fence_drv == fence_drv)
+ __xa_erase(xa, index);
+ xa_unlock_irqrestore(xa, flags);
+
+ /* Free seq64 memory */
+ amdgpu_seq64_free(adev, fence_drv->va);
+ kfree(fence_drv);
+}
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_get(&fence_drv->refcount);
+}
+
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
+}
+
+static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
+{
+ *userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
+ return *userq_fence ? 0 : -ENOMEM;
+}
+
+static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
+ struct amdgpu_userq_fence *userq_fence,
+ u64 seq, struct dma_fence **f)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *fence;
+ unsigned long flags;
+
+ fence_drv = userq->fence_drv;
+ if (!fence_drv)
+ return -EINVAL;
+
+ spin_lock_init(&userq_fence->lock);
+ INIT_LIST_HEAD(&userq_fence->link);
+ fence = &userq_fence->base;
+ userq_fence->fence_drv = fence_drv;
+
+ dma_fence_init(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
+ fence_drv->context, seq);
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+ dma_fence_get(fence);
+
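+	/*
+	 * Take over the fence driver references collected by the wait
+	 * IOCTL so they are dropped once this fence is processed.
+	 */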
+ if (!xa_empty(&userq->fence_drv_xa)) {
+ struct amdgpu_userq_fence_driver *stored_fence_drv;
+ unsigned long index, count = 0;
+ int i = 0;
+
+ xa_lock(&userq->fence_drv_xa);
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
+ count++;
+
+ userq_fence->fence_drv_array =
+ kvmalloc_array(count,
+ sizeof(struct amdgpu_userq_fence_driver *),
+ GFP_ATOMIC);
+
+ if (userq_fence->fence_drv_array) {
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
+ userq_fence->fence_drv_array[i] = stored_fence_drv;
+ __xa_erase(&userq->fence_drv_xa, index);
+ i++;
+ }
+ }
+
+ userq_fence->fence_drv_array_count = i;
+ xa_unlock(&userq->fence_drv_xa);
+ } else {
+ userq_fence->fence_drv_array = NULL;
+ userq_fence->fence_drv_array_count = 0;
+ }
+
+ /* Check if hardware has already processed the job */
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ if (!dma_fence_is_signaled_locked(fence))
+ list_add_tail(&userq_fence->link, &fence_drv->fences);
+ else
+ dma_fence_put(fence);
+
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ *f = fence;
+
+ return 0;
+}
+
+static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
+{
+ return "amdgpu_userq_fence";
+}
+
+static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+
+ return fence->fence_drv->timeline_name;
+}
+
+static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 rptr, wptr;
+
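+	/*
+	 * The fence has signaled once the value written back to the seq64
+	 * slot (rptr) has reached this fence's sequence number (wptr).
+	 */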
+ rptr = amdgpu_userq_fence_read(fence_drv);
+ wptr = fence->base.seqno;
+
+ if (rptr >= wptr)
+ return true;
+
+ return false;
+}
+
+static void amdgpu_userq_fence_free(struct rcu_head *rcu)
+{
+ struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
+ struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
+ struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
+
+ /* Release the fence driver reference */
+ amdgpu_userq_fence_driver_put(fence_drv);
+
+ kvfree(userq_fence->fence_drv_array);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+}
+
+static void amdgpu_userq_fence_release(struct dma_fence *f)
+{
+ call_rcu(&f->rcu, amdgpu_userq_fence_free);
+}
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops = {
+ .use_64bit_seqno = true,
+ .get_driver_name = amdgpu_userq_fence_get_driver_name,
+ .get_timeline_name = amdgpu_userq_fence_get_timeline_name,
+ .signaled = amdgpu_userq_fence_signaled,
+ .release = amdgpu_userq_fence_release,
+};
+
+/**
+ * amdgpu_userq_fence_read_wptr - Read the userq wptr value
+ *
+ * @queue: user mode queue structure pointer
+ * @wptr: write pointer value
+ *
+ * Read the wptr value from userq's MQD. The userq signal IOCTL
+ * creates a dma_fence for the shared buffers that expects the
+ * RPTR value written to seq64 memory >= WPTR.
+ *
+ * Returns 0 on success, with the wptr value stored in @wptr, or an error on failure.
+ */
+static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
+ u64 *wptr)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_bo *bo;
+ u64 addr, *ptr;
+ int r;
+
+ r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+ if (r)
+ return r;
+
+ addr = queue->userq_prop->wptr_gpu_addr;
+ addr &= AMDGPU_GMC_HOLE_MASK;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
+ if (!mapping) {
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
+ return -EINVAL;
+ }
+
+ bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("Failed to reserve userqueue wptr bo");
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(bo, (void **)&ptr);
+ if (r) {
+ DRM_ERROR("Failed mapping the userqueue wptr bo");
+ goto map_error;
+ }
+
+ *wptr = le64_to_cpu(*ptr);
+
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+
+map_error:
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return r;
+}
+
+static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
+{
+ dma_fence_put(fence);
+}
+
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct drm_amdgpu_userq_signal *args = data;
+ struct drm_gem_object **gobj_write = NULL;
+ struct drm_gem_object **gobj_read = NULL;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_fence *userq_fence;
+ struct drm_syncobj **syncobj = NULL;
+ u32 *bo_handles_write, num_write_bo_handles;
+ u32 *syncobj_handles, num_syncobj_handles;
+ u32 *bo_handles_read, num_read_bo_handles;
+ int r, i, entry, rentry, wentry;
+ struct dma_fence *fence;
+ struct drm_exec exec;
+ u64 wptr;
+
+ num_syncobj_handles = args->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
+ sizeof(u32) * num_syncobj_handles);
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ /* Array of pointers to the looked up syncobjs */
+ syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
+ if (!syncobj) {
+ r = -ENOMEM;
+ goto free_syncobj_handles;
+ }
+
+ for (entry = 0; entry < num_syncobj_handles; entry++) {
+ syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
+ if (!syncobj[entry]) {
+ r = -ENOENT;
+ goto free_syncobj;
+ }
+ }
+
+ num_read_bo_handles = args->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
+ sizeof(u32) * num_read_bo_handles);
+ if (IS_ERR(bo_handles_read)) {
+ r = PTR_ERR(bo_handles_read);
+ goto free_syncobj;
+ }
+
+ /* Array of pointers to the GEM read objects */
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_bo_handles_read;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ num_write_bo_handles = args->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
+ sizeof(u32) * num_write_bo_handles);
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto put_gobj_read;
+ }
+
+ /* Array of pointers to the GEM write objects */
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto free_bo_handles_write;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ /* Retrieve the user queue */
+ queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
+ if (!queue) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+
+ r = amdgpu_userq_fence_read_wptr(queue, &wptr);
+ if (r)
+ goto put_gobj_write;
+
+ r = amdgpu_userq_fence_alloc(&userq_fence);
+ if (r)
+ goto put_gobj_write;
+
+ /* We are here means UQ is active, make sure the eviction fence is valid */
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ /* Create a new fence */
+ r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
+ if (r) {
+ mutex_unlock(&userq_mgr->userq_mutex);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+ goto put_gobj_write;
+ }
+
+ dma_fence_put(queue->last_fence);
+ queue->last_fence = dma_fence_get(fence);
+ mutex_unlock(&userq_mgr->userq_mutex);
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+ }
+
+ for (i = 0; i < num_read_bo_handles; i++) {
+ if (!gobj_read || !gobj_read[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_read[i]->resv, fence,
+ DMA_RESV_USAGE_READ);
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ if (!gobj_write || !gobj_write[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_write[i]->resv, fence,
+ DMA_RESV_USAGE_WRITE);
+ }
+
+ /* Add the created fence to syncobj/BO's */
+ for (i = 0; i < num_syncobj_handles; i++)
+ drm_syncobj_replace_fence(syncobj[i], fence);
+
+ /* drop the reference acquired in fence creation function */
+ dma_fence_put(fence);
+
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+free_syncobj:
+ while (entry-- > 0)
+ if (syncobj[entry])
+ drm_syncobj_put(syncobj[entry]);
+ kfree(syncobj);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+
+ return r;
+}
+
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
+ u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
+ struct drm_amdgpu_userq_fence_info *fence_info = NULL;
+ struct drm_amdgpu_userq_wait *wait_info = data;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_usermode_queue *waitq;
+ struct drm_gem_object **gobj_write;
+ struct drm_gem_object **gobj_read;
+ struct dma_fence **fences = NULL;
+ u16 num_points, num_fences = 0;
+ int r, i, rentry, wentry, cnt;
+ struct drm_exec exec;
+
+ num_read_bo_handles = wait_info->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
+ sizeof(u32) * num_read_bo_handles);
+ if (IS_ERR(bo_handles_read))
+ return PTR_ERR(bo_handles_read);
+
+ num_write_bo_handles = wait_info->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
+ sizeof(u32) * num_write_bo_handles);
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto free_bo_handles_read;
+ }
+
+ num_syncobj = wait_info->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
+ sizeof(u32) * num_syncobj);
+ if (IS_ERR(syncobj_handles)) {
+ r = PTR_ERR(syncobj_handles);
+ goto free_bo_handles_write;
+ }
+
+ num_points = wait_info->num_syncobj_timeline_handles;
+ timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_handles)) {
+ r = PTR_ERR(timeline_handles);
+ goto free_syncobj_handles;
+ }
+
+ timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_points)) {
+ r = PTR_ERR(timeline_points);
+ goto free_timeline_handles;
+ }
+
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_timeline_points;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto put_gobj_read;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+ }
+
+ if (!wait_info->num_fences) {
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ dma_fence_unwrap_for_each(f, &iter, fence)
+ num_fences++;
+
+ dma_fence_put(fence);
+ }
+ }
+
+ /* Count syncobj's fence */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ num_fences++;
+ dma_fence_put(fence);
+ }
+
+ /* Count GEM objects fence */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence)
+ num_fences++;
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence)
+ num_fences++;
+ }
+
+ /*
+ * Passing num_fences = 0 means that userspace doesn't want to
+ * retrieve userq_fence_info. If num_fences = 0 we skip filling
+		 * userq_fence_info and return the actual number of fences in
+		 * wait_info->num_fences.
+ */
+ wait_info->num_fences = num_fences;
+ } else {
+ /* Array of fence info */
+ fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
+ if (!fence_info) {
+ r = -ENOMEM;
+ goto exec_fini;
+ }
+
+ /* Array of fences */
+ fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
+ if (!fences) {
+ r = -ENOMEM;
+ goto free_fence_info;
+ }
+
+ /* Retrieve GEM read objects fence */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+ /* Retrieve GEM write objects fence */
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ dma_fence_unwrap_for_each(f, &iter, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ dma_fence_get(f);
+ fences[num_fences++] = f;
+ }
+
+ dma_fence_put(fence);
+ }
+ }
+
+ /* Retrieve syncobj's fence */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ }
+
+ /*
+ * Keep only the latest fences to reduce the number of values
+ * given back to userspace.
+ */
+ num_fences = dma_fence_dedup_array(fences, num_fences);
+
+ waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
+ if (!waitq) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ for (i = 0, cnt = 0; i < num_fences; i++) {
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence *userq_fence;
+ u32 index;
+
+ userq_fence = to_amdgpu_userq_fence(fences[i]);
+ if (!userq_fence) {
+ /*
+ * Just waiting on other driver fences should
+ * be good for now
+ */
+ r = dma_fence_wait(fences[i], true);
+ if (r) {
+ dma_fence_put(fences[i]);
+ goto free_fences;
+ }
+
+ dma_fence_put(fences[i]);
+ continue;
+ }
+
+ fence_drv = userq_fence->fence_drv;
+ /*
+			 * We need to make sure the user queues release their
+			 * references to the fence drivers at some point before
+			 * queue destruction. Otherwise, we would accumulate those
+			 * references until we run out of space and crash.
+ */
+ r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
+ xa_limit_32b, GFP_KERNEL);
+ if (r)
+ goto free_fences;
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+
+ /* Store drm syncobj's gpu va address and value */
+ fence_info[cnt].va = fence_drv->va;
+ fence_info[cnt].value = fences[i]->seqno;
+
+ dma_fence_put(fences[i]);
+ /* Increment the actual userq fence count */
+ cnt++;
+ }
+
+ wait_info->num_fences = cnt;
+ /* Copy userq fence info to user space */
+ if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
+ fence_info, wait_info->num_fences * sizeof(*fence_info))) {
+ r = -EFAULT;
+ goto free_fences;
+ }
+
+ kfree(fences);
+ kfree(fence_info);
+ }
+
+ drm_exec_fini(&exec);
+ for (i = 0; i < num_read_bo_handles; i++)
+ drm_gem_object_put(gobj_read[i]);
+ kfree(gobj_read);
+
+ for (i = 0; i < num_write_bo_handles; i++)
+ drm_gem_object_put(gobj_write[i]);
+ kfree(gobj_write);
+
+ kfree(timeline_points);
+ kfree(timeline_handles);
+ kfree(syncobj_handles);
+ kfree(bo_handles_write);
+ kfree(bo_handles_read);
+
+ return 0;
+
+free_fences:
+ while (num_fences-- > 0)
+ dma_fence_put(fences[num_fences]);
+ kfree(fences);
+free_fence_info:
+ kfree(fence_info);
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_timeline_points:
+ kfree(timeline_points);
+free_timeline_handles:
+ kfree(timeline_handles);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
new file mode 100644
index 000000000000..97a125ab8a78
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_USERQ_FENCE_H__
+#define __AMDGPU_USERQ_FENCE_H__
+
+#include <linux/types.h>
+
+#include "amdgpu_userq.h"
+
+struct amdgpu_userq_fence {
+ struct dma_fence base;
+ /*
+ * This lock is necessary to synchronize the
+ * userqueue dma fence operations.
+ */
+ spinlock_t lock;
+ struct list_head link;
+ unsigned long fence_drv_array_count;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence_driver **fence_drv_array;
+};
+
+struct amdgpu_userq_fence_driver {
+ struct kref refcount;
+ u64 va;
+ u64 gpu_addr;
+ u64 *cpu_addr;
+ u64 context;
+ /*
+	 * This lock is necessary to synchronize the access
+ * to the fences list by the fence driver.
+ */
+ spinlock_t fence_list_lock;
+ struct list_head fences;
+ struct amdgpu_device *adev;
+ char timeline_name[TASK_COMM_LEN];
+};
+
+int amdgpu_userq_fence_slab_init(void);
+void amdgpu_userq_fence_slab_fini(void);
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv);
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_destroy(struct kref *ref);
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 1991dd3d1056..c8885c3d54b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -353,9 +353,9 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
- /* err_event_athub will corrupt VCPU buffer, so we need to
+ /* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
- if (in_ras_intr)
+ if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
return 0;
return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index cdcdae7f71ce..83adf81defc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -66,7 +66,6 @@
#define VCN_ENC_CMD_REG_WAIT 0x0000000c
#define VCN_AON_SOC_ADDRESS_2_0 0x1f800
-#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_VID_IP_ADDRESS_2_0 0x0
#define VCN_AON_IP_ADDRESS_2_0 0x30000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 0bb8cbe0dcc0..13f0cdeb59c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -1323,6 +1323,9 @@ static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bo
{
struct amdgpu_virt *virt = &adev->virt;
+ if (!virt->ops || !virt->ops->req_ras_err_count)
+ return -EOPNOTSUPP;
+
/* Host allows 15 ras telemetry requests per 60 seconds. Afterwhich, the Host
* will ignore incoming guest messages. Ratelimit the guest messages to
* prevent guest self DOS.
@@ -1378,14 +1381,16 @@ amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
used_size = host_telemetry->header.used_size;
if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
- return 0;
+ return -EINVAL;
cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
if (!cper_dump)
return -ENOMEM;
- if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0))
+ if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0)) {
+ ret = -EINVAL;
goto out;
+ }
*more = cper_dump->more;
@@ -1425,7 +1430,7 @@ static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
int ret = 0;
uint32_t more = 0;
- if (!amdgpu_sriov_ras_cper_en(adev))
+ if (!virt->ops || !virt->ops->req_ras_cper_dump)
return -EOPNOTSUPP;
do {
@@ -1434,7 +1439,7 @@ static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
adev, virt->fw_reserve.ras_telemetry, &more);
else
ret = 0;
- } while (more);
+ } while (more && !ret);
return ret;
}
@@ -1444,6 +1449,9 @@ int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
struct amdgpu_virt *virt = &adev->virt;
int ret = 0;
+ if (!amdgpu_sriov_ras_cper_en(adev))
+ return -EOPNOTSUPP;
+
if ((__ratelimit(&virt->ras.ras_cper_dump_rs) || force_update) &&
down_read_trylock(&adev->reset_domain->sem)) {
mutex_lock(&virt->ras.ras_telemetry_mutex);
@@ -1480,3 +1488,16 @@ bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
return true;
}
+
+/*
+ * amdgpu_virt_request_bad_pages() - request bad pages
+ * @adev: amdgpu device.
+ * Send a command to the GPU hypervisor to write new bad pages into the shared PF2VF region.
+ */
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->req_bad_pages)
+ virt->ops->req_bad_pages(adev);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index df03dba67ab8..577c6194db78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -97,6 +97,7 @@ struct amdgpu_virt_ops {
bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
int (*req_ras_err_count)(struct amdgpu_device *adev);
int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
+ int (*req_bad_pages)(struct amdgpu_device *adev);
};
/*
@@ -146,11 +147,13 @@ enum AMDGIM_FEATURE_FLAG {
enum AMDGIM_REG_ACCESS_FLAG {
/* Use PSP to program IH_RB_CNTL */
- AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
+ AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
/* Use RLC to program MMHUB regs */
- AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
+ AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
/* Use RLC to program GC regs */
- AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
+ AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
+	/* Use PSP to program L1_TLB_CNTL */
+ AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3),
};
struct amdgim_pf2vf_info_v1 {
@@ -260,7 +263,10 @@ struct amdgpu_virt {
uint32_t reg_val_offs;
struct amdgpu_irq_src ack_irq;
struct amdgpu_irq_src rcv_irq;
+
struct work_struct flr_work;
+ struct work_struct bad_pages_work;
+
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
@@ -330,6 +336,10 @@ struct amdgpu_video_codec_info;
(amdgpu_sriov_vf((adev)) && \
((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))
+#define amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN)))
+
#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
@@ -423,4 +433,5 @@ int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
enum amdgpu_ras_block block);
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ce52b4d75e94..3911c78f8282 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -787,7 +787,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
- cleaner_shader_needed = adev->gfx.enable_cleaner_shader &&
+ cleaner_shader_needed = job->run_cleaner_shader &&
+ adev->gfx.enable_cleaner_shader &&
ring->funcs->emit_cleaner_shader && job->base.s_fence &&
&job->base.s_fence->scheduled == isolation->spearhead;
@@ -817,7 +818,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
- if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
+ if (ring->funcs->emit_gds_switch &&
gds_switch_needed) {
amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
job->gds_size, job->gws_base,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 6da8994e0469..2d7f82e98df9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -24,6 +24,7 @@
#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
@@ -907,6 +908,9 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
struct ttm_resource_manager *man = &mgr->manager;
int err;
+ man->cg = drmm_cgroup_register_region(adev_to_drm(adev), "vram", adev->gmc.real_vram_size);
+ if (IS_ERR(man->cg))
+ return PTR_ERR(man->cg);
ttm_resource_manager_init(man, &adev->mman.bdev,
adev->gmc.real_vram_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index 23b6f7a4aa4a..b03c3895897b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -709,10 +709,10 @@ void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
struct amdgpu_xcp_cfg *xcp_cfg;
int i;
- if (!adev->xcp_mgr)
+ if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg)
return;
- xcp_cfg = adev->xcp_mgr->xcp_cfg;
+ xcp_cfg = adev->xcp_mgr->xcp_cfg;
for (i = 0; i < xcp_cfg->num_res; i++) {
xcp_res = &xcp_cfg->xcp_res[i];
kobject_put(&xcp_res->kobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 477424472bbe..f51ef4cf16e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -296,15 +296,27 @@ static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link_num)
{
- const u32 smnpcs_xgmi3x16_pcs_state_hist1 = 0x11a00070;
- const int xgmi_inst = 2;
- u32 link_inst;
+ const u32 smn_xgmi_6_4_pcs_state_hist1[2] = { 0x11a00070, 0x11b00070 };
+ const u32 smn_xgmi_6_4_1_pcs_state_hist1[2] = { 0x12100070,
+ 0x11b00070 };
+ u32 i, n;
u64 addr;
- link_inst = global_link_num % xgmi_inst;
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ n = ARRAY_SIZE(smn_xgmi_6_4_pcs_state_hist1);
+ addr = smn_xgmi_6_4_pcs_state_hist1[global_link_num % n];
+ break;
+ case IP_VERSION(6, 4, 1):
+ n = ARRAY_SIZE(smn_xgmi_6_4_1_pcs_state_hist1);
+ addr = smn_xgmi_6_4_1_pcs_state_hist1[global_link_num % n];
+ break;
+ default:
+ return U32_MAX;
+ }
- addr = (smnpcs_xgmi3x16_pcs_state_hist1 | (link_inst << 20)) +
- adev->asic_funcs->encode_ext_smn_addressing(global_link_num / xgmi_inst);
+ i = global_link_num / n;
+ addr += adev->asic_funcs->encode_ext_smn_addressing(i);
return RREG32_PCIE_EXT(addr);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index d6ac2652f0ac..92ca13097aaa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -109,10 +109,11 @@ union amd_sriov_msg_feature_flags {
union amd_sriov_reg_access_flags {
struct {
- uint32_t vf_reg_access_ih : 1;
- uint32_t vf_reg_access_mmhub : 1;
- uint32_t vf_reg_access_gc : 1;
- uint32_t reserved : 29;
+ uint32_t vf_reg_access_ih : 1;
+ uint32_t vf_reg_access_mmhub : 1;
+ uint32_t vf_reg_access_gc : 1;
+ uint32_t vf_reg_access_l1_tlb_cntl : 1;
+ uint32_t reserved : 28;
} flags;
uint32_t all;
};
@@ -330,6 +331,7 @@ enum amd_sriov_mailbox_request_message {
MB_REQ_MSG_RAS_POISON = 202,
MB_REQ_RAS_ERROR_COUNT = 203,
MB_REQ_RAS_CPER_DUMP = 204,
+ MB_REQ_RAS_BAD_PAGES = 205,
};
/* mailbox message send from host to guest */
@@ -347,6 +349,9 @@ enum amd_sriov_mailbox_response_message {
MB_RES_MSG_GPU_RMA = 10,
MB_RES_MSG_RAS_ERROR_COUNT_READY = 11,
MB_REQ_RAS_CPER_DUMP_READY = 14,
+ MB_RES_MSG_RAS_BAD_PAGES_READY = 15,
+ MB_RES_MSG_RAS_BAD_PAGES_NOTIFICATION = 16,
+ MB_RES_MSG_UNRECOV_ERR_NOTIFICATION = 17,
MB_RES_MSG_TEXT_MESSAGE = 255
};
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index ae071985f26e..1c083304ae77 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -448,53 +448,71 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x
return 0;
}
-static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
- int mode,
- struct amdgpu_xcp_cfg *xcp_cfg)
+static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int px_mode, int *num_xcp,
+ uint16_t *nps_modes)
{
struct amdgpu_device *adev = xcp_mgr->adev;
- int max_res[AMDGPU_XCP_RES_MAX] = {};
- bool res_lt_xcp;
- int num_xcp, i;
- u16 nps_modes;
- if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
+ if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
return -EINVAL;
- max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
- max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
- max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
- max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
-
- switch (mode) {
+ switch (px_mode) {
case AMDGPU_SPX_PARTITION_MODE:
- num_xcp = 1;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
+ *num_xcp = 1;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
break;
case AMDGPU_DPX_PARTITION_MODE:
- num_xcp = 2;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS2_PARTITION_MODE);
+ *num_xcp = 2;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_TPX_PARTITION_MODE:
- num_xcp = 3;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = 3;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
break;
case AMDGPU_QPX_PARTITION_MODE:
- num_xcp = 4;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = 4;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
break;
case AMDGPU_CPX_PARTITION_MODE:
- num_xcp = NUM_XCC(adev->gfx.xcc_mask);
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = NUM_XCC(adev->gfx.xcc_mask);
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ if (amdgpu_sriov_vf(adev))
+ *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
default:
return -EINVAL;
}
+ return 0;
+}
+
+static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int max_res[AMDGPU_XCP_RES_MAX] = {};
+ bool res_lt_xcp;
+ int num_xcp, i, r;
+ u16 nps_modes;
+
+ if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
+ return -EINVAL;
+
+ max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
+ max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
+ max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
+ max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
+
+ r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes);
+ if (r)
+ return r;
+
xcp_cfg->compatible_nps_modes =
(adev->gmc.supported_nps_modes & nps_modes);
xcp_cfg->num_res = ARRAY_SIZE(max_res);
@@ -543,30 +561,31 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
enum amdgpu_gfx_partition mode)
{
struct amdgpu_device *adev = xcp_mgr->adev;
- int num_xcc, num_xccs_per_xcp;
+ int num_xcc, num_xccs_per_xcp, r;
+ int num_xcp, nps_mode;
+ u16 supp_nps_modes;
+ bool comp_mode;
+
+ nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp,
+ &supp_nps_modes);
+ if (r)
+ return false;
+ comp_mode = !!(BIT(nps_mode) & supp_nps_modes);
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (mode) {
case AMDGPU_SPX_PARTITION_MODE:
- return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
+ return comp_mode && num_xcc > 0;
case AMDGPU_DPX_PARTITION_MODE:
- return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
+ return comp_mode && (num_xcc % 4) == 0;
case AMDGPU_TPX_PARTITION_MODE:
- return (adev->gmc.num_mem_partitions == 1 ||
- adev->gmc.num_mem_partitions == 3) &&
- ((num_xcc % 3) == 0);
+ return comp_mode && ((num_xcc % 3) == 0);
case AMDGPU_QPX_PARTITION_MODE:
num_xccs_per_xcp = num_xcc / 4;
- return (adev->gmc.num_mem_partitions == 1 ||
- adev->gmc.num_mem_partitions == 4) &&
- (num_xccs_per_xcp >= 2);
+ return comp_mode && (num_xccs_per_xcp >= 2);
case AMDGPU_CPX_PARTITION_MODE:
- /* (num_xcc > 1) because 1 XCC is considered SPX, not CPX.
- * (num_xcc % adev->gmc.num_mem_partitions) == 0 because
- * num_compute_partitions can't be less than num_mem_partitions
- */
- return ((num_xcc > 1) &&
- (num_xcc % adev->gmc.num_mem_partitions) == 0);
+ return comp_mode && (num_xcc > 1);
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 81d195d366ce..427b073de2fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1444,6 +1444,7 @@ static void atom_get_vbios_pn(struct atom_context *ctx)
if (vbios_str == NULL)
vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
}
+ OPTIMIZER_HIDE_VAR(vbios_str);
if (vbios_str != NULL && *vbios_str == 0)
vbios_str++;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 521b9faab180..492813ab1b54 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -458,8 +458,8 @@ bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connect
u8 link_status[DP_LINK_STATUS_SIZE];
struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
- if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status)
- <= 0)
+ if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux,
+ link_status) < 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
@@ -616,7 +616,7 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i
drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -681,7 +681,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i
drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 508cea965983..9e8715b4739d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -56,6 +56,8 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block);
+u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
@@ -67,9 +69,6 @@ MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
-u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
-
-
static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
int i;
@@ -993,14 +992,9 @@ static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block)
static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block)
{
- int r;
struct amdgpu_device *adev = ip_block->adev;
- r = cik_sdma_start(adev);
- if (r)
- return r;
-
- return r;
+ return cik_sdma_start(adev);
}
static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1040,14 +1034,10 @@ static bool cik_sdma_is_idle(struct amdgpu_ip_block *ip_block)
static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- u32 tmp;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
- SRBM_STATUS2__SDMA1_BUSY_MASK);
-
- if (!tmp)
+ if (cik_sdma_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 279288365940..8aca4f2734f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -60,9 +60,6 @@
#define AUD5_REGISTER_OFFSET (0x179d - 0x1780)
#define AUD6_REGISTER_OFFSET (0x17a4 - 0x1780)
-#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
-#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
-
#define PIPEID(x) ((x) << 0)
#define MEID(x) ((x) << 2)
#define VMID(x) ((x) << 4)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index df401aded662..bf7c22f81cda 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3075,7 +3075,7 @@ static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
@@ -3227,7 +3227,7 @@ static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 80f01c3989cd..47e05783c4a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3206,7 +3206,7 @@ static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
@@ -3358,7 +3358,7 @@ static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -3488,8 +3488,7 @@ static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.set_powergating_state = dce_v11_0_set_powergating_state,
};
-static void
-dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
+static void dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 255c70959343..276c025c4c03 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -287,7 +287,7 @@ static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -412,7 +412,7 @@ static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
{
if (!render)
WREG32(mmVGA_RENDER_CONTROL,
- RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
+ RREG32(mmVGA_RENDER_CONTROL) & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK);
}
static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
@@ -1011,16 +1011,16 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* select wm A */
arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
tmp = arb_control3;
- tmp &= ~LATENCY_WATERMARK_MASK(3);
- tmp |= LATENCY_WATERMARK_MASK(1);
+ tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (1 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
/* select wm B */
tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
- tmp &= ~LATENCY_WATERMARK_MASK(3);
- tmp |= LATENCY_WATERMARK_MASK(2);
+ tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (2 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
@@ -1089,7 +1089,7 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
}
WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
- DC_LB_MEMORY_CONFIG(tmp));
+ (tmp << DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG__SHIFT));
WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
(buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
@@ -1306,6 +1306,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 offset;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
@@ -1327,6 +1328,11 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
+ if (!dig || !dig->afmt || !dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
@@ -1348,7 +1354,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 tmp = 0;
+ u32 value = 0;
u8 stereo_freqs = 0;
int max_channels = -1;
int j;
@@ -1358,12 +1364,12 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (sad->format == eld_reg_to_type[i][1]) {
if (sad->channels > max_channels) {
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- MAX_CHANNELS, sad->channels);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- DESCRIPTOR_BYTE_2, sad->byte2);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES, sad->freq);
+ value = (sad->channels <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
+ (sad->byte2 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
+ (sad->freq <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
max_channels = sad->channels;
}
@@ -1374,13 +1380,13 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
}
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
+ value |= (stereo_freqs <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
+
+ WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
}
kfree(sads);
-
}
static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
@@ -1886,7 +1892,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels, pipe_config;
- u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
+ u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
@@ -1926,76 +1932,76 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
switch (target_fb->format->format) {
case DRM_FORMAT_C8:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
- GRPH_FORMAT(GRPH_FORMAT_INDEXED));
+ fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
break;
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_BGRA5551:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_RGB565:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB565));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_BGRA1010102:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
- fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
- GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+ fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
+ (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
default:
@@ -2013,18 +2019,18 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
- fb_format |= GRPH_NUM_BANKS(num_banks);
- fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
- fb_format |= GRPH_TILE_SPLIT(tile_split);
- fb_format |= GRPH_BANK_WIDTH(bankw);
- fb_format |= GRPH_BANK_HEIGHT(bankh);
- fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
+ fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
+ fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
+ fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
+ fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
+ fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
+ fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
+ fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
}
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
- fb_format |= GRPH_PIPE_CONFIG(pipe_config);
+ fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
dce_v6_0_vga_enable(crtc, false);
@@ -2040,7 +2046,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+ (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
@@ -2108,14 +2114,13 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
- INTERLEAVE_EN);
+ DATA_FORMAT__INTERLEAVE_EN_MASK);
else
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}
static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
{
-
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -2125,15 +2130,15 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
- (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
+ ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
+ (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
- (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
+ ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
+ (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
@@ -2160,19 +2165,19 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
}
WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
- (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
- ICON_DEGAMMA_MODE(0) |
- (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
+ ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
- (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
+ ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
+ (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
- (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
+ ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
+ (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
- (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
+ ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
+ (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
@@ -2267,8 +2272,6 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
-
-
}
static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
@@ -2285,7 +2288,6 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
CUR_CONTROL__CURSOR_EN_MASK |
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
-
}
static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
@@ -2596,7 +2598,6 @@ static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
@@ -2669,7 +2670,7 @@ static void dce_v6_0_panic_flush(struct drm_plane *plane)
/* Disable DC tiling */
fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
- fb_format &= ~GRPH_ARRAY_MODE(0x7);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
}
@@ -2745,7 +2746,6 @@ static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block)
static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- bool ret;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2789,8 +2789,7 @@ static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
- if (ret)
+ if (amdgpu_atombios_get_connector_info_from_object_table(adev))
amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2986,12 +2985,12 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
- interrupt_mask &= ~VBLANK_INT_MASK;
+ interrupt_mask &= ~INT_MASK__VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
case AMDGPU_IRQ_STATE_ENABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
- interrupt_mask |= VBLANK_INT_MASK;
+ interrupt_mask |= INT_MASK__VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
default:
@@ -3006,28 +3005,28 @@ static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
}
-static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned hpd,
enum amdgpu_interrupt_state state)
{
u32 dc_hpd_int_cntl;
- if (type >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", type);
+ if (hpd >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
- dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
- dc_hpd_int_cntl |= DC_HPDx_INT_EN;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
break;
default:
break;
@@ -3036,7 +3035,7 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3096,7 +3095,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data[0]) {
case 0: /* vblank */
if (disp_int & interrupt_status_offsets[crtc].vblank)
- WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+ WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_STATUS__VBLANK_ACK_MASK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -3107,7 +3106,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
break;
case 1: /* vline */
if (disp_int & interrupt_status_offsets[crtc].vline)
- WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+ WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_STATUS__VLINE_ACK_MASK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -3121,7 +3120,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_pageflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3172,7 +3171,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
@@ -3249,12 +3248,10 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.set_powergating_state = dce_v6_0_set_powergating_state,
};
-static void
-dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
+static void dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
@@ -3274,7 +3271,6 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
-
struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3314,7 +3310,6 @@ static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
-
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -3325,7 +3320,6 @@ static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
{
-
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig;
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
@@ -3541,17 +3535,17 @@ static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
- .set = dce_v6_0_set_crtc_interrupt_state,
+ .set = dce_v6_0_set_crtc_irq_state,
.process = dce_v6_0_crtc_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
- .set = dce_v6_0_set_pageflip_interrupt_state,
+ .set = dce_v6_0_set_pageflip_irq_state,
.process = dce_v6_0_pageflip_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
- .set = dce_v6_0_set_hpd_interrupt_state,
+ .set = dce_v6_0_set_hpd_irq_state,
.process = dce_v6_0_hpd_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 07358546581f..e62ccf9eb73d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -271,7 +271,7 @@ static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -3021,7 +3021,7 @@ static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
}
}
-static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3029,7 +3029,7 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
u32 dc_hpd_int_cntl;
if (type >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", type);
+ DRM_DEBUG("invalid hpd %d\n", type);
return 0;
}
@@ -3051,7 +3051,7 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3136,7 +3136,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_pageflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3547,17 +3547,17 @@ static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
- .set = dce_v8_0_set_crtc_interrupt_state,
+ .set = dce_v8_0_set_crtc_irq_state,
.process = dce_v8_0_crtc_irq,
};
static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
- .set = dce_v8_0_set_pageflip_interrupt_state,
+ .set = dce_v8_0_set_pageflip_irq_state,
.process = dce_v8_0_pageflip_irq,
};
static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
- .set = dce_v8_0_set_hpd_interrupt_state,
+ .set = dce_v8_0_set_hpd_irq_state,
.process = dce_v8_0_hpd_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index a63ce747863f..75ea071744eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -368,11 +368,6 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_10_1[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_LX6_CORE_PDEBUG_INST),
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME2_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
@@ -421,7 +416,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_10[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_10[] = {
@@ -448,7 +452,32 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_10[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_MQD_BASE_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_MQD_BASE_ADDR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI)
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI),
+ /* gfx header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
};
static const struct soc15_reg_golden golden_settings_gc_10_1[] = {
@@ -4296,9 +4325,7 @@ static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
int ctx_reg_offset;
if (adev->gfx.rlc.cs_data == NULL)
@@ -4306,39 +4333,15 @@ static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
-
- ctx_reg_offset =
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
+ ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(ctx_reg_offset);
buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
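The open-coded clear-state preamble, SECT_CONTEXT parser, and epilogue removed above are consolidated into shared helpers; a rough reconstruction of what they presumably do, inferred only from the removed lines (the real definitions live in amdgpu_gfx.c and may differ in detail):

u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
{
	u32 count = 0;

	/* begin clear state + context control, as previously open-coded */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	return count;
}

u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count)
{
	const struct cs_section_def *sect;
	const struct cs_extent_def *ext;
	u32 i;

	/* emit SET_CONTEXT_REG packets for every SECT_CONTEXT extent */
	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id != SECT_CONTEXT)
				return count;
			buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
			buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
			for (i = 0; i < ext->reg_count; i++)
				buffer[count++] = cpu_to_le32(ext->extent[i]);
		}
	}
	return count;
}

void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count)
{
	/* end clear state and emit the CLEAR_STATE packet itself */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

Factoring these out lets gfx_v10, gfx_v11 and gfx_v12 share one copy of the CSB packet emission instead of keeping three divergent copies.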
static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
@@ -4752,6 +4755,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
int i, j, k, r, ring_id = 0;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
@@ -4763,7 +4767,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(10, 1, 4):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 8;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
@@ -4778,7 +4782,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 2;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 2;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -4800,7 +4804,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gfx.cleaner_shader_size = sizeof(gfx_10_1_10_cleaner_shader_hex);
if (adev->gfx.me_fw_version >= 101 &&
adev->gfx.pfp_fw_version >= 158 &&
- adev->gfx.mec_fw_version >= 152) {
+ adev->gfx.mec_fw_version >= 151) {
adev->gfx.enable_cleaner_shader = true;
r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
if (r) {
@@ -4810,7 +4814,9 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
break;
case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
@@ -4826,6 +4832,34 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(10, 3, 6):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 14 &&
+ adev->gfx.pfp_fw_version >= 17 &&
+ adev->gfx.mec_fw_version >= 24) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(10, 3, 7):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 4 &&
+ adev->gfx.pfp_fw_version >= 9 &&
+ adev->gfx.mec_fw_version >= 12) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -4886,7 +4920,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
/* set up the gfx ring */
for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
continue;
@@ -6114,7 +6148,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -6192,7 +6226,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -6269,7 +6303,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -6644,7 +6678,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -9645,9 +9679,14 @@ static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printe
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_10[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_10[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "mmCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_10[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -9708,9 +9747,13 @@ static void gfx_v10_ip_dump(struct amdgpu_ip_block *ip_block)
nv_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_10[reg]));
+ if (i && gc_cp_reg_list_10[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_10[reg]));
}
index += reg_count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index d57db42f9536..afd6d59164bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -48,6 +48,8 @@
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
#define GFX11_NUM_GFX_RINGS 1
#define GFX11_MEC_HPD_SIZE 2048
@@ -177,9 +179,13 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
@@ -230,7 +236,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
@@ -259,7 +274,24 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
@@ -580,33 +612,18 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t padding, offset;
-
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- padding = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
-
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
- *cpu_ptr = cpu_to_le32(0xCAFEDEAD);
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r)
- return r;
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- cpu_ptr = &adev->wb.wb[index];
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ cpu_ptr = &adev->wb.wb[index];
- r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err1;
- }
+ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
@@ -633,12 +650,10 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err2:
- if (!ring->is_mes_queue)
- amdgpu_ib_free(&ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -833,9 +848,7 @@ static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
int ctx_reg_offset;
if (adev->gfx.rlc.cs_data == NULL)
@@ -843,39 +856,15 @@ static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
- ctx_reg_offset =
- SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
+ ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(ctx_reg_offset);
buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
@@ -1056,14 +1045,21 @@ static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
#define MQD_FWWORKAREA_SIZE 484
#define MQD_FWWORKAREA_ALIGNMENT 256
-static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+static void gfx_v11_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
struct amdgpu_gfx_shadow_info *shadow_info)
{
- if (adev->gfx.cp_gfx_shadow) {
- shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
- shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
- shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
- shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+ shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
+ shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
+ shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
+ shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+}
+
+static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check)
+{
+ if (adev->gfx.cp_gfx_shadow || skip_check) {
+ gfx_v11_0_get_gfx_shadow_info_nocheck(adev, shadow_info);
return 0;
} else {
memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
@@ -1136,6 +1132,10 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
+ if (adev->gfx.disable_kq) {
+ ring->no_scheduler = true;
+ ring->no_user_submission = true;
+ }
if (!ring_id)
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
@@ -1568,24 +1568,18 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- int i, j, k, r, ring_id = 0;
+ int i, j, k, r, ring_id;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
- adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
- adev->gfx.mec.num_mec = 1;
- adev->gfx.mec.num_pipe_per_mec = 4;
- adev->gfx.mec.num_queue_per_pipe = 4;
- break;
- case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
@@ -1593,7 +1587,7 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 5, 3):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 2;
adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -1612,6 +1606,35 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
+ if (!adev->gfx.disable_uq &&
+ adev->gfx.me_fw_version >= 2390 &&
+ adev->gfx.pfp_fw_version >= 2530 &&
+ adev->gfx.mec_fw_version >= 2600 &&
+ adev->mes.fw_version[0] >= 120) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
+ /* add firmware version checks here */
+ if (0 && !adev->gfx.disable_uq) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
if (adev->gfx.me_fw_version >= 2280 &&
@@ -1640,6 +1663,34 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(11, 5, 2):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 12 &&
+ adev->gfx.pfp_fw_version >= 15 &&
+ adev->gfx.mec_fw_version >= 15) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(11, 5, 3):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 7 &&
+ adev->gfx.pfp_fw_version >= 8 &&
+ adev->gfx.mec_fw_version >= 8) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -1701,37 +1752,42 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* set up the gfx ring */
- for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
- if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
- continue;
-
- r = gfx_v11_0_gfx_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
- ring_id++;
+ if (adev->gfx.num_gfx_rings) {
+ ring_id = 0;
+ /* set up the gfx ring */
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
+ if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
+ continue;
+
+ r = gfx_v11_0_gfx_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
+ ring_id++;
+ }
}
}
}
- ring_id = 0;
- /* set up the compute queues - allocate horizontally across pipes */
- for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
- for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
- k, j))
- continue;
+ if (adev->gfx.num_compute_rings) {
+ ring_id = 0;
+ /* set up the compute queues - allocate horizontally across pipes */
+ for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+ for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
+ continue;
- r = gfx_v11_0_compute_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
+ r = gfx_v11_0_compute_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
- ring_id++;
+ ring_id++;
+ }
}
}
}
@@ -2428,7 +2484,7 @@ static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2472,7 +2528,7 @@ static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2517,7 +2573,7 @@ static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -3153,7 +3209,7 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
@@ -3371,7 +3427,7 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
@@ -4061,6 +4117,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -4081,6 +4139,16 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
+ /* set gfx UQ items */
+ mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr);
+ mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr);
+ mqd->gds_bkup_base_lo = lower_32_bits(prop->gds_bkup_addr);
+ mqd->gds_bkup_base_hi = upper_32_bits(prop->gds_bkup_addr);
+ mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr);
+ mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr);
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -4205,6 +4273,8 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
prop->allow_tunneling);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
@@ -4256,6 +4326,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_active = prop->hqd_active;
+	/* set UQ fence address */
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -4509,11 +4583,23 @@ static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
return r;
}
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
+ if (adev->gfx.disable_kq) {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ /* we don't want to set ring->ready */
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ return r;
+ }
+ if (amdgpu_async_gfx_ring)
+ amdgpu_gfx_disable_kgq(adev, 0);
+ } else {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
}
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
@@ -4541,7 +4627,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
if (r)
return r;
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
@@ -4722,6 +4808,49 @@ static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+static int gfx_v11_0_set_userq_eop_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int m, p, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) {
+ for (m = 0; m < adev->gfx.me.num_me; m++) {
+ for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) {
+ irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) {
+ for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
+ for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + (m * adev->gfx.mec.num_pipe_per_mec)
+ + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -4731,9 +4860,11 @@ static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
+ gfx_v11_0_set_userq_eop_interrupts(adev, false);
if (!adev->no_hw_access) {
- if (amdgpu_async_gfx_ring) {
+ if (amdgpu_async_gfx_ring &&
+ !adev->gfx.disable_kq) {
if (amdgpu_gfx_disable_kgq(adev, 0))
DRM_ERROR("KGQ disable failed\n");
}
@@ -5059,11 +5190,36 @@ static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = true;
+ break;
+ case 1:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = false;
+ break;
+ case 2:
+ adev->gfx.disable_kq = true;
+ adev->gfx.disable_uq = false;
+ break;
+ }
+
adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
- adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
- AMDGPU_MAX_COMPUTE_RINGS);
+ if (adev->gfx.disable_kq) {
+ /* We need one GFX ring temporarily to set up
+ * the clear state.
+ */
+ adev->gfx.num_gfx_rings = 1;
+ adev->gfx.num_compute_rings = 0;
+ } else {
+ adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
+ }
gfx_v11_0_set_kiq_pm4_funcs(adev);
gfx_v11_0_set_ring_funcs(adev);
@@ -5094,6 +5250,11 @@ static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block)
r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
if (r)
return r;
+
+ r = gfx_v11_0_set_userq_eop_interrupts(adev, true);
+ if (r)
+ return r;
+
return 0;
}
@@ -5691,10 +5852,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
}
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x400000;
-
amdgpu_ring_write(ring, header);
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -5714,10 +5871,6 @@ static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x40000000;
-
/* Currently, there is a high possibility to get wave ID mismatch
* between ME and GDS, leading to a hw deadlock, because ME generates
* different wave IDs than the GDS expects. This situation happens
@@ -5775,8 +5928,7 @@ static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
- amdgpu_ring_write(ring, ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
+ amdgpu_ring_write(ring, 0);
}
static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
@@ -5804,10 +5956,7 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- if (ring->is_mes_queue)
- gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
- else
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* compute doesn't have PFP */
if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
@@ -6036,28 +6185,13 @@ static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
void *de_payload_cpu_addr;
int cnt;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v10_gfx_meta_data, de_payload);
- de_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gds_backup) +
- offsetof(struct v10_gfx_meta_data, de_payload);
- gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v10_gfx_meta_data, de_payload);
- de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+ offset = offsetof(struct v10_gfx_meta_data, de_payload);
+ de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
- AMDGPU_CSA_SIZE - adev->gds.gds_size,
- PAGE_SIZE);
- }
+ gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
+ AMDGPU_CSA_SIZE - adev->gds.gds_size,
+ PAGE_SIZE);
de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
@@ -6296,25 +6430,23 @@ static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- int i;
+ u32 doorbell_offset = entry->src_data[0];
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
- uint32_t mes_queue_id = entry->src_data[0];
+ int i;
DRM_DEBUG("IH: CP EOP\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
-
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
} else {
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
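With MES queue IDs gone from the IV payload, the handler above resolves user-queue fences by doorbell offset through adev->userq_xa. An illustrative sketch of how a fence driver might be published in that xarray; the helper name and call site are assumptions, only the xarray calls are standard kernel API:

static int example_publish_userq_fence_drv(struct amdgpu_device *adev,
					   struct amdgpu_userq_fence_driver *fence_drv,
					   u32 doorbell_offset)
{
	/*
	 * xa_store_irq() takes the xa_lock with interrupts disabled,
	 * matching the xa_lock_irqsave() lookup in gfx_v11_0_eop_irq().
	 */
	return xa_err(xa_store_irq(&adev->userq_xa, doorbell_offset,
				   fence_drv, GFP_KERNEL));
}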
@@ -6481,27 +6613,29 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
- switch (me_id) {
- case 0:
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
- }
- break;
- case 1:
- case 2:
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
+ if (!adev->gfx.disable_kq) {
+ switch (me_id) {
+ case 0:
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ default:
+ BUG();
+ break;
}
- break;
- default:
- BUG();
- break;
}
}
@@ -6609,6 +6743,69 @@ static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static bool gfx_v11_pipe_reset_support(struct amdgpu_device *adev)
+{
+	/* Disable the pipe reset until the CPFW fully supports it. */
+	dev_warn_once(adev->dev, "The CPFW doesn't support pipe reset yet.\n");
+ return false;
+}
+
+static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r;
+
+ if (!gfx_v11_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v11_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);
+
+ r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v11_0_unset_safe_mode(adev, 0);
+
+	dev_info(adev->dev, "Ring %s pipe reset to the ME firmware start PC %s\n", ring->name,
+		 r == 0 ? "succeeded" : "failed");
+	/* FIXME: Sometimes the driver can't cache the ME firmware start PC correctly,
+	 * so the pipe reset status relies on the later gfx ring test result.
+	 */
+ return 0;
+}
+
static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6618,8 +6815,13 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return -EINVAL;
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
- if (r)
- return r;
+ if (r) {
+		dev_warn(adev->dev, "reset via MES failed (%d), trying pipe reset\n", r);
+ r = gfx_v11_reset_gfx_pipe(ring);
+ if (r)
+ return r;
+ }
r = gfx_v11_0_kgq_init_queue(ring, true);
if (r) {
@@ -6636,6 +6838,136 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return amdgpu_ring_test_ring(ring);
}
+static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r;
+
+ if (!gfx_v11_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v11_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
+ clean_pipe = reset_pipe;
+
+ if (adev->gfx.rs64_enable) {
+
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
+ r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ } else {
+ if (ring->me == 1) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ /* mec1 fw pc: CP_MEC1_INSTR_PNTR */
+ } else {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+			/* mec2 fw pc: CP_MEC2_INSTR_PNTR */
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
+ r = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_MEC1_INSTR_PNTR));
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v11_0_unset_safe_mode(adev, 0);
+
+	dev_info(adev->dev, "Ring %s pipe reset to the MEC firmware start PC %s\n", ring->name,
+		 r == 0 ? "succeeded" : "failed");
+	/* FIXME: Sometimes the driver can't cache the MEC firmware start PC correctly, so the pipe
+	 * reset status relies on the compute ring test result.
+	 */
+ return 0;
+}
+
static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6646,8 +6978,10 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
- dev_err(adev->dev, "reset via MMIO failed %d\n", r);
- return r;
+		dev_warn(adev->dev, "failed (%d) to reset kcq, trying pipe reset\n", r);
+ r = gfx_v11_0_reset_compute_pipe(ring);
+ if (r)
+ return r;
}
r = gfx_v11_0_kcq_init_queue(ring, true);
@@ -6693,9 +7027,14 @@ static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printe
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_11[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_11[reg].reg_offset == regCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "regCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_11[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -6755,9 +7094,16 @@ static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block)
/* ME0 is for GFX so start from 1 for CP */
soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_11[reg]));
+ if (i &&
+ gc_cp_reg_list_11[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0,
+ regCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_11[reg]));
}
index += reg_count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index e7b58e470292..f09d96bfee16 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -44,6 +44,8 @@
#include "gfx_v12_0.h"
#include "nbif_v6_3_1.h"
#include "mes_v12_0.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
#define GFX12_NUM_GFX_RINGS 1
#define GFX12_MEC_HPD_SIZE 2048
@@ -133,11 +135,14 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR0),
SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR1),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_RS64_INSTR_PNTR),
-
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
@@ -186,7 +191,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_12[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
@@ -215,7 +229,24 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = {
@@ -475,33 +506,18 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t padding, offset;
-
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- padding = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
-
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
- *cpu_ptr = cpu_to_le32(0xCAFEDEAD);
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r)
- return r;
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- cpu_ptr = &adev->wb.wb[index];
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ cpu_ptr = &adev->wb.wb[index];
- r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
- goto err1;
- }
+ r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
@@ -528,12 +544,10 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err2:
- if (!ring->is_mes_queue)
- amdgpu_ib_free(&ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -881,6 +895,34 @@ static void gfx_v12_0_select_me_pipe_q(struct amdgpu_device *adev,
soc24_grbm_select(adev, me, pipe, q, vm);
}
+/* all sizes are in bytes */
+#define MQD_SHADOW_BASE_SIZE 73728
+#define MQD_SHADOW_BASE_ALIGNMENT 256
+#define MQD_FWWORKAREA_SIZE 484
+#define MQD_FWWORKAREA_ALIGNMENT 256
+
+static void gfx_v12_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info)
+{
+ shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
+ shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
+ shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
+ shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+}
+
+static int gfx_v12_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check)
+{
+ if (adev->gfx.cp_gfx_shadow || skip_check) {
+ gfx_v12_0_get_gfx_shadow_info_nocheck(adev, shadow_info);
+ return 0;
+ }
+
+ memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
+ return -EINVAL;
+}
+
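The new skip_check parameter lets a caller obtain the shadow/CSA sizes regardless of whether cp_gfx_shadow has been confirmed, which is what user-queue setup needs. A hypothetical caller sketch; only the vtable signature comes from this hunk, the helper name and dev_dbg output are assumptions:

static int example_query_shadow_sizes(struct amdgpu_device *adev)
{
	struct amdgpu_gfx_shadow_info info;
	int r;

	/* skip_check = true: always report the IP-specific sizes */
	r = adev->gfx.funcs->get_gfx_shadow_info(adev, &info, true);
	if (r)
		return r;

	dev_dbg(adev->dev, "shadow %u (align %u), csa %u (align %u)\n",
		info.shadow_size, info.shadow_alignment,
		info.csa_size, info.csa_alignment);
	return 0;
}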
static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v12_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v12_0_select_se_sh,
@@ -889,6 +931,7 @@ static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v12_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v12_0_select_me_pipe_q,
.update_perfmon_mgcg = &gfx_v12_0_update_perf_clk,
+ .get_gfx_shadow_info = &gfx_v12_0_get_gfx_shadow_info,
};
static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1346,6 +1389,7 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
unsigned num_compute_rings;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
@@ -1354,7 +1398,7 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(12, 0, 1):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 8;
adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 2;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -1372,6 +1416,22 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
+ if (!adev->gfx.disable_uq &&
+ adev->gfx.me_fw_version >= 2780 &&
+ adev->gfx.pfp_fw_version >= 2840 &&
+ adev->gfx.mec_fw_version >= 3050 &&
+ adev->mes.fw_version[0] >= 123) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
if (adev->gfx.me_fw_version >= 2480 &&
adev->gfx.pfp_fw_version >= 2530 &&
adev->gfx.mec_fw_version >= 2680 &&
@@ -1383,11 +1443,13 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
break;
}
- /* recalculate compute rings to use based on hardware configuration */
- num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
- adev->gfx.mec.num_queue_per_pipe) / 2;
- adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
- num_compute_rings);
+ if (adev->gfx.num_compute_rings) {
+ /* recalculate compute rings to use based on hardware configuration */
+ num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe) / 2;
+ adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
+ num_compute_rings);
+ }
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
@@ -1433,37 +1495,41 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* set up the gfx ring */
- for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
- if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
- continue;
-
- r = gfx_v12_0_gfx_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
- ring_id++;
+ if (adev->gfx.num_gfx_rings) {
+ /* set up the gfx ring */
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
+ if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
+ continue;
+
+ r = gfx_v12_0_gfx_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
+ ring_id++;
+ }
}
}
}
- ring_id = 0;
- /* set up the compute queues - allocate horizontally across pipes */
- for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
- for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev,
- 0, i, k, j))
- continue;
+ if (adev->gfx.num_compute_rings) {
+ ring_id = 0;
+ /* set up the compute queues - allocate horizontally across pipes */
+ for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+ for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev,
+ 0, i, k, j))
+ continue;
- r = gfx_v12_0_compute_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
+ r = gfx_v12_0_compute_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
- ring_id++;
+ ring_id++;
+ }
}
}
}
@@ -2324,7 +2390,7 @@ static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
@@ -2468,7 +2534,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
@@ -2948,6 +3014,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -2968,6 +3036,14 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
+ /* set gfx UQ items */
+ mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr);
+ mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr);
+ mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr);
+ mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr);
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -3091,6 +3167,8 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
@@ -3142,6 +3220,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_active = prop->hqd_active;
+	/* set UQ fence address */
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -3426,7 +3508,7 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
if (r)
return r;
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
@@ -3600,6 +3682,49 @@ static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+static int gfx_v12_0_set_userq_eop_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int m, p, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) {
+ for (m = 0; m < adev->gfx.me.num_me; m++) {
+ for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) {
+ irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) {
+ for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
+ for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + (m * adev->gfx.mec.num_pipe_per_mec)
+ + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -3610,6 +3735,7 @@ static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
+ gfx_v12_0_set_userq_eop_interrupts(adev, false);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -3698,11 +3824,33 @@ static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = true;
+ break;
+ case 1:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = false;
+ break;
+ case 2:
+ adev->gfx.disable_kq = true;
+ adev->gfx.disable_uq = false;
+ break;
+ }
+
adev->gfx.funcs = &gfx_v12_0_gfx_funcs;
- adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
- AMDGPU_MAX_COMPUTE_RINGS);
+ if (adev->gfx.disable_kq) {
+ adev->gfx.num_gfx_rings = 0;
+ adev->gfx.num_compute_rings = 0;
+ } else {
+ adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS;
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
+ }
gfx_v12_0_set_kiq_pm4_funcs(adev);
gfx_v12_0_set_ring_funcs(adev);
@@ -3733,6 +3881,10 @@ static int gfx_v12_0_late_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = gfx_v12_0_set_userq_eop_interrupts(adev, true);
+ if (r)
+ return r;
+
return 0;
}
@@ -4172,45 +4324,17 @@ static u64 gfx_v12_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v12_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
- uint64_t wptr_tmp;
-
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
- wptr_tmp = ring->wptr & ring->buf_mask;
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
- *wptr_saved = wptr_tmp;
- /* assume doorbell always being used by mes mapped queue */
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- } else {
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- }
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+ ring->wptr);
+ WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
- if (ring->use_doorbell) {
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr);
- WDOORBELL64(ring->doorbell_index, ring->wptr);
- } else {
- WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
- lower_32_bits(ring->wptr));
- WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
- upper_32_bits(ring->wptr));
- }
+ WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
+ lower_32_bits(ring->wptr));
+ WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
+ upper_32_bits(ring->wptr));
}
}
@@ -4235,42 +4359,14 @@ static u64 gfx_v12_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
static void gfx_v12_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
- uint64_t wptr_tmp;
-
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
- wptr_tmp = ring->wptr & ring->buf_mask;
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
- *wptr_saved = wptr_tmp;
- /* assume doorbell always used by mes mapped queue */
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- } else {
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- }
+ /* XXX check if swapping is necessary on BE */
+ if (ring->use_doorbell) {
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+ ring->wptr);
+ WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
- /* XXX check if swapping is necessary on BE */
- if (ring->use_doorbell) {
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr);
- WDOORBELL64(ring->doorbell_index, ring->wptr);
- } else {
- BUG(); /* only DOORBELL method supported on gfx12 now */
- }
+ BUG(); /* only DOORBELL method supported on gfx12 now */
}
}
@@ -4317,10 +4413,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
control |= ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x400000;
-
amdgpu_ring_write(ring, header);
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -4340,10 +4432,6 @@ static void gfx_v12_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x40000000;
-
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -4383,8 +4471,7 @@ static void gfx_v12_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
- amdgpu_ring_write(ring, ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
+ amdgpu_ring_write(ring, 0);
}
static void gfx_v12_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
@@ -4412,10 +4499,7 @@ static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v12_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- if (ring->is_mes_queue)
- gfx_v12_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
- else
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* compute doesn't have PFP */
if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
@@ -4749,25 +4833,23 @@ static int gfx_v12_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- int i;
+ u32 doorbell_offset = entry->src_data[0];
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
- uint32_t mes_queue_id = entry->src_data[0];
+ int i;
DRM_DEBUG("IH: CP EOP\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
-
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
} else {
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
@@ -4934,27 +5016,29 @@ static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev,
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
- switch (me_id) {
- case 0:
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
- }
- break;
- case 1:
- case 2:
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
+ if (!adev->gfx.disable_kq) {
+ switch (me_id) {
+ case 0:
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ default:
+ BUG();
+ break;
}
- break;
- default:
- BUG();
- break;
}
}
@@ -5160,6 +5244,69 @@ static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block)
amdgpu_gfx_off_ctrl(adev, true);
}
+static bool gfx_v12_pipe_reset_support(struct amdgpu_device *adev)
+{
+ /* Disable the pipe reset until the CPFW fully supports it. */
+ dev_warn_once(adev->dev, "The CPFW doesn't support pipe reset yet.\n");
+ return false;
+}
+
+static int gfx_v12_reset_gfx_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r;
+
+ if (!gfx_v12_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v12_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);
+
+ r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v12_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe reset: %s\n", ring->name,
+ r == 0 ? "succeeded" : "failed");
+ /* Sometimes the ME start PC counter isn't latched correctly, so the
+ * PC check serves only as a reference; the pipe reset result relies
+ * on the later ring test.
+ */
+ return 0;
+}
+
static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -5170,8 +5317,10 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
if (r) {
- dev_err(adev->dev, "reset via MES failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "reset via MES failed (%d), trying pipe reset\n", r);
+ r = gfx_v12_reset_gfx_pipe(ring);
+ if (r)
+ return r;
}
r = gfx_v12_0_kgq_init_queue(ring, true);
@@ -5189,6 +5338,89 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return amdgpu_ring_test_ring(ring);
}
+static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r = 0;
+
+ if (!gfx_v12_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v12_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
+ clean_pipe = reset_pipe;
+
+ if (adev->gfx.rs64_enable) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
+ r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ } else {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
+ /* No F32 MEC instruction pointer register was found; assume the
+ * driver won't run in F32 mode.
+ */
+ }
+
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v12_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe reset: %s\n", ring->name,
+ r == 0 ? "succeeded" : "failed");
+ /* The ring test is needed to verify the pipe reset result. */
+ return 0;
+}
+
static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -5199,8 +5431,10 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
- dev_err(adev->dev, "reset via MMIO failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "failed (%d) to reset kcq, trying pipe reset\n", r);
+ r = gfx_v12_0_reset_compute_pipe(ring);
+ if (r)
+ return r;
}
r = gfx_v12_0_kcq_init_queue(ring, true);
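The reworked gfx_v12_0_eop_irq() above replaces the old MES queue-ID IDR lookup with an xarray keyed by doorbell offset. A minimal sketch of that lookup pattern, assuming the adev->userq_xa mapping and the fence-driver helper named in the hunk:

        /* Sketch only: resolve a user-queue fence driver from the IRQ's
         * doorbell offset and kick its fence processing, as the EOP
         * handler above does for user queues.
         */
        static void userq_eop_fence_process(struct amdgpu_device *adev,
                                            u32 doorbell_offset)
        {
                struct amdgpu_userq_fence_driver *fence_drv;
                unsigned long flags;

                xa_lock_irqsave(&adev->userq_xa, flags);
                fence_drv = xa_load(&adev->userq_xa, doorbell_offset);
                if (fence_drv)
                        amdgpu_userq_fence_driver_process(fence_drv);
                xa_unlock_irqrestore(&adev->userq_xa, flags);
        }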
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 13fbee46417a..70d7a1f434c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -53,6 +53,9 @@
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define GFX6_NUM_GFX_RINGS 1
+#define GFX6_NUM_COMPUTE_RINGS 2
+
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
@@ -1732,10 +1735,14 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
gfx_v6_0_get_cu_info(adev);
gfx_v6_0_config_init(adev);
- WREG32(mmCP_QUEUE_THRESHOLDS, ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
- (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
- WREG32(mmCP_MEQ_THRESHOLDS, (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
- (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
+ WREG32(mmCP_QUEUE_THRESHOLDS,
+ ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
+ (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
+
+ /* set HW defaults for 3D engine */
+ WREG32(mmCP_MEQ_THRESHOLDS,
+ (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
+ (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
sx_debug_1 = RREG32(mmSX_DEBUG_1);
WREG32(mmSX_DEBUG_1, sx_debug_1);
@@ -2851,44 +2858,21 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
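The gfx_v6_0_get_csb_buffer() rework above (and the matching v7/v8/v9 hunks below) folds the open-coded clear-state preamble into the shared amdgpu_gfx_csb_preamble_start / amdgpu_gfx_csb_data_parser / amdgpu_gfx_csb_preamble_end helpers. Inferred from the removed lines, the preamble helpers presumably reduce to something like this sketch; the real implementations live outside this diff and may differ in detail:

        /* Sketch: clear-state buffer preamble, reconstructed from the
         * removed open-coded sequence; count bookkeeping matches the
         * call sites above.
         */
        static u32 csb_preamble_start(volatile u32 *buffer)
        {
                u32 count = 0;

                buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
                buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
                buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
                buffer[count++] = cpu_to_le32(0x80000000);
                buffer[count++] = cpu_to_le32(0x80000000);

                return count;
        }

        static void csb_preamble_end(volatile u32 *buffer, u32 count)
        {
                buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
                buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
                buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
                buffer[count++] = cpu_to_le32(0);
        }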
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8181bd0e4f18..da0534ff1271 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -55,6 +55,9 @@
#define GFX7_NUM_GFX_RINGS 1
#define GFX7_MEC_HPD_SIZE 2048
+#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
+#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
+
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
@@ -3882,67 +3885,22 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_KAVERI:
- buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_KABINI:
- case CHIP_MULLINS:
- buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_HAWAII:
- buffer[count++] = cpu_to_le32(0x3a00161a);
- buffer[count++] = cpu_to_le32(0x0000002e);
- break;
- default:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- }
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index bfedd487efc5..5ee2237d8ee8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1223,48 +1223,22 @@ out:
static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
- PACKET3_SET_CONTEXT_REG_START);
+ buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d7db4cb907ae..d377a7c57d5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -225,17 +225,36 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_9[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6),
- /* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME2_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE1),
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE2),
- SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3)
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3),
+ /* packet headers */
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP)
};
static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
@@ -277,6 +296,14 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_LO),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GFX_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP)
};
enum ta_ras_gfx_subblock {
@@ -1624,42 +1651,16 @@ static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
@@ -5441,16 +5442,8 @@ static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
payload_size = sizeof(struct v9_ce_ib_state);
- if (ring->is_mes_queue) {
- payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
- } else {
- payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
- }
+ payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
@@ -5473,16 +5466,8 @@ static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
payload_size = sizeof(struct v9_de_ib_state);
- if (ring->is_mes_queue) {
- payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
- } else {
- payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
- }
+ payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
IB_COMPLETION_STATUS_PREEMPTED;
@@ -5672,19 +5657,9 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ce_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- }
+ offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
@@ -5770,28 +5745,13 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bo
void *de_payload_cpu_addr;
int cnt;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gds_backup) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+ offset = offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
- AMDGPU_CSA_SIZE - adev->gds.gds_size,
- PAGE_SIZE);
- }
+ gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
+ AMDGPU_CSA_SIZE - adev->gds.gds_size,
+ PAGE_SIZE);
if (usegds) {
de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
@@ -7339,9 +7299,14 @@ static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_9[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "mmCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_9[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -7378,9 +7343,13 @@ static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
soc15_grbm_select(adev, 1 + i, j, k, 0, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_9[reg]));
+ if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_9[reg]));
}
index += reg_count;
}
@@ -7394,8 +7363,14 @@ static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
/* Emit the cleaner shader */
- amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ else
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0));
+
amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index d81449f9d822..c48cd47b531f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -1547,7 +1547,7 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
{
uint32_t bank, way, mem;
static const char * const vml2_way_str[] = { "BIGK", "4K" };
- static const char * const utcl2_rounter_str[] = { "VMC", "APT" };
+ static const char * const utcl2_router_str[] = { "VMC", "APT" };
mem = instance % blk->num_mem_blocks;
way = (instance / blk->num_mem_blocks) % blk->num_ways;
@@ -1568,7 +1568,7 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
dev_info(
adev->dev,
"GFX SubBlock UTCL2_ROUTER_IFIF%d_GROUP0_%s, SED %d, DED %d\n",
- bank, utcl2_rounter_str[mem], sec_cnt, ded_cnt);
+ bank, utcl2_router_str[mem], sec_cnt, ded_cnt);
break;
case ATC_L2_CACHE_2M:
dev_info(
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 53fbf6ca7cdb..c233edf60569 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -105,9 +105,6 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
- /* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
@@ -154,6 +151,14 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};
struct amdgpu_gfx_ras gfx_v9_4_3_ras;
@@ -1148,6 +1153,12 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
break;
+ case IP_VERSION(9, 5, 0):
+ if (adev->gfx.mec_fw_version >= 21) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
+ }
+ break;
default:
break;
}
@@ -1262,6 +1273,22 @@ static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
}
}
+/* For ASICs that need the xnack chain and whose MEC firmware supports it,
+ * set the SQ_CONFIG1 DISABLE_XNACK_CHECK_IN_RETRY_DISABLE bit and inform
+ * KFD to set the xnack_chain bit in SET_RESOURCES.
+ */
+static void gfx_v9_4_3_xcc_init_sq(struct amdgpu_device *adev, int xcc_id)
+{
+ uint32_t data;
+
+ if (!(adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
+ return;
+
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1);
+ data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
+ WREG32_SOC15(GC, xcc_id, regSQ_CONFIG1, data);
+}
+
static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
int xcc_id)
{
@@ -1306,6 +1333,7 @@ static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
+ gfx_v9_4_3_xcc_init_sq(adev, xcc_id);
}
static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
@@ -1318,6 +1346,20 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
adev->gfx.config.db_debug2 =
RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ /* ToDo: GC 9.4.4 */
+ case IP_VERSION(9, 4, 3):
+ if (adev->gfx.mec_fw_version >= 184)
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ break;
+ case IP_VERSION(9, 5, 0):
+ if (adev->gfx.mec_fw_version >= 23)
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < num_xcc; i++)
gfx_v9_4_3_xcc_constants_init(adev, i);
}
@@ -3447,9 +3489,7 @@ static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
{
- /*TODO: Need check gfx9.4.4 mec fw whether supports pipe reset as well.*/
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
- adev->gfx.mec_fw_version >= 0x0000009b)
+ if (!!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
return true;
else
dev_warn_once(adev->dev, "Please use the latest MEC version to see whether support pipe reset\n");
@@ -4558,12 +4598,21 @@ static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_pri
"\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
xcc_id, i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p,
- "%-50s \t 0x%08x\n",
- gc_cp_reg_list_9_4_3[reg].reg_name,
- adev->gfx.ip_dump_compute_queues
- [xcc_offset + inst_offset +
- reg]);
+ if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p,
+ "%-50s \t 0x%08x\n",
+ "regCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset + inst_offset +
+ reg]);
+ else
+ drm_printf(p,
+ "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_9_4_3[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset + inst_offset +
+ reg]);
}
inst_offset += reg_count;
}
@@ -4612,12 +4661,20 @@ static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
GET_INST(GC, xcc_id));
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues
- [xcc_offset +
- inst_offset + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(
- gc_cp_reg_list_9_4_3[reg],
- GET_INST(GC, xcc_id)));
+ if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset +
+ inst_offset + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
+ regCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset +
+ inst_offset + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(
+ gc_cp_reg_list_9_4_3[reg],
+ GET_INST(GC, xcc_id)));
}
inst_offset += reg_count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 95d894a231fc..a3e2787501f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -268,7 +268,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -428,10 +428,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
else
@@ -969,7 +965,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
adev->hdp.funcs->init_registers(adev);
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index ad099f136f84..72211409227b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -229,7 +229,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -393,10 +393,6 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
else
@@ -752,6 +748,18 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;
+ /* The mall_size is already calculated as mall_size_per_umc * num_umc.
+ * However, for gfx1151, which features a 2-to-1 UMC mapping,
+ * the result must be multiplied by 2 to determine the actual mall size.
+ */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 5, 1):
+ adev->gmc.mall_size *= 2;
+ break;
+ default:
+ break;
+ }
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
@@ -832,7 +840,7 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.first_kfd_vmid = 8;
+ adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;
amdgpu_vm_manager_init(adev);
@@ -899,7 +907,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
return r;
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
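The mall_size fixup in the gmc_v11_0_sw_init() hunk above is plain arithmetic: the generic path computes mall_size_per_umc * num_umc, which undercounts by half on GC 11.5.1 because of its 2-to-1 UMC mapping. A sketch with hypothetical numbers:

        /* Hypothetical example: 1 MiB per UMC, 2 UMCs reported. */
        u64 mall_size = 1 * SZ_1M * 2;          /* generic result: 2 MiB */

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1))
                mall_size *= 2;                 /* actual MALL size: 4 MiB */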
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index 05c026d0b0d9..b645d3e6a6c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -297,7 +297,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
return;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -413,10 +413,6 @@ static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
else
@@ -820,7 +816,7 @@ static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.first_kfd_vmid = 8;
+ adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;
amdgpu_vm_manager_init(adev);
@@ -881,7 +877,7 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
return r;
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index a992e79d9581..8030fcd64210 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -249,7 +249,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
/* disable VGA render */
tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp &= ~VGA_VSTATUS_CNTL;
+ tmp &= VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK;
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
/* Update configuration */
@@ -627,17 +627,16 @@ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
"write" : "read", block, mc_client, mc_id);
}
-/*
static const u32 mc_cg_registers[] = {
- MC_HUB_MISC_HUB_CG,
- MC_HUB_MISC_SIP_CG,
- MC_HUB_MISC_VM_CG,
- MC_XPB_CLK_GAT,
- ATC_MISC_CG,
- MC_CITF_MISC_WR_CG,
- MC_CITF_MISC_RD_CG,
- MC_CITF_MISC_VM_CG,
- VM_L2_CG,
+ mmMC_HUB_MISC_HUB_CG,
+ mmMC_HUB_MISC_SIP_CG,
+ mmMC_HUB_MISC_VM_CG,
+ mmMC_XPB_CLK_GAT,
+ mmATC_MISC_CG,
+ mmMC_CITF_MISC_WR_CG,
+ mmMC_CITF_MISC_RD_CG,
+ mmMC_CITF_MISC_VM_CG,
+ mmVM_L2_CG,
};
static const u32 mc_cg_ls_en[] = {
@@ -672,7 +671,7 @@ static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
data |= mc_cg_ls_en[i];
else
data &= ~mc_cg_ls_en[i];
@@ -689,7 +688,7 @@ static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
data |= mc_cg_en[i];
else
data &= ~mc_cg_en[i];
@@ -705,7 +704,7 @@ static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
orig = data = RREG32_PCIE(ixPCIE_CNTL2);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
@@ -728,7 +727,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
else
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
@@ -744,7 +743,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_MEM_POWER_LS);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
else
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
@@ -752,7 +751,6 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
if (orig != data)
WREG32(mmHDP_MEM_POWER_LS, data);
}
-*/
static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
@@ -1098,6 +1096,20 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
static int gmc_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool gate = false;
+
+ if (state == AMD_CG_STATE_GATE)
+ gate = true;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ gmc_v6_0_enable_mc_mgcg(adev, gate);
+ gmc_v6_0_enable_mc_ls(adev, gate);
+ }
+ gmc_v6_0_enable_bif_mgls(adev, gate);
+ gmc_v6_0_enable_hdp_mgcg(adev, gate);
+ gmc_v6_0_enable_hdp_ls(adev, gate);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 83e39f16044a..a8d5795084fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1157,17 +1157,10 @@ static bool gmc_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
- u32 tmp;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
- SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
- SRBM_STATUS__MCC_BUSY_MASK |
- SRBM_STATUS__MCD_BUSY_MASK |
- SRBM_STATUS__VMC_BUSY_MASK);
- if (!tmp)
+ if (gmc_v7_0_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 783e0c3b86b4..282197f4ffb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1213,10 +1213,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
if (uncached) {
mtype = MTYPE_UC;
} else if (ext_coherent) {
- if (gc_ip_version == IP_VERSION(9, 5, 0) || adev->rev_id)
- mtype = is_local ? MTYPE_CC : MTYPE_UC;
- else
- mtype = MTYPE_UC;
+ mtype = is_local ? MTYPE_CC : MTYPE_UC;
} else if (adev->flags & AMD_IS_APU) {
mtype = is_local ? mtype_local : MTYPE_NC;
} else {
@@ -1336,7 +1333,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
mtype_local = MTYPE_CC;
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype_local);
- } else if (adev->rev_id) {
+ } else {
/* MTYPE_UC case */
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
}
@@ -1505,7 +1502,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
- adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
adev->umc.ras = &umc_v12_0_ras;
break;
@@ -2075,6 +2071,9 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
}
static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
@@ -2411,13 +2410,6 @@ static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.flush_tlb_needs_extra_type_2 =
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
adev->gmc.xgmi.num_physical_nodes;
- /*
- * TODO: This workaround is badly documented and had a buggy
- * implementation. We should probably verify what we do here.
- */
- adev->gmc.flush_tlb_needs_extra_type_0 =
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
- adev->rev_id == 0;
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
@@ -2435,7 +2427,7 @@ static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
adev->hdp.funcs->init_registers(adev);
/* After HDP is initialized, flush HDP.*/
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index f1dc13b3ab38..e6c0d86d3486 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -36,17 +36,6 @@
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
-static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -180,7 +169,7 @@ struct amdgpu_hdp_ras hdp_v4_0_ras = {
};
const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
- .flush_hdp = hdp_v4_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.invalidate_hdp = hdp_v4_0_invalidate_hdp,
.update_clock_gating = hdp_v4_0_update_clock_gating,
.get_clock_gating_state = hdp_v4_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index 43195c079748..8bc001dc9f63 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -27,17 +27,6 @@
#include "hdp/hdp_5_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
-static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -217,7 +206,7 @@ static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
- .flush_hdp = hdp_v5_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.invalidate_hdp = hdp_v5_0_invalidate_hdp,
.update_clock_gating = hdp_v5_0_update_clock_gating,
.get_clock_gating_state = hdp_v5_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index fcb8dd2876bc..40940b4ab400 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -33,7 +33,17 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
if (!ring || !ring->funcs->emit_wreg) {
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
- RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ if (amdgpu_sriov_vf(adev)) {
+ /* this is fine because SR-IOV doesn't remap the register */
+ RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
+ /* We just need to read back a register to post the write.
+ * Reading back the remapped register causes problems on
+ * some platforms so just read back the memory size register.
+ */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
+ }
} else {
amdgpu_ring_emit_wreg(ring,
(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index a88d25a06c29..ec20daf4272c 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -30,17 +30,6 @@
#define regHDP_CLK_CNTL_V6_1 0xd5
#define regHDP_CLK_CNTL_V6_1_BASE_IDX 0
-static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -149,7 +138,7 @@ static void hdp_v6_0_get_clockgating_state(struct amdgpu_device *adev,
}
const struct amdgpu_hdp_funcs hdp_v6_0_funcs = {
- .flush_hdp = hdp_v6_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.update_clock_gating = hdp_v6_0_update_clock_gating,
.get_clock_gating_state = hdp_v6_0_get_clockgating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
index 49f7eb4fbd11..ed1debc03507 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -27,17 +27,6 @@
#include "hdp/hdp_7_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
-static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -137,7 +126,7 @@ static void hdp_v7_0_get_clockgating_state(struct amdgpu_device *adev,
}
const struct amdgpu_hdp_funcs hdp_v7_0_funcs = {
- .flush_hdp = hdp_v7_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.update_clock_gating = hdp_v7_0_update_clock_gating,
.get_clock_gating_state = hdp_v7_0_get_clockgating_state,
};
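The hdp_v4_0/v5_0/v6_0/v7_0 hunks above all delete an identical flush_hdp implementation and point .flush_hdp at amdgpu_hdp_generic_flush instead. Based on the removed bodies, the shared helper presumably amounts to the following sketch; the actual helper is defined elsewhere in the series and may add further checks:

        /* Sketch of the consolidated HDP flush, mirroring the removed
         * per-IP copies: write the remapped flush register, then post the
         * write, either directly or via the ring's wreg packet.
         */
        static void hdp_generic_flush(struct amdgpu_device *adev,
                                      struct amdgpu_ring *ring)
        {
                u32 reg = (adev->rmmio_remap.reg_offset +
                           KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2;

                if (!ring || !ring->funcs->emit_wreg) {
                        WREG32(reg, 0);
                        RREG32(reg);    /* read back to post the write */
                } else {
                        amdgpu_ring_emit_wreg(ring, reg, 0);
                }
        }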
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index eb4185dcbd1d..5900b560b7de 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -349,6 +349,7 @@ static int ih_v6_0_irq_init(struct amdgpu_device *adev)
if (ret)
return ret;
}
+ ih[i]->overflow = false;
}
/* update doorbell range for ih ring 0 */
@@ -446,7 +447,10 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ if (!amdgpu_sriov_vf(adev))
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ else
+ ih->overflow = true;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 32). Hopefully
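The overflow handling above differs between host and VF: the host clears RB_OVERFLOW in its local wptr copy, while on SR-IOV the bit is left in place and the condition is recorded in ih->overflow instead; in both cases parsing resumes 32 entries past the reported wptr so the overwritten slots are skipped. A stand-alone sketch of that pattern, with a made-up bit position and ring size in place of the REG_GET_FIELD()/REG_SET_FIELD() helpers:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define RB_OVERFLOW_MASK  (1u << 31)	/* hypothetical bit position */
#define RING_SIZE_DW      4096u		/* hypothetical ring size in dwords */

struct ih_ring {
	uint32_t rptr;
	bool overflow;
};

static uint32_t handle_wptr(struct ih_ring *ih, uint32_t wptr, bool is_vf)
{
	if (!(wptr & RB_OVERFLOW_MASK))
		return wptr;

	if (is_vf)
		ih->overflow = true;		/* VF leaves the bit alone and remembers the overflow */
	else
		wptr &= ~RB_OVERFLOW_MASK;	/* host clears the flag in its wptr copy */

	/* resume parsing after the oldest entries that were overwritten */
	ih->rptr = (wptr + 32) & (RING_SIZE_DW - 1);
	return wptr & ~RB_OVERFLOW_MASK;
}

int main(void)
{
	struct ih_ring ih = { 0 };
	uint32_t w = handle_wptr(&ih, RB_OVERFLOW_MASK | 100, true);

	printf("wptr=%u rptr=%u overflow=%d\n", w, ih.rptr, ih.overflow);
	return 0;
}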
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 218e16b68f1d..cb94bd71300f 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -28,11 +28,13 @@
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
#include "jpeg_v5_0_1.h"
+#include "mmsch_v5_0.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
+static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
@@ -156,21 +158,16 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
- ring->use_doorbell = false;
+ ring->use_doorbell = true;
ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
if (!amdgpu_sriov_vf(adev)) {
ring->doorbell_index =
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
1 + j + 11 * jpeg_inst;
} else {
- if (j < 4)
- ring->doorbell_index =
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 4 + j + 32 * jpeg_inst;
- else
- ring->doorbell_index =
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 8 + j + 32 * jpeg_inst;
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 2 + j + 32 * jpeg_inst;
}
sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
@@ -237,7 +234,10 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
int i, j, r, jpeg_inst;
if (amdgpu_sriov_vf(adev)) {
- /* jpeg_v5_0_1_start_sriov(adev); */
+ r = jpeg_v5_0_1_start_sriov(adev);
+ if (r)
+ return r;
+
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
@@ -264,7 +264,7 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->jpeg.inst[i].ring_dec[j];
if (ring->use_doorbell)
WREG32_SOC15_OFFSET(VCN, GET_INST(VCN, i), regVCN_JPEG_DB_CTRL,
- (ring->pipe ? (ring->pipe - 0x15) : 0),
+ ring->pipe,
ring->doorbell_index <<
VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
VCN_JPEG_DB_CTRL__EN_MASK);
@@ -291,8 +291,10 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
cancel_delayed_work_sync(&adev->jpeg.idle_work);
- if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
- ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
+ ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ }
return ret;
}
@@ -422,6 +424,119 @@ static void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring)
reg_offset);
}
+static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw, item_offset;
+ uint32_t init_status;
+ int i, j, jpeg_inst;
+
+ struct mmsch_v5_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v5_0_cmd_end end = { {0} };
+ struct mmsch_v5_0_init_header header;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ end.cmd_header.command_type =
+ MMSCH_COMMAND__END;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+
+ item_offset = header.total_size;
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ table_size = 0;
+
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr));
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr));
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC_RB_SIZE);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4);
+
+ if (j < 5) {
+ header.mjpegdec0[j].table_offset = item_offset;
+ header.mjpegdec0[j].init_status = 0;
+ header.mjpegdec0[j].table_size = table_size;
+ } else {
+ header.mjpegdec1[j - 5].table_offset = item_offset;
+ header.mjpegdec1[j - 5].init_status = 0;
+ header.mjpegdec1[j - 5].table_size = table_size;
+ }
+ header.total_size += table_size;
+ item_offset += table_size;
+ }
+
+ MMSCH_V5_0_INSERT_END();
+
+ /* send init table to MMSCH */
+ size = sizeof(struct mmsch_v5_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp);
+
+ size = header.total_size;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size);
+
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ param = 0x00000001;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ init_status =
+ ((struct mmsch_v5_0_init_header *)(table_loc))->mjpegdec0[i].init_status;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP);
+
+ if (resp != 0)
+ break;
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE &&
+ init_status != MMSCH_VF_ENGINE_STATUS__PASS)
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n",
+ resp, init_status);
+
+ }
+ return 0;
+}
+
/**
* jpeg_v5_0_1_start - start JPEG block
*
@@ -581,6 +696,11 @@ static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
struct amdgpu_device *adev = ip_block->adev;
int ret;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == adev->jpeg.cur_state)
return 0;
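The MMSCH handshake added in jpeg_v5_0_1_start_sriov() reduces to: build the init table, program the context address and size, kick the mailbox, then busy-wait on the response register in 10 us steps with a 1000 us budget before giving up with -EBUSY. Below is a stand-alone sketch of that bounded polling loop; poll_resp() stands in for the RREG32_SOC15() read of regMMSCH_VF_MAILBOX_RESP and the response codes mirror the ones defined in mmsch_v5_0.h.

#include <stdint.h>
#include <stdio.h>

#define RESP_OK         0x1
#define RESP_INCOMPLETE 0x2

/* stand-in for reading regMMSCH_VF_MAILBOX_RESP */
static uint32_t poll_resp(void)
{
	static int calls;

	return ++calls < 5 ? 0 : RESP_OK;	/* pretend the FW answers on the 5th read */
}

static int wait_for_mmsch(void)
{
	uint32_t resp = 0, waited = 0;
	const uint32_t step_us = 10, timeout_us = 1000;

	while (resp != RESP_OK) {
		resp = poll_resp();
		if (resp)
			break;			/* any non-zero answer ends the wait */
		/* udelay(step_us) in the real code */
		waited += step_us;
		if (waited >= timeout_us)
			return -1;		/* -EBUSY in the driver */
	}

	return (resp == RESP_OK || resp == RESP_INCOMPLETE) ? 0 : -2;
}

int main(void)
{
	printf("mmsch wait: %d\n", wait_for_mmsch());
	return 0;
}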
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
new file mode 100644
index 000000000000..d6f50b13e2ba
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
+
+#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
+#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
+
+static int
+mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
+{
+ int ret;
+
+ ret = amdgpu_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
+ goto err_reserve_bo_failed;
+ }
+
+ ret = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (ret) {
+ DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
+ goto err_map_bo_gart_failed;
+ }
+
+ amdgpu_bo_unreserve(bo);
+ bo = amdgpu_bo_ref(bo);
+
+ return 0;
+
+err_map_bo_gart_failed:
+ amdgpu_bo_unreserve(bo);
+err_reserve_bo_failed:
+ return ret;
+}
+
+static int
+mes_userq_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ uint64_t wptr)
+{
+ struct amdgpu_bo_va_mapping *wptr_mapping;
+ struct amdgpu_vm *wptr_vm;
+ struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
+ int ret;
+
+ wptr_vm = queue->vm;
+ ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
+ if (ret)
+ return ret;
+
+ wptr &= AMDGPU_GMC_HOLE_MASK;
+ wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
+ amdgpu_bo_unreserve(wptr_vm->root.bo);
+ if (!wptr_mapping) {
+ DRM_ERROR("Failed to lookup wptr bo\n");
+ return -EINVAL;
+ }
+
+ wptr_obj->obj = wptr_mapping->bo_va->base.bo;
+ if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
+ DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
+ return -EINVAL;
+ }
+
+ ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
+ if (ret) {
+ DRM_ERROR("Failed to map wptr bo to GART\n");
+ return ret;
+ }
+
+ queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset_no_check(wptr_obj->obj);
+ return 0;
+}
+
+static int convert_to_mes_priority(int priority)
+{
+ switch (priority) {
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW:
+ default:
+ return AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW:
+ return AMDGPU_MES_PRIORITY_LEVEL_LOW;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH:
+ return AMDGPU_MES_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH:
+ return AMDGPU_MES_PRIORITY_LEVEL_HIGH;
+ }
+}
+
+static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
+ struct mes_add_queue_input queue_input;
+ int r;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
+
+ queue_input.process_va_start = 0;
+ queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
+
+ /* set process quantum to 10 ms and gang quantum to 1 ms as default */
+ queue_input.process_quantum = 100000;
+ queue_input.gang_quantum = 10000;
+ queue_input.paging = false;
+
+ queue_input.process_context_addr = ctx->gpu_addr;
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+ queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
+ queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority);
+
+ queue_input.process_id = queue->vm->pasid;
+ queue_input.queue_type = queue->queue_type;
+ queue_input.mqd_addr = queue->mqd.gpu_addr;
+ queue_input.wptr_addr = userq_props->wptr_gpu_addr;
+ queue_input.queue_size = userq_props->queue_size >> 2;
+ queue_input.doorbell_offset = userq_props->doorbell_index;
+ queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
+ queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
+ return r;
+ }
+
+ DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
+ return 0;
+}
+
+static int mes_userq_unmap(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_remove_queue_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
+ queue_input.doorbell_offset = queue->doorbell_index;
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
+ return r;
+}
+
+static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ struct drm_amdgpu_userq_in *mqd_user)
+{
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r, size;
+
+ /*
+	 * The FW expects at least one page of space allocated for
+	 * the process ctx and one for the gang ctx. Allocate a
+	 * single object that covers both.
+ */
+ size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
+ r = amdgpu_userq_create_object(uq_mgr, ctx, size);
+ if (r) {
+ DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
+ struct drm_amdgpu_userq_in *args_in,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
+ struct drm_amdgpu_userq_in *mqd_user = args_in;
+ struct amdgpu_mqd_prop *userq_props;
+ int r;
+
+ /* Structure to initialize MQD for userqueue using generic MQD init function */
+ userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL);
+ if (!userq_props) {
+ DRM_ERROR("Failed to allocate memory for userq_props\n");
+ return -ENOMEM;
+ }
+
+ if (!mqd_user->wptr_va || !mqd_user->rptr_va ||
+ !mqd_user->queue_va || mqd_user->queue_size == 0) {
+ DRM_ERROR("Invalid MQD parameters for userqueue\n");
+ r = -EINVAL;
+ goto free_props;
+ }
+
+ r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
+ if (r) {
+ DRM_ERROR("Failed to create MQD object for userqueue\n");
+ goto free_props;
+ }
+
+ /* Initialize the MQD BO with user given values */
+ userq_props->wptr_gpu_addr = mqd_user->wptr_va;
+ userq_props->rptr_gpu_addr = mqd_user->rptr_va;
+ userq_props->queue_size = mqd_user->queue_size;
+ userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
+ userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
+ userq_props->use_doorbell = true;
+ userq_props->doorbell_index = queue->doorbell_index;
+ userq_props->fence_address = queue->fence_drv->gpu_addr;
+
+ if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
+ struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
+
+ if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
+ DRM_ERROR("Invalid compute IP MQD size\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(compute_mqd)) {
+ DRM_ERROR("Failed to read user MQD\n");
+ r = -ENOMEM;
+ goto free_mqd;
+ }
+
+ userq_props->eop_gpu_addr = compute_mqd->eop_va;
+ userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
+ userq_props->hqd_active = false;
+ userq_props->tmz_queue =
+ mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+ kfree(compute_mqd);
+ } else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
+ struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
+
+ if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
+ DRM_ERROR("Invalid GFX MQD\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(mqd_gfx_v11)) {
+ DRM_ERROR("Failed to read user MQD\n");
+ r = -ENOMEM;
+ goto free_mqd;
+ }
+
+ userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
+ userq_props->csa_addr = mqd_gfx_v11->csa_va;
+ userq_props->tmz_queue =
+ mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+ kfree(mqd_gfx_v11);
+ } else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
+ struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
+
+ if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
+ DRM_ERROR("Invalid SDMA MQD\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(mqd_sdma_v11)) {
+ DRM_ERROR("Failed to read sdma user MQD\n");
+ r = -ENOMEM;
+ goto free_mqd;
+ }
+
+ userq_props->csa_addr = mqd_sdma_v11->csa_va;
+ kfree(mqd_sdma_v11);
+ }
+
+ queue->userq_prop = userq_props;
+
+ r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props);
+ if (r) {
+ DRM_ERROR("Failed to initialize MQD for userqueue\n");
+ goto free_mqd;
+ }
+
+ /* Create BO for FW operations */
+ r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user);
+ if (r) {
+ DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
+ goto free_mqd;
+ }
+
+ /* FW expects WPTR BOs to be mapped into GART */
+ r = mes_userq_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr);
+ if (r) {
+ DRM_ERROR("Failed to create WPTR mapping\n");
+ goto free_ctx;
+ }
+
+ return 0;
+
+free_ctx:
+ amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
+
+free_mqd:
+ amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
+
+free_props:
+ kfree(userq_props);
+
+ return r;
+}
+
+static void
+mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
+ kfree(queue->userq_prop);
+ amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
+}
+
+const struct amdgpu_userq_funcs userq_mes_funcs = {
+ .mqd_create = mes_userq_mqd_create,
+ .mqd_destroy = mes_userq_mqd_destroy,
+ .unmap = mes_userq_unmap,
+ .map = mes_userq_map,
+};
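mes_userq_mqd_create() above is a textbook example of the kernel's allocate-in-order, unwind-in-reverse error handling: each failure jumps to the label that frees only what was already set up. A stand-alone sketch of that control flow with dummy helpers; none of these names exist in amdgpu, they only mirror the shape of the function.

#include <stdio.h>
#include <stdlib.h>

static void *alloc_step(const char *what, int fail)
{
	if (fail) {
		fprintf(stderr, "failed to allocate %s\n", what);
		return NULL;
	}
	return malloc(1);
}

static int create_queue(int fail_at)
{
	void *props, *mqd, *ctx;
	int r = -1;

	props = alloc_step("props", fail_at == 1);
	if (!props)
		return r;

	mqd = alloc_step("mqd", fail_at == 2);
	if (!mqd)
		goto free_props;

	ctx = alloc_step("ctx", fail_at == 3);
	if (!ctx)
		goto free_mqd;

	return 0;			/* success: everything stays allocated, like the driver keeps it */

free_mqd:
	free(mqd);
free_props:
	free(props);
	return r;
}

int main(void)
{
	printf("%d %d\n", create_queue(0), create_queue(3));
	return 0;
}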
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h
index 7ac87ef26aec..090ae8897770 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2023 Red Hat Inc.
+ * Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +19,12 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ad102_ofa = {
- .sclass = {
- { -1, -1, NVC9FA_VIDEO_OFA },
- {}
- }
-};
-int
-ad102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ad102_ofa, device, type, inst, pengine);
+#ifndef MES_USERQ_H
+#define MES_USERQ_H
+#include "amdgpu_userq.h"
- return -ENODEV;
-}
+extern const struct amdgpu_userq_funcs userq_mes_funcs;
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index e65916ada23b..c9eba537de09 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -287,6 +287,23 @@ static int convert_to_mes_queue_type(int queue_type)
return -1;
}
+static int convert_to_mes_priority_level(int priority_level)
+{
+ switch (priority_level) {
+ case AMDGPU_MES_PRIORITY_LEVEL_LOW:
+ return AMD_PRIORITY_LEVEL_LOW;
+ case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
+ default:
+ return AMD_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
+ return AMD_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
+ return AMD_PRIORITY_LEVEL_HIGH;
+ case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
+ return AMD_PRIORITY_LEVEL_REALTIME;
+ }
+}
+
static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
struct mes_add_queue_input *input)
{
@@ -310,9 +327,9 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gang_quantum = input->gang_quantum;
mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
mes_add_queue_pkt.inprocess_gang_priority =
- input->inprocess_gang_priority;
+ convert_to_mes_priority_level(input->inprocess_gang_priority);
mes_add_queue_pkt.gang_global_priority_level =
- input->gang_global_priority_level;
+ convert_to_mes_priority_level(input->gang_global_priority_level);
mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_add_queue_pkt.mqd_addr = input->mqd_addr;
@@ -458,31 +475,6 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
return r;
}
-static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
- struct mes_reset_queue_input *input)
-{
- if (input->use_mmio)
- return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
- input->me_id, input->pipe_id,
- input->queue_id, input->vmid);
-
- union MESAPI__RESET mes_reset_queue_pkt;
-
- memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
-
- mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
- mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
- mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-
- mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
- mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
- /*mes_reset_queue_pkt.reset_queue_only = 1;*/
-
- return mes_v11_0_submit_pkt_and_poll_completion(mes,
- &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
- offsetof(union MESAPI__REMOVE_QUEUE, api_status));
-}
-
static int mes_v11_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -649,7 +641,7 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
break;
case MES_MISC_OP_CHANGE_CONFIG:
if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
- dev_err(mes->adev->dev, "MES FW versoin must be larger than 0x63 to support limit single process feature.\n");
+ dev_err(mes->adev->dev, "MES FW version must be larger than 0x63 to support limit single process feature.\n");
return -EINVAL;
}
misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
@@ -694,7 +686,8 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes->compute_hqd_mask[i];
for (i = 0; i < MAX_GFX_PIPES; i++)
- mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];
+ mes_set_hw_res_pkt.gfx_hqd_mask[i] =
+ mes->gfx_hqd_mask[i];
for (i = 0; i < MAX_SDMA_PIPES; i++)
mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];
@@ -723,7 +716,7 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes->event_log_gpu_addr;
}
- if (enforce_isolation)
+ if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
mes_set_hw_res_pkt.limit_single_process = 1;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
@@ -753,8 +746,8 @@ static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}
-static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input)
+static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
{
union MESAPI__RESET mes_reset_queue_pkt;
@@ -772,7 +765,7 @@ static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
- if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ if (input->legacy_gfx) {
mes_reset_queue_pkt.reset_legacy_gfx = 1;
mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
mes_reset_queue_pkt.queue_id_lp = input->queue_id;
@@ -798,7 +791,6 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.suspend_gang = mes_v11_0_suspend_gang,
.resume_gang = mes_v11_0_resume_gang,
.misc_op = mes_v11_0_misc_op,
- .reset_legacy_queue = mes_v11_0_reset_legacy_queue,
.reset_hw_queue = mes_v11_0_reset_hw_queue,
};
@@ -894,6 +886,10 @@ static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
{
int pipe;
+ /* return early if we have already fetched these */
+ if (adev->mes.sched_version && adev->mes.kiq_version)
+ return;
+
/* get MES scheduler/KIQ versions */
mutex_lock(&adev->srbm_mutex);
@@ -1697,22 +1693,10 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int mes_v11_0_late_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- /* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
- (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3)))
- amdgpu_mes_self_test(adev);
-
- return 0;
-}
-
static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.name = "mes_v11_0",
.early_init = mes_v11_0_early_init,
- .late_init = mes_v11_0_late_init,
+ .late_init = NULL,
.sw_init = mes_v11_0_sw_init,
.sw_fini = mes_v11_0_sw_fini,
.hw_init = mes_v11_0_hw_init,
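The corrected message in mes_v11_0_misc_op() belongs to a check of the packed MES scheduler version: the word read back from the firmware is masked down to the version field before being compared against the minimum that supports the limit-single-process feature. A stand-alone sketch of that mask-and-compare; the 0xfff mask here is only an assumption made for the example, not necessarily the value of AMDGPU_MES_VERSION_MASK.

#include <stdint.h>
#include <stdio.h>

#define VERSION_MASK   0x00000fffu	/* assumed low-bits mask for the example */
#define MIN_FW_VERSION 0x63u

static int supports_limit_single_process(uint32_t sched_version)
{
	/* only the low version field matters; the rest of the word carries other info */
	return (sched_version & VERSION_MASK) >= MIN_FW_VERSION;
}

int main(void)
{
	printf("%d %d\n",
	       supports_limit_single_process(0x12340062),
	       supports_limit_single_process(0x12340070));
	return 0;
}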
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 183dd3346da5..b4f17332d466 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -274,6 +274,23 @@ static int convert_to_mes_queue_type(int queue_type)
return -1;
}
+static int convert_to_mes_priority_level(int priority_level)
+{
+ switch (priority_level) {
+ case AMDGPU_MES_PRIORITY_LEVEL_LOW:
+ return AMD_PRIORITY_LEVEL_LOW;
+ case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
+ default:
+ return AMD_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
+ return AMD_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
+ return AMD_PRIORITY_LEVEL_HIGH;
+ case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
+ return AMD_PRIORITY_LEVEL_REALTIME;
+ }
+}
+
static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
struct mes_add_queue_input *input)
{
@@ -297,9 +314,9 @@ static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gang_quantum = input->gang_quantum;
mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
mes_add_queue_pkt.inprocess_gang_priority =
- input->inprocess_gang_priority;
+ convert_to_mes_priority_level(input->inprocess_gang_priority);
mes_add_queue_pkt.gang_global_priority_level =
- input->gang_global_priority_level;
+ convert_to_mes_priority_level(input->gang_global_priority_level);
mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_add_queue_pkt.mqd_addr = input->mqd_addr;
@@ -477,32 +494,6 @@ static int mes_v12_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
return r;
}
-static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
- struct mes_reset_queue_input *input)
-{
- union MESAPI__RESET mes_reset_queue_pkt;
- int pipe;
-
- memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
-
- mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
- mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
- mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-
- mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
- mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
- /*mes_reset_queue_pkt.reset_queue_only = 1;*/
-
- if (mes->adev->enable_uni_mes)
- pipe = AMDGPU_MES_KIQ_PIPE;
- else
- pipe = AMDGPU_MES_SCHED_PIPE;
-
- return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
- &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
- offsetof(union MESAPI__REMOVE_QUEUE, api_status));
-}
-
static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -762,7 +753,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
pipe * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);
}
- if (enforce_isolation)
+ if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
mes_set_hw_res_pkt.limit_single_process = 1;
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
@@ -845,8 +836,8 @@ static void mes_v12_0_enable_unmapped_doorbell_handling(
WREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL, data);
}
-static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input)
+static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
{
union MESAPI__RESET mes_reset_queue_pkt;
int pipe;
@@ -865,7 +856,7 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
- if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ if (input->legacy_gfx) {
mes_reset_queue_pkt.reset_legacy_gfx = 1;
mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
mes_reset_queue_pkt.queue_id_lp = input->queue_id;
@@ -878,7 +869,7 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
}
- if (mes->adev->enable_uni_mes)
+ if (input->is_kq)
pipe = AMDGPU_MES_KIQ_PIPE;
else
pipe = AMDGPU_MES_SCHED_PIPE;
@@ -896,7 +887,6 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.suspend_gang = mes_v12_0_suspend_gang,
.resume_gang = mes_v12_0_resume_gang,
.misc_op = mes_v12_0_misc_op,
- .reset_legacy_queue = mes_v12_0_reset_legacy_queue,
.reset_hw_queue = mes_v12_0_reset_hw_queue,
};
@@ -1392,17 +1382,20 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
mes_v12_0_queue_init_register(ring);
}
- /* get MES scheduler/KIQ versions */
- mutex_lock(&adev->srbm_mutex);
- soc21_grbm_select(adev, 3, pipe, 0, 0);
+ if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) ||
+ ((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) {
+ /* get MES scheduler/KIQ versions */
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, 3, pipe, 0, 0);
- if (pipe == AMDGPU_MES_SCHED_PIPE)
- adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
- else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
- adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ if (pipe == AMDGPU_MES_SCHED_PIPE)
+ adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
+ adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
- soc21_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
return 0;
}
@@ -1808,21 +1801,10 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int mes_v12_0_late_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- /* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend)
- amdgpu_mes_self_test(adev);
-
- return 0;
-}
-
static const struct amd_ip_funcs mes_v12_0_ip_funcs = {
.name = "mes_v12_0",
.early_init = mes_v12_0_early_init,
- .late_init = mes_v12_0_late_init,
+ .late_init = NULL,
.sw_init = mes_v12_0_sw_init,
.sw_fini = mes_v12_0_sw_fini,
.hw_init = mes_v12_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 84cde1239ee4..76167fadb292 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -30,6 +30,7 @@
#include "soc15_common.h"
#include "soc15.h"
#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
#define regVM_L2_CNTL3_DEFAULT 0x80100007
#define regVM_L2_CNTL4_DEFAULT 0x000000c1
@@ -192,10 +193,8 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
uint32_t tmp, inst_mask;
int i;
- /* Setup TLB control */
- inst_mask = adev->aid_mask;
- for_each_inst(i, inst_mask) {
- tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+ if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) {
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
1);
@@ -209,7 +208,26 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
- WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL);
+ } else {
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
+ 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC);/* XXX for emulation. */
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+ WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
}
}
@@ -221,6 +239,9 @@ static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev)
uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
inst_mask = adev->aid_mask;
for_each_inst(i, inst_mask) {
for (j = 0; j < 5; j++) { /* DAGB instances */
@@ -454,6 +475,30 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
return 0;
}
+static void mmhub_v1_8_disable_l1_tlb(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ u32 i, inst_mask;
+
+ if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) {
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL);
+ } else {
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
+ 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
+ }
+}
+
static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub;
@@ -467,15 +512,6 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
for (i = 0; i < 16; i++)
WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
i * hub->ctx_distance, 0);
-
- /* Setup TLB control */
- tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
- 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- ENABLE_ADVANCED_DRIVER_MODEL, 0);
- WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp);
-
if (!amdgpu_sriov_vf(adev)) {
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
@@ -485,6 +521,8 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
}
}
+
+ mmhub_v1_8_disable_l1_tlb(adev);
}
/**
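The TLB-control hunks in mmhub_v1_8.c are built from one idiom: read MC_VM_MX_L1_TLB_CNTL, update individual fields, then either write it back per instance or, when the VF must go through the PSP, hand the final value to psp_reg_program_no_ring(). REG_SET_FIELD() is essentially a mask-and-shift update; a stand-alone sketch with invented field positions:

#include <stdint.h>
#include <stdio.h>

/* hypothetical field layout of a TLB control register */
#define ENABLE_L1_TLB_SHIFT 0
#define ENABLE_L1_TLB_MASK  (0x1u << ENABLE_L1_TLB_SHIFT)
#define SYSTEM_ACCESS_SHIFT 3
#define SYSTEM_ACCESS_MASK  (0x3u << SYSTEM_ACCESS_SHIFT)

#define SET_FIELD(reg, mask, shift, val) \
	(((reg) & ~(mask)) | (((uint32_t)(val) << (shift)) & (mask)))

int main(void)
{
	uint32_t tmp = 0xdeadbe00u;		/* pretend this came from an MMIO read */

	tmp = SET_FIELD(tmp, ENABLE_L1_TLB_MASK, ENABLE_L1_TLB_SHIFT, 1);
	tmp = SET_FIELD(tmp, SYSTEM_ACCESS_MASK, SYSTEM_ACCESS_SHIFT, 3);

	printf("0x%08x\n", tmp);		/* the write-back (or PSP call) would happen here */
	return 0;
}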
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h
new file mode 100644
index 000000000000..6f749814929f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V5_0_H__
+#define __MMSCH_V5_0_H__
+
+#include "amdgpu_vcn.h"
+
+#define MMSCH_VERSION_MAJOR 5
+#define MMSCH_VERSION_MINOR 0
+#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+
+#define RB_ENABLED (1 << 0)
+#define RB4_ENABLED (1 << 1)
+
+#define MMSCH_VF_ENGINE_STATUS__PASS 0x1
+
+#define MMSCH_VF_MAILBOX_RESP__OK 0x1
+#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
+#define MMSCH_VF_MAILBOX_RESP__FAILED 0x3
+#define MMSCH_VF_MAILBOX_RESP__FAILED_SMALL_CTX_SIZE 0x4
+#define MMSCH_VF_MAILBOX_RESP__UNKNOWN_CMD 0x5
+
+enum mmsch_v5_0_command_type {
+ MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
+ MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
+ MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
+ MMSCH_COMMAND__END = 0xf
+};
+
+struct mmsch_v5_0_table_info {
+ uint32_t init_status;
+ uint32_t table_offset;
+ uint32_t table_size;
+};
+
+struct mmsch_v5_0_init_header {
+ uint32_t version;
+ uint32_t total_size;
+ struct mmsch_v5_0_table_info vcn0;
+ struct mmsch_v5_0_table_info mjpegdec0[5];
+ struct mmsch_v5_0_table_info mjpegdec1[5];
+};
+
+struct mmsch_v5_0_cmd_direct_reg_header {
+ uint32_t reg_offset : 28;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v5_0_cmd_indirect_reg_header {
+ uint32_t reg_offset : 20;
+ uint32_t reg_idx_space : 8;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v5_0_cmd_direct_write {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+struct mmsch_v5_0_cmd_direct_read_modify_write {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t write_data;
+ uint32_t mask_value;
+};
+
+struct mmsch_v5_0_cmd_direct_polling {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t mask_value;
+ uint32_t wait_value;
+};
+
+struct mmsch_v5_0_cmd_end {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+};
+
+struct mmsch_v5_0_cmd_indirect_write {
+ struct mmsch_v5_0_cmd_indirect_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+#define MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_read_modify_write); \
+ size_dw = size / 4; \
+ direct_rd_mod_wt.cmd_header.reg_offset = reg; \
+ direct_rd_mod_wt.mask_value = mask; \
+ direct_rd_mod_wt.write_data = data; \
+ memcpy((void *)table_loc, &direct_rd_mod_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_DIRECT_WT(reg, value) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_write); \
+ size_dw = size / 4; \
+ direct_wt.cmd_header.reg_offset = reg; \
+ direct_wt.reg_value = value; \
+ memcpy((void *)table_loc, &direct_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_polling); \
+ size_dw = size / 4; \
+ direct_poll.cmd_header.reg_offset = reg; \
+ direct_poll.mask_value = mask; \
+ direct_poll.wait_value = wait; \
+ memcpy((void *)table_loc, &direct_poll, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_END() { \
+ size = sizeof(struct mmsch_v5_0_cmd_end); \
+ size_dw = size / 4; \
+ memcpy((void *)table_loc, &end, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#endif
+
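The MMSCH_V5_0_INSERT_* macros above append packed dwords to the init table: a header dword whose low 28 bits hold the register offset and whose top 4 bits hold the command type, followed by the payload. A stand-alone sketch of packing a direct register write the same way (command type 0 per the enum in this header); the buffer handling is simplified for the example.

#include <stdint.h>
#include <stdio.h>

#define CMD_DIRECT_REG_WRITE 0x0	/* from mmsch_v5_0_command_type */

static uint32_t table[64];
static uint32_t table_dw;		/* running size in dwords */

static void insert_direct_wt(uint32_t reg_offset, uint32_t value)
{
	/* header: low 28 bits = register offset, top 4 bits = command type */
	table[table_dw++] = (reg_offset & 0x0fffffffu) |
			    ((uint32_t)CMD_DIRECT_REG_WRITE << 28);
	table[table_dw++] = value;
}

int main(void)
{
	insert_direct_wt(0x1234, 0xdeadbeef);
	insert_direct_wt(0x1235, 0xcafef00d);
	printf("table is %u dwords, first header 0x%08x\n", table_dw, table[0]);
	return 0;
}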
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index f5411b798e11..48101a34e049 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -274,6 +274,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ struct amdgpu_reset_context reset_context = { 0 };
amdgpu_virt_fini_data_exchange(adev);
@@ -281,8 +282,6 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
if (amdgpu_device_should_recover_gpu(adev)
&& (!amdgpu_device_has_job_running(adev) ||
adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) {
- struct amdgpu_reset_context reset_context;
- memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
@@ -293,6 +292,19 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
}
}
+static void xgpu_ai_mailbox_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_request_bad_pages(adev);
+ amdgpu_virt_init_data_exchange(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -312,26 +324,42 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
- case IDH_FLR_NOTIFICATION:
+ case IDH_RAS_BAD_PAGES_NOTIFICATION:
+ xgpu_ai_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.bad_pages_work);
+ break;
+ case IDH_UNRECOV_ERR_NOTIFICATION:
+ xgpu_ai_mailbox_send_ack(adev);
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
if (amdgpu_sriov_runtime(adev))
WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
- &adev->virt.flr_work),
- "Failed to queue work! at %s",
- __func__);
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
break;
- case IDH_QUERY_ALIVE:
- xgpu_ai_mailbox_send_ack(adev);
- break;
- /* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
- * it byfar since that polling thread will handle it,
- * other msg like flr complete is not handled here.
- */
- case IDH_CLR_MSG_BUF:
- case IDH_FLR_NOTIFICATION_CMPL:
- case IDH_READY_TO_ACCESS_GPU:
- default:
+ case IDH_FLR_NOTIFICATION:
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
+ break;
+ case IDH_QUERY_ALIVE:
+ xgpu_ai_mailbox_send_ack(adev);
+ break;
+	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ
+	 * handler can ignore it; the polling thread will handle it.
+	 * Other messages such as FLR complete are not handled here.
+	 */
+ case IDH_CLR_MSG_BUF:
+ case IDH_FLR_NOTIFICATION_CMPL:
+ case IDH_READY_TO_ACCESS_GPU:
+ default:
break;
}
@@ -387,6 +415,7 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
+ INIT_WORK(&adev->virt.bad_pages_work, xgpu_ai_mailbox_bad_pages_work);
return 0;
}
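The new bad-pages work handler only does its job when it can take the reset-domain semaphore for reading without blocking, so the request is silently skipped while a reset holds the lock. A stand-alone sketch of that trylock-guarded pattern, using a pthread rwlock in place of down_read_trylock() on the rw_semaphore:

/* build with: cc -pthread */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t reset_sem = PTHREAD_RWLOCK_INITIALIZER;

static void bad_pages_work(void)
{
	/* only proceed if no reset currently holds the lock for writing */
	if (pthread_rwlock_tryrdlock(&reset_sem) != 0) {
		puts("reset in progress, skip bad pages request");
		return;
	}

	puts("fini data exchange, request bad pages, re-init data exchange");
	pthread_rwlock_unlock(&reset_sem);
}

int main(void)
{
	bad_pages_work();
	return 0;
}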
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index ed57cbc150af..874b9f8f9804 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -40,6 +40,7 @@ enum idh_request {
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
IDH_RAS_POISON = 202,
+ IDH_REQ_RAS_BAD_PAGES = 205,
};
enum idh_event {
@@ -54,6 +55,9 @@ enum idh_event {
IDH_RAS_POISON_READY,
IDH_PF_SOFT_FLR_NOTIFICATION,
IDH_RAS_ERROR_DETECTED,
+ IDH_RAS_BAD_PAGES_READY = 15,
+ IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
+ IDH_UNRECOV_ERR_NOTIFICATION = 17,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 5aadf24cb202..f6d8597452ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -67,6 +67,8 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg == IDH_FAIL)
r = -EINVAL;
+ if (reg == IDH_UNRECOV_ERR_NOTIFICATION)
+ r = -ENODEV;
else if (reg != event)
return -ENOENT;
@@ -103,6 +105,7 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
int r;
uint64_t timeout, now;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
now = (uint64_t)ktime_to_ms(ktime_get());
timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;
@@ -110,8 +113,16 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
do {
r = xgpu_nv_mailbox_rcv_msg(adev, event);
if (!r) {
- dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n", event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
+ dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n",
+ event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
return 0;
+ } else if (r == -ENODEV) {
+ if (!amdgpu_ras_is_rma(adev)) {
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. "
+ "Runtime Services are halted.\n");
+ }
+ return r;
}
msleep(10);
@@ -166,6 +177,10 @@ static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
enum idh_event event = -1;
send_request:
+
+ if (amdgpu_ras_is_rma(adev))
+ return -ENODEV;
+
xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);
switch (req) {
@@ -187,6 +202,9 @@ send_request:
case IDH_REQ_RAS_CPER_DUMP:
event = IDH_RAS_CPER_DUMP_READY;
break;
+ case IDH_REQ_RAS_BAD_PAGES:
+ event = IDH_RAS_BAD_PAGES_READY;
+ break;
default:
break;
}
@@ -320,6 +338,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ struct amdgpu_reset_context reset_context = { 0 };
amdgpu_virt_fini_data_exchange(adev);
@@ -330,8 +349,6 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
- struct amdgpu_reset_context reset_context;
- memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
@@ -342,6 +359,19 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
}
}
+static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_request_bad_pages(adev);
+ amdgpu_virt_init_data_exchange(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -364,8 +394,27 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_NOTIFICATION:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.bad_pages_work);
+ break;
+ case IDH_UNRECOV_ERR_NOTIFICATION:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (!amdgpu_ras_is_rma(adev)) {
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
+ }
+
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
+ break;
case IDH_FLR_NOTIFICATION:
if (amdgpu_sriov_runtime(adev))
WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
@@ -436,6 +485,7 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
+ INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work);
return 0;
}
@@ -480,6 +530,11 @@ static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
}
+static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -492,4 +547,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
.req_ras_err_count = xgpu_nv_req_ras_err_count,
.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
+ .req_bad_pages = xgpu_nv_req_ras_bad_pages,
};
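With the changes above, once the VF is marked as being in an unrecoverable (RMA) state, every later mailbox access request is refused up front with -ENODEV instead of being sent to the host. A stand-alone sketch of that gate; the request names are taken from the enums in this file, but the helpers are invented for the example.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool is_rma;

static int send_access_request(const char *req)
{
	if (is_rma)
		return -ENODEV;		/* runtime services halted, do not touch the mailbox */

	printf("sending %s\n", req);
	return 0;
}

int main(void)
{
	send_access_request("IDH_REQ_RAS_BAD_PAGES");
	is_rma = true;			/* set when IDH_UNRECOV_ERR_NOTIFICATION arrives */
	printf("after RMA: %d\n", send_access_request("IDH_REQ_GPU_INIT_ACCESS"));
	return 0;
}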
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 72c9fceb9d79..5808689562cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -42,6 +42,7 @@ enum idh_request {
IDH_RAS_POISON = 202,
IDH_REQ_RAS_ERROR_COUNT = 203,
IDH_REQ_RAS_CPER_DUMP = 204,
+ IDH_REQ_RAS_BAD_PAGES = 205,
};
enum idh_event {
@@ -58,6 +59,9 @@ enum idh_event {
IDH_RAS_ERROR_DETECTED,
IDH_RAS_ERROR_COUNT_READY = 11,
IDH_RAS_CPER_DUMP_READY = 14,
+ IDH_RAS_BAD_PAGES_READY = 15,
+ IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
+ IDH_UNRECOV_ERR_NOTIFICATION = 17,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
index 2ece3ae75ec1..bed5ef4d8788 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
@@ -360,7 +360,7 @@ static void nbio_v7_11_get_clockgating_state(struct amdgpu_device *adev,
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
-#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
+#define MMIO_REG_HOLE_OFFSET 0x44000
static void nbio_v7_11_set_reg_remap(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index f23cb79110d6..a376f072700d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -177,8 +177,12 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
{
u32 doorbell_range = 0, doorbell_ctrl = 0;
u32 aid_id = instance;
+ u32 range_size;
if (use_doorbell) {
+ range_size = (amdgpu_ip_version(adev, GC_HWIP, 0) ==
+ IP_VERSION(9, 5, 0)) ?
+ 0xb : 0x9;
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_OFFSET_ENTRY,
@@ -186,7 +190,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_SIZE_ENTRY,
- 0x9);
+ range_size);
if (aid_id)
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
@@ -204,7 +208,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x4);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
- S2A_DOORBELL_PORT1_RANGE_SIZE, 0x9);
+ S2A_DOORBELL_PORT1_RANGE_SIZE, range_size);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index bb5dfc410a66..215543575f47 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -533,7 +533,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
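Several PSP hunks in this series replace a direct adev->hdp.funcs->flush_hdp() call with the amdgpu_device_flush_hdp() helper. The sketch below shows the general idea such a change suggests: route all callers through one wrapper so the callback-presence check lives in a single place. This is only an illustration of the idiom, not the actual helper's implementation.

#include <stdio.h>

struct dev;

struct hdp_funcs {
	void (*flush_hdp)(struct dev *d);
};

struct dev {
	const struct hdp_funcs *hdp;
};

/* single call site policy: check for a callback once, here, not at every caller */
static void device_flush_hdp(struct dev *d)
{
	if (d->hdp && d->hdp->flush_hdp)
		d->hdp->flush_hdp(d);
}

static void flush_impl(struct dev *d)
{
	(void)d;
	puts("hdp flushed");
}

int main(void)
{
	const struct hdp_funcs funcs = { .flush_hdp = flush_impl };
	struct dev d = { .hdp = &funcs };

	device_flush_hdp(&d);
	return 0;
}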
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index fcd708eae75c..80153f837470 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -34,9 +34,6 @@
#include "sdma0/sdma0_4_0_offset.h"
#include "nbio/nbio_7_4_offset.h"
-#include "oss/osssys_4_0_offset.h"
-#include "oss/osssys_4_0_sh_mask.h"
-
MODULE_FIRMWARE("amdgpu/renoir_asd.bin");
MODULE_FIRMWARE("amdgpu/renoir_ta.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_asd.bin");
@@ -99,9 +96,6 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
0x80000000, 0x80000000, false);
@@ -138,8 +132,6 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
0, true);
@@ -147,37 +139,6 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
-static void psp_v12_0_reroute_ih(struct psp_context *psp)
-{
- struct amdgpu_device *adev = psp->adev;
- uint32_t tmp;
-
- /* Change IH ring for VMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-
- /* Change IH ring for UMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-}
-
static int psp_v12_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -186,49 +147,23 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- psp_v12_0_reroute_ih(psp);
-
- if (amdgpu_sriov_vf(psp->adev)) {
- /* Write low address of the ring to C2PMSG_102 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_103 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
-
- /* Write the ring initialization command to C2PMSG_101 */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
- GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
-
- } else {
- /* Write low address of the ring to C2PMSG_69 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_70 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
- /* Write size of ring to C2PMSG_71 */
- psp_ring_reg = ring->ring_size;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
- /* Write the ring initialization command to C2PMSG_64 */
- psp_ring_reg = ring_type;
- psp_ring_reg = psp_ring_reg << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
- }
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
return ret;
}
@@ -247,9 +182,6 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
GFX_CTRL_CMD_ID_DESTROY_RINGS);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index cc621064610f..df612fd9cc50 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -71,20 +71,13 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_4_ta.bin");
/* Retry times for vmbx ready wait */
#define PSP_VMBX_POLLING_LIMIT 3000
-/* VBIOS gfl defines */
-#define MBOX_READY_MASK 0x80000000
-#define MBOX_STATUS_MASK 0x0000FFFF
-#define MBOX_COMMAND_MASK 0x00FF0000
-#define MBOX_READY_FLAG 0x80000000
-#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
-#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
-#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
-
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
#define regMP1_PUB_SCRATCH0 0x3b10090
+#define PSP13_BL_STATUS_SIZE 100
+
static int psp_v13_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -151,6 +144,32 @@ static bool psp_v13_0_is_sos_alive(struct psp_context *psp)
return sol_reg != 0x0;
}
+static void psp_v13_0_bootloader_print_status(struct psp_context *psp,
+ const char *msg)
+{
+ struct amdgpu_device *adev = psp->adev;
+ u32 bl_status_reg;
+ char bl_status_msg[PSP13_BL_STATUS_SIZE];
+ int i, at;
+
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
+ at = 0;
+ for_each_inst(i, adev->aid_mask) {
+ bl_status_reg =
+ (SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_92)
+ << 2) +
+ adev->asic_funcs->encode_ext_smn_addressing(i);
+ at += snprintf(bl_status_msg + at,
+ PSP13_BL_STATUS_SIZE - at,
+ " status(%02i): 0x%08x", i,
+ RREG32_PCIE_EXT(bl_status_reg));
+ }
+ dev_info(adev->dev, "%s - %s", msg, bl_status_msg);
+ }
+}
+
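The status helper above builds a single log line by advancing a write offset through a fixed buffer, one snprintf() per AID instance. A minimal sketch of that accumulation pattern, with illustrative names (n_inst and status[] are placeholders, not driver fields):

        char msg[100];
        int at = 0, i, n;

        for (i = 0; i < n_inst; i++) {
                n = snprintf(msg + at, sizeof(msg) - at,
                             " status(%02i): 0x%08x", i, status[i]);
                if (n < 0 || at + n >= sizeof(msg))
                        break;  /* buffer exhausted; stop appending */
                at += n;
        }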
static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -196,6 +215,9 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
if (ret == 0)
return 0;
+ if (retry_loop && !(retry_loop % 10))
+ psp_v13_0_bootloader_print_status(
+ psp, "Waiting for bootloader completion");
}
return ret;
@@ -610,7 +632,7 @@ static int psp_v13_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
@@ -710,7 +732,8 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd)
/* Ring the doorbell */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_73, 1);
- if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE)
+ if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE ||
+ cmd == C2PMSG_CMD_SPI_GET_FLASH_IMAGE)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
@@ -766,6 +789,37 @@ static int psp_v13_0_update_spirom(struct psp_context *psp,
return 0;
}
+static int psp_v13_0_dump_spirom(struct psp_context *psp,
+ uint64_t fw_pri_mc_addr)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ /* Confirm PSP is ready to start */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ if (ret) {
+ dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
+ return ret;
+ }
+
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, lower_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO);
+ if (ret)
+ return ret;
+
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, upper_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI);
+ if (ret)
+ return ret;
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_FLASH_IMAGE);
+
+ return ret;
+}
+
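For orientation, psp_v13_0_dump_spirom() mirrors the existing update path: the destination address is handed to the PSP in two 32-bit halves through C2PMSG_116, each latched by its own SPI command, and only then is the long-running read command issued. Condensed, illustrative flow (error handling omitted; dst stands for fw_pri_mc_addr):

        WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, lower_32_bits(dst));
        psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO);
        WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, upper_32_bits(dst));
        psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI);
        psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_FLASH_IMAGE); /* long spirom wait */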
static int psp_v13_0_vbflash_status(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -858,6 +912,25 @@ static bool psp_v13_0_is_reload_needed(struct psp_context *psp)
return false;
}
+static int psp_v13_0_reg_program_no_ring(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret = -EOPNOTSUPP;
+
+ /* PSP will broadcast the value to all instances */
+ if (amdgpu_sriov_vf(adev)) {
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_GBR_IH_SET);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, id);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, val);
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ }
+
+ return ret;
+}
+
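The new reg_program_no_ring hook only acts under SR-IOV: the command, register id and value go through C2PMSG_101/102/103 and the PSP broadcasts the write to all instances, so no GPCOM ring is required. A hedged caller-side sketch (PSP_REG_IH_RB_CNTL is one of the existing enum psp_reg_prog_id values; the surrounding error handling is illustrative, not the driver's):

        if (psp->funcs->reg_program_no_ring) {
                r = psp->funcs->reg_program_no_ring(psp, val, PSP_REG_IH_RB_CNTL);
                if (r)
                        dev_warn(adev->dev, "no-ring register program failed (%d)\n", r);
        }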
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
.wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
@@ -879,11 +952,13 @@ static const struct psp_funcs psp_v13_0_funcs = {
.load_usbc_pd_fw = psp_v13_0_load_usbc_pd_fw,
.read_usbc_pd_fw = psp_v13_0_read_usbc_pd_fw,
.update_spirom = psp_v13_0_update_spirom,
+ .dump_spirom = psp_v13_0_dump_spirom,
.vbflash_stat = psp_v13_0_vbflash_status,
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
.get_ras_capability = psp_v13_0_get_ras_capability,
.is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required,
.is_reload_needed = psp_v13_0_is_reload_needed,
+ .reg_program_no_ring = psp_v13_0_reg_program_no_ring,
};
void psp_v13_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index 7c49c3f3c388..256288c6cd78 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -498,7 +498,7 @@ static int psp_v14_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 688a720bbbbd..9c169112a5e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -106,8 +106,9 @@ static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
-static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
+static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring);
static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
u32 instance, u32 offset)
@@ -1333,6 +1334,11 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
}
}
+static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v4_4_2_stop_queue,
+ .start_kernel_queue = &sdma_v4_4_2_restore_queue,
+};
+
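With the old on-reset callback registration removed, each instance now carries its queue hooks directly in adev->sdma.instance[i].funcs. A hedged sketch of how a common reset helper can drive them (the actual amdgpu_sdma_reset_engine() flow is not part of this hunk):

        const struct amdgpu_sdma_funcs *funcs = adev->sdma.instance[i].funcs;
        int r = 0;

        if (funcs && funcs->stop_kernel_queue)
                r = funcs->stop_kernel_queue(&adev->sdma.instance[i].ring);

        /* ... the engine reset itself is performed here ... */

        if (!r && funcs && funcs->start_kernel_queue)
                r = funcs->start_kernel_queue(&adev->sdma.instance[i].ring);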
static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1351,8 +1357,6 @@ static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
sdma_v4_4_2_set_vm_pte_funcs(adev);
sdma_v4_4_2_set_irq_funcs(adev);
sdma_v4_4_2_set_ras_funcs(adev);
- sdma_v4_4_2_set_engine_reset_funcs(adev);
-
return 0;
}
@@ -1447,6 +1451,7 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
/* Initialize guilty flags for GFX and PAGE queues */
adev->sdma.instance[i].gfx_guilty = false;
adev->sdma.instance[i].page_guilty = false;
+ adev->sdma.instance[i].funcs = &sdma_v4_4_2_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1678,11 +1683,12 @@ static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
-static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_id)
+static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+ u32 instance_id = GET_INST(SDMA0, ring->me);
u32 inst_mask;
uint64_t rptr;
- struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -1715,11 +1721,11 @@ static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_
return 0;
}
-static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instance_id)
+static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
{
- int i;
+ struct amdgpu_device *adev = ring->adev;
u32 inst_mask;
- struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
+ int i;
inst_mask = 1 << ring->me;
udelay(50);
@@ -1739,16 +1745,6 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instan
return sdma_v4_4_2_inst_start(adev, inst_mask, true);
}
-static struct sdma_on_reset_funcs sdma_v4_4_2_engine_reset_funcs = {
- .pre_reset = sdma_v4_4_2_stop_queue,
- .post_reset = sdma_v4_4_2_restore_queue,
-};
-
-static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev)
-{
- amdgpu_sdma_register_on_reset_callbacks(adev, &sdma_v4_4_2_engine_reset_funcs);
-}
-
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -2373,7 +2369,9 @@ static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(9, 5, 0):
- /*TODO: enable the queue reset flag until fw supported */
+ if ((adev->gfx.mec_fw_version >= 0xf) && amdgpu_dpm_reset_sdma_is_supported(adev))
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 0dce59f4f6e2..9505ae96fbec 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -112,6 +112,8 @@ static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
+static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring);
static const struct soc15_reg_golden golden_settings_sdma_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
@@ -369,67 +371,36 @@ static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
DRM_DEBUG("Setting write pointer\n");
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- AMDGPU_MES_PRIORITY_LEVEL_NORMAL);
-
+ if (ring->use_doorbell) {
+ DRM_DEBUG("Using doorbell -- "
+ "wptr_offs == 0x%08x "
+ "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+ "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ /* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
- *wptr_saved = ring->wptr << 2;
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
-
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index,
- ring->wptr << 2);
- }
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
- if (ring->use_doorbell) {
- DRM_DEBUG("Using doorbell -- "
- "wptr_offs == 0x%08x "
- "lower_32_bits(ring->wptr) << 2 == 0x%08x "
- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
- ring->wptr_offs,
- lower_32_bits(ring->wptr << 2),
- upper_32_bits(ring->wptr << 2));
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("Not using doorbell -- "
- "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
- "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
- ring->me,
- lower_32_bits(ring->wptr << 2),
- ring->me,
- upper_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
- ring->me, mmSDMA0_GFX_RB_WPTR),
- lower_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
- ring->me, mmSDMA0_GFX_RB_WPTR_HI),
- upper_32_bits(ring->wptr << 2));
- }
+ DRM_DEBUG("Not using doorbell -- "
+ "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+ ring->me,
+ lower_32_bits(ring->wptr << 2),
+ ring->me,
+ upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
+ ring->me, mmSDMA0_GFX_RB_WPTR),
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
+ ring->me, mmSDMA0_GFX_RB_WPTR_HI),
+ upper_32_bits(ring->wptr << 2));
}
}
@@ -575,11 +546,9 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -588,15 +557,15 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
* sdma_v5_0_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
- *
+ * @inst_mask: mask of dma engine instances to be disabled
* Stop the gfx async dma ring buffers (NAVI10).
*/
-static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
{
u32 rb_cntl, ib_cntl;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -688,9 +657,11 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
+ uint32_t inst_mask;
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
if (!enable) {
- sdma_v5_0_gfx_stop(adev);
+ sdma_v5_0_gfx_stop(adev, 1 << inst_mask);
sdma_v5_0_rlc_stop(adev);
}
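sdma_v5_0_gfx_stop() now takes a bit-mask of engine instances and walks it with for_each_inst() instead of always touching every instance. An illustrative use of the new parameter (the mask value here is made up):

        uint32_t mask = BIT(0);         /* instance 0 only */
        int i;

        for_each_inst(i, mask)
                dev_dbg(adev->dev, "disabling SDMA%d gfx ring\n", i);

        sdma_v5_0_gfx_stop(adev, mask);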
@@ -1046,33 +1017,22 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1085,10 +1045,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -1100,8 +1057,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1124,38 +1080,24 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256,
- AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -1183,10 +1125,7 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1197,8 +1136,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1389,6 +1327,36 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
+static int sdma_v5_0_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id)
+{
+ u32 grbm_soft_reset;
+ u32 tmp;
+
+ grbm_soft_reset = REG_SET_FIELD(0,
+ GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+ 1);
+ grbm_soft_reset <<= instance_id;
+
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ return 0;
+}
+
+static const struct amdgpu_sdma_funcs sdma_v5_0_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v5_0_stop_queue,
+ .start_kernel_queue = &sdma_v5_0_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v5_0_soft_reset_engine,
+};
+
static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1431,6 +1399,7 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.instance[i].funcs = &sdma_v5_0_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -1572,32 +1541,25 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int i, j, r;
- u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+ u32 inst_id = ring->me;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ return amdgpu_sdma_reset_engine(adev, inst_id);
+}
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
- }
+static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
+{
+ u32 f32_cntl, freeze, cntl, stat1_reg;
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r = 0;
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
+ if (amdgpu_sriov_vf(adev))
return -EINVAL;
- }
+ i = ring->me;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* stop queue */
- ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
-
- rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ sdma_v5_0_gfx_stop(adev, 1 << i);
/* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
@@ -1628,30 +1590,25 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
- /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */
- preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
- preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
-
- soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
-
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
-
- udelay(50);
-
- soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 inst_id = ring->me;
+ u32 freeze;
+ int r;
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* unfreeze*/
- freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+ WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
- r = sdma_v5_0_gfx_resume_instance(adev, i, true);
-
-err0:
+ r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 2b39a03ff0c1..a6e612b4a892 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -113,6 +113,8 @@ static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev);
+static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring);
static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
@@ -394,11 +396,9 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if ((flags & AMDGPU_FENCE_FLAG_INT)) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -407,15 +407,15 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
* sdma_v5_2_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
- *
+ * @inst_mask: mask of dma engine instances to be disabled
* Stop the gfx async dma ring buffers.
*/
-static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
{
u32 rb_cntl, ib_cntl;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -506,9 +506,11 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
+ uint32_t inst_mask;
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
if (!enable) {
- sdma_v5_2_gfx_stop(adev);
+ sdma_v5_2_gfx_stop(adev, inst_mask);
sdma_v5_2_rlc_stop(adev);
}
@@ -761,37 +763,49 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
return 0;
}
-static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block)
+static int sdma_v5_2_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id)
{
- struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset;
u32 tmp;
- int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- grbm_soft_reset = REG_SET_FIELD(0,
- GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
- 1);
- grbm_soft_reset <<= i;
+ grbm_soft_reset = REG_SET_FIELD(0,
+ GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+ 1);
+ grbm_soft_reset <<= instance_id;
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ return 0;
+}
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sdma_v5_2_soft_reset_engine(adev, i);
udelay(50);
}
return 0;
}
+static const struct amdgpu_sdma_funcs sdma_v5_2_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v5_2_stop_queue,
+ .start_kernel_queue = &sdma_v5_2_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v5_2_soft_reset_engine,
+};
+
/**
* sdma_v5_2_start - setup and start the async dma engines
*
@@ -903,33 +917,22 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -942,10 +945,7 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -957,8 +957,7 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -981,37 +980,23 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -1039,10 +1024,7 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1053,8 +1035,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1337,6 +1318,7 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.instance[i].funcs = &sdma_v5_2_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -1472,32 +1454,25 @@ static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int i, j, r;
- u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+ u32 inst_id = ring->me;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ return amdgpu_sdma_reset_engine(adev, inst_id);
+}
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
- }
+static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring)
+{
+ u32 f32_cntl, freeze, cntl, stat1_reg;
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r = 0;
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
+ if (amdgpu_sriov_vf(adev))
return -EINVAL;
- }
+ i = ring->me;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* stop queue */
- ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
-
- rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ sdma_v5_2_gfx_stop(adev, 1 << i);
/*engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
@@ -1530,31 +1505,26 @@ static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
- /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */
- preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
- preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
-
- soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
-
-
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
-
- udelay(50);
-
- soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 inst_id = ring->me;
+ u32 freeze;
+ int r;
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* unfreeze and unhalt */
- freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = RREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+ WREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
- r = sdma_v5_2_gfx_resume_instance(adev, i, true);
+ r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true);
-err0:
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index c214c3d2149b..da5b5d64f137 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -43,6 +43,7 @@
#include "sdma_common.h"
#include "sdma_v6_0.h"
#include "v11_structs.h"
+#include "mes_userqueue.h"
MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
@@ -376,11 +377,9 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -891,6 +890,9 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_aql_cntl = regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
m->sdmax_rlcx_dummy_reg = regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;
+ m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
+ m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);
+
return 0;
}
@@ -917,33 +919,22 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -956,10 +947,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -971,8 +959,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -995,37 +982,23 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
@@ -1053,10 +1026,7 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1067,8 +1037,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1300,6 +1269,23 @@ static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int r;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = true;
+ break;
+ case 1:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = false;
+ break;
+ case 2:
+ adev->sdma.no_user_submission = true;
+ adev->sdma.disable_uq = false;
+ break;
+ }
+
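Roughly, per the switch above, the amdgpu_user_queue module parameter maps onto the two new flags as follows (the right-hand interpretation is a reading of the flag names, not text from the patch):

        /*
         * amdgpu_user_queue    no_user_submission   disable_uq
         *   -1, 0 (default)         false              true      kernel queues only
         *    1                      false              false     kernel and user queues
         *    2                      true               false     user queues only
         */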
r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r)
return r;
@@ -1334,6 +1320,7 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->me = i;
+ ring->no_user_submission = adev->sdma.no_user_submission;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
@@ -1376,6 +1363,10 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ /* add firmware version checks here */
+ if (0 && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+
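The "if (0 && ...)" guard keeps the user-queue function table unhooked until firmware version checks are added, as the comment says. A purely hypothetical shape of that eventual gate (SDMA_UQ_MIN_FW_VERSION is invented for illustration and is not a real define):

        if (adev->sdma.instance[0].fw_version >= SDMA_UQ_MIN_FW_VERSION &&
            !adev->sdma.disable_uq)
                adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;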
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)
return r;
@@ -1399,11 +1390,39 @@ static int sdma_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int sdma_v6_0_set_userq_trap_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int i, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ irq_type = AMDGPU_SDMA_IRQ_INSTANCE0 + i;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->sdma.trap_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->sdma.trap_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
static int sdma_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
- return sdma_v6_0_start(adev);
+ r = sdma_v6_0_start(adev);
+ if (r)
+ return r;
+
+ return sdma_v6_0_set_userq_trap_interrupts(adev, true);
}
static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1415,6 +1434,7 @@ static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
sdma_v6_0_ctxempty_int_enable(adev, false);
sdma_v6_0_enable(adev, false);
+ sdma_v6_0_set_userq_trap_interrupts(adev, false);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index b2706221df99..befe013b11a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -42,6 +42,7 @@
#include "sdma_common.h"
#include "sdma_v7_0.h"
#include "v12_structs.h"
+#include "mes_userqueue.h"
MODULE_FIRMWARE("amdgpu/sdma_7_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
@@ -204,66 +205,39 @@ static uint64_t sdma_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
DRM_DEBUG("Setting write pointer\n");
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
+ if (ring->use_doorbell) {
+ DRM_DEBUG("Using doorbell -- "
+ "wptr_offs == 0x%08x "
+ "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+ "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ /* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
- *wptr_saved = ring->wptr << 2;
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- }
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
- if (ring->use_doorbell) {
- DRM_DEBUG("Using doorbell -- "
- "wptr_offs == 0x%08x "
- "lower_32_bits(ring->wptr) << 2 == 0x%08x "
- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
- ring->wptr_offs,
- lower_32_bits(ring->wptr << 2),
- upper_32_bits(ring->wptr << 2));
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("Not using doorbell -- "
- "regSDMA%i_GFX_RB_WPTR == 0x%08x "
- "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
- ring->me,
- lower_32_bits(ring->wptr << 2),
- ring->me,
- upper_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
- ring->me,
- regSDMA0_QUEUE0_RB_WPTR),
- lower_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
- ring->me,
- regSDMA0_QUEUE0_RB_WPTR_HI),
- upper_32_bits(ring->wptr << 2));
- }
+ DRM_DEBUG("Not using doorbell -- "
+ "regSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+ ring->me,
+ lower_32_bits(ring->wptr << 2),
+ ring->me,
+ upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
+ ring->me,
+ regSDMA0_QUEUE0_RB_WPTR),
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
+ ring->me,
+ regSDMA0_QUEUE0_RB_WPTR_HI),
+ upper_32_bits(ring->wptr << 2));
}
}
@@ -407,11 +381,9 @@ static void sdma_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -935,6 +907,9 @@ static int sdma_v7_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_aql_cntl = 0x4000; //regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
m->sdmax_rlcx_dummy_reg = 0xf; //regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;
+ m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
+ m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);
+
return 0;
}
@@ -961,33 +936,22 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1000,10 +964,7 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -1015,8 +976,7 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1039,37 +999,23 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
@@ -1097,10 +1043,7 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1111,8 +1054,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1312,6 +1254,23 @@ static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int r;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = true;
+ break;
+ case 1:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = false;
+ break;
+ case 2:
+ adev->sdma.no_user_submission = true;
+ adev->sdma.disable_uq = false;
+ break;
+ }
+
r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r) {
DRM_ERROR("Failed to init sdma firmware!\n");
@@ -1347,6 +1306,7 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->me = i;
+ ring->no_user_submission = adev->sdma.no_user_submission;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
@@ -1378,6 +1338,10 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ /* add firmware version checks here */
+ if (0 && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+
return r;
}
@@ -1400,11 +1364,39 @@ static int sdma_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int sdma_v7_0_set_userq_trap_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int i, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ irq_type = AMDGPU_SDMA_IRQ_INSTANCE0 + i;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->sdma.trap_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->sdma.trap_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
static int sdma_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
- return sdma_v7_0_start(adev);
+ r = sdma_v7_0_start(adev);
+ if (r)
+ return r;
+
+ return sdma_v7_0_set_userq_trap_interrupts(adev, true);
}
static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1416,6 +1408,7 @@ static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
sdma_v7_0_ctx_switch_enable(adev, false);
sdma_v7_0_enable(adev, false);
+ sdma_v7_0_set_userq_trap_interrupts(adev, false);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 2247f6a94858..e0f139de7991 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -35,6 +35,7 @@
#include "amdgpu_vce.h"
#include "atom.h"
#include "amd_pcie.h"
+
#include "si_dpm.h"
#include "sid.h"
#include "si_ih.h"
@@ -44,17 +45,30 @@
#include "dce_v6_0.h"
#include "si.h"
#include "uvd_v3_1.h"
-#include "amdgpu_vkms.h"
+
+#include "uvd/uvd_4_0_d.h"
+
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
+
#include "gmc/gmc_6_0_d.h"
+#include"gmc/gmc_6_0_sh_mask.h"
+
#include "dce/dce_6_0_d.h"
-#include "uvd/uvd_4_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
+#include "si_enums.h"
#include "amdgpu_dm.h"
+#include "amdgpu_vkms.h"
static const u32 tahiti_golden_registers[] =
{
@@ -1071,8 +1085,8 @@ static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, (reg));
- r = RREG32(SMC_IND_DATA_0);
+ WREG32(mmSMC_IND_INDEX_0, (reg));
+ r = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
@@ -1082,8 +1096,8 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, (reg));
- WREG32(SMC_IND_DATA_0, (v));
+ WREG32(mmSMC_IND_INDEX_0, (reg));
+ WREG32(mmSMC_IND_DATA_0, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
@@ -1110,20 +1124,20 @@ static void si_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
}
static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
- {GRBM_STATUS},
+ {mmGRBM_STATUS},
{mmGRBM_STATUS2},
{mmGRBM_STATUS_SE0},
{mmGRBM_STATUS_SE1},
{mmSRBM_STATUS},
{mmSRBM_STATUS2},
- {DMA_STATUS_REG + DMA0_REGISTER_OFFSET},
- {DMA_STATUS_REG + DMA1_REGISTER_OFFSET},
+ {mmDMA_STATUS_REG + DMA0_REGISTER_OFFSET},
+ {mmDMA_STATUS_REG + DMA1_REGISTER_OFFSET},
{mmCP_STAT},
{mmCP_STALLED_STAT1},
{mmCP_STALLED_STAT2},
{mmCP_STALLED_STAT3},
- {GB_ADDR_CONFIG},
- {MC_ARB_RAMCFG},
+ {mmGB_ADDR_CONFIG},
+ {mmMC_ARB_RAMCFG},
{mmGB_TILE_MODE0},
{mmGB_TILE_MODE1},
{mmGB_TILE_MODE2},
@@ -1156,7 +1170,7 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{mmGB_TILE_MODE29},
{mmGB_TILE_MODE30},
{mmGB_TILE_MODE31},
- {CC_RB_BACKEND_DISABLE, true},
+ {mmCC_RB_BACKEND_DISABLE, true},
{mmGC_USER_RB_BACKEND_DISABLE, true},
{mmPA_SC_RASTER_CONFIG, true},
};
@@ -1264,37 +1278,37 @@ static bool si_read_disabled_bios(struct amdgpu_device *adev)
u32 rom_cntl;
bool r;
- bus_cntl = RREG32(R600_BUS_CNTL);
+ bus_cntl = RREG32(mmBUS_CNTL);
if (adev->mode_info.num_crtc) {
- d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
- d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
- vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ d1vga_control = RREG32(mmD1VGA_CONTROL);
+ d2vga_control = RREG32(mmD2VGA_CONTROL);
+ vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
}
rom_cntl = RREG32(R600_ROM_CNTL);
/* enable the rom */
- WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+ WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
if (adev->mode_info.num_crtc) {
/* Disable VGA mode */
- WREG32(AVIVO_D1VGA_CONTROL,
- (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(AVIVO_D2VGA_CONTROL,
- (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(VGA_RENDER_CONTROL,
- (vga_render_control & C_000300_VGA_VSTATUS_CNTL));
+ WREG32(mmD1VGA_CONTROL,
+ (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+ D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+ WREG32(mmD2VGA_CONTROL,
+ (d2vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+ D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+ WREG32(mmVGA_RENDER_CONTROL,
+ (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
}
WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
r = amdgpu_read_bios(adev);
/* restore regs */
- WREG32(R600_BUS_CNTL, bus_cntl);
+ WREG32(mmBUS_CNTL, bus_cntl);
if (adev->mode_info.num_crtc) {
- WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
- WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
- WREG32(VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(mmD1VGA_CONTROL, d1vga_control);
+ WREG32(mmD2VGA_CONTROL, d2vga_control);
+ WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
}
WREG32(R600_ROM_CNTL, rom_cntl);
return r;
@@ -1331,23 +1345,24 @@ static void si_set_clk_bypass_mode(struct amdgpu_device *adev)
{
u32 tmp, i;
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_BYPASS_EN;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
- tmp |= SPLL_CTLREQ_CHG;
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ tmp |= CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL_2, tmp);
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+ if (RREG32(mmCG_SPLL_STATUS) & CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK)
break;
udelay(1);
}
- tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
- tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ tmp &= ~(CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK |
+ CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK);
+ WREG32(mmCG_SPLL_FUNC_CNTL_2, tmp);
tmp = RREG32(MPLL_CNTL_MODE);
tmp &= ~MPLL_MCLK_SEL;
@@ -1358,21 +1373,21 @@ static void si_spll_powerdown(struct amdgpu_device *adev)
{
u32 tmp;
- tmp = RREG32(SPLL_CNTL_MODE);
- tmp |= SPLL_SW_DIR_CONTROL;
- WREG32(SPLL_CNTL_MODE, tmp);
+ tmp = RREG32(mmSPLL_CNTL_MODE);
+ tmp |= SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK;
+ WREG32(mmSPLL_CNTL_MODE, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_RESET;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_SLEEP;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_SLEEP_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(SPLL_CNTL_MODE);
- tmp &= ~SPLL_SW_DIR_CONTROL;
- WREG32(SPLL_CNTL_MODE, tmp);
+ tmp = RREG32(mmSPLL_CNTL_MODE);
+ tmp &= ~SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK;
+ WREG32(mmSPLL_CNTL_MODE, tmp);
}
static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
@@ -1454,14 +1469,14 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
{
uint32_t temp;
- temp = RREG32(CONFIG_CNTL);
+ temp = RREG32(mmCONFIG_CNTL);
if (!state) {
temp &= ~(1<<0);
temp |= (1<<1);
} else {
temp &= ~(1<<1);
}
- WREG32(CONFIG_CNTL, temp);
+ WREG32(mmCONFIG_CNTL, temp);
}
static u32 si_get_xclk(struct amdgpu_device *adev)
@@ -1469,12 +1484,12 @@ static u32 si_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
u32 tmp;
- tmp = RREG32(CG_CLKPIN_CNTL_2);
- if (tmp & MUX_TCLK_TO_XCLK)
+ tmp = RREG32(mmCG_CLKPIN_CNTL_2);
+ if (tmp & CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK)
return TCLK;
- tmp = RREG32(CG_CLKPIN_CNTL);
- if (tmp & XTALIN_DIVIDE)
+ tmp = RREG32(mmCG_CLKPIN_CNTL);
+ if (tmp & CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK)
return reference_clock / 4;
return reference_clock;
@@ -1519,9 +1534,9 @@ static int si_get_pcie_lanes(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return 0;
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
- switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
+ switch ((link_width_cntl & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT) {
case LC_LINK_WIDTH_X1:
return 1;
case LC_LINK_WIDTH_X2:
@@ -1568,13 +1583,13 @@ static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
return;
}
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- link_width_cntl &= ~LC_LINK_WIDTH_MASK;
- link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
- link_width_cntl |= (LC_RECONFIG_NOW |
- LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK;
+ link_width_cntl |= mask << PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT;
+ link_width_cntl |= (PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK |
+ PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE_MASK);
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
static void si_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
@@ -2018,7 +2033,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
{
- return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+ return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
@@ -2239,9 +2254,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
return;
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
- LC_CURRENT_DATA_RATE_SHIFT;
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >>
+ PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
@@ -2268,17 +2283,17 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
- tmp = RREG32_PCIE(PCIE_LC_STATUS1);
- max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
- current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+ tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >> PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT;
+ current_lw = (tmp & PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK) >> PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT;
if (current_lw < max_lw) {
- tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- if (tmp & LC_RENEGOTIATION_SUPPORT) {
- tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
- tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
- tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ if (tmp & PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK) {
+ tmp &= ~(PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK);
+ tmp |= (max_lw << PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT);
+ tmp |= PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, tmp);
}
}
@@ -2301,13 +2316,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
PCI_EXP_LNKCTL2,
&gpu_cfg2);
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp |= LC_SET_QUIESCE;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp |= LC_REDO_EQ;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
mdelay(100);
@@ -2333,16 +2348,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
(PCI_EXP_LNKCTL2_ENTER_COMP |
PCI_EXP_LNKCTL2_TX_MARGIN));
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp &= ~LC_SET_QUIESCE;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
}
}
}
- speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
- speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ speed_cntl |= PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK | PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK;
+ speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL, speed_cntl);
tmp16 = 0;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -2354,13 +2369,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2,
PCI_EXP_LNKCTL2_TLS, tmp16);
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL, speed_cntl);
for (i = 0; i < adev->usec_timeout; i++) {
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK) == 0)
break;
udelay(1);
}
@@ -2418,121 +2433,121 @@ static void si_program_aspm(struct amdgpu_device *adev)
if (!amdgpu_device_should_use_aspm(adev))
return;
- orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- data &= ~LC_XMIT_N_FTS_MASK;
- data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL);
+ data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
+ data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) | PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL, data);
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
- data |= LC_GO_TO_RECOVERY;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL3, data);
- orig = data = RREG32_PCIE(PCIE_P_CNTL);
- data |= P_IGNORE_EDB_ERR;
+ orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
+ data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
if (orig != data)
- WREG32_PCIE(PCIE_P_CNTL, data);
+ WREG32_PCIE(ixPCIE_P_CNTL, data);
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
- data |= LC_PMI_TO_L1_DIS;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL);
+ data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK | PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (!disable_l0s)
- data |= LC_L0S_INACTIVITY(7);
+ data |= (7 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT);
if (!disable_l1) {
- data |= LC_L1_INACTIVITY(7);
- data &= ~LC_PMI_TO_L1_DIS;
+ data |= (7 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT);
+ data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
if (!disable_plloff_in_l1) {
bool clk_req_support;
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_0);
+ data &= ~(PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_1);
+ data &= ~(PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_0);
+ data &= ~(PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_1);
+ data &= ~(PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_1, data);
if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_0);
+ data &= ~PB0_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_1);
+ data &= ~PB0_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_2);
+ data &= ~PB0_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_2, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_2, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_3);
+ data &= ~PB0_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_3, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_3, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_0);
+ data &= ~PB1_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_1);
+ data &= ~PB1_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_2);
+ data &= ~PB1_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_2, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_2, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_3);
+ data &= ~PB1_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_3, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_3, data);
}
- orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- data &= ~LC_DYN_LANES_PWR_STATE_MASK;
- data |= LC_DYN_LANES_PWR_STATE(3);
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ data &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
+ data |= (3 << PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT);
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_CNTL);
+ data &= ~PB0_PIF_CNTL__LS2_EXIT_TIME_MASK;
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
+ data |= (5 << PB0_PIF_CNTL__LS2_EXIT_TIME__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_CNTL, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_CNTL);
+ data &= ~PB1_PIF_CNTL__LS2_EXIT_TIME_MASK;
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
+ data |= (5 << PB1_PIF_CNTL__LS2_EXIT_TIME__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_CNTL, data);
if (!disable_clkreq &&
!pci_is_root_bus(adev->pdev->bus)) {
@@ -2548,64 +2563,64 @@ static void si_program_aspm(struct amdgpu_device *adev)
}
if (clk_req_support) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
- data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL2);
+ data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL2, data);
- orig = data = RREG32(THM_CLK_CNTL);
- data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
- data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+ orig = data = RREG32(mmTHM_CLK_CNTL);
+ data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
+ data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) | (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
if (orig != data)
- WREG32(THM_CLK_CNTL, data);
+ WREG32(mmTHM_CLK_CNTL, data);
- orig = data = RREG32(MISC_CLK_CNTL);
- data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
- data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+ orig = data = RREG32(mmMISC_CLK_CNTL);
+ data &= ~(MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK | MISC_CLK_CNTL__ZCLK_SEL_MASK);
+ data |= (1 << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT) | (1 << MISC_CLK_CNTL__ZCLK_SEL__SHIFT);
if (orig != data)
- WREG32(MISC_CLK_CNTL, data);
+ WREG32(mmMISC_CLK_CNTL, data);
- orig = data = RREG32(CG_CLKPIN_CNTL);
- data &= ~BCLK_AS_XCLK;
+ orig = data = RREG32(mmCG_CLKPIN_CNTL);
+ data &= ~CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK;
if (orig != data)
- WREG32(CG_CLKPIN_CNTL, data);
+ WREG32(mmCG_CLKPIN_CNTL, data);
- orig = data = RREG32(CG_CLKPIN_CNTL_2);
- data &= ~FORCE_BIF_REFCLK_EN;
+ orig = data = RREG32(mmCG_CLKPIN_CNTL_2);
+ data &= ~CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK;
if (orig != data)
- WREG32(CG_CLKPIN_CNTL_2, data);
+ WREG32(mmCG_CLKPIN_CNTL_2, data);
- orig = data = RREG32(MPLL_BYPASSCLK_SEL);
- data &= ~MPLL_CLKOUT_SEL_MASK;
- data |= MPLL_CLKOUT_SEL(4);
+ orig = data = RREG32(mmMPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
+ data |= 4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT;
if (orig != data)
- WREG32(MPLL_BYPASSCLK_SEL, data);
+ WREG32(mmMPLL_BYPASSCLK_SEL, data);
- orig = data = RREG32(SPLL_CNTL_MODE);
- data &= ~SPLL_REFCLK_SEL_MASK;
+ orig = data = RREG32(mmSPLL_CNTL_MODE);
+ data &= ~SPLL_CNTL_MODE__SPLL_REFCLK_SEL_MASK;
if (orig != data)
- WREG32(SPLL_CNTL_MODE, data);
+ WREG32(mmSPLL_CNTL_MODE, data);
}
}
} else {
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
}
- orig = data = RREG32_PCIE(PCIE_CNTL2);
- data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+ orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+ data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK | PCIE_CNTL2__MST_MEM_LS_EN_MASK | PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
if (orig != data)
- WREG32_PCIE(PCIE_CNTL2, data);
+ WREG32_PCIE(ixPCIE_CNTL2, data);
if (!disable_l0s) {
- data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
- data = RREG32_PCIE(PCIE_LC_STATUS1);
- if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~LC_L0S_INACTIVITY_MASK;
+ data = RREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL);
+ if((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) {
+ data = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ if ((data & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK) && (data & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK)) {
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
}
}
}
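
The si.c hunks above are one mechanical conversion: register offsets gain the generated mm/ix prefixes, and the hand-rolled bit helpers from sid.h (SPLL_CTLREQ_CHG, LC_L0S_INACTIVITY(x), ...) become <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT names from the asic_reg headers. The sketch below is a minimal, self-contained illustration of that read-modify-write idiom, not part of the patch: the two mask values are copied from the SPLL_CTLREQ_CHG and SCLK_MUX_UPDATE defines removed from sid.h later in this diff, and the plain-C harness (no RREG32/WREG32, no struct amdgpu_device) exists only so the snippet compiles on its own.

#include <stdint.h>
#include <stdio.h>

/* Bit positions copied from the sid.h defines this patch deletes; the
 * generated sh_mask headers are expected to encode the same fields. */
#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK  (1u << 23)
#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK  (1u << 26)

/* Same read-modify-write shape as si_set_clk_bypass_mode() above. */
static uint32_t request_sclk_mux_change(uint32_t reg)
{
	reg |= CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK;
	return reg;
}

static uint32_t ack_sclk_mux_change(uint32_t reg)
{
	reg &= ~(CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK |
		 CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK);
	return reg;
}

int main(void)
{
	uint32_t reg = request_sclk_mux_change(0);

	printf("after request: 0x%08x\n", reg);                       /* 0x00800000 */
	printf("after ack:     0x%08x\n", ack_sclk_mux_change(reg)); /* 0x00000000 */
	return 0;
}
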
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index e2089c8da71b..7f18e4875287 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -27,6 +27,8 @@
#include "si.h"
#include "sid.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
DMA0_REGISTER_OFFSET,
@@ -38,17 +40,31 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
+/**
+ * si_dma_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware (SI).
+ */
static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
return *ring->rptr_cpu_addr;
}
+/**
+ * si_dma_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (SI).
+ */
static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+ return (RREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}
static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
@@ -56,7 +72,7 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
@@ -117,9 +133,9 @@ static void si_dma_stop(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
/* dma0 */
- rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+ rb_cntl = RREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i]);
+ rb_cntl &= ~DMA_GFX_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
}
}
@@ -133,44 +149,44 @@ static int si_dma_start(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
- WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(mmDMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(mmDMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
- rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+ rb_cntl |= DMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK | DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
/* Initialize the ring buffer's read and write pointers */
- WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
- WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmDMA_GFX_RB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[i], 0);
rptr_addr = ring->rptr_gpu_addr;
- WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
- WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
+ WREG32(mmDMA_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
+ WREG32(mmDMA_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
- rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+ rb_cntl |= DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
- WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+ WREG32(mmDMA_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
/* enable DMA IBs */
- ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+ ib_cntl = DMA_GFX_IB_CNTL__IB_ENABLE_MASK | DMA_GFX_IB_CNTL__CMD_VMID_FORCE_MASK;
#ifdef __BIG_ENDIAN
- ib_cntl |= DMA_IB_SWAP_ENABLE;
+ ib_cntl |= DMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
- WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
+ WREG32(mmDMA_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
- dma_cntl &= ~CTXEMPTY_INT_ENABLE;
- WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
+ dma_cntl = RREG32(mmDMA_CNTL + sdma_offsets[i]);
+ dma_cntl &= ~DMA_CNTL__CTXEMPTY_INT_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + sdma_offsets[i], dma_cntl);
ring->wptr = 0;
- WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_GFX_RB_CNTL__RB_ENABLE_MASK);
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -461,7 +477,7 @@ static int si_dma_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- adev->sdma.num_instances = 2;
+ adev->sdma.num_instances = SDMA_MAX_INSTANCE;
si_dma_set_ring_funcs(adev);
si_dma_set_buffer_funcs(adev);
@@ -545,9 +561,9 @@ static bool si_dma_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- u32 tmp = RREG32(SRBM_STATUS2);
+ u32 tmp = RREG32(mmSRBM_STATUS2);
- if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
+ if (tmp & (SRBM_STATUS2__DMA_BUSY_MASK | SRBM_STATUS2__DMA1_BUSY_MASK))
return false;
return true;
@@ -583,14 +599,14 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
- sdma_cntl &= ~TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl &= ~DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
- sdma_cntl |= TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl |= DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
@@ -599,14 +615,14 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
- sdma_cntl &= ~TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl &= ~DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
- sdma_cntl |= TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl |= DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
@@ -645,11 +661,11 @@ static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data &= ~MEM_POWER_OVERRIDE;
+ orig = data = RREG32(mmDMA_POWER_CNTL + offset);
+ data &= ~DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
- WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+ WREG32(mmDMA_POWER_CNTL + offset, data);
+ WREG32(mmDMA_CLK_CTRL + offset, 0x00000100);
}
} else {
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -657,15 +673,15 @@ static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data |= MEM_POWER_OVERRIDE;
+ orig = data = RREG32(mmDMA_POWER_CNTL + offset);
+ data |= DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
+ WREG32(mmDMA_POWER_CNTL + offset, data);
- orig = data = RREG32(DMA_CLK_CTRL + offset);
+ orig = data = RREG32(mmDMA_CLK_CTRL + offset);
data = 0xff000000;
if (data != orig)
- WREG32(DMA_CLK_CTRL + offset, data);
+ WREG32(mmDMA_CLK_CTRL + offset, data);
}
}
@@ -679,11 +695,11 @@ static int si_dma_set_powergating_state(struct amdgpu_ip_block *ip_block,
struct amdgpu_device *adev = ip_block->adev;
- WREG32(DMA_PGFSM_WRITE, 0x00002000);
- WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
+ WREG32(mmDMA_PGFSM_WRITE, 0x00002000);
+ WREG32(mmDMA_PGFSM_CONFIG, 0x100010ff);
for (tmp = 0; tmp < 5; tmp++)
- WREG32(DMA_PGFSM_WRITE, 0);
+ WREG32(mmDMA_PGFSM_WRITE, 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
index d656ef1fa6e1..6da65778292b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
@@ -23,115 +23,15 @@
#ifndef SI_ENUMS_H
#define SI_ENUMS_H
-#define VBLANK_INT_MASK (1 << 0)
-#define DC_HPDx_INT_EN (1 << 16)
-#define VBLANK_ACK (1 << 4)
-#define VLINE_ACK (1 << 4)
-
-#define CURSOR_WIDTH 64
-#define CURSOR_HEIGHT 64
-
-#define VGA_VSTATUS_CNTL 0xFFFCFFFF
#define PRIORITY_MARK_MASK 0x7fff
#define PRIORITY_OFF (1 << 16)
#define PRIORITY_ALWAYS_ON (1 << 20)
-#define INTERLEAVE_EN (1 << 0)
-
-#define LATENCY_WATERMARK_MASK(x) ((x) << 16)
-#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
-#define ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
-
-#define GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
-#define GRPH_ENDIAN_NONE 0
-#define GRPH_ENDIAN_8IN16 1
-#define GRPH_ENDIAN_8IN32 2
-#define GRPH_ENDIAN_8IN64 3
-#define GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
-#define GRPH_RED_SEL_R 0
-#define GRPH_RED_SEL_G 1
-#define GRPH_RED_SEL_B 2
-#define GRPH_RED_SEL_A 3
-#define GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
-#define GRPH_GREEN_SEL_G 0
-#define GRPH_GREEN_SEL_B 1
-#define GRPH_GREEN_SEL_A 2
-#define GRPH_GREEN_SEL_R 3
-#define GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
-#define GRPH_BLUE_SEL_B 0
-#define GRPH_BLUE_SEL_A 1
-#define GRPH_BLUE_SEL_R 2
-#define GRPH_BLUE_SEL_G 3
-#define GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
-#define GRPH_ALPHA_SEL_A 0
-#define GRPH_ALPHA_SEL_R 1
-#define GRPH_ALPHA_SEL_G 2
-#define GRPH_ALPHA_SEL_B 3
-
-#define GRPH_DEPTH(x) (((x) & 0x3) << 0)
-#define GRPH_DEPTH_8BPP 0
-#define GRPH_DEPTH_16BPP 1
-#define GRPH_DEPTH_32BPP 2
-
-#define GRPH_FORMAT(x) (((x) & 0x7) << 8)
-#define GRPH_FORMAT_INDEXED 0
-#define GRPH_FORMAT_ARGB1555 0
-#define GRPH_FORMAT_ARGB565 1
-#define GRPH_FORMAT_ARGB4444 2
-#define GRPH_FORMAT_AI88 3
-#define GRPH_FORMAT_MONO16 4
-#define GRPH_FORMAT_BGRA5551 5
-#define GRPH_FORMAT_ARGB8888 0
-#define GRPH_FORMAT_ARGB2101010 1
-#define GRPH_FORMAT_32BPP_DIG 2
-#define GRPH_FORMAT_8B_ARGB2101010 3
-#define GRPH_FORMAT_BGRA1010102 4
-#define GRPH_FORMAT_8B_BGRA1010102 5
-#define GRPH_FORMAT_RGB111110 6
-#define GRPH_FORMAT_BGR101111 7
-
-#define GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
-#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define GRPH_ARRAY_LINEAR_GENERAL 0
-#define GRPH_ARRAY_LINEAR_ALIGNED 1
-#define GRPH_ARRAY_1D_TILED_THIN1 2
-#define GRPH_ARRAY_2D_TILED_THIN1 4
-#define GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
-#define GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
-#define GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
-#define GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
-#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
-
-#define CURSOR_EN (1 << 0)
-#define CURSOR_MODE(x) (((x) & 0x3) << 8)
-#define CURSOR_MONO 0
-#define CURSOR_24_1 1
-#define CURSOR_24_8_PRE_MULT 2
-#define CURSOR_24_8_UNPRE_MULT 3
-#define CURSOR_2X_MAGNIFY (1 << 16)
-#define CURSOR_FORCE_MC_ON (1 << 20)
-#define CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
-#define CURSOR_URGENT_ALWAYS 0
-#define CURSOR_URGENT_1_8 1
-#define CURSOR_URGENT_1_4 2
-#define CURSOR_URGENT_3_8 3
-#define CURSOR_URGENT_1_2 4
-#define CURSOR_UPDATE_PENDING (1 << 0)
-#define CURSOR_UPDATE_TAKEN (1 << 1)
-#define CURSOR_UPDATE_LOCK (1 << 16)
-#define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
-
-
-#define ES_AND_GS_AUTO 3
-#define RADEON_PACKET_TYPE3 3
-#define CE_PARTITION_BASE 3
-#define BUF_SWAP_32BIT (2 << 16)
#define GFX_POWER_STATUS (1 << 1)
#define GFX_CLOCK_STATUS (1 << 2)
#define GFX_LS_STATUS (1 << 3)
-#define RLC_BUSY_STATUS (1 << 0)
+#define RLC_BUSY_STATUS (1 << 0)
#define RLC_PUD(x) ((x) << 0)
#define RLC_PUD_MASK (0xff << 0)
#define RLC_PDD(x) ((x) << 8)
@@ -140,140 +40,8 @@
#define RLC_TTPD_MASK (0xff << 16)
#define RLC_MSD(x) ((x) << 24)
#define RLC_MSD_MASK (0xff << 24)
-#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
-#define WRITE_DATA_DST_SEL(x) ((x) << 8)
-#define EVENT_TYPE(x) ((x) << 0)
-#define EVENT_INDEX(x) ((x) << 8)
-#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
-#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
-#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
-#define GFX6_NUM_GFX_RINGS 1
-#define GFX6_NUM_COMPUTE_RINGS 2
#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
-#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
- (((op) & 0xFF) << 8) | \
- ((n) & 0x3FFF) << 16)
-#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
-#define PACKET3_NOP 0x10
-#define PACKET3_SET_BASE 0x11
-#define PACKET3_BASE_INDEX(x) ((x) << 0)
-#define PACKET3_CLEAR_STATE 0x12
-#define PACKET3_INDEX_BUFFER_SIZE 0x13
-#define PACKET3_DISPATCH_DIRECT 0x15
-#define PACKET3_DISPATCH_INDIRECT 0x16
-#define PACKET3_ALLOC_GDS 0x1B
-#define PACKET3_WRITE_GDS_RAM 0x1C
-#define PACKET3_ATOMIC_GDS 0x1D
-#define PACKET3_ATOMIC 0x1E
-#define PACKET3_OCCLUSION_QUERY 0x1F
-#define PACKET3_SET_PREDICATION 0x20
-#define PACKET3_REG_RMW 0x21
-#define PACKET3_COND_EXEC 0x22
-#define PACKET3_PRED_EXEC 0x23
-#define PACKET3_DRAW_INDIRECT 0x24
-#define PACKET3_DRAW_INDEX_INDIRECT 0x25
-#define PACKET3_INDEX_BASE 0x26
-#define PACKET3_DRAW_INDEX_2 0x27
-#define PACKET3_CONTEXT_CONTROL 0x28
-#define PACKET3_INDEX_TYPE 0x2A
-#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
-#define PACKET3_DRAW_INDEX_AUTO 0x2D
-#define PACKET3_DRAW_INDEX_IMMD 0x2E
-#define PACKET3_NUM_INSTANCES 0x2F
-#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
-#define PACKET3_INDIRECT_BUFFER_CONST 0x31
-#define PACKET3_INDIRECT_BUFFER 0x3F
-#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
-#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
-#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
-#define PACKET3_WRITE_DATA 0x37
-#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
-#define PACKET3_MEM_SEMAPHORE 0x39
-#define PACKET3_MPEG_INDEX 0x3A
-#define PACKET3_COPY_DW 0x3B
-#define PACKET3_WAIT_REG_MEM 0x3C
-#define PACKET3_MEM_WRITE 0x3D
-#define PACKET3_COPY_DATA 0x40
-#define PACKET3_CP_DMA 0x41
-# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
-# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
-# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
-# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
-# define PACKET3_CP_DMA_DIS_WC (1 << 21)
-# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
-# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
-# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
-# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
-# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
-# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
-# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_PFP_SYNC_ME 0x42
-#define PACKET3_SURFACE_SYNC 0x43
-# define PACKET3_DEST_BASE_0_ENA (1 << 0)
-# define PACKET3_DEST_BASE_1_ENA (1 << 1)
-# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
-# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
-# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
-# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
-# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
-# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
-# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
-# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
-# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
-# define PACKET3_DEST_BASE_2_ENA (1 << 19)
-# define PACKET3_DEST_BASE_3_ENA (1 << 21)
-# define PACKET3_TCL1_ACTION_ENA (1 << 22)
-# define PACKET3_TC_ACTION_ENA (1 << 23)
-# define PACKET3_CB_ACTION_ENA (1 << 25)
-# define PACKET3_DB_ACTION_ENA (1 << 26)
-# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
-# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
-#define PACKET3_ME_INITIALIZE 0x44
-#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
-#define PACKET3_COND_WRITE 0x45
-#define PACKET3_EVENT_WRITE 0x46
-#define PACKET3_EVENT_WRITE_EOP 0x47
-#define PACKET3_EVENT_WRITE_EOS 0x48
-#define PACKET3_PREAMBLE_CNTL 0x4A
-# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
-# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
-#define PACKET3_ONE_REG_WRITE 0x57
-#define PACKET3_LOAD_CONFIG_REG 0x5F
-#define PACKET3_LOAD_CONTEXT_REG 0x60
-#define PACKET3_LOAD_SH_REG 0x61
-#define PACKET3_SET_CONFIG_REG 0x68
-#define PACKET3_SET_CONFIG_REG_START 0x00002000
-#define PACKET3_SET_CONFIG_REG_END 0x00002c00
-#define PACKET3_SET_CONTEXT_REG 0x69
-#define PACKET3_SET_CONTEXT_REG_START 0x000a000
-#define PACKET3_SET_CONTEXT_REG_END 0x000a400
-#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
-#define PACKET3_SET_RESOURCE_INDIRECT 0x74
-#define PACKET3_SET_SH_REG 0x76
-#define PACKET3_SET_SH_REG_START 0x00002c00
-#define PACKET3_SET_SH_REG_END 0x00003000
-#define PACKET3_SET_SH_REG_OFFSET 0x77
-#define PACKET3_ME_WRITE 0x7A
-#define PACKET3_SCRATCH_RAM_WRITE 0x7D
-#define PACKET3_SCRATCH_RAM_READ 0x7E
-#define PACKET3_CE_WRITE 0x7F
-#define PACKET3_LOAD_CONST_RAM 0x80
-#define PACKET3_WRITE_CONST_RAM 0x81
-#define PACKET3_WRITE_CONST_RAM_OFFSET 0x82
-#define PACKET3_DUMP_CONST_RAM 0x83
-#define PACKET3_INCREMENT_CE_COUNTER 0x84
-#define PACKET3_INCREMENT_DE_COUNTER 0x85
-#define PACKET3_WAIT_ON_CE_COUNTER 0x86
-#define PACKET3_WAIT_ON_DE_COUNTER 0x87
-#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
-#define PACKET3_SET_CE_DE_COUNTERS 0x89
-#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
-#define PACKET3_SWITCH_BUFFER 0x8B
-#define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
-#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
-#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
-
#endif
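
Most of the si_enums.h removal above is the PACKET3 command-stream encoding. As a sketch only (built from values visible in the deleted lines, with a cast added so the host-side demo avoids a signed-shift overflow), this shows what the removed PACKET3() macro packs into a command header dword: packet type, opcode, and payload dword count.

#include <stdint.h>
#include <stdio.h>

/* Reproduced from the defines deleted above -- illustrative only. */
#define RADEON_PACKET_TYPE3     3
#define PACKET3(op, n)          (((uint32_t)RADEON_PACKET_TYPE3 << 30) | \
                                 (((op) & 0xFF) << 8) |                  \
                                 ((n) & 0x3FFF) << 16)
#define PACKET3_WRITE_DATA      0x37

int main(void)
{
	/* header for a WRITE_DATA packet followed by 4 payload dwords */
	uint32_t hdr = PACKET3(PACKET3_WRITE_DATA, 4);

	printf("type=%u opcode=0x%02x count=%u\n",
	       hdr >> 30, (hdr >> 8) & 0xff, (hdr >> 16) & 0x3fff);
	return 0;
}
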
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 5c38e1fb1dca..1df00f8a2406 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -27,6 +27,7 @@
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
@@ -213,7 +214,7 @@ static int si_ih_resume(struct amdgpu_ip_block *ip_block)
static bool si_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
return false;
@@ -239,23 +240,23 @@ static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
if (srbm_soft_reset) {
- tmp = RREG32(SRBM_SOFT_RESET);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ dev_info(adev->dev, "mmSRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
index cbf232f5235b..cbd4f8951cfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sid.h
+++ b/drivers/gpu/drm/amd/amdgpu/sid.h
@@ -24,43 +24,12 @@
#ifndef SI_H
#define SI_H
-#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
-
-#define SI_MAX_SH_GPRS 256
-#define SI_MAX_TEMP_GPRS 16
-#define SI_MAX_SH_THREADS 256
-#define SI_MAX_SH_STACK_ENTRIES 4096
-#define SI_MAX_FRC_EOV_CNT 16384
-#define SI_MAX_BACKENDS 8
-#define SI_MAX_BACKENDS_MASK 0xFF
-#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
-#define SI_MAX_SIMDS 12
-#define SI_MAX_SIMDS_MASK 0x0FFF
-#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
-#define SI_MAX_PIPES 8
-#define SI_MAX_PIPES_MASK 0xFF
-#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
-#define SI_MAX_LDS_NUM 0xFFFF
-#define SI_MAX_TCC 16
-#define SI_MAX_TCC_MASK 0xFFFF
#define SI_MAX_CTLACKS_ASSERTION_WAIT 100
-/* SMC IND accessor regs */
-#define SMC_IND_INDEX_0 0x80
-#define SMC_IND_DATA_0 0x81
-
-#define SMC_IND_ACCESS_CNTL 0x8A
-# define AUTO_INCREMENT_IND_0 (1 << 0)
-#define SMC_MESSAGE_0 0x8B
-#define SMC_RESP_0 0x8C
-
/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
#define SMC_CG_IND_START 0xc0030000
#define SMC_CG_IND_END 0xc0040000
-#define CG_CGTT_LOCAL_0 0x400
-#define CG_CGTT_LOCAL_1 0x401
-
/* SMC IND registers */
#define SMC_SYSCON_RESET_CNTL 0x80000000
# define RST_REG (1 << 0)
@@ -68,9 +37,6 @@
# define CK_DISABLE (1 << 0)
# define CKEN (1 << 24)
-#define VGA_HDP_CONTROL 0xCA
-#define VGA_MEMORY_DISABLE (1 << 4)
-
#define DCCG_DISP_SLOW_SELECT_REG 0x13F
#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
@@ -79,47 +45,6 @@
#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
-#define CG_SPLL_FUNC_CNTL 0x180
-#define SPLL_RESET (1 << 0)
-#define SPLL_SLEEP (1 << 1)
-#define SPLL_BYPASS_EN (1 << 3)
-#define SPLL_REF_DIV(x) ((x) << 4)
-#define SPLL_REF_DIV_MASK (0x3f << 4)
-#define SPLL_PDIV_A(x) ((x) << 20)
-#define SPLL_PDIV_A_MASK (0x7f << 20)
-#define SPLL_PDIV_A_SHIFT 20
-#define CG_SPLL_FUNC_CNTL_2 0x181
-#define SCLK_MUX_SEL(x) ((x) << 0)
-#define SCLK_MUX_SEL_MASK (0x1ff << 0)
-#define SPLL_CTLREQ_CHG (1 << 23)
-#define SCLK_MUX_UPDATE (1 << 26)
-#define CG_SPLL_FUNC_CNTL_3 0x182
-#define SPLL_FB_DIV(x) ((x) << 0)
-#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
-#define SPLL_FB_DIV_SHIFT 0
-#define SPLL_DITHEN (1 << 28)
-#define CG_SPLL_FUNC_CNTL_4 0x183
-
-#define SPLL_STATUS 0x185
-#define SPLL_CHG_STATUS (1 << 1)
-#define SPLL_CNTL_MODE 0x186
-#define SPLL_SW_DIR_CONTROL (1 << 0)
-# define SPLL_REFCLK_SEL(x) ((x) << 26)
-# define SPLL_REFCLK_SEL_MASK (3 << 26)
-
-#define CG_SPLL_SPREAD_SPECTRUM 0x188
-#define SSEN (1 << 0)
-#define CLK_S(x) ((x) << 4)
-#define CLK_S_MASK (0xfff << 4)
-#define CLK_S_SHIFT 4
-#define CG_SPLL_SPREAD_SPECTRUM_2 0x189
-#define CLK_V(x) ((x) << 0)
-#define CLK_V_MASK (0x3ffffff << 0)
-#define CLK_V_SHIFT 0
-
-#define CG_SPLL_AUTOSCALE_CNTL 0x18b
-# define AUTOSCALE_ON_SS_CLEAR (1 << 9)
-
/* discrete uvd clocks */
#define CG_UPLL_FUNC_CNTL 0x18d
# define UPLL_RESET_MASK 0x00000001
@@ -149,317 +74,13 @@
#define CG_UPLL_SPREAD_SPECTRUM 0x194
# define SSEN_MASK 0x00000001
-#define MPLL_BYPASSCLK_SEL 0x197
-# define MPLL_CLKOUT_SEL(x) ((x) << 8)
-# define MPLL_CLKOUT_SEL_MASK 0xFF00
-
-#define CG_CLKPIN_CNTL 0x198
-# define XTALIN_DIVIDE (1 << 1)
-# define BCLK_AS_XCLK (1 << 2)
-#define CG_CLKPIN_CNTL_2 0x199
-# define FORCE_BIF_REFCLK_EN (1 << 3)
-# define MUX_TCLK_TO_XCLK (1 << 8)
-
-#define THM_CLK_CNTL 0x19b
-# define CMON_CLK_SEL(x) ((x) << 0)
-# define CMON_CLK_SEL_MASK 0xFF
-# define TMON_CLK_SEL(x) ((x) << 8)
-# define TMON_CLK_SEL_MASK 0xFF00
-#define MISC_CLK_CNTL 0x19c
-# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
-# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
-# define ZCLK_SEL(x) ((x) << 8)
-# define ZCLK_SEL_MASK 0xFF00
-
-#define CG_THERMAL_CTRL 0x1c0
-#define DPM_EVENT_SRC(x) ((x) << 0)
-#define DPM_EVENT_SRC_MASK (7 << 0)
-#define DIG_THERM_DPM(x) ((x) << 14)
-#define DIG_THERM_DPM_MASK 0x003FC000
-#define DIG_THERM_DPM_SHIFT 14
-#define CG_THERMAL_STATUS 0x1c1
-#define FDO_PWM_DUTY(x) ((x) << 9)
-#define FDO_PWM_DUTY_MASK (0xff << 9)
-#define FDO_PWM_DUTY_SHIFT 9
-#define CG_THERMAL_INT 0x1c2
-#define DIG_THERM_INTH(x) ((x) << 8)
-#define DIG_THERM_INTH_MASK 0x0000FF00
-#define DIG_THERM_INTH_SHIFT 8
-#define DIG_THERM_INTL(x) ((x) << 16)
-#define DIG_THERM_INTL_MASK 0x00FF0000
-#define DIG_THERM_INTL_SHIFT 16
-#define THERM_INT_MASK_HIGH (1 << 24)
-#define THERM_INT_MASK_LOW (1 << 25)
-
-#define CG_MULT_THERMAL_CTRL 0x1c4
-#define TEMP_SEL(x) ((x) << 20)
-#define TEMP_SEL_MASK (0xff << 20)
-#define TEMP_SEL_SHIFT 20
-#define CG_MULT_THERMAL_STATUS 0x1c5
-#define ASIC_MAX_TEMP(x) ((x) << 0)
-#define ASIC_MAX_TEMP_MASK 0x000001ff
-#define ASIC_MAX_TEMP_SHIFT 0
-#define CTF_TEMP(x) ((x) << 9)
-#define CTF_TEMP_MASK 0x0003fe00
-#define CTF_TEMP_SHIFT 9
-
-#define CG_FDO_CTRL0 0x1d5
-#define FDO_STATIC_DUTY(x) ((x) << 0)
-#define FDO_STATIC_DUTY_MASK 0x000000FF
-#define FDO_STATIC_DUTY_SHIFT 0
-#define CG_FDO_CTRL1 0x1d6
-#define FMAX_DUTY100(x) ((x) << 0)
-#define FMAX_DUTY100_MASK 0x000000FF
-#define FMAX_DUTY100_SHIFT 0
-#define CG_FDO_CTRL2 0x1d7
-#define TMIN(x) ((x) << 0)
-#define TMIN_MASK 0x000000FF
-#define TMIN_SHIFT 0
-#define FDO_PWM_MODE(x) ((x) << 11)
-#define FDO_PWM_MODE_MASK (7 << 11)
-#define FDO_PWM_MODE_SHIFT 11
-#define TACH_PWM_RESP_RATE(x) ((x) << 25)
-#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
-#define TACH_PWM_RESP_RATE_SHIFT 25
-
-#define CG_TACH_CTRL 0x1dc
-# define EDGE_PER_REV(x) ((x) << 0)
-# define EDGE_PER_REV_MASK (0x7 << 0)
-# define EDGE_PER_REV_SHIFT 0
-# define TARGET_PERIOD(x) ((x) << 3)
-# define TARGET_PERIOD_MASK 0xfffffff8
-# define TARGET_PERIOD_SHIFT 3
-#define CG_TACH_STATUS 0x1dd
-# define TACH_PERIOD(x) ((x) << 0)
-# define TACH_PERIOD_MASK 0xffffffff
-# define TACH_PERIOD_SHIFT 0
-
-#define GENERAL_PWRMGT 0x1e0
-# define GLOBAL_PWRMGT_EN (1 << 0)
-# define STATIC_PM_EN (1 << 1)
-# define THERMAL_PROTECTION_DIS (1 << 2)
-# define THERMAL_PROTECTION_TYPE (1 << 3)
-# define SW_SMIO_INDEX(x) ((x) << 6)
-# define SW_SMIO_INDEX_MASK (1 << 6)
-# define SW_SMIO_INDEX_SHIFT 6
-# define VOLT_PWRMGT_EN (1 << 10)
-# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
-#define CG_TPC 0x1e1
-#define SCLK_PWRMGT_CNTL 0x1e2
-# define SCLK_PWRMGT_OFF (1 << 0)
-# define SCLK_LOW_D1 (1 << 1)
-# define FIR_RESET (1 << 4)
-# define FIR_FORCE_TREND_SEL (1 << 5)
-# define FIR_TREND_MODE (1 << 6)
-# define DYN_GFX_CLK_OFF_EN (1 << 7)
-# define GFX_CLK_FORCE_ON (1 << 8)
-# define GFX_CLK_REQUEST_OFF (1 << 9)
-# define GFX_CLK_FORCE_OFF (1 << 10)
-# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
-# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
-# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
-# define DYN_LIGHT_SLEEP_EN (1 << 14)
-
-#define TARGET_AND_CURRENT_PROFILE_INDEX 0x1e6
-# define CURRENT_STATE_INDEX_MASK (0xf << 4)
-# define CURRENT_STATE_INDEX_SHIFT 4
-
-#define CG_FTV 0x1ef
-
-#define CG_FFCT_0 0x1f0
-# define UTC_0(x) ((x) << 0)
-# define UTC_0_MASK (0x3ff << 0)
-# define DTC_0(x) ((x) << 10)
-# define DTC_0_MASK (0x3ff << 10)
-
-#define CG_BSP 0x1ff
-# define BSP(x) ((x) << 0)
-# define BSP_MASK (0xffff << 0)
-# define BSU(x) ((x) << 16)
-# define BSU_MASK (0xf << 16)
-#define CG_AT 0x200
-# define CG_R(x) ((x) << 0)
-# define CG_R_MASK (0xffff << 0)
-# define CG_L(x) ((x) << 16)
-# define CG_L_MASK (0xffff << 16)
-
-#define CG_GIT 0x201
-# define CG_GICST(x) ((x) << 0)
-# define CG_GICST_MASK (0xffff << 0)
-# define CG_GIPOT(x) ((x) << 16)
-# define CG_GIPOT_MASK (0xffff << 16)
-
-#define CG_SSP 0x203
-# define SST(x) ((x) << 0)
-# define SST_MASK (0xffff << 0)
-# define SSTU(x) ((x) << 16)
-# define SSTU_MASK (0xf << 16)
-
-#define CG_DISPLAY_GAP_CNTL 0x20a
-# define DISP1_GAP(x) ((x) << 0)
-# define DISP1_GAP_MASK (3 << 0)
-# define DISP2_GAP(x) ((x) << 2)
-# define DISP2_GAP_MASK (3 << 2)
-# define VBI_TIMER_COUNT(x) ((x) << 4)
-# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
-# define VBI_TIMER_UNIT(x) ((x) << 20)
-# define VBI_TIMER_UNIT_MASK (7 << 20)
-# define DISP1_GAP_MCHG(x) ((x) << 24)
-# define DISP1_GAP_MCHG_MASK (3 << 24)
-# define DISP2_GAP_MCHG(x) ((x) << 26)
-# define DISP2_GAP_MCHG_MASK (3 << 26)
-
-#define CG_ULV_CONTROL 0x21e
-#define CG_ULV_PARAMETER 0x21f
-
-#define SMC_SCRATCH0 0x221
-
-#define CG_CAC_CTRL 0x22e
-# define CAC_WINDOW(x) ((x) << 0)
-# define CAC_WINDOW_MASK 0x00ffffff
-
-#define DMIF_ADDR_CONFIG 0x2F5
-
-#define DMIF_ADDR_CALC 0x300
-
-#define PIPE0_DMIF_BUFFER_CONTROL 0x0328
-# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
-# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
-
-#define SRBM_STATUS 0x394
-#define GRBM_RQ_PENDING (1 << 5)
-#define VMC_BUSY (1 << 8)
-#define MCB_BUSY (1 << 9)
-#define MCB_NON_DISPLAY_BUSY (1 << 10)
-#define MCC_BUSY (1 << 11)
-#define MCD_BUSY (1 << 12)
-#define SEM_BUSY (1 << 14)
-#define IH_BUSY (1 << 17)
-
-#define SRBM_SOFT_RESET 0x398
-#define SOFT_RESET_BIF (1 << 1)
-#define SOFT_RESET_DC (1 << 5)
-#define SOFT_RESET_DMA1 (1 << 6)
-#define SOFT_RESET_GRBM (1 << 8)
-#define SOFT_RESET_HDP (1 << 9)
-#define SOFT_RESET_IH (1 << 10)
-#define SOFT_RESET_MC (1 << 11)
-#define SOFT_RESET_ROM (1 << 14)
-#define SOFT_RESET_SEM (1 << 15)
-#define SOFT_RESET_VMC (1 << 17)
-#define SOFT_RESET_DMA (1 << 20)
-#define SOFT_RESET_TST (1 << 21)
-#define SOFT_RESET_REGBB (1 << 22)
-#define SOFT_RESET_ORB (1 << 23)
-
-#define CC_SYS_RB_BACKEND_DISABLE 0x3A0
-#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3A1
-
-#define SRBM_READ_ERROR 0x3A6
-#define SRBM_INT_CNTL 0x3A8
-#define SRBM_INT_ACK 0x3AA
-
-#define SRBM_STATUS2 0x3B1
-#define DMA_BUSY (1 << 5)
-#define DMA1_BUSY (1 << 6)
-
-#define VM_L2_CNTL 0x500
-#define ENABLE_L2_CACHE (1 << 0)
-#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
-#define L2_CACHE_PTE_ENDIAN_SWAP_MODE(x) ((x) << 2)
-#define L2_CACHE_PDE_ENDIAN_SWAP_MODE(x) ((x) << 4)
-#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
-#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
-#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 15)
-#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 19)
-#define VM_L2_CNTL2 0x501
-#define INVALIDATE_ALL_L1_TLBS (1 << 0)
-#define INVALIDATE_L2_CACHE (1 << 1)
-#define INVALIDATE_CACHE_MODE(x) ((x) << 26)
-#define INVALIDATE_PTE_AND_PDE_CACHES 0
-#define INVALIDATE_ONLY_PTE_CACHES 1
-#define INVALIDATE_ONLY_PDE_CACHES 2
-#define VM_L2_CNTL3 0x502
-#define BANK_SELECT(x) ((x) << 0)
-#define L2_CACHE_UPDATE_MODE(x) ((x) << 6)
-#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
-#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
-#define VM_L2_STATUS 0x503
-#define L2_BUSY (1 << 0)
-#define VM_CONTEXT0_CNTL 0x504
-#define ENABLE_CONTEXT (1 << 0)
-#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
-#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
-#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
-#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
-#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
-#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
-#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
-#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
-#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
-#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
-#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
-#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
-#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
-#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
-#define VM_CONTEXT1_CNTL 0x505
-#define VM_CONTEXT0_CNTL2 0x50C
-#define VM_CONTEXT1_CNTL2 0x50D
-#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x50E
-#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x50F
-#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x510
-#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x511
-#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x512
-#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x513
-#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x514
-#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x515
-
-#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x53f
-#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x537
-#define PROTECTIONS_MASK (0xf << 0)
-#define PROTECTIONS_SHIFT 0
- /* bit 0: range
- * bit 1: pde0
- * bit 2: valid
- * bit 3: read
- * bit 4: write
- */
-#define MEMORY_CLIENT_ID_MASK (0xff << 12)
-#define MEMORY_CLIENT_ID_SHIFT 12
-#define MEMORY_CLIENT_RW_MASK (1 << 24)
-#define MEMORY_CLIENT_RW_SHIFT 24
-#define FAULT_VMID_MASK (0xf << 25)
-#define FAULT_VMID_SHIFT 25
-
#define VM_INVALIDATE_REQUEST 0x51E
#define VM_INVALIDATE_RESPONSE 0x51F
-#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x546
-#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x547
-
-#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x54F
-#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x550
-#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x551
-#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x552
-#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x553
-#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x554
-#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x555
-#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x556
-#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x557
-#define VM_CONTEXT1_PAGE_TABLE_START_ADDR 0x558
-
-#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x55F
-#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x560
-
#define VM_L2_CG 0x570
#define MC_CG_ENABLE (1 << 18)
#define MC_LS_ENABLE (1 << 19)
-#define MC_SHARED_CHMAP 0x801
-#define NOOFCHAN_SHIFT 12
-#define NOOFCHAN_MASK 0x0000f000
-#define MC_SHARED_CHREMAP 0x802
-
#define MC_VM_FB_LOCATION 0x809
#define MC_VM_AGP_TOP 0x80A
#define MC_VM_AGP_BOT 0x80B
@@ -491,21 +112,6 @@
#define MC_CITF_MISC_WR_CG 0x993
#define MC_CITF_MISC_VM_CG 0x994
-#define MC_ARB_RAMCFG 0x9D8
-#define NOOFBANK_SHIFT 0
-#define NOOFBANK_MASK 0x00000003
-#define NOOFRANK_SHIFT 2
-#define NOOFRANK_MASK 0x00000004
-#define NOOFROWS_SHIFT 3
-#define NOOFROWS_MASK 0x00000038
-#define NOOFCOLS_SHIFT 6
-#define NOOFCOLS_MASK 0x000000C0
-#define CHANSIZE_SHIFT 8
-#define CHANSIZE_MASK 0x00000100
-#define CHANSIZE_OVERRIDE (1 << 11)
-#define NOOFGROUPS_SHIFT 12
-#define NOOFGROUPS_MASK 0x00001000
-
#define MC_ARB_DRAM_TIMING 0x9DD
#define MC_ARB_DRAM_TIMING2 0x9DE
@@ -631,20 +237,6 @@
#define CLKS(x) ((x) << 0)
#define CLKS_MASK (0xfff << 0)
-#define HDP_HOST_PATH_CNTL 0xB00
-#define CLOCK_GATING_DIS (1 << 23)
-#define HDP_NONSURFACE_BASE 0xB01
-#define HDP_NONSURFACE_INFO 0xB02
-#define HDP_NONSURFACE_SIZE 0xB03
-
-#define HDP_DEBUG0 0xBCC
-
-#define HDP_ADDR_CONFIG 0xBD2
-#define HDP_MISC_CNTL 0xBD3
-#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
-#define HDP_MEM_POWER_LS 0xBD4
-#define HDP_LS_ENABLE (1 << 0)
-
#define ATC_MISC_CG 0xCD4
#define IH_RB_CNTL 0xF80
@@ -674,8 +266,6 @@
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
# define MC_VMID(x) ((x) << 25)
-#define CONFIG_MEMSIZE 0x150A
-
#define INTERRUPT_CNTL 0x151A
# define IH_DUMMY_RD_OVERRIDE (1 << 0)
# define IH_DUMMY_RD_EN (1 << 1)
@@ -683,486 +273,22 @@
# define GEN_IH_INT_EN (1 << 8)
#define INTERRUPT_CNTL2 0x151B
-#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x1520
-
-#define BIF_FB_EN 0x1524
-#define FB_READ_EN (1 << 0)
-#define FB_WRITE_EN (1 << 1)
-
-#define HDP_REG_COHERENCY_FLUSH_CNTL 0x1528
-
-/* DCE6 ELD audio interface */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
-# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
-/* max channels minus one. 7 = 8 channels */
-# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
-# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
-# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
-/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
- * bit0 = 32 kHz
- * bit1 = 44.1 kHz
- * bit2 = 48 kHz
- * bit3 = 88.2 kHz
- * bit4 = 96 kHz
- * bit5 = 176.4 kHz
- * bit6 = 192 kHz
- */
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
-# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
-# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
-/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
- * 0 = invalid
- * x = legal delay value
- * 255 = sync not supported
- */
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
-# define HBR_CAPABLE (1 << 0) /* enabled by default */
-
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
-# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
-# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
-# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
-# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
-# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
-# define DESCRIPTION0(x) (((x) & 0xff) << 0)
-# define DESCRIPTION1(x) (((x) & 0xff) << 8)
-# define DESCRIPTION2(x) (((x) & 0xff) << 16)
-# define DESCRIPTION3(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
-# define DESCRIPTION4(x) (((x) & 0xff) << 0)
-# define DESCRIPTION5(x) (((x) & 0xff) << 8)
-# define DESCRIPTION6(x) (((x) & 0xff) << 16)
-# define DESCRIPTION7(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
-# define DESCRIPTION8(x) (((x) & 0xff) << 0)
-# define DESCRIPTION9(x) (((x) & 0xff) << 8)
-# define DESCRIPTION10(x) (((x) & 0xff) << 16)
-# define DESCRIPTION11(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
-# define DESCRIPTION12(x) (((x) & 0xff) << 0)
-# define DESCRIPTION13(x) (((x) & 0xff) << 8)
-# define DESCRIPTION14(x) (((x) & 0xff) << 16)
-# define DESCRIPTION15(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
-# define DESCRIPTION16(x) (((x) & 0xff) << 0)
-# define DESCRIPTION17(x) (((x) & 0xff) << 8)
-
-#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
-# define AUDIO_ENABLED (1 << 31)
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
-#define PORT_CONNECTIVITY_MASK (3 << 30)
-#define PORT_CONNECTIVITY_SHIFT 30
-
-#define DC_LB_MEMORY_SPLIT 0x1AC3
-#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
-
-#define PRIORITY_A_CNT 0x1AC6
-#define PRIORITY_MARK_MASK 0x7fff
-#define PRIORITY_OFF (1 << 16)
-#define PRIORITY_ALWAYS_ON (1 << 20)
-#define PRIORITY_B_CNT 0x1AC7
-
-#define DPG_PIPE_ARBITRATION_CONTROL3 0x1B32
-# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
-#define DPG_PIPE_LATENCY_CONTROL 0x1B33
-# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
-# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
-
-/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
-#define VLINE_STATUS 0x1AEE
-# define VLINE_OCCURRED (1 << 0)
-# define VLINE_ACK (1 << 4)
-# define VLINE_STAT (1 << 12)
-# define VLINE_INTERRUPT (1 << 16)
-# define VLINE_INTERRUPT_TYPE (1 << 17)
-/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
-#define VBLANK_STATUS 0x1AEF
-# define VBLANK_OCCURRED (1 << 0)
-# define VBLANK_ACK (1 << 4)
-# define VBLANK_STAT (1 << 12)
-# define VBLANK_INTERRUPT (1 << 16)
-# define VBLANK_INTERRUPT_TYPE (1 << 17)
-
-/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
-#define INT_MASK 0x1AD0
-# define VBLANK_INT_MASK (1 << 0)
-# define VLINE_INT_MASK (1 << 4)
-
-#define DISP_INTERRUPT_STATUS 0x183D
-# define LB_D1_VLINE_INTERRUPT (1 << 2)
-# define LB_D1_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD1_INTERRUPT (1 << 17)
-# define DC_HPD1_RX_INTERRUPT (1 << 18)
-# define DACA_AUTODETECT_INTERRUPT (1 << 22)
-# define DACB_AUTODETECT_INTERRUPT (1 << 23)
-# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
-# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
-#define DISP_INTERRUPT_STATUS_CONTINUE 0x183E
-# define LB_D2_VLINE_INTERRUPT (1 << 2)
-# define LB_D2_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD2_INTERRUPT (1 << 17)
-# define DC_HPD2_RX_INTERRUPT (1 << 18)
-# define DISP_TIMER_INTERRUPT (1 << 24)
-#define DISP_INTERRUPT_STATUS_CONTINUE2 0x183F
-# define LB_D3_VLINE_INTERRUPT (1 << 2)
-# define LB_D3_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD3_INTERRUPT (1 << 17)
-# define DC_HPD3_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE3 0x1840
-# define LB_D4_VLINE_INTERRUPT (1 << 2)
-# define LB_D4_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD4_INTERRUPT (1 << 17)
-# define DC_HPD4_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE4 0x1853
-# define LB_D5_VLINE_INTERRUPT (1 << 2)
-# define LB_D5_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD5_INTERRUPT (1 << 17)
-# define DC_HPD5_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE5 0x1854
-# define LB_D6_VLINE_INTERRUPT (1 << 2)
-# define LB_D6_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD6_INTERRUPT (1 << 17)
-# define DC_HPD6_RX_INTERRUPT (1 << 18)
-
-/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
-#define GRPH_INT_STATUS 0x1A16
-# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
-# define GRPH_PFLIP_INT_CLEAR (1 << 8)
-/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
-#define GRPH_INT_CONTROL 0x1A17
-# define GRPH_PFLIP_INT_MASK (1 << 0)
-# define GRPH_PFLIP_INT_TYPE (1 << 8)
-
-#define DAC_AUTODETECT_INT_CONTROL 0x19F2
-
-#define DC_HPD1_INT_STATUS 0x1807
-#define DC_HPD2_INT_STATUS 0x180A
-#define DC_HPD3_INT_STATUS 0x180D
-#define DC_HPD4_INT_STATUS 0x1810
-#define DC_HPD5_INT_STATUS 0x1813
-#define DC_HPD6_INT_STATUS 0x1816
-# define DC_HPDx_INT_STATUS (1 << 0)
-# define DC_HPDx_SENSE (1 << 1)
-# define DC_HPDx_RX_INT_STATUS (1 << 8)
-
-#define DC_HPD1_INT_CONTROL 0x1808
-#define DC_HPD2_INT_CONTROL 0x180B
-#define DC_HPD3_INT_CONTROL 0x180E
-#define DC_HPD4_INT_CONTROL 0x1811
-#define DC_HPD5_INT_CONTROL 0x1814
-#define DC_HPD6_INT_CONTROL 0x1817
-# define DC_HPDx_INT_ACK (1 << 0)
-# define DC_HPDx_INT_POLARITY (1 << 8)
-# define DC_HPDx_INT_EN (1 << 16)
-# define DC_HPDx_RX_INT_ACK (1 << 20)
-# define DC_HPDx_RX_INT_EN (1 << 24)
-
-#define DC_HPD1_CONTROL 0x1809
-#define DC_HPD2_CONTROL 0x180C
-#define DC_HPD3_CONTROL 0x180F
-#define DC_HPD4_CONTROL 0x1812
-#define DC_HPD5_CONTROL 0x1815
-#define DC_HPD6_CONTROL 0x1818
-# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
-# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
-# define DC_HPDx_EN (1 << 28)
-
-#define DPG_PIPE_STUTTER_CONTROL 0x1B35
-# define STUTTER_ENABLE (1 << 0)
-
-/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
-#define CRTC_STATUS_FRAME_COUNT 0x1BA6
-
-/* Audio clocks */
-#define DCCG_AUDIO_DTO0_PHASE 0x05b0
-#define DCCG_AUDIO_DTO0_MODULE 0x05b4
-#define DCCG_AUDIO_DTO1_PHASE 0x05c0
-#define DCCG_AUDIO_DTO1_MODULE 0x05c4
-
-#define GRBM_CNTL 0x2000
-#define GRBM_READ_TIMEOUT(x) ((x) << 0)
-
-#define GRBM_STATUS2 0x2002
-#define RLC_RQ_PENDING (1 << 0)
-#define RLC_BUSY (1 << 8)
-#define TC_BUSY (1 << 9)
-
-#define GRBM_STATUS 0x2004
-#define CMDFIFO_AVAIL_MASK 0x0000000F
-#define RING2_RQ_PENDING (1 << 4)
-#define SRBM_RQ_PENDING (1 << 5)
-#define RING1_RQ_PENDING (1 << 6)
-#define CF_RQ_PENDING (1 << 7)
-#define PF_RQ_PENDING (1 << 8)
-#define GDS_DMA_RQ_PENDING (1 << 9)
-#define GRBM_EE_BUSY (1 << 10)
-#define DB_CLEAN (1 << 12)
-#define CB_CLEAN (1 << 13)
-#define TA_BUSY (1 << 14)
-#define GDS_BUSY (1 << 15)
-#define VGT_BUSY (1 << 17)
-#define IA_BUSY_NO_DMA (1 << 18)
-#define IA_BUSY (1 << 19)
-#define SX_BUSY (1 << 20)
-#define SPI_BUSY (1 << 22)
-#define BCI_BUSY (1 << 23)
-#define SC_BUSY (1 << 24)
-#define PA_BUSY (1 << 25)
-#define DB_BUSY (1 << 26)
-#define CP_COHERENCY_BUSY (1 << 28)
-#define CP_BUSY (1 << 29)
-#define CB_BUSY (1 << 30)
-#define GUI_ACTIVE (1 << 31)
-#define GRBM_STATUS_SE0 0x2005
-#define GRBM_STATUS_SE1 0x2006
-#define SE_DB_CLEAN (1 << 1)
-#define SE_CB_CLEAN (1 << 2)
-#define SE_BCI_BUSY (1 << 22)
-#define SE_VGT_BUSY (1 << 23)
-#define SE_PA_BUSY (1 << 24)
-#define SE_TA_BUSY (1 << 25)
-#define SE_SX_BUSY (1 << 26)
-#define SE_SPI_BUSY (1 << 27)
-#define SE_SC_BUSY (1 << 29)
-#define SE_DB_BUSY (1 << 30)
-#define SE_CB_BUSY (1 << 31)
-
-#define GRBM_INT_CNTL 0x2018
-# define RDERR_INT_ENABLE (1 << 0)
-# define GUI_IDLE_INT_ENABLE (1 << 19)
-
-#define CP_STRMOUT_CNTL 0x213F
-#define SCRATCH_REG0 0x2140
-#define SCRATCH_REG1 0x2141
-#define SCRATCH_REG2 0x2142
-#define SCRATCH_REG3 0x2143
-#define SCRATCH_REG4 0x2144
-#define SCRATCH_REG5 0x2145
-#define SCRATCH_REG6 0x2146
-#define SCRATCH_REG7 0x2147
-
-#define SCRATCH_UMSK 0x2150
-#define SCRATCH_ADDR 0x2151
-
-#define CP_SEM_WAIT_TIMER 0x216F
-
-#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x2172
-
-#define CP_ME_CNTL 0x21B6
-#define CP_CE_HALT (1 << 24)
-#define CP_PFP_HALT (1 << 26)
-#define CP_ME_HALT (1 << 28)
-
-#define CP_COHER_CNTL2 0x217A
-
-#define CP_RB2_RPTR 0x21BE
-#define CP_RB1_RPTR 0x21BF
-#define CP_RB0_RPTR 0x21C0
-#define CP_RB_WPTR_DELAY 0x21C1
-
-#define CP_QUEUE_THRESHOLDS 0x21D8
-#define ROQ_IB1_START(x) ((x) << 0)
-#define ROQ_IB2_START(x) ((x) << 8)
-#define CP_MEQ_THRESHOLDS 0x21D9
-#define MEQ1_START(x) ((x) << 0)
-#define MEQ2_START(x) ((x) << 8)
-
-#define CP_PERFMON_CNTL 0x21FF
-
#define VGT_VTX_VECT_EJECT_REG 0x222C
-
#define VGT_ESGS_RING_SIZE 0x2232
#define VGT_GSVS_RING_SIZE 0x2233
-
#define VGT_GS_VERTEX_REUSE 0x2235
-
#define VGT_PRIMITIVE_TYPE 0x2256
#define VGT_INDEX_TYPE 0x2257
-
#define VGT_NUM_INDICES 0x225C
#define VGT_NUM_INSTANCES 0x225D
-
#define VGT_TF_RING_SIZE 0x2262
-
#define VGT_HS_OFFCHIP_PARAM 0x226C
-
#define VGT_TF_MEMORY_BASE 0x226E
-#define PA_CL_ENHANCE 0x2285
-#define CLIP_VTX_REORDER_ENA (1 << 0)
-#define NUM_CLIP_SEQ(x) ((x) << 1)
-
-#define PA_SU_LINE_STIPPLE_VALUE 0x2298
-
-#define PA_SC_LINE_STIPPLE_STATE 0x22C4
-
-#define PA_SC_FORCE_EOV_MAX_CNTS 0x22C9
-#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
-#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
-
-#define PA_SC_FIFO_SIZE 0x22F3
-#define SC_FRONTEND_PRIM_FIFO_SIZE(x) ((x) << 0)
-#define SC_BACKEND_PRIM_FIFO_SIZE(x) ((x) << 6)
-#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 15)
-#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 23)
-
#define PA_SC_ENHANCE 0x22FC
-#define SQ_CONFIG 0x2300
-
-#define SQC_CACHES 0x2302
-
-#define SQ_POWER_THROTTLE 0x2396
-#define MIN_POWER(x) ((x) << 0)
-#define MIN_POWER_MASK (0x3fff << 0)
-#define MIN_POWER_SHIFT 0
-#define MAX_POWER(x) ((x) << 16)
-#define MAX_POWER_MASK (0x3fff << 16)
-#define MAX_POWER_SHIFT 0
-#define SQ_POWER_THROTTLE2 0x2397
-#define MAX_POWER_DELTA(x) ((x) << 0)
-#define MAX_POWER_DELTA_MASK (0x3fff << 0)
-#define MAX_POWER_DELTA_SHIFT 0
-#define STI_SIZE(x) ((x) << 16)
-#define STI_SIZE_MASK (0x3ff << 16)
-#define STI_SIZE_SHIFT 16
-#define LTI_RATIO(x) ((x) << 27)
-#define LTI_RATIO_MASK (0xf << 27)
-#define LTI_RATIO_SHIFT 27
-
-#define SX_DEBUG_1 0x2418
-
-#define SPI_STATIC_THREAD_MGMT_1 0x2438
-#define SPI_STATIC_THREAD_MGMT_2 0x2439
-#define SPI_STATIC_THREAD_MGMT_3 0x243A
-#define SPI_PS_MAX_WAVE_ID 0x243B
-
-#define SPI_CONFIG_CNTL 0x2440
-
-#define SPI_CONFIG_CNTL_1 0x244F
-#define VTX_DONE_DELAY(x) ((x) << 0)
-#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
-
-#define CGTS_TCC_DISABLE 0x2452
-#define CGTS_USER_TCC_DISABLE 0x2453
-#define TCC_DISABLE_MASK 0xFFFF0000
-#define TCC_DISABLE_SHIFT 16
-#define CGTS_SM_CTRL_REG 0x2454
-#define OVERRIDE (1 << 21)
-#define LS_OVERRIDE (1 << 22)
-
-#define SPI_LB_CU_MASK 0x24D5
-
#define TA_CNTL_AUX 0x2542
-#define CC_RB_BACKEND_DISABLE 0x263D
-#define BACKEND_DISABLE(x) ((x) << 16)
-#define GB_ADDR_CONFIG 0x263E
-#define NUM_PIPES(x) ((x) << 0)
-#define NUM_PIPES_MASK 0x00000007
-#define NUM_PIPES_SHIFT 0
-#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
-#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
-#define PIPE_INTERLEAVE_SIZE_SHIFT 4
-#define NUM_SHADER_ENGINES(x) ((x) << 12)
-#define NUM_SHADER_ENGINES_MASK 0x00003000
-#define NUM_SHADER_ENGINES_SHIFT 12
-#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
-#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
-#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
-#define NUM_GPUS(x) ((x) << 20)
-#define NUM_GPUS_MASK 0x00700000
-#define NUM_GPUS_SHIFT 20
-#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
-#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
-#define MULTI_GPU_TILE_SIZE_SHIFT 24
-#define ROW_SIZE(x) ((x) << 28)
-#define ROW_SIZE_MASK 0x30000000
-#define ROW_SIZE_SHIFT 28
-
-#define CB_PERFCOUNTER0_SELECT0 0x2688
-#define CB_PERFCOUNTER0_SELECT1 0x2689
-#define CB_PERFCOUNTER1_SELECT0 0x268A
-#define CB_PERFCOUNTER1_SELECT1 0x268B
-#define CB_PERFCOUNTER2_SELECT0 0x268C
-#define CB_PERFCOUNTER2_SELECT1 0x268D
-#define CB_PERFCOUNTER3_SELECT0 0x268E
-#define CB_PERFCOUNTER3_SELECT1 0x268F
-
-#define CB_CGTT_SCLK_CTRL 0x2698
-
-#define TCP_CHAN_STEER_LO 0x2B03
-#define TCP_CHAN_STEER_HI 0x2B94
-
-#define CP_RB0_BASE 0x3040
-#define CP_RB0_CNTL 0x3041
-#define RB_BUFSZ(x) ((x) << 0)
-#define RB_BLKSZ(x) ((x) << 8)
-#define BUF_SWAP_32BIT (2 << 16)
-#define RB_NO_UPDATE (1 << 27)
-#define RB_RPTR_WR_ENA (1 << 31)
-
-#define CP_RB0_RPTR_ADDR 0x3043
-#define CP_RB0_RPTR_ADDR_HI 0x3044
-#define CP_RB0_WPTR 0x3045
-
-#define CP_PFP_UCODE_ADDR 0x3054
-#define CP_PFP_UCODE_DATA 0x3055
-#define CP_ME_RAM_RADDR 0x3056
-#define CP_ME_RAM_WADDR 0x3057
-#define CP_ME_RAM_DATA 0x3058
-
-#define CP_CE_UCODE_ADDR 0x305A
-#define CP_CE_UCODE_DATA 0x305B
-
-#define CP_RB1_BASE 0x3060
-#define CP_RB1_CNTL 0x3061
-#define CP_RB1_RPTR_ADDR 0x3062
-#define CP_RB1_RPTR_ADDR_HI 0x3063
-#define CP_RB1_WPTR 0x3064
-#define CP_RB2_BASE 0x3065
-#define CP_RB2_CNTL 0x3066
-#define CP_RB2_RPTR_ADDR 0x3067
-#define CP_RB2_RPTR_ADDR_HI 0x3068
-#define CP_RB2_WPTR 0x3069
-#define CP_INT_CNTL_RING0 0x306A
-#define CP_INT_CNTL_RING1 0x306B
-#define CP_INT_CNTL_RING2 0x306C
-# define CNTX_BUSY_INT_ENABLE (1 << 19)
-# define CNTX_EMPTY_INT_ENABLE (1 << 20)
-# define WAIT_MEM_SEM_INT_ENABLE (1 << 21)
-# define TIME_STAMP_INT_ENABLE (1 << 26)
-# define CP_RINGID2_INT_ENABLE (1 << 29)
-# define CP_RINGID1_INT_ENABLE (1 << 30)
-# define CP_RINGID0_INT_ENABLE (1 << 31)
-#define CP_INT_STATUS_RING0 0x306D
-#define CP_INT_STATUS_RING1 0x306E
-#define CP_INT_STATUS_RING2 0x306F
-# define WAIT_MEM_SEM_INT_STAT (1 << 21)
-# define TIME_STAMP_INT_STAT (1 << 26)
-# define CP_RINGID2_INT_STAT (1 << 29)
-# define CP_RINGID1_INT_STAT (1 << 30)
-# define CP_RINGID0_INT_STAT (1 << 31)
-
// #define PA_SC_RASTER_CONFIG 0xA0D4
# define RB_XSEL2(x) ((x) << 4)
# define RB_XSEL2_MASK (0x3 << 4)
@@ -1185,171 +311,14 @@
# define SE_YSEL(x) ((x) << 28)
# define SE_YSEL_MASK (0x3 << 28)
-/* PIF PHY0 registers idx/data 0x8/0xc */
-#define PB0_PIF_CNTL 0x10
-# define LS2_EXIT_TIME(x) ((x) << 17)
-# define LS2_EXIT_TIME_MASK (0x7 << 17)
-# define LS2_EXIT_TIME_SHIFT 17
-#define PB0_PIF_PAIRING 0x11
-# define MULTI_PIF (1 << 25)
-#define PB0_PIF_PWRDOWN_0 0x12
-# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
-# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_0_SHIFT 24
-#define PB0_PIF_PWRDOWN_1 0x13
-# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
-# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_1_SHIFT 24
-
-#define PB0_PIF_PWRDOWN_2 0x17
-# define PLL_POWER_STATE_IN_TXS2_2(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_2(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_2_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_2_SHIFT 10
-# define PLL_RAMP_UP_TIME_2(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_2_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_2_SHIFT 24
-#define PB0_PIF_PWRDOWN_3 0x18
-# define PLL_POWER_STATE_IN_TXS2_3(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_3(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_3_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_3_SHIFT 10
-# define PLL_RAMP_UP_TIME_3(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_3_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_3_SHIFT 24
-/* PIF PHY1 registers idx/data 0x10/0x14 */
-#define PB1_PIF_CNTL 0x10
-#define PB1_PIF_PAIRING 0x11
-#define PB1_PIF_PWRDOWN_0 0x12
-#define PB1_PIF_PWRDOWN_1 0x13
-
-#define PB1_PIF_PWRDOWN_2 0x17
-#define PB1_PIF_PWRDOWN_3 0x18
-/* PCIE registers idx/data 0x30/0x34 */
-#define PCIE_CNTL2 0x1c /* PCIE */
-# define SLV_MEM_LS_EN (1 << 16)
-# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
-# define MST_MEM_LS_EN (1 << 18)
-# define REPLAY_MEM_LS_EN (1 << 19)
-#define PCIE_LC_STATUS1 0x28 /* PCIE */
-# define LC_REVERSE_RCVR (1 << 0)
-# define LC_REVERSE_XMIT (1 << 1)
-# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
-# define LC_OPERATING_LINK_WIDTH_SHIFT 2
-# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
-# define LC_DETECTED_LINK_WIDTH_SHIFT 5
-
-#define PCIE_P_CNTL 0x40 /* PCIE */
-# define P_IGNORE_EDB_ERR (1 << 6)
-
/* PCIE PORT registers idx/data 0x38/0x3c */
-#define PCIE_LC_CNTL 0xa0
-# define LC_L0S_INACTIVITY(x) ((x) << 8)
-# define LC_L0S_INACTIVITY_MASK (0xf << 8)
-# define LC_L0S_INACTIVITY_SHIFT 8
-# define LC_L1_INACTIVITY(x) ((x) << 12)
-# define LC_L1_INACTIVITY_MASK (0xf << 12)
-# define LC_L1_INACTIVITY_SHIFT 12
-# define LC_PMI_TO_L1_DIS (1 << 16)
-# define LC_ASPM_TO_L1_DIS (1 << 24)
-#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
-# define LC_LINK_WIDTH_SHIFT 0
-# define LC_LINK_WIDTH_MASK 0x7
+// #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_X0 0
# define LC_LINK_WIDTH_X1 1
# define LC_LINK_WIDTH_X2 2
# define LC_LINK_WIDTH_X4 3
# define LC_LINK_WIDTH_X8 4
# define LC_LINK_WIDTH_X16 6
-# define LC_LINK_WIDTH_RD_SHIFT 4
-# define LC_LINK_WIDTH_RD_MASK 0x70
-# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
-# define LC_RECONFIG_NOW (1 << 8)
-# define LC_RENEGOTIATION_SUPPORT (1 << 9)
-# define LC_RENEGOTIATE_EN (1 << 10)
-# define LC_SHORT_RECONFIG_EN (1 << 11)
-# define LC_UPCONFIGURE_SUPPORT (1 << 12)
-# define LC_UPCONFIGURE_DIS (1 << 13)
-# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
-# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
-# define LC_DYN_LANES_PWR_STATE_SHIFT 21
-#define PCIE_LC_N_FTS_CNTL 0xa3 /* PCIE_P */
-# define LC_XMIT_N_FTS(x) ((x) << 0)
-# define LC_XMIT_N_FTS_MASK (0xff << 0)
-# define LC_XMIT_N_FTS_SHIFT 0
-# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
-# define LC_N_FTS_MASK (0xff << 24)
-#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
-# define LC_GEN2_EN_STRAP (1 << 0)
-# define LC_GEN3_EN_STRAP (1 << 1)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
-# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
-# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
-# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
-# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
-# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
-# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
-# define LC_CURRENT_DATA_RATE_SHIFT 13
-# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
-# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
-# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
-# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
-# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
-
-#define PCIE_LC_CNTL2 0xb1
-# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
-# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
-
-#define PCIE_LC_CNTL3 0xb5 /* PCIE_P */
-# define LC_GO_TO_RECOVERY (1 << 30)
-#define PCIE_LC_CNTL4 0xb6 /* PCIE_P */
-# define LC_REDO_EQ (1 << 5)
-# define LC_SET_QUIESCE (1 << 13)
-
-/*
- * UVD
- */
-#define UVD_UDEC_ADDR_CONFIG 0x3bd3
-#define UVD_UDEC_DB_ADDR_CONFIG 0x3bd4
-#define UVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
-#define UVD_RBC_RB_RPTR 0x3da4
-#define UVD_RBC_RB_WPTR 0x3da5
-#define UVD_STATUS 0x3daf
-
-#define UVD_CGC_CTRL 0x3dc2
-# define DCM (1 << 0)
-# define CG_DT(x) ((x) << 2)
-# define CG_DT_MASK (0xf << 2)
-# define CLK_OD(x) ((x) << 6)
-# define CLK_OD_MASK (0x1f << 6)
-
- /* UVD CTX indirect */
-#define UVD_CGC_MEM_CTRL 0xC0
-#define UVD_CGC_CTRL2 0xC1
-# define DYN_OR_EN (1 << 0)
-# define DYN_RR_EN (1 << 1)
-# define G_DIV_ID(x) ((x) << 2)
-# define G_DIV_ID_MASK (0x7 << 2)
/*
* PM4
@@ -1583,45 +552,7 @@
/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
#define DMA1_REGISTER_OFFSET 0x200 /* not a register */
-
-#define DMA_RB_CNTL 0x3400
-# define DMA_RB_ENABLE (1 << 0)
-# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
-# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
-# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
-# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
-# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
-#define DMA_RB_BASE 0x3401
-#define DMA_RB_RPTR 0x3402
-#define DMA_RB_WPTR 0x3403
-
-#define DMA_RB_RPTR_ADDR_HI 0x3407
-#define DMA_RB_RPTR_ADDR_LO 0x3408
-
-#define DMA_IB_CNTL 0x3409
-# define DMA_IB_ENABLE (1 << 0)
-# define DMA_IB_SWAP_ENABLE (1 << 4)
-# define CMD_VMID_FORCE (1 << 31)
-#define DMA_IB_RPTR 0x340a
-#define DMA_CNTL 0x340b
-# define TRAP_ENABLE (1 << 0)
-# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
-# define SEM_WAIT_INT_ENABLE (1 << 2)
-# define DATA_SWAP_ENABLE (1 << 3)
-# define FENCE_SWAP_ENABLE (1 << 4)
-# define CTXEMPTY_INT_ENABLE (1 << 28)
-#define DMA_STATUS_REG 0x340d
-# define DMA_IDLE (1 << 0)
-#define DMA_TILING_CONFIG 0x342e
-
-#define DMA_POWER_CNTL 0x342f
-# define MEM_POWER_OVERRIDE (1 << 8)
-#define DMA_CLK_CTRL 0x3430
-
-#define DMA_PG 0x3435
-# define PG_CNTL_ENABLE (1 << 0)
-#define DMA_PGFSM_CONFIG 0x3436
-#define DMA_PGFSM_WRITE 0x3437
+#define SDMA_MAX_INSTANCE 2
#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
(((b) & 0x1) << 26) | \
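
Only the first two fields of the DMA_PACKET() header macro survive in this hunk's context: the opcode in bits [31:28] and the 'b' flag in bit 26; the remaining field positions are not shown here. The standalone sketch below is an editor's illustration that packs just those two visible fields and leaves everything else at zero.

#include <stdint.h>
#include <stdio.h>

/* Mirrors only the visible part of DMA_PACKET(): cmd -> bits [31:28], b -> bit 26. */
static uint32_t dma_packet_header(uint32_t cmd, uint32_t b)
{
	return ((cmd & 0xFu) << 28) | ((b & 0x1u) << 26);
}

int main(void)
{
	/* With the NOP opcode (0xf, see DMA_PACKET_NOP below) and b = 0,
	 * the header dword comes out as 0xf0000000.
	 */
	printf("0x%08x\n", dma_packet_header(0xfu, 0));
	return 0;
}
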
@@ -1650,6 +581,7 @@
#define DMA_PACKET_POLL_REG_MEM 0xe
#define DMA_PACKET_NOP 0xf
+/* VCE */
#define VCE_STATUS 0x20004
#define VCE_VCPU_CNTL 0x20014
#define VCE_CLK_EN (1 << 0)
@@ -1726,378 +658,118 @@
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
-#define AMDGPU_MM_INDEX 0x0000
-#define AMDGPU_MM_DATA 0x0001
-
-#define VERDE_NUM_CRTC 6
-#define BLACKOUT_MODE_MASK 0x00000007
-#define VGA_RENDER_CONTROL 0xC0
-#define R_000300_VGA_RENDER_CONTROL 0xC0
-#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
-#define EVERGREEN_CRTC_STATUS 0x1BA3
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4
-/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
-#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d
-#define EVERGREEN_CRTC_CONTROL 0x1b9c
-#define EVERGREEN_CRTC_MASTER_EN (1 << 0)
-#define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
-#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d
-#define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8
-#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5
-#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd
-#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-
-#define EVERGREEN_DATA_FORMAT 0x1ac0
-# define EVERGREEN_INTERLEAVE_EN (1 << 0)
-
-#define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20)
-#define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20)
-#define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20)
-#define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20)
-
-#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a45
-#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1845
-
-#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1847
-#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a47
-
-#define R600_D1GRPH_SWAP_CONTROL 0x1843
-#define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0)
-
-#define AVIVO_D1VGA_CONTROL 0x00cc
-# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1 << 0)
-# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1 << 8)
-# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1 << 9)
-# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
-# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1 << 16)
-# define AVIVO_DVGA_CONTROL_ROTATE (1 << 24)
-#define AVIVO_D2VGA_CONTROL 0x00ce
-
-#define R600_BUS_CNTL 0x1508
-# define R600_BIOS_ROM_DIS (1 << 1)
+
#define R600_ROM_CNTL 0x580
# define R600_SCK_OVERWRITE (1 << 1)
# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28)
-#define FMT_BIT_DEPTH_CONTROL 0x1bf2
-#define FMT_TRUNCATE_EN (1 << 0)
-#define FMT_TRUNCATE_DEPTH (1 << 4)
-#define FMT_SPATIAL_DITHER_EN (1 << 8)
-#define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
-#define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
-#define FMT_FRAME_RANDOM_ENABLE (1 << 13)
-#define FMT_RGB_RANDOM_ENABLE (1 << 14)
-#define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
-#define FMT_TEMPORAL_DITHER_EN (1 << 16)
-#define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
-#define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
-#define FMT_TEMPORAL_LEVEL (1 << 24)
-#define FMT_TEMPORAL_DITHER_RESET (1 << 25)
-#define FMT_25FRC_SEL(x) ((x) << 26)
-#define FMT_50FRC_SEL(x) ((x) << 28)
-#define FMT_75FRC_SEL(x) ((x) << 30)
-
-#define EVERGREEN_DC_LUT_CONTROL 0x1a80
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x1a81
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x1a82
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x1a83
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x1a84
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x1a85
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x1a86
-#define EVERGREEN_DC_LUT_30_COLOR 0x1a7c
-#define EVERGREEN_DC_LUT_RW_INDEX 0x1a79
-#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x1a7e
-#define EVERGREEN_DC_LUT_RW_MODE 0x1a78
-
-#define EVERGREEN_GRPH_ENABLE 0x1a00
-#define EVERGREEN_GRPH_CONTROL 0x1a01
-#define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
-#define EVERGREEN_GRPH_DEPTH_8BPP 0
-#define EVERGREEN_GRPH_DEPTH_16BPP 1
-#define EVERGREEN_GRPH_DEPTH_32BPP 2
-#define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
-#define EVERGREEN_ADDR_SURF_2_BANK 0
-#define EVERGREEN_ADDR_SURF_4_BANK 1
-#define EVERGREEN_ADDR_SURF_8_BANK 2
-#define EVERGREEN_ADDR_SURF_16_BANK 3
-#define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
-#define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
-#define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
-
-#define EVERGREEN_GRPH_FORMAT_INDEXED 0
-#define EVERGREEN_GRPH_FORMAT_ARGB1555 0
-#define EVERGREEN_GRPH_FORMAT_ARGB565 1
-#define EVERGREEN_GRPH_FORMAT_ARGB4444 2
-#define EVERGREEN_GRPH_FORMAT_AI88 3
-#define EVERGREEN_GRPH_FORMAT_MONO16 4
-#define EVERGREEN_GRPH_FORMAT_BGRA5551 5
+#define GRPH_ARRAY_LINEAR_GENERAL 0
+#define GRPH_ARRAY_LINEAR_ALIGNED 1
+#define GRPH_ARRAY_1D_TILED_THIN1 2
+#define GRPH_ARRAY_2D_TILED_THIN1 4
+
+#define ES_AND_GS_AUTO 3
+#define BUF_SWAP_32BIT (2 << 16)
+
+#define GRPH_DEPTH_8BPP 0
+#define GRPH_DEPTH_16BPP 1
+#define GRPH_DEPTH_32BPP 2
+
+/* 8 BPP */
+#define GRPH_FORMAT_INDEXED 0
+
+/* 16 BPP */
+#define GRPH_FORMAT_ARGB1555 0
+#define GRPH_FORMAT_ARGB565 1
+#define GRPH_FORMAT_ARGB4444 2
+#define GRPH_FORMAT_AI88 3
+#define GRPH_FORMAT_MONO16 4
+#define GRPH_FORMAT_BGRA5551 5
/* 32 BPP */
-#define EVERGREEN_GRPH_FORMAT_ARGB8888 0
-#define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
-#define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
-#define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
-#define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
-#define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
-#define EVERGREEN_GRPH_FORMAT_RGB111110 6
-#define EVERGREEN_GRPH_FORMAT_BGR101111 7
-#define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
-#define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
-#define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
-#define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
-#define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
-#define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
-#define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
-
-#define EVERGREEN_GRPH_SWAP_CONTROL 0x1a03
-#define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
-# define EVERGREEN_GRPH_ENDIAN_NONE 0
-# define EVERGREEN_GRPH_ENDIAN_8IN16 1
-# define EVERGREEN_GRPH_ENDIAN_8IN32 2
-# define EVERGREEN_GRPH_ENDIAN_8IN64 3
-#define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
-# define EVERGREEN_GRPH_RED_SEL_R 0
-# define EVERGREEN_GRPH_RED_SEL_G 1
-# define EVERGREEN_GRPH_RED_SEL_B 2
-# define EVERGREEN_GRPH_RED_SEL_A 3
-#define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
-# define EVERGREEN_GRPH_GREEN_SEL_G 0
-# define EVERGREEN_GRPH_GREEN_SEL_B 1
-# define EVERGREEN_GRPH_GREEN_SEL_A 2
-# define EVERGREEN_GRPH_GREEN_SEL_R 3
-#define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
-# define EVERGREEN_GRPH_BLUE_SEL_B 0
-# define EVERGREEN_GRPH_BLUE_SEL_A 1
-# define EVERGREEN_GRPH_BLUE_SEL_R 2
-# define EVERGREEN_GRPH_BLUE_SEL_G 3
-#define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
-# define EVERGREEN_GRPH_ALPHA_SEL_A 0
-# define EVERGREEN_GRPH_ALPHA_SEL_R 1
-# define EVERGREEN_GRPH_ALPHA_SEL_G 2
-# define EVERGREEN_GRPH_ALPHA_SEL_B 3
-
-#define EVERGREEN_D3VGA_CONTROL 0xf8
-#define EVERGREEN_D4VGA_CONTROL 0xf9
-#define EVERGREEN_D5VGA_CONTROL 0xfa
-#define EVERGREEN_D6VGA_CONTROL 0xfb
-
-#define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
-
-#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x1a02
-#define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8)
-
-#define EVERGREEN_GRPH_PITCH 0x1a06
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x1a09
-#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x1a0a
-#define EVERGREEN_GRPH_X_START 0x1a0b
-#define EVERGREEN_GRPH_Y_START 0x1a0c
-#define EVERGREEN_GRPH_X_END 0x1a0d
-#define EVERGREEN_GRPH_Y_END 0x1a0e
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_FLIP_CONTROL 0x1a12
-#define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
-
-#define EVERGREEN_VIEWPORT_START 0x1b5c
-#define EVERGREEN_VIEWPORT_SIZE 0x1b5d
-#define EVERGREEN_DESKTOP_HEIGHT 0x1ac1
-
-/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
-#define EVERGREEN_CUR_CONTROL 0x1a66
-# define EVERGREEN_CURSOR_EN (1 << 0)
-# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
-# define EVERGREEN_CURSOR_MONO 0
-# define EVERGREEN_CURSOR_24_1 1
-# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
-# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
-# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
-# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
-# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
-# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
-# define EVERGREEN_CURSOR_URGENT_1_8 1
-# define EVERGREEN_CURSOR_URGENT_1_4 2
-# define EVERGREEN_CURSOR_URGENT_3_8 3
-# define EVERGREEN_CURSOR_URGENT_1_2 4
-#define EVERGREEN_CUR_SURFACE_ADDRESS 0x1a67
-# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
-#define EVERGREEN_CUR_SIZE 0x1a68
-#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x1a69
-#define EVERGREEN_CUR_POSITION 0x1a6a
-#define EVERGREEN_CUR_HOT_SPOT 0x1a6b
-#define EVERGREEN_CUR_COLOR1 0x1a6c
-#define EVERGREEN_CUR_COLOR2 0x1a6d
-#define EVERGREEN_CUR_UPDATE 0x1a6e
-# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
-# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
-# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
-# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
-
-
-#define NI_INPUT_CSC_CONTROL 0x1a35
-# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0)
-# define NI_INPUT_CSC_BYPASS 0
-# define NI_INPUT_CSC_PROG_COEFF 1
-# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2
-# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4)
-
-#define NI_OUTPUT_CSC_CONTROL 0x1a3c
-# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0)
-# define NI_OUTPUT_CSC_BYPASS 0
-# define NI_OUTPUT_CSC_TV_RGB 1
-# define NI_OUTPUT_CSC_YCBCR_601 2
-# define NI_OUTPUT_CSC_YCBCR_709 3
-# define NI_OUTPUT_CSC_PROG_COEFF 4
-# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5
-# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4)
-
-#define NI_DEGAMMA_CONTROL 0x1a58
-# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0)
-# define NI_DEGAMMA_BYPASS 0
-# define NI_DEGAMMA_SRGB_24 1
-# define NI_DEGAMMA_XVYCC_222 2
-# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4)
-# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
-# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12)
-
-#define NI_GAMUT_REMAP_CONTROL 0x1a59
-# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0)
-# define NI_GAMUT_REMAP_BYPASS 0
-# define NI_GAMUT_REMAP_PROG_COEFF 1
-# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2
-# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3
-# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4)
-
-#define NI_REGAMMA_CONTROL 0x1aa0
-# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0)
-# define NI_REGAMMA_BYPASS 0
-# define NI_REGAMMA_SRGB_24 1
-# define NI_REGAMMA_XVYCC_222 2
-# define NI_REGAMMA_PROG_A 3
-# define NI_REGAMMA_PROG_B 4
-# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
-
-
-#define NI_PRESCALE_GRPH_CONTROL 0x1a2d
-# define NI_GRPH_PRESCALE_BYPASS (1 << 4)
-
-#define NI_PRESCALE_OVL_CONTROL 0x1a31
-# define NI_OVL_PRESCALE_BYPASS (1 << 4)
-
-#define NI_INPUT_GAMMA_CONTROL 0x1a10
-# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0)
-# define NI_INPUT_GAMMA_USE_LUT 0
-# define NI_INPUT_GAMMA_BYPASS 1
-# define NI_INPUT_GAMMA_SRGB_24 2
-# define NI_INPUT_GAMMA_XVYCC_222 3
-# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4)
-
-#define BLACKOUT_MODE_MASK 0x00000007
-#define VGA_RENDER_CONTROL 0xC0
-#define R_000300_VGA_RENDER_CONTROL 0xC0
-#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
-#define EVERGREEN_CRTC_STATUS 0x1BA3
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4
-/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
-#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d
-#define EVERGREEN_CRTC_CONTROL 0x1b9c
-# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
-# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
-#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d
-# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
-# define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8
-#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5
-#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd
-#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-
-#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10
-#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
-#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80
-#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
-#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x400
-#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
-#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x2000
-#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xd
-#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10000
-#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
-#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80000
-#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x13
-
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK 0x1e000000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT 0x19
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS_MASK 0xff
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS__SHIFT 0x0
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID_MASK 0xff000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID__SHIFT 0xc
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW_MASK 0x1000000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW__SHIFT 0x18
-
-#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE_MASK 0x7
-#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE__SHIFT 0x0
-
-#define mmBIF_FB_EN__xxFB_READ_EN_MASK 0x1
-#define mmBIF_FB_EN__xxFB_READ_EN__SHIFT 0x0
-#define mmBIF_FB_EN__xxFB_WRITE_EN_MASK 0x2
-#define mmBIF_FB_EN__xxFB_WRITE_EN__SHIFT 0x1
-
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC_MASK 0x20000
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC__SHIFT 0x11
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC_MASK 0x800
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC__SHIFT 0xb
+#define GRPH_FORMAT_ARGB8888 0
+#define GRPH_FORMAT_ARGB2101010 1
+#define GRPH_FORMAT_32BPP_DIG 2
+#define GRPH_FORMAT_8B_ARGB2101010 3
+#define GRPH_FORMAT_BGRA1010102 4
+#define GRPH_FORMAT_8B_BGRA1010102 5
+#define GRPH_FORMAT_RGB111110 6
+#define GRPH_FORMAT_BGR101111 7
+
+#define GRPH_ENDIAN_NONE 0
+#define GRPH_ENDIAN_8IN16 1
+#define GRPH_ENDIAN_8IN32 2
+#define GRPH_ENDIAN_8IN64 3
+#define GRPH_RED_SEL_R 0
+#define GRPH_RED_SEL_G 1
+#define GRPH_RED_SEL_B 2
+#define GRPH_RED_SEL_A 3
+
+#define GRPH_GREEN_SEL_G 0
+#define GRPH_GREEN_SEL_B 1
+#define GRPH_GREEN_SEL_A 2
+#define GRPH_GREEN_SEL_R 3
+
+#define GRPH_BLUE_SEL_B 0
+#define GRPH_BLUE_SEL_A 1
+#define GRPH_BLUE_SEL_R 2
+#define GRPH_BLUE_SEL_G 3
+
+#define GRPH_ALPHA_SEL_A 0
+#define GRPH_ALPHA_SEL_R 1
+#define GRPH_ALPHA_SEL_G 2
+#define GRPH_ALPHA_SEL_B 3
+
+/* CUR_CONTROL */
+ #define CURSOR_MONO 0
+ #define CURSOR_24_1 1
+ #define CURSOR_24_8_PRE_MULT 2
+ #define CURSOR_24_8_UNPRE_MULT 3
+ #define CURSOR_URGENT_ALWAYS 0
+ #define CURSOR_URGENT_1_8 1
+ #define CURSOR_URGENT_1_4 2
+ #define CURSOR_URGENT_3_8 3
+ #define CURSOR_URGENT_1_2 4
+
+/* INPUT_CSC_CONTROL */
+# define INPUT_CSC_BYPASS 0
+# define INPUT_CSC_PROG_COEFF 1
+# define INPUT_CSC_PROG_SHARED_MATRIXA 2
+
+/* OUTPUT_CSC_CONTROL */
+# define OUTPUT_CSC_BYPASS 0
+# define OUTPUT_CSC_TV_RGB 1
+# define OUTPUT_CSC_YCBCR_601 2
+# define OUTPUT_CSC_YCBCR_709 3
+# define OUTPUT_CSC_PROG_COEFF 4
+# define OUTPUT_CSC_PROG_SHARED_MATRIXB 5
+
+/* DEGAMMA_CONTROL */
+# define DEGAMMA_BYPASS 0
+# define DEGAMMA_SRGB_24 1
+# define DEGAMMA_XVYCC_222 2
+
+/* GAMUT_REMAP_CONTROL */
+# define GAMUT_REMAP_BYPASS 0
+# define GAMUT_REMAP_PROG_COEFF 1
+# define GAMUT_REMAP_PROG_SHARED_MATRIXA 2
+# define GAMUT_REMAP_PROG_SHARED_MATRIXB 3
+
+/* REGAMMA_CONTROL */
+# define REGAMMA_BYPASS 0
+# define REGAMMA_SRGB_24 1
+# define REGAMMA_XVYCC_222 2
+# define REGAMMA_PROG_A 3
+# define REGAMMA_PROG_B 4
+
+
+/* INPUT_GAMMA_CONTROL */
+# define INPUT_GAMMA_USE_LUT 0
+# define INPUT_GAMMA_BYPASS 1
+# define INPUT_GAMMA_SRGB_24 2
+# define INPUT_GAMMA_XVYCC_222 3
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
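
The two MC_SEQ_MISC0 lines above keep the memory-type (MT) field in bits [31:28]; GDDR1 is the only MT value visible in this context. The following editor's sketch (not part of the patch) shows how that field would be decoded from a register readback.

#include <stdint.h>

#define MT_MASK  0xf0000000u	/* MC_SEQ_MISC0__MT__MASK above */
#define MT_GDDR1 0x10000000u	/* MC_SEQ_MISC0__MT__GDDR1 above */

/* Returns nonzero when the MT field of an MC_SEQ_MISC0 readback reports GDDR1. */
static int mc_seq_misc0_is_gddr1(uint32_t mc_seq_misc0)
{
	return (mc_seq_misc0 & MT_MASK) == MT_GDDR1;
}
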
@@ -2113,20 +785,9 @@
#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
-#define CONFIG_CNTL 0x1509
-#define CC_DRM_ID_STRAPS 0X1559
#define AMDGPU_PCIE_INDEX 0xc
#define AMDGPU_PCIE_DATA 0xd
-#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411
-#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412
-#define DMA_MODE 0x342f
-#define DMA_RB_RPTR_ADDR_HI 0x3407
-#define DMA_RB_RPTR_ADDR_LO 0x3408
-#define DMA_BUSY_MASK 0x20
-#define DMA1_BUSY_MASK 0X40
-#define SDMA_MAX_INSTANCE 2
-
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
#define PCIE_PORT_INDEX 0xe
@@ -2136,8 +797,6 @@
#define EVERGREEN_PIF_PHY1_INDEX 0x10
#define EVERGREEN_PIF_PHY1_DATA 0x14
-#define MC_VM_FB_OFFSET 0x81a
-
/* Discrete VCE clocks */
#define CG_VCEPLL_FUNC_CNTL 0xc0030600
#define VCEPLL_RESET_MASK 0x00000001
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 659eab9b90be..c457be3a3c56 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -584,6 +584,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
* Enable triggering of GPU reset only if specified
* by module parameter.
*/
+ if (adev->pcie_reset_ctx.in_link_reset)
+ return AMD_RESET_METHOD_LINK;
if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
return AMD_RESET_METHOD_MODE2;
else if (!(adev->flags & AMD_IS_APU))
@@ -640,6 +642,9 @@ asic_reset:
case AMD_RESET_METHOD_MODE2:
dev_info(adev->dev, "MODE2 reset\n");
return amdgpu_dpm_mode2_reset(adev);
+ case AMD_RESET_METHOD_LINK:
+ dev_info(adev->dev, "Link reset\n");
+ return amdgpu_device_link_reset(adev);
default:
dev_info(adev->dev, "MODE1 reset\n");
return amdgpu_device_mode1_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index a5000c171c02..cf93fa477674 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -552,6 +552,11 @@
# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
+#define PACKET3_RUN_CLEANER_SHADER_9_0 0xD7
+/* 1. header
+ * 2. RESERVED [31:0]
+ */
+
#define PACKET3_RUN_CLEANER_SHADER 0xD2
/* 1. header
* 2. RESERVED [31:0]
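
The layout noted above is a two-dword packet: one header plus one reserved dword. The snippet below is an editor's sketch only; it assumes the usual soc15d.h PACKET3() header macro and the amdgpu_ring_write() helper, neither of which is defined in this hunk.

/* Editor's sketch, not part of the patch. */
static void emit_run_cleaner_shader_9_0(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0));	/* 1. header */
	amdgpu_ring_write(ring, 0);						/* 2. RESERVED [31:0] */
}
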
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 0e404c074975..e590cbdd8de9 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -174,19 +174,76 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
umc_v12_0_reset_error_count(adev);
}
+static void umc_v12_0_get_retire_flip_bits(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+ uint32_t vram_type = adev->gmc.vram_type;
+ struct amdgpu_umc_flip_bits *flip_bits = &(adev->umc.flip_bits);
+
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+
+ /* default setting */
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT;
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT;
+ flip_bits->flip_row_bit = 13;
+ flip_bits->bit_num = 4;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT;
+
+ if (nps == AMDGPU_NPS2_PARTITION_MODE) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT;
+ } else if (nps == AMDGPU_NPS4_PARTITION_MODE) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT;
+ }
+
+ switch (vram_type) {
+ case AMDGPU_VRAM_TYPE_HBM:
+		/* all other NPS modes are treated as NPS1 */
+ if (nps == AMDGPU_NPS2_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ else if (nps == AMDGPU_NPS4_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+
+ break;
+ case AMDGPU_VRAM_TYPE_HBM3E:
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ flip_bits->flip_row_bit = 12;
+
+ if (nps == AMDGPU_NPS2_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+ else if (nps == AMDGPU_NPS4_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT;
+
+ break;
+ default:
+ dev_warn(adev->dev,
+ "Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n");
+ break;
+ }
+
+ adev->umc.retire_unit = 0x1 << flip_bits->bit_num;
+}
+
static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
struct ta_ras_query_address_input *addr_in,
struct ta_ras_query_address_output *addr_out,
bool dump_addr)
{
- uint32_t col, col_lower, row, row_lower, bank;
+ uint32_t col, col_lower, row, row_lower, row_high, bank;
uint32_t channel_index = 0, umc_inst = 0;
- uint32_t i, loop_bits[UMC_V12_0_RETIRE_LOOP_BITS];
+ uint32_t i, bit_num, retire_unit, *flip_bits;
uint64_t soc_pa, column, err_addr;
struct ta_ras_query_address_output addr_out_tmp;
struct ta_ras_query_address_output *paddr_out;
- enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
if (!addr_out)
@@ -211,46 +268,46 @@ static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
umc_inst = addr_in->ma.umc_inst;
}
- loop_bits[0] = UMC_V12_0_PA_C2_BIT;
- loop_bits[1] = UMC_V12_0_PA_C3_BIT;
- loop_bits[2] = UMC_V12_0_PA_C4_BIT;
- loop_bits[3] = UMC_V12_0_PA_R13_BIT;
-
- if (adev->gmc.gmc_funcs->query_mem_partition_mode)
- nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
-
- /* other nps modes are taken as nps1 */
- if (nps == AMDGPU_NPS4_PARTITION_MODE) {
- loop_bits[0] = UMC_V12_0_PA_CH4_BIT;
- loop_bits[1] = UMC_V12_0_PA_CH5_BIT;
- loop_bits[2] = UMC_V12_0_PA_B0_BIT;
- loop_bits[3] = UMC_V12_0_PA_R11_BIT;
- }
+ flip_bits = adev->umc.flip_bits.flip_bits_in_pa;
+ bit_num = adev->umc.flip_bits.bit_num;
+ retire_unit = adev->umc.retire_unit;
soc_pa = paddr_out->pa.pa;
channel_index = paddr_out->pa.channel_idx;
/* clear loop bits in soc physical address */
- for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
- soc_pa &= ~BIT_ULL(loop_bits[i]);
+ for (i = 0; i < bit_num; i++)
+ soc_pa &= ~BIT_ULL(flip_bits[i]);
paddr_out->pa.pa = soc_pa;
/* get column bit 0 and 1 in mca address */
col_lower = (err_addr >> 1) & 0x3ULL;
- /* MA_R13_BIT will be handled later */
+ /* extra row bit will be handled later */
row_lower = (err_addr >> UMC_V12_0_MA_R0_BIT) & 0x1fffULL;
+ row_lower &= ~BIT_ULL(adev->umc.flip_bits.flip_row_bit);
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 5, 0)) {
+ row_high = (soc_pa >> adev->umc.flip_bits.r13_in_pa) & 0x3ULL;
+		/* Each channel holds 2.25 GB; when converting an MCA address
+		 * to a PA, row bits [R14 R13] are remapped if their value is
+		 * 0x3, so take them from the PA rather than from the MCA
+		 * address.
+		 */
+ row_lower |= (row_high << 13);
+ }
if (!err_data && !dump_addr)
goto out;
/* loop for all possibilities of retired bits */
- for (column = 0; column < UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; column++) {
+ for (column = 0; column < retire_unit; column++) {
soc_pa = paddr_out->pa.pa;
- for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
- soc_pa |= (((column >> i) & 0x1ULL) << loop_bits[i]);
+ for (i = 0; i < bit_num; i++)
+ soc_pa |= (((column >> i) & 0x1ULL) << flip_bits[i]);
col = ((column & 0x7) << 2) | col_lower;
- /* add row bit 13 */
- row = ((column >> 3) << 13) | row_lower;
+ /* handle extra row bit */
+ if (bit_num == RETIRE_FLIP_BITS_NUM)
+ row = ((column >> 3) << adev->umc.flip_bits.flip_row_bit) |
+ row_lower;
if (dump_addr)
dev_info(adev->dev,
@@ -428,8 +485,12 @@ static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank
bank->regs[ACA_REG_IDX_ADDR]);
ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
- count = ext_error_code == 0 ?
- ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
+ if (umc_v12_0_is_deferred_error(adev, status))
+ count = ext_error_code == 0 ?
+ adev->umc.err_addr_cnt / adev->umc.retire_unit : 1ULL;
+ else
+ count = ext_error_code == 0 ?
+ ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}
@@ -469,8 +530,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
uint64_t err_addr, pa_addr = 0;
struct ras_ecc_err *ecc_err;
struct ta_ras_query_address_output addr_out;
- enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
- uint32_t shift_bit = UMC_V12_0_PA_C4_BIT;
+ uint32_t shift_bit = adev->umc.flip_bits.flip_bits_in_pa[2];
int count, ret, i;
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
@@ -515,11 +575,6 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
ecc_err->pa_pfn = pa_addr >> AMDGPU_GPU_PAGE_SHIFT;
ecc_err->channel_idx = addr_out.pa.channel_idx;
- if (adev->gmc.gmc_funcs->query_mem_partition_mode)
- nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
- if (nps == AMDGPU_NPS4_PARTITION_MODE)
- shift_bit = UMC_V12_0_PA_B0_BIT;
-
/* If converted pa_pfn is 0, use pa C4 pfn. */
if (!ecc_err->pa_pfn)
ecc_err->pa_pfn = BIT_ULL(shift_bit) >> AMDGPU_GPU_PAGE_SHIFT;
@@ -665,5 +720,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
.update_ecc_status = umc_v12_0_update_ecc_status,
.convert_ras_err_addr = umc_v12_0_convert_error_address,
.get_die_id_from_pa = umc_v12_0_get_die_id,
+ .get_retire_flip_bits = umc_v12_0_get_retire_flip_bits,
};
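
Taken together, umc_v12_0_get_retire_flip_bits() and the column loop in umc_v12_0_convert_error_address() mean that one reported bad address fans out into retire_unit = 1 << bit_num candidate pages whose physical addresses differ only in the selected flip bits. The standalone program below is an editor's illustration of that expansion with the default NPS1 setup (bit_num = 4, so 16 candidates). PA bits C2 = 15, C4 = 21 and R13 = 35 appear in the umc_v12_0.h hunk that follows; the value 16 used for C3 and the example base address are assumptions made only for this demo.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t flip_bits[4] = { 15, 16, 21, 35 };	/* C2, C3 (assumed), C4, R13 */
	const uint32_t bit_num = 4, retire_unit = 1u << bit_num;
	const uint64_t soc_pa = 0x123456789ULL;			/* made-up bad address */
	uint64_t base = soc_pa;
	uint32_t column, i;

	for (i = 0; i < bit_num; i++)			/* clear the flip bits, as the driver does */
		base &= ~(1ULL << flip_bits[i]);

	for (column = 0; column < retire_unit; column++) {
		uint64_t pa = base;

		for (i = 0; i < bit_num; i++)		/* re-apply every bit combination */
			pa |= (uint64_t)((column >> i) & 0x1u) << flip_bits[i];
		printf("candidate pa: 0x%llx\n", (unsigned long long)pa);
	}
	return 0;
}
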
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index 9298018d938f..63b7e7254526 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -55,8 +55,6 @@
#define UMC_V12_0_NA_MAP_PA_NUM 8
/* R13 bit shift should be considered, double the number */
#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
-/* C2, C3, C4, R13, four bits in MCA address are looped in retirement */
-#define UMC_V12_0_RETIRE_LOOP_BITS 4
/* column bits in SOC physical address */
#define UMC_V12_0_PA_C2_BIT 15
@@ -64,13 +62,16 @@
#define UMC_V12_0_PA_C4_BIT 21
/* row bits in SOC physical address */
#define UMC_V12_0_PA_R0_BIT 22
+#define UMC_V12_0_PA_R10_BIT 32
#define UMC_V12_0_PA_R11_BIT 33
+#define UMC_V12_0_PA_R12_BIT 34
#define UMC_V12_0_PA_R13_BIT 35
/* channel bit in SOC physical address */
#define UMC_V12_0_PA_CH4_BIT 12
#define UMC_V12_0_PA_CH5_BIT 13
/* bank bit in SOC physical address */
#define UMC_V12_0_PA_B0_BIT 19
+#define UMC_V12_0_PA_B1_BIT 20
/* row bits in MCA address */
#define UMC_V12_0_MA_R0_BIT 10
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 8e7a36f26e9c..b8d835c9e17e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x503
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index d716510b8dd6..3eec1b8feaee 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 22ae1939476f..0b19f0ab4480 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -40,6 +40,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index c6f6392c1c20..8fff470bce87 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_HARVEST_MMSCH 0
@@ -238,9 +239,9 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
@@ -614,7 +615,8 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
@@ -1945,6 +1947,20 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
return 0;
}
+static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ return -EOPNOTSUPP;
+
+ vcn_v4_0_stop(vinst);
+ vcn_v4_0_start(vinst);
+
+ return amdgpu_ring_test_helper(ring);
+}
+
static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1974,6 +1990,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_ring_reset,
};
/**
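The vcn_v4_0 hunks above wire a new .reset callback into the ring funcs and advertise per-queue reset support, so a single hung encode queue can be recovered without a full GPU reset. The callback's shape is simple: refuse if the capability mask was never set, stop and restart the owning VCN instance, then prove the ring accepts work again. Below is a minimal user-space sketch of that shape only; my_engine_*, my_ring_* and MY_RESET_PER_QUEUE are illustrative stand-ins, not amdgpu symbols.

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins; none of these are the real amdgpu types,
 * helpers or flag values. */
#define MY_RESET_PER_QUEUE	(1u << 0)

struct my_engine { unsigned int supported_reset; int running; };
struct my_ring   { struct my_engine *engine; };

static void my_engine_stop(struct my_engine *e)  { e->running = 0; }
static void my_engine_start(struct my_engine *e) { e->running = 1; }
static int  my_ring_test(struct my_ring *r)      { return r->engine->running ? 0 : -EIO; }

/* Same shape as the ring reset hunk above: gate on the advertised
 * reset mask, restart the owning instance, then re-test the ring. */
static int my_ring_reset(struct my_ring *ring)
{
	struct my_engine *eng = ring->engine;

	if (!(eng->supported_reset & MY_RESET_PER_QUEUE))
		return -EOPNOTSUPP;

	my_engine_stop(eng);
	my_engine_start(eng);
	return my_ring_test(ring);
}

int main(void)
{
	struct my_engine eng = { .supported_reset = MY_RESET_PER_QUEUE };
	struct my_ring ring = { .engine = &eng };

	printf("reset -> %d\n", my_ring_reset(&ring));
	return 0;
}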
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 3e176b4b7c69..712e1fba33ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -45,6 +45,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
@@ -287,6 +288,31 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int vcn_v4_0_3_hw_init_inst(struct amdgpu_vcn_inst *vinst)
+{
+ int vcn_inst;
+ struct amdgpu_device *adev = vinst->adev;
+ struct amdgpu_ring *ring;
+ int inst_idx = vinst->inst;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+ if (ring->use_doorbell) {
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst,
+ adev->vcn.inst[inst_idx].aid_id);
+
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+ }
+
+ return 0;
+}
+
/**
* vcn_v4_0_3_hw_init - start and test VCN block
*
@@ -298,7 +324,8 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r, vcn_inst;
+ struct amdgpu_vcn_inst *vinst;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
r = vcn_v4_0_3_start_sriov(adev);
@@ -321,28 +348,9 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
struct amdgpu_vcn4_fw_shared *fw_shared;
- vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell) {
- adev->nbio.funcs->vcn_doorbell_range(
- adev, ring->use_doorbell,
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 9 * vcn_inst,
- adev->vcn.inst[i].aid_id);
-
- WREG32_SOC15(
- VCN, GET_INST(VCN, ring->me),
- regVCN_RB1_DB_CTRL,
- ring->doorbell_index
- << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- /* Read DB_CTRL to flush the write DB_CTRL command. */
- RREG32_SOC15(
- VCN, GET_INST(VCN, ring->me),
- regVCN_RB1_DB_CTRL);
- }
+ vinst = &adev->vcn.inst[i];
+ vcn_v4_0_3_hw_init_inst(vinst);
/* Re-init fw_shared when RAS fatal error occurred */
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
@@ -1563,6 +1571,37 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ int r = 0;
+ int vcn_inst;
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ if (amdgpu_sriov_vf(ring->adev))
+ return -EOPNOTSUPP;
+
+ if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ return -EOPNOTSUPP;
+
+ vcn_inst = GET_INST(VCN, ring->me);
+ r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
+
+ if (r) {
+ DRM_DEV_ERROR(adev->dev, "VCN reset fail : %d\n", r);
+ return r;
+ }
+
+ /* This flag is not set for VF, assumed to be disabled always */
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
+ vcn_v4_0_3_hw_init_inst(vinst);
+ vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram);
+ r = amdgpu_ring_test_helper(ring);
+
+ return r;
+}
+
static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1591,6 +1630,7 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_3_ring_reset,
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index ba603b2246e2..a09f9a2dd471 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000)
+#define VCN1_AON_SOC_ADDRESS_3_0 (0x48000 + 0x38000)
#define VCN_HARVEST_MMSCH 0
@@ -207,6 +208,10 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+ fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
+ fw_shared->drm_key_wa.method =
+ AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
+
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
@@ -214,6 +219,13 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
}
+ adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -1022,6 +1034,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
+ /* Keeping one read-back to ensure all register writes are done, otherwise
+ * it may introduce race conditions */
+ RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL);
+
return 0;
}
@@ -1204,6 +1220,10 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ /* Keeping one read-back to ensure all register writes are done, otherwise
+ * it may introduce race conditions */
+ RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+
return 0;
}
@@ -1435,6 +1455,20 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ return -EOPNOTSUPP;
+
+ vcn_v4_0_5_stop(vinst);
+ vcn_v4_0_5_start(vinst);
+
+ return amdgpu_ring_test_helper(ring);
+}
+
static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1462,6 +1496,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_5_ring_reset,
};
/**
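The read-backs added after the RB doorbell and enable writes above exist because MMIO writes are posted: they can be reported complete while still sitting in a write buffer, so a later operation that depends on them can race ahead. Reading the same register back stalls until the write has actually landed. A compile-only sketch of the idiom follows, with a volatile pointer standing in for the driver's WREG32/RREG32 MMIO accessors.

#include <stdint.h>

/* Sketch of the "write, then read back to flush" idiom used above.
 * A volatile pointer stands in for the MMIO register. */
static inline void reg_write_flushed(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;	/* posted write: may still sit in a write buffer */
	(void)*reg;	/* read-back forces completion before anything
			 * that depends on this write proceeds */
}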
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index d99d05f42f1d..27dcc6f37a73 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -196,9 +196,9 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
vcn_v5_0_0_alloc_ip_dump(adev);
@@ -533,7 +533,8 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
/* VCN global tiling registers */
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
return;
}
@@ -1171,6 +1172,20 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ return -EOPNOTSUPP;
+
+ vcn_v5_0_0_stop(vinst);
+ vcn_v5_0_0_start(vinst);
+
+ return amdgpu_ring_test_helper(ring);
+}
+
static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1198,6 +1213,7 @@ static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v5_0_0_ring_reset,
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index 581d8629b9d9..8e843011703c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -30,6 +30,7 @@
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "vcn_v4_0_3.h"
+#include "mmsch_v5_0.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
@@ -39,6 +40,7 @@
#include <drm/drm_drv.h>
+static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
@@ -126,7 +128,14 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst;
+ if (!amdgpu_sriov_vf(adev))
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst;
+ else
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 32 * vcn_inst;
ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
@@ -143,6 +152,12 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+ }
+
vcn_v5_0_0_alloc_ip_dump(adev);
return amdgpu_vcn_sysfs_reset_mask_init(adev);
@@ -172,6 +187,9 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
drm_dev_exit(idx);
}
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_free_mm_table(adev);
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i);
if (r)
@@ -204,24 +222,38 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, r, vcn_inst;
- if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
- adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
- ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell)
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 9 * vcn_inst),
- adev->vcn.inst[i].aid_id);
-
- /* Re-init fw_shared, if required */
- vcn_v5_0_1_fw_shared_init(adev, i);
-
- r = amdgpu_ring_test_helper(ring);
+ if (amdgpu_sriov_vf(adev)) {
+ r = vcn_v5_0_1_start_sriov(adev);
if (r)
return r;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v5_0_1_unified_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
+ } else {
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ vcn_inst = GET_INST(VCN, i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst),
+ adev->vcn.inst[i].aid_id);
+
+ /* Re-init fw_shared, if required */
+ vcn_v5_0_1_fw_shared_init(adev, i);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
}
return 0;
@@ -503,6 +535,52 @@ static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
}
/**
+ * vcn_v5_0_1_pause_dpg_mode - VCN pause with dpg mode
+ *
+ * @vinst: VCN instance
+ * @new_state: pause state
+ *
+ * Pause dpg mode for VCN block
+ */
+static int vcn_v5_0_1_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
+{
+ struct amdgpu_device *adev = vinst->adev;
+ uint32_t reg_data = 0;
+ int vcn_inst;
+
+ vcn_inst = GET_INST(VCN, vinst->inst);
+
+ /* pause/unpause if state is changed */
+ if (vinst->pause_state.fw_based != new_state->fw_based) {
+ DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n",
+ vinst->pause_state.fw_based, new_state->fw_based,
+ new_state->fw_based ? "VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE");
+ reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+ /* pause DPG */
+ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
+
+ /* wait for ACK */
+ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+ } else {
+ /* unpause DPG, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
+ }
+ vinst->pause_state.fw_based = new_state->fw_based;
+ }
+
+ return 0;
+}
+
+
+/**
* vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
*
* @vinst: VCN instance
@@ -518,6 +596,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
volatile struct amdgpu_vcn5_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
int vcn_inst;
uint32_t tmp;
@@ -582,6 +661,9 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
if (indirect)
amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ /* Pause dpg */
+ vcn_v5_0_1_pause_dpg_mode(vinst, &state);
+
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
@@ -613,6 +695,195 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
return 0;
}
+static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev)
+{
+ int i, vcn_inst;
+ struct amdgpu_ring *ring_enc;
+ uint64_t cache_addr;
+ uint64_t rb_enc_addr;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t offset, cache_size;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw;
+ uint32_t init_status;
+ uint32_t enabled_vcn;
+
+ struct mmsch_v5_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v5_0_cmd_direct_read_modify_write
+ direct_rd_mod_wt = { {0} };
+ struct mmsch_v5_0_cmd_end end = { {0} };
+ struct mmsch_v5_0_init_header header;
+
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ vcn_inst = GET_INST(VCN, i);
+
+ vcn_v5_0_1_fw_shared_init(adev, vcn_inst);
+
+ memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+
+ table_size = 0;
+
+ MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
+ ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+
+ offset = 0;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET0), 0);
+ } else {
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = cache_size;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE0),
+ cache_size);
+
+ cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET1), 0);
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE);
+
+ cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE;
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr));
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET2), 0);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE);
+
+ fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr;
+ rb_setup = &fw_shared->rb_setup;
+
+ ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0];
+ ring_enc->wptr = 0;
+ rb_enc_addr = ring_enc->gpu_addr;
+
+ rb_setup->is_rb_enabled_flags |= RB_ENABLED;
+ rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
+ rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
+ rb_setup->rb_size = ring_enc->ring_size / 4;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+ MMSCH_V5_0_INSERT_END();
+
+ header.vcn0.init_status = 0;
+ header.vcn0.table_offset = header.total_size;
+ header.vcn0.table_size = table_size;
+ header.total_size += table_size;
+
+ /* Send init table to mmsch */
+ size = sizeof(struct mmsch_v5_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp);
+
+ size = header.total_size;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size);
+
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ param = 0x00000001;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP);
+ if (resp != 0)
+ break;
+
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+
+ enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
+ init_status = ((struct mmsch_v5_0_init_header *)(table_loc))->vcn0.init_status;
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
+ && init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
+ "status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
+ }
+ }
+
+ return 0;
+}
+
/**
* vcn_v5_0_1_start - VCN start
*
@@ -775,9 +1046,13 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
int inst_idx = vinst->inst;
uint32_t tmp;
int vcn_inst;
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
vcn_inst = GET_INST(VCN, inst_idx);
+ /* Unpause dpg */
+ vcn_v5_0_1_pause_dpg_mode(vinst, &state);
+
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -1049,8 +1324,18 @@ static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state)
{
+ struct amdgpu_device *adev = vinst->adev;
int ret = 0;
+ /* for SRIOV, guest should not control VCN Power-gating
+ * MMSCH FW should control Power-gating and clock-gating
+ * guest should avoid touching CGC and PG
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == vinst->cur_state)
return 0;
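The largest addition above is vcn_v5_0_1_start_sriov(), which builds an MMSCH init table, points the VF context registers at it, and then waits on a mailbox register for the MMSCH firmware to acknowledge. The waiting part is an ordinary bounded poll; the sketch below reproduces just that loop in user space. read_resp(), the response value and the timing constants are stand-ins, not the real registers or hardware limits.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Test double for the RREG32_SOC15(..., regMMSCH_VF_MAILBOX_RESP) read:
 * pretend the firmware answers OK on the third poll. */
static unsigned int polls_left = 3;

static uint32_t read_resp(void)
{
	return (polls_left && --polls_left == 0) ? 1u : 0u;
}

static int wait_for_mailbox(uint32_t expected, unsigned int budget_us)
{
	unsigned int waited = 0;
	uint32_t resp = 0;

	while (resp != expected) {
		resp = read_resp();
		if (resp != 0)
			break;		/* got some response; check it below */
		/* udelay(10) in the kernel; elided in this sketch */
		waited += 10;
		if (waited >= budget_us)
			return -EBUSY;	/* firmware never answered in time */
	}
	return resp == expected ? 0 : -EIO;
}

int main(void)
{
	printf("mailbox wait -> %d\n", wait_for_mailbox(1u, 1000));
	return 0;
}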
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index faa0dd75dd6d..85846fd08ce4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -350,6 +350,7 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
if (ret)
return ret;
}
+ ih[i]->overflow = false;
}
if (!amdgpu_sriov_vf(adev))
@@ -437,7 +438,10 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ if (!amdgpu_sriov_vf(adev))
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ else
+ ih->overflow = true;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 32). Hopefully
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 86d8bc10d90a..9b3510e53112 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -239,6 +239,13 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
.max_pixels_per_frame = 4096 * 4096,
.max_level = 186,
},
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
};
static const struct amdgpu_video_codecs cz_video_codecs_decode =
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 4a5a0a4e00f2..9bde2c64540f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -27,6 +27,16 @@
#include "kfd_priv.h"
static struct dentry *debugfs_root;
+static struct dentry *debugfs_proc;
+static struct list_head procs;
+
+struct debugfs_proc_entry {
+ struct list_head list;
+ struct dentry *proc_dentry;
+ pid_t pid;
+};
+
+#define MAX_DEBUGFS_FILENAME_LEN 32
static int kfd_debugfs_open(struct inode *inode, struct file *file)
{
@@ -92,6 +102,8 @@ static const struct file_operations kfd_debugfs_hang_hws_fops = {
void kfd_debugfs_init(void)
{
debugfs_root = debugfs_create_dir("kfd", NULL);
+ debugfs_proc = debugfs_create_dir("proc", debugfs_root);
+ INIT_LIST_HEAD(&procs);
debugfs_create_file("mqds", S_IFREG | 0444, debugfs_root,
kfd_debugfs_mqds_by_process, &kfd_debugfs_fops);
@@ -107,5 +119,69 @@ void kfd_debugfs_init(void)
void kfd_debugfs_fini(void)
{
+ debugfs_remove_recursive(debugfs_proc);
debugfs_remove_recursive(debugfs_root);
}
+
+static ssize_t kfd_debugfs_pasid_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kfd_process_device *pdd = file_inode(file)->i_private;
+ char tmp[32];
+ int len;
+
+ len = snprintf(tmp, sizeof(tmp), "%u\n", pdd->pasid);
+
+ return simple_read_from_buffer(buf, count, ppos, tmp, len);
+}
+
+static const struct file_operations kfd_debugfs_pasid_fops = {
+ .owner = THIS_MODULE,
+ .read = kfd_debugfs_pasid_read,
+};
+
+void kfd_debugfs_add_process(struct kfd_process *p)
+{
+ int i;
+ char name[MAX_DEBUGFS_FILENAME_LEN];
+ struct debugfs_proc_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ list_add(&entry->list, &procs);
+ entry->pid = p->lead_thread->pid;
+ snprintf(name, MAX_DEBUGFS_FILENAME_LEN, "%d",
+ (int)entry->pid);
+ entry->proc_dentry = debugfs_create_dir(name, debugfs_proc);
+
+ /* Create debugfs files for each GPU:
+ * - proc/<pid>/pasid_<gpuid>
+ */
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ snprintf(name, MAX_DEBUGFS_FILENAME_LEN, "pasid_%u",
+ pdd->dev->id);
+ debugfs_create_file((const char *)name, S_IFREG | 0444,
+ entry->proc_dentry, pdd,
+ &kfd_debugfs_pasid_fops);
+ }
+}
+
+void kfd_debugfs_remove_process(struct kfd_process *p)
+{
+ struct debugfs_proc_entry *entry, *next;
+
+ mutex_lock(&kfd_processes_mutex);
+ list_for_each_entry_safe(entry, next, &procs, list) {
+ if (entry->pid != p->lead_thread->pid)
+ continue;
+
+ debugfs_remove_recursive(entry->proc_dentry);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ mutex_unlock(&kfd_processes_mutex);
+}
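The kfd_debugfs changes above create a proc/ directory under the kfd debugfs root, one sub-directory per process named after the lead thread's pid, and one pasid_<gpu id> file per GPU backed by simple_read_from_buffer(). A small sketch of reading such a file from user space follows; the path layout is inferred from the hunk (debugfs mounted at /sys/kernel/debug), and the pid and gpu id values are placeholders for a real system.

#include <stdio.h>

int main(void)
{
	char path[128], buf[32];
	FILE *f;

	/* placeholder pid and gpu id; substitute values from a real system */
	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/kfd/proc/%d/pasid_%u", 1234, 0u);

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("pasid: %s", buf);	/* the file already ends in '\n' */
	fclose(f);
	return 0;
}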
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index b9c82be6ce13..bf0854bd5555 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -352,11 +352,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &aldebaran_kfd2kgd;
break;
case IP_VERSION(9, 4, 3):
- gfx_target_version = adev->rev_id >= 1 ? 90402
- : adev->flags & AMD_IS_APU ? 90400
- : 90401;
- f2g = &gc_9_4_3_kfd2kgd;
- break;
case IP_VERSION(9, 4, 4):
gfx_target_version = 90402;
f2g = &gc_9_4_3_kfd2kgd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c610e172a2b8..76359c6a3f3a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1576,8 +1576,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
- dev_err(dev, "No more SDMA queue to allocate\n");
+ if (bitmap_empty(dqm->sdma_bitmap, get_num_sdma_queues(dqm))) {
+ dev_warn(dev, "No more SDMA queue to allocate (%d total queues)\n",
+ get_num_sdma_queues(dqm));
return -ENOMEM;
}
@@ -1602,8 +1603,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
q->properties.sdma_queue_id = q->sdma_id /
kfd_get_num_sdma_engines(dqm->dev);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
- dev_err(dev, "No more XGMI SDMA queue to allocate\n");
+ if (bitmap_empty(dqm->xgmi_sdma_bitmap, get_num_xgmi_sdma_queues(dqm))) {
+ dev_warn(dev, "No more XGMI SDMA queue to allocate (%d total queues)\n",
+ get_num_xgmi_sdma_queues(dqm));
return -ENOMEM;
}
if (restore_sdma_id) {
@@ -1662,8 +1664,8 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
}
if (!free_bit_found) {
- dev_err(dev, "No more SDMA queue to allocate for target ID %i\n",
- q->properties.sdma_engine_id);
+ dev_warn(dev, "No more SDMA queue to allocate for target ID %i (%d total queues)\n",
+ q->properties.sdma_engine_id, num_queues);
return -ENOMEM;
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index fecdb6794075..e54e708ed82d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -1177,6 +1177,25 @@ void kfd_signal_hw_exception_event(u32 pasid)
kfd_unref_process(p);
}
+void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_hsa_memory_exception_data exception_data;
+ int i;
+
+ memset(&exception_data, 0, sizeof(exception_data));
+ exception_data.va = gpu_va;
+ exception_data.failure.NotPresent = 1;
+
+ /* Send a VM fault event to all KFD process devices */

+ for (i = 0; i < p->n_pdds; i++) {
+ pdd = p->pdds[i];
+ exception_data.gpu_id = pdd->user_gpu_id;
+ kfd_evict_process_device(pdd);
+ kfd_signal_vm_fault_event(pdd, NULL, &exception_data);
+ }
+}
+
void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
index 37b69fe0ede3..3e1ad8974797 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -168,14 +168,14 @@ static bool event_interrupt_isr_v10(struct kfd_node *dev,
client_id != SOC15_IH_CLIENTID_SE3SH)
return false;
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
- /* If there is no valid PASID, it's likely a bug */
- if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ if (pasid == 0)
return 0;
/* Interrupt types we care about: various signals and faults.
@@ -217,37 +217,66 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
switch (encoding) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_BUF0_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_BUF1_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_AUTO_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ WLT),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF0_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF1_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_UTC_ERROR));
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SA_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- WGP_ID));
+ dev_dbg_ratelimited(
+ dev->adev->dev,
+ "sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID));
if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK) {
if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
@@ -259,21 +288,37 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
ERR_TYPE);
- pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SA_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- WGP_ID),
+ dev_warn_ratelimited(
+ dev->adev->dev,
+ "sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID),
sq_intr_err_type);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index c5f97e6e36ff..2788a52714d1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -148,44 +148,69 @@ enum SQ_INTERRUPT_ERROR_TYPE {
#define KFD_CTXID0_DOORBELL_ID(ctxid0) ((ctxid0) & \
KFD_CTXID0_DOORBELL_ID_MASK)
-static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_auto(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_BUF_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, REG_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, CMD_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_CMD_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_REG_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, IMMED_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ REG_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ CMD_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ HOST_REG_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ IMMED_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_UTC_ERROR));
}
-static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_inst(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SH_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, WAVE_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, WGP_ID));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SIMD_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID));
}
-static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_error(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_warn_ratelimited(
+ dev_warn_ratelimited(
+ dev->adev->dev,
"sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, WGP_ID));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ DETAIL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ TYPE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1,
+ SIMD_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1,
+ WGP_ID));
}
static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
@@ -255,14 +280,14 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev,
(context_id0 & AMDGPU_FENCE_MES_QUEUE_FLAG))
return false;
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
- /* If there is no valid PASID, it's likely a bug */
- if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ if (pasid == 0)
return false;
/* Interrupt types we care about: various signals and faults.
@@ -353,10 +378,10 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
switch (sq_int_enc) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- print_sq_intr_info_auto(context_id0, context_id1);
+ print_sq_intr_info_auto(dev, context_id0, context_id1);
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- print_sq_intr_info_inst(context_id0, context_id1);
+ print_sq_intr_info_inst(dev, context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
@@ -366,7 +391,7 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
return;
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
- print_sq_intr_info_error(context_id0, context_id1);
+ print_sq_intr_info_error(dev, context_id0, context_id1);
sq_int_errtype = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE);
if (sq_int_errtype != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index b8a91bf4ef30..4ceb251312a6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -314,11 +314,12 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev,
& ~pasid_mask) | pasid);
}
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
/* If there is no valid PASID, it's likely a bug */
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
@@ -379,28 +380,82 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
switch (encoding) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_BUF_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, REG_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, CMD_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_CMD_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_REG_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, IMMED_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ WLT),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ REG_TIMESTAMP),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ CMD_TIMESTAMP),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ HOST_REG_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ IMMED_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE_UTC_ERROR));
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ dev_dbg_ratelimited(
+ dev->adev->dev,
+ "sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SH_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ CU_ID),
sq_int_data);
if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) {
if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
@@ -412,14 +467,37 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
- pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ dev_warn_ratelimited(
+ dev->adev->dev,
+ "sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SH_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ CU_ID),
sq_intr_err);
if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 80320a6c8854..97933d2a3803 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -495,6 +495,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
+ /* Allow context switch so we don't cross-process starve with a massive
+ * command buffer of long-running SDMA commands
+ */
+ m->sdmax_rlcx_ib_cntl |= SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK;
q->is_active = QUEUE_IS_ACTIVE(*q);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 271c567242ab..b1a6eb349bb3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -31,6 +31,7 @@
#define OVER_SUBSCRIPTION_PROCESS_COUNT (1 << 0)
#define OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT (1 << 1)
#define OVER_SUBSCRIPTION_GWS_QUEUE_COUNT (1 << 2)
+#define OVER_SUBSCRIPTION_XNACK_CONFLICT (1 << 3)
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
unsigned int buffer_size_bytes)
@@ -44,7 +45,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
- int *over_subscription)
+ int *over_subscription,
+ int xnack_conflict)
{
unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
@@ -73,6 +75,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
*over_subscription |= OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT;
if (gws_queue_count > 1)
*over_subscription |= OVER_SUBSCRIPTION_GWS_QUEUE_COUNT;
+ if (xnack_conflict && (node->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
+ *over_subscription |= OVER_SUBSCRIPTION_XNACK_CONFLICT;
if (*over_subscription)
dev_dbg(dev, "Over subscribed runlist\n");
@@ -96,7 +100,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
unsigned int **rl_buffer,
uint64_t *rl_gpu_buffer,
unsigned int *rl_buffer_size,
- int *is_over_subscription)
+ int *is_over_subscription,
+ int xnack_conflict)
{
struct kfd_node *node = pm->dqm->dev;
struct device *dev = node->adev->dev;
@@ -105,7 +110,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
if (WARN_ON(pm->allocated))
return -EINVAL;
- pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
+ pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription,
+ xnack_conflict);
mutex_lock(&pm->lock);
@@ -142,11 +148,27 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
struct queue *q;
struct kernel_queue *kq;
int is_over_subscription;
+ int xnack_enabled = -1;
+ bool xnack_conflict = 0;
rl_wptr = retval = processes_mapped = 0;
+ /* Check if processes set different xnack modes */
+ list_for_each_entry(cur, queues, list) {
+ qpd = cur->qpd;
+ if (xnack_enabled < 0)
+ /* First process */
+ xnack_enabled = qpd->pqm->process->xnack_enabled;
+ else if (qpd->pqm->process->xnack_enabled != xnack_enabled) {
+ /* Found a process with a different xnack mode */
+ xnack_conflict = 1;
+ break;
+ }
+ }
+
retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
- &alloc_size_bytes, &is_over_subscription);
+ &alloc_size_bytes, &is_over_subscription,
+ xnack_conflict);
if (retval)
return retval;
@@ -156,9 +178,13 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
dev_dbg(dev, "Building runlist ib process count: %d queues count %d\n",
pm->dqm->processes_count, pm->dqm->active_queue_count);
+build_runlist_ib:
/* build the run list ib packet */
list_for_each_entry(cur, queues, list) {
qpd = cur->qpd;
+ /* group processes with the same xnack mode together */
+ if (qpd->pqm->process->xnack_enabled != xnack_enabled)
+ continue;
/* build map process packet */
if (processes_mapped >= pm->dqm->processes_count) {
dev_dbg(dev, "Not enough space left in runlist IB\n");
@@ -215,18 +241,26 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
alloc_size_bytes);
}
}
+ if (xnack_conflict) {
+ /* pick up processes with the other xnack mode */
+ xnack_enabled = !xnack_enabled;
+ xnack_conflict = 0;
+ goto build_runlist_ib;
+ }
dev_dbg(dev, "Finished map process and queues to runlist\n");
if (is_over_subscription) {
if (!pm->is_over_subscription)
- dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s. Expect reduced ROCm performance.\n",
- is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
- " too many processes." : "",
- is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
- " too many queues." : "",
- is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
- " multiple processes using cooperative launch." : "");
+ dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s%s. Expect reduced ROCm performance.\n",
+ is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
+ " too many processes" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
+ " too many queues" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
+ " multiple processes using cooperative launch" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_XNACK_CONFLICT ?
+ " xnack on/off processes mixed on gfx9" : "");
retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
*rl_gpu_addr,
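The packet-manager changes above handle runlists that mix XNACK-on and XNACK-off processes on gfx9: the conflict is detected up front, recorded as another over-subscription reason, and the runlist is then built in two passes so every process with one mode is mapped before any process with the other (the build_runlist_ib label and the final goto). The sketch below reproduces only that grouping logic over plain arrays; emit(), build() and the sample data are illustrative, not the kernel structures.

#include <stdbool.h>
#include <stdio.h>

static void emit(int pid, bool xnack)
{
	printf("map process %d (xnack %s)\n", pid, xnack ? "on" : "off");
}

static void build(const bool *xnack, const int *pid, int n)
{
	int i, first_mode = -1;
	bool conflict = false;
	bool mode;

	for (i = 0; i < n; i++) {		/* detect mixed xnack modes */
		if (first_mode < 0)
			first_mode = xnack[i];
		else if (xnack[i] != (bool)first_mode)
			conflict = true;
	}

	mode = first_mode > 0;
again:						/* mirrors the build_runlist_ib label */
	for (i = 0; i < n; i++)
		if (xnack[i] == mode)
			emit(pid[i], xnack[i]);
	if (conflict) {				/* second pass picks up the other mode */
		mode = !mode;
		conflict = false;
		goto again;
	}
}

int main(void)
{
	bool xnack[] = { true, false, true };
	int pid[] = { 100, 101, 102 };

	build(xnack, pid, 3);
	return 0;
}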
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 2893fd5e5d00..8fa6489b6f5d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -43,7 +43,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process));
- if (adev->enforce_isolation[kfd->node_id])
+ if (adev->enforce_isolation[kfd->node_id] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
@@ -102,7 +102,8 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process_aldebaran));
- if (adev->enforce_isolation[knode->node_id])
+ if (adev->enforce_isolation[knode->node_id] ==
+ AMDGPU_ENFORCE_ISOLATION_ENABLE)
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
@@ -165,9 +166,9 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
* hws_max_conc_proc has been done in
* kgd2kfd_device_init().
*/
- concurrent_proc_cnt = adev->enforce_isolation[kfd->node_id] ?
- 1 : min(pm->dqm->processes_count,
- kfd->max_proc_per_quantum);
+ concurrent_proc_cnt = (adev->enforce_isolation[kfd->node_id] ==
+ AMDGPU_ENFORCE_ISOLATION_ENABLE) ?
+ 1 : min(pm->dqm->processes_count, kfd->max_proc_per_quantum);
packet = (struct pm4_mes_runlist *)buffer;
@@ -202,6 +203,8 @@ static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
queue_type__mes_set_resources__hsa_interface_queue_hiq;
packet->bitfields2.vmid_mask = res->vmid_mask;
packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
+ if (pm->dqm->dev->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN)
+ packet->bitfields2.enb_xnack_retry_disable_check = 1;
packet->bitfields7.oac_mask = res->oac_mask;
packet->bitfields8.gds_heap_base = res->gds_heap_base;
packet->bitfields8.gds_heap_size = res->gds_heap_size;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
index cd8611401a66..e356a207d03c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
@@ -63,7 +63,8 @@ struct pm4_mes_set_resources {
struct {
uint32_t vmid_mask:16;
uint32_t unmap_latency:8;
- uint32_t reserved1:5;
+ uint32_t reserved1:4;
+ uint32_t enb_xnack_retry_disable_check:1;
enum mes_set_resources_queue_type_enum queue_type:3;
} bitfields2;
uint32_t ordinal2;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f6aedf69c644..d221c58dccc3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1507,6 +1507,8 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va);
+
void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data);
@@ -1581,10 +1583,15 @@ int kfd_debugfs_hang_hws(struct kfd_node *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);
+void kfd_debugfs_add_process(struct kfd_process *p);
+void kfd_debugfs_remove_process(struct kfd_process *p);
+
#else
static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}
+static inline void kfd_debugfs_add_process(struct kfd_process *p) {}
+static inline void kfd_debugfs_remove_process(struct kfd_process *p) {}
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 7c0c24732481..722ac1662bdc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -900,6 +900,8 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
kfd_procfs_add_sysfs_files(process);
kfd_procfs_add_sysfs_counters(process);
+ kfd_debugfs_add_process(process);
+
init_waitqueue_head(&process->wait_irq_drain);
}
out:
@@ -1054,6 +1056,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
+ kfd_smi_event_process(pdd, false);
+
pr_debug("Releasing pdd (topology id %d, for pid %d)\n",
pdd->dev->id, p->lead_thread->pid);
kfd_process_device_destroy_cwsr_dgpu(pdd);
@@ -1174,6 +1178,7 @@ static void kfd_process_wq_release(struct work_struct *work)
dma_fence_signal(ef);
kfd_process_remove_sysfs(p);
+ kfd_debugfs_remove_process(p);
kfd_process_kunmap_signal_bo(p);
kfd_process_free_outstanding_kfd_bos(p);
@@ -1715,6 +1720,8 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
pdd->pasid = avm->pasid;
pdd->drm_file = drm_file;
+ kfd_smi_event_process(pdd, true);
+
return 0;
err_get_pasid:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7eb370b68159..6d5fa57d4a23 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -451,8 +451,15 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("process pid %d DQM create queue type %d failed. ret %d\n",
- pqm->process->lead_thread->pid, type, retval);
+ if ((type == KFD_QUEUE_TYPE_SDMA ||
+ type == KFD_QUEUE_TYPE_SDMA_XGMI ||
+ type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) &&
+ retval == -ENOMEM)
+ pr_warn("process pid %d DQM create queue type %d failed. ret %d\n",
+ pqm->process->lead_thread->pid, type, retval);
+ else
+ pr_err("process pid %d DQM create queue type %d failed. ret %d\n",
+ pqm->process->lead_thread->pid, type, retval);
goto err_create_queue;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 4afff7094caf..a65c67cf56ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -402,7 +402,7 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
{
u32 vgpr_size = 0x40000;
- if ((gfxv / 100 * 100) == 90400 || /* GFX_VERSION_AQUA_VANJARAM */
+ if (gfxv == 90402 || /* GFX_VERSION_AQUA_VANJARAM */
gfxv == 90010 || /* GFX_VERSION_ALDEBARAN */
gfxv == 90008 || /* GFX_VERSION_ARCTURUS */
gfxv == 90500)
@@ -462,7 +462,7 @@ void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev)
if (gfxv == 80002) /* GFX_VERSION_TONGA */
props->eop_buffer_size = 0x8000;
- else if ((gfxv / 100 * 100) == 90400) /* GFX_VERSION_AQUA_VANJARAM */
+ else if (gfxv == 90402) /* GFX_VERSION_AQUA_VANJARAM */
props->eop_buffer_size = 4096;
else if (gfxv >= 80000)
props->eop_buffer_size = 4096;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 9b8169761ec5..83d9384ac815 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -163,10 +163,9 @@ static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client,
unsigned int event)
{
- uint64_t all = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS);
uint64_t events = READ_ONCE(client->events);
- if (pid && client->pid != pid && !(client->suser && (events & all)))
+ if (pid && client->pid != pid && !client->suser)
return false;
return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event);
@@ -345,6 +344,27 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
pid, address, last - address + 1, node->id, trigger));
}
+void kfd_smi_event_process(struct kfd_process_device *pdd, bool start)
+{
+ struct amdgpu_task_info *task_info;
+ struct amdgpu_vm *avm;
+
+ if (!pdd->drm_priv)
+ return;
+
+ avm = drm_priv_to_vm(pdd->drm_priv);
+ task_info = amdgpu_vm_get_task_info_vm(avm);
+
+ if (task_info) {
+ kfd_smi_event_add(0, pdd->dev,
+ start ? KFD_SMI_EVENT_PROCESS_START :
+ KFD_SMI_EVENT_PROCESS_END,
+ KFD_EVENT_FMT_PROCESS(task_info->pid,
+ task_info->task_name));
+ amdgpu_vm_put_task_info(task_info);
+ }
+}
+
int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
{
struct kfd_smi_client *client;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
index 503bff13d815..bb4d72b57387 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
@@ -53,4 +53,5 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm);
void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
unsigned long address, unsigned long last,
uint32_t trigger);
+void kfd_smi_event_process(struct kfd_process_device *pdd, bool start);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 100717a98ec1..72be6e152e88 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1245,8 +1245,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
case IP_VERSION(9, 4, 4):
case IP_VERSION(9, 5, 0):
if (ext_coherent)
- mtype_local = (gc_ip_version < IP_VERSION(9, 5, 0) && !node->adev->rev_id) ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_CC;
+ mtype_local = AMDGPU_VM_MTYPE_CC;
else
mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e477d7509646..baa2374acdeb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1267,34 +1267,41 @@ static void kfd_set_recommended_sdma_engines(struct kfd_topology_device *to_dev,
{
struct kfd_node *gpu = outbound_link->gpu;
struct amdgpu_device *adev = gpu->adev;
- int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes;
+ unsigned int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes;
+ unsigned int num_xgmi_sdma_engines = kfd_get_num_xgmi_sdma_engines(gpu);
+ unsigned int num_sdma_engines = kfd_get_num_sdma_engines(gpu);
+ uint32_t sdma_eng_id_mask = (1 << num_sdma_engines) - 1;
+ uint32_t xgmi_sdma_eng_id_mask =
+ ((1 << num_xgmi_sdma_engines) - 1) << num_sdma_engines;
+
bool support_rec_eng = !amdgpu_sriov_vf(adev) && to_dev->gpu &&
adev->aid_mask && num_xgmi_nodes && gpu->kfd->num_nodes == 1 &&
- kfd_get_num_xgmi_sdma_engines(gpu) >= 14 &&
- (!(adev->flags & AMD_IS_APU) && num_xgmi_nodes == 8);
+ num_xgmi_sdma_engines >= 6 && (!(adev->flags & AMD_IS_APU) &&
+ num_xgmi_nodes == 8);
if (support_rec_eng) {
int src_socket_id = adev->gmc.xgmi.physical_node_id;
int dst_socket_id = to_dev->gpu->adev->gmc.xgmi.physical_node_id;
+ unsigned int reshift = num_xgmi_sdma_engines == 6 ? 1 : 0;
outbound_link->rec_sdma_eng_id_mask =
- 1 << rec_sdma_eng_map[src_socket_id][dst_socket_id];
+ 1 << (rec_sdma_eng_map[src_socket_id][dst_socket_id] >> reshift);
inbound_link->rec_sdma_eng_id_mask =
- 1 << rec_sdma_eng_map[dst_socket_id][src_socket_id];
- } else {
- int num_sdma_eng = kfd_get_num_sdma_engines(gpu);
- int i, eng_offset = 0;
+ 1 << (rec_sdma_eng_map[dst_socket_id][src_socket_id] >> reshift);
- if (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
- kfd_get_num_xgmi_sdma_engines(gpu) && to_dev->gpu) {
- eng_offset = num_sdma_eng;
- num_sdma_eng = kfd_get_num_xgmi_sdma_engines(gpu);
- }
+ /* If recommended engine is out of range, need to reset the mask */
+ if (outbound_link->rec_sdma_eng_id_mask & sdma_eng_id_mask)
+ outbound_link->rec_sdma_eng_id_mask = xgmi_sdma_eng_id_mask;
+ if (inbound_link->rec_sdma_eng_id_mask & sdma_eng_id_mask)
+ inbound_link->rec_sdma_eng_id_mask = xgmi_sdma_eng_id_mask;
- for (i = 0; i < num_sdma_eng; i++) {
- outbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
- inbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
- }
+ } else {
+ uint32_t engine_mask = (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
+ num_xgmi_sdma_engines && to_dev->gpu) ? xgmi_sdma_eng_id_mask :
+ sdma_eng_id_mask;
+
+ outbound_link->rec_sdma_eng_id_mask = engine_mask;
+ inbound_link->rec_sdma_eng_id_mask = engine_mask;
}
}
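
For reference, a small standalone C sketch of the mask arithmetic used in the hunk above. The engine counts and the recommended-engine table value below are hypothetical, and the program is not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned int num_sdma_engines = 2;       /* hypothetical */
	unsigned int num_xgmi_sdma_engines = 6;  /* hypothetical */

	/* Plain SDMA engines occupy the low bits... */
	unsigned int sdma_eng_id_mask = (1u << num_sdma_engines) - 1;
	/* ...and XGMI SDMA engines occupy the bits directly above them. */
	unsigned int xgmi_sdma_eng_id_mask =
		((1u << num_xgmi_sdma_engines) - 1) << num_sdma_engines;

	/* With 2 + 6 engines this prints 0x3 and 0xfc. */
	printf("sdma mask: %#x, xgmi mask: %#x\n",
	       sdma_eng_id_mask, xgmi_sdma_eng_id_mask);

	/* Mirroring the hunk: with exactly 6 XGMI SDMA engines, the
	 * recommended engine index from the table is halved before it is
	 * turned into a mask.
	 */
	unsigned int rec_eng = 9;                /* hypothetical table entry */
	unsigned int reshift = num_xgmi_sdma_engines == 6 ? 1 : 0;
	unsigned int rec_mask = 1u << (rec_eng >> reshift);

	/* If the resulting bit lands in the plain-SDMA range, the
	 * recommendation is out of range and falls back to all XGMI engines.
	 */
	if (rec_mask & sdma_eng_id_mask)
		rec_mask = xgmi_sdma_eng_id_mask;

	printf("recommended mask: %#x\n", rec_mask);
	return 0;
}
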
@@ -1983,9 +1990,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
if (kfd_dbg_has_ttmps_always_setup(dev->gpu))
dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
- if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
- dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
-
if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) {
if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3) ||
KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 4))
@@ -2001,7 +2005,11 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
dev->node_props.capability |=
HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
- dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
+ if (!amdgpu_sriov_vf(dev->gpu->adev))
+ dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
+
+ if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
+ dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
} else {
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index ab2a97e354da..7329b8cc2576 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -38,6 +38,7 @@ AMDGPUDM = \
amdgpu_dm_pp_smu.o \
amdgpu_dm_psr.o \
amdgpu_dm_replay.o \
+ amdgpu_dm_quirks.o \
amdgpu_dm_wb.o
ifdef CONFIG_DRM_AMD_DC_FP
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d0d8ad5368c3..742b10881112 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -80,7 +80,6 @@
#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
-#include <linux/dmi.h>
#include <linux/sort.h>
#include <drm/display/drm_dp_mst_helper.h>
@@ -115,6 +114,8 @@
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
+static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch");
+
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
@@ -280,7 +281,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
acrtc = adev->mode_info.crtcs[crtc];
if (!acrtc->dm_irq_params.stream) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
@@ -301,7 +302,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
acrtc = adev->mode_info.crtcs[crtc];
if (!acrtc->dm_irq_params.stream) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
@@ -372,6 +373,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
{
+ if (new_state->stream->adjust.timing_adjust_pending)
+ return true;
if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
return true;
else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
@@ -673,15 +676,21 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
- acrtc->dm_irq_params.vrr_params.supported &&
- acrtc->dm_irq_params.freesync_config.state ==
- VRR_STATE_ACTIVE_VARIABLE) {
+ acrtc->dm_irq_params.vrr_params.supported) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
+
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ /* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ dc_stream_adjust_vmin_vmax(adev->dm.dc,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
}
/*
@@ -749,6 +758,29 @@ static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
complete(&adev->dm.dmub_aux_transfer_done);
}
+static void dmub_aux_fused_io_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ if (!adev || !notify) {
+ ASSERT(false);
+ return;
+ }
+
+ const struct dmub_cmd_fused_request *req = &notify->fused_request;
+ const uint8_t ddc_line = req->u.aux.ddc_line;
+
+ if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) {
+ ASSERT(false);
+ return;
+ }
+
+ struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line];
+
+ static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch");
+ memcpy(sync->reply_data, req, sizeof(*req));
+ complete(&sync->replied);
+}
+
/**
* dmub_hpd_callback - DMUB HPD interrupt processing callback.
* @adev: amdgpu_device pointer
@@ -772,18 +804,18 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
return;
if (notify == NULL) {
- DRM_ERROR("DMUB HPD callback notification was NULL");
+ drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL");
return;
}
if (notify->link_index > adev->dm.dc->link_count) {
- DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
+ drm_err(adev_to_drm(adev), "DMUB HPD index (%u)is abnormal", notify->link_index);
return;
}
/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
- DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
+ drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n");
return;
}
@@ -800,11 +832,11 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
if (notify->type == DMUB_NOTIFICATION_HPD)
- DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ drm_info(adev_to_drm(adev), "DMUB HPD IRQ callback: link_index=%u\n", link_index);
else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
- DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
+ drm_info(adev_to_drm(adev), "DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
else
- DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+ drm_warn(adev_to_drm(adev), "DMUB Unknown HPD callback type %d, link_index=%u\n",
notify->type, link_index);
hpd_aconnector = aconnector;
@@ -816,7 +848,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
if (hpd_aconnector) {
if (notify->type == DMUB_NOTIFICATION_HPD) {
if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
- DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index);
+ drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index);
handle_hpd_irq_helper(hpd_aconnector);
} else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
handle_hpd_rx_irq(hpd_aconnector);
@@ -835,7 +867,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
struct dmub_notification *notify)
{
- DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n");
+ drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n");
}
/**
@@ -871,7 +903,7 @@ static void dm_handle_hpd_work(struct work_struct *work)
dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
if (!dmub_hpd_wrk->dmub_notify) {
- DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+ drm_err(adev_to_drm(dmub_hpd_wrk->adev), "dmub_hpd_wrk dmub_notify is NULL");
return;
}
@@ -885,6 +917,30 @@ static void dm_handle_hpd_work(struct work_struct *work)
}
+static const char *dmub_notification_type_str(enum dmub_notification_type e)
+{
+ switch (e) {
+ case DMUB_NOTIFICATION_NO_DATA:
+ return "NO_DATA";
+ case DMUB_NOTIFICATION_AUX_REPLY:
+ return "AUX_REPLY";
+ case DMUB_NOTIFICATION_HPD:
+ return "HPD";
+ case DMUB_NOTIFICATION_HPD_IRQ:
+ return "HPD_IRQ";
+ case DMUB_NOTIFICATION_SET_CONFIG_REPLY:
+ return "SET_CONFIG_REPLY";
+ case DMUB_NOTIFICATION_DPIA_NOTIFICATION:
+ return "DPIA_NOTIFICATION";
+ case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY:
+ return "HPD_SENSE_NOTIFY";
+ case DMUB_NOTIFICATION_FUSED_IO:
+ return "FUSED_IO";
+ default:
+ return "<unknown>";
+ }
+}
+
#define DMUB_TRACE_MAX_READ 64
/**
* dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
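
The switch-based stringifier added above replaces a fixed string array indexed by notification type; a tiny standalone C sketch (enum and names hypothetical, not part of the patch) of why the switch form degrades gracefully when the enum later grows:

#include <stdio.h>

/* Illustrative enum/stringifier pair: a switch with a default case stays
 * safe when new enum values are added, whereas indexing a fixed string
 * array can silently read past its end.
 */
enum demo_notify { DEMO_A, DEMO_B, DEMO_NEW };

static const char *demo_notify_str(enum demo_notify e)
{
	switch (e) {
	case DEMO_A:
		return "A";
	case DEMO_B:
		return "B";
	default:
		return "<unknown>";
	}
}

int main(void)
{
	/* DEMO_NEW was added after the stringifier was written; it still
	 * resolves to a harmless placeholder instead of stray memory.
	 */
	printf("%s\n", demo_notify_str(DEMO_NEW));
	return 0;
}
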
@@ -902,22 +958,13 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct dmcub_trace_buf_entry entry = { 0 };
u32 count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
- static const char *const event_type[] = {
- "NO_DATA",
- "AUX_REPLY",
- "HPD",
- "HPD_IRQ",
- "SET_CONFIGC_REPLY",
- "DPIA_NOTIFICATION",
- "HPD_SENSE_NOTIFY",
- };
do {
if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
entry.param0, entry.param1);
- DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
entry.trace_code, entry.tick_count, entry.param0, entry.param1);
} else
break;
@@ -927,7 +974,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
} while (count <= DMUB_TRACE_MAX_READ);
if (count > DMUB_TRACE_MAX_READ)
- DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
+ drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ");
if (dc_enable_dmub_notifications(adev->dm.dc) &&
irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
@@ -935,25 +982,25 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
do {
dc_stat_get_dmub_notification(adev->dm.dc, &notify);
if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
- DRM_ERROR("DM: notify type %d invalid!", notify.type);
+ drm_err(adev_to_drm(adev), "DM: notify type %d invalid!", notify.type);
continue;
}
if (!dm->dmub_callback[notify.type]) {
- DRM_WARN("DMUB notification skipped due to no handler: type=%s\n",
- event_type[notify.type]);
+ drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n",
+ dmub_notification_type_str(notify.type));
continue;
}
if (dm->dmub_thread_offload[notify.type] == true) {
dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
if (!dmub_hpd_wrk) {
- DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+ drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk");
return;
}
dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
GFP_ATOMIC);
if (!dmub_hpd_wrk->dmub_notify) {
kfree(dmub_hpd_wrk);
- DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
+ drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk->dmub_notify");
return;
}
INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
@@ -1011,10 +1058,10 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
&compressor->gpu_addr, &compressor->cpu_addr);
if (r)
- DRM_ERROR("DM: Failed to initialize FBC\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n");
else {
adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
- DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
+ drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4);
}
}
@@ -1179,13 +1226,13 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
if (!fb_info) {
- DRM_ERROR("No framebuffer info for DMUB service.\n");
+ drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n");
return -EINVAL;
}
if (!dmub_fw) {
/* Firmware required for DMUB support. */
- DRM_ERROR("No firmware provided for DMUB.\n");
+ drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n");
return -EINVAL;
}
@@ -1195,19 +1242,19 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status);
return -EINVAL;
}
if (!has_hw_support) {
- DRM_INFO("DMUB unsupported on ASIC\n");
+ drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n");
return 0;
}
/* Reset DMCUB if it was previously running - before we overwrite its memory. */
status = dmub_srv_hw_reset(dmub_srv);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Error resetting DMUB HW: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status);
hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
@@ -1290,6 +1337,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 1):
case IP_VERSION(3, 6, 0):
hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
+ hw_params.lower_hbr3_phy_ssc = true;
break;
default:
break;
@@ -1297,14 +1345,14 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status);
return -EINVAL;
}
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
/* Init DMCU and ABM if available. */
if (dmcu && abm) {
@@ -1315,11 +1363,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
if (!adev->dm.dc->ctx->dmub_srv)
adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
if (!adev->dm.dc->ctx->dmub_srv) {
- DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+ drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n");
return -ENOMEM;
}
- DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+ drm_info(adev_to_drm(adev), "DMUB hardware initialized: version=0x%08X\n",
adev->dm.dmcub_fw_version);
/* Keeping sanity checks off if
@@ -1362,18 +1410,18 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
status = dmub_srv_is_hw_init(dmub_srv, &init);
if (status != DMUB_STATUS_OK)
- DRM_WARN("DMUB hardware init check failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "DMUB hardware init check failed: %d\n", status);
if (status == DMUB_STATUS_OK && init) {
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
} else {
/* Perform the full hardware initialization. */
r = dm_dmub_hw_init(adev);
if (r)
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
}
}
@@ -1483,18 +1531,18 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
aconnector = offload_work->offload_wq->aconnector;
+ adev = offload_work->adev;
if (!aconnector) {
- DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+ drm_err(adev_to_drm(adev), "Can't retrieve aconnector in hpd_rx_irq_offload_work");
goto skip;
}
- adev = drm_to_adev(aconnector->base.dev);
dc_link = aconnector->dc_link;
mutex_lock(&aconnector->hpd_lock);
if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
mutex_unlock(&aconnector->hpd_lock);
if (new_connection_type == dc_connection_none)
@@ -1563,8 +1611,9 @@ skip:
}
-static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct amdgpu_device *adev)
{
+ struct dc *dc = adev->dm.dc;
int max_caps = dc->caps.max_links;
int i = 0;
struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
@@ -1580,7 +1629,7 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct
create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
if (hpd_rx_offload_wq[i].wq == NULL) {
- DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
+ drm_err(adev_to_drm(adev), "create amdgpu_dm_hpd_rx_offload_wq fail!");
goto out_err;
}
@@ -1629,132 +1678,6 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}
-struct amdgpu_dm_quirks {
- bool aux_hpd_discon;
- bool support_edp0_on_dp1;
-};
-
-static struct amdgpu_dm_quirks quirk_entries = {
- .aux_hpd_discon = false,
- .support_edp0_on_dp1 = false
-};
-
-static int edp0_on_dp1_callback(const struct dmi_system_id *id)
-{
- quirk_entries.support_edp0_on_dp1 = true;
- return 0;
-}
-
-static int aux_hpd_discon_callback(const struct dmi_system_id *id)
-{
- quirk_entries.aux_hpd_discon = true;
- return 0;
-}
-
-static const struct dmi_system_id dmi_quirk_table[] = {
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
- },
- },
- {}
- /* TODO: refactor this from a fixed table to a dynamic option */
-};
-
-static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
-{
- int dmi_id;
- struct drm_device *dev = dm->ddev;
-
- dm->aux_hpd_discon_quirk = false;
- init_data->flags.support_edp0_on_dp1 = false;
-
- dmi_id = dmi_check_system(dmi_quirk_table);
-
- if (!dmi_id)
- return;
-
- if (quirk_entries.aux_hpd_discon) {
- dm->aux_hpd_discon_quirk = true;
- drm_info(dev, "aux_hpd_discon_quirk attached\n");
- }
- if (quirk_entries.support_edp0_on_dp1) {
- init_data->flags.support_edp0_on_dp1 = true;
- drm_info(dev, "support_edp0_on_dp1 attached\n");
- }
-}
void*
dm_allocate_gpu_mem(
@@ -1899,26 +1822,6 @@ static enum dmub_ips_disable_type dm_get_default_ips_mode(
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 6, 0):
- /*
- * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
- * cause a hard hang. A fix exists for newer PMFW.
- *
- * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
- * IPS state in all cases, except for s0ix and all displays off (DPMS),
- * where IPS2 is allowed.
- *
- * When checking pmfw version, use the major and minor only.
- */
- if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
- ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
- else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0))
- /*
- * Other ASICs with DCN35 that have residency issues with
- * IPS2 in idle.
- * We want them to use IPS2 only in display off cases.
- */
- ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
- break;
case IP_VERSION(3, 5, 1):
ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
break;
@@ -1950,7 +1853,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.audio_lock);
if (amdgpu_dm_irq_init(adev)) {
- DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to initialize DM IRQ support.\n");
goto error;
}
@@ -2061,7 +1964,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
- retrieve_dmi_info(&adev->dm, &init_data);
+ retrieve_dmi_info(&adev->dm);
+ if (adev->dm.edp0_on_dp1_quirk)
+ init_data.flags.support_edp0_on_dp1 = true;
if (adev->dm.bb_from_dmub)
init_data.bb_from_dmub = adev->dm.bb_from_dmub;
@@ -2072,10 +1977,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.dc = dc_create(&init_data);
if (adev->dm.dc) {
- DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ drm_info(adev_to_drm(adev), "Display Core v%s initialized on %s\n", DC_VER,
dce_version_to_string(adev->dm.dc->ctx->dce_version));
} else {
- DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ drm_info(adev_to_drm(adev), "Display Core failed to initialize with v%s!\n", DC_VER);
goto error;
}
@@ -2109,25 +2014,31 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.dc->debug.using_dml21 = true;
}
+ if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE)
+ adev->dm.dc->debug.hdcp_lc_force_fw_enable = true;
+
+ if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK)
+ adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true;
+
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
adev->dm.dc->debug.ignore_cable_id = true;
if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
- DRM_INFO("DP-HDMI FRL PCON supported\n");
+ drm_info(adev_to_drm(adev), "DP-HDMI FRL PCON supported\n");
r = dm_dmub_hw_init(adev);
if (r) {
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
goto error;
}
dc_hardware_init(adev->dm.dc);
- adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
if (!adev->dm.hpd_rx_offload_wq) {
- DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd rx offload workqueue.\n");
goto error;
}
@@ -2142,10 +2053,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"amdgpu: failed to initialize freesync_module.\n");
} else
- DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+ drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
adev->dm.freesync_module);
amdgpu_dm_init_color_mod();
@@ -2154,7 +2065,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.vblank_control_workqueue =
create_singlethread_workqueue("dm_vblank_control_workqueue");
if (!adev->dm.vblank_control_workqueue)
- DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to initialize vblank_workqueue.\n");
}
if (adev->dm.dc->caps.ips_support &&
@@ -2165,9 +2076,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
- DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to initialize hdcp_workqueue.\n");
else
- DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+ drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
dc_init_callbacks(adev->dm.dc, &init_params);
}
@@ -2175,20 +2086,29 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
if (!adev->dm.dmub_notify) {
- DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
+ drm_info(adev_to_drm(adev), "amdgpu: fail to allocate adev->dm.dmub_notify");
goto error;
}
adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
if (!adev->dm.delayed_hpd_wq) {
- DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd offload workqueue.\n");
goto error;
}
amdgpu_dm_outbox_init(adev);
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
dmub_aux_setconfig_callback, false)) {
- DRM_ERROR("amdgpu: fail to register dmub aux callback");
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub aux callback");
+ goto error;
+ }
+
+ for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++)
+ init_completion(&adev->dm.fused_io[i].replied);
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO,
+ dmub_aux_fused_io_callback, false)) {
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub fused io callback");
goto error;
}
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
@@ -2205,7 +2125,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
if (amdgpu_dm_initialize_drm_device(adev)) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"amdgpu: failed to initialize sw for display support.\n");
goto error;
}
@@ -2220,7 +2140,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"amdgpu: failed to initialize sw for display support.\n");
goto error;
}
@@ -2228,14 +2148,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
amdgpu_dm_crtc_secure_display_create_contexts(adev);
if (!adev->dm.secure_display_ctx.crtc_ctx)
- DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to initialize secure display contexts.\n");
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
adev->dm.secure_display_ctx.support_mul_roi = true;
#endif
- DRM_DEBUG_DRIVER("KMS initialized.\n");
+ drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n");
return 0;
error:
@@ -2408,7 +2328,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
default:
break;
}
- DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+ drm_err(adev_to_drm(adev), "Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
}
@@ -2426,7 +2346,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
if (r) {
- dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
+ drm_err(adev_to_drm(adev), "amdgpu_dm: Can't validate firmware \"%s\"\n",
fw_name_dmcu);
amdgpu_ucode_release(&adev->dm.fw_dmcu);
return r;
@@ -2551,7 +2471,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ drm_info(adev_to_drm(adev), "Loading DMUB firmware via PSP: version=0x%08X\n",
adev->dm.dmcub_fw_version);
}
@@ -2560,7 +2480,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_srv = adev->dm.dmub_srv;
if (!dmub_srv) {
- DRM_ERROR("Failed to allocate DMUB service!\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate DMUB service!\n");
return -ENOMEM;
}
@@ -2573,7 +2493,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
/* Create the DMUB service. */
status = dmub_srv_create(dmub_srv, &create_params);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error creating DMUB service: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error creating DMUB service: %d\n", status);
return -EINVAL;
}
@@ -2598,7 +2518,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
&region_info);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error calculating DMUB region info: %d\n", status);
return -EINVAL;
}
@@ -2627,14 +2547,14 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
fb_info = adev->dm.dmub_fb_info;
if (!fb_info) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"Failed to allocate framebuffer info for DMUB service!\n");
return -ENOMEM;
}
status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error calculating DMUB FB info: %d\n", status);
return -EINVAL;
}
@@ -2651,7 +2571,7 @@ static int dm_sw_init(struct amdgpu_ip_block *ip_block)
adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
if (!adev->dm.cgs_device) {
- DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to create cgs device.\n");
return -EINVAL;
}
@@ -2957,7 +2877,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
ret = amdgpu_dpm_write_watermarks_table(adev);
if (ret) {
- DRM_ERROR("Failed to update WMTABLE!\n");
+ drm_err(adev_to_drm(adev), "Failed to update WMTABLE!\n");
return ret;
}
@@ -2975,13 +2895,13 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
if (oem_ddc_service) {
oem_i2c = create_i2c(oem_ddc_service, true);
if (!oem_i2c) {
- dev_info(adev->dev, "Failed to create oem i2c adapter data\n");
+ drm_info(adev_to_drm(adev), "Failed to create oem i2c adapter data\n");
return -ENOMEM;
}
r = i2c_add_adapter(&oem_i2c->base);
if (r) {
- dev_info(adev->dev, "Failed to register oem i2c\n");
+ drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
kfree(oem_i2c);
return r;
}
@@ -3024,7 +2944,7 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block)
r = dm_oem_i2c_hw_init(adev);
if (r)
- dev_info(adev->dev, "Failed to add OEM i2c bus\n");
+ drm_info(adev_to_drm(adev), "Failed to add OEM i2c bus\n");
return 0;
}
@@ -3067,7 +2987,7 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
if (rc)
- DRM_WARN("Failed to %s pflip interrupts\n",
+ drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
if (enable) {
@@ -3077,14 +2997,14 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
if (rc)
- DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+ drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
/* During gpu-reset we disable and then enable vblank irq, so
* don't use amdgpu_irq_get/put() to avoid refcount change.
*/
if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
- DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
+ drm_warn(adev_to_drm(adev), "Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
}
}
@@ -3334,16 +3254,16 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
for (k = 0; k < dc_state->stream_count; k++) {
bundle->stream_update.stream = dc_state->streams[k];
- for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+ for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
bundle->surface_updates[m].surface =
- dc_state->stream_status->plane_states[m];
+ dc_state->stream_status[k].plane_states[m];
bundle->surface_updates[m].surface->force_full_update =
true;
}
update_planes_and_stream_adapter(dm->dc,
UPDATE_TYPE_FULL,
- dc_state->stream_status->plane_count,
+ dc_state->stream_status[k].plane_count,
dc_state->streams[k],
&bundle->stream_update,
bundle->surface_updates);
@@ -3460,11 +3380,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-
- /* leave display off for S4 sequence */
- if (adev->in_s4)
- return 0;
-
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_state_release(dm_state->context);
dm_state->context = dc_state_create(dm->dc, NULL);
@@ -3919,20 +3834,21 @@ static void handle_hpd_irq(void *param)
}
-static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
struct hpd_rx_irq_offload_work *offload_work =
kzalloc(sizeof(*offload_work), GFP_KERNEL);
if (!offload_work) {
- DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n");
return;
}
INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
offload_work->data = hpd_irq_data;
offload_work->offload_wq = offload_wq;
+ offload_work->adev = adev;
queue_work(offload_wq->wq, &offload_work->work);
DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
@@ -3974,7 +3890,7 @@ static void handle_hpd_rx_irq(void *param)
goto out;
if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -3996,7 +3912,7 @@ static void handle_hpd_rx_irq(void *param)
spin_unlock(&offload_wq->offload_lock);
if (!skip)
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -4013,7 +3929,7 @@ static void handle_hpd_rx_irq(void *param)
spin_unlock(&offload_wq->offload_lock);
if (!skip)
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -4023,7 +3939,7 @@ out:
if (result && !is_mst_root_connector) {
/* Downstream Port status changed. */
if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(dc_link);
@@ -4086,19 +4002,19 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
dmub_hpd_sense_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd sense callback");
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd sense callback");
return -EINVAL;
}
}
@@ -4119,7 +4035,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_HPD1 ||
int_params.irq_source > DC_IRQ_SOURCE_HPD6) {
- DRM_ERROR("Failed to register hpd irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register hpd irq!\n");
return -EINVAL;
}
@@ -4137,7 +4053,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_HPD1RX ||
int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) {
- DRM_ERROR("Failed to register hpd rx irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register hpd rx irq!\n");
return -EINVAL;
}
@@ -4179,7 +4095,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4190,7 +4106,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4209,7 +4125,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4220,7 +4136,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4238,7 +4154,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, client_id,
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4280,7 +4196,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4291,7 +4207,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4309,7 +4225,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
return r;
}
@@ -4320,7 +4236,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
- DRM_ERROR("Failed to register vupdate irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
return -EINVAL;
}
@@ -4339,7 +4255,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4350,7 +4266,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4368,7 +4284,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, client_id,
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4418,7 +4334,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4429,7 +4345,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4450,7 +4366,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
vrtl_int_srcid[i], &adev->vline0_irq);
if (r) {
- DRM_ERROR("Failed to add vline0 irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vline0 irq id!\n");
return r;
}
@@ -4461,7 +4377,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 ||
int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) {
- DRM_ERROR("Failed to register vline0 irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vline0 irq!\n");
return -EINVAL;
}
@@ -4489,7 +4405,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
return r;
}
@@ -4500,7 +4416,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
- DRM_ERROR("Failed to register vupdate irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
return -EINVAL;
}
@@ -4520,7 +4436,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4531,7 +4447,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4549,7 +4465,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
&adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4571,7 +4487,7 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
&adev->dmub_outbox_irq);
if (r) {
- DRM_ERROR("Failed to add outbox irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add outbox irq id!\n");
return r;
}
@@ -4803,41 +4719,54 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
return 1;
}
-static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
- uint32_t brightness)
+static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t *brightness)
{
- unsigned int min, max;
u8 prev_signal = 0, prev_lum = 0;
+ int i = 0;
- if (!get_brightness_range(caps, &min, &max))
- return brightness;
-
- for (int i = 0; i < caps->data_points; i++) {
- u8 signal, lum;
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
+ return;
- if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
- break;
+ if (!caps->data_points)
+ return;
- signal = caps->luminance_data[i].input_signal;
- lum = caps->luminance_data[i].luminance;
+ /* choose start to run less interpolation steps */
+ if (caps->luminance_data[caps->data_points/2].input_signal > *brightness)
+ i = caps->data_points/2;
+ do {
+ u8 signal = caps->luminance_data[i].input_signal;
+ u8 lum = caps->luminance_data[i].luminance;
/*
* brightness == signal: luminance is percent numerator
* brightness < signal: interpolate between previous and current luminance numerator
* brightness > signal: find next data point
*/
- if (brightness < signal)
- lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
- (brightness - prev_signal),
- signal - prev_signal);
- else if (brightness > signal) {
+ if (*brightness > signal) {
prev_signal = signal;
prev_lum = lum;
+ i++;
continue;
}
- brightness = DIV_ROUND_CLOSEST(lum * brightness, 101);
- break;
- }
+ if (*brightness < signal)
+ lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
+ (*brightness - prev_signal),
+ signal - prev_signal);
+ *brightness = DIV_ROUND_CLOSEST(lum * *brightness, 101);
+ return;
+ } while (i < caps->data_points);
+}
+
+static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned int min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ convert_custom_brightness(caps, &brightness);
// Rescale 0..255 to min..max
return min + DIV_ROUND_CLOSEST((max - min) * brightness,
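
For reference, a self-contained C sketch of the piecewise-linear curve walk introduced above. The curve points, the DIV_ROUND_CLOSEST macro, and apply_curve are illustrative stand-ins rather than real panel data or kernel helpers; the kernel version additionally starts from the table midpoint to shorten the walk:

#include <stdint.h>
#include <stdio.h>

struct lum_point {
	uint8_t input_signal;
	uint8_t luminance;
};

/* Hypothetical luminance curve, made up for illustration only. */
static const struct lum_point curve[] = {
	{   0,   0 },
	{  64,  10 },
	{ 128,  35 },
	{ 192,  70 },
	{ 255, 100 },
};

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static unsigned int apply_curve(unsigned int brightness)
{
	uint8_t prev_signal = 0, prev_lum = 0;
	size_t i;

	for (i = 0; i < sizeof(curve) / sizeof(curve[0]); i++) {
		uint8_t signal = curve[i].input_signal;
		uint8_t lum = curve[i].luminance;

		if (brightness > signal) {
			/* Requested level is past this point; remember it
			 * and keep walking the curve.
			 */
			prev_signal = signal;
			prev_lum = lum;
			continue;
		}
		if (brightness < signal)
			/* Interpolate the luminance numerator between the
			 * two surrounding curve points.
			 */
			lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
							   (brightness - prev_signal),
							   signal - prev_signal);
		/* Scale the request by the luminance percentage. */
		return DIV_ROUND_CLOSEST(lum * brightness, 101);
	}
	return brightness;
}

int main(void)
{
	printf("96 -> %u\n", apply_curve(96));   /* lands between 64 and 128 */
	return 0;
}
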
@@ -5016,10 +4945,10 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
dm->brightness[aconnector->bl_idx] = props.brightness;
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
- DRM_ERROR("DM: Backlight registration failed!\n");
+ drm_err(drm, "DM: Backlight registration failed!\n");
dm->backlight_dev[aconnector->bl_idx] = NULL;
} else
- DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+ drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
}
static int initialize_plane(struct amdgpu_display_manager *dm,
@@ -5033,7 +4962,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
if (!plane) {
- DRM_ERROR("KMS: Failed to allocate plane\n");
+ drm_err(adev_to_drm(dm->adev), "KMS: Failed to allocate plane\n");
return -ENOMEM;
}
plane->type = plane_type;
@@ -5051,7 +4980,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
if (ret) {
- DRM_ERROR("KMS: Failed to initialize plane\n");
+ drm_err(adev_to_drm(dm->adev), "KMS: Failed to initialize plane\n");
kfree(plane);
return ret;
}
@@ -5120,14 +5049,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize mode config\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize mode config\n");
return -EINVAL;
}
/* There is one primary plane per CRTC */
primary_planes = dm->dc->caps.max_streams;
if (primary_planes > AMDGPU_MAX_PLANES) {
- DRM_ERROR("DM: Plane nums out of 6 planes\n");
+ drm_err(adev_to_drm(adev), "DM: Plane nums out of 6 planes\n");
return -EINVAL;
}
@@ -5140,7 +5069,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (initialize_plane(dm, mode_info, i,
DRM_PLANE_TYPE_PRIMARY, plane)) {
- DRM_ERROR("KMS: Failed to initialize primary plane\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize primary plane\n");
goto fail;
}
}
@@ -5172,14 +5101,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (initialize_plane(dm, NULL, primary_planes + i,
DRM_PLANE_TYPE_OVERLAY, plane)) {
- DRM_ERROR("KMS: Failed to initialize overlay plane\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize overlay plane\n");
goto fail;
}
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
- DRM_ERROR("KMS: Failed to initialize crtc\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize crtc\n");
goto fail;
}
@@ -5199,7 +5128,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (register_outbox_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5249,7 +5178,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
if (link_cnt > MAX_LINKS) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"KMS: Cannot support more than %d display indexes\n",
MAX_LINKS);
goto fail;
@@ -5265,12 +5194,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
if (!wbcon) {
- DRM_ERROR("KMS: Failed to allocate writeback connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n");
continue;
}
if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
- DRM_ERROR("KMS: Failed to initialize writeback connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize writeback connector\n");
kfree(wbcon);
continue;
}
@@ -5290,12 +5219,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
- DRM_ERROR("KMS: Failed to initialize encoder\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize encoder\n");
goto fail;
}
if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
- DRM_ERROR("KMS: Failed to initialize connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize connector\n");
goto fail;
}
@@ -5304,7 +5233,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
aconnector;
if (!dc_link_detect_connection_type(link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(link);
@@ -5326,8 +5255,15 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (amdgpu_dm_set_replay_caps(link, aconnector))
psr_feature_enabled = false;
- if (psr_feature_enabled)
+ if (psr_feature_enabled) {
amdgpu_dm_set_psr_caps(link);
+ drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+ link->psr_settings.psr_feature_enabled,
+ link->psr_settings.psr_version,
+ link->dpcd_caps.psr_info.psr_version,
+ link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
+ link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
+ }
}
}
amdgpu_set_panel_orientation(&aconnector->base);
@@ -5341,7 +5277,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_OLAND:
if (dce60_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5363,7 +5299,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_VEGA12:
case CHIP_VEGA20:
if (dce110_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5391,12 +5327,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (dcn10_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
default:
- DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
+ drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%X\n",
amdgpu_ip_version(adev, DCE_HWIP, 0));
goto fail;
}
@@ -5557,7 +5493,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block)
/* if there is no object header, skip DM */
if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
- dev_info(adev->dev, "No object header, skipping DM\n");
+ drm_info(adev_to_drm(adev), "No object header, skipping DM\n");
return -ENOENT;
}
@@ -5669,7 +5605,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block)
adev->mode_info.num_dig = 4;
break;
default:
- DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
+ drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%x\n",
amdgpu_ip_version(adev, DCE_HWIP, 0));
return -EINVAL;
}
@@ -5818,7 +5754,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
break;
default:
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"Unsupported screen format %p4cc\n",
&fb->format->format);
return -EINVAL;
@@ -6339,6 +6275,7 @@ static void fill_stream_properties_from_drm_display_mode(
struct amdgpu_dm_connector *aconnector = NULL;
struct hdmi_vendor_infoframe hv_frame;
struct hdmi_avi_infoframe avi_frame;
+ ssize_t err;
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
aconnector = to_amdgpu_dm_connector(connector);
@@ -6385,9 +6322,17 @@ static void fill_stream_properties_from_drm_display_mode(
}
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
- drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame,
+ (struct drm_connector *)connector,
+ mode_in);
+ if (err < 0)
+ drm_warn_once(connector->dev, "Failed to set up avi infoframe on connector %s: %zd\n", connector->name, err);
timing_out->vic = avi_frame.video_code;
- drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame,
+ (struct drm_connector *)connector,
+ mode_in);
+ if (err < 0)
+ drm_warn_once(connector->dev, "Failed to set up vendor infoframe on connector %s: %zd\n", connector->name, err);
timing_out->hdmi_vic = hv_frame.vic;
}
@@ -6500,19 +6445,19 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
const struct drm_display_mode *native_mode,
bool scale_enabled)
{
- if (scale_enabled) {
- copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
- } else if (native_mode->clock == drm_mode->clock &&
- native_mode->htotal == drm_mode->htotal &&
- native_mode->vtotal == drm_mode->vtotal) {
- copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ if (scale_enabled || (
+ native_mode->clock == drm_mode->clock &&
+ native_mode->htotal == drm_mode->htotal &&
+ native_mode->vtotal == drm_mode->vtotal)) {
+ if (native_mode->crtc_clock)
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
} else {
/* no scaling nor amdgpu inserted, no need to patch */
}
}
static struct dc_sink *
-create_fake_sink(struct dc_link *link)
+create_fake_sink(struct drm_device *dev, struct dc_link *link)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;
@@ -6522,7 +6467,7 @@ create_fake_sink(struct dc_link *link)
sink = dc_sink_create(&sink_init_data);
if (!sink) {
- DRM_ERROR("Failed to create sink!\n");
+ drm_err(dev, "Failed to create sink!\n");
return NULL;
}
sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
@@ -6655,7 +6600,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
m_pref = list_first_entry_or_null(
&aconnector->base.modes, struct drm_display_mode, head);
if (!m_pref) {
- DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
+ drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n");
return NULL;
}
}
@@ -6830,7 +6775,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n",
+ drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from SST RX\n",
__func__, drm_connector->name);
}
} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
@@ -6850,7 +6795,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
+ drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
__func__, drm_connector->name);
}
}
@@ -6878,6 +6823,7 @@ create_stream_for_sink(struct drm_connector *connector,
const struct dc_stream_state *old_stream,
int requested_bpc)
{
+ struct drm_device *dev = connector->dev;
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_display_mode *preferred_mode = NULL;
const struct drm_connector_state *con_state = &dm_state->base;
@@ -6900,11 +6846,6 @@ create_stream_for_sink(struct drm_connector *connector,
drm_mode_init(&mode, drm_mode);
memset(&saved_mode, 0, sizeof(saved_mode));
- if (connector == NULL) {
- DRM_ERROR("connector is NULL!\n");
- return stream;
- }
-
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
aconnector = NULL;
aconnector = to_amdgpu_dm_connector(connector);
@@ -6919,7 +6860,7 @@ create_stream_for_sink(struct drm_connector *connector,
}
if (!aconnector || !aconnector->dc_sink) {
- sink = create_fake_sink(link);
+ sink = create_fake_sink(dev, link);
if (!sink)
return stream;
@@ -6931,7 +6872,7 @@ create_stream_for_sink(struct drm_connector *connector,
stream = dc_create_stream_for_sink(sink);
if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
+ drm_err(dev, "Failed to create stream for sink!\n");
goto finish;
}
@@ -6963,7 +6904,7 @@ create_stream_for_sink(struct drm_connector *connector,
* case, we call set mode ourselves to restore the previous mode
* and the modelist may not be filled in time.
*/
- DRM_DEBUG_DRIVER("No preferred mode found\n");
+ drm_dbg_driver(dev, "No preferred mode found\n");
} else if (aconnector) {
recalculate_timing = amdgpu_freesync_vid_mode &&
is_freesync_video_mode(&mode, aconnector);
@@ -7413,6 +7354,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
const struct drm_edid *drm_edid;
struct i2c_adapter *ddc;
+ struct drm_device *dev = connector->dev;
if (dc_link && dc_link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
@@ -7422,7 +7364,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
- DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ drm_err(dev, "No EDID found on connector: %s.\n", connector->name);
return;
}
@@ -7481,7 +7423,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
- DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ drm_err(connector->dev, "No EDID found on connector: %s.\n", connector->name);
return;
}
@@ -7615,7 +7557,7 @@ create_validate_stream_for_sink(struct drm_connector *connector,
dm_state, old_stream,
requested_bpc);
if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
+ drm_err(adev_to_drm(adev), "Failed to create stream for sink!\n");
break;
}
@@ -7690,7 +7632,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
aconnector->base.force != DRM_FORCE_ON) {
- DRM_ERROR("dc_sink is NULL!\n");
+ drm_err(connector->dev, "dc_sink is NULL!\n");
goto fail;
}
@@ -8598,7 +8540,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
i2c = create_i2c(link->ddc, false);
if (!i2c) {
- DRM_ERROR("Failed to create i2c adapter data\n");
+ drm_err(adev_to_drm(dm->adev), "Failed to create i2c adapter data\n");
return -ENOMEM;
}
@@ -8606,7 +8548,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
res = i2c_add_adapter(&i2c->base);
if (res) {
- DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index);
goto out_free;
}
@@ -8620,7 +8562,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
&i2c->base);
if (res) {
- DRM_ERROR("connector_init failed\n");
+ drm_err(adev_to_drm(dm->adev), "connector_init failed\n");
aconnector->connector_id = -1;
goto out_free;
}
@@ -9110,7 +9052,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
- DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
+ drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
} else if (old_vrr_active && !new_vrr_active) {
/* Transition VRR active -> inactive:
@@ -9118,7 +9060,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
- DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
+ drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
}
}
@@ -9205,13 +9147,13 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
if (crtc_state->stream) {
if (!dc_stream_set_cursor_attributes(crtc_state->stream,
&attributes))
- DRM_ERROR("DC failed to set cursor attributes\n");
+ drm_err(adev_to_drm(adev), "DC failed to set cursor attributes\n");
update->cursor_attributes = &crtc_state->stream->cursor_attributes;
if (!dc_stream_set_cursor_position(crtc_state->stream,
&position))
- DRM_ERROR("DC failed to set cursor position\n");
+ drm_err(adev_to_drm(adev), "DC failed to set cursor position\n");
update->cursor_position = &crtc_state->stream->cursor_position;
}
@@ -9462,7 +9404,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].surface = dc_plane;
if (!bundle->surface_updates[planes_count].surface) {
- DRM_ERROR("No surface for CRTC: id=%d\n",
+ drm_err(dev, "No surface for CRTC: id=%d\n",
acrtc_attach->crtc_id);
continue;
}
@@ -9978,20 +9920,20 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm,
wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
if (!wb_info) {
- DRM_ERROR("Failed to allocate wb_info\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate wb_info\n");
return;
}
acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
if (!acrtc) {
- DRM_ERROR("no amdgpu_crtc found\n");
+ drm_err(adev_to_drm(adev), "no amdgpu_crtc found\n");
kfree(wb_info);
return;
}
afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
if (!afb) {
- DRM_ERROR("No amdgpu_framebuffer found\n");
+ drm_err(adev_to_drm(adev), "No amdgpu_framebuffer found\n");
kfree(wb_info);
return;
}
@@ -10212,7 +10154,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
enable_encryption = true;
- DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+ drm_info(adev_to_drm(adev), "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
if (aconnector->dc_link)
hdcp_update_display(
@@ -10304,7 +10246,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
*/
dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
if (!dummy_updates) {
- DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n");
continue;
}
for (j = 0; j < status->plane_count; j++)
@@ -10512,16 +10454,20 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
*/
conn_state = drm_atomic_get_connector_state(state, connector);
- ret = PTR_ERR_OR_ZERO(conn_state);
- if (ret)
+ /* Check for error in getting connector state */
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
goto out;
+ }
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
- ret = PTR_ERR_OR_ZERO(crtc_state);
- if (ret)
+ /* Check for error in getting crtc state */
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
goto out;
+ }
/* force a restore */
crtc_state->mode_changed = true;
@@ -10529,9 +10475,11 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
/* Attach plane to drm_atomic_state */
plane_state = drm_atomic_get_plane_state(state, plane);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (ret)
+ /* Check for error in getting plane state */
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
goto out;
+ }
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
@@ -10539,7 +10487,7 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
out:
drm_atomic_state_put(state);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(ddev, "Restoring old state failed with %i\n", ret);
return ret;
}
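The dm_force_atomic_commit() change above makes the error check explicit: PTR_ERR() is only taken when IS_ERR() says the pointer actually encodes an error. A simplified, self-contained sketch of that convention; the macros below are plain stand-ins for the kernel's ERR_PTR helpers, and demo_get_state() is hypothetical.

#include <stdio.h>

/* simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))

static void *demo_get_state(int fail)
{
	static int state;

	return fail ? ERR_PTR(-12 /* -ENOMEM */) : &state;
}

int main(void)
{
	void *s = demo_get_state(1);

	/* check for an error before using the pointer, as the patch now does */
	if (IS_ERR(s)) {
		printf("error %ld\n", PTR_ERR(s));
		return 1;
	}
	return 0;
}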
@@ -10623,7 +10571,7 @@ static int do_aquire_global_lock(struct drm_device *dev,
&commit->flip_done, 10*HZ);
if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ drm_err(dev, "[CRTC:%d:%s] hw_done or flip_done timed out\n",
crtc->base.id, crtc->name);
drm_crtc_commit_put(commit);
@@ -10739,6 +10687,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct dm_atomic_state *dm_state = NULL;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
struct dc_stream_state *new_stream;
+ struct amdgpu_device *adev = dm->adev;
int ret = 0;
/*
@@ -10768,8 +10717,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
drm_old_conn_state = drm_atomic_get_old_connector_state(state,
connector);
- if (IS_ERR(drm_new_conn_state)) {
- ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
+ if (WARN_ON(!drm_new_conn_state)) {
+ ret = -EINVAL;
goto fail;
}
@@ -10792,7 +10741,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
*/
if (!new_stream) {
- DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new stream for crtc %d\n",
__func__, acrtc->base.base.id);
ret = -ENOMEM;
goto fail;
@@ -10830,7 +10779,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
new_crtc_state->mode_changed = false;
- DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d",
new_crtc_state->mode_changed);
}
}
@@ -10868,7 +10817,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
is_timing_unchanged_for_freesync(new_crtc_state,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
- DRM_DEBUG_DRIVER(
+ drm_dbg_driver(adev_to_drm(adev),
"Mode change not required for front porch change, setting mode_changed to %d",
new_crtc_state->mode_changed);
@@ -10889,7 +10838,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (ret)
goto fail;
- DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n",
crtc->base.id);
/* i.e. reset mode */
@@ -11022,6 +10971,9 @@ static bool should_reset_plane(struct drm_atomic_state *state,
state->allow_modeset)
return true;
+ if (amdgpu_in_reset(adev) && state->allow_modeset)
+ return true;
+
/* Exit early if we know that we're adding or removing the plane. */
if (old_plane_state->crtc != new_plane_state->crtc)
return true;
@@ -11739,7 +11691,7 @@ static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
old_plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(new_plane_state) || IS_ERR(old_plane_state)) {
- DRM_ERROR("Failed to get plane state for plane %s\n", plane->name);
+ drm_err(dev, "Failed to get plane state for plane %s\n", plane->name);
return false;
}
@@ -12308,7 +12260,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
if (!res) {
- DRM_ERROR("EDID CEA parser failed\n");
+ drm_err(adev_to_drm(dm->adev), "EDID CEA parser failed\n");
return false;
}
@@ -12316,7 +12268,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
if (output->type == DMUB_CMD__EDID_CEA_ACK) {
if (!output->ack.success) {
- DRM_ERROR("EDID CEA ack failed at offset %d\n",
+ drm_err(adev_to_drm(dm->adev), "EDID CEA ack failed at offset %d\n",
output->ack.offset);
}
} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
@@ -12328,7 +12280,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
} else {
- DRM_WARN("Unknown EDID CEA parser results\n");
+ drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n");
return false;
}
@@ -12544,7 +12496,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
if (!connector->state) {
- DRM_ERROR("%s - Connector has no state", __func__);
+ drm_err(adev_to_drm(adev), "%s - Connector has no state", __func__);
goto update;
}
@@ -12729,7 +12681,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
}
if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
- DRM_ERROR("wait_for_completion_timeout timeout!");
+ drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
*operation_result = AUX_RET_ERROR_TIMEOUT;
goto out;
}
@@ -12739,31 +12691,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
* Transient states before tunneling is enabled could
* lead to this error. We can ignore this for now.
*/
- if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
- DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+ if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
+ drm_warn(adev_to_drm(adev), "DPIA AUX failed on 0x%x(%d), error %d\n",
payload->address, payload->length,
p_notify->result);
}
- *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ *operation_result = p_notify->result;
goto out;
}
+ payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF;
+ if (adev->dm.dmub_notify->aux_reply.command & 0xF0)
+ /* The reply is stored in the top nibble of the command. */
+ payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
- payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
- if (!payload->write && p_notify->aux_reply.length &&
- (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
-
- if (payload->length != p_notify->aux_reply.length) {
- DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
- p_notify->aux_reply.length,
- payload->address, payload->length);
- *operation_result = AUX_RET_ERROR_INVALID_REPLY;
- goto out;
- }
-
+ /* a write request may also receive a byte indicating the number of partially written bytes */
+ if (p_notify->aux_reply.length)
memcpy(payload->data, p_notify->aux_reply.data,
p_notify->aux_reply.length);
- }
/* success */
ret = p_notify->aux_reply.length;
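The reply handling above stores the DMUB AUX reply taken from the low nibble of the command byte and, when the top nibble is non-zero, takes the reply from there instead. A tiny illustrative helper mirroring just that selection; demo_pick_aux_reply() is hypothetical, not a driver function.

#include <stdint.h>
#include <stdio.h>

/* mirror the patch's nibble selection: prefer the top nibble when it is set */
static uint8_t demo_pick_aux_reply(uint8_t command)
{
	uint8_t reply = command & 0xF;

	if (command & 0xF0)
		reply = (command >> 4) & 0xF;

	return reply;
}

int main(void)
{
	printf("0x%x 0x%x\n", demo_pick_aux_reply(0x02), demo_pick_aux_reply(0x20));
	return 0;
}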
@@ -12774,6 +12719,79 @@ out:
return ret;
}
+static void abort_fused_io(
+ struct dc_context *ctx,
+ const struct dmub_cmd_fused_request *request
+)
+{
+ union dmub_rb_cmd command = { 0 };
+ struct dmub_rb_cmd_fused_io *io = &command.fused_io;
+
+ io->header.type = DMUB_CMD__FUSED_IO;
+ io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT;
+ io->header.payload_bytes = sizeof(*io) - sizeof(io->header);
+ io->request = *request;
+ dm_execute_dmub_cmd(ctx, &command, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
+static bool execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_context *ctx,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+)
+{
+ const uint8_t ddc_line = commands[0].fused_io.request.u.aux.ddc_line;
+
+ if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io))
+ return false;
+
+ struct fused_io_sync *sync = &dev->dm.fused_io[ddc_line];
+ struct dmub_rb_cmd_fused_io *first = &commands[0].fused_io;
+ const bool result = dm_execute_dmub_cmd_list(ctx, count, commands, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+ && first->header.ret_status
+ && first->request.status == FUSED_REQUEST_STATUS_SUCCESS;
+
+ if (!result)
+ return false;
+
+ while (wait_for_completion_timeout(&sync->replied, usecs_to_jiffies(timeout_us))) {
+ reinit_completion(&sync->replied);
+
+ struct dmub_cmd_fused_request *reply = (struct dmub_cmd_fused_request *) sync->reply_data;
+
+ static_assert(sizeof(*reply) <= sizeof(sync->reply_data), "Size mismatch");
+
+ if (reply->identifier == first->request.identifier) {
+ first->request = *reply;
+ return true;
+ }
+ }
+
+ reinit_completion(&sync->replied);
+ first->request.status = FUSED_REQUEST_STATUS_TIMEOUT;
+ abort_fused_io(ctx, &first->request);
+ return false;
+}
+
+bool amdgpu_dm_execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us)
+{
+ struct amdgpu_display_manager *dm = &dev->dm;
+
+ mutex_lock(&dm->dpia_aux_lock);
+
+ const bool result = execute_fused_io(dev, link->ctx, commands, count, timeout_us);
+
+ mutex_unlock(&dm->dpia_aux_lock);
+ return result;
+}
+
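execute_fused_io() above follows a submit/wait/abort pattern: issue the command list, wait on a completion signalled by the DMUB notification handler, accept only the reply whose identifier matches the request, and abort on timeout. The identifier-matching part of that loop, reduced to a self-contained sketch; the demo_* names are hypothetical and demo_next_reply() stands in for waiting on the completion.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_reply { uint32_t identifier; int status; };

/* stand-in for waiting on the completion; replays a fixed reply sequence */
static bool demo_next_reply(struct demo_reply *out)
{
	static const struct demo_reply replies[] = { { 7, 0 }, { 42, 0 } };
	static unsigned int i;

	if (i >= sizeof(replies) / sizeof(replies[0]))
		return false;			/* models the wait timing out */
	*out = replies[i++];
	return true;
}

/* keep waiting until the reply for @want arrives, skipping unrelated ones */
static bool demo_wait_for(uint32_t want, struct demo_reply *out)
{
	while (demo_next_reply(out)) {
		if (out->identifier == want)
			return true;
	}
	return false;				/* caller would abort the request */
}

int main(void)
{
	struct demo_reply r;

	printf("matched: %s\n", demo_wait_for(42, &r) ? "yes" : "no");
	return 0;
}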
int amdgpu_dm_process_dmub_set_config_sync(
struct dc_context *ctx,
unsigned int link_index,
@@ -12792,7 +12810,7 @@ int amdgpu_dm_process_dmub_set_config_sync(
ret = 0;
*operation_result = adev->dm.dmub_notify->sc_status;
} else {
- DRM_ERROR("wait_for_completion_timeout timeout!");
+ drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
ret = -1;
*operation_result = SET_CONFIG_UNKNOWN_ERROR;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 385faaca6e26..d7d92f9911e4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -50,7 +50,7 @@
#define AMDGPU_DM_MAX_NUM_EDP 2
-#define AMDGPU_DMUB_NOTIFICATION_MAX 7
+#define AMDGPU_DMUB_NOTIFICATION_MAX 8
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
@@ -81,6 +81,7 @@ struct amdgpu_bo;
struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;
+struct dmub_cmd_fused_request;
struct amd_vsdb_block {
unsigned char ieee_id[3];
@@ -276,6 +277,10 @@ struct hpd_rx_irq_offload_work {
* @offload_wq: offload work queue that this work is queued to
*/
struct hpd_rx_irq_offload_work_queue *offload_wq;
+ /**
+ * @adev: amdgpu_device pointer
+ */
+ struct amdgpu_device *adev;
};
/**
@@ -614,6 +619,13 @@ struct amdgpu_display_manager {
bool aux_hpd_discon_quirk;
/**
+ * @edp0_on_dp1_quirk:
+ *
+ * quirk for platforms that put edp0 on DP1.
+ */
+ bool edp0_on_dp1_quirk;
+
+ /**
* @dpia_aux_lock:
*
* Guards access to DPIA AUX
@@ -633,6 +645,16 @@ struct amdgpu_display_manager {
* OEM i2c bus
*/
struct amdgpu_i2c_adapter *oem_i2c;
+
+ /**
+ * @fused_io:
+ *
+ * dmub fused io interface
+ */
+ struct fused_io_sync {
+ struct completion replied;
+ char reply_data[0x40]; // Cannot include dmub_cmd here
+ } fused_io[8];
};
enum dsc_clock_force_state {
@@ -1012,6 +1034,14 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
struct aux_payload *payload, enum aux_return_code_type *operation_result);
+bool amdgpu_dm_execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+);
+
int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index,
struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
@@ -1045,4 +1075,6 @@ void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector);
void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector);
int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector);
+void retrieve_dmi_info(struct amdgpu_display_manager *dm);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 36a830a7440f..e8bdd7f0c460 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -113,6 +113,7 @@ bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
*
* Panel Replay and PSR SU
* - Enable when:
+ * - VRR is disabled
* - vblank counter is disabled
* - entry is allowed: usermode demonstrates an adequate number of fast
* commits)
@@ -131,19 +132,20 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
bool is_sr_active = (link->replay_settings.replay_allow_active ||
link->psr_settings.psr_allow_active);
bool is_crc_window_active = false;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
is_crc_window_active =
amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
#endif
- if (link->replay_settings.replay_feature_enabled &&
+ if (link->replay_settings.replay_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
amdgpu_dm_replay_enable(vblank_work->stream, true);
} else if (vblank_enabled) {
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
amdgpu_dm_psr_disable(vblank_work->stream, false);
- } else if (link->psr_settings.psr_feature_enabled &&
+ } else if (link->psr_settings.psr_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
struct amdgpu_dm_connector *aconn =
@@ -244,6 +246,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+ int r;
mutex_lock(&dm->dc_lock);
@@ -271,8 +275,15 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
- if (dm->active_vblank_irq_count == 0)
+ if (dm->active_vblank_irq_count == 0) {
+ r = amdgpu_dpm_pause_power_profile(adev, true);
+ if (r)
+ dev_warn(adev->dev, "failed to set default power profile mode\n");
dc_allow_idle_optimizations(dm->dc, true);
+ r = amdgpu_dpm_pause_power_profile(adev, false);
+ if (r)
+ dev_warn(adev->dev, "failed to restore the power profile mode\n");
+ }
mutex_unlock(&dm->dc_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 5198a079b463..c16962256514 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -26,6 +26,7 @@
#include "amdgpu_dm_hdcp.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
+#include "dc_fused_io.h"
#include "dm_helpers.h"
#include <drm/display/drm_hdcp_helper.h>
#include "hdcp_psp.h"
@@ -76,6 +77,34 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
}
+static bool lp_atomic_write_poll_read_i2c(
+ void *handle,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ struct dc_link *link = handle;
+
+ return dm_atomic_write_poll_read_i2c(link, write, poll, read, poll_timeout_us, poll_mask_msb);
+}
+
+static bool lp_atomic_write_poll_read_aux(
+ void *handle,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ struct dc_link *link = handle;
+
+ return dm_atomic_write_poll_read_aux(link, write, poll, read, poll_timeout_us, poll_mask_msb);
+}
+
static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
{
struct ta_hdcp_shared_memory *hdcp_cmd;
@@ -173,6 +202,9 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
unsigned int conn_index = aconnector->base.index;
guard(mutex)(&hdcp_w->mutex);
+ drm_connector_get(&aconnector->base);
+ if (hdcp_w->aconnector[conn_index])
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
hdcp_w->aconnector[conn_index] = aconnector;
memset(&link_adjust, 0, sizeof(link_adjust));
@@ -220,7 +252,6 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
unsigned int conn_index = aconnector->base.index;
guard(mutex)(&hdcp_w->mutex);
- hdcp_w->aconnector[conn_index] = aconnector;
/* the removal of display will invoke auth reset -> hdcp destroy and
* we'd expect the Content Protection (CP) property changed back to
@@ -236,7 +267,10 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
}
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
-
+ if (hdcp_w->aconnector[conn_index]) {
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = NULL;
+ }
process_output(hdcp_w);
}
@@ -254,6 +288,10 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
hdcp_w->encryption_status[conn_index] =
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ if (hdcp_w->aconnector[conn_index]) {
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = NULL;
+ }
}
process_output(hdcp_w);
@@ -488,6 +526,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct hdcp_workqueue *hdcp_work = handle;
struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
int link_index = aconnector->dc_link->link_index;
+ unsigned int conn_index = aconnector->base.index;
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -544,7 +583,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
guard(mutex)(&hdcp_w->mutex);
mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
-
+ drm_connector_get(&aconnector->base);
+ if (hdcp_w->aconnector[conn_index])
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = aconnector;
process_output(hdcp_w);
}
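The HDCP changes above take a drm_connector reference before caching the pointer and drop the reference held on whatever was previously cached in that slot. The same discipline, reduced to a self-contained sketch with a plain counter instead of a drm_connector; all demo_* names are hypothetical.

#include <stdio.h>

struct demo_obj { int refcount; };

static void demo_get(struct demo_obj *o) { o->refcount++; }
static void demo_put(struct demo_obj *o) { o->refcount--; }

/*
 * Cache @new_obj in *@slot: take a reference on the new object before
 * dropping the one held on whatever was cached there, mirroring the
 * drm_connector_get()/drm_connector_put() ordering added above.
 */
static void demo_cache(struct demo_obj **slot, struct demo_obj *new_obj)
{
	demo_get(new_obj);
	if (*slot)
		demo_put(*slot);
	*slot = new_obj;
}

int main(void)
{
	struct demo_obj a = { 1 }, b = { 1 };
	struct demo_obj *slot = NULL;

	demo_cache(&slot, &a);
	demo_cache(&slot, &b);
	printf("%d %d\n", a.refcount, b.refcount);	/* prints 1 2 */
	return 0;
}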
@@ -719,7 +761,10 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
- hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
+ struct mod_hdcp_config *config = &hdcp_work[i].hdcp.config;
+ struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs;
+
+ config->psp.handle = &adev->psp;
if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
dc->ctx->dce_version == DCN_VERSION_3_14 ||
dc->ctx->dce_version == DCN_VERSION_3_15 ||
@@ -727,12 +772,22 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
dc->ctx->dce_version == DCN_VERSION_3_51 ||
dc->ctx->dce_version == DCN_VERSION_3_6 ||
dc->ctx->dce_version == DCN_VERSION_3_16)
- hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1;
- hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
- hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
- hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
- hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
- hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
+ config->psp.caps.dtm_v3_supported = 1;
+ config->ddc.handle = dc_get_link_at_index(dc, i);
+
+ ddc_funcs->write_i2c = lp_write_i2c;
+ ddc_funcs->read_i2c = lp_read_i2c;
+ ddc_funcs->write_dpcd = lp_write_dpcd;
+ ddc_funcs->read_dpcd = lp_read_dpcd;
+
+ config->debug.lc_enable_sw_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
+ if (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable) {
+ ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
+ ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;
+ } else {
+ ddc_funcs->atomic_write_poll_read_i2c = NULL;
+ ddc_funcs->atomic_write_poll_read_aux = NULL;
+ }
memset(hdcp_work[i].aconnector, 0,
sizeof(struct amdgpu_dm_connector *) *
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 2cd35392e2da..d4395b92fb85 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -630,6 +630,19 @@ bool dm_helpers_submit_i2c(
return result;
}
+bool dm_helpers_execute_fused_io(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+)
+{
+ struct amdgpu_device *dev = ctx->driver_context;
+
+ return amdgpu_dm_execute_fused_io(dev, link, commands, count, timeout_us);
+}
+
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
bool is_write_cmd,
unsigned char cmd,
@@ -918,7 +931,7 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
{
struct drm_connector *connector = data;
struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev);
- unsigned char start = block * EDID_LENGTH;
+ unsigned short start = block * EDID_LENGTH;
struct edid *edid;
int r;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 7ceedf626d23..25e8befbcc47 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -51,6 +51,9 @@
#define PEAK_FACTOR_X1000 1006
+/*
+ * This function handles both native AUX and I2C-Over-AUX transactions.
+ */
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -59,6 +62,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum aux_return_code_type operation_result;
struct amdgpu_device *adev;
struct ddc_service *ddc;
+ uint8_t copy[16];
if (WARN_ON(msg->size > 16))
return -E2BIG;
@@ -74,6 +78,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
payload.defer_delay = 0;
+ if (payload.write) {
+ memcpy(copy, msg->buffer, msg->size);
+ payload.data = copy;
+ }
+
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
&operation_result);
@@ -87,15 +96,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
if (adev->dm.aux_hpd_discon_quirk) {
if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
operation_result == AUX_RET_ERROR_HPD_DISCON) {
- result = 0;
+ result = msg->size;
operation_result = AUX_RET_SUCCESS;
}
}
- if (payload.write && result >= 0)
- result = msg->size;
+ /*
+	 * A result of 0 also covers the AUX_DEFER and I2C_DEFER cases.
+ */
+ if (payload.write && result >= 0) {
+ if (result) {
+ /* one byte indicating the number of partially written bytes */
+ drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n");
+ result = payload.data[0];
+ } else if (!payload.reply[0])
+ /*I2C_ACK|AUX_ACK*/
+ result = msg->size;
+ }
- if (result < 0)
+ if (result < 0) {
switch (operation_result) {
case AUX_RET_SUCCESS:
break;
@@ -114,6 +133,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
+ drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
+ }
+
+ if (payload.reply[0])
+ drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
+ payload.reply[0]);
+
return result;
}
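The write handling added above distinguishes three outcomes for a non-negative result: a positive raw result means the sink reported how many bytes were actually written (carried in the first data byte), zero with an ACK reply means the whole message was written, and zero with any other reply models a defer. The same decision, reduced to a self-contained sketch; demo_write_result() and its sample values are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* interpret a DC AUX write result as the dm_dp_aux_transfer() change does */
static int demo_write_result(int raw_result, uint8_t first_data_byte,
			     uint8_t reply, int msg_size)
{
	if (raw_result < 0)
		return raw_result;		/* transport error */
	if (raw_result > 0)
		return first_data_byte;		/* partial write reported by sink */
	if (reply == 0)
		return msg_size;		/* I2C_ACK | AUX_ACK: fully written */
	return 0;				/* deferred, nothing written yet */
}

int main(void)
{
	printf("%d\n", demo_write_result(1, 3, 0, 16));	/* 3 bytes written */
	return 0;
}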
@@ -1713,16 +1739,17 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
struct dc_dsc_bw_range *bw_range)
{
struct dc_dsc_policy dsc_policy = {0};
+ bool is_dsc_possible;
dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
- dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
- stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
- dsc_policy.min_target_bpp * 16,
- dsc_policy.max_target_bpp * 16,
- &stream->sink->dsc_caps.dsc_dec_caps,
- &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
-
- return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
+ is_dsc_possible = dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp * 16,
+ dsc_policy.max_target_bpp * 16,
+ &stream->sink->dsc_caps.dsc_dec_caps,
+ &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
+
+ return is_dsc_possible;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 3e0f45f1711c..b7c6e8d13435 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -948,13 +948,13 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
r = amdgpu_bo_reserve(rbo, true);
if (r) {
- dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r);
return r;
}
r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
if (r) {
- dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
goto error_unlock;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index e140b7a04d72..f984cb0cb889 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -87,14 +87,6 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link)
link->psr_settings.psr_feature_enabled = true;
}
-
- DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
- link->psr_settings.psr_feature_enabled,
- link->psr_settings.psr_version,
- link->dpcd_caps.psr_info.psr_version,
- link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
- link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
-
}
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c
new file mode 100644
index 000000000000..1da07ebf9217
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/dmi.h>
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+
+struct amdgpu_dm_quirks {
+ bool aux_hpd_discon;
+ bool support_edp0_on_dp1;
+};
+
+static struct amdgpu_dm_quirks quirk_entries = {
+ .aux_hpd_discon = false,
+ .support_edp0_on_dp1 = false
+};
+
+static int edp0_on_dp1_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.support_edp0_on_dp1 = true;
+ return 0;
+}
+
+static int aux_hpd_discon_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.aux_hpd_discon = true;
+ return 0;
+}
+
+static const struct dmi_system_id dmi_quirk_table[] = {
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
+ },
+ },
+ {}
+ /* TODO: refactor this from a fixed table to a dynamic option */
+};
+
+void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+ struct drm_device *dev = dm->ddev;
+ int dmi_id;
+
+ dm->aux_hpd_discon_quirk = false;
+ dm->edp0_on_dp1_quirk = false;
+
+ dmi_id = dmi_check_system(dmi_quirk_table);
+
+ if (!dmi_id)
+ return;
+
+ if (quirk_entries.aux_hpd_discon) {
+ dm->aux_hpd_discon_quirk = true;
+ drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ }
+ if (quirk_entries.support_edp0_on_dp1) {
+ dm->edp0_on_dp1_quirk = true;
+ drm_info(dev, "support_edp0_on_dp1 attached\n");
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 0d5fefb0f591..d9527c05fc87 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -102,13 +102,13 @@ static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector
r = amdgpu_bo_reserve(rbo, true);
if (r) {
- dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r);
return r;
}
r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
if (r) {
- dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
goto error_unlock;
}
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 3e1f5b689718..3c9ecea7eebc 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -53,31 +53,30 @@ DC_LIBS += hdcp
ifdef CONFIG_DRM_AMD_DC_FP
DC_LIBS += sspl
-DC_SPL_TRANS += dc_spl_translate.o
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, dc_spl_translate.o)
endif
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
-DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o
+FILES =
+FILES += dc_dmub_srv.o
+FILES += dc_edid_parser.o
+FILES += dc_fused_io.o
+FILES += dc_helper.o
+FILES += core/dc.o
+FILES += core/dc_debug.o
+FILES += core/dc_hw_sequencer.o
+FILES += core/dc_link_enc_cfg.o
+FILES += core/dc_link_exports.o
+FILES += core/dc_resource.o
+FILES += core/dc_sink.o
+FILES += core/dc_stat.o
+FILES += core/dc_state.o
+FILES += core/dc_stream.o
+FILES += core/dc_surface.o
+FILES += core/dc_vm_helper.o
+
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, $(FILES))
-DISPLAY_CORE += dc_vm_helper.o
-
-AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
-
-AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
-
-AMD_DC_SPL_TRANS = $(addprefix $(AMDDALPATH)/dc/,$(DC_SPL_TRANS))
-
-AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
-AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
-
-DC_DMUB += dc_dmub_srv.o
-DC_EDID += dc_edid_parser.o
-AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
-AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
-AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
-
-AMD_DISPLAY_FILES += $(AMD_DC_SPL_TRANS)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 88d3f9d7dd55..452206b5095e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -51,8 +51,6 @@ static inline unsigned long long complete_integer_division_u64(
{
unsigned long long result;
- ASSERT(divisor);
-
result = div64_u64_rem(dividend, divisor, remainder);
return result;
@@ -213,9 +211,6 @@ struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg)
* @note
* Good idea to use Newton's method
*/
-
- ASSERT(arg.value);
-
return dc_fixpt_from_fraction(
dc_fixpt_one.value,
arg.value);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 3bacf470f7c5..67f08495b7e6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2384,10 +2384,10 @@ static enum bp_result get_integrated_info_v8(
}
/*
- * get_integrated_info_v8
+ * get_integrated_info_v9
*
* @brief
- * Get V8 integrated BIOS information
+ * Get V9 integrated BIOS information
*
* @param
* bios_parser *bp - [in]BIOS parser handler to get master data table
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 19897fa52e7e..d82a52319088 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -142,17 +142,3 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
return actual_dispclk_set_mhz * 1000;
}
-
-int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
-{
- int actual_dprefclk_set_mhz = -1;
-
- actual_dprefclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDprefclkFreq,
- khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
-
- /* TODO: add code for programing DP DTO, currently this is down by command table */
-
- return actual_dprefclk_set_mhz * 1000;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
index 083cb3158859..81d7c912549c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
@@ -27,6 +27,5 @@
#define DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_
int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
-int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 23b390245b5d..5a633333dbb5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -164,20 +164,6 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
return actual_dispclk_set_mhz * 1000;
}
-int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
-{
- int actual_dprefclk_set_mhz = -1;
-
- actual_dprefclk_set_mhz = rn_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDprefclkFreq,
- khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
-
- /* TODO: add code for programing DP DTO, currently this is down by command table */
-
- return actual_dprefclk_set_mhz * 1000;
-}
-
int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
int actual_dcfclk_set_mhz = -1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
index 1ce19d875358..f76fad87f0e1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
@@ -30,7 +30,6 @@ enum dcn_pwr_state;
int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr);
int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
-int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz);
int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 2d14346b680e..478b4d6a3544 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -49,12 +49,9 @@ static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E0000
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
+
+#define CTX clk_mgr->base.ctx
+#define IND_REG(offset) offset
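+/* CTX and IND_REG() back the IX_REG_SET_SYNC/IX_REG_GET_SYNC helpers used below for the indirect register access. */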
#define regBIF_BX_PF2_RSMU_INDEX 0x0000
#define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1
@@ -67,9 +64,6 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define FN(reg_name, field) \
FD(reg_name##__##field)
-#define REG_NBIO(reg_name) \
- (NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name)
-
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
@@ -77,6 +71,13 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define mmMP1_C2PMSG_3 0x3B1050C
+#define reg__MP1_C2PMSG_3_MASK (0xFFFFFFFF)
+#define reg__MP1_C2PMSG_3__SHIFT (0)
+
+
+#define data_reg_name__MP1_C2PMSG_3_MASK (0xFFFFFFFF)
+#define data_reg_name__MP1_C2PMSG_3__SHIFT (0)
+
#define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define VBIOSSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define VBIOSSMC_MSG_Spare0 0x03 ///< Spare0
@@ -153,12 +154,10 @@ static int dcn315_smu_send_msg_with_param(
for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
/* Trigger the message transaction by writing the message ID */
- generic_write_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3, msg_id);
- read_back_data = generic_read_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3);
+ IX_REG_SET_SYNC(mmMP1_C2PMSG_3, 0,
+ MP1_C2PMSG_3, msg_id);
+ IX_REG_GET_SYNC(mmMP1_C2PMSG_3,
+ MP1_C2PMSG_3, &read_back_data);
if (read_back_data == msg_id)
break;
udelay(2);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
index 6a6ae618650b..4607eff07253 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
@@ -65,6 +65,7 @@
#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
#define mmCLK5_spll_field_8 0x1B04B
+#define mmCLK6_spll_field_8 0x1B24B
#define mmDENTIST_DISPCLK_CNTL 0x0124
#define regDENTIST_DISPCLK_CNTL 0x0064
#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 142de8938d7c..bb1ac12a2b09 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -90,6 +90,7 @@
#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
#define mmCLK5_spll_field_8 0x1B24B
+#define mmCLK6_spll_field_8 0x1B24B
#define mmDENTIST_DISPCLK_CNTL 0x0124
#define regDENTIST_DISPCLK_CNTL 0x0064
#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
@@ -116,6 +117,7 @@
#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+#define CLK6_spll_field_8__spll_ssc_en_MASK 0x00002000L
#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
#undef FN
@@ -596,7 +598,11 @@ static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
uint32_t ssc_enable;
- ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
+ if (clk_mgr_base->ctx->dce_version == DCN_VERSION_3_51) {
+ ssc_enable = REG_READ(CLK6_spll_field_8) & CLK6_spll_field_8__spll_ssc_en_MASK;
+ } else {
+ ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
+ }
return ssc_enable != 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index f6f0e6a33001..604d256cb47a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -84,8 +84,8 @@
#define VBIOSSMC_MSG_AllowZstatesEntry 0x15
#define VBIOSSMC_MSG_DisallowZstatesEntry 0x16
#define VBIOSSMC_MSG_SetDtbClk 0x17
-#define VBIOSSMC_MSG_DispPsrEntry 0x18 ///< Display PSR entry, DMU
-#define VBIOSSMC_MSG_DispPsrExit 0x19 ///< Display PSR exit, DMU
+#define VBIOSSMC_MSG_DispIPS2Entry 0x18 ///< Display IPS2 entry, DMU
+#define VBIOSSMC_MSG_DispIPS2Exit 0x19 ///< Display IPS2 exit, DMU
#define VBIOSSMC_MSG_DisableLSdma 0x1A ///< Disable LSDMA; only sent by VBIOS
#define VBIOSSMC_MSG_DpControllerPhyStatus 0x1B ///< Inform PMFW about the preconditions for turning SLDO2 on/off. bit[0]==1 precondition is met, bit[1-2] are for DPPHY number
#define VBIOSSMC_MSG_QueryIPS2Support 0x1C ///< Return 1: support; else not supported
@@ -475,7 +475,7 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
retv = dcn35_smu_send_msg_with_param(
clk_mgr,
- VBIOSSMC_MSG_DispPsrExit,
+ VBIOSSMC_MSG_DispIPS2Exit,
0);
smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv);
return retv;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 28d1353f403d..56d011a1323c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -36,7 +36,9 @@
#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"
+#include "dc_plane.h"
#include "dc_plane_priv.h"
+#include "dc_stream_priv.h"
#include "gpio_service_interface.h"
#include "clk_mgr.h"
@@ -439,9 +441,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* Don't adjust DRR while there's bandwidth optimizations pending to
* avoid conflicting with firmware updates.
*/
- if (dc->ctx->dce_version > DCE_VERSION_MAX)
- if (dc->optimized_required || dc->wm_optimized_required)
+ if (dc->ctx->dce_version > DCE_VERSION_MAX) {
+ if (dc->optimized_required || dc->wm_optimized_required) {
+ stream->adjust.timing_adjust_pending = true;
return false;
+ }
+ }
dc_exit_ips_for_hw_access(dc);
@@ -1192,6 +1197,12 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
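+ /* Explicit mode: use the visual confirm color provided on the plane state as-is. */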
+ if (dc->debug.visual_confirm & VISUAL_CONFIRM_EXPLICIT) {
+ memcpy(&pipe_ctx->visual_confirm_color, &pipe_ctx->plane_state->visual_confirm_color,
+ sizeof(pipe_ctx->visual_confirm_color));
+ return;
+ }
+
if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
@@ -1225,6 +1236,51 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte
}
}
+void dc_get_visual_confirm_for_stream(
+ struct dc *dc,
+ struct dc_stream_state *stream_state,
+ struct tg_color *color)
+{
+ struct dc_stream_status *stream_status = dc_stream_get_status(stream_state);
+ struct pipe_ctx *pipe_ctx;
+ int i;
+ struct dc_plane_state *plane_state = NULL;
+
+ if (!stream_status)
+ return;
+
+ switch (dc->debug.visual_confirm) {
+ case VISUAL_CONFIRM_DISABLE:
+ return;
+ case VISUAL_CONFIRM_PSR:
+ case VISUAL_CONFIRM_FAMS:
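+ /* PSR/FAMS confirm colors are maintained by DMUB firmware, so query them via the DMUB service. */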
+ pipe_ctx = dc_stream_get_pipe_ctx(stream_state);
+ if (!pipe_ctx)
+ return;
+ dc_dmub_srv_get_visual_confirm_color_cmd(dc, pipe_ctx);
+ memcpy(color, &dc->ctx->dmub_srv->dmub->visual_confirm_color, sizeof(struct tg_color));
+ return;
+
+ default:
+ /* find plane with highest layer_index */
+ for (i = 0; i < stream_status->plane_count; i++) {
+ if (stream_status->plane_states[i]->visible)
+ plane_state = stream_status->plane_states[i];
+ }
+ if (!plane_state)
+ return;
+ /* find pipe that contains plane with highest layer index */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state == plane_state) {
+ memcpy(color, &pipe->visual_confirm_color, sizeof(struct tg_color));
+ return;
+ }
+ }
+ }
+}
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -2053,6 +2109,18 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.enable_accelerated_mode(dc, context);
}
+ if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ // Only delay OTG master for a given config
+ if (resource_is_pipe_type(pipe, OTG_MASTER)) {
+ // dc_commit_state_no_check is always a full update
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, pipe, false);
+ break;
+ }
+ }
+ }
+
if (context->stream_count > get_seamless_boot_stream_count(context) ||
context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
@@ -2117,6 +2185,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context);
+
+ if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe);
+ }
+ }
+
dc->hwss.interdependent_update_lock(dc, context, false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
@@ -2258,11 +2334,15 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
for (i = 0; i < params->stream_count; i++) {
struct dc_stream_state *stream = params->streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
+ struct dc_sink *sink = stream->sink;
/* revalidate streams */
- res = dc_validate_stream(dc, stream);
- if (res != DC_OK)
- return res;
+ if (!dc_is_virtual_signal(sink->sink_signal)) {
+ res = dc_validate_stream(dc, stream);
+ if (res != DC_OK)
+ return res;
+ }
+
dc_stream_log(dc, stream);
@@ -2815,7 +2895,7 @@ static enum surface_update_type check_update_surfaces_for_stream(
int i;
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
- if (dc->idle_optimizations_allowed)
+ if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc))
overall_type = UPDATE_TYPE_FULL;
if (stream_status == NULL || stream_status->plane_count != surface_count)
@@ -3168,7 +3248,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->crtc_timing_adjust) {
if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min ||
- stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max)
+ stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max ||
+ stream->adjust.timing_adjust_pending)
update->crtc_timing_adjust->timing_adjust_pending = true;
stream->adjust = *update->crtc_timing_adjust;
update->crtc_timing_adjust->timing_adjust_pending = false;
@@ -3219,7 +3300,7 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (dsc_validate_context) {
stream->timing.dsc_cfg = *update->dsc_config;
stream->timing.flags.DSC = enable_dsc;
- if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true) != DC_OK) {
stream->timing.dsc_cfg = old_dsc_cfg;
stream->timing.flags.DSC = old_dsc_enabled;
update->dsc_config = NULL;
@@ -3248,7 +3329,7 @@ static void backup_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
- scratch->plane_states[i] = *status->plane_states[i];
+ dc_plane_copy_config(&scratch->plane_states[i], status->plane_states[i]);
}
scratch->stream_state = *stream;
}
@@ -3264,10 +3345,7 @@ static void restore_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
- /* refcount will always be valid, restore everything else */
- struct kref refcount = status->plane_states[i]->refcount;
- *status->plane_states[i] = scratch->plane_states[i];
- status->plane_states[i]->refcount = refcount;
+ dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]);
}
*stream = scratch->stream_state;
}
@@ -3444,7 +3522,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
if (update_type == UPDATE_TYPE_FULL) {
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) {
BREAK_TO_DEBUGGER();
goto fail;
}
@@ -3998,6 +4076,7 @@ static void commit_planes_for_stream(struct dc *dc,
&context->res_ctx,
stream);
ASSERT(top_pipe_to_program != NULL);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -4048,6 +4127,9 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program);
}
+ if (dc->hwseq->funcs.wait_for_pipe_update_if_needed)
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type == UPDATE_TYPE_FAST);
+
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
@@ -4168,12 +4250,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FAST)
continue;
- ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
- if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
- /*turn off triple buffer for full update*/
- dc->hwss.program_triplebuffer(
- dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
- }
stream_status =
stream_get_status(context, pipe_ctx->stream);
@@ -4182,8 +4258,37 @@ static void commit_planes_for_stream(struct dc *dc,
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
}
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ /* Full FE update */
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ /* turn off triple buffer for full update */
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ }
+
if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
+
+ // Pipe stays busy until a specific frame and line number
+ if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe && update_type == UPDATE_TYPE_FULL) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe_ctx);
+ }
+ }
+
if (dc->debug.validate_dml_output) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
@@ -4523,7 +4628,7 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
/* commit minimal state */
- if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false) == DC_OK) {
/* prevent underflow and corruption when reconfiguring pipes */
force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
} else {
@@ -4958,6 +5063,9 @@ static bool full_update_required(struct dc *dc,
if (dc->idle_optimizations_allowed)
return true;
+ if (dc_can_clear_cursor_limit(dc))
+ return true;
+
return false;
}
@@ -5043,7 +5151,7 @@ static bool update_planes_and_stream_v1(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
if (update_type >= UPDATE_TYPE_FULL) {
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) {
DC_ERROR("Mode validation failed for stream update!\n");
dc_state_release(context);
return false;
@@ -6187,15 +6295,22 @@ bool dc_abm_save_restore(
void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
unsigned int i;
- bool subvp_sw_cursor_req = false;
+ unsigned int max_cursor_size = dc->caps.max_cursor_size;
+ unsigned int stream_cursor_size;
- for (i = 0; i < dc->current_state->stream_count; i++) {
- if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i]) && !dc->current_state->streams[i]->hw_cursor_req) {
- subvp_sw_cursor_req = true;
- break;
+ if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) {
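+ /* Report the smallest HW cursor size supported across all current streams. */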
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ stream_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc,
+ dc->current_state,
+ dc->current_state->streams[i]);
+
+ if (stream_cursor_size < max_cursor_size) {
+ max_cursor_size = stream_cursor_size;
+ }
}
}
- properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
+
+ properties->cursor_size_limit = max_cursor_size;
}
/**
@@ -6261,3 +6376,27 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
else
return 0;
}
+
+bool dc_is_cursor_limit_pending(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc_stream_is_cursor_limit_pending(dc, dc->current_state->streams[i]))
+ return true;
+ }
+
+ return false;
+}
+
+bool dc_can_clear_cursor_limit(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc_state_can_clear_stream_cursor_subvp_limit(dc->current_state->streams[i], dc->current_state))
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 650e89825968..7551d0a3fe82 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -266,6 +266,8 @@ char *dc_status_to_str(enum dc_status status)
return "Fail dp payload allocation";
case DC_FAIL_DP_LINK_BANDWIDTH:
return "Insufficient DP link bandwidth";
+ case DC_FAIL_HW_CURSOR_SUPPORT:
+ return "HW Cursor not supported";
case DC_ERROR_UNEXPECTED:
return "Unexpected error";
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 55b32dfbfdd6..7014b8d000bb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -697,7 +697,7 @@ void get_fams2_visual_confirm_color(
void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
struct dc_stream_status *stream_status,
@@ -896,7 +896,7 @@ void hwss_build_fast_sequence(struct dc *dc,
}
void hwss_execute_sequence(struct dc *dc,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
int num_steps)
{
unsigned int i;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 313a32248cd7..3da25bd8b578 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1342,32 +1342,6 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
data->viewport_c.y += src.y / vpc_div;
}
-static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream)
-{
- uint32_t refresh_rate;
- struct dc *dc = stream->ctx->dc;
-
- refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 +
- stream->timing.v_total * stream->timing.h_total - (uint64_t)1);
- refresh_rate = div_u64(refresh_rate, stream->timing.v_total);
- refresh_rate = div_u64(refresh_rate, stream->timing.h_total);
-
- /* If there's any stream that fits the SubVP high refresh criteria,
- * we must return true. This is because cursor updates are asynchronous
- * with full updates, so we could transition into a SubVP config and
- * remain in HW cursor mode if there's no cursor update which will
- * then cause corruption.
- */
- if ((refresh_rate >= 120 && refresh_rate <= 175 &&
- stream->timing.v_addressable >= 1080 &&
- stream->timing.v_addressable <= 2160) &&
- (dc->current_state->stream_count > 1 ||
- (dc->current_state->stream_count == 1 && !stream->allow_freesync)))
- return true;
-
- return false;
-}
-
static enum controller_dp_test_pattern convert_dp_to_controller_test_pattern(
enum dp_test_pattern test_pattern)
{
@@ -3937,6 +3911,10 @@ enum dc_status resource_map_pool_resources(
if (!dc->link_srv->dp_decide_link_settings(stream,
&pipe_ctx->link_config.dp_link_settings))
return DC_FAIL_DP_LINK_BANDWIDTH;
+
+ dc->link_srv->dp_decide_tunnel_settings(stream,
+ &pipe_ctx->link_config.dp_tunnel_settings);
+
if (dc->link_srv->dp_get_encoding_format(
&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
pipe_ctx->stream_res.hpo_dp_stream_enc =
@@ -4259,6 +4237,11 @@ enum dc_status dc_validate_with_context(struct dc *dc,
}
}
+ /* clear subvp cursor limitations */
+ for (i = 0; i < context->stream_count; i++) {
+ dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false);
+ }
+
res = dc_validate_global_state(dc, context, fast_validate);
/* calculate pixel rate divider after deciding pixel clock & odm combine */
@@ -4385,8 +4368,7 @@ enum dc_status dc_validate_global_state(
result = resource_build_scaling_params_for_context(dc, new_ctx);
if (result == DC_OK)
- if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
- result = DC_FAIL_BANDWIDTH_VALIDATE;
+ result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate);
return result;
}
@@ -5538,23 +5520,17 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
return DC_OK;
}
-bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream)
+struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
{
- if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
- return true;
- if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
- ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
- return true;
- else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 &&
- ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
- return true;
-
- return false;
+ return &pipe_ctx->plane_res.scl_data.dscl_prog_data;
}
-struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
+static bool resource_allocate_mcache(struct dc_state *context, const struct dc_mcache_params *mcache_params)
{
- return &pipe_ctx->plane_res.scl_data.dscl_prog_data;
+ if (context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config)
+ context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config(context, mcache_params);
+
+ return true;
}
void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options)
@@ -5576,6 +5552,7 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio
dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status;
dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id;
dml2_options->callbacks.get_max_flickerless_instant_vtotal_increase = &dc_stream_get_max_flickerless_instant_vtotal_increase;
+ dml2_options->callbacks.allocate_mcache = &resource_allocate_mcache;
dml2_options->svp_pstate.callbacks.dc = dc;
dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 1b2cce127981..4db7383720fd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -22,6 +22,7 @@
* Authors: AMD
*
*/
+#include "dc_types.h"
#include "core_types.h"
#include "core_status.h"
#include "dc_state.h"
@@ -812,8 +813,12 @@ enum dc_status dc_state_add_phantom_stream(const struct dc *dc,
if (phantom_stream_status) {
phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM;
phantom_stream_status->mall_stream_config.paired_stream = main_stream;
+ phantom_stream_status->mall_stream_config.subvp_limit_cursor_size = false;
+ phantom_stream_status->mall_stream_config.cursor_size_limit_subvp = false;
}
+ dc_state_set_stream_subvp_cursor_limit(main_stream, state, true);
+
return res;
}
@@ -939,13 +944,20 @@ void dc_state_release_phantom_streams_and_planes(
const struct dc *dc,
struct dc_state *state)
{
+ unsigned int phantom_count;
+ struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES];
+ struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES];
int i;
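+ /* Releasing a phantom stream/plane can modify the state's phantom arrays, so snapshot them locally before iterating. */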
- for (i = 0; i < state->phantom_stream_count; i++)
- dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]);
+ phantom_count = state->phantom_stream_count;
+ memcpy(phantom_streams, state->phantom_streams, sizeof(struct dc_stream_state *) * MAX_PHANTOM_PIPES);
+ for (i = 0; i < phantom_count; i++)
+ dc_state_release_phantom_stream(dc, state, phantom_streams[i]);
- for (i = 0; i < state->phantom_plane_count; i++)
- dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]);
+ phantom_count = state->phantom_plane_count;
+ memcpy(phantom_planes, state->phantom_planes, sizeof(struct dc_plane_state *) * MAX_PHANTOM_PIPES);
+ for (i = 0; i < phantom_count; i++)
+ dc_state_release_phantom_plane(dc, state, phantom_planes[i]);
}
struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id)
@@ -977,3 +989,94 @@ bool dc_state_is_fams2_in_use(
return is_fams2_in_use;
}
+
+void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit)
+{
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ stream_status->mall_stream_config.subvp_limit_cursor_size = limit;
+ }
+}
+
+bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ limit = stream_status->mall_stream_config.subvp_limit_cursor_size;
+ }
+
+ return limit;
+}
+
+void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit)
+{
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ stream_status->mall_stream_config.cursor_size_limit_subvp = limit;
+ }
+}
+
+bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ limit = stream_status->mall_stream_config.cursor_size_limit_subvp;
+ }
+
+ return limit;
+}
+
+bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool can_clear_limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
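+ /* Clearable when the limit is currently set and the stream no longer needs it: phantom stream, explicit HW cursor request, no SubVP size limit, cursor disabled, or cursor attributes that already fit. */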
+ if (stream_status) {
+ can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, state) &&
+ (stream_status->mall_stream_config.type == SUBVP_PHANTOM ||
+ stream->hw_cursor_req ||
+ !stream_status->mall_stream_config.subvp_limit_cursor_size ||
+ !stream->cursor_position.enable ||
+ dc_stream_check_cursor_attributes(stream, state, &stream->cursor_attributes));
+ }
+
+ return can_clear_limit;
+}
+
+bool dc_state_is_subvp_in_use(struct dc_state *state)
+{
+ uint32_t i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (dc_state_get_stream_subvp_type(state, state->streams[i]) != SUBVP_NONE)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 0478dd856d8c..b883fb24fa12 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -265,13 +265,16 @@ void program_cursor_attributes(
}
/*
- * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
+ * dc_stream_check_cursor_attributes() - Check validity of cursor attributes and surface address
*/
-bool dc_stream_set_cursor_attributes(
- struct dc_stream_state *stream,
+bool dc_stream_check_cursor_attributes(
+ const struct dc_stream_state *stream,
+ struct dc_state *state,
const struct dc_cursor_attributes *attributes)
{
- struct dc *dc;
+ const struct dc *dc;
+
+ unsigned int max_cursor_size;
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -289,24 +292,38 @@ bool dc_stream_set_cursor_attributes(
dc = stream->ctx->dc;
- /* SubVP is not compatible with HW cursor larger than 64 x 64 x 4.
- * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case:
- * 1. If the config is a candidate for SubVP high refresh (both single an dual display configs)
- * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
- * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz
+ /* SubVP is not compatible with HW cursor larger than what can fit in cursor SRAM.
+ * Therefore, if the cursor is greater than this, fall back to SW cursor.
*/
- if (dc->debug.allow_sw_cursor_fallback &&
- attributes->height * attributes->width * 4 > 16384 &&
- !stream->hw_cursor_req) {
- if (check_subvp_sw_cursor_fallback_req(dc, stream))
+ if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) {
+ max_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc, state, stream);
+ max_cursor_size = max_cursor_size * max_cursor_size * 4;
+
+ if (attributes->height * attributes->width * 4 > max_cursor_size) {
return false;
+ }
}
- stream->cursor_attributes = *attributes;
-
return true;
}
+/*
+ * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
+ */
+bool dc_stream_set_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes)
+{
+ bool result = false;
+
+ if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) {
+ stream->cursor_attributes = *attributes;
+ result = true;
+ }
+
+ return result;
+}
+
bool dc_stream_program_cursor_attributes(
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes)
@@ -552,6 +569,14 @@ bool dc_stream_fc_disable_writeback(struct dc *dc,
return true;
}
+/**
+ * dc_stream_remove_writeback() - Disables writeback and removes writeback info.
+ * @dc: Display core control structure.
+ * @stream: Display core stream state.
+ * @dwb_pipe_inst: Display writeback pipe.
+ *
+ * Return: true on success, false otherwise.
+ */
bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst)
@@ -1109,3 +1134,26 @@ unsigned int dc_stream_get_max_flickerless_instant_vtotal_increase(struct dc_str
return dc_stream_get_max_flickerless_instant_vtotal_delta(stream, is_gaming, false);
}
+
+bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream)
+{
+ bool is_limit_pending = false;
+
+ if (dc->current_state)
+ is_limit_pending = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state);
+
+ return is_limit_pending;
+}
+
+bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream)
+{
+ bool can_clear_limit = false;
+
+ if (dc->current_state)
+ can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state) &&
+ (stream->hw_cursor_req ||
+ !stream->cursor_position.enable ||
+ dc_stream_check_cursor_attributes(stream, dc->current_state, &stream->cursor_attributes));
+
+ return can_clear_limit;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index e6fcc21bb9bc..922f23557f5d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -109,7 +109,8 @@ struct dc_plane_state *dc_create_plane_state(const struct dc *dc)
*****************************************************************************
*/
const struct dc_plane_status *dc_plane_get_status(
- const struct dc_plane_state *plane_state)
+ const struct dc_plane_state *plane_state,
+ union dc_plane_status_update_flags flags)
{
const struct dc_plane_status *plane_status;
struct dc *dc;
@@ -136,7 +137,7 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state != plane_state)
continue;
- if (pipe_ctx->plane_state)
+ if (pipe_ctx->plane_state && flags.bits.address)
pipe_ctx->plane_state->status.is_flip_pending = false;
break;
@@ -151,7 +152,8 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state != plane_state)
continue;
- dc->hwss.update_pending_status(pipe_ctx);
+ if (flags.bits.address)
+ dc->hwss.update_pending_status(pipe_ctx);
}
return plane_status;
@@ -294,3 +296,17 @@ void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
dc->hwss.clear_surface_dcc_and_tiling(pipe_ctx, plane_state, clear_tiling);
}
}
+
+void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src)
+{
+ struct kref temp_refcount;
+
+ /* backup persistent info */
+ memcpy(&temp_refcount, &dst->refcount, sizeof(struct kref));
+
+ /* copy all configuration information */
+ memcpy(dst, src, sizeof(struct dc_plane_state));
+
+ /* restore persistent info */
+ memcpy(&dst->refcount, &temp_refcount, sizeof(struct kref));
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7c2ee0526926..1d917be36fc4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -53,7 +53,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.325"
+#define DC_VER "3.2.334"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -249,6 +249,7 @@ struct dc_caps {
uint32_t i2c_speed_in_khz_hdcp;
uint32_t dmdata_alloc_size;
unsigned int max_cursor_size;
+ unsigned int max_buffered_cursor_size;
unsigned int max_video_width;
/*
* max video plane width that can be safely assumed to be always
@@ -282,6 +283,7 @@ struct dc_caps {
bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
+ bool fused_io_supported;
uint32_t max_otg_num;
uint32_t max_cab_allocation_bytes;
uint32_t cache_line_size;
@@ -447,6 +449,7 @@ struct dc_config {
bool enable_windowed_mpo_odm;
bool forceHBR2CP2520; // Used for switching between test patterns TPS4 and CP2520
uint32_t allow_edp_hotplug_detection;
+ bool skip_riommu_prefetch_wa;
bool clamp_min_dcfclk;
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
@@ -496,6 +499,7 @@ enum visual_confirm {
VISUAL_CONFIRM_HW_CURSOR = 20,
VISUAL_CONFIRM_VABC = 21,
VISUAL_CONFIRM_DCC = 22,
+ VISUAL_CONFIRM_EXPLICIT = 0x80000000,
};
enum dc_psr_power_opts {
@@ -902,6 +906,9 @@ struct dc_debug_options {
bool voltage_align_fclk;
bool disable_min_fclk;
+ bool hdcp_lc_force_fw_enable;
+ bool hdcp_lc_enable_sw_fallback;
+
bool disable_dfs_bypass;
bool disable_dpp_power_gate;
bool disable_hubp_power_gate;
@@ -1418,6 +1425,171 @@ struct dc_scratch_space {
struct dc_stream_state stream_state;
};
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+ struct dc_link {
+ struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+ unsigned int sink_count;
+ struct dc_sink *local_sink;
+ unsigned int link_index;
+ enum dc_connection_type type;
+ enum signal_type connector_signal;
+ enum dc_irq_source irq_source_hpd;
+ enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
+ enum dc_irq_source irq_source_read_request;/* Read Request */
+
+ bool is_hpd_filter_disabled;
+ bool dp_ss_off;
+
+ /**
+ * @link_state_valid:
+ *
+ * If there is no link and local sink, this variable should be set to
+ * false. Otherwise, it should be set to true; usually, the function
+ * core_link_enable_stream sets this field to true.
+ */
+ bool link_state_valid;
+ bool aux_access_disabled;
+ bool sync_lt_in_progress;
+ bool skip_stream_reenable;
+ bool is_internal_display;
+ /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
+ bool is_dig_mapping_flexible;
+ bool hpd_status; /* HPD status of link without physical HPD pin. */
+ bool is_hpd_pending; /* Indicates a new received hpd */
+
+ /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
+ * for every link training. This is incompatible with DP LL compliance automation,
+ * which expects the same link settings to be used every retry on a link loss.
+ * This flag is used to skip the fallback when link loss occurs during automation.
+ */
+ bool skip_fallback_on_link_loss;
+
+ bool edp_sink_present;
+
+ struct dp_trace dp_trace;
+
+ /* caps is the same as reported_link_cap. link_training uses
+ * reported_link_cap. Will clean up. TODO
+ */
+ struct dc_link_settings reported_link_cap;
+ struct dc_link_settings verified_link_cap;
+ struct dc_link_settings cur_link_settings;
+ struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
+ struct dc_link_settings preferred_link_setting;
+ /* preferred_training_settings are override values that
+ * come from DM. DM is responsible for the memory
+ * management of the override pointers.
+ */
+ struct dc_link_training_overrides preferred_training_settings;
+ struct dp_audio_test_data audio_test_data;
+
+ uint8_t ddc_hw_inst;
+
+ uint8_t hpd_src;
+
+ uint8_t link_enc_hw_inst;
+ /* DIG link encoder ID. Used as index in link encoder resource pool.
+ * For links with fixed mapping to DIG, this is not changed after dc_link
+ * object creation.
+ */
+ enum engine_id eng_id;
+ enum engine_id dpia_preferred_eng_id;
+
+ bool test_pattern_enabled;
+ /* Pending/Current test pattern are only used to perform and track
+ * FIXED_VS retimer test pattern/lane adjustment override state.
+ * Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
+ * to perform specific lane adjust overrides before setting certain
+ * PHY test patterns. In cases when lane adjust and set test pattern
+ * calls are not performed atomically (i.e. performing link training),
+ * pending_test_pattern will be invalid or contain a non-PHY test pattern
+ * and current_test_pattern will contain required context for any future
+ * set pattern/set lane adjust to transition between override state(s).
+ * */
+ enum dp_test_pattern current_test_pattern;
+ enum dp_test_pattern pending_test_pattern;
+
+ union compliance_test_state compliance_test_state;
+
+ void *priv;
+
+ struct ddc_service *ddc;
+
+ enum dp_panel_mode panel_mode;
+ bool aux_mode;
+
+ /* Private to DC core */
+
+ const struct dc *dc;
+
+ struct dc_context *ctx;
+
+ struct panel_cntl *panel_cntl;
+ struct link_encoder *link_enc;
+ struct graphics_object_id link_id;
+ /* Endpoint type distinguishes display endpoints which do not have entries
+ * in the BIOS connector table from those that do. Helps when tracking link
+ * encoder to display endpoint assignments.
+ */
+ enum display_endpoint_type ep_type;
+ union ddi_channel_mapping ddi_channel_mapping;
+ struct connector_device_tag_info device_tag;
+ struct dpcd_caps dpcd_caps;
+ uint32_t dongle_max_pix_clk;
+ unsigned short chip_caps;
+ unsigned int dpcd_sink_count;
+ struct hdcp_caps hdcp_caps;
+ enum edp_revision edp_revision;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+
+ struct psr_settings psr_settings;
+ struct replay_settings replay_settings;
+
+ /* Drive settings read from integrated info table */
+ struct dc_lane_settings bios_forced_drive_settings;
+
+ /* Vendor specific LTTPR workaround variables */
+ uint8_t vendor_specific_lttpr_link_rate_wa;
+ bool apply_vendor_specific_lttpr_link_rate_wa;
+
+ /* MST record stream using this link */
+ struct link_flags {
+ bool dp_keep_receiver_powered;
+ bool dp_skip_DID2;
+ bool dp_skip_reset_segment;
+ bool dp_skip_fs_144hz;
+ bool dp_mot_reset_segment;
+ /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+ bool dpia_mst_dsc_always_on;
+ /* Forced DPIA into TBT3 compatibility mode. */
+ bool dpia_forced_tbt3_mode;
+ bool dongle_mode_timing_override;
+ bool blank_stream_on_ocs_change;
+ bool read_dpcd204h_on_irq_hpd;
+ bool force_dp_ffe_preset;
+ } wa_flags;
+ union dc_dp_ffe_preset forced_dp_ffe_preset;
+ struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+ struct dc_link_status link_status;
+ struct dprx_states dprx_states;
+
+ struct gpio *hpd_gpio;
+ enum dc_link_fec_state fec_state;
+ bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
+
+ struct dc_panel_config panel_config;
+ struct phy_state phy_state;
+ uint32_t phy_transition_bitmask;
+ // BW ALLOCATION USB4 ONLY
+ struct dc_dpia_bw_alloc dpia_bw_alloc_config;
+ bool skip_implict_edp_power_control;
+ enum backlight_control_type backlight_control_type;
+};
+
struct dc {
struct dc_debug_options debug;
struct dc_versions versions;
@@ -1485,6 +1657,7 @@ struct dc {
struct dc_scratch_space current_state;
struct dc_scratch_space new_state;
struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack
+ struct dc_link temp_link;
bool pipes_to_unlock_first[MAX_PIPES]; /* Any of the pipes indicated here should be unlocked first */
} scratch;
@@ -1651,170 +1824,6 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
const enum dc_link_encoding_format link_encoding);
/* Link Interfaces */
-/*
- * A link contains one or more sinks and their connected status.
- * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
- */
-struct dc_link {
- struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
- unsigned int sink_count;
- struct dc_sink *local_sink;
- unsigned int link_index;
- enum dc_connection_type type;
- enum signal_type connector_signal;
- enum dc_irq_source irq_source_hpd;
- enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
-
- bool is_hpd_filter_disabled;
- bool dp_ss_off;
-
- /**
- * @link_state_valid:
- *
- * If there is no link and local sink, this variable should be set to
- * false. Otherwise, it should be set to true; usually, the function
- * core_link_enable_stream sets this field to true.
- */
- bool link_state_valid;
- bool aux_access_disabled;
- bool sync_lt_in_progress;
- bool skip_stream_reenable;
- bool is_internal_display;
- /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
- bool is_dig_mapping_flexible;
- bool hpd_status; /* HPD status of link without physical HPD pin. */
- bool is_hpd_pending; /* Indicates a new received hpd */
-
- /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
- * for every link training. This is incompatible with DP LL compliance automation,
- * which expects the same link settings to be used every retry on a link loss.
- * This flag is used to skip the fallback when link loss occurs during automation.
- */
- bool skip_fallback_on_link_loss;
-
- bool edp_sink_present;
-
- struct dp_trace dp_trace;
-
- /* caps is the same as reported_link_cap. link_traing use
- * reported_link_cap. Will clean up. TODO
- */
- struct dc_link_settings reported_link_cap;
- struct dc_link_settings verified_link_cap;
- struct dc_link_settings cur_link_settings;
- struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
- struct dc_link_settings preferred_link_setting;
- /* preferred_training_settings are override values that
- * come from DM. DM is responsible for the memory
- * management of the override pointers.
- */
- struct dc_link_training_overrides preferred_training_settings;
- struct dp_audio_test_data audio_test_data;
-
- uint8_t ddc_hw_inst;
-
- uint8_t hpd_src;
-
- uint8_t link_enc_hw_inst;
- /* DIG link encoder ID. Used as index in link encoder resource pool.
- * For links with fixed mapping to DIG, this is not changed after dc_link
- * object creation.
- */
- enum engine_id eng_id;
- enum engine_id dpia_preferred_eng_id;
-
- bool test_pattern_enabled;
- /* Pending/Current test pattern are only used to perform and track
- * FIXED_VS retimer test pattern/lane adjustment override state.
- * Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
- * to perform specific lane adjust overrides before setting certain
- * PHY test patterns. In cases when lane adjust and set test pattern
- * calls are not performed atomically (i.e. performing link training),
- * pending_test_pattern will be invalid or contain a non-PHY test pattern
- * and current_test_pattern will contain required context for any future
- * set pattern/set lane adjust to transition between override state(s).
- * */
- enum dp_test_pattern current_test_pattern;
- enum dp_test_pattern pending_test_pattern;
-
- union compliance_test_state compliance_test_state;
-
- void *priv;
-
- struct ddc_service *ddc;
-
- enum dp_panel_mode panel_mode;
- bool aux_mode;
-
- /* Private to DC core */
-
- const struct dc *dc;
-
- struct dc_context *ctx;
-
- struct panel_cntl *panel_cntl;
- struct link_encoder *link_enc;
- struct graphics_object_id link_id;
- /* Endpoint type distinguishes display endpoints which do not have entries
- * in the BIOS connector table from those that do. Helps when tracking link
- * encoder to display endpoint assignments.
- */
- enum display_endpoint_type ep_type;
- union ddi_channel_mapping ddi_channel_mapping;
- struct connector_device_tag_info device_tag;
- struct dpcd_caps dpcd_caps;
- uint32_t dongle_max_pix_clk;
- unsigned short chip_caps;
- unsigned int dpcd_sink_count;
- struct hdcp_caps hdcp_caps;
- enum edp_revision edp_revision;
- union dpcd_sink_ext_caps dpcd_sink_ext_caps;
-
- struct psr_settings psr_settings;
- struct replay_settings replay_settings;
-
- /* Drive settings read from integrated info table */
- struct dc_lane_settings bios_forced_drive_settings;
-
- /* Vendor specific LTTPR workaround variables */
- uint8_t vendor_specific_lttpr_link_rate_wa;
- bool apply_vendor_specific_lttpr_link_rate_wa;
-
- /* MST record stream using this link */
- struct link_flags {
- bool dp_keep_receiver_powered;
- bool dp_skip_DID2;
- bool dp_skip_reset_segment;
- bool dp_skip_fs_144hz;
- bool dp_mot_reset_segment;
- /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
- bool dpia_mst_dsc_always_on;
- /* Forced DPIA into TBT3 compatibility mode. */
- bool dpia_forced_tbt3_mode;
- bool dongle_mode_timing_override;
- bool blank_stream_on_ocs_change;
- bool read_dpcd204h_on_irq_hpd;
- bool force_dp_ffe_preset;
- } wa_flags;
- union dc_dp_ffe_preset forced_dp_ffe_preset;
- struct link_mst_stream_allocation_table mst_stream_alloc_table;
-
- struct dc_link_status link_status;
- struct dprx_states dprx_states;
-
- struct gpio *hpd_gpio;
- enum dc_link_fec_state fec_state;
- bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
-
- struct dc_panel_config panel_config;
- struct phy_state phy_state;
- uint32_t phy_transition_bitmask;
- // BW ALLOCATON USB4 ONLY
- struct dc_dpia_bw_alloc dpia_bw_alloc_config;
- bool skip_implict_edp_power_control;
- enum backlight_control_type backlight_control_type;
-};
-
/* Return an enumerated dc_link.
* dc_link order is constant and determined at
* boot time. They cannot be created or destroyed.
@@ -2589,10 +2598,18 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
/* DSC Interfaces */
#include "dc_dsc.h"
+void dc_get_visual_confirm_for_stream(
+ struct dc *dc,
+ struct dc_stream_state *stream_state,
+ struct tg_color *color);
+
/* Disable acc mode Interfaces */
void dc_disable_accelerated_mode(struct dc *dc);
bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream);
+bool dc_is_cursor_limit_pending(struct dc *dc);
+bool dc_can_clear_cursor_limit(struct dc *dc);
+
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 614e03bfd598..afbcf866520e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -39,6 +39,7 @@
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
+#define GPINT_RETRY_NUM 20
static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
struct dmub_srv *dmub)
@@ -70,20 +71,28 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
}
}
-void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
+bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv)
{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dmub_srv *dmub;
+ struct dc_context *dc_ctx;
enum dmub_status status;
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ dc_ctx = dc_dmub_srv->ctx;
+ dmub = dc_dmub_srv->dmub;
+
do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ status = dmub_srv_wait_for_pending(dmub, 100000);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
if (status != DMUB_STATUS_OK) {
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
}
+
+ return status == DMUB_STATUS_OK;
}
void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
@@ -126,7 +135,49 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
}
}
-bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int count,
+ union dmub_rb_cmd *cmd_list)
+{
+ struct dc_context *dc_ctx;
+ struct dmub_srv *dmub;
+ enum dmub_status status = DMUB_STATUS_OK;
+ int i;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ dc_ctx = dc_dmub_srv->ctx;
+ dmub = dc_dmub_srv->dmub;
+
+ for (i = 0 ; i < count; i++) {
+ /* confirm no messages pending */
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
+
+ /* queue command */
+ if (status == DMUB_STATUS_OK)
+ status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]);
+
+ /* check for errors */
+ if (status != DMUB_STATUS_OK) {
+ break;
+ }
+ }
+
+ if (status != DMUB_STATUS_OK) {
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
+ return false;
+ }
+
+ return true;
+}
+
+static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
unsigned int count,
union dmub_rb_cmd *cmd_list)
{
@@ -143,20 +194,25 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
for (i = 0 ; i < count; i++) {
// Queue command
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ if (!cmd_list[i].cmd_common.header.multi_cmd_pending ||
+ dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
+ status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
+ } else {
+ status = DMUB_STATUS_QUEUE_FULL;
+ }
if (status == DMUB_STATUS_QUEUE_FULL) {
/* Execute and wait for queue to become empty again. */
- status = dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_fb_cmd_execute(dmub);
if (status == DMUB_STATUS_POWER_STATE_D3)
return false;
do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
/* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
}
if (status != DMUB_STATUS_OK) {
@@ -168,7 +224,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
}
}
- status = dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_fb_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
if (status != DMUB_STATUS_POWER_STATE_D3) {
DC_ERROR("Error starting DMUB execution: status=%d\n", status);
@@ -180,6 +236,26 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
return true;
}
+bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int count,
+ union dmub_rb_cmd *cmd_list)
+{
+ bool res = false;
+
+ if (dc_dmub_srv && dc_dmub_srv->dmub) {
+ if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG) {
+ res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
+ } else {
+ res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
+ }
+
+ if (res)
+ res = dmub_srv_update_inbox_status(dc_dmub_srv->dmub) == DMUB_STATUS_OK;
+ }
+
+ return res;
+}
+
bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
enum dm_dmub_wait_type wait_type,
union dmub_rb_cmd *cmd_list)
@@ -202,7 +278,8 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
if (!dmub->debug.timeout_info.timeout_occured) {
dmub->debug.timeout_info.timeout_occured = true;
- dmub->debug.timeout_info.timeout_cmd = *cmd_list;
+ if (cmd_list)
+ dmub->debug.timeout_info.timeout_cmd = *cmd_list;
dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
}
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
@@ -210,8 +287,9 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
}
// Copy data back from ring buffer into command
- if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
+ if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list) {
+ dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list);
+ }
}
return true;
@@ -224,74 +302,10 @@ bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd
bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
- struct dc_context *dc_ctx;
- struct dmub_srv *dmub;
- enum dmub_status status;
- int i;
-
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
- return false;
-
- dc_ctx = dc_dmub_srv->ctx;
- dmub = dc_dmub_srv->dmub;
-
- for (i = 0 ; i < count; i++) {
- // Queue command
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
-
- if (status == DMUB_STATUS_QUEUE_FULL) {
- /* Execute and wait for queue to become empty again. */
- status = dmub_srv_cmd_execute(dmub);
- if (status == DMUB_STATUS_POWER_STATE_D3)
- return false;
-
- status = dmub_srv_wait_for_idle(dmub, 100000);
- if (status != DMUB_STATUS_OK)
- return false;
-
- /* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
- }
-
- if (status != DMUB_STATUS_OK) {
- if (status != DMUB_STATUS_POWER_STATE_D3) {
- DC_ERROR("Error queueing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
- return false;
- }
- }
-
- status = dmub_srv_cmd_execute(dmub);
- if (status != DMUB_STATUS_OK) {
- if (status != DMUB_STATUS_POWER_STATE_D3) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
+ if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list))
return false;
- }
- // Wait for DMUB to process command
- if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
- if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
- do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
- } while (status != DMUB_STATUS_OK);
- } else
- status = dmub_srv_wait_for_idle(dmub, 100000);
-
- if (status != DMUB_STATUS_OK) {
- DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- return false;
- }
-
- // Copy data back from ring buffer into command
- if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
- }
-
- return true;
+ return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list);
}
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
@@ -1243,7 +1257,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
memset(&new_signals, 0, sizeof(new_signals));
@@ -1355,14 +1369,15 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
udelay(dc->debug.ips2_eval_delay_us);
- if (ips_fw->signals.bits.ips2_commit) {
- DC_LOG_IPS(
- "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
- ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ DC_LOG_IPS(
+ "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
- // Tell PMFW to exit low power state
- dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ // Tell PMFW to exit low power state
+ dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+
+ if (ips_fw->signals.bits.ips2_commit) {
DC_LOG_IPS(
"wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)",
@@ -1400,7 +1415,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
- dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
+ dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub);
}
}
@@ -1654,7 +1669,8 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* fill in generic command header */
global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ global_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
if (enable) {
/* send global configuration parameters */
@@ -1673,11 +1689,13 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* configure command header */
stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_base_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_base_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
stream_base_cmd->header.multi_cmd_pending = 1;
stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_sub_state_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_sub_state_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
stream_sub_state_cmd->header.multi_cmd_pending = 1;
/* copy stream static base state */
memcpy(&stream_base_cmd->config,
@@ -1723,7 +1741,8 @@ void dc_dmub_srv_fams2_drr_update(struct dc *dc,
cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;
- cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
+ cmd.fams2_drr_update.header.payload_bytes =
+ sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -1759,7 +1778,8 @@ void dc_dmub_srv_fams2_passthrough_flip(
/* build command header */
cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
- cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip);
+ cmds[num_cmds].fams2_flip.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header);
/* for chaining multiple commands, all but last command should set to 1 */
cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;
@@ -1869,11 +1889,14 @@ void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struc
if (command_code == DMUB_GPINT__INVALID_COMMAND)
return;
- // send gpint commands and wait for ack
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
- (uint16_t)(output->ips_mode),
- &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->residency_percent = 0;
+ for (i = 0; i < GPINT_RETRY_NUM; i++) {
+ // false could mean GPINT timeout, in which case we should retry
+ if (dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
+ (uint16_t)(output->ips_mode), &output->residency_percent,
+ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ break;
+ udelay(100);
+ }
if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
(uint16_t)(output->ips_mode),
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index a636f4c3f01d..ada5c2fb2db3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -58,7 +58,7 @@ struct dc_dmub_srv {
bool needs_idle_wake;
};
-void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
+bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv);
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 77c87ad57220..0bad8304ccf6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -159,6 +159,11 @@ struct dc_link_settings {
uint8_t link_rate_set;
};
+struct dc_tunnel_settings {
+ bool should_enable_dp_tunneling;
+ bool should_use_dp_bw_allocation;
+};
+
union dc_dp_ffe_preset {
struct {
uint8_t level : 4;
@@ -943,10 +948,20 @@ union dpia_info {
uint8_t raw;
};
+/* DPCD[0xE0020] USB4_DRIVER_BW_CAPABILITY register. */
+union usb4_driver_bw_cap {
+ struct {
+ uint8_t rsvd :7;
+ uint8_t driver_bw_alloc_support :1;
+ } bits;
+ uint8_t raw;
+};
+
/* DP Tunneling over USB4 */
struct dpcd_usb4_dp_tunneling_info {
union dp_tun_cap_support dp_tun_cap;
union dpia_info dpia_info;
+ union usb4_driver_bw_cap driver_bw_cap;
uint8_t usb4_driver_id;
uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
};
@@ -1486,5 +1501,11 @@ struct dp_trace {
# ifndef DP_TUNNELING_BW_ALLOC_CAP_CHANGED
# define DP_TUNNELING_BW_ALLOC_CAP_CHANGED (1 << 3)
# endif
+# ifndef DPTX_BW_ALLOC_UNMASK_IRQ
+# define DPTX_BW_ALLOC_UNMASK_IRQ (1 << 6)
+# endif
+# ifndef DPTX_BW_ALLOC_MODE_ENABLE
+# define DPTX_BW_ALLOC_MODE_ENABLE (1 << 7)
+# endif
#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_fused_io.c b/drivers/gpu/drm/amd/display/dc/dc_fused_io.c
new file mode 100644
index 000000000000..fee69642fb93
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_fused_io.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dc_fused_io.h"
+
+#include "dm_helpers.h"
+#include "gpio.h"
+
+static bool op_i2c_convert(
+ union dmub_rb_cmd *cmd,
+ const struct mod_hdcp_atomic_op_i2c *op,
+ enum dmub_cmd_fused_request_type type,
+ uint32_t ddc_line,
+ bool over_aux
+)
+{
+ struct dmub_cmd_fused_request *req = &cmd->fused_io.request;
+ struct dmub_cmd_fused_request_location_i2c *loc = &req->u.i2c;
+
+ if (!op || op->size > sizeof(req->buffer))
+ return false;
+
+ req->type = type;
+ loc->is_aux = false;
+ loc->ddc_line = ddc_line;
+ loc->over_aux = over_aux;
+ loc->address = op->address;
+ loc->offset = op->offset;
+ loc->length = op->size;
+ memcpy(req->buffer, op->data, op->size);
+
+ return true;
+}
+
+static bool op_aux_convert(
+ union dmub_rb_cmd *cmd,
+ const struct mod_hdcp_atomic_op_aux *op,
+ enum dmub_cmd_fused_request_type type,
+ uint32_t ddc_line
+)
+{
+ struct dmub_cmd_fused_request *req = &cmd->fused_io.request;
+ struct dmub_cmd_fused_request_location_aux *loc = &req->u.aux;
+
+ if (!op || op->size > sizeof(req->buffer))
+ return false;
+
+ req->type = type;
+ loc->is_aux = true;
+ loc->ddc_line = ddc_line;
+ loc->address = op->address;
+ loc->length = op->size;
+ memcpy(req->buffer, op->data, op->size);
+
+ return true;
+}
+
+static bool atomic_write_poll_read(
+ struct dc_link *link,
+ union dmub_rb_cmd commands[3],
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ const uint8_t count = 3;
+ const uint32_t timeout_per_request_us = 10000;
+ const uint32_t timeout_per_aux_transaction_us = 10000;
+ uint64_t timeout_us = 0;
+
+ commands[1].fused_io.request.poll_mask_msb = poll_mask_msb;
+ commands[1].fused_io.request.timeout_us = poll_timeout_us;
+
+ for (uint8_t i = 0; i < count; i++) {
+ struct dmub_rb_cmd_fused_io *io = &commands[i].fused_io;
+
+ io->header.type = DMUB_CMD__FUSED_IO;
+ io->header.sub_type = DMUB_CMD__FUSED_IO_EXECUTE;
+ io->header.multi_cmd_pending = i != count - 1;
+ io->header.payload_bytes = sizeof(commands[i].fused_io) - sizeof(io->header);
+
+ timeout_us += timeout_per_request_us + io->request.timeout_us;
+ if (!io->request.timeout_us && io->request.u.aux.is_aux)
+ timeout_us += timeout_per_aux_transaction_us * (io->request.u.aux.length / 16);
+ }
+
+ if (!dm_helpers_execute_fused_io(link->ctx, link, commands, count, timeout_us))
+ return false;
+
+ return commands[0].fused_io.request.status == FUSED_REQUEST_STATUS_SUCCESS;
+}
+
+bool dm_atomic_write_poll_read_i2c(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ if (!link)
+ return false;
+
+ const bool over_aux = false;
+ const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en;
+
+ union dmub_rb_cmd commands[3] = { 0 };
+ const bool converted = op_i2c_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line, over_aux)
+ && op_i2c_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line, over_aux)
+ && op_i2c_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line, over_aux);
+
+ if (!converted)
+ return false;
+
+ const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb);
+
+ memcpy(read->data, commands[0].fused_io.request.buffer, read->size);
+ return result;
+}
+
+bool dm_atomic_write_poll_read_aux(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ if (!link)
+ return false;
+
+ const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en;
+ union dmub_rb_cmd commands[3] = { 0 };
+ const bool converted = op_aux_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line)
+ && op_aux_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line)
+ && op_aux_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line);
+
+ if (!converted)
+ return false;
+
+ const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb);
+
+ memcpy(read->data, commands[0].fused_io.request.buffer, read->size);
+ return result;
+}
+
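A worked example of the timeout budget built in atomic_write_poll_read() above, for the AUX variant with assumed values (16-byte write/read operations carrying no per-request timeout, and poll_timeout_us = 50000). Each command gets the 10000 us per-request allowance plus its own timeout, and AUX requests without an explicit timeout additionally get 10000 us per 16-byte transaction:

  write: 10000 + 0     + 10000 * (16 / 16) =  20000 us
  poll:  10000 + 50000                     =  60000 us
  read:  10000 + 0     + 10000 * (16 / 16) =  20000 us
  timeout_us passed to dm_helpers_execute_fused_io() = 100000 us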
diff --git a/drivers/gpu/drm/amd/display/dc/dc_fused_io.h b/drivers/gpu/drm/amd/display/dc/dc_fused_io.h
new file mode 100644
index 000000000000..c74917240985
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_fused_io.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __DC_FUSED_IO_H__
+#define __DC_FUSED_IO_H__
+
+#include "dc.h"
+#include "mod_hdcp.h"
+
+bool dm_atomic_write_poll_read_i2c(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+);
+
+bool dm_atomic_write_poll_read_aux(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+);
+
+#endif // __DC_FUSED_IO_H__
+
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 8f077e15b4f0..7217de258851 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -682,13 +682,19 @@ void reg_sequence_wait_done(const struct dc_context *ctx)
if (offload &&
ctx->dc->debug.dmub_offload_enabled &&
!ctx->dc->debug.dmcub_emulation) {
- dc_dmub_srv_wait_idle(ctx->dmub_srv);
+ dc_dmub_srv_wait_for_idle(ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
}
}
char *dce_version_to_string(const int version)
{
switch (version) {
+ case DCE_VERSION_6_0:
+ return "DCE 6.0";
+ case DCE_VERSION_6_1:
+ return "DCE 6.1";
+ case DCE_VERSION_6_4:
+ return "DCE 6.4";
case DCE_VERSION_8_0:
return "DCE 8.0";
case DCE_VERSION_8_1:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index e9413685ed4f..14feb843e694 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -28,13 +28,24 @@
#include "dc_hw_types.h"
+union dc_plane_status_update_flags {
+ struct {
+ uint32_t address : 1;
+ } bits;
+ uint32_t raw;
+};
+
struct dc_plane_state *dc_create_plane_state(const struct dc *dc);
const struct dc_plane_status *dc_plane_get_status(
- const struct dc_plane_state *plane_state);
+ const struct dc_plane_state *plane_state,
+ union dc_plane_status_update_flags flags);
void dc_plane_state_retain(struct dc_plane_state *plane_state);
void dc_plane_state_release(struct dc_plane_state *plane_state);
void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
bool clear_tiling);
+
+void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src);
+
#endif /* _DC_PLANE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
index 1a12ef579ff4..1d9bae56ff6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
@@ -105,4 +105,24 @@ bool dc_state_is_fams2_in_use(
const struct dc *dc,
const struct dc_state *state);
+
+void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit);
+
+bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit);
+
+bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+bool dc_state_is_subvp_in_use(struct dc_state *state);
+
#endif /* _DC_STATE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e0bfddaa23e3..341d2ffb64b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -44,6 +44,8 @@ struct mall_stream_config {
*/
enum mall_stream_type type;
struct dc_stream_state *paired_stream; // master / slave stream
+ bool subvp_limit_cursor_size; /* stream has/is using subvp that limits hw cursor support */
+ bool cursor_size_limit_subvp; /* stream is using hw cursor config preventing subvp */
};
struct dc_stream_status {
@@ -503,6 +505,11 @@ void program_cursor_position(
struct dc *dc,
struct dc_stream_state *stream);
+bool dc_stream_check_cursor_attributes(
+ const struct dc_stream_state *stream,
+ struct dc_state *state,
+ const struct dc_cursor_attributes *attributes);
+
bool dc_stream_set_cursor_attributes(
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes);
@@ -579,4 +586,8 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
struct dc_stream_state *stream,
struct dc_surface_update *srf_updates,
struct dc_state *context);
+
+bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream);
+bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream);
+
#endif /* DC_STREAM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 83ffaae9f439..a4cd0eb39a3a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -210,6 +210,7 @@ struct dc_edid_caps {
bool edid_hdmi;
bool hdr_supported;
+ bool rr_capable;
struct dc_panel_patch panel_patch;
};
@@ -1089,7 +1090,8 @@ union replay_low_refresh_rate_enable_options {
struct {
//BIT[0-3]: Replay Low Hz Support control
unsigned int ENABLE_LOW_RR_SUPPORT :1;
- unsigned int RESERVED_1_3 :3;
+ unsigned int SKIP_ASIC_CHECK :1;
+ unsigned int RESERVED_2_3 :2;
//BIT[4-15]: Replay Low Hz Enable Scenarios
unsigned int ENABLE_STATIC_SCREEN :1;
unsigned int ENABLE_FULL_SCREEN_VIDEO :1;
@@ -1129,6 +1131,10 @@ struct replay_config {
union replay_low_refresh_rate_enable_options low_rr_enable_options;
/* Replay coasting vtotal is within low refresh rate range. */
bool low_rr_activated;
+ /* Replay low refresh rate supported */
+ bool low_rr_supported;
+ /* Replay Video Conferencing Optimization Enabled */
+ bool replay_video_conferencing_optimization_enabled;
};
/* Replay feature flags*/
@@ -1249,6 +1255,7 @@ enum dc_cm2_gpu_mem_layout {
enum dc_cm2_gpu_mem_pixel_component_order {
DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA,
+ DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA
};
enum dc_cm2_gpu_mem_format {
@@ -1270,7 +1277,8 @@ struct dc_cm2_gpu_mem_format_parameters {
enum dc_cm2_gpu_mem_size {
DC_CM2_GPU_MEM_SIZE_171717,
- DC_CM2_GPU_MEM_SIZE_TRANSFORMED
+ DC_CM2_GPU_MEM_SIZE_333333,
+ DC_CM2_GPU_MEM_SIZE_TRANSFORMED,
};
struct dc_cm2_gpu_mem_parameters {
@@ -1279,6 +1287,7 @@ struct dc_cm2_gpu_mem_parameters {
struct dc_cm2_gpu_mem_format_parameters format_params;
enum dc_cm2_gpu_mem_pixel_component_order component_order;
enum dc_cm2_gpu_mem_size size;
+ uint16_t bit_depth;
};
enum dc_cm2_transfer_func_source {
@@ -1302,6 +1311,10 @@ struct dc_cm2_func_luts {
const struct dc_3dlut *lut3d_func;
struct dc_cm2_gpu_mem_parameters gpu_mem_params;
};
+ bool rmcm_3dlut_shaper_select;
+ bool mpc_3dlut_enable;
+ bool rmcm_3dlut_enable;
+ bool mpc_mcm_post_blend;
} lut3d_data;
const struct dc_transfer_func *lut1d_func;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index b363f5360818..58c84f555c0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -391,6 +391,7 @@ static void dccg35_set_dppclk_rcg(struct dccg *dccg,
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
return;
@@ -411,6 +412,8 @@ static void dccg35_set_dppclk_rcg(struct dccg *dccg,
BREAK_TO_DEBUGGER();
break;
}
+ //DC_LOG_DEBUG("%s: inst(%d) DPPCLK rcg_disable: %d\n", __func__, inst, enable ? 0 : 1);
+
}
static void dccg35_set_dpstreamclk_rcg(
@@ -1035,6 +1038,7 @@ static void dccg35_enable_dpp_clk_new(
DPPCLK0_DTO_MODULO, 0xFF);
}
+
static void dccg35_disable_dpp_clk_new(
struct dccg *dccg,
int inst)
@@ -1112,30 +1116,24 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg,
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
switch (dpp_inst) {
case 0:
REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
break;
case 1:
REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
break;
case 2:
REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
break;
case 3:
REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
break;
default:
break;
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
}
@@ -1163,14 +1161,18 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst,
ASSERT(false);
phase = 0xff;
}
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, false);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, phase,
DPPCLK0_DTO_MODULO, modulo);
dcn35_set_dppclk_enable(dccg, dpp_inst, true);
- } else
+ } else {
dcn35_set_dppclk_enable(dccg, dpp_inst, false);
+ /*we have this in hwss: disable_plane*/
+ //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
+ }
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
@@ -1182,6 +1184,7 @@ static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
return;
+
switch (dpp_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
@@ -1198,6 +1201,8 @@ static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
default:
break;
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) rcg: %d\n", __func__, dpp_inst, enable);
+
}
static void dccg35_get_pixel_rate_div(
@@ -1521,28 +1526,30 @@ static void dccg35_set_physymclk_root_clock_gating(
switch (phy_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 1:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 2:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 3:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 4:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
default:
BREAK_TO_DEBUGGER();
return;
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE:\n", __func__, phy_inst, enable ? 0 : 1);
+
}
static void dccg35_set_physymclk(
@@ -1643,6 +1650,8 @@ static void dccg35_dpp_root_clock_control(
return;
if (clock_on) {
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, false);
+
/* turn off the DTO and leave phase/modulo at max */
dcn35_set_dppclk_enable(dccg, dpp_inst, 1);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
@@ -1654,6 +1663,8 @@ static void dccg35_dpp_root_clock_control(
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0,
DPPCLK0_DTO_MODULO, 1);
+ /*we have this in hwss: disable_plane*/
+ //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
}
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
@@ -1771,36 +1782,40 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
//Disable DTO
switch (inst) {
case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK0_DTO_PARAM,
DSCCLK0_DTO_PHASE, 0,
DSCCLK0_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
break;
case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
DSCCLK1_DTO_PHASE, 0,
DSCCLK1_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
break;
case 2:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
DSCCLK2_DTO_PHASE, 0,
DSCCLK2_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
break;
case 3:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
DSCCLK3_DTO_PHASE, 0,
DSCCLK3_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
break;
default:
BREAK_TO_DEBUGGER();
@@ -1813,9 +1828,6 @@ static void dccg35_disable_dscclk(struct dccg *dccg,
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- return;
-
switch (inst) {
case 0:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 077337698e0a..b4f5b4a6331a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -976,11 +976,12 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
- // Apply ssed(spread spectrum) dpref clock for edp only.
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
- && pix_clk_params->signal_type == SIGNAL_TYPE_EDP
- && encoding == DP_8b_10b_ENCODING)
+ // Apply ssed(spread spectrum) dpref clock for edp and dp
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0 &&
+ dc_is_dp_signal(pix_clk_params->signal_type) &&
+ encoding == DP_8b_10b_ENCODING)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
+
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
if (e) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index 0721ae895ae9..94128f7a18b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -257,7 +257,7 @@ bool dce110_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
struct dc_bios *bios,
- enum clock_source_id,
+ enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index ccc154b0281c..3b9011ef9b68 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -28,6 +28,8 @@
#include "dc.h"
#include "core_types.h"
#include "dmub_cmd.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
#define TO_DMUB_ABM(abm)\
container_of(abm, struct dce_abm, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index 0d7e7f3b81a1..a641ae04450c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -240,7 +240,8 @@ bool dmub_abm_save_restore(
cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask;
- cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
+ cmd.abm_save_restore.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_abm_save_restore) - sizeof(struct dmub_cmd_header);
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index c31e4f26a305..fcd3d86ad517 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -280,7 +280,9 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm
memset(&cmd, 0, sizeof(cmd));
pCmd->header.type = DMUB_CMD__REPLAY;
pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL;
- pCmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal);
+ pCmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal) -
+ sizeof(struct dmub_cmd_header);
pCmd->replay_set_power_opt_data.power_opt = power_opt;
pCmd->replay_set_power_opt_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
@@ -319,7 +321,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_timing_sync.header.sub_type =
DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED;
cmd.replay_set_timing_sync.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_timing_sync);
+ sizeof(struct dmub_rb_cmd_replay_set_timing_sync) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst =
cmd_element->sync_data.panel_inst;
@@ -331,7 +334,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_frameupdate_timer.header.sub_type =
DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER;
cmd.replay_set_frameupdate_timer.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer);
+ sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_frameupdate_timer.data.panel_inst =
cmd_element->panel_inst;
@@ -345,7 +349,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_pseudo_vtotal.header.sub_type =
DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL;
cmd.replay_set_pseudo_vtotal.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal);
+ sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_pseudo_vtotal.data.panel_inst =
cmd_element->pseudo_vtotal_data.panel_inst;
@@ -357,7 +362,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_disabled_adaptive_sync_sdp.header.sub_type =
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP;
cmd.replay_disabled_adaptive_sync_sdp.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp);
+ sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_disabled_adaptive_sync_sdp.data.panel_inst =
cmd_element->disabled_adaptive_sync_sdp_data.panel_inst;
@@ -369,7 +375,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_general_cmd.header.sub_type =
DMUB_CMD__REPLAY_SET_GENERAL_CMD;
cmd.replay_set_general_cmd.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_general_cmd);
+ sizeof(struct dmub_rb_cmd_replay_set_general_cmd) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_general_cmd.data.panel_inst =
cmd_element->set_general_cmd_data.panel_inst;
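The payload_bytes changes in dmub_abm_lcd.c and dmub_replay.c above (and the earlier FAMS2 and flip hunks) all apply the same correction: the payload size reported to DMUB must not include the command header itself. A hypothetical helper, not part of this series, expressing that convention:

  /* Hypothetical convenience macro (not in this patch): the payload of a DMUB
   * ring-buffer command is the command struct minus its embedded header. */
  #define DMUB_RB_CMD_PAYLOAD_BYTES(cmd_struct) \
          (sizeof(cmd_struct) - sizeof(struct dmub_cmd_header))

  /* e.g. the replay timing-sync header setup above would read: */
  cmd.replay_set_timing_sync.header.payload_bytes =
          DMUB_RB_CMD_PAYLOAD_BYTES(struct dmub_rb_cmd_replay_set_timing_sync);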
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index eede83ad91fa..824f73eb3326 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -25,8 +25,7 @@
CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
-DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
- dce60_resource.o
+DCE60 = dce60_timing_generator.o
AMD_DAL_DCE60 = $(addprefix $(AMDDALPATH)/dc/dce60/,$(DCE60))
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
index 003a9330c286..88e7a1fc9a30 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -105,7 +105,7 @@ static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_100hz)
dm_write_reg(tg->ctx, addr, value);
}
-static void program_timing(struct timing_generator *tg,
+static void dce80_timing_generator_program_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
int vready_offset,
int vstartup_start,
@@ -185,7 +185,7 @@ static void dce80_timing_generator_enable_advanced_request(
static const struct timing_generator_funcs dce80_tg_funcs = {
.validate_timing = dce110_tg_validate_timing,
- .program_timing = program_timing,
+ .program_timing = dce80_timing_generator_program_timing,
.enable_crtc = dce110_timing_generator_enable_crtc,
.disable_crtc = dce110_timing_generator_disable_crtc,
.is_counter_moving = dce110_timing_generator_is_counter_moving,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 5efddd48d5c5..9d160b39e8c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -153,6 +153,14 @@ bool dm_helpers_submit_i2c(
const struct dc_link *link,
struct i2c_command *cmd);
+bool dm_helpers_execute_fused_io(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+);
+
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
const struct dc_stream_state *stream,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index f1fe49401bc0..8d24763938ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -1002,6 +1002,7 @@ static bool CalculatePrefetchSchedule(
dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime
- (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC);
Tsw_oto = Lsw_oto * LineTime;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index f567a9023682..ed59c77bc6f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -1105,6 +1105,7 @@ static bool CalculatePrefetchSchedule(
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 5865e8fa2d8e..9f3938a50240 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -1123,6 +1123,7 @@ static bool CalculatePrefetchSchedule(
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
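The 63.75 clamp added in the three VBA files above follows from the register format named in the comment: DST_Y_PREFETCH is a U6.2 fixed-point field, i.e. 6 integer bits and 2 fractional bits, so its largest representable value is (2^8 - 1) / 2^2 = 255 / 4 = 63.75 lines. The existing dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0 that follows then keeps the clamped value on the same quarter-line grid the register stores.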
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 56dda686e299..b0fc1fd20208 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -627,6 +627,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
*/
if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) &&
!pipe->stream->hw_cursor_req &&
+ !dc_state_get_stream_cursor_subvp_limit(pipe->stream, context) &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index 21fd466dba26..157ecf008d6c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -99,7 +99,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_ccflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
@@ -117,11 +116,9 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_floa
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_rcflags)
DML21 := src/dml2_top/dml2_top_interfaces.o
DML21 += src/dml2_top/dml2_top_soc15.o
-DML21 += src/inc/dml2_debug.o
DML21 += src/dml2_core/dml2_core_dcn4.o
DML21 += src/dml2_core/dml2_core_factory.o
DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index 0c8ec30ea672..d47cacfdb695 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -526,7 +526,8 @@ static void populate_dml21_output_config_from_stream_state(struct dml2_link_outp
static void populate_dml21_stream_overrides_from_stream_state(
struct dml2_stream_parameters *stream_desc,
- struct dc_stream_state *stream)
+ struct dc_stream_state *stream,
+ struct dc_stream_status *stream_status)
{
switch (stream->debug.force_odm_combine_segments) {
case 0:
@@ -551,7 +552,9 @@ static void populate_dml21_stream_overrides_from_stream_state(
if (!stream->ctx->dc->debug.enable_single_display_2to1_odm_policy ||
stream->debug.force_odm_combine_segments > 0)
stream_desc->overrides.disable_dynamic_odm = true;
- stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || stream->hw_cursor_req;
+ stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp ||
+ stream->hw_cursor_req ||
+ stream_status->mall_stream_config.cursor_size_limit_subvp;
}
static enum dml2_swizzle_mode gfx_addr3_to_dml2_swizzle_mode(enum swizzle_mode_addr3_values addr3_mode)
@@ -885,6 +888,9 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
case DC_CM2_GPU_MEM_SIZE_171717:
plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
break;
+ case DC_CM2_GPU_MEM_SIZE_333333:
+ plane->tdlut.tdlut_width_mode = dml2_tdlut_width_33_cube;
+ break;
case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
//plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined
break;
@@ -910,7 +916,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
}
//TODO : Could be possibly moved to a common helper layer.
-static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, unsigned int *plane_id)
+static bool dml21_wrapper_get_plane_id(const struct dc_state *context, unsigned int stream_id, const struct dc_plane_state *plane, unsigned int *plane_id)
{
int i, j;
@@ -918,10 +924,12 @@ static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const str
return false;
for (i = 0; i < context->stream_count; i++) {
- for (j = 0; j < context->stream_status[i].plane_count; j++) {
- if (context->stream_status[i].plane_states[j] == plane) {
- *plane_id = (i << 16) | j;
- return true;
+ if (context->streams[i]->stream_id == stream_id) {
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
+ if (context->stream_status[i].plane_states[j] == plane) {
+ *plane_id = (i << 16) | j;
+ return true;
+ }
}
}
}
@@ -944,14 +952,14 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d
return location;
}
-static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx,
+unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id,
const struct dc_plane_state *plane, const struct dc_state *context)
{
unsigned int plane_id;
int i = 0;
int location = -1;
- if (!dml21_wrapper_get_plane_id(context, plane, &plane_id)) {
+ if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) {
ASSERT(false);
return -1;
}
@@ -1021,7 +1029,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
- populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]);
+ populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]);
dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.fclk_pstate = dml2_twait_budgeting_setting_if_needed;
dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.uclk_pstate = dml2_twait_budgeting_setting_if_needed;
@@ -1037,7 +1045,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
} else {
for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) {
- disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->stream_status[stream_index].plane_states[plane_index], context);
+ disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], context);
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_planes++;
@@ -1048,7 +1056,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
- if (dml21_wrapper_get_plane_id(context, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
+ if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true;
/* apply forced pstate policy */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
index 069b939c672a..73a013be1e48 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
@@ -11,6 +11,7 @@ struct dc_state;
struct dcn_watermarks;
union dcn_watermark_set;
struct pipe_ctx;
+struct dc_plane_state;
struct dml2_context;
struct dml2_configuration_options;
@@ -25,4 +26,5 @@ void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_se
void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);
void dml21_set_dc_p_state_type(struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming, bool sub_vp_enabled);
+unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index be54f0e696ce..208d3651b6ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -2,6 +2,7 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
+#include <linux/vmalloc.h>
#include "dml2_internal_types.h"
#include "dml_top.h"
@@ -11,13 +12,15 @@
#include "dml21_translation_helper.h"
#include "dml2_dc_resource_mgmt.h"
+#define INVALID -1
+
static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
{
- *dml_ctx = kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+ *dml_ctx = vzalloc(sizeof(struct dml2_context));
if (!(*dml_ctx))
return false;
- (*dml_ctx)->v21.dml_init.dml2_instance = kzalloc(sizeof(struct dml2_instance), GFP_KERNEL);
+ (*dml_ctx)->v21.dml_init.dml2_instance = vzalloc(sizeof(struct dml2_instance));
if (!((*dml_ctx)->v21.dml_init.dml2_instance))
return false;
@@ -27,7 +30,7 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
(*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config;
(*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config;
- (*dml_ctx)->v21.mode_programming.programming = kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL);
+ (*dml_ctx)->v21.mode_programming.programming = vzalloc(sizeof(struct dml2_display_cfg_programming));
if (!((*dml_ctx)->v21.mode_programming.programming))
return false;
@@ -86,6 +89,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
/* Store configuration options */
(*dml_ctx)->config = *config;
+ DC_FP_START();
+
/*Initialize SOCBB and DCNIP params */
dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
@@ -96,6 +101,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
/*Initialize DML21 instance */
dml2_initialize_instance(&(*dml_ctx)->v21.dml_init);
+
+ DC_FP_END();
}
bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
@@ -111,8 +118,8 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
void dml21_destroy(struct dml2_context *dml2)
{
- kfree(dml2->v21.dml_init.dml2_instance);
- kfree(dml2->v21.mode_programming.programming);
+ vfree(dml2->v21.dml_init.dml2_instance);
+ vfree(dml2->v21.mode_programming.programming);
}
static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state,
@@ -203,10 +210,40 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
}
}
+static void dml21_prepare_mcache_params(struct dml2_context *dml_ctx, struct dc_state *context, struct dc_mcache_params *mcache_params)
+{
+ int dc_plane_idx = 0;
+ int dml_prog_idx, stream_idx, plane_idx;
+ struct dml2_per_plane_programming *pln_prog = NULL;
+
+ for (stream_idx = 0; stream_idx < context->stream_count; stream_idx++) {
+ for (plane_idx = 0; plane_idx < context->stream_status[stream_idx].plane_count; plane_idx++) {
+ dml_prog_idx = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_idx]->stream_id, context->stream_status[stream_idx].plane_states[plane_idx], context);
+ if (dml_prog_idx == INVALID) {
+ continue;
+ }
+ pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
+ mcache_params[dc_plane_idx].valid = pln_prog->mcache_allocation.valid;
+ mcache_params[dc_plane_idx].num_mcaches_plane0 = pln_prog->mcache_allocation.num_mcaches_plane0;
+ mcache_params[dc_plane_idx].num_mcaches_plane1 = pln_prog->mcache_allocation.num_mcaches_plane1;
+ mcache_params[dc_plane_idx].requires_dedicated_mall_mcache = pln_prog->mcache_allocation.requires_dedicated_mall_mcache;
+ mcache_params[dc_plane_idx].last_slice_sharing.plane0_plane1 = pln_prog->mcache_allocation.last_slice_sharing.plane0_plane1;
+ memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane0,
+ pln_prog->mcache_allocation.mcache_x_offsets_plane0,
+ sizeof(int) * (DML2_MAX_MCACHES + 1));
+ memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane1,
+ pln_prog->mcache_allocation.mcache_x_offsets_plane1,
+ sizeof(int) * (DML2_MAX_MCACHES + 1));
+ dc_plane_idx++;
+ }
+ }
+}
+
static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
{
bool result = false;
struct dml2_build_mode_programming_in_out *mode_programming = &dml_ctx->v21.mode_programming;
+ struct dc_mcache_params mcache_params[MAX_PLANES] = {0};
memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg));
memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
@@ -229,7 +266,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
if (!result)
return false;
+ DC_FP_START();
result = dml2_build_mode_programming(mode_programming);
+ DC_FP_END();
if (!result)
return false;
@@ -239,6 +278,14 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, in_dc->current_state);
/* if subvp phantoms are present, expand them into dc context */
dml21_handle_phantom_streams_planes(in_dc, context, dml_ctx);
+
+ if (in_dc->res_pool->funcs->program_mcache_pipe_config) {
+ //Prepare mcache params for each plane based on mcache output from DML
+ dml21_prepare_mcache_params(dml_ctx, context, mcache_params);
+
+ //populate mcache regs to each pipe
+ dml_ctx->config.callbacks.allocate_mcache(context, mcache_params);
+ }
}
/* Copy DML CLK, WM and REG outputs to bandwidth context */
@@ -272,7 +319,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
mode_support->dml2_instance = dml_init->dml2_instance;
dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
+ DC_FP_START();
is_supported = dml2_check_mode_supported(mode_support);
+ DC_FP_END();
if (!is_supported)
return false;
@@ -284,10 +333,11 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml
bool out = false;
/* Use dml_validate_only for fast_validate path */
- if (fast_validate) {
+ if (fast_validate)
out = dml21_check_mode_support(in_dc, context, dml_ctx);
- } else
+ else
out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
+
return out;
}
@@ -426,8 +476,12 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,
dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming;
+ DC_FP_START();
+
/* need to initialize copied instance for internal references to be correct */
dml2_initialize_instance(&dst_dml_ctx->v21.dml_init);
+
+ DC_FP_END();
}
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
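The DC_FP_START()/DC_FP_END() pairs added throughout dml21_wrapper.c follow DC's convention that DML floating-point math must run inside an FPU-protected section; every entry into the DML2.1 library is now bracketed the same way, for example:

  /* pattern applied at each DML2.1 entry point in this patch */
  DC_FP_START();
  is_supported = dml2_check_mode_supported(mode_support);
  DC_FP_END();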
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
index b2075b8c363b..42e715024bc9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
@@ -8,6 +8,7 @@
#include "os_types.h"
#include "dml_top_soc_parameter_types.h"
+#include "dml_top_display_cfg_types.h"
struct dc;
struct dc_state;
@@ -65,4 +66,67 @@ struct socbb_ip_params_external {
struct dml2_ip_capabilities ip_params;
struct dml2_soc_bb soc_bb;
};
+
+/*mcache parameters decided by dml*/
+struct dc_mcache_params {
+ bool valid;
+ /*
+ * For iMALL, dedicated mall mcaches are required (sharing of the last
+ * slice is possible); for legacy phantom or phantom without return,
+ * only the mall mcaches need to be valid.
+ */
+ bool requires_dedicated_mall_mcache;
+ unsigned int num_mcaches_plane0;
+ unsigned int num_mcaches_plane1;
+ /*
+ * Generally, plane0/1 slices must use a disjoint set of caches
+ * but in some cases the final segment of the two planes can
+ * use the same cache. If plane0_plane1 is set, then this is
+ * allowed.
+ *
+ * Similarly, the caches allocated to MALL prefetcher are generally
+ * disjoint, but if mall_prefetch is set, then the final segment
+ * between the main and the mall pixel requestor can use the same
+ * cache.
+ *
+ * Note that both bits may be set at the same time.
+ */
+ struct {
+ bool mall_comb_mcache_p0;
+ bool mall_comb_mcache_p1;
+ bool plane0_plane1;
+ } last_slice_sharing;
+ /*
+ * A plane is divided into vertical slices of mcaches,
+ * which wrap on the surface width.
+ *
+ * For example, if the surface width is 7680, and split into
+ * three slices of equal width, the boundary array would contain
+ * [2560, 5120, 7680]
+ *
+ * The assignments are
+ * 0 = [0 .. 2559]
+ * 1 = [2560 .. 5119]
+ * 2 = [5120 .. 7679]
+ * 0 = [7680 .. INF]
+ * The final element is implicitly the same as the first, and
+ * at first seems invalid since it is never referenced, as
+ * it is outside the surface. However, it's useful when shifting
+ * (see below).
+ *
+ * For any given valid mcache assignment, a shifted version, wrapped
+ * on the surface width boundary is also assumed to be valid.
+ *
+ * For example, shifting [2560, 5120, 7680] by -50 results in
+ * [2510, 5170, 7630].
+ *
+ * The assignments are now:
+ * 0 = [0 .. 2509]
+ * 1 = [2510 .. 5169]
+ * 2 = [5170 .. 7629]
+ * 0 = [7630 .. INF]
+ */
+ int mcache_x_offsets_plane0[DML2_MAX_MCACHES + 1];
+ int mcache_x_offsets_plane1[DML2_MAX_MCACHES + 1];
+};
#endif
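To make the boundary semantics documented in dc_mcache_params concrete, here is an illustrative lookup, not part of this series, that resolves a horizontal position to an mcache slice index under the assumptions stated in the comment above (each offset is the exclusive upper bound of its slice, and positions past the last boundary wrap back to slice 0):

  /* Illustrative only -- not part of this patch. offsets[] and num_mcaches are
   * assumed to come from a populated dc_mcache_params, e.g.
   * mcache_x_offsets_plane0 and num_mcaches_plane0. */
  static unsigned int mcache_slice_for_x(const int *offsets, unsigned int num_mcaches, int x)
  {
          unsigned int i;

          for (i = 0; i < num_mcaches; i++) {
                  if (x < offsets[i])
                          return i;       /* x lies in slice i */
          }
          return 0;                       /* wrapped past the last boundary */
  }

  /* With offsets = {2560, 5120, 7680}: x = 3000 -> slice 1, x = 8000 -> slice 0. */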
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
index a64ec4dcf11a..c047d56527c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
@@ -43,4 +43,5 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o
*/
bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
index 25b607e7b726..84c90050668c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
@@ -156,6 +156,8 @@ struct dml2_dchub_watermark_regs {
uint32_t urgent;
uint32_t sr_enter;
uint32_t sr_exit;
+ uint32_t sr_enter_z8;
+ uint32_t sr_exit_z8;
uint32_t uclk_pstate;
uint32_t fclk_pstate;
uint32_t temp_read_or_ppt;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
index 5e1ab6d97640..255f05de362c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
@@ -166,7 +166,7 @@ struct dml2_surface_cfg {
enum dml2_swizzle_mode tiling;
struct {
- unsigned long pitch;
+ unsigned long pitch; // In elements, two pixels per element in 422 packed format
unsigned long width;
unsigned long height;
} plane0;
@@ -385,6 +385,7 @@ struct dml2_plane_parameters {
long reserved_vblank_time_ns;
unsigned int max_vactive_det_fill_delay_us; // 0 = no reserved time, +ve = explicit max delay
unsigned int gpuvm_min_page_size_kbytes;
+ unsigned int hostvm_min_page_size_kbytes;
enum dml2_svp_mode_override legacy_svp_config; //TODO remove in favor of svp_config
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index bb863c8c6b39..6ee37386f672 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -456,10 +456,10 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out)
in_out->mode_support_result.global.active.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.average_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0);
- dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_sdp_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_dram_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_sdp_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.active.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_dram_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps);
for (i = 0; i < l->svp_expanded_display_cfg.num_planes; i++) {
in_out->mode_support_result.per_plane[i].dppclk_khz = (unsigned int)(core->clean_me_up.mode_lib.ms.RequiredDPPCLK[i] * 1000);
@@ -509,7 +509,7 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out)
stream_index = l->svp_expanded_display_cfg.plane_descriptors[i].stream_index;
in_out->mode_support_result.per_stream[stream_index].dscclk_khz = (unsigned int)core->clean_me_up.mode_lib.ms.required_dscclk_freq_mhz[i] * 1000;
- dml2_printf("CORE_DCN4::%s: i=%d stream_index=%d, in_out->mode_support_result.per_stream[stream_index].dscclk_khz = %u\n", __func__, i, stream_index, in_out->mode_support_result.per_stream[stream_index].dscclk_khz);
+ DML_LOG_VERBOSE("CORE_DCN4::%s: i=%d stream_index=%d, in_out->mode_support_result.per_stream[stream_index].dscclk_khz = %u\n", __func__, i, stream_index, in_out->mode_support_result.per_stream[stream_index].dscclk_khz);
if (!((stream_bitmask >> stream_index) & 0x1)) {
in_out->mode_support_result.cfg_support_info.stream_support_info[stream_index].odms_used = odm_count;
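Most of the remaining hunks replace dml2_printf() calls, many of them wrapped in #ifdef __DML_VBA_DEBUG__ guards, with DML_LOG_VERBOSE(), moving the compile-time gating into the logging macro itself. A rough sketch of that pattern, assuming a hypothetical DML_LOG_LEVEL knob; this is not the kernel's actual DML_LOG_VERBOSE definition:

#include <stdio.h>

/* Sketch only: a verbose-log macro that compiles to nothing unless a
 * sufficiently high log level is configured, so call sites no longer
 * need their own #ifdef guards. */
#if defined(DML_LOG_LEVEL) && DML_LOG_LEVEL >= 9
#define DML_LOG_VERBOSE(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define DML_LOG_VERBOSE(fmt, ...) do { } while (0)
#endif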
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 4c504cb0e1c5..c4dad7164d31 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -54,104 +54,104 @@ static double dml2_core_div_rem(double dividend, unsigned int divisor, unsigned
static void dml2_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
{
- dml2_printf("DML: ===================================== \n");
- dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
+ DML_LOG_VERBOSE("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: DML_MODE_SUPPORT_INFO_ST\n");
if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
- dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
+ DML_LOG_VERBOSE("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
+ DML_LOG_VERBOSE("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
- dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
+ DML_LOG_VERBOSE("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
- dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ DML_LOG_VERBOSE("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ DML_LOG_VERBOSE("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
- dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
+ DML_LOG_VERBOSE("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
if (!fail_only || support->NotEnoughLanesForMSO == 1)
- dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
+ DML_LOG_VERBOSE("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ DML_LOG_VERBOSE("DML: support: P2IWith420 = %d\n", support->P2IWith420);
if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ DML_LOG_VERBOSE("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
if (!fail_only || support->DSCSlicesODMModeSupported == 0)
- dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ DML_LOG_VERBOSE("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
if (!fail_only || support->NotEnoughDSCUnits == 1)
- dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
if (!fail_only || support->NotEnoughDSCSlices == 1)
- dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ DML_LOG_VERBOSE("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
if (!fail_only || support->ROBSupport == 0)
- dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
+ DML_LOG_VERBOSE("DML: support: ROBSupport = %d\n", support->ROBSupport);
if (!fail_only || support->OutstandingRequestsSupport == 0)
- dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
- dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ DML_LOG_VERBOSE("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ DML_LOG_VERBOSE("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ DML_LOG_VERBOSE("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ DML_LOG_VERBOSE("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ DML_LOG_VERBOSE("DML: support: CursorSupport = %d\n", support->CursorSupport);
if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ DML_LOG_VERBOSE("DML: support: PitchSupport = %d\n", support->PitchSupport);
if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
if (!fail_only || support->PrefetchSupported == 0)
- dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ DML_LOG_VERBOSE("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
+ DML_LOG_VERBOSE("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
if (!fail_only || support->DynamicMetadataSupported == 0)
- dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
+ DML_LOG_VERBOSE("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
- dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
if (!fail_only || support->g6_temp_read_support == 0)
- dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+ DML_LOG_VERBOSE("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
if (!fail_only || support->ModeSupport == 0)
- dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- dml2_printf("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: support: ModeSupport = %d\n", support->ModeSupport);
+ DML_LOG_VERBOSE("DML: ===================================== \n");
}
static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg)
@@ -179,11 +179,9 @@ static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg
} else {
out_bpp[k] = 0;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
- dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
- dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
+ DML_LOG_VERBOSE("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
+ DML_LOG_VERBOSE("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
}
}
@@ -212,9 +210,7 @@ static unsigned int dml_get_num_active_pipes(int unsigned num_planes, const stru
num_active_pipes = num_active_pipes + (unsigned int)cfg_support_info->plane_support_info[k].dpps_used;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
-#endif
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
return num_active_pipes;
}
@@ -251,7 +247,7 @@ static bool dml_get_is_phantom_pipe(const struct dml2_display_cfg *display_cfg,
unsigned int plane_idx = mode_lib->mp.pipe_plane[pipe_idx];
bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[plane_idx]);
- dml2_printf("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom);
+ DML_LOG_VERBOSE("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom);
return is_phantom;
}
@@ -415,19 +411,17 @@ static void CalculateMaxDETAndMinCompressedBufferSize(
*nomDETInKByte = (unsigned int)(math_floor2((double)*MaxTotalDETInKByte / (double)MaxNumDPP, ConfigReturnBufferSegmentSizeInKByte));
*MinCompressedBufferSizeInKByte = ConfigReturnBufferSizeInKByte - *MaxTotalDETInKByte;
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte);
- dml2_printf("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP);
+ DML_LOG_VERBOSE("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte);
if (nomDETInKByteOverrideEnable) {
*nomDETInKByte = nomDETInKByteOverrideValue;
- dml2_printf("DML::%s: nomDETInKByte = %u (overrided)\n", __func__, *nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u (overrided)\n", __func__, *nomDETInKByte);
}
}
@@ -502,7 +496,7 @@ static bool dml_is_420(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -535,7 +529,7 @@ static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode
else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
return 262144;
else {
- DML2_ASSERT(0);
+ DML_ASSERT(0);
return 256;
}
}
@@ -570,8 +564,8 @@ static int unsigned dml_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_gfx11_sw_256kb_r_x) {
version = 11;
} else {
- dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
+ DML_ASSERT(0);
}
return version;
@@ -645,21 +639,19 @@ static void CalculateBytePerPixelAndBlockSizes(
*BytePerPixelY = 2;
*BytePerPixelC = 4;
} else {
- dml2_printf("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat);
+ DML_ASSERT(0);
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat);
- dml2_printf("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY);
- dml2_printf("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC);
- dml2_printf("DML::%s: pitch_y = %u\n", __func__, pitch_y);
- dml2_printf("DML::%s: pitch_c = %u\n", __func__, pitch_c);
- dml2_printf("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l);
- dml2_printf("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c);
-#endif
+ DML_LOG_VERBOSE("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC);
+ DML_LOG_VERBOSE("DML::%s: pitch_y = %u\n", __func__, pitch_y);
+ DML_LOG_VERBOSE("DML::%s: pitch_c = %u\n", __func__, pitch_c);
+ DML_LOG_VERBOSE("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l);
+ DML_LOG_VERBOSE("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c);
if (dml_get_gfx_version(SurfaceTiling) == 11) {
*surf_linear128_l = 0;
@@ -703,12 +695,10 @@ static void CalculateBytePerPixelAndBlockSizes(
*BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
*BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY);
- dml2_printf("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY);
- dml2_printf("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC);
- dml2_printf("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC);
-#endif
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY);
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC);
if (dml_get_gfx_version(SurfaceTiling) == 11) {
if (SurfaceTiling == dml2_gfx11_sw_linear) {
@@ -752,8 +742,8 @@ static void CalculateBytePerPixelAndBlockSizes(
} else if (SurfaceTiling == dml2_sw_256kb_2d) {
macro_tile_scale = 32;
} else {
- dml2_printf("ERROR: Invalid SurfaceTiling setting! val=%u\n", SurfaceTiling);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid SurfaceTiling setting! val=%u\n", SurfaceTiling);
+ DML_ASSERT(0);
}
*MacroTileHeightY = macro_tile_scale * *BlockHeight256BytesY;
@@ -766,12 +756,10 @@ static void CalculateBytePerPixelAndBlockSizes(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY);
- dml2_printf("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY);
- dml2_printf("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC);
- dml2_printf("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC);
-#endif
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY);
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC);
}
static void CalculateSinglePipeDPPCLKAndSCLThroughput(
@@ -860,10 +848,8 @@ static void CalculateSwathWidth(
unsigned int surface_width_ub_c;
unsigned int surface_height_ub_c;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
-#endif
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) {
@@ -872,11 +858,9 @@ static void CalculateSwathWidth(
SwathWidthSingleDPPY[k] = (unsigned int)display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u ViewportWidth=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportWidth=%lu\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportHeight=%lu\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
MainSurfaceODMMode = ODMMode[k];
@@ -899,13 +883,11 @@ static void CalculateSwathWidth(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u HActive=%u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active);
- dml2_printf("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode);
- dml2_printf("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]);
- dml2_printf("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u HActive=%lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active);
+ DML_LOG_VERBOSE("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]);
if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format)) {
SwathWidthC[k] = SwathWidthY[k] / 2;
@@ -934,22 +916,20 @@ static void CalculateSwathWidth(
surface_width_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.width, req_width_horz_c);
surface_height_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.height, Read256BytesBlockHeightC[k]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l);
- dml2_printf("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l);
- dml2_printf("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c);
- dml2_printf("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c);
- dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
- dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]);
- dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
- dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
- dml2_printf("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
req_per_swath_ub_l[k] = 0;
req_per_swath_ub_c[k] = 0;
@@ -995,15 +975,12 @@ static void CalculateSwathWidth(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u swath_width_luma_ub=%u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]);
-#endif
-
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_luma_ub=%u\n", __func__, k, swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]);
}
}
@@ -1018,13 +995,11 @@ static bool UnboundedRequest(bool unb_req_force_en, bool unb_req_force_val, unsi
if (unb_req_force_en) {
unb_req_en = unb_req_force_val && unb_req_ok;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en);
- dml2_printf("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val);
- dml2_printf("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok);
- dml2_printf("DML::%s: unb_req_en = %u\n", __func__, unb_req_en);
-#endif
- return (unb_req_en);
+ DML_LOG_VERBOSE("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en);
+ DML_LOG_VERBOSE("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val);
+ DML_LOG_VERBOSE("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok);
+ DML_LOG_VERBOSE("DML::%s: unb_req_en = %u\n", __func__, unb_req_en);
+ return unb_req_en;
}
static void CalculateDETBufferSize(
@@ -1054,16 +1029,14 @@ static void CalculateDETBufferSize(
bool NextPotentialSurfaceToAssignDETPieceFound;
bool MinimizeReallocationSuccess = false;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, UnboundedRequestEnabled);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, MaxTotalDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, MinCompressedBufferSizeInKByte);
- dml2_printf("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, CompressedBufferSegmentSizeInkByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: MaxTotalDETInKByte = %u\n", __func__, MaxTotalDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, MinCompressedBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, CompressedBufferSegmentSizeInkByte);
// Note: Will use default det size if that fits 2 swaths
if (UnboundedRequestEnabled) {
@@ -1092,19 +1065,15 @@ static void CalculateDETBufferSize(
l->minDET = l->minDET + ConfigReturnBufferSegmentSizeInkByte;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u minDET = %u\n", __func__, k, l->minDET);
- dml2_printf("DML::%s: k=%u max_minDET = %u\n", __func__, k, l->max_minDET);
- dml2_printf("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, l->minDET_pipe);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, full_swath_bytes_c[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET = %u\n", __func__, k, l->minDET);
+ DML_LOG_VERBOSE("DML::%s: k=%u max_minDET = %u\n", __func__, k, l->max_minDET);
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, l->minDET_pipe);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, full_swath_bytes_c[k]);
if (l->minDET_pipe == 0) {
l->minDET_pipe = (unsigned int)(math_max2(128, math_ceil2(((double)full_swath_bytes_l[k] + (double)full_swath_bytes_c[k]) / 1024.0, ConfigReturnBufferSegmentSizeInkByte)));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u minDET_pipe = %u (assume each plane take half DET)\n", __func__, k, l->minDET_pipe);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET_pipe = %u (assume each plane take half DET)\n", __func__, k, l->minDET_pipe);
}
if (dml_is_phantom_pipe(&display_cfg->plane_descriptors[k])) {
@@ -1117,12 +1086,10 @@ static void CalculateDETBufferSize(
l->DETBufferSizePoolInKByte = l->DETBufferSizePoolInKByte - (ForceSingleDPP ? 1 : DPPPerSurface[k]) * l->minDET_pipe;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.det_size_override_kb);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, l->DETBufferSizePoolInKByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.det_size_override_kb);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, l->DETBufferSizePoolInKByte);
}
if (display_cfg->minimize_det_reallocation) {
@@ -1194,14 +1161,12 @@ static void CalculateDETBufferSize(
l->TotalBandwidth = l->TotalBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k];
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- Before bandwidth adjustment ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- Before bandwidth adjustment ---\n", __func__);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
}
- dml2_printf("DML::%s: --- DET allocation with bandwidth ---\n", __func__);
-#endif
- dml2_printf("DML::%s: TotalBandwidth = %f\n", __func__, l->TotalBandwidth);
+ DML_LOG_VERBOSE("DML::%s: --- DET allocation with bandwidth ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: TotalBandwidth = %f\n", __func__, l->TotalBandwidth);
l->BandwidthOfSurfacesNotAssignedDETPiece = l->TotalBandwidth;
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
@@ -1213,10 +1178,8 @@ static void CalculateDETBufferSize(
} else {
DETPieceAssignedToThisSurfaceAlready[k] = false;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, l->BandwidthOfSurfacesNotAssignedDETPiece);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, l->BandwidthOfSurfacesNotAssignedDETPiece);
}
for (unsigned int j = 0; j < NumberOfActiveSurfaces; ++j) {
@@ -1224,22 +1187,18 @@ static void CalculateDETBufferSize(
l->NextSurfaceToAssignDETPiece = 0;
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, l->NextSurfaceToAssignDETPiece);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, l->NextSurfaceToAssignDETPiece);
if (!DETPieceAssignedToThisSurfaceAlready[k] && (!NextPotentialSurfaceToAssignDETPieceFound ||
ReadBandwidthLuma[k] + ReadBandwidthChroma[k] < ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece] + ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece])) {
l->NextSurfaceToAssignDETPiece = k;
NextPotentialSurfaceToAssignDETPieceFound = true;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound);
}
if (NextPotentialSurfaceToAssignDETPieceFound) {
@@ -1249,20 +1208,16 @@ static void CalculateDETBufferSize(
* (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]) * ConfigReturnBufferSegmentSizeInkByte,
math_floor2((double)l->DETBufferSizePoolInKByte, (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]) * ConfigReturnBufferSegmentSizeInkByte)));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, l->DETBufferSizePoolInKByte);
- dml2_printf("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, l->NextSurfaceToAssignDETPiece);
- dml2_printf("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, l->BandwidthOfSurfacesNotAssignedDETPiece);
- dml2_printf("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, l->NextDETBufferPieceInKByte);
- dml2_printf("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, l->NextSurfaceToAssignDETPiece, DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, l->DETBufferSizePoolInKByte);
+ DML_LOG_VERBOSE("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, l->NextSurfaceToAssignDETPiece);
+ DML_LOG_VERBOSE("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, l->BandwidthOfSurfacesNotAssignedDETPiece);
+ DML_LOG_VERBOSE("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, l->NextDETBufferPieceInKByte);
+ DML_LOG_VERBOSE("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, l->NextSurfaceToAssignDETPiece, DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece] = DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece] + l->NextDETBufferPieceInKByte / (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("to %u\n", DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
-#endif
+ DML_LOG_VERBOSE("to %u\n", DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
l->DETBufferSizePoolInKByte = l->DETBufferSizePoolInKByte - l->NextDETBufferPieceInKByte;
DETPieceAssignedToThisSurfaceAlready[l->NextSurfaceToAssignDETPiece] = true;
@@ -1274,13 +1229,11 @@ static void CalculateDETBufferSize(
}
*CompressedBufferSizeInkByte = *CompressedBufferSizeInkByte * CompressedBufferSegmentSizeInkByte / ConfigReturnBufferSegmentSizeInkByte;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- After bandwidth adjustment ---\n", __func__);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: --- After bandwidth adjustment ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *CompressedBufferSizeInkByte);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, DETBufferSizeInKByte[k], ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, DETBufferSizeInKByte[k], ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
}
-#endif
}
static double CalculateRequiredDispclk(
@@ -1510,15 +1463,13 @@ static unsigned int dscceComputeDelay(
//pixel delay is group_delay (converted to pixels) + pipeline, however, first group is a special case since it is processed as soon as it arrives (i.e., in 3 cycles regardless of pixel format)
pixels = (group_delay - 1) * cycles_per_group + 3 + pipeline_delay;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: bpc: %u\n", __func__, bpc);
- dml2_printf("DML::%s: BPP: %f\n", __func__, BPP);
- dml2_printf("DML::%s: sliceWidth: %u\n", __func__, sliceWidth);
- dml2_printf("DML::%s: numSlices: %u\n", __func__, numSlices);
- dml2_printf("DML::%s: pixelFormat: %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Output: %u\n", __func__, Output);
- dml2_printf("DML::%s: pixels: %u\n", __func__, pixels);
-#endif
+ DML_LOG_VERBOSE("DML::%s: bpc: %u\n", __func__, bpc);
+ DML_LOG_VERBOSE("DML::%s: BPP: %f\n", __func__, BPP);
+ DML_LOG_VERBOSE("DML::%s: sliceWidth: %u\n", __func__, sliceWidth);
+ DML_LOG_VERBOSE("DML::%s: numSlices: %u\n", __func__, numSlices);
+ DML_LOG_VERBOSE("DML::%s: pixelFormat: %u\n", __func__, pixelFormat);
+ DML_LOG_VERBOSE("DML::%s: Output: %u\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: pixels: %u\n", __func__, pixels);
return pixels;
}
@@ -1593,10 +1544,8 @@ static unsigned int dscComputeDelay(enum dml2_output_format_class pixelFormat, e
// sft
Delay = Delay + 1;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: pixelFormat = %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Delay = %u\n", __func__, Delay);
-#endif
+ DML_LOG_VERBOSE("DML::%s: pixelFormat = %u\n", __func__, pixelFormat);
+ DML_LOG_VERBOSE("DML::%s: Delay = %u\n", __func__, Delay);
return Delay;
}
@@ -1667,10 +1616,8 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
}
meta_surface_bytes = (unsigned int)(p->DCCMetaPitch * vp_height_meta_ub * p->BytePerPixel / 256.0);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch);
- dml2_printf("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes);
-#endif
+ DML_LOG_VERBOSE("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch);
+ DML_LOG_VERBOSE("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes);
if (p->GPUVMEnable == true) {
double meta_vmpg_bytes = 4.0 * 1024.0;
*p->meta_pte_bytes_per_frame_ub = (unsigned int)((math_ceil2((double) (meta_surface_bytes - meta_vmpg_bytes) / (8 * meta_vmpg_bytes), 1) + 1) * 64);
@@ -1724,25 +1671,23 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
vm_bytes = *p->meta_pte_bytes_per_frame_ub + extra_mpde_bytes + *p->dpde0_bytes_per_frame_ub + extra_dpde_bytes;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear);
- dml2_printf("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel);
- dml2_printf("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels);
- dml2_printf("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes);
- dml2_printf("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes);
- dml2_printf("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight);
- dml2_printf("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth);
- dml2_printf("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub);
- dml2_printf("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub);
- dml2_printf("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes);
- dml2_printf("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight);
- dml2_printf("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth);
- dml2_printf("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub);
-#endif
+ DML_LOG_VERBOSE("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel);
+ DML_LOG_VERBOSE("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes);
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight);
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth);
+ DML_LOG_VERBOSE("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub);
+ DML_LOG_VERBOSE("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub);
+ DML_LOG_VERBOSE("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes);
+ DML_LOG_VERBOSE("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight);
+ DML_LOG_VERBOSE("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth);
+ DML_LOG_VERBOSE("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub);
if (p->SurfaceTiling == dml2_sw_linear) {
*p->PixelPTEReqHeight = 1;
@@ -1778,22 +1723,20 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->vmpg_width = 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel);
if (p->GPUVMEnable == true) {
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n",
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n",
__func__, p->GPUVMMinPageSizeKBytes, p->SurfaceTiling, dml_get_tile_block_size_bytes(p->SurfaceTiling));
- DML2_ASSERT(0);
+ DML_ASSERT(0);
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight);
- dml2_printf("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth);
- dml2_printf("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear);
- dml2_printf("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize);
- dml2_printf("DML::%s: Pitch = %u\n", __func__, p->Pitch);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height);
-#endif
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear);
+ DML_LOG_VERBOSE("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize);
+ DML_LOG_VERBOSE("DML::%s: Pitch = %u\n", __func__, p->Pitch);
+ DML_LOG_VERBOSE("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width);
+ DML_LOG_VERBOSE("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height);
*p->dpte_row_height_one_row_per_frame = vp_height_dpte_ub;
*p->dpte_row_width_ub_one_row_per_frame = (unsigned int)((math_ceil2(((double)p->Pitch * (double)*p->dpte_row_height_one_row_per_frame / (double)*p->PixelPTEReqHeight - 1) / (double)*p->PixelPTEReqWidth, 1) + 1) * (double)*p->PixelPTEReqWidth);
@@ -1811,7 +1754,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->dpte_row_height_linear = 128;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub);
#endif
} else if (!dml_is_vertical_rotation(p->RotationAngle)) {
@@ -1825,7 +1768,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->dpte_row_width_ub = (unsigned int)((math_ceil2((double)(p->SwathWidth - 1) / (double)*p->PixelPTEReqWidth, 1) + 1.0) * *p->PixelPTEReqWidth);
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub);
#endif
*p->PixelPTEBytesPerRow = *p->dpte_row_width_ub / *p->PixelPTEReqWidth * *p->PTERequestSize;
@@ -1840,7 +1783,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->PixelPTEBytesPerRow = (unsigned int)((double)*p->dpte_row_width_ub / (double)*p->PixelPTEReqHeight * *p->PTERequestSize);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, *p->dpte_row_width_ub);
#endif
}
@@ -1852,18 +1795,18 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->PixelPTEBytesPerRowStorage = *p->PixelPTEBytesPerRow;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: meta_row_height = %u\n", __func__, *p->meta_row_height);
- dml2_printf("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height);
- dml2_printf("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear);
- dml2_printf("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage);
- dml2_printf("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests);
- dml2_printf("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame);
- dml2_printf("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame);
- dml2_printf("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: meta_row_height = %u\n", __func__, *p->meta_row_height);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage);
+ DML_LOG_VERBOSE("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame);
#endif
return vm_bytes;
@@ -1894,12 +1837,12 @@ static unsigned int CalculatePrefetchSourceLines(
double numLines = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VTaps = %u\n", __func__, VTaps);
- dml2_printf("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart);
- dml2_printf("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart);
- dml2_printf("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary);
- dml2_printf("DML::%s: SwathHeight = %u\n", __func__, SwathHeight);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: VTaps = %u\n", __func__, VTaps);
+ DML_LOG_VERBOSE("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart);
+ DML_LOG_VERBOSE("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart);
+ DML_LOG_VERBOSE("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary);
+ DML_LOG_VERBOSE("DML::%s: SwathHeight = %u\n", __func__, SwathHeight);
#endif
if (ProgressiveToInterlaceUnitInOPP)
*VInitPreFill = (unsigned int)(math_floor2((VRatio + (double)VTaps + 1) / 2.0, 1));
@@ -1934,11 +1877,11 @@ static unsigned int CalculatePrefetchSourceLines(
numLines = *MaxNumSwath * SwathHeight + MaxPartialSwath;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot);
- dml2_printf("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill);
- dml2_printf("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath);
- dml2_printf("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath);
- dml2_printf("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines);
+ DML_LOG_VERBOSE("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill);
+ DML_LOG_VERBOSE("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath);
+ DML_LOG_VERBOSE("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines);
#endif
return (unsigned int)(numLines);
@@ -2007,8 +1950,8 @@ static void CalculateMALLUseForStaticScreen(
if (is_using_mall_for_ss[k])
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]);
- dml2_printf("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL);
+ DML_LOG_VERBOSE("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL);
#endif
}
@@ -2022,7 +1965,7 @@ static void CalculateMALLUseForStaticScreen(
(!CanAddAnotherSurfaceToMALL || SurfaceSizeInMALL[k] < SurfaceSizeInMALL[SurfaceToAddToMALL])) {
CanAddAnotherSurfaceToMALL = true;
SurfaceToAddToMALL = k;
- dml2_printf("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall);
+ DML_LOG_VERBOSE("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall);
}
}
if (CanAddAnotherSurfaceToMALL) {
@@ -2030,8 +1973,8 @@ static void CalculateMALLUseForStaticScreen(
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[SurfaceToAddToMALL];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL);
+ DML_LOG_VERBOSE("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL);
#endif
}
}
@@ -2203,15 +2146,15 @@ static void CalculateDCCConfiguration(
segment_order_vert_contiguous_chroma = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
- dml2_printf("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC);
- dml2_printf("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l);
- dml2_printf("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c);
- dml2_printf("DML::%s: full_swath_bytes_horz_wc_l = %u\n", __func__, full_swath_bytes_horz_wc_l);
- dml2_printf("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c);
- dml2_printf("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma);
- dml2_printf("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma);
+ DML_LOG_VERBOSE("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC);
+ DML_LOG_VERBOSE("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l);
+ DML_LOG_VERBOSE("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes_horz_wc_l = %u\n", __func__, full_swath_bytes_horz_wc_l);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c);
+ DML_LOG_VERBOSE("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma);
+ DML_LOG_VERBOSE("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma);
#endif
if (DCCProgrammingAssumesScanDirectionUnknown == true) {
if (req128_horz_wc_l == 0 && req128_vert_wc_l == 0) {
@@ -2301,12 +2244,12 @@ static void CalculateDCCConfiguration(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma);
- dml2_printf("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma);
- dml2_printf("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma);
- dml2_printf("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma);
- dml2_printf("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma);
- dml2_printf("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma);
#endif
}
@@ -2326,26 +2269,26 @@ static void calculate_mcache_row_bytes(
unsigned int mvmpg_per_mcache;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_chans = %u\n", __func__, p->num_chans);
- dml2_printf("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes);
- dml2_printf("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes);
- dml2_printf("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes);
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes);
- dml2_printf("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary);
- dml2_printf("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode);
- dml2_printf("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x);
- dml2_printf("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y);
- dml2_printf("DML::%s: full_vp_width = %u\n", __func__, p->full_vp_width);
- dml2_printf("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height);
- dml2_printf("DML::%s: blk_width = %u\n", __func__, p->blk_width);
- dml2_printf("DML::%s: blk_height = %u\n", __func__, p->blk_height);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height);
- dml2_printf("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes);
-#endif
- DML2_ASSERT(p->mcache_line_size_bytes != 0);
- DML2_ASSERT(p->mcache_size_bytes != 0);
+ DML_LOG_VERBOSE("DML::%s: num_chans = %u\n", __func__, p->num_chans);
+ DML_LOG_VERBOSE("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary);
+ DML_LOG_VERBOSE("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode);
+ DML_LOG_VERBOSE("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x);
+ DML_LOG_VERBOSE("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y);
+ DML_LOG_VERBOSE("DML::%s: full_vp_width = %u\n", __func__, p->full_vp_width);
+ DML_LOG_VERBOSE("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height);
+ DML_LOG_VERBOSE("DML::%s: blk_width = %u\n", __func__, p->blk_width);
+ DML_LOG_VERBOSE("DML::%s: blk_height = %u\n", __func__, p->blk_height);
+ DML_LOG_VERBOSE("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width);
+ DML_LOG_VERBOSE("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes);
+#endif
+ DML_ASSERT(p->mcache_line_size_bytes != 0);
+ DML_ASSERT(p->mcache_size_bytes != 0);
*p->mvmpg_width = 0;
*p->mvmpg_height = 0;
@@ -2370,8 +2313,8 @@ static void calculate_mcache_row_bytes(
*p->mvmpg_width = p->vmpg_width;
*p->mvmpg_height = p->vmpg_height;
} else if (!((blk_bytes == 65536) && (vmpg_bytes == 4096))) {
- dml2_printf("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__);
+ DML_ASSERT(0);
}
}
@@ -2439,25 +2382,25 @@ static void calculate_mcache_row_bytes(
*p->mvmpg_per_mcache_lb = (unsigned int)math_floor2(mvmpg_per_mcache, 1);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: blk_bytes = %u\n", __func__, blk_bytes);
- dml2_printf("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel);
- dml2_printf("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub);
- dml2_printf("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub);
- dml2_printf("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width);
- dml2_printf("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor);
- dml2_printf("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes);
+ DML_LOG_VERBOSE("DML::%s: blk_bytes = %u\n", __func__, blk_bytes);
+ DML_LOG_VERBOSE("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub);
+ DML_LOG_VERBOSE("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes);
- dml2_printf("DML::%s: mcache_row_bytes_per_channel = %u\n", __func__, *p->mcache_row_bytes_per_channel);
- dml2_printf("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
+ DML_LOG_VERBOSE("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_row_bytes_per_channel = %u\n", __func__, *p->mcache_row_bytes_per_channel);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
#endif
- DML2_ASSERT(*p->num_mcaches > 0);
+ DML_ASSERT(*p->num_mcaches > 0);
}
static void calculate_mcache_setting(
@@ -2523,7 +2466,7 @@ static void calculate_mcache_setting(
l->l_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_l;
calculate_mcache_row_bytes(scratch, &l->l_p);
- DML2_ASSERT(*p->num_mcaches_l > 0);
+ DML_ASSERT(*p->num_mcaches_l > 0);
if (l->is_dual_plane) {
l->c_p.num_chans = p->num_chans;
@@ -2559,7 +2502,7 @@ static void calculate_mcache_setting(
l->c_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_c;
calculate_mcache_row_bytes(scratch, &l->c_p);
- DML2_ASSERT(*p->num_mcaches_c > 0);
+ DML_ASSERT(*p->num_mcaches_c > 0);
}
// Sharing for iMALL access
@@ -2598,28 +2541,28 @@ static void calculate_mcache_setting(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: imall_enable = %u\n", __func__, p->imall_enable);
- dml2_printf("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane);
- dml2_printf("DML::%s: surf_vert = %u\n", __func__, p->surf_vert);
- dml2_printf("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l);
- dml2_printf("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l);
- dml2_printf("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l);
- dml2_printf("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l);
- dml2_printf("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l);
- dml2_printf("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l);
- dml2_printf("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l);
+ DML_LOG_VERBOSE("DML::%s: imall_enable = %u\n", __func__, p->imall_enable);
+ DML_LOG_VERBOSE("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane);
+ DML_LOG_VERBOSE("DML::%s: surf_vert = %u\n", __func__, p->surf_vert);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l);
+ DML_LOG_VERBOSE("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l);
+ DML_LOG_VERBOSE("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l);
+ DML_LOG_VERBOSE("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l);
if (l->is_dual_plane) {
- dml2_printf("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c);
- dml2_printf("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c);
- dml2_printf("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c);
- dml2_printf("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor);
- dml2_printf("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c);
- dml2_printf("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c);
- dml2_printf("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c);
- dml2_printf("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c);
- dml2_printf("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size);
- dml2_printf("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c);
+ DML_LOG_VERBOSE("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c);
+ DML_LOG_VERBOSE("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c);
+ DML_LOG_VERBOSE("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c);
+ DML_LOG_VERBOSE("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c);
+ DML_LOG_VERBOSE("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size);
+ DML_LOG_VERBOSE("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache);
}
#endif
// calculate split_coordinate
@@ -2639,11 +2582,11 @@ static void calculate_mcache_setting(
}
#ifdef __DML_VBA_DEBUG__
for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
if (l->is_dual_plane) {
for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
}
#endif
@@ -2660,10 +2603,10 @@ static void calculate_mcache_setting(
#ifdef __DML_VBA_DEBUG__
for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
#endif
}
@@ -2694,8 +2637,8 @@ static void calculate_mall_bw_overhead_factor(
mall_prefetch_dram_overhead_factor[k] = 2.0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]);
- dml2_printf("DML::%s: k=%u, mall_prefetch_dram_overhead_factor = %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, mall_prefetch_dram_overhead_factor = %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]);
#endif
}
}
@@ -2772,22 +2715,20 @@ static double dml_get_return_bandwidth_available(
else // dml2_core_internal_bw_dram
return_bw_mbps = derate_dram_bandwidth;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw);
- dml2_printf("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en);
- dml2_printf("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
- dml2_printf("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
- dml2_printf("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
- dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
- dml2_printf("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
- dml2_printf("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth);
- dml2_printf("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor);
- dml2_printf("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor);
- dml2_printf("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor);
- dml2_printf("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps);
-#endif
+ DML_LOG_VERBOSE("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw);
+ DML_LOG_VERBOSE("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en);
+ DML_LOG_VERBOSE("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
+ DML_LOG_VERBOSE("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
+ DML_LOG_VERBOSE("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
+ DML_LOG_VERBOSE("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor);
+ DML_LOG_VERBOSE("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor);
+ DML_LOG_VERBOSE("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor);
+ DML_LOG_VERBOSE("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps);
return return_bw_mbps;
}
@@ -2807,9 +2748,9 @@ static noinline_for_stack void calculate_bandwidth_available(
{
unsigned int n, m;
- dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, dram_bw_mbps);
// Calculate all the bandwidth available
for (m = 0; m < dml2_core_internal_soc_state_max; m++) {
@@ -2828,8 +2769,8 @@ static noinline_for_stack void calculate_bandwidth_available(
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]);
#endif
// urg_bandwidth_available_vm_only is indexed by soc_state
@@ -2843,9 +2784,9 @@ static noinline_for_stack void calculate_bandwidth_available(
urg_bandwidth_available_min[m] = math_min2(urg_bandwidth_available[m][dml2_core_internal_bw_dram], urg_bandwidth_available[m][dml2_core_internal_bw_sdp]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]);
- dml2_printf("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]);
- dml2_printf("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[n]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[n]);
#endif
}
}
@@ -2879,13 +2820,13 @@ static void calculate_avg_bandwidth_required(
// SysActive and SVP Prefetch AVG bandwidth Check
for (k = 0; k < num_active_planes; ++k) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: plane %0d\n", __func__, k);
- dml2_printf("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p1[k]);
- dml2_printf("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: plane %0d\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p1[k]);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]);
#endif
sdp_overhead_factor = mall_prefetch_sdp_overhead_factor[k];
@@ -2902,10 +2843,10 @@ static void calculate_avg_bandwidth_required(
avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] += dram_overhead_factor_p0 * ReadBandwidthLuma[k] + dram_overhead_factor_p1 * ReadBandwidthChroma[k] + cursor_bw[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
#endif
}
}
@@ -3080,10 +3021,10 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
&p->MaxNumSwathY[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l);
- dml2_printf("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_l[k]);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]);
#endif
p->vm_bytes[k] = (s->vm_bytes_l + s->vm_bytes_c) * (1 + 8 * s->HostVMDynamicLevels);
p->meta_row_bytes[k] = s->meta_row_bytes_per_row_ub_l[k] + s->meta_row_bytes_per_row_ub_c[k];
@@ -3091,8 +3032,8 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
p->meta_row_bytes_per_row_ub_c[k] = s->meta_row_bytes_per_row_ub_c[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]);
#endif
if (s->PixelPTEBytesPerRowStorageY[k] <= 64 * s->PTEBufferSizeInRequestsForLuma[k] && s->PixelPTEBytesPerRowStorageC[k] <= 64 * s->PTEBufferSizeInRequestsForChroma[k]) {
p->PTEBufferSizeNotExceeded[k] = true;
@@ -3104,18 +3045,18 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
s->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * s->PTEBufferSizeInRequestsForChroma[k]);
#ifdef __DML_VBA_DEBUG__
if (p->PTEBufferSizeNotExceeded[k] == 0 || s->one_row_per_frame_fits_in_buffer[k] == 0) {
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]);
}
#endif
}
@@ -3146,8 +3087,8 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
p->DCCMetaBufferSizeNotExceeded[k] = true;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]);
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]);
#endif
p->use_one_row_for_frame[k] = p->myPipe[k].FORCE_ONE_ROW_FOR_FRAME || p->is_using_mall_for_ss[k] || (p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) ||
(dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) || (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes > 64 && dml_is_vertical_rotation(p->myPipe[k].RotationAngle));
@@ -3170,9 +3111,9 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
p->DCCMetaBufferSizeNotExceeded[k] = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]);
#endif
}
@@ -3209,20 +3150,20 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
&p->dpte_row_bw[k],
&p->meta_row_bw[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]);
- dml2_printf("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config);
- dml2_printf("DML::%s: k=%u, dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]);
- dml2_printf("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]);
#endif
}
}
@@ -3257,19 +3198,19 @@ static double CalculateUrgentLatency(
}
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin);
} else {
- dml2_printf("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly);
- dml2_printf("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData);
- dml2_printf("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference);
}
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency);
#endif
return urgent_latency;
}
@@ -3296,18 +3237,18 @@ static double CalculateTripToMemory(
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
- dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
- dml2_printf("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin);
- dml2_printf("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin);
} else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
+ DML_LOG_VERBOSE("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
}
- dml2_printf("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us);
+ DML_LOG_VERBOSE("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us);
#endif
@@ -3334,14 +3275,14 @@ static double CalculateMetaTripToMemory(
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
- dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
} else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
+ DML_LOG_VERBOSE("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
}
- dml2_printf("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us);
#endif
@@ -3358,7 +3299,6 @@ static void calculate_cursor_req_attributes(
unsigned int *cursor_bytes_per_chunk,
unsigned int *cursor_bytes)
{
- unsigned int cursor_pitch = 0;
unsigned int cursor_bytes_per_req = 0;
unsigned int cursor_width_bytes = 0;
unsigned int cursor_height = 0;
@@ -3366,10 +3306,6 @@ static void calculate_cursor_req_attributes(
//SW determines the cursor pitch to support the maximum cursor_width that will be used but the following restrictions apply.
//- For 2bpp, cursor_pitch = 256 pixels due to min cursor request size of 64B
//- For 32 or 64 bpp, cursor_pitch = 64, 128 or 256 pixels depending on the cursor width
- if (cursor_bpp == 2)
- cursor_pitch = 256;
- else
- cursor_pitch = (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1);
//The cursor requestor uses a cursor request size of 64B, 128B, or 256B depending on the cursor_width and cursor_bpp as follows.
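
For reference, the pitch rule described in the comments above (and in the open-coded computation this hunk removes, which now only survives inline in the cursor_pitch debug print further down) boils down to the following standalone sketch. Plain libm calls stand in for DML's math_log()/math_ceil2(); the helper name and the values in main() are illustrative only and not part of this patch:

#include <math.h>
#include <stdio.h>

static unsigned int cursor_pitch_pixels(unsigned int cursor_bpp, unsigned int cursor_width)
{
	if (cursor_bpp == 2)
		return 256; /* 256 px * 2 bpp = 64B, the minimum cursor request size */
	/* 32/64 bpp: round the cursor width up to the next power of two (64/128/256) */
	return 1u << (unsigned int)ceil(log2((double)cursor_width));
}

int main(void)
{
	printf("%u\n", cursor_pitch_pixels(32, 65)); /* 128 */
	printf("%u\n", cursor_pitch_pixels(2, 65));  /* 256 */
	return 0;
}
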
@@ -3409,8 +3345,8 @@ static void calculate_cursor_req_attributes(
*cursor_lines_per_chunk = 1;
} else {
if (cursor_width > 0) {
- dml2_printf("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp);
+ DML_ASSERT(0);
}
}
@@ -3421,15 +3357,15 @@ static void calculate_cursor_req_attributes(
cursor_height = cursor_width;
*cursor_bytes = *cursor_bytes_per_line * cursor_height;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp);
- dml2_printf("DML::%s: cursor_width = %d\n", __func__, cursor_width);
- dml2_printf("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes);
- dml2_printf("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
- dml2_printf("DML::%s: cursor_pitch = %d\n", __func__, cursor_pitch);
+ DML_LOG_VERBOSE("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp);
+ DML_LOG_VERBOSE("DML::%s: cursor_width = %d\n", __func__, cursor_width);
+ DML_LOG_VERBOSE("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req);
+ DML_LOG_VERBOSE("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
+ DML_LOG_VERBOSE("DML::%s: cursor_pitch = %d\n", __func__, cursor_bpp == 2 ? 256 : (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1));
#endif
}
@@ -3460,13 +3396,13 @@ static void calculate_cursor_urgent_burst_factor(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer);
- dml2_printf("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime);
- dml2_printf("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk);
- dml2_printf("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
+ DML_LOG_VERBOSE("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer);
+ DML_LOG_VERBOSE("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime);
+ DML_LOG_VERBOSE("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor);
+ DML_LOG_VERBOSE("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
#endif
}
@@ -3501,15 +3437,15 @@ static void CalculateUrgentBurstFactor(
*UrgentBurstFactorChroma = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VRatioC = %f\n", __func__, VRatioC);
- dml2_printf("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY);
- dml2_printf("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC);
- dml2_printf("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY);
- dml2_printf("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: VRatioC = %f\n", __func__, VRatioC);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
- DML2_ASSERT(VRatio > 0);
+ DML_ASSERT(VRatio > 0);
LinesInDETLuma = (dml_is_phantom_pipe(plane_cfg) ? 1024 * 1024 : DETBufferSizeY) / BytePerPixelInDETY / swath_width_luma_ub;
@@ -3534,12 +3470,12 @@ static void CalculateUrgentBurstFactor(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma);
- dml2_printf("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma);
- dml2_printf("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
+ DML_LOG_VERBOSE("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
+ DML_LOG_VERBOSE("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
#endif
}
@@ -3600,10 +3536,10 @@ static void CalculateDCFCLKDeepSleepTdlut(
if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut && tdlut_bytes_to_deliver[k] > 0) {
double tdlut_required_deepsleep_dcfclk = (double) tdlut_bytes_to_deliver[k] / 64.0 / prefetch_swath_time_us[k];
- dml2_printf("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
- dml2_printf("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]);
- dml2_printf("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]);
- dml2_printf("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk);
// increase the deepsleep dcfclk to match the original dispclk throughput rate
if (tdlut_required_deepsleep_dcfclk > DCFClkDeepSleepPerSurface[k]) {
@@ -3613,8 +3549,8 @@ static void CalculateDCFCLKDeepSleepTdlut(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
- dml2_printf("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
#endif
}
@@ -3625,17 +3561,17 @@ static void CalculateDCFCLKDeepSleepTdlut(
*DCFClkDeepSleep = math_max2(8.0, __DML2_CALCS_DCFCLK_FACTOR__ * ReadBandwidth / (double)ReturnBusWidth);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__);
- dml2_printf("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth);
- dml2_printf("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep);
#endif
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
*DCFClkDeepSleep = math_max2(*DCFClkDeepSleep, DCFClkDeepSleepPerSurface[k]);
}
- dml2_printf("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
}
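
As a quick sanity check of the tdlut deep-sleep DCFCLK term visible in this hunk, here is a self-contained sketch; the input values are assumptions picked for illustration, and only the arithmetic mirrors the code:

#include <stdio.h>

int main(void)
{
	/* Assumed inputs, for illustration only */
	double tdlut_bytes_to_deliver = 4096.0;
	double prefetch_swath_time_us = 10.0;

	/* Mirrors: tdlut_bytes_to_deliver / 64.0 / prefetch_swath_time_us */
	double tdlut_required_deepsleep_dcfclk =
		tdlut_bytes_to_deliver / 64.0 / prefetch_swath_time_us;

	/* 4096 / 64 / 10 = 6.4 MHz, below the 8 MHz floor applied via math_max2(8.0, ...) */
	printf("tdlut_required_deepsleep_dcfclk = %.1f MHz\n", tdlut_required_deepsleep_dcfclk);
	return 0;
}
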
static noinline_for_stack void CalculateDCFCLKDeepSleep(
@@ -3731,12 +3667,12 @@ static unsigned int CalculateMaxVStartup(
else
max_vstartup_lines = vblank_size - (unsigned int)math_max2(1.0, math_ceil2(write_back_delay_us / line_time_us, 1.0));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VBlankNom = %u\n", __func__, timing->vblank_nom);
- dml2_printf("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us);
- dml2_printf("DML::%s: line_time_us = %f\n", __func__, line_time_us);
- dml2_printf("DML::%s: vblank_actual = %u\n", __func__, vblank_actual);
- dml2_printf("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
- dml2_printf("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
+ DML_LOG_VERBOSE("DML::%s: VBlankNom = %lu\n", __func__, timing->vblank_nom);
+ DML_LOG_VERBOSE("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us);
+ DML_LOG_VERBOSE("DML::%s: line_time_us = %f\n", __func__, line_time_us);
+ DML_LOG_VERBOSE("DML::%s: vblank_actual = %u\n", __func__, vblank_actual);
+ DML_LOG_VERBOSE("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
+ DML_LOG_VERBOSE("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
#endif
max_vstartup_lines = (unsigned int)math_min2(max_vstartup_lines, DML_MAX_VSTARTUP_START);
return max_vstartup_lines;
@@ -3761,9 +3697,9 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
const long MAXIMUMCOMPRESSION = 4;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]);
}
#endif
CalculateSwathWidth(
@@ -3797,15 +3733,15 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->full_swath_bytes_l[k] = (unsigned int)(p->swath_width_luma_ub[k] * p->BytePerPixDETY[k] * MaximumSwathHeightY[k]);
p->full_swath_bytes_c[k] = (unsigned int)(p->swath_width_chroma_ub[k] * p->BytePerPixDETC[k] * MaximumSwathHeightC[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, p->swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, MaximumSwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, p->swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, MaximumSwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
#endif
if (p->display_cfg->plane_descriptors[k].pixel_format == dml2_420_10) {
p->full_swath_bytes_l[k] = (unsigned int)(math_ceil2((double)p->full_swath_bytes_l[k], 256));
@@ -3848,11 +3784,11 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->CompressedBufferSizeInkByte);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
#endif
*p->ViewportSizeSupport = true;
@@ -3860,7 +3796,7 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
DETBufferSizeInKByteForSwathCalculation = (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]) ? 1024 : p->DETBufferSizeInKByte[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
#endif
if (p->display_cfg->plane_descriptors[k].surface.tiling == dml2_sw_linear) {
p->SwathHeightY[k] = MaximumSwathHeightY[k];
@@ -3917,13 +3853,13 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
if ((p->full_swath_bytes_l[k] / 2 + p->full_swath_bytes_c[k] / 2 > DETBufferSizeInKByteForSwathCalculation * 1024 / 2) ||
p->SwathWidth[k] > p->MaximumSwathWidthLuma[k] || (p->SwathHeightC[k] > 0 && p->SwathWidthChroma[k] > p->MaximumSwathWidthChroma[k])) {
*p->ViewportSizeSupport = false;
- dml2_printf("DML::%s: k=%u full_swath_bytes_l=%u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c=%u\n", __func__, k, p->full_swath_bytes_c[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation=%u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
- dml2_printf("DML::%s: k=%u SwathWidth=%u\n", __func__, k, p->SwathWidth[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, p->MaximumSwathWidthLuma[k]);
- dml2_printf("DML::%s: k=%u SwathWidthChroma=%d\n", __func__, k, p->SwathWidthChroma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, p->MaximumSwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l=%u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c=%u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation=%u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidth=%u\n", __func__, k, p->SwathWidth[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, p->MaximumSwathWidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthChroma=%d\n", __func__, k, p->SwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, p->MaximumSwathWidthChroma[k]);
p->ViewportSizeSupportPerSurface[k] = false;
} else {
p->ViewportSizeSupportPerSurface[k] = true;
@@ -3931,35 +3867,35 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
if (p->SwathHeightC[k] == 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k);
#endif
p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024;
p->DETBufferSizeC[k] = 0;
} else if (RoundedUpSwathSizeBytesY[k] <= 1.5 * RoundedUpSwathSizeBytesC[k]) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k);
#endif
p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
} else {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k);
#endif
p->DETBufferSizeY[k] = (unsigned int)(math_floor2(p->DETBufferSizeInKByte[k] * 1024 * 2 / 3, 1024));
p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 - p->DETBufferSizeY[k];
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, RoundedUpSwathSizeBytesY[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]);
- dml2_printf("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, RoundedUpSwathSizeBytesY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]);
#endif
}
@@ -3969,12 +3905,12 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
*p->compbuf_reserved_space_64b = (unsigned int)math_ceil2(math_max2(*p->compbuf_reserved_space_64b,
(double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / (p->mrq_present ? MAXIMUMCOMPRESSION : 1) / 64)), 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
- dml2_printf("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
+ DML_LOG_VERBOSE("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b);
+ DML_LOG_VERBOSE("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b);
#endif
*p->hw_debug5 = false;
@@ -3989,12 +3925,12 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
+ *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (RoundedUpSwathSizeBytesY[k] + RoundedUpSwathSizeBytesC[k])))
*p->hw_debug5 = true;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
- dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
- dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
+ DML_LOG_VERBOSE("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
+ DML_LOG_VERBOSE("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
+ DML_LOG_VERBOSE("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
#endif
}
#endif
@@ -4192,15 +4128,15 @@ static noinline_for_stack void CalculateODMMode(
SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock);
SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMUse = %d\n", __func__, ODMUse);
- dml2_printf("DML::%s: Output = %d\n", __func__, Output);
- dml2_printf("DML::%s: DSCEnable = %d\n", __func__, DSCEnable);
- dml2_printf("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk);
- dml2_printf("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne);
+ DML_LOG_VERBOSE("DML::%s: ODMUse = %d\n", __func__, ODMUse);
+ DML_LOG_VERBOSE("DML::%s: Output = %d\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: DSCEnable = %d\n", __func__, DSCEnable);
+ DML_LOG_VERBOSE("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk);
+ DML_LOG_VERBOSE("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne);
#endif
if (ODMUse == dml2_odm_mode_auto)
DecidedODMMode = DecideODMMode(HActive,
@@ -4245,10 +4181,10 @@ static noinline_for_stack void CalculateODMMode(
*NumberOfDPP = NumberOfDPPRequired;
*RequiredDISPCLKPerSurface = success ? DISPCLKRequired : 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMMode = %d\n", __func__, *ODMMode);
- dml2_printf("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP);
- dml2_printf("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport);
- dml2_printf("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %d\n", __func__, *ODMMode);
+ DML_LOG_VERBOSE("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP);
+ DML_LOG_VERBOSE("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface);
#endif
}
@@ -4292,17 +4228,17 @@ static noinline_for_stack void CalculateOutputLink(
*OutputRate = dml2_core_internal_output_rate_unknown;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable);
- dml2_printf("DML::%s: PHYCLK = %f\n", __func__, PHYCLK);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC);
- dml2_printf("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC);
- dml2_printf("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP);
- dml2_printf("DML::%s: Output (encoder) = %u\n", __func__, Output);
- dml2_printf("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate);
+ DML_LOG_VERBOSE("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable);
+ DML_LOG_VERBOSE("DML::%s: PHYCLK = %f\n", __func__, PHYCLK);
+ DML_LOG_VERBOSE("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
+ DML_LOG_VERBOSE("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate);
+ DML_LOG_VERBOSE("DML::%s: HActive = %u\n", __func__, HActive);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC);
+ DML_LOG_VERBOSE("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC);
+ DML_LOG_VERBOSE("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP);
+ DML_LOG_VERBOSE("DML::%s: Output (encoder) = %u\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate);
#endif
{
if (Output == dml2_hdmi) {
@@ -4487,9 +4423,9 @@ static noinline_for_stack void CalculateOutputLink(
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC);
- dml2_printf("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC);
- dml2_printf("DML::%s: OutBpp = %f\n", __func__, *OutBpp);
+ DML_LOG_VERBOSE("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC);
+ DML_LOG_VERBOSE("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC);
+ DML_LOG_VERBOSE("DML::%s: OutBpp = %f\n", __func__, *OutBpp);
#endif
}
@@ -4571,17 +4507,17 @@ static unsigned int DSCDelayRequirement(
DSCDelayRequirement_val = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, ODMMode);
- dml2_printf("DML::%s: OutputBpp = %f\n", __func__, OutputBpp);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: OutputFormat = %u\n", __func__, OutputFormat);
- dml2_printf("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent);
- dml2_printf("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices);
- dml2_printf("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val);
+ DML_LOG_VERBOSE("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %u\n", __func__, ODMMode);
+ DML_LOG_VERBOSE("DML::%s: OutputBpp = %f\n", __func__, OutputBpp);
+ DML_LOG_VERBOSE("DML::%s: HActive = %u\n", __func__, HActive);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, PixelClock);
+ DML_LOG_VERBOSE("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
+ DML_LOG_VERBOSE("DML::%s: OutputFormat = %u\n", __func__, OutputFormat);
+ DML_LOG_VERBOSE("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent);
+ DML_LOG_VERBOSE("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices);
+ DML_LOG_VERBOSE("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val);
#endif
return DSCDelayRequirement_val;
@@ -4654,10 +4590,10 @@ static void CalculateSurfaceSizeInMall(
(TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MALLAllocatedForDCN = %u\n", __func__, MALLAllocatedForDCN * 1024 * 1024);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS);
- dml2_printf("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize);
+ DML_LOG_VERBOSE("DML::%s: MALLAllocatedForDCN = %u\n", __func__, MALLAllocatedForDCN * 1024 * 1024);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS);
+ DML_LOG_VERBOSE("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize);
#endif
}
@@ -4674,7 +4610,6 @@ static void calculate_tdlut_setting(
unsigned int tdlut_vmpg_per_frame;
unsigned int tdlut_pte_req_per_frame;
unsigned int tdlut_bytes_per_line;
- unsigned int tdlut_delivery_cycles;
double tdlut_drain_rate;
unsigned int tdlut_mpc_width;
unsigned int tdlut_bytes_per_group_simple;
@@ -4737,13 +4672,13 @@ static void calculate_tdlut_setting(
*p->tdlut_bytes_per_frame = tdlut_bytes_per_line * tdlut_mpc_width * tdlut_mpc_width;
*p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width;
	//the delivery cycles are DispClk cycles per line * number of lines * number of slices
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
+ //tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1);
} else {
//tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements
*p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256);
*p->tdlut_bytes_per_group = tdlut_bytes_per_group_simple;
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width/2.0, 1);
+ //tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width/2.0, 1);
tdlut_drain_rate = 2 * tdlut_bpe * p->dispclk_mhz;
}
@@ -4756,25 +4691,25 @@ static void calculate_tdlut_setting(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame);
- dml2_printf("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame);
- dml2_printf("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
- dml2_printf("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
- dml2_printf("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? "sw_linear" : "simple_linear");
- dml2_printf("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
- dml2_printf("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
- dml2_printf("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
- dml2_printf("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line);
- dml2_printf("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group);
- dml2_printf("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate);
- dml2_printf("DML::%s: tdlut_delivery_cycles = %u\n", __func__, tdlut_delivery_cycles);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
- dml2_printf("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver);
- dml2_printf("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
+ DML_LOG_VERBOSE("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
+ DML_LOG_VERBOSE("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? "sw_linear" : "simple_linear");
+ DML_LOG_VERBOSE("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate);
+ DML_LOG_VERBOSE("DML::%s: tdlut_delivery_cycles = %u\n", __func__, p->tdlut_addressing_mode == dml2_tdlut_sw_linear ? (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width : (unsigned int)math_ceil2(tdlut_width/2.0, 1));
+ DML_LOG_VERBOSE("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver);
+ DML_LOG_VERBOSE("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
#endif
}
@@ -4820,10 +4755,10 @@ static void CalculateTarb(
*Tarb = extra_bytes / ReturnBW;
*Tarb_prefetch = extra_bytes_prefetch / ReturnBW;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte);
- dml2_printf("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize);
- dml2_printf("DML::%s: extra_bytes = %f\n", __func__, extra_bytes);
- dml2_printf("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch);
+ DML_LOG_VERBOSE("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize);
+ DML_LOG_VERBOSE("DML::%s: extra_bytes = %f\n", __func__, extra_bytes);
+ DML_LOG_VERBOSE("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch);
#endif
}
@@ -4838,10 +4773,10 @@ static double CalculateTWait(
TWait = math_max2(reserved_vblank_time_ns/1000.0, g6_temp_read_blackout_us) + t_urg_trip;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: reserved_vblank_time_ns = %d\n", __func__, reserved_vblank_time_ns);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, Ttrip);
- dml2_printf("DML::%s: TWait = %f\n", __func__, TWait);
+ DML_LOG_VERBOSE("DML::%s: reserved_vblank_time_ns = %ld\n", __func__, reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, Ttrip);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, TWait);
#endif
return TWait;
}
@@ -4887,20 +4822,20 @@ static void CalculateVUpdateAndDynamicMetadataParameters(
*Tdmsks = *Tdmsks / 2;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired);
- dml2_printf("DML::%s: VBlank = %u\n", __func__, VBlank);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, Dppclk);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep);
- dml2_printf("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters);
- dml2_printf("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime);
+ DML_LOG_VERBOSE("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired);
+ DML_LOG_VERBOSE("DML::%s: VBlank = %u\n", __func__, VBlank);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, PixelClock);
+ DML_LOG_VERBOSE("DML::%s: Dppclk = %f\n", __func__, Dppclk);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters);
+ DML_LOG_VERBOSE("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime);
- dml2_printf("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix);
- dml2_printf("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix);
- dml2_printf("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix);
+ DML_LOG_VERBOSE("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix);
+ DML_LOG_VERBOSE("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix);
+ DML_LOG_VERBOSE("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix);
- dml2_printf("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks);
#endif
}
@@ -4962,11 +4897,11 @@ static double get_urgent_bandwidth_required(
l->adj_factor_cur_pre = UrgentBurstFactorCursorPre[k];
bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]);
- bool exclude_this_plane = 0;
+ bool exclude_this_plane = false;
// Exclude phantom pipe in bw calculation for non svp prefetch state
if (state_type != dml2_core_internal_soc_state_svp_prefetch && is_phantom)
- exclude_this_plane = 1;
+ exclude_this_plane = true;
// The qualified row bandwidth, qual_row_bw, accounts for the regular non-flip row bandwidth when there is no possible immediate flip or HostVM invalidation flip.
// The qual_row_bw is zero if HostVM is possible and only non-zero and equal to row_bw(i) if immediate flip is not allowed for that pipe.
@@ -4995,12 +4930,12 @@ static double get_urgent_bandwidth_required(
surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw);
- dml2_printf("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw);
- dml2_printf("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw);
- dml2_printf("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw);
- dml2_printf("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]);
- dml2_printf("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]);
#endif
} else {
surface_required_bw[k] = 0.0;
@@ -5009,34 +4944,34 @@ static double get_urgent_bandwidth_required(
l->required_bandwidth_mbps += surface_required_bw[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
- dml2_printf("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw);
- dml2_printf("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
- dml2_printf("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
- dml2_printf("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
- dml2_printf("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
- dml2_printf("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur);
- dml2_printf("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre);
- dml2_printf("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]);
- dml2_printf("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]);
- dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]);
- dml2_printf("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
- dml2_printf("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
- dml2_printf("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), soc_state=%s, inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, dml2_core_internal_soc_state_type_str(state_type), inc_flip_bw, is_phantom, exclude_this_plane);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), soc_state=%s, inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, dml2_core_internal_soc_state_type_str(state_type), inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
#endif
}
@@ -5120,19 +5055,19 @@ static void CalculateExtraLatency(
*ExtraLatency_sr = *ExtraLatency_sr + Tarb;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type);
- dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
- dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips);
- dml2_printf("DML::%s: max_outstanding_when_urgent_expected=%u\n", __func__, max_outstanding_when_urgent_expected);
- dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock);
- dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
- dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
- dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
- dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
- dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb);
- dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
- dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
- dml2_printf("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: qos_type=%u\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
+ DML_LOG_VERBOSE("DML::%s: Tex_trips=%f\n", __func__, Tex_trips);
+ DML_LOG_VERBOSE("DML::%s: max_outstanding_when_urgent_expected=%u\n", __func__, max_outstanding_when_urgent_expected);
+ DML_LOG_VERBOSE("DML::%s: FabricClock=%f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
+ DML_LOG_VERBOSE("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
+ DML_LOG_VERBOSE("DML::%s: Tarb=%f\n", __func__, Tarb);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch);
#endif
}
@@ -5199,20 +5134,20 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->HostVMDynamicLevelsTrips = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable);
- dml2_printf("DML::%s: mrq_present = %u\n", __func__, p->mrq_present);
- dml2_printf("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
- dml2_printf("DML::%s: VStartup = %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
- dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk);
+ DML_LOG_VERBOSE("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable);
+ DML_LOG_VERBOSE("DML::%s: mrq_present = %u\n", __func__, p->mrq_present);
+ DML_LOG_VERBOSE("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
+ DML_LOG_VERBOSE("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
+ DML_LOG_VERBOSE("DML::%s: VStartup = %u\n", __func__, p->VStartup);
+ DML_LOG_VERBOSE("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk);
+ DML_LOG_VERBOSE("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk);
#endif
CalculateVUpdateAndDynamicMetadataParameters(
p->MaxInterDCNTileRepeaters,
@@ -5258,11 +5193,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (p->DynamicMetadataEnable == true) {
if (p->VStartup * s->LineTime < *p->TSetup + *p->Tdmdl + s->Tdmbf + s->Tdmec + s->Tdmsks) {
*p->NotEnoughTimeForDynamicMetadata = true;
- dml2_printf("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
+ DML_LOG_VERBOSE("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
+ DML_LOG_VERBOSE("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
} else {
*p->NotEnoughTimeForDynamicMetadata = false;
}
@@ -5288,21 +5223,21 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
((p->myPipe->ODMMode == dml2_odm_mode_mso_1to4) ? (double)p->myPipe->HActive * 3.0 / 4.0 : 0));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled);
- dml2_printf("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles);
- dml2_printf("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk);
- dml2_printf("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode);
- dml2_printf("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH);
- dml2_printf("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled);
+ DML_LOG_VERBOSE("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock);
+ DML_LOG_VERBOSE("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk);
+ DML_LOG_VERBOSE("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles);
+ DML_LOG_VERBOSE("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk);
+ DML_LOG_VERBOSE("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode);
+ DML_LOG_VERBOSE("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
- dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time);
+ DML_LOG_VERBOSE("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
+ DML_LOG_VERBOSE("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time);
#endif
if (p->OutputFormat == dml2_420 || (p->myPipe->InterlaceEnable && p->myPipe->ProgressiveToInterlaceUnitInOPP))
@@ -5314,17 +5249,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->DSTYAfterScaler = (unsigned int)(math_floor2(s->DSTTotalPixelsAfterScaler / p->myPipe->HTotal, 1));
*p->DSTXAfterScaler = (unsigned int)(s->DSTTotalPixelsAfterScaler - ((double)(*p->DSTYAfterScaler * p->myPipe->HTotal)));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
#endif
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
+ DML_LOG_VERBOSE("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips);
#endif
if (p->display_cfg->gpuvm_enable) {
s->Tvm_trips_rounded = math_ceil2(4.0 * *p->Tvm_trips / s->LineTime, 1.0) / 4.0 * s->LineTime;
@@ -5402,7 +5337,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
}
	/* oto prefetch bw should always be less than total vactive bw */
- //DML2_ASSERT(s->prefetch_bw_oto < s->per_pipe_vactive_sw_bw * p->myPipe->DPPPerSurface);
+ //DML_ASSERT(s->prefetch_bw_oto < s->per_pipe_vactive_sw_bw * p->myPipe->DPPPerSurface);
s->prefetch_bw_oto = math_max2(s->per_pipe_vactive_sw_bw, s->prefetch_bw_oto) * p->mall_prefetch_sdp_overhead_factor;
@@ -5421,9 +5356,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->RequiredPrefetchBWOTO = s->prefetch_bw_oto;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
- dml2_printf("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
- dml2_printf("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
+ DML_LOG_VERBOSE("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw);
#endif
if (p->display_cfg->gpuvm_enable == true) {
@@ -5433,9 +5368,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
#endif
} else {
s->Tvm_oto = s->Tvm_trips_rounded;
@@ -5447,9 +5382,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto,
s->LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
#endif
} else
s->Tr0_oto = s->LineTime / 4.0;
@@ -5459,11 +5394,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
#ifdef DML_GLOBAL_PREFETCH_CHECK
- dml2_printf("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre);
+ DML_LOG_VERBOSE("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre);
if (p->impacted_dst_y_pre > 0) {
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
s->dst_y_prefetch_oto = math_max2(s->dst_y_prefetch_oto, p->impacted_dst_y_pre);
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto);
}
#endif
*p->Tpre_oto = s->dst_y_prefetch_oto * s->LineTime;
@@ -5492,72 +5427,71 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
- dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
- dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
- dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
- dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes);
- dml2_printf("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
- dml2_printf("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip);
- dml2_printf("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr);
- dml2_printf("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto);
- dml2_printf("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto);
- dml2_printf("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines);
- dml2_printf("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines);
- dml2_printf("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto);
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
- dml2_printf("DML::%s: dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: tdlut_row_bytes = %d\n", __func__, tdlut_row_bytes);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes);
-#endif
- double Tpre = s->dst_y_prefetch_equ * s->LineTime;
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
+ DML_LOG_VERBOSE("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
+ DML_LOG_VERBOSE("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes);
+ DML_LOG_VERBOSE("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
+ DML_LOG_VERBOSE("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines);
+ DML_LOG_VERBOSE("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: tdlut_row_bytes = %d\n", __func__, tdlut_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes);
+#endif
s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0;
*p->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: LineTime: %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: VStartup: %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime);
- dml2_printf("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup);
- dml2_printf("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, p->TCalc);
- dml2_printf("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
- dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: Tex = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
- dml2_printf("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
- dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
- dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
- dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: LineTime: %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: VStartup: %u\n", __func__, p->VStartup);
+ DML_LOG_VERBOSE("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup);
+ DML_LOG_VERBOSE("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, p->TCalc);
+ DML_LOG_VERBOSE("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
+ DML_LOG_VERBOSE("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: Tex = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
+ DML_LOG_VERBOSE("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, (s->dst_y_prefetch_equ * s->LineTime), *p->Tpre_rounded, (*p->Tpre_rounded - (s->dst_y_prefetch_equ * s->LineTime)));
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
#endif
*p->dst_y_per_vm_vblank = 0;
@@ -5596,19 +5530,19 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw1 = 0;
- dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) /
(*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
- dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded);
- dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
- dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
- dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
+ DML_LOG_VERBOSE("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded);
+ DML_LOG_VERBOSE("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
#endif
}
@@ -5620,10 +5554,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw2 = 0;
- dml2_printf("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) {
s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime);
- dml2_printf("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
}
// prefetch_bw3: 2*R0 + SW
@@ -5634,10 +5568,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw3 = 0;
- dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
- dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
}
// prefetch_bw4: SW
@@ -5647,17 +5581,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw4 = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
- dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
- dml2_printf("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
- dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
- dml2_printf("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2);
- dml2_printf("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
- dml2_printf("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
- dml2_printf("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
- dml2_printf("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3);
- dml2_printf("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, s->dst_y_prefetch_equ * s->LineTime, *p->Tpre_rounded, (*p->Tpre_rounded - (s->dst_y_prefetch_equ * s->LineTime)));
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
+ DML_LOG_VERBOSE("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
+ DML_LOG_VERBOSE("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2);
+ DML_LOG_VERBOSE("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4);
#endif
{
bool Case1OK = false;
@@ -5676,14 +5610,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
double total_row_bytes = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes);
- dml2_printf("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded);
- dml2_printf("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded);
if (s->prefetch_bw1 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1;
double row_transfer_time = total_row_bytes / s->prefetch_bw1;
- dml2_printf("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case1OK = true;
}
@@ -5696,8 +5630,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (s->prefetch_bw2 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2;
double row_transfer_time = total_row_bytes / s->prefetch_bw2;
- dml2_printf("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time < s->Tr0_trips_rounded) {
Case2OK = true;
}
@@ -5709,8 +5643,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (s->prefetch_bw3 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3;
double row_transfer_time = total_row_bytes / s->prefetch_bw3;
- dml2_printf("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time < s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case3OK = true;
}
@@ -5730,10 +5664,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Case1OK: %u\n", __func__, Case1OK);
- dml2_printf("DML::%s: Case2OK: %u\n", __func__, Case2OK);
- dml2_printf("DML::%s: Case3OK: %u\n", __func__, Case3OK);
- dml2_printf("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ);
+ DML_LOG_VERBOSE("DML::%s: Case1OK: %u\n", __func__, Case1OK);
+ DML_LOG_VERBOSE("DML::%s: Case2OK: %u\n", __func__, Case2OK);
+ DML_LOG_VERBOSE("DML::%s: Case3OK: %u\n", __func__, Case3OK);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ);
#endif
if (s->prefetch_bw_equ > 0) {
@@ -5753,12 +5687,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else {
s->Tvm_equ = 0;
s->Tr0_equ = 0;
- dml2_printf("DML::%s: prefetch_bw_equ equals 0!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_equ equals 0!\n", __func__);
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
- dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
+ DML_LOG_VERBOSE("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
+ DML_LOG_VERBOSE("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
#endif
// Use the more stressful prefetch schedule
if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) {
@@ -5769,7 +5703,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using oto scheduling for prefetch\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Using oto scheduling for prefetch\n", __func__);
#endif
} else {
@@ -5785,7 +5719,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
#endif
}
@@ -5797,31 +5731,31 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->prefetch_swath_time_us = (s->LinesToRequestPrefetchPixelData * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
- dml2_printf("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us);
+ DML_LOG_VERBOSE("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes);
- dml2_printf("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes);
+ DML_LOG_VERBOSE("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw);
#endif
- DML2_ASSERT(*p->dst_y_prefetch < 64);
+ DML_ASSERT(*p->dst_y_prefetch < 64);
unsigned int min_lsw_required = (unsigned int)math_max2(2, p->tdlut_drain_time / s->LineTime);
if (s->LinesToRequestPrefetchPixelData >= min_lsw_required && s->prefetch_bw_equ > 0) {
*p->VRatioPrefetchY = (double)p->PrefetchSourceLinesY / s->LinesToRequestPrefetchPixelData;
*p->VRatioPrefetchY = math_max2(*p->VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY);
- dml2_printf("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
+ DML_LOG_VERBOSE("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY);
#endif
if ((p->SwathHeightY > 4) && (p->VInitPreFillY > 3)) {
if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillY - 3.0) / 2.0) {
@@ -5829,13 +5763,13 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
(double)p->MaxNumSwathY * p->SwathHeightY / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillY - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
*p->VRatioPrefetchY = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY);
#endif
}
@@ -5843,22 +5777,22 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC);
- dml2_printf("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
+ DML_LOG_VERBOSE("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC);
#endif
if ((p->SwathHeightC > 4) && (p->VInitPreFillC > 3)) {
if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillC - 3.0) / 2.0) {
*p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, (double)p->MaxNumSwathC * p->SwathHeightC / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillC - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
*p->VRatioPrefetchC = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC);
#endif
}
@@ -5866,36 +5800,34 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->RequiredPrefetchPixelDataBWChroma = (double)p->PrefetchSourceLinesC / s->LinesToRequestPrefetchPixelData * p->myPipe->BytePerPixelC * p->swath_width_chroma_ub / s->LineTime;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWChroma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
#endif
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
- dml2_printf("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
*p->VRatioPrefetchY = 0;
*p->VRatioPrefetchC = 0;
*p->RequiredPrefetchPixelDataBWLuma = 0;
*p->RequiredPrefetchPixelDataBWChroma = 0;
}
- dml2_printf("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
- dml2_printf("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
- dml2_printf("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
- dml2_printf("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime);
- dml2_printf("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime);
- dml2_printf("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n");
- dml2_printf("DML: Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup);
- dml2_printf("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
+ DML_LOG_VERBOSE("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime);
+ DML_LOG_VERBOSE("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime);
+ DML_LOG_VERBOSE("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n");
+ DML_LOG_VERBOSE("DML: Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup);
+ DML_LOG_VERBOSE("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
} else {
- dml2_printf("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
__func__, min_Lsw_equ_ok, *p->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime);
- dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded+Tvm_trips_rounded+2.0*Tr0_trips_rounded+min_Tsw_equ (%f) should be > \n",
- __func__, tpre_gt_req_latency, (s->min_Lsw_equ*s->LineTime + s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded), p->Turg, s->trip_to_mem, p->ExtraLatencyPrefetch);
s->NoTimeToPrefetch = true;
s->TimeForFetchingVM = 0;
s->TimeForFetchingRowInVBlank = 0;
@@ -5916,18 +5848,18 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
prefetch_vm_bw = 0;
} else if (*p->dst_y_per_vm_vblank > 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
#endif
prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
+ DML_LOG_VERBOSE("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
} else {
prefetch_vm_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
}
if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) {
@@ -5936,14 +5868,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
#endif
} else {
prefetch_row_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
}
*p->prefetch_vmrow_bw = math_max2(prefetch_vm_bw, prefetch_row_bw);
@@ -5963,12 +5895,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->prefetch_vmrow_bw = 0;
}
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
- dml2_printf("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
+ DML_LOG_VERBOSE("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
return s->NoTimeToPrefetch;
}
@@ -6005,7 +5937,7 @@ static unsigned int find_max_impact_plane(unsigned int this_plane_idx, unsigned
}
}
if (max_idx <= 0) {
- DML2_ASSERT(max_idx >= 0);
+ DML_ASSERT(max_idx >= 0);
max_idx = this_plane_idx;
}
@@ -6037,12 +5969,12 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
// worst case if the rob and cdb is fully hogged
s->max_Trpd_dcfclk_cycles = (unsigned int) math_ceil2((p->rob_buffer_size_kbytes*1024 + p->compressed_buffer_size_kbytes*DML_MAX_COMPRESSION_RATIO*1024)/64.0, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes);
- dml2_printf("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes);
- dml2_printf("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes);
- dml2_printf("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps);
- dml2_printf("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz);
- dml2_printf("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps);
+ DML_LOG_VERBOSE("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles);
#endif
// calculate the return impact from each plane, request is 256B per dcfclk
@@ -6063,12 +5995,12 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(p->lb_source_lines_l[i] / p->swath_height_l[i], 1) * s->src_swath_bytes_l[i]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]);
- dml2_printf("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l);
- dml2_printf("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]);
- dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]);
- dml2_printf("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]);
- dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det);
+ DML_LOG_VERBOSE("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l);
+ DML_LOG_VERBOSE("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det);
#endif
if (s->src_swath_bytes_c[i] > 0) { // dual_plane
@@ -6079,10 +6011,10 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c);
- dml2_printf("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]);
- dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]);
- dml2_printf("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c);
+ DML_LOG_VERBOSE("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]);
#endif
}
@@ -6090,9 +6022,9 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
s->accumulated_return_path_dcfclk_cycles[i] = (unsigned int) math_ceil2(((DML_MAX_COMPRESSION_RATIO-1) * 64 * p->estimated_dcfclk_mhz) * s->time_to_fill_det_us / 64.0, 1.0); //for 64B per DCFClk
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det);
- dml2_printf("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us);
- dml2_printf("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det);
+ DML_LOG_VERBOSE("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us);
+ DML_LOG_VERBOSE("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]);
#endif
// clamping to worst case delay which is one which occupy the full rob+cdb
if (s->accumulated_return_path_dcfclk_cycles[i] > s->max_Trpd_dcfclk_cycles)
@@ -6109,7 +6041,7 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
p->impacted_dst_y_pre[i] = math_ceil2(p->impacted_dst_y_pre[i] / p->line_time[i], 0.25);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k);
+ DML_LOG_VERBOSE("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k);
#endif
}
@@ -6120,8 +6052,8 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
*p->recalc_prefetch_schedule = 1;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]);
- dml2_printf("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]);
#endif
}
} else {
@@ -6131,8 +6063,8 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed);
- dml2_printf("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule);
+ DML_LOG_VERBOSE("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed);
+ DML_LOG_VERBOSE("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule);
#endif
return s->prefetch_global_check_passed;
@@ -6150,8 +6082,8 @@ static void calculate_peak_bandwidth_required(
memset(l, 0, sizeof(struct dml2_core_shared_calculate_peak_bandwidth_required_locals));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: inc_flip_bw = %d\n", __func__, p->inc_flip_bw);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, p->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: inc_flip_bw = %d\n", __func__, p->inc_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, p->num_active_planes);
#endif
for (unsigned int k = 0; k < p->num_active_planes; ++k) {
@@ -6347,12 +6279,12 @@ static void calculate_peak_bandwidth_required(
p->surface_peak_required_bw[m][n]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_vactive_bandwidth_required[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_required_qual[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->non_urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_vactive_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_qual[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->non_urg_bandwidth_required[m][n]);
#endif
- DML2_ASSERT(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]);
+ DML_ASSERT(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]);
}
}
}
@@ -6414,18 +6346,18 @@ static void check_urgent_bandwidth_support(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall);
- dml2_printf("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall);
+ DML_LOG_VERBOSE("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok);
for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) {
for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) {
- dml2_printf("DML::%s: state:%s bw_type:%s urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
+ DML_LOG_VERBOSE("DML::%s: state:%s bw_type:%s urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
__func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n),
urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required[m][n]) ? "<" : ">=", urg_bandwidth_required[m][n]);
}
@@ -6446,14 +6378,14 @@ static double get_bandwidth_available_for_immediate_flip(enum dml2_core_internal
flip_bw_available_mbps = flip_bw_available_sdp_mbps < flip_bw_available_dram_mbps ? flip_bw_available_sdp_mbps : flip_bw_available_dram_mbps;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps);
- dml2_printf("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps);
- dml2_printf("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps);
+ DML_LOG_VERBOSE("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps);
#endif
return flip_bw_available_mbps;
@@ -6478,28 +6410,28 @@ static void calculate_immediate_flip_bandwidth_support(
*flip_bandwidth_support_ok &= urg_bandwidth_available[eval_state][n] >= urg_bandwidth_required_flip[eval_state][n];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str(n));
- dml2_printf("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str(n));
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
#endif
- DML2_ASSERT(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]);
+ DML_ASSERT(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]);
}
*frac_urg_bandwidth_flip = (frac_urg_bw_flip_sdp > frac_urg_bw_flip_dram) ? frac_urg_bw_flip_sdp : frac_urg_bw_flip_dram;
*flip_bandwidth_support_ok &= (*frac_urg_bandwidth_flip <= 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp);
- dml2_printf("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip);
+ DML_LOG_VERBOSE("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) {
for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) {
- dml2_printf("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
+ DML_LOG_VERBOSE("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
__func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n),
urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required_flip[m][n]) ? "<" : ">=", urg_bandwidth_required_flip[m][n]);
}
@@ -6549,27 +6481,27 @@ static void CalculateFlipSchedule(
l->dpte_row_bytes = DPTEBytesPerRow;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
- dml2_printf("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
- dml2_printf("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
- dml2_printf("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
- dml2_printf("DML::%s: iflip_enable = %u\n", __func__, iflip_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip);
- dml2_printf("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded);
- dml2_printf("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded);
- dml2_printf("DML::%s: vm_bytes = %f\n", __func__, vm_bytes);
- dml2_printf("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes);
- dml2_printf("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes);
- dml2_printf("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height);
- dml2_printf("DML::%s: meta_row_height = %d\n", __func__, meta_row_height);
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
+ DML_LOG_VERBOSE("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines);
+ DML_LOG_VERBOSE("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
+ DML_LOG_VERBOSE("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
+ DML_LOG_VERBOSE("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: iflip_enable = %u\n", __func__, iflip_enable);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, LineTime);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %f\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height);
+ DML_LOG_VERBOSE("DML::%s: meta_row_height = %d\n", __func__, meta_row_height);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
#endif
if (TotImmediateFlipBytes > 0 && (GPUVMEnable || dcc_mrq_enable)) {
@@ -6596,9 +6528,9 @@ static void CalculateFlipSchedule(
l->min_row_time = l->min_row_height * LineTime / VRatio;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min_row_time = %f\n", __func__, l->min_row_time);
+ DML_LOG_VERBOSE("DML::%s: min_row_time = %f\n", __func__, l->min_row_time);
#endif
- DML2_ASSERT(l->min_row_time > 0);
+ DML_ASSERT(l->min_row_time > 0);
if (use_lb_flip_bw) {
// For mode check, calculation the flip bw requirement with worst case flip time
@@ -6619,20 +6551,20 @@ static void CalculateFlipSchedule(
l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded),
l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
- dml2_printf("DML::%s: total row bytes (%d row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes);
- dml2_printf("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
- dml2_printf("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
- dml2_printf("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
- dml2_printf("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
+ DML_LOG_VERBOSE("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: total row bytes (%f row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
if (l->lb_flip_bw > 0) {
- dml2_printf("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw);
- dml2_printf("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
- dml2_printf("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
- dml2_printf("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
- dml2_printf("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
+ DML_LOG_VERBOSE("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
+ DML_LOG_VERBOSE("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded));
}
#endif
l->lb_flip_bw = math_max3(l->lb_flip_bw,
@@ -6640,8 +6572,8 @@ static void CalculateFlipSchedule(
(l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip);
- dml2_printf("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
#endif
}
@@ -6653,13 +6585,12 @@ static void CalculateFlipSchedule(
} else {
if (iflip_enable) {
l->ImmediateFlipBW = (double)per_pipe_flip_bytes * BandwidthAvailableForImmediateFlip / (double)TotImmediateFlipBytes; // flip_bw(i)
- double portion = (double)per_pipe_flip_bytes / (double)TotImmediateFlipBytes;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW);
- dml2_printf("DML::%s: portion of flip bw = %f\n", __func__, portion);
+ DML_LOG_VERBOSE("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes);
+ DML_LOG_VERBOSE("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW);
+ DML_LOG_VERBOSE("DML::%s: portion of flip bw = %f\n", __func__, (double)per_pipe_flip_bytes / (double)TotImmediateFlipBytes);
#endif
if (l->ImmediateFlipBW == 0) {
l->Tvm_flip = 0;
@@ -6674,11 +6605,11 @@ static void CalculateFlipSchedule(
LineTime / 4.0);
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor);
- dml2_printf("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes));
+ DML_LOG_VERBOSE("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes));
- dml2_printf("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip);
#endif
*dst_y_per_vm_flip = math_ceil2(4.0 * (l->Tvm_flip / LineTime), 1.0) / 4.0;
*dst_y_per_row_flip = math_ceil2(4.0 * (l->Tr0_flip / LineTime), 1.0) / 4.0;
@@ -6711,14 +6642,14 @@ static void CalculateFlipSchedule(
#ifdef __DML_VBA_DEBUG__
if (!use_lb_flip_bw) {
- dml2_printf("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip);
- dml2_printf("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
- dml2_printf("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
- dml2_printf("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time);
}
- dml2_printf("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
- dml2_printf("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
+ DML_LOG_VERBOSE("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
#endif
}
@@ -6736,7 +6667,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->UrgentWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
#endif
p->Watermark->USRRetrainingWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency + p->mmSOCParameters.USRRetrainingLatency + p->mmSOCParameters.SMNLatency;
@@ -6755,20 +6686,20 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->temp_read_or_ppt_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
- dml2_printf("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency);
- dml2_printf("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency);
- dml2_printf("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time);
- dml2_printf("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime);
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark);
- dml2_printf("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark);
- dml2_printf("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark);
- dml2_printf("DML::%s: StutterExitWatermark = %f\n", __func__, p->Watermark->StutterExitWatermark);
- dml2_printf("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency);
+ DML_LOG_VERBOSE("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time);
+ DML_LOG_VERBOSE("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime);
+ DML_LOG_VERBOSE("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterExitWatermark = %f\n", __func__, p->Watermark->StutterExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us);
#endif
s->TotalActiveWriteback = 0;
@@ -6801,11 +6732,11 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->WritebackFCLKChangeWatermark = p->Watermark->WritebackFCLKChangeWatermark + p->mmSOCParameters.USRRetrainingLatency;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark);
- dml2_printf("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark);
- dml2_printf("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired);
- dml2_printf("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency);
+ DML_LOG_VERBOSE("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency);
#endif
s->TotalPixelBW = 0.0;
@@ -6836,11 +6767,11 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->LBLatencyHidingSourceLinesC[k] = (unsigned int)(math_min2((double)p->MaxLineBufferLines, math_floor2((double)p->LineBufferSize / LBBitPerPixel / ((double)p->SwathWidthC[k] / math_max2(h_ratio_c, 1.0)), 1)) - (v_taps_c - 1));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines);
- dml2_printf("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
- dml2_printf("DML::%s: k=%u, LBBitPerPixel = %u\n", __func__, k, LBBitPerPixel);
- dml2_printf("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
- dml2_printf("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LBBitPerPixel = %f\n", __func__, k, LBBitPerPixel);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps);
#endif
s->EffectiveLBLatencyHidingY = s->LBLatencyHidingSourceLinesY[k] / v_ratio * (h_total / pixel_clock_mhz);
@@ -6943,16 +6874,16 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->sub_vp_lines_l = s->src_y_pstate_l + s->src_y_ahead_l + p->meta_row_height_l[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]);
- dml2_printf("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate);
- dml2_printf("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l);
- dml2_printf("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l);
- dml2_printf("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, k, p->meta_row_height_l[k]);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, k, p->meta_row_height_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l);
#endif
p->SubViewportLinesNeededInMALL[k] = s->sub_vp_lines_l;
@@ -6967,10 +6898,10 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, s->sub_vp_lines_c));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]);
- dml2_printf("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
- dml2_printf("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
#endif
}
}
@@ -6992,10 +6923,10 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported);
- dml2_printf("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported);
- dml2_printf("DML::%s: MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported);
- dml2_printf("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported);
+ DML_LOG_VERBOSE("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported);
+ DML_LOG_VERBOSE("DML::%s: MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport);
#endif
}
@@ -7141,7 +7072,7 @@ static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struc
unsigned int index = 0;
for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
+ DML_LOG_VERBOSE("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %ld\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
if (i == 0)
index = 0;
@@ -7153,32 +7084,30 @@ static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struc
break;
}
}
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, index);
-#endif
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, index);
return index;
}
static unsigned int get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
{
unsigned int i;
- bool clk_entry_found = 0;
+ bool clk_entry_found = false;
for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
- dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
+ DML_LOG_VERBOSE("DML::%s: clk_table.uclk.clk_values_khz[%d] = %ld\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
- clk_entry_found = 1;
+ clk_entry_found = true;
break;
}
}
if (!clk_entry_found)
- DML2_ASSERT(clk_entry_found);
+ DML_ASSERT(clk_entry_found);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, i);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, i);
#endif
return i;
}
@@ -7218,10 +7147,10 @@ static void calculate_hostvm_inefficiency_factor(
if ((*HostVMInefficiencyFactorPrefetch < 4) && (remote_iommu_outstanding_translations < max_outstanding_reqs))
*HostVMInefficiencyFactorPrefetch = 4;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm);
- dml2_printf("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor);
- dml2_printf("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch);
#endif
}
}
@@ -7335,30 +7264,659 @@ static void calculate_pstate_keepout_dst_lines(
}
}
+static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_internal_display_mode_lib *mode_lib,
+ const struct dml2_display_cfg *display_cfg)
+{
+ struct dml2_core_calcs_mode_support_locals *s = &mode_lib->scratch.dml_core_mode_support_locals;
+ struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
+ struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
+ struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params;
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
+#endif
+ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
+
+ double min_return_bw_for_latency;
+ unsigned int k;
+
+ mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
+
+ calculate_hostvm_inefficiency_factor(
+ &s->HostVMInefficiencyFactor,
+ &s->HostVMInefficiencyFactorPrefetch,
+
+ display_cfg->gpuvm_enable,
+ display_cfg->hostvm_enable,
+ mode_lib->ip.remote_iommu_outstanding_translations,
+ mode_lib->soc.max_outstanding_reqs,
+ mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
+ mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
+
+ mode_lib->ms.Total3dlutActive = 0;
+ for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
+ mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1;
+
+ // Calculate tdlut schedule related terms
+ calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK;
+ calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
+ calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
+ calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
+ calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
+ calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
+ calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
+ calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag;
+ calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling);
+
+ // output
+ calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
+ calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
+ calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
+ calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
+ calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
+ calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
+ calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
+
+ calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
+ }
+
+ min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
+
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
+ s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
+
+ CalculateExtraLatency(
+ display_cfg,
+ mode_lib->ip.rob_buffer_size_kbytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
+ s->ReorderingBytes,
+ mode_lib->ms.DCFCLK,
+ mode_lib->ms.FabricClock,
+ mode_lib->ip.pixel_chunk_size_kbytes,
+ min_return_bw_for_latency,
+ mode_lib->ms.num_active_planes,
+ mode_lib->ms.NoOfDPP,
+ mode_lib->ms.dpte_group_bytes,
+ s->tdlut_bytes_per_group,
+ s->HostVMInefficiencyFactor,
+ s->HostVMInefficiencyFactorPrefetch,
+ mode_lib->soc.hostvm_min_page_size_kbytes,
+ mode_lib->soc.qos_parameters.qos_type,
+ !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
+ mode_lib->soc.max_outstanding_reqs,
+ mode_lib->ms.support.request_size_bytes_luma,
+ mode_lib->ms.support.request_size_bytes_chroma,
+ mode_lib->ip.meta_chunk_size_kbytes,
+ mode_lib->ip.dchub_arb_to_ret_delay,
+ mode_lib->ms.TripToMemory,
+ mode_lib->ip.hostvm_mode,
+
+ // output
+ &mode_lib->ms.ExtraLatency,
+ &mode_lib->ms.ExtraLatency_sr,
+ &mode_lib->ms.ExtraLatencyPrefetch);
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++)
+ s->impacted_dst_y_pre[k] = 0;
+
+ s->recalc_prefetch_schedule = 0;
+ s->recalc_prefetch_done = 0;
+ do {
+ mode_lib->ms.support.PrefetchSupported = true;
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
+
+ s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
+
+ mode_lib->ms.TWait[k] = CalculateTWait(
+ display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
+ mode_lib->ms.UrgLatency,
+ mode_lib->ms.TripToMemory,
+ !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), mode_lib->ms.state_idx) : 0.0);
+
+ myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
+ myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
+ myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
+ myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k];
+ myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
+ myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
+ myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
+ myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
+ myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
+ myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
+ myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
+ myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
+ myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
+ myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
+ myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
+ myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
+ myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
+ myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
+ myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
+ myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
+ myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
+ myPipe->ODMMode = mode_lib->ms.ODMMode[k];
+ myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
+ myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
+ myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
+ myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
+
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]);
+#endif
+ CalculatePrefetchSchedule_params->display_cfg = display_cfg;
+ CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
+ CalculatePrefetchSchedule_params->myPipe = myPipe;
+ CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k];
+ CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
+ CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
+ CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
+ CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
+ CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
+ CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
+ CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
+ CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
+ CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
+ CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
+ CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
+ CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
+ CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
+ CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency;
+ CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch;
+ CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc;
+ CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k];
+ CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k];
+ CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k];
+ CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k];
+ CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k];
+ CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k];
+ CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k];
+ CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k];
+ CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k];
+ CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k];
+ CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k];
+ CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k];
+ CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k];
+ CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory;
+ CalculatePrefetchSchedule_params->Turg = mode_lib->ms.UrgLatency;
+ CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
+ CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
+ CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
+ CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
+ CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
+ CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
+ CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
+ CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
+ CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
+ CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
+ CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
+ CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
+ CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k];
+
+ // output
+ CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
+ CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k];
+ CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k];
+ CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k];
+ CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k];
+ CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k];
+ CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
+ CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k];
+ CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
+ CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
+ CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
+ CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k];
+ CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0];
+ CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1];
+ CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2];
+ CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
+ CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
+ CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
+ CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
+ CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
+ CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
+ CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0];
+ CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
+ CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
+ CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
+ CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
+ CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
+ CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
+ CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k];
+
+ mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
+
+ mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k];
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
+ } // for k num_planes
+
+ CalculateDCFCLKDeepSleepTdlut(
+ display_cfg,
+ mode_lib->ms.num_active_planes,
+ mode_lib->ms.BytePerPixelY,
+ mode_lib->ms.BytePerPixelC,
+ mode_lib->ms.SwathWidthY,
+ mode_lib->ms.SwathWidthC,
+ mode_lib->ms.NoOfDPP,
+ mode_lib->ms.PSCL_FACTOR,
+ mode_lib->ms.PSCL_FACTOR_CHROMA,
+ mode_lib->ms.RequiredDPPCLK,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
+ mode_lib->soc.return_bus_width_bytes,
+ mode_lib->ms.RequiredDISPCLK,
+ s->tdlut_bytes_to_deliver,
+ s->prefetch_swath_time_us,
+
+ /* Output */
+ &mode_lib->ms.dcfclk_deepsleep);
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.dst_y_prefetch[k] < 2.0
+ || mode_lib->ms.LinesForVM[k] >= 32.0
+ || mode_lib->ms.LinesForDPTERow[k] >= 16.0
+ || mode_lib->ms.NoTimeForPrefetch[k] == true
+ || s->DSTYAfterScaler[k] > 8) {
+ mode_lib->ms.support.PrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
+ }
+ }
+
+ mode_lib->ms.support.DynamicMetadataSupported = true;
+ for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
+ if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) {
+ mode_lib->ms.support.DynamicMetadataSupported = false;
+ }
+ }
+
+ mode_lib->ms.support.VRatioInPrefetchSupported = true;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
+ mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
+ mode_lib->ms.support.VRatioInPrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
+ }
+ }
+
+ mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported;
+
+ // By default, do not recalc prefetch schedule
+ s->recalc_prefetch_schedule = 0;
+
+		// Only do the urg vs prefetch bandwidth check, flip schedule check, and power saving feature support check IF the prefetch schedule check is ok
+ if (mode_lib->ms.support.PrefetchSupported) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ // Calculate Urgent burst factor for prefetch
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]);
+#endif
+ CalculateUrgentBurstFactor(
+ &display_cfg->plane_descriptors[k],
+ mode_lib->ms.swath_width_luma_ub[k],
+ mode_lib->ms.swath_width_chroma_ub[k],
+ mode_lib->ms.SwathHeightY[k],
+ mode_lib->ms.SwathHeightC[k],
+ s->line_times[k],
+ mode_lib->ms.UrgLatency,
+ mode_lib->ms.VRatioPreY[k],
+ mode_lib->ms.VRatioPreC[k],
+ mode_lib->ms.BytePerPixelInDETY[k],
+ mode_lib->ms.BytePerPixelInDETC[k],
+ mode_lib->ms.DETBufferSizeY[k],
+ mode_lib->ms.DETBufferSizeC[k],
+ /* Output */
+ &mode_lib->ms.UrgentBurstFactorLumaPre[k],
+ &mode_lib->ms.UrgentBurstFactorChromaPre[k],
+ &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
+ }
+
+ // Calculate urgent bandwidth required, both urg and non urg peak bandwidth
+ // assume flip bw is 0 at this point
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++)
+ mode_lib->ms.final_flip_bw[k] = 0;
+
+ calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = mode_lib->ms.support.urg_vactive_bandwidth_required;
+ calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required;
+ calculate_peak_bandwidth_params->urg_bandwidth_required_qual = mode_lib->ms.support.urg_bandwidth_required_qual;
+ calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required;
+ calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = mode_lib->ms.surface_avg_vactive_required_bw;
+ calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
+
+ calculate_peak_bandwidth_params->display_cfg = display_cfg;
+ calculate_peak_bandwidth_params->inc_flip_bw = 0;
+ calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
+ calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
+ calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
+ calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
+
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
+ calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
+ calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
+ calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
+ calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
+ calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
+ calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
+
+ calculate_peak_bandwidth_required(
+ &mode_lib->scratch,
+ calculate_peak_bandwidth_params);
+
+ // Check urg peak bandwidth against available urg bw
+			// check at SDP and DRAM, for all soc states (SVP prefetch and Sys Active)
+ check_urgent_bandwidth_support(
+ &s->dummy_single[0], // double* frac_urg_bandwidth
+ &s->dummy_single[1], // double* frac_urg_bandwidth_mall
+ &mode_lib->ms.support.UrgVactiveBandwidthSupport,
+ &mode_lib->ms.support.PrefetchBandwidthSupported,
+
+ mode_lib->soc.mall_allocated_for_dcn_mbytes,
+ mode_lib->ms.support.non_urg_bandwidth_required,
+ mode_lib->ms.support.urg_vactive_bandwidth_required,
+ mode_lib->ms.support.urg_bandwidth_required,
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported;
+ DML_LOG_VERBOSE("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported);
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) {
+ mode_lib->ms.support.PrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
+ }
+ }
+
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) {
+ CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes;
+ CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC;
+ CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
+ CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
+ CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto;
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
+ CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
+ CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch;
+ if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024)
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024;
+
+ CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) /
+ ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0);
+
+			// if recalc_prefetch_schedule is set, recalculate the prefetch schedule with the new impacted_Tpre; prefetch should then be possible
+ CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule;
+ CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
+ mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params);
+ s->recalc_prefetch_done = 1;
+ s->recalc_prefetch_schedule = 1;
+ }
+#endif
+ } // prefetch schedule ok, do urg bw and flip schedule
+ } while (s->recalc_prefetch_schedule);
+
+ // Flip Schedule
+ // Both prefetch schedule and BW okay
+ if (mode_lib->ms.support.PrefetchSupported == true) {
+ mode_lib->ms.BandwidthAvailableForImmediateFlip =
+ get_bandwidth_available_for_immediate_flip(
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ mode_lib->ms.TotImmediateFlipBytes = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip) {
+ s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
+ s->HostVMInefficiencyFactor,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.meta_row_bytes[k]);
+ } else {
+ s->per_pipe_flip_bytes[k] = 0;
+ }
+ mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
+
+ }
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ CalculateFlipSchedule(
+ &mode_lib->scratch,
+ display_cfg->plane_descriptors[k].immediate_flip,
+ 1, // use_lb_flip_bw
+ s->HostVMInefficiencyFactor,
+ s->Tvm_trips_flip[k],
+ s->Tr0_trips_flip[k],
+ s->Tvm_trips_flip_rounded[k],
+ s->Tr0_trips_flip_rounded[k],
+ display_cfg->gpuvm_enable,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.BandwidthAvailableForImmediateFlip,
+ mode_lib->ms.TotImmediateFlipBytes,
+ display_cfg->plane_descriptors[k].pixel_format,
+ (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
+ mode_lib->ms.Tno_bw_flip[k],
+ mode_lib->ms.dpte_row_height[k],
+ mode_lib->ms.dpte_row_height_chroma[k],
+ mode_lib->ms.use_one_row_for_frame_flip[k],
+ mode_lib->ip.max_flip_time_us,
+ mode_lib->ip.max_flip_time_lines,
+ s->per_pipe_flip_bytes[k],
+ mode_lib->ms.meta_row_bytes[k],
+ s->meta_row_height_luma[k],
+ s->meta_row_height_chroma[k],
+ mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
+
+ /* Output */
+ &mode_lib->ms.dst_y_per_vm_flip[k],
+ &mode_lib->ms.dst_y_per_row_flip[k],
+ &mode_lib->ms.final_flip_bw[k],
+ &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
+ }
+
+ calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
+ calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
+ calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
+ calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
+
+ calculate_peak_bandwidth_params->display_cfg = display_cfg;
+ calculate_peak_bandwidth_params->inc_flip_bw = 1;
+ calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
+ calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
+ calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
+ calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
+
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
+ calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
+ calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
+ calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
+ calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
+ calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
+ calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
+
+ calculate_peak_bandwidth_required(
+ &mode_lib->scratch,
+ calculate_peak_bandwidth_params);
+
+ calculate_immediate_flip_bandwidth_support(
+ &s->dummy_single[0], // double* frac_urg_bandwidth_flip
+ &mode_lib->ms.support.ImmediateFlipSupport,
+
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_flip,
+ mode_lib->ms.support.non_urg_bandwidth_required_flip,
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
+ mode_lib->ms.support.ImmediateFlipSupport = false;
+ }
+
+	} else { // if prefetch is not supported, assume iflip is not supported either
+ mode_lib->ms.support.ImmediateFlipSupport = false;
+ }
+
+ s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
+ s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
+ s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
+ s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
+ s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
+ s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
+ s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
+ s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
+ s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
+ s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
+ s->mSOCParameters.USRRetrainingLatency = 0;
+ s->mSOCParameters.SMNLatency = 0;
+ s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), mode_lib->ms.state_idx);
+ s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, mode_lib->ms.state_idx);
+ s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
+ s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
+
+ CalculateWatermarks_params->display_cfg = display_cfg;
+ CalculateWatermarks_params->USRRetrainingRequired = false;
+ CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
+ CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
+ CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
+ CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
+ CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK;
+ CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
+ CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
+ CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
+ CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters;
+ CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
+ CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK;
+ CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
+ CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
+ CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
+ CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
+ CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
+ CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
+ CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
+ CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
+ CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY;
+ CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC;
+ CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler;
+ CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler;
+ CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled;
+ CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte;
+ CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma;
+ CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma;
+
+ // Output
+ CalculateWatermarks_params->Watermark = &mode_lib->ms.support.watermarks; // Watermarks *Watermark
+ CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport;
+ CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported;
+ CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[]
+ CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[]
+ CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport;
+ CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported;
+ CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported
+ CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport;
+ CalculateWatermarks_params->g6_temp_read_support = &mode_lib->ms.support.g6_temp_read_support;
+ CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin;
+ CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
+
+ CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
+
+ calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]);
+ DML_LOG_VERBOSE("DML::%s: Done prefetch calculation\n", __func__);
+
+}
+
static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params)
{
struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
const struct dml2_display_cfg *display_cfg = in_out_params->in_display_cfg;
const struct dml2_mcg_min_clock_table *min_clk_table = in_out_params->min_clk_table;
-#if defined(__DML_VBA_DEBUG__)
- double old_ReadBandwidthLuma;
- double old_ReadBandwidthChroma;
-#endif
double outstanding_latency_us = 0;
- double min_return_bw_for_latency;
struct dml2_core_calcs_mode_support_locals *s = &mode_lib->scratch.dml_core_mode_support_locals;
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
struct dml2_core_calcs_CalculateVMRowAndSwath_params *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params;
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
- struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
-#ifdef DML_GLOBAL_PREFETCH_CHECK
- struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
-#endif
- struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
- struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params;
struct dml2_core_calcs_calculate_bytes_to_fetch_required_to_hide_latency_params *calculate_bytes_to_fetch_required_to_hide_latency_params = &mode_lib->scratch.calculate_bytes_to_fetch_required_to_hide_latency_params;
unsigned int k, m, n;
@@ -7374,9 +7932,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.FabricClock = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz / 1000);
mode_lib->ms.MaxDCFCLK = (double)min_clk_table->max_clocks_khz.dcfclk / 1000;
mode_lib->ms.MaxFabricClock = (double)min_clk_table->max_clocks_khz.fclk / 1000;
- mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dispclk / 1000;
+ mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dispclk / 1000;
mode_lib->ms.max_dscclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dscclk / 1000;
- mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dppclk / 1000;
+ mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dppclk / 1000;
mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config);
mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000);
mode_lib->ms.max_dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[min_clk_table->dram_bw_table.num_entries - 1].pre_derate_dram_bw_kbps / 1000);
@@ -7384,25 +7942,25 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: --- START --- \n", __func__);
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, mode_lib->ms.SOCCLK);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK);
- dml2_printf("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz);
- dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
- dml2_printf("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
- dml2_printf("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
- dml2_printf("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
- dml2_printf("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
+ DML_LOG_VERBOSE("DML::%s: --- START --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index);
+ DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, mode_lib->ms.SOCCLK);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK);
+ DML_LOG_VERBOSE("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
+ DML_LOG_VERBOSE("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
+ DML_LOG_VERBOSE("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
#endif
CalculateMaxDETAndMinCompressedBufferSize(
@@ -7504,12 +8062,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
#ifdef __DML_VBA_DEBUG__
- old_ReadBandwidthLuma = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- old_ReadBandwidthChroma = mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0;
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, old_ReadBandwidthLuma);
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, old_ReadBandwidthChroma);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]);
#endif
}
@@ -7629,13 +8185,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.MaximumSwathWidthLuma[k] = math_min2(s->MaximumSwathWidthSupportLuma, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
mode_lib->ms.MaximumSwathWidthChroma[k] = math_min2(s->MaximumSwathWidthSupportChroma, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthLuma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthSupportLuma=%u\n", __func__, k, s->MaximumSwathWidthSupportLuma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthInLineBufferLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthSupportLuma=%u\n", __func__, k, s->MaximumSwathWidthSupportLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthInLineBufferLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthChroma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthSupportChroma=%u\n", __func__, k, s->MaximumSwathWidthSupportChroma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthInLineBufferChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthSupportChroma=%u\n", __func__, k, s->MaximumSwathWidthSupportChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthInLineBufferChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
}
/* Cursor Support Check */
@@ -7672,11 +8228,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.AlignedCPitch[k] > display_cfg->plane_descriptors[k].surface.plane1.pitch) {
mode_lib->ms.support.PitchSupport = false;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]);
- dml2_printf("DML::%s: k=%u PitchY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch);
- dml2_printf("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]);
- dml2_printf("DML::%s: k=%u PitchC = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch);
- dml2_printf("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport);
+ DML_LOG_VERBOSE("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch);
+ DML_LOG_VERBOSE("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchC = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport);
#endif
}
@@ -7708,11 +8264,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) {
mode_lib->ms.support.ViewportExceedsSurface = true;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u ViewportWidth = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u SurfaceWidthY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u SurfaceHeightY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
- dml2_printf("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportWidth = %ld\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u SurfaceWidthY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportHeight = %ld\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u SurfaceHeightY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
#endif
}
if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
@@ -7894,8 +8450,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.TotalNumberOfActiveDPP = mode_lib->ms.TotalNumberOfActiveDPP + s->NumberOfDPPDSC;
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
- dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
#endif
// ensure the number dsc slices is integer multiple based on ODM mode
@@ -7911,9 +8467,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.DSCSlicesODMModeSupported = ((mode_lib->ms.support.NumberOfDSCSlices[k] % 4) == 0);
#if defined(__DML_VBA_DEBUG__)
if (!mode_lib->ms.support.DSCSlicesODMModeSupported) {
- dml2_printf("DML::%s: k=%d Invalid dsc num_slices and ODM mode setting\n", __func__, k);
- dml2_printf("DML::%s: k=%d num_slices = %d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.overrides.num_slices);
- dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d Invalid dsc num_slices and ODM mode setting\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d num_slices = %d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.overrides.num_slices);
+ DML_LOG_VERBOSE("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
}
#endif
} else {
@@ -7958,7 +8514,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
if (!mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
- dml2_printf("WARNING: DML::%s: MPCC is override to disable but viewport is too large to be supported with single pipe!\n", __func__);
+ DML_LOG_VERBOSE("WARNING: DML::%s: MPCC is override to disable but viewport is too large to be supported with single pipe!\n", __func__);
}
} else {
if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
@@ -7968,7 +8524,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]);
#endif
}
@@ -8138,7 +8694,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_rate,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_layout);
- if (mode_lib->ms.RequiredDTBCLK[k] > ((double)min_clk_table->max_clocks_khz.dtbclk / 1000)) {
+ if (mode_lib->ms.RequiredDTBCLK[k] > ((double)min_clk_table->max_ss_clocks_khz.dtbclk / 1000)) {
mode_lib->ms.support.DTBCLKRequiredMoreThanSupported = true;
}
} else {
@@ -8167,7 +8723,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
s->DSCFormatFactor = 1;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
#endif
if (mode_lib->ms.RequiresDSC[k] == true) {
s->PixelClockBackEndFactor = 3.0;
@@ -8185,10 +8741,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
- dml2_printf("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]);
- dml2_printf("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
- dml2_printf("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported);
#endif
}
}
@@ -8423,13 +8979,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.DCCMetaBufferSizeNotExceeded = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded);
- dml2_printf("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded);
#endif
/* VActive bytes to fetch for UCLK P-State */
@@ -8502,7 +9058,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
double line_time_us = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- bool cursor_not_enough_urgent_latency_hiding = 0;
+ bool cursor_not_enough_urgent_latency_hiding = false;
if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
calculate_cursor_req_attributes(
@@ -8531,9 +9087,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.UrgentBurstFactorCursorPre[k] = mode_lib->ms.UrgentBurstFactorCursor[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
#endif
CalculateUrgentBurstFactor(
@@ -8605,7 +9161,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]);
#endif
/* Immediate Flip and MALL parameters */
@@ -8654,16 +9210,15 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
(s->SubViewportMALLPStateMethod && s->FullFrameMALLPStateMethod) || s->SubViewportMALLRefreshGreaterThan120Hz;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod);
- dml2_printf("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod);
- dml2_printf("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod);
- dml2_printf("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz);
- dml2_printf("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState);
- dml2_printf("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: urgent latency tolarance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)));
+ DML_LOG_VERBOSE("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz);
+ DML_LOG_VERBOSE("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
#endif
mode_lib->ms.support.OutstandingRequestsSupport = true;
@@ -8703,10 +9258,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us);
- dml2_printf("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us);
- dml2_printf("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us);
+ DML_LOG_VERBOSE("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us);
#endif
}
@@ -8722,8 +9277,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = false;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us);
+ DML_LOG_VERBOSE("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us);
#endif
}
}
@@ -8869,7 +9424,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
if (mode_lib->ms.NotEnoughUrgentLatencyHiding[k]) {
mode_lib->ms.support.EnoughUrgentLatencyHidingSupport = false;
- dml2_printf("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k);
}
}
@@ -8878,639 +9433,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (!mode_lib->ms.support.avg_bandwidth_support_ok[m][n] && (m == dml2_core_internal_soc_state_sys_active || mode_lib->soc.mall_allocated_for_dcn_mbytes > 0)) {
mode_lib->ms.support.AvgBandwidthSupport = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n));
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n));
#endif
}
}
}
- /* Prefetch Check */
- {
- mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
-
- calculate_hostvm_inefficiency_factor(
- &s->HostVMInefficiencyFactor,
- &s->HostVMInefficiencyFactorPrefetch,
-
- display_cfg->gpuvm_enable,
- display_cfg->hostvm_enable,
- mode_lib->ip.remote_iommu_outstanding_translations,
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
- mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
-
- mode_lib->ms.Total3dlutActive = 0;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
- mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1;
-
- // Calculate tdlut schedule related terms
- calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK;
- calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
- calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
- calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
- calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
- calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
- calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag;
- calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling);
-
- // output
- calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
- calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
- calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
- calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
-
- calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
- }
-
- min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
- s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
-
- CalculateExtraLatency(
- display_cfg,
- mode_lib->ip.rob_buffer_size_kbytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
- s->ReorderingBytes,
- mode_lib->ms.DCFCLK,
- mode_lib->ms.FabricClock,
- mode_lib->ip.pixel_chunk_size_kbytes,
- min_return_bw_for_latency,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.dpte_group_bytes,
- s->tdlut_bytes_per_group,
- s->HostVMInefficiencyFactor,
- s->HostVMInefficiencyFactorPrefetch,
- mode_lib->soc.hostvm_min_page_size_kbytes,
- mode_lib->soc.qos_parameters.qos_type,
- !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.request_size_bytes_luma,
- mode_lib->ms.support.request_size_bytes_chroma,
- mode_lib->ip.meta_chunk_size_kbytes,
- mode_lib->ip.dchub_arb_to_ret_delay,
- mode_lib->ms.TripToMemory,
- mode_lib->ip.hostvm_mode,
-
- // output
- &mode_lib->ms.ExtraLatency,
- &mode_lib->ms.ExtraLatency_sr,
- &mode_lib->ms.ExtraLatencyPrefetch);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- s->impacted_dst_y_pre[k] = 0;
-
- s->recalc_prefetch_schedule = 0;
- s->recalc_prefetch_done = 0;
- do {
- mode_lib->ms.support.PrefetchSupported = true;
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
-
- s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
- mode_lib->ms.NoOfDPP[k],
- display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
- display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
- display_cfg->plane_descriptors[k].composition.rotation_angle);
-
- s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
- mode_lib->ms.NoOfDPP[k],
- display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
- display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
- display_cfg->plane_descriptors[k].composition.rotation_angle);
-
- struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
-
- mode_lib->ms.TWait[k] = CalculateTWait(
- display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
- mode_lib->ms.UrgLatency,
- mode_lib->ms.TripToMemory,
- !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
- get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
-
- myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
- myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
- myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k];
- myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
- myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
- myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
- myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
- myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
- myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
- myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
- myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
- myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
- myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
- myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
- myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- myPipe->ODMMode = mode_lib->ms.ODMMode[k];
- myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
- myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
- myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
- myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
- dml2_printf("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]);
-#endif
- CalculatePrefetchSchedule_params->display_cfg = display_cfg;
- CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
- CalculatePrefetchSchedule_params->myPipe = myPipe;
- CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k];
- CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
- CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
- CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
- CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
- CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
- CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
- CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
- CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
- CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
- CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
- CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency;
- CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch;
- CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc;
- CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k];
- CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k];
- CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k];
- CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k];
- CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k];
- CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k];
- CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k];
- CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k];
- CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k];
- CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k];
- CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k];
- CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory;
- CalculatePrefetchSchedule_params->Turg = mode_lib->ms.UrgLatency;
- CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
- CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
- CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
- CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
- CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
- CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
- CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
- CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
- CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
- CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k];
- CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k];
-
- // output
- CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
- CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k];
- CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k];
- CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k];
- CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
- CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k];
- CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
- CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
- CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
- CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k];
- CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0];
- CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1];
- CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2];
- CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
- CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0];
- CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
- CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
- CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
- CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
- CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
- CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
- CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k];
-
- mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
-
- mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k];
- dml2_printf("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
- } // for k num_planes
-
- CalculateDCFCLKDeepSleepTdlut(
- display_cfg,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.BytePerPixelY,
- mode_lib->ms.BytePerPixelC,
- mode_lib->ms.SwathWidthY,
- mode_lib->ms.SwathWidthC,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.PSCL_FACTOR,
- mode_lib->ms.PSCL_FACTOR_CHROMA,
- mode_lib->ms.RequiredDPPCLK,
- mode_lib->ms.vactive_sw_bw_l,
- mode_lib->ms.vactive_sw_bw_c,
- mode_lib->soc.return_bus_width_bytes,
- mode_lib->ms.RequiredDISPCLK,
- s->tdlut_bytes_to_deliver,
- s->prefetch_swath_time_us,
-
- /* Output */
- &mode_lib->ms.dcfclk_deepsleep);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.dst_y_prefetch[k] < 2.0
- || mode_lib->ms.LinesForVM[k] >= 32.0
- || mode_lib->ms.LinesForDPTERow[k] >= 16.0
- || mode_lib->ms.NoTimeForPrefetch[k] == true
- || s->DSTYAfterScaler[k] > 8) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
- dml2_printf("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
- dml2_printf("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
- dml2_printf("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
- dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
- }
- }
-
- mode_lib->ms.support.DynamicMetadataSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) {
- mode_lib->ms.support.DynamicMetadataSupported = false;
- }
- }
-
- mode_lib->ms.support.VRatioInPrefetchSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
- mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
- mode_lib->ms.support.VRatioInPrefetchSupported = false;
- dml2_printf("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
- }
- }
-
- mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported;
-
- // By default, do not recalc prefetch schedule
- s->recalc_prefetch_schedule = 0;
-
- // Only do urg vs prefetch bandwidth check, flip schedule check, power saving feature support check IF the Prefetch Schedule Check is ok
- if (mode_lib->ms.support.PrefetchSupported) {
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- // Calculate Urgent burst factor for prefetch
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]);
- dml2_printf("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]);
-#endif
- CalculateUrgentBurstFactor(
- &display_cfg->plane_descriptors[k],
- mode_lib->ms.swath_width_luma_ub[k],
- mode_lib->ms.swath_width_chroma_ub[k],
- mode_lib->ms.SwathHeightY[k],
- mode_lib->ms.SwathHeightC[k],
- s->line_times[k],
- mode_lib->ms.UrgLatency,
- mode_lib->ms.VRatioPreY[k],
- mode_lib->ms.VRatioPreC[k],
- mode_lib->ms.BytePerPixelInDETY[k],
- mode_lib->ms.BytePerPixelInDETC[k],
- mode_lib->ms.DETBufferSizeY[k],
- mode_lib->ms.DETBufferSizeC[k],
- /* Output */
- &mode_lib->ms.UrgentBurstFactorLumaPre[k],
- &mode_lib->ms.UrgentBurstFactorChromaPre[k],
- &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
-
- // Calculate urgent bandwidth required, both urg and non urg peak bandwidth
- // assume flip bw is 0 at this point
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- mode_lib->ms.final_flip_bw[k] = 0;
-
- calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = mode_lib->ms.support.urg_vactive_bandwidth_required;
- calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required;
- calculate_peak_bandwidth_params->urg_bandwidth_required_qual = mode_lib->ms.support.urg_bandwidth_required_qual;
- calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required;
- calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = mode_lib->ms.surface_avg_vactive_required_bw;
- calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
-
- calculate_peak_bandwidth_params->display_cfg = display_cfg;
- calculate_peak_bandwidth_params->inc_flip_bw = 0;
- calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
- calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
- calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
- calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
-
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
- calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
- calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
- calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
- calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
- calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
- calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
- calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- calculate_peak_bandwidth_params);
-
- // Check urg peak bandwidth against available urg bw
- // check at SDP and DRAM, for all soc states (SVP prefetch an Sys Active)
- check_urgent_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth
- &s->dummy_single[1], // double* frac_urg_bandwidth_mall
- &mode_lib->ms.support.UrgVactiveBandwidthSupport,
- &mode_lib->ms.support.PrefetchBandwidthSupported,
-
- mode_lib->soc.mall_allocated_for_dcn_mbytes,
- mode_lib->ms.support.non_urg_bandwidth_required,
- mode_lib->ms.support.urg_vactive_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported;
- dml2_printf("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
- }
-
-#ifdef DML_GLOBAL_PREFETCH_CHECK
- if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) {
- CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes;
- CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
- CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
- CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
- CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
- CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
- CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY;
- CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC;
- CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
- CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte;
- CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY;
- CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC;
- CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
- CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
- CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
- CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded;
- CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto;
- CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
- CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch;
- if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024)
- CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024;
-
- CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) /
- ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0);
-
- // if recalc_prefetch_schedule is set, recalculate the prefetch schedule with the new impacted_Tpre, prefetch should be possible
- CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule;
- CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
- mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params);
- s->recalc_prefetch_done = 1;
- s->recalc_prefetch_schedule = 1;
- }
-#endif
- } // prefetch schedule ok, do urg bw and flip schedule
- } while (s->recalc_prefetch_schedule);
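[Editor's sketch] Within the removed do/while body, the peak urgent bandwidth is recomputed with flip bandwidth forced to zero and then compared against the available urgent bandwidth per SoC state and memory path by check_urgent_bandwidth_support(). A rough, simplified restatement of that support check is below; the enum names and the single worst-fraction output are assumptions (the real helper also reports a separate MALL fraction and a vactive-only verdict):

        #include <stdbool.h>

        enum { STATE_SYS_ACTIVE, STATE_SVP_PREFETCH, NUM_STATES };
        enum { BW_SDP, BW_DRAM, NUM_BW_TYPES };

        /*
         * Illustrative only: prefetch bandwidth is "supported" when the required
         * urgent bandwidth fits in the available bandwidth for every state/type.
         */
        static bool urgent_bw_supported(const double required[NUM_STATES][NUM_BW_TYPES],
                                        const double available[NUM_STATES][NUM_BW_TYPES],
                                        double *worst_frac)
        {
                bool ok = true;

                *worst_frac = 0.0;
                for (int s = 0; s < NUM_STATES; s++) {
                        for (int t = 0; t < NUM_BW_TYPES; t++) {
                                double frac = required[s][t] / available[s][t];

                                if (frac > *worst_frac)
                                        *worst_frac = frac;
                                if (frac > 1.0)
                                        ok = false;
                        }
                }
                return ok;
        }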
-
- // Flip Schedule
- // Both prefetch schedule and BW okay
- if (mode_lib->ms.support.PrefetchSupported == true) {
- mode_lib->ms.BandwidthAvailableForImmediateFlip =
- get_bandwidth_available_for_immediate_flip(
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.TotImmediateFlipBytes = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip) {
- s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
- s->HostVMInefficiencyFactor,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.meta_row_bytes[k]);
- } else {
- s->per_pipe_flip_bytes[k] = 0;
- }
- mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
-
- }
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- CalculateFlipSchedule(
- &mode_lib->scratch,
- display_cfg->plane_descriptors[k].immediate_flip,
- 1, // use_lb_flip_bw
- s->HostVMInefficiencyFactor,
- s->Tvm_trips_flip[k],
- s->Tr0_trips_flip[k],
- s->Tvm_trips_flip_rounded[k],
- s->Tr0_trips_flip_rounded[k],
- display_cfg->gpuvm_enable,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.BandwidthAvailableForImmediateFlip,
- mode_lib->ms.TotImmediateFlipBytes,
- display_cfg->plane_descriptors[k].pixel_format,
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ms.Tno_bw_flip[k],
- mode_lib->ms.dpte_row_height[k],
- mode_lib->ms.dpte_row_height_chroma[k],
- mode_lib->ms.use_one_row_for_frame_flip[k],
- mode_lib->ip.max_flip_time_us,
- mode_lib->ip.max_flip_time_lines,
- s->per_pipe_flip_bytes[k],
- mode_lib->ms.meta_row_bytes[k],
- s->meta_row_height_luma[k],
- s->meta_row_height_chroma[k],
- mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
-
- /* Output */
- &mode_lib->ms.dst_y_per_vm_flip[k],
- &mode_lib->ms.dst_y_per_row_flip[k],
- &mode_lib->ms.final_flip_bw[k],
- &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
- }
-
- calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
- calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
- calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
- calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
-
- calculate_peak_bandwidth_params->display_cfg = display_cfg;
- calculate_peak_bandwidth_params->inc_flip_bw = 1;
- calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
- calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
- calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
- calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
-
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
- calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
- calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
- calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
- calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
- calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
- calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
- calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- calculate_peak_bandwidth_params);
-
- calculate_immediate_flip_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth_flip
- &mode_lib->ms.support.ImmediateFlipSupport,
-
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_flip,
- mode_lib->ms.support.non_urg_bandwidth_required_flip,
- mode_lib->ms.support.urg_bandwidth_available);
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
-
- } else { // if prefetch not support, assume iflip is not supported too
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
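[Editor's sketch] The removed flip path above first derives the bandwidth left for immediate flips (available urgent bandwidth minus the no-flip requirement), schedules each requested flip against it, re-runs the peak-bandwidth calculation with inc_flip_bw = 1, and finally vetoes ImmediateFlipSupport if either the global check fails or any plane that requested a flip could not schedule one. A hedged sketch of that final decision (simplified to one bandwidth figure; the real code checks per SoC state and bandwidth type):

        #include <stdbool.h>
        #include <stddef.h>

        /* Illustrative sketch of the immediate-flip decision, not the DML code. */
        struct flip_plane {
                bool immediate_flip_requested;
                bool flip_schedule_ok;          /* per-plane CalculateFlipSchedule() result */
        };

        static bool immediate_flip_supported(const struct flip_plane *planes, size_t n,
                                             double urg_bw_required_with_flip,
                                             double urg_bw_available)
        {
                /* global bandwidth check with flip traffic included */
                if (urg_bw_required_with_flip > urg_bw_available)
                        return false;

                /* any plane that asked for a flip but cannot schedule it vetoes support */
                for (size_t k = 0; k < n; k++)
                        if (planes[k].immediate_flip_requested && !planes[k].flip_schedule_ok)
                                return false;

                return true;
        }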
-
- s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
- s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
- s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
- s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
- s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
- s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
- s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.USRRetrainingLatency = 0;
- s->mSOCParameters.SMNLatency = 0;
- s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index);
- s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index);
- s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
- s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
-
- CalculateWatermarks_params->display_cfg = display_cfg;
- CalculateWatermarks_params->USRRetrainingRequired = false;
- CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
- CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
- CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
- CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK;
- CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
- CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
- CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
- CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters;
- CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
- CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK;
- CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
- CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
- CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
- CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
- CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
- CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
- CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
- CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY;
- CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC;
- CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler;
- CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler;
- CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled;
- CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte;
- CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma;
- CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma;
-
- // Output
- CalculateWatermarks_params->Watermark = &mode_lib->ms.support.watermarks; // Watermarks *Watermark
- CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport;
- CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported;
- CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[]
- CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[]
- CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport;
- CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported;
- CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported
- CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport;
- CalculateWatermarks_params->g6_temp_read_support = &mode_lib->ms.support.g6_temp_read_support;
- CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin;
- CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
-
- CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
-
- calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]);
- }
- dml2_printf("DML::%s: Done prefetch calculation\n", __func__);
- // End of Prefetch Check
+ dml_core_ms_prefetch_check(mode_lib, display_cfg);
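[Editor's note] The net effect of this hunk is that the whole prefetch/bandwidth/flip/watermark block deleted above now lives in a single helper, so dml_core_mode_support() issues just one call. A plausible shape for that helper, inferred only from the call site (the exact prototype and parameter types are assumptions):

        /* Opaque here; the real definitions live in the dml2 core headers. */
        struct dml2_core_internal_display_mode_lib;
        struct dml2_display_cfg;

        /*
         * Inferred from the call site above. Conceptually it covers:
         *   1. per-plane prefetch schedule and urgent burst factors
         *   2. peak urgent bandwidth vs. available bandwidth (flip bw = 0)
         *   3. optional global prefetch admissibility / reschedule loop
         *   4. immediate-flip schedule and bandwidth check
         *   5. watermarks and p-state keepout lines
         */
        static void dml_core_ms_prefetch_check(struct dml2_core_internal_display_mode_lib *mode_lib,
                                               const struct dml2_display_cfg *display_cfg);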
mode_lib->ms.support.max_urgent_latency_us = s->mSOCParameters.max_urgent_latency_us;
@@ -9546,8 +9475,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.dram_change_vactive_det_fill_delay_us);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
- dml2_printf("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
+ DML_LOG_VERBOSE("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
#endif
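[Editor's sketch] The recurring change in the remaining hunks swaps the bespoke dml2_printf() debug calls for DML_LOG_VERBOSE() while leaving the format strings intact. As an illustration of the pattern only (the real macro is defined in the dml2 debug headers and may differ), a verbosity-gated logging macro of this shape keeps every call site unchanged while a single definition decides whether the messages compile in:

        #include <stdio.h>

        /* Illustrative stand-in; the real DML_LOG_VERBOSE definition may differ. */
        #ifdef ENABLE_VERBOSE_LOGS
        #define DML_LOG_VERBOSE(fmt, ...) printf(fmt, ##__VA_ARGS__)   /* GNU/kernel-style ## */
        #else
        #define DML_LOG_VERBOSE(fmt, ...) do { } while (0)
        #endif

        int main(void)
        {
                DML_LOG_VERBOSE("DML::%s: example = %d\n", __func__, 42);
                return 0;
        }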
/*Mode Support, Voltage State and SOC Configuration*/
@@ -9597,17 +9526,17 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
&& !mode_lib->ms.support.ExceededMALLSize
&& mode_lib->ms.support.g6_temp_read_support
&& ((!display_cfg->hostvm_enable && !s->ImmediateFlipRequired) || mode_lib->ms.support.ImmediateFlipSupport)) {
- dml2_printf("DML::%s: mode is supported\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: mode is supported\n", __func__);
mode_lib->ms.support.ModeSupport = true;
} else {
- dml2_printf("DML::%s: mode is NOT supported\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: mode is NOT supported\n", __func__);
mode_lib->ms.support.ModeSupport = false;
}
}
// Since now the mode_support work on 1 particular power state, so there is only 1 state idx (index 0).
- dml2_printf("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
- dml2_printf("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.support.MPCCombineEnable[k] = mode_lib->ms.MPCCombine[k];
@@ -9623,8 +9552,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.OutputRate[k] = mode_lib->ms.OutputRate[k];
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]);
- dml2_printf("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]);
#endif
}
@@ -9632,7 +9561,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (!mode_lib->ms.support.ModeSupport)
dml2_print_mode_support_info(&mode_lib->ms.support, true);
- dml2_printf("DML::%s: --- DONE --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- DONE --- \n", __func__);
#endif
return mode_lib->ms.support.ModeSupport;
@@ -9642,18 +9571,18 @@ unsigned int dml2_core_calcs_mode_support_ex(struct dml2_core_calcs_mode_support
{
unsigned int result;
- dml2_printf("DML::%s: ------------- START ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- START ----------\n", __func__);
result = dml_core_mode_support(in_out_params);
if (result)
*in_out_params->out_evaluation_info = in_out_params->mode_lib->ms.support;
- dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
for (unsigned int k = 0; k < in_out_params->in_display_cfg->num_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
- dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- DONE ----------\n", __func__);
return result;
}
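[Editor's note] Besides the rename, the hunk above also corrects the conversion for reserved_vblank_time_ns from %u to %lu; printing a long-sized field with %u is a format mismatch (undefined behaviour) on 64-bit targets. A tiny illustration, with the field assumed to be unsigned long based on the %lu conversion used in the new line:

        #include <stdio.h>

        int main(void)
        {
                unsigned long reserved_vblank_time_ns = 600000UL;       /* e.g. 600 us */

                /* %lu matches the unsigned long field; %u would be mismatched */
                printf("plane_0: reserved_vblank_time_ns = %lu\n", reserved_vblank_time_ns);
                return 0;
        }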
@@ -9687,19 +9616,19 @@ static void CalculatePixelDeliveryTimes(
double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]);
- dml2_printf("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]);
- dml2_printf("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used);
- dml2_printf("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz);
- dml2_printf("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used);
+ DML_LOG_VERBOSE("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz);
+ DML_LOG_VERBOSE("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]);
#endif
if (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio <= 1) {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio / pixel_clock_mhz;
@@ -9733,10 +9662,10 @@ static void CalculatePixelDeliveryTimes(
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
#endif
}
@@ -9752,12 +9681,12 @@ static void CalculatePixelDeliveryTimes(
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub_c[k];
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]);
#endif
}
}
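[Editor's sketch] CalculatePixelDeliveryTimes first derives a per-line delivery time from the swath width, DPP count, horizontal scale ratio and pixel clock (the v_ratio <= 1 branch shown above), then divides it by the requests per swath to get the per-request time. A stripped-down restatement of those two steps; the names are simplified, and the chroma and prefetch variants follow the same pattern:

        /* Illustrative restatement of the delivery-time arithmetic shown above. */
        static double line_delivery_time_us(unsigned int swath_width_ub,
                                            unsigned int dpps_used,
                                            double h_ratio,
                                            double pixel_clock_mhz)
        {
                /* time to deliver one swath-width line at the source rate */
                return swath_width_ub * dpps_used / h_ratio / pixel_clock_mhz;
        }

        static double request_delivery_time_us(double line_time_us,
                                               unsigned int req_per_swath_ub)
        {
                return line_time_us / req_per_swath_ub;
        }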
@@ -9853,14 +9782,14 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", __func__, k, p->TimePerChromaMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", __func__, k, p->TimePerChromaMetaChunkNominal[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]);
#endif
}
@@ -9881,7 +9810,7 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
else
p->time_per_tdlut_group[k] = 0;
- dml2_printf("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]);
if (p->display_cfg->gpuvm_enable == true) {
if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) {
@@ -9897,14 +9826,14 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
if (dpte_groups_per_row_luma_ub <= 2) {
dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1;
}
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub);
p->time_per_pte_group_nom_luma[k] = p->DST_Y_PER_PTE_ROW_NOM_L[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
p->time_per_pte_group_vblank_luma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
@@ -9928,9 +9857,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
if (dpte_groups_per_row_chroma_ub <= 2) {
dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1;
}
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
p->time_per_pte_group_nom_chroma[k] = p->DST_Y_PER_PTE_ROW_NOM_C[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
p->time_per_pte_group_vblank_chroma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
@@ -9945,17 +9874,17 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
p->time_per_pte_group_flip_chroma[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]);
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]);
#endif
}
} // CalculateMetaAndPTETimes
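[Editor's sketch] The PTE timing above splits each PTE row into dpte groups (bumping the count when it rounds to two or fewer) and spreads the row's destination lines over them. A compact restatement of the nominal-case expression visible in the hunk; the vblank and flip variants just substitute the corresponding dst_y terms:

        /* Sketch of the nominal PTE group time from the expressions above. */
        static double time_per_pte_group_nom_us(double dst_y_per_pte_row_nom,
                                                unsigned int h_total,
                                                double pixel_clock_mhz,
                                                unsigned int dpte_groups_per_row_ub)
        {
                double line_time_us = h_total / pixel_clock_mhz;

                return dst_y_per_pte_row_nom * line_time_us / dpte_groups_per_row_ub;
        }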
@@ -9991,18 +9920,18 @@ static void CalculateVMGroupAndRequestTimes(
double line_time;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
#endif
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
bool dcc_mrq_enable = display_cfg->plane_descriptors[k].surface.dcc.enable && mrq_present;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable);
- dml2_printf("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]);
#endif
if (display_cfg->gpuvm_enable) {
@@ -10071,13 +10000,13 @@ static void CalculateVMGroupAndRequestTimes(
else
TimePerVMRequestFlip[k] = 0.0;
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]);
- dml2_printf("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time);
- dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %f\n", __func__, k, num_group_per_lower_vm_stage_pref);
- dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %f\n", __func__, k, num_group_per_lower_vm_stage_flip);
- dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %f\n", __func__, k, num_req_per_lower_vm_stage_pref);
- dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %f\n", __func__, k, num_req_per_lower_vm_stage_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %d\n", __func__, k, num_group_per_lower_vm_stage_pref);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %d\n", __func__, k, num_group_per_lower_vm_stage_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %d\n", __func__, k, num_req_per_lower_vm_stage_pref);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %d\n", __func__, k, num_req_per_lower_vm_stage_flip);
if (display_cfg->gpuvm_max_page_table_levels > 2) {
TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
@@ -10094,10 +10023,10 @@ static void CalculateVMGroupAndRequestTimes(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]);
#endif
}
}
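[Editor's sketch] CalculateVMGroupAndRequestTimes ends by halving the per-plane VM group times when more than two GPUVM page-table levels are configured (the vblank case is visible above; halving the flip and request times the same way is an assumption here, since those lines fall outside this hunk). Restated compactly:

        /* Sketch of the >2-level page-table adjustment shown above. */
        static void halve_vm_times_for_deep_page_tables(unsigned int max_page_table_levels,
                                                        double *time_per_vm_group_vblank_us,
                                                        double *time_per_vm_group_flip_us)
        {
                if (max_page_table_levels > 2) {
                        *time_per_vm_group_vblank_us /= 2.0;
                        *time_per_vm_group_flip_us /= 2.0;      /* assumed to mirror vblank */
                }
        }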
@@ -10113,7 +10042,6 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
unsigned int SingleVTotal = 0;
bool SameTiming = true;
bool FoundCriticalSurface = false;
- double LastZ8StutterPeriod = 0;
memset(l, 0, sizeof(struct dml2_core_calcs_CalculateStutterEfficiency_locals));
@@ -10127,9 +10055,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0, l->MaximumEffectiveCompressionLuma);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma);
#endif
l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0;
l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0 / l->MaximumEffectiveCompressionLuma;
@@ -10142,9 +10070,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1, l->MaximumEffectiveCompressionChroma);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma);
#endif
l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1;
l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1 / l->MaximumEffectiveCompressionChroma;
@@ -10160,19 +10088,19 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->AverageDCCZeroSizeFraction = l->TotalZeroSizeRequestReadBandwidth / p->TotalDataReadBandwidth;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth);
- dml2_printf("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma);
- dml2_printf("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, l->MaximumEffectiveCompressionChroma);
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma);
+ DML_LOG_VERBOSE("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, l->MaximumEffectiveCompressionChroma);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction);
- dml2_printf("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0);
- dml2_printf("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte);
#endif
if (l->AverageDCCZeroSizeFraction == 1) {
l->AverageZeroSizeCompressionRate = l->TotalZeroSizeRequestReadBandwidth / l->TotalZeroSizeCompressedReadBandwidth;
@@ -10189,10 +10117,10 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
- dml2_printf("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
+ DML_LOG_VERBOSE("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate));
#endif
} else {
l->EffectiveCompressedBufferSize = math_min2((double)p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate,
@@ -10200,16 +10128,16 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
((double)p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64) * (p->rob_alloc_compressed ? l->AverageDCCCompressionRate : 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries);
- dml2_printf("DML::%s: ZeroSizeBufferEntries = %u\n", __func__, p->ZeroSizeBufferEntries);
- dml2_printf("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries);
+ DML_LOG_VERBOSE("DML::%s: ZeroSizeBufferEntries = %u\n", __func__, p->ZeroSizeBufferEntries);
+ DML_LOG_VERBOSE("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
#endif
*p->StutterPeriod = 0;
@@ -10220,15 +10148,15 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->LinesInDETYRoundedDownToSwath = math_floor2(l->LinesInDETY, p->SwathHeightY[k]);
l->DETBufferingTimeY = l->LinesInDETYRoundedDownToSwath * ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY);
- dml2_printf("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath);
- dml2_printf("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY);
#endif
if (!FoundCriticalSurface || l->DETBufferingTimeY < *p->StutterPeriod) {
@@ -10248,17 +10176,17 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->SinglePipeCriticalSurface = (p->DPPPerSurface[k] == 1);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface);
- dml2_printf("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod);
- dml2_printf("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface);
- dml2_printf("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface);
- dml2_printf("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface);
#endif
}
}
@@ -10276,14 +10204,14 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = math_min2(*p->StutterPeriod * p->TotalDataReadBandwidth, l->EffectiveCompressedBufferSize);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
- dml2_printf("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024);
- dml2_printf("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW);
- dml2_printf("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024);
+ DML_LOG_VERBOSE("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK);
#endif
l->StutterBurstTime = l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer
@@ -10292,10 +10220,10 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
/ math_min2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
*p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64));
- dml2_printf("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
+ DML_LOG_VERBOSE("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64));
+ DML_LOG_VERBOSE("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
#endif
l->TotalActiveWriteback = 0;
memset(l->stream_visited, 0, DML2_MAX_PLANES * sizeof(bool));
@@ -10324,9 +10252,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
if (l->TotalActiveWriteback == 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime);
- dml2_printf("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime);
+ DML_LOG_VERBOSE("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
#endif
*p->StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitTime + l->StutterBurstTime) / *p->StutterPeriod) * 100;
*p->Z8StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitZ8Time + l->StutterBurstTime) / *p->StutterPeriod) * 100;
@@ -10339,11 +10267,11 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
*p->Z8NumberOfStutterBurstsPerFrame = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
#endif
if (*p->StutterEfficiencyNotIncludingVBlank > 0) {
@@ -10358,7 +10286,7 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
if (*p->Z8StutterEfficiencyNotIncludingVBlank > 0) {
- LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod;
+ //LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod;
if (!((p->SynchronizeTimings || TotalNumberOfActiveOTG == 1) && SameTiming)) {
*p->Z8StutterEfficiency = *p->Z8StutterEfficiencyNotIncludingVBlank;
} else {
@@ -10370,25 +10298,25 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TotalNumberOfActiveOTG = %u\n", __func__, TotalNumberOfActiveOTG);
- dml2_printf("DML::%s: SameTiming = %u\n", __func__, SameTiming);
- dml2_printf("DML::%s: SynchronizeTimings = %u\n", __func__, p->SynchronizeTimings);
- dml2_printf("DML::%s: LastZ8StutterPeriod = %f\n", __func__, LastZ8StutterPeriod);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
- dml2_printf("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency);
- dml2_printf("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: TotalNumberOfActiveOTG = %u\n", __func__, TotalNumberOfActiveOTG);
+ DML_LOG_VERBOSE("DML::%s: SameTiming = %u\n", __func__, SameTiming);
+ DML_LOG_VERBOSE("DML::%s: SynchronizeTimings = %u\n", __func__, p->SynchronizeTimings);
+ DML_LOG_VERBOSE("DML::%s: LastZ8StutterPeriod = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank > 0 ? l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod : 0);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
#endif
*p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
- dml2_printf("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
#endif
}
@@ -10422,7 +10350,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
double max_uclk_mhz = 0;
double min_return_latency_in_DCFCLK_cycles = 0;
- dml2_printf("DML::%s: --- START --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- START --- \n", __func__);
memset(&mode_lib->scratch, 0, sizeof(struct dml2_core_internal_scratch));
memset(&mode_lib->mp, 0, sizeof(struct dml2_core_internal_mode_program));
@@ -10444,13 +10372,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
- DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 2 ||
cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
if (cfg_support_info->stream_support_info[stream_index].odms_used > 1)
- DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
switch (cfg_support_info->stream_support_info[stream_index].odms_used) {
case (4):
@@ -10476,51 +10404,51 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used;
mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0;
- DML2_ASSERT(mode_lib->mp.Dppclk[k] > 0);
+ DML_ASSERT(mode_lib->mp.Dppclk[k] > 0);
}
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0;
- dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
}
mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0;
mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0;
- DML2_ASSERT(mode_lib->mp.Dcfclk > 0);
- DML2_ASSERT(mode_lib->mp.FabricClock > 0);
- DML2_ASSERT(mode_lib->mp.dram_bw_mbps > 0);
- DML2_ASSERT(mode_lib->mp.uclk_freq_mhz > 0);
- DML2_ASSERT(mode_lib->mp.GlobalDPPCLK > 0);
- DML2_ASSERT(mode_lib->mp.Dispclk > 0);
- DML2_ASSERT(mode_lib->mp.DCFCLKDeepSleep > 0);
- DML2_ASSERT(s->SOCCLK > 0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
- dml2_printf("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
- dml2_printf("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz);
- dml2_printf("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk);
+ DML_ASSERT(mode_lib->mp.Dcfclk > 0);
+ DML_ASSERT(mode_lib->mp.FabricClock > 0);
+ DML_ASSERT(mode_lib->mp.dram_bw_mbps > 0);
+ DML_ASSERT(mode_lib->mp.uclk_freq_mhz > 0);
+ DML_ASSERT(mode_lib->mp.GlobalDPPCLK > 0);
+ DML_ASSERT(mode_lib->mp.Dispclk > 0);
+ DML_ASSERT(mode_lib->mp.DCFCLKDeepSleep > 0);
+ DML_ASSERT(s->SOCCLK > 0);
+
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
+ DML_LOG_VERBOSE("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk);
for (k = 0; k < s->num_active_planes; ++k) {
- dml2_printf("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]);
- }
- dml2_printf("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK);
- dml2_printf("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: min_clk_table min_fclk_khz = %d\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz);
- dml2_printf("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config));
+ DML_LOG_VERBOSE("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]);
+ }
+ DML_LOG_VERBOSE("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK);
+ DML_LOG_VERBOSE("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK);
+ DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: min_clk_table min_fclk_khz = %ld\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz);
+ DML_LOG_VERBOSE("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config));
for (k = 0; k < mode_lib->mp.num_active_pipes; ++k) {
- dml2_printf("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]);
- dml2_printf("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]);
+ DML_LOG_VERBOSE("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
}
for (k = 0; k < s->num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
#endif
CalculateMaxDETAndMinCompressedBufferSize(
@@ -10617,8 +10545,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
mode_lib->mp.vactive_sw_bw_l[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
mode_lib->mp.vactive_sw_bw_c[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- dml2_printf("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
}
CalculateSwathAndDETConfiguration_params->display_cfg = display_cfg;
@@ -11097,7 +11025,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
for (k = 0; k < s->num_active_planes; ++k) {
- bool cursor_not_enough_urgent_latency_hiding = 0;
+ bool cursor_not_enough_urgent_latency_hiding = false;
s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
@@ -11173,8 +11101,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.WritebackDelay[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]);
#endif
}
@@ -11183,7 +11111,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->immediate_flip_required = s->immediate_flip_required || display_cfg->plane_descriptors[k].immediate_flip;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
+ DML_LOG_VERBOSE("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
#endif
if (s->num_active_planes > 1) {
@@ -11219,12 +11147,12 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->DestinationLineTimesForPrefetchLessThan2 = false;
s->VRatioPrefetchMoreThanMax = false;
- dml2_printf("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
mode_lib->mp.TWait[k] = CalculateTWait(
display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
mode_lib->mp.UrgentLatency,
@@ -11261,7 +11189,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
#endif
CalculatePrefetchSchedule_params->display_cfg = display_cfg;
CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
@@ -11356,7 +11284,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.impacted_prefetch_margin_us[k] = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
#endif
mode_lib->mp.VStartupMin[k] = s->MaxVStartupLines[k];
} // for k
@@ -11366,9 +11294,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
if (mode_lib->mp.NoTimeToPrefetch[k] == true ||
mode_lib->mp.NotEnoughTimeForDynamicMetadata[k] ||
mode_lib->mp.DSTYAfterScaler[k] > 8) {
- dml2_printf("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
- dml2_printf("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]);
- dml2_printf("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 0)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 0)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
if (mode_lib->mp.dst_y_prefetch[k] < 2)
@@ -11377,24 +11305,24 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
s->VRatioPrefetchMoreThanMax = true;
- dml2_printf("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
}
if (mode_lib->mp.NotEnoughUrgentLatencyHiding[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
}
if (s->VRatioPrefetchMoreThanMax == true || s->DestinationLineTimesForPrefetchLessThan2 == true) {
- dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
- dml2_printf("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+ DML_LOG_VERBOSE("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
mode_lib->mp.PrefetchModeSupported = false;
}
- dml2_printf("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__,
+ DML_LOG_VERBOSE("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__,
mode_lib->mp.PrefetchModeSupported ? "" : "NOT ", CalculatePrefetchSchedule_params->VStartup);
// Prefetch schedule OK, now check prefetch bw
@@ -11422,24 +11350,24 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
&mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]);
- dml2_printf("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
- dml2_printf("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
- dml2_printf("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]);
- dml2_printf("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]);
#endif
}
@@ -11503,11 +11431,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.urg_bandwidth_available);
if (!mode_lib->mp.PrefetchModeSupported)
- dml2_printf("DML::%s: Bandwidth not sufficient for prefetch!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bandwidth not sufficient for prefetch!\n", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
if (mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
}
@@ -11533,12 +11461,12 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
}
mode_lib->mp.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->mp.NoOfDPP[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k = %u\n", __func__, k);
- dml2_printf("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes);
+ DML_LOG_VERBOSE("DML::%s: k = %u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes);
#endif
}
for (k = 0; k < s->num_active_planes; ++k) {
@@ -11631,13 +11559,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.urg_bandwidth_available);
if (!mode_lib->mp.ImmediateFlipSupported)
- dml2_printf("DML::%s: Bandwidth not sufficient for flip!", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bandwidth not sufficient for flip!", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
if (display_cfg->plane_descriptors[k].immediate_flip && mode_lib->mp.ImmediateFlipSupportedForPipe[k] == false) {
mode_lib->mp.ImmediateFlipSupported = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k);
#endif
}
}
@@ -11650,28 +11578,28 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.PrefetchAndImmediateFlipSupported = (mode_lib->mp.PrefetchModeSupported == true && (!must_support_iflip || mode_lib->mp.ImmediateFlipSupported));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported);
+ DML_LOG_VERBOSE("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported);
for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable);
- dml2_printf("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported);
- dml2_printf("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported);
+ DML_LOG_VERBOSE("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
+ DML_LOG_VERBOSE("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported);
+ DML_LOG_VERBOSE("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported);
#endif
- dml2_printf("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]);
}
for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
if (!mode_lib->mp.PrefetchAndImmediateFlipSupported) {
- dml2_printf("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__);
} else {
- dml2_printf("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__);
// DCC Configuration
for (k = 0; k < s->num_active_planes; ++k) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k);
#endif
CalculateDCCConfiguration(
display_cfg->plane_descriptors[k].surface.dcc.enable,
@@ -11780,8 +11708,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->mp.Watermark, mode_lib->mp.pstate_keepout_dst_lines);
- dml2_printf("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
- dml2_printf("DML::%s: DEBUG PixelClock = %d kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
+ DML_LOG_VERBOSE("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
+ DML_LOG_VERBOSE("DML::%s: DEBUG PixelClock = %ld kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
//Display Pipeline Delivery Time in Prefetch, Groups
CalculatePixelDeliveryTimes(
@@ -11893,15 +11821,15 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.TCalc + mode_lib->mp.MinTTUVBlank[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
#endif
s->Tvstartup_margin = (s->MaxVStartupLines[k] - mode_lib->mp.VStartupMin[k]) * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.MinTTUVBlank[k] + s->Tvstartup_margin;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin);
- dml2_printf("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
#endif
mode_lib->mp.Tdmdl[k] = mode_lib->mp.Tdmdl[k] + s->Tvstartup_margin;
@@ -11920,9 +11848,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->blank_lines_remaining = (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active) - mode_lib->mp.VStartup[k];
if (s->blank_lines_remaining < 0) {
- dml2_printf("ERROR: Vstartup is larger than vblank!?\n");
+ DML_LOG_VERBOSE("ERROR: Vstartup is larger than vblank!?\n");
s->blank_lines_remaining = 0;
- DML2_ASSERT(0);
+ DML_ASSERT(0);
}
mode_lib->mp.MIN_DST_Y_NEXT_START[k] = s->dlg_vblank_start + s->blank_lines_remaining + s->LSetup;
@@ -11936,18 +11864,18 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k] = false;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]);
- dml2_printf("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]);
- dml2_printf("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]);
- dml2_printf("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, HTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total);
- dml2_printf("DML::%s: k=%u, VTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
- dml2_printf("DML::%s: k=%u, VActive = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active);
- dml2_printf("DML::%s: k=%u, VFrontPorch = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
- dml2_printf("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]);
- dml2_printf("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]);
- dml2_printf("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HTotal = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VTotal = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VActive = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VFrontPorch = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]);
#endif
}
@@ -11969,9 +11897,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.vactive_sw_bw_l[k] + mode_lib->mp.vactive_sw_bw_c[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
#endif
}
@@ -12051,28 +11979,28 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
min_return_latency_in_DCFCLK_cycles = (min_return_uclk_cycles / max_uclk_mhz + min_return_fclk_cycles / max_fclk_mhz) * hard_minimum_dcfclk_mhz;
mode_lib->mp.min_return_latency_in_dcfclk = (unsigned int)min_return_latency_in_DCFCLK_cycles;
mode_lib->mp.dcfclk_deep_sleep_hysteresis = (unsigned int)math_max2(32, (double)mode_lib->ip.pixel_chunk_size_kbytes * 1024 * 3 / 4 / 64 - min_return_latency_in_DCFCLK_cycles);
- DML2_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256);
+ DML_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz);
- dml2_printf("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz);
- dml2_printf("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz);
- dml2_printf("DML::%s: min_return_uclk_cycles = %d\n", __func__, min_return_uclk_cycles);
- dml2_printf("DML::%s: min_return_fclk_cycles = %d\n", __func__, min_return_fclk_cycles);
- dml2_printf("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis);
- dml2_printf("DML::%s: --- END --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: min_return_uclk_cycles = %ld\n", __func__, min_return_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: min_return_fclk_cycles = %ld\n", __func__, min_return_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis);
+ DML_LOG_VERBOSE("DML::%s: --- END --- \n", __func__);
#endif
return (in_out_params->mode_lib->mp.PrefetchAndImmediateFlipSupported);
}
bool dml2_core_calcs_mode_programming_ex(struct dml2_core_calcs_mode_programming_ex *in_out_params)
{
- dml2_printf("DML::%s: ------------- START ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- START ----------\n", __func__);
bool result = dml_core_mode_programming(in_out_params);
- dml2_printf("DML::%s: result = %0d\n", __func__, result);
- dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: result = %0d\n", __func__, result);
+ DML_LOG_VERBOSE("DML::%s: ------------- DONE ----------\n", __func__);
return result;
}
@@ -12130,16 +12058,16 @@ void dml2_core_calcs_get_dpte_row_height(
unsigned int MacroTileHeight = is_plane1 ? MacroTileHeightC : MacroTileHeightY;
unsigned int PTEBufferSizeInRequests = is_plane1 ? mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma : mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML: %s: is_plane1 = %u\n", __func__, is_plane1);
- dml2_printf("DML: %s: BytePerPixel = %u\n", __func__, BytePerPixel);
- dml2_printf("DML: %s: BlockHeight256Bytes = %u\n", __func__, BlockHeight256Bytes);
- dml2_printf("DML: %s: BlockWidth256Bytes = %u\n", __func__, BlockWidth256Bytes);
- dml2_printf("DML: %s: MacroTileWidth = %u\n", __func__, MacroTileWidth);
- dml2_printf("DML: %s: MacroTileHeight = %u\n", __func__, MacroTileHeight);
- dml2_printf("DML: %s: PTEBufferSizeInRequests = %u\n", __func__, PTEBufferSizeInRequests);
- dml2_printf("DML: %s: dpte_buffer_size_in_pte_reqs_luma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma);
- dml2_printf("DML: %s: dpte_buffer_size_in_pte_reqs_chroma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
- dml2_printf("DML: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML: %s: is_plane1 = %u\n", __func__, is_plane1);
+ DML_LOG_VERBOSE("DML: %s: BytePerPixel = %u\n", __func__, BytePerPixel);
+ DML_LOG_VERBOSE("DML: %s: BlockHeight256Bytes = %u\n", __func__, BlockHeight256Bytes);
+ DML_LOG_VERBOSE("DML: %s: BlockWidth256Bytes = %u\n", __func__, BlockWidth256Bytes);
+ DML_LOG_VERBOSE("DML: %s: MacroTileWidth = %u\n", __func__, MacroTileWidth);
+ DML_LOG_VERBOSE("DML: %s: MacroTileHeight = %u\n", __func__, MacroTileHeight);
+ DML_LOG_VERBOSE("DML: %s: PTEBufferSizeInRequests = %u\n", __func__, PTEBufferSizeInRequests);
+ DML_LOG_VERBOSE("DML: %s: dpte_buffer_size_in_pte_reqs_luma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma);
+ DML_LOG_VERBOSE("DML: %s: dpte_buffer_size_in_pte_reqs_chroma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
+ DML_LOG_VERBOSE("DML: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes);
#endif
unsigned int dummy_integer[21];
@@ -12193,16 +12121,16 @@ void dml2_core_calcs_get_dpte_row_height(
CalculateVMAndRowBytes(&mode_lib->scratch.calculate_vm_and_row_bytes_params);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML: %s: dpte_row_height = %u\n", __func__, *dpte_row_height);
+ DML_LOG_VERBOSE("DML: %s: dpte_row_height = %u\n", __func__, *dpte_row_height);
#endif
}
static bool is_dual_plane(enum dml2_source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
@@ -12220,6 +12148,8 @@ static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const
wm_regs->fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
wm_regs->sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
wm_regs->uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
wm_regs->urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
@@ -12246,11 +12176,11 @@ void dml2_core_calcs_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs
cursor_dlg_regs->dst_x_offset = (unsigned int) ((dst_x_offset > 0) ? dst_x_offset : 0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position);
- dml2_printf("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz);
- dml2_printf("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset);
+ DML_LOG_VERBOSE("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position);
+ DML_LOG_VERBOSE("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz);
+ DML_LOG_VERBOSE("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz);
+ DML_LOG_VERBOSE("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset);
+ DML_LOG_VERBOSE("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset);
#endif
cursor_dlg_regs->chunk_hdl_adjust = 3;
@@ -12286,7 +12216,7 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
double stored_swath_c_bytes;
bool is_phantom_pipe;
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx);
pixel_chunk_bytes = (unsigned int)(mode_lib->ip.pixel_chunk_size_kbytes * 1024);
min_pixel_chunk_bytes = (unsigned int)(mode_lib->ip.min_pixel_chunk_size_bytes);
@@ -12329,19 +12259,19 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
if (sw_mode == dml2_sw_linear && display_cfg->gpuvm_enable) {
unsigned int p0_pte_row_height_linear = (unsigned int)(dml_get_dpte_row_height_linear_l(mode_lib, pipe_idx));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear);
+ DML_LOG_VERBOSE("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear);
#endif
- DML2_ASSERT(p0_pte_row_height_linear >= 8);
+ DML_ASSERT(p0_pte_row_height_linear >= 8);
rq_regs->rq_regs_l.pte_row_height_linear = math_log2_approx(p0_pte_row_height_linear) - 3;
if (dual_plane) {
unsigned int p1_pte_row_height_linear = (unsigned int)(dml_get_dpte_row_height_linear_c(mode_lib, pipe_idx));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear);
+ DML_LOG_VERBOSE("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear);
#endif
if (sw_mode == dml2_sw_linear) {
- DML2_ASSERT(p1_pte_row_height_linear >= 8);
+ DML_ASSERT(p1_pte_row_height_linear >= 8);
}
rq_regs->rq_regs_c.pte_row_height_linear = math_log2_approx(p1_pte_row_height_linear) - 3;
}
@@ -12375,12 +12305,12 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
if (stored_swath_l_bytes / stored_swath_c_bytes <= 1.5) {
detile_buf_plane1_addr = (unsigned int)(detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
#endif
} else {
detile_buf_plane1_addr = (unsigned int)(dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0), 1024, 0) / 1024.0); // 2/3 to luma
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
#endif
}
}
@@ -12388,15 +12318,15 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
rq_regs->plane1_base_address = detile_buf_plane1_addr;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
- dml2_printf("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
- dml2_printf("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
- dml2_printf("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
+ DML_LOG_VERBOSE("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
+ DML_LOG_VERBOSE("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
#endif
- //dml2_printf_rq_regs_st(rq_regs);
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
+ //DML_LOG_VERBOSE_rq_regs_st(rq_regs);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
}
static void rq_dlg_get_dlg_reg(
@@ -12411,10 +12341,10 @@ static void rq_dlg_get_dlg_reg(
memset(l, 0, sizeof(struct dml2_core_shared_rq_dlg_get_dlg_reg_locals));
- dml2_printf("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
l->plane_idx = dml_get_plane_idx(mode_lib, pipe_idx);
- DML2_ASSERT(l->plane_idx < DML2_MAX_PLANES);
+ DML_ASSERT(l->plane_idx < DML2_MAX_PLANES);
l->source_format = dml2_444_8;
l->odm_mode = dml2_odm_mode_bypass;
@@ -12444,18 +12374,18 @@ static void rq_dlg_get_dlg_reg(
l->pclk_freq_in_mhz = (double)l->timing->pixel_clock_khz / 1000;
l->ref_freq_to_pix_freq = l->refclk_freq_in_mhz / l->pclk_freq_in_mhz;
- dml2_printf("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx);
- dml2_printf("DML_DLG: %s: htotal = %d\n", __func__, l->htotal);
- dml2_printf("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz);
- dml2_printf("DML_DLG: %s: soc.refclk_mhz = %3.2f\n", __func__, mode_lib->soc.dchub_refclk_mhz);
- dml2_printf("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced);
+ DML_LOG_VERBOSE("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx);
+ DML_LOG_VERBOSE("DML_DLG: %s: htotal = %d\n", __func__, l->htotal);
+ DML_LOG_VERBOSE("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: soc.refclk_mhz = %d\n", __func__, mode_lib->soc.dchub_refclk_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
+ DML_LOG_VERBOSE("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced);
- DML2_ASSERT(l->refclk_freq_in_mhz != 0);
- DML2_ASSERT(l->pclk_freq_in_mhz != 0);
- DML2_ASSERT(l->ref_freq_to_pix_freq < 4.0);
+ DML_ASSERT(l->refclk_freq_in_mhz != 0);
+ DML_ASSERT(l->pclk_freq_in_mhz != 0);
+ DML_ASSERT(l->ref_freq_to_pix_freq < 4.0);
// Need to figure out which side of odm combine we're in
// Assume the pipe instance under the same plane is in order
@@ -12484,14 +12414,14 @@ static void rq_dlg_get_dlg_reg(
l->pipe_idx_in_combine = pipe_idx - l->first_pipe_idx_in_plane; // DML assumes the pipes in the same plane will have continuous indexing (i.e. plane 0 use pipe 0, 1, and plane 1 uses pipe 2, 3, etc.)
disp_dlg_regs->refcyc_h_blank_end = (unsigned int)(((double)l->hblank_end + (double)l->pipe_idx_in_combine * (double)l->hactive / (double)l->odm_combine_factor) * l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx);
- dml2_printf("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane);
- dml2_printf("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine);
- dml2_printf("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor);
+ DML_LOG_VERBOSE("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane);
+ DML_LOG_VERBOSE("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine);
+ DML_LOG_VERBOSE("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor);
}
- dml2_printf("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end);
- DML2_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13));
disp_dlg_regs->ref_freq_to_pix_freq = (unsigned int)(l->ref_freq_to_pix_freq * math_pow(2, 19));
disp_dlg_regs->refcyc_per_htotal = (unsigned int)(l->ref_freq_to_pix_freq * (double)l->htotal * math_pow(2, 8));
@@ -12500,20 +12430,20 @@ static void rq_dlg_get_dlg_reg(
l->min_ttu_vblank = mode_lib->mp.MinTTUVBlank[mode_lib->mp.pipe_plane[pipe_idx]];
l->min_dst_y_next_start = (unsigned int)(mode_lib->mp.MIN_DST_Y_NEXT_START[mode_lib->mp.pipe_plane[pipe_idx]]);
- dml2_printf("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank);
- dml2_printf("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, l->min_dst_y_next_start);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
+ DML_LOG_VERBOSE("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, l->min_dst_y_next_start);
+ DML_LOG_VERBOSE("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
l->vready_after_vcount0 = (unsigned int)(mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[mode_lib->mp.pipe_plane[pipe_idx]]);
disp_dlg_regs->vready_after_vcount0 = l->vready_after_vcount0;
- dml2_printf("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
+ DML_LOG_VERBOSE("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
l->dst_x_after_scaler = (unsigned int)(mode_lib->mp.DSTXAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
l->dst_y_after_scaler = (unsigned int)(mode_lib->mp.DSTYAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
- dml2_printf("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler);
- dml2_printf("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler);
l->dst_y_prefetch = mode_lib->mp.dst_y_prefetch[mode_lib->mp.pipe_plane[pipe_idx]];
l->dst_y_per_vm_vblank = mode_lib->mp.dst_y_per_vm_vblank[mode_lib->mp.pipe_plane[pipe_idx]];
@@ -12521,28 +12451,28 @@ static void rq_dlg_get_dlg_reg(
l->dst_y_per_vm_flip = mode_lib->mp.dst_y_per_vm_flip[mode_lib->mp.pipe_plane[pipe_idx]];
l->dst_y_per_row_flip = mode_lib->mp.dst_y_per_row_flip[mode_lib->mp.pipe_plane[pipe_idx]];
- dml2_printf("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank);
if (l->dst_y_prefetch > 0 && l->dst_y_per_vm_vblank > 0 && l->dst_y_per_row_vblank > 0) {
- DML2_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank));
+ DML_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank));
}
l->vratio_pre_l = mode_lib->mp.VRatioPrefetchY[mode_lib->mp.pipe_plane[pipe_idx]];
l->vratio_pre_c = mode_lib->mp.VRatioPrefetchC[mode_lib->mp.pipe_plane[pipe_idx]];
- dml2_printf("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l);
- dml2_printf("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c);
// Active
l->refcyc_per_line_delivery_pre_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_line_delivery_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l);
l->refcyc_per_line_delivery_pre_c = 0.0;
l->refcyc_per_line_delivery_c = 0.0;
@@ -12551,8 +12481,8 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_line_delivery_pre_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_line_delivery_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c);
}
disp_dlg_regs->refcyc_per_vm_dmdata = (unsigned int)(mode_lib->mp.Tdmdl_vm[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
@@ -12561,8 +12491,8 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_req_delivery_pre_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_req_delivery_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l);
l->refcyc_per_req_delivery_pre_c = 0.0;
l->refcyc_per_req_delivery_c = 0.0;
@@ -12570,16 +12500,16 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_req_delivery_pre_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_req_delivery_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c);
}
// TTU - Cursor
- DML2_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 1);
+ DML_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 1);
// Assign to register structures
disp_dlg_regs->min_dst_y_next_start = (unsigned int)((double)l->min_dst_y_next_start * math_pow(2, 2));
- DML2_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18));
+ DML_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18));
disp_dlg_regs->dst_y_after_scaler = l->dst_y_after_scaler; // in terms of line
disp_dlg_regs->refcyc_x_after_scaler = (unsigned int)((double)l->dst_x_after_scaler * l->ref_freq_to_pix_freq); // in terms of refclk
@@ -12592,10 +12522,10 @@ static void rq_dlg_get_dlg_reg(
disp_dlg_regs->vratio_prefetch = (unsigned int)(l->vratio_pre_l * math_pow(2, 19));
disp_dlg_regs->vratio_prefetch_c = (unsigned int)(l->vratio_pre_c * math_pow(2, 19));
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(mode_lib->mp.TimePerVMGroupVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
disp_dlg_regs->refcyc_per_vm_group_flip = (unsigned int)(mode_lib->mp.TimePerVMGroupFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
@@ -12662,11 +12592,11 @@ static void rq_dlg_get_dlg_reg(
disp_ttu_regs->qos_ramp_disable_c = 0;
disp_ttu_regs->min_ttu_vblank = (unsigned int)(l->min_ttu_vblank * l->refclk_freq_in_mhz);
- // CHECK for HW registers' range, DML2_ASSERT or clamp
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13));
+ // CHECK for HW registers' range, DML_ASSERT or clamp
+ DML_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13));
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)math_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(math_pow(2, 23) - 1);
@@ -12680,16 +12610,16 @@ static void rq_dlg_get_dlg_reg(
disp_dlg_regs->refcyc_per_vm_req_flip = (unsigned int)(math_pow(2, 23) - 1);
- DML2_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
- DML2_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
+ DML_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
if (disp_dlg_regs->dst_y_per_pte_row_nom_l >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1);
+ DML_LOG_VERBOSE("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1);
l->dst_y_per_pte_row_nom_l = (unsigned int)math_pow(2, 17) - 1;
}
if (l->dual_plane) {
if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1);
+ DML_LOG_VERBOSE("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1);
l->dst_y_per_pte_row_nom_c = (unsigned int)math_pow(2, 17) - 1;
}
}
@@ -12700,20 +12630,20 @@ static void rq_dlg_get_dlg_reg(
if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int)math_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int)(math_pow(2, 23) - 1);
}
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13));
if (l->dual_plane) {
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13));
}
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14));
+ DML_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14));
+ DML_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24));
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
}
}
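Most of the DML_ASSERT range checks above guard fixed-width hardware register fields: a value programmed into an N-bit field must stay below 2^N, and where the comment says "DML_ASSERT or clamp" the code clamps instead of asserting. A minimal sketch of that clamp pattern, using a hypothetical value rather than the real mode_lib->mp source:

	/* hypothetical refcyc value; the real code reads it from mode_lib->mp */
	unsigned int refcyc_per_vm_group_vblank = 9000000;

	/* a 23-bit register field holds at most 2^23 - 1 = 8388607, so clamp */
	if (refcyc_per_vm_group_vblank >= (1u << 23))
		refcyc_per_vm_group_vblank = (1u << 23) - 1;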
@@ -12736,11 +12666,11 @@ static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, co
arb_param->pstate_stall_threshold = (unsigned int)(mode_lib->ip_caps.fams2.max_allow_delay_us * refclk_freq_in_mhz);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding);
- dml2_printf("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit);
- dml2_printf("DML::%s: compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes);
- dml2_printf("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis);
+ DML_LOG_VERBOSE("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding);
+ DML_LOG_VERBOSE("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit);
+ DML_LOG_VERBOSE("DML::%s: compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes);
+ DML_LOG_VERBOSE("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis);
#endif
}
@@ -13013,10 +12943,10 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp
out->vblank_reserved_time_us = display_cfg->plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us);
- dml2_printf("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us);
- dml2_printf("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines);
- dml2_printf("DML::%s: vblank_reserved_time_us = %f\n", __func__, out->vblank_reserved_time_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines);
+ DML_LOG_VERBOSE("DML::%s: vblank_reserved_time_us = %u\n", __func__, out->vblank_reserved_time_us);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index 4e502f0a6d20..bdee6ad7bc59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -1078,6 +1078,8 @@ struct dml2_core_calcs_mode_programming_locals {
enum dml2_source_format_class pixel_format[DML2_MAX_PLANES];
unsigned int lb_source_lines_l[DML2_MAX_PLANES];
unsigned int lb_source_lines_c[DML2_MAX_PLANES];
+ unsigned int num_dsc_slices[DML2_MAX_PLANES];
+ bool dsc_enable[DML2_MAX_PLANES];
};
struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
index 2504d9c2ec34..7a220c0141c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
@@ -82,7 +82,7 @@ bool dml2_core_utils_is_420(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -145,7 +145,7 @@ bool dml2_core_utils_is_422_planar(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -208,7 +208,7 @@ bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format)
val = 1;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -216,104 +216,104 @@ bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format)
void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
{
- dml2_printf("DML: ===================================== \n");
- dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
+ DML_LOG_VERBOSE("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: DML_MODE_SUPPORT_INFO_ST\n");
if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
- dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
+ DML_LOG_VERBOSE("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
+ DML_LOG_VERBOSE("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
- dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
+ DML_LOG_VERBOSE("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
- dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ DML_LOG_VERBOSE("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ DML_LOG_VERBOSE("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
- dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
+ DML_LOG_VERBOSE("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
if (!fail_only || support->NotEnoughLanesForMSO == 1)
- dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
+ DML_LOG_VERBOSE("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ DML_LOG_VERBOSE("DML: support: P2IWith420 = %d\n", support->P2IWith420);
if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ DML_LOG_VERBOSE("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
if (!fail_only || support->DSCSlicesODMModeSupported == 0)
- dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ DML_LOG_VERBOSE("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
if (!fail_only || support->NotEnoughDSCUnits == 1)
- dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
if (!fail_only || support->NotEnoughDSCSlices == 1)
- dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ DML_LOG_VERBOSE("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
if (!fail_only || support->ROBSupport == 0)
- dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
+ DML_LOG_VERBOSE("DML: support: ROBSupport = %d\n", support->ROBSupport);
if (!fail_only || support->OutstandingRequestsSupport == 0)
- dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
- dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ DML_LOG_VERBOSE("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ DML_LOG_VERBOSE("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ DML_LOG_VERBOSE("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ DML_LOG_VERBOSE("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ DML_LOG_VERBOSE("DML: support: CursorSupport = %d\n", support->CursorSupport);
if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ DML_LOG_VERBOSE("DML: support: PitchSupport = %d\n", support->PitchSupport);
if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
if (!fail_only || support->PrefetchSupported == 0)
- dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ DML_LOG_VERBOSE("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
+ DML_LOG_VERBOSE("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
if (!fail_only || support->DynamicMetadataSupported == 0)
- dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
+ DML_LOG_VERBOSE("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
- dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
if (!fail_only || support->g6_temp_read_support == 0)
- dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+ DML_LOG_VERBOSE("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
if (!fail_only || support->ModeSupport == 0)
- dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- dml2_printf("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: support: ModeSupport = %d\n", support->ModeSupport);
+ DML_LOG_VERBOSE("DML: ===================================== \n");
}
const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type)
@@ -358,9 +358,9 @@ void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_di
out_bpp[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
- dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
- dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
+ DML_LOG_VERBOSE("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
+ DML_LOG_VERBOSE("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
#endif
}
}
@@ -391,7 +391,7 @@ unsigned int dml2_core_util_get_num_active_pipes(int unsigned num_planes, const
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
#endif
return num_active_pipes;
}
@@ -452,7 +452,7 @@ unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw
else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
return 262144;
else {
- DML2_ASSERT(0);
+ DML_ASSERT(0);
return 256;
};
}
@@ -498,8 +498,8 @@ int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_gfx11_sw_256kb_r_x)
version = 11;
else {
- dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
+ DML_ASSERT(0);
}
return version;
@@ -511,7 +511,7 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
unsigned int index = 0;
for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
+ DML_LOG_VERBOSE("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %ld\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
if (i == 0)
index = 0;
@@ -524,8 +524,8 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
}
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, index);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, index);
#endif
return index;
}
@@ -533,32 +533,32 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
{
unsigned int i;
- bool clk_entry_found = 0;
+ bool clk_entry_found = false;
for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
- dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
+ DML_LOG_VERBOSE("DML::%s: clk_table.uclk.clk_values_khz[%d] = %ld\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
- clk_entry_found = 1;
+ clk_entry_found = true;
break;
}
}
if (!clk_entry_found)
- DML2_ASSERT(clk_entry_found);
+ DML_ASSERT(clk_entry_found);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, i);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, i);
#endif
return i;
}
bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if (dml2_core_utils_is_420(source_format) || dml2_core_utils_is_422_planar(source_format) || (source_format == dml2_rgbe_alpha))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index 15507926f3a4..f486b090bbfc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -754,6 +754,8 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
@@ -768,6 +770,8 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
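The two new Z8 stutter fields are programmed with the same conversion as the neighbouring watermarks: a latency in microseconds is scaled by the DCHUBBUB reference clock in MHz to get a refclk cycle count. A minimal sketch of that conversion with made-up numbers:

	/* assumed inputs: 600 MHz refclk, 30.5 us Z8 stutter enter+exit latency */
	double refclk_freq_in_mhz = 600.0;
	double z8_sr_enter_us = 30.5;
	unsigned int sr_enter_z8 = (unsigned int)(z8_sr_enter_us * refclk_freq_in_mhz); /* 18300 refclk cycles */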
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
index f4b1a7d02d42..a265f254152c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
@@ -182,6 +182,10 @@ static bool build_min_clock_table(const struct dml2_soc_bb *soc_bb, struct dml2_
min_table->max_clocks_khz.dtbclk = soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1];
min_table->max_clocks_khz.phyclk = soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1];
+ min_table->max_ss_clocks_khz.dispclk = (unsigned int)((double)min_table->max_clocks_khz.dispclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+ min_table->max_ss_clocks_khz.dppclk = (unsigned int)((double)min_table->max_clocks_khz.dppclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+ min_table->max_ss_clocks_khz.dtbclk = (unsigned int)((double)min_table->max_clocks_khz.dtbclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+
min_table->max_clocks_khz.dcfclk = soc_bb->clk_table.dcfclk.clk_values_khz[soc_bb->clk_table.dcfclk.num_clk_values - 1];
min_table->max_clocks_khz.fclk = soc_bb->clk_table.fclk.clk_values_khz[soc_bb->clk_table.fclk.num_clk_values - 1];
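The new max_ss_clocks_khz entries derate each maximum clock by the SoC downspread percentage, so later checks against spread-spectrum-adjusted limits have a precomputed ceiling. A sketch of the arithmetic with assumed numbers:

	/* assumed values: 1200000 kHz max dispclk, 0.5% DCN downspread */
	unsigned int max_dispclk_khz = 1200000;
	double dcn_downspread_percent = 0.5;
	unsigned int max_ss_dispclk_khz =
		(unsigned int)((double)max_dispclk_khz / (1.0 + dcn_downspread_percent / 100.0)); /* 1194029 kHz */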
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index f50662b83296..d88b3e0082dd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -659,7 +659,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) {
switch (i) {
case 1:
- DML2_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
@@ -670,7 +670,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 2:
- DML2_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
@@ -681,7 +681,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 3:
- DML2_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
@@ -692,7 +692,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 4:
- DML2_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
index dc2ce5e77f57..4a7c4c62111e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
@@ -761,7 +761,7 @@ bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache
total_mcaches_required--;
}
}
- dml2_printf("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
+ DML_LOG_VERBOSE("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
if (total_mcaches_required > dml->soc_bbox.num_dcc_mcaches) {
result = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
deleted file mode 100644
index c506667897c4..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dml2_debug.h"
-
-int dml2_log_internal(const char *format, ...)
-{
- return 0;
-}
-
-int dml2_printf(const char *format, ...)
-{
-#ifdef _DEBUG
-#ifdef _DEBUG_PRINTS
- int result;
- va_list args;
- va_start(args, format);
-
- result = vprintf(format, args);
-
- va_end(args);
-
- return result;
-#else
- return 0;
-#endif
-#else
- return 0;
-#endif
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
index bfe6f236d2e4..b226225103c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
@@ -5,55 +5,62 @@
#ifndef __DML2_DEBUG_H__
#define __DML2_DEBUG_H__
-#ifndef DML2_ASSERT
-#define DML2_ASSERT(condition) ((void)0)
-#endif
+#include "os_types.h"
+#define DML_ASSERT(condition) ASSERT(condition)
+#define DML_LOG_LEVEL_DEFAULT DML_LOG_LEVEL_WARN
+#define DML_LOG_INTERNAL(fmt, ...) dm_output_to_console(fmt, ## __VA_ARGS__)
-/*
- * DML_LOG_FATAL - fatal errors for unrecoverable DML states until a restart.
- * DML_LOG_ERROR - unexpected but recoverable failures inside DML
- * DML_LOG_WARN - unexpected inputs or events to DML
- * DML_LOG_INFO - high level tracing of DML interfaces
- * DML_LOG_DEBUG - detailed tracing of DML internal components
- * DML_LOG_VERBOSE - detailed tracing of DML calculation procedure
- */
-#if !defined(DML_LOG_LEVEL)
-#if defined(_DEBUG) && defined(_DEBUG_PRINTS)
-/* for backward compatibility with old macros */
-#define DML_LOG_LEVEL 5
-#else
-#define DML_LOG_LEVEL 0
-#endif
-#endif
+/* ASSERT with message output */
+#define DML_ASSERT_MSG(condition, fmt, ...) \
+ do { \
+ if (!(condition)) { \
+ DML_LOG_ERROR("DML ASSERT hit in %s line %d\n", __func__, __LINE__); \
+ DML_LOG_ERROR(fmt, ## __VA_ARGS__); \
+ DML_ASSERT(condition); \
+ } \
+ } while (0)
+
+/* fatal errors for unrecoverable DML states until a full reset */
+#define DML_LOG_LEVEL_FATAL 0
+/* unexpected but recoverable failures inside DML */
+#define DML_LOG_LEVEL_ERROR 1
+/* unexpected inputs or events to DML */
+#define DML_LOG_LEVEL_WARN 2
+/* high level tracing of DML interfaces */
+#define DML_LOG_LEVEL_INFO 3
+/* detailed tracing of DML internal components */
+#define DML_LOG_LEVEL_DEBUG 4
+/* detailed tracing of DML calculation procedure */
+#define DML_LOG_LEVEL_VERBOSE 5
-#define DML_LOG_FATAL(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
-#if DML_LOG_LEVEL >= 1
-#define DML_LOG_ERROR(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#ifndef DML_LOG_LEVEL
+#define DML_LOG_LEVEL DML_LOG_LEVEL_DEFAULT
+#endif /* #ifndef DML_LOG_LEVEL */
+
+#define DML_LOG_FATAL(fmt, ...) DML_LOG_INTERNAL("[DML FATAL] " fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_ERROR
+#define DML_LOG_ERROR(fmt, ...) DML_LOG_INTERNAL("[DML ERROR] "fmt, ## __VA_ARGS__)
#else
#define DML_LOG_ERROR(fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 2
-#define DML_LOG_WARN(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_WARN
+#define DML_LOG_WARN(fmt, ...) DML_LOG_INTERNAL("[DML WARN] "fmt, ## __VA_ARGS__)
#else
#define DML_LOG_WARN(fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 3
-#define DML_LOG_INFO(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_INFO
+#define DML_LOG_INFO(fmt, ...) DML_LOG_INTERNAL("[DML INFO] "fmt, ## __VA_ARGS__)
#else
#define DML_LOG_INFO(fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 4
-#define DML_LOG_DEBUG(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG
+#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL("[DML DEBUG] "fmt, ## __VA_ARGS__)
#else
#define DML_LOG_DEBUG(fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 5
-#define DML_LOG_VERBOSE(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE
+#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL("[DML VERBOSE] "fmt, ## __VA_ARGS__)
#else
#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
#endif
-
-int dml2_log_internal(const char *format, ...);
-int dml2_printf(const char *format, ...);
-
-#endif
+#endif /* __DML2_DEBUG_H__ */
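With the rewritten header, each DML_LOG_* macro expands to dm_output_to_console() with a severity prefix, and any level above the compile-time DML_LOG_LEVEL (default DML_LOG_LEVEL_WARN) compiles to ((void)0). A minimal usage sketch; the variables here are hypothetical, only the macros and DML2_MAX_PLANES come from the patch:

	/* emitted at the default level */
	DML_LOG_WARN("DML::%s: unexpected odm mode %d, using bypass\n", __func__, odm_mode);

	/* compiled out unless DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE (5) */
	DML_LOG_VERBOSE("DML::%s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);

	/* assert with context; the message is only printed when the condition is false */
	DML_ASSERT_MSG(num_planes <= DML2_MAX_PLANES,
		"DML::%s: num_planes = %u exceeds DML2_MAX_PLANES\n", __func__, num_planes);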
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
index d8d01dceacdd..00688b9f1df4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
@@ -38,6 +38,12 @@ struct dml2_mcg_min_clock_table {
} max_clocks_khz;
struct {
+ unsigned int dispclk;
+ unsigned int dppclk;
+ unsigned int dtbclk;
+ } max_ss_clocks_khz;
+
+ struct {
unsigned int dprefclk;
unsigned int xtalclk;
unsigned int pcierefclk;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index a966abd40788..5f1b49a50049 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -1082,22 +1082,22 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s
if (stream_disp_cfg_index >= disp_cfg_index_max)
continue;
- if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_bypass) {
- scratch.odm_info.odm_factor = 1;
- } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_2to1) {
- scratch.odm_info.odm_factor = 2;
- } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_4to1) {
- scratch.odm_info.odm_factor = 4;
- } else {
- ASSERT(false);
- scratch.odm_info.odm_factor = 1;
- }
-
+ if (ctx->architecture == dml2_architecture_20) {
+ if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_bypass) {
+ scratch.odm_info.odm_factor = 1;
+ } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_2to1) {
+ scratch.odm_info.odm_factor = 2;
+ } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_4to1) {
+ scratch.odm_info.odm_factor = 4;
+ } else {
+ ASSERT(false);
+ scratch.odm_info.odm_factor = 1;
+ }
+ } else if (ctx->architecture == dml2_architecture_21) {
		/* After the DML2.1 update, ODM interpretation changes and is no longer the same as for DML2.0.
		 * This is not an issue with the new resource management logic. This block ensures backward
		 * compatibility with legacy pipe management with the updated DML.
		 * */
- if (ctx->architecture == dml2_architecture_21) {
if (ODMMode[stream_disp_cfg_index] == 1) {
scratch.odm_info.odm_factor = 1;
} else if (ODMMode[stream_disp_cfg_index] == 2) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 2061d43b92e1..5de775fd8fce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -896,7 +896,7 @@ static void populate_dummy_dml_surface_cfg(struct dml_surface_cfg_st *out, unsig
out->SurfaceWidthC[location] = in->timing.h_addressable;
out->SurfaceHeightC[location] = in->timing.v_addressable;
out->PitchY[location] = ((out->SurfaceWidthY[location] + 127) / 128) * 128;
- out->PitchC[location] = 0;
+ out->PitchC[location] = 1;
out->DCCEnable[location] = false;
out->DCCMetaPitchY[location] = 0;
out->DCCMetaPitchC[location] = 0;
@@ -973,7 +973,9 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
}
}
-static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context, struct scaler_data *out)
+static struct scaler_data *get_scaler_data_for_plane(
+ const struct dc_plane_state *in,
+ struct dc_state *context)
{
int i;
struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe;
@@ -994,7 +996,7 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc
}
ASSERT(i < MAX_PIPES);
- memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out));
+ return &temp_pipe->plane_res.scl_data;
}
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location,
@@ -1057,11 +1059,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
const struct dc_plane_state *in, struct dc_state *context,
const struct soc_bounding_box_st *soc)
{
- struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL);
- if (!scaler_data)
- return;
-
- get_scaler_data_for_plane(in, context, scaler_data);
+ struct scaler_data *scaler_data = get_scaler_data_for_plane(in, context);
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
@@ -1126,8 +1124,6 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
out->DynamicMetadataTransmittedBytes[location] = 0;
out->NumberOfCursors[location] = 1;
-
- kfree(scaler_data);
}
static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 939ee0708bd2..525b7d04bf84 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -24,6 +24,8 @@
*
*/
+#include <linux/vmalloc.h>
+
#include "display_mode_core.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
@@ -661,7 +663,10 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
dml2_copy_clocks_to_dc_state(&out_clks, context);
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.a, &dml2->v20.dml_core_ctx);
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
- memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
+ if (context->streams[0]->sink->link->dc->caps.is_apu)
+ dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.dml_core_ctx);
+ else
+ memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx);
//copy for deciding zstate use
@@ -732,17 +737,22 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
return out;
}
+ DC_FP_START();
+
/* Use dml_validate_only for fast_validate path */
if (fast_validate)
out = dml2_validate_only(context);
else
out = dml2_validate_and_build_resource(in_dc, context);
+
+ DC_FP_END();
+
return out;
}
static inline struct dml2_context *dml2_allocate_memory(void)
{
- return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+ return (struct dml2_context *) vzalloc(sizeof(struct dml2_context));
}
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
@@ -779,11 +789,15 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
break;
}
+ DC_FP_START();
+
initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);
initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+
+ DC_FP_END();
}
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
@@ -812,7 +826,7 @@ void dml2_destroy(struct dml2_context *dml2)
if (dml2->architecture == dml2_architecture_21)
dml21_destroy(dml2);
- kfree(dml2);
+ vfree(dml2);
}
void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
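struct dml2_context is large enough that a physically contiguous kzalloc() can fail under memory pressure, so the allocation moves to the vmalloc area; the FP-heavy init and validate paths are additionally bracketed with DC_FP_START()/DC_FP_END() so the floating-point DML code runs in an FPU-safe section. A minimal sketch of the allocation pattern (function names are illustrative, struct dml2_context comes from dml2_internal_types.h):

	#include <linux/vmalloc.h>

	static struct dml2_context *example_alloc(void)
	{
		return vzalloc(sizeof(struct dml2_context)); /* zeroed, vmalloc-backed */
	}

	static void example_free(struct dml2_context *ctx)
	{
		vfree(ctx); /* pair with vfree(), never kfree() */
	}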
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 785226945699..5100f269368e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -40,6 +40,7 @@ struct dc_sink;
struct dc_stream_state;
struct resource_context;
struct display_stream_compressor;
+struct dc_mcache_params;
// Configuration of the MALL on the SoC
struct dml2_soc_mall_info {
@@ -107,6 +108,7 @@ struct dml2_dc_callbacks {
unsigned int (*get_max_flickerless_instant_vtotal_increase)(
struct dc_stream_state *stream,
bool is_gaming);
+ bool (*allocate_mcache)(struct dc_state *context, const struct dc_mcache_params *mcache_params);
};
struct dml2_dc_svp_callbacks {
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index abf439e743f2..2d70586cef40 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -790,8 +790,7 @@ static bool dpp3_program_blnd_lut(struct dpp *dpp_base,
if (params == NULL) {
REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_MODE, 0);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_blnd_lut(dpp_base, false);
+ dpp3_power_on_blnd_lut(dpp_base, false);
return false;
}
@@ -1204,8 +1203,7 @@ static bool dpp3_program_shaper(struct dpp *dpp_base,
if (params == NULL) {
REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_shaper(dpp_base, false);
+ dpp3_power_on_shaper(dpp_base, false);
return false;
}
@@ -1399,8 +1397,7 @@ static bool dpp3_program_3dlut(struct dpp *dpp_base,
if (params == NULL) {
dpp3_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_hdr3dlut(dpp_base, false);
+ dpp3_power_on_hdr3dlut(dpp_base, false);
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
index 62b7012cda43..f7a373a3d70a 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -138,7 +138,7 @@ bool dpp35_construct(
dpp->base.funcs = &dcn35_dpp_funcs;
// w/a for cursor memory stuck in LS by programming DISPCLK_R_GATE_DISABLE, limit w/a to some ASIC revs
- if (dpp->base.ctx->asic_id.hw_internal_rev <= 0x10)
+ if (dpp->base.ctx->asic_id.hw_internal_rev < 0x40)
dpp->dispclk_r_gate_disable = true;
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 1236e0f9a256..712aff7e17f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -120,10 +120,11 @@ void dpp401_set_cursor_attributes(
enum dc_cursor_color_format color_format = cursor_attributes->color_format;
int cur_rom_en = 0;
- // DCN4 should always do Cursor degamma for Cursor Color modes
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
- cur_rom_en = 1;
+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+ cur_rom_en = 1;
+ }
}
REG_UPDATE_3(CURSOR0_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index 75128fd34306..bd1b9aef6d5c 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -57,13 +57,6 @@ static const struct dsc_funcs dcn20_dsc_funcs = {
#define DC_LOGGER \
dsc->ctx->logger
-enum dsc_bits_per_comp {
- DSC_BPC_8 = 8,
- DSC_BPC_10 = 10,
- DSC_BPC_12 = 12,
- DSC_BPC_UNKNOWN
-};
-
/* API functions (external or via structure->function_pointer) */
void dsc2_construct(struct dcn20_dsc *dsc,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
index 1fb90b52b814..a9c04fc95bd1 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
@@ -457,6 +457,12 @@
type DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING; \
type DSCRM_DSC_FORWARD_EN_STATUS
+enum dsc_bits_per_comp {
+ DSC_BPC_8 = 8,
+ DSC_BPC_10 = 10,
+ DSC_BPC_12 = 12,
+ DSC_BPC_UNKNOWN
+};
struct dcn20_dsc_registers {
uint32_t DSC_TOP_CONTROL;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 4893b793fec0..4222679fd4c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -45,12 +45,6 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
#define DC_LOGGER \
dsc->ctx->logger
-enum dsc_bits_per_comp {
- DSC_BPC_8 = 8,
- DSC_BPC_10 = 10,
- DSC_BPC_12 = 12,
- DSC_BPC_UNKNOWN
-};
/* API functions (external or via structure->function_pointer) */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index b099989d9364..942d9f0b6df2 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -411,6 +411,20 @@ enum dc_irq_source dal_irq_get_rx_source(
}
}
+enum dc_irq_source dal_irq_get_read_request(
+ const struct gpio *irq)
+{
+ enum gpio_id id = dal_gpio_get_id(irq);
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_DCI2C_RR_DDC1 +
+ dal_gpio_get_enum(irq));
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
enum gpio_result dal_irq_setup_hpd_filter(
struct gpio *irq,
struct gpio_hpd_config *config)
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
index 2546224b326a..e4496ad203b2 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
@@ -132,9 +132,9 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
// Init VMID 0 based on PA config
dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
}
-
- dcn21_dchvm_init(hubbub);
-
+ if (!hubbub1->base.ctx->dc->config.skip_riommu_prefetch_wa) {
+ dcn21_dchvm_init(hubbub);
+ }
return hubbub1->num_vmid;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 5ed195377a6c..baed31611477 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -1032,7 +1032,7 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected,
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
- .hubp_clear_tiling = hubp2_clear_tiling,
+ .hubp_clear_tiling = hubp401_clear_tiling,
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
index 40ecebea1ba0..bee617ca0838 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
@@ -27,6 +27,24 @@
# DCE
###############################################################################
+ifdef CONFIG_DRM_AMD_DC_SI
+HWSS_DCE60 = dce60_hwseq.o
+
+AMD_DAL_HWSS_DCE60 = $(addprefix $(AMDDALPATH)/dc/hwss/dce60/,$(HWSS_DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE60)
+endif
+
+###############################################################################
+
+HWSS_DCE80 = dce80_hwseq.o
+
+AMD_DAL_HWSS_DCE80 = $(addprefix $(AMDDALPATH)/dc/hwss/dce80/,$(HWSS_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE80)
+
+###############################################################################
+
HWSS_DCE = dce_hwseq.o
AMD_DAL_HWSS_DCE = $(addprefix $(AMDDALPATH)/dc/hwss/dce/,$(HWSS_DCE))
@@ -65,14 +83,6 @@ AMD_DAL_HWSS_DCE120 = $(addprefix $(AMDDALPATH)/dc/hwss/dce120/,$(HWSS_DCE120))
AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE120)
-###############################################################################
-
-HWSS_DCE80 = dce80_hwseq.o
-
-AMD_DAL_HWSS_DCE80 = $(addprefix $(AMDDALPATH)/dc/hwss/dce80/,$(HWSS_DCE80))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE80)
-
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 5656d10368ad..23bec5d25ed6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -2763,12 +2763,12 @@ static void dce110_enable_per_frame_crtc_position_reset(
}
-static void init_pipes(struct dc *dc, struct dc_state *context)
+static void dce110_init_pipes(struct dc *dc, struct dc_state *context)
{
// Do nothing
}
-static void init_hw(struct dc *dc)
+static void dce110_init_hw(struct dc *dc)
{
int i;
struct dc_bios *bp;
@@ -3327,7 +3327,7 @@ void dce110_disable_link_output(struct dc_link *link,
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
- .init_hw = init_hw,
+ .init_hw = dce110_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
.post_unlock_program_front_end = dce110_post_unlock_program_front_end,
@@ -3371,7 +3371,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
};
static const struct hwseq_private_funcs dce110_private_funcs = {
- .init_pipes = init_pipes,
+ .init_pipes = dce110_init_pipes,
.set_input_transfer_func = dce110_set_input_transfer_func,
.set_output_transfer_func = dce110_set_output_transfer_func,
.power_down = dce110_power_down,
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c
index 44b56490e152..a08e9f9eec17 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c
@@ -26,7 +26,7 @@
#include "dm_services.h"
#include "dc.h"
#include "core_types.h"
-#include "dce60_hw_sequencer.h"
+#include "dce60_hwseq.h"
#include "dce/dce_hwseq.h"
#include "dce110/dce110_hwseq.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h
index f3b2d8b60d5b..f3b2d8b60d5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 912f96323ed6..f9ee55998b6b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -94,6 +94,128 @@ static void print_microsec(struct dc_context *dc_ctx,
us_x10 % frac);
}
+/*
+ * Delay until the busy-until point has passed so that the necessary
+ * locking/programming can be done on consecutive full updates
+ */
+void dcn10_wait_for_pipe_update_if_needed(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only)
+{
+ struct crtc_position position;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ unsigned int vpos, frame_count;
+ uint32_t vupdate_start, vupdate_end, vblank_start;
+ unsigned int lines_to_vupdate, us_to_vupdate;
+ unsigned int us_per_line, us_vupdate;
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->stream_res.tg ||
+ !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ if (pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream)
+ return;
+
+ if (!pipe_ctx->wait_is_required)
+ return;
+
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+ return;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ frame_count = tg->funcs->get_frame_count(tg);
+
+ if (frame_count - pipe_ctx->wait_frame_count > 2)
+ return;
+
+ vblank_start = pipe_ctx->pipe_dlg_param.vblank_start;
+
+ if (vpos >= vupdate_start && vupdate_start >= vblank_start)
+ lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
+ else
+ lines_to_vupdate = vupdate_start - vpos;
+
+ us_per_line =
+ stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
+ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+ if (vupdate_end < vupdate_start)
+ vupdate_end += stream->timing.v_total;
+
+ if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
+ us_to_vupdate = 0;
+
+ us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
+
+ if (is_surface_update_only && us_to_vupdate + us_vupdate > 200) {
+ //surface updates come in at high irql
+ pipe_ctx->wait_is_required = true;
+ return;
+ }
+
+ fsleep(us_to_vupdate + us_vupdate);
+
+ //clear
+ pipe_ctx->next_vupdate = 0;
+ pipe_ctx->wait_frame_count = 0;
+ pipe_ctx->wait_is_required = false;
+}
+
+/*
+ * On pipe unlock and programming, indicate the pipe will be busy
+ * until a given frame and line (vupdate). This is required for
+ * consecutive full updates: the current update must latch before
+ * the next update can be programmed.
+ */
+void dcn10_set_wait_for_update_needed_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ uint32_t vupdate_start, vupdate_end;
+ struct crtc_position position;
+ unsigned int vpos, cur_frame;
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->stream_res.tg ||
+ !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+ ASSERT(optc1->max_frame_count != 0);
+
+ if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+ return;
+
+ pipe_ctx->next_vupdate = vupdate_start;
+
+ cur_frame = tg->funcs->get_frame_count(tg);
+
+ if (vpos < vupdate_start) {
+ pipe_ctx->wait_frame_count = cur_frame;
+ } else {
+ if (cur_frame + 1 > optc1->max_frame_count)
+ pipe_ctx->wait_frame_count = cur_frame + 1 - optc1->max_frame_count;
+ else
+ pipe_ctx->wait_frame_count = cur_frame + 1;
+ }
+
+ pipe_ctx->wait_is_required = true;
+}
+
void dcn10_lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock)
@@ -2664,7 +2786,6 @@ void dcn10_update_visual_confirm_color(struct dc *dc,
struct mpc *mpc = dc->res_pool->mpc;
if (mpc->funcs->set_bg_color) {
- memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
}
}
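[Editorial note, not part of the patch] The new dcn10_wait_for_pipe_update_if_needed() converts a line count into a wait time using

	us_per_line = h_total * 10000 / pix_clk_100hz

which follows from pix_clk_100hz being the pixel clock in units of 100 Hz: line time = h_total / (pix_clk_100hz * 100) seconds = h_total * 10000 / pix_clk_100hz microseconds. As a rough worked example (values assumed, not taken from the patch): a 148.5 MHz pixel clock gives pix_clk_100hz = 1485000, and with h_total = 2200 the integer math yields 2200 * 10000 / 1485000 = 14 us per line (exact value ~14.8 us), so waiting out 50 lines sleeps roughly 700 us via fsleep().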
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
index 42ffd1e1299c..57d30ea225f2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
@@ -50,6 +50,13 @@ void dcn10_optimize_bandwidth(
void dcn10_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
+void dcn10_wait_for_pipe_update_if_needed(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool is_surface_update_only);
+void dcn10_set_wait_for_update_needed_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
void dcn10_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 846c9c51f2d9..858288c3b1ac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -2053,7 +2053,7 @@ void dcn20_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ if (pipe->plane_state) {
ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
@@ -2482,7 +2482,7 @@ bool dcn20_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK)
return false;
/* apply updated bandwidth parameters */
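[Editorial note, not part of the patch] The validate_bandwidth() callback changes from returning bool to returning enum dc_status (see the resource_funcs hunk in core_types.h further down), and DC_OK is defined as 1 while failure codes are also non-zero, so a truthiness test such as "if (!...->validate_bandwidth(...))" would treat most failures as success. A minimal sketch of the adjusted caller, assuming the same callback shape:

	enum dc_status status;

	/* recalculate DML parameters */
	status = dc->res_pool->funcs->validate_bandwidth(dc, context, false);
	if (status != DC_OK)
		return false;	/* explicit compare; any non-DC_OK code is a failure */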
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index be26c925fdfa..e68f21fd5f0f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -84,6 +84,20 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
struct dsc_config dsc_cfg;
struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
+ struct dcn_dsc_state dsc_state = {0};
+
+ if (!dsc) {
+ DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+
+ if (dsc->funcs->dsc_read_state) {
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+ if (!dsc_state.dsc_fw_en) {
+ DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+ }
/* Enable DSC hw block */
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index cd0adf72b223..a0b05b9ef660 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1181,6 +1181,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
bool two_pix_per_container = false;
+ struct dce_hwseq *hws = stream->ctx->dc->hwseq;
two_pix_per_container = pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
@@ -1201,7 +1202,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ if ((odm_combine_factor == 2) || (hws->funcs.is_dp_dig_pixel_rate_div_policy &&
+ hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx)))
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 922b8d71cf1a..c814d957305a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -241,11 +241,6 @@ void dcn35_init_hw(struct dc *dc)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
- if (res_pool->dccg->funcs->dccg_root_gate_disable_control) {
- for (i = 0; i < res_pool->pipe_count; i++)
- res_pool->dccg->funcs->dccg_root_gate_disable_control(res_pool->dccg, i, 0);
- }
-
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
@@ -901,12 +896,18 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dccg *dccg = dc->res_pool->dccg;
+
+
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
/* initialize HUBP on power up */
pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
-
+ /* make sure DPPCLK is on */
+ dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, true);
+ dpp->funcs->dpp_dppclk_control(dpp, false, true);
/* make sure OPP_PIPE_CLOCK_EN = 1 */
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
pipe_ctx->stream_res.opp,
@@ -923,6 +924,7 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
// Program system aperture settings
pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) =\n", __func__, dpp->inst);
if (!pipe_ctx->top_pipe
&& pipe_ctx->plane_state
@@ -938,6 +940,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dccg *dccg = dc->res_pool->dccg;
+
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
@@ -955,7 +959,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->funcs->hubp_clk_cntl(hubp, false);
dpp->funcs->dpp_dppclk_control(dpp, false, false);
-/*to do, need to support both case*/
+ dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, false);
+
hubp->power_gated = true;
hubp->funcs->hubp_reset(hubp);
@@ -967,6 +972,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->top_pipe = NULL;
pipe_ctx->bottom_pipe = NULL;
pipe_ctx->plane_state = NULL;
+ //DC_LOG_DEBUG("%s: dpp_inst(%d)=\n", __func__, dpp->inst);
+
}
void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
@@ -1543,7 +1550,7 @@ static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
const struct dc *dc = pipe_ctx->stream->link->dc;
- if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_enable_dp_tunneling == false)
return false;
// Not necessary for MST configurations
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index 6a82a865209c..a3ccf805bd16 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -168,6 +168,8 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
+ .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+ .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
};
void dcn35_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 902a96940a01..58f2be2a326b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -158,10 +158,12 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
.set_mcm_luts = dcn32_set_mcm_luts,
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
- .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
+ .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+ .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
};
void dcn351_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 5489f3d431f6..c4177a9a662f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -396,6 +396,249 @@ static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ct
}
}
+static void dcn401_set_mcm_location_post_blend(struct dc *dc, struct pipe_ctx *pipe_ctx, bool bPostBlend)
+{
+ struct mpc *mpc = dc->res_pool->mpc;
+ int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+
+ if (!pipe_ctx->plane_state)
+ return;
+
+ mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
+ pipe_ctx->plane_state->mcm_location = (bPostBlend) ?
+ MPCC_MOVABLE_CM_LOCATION_AFTER :
+ MPCC_MOVABLE_CM_LOCATION_BEFORE;
+}
+
+static void dc_get_lut_mode(
+ enum dc_cm2_gpu_mem_layout layout,
+ enum hubp_3dlut_fl_mode *mode,
+ enum hubp_3dlut_fl_addressing_mode *addr_mode)
+{
+ switch (layout) {
+ case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
+ *mode = hubp_3dlut_fl_mode_native_1;
+ *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
+ break;
+ case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
+ *mode = hubp_3dlut_fl_mode_native_2;
+ *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
+ break;
+ case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
+ *mode = hubp_3dlut_fl_mode_transform;
+ *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
+ break;
+ default:
+ *mode = hubp_3dlut_fl_mode_disable;
+ *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
+ break;
+ }
+}
+
+static void dc_get_lut_format(
+ enum dc_cm2_gpu_mem_format dc_format,
+ enum hubp_3dlut_fl_format *format)
+{
+ switch (dc_format) {
+ case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
+ *format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
+ break;
+ case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
+ *format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
+ break;
+ case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
+ *format = hubp_3dlut_fl_format_float_fp1_5_10;
+ break;
+ }
+}
+
+static void dc_get_lut_xbar(
+ enum dc_cm2_gpu_mem_pixel_component_order order,
+ enum hubp_3dlut_fl_crossbar_bit_slice *cr_r,
+ enum hubp_3dlut_fl_crossbar_bit_slice *y_g,
+ enum hubp_3dlut_fl_crossbar_bit_slice *cb_b)
+{
+ switch (order) {
+ case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
+ *cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ break;
+ case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA:
+ *cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ break;
+ }
+}
+
+static void dc_get_lut_width(
+ enum dc_cm2_gpu_mem_size size,
+ enum hubp_3dlut_fl_width *width)
+{
+ switch (size) {
+ case DC_CM2_GPU_MEM_SIZE_333333:
+ *width = hubp_3dlut_fl_width_33;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_171717:
+ *width = hubp_3dlut_fl_width_17;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ *width = hubp_3dlut_fl_width_transformed;
+ break;
+ }
+}
+static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc)
+{
+ if (mpc->funcs->rmcm.update_3dlut_fast_load_select &&
+ mpc->funcs->rmcm.program_lut_read_write_control &&
+ hubp->funcs->hubp_program_3dlut_fl_addr &&
+ mpc->funcs->rmcm.program_bit_depth &&
+ hubp->funcs->hubp_program_3dlut_fl_mode &&
+ hubp->funcs->hubp_program_3dlut_fl_addressing_mode &&
+ hubp->funcs->hubp_program_3dlut_fl_format &&
+ hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
+ mpc->funcs->rmcm.program_bias_scale &&
+ hubp->funcs->hubp_program_3dlut_fl_crossbar &&
+ hubp->funcs->hubp_program_3dlut_fl_width &&
+ mpc->funcs->rmcm.update_3dlut_fast_load_select &&
+ mpc->funcs->rmcm.populate_lut &&
+ mpc->funcs->rmcm.program_lut_mode &&
+ hubp->funcs->hubp_enable_3dlut_fl &&
+ mpc->funcs->rmcm.enable_3dlut_fl)
+ return true;
+
+ return false;
+}
+
+bool dcn401_program_rmcm_luts(
+ struct hubp *hubp,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_cm2_transfer_func_source lut3d_src,
+ struct dc_cm2_func_luts *mcm_luts,
+ struct mpc *mpc,
+ bool lut_bank_a,
+ int mpcc_id)
+{
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ union mcm_lut_params m_lut_params;
+ enum MCM_LUT_XABLE shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable;
+ enum hubp_3dlut_fl_mode mode;
+ enum hubp_3dlut_fl_addressing_mode addr_mode;
+ enum hubp_3dlut_fl_format format = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
+ enum hubp_3dlut_fl_width width = 0;
+ struct dc *dc = hubp->ctx->dc;
+
+ bool bypass_rmcm_3dlut = false;
+ bool bypass_rmcm_shaper = false;
+
+ dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
+
+ /* 3DLUT */
+ switch (lut3d_src) {
+ case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
+ memset(&m_lut_params, 0, sizeof(m_lut_params));
+ // Don't know what to do in this case.
+ //case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
+ break;
+ case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
+ dc_get_lut_width(mcm_luts->lut3d_data.gpu_mem_params.size, &width);
+ if (!dc_is_rmcm_3dlut_supported(hubp, mpc) ||
+ !mpc->funcs->rmcm.is_config_supported(width))
+ return false;
+
+ //0. disable fl on mpc
+ mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, 0xF);
+
+ //1. power down the block
+ mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, false);
+
+ //2. program RMCM
+ //2a. 3dlut reg programming
+ mpc->funcs->rmcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a,
+ (!bypass_rmcm_3dlut) && lut3d_xable != MCM_LUT_DISABLE, mpcc_id);
+
+ hubp->funcs->hubp_program_3dlut_fl_addr(hubp,
+ mcm_luts->lut3d_data.gpu_mem_params.addr);
+
+ mpc->funcs->rmcm.program_bit_depth(mpc,
+ mcm_luts->lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
+
+ // set native or transformed mode
+ dc_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout, &mode, &addr_mode);
+
+ //these program the mcm 3dlut
+ hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);
+
+ hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);
+
+ //seems to be only for the MCM
+ dc_get_lut_format(mcm_luts->lut3d_data.gpu_mem_params.format_params.format, &format);
+ hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
+
+ mpc->funcs->rmcm.program_bias_scale(mpc,
+ mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale,
+ mpcc_id);
+ hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
+ mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale);
+
+ dc_get_lut_xbar(
+ mcm_luts->lut3d_data.gpu_mem_params.component_order,
+ &crossbar_bit_slice_cr_r,
+ &crossbar_bit_slice_y_g,
+ &crossbar_bit_slice_cb_b);
+
+ hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
+ crossbar_bit_slice_cr_r,
+ crossbar_bit_slice_y_g,
+ crossbar_bit_slice_cb_b);
+
+ mpc->funcs->rmcm.program_3dlut_size(mpc, width, mpcc_id);
+
+ mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
+
+ //2b. shaper reg programming
+ memset(&m_lut_params, 0, sizeof(m_lut_params));
+
+ if (mcm_luts->shaper->type == TF_TYPE_HWPWL) {
+ m_lut_params.pwl = &mcm_luts->shaper->pwl;
+ } else if (mcm_luts->shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ ASSERT(false);
+ cm_helper_translate_curve_to_hw_format(
+ dc->ctx,
+ mcm_luts->shaper,
+ &dpp_base->regamma_params, true);
+ m_lut_params.pwl = &dpp_base->regamma_params;
+ }
+ if (m_lut_params.pwl) {
+ mpc->funcs->rmcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
+ mpc->funcs->rmcm.program_lut_mode(mpc, !bypass_rmcm_shaper, lut_bank_a, mpcc_id);
+ } else {
+ //RMCM 3dlut won't work without its shaper
+ return false;
+ }
+
+ //3. Select the hubp connected to this RMCM
+ hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
+ mpc->funcs->rmcm.enable_3dlut_fl(mpc, true, mpcc_id);
+
+ //4. power on the block
+ if (m_lut_params.pwl)
+ mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, true);
+
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
void dcn401_populate_mcm_luts(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_cm2_func_luts mcm_luts,
@@ -407,21 +650,39 @@ void dcn401_populate_mcm_luts(struct dc *dc,
struct mpc *mpc = dc->res_pool->mpc;
union mcm_lut_params m_lut_params;
enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
- enum hubp_3dlut_fl_format format;
+ enum hubp_3dlut_fl_format format = 0;
enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_width width;
+ enum hubp_3dlut_fl_width width = 0;
enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
- bool is_17x17x17 = true;
bool rval;
dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
+ //MCM - set its location before or after the blender
+ //true selects post-blend
+ dcn401_set_mcm_location_post_blend(
+ dc,
+ pipe_ctx,
+ mcm_luts.lut3d_data.mpc_mcm_post_blend);
+
+ //RMCM - 3dLUT+Shaper
+ if (mcm_luts.lut3d_data.rmcm_3dlut_enable) {
+ dcn401_program_rmcm_luts(
+ hubp,
+ pipe_ctx,
+ lut3d_src,
+ &mcm_luts,
+ mpc,
+ lut_bank_a,
+ mpcc_id);
+ }
+
/* 1D LUT */
if (mcm_luts.lut1d_func) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
@@ -442,7 +703,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
}
/* Shaper */
- if (mcm_luts.shaper) {
+ if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
m_lut_params.pwl = &mcm_luts.shaper->pwl;
@@ -454,11 +715,11 @@ void dcn401_populate_mcm_luts(struct dc *dc,
m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
}
if (m_lut_params.pwl) {
- if (mpc->funcs->populate_lut)
- mpc->funcs->populate_lut(mpc, MCM_LUT_SHAPER, m_lut_params, lut_bank_a, mpcc_id);
+ if (mpc->funcs->mcm.populate_lut)
+ mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
+ if (mpc->funcs->program_lut_mode)
+ mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id);
}
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, shaper_xable, lut_bank_a, mpcc_id);
}
/* 3DLUT */
@@ -467,6 +728,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
memset(&m_lut_params, 0, sizeof(m_lut_params));
if (hubp->funcs->hubp_enable_3dlut_fl)
hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
+
if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
if (mpc->funcs->populate_lut)
@@ -476,16 +738,35 @@ void dcn401_populate_mcm_luts(struct dc *dc,
mpcc_id);
}
break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
+ case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
+ switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
+ case DC_CM2_GPU_MEM_SIZE_333333:
+ width = hubp_3dlut_fl_width_33;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_171717:
+ width = hubp_3dlut_fl_width_17;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ width = hubp_3dlut_fl_width_transformed;
+ break;
+ }
+
+ //check for support
+ if (mpc->funcs->mcm.is_config_supported &&
+ !mpc->funcs->mcm.is_config_supported(width))
+ break;
if (mpc->funcs->program_lut_read_write_control)
mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
if (mpc->funcs->program_lut_mode)
mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_3dlut_size)
- mpc->funcs->program_3dlut_size(mpc, is_17x17x17, mpcc_id);
+
if (hubp->funcs->hubp_program_3dlut_fl_addr)
hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);
+
+ if (mpc->funcs->mcm.program_bit_depth)
+ mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
+
switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
mode = hubp_3dlut_fl_mode_native_1;
@@ -512,7 +793,6 @@ void dcn401_populate_mcm_luts(struct dc *dc,
switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- default:
format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
break;
case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
@@ -524,37 +804,37 @@ void dcn401_populate_mcm_luts(struct dc *dc,
}
if (hubp->funcs->hubp_program_3dlut_fl_format)
hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
- if (hubp->funcs->hubp_update_3dlut_fl_bias_scale)
+ if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
+ mpc->funcs->mcm.program_bias_scale) {
+ mpc->funcs->mcm.program_bias_scale(mpc,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
+ mpcc_id);
hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
-
- switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
- default:
- crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- break;
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
}
+ //navi 4x has a bug where the red and blue slices are swapped; work around it here
+ //TODO: add a per-ASIC get_xbar method OR do the workaround in program_crossbar for 4x
+ dc_get_lut_xbar(
+ mcm_luts.lut3d_data.gpu_mem_params.component_order,
+ &crossbar_bit_slice_cr_r,
+ &crossbar_bit_slice_y_g,
+ &crossbar_bit_slice_cb_b);
+
if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
+ crossbar_bit_slice_cr_r,
crossbar_bit_slice_y_g,
- crossbar_bit_slice_cb_b,
- crossbar_bit_slice_cr_r);
+ crossbar_bit_slice_cb_b);
+
+ if (mpc->funcs->mcm.program_lut_read_write_control)
+ mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);
+
+ if (mpc->funcs->mcm.program_3dlut_size)
+ mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);
- switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_171717:
- default:
- width = hubp_3dlut_fl_width_17;
- break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
- width = hubp_3dlut_fl_width_transformed;
- break;
- }
- if (hubp->funcs->hubp_program_3dlut_fl_width)
- hubp->funcs->hubp_program_3dlut_fl_width(hubp, width);
if (mpc->funcs->update_3dlut_fast_load_select)
mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
@@ -1980,9 +2260,9 @@ void dcn401_program_pipe(
dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
}
- if (pipe_ctx->update_flags.raw ||
- (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
- pipe_ctx->stream->update_flags.raw)
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
+ pipe_ctx->plane_state->update_flags.raw ||
+ pipe_ctx->stream->update_flags.raw))
dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
@@ -2081,7 +2361,7 @@ void dcn401_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ if (pipe->plane_state) {
if (pipe->plane_state->triplebuffer_flips)
BREAK_TO_DEBUGGER();
@@ -2371,7 +2651,7 @@ bool dcn401_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK)
return false;
/* apply updated bandwidth parameters */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 781cf0efccc6..ce65b4f6c672 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -109,4 +109,12 @@ void dcn401_detect_pipe_changes(
void dcn401_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
+bool dcn401_program_rmcm_luts(
+ struct hubp *hubp,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_cm2_transfer_func_source lut3d_src,
+ struct dc_cm2_func_luts *mcm_luts,
+ struct mpc *mpc,
+ bool lut_bank_a,
+ int mpcc_id);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index c8b5ed834579..3a0795045bc6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -195,6 +195,8 @@ enum block_sequence_func {
DMUB_SUBVP_SAVE_SURF_ADDR,
HUBP_WAIT_FOR_DCC_META_PROP,
DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
+ /* This must be the last value in this enum, add new ones above */
+ HWSS_BLOCK_SEQUENCE_FUNC_COUNT
};
struct block_sequence {
@@ -202,6 +204,8 @@ struct block_sequence {
enum block_sequence_func func;
};
+#define MAX_HWSS_BLOCK_SEQUENCE_SIZE (HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES)
+
struct hw_sequencer_funcs {
void (*hardware_release)(struct dc *dc);
/* Embedded Display Related */
@@ -534,13 +538,13 @@ void set_drr_and_clear_adjust_pending(
struct drr_params *params);
void hwss_execute_sequence(struct dc *dc,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
int num_steps);
void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
struct dc_stream_status *stream_status,
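[Editorial note, not part of the patch] The block_sequence sizing above replaces a hard-coded bound (the "100" in dc_state, see core_types.h below) with MAX_HWSS_BLOCK_SEQUENCE_SIZE, derived from a sentinel enumerator kept at the end of block_sequence_func. A minimal sketch of the sentinel-sizing idiom, with illustrative names:

enum seq_func {
	SEQ_FUNC_A,
	SEQ_FUNC_B,
	/* This must be the last value in this enum, add new ones above */
	SEQ_FUNC_COUNT
};

#define SKETCH_MAX_PIPES	6
#define SKETCH_MAX_SEQUENCE	(SEQ_FUNC_COUNT * SKETCH_MAX_PIPES)

struct seq_step {
	enum seq_func func;
};

static struct seq_step sequence[SKETCH_MAX_SEQUENCE];	/* grows with the enum */

Adding a new enumerator above the sentinel automatically enlarges the array, so the sequence buffer can never be smaller than the worst case of every function issued once per pipe.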
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 22a5d4a03c98..09bc65c2fa23 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -183,6 +183,8 @@ struct hwseq_private_funcs {
struct dc_cm2_func_luts mcm_luts,
bool lut_bank_a);
void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx);
+ void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only);
+ void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index b5afd8c3103d..f3696143590c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -26,6 +26,8 @@
#ifndef _CORE_STATUS_H_
#define _CORE_STATUS_H_
+#include "dc_hw_types.h"
+
enum dc_status {
DC_OK = 1,
@@ -56,6 +58,7 @@ enum dc_status {
DC_NO_LINK_ENC_RESOURCE = 26,
DC_FAIL_DP_PAYLOAD_ALLOCATION = 27,
DC_FAIL_DP_LINK_BANDWIDTH = 28,
+ DC_FAIL_HW_CURSOR_SUPPORT = 29,
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d0021f25f3d8..0cf349cafb3e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -65,6 +65,7 @@ struct resource_pool;
struct dc_state;
struct resource_context;
struct clk_bw_params;
+struct dc_mcache_params;
struct resource_funcs {
enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
@@ -78,8 +79,7 @@ struct resource_funcs {
/* Create a minimal link encoder object with no dc_link object
* associated with it. */
struct link_encoder *(*link_enc_create_minimal)(struct dc_context *ctx, enum engine_id eng_id);
-
- bool (*validate_bandwidth)(
+ enum dc_status (*validate_bandwidth)(
struct dc *dc,
struct dc_state *context,
bool fast_validate);
@@ -218,6 +218,11 @@ struct resource_funcs {
int (*get_power_profile)(const struct dc_state *context);
unsigned int (*get_det_buffer_size)(const struct dc_state *context);
unsigned int (*get_vstartup_for_pipe)(struct pipe_ctx *pipe_ctx);
+ unsigned int (*get_max_hw_cursor_size)(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream);
+ bool (*program_mcache_pipe_config)(struct dc_state *context,
+ const struct dc_mcache_params *mcache_params);
};
struct audio_support{
@@ -382,7 +387,9 @@ struct link_resource {
struct link_config {
struct dc_link_settings dp_link_settings;
+ struct dc_tunnel_settings dp_tunnel_settings;
};
+
union pipe_update_flags {
struct {
uint32_t enable : 1;
@@ -480,6 +487,10 @@ struct pipe_ctx {
struct pixel_rate_divider pixel_rate_divider;
/* pixels borrowed from hblank to hactive */
uint8_t hblank_borrow;
+ /* next vupdate */
+ uint32_t next_vupdate;
+ uint32_t wait_frame_count;
+ bool wait_is_required;
};
/* Data used for dynamic link encoder assignment.
@@ -507,7 +518,7 @@ struct resource_context {
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
bool is_mpc_3dlut_acquired[MAX_PIPES];
- /* solely used for build scalar data in dml2 */
+ /* used to build scalar data in dml2 and for edp backlight programming */
struct pipe_ctx temp_pipe;
};
@@ -630,7 +641,7 @@ struct dc_state {
*/
struct bw_context bw_ctx;
- struct block_sequence block_sequence[100];
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE];
unsigned int block_sequence_steps;
struct dc_dmub_cmd dc_dmub_cmd[10];
unsigned int dmub_cmd_count;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 221645c023b5..bac8febad69a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -199,6 +199,7 @@ enum dentist_divider_range {
CLK_SR_DCN35(CLK1_CLK4_ALLOW_DS), \
CLK_SR_DCN35(CLK1_CLK5_ALLOW_DS), \
CLK_SR_DCN35(CLK5_spll_field_8), \
+ CLK_SR_DCN35(CLK6_spll_field_8), \
SR(DENTIST_DISPCLK_CNTL), \
#define CLK_COMMON_MASK_SH_LIST_DCN32(mask_sh) \
@@ -307,7 +308,7 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK4_ALLOW_DS;
uint32_t CLK1_CLK5_ALLOW_DS;
uint32_t CLK5_spll_field_8;
-
+ uint32_t CLK6_spll_field_8;
};
struct clk_mgr_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 3a89cc0cffc1..6e303b81bfb0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -967,23 +967,6 @@ struct mpc_funcs {
*/
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
- /**
- * @get_3dlut_fast_load_status:
- *
- * Get 3D LUT fast load status and reference them with done, soft_underflow and hard_underflow pointers.
- *
- * Parameters:
- * - [in/out] mpc - MPC context.
- * - [in] mpcc_id
- * - [in/out] done
- * - [in/out] soft_underflow
- * - [in/out] hard_underflow
- *
- * Return:
- *
- * void
- */
- void (*get_3dlut_fast_load_status)(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow);
/**
* @populate_lut:
@@ -1054,6 +1037,35 @@ struct mpc_funcs {
* void
*/
void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
+
+ struct {
+ void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
+ void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
+ void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
+ bool (*is_config_supported)(uint32_t width);
+ void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
+ bool lut_bank_a, bool enabled, int mpcc_id);
+
+ void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params,
+ bool lut_bank_a, int mpcc_id);
+ } mcm;
+
+ struct {
+ void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id);
+ void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
+ void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
+ bool lut_bank_a, bool enabled, int mpcc_id);
+ void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_XABLE xable,
+ bool lut_bank_a, int mpcc_id);
+ void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
+ void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
+ void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
+ bool (*is_config_supported)(uint32_t width);
+
+ void (*power_on_shaper_3dlut)(struct mpc *mpc, uint32_t mpcc_id, bool power_on);
+ void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params,
+ bool lut_bank_a, int mpcc_id);
+ } rmcm;
};
#endif
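[Editorial note, not part of the patch] The new mcm and rmcm sub-structures group optional per-block hooks, and the callers in dcn401_hwseq.c dispatch them only when populated, so ASICs without the corresponding hardware simply leave the pointers NULL. A minimal sketch of that guarded-dispatch convention, with assumed names:

struct mpc;

struct sketch_mcm_funcs {
	void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
};

static void sketch_program_bit_depth(struct mpc *mpc,
				     const struct sketch_mcm_funcs *funcs,
				     uint16_t bit_depth, int mpcc_id)
{
	/* optional hook: only call it when the ASIC provides an implementation */
	if (funcs->program_bit_depth)
		funcs->program_bit_depth(mpc, bit_depth, mpcc_id);
}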
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 7f371cbb35cd..0d5a8358a778 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -68,6 +68,7 @@ struct optc {
int pstate_keepout;
struct dc_crtc_timing orginal_patched_timing;
enum signal_type signal;
+ uint32_t max_frame_count;
};
void optc1_read_otg_state(struct timing_generator *optc, struct dcn_otg_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index 2948a696ee12..7d16351bba99 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -207,6 +207,9 @@ struct link_service {
bool (*dp_decide_link_settings)(
struct dc_stream_state *stream,
struct dc_link_settings *link_setting);
+ void (*dp_decide_tunnel_settings)(
+ struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting);
enum dp_link_encoding (*mst_decide_link_encoding_format)(
const struct dc_link *link);
bool (*edp_decide_link_settings)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index a402df225a76..26cb1459b743 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -508,6 +508,10 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
initial_val, \
n, __VA_ARGS__)
+#define IX_REG_SET_SYNC(index, init_value, f1, v1) \
+ IX_REG_SET_N_SYNC(index, 1, init_value, \
+ FN(reg, f1), v1)
+
#define IX_REG_SET_2_SYNC(index, init_value, f1, v1, f2, v2) \
IX_REG_SET_N_SYNC(index, 2, init_value, \
FN(reg, f1), v1,\
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 9458187b834d..a890f581f4e8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -32,6 +32,7 @@
#define MEMORY_TYPE_MULTIPLIER_CZ 4
#define MEMORY_TYPE_HBM 2
+#define MAX_MCACHES 8
#define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0)
@@ -65,6 +66,13 @@ struct resource_straps {
uint32_t audio_stream_number;
};
+struct dc_mcache_allocations {
+ int global_mcache_ids_plane0[MAX_MCACHES + 1];
+ int global_mcache_ids_plane1[MAX_MCACHES + 1];
+ int global_mcache_ids_mall_plane0[MAX_MCACHES + 1];
+ int global_mcache_ids_mall_plane1[MAX_MCACHES + 1];
+};
+
struct resource_create_funcs {
void (*read_dce_straps)(
struct dc_context *ctx, struct resource_straps *straps);
@@ -628,8 +636,6 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
struct dc_state *context,
struct pipe_ctx *pipe_ctx);
-bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream);
-
/* Get hw programming parameters container from pipe context
* @pipe_ctx: pipe context
* @dscl_prog_data: struct to hold programmable hw reg values
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 953f4a4dacad..33ce470e4c88 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -37,36 +37,9 @@
#include "ivsrcid/ivsrcid_vislands30.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
index 2c72074310c7..d777b85e70da 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
@@ -46,36 +46,9 @@
#include "dc_types.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- DC_HPD1_INT_STATUS,
- DC_HPD1_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- DC_HPD1_INT_CONTROL,
- DC_HPD1_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd1_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -391,5 +364,3 @@ struct irq_service *dal_irq_service_dce60_create(
dce60_irq_construct(irq_service, init_data);
return irq_service;
}
-
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index 49317934ef4f..3a9163acb49b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -37,36 +37,9 @@
#include "dc_types.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- DC_HPD1_INT_STATUS,
- DC_HPD1_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- DC_HPD1_INT_CONTROL,
- DC_HPD1_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd1_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -303,5 +276,3 @@ struct irq_service *dal_irq_service_dce80_create(
dce80_irq_construct(irq_service, init_data);
return irq_service;
}
-
-
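[Editorial note, not part of the patch] The identical static hpd_ack() helpers are being deleted from each per-generation irq_service file, and the .ack callbacks now point at shared hpd0_ack()/hpd1_ack() helpers (defined in common irq service code not shown in these hunks; assumed to keep the same behaviour). Reconstructed from the removed copies, the shared logic is essentially:

static bool hpd_ack_sketch(struct irq_service *irq_service,
			   const struct irq_source_info *info)
{
	uint32_t value = dm_read_reg(irq_service->ctx, info->status_reg);
	uint32_t sensed = get_reg_field_value(value,
					      HPD0_DC_HPD_INT_STATUS,
					      DC_HPD_SENSE_DELAYED);

	/* acknowledge the interrupt */
	dal_irq_service_ack_generic(irq_service, info);

	/* re-arm by programming the polarity opposite to the sensed level */
	value = dm_read_reg(irq_service->ctx, info->enable_reg);
	set_reg_field_value(value, sensed ? 0 : 1,
			    HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
	dm_write_reg(irq_service->ctx, info->enable_reg, value);

	return true;
}

The dce60/dce80 variants use the DC_HPD1_* register fields instead of HPD0_*, which is presumably why separate hpd0_ack and hpd1_ack helpers exist.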
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index 9ca28565a9d1..4ce9edd16344 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_servic
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index 916f0c974637..5847af0e66cb 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn20(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index 1d61d475d36f..6417011d2246 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -80,36 +80,9 @@ static enum dc_irq_source to_dal_irq_source_dcn201(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 42cdfe6c3538..71d2f065140b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -132,36 +132,9 @@ static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_servic
return DC_IRQ_SOURCE_INVALID;
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
index a443a8abb1ea..2a4080bdcf6b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
@@ -139,36 +139,9 @@ static enum dc_irq_source to_dal_irq_source_dcn30(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -447,4 +420,3 @@ struct irq_service *dal_irq_service_dcn30_create(
dcn30_irq_construct(irq_service, init_data);
return irq_service;
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
index 8ffc7e2c681a..624f1ac309f8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
@@ -126,26 +126,9 @@ static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_servi
}
}
-static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
index 262bb8b74b15..137caffae916 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
@@ -77,26 +77,9 @@ static enum dc_irq_source to_dal_irq_source_dcn303(struct irq_service *irq_servi
}
}
-static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
index 53e78ae7eecf..921cb167d920 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
@@ -128,36 +128,9 @@ static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_servic
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
index e0563e880432..0118fd6e5db0 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
@@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_servi
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
index 2ef22299101a..adebfc888618 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
@@ -135,36 +135,9 @@ static enum dc_irq_source to_dal_irq_source_dcn315(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
index f0ac0aeeac51..e9e315c75d76 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
@@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn32(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -191,6 +164,16 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.ack = NULL
};
+static struct irq_source_info_funcs vline1_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline2_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
@@ -259,6 +242,13 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
@@ -270,14 +260,6 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &vupdate_no_lock_irq_info_funcs\
}
-#define vblank_int_entry(reg_num)\
- [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
- IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
-}
-
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
@@ -285,6 +267,20 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
+#define vline1_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_CLEAR),\
+ .funcs = &vline1_irq_info_funcs\
+ }
+#define vline2_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE2 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_CLEAR),\
+ .funcs = &vline2_irq_info_funcs\
+ }
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
@@ -387,21 +383,29 @@ irq_source_info_dcn32[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_no_lock_int_entry(0),
- vupdate_no_lock_int_entry(1),
- vupdate_no_lock_int_entry(2),
- vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
+ [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
+ dmub_outbox_int_entry(),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
- [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
- dmub_outbox_int_entry(),
+ vline1_int_entry(0),
+ vline1_int_entry(1),
+ vline1_int_entry(2),
+ vline1_int_entry(3),
+ vline2_int_entry(0),
+ vline2_int_entry(1),
+ vline2_int_entry(2),
+ vline2_int_entry(3)
};
static const struct irq_service_funcs irq_service_funcs_dcn32 = {
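The reordered dcn32 table above (and the matching dcn401 table later in this series) registers the new VLINE1/VLINE2 sources through the vline1_int_entry()/vline2_int_entry() macros defined earlier in the file. A rough expansion sketch for vline1_int_entry(0), assuming the same IRQ_REG_ENTRY conventions used by the other entries in this file (illustrative only, not part of the patch):
[DC_IRQ_SOURCE_DC1_VLINE1 + 0] = {
	IRQ_REG_ENTRY(OTG, 0,
		OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE,
		OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_CLEAR),
	.funcs = &vline1_irq_info_funcs
},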
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
index ea8c271171bc..79e5e8c137ca 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
@@ -127,36 +127,9 @@ static enum dc_irq_source to_dal_irq_source_dcn35(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
index 7ec8e0de2f01..163b8ee9ebf7 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
@@ -106,36 +106,9 @@ static enum dc_irq_source to_dal_irq_source_dcn351(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
index ea958628f8b8..f716ab0fd30e 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
@@ -105,36 +105,9 @@ static enum dc_irq_source to_dal_irq_source_dcn36(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
index b43c9524b0de..fd9bb1950c20 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
@@ -109,36 +109,9 @@ static enum dc_irq_source to_dal_irq_source_dcn401(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -171,6 +144,16 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.ack = NULL
};
+static struct irq_source_info_funcs vline1_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline2_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
@@ -239,6 +222,13 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
@@ -250,13 +240,6 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &vupdate_no_lock_irq_info_funcs\
}
-#define vblank_int_entry(reg_num)\
- [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
- IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
- }
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
@@ -264,6 +247,20 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
+#define vline1_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_CLEAR),\
+ .funcs = &vline1_irq_info_funcs\
+ }
+#define vline2_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE2 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_CLEAR),\
+ .funcs = &vline2_irq_info_funcs\
+ }
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
@@ -364,21 +361,29 @@ irq_source_info_dcn401[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_no_lock_int_entry(0),
- vupdate_no_lock_int_entry(1),
- vupdate_no_lock_int_entry(2),
- vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
+ [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
+ dmub_outbox_int_entry(),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
- [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
- dmub_outbox_int_entry(),
+ vline1_int_entry(0),
+ vline1_int_entry(1),
+ vline1_int_entry(2),
+ vline1_int_entry(3),
+ vline2_int_entry(0),
+ vline2_int_entry(1),
+ vline2_int_entry(2),
+ vline2_int_entry(3),
};
static const struct irq_service_funcs irq_service_funcs_dcn401 = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index eca3d7ee7e4e..b595a11c5eaf 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -41,6 +41,16 @@
#include "reg_helper.h"
#include "irq_service.h"
+//HPD0_DC_HPD_INT_STATUS
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+//HPD1_DC_HPD_INT_STATUS
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED_MASK 0x10
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED__SHIFT 0x4
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK 0x100
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY__SHIFT 0x8
#define CTX \
@@ -177,3 +187,57 @@ enum dc_irq_source dal_irq_service_to_irq_source(
src_id,
ext_id);
}
+
+bool hpd0_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+bool hpd1_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ DC_HPD1_INT_STATUS,
+ DC_HPD1_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ DC_HPD1_INT_CONTROL,
+ DC_HPD1_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
index b178f85944cd..bbcef3d2fe33 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
@@ -82,4 +82,12 @@ void dal_irq_service_set_generic(
const struct irq_source_info *info,
bool enable);
+bool hpd0_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
+bool hpd1_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
#endif
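The hunks above and below move the per-ASIC hpd_ack copies into irq_service.c as the shared hpd0_ack/hpd1_ack helpers and export them from this header. A minimal sketch of how an ASIC-specific irq_service file now wires the shared helper (illustrative only; DCE6/DCE8 use hpd1_ack because their registers keep the DC_HPD1_* field names):
static struct irq_source_info_funcs hpd_irq_info_funcs = {
	.set = NULL,
	.ack = hpd0_ack		/* hpd1_ack on dce60/dce80 */
};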
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index 110f656d43ae..a2f7b933bebf 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -161,6 +161,20 @@ enum dc_irq_source {
DC_IRQ_SOURCE_DPCX_TX_PHYE,
DC_IRQ_SOURCE_DPCX_TX_PHYF,
+ DC_IRQ_SOURCE_DC1_VLINE2,
+ DC_IRQ_SOURCE_DC2_VLINE2,
+ DC_IRQ_SOURCE_DC3_VLINE2,
+ DC_IRQ_SOURCE_DC4_VLINE2,
+ DC_IRQ_SOURCE_DC5_VLINE2,
+ DC_IRQ_SOURCE_DC6_VLINE2,
+
+ DC_IRQ_SOURCE_DCI2C_RR_DDC1,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC2,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC3,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC4,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC5,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC6,
+
DAL_IRQ_SOURCES_NUMBER
};
@@ -170,6 +184,8 @@ enum irq_type
IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1,
IRQ_TYPE_VLINE0 = DC_IRQ_SOURCE_DC1_VLINE0,
+ IRQ_TYPE_VLINE1 = DC_IRQ_SOURCE_DC1_VLINE1,
+ IRQ_TYPE_VLINE2 = DC_IRQ_SOURCE_DC1_VLINE2,
IRQ_TYPE_DCUNDERFLOW = DC_IRQ_SOURCE_DC1UNDERFLOW,
};
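The new DC_IRQ_SOURCE_DCn_VLINE2 and IRQ_TYPE_VLINE2 values are laid out one per OTG instance, so callers can index them the same way VLINE0 is indexed. A hypothetical helper sketch (not part of the patch) showing that indexing:
static inline enum dc_irq_source otg_inst_to_vline2_irq_source(unsigned int otg_inst)
{
	/* DC_IRQ_SOURCE_DC1_VLINE2 .. DC_IRQ_SOURCE_DC6_VLINE2 are contiguous */
	return (enum dc_irq_source)(DC_IRQ_SOURCE_DC1_VLINE2 + otg_inst);
}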
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index cc9191a5c9e6..9655e6fa53a4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -611,6 +611,7 @@ static bool detect_dp(struct dc_link *link,
link->dpcd_caps.dongle_type = sink_caps->dongle_type;
link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one;
link->dpcd_caps.dpcd_rev.raw = 0;
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = 0;
}
return true;
@@ -1007,21 +1008,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
- /*
- * If this is DP over USB4 link then we need to:
- * - Enable BW ALLOC support on DPtx if applicable
- */
- if (dc->config.usb4_bw_alloc_support) {
- if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) {
- /* update with non reduced link cap if bw allocation mode is supported */
- if (link->dpia_bw_alloc_config.nrd_max_link_rate &&
- link->dpia_bw_alloc_config.nrd_max_lane_count) {
- link->reported_link_cap.link_rate =
- link->dpia_bw_alloc_config.nrd_max_link_rate;
- link->reported_link_cap.lane_count =
- link->dpia_bw_alloc_config.nrd_max_lane_count;
- }
- }
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) {
+ if (link_dpia_enable_usb4_dp_bw_alloc_mode(link) == false)
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc = false;
}
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 268626e73c54..273a3be6d593 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -148,6 +148,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
{
struct pipe_ctx *pipes[MAX_PIPES];
+ struct dc_stream_state *streams[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
uint8_t count;
int i;
@@ -160,10 +161,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
+ /* The subsequent call to dc_commit_updates_for_stream for a full update
+ * will release the current state and swap to a new state. Releasing the
+ * current state results in the stream pointers in the pipe_ctx structs
+ * to be zero'd. Hence, cache all streams prior to dc_commit_updates_for_stream.
+ */
+ for (i = 0; i < count; i++)
+ streams[i] = pipes[i]->stream;
+
for (i = 0; i < count; i++) {
- stream_update.stream = pipes[i]->stream;
+ stream_update.stream = streams[i];
dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
- pipes[i]->stream, &stream_update,
+ streams[i], &stream_update,
state);
}
@@ -2365,7 +2374,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation)
deallocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
@@ -2433,7 +2442,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (link->connector_signal == SIGNAL_TYPE_EDP && dc->debug.psp_disabled_wa) {
/* reset internal save state to default since eDP is off */
enum dp_panel_mode panel_mode = dp_get_panel_mode(pipe_ctx->stream->link);
- /* since current psp not loaded, we need to reset it to default*/
+ /* since current psp not loaded, we need to reset it to default */
link->panel_mode = panel_mode;
}
}
@@ -2611,7 +2620,7 @@ void link_set_dpms_on(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_set_hblank_reduction_on_rx(pipe_ctx);
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation)
allocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
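The stream caching added to link_set_all_streams_dpms_off_for_link() guards against the state swap described in the new comment. For contrast, an illustrative sketch (not part of the patch) of the pattern the change avoids:
for (i = 0; i < count; i++)
	dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
			pipes[i]->stream,	/* may already be zero'd once the first
						 * full update released current_state */
			&stream_update, state);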
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index f6b6b19e7481..1a04f4b74585 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -156,6 +156,7 @@ static void construct_link_service_dp_capability(struct link_service *link_srv)
link_srv->dp_get_encoding_format = link_dp_get_encoding_format;
link_srv->dp_should_enable_fec = dp_should_enable_fec;
link_srv->dp_decide_link_settings = link_decide_link_settings;
+ link_srv->dp_decide_tunnel_settings = link_decide_dp_tunnel_settings;
link_srv->mst_decide_link_encoding_format =
mst_decide_link_encoding_format;
link_srv->edp_decide_link_settings = edp_decide_link_settings;
@@ -464,6 +465,7 @@ static bool construct_phy(struct dc_link *link,
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_read_request = DC_IRQ_SOURCE_INVALID;
link->link_status.dpcd_caps = &link->dpcd_caps;
link->dc = init_params->dc;
@@ -514,6 +516,9 @@ static bool construct_phy(struct dc_link *link,
case CONNECTOR_ID_HDMI_TYPE_A:
link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ if (link->hpd_gpio)
+ link->irq_source_read_request =
+ dal_irq_get_read_request(link->hpd_gpio);
break;
case CONNECTOR_ID_SINGLE_LINK_DVID:
case CONNECTOR_ID_SINGLE_LINK_DVII:
@@ -653,7 +658,7 @@ static bool construct_phy(struct dc_link *link,
}
/* Look for device tag that matches connector signal,
- * CRT for rgb, LCD for other supported signal tyes
+ * CRT for rgb, LCD for other supported signal types
*/
if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
link->device_tag.dev_id))
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 21ee0d96c9d4..8f79881ad9f1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -158,6 +158,14 @@ uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count)
return 0; // invalid value
}
+uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count)
+{
+ /* Calculate offset for LTTPR closest to DPTX which is highest in the chain
+ * Offset is 0 for single LTTPR cases as base LTTPR DPCD addresses target LTTPR 1
+ */
+ return DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE * (lttpr_count - 1);
+}
+
uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
{
switch (bw) {
@@ -2013,11 +2021,9 @@ static bool retrieve_link_cap(struct dc_link *link)
sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
/* Read DP tunneling information. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- status = dpcd_get_tunneling_device_data(link);
- if (status != DC_OK)
- dm_error("%s: Read DP tunneling device data failed.\n", __func__);
- }
+ status = dpcd_get_tunneling_device_data(link);
+ if (status != DC_OK)
+ dm_error("%s: Read DP tunneling device data failed.\n", __func__);
retrieve_cable_id(link);
dpcd_write_cable_id_to_dprx(link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
index 0ce0af3ddbeb..940b147cc5d4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -48,6 +48,9 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link);
/* Convert PHY repeater count read from DPCD uint8_t. */
uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count);
+/* Calculate embedded LTTPR address offset for vendor-specific behaviour */
+uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count);
+
bool dp_is_sink_present(struct dc_link *link);
bool dp_is_lttpr_present(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 0d123e647652..22bfdced64ab 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -62,6 +62,36 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (status != DC_OK)
goto err;
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
+ dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling == false)
+ goto err;
+
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
+ dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
+ dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc) {
+ status = core_link_read_dpcd(link, USB4_DRIVER_BW_CAPABILITY,
+ dpcd_dp_tun_data, 1);
+
+ if (status != DC_OK)
+ goto err;
+
+ link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = dpcd_dp_tun_data[0];
+ }
+
+ DC_LOG_DEBUG("%s: Link[%d] DP tunneling support (RouterId=%d AdapterId=%d) "
+ "DPIA_BW_Alloc_support=%d "
+ "CM_BW_Alloc_support=%d ",
+ __func__, link->link_index,
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id,
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.bits.dpia_num,
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc,
+ link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
+
status = core_link_read_dpcd(
link,
DP_USB4_ROUTER_TOPOLOGY_ID,
@@ -71,13 +101,6 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (status != DC_OK)
goto err;
- link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
- dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
- dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
- dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
-
for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
@@ -92,6 +115,7 @@ bool dpia_query_hpd_status(struct dc_link *link)
/* prepare QUERY_HPD command */
cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
+ cmd.query_hpd.header.payload_bytes = sizeof(cmd.query_hpd.data);
cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
@@ -119,3 +143,20 @@ bool dpia_query_hpd_status(struct dc_link *link)
return link->hpd_status;
}
+void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting)
+{
+ struct dc_link *link = stream->link;
+
+ memset(dp_tunnel_setting, 0, sizeof(*dp_tunnel_setting));
+
+ if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT) || (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
+ dp_tunnel_setting->should_enable_dp_tunneling =
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling;
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support)
+ dp_tunnel_setting->should_use_dp_bw_allocation = true;
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
index 363f45a1a964..a61edfc9ca7a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
@@ -38,4 +38,10 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
* Returns true if HPD high.
*/
bool dpia_query_hpd_status(struct dc_link *link);
+
+/* Decide the DP tunneling settings based on the DPCD capabilities
+ */
+void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting);
+
#endif /* __DC_LINK_DPIA_H__ */
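link_decide_dp_tunnel_settings() centralizes the per-stream tunneling decision that callers previously derived from ep_type. A hypothetical call-site sketch (the real plumbing goes through link_srv->dp_decide_tunnel_settings, wired up in link_factory.c, and allocate_usb4_bandwidth() here only stands in for whatever the caller does with the result):
struct dc_tunnel_settings tun_settings = {0};

link_decide_dp_tunnel_settings(stream, &tun_settings);
/* BW allocation is attempted only when the DPCD advertises both
 * DP tunneling and DPIA BW allocation support */
if (tun_settings.should_use_dp_bw_allocation)
	allocate_usb4_bandwidth(stream);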
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index a254ead2f7e8..3af7564a84f1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -46,9 +46,10 @@
*/
static bool link_dp_is_bw_alloc_available(struct dc_link *link)
{
- return (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA
- && link->hpd_status
- && link->dpia_bw_alloc_config.bw_alloc_enabled);
+ return (link && link->hpd_status
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
}
static void reset_bw_alloc_struct(struct dc_link *link)
@@ -141,7 +142,7 @@ static int get_non_reduced_max_lane_count(struct dc_link *link)
* granuality, Driver_ID, CM_Group, & populate the BW allocation structs
* for host router and dpia
*/
-static void init_usb4_bw_struct(struct dc_link *link)
+static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link)
{
reset_bw_alloc_struct(link);
@@ -282,49 +283,26 @@ static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw)
// ------------------------------------------------------------------
// PUBLIC FUNCTIONS
// ------------------------------------------------------------------
-bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
+bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
{
bool ret = false;
- uint8_t response = 0,
- bw_support_dpia = 0,
- bw_support_cm = 0;
+ uint8_t val;
- if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status))
- goto out;
+ if (link->hpd_status) {
+ val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
- if (core_link_read_dpcd(
- link,
- DP_TUNNELING_CAPABILITIES,
- &response,
- sizeof(uint8_t)) == DC_OK)
- bw_support_dpia = (response >> 7) & 1;
-
- if (core_link_read_dpcd(
- link,
- USB4_DRIVER_BW_CAPABILITY,
- &response,
- sizeof(uint8_t)) == DC_OK)
- bw_support_cm = (response >> 7) & 1;
-
- /* Send request acknowledgment to Turn ON DPTX support */
- if (bw_support_cm && bw_support_dpia) {
-
- response = 0x80;
- if (core_link_write_dpcd(
- link,
- DPTX_BW_ALLOCATION_MODE_CONTROL,
- &response,
- sizeof(uint8_t)) != DC_OK) {
- DC_LOG_DEBUG("%s: FAILURE Enabling DPtx BW Allocation Mode Support for link(%d)\n",
- __func__, link->link_index);
- } else {
- // SUCCESS Enabled DPtx BW Allocation Mode Support
- DC_LOG_DEBUG("%s: SUCCESS Enabling DPtx BW Allocation Mode Support for link(%d)\n",
- __func__, link->link_index);
+ if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
+ DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
+
+ retrieve_usb4_dp_bw_allocation_info(link);
+
+ if (link->dpia_bw_alloc_config.nrd_max_link_rate && link->dpia_bw_alloc_config.nrd_max_lane_count) {
+ link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
+ link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
+ }
- ret = true;
- init_usb4_bw_struct(link);
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+ ret = true;
/*
* During DP tunnel creation, CM preallocates BW and reduces estimated BW of other
@@ -332,11 +310,12 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
* to make the CM to release preallocation and update estimated BW correctly for
* all DPIAs per host router
*/
+ // TODO: Zero allocation can be removed once the MSFT CM fix has been released
link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
- }
+ } else
+ DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
}
-out:
return ret;
}
@@ -378,7 +357,8 @@ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
*/
void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
{
- if (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dpia_bw_alloc_config.bw_alloc_enabled) {
+ if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpia_bw_alloc_config.bw_alloc_enabled) {
//1. Hot Plug
if (link->hpd_status && peak_bw > 0) {
// If DP over USB4 then we need to check BW allocation
@@ -401,7 +381,7 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
if (link_dp_is_bw_alloc_available(link))
link_dpia_send_bw_alloc_request(link, req_bw);
else
- DC_LOG_DEBUG("%s: Not able to send the BW Allocation request", __func__);
+ DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__);
}
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 6df9b946b00f..801965b5f9a4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -43,13 +43,13 @@ enum bw_type {
};
/*
- * Enable BW Allocation Mode Support from the DP-Tx side
+ * Enable USB4 DP BW allocation mode
*
* @link: pointer to the dc_link struct instance
*
* return: SUCCESS or FAILURE
*/
-bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
+bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link);
/*
* Allocates only what the stream needs for bw, so if:
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 5be00e4ce10b..693477413347 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -229,6 +229,10 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
link->replay_settings.config.replay_error_status.raw |= replay_error_status.raw;
+ /* Increment desync error counter if a desync error is detected */
+ if (replay_configuration.bits.DESYNC_ERROR_STATUS)
+ link->replay_settings.replay_desync_error_fail_count++;
+
if (link->replay_settings.config.force_disable_desync_error_check)
return;
@@ -240,9 +244,6 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
&replay_configuration.raw,
sizeof(replay_configuration.raw));
- /* Update desync error counter */
- link->replay_settings.replay_desync_error_fail_count++;
-
/* Acknowledge and clear error bits */
dm_helpers_dp_write_dpcd(
link->ctx,
@@ -351,7 +352,7 @@ enum dc_status dp_read_hpd_rx_irq_data(
irq_data->raw,
DP_SINK_STATUS - DP_SINK_COUNT + 1);
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
retval = core_link_read_dpcd(
link, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
&irq_data->bytes.link_service_irq_esi0.raw, 1);
@@ -520,7 +521,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
dp_trace_link_loss_increment(link);
}
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
if (hpd_irq_dpcd_data.bytes.link_service_irq_esi0.bits.DP_LINK_TUNNELING_IRQ)
dp_handle_tunneling_irq(link);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index ef358afdfb65..2dc1a660e504 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -785,7 +785,6 @@ void override_training_settings(
lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
-
}
enum dc_dp_training_pattern decide_cr_training_pattern(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
index 34d2e097ca2e..66d0fb1b9b9d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -35,6 +35,17 @@
#define DC_LOGGER \
link->ctx->logger
+static void get_default_8b_10b_lttpr_aux_rd_interval(
+ union training_aux_rd_interval *training_rd_interval)
+{
+ /* LTTPR are required to program DPCD 0000Eh to 0x4 (16ms) upon AUX
+ * read reply to this register. Since old sinks with DPCD rev 1.1
+ * and earlier may not support this register, assume the mandatory
+ * value is programmed by the LTTPR to avoid AUX timeout issues.
+ */
+ training_rd_interval->raw = 0x4;
+}
+
static int32_t get_cr_training_aux_rd_interval(struct dc_link *link,
const struct dc_link_settings *link_settings,
enum lttpr_mode lttpr_mode)
@@ -43,17 +54,22 @@ static int32_t get_cr_training_aux_rd_interval(struct dc_link *link,
uint32_t wait_in_micro_secs = 100;
memset(&training_rd_interval, 0, sizeof(training_rd_interval));
- if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
- link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
- core_link_read_dpcd(
- link,
- DP_TRAINING_AUX_RD_INTERVAL,
- (uint8_t *)&training_rd_interval,
- sizeof(training_rd_interval));
- if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT)
- wait_in_micro_secs = 400;
- if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
- wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12)
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ else if (dp_is_lttpr_present(link))
+ get_default_8b_10b_lttpr_aux_rd_interval(&training_rd_interval);
+
+ if (training_rd_interval.raw != 0) {
+ if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT)
+ wait_in_micro_secs = 400;
+ if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+ wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ }
}
return wait_in_micro_secs;
}
@@ -71,13 +87,15 @@ static uint32_t get_eq_training_aux_rd_interval(
DP_128B132B_TRAINING_AUX_RD_INTERVAL,
(uint8_t *)&training_rd_interval,
sizeof(training_rd_interval));
- } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
- link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
- core_link_read_dpcd(
- link,
- DP_TRAINING_AUX_RD_INTERVAL,
- (uint8_t *)&training_rd_interval,
- sizeof(training_rd_interval));
+ } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12)
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ else if (dp_is_lttpr_present(link))
+ get_default_8b_10b_lttpr_aux_rd_interval(&training_rd_interval);
}
switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) {
@@ -124,6 +142,14 @@ void decide_8b_10b_training_settings(
lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting, lt_settings->lttpr_mode);
dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
+ /* Some embedded LTTPRs rely on receiving TPS2 before LT to interop reliably with sensitive VGA dongles
+ * This allows these LTTPRs to minimize freq/phase and skew variation during lock and deskew sequences
+ */
+ if ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) ==
+ AMD_EXT_DISPLAY_PATH_CAPS__DP_EARLY_8B10B_TPS2) {
+ lt_settings->lttpr_early_tps2 = true;
+ }
}
enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
@@ -155,6 +181,42 @@ enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
return LTTPR_MODE_NON_LTTPR;
}
+static void set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t lttpr_count)
+{
+ /* Vendor-specific LTTPR early TPS2 sequence:
+ * 1. Output TPS2
+ * 2. Wait 400us
+ * 3. Set link settings as usual
+ * 4. Write TPS1 to DP_TRAINING_PATTERN_SET_PHY_REPEATERx targeting LTTPR closest to host
+ * 5. Wait 1ms
+ * 6. Begin link training as usual
+ */
+
+ uint32_t closest_lttpr_address_offset = dp_get_closest_lttpr_offset(lttpr_count);
+
+ union dpcd_training_pattern dpcd_pattern = {0};
+
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = 1;
+ dpcd_pattern.v1_4.SCRAMBLING_DISABLE = 1;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS2. Wait 400us.\n", __func__);
+
+ dp_set_hw_training_pattern(link, link_res, DP_TRAINING_PATTERN_SEQUENCE_2, DPRX);
+
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
+
+ udelay(400);
+
+ dpcd_set_link_settings(link, lt_settings);
+
+ core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + closest_lttpr_address_offset, &dpcd_pattern.raw, 1);
+
+ udelay(1000);
+ }
+
enum link_training_result perform_8b_10b_clock_recovery_sequence(
struct dc_link *link,
const struct link_resource *link_res,
@@ -365,7 +427,7 @@ enum link_training_result dp_perform_8b_10b_link_training(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
- uint8_t repeater_cnt;
+ uint8_t repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
uint8_t repeater_id;
uint8_t lane = 0;
@@ -373,14 +435,16 @@ enum link_training_result dp_perform_8b_10b_link_training(
start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
/* 1. set link rate, lane count and spread. */
- dpcd_set_link_settings(link, lt_settings);
+ if (lt_settings->lttpr_early_tps2)
+ set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(link, link_res, lt_settings, repeater_cnt);
+ else
+ dpcd_set_link_settings(link, lt_settings);
if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
*/
- repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
repeater_id--) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 1e4adbc764ea..da74c2b5854f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -524,7 +524,7 @@ bool edp_set_backlight_level(const struct dc_link *link,
struct dc *dc = link->ctx->dc;
uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16;
uint32_t frame_ramp = backlight_level_params->frame_ramp;
- DC_LOGGER_INIT(link->ctx->logger);
+
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_pwm_u16_16, backlight_pwm_u16_16);
@@ -1022,6 +1022,9 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
&alpm_config.raw,
sizeof(alpm_config.raw));
}
+
+ link->replay_settings.config.replay_video_conferencing_optimization_enabled = false;
+
return true;
}
@@ -1130,11 +1133,11 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
struct abm *abm = NULL;
for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
- struct dc_stream_state *stream = pipe_ctx.stream;
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *stream = pipe_ctx->stream;
if (stream && stream->link == link) {
- abm = pipe_ctx.stream_res.abm;
+ abm = pipe_ctx->stream_res.abm;
break;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
index a0e9e9f0441a..b4cea2b8cb2a 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
@@ -370,275 +370,279 @@ void mpc32_program_shaper_luta_settings(
MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
curve = params->arr_curve_points;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_0_1[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_2_3[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_4_5[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_6_7[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_8_9[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_10_11[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_12_13[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_14_15[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_16_17[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_18_19[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_20_21[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_22_23[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_24_25[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_26_27[mpcc_id], 0,
+ if (curve) {
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_0_1[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_28_29[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_30_31[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_32_33[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-}
-
-
-void mpc32_program_shaper_lutb_settings(
- struct mpc *mpc,
- const struct pwl_params *params,
- uint32_t mpcc_id)
-{
- const struct gamma_curve *curve;
- struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
-
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_B[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_G[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_R[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
-
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_B[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_G[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_R[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
-
- curve = params->arr_curve_points;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_0_1[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_2_3[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_2_3[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_4_5[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_4_5[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_6_7[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_6_7[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_8_9[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_8_9[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_10_11[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_10_11[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_12_13[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_12_13[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_14_15[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_14_15[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_16_17[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_16_17[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_18_19[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_18_19[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_20_21[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_20_21[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_22_23[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_22_23[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_24_25[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_24_25[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_26_27[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_26_27[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_28_29[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_28_29[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_30_31[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_30_31[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_32_33[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
+}
+
+
+void mpc32_program_shaper_lutb_settings(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id)
+{
+ const struct gamma_curve *curve;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_B[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_G[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_R[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_32_33[mpcc_id], 0,
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_B[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_G[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_R[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
+
+ curve = params->arr_curve_points;
+ if (curve) {
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_0_1[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_2_3[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_4_5[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_6_7[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_8_9[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_10_11[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_12_13[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_14_15[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_16_17[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_18_19[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_20_21[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_22_23[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_24_25[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_26_27[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_28_29[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_30_31[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_32_33[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
}
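
The dcn32 MPC hunk above wraps the per-region REG_SET_4 programming for both shaper LUTs in an "if (curve)" guard, so a missing arr_curve_points table no longer dereferences a NULL pointer while the pointer is stepped two entries per register write. A minimal standalone sketch of the same guard-and-stride pattern follows; the struct and function names here are illustrative only, not the driver's:

	#include <stdio.h>

	struct gamma_curve {
		unsigned int offset;
		unsigned int segments_num;
	};

	/* Program regions two at a time, stepping the curve pointer by two
	 * entries per write; bail out early when no curve table was supplied,
	 * which is the NULL dereference the patch guards against. */
	static void program_regions(const struct gamma_curve *curve, int num_pairs)
	{
		int i;

		if (!curve)
			return;

		for (i = 0; i < num_pairs; i++, curve += 2)
			printf("region %d_%d: off=%u seg=%u / off=%u seg=%u\n",
			       2 * i, 2 * i + 1,
			       curve[0].offset, curve[0].segments_num,
			       curve[1].offset, curve[1].segments_num);
	}

	int main(void)
	{
		struct gamma_curve curve[34] = { { 0, 4 }, { 16, 4 } };

		program_regions(curve, 17);  /* 34 points -> regions 0_1 .. 32_33 */
		program_regions(NULL, 17);   /* silently skipped */
		return 0;
	}
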
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index ad67197557ca..98cf0cbd59ba 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -47,16 +47,6 @@ void mpc401_update_3dlut_fast_load_select(struct mpc *mpc, int mpcc_id, int hubp
REG_SET(MPCC_MCM_3DLUT_FAST_LOAD_SELECT[mpcc_id], 0, MPCC_MCM_3DLUT_FL_SEL, hubp_idx);
}
-void mpc401_get_3dlut_fast_load_status(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow)
-{
- struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
-
- REG_GET_3(MPCC_MCM_3DLUT_FAST_LOAD_STATUS[mpcc_id],
- MPCC_MCM_3DLUT_FL_DONE, done,
- MPCC_MCM_3DLUT_FL_SOFT_UNDERFLOW, soft_underflow,
- MPCC_MCM_3DLUT_FL_HARD_UNDERFLOW, hard_underflow);
-}
-
void mpc401_set_movable_cm_location(struct mpc *mpc, enum mpcc_movable_cm_location location, int mpcc_id)
{
struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
@@ -618,7 +608,6 @@ static const struct mpc_funcs dcn401_mpc_funcs = {
.set_bg_color = mpc1_set_bg_color,
.set_movable_cm_location = mpc401_set_movable_cm_location,
.update_3dlut_fast_load_select = mpc401_update_3dlut_fast_load_select,
- .get_3dlut_fast_load_status = mpc401_get_3dlut_fast_load_status,
.populate_lut = mpc401_populate_lut,
.program_lut_read_write_control = mpc401_program_lut_read_write_control,
.program_lut_mode = mpc401_program_lut_mode,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index ce6fbcf14d7a..8e35ebc603a9 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -241,23 +241,9 @@ void mpc401_update_3dlut_fast_load_select(
int mpcc_id,
int hubp_idx);
-void mpc401_get_3dlut_fast_load_status(
- struct mpc *mpc,
- int mpcc_id,
- uint32_t *done,
- uint32_t *soft_underflow,
- uint32_t *hard_underflow);
-
void mpc401_update_3dlut_fast_load_select(
struct mpc *mpc,
int mpcc_id,
int hubp_idx);
-void mpc401_get_3dlut_fast_load_status(
- struct mpc *mpc,
- int mpcc_id,
- uint32_t *done,
- uint32_t *soft_underflow,
- uint32_t *hard_underflow);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index b86fe2b094f8..4cfc6c0fa147 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -507,6 +507,7 @@ void dcn35_timing_generator_init(struct optc *optc1)
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
+ optc1->max_frame_count = 0xFFFFFF;
dcn35_timing_generator_set_fgcg(
optc1, CTX->dc->debug.enable_fine_grain_clock_gating.bits.optc);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
index b8cddef6b3d2..5b42da8b79c2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -27,6 +27,24 @@
# DCE
###############################################################################
+ifdef CONFIG_DRM_AMD_DC_SI
+RESOURCE_DCE60 = dce60_resource.o
+
+AMD_DAL_RESOURCE_DCE60 = $(addprefix $(AMDDALPATH)/dc/resource/dce60/,$(RESOURCE_DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE60)
+endif
+
+###############################################################################
+
+RESOURCE_DCE80 = dce80_resource.o
+
+AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80)
+
+###############################################################################
+
RESOURCE_DCE100 = dce100_resource.o
AMD_DAL_RESOURCE_DCE100 = $(addprefix $(AMDDALPATH)/dc/resource/dce100/,$(RESOURCE_DCE100))
@@ -57,14 +75,6 @@ AMD_DAL_RESOURCE_DCE120 = $(addprefix $(AMDDALPATH)/dc/resource/dce120/,$(RESOUR
AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE120)
-###############################################################################
-
-RESOURCE_DCE80 = dce80_resource.o
-
-AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80)
-
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index e698543ec937..84f73fdb0f95 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -836,7 +836,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-static bool dce100_validate_bandwidth(
+static enum dc_status dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -858,7 +858,7 @@ static bool dce100_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz = 0;
}
- return true;
+ return DC_OK;
}
static bool dce100_validate_surface_sets(
@@ -1069,7 +1069,7 @@ static bool dce100_resource_construct(
pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 40;
- dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.i2c_speed_in_khz_hdcp = 40;
dc->caps.max_cursor_size = 128;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dual_link_dvi = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index 035c6cfdaee5..f3d5baac11bf 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -960,7 +960,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-static bool dce110_validate_bandwidth(
+static enum dc_status dce110_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -1031,7 +1031,7 @@ static bool dce110_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz,
context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
- return result;
+ return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 480a50967385..4225cae68c10 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -883,7 +883,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-bool dce112_validate_bandwidth(
+enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -952,7 +952,7 @@ bool dce112_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz,
context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
- return result;
+ return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
enum dc_status resource_map_phy_clock_resources(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
index 1f57ebc6f9b4..6221d749246d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
@@ -42,7 +42,7 @@ enum dc_status dce112_validate_with_context(
struct dc_state *context,
struct dc_state *old_context);
-bool dce112_validate_bandwidth(
+enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate);
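
Across the DCE and DCN resource files in this series, the validate_bandwidth callbacks change from returning bool to enum dc_status, with the old true mapping to DC_OK and false to DC_FAIL_BANDWIDTH_VALIDATE. A minimal sketch of that conversion pattern, using placeholder names rather than the driver's real types:

	#include <stdbool.h>
	#include <stdio.h>

	enum status { STATUS_OK = 0, STATUS_FAIL_BANDWIDTH_VALIDATE };

	/* Old-style check that can only report pass/fail. */
	static bool bandwidth_fits(int required_khz, int available_khz)
	{
		return required_khz <= available_khz;
	}

	/* New-style wrapper: callers can distinguish *why* validation failed
	 * once more status codes exist, instead of collapsing everything to bool. */
	static enum status validate_bandwidth(int required_khz, int available_khz)
	{
		return bandwidth_fits(required_khz, available_khz)
			? STATUS_OK : STATUS_FAIL_BANDWIDTH_VALIDATE;
	}

	int main(void)
	{
		printf("%d\n", validate_bandwidth(400000, 600000)); /* 0 == STATUS_OK */
		return 0;
	}
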
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index 889f314cac65..d9ffdded5ce1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -48,7 +48,7 @@
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
-#include "dce60/dce60_hw_sequencer.h"
+#include "dce60/dce60_hwseq.h"
#include "dce100/dce100_resource.h"
#include "dce/dce_panel_cntl.h"
@@ -863,7 +863,7 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static bool dce60_validate_bandwidth(
+static enum dc_status dce60_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -885,7 +885,7 @@ static bool dce60_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz = 0;
}
- return true;
+ return DC_OK;
}
static bool dce60_validate_surface_sets(
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h
index 5d653a76b0b0..5d653a76b0b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 3d5113f010bb..bd5811f97531 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -869,7 +869,7 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static bool dce80_validate_bandwidth(
+static enum dc_status dce80_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -891,7 +891,7 @@ static bool dce80_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz = 0;
}
- return true;
+ return DC_OK;
}
static bool dce80_validate_surface_sets(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index e92f14d50adb..be4ade0853e9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -23,6 +23,7 @@
*
*/
+#include "core_status.h"
#include "dm_services.h"
#include "dc.h"
@@ -1125,7 +1126,7 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool)
*pool = NULL;
}
-static bool dcn10_validate_bandwidth(
+static enum dc_status dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -1136,7 +1137,7 @@ static bool dcn10_validate_bandwidth(
voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate);
DC_FP_END();
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
@@ -1245,6 +1246,10 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id ==
link->link_enc->preferred_engine)
return pool->stream_enc[i];
+
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && pool->stream_enc[i]->id ==
+ link->dpia_preferred_eng_id)
+ return pool->stream_enc[i];
}
}
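
The dcn10 hunk above extends the stream-encoder search so USB4 DPIA links can also claim their preferred engine, mirroring the existing PHY-endpoint match. A rough standalone sketch of that selection loop; the types and field names below are placeholders, not the driver's:

	#include <stddef.h>

	enum ep_type { EP_PHY, EP_USB4_DPIA };

	struct stream_enc { int id; };

	struct link_desc {
		enum ep_type ep_type;
		int preferred_engine;       /* used for PHY endpoints */
		int dpia_preferred_eng_id;  /* used for USB4 DPIA endpoints */
	};

	/* Return the encoder whose id matches the link's preferred engine;
	 * DPIA links now get the same preferred-engine treatment as PHY links. */
	static struct stream_enc *find_preferred_enc(struct stream_enc *enc, int count,
						     const struct link_desc *link)
	{
		int i;

		for (i = 0; i < count; i++) {
			if (link->ep_type == EP_PHY &&
			    enc[i].id == link->preferred_engine)
				return &enc[i];
			if (link->ep_type == EP_USB4_DPIA &&
			    enc[i].id == link->dpia_preferred_eng_id)
				return &enc[i];
		}
		return NULL;
	}
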
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index e4eca3e32c1b..3405be07f5e3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -2124,7 +2124,7 @@ validate_out:
return out;
}
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool voltage_supported;
@@ -2132,14 +2132,14 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
if (!pipes)
- return false;
+ return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes);
DC_FP_END();
kfree(pipes);
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
index 4cee3fa11a7f..c0e062c7407d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
@@ -119,7 +119,7 @@ void dcn20_set_mcif_arb_params(
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 4bd5c2278596..9ab01b65b177 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -923,7 +923,7 @@ validate_out:
* with DC_FP_START()/DC_FP_END(). Use the same approach as for
* dcn20_validate_bandwidth in dcn20_resource.c.
*/
-static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool voltage_supported;
@@ -931,14 +931,14 @@ static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
if (!pipes)
- return false;
+ return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes);
DC_FP_END();
kfree(pipes);
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_NOT_SUPPORTED;
}
static void dcn21_destroy_resource_pool(struct resource_pool **pool)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index f01ced015072..f631ae34e320 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -1891,8 +1891,6 @@ static int get_refresh_rate(struct dc_state *context)
/* check if refresh rate at least 120hz */
timing = &context->streams[0]->timing;
- if (timing == NULL)
- return 0;
h_v_total = timing->h_total * timing->v_total;
if (h_v_total == 0)
@@ -2037,7 +2035,7 @@ void dcn30_calculate_wm_and_dlg(
DC_FP_END();
}
-bool dcn30_validate_bandwidth(struct dc *dc,
+enum dc_status dcn30_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -2094,7 +2092,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
index 8e6b8b7368fd..689d9bdace81 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
@@ -56,7 +56,7 @@ unsigned int dcn30_calc_max_scaled_time(
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark);
-bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
+enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate);
bool dcn30_internal_validate_bw(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index dddddbfef85f..7e0af5297dc4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1758,7 +1758,7 @@ dcn31_set_mcif_arb_params(struct dc *dc,
DC_FP_END();
}
-bool dcn31_validate_bandwidth(struct dc *dc,
+enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1813,7 +1813,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
index 551ad912f7be..dd82815d7efe 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
@@ -37,7 +37,7 @@ struct dcn31_resource_pool {
struct resource_pool base;
};
-bool dcn31_validate_bandwidth(struct dc *dc,
+enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
void dcn31_calculate_wm_and_dlg(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 26becc4cb804..d96bc6cb73ad 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -1694,7 +1694,7 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
*panel_config = panel_config_defaults;
}
-bool dcn314_validate_bandwidth(struct dc *dc,
+enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1750,7 +1750,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static struct resource_funcs dcn314_res_pool_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
index 49ffe71018df..f8ba531d6342 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
@@ -39,7 +39,7 @@ struct dcn314_resource_pool {
struct resource_pool base;
};
-bool dcn314_validate_bandwidth(struct dc *dc,
+enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 2a59cc61ed8c..bb0dae0be5b8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -24,6 +24,7 @@
*
*/
+#include "dc_types.h"
#include "dm_services.h"
#include "dc.h"
@@ -1806,19 +1807,56 @@ validate_out:
return out;
}
-bool dcn32_validate_bandwidth(struct dc *dc,
+enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
- bool out = false;
+ unsigned int i;
+ enum dc_status status;
+ const struct dc_stream_state *stream;
+
+ /* reset cursor limitations on subvp */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) {
+ dc_state_set_stream_cursor_subvp_limit(stream, context, false);
+ }
+ }
if (dc->debug.using_dml2)
- out = dml2_validate(dc, context,
+ status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
else
- out = dml1_validate(dc, context, fast_validate);
- return out;
+ status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+
+ if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ /* check new stream configuration still supports cursor if subvp used */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM &&
+ stream->cursor_position.enable &&
+ !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) {
+ /* hw cursor cannot be supported with subvp active, so disable subvp for now */
+ dc_state_set_stream_cursor_subvp_limit(stream, context, true);
+ status = DC_FAIL_HW_CURSOR_SUPPORT;
+ }
+ };
+ }
+
+ if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ /* attempt to validate again with subvp disabled due to cursor */
+ if (dc->debug.using_dml2)
+ status = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ else
+ status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+ return status;
}
int dcn32_populate_dml_pipes_from_context(
@@ -2042,6 +2080,18 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw
DC_FP_END();
}
+unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ bool limit_cur_to_buf;
+
+ limit_cur_to_buf = dc_state_get_stream_subvp_cursor_limit(stream, state) &&
+ !stream->hw_cursor_req;
+
+ return limit_cur_to_buf ? dc->caps.max_buffered_cursor_size : dc->caps.max_cursor_size;
+}
+
static struct resource_funcs dcn32_res_pool_funcs = {
.destroy = dcn32_destroy_resource_pool,
.link_enc_create = dcn32_link_encoder_create,
@@ -2067,7 +2117,8 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -2114,8 +2165,6 @@ static bool dcn32_resource_construct(
#define REG_STRUCT dccg_regs
dccg_regs_init();
- DC_FP_START();
-
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dcn32;
@@ -2153,6 +2202,7 @@ static bool dcn32_resource_construct(
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
/* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4)
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
@@ -2501,14 +2551,10 @@ static bool dcn32_resource_construct(
if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0))
dc->config.sdpif_request_limit_words_per_umc = 16;
- DC_FP_END();
-
return true;
create_fail:
- DC_FP_END();
-
dcn32_resource_destruct(pool);
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 1aa4ced29291..d60ed77eda80 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -98,7 +98,7 @@ void dcn32_add_phantom_pipes(struct dc *dc,
unsigned int pipe_cnt,
unsigned int index);
-bool dcn32_validate_bandwidth(struct dc *dc,
+enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
@@ -188,6 +188,10 @@ void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes);
+unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
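
The dcn32 (and later dcn401) validate_bandwidth changes above add a subvp/cursor interaction: after a successful pass, each non-phantom stream's HW cursor is checked against the subvp configuration; if it cannot be supported, the stream is flagged, the status becomes DC_FAIL_HW_CURSOR_SUPPORT, and validation is rerun once with the subvp cursor limit applied. A simplified sketch of that two-pass flow follows; all names are stand-ins and run_dml_validation is a placeholder for the real DML pass:

	#include <stdbool.h>

	enum status { STATUS_OK, STATUS_FAIL_BANDWIDTH, STATUS_FAIL_HW_CURSOR };

	struct stream {
		bool cursor_enabled;
		bool cursor_fits;          /* would the cursor work with subvp active? */
		bool subvp_cursor_limit;   /* set when subvp must be limited for this stream */
	};

	struct state {
		struct stream *streams;
		int stream_count;
		bool subvp_in_use;
	};

	/* Placeholder for the real DML validation pass. */
	static bool run_dml_validation(struct state *st) { (void)st; return true; }

	static enum status validate_bandwidth(struct state *st, bool fast_validate)
	{
		enum status status;
		int i;

		/* First pass with any previous cursor limits cleared. */
		for (i = 0; i < st->stream_count; i++)
			st->streams[i].subvp_cursor_limit = false;

		status = run_dml_validation(st) ? STATUS_OK : STATUS_FAIL_BANDWIDTH;

		if (!fast_validate && status == STATUS_OK && st->subvp_in_use) {
			for (i = 0; i < st->stream_count; i++) {
				struct stream *s = &st->streams[i];

				if (s->cursor_enabled && !s->cursor_fits) {
					/* HW cursor can't coexist with subvp here:
					 * flag the stream and force a retry. */
					s->subvp_cursor_limit = true;
					status = STATUS_FAIL_HW_CURSOR;
				}
			}
		}

		/* Second pass with subvp limited because of the cursor. */
		if (!fast_validate && status == STATUS_FAIL_HW_CURSOR)
			status = run_dml_validation(st) ? STATUS_OK : STATUS_FAIL_BANDWIDTH;

		return status;
	}
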
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 38d76434683e..7db1f7a5613f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1624,7 +1624,8 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1709,6 +1710,7 @@ static bool dcn321_resource_construct(
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
/* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4)
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index ffd2b816cd02..72c6cf047db0 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1732,7 +1732,7 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn35_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1743,13 +1743,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
fast_validate);
if (fast_validate)
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state)
@@ -1903,7 +1903,7 @@ static bool dcn35_resource_construct(
dc->caps.max_disp_clock_khz_at_vmin = 650000;
/* Sequential ONO is based on ASIC. */
- if (dc->ctx->asic_id.hw_internal_rev > 0x10)
+ if (dc->ctx->asic_id.hw_internal_rev >= 0x40)
dc->caps.sequential_ono = true;
/* Use pipe context based otg sync logic */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 98f5bc1b929e..989a270f7dea 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1712,7 +1712,7 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn351_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn351_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1723,13 +1723,13 @@ static bool dcn351_validate_bandwidth(struct dc *dc,
fast_validate);
if (fast_validate)
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static struct resource_funcs dcn351_res_pool_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index b6468573dc33..48e1f234185f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -1713,7 +1713,7 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn35_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1724,13 +1724,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
fast_validate);
if (fast_validate)
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
@@ -1876,7 +1876,7 @@ static bool dcn36_resource_construct(
dc->caps.max_disp_clock_khz_at_vmin = 650000;
/* Sequential ONO is based on ASIC. */
- if (dc->ctx->asic_id.hw_internal_rev > 0x10)
+ if (dc->ctx->asic_id.hw_internal_rev >= 0x40)
dc->caps.sequential_ono = true;
/* Use pipe context based otg sync logic */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 7436dfbdf927..e0e32975ca34 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -1642,16 +1642,52 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta
return DC_OK;
}
-bool dcn401_validate_bandwidth(struct dc *dc,
+enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
- bool out = false;
+ unsigned int i;
+ enum dc_status status = DC_OK;
+ const struct dc_stream_state *stream;
+
+ /* reset cursor limitations on subvp */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) {
+ dc_state_set_stream_cursor_subvp_limit(stream, context, false);
+ }
+ }
+
if (dc->debug.using_dml2)
- out = dml2_validate(dc, context,
+ status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
- return out;
+ fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+
+ if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ /* check new stream configuration still supports cursor if subvp used */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM &&
+ stream->cursor_position.enable &&
+ !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) {
+ /* hw cursor cannot be supported with subvp active, so disable subvp for now */
+ dc_state_set_stream_cursor_subvp_limit(stream, context, true);
+ status = DC_FAIL_HW_CURSOR_SUPPORT;
+ }
+ };
+ }
+
+ if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ /* attempt to validate again with subvp disabled due to cursor */
+ if (dc->debug.using_dml2)
+ status = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+ return status;
}
void dcn401_prepare_mcache_programming(struct dc *dc,
@@ -1770,7 +1806,8 @@ static struct resource_funcs dcn401_res_pool_funcs = {
.build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
.get_power_profile = dcn401_get_power_profile,
- .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1846,8 +1883,9 @@ static bool dcn401_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 95;
dc->caps.i2c_speed_in_khz_hdcp = 95; /*1.4 w/a applied by default*/
- /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
+ /* used to set cursor pitch, so must be aligned to power of 2 (HW actually supported 78x78) */
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64;
dc->caps.cursor_not_scaled = true;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 4c259745d519..dc52a30991af 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -22,7 +22,7 @@ struct resource_pool *dcn401_create_resource_pool(
enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state);
-bool dcn401_validate_bandwidth(struct dc *dc,
+enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index 28348734d900..e0008c5f08ad 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -776,7 +776,7 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
* Do not bypass UV at 1:1 for cositing to be applied
*/
if (!enable_isharp) {
- if (data->ratios.horz.value == one && data->ratios.vert.value == one)
+ if (data->ratios.horz.value == one && data->ratios.vert.value == one && !spl_in->basic_out.always_scale)
return SCL_MODE_SCALING_420_LUMA_BYPASS;
}
@@ -884,7 +884,7 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
/* Calculate number of tap with adaptive scaling off */
static void spl_get_taps_non_adaptive_scaler(
- struct spl_scratch *spl_scratch, const struct spl_taps *in_taps)
+ struct spl_scratch *spl_scratch, const struct spl_taps *in_taps, bool always_scale)
{
bool check_max_downscale = false;
@@ -944,15 +944,15 @@ static void spl_get_taps_non_adaptive_scaler(
spl_fixpt_from_fraction(6, 1));
SPL_ASSERT(check_max_downscale);
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz))
+
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz) && !always_scale)
spl_scratch->scl_data.taps.h_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert) && !always_scale)
spl_scratch->scl_data.taps.v_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !always_scale)
spl_scratch->scl_data.taps.h_taps_c = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !always_scale)
spl_scratch->scl_data.taps.v_taps_c = 1;
-
}
/* Calculate optimal number of taps */
@@ -965,13 +965,15 @@ static bool spl_get_optimal_number_of_taps(
unsigned int max_taps_y, max_taps_c;
unsigned int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
- bool skip_easf = false;
+ bool skip_easf = false;
+ bool always_scale = spl_in->basic_out.always_scale;
bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format);
+
if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active &&
max_downscale_src_width != 0 &&
spl_scratch->scl_data.viewport.width > max_downscale_src_width) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -980,7 +982,7 @@ static bool spl_get_optimal_number_of_taps(
/* Disable adaptive scaler and sharpener when integer scaling is enabled */
if (spl_in->scaling_quality.integer_scaling) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -996,7 +998,7 @@ static bool spl_get_optimal_number_of_taps(
* taps = 4 for upscaling
*/
if (skip_easf)
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
else {
if (spl_is_video_format(spl_in->basic_in.format)) {
spl_scratch->scl_data.taps.h_taps = 6;
@@ -1297,7 +1299,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
if (enable_easf_v) {
dscl_prog_data->easf_v_en = true;
dscl_prog_data->easf_v_ring = 0;
- dscl_prog_data->easf_v_sharp_factor = 0;
+ dscl_prog_data->easf_v_sharp_factor = 1;
dscl_prog_data->easf_v_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_v_bf2_mode = 0xF; // 4-bit, BF2 calculation mode
/* 2-bit, BF3 chroma mode correction calculation mode */
@@ -1461,7 +1463,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
if (enable_easf_h) {
dscl_prog_data->easf_h_en = true;
dscl_prog_data->easf_h_ring = 0;
- dscl_prog_data->easf_h_sharp_factor = 0;
+ dscl_prog_data->easf_h_sharp_factor = 1;
dscl_prog_data->easf_h_bf1_en =
1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_h_bf2_mode =
@@ -1898,3 +1900,4 @@ bool SPL_NAMESPACE(spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out
spl_set_taps_data(dscl_prog_data, data);
return res;
}
+
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
index 1c3949b24611..36a284305a70 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
@@ -480,6 +480,10 @@ enum sharpness_setting {
SHARPNESS_ZERO,
SHARPNESS_CUSTOM
};
+enum sharpness_range_source {
+ SHARPNESS_RANGE_DCN = 0,
+ SHARPNESS_RANGE_DCN_OVERRIDE
+};
struct spl_sharpness_range {
int sdr_rgb_min;
int sdr_rgb_max;
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
index 52d97918a3bd..ebf0287417e0 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
@@ -29,8 +29,6 @@ static inline unsigned long long spl_complete_integer_division_u64(
{
unsigned long long result;
- SPL_ASSERT(divisor);
-
result = spl_div64_u64_rem(dividend, divisor, remainder);
return result;
@@ -196,8 +194,6 @@ struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg)
* Good idea to use Newton's method
*/
- SPL_ASSERT(arg.value);
-
return spl_fixpt_from_fraction(
spl_fixpt_one.value,
arg.value);
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 4e0efff92dca..3f3fa1b6a69e 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -51,8 +51,8 @@
* for the cache windows.
*
* The call to dmub_srv_hw_init() programs the DMCUB registers to prepare
- * for command submission. Commands can be queued via dmub_srv_cmd_queue()
- * and executed via dmub_srv_cmd_execute().
+ * for command submission. Commands can be queued via dmub_srv_fb_cmd_queue()
+ * and executed via dmub_srv_fb_cmd_execute().
*
* If the queue is full the dmub_srv_wait_for_idle() call can be used to
* wait until the queue has been cleared.
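A minimal caller-side sketch (not part of the patch) of the frame-buffer inbox flow described above, using only the renamed entry points declared later in this header; the wrapper name, retry policy, and 100 ms timeout are assumptions:

/* Hypothetical caller: queue one command over the FB inbox and wait for it. */
static enum dmub_status example_submit_fb_cmd(struct dmub_srv *dmub,
					      const union dmub_rb_cmd *cmd)
{
	enum dmub_status status;

	status = dmub_srv_fb_cmd_queue(dmub, cmd);	/* copy into the inbox1 ring buffer */
	if (status == DMUB_STATUS_QUEUE_FULL) {
		/* drain the inbox, then retry once (illustrative policy only) */
		if (dmub_srv_wait_for_idle(dmub, 100000) != DMUB_STATUS_OK)
			return DMUB_STATUS_TIMEOUT;
		status = dmub_srv_fb_cmd_queue(dmub, cmd);
	}
	if (status != DMUB_STATUS_OK)
		return status;

	status = dmub_srv_fb_cmd_execute(dmub);		/* publish the new write pointer to DMCUB */
	if (status != DMUB_STATUS_OK)
		return status;

	return dmub_srv_wait_for_idle(dmub, 100000);	/* poll until the inbox drains */
}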
@@ -142,6 +142,7 @@ enum dmub_notification_type {
DMUB_NOTIFICATION_SET_CONFIG_REPLY,
DMUB_NOTIFICATION_DPIA_NOTIFICATION,
DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
+ DMUB_NOTIFICATION_FUSED_IO,
DMUB_NOTIFICATION_MAX
};
@@ -170,6 +171,13 @@ enum dmub_srv_power_state_type {
DMUB_POWER_STATE_D3 = 8
};
+/* enum dmub_inbox_cmd_interface_type - defines default interface for host->dmub commands */
+enum dmub_inbox_cmd_interface_type {
+ DMUB_CMD_INTERFACE_DEFAULT = 0,
+ DMUB_CMD_INTERFACE_FB = 1,
+ DMUB_CMD_INTERFACE_REG = 2,
+};
+
/**
* struct dmub_region - dmub hw memory region
* @base: base address for region, must be 256 byte aligned
@@ -349,6 +357,21 @@ struct dmub_diagnostic_data {
uint8_t is_cw6_enabled : 1;
};
+struct dmub_srv_inbox {
+ /* generic status */
+ uint64_t num_submitted;
+ uint64_t num_reported;
+ union {
+ /* frame buffer mailbox status */
+ struct dmub_rb rb;
+ /* register mailbox status */
+ struct {
+ bool is_pending;
+ bool is_multi_pending;
+ };
+ };
+};
+
/**
* struct dmub_srv_base_funcs - Driver specific base callbacks
*/
@@ -422,6 +445,8 @@ struct dmub_srv_hw_funcs {
uint32_t (*emul_get_inbox1_rptr)(struct dmub_srv *dmub);
+ uint32_t (*emul_get_inbox1_wptr)(struct dmub_srv *dmub);
+
void (*emul_set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
bool (*is_supported)(struct dmub_srv *dmub);
@@ -462,18 +487,21 @@ struct dmub_srv_hw_funcs {
void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx);
void (*subvp_save_surf_addr)(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
+
void (*send_reg_inbox0_cmd_msg)(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
uint32_t (*read_reg_inbox0_rsp_int_status)(struct dmub_srv *dmub);
void (*read_reg_inbox0_cmd_rsp)(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
void (*write_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub);
+ void (*clear_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub);
+ void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable);
+
uint32_t (*read_reg_outbox0_rdy_int_status)(struct dmub_srv *dmub);
void (*write_reg_outbox0_rdy_int_ack)(struct dmub_srv *dmub);
void (*read_reg_outbox0_msg)(struct dmub_srv *dmub, uint32_t *msg);
void (*write_reg_outbox0_rsp)(struct dmub_srv *dmub, uint32_t *rsp);
uint32_t (*read_reg_outbox0_rsp_int_status)(struct dmub_srv *dmub);
- void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable);
void (*enable_reg_outbox0_rdy_int)(struct dmub_srv *dmub, bool enable);
};
@@ -493,6 +521,7 @@ struct dmub_srv_create_params {
enum dmub_asic asic;
uint32_t fw_version;
bool is_virtual;
+ enum dmub_inbox_cmd_interface_type inbox_type;
};
/**
@@ -521,8 +550,9 @@ struct dmub_srv {
const struct dmub_srv_dcn401_regs *regs_dcn401;
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs hw_funcs;
- struct dmub_rb inbox1_rb;
+ struct dmub_srv_inbox inbox1;
uint32_t inbox1_last_wptr;
+ struct dmub_srv_inbox reg_inbox0;
/**
* outbox1_rb is accessed without locks (dal & dc)
* and to be used only in dmub_srv_stat_get_notification()
@@ -542,6 +572,7 @@ struct dmub_srv {
struct dmub_fw_meta_info meta_info;
struct dmub_feature_caps feature_caps;
struct dmub_visual_confirm_color visual_confirm_color;
+ enum dmub_inbox_cmd_interface_type inbox_type;
enum dmub_srv_power_state_type power_state;
struct dmub_diagnostic_data debug;
@@ -566,11 +597,8 @@ struct dmub_notification {
struct aux_reply_data aux_reply;
enum dp_hpd_status hpd_status;
enum set_config_status sc_status;
- /**
- * DPIA notification command.
- */
- struct dmub_rb_cmd_dpia_notification dpia_notification;
struct dmub_rb_cmd_hpd_sense_notify_data hpd_sense_notify;
+ struct dmub_cmd_fused_request fused_request;
};
};
@@ -699,19 +727,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
/**
- * dmub_srv_sync_inbox1() - sync sw state with hw state
- * @dmub: the dmub service
- *
- * Sync sw state with hw state when resume from S0i3
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
-
-/**
- * dmub_srv_cmd_queue() - queues a command to the DMUB
+ * dmub_srv_fb_cmd_queue() - queues a command to the DMUB
* @dmub: the dmub service
* @cmd: the command to queue
*
@@ -723,11 +739,11 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
* DMUB_STATUS_QUEUE_FULL - no remaining room in queue
* DMUB_STATUS_INVALID - unspecified error
*/
-enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
const union dmub_rb_cmd *cmd);
/**
- * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
+ * dmub_srv_fb_cmd_execute() - Executes a queued sequence to the dmub
* @dmub: the dmub service
*
* Begins execution of queued commands on the dmub.
@@ -736,7 +752,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
* DMUB_STATUS_OK - success
* DMUB_STATUS_INVALID - unspecified error
*/
-enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
+enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub);
/**
* dmub_srv_wait_for_hw_pwr_up() - Waits for firmware hardware power up is completed
@@ -795,6 +811,23 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
uint32_t timeout_us);
/**
+ * dmub_srv_wait_for_pending() - Re-entrant wait for messages currently pending
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the commands queued prior to this call are complete.
+ * If interfaces remain busy due to additional work being submitted
+ * concurrently, this function will not continue to wait.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
+ uint32_t timeout_us);
+
+/**
* dmub_srv_wait_for_idle() - Waits for the DMUB to be idle
* @dmub: the dmub service
* @timeout_us: the maximum number of microseconds to wait
@@ -892,9 +925,6 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
union dmub_fw_boot_options *option);
-enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd);
-
enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
bool skip);
@@ -959,26 +989,6 @@ enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub);
void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
/**
- * dmub_srv_send_reg_inbox0_cmd() - send a dmub command and wait for the command
- * being processed by DMUB.
- * @dmub: The dmub service
- * @cmd: The dmub command being sent. If with_replay is true, the function will
- * update cmd with replied data.
- * @with_reply: true if DMUB reply needs to be copied back to cmd. false if the
- * cmd doesn't need to be replied.
- * @timeout_us: timeout in microseconds.
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_TIMEOUT - DMUB fails to process the command within the timeout
- * interval.
- */
-enum dmub_status dmub_srv_send_reg_inbox0_cmd(
- struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd,
- bool with_reply, uint32_t timeout_us);
-
-/**
* dmub_srv_set_power_state() - Track DC power state in dmub_srv
* @dmub: The dmub service
* @power_state: DC power state setting
@@ -990,4 +1000,71 @@ enum dmub_status dmub_srv_send_reg_inbox0_cmd(
*/
void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state);
+/**
+ * dmub_srv_reg_cmd_execute() - Executes provided command to the dmub
+ * @dmub: the dmub service
+ * @cmd: the command packet to be executed
+ *
+ * Executes a single command for the dmub.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd);
+
+
+/**
+ * dmub_srv_cmd_get_response() - Copies return data for command into buffer
+ * @dmub: the dmub service
+ * @cmd_rsp: response buffer
+ *
+ * Copies return data for command into buffer
+ */
+void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
+ union dmub_rb_cmd *cmd_rsp);
+
+/**
+ * dmub_srv_sync_inboxes() - Sync inbox state
+ * @dmub: the dmub service
+ *
+ * Sync inbox state
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_wait_for_inbox_free() - Waits for space in the DMUB inbox to free up
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ * @num_free_required: number of free entries required
+ *
+ * Waits until the DMUB inbox has at least the specified number of free entries.
+ * The maximum wait time is given in microseconds to prevent spinning
+ * forever.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
+ uint32_t timeout_us,
+ uint32_t num_free_required);
+
+/**
+ * dmub_srv_update_inbox_status() - Updates pending status for inbox & reg inbox0
+ * @dmub: the dmub service
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_HW_FAILURE - issue with HW programming
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub);
+
#endif /* _DMUB_SRV_H_ */
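A hedged sketch (not part of the patch) of how the register-based inbox documented above could be driven end to end; the wrapper name and timeout value are assumptions, while the entry points are the ones declared in this header:

/* Hypothetical caller: execute one command over reg inbox0 and read the reply. */
static enum dmub_status example_submit_reg_cmd(struct dmub_srv *dmub,
					       union dmub_rb_cmd *cmd)
{
	enum dmub_status status;

	status = dmub_srv_reg_cmd_execute(dmub, cmd);	/* write the command to the reg inbox */
	if (status != DMUB_STATUS_OK)
		return status;

	status = dmub_srv_wait_for_idle(dmub, 100000);	/* wait for the response to be acknowledged */
	if (status != DMUB_STATUS_OK)
		return status;

	dmub_srv_cmd_get_response(dmub, cmd);		/* copy the reply back into cmd */
	return DMUB_STATUS_OK;
}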
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 1f5f4e3e49d4..57fa05bddb45 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -36,6 +36,9 @@
//<DMUB_TYPES>==================================================================
/* Basic type definitions. */
+#ifdef __forceinline
+#undef __forceinline
+#endif
#define __forceinline inline
/**
@@ -547,6 +550,11 @@ union replay_hw_flags {
* @is_alpm_initialized: Indicates whether ALPM is initialized
*/
uint32_t is_alpm_initialized : 1;
+
+ /**
+ * @alpm_mode: Indicates ALPM mode selected
+ */
+ uint32_t alpm_mode : 2;
} bitfields;
uint32_t u32All;
@@ -739,6 +747,14 @@ enum dmub_ips_disable_type {
DMUB_IPS_DISABLE_IPS2_Z10 = 4,
DMUB_IPS_DISABLE_DYNAMIC = 5,
DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6,
+ DMUB_IPS_DISABLE_Z8_RETENTION = 7,
+};
+
+enum dmub_ips_rcg_disable_type {
+ DMUB_IPS_RCG_ENABLE = 0,
+ DMUB_IPS0_RCG_DISABLE = 1,
+ DMUB_IPS1_RCG_DISABLE = 2,
+ DMUB_IPS_RCG_DISABLE = 3
};
#define DMUB_IPS1_ALLOW_MASK 0x00000001
@@ -817,11 +833,12 @@ enum dmub_shared_state_feature_id {
*/
union dmub_shared_state_ips_fw_signals {
struct {
- uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
+ uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */
uint32_t detection_required : 1; /**< 1 if detection is required */
- uint32_t reserved_bits : 28; /**< Reversed */
+ uint32_t ips1z8_commit: 1; /**< 1 if in IPS1 Z8 Retention */
+ uint32_t reserved_bits : 27; /**< Reserved */
} bits;
uint32_t all;
};
@@ -836,7 +853,10 @@ union dmub_shared_state_ips_driver_signals {
uint32_t allow_ips2 : 1; /**< 1 is IPS1 is allowed */
uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
uint32_t allow_idle: 1; /**< 1 if driver is allowing idle */
- uint32_t reserved_bits : 27; /**< Reversed bits */
+ uint32_t allow_ips0_rcg : 1; /**< 1 if IPS0 RCG is allowed */
+ uint32_t allow_ips1_rcg : 1; /**< 1 if IPS1 RCG is allowed */
+ uint32_t allow_ips1z8 : 1; /**< 1 if IPS1 Z8 Retention is allowed */
+ uint32_t reserved_bits : 24; /**< Reserved bits */
} bits;
uint32_t all;
};
@@ -865,7 +885,9 @@ struct dmub_shared_state_ips_fw {
uint32_t ips1_exit_count; /**< Exit counter for IPS1 */
uint32_t ips2_entry_count; /**< Entry counter for IPS2 */
uint32_t ips2_exit_count; /**< Exit counter for IPS2 */
- uint32_t reserved[55]; /**< Reversed, to be updated when adding new fields. */
+ uint32_t ips1_z8ret_entry_count; /**< Entry counter for IPS1 Z8 Retention */
+ uint32_t ips1_z8ret_exit_count; /**< Exit counter for IPS1 Z8 Retention */
+ uint32_t reserved[53]; /**< Reserved, to be updated when adding new fields. */
}; /* 248-bytes, fixed */
/**
@@ -1253,6 +1275,10 @@ enum dmub_gpint_command {
* DESC: Setup debug configs.
*/
DMUB_GPINT__SETUP_DEBUG_MODE = 136,
+ /**
+ * DESC: Initiates IPS wake sequence.
+ */
+ DMUB_GPINT__IPS_DEBUG_WAKE = 137,
};
/**
@@ -2113,6 +2139,11 @@ union dmub_cmd_fams2_config {
} stream_v1; //v1
};
+struct dmub_fams2_config_v2 {
+ struct dmub_cmd_fams2_global_config global;
+ struct dmub_fams2_stream_static_state_v1 stream_v1[DMUB_MAX_STREAMS]; //v1
+};
+
/**
* DMUB rb command definition for FAMS2 (merged SubVP, FPO, Legacy)
*/
@@ -2122,6 +2153,22 @@ struct dmub_rb_cmd_fams2 {
};
/**
+ * Indirect buffer descriptor
+ */
+struct dmub_ib_data {
+ union dmub_addr src; // location of indirect buffer in memory
+ uint16_t size; // indirect buffer size in bytes
+};
+
+/**
+ * DMUB rb command definition for commands passed over indirect buffer
+ */
+struct dmub_rb_cmd_ib {
+ struct dmub_cmd_header header;
+ struct dmub_ib_data ib_data;
+};
+
+/**
* enum dmub_cmd_idle_opt_type - Idle optimization command type.
*/
enum dmub_cmd_idle_opt_type {
@@ -2144,6 +2191,11 @@ enum dmub_cmd_idle_opt_type {
* DCN hardware notify power state.
*/
DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3,
+
+ /**
+ * DCN notify to release HW.
+ */
+ DMUB_CMD__IDLE_OPT_RELEASE_HW = 4,
};
/**
@@ -2636,7 +2688,11 @@ enum dp_hpd_type {
/**
* DP HPD short pulse
*/
- DP_IRQ
+ DP_IRQ = 1,
+ /**
+ * Failure to acquire DP HPD state
+ */
+ DP_NONE_HPD = 2
};
/**
@@ -2901,8 +2957,9 @@ enum dmub_cmd_fams_type {
*/
DMUB_CMD__FAMS_SET_MANUAL_TRIGGER = 3,
DMUB_CMD__FAMS2_CONFIG = 4,
- DMUB_CMD__FAMS2_DRR_UPDATE = 5,
- DMUB_CMD__FAMS2_FLIP = 6,
+ DMUB_CMD__FAMS2_IB_CONFIG = 5,
+ DMUB_CMD__FAMS2_DRR_UPDATE = 6,
+ DMUB_CMD__FAMS2_FLIP = 7,
};
/**
@@ -3609,6 +3666,12 @@ struct dmub_rb_cmd_psr_set_power_opt {
struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data;
};
+enum dmub_alpm_mode {
+ ALPM_AUXWAKE = 0,
+ ALPM_AUXLESS = 1,
+ ALPM_UNSUPPORTED = 2,
+};
+
/**
* Definition of Replay Residency GPINT command.
* Bit[0] - Residency mode for Revision 0
@@ -3742,6 +3805,15 @@ enum dmub_cmd_replay_general_subtype {
REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE,
};
+struct dmub_alpm_auxless_data {
+ uint16_t lfps_setup_ns;
+ uint16_t lfps_period_ns;
+ uint16_t lfps_silence_ns;
+ uint16_t lfps_t1_t2_override_us;
+ short lfps_t1_t2_offset_us;
+ uint8_t lttpr_count;
+};
+
/**
* Data passed from driver to FW in a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
@@ -3812,6 +3884,10 @@ struct dmub_cmd_replay_copy_settings_data {
* Use FSM state for Replay power up/down
*/
uint8_t use_phy_fsm;
+ /**
+ * Use for AUX-less ALPM LFPS wake operation
+ */
+ struct dmub_alpm_auxless_data auxless_alpm_data;
};
/**
@@ -4360,6 +4436,11 @@ enum dmub_cmd_abm_type {
* Get the current ACE curve.
*/
DMUB_CMD__ABM_GET_ACE_CURVE = 10,
+
+ /**
+ * Get current histogram data
+ */
+ DMUB_CMD__ABM_GET_HISTOGRAM_DATA = 11,
};
struct abm_ace_curve {
@@ -4954,6 +5035,20 @@ enum dmub_abm_ace_curve_type {
};
/**
+ * enum dmub_abm_histogram_type - Histogram type.
+ */
+enum dmub_abm_histogram_type {
+ /**
+ * ACE curve as defined by the SW layer.
+ */
+ ABM_HISTOGRAM_TYPE__SW = 0,
+ /**
+ * ACE curve as defined by the SW to HW translation interface layer.
+ */
+ ABM_HISTOGRAM_TYPE__SW_IF = 1,
+};
+
+/**
* Definition of a DMUB_CMD__ABM_GET_ACE_CURVE command.
*/
struct dmub_rb_cmd_abm_get_ace_curve {
@@ -4989,6 +5084,41 @@ struct dmub_rb_cmd_abm_get_ace_curve {
};
/**
+ * Definition of a DMUB_CMD__ABM_GET_HISTOGRAM command.
+ */
+struct dmub_rb_cmd_abm_get_histogram {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ /**
+ * Address where Histogram should be copied.
+ */
+ union dmub_addr dest;
+
+ /**
+ * Type of Histogram being queried.
+ */
+ enum dmub_abm_histogram_type histogram_type;
+
+ /**
+ * Indirect buffer length.
+ */
+ uint16_t bytes;
+
+ /**
+ * eDP panel instance.
+ */
+ uint8_t panel_inst;
+
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad;
+};
+
+/**
* Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
*/
struct dmub_rb_cmd_abm_save_restore {
@@ -5389,7 +5519,8 @@ struct dmub_cmd_fused_request {
struct dmub_cmd_fused_request_location_i2c {
uint8_t is_aux : 1; // False
uint8_t ddc_line : 3;
- uint8_t _reserved0 : 4;
+ uint8_t over_aux : 1;
+ uint8_t _reserved0 : 3;
uint8_t address;
uint8_t offset;
uint8_t length;
@@ -5687,6 +5818,11 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_get_ace_curve abm_get_ace_curve;
/**
+ * Definition of a DMUB_CMD__ABM_GET_HISTOGRAM command.
+ */
+ struct dmub_rb_cmd_abm_get_histogram abm_get_histogram;
+
+ /**
* Definition of a DMUB_CMD__ABM_SET_EVENT command.
*/
struct dmub_rb_cmd_abm_set_event abm_set_event;
@@ -5817,8 +5953,11 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
*/
struct dmub_rb_cmd_assr_enable assr_enable;
+
struct dmub_rb_cmd_fams2 fams2_config;
+ struct dmub_rb_cmd_ib ib_fams2_config;
+
struct dmub_rb_cmd_fams2_drr_update fams2_drr_update;
struct dmub_rb_cmd_fams2_flip fams2_flip;
@@ -5934,6 +6073,9 @@ static inline uint32_t dmub_rb_num_free(struct dmub_rb *rb)
else
data_count = rb->capacity - (rb->rptr - rb->wrpt);
+ /* +1 because 1 entry is always unusable */
+ data_count += DMUB_RB_CMD_SIZE;
+
return (rb->capacity - data_count) / DMUB_RB_CMD_SIZE;
}
@@ -5953,6 +6095,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
else
data_count = rb->capacity - (rb->rptr - rb->wrpt);
+ /* -1 because 1 entry is always unusable */
return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE));
}
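A worked example of the free-slot arithmetic above, assuming an illustrative 4096-byte ring and the 64-byte DMUB_RB_CMD_SIZE implied by the command-size assert elsewhere in this file:

/* Empty ring: rptr == wrpt, so data_count starts at 0.  The reserved-entry
 * adjustment adds DMUB_RB_CMD_SIZE, giving data_count = 64, so
 * dmub_rb_num_free() reports (4096 - 64) / 64 = 63 usable slots out of 64.
 * dmub_rb_full() applies the same convention: with 63 entries queued,
 * data_count == 4096 - 64 and the ring is reported full.
 */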
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index e67f7c4784eb..2575dbc448f7 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -66,24 +66,20 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
const uint32_t timeout_us = 1 * 1000 * 1000; //1s
const uint32_t poll_delay_us = 1; //1us
uint32_t i = 0;
- uint32_t in_reset, scratch, pwait_mode;
+ uint32_t enabled, in_reset, scratch, pwait_mode;
- REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ REG_GET(DMCUB_CNTL,
+ DMCUB_ENABLE, &enabled);
+ REG_GET(DMCUB_CNTL2,
+ DMCUB_SOFT_RESET, &in_reset);
- if (in_reset == 0) {
+ if (enabled && in_reset == 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- for (i = 0; i < timeout_us; i++) {
- if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
- break;
-
- udelay(poll_delay_us);
- }
-
for (; i < timeout_us; i++) {
scratch = dmub->hw_funcs.get_gpint_response(dmub);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
@@ -517,28 +513,69 @@ void dmub_dcn401_send_reg_inbox0_cmd_msg(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd)
{
uint32_t *dwords = (uint32_t *)cmd;
-
+ int32_t payload_size_bytes = cmd->cmd_common.header.payload_bytes;
+ uint32_t msg_index;
static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch");
- REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[0]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[1]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[2]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[3]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[4]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[5]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[6]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[7]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[8]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[9]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[10]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[11]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[12]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[13]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[14]);
+ /* write remaining data based on payload size */
+ for (msg_index = 0; msg_index < 15; msg_index++) {
+ if (payload_size_bytes <= msg_index * 4) {
+ break;
+ }
+
+ switch (msg_index) {
+ case 0:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[msg_index + 1]);
+ break;
+ case 1:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[msg_index + 1]);
+ break;
+ case 2:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[msg_index + 1]);
+ break;
+ case 3:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[msg_index + 1]);
+ break;
+ case 4:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[msg_index + 1]);
+ break;
+ case 5:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[msg_index + 1]);
+ break;
+ case 6:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[msg_index + 1]);
+ break;
+ case 7:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[msg_index + 1]);
+ break;
+ case 8:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[msg_index + 1]);
+ break;
+ case 9:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[msg_index + 1]);
+ break;
+ case 10:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[msg_index + 1]);
+ break;
+ case 11:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[msg_index + 1]);
+ break;
+ case 12:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[msg_index + 1]);
+ break;
+ case 13:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[msg_index + 1]);
+ break;
+ case 14:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[msg_index + 1]);
+ break;
+ }
+ }
+
/* writing to INBOX RDY register will trigger DMUB REG INBOX0 RDY
* interrupt.
*/
- REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[15]);
+ REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[0]);
}
uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub)
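A hedged summary of the dword-to-register mapping implied by the rewritten loop above (illustrative, not taken from the patch description):

/* dwords[0]  -> DMCUB_REG_INBOX0_RDY   (header; written last, raises the RDY interrupt)
 * dwords[1]  -> DMCUB_REG_INBOX0_MSG0  (first payload dword)
 *   ...
 * dwords[15] -> DMCUB_REG_INBOX0_MSG14 (only written when payload_bytes > 14 * 4)
 */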
@@ -556,30 +593,39 @@ void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub,
static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch");
- dwords[0] = REG_READ(DMCUB_REG_INBOX0_MSG0);
- dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG1);
- dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG2);
- dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG3);
- dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG4);
- dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG5);
- dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG6);
- dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG7);
- dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG8);
- dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG9);
- dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG10);
- dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG11);
- dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG12);
- dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG13);
- dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG14);
- dwords[15] = REG_READ(DMCUB_REG_INBOX0_RSP);
+ dwords[0] = REG_READ(DMCUB_REG_INBOX0_RSP);
+ dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG0);
+ dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG1);
+ dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG2);
+ dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG3);
+ dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG4);
+ dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG5);
+ dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG6);
+ dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG7);
+ dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG8);
+ dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG9);
+ dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG10);
+ dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG11);
+ dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG12);
+ dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG13);
+ dwords[15] = REG_READ(DMCUB_REG_INBOX0_MSG14);
}
void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 1);
+}
+
+void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub)
+{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 0);
}
+void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable)
+{
+ REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1:0);
+}
+
void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK, 1);
@@ -604,11 +650,6 @@ uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub)
return status;
}
-void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable)
-{
- REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1:0);
-}
-
void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN, enable ? 1:0);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
index c35be52676f6..88c3a44d67d9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
@@ -277,11 +277,13 @@ uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub);
void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub);
+void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub);
+void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable);
+
void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub);
void dmub_dcn401_read_reg_outbox0_msg(struct dmub_srv *dmub, uint32_t *msg);
void dmub_dcn401_write_reg_outbox0_rsp(struct dmub_srv *dmub, uint32_t *msg);
uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub);
-void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable);
void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable);
uint32_t dmub_dcn401_read_reg_outbox0_rdy_int_status(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index ae8133816b43..acca7943a8c8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -157,6 +157,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
+ /* default to not specifying an inbox type */
+ enum dmub_inbox_cmd_interface_type default_inbox_type = DMUB_CMD_INTERFACE_DEFAULT;
+
switch (asic) {
case DMUB_ASIC_DCN20:
case DMUB_ASIC_DCN21:
@@ -395,10 +398,15 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_current_time = dmub_dcn401_get_current_time;
funcs->get_diagnostic_data = dmub_dcn401_get_diagnostic_data;
+
funcs->send_reg_inbox0_cmd_msg = dmub_dcn401_send_reg_inbox0_cmd_msg;
funcs->read_reg_inbox0_rsp_int_status = dmub_dcn401_read_reg_inbox0_rsp_int_status;
funcs->read_reg_inbox0_cmd_rsp = dmub_dcn401_read_reg_inbox0_cmd_rsp;
funcs->write_reg_inbox0_rsp_int_ack = dmub_dcn401_write_reg_inbox0_rsp_int_ack;
+ funcs->clear_reg_inbox0_rsp_int_ack = dmub_dcn401_clear_reg_inbox0_rsp_int_ack;
+ funcs->enable_reg_inbox0_rsp_int = dmub_dcn401_enable_reg_inbox0_rsp_int;
+ default_inbox_type = DMUB_CMD_INTERFACE_FB; // still default to FB for now
+
funcs->write_reg_outbox0_rdy_int_ack = dmub_dcn401_write_reg_outbox0_rdy_int_ack;
funcs->read_reg_outbox0_msg = dmub_dcn401_read_reg_outbox0_msg;
funcs->write_reg_outbox0_rsp = dmub_dcn401_write_reg_outbox0_rsp;
@@ -411,6 +419,20 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
return false;
}
+ /* set default inbox type if not overridden */
+ if (dmub->inbox_type == DMUB_CMD_INTERFACE_DEFAULT) {
+ if (default_inbox_type != DMUB_CMD_INTERFACE_DEFAULT) {
+ /* use default inbox type as specified by DCN rev */
+ dmub->inbox_type = default_inbox_type;
+ } else if (funcs->send_reg_inbox0_cmd_msg) {
+ /* prefer reg as default inbox type if present */
+ dmub->inbox_type = DMUB_CMD_INTERFACE_REG;
+ } else {
+ /* use fb as fallback */
+ dmub->inbox_type = DMUB_CMD_INTERFACE_FB;
+ }
+ }
+
return true;
}
@@ -426,6 +448,7 @@ enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
dmub->asic = params->asic;
dmub->fw_version = params->fw_version;
dmub->is_virtual = params->is_virtual;
+ dmub->inbox_type = params->inbox_type;
/* Setup asic dependent hardware funcs. */
if (!dmub_srv_hw_setup(dmub, params->asic)) {
@@ -695,7 +718,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
inbox1.base = cw4.region.base;
inbox1.top = cw4.region.base + DMUB_RB_SIZE;
outbox1.base = inbox1.top;
- outbox1.top = cw4.region.top;
+ outbox1.top = inbox1.top + DMUB_RB_SIZE;
cw5.offset.quad_part = tracebuff_fb->gpu_addr;
cw5.region.base = DMUB_CW5_BASE;
@@ -737,7 +760,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
rb_params.ctx = dmub;
rb_params.base_address = mail_fb->cpu_addr;
rb_params.capacity = DMUB_RB_SIZE;
- dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+ dmub_rb_init(&dmub->inbox1.rb, &rb_params);
// Initialize outbox1 ring buffer
rb_params.ctx = dmub;
@@ -768,27 +791,6 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
-{
- if (!dmub->sw_init)
- return DMUB_STATUS_INVALID;
-
- if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
- uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
- uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
-
- if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
- return DMUB_STATUS_HW_FAILURE;
- } else {
- dmub->inbox1_rb.rptr = rptr;
- dmub->inbox1_rb.wrpt = wptr;
- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
- }
- }
-
- return DMUB_STATUS_OK;
-}
-
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
if (!dmub->sw_init)
@@ -799,8 +801,13 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
/* mailboxes have been reset in hw, so reset the sw state as well */
dmub->inbox1_last_wptr = 0;
- dmub->inbox1_rb.wrpt = 0;
- dmub->inbox1_rb.rptr = 0;
+ dmub->inbox1.rb.wrpt = 0;
+ dmub->inbox1.rb.rptr = 0;
+ dmub->inbox1.num_reported = 0;
+ dmub->inbox1.num_submitted = 0;
+ dmub->reg_inbox0.num_reported = 0;
+ dmub->reg_inbox0.num_submitted = 0;
+ dmub->reg_inbox0.is_pending = 0;
dmub->outbox0_rb.wrpt = 0;
dmub->outbox0_rb.rptr = 0;
dmub->outbox1_rb.wrpt = 0;
@@ -811,7 +818,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
const union dmub_rb_cmd *cmd)
{
if (!dmub->hw_init)
@@ -820,18 +827,20 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
if (dmub->power_state != DMUB_POWER_STATE_D0)
return DMUB_STATUS_POWER_STATE_D3;
- if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
- dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
+ if (dmub->inbox1.rb.rptr > dmub->inbox1.rb.capacity ||
+ dmub->inbox1.rb.wrpt > dmub->inbox1.rb.capacity) {
return DMUB_STATUS_HW_FAILURE;
}
- if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ if (dmub_rb_push_front(&dmub->inbox1.rb, cmd)) {
+ dmub->inbox1.num_submitted++;
return DMUB_STATUS_OK;
+ }
return DMUB_STATUS_QUEUE_FULL;
}
-enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
+enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub)
{
struct dmub_rb flush_rb;
@@ -846,13 +855,13 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
* been flushed to framebuffer memory. Otherwise DMCUB might
* read back stale, fully invalid or partially invalid data.
*/
- flush_rb = dmub->inbox1_rb;
+ flush_rb = dmub->inbox1.rb;
flush_rb.rptr = dmub->inbox1_last_wptr;
dmub_rb_flush_pending(&flush_rb);
- dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+ dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1.rb.wrpt);
- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
+ dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
return DMUB_STATUS_OK;
}
@@ -910,26 +919,84 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
+static void dmub_srv_update_reg_inbox0_status(struct dmub_srv *dmub)
+{
+ if (dmub->reg_inbox0.is_pending) {
+ dmub->reg_inbox0.is_pending = dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
+ !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
+
+ if (!dmub->reg_inbox0.is_pending) {
+ /* ack the rsp interrupt */
+ if (dmub->hw_funcs.write_reg_inbox0_rsp_int_ack)
+ dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
+
+ /* only update the reported count if commands aren't being batched */
+ if (!dmub->reg_inbox0.is_pending && !dmub->reg_inbox0.is_multi_pending) {
+ dmub->reg_inbox0.num_reported = dmub->reg_inbox0.num_submitted;
+ }
+ }
+ }
+}
+
+enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
+ uint32_t timeout_us)
+{
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
+ struct dmub_srv_inbox scratch_reg_inbox0 = dmub->reg_inbox0;
+ struct dmub_srv_inbox scratch_inbox1 = dmub->inbox1;
+ const volatile struct dmub_srv_inbox *reg_inbox0 = &dmub->reg_inbox0;
+ const volatile struct dmub_srv_inbox *inbox1 = &dmub->inbox1;
+
+ if (!dmub->hw_init ||
+ !dmub->hw_funcs.get_inbox1_wptr)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i <= timeout_us; i += polling_interval_us) {
+ scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
+ scratch_inbox1.rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+ scratch_reg_inbox0.is_pending = scratch_reg_inbox0.is_pending &&
+ dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
+ !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
+
+ if (scratch_inbox1.rb.rptr > dmub->inbox1.rb.capacity)
+ return DMUB_STATUS_HW_FAILURE;
+
+ /* check current HW state first, but use command submission vs reported as a fallback */
+ if ((dmub_rb_empty(&scratch_inbox1.rb) ||
+ inbox1->num_reported >= scratch_inbox1.num_submitted) &&
+ (!scratch_reg_inbox0.is_pending ||
+ reg_inbox0->num_reported >= scratch_reg_inbox0.num_submitted))
+ return DMUB_STATUS_OK;
+
+ udelay(polling_interval_us);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us)
{
- uint32_t i, rptr;
+ enum dmub_status status;
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
- for (i = 0; i <= timeout_us; ++i) {
- rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ for (i = 0; i < timeout_us; i += polling_interval_us) {
+ status = dmub_srv_update_inbox_status(dmub);
- if (rptr > dmub->inbox1_rb.capacity)
- return DMUB_STATUS_HW_FAILURE;
+ if (status != DMUB_STATUS_OK)
+ return status;
- dmub->inbox1_rb.rptr = rptr;
-
- if (dmub_rb_empty(&dmub->inbox1_rb))
+ /* check for idle */
+ if (dmub_rb_empty(&dmub->inbox1.rb) && !dmub->reg_inbox0.is_pending)
return DMUB_STATUS_OK;
- udelay(1);
+ udelay(polling_interval_us);
}
return DMUB_STATUS_TIMEOUT;
@@ -1040,35 +1107,6 @@ enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd)
-{
- enum dmub_status status = DMUB_STATUS_OK;
-
- // Queue command
- status = dmub_srv_cmd_queue(dmub, cmd);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Execute command
- status = dmub_srv_cmd_execute(dmub);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Wait for DMUB to process command
- status = dmub_srv_wait_for_idle(dmub, 100000);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Copy data back from ring buffer into command
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd);
-
- return status;
-}
-
static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb,
void *entry)
{
@@ -1160,47 +1198,162 @@ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_
}
}
+void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
+{
+ if (!dmub || !dmub->hw_init)
+ return;
+
+ dmub->power_state = dmub_srv_power_state;
+}
-enum dmub_status dmub_srv_send_reg_inbox0_cmd(
- struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd,
- bool with_reply, uint32_t timeout_us)
+enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd)
{
- uint32_t rsp_ready = 0;
- uint32_t i;
+ uint32_t num_pending = 0;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+ if (dmub->power_state != DMUB_POWER_STATE_D0)
+ return DMUB_STATUS_POWER_STATE_D3;
+
+ if (!dmub->hw_funcs.send_reg_inbox0_cmd_msg ||
+ !dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->reg_inbox0.num_submitted >= dmub->reg_inbox0.num_reported)
+ num_pending = dmub->reg_inbox0.num_submitted - dmub->reg_inbox0.num_reported;
+ else
+ /* num_submitted wrapped */
+ num_pending = DMUB_REG_INBOX0_RB_MAX_ENTRY -
+ (dmub->reg_inbox0.num_reported - dmub->reg_inbox0.num_submitted);
+
+ if (num_pending >= DMUB_REG_INBOX0_RB_MAX_ENTRY)
+ return DMUB_STATUS_QUEUE_FULL;
+
+ /* clear last rsp ack and send message */
+ dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack(dmub);
dmub->hw_funcs.send_reg_inbox0_cmd_msg(dmub, cmd);
- for (i = 0; i < timeout_us; i++) {
- rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
- if (rsp_ready)
- break;
- udelay(1);
+ dmub->reg_inbox0.num_submitted++;
+ dmub->reg_inbox0.is_pending = true;
+ dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending;
+
+ return DMUB_STATUS_OK;
+}
+
+void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
+ union dmub_rb_cmd *cmd_rsp)
+{
+ if (dmub) {
+ if (dmub->inbox_type == DMUB_CMD_INTERFACE_REG &&
+ dmub->hw_funcs.read_reg_inbox0_cmd_rsp) {
+ dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd_rsp);
+ } else {
+ dmub_rb_get_return_data(&dmub->inbox1.rb, cmd_rsp);
+ }
}
- if (rsp_ready == 0)
- return DMUB_STATUS_TIMEOUT;
+}
+
+static enum dmub_status dmub_srv_sync_reg_inbox0(struct dmub_srv *dmub)
+{
+ if (!dmub || !dmub->sw_init)
+ return DMUB_STATUS_INVALID;
- if (with_reply)
- dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd);
+ dmub->reg_inbox0.is_pending = 0;
+ dmub->reg_inbox0.is_multi_pending = 0;
- dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
+ return DMUB_STATUS_OK;
+}
- /* wait for rsp int status is cleared to initial state before exit */
- for (; i <= timeout_us; i++) {
- rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
- if (rsp_ready == 0)
- break;
- udelay(1);
+static enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
+{
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
+ uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
+
+ if (rptr > dmub->inbox1.rb.capacity || wptr > dmub->inbox1.rb.capacity) {
+ return DMUB_STATUS_HW_FAILURE;
+ } else {
+ dmub->inbox1.rb.rptr = rptr;
+ dmub->inbox1.rb.wrpt = wptr;
+ dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
+ }
}
- ASSERT(rsp_ready == 0);
return DMUB_STATUS_OK;
}
-void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
+enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub)
{
- if (!dmub || !dmub->hw_init)
- return;
+ enum dmub_status status;
- dmub->power_state = dmub_srv_power_state;
+ status = dmub_srv_sync_reg_inbox0(dmub);
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ status = dmub_srv_sync_inbox1(dmub);
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
+ uint32_t timeout_us,
+ uint32_t num_free_required)
+{
+ enum dmub_status status;
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i < timeout_us; i += polling_interval_us) {
+ status = dmub_srv_update_inbox_status(dmub);
+
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ /* check for space in inbox1 */
+ if (dmub_rb_num_free(&dmub->inbox1.rb) >= num_free_required)
+ return DMUB_STATUS_OK;
+
+ udelay(polling_interval_us);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub)
+{
+ uint32_t rptr;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->power_state != DMUB_POWER_STATE_D0)
+ return DMUB_STATUS_POWER_STATE_D3;
+
+ /* update inbox1 state */
+ rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+ if (rptr > dmub->inbox1.rb.capacity)
+ return DMUB_STATUS_HW_FAILURE;
+
+ if (dmub->inbox1.rb.rptr > rptr) {
+ /* rb wrapped */
+ dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
+ } else {
+ dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
+ }
+ dmub->inbox1.rb.rptr = rptr;
+
+ /* update reg_inbox0 */
+ dmub_srv_update_reg_inbox0_status(dmub);
+
+ return DMUB_STATUS_OK;
}
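A worked example of the wrap-safe accounting introduced above (numbers are illustrative; a 4096-byte inbox1 and 64-byte DMUB_RB_CMD_SIZE are assumed):

/* inbox1: capacity = 4096, previous sw rptr = 3968, new hw rptr = 128.
 * Because 3968 > 128 the ring wrapped, so dmub_srv_update_inbox_status()
 * adds (128 + 4096 - 3968) / 64 = 4 newly completed commands to num_reported.
 * reg_inbox0 uses the same idea on its counters: when num_submitted has
 * wrapped below num_reported, dmub_srv_reg_cmd_execute() recovers the pending
 * count as DMUB_REG_INBOX0_RB_MAX_ENTRY - (num_reported - num_submitted).
 */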
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index cce887cefc01..567c5b1aeb7a 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -95,23 +95,6 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
case DMUB_OUT_CMD__DPIA_NOTIFICATION:
notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION;
notify->link_index = cmd.dpia_notification.payload.header.instance;
-
- if (cmd.dpia_notification.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) {
-
- notify->dpia_notification.payload.data.dpia_bw_alloc.estimated_bw =
- cmd.dpia_notification.payload.data.dpia_bw_alloc.estimated_bw;
- notify->dpia_notification.payload.data.dpia_bw_alloc.allocated_bw =
- cmd.dpia_notification.payload.data.dpia_bw_alloc.allocated_bw;
-
- if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_failed)
- notify->result = DPIA_BW_REQ_FAILED;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_succeeded)
- notify->result = DPIA_BW_REQ_SUCCESS;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.est_bw_changed)
- notify->result = DPIA_EST_BW_CHANGED;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed)
- notify->result = DPIA_BW_ALLOC_CAPS_CHANGED;
- }
break;
case DMUB_OUT_CMD__HPD_SENSE_NOTIFY:
notify->type = DMUB_NOTIFICATION_HPD_SENSE_NOTIFY;
@@ -119,6 +102,10 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
&cmd.hpd_sense_notify.data,
sizeof(cmd.hpd_sense_notify.data));
break;
+ case DMUB_OUT_CMD__FUSED_IO:
+ notify->type = DMUB_NOTIFICATION_FUSED_IO;
+ dmub_memcpy(&notify->fused_request, &cmd.fused_io.request, sizeof(cmd.fused_io.request));
+ break;
default:
notify->type = DMUB_NOTIFICATION_NO_DATA;
break;
diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
index 7e3240e73c1f..63813009a3a6 100644
--- a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
+++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
@@ -86,6 +86,9 @@ enum dc_irq_source dal_irq_get_source(
enum dc_irq_source dal_irq_get_rx_source(
const struct gpio *irq);
+enum dc_irq_source dal_irq_get_read_request(
+ const struct gpio *irq);
+
enum gpio_result dal_irq_setup_hpd_filter(
struct gpio *irq,
struct gpio_hpd_config *config);
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 1867aac57cf2..da74ed66c8f9 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -89,6 +89,8 @@ struct link_training_settings {
bool enhanced_framing;
enum lttpr_mode lttpr_mode;
+ bool lttpr_early_tps2;
+
/* disallow different lanes to have different lane settings */
bool disallow_per_lane_settings;
/* dpcd lane settings will always use the same hw lane settings
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 55c7d873175f..a37634942b07 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -386,6 +386,7 @@ enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp);
/* hdcp version helpers */
static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 1d41dd58f6bc..bb8ae80b37f8 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -452,21 +452,12 @@ out:
return status;
}
-static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
+static enum mod_hdcp_status locality_check_sw(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
- if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
- event_ctx->unexpected_event = 1;
- goto out;
- }
-
- if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init,
- &input->lc_init_prepare, &status,
- hdcp, "lc_init_prepare"))
- goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
&input->lc_init_write, &status,
hdcp, "lc_init_write"))
@@ -482,6 +473,48 @@ static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
&input->l_prime_read, &status,
hdcp, "l_prime_read"))
goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status locality_check_fw(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw,
+ &input->l_prime_read, &status,
+ hdcp, "l_prime_read"))
+ goto out;
+
+out:
+ return status;
+}
+
+static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c
+ && hdcp->config.ddc.funcs.atomic_write_poll_read_aux
+ && !hdcp->connection.link.adjust.hdcp2.force_sw_locality_check;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init,
+ &input->lc_init_prepare, &status,
+ hdcp, "lc_init_prepare"))
+ goto out;
+
+ status = (use_fw ? locality_check_fw : locality_check_sw)(hdcp, event_ctx, input);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime,
&input->l_prime_validation, &status,
hdcp, "l_prime_validation"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index c5f6c11de7e5..89ffb89e1932 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -184,17 +184,28 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
break;
- case H2_A2_LOCALITY_CHECK:
+ case H2_A2_LOCALITY_CHECK: {
+ const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c
+ && !adjust->hdcp2.force_sw_locality_check;
+
+ /*
+ * 1A-05: consider disconnection after LC init a failure
+ * 1A-13-1: consider invalid l' a failure
+ * 1A-13-2: consider l' timeout a failure
+ */
if (hdcp->state.stay_count > 10 ||
input->lc_init_prepare != PASS ||
- input->lc_init_write != PASS ||
- input->l_prime_available_poll != PASS ||
- input->l_prime_read != PASS) {
- /*
- * 1A-05: consider disconnection after LC init a failure
- * 1A-13-1: consider invalid l' a failure
- * 1A-13-2: consider l' timeout a failure
- */
+ (!use_fw && input->lc_init_write != PASS) ||
+ (!use_fw && input->l_prime_available_poll != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->l_prime_read != PASS) {
+ if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) {
+ adjust->hdcp2.force_sw_locality_check = true;
+ callback_in_ms(0, output);
+ break;
+ }
+
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->l_prime_validation != PASS) {
@@ -205,6 +216,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
+ }
case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
@@ -498,14 +510,25 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
break;
- case D2_A2_LOCALITY_CHECK:
+ case D2_A2_LOCALITY_CHECK: {
+ const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_aux
+ && !adjust->hdcp2.force_sw_locality_check;
+
if (hdcp->state.stay_count > 10 ||
input->lc_init_prepare != PASS ||
- input->lc_init_write != PASS ||
- input->l_prime_read != PASS) {
+ (!use_fw && input->lc_init_write != PASS)) {
/* 1A-12: consider invalid l' a failure */
fail_and_restart_in_ms(0, &status, output);
break;
+ } else if (input->l_prime_read != PASS) {
+ if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) {
+ adjust->hdcp2.force_sw_locality_check = true;
+ callback_in_ms(0, output);
+ break;
+ }
+
+ fail_and_restart_in_ms(0, &status, output);
+ break;
} else if (input->l_prime_validation != PASS) {
callback_in_ms(0, output);
increment_stay_counter(hdcp);
@@ -514,6 +537,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
+ }
case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index 6e064e6ae949..2e6408579194 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -688,3 +688,76 @@ enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_INVALID_OPERATION;
}
+
+static bool write_stall_read_lc_fw_aux(struct mod_hdcp *hdcp)
+{
+ struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2;
+
+ struct mod_hdcp_atomic_op_aux write = {
+ hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT],
+ hdcp2->lc_init + 1,
+ sizeof(hdcp2->lc_init) - 1,
+ };
+ struct mod_hdcp_atomic_op_aux stall = { 0, NULL, 0, };
+ struct mod_hdcp_atomic_op_aux read = {
+ hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME],
+ hdcp2->lc_l_prime + 1,
+ sizeof(hdcp2->lc_l_prime) - 1,
+ };
+
+ hdcp2->lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME;
+
+ return hdcp->config.ddc.funcs.atomic_write_poll_read_aux(
+ hdcp->config.ddc.handle,
+ &write,
+ &stall,
+ &read,
+ 16 * 1000,
+ 0
+ );
+}
+
+static bool write_poll_read_lc_fw_i2c(struct mod_hdcp *hdcp)
+{
+ struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2;
+ uint8_t expected_rxstatus[2] = { sizeof(hdcp2->lc_l_prime) };
+
+ hdcp->buf[0] = hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT];
+ memmove(&hdcp->buf[1], hdcp2->lc_init, sizeof(hdcp2->lc_init));
+
+ struct mod_hdcp_atomic_op_i2c write = {
+ HDCP_I2C_ADDR,
+ 0,
+ hdcp->buf,
+ sizeof(hdcp2->lc_init) + 1,
+ };
+ struct mod_hdcp_atomic_op_i2c poll = {
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_RXSTATUS],
+ expected_rxstatus,
+ sizeof(expected_rxstatus),
+ };
+ struct mod_hdcp_atomic_op_i2c read = {
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME],
+ hdcp2->lc_l_prime,
+ sizeof(hdcp2->lc_l_prime),
+ };
+
+ return hdcp->config.ddc.funcs.atomic_write_poll_read_i2c(
+ hdcp->config.ddc.handle,
+ &write,
+ &poll,
+ &read,
+ 20 * 1000,
+ 6
+ );
+}
+
+enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp)
+{
+ const bool success = (is_dp_hdcp(hdcp) ? write_stall_read_lc_fw_aux : write_poll_read_lc_fw_i2c)(hdcp);
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
+
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index a4d344a4db9e..c42468bb70ac 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -133,9 +133,22 @@ enum mod_hdcp_display_disable_option {
MOD_HDCP_DISPLAY_DISABLE_ENCRYPTION,
};
+struct mod_hdcp_atomic_op_i2c {
+ uint8_t address;
+ uint8_t offset;
+ uint8_t *data;
+ uint32_t size;
+};
+
+struct mod_hdcp_atomic_op_aux {
+ uint32_t address;
+ uint8_t *data;
+ uint32_t size;
+};
+
struct mod_hdcp_ddc {
void *handle;
- struct {
+ struct mod_hdcp_ddc_funcs {
bool (*read_i2c)(void *handle,
uint32_t address,
uint8_t offset,
@@ -153,6 +166,22 @@ struct mod_hdcp_ddc {
uint32_t address,
const uint8_t *data,
uint32_t size);
+ bool (*atomic_write_poll_read_i2c)(
+ void *handle,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+ );
+ bool (*atomic_write_poll_read_aux)(
+ void *handle,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+ );
} funcs;
};
@@ -185,7 +214,8 @@ struct mod_hdcp_link_adjustment_hdcp2 {
uint8_t force_type : 2;
uint8_t force_no_stored_km : 1;
uint8_t increase_h_prime_timeout: 1;
- uint8_t reserved : 3;
+ uint8_t force_sw_locality_check : 1;
+ uint8_t reserved : 2;
};
struct mod_hdcp_link_adjustment {
@@ -272,6 +302,10 @@ struct mod_hdcp_display_query {
struct mod_hdcp_config {
struct mod_hdcp_psp psp;
struct mod_hdcp_ddc ddc;
+ struct {
+ uint8_t lc_enable_sw_fallback : 1;
+ uint8_t reserved : 7;
+ } debug;
uint8_t index;
};
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 4c95b885d1d0..c8eccee9b023 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -366,7 +366,7 @@ enum DC_DEBUG_MASK {
DC_HDCP_LC_FORCE_FW_ENABLE = 0x80000,
/**
- * @DC_HDCP_LC_ENABLE_SW_FALLBACK If set, upon HDCP Locality Check FW
+ * @DC_HDCP_LC_ENABLE_SW_FALLBACK: If set, upon HDCP Locality Check FW
* path failure, retry using legacy SW path.
*/
DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000,
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
index bd8085ec54ed..2d6a598a6c25 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
@@ -5242,6 +5242,8 @@
#define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT 0x0000000c
#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE_MASK 0x00000003L
#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT 0x00000000
+#define DEGAMMA_CONTROL__ICON_DEGAMMA_MODE_MASK 0x00000300L
+#define DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT 0x00000008
#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE_MASK 0x00000030L
#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT 0x00000004
#define DENORM_CONTROL__DENORM_MODE_MASK 0x00000007L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
index c75aee25619e..6f44345277af 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
@@ -1779,6 +1779,8 @@
#define mmRLC_TTOP_D 0x3105
#define mmRLC_CLEAR_STATE_RESTORE_BASE 0x30C8
#define mmRLC_PG_AO_CU_MASK 0x310B
+#define mmSPI_STATIC_THREAD_MGMT_1 0x2438
+#define mmSPI_STATIC_THREAD_MGMT_2 0x2439
#define mmSPI_STATIC_THREAD_MGMT_3 0x243A
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
index edc8a793a95d..4dd386b98748 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
@@ -234,6 +234,26 @@
#define mmIH_RB_WPTR_ADDR_HI 0x0F84
#define mmIH_RB_WPTR_ADDR_LO 0x0F85
#define mmIH_STATUS 0x0F88
+
+#define mmDMA_GFX_RB_CNTL 0x3400
+#define mmDMA_GFX_RB_BASE 0x3401
+#define mmDMA_GFX_RB_RPTR 0x3402
+#define mmDMA_GFX_RB_WPTR 0x3403
+#define mmDMA_GFX_RB_RPTR_ADDR_HI 0x3407
+#define mmDMA_GFX_RB_RPTR_ADDR_LO 0x3408
+#define mmDMA_GFX_IB_CNTL 0x3409
+#define mmDMA_GFX_IB_RPTR 0x340a
+#define mmDMA_CNTL 0x340b
+#define mmDMA_STATUS_REG 0x340D
+#define mmDMA_TILING_CONFIG 0x342E
+#define mmDMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411
+#define mmDMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412
+#define mmDMA_POWER_CNTL 0x342F
+#define mmDMA_CLK_CTRL 0x3430
+#define mmDMA_PG 0x3435
+#define mmDMA_PGFSM_CONFIG 0x3436
+#define mmDMA_PGFSM_WRITE 0x3437
+
#define mmSEM_MAILBOX 0x0F9B
#define mmSEM_MAILBOX_CLIENTCONFIG 0x0F9A
#define mmSEM_MAILBOX_CONTROL 0x0F9C
@@ -269,7 +289,4 @@
#define mmVCE_CONFIG 0x0F94
#define mmXDMA_MSTR_MEM_OVERFLOW_CNTL 0x03F8
-/* from the old sid.h */
-#define mmDMA_TILING_CONFIG 0x342E
-
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
index 1c540fe136cb..9f7fc2428b69 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
@@ -823,6 +823,43 @@
#define LX3__RESERVED__SHIFT 0x00000000
#define RINGOSC_MASK__MASK_MASK 0x0000ffffL
#define RINGOSC_MASK__MASK__SHIFT 0x00000000
+
+#define DMA_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define DMA_CNTL__TRAP_ENABLE__SHIFT 0x00000000
+#define DMA_CNTL__SEM_INCOMPLETE_INT_ENABLE_MASK 0x00000002L
+#define DMA_CNTL__SEM_INCOMPLETE_INT_ENABLE__SHIFT 0x00000001
+#define DMA_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define DMA_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x00000002
+#define DMA_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define DMA_CNTL__DATA_SWAP_ENABLE__SHIFT 0x00000003
+#define DMA_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define DMA_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x00000004
+#define DMA_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define DMA_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x0000001C
+#define DMA_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define DMA_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x00000000
+#define DMA_GFX_RB_CNTL__RB_SIZE__SHIFT 0x00000001
+#define DMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define DMA_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x00000009
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0x0000000C
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0x0000000D
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x00000010
+#define DMA_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define DMA_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x00000000
+#define DMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define DMA_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x00000004
+#define DMA_GFX_IB_CNTL__CMD_VMID_FORCE_MASK 0x80000000L
+#define DMA_GFX_IB_CNTL__CMD_VMID_FORCE__SHIFT 0x0000001F
+
+#define DMA_STATUS_REG__IDLE_MASK 0x00000001L
+#define DMA_STATUS_REG__IDLE__SHIFT 0x00000000
+#define DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define DMA_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x00000008
+#define DMA_PG__PG_CNTL_ENABLE_MASK 0x00000001L
+#define DMA_PG__PG_CNTL_ENABLE__SHIFT 0x00000000
+
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK 0x00000007L
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT 0x00000000
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1_MASK 0x00000038L
@@ -1015,6 +1052,10 @@
#define SRBM_STATUS2__VCE_BUSY__SHIFT 0x00000007
#define SRBM_STATUS2__VCE_RQ_PENDING_MASK 0x00000008L
#define SRBM_STATUS2__VCE_RQ_PENDING__SHIFT 0x00000003
+#define SRBM_STATUS2__DMA_BUSY_MASK 0x00000020L
+#define SRBM_STATUS2__DMA_BUSY__SHIFT 0x00000005
+#define SRBM_STATUS2__DMA1_BUSY_MASK 0x00000040L
+#define SRBM_STATUS2__DMA1_BUSY__SHIFT 0x00000006
#define SRBM_STATUS2__XDMA_BUSY_MASK 0x00000100L
#define SRBM_STATUS2__XDMA_BUSY__SHIFT 0x00000008
#define SRBM_STATUS2__XSP_BUSY_MASK 0x00000010L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
index 6b10be61efc3..bdef1f743df7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
@@ -41,7 +41,49 @@
#define ixLCAC_MC5_CNTL 0x012B
#define ixLCAC_MC5_OVR_SEL 0x012C
#define ixLCAC_MC5_OVR_VAL 0x012D
+
+#define mmCG_SPLL_FUNC_CNTL 0x0180
+#define mmCG_SPLL_FUNC_CNTL_2 0x0181
+#define mmCG_SPLL_FUNC_CNTL_3 0x0182
+#define mmCG_SPLL_FUNC_CNTL_4 0x0183
+#define mmCG_SPLL_STATUS 0x0185
+#define mmSPLL_CNTL_MODE 0x0186
+#define mmCG_SPLL_SPREAD_SPECTRUM 0x0188
+#define mmCG_SPLL_SPREAD_SPECTRUM_2 0x0189
+#define mmCG_SPLL_AUTOSCALE_CNTL 0x018B
+#define mmMPLL_BYPASSCLK_SEL 0x0197
+#define mmCG_CLKPIN_CNTL 0x0198
+#define mmCG_CLKPIN_CNTL_2 0x0199
+#define mmTHM_CLK_CNTL 0x019B
+#define mmMISC_CLK_CNTL 0x019C
+#define mmCG_THERMAL_CTRL 0x01C0
+#define mmCG_THERMAL_STATUS 0x01C1
+#define mmCG_THERMAL_INT 0x01C2
+#define mmCG_MULT_THERMAL_CTRL 0x01C4
+#define mmCG_MULT_THERMAL_STATUS 0x01C5
+#define mmCG_FDO_CTRL0 0x01D5
+#define mmCG_FDO_CTRL1 0x01D6
+#define mmCG_FDO_CTRL2 0x01D7
+#define mmCG_TACH_CTRL 0x01DC
+#define mmCG_TACH_STATUS 0x01DD
+#define mmGENERAL_PWRMGT 0x1E0
+#define mmCG_TPC 0x1E1
+#define mmSCLK_PWRMGT_CNTL 0x1E2
+#define mmTARGET_AND_CURRENT_PROFILE_INDEX 0x01E6
+#define mmCG_FTV 0x01EF
+#define mmCG_FFCT_0 0x01F0
+#define mmCG_BSP 0x01FF
+#define mmCG_AT 0x0200
+#define mmCG_GIT 0x0201
+#define mmCG_SSP 0x0203
+#define mmCG_DISPLAY_GAP_CNTL 0x020A
+#define mmCG_ULV_CONTROL 0x021E
+#define mmCG_ULV_PARAMETER 0x021F
+#define mmSMC_SCRATCH0 0x0221
+#define mmCG_CAC_CTRL 0x022E
+
#define ixSMC_PC_C 0x80000370
+
#define ixTHM_TMON0_DEBUG 0x03F0
#define ixTHM_TMON0_INT_DATA 0x0380
#define ixTHM_TMON0_RDIL0_DATA 0x0300
@@ -110,6 +152,7 @@
#define ixTHM_TMON1_RDIR7_DATA 0x0337
#define ixTHM_TMON1_RDIR8_DATA 0x0338
#define ixTHM_TMON1_RDIR9_DATA 0x0339
+
#define mmGPIOPAD_A 0x05E7
#define mmGPIOPAD_EN 0x05E8
#define mmGPIOPAD_EXTERN_TRIG_CNTL 0x05F1
@@ -127,6 +170,7 @@
#define mmGPIOPAD_STRENGTH 0x05E5
#define mmGPIOPAD_SW_INT_STAT 0x05E4
#define mmGPIOPAD_Y 0x05E9
+
#define mmSMC_IND_ACCESS_CNTL 0x008A
#define mmSMC_IND_DATA_0 0x0081
#define mmSMC_IND_DATA 0x0081
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
index 7d3925b7266e..67d3c7e13a48 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
@@ -23,10 +23,142 @@
#ifndef SMU_6_0_SH_MASK_H
#define SMU_6_0_SH_MASK_H
-#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03ffffffL
-#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000
-#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003f0L
+#define CG_AT__CG_R_MASK 0x0000FFFFL
+#define CG_AT__CG_R__SHIFT 0x00000000
+#define CG_AT__CG_L_MASK 0xFFFF0000L
+#define CG_AT__CG_L__SHIFT 0x00000010
+
+#define CG_BSP__BSP_MASK 0x0000FFFFL
+#define CG_BSP__BSP__SHIFT 0x00000000
+#define CG_BSP__BSU_MASK 0x000F0000L
+#define CG_BSP__BSU__SHIFT 0x00000010
+
+#define CG_CAC_CTRL__CAC_WINDOW_MASK 0x00FFFFFFL
+#define CG_CAC_CTRL__CAC_WINDOW__SHIFT 0x00000000
+
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x00000002L
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x00000001
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x00000004L
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x00000002
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x00000008L
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x00000003
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x00000100L
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x00000008
+
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK 0x00000003L
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT 0x00000000
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK 0x0000000CL
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT 0x00000002
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x0003FFF0L
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x00000004
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x00700000
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x00000014
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK 0x03000000L
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT 0x00000018
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK 0x0C000000L
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT 0x0000001A
+
+#define CG_FFCT_0__UTC_0_MASK 0x000003FFL
+#define CG_FFCT_0__UTC_0__SHIFT 0x00000000
+#define CG_FFCT_0__DTC_0_MASK 0x000FFC00L
+#define CG_FFCT_0__DTC_0__SHIFT 0x0000000A
+
+#define CG_GIT__CG_GICST_MASK 0x0000FFFFL
+#define CG_GIT__CG_GICST__SHIFT 0x00000000
+#define CG_GIT__CG_GIPOT_MASK 0xFFFF0000L
+#define CG_GIT__CG_GIPOT__SHIFT 0x00000010
+
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x00000001L
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL__SPLL_SLEEP_MASK 0x00000002L
+#define CG_SPLL_FUNC_CNTL__SPLL_SLEEP__SHIFT 0x00000001
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x00000008L
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x00000003
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003F0L
#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x00000004
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x007F00000
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x00000014
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x0000001FF
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x00800000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x00000017
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x04000000
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x0000001A
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03FFFFFFL
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000L
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x0000001C
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x00000002L
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x00000001
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x00000001L
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x00000000
+#define CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK 0x0000FFF0L
+#define CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT 0x00000004
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK 0x00000200L
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT 0x00000000
+#define CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK 0x03FFFFFFL
+#define CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR__SHIFT 0x00000009
+
+#define CG_SSP__SST_MASK 0x0000FFFFL
+#define CG_SSP__SST__SHIFT 0x00000000
+#define CG_SSP__SSTU_MASK 0x000F0000L
+#define CG_SSP__SSTU__SHIFT 0x00000010
+
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK 0x00000007L
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT 0x00000000
+#define CG_THERMAL_CTRL__DIG_THERM_DPM_MASK 0x003FC000
+#define CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT 0x0000000E
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x0001FE00L
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x00000009
+#define CG_THERMAL_INT__DIG_THERM_INTH_MASK 0x0000FF00L
+#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x00000008
+#define CG_THERMAL_INT__DIG_THERM_INTL_MASK 0x00FF0000L
+#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x00000010
+#define CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK 0x01000000L
+#define CG_THERMAL_INT__THERM_INT_MASK_HIGH__SHIFT 0x00000018
+#define CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK 0x02000000
+#define CG_THERMAL_INT__THERM_INT_MASK_LOW__SHIFT 0x00000019
+
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK 0x0FF00000L
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT 0x00000014
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x000001FFL
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x00000000
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x0003fe00L
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x00000009
+
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0x000000FFL
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x00000000
+#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0x000000FFL
+#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x00000000
+#define CG_FDO_CTRL2__TMIN_MASK 0x000000FFL
+#define CG_FDO_CTRL2__TMIN__SHIFT 0x00000000
+#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x00003800L
+#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0x0000000B
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK 0xFE000000L
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT 0x00000019
+
+#define CG_TACH_CTRL__EDGE_PER_REV_MASK 0x00000007L
+#define CG_TACH_CTRL__EDGE_PER_REV__SHIFT 0x00000000
+#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xFFFFFFF8L
+#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x00000003
+#define CG_TACH_STATUS__TACH_PERIOD_MASK 0xFFFFFFFFL
+#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x00000000
+
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x00000001L
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x00000000
+#define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x00000002L
+#define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x00000001
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x00000004L
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x00000002
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x00000008L
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x00000003
+#define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x00000040L
+#define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x00000006
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x00000400L
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0x0000000A
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x00800000L
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x00000017
+
#define GPIOPAD_A__GPIO_A_MASK 0x7fffffffL
#define GPIOPAD_A__GPIO_A__SHIFT 0x00000000
#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffffL
@@ -195,6 +327,7 @@
#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x00000000
#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffffL
#define GPIOPAD_Y__GPIO_Y__SHIFT 0x00000000
+
#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x00000001L
#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x00000000
#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x0001fffeL
@@ -243,6 +376,37 @@
#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x00000000
#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffffL
#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x00000000
+
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0x0000FF00L
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x00000008
+
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x00000001L
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x00000000
+#define SCLK_PWRMGT_CNTL__SCLK_LOW_D1_MASK 0x00000002L
+#define SCLK_PWRMGT_CNTL__SCLK_LOW_D1__SHIFT 0x00000001
+#define SCLK_PWRMGT_CNTL__FIR_RESET_MASK 0x00000010L
+#define SCLK_PWRMGT_CNTL__FIR_RESET__SHIFT 0x00000004
+#define SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK 0x00000020L
+#define SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL__SHIFT 0x00000005
+#define SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK 0x00000040L
+#define SCLK_PWRMGT_CNTL__FIR_TREND_MODE__SHIFT 0x00000006
+#define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN_MASK 0x00000080L
+#define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN__SHIFT 0x00000007
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON_MASK 0x00000100L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON__SHIFT 0x00000008
+#define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF_MASK 0x00000200L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF__SHIFT 0x00000009
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF_MASK 0x00000400L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF__SHIFT 0x0000000A
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1_MASK 0x00000800L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1__SHIFT 0x0000000B
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2_MASK 0x00001000L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2__SHIFT 0x0000000C
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3_MASK 0x00002000L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3__SHIFT 0x0000000D
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x00004000L
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0x0000000E
+
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x00000001L
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x00000000
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x00000100L
@@ -285,6 +449,7 @@
#define SMC_RESP_1__SMC_RESP__SHIFT 0x00000000
#define SMC_RESP_2__SMC_RESP_MASK 0xffffffffL
#define SMC_RESP_2__SMC_RESP__SHIFT 0x00000000
+
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0x000ff000L
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0x0000000c
#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x00000010L
@@ -293,6 +458,8 @@
#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x00000003
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x00000002L
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x00000001
+#define SPLL_CNTL_MODE__SPLL_REFCLK_SEL_MASK 0x0C000000L
+#define SPLL_CNTL_MODE__SPLL_REFCLK_SEL__SHIFT 0x0000001A
#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000L
#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x0000001c
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x00000001L
@@ -303,10 +470,25 @@
#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x00000002
#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000L
#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x0000001d
+
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0x0f000000L
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x00000018
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000L
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x0000001c
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK 0x000000F0L
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT 0x00000004
+
+#define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0x000000FFL
+#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x00000000
+#define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0x0000FF00L
+#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x00000008
+
+#define MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK 0x000000FFL
+#define MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT 0x00000000
+#define MISC_CLK_CNTL__ZCLK_SEL_MASK 0x0000FF00L
+#define MISC_CLK_CNTL__ZCLK_SEL__SHIFT 0x00000008
+
#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x0000001fL
#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x00000000
#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0x0000ffe0L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
index 14574112c469..c4aaa86a95e2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
@@ -1147,6 +1147,22 @@
#define regUVD_DPG_LMA_CTL2_BASE_IDX 1
+// addressBlock: uvd_mmsch_dec
+// base address: 0x20d2c
+#define regMMSCH_VF_VMID 0x054b
+#define regMMSCH_VF_VMID_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_LO 0x054c
+#define regMMSCH_VF_CTX_ADDR_LO_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_HI 0x054d
+#define regMMSCH_VF_CTX_ADDR_HI_BASE_IDX 1
+#define regMMSCH_VF_CTX_SIZE 0x054e
+#define regMMSCH_VF_CTX_SIZE_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_HOST 0x0552
+#define regMMSCH_VF_MAILBOX_HOST_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_RESP 0x0553
+#define regMMSCH_VF_MAILBOX_RESP_BASE_IDX 1
+
+
// addressBlock: uvd_vcn_umsch_dec
// base address: 0x21500
#define regVCN_UMSCH_MES_CNTL 0x0740
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
index 5c119a6b87fb..bd7242e4e9c6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
@@ -5929,6 +5929,29 @@
#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR_MASK 0x0000FE00L
+// addressBlock: uvd_mmsch_dec
+//MMSCH_VF_VMID
+#define MMSCH_VF_VMID__VF_CTX_VMID__SHIFT 0x0
+#define MMSCH_VF_VMID__VF_GPCOM_VMID__SHIFT 0x5
+#define MMSCH_VF_VMID__VF_CTX_VMID_MASK 0x0000001FL
+#define MMSCH_VF_VMID__VF_GPCOM_VMID_MASK 0x000003E0L
+//MMSCH_VF_CTX_ADDR_LO
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO__SHIFT 0x6
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO_MASK 0xFFFFFFC0L
+//MMSCH_VF_CTX_ADDR_HI
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI__SHIFT 0x0
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI_MASK 0xFFFFFFFFL
+//MMSCH_VF_CTX_SIZE
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE__SHIFT 0x0
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_HOST
+#define MMSCH_VF_MAILBOX_HOST__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_HOST__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_RESP
+#define MMSCH_VF_MAILBOX_RESP__RESP__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL
+
+
// addressBlock: uvd_vcn_umsch_dec
//VCN_UMSCH_MES_CNTL
#define VCN_UMSCH_MES_CNTL__PIPE_ID__SHIFT 0x0
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index b78360a71bc9..52bac19fb404 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -4308,7 +4308,7 @@ typedef struct _ATOM_DPCD_INFO
// note2: From RV770, the memory is more than 32bit addressable, so we will change
// ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains
// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
-// (in offset to start of memory address) is KB aligned instead of byte aligend.
+// (in offset to start of memory address) is KB aligned instead of byte aligned.
// Note3:
/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged
constant across VGA or non VGA adapter,
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 0160d65f3f5e..2d1135bdc4b9 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -183,6 +183,7 @@ enum atom_dgpu_vram_type {
ATOM_DGPU_VRAM_TYPE_HBM2E = 0x61,
ATOM_DGPU_VRAM_TYPE_GDDR6 = 0x70,
ATOM_DGPU_VRAM_TYPE_HBM3 = 0x80,
+ ATOM_DGPU_VRAM_TYPE_HBM3E = 0x81,
};
enum atom_dp_vs_preemph_def{
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 2a9606118d89..0f7542d7074b 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -128,6 +128,7 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_CPU_CLK,
AMDGPU_PP_SENSOR_VDDNB,
AMDGPU_PP_SENSOR_VDDGFX,
+ AMDGPU_PP_SENSOR_VDDBOARD,
AMDGPU_PP_SENSOR_UVD_VCLK,
AMDGPU_PP_SENSOR_UVD_DCLK,
AMDGPU_PP_SENSOR_VCE_ECCLK,
@@ -429,6 +430,7 @@ struct amd_pm_funcs {
int (*set_pp_table)(void *handle, const char *buf, size_t size);
void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
+ int (*pause_power_profile)(void *handle, bool pause);
/* export to amdgpu */
struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
diff --git a/drivers/gpu/drm/amd/include/v11_structs.h b/drivers/gpu/drm/amd/include/v11_structs.h
index f8008270f813..3728389fc3be 100644
--- a/drivers/gpu/drm/amd/include/v11_structs.h
+++ b/drivers/gpu/drm/amd/include/v11_structs.h
@@ -535,8 +535,8 @@ struct v11_gfx_mqd {
uint32_t reserved_507; // offset: 507 (0x1FB)
uint32_t reserved_508; // offset: 508 (0x1FC)
uint32_t reserved_509; // offset: 509 (0x1FD)
- uint32_t reserved_510; // offset: 510 (0x1FE)
- uint32_t reserved_511; // offset: 511 (0x1FF)
+ uint32_t fence_address_lo; // offset: 510 (0x1FE)
+ uint32_t fence_address_hi; // offset: 511 (0x1FF)
};
struct v11_sdma_mqd {
@@ -1118,8 +1118,8 @@ struct v11_compute_mqd {
uint32_t reserved_443; // offset: 443 (0x1BB)
uint32_t reserved_444; // offset: 444 (0x1BC)
uint32_t reserved_445; // offset: 445 (0x1BD)
- uint32_t reserved_446; // offset: 446 (0x1BE)
- uint32_t reserved_447; // offset: 447 (0x1BF)
+ uint32_t fence_address_lo; // offset: 446 (0x1BE)
+ uint32_t fence_address_hi; // offset: 447 (0x1BF)
uint32_t gws_0_val; // offset: 448 (0x1C0)
uint32_t gws_1_val; // offset: 449 (0x1C1)
uint32_t gws_2_val; // offset: 450 (0x1C2)
diff --git a/drivers/gpu/drm/amd/include/v12_structs.h b/drivers/gpu/drm/amd/include/v12_structs.h
index 5eabab611b02..03a35f8a65b0 100644
--- a/drivers/gpu/drm/amd/include/v12_structs.h
+++ b/drivers/gpu/drm/amd/include/v12_structs.h
@@ -535,8 +535,8 @@ struct v12_gfx_mqd {
uint32_t reserved_507; // offset: 507 (0x1FB)
uint32_t reserved_508; // offset: 508 (0x1FC)
uint32_t reserved_509; // offset: 509 (0x1FD)
- uint32_t reserved_510; // offset: 510 (0x1FE)
- uint32_t reserved_511; // offset: 511 (0x1FF)
+ uint32_t fence_address_lo; // offset: 510 (0x1FE)
+ uint32_t fence_address_hi; // offset: 511 (0x1FF)
};
struct v12_sdma_mqd {
@@ -1118,8 +1118,8 @@ struct v12_compute_mqd {
uint32_t reserved_443; // offset: 443 (0x1BB)
uint32_t reserved_444; // offset: 444 (0x1BC)
uint32_t reserved_445; // offset: 445 (0x1BD)
- uint32_t reserved_446; // offset: 446 (0x1BE)
- uint32_t reserved_447; // offset: 447 (0x1BF)
+ uint32_t fence_address_lo; // offset: 446 (0x1BE)
+ uint32_t fence_address_hi; // offset: 447 (0x1BF)
uint32_t gws_0_val; // offset: 448 (0x1C0)
uint32_t gws_1_val; // offset: 449 (0x1C1)
uint32_t gws_2_val; // offset: 450 (0x1C2)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 81e9b443ca0a..2148c8db5a59 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -329,6 +329,34 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
return ret;
}
+bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool support_link_reset = false;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ support_link_reset = smu_link_reset_is_support(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return support_link_reset;
+}
+
+int amdgpu_dpm_link_reset(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = -EOPNOTSUPP;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_link_reset(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
enum PP_SMC_POWER_PROFILE type,
bool en)
@@ -349,6 +377,25 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
return ret;
}
+int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
+ bool pause)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (pp_funcs && pp_funcs->pause_power_profile) {
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->pause_power_profile(
+ adev->powerplay.pp_handle, pause);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
uint32_t pstate)
{
@@ -761,6 +808,21 @@ int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
return ret;
}
+int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_vcn(smu, inst_mask);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 922def51685b..edd9895b46c0 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1606,7 +1606,6 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
long throttling_logging_interval;
- unsigned long flags;
int ret = 0;
ret = kstrtol(buf, 0, &throttling_logging_interval);
@@ -1617,18 +1616,12 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
return -EINVAL;
if (throttling_logging_interval > 0) {
- raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
/*
* Reset the ratelimit timer internals.
* This can effectively restart the timer.
*/
- adev->throttling_logging_rs.interval =
- (throttling_logging_interval - 1) * HZ;
- adev->throttling_logging_rs.begin = 0;
- adev->throttling_logging_rs.printed = 0;
- adev->throttling_logging_rs.missed = 0;
- raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
-
+ ratelimit_state_reset_interval(&adev->throttling_logging_rs,
+ (throttling_logging_interval - 1) * HZ);
atomic_set(&adev->throttling_logging_enabled, 1);
} else {
atomic_set(&adev->throttling_logging_enabled, 0);
@@ -2944,6 +2937,23 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
return sysfs_emit(buf, "%d\n", vddgfx);
}
+static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ u32 vddboard;
+ int r;
+
+ /* get the voltage */
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&vddboard);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%d\n", vddboard);
+}
+
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -2951,6 +2961,12 @@ static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
return sysfs_emit(buf, "vddgfx\n");
}
+static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "vddboard\n");
+}
static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -3294,6 +3310,8 @@ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0)
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
@@ -3341,6 +3359,8 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_in0_label.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_label.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_label.dev_attr.attr,
&sensor_dev_attr_power1_average.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
@@ -3492,6 +3512,13 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
+ /* only a few boards support vddboard */
+ if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
+ amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&tmp) == -EOPNOTSUPP)
+ return 0;
+
/* no mclk on APUs other than gc 9,4,3*/
if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index f93d287dbf13..2c3c97587dd5 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -410,15 +410,19 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
enum PP_SMC_POWER_PROFILE type,
bool en);
+int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
+ bool pause);
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
+int amdgpu_dpm_link_reset(struct amdgpu_device *adev);
int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev);
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev);
+bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev);
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
@@ -605,5 +609,6 @@ ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
enum pp_pm_policy p_type, char *buf);
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask);
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 1c25f3023e93..4c0e976004ba 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -30,16 +30,32 @@
#include "amdgpu_atombios.h"
#include "amdgpu_dpm_internal.h"
#include "amd_pcie.h"
-#include "sid.h"
+#include "atom.h"
+#include "gfx_v6_0.h"
#include "r600_dpm.h"
+#include "sid.h"
#include "si_dpm.h"
-#include "atom.h"
#include "../include/pptable.h"
#include <linux/math64.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <legacy_dpm.h>
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
+#include"gmc/gmc_6_0_d.h"
+#include"gmc/gmc_6_0_sh_mask.h"
+
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
@@ -2193,7 +2209,7 @@ static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
if (xclk == 0)
return 0;
- cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
+ cac_window = RREG32(mmCG_CAC_CTRL) & CG_CAC_CTRL__CAC_WINDOW_MASK;
cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
wintime = (cac_window_size * 100) / xclk;
@@ -2489,19 +2505,19 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
if (adev->pm.dpm.sq_ramping_threshold == 0)
return -EINVAL;
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (SQ_POWER_THROTTLE__MAX_POWER_MASK >> SQ_POWER_THROTTLE__MAX_POWER__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (SQ_POWER_THROTTLE__MIN_POWER_MASK >> SQ_POWER_THROTTLE__MIN_POWER__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK >> SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK >> SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK >> SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -2510,14 +2526,17 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
enable_sq_ramping) {
- sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
- sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
- sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
- sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
- sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
+ sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER << SQ_POWER_THROTTLE__MAX_POWER__SHIFT;
+ sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MIN_POWER << SQ_POWER_THROTTLE__MIN_POWER__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA << SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_STI_SIZE << SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_LTI_RATIO << SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT;
} else {
- sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
- sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ sq_power_throttle |= SQ_POWER_THROTTLE__MAX_POWER_MASK |
+ SQ_POWER_THROTTLE__MIN_POWER_MASK;
+ sq_power_throttle2 |= SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |
+ SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |
+ SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
}
smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
@@ -2761,9 +2780,9 @@ static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
if (!cac_tables)
return -ENOMEM;
- reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
- reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
- WREG32(CG_CAC_CTRL, reg);
+ reg = RREG32(mmCG_CAC_CTRL) & ~CG_CAC_CTRL__CAC_WINDOW_MASK;
+ reg |= (si_pi->powertune_data->cac_window << CG_CAC_CTRL__CAC_WINDOW__SHIFT);
+ WREG32(mmCG_CAC_CTRL, reg);
si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
si_pi->dyn_powertune_data.dc_pwr_value =
@@ -2962,10 +2981,10 @@ static int si_init_smc_spll_table(struct amdgpu_device *adev)
ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
if (ret)
break;
- p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
- fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
- clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
- clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+ p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK) >> CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;
+ fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK) >> CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;
+ clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK) >> CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;
+ clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK) >> CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;
fb_div &= ~0x00001FFF;
fb_div >>= 1;
@@ -3669,10 +3688,10 @@ static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
- tmp = RREG32(MC_ARB_RAMCFG);
- row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
- column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
- bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ row = ((tmp & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT) + 10;
+ column = ((tmp & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT) + 8;
+ bank = ((tmp & MC_ARB_RAMCFG__NOOFBANK_MASK) >> MC_ARB_RAMCFG__NOOFBANK__SHIFT) + 2;
density = (1 << (row + column - 20 + bank)) * width;
@@ -3756,11 +3775,11 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
}
if (want_thermal_protection) {
- WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+ WREG32_P(mmCG_THERMAL_CTRL, dpm_event_src << CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT, ~CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK);
if (pi->thermal_protection)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
} else {
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
}
}
@@ -3785,20 +3804,20 @@ static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
static void si_start_dpm(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);
}
static void si_stop_dpm(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);
}
static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
{
if (enable)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);
else
- WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);
}
@@ -3838,7 +3857,7 @@ static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power
static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
PPSMC_Msg msg, u32 parameter)
{
- WREG32(SMC_SCRATCH0, parameter);
+ WREG32(mmSMC_SCRATCH0, parameter);
return amdgpu_si_send_msg_to_smc(adev, msg);
}
@@ -4023,12 +4042,12 @@ static void si_read_clock_registers(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
- si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
- si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
- si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
- si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
- si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
- si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+ si_pi->clock_registers.cg_spll_func_cntl = RREG32(mmCG_SPLL_FUNC_CNTL);
+ si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(mmCG_SPLL_FUNC_CNTL_3);
+ si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(mmCG_SPLL_FUNC_CNTL_4);
+ si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(mmCG_SPLL_SPREAD_SPECTRUM);
+ si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(mmCG_SPLL_SPREAD_SPECTRUM_2);
si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
@@ -4044,14 +4063,14 @@ static void si_enable_thermal_protection(struct amdgpu_device *adev,
bool enable)
{
if (enable)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
else
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
}
static void si_enable_acpi_power_management(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__STATIC_PM_EN_MASK, ~GENERAL_PWRMGT__STATIC_PM_EN_MASK);
}
#if 0
@@ -4132,9 +4151,9 @@ static void si_program_ds_registers(struct amdgpu_device *adev)
tmp = 0x1;
if (eg_pi->sclk_deep_sleep) {
- WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
- WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
- ~AUTOSCALE_ON_SS_CLEAR);
+ WREG32_P(mmMISC_CLK_CNTL, (tmp << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT), ~MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK);
+ WREG32_P(mmCG_SPLL_AUTOSCALE_CNTL, CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK,
+ ~CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK);
}
}
@@ -4143,18 +4162,18 @@ static void si_program_display_gap(struct amdgpu_device *adev)
u32 tmp, pipe;
int i;
- tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+ tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
if (adev->pm.dpm.new_active_crtc_count > 0)
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
else
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+ tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
if (adev->pm.dpm.new_active_crtc_count > 1)
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
else
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+ tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+ WREG32(mmCG_DISPLAY_GAP_CNTL, tmp);
tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
@@ -4189,10 +4208,10 @@ static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
if (enable) {
if (pi->sclk_ss)
- WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);
} else {
- WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
- WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+ WREG32_P(mmCG_SPLL_SPREAD_SPECTRUM, 0, ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);
}
}
@@ -4214,15 +4233,15 @@ static void si_setup_bsp(struct amdgpu_device *adev)
&pi->pbsu);
- pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
- pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+ pi->dsp = (pi->bsp << CG_BSP__BSP__SHIFT) | (pi->bsu << CG_BSP__BSU__SHIFT);
+ pi->psp = (pi->pbsp << CG_BSP__BSP__SHIFT) | (pi->pbsu << CG_BSP__BSU__SHIFT);
- WREG32(CG_BSP, pi->dsp);
+ WREG32(mmCG_BSP, pi->dsp);
}
static void si_program_git(struct amdgpu_device *adev)
{
- WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
+ WREG32_P(mmCG_GIT, R600_GICST_DFLT << CG_GIT__CG_GICST__SHIFT, ~CG_GIT__CG_GICST_MASK);
}
static void si_program_tp(struct amdgpu_device *adev)
@@ -4231,54 +4250,54 @@ static void si_program_tp(struct amdgpu_device *adev)
enum r600_td td = R600_TD_DFLT;
for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
- WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
+ WREG32(mmCG_FFCT_0 + i, (r600_utc[i] << CG_FFCT_0__UTC_0__SHIFT | r600_dtc[i] << CG_FFCT_0__DTC_0__SHIFT));
if (td == R600_TD_AUTO)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);
else
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);
if (td == R600_TD_UP)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);
if (td == R600_TD_DOWN)
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);
}
static void si_program_tpp(struct amdgpu_device *adev)
{
- WREG32(CG_TPC, R600_TPC_DFLT);
+ WREG32(mmCG_TPC, R600_TPC_DFLT);
}
static void si_program_sstp(struct amdgpu_device *adev)
{
- WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+ WREG32(mmCG_SSP, (R600_SSTU_DFLT << CG_SSP__SSTU__SHIFT | R600_SST_DFLT << CG_SSP__SST__SHIFT));
}
static void si_enable_display_gap(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+ u32 tmp = RREG32(mmCG_DISPLAY_GAP_CNTL);
- tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
- tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
- DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+ tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
+ tmp |= (R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT |
+ R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT);
- tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
- tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
- DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+ tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK);
+ tmp |= (R600_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT |
+ R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT);
+ WREG32(mmCG_DISPLAY_GAP_CNTL, tmp);
}
static void si_program_vc(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
- WREG32(CG_FTV, pi->vrc);
+ WREG32(mmCG_FTV, pi->vrc);
}
static void si_clear_vc(struct amdgpu_device *adev)
{
- WREG32(CG_FTV, 0);
+ WREG32(mmCG_FTV, 0);
}
static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
@@ -4735,7 +4754,7 @@ static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
u32 dram_rows;
u32 dram_refresh_rate;
u32 mc_arb_rfsh_rate;
- u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+ u32 tmp = (RREG32(mmMC_ARB_RAMCFG) & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT;
if (tmp >= 4)
dram_rows = 16384;
@@ -4909,7 +4928,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd);
- reg = CG_R(0xffff) | CG_L(0);
+ reg = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;
table->initialState.level.aT = cpu_to_be32(reg);
table->initialState.level.bSP = cpu_to_be32(pi->dsp);
table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
@@ -4935,10 +4954,13 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
table->initialState.level.dpm2.BelowSafeInc = 0;
table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ reg = SQ_POWER_THROTTLE__MIN_POWER_MASK |
+ SQ_POWER_THROTTLE__MAX_POWER_MASK;
table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |
+ SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |
+ SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
@@ -5057,8 +5079,8 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+ spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= 4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;
table->ACPIState.level.mclk.vDLL_CNTL =
cpu_to_be32(dll_cntl);
@@ -5102,10 +5124,10 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
table->ACPIState.level.dpm2.BelowSafeInc = 0;
table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ reg = SQ_POWER_THROTTLE__MIN_POWER_MASK | SQ_POWER_THROTTLE__MAX_POWER_MASK;
table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK | SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK | SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
@@ -5250,8 +5272,8 @@ static int si_init_smc_table(struct amdgpu_device *adev)
if (ret)
return ret;
- WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
- WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+ WREG32(mmCG_ULV_CONTROL, ulv->cg_ulv_control);
+ WREG32(mmCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
lane_width = amdgpu_get_pcie_lanes(adev);
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
@@ -5294,16 +5316,16 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
do_div(tmp, reference_clock);
fbdiv = (u32) tmp;
- spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
- spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
- spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+ spll_func_cntl &= ~(CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK | CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK);
+ spll_func_cntl |= dividers.ref_div << CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT;
+ spll_func_cntl |= dividers.post_div << CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+ spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= 2 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;
- spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
- spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
- spll_func_cntl_3 |= SPLL_DITHEN;
+ spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;
+ spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
if (pi->sclk_ss) {
struct amdgpu_atom_ss ss;
@@ -5314,12 +5336,12 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
- cg_spll_spread_spectrum &= ~CLK_S_MASK;
- cg_spll_spread_spectrum |= CLK_S(clk_s);
- cg_spll_spread_spectrum |= SSEN;
+ cg_spll_spread_spectrum &= ~CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK;
+ cg_spll_spread_spectrum |= clk_s << CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;
+ cg_spll_spread_spectrum |= CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
- cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
- cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+ cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK;
+ cg_spll_spread_spectrum_2 |= clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;
}
}
@@ -5485,7 +5507,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
if (pi->mclk_stutter_mode_threshold &&
(pl->mclk <= pi->mclk_stutter_mode_threshold) &&
!eg_pi->uvd_enabled &&
- (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+ (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
(adev->pm.dpm.new_active_crtc_count <= 2)) {
level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
}
@@ -5579,7 +5601,7 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
return -EINVAL;
if (state->performance_level_count < 2) {
- a_t = CG_R(0xffff) | CG_L(0);
+ a_t = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;
smc_state->levels[0].aT = cpu_to_be32(a_t);
return 0;
}
@@ -5600,13 +5622,13 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
}
- a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
- a_t |= CG_R(t_l * pi->bsp / 20000);
+ a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_AT__CG_R_MASK;
+ a_t |= (t_l * pi->bsp / 20000) << CG_AT__CG_R__SHIFT;
smc_state->levels[i].aT = cpu_to_be32(a_t);
high_bsp = (i == state->performance_level_count - 2) ?
pi->pbsp : pi->bsp;
- a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
+ a_t = (0xffff) << CG_AT__CG_R__SHIFT | (t_h * high_bsp / 20000) << CG_AT__CG_L__SHIFT;
smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
}
@@ -6180,9 +6202,9 @@ static int si_upload_mc_reg_table(struct amdgpu_device *adev,
static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
{
if (enable)
- WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK);
else
- WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK);
}
static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
@@ -6204,8 +6226,8 @@ static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
{
u32 speed_cntl;
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
- speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL) & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
+ speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
return (u16)speed_cntl;
}
@@ -6412,21 +6434,21 @@ static void si_dpm_setup_asic(struct amdgpu_device *adev)
static int si_thermal_enable_alert(struct amdgpu_device *adev,
bool enable)
{
- u32 thermal_int = RREG32(CG_THERMAL_INT);
+ u32 thermal_int = RREG32(mmCG_THERMAL_INT);
if (enable) {
PPSMC_Result result;
- thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
- WREG32(CG_THERMAL_INT, thermal_int);
+ thermal_int &= ~(CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK);
+ WREG32(mmCG_THERMAL_INT, thermal_int);
result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
if (result != PPSMC_Result_OK) {
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
return -EINVAL;
}
} else {
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
- WREG32(CG_THERMAL_INT, thermal_int);
+ thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32(mmCG_THERMAL_INT, thermal_int);
}
return 0;
@@ -6447,9 +6469,9 @@ static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
return -EINVAL;
}
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
- WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+ WREG32_P(mmCG_THERMAL_INT, (high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTH_MASK);
+ WREG32_P(mmCG_THERMAL_INT, (low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTL_MASK);
+ WREG32_P(mmCG_THERMAL_CTRL, (high_temp / 1000) << CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT, ~CG_THERMAL_CTRL__DIG_THERM_DPM_MASK);
adev->pm.dpm.thermal.min_temp = low_temp;
adev->pm.dpm.thermal.max_temp = high_temp;
@@ -6463,20 +6485,20 @@ static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
u32 tmp;
if (si_pi->fan_ctrl_is_in_default_mode) {
- tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+ tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
si_pi->fan_ctrl_default_mode = tmp;
- tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+ tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK) >> CG_FDO_CTRL2__TMIN__SHIFT;
si_pi->t_min = tmp;
si_pi->fan_ctrl_is_in_default_mode = false;
}
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(0);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+ tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(mode);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
}
static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
@@ -6495,7 +6517,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
return 0;
}
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
if (duty100 == 0) {
adev->pm.dpm.fan.ucode_fan_control = false;
@@ -6531,7 +6553,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
reference_clock) / 1600);
fan_table.fdo_max = cpu_to_be16((u16)duty100);
- tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+ tmp = (RREG32(mmCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK) >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
fan_table.temp_src = (uint8_t)tmp;
ret = amdgpu_si_copy_bytes_to_smc(adev,
@@ -6590,8 +6612,8 @@ static int si_dpm_get_fan_speed_pwm(void *handle,
if (adev->pm.no_fan)
return -ENOENT;
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
- duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+ duty = (RREG32(mmCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK) >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
if (duty100 == 0)
return -EINVAL;
@@ -6621,7 +6643,7 @@ static int si_dpm_set_fan_speed_pwm(void *handle,
if (speed > 255)
return -EINVAL;
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
if (duty100 == 0)
return -EINVAL;
@@ -6630,9 +6652,9 @@ static int si_dpm_set_fan_speed_pwm(void *handle,
do_div(tmp64, 255);
duty = (u32)tmp64;
- tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
- tmp |= FDO_STATIC_DUTY(duty);
- WREG32(CG_FDO_CTRL0, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
+ tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
+ WREG32(mmCG_FDO_CTRL0, tmp);
return 0;
}
@@ -6672,8 +6694,8 @@ static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode)
if (si_pi->fan_is_controlled_by_smc)
return 0;
- tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
- *fan_mode = (tmp >> FDO_PWM_MODE_SHIFT);
+ tmp = RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ *fan_mode = (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
return 0;
}
@@ -6691,7 +6713,7 @@ static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
if (adev->pm.fan_pulses_per_revolution == 0)
return -ENOENT;
- tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+ tach_period = (RREG32(mmCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK) >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
if (tach_period == 0)
return -ENOENT;
@@ -6720,9 +6742,9 @@ static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
si_fan_ctrl_stop_smc_fan_control(adev);
tach_period = 60 * xclk * 10000 / (8 * speed);
- tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
- tmp |= TARGET_PERIOD(tach_period);
- WREG32(CG_TACH_CTRL, tmp);
+ tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
+ tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
+ WREG32(mmCG_TACH_CTRL, tmp);
si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
@@ -6736,13 +6758,13 @@ static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
u32 tmp;
if (!si_pi->fan_ctrl_is_in_default_mode) {
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ tmp |= si_pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(si_pi->t_min);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+ tmp |= si_pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
si_pi->fan_ctrl_is_in_default_mode = true;
}
}
@@ -6760,14 +6782,14 @@ static void si_thermal_initialize(struct amdgpu_device *adev)
u32 tmp;
if (adev->pm.fan_pulses_per_revolution) {
- tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
- tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1);
- WREG32(CG_TACH_CTRL, tmp);
+ tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
+ tmp |= (adev->pm.fan_pulses_per_revolution -1) << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
+ WREG32(mmCG_TACH_CTRL, tmp);
}
- tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
- tmp |= TACH_PWM_RESP_RATE(0x28);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
+ tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
}
static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
@@ -7530,8 +7552,8 @@ static void si_dpm_debugfs_print_current_performance_level(void *handle,
struct si_ps *ps = si_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
+ (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;
if (current_index >= ps->performance_level_count) {
seq_printf(m, "invalid dpm profile %d\n", current_index);
@@ -7554,14 +7576,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
@@ -7571,14 +7593,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
@@ -7883,8 +7905,8 @@ static int si_dpm_get_temp(void *handle)
int actual_temp = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
- CTF_TEMP_SHIFT;
+ temp = (RREG32(mmCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
if (temp & 0x200)
actual_temp = 255;
@@ -8014,8 +8036,8 @@ static int si_dpm_read_sensor(void *handle, int idx,
struct si_ps *ps = si_get_ps(rps);
uint32_t sclk, mclk;
u32 pl_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
+ (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
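[Editorial sketch] The si_dpm.c hunks above all apply one conversion pattern: open-coded register names and field macros (for example SCLK_MUX_SEL(4) or FMAX_DUTY100_MASK) are replaced by the generated mm*/ix* offsets and the REG__FIELD_MASK / REG__FIELD__SHIFT definitions from the sh_mask style headers (the smu_6_0 / gfx_6_0 headers, as also pulled into si_smc.c below). A minimal user-space sketch of that read-modify-write idiom follows; the register array and the CG_FDO_CTRL2 field values are stand-ins for illustration, not the driver's actual register map.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the generated sh_mask style definitions.
 * Real values live in smu/smu_6_0_sh_mask.h; these are examples only. */
#define mmCG_FDO_CTRL2                       0x01
#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK      0x00000003
#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT    0x0
#define CG_FDO_CTRL2__TMIN_MASK              0x0000ff00
#define CG_FDO_CTRL2__TMIN__SHIFT            0x8

static uint32_t fake_regs[16];                    /* pretend MMIO space */
static uint32_t RREG32(uint32_t reg)              { return fake_regs[reg]; }
static void     WREG32(uint32_t reg, uint32_t v)  { fake_regs[reg] = v; }

/* Read-modify-write of one field, the pattern used throughout the patch:
 * clear the field with its _MASK, then OR in the new value shifted by __SHIFT. */
static void set_fdo_pwm_mode(uint32_t mode)
{
	uint32_t tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;

	tmp |= (mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT) &
	       CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	WREG32(mmCG_FDO_CTRL2, tmp);
}

int main(void)
{
	fake_regs[mmCG_FDO_CTRL2] = 0x2a00;       /* TMIN already programmed */
	set_fdo_pwm_mode(1);
	printf("CG_FDO_CTRL2 = 0x%08x\n", RREG32(mmCG_FDO_CTRL2));
	return 0;
}

The extraction direction uses the same idiom, (RREG32(reg) & FIELD_MASK) >> FIELD__SHIFT, as in the converted fan and thermal readback paths above.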
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
index 8f994ffa9cd1..4e65ab9e931c 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
@@ -30,6 +30,12 @@
#include "amdgpu_ucode.h"
#include "sislands_smc.h"
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
static int si_set_smc_sram_address(struct amdgpu_device *adev,
u32 smc_address, u32 limit)
{
@@ -38,8 +44,8 @@ static int si_set_smc_sram_address(struct amdgpu_device *adev,
if ((smc_address + 3) > limit)
return -EINVAL;
- WREG32(SMC_IND_INDEX_0, smc_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ WREG32(mmSMC_IND_INDEX_0, smc_address);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
return 0;
}
@@ -68,7 +74,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
@@ -83,7 +89,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- original_data = RREG32(SMC_IND_DATA_0);
+ original_data = RREG32(mmSMC_IND_DATA_0);
extra_shift = 8 * (4 - byte_count);
while (byte_count > 0) {
@@ -99,7 +105,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
}
done:
@@ -121,10 +127,10 @@ void amdgpu_si_reset_smc(struct amdgpu_device *adev)
{
u32 tmp;
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
RST_REG;
@@ -170,16 +176,16 @@ PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
if (!amdgpu_si_is_smc_running(adev))
return PPSMC_Result_Failed;
- WREG32(SMC_MESSAGE_0, msg);
+ WREG32(mmSMC_MESSAGE_0, msg);
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SMC_RESP_0);
+ tmp = RREG32(mmSMC_RESP_0);
if (tmp != 0)
break;
udelay(1);
}
- return (PPSMC_Result)RREG32(SMC_RESP_0);
+ return (PPSMC_Result)RREG32(mmSMC_RESP_0);
}
PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
@@ -225,18 +231,18 @@ int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
return -EINVAL;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, ucode_start_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+ WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
while (ucode_size >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
src += 4;
ucode_size -= 4;
}
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return 0;
@@ -251,7 +257,7 @@ int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
spin_lock_irqsave(&adev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
- *value = RREG32(SMC_IND_DATA_0);
+ *value = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return ret;
@@ -266,7 +272,7 @@ int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
spin_lock_irqsave(&adev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
- WREG32(SMC_IND_DATA_0, value);
+ WREG32(mmSMC_IND_DATA_0, value);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return ret;
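[Editorial sketch] The si_smc.c changes keep the existing indirect access scheme: SMC SRAM is reached through an index/data register pair (mmSMC_IND_INDEX_0 / mmSMC_IND_DATA_0), with SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK advancing the index automatically during the ucode upload loop. A hedged, self-contained model of that index/data idiom is below; the register numbers and the backing array are invented for the example.

#include <stdint.h>
#include <stdio.h>

/* Toy model of an index/data register pair in front of a private SRAM.
 * Register numbers and sizes are made up for the example. */
#define REG_IND_INDEX  0
#define REG_IND_DATA   1
#define REG_IND_CNTL   2
#define AUTO_INCREMENT 0x1

static uint32_t regs[3];
static uint32_t sram[64];             /* "SMC SRAM", word addressed here */

static void wreg(uint32_t r, uint32_t v)
{
	if (r == REG_IND_DATA) {
		sram[regs[REG_IND_INDEX] / 4] = v;
		if (regs[REG_IND_CNTL] & AUTO_INCREMENT)
			regs[REG_IND_INDEX] += 4;
	} else {
		regs[r] = v;
	}
}

/* Upload a big-endian byte stream, mirroring amdgpu_si_load_smc_ucode(). */
static void load_ucode(const uint8_t *src, size_t len, uint32_t start)
{
	wreg(REG_IND_INDEX, start);
	wreg(REG_IND_CNTL, AUTO_INCREMENT);    /* auto-increment on */
	while (len >= 4) {
		uint32_t data = (src[0] << 24) | (src[1] << 16) |
				(src[2] << 8) | src[3];
		wreg(REG_IND_DATA, data);
		src += 4;
		len -= 4;
	}
	wreg(REG_IND_CNTL, 0);                 /* auto-increment off again */
}

int main(void)
{
	const uint8_t fw[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	load_ucode(fw, sizeof(fw), 0);
	printf("sram[0]=0x%08x sram[1]=0x%08x\n", sram[0], sram[1]);
	return 0;
}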
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index 4bd92fd782be..8d40ed0f0e83 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -143,6 +143,10 @@ int atomctrl_initialize_mc_reg_table(
vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
+ if (!vram_info) {
+ pr_err("Could not retrieve the VramInfo table!");
+ return -EINVAL;
+ }
if (module_index >= vram_info->ucNumOfVRAMModule) {
pr_err("Invalid VramInfo table.");
@@ -180,6 +184,10 @@ int atomctrl_initialize_mc_reg_table_v2_2(
vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *)
smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
+ if (!vram_info) {
+ pr_err("Could not retrieve the VramInfo table!");
+ return -EINVAL;
+ }
if (module_index >= vram_info->ucNumOfVRAMModule) {
pr_err("Invalid VramInfo table.");
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
index 5a010cd38303..baf51cd82a35 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
@@ -46,42 +46,6 @@ static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
}
-int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
-{
- uint32_t data;
- uint32_t addr;
- uint8_t *dest_byte;
- uint8_t i, data_byte[4] = {0};
- uint32_t *pdata = (uint32_t *)&data_byte;
-
- PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
- PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
-
- addr = smc_start_address;
-
- while (byte_count >= 4) {
- smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
-
- *dest = PP_SMC_TO_HOST_UL(data);
-
- dest += 1;
- byte_count -= 4;
- addr += 4;
- }
-
- if (byte_count) {
- smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
- *pdata = PP_SMC_TO_HOST_UL(data);
- /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
- dest_byte = (uint8_t *)dest;
- for (i = 0; i < byte_count; i++)
- dest_byte[i] = data_byte[i];
- }
-
- return 0;
-}
-
-
int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
index e7303dc8c260..63e428ceaee4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
@@ -53,8 +53,6 @@ struct smu7_smumgr {
};
-int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
- uint32_t *dest, uint32_t byte_count, uint32_t limit);
int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit);
int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 033c3229b555..f24a1d8c77db 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2398,7 +2398,11 @@ static int smu_switch_power_profile(void *handle,
smu_power_profile_mode_get(smu, type);
else
smu_power_profile_mode_put(smu, type);
- ret = smu_bump_power_profile_mode(smu, NULL, 0);
+ /* don't switch the active workload when paused */
+ if (smu->pause_workload)
+ ret = 0;
+ else
+ ret = smu_bump_power_profile_mode(smu, NULL, 0);
if (ret) {
if (enable)
smu_power_profile_mode_put(smu, type);
@@ -2411,6 +2415,35 @@ static int smu_switch_power_profile(void *handle,
return 0;
}
+static int smu_pause_power_profile(void *handle,
+ bool pause)
+{
+ struct smu_context *smu = handle;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ int ret;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ smu->pause_workload = pause;
+
+ /* force to bootup default profile */
+ if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode)
+ ret = smu->ppt_funcs->set_power_profile_mode(smu,
+ workload_mask,
+ NULL,
+ 0);
+ else
+ ret = smu_bump_power_profile_mode(smu, NULL, 0);
+ return ret;
+ }
+
+ return 0;
+}
+
static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
struct smu_context *smu = handle;
@@ -3399,15 +3432,15 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
return ret;
}
-bool smu_mode2_reset_is_support(struct smu_context *smu)
+bool smu_link_reset_is_support(struct smu_context *smu)
{
bool ret = false;
if (!smu->pm_enabled)
return false;
- if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
- ret = smu->ppt_funcs->mode2_reset_is_support(smu);
+ if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support)
+ ret = smu->ppt_funcs->link_reset_is_support(smu);
return ret;
}
@@ -3442,6 +3475,19 @@ static int smu_mode2_reset(void *handle)
return ret;
}
+int smu_link_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
+ if (smu->ppt_funcs->link_reset)
+ ret = smu->ppt_funcs->link_reset(smu);
+
+ return ret;
+}
+
static int smu_enable_gfx_features(void *handle)
{
struct smu_context *smu = handle;
@@ -3733,6 +3779,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_pp_table = smu_sys_get_pp_table,
.set_pp_table = smu_sys_set_pp_table,
.switch_power_profile = smu_switch_power_profile,
+ .pause_power_profile = smu_pause_power_profile,
/* export to amdgpu */
.dispatch_tasks = smu_handle_dpm_task,
.load_firmware = smu_load_microcode,
@@ -3941,3 +3988,11 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+ if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
+ smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask);
+
+ return 0;
+}
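[Editorial sketch] The amdgpu_smu.c hunks introduce a pause_workload flag: smu_switch_power_profile() keeps updating the per-profile refcounts but skips reprogramming the SMU while paused, and smu_pause_power_profile() either forces the bootup-default profile mask (on pause) or rebuilds the active mask from the refcounts (on resume); the pause path is bypassed in manual and perf-deterministic levels. A small user-space sketch of that gating logic follows, ignoring the level check; the profile IDs and the apply() callback are placeholders, not the driver's real interface.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder profile IDs for the example. */
enum { PROFILE_BOOTUP_DEFAULT, PROFILE_FULLSCREEN_3D, PROFILE_COMPUTE, PROFILE_COUNT };

struct smu_sim {
	int  refcount[PROFILE_COUNT];   /* per-profile get/put counts */
	bool paused;                    /* mirrors smu->pause_workload */
	uint32_t programmed_mask;       /* what "hardware" currently runs */
};

static uint32_t active_mask(const struct smu_sim *smu)
{
	uint32_t mask = 1u << PROFILE_BOOTUP_DEFAULT;  /* default always set */

	for (int i = 0; i < PROFILE_COUNT; i++)
		if (smu->refcount[i])
			mask |= 1u << i;
	return mask;
}

static void apply(struct smu_sim *smu, uint32_t mask)
{
	smu->programmed_mask = mask;    /* stands in for set_power_profile_mode() */
}

/* Like smu_switch_power_profile(): track the request, but do not touch
 * the hardware while the workload is paused. */
static void switch_profile(struct smu_sim *smu, int type, bool enable)
{
	smu->refcount[type] += enable ? 1 : -1;
	if (!smu->paused)
		apply(smu, active_mask(smu));
}

/* Like smu_pause_power_profile(): pause forces the bootup default,
 * resume re-applies whatever the refcounts say. */
static void pause_profile(struct smu_sim *smu, bool pause)
{
	smu->paused = pause;
	if (pause)
		apply(smu, 1u << PROFILE_BOOTUP_DEFAULT);
	else
		apply(smu, active_mask(smu));
}

int main(void)
{
	struct smu_sim smu = { 0 };

	switch_profile(&smu, PROFILE_COMPUTE, true);
	pause_profile(&smu, true);
	switch_profile(&smu, PROFILE_FULLSCREEN_3D, true); /* remembered, not applied */
	pause_profile(&smu, false);
	printf("programmed mask = 0x%x\n", smu.programmed_mask);
	return 0;
}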
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3ba169639f54..d47e32ae4671 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -438,9 +438,11 @@ struct mclock_latency_table {
};
enum smu_reset_mode {
- SMU_RESET_MODE_0,
- SMU_RESET_MODE_1,
- SMU_RESET_MODE_2,
+ SMU_RESET_MODE_0,
+ SMU_RESET_MODE_1,
+ SMU_RESET_MODE_2,
+ SMU_RESET_MODE_3,
+ SMU_RESET_MODE_4,
};
enum smu_baco_state {
@@ -558,6 +560,7 @@ struct smu_context {
/* asic agnostic workload mask */
uint32_t workload_mask;
+ bool pause_workload;
/* default/user workload preference */
uint32_t power_profile_mode;
uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
@@ -1228,10 +1231,11 @@ struct pptable_funcs {
* @mode1_reset_is_support: Check if GPU supports mode1 reset.
*/
bool (*mode1_reset_is_support)(struct smu_context *smu);
+
/**
- * @mode2_reset_is_support: Check if GPU supports mode2 reset.
+ * @link_reset_is_support: Check if GPU supports link reset.
*/
- bool (*mode2_reset_is_support)(struct smu_context *smu);
+ bool (*link_reset_is_support)(struct smu_context *smu);
/**
* @mode1_reset: Perform mode1 reset.
@@ -1251,6 +1255,13 @@ struct pptable_funcs {
int (*enable_gfx_features)(struct smu_context *smu);
/**
+ * @link_reset: Perform link reset.
+ *
+ * The gfx device driver reset
+ */
+ int (*link_reset)(struct smu_context *smu);
+
+ /**
* @get_dpm_ultimate_freq: Get the hard frequency range of a clock
* domain in MHz.
*/
@@ -1382,6 +1393,11 @@ struct pptable_funcs {
bool (*reset_sdma_is_supported)(struct smu_context *smu);
/**
+ * @reset_vcn: message SMU to soft reset vcn instance.
+ */
+ int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);
+
+ /**
* @get_ecc_table: message SMU to get ECC INFO table.
*/
ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
@@ -1600,8 +1616,9 @@ int smu_get_power_limit(void *handle,
enum pp_power_type pp_power_type);
bool smu_mode1_reset_is_support(struct smu_context *smu);
-bool smu_mode2_reset_is_support(struct smu_context *smu);
+bool smu_link_reset_is_support(struct smu_context *smu);
int smu_mode1_reset(struct smu_context *smu);
+int smu_link_reset(struct smu_context *smu);
extern const struct amd_ip_funcs smu_ip_funcs;
@@ -1642,6 +1659,7 @@ int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index d26f35119a12..3d9e5e967c94 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -459,4 +459,11 @@ typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccGfxclkBelowHostLimit;
} VfMetricsTable_t;
+#pragma pack(push, 4)
+typedef struct {
+ // Telemetry
+ uint32_t InputTelemetryVoltageInmV;
+} StaticMetricsTable_t;
+#pragma pack(pop)
+
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 288b2576432b..41f268313613 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -94,7 +94,9 @@
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
#define PPSMC_MSG_SetThrottlingPolicy 0x44
#define PPSMC_MSG_ResetSDMA 0x4D
-#define PPSMC_Message_Count 0x4E
+#define PPSMC_MSG_ResetVCN 0x4E
+#define PPSMC_MSG_GetStaticMetricsTable 0x59
+#define PPSMC_Message_Count 0x5A
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index c9dee09395e3..eefdaa0b5df6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -277,6 +277,7 @@
__SMU_DUMMY_MAP(MALLPowerController), \
__SMU_DUMMY_MAP(MALLPowerState), \
__SMU_DUMMY_MAP(ResetSDMA), \
+ __SMU_DUMMY_MAP(ResetVCN), \
__SMU_DUMMY_MAP(GetStaticMetricsTable),
#undef __SMU_DUMMY_MAP
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index ed8304d82831..56ae555bb52a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -281,11 +281,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_11_0_dpm_table *single_dpm_table);
-int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value);
-
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu);
uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index cd03caffe317..4263798d716b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -112,6 +112,7 @@ struct smu_13_0_dpm_context {
uint32_t workload_policy_mask;
uint32_t dcef_min_ds_clk;
uint64_t caps;
+ uint32_t board_volt;
};
enum smu_13_0_power_state {
@@ -162,8 +163,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu);
int smu_v13_0_system_features_control(struct smu_context *smu,
bool en);
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count);
-
int smu_v13_0_set_allowed_mask(struct smu_context *smu);
int smu_v13_0_notify_display_change(struct smu_context *smu);
@@ -183,13 +182,6 @@ int smu_v13_0_disable_thermal_alert(struct smu_context *smu);
int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value);
-int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk);
-
-int
-smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request
- *clock_req);
-
uint32_t
smu_v13_0_get_fan_control_mode(struct smu_context *smu);
@@ -226,11 +218,6 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max, bool automatic);
-int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max);
-
int smu_v13_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level);
@@ -310,14 +297,6 @@ int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
uint32_t *value);
void smu_v13_0_interrupt_work(struct smu_context *smu);
-bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
-int smu_v13_0_12_get_max_metrics_size(void);
-int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
-int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
- MetricsMember_t member,
- uint32_t *value);
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table);
-extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
-extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
+void smu_v13_0_reset_custom_level(struct smu_context *smu);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 453952cdc353..9ad46f545d15 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1347,7 +1347,7 @@ static int arcturus_get_power_limit(struct smu_context *smu,
*default_power_limit = power_limit;
if (max_power_limit)
*max_power_limit = power_limit;
- /**
+ /*
* No lower bound is imposed on the limit. Any unreasonable limit set
* will result in frequent throttling.
*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 19a25fdc2f5b..115e3fa456bc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -3089,11 +3089,6 @@ static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
return 0;
}
-static bool sienna_cichlid_is_mode2_reset_supported(struct smu_context *smu)
-{
- return true;
-}
-
static int sienna_cichlid_mode2_reset(struct smu_context *smu)
{
int ret = 0, index;
@@ -3229,7 +3224,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings,
.set_config_table = sienna_cichlid_set_config_table,
.get_unique_id = sienna_cichlid_get_unique_id,
- .mode2_reset_is_support = sienna_cichlid_is_mode2_reset_supported,
.mode2_reset = sienna_cichlid_mode2_reset,
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 78391d8f35a9..78e4186d06cc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1204,7 +1204,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period;
- if (speed == 0)
+ if (!speed || speed > UINT_MAX/8)
return -EINVAL;
/*
* To prevent from possible overheat, some ASICs may have requirement
@@ -2059,45 +2059,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
return 0;
}
-int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value)
-{
- uint32_t level_count = 0;
- int ret = 0;
-
- if (!min_value && !max_value)
- return -EINVAL;
-
- if (min_value) {
- /* by default, level 0 clock value as min value */
- ret = smu_v11_0_get_dpm_freq_by_index(smu,
- clk_type,
- 0,
- min_value);
- if (ret)
- return ret;
- }
-
- if (max_value) {
- ret = smu_v11_0_get_dpm_level_count(smu,
- clk_type,
- &level_count);
- if (ret)
- return ret;
-
- ret = smu_v11_0_get_dpm_freq_by_index(smu,
- clk_type,
- level_count - 1,
- max_value);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 83163d7c7f00..6de653d2ed62 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1270,6 +1270,7 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
struct smu_13_0_dpm_table *gfx_table =
&dpm_context->dpm_tables.gfx_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ int r;
/* Disable determinism if switching to another mode */
if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
@@ -1282,7 +1283,11 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
return 0;
-
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ r = smu_v13_0_set_performance_level(smu, level);
+ if (!r)
+ smu_v13_0_reset_custom_level(smu);
+ return r;
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1423,7 +1428,11 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
min_clk = dpm_context->dpm_tables.gfx_table.min;
max_clk = dpm_context->dpm_tables.gfx_table.max;
- return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
+ ret = aldebaran_set_soft_freq_limited_range(
+ smu, SMU_GFXCLK, min_clk, max_clk, false);
+ if (ret)
+ return ret;
+ smu_v13_0_reset_custom_level(smu);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -1976,11 +1985,6 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
return true;
}
-static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
-{
- return true;
-}
-
static int aldebaran_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state)
{
@@ -2086,7 +2090,6 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = aldebaran_get_gpu_metrics,
.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
- .mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
.mode1_reset = aldebaran_mode1_reset,
.set_mp1_state = aldebaran_set_mp1_state,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index ba5a9012dbd5..a7167668d189 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -709,18 +709,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
return ret;
}
-int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
-{
- int ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
- if (ret)
- dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!");
-
- return ret;
-}
-
int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
struct smu_table *driver_table = &smu->smu_table.driver_table;
@@ -761,18 +749,6 @@ int smu_v13_0_set_tool_table_location(struct smu_context *smu)
return ret;
}
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
-{
- int ret = 0;
-
- if (!smu->pm_enabled)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
-
- return ret;
-}
-
int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -1073,56 +1049,6 @@ int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
}
-int
-smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request
- *clock_req)
-{
- enum amd_pp_clock_type clk_type = clock_req->clock_type;
- int ret = 0;
- enum smu_clk_type clk_select = 0;
- uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
- smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
- switch (clk_type) {
- case amd_pp_dcef_clock:
- clk_select = SMU_DCEFCLK;
- break;
- case amd_pp_disp_clock:
- clk_select = SMU_DISPCLK;
- break;
- case amd_pp_pixel_clock:
- clk_select = SMU_PIXCLK;
- break;
- case amd_pp_phy_clock:
- clk_select = SMU_PHYCLK;
- break;
- case amd_pp_mem_clock:
- clk_select = SMU_UCLK;
- break;
- default:
- dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
- ret = -EINVAL;
- break;
- }
-
- if (ret)
- goto failed;
-
- if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
- return 0;
-
- ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
-
- if (clk_select == SMU_UCLK)
- smu->hard_min_uclk_req_from_dal = clk_freq;
- }
-
-failed:
- return ret;
-}
-
uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
{
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
@@ -1647,45 +1573,6 @@ out:
return ret;
}
-int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
-{
- int ret = 0, clk_id = 0;
- uint32_t param;
-
- if (min <= 0 && max <= 0)
- return -EINVAL;
-
- if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
- return 0;
-
- clk_id = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_CLK,
- clk_type);
- if (clk_id < 0)
- return clk_id;
-
- if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
- param, NULL);
- if (ret)
- return ret;
- }
-
- if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
- param, NULL);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v13_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
@@ -2595,3 +2482,13 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
return ret;
}
+
+void smu_v13_0_reset_custom_level(struct smu_context *smu)
+{
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+
+ pstate_table->uclk_pstate.custom.min = 0;
+ pstate_table->uclk_pstate.custom.max = 0;
+ pstate_table->gfxclk_pstate.custom.min = 0;
+ pstate_table->gfxclk_pstate.custom.max = 0;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 238bd71baa6d..533d58e57d05 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -187,26 +187,6 @@ int smu_v13_0_12_get_max_metrics_size(void)
return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
}
-static int smu_v13_0_12_get_static_metrics_table(struct smu_context *smu)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
- uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
- struct smu_table *table = &smu_table->driver_table;
- int ret;
-
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
- if (ret) {
- dev_info(smu->adev->dev,
- "Failed to export static metrics table!\n");
- return ret;
- }
-
- amdgpu_asic_invalidate_hdp(smu->adev, NULL);
- memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
-
- return 0;
-}
-
int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -217,7 +197,7 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
int ret, i;
if (!pptable->Init) {
- ret = smu_v13_0_12_get_static_metrics_table(smu);
+ ret = smu_v13_0_6_get_static_metrics_table(smu);
if (ret)
return ret;
@@ -345,8 +325,8 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_7 *gpu_metrics =
- (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_8 *gpu_metrics =
+ (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
u8 num_jpeg_rings_gpu_metrics;
@@ -357,7 +337,7 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
memcpy(metrics, smu_table->metrics_table, sizeof(MetricsTable_t));
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(metrics->MaxSocketTemperature);
@@ -474,6 +454,16 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
SMUQ10_ROUND(metrics->GfxBusy[inst]);
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
idx++;
}
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index c478b3be37af..7d4ff09be7e8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -101,24 +101,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
#define MCA_BANK_IPID(_ip, _hwid, _type) \
[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
-#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
-
-enum smu_v13_0_6_caps {
- SMU_CAP(DPM),
- SMU_CAP(DPM_POLICY),
- SMU_CAP(OTHER_END_METRICS),
- SMU_CAP(SET_UCLK_MAX),
- SMU_CAP(PCIE_METRICS),
- SMU_CAP(MCA_DEBUG_MODE),
- SMU_CAP(PER_INST_METRICS),
- SMU_CAP(CTF_LIMIT),
- SMU_CAP(RMA_MSG),
- SMU_CAP(ACA_SYND),
- SMU_CAP(SDMA_RESET),
- SMU_CAP(STATIC_METRICS),
- SMU_CAP(ALL),
-};
-
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
uint16_t hwid;
@@ -194,6 +176,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0),
};
// clang-format on
@@ -299,8 +283,8 @@ static inline void smu_v13_0_6_cap_clear(struct smu_context *smu,
dpm_context->caps &= ~BIT_ULL(cap);
}
-static inline bool smu_v13_0_6_cap_supported(struct smu_context *smu,
- enum smu_v13_0_6_caps cap)
+bool smu_v13_0_6_cap_supported(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -353,6 +337,9 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
if (fw_ver >= 0x00561E00)
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+
+ if (fw_ver >= 0x00562500)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
@@ -402,6 +389,13 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
if (fw_ver < 0x00555600)
smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
+ if ((pgm == 7 && fw_ver >= 0x7550E00) ||
+ (pgm == 0 && fw_ver >= 0x00557E00))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ if (fw_ver >= 0x00557F01) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ }
}
if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
((pgm == 0) && (fw_ver >= 0x00557900)) ||
@@ -525,7 +519,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_7);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8);
smu_table->gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
@@ -747,9 +741,43 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
return pm_metrics->common_header.structure_size;
}
+static void smu_v13_0_6_fill_static_metrics_table(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ if (!static_metrics->InputTelemetryVoltageInmV) {
+ dev_warn(smu->adev->dev, "Invalid board voltage %d\n",
+ static_metrics->InputTelemetryVoltageInmV);
+ }
+
+ dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+}
+
+int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export static metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ return 0;
+}
+
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
@@ -759,7 +787,8 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
int ret, i, retry = 100;
uint32_t table_version;
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_setup_driver_pptable(smu);
/* Store one-time values in driver PPTable */
@@ -813,6 +842,12 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
pptable->Init = true;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ ret = smu_v13_0_6_get_static_metrics_table(smu);
+ if (ret)
+ return ret;
+ smu_v13_0_6_fill_static_metrics_table(smu, static_metrics);
+ }
}
return 0;
@@ -1142,7 +1177,8 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
if (ret)
return ret;
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_get_smu_metrics_data(smu, member, value);
/* For clocks with multiple instances, only report the first one */
@@ -1616,6 +1652,7 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor, void *data,
uint32_t *size)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
int ret = 0;
if (amdgpu_ras_intr_triggered())
@@ -1660,6 +1697,15 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VDDBOARD:
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) {
+ *(uint32_t *)data = dpm_context->board_volt;
+ *size = 4;
+ break;
+ } else {
+ ret = -EOPNOTSUPP;
+ break;
+ }
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
@@ -1927,7 +1973,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
return ret;
pstate_table->uclk_pstate.curr.max = uclk_table->max;
}
- pstate_table->uclk_pstate.custom.max = 0;
+ smu_v13_0_reset_custom_level(smu);
return 0;
case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -2140,7 +2186,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
smu, SMU_UCLK, min_clk, max_clk, false);
if (ret)
return ret;
- pstate_table->uclk_pstate.custom.max = 0;
+ smu_v13_0_reset_custom_level(smu);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -2486,8 +2532,8 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_7 *gpu_metrics =
- (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_8 *gpu_metrics =
+ (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
int version = smu_v13_0_6_get_metrics_version(smu);
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
@@ -2507,13 +2553,14 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
return ret;
}
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_get_gpu_metrics(smu, table);
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
@@ -2666,6 +2713,20 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc,
version)[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
+ SMUQ10_ROUND
+ (metrics_v0->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
+ SMUQ10_ROUND
+ (metrics_v0->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
+ SMUQ10_ROUND
+ (metrics_v0->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
+ SMUQ10_ROUND
+ (metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
idx++;
}
}
@@ -2844,14 +2905,29 @@ static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_link_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_4, NULL);
+ return ret;
+}
+
static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{
return true;
}
-static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
+static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu)
{
- return true;
+ struct amdgpu_device *adev = smu->adev;
+ int var = (adev->pdev->device & 0xF);
+
+ if (var == 0x1)
+ return true;
+
+ return false;
}
static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
@@ -2924,6 +3000,19 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ResetVCN, inst_mask, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "failed to send ResetVCN event with mask 0x%x\n",
+ inst_mask);
+ return ret;
+}
+
+
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -3586,9 +3675,10 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_pm_metrics = smu_v13_0_6_get_pm_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
- .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
+ .link_reset_is_support = smu_v13_0_6_is_link_reset_supported,
.mode1_reset = smu_v13_0_6_mode1_reset,
.mode2_reset = smu_v13_0_6_mode2_reset,
+ .link_reset = smu_v13_0_6_link_reset,
.wait_for_event = smu_v13_0_wait_for_event,
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
@@ -3596,6 +3686,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.send_rma_reason = smu_v13_0_6_send_rma_reason,
.reset_sdma = smu_v13_0_6_reset_sdma,
.reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported,
+ .dpm_reset_vcn = smu_v13_0_6_reset_vcn,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index 83745909e564..d151bcd0cca7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -26,6 +26,7 @@
#define SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL 0x2
#define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4
#define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2
+#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
typedef enum {
/*0*/ METRICS_VERSION_V0 = 0,
@@ -51,6 +52,34 @@ struct PPTable_t {
bool Init;
};
+enum smu_v13_0_6_caps {
+ SMU_CAP(DPM),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(OTHER_END_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(PER_INST_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND),
+ SMU_CAP(SDMA_RESET),
+ SMU_CAP(STATIC_METRICS),
+ SMU_CAP(HST_LIMIT_METRICS),
+ SMU_CAP(BOARD_VOLTAGE),
+ SMU_CAP(ALL),
+};
+
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
+bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
+int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu);
+bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
+int smu_v13_0_12_get_max_metrics_size(void);
+int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
+int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member, uint32_t *value);
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table);
+extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
+extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
#endif
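[Editorial sketch] The header now exposes the smu_v13_0_6_caps enum, the SMU_CAP() name macro and the cap query so the 13.0.12 code can share them; capabilities are tracked as single bits in the u64 caps word of the dpm context (BIT_ULL(cap) in the set/clear/supported helpers shown earlier), and new bits such as HST_LIMIT_METRICS and BOARD_VOLTAGE are gated on firmware version. A minimal sketch of that bitmask scheme, with the enum reduced to a few entries and an example firmware version:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Reduced capability list for the example; the real enum has many more entries. */
enum caps { CAP_DPM, CAP_STATIC_METRICS, CAP_HST_LIMIT_METRICS, CAP_BOARD_VOLTAGE, CAP_ALL };

struct dpm_ctx {
	uint64_t caps;                  /* one bit per capability */
};

static void cap_set(struct dpm_ctx *c, enum caps cap)   { c->caps |= 1ULL << cap; }
static void cap_clear(struct dpm_ctx *c, enum caps cap) { c->caps &= ~(1ULL << cap); }
static bool cap_supported(const struct dpm_ctx *c, enum caps cap)
{
	return c->caps & (1ULL << cap);
}

int main(void)
{
	struct dpm_ctx ctx = { 0 };
	uint32_t fw_ver = 0x00557F01;   /* example firmware version */

	cap_set(&ctx, CAP_DPM);
	/* Feature gating on firmware version, as in smu_v13_0_6_init_caps(). */
	if (fw_ver >= 0x00557F01) {
		cap_set(&ctx, CAP_STATIC_METRICS);
		cap_set(&ctx, CAP_BOARD_VOLTAGE);
	}

	printf("static metrics: %d, board voltage: %d, hst limit: %d\n",
	       cap_supported(&ctx, CAP_STATIC_METRICS),
	       cap_supported(&ctx, CAP_BOARD_VOLTAGE),
	       cap_supported(&ctx, CAP_HST_LIMIT_METRICS));
	return 0;
}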
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index da0663542e8a..242fbccdf844 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_AST
tristate "AST server chips"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
index 139ab00dee8f..2d3ad7610c2e 100644
--- a/drivers/gpu/drm/ast/ast_cursor.c
+++ b/drivers/gpu/drm/ast/ast_cursor.c
@@ -37,6 +37,7 @@
*/
/* define for signature structure */
+#define AST_HWC_SIGNATURE_SIZE SZ_32
#define AST_HWC_SIGNATURE_CHECKSUM 0x00
#define AST_HWC_SIGNATURE_SizeX 0x04
#define AST_HWC_SIGNATURE_SizeY 0x08
@@ -45,6 +46,21 @@
#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+static unsigned long ast_cursor_vram_size(void)
+{
+ return AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE;
+}
+
+long ast_cursor_vram_offset(struct ast_device *ast)
+{
+ unsigned long size = ast_cursor_vram_size();
+
+ if (size > ast->vram_size)
+ return -EINVAL;
+
+ return ALIGN_DOWN(ast->vram_size - size, SZ_8);
+}
+
static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, unsigned int height)
{
u32 csum = 0;
@@ -75,7 +91,7 @@ static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, un
static void ast_set_cursor_image(struct ast_device *ast, const u8 *src,
unsigned int width, unsigned int height)
{
- u8 __iomem *dst = ast->cursor_plane.base.vaddr;
+ u8 __iomem *dst = ast_plane_vaddr(&ast->cursor_plane.base);
u32 csum;
csum = ast_cursor_calculate_checksum(src, width, height);
@@ -177,7 +193,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
struct ast_device *ast = to_ast_device(plane->dev);
struct drm_rect damage;
u64 dst_off = ast_plane->offset;
- u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
+ u8 __iomem *dst = ast_plane_vaddr(ast_plane); /* TODO: Use mapping abstraction properly */
u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
unsigned int offset_x, offset_y;
u16 x, y;
@@ -274,25 +290,16 @@ int ast_cursor_plane_init(struct ast_device *ast)
struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane;
struct ast_plane *ast_plane = &ast_cursor_plane->base;
struct drm_plane *cursor_plane = &ast_plane->base;
- size_t size;
- void __iomem *vaddr;
- u64 offset;
+ unsigned long size;
+ long offset;
int ret;
- /*
- * Allocate backing storage for cursors. The BOs are permanently
- * pinned to the top end of the VRAM.
- */
-
- size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
+ size = ast_cursor_vram_size();
+ offset = ast_cursor_vram_offset(ast);
+ if (offset < 0)
+ return offset;
- if (ast->vram_fb_available < size)
- return -ENOMEM;
-
- vaddr = ast->vram + ast->vram_fb_available - size;
- offset = ast->vram_fb_available - size;
-
- ret = ast_plane_init(dev, ast_plane, vaddr, offset, size,
+ ret = ast_plane_init(dev, ast_plane, offset, size,
0x01, &ast_cursor_plane_funcs,
ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
NULL, DRM_PLANE_TYPE_CURSOR);
@@ -303,7 +310,5 @@ int ast_cursor_plane_init(struct ast_device *ast)
drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(cursor_plane);
- ast->vram_fb_available -= size;
-
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index d2c2605d2728..2ee402096cd9 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -112,12 +112,9 @@ enum ast_config_mode {
#define AST_MAX_HWC_WIDTH 64
#define AST_MAX_HWC_HEIGHT 64
-
#define AST_HWC_PITCH (AST_MAX_HWC_WIDTH * SZ_2)
#define AST_HWC_SIZE (AST_MAX_HWC_HEIGHT * AST_HWC_PITCH)
-#define AST_HWC_SIGNATURE_SIZE 32
-
/*
* Planes
*/
@@ -125,7 +122,6 @@ enum ast_config_mode {
struct ast_plane {
struct drm_plane base;
- void __iomem *vaddr;
u64 offset;
unsigned long size;
};
@@ -183,7 +179,6 @@ struct ast_device {
void __iomem *vram;
unsigned long vram_base;
unsigned long vram_size;
- unsigned long vram_fb_available;
struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */
@@ -340,14 +335,6 @@ static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 i
__ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val);
}
-#define AST_VIDMEM_SIZE_8M 0x00800000
-#define AST_VIDMEM_SIZE_16M 0x01000000
-#define AST_VIDMEM_SIZE_32M 0x02000000
-#define AST_VIDMEM_SIZE_64M 0x04000000
-#define AST_VIDMEM_SIZE_128M 0x08000000
-
-#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
-
struct ast_vbios_stdtable {
u8 misc;
u8 seq[4];
@@ -440,6 +427,7 @@ int ast_vga_output_init(struct ast_device *ast);
int ast_sil164_output_init(struct ast_device *ast);
/* ast_cursor.c */
+long ast_cursor_vram_offset(struct ast_device *ast);
int ast_cursor_plane_init(struct ast_device *ast);
/* ast dp501 */
@@ -454,11 +442,12 @@ int ast_astdp_output_init(struct ast_device *ast);
/* ast_mode.c */
int ast_mode_config_init(struct ast_device *ast);
int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
- void __iomem *vaddr, u64 offset, unsigned long size,
+ u64 offset, unsigned long size,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
const uint64_t *format_modifiers,
enum drm_plane_type type);
+void __iomem *ast_plane_vaddr(struct ast_plane *ast);
#endif
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 6dfe6d9777d4..0bc140319464 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -35,36 +35,35 @@
static u32 ast_get_vram_size(struct ast_device *ast)
{
- u8 jreg;
u32 vram_size;
+ u8 vgacr99, vgacraa;
- vram_size = AST_VIDMEM_DEFAULT_SIZE;
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xaa, 0xff);
- switch (jreg & 3) {
+ vgacraa = ast_get_index_reg(ast, AST_IO_VGACRI, 0xaa);
+ switch (vgacraa & AST_IO_VGACRAA_VGAMEM_SIZE_MASK) {
case 0:
- vram_size = AST_VIDMEM_SIZE_8M;
+ vram_size = SZ_8M;
break;
case 1:
- vram_size = AST_VIDMEM_SIZE_16M;
+ vram_size = SZ_16M;
break;
case 2:
- vram_size = AST_VIDMEM_SIZE_32M;
+ vram_size = SZ_32M;
break;
case 3:
- vram_size = AST_VIDMEM_SIZE_64M;
+ vram_size = SZ_64M;
break;
}
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0x99, 0xff);
- switch (jreg & 0x03) {
+ vgacr99 = ast_get_index_reg(ast, AST_IO_VGACRI, 0x99);
+ switch (vgacr99 & AST_IO_VGACR99_VGAMEM_RSRV_MASK) {
case 1:
- vram_size -= 0x100000;
+ vram_size -= SZ_1M;
break;
case 2:
- vram_size -= 0x200000;
+ vram_size -= SZ_2M;
break;
case 3:
- vram_size -= 0x400000;
+ vram_size -= SZ_4M;
break;
}
@@ -93,7 +92,6 @@ int ast_mm_init(struct ast_device *ast)
ast->vram_base = base;
ast->vram_size = vram_size;
- ast->vram_fb_available = vram_size;
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c3b950675485..1de832964e92 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -51,6 +51,26 @@
#define AST_LUT_SIZE 256
+#define AST_PRIMARY_PLANE_MAX_OFFSET (BIT(16) - 1)
+
+static unsigned long ast_fb_vram_offset(void)
+{
+ return 0; // with shmem, the primary plane is always at offset 0
+}
+
+static unsigned long ast_fb_vram_size(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ unsigned long offset = ast_fb_vram_offset(); // starts at offset
+ long cursor_offset = ast_cursor_vram_offset(ast); // ends at cursor offset
+
+ if (cursor_offset < 0)
+ cursor_offset = ast->vram_size; // no cursor; it's all ours
+ if (drm_WARN_ON_ONCE(dev, offset > cursor_offset))
+ return 0; // cannot legally happen; signal error
+ return cursor_offset - offset;
+}
+
static inline void ast_load_palette_index(struct ast_device *ast,
u8 index, u8 red, u8 green,
u8 blue)
@@ -439,7 +459,7 @@ static void ast_wait_for_vretrace(struct ast_device *ast)
*/
int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
- void __iomem *vaddr, u64 offset, unsigned long size,
+ u64 offset, unsigned long size,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
@@ -448,7 +468,6 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
{
struct drm_plane *plane = &ast_plane->base;
- ast_plane->vaddr = vaddr;
ast_plane->offset = offset;
ast_plane->size = size;
@@ -457,6 +476,13 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
type, NULL);
}
+void __iomem *ast_plane_vaddr(struct ast_plane *ast_plane)
+{
+ struct ast_device *ast = to_ast_device(ast_plane->base.dev);
+
+ return ast->vram + ast_plane->offset;
+}
+
/*
* Primary plane
*/
@@ -503,7 +529,7 @@ static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src
struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
- struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr);
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane_vaddr(ast_plane));
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
@@ -576,12 +602,12 @@ static int ast_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
{
struct ast_plane *ast_plane = to_ast_plane(plane);
- if (plane->state && plane->state->fb && ast_plane->vaddr) {
+ if (plane->state && plane->state->fb) {
sb->format = plane->state->fb->format;
sb->width = plane->state->fb->width;
sb->height = plane->state->fb->height;
sb->pitch[0] = plane->state->fb->pitches[0];
- iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane->vaddr);
+ iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane_vaddr(ast_plane));
return 0;
}
return -ENODEV;
@@ -608,13 +634,11 @@ static int ast_primary_plane_init(struct ast_device *ast)
struct drm_device *dev = &ast->base;
struct ast_plane *ast_primary_plane = &ast->primary_plane;
struct drm_plane *primary_plane = &ast_primary_plane->base;
- void __iomem *vaddr = ast->vram;
- u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
- unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
- unsigned long size = ast->vram_fb_available - cursor_size;
+ u64 offset = ast_fb_vram_offset();
+ unsigned long size = ast_fb_vram_size(ast);
int ret;
- ret = ast_plane_init(dev, ast_primary_plane, vaddr, offset, size,
+ ret = ast_plane_init(dev, ast_primary_plane, offset, size,
0x01, &ast_primary_plane_funcs,
ast_primary_plane_formats, ARRAY_SIZE(ast_primary_plane_formats),
NULL, DRM_PLANE_TYPE_PRIMARY);
@@ -922,9 +946,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
/*
* Concurrent operations could possibly trigger a call to
- * drm_connector_helper_funcs.get_modes by trying to read the
- * display modes. Protect access to I/O registers by acquiring
- * the I/O-register lock. Released in atomic_flush().
+ * drm_connector_helper_funcs.get_modes by reading the display
+ * modes. Protect access to registers by acquiring the modeset
+ * lock.
*/
mutex_lock(&ast->modeset_lock);
drm_atomic_helper_commit_tail(state);
@@ -938,16 +962,20 @@ static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs =
static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
- static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);
struct ast_device *ast = to_ast_device(dev);
- unsigned long fbsize, fbpages, max_fbpages;
-
- max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
-
- fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
- fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
-
- if (fbpages > max_fbpages)
+ unsigned long max_fb_size = ast_fb_vram_size(ast);
+ u64 pitch;
+
+ if (drm_WARN_ON_ONCE(dev, !info))
+ return MODE_ERROR; /* driver bug */
+
+ pitch = drm_format_info_min_pitch(info, 0, mode->hdisplay);
+ if (!pitch)
+ return MODE_BAD_WIDTH;
+ if (pitch > AST_PRIMARY_PLANE_MAX_OFFSET)
+ return MODE_BAD_WIDTH; /* maximum programmable pitch */
+ if (pitch > max_fb_size / mode->vdisplay)
return MODE_MEM;
return MODE_OK;
@@ -1018,10 +1046,7 @@ int ast_mode_config_init(struct ast_device *ast)
return ret;
drm_mode_config_reset(dev);
-
- ret = drmm_kms_helper_poll_init(dev);
- if (ret)
- return ret;
+ drmm_kms_helper_poll_init(dev);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 91e85e457bdf..37568cf3822c 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -1075,16 +1075,16 @@ static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *par
switch (param->vram_size) {
default:
- case AST_VIDMEM_SIZE_8M:
+ case SZ_8M:
param->dram_config |= 0x00;
break;
- case AST_VIDMEM_SIZE_16M:
+ case SZ_16M:
param->dram_config |= 0x04;
break;
- case AST_VIDMEM_SIZE_32M:
+ case SZ_32M:
param->dram_config |= 0x08;
break;
- case AST_VIDMEM_SIZE_64M:
+ case SZ_64M:
param->dram_config |= 0x0c;
break;
}
@@ -1446,16 +1446,16 @@ static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *par
switch (param->vram_size) {
default:
- case AST_VIDMEM_SIZE_8M:
+ case SZ_8M:
param->dram_config |= 0x00;
break;
- case AST_VIDMEM_SIZE_16M:
+ case SZ_16M:
param->dram_config |= 0x04;
break;
- case AST_VIDMEM_SIZE_32M:
+ case SZ_32M:
param->dram_config |= 0x08;
break;
- case AST_VIDMEM_SIZE_64M:
+ case SZ_64M:
param->dram_config |= 0x0c;
break;
}
@@ -1635,19 +1635,19 @@ static void ast_post_chip_2300(struct ast_device *ast)
switch (temp & 0x0c) {
default:
case 0x00:
- param.vram_size = AST_VIDMEM_SIZE_8M;
+ param.vram_size = SZ_8M;
break;
case 0x04:
- param.vram_size = AST_VIDMEM_SIZE_16M;
+ param.vram_size = SZ_16M;
break;
case 0x08:
- param.vram_size = AST_VIDMEM_SIZE_32M;
+ param.vram_size = SZ_32M;
break;
case 0x0c:
- param.vram_size = AST_VIDMEM_SIZE_64M;
+ param.vram_size = SZ_64M;
break;
}
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index bb2cc1d8b84e..e15adaf3a80e 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -30,9 +30,11 @@
#define AST_IO_VGACRI (0x54)
#define AST_IO_VGACR80_PASSWORD (0xa8)
+#define AST_IO_VGACR99_VGAMEM_RSRV_MASK GENMASK(1, 0)
#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1)
#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2)
#define AST_IO_VGACRA3_DVO_ENABLED BIT(7)
+#define AST_IO_VGACRAA_VGAMEM_SIZE_MASK GENMASK(1, 0)
#define AST_IO_VGACRB6_HSYNC_OFF BIT(0)
#define AST_IO_VGACRB6_VSYNC_OFF BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 09a1be234f71..b9e0ca85226a 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -16,6 +16,7 @@ config DRM_AUX_BRIDGE
tristate
depends on DRM_BRIDGE && OF
select AUXILIARY_BUS
+ select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
help
Simple transparent bridge that is used by several non-DRM drivers to
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 050dae338ffe..1257009e850c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -948,13 +948,14 @@ static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
}
static int adv7511_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
int ret = 0;
if (adv->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, adv->next_bridge, bridge,
flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 83d711ee3a2e..f3fe47b12edc 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -143,35 +143,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345)
if (err)
return err;
- /*
- * Power up the sink (DP_SET_POWER register is only available on DPCD
- * v1.1 and later).
- */
- if (anx6345->dpcd[DP_DPCD_REV] >= 0x11) {
- err = drm_dp_dpcd_readb(&anx6345->aux, DP_SET_POWER, &dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
- err);
- return err;
- }
-
- dpcd[0] &= ~DP_SET_POWER_MASK;
- dpcd[0] |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(&anx6345->aux, DP_SET_POWER, dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n",
- err);
- return err;
- }
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
+ drm_dp_link_power_up(&anx6345->aux, anx6345->dpcd[DP_DPCD_REV]);
/* Possibly enable downspread on the sink */
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
@@ -517,6 +489,7 @@ static const struct drm_connector_funcs anx6345_connector_funcs = {
};
static int anx6345_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
@@ -553,7 +526,7 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge,
anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx6345->connector,
- bridge->encoder);
+ encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
goto connector_cleanup;
@@ -691,9 +664,10 @@ static int anx6345_i2c_probe(struct i2c_client *client)
struct device *dev;
int i, err;
- anx6345 = devm_kzalloc(&client->dev, sizeof(*anx6345), GFP_KERNEL);
- if (!anx6345)
- return -ENOMEM;
+ anx6345 = devm_drm_bridge_alloc(&client->dev, struct anx6345, bridge,
+ &anx6345_bridge_funcs);
+ if (IS_ERR(anx6345))
+ return PTR_ERR(anx6345);
mutex_init(&anx6345->lock);
@@ -765,7 +739,6 @@ static int anx6345_i2c_probe(struct i2c_client *client)
/* Look for supported chip ID */
anx6345_poweron(anx6345);
if (anx6345_get_chip_id(anx6345)) {
- anx6345->bridge.funcs = &anx6345_bridge_funcs;
drm_bridge_add(&anx6345->bridge);
return 0;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index f74694bb9c50..a83020d6576f 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -656,35 +656,7 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- /*
- * Power up the sink (DP_SET_POWER register is only available on DPCD
- * v1.1 and later).
- */
- if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
- err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
- err);
- return err;
- }
-
- dpcd[0] &= ~DP_SET_POWER_MASK;
- dpcd[0] |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n",
- err);
- return err;
- }
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
+ drm_dp_link_power_up(&anx78xx->aux, anx78xx->dpcd[DP_DPCD_REV]);
/* Possibly enable downspread on the sink */
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
@@ -888,6 +860,7 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = {
};
static int anx78xx_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
@@ -924,7 +897,7 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge,
anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx78xx->connector,
- bridge->encoder);
+ encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
goto connector_cleanup;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 071168aa0c3b..a761941bc3c2 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -838,10 +838,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
int ret;
/* Keep the panel disabled while we configure video */
- if (dp->plat_data->panel) {
- if (drm_panel_disable(dp->plat_data->panel))
- DRM_ERROR("failed to disable the panel\n");
- }
+ drm_panel_disable(dp->plat_data->panel);
ret = analogix_dp_train_link(dp);
if (ret) {
@@ -863,13 +860,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
}
/* Safe to enable the panel now */
- if (dp->plat_data->panel) {
- ret = drm_panel_enable(dp->plat_data->panel);
- if (ret) {
- DRM_ERROR("failed to enable the panel\n");
- return ret;
- }
- }
+ drm_panel_enable(dp->plat_data->panel);
/* Check whether panel supports fast training */
ret = analogix_dp_fast_link_train_detection(dp);
@@ -955,67 +946,15 @@ static int analogix_dp_disable_psr(struct analogix_dp_device *dp)
return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
}
-/*
- * This function is a bit of a catch-all for panel preparation, hopefully
- * simplifying the logic of functions that need to prepare/unprepare the panel
- * below.
- *
- * If @prepare is true, this function will prepare the panel. Conversely, if it
- * is false, the panel will be unprepared.
- *
- * If @is_modeset_prepare is true, the function will disregard the current state
- * of the panel and either prepare/unprepare the panel based on @prepare. Once
- * it finishes, it will update dp->panel_is_modeset to reflect the current state
- * of the panel.
- */
-static int analogix_dp_prepare_panel(struct analogix_dp_device *dp,
- bool prepare, bool is_modeset_prepare)
-{
- int ret = 0;
-
- if (!dp->plat_data->panel)
- return 0;
-
- mutex_lock(&dp->panel_lock);
-
- /*
- * Exit early if this is a temporary prepare/unprepare and we're already
- * modeset (since we neither want to prepare twice or unprepare early).
- */
- if (dp->panel_is_modeset && !is_modeset_prepare)
- goto out;
-
- if (prepare)
- ret = drm_panel_prepare(dp->plat_data->panel);
- else
- ret = drm_panel_unprepare(dp->plat_data->panel);
-
- if (ret)
- goto out;
-
- if (is_modeset_prepare)
- dp->panel_is_modeset = prepare;
-
-out:
- mutex_unlock(&dp->panel_lock);
- return ret;
-}
-
static int analogix_dp_get_modes(struct drm_connector *connector)
{
struct analogix_dp_device *dp = to_dp(connector);
const struct drm_edid *drm_edid;
- int ret, num_modes = 0;
+ int num_modes = 0;
if (dp->plat_data->panel) {
num_modes += drm_panel_get_modes(dp->plat_data->panel, connector);
} else {
- ret = analogix_dp_prepare_panel(dp, true, false);
- if (ret) {
- DRM_ERROR("Failed to prepare panel (%d)\n", ret);
- return 0;
- }
-
drm_edid = drm_edid_read_ddc(connector, &dp->aux.ddc);
drm_edid_connector_update(&dp->connector, drm_edid);
@@ -1024,10 +963,6 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
num_modes += drm_edid_connector_add_modes(&dp->connector);
drm_edid_free(drm_edid);
}
-
- ret = analogix_dp_prepare_panel(dp, false, false);
- if (ret)
- DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
}
if (dp->plat_data->get_modes)
@@ -1082,24 +1017,13 @@ analogix_dp_detect(struct drm_connector *connector, bool force)
{
struct analogix_dp_device *dp = to_dp(connector);
enum drm_connector_status status = connector_status_disconnected;
- int ret;
if (dp->plat_data->panel)
return connector_status_connected;
- ret = analogix_dp_prepare_panel(dp, true, false);
- if (ret) {
- DRM_ERROR("Failed to prepare panel (%d)\n", ret);
- return connector_status_disconnected;
- }
-
if (!analogix_dp_detect_hpd(dp))
status = connector_status_connected;
- ret = analogix_dp_prepare_panel(dp, false, false);
- if (ret)
- DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
-
return status;
}
@@ -1113,10 +1037,10 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = {
};
static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct analogix_dp_device *dp = bridge->driver_private;
- struct drm_encoder *encoder = dp->encoder;
struct drm_connector *connector = NULL;
int ret = 0;
@@ -1203,7 +1127,6 @@ static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
- int ret;
crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
@@ -1214,9 +1137,7 @@ static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
if (old_crtc_state && old_crtc_state->self_refresh_active)
return;
- ret = analogix_dp_prepare_panel(dp, true, true);
- if (ret)
- DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+ drm_panel_prepare(dp->plat_data->panel);
}
static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
@@ -1296,17 +1217,11 @@ static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
{
struct analogix_dp_device *dp = bridge->driver_private;
- int ret;
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
- if (dp->plat_data->panel) {
- if (drm_panel_disable(dp->plat_data->panel)) {
- DRM_ERROR("failed to disable the panel\n");
- return;
- }
- }
+ drm_panel_disable(dp->plat_data->panel);
disable_irq(dp->irq);
@@ -1314,9 +1229,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
pm_runtime_put_sync(dp->dev);
- ret = analogix_dp_prepare_panel(dp, false, true);
- if (ret)
- DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+ drm_panel_unprepare(dp->plat_data->panel);
dp->fast_train_enable = false;
dp->psr_supported = false;
@@ -1505,6 +1418,10 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
video_info->max_link_rate = 0x0A;
video_info->max_lane_count = 0x04;
break;
+ case RK3588_EDP:
+ video_info->max_link_rate = 0x14;
+ video_info->max_lane_count = 0x04;
+ break;
case EXYNOS_DP:
/*
* NOTE: this property parsing code is used for
@@ -1540,6 +1457,26 @@ out:
return ret;
}
+static int analogix_dpaux_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us)
+{
+ struct analogix_dp_device *dp = to_dp(aux);
+ int val;
+ int ret;
+
+ if (dp->force_hpd)
+ return 0;
+
+ pm_runtime_get_sync(dp->dev);
+
+ ret = readx_poll_timeout(analogix_dp_get_plug_in_status, dp, val, !val,
+ wait_us / 100, wait_us);
+
+ pm_runtime_mark_last_busy(dp->dev);
+ pm_runtime_put_autosuspend(dp->dev);
+
+ return ret;
+}
+
struct analogix_dp_device *
analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
{
@@ -1560,9 +1497,6 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
- mutex_init(&dp->panel_lock);
- dp->panel_is_modeset = false;
-
/*
* platform dp driver needs container_of the plat_data to get
* the driver private data, so we need to store the pointer of
@@ -1625,10 +1559,10 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
* that we can get the current state of the GPIO.
*/
dp->irq = gpiod_to_irq(dp->hpd_gpiod);
- irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+ irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN;
} else {
dp->irq = platform_get_irq(pdev, 0);
- irq_flags = 0;
+ irq_flags = IRQF_NO_AUTOEN;
}
if (dp->irq == -ENXIO) {
@@ -1645,7 +1579,18 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
dev_err(&pdev->dev, "failed to request irq\n");
goto err_disable_clk;
}
- disable_irq(dp->irq);
+
+ dp->aux.name = "DP-AUX";
+ dp->aux.transfer = analogix_dpaux_transfer;
+ dp->aux.wait_hpd_asserted = analogix_dpaux_wait_hpd_asserted;
+ dp->aux.dev = dp->dev;
+ drm_dp_aux_init(&dp->aux);
+
+ pm_runtime_use_autosuspend(dp->dev);
+ pm_runtime_set_autosuspend_delay(dp->dev, 100);
+ ret = devm_pm_runtime_enable(dp->dev);
+ if (ret)
+ goto err_disable_clk;
return dp;
@@ -1681,6 +1626,7 @@ int analogix_dp_resume(struct analogix_dp_device *dp)
if (dp->plat_data->power_on)
dp->plat_data->power_on(dp->plat_data);
+ phy_set_mode(dp->phy, PHY_MODE_DP);
phy_power_on(dp->phy);
analogix_dp_init_dp(dp);
@@ -1696,25 +1642,12 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
dp->drm_dev = drm_dev;
dp->encoder = dp->plat_data->encoder;
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_use_autosuspend(dp->dev);
- pm_runtime_set_autosuspend_delay(dp->dev, 100);
- pm_runtime_enable(dp->dev);
- } else {
- ret = analogix_dp_resume(dp);
- if (ret)
- return ret;
- }
-
- dp->aux.name = "DP-AUX";
- dp->aux.transfer = analogix_dpaux_transfer;
- dp->aux.dev = dp->dev;
dp->aux.drm_dev = drm_dev;
ret = drm_dp_aux_register(&dp->aux);
if (ret) {
DRM_ERROR("failed to register AUX (%d)\n", ret);
- goto err_disable_pm_runtime;
+ return ret;
}
ret = analogix_dp_create_bridge(drm_dev, dp);
@@ -1727,13 +1660,6 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
err_unregister_aux:
drm_dp_aux_unregister(&dp->aux);
-err_disable_pm_runtime:
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_dont_use_autosuspend(dp->dev);
- pm_runtime_disable(dp->dev);
- } else {
- analogix_dp_suspend(dp);
- }
return ret;
}
@@ -1744,19 +1670,9 @@ void analogix_dp_unbind(struct analogix_dp_device *dp)
analogix_dp_bridge_disable(dp->bridge);
dp->connector.funcs->destroy(&dp->connector);
- if (dp->plat_data->panel) {
- if (drm_panel_unprepare(dp->plat_data->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
+ drm_panel_unprepare(dp->plat_data->panel);
drm_dp_aux_unregister(&dp->aux);
-
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_dont_use_autosuspend(dp->dev);
- pm_runtime_disable(dp->dev);
- } else {
- analogix_dp_suspend(dp);
- }
}
EXPORT_SYMBOL_GPL(analogix_dp_unbind);
@@ -1782,6 +1698,20 @@ int analogix_dp_stop_crc(struct drm_connector *connector)
}
EXPORT_SYMBOL_GPL(analogix_dp_stop_crc);
+struct analogix_dp_plat_data *analogix_dp_aux_to_plat_data(struct drm_dp_aux *aux)
+{
+ struct analogix_dp_device *dp = to_dp(aux);
+
+ return dp->plat_data;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_aux_to_plat_data);
+
+struct drm_dp_aux *analogix_dp_get_aux(struct analogix_dp_device *dp)
+{
+ return &dp->aux;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_get_aux);
+
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Analogix DP Core Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index 774d11574b09..2b54120ba4a3 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -169,9 +169,6 @@ struct analogix_dp_device {
bool fast_train_enable;
bool psr_supported;
- struct mutex panel_lock;
- bool panel_is_modeset;
-
struct analogix_dp_plat_data *plat_data;
};
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 3afc73c858c4..38fd8d5014d2 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -11,6 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
#include <drm/bridge/analogix_dp.h>
@@ -513,10 +514,24 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
{
u32 reg;
+ int ret;
reg = bwtype;
if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62))
writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ phy_cfg.dp.link_rate =
+ drm_dp_bw_code_to_link_rate(dp->link_train.link_rate) / 100;
+ phy_cfg.dp.set_rate = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
@@ -530,9 +545,22 @@ void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count)
{
u32 reg;
+ int ret;
reg = count;
writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ phy_cfg.dp.lanes = dp->link_train.lane_count;
+ phy_cfg.dp.set_lanes = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
@@ -546,10 +574,34 @@ void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp)
{
u8 lane;
+ int ret;
for (lane = 0; lane < dp->link_train.lane_count; lane++)
writel(dp->link_train.training_lane[lane],
dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ for (lane = 0; lane < dp->link_train.lane_count; lane++) {
+ u8 training_lane = dp->link_train.training_lane[lane];
+ u8 vs, pe;
+
+ vs = (training_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ pe = (training_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ phy_cfg.dp.voltage[lane] = vs;
+ phy_cfg.dp.pre[lane] = pe;
+ }
+
+ phy_cfg.dp.set_voltages = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane)
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 0b97b66de577..8a9079c2ed5c 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1257,10 +1257,10 @@ static void anx7625_power_on(struct anx7625_data *ctx)
usleep_range(11000, 12000);
/* Power on pin enable */
- gpiod_set_value(ctx->pdata.gpio_p_on, 1);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_p_on, 1);
usleep_range(10000, 11000);
/* Power reset pin enable */
- gpiod_set_value(ctx->pdata.gpio_reset, 1);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_reset, 1);
usleep_range(10000, 11000);
DRM_DEV_DEBUG_DRIVER(dev, "power on !\n");
@@ -1280,9 +1280,9 @@ static void anx7625_power_standby(struct anx7625_data *ctx)
return;
}
- gpiod_set_value(ctx->pdata.gpio_reset, 0);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_reset, 0);
usleep_range(1000, 1100);
- gpiod_set_value(ctx->pdata.gpio_p_on, 0);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_p_on, 0);
usleep_range(1000, 1100);
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->pdata.supplies),
@@ -1814,9 +1814,6 @@ static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
DRM_DEV_DEBUG_DRIVER(dev, "sink detect\n");
- if (ctx->pdata.panel_bridge)
- return connector_status_connected;
-
return ctx->hpd_status ? connector_status_connected :
connector_status_disconnected;
}
@@ -2141,6 +2138,7 @@ static void hdcp_check_work_func(struct work_struct *work)
}
static int anx7625_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
@@ -2159,7 +2157,7 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
}
if (ctx->pdata.panel_bridge) {
- err = drm_bridge_attach(bridge->encoder,
+ err = drm_bridge_attach(encoder,
ctx->pdata.panel_bridge,
&ctx->bridge, flags);
if (err)
@@ -2474,6 +2472,22 @@ static const struct drm_edid *anx7625_bridge_edid_read(struct drm_bridge *bridge
return anx7625_edid_read(ctx);
}
+static void anx7625_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = ctx->dev;
+
+ pm_runtime_get_sync(dev);
+}
+
+static void anx7625_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = ctx->dev;
+
+ pm_runtime_put_sync(dev);
+}
+
static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.attach = anx7625_bridge_attach,
.detach = anx7625_bridge_detach,
@@ -2487,6 +2501,8 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.detect = anx7625_bridge_detect,
.edid_read = anx7625_bridge_edid_read,
+ .hpd_enable = anx7625_bridge_hpd_enable,
+ .hpd_disable = anx7625_bridge_hpd_disable,
};
static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
@@ -2568,12 +2584,6 @@ static const struct dev_pm_ops anx7625_pm_ops = {
anx7625_runtime_pm_resume, NULL)
};
-static void anx7625_runtime_disable(void *data)
-{
- pm_runtime_dont_use_autosuspend(data);
- pm_runtime_disable(data);
-}
-
static int anx7625_link_bridge(struct drm_dp_aux *aux)
{
struct anx7625_data *platform = container_of(aux, struct anx7625_data, aux);
@@ -2590,9 +2600,8 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux)
platform->bridge.of_node = dev->of_node;
if (!anx7625_of_panel_on_aux_bus(dev))
platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
- if (!platform->pdata.panel_bridge)
- platform->bridge.ops |= DRM_BRIDGE_OP_HPD |
- DRM_BRIDGE_OP_DETECT;
+ if (!platform->pdata.panel_bridge || !anx7625_of_panel_on_aux_bus(dev))
+ platform->bridge.ops |= DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_DETECT;
platform->bridge.type = platform->pdata.panel_bridge ?
DRM_MODE_CONNECTOR_eDP :
DRM_MODE_CONNECTOR_DisplayPort;
@@ -2707,11 +2716,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
goto free_wq;
}
- pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
pm_suspend_ignore_children(dev, true);
- ret = devm_add_action_or_reset(dev, anx7625_runtime_disable, dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
goto free_wq;
@@ -2771,7 +2779,6 @@ static void anx7625_i2c_remove(struct i2c_client *client)
if (platform->hdcp_workqueue) {
cancel_delayed_work(&platform->hdcp_work);
- flush_workqueue(platform->hdcp_workqueue);
destroy_workqueue(platform->hdcp_workqueue);
}
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index 015983c015e5..c179b86d208f 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -86,6 +86,7 @@ struct drm_aux_bridge_data {
};
static int drm_aux_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct drm_aux_bridge_data *data;
@@ -95,7 +96,7 @@ static int drm_aux_bridge_attach(struct drm_bridge *bridge,
data = container_of(bridge, struct drm_aux_bridge_data, bridge);
- return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge,
+ return drm_bridge_attach(encoder, data->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index 48f297c78ee6..b3f588b71a7d 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -156,6 +156,7 @@ void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status sta
EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify);
static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index c7a0247e06ad..b022dd6e6b6e 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -425,6 +425,17 @@
#define DSI_NULL_FRAME_OVERHEAD 6
#define DSI_EOT_PKT_SIZE 4
+struct cdns_dsi_bridge_state {
+ struct drm_bridge_state base;
+ struct cdns_dsi_cfg dsi_cfg;
+};
+
+static inline struct cdns_dsi_bridge_state *
+to_cdns_dsi_bridge_state(struct drm_bridge_state *bridge_state)
+{
+ return container_of(bridge_state, struct cdns_dsi_bridge_state, base);
+}
+
static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
{
return container_of(input, struct cdns_dsi, input);
@@ -568,15 +579,18 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
unsigned long dsi_hss_hsa_hse_hbp;
unsigned int nlanes = output->dev->lanes;
+ int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock);
int ret;
ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
if (ret)
return ret;
- phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000,
- mipi_dsi_pixel_format_to_bpp(output->dev->format),
- nlanes, phy_cfg);
+ ret = phy_mipi_dphy_get_default_config(mode_clock * 1000,
+ mipi_dsi_pixel_format_to_bpp(output->dev->format),
+ nlanes, phy_cfg);
+ if (ret)
+ return ret;
ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
if (ret)
@@ -605,6 +619,7 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
}
static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
@@ -617,7 +632,7 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, output->bridge, bridge,
+ return drm_bridge_attach(encoder, output->bridge, bridge,
flags);
}
@@ -655,7 +670,8 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -675,11 +691,17 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
pm_runtime_put(dsi->base.dev);
}
-static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
+ dsi->phy_initialized = false;
+ dsi->link_initialized = false;
+ phy_power_off(dsi->dphy);
+ phy_exit(dsi->dphy);
+
pm_runtime_put(dsi->base.dev);
}
@@ -752,32 +774,59 @@ static void cdns_dsi_init_link(struct cdns_dsi *dsi)
dsi->link_initialized = true;
}
-static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_output *output = &dsi->output;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct cdns_dsi_bridge_state *dsi_state;
+ struct drm_bridge_state *new_bridge_state;
struct drm_display_mode *mode;
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
+ struct drm_connector *connector;
unsigned long tx_byte_period;
struct cdns_dsi_cfg dsi_cfg;
- u32 tmp, reg_wakeup, div;
+ u32 tmp, reg_wakeup, div, status;
int nlanes;
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
+ new_bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+ if (WARN_ON(!new_bridge_state))
+ return;
+
+ dsi_state = to_cdns_dsi_bridge_state(new_bridge_state);
+ dsi_cfg = dsi_state->dsi_cfg;
+
if (dsi->platform_ops && dsi->platform_ops->enable)
dsi->platform_ops->enable(dsi);
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->adjusted_mode;
nlanes = output->dev->lanes;
- WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false));
-
cdns_dsi_hs_init(dsi);
cdns_dsi_init_link(dsi);
+ /*
+ * Now that the DSI Link and DSI Phy are initialized,
+ * wait for the CLK and Data Lanes to be ready.
+ */
+ tmp = CLK_LANE_RDY;
+ for (int i = 0; i < nlanes; i++)
+ tmp |= DATA_LANE_RDY(i);
+
+ if (readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
+ (tmp == (status & tmp)), 100, 500000))
+ dev_err(dsi->base.dev,
+ "Timed Out: DSI-DPhy Clock and Data Lanes not ready.\n");
+
writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
dsi->regs + VID_HSIZE1);
writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact),
@@ -892,7 +941,8 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
writel(tmp, dsi->regs + MCTL_MAIN_EN);
}
-static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -904,13 +954,109 @@ static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
cdns_dsi_hs_init(dsi);
}
+static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+ struct cdns_dsi_output *output = &dsi->output;
+ u32 *input_fmts;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = drm_mipi_dsi_get_input_bus_fmt(output->dev->format);
+ if (!input_fmts[0])
+ return NULL;
+
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+ struct cdns_dsi_bridge_state *dsi_state = to_cdns_dsi_bridge_state(bridge_state);
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ struct cdns_dsi_cfg *dsi_cfg = &dsi_state->dsi_cfg;
+
+ return cdns_dsi_check_conf(dsi, mode, dsi_cfg, false);
+}
+
+static struct drm_bridge_state *
+cdns_dsi_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_bridge_state *dsi_state, *old_dsi_state;
+ struct drm_bridge_state *bridge_state;
+
+ if (WARN_ON(!bridge->base.state))
+ return NULL;
+
+ bridge_state = drm_priv_to_bridge_state(bridge->base.state);
+ old_dsi_state = to_cdns_dsi_bridge_state(bridge_state);
+
+ dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
+ if (!dsi_state)
+ return NULL;
+
+ __drm_atomic_helper_bridge_duplicate_state(bridge, &dsi_state->base);
+
+ memcpy(&dsi_state->dsi_cfg, &old_dsi_state->dsi_cfg,
+ sizeof(dsi_state->dsi_cfg));
+
+ return &dsi_state->base;
+}
+
+static void
+cdns_dsi_bridge_atomic_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ struct cdns_dsi_bridge_state *dsi_state;
+
+ dsi_state = to_cdns_dsi_bridge_state(state);
+
+ kfree(dsi_state);
+}
+
+static struct drm_bridge_state *
+cdns_dsi_bridge_atomic_reset(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_bridge_state *dsi_state;
+
+ dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
+ if (!dsi_state)
+ return NULL;
+
+ memset(dsi_state, 0, sizeof(*dsi_state));
+ dsi_state->base.bridge = bridge;
+
+ return &dsi_state->base;
+}
+
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
.attach = cdns_dsi_bridge_attach,
.mode_valid = cdns_dsi_bridge_mode_valid,
- .disable = cdns_dsi_bridge_disable,
- .pre_enable = cdns_dsi_bridge_pre_enable,
- .enable = cdns_dsi_bridge_enable,
- .post_disable = cdns_dsi_bridge_post_disable,
+ .atomic_disable = cdns_dsi_bridge_atomic_disable,
+ .atomic_pre_enable = cdns_dsi_bridge_atomic_pre_enable,
+ .atomic_enable = cdns_dsi_bridge_atomic_enable,
+ .atomic_post_disable = cdns_dsi_bridge_atomic_post_disable,
+ .atomic_check = cdns_dsi_bridge_atomic_check,
+ .atomic_reset = cdns_dsi_bridge_atomic_reset,
+ .atomic_duplicate_state = cdns_dsi_bridge_atomic_duplicate_state,
+ .atomic_destroy_state = cdns_dsi_bridge_atomic_destroy_state,
+ .atomic_get_input_bus_fmts = cdns_dsi_bridge_get_input_bus_fmts,
};
static int cdns_dsi_attach(struct mipi_dsi_host *host,
@@ -920,8 +1066,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
struct cdns_dsi_output *output = &dsi->output;
struct cdns_dsi_input *input = &dsi->input;
struct drm_bridge *bridge;
- struct drm_panel *panel;
- struct device_node *np;
int ret;
/*
@@ -939,26 +1083,10 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
/*
* The host <-> device link might be described using an OF-graph
* representation, in this case we extract the device of_node from
- * this representation, otherwise we use dsidev->dev.of_node which
- * should have been filled by the core.
+ * this representation.
*/
- np = of_graph_get_remote_node(dsi->base.dev->of_node, DSI_OUTPUT_PORT,
- dev->channel);
- if (!np)
- np = of_node_get(dev->dev.of_node);
-
- panel = of_drm_find_panel(np);
- if (!IS_ERR(panel)) {
- bridge = drm_panel_bridge_add_typed(panel,
- DRM_MODE_CONNECTOR_DSI);
- } else {
- bridge = of_drm_find_bridge(dev->dev.of_node);
- if (!bridge)
- bridge = ERR_PTR(-EINVAL);
- }
-
- of_node_put(np);
-
+ bridge = devm_drm_of_get_bridge(dsi->base.dev, dsi->base.dev->of_node,
+ DSI_OUTPUT_PORT, dev->channel);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
dev_err(host->dev, "failed to add DSI device %s (err = %d)",
@@ -968,7 +1096,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
output->dev = dev;
output->bridge = bridge;
- output->panel = panel;
/*
* The DSI output has been properly configured, we can now safely
@@ -984,12 +1111,9 @@ static int cdns_dsi_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dev)
{
struct cdns_dsi *dsi = to_cdns_dsi(host);
- struct cdns_dsi_output *output = &dsi->output;
struct cdns_dsi_input *input = &dsi->input;
drm_bridge_remove(&input->bridge);
- if (output->panel)
- drm_panel_bridge_remove(output->bridge);
return 0;
}
@@ -1152,7 +1276,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev)
clk_disable_unprepare(dsi->dsi_sys_clk);
clk_disable_unprepare(dsi->dsi_p_clk);
reset_control_assert(dsi->dsi_p_rst);
- dsi->link_initialized = false;
return 0;
}
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
index ca7ea2da635c..5db5dbbbcaad 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
@@ -10,7 +10,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include <linux/bits.h>
#include <linux/completion.h>
@@ -21,7 +20,6 @@ struct reset_control;
struct cdns_dsi_output {
struct mipi_dsi_device *dev;
- struct drm_panel *panel;
struct drm_bridge *bridge;
union phy_configure_opts phy_opts;
};
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 81fad14c2cd5..b431e7efd1f0 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -546,76 +546,6 @@ out:
}
/**
- * cdns_mhdp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-static
-int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-/**
- * cdns_mhdp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-static
-int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
- struct cdns_mhdp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-/**
* cdns_mhdp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
@@ -1453,7 +1383,7 @@ static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
- cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
+ drm_dp_link_power_up(&mhdp->aux, mhdp->link.revision);
cdns_mhdp_fill_sink_caps(mhdp, dpcd);
@@ -1500,7 +1430,7 @@ static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
if (mhdp->plugged)
- cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
+ drm_dp_link_power_down(&mhdp->aux, mhdp->link.revision);
mhdp->link_up = false;
}
@@ -1726,6 +1656,7 @@ static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
}
static int cdns_mhdp_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
@@ -2305,7 +2236,7 @@ static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
* If everything looks fine, just return, as we don't handle
* DP IRQs.
*/
- if (ret > 0 &&
+ if (!ret &&
drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
goto out;
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 81f7c701961f..634c5b030667 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -580,11 +580,13 @@ static int chipone_dsi_host_attach(struct chipone *icn)
return ret;
}
-static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int chipone_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
struct chipone *icn = bridge_to_chipone(bridge);
- return drm_bridge_attach(bridge->encoder, icn->panel_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, icn->panel_bridge, bridge, flags);
}
#define MAX_INPUT_SEL_FORMATS 1
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index da17f0978a79..210c45c1efd4 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -268,13 +268,14 @@ static void ch7033_hpd_event(void *arg, enum drm_connector_status status)
}
static int ch7033_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
struct drm_connector *connector = &priv->connector;
int ret;
- ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, priv->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
return ret;
@@ -305,7 +306,7 @@ static int ch7033_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- return drm_connector_attach_encoder(&priv->connector, bridge->encoder);
+ return drm_connector_attach_encoder(&priv->connector, encoder);
}
static void ch7033_bridge_detach(struct drm_bridge *bridge)
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 72bc508d4e6e..badd2c7f91a1 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -34,6 +34,7 @@ to_display_connector(struct drm_bridge *bridge)
}
static int display_connector_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
@@ -209,9 +210,10 @@ static int display_connector_probe(struct platform_device *pdev)
const char *label = NULL;
int ret;
- conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL);
- if (!conn)
- return -ENOMEM;
+ conn = devm_drm_bridge_alloc(&pdev->dev, struct display_connector, bridge,
+ &display_connector_bridge_funcs);
+ if (IS_ERR(conn))
+ return PTR_ERR(conn);
platform_set_drvdata(pdev, conn);
@@ -361,7 +363,6 @@ static int display_connector_probe(struct platform_device *pdev)
}
}
- conn->bridge.funcs = &display_connector_bridge_funcs;
conn->bridge.of_node = pdev->dev.of_node;
if (conn->bridge.ddc)
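
The display-connector hunk above is the first of several probe paths in this diff that switch from devm_kzalloc() plus a manual funcs assignment to devm_drm_bridge_alloc(), which allocates the containing structure and wires up drm_bridge_funcs in one step. A minimal sketch of the pattern under that assumption; the example_lvds driver shown here is hypothetical:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <drm/drm_bridge.h>

	struct example_lvds {
		struct drm_bridge bridge;
	};

	static const struct drm_bridge_funcs example_lvds_funcs = {
		/* .attach, .atomic_enable, ... */
	};

	static int example_lvds_probe(struct platform_device *pdev)
	{
		struct example_lvds *ex;

		/*
		 * Allocates the container and sets ex->bridge.funcs in one step,
		 * replacing devm_kzalloc() + "ex->bridge.funcs = &example_lvds_funcs;".
		 */
		ex = devm_drm_bridge_alloc(&pdev->dev, struct example_lvds, bridge,
					   &example_lvds_funcs);
		if (IS_ERR(ex))
			return PTR_ERR(ex);

		ex->bridge.of_node = pdev->dev.of_node;
		drm_bridge_add(&ex->bridge);

		return 0;
	}
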
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index 26ae1ab5237f..2cb6dfc7a6d3 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -113,11 +113,12 @@ static unsigned long fsl_ldb_link_frequency(struct fsl_ldb *fsl_ldb, int clock)
}
static int fsl_ldb_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- return drm_bridge_attach(bridge->encoder, fsl_ldb->panel_bridge,
+ return drm_bridge_attach(encoder, fsl_ldb->panel_bridge,
bridge, flags);
}
@@ -180,9 +181,9 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
configured_link_freq = clk_get_rate(fsl_ldb->clk);
if (configured_link_freq != requested_link_freq)
- dev_warn(fsl_ldb->dev, "Configured LDB clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
- configured_link_freq,
- requested_link_freq);
+ dev_warn(fsl_ldb->dev,
+ "Configured %pC clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
+ fsl_ldb->clk, configured_link_freq, requested_link_freq);
clk_prepare_enable(fsl_ldb->clk);
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
index 9b5bebbe357d..6149ba141a38 100644
--- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
@@ -104,7 +104,7 @@ void ldb_bridge_disable_helper(struct drm_bridge *bridge)
}
EXPORT_SYMBOL_GPL(ldb_bridge_disable_helper);
-int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
@@ -116,9 +116,8 @@ int ldb_bridge_attach_helper(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
- ldb_ch->next_bridge, bridge,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ return drm_bridge_attach(encoder, ldb_ch->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
EXPORT_SYMBOL_GPL(ldb_bridge_attach_helper);
@@ -191,8 +190,7 @@ int ldb_find_next_bridge_helper(struct ldb *ldb)
}
EXPORT_SYMBOL_GPL(ldb_find_next_bridge_helper);
-void ldb_add_bridge_helper(struct ldb *ldb,
- const struct drm_bridge_funcs *bridge_funcs)
+void ldb_add_bridge_helper(struct ldb *ldb)
{
struct ldb_channel *ldb_ch;
int i;
@@ -204,7 +202,6 @@ void ldb_add_bridge_helper(struct ldb *ldb,
continue;
ldb_ch->bridge.driver_private = ldb_ch;
- ldb_ch->bridge.funcs = bridge_funcs;
ldb_ch->bridge.of_node = ldb_ch->np;
drm_bridge_add(&ldb_ch->bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
index a0a5cde27fbc..de187e326999 100644
--- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
@@ -81,15 +81,14 @@ void ldb_bridge_enable_helper(struct drm_bridge *bridge);
void ldb_bridge_disable_helper(struct drm_bridge *bridge);
-int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags);
int ldb_init_helper(struct ldb *ldb);
int ldb_find_next_bridge_helper(struct ldb *ldb);
-void ldb_add_bridge_helper(struct ldb *ldb,
- const struct drm_bridge_funcs *bridge_funcs);
+void ldb_add_bridge_helper(struct ldb *ldb);
void ldb_remove_bridge_helper(struct ldb *ldb);
diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
index 3ebf0b9866de..f072c6ed39ef 100644
--- a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
+++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
@@ -23,7 +23,8 @@ struct imx_legacy_bridge {
#define to_imx_legacy_bridge(bridge) container_of(bridge, struct imx_legacy_bridge, base)
static int imx_legacy_bridge_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
@@ -76,9 +77,9 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
imx_bridge->base.ops = DRM_BRIDGE_OP_MODES;
imx_bridge->base.type = type;
- ret = devm_drm_bridge_add(dev, &imx_bridge->base);
- if (ret)
- return ERR_PTR(ret);
+ ret = devm_drm_bridge_add(dev, &imx_bridge->base);
+ if (ret)
+ return ERR_PTR(ret);
return &imx_bridge->base;
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index a17433a7c755..8a4fd7d77a8d 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -40,11 +40,12 @@ to_imx8mp_hdmi_pvi(struct drm_bridge *bridge)
}
static int imx8mp_hdmi_pvi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
- return drm_bridge_attach(bridge->encoder, pvi->next_bridge,
+ return drm_bridge_attach(encoder, pvi->next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
index 524aac751359..47aa65938e6a 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
@@ -47,7 +47,7 @@ struct imx8qm_ldb_channel {
struct imx8qm_ldb {
struct ldb base;
struct device *dev;
- struct imx8qm_ldb_channel channel[MAX_LDB_CHAN_NUM];
+ struct imx8qm_ldb_channel *channel[MAX_LDB_CHAN_NUM];
struct clk *clk_pixel;
struct clk *clk_bypass;
int active_chno;
@@ -107,7 +107,7 @@ static int imx8qm_ldb_bridge_atomic_check(struct drm_bridge *bridge,
if (is_split) {
imx8qm_ldb_ch =
- &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
+ imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
phy_cfg);
ret = phy_validate(imx8qm_ldb_ch->phy, PHY_MODE_LVDS, 0, &opts);
@@ -158,7 +158,7 @@ imx8qm_ldb_bridge_mode_set(struct drm_bridge *bridge,
if (is_split) {
imx8qm_ldb_ch =
- &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
+ imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
phy_cfg);
ret = phy_configure(imx8qm_ldb_ch->phy, &opts);
@@ -226,13 +226,13 @@ static void imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
}
if (is_split) {
- ret = phy_power_on(imx8qm_ldb->channel[0].phy);
+ ret = phy_power_on(imx8qm_ldb->channel[0]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power on channel0 PHY: %d\n",
ret);
- ret = phy_power_on(imx8qm_ldb->channel[1].phy);
+ ret = phy_power_on(imx8qm_ldb->channel[1]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power on channel1 PHY: %d\n",
@@ -261,12 +261,12 @@ static void imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
ldb_bridge_disable_helper(bridge);
if (is_split) {
- ret = phy_power_off(imx8qm_ldb->channel[0].phy);
+ ret = phy_power_off(imx8qm_ldb->channel[0]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power off channel0 PHY: %d\n",
ret);
- ret = phy_power_off(imx8qm_ldb->channel[1].phy);
+ ret = phy_power_off(imx8qm_ldb->channel[1]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power off channel1 PHY: %d\n",
@@ -412,7 +412,7 @@ static int imx8qm_ldb_get_phy(struct imx8qm_ldb *imx8qm_ldb)
int i, ret;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[i];
ldb_ch = &imx8qm_ldb_ch->base;
if (!ldb_ch->is_available)
@@ -448,6 +448,14 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
if (!imx8qm_ldb)
return -ENOMEM;
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ imx8qm_ldb->channel[i] =
+ devm_drm_bridge_alloc(dev, struct imx8qm_ldb_channel, base.bridge,
+ &imx8qm_ldb_bridge_funcs);
+ if (IS_ERR(imx8qm_ldb->channel[i]))
+ return PTR_ERR(imx8qm_ldb->channel[i]);
+ }
+
imx8qm_ldb->clk_pixel = devm_clk_get(dev, "pixel");
if (IS_ERR(imx8qm_ldb->clk_pixel)) {
ret = PTR_ERR(imx8qm_ldb->clk_pixel);
@@ -473,7 +481,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
ldb->ctrl_reg = 0xe0;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
- ldb->channel[i] = &imx8qm_ldb->channel[i].base;
+ ldb->channel[i] = &imx8qm_ldb->channel[i]->base;
ret = ldb_init_helper(ldb);
if (ret)
@@ -499,12 +507,12 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
}
imx8qm_ldb->active_chno = 0;
- imx8qm_ldb_ch = &imx8qm_ldb->channel[0];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[0];
ldb_ch = &imx8qm_ldb_ch->base;
ldb_ch->link_type = pixel_order;
} else {
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[i];
ldb_ch = &imx8qm_ldb_ch->base;
if (ldb_ch->is_available) {
@@ -525,7 +533,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx8qm_ldb);
pm_runtime_enable(dev);
- ldb_add_bridge_helper(ldb, &imx8qm_ldb_bridge_funcs);
+ ldb_add_bridge_helper(ldb);
return ret;
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
index 3cb484773ddf..5d272916e200 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
@@ -44,7 +44,7 @@ struct imx8qxp_ldb_channel {
struct imx8qxp_ldb {
struct ldb base;
struct device *dev;
- struct imx8qxp_ldb_channel channel[MAX_LDB_CHAN_NUM];
+ struct imx8qxp_ldb_channel *channel[MAX_LDB_CHAN_NUM];
struct clk *clk_pixel;
struct clk *clk_bypass;
struct drm_bridge *companion;
@@ -410,7 +410,7 @@ static const struct drm_bridge_funcs imx8qxp_ldb_bridge_funcs = {
static int imx8qxp_ldb_set_di_id(struct imx8qxp_ldb *imx8qxp_ldb)
{
struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
- &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
+ imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
struct device_node *ep, *remote;
struct device *dev = imx8qxp_ldb->dev;
@@ -456,7 +456,7 @@ imx8qxp_ldb_check_chno_and_dual_link(struct ldb_channel *ldb_ch, int link)
static int imx8qxp_ldb_parse_dt_companion(struct imx8qxp_ldb *imx8qxp_ldb)
{
struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
- &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
+ imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
struct ldb_channel *companion_ldb_ch;
struct device_node *companion;
@@ -586,6 +586,14 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
if (!imx8qxp_ldb)
return -ENOMEM;
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ imx8qxp_ldb->channel[i] =
+ devm_drm_bridge_alloc(dev, struct imx8qxp_ldb_channel, base.bridge,
+ &imx8qxp_ldb_bridge_funcs);
+ if (IS_ERR(imx8qxp_ldb->channel[i]))
+ return PTR_ERR(imx8qxp_ldb->channel[i]);
+ }
+
imx8qxp_ldb->clk_pixel = devm_clk_get(dev, "pixel");
if (IS_ERR(imx8qxp_ldb->clk_pixel)) {
ret = PTR_ERR(imx8qxp_ldb->clk_pixel);
@@ -611,7 +619,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
ldb->ctrl_reg = 0xe0;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
- ldb->channel[i] = &imx8qxp_ldb->channel[i].base;
+ ldb->channel[i] = &imx8qxp_ldb->channel[i]->base;
ret = ldb_init_helper(ldb);
if (ret)
@@ -627,7 +635,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
}
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qxp_ldb_ch = &imx8qxp_ldb->channel[i];
+ imx8qxp_ldb_ch = imx8qxp_ldb->channel[i];
ldb_ch = &imx8qxp_ldb_ch->base;
if (ldb_ch->is_available) {
@@ -660,9 +668,9 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx8qxp_ldb);
pm_runtime_enable(dev);
- ldb_add_bridge_helper(ldb, &imx8qxp_ldb_bridge_funcs);
+ ldb_add_bridge_helper(ldb);
- return ret;
+ return 0;
}
static void imx8qxp_ldb_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 1d9529dc7f2a..1f6fd488e703 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -108,6 +108,7 @@ imx8qxp_pc_bridge_mode_valid(struct drm_bridge *bridge,
}
static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pc_channel *ch = bridge->driver_private;
@@ -119,7 +120,7 @@ static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
ch->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index cd6818db0fd3..e092c9ea99b0 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -128,6 +128,7 @@ static void imx8qxp_pixel_link_set_mst_addr(struct imx8qxp_pixel_link *pl)
}
static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pixel_link *pl = bridge->driver_private;
@@ -138,7 +139,7 @@ static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
pl->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index 49dd4f96d52c..da138ab51b3b 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -48,6 +48,7 @@ struct imx8qxp_pxl2dpi {
#define bridge_to_p2d(b) container_of(b, struct imx8qxp_pxl2dpi, bridge)
static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
@@ -58,7 +59,7 @@ static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
p2d->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index 21152a1c28f7..a3a63a977b0a 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -665,13 +665,14 @@ it6263_bridge_mode_valid(struct drm_bridge *bridge,
}
static int it6263_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it6263 *it = bridge_to_it6263(bridge);
struct drm_connector *connector;
int ret;
- ret = drm_bridge_attach(bridge->encoder, it->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, it->next_bridge, bridge,
flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -679,7 +680,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
- connector = drm_bridge_connector_init(bridge->dev, bridge->encoder);
+ connector = drm_bridge_connector_init(bridge->dev, encoder);
if (IS_ERR(connector)) {
ret = PTR_ERR(connector);
dev_err(it->dev, "failed to initialize bridge connector: %d\n",
@@ -687,7 +688,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- drm_connector_attach_encoder(connector, bridge->encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 8a607558ac89..1383d1e21afe 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -771,40 +771,6 @@ static void it6505_calc_video_info(struct it6505 *it6505)
DRM_MODE_ARG(&it6505->video_info));
}
-static int it6505_drm_dp_link_set_power(struct drm_dp_aux *aux,
- struct it6505_drm_dp_link *link,
- u8 mode)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < DPCD_V_1_1)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= mode;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- if (mode == DP_SET_POWER_D0) {
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
-
- return 0;
-}
-
static void it6505_clear_int(struct it6505 *it6505)
{
it6505_write(it6505, INT_STATUS_01, 0xFF);
@@ -2578,8 +2544,7 @@ static void it6505_irq_hpd(struct it6505 *it6505)
}
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
@@ -2910,8 +2875,7 @@ static enum drm_connector_status it6505_detect(struct it6505 *it6505)
}
if (it6505->hpd_state) {
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d",
@@ -3124,6 +3088,7 @@ static inline struct it6505 *bridge_to_it6505(struct drm_bridge *bridge)
}
static int it6505_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
@@ -3233,8 +3198,7 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
it6505_int_mask_enable(it6505);
it6505_video_reset(it6505);
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
}
static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -3246,8 +3210,7 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
DRM_DEV_DEBUG_DRIVER(dev, "start");
if (it6505->powered) {
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D3);
+ drm_dp_link_power_down(&it6505->aux, it6505->link.revision);
it6505_video_disable(it6505);
}
}
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index b9f90f32145d..7b110ae53291 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -586,6 +586,7 @@ static bool it66121_is_hpd_detect(struct it66121_ctx *ctx)
}
static int it66121_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
@@ -594,7 +595,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- ret = drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
+ ret = drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 52da204f5740..3e49d855b364 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -543,12 +543,13 @@ exit:
}
static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt8912 *lt = bridge_to_lt8912(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge,
+ ret = drm_bridge_attach(encoder, lt->hdmi_port, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0) {
dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 0fc5ea18fe6a..9b2dac9bd63c 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -99,11 +99,12 @@ static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge)
}
static int lt9211_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
&ctx->bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 026803034231..a35a8b8ca89c 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -740,11 +740,12 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
}
static int lt9611_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- return drm_bridge_attach(bridge->encoder, lt9611->next_bridge,
+ return drm_bridge_attach(encoder, lt9611->next_bridge,
bridge, flags);
}
@@ -1130,7 +1131,7 @@ static int lt9611_probe(struct i2c_client *client)
lt9611->bridge.of_node = client->dev.of_node;
lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES |
- DRM_BRIDGE_OP_HDMI;
+ DRM_BRIDGE_OP_HDMI | DRM_BRIDGE_OP_HDMI_AUDIO;
lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
lt9611->bridge.vendor = "Lontium";
lt9611->bridge.product = "LT9611";
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index f4c3ff1fdc69..766da2cb45a7 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -280,11 +280,12 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
}
static int lt9611uxc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
- return drm_bridge_attach(bridge->encoder, lt9611uxc->next_bridge,
+ return drm_bridge_attach(encoder, lt9611uxc->next_bridge,
bridge, flags);
}
@@ -774,9 +775,9 @@ static int lt9611uxc_probe(struct i2c_client *client)
return -ENODEV;
}
- lt9611uxc = devm_kzalloc(dev, sizeof(*lt9611uxc), GFP_KERNEL);
- if (!lt9611uxc)
- return -ENOMEM;
+ lt9611uxc = devm_drm_bridge_alloc(dev, struct lt9611uxc, bridge, &lt9611uxc_bridge_funcs);
+ if (IS_ERR(lt9611uxc))
+ return PTR_ERR(lt9611uxc);
lt9611uxc->dev = dev;
lt9611uxc->client = client;
@@ -855,7 +856,6 @@ retry:
i2c_set_clientdata(client, lt9611uxc);
- lt9611uxc->bridge.funcs = &lt9611uxc_bridge_funcs;
lt9611uxc->bridge.of_node = client->dev.of_node;
lt9611uxc->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
if (lt9611uxc->hpd_supported)
@@ -880,7 +880,11 @@ retry:
}
}
- return lt9611uxc_audio_init(dev, lt9611uxc);
+ ret = lt9611uxc_audio_init(dev, lt9611uxc);
+ if (ret)
+ goto err_remove_bridge;
+
+ return 0;
err_remove_bridge:
free_irq(client->irq, lt9611uxc);
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 389af0233fcd..1646e454e0b0 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -34,11 +34,12 @@ static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
}
static int lvds_codec_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
- return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
+ return drm_bridge_attach(encoder, lvds_codec->panel_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index a47aabf134fd..15a5a1f644fc 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -190,6 +190,7 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
}
static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct i2c_client *stdp4028_i2c
diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c
index 53dd140a1b8d..1d4ae0097df8 100644
--- a/drivers/gpu/drm/bridge/microchip-lvds.c
+++ b/drivers/gpu/drm/bridge/microchip-lvds.c
@@ -104,11 +104,12 @@ static void lvds_serialiser_on(struct mchp_lvds *lvds)
}
static int mchp_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mchp_lvds *lvds = bridge_to_lvds(bridge);
- return drm_bridge_attach(bridge->encoder, lvds->panel_bridge,
+ return drm_bridge_attach(encoder, lvds->panel_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index d04c62a0cb9f..55912ae11f46 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -910,6 +910,7 @@ static void nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
}
static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
@@ -919,7 +920,7 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
- return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, panel_bridge, bridge, flags);
}
static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 27261b2ac9c8..25d7c415478b 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -214,13 +214,14 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = {
};
static int ptn3460_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
/* Let this driver create connector if requested */
- ret = drm_bridge_attach(bridge->encoder, ptn_bridge->panel_bridge,
+ ret = drm_bridge_attach(encoder, ptn_bridge->panel_bridge,
bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -239,7 +240,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge,
&ptn3460_connector_helper_funcs);
drm_connector_register(&ptn_bridge->connector);
drm_connector_attach_encoder(&ptn_bridge->connector,
- bridge->encoder);
+ encoder);
drm_helper_hpd_irq_event(ptn_bridge->connector.dev);
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 258c85c83a28..79b009ab9396 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -58,6 +58,7 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = {
};
static int panel_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
@@ -81,7 +82,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
drm_panel_bridge_set_orientation(connector, bridge);
drm_connector_attach_encoder(&panel_bridge->connector,
- bridge->encoder);
+ encoder);
if (bridge->dev->registered) {
if (connector->funcs->reset)
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 13ada42a5514..8726fefc5c65 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -418,6 +418,7 @@ static void ps8622_post_disable(struct drm_bridge *bridge)
}
static int ps8622_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index a42138b33258..2422ff68c104 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -494,6 +494,7 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
}
static int ps8640_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
@@ -518,7 +519,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
}
/* Attach the panel-bridge to the dsi bridge */
- ret = drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
+ ret = drm_bridge_attach(encoder, ps_bridge->panel_bridge,
&ps_bridge->bridge, flags);
if (ret)
goto err_bridge_attach;
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 54de6ed2fae8..0014c497e3fe 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -1640,11 +1640,12 @@ static void samsung_dsim_mode_set(struct drm_bridge *bridge,
}
static int samsung_dsim_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->out_bridge, bridge,
flags);
}
@@ -1935,9 +1936,9 @@ int samsung_dsim_probe(struct platform_device *pdev)
struct samsung_dsim *dsi;
int ret, i;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct samsung_dsim, bridge, &samsung_dsim_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
@@ -2007,7 +2008,6 @@ int samsung_dsim_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- dsi->bridge.funcs = &samsung_dsim_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 914a2609a685..6de61d9fe064 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -416,6 +416,7 @@ out:
}
static int sii902x_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -424,7 +425,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
int ret;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
- return drm_bridge_attach(bridge->encoder, sii902x->next_bridge,
+ return drm_bridge_attach(encoder, sii902x->next_bridge,
bridge, flags);
drm_connector_helper_add(&sii902x->connector,
@@ -452,7 +453,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
if (ret)
return ret;
- drm_connector_attach_encoder(&sii902x->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sii902x->connector, encoder);
return 0;
}
@@ -1138,6 +1139,7 @@ static int sii902x_init(struct sii902x *sii902x)
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ sii902x->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
if (sii902x->i2c->irq > 0)
sii902x->bridge.ops |= DRM_BRIDGE_OP_HPD;
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 28a2e1ee04b2..3af650dc92a1 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2203,6 +2203,7 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
}
static int sii8620_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index ab0b0e36e97a..70db5b99e5bb 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -103,12 +103,13 @@ static const struct drm_connector_funcs simple_bridge_con_funcs = {
};
static int simple_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, sbridge->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, sbridge->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -127,7 +128,7 @@ static int simple_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- drm_connector_attach_encoder(&sbridge->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sbridge->connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index 6166f197e37b..5e5f8c2f95be 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -1077,6 +1077,7 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HDMI_AUDIO |
DRM_BRIDGE_OP_HPD;
hdmi->bridge.of_node = pdev->dev.of_node;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 0890add5f707..8791408dd1ff 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -22,8 +22,8 @@
#include <media/cec-notifier.h>
-#include <uapi/linux/media-bus-format.h>
-#include <uapi/linux/videodev2.h>
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -2889,12 +2889,13 @@ static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge,
}
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_hdmi *hdmi = bridge->driver_private;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
- return drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
+ return drm_bridge_attach(encoder, hdmi->next_bridge,
bridge, flags);
return dw_hdmi_connector_create(hdmi);
@@ -3332,9 +3333,9 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
u8 config0;
u8 config3;
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return ERR_PTR(-ENOMEM);
+ hdmi = devm_drm_bridge_alloc(dev, struct dw_hdmi, bridge, &dw_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return hdmi;
hdmi->plat_data = plat_data;
hdmi->dev = dev;
@@ -3494,7 +3495,6 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
}
hdmi->bridge.driver_private = hdmi;
- hdmi->bridge.funcs = &dw_hdmi_bridge_funcs;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
hdmi->bridge.interlace_allowed = true;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 2b6e70a49f43..b08ada920a50 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -1072,15 +1072,16 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+ encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->panel_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
index 5fd7a459efdd..c76f5f2e74d1 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -870,15 +870,16 @@ dw_mipi_dsi2_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dw_mipi_dsi2_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+ encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi2->panel_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi2->panel_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 49c76027f831..edf01476f2ef 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -202,11 +202,12 @@ static void tc358762_enable(struct drm_bridge *bridge,
}
static int tc358762_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 3d3d135b4348..3f76c890fad9 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -295,11 +295,12 @@ static void tc358764_pre_enable(struct drm_bridge *bridge)
}
static int tc358764_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 39e2d3a7a27d..7e5449fb86a3 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1795,6 +1795,7 @@ static const struct drm_connector_funcs tc_connector_funcs = {
};
static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
@@ -1807,6 +1808,7 @@ static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
}
static int tc_edp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index ec79b0dd0e2c..063f217a17b6 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -554,6 +554,7 @@ static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = {
};
static int tc358768_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
@@ -563,7 +564,7 @@ static int tc358768_bridge_attach(struct drm_bridge *bridge,
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+ return drm_bridge_attach(encoder, priv->output.bridge, bridge,
flags);
}
@@ -580,7 +581,8 @@ tc358768_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void tc358768_bridge_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
int ret;
@@ -602,7 +604,8 @@ static void tc358768_bridge_disable(struct drm_bridge *bridge)
dev_warn(priv->dev, "Software disable failed: %d\n", ret);
}
-static void tc358768_bridge_post_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
@@ -682,13 +685,17 @@ static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val)
return (u32)div_u64(m, n);
}
-static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
struct mipi_dsi_device *dsi_dev = priv->output.dev;
unsigned long mode_flags = dsi_dev->mode_flags;
u32 val, val2, lptxcnt, hact, data_type;
s32 raw_val;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
const struct drm_display_mode *mode;
u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
u32 dsiclk, hsbyteclk;
@@ -719,7 +726,10 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
return;
}
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->adjusted_mode;
ret = tc358768_setup_pll(priv, mode);
if (ret) {
dev_err(dev, "PLL setup failed: %d\n", ret);
@@ -1076,14 +1086,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
tc358768_write(priv, TC358768_DSI_CONFW, val);
ret = tc358768_clear_error(priv);
- if (ret) {
+ if (ret)
dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
- tc358768_bridge_disable(bridge);
- tc358768_bridge_post_disable(bridge);
- }
}
-static void tc358768_bridge_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
int ret;
@@ -1100,11 +1108,8 @@ static void tc358768_bridge_enable(struct drm_bridge *bridge)
tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6));
ret = tc358768_clear_error(priv);
- if (ret) {
+ if (ret)
dev_err(priv->dev, "Bridge enable failed: %d\n", ret);
- tc358768_bridge_disable(bridge);
- tc358768_bridge_post_disable(bridge);
- }
}
#define MAX_INPUT_SEL_FORMATS 1
@@ -1166,10 +1171,10 @@ static const struct drm_bridge_funcs tc358768_bridge_funcs = {
.attach = tc358768_bridge_attach,
.mode_valid = tc358768_bridge_mode_valid,
.mode_fixup = tc358768_mode_fixup,
- .pre_enable = tc358768_bridge_pre_enable,
- .enable = tc358768_bridge_enable,
- .disable = tc358768_bridge_disable,
- .post_disable = tc358768_bridge_post_disable,
+ .atomic_pre_enable = tc358768_bridge_atomic_pre_enable,
+ .atomic_enable = tc358768_bridge_atomic_enable,
+ .atomic_disable = tc358768_bridge_atomic_disable,
+ .atomic_post_disable = tc358768_bridge_atomic_post_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index c89757bec4e6..1b10e6ee1724 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -286,7 +286,8 @@ static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
return container_of(b, struct tc_data, bridge);
}
-static void tc_bridge_pre_enable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
struct device *dev = &tc->dsi->dev;
@@ -309,7 +310,8 @@ static void tc_bridge_pre_enable(struct drm_bridge *bridge)
usleep_range(10, 20);
}
-static void tc_bridge_post_disable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
struct device *dev = &tc->dsi->dev;
@@ -368,30 +370,21 @@ static void d2l_write(struct i2c_client *i2c, u16 addr, u32 val)
ret, addr);
}
-/* helper function to access bus_formats */
-static struct drm_connector *get_connector(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- return connector;
-
- return NULL;
-}
-
-static void tc_bridge_enable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
u32 hback_porch, hsync_len, hfront_porch, hactive, htime1, htime2;
u32 vback_porch, vsync_len, vfront_porch, vactive, vtime1, vtime2;
u32 val = 0;
u16 dsiclk, clkdiv, byteclk, t1, t2, t3, vsdelay;
- struct drm_display_mode *mode;
- struct drm_connector *connector = get_connector(bridge->encoder);
-
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ struct drm_connector *connector =
+ drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
hback_porch = mode->htotal - mode->hsync_end;
hsync_len = mode->hsync_end - mode->hsync_start;
@@ -589,21 +582,25 @@ static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc)
}
static int tc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, tc->panel_bridge,
+ return drm_bridge_attach(encoder, tc->panel_bridge,
&tc->bridge, flags);
}
static const struct drm_bridge_funcs tc_bridge_funcs = {
.attach = tc_bridge_attach,
- .pre_enable = tc_bridge_pre_enable,
- .enable = tc_bridge_enable,
+ .atomic_pre_enable = tc_bridge_atomic_pre_enable,
+ .atomic_enable = tc_bridge_atomic_enable,
.mode_valid = tc_mode_valid,
- .post_disable = tc_bridge_post_disable,
+ .atomic_post_disable = tc_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
};
static int tc_attach_host(struct tc_data *tc)
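
The tc358768, tc358775 and (further down) ti-sn65dsi86 conversions stop peeking at bridge->encoder->crtc->state from the enable paths and instead resolve the adjusted mode from the drm_atomic_state passed to the atomic hooks. A minimal sketch of that lookup, mirroring the helper introduced in the ti-sn65dsi86 hunk; the function name is illustrative:

	#include <drm/drm_atomic.h>
	#include <drm/drm_bridge.h>

	/*
	 * Resolve the adjusted mode for the CRTC feeding this bridge from the
	 * new (to-be-committed) atomic state rather than from
	 * bridge->encoder->crtc->state.
	 */
	static struct drm_display_mode *
	example_get_new_adjusted_mode(struct drm_bridge *bridge,
				      struct drm_atomic_state *state)
	{
		struct drm_connector *connector =
			drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
		struct drm_connector_state *conn_state =
			drm_atomic_get_new_connector_state(state, connector);
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_new_crtc_state(state, conn_state->crtc);

		return &crtc_state->adjusted_mode;
	}
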
diff --git a/drivers/gpu/drm/bridge/tda998x_drv.c b/drivers/gpu/drm/bridge/tda998x_drv.c
index 20658258fb51..850909f78a7b 100644
--- a/drivers/gpu/drm/bridge/tda998x_drv.c
+++ b/drivers/gpu/drm/bridge/tda998x_drv.c
@@ -1365,6 +1365,7 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
/* DRM bridge functions */
static int tda998x_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
@@ -1780,9 +1781,9 @@ static int tda998x_create(struct device *dev)
u32 video;
int rev_lo, rev_hi, ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct tda998x_priv, bridge, &tda998x_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
@@ -1947,7 +1948,6 @@ static int tda998x_create(struct device *dev)
tda998x_audio_codec_init(priv, &client->dev);
}
- priv->bridge.funcs = &tda998x_bridge_funcs;
#ifdef CONFIG_OF
priv->bridge.of_node = dev->of_node;
#endif
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index bba10cf9b4f9..e2fc78adebcf 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -43,11 +43,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
}
static int thc63_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct thc63_dev *thc63 = to_thc63(bridge);
- return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags);
+ return drm_bridge_attach(encoder, thc63->next, bridge, flags);
}
static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index 85f2a0e74a1c..47638d1c96ec 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -242,12 +242,12 @@ static void dlpc_mode_set(struct drm_bridge *bridge,
drm_mode_copy(&dlpc->mode, adjusted_mode);
}
-static int dlpc_attach(struct drm_bridge *bridge,
+static int dlpc_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
- return drm_bridge_attach(bridge->encoder, dlpc->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, dlpc->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs dlpc_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 95563aa1b450..033c44326552 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -40,7 +40,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_drv.h> /* DRM_MODESET_LOCK_ALL_BEGIN() needs drm_drv_uses_atomic_modeset() */
+#include <drm/drm_bridge_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -290,11 +290,12 @@ static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
}
static int sn65dsi83_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
&ctx->bridge, flags);
}
@@ -370,7 +371,6 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
{
- struct drm_device *dev = sn65dsi83->bridge.dev;
struct drm_modeset_acquire_ctx ctx;
int err;
@@ -385,26 +385,21 @@ static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
* Keep the lock during the whole operation to be atomic.
*/
- DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
-
- if (!sn65dsi83->bridge.encoder->crtc) {
- /*
- * No CRTC attached -> No CRTC active outputs to reset
- * This can happen when the SN65DSI83 is reset. Simply do
- * nothing without returning any errors.
- */
- err = 0;
- goto end;
- }
+ drm_modeset_acquire_init(&ctx, 0);
dev_warn(sn65dsi83->dev, "reset the pipe\n");
- err = drm_atomic_helper_reset_crtc(sn65dsi83->bridge.encoder->crtc, &ctx);
+retry:
+ err = drm_bridge_helper_reset_crtc(&sn65dsi83->bridge, &ctx);
+ if (err == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
-end:
- DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
- return err;
+ return 0;
}
static void sn65dsi83_reset_work(struct work_struct *ws)
@@ -946,9 +941,9 @@ static int sn65dsi83_probe(struct i2c_client *client)
struct sn65dsi83 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sn65dsi83, bridge, &sn65dsi83_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
INIT_WORK(&ctx->reset_work, sn65dsi83_reset_work);
@@ -988,7 +983,6 @@ static int sn65dsi83_probe(struct i2c_client *client)
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sn65dsi83_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 01d456b955ab..60224f476e1d 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -35,6 +35,7 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#define SN_DEVICE_ID_REGS 0x00 /* up to 0x07 */
#define SN_DEVICE_REV_REG 0x08
#define SN_DPPLL_SRC_REG 0x0A
#define DPPLL_CLK_SRC_DSICLK BIT(0)
@@ -243,11 +244,26 @@ static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata,
regmap_bulk_write(pdata->regmap, reg, buf, ARRAY_SIZE(buf));
}
-static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata)
+static struct drm_display_mode *
+get_new_adjusted_display_mode(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector =
+ drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+
+ return &crtc_state->adjusted_mode;
+}
+
+static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
u32 bit_rate_khz, clk_freq_khz;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
bit_rate_khz = mode->clock *
mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
@@ -274,7 +290,8 @@ static const u32 ti_sn_bridge_dsiclk_lut[] = {
460800000,
};
-static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
int i;
u32 refclk_rate;
@@ -287,7 +304,7 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut);
clk_prepare_enable(pdata->refclk);
} else {
- refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000;
+ refclk_rate = ti_sn_bridge_get_dsi_freq(pdata, state) * 1000;
refclk_lut = ti_sn_bridge_dsiclk_lut;
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut);
}
@@ -311,12 +328,13 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
pdata->pwm_refclk_freq = ti_sn_bridge_refclk_lut[i];
}
-static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
+static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
mutex_lock(&pdata->comms_mutex);
/* configure bridge ref_clk */
- ti_sn_bridge_set_refclk_freq(pdata);
+ ti_sn_bridge_set_refclk_freq(pdata, state);
/*
* HPD on this bridge chip is a bit useless. This is an eDP bridge
@@ -376,7 +394,7 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
* clock so reading early doesn't work.
*/
if (pdata->refclk)
- ti_sn65dsi86_enable_comms(pdata);
+ ti_sn65dsi86_enable_comms(pdata, NULL);
return ret;
}
@@ -423,36 +441,8 @@ static int status_show(struct seq_file *s, void *data)
return 0;
}
-
DEFINE_SHOW_ATTRIBUTE(status);
-static void ti_sn65dsi86_debugfs_remove(void *data)
-{
- debugfs_remove_recursive(data);
-}
-
-static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata)
-{
- struct device *dev = pdata->dev;
- struct dentry *debugfs;
- int ret;
-
- debugfs = debugfs_create_dir(dev_name(dev), NULL);
-
- /*
- * We might get an error back if debugfs wasn't enabled in the kernel
- * so let's just silently return upon failure.
- */
- if (IS_ERR_OR_NULL(debugfs))
- return;
-
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs);
- if (ret)
- return;
-
- debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
-}
-
/* -----------------------------------------------------------------------------
* Auxiliary Devices (*not* AUX)
*/
@@ -732,6 +722,7 @@ static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86
}
static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -748,7 +739,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
* Attach the next bridge.
* We never want the next bridge to *also* create a connector.
*/
- ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge,
+ ret = drm_bridge_attach(encoder, pdata->next_bridge,
&pdata->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
goto err_initted_aux;
@@ -821,12 +812,13 @@ static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge,
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, 0);
}
-static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
unsigned int bit_rate_mhz, clk_freq_mhz;
unsigned int val;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
/* set DSIA clk frequency */
bit_rate_mhz = (mode->clock / 1000) *
@@ -856,12 +848,14 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};
-static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, unsigned int bpp)
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state,
+ unsigned int bpp)
{
unsigned int bit_rate_khz, dp_rate_mhz;
unsigned int i;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
/* Calculate minimum bit rate based on our pixel clock. */
bit_rate_khz = mode->clock * bpp;
@@ -960,10 +954,11 @@ static unsigned int ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata)
return valid_rates;
}
-static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
u8 hsync_polarity = 0, vsync_polarity = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -1105,7 +1100,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
pdata->ln_polrs << LN_POLRS_OFFSET);
/* set dsi clk frequency value */
- ti_sn_bridge_set_dsi_rate(pdata);
+ ti_sn_bridge_set_dsi_rate(pdata, state);
/*
* The SN65DSI86 only supports ASSR Display Authentication method and
@@ -1140,7 +1135,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
valid_rates = ti_sn_bridge_read_valid_rates(pdata);
/* Train until we run out of rates */
- for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, bpp);
+ for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, state, bpp);
dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
dp_rate_idx++) {
if (!(valid_rates & BIT(dp_rate_idx)))
@@ -1156,7 +1151,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
}
/* config video parameters */
- ti_sn_bridge_set_video_timings(pdata);
+ ti_sn_bridge_set_video_timings(pdata, state);
/* enable video stream */
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE,
@@ -1171,7 +1166,7 @@ static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge,
pm_runtime_get_sync(pdata->dev);
if (!pdata->refclk)
- ti_sn65dsi86_enable_comms(pdata);
+ ti_sn65dsi86_enable_comms(pdata, state);
/* td7: min 100 us after enable before DSI data */
usleep_range(100, 110);
@@ -1216,6 +1211,15 @@ static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge,
return drm_edid_read_ddc(connector, &pdata->aux.ddc);
}
+static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ struct dentry *debugfs;
+
+ debugfs = debugfs_create_dir(dev_name(pdata->dev), root);
+ debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
+}
+
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
@@ -1229,6 +1233,7 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .debugfs_init = ti_sn65dsi86_debugfs_init,
};
static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
@@ -1312,7 +1317,6 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
if (ret)
return ret;
- pdata->bridge.funcs = &ti_sn_bridge_funcs;
pdata->bridge.of_node = np;
pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort
? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
@@ -1894,6 +1898,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ti_sn65dsi86 *pdata;
+ u8 id_buf[8];
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
@@ -1901,9 +1906,9 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
return -ENODEV;
}
- pdata = devm_kzalloc(dev, sizeof(struct ti_sn65dsi86), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ pdata = devm_drm_bridge_alloc(dev, struct ti_sn65dsi86, bridge, &ti_sn_bridge_funcs);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
dev_set_drvdata(dev, pdata);
pdata->dev = dev;
@@ -1937,7 +1942,15 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
if (ret)
return ret;
- ti_sn65dsi86_debugfs_init(pdata);
+ pm_runtime_get_sync(dev);
+ ret = regmap_bulk_read(pdata->regmap, SN_DEVICE_ID_REGS, id_buf, ARRAY_SIZE(id_buf));
+ pm_runtime_put_autosuspend(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read device id\n");
+
+ /* The ID string is stored backwards */
+ if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf)))
+ return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n");
/*
* Break ourselves up into a collection of aux devices. The only real
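
A brief illustrative aside on the device-ID check added in the probe hunk above (a standalone sketch, not the driver code): the ID registers hold the ASCII device name in reverse byte order, so the driver matches the raw bytes against the reversed literal rather than reversing the buffer first. The space padding to the 8-byte register width is an assumption here, since this dump collapses trailing whitespace.

/* Standalone sketch: why matching the reversed literal works.
 * Padding of the literal/register contents is assumed. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char id_buf[8] = { '6', '8', 'I', 'S', 'D', ' ', ' ', ' ' }; /* as read back */
	char forward[sizeof(id_buf) + 1] = { 0 };

	for (size_t i = 0; i < sizeof(id_buf); i++)
		forward[i] = id_buf[sizeof(id_buf) - 1 - i];

	printf("reversed ID bytes spell: \"%s\"\n", forward);	/* "   DSI86" */
	return strncmp(id_buf, "68ISD   ", sizeof(id_buf)) ? 1 : 0;
}
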
diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c
index 22316382451f..cca75443f012 100644
--- a/drivers/gpu/drm/bridge/ti-tdp158.c
+++ b/drivers/gpu/drm/bridge/ti-tdp158.c
@@ -45,11 +45,13 @@ static void tdp158_disable(struct drm_bridge *bridge,
regulator_disable(tdp158->vcc);
}
-static int tdp158_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int tdp158_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
struct tdp158 *tdp158 = bridge->driver_private;
- return drm_bridge_attach(bridge->encoder, tdp158->next, bridge, flags);
+ return drm_bridge_attach(encoder, tdp158->next, bridge, flags);
}
static const struct drm_bridge_funcs tdp158_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 79ab5da827e1..e15d232ddbac 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -120,12 +120,13 @@ static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
}
static int tfp410_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, dvi->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -159,7 +160,7 @@ static int tfp410_attach(struct drm_bridge *bridge,
drm_display_info_set_bus_formats(&dvi->connector.display_info,
&dvi->bus_format, 1);
- drm_connector_attach_encoder(&dvi->connector, bridge->encoder);
+ drm_connector_attach_encoder(&dvi->connector, encoder);
return 0;
}
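
The tdp158, tfp410 and tpd12s015 hunks above all apply the same interface change; a minimal sketch of the pattern, using hypothetical names (my_bridge, bridge_to_my_bridge, next_bridge): the encoder is now passed to .attach explicitly, so a pass-through bridge forwards that parameter instead of dereferencing bridge->encoder.

#include <drm/drm_bridge.h>

static int my_bridge_attach(struct drm_bridge *bridge,
			    struct drm_encoder *encoder,
			    enum drm_bridge_attach_flags flags)
{
	struct my_bridge *priv = bridge_to_my_bridge(bridge);	/* hypothetical helper */

	/* Chain to the next bridge; never create our own connector. */
	return drm_bridge_attach(encoder, priv->next_bridge, bridge,
				 flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
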
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
index 47b74cb25b14..1c289051a598 100644
--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -38,6 +38,7 @@ static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge)
}
static int tpd12s015_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tpd12s015_device *tpd = to_tpd12s015(bridge);
@@ -46,7 +47,7 @@ static int tpd12s015_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge,
+ ret = drm_bridge_attach(encoder, tpd->next_bridge,
bridge, flags);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
index a8fca079921b..fddfbd4d2493 100644
--- a/drivers/gpu/drm/ci/arm64.config
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -193,6 +193,8 @@ CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
CONFIG_REGULATOR_DA9211=y
CONFIG_DRM_ANALOGIX_ANX7625=y
+CONFIG_PHY_MTK_HDMI=y
+CONFIG_PHY_MTK_MIPI_DSI=y
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
CONFIG_ARCH_TEGRA=y
diff --git a/drivers/gpu/drm/ci/build-igt.sh b/drivers/gpu/drm/ci/build-igt.sh
index eddb5f782a5e..caa2f4804ed5 100644
--- a/drivers/gpu/drm/ci/build-igt.sh
+++ b/drivers/gpu/drm/ci/build-igt.sh
@@ -71,4 +71,4 @@ tar -cf artifacts/igt.tar /igt
# Pass needed files to the test stage
S3_ARTIFACT_NAME="igt.tar.gz"
gzip -c artifacts/igt.tar > ${S3_ARTIFACT_NAME}
-ci-fairy s3cp --token-file "${S3_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${KERNEL_ARCH}/${S3_ARTIFACT_NAME}
+s3_upload ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${KERNEL_ARCH}/
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index 19fe01257ab9..6fb74c51abe2 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -98,14 +98,14 @@ done
make ${KERNEL_IMAGE_NAME}
-mkdir -p /lava-files/
+mkdir -p /kernel/
for image in ${KERNEL_IMAGE_NAME}; do
- cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
+ cp arch/${KERNEL_ARCH}/boot/${image} /kernel/.
done
if [[ -n ${DEVICE_TREES} ]]; then
make dtbs
- cp ${DEVICE_TREES} /lava-files/.
+ cp ${DEVICE_TREES} /kernel/.
fi
make modules
@@ -121,11 +121,11 @@ if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
-d arch/arm64/boot/Image.lzma \
-C lzma\
-b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
- /lava-files/cheza-kernel
+ /kernel/cheza-kernel
KERNEL_IMAGE_NAME+=" cheza-kernel"
# Make a gzipped copy of the Image for db410c.
- gzip -k /lava-files/Image
+ gzip -k /kernel/Image
KERNEL_IMAGE_NAME+=" Image.gz"
fi
@@ -139,7 +139,7 @@ cp -rfv drivers/gpu/drm/ci/* install/.
. .gitlab-ci/container/container_post_build.sh
if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
- xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /lava-files/vmlinux.xz
+ xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /kernel/vmlinux.xz
FILES_TO_UPLOAD="$KERNEL_IMAGE_NAME vmlinux.xz"
if [[ -n $DEVICE_TREES ]]; then
@@ -148,13 +148,13 @@ if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
ls -l "${S3_JWT_FILE}"
for f in $FILES_TO_UPLOAD; do
- ci-fairy s3cp --token-file "${S3_JWT_FILE}" /lava-files/$f \
- https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/$f
+ s3_upload /kernel/$f \
+ https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/
done
S3_ARTIFACT_NAME="kernel-files.tar.zst"
tar --zstd -cf $S3_ARTIFACT_NAME install
- ci-fairy s3cp --token-file "${S3_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/${S3_ARTIFACT_NAME}
+ s3_upload ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/
echo "Download vmlinux.xz from https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/vmlinux.xz"
fi
@@ -165,7 +165,7 @@ ln -s common artifacts/install/ci-common
cp .config artifacts/${CI_JOB_NAME}_config
for image in ${KERNEL_IMAGE_NAME}; do
- cp /lava-files/$image artifacts/install/.
+ cp /kernel/$image artifacts/install/.
done
tar -C artifacts -cf artifacts/install.tar install
diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml
index 274f118533a7..8eb56ebcf4aa 100644
--- a/drivers/gpu/drm/ci/build.yml
+++ b/drivers/gpu/drm/ci/build.yml
@@ -67,7 +67,7 @@ testing:arm32:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: arm.config
@@ -79,7 +79,7 @@ testing:arm64:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: arm64.config
@@ -91,7 +91,7 @@ testing:x86_64:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: x86_64.config
@@ -143,6 +143,10 @@ debian-arm64-release:
rules:
- when: never
+debian-arm64-ubsan:
+ rules:
+ - when: never
+
debian-build-testing:
rules:
- when: never
@@ -183,6 +187,10 @@ debian-testing-msan:
rules:
- when: never
+debian-testing-ubsan:
+ rules:
+ - when: never
+
debian-vulkan:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
index 07dc13ff865d..56c95c2f91ae 100644
--- a/drivers/gpu/drm/ci/container.yml
+++ b/drivers/gpu/drm/ci/container.yml
@@ -24,6 +24,18 @@ alpine/x86_64_build:
rules:
- when: never
+debian/arm32_test-base:
+ rules:
+ - when: never
+
+debian/arm32_test-gl:
+ rules:
+ - when: never
+
+debian/arm32_test-vk:
+ rules:
+ - when: never
+
debian/arm64_test-gl:
rules:
- when: never
@@ -32,6 +44,10 @@ debian/arm64_test-vk:
rules:
- when: never
+debian/baremetal_arm32_test:
+ rules:
+ - when: never
+
debian/ppc64el_build:
rules:
- when: never
@@ -40,6 +56,14 @@ debian/s390x_build:
rules:
- when: never
+debian/x86_32_build:
+ rules:
+ - when: never
+
+debian/x86_64_test-android:
+ rules:
+ - when: never
+
debian/x86_64_test-vk:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index f04aabe8327c..ba75b3a7eca4 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,11 +1,11 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 7d3062470f3ccc6cb40540e772e902c7e2248024
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha f73132f1215a37ce8ffc711a0136c90649aaf128
UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: 33adea9ebafd059ac88a5ccfec60536394f36c7c
+ IGT_VERSION: 04bedb9238586b81d4d4ca62b02e584f6cfc77af
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.20.0
@@ -20,8 +20,10 @@ variables:
rm download-git-cache.sh
set +o xtrace
S3_JWT_FILE: /s3_jwt
+ S3_JWT_HEADER_FILE: /s3_jwt_header
S3_JWT_FILE_SCRIPT: |-
echo -n '${S3_JWT}' > '${S3_JWT_FILE}' &&
+ echo -n "Authorization: Bearer ${S3_JWT}" > '${S3_JWT_HEADER_FILE}' &&
unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables
S3_HOST: s3.freedesktop.org
# This bucket is used to fetch the kernel image
@@ -143,11 +145,11 @@ stages:
# Pre-merge pipeline
- if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
# Push to a branch on a fork
- - if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"
+ - if: &is-fork-push $CI_PIPELINE_SOURCE == "push"
# nightly pipeline
- if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
# pipeline for direct pushes that bypassed the CI
- - if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
+ - if: &is-direct-push $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
# Rules applied to every job in the pipeline
@@ -170,29 +172,48 @@ stages:
- !reference [.disable-farm-mr-rules, rules]
# Never run immediately after merging, as we just ran everything
- !reference [.never-post-merge-rules, rules]
- # Build everything in merge pipelines, if any files affecting the pipeline
- # were changed
+ # Build everything in merge pipelines
- if: *is-merge-attempt
- changes: &all_paths
- - drivers/gpu/drm/ci/**/*
when: on_success
# Same as above, but for pre-merge pipelines
- if: *is-pre-merge
- changes:
- *all_paths
when: manual
- # Skip everything for pre-merge and merge pipelines which don't change
- # anything in the build
+ # Build everything after someone bypassed the CI
+ - if: *is-direct-push
+ when: manual
+ # Build everything in scheduled pipelines
+ - if: *is-scheduled-pipeline
+ when: on_success
+ # Allow building everything in fork pipelines, but build nothing unless
+ # manually triggered
+ - when: manual
+
+
+# Repeat of the above but with `when: on_success` replaced with
+# `when: delayed` + `start_in:`, for build-only jobs.
+# Note: make sure the branches in this list are the same as in
+# `.container+build-rules` above.
+.build-only-delayed-rules:
+ rules:
+ - !reference [.common-rules, rules]
+ # Run when re-enabling a disabled farm, but not when disabling it
+ - !reference [.disable-farm-mr-rules, rules]
+ # Never run immediately after merging, as we just ran everything
+ - !reference [.never-post-merge-rules, rules]
+ # Build everything in merge pipelines
- if: *is-merge-attempt
- when: never
+ when: delayed
+ start_in: &build-delay 5 minutes
+ # Same as above, but for pre-merge pipelines
- if: *is-pre-merge
- when: never
+ when: manual
# Build everything after someone bypassed the CI
- if: *is-direct-push
- when: on_success
+ when: manual
# Build everything in scheduled pipelines
- if: *is-scheduled-pipeline
- when: on_success
+ when: delayed
+ start_in: *build-delay
# Allow building everything in fork pipelines, but build nothing unless
# manually triggered
- when: manual
@@ -232,7 +253,7 @@ make git archive:
- tar -cvzf ../$CI_PROJECT_NAME.tar.gz .
# Use id_tokens for JWT auth
- - ci-fairy s3cp --token-file "${S3_JWT_FILE}" ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/${S3_GITCACHE_BUCKET}/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
+ - s3_upload ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/${S3_GITCACHE_BUCKET}/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/
# Sanity checks of MR settings and commit logs
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index 68b042e43b7f..2a0599f12c58 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -85,5 +85,16 @@ deqp-runner junit \
--limit 50 \
--template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml"
+# Check if /proc/lockdep_stats exists
+if [ -f /proc/lockdep_stats ]; then
+ # If debug_locks is 0, lockdep has detected a locking issue and turned itself off.
+ debug_locks=$(grep 'debug_locks:' /proc/lockdep_stats | awk '{print $2}')
+ if [ "$debug_locks" -eq 0 ] && [ "$ret" -eq 0 ]; then
+ echo "Warning: LOCKDEP issue detected. Please check dmesg logs for more information."
+ cat /proc/lockdep_stats
+ ret=101
+ fi
+fi
+
cd $oldpath
exit $ret
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
index 20049f3626b2..53fe34b86578 100644
--- a/drivers/gpu/drm/ci/image-tags.yml
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -1,5 +1,5 @@
variables:
- CONTAINER_TAG: "20250204-mesa-uprev"
+ CONTAINER_TAG: "20250328-mesa-uprev"
DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
@@ -20,3 +20,5 @@ variables:
DEBIAN_PYUTILS_TAG: "${CONTAINER_TAG}"
ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}"
+
+ CONDITIONAL_BUILD_ANGLE_TAG: fec96cc945650c5fe9f7188cabe80d8a
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
index 6e5ac51e8c0a..a1e8b34fb2d4 100755
--- a/drivers/gpu/drm/ci/lava-submit.sh
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -48,12 +48,13 @@ ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
rm -rf results
mkdir -p results/job-rootfs-overlay/
-artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh
+artifacts/ci-common/export-gitlab-job-env-for-dut.sh \
+ > results/job-rootfs-overlay/set-job-env-vars.sh
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
-ci-fairy s3cp --token-file "${S3_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
+s3_upload job-rootfs-overlay.tar.gz "https://${JOB_ARTIFACTS_BASE}"
# Prepare env vars for upload.
section_switch variables "Environment variables passed through to device:"
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 6a1e059858e5..84a25f0e783b 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -1,6 +1,14 @@
+.allow_failure_lockdep:
+ variables:
+ FF_USE_NEW_BASH_EVAL_STRATEGY: 'true'
+ allow_failure:
+ exit_codes:
+ - 101
+
.lava-test:
extends:
- .container+build-rules
+ - .allow_failure_lockdep
timeout: "1h30m"
rules:
- !reference [.scheduled_pipeline-rules, rules]
@@ -69,6 +77,7 @@
extends:
- .baremetal-test-arm64
- .use-debian/baremetal_arm64_test
+ - .allow_failure_lockdep
timeout: "1h30m"
rules:
- !reference [.scheduled_pipeline-rules, rules]
@@ -89,6 +98,28 @@
tags:
- $RUNNER_TAG
+.software-driver:
+ stage: software-driver
+ extends:
+ - .allow_failure_lockdep
+ timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - when: on_success
+ extends:
+ - .test-gl
+ tags:
+ - kvm
+ script:
+ - ln -sf $CI_PROJECT_DIR/install /install
+ - mv install/bzImage /kernel/bzImage
+ - mkdir -p /lib/modules
+ - install/crosvm-runner.sh install/igt_runner.sh
+ needs:
+ - debian/x86_64_test-gl
+ - testing:x86_64
+ - igt:x86_64
+
.msm-sc7180:
extends:
- .lava-igt:arm64
@@ -133,7 +164,7 @@ msm:apq8016:
BM_KERNEL_EXTRA_ARGS: clk_ignore_unused
RUNNER_TAG: google-freedreno-db410c
script:
- - ./install/bare-metal/fastboot.sh
+ - ./install/bare-metal/fastboot.sh || exit $?
msm:apq8096:
extends:
@@ -147,7 +178,7 @@ msm:apq8096:
GPU_VERSION: apq8096
RUNNER_TAG: google-freedreno-db820c
script:
- - ./install/bare-metal/fastboot.sh
+ - ./install/bare-metal/fastboot.sh || exit $?
msm:sdm845:
extends:
@@ -161,7 +192,7 @@ msm:sdm845:
GPU_VERSION: sdm845
RUNNER_TAG: google-freedreno-cheza
script:
- - ./install/bare-metal/cros-servo.sh
+ - ./install/bare-metal/cros-servo.sh || exit $?
msm:sm8350-hdk:
extends:
@@ -440,47 +471,16 @@ panfrost:g12b:
- .panfrost-gpu
virtio_gpu:none:
- stage: software-driver
- timeout: "1h30m"
- rules:
- - !reference [.scheduled_pipeline-rules, rules]
- - when: on_success
+ extends:
+ - .software-driver
variables:
CROSVM_GALLIUM_DRIVER: llvmpipe
DRIVER_NAME: virtio_gpu
GPU_VERSION: none
- extends:
- - .test-gl
- tags:
- - kvm
- script:
- - ln -sf $CI_PROJECT_DIR/install /install
- - mv install/bzImage /lava-files/bzImage
- - install/crosvm-runner.sh install/igt_runner.sh
- needs:
- - debian/x86_64_test-gl
- - testing:x86_64
- - igt:x86_64
vkms:none:
- stage: software-driver
- timeout: "1h30m"
- rules:
- - !reference [.scheduled_pipeline-rules, rules]
- - when: on_success
+ extends:
+ - .software-driver
variables:
DRIVER_NAME: vkms
GPU_VERSION: none
- extends:
- - .test-gl
- tags:
- - kvm
- script:
- - ln -sf $CI_PROJECT_DIR/install /install
- - mv install/bzImage /lava-files/bzImage
- - mkdir -p /lib/modules
- - ./install/crosvm-runner.sh ./install/igt_runner.sh
- needs:
- - debian/x86_64_test-gl
- - testing:x86_64
- - igt:x86_64
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
index 75374085f40f..f44dbce3151a 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
@@ -14,16 +14,10 @@ amdgpu/amd_plane@mpo-scale-nv12,Fail
amdgpu/amd_plane@mpo-scale-p010,Fail
amdgpu/amd_plane@mpo-scale-rgb,Crash
amdgpu/amd_plane@mpo-swizzle-toggle,Fail
-amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Crash
+amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Fail
kms_addfb_basic@bad-pitch-65536,Fail
kms_addfb_basic@bo-too-small,Fail
kms_addfb_basic@too-high,Fail
-kms_async_flips@alternate-sync-async-flip,Fail
-kms_async_flips@alternate-sync-async-flip-atomic,Fail
-kms_async_flips@test-cursor,Fail
-kms_async_flips@test-cursor-atomic,Fail
-kms_async_flips@test-time-stamp,Fail
-kms_async_flips@test-time-stamp-atomic,Fail
kms_atomic_transition@plane-all-modeset-transition-internal-panels,Fail
kms_atomic_transition@plane-all-transition,Fail
kms_atomic_transition@plane-all-transition-nonblocking,Fail
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
index 3879c4812a22..902d54027506 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
@@ -14,6 +14,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
index a29cea4f234c..8e2b5504004e 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -1,22 +1,18 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
-kms_flip@dpms-off-confusion-interruptible,Timeout
-kms_flip@wf_vblank-ts-check-interruptible,Fail
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
@@ -31,12 +27,18 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -44,8 +46,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
index 2ef1dc35a7fa..922327632eff 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
index ee11999e3da1..7353ab11e940 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
@@ -1,4 +1,3 @@
-core_setmaster@master-drop-set-user,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -16,6 +15,7 @@ kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
@@ -30,7 +30,6 @@ kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_pm_backlight@basic-brightness,Fail
-kms_pm_backlight@brightness-with-dpms,Crash
kms_pm_backlight@fade,Fail
kms_pm_backlight@fade-with-dpms,Fail
kms_pm_rpm@modeset-stress-extra-wait,Timeout
@@ -43,8 +42,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
index 4f50e0240ff4..80bf2741866c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
index 47b3f1d42bb6..6fef7c1e56ea 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -1,4 +1,4 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -8,8 +8,9 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_cursor_crc@cursor-suspend,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip@busy-flip,Timeout
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
@@ -34,17 +35,22 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
+kms_pipe_stress@stress-xrgb8888-untiled,Fail
+kms_pipe_stress@stress-xrgb8888-ytiled,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
kms_psr2_sf@cursor-plane-update-sf,Fail
kms_psr2_sf@overlay-plane-update-continuous-sf,Fail
kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail
kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail
kms_psr2_sf@plane-move-sf-dmg-area,Fail
-kms_psr2_sf@pr-cursor-plane-update-sf,Timeout
kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_sf@psr2-cursor-plane-update-sf,Fail
@@ -57,6 +63,7 @@ kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_su@page_flip-NV12,Fail
kms_psr2_su@page_flip-P010,Fail
kms_setmode@basic,Fail
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -65,6 +72,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
index c87ff8b40e99..c393a138b8a6 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
@@ -16,7 +17,6 @@ gem_.*
# Hangs the machine and timeout occurs
i915_pm_rc6_residency.*
i915_suspend.*
-xe_module_load.*
api_intel_allocator.*
kms_cursor_legacy.*
i915_pm_rpm.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
index 843c363b42f5..8adf5f0a6e80 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -1,39 +1,47 @@
-core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
+core_setmaster_vs_auth,Fail
gen9_exec_parse@unaligned-access,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-kms_dirtyfb@default-dirtyfb-ioctl,Fail
kms_dirtyfb@drrs-dirtyfb-ioctl,Fail
-kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
-kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
+kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_rotation_crc@multiplane-rotation,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@non-zero-reason,Timeout
sysfs_heartbeat_interval@long,Timeout
+sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
index 219ae839323a..2e4ef9f35654 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
@@ -12,6 +12,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
index 0e08fff741aa..57453e340040 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
@@ -3,12 +3,13 @@ i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
+i915_pm_rpm@gem-execbuf-stress,Timeout
kms_flip@dpms-off-confusion,Fail
+kms_flip@nonexisting-fb,Fail
kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset,Fail
-kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,UnexpectedImprovement(Skip)
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
@@ -28,7 +29,6 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
-kms_frontbuffer_tracking@fbc-rgb565-draw-blt,Timeout
kms_lease@lease-uevent,Fail
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_rotation_crc@bad-pixel-format,Fail
@@ -37,13 +37,10 @@ kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
+perf_pmu@most-busy-idle-check-all,Fail
perf_pmu@rc6,Crash
+prime_busy@before-wait,Fail
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
index 1a3d87c0ca6e..8dec57da1bb3 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
index d4fba4f55ec1..117098bc95d9 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
@@ -21,8 +21,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
index dc722d6a774e..e287462a491a 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
@@ -12,6 +12,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
index 93d42b146df9..462c050a8b2d 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -1,13 +1,14 @@
api_intel_allocator@reopen,Timeout
api_intel_bb@destroy-bb,Timeout
core_hotunplug@hotrebind-lateclose,Timeout
-drm_read@short-buffer-block,Timeout
dumb_buffer@map-valid,Timeout
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
+i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rps@engine-order,Timeout
+i915_pm_rps@waitboost,Fail
kms_lease@lease-uevent,Fail
kms_rotation_crc@multiplane-rotation,Fail
perf@i915-ref-count,Fail
@@ -16,6 +17,7 @@ perf_pmu@enable-race,Timeout
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
perf_pmu@semaphore-wait-idle,Timeout
+prime_busy@before,Fail
prime_mmap@test_refcounting,Timeout
sriov_basic@enable-vfs-bind-unbind-each-numvfs-all,Timeout
syncobj_basic@illegal-fd-to-handle,Timeout
@@ -26,8 +28,3 @@ syncobj_wait@multi-wait-all-submitted,Timeout
syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout
syncobj_wait@wait-any-complex,Timeout
syncobj_wait@wait-delayed-signal,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
index 938377896841..429dc3c731df 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
@@ -18,6 +18,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
index 1cb6978c86dc..0f167cfd503c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -1,4 +1,4 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -6,10 +6,9 @@ i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
-kms_dirtyfb@default-dirtyfb-ioctl,Fail
-kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_cursor_crc@cursor-suspend,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -30,13 +29,19 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -45,8 +50,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
index 29bff8922ae1..7e7374ebf3d1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index 4f176c04ec4e..592d7d69e6fc 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -17,8 +17,28 @@ kms_bw@linear-tiling-2-displays-3840x2160p,Fail
kms_color@invalid-gamma-lut-sizes,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-varying-size,Fail
+kms_flip@basic-plain-flip,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@dpms-off-confusion-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
kms_flip@flip-vs-suspend-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
index 2956567c3048..443596d9e662 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
@@ -46,3 +46,10 @@ kms_prop_blob@invalid-set-prop
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@connected-linear-tiling-1-displays-2160x1440p
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/d25442b9-0b6b-433c-8e23-997840fad305@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@flip-vs-wf_vblank-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
index d0db51874aef..b5ee7323a160 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
index 5a063361d7f2..184d0cccc318 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -1,12 +1,38 @@
-core_setmaster@master-drop-set-user,Fail
dumb_buffer@create-clear,Crash
kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_color@invalid-gamma-lut-sizes,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-varying-size,Fail
+kms_flip@basic-flip-vs-wf_vblank,Fail
+kms_flip@basic-plain-flip,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@dpms-off-confusion-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
+kms_flip@flip-vs-suspend-interruptible,Fail
+kms_flip@flip-vs-wf_vblank-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
index df7e5ce7a036..0c67fec92450 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
@@ -18,3 +18,24 @@ kms_cursor_legacy@cursor-vs-flip-atomic-transitions
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
fbdev@write
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/a520d1d6-95b3-4573-b8f2-689f05bc2230@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@basic-flip-vs-modeset
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/ca960a82-00fc-4183-b983-998f7ac2fbb5@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_atomic_transition@plane-all-modeset-transition-internal-panels
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/da578eed-224f-4374-853a-1ff0aa20d03b@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_atomic_transition@plane-toggle-modeset-transition
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
index d0db51874aef..b5ee7323a160 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
index 8198e06344a3..9fd44a4b962a 100644
--- a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 7752adff05c1..72c469021b66 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -1,8 +1,4 @@
kms_3d,Fail
-kms_cursor_legacy@forked-bo,Fail
-kms_cursor_legacy@forked-move,Fail
-kms_cursor_legacy@single-bo,Fail
-kms_cursor_legacy@torture-bo,Fail
kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
index 1674c8e214d6..87724413174c 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
index 5550be5486ed..a4d2f2a7963a 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
index 8910afb6acf2..d270af1cca52 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
@@ -32,3 +32,10 @@ kms_lease@page-flip-implicit-plane
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane@plane-position-hole-dpms
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/73
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_plane@plane-position-covered
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
index 478d7c161616..d4b8ba3a54a9 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -28,3 +29,6 @@ kms_cursor_crc@cursor-random-max-size
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
kms_display_modes@extended-mode-basic
kms_display_modes@mst-extended-mode-negative
+
+# It causes other tests to fail, so skip it.
+kms_invalid_mode@overflow-vrefresh
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
index cd3d3b0befe4..cafc802cecea 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
@@ -11,3 +11,10 @@ msm/msm_mapping@shadow
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_lease@page-flip-implicit-plane
+
+# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/74
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_cursor_crc@cursor-random-128x128
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
index ef9318afcd89..022db559cc7d 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
index 38ec0305c1f4..e32d73c6c98e 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
@@ -130,3 +130,10 @@ kms_lease@page-flip-implicit-plane
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc5
kms_flip@flip-vs-expired-vblank
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/75
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@plain-flip-ts-check-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
index 2ce7f7e23a01..6c86d1953e11 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -18,6 +18,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -35,3 +36,315 @@ kms_content_protection@uevent
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
kms_display_modes@extended-mode-basic
kms_display_modes@mst-extended-mode-negative
+
+# Kernel panic
+msm/msm_recovery@hangcheck
+# DEBUG - Begin test msm/msm_recovery@hangcheck
+# Console: switching to colour dummy device 80x25
+# [ 489.526286] [IGT] msm_recovery: executing
+# [ 489.531926] [IGT] msm_recovery: starting subtest hangcheck
+# [ 492.808574] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 492.820358] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 492.831154] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 493.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 493.844177] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 493.854971] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 494.824633] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 494.836237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 494.847034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 495.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 495.828170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 495.838966] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 496.804643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 496.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 496.827041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 497.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 497.844170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 497.854963] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 498.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 498.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 498.843024] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 499.816568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 499.828163] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 499.838958] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 500.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 500.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 500.830960] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 501.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 501.844175] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 501.854965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 502.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 502.836171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 502.846965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 503.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 503.828176] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 503.838969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 504.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 504.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 504.827033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 505.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 505.840247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 505.851043] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 506.820637] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 506.832233] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 506.843026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 507.816567] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 507.828171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 507.838965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 508.808568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 508.820173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 508.830969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 509.832568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 509.844173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 509.854967] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 510.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 510.836162] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 510.846954] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 511.816569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 511.828173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 511.838968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 512.804641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 512.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 512.827040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 513.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 513.840239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 513.851035] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 514.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 514.836164] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 514.846959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 515.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 515.824235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 515.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 515.912427] rcu: INFO: rcu_preempt self-detected stall on CPU
+# [ 515.918398] rcu: 0-....: (6452 ticks this GP) idle=6afc/1/0x4000000000000000 softirq=12492/12697 fqs=3179
+# [ 515.929296] rcu: (t=6505 jiffies g=36205 q=58 ncpus=8)
+# [ 515.934709] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1
+# [ 515.934727] Tainted: [W]=WARN
+# [ 515.934732] Hardware name: Google Cheza (rev3+) (DT)
+# [ 515.934739] pstate: 00400009 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+# [ 515.934751] pc : rcu_core+0x59c/0xe68
+# [ 515.934769] lr : rcu_core+0x74/0xe68
+# [ 515.934781] sp : ffff800080003e50
+# [ 515.934785] x29: ffff800080003e50 x28: ffff225d038e9bc0 x27: 0000000000000002
+# [ 515.934805] x26: ffffc171a8ee6108 x25: ffffc171a85bc2c0 x24: ffff60ecd691e000
+# [ 515.934820] x23: ffffc171a85d15c0 x22: ffffc171a8f8d780 x21: ffff225e7eeef5c0
+# [ 515.934835] x20: ffffc171a8ef0e80 x19: ffffc171a85d15d1 x18: ffffc171a9461e70
+# [ 515.934850] x17: ffff60ecd691e000 x16: ffff800080000000 x15: 0000000000000000
+# [ 515.934866] x14: ffffc171a85d0780 x13: 0000000000000400 x12: 0000000000000000
+# [ 515.934880] x11: ffffc171a85ce900 x10: ffffc171a8ef5000 x9 : ffffc171a8ef0000
+# [ 515.934894] x8 : ffff800080003d88 x7 : ffffc171a8ee6100 x6 : ffff800080003de0
+# [ 515.934909] x5 : ffff800080003dc8 x4 : 0000000000000003 x3 : 0000000000000000
+# [ 515.934923] x2 : 0000000000000101 x1 : 0000000000000000 x0 : ffff225d038e9bc0
+# [ 515.934939] Call trace:
+# [ 515.934945] rcu_core+0x59c/0xe68 (P)
+# [ 515.934962] rcu_core_si+0x10/0x1c
+# [ 515.934976] handle_softirqs+0x118/0x4b8
+# [ 515.934994] __do_softirq+0x14/0x20
+# [ 515.935007] ____do_softirq+0x10/0x1c
+# [ 515.935021] call_on_irq_stack+0x24/0x4c
+# [ 515.935034] do_softirq_own_stack+0x1c/0x28
+# [ 515.935048] __irq_exit_rcu+0x174/0x1b4
+# [ 515.935063] irq_exit_rcu+0x10/0x38
+# [ 515.935077] el1_interrupt+0x38/0x64
+# [ 515.935092] el1h_64_irq_handler+0x18/0x24
+# [ 515.935104] el1h_64_irq+0x6c/0x70
+# [ 515.935115] lock_acquire+0x1e0/0x338 (P)
+# [ 515.935129] __mutex_lock+0xa8/0x4b8
+# [ 515.935144] mutex_lock_nested+0x24/0x30
+# [ 515.935159] _find_opp_table_unlocked+0x40/0xfc
+# [ 515.935174] _find_key+0x64/0x16c
+# [ 515.935184] dev_pm_opp_find_freq_exact+0x4c/0x74
+# [ 515.935197] qcom_cpufreq_hw_target_index+0xe8/0x128
+# [ 515.935211] __cpufreq_driver_target+0x144/0x29c
+# [ 515.935227] sugov_work+0x58/0x74
+# [ 515.935239] kthread_worker_fn+0xf4/0x324
+# [ 515.935254] kthread+0x12c/0x208
+# [ 515.935266] ret_from_fork+0x10/0x20
+# [ 516.808569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 516.820174] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 516.830968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 517.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 517.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 517.851032] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 518.820642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 518.832237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 518.843030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 519.812636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 519.824231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 519.835026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 520.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 520.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 520.830959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 521.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 521.840238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 521.851033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 522.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 522.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 522.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 523.812639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 523.824239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 523.835034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 524.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 524.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 524.827026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 525.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 525.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 525.851031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 526.820641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 526.832244] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 526.843041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 527.812642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 527.824242] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 527.835038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 528.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 528.816234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 528.827027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 529.832634] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 529.844231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 529.855017] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 530.820646] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 530.832270] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 530.843065] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 531.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 531.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 531.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 532.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 532.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 532.827031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 533.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 533.840243] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 533.851037] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 534.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 534.832245] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 534.843038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 535.812641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 535.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 535.835033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 536.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 536.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 536.827030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 537.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 537.840234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 537.851020] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 538.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 538.832235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 538.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 539.812644] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 539.824247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 539.835040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 540.124426] watchdog: BUG: soft lockup - CPU#0 stuck for 49s! [sugov:0:126]
+# [ 540.124439] Modules linked in:
+# [ 540.124448] irq event stamp: 9912389
+# [ 540.124453] hardirqs last enabled at (9912388): [<ffffc171a767a24c>] exit_to_kernel_mode+0x38/0x130
+# [ 540.124473] hardirqs last disabled at (9912389): [<ffffc171a767a368>] el1_interrupt+0x24/0x64
+# [ 540.124486] softirqs last enabled at (9898068): [<ffffc171a62bc290>] handle_softirqs+0x4a0/0x4b8
+# [ 540.124505] softirqs last disabled at (9898071): [<ffffc171a62105b0>] __do_softirq+0x14/0x20
+# [ 540.124525] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1
+# [ 540.124540] Tainted: [W]=WARN
+# [ 540.124544] Hardware name: Google Cheza (rev3+) (DT)
+# [ 540.124549] pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+# [ 540.124560] pc : xhci_urb_enqueue+0xbc/0x32c
+# [ 540.124573] lr : xhci_urb_enqueue+0xb4/0x32c
+# [ 540.124581] sp : ffff800080003c20
+# [ 540.124586] x29: ffff800080003c20 x28: 0000000000000000 x27: ffff225d00b1e6a0
+# [ 540.124602] x26: ffff225d01c3d800 x25: 0000000000000001 x24: 0000000000000006
+# [ 540.124617] x23: ffff225d044dc000 x22: ffff225d044dc000 x21: 0000000000000001
+# [ 540.124632] x20: ffff225d002d7280 x19: ffff225d0573a780 x18: ffff225e7eff0f50
+# [ 540.124647] x17: 000000000000cab0 x16: 0000000000000000 x15: ffff225d0353a000
+# [ 540.124661] x14: 0000000000000000 x13: 0000000000000820 x12: 0000000000000000
+# [ 540.124674] x11: ffff800080003a30 x10: 0000000000000001 x9 : 0000000000000000
+# [ 540.124689] x8 : ffff225d002d7300 x7 : 0000000000000000 x6 : 000000000000003f
+# [ 540.124702] x5 : 00000000ffffffff x4 : 0000000000000920 x3 : 0000000000000080
+# [ 540.124716] x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff225d002d7280
+# [ 540.124731] Call trace:
+# [ 540.124736] xhci_urb_enqueue+0xbc/0x32c (P)
+# [ 540.124751] usb_hcd_submit_urb+0x98/0x7fc
+# [ 540.124766] usb_submit_urb+0x294/0x560
+# [ 540.124780] intr_callback+0x78/0x1fc
+# [ 540.124798] __usb_hcd_giveback_urb+0x68/0x128
+# [ 540.124812] usb_giveback_urb_bh+0xa8/0x140
+# [ 540.124825] process_one_work+0x208/0x5e8
+# [ 540.124840] bh_worker+0x1a8/0x20c
+# [ 540.124853] workqueue_softirq_action+0x78/0x88
+# [ 540.124868] tasklet_hi_action+0x14/0x3c
+# [ 540.124883] handle_softirqs+0x118/0x4b8
+# [ 540.124897] __do_softirq+0x14/0x20
+# [ 540.124908] ____do_softirq+0x10/0x1c
+# [ 540.124922] call_on_irq_stack+0x24/0x4c
+# [ 540.124934] do_softirq_own_stack+0x1c/0x28
+# [ 540.124947] __irq_exit_rcu+0x174/0x1b4
+# [ 540.124961] irq_exit_rcu+0x10/0x38
+# [ 540.124976] el1_interrupt+0x38/0x64
+# [ 540.124987] el1h_64_irq_handler+0x18/0x24
+# [ 540.124998] el1h_64_irq+0x6c/0x70
+# [ 540.125009] lock_acquire+0x1e0/0x338 (P)
+# [ 540.125023] __mutex_lock+0xa8/0x4b8
+# [ 540.125038] mutex_lock_nested+0x24/0x30
+# [ 540.125052] _find_opp_table_unlocked+0x40/0xfc
+# [ 540.125067] _find_key+0x64/0x16c
+# [ 540.125078] dev_pm_opp_find_freq_exact+0x4c/0x74
+# [ 540.125090] qcom_cpufreq_hw_target_index+0xe8/0x128
+# [ 540.125105] __cpufreq_driver_target+0x144/0x29c
+# [ 540.125121] sugov_work+0x58/0x74
+# [ 540.125133] kthread_worker_fn+0xf4/0x324
+# [ 540.125148] kthread+0x12c/0x208
+# [ 540.125160] ret_from_fork+0x10/0x20
+# [ 540.125176] Kernel panic - not syncing: softlockup: hung tasks
+# [ 540.423567] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W L 6.14.0-rc4-gdddf15cff632 #1
+# [ 540.433411] Tainted: [W]=WARN, [L]=SOFTLOCKUP
+# [ 540.437901] Hardware name: Google Cheza (rev3+) (DT)
+# [ 540.443022] Call trace:
+# [ 540.445559] show_stack+0x18/0x24 (C)
+# [ 540.449357] dump_stack_lvl+0x38/0xd0
+# [ 540.453157] dump_stack+0x18/0x24
+# [ 540.456599] panic+0x3bc/0x41c
+# [ 540.459767] watchdog_timer_fn+0x254/0x2e4
+# [ 540.464005] __hrtimer_run_queues+0x3c4/0x440
+# [ 540.468508] hrtimer_interrupt+0xe4/0x244
+# [ 540.472662] arch_timer_handler_phys+0x2c/0x44
+# [ 540.477256] handle_percpu_devid_irq+0x90/0x1f0
+# [ 540.481943] handle_irq_desc+0x40/0x58
+# [ 540.485829] generic_handle_domain_irq+0x1c/0x28
+# [ 540.490604] gic_handle_irq+0x4c/0x11c
+# [ 540.494483] do_interrupt_handler+0x50/0x84
+# [ 540.498811] el1_interrupt+0x34/0x64
+# [ 540.502518] el1h_64_irq_handler+0x18/0x24
+# [ 540.506758] el1h_64_irq+0x6c/0x70
+# [ 540.510279] xhci_urb_enqueue+0xbc/0x32c (P)
+# [ 540.514693] usb_hcd_submit_urb+0x98/0x7fc
+# [ 540.518932] usb_submit_urb+0x294/0x560
+# [ 540.522901] intr_callback+0x78/0x1fc
+# [ 540.526700] __usb_hcd_giveback_urb+0x68/0x128
+# [ 540.531288] usb_giveback_urb_bh+0xa8/0x140
+# [ 540.535614] process_one_work+0x208/0x5e8
+# [ 540.539769] bh_worker+0x1a8/0x20c
+# [ 540.543293] workqueue_softirq_action+0x78/0x88
+# [ 540.547980] tasklet_hi_action+0x14/0x3c
+# [ 540.552038] handle_softirqs+0x118/0x4b8
+# [ 540.556096] __do_softirq+0x14/0x20
+# [ 540.559705] ____do_softirq+0x10/0x1c
+# [ 540.563500] call_on_irq_stack+0x24/0x4c
+# [ 540.567554] do_softirq_own_stack+0x1c/0x28
+# [ 540.571878] __irq_exit_rcu+0x174/0x1b4
+# [ 540.575849] irq_exit_rcu+0x10/0x38
+# [ 540.579462] el1_interrupt+0x38/0x64
+# [ 540.583158] el1h_64_irq_handler+0x18/0x24
+# [ 540.587397] el1h_64_irq+0x6c/0x70
+# [ 540.590918] lock_acquire+0x1e0/0x338 (P)
+# [ 540.595060] __mutex_lock+0xa8/0x4b8
+# [ 540.598760] mutex_lock_nested+0x24/0x30
+# [ 540.602818] _find_opp_table_unlocked+0x40/0xfc
+# [ 540.607503] _find_key+0x64/0x16c
+# [ 540.610940] dev_pm_opp_find_freq_exact+0x4c/0x74
+# [ 540.615798] qcom_cpufreq_hw_target_index+0xe8/0x128
+# [ 540.620924] __cpufreq_driver_target+0x144/0x29c
+# [ 540.625698] sugov_work+0x58/0x74
+# [ 540.629134] kthread_worker_fn+0xf4/0x324
+# [ 540.633278] kthread+0x12c/0x208
+# [ 540.636619] ret_from_fork+0x10/0x20
+# [ 540.640321] SMP: stopping secondary CPUs
+# [ 540.644518] Kernel Offset: 0x417126200000 from 0xffff800080000000
+# [ 540.650848] PHYS_OFFSET: 0xfff0dda400000000
+# [ 540.655170] CPU features: 0x000,00000100,00901250,8200721b
+# [ 540.660829] Memory Limit: none
+# [ 540.663999] ---[ end Kernel panic - not syncing: softlockup: hung tasks ]---
diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
index 329770c520d9..9450f2a002fd 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
index 3c7e494857b5..198deea3faa9 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
index 3c7e494857b5..198deea3faa9 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
index feeed89b6c3f..af99ac54c3a5 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
index feeed89b6c3f..af99ac54c3a5 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
index ba9160d4d8eb..61122ea7f008 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -5,6 +5,5 @@ core_setmaster_vs_auth,Crash
dumb_buffer@create-clear,Crash
fbdev@pan,Crash
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
-kms_flip@flip-vs-modeset-vs-hang,Crash
kms_prop_blob@invalid-set-prop,Crash
kms_prop_blob@invalid-set-prop-any,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
index eb16b29dee48..71418ea35a17 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
@@ -14,6 +14,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
index 2803d0d80192..45dd8d493f6e 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -2,7 +2,6 @@ dumb_buffer@create-clear,Crash
kms_atomic_transition@modeset-transition,Fail
kms_atomic_transition@modeset-transition-fencing,Fail
kms_atomic_transition@plane-toggle-modeset-transition,Fail
-kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_color@gamma,Fail
kms_color@legacy-gamma,Fail
kms_cursor_crc@cursor-alpha-opaque,Fail
@@ -55,6 +54,7 @@ kms_flip@plain-flip-ts-check,Fail
kms_flip@plain-flip-ts-check-interruptible,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
kms_lease@page-flip-implicit-plane,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
index 348b4ce7eb4b..b467991d4094 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -75,58 +75,72 @@ kms_bw@linear-tiling-2-displays-2160x1440p
# Linux Version: 6.11.0-rc5
kms_flip@flip-vs-expired-vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/f944dd08-c88c-49ae-aff0-274374550a93@collabora.com/T/#u
# Failure Rate: 40
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-1-displays-2160x1440p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/afa2d3bf-29f2-488d-8cc9-f30d461444b0@collabora.com/T/#u
# Failure Rate: 80
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane_multiple@tiling-none
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/6fdaa97f-c1a5-4216-831f-dbb7c5f90498@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-1-displays-1920x1080p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/616aa015-9574-4527-9d07-d8d698bbcc3c@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane@plane-position-hole-dpms
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/7a1b888f-d7db-4ed7-96cd-3975ace837fb@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@flip-vs-absolute-wf_vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/f17fffb6-abc4-464e-8465-395311b01f6a@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@flip-vs-blocking-wf-vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/9b590b26-1bf9-4951-b6a3-ef6c67e6a1c6@collabora.com/T/#u
# Failure Rate: 60
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-2-displays-1920x1080p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/059545fa-65b1-4f5c-a13e-4d2898679f51@collabora.com/T/#u
# Failure Rate: 20
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@modeset-vs-vblank-race-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/eece9a80-42f3-41f4-86cc-69d8a51b976a@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_bw@connected-linear-tiling-1-displays-2160x1440p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/63dfd5b7-8a54-44a3-9530-f8dcd77a21d1@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_bw@linear-tiling-1-displays-3840x2160p
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
index e8e994d92557..b83ec75161b2 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
@@ -14,6 +14,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
index c72fee70e739..9749ddb75121 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
@@ -157,6 +157,7 @@ kms_flip@plain-flip-ts-check-interruptible,Fail
kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@cursor-implicit-plane,Fail
kms_lease@lease-uevent,Fail
kms_lease@page-flip-implicit-plane,Fail
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
index adbcdd0f28d2..28e37185bac0 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
@@ -19,6 +19,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
index 62428f3c8f31..e3ca6da8cde7 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
@@ -88,3 +88,31 @@ kms_flip@flip-vs-expired-vblank
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_pipe_crc_basic@nonblocking-crc-frame-sequence
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/2364a6bf-e6bc-4741-8c78-cea8bdb06e03@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@modeset-vs-vblank-race
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/f7d72ed9-a783-46d7-b75d-54072bda32a3@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_pipe_crc_basic@suspend-read-crc
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/98d3ba54-bcb9-41ab-adb1-a18ba61ee2e4@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_plane@plane-panning-bottom-right-suspend
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/b58d15eb-094d-4ac2-aad3-83e518c2f55d@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_vblank@ts-continuation-dpms-suspend
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
index 319789806271..716d2d4e452d 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
@@ -1,5 +1,6 @@
# keeps printing vkms_vblank_simulate: vblank timer overrun and never ends
kms_invalid_mode@int-max-clock
+kms_invalid_mode@overflow-vrefresh
# kernel panic seen with kms_cursor_crc tests
kms_cursor_crc.*
@@ -802,6 +803,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 30c736fc0067..7d2e499ea5de 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -98,6 +98,21 @@ struct drm_bridge_connector {
* HDMI connector infrastructure, if any (see &DRM_BRIDGE_OP_HDMI).
*/
struct drm_bridge *bridge_hdmi;
+ /**
+ * @bridge_hdmi_audio:
+ *
+ * The bridge in the chain that implements necessary support for the
+ * HDMI Audio infrastructure, if any (see &DRM_BRIDGE_OP_HDMI_AUDIO).
+ */
+ struct drm_bridge *bridge_hdmi_audio;
+ /**
+ * @bridge_dp_audio:
+ *
+ * The bridge in the chain that implements necessary support for the
+ * DisplayPort Audio infrastructure, if any (see
+ * &DRM_BRIDGE_OP_DP_AUDIO).
+ */
+ struct drm_bridge *bridge_dp_audio;
};
#define to_drm_bridge_connector(x) \
@@ -433,14 +448,25 @@ static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ if (!bridge->funcs->hdmi_audio_startup)
+ return 0;
+
+ return bridge->funcs->hdmi_audio_startup(connector, bridge);
+ }
+
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
- if (!bridge->funcs->hdmi_audio_startup)
- return 0;
+ if (!bridge->funcs->dp_audio_startup)
+ return 0;
- return bridge->funcs->hdmi_audio_startup(connector, bridge);
+ return bridge->funcs->dp_audio_startup(connector, bridge);
+ }
+
+ return -EINVAL;
}
static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
@@ -451,11 +477,19 @@ static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+ }
- return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+
+ return bridge->funcs->dp_audio_prepare(connector, bridge, fmt, hparms);
+ }
+
+ return -EINVAL;
}
static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
@@ -464,11 +498,15 @@ static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+ bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+ }
- bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+ bridge->funcs->dp_audio_shutdown(connector, bridge);
+ }
}
static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connector,
@@ -478,15 +516,27 @@ static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connecto
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ if (!bridge->funcs->hdmi_audio_mute_stream)
+ return -ENOTSUPP;
- if (bridge->funcs->hdmi_audio_mute_stream)
return bridge->funcs->hdmi_audio_mute_stream(connector, bridge,
enable, direction);
- else
- return -ENOTSUPP;
+ }
+
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+
+ if (!bridge->funcs->dp_audio_mute_stream)
+ return -ENOTSUPP;
+
+ return bridge->funcs->dp_audio_mute_stream(connector, bridge,
+ enable, direction);
+ }
+
+ return -EINVAL;
}
static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_audio_funcs = {
@@ -576,6 +626,42 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
max_bpc = bridge->max_bpc;
}
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_AUDIO) {
+ if (bridge_connector->bridge_hdmi_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (bridge_connector->bridge_dp_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (!bridge->hdmi_audio_max_i2s_playback_channels &&
+ !bridge->hdmi_audio_spdif_playback)
+ return ERR_PTR(-EINVAL);
+
+ if (!bridge->funcs->hdmi_audio_prepare ||
+ !bridge->funcs->hdmi_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ bridge_connector->bridge_hdmi_audio = bridge;
+ }
+
+ if (bridge->ops & DRM_BRIDGE_OP_DP_AUDIO) {
+ if (bridge_connector->bridge_dp_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (bridge_connector->bridge_hdmi_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (!bridge->hdmi_audio_max_i2s_playback_channels &&
+ !bridge->hdmi_audio_spdif_playback)
+ return ERR_PTR(-EINVAL);
+
+ if (!bridge->funcs->dp_audio_prepare ||
+ !bridge->funcs->dp_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ bridge_connector->bridge_dp_audio = bridge;
+ }
+
if (!drm_bridge_get_next_bridge(bridge))
connector_type = bridge->type;
@@ -611,22 +697,6 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
max_bpc);
if (ret)
return ERR_PTR(ret);
-
- if (bridge->hdmi_audio_max_i2s_playback_channels ||
- bridge->hdmi_audio_spdif_playback) {
- if (!bridge->funcs->hdmi_audio_prepare ||
- !bridge->funcs->hdmi_audio_shutdown)
- return ERR_PTR(-EINVAL);
-
- ret = drm_connector_hdmi_audio_init(connector,
- bridge->hdmi_audio_dev,
- &drm_bridge_connector_hdmi_audio_funcs,
- bridge->hdmi_audio_max_i2s_playback_channels,
- bridge->hdmi_audio_spdif_playback,
- bridge->hdmi_audio_dai_port);
- if (ret)
- return ERR_PTR(ret);
- }
} else {
ret = drmm_connector_init(drm, connector,
&drm_bridge_connector_funcs,
@@ -635,6 +705,24 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(ret);
}
+ if (bridge_connector->bridge_hdmi_audio ||
+ bridge_connector->bridge_dp_audio) {
+ struct device *dev;
+
+ if (bridge_connector->bridge_hdmi_audio)
+ dev = bridge_connector->bridge_hdmi_audio->hdmi_audio_dev;
+ else
+ dev = bridge_connector->bridge_dp_audio->hdmi_audio_dev;
+
+ ret = drm_connector_hdmi_audio_init(connector, dev,
+ &drm_bridge_connector_hdmi_audio_funcs,
+ bridge->hdmi_audio_max_i2s_playback_channels,
+ bridge->hdmi_audio_spdif_playback,
+ bridge->hdmi_audio_dai_port);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
if (bridge_connector->bridge_hpd)
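
With the hunks above, drm_bridge_connector tracks at most one audio-capable bridge per connector: a bridge advertises either DRM_BRIDGE_OP_HDMI_AUDIO or DRM_BRIDGE_OP_DP_AUDIO (claiming both, or having two audio bridges in one chain, fails with -EBUSY), must provide the prepare and shutdown callbacks, and must declare either I2S channels or S/PDIF playback before drm_connector_hdmi_audio_init() is called with its hdmi_audio_dev. A minimal sketch of what a DP-audio bridge driver would fill in, using only fields and ops named in this series; the my_* callbacks are hypothetical placeholders:

	static const struct drm_bridge_funcs my_bridge_funcs = {
		/* ...the usual bridge ops... */
		.dp_audio_startup     = my_dp_audio_startup,	/* optional: treated as success if absent */
		.dp_audio_prepare     = my_dp_audio_prepare,	/* required */
		.dp_audio_shutdown    = my_dp_audio_shutdown,	/* required */
		.dp_audio_mute_stream = my_dp_audio_mute_stream,/* optional: -ENOTSUPP if absent */
	};

	/* in the bridge driver's probe/attach path */
	bridge->funcs = &my_bridge_funcs;
	bridge->ops |= DRM_BRIDGE_OP_DP_AUDIO;
	bridge->hdmi_audio_dev = dev;				/* device passed to drm_connector_hdmi_audio_init() */
	bridge->hdmi_audio_max_i2s_playback_channels = 8;	/* or set hdmi_audio_spdif_playback */
	bridge->hdmi_audio_dai_port = 1;
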
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 56a4965e518c..ed31471bd0e2 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -96,7 +96,7 @@ static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
ssize_t err = 0;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val);
return (enable && err < 0) ? err : 0;
}
@@ -112,7 +112,7 @@ static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
mask[0] = la_mask & 0xff;
mask[1] = la_mask >> 8;
- err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
+ err = drm_dp_dpcd_write_data(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
}
@@ -123,15 +123,14 @@ static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
unsigned int retries = min(5, attempts - 1);
ssize_t err;
- err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
- msg->msg, msg->len);
+ err = drm_dp_dpcd_write_data(aux, DP_CEC_TX_MESSAGE_BUFFER,
+ msg->msg, msg->len);
if (err < 0)
return err;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
- (msg->len - 1) | (retries << 4) |
- DP_CEC_TX_MESSAGE_SEND);
- return err < 0 ? err : 0;
+ return drm_dp_dpcd_write_byte(aux, DP_CEC_TX_MESSAGE_INFO,
+ (msg->len - 1) | (retries << 4) |
+ DP_CEC_TX_MESSAGE_SEND);
}
static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
@@ -144,13 +143,13 @@ static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
return 0;
- err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
- if (err >= 0) {
+ err = drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CONTROL, &val);
+ if (!err) {
if (enable)
val |= DP_CEC_SNOOPING_ENABLE;
else
val &= ~DP_CEC_SNOOPING_ENABLE;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val);
}
return (enable && err < 0) ? err : 0;
}
@@ -194,7 +193,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux)
u8 rx_msg_info;
ssize_t err;
- err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
+ err = drm_dp_dpcd_read_byte(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
if (err < 0)
return err;
@@ -202,7 +201,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux)
return 0;
msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
- err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
+ err = drm_dp_dpcd_read_data(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
if (err < 0)
return err;
@@ -215,7 +214,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
struct cec_adapter *adap = aux->cec.adap;
u8 flags;
- if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
return;
if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
@@ -230,7 +229,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
(DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES);
- drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
+ drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
}
/**
@@ -253,13 +252,13 @@ void drm_dp_cec_irq(struct drm_dp_aux *aux)
if (!aux->cec.adap)
goto unlock;
- ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
- &cec_irq);
+ ret = drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
+ &cec_irq);
if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
goto unlock;
drm_dp_cec_handle_irq(aux);
- drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
+ drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
unlock:
mutex_unlock(&aux->cec.lock);
}
@@ -269,7 +268,7 @@ static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
{
u8 cap = 0;
- if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
+ if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) < 0 ||
!(cap & DP_CEC_TUNNELING_CAPABLE))
return false;
if (cec_cap)
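
The drm_dp_cec.c conversion above (and the drm_dp_helper.c one below) switches from the byte-count-returning drm_dp_dpcd_readb()/writeb()/read()/write() helpers to drm_dp_dpcd_read_byte()/write_byte()/read_data()/write_data(), which return 0 on success and a negative error code on failure. That is why "err >= 0" checks become "!err" and the "!= 1" / "!= len" short-transfer checks disappear. A minimal before/after sketch of the two calling conventions, assuming only the helpers shown in this series:

	u8 val;
	int err;

	/* old helper: returns the number of bytes transferred, 1 on success */
	err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
	if (err != 1)
		return err < 0 ? err : -EIO;

	/* new helper: returns 0 on success or a negative errno */
	err = drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CONTROL, &val);
	if (err < 0)
		return err;
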
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index dbce1c3f4969..f2a6559a2710 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -327,7 +327,7 @@ static int __read_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SI
if (offset < DP_RECEIVER_CAP_SIZE) {
rd_interval = dpcd[offset];
} else {
- if (drm_dp_dpcd_readb(aux, offset, &rd_interval) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, offset, &rd_interval) < 0) {
drm_dbg_kms(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* arbitrary default delay */
@@ -358,7 +358,7 @@ int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux)
int unit;
u8 val;
- if (drm_dp_dpcd_readb(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) < 0) {
drm_err(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* default to max */
@@ -704,6 +704,8 @@ EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
+ *
+ * In most cases you want to use drm_dp_dpcd_read_data() instead.
*/
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
@@ -752,6 +754,8 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
+ *
+ * In most cases you want to use drm_dp_dpcd_write_data() instead.
*/
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
@@ -774,14 +778,13 @@ EXPORT_SYMBOL(drm_dp_dpcd_write);
* @aux: DisplayPort AUX channel
* @status: buffer to store the link status in (must be at least 6 bytes)
*
- * Returns the number of bytes transferred on success or a negative error
- * code on failure.
+ * Returns a negative error code on failure or 0 on success.
*/
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE])
{
- return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status,
- DP_LINK_STATUS_SIZE);
+ return drm_dp_dpcd_read_data(aux, DP_LANE0_1_STATUS, status,
+ DP_LINK_STATUS_SIZE);
}
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
@@ -804,30 +807,20 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
{
int ret;
- if (dp_phy == DP_PHY_DPRX) {
- ret = drm_dp_dpcd_read(aux,
- DP_LANE0_1_STATUS,
- link_status,
- DP_LINK_STATUS_SIZE);
-
- if (ret < 0)
- return ret;
-
- WARN_ON(ret != DP_LINK_STATUS_SIZE);
-
- return 0;
- }
+ if (dp_phy == DP_PHY_DPRX)
+ return drm_dp_dpcd_read_data(aux,
+ DP_LANE0_1_STATUS,
+ link_status,
+ DP_LINK_STATUS_SIZE);
- ret = drm_dp_dpcd_read(aux,
- DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
- link_status,
- DP_LINK_STATUS_SIZE - 1);
+ ret = drm_dp_dpcd_read_data(aux,
+ DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
+ link_status,
+ DP_LINK_STATUS_SIZE - 1);
if (ret < 0)
return ret;
- WARN_ON(ret != DP_LINK_STATUS_SIZE - 1);
-
/* Convert the LTTPR to the sink PHY link status layout */
memmove(&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS + 1],
&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS],
@@ -838,12 +831,81 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status);
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @revision: DPCD revision supported on the link
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (revision < DP_DPCD_REV_11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+	 * Control Field" (register 0x600)).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_up);
+
+/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @revision: DPCD revision supported on the link
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (revision < DP_DPCD_REV_11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_down);
+
static int read_payload_update_status(struct drm_dp_aux *aux)
{
int ret;
u8 status;
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0)
return ret;
@@ -870,21 +932,21 @@ int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
int ret;
int retries = 0;
- drm_dp_dpcd_writeb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
- DP_PAYLOAD_TABLE_UPDATED);
+ drm_dp_dpcd_write_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ DP_PAYLOAD_TABLE_UPDATED);
payload_alloc[0] = vcpid;
payload_alloc[1] = start_time_slot;
payload_alloc[2] = time_slot_count;
- ret = drm_dp_dpcd_write(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
- if (ret != 3) {
+ ret = drm_dp_dpcd_write_data(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "failed to write payload allocation %d\n", ret);
goto fail;
}
retry:
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "failed to read payload table status %d\n", ret);
goto fail;
@@ -1040,15 +1102,15 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
{
u8 link_edid_read = 0, auto_test_req = 0, test_resp = 0;
- if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- &auto_test_req, 1) < 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
- if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_TEST_REQUEST, &link_edid_read) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_TEST_REQUEST);
return false;
@@ -1061,23 +1123,23 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
return false;
}
- if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- &auto_test_req, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ auto_test_req) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
/* send back checksum for the last edid extension block data */
- if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM,
- &real_edid_checksum, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_TEST_EDID_CHECKSUM,
+ real_edid_checksum) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_EDID_CHECKSUM);
return false;
}
test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
- if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_TEST_RESPONSE, test_resp) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_RESPONSE);
return false;
@@ -1114,12 +1176,10 @@ static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux,
DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
return 0;
- ret = drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, &dpcd_ext,
- sizeof(dpcd_ext));
+ ret = drm_dp_dpcd_read_data(aux, DP_DP13_DPCD_REV, &dpcd_ext,
+ sizeof(dpcd_ext));
if (ret < 0)
return ret;
- if (ret != sizeof(dpcd_ext))
- return -EIO;
if (dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
drm_dbg_kms(aux->drm_dev,
@@ -1156,10 +1216,10 @@ int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
{
int ret;
- ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
+ ret = drm_dp_dpcd_read_data(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
if (ret < 0)
return ret;
- if (ret != DP_RECEIVER_CAP_SIZE || dpcd[DP_DPCD_REV] == 0)
+ if (dpcd[DP_DPCD_REV] == 0)
return -EIO;
ret = drm_dp_read_extended_dpcd_caps(aux, dpcd);
@@ -1209,11 +1269,9 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE)
len *= 4;
- ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
+ ret = drm_dp_dpcd_read_data(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
if (ret < 0)
return ret;
- if (ret != len)
- return -EIO;
drm_dbg_kms(aux->drm_dev, "%s: DPCD DFP: %*ph\n", aux->name, len, downstream_ports);
@@ -1570,7 +1628,7 @@ EXPORT_SYMBOL(drm_dp_downstream_mode);
*/
int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6])
{
- return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6);
+ return drm_dp_dpcd_read_data(aux, DP_BRANCH_ID, id, 6);
}
EXPORT_SYMBOL(drm_dp_downstream_id);
@@ -1635,13 +1693,13 @@ void drm_dp_downstream_debug(struct seq_file *m,
drm_dp_downstream_id(aux, id);
seq_printf(m, "\t\tID: %s\n", id);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1);
- if (len > 0)
+ len = drm_dp_dpcd_read_data(aux, DP_BRANCH_HW_REV, &rev[0], 1);
+ if (!len)
seq_printf(m, "\t\tHW: %d.%d\n",
(rev[0] & 0xf0) >> 4, rev[0] & 0xf);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
- if (len > 0)
+ len = drm_dp_dpcd_read_data(aux, DP_BRANCH_SW_REV, rev, 2);
+ if (!len)
seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
if (detailed_cap_info) {
@@ -1779,11 +1837,9 @@ int drm_dp_read_sink_count(struct drm_dp_aux *aux)
u8 count;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count);
+ ret = drm_dp_dpcd_read_byte(aux, DP_SINK_COUNT, &count);
if (ret < 0)
return ret;
- if (ret != 1)
- return -EIO;
return DP_GET_SINK_COUNT(count);
}
@@ -2081,14 +2137,17 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
- drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
- /* Send a bare address packet to start the transaction.
- * Zero sized messages specify an address only (bare
- * address) transaction.
- */
- msg.buffer = NULL;
- msg.size = 0;
- err = drm_dp_i2c_do_msg(aux, &msg);
+
+ if (!aux->no_zero_sized) {
+ drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
+ /* Send a bare address packet to start the transaction.
+ * Zero sized messages specify an address only (bare
+ * address) transaction.
+ */
+ msg.buffer = NULL;
+ msg.size = 0;
+ err = drm_dp_i2c_do_msg(aux, &msg);
+ }
/*
 * Reset msg.request in case it got
@@ -2107,6 +2166,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.buffer = msgs[i].buf + j;
msg.size = min(transfer_size, msgs[i].len - j);
+ if (j + msg.size == msgs[i].len && aux->no_zero_sized)
+ msg.request &= ~DP_AUX_I2C_MOT;
err = drm_dp_i2c_drain_msg(aux, &msg);
/*
@@ -2124,15 +2185,17 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
}
if (err >= 0)
err = num;
- /* Send a bare address packet to close out the transaction.
- * Zero sized messages specify an address only (bare
- * address) transaction.
- */
- msg.request &= ~DP_AUX_I2C_MOT;
- msg.buffer = NULL;
- msg.size = 0;
- (void)drm_dp_i2c_do_msg(aux, &msg);
+ if (!aux->no_zero_sized) {
+ /* Send a bare address packet to close out the transaction.
+ * Zero sized messages specify an address only (bare
+ * address) transaction.
+ */
+ msg.request &= ~DP_AUX_I2C_MOT;
+ msg.buffer = NULL;
+ msg.size = 0;
+ (void)drm_dp_i2c_do_msg(aux, &msg);
+ }
return err;
}
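A rough sketch of how an AUX implementation might opt out of the bare-address packets, assuming the no_zero_sized flag this series adds to struct drm_dp_aux; the transfer hook is hypothetical.

#include <drm/display/drm_dp_helper.h>

static ssize_t example_aux_transfer(struct drm_dp_aux *aux,
				    struct drm_dp_aux_msg *msg);

/*
 * Sketch: an AUX channel whose hardware cannot issue zero-sized
 * (address-only) I2C-over-AUX transactions opts out before registration.
 */
static void example_aux_init(struct drm_dp_aux *aux, struct drm_device *drm)
{
	aux->name = "example AUX";
	aux->drm_dev = drm;
	aux->transfer = example_aux_transfer;
	aux->no_zero_sized = true;	/* assumed field added by this series */
	drm_dp_aux_init(aux);
}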
@@ -2172,13 +2235,13 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc)
u8 buf, count;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
WARN_ON(!(buf & DP_TEST_SINK_START));
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK_MISC, &buf);
if (ret < 0)
return ret;
@@ -2192,11 +2255,7 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc)
* At DP_TEST_CRC_R_CR, there's 6 bytes containing CRC data, 2 bytes
* per component (RGB or CrYCb).
*/
- ret = drm_dp_dpcd_read(aux, DP_TEST_CRC_R_CR, crc, 6);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_read_data(aux, DP_TEST_CRC_R_CR, crc, 6);
}
static void drm_dp_aux_crc_work(struct work_struct *work)
@@ -2395,11 +2454,11 @@ int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
+ ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
if (ret < 0)
return ret;
@@ -2422,11 +2481,11 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START);
+ ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START);
if (ret < 0)
return ret;
@@ -2512,11 +2571,7 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
static int drm_dp_read_ident(struct drm_dp_aux *aux, unsigned int offset,
struct drm_dp_dpcd_ident *ident)
{
- int ret;
-
- ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
-
- return ret < 0 ? ret : 0;
+ return drm_dp_dpcd_read_data(aux, offset, ident, sizeof(*ident));
}
static void drm_dp_dump_desc(struct drm_dp_aux *aux,
@@ -2774,13 +2829,11 @@ static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux,
int ret;
for (offset = 0; offset < buf_size; offset += block_size) {
- ret = drm_dp_dpcd_read(aux,
- address + offset,
- &buf[offset], block_size);
+ ret = drm_dp_dpcd_read_data(aux,
+ address + offset,
+ &buf[offset], block_size);
if (ret < 0)
return ret;
-
- WARN_ON(ret != block_size);
}
return 0;
@@ -2995,12 +3048,12 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
int err;
u8 rate, lanes;
- err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate);
+ err = drm_dp_dpcd_read_byte(aux, DP_TEST_LINK_RATE, &rate);
if (err < 0)
return err;
data->link_rate = drm_dp_bw_code_to_link_rate(rate);
- err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes);
+ err = drm_dp_dpcd_read_byte(aux, DP_TEST_LANE_COUNT, &lanes);
if (err < 0)
return err;
data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK;
@@ -3008,22 +3061,22 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
if (lanes & DP_ENHANCED_FRAME_CAP)
data->enhanced_frame_cap = true;
- err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
+ err = drm_dp_dpcd_read_byte(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
if (err < 0)
return err;
switch (data->phy_pattern) {
case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
- err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
- &data->custom80, sizeof(data->custom80));
+ err = drm_dp_dpcd_read_data(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ &data->custom80, sizeof(data->custom80));
if (err < 0)
return err;
break;
case DP_PHY_TEST_PATTERN_CP2520:
- err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
- &data->hbr2_reset,
- sizeof(data->hbr2_reset));
+ err = drm_dp_dpcd_read_data(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
+ &data->hbr2_reset,
+ sizeof(data->hbr2_reset));
if (err < 0)
return err;
}
@@ -3050,15 +3103,15 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
if (dp_rev < 0x12) {
test_pattern = (test_pattern << 2) &
DP_LINK_QUAL_PATTERN_11_MASK;
- err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET,
- test_pattern);
+ err = drm_dp_dpcd_write_byte(aux, DP_TRAINING_PATTERN_SET,
+ test_pattern);
if (err < 0)
return err;
} else {
for (i = 0; i < data->num_lanes; i++) {
- err = drm_dp_dpcd_writeb(aux,
- DP_LINK_QUAL_LANE0_SET + i,
- test_pattern);
+ err = drm_dp_dpcd_write_byte(aux,
+ DP_LINK_QUAL_LANE0_SET + i,
+ test_pattern);
if (err < 0)
return err;
}
@@ -3265,8 +3318,8 @@ bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_C
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
return false;
- if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
- &rx_feature) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
+ &rx_feature) < 0) {
drm_dbg_dp(aux->drm_dev,
"Failed to read DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1\n");
return false;
@@ -3290,7 +3343,7 @@ bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
return false;
- if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) < 0) {
drm_dbg_dp(aux->drm_dev, "failed to read DP_DPRX_FEATURE_ENUMERATION_LIST\n");
return false;
}
@@ -3421,16 +3474,13 @@ EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw);
*/
int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd)
{
- int ret;
u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE |
DP_PCON_ENABLE_LINK_FRL_MODE;
if (enable_frl_ready_hpd)
buf |= DP_PCON_ENABLE_HPD_READY;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
-
- return ret;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_prepare);
@@ -3445,7 +3495,7 @@ bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux)
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
@@ -3474,7 +3524,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
@@ -3509,11 +3559,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
return -EINVAL;
}
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1);
@@ -3539,7 +3585,7 @@ int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
else
buf &= ~DP_PCON_FRL_LINK_TRAIN_EXTENDED;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
+ ret = drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
if (ret < 0)
return ret;
@@ -3555,13 +3601,7 @@ EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2);
*/
int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux)
{
- int ret;
-
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
}
EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config);
@@ -3576,7 +3616,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
int ret;
u8 buf = 0;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) {
@@ -3585,11 +3625,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
return -EINVAL;
}
buf |= DP_PCON_ENABLE_HDMI_LINK;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_enable);
@@ -3604,7 +3640,7 @@ bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
@@ -3629,7 +3665,7 @@ int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask)
int mode;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
if (ret < 0)
return ret;
@@ -3658,7 +3694,7 @@ void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
for (i = 0; i < hdmi->max_lanes; i++) {
- if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
return;
error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK;
@@ -3793,7 +3829,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
@@ -3804,11 +3840,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
buf |= pps_buf_config << 2;
}
- ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
}
/**
@@ -3820,13 +3852,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
*/
int drm_dp_pcon_pps_default(struct drm_dp_aux *aux)
{
- int ret;
-
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_default);
@@ -3842,15 +3868,11 @@ int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128])
{
int ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128);
if (ret < 0)
return ret;
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf);
@@ -3867,21 +3889,17 @@ int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6])
{
int ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
if (ret < 0)
return ret;
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_param);
@@ -3897,7 +3915,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
@@ -3906,11 +3924,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
else
buf &= ~DP_CONVERSION_RGB_YCBCR_MASK;
- ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
@@ -3942,12 +3956,12 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
buf[0] = level;
}
- ret = drm_dp_dpcd_write(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf));
- if (ret != sizeof(buf)) {
+ ret = drm_dp_dpcd_write_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf));
+ if (ret < 0) {
drm_err(aux->drm_dev,
"%s: Failed to write aux backlight level: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
return 0;
@@ -3965,22 +3979,22 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
if (!bl->aux_enable)
return 0;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf);
+ if (ret < 0) {
drm_err(aux->drm_dev, "%s: Failed to read eDP display control register: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
if (enable)
buf |= DP_EDP_BACKLIGHT_ENABLE;
else
buf &= ~DP_EDP_BACKLIGHT_ENABLE;
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf);
+ if (ret < 0) {
drm_err(aux->drm_dev, "%s: Failed to write eDP display control register: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
return 0;
@@ -4016,15 +4030,16 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM;
if (bl->pwmgen_bit_count) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
- if (ret != 1)
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
+ if (ret < 0)
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
}
if (bl->pwm_freq_pre_divider) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_FREQ_SET, bl->pwm_freq_pre_divider);
- if (ret != 1)
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_FREQ_SET,
+ bl->pwm_freq_pre_divider);
+ if (ret < 0)
drm_dbg_kms(aux->drm_dev,
"%s: Failed to write aux backlight frequency: %d\n",
aux->name, ret);
@@ -4032,8 +4047,8 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
}
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
@@ -4088,8 +4103,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
if (!bl->aux_set)
return 0;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n",
aux->name, ret);
return -ENODEV;
@@ -4122,14 +4137,14 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
* - FxP is within 25% of desired value.
* Note: 25% is arbitrary value and may need some tweak.
*/
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
aux->name, ret);
return 0;
}
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
aux->name, ret);
return 0;
@@ -4154,8 +4169,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
break;
}
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
return 0;
@@ -4180,8 +4195,8 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
u8 buf[2];
u8 mode_reg;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight mode: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
@@ -4194,11 +4209,11 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
int size = 1 + bl->lsb_reg_used;
- ret = drm_dp_dpcd_read(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size);
- if (ret != size) {
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight level: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
if (bl->lsb_reg_used)
@@ -4343,8 +4358,8 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
if (!panel || !panel->dev || !aux)
return -EINVAL;
- ret = drm_dp_dpcd_read(aux, DP_EDP_DPCD_REV, edp_dpcd,
- EDP_DISPLAY_CTL_CAP_SIZE);
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_DPCD_REV, edp_dpcd,
+ EDP_DISPLAY_CTL_CAP_SIZE);
if (ret < 0)
return ret;
@@ -4385,8 +4400,9 @@ EXPORT_SYMBOL(drm_panel_dp_aux_backlight);
#endif
/* See DP Standard v2.1 2.6.4.4.1.1, 2.8.4.4, 2.8.7 */
-static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16,
- int symbol_size, bool is_mst)
+static int drm_dp_link_data_symbol_cycles(int lane_count, int pixels,
+ int bpp_x16, int symbol_size,
+ bool is_mst)
{
int cycles = DIV_ROUND_UP(pixels * bpp_x16, 16 * symbol_size * lane_count);
int align = is_mst ? 4 / lane_count : 1;
@@ -4394,22 +4410,42 @@ static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16,
return ALIGN(cycles, align);
}
-static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_count,
- int bpp_x16, int symbol_size, bool is_mst)
+/**
+ * drm_dp_link_symbol_cycles - calculate the link symbol count with/without DSC
+ * @lane_count: DP link lane count
+ * @pixels: number of pixels in a scanline
+ * @dsc_slice_count: number of slices for DSC or '0' for non-DSC
+ * @bpp_x16: bits per pixel in .4 binary fixed format
+ * @symbol_size: DP symbol size
+ * @is_mst: %true for MST and %false for SST
+ *
+ * Calculate the link symbol cycles for both the DSC (@dsc_slice_count != 0)
+ * and non-DSC (@dsc_slice_count == 0) cases and return the count.
+ */
+int drm_dp_link_symbol_cycles(int lane_count, int pixels, int dsc_slice_count,
+ int bpp_x16, int symbol_size, bool is_mst)
{
+ int slice_count = dsc_slice_count ? : 1;
int slice_pixels = DIV_ROUND_UP(pixels, slice_count);
- int slice_data_cycles = drm_dp_link_symbol_cycles(lane_count, slice_pixels,
- bpp_x16, symbol_size, is_mst);
- int slice_eoc_cycles = is_mst ? 4 / lane_count : 1;
+ int slice_data_cycles = drm_dp_link_data_symbol_cycles(lane_count,
+ slice_pixels,
+ bpp_x16,
+ symbol_size,
+ is_mst);
+ int slice_eoc_cycles = 0;
+
+ if (dsc_slice_count)
+ slice_eoc_cycles = is_mst ? 4 / lane_count : 1;
return slice_count * (slice_data_cycles + slice_eoc_cycles);
}
+EXPORT_SYMBOL(drm_dp_link_symbol_cycles);
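A worked example of the newly exported helper, assuming a 4-lane 8b/10b SST link with 1920 active pixels at 24 bpp and no DSC.

#include <drm/display/drm_dp_helper.h>

static int example_symbol_cycles(void)
{
	/*
	 * 1920 pixels, bpp_x16 = 24 << 4 (24.0 bpp), 4 lanes, 8-bit symbols,
	 * SST, no DSC:
	 * DIV_ROUND_UP(1920 * 384, 16 * 8 * 4) = 1440 symbol cycles per lane.
	 */
	return drm_dp_link_symbol_cycles(4, 1920, 0, 24 << 4, 8, false);
}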
/**
* drm_dp_bw_overhead - Calculate the BW overhead of a DP link stream
* @lane_count: DP link lane count
* @hactive: pixel count of the active period in one scanline of the stream
- * @dsc_slice_count: DSC slice count if @flags/DRM_DP_LINK_BW_OVERHEAD_DSC is set
+ * @dsc_slice_count: number of slices for DSC or '0' for non-DSC
* @bpp_x16: bits per pixel in .4 binary fixed point
* @flags: DRM_DP_OVERHEAD_x flags
*
@@ -4423,7 +4459,7 @@ static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_c
* as well as the stream's
* - @hactive timing
* - @bpp_x16 color depth
- * - compression mode (@flags / %DRM_DP_OVERHEAD_DSC).
+ * - compression mode (@dsc_slice_count != 0)
* Note that this overhead doesn't account for the 8b/10b, 128b/132b
* channel coding efficiency, for that see
* @drm_dp_link_bw_channel_coding_efficiency().
@@ -4478,15 +4514,10 @@ int drm_dp_bw_overhead(int lane_count, int hactive,
WARN_ON((flags & DRM_DP_BW_OVERHEAD_UHBR) &&
(flags & DRM_DP_BW_OVERHEAD_FEC));
- if (flags & DRM_DP_BW_OVERHEAD_DSC)
- symbol_cycles = drm_dp_link_dsc_symbol_cycles(lane_count, hactive,
- dsc_slice_count,
- bpp_x16, symbol_size,
- is_mst);
- else
- symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive,
- bpp_x16, symbol_size,
- is_mst);
+ symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive,
+ dsc_slice_count,
+ bpp_x16, symbol_size,
+ is_mst);
return DIV_ROUND_UP_ULL(mul_u32_u32(symbol_cycles * symbol_size * lane_count,
overhead * 16),
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 3a1f1ffc7b55..a89f38fd3218 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -2192,24 +2192,20 @@ static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid)
guid_copy(&mstb->guid, guid);
if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) {
+ struct drm_dp_aux *aux;
u8 buf[UUID_SIZE];
export_guid(buf, &mstb->guid);
- if (mstb->port_parent) {
- ret = drm_dp_send_dpcd_write(mstb->mgr,
- mstb->port_parent,
- DP_GUID, sizeof(buf), buf);
- } else {
- ret = drm_dp_dpcd_write(mstb->mgr->aux,
- DP_GUID, buf, sizeof(buf));
- }
- }
+ if (mstb->port_parent)
+ aux = &mstb->port_parent->aux;
+ else
+ aux = mstb->mgr->aux;
- if (ret < 16 && ret > 0)
- return -EPROTO;
+ ret = drm_dp_dpcd_write_data(aux, DP_GUID, buf, sizeof(buf));
+ }
- return ret == 16 ? 0 : ret;
+ return ret;
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
@@ -2744,14 +2740,13 @@ retry:
do {
tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
- ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
- &msg[offset],
- tosend);
- if (ret != tosend) {
- if (ret == -EIO && retries < 5) {
- retries++;
- goto retry;
- }
+ ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset,
+ &msg[offset],
+ tosend);
+ if (ret == -EIO && retries < 5) {
+ retries++;
+ goto retry;
+ } else if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
return -EIO;
@@ -3624,7 +3619,7 @@ enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
return DRM_DP_SST;
- if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
+ if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0)
return DRM_DP_SST;
if (mstm_cap & DP_MST_CAP)
@@ -3679,10 +3674,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_primary = mstb;
drm_dp_mst_topology_get_mstb(mgr->mst_primary);
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0)
goto out_unlock;
@@ -3697,7 +3692,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mstb = mgr->mst_primary;
mgr->mst_primary = NULL;
/* this can fail if the device is gone */
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mgr->payload_id_table_cleared = false;
@@ -3763,8 +3758,8 @@ EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe);
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->lock);
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UPSTREAM_IS_SRC);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
flush_work(&mgr->up_req_work);
flush_work(&mgr->work);
@@ -3813,18 +3808,18 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
goto out_fail;
}
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
goto out_fail;
}
/* Some hubs forget their guids after they resume */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
- if (ret != sizeof(buf)) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf));
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
@@ -3883,8 +3878,8 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
*mstb = NULL;
len = min(mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
return false;
}
@@ -3922,9 +3917,9 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
curreply = len;
while (replylen > 0) {
len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
- replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply,
+ replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
len, ret);
return false;
@@ -4881,9 +4876,9 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
int i;
for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
- if (drm_dp_dpcd_read(mgr->aux,
- DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
- &buf[i], 16) != 16)
+ if (drm_dp_dpcd_read_data(mgr->aux,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
+ &buf[i], 16) < 0)
return false;
}
return true;
@@ -4972,23 +4967,24 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
}
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
- if (ret != 2) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2);
+ if (ret < 0) {
seq_printf(m, "faux/mst read failed\n");
goto out;
}
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ if (ret < 0) {
seq_printf(m, "mst ctrl read failed\n");
goto out;
}
seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
- ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
- if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf,
+ DP_BRANCH_OUI_HEADER_SIZE);
+ if (ret < 0) {
seq_printf(m, "branch oui read failed\n");
goto out;
}
@@ -6112,14 +6108,14 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
/* DP-to-DP peer device */
if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&immediate_upstream_port->aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
/* Endpoint decompression with DP-to-DP peer device */
@@ -6157,8 +6153,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
- if (drm_dp_dpcd_read(immediate_upstream_aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(immediate_upstream_aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
@@ -6180,11 +6176,11 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
* therefore the endpoint needs to be
* both DSC and FEC capable.
*/
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
(endpoint_fec & DP_FEC_CAPABLE))
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 90fe07a89260..076edf161048 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -222,7 +222,7 @@ static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *r
while ((len = next_reg_area(&offset))) {
int address = DP_TUNNELING_BASE + offset;
- if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
+ if (drm_dp_dpcd_read_data(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
return -EIO;
offset += len;
@@ -913,7 +913,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ;
u8 val;
- if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
goto out_err;
if (enable)
@@ -921,7 +921,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
else
val &= ~mask;
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
goto out_err;
tunnel->bw_alloc_enabled = enable;
@@ -1039,7 +1039,7 @@ static int clear_bw_req_state(struct drm_dp_aux *aux)
{
u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
- if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
+ if (drm_dp_dpcd_write_byte(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
return -EIO;
return 0;
@@ -1052,7 +1052,7 @@ static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed)
u8 val;
int err;
- if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
return -EIO;
*status_changed = val & status_change_mask;
@@ -1095,7 +1095,7 @@ static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw)
if (err)
goto out;
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
err = -EIO;
goto out;
}
@@ -1196,13 +1196,13 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
u8 val;
- if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
goto out_err;
val &= mask;
if (val) {
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
goto out_err;
return 1;
@@ -1215,7 +1215,7 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
* Check for estimated BW changes explicitly to account for lost
* BW change notifications.
*/
- if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
goto out_err;
if (val * tunnel->bw_granularity != tunnel->estimated_bw)
@@ -1300,7 +1300,7 @@ int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *a
{
u8 val;
- if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
return -EIO;
if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED))
diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
index 74dd4d01dd9b..855cb02b827d 100644
--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
@@ -256,3 +256,171 @@ drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode,
return DIV_ROUND_CLOSEST_ULL(clock * bpc, 8);
}
EXPORT_SYMBOL(drm_hdmi_compute_mode_clock);
+
+struct drm_hdmi_acr_n_cts_entry {
+ unsigned int n;
+ unsigned int cts;
+};
+
+struct drm_hdmi_acr_data {
+ unsigned long tmds_clock_khz;
+ struct drm_hdmi_acr_n_cts_entry n_cts_32k,
+ n_cts_44k1,
+ n_cts_48k;
+};
+
+static const struct drm_hdmi_acr_data hdmi_acr_n_cts[] = {
+ {
+ /* "Other" entry */
+ .n_cts_32k = { .n = 4096, },
+ .n_cts_44k1 = { .n = 6272, },
+ .n_cts_48k = { .n = 6144, },
+ }, {
+ .tmds_clock_khz = 25175,
+ .n_cts_32k = { .n = 4576, .cts = 28125, },
+ .n_cts_44k1 = { .n = 7007, .cts = 31250, },
+ .n_cts_48k = { .n = 6864, .cts = 28125, },
+ }, {
+ .tmds_clock_khz = 25200,
+ .n_cts_32k = { .n = 4096, .cts = 25200, },
+ .n_cts_44k1 = { .n = 6272, .cts = 28000, },
+ .n_cts_48k = { .n = 6144, .cts = 25200, },
+ }, {
+ .tmds_clock_khz = 27000,
+ .n_cts_32k = { .n = 4096, .cts = 27000, },
+ .n_cts_44k1 = { .n = 6272, .cts = 30000, },
+ .n_cts_48k = { .n = 6144, .cts = 27000, },
+ }, {
+ .tmds_clock_khz = 27027,
+ .n_cts_32k = { .n = 4096, .cts = 27027, },
+ .n_cts_44k1 = { .n = 6272, .cts = 30030, },
+ .n_cts_48k = { .n = 6144, .cts = 27027, },
+ }, {
+ .tmds_clock_khz = 54000,
+ .n_cts_32k = { .n = 4096, .cts = 54000, },
+ .n_cts_44k1 = { .n = 6272, .cts = 60000, },
+ .n_cts_48k = { .n = 6144, .cts = 54000, },
+ }, {
+ .tmds_clock_khz = 54054,
+ .n_cts_32k = { .n = 4096, .cts = 54054, },
+ .n_cts_44k1 = { .n = 6272, .cts = 60060, },
+ .n_cts_48k = { .n = 6144, .cts = 54054, },
+ }, {
+ .tmds_clock_khz = 74176,
+ .n_cts_32k = { .n = 11648, .cts = 210937, }, /* and 210938 */
+ .n_cts_44k1 = { .n = 17836, .cts = 234375, },
+ .n_cts_48k = { .n = 11648, .cts = 140625, },
+ }, {
+ .tmds_clock_khz = 74250,
+ .n_cts_32k = { .n = 4096, .cts = 74250, },
+ .n_cts_44k1 = { .n = 6272, .cts = 82500, },
+ .n_cts_48k = { .n = 6144, .cts = 74250, },
+ }, {
+ .tmds_clock_khz = 148352,
+ .n_cts_32k = { .n = 11648, .cts = 421875, },
+ .n_cts_44k1 = { .n = 8918, .cts = 234375, },
+ .n_cts_48k = { .n = 5824, .cts = 140625, },
+ }, {
+ .tmds_clock_khz = 148500,
+ .n_cts_32k = { .n = 4096, .cts = 148500, },
+ .n_cts_44k1 = { .n = 6272, .cts = 165000, },
+ .n_cts_48k = { .n = 6144, .cts = 148500, },
+ }, {
+ .tmds_clock_khz = 296703,
+ .n_cts_32k = { .n = 5824, .cts = 421875, },
+ .n_cts_44k1 = { .n = 4459, .cts = 234375, },
+ .n_cts_48k = { .n = 5824, .cts = 281250, },
+ }, {
+ .tmds_clock_khz = 297000,
+ .n_cts_32k = { .n = 3072, .cts = 222750, },
+ .n_cts_44k1 = { .n = 4704, .cts = 247500, },
+ .n_cts_48k = { .n = 5120, .cts = 247500, },
+ }, {
+ .tmds_clock_khz = 593407,
+ .n_cts_32k = { .n = 5824, .cts = 843750, },
+ .n_cts_44k1 = { .n = 8918, .cts = 937500, },
+ .n_cts_48k = { .n = 5824, .cts = 562500, },
+ }, {
+ .tmds_clock_khz = 594000,
+ .n_cts_32k = { .n = 3072, .cts = 445500, },
+ .n_cts_44k1 = { .n = 9408, .cts = 990000, },
+ .n_cts_48k = { .n = 6144, .cts = 594000, },
+ },
+};
+
+static int drm_hdmi_acr_find_tmds_entry(unsigned long tmds_clock_khz)
+{
+ int i;
+
+ /* skip the "other" entry */
+ for (i = 1; i < ARRAY_SIZE(hdmi_acr_n_cts); i++) {
+ if (hdmi_acr_n_cts[i].tmds_clock_khz == tmds_clock_khz)
+ return i;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_hdmi_acr_get_n_cts() - get N and CTS values for Audio Clock Regeneration
+ *
+ * @tmds_char_rate: TMDS clock (char rate) as used by the HDMI connector
+ * @sample_rate: audio sample rate
+ * @out_n: a pointer to write the N value
+ * @out_cts: a pointer to write the CTS value
+ *
+ * Get the N and CTS values, either by calculating them or by returning data
+ * from the tables. This follows the HDMI 1.4b Section 7.2 "Audio Sample Clock
+ * Capture and Regeneration".
+ *
+ * Note, @sample_rate corresponds to the Fs value, see sections 7.2.4 - 7.2.6
+ * on how to select Fs for non-L-PCM formats.
+ */
+void
+drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate,
+ unsigned int sample_rate,
+ unsigned int *out_n,
+ unsigned int *out_cts)
+{
+ /* be a bit more tolerant, especially for the 1.001 entries */
+ unsigned long tmds_clock_khz = DIV_ROUND_CLOSEST_ULL(tmds_char_rate, 1000);
+ const struct drm_hdmi_acr_n_cts_entry *entry;
+ unsigned int n, cts, mult;
+ int tmds_idx;
+
+ tmds_idx = drm_hdmi_acr_find_tmds_entry(tmds_clock_khz);
+
+ /*
+ * Don't change the order: 192 kHz is divisible by both 48k and 32k, but
+ * it should use the 48k entry.
+ */
+ if (sample_rate % 48000 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_48k;
+ mult = sample_rate / 48000;
+ } else if (sample_rate % 44100 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_44k1;
+ mult = sample_rate / 44100;
+ } else if (sample_rate % 32000 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_32k;
+ mult = sample_rate / 32000;
+ } else {
+ entry = NULL;
+ }
+
+ if (entry) {
+ n = entry->n * mult;
+ cts = entry->cts;
+ } else {
+ /* Recommended optimal value, HDMI 1.4b, Section 7.2.1 */
+ n = 128 * sample_rate / 1000;
+ cts = 0;
+ }
+
+ if (!cts)
+ cts = DIV_ROUND_CLOSEST_ULL(tmds_char_rate * n,
+ 128 * sample_rate);
+
+ *out_n = n;
+ *out_cts = cts;
+}
+EXPORT_SYMBOL(drm_hdmi_acr_get_n_cts);
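A brief usage sketch, assuming the declaration lands in drm_hdmi_helper.h: for a 148.5 MHz TMDS character rate and 48 kHz audio the table above yields N = 6144 and CTS = 148500.

#include <drm/display/drm_hdmi_helper.h>

static void example_acr(void)
{
	unsigned int n, cts;

	/* 148500 kHz table entry, 48 kHz audio: expect n == 6144, cts == 148500 */
	drm_hdmi_acr_get_n_cts(148500000ULL, 48000, &n, &cts);
}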
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index c205f37da1e1..d9d9948b29e9 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -10,6 +10,298 @@
#include <drm/display/drm_hdmi_state_helper.h>
/**
+ * DOC: hdmi helpers
+ *
+ * These functions contain an implementation of the HDMI specification
+ * in the form of KMS helpers.
+ *
+ * They cover TMDS character rate computation, automatic selection of
+ * output formats, infoframe generation, etc.
+ *
+ * Infoframes Compliance
+ * ~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Drivers using the helpers will expose the various infoframes
+ * generated according to the HDMI specification in debugfs.
+ *
+ * Compliance can then be tested using ``edid-decode`` from the ``v4l-utils`` project
+ * (https://git.linuxtv.org/v4l-utils.git/). A sample run would look like:
+ *
+ * .. code-block:: bash
+ *
+ * # edid-decode \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/avi \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdmi \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/spd \
+ * /sys/class/drm/card1-HDMI-A-1/edid \
+ * -c
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 1e 6d f4 5b 1e ef 06 00
+ * 07 20 01 03 80 2f 34 78 ea 24 05 af 4f 42 ab 25
+ * 0f 50 54 21 08 00 d1 c0 61 40 45 40 01 01 01 01
+ * 01 01 01 01 01 01 98 d0 00 40 a1 40 d4 b0 30 20
+ * 3a 00 d1 0b 12 00 00 1a 00 00 00 fd 00 3b 3d 1e
+ * b2 31 00 0a 20 20 20 20 20 20 00 00 00 fc 00 4c
+ * 47 20 53 44 51 48 44 0a 20 20 20 20 00 00 00 ff
+ * 00 32 30 37 4e 54 52 4c 44 43 34 33 30 0a 01 46
+ *
+ * 02 03 42 72 23 09 07 07 4d 01 03 04 90 12 13 1f
+ * 22 5d 5e 5f 60 61 83 01 00 00 6d 03 0c 00 10 00
+ * b8 3c 20 00 60 01 02 03 67 d8 5d c4 01 78 80 03
+ * e3 0f 00 18 e2 00 6a e3 05 c0 00 e6 06 05 01 52
+ * 52 51 11 5d 00 a0 a0 40 29 b0 30 20 3a 00 d1 0b
+ * 12 00 00 1a 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 c3
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: GSM
+ * Model: 23540
+ * Serial Number: 454430 (0x0006ef1e)
+ * Made in: week 7 of 2022
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 47 cm x 52 cm
+ * Gamma: 2.20
+ * DPMS levels: Standby Suspend Off
+ * RGB color display
+ * First detailed timing is the preferred timing
+ * Color Characteristics:
+ * Red : 0.6835, 0.3105
+ * Green: 0.2587, 0.6679
+ * Blue : 0.1445, 0.0585
+ * White: 0.3134, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz
+ * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz
+ * Standard Timings:
+ * DMT 0x52: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz
+ * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz
+ * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz
+ * Detailed Timing Descriptors:
+ * DTD 1: 2560x2880 59.966580 Hz 8:9 185.417 kHz 534.000000 MHz (465 mm x 523 mm)
+ * Hfront 48 Hsync 32 Hback 240 Hpol P
+ * Vfront 3 Vsync 10 Vback 199 Vpol N
+ * Display Range Limits:
+ * Monitor ranges (GTF): 59-61 Hz V, 30-178 kHz H, max dotclock 490 MHz
+ * Display Product Name: 'LG SDQHD'
+ * Display Product Serial Number: '207NTRLDC430'
+ * Extension blocks: 1
+ * Checksum: 0x46
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Basic audio support
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 2
+ * Audio Data Block:
+ * Linear PCM:
+ * Max channels: 2
+ * Supported sample rates (kHz): 48 44.1 32
+ * Supported sample sizes (bits): 24 20 16
+ * Video Data Block:
+ * VIC 1: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * VIC 3: 720x480 59.940060 Hz 16:9 31.469 kHz 27.000000 MHz
+ * VIC 4: 1280x720 60.000000 Hz 16:9 45.000 kHz 74.250000 MHz
+ * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (native)
+ * VIC 18: 720x576 50.000000 Hz 16:9 31.250 kHz 27.000000 MHz
+ * VIC 19: 1280x720 50.000000 Hz 16:9 37.500 kHz 74.250000 MHz
+ * VIC 31: 1920x1080 50.000000 Hz 16:9 56.250 kHz 148.500000 MHz
+ * VIC 34: 1920x1080 30.000000 Hz 16:9 33.750 kHz 74.250000 MHz
+ * VIC 93: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz
+ * VIC 94: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz
+ * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz
+ * Speaker Allocation Data Block:
+ * FL/FR - Front Left/Right
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * Supports_AI
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 300 MHz
+ * Extended HDMI video details:
+ * HDMI VICs:
+ * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * HDMI VIC 2: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz
+ * HDMI VIC 3: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 600 MHz
+ * SCDC Present
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz
+ * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz
+ * Video Capability Data Block:
+ * YCbCr quantization: No Data
+ * RGB quantization: Selectable (via AVI Q)
+ * PT scan behavior: Always Underscanned
+ * IT scan behavior: Always Underscanned
+ * CE scan behavior: Always Underscanned
+ * Colorimetry Data Block:
+ * BT2020YCC
+ * BT2020RGB
+ * HDR Static Metadata Data Block:
+ * Electro optical transfer functions:
+ * Traditional gamma - SDR luminance range
+ * SMPTE ST2084
+ * Supported static metadata descriptors:
+ * Static metadata type 1
+ * Desired content max luminance: 82 (295.365 cd/m^2)
+ * Desired content max frame-average luminance: 82 (295.365 cd/m^2)
+ * Desired content min luminance: 81 (0.298 cd/m^2)
+ * Detailed Timing Descriptors:
+ * DTD 2: 2560x2880 29.986961 Hz 8:9 87.592 kHz 238.250000 MHz (465 mm x 523 mm)
+ * Hfront 48 Hsync 32 Hback 80 Hpol P
+ * Vfront 3 Vsync 10 Vback 28 Vpol N
+ * Checksum: 0xc3 Unused space in Extension Block: 43 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.29.0-5346
+ * edid-decode SHA: c363e9aa6d70 2025-03-11 11:41:18
+ *
+ * Warnings:
+ *
+ * Block 1, CTA-861 Extension Block:
+ * IT Video Formats are overscanned by default, but normally this should be underscanned.
+ * Video Data Block: VIC 1 and the first DTD are not identical. Is this intended?
+ * Video Data Block: All VICs are in ascending order, and the first (preferred) VIC <= 4, is that intended?
+ * Video Capability Data Block: Set Selectable YCbCr Quantization to avoid interop issues.
+ * Video Capability Data Block: S_PT is equal to S_IT and S_CE, so should be set to 0 instead.
+ * Colorimetry Data Block: Set the sRGB colorimetry bit to avoid interop issues.
+ * Display Product Serial Number is set, so the Serial Number in the Base EDID should be 0.
+ * EDID:
+ * Base EDID: Some timings are out of range of the Monitor Ranges:
+ * Vertical Freq: 24.000 - 60.317 Hz (Monitor: 59.000 - 61.000 Hz)
+ * Horizontal Freq: 31.250 - 185.416 kHz (Monitor: 30.000 - 178.000 kHz)
+ * Maximum Clock: 594.000 MHz (Monitor: 490.000 MHz)
+ *
+ * Failures:
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Video Capability Data Block: IT video formats are always underscanned, but bit 7 of Byte 3 of the CTA-861 Extension header is set to overscanned.
+ * EDID:
+ * CTA-861: Native progressive timings are a mix of several resolutions.
+ *
+ * EDID conformity: FAIL
+ *
+ * ================
+ *
+ * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio' was empty.
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 82 02 0d 31 12 28 04 00 00 00 00 00 00 00 00 00
+ * 00
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x31
+ *
+ * AVI InfoFrame
+ * Version: 2
+ * Length: 13
+ * Y: Color Component Sample Format: RGB
+ * A: Active Format Information Present: Yes
+ * B: Bar Data Present: Bar Data not present
+ * S: Scan Information: Composed for an underscanned display
+ * C: Colorimetry: No Data
+ * M: Picture Aspect Ratio: 16:9
+ * R: Active Portion Aspect Ratio: 8
+ * ITC: IT Content: No Data
+ * EC: Extended Colorimetry: xvYCC601
+ * Q: RGB Quantization Range: Limited Range
+ * SC: Non-Uniform Picture Scaling: No Known non-uniform scaling
+ * YQ: YCC Quantization Range: Limited Range
+ * CN: IT Content Type: Graphics
+ * PR: Pixel Data Repetition Count: 0
+ * Line Number of End of Top Bar: 0
+ * Line Number of Start of Bottom Bar: 0
+ * Pixel Number of End of Left Bar: 0
+ * Pixel Number of Start of Right Bar: 0
+ *
+ * ----------------
+ *
+ * AVI InfoFrame conformity: PASS
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 81 01 05 49 03 0c 00 20 01
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x49
+ *
+ * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03
+ * Version: 1
+ * Length: 5
+ * HDMI Video Format: HDMI_VIC is present
+ * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ *
+ * ----------------
+ *
+ * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03 conformity: PASS
+ *
+ * ================
+ *
+ * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm' was empty.
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 83 01 19 93 42 72 6f 61 64 63 6f 6d 56 69 64 65
+ * 6f 63 6f 72 65 00 00 00 00 00 00 00 09
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x93
+ *
+ * Source Product Description InfoFrame
+ * Version: 1
+ * Length: 25
+ * Vendor Name: 'Broadcom'
+ * Product Description: 'Videocore'
+ * Source Information: PC general
+ *
+ * ----------------
+ *
+ * Source Product Description InfoFrame conformity: PASS
+ *
+ * Testing
+ * ~~~~~~~
+ *
+ * The helpers have unit testing and can be tested using kunit with:
+ *
+ * .. code-block:: bash
+ *
+ * $ ./tools/testing/kunit/kunit.py run \
+ * --kunitconfig=drivers/gpu/drm/tests \
+ * drm_atomic_helper_connector_hdmi_*
+ */
+
+/**
* __drm_atomic_helper_connector_hdmi_reset() - Initializes all HDMI @drm_connector_state resources
* @connector: DRM connector
* @new_conn_state: connector state to reset
@@ -816,7 +1108,7 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
* @status: Connection status
*
* This function should be called as a part of the .detect() / .detect_ctx()
- * callbacks, updating the HDMI-specific connector's data.
+ * callbacks for all status changes.
*/
void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector,
enum drm_connector_status status)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9ea2611770f4..0138cf0b8b63 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -933,6 +933,9 @@ EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
* state). This is especially true in enable hooks because the pipeline has
* changed.
*
+ * If you don't have access to the atomic state, see
+ * drm_atomic_get_connector_for_encoder().
+ *
* Returns: The old connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
@@ -967,6 +970,9 @@ EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
* attached to @encoder vs ones that do (and to inspect their state). This is
* especially true in disable hooks because the pipeline will change.
*
+ * If you don't have access to the atomic state, see
+ * drm_atomic_get_connector_for_encoder().
+ *
* Returns: The new connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
@@ -988,6 +994,59 @@ drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder);
/**
+ * drm_atomic_get_connector_for_encoder - Get connector currently assigned to an encoder
+ * @encoder: The encoder to find the connector of
+ * @ctx: Modeset locking context
+ *
+ * This function finds and returns the connector currently assigned to
+ * an @encoder.
+ *
+ * It is similar to the drm_atomic_get_old_connector_for_encoder() and
+ * drm_atomic_get_new_connector_for_encoder() helpers, but doesn't
+ * require access to the atomic state. If you have access to it, prefer
+ * using these. This helper is typically useful in situations where you
+ * don't have access to the atomic state, like detect, link repair,
+ * threaded interrupt handlers, or hooks from other frameworks (ALSA,
+ * CEC, etc.).
+ *
+ * Returns:
+ * The connector connected to @encoder, or an error pointer otherwise.
+ * When the error is EDEADLK, a deadlock has been detected and the
+ * sequence must be restarted.
+ */
+struct drm_connector *
+drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *out_connector = ERR_PTR(-EINVAL);
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ int ret;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (!connector->state)
+ continue;
+
+ if (encoder == connector->state->best_encoder) {
+ out_connector = connector;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return out_connector;
+}
+EXPORT_SYMBOL(drm_atomic_get_connector_for_encoder);
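A minimal sketch of the EDEADLK retry loop around the new helper, e.g. from a hypothetical hook (ALSA, CEC, ...) that only knows the encoder.

#include <drm/drm_atomic.h>
#include <drm/drm_modeset_lock.h>

/* Sketch: find the connector currently driven by @encoder, no atomic state. */
static struct drm_connector *
example_connector_for_encoder(struct drm_encoder *encoder)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_connector *connector;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	connector = drm_atomic_get_connector_for_encoder(encoder, &ctx);
	if (IS_ERR(connector) && PTR_ERR(connector) == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_acquire_fini(&ctx);

	return IS_ERR(connector) ? NULL : connector;
}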
+
+
+/**
* drm_atomic_get_old_crtc_for_encoder - Get old crtc for an encoder
* @state: Atomic state
* @encoder: The encoder to fetch the crtc state for
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5302ab324898..ee64ca1b1bec 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3409,6 +3409,9 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_all);
* This implies a reset of all active components available between the CRTC and
* connectors.
*
+ * A bridge-oriented variant of this function is available in
+ * drm_bridge_helper_reset_crtc().
+ *
* NOTE: This relies on resetting &drm_crtc_state.connectors_changed.
* For drivers which optimize out unnecessary modesets this will result in
* a no-op commit, achieving nothing.
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 6e74de833466..6852d73c931c 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -75,6 +75,12 @@
* the currently visible vertical area of the &drm_crtc.
* FB_ID:
* Mode object ID of the &drm_framebuffer this plane should scan out.
+ *
+ * When a KMS client is performing front-buffer rendering, it should set
+ * FB_ID to the same front-buffer FB on each atomic commit. This signals
+ * to the driver that the same FB needs to be re-read. Otherwise,
+ * drivers which do not employ continuously repeated scanout cycles might
+ * not update the screen.
* CRTC_ID:
* Mode object ID of the &drm_crtc this plane should be connected to.
*
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index fa2794217a90..b4c89ec01998 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
@@ -198,10 +199,96 @@
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
+static void __drm_bridge_free(struct kref *kref)
+{
+ struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
+
+ kfree(bridge->container);
+}
+
+/**
+ * drm_bridge_get - Acquire a bridge reference
+ * @bridge: DRM bridge
+ *
+ * This function increments the bridge's refcount.
+ *
+ * Returns:
+ * Pointer to @bridge.
+ */
+struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
+{
+ if (bridge)
+ kref_get(&bridge->refcount);
+
+ return bridge;
+}
+EXPORT_SYMBOL(drm_bridge_get);
+
+/**
+ * drm_bridge_put - Release a bridge reference
+ * @bridge: DRM bridge
+ *
+ * This function decrements the bridge's reference count and frees the
+ * object if the reference count drops to zero.
+ */
+void drm_bridge_put(struct drm_bridge *bridge)
+{
+ if (bridge)
+ kref_put(&bridge->refcount, __drm_bridge_free);
+}
+EXPORT_SYMBOL(drm_bridge_put);
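A short sketch of the intended get/put usage when a driver caches a bridge pointer (illustrative only; struct foo_device and its bridge member are assumptions, not part of the patch):

	struct foo_device {
		struct drm_bridge *bridge;
		/* ... */
	};

	static void foo_cache_bridge(struct foo_device *foo, struct drm_bridge *bridge)
	{
		/* Both helpers accept NULL, so no special casing is needed here. */
		drm_bridge_put(foo->bridge);
		foo->bridge = drm_bridge_get(bridge);
	}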
+
+/**
+ * drm_bridge_put_void - wrapper around drm_bridge_put() taking a void pointer
+ *
+ * @data: pointer to &struct drm_bridge, cast to a void pointer
+ *
+ * Wrapper around drm_bridge_put() to be used when a function taking a void
+ * pointer is needed, for example as a devm action.
+ */
+static void drm_bridge_put_void(void *data)
+{
+ struct drm_bridge *bridge = (struct drm_bridge *)data;
+
+ drm_bridge_put(bridge);
+}
+
+void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_bridge_funcs *funcs)
+{
+ void *container;
+ struct drm_bridge *bridge;
+ int err;
+
+ if (!funcs) {
+ dev_warn(dev, "Missing funcs pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ bridge = container + offset;
+ bridge->container = container;
+ bridge->funcs = funcs;
+ kref_init(&bridge->refcount);
+
+ err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
+ if (err)
+ return ERR_PTR(err);
+
+ return container;
+}
+EXPORT_SYMBOL(__devm_drm_bridge_alloc);
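A probe-time sketch of how a bridge driver is expected to use the new allocator (illustrative; the devm_drm_bridge_alloc() wrapper macro lives in the header and is not shown in this diff, and the foo_* names and the contents of the funcs table are assumptions):

	struct foo_bridge {
		struct drm_bridge bridge;
		void __iomem *regs;
	};

	static const struct drm_bridge_funcs foo_bridge_funcs = {
		/* .attach = foo_bridge_attach, ... */
	};

	static int foo_bridge_probe(struct platform_device *pdev)
	{
		struct foo_bridge *ctx;

		/* Allocates the container, refcounts the embedded bridge and ties
		 * its lifetime to the device through a devm action. */
		ctx = devm_drm_bridge_alloc(&pdev->dev, struct foo_bridge, bridge,
					    &foo_bridge_funcs);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);

		ctx->bridge.of_node = pdev->dev.of_node;
		drm_bridge_add(&ctx->bridge);

		return 0;
	}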
+
/**
* drm_bridge_add - add the given bridge to the global bridge list
*
* @bridge: bridge control structure
+ *
+ * The bridge to be added must have been allocated by
+ * devm_drm_bridge_alloc().
*/
void drm_bridge_add(struct drm_bridge *bridge)
{
@@ -280,6 +367,11 @@ static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};
+static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
+{
+ return bridge->funcs->atomic_reset != NULL;
+}
+
/**
* drm_bridge_attach - attach the bridge to an encoder's chain
*
@@ -327,12 +419,12 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
list_add(&bridge->chain_node, &encoder->bridge_chain);
if (bridge->funcs->attach) {
- ret = bridge->funcs->attach(bridge, flags);
+ ret = bridge->funcs->attach(bridge, encoder, flags);
if (ret < 0)
goto err_reset_bridge;
}
- if (bridge->funcs->atomic_reset) {
+ if (drm_bridge_is_atomic(bridge)) {
struct drm_bridge_state *state;
state = bridge->funcs->atomic_reset(bridge);
@@ -377,7 +469,7 @@ void drm_bridge_detach(struct drm_bridge *bridge)
if (WARN_ON(!bridge->dev))
return;
- if (bridge->funcs->atomic_reset)
+ if (drm_bridge_is_atomic(bridge))
drm_atomic_private_obj_fini(&bridge->base);
if (bridge->funcs->detach)
@@ -1300,6 +1392,75 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
+static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
+ struct drm_bridge *bridge,
+ unsigned int idx)
+{
+ drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
+ drm_printf(p, "\ttype: [%d] %s\n",
+ bridge->type,
+ drm_get_connector_type_name(bridge->type));
+
+ if (bridge->of_node)
+ drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
+
+ drm_printf(p, "\tops: [0x%x]", bridge->ops);
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ drm_puts(p, " detect");
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ drm_puts(p, " edid");
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ drm_puts(p, " hpd");
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ drm_puts(p, " modes");
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI)
+ drm_puts(p, " hdmi");
+ drm_puts(p, "\n");
+}
+
+static int allbridges_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ mutex_lock(&bridge_lock);
+
+ list_for_each_entry(bridge, &bridge_list, list)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+
+ mutex_unlock(&bridge_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(allbridges);
+
+static int encoder_bridges_show(struct seq_file *m, void *data)
+{
+ struct drm_encoder *encoder = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ drm_for_each_bridge_in_chain(encoder, bridge)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(encoder_bridges);
+
+void drm_bridge_debugfs_params(struct dentry *root)
+{
+ debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
+}
+
+void drm_bridge_debugfs_encoder_params(struct dentry *root,
+ struct drm_encoder *encoder)
+{
+ /* bridges list */
+ debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
+}
+
MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_bridge_helper.c b/drivers/gpu/drm/drm_bridge_helper.c
new file mode 100644
index 000000000000..af80d2496194
--- /dev/null
+++ b/drivers/gpu/drm/drm_bridge_helper.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_helper.h>
+#include <drm/drm_modeset_lock.h>
+
+/**
+ * drm_bridge_helper_reset_crtc - Reset the pipeline feeding a bridge
+ * @bridge: DRM bridge to reset
+ * @ctx: lock acquisition context
+ *
+ * Reset the pipeline feeding @bridge. This power-cycles all active components
+ * between the CRTC and the connector that the bridge is connected to.
+ *
+ * As it relies on drm_atomic_helper_reset_crtc(), the same limitations
+ * apply.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure. If the error
+ * returned is -EDEADLK, the whole atomic sequence must be restarted.
+ */
+int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_connector *connector;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc;
+ int ret;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ret;
+
+ connector = drm_atomic_get_connector_for_encoder(encoder, ctx);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ goto out;
+ }
+
+ if (!connector->state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ crtc = connector->state->crtc;
+ ret = drm_atomic_helper_reset_crtc(crtc, ctx);
+ if (ret)
+ goto out;
+
+out:
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_bridge_helper_reset_crtc);
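A sketch of how a bridge might call this from a deferred link-recovery path (illustrative only; struct foo_bridge, its retrain_work member and the error-handling policy are assumptions):

	static void foo_bridge_link_retrain_work(struct work_struct *work)
	{
		struct foo_bridge *ctx = container_of(work, struct foo_bridge, retrain_work);
		struct drm_modeset_acquire_ctx mctx;
		int ret;

		drm_modeset_acquire_init(&mctx, 0);

	retry:
		ret = drm_bridge_helper_reset_crtc(&ctx->bridge, &mctx);
		if (ret == -EDEADLK) {
			/* deadlock detected: drop all locks and restart the sequence */
			drm_modeset_backoff(&mctx);
			goto retry;
		}

		drm_modeset_acquire_fini(&mctx);
	}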
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 549b28a5918c..f1de7faf9fb4 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(drm_client_release);
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
if (buffer->gem) {
- drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
+ drm_gem_vunmap(buffer->gem, &buffer->map);
drm_gem_object_put(buffer->gem);
}
@@ -252,7 +252,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
drm_gem_lock(gem);
- ret = drm_gem_vmap(gem, map);
+ ret = drm_gem_vmap_locked(gem, map);
if (ret)
goto err_drm_gem_vmap_unlocked;
*map_copy = *map;
@@ -278,7 +278,7 @@ void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer)
struct drm_gem_object *gem = buffer->gem;
struct iosys_map *map = &buffer->map;
- drm_gem_vunmap(gem, map);
+ drm_gem_vunmap_locked(gem, map);
drm_gem_unlock(gem);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
@@ -316,7 +316,7 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer,
ret = drm_gem_pin_locked(gem);
if (ret)
goto err_drm_gem_pin_locked;
- ret = drm_gem_vmap(gem, map);
+ ret = drm_gem_vmap_locked(gem, map);
if (ret)
goto err_drm_gem_vmap;
@@ -348,7 +348,7 @@ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
struct iosys_map *map = &buffer->map;
drm_gem_lock(gem);
- drm_gem_vunmap(gem, map);
+ drm_gem_vunmap_locked(gem, map);
drm_gem_unpin_locked(gem);
drm_gem_unlock(gem);
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index aca442c25209..0f9d5ba36c81 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -39,7 +39,7 @@ int drm_client_modeset_create(struct drm_client_dev *client)
unsigned int max_connector_count = 1;
struct drm_mode_set *modeset;
struct drm_crtc *crtc;
- unsigned int i = 0;
+ int i = 0;
/* Add terminating zero entry to enable index less iteration */
client->modesets = kcalloc(num_crtc + 1, sizeof(*client->modesets), GFP_KERNEL);
@@ -73,9 +73,10 @@ err_free:
static void drm_client_modeset_release(struct drm_client_dev *client)
{
struct drm_mode_set *modeset;
- unsigned int i;
drm_client_for_each_modeset(modeset, client) {
+ int i;
+
drm_mode_destroy(client->dev, modeset->mode);
modeset->mode = NULL;
modeset->fb = NULL;
@@ -117,10 +118,10 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_get_tiled_mode(struct drm_connector *connector)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
@@ -130,10 +131,10 @@ drm_connector_get_tiled_mode(struct drm_connector *connector)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
@@ -144,10 +145,10 @@ drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_preferred_mode(struct drm_connector *connector, int width, int height)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay > width ||
@@ -159,16 +160,18 @@ drm_connector_preferred_mode(struct drm_connector *connector, int width, int hei
return NULL;
}
-static struct drm_display_mode *drm_connector_first_mode(struct drm_connector *connector)
+static const struct drm_display_mode *
+drm_connector_first_mode(struct drm_connector *connector)
{
return list_first_entry_or_null(&connector->modes,
struct drm_display_mode, head);
}
-static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector)
+static const struct drm_display_mode *
+drm_connector_pick_cmdline_mode(struct drm_connector *connector)
{
- struct drm_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode;
+ const struct drm_cmdline_mode *cmdline_mode;
+ const struct drm_display_mode *mode;
bool prefer_non_interlace;
/*
@@ -237,9 +240,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
return enable;
}
-static void drm_client_connectors_enabled(struct drm_connector **connectors,
+static void drm_client_connectors_enabled(struct drm_connector *connectors[],
unsigned int connector_count,
- bool *enabled)
+ bool enabled[])
{
bool any_enabled = false;
struct drm_connector *connector;
@@ -263,16 +266,35 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors,
enabled[i] = drm_connector_enabled(connectors[i], false);
}
+static void mode_replace(struct drm_device *dev,
+ const struct drm_display_mode **dst,
+ const struct drm_display_mode *src)
+{
+ drm_mode_destroy(dev, (struct drm_display_mode *)*dst);
+
+ *dst = src ? drm_mode_duplicate(dev, src) : NULL;
+}
+
+static void modes_destroy(struct drm_device *dev,
+ const struct drm_display_mode *modes[],
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ mode_replace(dev, &modes[i], NULL);
+}
+
static bool drm_client_target_cloned(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
- int count, i, j;
+ int count, i;
bool can_clone = false;
- struct drm_display_mode *dmt_mode, *mode;
+ struct drm_display_mode *dmt_mode;
/* only contemplate cloning in the single crtc case */
if (dev->mode_config.num_crtc > 1)
@@ -291,9 +313,13 @@ static bool drm_client_target_cloned(struct drm_device *dev,
/* check the command line or if nothing common pick 1024x768 */
can_clone = true;
for (i = 0; i < connector_count; i++) {
+ int j;
+
if (!enabled[i])
continue;
- modes[i] = drm_connector_pick_cmdline_mode(connectors[i]);
+
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connectors[i]));
if (!modes[i]) {
can_clone = false;
break;
@@ -323,6 +349,8 @@ static bool drm_client_target_cloned(struct drm_device *dev,
goto fail;
for (i = 0; i < connector_count; i++) {
+ const struct drm_display_mode *mode;
+
if (!enabled[i])
continue;
@@ -332,7 +360,7 @@ static bool drm_client_target_cloned(struct drm_device *dev,
DRM_MODE_MATCH_CLOCK |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS))
- modes[i] = mode;
+ mode_replace(dev, &modes[i], mode);
}
if (!modes[i])
can_clone = false;
@@ -349,19 +377,19 @@ fail:
}
static int drm_client_get_tile_offsets(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
int idx,
int h_idx, int v_idx)
{
- struct drm_connector *connector;
int i;
int hoffset = 0, voffset = 0;
for (i = 0; i < connector_count; i++) {
- connector = connectors[i];
+ struct drm_connector *connector = connectors[i];
+
if (!connector->has_tile)
continue;
@@ -384,14 +412,13 @@ static int drm_client_get_tile_offsets(struct drm_device *dev,
}
static bool drm_client_target_preferred(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
const u64 mask = BIT_ULL(connector_count) - 1;
- struct drm_connector *connector;
u64 conn_configured = 0;
int tile_pass = 0;
int num_tiled_conns = 0;
@@ -405,7 +432,9 @@ static bool drm_client_target_preferred(struct drm_device *dev,
retry:
for (i = 0; i < connector_count; i++) {
- connector = connectors[i];
+ struct drm_connector *connector = connectors[i];
+ const char *mode_type;
+
if (conn_configured & BIT_ULL(i))
continue;
@@ -438,20 +467,23 @@ retry:
modes, offsets, i,
connector->tile_h_loc, connector->tile_v_loc);
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
- connector->base.id, connector->name);
- /* got for command line mode first */
- modes[i] = drm_connector_pick_cmdline_mode(connector);
+ mode_type = "cmdline";
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connector));
+
if (!modes[i]) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n",
- connector->base.id, connector->name,
- connector->tile_group ? connector->tile_group->id : 0);
- modes[i] = drm_connector_preferred_mode(connector, width, height);
+ mode_type = "preferred";
+ mode_replace(dev, &modes[i],
+ drm_connector_preferred_mode(connector, width, height));
}
- /* No preferred modes, pick one off the list */
- if (!modes[i])
- modes[i] = drm_connector_first_mode(connector);
+
+ if (!modes[i]) {
+ mode_type = "first";
+ mode_replace(dev, &modes[i],
+ drm_connector_first_mode(connector));
+ }
+
/*
* In case of tiled mode if all tiles not present fallback to
* first available non tiled mode.
@@ -466,18 +498,24 @@ retry:
(connector->tile_h_loc == 0 &&
connector->tile_v_loc == 0 &&
!drm_connector_get_tiled_mode(connector))) {
- drm_dbg_kms(dev,
- "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ mode_type = "non tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_fallback_non_tiled_mode(connector));
} else {
- modes[i] = drm_connector_get_tiled_mode(connector);
+ mode_type = "tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_get_tiled_mode(connector));
}
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Found mode %s\n",
- connector->base.id, connector->name,
- modes[i] ? modes[i]->name : "none");
+ if (modes[i])
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] found %s mode: %s\n",
+ connector->base.id, connector->name,
+ mode_type, modes[i]->name);
+ else
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] no mode found\n",
+ connector->base.id, connector->name);
+
conn_configured |= BIT_ULL(i);
}
@@ -502,18 +540,17 @@ static bool connector_has_possible_crtc(struct drm_connector *connector,
}
static int drm_client_pick_crtcs(struct drm_client_dev *client,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_crtc **best_crtcs,
- struct drm_display_mode **modes,
+ struct drm_crtc *best_crtcs[],
+ const struct drm_display_mode *modes[],
int n, int width, int height)
{
struct drm_device *dev = client->dev;
struct drm_connector *connector;
int my_score, best_score, score;
- struct drm_crtc **crtcs, *crtc;
+ struct drm_crtc **crtcs;
struct drm_mode_set *modeset;
- int o;
if (n == connector_count)
return 0;
@@ -543,7 +580,8 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client,
* remaining connectors
*/
drm_client_for_each_modeset(modeset, client) {
- crtc = modeset->crtc;
+ struct drm_crtc *crtc = modeset->crtc;
+ int o;
if (!connector_has_possible_crtc(connector, crtc))
continue;
@@ -577,17 +615,17 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client,
/* Try to read the BIOS display configuration and use it for the initial config */
static bool drm_client_firmware_config(struct drm_client_dev *client,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_crtc **crtcs,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ struct drm_crtc *crtcs[],
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
const int count = min_t(unsigned int, connector_count, BITS_PER_LONG);
unsigned long conn_configured, conn_seq, mask;
struct drm_device *dev = client->dev;
- int i, j;
+ int i;
bool *save_enabled;
bool fallback = true, ret = true;
int num_connectors_enabled = 0;
@@ -621,11 +659,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
- struct drm_connector *connector;
+ struct drm_connector *connector = connectors[i];
struct drm_encoder *encoder;
- struct drm_crtc *new_crtc;
-
- connector = connectors[i];
+ struct drm_crtc *crtc;
+ const char *mode_type;
+ int j;
if (conn_configured & BIT(i))
continue;
@@ -664,7 +702,7 @@ retry:
num_connectors_enabled++;
- new_crtc = connector->state->crtc;
+ crtc = connector->state->crtc;
/*
* Make sure we're not trying to drive multiple connectors
@@ -672,69 +710,52 @@ retry:
* match the BIOS.
*/
for (j = 0; j < count; j++) {
- if (crtcs[j] == new_crtc) {
- drm_dbg_kms(dev, "fallback: cloned configuration\n");
+ if (crtcs[j] == crtc) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] fallback: cloned configuration\n",
+ connector->base.id, connector->name);
goto bail;
}
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
- connector->base.id, connector->name);
-
- /* go for command line mode first */
- modes[i] = drm_connector_pick_cmdline_mode(connector);
+ mode_type = "cmdline";
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connector));
- /* try for preferred next */
if (!modes[i]) {
- drm_dbg_kms(dev,
- "[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n",
- connector->base.id, connector->name,
- str_yes_no(connector->has_tile));
- modes[i] = drm_connector_preferred_mode(connector, width, height);
+ mode_type = "preferred";
+ mode_replace(dev, &modes[i],
+ drm_connector_preferred_mode(connector, width, height));
}
- /* No preferred mode marked by the EDID? Are there any modes? */
- if (!modes[i] && !list_empty(&connector->modes)) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_first_mode(connector);
+ if (!modes[i]) {
+ mode_type = "first";
+ mode_replace(dev, &modes[i],
+ drm_connector_first_mode(connector));
}
/* last resort: use current mode */
if (!modes[i]) {
- /*
- * IMPORTANT: We want to use the adjusted mode (i.e.
- * after the panel fitter upscaling) as the initial
- * config, not the input mode, which is what crtc->mode
- * usually contains. But since our current
- * code puts a mode derived from the post-pfit timings
- * into crtc->mode this works out correctly.
- *
- * This is crtc->mode and not crtc->state->mode for the
- * fastboot check to work correctly.
- */
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for current mode\n",
- connector->base.id, connector->name);
- modes[i] = &connector->state->crtc->mode;
+ mode_type = "current";
+ mode_replace(dev, &modes[i],
+ &crtc->state->mode);
}
+
/*
* In case of tiled modes, if all tiles are not present
* then fallback to a non tiled mode.
*/
if (connector->has_tile &&
num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ mode_type = "non tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_fallback_non_tiled_mode(connector));
}
- crtcs[i] = new_crtc;
+ crtcs[i] = crtc;
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] on [CRTC:%d:%s]: %dx%d%s\n",
+ drm_dbg_kms(dev, "[CONNECTOR::%d:%s] on [CRTC:%d:%s] using %s mode: %s\n",
connector->base.id, connector->name,
- connector->state->crtc->base.id,
- connector->state->crtc->name,
- modes[i]->hdisplay, modes[i]->vdisplay,
- modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
+ crtc->base.id, crtc->name,
+ mode_type, modes[i]->name);
fallback = false;
conn_configured |= BIT(i);
@@ -799,8 +820,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
- /* points to modes protected by mode_config.mutex */
- struct drm_display_mode **modes;
+ const struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
bool *enabled;
@@ -851,7 +871,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
if (!drm_client_firmware_config(client, connectors, connector_count, crtcs,
modes, offsets, enabled, width, height)) {
- memset(modes, 0, connector_count * sizeof(*modes));
+ modes_destroy(dev, modes, connector_count);
memset(crtcs, 0, connector_count * sizeof(*crtcs));
memset(offsets, 0, connector_count * sizeof(*offsets));
@@ -868,10 +888,12 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
crtcs, modes, 0, width, height);
}
+ mutex_unlock(&dev->mode_config.mutex);
+
drm_client_modeset_release(client);
for (i = 0; i < connector_count; i++) {
- struct drm_display_mode *mode = modes[i];
+ const struct drm_display_mode *mode = modes[i];
struct drm_crtc *crtc = crtcs[i];
struct drm_client_offset *offset = &offsets[i];
@@ -902,11 +924,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
- mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:
kfree(crtcs);
+ modes_destroy(dev, modes, connector_count);
kfree(modes);
kfree(offsets);
kfree(enabled);
@@ -938,7 +960,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
struct drm_plane *plane = modeset->crtc->primary;
struct drm_cmdline_mode *cmdline;
u64 valid_mask = 0;
- unsigned int i;
+ int i;
if (!modeset->num_connectors)
return false;
@@ -1219,11 +1241,12 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp
struct drm_connector *connector;
struct drm_mode_set *modeset;
struct drm_modeset_acquire_ctx ctx;
- int j;
int ret;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
drm_client_for_each_modeset(modeset, client) {
+ int j;
+
if (!modeset->crtc->enabled)
continue;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 0955f1c385dd..39497493f74c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -334,7 +334,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- encoder_funcs = encoder->helper_private;
if (encoder_funcs->mode_fixup) {
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 6b2178864c7e..3dfd8b34dceb 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -740,40 +740,6 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
crtc->debugfs_entry = NULL;
}
-static int bridges_show(struct seq_file *m, void *data)
-{
- struct drm_encoder *encoder = m->private;
- struct drm_printer p = drm_seq_file_printer(m);
- struct drm_bridge *bridge;
- unsigned int idx = 0;
-
- drm_for_each_bridge_in_chain(encoder, bridge) {
- drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs);
- drm_printf(&p, "\ttype: [%d] %s\n",
- bridge->type,
- drm_get_connector_type_name(bridge->type));
-
- if (bridge->of_node)
- drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node);
-
- drm_printf(&p, "\tops: [0x%x]", bridge->ops);
- if (bridge->ops & DRM_BRIDGE_OP_DETECT)
- drm_puts(&p, " detect");
- if (bridge->ops & DRM_BRIDGE_OP_EDID)
- drm_puts(&p, " edid");
- if (bridge->ops & DRM_BRIDGE_OP_HPD)
- drm_puts(&p, " hpd");
- if (bridge->ops & DRM_BRIDGE_OP_MODES)
- drm_puts(&p, " modes");
- if (bridge->ops & DRM_BRIDGE_OP_HDMI)
- drm_puts(&p, " hdmi");
- drm_puts(&p, "\n");
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(bridges);
-
void drm_debugfs_encoder_add(struct drm_encoder *encoder)
{
struct drm_minor *minor = encoder->dev->primary;
@@ -789,9 +755,7 @@ void drm_debugfs_encoder_add(struct drm_encoder *encoder)
encoder->debugfs_entry = root;
- /* bridges list */
- debugfs_create_file("bridges", 0444, root, encoder,
- &bridges_fops);
+ drm_bridge_debugfs_encoder_params(root, encoder);
if (encoder->funcs && encoder->funcs->debugfs_init)
encoder->funcs->debugfs_init(encoder, root);
diff --git a/drivers/gpu/drm/drm_displayid_internal.h b/drivers/gpu/drm/drm_displayid_internal.h
index aee1b86a73c1..957dd0619f5c 100644
--- a/drivers/gpu/drm/drm_displayid_internal.h
+++ b/drivers/gpu/drm/drm_displayid_internal.h
@@ -66,6 +66,7 @@ struct drm_edid;
#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27
#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28
#define DATA_BLOCK_2_CONTAINER_ID 0x29
+#define DATA_BLOCK_2_TYPE_10_FORMULA_TIMING 0x2a
#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e
#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81
@@ -114,20 +115,32 @@ struct displayid_tiled_block {
struct displayid_detailed_timings_1 {
u8 pixel_clock[3];
u8 flags;
- u8 hactive[2];
- u8 hblank[2];
- u8 hsync[2];
- u8 hsw[2];
- u8 vactive[2];
- u8 vblank[2];
- u8 vsync[2];
- u8 vsw[2];
+ __le16 hactive;
+ __le16 hblank;
+ __le16 hsync;
+ __le16 hsw;
+ __le16 vactive;
+ __le16 vblank;
+ __le16 vsync;
+ __le16 vsw;
} __packed;
struct displayid_detailed_timing_block {
struct displayid_block base;
struct displayid_detailed_timings_1 timings[];
-};
+} __packed;
+
+struct displayid_formula_timings_9 {
+ u8 flags;
+ __le16 hactive;
+ __le16 vactive;
+ u8 vrefresh;
+} __packed;
+
+struct displayid_formula_timing_block {
+ struct displayid_block base;
+ struct displayid_formula_timings_9 timings[];
+} __packed;
#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0)
#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5)
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
index 385eb5e10047..9dc0408fbbea 100644
--- a/drivers/gpu/drm/drm_draw.c
+++ b/drivers/gpu/drm/drm_draw.c
@@ -13,85 +13,7 @@
#include <drm/drm_fourcc.h>
#include "drm_draw_internal.h"
-
-/*
- * Conversions from xrgb8888
- */
-
-static u16 convert_xrgb8888_to_rgb565(u32 pix)
-{
- return ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_rgba5551(u32 pix)
-{
- return ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
-}
-
-static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
-{
- return ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_argb1555(u32 pix)
-{
- return BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u32 convert_xrgb8888_to_argb8888(u32 pix)
-{
- return pix | GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
-}
-
-static u32 convert_xrgb8888_to_abgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_argb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
-{
- pix = ((pix & 0x00FF0000) >> 14) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x000000FF) << 22);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
+#include "drm_format_internal.h"
/**
* drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
@@ -106,28 +28,28 @@ u32 drm_draw_color_from_xrgb8888(u32 color, u32 format)
{
switch (format) {
case DRM_FORMAT_RGB565:
- return convert_xrgb8888_to_rgb565(color);
+ return drm_pixel_xrgb8888_to_rgb565(color);
case DRM_FORMAT_RGBA5551:
- return convert_xrgb8888_to_rgba5551(color);
+ return drm_pixel_xrgb8888_to_rgba5551(color);
case DRM_FORMAT_XRGB1555:
- return convert_xrgb8888_to_xrgb1555(color);
+ return drm_pixel_xrgb8888_to_xrgb1555(color);
case DRM_FORMAT_ARGB1555:
- return convert_xrgb8888_to_argb1555(color);
+ return drm_pixel_xrgb8888_to_argb1555(color);
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
return color;
case DRM_FORMAT_ARGB8888:
- return convert_xrgb8888_to_argb8888(color);
+ return drm_pixel_xrgb8888_to_argb8888(color);
case DRM_FORMAT_XBGR8888:
- return convert_xrgb8888_to_xbgr8888(color);
+ return drm_pixel_xrgb8888_to_xbgr8888(color);
case DRM_FORMAT_ABGR8888:
- return convert_xrgb8888_to_abgr8888(color);
+ return drm_pixel_xrgb8888_to_abgr8888(color);
case DRM_FORMAT_XRGB2101010:
- return convert_xrgb8888_to_xrgb2101010(color);
+ return drm_pixel_xrgb8888_to_xrgb2101010(color);
case DRM_FORMAT_ARGB2101010:
- return convert_xrgb8888_to_argb2101010(color);
+ return drm_pixel_xrgb8888_to_argb2101010(color);
case DRM_FORMAT_ABGR2101010:
- return convert_xrgb8888_to_abgr2101010(color);
+ return drm_pixel_xrgb8888_to_abgr2101010(color);
default:
WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
return 0;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 17fc5dc708f4..56dd61f8e05a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -40,6 +40,7 @@
#include <linux/xarray.h>
#include <drm/drm_accel.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_cache.h>
#include <drm/drm_client_event.h>
#include <drm/drm_color_mgmt.h>
@@ -500,6 +501,25 @@ void drm_dev_unplug(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unplug);
+/**
+ * drm_dev_set_dma_dev - set the DMA device for a DRM device
+ * @dev: DRM device
+ * @dma_dev: DMA device or NULL
+ *
+ * Sets the DMA device of the given DRM device. Only required if
+ * the DMA device is different from the DRM device's parent. After
+ * calling this function, the DRM device holds a reference on
+ * @dma_dev. Pass NULL to clear the DMA device.
+ */
+void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev)
+{
+ dma_dev = get_device(dma_dev);
+
+ put_device(dev->dma_dev);
+ dev->dma_dev = dma_dev;
+}
+EXPORT_SYMBOL(drm_dev_set_dma_dev);
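A probe-time sketch of when this is needed (illustrative; the foo_* driver, its devm_drm_dev_alloc() container layout and the choice of the parent as DMA device are assumptions):

	struct foo_device {
		struct drm_device drm;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_device *foo;

		foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
					 struct foo_device, drm);
		if (IS_ERR(foo))
			return PTR_ERR(foo);

		/* Scanout DMA is performed by a companion device rather than the
		 * DRM device itself, so point dma-buf importers at it. */
		drm_dev_set_dma_dev(&foo->drm, pdev->dev.parent);

		return drm_dev_register(&foo->drm, 0);
	}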
+
/*
* Available recovery methods for wedged device. To be sent along with device
* wedged uevent.
@@ -549,7 +569,7 @@ int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
if (drm_WARN_ONCE(dev, !recovery, "invalid recovery method %u\n", opt))
break;
- len += scnprintf(event_string + len, sizeof(event_string), "%s,", recovery);
+ len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", recovery);
}
if (recovery)
@@ -654,6 +674,8 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
{
drm_fs_inode_free(dev->anon_inode);
+ put_device(dev->dma_dev);
+ dev->dma_dev = NULL;
put_device(dev->dev);
/* Prevent use-after-free in drm_managed_release when debugging is
* enabled. Slightly awkward, but can't really be helped. */
@@ -808,36 +830,62 @@ void *__devm_drm_dev_alloc(struct device *parent,
EXPORT_SYMBOL(__devm_drm_dev_alloc);
/**
- * drm_dev_alloc - Allocate new DRM device
- * @driver: DRM driver to allocate device for
+ * __drm_dev_alloc - Allocation of a &drm_device instance
* @parent: Parent device object
+ * @driver: DRM driver
+ * @size: the size of the structure which contains &struct drm_device
+ * @offset: the offset of &struct drm_device within the container.
*
- * This is the deprecated version of devm_drm_dev_alloc(), which does not support
- * subclassing through embedding the struct &drm_device in a driver private
- * structure, and which does not support automatic cleanup through devres.
+ * This should *NOT* be used by any drivers, but is a dedicated interface for
+ * the corresponding Rust abstraction.
*
- * RETURNS:
- * Pointer to new DRM device, or ERR_PTR on failure.
+ * This is the same as devm_drm_dev_alloc(), but without the automatic
+ * resource management through the parent device. It is also not the same as
+ * drm_dev_alloc(), since the latter is the deprecated version which does not
+ * support subclassing.
+ *
+ * Returns: A pointer to the new DRM device, or an ERR_PTR on failure.
*/
-struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
- struct device *parent)
+void *__drm_dev_alloc(struct device *parent,
+ const struct drm_driver *driver,
+ size_t size, size_t offset)
{
- struct drm_device *dev;
+ void *container;
+ struct drm_device *drm;
int ret;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
return ERR_PTR(-ENOMEM);
- ret = drm_dev_init(dev, driver, parent);
+ drm = container + offset;
+ ret = drm_dev_init(drm, driver, parent);
if (ret) {
- kfree(dev);
+ kfree(container);
return ERR_PTR(ret);
}
+ drmm_add_final_kfree(drm, container);
- drmm_add_final_kfree(dev, dev);
+ return container;
+}
+EXPORT_SYMBOL(__drm_dev_alloc);
- return dev;
+/**
+ * drm_dev_alloc - Allocate new DRM device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * This is the deprecated version of devm_drm_dev_alloc(), which does not support
+ * subclassing through embedding the struct &drm_device in a driver private
+ * structure, and which does not support automatic cleanup through devres.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or ERR_PTR on failure.
+ */
+struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
+ struct device *parent)
+{
+ return __drm_dev_alloc(parent, driver, sizeof(struct drm_device), 0);
}
EXPORT_SYMBOL(drm_dev_alloc);
@@ -1188,6 +1236,7 @@ static int __init drm_core_init(void)
}
drm_debugfs_root = debugfs_create_dir("dri", NULL);
+ drm_bridge_debugfs_params(drm_debugfs_root);
ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
if (ret < 0)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 13bc4c290b17..74e77742b2bd 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -6596,6 +6596,7 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
+ memset(&connector->hdr_sink_metadata, 0, sizeof(connector->hdr_sink_metadata));
info->edid_hdmi_rgb444_dc_modes = 0;
info->edid_hdmi_ycbcr444_dc_modes = 0;
@@ -6760,23 +6761,23 @@ out:
}
static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev,
- struct displayid_detailed_timings_1 *timings,
+ const struct displayid_detailed_timings_1 *timings,
bool type_7)
{
struct drm_display_mode *mode;
- unsigned pixel_clock = (timings->pixel_clock[0] |
- (timings->pixel_clock[1] << 8) |
- (timings->pixel_clock[2] << 16)) + 1;
- unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
- unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
- unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
- unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1;
- unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1;
- unsigned vblank = (timings->vblank[0] | timings->vblank[1] << 8) + 1;
- unsigned vsync = (timings->vsync[0] | (timings->vsync[1] & 0x7f) << 8) + 1;
- unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1;
- bool hsync_positive = (timings->hsync[1] >> 7) & 0x1;
- bool vsync_positive = (timings->vsync[1] >> 7) & 0x1;
+ unsigned int pixel_clock = (timings->pixel_clock[0] |
+ (timings->pixel_clock[1] << 8) |
+ (timings->pixel_clock[2] << 16)) + 1;
+ unsigned int hactive = le16_to_cpu(timings->hactive) + 1;
+ unsigned int hblank = le16_to_cpu(timings->hblank) + 1;
+ unsigned int hsync = (le16_to_cpu(timings->hsync) & 0x7fff) + 1;
+ unsigned int hsync_width = le16_to_cpu(timings->hsw) + 1;
+ unsigned int vactive = le16_to_cpu(timings->vactive) + 1;
+ unsigned int vblank = le16_to_cpu(timings->vblank) + 1;
+ unsigned int vsync = (le16_to_cpu(timings->vsync) & 0x7fff) + 1;
+ unsigned int vsync_width = le16_to_cpu(timings->vsw) + 1;
+ bool hsync_positive = le16_to_cpu(timings->hsync) & (1 << 15);
+ bool vsync_positive = le16_to_cpu(timings->vsync) & (1 << 15);
mode = drm_mode_create(dev);
if (!mode)
@@ -6833,6 +6834,66 @@ static int add_displayid_detailed_1_modes(struct drm_connector *connector,
return num_modes;
}
+static struct drm_display_mode *drm_mode_displayid_formula(struct drm_device *dev,
+ const struct displayid_formula_timings_9 *timings,
+ bool type_10)
+{
+ struct drm_display_mode *mode;
+ u16 hactive = le16_to_cpu(timings->hactive) + 1;
+ u16 vactive = le16_to_cpu(timings->vactive) + 1;
+ u8 timing_formula = timings->flags & 0x7;
+
+ /* TODO: support RB-v2 & RB-v3 */
+ if (timing_formula > 1)
+ return NULL;
+
+ /* TODO: support video-optimized refresh rate */
+ if (timings->flags & (1 << 4))
+ drm_dbg_kms(dev, "Fractional vrefresh is not implemented, proceeding with non-video-optimized refresh rate");
+
+ mode = drm_cvt_mode(dev, hactive, vactive, timings->vrefresh + 1, timing_formula == 1, false, false);
+ if (!mode)
+ return NULL;
+
+ /* TODO: interpret S3D flags */
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
+
+ return mode;
+}
+
+static int add_displayid_formula_modes(struct drm_connector *connector,
+ const struct displayid_block *block)
+{
+ const struct displayid_formula_timing_block *formula_block = (struct displayid_formula_timing_block *)block;
+ int num_timings;
+ struct drm_display_mode *newmode;
+ int num_modes = 0;
+ bool type_10 = block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING;
+ int timing_size = 6 + ((formula_block->base.rev & 0x70) >> 4);
+
+ /* extended blocks are not supported yet */
+ if (timing_size != 6)
+ return 0;
+
+ if (block->num_bytes % timing_size)
+ return 0;
+
+ num_timings = block->num_bytes / timing_size;
+ for (int i = 0; i < num_timings; i++) {
+ const struct displayid_formula_timings_9 *timings = &formula_block->timings[i];
+
+ newmode = drm_mode_displayid_formula(connector->dev, timings, type_10);
+ if (!newmode)
+ continue;
+
+ drm_mode_probed_add(connector, newmode);
+ num_modes++;
+ }
+ return num_modes;
+}
+
static int add_displayid_detailed_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
@@ -6845,6 +6906,9 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
if (block->tag == DATA_BLOCK_TYPE_1_DETAILED_TIMING ||
block->tag == DATA_BLOCK_2_TYPE_7_DETAILED_TIMING)
num_modes += add_displayid_detailed_1_modes(connector, block);
+ else if (block->tag == DATA_BLOCK_2_TYPE_9_FORMULA_TIMING ||
+ block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING)
+ num_modes += add_displayid_formula_modes(connector, block);
}
displayid_iter_end(&iter);
@@ -7099,18 +7163,12 @@ EXPORT_SYMBOL(drm_add_edid_modes);
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay)
+ unsigned int hdisplay, unsigned int vdisplay)
{
- int i, count, num_modes = 0;
+ int i, count = ARRAY_SIZE(drm_dmt_modes), num_modes = 0;
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
- count = ARRAY_SIZE(drm_dmt_modes);
- if (hdisplay < 0)
- hdisplay = 0;
- if (vdisplay < 0)
- vdisplay = 0;
-
for (i = 0; i < count; i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index c299cd94d3f7..246cf845e2c9 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -964,6 +964,10 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
struct drm_file *file = f->private_data;
struct drm_device *dev = file->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name);
drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id);
@@ -983,10 +987,46 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
if (dev->driver->show_fdinfo)
dev->driver->show_fdinfo(&p, file);
+
+ drm_dev_exit(idx);
}
EXPORT_SYMBOL(drm_show_fdinfo);
/**
+ * drm_file_err - log process name, pid and client_name associated with a drm_file
+ * @file_priv: context of interest for process name and pid
+ * @fmt: printf() like format string
+ *
+ * Helper function for clients which need to log process details such
+ * as the process name and pid along with their own messages.
+ */
+void drm_file_err(struct drm_file *file_priv, const char *fmt, ...)
+{
+ va_list args;
+ struct va_format vaf;
+ struct pid *pid;
+ struct task_struct *task;
+ struct drm_device *dev = file_priv->minor->dev;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ mutex_lock(&file_priv->client_name_lock);
+ rcu_read_lock();
+ pid = rcu_dereference(file_priv->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+
+ drm_err(dev, "comm: %s pid: %d client: %s ... %pV", task ? task->comm : "Unset",
+ task ? task->pid : 0, file_priv->client_name ?: "Unset", &vaf);
+
+ va_end(args);
+ rcu_read_unlock();
+ mutex_unlock(&file_priv->client_name_lock);
+}
+EXPORT_SYMBOL(drm_file_err);
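A sketch of the intended call site (illustrative; struct foo_job, its fields and the timeout path are assumptions, not part of the patch):

	struct foo_job {
		struct drm_file *file_priv;
		u64 id;
		unsigned int timeout_ms;
	};

	static void foo_job_timedout(struct foo_job *job)
	{
		/* Prepends "comm: ... pid: ... client: ..." to the message below. */
		drm_file_err(job->file_priv, "job %llu timed out after %u ms\n",
			     job->id, job->timeout_ms);
	}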
+
+/**
* mock_drm_getfile - Create a new struct file for the drm device
* @minor: drm minor to wrap (e.g. #drm_device.primary)
* @flags: file creation mode (O_RDWR etc)
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 01d3ab307ac3..d36e6cacc575 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -20,6 +20,8 @@
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
+#include "drm_format_internal.h"
+
/**
* drm_format_conv_state_init - Initialize format-conversion state
* @state: The state to initialize
@@ -244,6 +246,152 @@ static int drm_fb_xfrm(struct iosys_map *dst,
xfrm_line);
}
+#define ALIGN_DOWN_PIXELS(end, n, a) \
+ ((end) - ((n) & ((a) - 1)))
+
+static __always_inline void drm_fb_xfrm_line_32to8(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ u8 *dbuf8;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ /* write 4 pixels at once */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 pix[4] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ le32_to_cpup(sbuf32 + 2),
+ le32_to_cpup(sbuf32 + 3),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u32 val32 = xfrm_pixel(pix[0]) |
+ (xfrm_pixel(pix[1]) << 8) |
+ (xfrm_pixel(pix[2]) << 16) |
+ (xfrm_pixel(pix[3]) << 24);
+ *dbuf32++ = cpu_to_le32(val32);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+
+ /* write trailing pixels */
+ dbuf8 = (u8 __force *)dbuf32;
+ while (sbuf32 < send32)
+ *dbuf8++ = xfrm_pixel(le32_to_cpup(sbuf32++));
+}
+
+static __always_inline void drm_fb_xfrm_line_32to16(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le64 *dbuf64 = dbuf;
+ __le32 *dbuf32;
+ __le16 *dbuf16;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+#if defined(CONFIG_64BIT)
+ /* write 4 pixels at once */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 pix[4] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ le32_to_cpup(sbuf32 + 2),
+ le32_to_cpup(sbuf32 + 3),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u64 val64 = ((u64)xfrm_pixel(pix[0])) |
+ ((u64)xfrm_pixel(pix[1]) << 16) |
+ ((u64)xfrm_pixel(pix[2]) << 32) |
+ ((u64)xfrm_pixel(pix[3]) << 48);
+ *dbuf64++ = cpu_to_le64(val64);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+#endif
+
+ /* write 2 pixels at once */
+ dbuf32 = (__le32 __force *)dbuf64;
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 2)) {
+ u32 pix[2] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u32 val32 = xfrm_pixel(pix[0]) |
+ (xfrm_pixel(pix[1]) << 16);
+ *dbuf32++ = cpu_to_le32(val32);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+
+ /* write trailing pixel */
+ dbuf16 = (__le16 __force *)dbuf32;
+ while (sbuf32 < send32)
+ *dbuf16++ = cpu_to_le16(xfrm_pixel(le32_to_cpup(sbuf32++)));
+}
+
+static __always_inline void drm_fb_xfrm_line_32to24(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ u8 *dbuf8;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ /* write pixels in chunks of 4 */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 val24[4] = {
+ xfrm_pixel(le32_to_cpup(sbuf32)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 1)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 2)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 3)),
+ };
+ u32 out32[3] = {
+ /* write output bytes in reverse order for little endianness */
+ ((val24[0] & 0x000000ff)) |
+ ((val24[0] & 0x0000ff00)) |
+ ((val24[0] & 0x00ff0000)) |
+ ((val24[1] & 0x000000ff) << 24),
+ ((val24[1] & 0x0000ff00) >> 8) |
+ ((val24[1] & 0x00ff0000) >> 8) |
+ ((val24[2] & 0x000000ff) << 16) |
+ ((val24[2] & 0x0000ff00) << 16),
+ ((val24[2] & 0x00ff0000) >> 16) |
+ ((val24[3] & 0x000000ff) << 8) |
+ ((val24[3] & 0x0000ff00) << 8) |
+ ((val24[3] & 0x00ff0000) << 8),
+ };
+
+ *dbuf32++ = cpu_to_le32(out32[0]);
+ *dbuf32++ = cpu_to_le32(out32[1]);
+ *dbuf32++ = cpu_to_le32(out32[2]);
+ sbuf32 += ARRAY_SIZE(val24);
+ }
+
+ /* write trailing pixel */
+ dbuf8 = (u8 __force *)dbuf32;
+ while (sbuf32 < send32) {
+ u32 val24 = xfrm_pixel(le32_to_cpup(sbuf32++));
+ /* write output in reverse order for little endianness */
+ *dbuf8++ = (val24 & 0x000000ff);
+ *dbuf8++ = (val24 & 0x0000ff00) >> 8;
+ *dbuf8++ = (val24 & 0x00ff0000) >> 16;
+ }
+}
+
+static __always_inline void drm_fb_xfrm_line_32to32(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ while (sbuf32 < send32)
+ *dbuf32++ = cpu_to_le32(xfrm_pixel(le32_to_cpup(sbuf32++)));
+}
+
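To illustrate how these generic line helpers compose with the per-pixel conversions from drm_format_internal.h, here is a hypothetical converter for an additional format; drm_pixel_xrgb8888_to_bgr565() does not exist in the tree and is only a sketch of the pattern:

	static inline u32 drm_pixel_xrgb8888_to_bgr565(u32 pix)
	{
		return ((pix & 0x00f80000) >> 19) |   /* R -> bits 4:0 */
		       ((pix & 0x0000fc00) >> 5)  |   /* G -> bits 10:5 */
		       ((pix & 0x000000f8) << 8);     /* B -> bits 15:11 */
	}

	static void drm_fb_xrgb8888_to_bgr565_line(void *dbuf, const void *sbuf,
						   unsigned int pixels)
	{
		/* The generic helper does the 64-bit/32-bit batching and the tail. */
		drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgr565);
	}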
/**
* drm_fb_memcpy - Copy clip buffer
* @dst: Array of destination buffers
@@ -368,17 +516,7 @@ EXPORT_SYMBOL(drm_fb_swab);
static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- dbuf8[x] = ((pix & 0x00e00000) >> 16) |
- ((pix & 0x0000e000) >> 11) |
- ((pix & 0x000000c0) >> 6);
- }
+ drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb332);
}
/**
@@ -417,38 +555,19 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565);
+}
+
+static __always_inline u32 drm_xrgb8888_to_rgb565_swab(u32 pix)
+{
+ return swab16(drm_pixel_xrgb8888_to_rgb565(pix));
}
/* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */
static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
- dbuf16[x] = cpu_to_le16(swab16(val16));
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_xrgb8888_to_rgb565_swab);
}
/**
@@ -495,19 +614,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb1555);
}
/**
@@ -547,20 +654,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555);
static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb1555);
}
/**
@@ -600,20 +694,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555);
static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgba5551);
}
/**
@@ -653,18 +734,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551);
static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- /* write blue-green-red to output in little endianness */
- *dbuf8++ = (pix & 0x000000FF) >> 0;
- *dbuf8++ = (pix & 0x0000FF00) >> 8;
- *dbuf8++ = (pix & 0x00FF0000) >> 16;
- }
+ drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb888);
}
/**
@@ -704,18 +774,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- /* write red-green-blue to output in little endianness */
- *dbuf8++ = (pix & 0x00ff0000) >> 16;
- *dbuf8++ = (pix & 0x0000ff00) >> 8;
- *dbuf8++ = (pix & 0x000000ff) >> 0;
- }
+ drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgr888);
}
/**
@@ -755,16 +814,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888);
static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix |= GENMASK(31, 24); /* fill alpha bits */
- dbuf32[x] = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb8888);
}
/**
@@ -804,19 +854,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix = ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888);
}
static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
@@ -835,19 +873,7 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in
static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix = ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888);
}
static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
@@ -866,20 +892,7 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val32 = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- pix = val32 | ((val32 >> 8) & 0x00300C03);
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb2101010);
}
/**
@@ -920,21 +933,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val32 = ((pix & 0x000000ff) << 2) |
- ((pix & 0x0000ff00) << 4) |
- ((pix & 0x00ff0000) << 6);
- pix = GENMASK(31, 30) | /* set alpha bits */
- val32 | ((val32 >> 8) & 0x00300c03);
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb2101010);
}
/**
@@ -975,19 +974,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010);
static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
-
- for (x = 0; x < pixels; x++) {
- u32 pix = le32_to_cpu(sbuf32[x]);
- u8 r = (pix & 0x00ff0000) >> 16;
- u8 g = (pix & 0x0000ff00) >> 8;
- u8 b = pix & 0x000000ff;
-
- /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- *dbuf8++ = (3 * r + 6 * g + b) / 10;
- }
+ drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_r8_bt601);
}
/**
@@ -1031,36 +1018,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
static void drm_fb_argb8888_to_argb4444_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- unsigned int pixels2 = pixels & ~GENMASK_ULL(0, 0);
- __le32 *dbuf32 = dbuf;
- __le16 *dbuf16 = dbuf + pixels2 * sizeof(*dbuf16);
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u16 val16;
- u32 pix[2];
-
- for (x = 0; x < pixels2; x += 2, ++dbuf32) {
- pix[0] = le32_to_cpu(sbuf32[x]);
- pix[1] = le32_to_cpu(sbuf32[x + 1]);
- val32 = ((pix[0] & 0xf0000000) >> 16) |
- ((pix[0] & 0x00f00000) >> 12) |
- ((pix[0] & 0x0000f000) >> 8) |
- ((pix[0] & 0x000000f0) >> 4) |
- ((pix[1] & 0xf0000000) >> 0) |
- ((pix[1] & 0x00f00000) << 4) |
- ((pix[1] & 0x0000f000) << 8) |
- ((pix[1] & 0x000000f0) << 12);
- *dbuf32 = cpu_to_le32(val32);
- }
- for (; x < pixels; x++) {
- pix[0] = le32_to_cpu(sbuf32[x]);
- val16 = ((pix[0] & 0xf0000000) >> 16) |
- ((pix[0] & 0x00f00000) >> 12) |
- ((pix[0] & 0x0000f000) >> 8) |
- ((pix[0] & 0x000000f0) >> 4);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_argb8888_to_argb4444);
}
/**
diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h
new file mode 100644
index 000000000000..9f857bfa368d
--- /dev/null
+++ b/drivers/gpu/drm/drm_format_internal.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+
+#ifndef DRM_FORMAT_INTERNAL_H
+#define DRM_FORMAT_INTERNAL_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/*
+ * Each pixel-format conversion helper takes a raw pixel in a
+ * specific input format and returns a raw pixel in a specific
+ * output format. All pixels are in little-endian byte order.
+ *
+ * Function names are
+ *
+ * drm_pixel_<input>_to_<output>_<algorithm>()
+ *
+ * where <input> and <output> refer to pixel formats. The
+ * <algorithm> is optional and hints to the method used for the
+ * conversion. Helpers with no algorithm given apply pixel-bit
+ * shifting.
+ *
+ * The argument type is u32. We expect this to be wide enough to
+ * hold all conversion input from 32-bit RGB to any output format.
+ * The Linux kernel should avoid format conversion for anything
+ * but XRGB8888 input data. Converting from other formats can still
+ * be acceptable in some cases.
+ *
+ * The return type is u32. It is wide enough to hold all conversion
+ * output from XRGB8888. For output formats wider than 32 bit, a
+ * return type of u64 would be acceptable.
+ */
+
+/*
+ * Conversions from XRGB8888
+ */
+
+static inline u32 drm_pixel_xrgb8888_to_r8_bt601(u32 pix)
+{
+ u32 r = (pix & 0x00ff0000) >> 16;
+ u32 g = (pix & 0x0000ff00) >> 8;
+ u32 b = pix & 0x000000ff;
+
+ /* ITU-R BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
+ return (3 * r + 6 * g + b) / 10;
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb332(u32 pix)
+{
+ return ((pix & 0x00e00000) >> 16) |
+ ((pix & 0x0000e000) >> 11) |
+ ((pix & 0x000000c0) >> 6);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb565(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000fc00) >> 5) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgbx5551(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgba5551(u32 pix)
+{
+ return drm_pixel_xrgb8888_to_rgbx5551(pix) |
+ BIT(0); /* set alpha bit */
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xrgb1555(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb1555(u32 pix)
+{
+ return BIT(15) | /* set alpha bit */
+ drm_pixel_xrgb8888_to_xrgb1555(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb888(u32 pix)
+{
+ return pix & GENMASK(23, 0);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_bgr888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) |
+ ((pix & 0x0000ff00)) |
+ ((pix & 0x000000ff) << 16);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb8888(u32 pix)
+{
+ return GENMASK(31, 24) | /* fill alpha bits */
+ pix;
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix)
+{
+ return ((pix & 0xff000000)) | /* also copy filler bits */
+ ((pix & 0x00ff0000) >> 16) |
+ ((pix & 0x0000ff00)) |
+ ((pix & 0x000000ff) << 16);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix)
+{
+ return GENMASK(31, 24) | /* fill alpha bits */
+ drm_pixel_xrgb8888_to_xbgr8888(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xrgb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000ff) << 2) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x00ff0000) << 6);
+ return pix | ((pix >> 8) & 0x00300c03);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb2101010(u32 pix)
+{
+ return GENMASK(31, 30) | /* set alpha bits */
+ drm_pixel_xrgb8888_to_xrgb2101010(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xbgr2101010(u32 pix)
+{
+ pix = ((pix & 0x00ff0000) >> 14) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x000000ff) << 22);
+ return pix | ((pix >> 8) & 0x00300c03);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_abgr2101010(u32 pix)
+{
+ return GENMASK(31, 30) | /* set alpha bits */
+ drm_pixel_xrgb8888_to_xbgr2101010(pix);
+}
+
+/*
+ * Conversion from ARGB8888
+ */
+
+static inline u32 drm_pixel_argb8888_to_argb4444(u32 pix)
+{
+ return ((pix & 0xf0000000) >> 16) |
+ ((pix & 0x00f00000) >> 12) |
+ ((pix & 0x0000f000) >> 8) |
+ ((pix & 0x000000f0) >> 4);
+}
+
+#endif
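
A minimal sketch of how these helpers are meant to be used, assuming the drm_format_internal.h header above is included. drm_fb_xfrm_line_32to16() is only named elsewhere in this diff, so the open-coded loop below merely illustrates the per-pixel call pattern rather than the kernel's actual line-transform implementation:

/* Hedged example, not kernel code: convert one line of XRGB8888 to RGB565. */
static void example_line_xrgb8888_to_rgb565(void *dbuf, const void *sbuf,
					    unsigned int pixels)
{
	__le16 *dbuf16 = dbuf;
	const __le32 *sbuf32 = sbuf;
	unsigned int x;

	for (x = 0; x < pixels; x++) {
		u32 pix = le32_to_cpu(sbuf32[x]);

		dbuf16[x] = cpu_to_le16(drm_pixel_xrgb8888_to_rgb565(pix));
	}
}
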
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c6240bab3fa5..1e659d2660f7 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1216,7 +1216,7 @@ void drm_gem_unpin(struct drm_gem_object *obj)
dma_resv_unlock(obj->resv);
}
-int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
@@ -1233,9 +1233,9 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
return 0;
}
-EXPORT_SYMBOL(drm_gem_vmap);
+EXPORT_SYMBOL(drm_gem_vmap_locked);
-void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_assert_held(obj->resv);
@@ -1248,7 +1248,7 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
/* Always set the mapping to NULL. Callers may rely on this. */
iosys_map_clear(map);
}
-EXPORT_SYMBOL(drm_gem_vunmap);
+EXPORT_SYMBOL(drm_gem_vunmap_locked);
void drm_gem_lock(struct drm_gem_object *obj)
{
@@ -1262,25 +1262,25 @@ void drm_gem_unlock(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_unlock);
-int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
dma_resv_lock(obj->resv, NULL);
- ret = drm_gem_vmap(obj, map);
+ ret = drm_gem_vmap_locked(obj, map);
dma_resv_unlock(obj->resv);
return ret;
}
-EXPORT_SYMBOL(drm_gem_vmap_unlocked);
+EXPORT_SYMBOL(drm_gem_vmap);
-void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_lock(obj->resv, NULL);
- drm_gem_vunmap(obj, map);
+ drm_gem_vunmap_locked(obj, map);
dma_resv_unlock(obj->resv);
}
-EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
+EXPORT_SYMBOL(drm_gem_vunmap);
/**
* drm_gem_lock_reservations - Sets up the ww context and acquires
@@ -1543,10 +1543,10 @@ tail:
EXPORT_SYMBOL(drm_gem_lru_scan);
/**
- * drm_gem_evict - helper to evict backing pages for a GEM object
+ * drm_gem_evict_locked - helper to evict backing pages for a GEM object
* @obj: obj in question
*/
-int drm_gem_evict(struct drm_gem_object *obj)
+int drm_gem_evict_locked(struct drm_gem_object *obj)
{
dma_resv_assert_held(obj->resv);
@@ -1558,4 +1558,4 @@ int drm_gem_evict(struct drm_gem_object *obj)
return 0;
}
-EXPORT_SYMBOL(drm_gem_evict);
+EXPORT_SYMBOL(drm_gem_evict_locked);
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 0fbeb686e561..6f72e7a0f427 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -362,7 +362,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
ret = -EINVAL;
goto err_drm_gem_vunmap;
}
- ret = drm_gem_vmap_unlocked(obj, &map[i]);
+ ret = drm_gem_vmap(obj, &map[i]);
if (ret)
goto err_drm_gem_vunmap;
}
@@ -384,7 +384,7 @@ err_drm_gem_vunmap:
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
- drm_gem_vunmap_unlocked(obj, &map[i]);
+ drm_gem_vunmap(obj, &map[i]);
}
return ret;
}
@@ -411,7 +411,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
continue;
if (iosys_map_is_null(&map[i]))
continue;
- drm_gem_vunmap_unlocked(obj, &map[i]);
+ drm_gem_vunmap(obj, &map[i]);
}
}
EXPORT_SYMBOL(drm_gem_fb_vunmap);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index d99dee67353a..aa43265f4f4f 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -165,7 +165,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
} else {
dma_resv_lock(shmem->base.resv, NULL);
- drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
if (shmem->sgt) {
dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
@@ -174,9 +174,10 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
}
if (shmem->pages)
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
- drm_WARN_ON(obj->dev, shmem->pages_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
dma_resv_unlock(shmem->base.resv);
}
@@ -186,21 +187,20 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
-static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct page **pages;
dma_resv_assert_held(shmem->base.resv);
- if (shmem->pages_use_count++ > 0)
+ if (refcount_inc_not_zero(&shmem->pages_use_count))
return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
PTR_ERR(pages));
- shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
@@ -216,38 +216,36 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
shmem->pages = pages;
+ refcount_set(&shmem->pages_use_count, 1);
+
return 0;
}
/*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
* @shmem: shmem GEM object
*
* This function decreases the use count and puts the backing pages when use drops to zero.
*/
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- return;
-
- if (--shmem->pages_use_count > 0)
- return;
-
+ if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
- if (shmem->map_wc)
- set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
+ if (shmem->map_wc)
+ set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif
- drm_gem_put_pages(obj, shmem->pages,
- shmem->pages_mark_dirty_on_put,
- shmem->pages_mark_accessed_on_put);
- shmem->pages = NULL;
+ drm_gem_put_pages(obj, shmem->pages,
+ shmem->pages_mark_dirty_on_put,
+ shmem->pages_mark_accessed_on_put);
+ shmem->pages = NULL;
+ }
}
-EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
@@ -257,7 +255,12 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));
- ret = drm_gem_shmem_get_pages(shmem);
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
+
+ ret = drm_gem_shmem_get_pages_locked(shmem);
+ if (!ret)
+ refcount_set(&shmem->pages_pin_count, 1);
return ret;
}
@@ -267,7 +270,8 @@ void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
dma_resv_assert_held(shmem->base.resv);
- drm_gem_shmem_put_pages(shmem);
+ if (refcount_dec_and_test(&shmem->pages_pin_count))
+ drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);
@@ -288,6 +292,9 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
+
ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
if (ret)
return ret;
@@ -296,7 +303,7 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
return ret;
}
-EXPORT_SYMBOL(drm_gem_shmem_pin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);
/**
* drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
@@ -311,14 +318,17 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+ if (refcount_dec_not_one(&shmem->pages_pin_count))
+ return;
+
dma_resv_lock(shmem->base.resv, NULL);
drm_gem_shmem_unpin_locked(shmem);
dma_resv_unlock(shmem->base.resv);
}
-EXPORT_SYMBOL(drm_gem_shmem_unpin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
/*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing
* store.
@@ -327,47 +337,43 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin);
* exists for the buffer backing the shmem GEM object. It hides the differences
* between dma-buf imported and natively allocated objects.
*
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
if (drm_gem_is_imported(obj)) {
ret = dma_buf_vmap(obj->dma_buf, map);
- if (!ret) {
- if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->dma_buf, map);
- return -EIO;
- }
- }
} else {
pgprot_t prot = PAGE_KERNEL;
dma_resv_assert_held(shmem->base.resv);
- if (shmem->vmap_use_count++ > 0) {
+ if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
iosys_map_set_vaddr(map, shmem->vaddr);
return 0;
}
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_pin_locked(shmem);
if (ret)
- goto err_zero_use;
+ return ret;
if (shmem->map_wc)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
- if (!shmem->vaddr)
+ if (!shmem->vaddr) {
ret = -ENOMEM;
- else
+ } else {
iosys_map_set_vaddr(map, shmem->vaddr);
+ refcount_set(&shmem->vmap_use_count, 1);
+ }
}
if (ret) {
@@ -379,28 +385,26 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
err_put_pages:
if (!drm_gem_is_imported(obj))
- drm_gem_shmem_put_pages(shmem);
-err_zero_use:
- shmem->vmap_use_count = 0;
+ drm_gem_shmem_unpin_locked(shmem);
return ret;
}
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);
/*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
* This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
+ * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
+ * drops to zero.
*
* This function hides the differences between dma-buf imported and natively
* allocated objects.
*/
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
@@ -409,19 +413,15 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
} else {
dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
- return;
-
- if (--shmem->vmap_use_count > 0)
- return;
+ if (refcount_dec_and_test(&shmem->vmap_use_count)) {
+ vunmap(shmem->vaddr);
+ shmem->vaddr = NULL;
- vunmap(shmem->vaddr);
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_unpin_locked(shmem);
+ }
}
-
- shmem->vaddr = NULL;
}
-EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
@@ -449,7 +449,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
/* Update madvise status, returns true if not purged, else
* false or -errno.
*/
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
dma_resv_assert_held(shmem->base.resv);
@@ -460,9 +460,9 @@ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
return (madv >= 0);
}
-EXPORT_SYMBOL(drm_gem_shmem_madvise);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);
-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
@@ -476,7 +476,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
shmem->sgt = NULL;
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
shmem->madv = -1;
@@ -492,7 +492,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
-EXPORT_SYMBOL(drm_gem_shmem_purge);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
@@ -575,8 +575,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
- if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- shmem->pages_use_count++;
+ drm_WARN_ON_ONCE(obj->dev,
+ !refcount_inc_not_zero(&shmem->pages_use_count));
dma_resv_unlock(shmem->base.resv);
@@ -589,7 +589,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
dma_resv_lock(shmem->base.resv, NULL);
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
dma_resv_unlock(shmem->base.resv);
drm_gem_vm_close(vma);
@@ -639,7 +639,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return -EINVAL;
dma_resv_lock(shmem->base.resv, NULL);
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
dma_resv_unlock(shmem->base.resv);
if (ret)
@@ -666,11 +666,12 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
if (drm_gem_is_imported(&shmem->base))
return;
- drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
- drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+ drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
+ drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
+ drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
-EXPORT_SYMBOL(drm_gem_shmem_print_info);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
/**
* drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
@@ -707,7 +708,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
if (ret)
return ERR_PTR(ret);
@@ -729,7 +730,7 @@ err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
err_put_pages:
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
return ERR_PTR(ret);
}
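
The hunks above replace the open-coded pages_use_count/vmap_use_count counters with refcount_t and add pages_pin_count. A minimal sketch of the resulting get/put pattern, using illustrative example_* names rather than the shmem helpers themselves:

#include <linux/refcount.h>

struct example_obj {
	refcount_t use_count;		/* plays the role of pages_use_count */
};

static int example_expensive_setup(struct example_obj *obj);		/* hypothetical */
static void example_expensive_teardown(struct example_obj *obj);	/* hypothetical */

static int example_get(struct example_obj *obj)
{
	/* Fast path: setup already done, just take another reference. */
	if (refcount_inc_not_zero(&obj->use_count))
		return 0;

	/* Slow path: do the expensive setup, then publish the first reference. */
	if (example_expensive_setup(obj))
		return -ENOMEM;

	refcount_set(&obj->use_count, 1);
	return 0;
}

static void example_put(struct example_obj *obj)
{
	/* Teardown runs only when the last reference is dropped. */
	if (refcount_dec_and_test(&obj->use_count))
		example_expensive_teardown(obj);
}
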
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 38431e8360e7..735bfdf4322f 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -1118,6 +1118,10 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
lockdep_assert_held(&gpusvm->notifier_lock);
if (range->flags.has_dma_mapping) {
+ struct drm_gpusvm_range_flags flags = {
+ .__flags = range->flags.__flags,
+ };
+
for (i = 0, j = 0; i < npages; j++) {
struct drm_pagemap_device_addr *addr = &range->dma_addr[j];
@@ -1131,8 +1135,12 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
dev, *addr);
i += 1 << addr->order;
}
- range->flags.has_devmem_pages = false;
- range->flags.has_dma_mapping = false;
+
+ /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
+ flags.has_devmem_pages = false;
+ flags.has_dma_mapping = false;
+ WRITE_ONCE(range->flags.__flags, flags.__flags);
+
range->dpagemap = NULL;
}
}
@@ -1330,10 +1338,10 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
unsigned long num_dma_mapped;
unsigned int order = 0;
unsigned long *pfns;
- struct page **pages;
int err = 0;
struct dev_pagemap *pagemap;
struct drm_pagemap *dpagemap;
+ struct drm_gpusvm_range_flags flags;
retry:
hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
@@ -1369,7 +1377,6 @@ retry:
if (err)
goto err_free;
- pages = (struct page **)pfns;
map_pages:
/*
* Perform all dma mappings under the notifier lock to not
@@ -1378,7 +1385,8 @@ map_pages:
*/
drm_gpusvm_notifier_lock(gpusvm);
- if (range->flags.unmapped) {
+ flags.__flags = range->flags.__flags;
+ if (flags.unmapped) {
drm_gpusvm_notifier_unlock(gpusvm);
err = -EFAULT;
goto err_free;
@@ -1444,8 +1452,6 @@ map_pages:
err = -EFAULT;
goto err_unmap;
}
-
- pages[i] = page;
} else {
dma_addr_t addr;
@@ -1454,6 +1460,11 @@ map_pages:
goto err_unmap;
}
+ if (ctx->devmem_only) {
+ err = -EFAULT;
+ goto err_unmap;
+ }
+
addr = dma_map_page(gpusvm->drm->dev,
page, 0,
PAGE_SIZE << order,
@@ -1469,14 +1480,17 @@ map_pages:
}
i += 1 << order;
num_dma_mapped = i;
+ flags.has_dma_mapping = true;
}
- range->flags.has_dma_mapping = true;
if (zdd) {
- range->flags.has_devmem_pages = true;
+ flags.has_devmem_pages = true;
range->dpagemap = dpagemap;
}
+ /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
+ WRITE_ONCE(range->flags.__flags, flags.__flags);
+
drm_gpusvm_notifier_unlock(gpusvm);
kvfree(pfns);
set_seqno:
@@ -1765,6 +1779,8 @@ int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
goto err_finalize;
/* Upon success bind devmem allocation to range and zdd */
+ devmem_allocation->timeslice_expiration = get_jiffies_64() +
+ msecs_to_jiffies(ctx->timeslice_ms);
zdd->devmem_allocation = devmem_allocation; /* Owns ref */
err_finalize:
@@ -1985,6 +2001,13 @@ static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas,
void *buf;
int i, err = 0;
+ if (page) {
+ zdd = page->zone_device_data;
+ if (time_before64(get_jiffies_64(),
+ zdd->devmem_allocation->timeslice_expiration))
+ return 0;
+ }
+
start = ALIGN_DOWN(fault_addr, size);
end = ALIGN(fault_addr + 1, size);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b2b6a8e49dda..e44f28fd81d3 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -179,8 +179,8 @@ int drm_gem_pin_locked(struct drm_gem_object *obj);
void drm_gem_unpin_locked(struct drm_gem_object *obj);
int drm_gem_pin(struct drm_gem_object *obj);
void drm_gem_unpin(struct drm_gem_object *obj);
-int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
-void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
+int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 89e05a5bed1d..a4cd476f9b30 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -404,12 +404,16 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
u16 height = drm->mode_config.min_height;
u16 width = drm->mode_config.min_width;
struct mipi_dbi *dbi = &dbidev->dbi;
- size_t len = width * height * 2;
+ const struct drm_format_info *dst_format;
+ size_t len;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
+ dst_format = drm_format_info(dbidev->pixel_format);
+ len = drm_format_info_min_pitch(dst_format, 0, width) * height;
+
memset(dbidev->tx_buf, 0, len);
mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index dfa595556320..e5184a0c2465 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -36,6 +36,8 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_print.h>
+#include <linux/media-bus-format.h>
+
#include <video/mipi_display.h>
/**
@@ -871,6 +873,41 @@ ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
EXPORT_SYMBOL(mipi_dsi_generic_read);
/**
+ * drm_mipi_dsi_get_input_bus_fmt() - Get the required MEDIA_BUS_FMT_* based
+ * input pixel format for a given DSI output
+ * pixel format
+ * @dsi_format: pixel format that a DSI host needs to output
+ *
+ * Various DSI hosts can use this function during their
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts operation to ascertain
+ * the MEDIA_BUS_FMT_* pixel format required as input.
+ *
+ * RETURNS:
+ * a 32-bit MEDIA_BUS_FMT_* value on success or 0 in case of failure.
+ */
+u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format)
+{
+ switch (dsi_format) {
+ case MIPI_DSI_FMT_RGB888:
+ return MEDIA_BUS_FMT_RGB888_1X24;
+
+ case MIPI_DSI_FMT_RGB666:
+ return MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return MEDIA_BUS_FMT_RGB666_1X18;
+
+ case MIPI_DSI_FMT_RGB565:
+ return MEDIA_BUS_FMT_RGB565_1X16;
+
+ default:
+ /* Unsupported DSI Format */
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_mipi_dsi_get_input_bus_fmt);
+
+/**
* mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload
* @dsi: DSI peripheral device
* @data: buffer containing data to be transmitted
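
A hedged sketch of how a DSI host bridge might use the new helper from its &drm_bridge_funcs.atomic_get_input_bus_fmts callback. The callback prototype follows drm_bridge_funcs; hardcoding MIPI_DSI_FMT_RGB888 stands in for whatever format the attached peripheral requested:

static u32 *example_dsi_get_input_bus_fmts(struct drm_bridge *bridge,
					   struct drm_bridge_state *bridge_state,
					   struct drm_crtc_state *crtc_state,
					   struct drm_connector_state *conn_state,
					   u32 output_fmt,
					   unsigned int *num_input_fmts)
{
	enum mipi_dsi_pixel_format dsi_format = MIPI_DSI_FMT_RGB888; /* illustrative */
	u32 *input_fmts;

	*num_input_fmts = 0;

	input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	input_fmts[0] = drm_mipi_dsi_get_input_bus_fmt(dsi_format);
	if (!input_fmts[0]) {	/* 0 means the DSI format is unsupported */
		kfree(input_fmts);
		return NULL;
	}

	*num_input_fmts = 1;
	return input_fmts;
}
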
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 8642a2fb25a9..b4239fd04e9d 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -383,6 +383,13 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
prop = drm_property_create(dev,
DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
+ "IN_FORMATS_ASYNC", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.async_modifiers_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
"SIZE_HINTS", 0);
if (!prop)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index c627e42a7ce7..650de4da0853 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -74,8 +74,9 @@ EXPORT_SYMBOL(drm_panel_init);
* drm_panel_add - add a panel to the global registry
* @panel: panel to add
*
- * Add a panel to the global registry so that it can be looked up by display
- * drivers.
+ * Add a panel to the global registry so that it can be looked
+ * up by display drivers. The panel to be added must have been
+ * allocated by devm_drm_panel_alloc().
*/
void drm_panel_add(struct drm_panel *panel)
{
@@ -105,21 +106,21 @@ EXPORT_SYMBOL(drm_panel_remove);
*
* Calling this function will enable power and deassert any reset signals to
* the panel. After this has completed it is possible to communicate with any
- * integrated circuitry via a command bus.
- *
- * Return: 0 on success or a negative error code on failure.
+ * integrated circuitry via a command bus. This function cannot fail (as it is
+ * called from the pre_enable call chain). There will always be a call to
+ * drm_panel_unprepare() afterwards.
*/
-int drm_panel_prepare(struct drm_panel *panel)
+void drm_panel_prepare(struct drm_panel *panel)
{
struct drm_panel_follower *follower;
int ret;
if (!panel)
- return -EINVAL;
+ return;
if (panel->prepared) {
dev_warn(panel->dev, "Skipping prepare of already prepared panel\n");
- return 0;
+ return;
}
mutex_lock(&panel->follower_lock);
@@ -138,11 +139,8 @@ int drm_panel_prepare(struct drm_panel *panel)
follower->funcs->panel_prepared, ret);
}
- ret = 0;
exit:
mutex_unlock(&panel->follower_lock);
-
- return ret;
}
EXPORT_SYMBOL(drm_panel_prepare);
@@ -154,16 +152,14 @@ EXPORT_SYMBOL(drm_panel_prepare);
* reset, turn off power supplies, ...). After this function has completed, it
* is usually no longer possible to communicate with the panel until another
* call to drm_panel_prepare().
- *
- * Return: 0 on success or a negative error code on failure.
*/
-int drm_panel_unprepare(struct drm_panel *panel)
+void drm_panel_unprepare(struct drm_panel *panel)
{
struct drm_panel_follower *follower;
int ret;
if (!panel)
- return -EINVAL;
+ return;
/*
* If you are seeing the warning below it likely means one of two things:
@@ -176,7 +172,7 @@ int drm_panel_unprepare(struct drm_panel *panel)
*/
if (!panel->prepared) {
dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n");
- return 0;
+ return;
}
mutex_lock(&panel->follower_lock);
@@ -195,11 +191,8 @@ int drm_panel_unprepare(struct drm_panel *panel)
}
panel->prepared = false;
- ret = 0;
exit:
mutex_unlock(&panel->follower_lock);
-
- return ret;
}
EXPORT_SYMBOL(drm_panel_unprepare);
@@ -209,26 +202,26 @@ EXPORT_SYMBOL(drm_panel_unprepare);
*
* Calling this function will cause the panel display drivers to be turned on
* and the backlight to be enabled. Content will be visible on screen after
- * this call completes.
- *
- * Return: 0 on success or a negative error code on failure.
+ * this call completes. This function cannot fail (as it is called from the
+ * enable call chain). There will always be a call to drm_panel_disable()
+ * afterwards.
*/
-int drm_panel_enable(struct drm_panel *panel)
+void drm_panel_enable(struct drm_panel *panel)
{
int ret;
if (!panel)
- return -EINVAL;
+ return;
if (panel->enabled) {
dev_warn(panel->dev, "Skipping enable of already enabled panel\n");
- return 0;
+ return;
}
if (panel->funcs && panel->funcs->enable) {
ret = panel->funcs->enable(panel);
if (ret < 0)
- return ret;
+ return;
}
panel->enabled = true;
@@ -236,8 +229,6 @@ int drm_panel_enable(struct drm_panel *panel)
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
ret);
-
- return 0;
}
EXPORT_SYMBOL(drm_panel_enable);
@@ -248,15 +239,13 @@ EXPORT_SYMBOL(drm_panel_enable);
* This will typically turn off the panel's backlight or disable the display
* drivers. For smart panels it should still be possible to communicate with
* the integrated circuitry via any command bus after this call.
- *
- * Return: 0 on success or a negative error code on failure.
*/
-int drm_panel_disable(struct drm_panel *panel)
+void drm_panel_disable(struct drm_panel *panel)
{
int ret;
if (!panel)
- return -EINVAL;
+ return;
/*
* If you are seeing the warning below it likely means one of two things:
@@ -269,7 +258,7 @@ int drm_panel_disable(struct drm_panel *panel)
*/
if (!panel->enabled) {
dev_warn(panel->dev, "Skipping disable of already disabled panel\n");
- return 0;
+ return;
}
ret = backlight_disable(panel->backlight);
@@ -280,11 +269,9 @@ int drm_panel_disable(struct drm_panel *panel)
if (panel->funcs && panel->funcs->disable) {
ret = panel->funcs->disable(panel);
if (ret < 0)
- return ret;
+ return;
}
panel->enabled = false;
-
- return 0;
}
EXPORT_SYMBOL(drm_panel_disable);
@@ -317,6 +304,93 @@ int drm_panel_get_modes(struct drm_panel *panel,
}
EXPORT_SYMBOL(drm_panel_get_modes);
+static void __drm_panel_free(struct kref *kref)
+{
+ struct drm_panel *panel = container_of(kref, struct drm_panel, refcount);
+
+ kfree(panel->container);
+}
+
+/**
+ * drm_panel_get - Acquire a panel reference
+ * @panel: DRM panel
+ *
+ * This function increments the panel's refcount.
+ * Returns:
+ * Pointer to @panel
+ */
+struct drm_panel *drm_panel_get(struct drm_panel *panel)
+{
+ if (!panel)
+ return panel;
+
+ kref_get(&panel->refcount);
+
+ return panel;
+}
+EXPORT_SYMBOL(drm_panel_get);
+
+/**
+ * drm_panel_put - Release a panel reference
+ * @panel: DRM panel
+ *
+ * This function decrements the panel's reference count and frees the
+ * object if the reference count drops to zero.
+ */
+void drm_panel_put(struct drm_panel *panel)
+{
+ if (panel)
+ kref_put(&panel->refcount, __drm_panel_free);
+}
+EXPORT_SYMBOL(drm_panel_put);
+
+/**
+ * drm_panel_put_void - wrapper to drm_panel_put() taking a void pointer
+ *
+ * @data: pointer to @struct drm_panel, cast to a void pointer
+ *
+ * Wrapper of drm_panel_put() to be used when a function taking a void
+ * pointer is needed, for example as a devm action.
+ */
+static void drm_panel_put_void(void *data)
+{
+ struct drm_panel *panel = (struct drm_panel *)data;
+
+ drm_panel_put(panel);
+}
+
+void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_panel_funcs *funcs,
+ int connector_type)
+{
+ void *container;
+ struct drm_panel *panel;
+ int err;
+
+ if (!funcs) {
+ dev_warn(dev, "Missing funcs pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ panel = container + offset;
+ panel->container = container;
+ panel->funcs = funcs;
+ kref_init(&panel->refcount);
+
+ err = devm_add_action_or_reset(dev, drm_panel_put_void, panel);
+ if (err)
+ return ERR_PTR(err);
+
+ drm_panel_init(panel, dev, funcs, connector_type);
+
+ return container;
+}
+EXPORT_SYMBOL(__devm_drm_panel_alloc);
+
#ifdef CONFIG_OF
/**
* of_drm_find_panel - look up a panel using a device tree node
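
A hedged sketch of a panel driver picking up the refcounted allocation. devm_drm_panel_alloc() is named in the kerneldoc above; its exact argument order (dev, containing type, member, funcs, connector type) and the example driver types are assumptions for illustration only:

struct example_panel {
	struct drm_panel panel;
	/* driver-private state would follow */
};

static const struct drm_panel_funcs example_panel_funcs;	/* stub for the sketch */

static int example_panel_probe(struct mipi_dsi_device *dsi)
{
	struct example_panel *ctx;

	ctx = devm_drm_panel_alloc(&dsi->dev, struct example_panel, panel,
				   &example_panel_funcs, DRM_MODE_CONNECTOR_DSI);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * No explicit free: the devm action registered by
	 * __devm_drm_panel_alloc() drops the final reference with
	 * drm_panel_put() once the device is unbound.
	 */
	drm_panel_add(&ctx->panel);

	return 0;
}
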
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index c554ad8f246b..7ac0fd5391fe 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -517,6 +517,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* ZOTAC Gaming Zone */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ZOTAC"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "G0A1W"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* One Mix 2S (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index b47ea25fdfaa..b4de79583805 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -7,6 +7,7 @@
*/
#include <linux/font.h>
+#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/iosys-map.h>
#include <linux/kdebug.h>
@@ -154,6 +155,90 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
}
+static void drm_panic_write_pixel16(void *vaddr, unsigned int offset, u16 color)
+{
+ u16 *p = vaddr + offset;
+
+ *p = color;
+}
+
+static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
+{
+ u8 *p = vaddr + offset;
+
+ *p++ = color & 0xff;
+ color >>= 8;
+ *p++ = color & 0xff;
+ color >>= 8;
+ *p = color & 0xff;
+}
+
+static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
+{
+ u32 *p = vaddr + offset;
+
+ *p = color;
+}
+
+static void drm_panic_write_pixel(void *vaddr, unsigned int offset, u32 color, unsigned int cpp)
+{
+ switch (cpp) {
+ case 2:
+ drm_panic_write_pixel16(vaddr, offset, color);
+ break;
+ case 3:
+ drm_panic_write_pixel24(vaddr, offset, color);
+ break;
+ case 4:
+ drm_panic_write_pixel32(vaddr, offset, color);
+ break;
+ default:
+ pr_debug_once("Can't blit with pixel width %d\n", cpp);
+ }
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap.
+ */
+static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,
+ unsigned int cpp, const u8 *sbuf8,
+ unsigned int spitch, struct drm_rect *clip,
+ unsigned int scale, u32 fg32)
+{
+ unsigned int y, x;
+ unsigned int page = ~0;
+ unsigned int height = drm_rect_height(clip);
+ unsigned int width = drm_rect_width(clip);
+ void *vaddr = NULL;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
+ unsigned int new_page;
+ unsigned int offset;
+
+ offset = (y + clip->y1) * dpitch + (x + clip->x1) * cpp;
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != page) {
+ if (!pages[new_page])
+ continue;
+ if (vaddr)
+ kunmap_local(vaddr);
+ page = new_page;
+ vaddr = kmap_local_page_try_from_panic(pages[page]);
+ }
+ if (vaddr)
+ drm_panic_write_pixel(vaddr, offset, fg32, cpp);
+ }
+ }
+ }
+ if (vaddr)
+ kunmap_local(vaddr);
+}
+
/*
* drm_panic_blit - convert a monochrome image to a linear framebuffer
* @sb: destination scanout buffer
@@ -177,6 +262,10 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
if (sb->set_pixel)
return drm_panic_blit_pixel(sb, clip, sbuf8, spitch, scale, fg_color);
+ if (sb->pages)
+ return drm_panic_blit_page(sb->pages, sb->pitch[0], sb->format->cpp[0],
+ sbuf8, spitch, clip, scale, fg_color);
+
map = sb->map[0];
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
@@ -209,6 +298,35 @@ static void drm_panic_fill_pixel(struct drm_scanout_buffer *sb,
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, color);
}
+static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,
+ unsigned int cpp, struct drm_rect *clip,
+ u32 color)
+{
+ unsigned int y, x;
+ unsigned int page = ~0;
+ void *vaddr = NULL;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ for (x = clip->x1; x < clip->x2; x++) {
+ unsigned int new_page;
+ unsigned int offset;
+
+ offset = y * dpitch + x * cpp;
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != page) {
+ if (vaddr)
+ kunmap_local(vaddr);
+ page = new_page;
+ vaddr = kmap_local_page_try_from_panic(pages[page]);
+ }
+ drm_panic_write_pixel(vaddr, offset, color, cpp);
+ }
+ }
+ if (vaddr)
+ kunmap_local(vaddr);
+}
+
/*
* drm_panic_fill - Fill a rectangle with a color
* @sb: destination scanout buffer
@@ -225,6 +343,10 @@ static void drm_panic_fill(struct drm_scanout_buffer *sb, struct drm_rect *clip,
if (sb->set_pixel)
return drm_panic_fill_pixel(sb, clip, color);
+ if (sb->pages)
+ return drm_panic_fill_page(sb->pages, sb->pitch[0], sb->format->cpp[0],
+ clip, color);
+
map = sb->map[0];
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
@@ -709,16 +831,24 @@ static void draw_panic_plane(struct drm_plane *plane, const char *description)
if (!drm_panic_trylock(plane->dev, flags))
return;
+ ret = plane->helper_private->get_scanout_buffer(plane, &sb);
+
+ if (ret || !drm_panic_is_format_supported(sb.format))
+ goto unlock;
+
+ /* One of these should be set, or it can't draw pixels */
+ if (!sb.set_pixel && !sb.pages && iosys_map_is_null(&sb.map[0]))
+ goto unlock;
+
drm_panic_set_description(description);
- ret = plane->helper_private->get_scanout_buffer(plane, &sb);
+ draw_panic_dispatch(&sb);
+ if (plane->helper_private->panic_flush)
+ plane->helper_private->panic_flush(plane);
- if (!ret && drm_panic_is_format_supported(sb.format)) {
- draw_panic_dispatch(&sb);
- if (plane->helper_private->panic_flush)
- plane->helper_private->panic_flush(plane);
- }
drm_panic_clear_description();
+
+unlock:
drm_panic_unlock(plane->dev, flags);
}
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index f2a99681b998..dd55b1cb764d 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -5,7 +5,7 @@
 //! It is called from a panic handler, so it shouldn't allocate memory and
//! does all the work on the stack or on the provided buffers. For
//! simplification, it only supports low error correction, and applies the
-//! first mask (checkerboard). It will draw the smallest QRcode that can
+//! first mask (checkerboard). It will draw the smallest QR code that can
//! contain the string passed as parameter. To get the most compact
//! QR code, the start of the URL is encoded as binary, and the
//! compressed kmsg is encoded as numeric.
@@ -315,7 +315,7 @@ impl Segment<'_> {
}
}
- // Returns the size of the length field in bits, depending on QR Version.
+ /// Returns the size of the length field in bits, depending on QR Version.
fn length_bits_count(&self, version: Version) -> usize {
let Version(v) = version;
match self {
@@ -331,7 +331,7 @@ impl Segment<'_> {
}
}
- // Number of characters in the segment.
+ /// Number of characters in the segment.
fn character_count(&self) -> usize {
match self {
Segment::Binary(data) => data.len(),
@@ -366,8 +366,48 @@ impl Segment<'_> {
SegmentIterator {
segment: self,
offset: 0,
- carry: 0,
- carry_len: 0,
+ decfifo: Default::default(),
+ }
+ }
+}
+
+/// Max fifo size is 17 (max push) + 2 (max remaining)
+const MAX_FIFO_SIZE: usize = 19;
+
+/// A simple Decimal digit FIFO
+#[derive(Default)]
+struct DecFifo {
+ decimals: [u8; MAX_FIFO_SIZE],
+ len: usize,
+}
+
+impl DecFifo {
+ fn push(&mut self, data: u64, len: usize) {
+ let mut chunk = data;
+ for i in (0..self.len).rev() {
+ self.decimals[i + len] = self.decimals[i];
+ }
+ for i in 0..len {
+ self.decimals[i] = (chunk % 10) as u8;
+ chunk /= 10;
+ }
+ self.len += len;
+ }
+
+ /// Pop 3 decimal digits from the FIFO
+ fn pop3(&mut self) -> Option<(u16, usize)> {
+ if self.len == 0 {
+ None
+ } else {
+ let poplen = 3.min(self.len);
+ self.len -= poplen;
+ let mut out = 0;
+ let mut exp = 1;
+ for i in 0..poplen {
+ out += self.decimals[self.len + i] as u16 * exp;
+ exp *= 10;
+ }
+ Some((out, NUM_CHARS_BITS[poplen]))
}
}
}
@@ -375,8 +415,7 @@ impl Segment<'_> {
struct SegmentIterator<'a> {
segment: &'a Segment<'a>,
offset: usize,
- carry: u64,
- carry_len: usize,
+ decfifo: DecFifo,
}
impl Iterator for SegmentIterator<'_> {
@@ -394,31 +433,17 @@ impl Iterator for SegmentIterator<'_> {
}
}
Segment::Numeric(data) => {
- if self.carry_len < 3 && self.offset < data.len() {
- // If there are less than 3 decimal digits in the carry,
- // take the next 7 bytes of input, and add them to the carry.
+ if self.decfifo.len < 3 && self.offset < data.len() {
+ // If there are less than 3 decimal digits in the fifo,
+ // take the next 7 bytes of input, and push them to the fifo.
let mut buf = [0u8; 8];
let len = 7.min(data.len() - self.offset);
buf[..len].copy_from_slice(&data[self.offset..self.offset + len]);
let chunk = u64::from_le_bytes(buf);
- let pow = u64::pow(10, BYTES_TO_DIGITS[len] as u32);
- self.carry = chunk + self.carry * pow;
+ self.decfifo.push(chunk, BYTES_TO_DIGITS[len]);
self.offset += len;
- self.carry_len += BYTES_TO_DIGITS[len];
- }
- match self.carry_len {
- 0 => None,
- len => {
- // take the next 3 decimal digits of the carry
- // and return 10bits of numeric data.
- let out_len = 3.min(len);
- self.carry_len -= out_len;
- let pow = u64::pow(10, self.carry_len as u32);
- let out = (self.carry / pow) as u16;
- self.carry = self.carry % pow;
- Some((out, NUM_CHARS_BITS[out_len]))
- }
}
+ self.decfifo.pop3()
}
}
}
@@ -569,8 +594,8 @@ struct EncodedMsgIterator<'a> {
impl Iterator for EncodedMsgIterator<'_> {
type Item = u8;
- // Send the bytes in interleaved mode, first byte of first block of group1,
- // then first byte of second block of group1, ...
+ /// Send the bytes in interleaved mode, first byte of first block of group1,
+ /// then first byte of second block of group1, ...
fn next(&mut self) -> Option<Self::Item> {
let em = self.em;
let blocks = em.g1_blocks + em.g2_blocks;
@@ -638,7 +663,7 @@ impl QrImage<'_> {
self.data.fill(0);
}
- // Set pixel to light color.
+ /// Set pixel to light color.
fn set(&mut self, x: u8, y: u8) {
let off = y as usize * self.stride as usize + x as usize / 8;
let mut v = self.data[off];
@@ -646,13 +671,13 @@ impl QrImage<'_> {
self.data[off] = v;
}
- // Invert a module color.
+ /// Invert a module color.
fn xor(&mut self, x: u8, y: u8) {
let off = y as usize * self.stride as usize + x as usize / 8;
self.data[off] ^= 0x80 >> (x % 8);
}
- // Draw a light square at (x, y) top left corner.
+ /// Draw a light square at (x, y) top left corner.
fn draw_square(&mut self, x: u8, y: u8, size: u8) {
for k in 0..size {
self.set(x + k, y);
@@ -784,7 +809,7 @@ impl QrImage<'_> {
vinfo != 0 && ((x >= pos && x < pos + 3 && y < 6) || (y >= pos && y < pos + 3 && x < 6))
}
- // Returns true if the module is reserved (Not usable for data and EC).
+ /// Returns true if the module is reserved (Not usable for data and EC).
fn is_reserved(&self, x: u8, y: u8) -> bool {
self.is_alignment(x, y)
|| self.is_finder(x, y)
@@ -793,13 +818,14 @@ impl QrImage<'_> {
|| self.is_version_info(x, y)
}
- // Last module to draw, at bottom left corner.
+ /// Last module to draw, at bottom left corner.
fn is_last(&self, x: u8, y: u8) -> bool {
x == 0 && y == self.width - 1
}
- // Move to the next module according to QR code order.
- // From bottom right corner, to bottom left corner.
+ /// Move to the next module according to QR code order.
+ ///
+ /// From bottom right corner, to bottom left corner.
fn next(&self, x: u8, y: u8) -> (u8, u8) {
let x_adj = if x <= 6 { x + 1 } else { x };
let column_type = (self.width - x_adj) % 4;
@@ -812,7 +838,7 @@ impl QrImage<'_> {
}
}
- // Find next module that can hold data.
+ /// Find next module that can hold data.
fn next_available(&self, x: u8, y: u8) -> (u8, u8) {
let (mut x, mut y) = self.next(x, y);
while self.is_reserved(x, y) && !self.is_last(x, y) {
@@ -841,7 +867,7 @@ impl QrImage<'_> {
}
}
- // Apply checkerboard mask to all non-reserved modules.
+ /// Apply checkerboard mask to all non-reserved modules.
fn apply_mask(&mut self) {
for x in 0..self.width {
for y in 0..self.width {
@@ -852,7 +878,7 @@ impl QrImage<'_> {
}
}
- // Draw the QR code with the provided data iterator.
+ /// Draw the QR code with the provided data iterator.
fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
// First clear the table, as it may have already some data.
self.clear();
@@ -876,7 +902,7 @@ impl QrImage<'_> {
/// will be encoded as binary segment, otherwise it will be encoded
/// efficiently as a numeric segment, and appended to the URL.
/// * `data_len`: Length of the data, that needs to be encoded, must be less
-/// than data_size.
+/// than `data_size`.
/// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold
/// a V40 QR code. It will then be overwritten with the QR code image.
/// * `tmp`: A temporary buffer that the QR code encoder will use, to write the
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index a28b22fdd7a4..04992dfd4c79 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -141,6 +141,14 @@
* various bugs in this area with inconsistencies between the capability
* flag and per-plane properties.
*
+ * IN_FORMATS_ASYNC:
+ * Blob property which contains the set of buffer format and modifier
+ * pairs supported by this plane for asynchronous flips. The blob is a struct
+ * drm_format_modifier_blob. Userspace cannot change this property. This is an
+ * optional property; if it is not present, the user should expect the atomic
+ * ioctl to fail when the modifier/format pair is not supported by that plane
+ * for an asynchronous flip.
+ *
* SIZE_HINTS:
* Blob property which contains the set of recommended plane size
 * which can be used for simple "cursor like" use cases (eg. no scaling).
@@ -185,9 +193,13 @@ modifiers_ptr(struct drm_format_modifier_blob *blob)
return (struct drm_format_modifier *)(((char *)blob) + blob->modifiers_offset);
}
-static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane)
+static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
+ struct drm_plane *plane,
+ bool (*format_mod_supported)
+ (struct drm_plane *plane,
+ u32 format,
+ u64 modifier))
{
- const struct drm_mode_config *config = &dev->mode_config;
struct drm_property_blob *blob;
struct drm_format_modifier *mod;
size_t blob_size, formats_size, modifiers_size;
@@ -213,7 +225,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
blob = drm_property_create_blob(dev, blob_size, NULL);
if (IS_ERR(blob))
- return -1;
+ return NULL;
blob_data = blob->data;
blob_data->version = FORMAT_BLOB_CURRENT;
@@ -229,10 +241,10 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
mod = modifiers_ptr(blob_data);
for (i = 0; i < plane->modifier_count; i++) {
for (j = 0; j < plane->format_count; j++) {
- if (!plane->funcs->format_mod_supported ||
- plane->funcs->format_mod_supported(plane,
- plane->format_types[j],
- plane->modifiers[i])) {
+ if (!format_mod_supported ||
+ format_mod_supported(plane,
+ plane->format_types[j],
+ plane->modifiers[i])) {
mod->formats |= 1ULL << j;
}
}
@@ -243,10 +255,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
mod++;
}
- drm_object_attach_property(&plane->base, config->modifiers_property,
- blob->base.id);
-
- return 0;
+ return blob;
}
/**
@@ -358,6 +367,7 @@ static int __drm_universal_plane_init(struct drm_device *dev,
const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property_blob *blob;
static const uint64_t default_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
};
@@ -469,8 +479,24 @@ static int __drm_universal_plane_init(struct drm_device *dev,
drm_plane_create_hotspot_properties(plane);
}
- if (format_modifier_count)
- create_in_format_blob(dev, plane);
+ if (format_modifier_count) {
+ blob = create_in_format_blob(dev, plane,
+ plane->funcs->format_mod_supported);
+ if (!IS_ERR(blob))
+ drm_object_attach_property(&plane->base,
+ config->modifiers_property,
+ blob->base.id);
+ }
+
+ if (plane->funcs->format_mod_supported_async) {
+ blob = create_in_format_blob(dev, plane,
+ plane->funcs->format_mod_supported_async);
+ if (!IS_ERR(blob))
+ drm_object_attach_property(&plane->base,
+ config->async_modifiers_property,
+ blob->base.id);
+ }
+
return 0;
}
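
A hedged sketch of the driver side of IN_FORMATS_ASYNC: the new format_mod_supported_async callback has the same prototype as the format_mod_supported callback used by create_in_format_blob() above; the format/modifier restriction below is purely illustrative:

static bool example_plane_format_mod_supported_async(struct drm_plane *plane,
						     u32 format, u64 modifier)
{
	/* e.g. allow async flips only for linear XRGB8888 on this plane */
	return format == DRM_FORMAT_XRGB8888 &&
	       modifier == DRM_FORMAT_MOD_LINEAR;
}

A driver would assign this to .format_mod_supported_async in its struct drm_plane_funcs; __drm_universal_plane_init() then builds the IN_FORMATS_ASYNC blob from it, just as it builds IN_FORMATS from .format_mod_supported.
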
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index bdb51c8f262e..d828502268b8 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -707,7 +707,7 @@ int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- return drm_gem_vmap(obj, map);
+ return drm_gem_vmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
@@ -723,7 +723,7 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- drm_gem_vunmap(obj, map);
+ drm_gem_vunmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
@@ -804,7 +804,6 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
- .cache_sgt_mapping = true,
.attach = drm_gem_map_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
@@ -998,7 +997,7 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
- return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
+ return drm_gem_prime_import_dev(dev, dma_buf, drm_dev_dma_dev(dev));
}
EXPORT_SYMBOL(drm_gem_prime_import);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 7ba16323e7c2..6b3541159c0f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -958,15 +958,16 @@ static void drm_kms_helper_poll_init_release(struct drm_device *dev, void *res)
* cleaned up when the DRM device goes away.
*
* See drm_kms_helper_poll_init() for more information.
- *
- * Returns:
- * 0 on success, or a negative errno code otherwise.
*/
-int drmm_kms_helper_poll_init(struct drm_device *dev)
+void drmm_kms_helper_poll_init(struct drm_device *dev)
{
+ int ret;
+
drm_kms_helper_poll_init(dev);
- return drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+ ret = drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+ if (ret)
+ drm_warn(dev, "Connector status will not be updated, error %d\n", ret);
}
EXPORT_SYMBOL(drmm_kms_helper_poll_init);
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 4f2ab8a7b50f..636cd83ca29e 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -741,7 +741,7 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
}
static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
- int fd, int handle)
+ int fd, int handle, u64 point)
{
struct dma_fence *fence = sync_file_get_fence(fd);
struct drm_syncobj *syncobj;
@@ -755,14 +755,24 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
return -ENOENT;
}
- drm_syncobj_replace_fence(syncobj, fence);
+ if (point) {
+ struct dma_fence_chain *chain = dma_fence_chain_alloc();
+
+ if (!chain)
+ return -ENOMEM;
+
+ drm_syncobj_add_point(syncobj, chain, fence, point);
+ } else {
+ drm_syncobj_replace_fence(syncobj, fence);
+ }
+
dma_fence_put(fence);
drm_syncobj_put(syncobj);
return 0;
}
static int drm_syncobj_export_sync_file(struct drm_file *file_private,
- int handle, int *p_fd)
+ int handle, u64 point, int *p_fd)
{
int ret;
struct dma_fence *fence;
@@ -772,7 +782,7 @@ static int drm_syncobj_export_sync_file(struct drm_file *file_private,
if (fd < 0)
return fd;
- ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
+ ret = drm_syncobj_find_fence(file_private, handle, point, 0, &fence);
if (ret)
goto err_put_fd;
@@ -869,6 +879,9 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
+ unsigned int valid_flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE |
+ DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE;
+ u64 point = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
@@ -876,13 +889,18 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
+ if (args->flags & ~valid_flags)
return -EINVAL;
+ if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE)
+ point = args->point;
+
if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
return drm_syncobj_export_sync_file(file_private, args->handle,
- &args->fd);
+ point, &args->fd);
+
+ if (args->point)
+ return -EINVAL;
return drm_syncobj_handle_to_fd(file_private, args->handle,
&args->fd);
@@ -893,6 +911,9 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
+ unsigned int valid_flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE |
+ DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
+ u64 point = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
@@ -900,14 +921,20 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
+ if (args->flags & ~valid_flags)
return -EINVAL;
+ if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE)
+ point = args->point;
+
if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
return drm_syncobj_import_sync_file_fence(file_private,
args->fd,
- args->handle);
+ args->handle,
+ point);
+
+ if (args->point)
+ return -EINVAL;
return drm_syncobj_fd_to_handle(file_private, args->fd,
&args->handle);
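
[editor's note: a hedged sketch, not part of the patch. The ioctl changes above let userspace bind a sync_file to a specific timeline point. A hypothetical userspace fragment using libdrm's drmIoctl(); the helper name is an assumption, the flag and the point field are the ones introduced by this series:]

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>	/* drmIoctl(); pulls in the drm uapi header */

static int export_timeline_point(int drm_fd, uint32_t syncobj_handle,
				 uint64_t point, int *out_fd)
{
	struct drm_syncobj_handle args;

	memset(&args, 0, sizeof(args));
	args.handle = syncobj_handle;
	args.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE |
		     DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE;
	args.point = point;	/* consulted by the new ioctl path above */

	if (drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args))
		return -1;

	*out_fd = args.fd;
	return 0;
}
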
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 42e57d142554..917ad527c961 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -39,7 +39,7 @@ int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
{
- if (!obj->import_attach) {
+ if (!drm_gem_is_imported(obj)) {
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
mutex_lock(&etnaviv_obj->lock);
@@ -51,7 +51,7 @@ int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
{
- if (!obj->import_attach) {
+ if (!drm_gem_is_imported(obj)) {
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
mutex_lock(&etnaviv_obj->lock);
@@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
if (etnaviv_obj->vaddr)
- dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
+ dma_buf_vunmap_unlocked(etnaviv_obj->base.dma_buf, &map);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -82,7 +82,7 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
lockdep_assert_held(&etnaviv_obj->lock);
- ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
+ ret = dma_buf_vmap(etnaviv_obj->base.dma_buf, &map);
if (ret)
return NULL;
return map.vaddr;
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 5170f72b0830..f91daefa9d2b 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -43,13 +43,13 @@ struct decon_data {
unsigned int wincon_burstlen_shift;
};
-static struct decon_data exynos7_decon_data = {
+static const struct decon_data exynos7_decon_data = {
.vidw_buf_start_base = 0x80,
.shadowcon_win_protect_shift = 10,
.wincon_burstlen_shift = 11,
};
-static struct decon_data exynos7870_decon_data = {
+static const struct decon_data exynos7870_decon_data = {
.vidw_buf_start_base = 0x880,
.shadowcon_win_protect_shift = 8,
.wincon_burstlen_shift = 10,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index f313ae7bc3a3..6cc7bf77bcac 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -355,8 +355,7 @@ static void exynos_drm_platform_shutdown(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
- if (drm)
- drm_atomic_helper_shutdown(drm);
+ drm_atomic_helper_shutdown(drm);
}
static struct platform_driver exynos_drm_platform_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index b150cfd92f6e..09e33a26caaf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -908,7 +908,7 @@ static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
u32 buf_num;
u32 cfg;
- DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]enqueu[%d]\n", buf_id, enqueue);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]enqueue[%d]\n", buf_id, enqueue);
spin_lock_irqsave(&ctx->lock, flags);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 1ad87584b1c2..c394cc702d7d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -731,7 +731,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
/*
* Setting dma-burst to 16Word causes permanent tearing for very small
* buffers, e.g. cursor buffer. Burst Mode switching which based on
- * plane size is not recommended as plane size varies alot towards the
+ * plane size is not recommended as plane size varies a lot towards the
* end of the screen and rapid movement causes unstable DMA, but it is
* still better to change dma-burst than displaying garbage.
*/
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index b34ec6728337..29a8366513fa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -379,11 +379,11 @@ static int exynos_mic_probe(struct platform_device *pdev)
struct resource res;
int ret, i;
- mic = devm_kzalloc(dev, sizeof(*mic), GFP_KERNEL);
- if (!mic) {
+ mic = devm_drm_bridge_alloc(dev, struct exynos_mic, bridge, &mic_bridge_funcs);
+ if (IS_ERR(mic)) {
DRM_DEV_ERROR(dev,
"mic: Failed to allocate memory for MIC object\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(mic);
goto err;
}
@@ -421,7 +421,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mic);
- mic->bridge.funcs = &mic_bridge_funcs;
mic->bridge.of_node = dev->of_node;
drm_bridge_add(&mic->bridge);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 08cf79a62025..e644e2382d77 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -312,9 +312,6 @@ static int vidi_get_modes(struct drm_connector *connector)
else
drm_edid = drm_edid_alloc(fake_edid_info, sizeof(fake_edid_info));
- if (!drm_edid)
- return 0;
-
drm_edid_connector_update(connector, drm_edid);
count = drm_edid_connector_add_modes(connector);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 03b076db9381..3bbfc1b56a65 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -260,7 +260,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
struct fsl_dcu_drm_device *fsl_dev;
struct drm_device *drm;
struct device *dev = &pdev->dev;
- struct resource *res;
void __iomem *base;
struct clk *pix_clk_in;
char pix_clk_name[32];
@@ -278,8 +277,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
return -ENODEV;
fsl_dev->soc = id->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
return ret;
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index aa2ea128aa2f..a2acaa699dd5 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_GMA500
tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
- depends on DRM && PCI && X86 && MMU && HAS_IOPORT
+ depends on DRM && PCI && X86 && HAS_IOPORT
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 4d78b33eaa82..e6753282e70e 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -730,44 +730,3 @@ out:
return ret;
}
-
-int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
- unsigned long *pfn)
-{
- int ret;
- struct psb_mmu_pt *pt;
- uint32_t tmp;
- spinlock_t *lock = &pd->driver->lock;
-
- down_read(&pd->driver->sem);
- pt = psb_mmu_pt_map_lock(pd, virtual);
- if (!pt) {
- uint32_t *v;
-
- spin_lock(lock);
- v = kmap_atomic(pd->p);
- tmp = v[psb_mmu_pd_index(virtual)];
- kunmap_atomic(v);
- spin_unlock(lock);
-
- if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
- !(pd->invalid_pte & PSB_PTE_VALID)) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *pfn = pd->invalid_pte >> PAGE_SHIFT;
- goto out;
- }
- tmp = pt->v[psb_mmu_pt_index(virtual)];
- if (!(tmp & PSB_PTE_VALID)) {
- ret = -EINVAL;
- } else {
- ret = 0;
- *pfn = tmp >> PAGE_SHIFT;
- }
- psb_mmu_pt_unmap_unlock(pt);
-out:
- up_read(&pd->driver->sem);
- return ret;
-}
diff --git a/drivers/gpu/drm/gma500/mmu.h b/drivers/gpu/drm/gma500/mmu.h
index d4b5720ef08e..e6d39703718c 100644
--- a/drivers/gpu/drm/gma500/mmu.h
+++ b/drivers/gpu/drm/gma500/mmu.h
@@ -71,8 +71,6 @@ extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
uint32_t start_pfn,
unsigned long address,
uint32_t num_pages, int type);
-extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
- unsigned long *pfn);
extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long address, uint32_t num_pages,
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index de8ccfe9890f..ea9b41af0867 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -658,10 +658,3 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
.prepare = gma_crtc_prepare,
.commit = gma_crtc_commit,
};
-
-/* Not used yet */
-const struct gma_clock_funcs mrst_clock_funcs = {
- .clock = mrst_lvds_clock,
- .limit = mrst_limit,
- .pll_is_valid = gma_pll_is_valid,
-};
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 9dc9dcd1b09f..979ea8ecf0d5 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -182,7 +182,6 @@ struct gma_i2c_chan *gma_i2c_create(struct drm_device *dev, const u32 reg,
void gma_i2c_destroy(struct gma_i2c_chan *chan);
int psb_intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter);
-extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct psb_intel_mode_device *mode_dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 8be0ec340de5..45b10f30a2a9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -12,37 +12,6 @@
#include "psb_intel_drv.h"
/**
- * psb_intel_ddc_probe
- * @adapter: Associated I2C adaptor
- */
-bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
-{
- u8 out_buf[] = { 0x0, 0x0 };
- u8 buf[2];
- int ret;
- struct i2c_msg msgs[] = {
- {
- .addr = 0x50,
- .flags = 0,
- .len = 1,
- .buf = out_buf,
- },
- {
- .addr = 0x50,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
-
- ret = i2c_transfer(adapter, msgs, 2);
- if (ret == 2)
- return true;
-
- return false;
-}
-
-/**
* psb_intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
* @adapter: Associated I2C adaptor
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index cb405771d6e2..5385a2126e45 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -309,21 +309,6 @@ out:
return ret;
}
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf)
-{
- struct gud_device *gdrm = to_gud_device(drm);
-
- if (!gdrm->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev);
-}
-
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
@@ -376,7 +361,6 @@ static const struct drm_driver gud_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &gud_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = gud_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = "gud",
@@ -434,6 +418,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
size_t max_buffer_size = 0;
struct gud_device *gdrm;
struct drm_device *drm;
+ struct device *dma_dev;
u8 *formats_dev;
u32 *formats;
int ret, i;
@@ -609,17 +594,19 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_set_intfdata(intf, gdrm);
- gdrm->dmadev = usb_intf_get_dma_device(intf);
- if (!gdrm->dmadev)
- dev_warn(dev, "buffer sharing not supported");
+ dma_dev = usb_intf_get_dma_device(intf);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(drm, dma_dev);
+ put_device(dma_dev);
+ } else {
+ dev_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL);
ret = drm_dev_register(drm, 0);
- if (ret) {
- put_device(gdrm->dmadev);
+ if (ret)
return ret;
- }
drm_kms_helper_poll_init(drm);
@@ -638,8 +625,6 @@ static void gud_disconnect(struct usb_interface *interface)
drm_kms_helper_poll_fini(drm);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
- put_device(gdrm->dmadev);
- gdrm->dmadev = NULL;
}
static int gud_suspend(struct usb_interface *intf, pm_message_t message)
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index 0d148a6f27aa..d6fb25388722 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -16,7 +16,6 @@
struct gud_device {
struct drm_device drm;
struct drm_simple_display_pipe pipe;
- struct device *dmadev;
struct work_struct work;
u32 flags;
const struct drm_format_info *xrgb8888_emulation_format;
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 77cfcf37ddd2..feff73cc0005 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -261,7 +261,7 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
else if (ctx.sgr.bytes != len)
ret = -EIO;
- destroy_timer_on_stack(&ctx.timer);
+ timer_destroy_on_stack(&ctx.timer);
return ret;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 98d77d74999d..d1f3f5793f34 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -2,7 +2,6 @@
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
depends on DRM && PCI
- depends on MMU
select DRM_CLIENT_SELECTION
select DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index 95a4ed599d98..1f65c683282f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o \
- dp/dp_aux.o dp/dp_link.o dp/dp_hw.o hibmc_drm_dp.o
+ dp/dp_aux.o dp/dp_link.o dp/dp_hw.o dp/dp_serdes.o hibmc_drm_dp.o \
+ hibmc_drm_debugfs.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
index 0a903cce1fa9..8732cd1d8cb6 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
@@ -8,6 +8,7 @@
#include <drm/drm_print.h>
#include "dp_comm.h"
#include "dp_reg.h"
+#include "dp_hw.h"
#define HIBMC_AUX_CMD_REQ_LEN GENMASK(7, 4)
#define HIBMC_AUX_CMD_ADDR GENMASK(27, 8)
@@ -124,7 +125,8 @@ static int hibmc_dp_aux_parse_xfer(struct hibmc_dp_dev *dp, struct drm_dp_aux_ms
/* ret >= 0 ,ret is size; ret < 0, ret is err code */
static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
- struct hibmc_dp_dev *dp = container_of(aux, struct hibmc_dp_dev, aux);
+ struct hibmc_dp *dp_priv = container_of(aux, struct hibmc_dp, aux);
+ struct hibmc_dp_dev *dp = dp_priv->dp_dev;
u32 aux_cmd;
int ret;
u32 val; /* val will be assigned at the beginning of readl_poll_timeout function */
@@ -151,14 +153,16 @@ static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *
return hibmc_dp_aux_parse_xfer(dp, msg);
}
-void hibmc_dp_aux_init(struct hibmc_dp_dev *dp)
+void hibmc_dp_aux_init(struct hibmc_dp *dp)
{
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
HIBMC_DP_MIN_PULSE_NUM);
dp->aux.transfer = hibmc_dp_aux_xfer;
- dp->aux.is_remote = 0;
+ dp->aux.name = "HIBMC DRM dp aux";
+ dp->aux.drm_dev = dp->drm_dev;
drm_dp_aux_init(&dp->aux);
+ dp->dp_dev->aux = &dp->aux;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
index 2c52a4476c4d..4add05c7f161 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
@@ -13,6 +13,8 @@
#include <linux/io.h>
#include <drm/display/drm_dp_helper.h>
+#include "dp_hw.h"
+
#define HIBMC_DP_LANE_NUM_MAX 2
struct hibmc_link_status {
@@ -32,12 +34,13 @@ struct hibmc_dp_link {
};
struct hibmc_dp_dev {
- struct drm_dp_aux aux;
+ struct drm_dp_aux *aux;
struct drm_device *dev;
void __iomem *base;
struct mutex lock; /* protects concurrent RW in hibmc_dp_reg_write_field() */
struct hibmc_dp_link link;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ void __iomem *serdes_base;
};
#define dp_field_modify(reg_value, mask, val) \
@@ -57,7 +60,10 @@ struct hibmc_dp_dev {
mutex_unlock(&_dp->lock); \
} while (0)
-void hibmc_dp_aux_init(struct hibmc_dp_dev *dp);
+void hibmc_dp_aux_init(struct hibmc_dp *dp);
int hibmc_dp_link_training(struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX]);
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
index 74dd9956144e..08f9e1caf7fc 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
@@ -15,5 +15,7 @@
#define HIBMC_DP_CLK_EN 0x7
#define HIBMC_DP_SYNC_EN_MASK 0x3
#define HIBMC_DP_LINK_RATE_CAL 27
+#define HIBMC_DP_SYNC_DELAY(lanes) ((lanes) == 0x2 ? 86 : 46)
+#define HIBMC_DP_INT_ENABLE 0xc
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
index a8d543881c09..8f0daec7d174 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
@@ -72,6 +72,9 @@ static void hibmc_dp_set_sst(struct hibmc_dp_dev *dp, struct drm_display_mode *m
HIBMC_DP_CFG_STREAM_HTOTAL_SIZE, htotal_size);
hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_HORIZONTAL_SIZE,
HIBMC_DP_CFG_STREAM_HBLANK_SIZE, hblank_size);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_PACKET,
+ HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION,
+ HIBMC_DP_SYNC_DELAY(dp->link.cap.lanes));
}
static void hibmc_dp_link_cfg(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
@@ -151,6 +154,7 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
{
struct drm_device *drm_dev = dp->drm_dev;
struct hibmc_dp_dev *dp_dev;
+ int ret;
dp_dev = devm_kzalloc(drm_dev->dev, sizeof(struct hibmc_dp_dev), GFP_KERNEL);
if (!dp_dev)
@@ -163,10 +167,14 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
dp_dev->dev = drm_dev;
dp_dev->base = dp->mmio + HIBMC_DP_OFFSET;
- hibmc_dp_aux_init(dp_dev);
+ hibmc_dp_aux_init(dp);
+
+ ret = hibmc_dp_serdes_init(dp_dev);
+ if (ret)
+ return ret;
dp_dev->link.cap.lanes = 0x2;
- dp_dev->link.cap.link_rate = DP_LINK_BW_2_7;
+ dp_dev->link.cap.link_rate = DP_LINK_BW_8_1;
/* hdcp data */
writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
@@ -181,6 +189,36 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
return 0;
}
+void hibmc_dp_enable_int(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+}
+
+void hibmc_dp_disable_int(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
+}
+
+void hibmc_dp_hpd_cfg(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM, 0x9);
+ writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
+ writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
+ writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_DPTX_RST, dp_dev->base + HIBMC_DP_DPTX_RST_CTRL);
+ writel(HIBMC_DP_CLK_EN, dp_dev->base + HIBMC_DP_DPTX_CLK_CTRL);
+}
+
void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable)
{
struct hibmc_dp_dev *dp_dev = dp->dp_dev;
@@ -218,3 +256,52 @@ int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode)
return 0;
}
+
+void hibmc_dp_reset_link(struct hibmc_dp *dp)
+{
+ dp->dp_dev->link.status.clock_recovered = false;
+ dp->dp_dev->link.status.channel_equalized = false;
+}
+
+static const struct hibmc_dp_color_raw g_rgb_raw[] = {
+ {CBAR_COLOR_BAR, 0x000, 0x000, 0x000},
+ {CBAR_WHITE, 0xfff, 0xfff, 0xfff},
+ {CBAR_RED, 0xfff, 0x000, 0x000},
+ {CBAR_ORANGE, 0xfff, 0x800, 0x000},
+ {CBAR_YELLOW, 0xfff, 0xfff, 0x000},
+ {CBAR_GREEN, 0x000, 0xfff, 0x000},
+ {CBAR_CYAN, 0x000, 0x800, 0x800},
+ {CBAR_BLUE, 0x000, 0x000, 0xfff},
+ {CBAR_PURPLE, 0x800, 0x000, 0x800},
+ {CBAR_BLACK, 0x000, 0x000, 0x000},
+};
+
+void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct hibmc_dp_cbar_cfg *cfg)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+ struct hibmc_dp_color_raw raw_data;
+
+ if (cfg->enable) {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(9),
+ cfg->self_timing);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(8, 1),
+ cfg->dynamic_rate);
+ if (cfg->pattern == CBAR_COLOR_BAR) {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 0);
+ } else {
+ raw_data = g_rgb_raw[cfg->pattern];
+ drm_dbg_dp(dp->drm_dev, "r:%x g:%x b:%x\n", raw_data.r_value,
+ raw_data.g_value, raw_data.b_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 1);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(23, 12),
+ raw_data.r_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(23, 12),
+ raw_data.g_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(11, 0),
+ raw_data.b_value);
+ }
+ }
+
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(0), cfg->enable);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
index 4dc13b3d9875..665f5b166dfb 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
@@ -10,19 +10,55 @@
#include <drm/drm_encoder.h>
#include <drm/drm_connector.h>
#include <drm/drm_print.h>
+#include <drm/display/drm_dp_helper.h>
struct hibmc_dp_dev;
+enum hibmc_dp_cbar_pattern {
+ CBAR_COLOR_BAR,
+ CBAR_WHITE,
+ CBAR_RED,
+ CBAR_ORANGE,
+ CBAR_YELLOW,
+ CBAR_GREEN,
+ CBAR_CYAN,
+ CBAR_BLUE,
+ CBAR_PURPLE,
+ CBAR_BLACK,
+};
+
+struct hibmc_dp_color_raw {
+ enum hibmc_dp_cbar_pattern pattern;
+ u32 r_value;
+ u32 g_value;
+ u32 b_value;
+};
+
+struct hibmc_dp_cbar_cfg {
+ u8 enable;
+ u8 self_timing;
+ u8 dynamic_rate; /* 0:static, 1-255(frame):dynamic */
+ enum hibmc_dp_cbar_pattern pattern;
+};
+
struct hibmc_dp {
struct hibmc_dp_dev *dp_dev;
struct drm_device *drm_dev;
struct drm_encoder encoder;
struct drm_connector connector;
void __iomem *mmio;
+ struct drm_dp_aux aux;
+ struct hibmc_dp_cbar_cfg cfg;
+ u32 irq_status;
};
int hibmc_dp_hw_init(struct hibmc_dp *dp);
int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode);
void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable);
+void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct hibmc_dp_cbar_cfg *cfg);
+void hibmc_dp_reset_link(struct hibmc_dp *dp);
+void hibmc_dp_hpd_cfg(struct hibmc_dp *dp);
+void hibmc_dp_enable_int(struct hibmc_dp *dp);
+void hibmc_dp_disable_int(struct hibmc_dp *dp);
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
index f6355c16cc0a..74f7832ea53e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
@@ -9,6 +9,22 @@
#define HIBMC_EQ_MAX_RETRY 5
+static inline int hibmc_dp_get_serdes_rate_cfg(struct hibmc_dp_dev *dp)
+{
+ switch (dp->link.cap.link_rate) {
+ case DP_LINK_BW_1_62:
+ return DP_SERDES_BW_1_62;
+ case DP_LINK_BW_2_7:
+ return DP_SERDES_BW_2_7;
+ case DP_LINK_BW_5_4:
+ return DP_SERDES_BW_5_4;
+ case DP_LINK_BW_8_1:
+ return DP_SERDES_BW_8_1;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
{
u8 buf[2];
@@ -26,7 +42,7 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
/* set rate and lane count */
buf[0] = dp->link.cap.link_rate;
buf[1] = DP_LANE_COUNT_ENHANCED_FRAME_EN | dp->link.cap.lanes;
- ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write link rate and lanes failed, ret: %d\n", ret);
return ret >= 0 ? -EIO : ret;
@@ -35,17 +51,13 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
/* set 8b/10b and downspread */
buf[0] = DP_SPREAD_AMP_0_5;
buf[1] = DP_SET_ANSI_8B10B;
- ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write 8b/10b and downspread failed, ret: %d\n", ret);
return ret >= 0 ? -EIO : ret;
}
- ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd);
- if (ret)
- drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
-
- return ret;
+ return 0;
}
static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
@@ -84,7 +96,7 @@ static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_PAT_SEL, val);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write training pattern set failed\n");
return ret >= 0 ? -EIO : ret;
@@ -108,9 +120,13 @@ static int hibmc_dp_link_training_cr_pre(struct hibmc_dp_dev *dp)
return ret;
for (i = 0; i < dp->link.cap.lanes; i++)
- train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "dp aux write training lane set failed\n");
return ret >= 0 ? -EIO : ret;
@@ -137,21 +153,29 @@ static bool hibmc_dp_link_get_adjust_train(struct hibmc_dp_dev *dp,
return false;
}
-static inline int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
+static int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
{
+ int ret;
+
switch (dp->link.cap.link_rate) {
case DP_LINK_BW_2_7:
dp->link.cap.link_rate = DP_LINK_BW_1_62;
- return 0;
+ break;
case DP_LINK_BW_5_4:
dp->link.cap.link_rate = DP_LINK_BW_2_7;
- return 0;
+ break;
case DP_LINK_BW_8_1:
dp->link.cap.link_rate = DP_LINK_BW_5_4;
- return 0;
+ break;
default:
return -EINVAL;
}
+
+ ret = hibmc_dp_get_serdes_rate_cfg(dp);
+ if (ret < 0)
+ return ret;
+
+ return hibmc_dp_serdes_rate_switch(ret, dp);
}
static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
@@ -159,6 +183,7 @@ static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
switch (dp->link.cap.lanes) {
case 0x2:
dp->link.cap.lanes--;
+ drm_dbg_dp(dp->dev, "dp link training reduce to 1 lane\n");
break;
case 0x1:
drm_err(dp->dev, "dp link training reduce lane failed, already reach minimum\n");
@@ -185,10 +210,10 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
voltage_tries = 1;
for (cr_tries = 0; cr_tries < 80; cr_tries++) {
- drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd);
+ drm_dp_link_train_clock_recovery_delay(dp->aux, dp->dpcd);
- ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
- if (ret != DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status);
+ if (ret) {
drm_err(dp->dev, "Get lane status failed\n");
return ret;
}
@@ -206,7 +231,12 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
}
level_changed = hibmc_dp_link_get_adjust_train(dp, lane_status);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "Update link training failed\n");
@@ -233,10 +263,10 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
return ret;
for (eq_tries = 0; eq_tries < HIBMC_EQ_MAX_RETRY; eq_tries++) {
- drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
+ drm_dp_link_train_channel_eq_delay(dp->aux, dp->dpcd);
- ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
- if (ret != DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status);
+ if (ret) {
drm_err(dp->dev, "get lane status failed\n");
break;
}
@@ -255,7 +285,12 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
}
hibmc_dp_link_get_adjust_train(dp, lane_status);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET,
dp->link.train_set, dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "Update link training failed\n");
@@ -295,6 +330,21 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
struct hibmc_dp_link *link = &dp->link;
int ret;
+ ret = drm_dp_read_dpcd_caps(dp->aux, dp->dpcd);
+ if (ret)
+ drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
+
+ dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
+ dp->link.cap.lanes = 0x2;
+
+ ret = hibmc_dp_get_serdes_rate_cfg(dp);
+ if (ret < 0)
+ return ret;
+
+ ret = hibmc_dp_serdes_rate_switch(ret, dp);
+ if (ret)
+ return ret;
+
while (true) {
ret = hibmc_dp_link_training_cr_pre(dp);
if (ret)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
index 4a515c726d52..394b1e933c3a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
@@ -5,72 +5,128 @@
#define DP_REG_H
#define HIBMC_DP_AUX_CMD_ADDR 0x50
+
#define HIBMC_DP_AUX_WR_DATA0 0x54
#define HIBMC_DP_AUX_WR_DATA1 0x58
#define HIBMC_DP_AUX_WR_DATA2 0x5c
#define HIBMC_DP_AUX_WR_DATA3 0x60
#define HIBMC_DP_AUX_RD_DATA0 0x64
+
#define HIBMC_DP_AUX_REQ 0x74
+#define HIBMC_DP_CFG_AUX_REQ BIT(0)
+#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
+#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
+#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
+
#define HIBMC_DP_AUX_STATUS 0x78
+#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
+#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
+#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
+#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
+
#define HIBMC_DP_PHYIF_CTRL0 0xa0
+#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
+#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
+#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
+
#define HIBMC_DP_VIDEO_CTRL 0x100
+#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
+#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
+#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
+#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
+#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
+
#define HIBMC_DP_VIDEO_CONFIG0 0x104
+#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG1 0x108
+#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG2 0x10c
+#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG3 0x110
+#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
+
#define HIBMC_DP_VIDEO_PACKET 0x114
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
+#define HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION GENMASK(31, 20)
+
#define HIBMC_DP_VIDEO_MSA0 0x118
+#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_MSA1 0x11c
#define HIBMC_DP_VIDEO_MSA2 0x120
+
#define HIBMC_DP_VIDEO_HORIZONTAL_SIZE 0X124
+#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
+
+#define HIBMC_DP_COLOR_BAR_CTRL 0x260
+#define HIBMC_DP_COLOR_BAR_CTRL1 0x264
+
#define HIBMC_DP_TIMING_GEN_CONFIG0 0x26c
+#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
+
#define HIBMC_DP_TIMING_GEN_CONFIG2 0x274
+#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
+
#define HIBMC_DP_TIMING_GEN_CONFIG3 0x278
+#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
+
#define HIBMC_DP_HDCP_CFG 0x600
+
#define HIBMC_DP_DPTX_RST_CTRL 0x700
+#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
+
#define HIBMC_DP_DPTX_CLK_CTRL 0x704
+
#define HIBMC_DP_DPTX_GCTL0 0x708
+#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
+
#define HIBMC_DP_INTR_ENABLE 0x720
#define HIBMC_DP_INTR_ORIGINAL_STATUS 0x728
+
#define HIBMC_DP_TIMING_MODEL_CTRL 0x884
+#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
+
#define HIBMC_DP_TIMING_SYNC_CTRL 0xFF0
-#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
-#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
-#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
-#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
-#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
-#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
-#define HIBMC_DP_CFG_AUX_REQ BIT(0)
-#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
-#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
-#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
-#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
-#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
-#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
-#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
-#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
-#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
-#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
-#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
-#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
-#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
-#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
+#define HIBMC_DP_INTSTAT 0x1e0724
+#define HIBMC_DP_INTCLR 0x1e0728
+
+/* dp serdes reg */
+#define HIBMC_DP_HOST_OFFSET 0x10000
+#define HIBMC_DP_LANE0_RATE_OFFSET 0x4
+#define HIBMC_DP_LANE1_RATE_OFFSET 0xc
+#define HIBMC_DP_LANE_STATUS_OFFSET 0x10
+#define HIBMC_DP_PMA_LANE0_OFFSET 0x18
+#define HIBMC_DP_PMA_LANE1_OFFSET 0x1c
+#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
+#define HIBMC_DP_PMA_TXDEEMPH GENMASK(18, 1)
+#define DP_SERDES_DONE 0x3
+
+/* dp serdes TX-Deempth Configuration */
+#define DP_SERDES_VOL0_PRE0 0x280
+#define DP_SERDES_VOL0_PRE1 0x2300
+#define DP_SERDES_VOL0_PRE2 0x53c0
+#define DP_SERDES_VOL0_PRE3 0x8400
+#define DP_SERDES_VOL1_PRE0 0x380
+#define DP_SERDES_VOL1_PRE1 0x3440
+#define DP_SERDES_VOL1_PRE2 0x6480
+#define DP_SERDES_VOL2_PRE0 0x4c1
+#define DP_SERDES_VOL2_PRE1 0x4500
+#define DP_SERDES_VOL3_PRE0 0x600
+#define DP_SERDES_BW_8_1 0x3
+#define DP_SERDES_BW_5_4 0x2
+#define DP_SERDES_BW_2_7 0x1
+#define DP_SERDES_BW_1_62 0x0
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c
new file mode 100644
index 000000000000..676059d4c1e6
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2025 Hisilicon Limited.
+
+#include <linux/delay.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include "dp_comm.h"
+#include "dp_config.h"
+#include "dp_reg.h"
+
+int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX])
+{
+ static const u32 serdes_tx_cfg[4][4] = { {DP_SERDES_VOL0_PRE0, DP_SERDES_VOL0_PRE1,
+ DP_SERDES_VOL0_PRE2, DP_SERDES_VOL0_PRE3},
+ {DP_SERDES_VOL1_PRE0, DP_SERDES_VOL1_PRE1,
+ DP_SERDES_VOL1_PRE2}, {DP_SERDES_VOL2_PRE0,
+ DP_SERDES_VOL2_PRE1}, {DP_SERDES_VOL3_PRE0}};
+ int cfg[2];
+ int i;
+
+ for (i = 0; i < HIBMC_DP_LANE_NUM_MAX; i++) {
+ cfg[i] = serdes_tx_cfg[FIELD_GET(DP_TRAIN_VOLTAGE_SWING_MASK, train_set[i])]
+ [FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK, train_set[i])];
+ if (!cfg[i])
+ return -EINVAL;
+
+ /* lane1 offset is 4 */
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, cfg[i]),
+ dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET + i * 4);
+ }
+
+ usleep_range(300, 500);
+
+ if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) {
+ drm_dbg_dp(dp->dev, "dp serdes cfg failed\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp)
+{
+ writel(rate, dp->serdes_base + HIBMC_DP_LANE0_RATE_OFFSET);
+ writel(rate, dp->serdes_base + HIBMC_DP_LANE1_RATE_OFFSET);
+
+ usleep_range(300, 500);
+
+ if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) {
+ drm_dbg_dp(dp->dev, "dp serdes rate switching failed\n");
+ return -EAGAIN;
+ }
+
+ if (rate < DP_SERDES_BW_8_1)
+ drm_dbg_dp(dp->dev, "reducing serdes rate to :%d\n",
+ rate ? rate * HIBMC_DP_LINK_RATE_CAL * 10 : 162);
+
+ return 0;
+}
+
+int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp)
+{
+ dp->serdes_base = dp->base + HIBMC_DP_HOST_OFFSET;
+
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0),
+ dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET);
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0),
+ dp->serdes_base + HIBMC_DP_PMA_LANE1_OFFSET);
+
+ return hibmc_dp_serdes_rate_switch(DP_SERDES_BW_8_1, dp);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c
new file mode 100644
index 000000000000..f585387c3a49
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
+
+#include "hibmc_drm_drv.h"
+
+#define MAX_BUF_SIZE 12
+
+static ssize_t hibmc_control_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hibmc_drm_private *priv = file_inode(file)->i_private;
+ struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg;
+ int ret, idx;
+ u8 buf[MAX_BUF_SIZE];
+
+ if (count >= MAX_BUF_SIZE)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+
+	/* Only 4 parameters are allowed; the ranges are as follows:
+	 * [0] enable/disable the colorbar feature
+	 *     0: enable colorbar, 1: disable colorbar
+	 * [1] the timing source of colorbar displaying
+	 *     0: timing follows XDP, 1: internal self timing
+	 * [2] the movement of colorbar displaying
+	 *     0: static colorbar image,
+	 *     1~255: right-shift one color every (1~255) frames
+	 * [3] the color type of colorbar displaying
+	 *     0~9: color bar, white, red, orange,
+	 *     yellow, green, cyan, blue, purple, black
+	 */
+ if (sscanf(buf, "%hhu %hhu %hhu %u", &cfg->enable, &cfg->self_timing,
+ &cfg->dynamic_rate, &cfg->pattern) != 4) {
+ return -EINVAL;
+ }
+
+ if (cfg->pattern > 9 || cfg->enable > 1 || cfg->self_timing > 1)
+ return -EINVAL;
+
+ ret = drm_dev_enter(&priv->dev, &idx);
+ if (!ret)
+ return -ENODEV;
+
+ hibmc_dp_set_cbar(&priv->dp, cfg);
+
+ drm_dev_exit(idx);
+
+ return count;
+}
+
+static int hibmc_dp_dbgfs_show(struct seq_file *m, void *arg)
+{
+ struct hibmc_drm_private *priv = m->private;
+ struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg;
+ int idx;
+
+ if (!drm_dev_enter(&priv->dev, &idx))
+ return -ENODEV;
+
+ seq_printf(m, "hibmc dp colorbar cfg: %u %u %u %u\n", cfg->enable, cfg->self_timing,
+ cfg->dynamic_rate, cfg->pattern);
+
+ drm_dev_exit(idx);
+
+ return 0;
+}
+
+static int hibmc_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hibmc_dp_dbgfs_show, inode->i_private);
+}
+
+static const struct file_operations hibmc_dbg_fops = {
+ .owner = THIS_MODULE,
+ .write = hibmc_control_write,
+ .read = seq_read,
+ .open = hibmc_open,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root)
+{
+ struct drm_device *dev = connector->dev;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+
+	/* create the file in the drm debugfs directory, so we don't need to remove it manually */
+ debugfs_create_file("colorbar-cfg", 0200,
+ root, priv, &hibmc_dbg_fops);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
index 603d6b198a54..d06832e62e96 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
@@ -13,27 +13,64 @@
#include "hibmc_drm_drv.h"
#include "dp/dp_hw.h"
+#define DP_MASKED_SINK_HPD_PLUG_INT BIT(2)
+
static int hibmc_dp_connector_get_modes(struct drm_connector *connector)
{
+ const struct drm_edid *drm_edid;
int count;
- count = drm_add_modes_noedid(connector, connector->dev->mode_config.max_width,
- connector->dev->mode_config.max_height);
- drm_set_preferred_mode(connector, 1024, 768); // temporary implementation
+ drm_edid = drm_edid_read(connector);
+
+ drm_edid_connector_update(connector, drm_edid);
+
+ count = drm_edid_connector_add_modes(connector);
+
+ drm_edid_free(drm_edid);
return count;
}
+static int hibmc_dp_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ mdelay(200);
+
+ return drm_connector_helper_detect_from_ddc(connector, ctx, force);
+}
+
static const struct drm_connector_helper_funcs hibmc_dp_conn_helper_funcs = {
.get_modes = hibmc_dp_connector_get_modes,
+ .detect_ctx = hibmc_dp_detect,
};
+static int hibmc_dp_late_register(struct drm_connector *connector)
+{
+ struct hibmc_dp *dp = to_hibmc_dp(connector);
+
+ hibmc_dp_enable_int(dp);
+
+ return drm_dp_aux_register(&dp->aux);
+}
+
+static void hibmc_dp_early_unregister(struct drm_connector *connector)
+{
+ struct hibmc_dp *dp = to_hibmc_dp(connector);
+
+ drm_dp_aux_unregister(&dp->aux);
+
+ hibmc_dp_disable_int(dp);
+}
+
static const struct drm_connector_funcs hibmc_dp_conn_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = hibmc_dp_late_register,
+ .early_unregister = hibmc_dp_early_unregister,
+ .debugfs_init = hibmc_debugfs_init,
};
static inline int hibmc_dp_prepare(struct hibmc_dp *dp, struct drm_display_mode *mode)
@@ -74,6 +111,31 @@ static const struct drm_encoder_helper_funcs hibmc_dp_encoder_helper_funcs = {
.atomic_disable = hibmc_dp_encoder_disable,
};
+irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
+
+ if (priv->dp.irq_status & DP_MASKED_SINK_HPD_PLUG_INT) {
+ drm_dbg_dp(&priv->dev, "HPD IN isr occur!\n");
+ hibmc_dp_hpd_cfg(&priv->dp);
+ } else {
+ drm_dbg_dp(&priv->dev, "HPD OUT isr occur!\n");
+ hibmc_dp_reset_link(&priv->dp);
+ }
+
+ if (dev->registered)
+ drm_connector_helper_hpd_irq_event(&priv->dp.connector);
+
+ drm_dev_exit(idx);
+
+ return IRQ_HANDLED;
+}
+
int hibmc_dp_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
@@ -103,8 +165,8 @@ int hibmc_dp_init(struct hibmc_drm_private *priv)
drm_encoder_helper_add(encoder, &hibmc_dp_encoder_helper_funcs);
- ret = drm_connector_init(dev, connector, &hibmc_dp_conn_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_init_with_ddc(dev, connector, &hibmc_dp_conn_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort, &dp->aux.ddc);
if (ret) {
drm_err(dev, "init dp connector failed: %d\n", ret);
return ret;
@@ -114,5 +176,7 @@ int hibmc_dp_init(struct hibmc_drm_private *priv)
drm_connector_attach_encoder(connector, encoder);
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index e6de6d5edf6b..768b97f9e74a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -28,12 +28,12 @@
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
-#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
-#define HIBMC_DP_HOST_SERDES_CTRL_VAL 0x8a00
-#define HIBMC_DP_HOST_SERDES_CTRL_MASK 0x7ffff
+#include "dp/dp_reg.h"
DEFINE_DRM_GEM_FOPS(hibmc_fops);
+static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" };
+
static irqreturn_t hibmc_interrupt(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
@@ -51,6 +51,22 @@ static irqreturn_t hibmc_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
+static irqreturn_t hibmc_dp_interrupt(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+ u32 status;
+
+ status = readl(priv->mmio + HIBMC_DP_INTSTAT);
+ if (status) {
+ priv->dp.irq_status = status;
+ writel(status, priv->mmio + HIBMC_DP_INTCLR);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_HANDLED;
+}
+
static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
@@ -121,9 +137,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
return ret;
}
- /* if DP existed, init DP */
- if ((readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL) &
- HIBMC_DP_HOST_SERDES_CTRL_MASK) == HIBMC_DP_HOST_SERDES_CTRL_VAL) {
+	/*
+	 * If the serdes register is readable and non-zero, the DP block
+	 * exists, so initialize it.
+	 */
+ ret = readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL);
+ if (ret) {
ret = hibmc_dp_init(priv);
if (ret)
drm_err(dev, "failed to init dp: %d\n", ret);
@@ -250,15 +269,48 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv)
return 0;
}
-static int hibmc_unload(struct drm_device *dev)
+static void hibmc_unload(struct drm_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
drm_atomic_helper_shutdown(dev);
+}
- free_irq(pdev->irq, dev);
+static int hibmc_msi_init(struct drm_device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ char name[32] = {0};
+ int valid_irq_num;
+ int irq;
+ int ret;
- pci_disable_msi(to_pci_dev(dev->dev));
+ ret = pci_alloc_irq_vectors(pdev, HIBMC_MIN_VECTORS,
+ HIBMC_MAX_VECTORS, PCI_IRQ_MSI);
+ if (ret < 0) {
+ drm_err(dev, "enabling MSI failed: %d\n", ret);
+ return ret;
+ }
+
+ valid_irq_num = ret;
+
+ for (int i = 0; i < valid_irq_num; i++) {
+ snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s",
+ dev->driver->name, pci_name(pdev), g_irqs_names_map[i]);
+
+ irq = pci_irq_vector(pdev, i);
+
+ if (i)
+ /* PCI devices require shared interrupts. */
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ hibmc_dp_interrupt,
+ hibmc_dp_hpd_isr,
+ IRQF_SHARED, name, dev);
+ else
+ ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt,
+ IRQF_SHARED, name, dev);
+ if (ret) {
+ drm_err(dev, "install irq failed: %d\n", ret);
+ return ret;
+ }
+ }
return 0;
}
@@ -290,15 +342,10 @@ static int hibmc_load(struct drm_device *dev)
goto err;
}
- ret = pci_enable_msi(pdev);
+ ret = hibmc_msi_init(dev);
if (ret) {
- drm_warn(dev, "enabling MSI failed: %d\n", ret);
- } else {
- /* PCI devices require shared interrupts. */
- ret = request_irq(pdev->irq, hibmc_interrupt, IRQF_SHARED,
- dev->driver->name, dev);
- if (ret)
- drm_warn(dev, "install irq failed: %d\n", ret);
+ drm_err(dev, "hibmc msi init failed, ret:%d\n", ret);
+ goto err;
}
/* reset all the states of crtc/plane/encoder/connector */
@@ -374,7 +421,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
static void hibmc_pci_shutdown(struct pci_dev *pdev)
{
- drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+ hibmc_pci_remove(pdev);
}
static const struct pci_device_id hibmc_pci_table[] = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index d982f1e4b958..274feabe7df0 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -22,6 +22,9 @@
#include "dp/dp_hw.h"
+#define HIBMC_MIN_VECTORS 1
+#define HIBMC_MAX_VECTORS 2
+
struct hibmc_vdac {
struct drm_device *dev;
struct drm_encoder encoder;
@@ -47,6 +50,11 @@ static inline struct hibmc_vdac *to_hibmc_vdac(struct drm_connector *connector)
return container_of(connector, struct hibmc_vdac, connector);
}
+static inline struct hibmc_dp *to_hibmc_dp(struct drm_connector *connector)
+{
+ return container_of(connector, struct hibmc_dp, connector);
+}
+
static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
{
return container_of(dev, struct hibmc_drm_private, dev);
@@ -64,4 +72,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector);
int hibmc_dp_init(struct hibmc_drm_private *priv);
+void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root);
+
+irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg);
+
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 05e19ea4c9f9..e8a527ede854 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -60,6 +60,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
+ .detect_ctx = drm_connector_helper_detect_from_ddc,
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
@@ -127,5 +128,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
drm_connector_attach_encoder(connector, encoder);
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 2eea9fb0e76b..e80debdc4176 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -825,7 +825,6 @@ static const struct component_ops dsi_ops = {
static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
{
struct dsi_hw_ctx *ctx = dsi->ctx;
- struct resource *res;
ctx->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(ctx->pclk)) {
@@ -833,8 +832,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
return PTR_ERR(ctx->pclk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap dsi io region\n");
return PTR_ERR(ctx->base);
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 2eb49177ac42..45c4eb008ad5 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -844,7 +844,6 @@ static struct drm_plane_funcs ade_plane_funcs = {
static void *ade_hw_ctx_alloc(struct platform_device *pdev,
struct drm_crtc *crtc)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct ade_hw_ctx *ctx = NULL;
@@ -856,8 +855,7 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
return ERR_PTR(-ENOMEM);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap ade io base\n");
return ERR_PTR(-EIO);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ed05b131ed3a..e153686256c9 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -52,7 +52,6 @@ i915-y += \
i915-y += \
soc/intel_dram.o \
soc/intel_gmch.o \
- soc/intel_pch.o \
soc/intel_rom.o
# core library code
@@ -247,6 +246,7 @@ i915-y += \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
display/intel_display_reset.o \
+ display/intel_display_rpm.o \
display/intel_display_rps.o \
display/intel_display_snapshot.o \
display/intel_display_wa.o \
@@ -281,6 +281,7 @@ i915-y += \
display/intel_modeset_setup.o \
display/intel_modeset_verify.o \
display/intel_overlay.o \
+ display/intel_pch.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
display/intel_plane_initial.o \
@@ -408,7 +409,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
#
# Enable locally for CONFIG_DRM_I915_WERROR=y. See also scripts/Makefile.build
ifdef CONFIG_DRM_I915_WERROR
- cmd_checkdoc = $(srctree)/scripts/kernel-doc -none -Werror $<
+ cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none -Werror $<
endif
# header test
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index 206818f9ad49..f10c0fb8d2c8 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 10ab3cc73e58..49f02aca818b 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index d9c3152d4338..0713b2709412 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -29,6 +29,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 92d32d6b5bce..80b71bd6a837 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -26,6 +26,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index b42c717085f3..017b617a8069 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index 280699438526..ed560e3438db 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 55b9e9bfcc4d..e0a98e6fd6d1 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -7,9 +7,11 @@
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -28,7 +30,6 @@
#include "intel_hotplug.h"
#include "intel_pch_display.h"
#include "intel_pps.h"
-#include "vlv_sideband.h"
static const struct dpll g4x_dpll[] = {
{ .dot = 162000, .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8, },
@@ -60,14 +61,13 @@ static void g4x_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct dpll *divisor = NULL;
int i, count = 0;
if (display->platform.g4x) {
divisor = g4x_dpll;
count = ARRAY_SIZE(g4x_dpll);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
} else if (display->platform.cherryview) {
@@ -93,7 +93,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
@@ -141,7 +140,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
- } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ } else if (HAS_PCH_CPT(display) && port != PORT_A) {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
intel_de_rmw(display, TRANS_DP_CTL(crtc->pipe),
@@ -183,7 +182,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
static void assert_edp_pll(struct intel_display *display, bool state)
{
- bool cur_state = intel_de_read(display, DP_A) & DP_PLL_ENABLE;
+ bool cur_state = intel_de_read(display, DP_A) & EDP_PLL_ENABLE;
INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
@@ -205,12 +204,12 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "enabling eDP PLL for clock %d\n",
pipe_config->port_clock);
- intel_dp->DP &= ~DP_PLL_FREQ_MASK;
+ intel_dp->DP &= ~EDP_PLL_FREQ_MASK;
if (pipe_config->port_clock == 162000)
- intel_dp->DP |= DP_PLL_FREQ_162MHZ;
+ intel_dp->DP |= EDP_PLL_FREQ_162MHZ;
else
- intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ intel_dp->DP |= EDP_PLL_FREQ_270MHZ;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -225,7 +224,7 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
if (display->platform.ironlake)
intel_wait_for_vblank_if_active(display, !crtc->pipe);
- intel_dp->DP |= DP_PLL_ENABLE;
+ intel_dp->DP |= EDP_PLL_ENABLE;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -243,7 +242,7 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "disabling eDP PLL\n");
- intel_dp->DP &= ~DP_PLL_ENABLE;
+ intel_dp->DP &= ~EDP_PLL_ENABLE;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -277,7 +276,6 @@ bool g4x_dp_port_enabled(struct intel_display *display,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
bool ret;
u32 val;
@@ -287,13 +285,13 @@ bool g4x_dp_port_enabled(struct intel_display *display,
/* asserts want to know the pipe even if the port is disabled */
if (display->platform.ivybridge && port == PORT_A)
- *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
- else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK_IVB, val);
+ else if (HAS_PCH_CPT(display) && port != PORT_A)
ret &= cpt_dp_port_selected(display, port, pipe);
else if (display->platform.cherryview)
- *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK_CHV, val);
else
- *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK, val);
return ret;
}
@@ -338,7 +336,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 tmp, flags = 0;
enum port port = encoder->port;
@@ -353,7 +350,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
- if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ if (HAS_PCH_CPT(display) && port != PORT_A) {
u32 trans_dp = intel_de_read(display,
TRANS_DP_CTL(crtc->pipe));
@@ -389,13 +386,12 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
if (display->platform.g4x && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- pipe_config->lane_count =
- ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
+ pipe_config->lane_count = REG_FIELD_GET(DP_PORT_WIDTH_MASK, tmp) + 1;
g4x_dp_get_m_n(pipe_config);
if (port == PORT_A) {
- if ((intel_de_read(display, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+ if ((intel_de_read(display, DP_A) & EDP_PLL_FREQ_MASK) == EDP_PLL_FREQ_162MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
@@ -416,7 +412,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
@@ -429,7 +424,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
drm_dbg_kms(display->drm, "\n");
if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
+ (HAS_PCH_CPT(display) && port != PORT_A)) {
intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
@@ -448,7 +443,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
* to transcoder A after disabling it to allow the
* matching HDMI port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B && port != PORT_A) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -519,7 +514,7 @@ static void intel_disable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
/*
* Make sure the panel is off before trying to change the mode.
@@ -581,16 +576,10 @@ static void chv_post_disable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
intel_dp_link_down(encoder, old_crtc_state);
- vlv_dpio_get(dev_priv);
-
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
-
- vlv_dpio_put(dev_priv);
}
static void
@@ -1223,10 +1212,10 @@ static int g4x_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
int ret;
- if (HAS_PCH_SPLIT(i915) && encoder->port != PORT_A)
+ if (HAS_PCH_SPLIT(display) && encoder->port != PORT_A)
crtc_state->has_pch_encoder = true;
ret = intel_dp_compute_config(encoder, crtc_state, conn_state);
@@ -1279,7 +1268,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
bool g4x_dp_init(struct intel_display *display,
i915_reg_t output_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
@@ -1353,7 +1341,7 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->audio_disable = g4x_dp_audio_disable;
if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A))
+ (HAS_PCH_CPT(display) && port != PORT_A))
dig_port->dp.set_link_train = cpt_set_link_train;
else
dig_port->dp.set_link_train = g4x_set_link_train;
@@ -1370,7 +1358,7 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->set_signal_levels = g4x_set_signal_levels;
if (display->platform.valleyview || display->platform.cherryview ||
- (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
+ (HAS_PCH_SPLIT(display) && port != PORT_A)) {
dig_port->dp.preemph_max = intel_dp_preemph_max_3;
dig_port->dp.voltage_max = intel_dp_voltage_max_3;
} else {
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 3dc2c59a3df0..1d252432d729 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -5,8 +5,9 @@
* HDMI support for G4x,ILK,SNB,IVB,VLV,CHV (HSW+ handled by the DDI code).
*/
+#include <drm/drm_print.h>
+
#include "g4x_hdmi.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -22,13 +23,11 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_sdvo.h"
-#include "vlv_sideband.h"
static void intel_hdmi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -37,7 +36,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
hdmi_val = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
+ if (!HAS_PCH_SPLIT(display) && crtc_state->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
@@ -52,7 +51,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
if (crtc_state->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else if (display->platform.cherryview)
hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
@@ -134,9 +133,8 @@ static int g4x_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
crtc_state->has_pch_encoder = true;
if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
@@ -155,7 +153,6 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 tmp, flags = 0;
int dotclock;
@@ -186,7 +183,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_AUDIO_ENABLE)
pipe_config->has_audio = true;
- if (!HAS_PCH_SPLIT(dev_priv) &&
+ if (!HAS_PCH_SPLIT(display) &&
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -383,7 +380,6 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *dig_port =
hdmi_to_dig_port(intel_hdmi);
@@ -401,7 +397,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -539,15 +535,8 @@ static void chv_hdmi_post_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- vlv_dpio_get(dev_priv);
-
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
-
- vlv_dpio_put(dev_priv);
}
static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
@@ -682,7 +671,6 @@ static bool assert_hdmi_port_valid(struct intel_display *display, enum port port
bool g4x_hdmi_init(struct intel_display *display,
i915_reg_t hdmi_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
@@ -724,7 +712,7 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->hotplug = intel_hdmi_hotplug;
intel_encoder->compute_config = g4x_hdmi_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
intel_encoder->disable = pch_disable_hdmi;
intel_encoder->post_disable = pch_post_disable_hdmi;
} else {
@@ -745,9 +733,9 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
intel_encoder->enable = cpt_enable_hdmi;
- else if (HAS_PCH_IBX(dev_priv))
+ else if (HAS_PCH_IBX(display))
intel_encoder->enable = ibx_enable_hdmi;
else
intel_encoder->enable = g4x_enable_hdmi;
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 674a0e5f0858..4307e2ed03d9 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -10,6 +10,7 @@
#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_pcode.h"
@@ -344,10 +345,9 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_crtc *crtc = m->private;
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
seq_printf(m, "Enabled by kernel parameter: %s\n",
str_yes_no(display->params.enable_ips));
@@ -361,7 +361,7 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
seq_puts(m, "Currently: disabled\n");
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 013295f66d56..a2a6d52be0a5 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -7,9 +7,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
@@ -630,85 +631,85 @@ vlv_primary_async_flip(struct intel_dsb *dsb,
static void
bdw_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&display->irq.lock);
}
static void
bdw_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&display->irq.lock);
}
static void
ivb_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ivb_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ilk_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ilk_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
vlv_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&display->irq.lock);
}
static void
vlv_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_disable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&display->irq.lock);
}
static bool i9xx_plane_can_async_flip(u64 modifier)
@@ -820,7 +821,7 @@ unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
- if (intel_plane_can_async_flip(plane, fb->modifier))
+ if (intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
return 256 * 1024;
/* FIXME undocumented so not sure what's actually needed */
@@ -844,7 +845,7 @@ static unsigned int g4x_primary_min_alignment(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
- if (intel_plane_can_async_flip(plane, fb->modifier))
+ if (intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
return 256 * 1024;
if (intel_scanout_needs_vtd_wa(display))
@@ -889,6 +890,7 @@ static const struct drm_plane_funcs i965_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i965_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs i8xx_plane_funcs = {
@@ -898,6 +900,7 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i8xx_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
struct intel_plane *
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 7c80e37c1c5f..77876ef735b7 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -9,6 +9,7 @@
#include "i9xx_wm_regs.h"
#include "intel_atomic.h"
#include "intel_bo.h"
+#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_trace.h"
#include "intel_fb.h"
@@ -81,13 +82,14 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private *i915)
+static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int i;
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
const struct cxsr_latency *latency = &cxsr_latency_table[i];
- bool is_desktop = !IS_MOBILE(i915);
+ bool is_desktop = !display->platform.mobile;
if (is_desktop == latency->is_desktop &&
i915->is_ddr3 == latency->is_ddr3 &&
@@ -96,15 +98,16 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private *
return latency;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Could not find CxSR latency for DDR%s, FSB %u kHz, MEM %u kHz\n",
i915->is_ddr3 ? "3" : "2", i915->fsb_freq, i915->mem_freq);
return NULL;
}
-static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
vlv_punit_get(dev_priv);
@@ -120,14 +123,15 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timed out waiting for Punit DDR DVFS request\n");
vlv_punit_put(dev_priv);
}
-static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_pm5(struct intel_display *display, bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
vlv_punit_get(dev_priv);
@@ -145,53 +149,52 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
#define FW_WM(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
-static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+static bool _intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
- struct intel_display *display = &dev_priv->display;
bool was_enabled;
u32 val;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
- } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_PINEVIEW(dev_priv)) {
- val = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ if (display->platform.valleyview || display->platform.cherryview) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_de_write(display, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ intel_de_posting_read(display, FW_BLC_SELF_VLV);
+ } else if (display->platform.g4x || display->platform.i965gm) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_de_write(display, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ intel_de_posting_read(display, FW_BLC_SELF);
+ } else if (display->platform.pineview) {
+ val = intel_de_read(display, DSPFW3(display));
was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
if (enable)
val |= PINEVIEW_SELF_REFRESH_EN;
else
val &= ~PINEVIEW_SELF_REFRESH_EN;
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), val);
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW3(dev_priv));
- } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_de_write(display, DSPFW3(display), val);
+ intel_de_posting_read(display, DSPFW3(display));
+ } else if (display->platform.i945g || display->platform.i945gm) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_I915GM(dev_priv)) {
+ intel_de_write(display, FW_BLC_SELF, val);
+ intel_de_posting_read(display, FW_BLC_SELF);
+ } else if (display->platform.i915gm) {
/*
* FIXME can't find a bit like this for 915G, and
* yet it does have the related watermark in
* FW_BLC_SELF. What's going on?
*/
- was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
+ was_enabled = intel_de_read(display, INSTPM) & INSTPM_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, INSTPM, val);
- intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
+ intel_de_write(display, INSTPM, val);
+ intel_de_posting_read(display, INSTPM);
} else {
return false;
}
trace_intel_memory_cxsr(display, was_enabled, enable);
- drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
+ drm_dbg_kms(display->drm, "memory self-refresh is %s (was %s)\n",
str_enabled_disabled(enable),
str_enabled_disabled(was_enabled));
@@ -200,7 +203,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
/**
* intel_set_memory_cxsr - Configure CxSR state
- * @dev_priv: i915 device
+ * @display: display device
* @enable: Allow vs. disallow CxSR
*
* Allow or disallow the system to enter a special CxSR
@@ -235,17 +238,17 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
* the hardware w.r.t. HPLL SR when writing to plane registers.
* Disallowing just CxSR is sufficient.
*/
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+bool intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
bool ret;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- ret = _intel_set_memory_cxsr(dev_priv, enable);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display.wm.vlv.cxsr = enable;
- else if (IS_G4X(dev_priv))
- dev_priv->display.wm.g4x.cxsr = enable;
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
+ ret = _intel_set_memory_cxsr(display, enable);
+ if (display->platform.valleyview || display->platform.cherryview)
+ display->wm.vlv.cxsr = enable;
+ else if (display->platform.g4x)
+ display->wm.g4x.cxsr = enable;
+ mutex_unlock(&display->wm.wm_mutex);
return ret;
}
@@ -271,8 +274,8 @@ static const int pessimal_latency_ns = 5000;
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
enum pipe pipe = crtc->pipe;
int sprite0_start, sprite1_start;
@@ -280,22 +283,20 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
switch (pipe) {
case PIPE_A:
- dsparb = intel_uncore_read(&dev_priv->uncore,
- DSPARB(dev_priv));
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb = intel_de_read(display, DSPARB(display));
+ dsparb2 = intel_de_read(display, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
break;
case PIPE_B:
- dsparb = intel_uncore_read(&dev_priv->uncore,
- DSPARB(dev_priv));
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb = intel_de_read(display, DSPARB(display));
+ dsparb2 = intel_de_read(display, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
break;
case PIPE_C:
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
+ dsparb2 = intel_de_read(display, DSPARB2);
+ dsparb3 = intel_de_read(display, DSPARB3);
sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
break;
@@ -310,26 +311,26 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
fifo_state->plane[PLANE_CURSOR] = 63;
}
-static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i9xx_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x7f;
if (i9xx_plane == PLANE_B)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i830_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x1ff;
@@ -337,22 +338,22 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i845_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
@@ -537,7 +538,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
/**
* intel_calculate_wm - calculate watermark level
- * @i915: the device
+ * @display: display device
* @pixel_rate: pixel clock
* @wm: chip FIFO params
* @fifo_size: size of the FIFO buffer
@@ -555,7 +556,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
* past the watermark point. If the FIFO drains completely, a FIFO underrun
* will occur, and a display engine hang could result.
*/
-static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
+static unsigned int intel_calculate_wm(struct intel_display *display,
int pixel_rate,
const struct intel_watermark_params *wm,
int fifo_size, int cpp,
@@ -573,10 +574,10 @@ static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
wm->guard_size;
- drm_dbg_kms(&i915->drm, "FIFO entries required for mode: %d\n", entries);
+ drm_dbg_kms(display->drm, "FIFO entries required for mode: %d\n", entries);
wm_size = fifo_size - entries;
- drm_dbg_kms(&i915->drm, "FIFO watermark level: %d\n", wm_size);
+ drm_dbg_kms(display->drm, "FIFO watermark level: %d\n", wm_size);
/* Don't promote wm_size to unsigned... */
if (wm_size > wm->max_wm)
@@ -626,11 +627,11 @@ static bool intel_crtc_active(struct intel_crtc *crtc)
crtc->config->hw.adjusted_mode.crtc_clock;
}
-static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
+static struct intel_crtc *single_enabled_crtc(struct intel_display *display)
{
struct intel_crtc *crtc, *enabled = NULL;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
@@ -641,21 +642,21 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
return enabled;
}
-static void pnv_update_wm(struct drm_i915_private *dev_priv)
+static void pnv_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned int wm;
- latency = pnv_get_cxsr_latency(dev_priv);
+ latency = pnv_get_cxsr_latency(display);
if (!latency) {
- drm_dbg_kms(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
- intel_set_memory_cxsr(dev_priv, false);
+ drm_dbg_kms(display->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ intel_set_memory_cxsr(display, false);
return;
}
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
@@ -663,47 +664,46 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
int cpp = fb->format->cpp[0];
/* Display SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_display_wm,
pnv_display_wm.fifo_size,
cpp, latency->display_sr);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ reg = intel_de_read(display, DSPFW1(display));
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv), reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
+ intel_de_write(display, DSPFW1(display), reg);
+ drm_dbg_kms(display->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_cursor_wm,
pnv_display_wm.fifo_size,
4, latency->cursor_sr);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
- DSPFW_CURSOR_SR_MASK,
- FW_WM(wm, CURSOR_SR));
+ intel_de_rmw(display, DSPFW3(display),
+ DSPFW_CURSOR_SR_MASK, FW_WM(wm, CURSOR_SR));
/* Display HPLL off SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_display_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
- DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
+ intel_de_rmw(display, DSPFW3(display),
+ DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
/* cursor HPLL off SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_cursor_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ reg = intel_de_read(display, DSPFW3(display));
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
+ intel_de_write(display, DSPFW3(display), reg);
+ drm_dbg_kms(display->drm, "DSPFW3 register is %x\n", reg);
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
} else {
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
}
}
@@ -794,53 +794,51 @@ static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
return max(0, tlb_miss);
}
-static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_write_wm_values(struct intel_display *display,
const struct g4x_wm_values *wm)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
trace_g4x_wm(intel_crtc_for_pipe(display, pipe), wm);
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
- FW_WM(wm->sr.fbc, FBC_SR) |
- FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
- FW_WM(wm->sr.cursor, CURSOR_SR) |
- FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
- FW_WM(wm->hpll.plane, HPLL_SR));
-
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
+ FW_WM(wm->sr.fbc, FBC_SR) |
+ FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_de_write(display, DSPFW3(display),
+ (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
+ FW_WM(wm->sr.cursor, CURSOR_SR) |
+ FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
+ FW_WM(wm->hpll.plane, HPLL_SR));
+
+ intel_de_posting_read(display, DSPFW1(display));
}
#define FW_WM_VLV(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
-static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_write_wm_values(struct intel_display *display,
const struct vlv_wm_values *wm)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
trace_vlv_wm(intel_crtc_for_pipe(display, pipe), wm);
- intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
- (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
- (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
- (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
- (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
+ intel_de_write(display, VLV_DDL(pipe),
+ (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
}
/*
@@ -848,72 +846,72 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
* high order bits so that there are no out of bounds values
* present in the registers during the reprogramming.
*/
- intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
- intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
-
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- FW_WM(wm->sr.cursor, CURSOR_SR));
-
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
- intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ intel_de_write(display, DSPHOWM, 0);
+ intel_de_write(display, DSPHOWM1, 0);
+ intel_de_write(display, DSPFW4, 0);
+ intel_de_write(display, DSPFW5, 0);
+ intel_de_write(display, DSPFW6, 0);
+
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_de_write(display, DSPFW3(display),
+ FW_WM(wm->sr.cursor, CURSOR_SR));
+
+ if (display->platform.cherryview) {
+ intel_de_write(display, DSPFW7_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_de_write(display, DSPFW8_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
+ intel_de_write(display, DSPFW9_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
+ intel_de_write(display, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
} else {
- intel_uncore_write(&dev_priv->uncore, DSPFW7,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ intel_de_write(display, DSPFW7,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_de_write(display, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
}
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ intel_de_posting_read(display, DSPFW1(display));
}
#undef FW_WM_VLV
-static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void g4x_setup_wm_latency(struct intel_display *display)
{
/* all latencies in usec */
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+ display->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+ display->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+ display->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
- dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
+ display->wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
@@ -962,11 +960,11 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
- unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
+ unsigned int latency = display->wm.pri_latency[level] * 10;
unsigned int pixel_rate, htotal, cpp, width, wm;
if (latency == 0)
@@ -1017,10 +1015,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1033,13 +1031,13 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
int level, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
/* NORMAL level doesn't have an FBC watermark */
level = max(level, G4X_WM_LEVEL_SR);
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->fbc != value;
@@ -1056,8 +1054,8 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
bool dirty = false;
int level;
@@ -1069,7 +1067,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
int wm, max_wm;
@@ -1109,7 +1107,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
plane->base.name,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
@@ -1117,7 +1115,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
if (plane_id == PLANE_PRIMARY)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FBC watermarks: SR=%d, HPLL=%d\n",
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
@@ -1137,9 +1135,9 @@ static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (level >= dev_priv->display.wm.num_levels)
+ if (level >= display->wm.num_levels)
return false;
return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@ -1281,7 +1279,7 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
@@ -1311,7 +1309,7 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
max(optimal->wm.plane[plane_id],
active->wm.plane[plane_id]);
- drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
+ drm_WARN_ON(display->drm, intermediate->wm.plane[plane_id] >
g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
}
@@ -1329,23 +1327,23 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
intermediate->hpll.fbc = max(optimal->hpll.fbc,
active->hpll.fbc);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
(intermediate->sr.plane >
g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
intermediate->sr.cursor >
g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
(intermediate->sr.plane >
g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
intermediate->sr.cursor >
g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
intermediate->hpll_en);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
intermediate->fbc_en && intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
intermediate->fbc_en && intermediate->hpll_en);
@@ -1376,7 +1374,7 @@ static int g4x_compute_watermarks(struct intel_atomic_state *state,
return 0;
}
-static void g4x_merge_wm(struct drm_i915_private *dev_priv,
+static void g4x_merge_wm(struct intel_display *display,
struct g4x_wm_values *wm)
{
struct intel_crtc *crtc;
@@ -1386,7 +1384,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
wm->hpll_en = true;
wm->fbc_en = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
if (!crtc->active)
@@ -1408,7 +1406,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
wm->fbc_en = false;
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
enum pipe pipe = crtc->pipe;
@@ -1420,23 +1418,23 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
}
}
-static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
+static void g4x_program_watermarks(struct intel_display *display)
{
- struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values *old_wm = &display->wm.g4x;
struct g4x_wm_values new_wm = {};
- g4x_merge_wm(dev_priv, &new_wm);
+ g4x_merge_wm(display, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
+ _intel_set_memory_cxsr(display, false);
- g4x_write_wm_values(dev_priv, &new_wm);
+ g4x_write_wm_values(display, &new_wm);
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
+ _intel_set_memory_cxsr(display, true);
*old_wm = new_wm;
}
@@ -1444,30 +1442,30 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
static void g4x_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ g4x_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}

static void g4x_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ g4x_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}

/* latency must be in 0.1us units. */
@@ -1486,18 +1484,18 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
return ret;
}

-static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void vlv_setup_wm_latency(struct intel_display *display)
{
/* all latencies in usec */
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+ display->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+ if (display->platform.cherryview) {
+ display->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ display->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
}
}
@@ -1505,13 +1503,13 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
unsigned int pixel_rate, htotal, cpp, width, wm;
- if (dev_priv->display.wm.pri_latency[level] == 0)
+ if (display->wm.pri_latency[level] == 0)
return USHRT_MAX;
if (!intel_wm_plane_visible(crtc_state, plane_state))
@@ -1532,7 +1530,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
wm = 63;
} else {
wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
- dev_priv->display.wm.pri_latency[level] * 10);
+ display->wm.pri_latency[level] * 10);
}
return min_t(unsigned int, wm, USHRT_MAX);
@@ -1546,8 +1544,8 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
@@ -1616,11 +1614,11 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
fifo_left -= plane_extra;
}
- drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
+ drm_WARN_ON(display->drm, active_planes != 0 && fifo_left != 0);
/* give it all to the first plane if none are active */
if (active_planes == 0) {
- drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
+ drm_WARN_ON(display->drm, fifo_left != fifo_size);
fifo_state->plane[PLANE_PRIMARY] = fifo_left;
}
@@ -1631,9 +1629,9 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
static void vlv_invalidate_wms(struct intel_crtc *crtc,
struct vlv_wm_state *wm_state, int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id)
@@ -1659,10 +1657,10 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1675,8 +1673,8 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
int level;
bool dirty = false;
@@ -1686,7 +1684,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
@@ -1703,7 +1701,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
plane->base.name,
crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
@@ -1734,8 +1732,8 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
@@ -1745,7 +1743,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
int level;
/* initially allow all levels */
- wm_state->num_levels = dev_priv->display.wm.num_levels;
+ wm_state->num_levels = display->wm.num_levels;
/*
* Note that enabling cxsr with no primary/sprite planes
* enabled can wedge the pipe. Hence we only allow cxsr
@@ -1755,7 +1753,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 0; level < wm_state->num_levels; level++) {
const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
+ const int sr_fifo_size = INTEL_NUM_PIPES(display) * 512 - 1;
if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
break;
@@ -1855,6 +1853,7 @@ static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_uncore *uncore = &dev_priv->uncore;
const struct intel_crtc_state *crtc_state =
@@ -1871,8 +1870,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
- drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
- drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
+ drm_WARN_ON(display->drm, fifo_state->plane[PLANE_CURSOR] != 63);
+ drm_WARN_ON(display->drm, fifo_size != 511);
trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
@@ -1889,8 +1888,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
switch (crtc->pipe) {
case PIPE_A:
- dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb = intel_de_read_fw(display, DSPARB(display));
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
VLV_FIFO(SPRITEB, 0xff));
@@ -1902,12 +1901,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB(display), dsparb);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
case PIPE_B:
- dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb = intel_de_read_fw(display, DSPARB(display));
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
VLV_FIFO(SPRITED, 0xff));
@@ -1919,12 +1918,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB(display), dsparb);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
case PIPE_C:
- dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb3 = intel_de_read_fw(display, DSPARB3);
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
VLV_FIFO(SPRITEF, 0xff));
@@ -1936,14 +1935,14 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB3, dsparb3);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
default:
break;
}
- intel_uncore_posting_read_fw(uncore, DSPARB(dev_priv));
+ intel_de_read_fw(display, DSPARB(display));
spin_unlock(&uncore->lock);
}
@@ -2018,16 +2017,16 @@ static int vlv_compute_watermarks(struct intel_atomic_state *state,
return 0;
}

-static void vlv_merge_wm(struct drm_i915_private *dev_priv,
+static void vlv_merge_wm(struct intel_display *display,
struct vlv_wm_values *wm)
{
struct intel_crtc *crtc;
int num_active_pipes = 0;
- wm->level = dev_priv->display.wm.num_levels - 1;
+ wm->level = display->wm.num_levels - 1;
wm->cxsr = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
if (!crtc->active)
@@ -2046,7 +2045,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
if (num_active_pipes > 1)
wm->level = VLV_WM_LEVEL_PM2;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
enum pipe pipe = crtc->pipe;
@@ -2061,35 +2060,35 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
}
}

-static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
+static void vlv_program_watermarks(struct intel_display *display)
{
- struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
+ struct vlv_wm_values *old_wm = &display->wm.vlv;
struct vlv_wm_values new_wm = {};
- vlv_merge_wm(dev_priv, &new_wm);
+ vlv_merge_wm(display, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, false);
+ chv_set_memory_dvfs(display, false);
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, false);
+ chv_set_memory_pm5(display, false);
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
+ _intel_set_memory_cxsr(display, false);
- vlv_write_wm_values(dev_priv, &new_wm);
+ vlv_write_wm_values(display, &new_wm);
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
+ _intel_set_memory_cxsr(display, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, true);
+ chv_set_memory_pm5(display, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, true);
+ chv_set_memory_dvfs(display, true);
*old_wm = new_wm;
}
@@ -2097,33 +2096,33 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
static void vlv_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ vlv_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}

static void vlv_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ vlv_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}

-static void i965_update_wm(struct drm_i915_private *dev_priv)
+static void i965_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
int srwm = 1;
@@ -2131,7 +2130,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
bool cxsr_enabled;
/* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
@@ -2152,7 +2151,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh entries: %d, wm: %d\n",
entries, srwm);
@@ -2167,7 +2166,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
@@ -2175,39 +2174,38 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
} else {
cxsr_enabled = false;
/* Turn off self refresh if both pipes are enabled */
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
srwm);
/* 965 has limitations... */
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(srwm, SR) |
- FW_WM(8, CURSORB) |
- FW_WM(8, PLANEB) |
- FW_WM(8, PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- FW_WM(8, CURSORA) |
- FW_WM(8, PLANEC_OLD));
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(srwm, SR) |
+ FW_WM(8, CURSORB) |
+ FW_WM(8, PLANEB) |
+ FW_WM(8, PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ FW_WM(8, CURSORA) |
+ FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- FW_WM(cursor_sr, CURSOR_SR));
+ intel_de_write(display, DSPFW3(display),
+ FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
}

#undef FW_WM

-static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
+static struct intel_crtc *intel_crtc_for_plane(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- struct intel_display *display = &i915->display;
struct intel_plane *plane;
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
if (plane->id == PLANE_PRIMARY &&
plane->i9xx_plane == i9xx_plane)
return intel_crtc_for_pipe(display, plane->pipe);
@@ -2216,7 +2214,7 @@ static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
return NULL;
}

-static void i9xx_update_wm(struct drm_i915_private *dev_priv)
+static void i9xx_update_wm(struct intel_display *display)
{
const struct intel_watermark_params *wm_info;
u32 fwater_lo;
@@ -2226,29 +2224,29 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
int planea_wm, planeb_wm;
struct intel_crtc *crtc;
- if (IS_I945GM(dev_priv))
+ if (display->platform.i945gm)
wm_info = &i945_wm_info;
- else if (DISPLAY_VER(dev_priv) != 2)
+ else if (DISPLAY_VER(display) != 2)
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
+ if (DISPLAY_VER(display) == 2)
+ fifo_size = i830_get_fifo_size(display, PLANE_A);
else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
+ fifo_size = i9xx_get_fifo_size(display, PLANE_A);
+ crtc = intel_crtc_for_plane(display, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
- planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2257,25 +2255,25 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
planea_wm = wm_info->max_wm;
}
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
wm_info = &i830_bc_wm_info;
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
+ if (DISPLAY_VER(display) == 2)
+ fifo_size = i830_get_fifo_size(display, PLANE_B);
else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
+ fifo_size = i9xx_get_fifo_size(display, PLANE_B);
+ crtc = intel_crtc_for_plane(display, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
- planeb_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planeb_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2284,11 +2282,11 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
planeb_wm = wm_info->max_wm;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
- crtc = single_enabled_crtc(dev_priv);
- if (IS_I915GM(dev_priv) && crtc) {
+ crtc = single_enabled_crtc(display);
+ if (display->platform.i915gm && crtc) {
struct drm_gem_object *obj;
obj = intel_fb_bo(crtc->base.primary->state->fb);
@@ -2304,10 +2302,10 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
cwm = 2;
/* Play safe and disable self-refresh before adjusting watermarks. */
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
/* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev_priv) && crtc) {
+ if (HAS_FW_BLC(display) && crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
const struct drm_display_mode *pipe_mode =
@@ -2320,7 +2318,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
int cpp;
int entries;
- if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ if (display->platform.i915gm || display->platform.i945gm)
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2328,20 +2326,20 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
sr_latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
- FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ if (display->platform.i945g || display->platform.i945gm)
+ intel_de_write(display, FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
+ intel_de_write(display, FW_BLC_SELF, srwm & 0x3f);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
planea_wm, planeb_wm, cwm, srwm);
@@ -2352,34 +2350,34 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
fwater_hi = fwater_hi | (1 << 8);
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
- intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
+ intel_de_write(display, FW_BLC, fwater_lo);
+ intel_de_write(display, FW_BLC2, fwater_hi);
if (crtc)
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
}

-static void i845_update_wm(struct drm_i915_private *dev_priv)
+static void i845_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
u32 fwater_lo;
int planea_wm;
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc == NULL)
return;
- planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
&i845_wm_info,
- i845_get_fifo_size(dev_priv, PLANE_A),
+ i845_get_fifo_size(display, PLANE_A),
4, pessimal_latency_ns);
- fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
+ fwater_lo = intel_de_read(display, FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: %d\n", planea_wm);
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+ intel_de_write(display, FW_BLC, fwater_lo);
}

/* latency must be in 0.1us units. */
@@ -2534,24 +2532,24 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
}

static unsigned int
-ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
+ilk_display_fifo_size(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
return 3072;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
return 768;
else
return 512;
}

static unsigned int
-ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ilk_plane_wm_reg_max(struct intel_display *display,
int level, bool is_sprite)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
/* BDW primary/sprite plane watermarks */
return level == 0 ? 255 : 2047;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
/* IVB/HSW primary/sprite plane watermarks */
return level == 0 ? 127 : 1023;
else if (!is_sprite)
@@ -2563,30 +2561,30 @@ ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
}

static unsigned int
-ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
+ilk_cursor_wm_reg_max(struct intel_display *display, int level)
{
- if (DISPLAY_VER(dev_priv) >= 7)
+ if (DISPLAY_VER(display) >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
}

-static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
+static unsigned int ilk_fbc_wm_reg_max(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
return 31;
else
return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_plane_wm_max(struct intel_display *display,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
+ unsigned int fifo_size = ilk_display_fifo_size(display);
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
@@ -2594,14 +2592,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_NUM_PIPES(dev_priv);
+ fifo_size /= INTEL_NUM_PIPES(display);
/*
* For some reason the non self refresh
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (DISPLAY_VER(dev_priv) < 7)
+ if (DISPLAY_VER(display) < 7)
fifo_size /= 2;
}
@@ -2617,11 +2615,11 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
}
/* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
+ return min(fifo_size, ilk_plane_wm_reg_max(display, level, is_sprite));
}

/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_cursor_wm_max(struct intel_display *display,
int level,
const struct intel_wm_config *config)
{
@@ -2630,32 +2628,32 @@ static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev_priv, level);
+ return ilk_cursor_wm_reg_max(display, level);
}

-static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_maximums(struct intel_display *display,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
- max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
- max->cur = ilk_cursor_wm_max(dev_priv, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+ max->pri = ilk_plane_wm_max(display, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(display, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(display, level, config);
+ max->fbc = ilk_fbc_wm_reg_max(display);
}

-static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_reg_maximums(struct intel_display *display,
int level,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
- max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+ max->pri = ilk_plane_wm_reg_max(display, level, false);
+ max->spr = ilk_plane_wm_reg_max(display, level, true);
+ max->cur = ilk_cursor_wm_reg_max(display, level);
+ max->fbc = ilk_fbc_wm_reg_max(display);
}

-static bool ilk_validate_wm_level(struct drm_i915_private *i915,
+static bool ilk_validate_wm_level(struct intel_display *display,
int level,
const struct ilk_wm_maximums *max,
struct intel_wm_level *result)
@@ -2679,15 +2677,15 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915,
*/
if (level == 0 && !result->enable) {
if (result->pri_val > max->pri)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Primary WM%d too large %u (max %u)\n",
level, result->pri_val, max->pri);
if (result->spr_val > max->spr)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Sprite WM%d too large %u (max %u)\n",
level, result->spr_val, max->spr);
if (result->cur_val > max->cur)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Cursor WM%d too large %u (max %u)\n",
level, result->cur_val, max->cur);
@@ -2700,7 +2698,7 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915,
return ret;
}

-static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_level(struct intel_display *display,
const struct intel_crtc *crtc,
int level,
struct intel_crtc_state *crtc_state,
@@ -2709,9 +2707,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
- u16 pri_latency = dev_priv->display.wm.pri_latency[level];
- u16 spr_latency = dev_priv->display.wm.spr_latency[level];
- u16 cur_latency = dev_priv->display.wm.cur_latency[level];
+ u16 pri_latency = display->wm.pri_latency[level];
+ u16 spr_latency = display->wm.spr_latency[level];
+ u16 cur_latency = display->wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
@@ -2735,11 +2733,12 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}

-static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void hsw_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u64 sskpd;
- i915->display.wm.num_levels = 5;
+ display->wm.num_levels = 5;
sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
@@ -2752,11 +2751,12 @@ static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
}

-static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void snb_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 sskpd;
- i915->display.wm.num_levels = 4;
+ display->wm.num_levels = 4;
sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
@@ -2766,11 +2766,12 @@ static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
}

-static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void ilk_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 mltr;
- i915->display.wm.num_levels = 3;
+ display->wm.num_levels = 3;
mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
@@ -2780,24 +2781,21 @@ static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
}

-static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
+static void intel_fixup_spr_wm_latency(struct intel_display *display, u16 wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
+ if (DISPLAY_VER(display) == 5)
wm[0] = 13;
}

-static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
+static void intel_fixup_cur_wm_latency(struct intel_display *display, u16 wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
+ if (DISPLAY_VER(display) == 5)
wm[0] = 13;
}

-static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5], u16 min)
+static bool ilk_increase_wm_latency(struct intel_display *display, u16 wm[5], u16 min)
{
int level;
@@ -2805,13 +2803,13 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
return false;
wm[0] = max(wm[0], min);
- for (level = 1; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 1; level < display->wm.num_levels; level++)
wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
return true;
}

-static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_latency_quirk(struct intel_display *display)
{
bool changed;
@@ -2819,21 +2817,21 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
* The BIOS provided WM memory latency values are often
* inadequate for high resolution displays. Adjust them.
*/
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
+ changed = ilk_increase_wm_latency(display, display->wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(display, display->wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(display, display->wm.cur_latency, 12);
if (!changed)
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
}

-static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_lp3_irq_quirk(struct intel_display *display)
{
/*
* On some SNB machines (Thinkpad X220 Tablet at least)
@@ -2846,50 +2844,50 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
* interrupts only. To play it safe we disable LP3
* watermarks entirely.
*/
- if (dev_priv->display.wm.pri_latency[3] == 0 &&
- dev_priv->display.wm.spr_latency[3] == 0 &&
- dev_priv->display.wm.cur_latency[3] == 0)
+ if (display->wm.pri_latency[3] == 0 &&
+ display->wm.spr_latency[3] == 0 &&
+ display->wm.cur_latency[3] == 0)
return;
- dev_priv->display.wm.pri_latency[3] = 0;
- dev_priv->display.wm.spr_latency[3] = 0;
- dev_priv->display.wm.cur_latency[3] = 0;
+ display->wm.pri_latency[3] = 0;
+ display->wm.spr_latency[3] = 0;
+ display->wm.cur_latency[3] = 0;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"LP3 watermarks disabled due to potential for lost interrupts\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
}

-static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void ilk_setup_wm_latency(struct intel_display *display)
{
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- else if (DISPLAY_VER(dev_priv) >= 6)
- snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ if (display->platform.broadwell || display->platform.haswell)
+ hsw_read_wm_latency(display, display->wm.pri_latency);
+ else if (DISPLAY_VER(display) >= 6)
+ snb_read_wm_latency(display, display->wm.pri_latency);
else
- ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ ilk_read_wm_latency(display, display->wm.pri_latency);
- memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
- memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
+ memcpy(display->wm.spr_latency, display->wm.pri_latency,
+ sizeof(display->wm.pri_latency));
+ memcpy(display->wm.cur_latency, display->wm.pri_latency,
+ sizeof(display->wm.pri_latency));
- intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
- intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
+ intel_fixup_spr_wm_latency(display, display->wm.spr_latency);
+ intel_fixup_cur_wm_latency(display, display->wm.cur_latency);
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
- if (DISPLAY_VER(dev_priv) == 6) {
- snb_wm_latency_quirk(dev_priv);
- snb_wm_lp3_irq_quirk(dev_priv);
+ if (DISPLAY_VER(display) == 6) {
+ snb_wm_latency_quirk(display);
+ snb_wm_lp3_irq_quirk(display);
}
}

-static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
+static bool ilk_validate_pipe_wm(struct intel_display *display,
struct intel_pipe_wm *pipe_wm)
{
/* LP0 watermark maximums depend on this pipe alone */
@@ -2901,11 +2899,11 @@ static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
struct ilk_wm_maximums max;
/* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_compute_wm_maximums(display, 0, &config, INTEL_DDB_PART_1_2, &max);
/* At least LP0 must be valid */
- if (!ilk_validate_wm_level(dev_priv, 0, &max, &pipe_wm->wm[0])) {
- drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
+ if (!ilk_validate_wm_level(display, 0, &max, &pipe_wm->wm[0])) {
+ drm_dbg_kms(display->drm, "LP0 watermark invalid\n");
return false;
}
@@ -2916,7 +2914,7 @@ static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_pipe_wm *pipe_wm;
@@ -2943,10 +2941,10 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
- usable_level = dev_priv->display.wm.num_levels - 1;
+ usable_level = display->wm.num_levels - 1;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (DISPLAY_VER(dev_priv) < 7 && pipe_wm->sprites_enabled)
+ if (DISPLAY_VER(display) < 7 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2954,18 +2952,18 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
+ ilk_compute_wm_level(display, crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
- if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
+ if (!ilk_validate_pipe_wm(display, pipe_wm))
return -EINVAL;
- ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+ ilk_compute_wm_reg_maximums(display, 1, &max);
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
+ ilk_compute_wm_level(display, crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -2973,7 +2971,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
* register maximums since such watermarks are
* always invalid.
*/
- if (!ilk_validate_wm_level(dev_priv, level, &max, wm)) {
+ if (!ilk_validate_wm_level(display, level, &max, wm)) {
memset(wm, 0, sizeof(*wm));
break;
}
@@ -2990,7 +2988,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
@@ -3015,7 +3013,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
intermediate->sprites_enabled |= active->sprites_enabled;
intermediate->sprites_scaled |= active->sprites_scaled;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct intel_wm_level *intermediate_wm = &intermediate->wm[level];
const struct intel_wm_level *active_wm = &active->wm[level];
@@ -3036,7 +3034,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
* there's no safe way to transition from the old state to
* the new state, so we need to fail the atomic transaction.
*/
- if (!ilk_validate_pipe_wm(dev_priv, intermediate))
+ if (!ilk_validate_pipe_wm(display, intermediate))
return -EINVAL;
/*
@@ -3068,7 +3066,7 @@ static int ilk_compute_watermarks(struct intel_atomic_state *state,
/*
* Merge the watermarks from all active pipes for a specific level.
*/
-static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
+static void ilk_merge_wm_level(struct intel_display *display,
int level,
struct intel_wm_level *ret_wm)
{
@@ -3076,7 +3074,7 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
ret_wm->enable = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];
@@ -3101,31 +3099,31 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
/*
* Merge all low power watermarks for all active pipes.
*/
-static void ilk_wm_merge(struct drm_i915_private *dev_priv,
+static void ilk_wm_merge(struct intel_display *display,
const struct intel_wm_config *config,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
- int level, num_levels = dev_priv->display.wm.num_levels;
+ int level, num_levels = display->wm.num_levels;
int last_enabled_level = num_levels - 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((DISPLAY_VER(dev_priv) < 7 || IS_IVYBRIDGE(dev_priv)) &&
+ if ((DISPLAY_VER(display) < 7 || display->platform.ivybridge) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
+ merged->fbc_wm_enabled = DISPLAY_VER(display) >= 6;
/* merge each WM1+ level */
for (level = 1; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
- ilk_merge_wm_level(dev_priv, level, wm);
+ ilk_merge_wm_level(display, level, wm);
if (level > last_enabled_level)
wm->enable = false;
- else if (!ilk_validate_wm_level(dev_priv, level, max, wm))
+ else if (!ilk_validate_wm_level(display, level, max, wm))
/* make sure all following levels get disabled */
last_enabled_level = level - 1;
@@ -3141,8 +3139,8 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
}
/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
- if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
- dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) {
+ if (DISPLAY_VER(display) == 5 && HAS_FBC(display) &&
+ display->params.enable_fbc && !merged->fbc_wm_enabled) {
for (level = 2; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3158,16 +3156,16 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
}

/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+static unsigned int ilk_wm_lp_latency(struct intel_display *display,
int level)
{
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (display->platform.haswell || display->platform.broadwell)
return 2 * level;
else
- return dev_priv->display.wm.pri_latency[level];
+ return display->wm.pri_latency[level];
}

-static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_results(struct intel_display *display,
const struct intel_pipe_wm *merged,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
@@ -3191,14 +3189,14 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* disabled. Doing otherwise could cause underruns.
*/
results->wm_lp[wm_lp - 1] =
- WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
+ WM_LP_LATENCY(ilk_wm_lp_latency(display, level)) |
WM_LP_PRIMARY(r->pri_val) |
WM_LP_CURSOR(r->cur_val);
if (r->enable)
results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
else
results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
@@ -3209,19 +3207,19 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (DISPLAY_VER(dev_priv) < 7 && r->spr_val) {
- drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
+ if (DISPLAY_VER(display) < 7 && r->spr_val) {
+ drm_WARN_ON(display->drm, wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
}
}
/* LP0 register values */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
enum pipe pipe = crtc->pipe;
const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
const struct intel_wm_level *r = &pipe_wm->wm[0];
- if (drm_WARN_ON(&dev_priv->drm, !r->enable))
+ if (drm_WARN_ON(display->drm, !r->enable))
continue;
results->wm_pipe[pipe] =
@@ -3236,13 +3234,13 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* case both are at the same level. Prefer r1 in case they're the same.
*/
static struct intel_pipe_wm *
-ilk_find_best_result(struct drm_i915_private *dev_priv,
+ilk_find_best_result(struct intel_display *display,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
int level, level1 = 0, level2 = 0;
- for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 1; level < display->wm.num_levels; level++) {
if (r1->wm[level].enable)
level1 = level;
if (r2->wm[level].enable)
@@ -3268,7 +3266,7 @@ ilk_find_best_result(struct drm_i915_private *dev_priv,
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)

-static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
+static unsigned int ilk_compute_wm_dirty(struct intel_display *display,
const struct ilk_wm_values *old,
const struct ilk_wm_values *new)
{
@@ -3276,7 +3274,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
enum pipe pipe;
int wm_lp;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
dirty |= WM_DIRTY_PIPE(pipe);
/* Must disable LP1+ watermarks too */
@@ -3314,25 +3312,25 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
return dirty;
}

-static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+static bool _ilk_disable_lp_wm(struct intel_display *display,
unsigned int dirty)
{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *previous = &display->wm.hw;
bool changed = false;
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
previous->wm_lp[2] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
+ intel_de_write(display, WM3_LP_ILK, previous->wm_lp[2]);
changed = true;
}
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
previous->wm_lp[1] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
+ intel_de_write(display, WM2_LP_ILK, previous->wm_lp[1]);
changed = true;
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
previous->wm_lp[0] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
+ intel_de_write(display, WM1_LP_ILK, previous->wm_lp[0]);
changed = true;
}
@@ -3348,73 +3346,73 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
* The spec says we shouldn't write when we don't need, because every write
* causes WMs to be re-evaluated, expending some power.
*/
-static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+static void ilk_write_wm_values(struct intel_display *display,
struct ilk_wm_values *results)
{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *previous = &display->wm.hw;
unsigned int dirty;
- dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
+ dirty = ilk_compute_wm_dirty(display, previous, results);
if (!dirty)
return;
- _ilk_disable_lp_wm(dev_priv, dirty);
+ _ilk_disable_lp_wm(display, dirty);
if (dirty & WM_DIRTY_PIPE(PIPE_A))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
if (dirty & WM_DIRTY_PIPE(PIPE_B))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
if (dirty & WM_DIRTY_PIPE(PIPE_C))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- WM_MISC_DATA_PARTITION_5_6);
+ if (display->platform.haswell || display->platform.broadwell)
+ intel_de_rmw(display, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ WM_MISC_DATA_PARTITION_5_6);
else
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- DISP_DATA_PARTITION_5_6);
+ intel_de_rmw(display, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ DISP_DATA_PARTITION_5_6);
}
if (dirty & WM_DIRTY_FBC)
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
- results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
+ intel_de_rmw(display, DISP_ARB_CTL, DISP_FBC_WM_DIS,
+ results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
if (dirty & WM_DIRTY_LP(1) &&
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
- intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
+ intel_de_write(display, WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (DISPLAY_VER(dev_priv) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
+ intel_de_write(display, WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
+ intel_de_write(display, WM3S_LP_IVB, results->wm_lp_spr[2]);
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
+ intel_de_write(display, WM1_LP_ILK, results->wm_lp[0]);
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
+ intel_de_write(display, WM2_LP_ILK, results->wm_lp[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
+ intel_de_write(display, WM3_LP_ILK, results->wm_lp[2]);
- dev_priv->display.wm.hw = *results;
+ display->wm.hw = *results;
}

-bool ilk_disable_cxsr(struct drm_i915_private *dev_priv)
+bool ilk_disable_cxsr(struct intel_display *display)
{
- return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+ return _ilk_disable_lp_wm(display, WM_DIRTY_LP_ALL);
}

-static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_config(struct intel_display *display,
struct intel_wm_config *config)
{
struct intel_crtc *crtc;
/* Compute the currently _active_ config */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
if (!wm->pipe_enabled)
@@ -3426,7 +3424,7 @@ static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
}
}
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_program_watermarks(struct intel_display *display)
{
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
@@ -3434,18 +3432,18 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
- ilk_compute_wm_config(dev_priv, &config);
+ ilk_compute_wm_config(display, &config);
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
+ ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(display, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
- if (DISPLAY_VER(dev_priv) >= 7 &&
+ if (DISPLAY_VER(display) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
+ ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(display, &config, &max, &lp_wm_5_6);
- best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
+ best_lp_wm = ilk_find_best_result(display, &lp_wm_1_2, &lp_wm_5_6);
} else {
best_lp_wm = &lp_wm_1_2;
}
@@ -3453,50 +3451,49 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
+ ilk_compute_wm_results(display, best_lp_wm, partitioning, &results);
- ilk_write_wm_values(dev_priv, &results);
+ ilk_write_wm_values(display, &results);
}

static void ilk_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ ilk_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}

static void ilk_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ ilk_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}

static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct intel_display *display = to_intel_display(crtc);
+ struct ilk_wm_values *hw = &display->wm.hw;
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
- hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
+ hw->wm_pipe[pipe] = intel_de_read(display, WM0_PIPE_ILK(pipe));
memset(active, 0, sizeof(*active));
@@ -3523,7 +3520,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
* should be marked as enabled but zeroed,
* which is what we'd compute them to.
*/
- for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
active->wm[level].enable = true;
}
@@ -3572,7 +3569,7 @@ static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
* through the atomic check code to calculate new watermark values in the
* state object.
*/
-void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
+void ilk_wm_sanitize(struct intel_display *display)
{
struct drm_atomic_state *state;
struct intel_atomic_state *intel_state;
@@ -3583,14 +3580,14 @@ void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.funcs.wm->optimize_watermarks)
+ if (!display->funcs.wm->optimize_watermarks)
return;
- if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
+ if (drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 9))
return;
- state = drm_atomic_state_alloc(&dev_priv->drm);
- if (drm_WARN_ON(&dev_priv->drm, !state))
+ state = drm_atomic_state_alloc(display->drm);
+ if (drm_WARN_ON(display->drm, !state))
return;
intel_state = to_intel_atomic_state(state);
@@ -3606,14 +3603,14 @@ retry:
* intermediate watermarks (since we don't trust the current
* watermarks).
*/
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(display))
intel_state->skip_intermediate_wm = true;
ret = ilk_sanitize_watermarks_add_affected(state);
if (ret)
goto fail;
- ret = intel_atomic_check(&dev_priv->drm, state);
+ ret = intel_atomic_check(display->drm, state);
if (ret)
goto fail;
@@ -3643,7 +3640,7 @@ fail:
* If this actually happens, we'll have to just leave the
* BIOS-programmed watermarks untouched and hope for the best.
*/
- drm_WARN(&dev_priv->drm, ret,
+ drm_WARN(display->drm, ret,
"Could not determine valid watermarks for inherited state\n");
drm_atomic_state_put(state);
@@ -3657,18 +3654,18 @@ fail:
#define _FW_WM_VLV(value, plane) \
(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
-static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_read_wm_values(struct intel_display *display,
struct g4x_wm_values *wm)
{
u32 tmp;
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ tmp = intel_de_read(display, DSPFW1(display));
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+ tmp = intel_de_read(display, DSPFW2(display));
wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
wm->sr.fbc = _FW_WM(tmp, FBC_SR);
wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
@@ -3676,21 +3673,21 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ tmp = intel_de_read(display, DSPFW3(display));
wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}
-static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_read_wm_values(struct intel_display *display,
struct vlv_wm_values *wm)
{
enum pipe pipe;
u32 tmp;
- for_each_pipe(dev_priv, pipe) {
- tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
+ for_each_pipe(display, pipe) {
+ tmp = intel_de_read(display, VLV_DDL(pipe));
wm->ddl[pipe].plane[PLANE_PRIMARY] =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
@@ -3702,34 +3699,34 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
}
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ tmp = intel_de_read(display, DSPFW1(display));
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+ tmp = intel_de_read(display, DSPFW2(display));
wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ tmp = intel_de_read(display, DSPFW3(display));
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
- if (IS_CHERRYVIEW(dev_priv)) {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
+ if (display->platform.cherryview) {
+ tmp = intel_de_read(display, DSPFW7_CHV);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
+ tmp = intel_de_read(display, DSPFW8_CHV);
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
+ tmp = intel_de_read(display, DSPFW9_CHV);
wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ tmp = intel_de_read(display, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
@@ -3741,11 +3738,11 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
} else {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
+ tmp = intel_de_read(display, DSPFW7);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ tmp = intel_de_read(display, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
@@ -3759,16 +3756,16 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
#undef _FW_WM
#undef _FW_WM_VLV
-static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void g4x_wm_get_hw_state(struct intel_display *display)
{
- struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values *wm = &display->wm.g4x;
struct intel_crtc *crtc;
- g4x_read_wm_values(dev_priv, wm);
+ g4x_read_wm_values(display, wm);
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ wm->cxsr = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct g4x_wm_state *active = &crtc->wm.active.g4x;
@@ -3833,7 +3830,7 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.g4x.optimal = *active;
crtc_state->wm.g4x.intermediate = *active;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
pipe_name(pipe),
wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -3841,26 +3838,25 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->pipe[pipe].plane[PLANE_SPRITE0]);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
- drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+ drm_dbg_kms(display->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
str_yes_no(wm->fbc_en));
}
-static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
+static void g4x_wm_sanitize(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
@@ -3873,7 +3869,7 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.g4x.raw[level];
@@ -3884,36 +3880,37 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
}
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int ret;
ret = _g4x_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
+ drm_WARN_ON(display->drm, ret);
crtc_state->wm.g4x.intermediate =
crtc_state->wm.g4x.optimal;
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
}
- g4x_program_watermarks(dev_priv);
+ g4x_program_watermarks(display);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_unlock(&display->wm.wm_mutex);
}
-static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void vlv_wm_get_hw_state(struct intel_display *display)
{
- struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct vlv_wm_values *wm = &display->wm.vlv;
struct intel_crtc *crtc;
u32 val;
- vlv_read_wm_values(dev_priv, wm);
+ vlv_read_wm_values(display, wm);
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->cxsr = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
@@ -3935,10 +3932,10 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
} else {
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
@@ -3948,7 +3945,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
vlv_punit_put(dev_priv);
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct vlv_wm_state *active = &crtc->wm.active.vlv;
@@ -3988,7 +3985,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.vlv.optimal = *active;
crtc_state->wm.vlv.intermediate = *active;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
pipe_name(pipe),
wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -3997,20 +3994,19 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->pipe[pipe].plane[PLANE_SPRITE1]);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
-static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
+static void vlv_wm_sanitize(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
@@ -4023,7 +4019,7 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[level];
@@ -4031,33 +4027,33 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
}
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int ret;
ret = _vlv_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
+ drm_WARN_ON(display->drm, ret);
crtc_state->wm.vlv.intermediate =
crtc_state->wm.vlv.optimal;
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
}
- vlv_program_watermarks(dev_priv);
+ vlv_program_watermarks(display);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_unlock(&display->wm.wm_mutex);
}
/*
* FIXME should probably kill this and improve
* the real watermark readout/sanitation instead
*/
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_init_lp_watermarks(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM3_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM2_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM1_LP_ILK, WM_LP_ENABLE, 0);
/*
* Don't touch WM_LP_SPRITE_ENABLE here.
@@ -4065,37 +4061,37 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
*/
}
-static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void ilk_wm_get_hw_state(struct intel_display *display)
{
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *hw = &display->wm.hw;
struct intel_crtc *crtc;
- ilk_init_lp_watermarks(dev_priv);
+ ilk_init_lp_watermarks(display);
- for_each_intel_crtc(&dev_priv->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
ilk_pipe_wm_get_hw_state(crtc);
- hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
- hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
- hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
+ hw->wm_lp[0] = intel_de_read(display, WM1_LP_ILK);
+ hw->wm_lp[1] = intel_de_read(display, WM2_LP_ILK);
+ hw->wm_lp[2] = intel_de_read(display, WM3_LP_ILK);
- hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
- if (DISPLAY_VER(dev_priv) >= 7) {
- hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
- hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
+ hw->wm_lp_spr[0] = intel_de_read(display, WM1S_LP_ILK);
+ if (DISPLAY_VER(display) >= 7) {
+ hw->wm_lp_spr[1] = intel_de_read(display, WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = intel_de_read(display, WM3S_LP_IVB);
}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
+ if (display->platform.haswell || display->platform.broadwell)
+ hw->partitioning = (intel_de_read(display, WM_MISC) &
WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
+ else if (display->platform.ivybridge)
+ hw->partitioning = (intel_de_read(display, DISP_ARB_CTL2) &
DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
hw->enable_fbc_wm =
- !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+ !(intel_de_read(display, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
static const struct intel_wm_funcs ilk_wm_funcs = {
@@ -4145,39 +4141,39 @@ static const struct intel_wm_funcs i845_wm_funcs = {
static const struct intel_wm_funcs nop_funcs = {
};
-void i9xx_wm_init(struct drm_i915_private *dev_priv)
+void i9xx_wm_init(struct intel_display *display)
{
/* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &ilk_wm_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &vlv_wm_funcs;
- } else if (IS_G4X(dev_priv)) {
- g4x_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &g4x_wm_funcs;
- } else if (IS_PINEVIEW(dev_priv)) {
- if (!pnv_get_cxsr_latency(dev_priv)) {
- drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ if (HAS_PCH_SPLIT(display)) {
+ ilk_setup_wm_latency(display);
+ display->funcs.wm = &ilk_wm_funcs;
+ } else if (display->platform.valleyview || display->platform.cherryview) {
+ vlv_setup_wm_latency(display);
+ display->funcs.wm = &vlv_wm_funcs;
+ } else if (display->platform.g4x) {
+ g4x_setup_wm_latency(display);
+ display->funcs.wm = &g4x_wm_funcs;
+ } else if (display->platform.pineview) {
+ if (!pnv_get_cxsr_latency(display)) {
+ drm_info(display->drm, "Unknown FSB/MEM, disabling CxSR\n");
/* Disable CxSR and never update its watermark again */
- intel_set_memory_cxsr(dev_priv, false);
- dev_priv->display.funcs.wm = &nop_funcs;
+ intel_set_memory_cxsr(display, false);
+ display->funcs.wm = &nop_funcs;
} else {
- dev_priv->display.funcs.wm = &pnv_wm_funcs;
+ display->funcs.wm = &pnv_wm_funcs;
}
- } else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->display.funcs.wm = &i965_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 2) {
- if (INTEL_NUM_PIPES(dev_priv) == 1)
- dev_priv->display.funcs.wm = &i845_wm_funcs;
+ } else if (DISPLAY_VER(display) == 4) {
+ display->funcs.wm = &i965_wm_funcs;
+ } else if (DISPLAY_VER(display) == 3) {
+ display->funcs.wm = &i9xx_wm_funcs;
+ } else if (DISPLAY_VER(display) == 2) {
+ if (INTEL_NUM_PIPES(display) == 1)
+ display->funcs.wm = &i845_wm_funcs;
else
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
+ display->funcs.wm = &i9xx_wm_funcs;
} else {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"unexpected fall-through in %s\n", __func__);
- dev_priv->display.funcs.wm = &nop_funcs;
+ display->funcs.wm = &nop_funcs;
}
}
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h
index 06ac37c6c94b..7bb363b2a756 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.h
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.h
@@ -8,28 +8,28 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_plane_state;
#ifdef I915
-bool ilk_disable_cxsr(struct drm_i915_private *i915);
-void ilk_wm_sanitize(struct drm_i915_private *i915);
-bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
-void i9xx_wm_init(struct drm_i915_private *i915);
+bool ilk_disable_cxsr(struct intel_display *display);
+void ilk_wm_sanitize(struct intel_display *display);
+bool intel_set_memory_cxsr(struct intel_display *display, bool enable);
+void i9xx_wm_init(struct intel_display *display);
#else
-static inline bool ilk_disable_cxsr(struct drm_i915_private *i915)
+static inline bool ilk_disable_cxsr(struct intel_display *display)
{
return false;
}
-static inline void ilk_wm_sanitize(struct drm_i915_private *i915)
+static inline void ilk_wm_sanitize(struct intel_display *display)
{
}
-static inline bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable)
+static inline bool intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
return false;
}
-static inline void i9xx_wm_init(struct drm_i915_private *i915)
+static inline void i9xx_wm_init(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 402b7b2e1829..ca7033251e91 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
@@ -1826,107 +1827,56 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
.transfer = gen11_dsi_host_transfer,
};
-#define ICL_PREPARE_CNT_MAX 0x7
-#define ICL_CLK_ZERO_CNT_MAX 0xf
-#define ICL_TRAIL_CNT_MAX 0x7
-#define ICL_TCLK_PRE_CNT_MAX 0x3
-#define ICL_TCLK_POST_CNT_MAX 0x7
-#define ICL_HS_ZERO_CNT_MAX 0xf
-#define ICL_EXIT_ZERO_CNT_MAX 0x7
-
static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns;
- u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
- u32 ths_prepare_ns, tclk_trail_ns;
- u32 hs_zero_cnt;
- u32 tclk_pre_cnt;
+ u32 tclk_prepare_esc_clk, tclk_zero_esc_clk, tclk_pre_esc_clk;
+ u32 ths_prepare_esc_clk, ths_zero_esc_clk, ths_exit_esc_clk;
tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
- tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
- ths_prepare_ns = max(mipi_config->ths_prepare,
- mipi_config->tclk_prepare);
-
/*
- * prepare cnt in escape clocks
- * this field represents a hexadecimal value with a precision
- * of 1.2 – i.e. the most significant bit is the integer
- * and the least significant 2 bits are fraction bits.
- * so, the field can represent a range of 0.25 to 1.75
+ * The clock and data lane prepare timing parameters are expressed in
+ * units of 1/4 escape clocks, and all the other timing parameters in
+ * escape clocks.
*/
- prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
- if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
- drm_dbg_kms(display->drm, "prepare_cnt out of range (%d)\n",
- prepare_cnt);
- prepare_cnt = ICL_PREPARE_CNT_MAX;
- }
+ tclk_prepare_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare * 4, tlpx_ns);
+ tclk_prepare_esc_clk = min(tclk_prepare_esc_clk, 7);
- /* clk zero count in escape clocks */
- clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
- ths_prepare_ns, tlpx_ns);
- if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
- clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
- }
+ tclk_zero_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+ mipi_config->tclk_prepare, tlpx_ns);
+ tclk_zero_esc_clk = min(tclk_zero_esc_clk, 15);
- /* trail cnt in escape clocks*/
- trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
- if (trail_cnt > ICL_TRAIL_CNT_MAX) {
- drm_dbg_kms(display->drm, "trail_cnt out of range (%d)\n",
- trail_cnt);
- trail_cnt = ICL_TRAIL_CNT_MAX;
- }
+ tclk_pre_esc_clk = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+ tclk_pre_esc_clk = min(tclk_pre_esc_clk, 3);
- /* tclk pre count in escape clocks */
- tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
- if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
- tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
- }
+ ths_prepare_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare * 4, tlpx_ns);
+ ths_prepare_esc_clk = min(ths_prepare_esc_clk, 7);
- /* hs zero cnt in escape clocks */
- hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
- ths_prepare_ns, tlpx_ns);
- if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm, "hs_zero_cnt out of range (%d)\n",
- hs_zero_cnt);
- hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
- }
+ ths_zero_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+ mipi_config->ths_prepare, tlpx_ns);
+ ths_zero_esc_clk = min(ths_zero_esc_clk, 15);
- /* hs exit zero cnt in escape clocks */
- exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
- if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "exit_zero_cnt out of range (%d)\n",
- exit_zero_cnt);
- exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
- }
+ ths_exit_esc_clk = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+ ths_exit_esc_clk = min(ths_exit_esc_clk, 7);
/* clock lane dphy timings */
intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
- CLK_PREPARE(prepare_cnt) |
+ CLK_PREPARE(tclk_prepare_esc_clk) |
CLK_ZERO_OVERRIDE |
- CLK_ZERO(clk_zero_cnt) |
+ CLK_ZERO(tclk_zero_esc_clk) |
CLK_PRE_OVERRIDE |
- CLK_PRE(tclk_pre_cnt) |
- CLK_TRAIL_OVERRIDE |
- CLK_TRAIL(trail_cnt));
+ CLK_PRE(tclk_pre_esc_clk));
/* data lanes dphy timings */
intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
- HS_PREPARE(prepare_cnt) |
+ HS_PREPARE(ths_prepare_esc_clk) |
HS_ZERO_OVERRIDE |
- HS_ZERO(hs_zero_cnt) |
- HS_TRAIL_OVERRIDE |
- HS_TRAIL(trail_cnt) |
+ HS_ZERO(ths_zero_esc_clk) |
HS_EXIT_OVERRIDE |
- HS_EXIT(exit_zero_cnt));
+ HS_EXIT(ths_exit_esc_clk));
intel_dsi_log_params(intel_dsi);
}
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index 55f3ae1e68c9..c176bdbc19a3 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -5,12 +5,15 @@
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
#include "intel_alpm.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
+#include "intel_psr.h"
#include "intel_psr_regs.h"
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp)
@@ -23,7 +26,7 @@ bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp)
return intel_dp->alpm_dpcd & DP_ALPM_AUX_LESS_CAP;
}
-void intel_alpm_init_dpcd(struct intel_dp *intel_dp)
+void intel_alpm_init(struct intel_dp *intel_dp)
{
u8 dpcd;
@@ -31,6 +34,7 @@ void intel_alpm_init_dpcd(struct intel_dp *intel_dp)
return;
intel_dp->alpm_dpcd = dpcd;
+ mutex_init(&intel_dp->alpm_parameters.lock);
}
/*
@@ -276,6 +280,14 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
int waketime_in_lines, first_sdp_position;
int context_latency, guardband;
+ if (intel_dp->alpm_parameters.lobf_disable_debug) {
+ drm_dbg_kms(display->drm, "LOBF is disabled by debug flag\n");
+ return;
+ }
+
+ if (intel_dp->alpm_parameters.sink_alpm_error)
+ return;
+
if (!intel_dp_is_edp(intel_dp))
return;
@@ -288,6 +300,10 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
if (crtc_state->has_psr)
return;
+ if (crtc_state->vrr.vmin != crtc_state->vrr.vmax ||
+ crtc_state->vrr.vmin != crtc_state->vrr.flipline)
+ return;
+
if (!(intel_alpm_aux_wake_supported(intel_dp) ||
intel_alpm_aux_less_wake_supported(intel_dp)))
return;
@@ -316,15 +332,16 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
enum port port = dp_to_dig_port(intel_dp)->base.port;
u32 alpm_ctl;
- if (DISPLAY_VER(display) < 20 ||
- (!intel_dp->psr.sel_update_enabled && !intel_dp_is_edp(intel_dp)))
+ if (DISPLAY_VER(display) < 20 || (!intel_psr_needs_alpm(intel_dp, crtc_state) &&
+ !crtc_state->has_lobf))
return;
+ mutex_lock(&intel_dp->alpm_parameters.lock);
/*
* Panel Replay on eDP is always using ALPM aux less. I.e. no need to
* check panel support at this point.
*/
- if ((intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) ||
+ if ((crtc_state->has_panel_replay && intel_dp_is_edp(intel_dp)) ||
(crtc_state->has_lobf && intel_alpm_aux_less_wake_supported(intel_dp))) {
alpm_ctl = ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE |
@@ -353,18 +370,107 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
ALPM_CTL_EXTENDED_FAST_WAKE_TIME(intel_dp->alpm_parameters.fast_wake_lines);
}
- if (crtc_state->has_lobf)
+ if (crtc_state->has_lobf) {
alpm_ctl |= ALPM_CTL_LOBF_ENABLE;
+ drm_dbg_kms(display->drm, "Link off between frames (LOBF) enabled\n");
+ }
alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(intel_dp->alpm_parameters.check_entry_lines);
intel_de_write(display, ALPM_CTL(display, cpu_transcoder), alpm_ctl);
+ mutex_unlock(&intel_dp->alpm_parameters.lock);
}
void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
lnl_alpm_configure(intel_dp, crtc_state);
+ intel_dp->alpm_parameters.transcoder = crtc_state->cpu_transcoder;
+}
+
+void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_encoder *encoder;
+
+ if (DISPLAY_VER(display) < 20)
+ return;
+
+ if (crtc_state->has_lobf || crtc_state->has_lobf == old_crtc_state->has_lobf)
+ return;
+
+ for_each_intel_encoder_mask(display->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp;
+
+ if (!intel_encoder_is_dp(encoder))
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ if (!intel_dp_is_edp(intel_dp))
+ continue;
+
+ if (old_crtc_state->has_lobf) {
+ mutex_lock(&intel_dp->alpm_parameters.lock);
+ intel_de_write(display, ALPM_CTL(display, cpu_transcoder), 0);
+ drm_dbg_kms(display->drm, "Link off between frames (LOBF) disabled\n");
+ mutex_unlock(&intel_dp->alpm_parameters.lock);
+ }
+ }
+}
+
+void intel_alpm_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 val;
+
+ if (!intel_psr_needs_alpm(intel_dp, crtc_state) && !crtc_state->has_lobf)
+ return;
+
+ val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
+
+ if (crtc_state->has_panel_replay || (crtc_state->has_lobf &&
+ intel_alpm_aux_less_wake_supported(intel_dp)))
+ val |= DP_ALPM_MODE_AUX_LESS;
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
+}
+
+void intel_alpm_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_encoder *encoder;
+
+ if (crtc_state->has_psr || !crtc_state->has_lobf ||
+ crtc_state->has_lobf == old_crtc_state->has_lobf)
+ return;
+
+ for_each_intel_encoder_mask(display->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp;
+
+ if (!intel_encoder_is_dp(encoder))
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ if (intel_dp_is_edp(intel_dp)) {
+ intel_alpm_enable_sink(intel_dp, crtc_state);
+ intel_alpm_configure(intel_dp, crtc_state);
+ }
+ }
}
static int i915_edp_lobf_info_show(struct seq_file *m, void *data)
@@ -403,6 +509,32 @@ out:
DEFINE_SHOW_ATTRIBUTE(i915_edp_lobf_info);
+static int
+i915_edp_lobf_debug_get(void *data, u64 *val)
+{
+ struct intel_connector *connector = data;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+
+ *val = intel_dp->alpm_parameters.lobf_disable_debug;
+
+ return 0;
+}
+
+static int
+i915_edp_lobf_debug_set(void *data, u64 val)
+{
+ struct intel_connector *connector = data;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+
+ intel_dp->alpm_parameters.lobf_disable_debug = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_lobf_debug_fops,
+ i915_edp_lobf_debug_get, i915_edp_lobf_debug_set,
+ "%llu\n");
+
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
@@ -412,6 +544,55 @@ void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
return;
+ debugfs_create_file("i915_edp_lobf_debug", 0644, root,
+ connector, &i915_edp_lobf_debug_fops);
+
debugfs_create_file("i915_edp_lobf_info", 0444, root,
connector, &i915_edp_lobf_info_fops);
}
+
+void intel_alpm_disable(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->alpm_parameters.transcoder;
+
+ if (DISPLAY_VER(display) < 20 || !intel_dp->alpm_dpcd)
+ return;
+
+ mutex_lock(&intel_dp->alpm_parameters.lock);
+
+ intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
+ ALPM_CTL_ALPM_ENABLE | ALPM_CTL_LOBF_ENABLE |
+ ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+
+ intel_de_rmw(display,
+ PORT_ALPM_CTL(cpu_transcoder),
+ PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+
+ drm_dbg_kms(display->drm, "Disabling ALPM\n");
+ mutex_unlock(&intel_dp->alpm_parameters.lock);
+}
+
+bool intel_alpm_get_error(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ u8 val;
+ int r;
+
+ r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
+ if (r != 1) {
+ drm_err(display->drm, "Error reading ALPM status\n");
+ return true;
+ }
+
+ if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+ drm_dbg_kms(display->drm, "ALPM lock timeout error\n");
+
+ /* Clearing error */
+ drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.h b/drivers/gpu/drm/i915/display/intel_alpm.h
index 8c409b10dce6..c9fe21e3e72c 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.h
+++ b/drivers/gpu/drm/i915/display/intel_alpm.h
@@ -12,8 +12,10 @@ struct intel_dp;
struct intel_crtc_state;
struct drm_connector_state;
struct intel_connector;
+struct intel_atomic_state;
+struct intel_crtc;
-void intel_alpm_init_dpcd(struct intel_dp *intel_dp);
+void intel_alpm_init(struct intel_dp *intel_dp);
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
@@ -21,7 +23,15 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
struct drm_connector_state *conn_state);
void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
+void intel_alpm_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_alpm_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector);
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp);
bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp);
+void intel_alpm_disable(struct intel_dp *intel_dp);
+bool intel_alpm_get_error(struct intel_dp *intel_dp);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 03dc54c802d3..e83feca5c9c9 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -33,16 +33,17 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_cdclk.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp_tunnel.h"
+#include "intel_fb.h"
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
-#include "intel_fb.h"
#include "skl_universal_plane.h"
/**
@@ -59,17 +60,16 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
struct drm_property *property,
u64 *val)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->display.properties.force_audio)
+ if (property == display->properties.force_audio)
*val = intel_conn_state->force_audio;
- else if (property == dev_priv->display.properties.broadcast_rgb)
+ else if (property == display->properties.broadcast_rgb)
*val = intel_conn_state->broadcast_rgb;
else {
- drm_dbg_atomic(&dev_priv->drm,
+ drm_dbg_atomic(display->drm,
"Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
@@ -92,22 +92,21 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_property *property,
u64 val)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->display.properties.force_audio) {
+ if (property == display->properties.force_audio) {
intel_conn_state->force_audio = val;
return 0;
}
- if (property == dev_priv->display.properties.broadcast_rgb) {
+ if (property == display->properties.broadcast_rgb) {
intel_conn_state->broadcast_rgb = val;
return 0;
}
- drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n",
+ drm_dbg_atomic(display->drm, "Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 7276179df878..1bcfa5f4fd63 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -174,11 +174,27 @@ bool intel_plane_needs_physical(struct intel_plane *plane)
DISPLAY_INFO(display)->cursor_needs_physical;
}
-bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier)
+bool intel_plane_can_async_flip(struct intel_plane *plane, u32 format,
+ u64 modifier)
{
+ if (intel_format_info_is_yuv_semiplanar(drm_format_info(format), modifier) ||
+ format == DRM_FORMAT_C8)
+ return false;
+
return plane->can_async_flip && plane->can_async_flip(modifier);
}
+bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
+ u32 format,
+ u64 modifier)
+{
+ if (!plane->funcs->format_mod_supported(plane, format, modifier))
+ return false;
+
+ return intel_plane_can_async_flip(to_intel_plane(plane),
+ format, modifier);
+}
+
unsigned int intel_adjusted_rate(const struct drm_rect *src,
const struct drm_rect *dst,
unsigned int rate)
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 6efac923dcbc..317320c32285 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -21,7 +21,8 @@ enum plane_id;
struct intel_plane *
intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id);
-bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier);
+bool intel_plane_can_async_flip(struct intel_plane *plane, u32 format,
+ u64 modifier);
unsigned int intel_adjusted_rate(const struct drm_rect *src,
const struct drm_rect *dst,
unsigned int rate);
@@ -89,5 +90,8 @@ int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
int intel_atomic_check_planes(struct intel_atomic_state *state);
u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state);
+bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
+ u32 format,
+ u64 modifier);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ea935a5d94c8..40d8bbd8107d 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -27,9 +27,9 @@
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/intel/i915_component.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_audio_regs.h"
@@ -587,19 +587,17 @@ static void ibx_audio_regs_init(struct intel_display *display,
enum pipe pipe,
struct ibx_audio_regs *regs)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->platform.valleyview || display->platform.cherryview) {
regs->hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
regs->aud_config = VLV_AUD_CFG(pipe);
regs->aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
- } else if (HAS_PCH_CPT(i915)) {
+ } else if (HAS_PCH_CPT(display)) {
regs->hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
regs->aud_config = CPT_AUD_CFG(pipe);
regs->aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
- } else if (HAS_PCH_IBX(i915)) {
+ } else if (HAS_PCH_IBX(display)) {
regs->hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
regs->aud_config = IBX_AUD_CFG(pipe);
regs->aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
@@ -889,12 +887,10 @@ static const struct intel_audio_funcs hsw_audio_funcs = {
*/
void intel_audio_hooks_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->platform.g4x)
display->funcs.audio = &g4x_audio_funcs;
else if (display->platform.valleyview || display->platform.cherryview ||
- HAS_PCH_CPT(i915) || HAS_PCH_IBX(i915))
+ HAS_PCH_CPT(display) || HAS_PCH_IBX(display))
display->funcs.audio = &ibx_audio_funcs;
else if (display->platform.haswell || DISPLAY_VER(display) >= 8)
display->funcs.audio = &hsw_audio_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 178dc6c8de80..5827da586003 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -10,12 +10,16 @@
#include <acpi/video.h>
-#include "i915_drv.h"
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_backlight_regs.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
#include "intel_dsi_dcs_backlight.h"
@@ -472,7 +476,6 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2;
@@ -485,7 +488,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
}
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
intel_de_rmw(display, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY,
panel->backlight.alternate_pwm_increment ?
LPT_PWM_GRANULARITY : 0);
@@ -502,7 +505,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
pch_ctl1 |= BLM_PCH_POLARITY;
/* After LPT, override is the default. */
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
@@ -901,11 +904,9 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- intel_wakeref_t wakeref;
int ret = 0;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ with_intel_display_rpm(display) {
u32 hw_level;
drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
@@ -1065,7 +1066,7 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 mul, clock;
@@ -1074,7 +1075,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
else
mul = 128;
- if (HAS_PCH_LPT_H(i915))
+ if (HAS_PCH_LPT_H(display))
clock = MHz(135); /* LPT:H */
else
clock = MHz(24); /* LPT:LP */
@@ -1231,12 +1232,11 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
bool alt, cpu_mode;
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
alt = intel_de_read(display, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
else
alt = intel_de_read(display, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
@@ -1260,7 +1260,7 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
- cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(i915) &&
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(display) &&
!(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
(cpu_ctl2 & BLM_PWM_ENABLE);
@@ -1467,15 +1467,13 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
static int cnp_num_backlight_controllers(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
return 2;
- if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_DG1)
return 1;
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
return 2;
return 1;
@@ -1483,14 +1481,12 @@ static int cnp_num_backlight_controllers(struct intel_display *display)
static bool cnp_backlight_controller_is_valid(struct intel_display *display, int controller)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (controller < 0 || controller >= cnp_num_backlight_controllers(display))
return false;
if (controller == 1 &&
- INTEL_PCH_TYPE(i915) >= PCH_ICP &&
- INTEL_PCH_TYPE(i915) <= PCH_ADP)
+ INTEL_PCH_TYPE(display) >= PCH_ICP &&
+ INTEL_PCH_TYPE(display) <= PCH_ADP)
return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
@@ -1819,7 +1815,6 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
struct intel_connector *connector =
container_of(panel, struct intel_connector, panel);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
@@ -1827,14 +1822,14 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
if (display->platform.geminilake || display->platform.broxton) {
panel->backlight.pwm_funcs = &bxt_pwm_funcs;
- } else if (INTEL_PCH_TYPE(i915) >= PCH_CNP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_CNP) {
panel->backlight.pwm_funcs = &cnp_pwm_funcs;
- } else if (INTEL_PCH_TYPE(i915) >= PCH_LPT_H) {
- if (HAS_PCH_LPT(i915))
+ } else if (INTEL_PCH_TYPE(display) >= PCH_LPT_H) {
+ if (HAS_PCH_LPT(display))
panel->backlight.pwm_funcs = &lpt_pwm_funcs;
else
panel->backlight.pwm_funcs = &spt_pwm_funcs;
- } else if (HAS_PCH_SPLIT(i915)) {
+ } else if (HAS_PCH_SPLIT(display)) {
panel->backlight.pwm_funcs = &pch_pwm_funcs;
} else if (display->platform.valleyview || display->platform.cherryview) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index a8d08d7d82b3..ba7b8938b17c 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -37,6 +37,7 @@
#include "i915_drv.h"
#include "intel_display.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
@@ -2244,28 +2245,27 @@ static const u8 adlp_ddc_pin_map[] = {
static u8 map_ddc_pin(struct intel_display *display, u8 vbt_pin)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const u8 *ddc_pin_map;
int i, n_entries;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL || display->platform.alderlake_p) {
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL || display->platform.alderlake_p) {
ddc_pin_map = adlp_ddc_pin_map;
n_entries = ARRAY_SIZE(adlp_ddc_pin_map);
} else if (display->platform.alderlake_s) {
ddc_pin_map = adls_ddc_pin_map;
n_entries = ARRAY_SIZE(adls_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_DG1) {
return vbt_pin;
- } else if (display->platform.rocketlake && INTEL_PCH_TYPE(i915) == PCH_TGP) {
+ } else if (display->platform.rocketlake && INTEL_PCH_TYPE(display) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
- } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(display) == 9) {
+ } else if (HAS_PCH_TGP(display) && DISPLAY_VER(display) == 9) {
ddc_pin_map = gen9bc_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
- } else if (HAS_PCH_CNP(i915)) {
+ } else if (HAS_PCH_CNP(display)) {
ddc_pin_map = cnp_ddc_pin_map;
n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
} else {
@@ -2864,8 +2864,6 @@ parse_general_definitions(struct intel_display *display)
static void
init_vbt_defaults(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
display->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* general features */
@@ -2882,7 +2880,7 @@ init_vbt_defaults(struct intel_display *display)
* clock for LVDS.
*/
display->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(display,
- !HAS_PCH_SPLIT(i915));
+ !HAS_PCH_SPLIT(display));
drm_dbg_kms(display->drm, "Set default to SSC at %d kHz\n",
display->vbt.lvds_ssc_freq);
}
@@ -3115,7 +3113,6 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
{
struct drm_i915_private *i915 = to_i915(display->drm);
const struct vbt_header *vbt = NULL;
- intel_wakeref_t wakeref;
vbt = firmware_get_vbt(display, sizep);
@@ -3126,12 +3123,12 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
* If the OpRegion does not have VBT, look in SPI flash
* through MMIO or PCI mapping
*/
- if (!vbt && IS_DGFX(i915))
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ if (!vbt && display->platform.dgfx)
+ with_intel_display_rpm(display)
vbt = oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash");
if (!vbt)
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_display_rpm(display)
vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM");
return vbt;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index f9841f0498c6..6cd7a011b8c4 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -24,7 +24,7 @@
/*
* Please use intel_vbt_defs.h for VBT private data, to hide and abstract away
* the VBT from the rest of the driver. Add the parsed, clean data to struct
- * intel_vbt_data within struct drm_i915_private.
+ * intel_vbt_data within struct intel_display.
*/
#ifndef _INTEL_BIOS_H_
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 048be2872247..a5dd2932b852 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -39,14 +39,15 @@ struct intel_qgv_info {
u8 deinterleave;
};
-static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int dg1_mchbar_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 dclk_ratio, dclk_reference;
u32 val;
- val = intel_uncore_read(&dev_priv->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
+ val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
if (val & DG1_QCLK_REFERENCE)
dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
@@ -54,18 +55,18 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
- val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+ val = intel_uncore_read(&i915->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
if (val & DG1_GEAR_TYPE)
sp->dclk *= 2;
if (sp->dclk == 0)
return -EINVAL;
- val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
+ val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);
- val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
+ val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);
@@ -74,22 +75,23 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int icl_pcode_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val = 0, val2 = 0;
u16 dclk;
int ret;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
&val, &val2);
if (ret)
return ret;
dclk = val & 0xffff;
- sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) >= 12 ? 500 : 0),
+ sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 500 : 0),
1000);
sp->t_rp = (val & 0xff0000) >> 16;
sp->t_rcd = (val & 0xff000000) >> 24;
@@ -102,14 +104,15 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
- struct intel_psf_gv_point *points)
+static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
+ struct intel_psf_gv_point *points)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val = 0;
int ret;
int i;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
if (ret)
return ret;
@@ -122,10 +125,10 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
+static u16 icl_qgv_points_mask(struct intel_display *display)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
u16 qgv_points = 0, psf_points = 0;
/*
@@ -142,49 +145,51 @@ static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
}
-static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask)
+static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
{
- return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) &
+ return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) &
ICL_PCODE_REQ_QGV_PT_MASK);
}
-int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+int icl_pcode_restrict_qgv_points(struct intel_display *display,
u32 points_mask)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return 0;
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ ret = skl_pcode_request(&i915->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
points_mask,
ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
1);
if (ret < 0) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to disable qgv points (0x%x) points: 0x%x\n",
ret, points_mask);
return ret;
}
- dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ?
+ display->sagv.status = is_sagv_enabled(display, points_mask) ?
I915_SAGV_ENABLED : I915_SAGV_DISABLED;
return 0;
}
-static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int mtl_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp, int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val, val2;
u16 dclk;
- val = intel_uncore_read(&dev_priv->uncore,
+ val = intel_uncore_read(&i915->uncore,
MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
- val2 = intel_uncore_read(&dev_priv->uncore,
+ val2 = intel_uncore_read(&i915->uncore,
MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
@@ -200,29 +205,30 @@ static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
}
static int
-intel_read_qgv_point_info(struct drm_i915_private *dev_priv,
+intel_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
- if (DISPLAY_VER(dev_priv) >= 14)
- return mtl_read_qgv_point_info(dev_priv, sp, point);
- else if (IS_DG1(dev_priv))
- return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point);
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_read_qgv_point_info(display, sp, point);
+ else if (display->platform.dg1)
+ return dg1_mchbar_read_qgv_point_info(display, sp, point);
else
- return icl_pcode_read_qgv_point_info(dev_priv, sp, point);
+ return icl_pcode_read_qgv_point_info(display, sp, point);
}
-static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
+static int icl_get_qgv_points(struct intel_display *display,
struct intel_qgv_info *qi,
bool is_y_tile)
{
- const struct dram_info *dram_info = &dev_priv->dram_info;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct dram_info *dram_info = &i915->dram_info;
int i, ret;
qi->num_points = dram_info->num_qgv_points;
qi->num_psf_points = dram_info->num_psf_gv_points;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = 4;
@@ -244,13 +250,14 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->deinterleave = 4;
break;
case INTEL_DRAM_GDDR:
+ case INTEL_DRAM_GDDR_ECC:
qi->channel_width = 32;
break;
default:
MISSING_CASE(dram_info->type);
return -EINVAL;
}
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = is_y_tile ? 8 : 4;
@@ -265,7 +272,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->deinterleave = is_y_tile ? 1 : 2;
break;
case INTEL_DRAM_LPDDR4:
- if (IS_ROCKETLAKE(dev_priv)) {
+ if (display->platform.rocketlake) {
qi->t_bl = 8;
qi->max_numchannels = 4;
qi->channel_width = 32;
@@ -284,39 +291,39 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->max_numchannels = 1;
break;
}
- } else if (DISPLAY_VER(dev_priv) == 11) {
- qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
+ } else if (DISPLAY_VER(display) == 11) {
+ qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 4 : 8;
qi->max_numchannels = 1;
}
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
qi->num_points > ARRAY_SIZE(qi->points)))
qi->num_points = ARRAY_SIZE(qi->points);
for (i = 0; i < qi->num_points; i++) {
struct intel_qgv_point *sp = &qi->points[i];
- ret = intel_read_qgv_point_info(dev_priv, sp, i);
+ ret = intel_read_qgv_point_info(display, sp, i);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "Could not read QGV %d info\n", i);
+ drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i);
return ret;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
sp->t_rcd, sp->t_rc);
}
if (qi->num_psf_points > 0) {
- ret = adls_pcode_read_psf_gv_point_info(dev_priv, qi->psf_points);
+ ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
if (ret) {
- drm_err(&dev_priv->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
+ drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
qi->num_psf_points = 0;
}
for (i = 0; i < qi->num_psf_points; i++)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSF GV %d: CLK=%d \n",
i, qi->psf_points[i].clk);
}
@@ -398,20 +405,34 @@ static const struct intel_sa_info xe2_hpd_sa_info = {
/* Other values not used by simplified algorithm */
};
-static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
+ .derating = 45,
+ .deprogbwlimit = 53,
+ /* Other values not used by simplified algorithm */
+};
+
+static const struct intel_sa_info xe3lpd_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 65, /* GB/s */
+ .displayrtids = 256,
+ .derating = 10,
+};
+
+static int icl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, i915->dram_info.num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw;
- int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, &qi, is_y_tile);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
@@ -422,7 +443,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
int clpchgroup;
int j;
@@ -449,7 +470,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->deratedbw[j] = min(maxdebw,
bw * (100 - sa->derating) / 100);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
i, j, bi->num_planes, bi->deratedbw[j]);
}
@@ -460,44 +481,45 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+static int tgl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
- const struct dram_info *dram_info = &dev_priv->dram_info;
+ const struct dram_info *dram_info = &i915->dram_info;
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, dram_info->num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw, peakbw;
int clperchgroup;
- int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, &qi, is_y_tile);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
- if (DISPLAY_VER(dev_priv) < 14 &&
+ if (DISPLAY_VER(display) < 14 &&
(dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5))
num_channels *= 2;
qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
- if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12)
+ if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12)
qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
- if (DISPLAY_VER(dev_priv) >= 12 && num_channels > qi.max_numchannels)
- drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels.");
+ if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels)
+ drm_warn(display->drm, "Number of channels exceeds max number of channels.");
if (qi.max_numchannels != 0)
num_channels = min_t(u8, num_channels, qi.max_numchannels);
@@ -514,7 +536,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
struct intel_bw_info *bi_next;
int clpchgroup;
int j;
@@ -522,7 +544,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
if (i < num_groups - 1) {
- bi_next = &dev_priv->display.bw.max[i + 1];
+ bi_next = &display->bw.max[i + 1];
if (clpchgroup < clperchgroup)
bi_next->num_planes = (ipqdepth - clpchgroup) /
@@ -554,7 +576,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
num_channels *
qi.channel_width, 8);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
i, j, bi->num_planes, bi->deratedbw[j],
bi->peakbw[j]);
@@ -565,7 +587,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / PSF GV %d: num_planes=%d bw=%u\n",
i, j, bi->num_planes, bi->psf_bw[j]);
}
@@ -577,17 +599,17 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static void dg2_get_bw_info(struct drm_i915_private *i915)
+static void dg2_get_bw_info(struct intel_display *display)
{
- unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000;
- int num_groups = ARRAY_SIZE(i915->display.bw.max);
+ unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000;
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i;
/*
@@ -598,7 +620,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
* whereas DG2-G11 platforms have 38 GB/s.
*/
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &i915->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
bi->num_planes = 1;
/* Need only one dummy QGV point per group */
@@ -606,20 +628,21 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
bi->deratedbw[0] = deratedbw;
}
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
}
-static int xe2_hpd_get_bw_info(struct drm_i915_private *i915,
+static int xe2_hpd_get_bw_info(struct intel_display *display,
const struct intel_sa_info *sa)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
int num_channels = i915->dram_info.num_channels;
int peakbw, maxdebw;
int ret, i;
- ret = icl_get_qgv_points(i915, &qi, true);
+ ret = icl_get_qgv_points(display, &qi, true);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
@@ -631,33 +654,33 @@ static int xe2_hpd_get_bw_info(struct drm_i915_private *i915,
const struct intel_qgv_point *point = &qi.points[i];
int bw = num_channels * (qi.channel_width / 8) * point->dclk;
- i915->display.bw.max[0].deratedbw[i] =
+ display->bw.max[0].deratedbw[i] =
min(maxdebw, (100 - sa->derating) * bw / 100);
- i915->display.bw.max[0].peakbw[i] = bw;
+ display->bw.max[0].peakbw[i] = bw;
- drm_dbg_kms(&i915->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
- i, i915->display.bw.max[0].deratedbw[i],
- i915->display.bw.max[0].peakbw[i]);
+ drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
+ i, display->bw.max[0].deratedbw[i],
+ display->bw.max[0].peakbw[i]);
}
/* Bandwidth does not depend on # of planes; set all groups the same */
- i915->display.bw.max[0].num_planes = 1;
- i915->display.bw.max[0].num_qgv_points = qi.num_points;
- for (i = 1; i < ARRAY_SIZE(i915->display.bw.max); i++)
- memcpy(&i915->display.bw.max[i], &i915->display.bw.max[0],
- sizeof(i915->display.bw.max[0]));
+ display->bw.max[0].num_planes = 1;
+ display->bw.max[0].num_qgv_points = qi.num_points;
+ for (i = 1; i < ARRAY_SIZE(display->bw.max); i++)
+ memcpy(&display->bw.max[i], &display->bw.max[0],
+ sizeof(display->bw.max[0]));
/*
* Xe2_HPD should always have exactly two QGV points representing
* battery and plugged-in operation.
*/
- drm_WARN_ON(&i915->drm, qi.num_points != 2);
- i915->display.sagv.status = I915_SAGV_ENABLED;
+ drm_WARN_ON(display->drm, qi.num_points != 2);
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
+static unsigned int icl_max_bw_index(struct intel_display *display,
int num_planes, int qgv_point)
{
int i;
@@ -667,9 +690,9 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) {
+ for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) {
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[i];
+ &display->bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -685,7 +708,7 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
return UINT_MAX;
}
-static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
+static unsigned int tgl_max_bw_index(struct intel_display *display,
int num_planes, int qgv_point)
{
int i;
@@ -695,9 +718,9 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) {
+ for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) {
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[i];
+ &display->bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -713,52 +736,59 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
return 0;
}
-static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
+static unsigned int adl_psf_bw(struct intel_display *display,
int psf_gv_point)
{
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[0];
+ &display->bw.max[0];
return bi->psf_bw[psf_gv_point];
}
-static unsigned int icl_qgv_bw(struct drm_i915_private *i915,
+static unsigned int icl_qgv_bw(struct intel_display *display,
int num_active_planes, int qgv_point)
{
unsigned int idx;
- if (DISPLAY_VER(i915) >= 12)
- idx = tgl_max_bw_index(i915, num_active_planes, qgv_point);
+ if (DISPLAY_VER(display) >= 12)
+ idx = tgl_max_bw_index(display, num_active_planes, qgv_point);
else
- idx = icl_max_bw_index(i915, num_active_planes, qgv_point);
+ idx = icl_max_bw_index(display, num_active_planes, qgv_point);
- if (idx >= ARRAY_SIZE(i915->display.bw.max))
+ if (idx >= ARRAY_SIZE(display->bw.max))
return 0;
- return i915->display.bw.max[idx].deratedbw[qgv_point];
+ return display->bw.max[idx].deratedbw[qgv_point];
}
-void intel_bw_init_hw(struct drm_i915_private *dev_priv)
+void intel_bw_init_hw(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
+ const struct dram_info *dram_info = &to_i915(display->drm)->dram_info;
+
+ if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
- xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info);
- else if (DISPLAY_VER(dev_priv) >= 14)
- tgl_get_bw_info(dev_priv, &mtl_sa_info);
- else if (IS_DG2(dev_priv))
- dg2_get_bw_info(dev_priv);
- else if (IS_ALDERLAKE_P(dev_priv))
- tgl_get_bw_info(dev_priv, &adlp_sa_info);
- else if (IS_ALDERLAKE_S(dev_priv))
- tgl_get_bw_info(dev_priv, &adls_sa_info);
- else if (IS_ROCKETLAKE(dev_priv))
- tgl_get_bw_info(dev_priv, &rkl_sa_info);
- else if (DISPLAY_VER(dev_priv) == 12)
- tgl_get_bw_info(dev_priv, &tgl_sa_info);
- else if (DISPLAY_VER(dev_priv) == 11)
- icl_get_bw_info(dev_priv, &icl_sa_info);
+ if (DISPLAY_VER(display) >= 30)
+ tgl_get_bw_info(display, &xe3lpd_sa_info);
+ else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx &&
+ dram_info->type == INTEL_DRAM_GDDR_ECC)
+ xe2_hpd_get_bw_info(display, &xe2_hpd_ecc_sa_info);
+ else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx)
+ xe2_hpd_get_bw_info(display, &xe2_hpd_sa_info);
+ else if (DISPLAY_VER(display) >= 14)
+ tgl_get_bw_info(display, &mtl_sa_info);
+ else if (display->platform.dg2)
+ dg2_get_bw_info(display);
+ else if (display->platform.alderlake_p)
+ tgl_get_bw_info(display, &adlp_sa_info);
+ else if (display->platform.alderlake_s)
+ tgl_get_bw_info(display, &adls_sa_info);
+ else if (display->platform.rocketlake)
+ tgl_get_bw_info(display, &rkl_sa_info);
+ else if (DISPLAY_VER(display) == 12)
+ tgl_get_bw_info(display, &tgl_sa_info);
+ else if (DISPLAY_VER(display) == 11)
+ icl_get_bw_info(display, &icl_sa_info);
}
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
@@ -772,8 +802,8 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat
static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
unsigned int data_rate = 0;
enum plane_id plane_id;
@@ -787,7 +817,7 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
data_rate += crtc_state->data_rate[plane_id];
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
data_rate += crtc_state->data_rate_y[plane_id];
}
@@ -795,39 +825,38 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
}
/* "Maximum Pipe Read Bandwidth" */
-static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
+static int intel_bw_crtc_min_cdclk(struct intel_display *display,
+ unsigned int data_rate)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (DISPLAY_VER(i915) < 12)
+ if (DISPLAY_VER(display) < 12)
return 0;
- return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(data_rate, 10), 512);
}
-static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_num_active_planes(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
unsigned int num_active_planes = 0;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
num_active_planes += bw_state->num_active_planes[pipe];
return num_active_planes;
}
-static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_data_rate(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int data_rate = 0;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
data_rate += bw_state->data_rate[pipe];
- if (DISPLAY_VER(dev_priv) >= 13 && i915_vtd_active(dev_priv))
+ if (DISPLAY_VER(display) >= 13 && i915_vtd_active(i915))
data_rate = DIV_ROUND_UP(data_rate * 105, 100);
return data_rate;
@@ -836,10 +865,10 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -847,10 +876,10 @@ intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -858,27 +887,27 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
return to_intel_bw_state(bw_state);
}
-static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
+static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display,
int num_active_planes)
{
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
unsigned int max_bw_point = 0;
unsigned int max_bw = 0;
int i;
for (i = 0; i < num_qgv_points; i++) {
unsigned int max_data_rate =
- icl_qgv_bw(i915, num_active_planes, i);
+ icl_qgv_bw(display, num_active_planes, i);
/*
* We need to know which qgv point gives us
@@ -897,23 +926,23 @@ static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
return max_bw_point;
}
-static u16 icl_prepare_qgv_points_mask(struct drm_i915_private *i915,
+static u16 icl_prepare_qgv_points_mask(struct intel_display *display,
unsigned int qgv_points,
unsigned int psf_points)
{
return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
- ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915);
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display);
}
-static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
+static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
unsigned int max_bw_point_mask = 0;
unsigned int max_bw = 0;
int i;
for (i = 0; i < num_psf_gv_points; i++) {
- unsigned int max_data_rate = adl_psf_bw(i915, i);
+ unsigned int max_data_rate = adl_psf_bw(display, i);
if (max_data_rate > max_bw) {
max_bw_point_mask = BIT(i);
@@ -926,29 +955,29 @@ static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
return max_bw_point_mask;
}
-static void icl_force_disable_sagv(struct drm_i915_private *i915,
+static void icl_force_disable_sagv(struct intel_display *display,
struct intel_bw_state *bw_state)
{
- unsigned int qgv_points = icl_max_bw_qgv_point_mask(i915, 0);
- unsigned int psf_points = icl_max_bw_psf_gv_point_mask(i915);
+ unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0);
+ unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display);
- bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
qgv_points,
psf_points);
- drm_dbg_kms(&i915->drm, "Forcing SAGV disable: mask 0x%x\n",
+ drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n",
bw_state->qgv_points_mask);
- icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask);
+ icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
}
-static int mtl_find_qgv_points(struct drm_i915_private *i915,
+static int mtl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
struct intel_bw_state *new_bw_state)
{
unsigned int best_rate = UINT_MAX;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
unsigned int qgv_peak_bw = 0;
int i;
int ret;
@@ -962,9 +991,9 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
* for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is
* not enabled. PM Demand code will clamp the value for the register
*/
- if (!intel_can_enable_sagv(i915, new_bw_state)) {
+ if (!intel_can_enable_sagv(display, new_bw_state)) {
new_bw_state->qgv_point_peakbw = U16_MAX;
- drm_dbg_kms(&i915->drm, "No SAGV, use UINT_MAX as peak bw.");
+ drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw.");
return 0;
}
@@ -974,27 +1003,27 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
*/
for (i = 0; i < num_qgv_points; i++) {
unsigned int bw_index =
- tgl_max_bw_index(i915, num_active_planes, i);
+ tgl_max_bw_index(display, num_active_planes, i);
unsigned int max_data_rate;
- if (bw_index >= ARRAY_SIZE(i915->display.bw.max))
+ if (bw_index >= ARRAY_SIZE(display->bw.max))
continue;
- max_data_rate = i915->display.bw.max[bw_index].deratedbw[i];
+ max_data_rate = display->bw.max[bw_index].deratedbw[i];
if (max_data_rate < data_rate)
continue;
if (max_data_rate - data_rate < best_rate) {
best_rate = max_data_rate - data_rate;
- qgv_peak_bw = i915->display.bw.max[bw_index].peakbw[i];
+ qgv_peak_bw = display->bw.max[bw_index].peakbw[i];
}
- drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
+ drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
i, max_data_rate, data_rate, qgv_peak_bw);
}
- drm_dbg_kms(&i915->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
+ drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
qgv_peak_bw, data_rate);
/*
@@ -1002,7 +1031,7 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
* satisfying the required data rate is found
*/
if (qgv_peak_bw == 0) {
- drm_dbg_kms(&i915->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
+ drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
@@ -1013,14 +1042,14 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
return 0;
}
-static int icl_find_qgv_points(struct drm_i915_private *i915,
+static int icl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
u16 psf_points = 0;
u16 qgv_points = 0;
int i;
@@ -1031,22 +1060,22 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return ret;
for (i = 0; i < num_qgv_points; i++) {
- unsigned int max_data_rate = icl_qgv_bw(i915,
+ unsigned int max_data_rate = icl_qgv_bw(display,
num_active_planes, i);
if (max_data_rate >= data_rate)
qgv_points |= BIT(i);
- drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d\n",
+ drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n",
i, max_data_rate, data_rate);
}
for (i = 0; i < num_psf_gv_points; i++) {
- unsigned int max_data_rate = adl_psf_bw(i915, i);
+ unsigned int max_data_rate = adl_psf_bw(display, i);
if (max_data_rate >= data_rate)
psf_points |= BIT(i);
- drm_dbg_kms(&i915->drm, "PSF GV point %d: max bw %d"
+ drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d"
" required %d\n",
i, max_data_rate, data_rate);
}
@@ -1057,14 +1086,14 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* reasons.
*/
if (qgv_points == 0) {
- drm_dbg_kms(&i915->drm, "No QGV points provide sufficient memory"
+ drm_dbg_kms(display->drm, "No QGV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
if (num_psf_gv_points > 0 && psf_points == 0) {
- drm_dbg_kms(&i915->drm, "No PSF GV points provide sufficient memory"
+ drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
@@ -1075,9 +1104,9 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* we can't enable SAGV due to the increased memory latency it may
* cause.
*/
- if (!intel_can_enable_sagv(i915, new_bw_state)) {
- qgv_points = icl_max_bw_qgv_point_mask(i915, num_active_planes);
- drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point mask 0x%x\n",
+ if (!intel_can_enable_sagv(display, new_bw_state)) {
+ qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
+ drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
qgv_points);
}
@@ -1085,7 +1114,7 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* We store the ones which need to be masked as that is what PCode
* actually accepts as a parameter.
*/
- new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
qgv_points,
psf_points);
/*
@@ -1101,80 +1130,90 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return 0;
}
-static int intel_bw_check_qgv_points(struct drm_i915_private *i915,
+static int intel_bw_check_qgv_points(struct intel_display *display,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int data_rate = intel_bw_data_rate(i915, new_bw_state);
+ unsigned int data_rate = intel_bw_data_rate(display, new_bw_state);
unsigned int num_active_planes =
- intel_bw_num_active_planes(i915, new_bw_state);
+ intel_bw_num_active_planes(display, new_bw_state);
data_rate = DIV_ROUND_UP(data_rate, 1000);
- if (DISPLAY_VER(i915) >= 14)
- return mtl_find_qgv_points(i915, data_rate, num_active_planes,
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_find_qgv_points(display, data_rate, num_active_planes,
new_bw_state);
else
- return icl_find_qgv_points(i915, data_rate, num_active_planes,
+ return icl_find_qgv_points(display, data_rate, num_active_planes,
old_bw_state, new_bw_state);
}
-static bool intel_bw_state_changed(struct drm_i915_private *i915,
+static bool intel_dbuf_bw_changed(struct intel_display *display,
+ const struct intel_dbuf_bw *old_dbuf_bw,
+ const struct intel_dbuf_bw *new_dbuf_bw)
+{
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(display, slice) {
+ if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
+ old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_bw_state_changed(struct intel_display *display,
const struct intel_bw_state *old_bw_state,
const struct intel_bw_state *new_bw_state)
{
enum pipe pipe;
- for_each_pipe(i915, pipe) {
- const struct intel_dbuf_bw *old_crtc_bw =
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *old_dbuf_bw =
&old_bw_state->dbuf_bw[pipe];
- const struct intel_dbuf_bw *new_crtc_bw =
+ const struct intel_dbuf_bw *new_dbuf_bw =
&new_bw_state->dbuf_bw[pipe];
- enum dbuf_slice slice;
- for_each_dbuf_slice(i915, slice) {
- if (old_crtc_bw->max_bw[slice] != new_crtc_bw->max_bw[slice] ||
- old_crtc_bw->active_planes[slice] != new_crtc_bw->active_planes[slice])
- return true;
- }
+ if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
+ return true;
- if (old_bw_state->min_cdclk[pipe] != new_bw_state->min_cdclk[pipe])
+ if (intel_bw_crtc_min_cdclk(display, old_bw_state->data_rate[pipe]) !=
+ intel_bw_crtc_min_cdclk(display, new_bw_state->data_rate[pipe]))
return true;
}
return false;
}
-static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state,
+static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
struct intel_crtc *crtc,
enum plane_id plane_id,
const struct skl_ddb_entry *ddb,
unsigned int data_rate)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
- unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(i915, ddb);
+ struct intel_display *display = to_intel_display(crtc);
+ unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
enum dbuf_slice slice;
/*
* The arbiter can only really guarantee an
* equal share of the total bw to each plane.
*/
- for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) {
- crtc_bw->max_bw[slice] = max(crtc_bw->max_bw[slice], data_rate);
- crtc_bw->active_planes[slice] |= BIT(plane_id);
+ for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
+ dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
+ dbuf_bw->active_planes[slice] |= BIT(plane_id);
}
}
-static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
+static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
enum plane_id plane_id;
- memset(crtc_bw, 0, sizeof(*crtc_bw));
+ memset(dbuf_bw, 0, sizeof(*dbuf_bw));
if (!crtc_state->hw.active)
return;
@@ -1187,12 +1226,12 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
if (plane_id == PLANE_CURSOR)
continue;
- skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
&crtc_state->wm.skl.plane_ddb[plane_id],
crtc_state->data_rate[plane_id]);
- if (DISPLAY_VER(i915) < 11)
- skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ if (DISPLAY_VER(display) < 11)
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
&crtc_state->wm.skl.plane_ddb_y[plane_id],
crtc_state->data_rate[plane_id]);
}
@@ -1200,13 +1239,13 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
/* "Maximum Data Buffer Bandwidth" */
static int
-intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
+intel_bw_dbuf_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
unsigned int total_max_bw = 0;
enum dbuf_slice slice;
- for_each_dbuf_slice(i915, slice) {
+ for_each_dbuf_slice(display, slice) {
int num_active_planes = 0;
unsigned int max_bw = 0;
enum pipe pipe;
@@ -1215,11 +1254,11 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
* The arbiter can only really guarantee an
* equal share of the total bw to each plane.
*/
- for_each_pipe(i915, pipe) {
- const struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[pipe];
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *dbuf_bw = &bw_state->dbuf_bw[pipe];
- max_bw = max(crtc_bw->max_bw[slice], max_bw);
- num_active_planes += hweight8(crtc_bw->active_planes[slice]);
+ max_bw = max(dbuf_bw->max_bw[slice], max_bw);
+ num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
}
max_bw *= num_active_planes;
@@ -1229,16 +1268,18 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
return DIV_ROUND_UP(total_max_bw, 64);
}
-int intel_bw_min_cdclk(struct drm_i915_private *i915,
+int intel_bw_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
enum pipe pipe;
int min_cdclk;
- min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
+ min_cdclk = intel_bw_dbuf_min_cdclk(display, bw_state);
- for_each_pipe(i915, pipe)
- min_cdclk = max(min_cdclk, bw_state->min_cdclk[pipe]);
+ for_each_pipe(display, pipe)
+ min_cdclk = max(min_cdclk,
+ intel_bw_crtc_min_cdclk(display,
+ bw_state->data_rate[pipe]));
return min_cdclk;
}
@@ -1246,42 +1287,49 @@ int intel_bw_min_cdclk(struct drm_i915_private *i915,
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_bw_state *new_bw_state = NULL;
const struct intel_bw_state *old_bw_state = NULL;
const struct intel_cdclk_state *cdclk_state;
- const struct intel_crtc_state *crtc_state;
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
int old_min_cdclk, new_min_cdclk;
struct intel_crtc *crtc;
int i;
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
+
+ skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
+ skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
+
+ if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
+ continue;
+
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
- skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);
-
- new_bw_state->min_cdclk[crtc->pipe] =
- intel_bw_crtc_min_cdclk(crtc_state);
+ new_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
}
if (!old_bw_state)
return 0;
- if (intel_bw_state_changed(dev_priv, old_bw_state, new_bw_state)) {
+ if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) {
int ret = intel_atomic_lock_global_state(&new_bw_state->base);
if (ret)
return ret;
}
- old_min_cdclk = intel_bw_min_cdclk(dev_priv, old_bw_state);
- new_min_cdclk = intel_bw_min_cdclk(dev_priv, new_bw_state);
+ old_min_cdclk = intel_bw_min_cdclk(display, old_bw_state);
+ new_min_cdclk = intel_bw_min_cdclk(display, new_bw_state);
/*
* No need to check against the cdclk state if
@@ -1309,7 +1357,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
if (new_min_cdclk <= cdclk_state->bw_min_cdclk)
return 0;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
new_min_cdclk, cdclk_state->bw_min_cdclk);
*need_cdclk_calc = true;
@@ -1319,7 +1367,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -1353,7 +1401,7 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
*changed = true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] data rate %u num active planes %u\n",
crtc->base.base.id, crtc->base.name,
new_bw_state->data_rate[crtc->pipe],
@@ -1363,16 +1411,103 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
return 0;
}
-int intel_bw_atomic_check(struct intel_atomic_state *state)
+static int intel_bw_modeset_checks(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_bw_state *old_bw_state;
+ struct intel_bw_state *new_bw_state;
+
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ new_bw_state->active_pipes =
+ intel_calc_active_pipes(state, old_bw_state->active_pipes);
+
+ if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
+ int ret;
+
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_bw_state *old_bw_state = NULL;
+ struct intel_bw_state *new_bw_state = NULL;
+ struct intel_crtc *crtc;
+ int ret, i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_can_enable_sagv(old_crtc_state) ==
+ intel_crtc_can_enable_sagv(new_crtc_state))
+ continue;
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (intel_crtc_can_enable_sagv(new_crtc_state))
+ new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
+ else
+ new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
+ }
+
+ if (!new_bw_state)
+ return 0;
+
+ if (intel_can_enable_sagv(display, new_bw_state) !=
+ intel_can_enable_sagv(display, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
+{
+ struct intel_display *display = to_intel_display(state);
bool changed = false;
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_bw_state *new_bw_state;
const struct intel_bw_state *old_bw_state;
int ret;
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ if (any_ms) {
+ ret = intel_bw_modeset_checks(state);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_bw_check_sagv_mask(state);
+ if (ret)
+ return ret;
+
/* FIXME earlier gens need some checks too */
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return 0;
ret = intel_bw_check_data_rate(state, &changed);
@@ -1383,9 +1518,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state = intel_atomic_get_new_bw_state(state);
if (new_bw_state &&
- (intel_can_enable_sagv(i915, old_bw_state) !=
- intel_can_enable_sagv(i915, new_bw_state) ||
- new_bw_state->force_check_qgv))
+ intel_can_enable_sagv(display, old_bw_state) !=
+ intel_can_enable_sagv(display, new_bw_state))
changed = true;
/*
@@ -1395,28 +1529,25 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
if (!changed)
return 0;
- ret = intel_bw_check_qgv_points(i915, old_bw_state, new_bw_state);
+ ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state);
if (ret)
return ret;
- new_bw_state->force_check_qgv = false;
-
return 0;
}
static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
bw_state->data_rate[crtc->pipe] =
intel_bw_crtc_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
intel_bw_crtc_num_active_planes(crtc_state);
- bw_state->force_check_qgv = true;
- drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
+ drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
bw_state->data_rate[crtc->pipe],
bw_state->num_active_planes[crtc->pipe]);
@@ -1432,6 +1563,7 @@ void intel_bw_update_hw_state(struct intel_display *display)
return;
bw_state->active_pipes = 0;
+ bw_state->pipe_sagv_reject = 0;
for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
@@ -1443,6 +1575,11 @@ void intel_bw_update_hw_state(struct intel_display *display)
if (DISPLAY_VER(display) >= 11)
intel_bw_crtc_update(bw_state, crtc_state);
+
+ skl_crtc_calc_dbuf_bw(&bw_state->dbuf_bw[pipe], crtc_state);
+
+ /* initially SAGV has been forced off */
+ bw_state->pipe_sagv_reject |= BIT(pipe);
}
}
@@ -1458,6 +1595,7 @@ void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
+ memset(&bw_state->dbuf_bw[pipe], 0, sizeof(bw_state->dbuf_bw[pipe]));
}
static struct intel_global_state *
@@ -1483,9 +1621,8 @@ static const struct intel_global_state_funcs intel_bw_funcs = {
.atomic_destroy_state = intel_bw_destroy_state,
};
-int intel_bw_init(struct drm_i915_private *i915)
+int intel_bw_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_bw_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -1499,8 +1636,8 @@ int intel_bw_init(struct drm_i915_private *i915)
* Limit this only if we have SAGV. And for Display version 14 onwards
* sagv is handled though pmdemand requests
*/
- if (intel_has_sagv(i915) && IS_DISPLAY_VER(i915, 11, 13))
- icl_force_disable_sagv(i915, state);
+ if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13))
+ icl_force_disable_sagv(display, state);
return 0;
}
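Note on the intel_bw.c changes above: nearly every hunk follows the same conversion — functions that took struct drm_i915_private * now take struct intel_display *, and the i915 pointer is derived locally via to_i915(display->drm) only where non-display state (uncore accessors, dram_info) is still needed. The standalone sketch below illustrates just that back-pointer pattern; the struct layouts and field names are simplified stand-ins, not the driver's real definitions.

#include <stddef.h>
#include <stdio.h>

/*
 * Simplified stand-ins for the real i915 structures; only the
 * derivation pattern matters here, not the actual layouts.
 */
struct drm_device { int dummy; };

struct dram_info { int num_channels; };

struct drm_i915_private {
	struct drm_device drm;      /* embedded base device */
	struct dram_info dram_info; /* non-display state */
};

struct intel_display {
	struct drm_device *drm;     /* points at &i915->drm */
	int ver;                    /* stand-in for DISPLAY_VER() */
};

/*
 * container_of-style helper: recover the i915 private from its
 * embedded drm_device, the way to_i915() does in the driver.
 */
static struct drm_i915_private *to_i915(struct drm_device *drm)
{
	return (struct drm_i915_private *)((char *)drm -
			offsetof(struct drm_i915_private, drm));
}

/*
 * New-style helper: takes the display pointer and reaches back to the
 * i915 private only for the DRAM info it still needs.
 */
static int bw_num_channels(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	return i915->dram_info.num_channels > 0 ?
		i915->dram_info.num_channels : 1;
}

int main(void)
{
	struct drm_i915_private i915 = { .dram_info = { .num_channels = 2 } };
	struct intel_display display = { .drm = &i915.drm, .ver = 14 };

	printf("channels: %d\n", bw_num_channels(&display));
	return 0;
}

A side effect visible in the same file: intel_bw_crtc_min_cdclk() now takes (display, data_rate) and the per-pipe value is recomputed on demand rather than cached in the bandwidth state.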
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 3313e4eac4f0..eb2cc883e9c1 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -12,7 +12,6 @@
#include "intel_display_power.h"
#include "intel_global_state.h"
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -49,13 +48,6 @@ struct intel_bw_state {
*/
u16 qgv_points_mask;
- /*
- * Flag to force the QGV comparison in atomic check right after the
- * hw state readout
- */
- bool force_check_qgv;
-
- int min_cdclk[I915_MAX_PIPES];
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
};
@@ -72,14 +64,14 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state);
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state);
-void intel_bw_init_hw(struct drm_i915_private *dev_priv);
-int intel_bw_init(struct drm_i915_private *dev_priv);
-int intel_bw_atomic_check(struct intel_atomic_state *state);
-int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+void intel_bw_init_hw(struct intel_display *display);
+int intel_bw_init(struct intel_display *display);
+int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms);
+int icl_pcode_restrict_qgv_points(struct intel_display *display,
u32 points_mask);
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc);
-int intel_bw_min_cdclk(struct drm_i915_private *i915,
+int intel_bw_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state);
void intel_bw_update_hw_state(struct intel_display *display);
void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
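The prototype updates above accompany the removal of the cached min_cdclk[] array (and the force_check_qgv flag) from struct intel_bw_state: the per-pipe value is now recomputed from data_rate with the "Maximum Pipe Read Bandwidth" formula seen in the intel_bw.c hunks, DIV_ROUND_UP(data_rate * 10, 512) on display version 12 and later. A minimal worked sketch follows; the data-rate number is invented purely to exercise the rounding, and the units follow whatever intel_bw_crtc_data_rate() reports in the driver.

#include <stdio.h>

/* DIV_ROUND_UP as used in the kernel, for unsigned operands. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Invented aggregate pipe data rate; real values come from
	 * intel_bw_crtc_data_rate(). */
	unsigned long long data_rate = 3000000;

	/* "Maximum Pipe Read Bandwidth": data_rate * 10 / 512, rounded up. */
	unsigned long long min_cdclk = DIV_ROUND_UP(data_rate * 10, 512);

	printf("min cdclk: %llu\n", min_cdclk); /* 58594 */
	return 0;
}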
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 2a8749a0213e..b1718b491ffd 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1972,9 +1972,7 @@ int intel_mdclk_cdclk_ratio(struct intel_display *display,
static void xe2lpd_mdclk_cdclk_ratio_program(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_dbuf_mdclk_cdclk_ratio_update(display,
intel_mdclk_cdclk_ratio(display, cdclk_config),
cdclk_config->joined_mbus);
}
@@ -2808,7 +2806,6 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat
static int intel_compute_min_cdclk(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state =
intel_atomic_get_new_cdclk_state(state);
const struct intel_bw_state *bw_state;
@@ -2836,7 +2833,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
bw_state = intel_atomic_get_new_bw_state(state);
if (bw_state) {
- min_cdclk = intel_bw_min_cdclk(dev_priv, bw_state);
+ min_cdclk = intel_bw_min_cdclk(display, bw_state);
if (cdclk_state->bw_min_cdclk != min_cdclk) {
int ret;
@@ -3342,6 +3339,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
void intel_cdclk_update_hw_state(struct intel_display *display)
{
+ const struct intel_bw_state *bw_state =
+ to_intel_bw_state(display->bw.obj.state);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(display->cdclk.obj.state);
struct intel_crtc *crtc;
@@ -3359,6 +3358,8 @@ void intel_cdclk_update_hw_state(struct intel_display *display)
cdclk_state->min_cdclk[pipe] = intel_crtc_compute_min_cdclk(crtc_state);
cdclk_state->min_voltage_level[pipe] = crtc_state->min_voltage_level;
}
+
+ cdclk_state->bw_min_cdclk = intel_bw_min_cdclk(display, bw_state);
}
void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc)
@@ -3493,7 +3494,6 @@ static int dg1_rawclk(struct intel_display *display)
static int cnp_rawclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int divider, fraction;
u32 rawclk;
@@ -3513,7 +3513,7 @@ static int cnp_rawclk(struct intel_display *display)
rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
fraction) - 1);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
rawclk |= ICP_RAWCLK_NUM(numerator);
}
@@ -3552,21 +3552,20 @@ static int i9xx_hrawclk(struct intel_display *display)
*/
u32 intel_read_rawclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 freq;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
/*
* MTL always uses a 38.4 MHz rawclk. The bspec tells us
* "RAWCLK_FREQ defaults to the values for 38.4 and does
* not need to be programmed."
*/
freq = 38400;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ else if (INTEL_PCH_TYPE(display) >= PCH_DG1)
freq = dg1_rawclk(display);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ else if (INTEL_PCH_TYPE(display) >= PCH_CNP)
freq = cnp_rawclk(display);
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
freq = pch_rawclk(display);
else if (display->platform.valleyview || display->platform.cherryview)
freq = vlv_hrawclk(display);
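Beyond the pointer conversions, the intel_cdclk.c hunks above add one functional piece: intel_cdclk_update_hw_state() now also seeds cdclk_state->bw_min_cdclk from the read-out bandwidth state via intel_bw_min_cdclk(), so the post-readout cdclk state already reflects the bandwidth-driven floor. That floor is a max-reduction over the dbuf-derived minimum and each pipe's requirement; a simplified standalone sketch follows (pipe count and numbers are invented).

#include <stdio.h>

#define NUM_PIPES 4

static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	/*
	 * Invented per-pipe minimum cdclk requirements (kHz) plus a
	 * dbuf-derived floor, mirroring the shape of intel_bw_min_cdclk().
	 */
	int pipe_min_cdclk[NUM_PIPES] = { 163800, 307200, 0, 0 };
	int dbuf_min_cdclk = 192000;
	int min_cdclk = dbuf_min_cdclk;
	int pipe;

	for (pipe = 0; pipe < NUM_PIPES; pipe++)
		min_cdclk = max_int(min_cdclk, pipe_min_cdclk[pipe]);

	printf("bw min cdclk: %d kHz\n", min_cdclk); /* 307200 */
	return 0;
}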
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg.c b/drivers/gpu/drm/i915/display/intel_cmtg.c
index 07d7f4e8f60f..82606ebae1de 100644
--- a/drivers/gpu/drm/i915/display/intel_cmtg.c
+++ b/drivers/gpu/drm/i915/display/intel_cmtg.c
@@ -9,7 +9,6 @@
#include <drm/drm_device.h>
#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_crtc.h"
#include "intel_cmtg.h"
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index cfe14162231d..98dddf72c0eb 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -22,7 +22,9 @@
*
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
#include "i9xx_plane_regs.h"
#include "intel_color.h"
#include "intel_color_regs.h"
@@ -405,14 +407,13 @@ static void icl_read_csc(struct intel_crtc_state *crtc_state)
static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(display->drm);
/* icl+ have dedicated output CSC */
if (DISPLAY_VER(display) >= 11)
return false;
/* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */
- if (DISPLAY_VER(display) < 7 || IS_IVYBRIDGE(i915))
+ if (DISPLAY_VER(display) < 7 || display->platform.ivybridge)
return false;
return crtc_state->limited_color_range;
@@ -516,7 +517,6 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(display->drm);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
if (crtc_state->hw.ctm) {
@@ -538,7 +538,7 @@ static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
* LUT is needed but CSC is not we need to load an
* identity matrix.
*/
- drm_WARN_ON(display->drm, !IS_GEMINILAKE(i915));
+ drm_WARN_ON(display->drm, !display->platform.geminilake);
ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_identity);
} else {
@@ -3983,12 +3983,10 @@ int intel_color_init(struct intel_display *display)
void intel_color_init_hooks(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (HAS_GMCH(display)) {
- if (IS_CHERRYVIEW(i915))
+ if (display->platform.cherryview)
display->funcs.color = &chv_color_funcs;
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
display->funcs.color = &vlv_color_funcs;
else if (DISPLAY_VER(display) >= 4)
display->funcs.color = &i965_color_funcs;
@@ -4005,7 +4003,7 @@ void intel_color_init_hooks(struct intel_display *display)
display->funcs.color = &skl_color_funcs;
else if (DISPLAY_VER(display) == 8)
display->funcs.color = &bdw_color_funcs;
- else if (IS_HASWELL(i915))
+ else if (display->platform.haswell)
display->funcs.color = &hsw_color_funcs;
else if (DISPLAY_VER(display) == 7)
display->funcs.color = &ivb_color_funcs;
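The intel_color.c hook-selection hunk above is another instance of the platform-flag conversion: IS_CHERRYVIEW()/IS_VALLEYVIEW()/IS_HASWELL() checks on the i915 pointer become display->platform.* booleans. A toy sketch of that flag-based dispatch follows; the structures and function tables here are illustrative only, not the driver's real ones.

#include <stdio.h>

struct color_funcs { const char *name; };

static const struct color_funcs chv_color_funcs = { "chv" };
static const struct color_funcs vlv_color_funcs = { "vlv" };
static const struct color_funcs i965_color_funcs = { "i965" };

/* Simplified platform flags, mirroring the display->platform.* booleans. */
struct intel_display {
	struct {
		unsigned int cherryview:1;
		unsigned int valleyview:1;
	} platform;
	const struct color_funcs *funcs;
};

static void init_color_hooks(struct intel_display *display)
{
	if (display->platform.cherryview)
		display->funcs = &chv_color_funcs;
	else if (display->platform.valleyview)
		display->funcs = &vlv_color_funcs;
	else
		display->funcs = &i965_color_funcs;
}

int main(void)
{
	struct intel_display display = { .platform = { .cherryview = 1 } };

	init_color_hooks(&display);
	printf("color funcs: %s\n", display.funcs->name);
	return 0;
}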
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 17eea244cc83..f5cc38dbe559 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -3,6 +3,8 @@
* Copyright © 2018 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_combo_phy.h"
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index e42357bd9e80..6c81c9f2fd09 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -31,8 +31,10 @@
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_connector.h"
+#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
@@ -154,13 +156,14 @@ void intel_connector_destroy(struct drm_connector *connector)
int intel_connector_register(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_i915_private *i915 = to_i915(connector->dev);
int ret;
ret = intel_backlight_device_register(intel_connector);
if (ret)
goto err;
- if (i915_inject_probe_failure(to_i915(connector->dev))) {
+ if (i915_inject_probe_failure(i915)) {
ret = -EFAULT;
goto err_backlight;
}
@@ -204,10 +207,10 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
enum pipe intel_connector_get_pipe(struct intel_connector *connector)
{
- struct drm_device *dev = connector->base.dev;
+ struct intel_display *display = to_intel_display(connector);
- drm_WARN_ON(dev,
- !drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_WARN_ON(display->drm,
+ !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
if (!connector->base.state->crtc)
return INVALID_PIPE;
@@ -264,20 +267,19 @@ static const struct drm_prop_enum_list force_audio_names[] = {
void
intel_attach_force_audio_property(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_property *prop;
- prop = dev_priv->display.properties.force_audio;
+ prop = display->properties.force_audio;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, 0,
- "audio",
- force_audio_names,
- ARRAY_SIZE(force_audio_names));
+ prop = drm_property_create_enum(display->drm, 0,
+ "audio",
+ force_audio_names,
+ ARRAY_SIZE(force_audio_names));
if (prop == NULL)
return;
- dev_priv->display.properties.force_audio = prop;
+ display->properties.force_audio = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
}
@@ -291,20 +293,19 @@ static const struct drm_prop_enum_list broadcast_rgb_names[] = {
void
intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_property *prop;
- prop = dev_priv->display.properties.broadcast_rgb;
+ prop = display->properties.broadcast_rgb;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
- "Broadcast RGB",
- broadcast_rgb_names,
- ARRAY_SIZE(broadcast_rgb_names));
+ prop = drm_property_create_enum(display->drm, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ broadcast_rgb_names,
+ ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
- dev_priv->display.properties.broadcast_rgb = prop;
+ display->properties.broadcast_rgb = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
@@ -336,14 +337,14 @@ intel_attach_dp_colorspace_property(struct drm_connector *connector)
void
intel_attach_scaling_mode_property(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
u32 scaling_modes;
scaling_modes = BIT(DRM_MODE_SCALE_ASPECT) |
BIT(DRM_MODE_SCALE_FULLSCREEN);
/* On GMCH platforms borders are only possible on the LVDS port */
- if (!HAS_GMCH(i915) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (!HAS_GMCH(display) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
scaling_modes |= BIT(DRM_MODE_SCALE_CENTER);
drm_connector_attach_scaling_mode_property(connector, scaling_modes);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 76ffb3f8467c..38b50a779b6b 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -31,9 +31,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_connector.h"
@@ -91,13 +91,12 @@ static struct intel_crt *intel_attached_crt(struct intel_connector *connector)
bool intel_crt_port_enabled(struct intel_display *display,
i915_reg_t adpa_reg, enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
val = intel_de_read(display, adpa_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
*pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK_CPT, val);
else
*pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK, val);
@@ -177,7 +176,6 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
int mode)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -194,14 +192,14 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_LPT(dev_priv))
+ if (HAS_PCH_LPT(display))
; /* Those bits don't exist here */
- else if (HAS_PCH_CPT(dev_priv))
+ else if (HAS_PCH_CPT(display))
adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
else
adpa |= ADPA_PIPE_SEL(crtc->pipe);
- if (!HAS_PCH_SPLIT(dev_priv))
+ if (!HAS_PCH_SPLIT(display))
intel_de_write(display, BCLRPAT(display, crtc->pipe), 0);
switch (mode) {
@@ -356,7 +354,6 @@ intel_crt_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
int max_dotclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
int max_clock;
@@ -368,9 +365,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (HAS_PCH_LPT(dev_priv))
+ if (HAS_PCH_LPT(display))
max_clock = 180000;
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
/*
* 270 MHz due to current DPLL limits,
* DAC limit supposedly 355 MHz.
@@ -387,7 +384,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
- if (HAS_PCH_LPT(dev_priv) &&
+ if (HAS_PCH_LPT(display) &&
ilk_get_lanes_required(mode->clock, 270000, 24) > 2)
return MODE_CLOCK_HIGH;
@@ -438,7 +435,6 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -457,7 +453,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev_priv)) {
+ if (HAS_PCH_LPT(display)) {
/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
if (crtc_state->bw_constrained && crtc_state->pipe_bpp < 24) {
drm_dbg_kms(display->drm,
@@ -482,13 +478,12 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 adpa;
bool ret;
/* The first time through, trigger an explicit detection cycle */
if (crt->force_hotplug_required) {
- bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
+ bool turn_off_dac = HAS_PCH_SPLIT(display);
u32 save_adpa;
crt->force_hotplug_required = false;
@@ -532,8 +527,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- bool reenable_hpd;
u32 adpa;
bool ret;
u32 save_adpa;
@@ -550,7 +543,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
*
* Just disable HPD interrupts here to prevent this
*/
- reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
+ intel_hpd_block(&crt->base);
save_adpa = adpa = intel_de_read(display, crt->adpa_reg);
drm_dbg_kms(display->drm,
@@ -577,8 +570,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
drm_dbg_kms(display->drm,
"valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
- if (reenable_hpd)
- intel_hpd_enable(dev_priv, crt->base.hpd_pin);
+ intel_hpd_clear_and_unblock(&crt->base);
return ret;
}
@@ -586,15 +578,14 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 stat;
bool ret = false;
int i, tries = 0;
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
return ilk_crt_detect_hotplug(connector);
- if (IS_VALLEYVIEW(dev_priv))
+ if (display->platform.valleyview)
return valleyview_crt_detect_hotplug(connector);
/*
@@ -602,14 +593,14 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
* to get a reliable result.
*/
- if (IS_G45(dev_priv))
+ if (display->platform.g45)
tries = 2;
else
tries = 1;
for (i = 0; i < tries ; i++) {
/* turn on the FORCE_DETECT */
- i915_hotplug_interrupt_update(dev_priv,
+ i915_hotplug_interrupt_update(display,
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
@@ -627,7 +618,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
intel_de_write(display, PORT_HOTPLUG_STAT(display),
CRT_HOTPLUG_INT_STATUS);
- i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
+ i915_hotplug_interrupt_update(display, CRT_HOTPLUG_FORCE_DETECT, 0);
return ret;
}
@@ -880,7 +871,7 @@ intel_crt_detect(struct drm_connector *connector,
wakeref = intel_display_power_get(display, encoder->power_domain);
- if (I915_HAS_HOTPLUG(display)) {
+ if (HAS_HOTPLUG(display)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVM do not pass it through, and so
* only trust an assertion that the monitor is connected.
@@ -904,7 +895,7 @@ intel_crt_detect(struct drm_connector *connector,
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(display)) {
+ if (HAS_HOTPLUG(display)) {
status = connector_status_disconnected;
goto out;
}
@@ -943,7 +934,6 @@ out:
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *encoder = &crt->base;
intel_wakeref_t wakeref;
@@ -956,7 +946,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
wakeref = intel_display_power_get(display, encoder->power_domain);
ret = intel_crt_ddc_get_modes(connector, connector->ddc);
- if (ret || !IS_G4X(dev_priv))
+ if (ret || !display->platform.g4x)
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
@@ -1015,16 +1005,15 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
void intel_crt_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_connector *connector;
struct intel_crt *crt;
i915_reg_t adpa_reg;
u8 ddc_pin;
u32 adpa;
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
adpa_reg = PCH_ADPA;
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
adpa_reg = VLV_ADPA;
else
adpa_reg = ADPA;
@@ -1072,7 +1061,7 @@ void intel_crt_init(struct intel_display *display)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = BIT(INTEL_OUTPUT_DVO) | BIT(INTEL_OUTPUT_HDMI);
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
crt->base.pipe_mask = BIT(PIPE_A);
else
crt->base.pipe_mask = ~0;
@@ -1084,7 +1073,7 @@ void intel_crt_init(struct intel_display *display)
crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
- if (I915_HAS_HOTPLUG(display) &&
+ if (HAS_HOTPLUG(display) &&
!dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
crt->base.hotplug = intel_encoder_hotplug;
@@ -1112,7 +1101,7 @@ void intel_crt_init(struct intel_display *display)
intel_ddi_buf_trans_init(&crt->base);
} else {
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
crt->base.compute_config = pch_crt_compute_config;
crt->base.disable = pch_disable_crt;
crt->base.post_disable = pch_post_disable_crt;
@@ -1134,7 +1123,7 @@ void intel_crt_init(struct intel_display *display)
* polarity and link reversal bits or not, instead of relying on the
* BIOS.
*/
- if (HAS_PCH_LPT(dev_priv)) {
+ if (HAS_PCH_LPT(display)) {
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
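
The intel_crt.c hunks above follow the same conversion seen throughout this series: drop the `to_i915()` local and the `IS_<PLATFORM>()`/`HAS_*()` macro checks in favour of a `struct intel_display *` and its `platform` flags. A minimal stand-alone sketch of that flag-struct pattern is below; the struct, field, and function names are invented for illustration and are not the kernel's definitions.

```c
/* Stand-alone model of the platform-flag pattern used in the hunks above;
 * all names here are illustrative stand-ins, not the kernel's real symbols. */
#include <stdbool.h>
#include <stdio.h>

struct demo_display {
	struct {
		bool valleyview;
		bool cherryview;
	} platform;		/* per-platform booleans replace IS_*() macros */
	int display_ver;	/* stands in for DISPLAY_VER(display) */
};

static const char *pick_color_funcs(const struct demo_display *display)
{
	/* New-style check: read the per-platform flag off the display struct. */
	if (display->platform.cherryview)
		return "chv_color_funcs";
	else if (display->platform.valleyview)
		return "vlv_color_funcs";
	else if (display->display_ver >= 4)
		return "i965_color_funcs";
	return "i9xx_color_funcs";
}

int main(void)
{
	struct demo_display d = {
		.platform = { .valleyview = true },
		.display_ver = 4,
	};

	printf("%s\n", pick_color_funcs(&d));
	return 0;
}
```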
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 5b2603ef2ff7..29cfc38f12e0 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -124,7 +124,7 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- crtc->block_dc_for_vblank = intel_psr_needs_block_dc_vblank(crtc_state);
+ crtc->vblank_psr_notify = intel_psr_needs_vblank_notification(crtc_state);
assert_vblank_disabled(&crtc->base);
drm_crtc_set_max_vblank_count(&crtc->base,
@@ -154,9 +154,9 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
drm_crtc_vblank_off(&crtc->base);
assert_vblank_disabled(&crtc->base);
- crtc->block_dc_for_vblank = false;
+ crtc->vblank_psr_notify = false;
- flush_work(&display->irq.vblank_dc_work);
+ flush_work(&display->irq.vblank_notify_work);
}
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
@@ -305,7 +305,6 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
int intel_crtc_init(struct intel_display *display, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_plane *primary, *cursor;
const struct drm_crtc_funcs *funcs;
struct intel_crtc *crtc;
@@ -333,7 +332,7 @@ int intel_crtc_init(struct intel_display *display, enum pipe pipe)
for_each_sprite(display, pipe, sprite) {
struct intel_plane *plane;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
plane = skl_universal_plane_create(display, pipe, PLANE_2 + sprite);
else
plane = intel_sprite_plane_create(display, pipe, sprite);
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 599ddce96371..0c7f91046996 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -5,9 +5,10 @@
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_crtc_state_dump.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_hdmi.h"
#include "intel_vblank.h"
@@ -42,13 +43,13 @@ intel_dump_m_n_config(struct drm_printer *p,
}
static void
-intel_dump_infoframe(struct drm_i915_private *i915,
+intel_dump_infoframe(struct intel_display *display,
const union hdmi_infoframe *frame)
{
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
+ hdmi_infoframe_log(KERN_DEBUG, display->drm->dev, frame);
}
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
@@ -136,7 +137,7 @@ static void intel_dump_plane_state(struct drm_printer *p,
}
static void
-ilk_dump_csc(struct drm_i915_private *i915,
+ilk_dump_csc(struct intel_display *display,
struct drm_printer *p,
const char *name,
const struct intel_csc_matrix *csc)
@@ -152,7 +153,7 @@ ilk_dump_csc(struct drm_i915_private *i915,
csc->coeff[3 * i + 1],
csc->coeff[3 * i + 2]);
- if (DISPLAY_VER(i915) < 7)
+ if (DISPLAY_VER(display) < 7)
return;
drm_printf(p, "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
@@ -178,7 +179,6 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
{
struct intel_display *display = to_intel_display(pipe_config);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
struct drm_printer p;
@@ -188,7 +188,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
drm_printf(&p, "[CRTC:%d:%s] enable: %s [%s]\n",
crtc->base.base.id, crtc->base.name,
@@ -262,19 +262,19 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "GCP: 0x%x\n", pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
- intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
+ intel_dump_infoframe(display, &pipe_config->infoframes.avi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
- intel_dump_infoframe(i915, &pipe_config->infoframes.spd);
+ intel_dump_infoframe(display, &pipe_config->infoframes.spd);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
- intel_dump_infoframe(i915, &pipe_config->infoframes.hdmi);
+ intel_dump_infoframe(display, &pipe_config->infoframes.hdmi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
- intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ intel_dump_infoframe(display, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
- intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ intel_dump_infoframe(display, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(DP_SDP_VSC))
drm_dp_vsc_sdp_log(&p, &pipe_config->infoframes.vsc);
@@ -294,8 +294,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
pipe_config->hw.adjusted_mode.crtc_vdisplay,
pipe_config->framestart_delay, pipe_config->msa_timing_delay);
- drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
+ drm_printf(&p, "vrr: %s, fixed rr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
str_yes_no(pipe_config->vrr.enable),
+ str_yes_no(intel_vrr_is_fixed_rr(pipe_config)),
pipe_config->vrr.vmin, pipe_config->vrr.vmax, pipe_config->vrr.flipline,
pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
pipe_config->vrr.vsync_start, pipe_config->vrr.vsync_end);
@@ -319,14 +320,14 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "linetime: %d, ips linetime: %d\n",
pipe_config->linetime, pipe_config->ips_linetime);
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
drm_printf(&p, "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
crtc->num_scalers,
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id,
pipe_config->hw.scaling_filter);
- if (HAS_GMCH(i915))
+ if (HAS_GMCH(display))
drm_printf(&p, "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
@@ -343,7 +344,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_dpll_dump_hw_state(display, &p, &pipe_config->dpll_hw_state);
- if (IS_CHERRYVIEW(i915))
+ if (display->platform.cherryview)
drm_printf(&p, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
pipe_config->cgm_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
@@ -354,20 +355,20 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
- i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
+ display->color.glk_linear_degamma_lut ? "(linear) " : "",
pipe_config->pre_csc_lut ?
drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
pipe_config->post_csc_lut ?
drm_color_lut_size(pipe_config->post_csc_lut) : 0);
- if (DISPLAY_VER(i915) >= 11)
- ilk_dump_csc(i915, &p, "output csc", &pipe_config->output_csc);
+ if (DISPLAY_VER(display) >= 11)
+ ilk_dump_csc(display, &p, "output csc", &pipe_config->output_csc);
- if (!HAS_GMCH(i915))
- ilk_dump_csc(i915, &p, "pipe csc", &pipe_config->csc);
- else if (IS_CHERRYVIEW(i915))
+ if (!HAS_GMCH(display))
+ ilk_dump_csc(display, &p, "pipe csc", &pipe_config->csc);
+ else if (display->platform.cherryview)
vlv_dump_csc(&p, "cgm csc", &pipe_config->csc);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
vlv_dump_csc(&p, "wgc csc", &pipe_config->csc);
intel_vdsc_state_dump(&p, 0, pipe_config);
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 3276a5b4a9b0..2fec5ba58373 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -9,10 +9,11 @@
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_cursor.h"
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 22595766eac5..a82b93cbc81d 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -6,8 +6,10 @@
#include <linux/log2.h>
#include <linux/math64.h>
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -2761,9 +2763,9 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
val |= XELPDP_FORWARD_CLOCK_UNGATE;
if (!is_dp && is_hdmi_frl(port_clock))
- val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
- val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
/* TODO: HDMI FRL */
/* DP2.0 10G and 20G rates enable MPLLA*/
@@ -2774,7 +2776,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
- XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA |
XELPDP_SSC_ENABLE_PLLB, val);
}
@@ -3097,10 +3099,7 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
- if (DISPLAY_VER(display) >= 30)
- clock = REG_FIELD_GET(XE3_DDI_CLOCK_SELECT_MASK, val);
- else
- clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val);
drm_WARN_ON(display->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE));
drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_REQUEST));
@@ -3168,13 +3167,9 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
* clock muxes, gating and SSC
*/
- if (DISPLAY_VER(display) >= 30) {
- mask = XE3_DDI_CLOCK_SELECT_MASK;
- val |= XE3_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
- } else {
- mask = XELPDP_DDI_CLOCK_SELECT_MASK;
- val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
- }
+ mask = XELPDP_DDI_CLOCK_SELECT_MASK(display);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display,
+ intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
mask |= XELPDP_FORWARD_CLOCK_UNGATE;
val |= XELPDP_FORWARD_CLOCK_UNGATE;
@@ -3287,7 +3282,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK, 0);
+ XELPDP_DDI_CLOCK_SELECT_MASK(display), 0);
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_FORWARD_CLOCK_UNGATE, 0);
@@ -3336,7 +3331,7 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
* 5. Program PORT CLOCK CTRL register to disable and gate clocks
*/
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) |
XELPDP_FORWARD_CLOCK_UNGATE, 0);
/* 6. Program DDI_CLK_VALFREQ to 0. */
@@ -3365,7 +3360,7 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
* handling is done via the standard shared DPLL framework.
*/
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
- clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val);
if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK ||
clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK)
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index 960f7f778fb8..59c22beaf1de 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -192,10 +192,17 @@
#define XELPDP_TBT_CLOCK_REQUEST REG_BIT(19)
#define XELPDP_TBT_CLOCK_ACK REG_BIT(18)
-#define XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
-#define XE3_DDI_CLOCK_SELECT_MASK REG_GENMASK(16, 12)
-#define XELPDP_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val)
-#define XE3_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XE3_DDI_CLOCK_SELECT_MASK, val)
+#define _XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
+#define _XE3_DDI_CLOCK_SELECT_MASK REG_GENMASK(16, 12)
+#define XELPDP_DDI_CLOCK_SELECT_MASK(display) (DISPLAY_VER(display) >= 30 ? \
+ _XE3_DDI_CLOCK_SELECT_MASK : _XELPDP_DDI_CLOCK_SELECT_MASK)
+#define XELPDP_DDI_CLOCK_SELECT_PREP(display, val) (DISPLAY_VER(display) >= 30 ? \
+ REG_FIELD_PREP(_XE3_DDI_CLOCK_SELECT_MASK, (val)) : \
+ REG_FIELD_PREP(_XELPDP_DDI_CLOCK_SELECT_MASK, (val)))
+#define XELPDP_DDI_CLOCK_SELECT_GET(display, val) (DISPLAY_VER(display) >= 30 ? \
+ REG_FIELD_GET(_XE3_DDI_CLOCK_SELECT_MASK, (val)) : \
+ REG_FIELD_GET(_XELPDP_DDI_CLOCK_SELECT_MASK, (val)))
+
#define XELPDP_DDI_CLOCK_SELECT_NONE 0x0
#define XELPDP_DDI_CLOCK_SELECT_MAXPCLK 0x8
#define XELPDP_DDI_CLOCK_SELECT_DIV18CLK 0x9
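
The new `XELPDP_DDI_CLOCK_SELECT_{MASK,PREP,GET}(display, ...)` helpers above fold the Xe3 (bits 16:12) versus pre-Xe3 (bits 15:12) field-width difference into one macro keyed on `DISPLAY_VER(display)`, so callers no longer branch on the display version themselves. Below is a minimal stand-alone model of that version-dependent prep/get pattern; the `DEMO_*` macros are invented for the sketch and do not match the kernel headers.

```c
/* Stand-alone model of version-dependent field prep/get selection;
 * the DEMO_* names are invented for illustration only. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define DEMO_FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define DEMO_FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define DEMO_OLD_CLOCK_SELECT_MASK	DEMO_GENMASK(15, 12)	/* 4-bit field */
#define DEMO_NEW_CLOCK_SELECT_MASK	DEMO_GENMASK(16, 12)	/* 5-bit field */

/* Pick the mask for the running display generation, as the macros above do. */
#define DEMO_CLOCK_SELECT_MASK(ver)	((ver) >= 30 ? \
					 DEMO_NEW_CLOCK_SELECT_MASK : \
					 DEMO_OLD_CLOCK_SELECT_MASK)
#define DEMO_CLOCK_SELECT_PREP(ver, val) DEMO_FIELD_PREP(DEMO_CLOCK_SELECT_MASK(ver), (val))
#define DEMO_CLOCK_SELECT_GET(ver, reg)	 DEMO_FIELD_GET(DEMO_CLOCK_SELECT_MASK(ver), (reg))

int main(void)
{
	/* 0x9 mirrors the DIV18CLK-style select value used in the hunks above. */
	uint32_t reg = DEMO_CLOCK_SELECT_PREP(30, 0x9);

	printf("reg=0x%08x field=0x%x\n", reg, DEMO_CLOCK_SELECT_GET(30, reg));
	return 0;
}
```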
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index f38c998935b9..74132c1d6385 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -30,11 +30,13 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_scdc_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_consumer.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "icl_dsi.h"
+#include "intel_alpm.h"
#include "intel_audio.h"
#include "intel_audio_regs.h"
#include "intel_backlight.h"
@@ -78,6 +80,7 @@
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
+#include "intel_vrr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -106,14 +109,14 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
return level;
}
-static bool has_buf_trans_select(struct drm_i915_private *i915)
+static bool has_buf_trans_select(struct intel_display *display)
{
- return DISPLAY_VER(i915) < 10 && !IS_BROXTON(i915);
+ return DISPLAY_VER(display) < 10 && !display->platform.broxton;
}
-static bool has_iboost(struct drm_i915_private *i915)
+static bool has_iboost(struct intel_display *display)
{
- return DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915);
+ return DISPLAY_VER(display) == 9 && !display->platform.broxton;
}
/*
@@ -124,25 +127,25 @@ static bool has_iboost(struct drm_i915_private *i915)
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 iboost_bit = 0;
int i, n_entries;
enum port port = encoder->port;
const struct intel_ddi_buf_trans *trans;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (has_iboost(dev_priv) &&
+ if (has_iboost(display) &&
intel_bios_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
- intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
+ intel_de_write(display, DDI_BUF_TRANS_LO(port, i),
trans->entries[i].hsw.trans1 | iboost_bit);
- intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
+ intel_de_write(display, DDI_BUF_TRANS_HI(port, i),
trans->entries[i].hsw.trans2);
}
}
@@ -155,7 +158,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
int level = intel_ddi_level(encoder, crtc_state, 0);
u32 iboost_bit = 0;
int n_entries;
@@ -163,27 +166,25 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
const struct intel_ddi_buf_trans *trans;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (has_iboost(dev_priv) &&
+ if (has_iboost(display) &&
intel_bios_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
- intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
+ intel_de_write(display, DDI_BUF_TRANS_LO(port, 9),
trans->entries[level].hsw.trans1 | iboost_bit);
- intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
+ intel_de_write(display, DDI_BUF_TRANS_HI(port, 9),
trans->entries[level].hsw.trans2);
}
static i915_reg_t intel_ddi_buf_status_reg(struct intel_display *display, enum port port)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 14)
- return XELPDP_PORT_BUF_CTL1(i915, port);
+ return XELPDP_PORT_BUF_CTL1(display, port);
else
return DDI_BUF_CTL(port);
}
@@ -346,7 +347,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -359,14 +359,14 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
if (dig_port->ddi_a_4_lanes)
intel_dp->DP |= DDI_A_4_LANES;
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
if (intel_dp_is_uhbr(crtc_state))
intel_dp->DP |= DDI_BUF_PORT_DATA_40BIT;
else
intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT;
}
- if (IS_ALDERLAKE_P(i915) && intel_encoder_is_tc(encoder)) {
+ if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) {
intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
@@ -379,8 +379,7 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
}
}
-static int icl_calc_tbt_pll_link(struct intel_display *display,
- enum port port)
+static int icl_calc_tbt_pll_link(struct intel_display *display, enum port port)
{
u32 val = intel_de_read(display, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
@@ -414,15 +413,14 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
if (!intel_crtc_has_dp_encoder(crtc_state))
return;
- drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
+ drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));
temp = DP_MSA_MISC_SYNC_CLOCK;
@@ -445,7 +443,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
}
/* nonsense combination */
- drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ drm_WARN_ON(display->drm, crtc_state->limited_color_range &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
@@ -468,7 +466,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
temp |= DP_MSA_MISC_COLOR_VSC_SDP;
- intel_de_write(dev_priv, TRANS_MSA_MISC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_MSA_MISC(display, cpu_transcoder),
temp);
}
@@ -507,8 +505,8 @@ static u32
intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
@@ -516,7 +514,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
temp |= TGL_TRANS_DDI_SELECT_PORT(port);
else
temp |= TRANS_DDI_SELECT_PORT(port);
@@ -578,7 +576,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_HDMI_SCRAMBLING;
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
temp |= TRANS_DDI_PORT_WIDTH(crtc_state->lane_count);
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
@@ -591,11 +589,11 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder master;
master = crtc_state->mst_master_transcoder;
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
master == INVALID_TRANSCODER);
temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
}
@@ -604,7 +602,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
- if (IS_DISPLAY_VER(dev_priv, 8, 10) &&
+ if (IS_DISPLAY_VER(display, 8, 10) &&
crtc_state->master_transcoder != INVALID_TRANSCODER) {
u8 master_select =
bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
@@ -619,11 +617,10 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
enum transcoder master_transcoder = crtc_state->master_transcoder;
u32 ctl2 = 0;
@@ -635,12 +632,12 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
PORT_SYNC_MODE_MASTER_SELECT(master_select);
}
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder),
+ intel_de_write(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder),
ctl2);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
intel_ddi_transcoder_func_reg_val_get(encoder,
crtc_state));
}
@@ -654,8 +651,7 @@ void
intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
@@ -663,7 +659,7 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
ctl);
}
@@ -677,27 +673,26 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
- if (DISPLAY_VER(dev_priv) >= 11)
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder),
+ if (DISPLAY_VER(display) >= 11)
+ intel_de_write(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder),
0);
- ctl = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ctl = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
drm_WARN_ON(crtc->base.dev, ctl & TRANS_DDI_HDCP_SIGNALLING);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- if (IS_DISPLAY_VER(dev_priv, 8, 10))
+ if (IS_DISPLAY_VER(display, 8, 10))
ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
if (!intel_dp_mst_is_master_trans(crtc_state)) {
ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
@@ -706,7 +701,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
ctl);
if (intel_dp_mst_is_slave_trans(crtc_state))
@@ -725,17 +720,15 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool enable, u32 hdcp_mask)
{
struct intel_display *display = to_intel_display(intel_encoder);
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
intel_wakeref_t wakeref;
int ret = 0;
wakeref = intel_display_power_get_if_enabled(display,
intel_encoder->power_domain);
- if (drm_WARN_ON(dev, !wakeref))
+ if (drm_WARN_ON(display->drm, !wakeref))
return -ENXIO;
- intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
hdcp_mask, enable ? hdcp_mask : 0);
intel_display_power_put(display, intel_encoder->power_domain, wakeref);
return ret;
@@ -744,7 +737,6 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct intel_display *display = to_intel_display(intel_connector);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
@@ -765,12 +757,12 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
goto out;
}
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = (enum transcoder) pipe;
- ddi_mode = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) &
+ ddi_mode = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) &
TRANS_DDI_MODE_SELECT_MASK;
if (ddi_mode == TRANS_DDI_MODE_SELECT_HDMI ||
@@ -804,7 +796,6 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
u8 *pipe_mask, bool *is_dp_mst)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = encoder->port;
intel_wakeref_t wakeref;
enum pipe p;
@@ -819,13 +810,13 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!wakeref)
return;
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ tmp = intel_de_read(display, DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) {
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_EDP));
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A) {
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
@@ -846,7 +837,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
goto out;
}
- for_each_pipe(dev_priv, p) {
+ for_each_pipe(display, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
u32 port_mask, ddi_select, ddi_mode;
intel_wakeref_t trans_wakeref;
@@ -856,7 +847,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!trans_wakeref)
continue;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
port_mask = TGL_TRANS_DDI_PORT_MASK;
ddi_select = TGL_TRANS_DDI_SELECT_PORT(port);
} else {
@@ -864,8 +855,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
ddi_select = TRANS_DDI_SELECT_PORT(port);
}
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
intel_display_power_put(display, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
trans_wakeref);
@@ -883,12 +874,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (!*pipe_mask)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"No pipe for [ENCODER:%d:%s] found\n",
encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && dp128b132b_pipe_mask) {
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
/*
* If we don't have 8b/10b MST, but have more than one
@@ -901,12 +892,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
* can assume it's SST.
*/
if (hweight8(dp128b132b_pipe_mask) > 1 ||
- intel_dp_mst_encoder_active_links(dig_port))
+ intel_dp_mst_active_streams(intel_dp))
mst_pipe_mask = dp128b132b_pipe_mask;
}
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
encoder->base.base.id, encoder->base.name,
*pipe_mask);
@@ -914,7 +905,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe masks: all %02x, MST %02x, 128b/132b %02x)\n",
encoder->base.base.id, encoder->base.name,
*pipe_mask, mst_pipe_mask, dp128b132b_pipe_mask);
@@ -922,12 +913,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
*is_dp_mst = mst_pipe_mask;
out:
- if (*pipe_mask && (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))) {
- tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port));
+ if (*pipe_mask && (display->platform.geminilake || display->platform.broxton)) {
+ tmp = intel_de_read(display, BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n",
encoder->base.base.id, encoder->base.name, tmp);
}
@@ -1041,8 +1032,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
@@ -1050,53 +1040,53 @@ void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
if (cpu_transcoder == TRANSCODER_EDP)
return;
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
val = TGL_TRANS_CLK_SEL_PORT(phy);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
else
val = TRANS_CLK_SEL_PORT(encoder->port);
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val);
}
void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val;
if (cpu_transcoder == TRANSCODER_EDP)
return;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
val = TGL_TRANS_CLK_SEL_DISABLED;
else
val = TRANS_CLK_SEL_DISABLED;
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val);
}
-static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+static void _skl_ddi_set_iboost(struct intel_display *display,
enum port port, u8 iboost)
{
u32 tmp;
- tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0);
+ tmp = intel_de_read(display, DISPIO_CR_TX_BMU_CR0);
tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
if (iboost)
tmp |= iboost << BALANCE_LEG_SHIFT(port);
else
tmp |= BALANCE_LEG_DISABLE(port);
- intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp);
+ intel_de_write(display, DISPIO_CR_TX_BMU_CR0, tmp);
}
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int level)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u8 iboost;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -1109,7 +1099,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
iboost = trans->entries[level].hsw.i_boost;
@@ -1117,28 +1107,28 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
/* Make sure that the requested I_boost is valid */
if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
- drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost);
+ drm_err(display->drm, "Invalid I_boost value %u\n", iboost);
return;
}
- _skl_ddi_set_iboost(dev_priv, encoder->port, iboost);
+ _skl_ddi_set_iboost(display, encoder->port, iboost);
if (encoder->port == PORT_A && dig_port->max_lanes == 4)
- _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
+ _skl_ddi_set_iboost(display, PORT_E, iboost);
}
static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int n_entries;
encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
+ if (drm_WARN_ON(display->drm, n_entries < 1))
n_entries = 1;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
@@ -1171,14 +1161,14 @@ static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_stat
static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
u32 val;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
@@ -1186,25 +1176,25 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED;
intel_dp->hobl_active = is_hobl_buf_trans(trans);
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val,
+ intel_de_rmw(display, ICL_PORT_CL_DW10(phy), val,
intel_dp->hobl_active ? val : 0);
}
/* Set PORT_TX_DW5 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
COEFF_POLARITY | CURSOR_PROGRAM |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW2_LN(ln, phy),
SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK,
SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) |
SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) |
@@ -1216,7 +1206,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy),
POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK,
POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) |
POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) |
@@ -1227,7 +1217,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW7_LN(ln, phy),
N_SCALAR_MASK,
N_SCALAR(trans->entries[level].icl.dw7_n_scalar));
}
@@ -1236,7 +1226,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
int ln;
@@ -1246,12 +1236,12 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val &= ~COMMON_KEEPER_EN;
else
val |= COMMON_KEEPER_EN;
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), val);
/* 2. Program loadgen select */
/*
@@ -1261,33 +1251,33 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln < 4; ln++) {
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy),
LOADGEN_SELECT,
icl_combo_phy_loadgen_select(crtc_state, ln));
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ intel_de_rmw(display, ICL_PORT_CL_DW5(phy),
0, SUS_CLOCK_CONFIG);
/* 4. Clear training enable to change swing values */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
icl_ddi_combo_vswing_program(encoder, crtc_state);
/* 6. Set training enable to trigger update */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val |= TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
}
static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1296,13 +1286,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
return;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
- intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
}
@@ -1312,13 +1302,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
}
@@ -1329,7 +1319,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
@@ -1338,7 +1328,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
@@ -1354,21 +1344,21 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port),
+ intel_de_rmw(display, MG_CLKHUB(ln, tc_port),
CFG_LOW_RATE_LKREN_EN,
crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
CFG_AMI_CK_DIV_OVERRIDE_VAL(1) |
CFG_AMI_CK_DIV_OVERRIDE_EN : 0);
- intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
@@ -1378,9 +1368,9 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
- intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
}
}
@@ -1490,12 +1480,12 @@ int intel_ddi_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int lane)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
int level, n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&i915->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return 0;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -1504,7 +1494,7 @@ int intel_ddi_level(struct intel_encoder *encoder,
level = intel_ddi_dp_level(enc_to_intel_dp(encoder), crtc_state,
lane);
- if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries))
+ if (drm_WARN_ON_ONCE(display->drm, level >= n_entries))
level = n_entries - 1;
return level;
@@ -1514,13 +1504,13 @@ static void
hsw_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int level = intel_ddi_level(encoder, crtc_state, 0);
enum port port = encoder->port;
u32 signal_levels;
- if (has_iboost(dev_priv))
+ if (has_iboost(display))
skl_ddi_set_iboost(encoder, crtc_state, level);
/* HDMI ignores the rest */
@@ -1529,46 +1519,46 @@ hsw_set_signal_levels(struct intel_encoder *encoder,
signal_levels = DDI_BUF_TRANS_SELECT(level);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~DDI_BUF_EMP_MASK;
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+ intel_de_write(display, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(display, DDI_BUF_CTL(port));
}
-static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_enable_clock(struct intel_display *display, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
{
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, reg, clk_sel_mask, clk_sel);
+ intel_de_rmw(display, reg, clk_sel_mask, clk_sel);
/*
* "This step and the step before must be
* done with separate register writes."
*/
- intel_de_rmw(i915, reg, clk_off, 0);
+ intel_de_rmw(display, reg, clk_off, 0);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
-static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_disable_clock(struct intel_display *display, i915_reg_t reg,
u32 clk_off)
{
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, reg, 0, clk_off);
+ intel_de_rmw(display, reg, 0, clk_off);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
-static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
+static bool _icl_ddi_is_clock_enabled(struct intel_display *display, i915_reg_t reg,
u32 clk_off)
{
- return !(intel_de_read(i915, reg) & clk_off);
+ return !(intel_de_read(display, reg) & clk_off);
}
static struct intel_shared_dpll *
@@ -1585,14 +1575,14 @@ _icl_ddi_get_pll(struct intel_display *display, i915_reg_t reg,
static void adls_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ _icl_ddi_enable_clock(display, ADLS_DPCLKA_CFGCR(phy),
ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1600,19 +1590,19 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
static void adls_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ _icl_ddi_disable_clock(display, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
+ return _icl_ddi_is_clock_enabled(display, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1629,14 +1619,14 @@ static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1644,19 +1634,19 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1673,23 +1663,23 @@ static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
/*
* If we fail this, something went very wrong: first 2 PLLs should be
* used by first 2 phys and last 2 PLLs by last phys
*/
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
(pll->info->id < DPLL_ID_DG1_DPLL2 && phy >= PHY_C) ||
(pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C)))
return;
- _icl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ _icl_ddi_enable_clock(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1697,19 +1687,19 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ _icl_ddi_disable_clock(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
+ return _icl_ddi_is_clock_enabled(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1739,14 +1729,14 @@ static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1754,19 +1744,19 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1783,39 +1773,39 @@ struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
/*
* "For DDIC and DDID, program DDI_CLK_SEL to map the MG clock to the port.
* MG does not exist, but the programming is required to ungate DDIC and DDID."
*/
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
icl_ddi_combo_enable_clock(encoder, crtc_state);
}
static void jsl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
icl_ddi_combo_disable_clock(encoder);
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
return false;
@@ -1826,54 +1816,54 @@ static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- intel_de_write(i915, DDI_CLK_SEL(port),
+ intel_de_write(display, DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ intel_de_rmw(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ intel_de_rmw(display, ICL_DPCLKA_CFGCR0,
0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
return false;
- tmp = intel_de_read(i915, ICL_DPCLKA_CFGCR0);
+ tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0);
return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
}
@@ -1934,47 +1924,47 @@ static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
static void skl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, DPLL_CTRL2,
+ intel_de_rmw(display, DPLL_CTRL2,
DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port),
DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static void skl_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, DPLL_CTRL2,
+ intel_de_rmw(display, DPLL_CTRL2,
0, DPLL_CTRL2_DDI_CLK_OFF(port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
/*
* FIXME Not sure if the override affects both
* the PLL selection and the CLK_OFF bit.
*/
- return !(intel_de_read(i915, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
+ return !(intel_de_read(display, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
}
static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
@@ -2002,30 +1992,30 @@ static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- intel_de_write(i915, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
+ intel_de_write(display, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
void hsw_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- intel_de_write(i915, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ intel_de_write(display, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- return intel_de_read(i915, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
+ return intel_de_read(display, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
}
static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
@@ -2081,7 +2071,7 @@ void intel_ddi_disable_clock(struct intel_encoder *encoder)
void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 port_mask;
bool ddi_clk_needed;
@@ -2101,7 +2091,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* In the unlikely case that BIOS enables DP in MST mode, just
* warn since our MST HW readout is incomplete.
*/
- if (drm_WARN_ON(&i915->drm, is_mst))
+ if (drm_WARN_ON(display->drm, is_mst))
return;
}
@@ -2116,11 +2106,11 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* Sanity check that we haven't incorrectly registered another
* encoder using any of the ports of this DSI encoder.
*/
- for_each_intel_encoder(&i915->drm, other_encoder) {
+ for_each_intel_encoder(display->drm, other_encoder) {
if (other_encoder == encoder)
continue;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
port_mask & BIT(other_encoder->port)))
return;
}
@@ -2135,7 +2125,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
!encoder->is_clock_enabled(encoder))
return;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] is disabled/in DSI mode with an ungated DDI clock, gate it\n",
encoder->base.base.id, encoder->base.name);
@@ -2255,10 +2245,10 @@ tgl_dp_tp_transcoder(const struct intel_crtc_state *crtc_state)
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- return TGL_DP_TP_CTL(dev_priv,
+ if (DISPLAY_VER(display) >= 12)
+ return TGL_DP_TP_CTL(display,
tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_CTL(encoder->port);
@@ -2267,10 +2257,10 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
static i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- return TGL_DP_TP_STATUS(dev_priv,
+ if (DISPLAY_VER(display) >= 12)
+ return TGL_DP_TP_STATUS(display,
tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_STATUS(encoder->port);
@@ -2445,14 +2435,14 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
static void intel_ddi_disable_fec(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
if (!crtc_state->fec_enable)
return;
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_FEC_ENABLE, 0);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
}
static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
@@ -2474,11 +2464,11 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
* Splitter enable for eDP MSO is limited to certain pipes, on certain
* platforms.
*/
-static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+static u8 intel_ddi_splitter_pipe_mask(struct intel_display *display)
{
- if (DISPLAY_VER(i915) > 20)
+ if (DISPLAY_VER(display) > 20)
return ~0;
- else if (IS_ALDERLAKE_P(i915))
+ else if (display->platform.alderlake_p)
return BIT(PIPE_A) | BIT(PIPE_B);
else
return BIT(PIPE_A);
@@ -2487,28 +2477,28 @@ static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(pipe_config);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dss1;
- if (!HAS_MSO(i915))
+ if (!HAS_MSO(display))
return;
- dss1 = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
+ dss1 = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
pipe_config->splitter.enable = dss1 & SPLITTER_ENABLE;
if (!pipe_config->splitter.enable)
return;
- if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
+ if (drm_WARN_ON(display->drm, !(intel_ddi_splitter_pipe_mask(display) & BIT(pipe)))) {
pipe_config->splitter.enable = false;
return;
}
switch (dss1 & SPLITTER_CONFIGURATION_MASK) {
default:
- drm_WARN(&i915->drm, true,
+ drm_WARN(display->drm, true,
"Invalid splitter configuration, dss1=0x%08x\n", dss1);
fallthrough;
case SPLITTER_CONFIGURATION_2_SEGMENT:
@@ -2524,12 +2514,12 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dss1 = 0;
- if (!HAS_MSO(i915))
+ if (!HAS_MSO(display))
return;
if (crtc_state->splitter.enable) {
@@ -2541,7 +2531,7 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
dss1 |= SPLITTER_CONFIGURATION_4_SEGMENT;
}
- intel_de_rmw(i915, ICL_PIPE_DSS_CTL1(pipe),
+ intel_de_rmw(display, ICL_PIPE_DSS_CTL1(pipe),
SPLITTER_ENABLE | SPLITTER_CONFIGURATION_MASK |
OVERLAP_PIXELS_MASK, dss1);
}
@@ -2549,27 +2539,27 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
static void
mtl_ddi_enable_d2d(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
i915_reg_t reg;
u32 set_bits, wait_bits;
- if (DISPLAY_VER(dev_priv) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
- if (DISPLAY_VER(dev_priv) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
reg = DDI_BUF_CTL(port);
set_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
- reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ reg = XELPDP_PORT_BUF_CTL1(display, port);
set_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
- intel_de_rmw(dev_priv, reg, 0, set_bits);
- if (wait_for_us(intel_de_read(dev_priv, reg) & wait_bits, 100)) {
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
+ intel_de_rmw(display, reg, 0, set_bits);
+ if (wait_for_us(intel_de_read(display, reg) & wait_bits, 100)) {
+ drm_err(display->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
}
@@ -2599,13 +2589,13 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 val;
val = intel_tc_port_in_tbt_alt_mode(dig_port) ?
XELPDP_PORT_BUF_IO_SELECT_TBT : 0;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, val);
}
@@ -2734,7 +2724,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int ret;
@@ -2778,7 +2767,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
@@ -2882,16 +2871,15 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
- if (DISPLAY_VER(dev_priv) < 11)
- drm_WARN_ON(&dev_priv->drm,
+ if (DISPLAY_VER(display) < 11)
+ drm_WARN_ON(display->drm,
is_mst && (port == PORT_A || port == PORT_E));
else
- drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A);
+ drm_WARN_ON(display->drm, is_mst && port == PORT_A);
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2908,14 +2896,14 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_clock(encoder, crtc_state);
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
icl_program_mg_dp_mode(dig_port, crtc_state);
- if (has_buf_trans_select(dev_priv))
+ if (has_buf_trans_select(display))
hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
encoder->set_signal_levels(encoder, crtc_state);
@@ -2931,7 +2919,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_start_link_train(state, intel_dp, crtc_state);
- if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) &&
+ if ((port != PORT_A || DISPLAY_VER(display) >= 9) &&
!is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp, crtc_state);
@@ -2979,12 +2967,11 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_enable_clock(encoder, crtc_state);
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
@@ -3022,10 +3009,9 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
+ drm_WARN_ON(display->drm, crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
@@ -3050,27 +3036,27 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
static void
mtl_ddi_disable_d2d(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
i915_reg_t reg;
u32 clr_bits, wait_bits;
- if (DISPLAY_VER(dev_priv) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
- if (DISPLAY_VER(dev_priv) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
reg = DDI_BUF_CTL(port);
clr_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
- reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ reg = XELPDP_PORT_BUF_CTL1(display, port);
clr_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
- intel_de_rmw(dev_priv, reg, clr_bits, 0);
- if (wait_for_us(!(intel_de_read(dev_priv, reg) & wait_bits), 100))
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
+ intel_de_rmw(display, reg, clr_bits, 0);
+ if (wait_for_us(!(intel_de_read(display, reg) & wait_bits), 100))
+ drm_err(display->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@@ -3089,10 +3075,9 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
+ intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
if (DISPLAY_VER(display) >= 14)
intel_wait_ddi_buf_idle(display, port);
@@ -3100,7 +3085,7 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder,
mtl_ddi_disable_d2d(encoder);
if (intel_crtc_has_dp_encoder(crtc_state)) {
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_ENABLE, 0);
}
@@ -3118,7 +3103,6 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
intel_wakeref_t wakeref;
@@ -3135,12 +3119,12 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
*/
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
if (is_mst || intel_dp_is_uhbr(old_crtc_state)) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- intel_de_rmw(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK,
0);
}
@@ -3160,7 +3144,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
* Configure Transcoder Clock select to direct no clock to the
* transcoder"
*/
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_pps_vdd_on(intel_dp);
@@ -3176,8 +3160,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_clock(encoder);
/* De-select Thunderbolt */
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, encoder->port),
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, 0);
}
@@ -3187,7 +3171,6 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
intel_wakeref_t wakeref;
@@ -3195,12 +3178,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_ddi_buf_disable(encoder, old_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref);
@@ -3220,7 +3203,6 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *pipe_crtc;
bool is_hdmi = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI);
@@ -3249,6 +3231,8 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
}
+ intel_vrr_transcoder_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -3257,7 +3241,7 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
intel_dsc_disable(old_pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_scaler_disable(old_pipe_crtc_state);
else
ilk_pfit_disable(old_pipe_crtc_state);
@@ -3359,12 +3343,12 @@ static void intel_ddi_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
- if (port == PORT_A && DISPLAY_VER(dev_priv) < 9)
+ if (port == PORT_A && DISPLAY_VER(display) < 9)
intel_dp_stop_link_train(intel_dp, crtc_state);
drm_connector_update_privacy_screen(conn_state);
@@ -3401,7 +3385,6 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
@@ -3410,11 +3393,11 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
- if (has_buf_trans_select(dev_priv))
+ if (has_buf_trans_select(display))
hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state);
/* e. Enable D2D Link for C10/C20 Phy */
@@ -3423,7 +3406,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
encoder->set_signal_levels(encoder, crtc_state);
/* Display WA #1143: skl,kbl,cfl */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
/*
* For some reason these chicken bits have been
* stuffed into a transcoder register, even though
@@ -3433,7 +3416,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
i915_reg_t reg = gen9_chicken_trans_reg_by_port(display, port);
u32 val;
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(display, reg);
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3442,8 +3425,8 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
udelay(1);
@@ -3454,7 +3437,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
}
intel_ddi_power_up_lanes(encoder, crtc_state);
@@ -3475,7 +3458,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (dig_port->ddi_a_4_lanes)
buf_ctl |= DDI_A_4_LANES;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
u32 port_buf = 0;
port_buf |= XELPDP_PORT_WIDTH(crtc_state->lane_count);
@@ -3483,15 +3466,15 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (dig_port->lane_reversal)
port_buf |= XELPDP_PORT_REVERSAL;
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- } else if (IS_ALDERLAKE_P(dev_priv) && intel_encoder_is_tc(encoder)) {
- drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
+ } else if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) {
+ drm_WARN_ON(display->drm, !intel_tc_port_in_legacy_mode(dig_port));
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
@@ -3522,6 +3505,8 @@ static void intel_ddi_enable(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, crtc_state);
+ intel_vrr_transcoder_enable(crtc_state);
+
/* Enable/Disable DP2.0 SDP split config before transcoder */
intel_audio_sdp_split_update(crtc_state);
@@ -3567,9 +3552,10 @@ static void intel_ddi_disable_dp(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
intel_psr_disable(intel_dp, old_crtc_state);
+ intel_alpm_disable(intel_dp);
intel_edp_backlight_off(old_conn_state);
/* Disable the decompression in DP Sink */
intel_dp_sink_disable_decompression(state,
@@ -3584,12 +3570,12 @@ static void intel_ddi_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = old_conn_state->connector;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
}
@@ -3653,16 +3639,16 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc *pipe_crtc;
/* FIXME: Add MTL pll_mgr */
- if (DISPLAY_VER(i915) >= 14 || !intel_encoder_is_tc(encoder))
+ if (DISPLAY_VER(display) >= 14 || !intel_encoder_is_tc(encoder))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(crtc_state))
intel_update_active_dpll(state, pipe_crtc, encoder);
}
@@ -3678,7 +3664,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_tc_port = intel_encoder_is_tc(encoder);
@@ -3697,7 +3683,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
* Type-C ports. Skip this step for TBT.
*/
intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
bxt_dpio_phy_set_lane_optim_mask(encoder,
crtc_state->lane_lat_optim_mask);
}
@@ -3765,10 +3751,9 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 dp_tp_ctl;
- dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ dp_tp_ctl = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
drm_WARN_ON(display->drm, dp_tp_ctl & DP_TP_CTL_ENABLE);
@@ -3781,10 +3766,10 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
if (crtc_state->enhanced_framing)
dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
- if (IS_ALDERLAKE_P(dev_priv) &&
+ if (display->platform.alderlake_p &&
(intel_tc_port_in_dp_alt_mode(dig_port) || intel_tc_port_in_legacy_mode(dig_port)))
adlp_tbt_to_dp_alt_switch_wa(encoder);
@@ -3796,11 +3781,11 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 temp;
- temp = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ temp = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
@@ -3821,17 +3806,17 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
break;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), temp);
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), temp);
}
static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE);
/*
@@ -3841,28 +3826,26 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
* In this case there is a requirement to wait for a minimum number of
* idle patterns to be sent.
*/
- if (port == PORT_A && DISPLAY_VER(dev_priv) < 12)
+ if (port == PORT_A && DISPLAY_VER(display) < 12)
return;
- if (intel_de_wait_for_set(dev_priv,
+ if (intel_de_wait_for_set(display,
dp_tp_status_reg(encoder, crtc_state),
DP_TP_STATUS_IDLE_DONE, 2))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timed out waiting for DP idle patterns\n");
}
-static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+static bool intel_ddi_is_audio_enabled(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- struct intel_display *display = &dev_priv->display;
-
if (cpu_transcoder == TRANSCODER_EDP)
return false;
if (!intel_display_power_is_enabled(display, POWER_DOMAIN_AUDIO_MMIO))
return false;
- return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
+ return intel_de_read(display, HSW_AUD_PIN_ELD_CP_VLD) &
AUDIO_OUTPUT_ENABLE(cpu_transcoder);
}
@@ -3892,34 +3875,34 @@ static int icl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
crtc_state->min_voltage_level = tgl_ddi_min_voltage_level(crtc_state);
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
crtc_state->min_voltage_level = jsl_ddi_min_voltage_level(crtc_state);
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
}
-static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
+static enum transcoder bdw_transcoder_master_readout(struct intel_display *display,
enum transcoder cpu_transcoder)
{
u32 master_select;
- if (DISPLAY_VER(dev_priv) >= 11) {
- u32 ctl2 = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder));
+ if (DISPLAY_VER(display) >= 11) {
+ u32 ctl2 = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder));
if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0)
return INVALID_TRANSCODER;
master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2);
} else {
- u32 ctl = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ u32 ctl = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0)
return INVALID_TRANSCODER;
@@ -3936,15 +3919,14 @@ static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *de
static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum transcoder cpu_transcoder;
crtc_state->master_transcoder =
- bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder);
+ bdw_transcoder_master_readout(display, crtc_state->cpu_transcoder);
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ for_each_cpu_transcoder_masked(display, cpu_transcoder, transcoders) {
enum intel_display_power_domain power_domain;
intel_wakeref_t trans_wakeref;
@@ -3955,14 +3937,14 @@ static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
if (!trans_wakeref)
continue;
- if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ if (bdw_transcoder_master_readout(display, cpu_transcoder) ==
crtc_state->cpu_transcoder)
crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
intel_display_power_put(display, power_domain, trans_wakeref);
}
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
crtc_state->master_transcoder != INVALID_TRANSCODER &&
crtc_state->sync_mode_slaves_mask);
}
@@ -4085,11 +4067,10 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 ddi_func_ctl, ddi_mode, flags = 0;
- ddi_func_ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ddi_func_ctl = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if (ddi_func_ctl & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -4131,13 +4112,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
/*
* If this is true, we know we're being called from mst stream
* encoder's ->get_config().
*/
- if (intel_dp_mst_encoder_active_links(dig_port))
+ if (intel_dp_mst_active_streams(intel_dp))
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
else
intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
@@ -4152,11 +4133,11 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
static void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
/* XXX: DSI transcoder paranoia */
- if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
+ if (drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder)))
return;
intel_ddi_read_func_ctl(encoder, pipe_config);
@@ -4164,14 +4145,14 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_mso_get_config(encoder, pipe_config);
pipe_config->has_audio =
- intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
+ intel_ddi_is_audio_enabled(display, cpu_transcoder);
if (encoder->type == INTEL_OUTPUT_EDP)
intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
ddi_dotclock_get(pipe_config);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_get_lane_lat_optim_mask(encoder);
@@ -4192,7 +4173,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
HDMI_INFOFRAME_TYPE_DRM,
&pipe_config->infoframes.drm);
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
bdw_get_trans_port_sync_config(pipe_config);
intel_psr_get_config(encoder, pipe_config);
@@ -4285,10 +4266,10 @@ static enum icl_port_dpll_id
icl_ddi_tc_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return ICL_PORT_DPLL_DEFAULT;
if (icl_ddi_tc_pll_is_tbt(pll))
@@ -4382,11 +4363,11 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
bool fastset = true;
if (intel_encoder_is_tc(encoder)) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
fastset = false;
@@ -4421,12 +4402,12 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int ret;
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
@@ -4441,13 +4422,13 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+ if (display->platform.haswell && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
pipe_config->pch_pfit.force_thru =
pipe_config->pch_pfit.enabled ||
pipe_config->crc_enabled;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -4498,9 +4479,9 @@ static u8
intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
int tile_group_id)
{
+ struct intel_display *display = to_intel_display(ref_crtc_state);
struct drm_connector *connector;
const struct drm_connector_state *conn_state;
- struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev);
struct intel_atomic_state *state =
to_intel_atomic_state(ref_crtc_state->uapi.state);
u8 transcoders = 0;
@@ -4510,7 +4491,7 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
@@ -4542,11 +4523,11 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = conn_state->connector;
u8 port_sync_transcoders = 0;
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
encoder->base.base.id, encoder->base.name,
crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
@@ -4618,7 +4599,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_connector *connector;
enum port port = dig_port->base.port;
@@ -4627,7 +4608,7 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
return -ENOMEM;
dig_port->dp.output_reg = DDI_BUF_CTL(port);
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
dig_port->dp.prepare_link_retrain = mtl_ddi_prepare_link_retrain;
else
dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain;
@@ -4643,15 +4624,14 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
}
if (dig_port->base.type == INTEL_OUTPUT_EDP) {
- struct drm_device *dev = dig_port->base.base.dev;
struct drm_privacy_screen *privacy_screen;
- privacy_screen = drm_privacy_screen_get(dev->dev, NULL);
+ privacy_screen = drm_privacy_screen_get(display->drm->dev, NULL);
if (!IS_ERR(privacy_screen)) {
drm_connector_attach_privacy_screen_provider(&connector->base,
privacy_screen);
} else if (PTR_ERR(privacy_screen) != -ENODEV) {
- drm_warn(dev, "Error getting privacy-screen\n");
+ drm_warn(display->drm, "Error getting privacy-screen\n");
}
}
@@ -4662,7 +4642,6 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
struct intel_connector *connector = hdmi->attached_connector;
struct i2c_adapter *ddc = connector->base.ddc;
@@ -4675,7 +4654,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
if (connector->base.status != connector_status_connected)
return 0;
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex,
ctx);
if (ret)
return ret;
@@ -4692,7 +4671,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
if (!crtc_state->hw.active)
@@ -4708,7 +4687,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
ret = drm_scdc_readb(ddc, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- drm_err(&dev_priv->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
+ drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
connector->base.base.id, connector->base.name, ret);
return 0;
}
@@ -4733,11 +4712,11 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
static void intel_ddi_link_check(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/* TODO: Move checking the HDMI link state here as well. */
- drm_WARN_ON(&i915->drm, !dig_port->dp.attached_connector);
+ drm_WARN_ON(display->drm, !dig_port->dp.attached_connector);
intel_dp_link_check(encoder);
}
@@ -4800,26 +4779,26 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
static bool lpt_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.pch_hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, SDEISR) & bit;
+ return intel_de_read(display, SDEISR) & bit;
}
static bool hsw_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, DEISR) & bit;
+ return intel_de_read(display, DEISR) & bit;
}
static bool bdw_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
+ return intel_de_read(display, GEN8_DE_PORT_ISR) & bit;
}
static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
@@ -4848,7 +4827,7 @@ static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
if (dig_port->base.port != PORT_A)
return false;
@@ -4859,7 +4838,7 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
* supported configuration
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
return true;
return false;
@@ -4868,15 +4847,15 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
static int
intel_ddi_max_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
enum port port = dig_port->base.port;
int max_lanes = 4;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return max_lanes;
if (port == PORT_A || port == PORT_E) {
- if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
max_lanes = port == PORT_A ? 4 : 0;
else
/* Both A and E share 2 lanes */
@@ -4889,7 +4868,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
* so we use the proper lane count for our calculations.
*/
if (intel_ddi_a_force_4_lanes(dig_port)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Forcing DDI_A_4_LANES for port A\n");
dig_port->ddi_a_4_lanes = true;
max_lanes = 4;
@@ -4898,8 +4877,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
return max_lanes;
}
-static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin xelpd_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_D_XELPD)
return HPD_PORT_D + port - PORT_D_XELPD;
@@ -4909,8 +4887,7 @@ static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin dg1_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_TC1)
return HPD_PORT_C + port - PORT_TC1;
@@ -4918,8 +4895,7 @@ static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin tgl_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_TC1)
return HPD_PORT_TC1 + port - PORT_TC1;
@@ -4927,11 +4903,10 @@ static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin rkl_hpd_pin(struct intel_display *display, enum port port)
{
- if (HAS_PCH_TGP(dev_priv))
- return tgl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return tgl_hpd_pin(display, port);
if (port >= PORT_TC1)
return HPD_PORT_C + port - PORT_TC1;
@@ -4939,8 +4914,7 @@ static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin icl_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_C)
return HPD_PORT_TC1 + port - PORT_C;
@@ -4948,31 +4922,30 @@ static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin ehl_hpd_pin(struct intel_display *display, enum port port)
{
if (port == PORT_D)
return HPD_PORT_A;
- if (HAS_PCH_TGP(dev_priv))
- return icl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return icl_hpd_pin(display, port);
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port)
+static enum hpd_pin skl_hpd_pin(struct intel_display *display, enum port port)
{
- if (HAS_PCH_TGP(dev_priv))
- return icl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return icl_hpd_pin(display, port);
return HPD_PORT_A + port - PORT_A;
}
-static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port)
+static bool intel_ddi_is_tc(struct intel_display *display, enum port port)
{
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return port >= PORT_TC1;
- else if (DISPLAY_VER(i915) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
return port >= PORT_C;
else
return false;
@@ -5015,21 +4988,21 @@ static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder
#define port_tc_name(port) ((port) - PORT_TC1 + '1')
#define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')
-static bool port_strap_detected(struct drm_i915_private *i915, enum port port)
+static bool port_strap_detected(struct intel_display *display, enum port port)
{
/* straps not used on skl+ */
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return true;
switch (port) {
case PORT_A:
- return intel_de_read(i915, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+ return intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
case PORT_B:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
case PORT_C:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
case PORT_D:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
case PORT_E:
return true; /* no strap for DDI-E */
default:
@@ -5043,18 +5016,18 @@ static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp)
return init_dp || intel_encoder_is_tc(encoder);
}
-static bool assert_has_icl_dsi(struct drm_i915_private *i915)
+static bool assert_has_icl_dsi(struct intel_display *display)
{
- return !drm_WARN(&i915->drm, !IS_ALDERLAKE_P(i915) &&
- !IS_TIGERLAKE(i915) && DISPLAY_VER(i915) != 11,
+ return !drm_WARN(display->drm, !display->platform.alderlake_p &&
+ !display->platform.tigerlake && DISPLAY_VER(display) != 11,
"Platform does not support DSI\n");
}
-static bool port_in_use(struct drm_i915_private *i915, enum port port)
+static bool port_in_use(struct intel_display *display, enum port port)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
/* FIXME what about second port for dual link DSI? */
if (encoder->port == port)
return true;
@@ -5066,7 +5039,6 @@ static bool port_in_use(struct drm_i915_private *i915, enum port port)
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
bool init_hdmi, init_dp;
@@ -5078,8 +5050,8 @@ void intel_ddi_init(struct intel_display *display,
if (port == PORT_NONE)
return;
- if (!port_strap_detected(dev_priv, port)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!port_strap_detected(display, port)) {
+ drm_dbg_kms(display->drm,
"Port %c strap not detected\n", port_name(port));
return;
}
@@ -5087,15 +5059,15 @@ void intel_ddi_init(struct intel_display *display,
if (!assert_port_valid(display, port))
return;
- if (port_in_use(dev_priv, port)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (port_in_use(display, port)) {
+ drm_dbg_kms(display->drm,
"Port %c already claimed\n", port_name(port));
return;
}
if (intel_bios_encoder_supports_dsi(devdata)) {
/* BXT/GLK handled elsewhere, for now at least */
- if (!assert_has_icl_dsi(dev_priv))
+ if (!assert_has_icl_dsi(display))
return;
icl_dsi_init(display, devdata);
@@ -5111,7 +5083,7 @@ void intel_ddi_init(struct intel_display *display,
* outputs.
*/
if (intel_hti_uses_phy(display, phy)) {
- drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n",
+ drm_dbg_kms(display->drm, "PORT %c / PHY %c reserved by HTI\n",
port_name(port), phy_name(phy));
return;
}
@@ -5128,20 +5100,20 @@ void intel_ddi_init(struct intel_display *display,
*/
init_dp = true;
init_hdmi = false;
- drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n",
+ drm_dbg_kms(display->drm, "VBT says port %c has lspcon\n",
port_name(port));
}
if (!init_dp && !init_hdmi) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
return;
}
if (intel_phy_is_snps(display, phy) &&
- dev_priv->display.snps.phy_failed_calibration & BIT(phy)) {
- drm_dbg_kms(&dev_priv->drm,
+ display->snps.phy_failed_calibration & BIT(phy)) {
+ drm_dbg_kms(display->drm,
"SNPS PHY %c failed to calibrate, proceeding anyway\n",
phy_name(phy));
}
@@ -5155,26 +5127,26 @@ void intel_ddi_init(struct intel_display *display,
encoder = &dig_port->base;
encoder->devdata = devdata;
- if (DISPLAY_VER(dev_priv) >= 13 && port >= PORT_D_XELPD) {
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) {
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c/PHY %c",
port_name(port - PORT_D_XELPD + PORT_D),
phy_name(phy));
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
enum tc_port tc_port = intel_port_to_tc(display, port);
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %s%c/PHY %s%c",
port >= PORT_TC1 ? "TC" : "",
port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
enum tc_port tc_port = intel_port_to_tc(display, port);
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c%s/PHY %s%c",
port_name(port),
@@ -5182,7 +5154,7 @@ void intel_ddi_init(struct intel_display *display,
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
} else {
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c/PHY %c", port_name(port), phy_name(phy));
}
@@ -5218,32 +5190,32 @@ void intel_ddi_init(struct intel_display *display,
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
encoder->enable_clock = intel_mtl_pll_enable;
encoder->disable_clock = intel_mtl_pll_disable;
encoder->port_pll_type = intel_mtl_port_pll_type;
encoder->get_config = mtl_ddi_get_config;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
encoder->enable_clock = intel_mpllb_enable;
encoder->disable_clock = intel_mpllb_disable;
encoder->get_config = dg2_ddi_get_config;
- } else if (IS_ALDERLAKE_S(dev_priv)) {
+ } else if (display->platform.alderlake_s) {
encoder->enable_clock = adls_ddi_enable_clock;
encoder->disable_clock = adls_ddi_disable_clock;
encoder->is_clock_enabled = adls_ddi_is_clock_enabled;
encoder->get_config = adls_ddi_get_config;
- } else if (IS_ROCKETLAKE(dev_priv)) {
+ } else if (display->platform.rocketlake) {
encoder->enable_clock = rkl_ddi_enable_clock;
encoder->disable_clock = rkl_ddi_disable_clock;
encoder->is_clock_enabled = rkl_ddi_is_clock_enabled;
encoder->get_config = rkl_ddi_get_config;
- } else if (IS_DG1(dev_priv)) {
+ } else if (display->platform.dg1) {
encoder->enable_clock = dg1_ddi_enable_clock;
encoder->disable_clock = dg1_ddi_disable_clock;
encoder->is_clock_enabled = dg1_ddi_is_clock_enabled;
encoder->get_config = dg1_ddi_get_config;
- } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
- if (intel_ddi_is_tc(dev_priv, port)) {
+ } else if (display->platform.jasperlake || display->platform.elkhartlake) {
+ if (intel_ddi_is_tc(display, port)) {
encoder->enable_clock = jsl_ddi_tc_enable_clock;
encoder->disable_clock = jsl_ddi_tc_disable_clock;
encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled;
@@ -5255,8 +5227,8 @@ void intel_ddi_init(struct intel_display *display,
encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
encoder->get_config = icl_ddi_combo_get_config;
}
- } else if (DISPLAY_VER(dev_priv) >= 11) {
- if (intel_ddi_is_tc(dev_priv, port)) {
+ } else if (DISPLAY_VER(display) >= 11) {
+ if (intel_ddi_is_tc(display, port)) {
encoder->enable_clock = icl_ddi_tc_enable_clock;
encoder->disable_clock = icl_ddi_tc_disable_clock;
encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled;
@@ -5268,36 +5240,36 @@ void intel_ddi_init(struct intel_display *display,
encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
encoder->get_config = icl_ddi_combo_get_config;
}
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
/* BXT/GLK have fixed PLL->port mapping */
encoder->get_config = bxt_ddi_get_config;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
encoder->enable_clock = skl_ddi_enable_clock;
encoder->disable_clock = skl_ddi_disable_clock;
encoder->is_clock_enabled = skl_ddi_is_clock_enabled;
encoder->get_config = skl_ddi_get_config;
- } else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ } else if (display->platform.broadwell || display->platform.haswell) {
encoder->enable_clock = hsw_ddi_enable_clock;
encoder->disable_clock = hsw_ddi_disable_clock;
encoder->is_clock_enabled = hsw_ddi_is_clock_enabled;
encoder->get_config = hsw_ddi_get_config;
}
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
encoder->set_signal_levels = intel_cx0_phy_set_signal_levels;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = icl_mg_phy_set_signal_levels;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
encoder->set_signal_levels = bxt_dpio_phy_set_signal_levels;
} else {
encoder->set_signal_levels = hsw_set_signal_levels;
@@ -5305,29 +5277,29 @@ void intel_ddi_init(struct intel_display *display,
intel_ddi_buf_trans_init(encoder);
- if (DISPLAY_VER(dev_priv) >= 13)
- encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port);
- else if (IS_DG1(dev_priv))
- encoder->hpd_pin = dg1_hpd_pin(dev_priv, port);
- else if (IS_ROCKETLAKE(dev_priv))
- encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) >= 12)
- encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
- encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) == 11)
- encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- encoder->hpd_pin = skl_hpd_pin(dev_priv, port);
+ if (DISPLAY_VER(display) >= 13)
+ encoder->hpd_pin = xelpd_hpd_pin(display, port);
+ else if (display->platform.dg1)
+ encoder->hpd_pin = dg1_hpd_pin(display, port);
+ else if (display->platform.rocketlake)
+ encoder->hpd_pin = rkl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) >= 12)
+ encoder->hpd_pin = tgl_hpd_pin(display, port);
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
+ encoder->hpd_pin = ehl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) == 11)
+ encoder->hpd_pin = icl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
+ encoder->hpd_pin = skl_hpd_pin(display, port);
else
encoder->hpd_pin = intel_hpd_pin_default(port);
- ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ ddi_buf_ctl = intel_de_read(display, DDI_BUF_CTL(port));
dig_port->lane_reversal = intel_bios_encoder_lane_reversal(devdata) ||
ddi_buf_ctl & DDI_BUF_PORT_REVERSAL;
- dig_port->ddi_a_4_lanes = DISPLAY_VER(dev_priv) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
+ dig_port->ddi_a_4_lanes = DISPLAY_VER(display) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
@@ -5346,7 +5318,7 @@ void intel_ddi_init(struct intel_display *display,
if (!is_legacy && init_hdmi) {
is_legacy = !init_dp;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n",
port_name(port),
str_yes_no(init_dp),
@@ -5363,24 +5335,24 @@ void intel_ddi_init(struct intel_display *display,
goto err;
}
- drm_WARN_ON(&dev_priv->drm, port > PORT_I);
+ drm_WARN_ON(display->drm, port > PORT_I);
dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(display, port);
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
if (intel_encoder_is_tc(encoder))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
dig_port->connected = bdw_digital_port_connected;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
if (port == PORT_A)
dig_port->connected = bdw_digital_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_HASWELL(dev_priv)) {
+ } else if (display->platform.haswell) {
if (port == PORT_A)
dig_port->connected = hsw_digital_port_connected;
else
@@ -5396,7 +5368,7 @@ void intel_ddi_init(struct intel_display *display,
dig_port->hpd_pulse = intel_dp_hpd_pulse;
if (dig_port->dp.mso_link_count)
- encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(display);
}
/*
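[Editor's sketch — not part of the patch.] The intel_ddi.c hunks above all apply one mechanical pattern: derive struct intel_display via to_intel_display() instead of struct drm_i915_private via to_i915(), then route DISPLAY_VER()/platform checks, intel_de_*() register accessors and drm_* logging through the display pointer. A minimal before/after sketch of that pattern, modelled on the hsw_digital_port_connected() hunk above (function names here are placeholders; the usual i915 display headers are assumed):

/* Before: hooks fetched drm_i915_private and used it for registers and flags. */
static bool example_port_connected_old(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, DEISR) & bit;
}

/* After: the same hook derives struct intel_display and never touches dev_priv. */
static bool example_port_connected_new(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	u32 bit = display->hotplug.hpd[encoder->hpd_pin];

	return intel_de_read(display, DEISR) & bit;
}

Platform checks follow the same shape in the intel_ddi_init() hunks: IS_GEMINILAKE(dev_priv) becomes display->platform.geminilake, and DISPLAY_VER(dev_priv) becomes DISPLAY_VER(display).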
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index b7399e9d11cc..54ce3e4f8fd9 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -6,7 +6,6 @@
#ifndef __INTEL_DE_H__
#define __INTEL_DE_H__
-#include "intel_display_conversion.h"
#include "intel_display_core.h"
#include "intel_dmc_wl.h"
#include "intel_dsb.h"
@@ -19,7 +18,7 @@ static inline struct intel_uncore *__to_uncore(struct intel_display *display)
}
static inline u32
-__intel_de_read(struct intel_display *display, i915_reg_t reg)
+intel_de_read(struct intel_display *display, i915_reg_t reg)
{
u32 val;
@@ -31,7 +30,6 @@ __intel_de_read(struct intel_display *display, i915_reg_t reg)
return val;
}
-#define intel_de_read(p,...) __intel_de_read(__to_intel_display(p), __VA_ARGS__)
static inline u8
intel_de_read8(struct intel_display *display, i915_reg_t reg)
@@ -66,7 +64,7 @@ intel_de_read64_2x32(struct intel_display *display,
}
static inline void
-__intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
+intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
{
intel_dmc_wl_get(display, reg);
@@ -74,10 +72,9 @@ __intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
intel_dmc_wl_put(display, reg);
}
-#define intel_de_posting_read(p,...) __intel_de_posting_read(__to_intel_display(p), __VA_ARGS__)
static inline void
-__intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
+intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
{
intel_dmc_wl_get(display, reg);
@@ -85,7 +82,6 @@ __intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
intel_dmc_wl_put(display, reg);
}
-#define intel_de_write(p,...) __intel_de_write(__to_intel_display(p), __VA_ARGS__)
static inline u32
__intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
@@ -95,8 +91,7 @@ __intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
}
static inline u32
-__intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
- u32 set)
+intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
{
u32 val;
@@ -108,7 +103,6 @@ __intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
return val;
}
-#define intel_de_rmw(p,...) __intel_de_rmw(__to_intel_display(p), __VA_ARGS__)
static inline int
__intel_de_wait_for_register_nowl(struct intel_display *display,
@@ -181,20 +175,18 @@ intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
}
static inline int
-__intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
{
return intel_de_wait(display, reg, mask, mask, timeout);
}
-#define intel_de_wait_for_set(p,...) __intel_de_wait_for_set(__to_intel_display(p), __VA_ARGS__)
static inline int
-__intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
{
return intel_de_wait(display, reg, mask, 0, timeout);
}
-#define intel_de_wait_for_clear(p,...) __intel_de_wait_for_clear(__to_intel_display(p), __VA_ARGS__)
/*
* Unlocked mmio-accessors, think carefully before using these.
@@ -205,7 +197,7 @@ __intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
* a more localised lock guarding all access to that bank of registers.
*/
static inline u32
-__intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
+intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
{
u32 val;
@@ -214,15 +206,13 @@ __intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
return val;
}
-#define intel_de_read_fw(p,...) __intel_de_read_fw(__to_intel_display(p), __VA_ARGS__)
static inline void
-__intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
+intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
{
trace_i915_reg_rw(true, reg, val, sizeof(val), true);
intel_uncore_write_fw(__to_uncore(display), reg, val);
}
-#define intel_de_write_fw(p,...) __intel_de_write_fw(__to_intel_display(p), __VA_ARGS__)
static inline u32
intel_de_read_notrace(struct intel_display *display, i915_reg_t reg)
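[Editor's sketch — not part of the patch.] The intel_de.h change above removes the __intel_de_*() inline plus wrapper-macro pairs; the old macros passed the first argument through __to_intel_display(), whereas after the patch the plain intel_de_read()/intel_de_write()/intel_de_rmw()/intel_de_wait_for_set() names are the inlines themselves and take struct intel_display * only. A hedged caller-side sketch (register and signature taken from the hunks above; the snippet is illustrative only):

static u32 example_read_ddi_a(struct intel_display *display)
{
	/*
	 * Post-patch form: intel_de_read() is the inline itself and takes
	 * struct intel_display * directly; there is no __to_intel_display()
	 * conversion of the first argument any more, so callers must already
	 * hold a display pointer.
	 */
	return intel_de_read(display, DDI_BUF_CTL(PORT_A));
}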
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3afb85fe8536..6f0a0bc71b06 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -55,6 +55,7 @@
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
+#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
@@ -73,6 +74,7 @@
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -663,7 +665,6 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
@@ -696,7 +697,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
* wait-for-vblank between disabling the plane and the pipe.
*/
if (HAS_GMCH(display) &&
- intel_set_memory_cxsr(dev_priv, false))
+ intel_set_memory_cxsr(display, false))
intel_plane_initial_vblank_wait(crtc);
/*
@@ -968,7 +969,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
- old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
+ old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full ||
+ old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start ||
+ old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end;
}
static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
@@ -1041,19 +1044,16 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
- intel_psr_post_plane_update(state, crtc);
-
- intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
+ intel_frontbuffer_flip(display, new_crtc_state->fb_bits);
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
intel_fbc_post_update(state, crtc);
@@ -1078,6 +1078,10 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (audio_enabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_enable(state, crtc);
+
+ intel_alpm_post_plane_update(state, crtc);
+
+ intel_psr_post_plane_update(state, crtc);
}
static void intel_post_plane_update_after_readout(struct intel_atomic_state *state,
@@ -1166,13 +1170,15 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
+ intel_alpm_pre_plane_update(state, crtc);
+ intel_psr_pre_plane_update(state, crtc);
+
if (intel_crtc_vrr_disabling(state, crtc)) {
intel_vrr_disable(old_crtc_state);
intel_crtc_update_active_timings(old_crtc_state, false);
@@ -1183,8 +1189,6 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_drrs_deactivate(old_crtc_state);
- intel_psr_pre_plane_update(state, crtc);
-
if (hsw_ips_pre_update(state, crtc))
intel_crtc_wait_for_next_vblank(crtc);
@@ -1220,7 +1224,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* wait-for-vblank between disabling the plane and the pipe.
*/
if (HAS_GMCH(display) && old_crtc_state->hw.active &&
- new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+ new_crtc_state->disable_cxsr && intel_set_memory_cxsr(display, false))
intel_crtc_wait_for_next_vblank(crtc);
/*
@@ -1231,7 +1235,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* WaCxSRDisabledForSpriteScaling:ivb
*/
if (!HAS_GMCH(display) && old_crtc_state->hw.active &&
- new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv))
+ new_crtc_state->disable_cxsr && ilk_disable_cxsr(display))
intel_crtc_wait_for_next_vblank(crtc);
/*
@@ -1255,7 +1259,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
*/
if (!intel_initial_watermarks(state, crtc))
if (new_crtc_state->update_wm_pre)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
}
/*
@@ -1280,7 +1284,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
unsigned int update_mask = new_crtc_state->update_planes;
@@ -1302,7 +1306,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
fb_bits |= plane->frontbuffer_bit;
}
- intel_frontbuffer_flip(dev_priv, fb_bits);
+ intel_frontbuffer_flip(display, fb_bits);
}
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
@@ -1509,7 +1513,6 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (drm_WARN_ON(display->drm, crtc->active))
@@ -1561,7 +1564,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
intel_wait_for_pipe_scanline_moving(crtc);
/*
@@ -1660,13 +1663,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_pll_enable(state, crtc);
- for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
- const struct intel_crtc_state *pipe_crtc_state =
- intel_atomic_get_new_crtc_state(state, pipe_crtc);
-
- if (pipe_crtc_state->shared_dpll)
- intel_enable_shared_dpll(pipe_crtc_state);
- }
+ if (new_crtc_state->shared_dpll)
+ intel_enable_shared_dpll(new_crtc_state);
intel_encoders_pre_enable(state, crtc);
@@ -1777,8 +1775,6 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
intel_set_pch_fifo_underrun_reporting(display, pipe, true);
-
- intel_disable_shared_dpll(old_crtc_state);
}
static void hsw_crtc_disable(struct intel_atomic_state *state,
@@ -1797,12 +1793,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
- for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
- const struct intel_crtc_state *old_pipe_crtc_state =
- intel_atomic_get_old_crtc_state(state, pipe_crtc);
-
- intel_disable_shared_dpll(old_pipe_crtc_state);
- }
+ intel_disable_shared_dpll(old_crtc_state);
intel_encoders_post_pll_disable(state, crtc);
@@ -2081,7 +2072,6 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (drm_WARN_ON(display->drm, crtc->active))
@@ -2105,7 +2095,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_color_modeset(new_crtc_state);
if (!intel_initial_watermarks(state, crtc))
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
intel_enable_transcoder(new_crtc_state);
intel_crtc_vblank_on(new_crtc_state);
@@ -2121,7 +2111,6 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
@@ -2145,9 +2134,9 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
if (display->platform.cherryview)
- chv_disable_pll(dev_priv, pipe);
+ chv_disable_pll(display, pipe);
else if (display->platform.valleyview)
- vlv_disable_pll(dev_priv, pipe);
+ vlv_disable_pll(display, pipe);
else
i9xx_disable_pll(old_crtc_state);
}
@@ -2158,7 +2147,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
if (!display->funcs.wm->initial_watermarks)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
/* clock the pipe down to 640x480@60 to potentially save power */
if (display->platform.i830)
@@ -2341,7 +2330,6 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
intel_joiner_compute_pipe_src(crtc_state);
@@ -2360,7 +2348,7 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_is_dual_link_lvds(i915)) {
+ intel_is_dual_link_lvds(display)) {
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
crtc->base.base.id, crtc->base.name);
@@ -2418,14 +2406,6 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
return 0;
}
-static bool intel_crtc_needs_wa_14015401596(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
-
- return intel_vrr_possible(crtc_state) && crtc_state->has_psr &&
- IS_DISPLAY_VER(display, 13, 14);
-}
-
static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -2434,9 +2414,7 @@ static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
if (!HAS_DSB(display))
return 0;
- /* Wa_14015401596 */
- if (intel_crtc_needs_wa_14015401596(crtc_state))
- vblank_delay = max(vblank_delay, 1);
+ vblank_delay = max(vblank_delay, intel_psr_min_vblank_delay(crtc_state));
return vblank_delay;
}
@@ -2548,15 +2526,13 @@ intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
void intel_panel_sanitize_ssc(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
/*
* There may be no VBT; and if the BIOS enabled SSC we can
* just keep using it to avoid unnecessary flicker. Whereas if the
* BIOS isn't using it, don't assume it will work even if the VBT
* indicates as much.
*/
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) {
bool bios_lvds_use_ssc = intel_de_read(display,
PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE;
@@ -2637,6 +2613,15 @@ void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
PIPE_LINK_N2(display, transcoder));
}
+static bool
+transcoder_has_vrr(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ return HAS_VRR(display) && !transcoder_is_dsi(cpu_transcoder);
+}
+
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -2701,6 +2686,15 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
+ /*
+ * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
+ * bits are not required. Since the support for these bits is going to
+ * be deprecated in upcoming platforms, avoid writing these bits for the
+ * platforms that do not use legacy Timing Generator.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_vtotal = 1;
+
intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
@@ -2720,6 +2714,19 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
intel_de_write(display, TRANS_VTOTAL(display, pipe),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
+
+ if (DISPLAY_VER(display) >= 30) {
+ /*
+ * Address issues for resolutions with high refresh rate that
+ * have small Hblank, specifically where Hblank is smaller than
+ * one MTP. Simulations indicate this will address the
+ * jitter issues that currently cause BS to be immediately
+ * followed by BE, which DPRX devices are unable to handle.
+ * https://groups.vesa.org/wg/DP/document/20494
+ */
+ intel_de_write(display, DP_MIN_HBLANK_CTL(cpu_transcoder),
+ crtc_state->min_hblank);
+ }
}
static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
@@ -2762,12 +2769,24 @@ static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc
VBLANK_START(crtc_vblank_start - 1) |
VBLANK_END(crtc_vblank_end - 1));
/*
+ * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
+ * bits are not required. Since the support for these bits is going to
+ * be deprecated in upcoming platforms, avoid writing these bits for the
+ * platforms that do not use legacy Timing Generator.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_vtotal = 1;
+
+ /*
* The double buffer latch point for TRANS_VTOTAL
* is the transcoder's undelayed vblank.
*/
intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
+
+ intel_vrr_set_fixed_rr_timings(crtc_state);
+ intel_vrr_transcoder_enable(crtc_state);
}
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
@@ -2851,6 +2870,10 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
adjusted_mode->crtc_vdisplay +
intel_de_read(display,
TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder));
+
+ if (DISPLAY_VER(display) >= 30)
+ pipe_config->min_hblank = intel_de_read(display,
+ DP_MIN_HBLANK_CTL(cpu_transcoder));
}
static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
@@ -3833,7 +3856,6 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_display_power_domain_set *power_domain_set)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder;
enum port port;
u32 tmp;
@@ -3855,7 +3877,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
* registers/MIPI[BXT]. We can break out here early, since we
* need the same DSI PLL to be enabled for both DSI ports.
*/
- if (!bxt_dsi_pll_is_enabled(dev_priv))
+ if (!bxt_dsi_pll_is_enabled(display))
break;
/* XXX: this works for video mode only */
@@ -3918,7 +3940,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
DISPLAY_VER(display) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
- if (HAS_VRR(display) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ if (transcoder_has_vrr(pipe_config))
intel_vrr_get_config(pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
@@ -4145,8 +4167,6 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
int linetime_wm;
@@ -4159,7 +4179,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
/* Display WA #1135: BXT:ALL GLK:ALL */
if ((display->platform.geminilake || display->platform.broxton) &&
- skl_watermark_ipc_enabled(dev_priv))
+ skl_watermark_ipc_enabled(display))
linetime_wm /= 2;
return min(linetime_wm, 0x1ff);
@@ -5204,6 +5224,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
+ PIPE_CONF_CHECK_I(min_hblank);
+
if (HAS_DOUBLE_BUFFERED_M_N(display)) {
if (!fastset || !pipe_config->update_m_n)
PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -5385,8 +5407,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.vmin);
PIPE_CONF_CHECK_I(vrr.vmax);
PIPE_CONF_CHECK_I(vrr.flipline);
- PIPE_CONF_CHECK_I(vrr.pipeline_full);
- PIPE_CONF_CHECK_I(vrr.guardband);
PIPE_CONF_CHECK_I(vrr.vsync_start);
PIPE_CONF_CHECK_I(vrr.vsync_end);
PIPE_CONF_CHECK_LLI(cmrr.cmrr_m);
@@ -5394,6 +5414,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(cmrr.enable);
}
+ if (!fastset || intel_vrr_always_use_vrr_tg(display)) {
+ PIPE_CONF_CHECK_I(vrr.pipeline_full);
+ PIPE_CONF_CHECK_I(vrr.guardband);
+ }
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_LLI
@@ -6005,22 +6030,16 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (!plane->async_flip)
continue;
- if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->modifier)) {
+ if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->format->format,
+ new_plane_state->hw.fb->modifier)) {
drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
+ "[PLANE:%d:%s] pixel format %p4cc / modifier 0x%llx does not support async flip\n",
plane->base.base.id, plane->base.name,
+ &new_plane_state->hw.fb->format->format,
new_plane_state->hw.fb->modifier);
return -EINVAL;
}
- if (intel_format_info_is_yuv_semiplanar(new_plane_state->hw.fb->format,
- new_plane_state->hw.fb->modifier)) {
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] Planar formats do not support async flips\n",
- plane->base.base.id, plane->base.name);
- return -EINVAL;
- }
-
/*
* We turn the first async flip request into a sync flip
* so that we can reconfigure the plane (eg. change modifier).
@@ -6427,7 +6446,7 @@ int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- ret = intel_bw_atomic_check(state);
+ ret = intel_bw_atomic_check(state, any_ms);
if (ret)
goto fail;
@@ -6531,7 +6550,6 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
{
struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
* Update pipe size and adjust fitter if needed: the reason for this is
@@ -6547,7 +6565,7 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
if (DISPLAY_VER(display) >= 9) {
if (new_crtc_state->pch_pfit.enabled)
skl_pfit_enable(new_crtc_state);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
if (new_crtc_state->pch_pfit.enabled)
ilk_pfit_enable(new_crtc_state);
else if (old_crtc_state->pch_pfit.enabled)
@@ -6648,6 +6666,8 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_update_active_timings(pipe_crtc_state, false);
}
+ intel_psr_notify_pipe_change(state, crtc, true);
+
display->funcs.display->crtc_enable(state, crtc);
/* vblanks work again, re-enable pipe CRC. */
@@ -6767,6 +6787,8 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
intel_crtc_joined_pipe_mask(old_crtc_state))
intel_crtc_disable_pipe_crc(pipe_crtc);
+ intel_psr_notify_pipe_change(state, crtc, false);
+
display->funcs.display->crtc_disable(state, crtc);
for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
@@ -7229,7 +7251,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
@@ -7443,7 +7465,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* toggling overhead at and above 60 FPS.
*/
intel_display_power_put_async_delay(display, POWER_DOMAIN_DC_OFF, wakeref, 17);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
/*
* Defer the cleanup of the old state to a separate worker to not
@@ -7515,10 +7537,9 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
{
struct intel_display *display = to_intel_display(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ state->wakeref = intel_display_rpm_get(display);
/*
* The intel_legacy_cursor_update() fast path takes care
@@ -7552,7 +7573,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_dbg_atomic(display->drm,
"Preparing state failed with %i\n", ret);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
return ret;
}
@@ -7562,7 +7583,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_atomic_helper_unprepare_planes(dev, &state->base);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
return ret;
}
@@ -7624,15 +7645,13 @@ static bool ilk_has_edp_a(struct intel_display *display)
static bool intel_ddi_crt_present(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 9)
return false;
if (display->platform.haswell_ult || display->platform.broadwell_ult)
return false;
- if (HAS_PCH_LPT_H(dev_priv) &&
+ if (HAS_PCH_LPT_H(display) &&
intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
@@ -7654,7 +7673,6 @@ bool assert_port_valid(struct intel_display *display, enum port port)
void intel_setup_outputs(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
bool dpd_is_edp = false;
@@ -7670,8 +7688,8 @@ void intel_setup_outputs(struct intel_display *display)
intel_bios_for_each_encoder(display, intel_ddi_init);
if (display->platform.geminilake || display->platform.broxton)
- vlv_dsi_init(dev_priv);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ vlv_dsi_init(display);
+ } else if (HAS_PCH_SPLIT(display)) {
int found;
/*
@@ -7679,7 +7697,7 @@ void intel_setup_outputs(struct intel_display *display)
* to prevent the registration of both eDP and LVDS and the
* incorrect sharing of the PPS.
*/
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);
@@ -7754,15 +7772,15 @@ void intel_setup_outputs(struct intel_display *display)
g4x_hdmi_init(display, CHV_HDMID, PORT_D);
}
- vlv_dsi_init(dev_priv);
+ vlv_dsi_init(display);
} else if (display->platform.pineview) {
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
} else if (IS_DISPLAY_VER(display, 3, 4)) {
bool found = false;
if (display->platform.mobile)
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
@@ -7804,10 +7822,10 @@ void intel_setup_outputs(struct intel_display *display)
intel_tv_init(display);
} else if (DISPLAY_VER(display) == 2) {
if (display->platform.i85x)
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
- intel_dvo_init(dev_priv);
+ intel_dvo_init(display);
}
for_each_intel_encoder(display->drm, encoder) {
@@ -7817,7 +7835,7 @@ void intel_setup_outputs(struct intel_display *display)
intel_encoder_possible_clones(encoder);
}
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
drm_helper_move_panel_connectors_to_head(display->drm);
}
@@ -8039,13 +8057,11 @@ static const struct intel_display_funcs i9xx_display_funcs = {
*/
void intel_init_display_hooks(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 9) {
display->funcs.display = &skl_display_funcs;
} else if (HAS_DDI(display)) {
display->funcs.display = &ddi_display_funcs;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
display->funcs.display = &pch_split_display_funcs;
} else if (display->platform.cherryview ||
display->platform.valleyview) {
@@ -8081,6 +8097,9 @@ retry:
goto out;
}
+ if (!crtc_state->hw.active)
+ crtc_state->inherited = false;
+
if (crtc_state->hw.active) {
struct intel_encoder *encoder;
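[Editor's sketch — not part of the patch.] Besides the same to_intel_display() conversions, the intel_display.c hunks above swap the driver-wide runtime-PM calls for display-scoped wrappers: intel_runtime_pm_get(&dev_priv->runtime_pm) becomes intel_display_rpm_get(display), and the matching put takes (display, wakeref). A minimal sketch of the new shape (illustrative only; it assumes intel_display_rpm.h, which the hunk adds to the includes, and uses the struct ref_tracker * wakeref type visible in the debugfs hunk below):

static void example_with_display_rpm(struct intel_display *display)
{
	struct ref_tracker *wakeref;

	/* Display-scoped runtime-PM reference, no drm_i915_private involved. */
	wakeref = intel_display_rpm_get(display);

	/* ... hardware access that needs the device awake ... */

	intel_display_rpm_put(display, wakeref);
}

For short critical sections the debugfs hunk below uses the scoped form, with_intel_display_rpm(display), instead of an explicit get/put pair.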
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index eeb7ae3eaea8..b4937e102360 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -21,17 +21,15 @@
#include "intel_display_limits.h"
#include "intel_display_params.h"
#include "intel_display_power.h"
+#include "intel_dmc_wl.h"
#include "intel_dpll_mgr.h"
#include "intel_fbc.h"
#include "intel_global_state.h"
#include "intel_gmbus.h"
#include "intel_opregion.h"
-#include "intel_dmc_wl.h"
+#include "intel_pch.h"
#include "intel_wm_types.h"
-struct task_struct;
-
-struct drm_i915_private;
struct drm_property;
struct drm_property_blob;
struct i915_audio_component;
@@ -52,6 +50,7 @@ struct intel_hotplug_funcs;
struct intel_initial_plane_config;
struct intel_opregion;
struct intel_overlay;
+struct task_struct;
/* Amount of SAGV/QGV points, BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8
@@ -80,7 +79,7 @@ struct intel_display_funcs {
/* functions used for watermark calcs for display. */
struct intel_wm_funcs {
/* update_wm is for legacy wm management */
- void (*update_wm)(struct drm_i915_private *dev_priv);
+ void (*update_wm)(struct intel_display *display);
int (*compute_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*initial_watermarks)(struct intel_atomic_state *state,
@@ -90,8 +89,8 @@ struct intel_wm_funcs {
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
- void (*get_hw_state)(struct drm_i915_private *i915);
- void (*sanitize)(struct drm_i915_private *i915);
+ void (*get_hw_state)(struct intel_display *display);
+ void (*sanitize)(struct intel_display *display);
};
struct intel_audio_state {
@@ -160,6 +159,7 @@ struct intel_hotplug {
struct {
unsigned long last_jiffies;
int count;
+ int blocked_count;
enum {
HPD_ENABLED = 0,
HPD_DISABLED = 1,
@@ -170,8 +170,8 @@ struct intel_hotplug {
u32 retry_bits;
struct delayed_work reenable_work;
- u32 long_port_mask;
- u32 short_port_mask;
+ u32 long_hpd_pin_mask;
+ u32 short_hpd_pin_mask;
struct work_struct dig_port_work;
struct work_struct poll_init_work;
@@ -179,7 +179,7 @@ struct intel_hotplug {
/*
* Queuing of hotplug_work, reenable_work and poll_init_work is
- * enabled. Protected by drm_i915_private::irq_lock.
+ * enabled. Protected by intel_display::irq::lock.
*/
bool detection_work_enabled;
@@ -288,6 +288,9 @@ struct intel_display {
/* Platform (and subplatform, if any) identification */
struct intel_display_platforms platform;
+ /* Intel PCH: where the south display engine lives */
+ enum intel_pch pch_type;
+
/* Display functions */
struct {
/* Top level crtc-ish functions */
@@ -425,7 +428,7 @@ struct intel_display {
* reused when sending message to gsc cs.
* this is only populated post Meteorlake
*/
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct intel_hdcp_gsc_context *gsc_context;
/* Mutex to protect the above hdcp related values. */
struct mutex hdcp_mutex;
} hdcp;
@@ -453,6 +456,9 @@ struct intel_display {
} ips;
struct {
+ /* protects the irq masks */
+ spinlock_t lock;
+
/*
* Most platforms treat the display irq block as an always-on
* power domain. vlv/chv can disable it at runtime and need
@@ -465,9 +471,9 @@ struct intel_display {
/* For i915gm/i945gm vblank irq workaround */
u8 vblank_enabled;
- int vblank_wa_num_pipes;
+ int vblank_enable_count;
- struct work_struct vblank_dc_work;
+ struct work_struct vblank_notify_work;
u32 de_irq_mask[I915_MAX_PIPES];
u32 pipestat_irq_mask[I915_MAX_PIPES];
@@ -574,6 +580,8 @@ struct intel_display {
struct intel_vbt_data vbt;
struct intel_dmc_wl wl;
struct intel_wm wm;
+
+ struct work_struct psr_dc5_dc6_wa_work;
};
#endif /* __INTEL_DISPLAY_CORE_H__ */
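[Editor's sketch — not part of the patch.] The intel_display_core.h hunks above fold more state into struct intel_display: a pch_type field, renamed hotplug pin masks (long_hpd_pin_mask/short_hpd_pin_mask), the vblank_enable_count/vblank_notify_work rename, and a new spinlock_t lock inside the irq substruct that now guards the irq masks (the hotplug comment is updated to point at intel_display::irq::lock). A hypothetical helper, sketched only from that struct layout, showing how a caller would be expected to take the new lock around a mask update:

/*
 * Hypothetical example - not from the patch; field names come from the
 * struct definition above.
 */
static void example_mask_de_irq(struct intel_display *display,
				enum pipe pipe, u32 bits)
{
	spin_lock_irq(&display->irq.lock);	/* "protects the irq masks" */
	display->irq.de_irq_mask[pipe] |= bits;
	spin_unlock_irq(&display->irq.lock);
}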
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index fdedf65bee53..8d0a1779dd19 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -7,11 +7,12 @@
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include "hsw_ips.h"
-#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "i9xx_wm_regs.h"
@@ -24,6 +25,7 @@
#include "intel_display_debugfs_params.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -54,6 +56,8 @@ static int intel_display_caps(struct seq_file *m, void *data)
struct intel_display *display = node_to_intel_display(m->private);
struct drm_printer p = drm_seq_file_printer(m);
+ drm_printf(&p, "PCH type: %d\n", INTEL_PCH_TYPE(display));
+
intel_display_device_info_print(DISPLAY_INFO(display),
DISPLAY_RUNTIME_INFO(display), &p);
intel_display_params_dump(&display->params, display->drm->driver->name, &p);
@@ -81,7 +85,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
static int i915_sr_status(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
intel_wakeref_t wakeref;
bool sr_enabled = false;
@@ -89,7 +92,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
if (DISPLAY_VER(display) >= 9)
/* no global SR status; inspect per-plane WM */;
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
sr_enabled = intel_de_read(display, WM1_LP_ILK) & WM_LP_ENABLE;
else if (display->platform.i965gm || display->platform.g4x ||
display->platform.i945g || display->platform.i945gm)
@@ -554,6 +557,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
seq_printf(m, "\tpipe src=" DRM_RECT_FMT ", dither=%s, bpp=%d\n",
DRM_RECT_ARG(&crtc_state->pipe_src),
str_yes_no(crtc_state->dither), crtc_state->pipe_bpp);
+ seq_printf(m, "\tport_clock=%d, lane_count=%d\n",
+ crtc_state->port_clock, crtc_state->lane_count);
intel_scaler_info(m, crtc);
@@ -580,13 +585,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
static int i915_display_info(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
drm_modeset_lock_all(display->drm);
@@ -605,18 +609,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
drm_modeset_unlock_all(display->drm);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_display_capabilities(struct seq_file *m, void *unused)
-{
- struct intel_display *display = node_to_intel_display(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- intel_display_device_info_print(DISPLAY_INFO(display),
- DISPLAY_RUNTIME_INFO(display), &p);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
@@ -690,14 +683,11 @@ static bool
intel_lpsp_power_well_enabled(struct intel_display *display,
enum i915_power_well_id power_well_id)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- intel_wakeref_t wakeref;
bool is_enabled;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- is_enabled = intel_display_power_well_is_enabled(display,
- power_well_id);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ with_intel_display_rpm(display)
+ is_enabled = intel_display_power_well_is_enabled(display,
+ power_well_id);
return is_enabled;
}
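intel_lpsp_power_well_enabled() now brackets the power-well query with the with_intel_display_rpm() helper instead of explicit get/put calls. The helper's definition is not part of this hunk; the usual shape of such a scoped-guard macro is a degenerate for loop that acquires the reference, runs the body once, and releases it. A hedged userspace sketch of that pattern follows; with_rpm(), rpm_get() and rpm_put() are invented stand-ins, not the driver's API.

    #include <stdio.h>

    static int wakeref_count;   /* stand-in for a runtime-PM reference count */

    static void rpm_get(void) { wakeref_count++; }
    static void rpm_put(void) { wakeref_count--; }

    /*
     * Run the following statement/block exactly once with the reference held.
     * Caveat: a bare "break" inside the body would skip the release, so the
     * body must fall out of the scope normally.
     */
    #define with_rpm() \
        for (int _once = (rpm_get(), 1); _once; rpm_put(), _once = 0)

    int main(void)
    {
        int enabled = 0;

        with_rpm()
            enabled = wakeref_count > 0;   /* "hardware access" goes here */

        printf("enabled=%d, refs after scope=%d\n", enabled, wakeref_count);
        return 0;
    }

The benefit over open-coded get/put, as in the removed lines above, is that the release cannot be forgotten on any path that leaves the scope normally.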
@@ -820,7 +810,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
- {"i915_display_capabilities", i915_display_capabilities, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_ddb_info", i915_ddb_info, 0},
@@ -829,7 +818,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
void intel_display_debugfs_register(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root,
@@ -844,10 +832,10 @@ void intel_display_debugfs_register(struct intel_display *display)
intel_dmc_debugfs_register(display);
intel_dp_test_debugfs_register(display);
intel_fbc_debugfs_register(display);
- intel_hpd_debugfs_register(i915);
+ intel_hpd_debugfs_register(display);
intel_opregion_debugfs_register(display);
intel_psr_debugfs_register(display);
- intel_wm_debugfs_register(i915);
+ intel_wm_debugfs_register(display);
intel_display_debugfs_params(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 738ae522c8f4..90d714598664 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -3,11 +3,13 @@
* Copyright © 2023 Intel Corporation
*/
-#include <drm/intel/pciids.h>
-#include <drm/drm_color_mgmt.h>
#include <linux/pci.h>
-#include "i915_drv.h"
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+#include <drm/intel/pciids.h>
+
#include "i915_reg.h"
#include "intel_cx0_phy_regs.h"
#include "intel_de.h"
@@ -1711,7 +1713,6 @@ void intel_display_device_remove(struct intel_display *display)
static void __intel_display_device_info_runtime_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(display);
enum pipe pipe;
@@ -1775,7 +1776,7 @@ static void __intel_display_device_info_runtime_init(struct intel_display *displ
goto display_fused_off;
}
- if (IS_DISPLAY_VER(display, 7, 8) && HAS_PCH_SPLIT(i915)) {
+ if (IS_DISPLAY_VER(display, 7, 8) && HAS_PCH_SPLIT(display)) {
u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
u32 sfuse_strap = intel_de_read(display, SFUSE_STRAP);
@@ -1790,7 +1791,7 @@ static void __intel_display_device_info_runtime_init(struct intel_display *displ
*/
if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
- (HAS_PCH_CPT(i915) &&
+ (HAS_PCH_CPT(display) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
drm_info(display->drm,
"Display fused off, disabling\n");
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 717286981687..87c666792c0d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -143,9 +143,11 @@ struct intel_display_platforms {
#define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5)
+#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
+#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_CMTG(__display) (!(__display)->platform.dg2 && DISPLAY_VER(__display) >= 13)
#define HAS_CUR_FBC(__display) (!HAS_GMCH(__display) && IS_DISPLAY_VER(__display, 7, 13))
#define HAS_D12_PLANE_MINIMIZATION(__display) ((__display)->platform.rocketlake || (__display)->platform.alderlake_s)
@@ -156,19 +158,21 @@ struct intel_display_platforms {
#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
-#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
#define HAS_DSB(__display) (DISPLAY_INFO(__display)->has_dsb)
#define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc)
+#define HAS_DSC_3ENGINES(__display) (DISPLAY_VERx100(__display) == 1401 && HAS_DSC(__display))
#define HAS_DSC_MST(__display) (DISPLAY_VER(__display) >= 12 && HAS_DSC(__display))
#define HAS_FBC(__display) (DISPLAY_RUNTIME_INFO(__display)->fbc_mask != 0)
#define HAS_FBC_DIRTY_RECT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg)
#define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3)
-#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake)
+#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch)
+#define HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx)
#define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc)
#define HAS_IPS(__display) ((__display)->platform.haswell_ult || (__display)->platform.broadwell)
@@ -189,10 +193,7 @@ struct intel_display_platforms {
((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \
HAS_DSC(__display))
#define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11)
-#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
-#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
#define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask))
-#define I915_HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical)
#define SUPPORTS_TV(__display) (DISPLAY_INFO(__display)->supports_tv)
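This hunk mostly re-sorts the HAS_*() feature macros alphabetically and renames I915_HAS_HOTPLUG() to HAS_HOTPLUG(); each macro is a predicate over the display version or a platform/runtime flag, and composites such as HAS_DSC_MST() simply combine them. A toy sketch of that style of version-gated feature check, where struct display and its fields are invented for illustration:

    #include <stdio.h>

    struct display { int ver; int has_dsc; };

    #define DISPLAY_VER(d)   ((d)->ver)
    #define HAS_DSC(d)       ((d)->has_dsc)
    /* Composite checks are plain boolean expressions over the basic ones. */
    #define HAS_DSC_MST(d)   (DISPLAY_VER(d) >= 12 && HAS_DSC(d))

    int main(void)
    {
        struct display d = { .ver = 14, .has_dsc = 1 };

        printf("DSC over MST: %s\n", HAS_DSC_MST(&d) ? "yes" : "no");
        return 0;
    }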
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 31740a677dd8..411fe7b918a7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -54,6 +54,7 @@
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
+#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_vga.h"
#include "intel_wm.h"
@@ -82,7 +83,6 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev)
void intel_display_driver_init_hw(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state;
if (!HAS_DISPLAY(display))
@@ -94,7 +94,7 @@ void intel_display_driver_init_hw(struct intel_display *display)
intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
cdclk_state->logical = cdclk_state->actual = display->cdclk.hw;
- intel_display_wa_apply(i915);
+ intel_display_wa_apply(display);
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
@@ -181,7 +181,8 @@ static void intel_plane_possible_crtcs_init(struct intel_display *display)
void intel_display_driver_early_probe(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_pch_detect(display);
if (!HAS_DISPLAY(display))
return;
@@ -193,12 +194,12 @@ void intel_display_driver_early_probe(struct intel_display *display)
mutex_init(&display->pps.mutex);
mutex_init(&display->hdcp.hdcp_mutex);
- intel_display_irq_init(i915);
+ intel_display_irq_init(display);
intel_dkl_phy_init(display);
intel_color_init_hooks(display);
intel_init_cdclk_hooks(display);
intel_audio_hooks_init(display);
- intel_dpll_init_clock_hook(i915);
+ intel_dpll_init_clock_hook(display);
intel_init_display_hooks(display);
intel_fdi_init_hook(display);
intel_dmc_wl_init(display);
@@ -226,6 +227,8 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
if (ret)
goto cleanup_bios;
+ intel_psr_dc5_dc6_wa_init(display);
+
/* FIXME: completely on the wrong abstraction layer */
ret = intel_power_domains_init(display);
if (ret < 0)
@@ -241,31 +244,45 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
intel_dmc_init(display);
display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
+ if (!display->wq.modeset) {
+ ret = -ENOMEM;
+ goto cleanup_vga_client_pw_domain_dmc;
+ }
+
display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ if (!display->wq.flip) {
+ ret = -ENOMEM;
+ goto cleanup_wq_modeset;
+ }
+
display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
+ if (!display->wq.cleanup) {
+ ret = -ENOMEM;
+ goto cleanup_wq_flip;
+ }
intel_mode_config_init(display);
ret = intel_cdclk_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
ret = intel_color_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
- ret = intel_dbuf_init(i915);
+ ret = intel_dbuf_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
- ret = intel_bw_init(i915);
+ ret = intel_bw_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
ret = intel_pmdemand_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
intel_init_quirks(display);
@@ -273,6 +290,12 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
return 0;
+cleanup_wq_cleanup:
+ destroy_workqueue(display->wq.cleanup);
+cleanup_wq_flip:
+ destroy_workqueue(display->wq.flip);
+cleanup_wq_modeset:
+ destroy_workqueue(display->wq.modeset);
cleanup_vga_client_pw_domain_dmc:
intel_dmc_fini(display);
intel_power_domains_driver_remove(display);
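The newly added error paths in intel_display_driver_probe_noirq() follow the usual kernel pattern: each allocation that can fail jumps to a label that unwinds only what was already set up, in reverse order of acquisition. A self-contained userspace sketch of that goto-unwind pattern; the resource names are made up for illustration and are not the driver's workqueues.

    #include <stdio.h>
    #include <stdlib.h>

    static void *alloc_res(const char *name) { printf("alloc %s\n", name); return malloc(1); }
    static void free_res(void *p, const char *name) { printf("free %s\n", name); free(p); }

    static int probe(void)
    {
        void *modeset, *flip, *cleanup;
        int ret;

        modeset = alloc_res("modeset");
        if (!modeset) {
            ret = -1;                   /* -ENOMEM in the kernel */
            goto out;
        }

        flip = alloc_res("flip");
        if (!flip) {
            ret = -1;
            goto err_modeset;
        }

        cleanup = alloc_res("cleanup");
        if (!cleanup) {
            ret = -1;
            goto err_flip;
        }

        return 0;                       /* success: keep all three */

    err_flip:
        free_res(flip, "flip");         /* unwind in reverse order */
    err_modeset:
        free_res(modeset, "modeset");
    out:
        return ret;
    }

    int main(void) { return probe() ? 1 : 0; }

Because later labels fall through to earlier ones, each failure point frees exactly the resources acquired before it, which is why the hunk also redirects the pre-existing cleanup_vga_client_pw_domain_dmc jumps to the new cleanup_wq_cleanup label.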
@@ -315,11 +338,9 @@ static void set_display_access(struct intel_display *display,
*/
void intel_display_driver_enable_user_access(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
set_display_access(display, true, NULL);
- intel_hpd_enable_detection_work(i915);
+ intel_hpd_enable_detection_work(display);
}
/**
@@ -341,9 +362,7 @@ void intel_display_driver_enable_user_access(struct intel_display *display)
*/
void intel_display_driver_disable_user_access(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- intel_hpd_disable_detection_work(i915);
+ intel_hpd_disable_detection_work(display);
set_display_access(display, false, current);
}
@@ -422,14 +441,13 @@ bool intel_display_driver_check_access(struct intel_display *display)
/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
int ret;
if (!HAS_DISPLAY(display))
return 0;
- intel_wm_init(i915);
+ intel_wm_init(display);
intel_panel_sanitize_ssc(display);
@@ -460,8 +478,6 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_hti_init(display);
- /* Just disable it once at startup */
- intel_vga_disable(display);
intel_setup_outputs(display);
ret = intel_dp_tunnel_mgr_init(display);
@@ -471,7 +487,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_display_driver_disable_user_access(display);
drm_modeset_lock_all(display->drm);
- intel_modeset_setup_hw_state(i915, display->drm->mode_config.acquire_ctx);
+ intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
intel_acpi_assign_connector_fwnodes(display);
drm_modeset_unlock_all(display->drm);
@@ -483,7 +499,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
* since the watermark calculation done here will use pstate->fb.
*/
if (!HAS_GMCH(display))
- ilk_wm_sanitize(i915);
+ ilk_wm_sanitize(display);
return 0;
@@ -498,7 +514,6 @@ err_mode_config:
/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (!HAS_DISPLAY(display))
@@ -524,16 +539,15 @@ int intel_display_driver_probe(struct intel_display *display)
intel_overlay_setup(display);
/* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(i915);
+ intel_hpd_init(display);
- skl_watermark_ipc_init(i915);
+ skl_watermark_ipc_init(display);
return 0;
}
void intel_display_driver_register(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
"i915 display info:");
@@ -558,9 +572,9 @@ void intel_display_driver_register(struct intel_display *display)
* fbdev->async_cookie.
*/
drm_kms_helper_poll_init(display->drm);
- intel_hpd_poll_disable(i915);
+ intel_hpd_poll_disable(display);
- intel_fbdev_setup(i915);
+ intel_fbdev_setup(display);
intel_display_device_info_print(DISPLAY_INFO(display),
DISPLAY_RUNTIME_INFO(display), &p);
@@ -600,7 +614,7 @@ void intel_display_driver_remove_noirq(struct intel_display *display)
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
- intel_hpd_poll_fini(i915);
+ intel_hpd_poll_fini(display);
intel_unregister_dsm_handler();
@@ -695,13 +709,11 @@ __intel_display_driver_resume(struct intel_display *display,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int ret, i;
- intel_modeset_setup_hw_state(i915, ctx);
- intel_vga_redisable(display);
+ intel_modeset_setup_hw_state(display, ctx);
if (!state)
return 0;
@@ -733,7 +745,6 @@ __intel_display_driver_resume(struct intel_display *display,
void intel_display_driver_resume(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_atomic_state *state = display->restore.modeset_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
@@ -761,7 +772,7 @@ void intel_display_driver_resume(struct intel_display *display)
if (!ret)
ret = __intel_display_driver_resume(display, state, &ctx);
- skl_watermark_ipc_update(i915);
+ skl_watermark_ipc_update(display);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index aa23bb817805..3e73832e5e81 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -5,7 +5,6 @@
#include <drm/drm_vblank.h>
-#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -14,6 +13,8 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_rpm.h"
+#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dmc_wl.h"
@@ -115,9 +116,8 @@ static void intel_pipe_fault_irq_handler(struct intel_display *display,
}
static void
-intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+intel_handle_vblank(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
drm_crtc_handle_vblank(&crtc->base);
@@ -125,59 +125,59 @@ intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
/**
* ilk_update_display_irq - update DEIMR
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+void ilk_update_display_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
new_val = dev_priv->irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != dev_priv->irq_mask &&
- !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
+ !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) {
dev_priv->irq_mask = new_val;
intel_de_write(display, DEIMR, dev_priv->irq_mask);
intel_de_posting_read(display, DEIMR);
}
}
-void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
+void ilk_enable_display_irq(struct intel_display *display, u32 bits)
{
- ilk_update_display_irq(i915, bits, bits);
+ ilk_update_display_irq(display, bits, bits);
}
-void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
+void ilk_disable_display_irq(struct intel_display *display, u32 bits)
{
- ilk_update_display_irq(i915, bits, 0);
+ ilk_update_display_irq(display, bits, 0);
}
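ilk_update_display_irq() and the bdw/ibx helpers that follow share one convention: interrupt_mask selects which IMR bits are being updated, and enabled_irq_mask says which of those should end up enabled; since these are mask registers, an enabled interrupt is a cleared bit and a disabled one is a set bit, while bits outside interrupt_mask are left alone. A standalone sketch of just that arithmetic; BIT_A/B/C are hypothetical interrupt sources, not real register bits.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Update only the bits selected by interrupt_mask; within that set, bits
     * present in enabled_irq_mask are cleared (unmasked) and the rest are set
     * (masked). All other bits of the current value are preserved.
     */
    static uint32_t update_imr(uint32_t imr, uint32_t interrupt_mask,
                               uint32_t enabled_irq_mask)
    {
        uint32_t new_val = imr;

        assert((enabled_irq_mask & ~interrupt_mask) == 0);

        new_val &= ~interrupt_mask;
        new_val |= ~enabled_irq_mask & interrupt_mask;

        return new_val;
    }

    #define BIT_A 0x1u
    #define BIT_B 0x2u
    #define BIT_C 0x4u

    int main(void)
    {
        uint32_t imr = BIT_A | BIT_B | BIT_C;          /* everything masked */

        imr = update_imr(imr, BIT_A | BIT_B, BIT_A);   /* enable A, disable B */
        printf("imr = 0x%x\n", imr);                   /* 0x6: A clear, B and C set */

        return 0;
    }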
/**
* bdw_update_port_irq - update DE port interrupt
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void bdw_update_port_irq(struct drm_i915_private *dev_priv,
+void bdw_update_port_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
u32 old_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
old_val = intel_de_read(display, GEN8_DE_PORT_IMR);
@@ -194,93 +194,92 @@ void bdw_update_port_irq(struct drm_i915_private *dev_priv,
/**
* bdw_update_pipe_irq - update DE pipe interrupt
- * @dev_priv: driver private
+ * @display: display device
* @pipe: pipe whose interrupt to update
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+static void bdw_update_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 interrupt_mask,
u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
- new_val = dev_priv->display.irq.de_irq_mask[pipe];
+ new_val = display->irq.de_irq_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
- dev_priv->display.irq.de_irq_mask[pipe] = new_val;
+ if (new_val != display->irq.de_irq_mask[pipe]) {
+ display->irq.de_irq_mask[pipe] = new_val;
intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]);
intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
}
}
-void bdw_enable_pipe_irq(struct drm_i915_private *i915,
+void bdw_enable_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 bits)
{
- bdw_update_pipe_irq(i915, pipe, bits, bits);
+ bdw_update_pipe_irq(display, pipe, bits, bits);
}
-void bdw_disable_pipe_irq(struct drm_i915_private *i915,
+void bdw_disable_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 bits)
{
- bdw_update_pipe_irq(i915, pipe, bits, 0);
+ bdw_update_pipe_irq(display, pipe, bits, 0);
}
/**
* ibx_display_interrupt_update - update SDEIMR
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+void ibx_display_interrupt_update(struct intel_display *display,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 sdeimr = intel_de_read(display, SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
intel_de_write(display, SDEIMR, sdeimr);
intel_de_posting_read(display, SDEIMR);
}
-void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+void ibx_enable_display_interrupt(struct intel_display *display, u32 bits)
{
- ibx_display_interrupt_update(i915, bits, bits);
+ ibx_display_interrupt_update(display, bits, bits);
}
-void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+void ibx_disable_display_interrupt(struct intel_display *display, u32 bits)
{
- ibx_display_interrupt_update(i915, bits, 0);
+ ibx_display_interrupt_update(display, bits, 0);
}
u32 i915_pipestat_enable_mask(struct intel_display *display,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 status_mask = display->irq.pipestat_irq_mask[pipe];
u32 enable_mask = status_mask << 16;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if (DISPLAY_VER(display) < 5)
goto out;
@@ -318,48 +317,48 @@ out:
return enable_mask;
}
-void i915_enable_pipestat(struct drm_i915_private *dev_priv,
+void i915_enable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
- struct intel_display *display = &dev_priv->display;
- i915_reg_t reg = PIPESTAT(dev_priv, pipe);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
- drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
+ if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
return;
- dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
+ display->irq.pipestat_irq_mask[pipe] |= status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
intel_de_write(display, reg, enable_mask | status_mask);
intel_de_posting_read(display, reg);
}
-void i915_disable_pipestat(struct drm_i915_private *dev_priv,
+void i915_disable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
- struct intel_display *display = &dev_priv->display;
- i915_reg_t reg = PIPESTAT(dev_priv, pipe);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
- drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
+ if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0)
return;
- dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
+ display->irq.pipestat_irq_mask[pipe] &= ~status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
intel_de_write(display, reg, enable_mask | status_mask);
@@ -368,49 +367,41 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_I85X(i915))
+ if (display->platform.i85x)
return true;
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
return true;
- return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915);
+ return IS_DISPLAY_VER(display, 3, 4) && display->platform.mobile;
}
-/**
- * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
- * @dev_priv: i915 device private
- */
-void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
+/* enable ASLE pipestat for OpRegion */
+static void i915_enable_asle_pipestat(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
if (!intel_opregion_asle_present(display))
return;
if (!i915_has_legacy_blc_interrupt(display))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
- if (DISPLAY_VER(dev_priv) >= 4)
- i915_enable_pipestat(dev_priv, PIPE_A,
+ i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
+ if (DISPLAY_VER(display) >= 4)
+ i915_enable_pipestat(display, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
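The pipestat helpers above rely on the PIPESTAT layout used by i915_pipestat_enable_mask(): each status bit in the low half of the register has a matching enable bit sixteen positions higher, hence the status_mask << 16 shift and the writes of enable_mask | status_mask. A small sketch of that packing, with invented status bit values purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define STATUS_TO_ENABLE(status)   ((uint32_t)(status) << 16)

    /* Hypothetical status bits living in the low 16 bits of the register. */
    #define VBLANK_STATUS    0x0004u
    #define CRC_DONE_STATUS  0x0010u

    int main(void)
    {
        uint32_t status_mask = VBLANK_STATUS | CRC_DONE_STATUS;
        uint32_t enable_mask = STATUS_TO_ENABLE(status_mask);

        /* The driver writes both halves in a single register write. */
        printf("write 0x%08x\n", enable_mask | status_mask);
        return 0;
    }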
#if IS_ENABLED(CONFIG_DEBUG_FS)
-static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void display_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe,
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
@@ -427,7 +418,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
* don't trust that one either.
*/
if (pipe_crc->skipped <= 0 ||
- (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
+ (DISPLAY_VER(display) >= 8 && pipe_crc->skipped == 1)) {
pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
return;
@@ -440,20 +431,19 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
}
#else
static inline void
-display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+display_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe,
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4) {}
#endif
-static void flip_done_handler(struct drm_i915_private *i915,
+static void flip_done_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
- spin_lock(&i915->drm.event_lock);
+ spin_lock(&display->drm->event_lock);
if (crtc->flip_done_event) {
trace_intel_crtc_flip_done(crtc);
@@ -461,25 +451,21 @@ static void flip_done_handler(struct drm_i915_private *i915,
crtc->flip_done_event = NULL;
}
- spin_unlock(&i915->drm.event_lock);
+ spin_unlock(&display->drm->event_lock);
}
-static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void hsw_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
-
- display_pipe_crc_irq_handler(dev_priv, pipe,
+ display_pipe_crc_irq_handler(display, pipe,
intel_de_read(display, PIPE_CRC_RES_HSW(pipe)),
0, 0, 0, 0);
}
-static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void ivb_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
-
- display_pipe_crc_irq_handler(dev_priv, pipe,
+ display_pipe_crc_irq_handler(display, pipe,
intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)),
intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)),
intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)),
@@ -487,58 +473,55 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe)));
}
-static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void i9xx_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 res1, res2;
- if (DISPLAY_VER(dev_priv) >= 3)
- res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
+ if (DISPLAY_VER(display) >= 3)
+ res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(display, pipe));
else
res1 = 0;
- if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
- res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
+ res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(display, pipe));
else
res2 = 0;
- display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_de_read(display, PIPE_CRC_RES_RED(dev_priv, pipe)),
- intel_de_read(display, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
- intel_de_read(display, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
+ display_pipe_crc_irq_handler(display, pipe,
+ intel_de_read(display, PIPE_CRC_RES_RED(display, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_GREEN(display, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_BLUE(display, pipe)),
res1, res2);
}
-static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
+static void i9xx_pipestat_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
intel_de_write(display,
- PIPESTAT(dev_priv, pipe),
+ PIPESTAT(display, pipe),
PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS);
- dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
+ display->irq.pipestat_irq_mask[pipe] = 0;
}
}
-void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+void i9xx_pipestat_irq_ack(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- spin_lock(&dev_priv->irq_lock);
+ spin_lock(&display->irq.lock);
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- !dev_priv->display.irq.vlv_display_irqs_enabled) {
- spin_unlock(&dev_priv->irq_lock);
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ !display->irq.vlv_display_irqs_enabled) {
+ spin_unlock(&display->irq.lock);
return;
}
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
i915_reg_t reg;
u32 status_mask, enable_mask, iir_bit = 0;
@@ -566,12 +549,12 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
break;
}
if (iir & iir_bit)
- status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];
+ status_mask |= display->irq.pipestat_irq_mask[pipe];
if (!status_mask)
continue;
- reg = PIPESTAT(dev_priv, pipe);
+ reg = PIPESTAT(display, pipe);
pipe_stats[pipe] = intel_de_read(display, reg) & status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
@@ -589,25 +572,24 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
intel_de_write(display, reg, enable_mask);
}
}
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&display->irq.lock);
}
-void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void i915_pipestat_irq_handler(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -617,22 +599,21 @@ void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(display);
}
-void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void i965_pipestat_irq_handler(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -645,21 +626,20 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_gmbus_irq_handler(display);
}
-void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void valleyview_pipestat_irq_handler(struct intel_display *display,
u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -669,18 +649,17 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_gmbus_irq_handler(display);
}
-static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void ibx_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ibx_hpd_irq_handler(display, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
- drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
+ drm_dbg(display->drm, "PCH audio power change on port %d\n",
port_name(port));
}
@@ -691,26 +670,26 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
- drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
+ drm_dbg(display->drm, "PCH HDCP audio interrupt\n");
if (pch_iir & SDE_AUDIO_TRANS_MASK)
- drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
+ drm_dbg(display->drm, "PCH transcoder audio interrupt\n");
if (pch_iir & SDE_POISON)
- drm_err(&dev_priv->drm, "PCH poison interrupt\n");
+ drm_err(display->drm, "PCH poison interrupt\n");
if (pch_iir & SDE_FDI_MASK) {
- for_each_pipe(dev_priv, pipe)
- drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ for_each_pipe(display, pipe)
+ drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
- drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
+ drm_dbg(display->drm, "PCH transcoder CRC done interrupt\n");
if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
@@ -753,14 +732,13 @@ static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = {
{}
};
-static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
+static void ivb_err_int_handler(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 err_int = intel_de_read(display, GEN7_ERR_INT);
enum pipe pipe;
if (err_int & ERR_INT_POISON)
- drm_err(&dev_priv->drm, "Poison interrupt\n");
+ drm_err(display->drm, "Poison interrupt\n");
if (err_int & ERR_INT_INVALID_GTT_PTE)
drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");
@@ -768,17 +746,17 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
if (err_int & ERR_INT_INVALID_PTE_DATA)
drm_err_ratelimited(display->drm, "Invalid PTE data\n");
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
u32 fault_errors;
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
- if (IS_IVYBRIDGE(dev_priv))
- ivb_pipe_crc_irq_handler(dev_priv, pipe);
+ if (display->platform.ivybridge)
+ ivb_pipe_crc_irq_handler(display, pipe);
else
- hsw_pipe_crc_irq_handler(dev_priv, pipe);
+ hsw_pipe_crc_irq_handler(display, pipe);
}
fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe);
@@ -790,34 +768,32 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
intel_de_write(display, GEN7_ERR_INT, err_int);
}
-static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
+static void cpt_serr_int_handler(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 serr_int = intel_de_read(display, SERR_INT);
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
- drm_err(&dev_priv->drm, "PCH poison interrupt\n");
+ drm_err(display->drm, "PCH poison interrupt\n");
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
intel_pch_fifo_underrun_irq_handler(display, pipe);
intel_de_write(display, SERR_INT, serr_int);
}
-static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void cpt_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ibx_hpd_irq_handler(display, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
- drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
+ drm_dbg(display->drm, "PCH audio power change on port %c\n",
port_name(port));
}
@@ -828,20 +804,20 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
- drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
+ drm_dbg(display->drm, "Audio CP request interrupt\n");
if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
- drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
+ drm_dbg(display->drm, "Audio CP change interrupt\n");
if (pch_iir & SDE_FDI_MASK_CPT) {
- for_each_pipe(dev_priv, pipe)
- drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ for_each_pipe(display, pipe)
+ drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & SDE_ERROR_CPT)
- cpt_serr_int_handler(dev_priv);
+ cpt_serr_int_handler(display);
}
static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe)
@@ -894,14 +870,13 @@ static void ilk_gtt_fault_irq_handler(struct intel_display *display)
}
}
-void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
+void ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
if (de_iir & DE_AUX_CHANNEL_A)
intel_dp_aux_irq_handler(display);
@@ -910,58 +885,57 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
intel_opregion_asle_intr(display);
if (de_iir & DE_POISON)
- drm_err(&dev_priv->drm, "Poison interrupt\n");
+ drm_err(display->drm, "Poison interrupt\n");
if (de_iir & DE_GTT_FAULT)
ilk_gtt_fault_irq_handler(display);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe))
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (de_iir & DE_PLANE_FLIP_DONE(pipe))
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
}
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
u32 pch_iir = intel_de_read(display, SDEIIR);
- if (HAS_PCH_CPT(dev_priv))
- cpt_irq_handler(dev_priv, pch_iir);
+ if (HAS_PCH_CPT(display))
+ cpt_irq_handler(display, pch_iir);
else
- ibx_irq_handler(dev_priv, pch_iir);
+ ibx_irq_handler(display, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
intel_de_write(display, SDEIIR, pch_iir);
}
- if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
- gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
+ if (DISPLAY_VER(display) == 5 && de_iir & DE_PCU_EVENT)
+ ilk_display_rps_irq_handler(display);
}
-void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
+void ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
if (de_iir & DE_ERR_INT_IVB)
- ivb_err_int_handler(dev_priv);
+ ivb_err_int_handler(display);
if (de_iir & DE_EDP_PSR_INT_HSW) {
struct intel_encoder *encoder;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 psr_iir;
@@ -977,35 +951,35 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(display);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
}
/* check event from PCH */
- if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
+ if (!HAS_PCH_NOP(display) && (de_iir & DE_PCH_EVENT_IVB)) {
u32 pch_iir = intel_de_read(display, SDEIIR);
- cpt_irq_handler(dev_priv, pch_iir);
+ cpt_irq_handler(display, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
intel_de_write(display, SDEIIR, pch_iir);
}
}
-static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
+static u32 gen8_de_port_aux_mask(struct intel_display *display)
{
u32 mask;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
return 0;
- else if (DISPLAY_VER(dev_priv) >= 14)
+ else if (DISPLAY_VER(display) >= 14)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB;
- else if (DISPLAY_VER(dev_priv) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -1015,7 +989,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
TGL_DE_PORT_AUX_USBC2 |
TGL_DE_PORT_AUX_USBC3 |
TGL_DE_PORT_AUX_USBC4;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -1027,12 +1001,12 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
TGL_DE_PORT_AUX_USBC6;
mask = GEN8_AUX_CHANNEL_A;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (DISPLAY_VER(dev_priv) == 11) {
+ if (DISPLAY_VER(display) == 11) {
mask |= ICL_AUX_CHANNEL_F;
mask |= ICL_AUX_CHANNEL_E;
}
@@ -1040,10 +1014,8 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
return mask;
}
-static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
+static u32 gen8_de_pipe_fault_mask(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
if (DISPLAY_VER(display) >= 14)
return MTL_PIPEDMC_ATS_FAULT |
MTL_PLANE_ATS_FAULT |
@@ -1195,15 +1167,14 @@ gen8_pipe_fault_handlers(struct intel_display *display)
return bdw_pipe_fault_handlers;
}
-static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
+static void intel_pmdemand_irq_handler(struct intel_display *display)
{
- wake_up_all(&dev_priv->display.pmdemand.waitqueue);
+ wake_up_all(&display->pmdemand.waitqueue);
}
static void
-gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+gen8_de_misc_irq_handler(struct intel_display *display, u32 iir)
{
- struct intel_display *display = &dev_priv->display;
bool found = false;
if (HAS_DBUF_OVERLAP_DETECTION(display)) {
@@ -1213,20 +1184,20 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
}
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
if (iir & (XELPDP_PMDEMAND_RSP |
XELPDP_PMDEMAND_RSPTOUT_ERR)) {
if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Error waiting for Punit PM Demand Response\n");
- intel_pmdemand_irq_handler(dev_priv);
+ intel_pmdemand_irq_handler(display);
found = true;
}
if (iir & XELPDP_RM_TIMEOUT) {
u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);
- drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
+ drm_warn(display->drm, "Register Access Timeout = 0x%x\n", val);
found = true;
}
} else if (iir & GEN8_DE_MISC_GSE) {
@@ -1239,12 +1210,12 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
u32 psr_iir;
i915_reg_t iir_reg;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- iir_reg = TRANS_PSR_IIR(dev_priv,
- intel_dp->psr.transcoder);
+ if (DISPLAY_VER(display) >= 12)
+ iir_reg = TRANS_PSR_IIR(display,
+ intel_dp->psr.transcoder);
else
iir_reg = EDP_PSR_IIR;
@@ -1256,19 +1227,18 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_psr_irq_handler(intel_dp, psr_iir);
/* prior GEN12 only have one EDP PSR */
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
break;
}
}
if (!found)
- drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
+ drm_err(display->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}
-static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
+static void gen11_dsi_te_interrupt_handler(struct intel_display *display,
u32 te_trigger)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe = INVALID_PIPE;
enum transcoder dsi_trans;
enum port port;
@@ -1278,7 +1248,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
* Incase of dual link, TE comes from DSI_1
* this is to check if dual link is enabled
*/
- val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_DSI_0));
val &= PORT_SYNC_MODE_ENABLE;
/*
@@ -1294,12 +1264,12 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
val = val & OP_MODE_MASK;
if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
- drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
+ drm_err(display->drm, "DSI trancoder not configured in command mode\n");
return;
}
/* Get PIPE for handling VBLANK event */
- val = intel_de_read(display, TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans));
switch (val & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
pipe = PIPE_A;
@@ -1311,28 +1281,27 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
pipe = PIPE_C;
break;
default:
- drm_err(&dev_priv->drm, "Invalid PIPE\n");
+ drm_err(display->drm, "Invalid PIPE\n");
return;
}
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
}
-static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
+static u32 gen8_de_pipe_flip_done_mask(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return GEN9_PIPE_PLANE1_FLIP_DONE;
else
return GEN8_PIPE_PRIMARY_FLIP_DONE;
}
-static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
+static void gen8_read_and_ack_pch_irqs(struct intel_display *display, u32 *pch_iir, u32 *pica_iir)
{
- struct intel_display *display = &i915->display;
u32 pica_ier = 0;
*pica_iir = 0;
@@ -1346,7 +1315,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
* their flags both in the PICA and SDE IIR.
*/
if (*pch_iir & SDE_PICAINTERRUPT) {
- drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
+ drm_WARN_ON(display->drm, INTEL_PCH_TYPE(display) < PCH_MTL);
pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
*pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
@@ -1359,32 +1328,31 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}
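gen8_read_and_ack_pch_irqs() saves and clears PICAINTERRUPT_IER around servicing the PICA and SDE IIRs and only writes the saved value back at the end, so, as the surrounding comment notes, hotplug events keep their flags consistent in both IIRs and nothing is lost in between. A simplified userspace sketch of that save/mask, service, restore ordering, with plain variables standing in for the registers; it is purely illustrative and does not model the elided parts of the function.

    #include <stdint.h>
    #include <stdio.h>

    /* Fake "registers" standing in for an IER/IIR pair. */
    static uint32_t ier = 0xffffffffu;
    static uint32_t iir = 0x00000005u;

    static uint32_t reg_rmw(uint32_t *reg, uint32_t clear, uint32_t set)
    {
        uint32_t old = *reg;

        *reg = (old & ~clear) | set;
        return old;
    }

    int main(void)
    {
        /* Save and mask: no new events assert while the IIR is serviced. */
        uint32_t saved_ier = reg_rmw(&ier, ~0u, 0);

        uint32_t pending = iir;   /* read the pending events */
        iir = 0;                  /* acknowledge them */
        printf("serviced 0x%x with ier=0x%x\n", pending, ier);

        /* Restore the enable register only after everything is acknowledged. */
        ier = saved_ier;
        printf("ier restored to 0x%x\n", ier);
        return 0;
    }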
-void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
+void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
{
- struct intel_display *display = &dev_priv->display;
u32 iir;
enum pipe pipe;
- drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
+ drm_WARN_ON_ONCE(display->drm, !HAS_DISPLAY(display));
if (master_ctl & GEN8_DE_MISC_IRQ) {
iir = intel_de_read(display, GEN8_DE_MISC_IIR);
if (iir) {
intel_de_write(display, GEN8_DE_MISC_IIR, iir);
- gen8_de_misc_irq_handler(dev_priv, iir);
+ gen8_de_misc_irq_handler(display, iir);
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE MISC)!\n");
}
}
- if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+ if (DISPLAY_VER(display) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
iir = intel_de_read(display, GEN11_DE_HPD_IIR);
if (iir) {
intel_de_write(display, GEN11_DE_HPD_IIR, iir);
- gen11_hpd_irq_handler(dev_priv, iir);
+ gen11_hpd_irq_handler(display, iir);
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied, (DE HPD)!\n");
}
}
@@ -1396,52 +1364,52 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
intel_de_write(display, GEN8_DE_PORT_IIR, iir);
- if (iir & gen8_de_port_aux_mask(dev_priv)) {
+ if (iir & gen8_de_port_aux_mask(display)) {
intel_dp_aux_irq_handler(display);
found = true;
}
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (hotplug_trigger) {
- bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
+ bxt_hpd_irq_handler(display, hotplug_trigger);
found = true;
}
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
if (hotplug_trigger) {
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
found = true;
}
}
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
(iir & BXT_DE_PORT_GMBUS)) {
intel_gmbus_irq_handler(display);
found = true;
}
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
if (te_trigger) {
- gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
+ gen11_dsi_te_interrupt_handler(display, te_trigger);
found = true;
}
}
if (!found)
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"Unexpected DE Port interrupt\n");
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE PORT)!\n");
}
}
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
u32 fault_errors;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
@@ -1449,7 +1417,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE PIPE)!\n");
continue;
}
@@ -1457,36 +1425,36 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
- if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
- flip_done_handler(dev_priv, pipe);
+ if (iir & gen8_de_pipe_flip_done_mask(display))
+ flip_done_handler(display, pipe);
- if (HAS_DSB(dev_priv)) {
+ if (HAS_DSB(display)) {
if (iir & GEN12_DSB_INT(INTEL_DSB_0))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_0);
if (iir & GEN12_DSB_INT(INTEL_DSB_1))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_1);
if (iir & GEN12_DSB_INT(INTEL_DSB_2))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_2);
}
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev_priv, pipe);
+ hsw_pipe_crc_irq_handler(display, pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
- fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
+ fault_errors = iir & gen8_de_pipe_fault_mask(display);
if (fault_errors)
intel_pipe_fault_irq_handler(display,
gen8_pipe_fault_handlers(display),
pipe, fault_errors);
}
- if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
+ if (HAS_PCH_SPLIT(display) && !HAS_PCH_NOP(display) &&
master_ctl & GEN8_DE_PCH_IRQ) {
u32 pica_iir;
@@ -1495,31 +1463,30 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
- gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
+ gen8_read_and_ack_pch_irqs(display, &iir, &pica_iir);
if (iir) {
if (pica_iir)
- xelpdp_pica_irq_handler(dev_priv, pica_iir);
+ xelpdp_pica_irq_handler(display, pica_iir);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_handler(dev_priv, iir);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- spt_irq_handler(dev_priv, iir);
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_irq_handler(display, iir);
+ else if (INTEL_PCH_TYPE(display) >= PCH_SPT)
+ spt_irq_handler(display, iir);
else
- cpt_irq_handler(dev_priv, iir);
+ cpt_irq_handler(display, iir);
} else {
/*
* Like on previous PCH there seems to be something
* fishy going on with forwarding PCH interrupts.
*/
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"The master control interrupt lied (SDE)!\n");
}
}
}
-u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
+u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
{
- struct intel_display *display = &i915->display;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -1532,20 +1499,17 @@ u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
return iir;
}
-void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
+void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir)
{
- struct intel_display *display = &i915->display;
-
if (iir & GEN11_GU_MISC_GSE)
intel_opregion_asle_intr(display);
}
-void gen11_display_irq_handler(struct drm_i915_private *i915)
+void gen11_display_irq_handler(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
u32 disp_ctl;
- disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_display_rpm_assert_block(display);
/*
* GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
* for the display related bits.
@@ -1553,16 +1517,15 @@ void gen11_display_irq_handler(struct drm_i915_private *i915)
disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
- gen8_de_irq_handler(i915, disp_ctl);
+ gen8_de_irq_handler(display, disp_ctl);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_display_rpm_assert_unblock(display);
}
-static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
+static void i915gm_irq_cstate_wa_enable(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- lockdep_assert_held(&i915->drm.vblank_time_lock);
+ lockdep_assert_held(&display->drm->vblank_time_lock);
/*
* Vblank/CRC interrupts fail to wake the device up from C2+.
@@ -1570,117 +1533,116 @@ static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
* the problem. There is a small power cost so we do this
* only when vblank/CRC interrupts are actually enabled.
*/
- if (i915->display.irq.vblank_enabled++ == 0)
+ if (display->irq.vblank_enabled++ == 0)
intel_de_write(display, SCPD0,
_MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
-static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915)
+static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- lockdep_assert_held(&i915->drm.vblank_time_lock);
+ lockdep_assert_held(&display->drm->vblank_time_lock);
- if (--i915->display.irq.vblank_enabled == 0)
+ if (--display->irq.vblank_enabled == 0)
intel_de_write(display, SCPD0,
_MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
-void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable)
+void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
{
- spin_lock_irq(&i915->drm.vblank_time_lock);
+ spin_lock_irq(&display->drm->vblank_time_lock);
if (enable)
- i915gm_irq_cstate_wa_enable(i915);
+ i915gm_irq_cstate_wa_enable(display);
else
- i915gm_irq_cstate_wa_disable(i915);
+ i915gm_irq_cstate_wa_disable(display);
- spin_unlock_irq(&i915->drm.vblank_time_lock);
+ spin_unlock_irq(&display->drm->vblank_time_lock);
}
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_enable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
return 0;
}
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_disable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
- i915gm_irq_cstate_wa_enable(i915);
+ i915gm_irq_cstate_wa_enable(display);
return i8xx_enable_vblank(crtc);
}
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
i8xx_disable_vblank(crtc);
- i915gm_irq_cstate_wa_disable(i915);
+ i915gm_irq_cstate_wa_disable(display);
}
int i965_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe,
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_enable_pipestat(display, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
return 0;
}
void i965_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_disable_pipestat(display, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
int ilk_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(display) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_enable_display_irq(dev_priv, bit);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ ilk_enable_display_irq(display, bit);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
/* Even though there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated.
*/
- if (HAS_PSR(dev_priv))
+ if (HAS_PSR(display))
drm_crtc_vblank_restore(crtc);
return 0;
@@ -1688,15 +1650,15 @@ int ilk_enable_vblank(struct drm_crtc *crtc)
void ilk_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(display) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_disable_display_irq(dev_priv, bit);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ ilk_disable_display_irq(display, bit);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
@@ -1722,44 +1684,36 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
return true;
}
-static void intel_display_vblank_dc_work(struct work_struct *work)
+static void intel_display_vblank_notify_work(struct work_struct *work)
{
struct intel_display *display =
- container_of(work, typeof(*display), irq.vblank_dc_work);
- int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes);
+ container_of(work, typeof(*display), irq.vblank_notify_work);
+ int vblank_enable_count = READ_ONCE(display->irq.vblank_enable_count);
- /*
- * NOTE: intel_display_power_set_target_dc_state is used only by PSR
- * code for DC3CO handling. DC3CO target state is currently disabled in
- * PSR code. If DC3CO is taken into use we need take that into account
- * here as well.
- */
- intel_display_power_set_target_dc_state(display, vblank_wa_num_pipes ? DC_STATE_DISABLE :
- DC_STATE_EN_UPTO_DC6);
+ intel_psr_notify_vblank_enable_disable(display, vblank_enable_count);
}
int bdw_enable_vblank(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
if (gen11_dsi_configure_te(crtc, true))
return 0;
- if (crtc->block_dc_for_vblank && display->irq.vblank_wa_num_pipes++ == 0)
- schedule_work(&display->irq.vblank_dc_work);
+ if (crtc->vblank_psr_notify && display->irq.vblank_enable_count++ == 0)
+ schedule_work(&display->irq.vblank_notify_work);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
/* Even if there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated, so check only for PSR.
*/
- if (HAS_PSR(dev_priv))
+ if (HAS_PSR(display))
drm_crtc_vblank_restore(&crtc->base);
return 0;
@@ -1769,19 +1723,18 @@ void bdw_disable_vblank(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
if (gen11_dsi_configure_te(crtc, false))
return;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
- if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0)
- schedule_work(&display->irq.vblank_dc_work);
+ if (crtc->vblank_psr_notify && --display->irq.vblank_enable_count == 0)
+ schedule_work(&display->irq.vblank_notify_work);
}
static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
@@ -1892,11 +1845,11 @@ void vlv_display_error_irq_handler(struct intel_display *display,
vlv_page_table_error_irq_handler(display, dpinvgtt);
}
-static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+static void _vlv_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
@@ -1904,31 +1857,60 @@ static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
gen2_error_reset(to_intel_uncore(display->drm),
VLV_ERROR_REGS);
- i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- intel_de_rmw(display, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
+ i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
- i9xx_pipestat_irq_reset(dev_priv);
+ i9xx_pipestat_irq_reset(display);
intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
dev_priv->irq_mask = ~0u;
}
-void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+void vlv_display_irq_reset(struct intel_display *display)
{
- if (dev_priv->display.irq.vlv_display_irqs_enabled)
- _vlv_display_irq_reset(dev_priv);
+ spin_lock_irq(&display->irq.lock);
+ if (display->irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_reset(display);
+ spin_unlock_irq(&display->irq.lock);
}
-void i9xx_display_irq_reset(struct drm_i915_private *i915)
+void i9xx_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
- if (I915_HAS_HOTPLUG(i915)) {
- i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
- intel_de_rmw(display, PORT_HOTPLUG_STAT(i915), 0, 0);
+ if (HAS_HOTPLUG(display)) {
+ i915_hotplug_interrupt_update(display, 0xffffffff, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
}
- i9xx_pipestat_irq_reset(i915);
+ i9xx_pipestat_irq_reset(display);
+}
+
+void i915_display_irq_postinstall(struct intel_display *display)
+{
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy.
+ */
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irq(&display->irq.lock);
+
+ i915_enable_asle_pipestat(display);
+}
+
+void i965_display_irq_postinstall(struct intel_display *display)
+{
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy.
+ */
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irq(&display->irq.lock);
+
+ i915_enable_asle_pipestat(display);
}
static u32 vlv_error_mask(void)
@@ -1937,17 +1919,14 @@ static u32 vlv_error_mask(void)
return VLV_ERROR_PAGE_TABLE;
}
-void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+static void _vlv_display_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
- if (!dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
-
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_de_write(display, DPINVGTT,
DPINVGTT_STATUS_MASK_CHV |
DPINVGTT_EN_MASK_CHV);
@@ -1961,9 +1940,9 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- for_each_pipe(dev_priv, pipe)
- i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
+ i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(display, pipe)
+ i915_enable_pipestat(display, pipe, pipestat_mask);
enable_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -1972,53 +1951,76 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
I915_LPE_PIPE_B_INTERRUPT |
I915_MASTER_ERROR_INTERRUPT;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
+ drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u);
dev_priv->irq_mask = ~enable_mask;
intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
}
-void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
+void vlv_display_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ spin_lock_irq(&display->irq.lock);
+ if (display->irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_postinstall(display);
+ spin_unlock_irq(&display->irq.lock);
+}
+
+void ibx_display_irq_reset(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (HAS_PCH_NOP(i915))
+ return;
+
+ gen2_irq_reset(to_intel_uncore(display->drm), SDE_IRQ_REGS);
+
+ if (HAS_PCH_CPT(i915) || HAS_PCH_LPT(i915))
+ intel_de_write(display, SERR_INT, 0xffffffff);
+}
+
+void gen8_display_irq_reset(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
+
+ if (HAS_PCH_SPLIT(i915))
+ ibx_display_irq_reset(display);
}
-void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
+void gen11_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder trans;
- for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
+ for_each_cpu_transcoder_masked(display, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
@@ -2026,10 +2028,10 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
continue;
intel_de_write(display,
- TRANS_PSR_IMR(dev_priv, trans),
+ TRANS_PSR_IMR(display, trans),
0xffffffff);
intel_de_write(display,
- TRANS_PSR_IIR(dev_priv, trans),
+ TRANS_PSR_IIR(display, trans),
0xffffffff);
}
} else {
@@ -2037,7 +2039,7 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
}
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
@@ -2045,55 +2047,55 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
else
intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
}
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_post_enable(struct intel_display *display,
u8 pipe_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
- gen8_de_pipe_flip_done_mask(dev_priv);
+ gen8_de_pipe_flip_done_mask(display);
enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
return;
}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
- ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
+ display->irq.de_irq_mask[pipe],
+ ~display->irq.de_irq_mask[pipe] | extra_ier);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_pre_disable(struct intel_display *display,
u8 pipe_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
return;
}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
@@ -2110,17 +2112,16 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
* to avoid races with the irq handler, assuming we have MSI. Shared legacy
* interrupts could still race.
*/
-static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
+static void ibx_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 mask;
- if (HAS_PCH_NOP(dev_priv))
+ if (HAS_PCH_NOP(display))
return;
- if (HAS_PCH_IBX(dev_priv))
+ if (HAS_PCH_IBX(display))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
- else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
+ else if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
else
mask = SDE_GMBUS_CPT;
@@ -2128,40 +2129,50 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
-void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_enable_display_irqs(struct intel_display *display)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
+ spin_lock_irq(&display->irq.lock);
+
+ if (display->irq.vlv_display_irqs_enabled)
+ goto out;
- dev_priv->display.irq.vlv_display_irqs_enabled = true;
+ display->irq.vlv_display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv)) {
- _vlv_display_irq_reset(dev_priv);
- vlv_display_irq_postinstall(dev_priv);
+ _vlv_display_irq_reset(display);
+ _vlv_display_irq_postinstall(display);
}
+
+out:
+ spin_unlock_irq(&display->irq.lock);
}
-void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_disable_display_irqs(struct intel_display *display)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (!dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
+ spin_lock_irq(&display->irq.lock);
+
+ if (!display->irq.vlv_display_irqs_enabled)
+ goto out;
- dev_priv->display.irq.vlv_display_irqs_enabled = false;
+ display->irq.vlv_display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
- _vlv_display_irq_reset(dev_priv);
+ _vlv_display_irq_reset(display);
+out:
+ spin_unlock_irq(&display->irq.lock);
}
-void ilk_de_irq_postinstall(struct drm_i915_private *i915)
+void ilk_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
u32 display_mask, extra_mask;
- if (DISPLAY_VER(i915) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
@@ -2182,59 +2193,57 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
DE_DP_A_HOTPLUG);
}
- if (IS_HASWELL(i915)) {
+ if (display->platform.haswell) {
intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
display_mask |= DE_EDP_PSR_INT_HSW;
}
- if (IS_IRONLAKE_M(i915))
+ if (display->platform.ironlake && display->platform.mobile)
extra_mask |= DE_PCU_EVENT;
i915->irq_mask = ~display_mask;
- ibx_irq_postinstall(i915);
+ ibx_irq_postinstall(display);
intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
display_mask | extra_mask);
}
-static void mtp_irq_postinstall(struct drm_i915_private *i915);
-static void icp_irq_postinstall(struct drm_i915_private *i915);
+static void mtp_irq_postinstall(struct intel_display *display);
+static void icp_irq_postinstall(struct intel_display *display);
-void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
+void gen8_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
+ u32 de_pipe_masked = gen8_de_pipe_fault_mask(display) |
GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables;
- u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
+ u32 de_port_masked = gen8_de_port_aux_mask(display);
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_EDP_PSR;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum pipe pipe;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VER(dev_priv) >= 14)
- mtp_irq_postinstall(dev_priv);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_postinstall(dev_priv);
- else if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_postinstall(dev_priv);
+ if (DISPLAY_VER(display) >= 14)
+ mtp_irq_postinstall(display);
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_irq_postinstall(display);
+ else if (HAS_PCH_SPLIT(display))
+ ibx_irq_postinstall(display);
- if (DISPLAY_VER(dev_priv) < 11)
+ if (DISPLAY_VER(display) < 11)
de_misc_masked |= GEN8_DE_MISC_GSE;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
de_port_masked |= BXT_DE_PORT_GMBUS;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
enum port port;
if (intel_bios_is_dsi_present(display, &port))
@@ -2244,25 +2253,25 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_DBUF_OVERLAP_DETECTION(display))
de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;
- if (HAS_DSB(dev_priv))
+ if (HAS_DSB(display))
de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
GEN12_DSB_INT(INTEL_DSB_1) |
GEN12_DSB_INT(INTEL_DSB_2);
de_pipe_enables = de_pipe_masked |
GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
- gen8_de_pipe_flip_done_mask(dev_priv);
+ gen8_de_pipe_flip_done_mask(display);
de_port_enables = de_port_masked;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
- else if (IS_BROADWELL(dev_priv))
+ else if (display->platform.broadwell)
de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder trans;
- for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
+ for_each_cpu_transcoder_masked(display, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
@@ -2270,19 +2279,19 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
continue;
intel_display_irq_regs_assert_irr_is_zero(display,
- TRANS_PSR_IIR(dev_priv, trans));
+ TRANS_PSR_IIR(display, trans));
}
} else {
intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
}
- for_each_pipe(dev_priv, pipe) {
- dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;
+ for_each_pipe(display, pipe) {
+ display->irq.de_irq_mask[pipe] = ~de_pipe_masked;
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
+ display->irq.de_irq_mask[pipe],
de_pipe_enables);
}
@@ -2291,7 +2300,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
de_misc_masked);
- if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
+ if (IS_DISPLAY_VER(display, 11, 13)) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
@@ -2301,9 +2310,8 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
}
-static void mtp_irq_postinstall(struct drm_i915_private *i915)
+static void mtp_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
@@ -2315,43 +2323,68 @@ static void mtp_irq_postinstall(struct drm_i915_private *i915)
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}
-static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
+static void icp_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 mask = SDE_GMBUS_ICP;
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
-void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
+void gen11_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- gen8_de_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(display);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
-void dg1_de_irq_postinstall(struct drm_i915_private *i915)
+void dg1_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- gen8_de_irq_postinstall(i915);
+ gen8_de_irq_postinstall(display);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
-void intel_display_irq_init(struct drm_i915_private *i915)
+void intel_display_irq_init(struct intel_display *display)
{
- i915->drm.vblank_disable_immediate = true;
+ spin_lock_init(&display->irq.lock);
- intel_hotplug_irq_init(i915);
+ display->drm->vblank_disable_immediate = true;
+
+ intel_hotplug_irq_init(display);
+
+ INIT_WORK(&display->irq.vblank_notify_work,
+ intel_display_vblank_notify_work);
+}
+
+struct intel_display_irq_snapshot {
+ u32 derrmr;
+};
+
+struct intel_display_irq_snapshot *
+intel_display_irq_snapshot_capture(struct intel_display *display)
+{
+ struct intel_display_irq_snapshot *snapshot;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
+ if (!snapshot)
+ return NULL;
+
+ if (DISPLAY_VER(display) >= 6 && DISPLAY_VER(display) < 20 && !HAS_GMCH(display))
+ snapshot->derrmr = intel_de_read(display, DERRMR);
+
+ return snapshot;
+}
+
+void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ if (!snapshot)
+ return;
- INIT_WORK(&i915->display.irq.vblank_dc_work,
- intel_display_vblank_dc_work);
+ drm_printf(p, "DERRMR: 0x%08x\n", snapshot->derrmr);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h
index d9867cd0a220..c66db3851da4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.h
@@ -12,28 +12,29 @@
enum pipe;
struct drm_crtc;
-struct drm_i915_private;
+struct drm_printer;
struct intel_display;
+struct intel_display_irq_snapshot;
-void valleyview_enable_display_irqs(struct drm_i915_private *i915);
-void valleyview_disable_display_irqs(struct drm_i915_private *i915);
+void valleyview_enable_display_irqs(struct intel_display *display);
+void valleyview_disable_display_irqs(struct intel_display *display);
-void ilk_update_display_irq(struct drm_i915_private *i915,
+void ilk_update_display_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask);
-void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits);
-void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits);
+void ilk_enable_display_irq(struct intel_display *display, u32 bits);
+void ilk_disable_display_irq(struct intel_display *display, u32 bits);
-void bdw_update_port_irq(struct drm_i915_private *i915, u32 interrupt_mask, u32 enabled_irq_mask);
-void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
-void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
+void bdw_update_port_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask);
+void bdw_enable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits);
+void bdw_disable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits);
-void ibx_display_interrupt_update(struct drm_i915_private *i915,
+void ibx_display_interrupt_update(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask);
-void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits);
-void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits);
+void ibx_enable_display_interrupt(struct intel_display *display, u32 bits);
+void ibx_disable_display_interrupt(struct intel_display *display, u32 bits);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *i915, u8 pipe_mask);
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915, u8 pipe_mask);
+void gen8_irq_power_well_post_enable(struct intel_display *display, u8 pipe_mask);
+void gen8_irq_power_well_pre_disable(struct intel_display *display, u8 pipe_mask);
int i8xx_enable_vblank(struct drm_crtc *crtc);
int i915gm_enable_vblank(struct drm_crtc *crtc);
@@ -46,41 +47,46 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
-void ivb_display_irq_handler(struct drm_i915_private *i915, u32 de_iir);
-void ilk_display_irq_handler(struct drm_i915_private *i915, u32 de_iir);
-void gen8_de_irq_handler(struct drm_i915_private *i915, u32 master_ctl);
-void gen11_display_irq_handler(struct drm_i915_private *i915);
+void ivb_display_irq_handler(struct intel_display *display, u32 de_iir);
+void ilk_display_irq_handler(struct intel_display *display, u32 de_iir);
+void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl);
+void gen11_display_irq_handler(struct intel_display *display);
-u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl);
-void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir);
+u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl);
+void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir);
-void i9xx_display_irq_reset(struct drm_i915_private *i915);
-void vlv_display_irq_reset(struct drm_i915_private *i915);
-void gen8_display_irq_reset(struct drm_i915_private *i915);
-void gen11_display_irq_reset(struct drm_i915_private *i915);
+void i9xx_display_irq_reset(struct intel_display *display);
+void ibx_display_irq_reset(struct intel_display *display);
+void vlv_display_irq_reset(struct intel_display *display);
+void gen8_display_irq_reset(struct intel_display *display);
+void gen11_display_irq_reset(struct intel_display *display);
-void vlv_display_irq_postinstall(struct drm_i915_private *i915);
-void ilk_de_irq_postinstall(struct drm_i915_private *i915);
-void gen8_de_irq_postinstall(struct drm_i915_private *i915);
-void gen11_de_irq_postinstall(struct drm_i915_private *i915);
-void dg1_de_irq_postinstall(struct drm_i915_private *i915);
+void i915_display_irq_postinstall(struct intel_display *display);
+void i965_display_irq_postinstall(struct intel_display *display);
+void vlv_display_irq_postinstall(struct intel_display *display);
+void ilk_de_irq_postinstall(struct intel_display *display);
+void gen8_de_irq_postinstall(struct intel_display *display);
+void gen11_de_irq_postinstall(struct intel_display *display);
+void dg1_de_irq_postinstall(struct intel_display *display);
u32 i915_pipestat_enable_mask(struct intel_display *display, enum pipe pipe);
-void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
-void i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
-void i915_enable_asle_pipestat(struct drm_i915_private *i915);
+void i915_enable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask);
+void i915_disable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask);
-void i9xx_pipestat_irq_ack(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void i9xx_pipestat_irq_ack(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]);
+void i915_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void i965_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void valleyview_pipestat_irq_handler(struct intel_display *display, u32 pipe_stats[I915_MAX_PIPES]);
void vlv_display_error_irq_ack(struct intel_display *display, u32 *eir, u32 *dpinvgtt);
void vlv_display_error_irq_handler(struct intel_display *display, u32 eir, u32 dpinvgtt);
-void intel_display_irq_init(struct drm_i915_private *i915);
+void intel_display_irq_init(struct intel_display *display);
-void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable);
+void i915gm_irq_cstate_wa(struct intel_display *display, bool enable);
+
+struct intel_display_irq_snapshot *intel_display_irq_snapshot_capture(struct intel_display *display);
+void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot, struct drm_printer *p);
#endif /* __INTEL_DISPLAY_IRQ_H__ */
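For orientation, a minimal sketch of how an error-capture path could drive the new snapshot helpers added above; the wrapper function name here is illustrative and not part of the patch, which only introduces intel_display_irq_snapshot_capture() and intel_display_irq_snapshot_print():

/* Illustrative caller of the new snapshot helpers; not part of this patch. */
static void sketch_capture_display_irq_state(struct intel_display *display,
					     struct drm_printer *p)
{
	struct intel_display_irq_snapshot *snapshot;

	/* The capture helper allocates with GFP_ATOMIC, so this is usable from atomic context. */
	snapshot = intel_display_irq_snapshot_capture(display);

	/* The print helper tolerates a NULL snapshot and simply prints nothing. */
	intel_display_irq_snapshot_print(snapshot, p);

	kfree(snapshot);
}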
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index f7171e6932dc..16356523816f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -16,6 +16,7 @@
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
@@ -204,7 +205,7 @@ static bool __intel_display_power_is_enabled(struct intel_display *display,
struct i915_power_well *power_well;
bool is_enabled;
- if (pm_runtime_suspended(display->drm->dev))
+ if (intel_display_rpm_suspended(display))
return false;
is_enabled = true;
@@ -322,6 +323,35 @@ unlock:
mutex_unlock(&power_domains->lock);
}
+/**
+ * intel_display_power_get_current_dc_state - Get the current DC state.
+ * @display: display device
+ *
+ * This function returns the current DC state: DC_STATE_DISABLE while the
+ * "DC off" power well is enabled, and the power domains' target_dc_state
+ * otherwise (i.e. the DC state the "DC off" power well will allow).
+ */
+u32 intel_display_power_get_current_dc_state(struct intel_display *display)
+{
+ struct i915_power_well *power_well;
+ struct i915_power_domains *power_domains = &display->power.domains;
+ u32 current_dc_state = DC_STATE_DISABLE;
+
+ mutex_lock(&power_domains->lock);
+ power_well = lookup_power_well(display, SKL_DISP_DC_OFF);
+
+ if (drm_WARN_ON(display->drm, !power_well))
+ goto unlock;
+
+ current_dc_state = intel_power_well_is_enabled(display, power_well) ?
+ DC_STATE_DISABLE : power_domains->target_dc_state;
+
+unlock:
+ mutex_unlock(&power_domains->lock);
+
+ return current_dc_state;
+}
+
static void __async_put_domains_mask(struct i915_power_domains *power_domains,
struct intel_power_domain_mask *mask)
{
@@ -455,7 +485,6 @@ static bool
intel_display_power_grab_async_put_ref(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
bool ret = false;
@@ -473,8 +502,8 @@ intel_display_power_grab_async_put_ref(struct intel_display *display,
goto out_verify;
cancel_async_put_work(power_domains, false);
- intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
- fetch_and_zero(&power_domains->async_put_wakeref));
+ intel_display_rpm_put_raw(display,
+ fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
verify_async_put_domains_state(power_domains);
@@ -512,9 +541,10 @@ __intel_display_power_get_domain(struct intel_display *display,
intel_wakeref_t intel_display_power_get(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ struct ref_tracker *wakeref;
+
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&power_domains->lock);
__intel_display_power_get_domain(display, domain);
@@ -539,12 +569,11 @@ intel_wakeref_t
intel_display_power_get_if_enabled(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
bool is_enabled;
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get_if_in_use(display);
if (!wakeref)
return NULL;
@@ -560,7 +589,7 @@ intel_display_power_get_if_enabled(struct intel_display *display,
mutex_unlock(&power_domains->lock);
if (!is_enabled) {
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
wakeref = NULL;
}
@@ -623,12 +652,10 @@ release_async_put_domains(struct i915_power_domains *power_domains,
struct intel_display *display = container_of(power_domains,
struct intel_display,
power.domains);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get_noresume(rpm);
+ wakeref = intel_display_rpm_get_noresume(display);
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
@@ -636,7 +663,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
__intel_display_power_put_domain(display, domain);
}
- intel_runtime_pm_put(rpm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
static void
@@ -644,11 +671,10 @@ intel_display_power_put_async_work(struct work_struct *work)
{
struct intel_display *display = container_of(work, struct intel_display,
power.domains.async_put_work.work);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
- intel_wakeref_t old_work_wakeref = NULL;
+ struct ref_tracker *new_work_wakeref, *old_work_wakeref = NULL;
+
+ new_work_wakeref = intel_display_rpm_get_raw(display);
mutex_lock(&power_domains->lock);
@@ -688,9 +714,9 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (old_work_wakeref)
- intel_runtime_pm_put_raw(rpm, old_work_wakeref);
+ intel_display_rpm_put_raw(display, old_work_wakeref);
if (new_work_wakeref)
- intel_runtime_pm_put_raw(rpm, new_work_wakeref);
+ intel_display_rpm_put_raw(display, new_work_wakeref);
}
/**
@@ -711,10 +737,10 @@ void __intel_display_power_put_async(struct intel_display *display,
intel_wakeref_t wakeref,
int delay_ms)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_runtime_pm *rpm = &i915->runtime_pm;
- intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
+ struct ref_tracker *work_wakeref;
+
+ work_wakeref = intel_display_rpm_get_raw(display);
delay_ms = delay_ms >= 0 ? delay_ms : 100;
@@ -746,9 +772,9 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(rpm, work_wakeref);
+ intel_display_rpm_put_raw(display, work_wakeref);
- intel_runtime_pm_put(rpm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
/**
@@ -765,7 +791,6 @@ out_verify:
*/
void intel_display_power_flush_work(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
@@ -786,7 +811,7 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
+ intel_display_rpm_put_raw(display, work_wakeref);
}
/**
@@ -824,10 +849,8 @@ void intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
__intel_display_power_put(display, domain);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
#else
/**
@@ -846,10 +869,8 @@ void intel_display_power_put(struct intel_display *display,
void intel_display_power_put_unchecked(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
__intel_display_power_put(display, domain);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_display_rpm_put_unchecked(display);
}
#endif
@@ -1373,26 +1394,24 @@ static void hsw_restore_lcpll(struct intel_display *display)
*/
static void hsw_enable_pc8(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
drm_dbg_kms(display->drm, "Enabling package C8+\n");
- if (HAS_PCH_LPT_LP(dev_priv))
+ if (HAS_PCH_LPT_LP(display))
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_LP_PARTITION_LEVEL_DISABLE, 0);
- lpt_disable_clkout_dp(dev_priv);
+ lpt_disable_clkout_dp(display);
hsw_disable_lcpll(display, true, true);
}
static void hsw_disable_pc8(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
drm_dbg_kms(display->drm, "Disabling package C8+\n");
hsw_restore_lcpll(display);
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
/* Many display registers don't survive PC8+ */
#ifdef I915 /* FIXME */
@@ -1423,14 +1442,13 @@ static void intel_pch_reset_handshake(struct intel_display *display,
static void skl_display_core_init(struct intel_display *display,
bool resume)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* enable PCH reset handshake */
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
if (!HAS_DISPLAY(display))
return;
@@ -1632,20 +1650,19 @@ static void tgl_bw_buddy_init(struct intel_display *display)
static void icl_display_core_init(struct intel_display *display,
bool resume)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
- INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_TGP &&
+ INTEL_PCH_TYPE(display) < PCH_DG1)
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
/* 1. Enable PCH reset handshake. */
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
if (!HAS_DISPLAY(display))
return;
@@ -1916,7 +1933,6 @@ static void intel_power_domains_verify_state(struct intel_display *display);
*/
void intel_power_domains_init_hw(struct intel_display *display, bool resume)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
power_domains->initializing = true;
@@ -1940,9 +1956,9 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
assert_isp_power_gated(display);
} else if (display->platform.broadwell || display->platform.haswell) {
hsw_assert_cdclk(display);
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
} else if (display->platform.ivybridge) {
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
}
/*
@@ -1979,7 +1995,6 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
*/
void intel_power_domains_driver_remove(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&display->power.domains.init_wakeref);
@@ -1993,7 +2008,7 @@ void intel_power_domains_driver_remove(struct intel_display *display)
intel_power_domains_verify_state(display);
/* Keep the power well enabled, but cancel its rpm wakeref. */
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
/**
@@ -2238,8 +2253,6 @@ static void intel_power_domains_verify_state(struct intel_display *display)
void intel_display_power_suspend_late(struct intel_display *display, bool s2idle)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
intel_power_domains_suspend(display, s2idle);
if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
@@ -2250,14 +2263,12 @@ void intel_display_power_suspend_late(struct intel_display *display, bool s2idle
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
- intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
+ intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
void intel_display_power_resume_early(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
display->platform.broxton) {
gen9_sanitize_dc_state(display);
@@ -2267,8 +2278,8 @@ void intel_display_power_resume_early(struct intel_display *display)
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
- intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
+ intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
intel_power_domains_resume(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 1b53d67f9b60..f8813b0e16df 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -183,6 +183,7 @@ void intel_display_power_suspend(struct intel_display *display);
void intel_display_power_resume(struct intel_display *display);
void intel_display_power_set_target_dc_state(struct intel_display *display,
u32 state);
+u32 intel_display_power_get_current_dc_state(struct intel_display *display);
bool intel_display_power_is_enabled(struct intel_display *display,
enum intel_display_power_domain domain);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index e80e1fd611ca..ab1163744bc5 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1696,6 +1696,7 @@ I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_dc_off,
XE3LPD_PW_C_POWER_DOMAINS,
XE3LPD_PW_D_POWER_DOMAINS,
POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
POWER_DOMAIN_INIT);
static const struct i915_power_well_desc xe3lpd_power_wells_dcoff[] = {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 8ec87ffd87d2..b104bce0e14d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -13,6 +13,7 @@
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
@@ -24,6 +25,7 @@
#include "intel_hotplug.h"
#include "intel_pcode.h"
#include "intel_pps.h"
+#include "intel_psr.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "skl_watermark.h"
@@ -186,22 +188,18 @@ int intel_power_well_refcount(struct i915_power_well *power_well)
static void hsw_power_well_post_enable(struct intel_display *display,
u8 irq_pipe_mask, bool has_vga)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (has_vga)
intel_vga_reset_io_mem(display);
if (irq_pipe_mask)
- gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
+ gen8_irq_power_well_post_enable(display, irq_pipe_mask);
}
static void hsw_power_well_pre_disable(struct intel_display *display,
u8 irq_pipe_mask)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (irq_pipe_mask)
- gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
+ gen8_irq_power_well_pre_disable(display, irq_pipe_mask);
}
#define ICL_AUX_PW_TO_PHY(pw_idx) \
@@ -752,8 +750,9 @@ void gen9_sanitize_dc_state(struct intel_display *display)
void gen9_set_dc_state(struct intel_display *display, u32 state)
{
struct i915_power_domains *power_domains = &display->power.domains;
- u32 val;
+ bool dc6_was_enabled, enable_dc6;
u32 mask;
+ u32 val;
if (!HAS_DISPLAY(display))
return;
@@ -762,6 +761,9 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
state & ~power_domains->allowed_dc_mask))
state &= power_domains->allowed_dc_mask;
+ if (!power_domains->initializing)
+ intel_psr_notify_dc5_dc6(display);
+
val = intel_de_read(display, DC_STATE_EN);
mask = gen9_dc_mask(display);
drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n",
@@ -772,11 +774,19 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n",
power_domains->dc_state, val & mask);
+ enable_dc6 = state & DC_STATE_EN_UPTO_DC6;
+ dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6;
+ if (!dc6_was_enabled && enable_dc6)
+ intel_dmc_update_dc6_allowed_count(display, true);
+
val &= ~mask;
val |= state;
gen9_write_dc_state(display, val);
+ if (!enable_dc6 && dc6_was_enabled)
+ intel_dmc_update_dc6_allowed_count(display, false);
+
power_domains->dc_state = val & mask;
}
@@ -816,7 +826,8 @@ static void assert_can_enable_dc5(struct intel_display *display)
(intel_de_read(display, DC_STATE_EN) &
DC_STATE_EN_UPTO_DC5),
"DC5 already programmed to be enabled.\n");
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+
+ assert_display_rpm_held(display);
assert_dmc_loaded(display);
}
@@ -1201,7 +1212,6 @@ static void vlv_init_display_clock_gating(struct intel_display *display)
static void vlv_display_power_well_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
enum pipe pipe;
@@ -1225,9 +1235,7 @@ static void vlv_display_power_well_init(struct intel_display *display)
vlv_init_display_clock_gating(display);
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(display);
/*
* During driver initialization/resume we can avoid restoring the
@@ -1236,8 +1244,8 @@ static void vlv_display_power_well_init(struct intel_display *display)
if (display->power.domains.initializing)
return;
- intel_hpd_init(dev_priv);
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
/* Re-enable the ADPA, if we have one */
for_each_intel_encoder(display->drm, encoder) {
@@ -1245,7 +1253,7 @@ static void vlv_display_power_well_init(struct intel_display *display)
intel_crt_reset(&encoder->base);
}
- intel_vga_redisable_power_on(display);
+ intel_vga_disable(display);
intel_pps_unlock_regs_wa(display);
}
@@ -1254,9 +1262,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(display);
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
@@ -1265,7 +1271,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display)
/* Prevent us from re-enabling polling on accident in late suspend */
if (!display->drm->dev->power.is_suspended)
- intel_hpd_poll_enable(dev_priv);
+ intel_hpd_poll_enable(display);
}
static void vlv_display_power_well_enable(struct intel_display *display,
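Note on the gen9_set_dc_state() hunk above: it now samples the DC_STATE_EN_UPTO_DC6 bit before and after the register update and only notifies the DC6 counter on a 0->1 or 1->0 edge. The standalone C sketch below models just that edge detection; update_dc6_allowed_count() stands in for intel_dmc_update_dc6_allowed_count() and the "register" is a plain variable, so names and constants are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DC_STATE_EN_UPTO_DC6 0x2u	/* illustrative bit, not the real layout */

static void update_dc6_allowed_count(bool start_tracking)
{
	printf("dc6 tracking %s\n", start_tracking ? "started" : "stopped");
}

/* Model of the edge detection added to gen9_set_dc_state(). */
static uint32_t set_dc_state(uint32_t reg, uint32_t state, uint32_t mask)
{
	bool dc6_was_enabled = reg & DC_STATE_EN_UPTO_DC6;
	bool enable_dc6 = state & DC_STATE_EN_UPTO_DC6;

	if (!dc6_was_enabled && enable_dc6)
		update_dc6_allowed_count(true);		/* 0 -> 1 edge: start tracking */

	reg = (reg & ~mask) | state;

	if (!enable_dc6 && dc6_was_enabled)
		update_dc6_allowed_count(false);	/* 1 -> 0 edge: accumulate */

	return reg;
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_dc_state(reg, DC_STATE_EN_UPTO_DC6, 0x3u);	/* prints "started" */
	reg = set_dc_state(reg, DC_STATE_EN_UPTO_DC6, 0x3u);	/* no edge, no print */
	reg = set_dc_state(reg, 0, 0x3u);			/* prints "stopped" */
	return 0;
}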
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 1f2798404f2c..1dbd3e841df3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -107,14 +107,14 @@ void intel_display_reset_finish(struct intel_display *display, bool test_only)
intel_display_driver_init_hw(display);
intel_clock_gating_init(i915);
intel_cx0_pll_power_save_wa(display);
- intel_hpd_init(i915);
+ intel_hpd_init(display);
ret = __intel_display_driver_resume(display, state, ctx);
if (ret)
drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
- intel_hpd_poll_disable(i915);
+ intel_hpd_poll_disable(display);
}
drm_atomic_state_put(state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c
new file mode 100644
index 000000000000..48da67dd0136
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "i915_drv.h"
+#include "intel_display_rpm.h"
+#include "intel_runtime_pm.h"
+
+static struct intel_runtime_pm *display_to_rpm(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ return &i915->runtime_pm;
+}
+
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+{
+ return intel_runtime_pm_get_raw(display_to_rpm(display));
+}
+
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put_raw(display_to_rpm(display), wakeref);
+}
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+{
+ return intel_runtime_pm_get(display_to_rpm(display));
+}
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
+{
+ return intel_runtime_pm_get_if_in_use(display_to_rpm(display));
+}
+
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
+{
+ return intel_runtime_pm_get_noresume(display_to_rpm(display));
+}
+
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put(display_to_rpm(display), wakeref);
+}
+
+void intel_display_rpm_put_unchecked(struct intel_display *display)
+{
+ intel_runtime_pm_put_unchecked(display_to_rpm(display));
+}
+
+bool intel_display_rpm_suspended(struct intel_display *display)
+{
+ return intel_runtime_pm_suspended(display_to_rpm(display));
+}
+
+void assert_display_rpm_held(struct intel_display *display)
+{
+ assert_rpm_wakelock_held(display_to_rpm(display));
+}
+
+void intel_display_rpm_assert_block(struct intel_display *display)
+{
+ disable_rpm_wakeref_asserts(display_to_rpm(display));
+}
+
+void intel_display_rpm_assert_unblock(struct intel_display *display)
+{
+ enable_rpm_wakeref_asserts(display_to_rpm(display));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.h b/drivers/gpu/drm/i915/display/intel_display_rpm.h
new file mode 100644
index 000000000000..6ef48515f84b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_RPM__
+#define __INTEL_DISPLAY_RPM__
+
+#include <linux/types.h>
+
+struct intel_display;
+struct ref_tracker;
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display);
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref);
+
+#define __with_intel_display_rpm(__display, __wakeref) \
+ for (struct ref_tracker *(__wakeref) = intel_display_rpm_get(__display); (__wakeref); \
+ intel_display_rpm_put((__display), (__wakeref)), (__wakeref) = NULL)
+
+#define with_intel_display_rpm(__display) \
+ __with_intel_display_rpm((__display), __UNIQUE_ID(wakeref))
+
+/* Only for special cases. */
+bool intel_display_rpm_suspended(struct intel_display *display);
+
+void assert_display_rpm_held(struct intel_display *display);
+void intel_display_rpm_assert_block(struct intel_display *display);
+void intel_display_rpm_assert_unblock(struct intel_display *display);
+
+/* Only for display power implementation. */
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display);
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref);
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display);
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display);
+void intel_display_rpm_put_unchecked(struct intel_display *display);
+
+#endif /* __INTEL_DISPLAY_RPM__ */
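The new with_intel_display_rpm() macro above wraps an RPM get/put pair in a one-iteration for loop whose loop variable is the wakeref, so the reference is released automatically at the end of the guarded block. The standalone C sketch below demonstrates the same scoping trick with a hypothetical resource_get()/resource_put() pair; it only models the idiom and is not the driver macro itself (which also generates a unique variable name via __UNIQUE_ID()).

#include <stdio.h>

struct resource { int id; };

static struct resource *resource_get(int id)
{
	static struct resource r;

	r.id = id;
	printf("get %d\n", id);
	return &r;
}

static void resource_put(struct resource *r)
{
	printf("put %d\n", r->id);
}

/*
 * Same shape as with_intel_display_rpm(): declare the reference in the
 * for-init clause, run the body once while it is non-NULL, then release it
 * and clear the pointer in the increment clause so the loop terminates.
 */
#define with_resource(__id, __ref) \
	for (struct resource *(__ref) = resource_get(__id); (__ref); \
	     resource_put(__ref), (__ref) = NULL)

int main(void)
{
	with_resource(42, ref)
		printf("using resource %d\n", ref->id);	/* get 42 / using / put 42 */

	return 0;
}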
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
index 4074a1879828..678b24115951 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -8,6 +8,8 @@
#include "gt/intel_rps.h"
#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_display_irq.h"
#include "intel_display_rps.h"
#include "intel_display_types.h"
@@ -81,3 +83,24 @@ void intel_display_rps_mark_interactive(struct intel_display *display,
intel_rps_mark_interactive(&to_gt(i915)->rps, interactive);
state->rps_interactive = interactive;
}
+
+void ilk_display_rps_enable(struct intel_display *display)
+{
+ spin_lock(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PCU_EVENT);
+ spin_unlock(&display->irq.lock);
+}
+
+void ilk_display_rps_disable(struct intel_display *display)
+{
+ spin_lock(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PCU_EVENT);
+ spin_unlock(&display->irq.lock);
+}
+
+void ilk_display_rps_irq_handler(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ gen5_rps_irq_handler(&to_gt(i915)->rps);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.h b/drivers/gpu/drm/i915/display/intel_display_rps.h
index 556891edb2dd..183d154f2c7c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.h
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.h
@@ -13,10 +13,34 @@ struct drm_crtc;
struct intel_atomic_state;
struct intel_display;
+#ifdef I915
void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence);
void intel_display_rps_mark_interactive(struct intel_display *display,
struct intel_atomic_state *state,
bool interactive);
+void ilk_display_rps_enable(struct intel_display *display);
+void ilk_display_rps_disable(struct intel_display *display);
+void ilk_display_rps_irq_handler(struct intel_display *display);
+#else
+static inline void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+}
+static inline void intel_display_rps_mark_interactive(struct intel_display *display,
+ struct intel_atomic_state *state,
+ bool interactive)
+{
+}
+static inline void ilk_display_rps_enable(struct intel_display *display)
+{
+}
+static inline void ilk_display_rps_disable(struct intel_display *display)
+{
+}
+static inline void ilk_display_rps_irq_handler(struct intel_display *display)
+{
+}
+#endif
#endif /* __INTEL_DISPLAY_RPS_H__ */
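intel_display_rps.h now follows the usual pattern for code shared between i915 and xe: the real prototypes are only visible when I915 is defined, otherwise empty static inline stubs are provided so callers still compile and the calls fold away. A minimal illustration of the pattern, using a hypothetical feature_enable() helper rather than the driver's functions, is sketched below.

/* feature.h - sketch of the "real prototype or inline stub" pattern */
#ifndef FEATURE_H
#define FEATURE_H

struct device;

#ifdef HAVE_FEATURE
/* Real implementation lives in feature.c, built only in this configuration. */
void feature_enable(struct device *dev);
#else
/* Callers still compile; the call compiles to nothing. */
static inline void feature_enable(struct device *dev)
{
}
#endif

#endif /* FEATURE_H */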
diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.c b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
index 25ba043cbb65..66087302fdbc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_snapshot.c
+++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
@@ -7,6 +7,7 @@
#include "intel_display_core.h"
#include "intel_display_device.h"
+#include "intel_display_irq.h"
#include "intel_display_params.h"
#include "intel_display_snapshot.h"
#include "intel_dmc.h"
@@ -20,6 +21,7 @@ struct intel_display_snapshot {
struct intel_display_params params;
struct intel_overlay_snapshot *overlay;
struct intel_dmc_snapshot *dmc;
+ struct intel_display_irq_snapshot *irq;
};
struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_display *display)
@@ -38,6 +40,7 @@ struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_displ
intel_display_params_copy(&snapshot->params);
+ snapshot->irq = intel_display_irq_snapshot_capture(display);
snapshot->overlay = intel_overlay_snapshot_capture(display);
snapshot->dmc = intel_dmc_snapshot_capture(display);
@@ -57,6 +60,7 @@ void intel_display_snapshot_print(const struct intel_display_snapshot *snapshot,
intel_display_device_info_print(&snapshot->info, &snapshot->runtime_info, p);
intel_display_params_dump(&snapshot->params, display->drm->driver->name, p);
+ intel_display_irq_snapshot_print(snapshot->irq, p);
intel_overlay_snapshot_print(snapshot->overlay, p);
intel_dmc_snapshot_print(snapshot->dmc, p);
}
@@ -68,6 +72,7 @@ void intel_display_snapshot_free(struct intel_display_snapshot *snapshot)
intel_display_params_free(&snapshot->params);
+ kfree(snapshot->irq);
kfree(snapshot->overlay);
kfree(snapshot->dmc);
kfree(snapshot);
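The IRQ snapshot is wired into intel_display_snapshot following the existing pattern: capture allocates a per-subsystem blob, print consumes it, and free releases it together with the rest of the snapshot. The tiny standalone C model below shows that ownership flow; snapshot_capture()/snapshot_print()/snapshot_free() and the irq_state type are placeholders, not the driver's types.

#include <stdio.h>
#include <stdlib.h>

struct irq_state { unsigned int enabled_mask; };

struct snapshot {
	struct irq_state *irq;	/* owned by the snapshot, may be NULL */
};

static struct snapshot *snapshot_capture(unsigned int current_mask)
{
	struct snapshot *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;

	s->irq = malloc(sizeof(*s->irq));
	if (s->irq)
		s->irq->enabled_mask = current_mask;	/* capture may partially fail */

	return s;
}

static void snapshot_print(const struct snapshot *s)
{
	if (s->irq)
		printf("irq enabled mask: 0x%x\n", s->irq->enabled_mask);
}

static void snapshot_free(struct snapshot *s)
{
	if (!s)
		return;
	free(s->irq);	/* each captured blob is released with the snapshot */
	free(s);
}

int main(void)
{
	struct snapshot *s = snapshot_capture(0xdeadu);

	if (s) {
		snapshot_print(s);
		snapshot_free(s);
	}
	return 0;
}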
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 99a6fd2900b9..d6d0440dcee9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -581,7 +581,7 @@ struct dpll {
struct intel_atomic_state {
struct drm_atomic_state base;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct __intel_global_objs_state *global_objs;
int num_global_objs;
@@ -1114,6 +1114,7 @@ struct intel_crtc_state {
bool wm_level_disabled;
u32 dc3co_exitline;
u16 su_y_granularity;
+ u8 active_non_psr_pipes;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -1387,7 +1388,7 @@ struct intel_crtc {
/* armed event for DSB based updates */
struct drm_pending_vblank_event *dsb_event;
- /* Access to these should be protected by dev_priv->irq_lock. */
+ /* Access to these should be protected by display->irq.lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
@@ -1439,7 +1440,7 @@ struct intel_crtc {
struct intel_pipe_crc pipe_crc;
#endif
- bool block_dc_for_vblank;
+ bool vblank_psr_notify;
};
struct intel_plane_error {
@@ -1620,7 +1621,7 @@ struct intel_psr {
bool sink_support;
bool source_support;
bool enabled;
- bool paused;
+ int pause_counter;
enum pipe pipe;
enum transcoder transcoder;
bool active;
@@ -1650,6 +1651,8 @@ struct intel_psr {
u8 entry_setup_frames;
bool link_ok;
+
+ u8 active_non_psr_pipes;
};
struct intel_dp {
@@ -1658,7 +1661,6 @@ struct intel_dp {
int link_rate;
u8 lane_count;
u8 sink_count;
- bool link_trained;
bool needs_modeset_retry;
bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -1683,6 +1685,7 @@ struct intel_dp {
int common_rates[DP_MAX_SUPPORTED_RATES];
struct {
/* TODO: move the rest of link specific fields to here */
+ bool active;
/* common rate,lane_count configs in bw order */
int num_configs;
#define INTEL_DP_MAX_LANE_COUNT 4
@@ -1739,7 +1742,7 @@ struct intel_dp {
struct {
struct intel_dp_mst_encoder *stream_encoders[I915_MAX_PIPES];
struct drm_dp_mst_topology_mgr mgr;
- int active_links;
+ int active_streams;
} mst;
u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index);
@@ -1805,12 +1808,16 @@ struct intel_dp {
struct {
u8 io_wake_lines;
u8 fast_wake_lines;
+ enum transcoder transcoder;
+ struct mutex lock;
/* LNL and beyond */
u8 check_entry_lines;
u8 aux_less_wake_lines;
u8 silence_period_sym_clocks;
u8 lfps_half_cycle_num_of_syms;
+ bool lobf_disable_debug;
+ bool sink_alpm_error;
} alpm_parameters;
u8 alpm_dpcd;
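Replacing the single "paused" flag in struct intel_psr with a pause_counter lets pause/resume calls nest: PSR is only re-enabled once the last pauser releases it. The standalone C sketch below models that counting behaviour with hypothetical psr_pause()/psr_resume() helpers; the real driver functions also touch hardware and take locks, which is omitted here.

#include <assert.h>
#include <stdbool.h>

struct psr {
	int pause_counter;
	bool active;
};

static void psr_pause(struct psr *psr)
{
	if (psr->pause_counter++ == 0)
		psr->active = false;	/* first pauser actually disables */
}

static void psr_resume(struct psr *psr)
{
	assert(psr->pause_counter > 0);
	if (--psr->pause_counter == 0)
		psr->active = true;	/* only the last resume re-enables */
}

int main(void)
{
	struct psr psr = { .active = true };

	psr_pause(&psr);	/* caller A */
	psr_pause(&psr);	/* caller B */
	psr_resume(&psr);	/* A resumes: still paused for B */
	assert(!psr.active);
	psr_resume(&psr);	/* B resumes: active again */
	assert(psr.active);
	return 0;
}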
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index e5a8022db664..da429c332914 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -3,38 +3,38 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_core.h"
#include "intel_display_wa.h"
-static void gen11_display_wa_apply(struct drm_i915_private *i915)
+static void gen11_display_wa_apply(struct intel_display *display)
{
/* Wa_14010594013 */
- intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
}
-static void xe_d_display_wa_apply(struct drm_i915_private *i915)
+static void xe_d_display_wa_apply(struct intel_display *display)
{
/* Wa_14013723622 */
- intel_de_rmw(i915, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
+ intel_de_rmw(display, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
}
-static void adlp_display_wa_apply(struct drm_i915_private *i915)
+static void adlp_display_wa_apply(struct intel_display *display)
{
/* Wa_22011091694:adlp */
- intel_de_rmw(i915, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
/* Bspec/49189 Initialize Sequence */
- intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
}
-void intel_display_wa_apply(struct drm_i915_private *i915)
+void intel_display_wa_apply(struct intel_display *display)
{
- if (IS_ALDERLAKE_P(i915))
- adlp_display_wa_apply(i915);
- else if (DISPLAY_VER(i915) == 12)
- xe_d_display_wa_apply(i915);
- else if (DISPLAY_VER(i915) == 11)
- gen11_display_wa_apply(i915);
+ if (display->platform.alderlake_p)
+ adlp_display_wa_apply(display);
+ else if (DISPLAY_VER(display) == 12)
+ xe_d_display_wa_apply(display);
+ else if (DISPLAY_VER(display) == 11)
+ gen11_display_wa_apply(display);
}
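intel_display_wa.c now dispatches on display->platform.* flags and DISPLAY_VER(display) instead of the i915-only IS_*() macros, which removes its dependency on i915_drv.h. The short standalone C sketch below models that kind of dispatch with a hypothetical platform-flags struct; the field names are illustrative, not the driver's.

#include <stdbool.h>
#include <stdio.h>

struct display_info {
	int ver;
	struct {
		bool alderlake_p;
	} platform;
};

static void apply_display_workarounds(const struct display_info *display)
{
	/* Most specific platform check first, then fall back to display version. */
	if (display->platform.alderlake_p)
		puts("apply ADL-P workarounds");
	else if (display->ver == 12)
		puts("apply Xe_D workarounds");
	else if (display->ver == 11)
		puts("apply gen11 workarounds");
}

int main(void)
{
	struct display_info adlp = { .ver = 13, .platform.alderlake_p = true };
	struct display_info tgl = { .ver = 12 };

	apply_display_workarounds(&adlp);	/* ADL-P */
	apply_display_workarounds(&tgl);	/* Xe_D */
	return 0;
}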
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index be644ab6ae00..babd9d16603d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -8,14 +8,17 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
-void intel_display_wa_apply(struct drm_i915_private *i915);
+void intel_display_wa_apply(struct intel_display *display);
#ifdef I915
-static inline bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) { return false; }
+static inline bool intel_display_needs_wa_16023588340(struct intel_display *display)
+{
+ return false;
+}
#else
-bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915);
+bool intel_display_needs_wa_16023588340(struct intel_display *display);
#endif
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
index 0813fb9b5823..dad7192132ad 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include "intel_de.h"
#include "intel_display.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index fa6944e55d95..b58189d24e7e 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -28,6 +28,8 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
+#include "intel_display_power_well.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_step.h"
@@ -57,6 +59,10 @@ struct intel_dmc {
const char *fw_path;
u32 max_fw_size; /* bytes */
u32 version;
+ struct {
+ u32 dc5_start;
+ u32 count;
+ } dc6_allowed;
struct dmc_fw_info {
u32 mmio_count;
i915_reg_t mmioaddr[20];
@@ -167,7 +173,6 @@ MODULE_FIRMWARE(BXT_DMC_PATH);
static const char *dmc_firmware_default(struct intel_display *display, u32 *size)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const char *fw_path = NULL;
u32 max_fw_size = 0;
@@ -183,39 +188,39 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
} else if (DISPLAY_VERx100(display) == 1400) {
fw_path = MTL_DMC_PATH;
max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
- } else if (IS_DG2(i915)) {
+ } else if (display->platform.dg2) {
fw_path = DG2_DMC_PATH;
max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_P(i915)) {
+ } else if (display->platform.alderlake_p) {
fw_path = ADLP_DMC_PATH;
max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_S(i915)) {
+ } else if (display->platform.alderlake_s) {
fw_path = ADLS_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_DG1(i915)) {
+ } else if (display->platform.dg1) {
fw_path = DG1_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_ROCKETLAKE(i915)) {
+ } else if (display->platform.rocketlake) {
fw_path = RKL_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_TIGERLAKE(i915)) {
+ } else if (display->platform.tigerlake) {
fw_path = TGL_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VER(display) == 11) {
fw_path = ICL_DMC_PATH;
max_fw_size = ICL_DMC_MAX_FW_SIZE;
- } else if (IS_GEMINILAKE(i915)) {
+ } else if (display->platform.geminilake) {
fw_path = GLK_DMC_PATH;
max_fw_size = GLK_DMC_MAX_FW_SIZE;
- } else if (IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) ||
- IS_COMETLAKE(i915)) {
+ } else if (display->platform.kabylake ||
+ display->platform.coffeelake ||
+ display->platform.cometlake) {
fw_path = KBL_DMC_PATH;
max_fw_size = KBL_DMC_MAX_FW_SIZE;
- } else if (IS_SKYLAKE(i915)) {
+ } else if (display->platform.skylake) {
fw_path = SKL_DMC_PATH;
max_fw_size = SKL_DMC_MAX_FW_SIZE;
- } else if (IS_BROXTON(i915)) {
+ } else if (display->platform.broxton) {
fw_path = BXT_DMC_PATH;
max_fw_size = BXT_DMC_MAX_FW_SIZE;
}
@@ -511,6 +516,54 @@ void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe)
intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
}
+/**
+ * intel_dmc_block_pkgc() - block PKG C-state
+ * @display: display instance
+ * @pipe: pipe whose PIPEDMC register is used for the blocking
+ * @block: block/unblock
+ *
+ * This interface is intended for Wa_16025596647 usage, i.e. to set/clear the
+ * PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS bit in the PIPEDMC_BLOCK_PKGC_SW register.
+ */
+void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
+ bool block)
+{
+ intel_de_rmw(display, PIPEDMC_BLOCK_PKGC_SW(pipe),
+ PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS, block ?
+ PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS : 0);
+}
+
+/**
+ * intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank() - start of PKG
+ * C-state exit
+ * @display: display instance
+ * @pipe: pipe whose PIPEDMC register is used for the workaround
+ * @enable: enable/disable
+ *
+ * This interface is intended for Wa_16025596647 usage, i.e. to start the package C
+ * exit at the start of the undelayed vblank.
+ */
+void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
+ enum pipe pipe, bool enable)
+{
+ u32 val;
+
+ if (enable)
+ val = DMC_EVT_CTL_ENABLE | DMC_EVT_CTL_RECURRING |
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_VBLANK_A);
+ else
+ val = REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_FALSE) |
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1);
+
+ intel_de_write(display, MTL_PIPEDMC_EVT_CTL_4(pipe),
+ val);
+}
+
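intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank() above composes the event-control value from bitfield macros: enable + recurring + edge type + vblank event ID when arming, and a "false" event ID when disarming. The standalone C sketch below shows the same REG_FIELD_PREP-style composition with made-up masks and a local field_prep() helper, so the constants deliberately do not match real hardware.

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only: [31] enable, [30] recurring, [17:16] type, [7:0] event id */
#define EVT_CTL_ENABLE		(1u << 31)
#define EVT_CTL_RECURRING	(1u << 30)
#define EVT_CTL_TYPE_MASK	(0x3u << 16)
#define EVT_CTL_EVENT_ID_MASK	0xffu

#define TYPE_EDGE_0_1		1u
#define EVENT_ID_VBLANK_A	0x32u
#define EVENT_ID_FALSE		0x01u

/* Shift a field value into position under its mask, like REG_FIELD_PREP(). */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	unsigned int shift = 0;

	while (!(mask & (1u << shift)))
		shift++;

	return (val << shift) & mask;
}

static uint32_t pkgc_exit_evt_ctl(int enable)
{
	if (enable)
		return EVT_CTL_ENABLE | EVT_CTL_RECURRING |
		       field_prep(EVT_CTL_TYPE_MASK, TYPE_EDGE_0_1) |
		       field_prep(EVT_CTL_EVENT_ID_MASK, EVENT_ID_VBLANK_A);

	return field_prep(EVT_CTL_TYPE_MASK, TYPE_EDGE_0_1) |
	       field_prep(EVT_CTL_EVENT_ID_MASK, EVENT_ID_FALSE);
}

int main(void)
{
	printf("arm:    0x%08x\n", pkgc_exit_evt_ctl(1));
	printf("disarm: 0x%08x\n", pkgc_exit_evt_ctl(0));
	return 0;
}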
static bool is_dmc_evt_ctl_reg(struct intel_display *display,
enum intel_dmc_id dmc_id, i915_reg_t reg)
{
@@ -535,8 +588,6 @@ static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!is_dmc_evt_ctl_reg(display, dmc_id, reg))
return false;
@@ -545,12 +596,12 @@ static bool disable_dmc_evt(struct intel_display *display,
return true;
/* also disable the flip queue event on the main DMC on TGL */
- if (IS_TIGERLAKE(i915) &&
+ if (display->platform.tigerlake &&
REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_CLK_MSEC)
return true;
/* also disable the HRR event on the main DMC on TGL/ADLS */
- if ((IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915)) &&
+ if ((display->platform.tigerlake || display->platform.alderlake_s) &&
REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_VBLANK_A)
return true;
@@ -582,7 +633,6 @@ static u32 dmc_mmiodata(struct intel_display *display,
*/
void intel_dmc_load_program(struct intel_display *display)
{
- struct drm_i915_private *i915 __maybe_unused = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_dmc *dmc = display_to_dmc(display);
enum intel_dmc_id dmc_id;
@@ -595,7 +645,7 @@ void intel_dmc_load_program(struct intel_display *display)
disable_all_event_handlers(display);
- assert_rpm_wakelock_held(&i915->runtime_pm);
+ assert_display_rpm_held(display);
preempt_disable();
@@ -1006,9 +1056,7 @@ static void intel_dmc_runtime_pm_put(struct intel_display *display)
static const char *dmc_fallback_path(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_ALDERLAKE_P(i915))
+ if (display->platform.alderlake_p)
return ADLP_DMC_FALLBACK_PATH;
return NULL;
@@ -1232,18 +1280,56 @@ void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct
DMC_VERSION_MINOR(snapshot->version));
}
+void intel_dmc_update_dc6_allowed_count(struct intel_display *display,
+ bool start_tracking)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ u32 dc5_cur_count;
+
+ if (DISPLAY_VER(dmc->display) < 14)
+ return;
+
+ dc5_cur_count = intel_de_read(dmc->display, DG1_DMC_DEBUG_DC5_COUNT);
+
+ if (!start_tracking)
+ dmc->dc6_allowed.count += dc5_cur_count - dmc->dc6_allowed.dc5_start;
+
+ dmc->dc6_allowed.dc5_start = dc5_cur_count;
+}
+
+static bool intel_dmc_get_dc6_allowed_count(struct intel_display *display, u32 *count)
+{
+ struct i915_power_domains *power_domains = &display->power.domains;
+ struct intel_dmc *dmc = display_to_dmc(display);
+ bool dc6_enabled;
+
+ if (DISPLAY_VER(display) < 14)
+ return false;
+
+ mutex_lock(&power_domains->lock);
+ dc6_enabled = intel_de_read(display, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC6;
+ if (dc6_enabled)
+ intel_dmc_update_dc6_allowed_count(display, false);
+
+ *count = dmc->dc6_allowed.count;
+ mutex_unlock(&power_domains->lock);
+
+ return true;
+}
+
static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_display *display = m->private;
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_dmc *dmc = display_to_dmc(display);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;
+ u32 dc6_allowed_count;
if (!HAS_DMC(display))
return -ENODEV;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
seq_printf(m, "fw loaded: %s\n",
@@ -1254,7 +1340,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "Pipe A fw loaded: %s\n",
str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEA)));
seq_printf(m, "Pipe B fw needed: %s\n",
- str_yes_no(IS_ALDERLAKE_P(i915) ||
+ str_yes_no(display->platform.alderlake_p ||
DISPLAY_VER(display) >= 14));
seq_printf(m, "Pipe B fw loaded: %s\n",
str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEB)));
@@ -1268,7 +1354,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
if (DISPLAY_VER(display) >= 12) {
i915_reg_t dc3co_reg;
- if (IS_DGFX(i915) || DISPLAY_VER(display) >= 14) {
+ if (display->platform.dgfx || DISPLAY_VER(display) >= 14) {
dc3co_reg = DG1_DMC_DEBUG3;
dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
} else {
@@ -1280,14 +1366,18 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "DC3CO count: %d\n",
intel_de_read(display, dc3co_reg));
} else {
- dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
+ dc5_reg = display->platform.broxton ? BXT_DMC_DC3_DC5_COUNT :
SKL_DMC_DC3_DC5_COUNT;
- if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915))
+ if (!display->platform.geminilake && !display->platform.broxton)
dc6_reg = SKL_DMC_DC5_DC6_COUNT;
}
seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(display, dc5_reg));
- if (i915_mmio_reg_valid(dc6_reg))
+
+ if (intel_dmc_get_dc6_allowed_count(display, &dc6_allowed_count))
+ seq_printf(m, "DC5 -> DC6 allowed count: %d\n",
+ dc6_allowed_count);
+ else if (i915_mmio_reg_valid(dc6_reg))
seq_printf(m, "DC5 -> DC6 count: %d\n",
intel_de_read(display, dc6_reg));
@@ -1299,7 +1389,7 @@ out:
intel_de_read(display, DMC_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", intel_de_read(display, DMC_HTP_SKL));
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
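The dc6_allowed bookkeeping added to intel_dmc.c works off the hardware's free-running DC5 counter: when DC6 gets enabled a start value is latched, and when it is disabled (or when debugfs wants a consistent read) the delta since that start is added to the accumulated count and the start is resampled. A standalone C model of the accumulation is sketched below, with the hardware counter replaced by a plain parameter.

#include <stdint.h>
#include <stdio.h>

struct dc6_allowed {
	uint32_t dc5_start;	/* DC5 count latched when tracking started */
	uint32_t count;		/* accumulated "DC6 allowed" DC5 entries */
};

static void update_dc6_allowed_count(struct dc6_allowed *a, uint32_t dc5_cur,
				     int start_tracking)
{
	if (!start_tracking)
		a->count += dc5_cur - a->dc5_start;	/* unsigned math handles wrap */

	a->dc5_start = dc5_cur;
}

int main(void)
{
	struct dc6_allowed a = { 0 };

	update_dc6_allowed_count(&a, 100, 1);	/* DC6 enabled: latch start */
	update_dc6_allowed_count(&a, 130, 0);	/* DC6 disabled: count += 30 */
	update_dc6_allowed_count(&a, 130, 1);	/* re-enabled */
	update_dc6_allowed_count(&a, 150, 0);	/* count += 20 -> 50 */
	printf("dc6 allowed count: %u\n", a.count);
	return 0;
}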
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 44cecef98e73..bd1c459b0075 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -18,6 +18,10 @@ void intel_dmc_load_program(struct intel_display *display);
void intel_dmc_disable_program(struct intel_display *display);
void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe);
void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe);
+void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
+ bool block);
+void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
+ enum pipe pipe, bool enable);
void intel_dmc_fini(struct intel_display *display);
void intel_dmc_suspend(struct intel_display *display);
void intel_dmc_resume(struct intel_display *display);
@@ -26,6 +30,7 @@ void intel_dmc_debugfs_register(struct intel_display *display);
struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display);
void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p);
+void intel_dmc_update_dc6_allowed_count(struct intel_display *display, bool start_tracking);
void assert_dmc_loaded(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 1bf446f96a10..e16ea3f16ed8 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -21,6 +21,20 @@
#define MTL_PIPEDMC_CONTROL _MMIO(0x45250)
#define PIPEDMC_ENABLE_MTL(pipe) REG_BIT(((pipe) - PIPE_A) * 4)
+#define _MTL_PIPEDMC_EVT_CTL_4_A 0x5f044
+#define _MTL_PIPEDMC_EVT_CTL_4_B 0x5f444
+#define MTL_PIPEDMC_EVT_CTL_4(pipe) _MMIO_PIPE(pipe, \
+ _MTL_PIPEDMC_EVT_CTL_4_A, \
+ _MTL_PIPEDMC_EVT_CTL_4_B)
+
+#define PIPEDMC_BLOCK_PKGC_SW_A 0x5f1d0
+#define PIPEDMC_BLOCK_PKGC_SW_B 0x5f5d0
+#define PIPEDMC_BLOCK_PKGC_SW(pipe) _MMIO_PIPE(pipe, \
+ PIPEDMC_BLOCK_PKGC_SW_A, \
+ PIPEDMC_BLOCK_PKGC_SW_B)
+#define PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS BIT(31)
+#define PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_UNTIL_NEXT_FRAMESTART BIT(15)
+
#define _ADLP_PIPEDMC_REG_MMIO_BASE_A 0x5f000
#define _TGL_PIPEDMC_REG_MMIO_BASE_A 0x92000
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index a236b5fc7a3d..640c43bf62d4 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -45,12 +45,13 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -58,10 +59,12 @@
#include "intel_combo_phy_regs.h"
#include "intel_connector.h"
#include "intel_crtc.h"
+#include "intel_crtc_state_dump.h"
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@@ -87,12 +90,10 @@
#include "intel_pfit.h"
#include "intel_pps.h"
#include "intel_psr.h"
-#include "intel_runtime_pm.h"
#include "intel_quirks.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
-#include "intel_crtc_state_dump.h"
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
@@ -172,10 +173,28 @@ int intel_dp_link_symbol_clock(int rate)
static int max_dprx_rate(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int max_rate;
+
if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
- return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
+ max_rate = drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
+ else
+ max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
- return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+ /*
+ * Some broken eDP sinks illegally declare support for
+ * HBR3 without TPS4, and are unable to produce a stable
+ * output. Reject HBR3 when TPS4 is not available.
+ */
+ if (max_rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
+ encoder->base.base.id, encoder->base.name);
+ max_rate = 540000;
+ }
+
+ return max_rate;
}
static int max_dprx_lane_count(struct intel_dp *intel_dp)
@@ -1032,10 +1051,11 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
u8 test_slice_count = valid_dsc_slicecount[i] * num_joined_pipes;
/*
- * 3 DSC Slices per pipe need 3 DSC engines,
- * which is supported only with Ultrajoiner.
+ * 3 DSC Slices per pipe need 3 DSC engines, which is supported only
+ * with Ultrajoiner, and only on some platforms.
*/
- if (valid_dsc_slicecount[i] == 3 && num_joined_pipes != 4)
+ if (valid_dsc_slicecount[i] == 3 &&
+ (!HAS_DSC_3ENGINES(display) || num_joined_pipes != 4))
continue;
if (test_slice_count >
@@ -2504,6 +2524,7 @@ intel_dp_dsc_compute_pipe_bpp_limits(struct intel_dp *intel_dp,
bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
@@ -2557,7 +2578,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
intel_dp_test_compute_config(intel_dp, crtc_state, limits);
return intel_dp_compute_config_link_bpp_limits(intel_dp,
- intel_dp->attached_connector,
+ connector,
crtc_state,
dsc,
limits);
@@ -2618,7 +2639,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
- !intel_dp_compute_config_limits(intel_dp, pipe_config,
+ !intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
respect_downstream_limits,
false,
&limits);
@@ -2652,7 +2673,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
- if (!intel_dp_compute_config_limits(intel_dp, pipe_config,
+ if (!intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
respect_downstream_limits,
true,
&limits))
@@ -3085,6 +3106,76 @@ intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
}
}
+int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ int symbol_size = intel_dp_is_uhbr(crtc_state) ? 32 : 8;
+ /*
+ * min symbol cycles is 3 (BS, VBID, BE) for 128b/132b and
+ * 5 (BS, VBID, MVID, MAUD, BE) for 8b/10b
+ */
+ int min_sym_cycles = intel_dp_is_uhbr(crtc_state) ? 3 : 5;
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state);
+ int min_hblank;
+ int max_lane_count = 4;
+ int hactive_sym_cycles, htotal_sym_cycles;
+ int dsc_slices = 0;
+ int link_bpp_x16;
+
+ if (DISPLAY_VER(display) < 30)
+ return 0;
+
+ /* MIN_HBLANK should be set only for 8b/10b MST or for 128b/132b SST/MST */
+ if (!is_mst && !intel_dp_is_uhbr(crtc_state))
+ return 0;
+
+ if (crtc_state->dsc.compression_enable) {
+ dsc_slices = intel_dp_dsc_get_slice_count(connector,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay,
+ num_joined_pipes);
+ if (!dsc_slices) {
+ drm_dbg(display->drm, "failed to calculate dsc slice count\n");
+ return -EINVAL;
+ }
+ }
+
+ if (crtc_state->dsc.compression_enable)
+ link_bpp_x16 = crtc_state->dsc.compressed_bpp_x16;
+ else
+ link_bpp_x16 = fxp_q4_from_int(intel_dp_output_bpp(crtc_state->output_format,
+ crtc_state->pipe_bpp));
+
+ /* Calculate min Hblank Link Layer Symbol Cycle Count for 8b/10b MST & 128b/132b */
+ hactive_sym_cycles = drm_dp_link_symbol_cycles(max_lane_count,
+ adjusted_mode->hdisplay,
+ dsc_slices,
+ link_bpp_x16,
+ symbol_size, is_mst);
+ htotal_sym_cycles = adjusted_mode->htotal * hactive_sym_cycles /
+ adjusted_mode->hdisplay;
+
+ min_hblank = htotal_sym_cycles - hactive_sym_cycles;
+ /* minimum Hblank calculation: https://groups.vesa.org/wg/DP/document/20494 */
+ min_hblank = max(min_hblank, min_sym_cycles);
+
+ /*
+ * adjust the BlankingStart/BlankingEnd framing control from
+ * the calculated value
+ */
+ min_hblank = min_hblank - 2;
+
+ min_hblank = min(10, min_hblank);
+ crtc_state->min_hblank = min_hblank;
+
+ return 0;
+}
+
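intel_dp_compute_min_hblank() above derives the minimum link-symbol-cycle count for the horizontal blanking period: it scales the active-period symbol cycles up to htotal, takes the difference, enforces the per-format floor (3 cycles for 128b/132b, 5 for 8b/10b), subtracts 2 for the BlankingStart/BlankingEnd framing adjustment, and caps the result at 10. The standalone C sketch below walks through that arithmetic; hactive_sym_cycles is passed in directly instead of recomputing drm_dp_link_symbol_cycles(), so the numbers are purely illustrative.

#include <stdio.h>

static int max_int(int a, int b) { return a > b ? a : b; }
static int min_int(int a, int b) { return a < b ? a : b; }

static int compute_min_hblank(int htotal, int hdisplay,
			      int hactive_sym_cycles, int is_uhbr)
{
	/* 3 (BS, VBID, BE) for 128b/132b, 5 (BS, VBID, MVID, MAUD, BE) for 8b/10b */
	int min_sym_cycles = is_uhbr ? 3 : 5;
	int htotal_sym_cycles = htotal * hactive_sym_cycles / hdisplay;
	int min_hblank = htotal_sym_cycles - hactive_sym_cycles;

	min_hblank = max_int(min_hblank, min_sym_cycles);
	min_hblank = min_hblank - 2;		/* BlankingStart/BlankingEnd framing */
	min_hblank = min_int(10, min_hblank);

	return min_hblank;
}

int main(void)
{
	/* e.g. a 1920-wide timing with htotal 2200 and 4000 active symbol cycles */
	printf("min_hblank = %d\n", compute_min_hblank(2200, 1920, 4000, 1));
	return 0;
}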
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -3184,6 +3275,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
&pipe_config->dp_m_n);
}
+ ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
+ if (ret)
+ return ret;
+
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;
@@ -3204,7 +3299,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, int lane_count)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
intel_dp->needs_modeset_retry = false;
intel_dp->link_rate = link_rate;
intel_dp->lane_count = lane_count;
@@ -3568,7 +3663,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
if (crtc_state) {
intel_dp_reset_link_params(intel_dp);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count);
- intel_dp->link_trained = true;
+ intel_dp->link.active = true;
}
}
@@ -4170,6 +4265,9 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
static void
intel_edp_set_sink_rates(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
intel_dp->num_sink_rates = 0;
if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
@@ -4180,10 +4278,7 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
sink_rates, sizeof(sink_rates));
for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
- int val = le16_to_cpu(sink_rates[i]);
-
- if (val == 0)
- break;
+ int rate;
/* Value read multiplied by 200kHz gives the per-lane
* link rate in kHz. The source rates are, however,
@@ -4191,7 +4286,24 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
* back to symbols is
* (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
*/
- intel_dp->sink_rates[i] = (val * 200) / 10;
+ rate = le16_to_cpu(sink_rates[i]) * 200 / 10;
+
+ if (rate == 0)
+ break;
+
+ /*
+ * Some broken eDP sinks illegally declare support for
+ * HBR3 without TPS4, and are unable to produce a stable
+ * output. Reject HBR3 when TPS4 is not available.
+ */
+ if (rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
+ encoder->base.base.id, encoder->base.name);
+ break;
+ }
+
+ intel_dp->sink_rates[i] = rate;
}
intel_dp->num_sink_rates = i;
}
@@ -4420,6 +4532,23 @@ intel_dp_mst_disconnect(struct intel_dp *intel_dp)
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ /*
+ * Display WA for HSD #13013007775: mtl/arl/lnl
+ * Read the sink count and link service IRQ registers in separate
+ * transactions to prevent disconnecting the sink on a TBT link
+ * inadvertently.
+ */
+ if (IS_DISPLAY_VER(display, 14, 20) && !display->platform.battlemage) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 3) != 3)
+ return false;
+
+ /* DP_SINK_COUNT_ESI + 3 == DP_LINK_SERVICE_IRQ_VECTOR_ESI0 */
+ return drm_dp_dpcd_readb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
+ &esi[3]) == 1;
+ }
+
return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4;
}
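The intel_dp_get_sink_irq_esi() change above implements a workaround for MTL/ARL/LNL: instead of one 4-byte DPCD read covering DP_SINK_COUNT_ESI through the link-service IRQ vector, the last byte is fetched in a separate AUX transaction so a TBT link is not dropped by the combined read. The sketch below only mirrors that control flow; dpcd_read() is a stand-in for drm_dp_dpcd_read() and the local register defines are illustrative.

#include <stdbool.h>
#include <string.h>

/* Stand-in for drm_dp_dpcd_read(): returns the number of bytes "read". */
static int dpcd_read(unsigned int addr, unsigned char *buf, int len)
{
	(void)addr;
	memset(buf, 0, len);
	return len;
}

#define SINK_COUNT_ESI			0x2002
#define LINK_SERVICE_IRQ_VECTOR_ESI0	0x2005	/* == SINK_COUNT_ESI + 3 */

static bool get_sink_irq_esi(unsigned char esi[4], bool needs_split_read_wa)
{
	if (needs_split_read_wa) {
		/* First three bytes in one transaction ... */
		if (dpcd_read(SINK_COUNT_ESI, esi, 3) != 3)
			return false;
		/* ... and the IRQ vector byte in a separate one. */
		return dpcd_read(LINK_SERVICE_IRQ_VECTOR_ESI0, &esi[3], 1) == 1;
	}

	return dpcd_read(SINK_COUNT_ESI, esi, 4) == 4;
}

int main(void)
{
	unsigned char esi[4];

	return get_sink_irq_esi(esi, true) ? 0 : 1;
}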
@@ -4969,8 +5098,6 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
bool link_ok = true;
bool reprobe_needed = false;
- drm_WARN_ON_ONCE(display->drm, intel_dp->mst.active_links < 0);
-
for (;;) {
u8 esi[4] = {};
u8 ack[4] = {};
@@ -4985,7 +5112,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi);
- if (intel_dp->mst.active_links > 0 && link_ok &&
+ if (intel_dp_mst_active_streams(intel_dp) > 0 && link_ok &&
esi[3] & LINK_STATUS_CHANGED) {
if (!intel_dp_mst_link_status(intel_dp))
link_ok = false;
@@ -5046,7 +5173,7 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
u8 link_status[DP_LINK_STATUS_SIZE];
- if (!intel_dp->link_trained)
+ if (!intel_dp->link.active)
return false;
/*
@@ -5358,6 +5485,11 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
intel_psr_short_pulse(intel_dp);
+ if (intel_alpm_get_error(intel_dp)) {
+ intel_alpm_disable(intel_dp);
+ intel_dp->alpm_parameters.sink_alpm_error = true;
+ }
+
if (intel_dp_test_short_pulse(intel_dp))
reprobe_needed = true;
@@ -5793,20 +5925,21 @@ out_vdd_off:
}
static void
-intel_dp_force(struct drm_connector *connector)
+intel_dp_force(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
if (!intel_display_driver_check_access(display))
return;
intel_dp_unset_edid(intel_dp);
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return;
intel_dp_set_edid(intel_dp);
@@ -5845,24 +5978,25 @@ static int intel_dp_get_modes(struct drm_connector *_connector)
}
static int
-intel_dp_connector_register(struct drm_connector *connector)
+intel_dp_connector_register(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
int ret;
- ret = intel_connector_register(connector);
+ ret = intel_connector_register(&connector->base);
if (ret)
return ret;
drm_dbg_kms(display->drm, "registering %s bus for %s\n",
- intel_dp->aux.name, connector->kdev->kobj.name);
+ intel_dp->aux.name, connector->base.kdev->kobj.name);
- intel_dp->aux.dev = connector->kdev;
+ intel_dp->aux.dev = connector->base.kdev;
ret = drm_dp_aux_register(&intel_dp->aux);
if (!ret)
- drm_dp_cec_register_connector(&intel_dp->aux, connector);
+ drm_dp_cec_register_connector(&intel_dp->aux, &connector->base);
if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
return ret;
@@ -5873,20 +6007,21 @@ intel_dp_connector_register(struct drm_connector *connector)
*/
if (intel_lspcon_init(dig_port)) {
if (intel_lspcon_detect_hdr_capability(dig_port))
- drm_connector_attach_hdr_output_metadata_property(connector);
+ drm_connector_attach_hdr_output_metadata_property(&connector->base);
}
return ret;
}
static void
-intel_dp_connector_unregister(struct drm_connector *connector)
+intel_dp_connector_unregister(struct drm_connector *_connector)
{
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
drm_dp_cec_unregister_connector(&intel_dp->aux);
drm_dp_aux_unregister(&intel_dp->aux);
- intel_connector_unregister(connector);
+ intel_connector_unregister(&connector->base);
}
void intel_dp_connector_sync_state(struct intel_connector *connector,
@@ -5947,21 +6082,21 @@ static int intel_modeset_tile_group(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(state);
struct drm_connector_list_iter conn_iter;
- struct drm_connector *connector;
+ struct intel_connector *connector;
int ret = 0;
drm_connector_list_iter_begin(display->drm, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
+ for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state;
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
- if (!connector->has_tile ||
- connector->tile_group->id != tile_group_id)
+ if (!connector->base.has_tile ||
+ connector->base.tile_group->id != tile_group_id)
continue;
conn_state = drm_atomic_get_connector_state(&state->base,
- connector);
+ &connector->base);
if (IS_ERR(conn_state)) {
ret = PTR_ERR(conn_state);
break;
@@ -6025,10 +6160,11 @@ static int intel_modeset_affected_transcoders(struct intel_atomic_state *state,
}
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
- struct drm_connector *connector)
+ struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
const struct drm_connector_state *old_conn_state =
- drm_atomic_get_old_connector_state(&state->base, connector);
+ drm_atomic_get_old_connector_state(&state->base, &connector->base);
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc *crtc;
u8 transcoders;
@@ -6050,17 +6186,18 @@ static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
transcoders);
}
-static int intel_dp_connector_atomic_check(struct drm_connector *conn,
+static int intel_dp_connector_atomic_check(struct drm_connector *_connector,
struct drm_atomic_state *_state)
{
- struct intel_display *display = to_intel_display(conn->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
- struct intel_connector *intel_conn = to_intel_connector(conn);
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(_state, &connector->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
int ret;
- ret = intel_digital_connector_atomic_check(conn, &state->base);
+ ret = intel_digital_connector_atomic_check(&connector->base, &state->base);
if (ret)
return ret;
@@ -6070,12 +6207,12 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
return ret;
}
- if (!intel_connector_needs_modeset(state, conn))
+ if (!intel_connector_needs_modeset(state, &connector->base))
return 0;
ret = intel_dp_tunnel_atomic_check_state(state,
intel_dp,
- intel_conn);
+ connector);
if (ret)
return ret;
@@ -6086,26 +6223,26 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
if (DISPLAY_VER(display) < 9)
return 0;
- if (conn->has_tile) {
- ret = intel_modeset_tile_group(state, conn->tile_group->id);
+ if (connector->base.has_tile) {
+ ret = intel_modeset_tile_group(state, connector->base.tile_group->id);
if (ret)
return ret;
}
- return intel_modeset_synced_crtcs(state, conn);
+ return intel_modeset_synced_crtcs(state, &connector->base);
}
-static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
+static void intel_dp_oob_hotplug_event(struct drm_connector *_connector,
enum drm_connector_status hpd_state)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
bool hpd_high = hpd_state == connector_status_connected;
unsigned int hpd_pin = encoder->hpd_pin;
bool need_work = false;
- spin_lock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (hpd_high != test_bit(hpd_pin, &display->hotplug.oob_hotplug_last_state)) {
display->hotplug.event_bits |= BIT(hpd_pin);
@@ -6114,10 +6251,10 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
hpd_high);
need_work = true;
}
- spin_unlock_irq(&i915->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
if (need_work)
- intel_hpd_schedule_detection(i915);
+ intel_hpd_schedule_detection(display);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -6144,13 +6281,12 @@ enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_dp *intel_dp = &dig_port->dp;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
if (dig_port->base.type == INTEL_OUTPUT_EDP &&
(long_hpd ||
- intel_runtime_pm_suspended(&i915->runtime_pm) ||
+ intel_display_rpm_suspended(display) ||
!intel_pps_have_panel_power_or_vdd(intel_dp))) {
/*
* vdd off can generate a long/short pulse on eDP which
@@ -6247,36 +6383,37 @@ intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder)
}
static void
-intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->base.port;
if (!intel_dp_is_edp(intel_dp))
- drm_connector_attach_dp_subconnector_property(connector);
+ drm_connector_attach_dp_subconnector_property(&connector->base);
if (!display->platform.g4x && port != PORT_A)
- intel_attach_force_audio_property(connector);
+ intel_attach_force_audio_property(&connector->base);
- intel_attach_broadcast_rgb_property(connector);
+ intel_attach_broadcast_rgb_property(&connector->base);
if (HAS_GMCH(display))
- drm_connector_attach_max_bpc_property(connector, 6, 10);
+ drm_connector_attach_max_bpc_property(&connector->base, 6, 10);
else if (DISPLAY_VER(display) >= 5)
- drm_connector_attach_max_bpc_property(connector, 6, 12);
+ drm_connector_attach_max_bpc_property(&connector->base, 6, 12);
/* Register HDMI colorspace for case of lspcon */
if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
- drm_connector_attach_content_type_property(connector);
- intel_attach_hdmi_colorspace_property(connector);
+ drm_connector_attach_content_type_property(&connector->base);
+ intel_attach_hdmi_colorspace_property(&connector->base);
} else {
- intel_attach_dp_colorspace_property(connector);
+ intel_attach_dp_colorspace_property(&connector->base);
}
if (intel_dp_has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
- drm_connector_attach_hdr_output_metadata_property(connector);
+ drm_connector_attach_hdr_output_metadata_property(&connector->base);
if (HAS_VRR(display))
- drm_connector_attach_vrr_capable_property(connector);
+ drm_connector_attach_vrr_capable_property(&connector->base);
}
static void
@@ -6311,7 +6448,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_display_mode *fixed_mode;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool has_dpcd;
@@ -6326,9 +6462,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* eDP and LVDS bail out early in this case to prevent interfering
* with an already powered-on LVDS power sequencer.
*/
- if (intel_get_lvds_encoder(dev_priv)) {
+ if (intel_get_lvds_encoder(display)) {
drm_WARN_ON(display->drm,
- !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+ !(HAS_PCH_IBX(display) || HAS_PCH_CPT(display)));
drm_info(display->drm,
"LVDS was detected, not registering eDP\n");
@@ -6359,7 +6495,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
*/
intel_hpd_enable_detection(encoder);
- intel_alpm_init_dpcd(intel_dp);
+ intel_alpm_init(intel_dp);
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp, connector);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 9189db4c2594..742ae26ac4a9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -194,6 +194,7 @@ void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
int intel_dp_output_bpp(enum intel_output_format output_format, int bpp);
bool intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
@@ -208,5 +209,7 @@ bool intel_dp_has_connector(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state);
int intel_dp_dsc_max_src_input_bpc(struct intel_display *display);
int intel_dp_dsc_min_src_input_bpc(void);
+int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index ec27bbd70bcf..bf8e8e0cc19c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -3,8 +3,10 @@
* Copyright © 2020-2021 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -111,10 +113,9 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(display)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -177,12 +178,11 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 aux_clock_divider)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
u32 timeout;
/* Max timeout value on G4x-BDW: 1.6ms */
- if (IS_BROADWELL(i915))
+ if (display->platform.broadwell)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -247,7 +247,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain;
intel_wakeref_t aux_wakeref;
- intel_wakeref_t pps_wakeref;
+ intel_wakeref_t pps_wakeref = NULL;
int i, ret, recv_bytes;
int try, clock = 0;
u32 status;
@@ -272,7 +272,20 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
aux_domain = intel_aux_power_domain(dig_port);
aux_wakeref = intel_display_power_get(display, aux_domain);
- pps_wakeref = intel_pps_lock(intel_dp);
+
+ /*
+ * The PPS state needs to be locked for:
+ * - eDP on all platforms, since AUX transfers on eDP need VDD power
+ * (either forced or via panel power) which depends on the PPS
+ * state.
+ * - non-eDP on platforms where the PPS is a pipe instance (VLV/CHV),
+ * since changing the PPS state (via a parallel modeset for
+ * instance) may interfere with the AUX transfers on a non-eDP
+ * output as well.
+ */
+ if (intel_dp_is_edp(intel_dp) ||
+ display->platform.valleyview || display->platform.cherryview)
+ pps_wakeref = intel_pps_lock(intel_dp);
/*
* We will be called with VDD already enabled for dpcd/edid/oui reads.
@@ -430,7 +443,9 @@ out:
if (vdd)
intel_pps_vdd_off_unlocked(intel_dp, false);
- intel_pps_unlock(intel_dp, pps_wakeref);
+ if (pps_wakeref)
+ intel_pps_unlock(intel_dp, pps_wakeref);
+
intel_display_power_put_async(display, aux_domain, aux_wakeref);
out_unlock:
intel_digital_port_unlock(encoder);
@@ -771,7 +786,6 @@ void intel_dp_aux_fini(struct intel_dp *intel_dp)
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -786,10 +800,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
} else if (DISPLAY_VER(display) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = skl_aux_data_reg;
- } else if (HAS_PCH_SPLIT(i915)) {
+ } else if (HAS_PCH_SPLIT(display)) {
intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ } else if (display->platform.valleyview || display->platform.cherryview) {
intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
} else {
@@ -799,9 +813,9 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
if (DISPLAY_VER(display) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
+ else if (display->platform.broadwell || display->platform.haswell)
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(i915))
+ else if (HAS_PCH_SPLIT(display))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 8173de8aec63..271b27c9de51 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -36,7 +36,6 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
@@ -149,7 +148,7 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
!(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
drm_info(display->drm,
- "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d.\n",
connector->base.base.id, connector->base.name,
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
return false;
@@ -663,7 +662,8 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_panel *panel = &connector->panel;
- if ((intel_dp->edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE)) {
+ if ((intel_dp->edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) &&
+ (intel_dp->edp_dpcd[3] & DP_EDP_SMOOTH_BRIGHTNESS_CAPABLE)) {
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] AUX Luminance Based Backlight Control Supported!\n",
connector->base.base.id, connector->base.name);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 2966f5b39392..a479b63112ea 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -56,6 +56,8 @@
lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \
} while (0)
+#define MAX_SEQ_TRAIN_FAILURES 2
+
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
@@ -164,7 +166,7 @@ static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_
* resetting its internal state when the mode is changed from
* non-transparent to transparent.
*/
- if (intel_dp->link_trained) {
+ if (intel_dp->link.active) {
if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp))
goto out_reset_lttpr_count;
@@ -711,8 +713,21 @@ void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, int link_rate, b
static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ /*
+ * Currently, we set the MSA ignore bit based on vrr.in_range.
+ * We can't really read that out during driver load since we don't have
+ * the connector information read in yet. So if we do end up doing a
+ * modeset during initial_commit() we'll clear the MSA ignore bit.
+ * GOP likely wouldn't have set this bit so after the initial commit,
+ * if there are no modesets and we enable VRR mode seamlessly
+ * (without a full modeset), the MSA ignore bit might never get set.
+ *
+ * #TODO: Implement readout of vrr.in_range.
+ * We need fastset support for setting the MSA ignore bit in DPCD,
+ * especially on the first real commit when clearing the inherited flag.
+ */
intel_dp_link_training_set_mode(intel_dp,
- crtc_state->port_clock, crtc_state->vrr.flipline);
+ crtc_state->port_clock, crtc_state->vrr.in_range);
}
void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
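As a rough sketch of what the sink-side update amounts to (not the driver's actual helper), assuming the generic DPCD accessors and register definitions from drm_dp_helper.h: the MSA-timing-parameter-ignore bit is set whenever VRR is possible on the link, and the spread-spectrum bit the real code also manages is omitted here.

#include <drm/display/drm_dp_helper.h>

/* Sketch only: set MSA ignore based on VRR being in range for the mode. */
static void sketch_set_msa_ignore(struct drm_dp_aux *aux, bool vrr_in_range)
{
        u8 val = 0;

        if (vrr_in_range)
                val |= DP_MSA_TIMING_PAR_IGNORE_EN;

        drm_dp_dpcd_writeb(aux, DP_DOWNSPREAD_CTRL, val);
}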
@@ -1110,7 +1125,10 @@ intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp,
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- intel_dp->link_trained = true;
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ intel_dp->link.active = true;
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
@@ -1120,6 +1138,15 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
}
+
+ intel_hpd_unblock(encoder);
+
+ if (!display->hotplug.ignore_long_hpd &&
+ intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES) {
+ int delay_ms = intel_dp->link.seq_train_failures ? 0 : 2000;
+
+ intel_encoder_link_check_queue_work(encoder, delay_ms);
+ }
}
static bool
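A small sketch of the retry policy introduced here, with queue_check() as a hypothetical stand-in for the encoder's delayed link-check work: verify a freshly trained link after two seconds, retry a failing one immediately, and stop requeueing once the consecutive-failure limit is reached.

#define MAX_SEQ_TRAIN_FAILURES 2

/* Sketch only: decide whether and when to queue a post-training link check. */
static void sketch_schedule_link_check(int seq_train_failures,
                                       void (*queue_check)(int delay_ms))
{
        if (seq_train_failures >= MAX_SEQ_TRAIN_FAILURES)
                return; /* give up and let fallback handling take over */

        queue_check(seq_train_failures ? 0 : 2000);
}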
@@ -1602,7 +1629,11 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
* non-transparent mode. During an earlier LTTPR detection this
* could've been prevented by an active link.
*/
- int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
+ int lttpr_count;
+
+ intel_hpd_block(encoder);
+
+ lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
if (lttpr_count < 0)
/* Still continue with enabling the port and link training. */
@@ -1620,7 +1651,6 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
lt_dbg(intel_dp, DP_PHY_DPRX, "Forcing link training failure\n");
} else if (passed) {
intel_dp->link.seq_train_failures = 0;
- intel_encoder_link_check_queue_work(encoder, 2000);
return;
}
@@ -1643,10 +1673,8 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
return;
}
- if (intel_dp->link.seq_train_failures < 2) {
- intel_encoder_link_check_queue_work(encoder, 0);
+ if (intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES)
return;
- }
if (intel_dp_schedule_fallback_link_training(state, intel_dp, crtc_state))
return;
@@ -1693,7 +1721,7 @@ static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
if (err)
return err;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
current_rate = intel_dp->link_rate;
force_rate = intel_dp->link.force_rate;
@@ -1791,7 +1819,7 @@ static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
if (err)
return err;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
current_lane_count = intel_dp->lane_count;
force_lane_count = intel_dp->link.force_lane_count;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 02f95108c637..06f4ad8de591 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -27,10 +27,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -51,7 +52,9 @@
#include "intel_link_bw.h"
#include "intel_pfit.h"
#include "intel_psr.h"
+#include "intel_step.h"
#include "intel_vdsc.h"
+#include "intel_vrr.h"
#include "skl_scaler.h"
/*
@@ -104,6 +107,34 @@ static struct intel_dp *to_primary_dp(struct intel_encoder *encoder)
return &dig_port->dp;
}
+int intel_dp_mst_active_streams(struct intel_dp *intel_dp)
+{
+ return intel_dp->mst.active_streams;
+}
+
+static bool intel_dp_mst_dec_active_streams(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
+ intel_dp->mst.active_streams, intel_dp->mst.active_streams - 1);
+
+ if (drm_WARN_ON(display->drm, intel_dp->mst.active_streams == 0))
+ return true;
+
+ return --intel_dp->mst.active_streams == 0;
+}
+
+static bool intel_dp_mst_inc_active_streams(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
+ intel_dp->mst.active_streams, intel_dp->mst.active_streams + 1);
+
+ return intel_dp->mst.active_streams++ == 0;
+}
+
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
bool dsc)
{
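The accounting helpers above follow a standard refcount-transition pattern; a standalone sketch with hypothetical names, where the increment reports the 0 -> 1 transition and the decrement reports the 1 -> 0 transition so callers can run their first-stream and last-stream setup/teardown:

#include <stdbool.h>

struct mst_streams { int active; };

/* Returns true when this enable brings up the first active stream. */
static bool sketch_inc_streams(struct mst_streams *s)
{
        return s->active++ == 0;
}

/* Returns true when this disable tears down the last active stream. */
static bool sketch_dec_streams(struct mst_streams *s)
{
        if (s->active == 0)     /* underflow guard, treat as "last stream" */
                return true;

        return --s->active == 0;
}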
@@ -210,26 +241,6 @@ static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connec
num_joined_pipes);
}
-static void intel_dp_mst_compute_min_hblank(struct intel_crtc_state *crtc_state,
- int bpp_x16)
-{
- struct intel_display *display = to_intel_display(crtc_state);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- int symbol_size = intel_dp_is_uhbr(crtc_state) ? 32 : 8;
- int hblank;
-
- if (DISPLAY_VER(display) < 20)
- return;
-
- /* Calculate min Hblank Link Layer Symbol Cycle Count for 8b/10b MST & 128b/132b */
- hblank = DIV_ROUND_UP((DIV_ROUND_UP
- (adjusted_mode->htotal - adjusted_mode->hdisplay, 4) * bpp_x16),
- symbol_size);
-
- crtc_state->min_hblank = hblank;
-}
-
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
@@ -242,7 +253,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- bool is_mst = intel_dp->is_mst;
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int bpp_x16, slots = -EINVAL;
int dsc_slice_count = 0;
int max_dpt_bpp_x16;
@@ -300,8 +311,6 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
false, dsc_slice_count, link_bpp_x16);
- intel_dp_mst_compute_min_hblank(crtc_state, link_bpp_x16);
-
intel_dp_mst_compute_m_n(crtc_state,
local_bw_overhead,
link_bpp_x16,
@@ -590,12 +599,13 @@ adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
static bool
mst_stream_compute_config_limits(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
+ struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
{
- if (!intel_dp_compute_config_limits(intel_dp, crtc_state, false, dsc,
+ if (!intel_dp_compute_config_limits(intel_dp, connector,
+ crtc_state, false, dsc,
limits))
return false;
@@ -710,6 +720,12 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+ ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
+ if (ret)
+ return ret;
+
+ intel_vrr_compute_config(pipe_config, conn_state);
+
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
intel_ddi_compute_min_voltage_level(pipe_config);
@@ -990,25 +1006,17 @@ static void mst_stream_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(state);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- enum transcoder trans = old_crtc_state->cpu_transcoder;
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
- if (intel_dp->mst.active_links == 1)
- intel_dp->link_trained = false;
+ if (intel_dp_mst_active_streams(intel_dp) == 1)
+ intel_dp->link.active = false;
intel_hdcp_disable(intel_mst->connector);
intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
-
- if (DISPLAY_VER(display) >= 20)
- intel_de_write(display, DP_MIN_HBLANK_CTL(trans), 0);
}
static void mst_stream_post_disable(struct intel_atomic_state *state,
@@ -1034,8 +1042,8 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
bool last_mst_stream;
int i;
- intel_dp->mst.active_links--;
- last_mst_stream = intel_dp->mst.active_links == 0;
+ last_mst_stream = intel_dp_mst_dec_active_streams(intel_dp);
+
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
@@ -1062,6 +1070,8 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
drm_dp_remove_payload_part2(&intel_dp->mst.mgr, new_mst_state,
old_payload, new_payload);
+ intel_vrr_transcoder_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -1104,8 +1114,6 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
primary_encoder->post_disable(state, primary_encoder,
old_crtc_state, NULL);
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
}
static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
@@ -1116,7 +1124,7 @@ static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->mst.active_links == 0 &&
+ if (intel_dp_mst_active_streams(intel_dp) == 0 &&
primary_encoder->post_pll_disable)
primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state);
}
@@ -1129,7 +1137,7 @@ static void mst_stream_pre_pll_enable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->mst.active_links == 0)
+ if (intel_dp_mst_active_streams(intel_dp) == 0)
primary_encoder->pre_pll_enable(state, primary_encoder,
pipe_config, NULL);
else
@@ -1189,13 +1197,11 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
*/
connector->encoder = encoder;
intel_mst->connector = connector;
- first_mst_stream = intel_dp->mst.active_links == 0;
+
+ first_mst_stream = intel_dp_mst_inc_active_streams(intel_dp);
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
if (first_mst_stream)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
@@ -1210,8 +1216,6 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
intel_mst_reprobe_topology(intel_dp, pipe_config);
}
- intel_dp->mst.active_links++;
-
ret = drm_dp_add_payload_part1(&intel_dp->mst.mgr, mst_state,
drm_atomic_get_mst_payload_state(mst_state, connector->mst.port));
if (ret < 0)
@@ -1279,9 +1283,9 @@ static void mst_stream_enable(struct intel_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
- bool first_mst_stream = intel_dp->mst.active_links == 1;
+ bool first_mst_stream = intel_dp_mst_active_streams(intel_dp) == 1;
struct intel_crtc *pipe_crtc;
- int ret, i, min_hblank;
+ int ret, i;
drm_WARN_ON(display->drm, pipe_config->has_pch_encoder);
@@ -1296,41 +1300,17 @@ static void mst_stream_enable(struct intel_atomic_state *state,
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
}
- if (DISPLAY_VER(display) >= 20) {
- /*
- * adjust the BlankingStart/BlankingEnd framing control from
- * the calculated value
- */
- min_hblank = pipe_config->min_hblank - 2;
-
- /* Maximum value to be programmed is limited to 0x10 */
- min_hblank = min(0x10, min_hblank);
-
- /*
- * Minimum hblank accepted for 128b/132b would be 5 and for
- * 8b/10b would be 3 symbol count
- */
- if (intel_dp_is_uhbr(pipe_config))
- min_hblank = max(min_hblank, 5);
- else
- min_hblank = max(min_hblank, 3);
-
- intel_de_write(display, DP_MIN_HBLANK_CTL(trans),
- min_hblank);
- }
-
enable_bs_jitter_was(pipe_config);
intel_ddi_enable_transcoder_func(encoder, pipe_config);
+ intel_vrr_transcoder_enable(pipe_config);
+
intel_ddi_clear_act_sent(encoder, pipe_config);
intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, trans), 0,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
intel_ddi_wait_for_act_sent(encoder, pipe_config);
drm_dp_check_act_status(&intel_dp->mst.mgr);
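For reference, the MIN_HBLANK value the deleted block used to program reduces to the arithmetic below, now obtained via intel_dp_compute_min_hblank() as added earlier in this file's diff; this is only a sketch of that math, reusing the limits spelled out in the removed comments.

#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/types.h>

/* Sketch only: hblank in link-layer symbol cycles, then clamp to HW limits. */
static int sketch_min_hblank(int htotal, int hdisplay, int bpp_x16, bool uhbr)
{
        int symbol_size = uhbr ? 32 : 8;        /* 128b/132b vs 8b/10b */
        int hblank;

        hblank = DIV_ROUND_UP(DIV_ROUND_UP(htotal - hdisplay, 4) * bpp_x16,
                              symbol_size);

        hblank -= 2;                    /* BlankingStart/End framing adjustment */
        hblank = min(0x10, hblank);     /* register maximum */

        return max(hblank, uhbr ? 5 : 3);       /* minimum symbol counts */
}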
@@ -1870,12 +1850,6 @@ mst_stream_encoders_create(struct intel_digital_port *dig_port)
}
int
-intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
-{
- return dig_port->dp.mst.active_links;
-}
-
-int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
struct intel_display *display = to_intel_display(dig_port);
@@ -2101,7 +2075,7 @@ void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
u8 rate_select;
u8 link_bw;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
return;
if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count))
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index c1bbfeb02ca9..ab09b487c6bb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -18,7 +18,7 @@ struct intel_link_bw_limits;
int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
-int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
+int intel_dp_mst_active_streams(struct intel_dp *intel_dp);
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 429f89543789..69f242139420 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -222,9 +222,7 @@ static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = {
static const struct bxt_dpio_phy_info *
bxt_get_phy_list(struct intel_display *display, int *count)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
*count = ARRAY_SIZE(glk_dpio_phy_info);
return glk_dpio_phy_info;
} else {
@@ -808,9 +806,9 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
vlv_dpio_put(dev_priv);
}
-void chv_data_lane_soft_reset(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- bool reset)
+static void __chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -853,6 +851,17 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
}
}
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool reset)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ vlv_dpio_get(i915);
+ __chv_data_lane_soft_reset(encoder, crtc_state, reset);
+ vlv_dpio_put(i915);
+}
+
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -880,7 +889,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
vlv_dpio_get(dev_priv);
/* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, crtc_state, true);
+ __chv_data_lane_soft_reset(encoder, crtc_state, true);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
@@ -1008,7 +1017,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
}
/* Deassert data lane reset */
- chv_data_lane_soft_reset(encoder, crtc_state, false);
+ __chv_data_lane_soft_reset(encoder, crtc_state, false);
vlv_dpio_put(dev_priv);
}
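The split into a locked wrapper and a double-underscore core is the usual convention for helpers that may be called with the lock already held; a minimal sketch with placeholder lock functions:

#include <stdbool.h>

/* Illustrative placeholders for the DPIO lock. */
static void sketch_dpio_get(void) { }
static void sketch_dpio_put(void) { }

/* Core helper: caller must already hold the DPIO lock. */
static void __sketch_lane_soft_reset(bool reset)
{
        (void)reset;
        /* ... toggle the data lane reset bits ... */
}

/* Public entry point: takes and drops the lock around the core helper. */
static void sketch_lane_soft_reset(bool reset)
{
        sketch_dpio_get();
        __sketch_lane_soft_reset(reset);
        sketch_dpio_put();
}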
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 08a30e5aafce..a9e9b98d0bf9 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -373,14 +373,14 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return i915->display.vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(i915))
+ return display->vbt.lvds_ssc_freq;
+ else if (HAS_PCH_SPLIT(display))
return 120000;
- else if (DISPLAY_VER(i915) != 2)
+ else if (DISPLAY_VER(display) != 2)
return 96000;
else
return 48000;
@@ -389,27 +389,27 @@ static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
struct intel_dpll_hw_state *dpll_hw_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
u32 tmp;
/* No way to read it out on pipes B and C */
- if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
- tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
+ if (display->platform.cherryview && crtc->pipe != PIPE_A)
+ tmp = display->state.chv_dpll_md[crtc->pipe];
else
- tmp = intel_de_read(dev_priv,
- DPLL_MD(dev_priv, crtc->pipe));
+ tmp = intel_de_read(display,
+ DPLL_MD(display, crtc->pipe));
hw_state->dpll_md = tmp;
}
- hw_state->dpll = intel_de_read(dev_priv, DPLL(dev_priv, crtc->pipe));
+ hw_state->dpll = intel_de_read(display, DPLL(display, crtc->pipe));
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe));
- hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe));
+ if (!display->platform.valleyview && !display->platform.cherryview) {
+ hw_state->fp0 = intel_de_read(display, FP0(crtc->pipe));
+ hw_state->fp1 = intel_de_read(display, FP1(crtc->pipe));
} else {
/* Mask out read-only status bits. */
hw_state->dpll &= ~(DPLL_LOCK_VLV |
@@ -421,8 +421,8 @@ void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
u32 dpll = hw_state->dpll;
u32 fp;
@@ -436,7 +436,7 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
fp = hw_state->fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_PINEVIEW(dev_priv)) {
+ if (display->platform.pineview) {
clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
@@ -444,8 +444,8 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (DISPLAY_VER(dev_priv) != 2) {
- if (IS_PINEVIEW(dev_priv))
+ if (DISPLAY_VER(display) != 2) {
+ if (display->platform.pineview)
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
else
@@ -462,23 +462,23 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
7 : 14;
break;
default:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
return;
}
- if (IS_PINEVIEW(dev_priv))
+ if (display->platform.pineview)
port_clock = pnv_calc_dpll_params(refclk, &clock);
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
enum pipe lvds_pipe;
- if (IS_I85X(dev_priv) &&
- intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
+ if (display->platform.i85x &&
+ intel_lvds_port_enabled(display, LVDS, &lvds_pipe) &&
lvds_pipe == crtc->pipe) {
- u32 lvds = intel_de_read(dev_priv, LVDS);
+ u32 lvds = intel_de_read(display, LVDS);
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -577,7 +577,7 @@ void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
*/
-static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
+static bool intel_pll_is_valid(struct intel_display *display,
const struct intel_limit *limit,
const struct dpll *clock)
{
@@ -590,14 +590,14 @@ static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
return false;
- if (!IS_PINEVIEW(dev_priv) &&
- !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv))
+ if (!display->platform.pineview &&
+ !display->platform.valleyview && !display->platform.cherryview &&
+ !display->platform.broxton && !display->platform.geminilake)
if (clock->m1 <= clock->m2)
return false;
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
+ if (!display->platform.valleyview && !display->platform.cherryview &&
+ !display->platform.broxton && !display->platform.geminilake) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
return false;
if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -620,7 +620,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@@ -628,7 +628,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
* We haven't figured out how to reliably set up different
* single/dual channel state, if we even can.
*/
- if (intel_is_dual_link_lvds(dev_priv))
+ if (intel_is_dual_link_lvds(display))
return limit->p2.p2_fast;
else
return limit->p2.p2_slow;
@@ -656,7 +656,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int err = target;
@@ -677,7 +677,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -714,7 +714,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int err = target;
@@ -733,7 +733,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int this_err;
pnv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -770,7 +770,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int max_n;
bool found = false;
@@ -794,7 +794,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -817,7 +817,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
* Check if the calculated PLL configuration is more optimal compared to the
* best configuration and error found so far. Return the calculated error.
*/
-static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+static bool vlv_PLL_is_optimal(struct intel_display *display, int target_freq,
const struct dpll *calculated_clock,
const struct dpll *best_clock,
unsigned int best_error_ppm,
@@ -827,13 +827,13 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
* For CHV ignore the error and consider only the P value.
* Prefer a bigger P value based on HW requirements.
*/
- if (IS_CHERRYVIEW(to_i915(dev))) {
+ if (display->platform.cherryview) {
*error_ppm = 0;
return calculated_clock->p > best_clock->p;
}
- if (drm_WARN_ON_ONCE(dev, !target_freq))
+ if (drm_WARN_ON_ONCE(display->drm, !target_freq))
return false;
*error_ppm = div_u64(1000000ULL *
@@ -864,8 +864,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
unsigned int bestppm = 1000000;
/* min update 19.2 MHz */
@@ -889,12 +888,12 @@ vlv_find_best_dpll(const struct intel_limit *limit,
vlv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
- if (!vlv_PLL_is_optimal(dev, target,
+ if (!vlv_PLL_is_optimal(display, target,
&clock,
best_clock,
bestppm, &ppm))
@@ -922,8 +921,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct intel_display *display = to_intel_display(crtc_state);
unsigned int best_error_ppm;
struct dpll clock;
u64 m2;
@@ -958,10 +956,10 @@ chv_find_best_dpll(const struct intel_limit *limit,
chv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
+ if (!intel_pll_is_valid(display, limit, &clock))
continue;
- if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ if (!vlv_PLL_is_optimal(display, target, &clock, best_clock,
best_error_ppm, &error_ppm))
continue;
@@ -1005,8 +1003,6 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
@@ -1016,8 +1012,8 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ if (display->platform.i945g || display->platform.i945gm ||
+ display->platform.g33 || display->platform.pineview) {
dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
@@ -1030,10 +1026,10 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_G4X(dev_priv)) {
+ if (display->platform.g4x) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- } else if (IS_PINEVIEW(dev_priv)) {
+ } else if (display->platform.pineview) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
WARN_ON(reduced_clock->p1 != clock->p1);
} else {
@@ -1057,7 +1053,7 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
}
WARN_ON(reduced_clock->p2 != clock->p2);
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
if (crtc_state->sdvo_tv_clock)
@@ -1075,11 +1071,10 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
const struct dpll *clock,
const struct dpll *reduced_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
- if (IS_PINEVIEW(dev_priv)) {
+ if (display->platform.pineview) {
hw_state->fp0 = pnv_dpll_compute_fp(clock);
hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
} else {
@@ -1089,7 +1084,7 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
hw_state->dpll_md = i965_dpll_md(crtc_state);
}
@@ -1098,8 +1093,6 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
@@ -1129,7 +1122,7 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
* both DPLLS. The spec says we should disable the DVO 2X clock
* when not needed, but this seems to work fine in practice.
*/
- if (IS_I830(dev_priv) ||
+ if (display->platform.i830 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
@@ -1157,14 +1150,14 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
int ret;
- if (DISPLAY_VER(dev_priv) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
@@ -1186,13 +1179,13 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- if (DISPLAY_VER(dev_priv) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
@@ -1241,12 +1234,10 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- ((intel_panel_use_ssc(display) && i915->display.vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915))))
+ ((intel_panel_use_ssc(display) && display->vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(display) && intel_is_dual_link_lvds(display))))
return 25;
if (crtc_state->sdvo_tv_clock)
@@ -1276,8 +1267,6 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE;
@@ -1311,7 +1300,7 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
* clear if it's a win or loss power wise. No point in doing
* this on ILK at all since it has a fixed DPLL<->pipe mapping.
*/
- if (INTEL_NUM_PIPES(dev_priv) == 3 &&
+ if (INTEL_NUM_PIPES(display) == 3 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
dpll |= DPLL_SDVO_HIGH_SPEED;
@@ -1362,7 +1351,6 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1375,13 +1363,13 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
- dev_priv->display.vbt.lvds_ssc_freq);
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
+ display->vbt.lvds_ssc_freq);
+ refclk = display->vbt.lvds_ssc_freq;
}
- if (intel_is_dual_link_lvds(dev_priv)) {
+ if (intel_is_dual_link_lvds(display)) {
if (refclk == 100000)
limit = &ilk_limits_dual_lvds_100m;
else
@@ -1539,7 +1527,6 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1547,13 +1534,13 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
- if (intel_is_dual_link_lvds(dev_priv))
+ if (intel_is_dual_link_lvds(display))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
@@ -1589,7 +1576,6 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1597,8 +1583,8 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1628,7 +1614,6 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1636,8 +1621,8 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1669,7 +1654,6 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1677,8 +1661,8 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1751,12 +1735,12 @@ static const struct intel_dpll_funcs i8xx_dpll_funcs = {
int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -1764,9 +1748,9 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->hw.enable)
return 0;
- ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
+ ret = display->funcs.dpll->crtc_compute_clock(state, crtc);
if (ret) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
crtc->base.base.id, crtc->base.name);
return ret;
}
@@ -1777,23 +1761,23 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
- drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
+ drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
if (!crtc_state->hw.enable || crtc_state->shared_dpll)
return 0;
- if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
+ if (!display->funcs.dpll->crtc_get_shared_dpll)
return 0;
- ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
+ ret = display->funcs.dpll->crtc_get_shared_dpll(state, crtc);
if (ret) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
crtc->base.base.id, crtc->base.name);
return ret;
}
@@ -1802,43 +1786,42 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
}
void
-intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 14)
- dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
- else if (IS_DG2(dev_priv))
- dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
- else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
- dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
- else if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
- else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &chv_dpll_funcs;
- else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
- else if (IS_G4X(dev_priv))
- dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
- else if (IS_PINEVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
- else if (DISPLAY_VER(dev_priv) != 2)
- dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
+intel_dpll_init_clock_hook(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 14)
+ display->funcs.dpll = &mtl_dpll_funcs;
+ else if (display->platform.dg2)
+ display->funcs.dpll = &dg2_dpll_funcs;
+ else if (DISPLAY_VER(display) >= 9 || HAS_DDI(display))
+ display->funcs.dpll = &hsw_dpll_funcs;
+ else if (HAS_PCH_SPLIT(display))
+ display->funcs.dpll = &ilk_dpll_funcs;
+ else if (display->platform.cherryview)
+ display->funcs.dpll = &chv_dpll_funcs;
+ else if (display->platform.valleyview)
+ display->funcs.dpll = &vlv_dpll_funcs;
+ else if (display->platform.g4x)
+ display->funcs.dpll = &g4x_dpll_funcs;
+ else if (display->platform.pineview)
+ display->funcs.dpll = &pnv_dpll_funcs;
+ else if (DISPLAY_VER(display) != 2)
+ display->funcs.dpll = &i9xx_dpll_funcs;
else
- dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
+ display->funcs.dpll = &i8xx_dpll_funcs;
}
-static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
+static bool i9xx_has_pps(struct intel_display *display)
{
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
return false;
- return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+ return display->platform.pineview || display->platform.mobile;
}
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
int i;
@@ -1846,27 +1829,27 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
- if (i9xx_has_pps(dev_priv))
+ if (i9xx_has_pps(display))
assert_pps_unlocked(display, pipe);
- intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
- intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);
+ intel_de_write(display, FP0(pipe), hw_state->fp0);
+ intel_de_write(display, FP1(pipe), hw_state->fp1);
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~DPLL_VGA_MODE_DIS);
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
/* Wait for the clocks to stabilize. */
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150);
- if (DISPLAY_VER(dev_priv) >= 4) {
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
+ if (DISPLAY_VER(display) >= 4) {
+ intel_de_write(display, DPLL_MD(display, pipe),
hw_state->dpll_md);
} else {
/* The pixel multiplier can only be updated once the
@@ -1874,20 +1857,21 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
*
* So write it again.
*/
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
}
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150); /* wait for warmup */
}
}
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
+static void vlv_pllb_recal_opamp(struct intel_display *display,
enum dpio_phy phy, enum dpio_channel ch)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
/*
@@ -1916,6 +1900,7 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct dpll *clock = &crtc_state->dpll;
@@ -1930,7 +1915,7 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
/* PLL B needs special handling */
if (pipe == PIPE_B)
- vlv_pllb_recal_opamp(dev_priv, phy, ch);
+ vlv_pllb_recal_opamp(display, phy, ch);
/* Set up Tx target for periodic Rcomp update */
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
@@ -2003,24 +1988,23 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150);
- if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
+ if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ drm_err(display->drm, "DPLL %d failed to lock\n", pipe);
}
void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
@@ -2030,7 +2014,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_pps_unlocked(display, pipe);
/* Enable Refclk */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
if (hw_state->dpll & DPLL_VCO_ENABLE) {
@@ -2038,8 +2022,8 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
_vlv_enable_pll(crtc_state);
}
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe), hw_state->dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
+ intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md);
+ intel_de_posting_read(display, DPLL_MD(display, pipe));
}
static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
@@ -2133,6 +2117,7 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -2156,18 +2141,17 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
udelay(1);
/* Enable PLL */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
/* Check PLL is locked */
- if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
+ if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ drm_err(display->drm, "PLL %d failed to lock\n", pipe);
}
void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
@@ -2177,7 +2161,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_pps_unlocked(display, pipe);
/* Enable Refclk and SSC */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~DPLL_VCO_ENABLE);
if (hw_state->dpll & DPLL_VCO_ENABLE) {
@@ -2192,29 +2176,29 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
* DPLLCMD is AWOL. Use chicken bits to propagate
* the value from DPLLBMD to either pipe B or C.
*/
- intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- intel_de_write(dev_priv, DPLL_MD(dev_priv, PIPE_B),
+ intel_de_write(display, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+ intel_de_write(display, DPLL_MD(display, PIPE_B),
hw_state->dpll_md);
- intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md;
+ intel_de_write(display, CBR4_VLV, 0);
+ display->state.chv_dpll_md[pipe] = hw_state->dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
* We should always have it disabled.
*/
- drm_WARN_ON(&dev_priv->drm,
- (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) &
+ drm_WARN_ON(display->drm,
+ (intel_de_read(display, DPLL(display, PIPE_B)) &
DPLL_VGA_MODE_DIS) == 0);
} else {
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
+ intel_de_write(display, DPLL_MD(display, pipe),
hw_state->dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
+ intel_de_posting_read(display, DPLL_MD(display, pipe));
}
}
/**
* vlv_force_pll_on - forcibly enable just the PLL
- * @dev_priv: i915 private structure
+ * @display: display device
* @pipe: pipe PLL to enable
* @dpll: PLL configuration
*
@@ -2222,10 +2206,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
* in cases where we need the PLL enabled even when @pipe is not going to
* be enabled.
*/
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
const struct dpll *dpll)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_crtc_state *crtc_state;
@@ -2238,7 +2221,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
crtc_state->dpll = *dpll;
crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
chv_compute_dpll(crtc_state);
chv_enable_pll(crtc_state);
} else {
@@ -2251,9 +2234,8 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
return 0;
}
-void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+void vlv_disable_pll(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 val;
/* Make sure the pipe isn't still relying on us */
@@ -2268,9 +2250,9 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_de_posting_read(display, DPLL(display, pipe));
}
-void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+void chv_disable_pll(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
@@ -2316,18 +2298,18 @@ void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
/**
* vlv_force_pll_off - forcibly disable just the PLL
- * @dev_priv: i915 private structure
+ * @display: display device
* @pipe: pipe PLL to disable
*
* Disable the PLL for @pipe. To be used in cases where we need
* the PLL enabled even when @pipe is not going to be enabled.
*/
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
+void vlv_force_pll_off(struct intel_display *display, enum pipe pipe)
{
- if (IS_CHERRYVIEW(dev_priv))
- chv_disable_pll(dev_priv, pipe);
+ if (display->platform.cherryview)
+ chv_disable_pll(display, pipe);
else
- vlv_disable_pll(dev_priv, pipe);
+ vlv_disable_pll(display, pipe);
}
/* Only for pre-ILK configs */
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 21d06cbd2ce7..280e90a57c87 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -8,16 +8,15 @@
#include <linux/types.h>
+enum pipe;
struct dpll;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
struct intel_dpll_hw_state;
-enum pipe;
-void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
+void intel_dpll_init_clock_hook(struct intel_display *display);
int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -29,14 +28,14 @@ void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
void chv_compute_dpll(struct intel_crtc_state *crtc_state);
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
+void vlv_force_pll_off(struct intel_display *display, enum pipe pipe);
void chv_enable_pll(const struct intel_crtc_state *crtc_state);
-void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void chv_disable_pll(struct intel_display *display, enum pipe pipe);
void vlv_enable_pll(const struct intel_crtc_state *crtc_state);
-void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void vlv_disable_pll(struct intel_display *display, enum pipe pipe);
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state);
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index c825a507b905..9da051a3f455 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -24,9 +24,11 @@
#include <linux/math.h>
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "bxt_dpio_phy_regs.h"
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -38,6 +40,7 @@
#include "intel_hti.h"
#include "intel_mg_phy_regs.h"
#include "intel_pch_refclk.h"
+#include "intel_step.h"
#include "intel_tc.h"
/**
@@ -257,7 +260,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int pipe_mask = BIT(crtc->pipe);
+ unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
unsigned int old_mask;
if (drm_WARN_ON(display->drm, !pll))
@@ -303,7 +306,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int pipe_mask = BIT(crtc->pipe);
+ unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
/* PCH only available on ILK+ */
if (DISPLAY_VER(display) < 5)
@@ -609,13 +612,12 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
enum intel_dpll_id id;
- if (HAS_PCH_IBX(i915)) {
+ if (HAS_PCH_IBX(display)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
id = (enum intel_dpll_id) crtc->pipe;
pll = intel_get_shared_dpll_by_id(display, id);
@@ -715,7 +717,6 @@ static void hsw_ddi_spll_enable(struct intel_display *display,
static void hsw_ddi_wrpll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const enum intel_dpll_id id = pll->info->id;
intel_de_rmw(display, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
@@ -726,13 +727,12 @@ static void hsw_ddi_wrpll_disable(struct intel_display *display,
* that depend on it have been shut down.
*/
if (display->dpll.pch_ssc_use & BIT(id))
- intel_init_pch_refclk(i915);
+ intel_init_pch_refclk(display);
}
static void hsw_ddi_spll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum intel_dpll_id id = pll->info->id;
intel_de_rmw(display, SPLL_CTL, SPLL_PLL_ENABLE, 0);
@@ -743,7 +743,7 @@ static void hsw_ddi_spll_disable(struct intel_display *display,
* that depend on it have been shut down.
*/
if (display->dpll.pch_ssc_use & BIT(id))
- intel_init_pch_refclk(i915);
+ intel_init_pch_refclk(display);
}
static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
@@ -2606,10 +2606,8 @@ ehl_combo_pll_div_frac_wa_needed(struct intel_display *display)
{
return ((display->platform.elkhartlake &&
IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER)) ||
- display->platform.tigerlake ||
- display->platform.alderlake_s ||
- display->platform.alderlake_p) &&
- display->dpll.ref_clks.nssc == 38400;
+ DISPLAY_VER(display) >= 12) &&
+ display->dpll.ref_clks.nssc == 38400;
}
struct icl_combo_pll_params {
@@ -4309,7 +4307,6 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {
*/
void intel_shared_dpll_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_dpll_mgr *dpll_mgr = NULL;
const struct dpll_info *dpll_info;
int i;
@@ -4339,7 +4336,7 @@ void intel_shared_dpll_init(struct intel_display *display)
dpll_mgr = &skl_pll_mgr;
else if (HAS_DDI(display))
dpll_mgr = &hsw_pll_mgr;
- else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
+ else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display))
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr)
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 0d8ebe38226e..43bd97e4f589 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -9,6 +9,7 @@
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -127,7 +128,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
struct drm_i915_private *i915 = vm->i915;
struct intel_display *display = &i915->display;
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct i915_vma *vma;
void __iomem *iomem;
struct i915_gem_ww_ctx ww;
@@ -137,7 +138,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
if (i915_gem_object_is_stolen(dpt->obj))
pin_flags |= PIN_MAPPABLE;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
atomic_inc(&display->restore.pending_fb_pin);
for_i915_gem_ww(&ww, err, true) {
@@ -169,7 +170,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
dpt->obj->mm.dirty = true;
atomic_dec(&display->restore.pending_fb_pin);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return err ? ERR_PTR(err) : vma;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
index d2dede0a5229..ce5aa0ca0fa5 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt_common.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.c
@@ -3,7 +3,6 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -12,9 +11,9 @@
void intel_dpt_configure(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (DISPLAY_VER(i915) == 14) {
+ if (DISPLAY_VER(display) == 14) {
enum pipe pipe = crtc->pipe;
enum plane_id plane_id;
@@ -22,15 +21,15 @@ void intel_dpt_configure(struct intel_crtc *crtc)
if (plane_id == PLANE_CURSOR)
continue;
- intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
+ intel_de_rmw(display, PLANE_CHICKEN(pipe, plane_id),
PLANE_CHICKEN_DISABLE_DPT,
- i915->display.params.enable_dpt ? 0 :
+ display->params.enable_dpt ? 0 :
PLANE_CHICKEN_DISABLE_DPT);
}
- } else if (DISPLAY_VER(i915) == 13) {
- intel_de_rmw(i915, CHICKEN_MISC_2,
+ } else if (DISPLAY_VER(display) == 13) {
+ intel_de_rmw(display, CHICKEN_MISC_2,
CHICKEN_MISC_DISABLE_DPT,
- i915->display.params.enable_dpt ? 0 :
+ display->params.enable_dpt ? 0 :
CHICKEN_MISC_DISABLE_DPT);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 9fc4003d1579..481488d1fe67 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,13 +4,15 @@
*
*/
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
#include "intel_dsb_buffer.h"
@@ -142,10 +144,10 @@ static int dsb_vtotal(struct intel_atomic_state *state,
static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *crtc_state =
intel_pre_commit_crtc_state(state, crtc);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- unsigned int latency = skl_watermark_max_latency(i915, 0);
+ unsigned int latency = skl_watermark_max_latency(display, 0);
return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) -
intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency);
@@ -795,22 +797,22 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
enum intel_dsb_id dsb_id,
unsigned int max_cmds)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- intel_wakeref_t wakeref;
+ struct intel_display *display = to_intel_display(state);
+ struct ref_tracker *wakeref;
struct intel_dsb *dsb;
unsigned int size;
- if (!HAS_DSB(i915))
+ if (!HAS_DSB(display))
return NULL;
- if (!i915->display.params.enable_dsb)
+ if (!display->params.enable_dsb)
return NULL;
dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
if (!dsb)
goto out;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
/* ~1 qword per instruction, full cachelines */
size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
@@ -818,7 +820,7 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
goto out_put_rpm;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
dsb->id = dsb_id;
dsb->crtc = crtc;
@@ -831,10 +833,10 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
return dsb;
out_put_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
kfree(dsb);
out:
- drm_info_once(&i915->drm,
+ drm_info_once(display->drm,
"[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n",
crtc->base.base.id, crtc->base.name, dsb_id);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 403151175a87..a8f012119165 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -4,8 +4,9 @@
*/
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_dsi.h"
#include "intel_panel.h"
@@ -116,14 +117,14 @@ struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
enum drm_panel_orientation
intel_dsi_get_panel_orientation(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
enum drm_panel_orientation orientation;
orientation = connector->panel.vbt.dsi.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
- orientation = dev_priv->display.vbt.orientation;
+ orientation = display->vbt.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 049443245310..b3c453bf7d5c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -24,9 +24,10 @@
*/
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_dcs_backlight.h"
@@ -162,7 +163,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
if (panel->vbt.backlight.brightness_precision_bits > 8)
@@ -172,7 +173,7 @@ static int dcs_setup_backlight(struct intel_connector *connector,
panel->backlight.level = panel->backlight.max;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using DCS for backlight control\n",
connector->base.base.id, connector->base.name);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 7b2ffd14ae6e..29c920983413 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -31,16 +31,16 @@
#include <linux/pinctrl/machine.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
-
#include <linux/unaligned.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
@@ -102,13 +102,13 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
const u8 *data)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct mipi_dsi_device *dsi_device;
u8 type, flags, seq_port;
u16 len;
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
flags = *data++;
type = *data++;
@@ -120,12 +120,12 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
port = intel_dsi_seq_port_to_port(intel_dsi, seq_port);
- if (drm_WARN_ON(&dev_priv->drm, !intel_dsi->dsi_hosts[port]))
+ if (drm_WARN_ON(display->drm, !intel_dsi->dsi_hosts[port]))
goto out;
dsi_device = intel_dsi->dsi_hosts[port]->device;
if (!dsi_device) {
- drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n",
+ drm_dbg_kms(display->drm, "no dsi device for port %c\n",
port_name(port));
goto out;
}
@@ -150,8 +150,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- drm_dbg(&dev_priv->drm,
- "Generic Read not yet implemented or used\n");
+ drm_dbg_kms(display->drm, "Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
mipi_dsi_generic_write(dsi_device, data, len);
@@ -163,15 +162,14 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
- drm_dbg(&dev_priv->drm,
- "DCS Read not yet implemented or used\n");
+ drm_dbg_kms(display->drm, "DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
mipi_dsi_dcs_write_buffer(dsi_device, data, len);
break;
}
- if (DISPLAY_VER(dev_priv) < 11)
+ if (DISPLAY_VER(display) < 11)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
@@ -182,10 +180,10 @@ out:
static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
u32 delay = *((const u32 *) data);
- drm_dbg_kms(&i915->drm, "%d usecs\n", delay);
+ drm_dbg_kms(display->drm, "%d usecs\n", delay);
usleep_range(delay, delay + 10);
data += 4;
@@ -196,7 +194,7 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
const char *con_id, u8 idx, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/* XXX: this table is a quick ugly hack. */
static struct gpio_desc *soc_gpio_table[U8_MAX + 1];
struct gpio_desc *gpio_desc = soc_gpio_table[gpio_index];
@@ -204,10 +202,10 @@ static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
if (gpio_desc) {
gpiod_set_value(gpio_desc, value);
} else {
- gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, con_id, idx,
+ gpio_desc = devm_gpiod_get_index(display->drm->dev, con_id, idx,
value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
if (IS_ERR(gpio_desc)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"GPIO index %u request failed (%pe)\n",
gpio_index, gpio_desc);
return;
@@ -242,16 +240,16 @@ static void soc_opaque_gpio_set_value(struct intel_connector *connector,
static void vlv_gpio_set_value(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
if (connector->panel.vbt.dsi.seq_version < 3) {
if (gpio_source == 1) {
- drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
+ drm_dbg_kms(display->drm, "SC gpio not supported\n");
return;
}
if (gpio_source > 1) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unknown gpio source %u\n", gpio_source);
return;
}
@@ -264,7 +262,7 @@ static void vlv_gpio_set_value(struct intel_connector *connector,
static void chv_gpio_set_value(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (connector->panel.vbt.dsi.seq_version >= 3) {
if (gpio_index >= CHV_GPIO_IDX_START_SE) {
@@ -284,13 +282,13 @@ static void chv_gpio_set_value(struct intel_connector *connector,
} else {
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
if (gpio_source != 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unknown gpio source %u\n", gpio_source);
return;
}
if (gpio_index >= CHV_GPIO_IDX_START_E) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"invalid gpio index %u for GPIO N\n",
gpio_index);
return;
@@ -320,13 +318,12 @@ enum {
MIPI_VIO_EN_2,
};
-static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
+static void icl_native_gpio_set_value(struct intel_display *display,
int gpio, bool value)
{
- struct intel_display *display = &dev_priv->display;
int index;
- if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
+ if (drm_WARN_ON(display->drm, DISPLAY_VER(display) == 11 && gpio >= MIPI_RESET_2))
return;
switch (gpio) {
@@ -343,25 +340,25 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
* The locking protects against concurrent SHOTPLUG_CTL_DDI
* modifications in irq setup and handling.
*/
- spin_lock_irq(&dev_priv->irq_lock);
- intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI,
+ spin_lock_irq(&display->irq.lock);
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
SHOTPLUG_CTL_DDI_HPD_ENABLE(index) |
SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index),
value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
break;
case MIPI_AVDD_EN_1:
case MIPI_AVDD_EN_2:
index = gpio == MIPI_AVDD_EN_1 ? 0 : 1;
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), PANEL_POWER_ON,
+ intel_de_rmw(display, PP_CONTROL(display, index), PANEL_POWER_ON,
value ? PANEL_POWER_ON : 0);
break;
case MIPI_BKLT_EN_1:
case MIPI_BKLT_EN_2:
index = gpio == MIPI_BKLT_EN_1 ? 0 : 1;
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), EDP_BLC_ENABLE,
+ intel_de_rmw(display, PP_CONTROL(display, index), EDP_BLC_ENABLE,
value ? EDP_BLC_ENABLE : 0);
break;
case MIPI_AVEE_EN_1:
@@ -389,13 +386,12 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
u8 gpio_source = 0, gpio_index = 0, gpio_number;
bool value;
int size;
- bool native = DISPLAY_VER(i915) >= 11;
+ bool native = DISPLAY_VER(display) >= 11;
if (connector->panel.vbt.dsi.seq_version >= 3) {
size = 3;
@@ -416,16 +412,16 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
gpio_source = (data[1] >> 1) & 3;
}
- drm_dbg_kms(&i915->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+ drm_dbg_kms(display->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
if (native)
- icl_native_gpio_set_value(i915, gpio_number, value);
- else if (DISPLAY_VER(i915) >= 9)
+ icl_native_gpio_set_value(display, gpio_number, value);
+ else if (DISPLAY_VER(display) >= 9)
bxt_gpio_set_value(connector, gpio_index, value);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
vlv_gpio_set_value(connector, gpio_source, gpio_number, value);
- else if (IS_CHERRYVIEW(i915))
+ else if (display->platform.cherryview)
chv_gpio_set_value(connector, gpio_source, gpio_number, value);
return data + size;
@@ -463,8 +459,8 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
const u16 target_addr)
{
- struct drm_device *drm_dev = intel_dsi->base.base.dev;
- struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
+ struct acpi_device *adev = ACPI_COMPANION(display->drm->dev);
struct i2c_adapter_lookup lookup = {
.target_addr = target_addr,
.intel_dsi = intel_dsi,
@@ -484,7 +480,7 @@ static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct i2c_adapter *adapter;
struct i2c_msg msg;
int ret;
@@ -494,7 +490,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
u8 payload_size = *(data + 6);
u8 *payload_data;
- drm_dbg_kms(&i915->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n",
+ drm_dbg_kms(display->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n",
vbt_i2c_bus_num, target_addr, reg_offset, payload_size, data + 7);
if (intel_dsi->i2c_bus_num < 0) {
@@ -504,7 +500,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
if (!adapter) {
- drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n");
+ drm_err(display->drm, "Cannot find a valid i2c bus for xfer\n");
goto err_bus;
}
@@ -522,7 +518,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
ret = i2c_transfer(adapter, &msg, 1);
if (ret < 0)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to xfer payload of size (%u) to reg (%u)\n",
payload_size, reg_offset);
@@ -535,16 +531,16 @@ err_bus:
static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
- drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n");
+ drm_dbg_kms(display->drm, "Skipping SPI element execution\n");
return data + *(data + 5) + 6;
}
static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
#ifdef CONFIG_PMIC_OPREGION
u32 value, mask, reg_address;
u16 i2c_address;
@@ -560,9 +556,9 @@ static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
reg_address,
value, mask);
if (ret)
- drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret);
+ drm_err(display->drm, "%s failed, error: %d\n", __func__, ret);
#else
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
#endif
@@ -612,12 +608,12 @@ static const char *sequence_name(enum mipi_seq seq_id)
static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence)))
return;
@@ -625,9 +621,9 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
if (!data)
return;
- drm_WARN_ON(&dev_priv->drm, *data != seq_id);
+ drm_WARN_ON(display->drm, *data != seq_id);
- drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n",
+ drm_dbg_kms(display->drm, "Starting MIPI sequence %d - %s\n",
seq_id, sequence_name(seq_id));
/* Skip Sequence Byte. */
@@ -657,19 +653,19 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
/* Consistency check if we have size. */
if (operation_size && data != next) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Inconsistent operation size\n");
return;
}
} else if (operation_size) {
/* We have size, skip. */
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Unsupported MIPI operation byte %u\n",
operation_byte);
data += operation_size;
} else {
/* No size, can't skip without parsing. */
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unsupported MIPI operation byte %u\n",
operation_byte);
return;
@@ -695,54 +691,44 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
void intel_dsi_log_params(struct intel_dsi *intel_dsi)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
-
- drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk);
- drm_dbg_kms(&i915->drm, "Pixel overlap %d\n",
- intel_dsi->pixel_overlap);
- drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count);
- drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
- drm_dbg_kms(&i915->drm, "Video mode format %s\n",
- intel_dsi->video_mode == NON_BURST_SYNC_PULSE ?
- "non-burst with sync pulse" :
- intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ?
- "non-burst with sync events" :
- intel_dsi->video_mode == BURST_MODE ?
- "burst" : "<unknown>");
- drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n",
- intel_dsi->burst_mode_ratio);
- drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val);
- drm_dbg_kms(&i915->drm, "Eot %s\n",
- str_enabled_disabled(intel_dsi->eotp_pkt));
- drm_dbg_kms(&i915->drm, "Clockstop %s\n",
- str_enabled_disabled(!intel_dsi->clock_stop));
- drm_dbg_kms(&i915->drm, "Mode %s\n",
- intel_dsi->operation_mode ? "command" : "video");
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
+ struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
+ "DSI parameters:");
+
+ drm_printf(&p, "Pclk %d\n", intel_dsi->pclk);
+ drm_printf(&p, "Pixel overlap %d\n", intel_dsi->pixel_overlap);
+ drm_printf(&p, "Lane count %d\n", intel_dsi->lane_count);
+ drm_printf(&p, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
+ drm_printf(&p, "Video mode format %s\n",
+ intel_dsi->video_mode == NON_BURST_SYNC_PULSE ?
+ "non-burst with sync pulse" :
+ intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ?
+ "non-burst with sync events" :
+ intel_dsi->video_mode == BURST_MODE ?
+ "burst" : "<unknown>");
+ drm_printf(&p, "Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
+ drm_printf(&p, "Reset timer %d\n", intel_dsi->rst_timer_val);
+ drm_printf(&p, "Eot %s\n", str_enabled_disabled(intel_dsi->eotp_pkt));
+ drm_printf(&p, "Clockstop %s\n", str_enabled_disabled(!intel_dsi->clock_stop));
+ drm_printf(&p, "Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
- drm_dbg_kms(&i915->drm,
- "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ drm_printf(&p, "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
- drm_dbg_kms(&i915->drm,
- "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ drm_printf(&p, "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
else
- drm_dbg_kms(&i915->drm, "Dual link: NONE\n");
- drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format);
- drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div);
- drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n",
- intel_dsi->lp_rx_timeout);
- drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n",
- intel_dsi->turn_arnd_val);
- drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count);
- drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n",
- intel_dsi->hs_to_lp_count);
- drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
- drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
- drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n",
- intel_dsi->clk_lp_to_hs_count);
- drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n",
- intel_dsi->clk_hs_to_lp_count);
- drm_dbg_kms(&i915->drm, "BTA %s\n",
- str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
+ drm_printf(&p, "Dual link: NONE\n");
+ drm_printf(&p, "Pixel Format %d\n", intel_dsi->pixel_format);
+ drm_printf(&p, "TLPX %d\n", intel_dsi->escape_clk_div);
+ drm_printf(&p, "LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
+ drm_printf(&p, "Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
+ drm_printf(&p, "Init Count 0x%x\n", intel_dsi->init_count);
+ drm_printf(&p, "HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
+ drm_printf(&p, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+ drm_printf(&p, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+ drm_printf(&p, "LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
+ drm_printf(&p, "HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
+ drm_printf(&p, "BTA %s\n",
+ str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format)
@@ -764,8 +750,7 @@ static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format)
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps;
@@ -773,7 +758,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
u16 burst_mode_ratio;
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
@@ -819,7 +804,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
u32 bitrate;
if (mipi_config->target_burst_mode_freq == 0) {
- drm_err(&dev_priv->drm, "Burst mode target is not set\n");
+ drm_err(display->drm, "Burst mode target is not set\n");
return false;
}
@@ -836,7 +821,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
mipi_config->target_burst_mode_freq = bitrate;
if (mipi_config->target_burst_mode_freq < bitrate) {
- drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n");
+ drm_err(display->drm, "Burst mode freq is less than computed\n");
return false;
}
@@ -900,8 +885,7 @@ static const struct pinctrl_map soc_pwm_pinctrl_map[] = {
void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
@@ -911,13 +895,13 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
struct pinctrl *pinctrl;
int ret;
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
mipi_config->pwm_blc == PPS_BLC_PMIC) {
gpiod_lookup_table = &pmic_panel_gpio_table;
want_panel_gpio = true;
}
- if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+ if (display->platform.valleyview && mipi_config->pwm_blc == PPS_BLC_SOC) {
gpiod_lookup_table = &soc_panel_gpio_table;
want_panel_gpio = true;
want_backlight_gpio = true;
@@ -926,12 +910,12 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
ARRAY_SIZE(soc_pwm_pinctrl_map));
if (ret)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to register pwm0 pinmux mapping\n");
- pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
+ pinctrl = devm_pinctrl_get_select(display->drm->dev, "soc_pwm0");
if (IS_ERR(pinctrl))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to set pinmux to PWM\n");
}
@@ -939,9 +923,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
gpiod_add_lookup_table(gpiod_lookup_table);
if (want_panel_gpio) {
- intel_dsi->gpio_panel = devm_gpiod_get(dev->dev, "panel", flags);
+ intel_dsi->gpio_panel = devm_gpiod_get(display->drm->dev, "panel", flags);
if (IS_ERR(intel_dsi->gpio_panel)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to own gpio for panel control\n");
intel_dsi->gpio_panel = NULL;
}
@@ -949,9 +933,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
if (want_backlight_gpio) {
intel_dsi->gpio_backlight =
- devm_gpiod_get(dev->dev, "backlight", flags);
+ devm_gpiod_get(display->drm->dev, "backlight", flags);
if (IS_ERR(intel_dsi->gpio_backlight)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to own gpio for backlight control\n");
intel_dsi->gpio_backlight = NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index c16fb34b737d..b61520353c92 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -31,10 +31,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_driver.h"
@@ -129,13 +130,13 @@ static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
if (!(tmp & DVO_ENABLE))
return false;
@@ -146,11 +147,11 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
*pipe = REG_FIELD_GET(DVO_PIPE_SEL_MASK, tmp);
@@ -160,13 +161,13 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp, flags = 0;
pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -186,14 +187,14 @@ static void intel_disable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
- intel_de_rmw(i915, DVO(port), DVO_ENABLE, 0);
- intel_de_posting_read(i915, DVO(port));
+ intel_de_rmw(display, DVO(port), DVO_ENABLE, 0);
+ intel_de_posting_read(display, DVO(port));
}
static void intel_enable_dvo(struct intel_atomic_state *state,
@@ -201,7 +202,7 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
@@ -209,8 +210,8 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
&pipe_config->hw.mode,
&pipe_config->hw.adjusted_mode);
- intel_de_rmw(i915, DVO(port), 0, DVO_ENABLE);
- intel_de_posting_read(i915, DVO(port));
+ intel_de_rmw(display, DVO(port), 0, DVO_ENABLE);
+ intel_de_posting_read(display, DVO(port));
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
@@ -288,7 +289,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port = encoder->port;
@@ -296,7 +297,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
u32 dvo_val;
/* Save the active data order, since I don't know what it should be set to. */
- dvo_val = intel_de_read(i915, DVO(port)) &
+ dvo_val = intel_de_read(display, DVO(port)) &
(DVO_DEDICATED_INT_ENABLE |
DVO_PRESERVE_MASK | DVO_ACT_DATA_ORDER_MASK);
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
@@ -309,10 +310,10 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- intel_de_write(i915, DVO_SRCDIM(port),
+ intel_de_write(display, DVO_SRCDIM(port),
DVO_SRCDIM_HORIZONTAL(adjusted_mode->crtc_hdisplay) |
DVO_SRCDIM_VERTICAL(adjusted_mode->crtc_vdisplay));
- intel_de_write(i915, DVO(port), dvo_val);
+ intel_de_write(display, DVO(port), dvo_val);
}
static enum drm_connector_status
@@ -320,10 +321,9 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
{
struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);
if (!intel_display_device_enabled(display))
@@ -414,11 +414,10 @@ static int intel_dvo_connector_type(const struct intel_dvo_device *dvo)
}
}
-static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
+static bool intel_dvo_init_dev(struct intel_display *display,
struct intel_dvo *intel_dvo,
const struct intel_dvo_device *dvo)
{
- struct intel_display *display = &dev_priv->display;
struct i2c_adapter *i2c;
u32 dpll[I915_MAX_PIPES];
enum pipe pipe;
@@ -458,15 +457,15 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
* the clock enabled before we attempt to initialize
* the device.
*/
- for_each_pipe(dev_priv, pipe)
- dpll[pipe] = intel_de_rmw(dev_priv, DPLL(dev_priv, pipe), 0,
+ for_each_pipe(display, pipe)
+ dpll[pipe] = intel_de_rmw(display, DPLL(display, pipe), 0,
DPLL_DVO_2X_MODE);
ret = dvo->dev_ops->init(&intel_dvo->dev, i2c);
/* restore the DVO 2x clock state to original */
- for_each_pipe(dev_priv, pipe) {
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll[pipe]);
+ for_each_pipe(display, pipe) {
+ intel_de_write(display, DPLL(display, pipe), dpll[pipe]);
}
intel_gmbus_force_bit(i2c, false);
@@ -474,14 +473,14 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
return ret;
}
-static bool intel_dvo_probe(struct drm_i915_private *i915,
+static bool intel_dvo_probe(struct intel_display *display,
struct intel_dvo *intel_dvo)
{
int i;
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- if (intel_dvo_init_dev(i915, intel_dvo,
+ if (intel_dvo_init_dev(display, intel_dvo,
&intel_dvo_devices[i]))
return true;
}
@@ -489,9 +488,8 @@ static bool intel_dvo_probe(struct drm_i915_private *i915,
return false;
}
-void intel_dvo_init(struct drm_i915_private *i915)
+void intel_dvo_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_connector *connector;
struct intel_encoder *encoder;
struct intel_dvo *intel_dvo;
@@ -518,7 +516,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
encoder->pre_enable = intel_dvo_pre_enable;
connector->get_hw_state = intel_dvo_connector_get_hw_state;
- if (!intel_dvo_probe(i915, intel_dvo)) {
+ if (!intel_dvo_probe(display, intel_dvo)) {
kfree(intel_dvo);
intel_connector_free(connector);
return;
@@ -535,12 +533,12 @@ void intel_dvo_init(struct drm_i915_private *i915)
encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG) |
BIT(INTEL_OUTPUT_DVO);
- drm_encoder_init(&i915->drm, &encoder->base,
+ drm_encoder_init(display->drm, &encoder->base,
&intel_dvo_enc_funcs,
intel_dvo_encoder_type(&intel_dvo->dev),
"DVO %c", port_name(encoder->port));
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] detected %s\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] detected %s\n",
encoder->base.base.id, encoder->base.name,
intel_dvo->dev.name);
@@ -549,7 +547,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
DRM_CONNECTOR_POLL_DISCONNECT;
connector->base.polled = connector->polled;
- drm_connector_init_with_ddc(&i915->drm, &connector->base,
+ drm_connector_init_with_ddc(display->drm, &connector->base,
&intel_dvo_connector_funcs,
intel_dvo_connector_type(&intel_dvo->dev),
intel_gmbus_get_adapter(display, GMBUS_PIN_DPC));
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.h b/drivers/gpu/drm/i915/display/intel_dvo.h
index bf7a356422ab..83776552fc87 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo.h
@@ -6,12 +6,12 @@
#ifndef __INTEL_DVO_H__
#define __INTEL_DVO_H__
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-void intel_dvo_init(struct drm_i915_private *dev_priv);
+void intel_dvo_init(struct intel_display *display);
#else
-static inline void intel_dvo_init(struct drm_i915_private *dev_priv)
+static inline void intel_dvo_init(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 2b0e0f220442..05393bd60c98 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -560,11 +560,11 @@ static bool plane_has_modifier(struct intel_display *display,
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_BMG_CCS &&
- (GRAPHICS_VER(i915) < 20 || !IS_DGFX(i915)))
+ (GRAPHICS_VER(i915) < 20 || !display->platform.dgfx))
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_LNL_CCS &&
- (GRAPHICS_VER(i915) < 20 || IS_DGFX(i915)))
+ (GRAPHICS_VER(i915) < 20 || display->platform.dgfx))
return false;
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 30ac9b089ad6..c648ab8a93d7 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -12,6 +12,7 @@
#include "i915_drv.h"
#include "intel_atomic_plane.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -117,7 +118,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_gem_object *_obj = intel_fb_bo(fb);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
unsigned int pinctl;
@@ -136,7 +137,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
* intel_runtime_pm_put(), so it is correct to wrap only the
* pin/unpin/fence and not more.
*/
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
atomic_inc(&display->restore.pending_fb_pin);
@@ -215,7 +216,7 @@ err:
vma = ERR_PTR(ret);
atomic_dec(&display->restore.pending_fb_pin);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return vma;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index b6978135e8ad..bed2bba20b55 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -55,6 +55,7 @@
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_device.h"
+#include "intel_display_rpm.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_display_wa.h"
@@ -251,9 +252,12 @@ static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_s
* Gen9 hw miscalculates cfb stride for linear as
* PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
* we always need to use the override there.
+ *
+ * wa_14022269668 For bmg, always program the FBC_STRIDE before fbc enable
*/
if (stride != stride_aligned ||
- (DISPLAY_VER(display) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
+ (DISPLAY_VER(display) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR) ||
+ display->platform.battlemage)
return stride_aligned * 4 / 64;
return 0;
@@ -519,6 +523,20 @@ static void ilk_fbc_activate(struct intel_fbc *fbc)
DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
+static void fbc_compressor_clkgate_disable_wa(struct intel_fbc *fbc,
+ bool disable)
+{
+ struct intel_display *display = fbc->display;
+
+ if (display->platform.dg2)
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_4, DG2_DPFC_GATING_DIS,
+ disable ? DG2_DPFC_GATING_DIS : 0);
+ else if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, MTL_PIPE_CLKGATE_DIS2(fbc->id),
+ MTL_DPFC_GATING_DIS,
+ disable ? MTL_DPFC_GATING_DIS : 0);
+}
+
static void ilk_fbc_deactivate(struct intel_fbc *fbc)
{
struct intel_display *display = fbc->display;
@@ -532,6 +550,10 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc)
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+
+ /* wa_18038517565 Enable DPFC clock gating after FBC disable */
+ if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
+ fbc_compressor_clkgate_disable_wa(fbc, false);
}
}
@@ -921,6 +943,10 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
+
+ /* wa_18038517565 Disable DPFC clock gating before FBC enable */
+ if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
+ fbc_compressor_clkgate_disable_wa(fbc, true);
}
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
@@ -1436,7 +1462,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (intel_display_needs_wa_16023588340(i915)) {
+ if (intel_display_needs_wa_16023588340(display)) {
plane_state->no_fbc_reason = "Wa_16023588340";
return 0;
}
@@ -1464,14 +1490,15 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Recommendation is to keep this combination disabled
* Bspec: 50422 HSD: 14010260002
*
- * In Xe3, PSR2 selective fetch and FBC dirty rect feature cannot
- * coexist. So if PSR2 selective fetch is supported then mark that
- * FBC is not supported.
- * TODO: Need a logic to decide between PSR2 and FBC Dirty rect
+ * TODO: Implement a logic to select between PSR2 selective fetch and
+ * FBC based on Bspec: 68881 in xe2lpd onwards.
+ *
+ * As we still see some strange underruns in those platforms while
+ * disabling PSR2, keep FBC disabled in case of selective update is on
+ * until the selection logic is implemented.
*/
- if ((IS_DISPLAY_VER(display, 12, 14) || HAS_FBC_DIRTY_RECT(display)) &&
- crtc_state->has_sel_update && !crtc_state->has_panel_replay) {
- plane_state->no_fbc_reason = "PSR2 enabled";
+ if (DISPLAY_VER(display) >= 12 && crtc_state->has_sel_update) {
+ plane_state->no_fbc_reason = "Selective update enabled";
return 0;
}
@@ -2120,13 +2147,12 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_fbc *fbc = m->private;
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_plane *plane;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
drm_modeset_lock_all(display->drm);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&fbc->lock);
if (fbc->active) {
@@ -2151,7 +2177,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
}
mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
drm_modeset_unlock_all(display->drm);
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index adc19d5607de..2dc4029d71ed 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -47,9 +47,10 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_bo.h"
+#include "intel_display_core.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
@@ -65,9 +66,9 @@ struct intel_fbdev {
static struct intel_fbdev *to_intel_fbdev(struct drm_fb_helper *fb_helper)
{
- struct drm_i915_private *i915 = to_i915(fb_helper->client.dev);
+ struct intel_display *display = to_intel_display(fb_helper->client.dev);
- return i915->display.fbdev.fbdev;
+ return display->fbdev.fbdev;
}
static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
@@ -209,11 +210,10 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_display *display = to_intel_display(helper->dev);
struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
struct intel_framebuffer *fb = ifbdev->fb;
- struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct fb_info *info;
struct i915_vma *vma;
unsigned long flags = 0;
@@ -226,7 +226,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
if (fb &&
(sizes->fb_width > fb->base.width ||
sizes->fb_height > fb->base.height)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BIOS fb too small (%dx%d), we require (%dx%d),"
" releasing it\n",
fb->base.width, fb->base.height,
@@ -234,20 +234,20 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
drm_framebuffer_put(&fb->base);
fb = NULL;
}
- if (!fb || drm_WARN_ON(dev, !intel_fb_bo(&fb->base))) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!fb || drm_WARN_ON(display->drm, !intel_fb_bo(&fb->base))) {
+ drm_dbg_kms(display->drm,
"no BIOS fb, allocating a new one\n");
fb = intel_fbdev_fb_alloc(helper, sizes);
if (IS_ERR(fb))
return PTR_ERR(fb);
} else {
- drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
+ drm_dbg_kms(display->drm, "re-using BIOS fb\n");
prealloc = true;
sizes->fb_width = fb->base.width;
sizes->fb_height = fb->base.height;
}
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
@@ -265,7 +265,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
info = drm_fb_helper_alloc_info(helper);
if (IS_ERR(info)) {
- drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info);
+ drm_err(display->drm, "Failed to allocate fb_info (%pe)\n", info);
ret = PTR_ERR(info);
goto out_unpin;
}
@@ -277,11 +277,11 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
obj = intel_fb_bo(&fb->base);
- ret = intel_fbdev_fb_fill_info(dev_priv, info, obj, vma);
+ ret = intel_fbdev_fb_fill_info(display, info, obj, vma);
if (ret)
goto out_unpin;
- drm_fb_helper_fill_info(info, dev->fb_helper, sizes);
+ drm_fb_helper_fill_info(info, display->drm->fb_helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
@@ -292,21 +292,22 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n",
+ drm_dbg_kms(display->drm, "allocated %dx%d fb: 0x%08x\n",
fb->base.width, fb->base.height,
i915_ggtt_offset(vma));
ifbdev->fb = fb;
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
out_unpin:
intel_fb_unpin_vma(vma, flags);
out_unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
+
return ret;
}
@@ -319,16 +320,15 @@ out_unlock:
* Note we only support a single fb shared across pipes for boot (mostly for
* fbcon), so we just find the biggest and use that.
*/
-static bool intel_fbdev_init_bios(struct drm_device *dev,
+static bool intel_fbdev_init_bios(struct intel_display *display,
struct intel_fbdev *ifbdev)
{
- struct drm_i915_private *i915 = to_i915(dev);
struct intel_framebuffer *fb = NULL;
struct intel_crtc *crtc;
unsigned int max_size = 0;
/* Find the largest fb */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
@@ -338,21 +338,21 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
struct drm_gem_object *obj = intel_fb_bo(plane_state->uapi.fb);
if (!crtc_state->uapi.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active, skipping\n",
crtc->base.base.id, crtc->base.name);
continue;
}
if (!obj) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] no fb, skipping\n",
plane->base.base.id, plane->base.name);
continue;
}
if (obj->size > max_size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"found possible fb from [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
fb = to_intel_framebuffer(plane_state->uapi.fb);
@@ -361,13 +361,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
}
if (!fb) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"no active fbs found, not using BIOS config\n");
goto out;
}
/* Now make sure all the pipes will fit into it */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
@@ -375,13 +375,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
unsigned int cur_size;
if (!crtc_state->uapi.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active, skipping\n",
crtc->base.base.id, crtc->base.name);
continue;
}
- drm_dbg_kms(&i915->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
+ drm_dbg_kms(display->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
plane->base.base.id, plane->base.name);
/*
@@ -392,7 +392,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc_state->uapi.adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"fb not wide enough for [PLANE:%d:%s] (%d vs %d)\n",
plane->base.base.id, plane->base.name,
cur_size, fb->base.pitches[0]);
@@ -403,7 +403,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
cur_size *= fb->base.pitches[0];
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] area: %dx%d, bpp: %d, size: %d\n",
crtc->base.base.id, crtc->base.name,
crtc_state->uapi.adjusted_mode.crtc_hdisplay,
@@ -412,7 +412,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size);
if (cur_size > max_size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"fb not big enough for [PLANE:%d:%s] (%d vs %d)\n",
plane->base.base.id, plane->base.name,
cur_size, max_size);
@@ -420,14 +420,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
break;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"fb big enough [PLANE:%d:%s] (%d >= %d)\n",
plane->base.base.id, plane->base.name,
max_size, cur_size);
}
if (!fb) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"BIOS fb not suitable for all pipes, not using\n");
goto out;
}
@@ -437,7 +437,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
drm_framebuffer_get(&ifbdev->fb->base);
/* Final pass to check if any active pipes don't have fbs */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
@@ -448,13 +448,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
if (!crtc_state->uapi.active)
continue;
- drm_WARN(dev, !plane_state->uapi.fb,
+ drm_WARN(display->drm, !plane_state->uapi.fb,
"re-used BIOS config but lost an fb on [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
}
- drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n");
+ drm_dbg_kms(display->drm, "using BIOS fb for initial console\n");
return true;
out:
@@ -479,26 +479,25 @@ static unsigned int intel_fbdev_color_mode(const struct drm_format_info *info)
}
}
-void intel_fbdev_setup(struct drm_i915_private *i915)
+void intel_fbdev_setup(struct intel_display *display)
{
- struct drm_device *dev = &i915->drm;
struct intel_fbdev *ifbdev;
unsigned int preferred_bpp = 0;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- ifbdev = drmm_kzalloc(dev, sizeof(*ifbdev), GFP_KERNEL);
+ ifbdev = drmm_kzalloc(display->drm, sizeof(*ifbdev), GFP_KERNEL);
if (!ifbdev)
return;
- i915->display.fbdev.fbdev = ifbdev;
- if (intel_fbdev_init_bios(dev, ifbdev))
+ display->fbdev.fbdev = ifbdev;
+ if (intel_fbdev_init_bios(display, ifbdev))
preferred_bpp = intel_fbdev_color_mode(ifbdev->fb->base.format);
if (!preferred_bpp)
preferred_bpp = 32;
- drm_client_setup_with_color_mode(dev, preferred_bpp);
+ drm_client_setup_with_color_mode(display->drm, preferred_bpp);
}
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
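The fbdev hunks above all follow one conversion pattern: helpers stop dereferencing struct drm_i915_private and work from struct intel_display, resolved via to_intel_display(). A minimal sketch of the resulting shape, using only constructs visible in this diff (the function name and message are hypothetical, not part of the patch):

static void example_fbdev_log(struct drm_fb_helper *helper)
{
	struct intel_display *display = to_intel_display(helper->dev);

	/* previously: drm_dbg_kms(&dev_priv->drm, ...) */
	drm_dbg_kms(display->drm, "example: fbdev state\n");
}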
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 89bad3a2b01a..a15e3e222a0c 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -10,7 +10,7 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
-struct drm_i915_private;
+struct intel_display;
struct intel_fbdev;
struct intel_framebuffer;
@@ -19,14 +19,14 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
#define INTEL_FBDEV_DRIVER_OPS \
.fbdev_probe = intel_fbdev_driver_fbdev_probe
-void intel_fbdev_setup(struct drm_i915_private *dev_priv);
+void intel_fbdev_setup(struct intel_display *display);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev);
#else
#define INTEL_FBDEV_DRIVER_OPS \
.fbdev_probe = NULL
-static inline void intel_fbdev_setup(struct drm_i915_private *dev_priv)
+static inline void intel_fbdev_setup(struct intel_display *display)
{
}
static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index 4991c35a2632..5f4cb3328265 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -15,9 +15,9 @@
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_display *display = to_intel_display(helper->dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_framebuffer *fb;
- struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
int size;
@@ -50,14 +50,14 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
*
* Also skip stolen on MTL as Wa_22018444074 mitigation.
*/
- if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size)
+ if (!display->platform.meteorlake && size * 2 < dev_priv->dsm.usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
}
if (IS_ERR(obj)) {
- drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
+ drm_err(display->drm, "failed to allocate framebuffer (%pe)\n", obj);
return ERR_PTR(-ENOMEM);
}
@@ -67,9 +67,10 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
return to_intel_framebuffer(fb);
}
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
struct drm_gem_object *_obj, struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
struct i915_gem_ww_ctx ww;
void __iomem *vaddr;
@@ -101,7 +102,7 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
ret = PTR_ERR(vaddr);
continue;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
index e502ae375fc0..cb7957272715 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
@@ -9,13 +9,13 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
struct drm_gem_object;
-struct drm_i915_private;
struct fb_info;
struct i915_vma;
+struct intel_display;
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
struct drm_gem_object *obj, struct i915_vma *vma);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 40deee0769ae..169bbe154b5c 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -6,15 +6,16 @@
#include <linux/string_helpers.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
-#include "intel_dp.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
#include "intel_link_bw.h"
@@ -464,7 +465,6 @@ static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_st
void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -483,7 +483,7 @@ void intel_fdi_normal_train(struct intel_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
} else {
@@ -607,7 +607,6 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, retry;
@@ -647,7 +646,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
@@ -698,7 +697,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
} else {
@@ -1077,7 +1076,6 @@ void ilk_fdi_pll_disable(struct intel_crtc *crtc)
void ilk_fdi_disable(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -1096,7 +1094,7 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev_priv))
+ if (HAS_PCH_IBX(display))
intel_de_write(display, FDI_RX_CHICKEN(pipe),
FDI_RX_PHASE_SYNC_POINTER_OVR);
@@ -1106,7 +1104,7 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 7a8fbff39be0..2a787897b2d3 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -25,7 +25,8 @@
*
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_irq.h"
@@ -57,11 +58,10 @@
static bool ivb_can_enable_err_int(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
enum pipe pipe;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
for_each_pipe(display, pipe) {
crtc = intel_crtc_for_pipe(display, pipe);
@@ -75,11 +75,10 @@ static bool ivb_can_enable_err_int(struct intel_display *display)
static bool cpt_can_enable_serr_int(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
struct intel_crtc *crtc;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
for_each_pipe(display, pipe) {
crtc = intel_crtc_for_pipe(display, pipe);
@@ -94,11 +93,10 @@ static bool cpt_can_enable_serr_int(struct intel_display *display)
static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = PIPESTAT(display, crtc->pipe);
u32 enable_mask;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if ((intel_de_read(display, reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;
@@ -115,10 +113,9 @@ static void i9xx_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
i915_reg_t reg = PIPESTAT(display, pipe);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if (enable) {
u32 enable_mask = i915_pipestat_enable_mask(display, pipe);
@@ -136,24 +133,22 @@ static void i9xx_set_fifo_underrun_reporting(struct intel_display *display,
static void ilk_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bit = (pipe == PIPE_A) ?
DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
if (enable)
- ilk_enable_display_irq(dev_priv, bit);
+ ilk_enable_display_irq(display, bit);
else
- ilk_disable_display_irq(dev_priv, bit);
+ ilk_disable_display_irq(display, bit);
}
static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 err_int = intel_de_read(display, GEN7_ERR_INT);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;
@@ -169,7 +164,6 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable,
bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
if (enable) {
intel_de_write(display, GEN7_ERR_INT,
ERR_INT_FIFO_UNDERRUN(pipe));
@@ -177,9 +171,9 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
if (!ivb_can_enable_err_int(display))
return;
- ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ ilk_enable_display_irq(display, DE_ERR_INT_IVB);
} else {
- ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ ilk_disable_display_irq(display, DE_ERR_INT_IVB);
if (old &&
intel_de_read(display, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
@@ -193,36 +187,32 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
static void bdw_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (enable)
- bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN);
else
- bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN);
}
static void ibx_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bit = (pch_transcoder == PIPE_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
if (enable)
- ibx_enable_display_interrupt(dev_priv, bit);
+ ibx_enable_display_interrupt(display, bit);
else
- ibx_disable_display_interrupt(dev_priv, bit);
+ ibx_disable_display_interrupt(display, bit);
}
static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pch_transcoder = crtc->pipe;
u32 serr_int = intel_de_read(display, SERR_INT);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;
@@ -240,8 +230,6 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (enable) {
intel_de_write(display, SERR_INT,
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
@@ -249,9 +237,9 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
if (!cpt_can_enable_serr_int(display))
return;
- ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ ibx_enable_display_interrupt(display, SDE_ERROR_CPT);
} else {
- ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ ibx_disable_display_interrupt(display, SDE_ERROR_CPT);
if (old && intel_de_read(display, SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
@@ -265,11 +253,10 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
static bool __intel_set_cpu_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
bool old;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
old = !crtc->cpu_fifo_underrun_disabled;
crtc->cpu_fifo_underrun_disabled = !enable;
@@ -305,13 +292,12 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct intel_display *displa
bool intel_set_cpu_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
unsigned long flags;
bool ret;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock_irqsave(&display->irq.lock, flags);
ret = __intel_set_cpu_fifo_underrun_reporting(display, pipe, enable);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock_irqrestore(&display->irq.lock, flags);
return ret;
}
@@ -334,7 +320,6 @@ bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pch_transcoder);
unsigned long flags;
bool old;
@@ -348,12 +333,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
* crtc on LPT won't cause issues.
*/
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock_irqsave(&display->irq.lock, flags);
old = !crtc->pch_fifo_underrun_disabled;
crtc->pch_fifo_underrun_disabled = !enable;
- if (HAS_PCH_IBX(dev_priv))
+ if (HAS_PCH_IBX(display))
ibx_set_fifo_underrun_reporting(display,
pch_transcoder,
enable);
@@ -362,7 +347,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
pch_transcoder,
enable, old);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock_irqrestore(&display->irq.lock, flags);
return old;
}
@@ -429,10 +414,9 @@ void intel_pch_fifo_underrun_irq_handler(struct intel_display *display,
*/
void intel_check_cpu_fifo_underruns(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
for_each_intel_crtc(display->drm, crtc) {
if (crtc->cpu_fifo_underrun_disabled)
@@ -444,7 +428,7 @@ void intel_check_cpu_fifo_underruns(struct intel_display *display)
ivb_check_fifo_underruns(crtc);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
/**
@@ -457,28 +441,25 @@ void intel_check_cpu_fifo_underruns(struct intel_display *display)
*/
void intel_check_pch_fifo_underruns(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
for_each_intel_crtc(display->drm, crtc) {
if (crtc->pch_fifo_underrun_disabled)
continue;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
cpt_check_pch_fifo_underruns(crtc);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
void intel_init_fifo_underrun_reporting(struct intel_display *display,
struct intel_crtc *crtc,
bool enable)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
crtc->cpu_fifo_underrun_disabled = !enable;
/*
@@ -490,6 +471,6 @@ void intel_init_fifo_underrun_reporting(struct intel_display *display,
* PCH transcoders B and C would prevent enabling the south
* error interrupt (see cpt_can_enable_serr_int()).
*/
- if (intel_has_pch_trancoder(i915, crtc->pipe))
+ if (intel_has_pch_trancoder(display, crtc->pipe))
crtc->pch_fifo_underrun_disabled = !enable;
}
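The fifo-underrun hunks above move interrupt-state protection from dev_priv->irq_lock to the display-local display->irq.lock. A minimal caller sketch of the new locking shape, mirroring intel_check_cpu_fifo_underruns() above (the per-CRTC body is elided; the wrapper itself is hypothetical):

static void example_check_underruns(struct intel_display *display)
{
	struct intel_crtc *crtc;

	spin_lock_irq(&display->irq.lock);

	for_each_intel_crtc(display->drm, crtc) {
		if (crtc->cpu_fifo_underrun_disabled)
			continue;
		/* per-CRTC underrun checks, as in the hunks above */
	}

	spin_unlock_irq(&display->irq.lock);
}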
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index ba2f88ca6117..43be5377ddc1 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -58,7 +58,6 @@
#include <drm/drm_gem.h>
#include "i915_active.h"
-#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_bo.h"
#include "intel_display_trace.h"
@@ -72,7 +71,7 @@
/**
* frontbuffer_flush - flush frontbuffer
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
@@ -82,16 +81,14 @@
*
* Can be called without any locks held.
*/
-static void frontbuffer_flush(struct drm_i915_private *i915,
+static void frontbuffer_flush(struct intel_display *display,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
- struct intel_display *display = &i915->display;
-
/* Delay flushing when rings are still busy.*/
- spin_lock(&i915->display.fb_tracking.lock);
- frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
+ frontbuffer_bits &= ~display->fb_tracking.busy_bits;
+ spin_unlock(&display->fb_tracking.lock);
if (!frontbuffer_bits)
return;
@@ -107,7 +104,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
/**
* intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. The actual
@@ -117,19 +114,19 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_prepare(struct intel_display *display,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->display.fb_tracking.lock);
- i915->display.fb_tracking.flip_bits |= frontbuffer_bits;
+ spin_lock(&display->fb_tracking.lock);
+ display->fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
- i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ display->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
}
/**
* intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after the flip has been latched and will complete
@@ -137,22 +134,22 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_complete(struct intel_display *display,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
/* Mask any cancelled flips. */
- frontbuffer_bits &= i915->display.fb_tracking.flip_bits;
- i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ frontbuffer_bits &= display->fb_tracking.flip_bits;
+ display->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
if (frontbuffer_bits)
- frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+ frontbuffer_flush(display, frontbuffer_bits, ORIGIN_FLIP);
}
/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. This is for
@@ -161,15 +158,15 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip(struct drm_i915_private *i915,
+void intel_frontbuffer_flip(struct intel_display *display,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
- i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ display->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
- frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+ frontbuffer_flush(display, frontbuffer_bits, ORIGIN_FLIP);
}
void __intel_fb_invalidate(struct intel_frontbuffer *front,
@@ -198,7 +195,6 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
unsigned int frontbuffer_bits)
{
struct intel_display *display = to_intel_display(front->obj->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (origin == ORIGIN_CS) {
spin_lock(&display->fb_tracking.lock);
@@ -209,7 +205,7 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
}
if (frontbuffer_bits)
- frontbuffer_flush(i915, frontbuffer_bits, origin);
+ frontbuffer_flush(display, frontbuffer_bits, origin);
}
static void intel_frontbuffer_flush_work(struct work_struct *work)
@@ -280,7 +276,7 @@ static void frontbuffer_release(struct kref *ref)
struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->dev);
+ struct intel_display *display = to_intel_display(obj->dev);
struct intel_frontbuffer *front, *cur;
front = intel_bo_get_frontbuffer(obj);
@@ -300,9 +296,9 @@ intel_frontbuffer_get(struct drm_gem_object *obj)
I915_ACTIVE_RETIRE_SLEEPS);
INIT_WORK(&front->flush_work, intel_frontbuffer_flush_work);
- spin_lock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
cur = intel_bo_set_frontbuffer(obj, front);
- spin_unlock(&i915->display.fb_tracking.lock);
+ spin_unlock(&display->fb_tracking.lock);
if (cur != front)
kfree(front);
return cur;
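With the signature changes above, frontbuffer flip tracking is driven entirely through struct intel_display. A minimal caller sketch using only the prepare/complete prototypes changed in this series (the flip programming step is elided and hypothetical):

static void example_async_flip(struct intel_display *display,
			       unsigned int frontbuffer_bits)
{
	intel_frontbuffer_flip_prepare(display, frontbuffer_bits);

	/* ... schedule and program the flip ... */

	intel_frontbuffer_flip_complete(display, frontbuffer_bits);
}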
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 6237780a9f68..2fee12eaf9b6 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -31,7 +31,7 @@
#include "i915_active_types.h"
struct drm_gem_object;
-struct drm_i915_private;
+struct intel_display;
enum fb_op_origin {
ORIGIN_CPU = 0,
@@ -68,11 +68,11 @@ struct intel_frontbuffer {
GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_prepare(struct intel_display *display,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_complete(struct intel_display *display,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_i915_private *i915,
+void intel_frontbuffer_flip(struct intel_display *display,
unsigned frontbuffer_bits);
void intel_frontbuffer_put(struct intel_frontbuffer *front);
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index 8a49e2bb37fa..000a898c9480 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -3,10 +3,13 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/pci.h>
#include <linux/string.h>
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "intel_atomic.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_global_state.h"
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index abf457e68ee9..d55cc77650b7 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -152,32 +152,31 @@ static const struct gmbus_pin gmbus_pins_mtp[] = {
static const struct gmbus_pin *get_gmbus_pin(struct intel_display *display,
unsigned int pin)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct gmbus_pin *pins;
size_t size;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL) {
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL) {
pins = gmbus_pins_mtp;
size = ARRAY_SIZE(gmbus_pins_mtp);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG2) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_DG2) {
pins = gmbus_pins_dg2;
size = ARRAY_SIZE(gmbus_pins_dg2);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_DG1) {
pins = gmbus_pins_dg1;
size = ARRAY_SIZE(gmbus_pins_dg1);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_ICP) {
pins = gmbus_pins_icp;
size = ARRAY_SIZE(gmbus_pins_icp);
- } else if (HAS_PCH_CNP(i915)) {
+ } else if (HAS_PCH_CNP(display)) {
pins = gmbus_pins_cnp;
size = ARRAY_SIZE(gmbus_pins_cnp);
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
pins = gmbus_pins_bxt;
size = ARRAY_SIZE(gmbus_pins_bxt);
} else if (DISPLAY_VER(display) == 9) {
pins = gmbus_pins_skl;
size = ARRAY_SIZE(gmbus_pins_skl);
- } else if (IS_BROADWELL(i915)) {
+ } else if (display->platform.broadwell) {
pins = gmbus_pins_bdw;
size = ARRAY_SIZE(gmbus_pins_bdw);
} else {
@@ -240,11 +239,10 @@ static void bxt_gmbus_clock_gating(struct intel_display *display,
static u32 get_reserved(struct intel_gmbus *bus)
{
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
- if (!IS_I830(i915) && !IS_I845G(i915))
+ if (!display->platform.i830 && !display->platform.i845g)
reserved = intel_de_read_notrace(display, bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE);
@@ -314,11 +312,10 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_gmbus_reset(display);
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
pnv_gmbus_clock_gating(display, false);
set_data(bus, 1);
@@ -332,12 +329,11 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
set_data(bus, 1);
set_clock(bus, 1);
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
pnv_gmbus_clock_gating(display, true);
}
@@ -630,14 +626,13 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
int i = 0, inc, try = 0;
int ret = 0;
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ if (display->platform.geminilake || display->platform.broxton)
bxt_gmbus_clock_gating(display, false);
- else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ else if (HAS_PCH_SPT(display) || HAS_PCH_CNP(display))
pch_gmbus_clock_gating(display, false);
retry:
@@ -748,9 +743,9 @@ timeout:
out:
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ if (display->platform.geminilake || display->platform.broxton)
bxt_gmbus_clock_gating(display, true);
- else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ else if (HAS_PCH_SPT(display) || HAS_PCH_CNP(display))
pch_gmbus_clock_gating(display, true);
return ret;
@@ -873,12 +868,11 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
*/
int intel_gmbus_setup(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
unsigned int pin;
int ret;
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ if (display->platform.valleyview || display->platform.cherryview)
display->gmbus.mmio_base = VLV_DISPLAY_BASE;
else if (!HAS_GMCH(display))
/*
@@ -925,7 +919,7 @@ int intel_gmbus_setup(struct intel_display *display)
bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
- if (IS_I830(i915))
+ if (display->platform.i830)
bus->force_bit = 1;
intel_gpio_setup(bus, GPIO(display, gmbus_pin->gpio));
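The gmbus hunks above also show the platform-check conversion: IS_*(i915) macros become display->platform.* flags, and PCH checks take the display pointer. A one-function sketch of that pattern (the helper name is hypothetical):

static bool example_needs_gmbus_wa(struct intel_display *display)
{
	/* previously: IS_GEMINILAKE(i915) || IS_BROXTON(i915) */
	return display->platform.geminilake || display->platform.broxton;
}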
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 1bf424a822f3..3e3038f4ee1f 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -22,13 +22,18 @@
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_dp_mst.h"
#include "intel_hdcp.h"
#include "intel_hdcp_gsc.h"
+#include "intel_hdcp_gsc_message.h"
#include "intel_hdcp_regs.h"
#include "intel_hdcp_shim.h"
#include "intel_pcode.h"
+#define USE_HDCP_GSC(__display) (DISPLAY_VER(__display) >= 14)
+
#define KEY_LOAD_TRIES 5
#define HDCP2_LC_RETRY_CNT 3
@@ -136,7 +141,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state,
data->k++;
/* if there is only one active stream */
- if (dig_port->dp.mst.active_links <= 1)
+ if (intel_dp_mst_active_streams(&dig_port->dp) <= 1)
break;
}
drm_connector_list_iter_end(&conn_iter);
@@ -248,8 +253,8 @@ static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
return false;
/* If MTL+ make sure gsc is loaded and proxy is setup */
- if (intel_hdcp_gsc_cs_required(display)) {
- if (!intel_hdcp_gsc_check_status(display))
+ if (USE_HDCP_GSC(display)) {
+ if (!intel_hdcp_gsc_check_status(display->drm))
return false;
}
@@ -334,9 +339,7 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
static bool hdcp_key_loadable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum i915_power_well_id id;
- intel_wakeref_t wakeref;
bool enabled = false;
/*
@@ -349,7 +352,7 @@ static bool hdcp_key_loadable(struct intel_display *display)
id = SKL_DISP_PW_1;
/* PG1 (power well #1) needs to be enabled */
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_display_rpm(display)
enabled = intel_display_power_well_is_enabled(display, id);
/*
@@ -2339,7 +2342,7 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
static bool is_hdcp2_supported(struct intel_display *display)
{
- if (intel_hdcp_gsc_cs_required(display))
+ if (USE_HDCP_GSC(display))
return true;
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
@@ -2363,7 +2366,7 @@ void intel_hdcp_component_init(struct intel_display *display)
display->hdcp.comp_added = true;
mutex_unlock(&display->hdcp.hdcp_mutex);
- if (intel_hdcp_gsc_cs_required(display))
+ if (USE_HDCP_GSC(display))
ret = intel_hdcp_gsc_init(display);
else
ret = component_add_typed(display->drm->dev, &i915_hdcp_ops,
@@ -2638,7 +2641,7 @@ void intel_hdcp_component_fini(struct intel_display *display)
display->hdcp.comp_added = false;
mutex_unlock(&display->hdcp.hdcp_mutex);
- if (intel_hdcp_gsc_cs_required(display))
+ if (USE_HDCP_GSC(display))
intel_hdcp_gsc_fini(display);
else
component_del(display->drm->dev, &i915_hdcp_ops);
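The HDCP hunks above adopt the scoped runtime-PM helper in place of with_intel_runtime_pm(), dropping the explicit wakeref. A minimal sketch of how the macro reads after the hdcp_key_loadable() change above (the power well choice is purely illustrative):

static bool example_power_well_enabled(struct intel_display *display)
{
	bool enabled = false;

	with_intel_display_rpm(display)
		enabled = intel_display_power_well_is_enabled(display, SKL_DISP_PW_1);

	return enabled;
}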
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index 55965844d829..6a22862d6be1 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -11,27 +11,22 @@
#include "i915_drv.h"
#include "i915_utils.h"
#include "intel_hdcp_gsc.h"
-#include "intel_hdcp_gsc_message.h"
-struct intel_hdcp_gsc_message {
+struct intel_hdcp_gsc_context {
+ struct drm_i915_private *i915;
struct i915_vma *vma;
void *hdcp_cmd_in;
void *hdcp_cmd_out;
};
-bool intel_hdcp_gsc_cs_required(struct intel_display *display)
+bool intel_hdcp_gsc_check_status(struct drm_device *drm)
{
- return DISPLAY_VER(display) >= 14;
-}
-
-bool intel_hdcp_gsc_check_status(struct intel_display *display)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_i915_private *i915 = to_i915(drm);
struct intel_gt *gt = i915->media_gt;
struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;
if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) {
- drm_dbg_kms(display->drm,
+ drm_dbg_kms(&i915->drm,
"GSC components required for HDCP2.2 are not ready\n");
return false;
}
@@ -41,7 +36,7 @@ bool intel_hdcp_gsc_check_status(struct intel_display *display)
/*This function helps allocate memory for the command that we will send to gsc cs */
static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
- struct intel_hdcp_gsc_message *hdcp_message)
+ struct intel_hdcp_gsc_context *gsc_context)
{
struct intel_gt *gt = i915->media_gt;
struct drm_i915_gem_object *obj = NULL;
@@ -78,9 +73,10 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
memset(cmd_in, 0, obj->base.size);
- hdcp_message->hdcp_cmd_in = cmd_in;
- hdcp_message->hdcp_cmd_out = cmd_out;
- hdcp_message->vma = vma;
+ gsc_context->hdcp_cmd_in = cmd_in;
+ gsc_context->hdcp_cmd_out = cmd_out;
+ gsc_context->vma = vma;
+ gsc_context->i915 = i915;
return 0;
@@ -91,80 +87,37 @@ out_unpin:
return err;
}
-static const struct i915_hdcp_ops gsc_hdcp_ops = {
- .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
- .verify_receiver_cert_prepare_km =
- intel_hdcp_gsc_verify_receiver_cert_prepare_km,
- .verify_hprime = intel_hdcp_gsc_verify_hprime,
- .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
- .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
- .verify_lprime = intel_hdcp_gsc_verify_lprime,
- .get_session_key = intel_hdcp_gsc_get_session_key,
- .repeater_check_flow_prepare_ack =
- intel_hdcp_gsc_repeater_check_flow_prepare_ack,
- .verify_mprime = intel_hdcp_gsc_verify_mprime,
- .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
- .close_hdcp_session = intel_hdcp_gsc_close_session,
-};
-
-static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display)
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct intel_hdcp_gsc_context *gsc_context;
int ret;
- hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
-
- if (!hdcp_message)
- return -ENOMEM;
+ gsc_context = kzalloc(sizeof(*gsc_context), GFP_KERNEL);
+ if (!gsc_context)
+ return ERR_PTR(-ENOMEM);
/*
* NOTE: No need to lock the comp mutex here as it is already
* going to be taken before this function called
*/
- display->hdcp.hdcp_message = hdcp_message;
- ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message);
-
- if (ret)
- drm_err(display->drm, "Could not initialize hdcp_message\n");
-
- return ret;
-}
-
-static void intel_hdcp_gsc_free_message(struct intel_display *display)
-{
- struct intel_hdcp_gsc_message *hdcp_message =
- display->hdcp.hdcp_message;
+ ret = intel_hdcp_gsc_initialize_message(i915, gsc_context);
+ if (ret) {
+ drm_err(&i915->drm, "Could not initialize gsc_context\n");
+ kfree(gsc_context);
+ gsc_context = ERR_PTR(ret);
+ }
- hdcp_message->hdcp_cmd_in = NULL;
- hdcp_message->hdcp_cmd_out = NULL;
- i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP);
- kfree(hdcp_message);
+ return gsc_context;
}
-int intel_hdcp_gsc_init(struct intel_display *display)
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
{
- struct i915_hdcp_arbiter *data;
- int ret;
-
- data = kzalloc(sizeof(struct i915_hdcp_arbiter), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ if (!gsc_context)
+ return;
- mutex_lock(&display->hdcp.hdcp_mutex);
- display->hdcp.arbiter = data;
- display->hdcp.arbiter->hdcp_dev = display->drm->dev;
- display->hdcp.arbiter->ops = &gsc_hdcp_ops;
- ret = intel_hdcp_gsc_hdcp2_init(display);
- mutex_unlock(&display->hdcp.hdcp_mutex);
-
- return ret;
-}
-
-void intel_hdcp_gsc_fini(struct intel_display *display)
-{
- intel_hdcp_gsc_free_message(display);
- kfree(display->hdcp.arbiter);
+ i915_vma_unpin_and_release(&gsc_context->vma, I915_VMA_RELEASE_MAP);
+ kfree(gsc_context);
}
static int intel_gsc_send_sync(struct drm_i915_private *i915,
@@ -211,18 +164,18 @@ static int intel_gsc_send_sync(struct drm_i915_private *i915,
/*
* This function can now be used for sending requests and will also handle
* receipt of reply messages hence no different function of message retrieval
- * is required. We will initialize intel_hdcp_gsc_message structure then add
+ * is required. We will initialize intel_hdcp_gsc_context structure then add
* gsc cs memory header as stated in specs after which the normal HDCP payload
* will follow
*/
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len)
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len)
{
+ struct drm_i915_private *i915 = gsc_context->i915;
struct intel_gt *gt = i915->media_gt;
struct intel_gsc_mtl_header *header_in, *header_out;
const size_t max_msg_size = PAGE_SIZE - sizeof(*header_in);
- struct intel_hdcp_gsc_message *hdcp_message;
u64 addr_in, addr_out, host_session_id;
u32 reply_size, msg_size_in, msg_size_out;
int ret, tries = 0;
@@ -235,10 +188,9 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
msg_size_in = msg_in_len + sizeof(*header_in);
msg_size_out = msg_out_len + sizeof(*header_out);
- hdcp_message = i915->display.hdcp.hdcp_message;
- header_in = hdcp_message->hdcp_cmd_in;
- header_out = hdcp_message->hdcp_cmd_out;
- addr_in = i915_ggtt_offset(hdcp_message->vma);
+ header_in = gsc_context->hdcp_cmd_in;
+ header_out = gsc_context->hdcp_cmd_out;
+ addr_in = i915_ggtt_offset(gsc_context->vma);
addr_out = addr_in + PAGE_SIZE;
memset(header_in, 0, msg_size_in);
@@ -246,7 +198,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
get_random_bytes(&host_session_id, sizeof(u64));
intel_gsc_uc_heci_cmd_emit_mtl_header(header_in, HECI_MEADDRESS_HDCP,
msg_size_in, host_session_id);
- memcpy(hdcp_message->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len);
+ memcpy(gsc_context->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len);
/*
* Keep sending request in case the pending bit is set no need to add
@@ -280,7 +232,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
reply_size, (u32)msg_out_len);
}
- memcpy(msg_out, hdcp_message->hdcp_cmd_out + sizeof(*header_out), msg_out_len);
+ memcpy(msg_out, gsc_context->hdcp_cmd_out + sizeof(*header_out), msg_out_len);
err:
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
index 5695a5e4f609..9305c14aaffe 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
@@ -6,19 +6,17 @@
#ifndef __INTEL_HDCP_GSC_H__
#define __INTEL_HDCP_GSC_H__
-#include <linux/err.h>
#include <linux/types.h>
-struct drm_i915_private;
-struct intel_display;
-struct intel_hdcp_gsc_message;
+struct drm_device;
+struct intel_hdcp_gsc_context;
-bool intel_hdcp_gsc_cs_required(struct intel_display *display);
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len);
-int intel_hdcp_gsc_init(struct intel_display *display);
-void intel_hdcp_gsc_fini(struct intel_display *display);
-bool intel_hdcp_gsc_check_status(struct intel_display *display);
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len);
+bool intel_hdcp_gsc_check_status(struct drm_device *drm);
+
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm);
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context);
#endif /* __INTEL_HDCP_GCS_H__ */
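The reworked header above replaces the init/fini entry points and the implicit per-display message with an explicit context object. A minimal usage sketch of the new interface, assuming only the prototypes declared above plus <linux/err.h> for IS_ERR()/PTR_ERR(); the buffers and error handling are illustrative, not taken from the patch:

static int example_hdcp_gsc_roundtrip(struct drm_device *drm,
				      void *msg_in, size_t in_len,
				      void *msg_out, size_t out_len)
{
	struct intel_hdcp_gsc_context *gsc_context;
	ssize_t ret;

	if (!intel_hdcp_gsc_check_status(drm))
		return -ENODEV;

	gsc_context = intel_hdcp_gsc_context_alloc(drm);
	if (IS_ERR(gsc_context))
		return PTR_ERR(gsc_context);

	ret = intel_hdcp_gsc_msg_send(gsc_context, msg_in, in_len,
				      msg_out, out_len);

	intel_hdcp_gsc_context_free(gsc_context);

	return ret < 0 ? ret : 0;
}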
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
index 129104fa9b16..98967bb148e3 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
@@ -4,20 +4,23 @@
*/
#include <linux/err.h>
+
+#include <drm/drm_print.h>
#include <drm/intel/i915_hdcp_interface.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_hdcp_gsc.h"
#include "intel_hdcp_gsc_message.h"
-int
+static int
intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_init *ake_data)
{
struct wired_cmd_initiate_hdcp2_session_in session_init_in = {};
struct wired_cmd_initiate_hdcp2_session_out session_init_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ake_data)
@@ -28,7 +31,7 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
session_init_in.header.api_version = HDCP_API_VERSION;
session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
@@ -41,9 +44,9 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
session_init_in.protocol = data->protocol;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_init_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &session_init_in,
sizeof(session_init_in),
- (u8 *)&session_init_out,
+ &session_init_out,
sizeof(session_init_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -64,7 +67,7 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
return 0;
}
-int
+static int
intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_send_cert *rx_cert,
@@ -75,8 +78,8 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
{
struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = {};
struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
@@ -87,7 +90,7 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
verify_rxcert_in.header.api_version = HDCP_API_VERSION;
verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
@@ -103,9 +106,9 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_rxcert_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_rxcert_in,
sizeof(verify_rxcert_in),
- (u8 *)&verify_rxcert_out,
+ &verify_rxcert_out,
sizeof(verify_rxcert_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte);
@@ -134,14 +137,14 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_send_hprime *rx_hprime)
{
struct wired_cmd_ake_send_hprime_in send_hprime_in = {};
struct wired_cmd_ake_send_hprime_out send_hprime_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_hprime)
@@ -152,7 +155,7 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
send_hprime_in.header.api_version = HDCP_API_VERSION;
send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
@@ -166,9 +169,9 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
HDCP_2_2_H_PRIME_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&send_hprime_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &send_hprime_in,
sizeof(send_hprime_in),
- (u8 *)&send_hprime_out,
+ &send_hprime_out,
sizeof(send_hprime_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -184,14 +187,14 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
return 0;
}
-int
+static int
intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_send_pairing_info *pairing_info)
{
struct wired_cmd_ake_send_pairing_info_in pairing_info_in = {};
struct wired_cmd_ake_send_pairing_info_out pairing_info_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !pairing_info)
@@ -202,7 +205,7 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
pairing_info_in.header.api_version = HDCP_API_VERSION;
pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
@@ -217,9 +220,9 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
HDCP_2_2_E_KH_KM_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&pairing_info_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &pairing_info_in,
sizeof(pairing_info_in),
- (u8 *)&pairing_info_out,
+ &pairing_info_out,
sizeof(pairing_info_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -236,15 +239,15 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
return 0;
}
-int
+static int
intel_hdcp_gsc_initiate_locality_check(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_lc_init *lc_init_data)
{
struct wired_cmd_init_locality_check_in lc_init_in = {};
struct wired_cmd_init_locality_check_out lc_init_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !lc_init_data)
@@ -255,7 +258,7 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
lc_init_in.header.api_version = HDCP_API_VERSION;
lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
@@ -266,8 +269,8 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
lc_init_in.port.physical_port = (u8)data->hdcp_ddi;
lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in),
- (u8 *)&lc_init_out, sizeof(lc_init_out));
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &lc_init_in, sizeof(lc_init_in),
+ &lc_init_out, sizeof(lc_init_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@@ -285,14 +288,14 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_lc_send_lprime *rx_lprime)
{
struct wired_cmd_validate_locality_in verify_lprime_in = {};
struct wired_cmd_validate_locality_out verify_lprime_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_lprime)
@@ -303,7 +306,7 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
verify_lprime_in.header.api_version = HDCP_API_VERSION;
verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
@@ -318,9 +321,9 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
HDCP_2_2_L_PRIME_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_lprime_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_lprime_in,
sizeof(verify_lprime_in),
- (u8 *)&verify_lprime_out,
+ &verify_lprime_out,
sizeof(verify_lprime_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -337,14 +340,15 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
return 0;
}
-int intel_hdcp_gsc_get_session_key(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ske_send_eks *ske_data)
+static int
+intel_hdcp_gsc_get_session_key(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ske_send_eks *ske_data)
{
struct wired_cmd_get_session_key_in get_skey_in = {};
struct wired_cmd_get_session_key_out get_skey_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ske_data)
@@ -355,7 +359,7 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
get_skey_in.header.api_version = HDCP_API_VERSION;
get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
@@ -366,8 +370,8 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
get_skey_in.port.physical_port = (u8)data->hdcp_ddi;
get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in),
- (u8 *)&get_skey_out, sizeof(get_skey_out));
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &get_skey_in, sizeof(get_skey_in),
+ &get_skey_out, sizeof(get_skey_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@@ -387,7 +391,7 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_rep_send_receiverid_list
@@ -397,8 +401,8 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
{
struct wired_cmd_verify_repeater_in verify_repeater_in = {};
struct wired_cmd_verify_repeater_out verify_repeater_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !rep_topology || !rep_send_ack || !data)
@@ -409,7 +413,7 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
verify_repeater_in.header.api_version = HDCP_API_VERSION;
verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
@@ -430,9 +434,9 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids,
HDCP_2_2_RECEIVER_IDS_MAX_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_repeater_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_repeater_in,
sizeof(verify_repeater_in),
- (u8 *)&verify_repeater_out,
+ &verify_repeater_out,
sizeof(verify_repeater_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -453,14 +457,15 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
return 0;
}
-int intel_hdcp_gsc_verify_mprime(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_stream_ready *stream_ready)
+static int
+intel_hdcp_gsc_verify_mprime(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_stream_ready *stream_ready)
{
struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
struct wired_cmd_repeater_auth_stream_req_out verify_mprime_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
size_t cmd_size;
@@ -472,7 +477,7 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
cmd_size = struct_size(verify_mprime_in, streams, data->k);
if (cmd_size == SIZE_MAX)
@@ -499,8 +504,8 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
verify_mprime_in->k = cpu_to_be16(data->k);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)verify_mprime_in, cmd_size,
- (u8 *)&verify_mprime_out,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, verify_mprime_in, cmd_size,
+ &verify_mprime_out,
sizeof(verify_mprime_out));
kfree(verify_mprime_in);
if (byte < 0) {
@@ -518,13 +523,13 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
return 0;
}
-int intel_hdcp_gsc_enable_authentication(struct device *dev,
- struct hdcp_port_data *data)
+static int intel_hdcp_gsc_enable_authentication(struct device *dev,
+ struct hdcp_port_data *data)
{
struct wired_cmd_enable_auth_in enable_auth_in = {};
struct wired_cmd_enable_auth_out enable_auth_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
@@ -535,7 +540,7 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
enable_auth_in.header.api_version = HDCP_API_VERSION;
enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
@@ -547,9 +552,9 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
enable_auth_in.stream_type = data->streams[0].stream_type;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&enable_auth_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &enable_auth_in,
sizeof(enable_auth_in),
- (u8 *)&enable_auth_out,
+ &enable_auth_out,
sizeof(enable_auth_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -565,13 +570,13 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
{
struct wired_cmd_close_session_in session_close_in = {};
struct wired_cmd_close_session_out session_close_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
@@ -582,7 +587,7 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
session_close_in.header.api_version = HDCP_API_VERSION;
session_close_in.header.command_id = WIRED_CLOSE_SESSION;
@@ -594,9 +599,9 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
session_close_in.port.physical_port = (u8)data->hdcp_ddi;
session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_close_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &session_close_in,
sizeof(session_close_in),
- (u8 *)&session_close_out,
+ &session_close_out,
sizeof(session_close_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -611,3 +616,57 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
return 0;
}
+
+static const struct i915_hdcp_ops gsc_hdcp_ops = {
+ .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
+ .verify_receiver_cert_prepare_km =
+ intel_hdcp_gsc_verify_receiver_cert_prepare_km,
+ .verify_hprime = intel_hdcp_gsc_verify_hprime,
+ .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
+ .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
+ .verify_lprime = intel_hdcp_gsc_verify_lprime,
+ .get_session_key = intel_hdcp_gsc_get_session_key,
+ .repeater_check_flow_prepare_ack =
+ intel_hdcp_gsc_repeater_check_flow_prepare_ack,
+ .verify_mprime = intel_hdcp_gsc_verify_mprime,
+ .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
+ .close_hdcp_session = intel_hdcp_gsc_close_session,
+};
+
+int intel_hdcp_gsc_init(struct intel_display *display)
+{
+ struct intel_hdcp_gsc_context *gsc_context;
+ struct i915_hdcp_arbiter *arbiter;
+ int ret = 0;
+
+ arbiter = kzalloc(sizeof(*arbiter), GFP_KERNEL);
+ if (!arbiter)
+ return -ENOMEM;
+
+ mutex_lock(&display->hdcp.hdcp_mutex);
+
+ gsc_context = intel_hdcp_gsc_context_alloc(display->drm);
+ if (IS_ERR(gsc_context)) {
+ ret = PTR_ERR(gsc_context);
+ kfree(arbiter);
+ goto out;
+ }
+
+ display->hdcp.arbiter = arbiter;
+ display->hdcp.arbiter->hdcp_dev = display->drm->dev;
+ display->hdcp.arbiter->ops = &gsc_hdcp_ops;
+ display->hdcp.gsc_context = gsc_context;
+
+out:
+ mutex_unlock(&display->hdcp.hdcp_mutex);
+
+ return ret;
+}
+
+void intel_hdcp_gsc_fini(struct intel_display *display)
+{
+ intel_hdcp_gsc_context_free(display->hdcp.gsc_context);
+ display->hdcp.gsc_context = NULL;
+ kfree(display->hdcp.arbiter);
+ display->hdcp.arbiter = NULL;
+}
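
Editorial sketch: the hunks above complete the move of the GSC HDCP helpers behind a file-local i915_hdcp_ops table. The individual message helpers become static, and the only public entry points left are intel_hdcp_gsc_init() / intel_hdcp_gsc_fini(), which allocate and free the arbiter plus the gsc_context under hdcp_mutex. The stand-alone C sketch below mirrors that registration pattern with simplified stand-in types; my_ops, my_display and the error codes are illustrative, not the kernel's actual definitions.

#include <stdlib.h>

/* Simplified stand-ins for the kernel structures touched by the patch. */
struct my_context { int dummy; };

struct my_ops {
	int (*enable_auth)(struct my_context *ctx);
	int (*close_session)(struct my_context *ctx);
};

/* File-local implementations, analogous to the functions made static above. */
static int enable_auth(struct my_context *ctx) { (void)ctx; return 0; }
static int close_session(struct my_context *ctx) { (void)ctx; return 0; }

/* Const ops table, analogous to gsc_hdcp_ops. */
static const struct my_ops hdcp_ops = {
	.enable_auth = enable_auth,
	.close_session = close_session,
};

struct my_display {
	const struct my_ops *ops;	/* like display->hdcp.arbiter->ops */
	struct my_context *ctx;		/* like display->hdcp.gsc_context */
};

/* init/fini pairing: allocate the context, publish the ops table. */
static int my_hdcp_init(struct my_display *display)
{
	struct my_context *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return -1;		/* -ENOMEM in the kernel version */

	display->ctx = ctx;
	display->ops = &hdcp_ops;
	return 0;
}

static void my_hdcp_fini(struct my_display *display)
{
	free(display->ctx);
	display->ctx = NULL;
	display->ops = NULL;
}

int main(void)
{
	struct my_display d = { 0 };

	if (my_hdcp_init(&d) == 0) {
		d.ops->enable_auth(d.ctx);
		d.ops->close_session(d.ctx);
		my_hdcp_fini(&d);
	}
	return 0;
}
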
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
index 2d597f27e931..9f54157a4a3e 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
@@ -6,68 +6,9 @@
#ifndef __INTEL_HDCP_GSC_MESSAGE_H__
#define __INTEL_HDCP_GSC_MESSAGE_H__
-#include <linux/types.h>
-
-struct device;
-struct drm_i915_private;
-struct hdcp_port_data;
-struct hdcp2_ake_init;
-struct hdcp2_ake_send_cert;
-struct hdcp2_ake_no_stored_km;
-struct hdcp2_ake_send_hprime;
-struct hdcp2_ake_send_pairing_info;
-struct hdcp2_lc_init;
-struct hdcp2_lc_send_lprime;
-struct hdcp2_ske_send_eks;
-struct hdcp2_rep_send_receiverid_list;
-struct hdcp2_rep_send_ack;
-struct hdcp2_rep_stream_ready;
struct intel_display;
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len);
-bool intel_hdcp_gsc_check_status(struct intel_display *display);
-int
-intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_ake_init *ake_data);
-int
-intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ake_send_cert *rx_cert,
- bool *km_stored,
- struct hdcp2_ake_no_stored_km
- *ek_pub_km,
- size_t *msg_sz);
-int
-intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_ake_send_hprime *rx_hprime);
-int
-intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_ake_send_pairing_info *pairing_info);
-int
-intel_hdcp_gsc_initiate_locality_check(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_lc_init *lc_init_data);
-int
-intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_lc_send_lprime *rx_lprime);
-int intel_hdcp_gsc_get_session_key(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ske_send_eks *ske_data);
-int
-intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_send_receiverid_list
- *rep_topology,
- struct hdcp2_rep_send_ack
- *rep_send_ack);
-int intel_hdcp_gsc_verify_mprime(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_stream_ready *stream_ready);
-int intel_hdcp_gsc_enable_authentication(struct device *dev,
- struct hdcp_port_data *data);
-int
-intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data);
+int intel_hdcp_gsc_init(struct intel_display *display);
+void intel_hdcp_gsc_fini(struct intel_display *display);
#endif /* __INTEL_HDCP_GSC_MESSAGE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 33b8d5229db0..98033471902c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -38,14 +38,15 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/intel/intel_lpe_audio.h>
#include <media/cec-notifier.h>
#include "g4x_hdmi.h"
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -64,6 +65,7 @@
#include "intel_panel.h"
#include "intel_pfit.h"
#include "intel_snps_phy.h"
+#include "intel_vrr.h"
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
@@ -714,7 +716,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- struct drm_connector *connector = conn_state->connector;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
int ret;
if (!crtc_state->has_infoframe)
@@ -723,7 +725,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
- ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector,
+ ret = drm_hdmi_avi_infoframe_from_display_mode(frame, &connector->base,
adjusted_mode);
if (ret)
return false;
@@ -742,7 +744,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
- drm_hdmi_avi_infoframe_quant_range(frame, connector,
+ drm_hdmi_avi_infoframe_quant_range(frame, &connector->base,
adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
@@ -768,7 +770,7 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd;
int ret;
@@ -778,7 +780,7 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
- if (IS_DGFX(i915))
+ if (display->platform.dgfx)
ret = hdmi_spd_infoframe_init(frame, "Intel", "Discrete gfx");
else
ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
@@ -978,7 +980,6 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -988,9 +989,9 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
if (HAS_DDI(display))
reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.valleyview || display->platform.cherryview)
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
reg = TVIDEO_DIP_GCP(crtc->pipe);
else
return false;
@@ -1004,7 +1005,6 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -1014,9 +1014,9 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
if (HAS_DDI(display))
reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.valleyview || display->platform.cherryview)
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
reg = TVIDEO_DIP_GCP(crtc->pipe);
else
return;
@@ -1028,9 +1028,9 @@ static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_G4X(dev_priv) || !crtc_state->has_infoframe)
+ if (display->platform.g4x || !crtc_state->has_infoframe)
return;
crtc_state->infoframes.enable |=
@@ -1538,7 +1538,6 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
struct intel_display *display = to_intel_display(dig_port);
struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_connector *connector = hdmi->attached_connector;
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int ret;
if (!enable)
@@ -1557,7 +1556,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
* WA: To fix incorrect positioning of the window of
* opportunity and enc_en signalling in KABYLAKE.
*/
- if (IS_KABYLAKE(dev_priv) && enable)
+ if (display->platform.kabylake && enable)
return kbl_repositioning_enc_en_signal(connector,
cpu_transcoder);
@@ -1569,7 +1568,6 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
int ret;
@@ -1582,15 +1580,15 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
if (ret)
return false;
- intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg);
+ intel_de_write(display, HDCP_RPRIME(display, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
+ if (wait_for((intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n",
- intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
- port)));
+ intel_de_read(display, HDCP_STATUS(display, cpu_transcoder,
+ port)));
return false;
}
return true;
@@ -1813,14 +1811,13 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int max_tmds_clock, vbt_max_tmds_clock;
- if (DISPLAY_VER(display) >= 13 || IS_ALDERLAKE_S(dev_priv))
+ if (DISPLAY_VER(display) >= 13 || display->platform.alderlake_s)
max_tmds_clock = 600000;
else if (DISPLAY_VER(display) >= 10)
max_tmds_clock = 594000;
- else if (DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv))
+ else if (DISPLAY_VER(display) >= 8 || display->platform.haswell)
max_tmds_clock = 300000;
else if (DISPLAY_VER(display) >= 5)
max_tmds_clock = 225000;
@@ -1879,7 +1876,6 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
bool has_hdmi_sink)
{
struct intel_display *display = to_intel_display(hdmi);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
if (clock < 25000)
@@ -1889,16 +1885,16 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_HIGH;
/* GLK DPLL can't generate 446-480 MHz */
- if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
+ if (display->platform.geminilake && clock > 446666 && clock < 480000)
return MODE_CLOCK_RANGE;
/* BXT/GLK DPLL can't generate 223-240 MHz */
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
- if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
+ if (display->platform.cherryview && clock > 216000 && clock < 240000)
return MODE_CLOCK_RANGE;
/* ICL+ combo PHY PLL can't generate 500-533.2 MHz */
@@ -1942,11 +1938,12 @@ static bool intel_hdmi_source_bpc_possible(struct intel_display *display, int bp
}
}
-static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
+static bool intel_hdmi_sink_bpc_possible(struct drm_connector *_connector,
int bpc, bool has_hdmi_sink,
enum intel_output_format sink_format)
{
- const struct drm_display_info *info = &connector->display_info;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ const struct drm_display_info *info = &connector->base.display_info;
const struct drm_hdmi_info *hdmi = &info->hdmi;
switch (bpc) {
@@ -1975,12 +1972,13 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
}
static enum drm_mode_status
-intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
+intel_hdmi_mode_clock_valid(struct drm_connector *_connector, int clock,
bool has_hdmi_sink,
enum intel_output_format sink_format)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
enum drm_mode_status status = MODE_OK;
int bpc;
@@ -1995,7 +1993,8 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
if (!intel_hdmi_source_bpc_possible(display, bpc))
continue;
- if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, sink_format))
+ if (!intel_hdmi_sink_bpc_possible(&connector->base, bpc, has_hdmi_sink,
+ sink_format))
continue;
status = hdmi_port_clock_valid(hdmi, tmds_clock, true, has_hdmi_sink);
@@ -2010,15 +2009,16 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
}
static enum drm_mode_status
-intel_hdmi_mode_valid(struct drm_connector *connector,
+intel_hdmi_mode_valid(struct drm_connector *_connector,
const struct drm_display_mode *mode)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
enum drm_mode_status status;
int clock = mode->clock;
- int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
- bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
+ int max_dotclk = display->cdclk.max_dotclk_freq;
+ bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->base.state);
bool ycbcr_420_only;
enum intel_output_format sink_format;
@@ -2047,22 +2047,23 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
if (clock > 600000)
return MODE_CLOCK_HIGH;
- ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode);
+ ycbcr_420_only = drm_mode_is_420_only(&connector->base.display_info, mode);
if (ycbcr_420_only)
sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
else
sink_format = INTEL_OUTPUT_FORMAT_RGB;
- status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format);
+ status = intel_hdmi_mode_clock_valid(&connector->base, clock, has_hdmi_sink, sink_format);
if (status != MODE_OK) {
if (ycbcr_420_only ||
- !connector->ycbcr_420_allowed ||
- !drm_mode_is_420_also(&connector->display_info, mode))
+ !connector->base.ycbcr_420_allowed ||
+ !drm_mode_is_420_also(&connector->base.display_info, mode))
return status;
sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
- status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format);
+ status = intel_hdmi_mode_clock_valid(&connector->base, clock, has_hdmi_sink,
+ sink_format);
if (status != MODE_OK)
return status;
}
@@ -2073,16 +2074,16 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
int bpc, bool has_hdmi_sink)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_connector_state *connector_state;
- struct drm_connector *connector;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_digital_connector_state *connector_state;
+ struct intel_connector *connector;
int i;
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->uapi.crtc)
+ for_each_new_intel_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->base.crtc != crtc_state->uapi.crtc)
continue;
- if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink,
+ if (!intel_hdmi_sink_bpc_possible(&connector->base, bpc, has_hdmi_sink,
crtc_state->sink_format))
return false;
}
@@ -2210,7 +2211,7 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_connector *connector = conn_state->connector;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
@@ -2218,7 +2219,7 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
return false;
if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- return connector->display_info.has_audio;
+ return connector->base.display_info.has_audio;
else
return intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
@@ -2322,14 +2323,14 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct drm_connector *connector = conn_state->connector;
- struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_scdc *scdc = &connector->base.display_info.hdmi.scdc;
int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- if (!connector->interlace_allowed &&
+ if (!connector->base.interlace_allowed &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return -EINVAL;
@@ -2384,6 +2385,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
}
+ intel_vrr_compute_config(pipe_config, conn_state);
+
intel_hdmi_compute_gcp_infoframe(encoder, pipe_config,
conn_state);
@@ -2422,25 +2425,26 @@ void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder)
}
static void
-intel_hdmi_unset_edid(struct drm_connector *connector)
+intel_hdmi_unset_edid(struct drm_connector *_connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
- drm_edid_free(to_intel_connector(connector)->detect_edid);
- to_intel_connector(connector)->detect_edid = NULL;
+ drm_edid_free(connector->detect_edid);
+ connector->detect_edid = NULL;
}
static void
-intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
+intel_hdmi_dp_dual_mode_detect(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
- struct i2c_adapter *ddc = connector->ddc;
+ struct i2c_adapter *ddc = connector->base.ddc;
enum drm_dp_dual_mode_type type;
type = drm_dp_dual_mode_detect(display->drm, ddc);
@@ -2455,7 +2459,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
* if the port is a dual mode capable DP port.
*/
if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
- if (!connector->force &&
+ if (!connector->base.force &&
intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
drm_dbg_kms(display->drm,
"Assuming DP dual mode adaptor presence based on VBT\n");
@@ -2478,7 +2482,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
hdmi->dp_dual_mode.max_tmds_clock);
/* Older VBTs are often buggy and can't be trusted :( Play it safe. */
- if ((DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv)) &&
+ if ((DISPLAY_VER(display) >= 8 || display->platform.haswell) &&
!intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
drm_dbg_kms(display->drm,
"Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n");
@@ -2487,34 +2491,35 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
}
static bool
-intel_hdmi_set_edid(struct drm_connector *connector)
+intel_hdmi_set_edid(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
- struct i2c_adapter *ddc = connector->ddc;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct i2c_adapter *ddc = connector->base.ddc;
intel_wakeref_t wakeref;
const struct drm_edid *drm_edid;
bool connected = false;
wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
- drm_edid = drm_edid_read_ddc(connector, ddc);
+ drm_edid = drm_edid_read_ddc(&connector->base, ddc);
if (!drm_edid && !intel_gmbus_is_forced_bit(ddc)) {
drm_dbg_kms(display->drm,
"HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(ddc, true);
- drm_edid = drm_edid_read_ddc(connector, ddc);
+ drm_edid = drm_edid_read_ddc(&connector->base, ddc);
intel_gmbus_force_bit(ddc, false);
}
/* Below we depend on display info having been updated */
- drm_edid_connector_update(connector, drm_edid);
+ drm_edid_connector_update(&connector->base, drm_edid);
- to_intel_connector(connector)->detect_edid = drm_edid;
+ connector->detect_edid = drm_edid;
if (drm_edid_is_digital(drm_edid)) {
- intel_hdmi_dp_dual_mode_detect(connector);
+ intel_hdmi_dp_dual_mode_detect(&connector->base);
connected = true;
}
@@ -2522,28 +2527,29 @@ intel_hdmi_set_edid(struct drm_connector *connector)
intel_display_power_put(display, POWER_DOMAIN_GMBUS, wakeref);
cec_notifier_set_phys_addr(intel_hdmi->cec_notifier,
- connector->display_info.source_physical_address);
+ connector->base.display_info.source_physical_address);
return connected;
}
static enum drm_connector_status
-intel_hdmi_detect(struct drm_connector *connector, bool force)
+intel_hdmi_detect(struct drm_connector *_connector, bool force)
{
- struct intel_display *display = to_intel_display(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
enum drm_connector_status status = connector_status_disconnected;
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
if (!intel_display_device_enabled(display))
return connector_status_disconnected;
if (!intel_display_driver_check_access(display))
- return connector->status;
+ return connector->base.status;
wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
@@ -2551,9 +2557,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
!intel_digital_port_connected(encoder))
goto out;
- intel_hdmi_unset_edid(connector);
+ intel_hdmi_unset_edid(&connector->base);
- if (intel_hdmi_set_edid(connector))
+ if (intel_hdmi_set_edid(&connector->base))
status = connector_status_connected;
out:
@@ -2566,49 +2572,54 @@ out:
}
static void
-intel_hdmi_force(struct drm_connector *connector)
+intel_hdmi_force(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
if (!intel_display_driver_check_access(display))
return;
- intel_hdmi_unset_edid(connector);
+ intel_hdmi_unset_edid(&connector->base);
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return;
- intel_hdmi_set_edid(connector);
+ intel_hdmi_set_edid(&connector->base);
}
-static int intel_hdmi_get_modes(struct drm_connector *connector)
+static int intel_hdmi_get_modes(struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
+
/* drm_edid_connector_update() done in ->detect() or ->force() */
- return drm_edid_connector_add_modes(connector);
+ return drm_edid_connector_add_modes(&connector->base);
}
static int
-intel_hdmi_connector_register(struct drm_connector *connector)
+intel_hdmi_connector_register(struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
int ret;
- ret = intel_connector_register(connector);
+ ret = intel_connector_register(&connector->base);
if (ret)
return ret;
return ret;
}
-static void intel_hdmi_connector_unregister(struct drm_connector *connector)
+static void intel_hdmi_connector_unregister(struct drm_connector *_connector)
{
- struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier;
cec_notifier_conn_unregister(n);
- intel_connector_unregister(connector);
+ intel_connector_unregister(&connector->base);
}
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -2624,15 +2635,16 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static int intel_hdmi_connector_atomic_check(struct drm_connector *connector,
+static int intel_hdmi_connector_atomic_check(struct drm_connector *_connector,
struct drm_atomic_state *state)
{
- struct intel_display *display = to_intel_display(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
if (HAS_DDI(display))
- return intel_digital_connector_atomic_check(connector, state);
+ return intel_digital_connector_atomic_check(&connector->base, state);
else
- return g4x_hdmi_connector_atomic_check(connector, state);
+ return g4x_hdmi_connector_atomic_check(&connector->base, state);
}
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
@@ -2642,22 +2654,23 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
};
static void
-intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(intel_hdmi);
- intel_attach_force_audio_property(connector);
- intel_attach_broadcast_rgb_property(connector);
- intel_attach_aspect_ratio_property(connector);
+ intel_attach_force_audio_property(&connector->base);
+ intel_attach_broadcast_rgb_property(&connector->base);
+ intel_attach_aspect_ratio_property(&connector->base);
- intel_attach_hdmi_colorspace_property(connector);
- drm_connector_attach_content_type_property(connector);
+ intel_attach_hdmi_colorspace_property(&connector->base);
+ drm_connector_attach_content_type_property(&connector->base);
if (DISPLAY_VER(display) >= 10)
- drm_connector_attach_hdr_output_metadata_property(connector);
+ drm_connector_attach_hdr_output_metadata_property(&connector->base);
if (!HAS_GMCH(display))
- drm_connector_attach_max_bpc_property(connector, 8, 12);
+ drm_connector_attach_max_bpc_property(&connector->base, 8, 12);
}
/*
@@ -2679,25 +2692,26 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
* True on success, false on failure.
*/
bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
- struct drm_connector *connector,
+ struct drm_connector *_connector,
bool high_tmds_clock_ratio,
bool scrambling)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(encoder);
struct drm_scrambling *sink_scrambling =
- &connector->display_info.hdmi.scdc.scrambling;
+ &connector->base.display_info.hdmi.scdc.scrambling;
if (!sink_scrambling->supported)
return true;
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
- connector->base.id, connector->name,
+ connector->base.base.id, connector->base.name,
str_yes_no(scrambling), high_tmds_clock_ratio ? 40 : 10);
/* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
- return drm_scdc_set_high_tmds_clock_ratio(connector, high_tmds_clock_ratio) &&
- drm_scdc_set_scrambling(connector, scrambling);
+ return drm_scdc_set_high_tmds_clock_ratio(&connector->base, high_tmds_clock_ratio) &&
+ drm_scdc_set_scrambling(&connector->base, scrambling);
}
static u8 chv_encoder_to_ddc_pin(struct intel_encoder *encoder)
@@ -2808,7 +2822,7 @@ static u8 mcc_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
WARN_ON(encoder->port == PORT_C);
@@ -2819,7 +2833,7 @@ static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
* combo outputs. With CMP, the traditional DDI A-D pins are used for
* all outputs.
*/
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && phy >= PHY_C)
+ if (INTEL_PCH_TYPE(display) >= PCH_TGP && phy >= PHY_C)
return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
return GMBUS_PIN_1_BXT + phy;
@@ -2828,7 +2842,6 @@ static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_encoder_to_phy(encoder);
drm_WARN_ON(display->drm, encoder->port == PORT_A);
@@ -2839,7 +2852,7 @@ static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
* combo outputs. With CMP, the traditional DDI A-D pins are used for
* all outputs.
*/
- if (INTEL_PCH_TYPE(i915) >= PCH_TGP && phy >= PHY_C)
+ if (INTEL_PCH_TYPE(display) >= PCH_TGP && phy >= PHY_C)
return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
return GMBUS_PIN_1_BXT + phy;
@@ -2892,27 +2905,26 @@ static u8 g4x_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u8 ddc_pin;
- if (IS_ALDERLAKE_S(dev_priv))
+ if (display->platform.alderlake_s)
ddc_pin = adls_encoder_to_ddc_pin(encoder);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ else if (INTEL_PCH_TYPE(display) >= PCH_DG1)
ddc_pin = dg1_encoder_to_ddc_pin(encoder);
- else if (IS_ROCKETLAKE(dev_priv))
+ else if (display->platform.rocketlake)
ddc_pin = rkl_encoder_to_ddc_pin(encoder);
- else if (DISPLAY_VER(display) == 9 && HAS_PCH_TGP(dev_priv))
+ else if (DISPLAY_VER(display) == 9 && HAS_PCH_TGP(display))
ddc_pin = gen9bc_tgp_encoder_to_ddc_pin(encoder);
- else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
- HAS_PCH_TGP(dev_priv))
+ else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
+ HAS_PCH_TGP(display))
ddc_pin = mcc_encoder_to_ddc_pin(encoder);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
ddc_pin = icl_encoder_to_ddc_pin(encoder);
- else if (HAS_PCH_CNP(dev_priv))
+ else if (HAS_PCH_CNP(display))
ddc_pin = cnp_encoder_to_ddc_pin(encoder);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
ddc_pin = bxt_encoder_to_ddc_pin(encoder);
- else if (IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.cherryview)
ddc_pin = chv_encoder_to_ddc_pin(encoder);
else
ddc_pin = g4x_encoder_to_ddc_pin(encoder);
@@ -2986,15 +2998,13 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
void intel_infoframe_init(struct intel_digital_port *dig_port)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *dev_priv =
- to_i915(dig_port->base.base.dev);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
dig_port->write_infoframe = vlv_write_infoframe;
dig_port->read_infoframe = vlv_read_infoframe;
dig_port->set_infoframes = vlv_set_infoframes;
dig_port->infoframes_enabled = vlv_infoframes_enabled;
- } else if (IS_G4X(dev_priv)) {
+ } else if (display->platform.g4x) {
dig_port->write_infoframe = g4x_write_infoframe;
dig_port->read_infoframe = g4x_read_infoframe;
dig_port->set_infoframes = g4x_set_infoframes;
@@ -3011,7 +3021,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
dig_port->set_infoframes = hsw_set_infoframes;
dig_port->infoframes_enabled = hsw_infoframes_enabled;
}
- } else if (HAS_PCH_IBX(dev_priv)) {
+ } else if (HAS_PCH_IBX(display)) {
dig_port->write_infoframe = ibx_write_infoframe;
dig_port->read_infoframe = ibx_read_infoframe;
dig_port->set_infoframes = ibx_set_infoframes;
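
Editorial sketch: most of the intel_hdmi.c hunks above follow one mechanical conversion — the drm_connector * parameter is renamed to _connector, upcast once via to_intel_connector(), and every base-class field is then reached through &connector->base. The sketch below reproduces that idiom with hypothetical structures (base_conn / wrapped_conn are illustrative names, not the DRM types) to show how the container_of-style upcast and the ->base access fit together.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the base class (drm_connector in the patch). */
struct base_conn {
	int id;
	const char *name;
};

/* Stand-in for the derived class (intel_connector in the patch). */
struct wrapped_conn {
	struct base_conn base;	/* must be embedded for the upcast to work */
	int extra_state;
};

/* Same trick as to_intel_connector(): recover the wrapper from the base. */
#define to_wrapped_conn(p) \
	((struct wrapped_conn *)((char *)(p) - offsetof(struct wrapped_conn, base)))

/* Callback keeps the base-type signature expected by the core... */
static void handle_event(struct base_conn *_connector)
{
	/* ...and upcasts once at the top, like the converted functions do. */
	struct wrapped_conn *connector = to_wrapped_conn(_connector);

	/* Derived fields are reached directly... */
	connector->extra_state++;

	/* ...base fields through ->base, as in connector->base.name. */
	printf("[CONNECTOR:%d:%s] events=%d\n",
	       connector->base.id, connector->base.name,
	       connector->extra_state);
}

int main(void)
{
	struct wrapped_conn c = { .base = { .id = 42, .name = "HDMI-A-1" } };

	/* Callers still pass the embedded base object, i.e. &connector->base. */
	handle_event(&c.base);
	return 0;
}
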
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 00d7b1ccf190..fc5d8928c37e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -30,6 +30,7 @@
#include "i915_irq.h"
#include "intel_connector.h"
#include "intel_display_power.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@@ -118,7 +119,7 @@ intel_connector_hpd_pin(struct intel_connector *connector)
/**
* intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
- * @dev_priv: private driver data pointer
+ * @display: display device
* @pin: the pin to gather stats on
* @long_hpd: whether the HPD IRQ was long or short
*
@@ -127,13 +128,13 @@ intel_connector_hpd_pin(struct intel_connector *connector)
* responsible for further action.
*
* The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
- * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
+ * stored in @display->hotplug.hpd_storm_threshold which defaults to
* @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
* short IRQs count as +1. If this threshold is exceeded, it's considered an
* IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
*
* By default, most systems will only count long IRQs towards
- * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also
+ * &display->hotplug.hpd_storm_threshold. However, some older systems also
* suffer from short IRQ storms and must also track these. Because short IRQ
* storms are naturally caused by sideband interactions with DP MST devices,
* short IRQ detection is only enabled for systems without DP MST support.
@@ -145,10 +146,10 @@ intel_connector_hpd_pin(struct intel_connector *connector)
*
* Return true if an IRQ storm was detected on @pin.
*/
-static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+static bool intel_hpd_irq_storm_detect(struct intel_display *display,
enum hpd_pin pin, bool long_hpd)
{
- struct intel_hotplug *hpd = &dev_priv->display.hotplug;
+ struct intel_hotplug *hpd = &display->hotplug;
unsigned long start = hpd->stats[pin].last_jiffies;
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
const int increment = long_hpd ? 10 : 1;
@@ -156,7 +157,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
bool storm = false;
if (!threshold ||
- (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
+ (!long_hpd && !display->hotplug.hpd_short_storm_enabled))
return false;
if (!time_in_range(jiffies, start, end)) {
@@ -167,11 +168,11 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
hpd->stats[pin].count += increment;
if (hpd->stats[pin].count > threshold) {
hpd->stats[pin].state = HPD_MARK_DISABLED;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Received HPD interrupt on PIN %d - cnt: %d\n",
pin,
hpd->stats[pin].count);
@@ -180,56 +181,62 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
return storm;
}
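
Editorial sketch: the kernel-doc above describes the storm heuristic in prose — within HPD_STORM_DETECT_PERIOD each long pulse adds 10 to a per-pin count, each short pulse adds 1, and crossing hpd_storm_threshold marks the pin as a storm. The stand-alone sketch below re-implements just that counting rule outside the kernel; the period and threshold values, the millisecond clock, and the window-reset handling are illustrative simplifications, not the driver's exact jiffies-based logic.

#include <stdbool.h>
#include <stdio.h>

#define STORM_DETECT_PERIOD_MS	1000	/* illustrative, cf. HPD_STORM_DETECT_PERIOD */
#define STORM_THRESHOLD		50	/* illustrative default threshold */

struct pin_stats {
	unsigned long window_start_ms;	/* like stats[pin].last_jiffies */
	int count;			/* weighted pulses in the current window */
};

/*
 * Long pulses weigh 10, short pulses weigh 1; crossing the threshold inside
 * one detection window reports a storm, as the comment in the patch states.
 */
static bool storm_detect(struct pin_stats *s, unsigned long now_ms, bool long_hpd)
{
	int increment = long_hpd ? 10 : 1;

	if (now_ms - s->window_start_ms > STORM_DETECT_PERIOD_MS) {
		/* Window expired: restart counting from this pulse. */
		s->window_start_ms = now_ms;
		s->count = 0;
	}

	s->count += increment;
	return s->count > STORM_THRESHOLD;
}

int main(void)
{
	struct pin_stats s = { 0 };
	unsigned long t;

	/* Six long pulses in one window: 6 * 10 = 60 > 50, so a storm. */
	for (t = 0; t < 6; t++)
		if (storm_detect(&s, t * 10, true))
			printf("storm detected at pulse %lu\n", t + 1);
	return 0;
}
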
-static bool detection_work_enabled(struct drm_i915_private *i915)
+static bool detection_work_enabled(struct intel_display *display)
{
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- return i915->display.hotplug.detection_work_enabled;
+ return display->hotplug.detection_work_enabled;
}
static bool
-mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
- lockdep_assert_held(&i915->irq_lock);
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (!detection_work_enabled(i915))
+ lockdep_assert_held(&display->irq.lock);
+
+ if (!detection_work_enabled(display))
return false;
return mod_delayed_work(i915->unordered_wq, work, delay);
}
static bool
-queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
- lockdep_assert_held(&i915->irq_lock);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ lockdep_assert_held(&display->irq.lock);
- if (!detection_work_enabled(i915))
+ if (!detection_work_enabled(display))
return false;
return queue_delayed_work(i915->unordered_wq, work, delay);
}
static bool
-queue_detection_work(struct drm_i915_private *i915, struct work_struct *work)
+queue_detection_work(struct intel_display *display, struct work_struct *work)
{
- lockdep_assert_held(&i915->irq_lock);
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (!detection_work_enabled(i915))
+ lockdep_assert_held(&display->irq.lock);
+
+ if (!detection_work_enabled(display))
return false;
return queue_work(i915->unordered_wq, work);
}
static void
-intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
+intel_hpd_irq_storm_switch_to_polling(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
bool hpd_disabled = false;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -238,15 +245,15 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
+ display->hotplug.stats[pin].state != HPD_MARK_DISABLED)
continue;
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
connector->base.name);
- dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
+ display->hotplug.stats[pin].state = HPD_DISABLED;
connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
@@ -255,36 +262,35 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
- drm_kms_helper_poll_reschedule(&dev_priv->drm);
- mod_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.reenable_work,
+ drm_kms_helper_poll_reschedule(display->drm);
+ mod_delayed_detection_work(display,
+ &display->hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
}
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- display.hotplug.reenable_work.work);
+ struct intel_display *display =
+ container_of(work, typeof(*display), hotplug.reenable_work.work);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
enum hpd_pin pin;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
+ display->hotplug.stats[pin].state != HPD_DISABLED)
continue;
if (connector->base.polled != connector->polled)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Reenabling HPD on connector %s\n",
connector->base.name);
connector->base.polled = connector->polled;
@@ -292,15 +298,15 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
drm_connector_list_iter_end(&conn_iter);
for_each_hpd_pin(pin) {
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
- dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
+ if (display->hotplug.stats[pin].state == HPD_DISABLED)
+ display->hotplug.stats[pin].state = HPD_ENABLED;
}
- intel_hpd_irq_setup(dev_priv);
+ intel_hpd_irq_setup(display);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
static enum intel_hotplug_state
@@ -349,32 +355,72 @@ static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
enc_to_dig_port(encoder)->hpd_pulse != NULL;
}
+static bool hpd_pin_has_pulse(struct intel_display *display, enum hpd_pin pin)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(display->drm, encoder) {
+ if (encoder->hpd_pin != pin)
+ continue;
+
+ if (intel_encoder_has_hpd_pulse(encoder))
+ return true;
+ }
+
+ return false;
+}
+
+static bool hpd_pin_is_blocked(struct intel_display *display, enum hpd_pin pin)
+{
+ lockdep_assert_held(&display->irq.lock);
+
+ return display->hotplug.stats[pin].blocked_count;
+}
+
+static u32 get_blocked_hpd_pin_mask(struct intel_display *display)
+{
+ enum hpd_pin pin;
+ u32 hpd_pin_mask = 0;
+
+ for_each_hpd_pin(pin) {
+ if (hpd_pin_is_blocked(display, pin))
+ hpd_pin_mask |= BIT(pin);
+ }
+
+ return hpd_pin_mask;
+}
+
static void i915_digport_work_func(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
- u32 long_port_mask, short_port_mask;
+ struct intel_display *display =
+ container_of(work, struct intel_display, hotplug.dig_port_work);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ u32 long_hpd_pin_mask, short_hpd_pin_mask;
struct intel_encoder *encoder;
+ u32 blocked_hpd_pin_mask;
u32 old_bits = 0;
- spin_lock_irq(&dev_priv->irq_lock);
- long_port_mask = dev_priv->display.hotplug.long_port_mask;
- dev_priv->display.hotplug.long_port_mask = 0;
- short_port_mask = dev_priv->display.hotplug.short_port_mask;
- dev_priv->display.hotplug.short_port_mask = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ long_hpd_pin_mask = hotplug->long_hpd_pin_mask & ~blocked_hpd_pin_mask;
+ hotplug->long_hpd_pin_mask &= ~long_hpd_pin_mask;
+ short_hpd_pin_mask = hotplug->short_hpd_pin_mask & ~blocked_hpd_pin_mask;
+ hotplug->short_hpd_pin_mask &= ~short_hpd_pin_mask;
+
+ spin_unlock_irq(&display->irq.lock);
+
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_digital_port *dig_port;
- enum port port = encoder->port;
+ enum hpd_pin pin = encoder->hpd_pin;
bool long_hpd, short_hpd;
enum irqreturn ret;
if (!intel_encoder_has_hpd_pulse(encoder))
continue;
- long_hpd = long_port_mask & BIT(port);
- short_hpd = short_port_mask & BIT(port);
+ long_hpd = long_hpd_pin_mask & BIT(pin);
+ short_hpd = short_hpd_pin_mask & BIT(pin);
if (!long_hpd && !short_hpd)
continue;
@@ -384,16 +430,16 @@ static void i915_digport_work_func(struct work_struct *work)
ret = dig_port->hpd_pulse(dig_port, long_hpd);
if (ret == IRQ_NONE) {
/* fall back to old school hpd */
- old_bits |= BIT(encoder->hpd_pin);
+ old_bits |= BIT(pin);
}
}
if (old_bits) {
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.event_bits |= old_bits;
- queue_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work, 0);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.event_bits |= old_bits;
+ queue_delayed_detection_work(display,
+ &display->hotplug.hotplug_work, 0);
+ spin_unlock_irq(&display->irq.lock);
}
}
@@ -406,13 +452,17 @@ static void i915_digport_work_func(struct work_struct *work)
*/
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ struct intel_encoder *encoder = &dig_port->base;
+
+ spin_lock_irq(&display->irq.lock);
- spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
- spin_unlock_irq(&i915->irq_lock);
+ hotplug->short_hpd_pin_mask |= BIT(encoder->hpd_pin);
+ if (!hpd_pin_is_blocked(display, encoder->hpd_pin))
+ queue_work(hotplug->dp_wq, &hotplug->dig_port_work);
- queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
+ spin_unlock_irq(&display->irq.lock);
}
/*
@@ -420,9 +470,9 @@ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
*/
static void i915_hotplug_work_func(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- display.hotplug.hotplug_work.work);
+ struct intel_display *display =
+ container_of(work, struct intel_display, hotplug.hotplug_work.work);
+ struct intel_hotplug *hotplug = &display->hotplug;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
u32 changed = 0, retry = 0;
@@ -430,30 +480,32 @@ static void i915_hotplug_work_func(struct work_struct *work)
u32 hpd_retry_bits;
struct drm_connector *first_changed_connector = NULL;
int changed_connectors = 0;
+ u32 blocked_hpd_pin_mask;
- mutex_lock(&dev_priv->drm.mode_config.mutex);
- drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
+ mutex_lock(&display->drm->mode_config.mutex);
+ drm_dbg_kms(display->drm, "running encoder hotplug functions\n");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- hpd_event_bits = dev_priv->display.hotplug.event_bits;
- dev_priv->display.hotplug.event_bits = 0;
- hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
- dev_priv->display.hotplug.retry_bits = 0;
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ hpd_event_bits = hotplug->event_bits & ~blocked_hpd_pin_mask;
+ hotplug->event_bits &= ~hpd_event_bits;
+ hpd_retry_bits = hotplug->retry_bits & ~blocked_hpd_pin_mask;
+ hotplug->retry_bits &= ~hpd_retry_bits;
/* Enable polling for connectors which had HPD IRQ storms */
- intel_hpd_irq_storm_switch_to_polling(dev_priv);
+ intel_hpd_irq_storm_switch_to_polling(display);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* Skip calling encode hotplug handlers if ignore long HPD set*/
- if (dev_priv->display.hotplug.ignore_long_hpd) {
- drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ if (display->hotplug.ignore_long_hpd) {
+ drm_dbg_kms(display->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
+ mutex_unlock(&display->drm->mode_config.mutex);
return;
}
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
u32 hpd_bit;
@@ -472,7 +524,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
else
connector->hotplug_retries++;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Connector %s (pin %i) received hotplug event. (retry %d)\n",
connector->base.name, pin,
connector->hotplug_retries);
@@ -495,12 +547,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
}
}
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (changed_connectors == 1)
drm_kms_helper_connector_hotplug_event(first_changed_connector);
else if (changed_connectors > 0)
- drm_kms_helper_hotplug_event(&dev_priv->drm);
+ drm_kms_helper_hotplug_event(display->drm);
if (first_changed_connector)
drm_connector_put(first_changed_connector);
@@ -508,20 +560,20 @@ static void i915_hotplug_work_func(struct work_struct *work)
/* Remove shared HPD pins that have changed */
retry &= ~changed;
if (retry) {
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.retry_bits |= retry;
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.retry_bits |= retry;
- mod_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work,
+ mod_delayed_detection_work(display,
+ &display->hotplug.hotplug_work,
msecs_to_jiffies(HPD_RETRY_DELAY));
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
}
/**
* intel_hpd_irq_handler - main hotplug irq handler
- * @dev_priv: drm_i915_private
+ * @display: display device
* @pin_mask: a mask of hpd pins that have triggered the irq
* @long_mask: a mask of hpd pins that may be long hpd pulses
*
@@ -535,7 +587,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
* Here, we do hotplug irq storm detection and mitigation, and pass further
* processing to appropriate bottom halves.
*/
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+void intel_hpd_irq_handler(struct intel_display *display,
u32 pin_mask, u32 long_mask)
{
struct intel_encoder *encoder;
@@ -548,7 +600,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!pin_mask)
return;
- spin_lock(&dev_priv->irq_lock);
+ spin_lock(&display->irq.lock);
/*
* Determine whether ->hpd_pulse() exists for each pin, and
@@ -556,8 +608,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* as each pin may have up to two encoders (HDMI and DP) and
* only the one of them (DP) will have ->hpd_pulse().
*/
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- enum port port = encoder->port;
+ for_each_intel_encoder(display->drm, encoder) {
bool long_hpd;
pin = encoder->hpd_pin;
@@ -569,18 +620,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"digital hpd on [ENCODER:%d:%s] - %s\n",
encoder->base.base.id, encoder->base.name,
long_hpd ? "long" : "short");
- queue_dig = true;
+
+ if (!hpd_pin_is_blocked(display, pin))
+ queue_dig = true;
if (long_hpd) {
long_hpd_pulse_mask |= BIT(pin);
- dev_priv->display.hotplug.long_port_mask |= BIT(port);
+ display->hotplug.long_hpd_pin_mask |= BIT(pin);
} else {
short_hpd_pulse_mask |= BIT(pin);
- dev_priv->display.hotplug.short_port_mask |= BIT(port);
+ display->hotplug.short_hpd_pin_mask |= BIT(pin);
}
}
@@ -591,20 +644,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!(BIT(pin) & pin_mask))
continue;
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
+ if (display->hotplug.stats[pin].state == HPD_DISABLED) {
/*
* On GMCH platforms the interrupt mask bits only
* prevent irq generation, not the setting of the
* hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms.
*/
- drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
+ drm_WARN_ONCE(display->drm, !HAS_GMCH(display),
"Received HPD interrupt on pin %d although disabled\n",
pin);
continue;
}
- if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
+ if (display->hotplug.stats[pin].state != HPD_ENABLED)
continue;
/*
@@ -615,13 +668,15 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
long_hpd = long_hpd_pulse_mask & BIT(pin);
} else {
- dev_priv->display.hotplug.event_bits |= BIT(pin);
+ display->hotplug.event_bits |= BIT(pin);
long_hpd = true;
- queue_hp = true;
+
+ if (!hpd_pin_is_blocked(display, pin))
+ queue_hp = true;
}
- if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
- dev_priv->display.hotplug.event_bits &= ~BIT(pin);
+ if (intel_hpd_irq_storm_detect(display, pin, long_hpd)) {
+ display->hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
queue_hp = true;
}
@@ -632,7 +687,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* happens later in our hotplug work.
*/
if (storm_detected)
- intel_hpd_irq_setup(dev_priv);
+ intel_hpd_irq_setup(display);
/*
* Our hotplug handler can grab modeset locks (by calling down into the
@@ -641,17 +696,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* deadlock.
*/
if (queue_dig)
- queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
+ queue_work(display->hotplug.dp_wq, &display->hotplug.dig_port_work);
if (queue_hp)
- queue_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work, 0);
+ queue_delayed_detection_work(display,
+ &display->hotplug.hotplug_work, 0);
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&display->irq.lock);
}
/**
* intel_hpd_init - initializes and enables hpd support
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function enables the hotplug support. It requires that interrupts have
* already been enabled with intel_irq_init_hw(). From this point on hotplug and
@@ -663,40 +718,40 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
*
* Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
*/
-void intel_hpd_init(struct drm_i915_private *dev_priv)
+void intel_hpd_init(struct intel_display *display)
{
int i;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
for_each_hpd_pin(i) {
- dev_priv->display.hotplug.stats[i].count = 0;
- dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
+ display->hotplug.stats[i].count = 0;
+ display->hotplug.stats[i].state = HPD_ENABLED;
}
/*
* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy.
*/
- spin_lock_irq(&dev_priv->irq_lock);
- intel_hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ intel_hpd_irq_setup(display);
+ spin_unlock_irq(&display->irq.lock);
}
-static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915)
+static void i915_hpd_poll_detect_connectors(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
struct intel_connector *first_changed_connector = NULL;
int changed = 0;
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- if (!i915->drm.mode_config.poll_enabled)
+ if (!display->drm->mode_config.poll_enabled)
goto out;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD))
continue;
@@ -714,7 +769,7 @@ static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
out:
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (!changed)
return;
@@ -722,25 +777,23 @@ out:
if (changed == 1)
drm_kms_helper_connector_hotplug_event(&first_changed_connector->base);
else
- drm_kms_helper_hotplug_event(&i915->drm);
+ drm_kms_helper_hotplug_event(display->drm);
drm_connector_put(&first_changed_connector->base);
}
static void i915_hpd_poll_init_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- display.hotplug.poll_init_work);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display =
+ container_of(work, typeof(*display), hotplug.poll_init_work);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
intel_wakeref_t wakeref;
bool enabled;
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);
+ enabled = READ_ONCE(display->hotplug.poll_enabled);
/*
* Prevent taking a power reference from this sequence of
* i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() ->
@@ -750,14 +803,14 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (!enabled) {
wakeref = intel_display_power_get(display,
POWER_DOMAIN_DISPLAY_CORE);
- drm_WARN_ON(&dev_priv->drm,
- READ_ONCE(dev_priv->display.hotplug.poll_enabled));
- cancel_work(&dev_priv->display.hotplug.poll_init_work);
+ drm_WARN_ON(display->drm,
+ READ_ONCE(display->hotplug.poll_enabled));
+ cancel_work(&display->hotplug.poll_init_work);
}
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -765,7 +818,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (pin == HPD_NONE)
continue;
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
+ if (display->hotplug.stats[pin].state == HPD_DISABLED)
continue;
connector->base.polled = connector->polled;
@@ -776,19 +829,19 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
}
drm_connector_list_iter_end(&conn_iter);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
if (enabled)
- drm_kms_helper_poll_reschedule(&dev_priv->drm);
+ drm_kms_helper_poll_reschedule(display->drm);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
/*
* We might have missed any hotplugs that happened while we were
* in the middle of disabling polling
*/
if (!enabled) {
- i915_hpd_poll_detect_connectors(dev_priv);
+ i915_hpd_poll_detect_connectors(display);
intel_display_power_put(display,
POWER_DOMAIN_DISPLAY_CORE,
@@ -798,7 +851,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
/**
* intel_hpd_poll_enable - enable polling for connectors with hpd
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function enables polling for all connectors which support HPD.
* Under certain conditions HPD may not be functional. On most Intel GPUs,
@@ -812,15 +865,12 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
*
* Also see: intel_hpd_init() and intel_hpd_poll_disable().
*/
-void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
+void intel_hpd_poll_enable(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- if (!HAS_DISPLAY(dev_priv) ||
- !intel_display_device_enabled(display))
+ if (!HAS_DISPLAY(display) || !intel_display_device_enabled(display))
return;
- WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);
+ WRITE_ONCE(display->hotplug.poll_enabled, true);
/*
* We might already be holding dev->mode_config.mutex, so do this in a
@@ -828,15 +878,15 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
- spin_lock_irq(&dev_priv->irq_lock);
- queue_detection_work(dev_priv,
- &dev_priv->display.hotplug.poll_init_work);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ queue_detection_work(display,
+ &display->hotplug.poll_init_work);
+ spin_unlock_irq(&display->irq.lock);
}
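Poll enable/disable are designed to be callable from contexts that may already hold mode_config.mutex, which is why the actual work is deferred to poll_init_work; a hypothetical suspend/resume-style caller, invented purely for illustration and not taken from this patch, would pair them roughly like this:

/* Hypothetical caller (illustration only): fall back to connector polling
 * while HPD interrupts are unavailable, e.g. around a low-power state. */
static void example_display_power_down(struct intel_display *display)
{
	/* HPD IRQs are about to stop working; let DRM poll connectors instead. */
	intel_hpd_poll_enable(display);
}

static void example_display_power_up(struct intel_display *display)
{
	/* Re-enable HPD pins and IRQ generation, then stop polling again. */
	intel_hpd_init(display);
	intel_hpd_poll_disable(display);
}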
/**
* intel_hpd_poll_disable - disable polling for connectors with hpd
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function disables polling for all connectors which support HPD.
* Under certain conditions HPD may not be functional. On most Intel GPUs,
@@ -853,26 +903,26 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
*
* Also see: intel_hpd_init() and intel_hpd_poll_enable().
*/
-void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
+void intel_hpd_poll_disable(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
+ WRITE_ONCE(display->hotplug.poll_enabled, false);
- spin_lock_irq(&dev_priv->irq_lock);
- queue_detection_work(dev_priv,
- &dev_priv->display.hotplug.poll_init_work);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ queue_detection_work(display,
+ &display->hotplug.poll_init_work);
+ spin_unlock_irq(&display->irq.lock);
}
-void intel_hpd_poll_fini(struct drm_i915_private *i915)
+void intel_hpd_poll_fini(struct intel_display *display)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
/* Kill all the work that may have been queued by hpd. */
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
intel_connector_cancel_modeset_retry_work(connector);
intel_hdcp_cancel_works(connector);
@@ -880,157 +930,261 @@ void intel_hpd_poll_fini(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
}
-void intel_hpd_init_early(struct drm_i915_private *i915)
+void intel_hpd_init_early(struct intel_display *display)
{
- INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work,
+ INIT_DELAYED_WORK(&display->hotplug.hotplug_work,
i915_hotplug_work_func);
- INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func);
- INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
- INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work,
+ INIT_WORK(&display->hotplug.dig_port_work, i915_digport_work_func);
+ INIT_WORK(&display->hotplug.poll_init_work, i915_hpd_poll_init_work);
+ INIT_DELAYED_WORK(&display->hotplug.reenable_work,
intel_hpd_irq_storm_reenable_work);
- i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ display->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
/* If we have MST support, we want to avoid doing short HPD IRQ storm
* detection, as short HPD storms will occur as a natural part of
* sideband messaging with MST.
* On older platforms however, IRQ storms can occur with both long and
* short pulses, as seen on some G4x systems.
*/
- i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
+ display->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(display);
}
-static bool cancel_all_detection_work(struct drm_i915_private *i915)
+static bool cancel_all_detection_work(struct intel_display *display)
{
bool was_pending = false;
- if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work))
+ if (cancel_delayed_work_sync(&display->hotplug.hotplug_work))
was_pending = true;
- if (cancel_work_sync(&i915->display.hotplug.poll_init_work))
+ if (cancel_work_sync(&display->hotplug.poll_init_work))
was_pending = true;
- if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work))
+ if (cancel_delayed_work_sync(&display->hotplug.reenable_work))
was_pending = true;
return was_pending;
}
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+void intel_hpd_cancel_work(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+
+ drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display));
- dev_priv->display.hotplug.long_port_mask = 0;
- dev_priv->display.hotplug.short_port_mask = 0;
- dev_priv->display.hotplug.event_bits = 0;
- dev_priv->display.hotplug.retry_bits = 0;
+ display->hotplug.long_hpd_pin_mask = 0;
+ display->hotplug.short_hpd_pin_mask = 0;
+ display->hotplug.event_bits = 0;
+ display->hotplug.retry_bits = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
- cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
+ cancel_work_sync(&display->hotplug.dig_port_work);
/*
* All other work triggered by hotplug events should be canceled by
* now.
*/
- if (cancel_all_detection_work(dev_priv))
- drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n");
+ if (cancel_all_detection_work(display))
+ drm_dbg_kms(display->drm, "Hotplug detection work still active\n");
}
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+static void queue_work_for_missed_irqs(struct intel_display *display)
{
- bool ret = false;
+ struct intel_hotplug *hotplug = &display->hotplug;
+ bool queue_hp_work = false;
+ u32 blocked_hpd_pin_mask;
+ enum hpd_pin pin;
- if (pin == HPD_NONE)
- return false;
+ lockdep_assert_held(&display->irq.lock);
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
- dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
- ret = true;
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ if ((hotplug->event_bits | hotplug->retry_bits) & ~blocked_hpd_pin_mask)
+ queue_hp_work = true;
+
+ for_each_hpd_pin(pin) {
+ switch (display->hotplug.stats[pin].state) {
+ case HPD_MARK_DISABLED:
+ queue_hp_work = true;
+ break;
+ case HPD_DISABLED:
+ case HPD_ENABLED:
+ break;
+ default:
+ MISSING_CASE(display->hotplug.stats[pin].state);
+ }
}
- spin_unlock_irq(&dev_priv->irq_lock);
- return ret;
+ if ((hotplug->long_hpd_pin_mask | hotplug->short_hpd_pin_mask) & ~blocked_hpd_pin_mask)
+ queue_work(hotplug->dp_wq, &hotplug->dig_port_work);
+
+ if (queue_hp_work)
+ queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
}
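The blocked-pin filtering used here, and in i915_hotplug_work_func() above, is plain mask arithmetic; a minimal standalone sketch with invented pin values (a userspace illustration, not taken from this patch) shows which bits get handled now and which stay pending:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	/* Invented example: pins 1 and 3 have pending events, pin 3 is
	 * currently blocked via intel_hpd_block(). */
	uint32_t event_bits = BIT(1) | BIT(3);
	uint32_t blocked_hpd_pin_mask = BIT(3);

	/* Bits outside the blocked mask are processed in this pass ... */
	uint32_t hpd_event_bits = event_bits & ~blocked_hpd_pin_mask;
	/* ... and only those are cleared; blocked bits remain recorded. */
	event_bits &= ~hpd_event_bits;

	printf("handled now: %#x, left pending: %#x\n",
	       hpd_event_bits, event_bits);	/* handled 0x2, pending 0x8 */
	return 0;
}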
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+static bool block_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
- if (pin == HPD_NONE)
+ struct intel_hotplug *hotplug = &display->hotplug;
+
+ lockdep_assert_held(&display->irq.lock);
+
+ hotplug->stats[pin].blocked_count++;
+
+ return hotplug->stats[pin].blocked_count == 1;
+}
+
+static bool unblock_hpd_pin(struct intel_display *display, enum hpd_pin pin)
+{
+ struct intel_hotplug *hotplug = &display->hotplug;
+
+ lockdep_assert_held(&display->irq.lock);
+
+ if (drm_WARN_ON(display->drm, hotplug->stats[pin].blocked_count == 0))
+ return true;
+
+ hotplug->stats[pin].blocked_count--;
+
+ return hotplug->stats[pin].blocked_count == 0;
+}
+
+/**
+ * intel_hpd_block - Block handling of HPD IRQs on an HPD pin
+ * @encoder: Encoder to block the HPD handling for
+ *
+ * Blocks the handling of HPD IRQs on the HPD pin of @encoder.
+ *
+ * On return:
+ *
+ * - It's guaranteed that the blocked encoders' HPD pulse handler
+ * (via intel_digital_port::hpd_pulse()) is not running.
+ * - The hotplug event handling (via intel_encoder::hotplug()) of an
+ * HPD IRQ pending at the time this function is called may be still
+ * running.
+ * - Detection on the encoder's connector (via
+ * drm_connector_helper_funcs::detect_ctx(),
+ * drm_connector_funcs::detect()) remains allowed, for instance as part of
+ * userspace connector probing, or DRM core's connector polling.
+ *
+ * The call must be followed by calling intel_hpd_unblock(), or
+ * intel_hpd_clear_and_unblock().
+ *
+ * Note that the handling of HPD IRQs for another encoder using the same HPD
+ * pin as that of @encoder will be also blocked.
+ */
+void intel_hpd_block(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ bool do_flush = false;
+
+ if (encoder->hpd_pin == HPD_NONE)
+ return;
+
+ spin_lock_irq(&display->irq.lock);
+
+ if (block_hpd_pin(display, encoder->hpd_pin))
+ do_flush = true;
+
+ spin_unlock_irq(&display->irq.lock);
+
+ if (do_flush && hpd_pin_has_pulse(display, encoder->hpd_pin))
+ flush_work(&hotplug->dig_port_work);
+}
+
+/**
+ * intel_hpd_unblock - Unblock handling of HPD IRQs on an HPD pin
+ * @encoder: Encoder to unblock the HPD handling for
+ *
+ * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
+ * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
+ * HPD pin while it was blocked will be handled for @encoder and for any
+ * other encoder sharing the same HPD pin.
+ */
+void intel_hpd_unblock(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ if (encoder->hpd_pin == HPD_NONE)
return;
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+
+ if (unblock_hpd_pin(display, encoder->hpd_pin))
+ queue_work_for_missed_irqs(display);
+
+ spin_unlock_irq(&display->irq.lock);
}
-static void queue_work_for_missed_irqs(struct drm_i915_private *i915)
+/**
+ * intel_hpd_clear_and_unblock - Unblock handling of new HPD IRQs on an HPD pin
+ * @encoder: Encoder to unblock the HPD handling for
+ *
+ * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
+ * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
+ * HPD pin while it was blocked will be cleared, handling only new IRQs.
+ */
+void intel_hpd_clear_and_unblock(struct intel_encoder *encoder)
{
- bool queue_work = false;
- enum hpd_pin pin;
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ enum hpd_pin pin = encoder->hpd_pin;
- lockdep_assert_held(&i915->irq_lock);
+ if (pin == HPD_NONE)
+ return;
- if (i915->display.hotplug.event_bits ||
- i915->display.hotplug.retry_bits)
- queue_work = true;
+ spin_lock_irq(&display->irq.lock);
- for_each_hpd_pin(pin) {
- switch (i915->display.hotplug.stats[pin].state) {
- case HPD_MARK_DISABLED:
- queue_work = true;
- break;
- case HPD_ENABLED:
- break;
- default:
- MISSING_CASE(i915->display.hotplug.stats[pin].state);
- }
+ if (unblock_hpd_pin(display, pin)) {
+ hotplug->event_bits &= ~BIT(pin);
+ hotplug->retry_bits &= ~BIT(pin);
+ hotplug->short_hpd_pin_mask &= ~BIT(pin);
+ hotplug->long_hpd_pin_mask &= ~BIT(pin);
}
- if (queue_work)
- queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
+ spin_unlock_irq(&display->irq.lock);
}
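The three exported helpers above form a refcounted pairing; a minimal usage sketch follows, in which the caller, its context and the guarded operation are invented for illustration and are not part of this patch:

/* Hypothetical caller inside the i915 driver (illustration only), showing
 * the intended pairing of the new helpers. Must run in process context,
 * without display->irq.lock held. */
static void example_encoder_quiesce_hpd(struct intel_encoder *encoder)
{
	/*
	 * Stop HPD IRQ handling on this encoder's pin. On return, a pending
	 * ->hpd_pulse() handler is guaranteed to have finished; IRQs raised
	 * from now on are still recorded but not acted upon. Blocking nests:
	 * the pin stays blocked until every block is paired with an unblock.
	 */
	intel_hpd_block(encoder);

	/* ... do work that must not race with HPD processing ... */

	/* Either handle the IRQs that arrived while blocked ... */
	intel_hpd_unblock(encoder);

	/*
	 * ... or, if the operation itself generates spurious pulses, drop
	 * them and only react to new ones:
	 *
	 *	intel_hpd_clear_and_unblock(encoder);
	 */
}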
-void intel_hpd_enable_detection_work(struct drm_i915_private *i915)
+void intel_hpd_enable_detection_work(struct intel_display *display)
{
- spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.detection_work_enabled = true;
- queue_work_for_missed_irqs(i915);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.detection_work_enabled = true;
+ queue_work_for_missed_irqs(display);
+ spin_unlock_irq(&display->irq.lock);
}
-void intel_hpd_disable_detection_work(struct drm_i915_private *i915)
+void intel_hpd_disable_detection_work(struct intel_display *display)
{
- spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.detection_work_enabled = false;
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.detection_work_enabled = false;
+ spin_unlock_irq(&display->irq.lock);
- cancel_all_detection_work(i915);
+ cancel_all_detection_work(display);
}
-bool intel_hpd_schedule_detection(struct drm_i915_private *i915)
+bool intel_hpd_schedule_detection(struct intel_display *display)
{
unsigned long flags;
bool ret;
- spin_lock_irqsave(&i915->irq_lock, flags);
- ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
- spin_unlock_irqrestore(&i915->irq_lock, flags);
+ spin_lock_irqsave(&display->irq.lock, flags);
+ ret = queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
+ spin_unlock_irqrestore(&display->irq.lock, flags);
return ret;
}
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
/* Synchronize with everything first in case there's been an HPD
* storm, but we haven't finished handling it in the kernel yet
*/
intel_synchronize_irq(dev_priv);
- flush_work(&dev_priv->display.hotplug.dig_port_work);
- flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);
+ flush_work(&display->hotplug.dig_port_work);
+ flush_delayed_work(&display->hotplug.hotplug_work);
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
@@ -1044,8 +1198,8 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct intel_hotplug *hotplug = &display->hotplug;
unsigned int new_threshold;
int i;
char *newline;
@@ -1070,21 +1224,21 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
return -EINVAL;
if (new_threshold > 0)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting HPD storm detection threshold to %d\n",
new_threshold);
else
- drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
+ drm_dbg_kms(display->drm, "Disabling HPD storm detection\n");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
hotplug->hpd_storm_threshold = new_threshold;
/* Reset the HPD storm stats so we don't accidentally trigger a storm */
for_each_hpd_pin(i)
hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+ flush_delayed_work(&display->hotplug.reenable_work);
return len;
}
@@ -1105,10 +1259,10 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
seq_printf(m, "Enabled: %s\n",
- str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));
+ str_yes_no(display->hotplug.hpd_short_storm_enabled));
return 0;
}
@@ -1125,8 +1279,8 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct intel_hotplug *hotplug = &display->hotplug;
char *newline;
char tmp[16];
int i;
@@ -1147,22 +1301,22 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
/* Reset to the "default" state for this system */
if (strcmp(tmp, "reset") == 0)
- new_state = !HAS_DP_MST(dev_priv);
+ new_state = !HAS_DP_MST(display);
else if (kstrtobool(tmp, &new_state) != 0)
return -EINVAL;
- drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
+ drm_dbg_kms(display->drm, "%sabling HPD short storm detection\n",
new_state ? "En" : "Dis");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
hotplug->hpd_short_storm_enabled = new_state;
/* Reset the HPD storm stats so we don't accidentally trigger a storm */
for_each_hpd_pin(i)
hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+ flush_delayed_work(&display->hotplug.reenable_work);
return len;
}
@@ -1176,14 +1330,14 @@ static const struct file_operations i915_hpd_short_storm_ctl_fops = {
.write = i915_hpd_short_storm_ctl_write,
};
-void intel_hpd_debugfs_register(struct drm_i915_private *i915)
+void intel_hpd_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
- i915, &i915_hpd_storm_ctl_fops);
+ display, &i915_hpd_storm_ctl_fops);
debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
- i915, &i915_hpd_short_storm_ctl_fops);
+ display, &i915_hpd_short_storm_ctl_fops);
debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
- &i915->display.hotplug.ignore_long_hpd);
+ &display->hotplug.ignore_long_hpd);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index d6986902b054..edc41c9d3d65 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -8,30 +8,31 @@
#include <linux/types.h>
-struct drm_i915_private;
+enum port;
struct intel_connector;
struct intel_digital_port;
+struct intel_display;
struct intel_encoder;
-enum port;
-void intel_hpd_poll_enable(struct drm_i915_private *dev_priv);
-void intel_hpd_poll_disable(struct drm_i915_private *dev_priv);
-void intel_hpd_poll_fini(struct drm_i915_private *i915);
+void intel_hpd_poll_enable(struct intel_display *display);
+void intel_hpd_poll_disable(struct intel_display *display);
+void intel_hpd_poll_fini(struct intel_display *display);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+void intel_hpd_irq_handler(struct intel_display *display,
u32 pin_mask, u32 long_mask);
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port);
-void intel_hpd_init(struct drm_i915_private *dev_priv);
-void intel_hpd_init_early(struct drm_i915_private *i915);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+void intel_hpd_init(struct intel_display *display);
+void intel_hpd_init_early(struct intel_display *display);
+void intel_hpd_cancel_work(struct intel_display *display);
enum hpd_pin intel_hpd_pin_default(enum port port);
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_debugfs_register(struct drm_i915_private *i915);
+void intel_hpd_block(struct intel_encoder *encoder);
+void intel_hpd_unblock(struct intel_encoder *encoder);
+void intel_hpd_clear_and_unblock(struct intel_encoder *encoder);
+void intel_hpd_debugfs_register(struct intel_display *display);
-void intel_hpd_enable_detection_work(struct drm_i915_private *i915);
-void intel_hpd_disable_detection_work(struct drm_i915_private *i915);
-bool intel_hpd_schedule_detection(struct drm_i915_private *i915);
+void intel_hpd_enable_detection_work(struct intel_display *display);
+void intel_hpd_disable_detection_work(struct intel_display *display);
+bool intel_hpd_schedule_detection(struct intel_display *display);
#endif /* __INTEL_HOTPLUG_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 2137ac7b882a..c024b42369c8 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -3,8 +3,10 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
@@ -131,68 +133,67 @@ static const u32 hpd_mtp[HPD_NUM_PINS] = {
[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
};
-static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
+static void intel_hpd_init_pins(struct intel_display *display)
{
- struct intel_hotplug *hpd = &dev_priv->display.hotplug;
+ struct intel_hotplug *hpd = &display->hotplug;
- if (HAS_GMCH(dev_priv)) {
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv))
+ if (HAS_GMCH(display)) {
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview)
hpd->hpd = hpd_status_g4x;
else
hpd->hpd = hpd_status_i915;
return;
}
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
hpd->hpd = hpd_xelpdp;
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
hpd->hpd = hpd_gen11;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
hpd->hpd = hpd_bxt;
- else if (DISPLAY_VER(dev_priv) == 9)
+ else if (DISPLAY_VER(display) == 9)
hpd->hpd = NULL; /* no north HPD on SKL */
- else if (DISPLAY_VER(dev_priv) >= 8)
+ else if (DISPLAY_VER(display) >= 8)
hpd->hpd = hpd_bdw;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
hpd->hpd = hpd_ivb;
else
hpd->hpd = hpd_ilk;
- if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
- (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
+ if ((INTEL_PCH_TYPE(display) < PCH_DG1) &&
+ (!HAS_PCH_SPLIT(display) || HAS_PCH_NOP(display)))
return;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
hpd->pch_hpd = hpd_mtp;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ else if (INTEL_PCH_TYPE(display) >= PCH_DG1)
hpd->pch_hpd = hpd_sde_dg1;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
hpd->pch_hpd = hpd_icp;
- else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
+ else if (HAS_PCH_CNP(display) || HAS_PCH_SPT(display))
hpd->pch_hpd = hpd_spt;
- else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
+ else if (HAS_PCH_LPT(display) || HAS_PCH_CPT(display))
hpd->pch_hpd = hpd_cpt;
- else if (HAS_PCH_IBX(dev_priv))
+ else if (HAS_PCH_IBX(display))
hpd->pch_hpd = hpd_ibx;
else
- MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
+ MISSING_CASE(INTEL_PCH_TYPE(display));
}
/* For display hotplug interrupt */
-void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
+void i915_hotplug_interrupt_update_locked(struct intel_display *display,
u32 mask, u32 bits)
{
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, bits & ~mask);
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, bits & ~mask);
- intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN(dev_priv), mask,
- bits);
+ intel_de_rmw(display, PORT_HOTPLUG_EN(display), mask, bits);
}
/**
* i915_hotplug_interrupt_update - update hotplug interrupt enable
- * @dev_priv: driver private
+ * @display: display device
* @mask: bits to update
* @bits: bits to enable
* NOTE: the HPD enable bits are modified both inside and outside
@@ -202,13 +203,13 @@ void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
* held already, this function acquires the lock itself. A non-locking
* version is also available.
*/
-void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
+void i915_hotplug_interrupt_update(struct intel_display *display,
u32 mask,
u32 bits)
{
- spin_lock_irq(&dev_priv->irq_lock);
- i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_hotplug_interrupt_update_locked(display, mask, bits);
+ spin_unlock_irq(&display->irq.lock);
}
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
@@ -339,7 +340,7 @@ static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
*
* Note that the caller is expected to zero out the masks initially.
*/
-static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
+static void intel_get_hpd_pins(struct intel_display *display,
u32 *pin_mask, u32 *long_mask,
u32 hotplug_trigger, u32 dig_hotplug_reg,
const u32 hpd[HPD_NUM_PINS],
@@ -359,37 +360,37 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
*long_mask |= BIT(pin);
}
- drm_dbg(&dev_priv->drm,
- "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
+ drm_dbg_kms(display->drm,
+ "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
-static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
+static u32 intel_hpd_enabled_irqs(struct intel_display *display,
const u32 hpd[HPD_NUM_PINS])
{
struct intel_encoder *encoder;
u32 enabled_irqs = 0;
- for_each_intel_encoder(&dev_priv->drm, encoder)
- if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
+ for_each_intel_encoder(display->drm, encoder)
+ if (display->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd[encoder->hpd_pin];
return enabled_irqs;
}
-static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
+static u32 intel_hpd_hotplug_irqs(struct intel_display *display,
const u32 hpd[HPD_NUM_PINS])
{
struct intel_encoder *encoder;
u32 hotplug_irqs = 0;
- for_each_intel_encoder(&dev_priv->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
hotplug_irqs |= hpd[encoder->hpd_pin];
return hotplug_irqs;
}
-static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915,
+static u32 intel_hpd_hotplug_mask(struct intel_display *display,
hotplug_mask_func hotplug_mask)
{
enum hpd_pin pin;
@@ -401,25 +402,25 @@ static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915,
return hotplug;
}
-static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
+static u32 intel_hpd_hotplug_enables(struct intel_display *display,
hotplug_enables_func hotplug_enables)
{
struct intel_encoder *encoder;
u32 hotplug = 0;
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
hotplug |= hotplug_enables(encoder);
return hotplug;
}
-u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
+u32 i9xx_hpd_irq_ack(struct intel_display *display)
{
u32 hotplug_status = 0, hotplug_status_mask;
int i;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview)
hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
else
@@ -435,53 +436,51 @@ u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
* bits can itself generate a new hotplug interrupt :(
*/
for (i = 0; i < 10; i++) {
- u32 tmp = intel_uncore_read(&dev_priv->uncore,
- PORT_HOTPLUG_STAT(dev_priv)) & hotplug_status_mask;
+ u32 tmp = intel_de_read(display,
+ PORT_HOTPLUG_STAT(display)) & hotplug_status_mask;
if (tmp == 0)
return hotplug_status;
hotplug_status |= tmp;
- intel_uncore_write(&dev_priv->uncore,
- PORT_HOTPLUG_STAT(dev_priv),
- hotplug_status);
+ intel_de_write(display, PORT_HOTPLUG_STAT(display),
+ hotplug_status);
}
- drm_WARN_ONCE(&dev_priv->drm, 1,
+ drm_WARN_ONCE(display->drm, 1,
"PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
- intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT(dev_priv)));
+ intel_de_read(display, PORT_HOTPLUG_STAT(display)));
return hotplug_status;
}
-void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status)
+void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status)
{
- struct intel_display *display = &dev_priv->display;
u32 pin_mask = 0, long_mask = 0;
u32 hotplug_trigger;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview)
hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
else
hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
if (hotplug_trigger) {
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, hotplug_trigger,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
- if ((IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview) &&
hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
intel_dp_aux_irq_handler(display);
}
-void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
@@ -491,7 +490,7 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
* zero. Not acking leads to "The master control interrupt lied (SDE)!"
* errors.
*/
- dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+ dig_hotplug_reg = intel_de_read(display, PCH_PORT_HOTPLUG);
if (!hotplug_trigger) {
u32 mask = PORTA_HOTPLUG_STATUS_MASK |
PORTD_HOTPLUG_STATUS_MASK |
@@ -500,63 +499,61 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
dig_hotplug_reg &= ~mask;
}
- intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ intel_de_write(display, PCH_PORT_HOTPLUG, dig_hotplug_reg);
if (!hotplug_trigger)
return;
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
pch_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
+void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir)
{
- struct intel_display *display = &i915->display;
enum hpd_pin pin;
u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK);
u32 trigger_aux = iir & XELPDP_AUX_TC_MASK;
u32 pin_mask = 0, long_mask = 0;
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
trigger_aux |= iir & XE2LPD_AUX_DDI_MASK;
for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) {
u32 val;
- if (!(i915->display.hotplug.hpd[pin] & hotplug_trigger))
+ if (!(display->hotplug.hpd[pin] & hotplug_trigger))
continue;
pin_mask |= BIT(pin);
- val = intel_de_read(i915, XELPDP_PORT_HOTPLUG_CTL(pin));
- intel_de_write(i915, XELPDP_PORT_HOTPLUG_CTL(pin), val);
+ val = intel_de_read(display, XELPDP_PORT_HOTPLUG_CTL(pin));
+ intel_de_write(display, XELPDP_PORT_HOTPLUG_CTL(pin), val);
if (val & (XELPDP_DP_ALT_HPD_LONG_DETECT | XELPDP_TBT_HPD_LONG_DETECT))
long_mask |= BIT(pin);
}
if (pin_mask) {
- drm_dbg(&i915->drm,
- "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n",
- hotplug_trigger, pin_mask, long_mask);
+ drm_dbg_kms(display->drm,
+ "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, pin_mask, long_mask);
- intel_hpd_irq_handler(i915, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
if (trigger_aux)
intel_dp_aux_irq_handler(display);
if (!pin_mask && !trigger_aux)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Unexpected DE HPD/AUX interrupt 0x%08x\n", iir);
}
-void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+void icp_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
u32 pin_mask = 0, long_mask = 0;
@@ -565,37 +562,36 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
u32 dig_hotplug_reg;
/* Locking due to DSI native GPIO sequences */
- spin_lock(&dev_priv->irq_lock);
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
- spin_unlock(&dev_priv->irq_lock);
+ spin_lock(&display->irq.lock);
+ dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_DDI, 0, 0);
+ spin_unlock(&display->irq.lock);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
icp_ddi_port_hotplug_long_detect);
}
if (tc_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_TC, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
icp_tc_port_hotplug_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_ICP)
intel_gmbus_irq_handler(display);
}
-void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+void spt_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
~SDE_PORTE_HOTPLUG_SPT;
u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -604,61 +600,61 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
spt_port_hotplug_long_detect);
}
if (hotplug2_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG2, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug2_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
spt_port_hotplug2_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_CPT)
intel_gmbus_irq_handler(display);
}
-void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
ilk_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
bxt_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+void gen11_hpd_irq_handler(struct intel_display *display, u32 iir)
{
u32 pin_mask = 0, long_mask = 0;
u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
@@ -667,29 +663,29 @@ void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tc) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
gen11_port_hotplug_long_detect);
}
if (trigger_tbt) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
gen11_port_hotplug_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
else
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unexpected DE HPD interrupt 0x%08x\n", iir);
}
@@ -711,7 +707,7 @@ static u32 ibx_hotplug_mask(enum hpd_pin hpd_pin)
static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
switch (encoder->hpd_pin) {
case HPD_PORT_A:
@@ -719,7 +715,7 @@ static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
* When CPU and PCH are on the same package, port A
* HPD must be enabled in both north and south.
*/
- return HAS_PCH_LPT_LP(i915) ?
+ return HAS_PCH_LPT_LP(display) ?
PORTA_HOTPLUG_ENABLE : 0;
case HPD_PORT_B:
return PORTB_HOTPLUG_ENABLE |
@@ -735,37 +731,37 @@ static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_detection_setup(struct intel_display *display)
{
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on LPT+.
*/
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, ibx_hotplug_mask),
+ intel_hpd_hotplug_enables(display, ibx_hotplug_enables));
}
static void ibx_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- ibx_hotplug_mask(encoder->hpd_pin),
- ibx_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ ibx_hotplug_mask(encoder->hpd_pin),
+ ibx_hotplug_enables(encoder));
}
-static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- ibx_hpd_detection_setup(dev_priv);
+ ibx_hpd_detection_setup(display);
}
static u32 icp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
@@ -806,36 +802,36 @@ static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
return icp_tc_hotplug_mask(encoder->hpd_pin);
}
-static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_ddi_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
- intel_hpd_hotplug_mask(dev_priv, icp_ddi_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ intel_hpd_hotplug_mask(display, icp_ddi_hotplug_mask),
+ intel_hpd_hotplug_enables(display, icp_ddi_hotplug_enables));
}
static void icp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_DDI,
- icp_ddi_hotplug_mask(encoder->hpd_pin),
- icp_ddi_hotplug_enables(encoder));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ icp_ddi_hotplug_mask(encoder->hpd_pin),
+ icp_ddi_hotplug_enables(encoder));
}
-static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
- intel_hpd_hotplug_mask(dev_priv, icp_tc_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ intel_hpd_hotplug_mask(display, icp_tc_hotplug_mask),
+ intel_hpd_hotplug_enables(display, icp_tc_hotplug_enables));
}
static void icp_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_TC,
- icp_tc_hotplug_mask(encoder->hpd_pin),
- icp_tc_hotplug_enables(encoder));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ icp_tc_hotplug_mask(encoder->hpd_pin),
+ icp_tc_hotplug_enables(encoder));
}
static void icp_hpd_enable_detection(struct intel_encoder *encoder)
@@ -844,23 +840,23 @@ static void icp_hpd_enable_detection(struct intel_encoder *encoder)
icp_tc_hpd_enable_detection(encoder);
}
-static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
/*
* We reduce the value to 250us to be able to detect SHPD when an external display
* is connected. This is also expected of us as stated in DP1.4a Table 3-4.
*/
- intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- icp_ddi_hpd_detection_setup(dev_priv);
- icp_tc_hpd_detection_setup(dev_priv);
+ icp_ddi_hpd_detection_setup(display);
+ icp_tc_hpd_detection_setup(display);
}
static u32 gen11_hotplug_mask(enum hpd_pin hpd_pin)
@@ -883,88 +879,88 @@ static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
return gen11_hotplug_mask(encoder->hpd_pin);
}
-static void dg1_hpd_invert(struct drm_i915_private *i915)
+static void dg1_hpd_invert(struct intel_display *display)
{
u32 val = (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
INVERT_DDIC_HPD |
INVERT_DDID_HPD);
- intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
+ intel_de_rmw(display, SOUTH_CHICKEN1, 0, val);
}
static void dg1_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- dg1_hpd_invert(i915);
+ dg1_hpd_invert(display);
icp_hpd_enable_detection(encoder);
}
-static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void dg1_hpd_irq_setup(struct intel_display *display)
{
- dg1_hpd_invert(dev_priv);
- icp_hpd_irq_setup(dev_priv);
+ dg1_hpd_invert(display);
+ icp_hpd_irq_setup(display);
}
-static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void gen11_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
- intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
+ intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL,
+ intel_hpd_hotplug_mask(display, gen11_hotplug_mask),
+ intel_hpd_hotplug_enables(display, gen11_hotplug_enables));
}
static void gen11_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, GEN11_TC_HOTPLUG_CTL,
- gen11_hotplug_mask(encoder->hpd_pin),
- gen11_hotplug_enables(encoder));
+ intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL,
+ gen11_hotplug_mask(encoder->hpd_pin),
+ gen11_hotplug_enables(encoder));
}
-static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void gen11_tbt_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
- intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
+ intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL,
+ intel_hpd_hotplug_mask(display, gen11_hotplug_mask),
+ intel_hpd_hotplug_enables(display, gen11_hotplug_enables));
}
static void gen11_tbt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, GEN11_TBT_HOTPLUG_CTL,
- gen11_hotplug_mask(encoder->hpd_pin),
- gen11_hotplug_enables(encoder));
+ intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL,
+ gen11_hotplug_mask(encoder->hpd_pin),
+ gen11_hotplug_enables(encoder));
}
static void gen11_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
gen11_tc_hpd_enable_detection(encoder);
gen11_tbt_hpd_enable_detection(encoder);
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
icp_hpd_enable_detection(encoder);
}
-static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void gen11_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
- ~enabled_irqs & hotplug_irqs);
- intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
+ intel_de_rmw(display, GEN11_DE_HPD_IMR, hotplug_irqs,
+ ~enabled_irqs & hotplug_irqs);
+ intel_de_posting_read(display, GEN11_DE_HPD_IMR);
- gen11_tc_hpd_detection_setup(dev_priv);
- gen11_tbt_hpd_detection_setup(dev_priv);
+ gen11_tc_hpd_detection_setup(display);
+ gen11_tbt_hpd_detection_setup(display);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_hpd_irq_setup(dev_priv);
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_hpd_irq_setup(display);
}
static u32 mtp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1001,39 +997,39 @@ static u32 mtp_tc_hotplug_enables(struct intel_encoder *encoder)
return mtp_tc_hotplug_mask(encoder->hpd_pin);
}
-static void mtp_ddi_hpd_detection_setup(struct drm_i915_private *i915)
+static void mtp_ddi_hpd_detection_setup(struct intel_display *display)
{
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
- intel_hpd_hotplug_mask(i915, mtp_ddi_hotplug_mask),
- intel_hpd_hotplug_enables(i915, mtp_ddi_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ intel_hpd_hotplug_mask(display, mtp_ddi_hotplug_mask),
+ intel_hpd_hotplug_enables(display, mtp_ddi_hotplug_enables));
}
static void mtp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
mtp_ddi_hotplug_mask(encoder->hpd_pin),
mtp_ddi_hotplug_enables(encoder));
}
-static void mtp_tc_hpd_detection_setup(struct drm_i915_private *i915)
+static void mtp_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_de_rmw(i915, SHOTPLUG_CTL_TC,
- intel_hpd_hotplug_mask(i915, mtp_tc_hotplug_mask),
- intel_hpd_hotplug_enables(i915, mtp_tc_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ intel_hpd_hotplug_mask(display, mtp_tc_hotplug_mask),
+ intel_hpd_hotplug_enables(display, mtp_tc_hotplug_enables));
}
static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
mtp_tc_hotplug_mask(encoder->hpd_pin),
mtp_tc_hotplug_enables(encoder));
}
-static void mtp_hpd_invert(struct drm_i915_private *i915)
+static void mtp_hpd_invert(struct intel_display *display)
{
u32 val = (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
@@ -1044,49 +1040,49 @@ static void mtp_hpd_invert(struct drm_i915_private *i915)
INVERT_TC4_HPD |
INVERT_DDID_HPD_MTP |
INVERT_DDIE_HPD);
- intel_de_rmw(i915, SOUTH_CHICKEN1, 0, val);
+ intel_de_rmw(display, SOUTH_CHICKEN1, 0, val);
}
static void mtp_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- mtp_hpd_invert(i915);
+ mtp_hpd_invert(display);
mtp_ddi_hpd_enable_detection(encoder);
mtp_tc_hpd_enable_detection(encoder);
}
-static void mtp_hpd_irq_setup(struct drm_i915_private *i915)
+static void mtp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
/*
* Use 250us here to align with the DP1.4a(Table 3-4) spec as to what the
* SHPD_FILTER_CNT value should be.
*/
- intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
- mtp_hpd_invert(i915);
- ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
+ mtp_hpd_invert(display);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- mtp_ddi_hpd_detection_setup(i915);
- mtp_tc_hpd_detection_setup(i915);
+ mtp_ddi_hpd_detection_setup(display);
+ mtp_tc_hpd_detection_setup(display);
}
-static void xe2lpd_sde_hpd_irq_setup(struct drm_i915_private *i915)
+static void xe2lpd_sde_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- mtp_ddi_hpd_detection_setup(i915);
- mtp_tc_hpd_detection_setup(i915);
+ mtp_ddi_hpd_detection_setup(display);
+ mtp_tc_hpd_detection_setup(display);
}
static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin)
@@ -1094,7 +1090,7 @@ static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin)
return hpd_pin >= HPD_PORT_TC1 && hpd_pin <= HPD_PORT_TC4;
}
-static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915,
+static void _xelpdp_pica_hpd_detection_setup(struct intel_display *display,
enum hpd_pin hpd_pin, bool enable)
{
u32 mask = XELPDP_TBT_HOTPLUG_ENABLE |
@@ -1103,18 +1099,18 @@ static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915,
if (!is_xelpdp_pica_hpd_pin(hpd_pin))
return;
- intel_de_rmw(i915, XELPDP_PORT_HOTPLUG_CTL(hpd_pin),
+ intel_de_rmw(display, XELPDP_PORT_HOTPLUG_CTL(hpd_pin),
mask, enable ? mask : 0);
}
static void xelpdp_pica_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- _xelpdp_pica_hpd_detection_setup(i915, encoder->hpd_pin, true);
+ _xelpdp_pica_hpd_detection_setup(display, encoder->hpd_pin, true);
}
-static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915)
+static void xelpdp_pica_hpd_detection_setup(struct intel_display *display)
{
struct intel_encoder *encoder;
u32 available_pins = 0;
@@ -1122,11 +1118,11 @@ static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915)
BUILD_BUG_ON(BITS_PER_TYPE(available_pins) < HPD_NUM_PINS);
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
available_pins |= BIT(encoder->hpd_pin);
for_each_hpd_pin(pin)
- _xelpdp_pica_hpd_detection_setup(i915, pin, available_pins & BIT(pin));
+ _xelpdp_pica_hpd_detection_setup(display, pin, available_pins & BIT(pin));
}
static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder)
@@ -1135,23 +1131,23 @@ static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder)
mtp_hpd_enable_detection(encoder);
}
-static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915)
+static void xelpdp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- intel_de_rmw(i915, PICAINTERRUPT_IMR, hotplug_irqs,
+ intel_de_rmw(display, PICAINTERRUPT_IMR, hotplug_irqs,
~enabled_irqs & hotplug_irqs);
- intel_uncore_posting_read(&i915->uncore, PICAINTERRUPT_IMR);
+ intel_de_posting_read(display, PICAINTERRUPT_IMR);
- xelpdp_pica_hpd_detection_setup(i915);
+ xelpdp_pica_hpd_detection_setup(display);
- if (INTEL_PCH_TYPE(i915) >= PCH_LNL)
- xe2lpd_sde_hpd_irq_setup(i915);
- else if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
- mtp_hpd_irq_setup(i915);
+ if (INTEL_PCH_TYPE(display) >= PCH_LNL)
+ xe2lpd_sde_hpd_irq_setup(display);
+ else if (INTEL_PCH_TYPE(display) >= PCH_MTL)
+ mtp_hpd_irq_setup(display);
}
static u32 spt_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1190,57 +1186,57 @@ static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
return spt_hotplug2_mask(encoder->hpd_pin);
}
-static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_detection_setup(struct intel_display *display)
{
/* Display WA #1179 WaHardHangonHotPlug: cnp */
- if (HAS_PCH_CNP(dev_priv)) {
- intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
- CHASSIS_CLK_REQ_DURATION(0xf));
+ if (HAS_PCH_CNP(display)) {
+ intel_de_rmw(display, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
+ CHASSIS_CLK_REQ_DURATION(0xf));
}
/* Enable digital hotplug on the PCH */
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, spt_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, spt_hotplug_mask),
+ intel_hpd_hotplug_enables(display, spt_hotplug_enables));
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2,
- intel_hpd_hotplug_mask(dev_priv, spt_hotplug2_mask),
- intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG2,
+ intel_hpd_hotplug_mask(display, spt_hotplug2_mask),
+ intel_hpd_hotplug_enables(display, spt_hotplug2_enables));
}
static void spt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
/* Display WA #1179 WaHardHangonHotPlug: cnp */
- if (HAS_PCH_CNP(i915)) {
- intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1,
- CHASSIS_CLK_REQ_DURATION_MASK,
- CHASSIS_CLK_REQ_DURATION(0xf));
+ if (HAS_PCH_CNP(display)) {
+ intel_de_rmw(display, SOUTH_CHICKEN1,
+ CHASSIS_CLK_REQ_DURATION_MASK,
+ CHASSIS_CLK_REQ_DURATION(0xf));
}
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- spt_hotplug_mask(encoder->hpd_pin),
- spt_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ spt_hotplug_mask(encoder->hpd_pin),
+ spt_hotplug_enables(encoder));
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG2,
- spt_hotplug2_mask(encoder->hpd_pin),
- spt_hotplug2_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG2,
+ spt_hotplug2_mask(encoder->hpd_pin),
+ spt_hotplug2_enables(encoder));
}
-static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP)
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- spt_hpd_detection_setup(dev_priv);
+ spt_hpd_detection_setup(display);
}
static u32 ilk_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1265,44 +1261,44 @@ static u32 ilk_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void ilk_hpd_detection_setup(struct intel_display *display)
{
/*
* Enable digital hotplug on the CPU, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
* The pulse duration bits are reserved on HSW+.
*/
- intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
- intel_hpd_hotplug_mask(dev_priv, ilk_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
+ intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL,
+ intel_hpd_hotplug_mask(display, ilk_hotplug_mask),
+ intel_hpd_hotplug_enables(display, ilk_hotplug_enables));
}
static void ilk_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
- ilk_hotplug_mask(encoder->hpd_pin),
- ilk_hotplug_enables(encoder));
+ intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL,
+ ilk_hotplug_mask(encoder->hpd_pin),
+ ilk_hotplug_enables(encoder));
ibx_hpd_enable_detection(encoder);
}
-static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ilk_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- if (DISPLAY_VER(dev_priv) >= 8)
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ if (DISPLAY_VER(display) >= 8)
+ bdw_update_port_irq(display, hotplug_irqs, enabled_irqs);
else
- ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ ilk_update_display_irq(display, hotplug_irqs, enabled_irqs);
- ilk_hpd_detection_setup(dev_priv);
+ ilk_hpd_detection_setup(display);
- ibx_hpd_irq_setup(dev_priv);
+ ibx_hpd_irq_setup(display);
}
static u32 bxt_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1344,80 +1340,80 @@ static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void bxt_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, bxt_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, bxt_hotplug_mask),
+ intel_hpd_hotplug_enables(display, bxt_hotplug_enables));
}
static void bxt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- bxt_hotplug_mask(encoder->hpd_pin),
- bxt_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ bxt_hotplug_mask(encoder->hpd_pin),
+ bxt_hotplug_enables(encoder));
}
-static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void bxt_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ bdw_update_port_irq(display, hotplug_irqs, enabled_irqs);
- bxt_hpd_detection_setup(dev_priv);
+ bxt_hpd_detection_setup(display);
}
-static void g45_hpd_peg_band_gap_wa(struct drm_i915_private *i915)
+static void g45_hpd_peg_band_gap_wa(struct intel_display *display)
{
/*
* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- intel_de_rmw(i915, PEG_BAND_GAP_DATA, 0xf, 0xd);
+ intel_de_rmw(display, PEG_BAND_GAP_DATA, 0xf, 0xd);
}
static void i915_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin];
- if (IS_G45(i915))
- g45_hpd_peg_band_gap_wa(i915);
+ if (display->platform.g45)
+ g45_hpd_peg_band_gap_wa(display);
/* HPD sense and interrupt enable are one and the same */
- i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en);
+ i915_hotplug_interrupt_update(display, hotplug_en, hotplug_en);
}
-static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void i915_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_en;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
/*
* Note HDMI and DP share hotplug bits. Enable bits are the same for all
* generations.
*/
- hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
+ hotplug_en = intel_hpd_enabled_irqs(display, hpd_mask_i915);
/*
* Programming the CRT detection parameters tends to generate a spurious
* hotplug event about three seconds later. So just do it once.
*/
- if (IS_G4X(dev_priv))
+ if (display->platform.g4x)
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
- if (IS_G45(dev_priv))
- g45_hpd_peg_band_gap_wa(dev_priv);
+ if (display->platform.g45)
+ g45_hpd_peg_band_gap_wa(display);
/* Ignore TV since it's buggy */
- i915_hotplug_interrupt_update_locked(dev_priv,
+ i915_hotplug_interrupt_update_locked(display,
HOTPLUG_INT_EN_MASK |
CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
CRT_HOTPLUG_ACTIVATION_PERIOD_64,
@@ -1426,7 +1422,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
struct intel_hotplug_funcs {
/* Enable HPD sense and interrupts for all present encoders */
- void (*hpd_irq_setup)(struct drm_i915_private *i915);
+ void (*hpd_irq_setup)(struct intel_display *display);
/* Enable HPD sense for a single encoder */
void (*hpd_enable_detection)(struct intel_encoder *encoder);
};
@@ -1449,47 +1445,47 @@ HPD_FUNCS(ilk);
void intel_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (i915->display.funcs.hotplug)
- i915->display.funcs.hotplug->hpd_enable_detection(encoder);
+ if (display->funcs.hotplug)
+ display->funcs.hotplug->hpd_enable_detection(encoder);
}
-void intel_hpd_irq_setup(struct drm_i915_private *i915)
+void intel_hpd_irq_setup(struct intel_display *display)
{
- if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
- !i915->display.irq.vlv_display_irqs_enabled)
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ !display->irq.vlv_display_irqs_enabled)
return;
- if (i915->display.funcs.hotplug)
- i915->display.funcs.hotplug->hpd_irq_setup(i915);
+ if (display->funcs.hotplug)
+ display->funcs.hotplug->hpd_irq_setup(display);
}
-void intel_hotplug_irq_init(struct drm_i915_private *i915)
+void intel_hotplug_irq_init(struct intel_display *display)
{
- intel_hpd_init_pins(i915);
+ intel_hpd_init_pins(display);
- intel_hpd_init_early(i915);
+ intel_hpd_init_early(display);
- if (HAS_GMCH(i915)) {
- if (I915_HAS_HOTPLUG(i915))
- i915->display.funcs.hotplug = &i915_hpd_funcs;
+ if (HAS_GMCH(display)) {
+ if (HAS_HOTPLUG(display))
+ display->funcs.hotplug = &i915_hpd_funcs;
} else {
- if (HAS_PCH_DG2(i915))
- i915->display.funcs.hotplug = &icp_hpd_funcs;
- else if (HAS_PCH_DG1(i915))
- i915->display.funcs.hotplug = &dg1_hpd_funcs;
- else if (DISPLAY_VER(i915) >= 14)
- i915->display.funcs.hotplug = &xelpdp_hpd_funcs;
- else if (DISPLAY_VER(i915) >= 11)
- i915->display.funcs.hotplug = &gen11_hpd_funcs;
- else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- i915->display.funcs.hotplug = &bxt_hpd_funcs;
- else if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
- i915->display.funcs.hotplug = &icp_hpd_funcs;
- else if (INTEL_PCH_TYPE(i915) >= PCH_SPT)
- i915->display.funcs.hotplug = &spt_hpd_funcs;
+ if (HAS_PCH_DG2(display))
+ display->funcs.hotplug = &icp_hpd_funcs;
+ else if (HAS_PCH_DG1(display))
+ display->funcs.hotplug = &dg1_hpd_funcs;
+ else if (DISPLAY_VER(display) >= 14)
+ display->funcs.hotplug = &xelpdp_hpd_funcs;
+ else if (DISPLAY_VER(display) >= 11)
+ display->funcs.hotplug = &gen11_hpd_funcs;
+ else if (display->platform.geminilake || display->platform.broxton)
+ display->funcs.hotplug = &bxt_hpd_funcs;
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ display->funcs.hotplug = &icp_hpd_funcs;
+ else if (INTEL_PCH_TYPE(display) >= PCH_SPT)
+ display->funcs.hotplug = &spt_hpd_funcs;
else
- i915->display.funcs.hotplug = &ilk_hpd_funcs;
+ display->funcs.hotplug = &ilk_hpd_funcs;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
index e4db752df096..9063bb02a2e9 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
@@ -8,28 +8,28 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
struct intel_encoder;
-u32 i9xx_hpd_irq_ack(struct drm_i915_private *i915);
+u32 i9xx_hpd_irq_ack(struct intel_display *display);
-void i9xx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_status);
-void ibx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void ilk_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void gen11_hpd_irq_handler(struct drm_i915_private *i915, u32 iir);
-void bxt_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir);
-void icp_irq_handler(struct drm_i915_private *i915, u32 pch_iir);
-void spt_irq_handler(struct drm_i915_private *i915, u32 pch_iir);
+void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status);
+void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void gen11_hpd_irq_handler(struct intel_display *display, u32 iir);
+void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir);
+void icp_irq_handler(struct intel_display *display, u32 pch_iir);
+void spt_irq_handler(struct intel_display *display, u32 pch_iir);
-void i915_hotplug_interrupt_update_locked(struct drm_i915_private *i915,
+void i915_hotplug_interrupt_update_locked(struct intel_display *display,
u32 mask, u32 bits);
-void i915_hotplug_interrupt_update(struct drm_i915_private *i915,
+void i915_hotplug_interrupt_update(struct intel_display *display,
u32 mask, u32 bits);
void intel_hpd_enable_detection(struct intel_encoder *encoder);
-void intel_hpd_irq_setup(struct drm_i915_private *i915);
+void intel_hpd_irq_setup(struct intel_display *display);
-void intel_hotplug_irq_init(struct drm_i915_private *i915);
+void intel_hotplug_irq_init(struct intel_display *display);
#endif /* __INTEL_HOTPLUG_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c
index fb6b84f6a81d..dc454420c134 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.c
+++ b/drivers/gpu/drm/i915/display/intel_hti.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include "intel_de.h"
#include "intel_display.h"
diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c
index 86cc03a4413c..aad52d0d83e1 100644
--- a/drivers/gpu/drm/i915/display/intel_load_detect.c
+++ b/drivers/gpu/drm/i915/display/intel_load_detect.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_print.h>
#include "intel_atomic.h"
#include "intel_crtc.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 59551c8414c2..666148a14522 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -179,7 +179,7 @@ static int lpe_audio_irq_init(struct intel_display *display)
handle_simple_irq,
"hdmi_lpe_audio_irq_handler");
- return irq_set_chip_data(irq, dev_priv);
+ return 0;
}
static bool lpe_audio_detect(struct intel_display *display)
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 63c1afa30b05..f94b7eeae20f 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -27,6 +27,7 @@
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include "i915_reg.h"
#include "i915_utils.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 19f52d1659fa..8ce7c630da52 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -37,9 +37,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
@@ -84,15 +84,15 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder)
return container_of(encoder, struct intel_lvds_encoder, base);
}
-bool intel_lvds_port_enabled(struct drm_i915_private *i915,
+bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe)
{
u32 val;
- val = intel_de_read(i915, lvds_reg);
+ val = intel_de_read(display, lvds_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(i915))
+ if (HAS_PCH_CPT(display))
*pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val);
else
*pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val);
@@ -104,7 +104,6 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_wakeref_t wakeref;
bool ret;
@@ -113,7 +112,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
if (!wakeref)
return false;
- ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe);
+ ret = intel_lvds_port_enabled(display, lvds_encoder->reg, pipe);
intel_display_power_put(display, encoder->power_domain, wakeref);
@@ -123,13 +122,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
u32 tmp, flags = 0;
crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS);
- tmp = intel_de_read(dev_priv, lvds_encoder->reg);
+ tmp = intel_de_read(display, lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
else
@@ -141,13 +140,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
crtc_state->hw.adjusted_mode.flags |= flags;
- if (DISPLAY_VER(dev_priv) < 5)
+ if (DISPLAY_VER(display) < 5)
crtc_state->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
- if (DISPLAY_VER(dev_priv) < 4) {
- tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv));
+ if (DISPLAY_VER(display) < 4) {
+ tmp = intel_de_read(display, PFIT_CONTROL(display));
crtc_state->gmch_pfit.control |= tmp & PFIT_PANEL_8TO6_DITHER_ENABLE;
}
@@ -155,24 +154,24 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock;
}
-static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
+static void intel_lvds_pps_get_hw_state(struct intel_display *display,
struct intel_lvds_pps *pps)
{
u32 val;
- pps->powerdown_on_reset = intel_de_read(dev_priv,
- PP_CONTROL(dev_priv, 0)) & PANEL_POWER_RESET;
+ pps->powerdown_on_reset = intel_de_read(display,
+ PP_CONTROL(display, 0)) & PANEL_POWER_RESET;
- val = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0));
+ val = intel_de_read(display, PP_ON_DELAYS(display, 0));
pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
pps->delays.power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
pps->delays.backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
- val = intel_de_read(dev_priv, PP_OFF_DELAYS(dev_priv, 0));
+ val = intel_de_read(display, PP_OFF_DELAYS(display, 0));
pps->delays.power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
pps->delays.backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
- val = intel_de_read(dev_priv, PP_DIVISOR(dev_priv, 0));
+ val = intel_de_read(display, PP_DIVISOR(display, 0));
pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
/*
@@ -185,12 +184,12 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
/* Convert from 100ms to 100us units */
pps->delays.power_cycle = val * 1000;
- if (DISPLAY_VER(dev_priv) < 5 &&
+ if (DISPLAY_VER(display) < 5 &&
pps->delays.power_up == 0 &&
pps->delays.backlight_on == 0 &&
pps->delays.power_down == 0 &&
pps->delays.backlight_off == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Panel power timings uninitialized, "
"setting defaults\n");
/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
@@ -201,7 +200,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->delays.backlight_off = 200 * 10;
}
- drm_dbg(&dev_priv->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d "
+ drm_dbg(display->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d "
"divider %d port %d powerdown_on_reset %d\n",
pps->delays.power_up, pps->delays.power_down,
pps->delays.power_cycle, pps->delays.backlight_on,
@@ -209,28 +208,28 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->port, pps->powerdown_on_reset);
}
-static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
+static void intel_lvds_pps_init_hw(struct intel_display *display,
struct intel_lvds_pps *pps)
{
u32 val;
- val = intel_de_read(dev_priv, PP_CONTROL(dev_priv, 0));
- drm_WARN_ON(&dev_priv->drm,
+ val = intel_de_read(display, PP_CONTROL(display, 0));
+ drm_WARN_ON(display->drm,
(val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
if (pps->powerdown_on_reset)
val |= PANEL_POWER_RESET;
- intel_de_write(dev_priv, PP_CONTROL(dev_priv, 0), val);
+ intel_de_write(display, PP_CONTROL(display, 0), val);
- intel_de_write(dev_priv, PP_ON_DELAYS(dev_priv, 0),
+ intel_de_write(display, PP_ON_DELAYS(display, 0),
REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->delays.power_up) |
REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->delays.backlight_on));
- intel_de_write(dev_priv, PP_OFF_DELAYS(dev_priv, 0),
+ intel_de_write(display, PP_OFF_DELAYS(display, 0),
REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->delays.power_down) |
REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->delays.backlight_off));
- intel_de_write(dev_priv, PP_DIVISOR(dev_priv, 0),
+ intel_de_write(display, PP_DIVISOR(display, 0),
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
DIV_ROUND_UP(pps->delays.power_cycle, 1000) + 1));
@@ -243,25 +242,24 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(state);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
u32 temp;
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
assert_fdi_rx_pll_disabled(display, pipe);
assert_shared_dpll_disabled(display, crtc_state->shared_dpll);
} else {
assert_pll_disabled(display, pipe);
}
- intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps);
+ intel_lvds_pps_init_hw(display, &lvds_encoder->init_pps);
temp = lvds_encoder->init_lvds_val;
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(i915)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~LVDS_PIPE_SEL_MASK_CPT;
temp |= LVDS_PIPE_SEL_CPT(pipe);
} else {
@@ -296,7 +294,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the TRANSCONF reg.
*/
- if (DISPLAY_VER(i915) == 4) {
+ if (DISPLAY_VER(display) == 4) {
/*
* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels.
@@ -312,7 +310,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
- intel_de_write(i915, lvds_encoder->reg, temp);
+ intel_de_write(display, lvds_encoder->reg, temp);
}
/*
@@ -323,16 +321,16 @@ static void intel_enable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN);
+ intel_de_rmw(display, lvds_encoder->reg, 0, LVDS_PORT_EN);
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, 0), 0, PANEL_POWER_ON);
- intel_de_posting_read(dev_priv, lvds_encoder->reg);
+ intel_de_rmw(display, PP_CONTROL(display, 0), 0, PANEL_POWER_ON);
+ intel_de_posting_read(display, lvds_encoder->reg);
- if (intel_de_wait_for_set(dev_priv, PP_STATUS(dev_priv, 0), PP_ON, 5000))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_set(display, PP_STATUS(display, 0), PP_ON, 5000))
+ drm_err(display->drm,
"timed out waiting for panel to power on\n");
intel_backlight_enable(crtc_state, conn_state);
@@ -343,16 +341,16 @@ static void intel_disable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, 0), PANEL_POWER_ON, 0);
- if (intel_de_wait_for_clear(dev_priv, PP_STATUS(dev_priv, 0), PP_ON, 1000))
- drm_err(&dev_priv->drm,
+ intel_de_rmw(display, PP_CONTROL(display, 0), PANEL_POWER_ON, 0);
+ if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_ON, 1000))
+ drm_err(display->drm,
"timed out waiting for panel to power off\n");
- intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0);
- intel_de_posting_read(dev_priv, lvds_encoder->reg);
+ intel_de_rmw(display, lvds_encoder->reg, LVDS_PORT_EN, 0);
+ intel_de_posting_read(display, lvds_encoder->reg);
}
static void gmch_disable_lvds(struct intel_atomic_state *state,
@@ -384,10 +382,10 @@ static void pch_post_disable_lvds(struct intel_atomic_state *state,
static void intel_lvds_shutdown(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (intel_de_wait_for_clear(dev_priv, PP_STATUS(dev_priv, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
+ drm_err(display->drm,
"timed out waiting for panel power cycle delay\n");
}
@@ -420,7 +418,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct intel_connector *connector = lvds_encoder->attached_connector;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -429,12 +427,12 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
int ret;
/* Should never happen!! */
- if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) {
- drm_err(&i915->drm, "Can't support LVDS on pipe A\n");
+ if (DISPLAY_VER(display) < 4 && crtc->pipe == 0) {
+ drm_err(display->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
crtc_state->has_pch_encoder = true;
if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
@@ -447,7 +445,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"forcing display bpp (was %d) to LVDS (%d)\n",
crtc_state->pipe_bpp, lvds_bpp);
crtc_state->pipe_bpp = lvds_bpp;
@@ -775,11 +773,11 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
+struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_LVDS)
return encoder;
}
@@ -787,24 +785,24 @@ struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
return NULL;
}
-bool intel_is_dual_link_lvds(struct drm_i915_private *i915)
+bool intel_is_dual_link_lvds(struct intel_display *display)
{
- struct intel_encoder *encoder = intel_get_lvds_encoder(i915);
+ struct intel_encoder *encoder = intel_get_lvds_encoder(display);
return encoder && to_lvds_encoder(encoder)->is_dual_link;
}
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
- struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev);
+ struct intel_display *display = to_intel_display(&lvds_encoder->base);
struct intel_connector *connector = lvds_encoder->attached_connector;
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
unsigned int val;
/* use the module option value if specified */
- if (i915->display.params.lvds_channel_mode > 0)
- return i915->display.params.lvds_channel_mode == 2;
+ if (display->params.lvds_channel_mode > 0)
+ return display->params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (fixed_mode->clock > 112999)
@@ -819,8 +817,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
- val = intel_de_read(i915, lvds_encoder->reg);
- if (HAS_PCH_CPT(i915))
+ val = intel_de_read(display, lvds_encoder->reg);
+ if (HAS_PCH_CPT(display))
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
else
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
@@ -837,14 +835,13 @@ static void intel_lvds_add_properties(struct drm_connector *connector)
/**
* intel_lvds_init - setup LVDS connectors on this device
- * @i915: i915 device
+ * @display: display device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_i915_private *i915)
+void intel_lvds_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_lvds_encoder *lvds_encoder;
struct intel_connector *connector;
const struct drm_edid *drm_edid;
@@ -855,25 +852,25 @@ void intel_lvds_init(struct drm_i915_private *i915)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support,
+ drm_WARN(display->drm, !display->vbt.int_lvds_support,
"Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
- if (!i915->display.vbt.int_lvds_support) {
- drm_dbg_kms(&i915->drm,
+ if (!display->vbt.int_lvds_support) {
+ drm_dbg_kms(display->drm,
"Internal LVDS support disabled by VBT\n");
return;
}
- if (HAS_PCH_SPLIT(i915))
+ if (HAS_PCH_SPLIT(display))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
- lvds = intel_de_read(i915, lvds_reg);
+ lvds = intel_de_read(display, lvds_reg);
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
}
@@ -881,11 +878,11 @@ void intel_lvds_init(struct drm_i915_private *i915)
ddc_pin = GMBUS_PIN_PANEL;
if (!intel_bios_is_lvds_present(display, &ddc_pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"LVDS is not present in VBT\n");
return;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"LVDS is not present in VBT, but enabled anyway\n");
}
@@ -902,18 +899,18 @@ void intel_lvds_init(struct drm_i915_private *i915)
lvds_encoder->attached_connector = connector;
encoder = &lvds_encoder->base;
- drm_connector_init_with_ddc(&i915->drm, &connector->base,
+ drm_connector_init_with_ddc(display->drm, &connector->base,
&intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS,
intel_gmbus_get_adapter(display, ddc_pin));
- drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS, "LVDS");
encoder->enable = intel_enable_lvds;
encoder->pre_enable = intel_pre_enable_lvds;
encoder->compute_config = intel_lvds_compute_config;
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
encoder->disable = pch_disable_lvds;
encoder->post_disable = pch_post_disable_lvds;
} else {
@@ -931,7 +928,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
encoder->port = PORT_NONE;
encoder->cloneable = 0;
- if (DISPLAY_VER(i915) < 4)
+ if (DISPLAY_VER(display) < 4)
encoder->pipe_mask = BIT(PIPE_B);
else
encoder->pipe_mask = ~0;
@@ -943,7 +940,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
intel_lvds_add_properties(&connector->base);
- intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps);
+ intel_lvds_pps_get_hw_state(display, &lvds_encoder->init_pps);
lvds_encoder->init_lvds_val = lvds;
/*
@@ -958,7 +955,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
drm_edid = drm_edid_read_switcheroo(&connector->base, connector->base.ddc);
else
@@ -991,7 +988,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
if (!intel_panel_preferred_fixed_mode(connector))
intel_panel_add_encoder_fixed_mode(connector, encoder);
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
/* If we still don't have a mode after all that, give up. */
if (!intel_panel_preferred_fixed_mode(connector))
@@ -1002,7 +999,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
intel_backlight_setup(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n",
+ drm_dbg_kms(display->drm, "detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -1010,7 +1007,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
return;
failed:
- drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n");
+ drm_dbg_kms(display->drm, "No LVDS modes found, disabling.\n");
drm_connector_cleanup(&connector->base);
drm_encoder_cleanup(&encoder->base);
kfree(lvds_encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.h b/drivers/gpu/drm/i915/display/intel_lvds.h
index 7ad5fa9c0434..a6db1706a97c 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.h
+++ b/drivers/gpu/drm/i915/display/intel_lvds.h
@@ -11,28 +11,28 @@
#include "i915_reg_defs.h"
enum pipe;
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe);
-void intel_lvds_init(struct drm_i915_private *dev_priv);
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv);
-bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv);
+void intel_lvds_init(struct intel_display *display);
+struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display);
+bool intel_is_dual_link_lvds(struct intel_display *display);
#else
-static inline bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+static inline bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe)
{
return false;
}
-static inline void intel_lvds_init(struct drm_i915_private *dev_priv)
+static inline void intel_lvds_init(struct intel_display *display)
{
}
-static inline struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
+static inline struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display)
{
return NULL;
}
-static inline bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
+static inline bool intel_is_dual_link_lvds(struct intel_display *display)
{
return false;
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 312b21b1ab59..0325b0c9506d 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -6,11 +6,11 @@
* state.
*/
-#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
@@ -31,13 +31,14 @@
#include "intel_pmdemand.h"
#include "intel_tc.h"
#include "intel_vblank.h"
+#include "intel_vga.h"
#include "intel_wm.h"
#include "skl_watermark.h"
static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
@@ -48,7 +49,7 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
if (!crtc_state->hw.active)
return;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -56,9 +57,9 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
intel_plane_disable_noatomic(crtc, plane);
}
- state = drm_atomic_state_alloc(&i915->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"failed to disable [CRTC:%d:%s], out of memory",
crtc->base.base.id, crtc->base.name);
return;
@@ -68,7 +69,7 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
to_intel_atomic_state(state)->internal = true;
/* Everything's already locked, -EDEADLK can't happen. */
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc,
BIT(pipe) |
intel_crtc_joiner_secondary_pipes(crtc_state)) {
struct intel_crtc_state *temp_crtc_state =
@@ -77,14 +78,14 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
ret = drm_atomic_add_affected_connectors(state, &temp_crtc->base);
- drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret);
+ drm_WARN_ON(display->drm, IS_ERR(temp_crtc_state) || ret);
}
- i915->display.funcs.display->crtc_disable(to_intel_atomic_state(state), crtc);
+ display->funcs.display->crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
crtc->base.base.id, crtc->base.name);
@@ -118,13 +119,12 @@ static void set_encoder_for_connector(struct intel_connector *connector,
static void reset_encoder_connector_state(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->base.encoder != &encoder->base)
continue;
@@ -143,10 +143,10 @@ static void reset_encoder_connector_state(struct intel_encoder *encoder)
static void reset_crtc_encoder_state(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_encoder *encoder;
- for_each_encoder_on_crtc(&i915->drm, &crtc->base, encoder) {
+ for_each_encoder_on_crtc(display->drm, &crtc->base, encoder) {
reset_encoder_connector_state(encoder);
encoder->base.crtc = NULL;
}
@@ -155,9 +155,8 @@ static void reset_crtc_encoder_state(struct intel_crtc *crtc)
static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
@@ -169,7 +168,7 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
reset_crtc_encoder_state(crtc);
intel_fbc_disable(crtc);
- intel_update_watermarks(i915);
+ intel_update_watermarks(display);
intel_display_power_put_all_in_set(display, &crtc->enabled_power_domains);
@@ -184,13 +183,13 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
* Return all the pipes using a transcoder in @transcoder_mask.
* For joiner configs return only the joiner primary.
*/
-static u8 get_transcoder_pipes(struct drm_i915_private *i915,
+static u8 get_transcoder_pipes(struct intel_display *display,
u8 transcoder_mask)
{
struct intel_crtc *temp_crtc;
u8 pipes = 0;
- for_each_intel_crtc(&i915->drm, temp_crtc) {
+ for_each_intel_crtc(display->drm, temp_crtc) {
struct intel_crtc_state *temp_crtc_state =
to_intel_crtc_state(temp_crtc->base.state);
@@ -215,7 +214,6 @@ static void get_portsync_pipes(struct intel_crtc *crtc,
u8 *master_pipe_mask, u8 *slave_pipes_mask)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_crtc *master_crtc;
@@ -234,20 +232,20 @@ static void get_portsync_pipes(struct intel_crtc *crtc,
else
master_transcoder = crtc_state->master_transcoder;
- *master_pipe_mask = get_transcoder_pipes(i915, BIT(master_transcoder));
- drm_WARN_ON(&i915->drm, !is_power_of_2(*master_pipe_mask));
+ *master_pipe_mask = get_transcoder_pipes(display, BIT(master_transcoder));
+ drm_WARN_ON(display->drm, !is_power_of_2(*master_pipe_mask));
master_crtc = intel_crtc_for_pipe(display, ffs(*master_pipe_mask) - 1);
master_crtc_state = to_intel_crtc_state(master_crtc->base.state);
- *slave_pipes_mask = get_transcoder_pipes(i915, master_crtc_state->sync_mode_slaves_mask);
+ *slave_pipes_mask = get_transcoder_pipes(display, master_crtc_state->sync_mode_slaves_mask);
}
-static u8 get_joiner_secondary_pipes(struct drm_i915_private *i915, u8 primary_pipes_mask)
+static u8 get_joiner_secondary_pipes(struct intel_display *display, u8 primary_pipes_mask)
{
struct intel_crtc *primary_crtc;
u8 pipes = 0;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, primary_crtc, primary_pipes_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, primary_crtc, primary_pipes_mask) {
struct intel_crtc_state *primary_crtc_state =
to_intel_crtc_state(primary_crtc->base.state);
@@ -260,45 +258,45 @@ static u8 get_joiner_secondary_pipes(struct drm_i915_private *i915, u8 primary_p
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_crtc *temp_crtc;
u8 portsync_master_mask;
u8 portsync_slaves_mask;
u8 joiner_secondaries_mask;
- struct intel_crtc *temp_crtc;
/* TODO: Add support for MST */
get_portsync_pipes(crtc, &portsync_master_mask, &portsync_slaves_mask);
- joiner_secondaries_mask = get_joiner_secondary_pipes(i915,
+ joiner_secondaries_mask = get_joiner_secondary_pipes(display,
portsync_master_mask |
portsync_slaves_mask);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
portsync_master_mask & portsync_slaves_mask ||
portsync_master_mask & joiner_secondaries_mask ||
portsync_slaves_mask & joiner_secondaries_mask);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, joiner_secondaries_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc, joiner_secondaries_mask)
intel_crtc_disable_noatomic_begin(temp_crtc, ctx);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_slaves_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc, portsync_slaves_mask)
intel_crtc_disable_noatomic_begin(temp_crtc, ctx);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_master_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc, portsync_master_mask)
intel_crtc_disable_noatomic_begin(temp_crtc, ctx);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc,
joiner_secondaries_mask |
portsync_slaves_mask |
portsync_master_mask)
intel_crtc_disable_noatomic_complete(temp_crtc);
}
-static void intel_modeset_update_connector_atomic_state(struct drm_i915_private *i915)
+static void intel_modeset_update_connector_atomic_state(struct intel_display *display)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state = connector->base.state;
struct intel_encoder *encoder =
@@ -320,7 +318,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (intel_crtc_is_joiner_secondary(crtc_state))
return;
@@ -333,7 +331,7 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
- if (DISPLAY_INFO(i915)->color.degamma_lut_size) {
+ if (DISPLAY_INFO(display)->color.degamma_lut_size) {
/* assume 1:1 mapping */
drm_property_replace_blob(&crtc_state->hw.degamma_lut,
crtc_state->pre_csc_lut);
@@ -348,7 +346,7 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
* to gamma_lut as that is the only valid source of LUTs
* in the uapi.
*/
- drm_WARN_ON(&i915->drm, crtc_state->post_csc_lut &&
+ drm_WARN_ON(display->drm, crtc_state->post_csc_lut &&
crtc_state->pre_csc_lut);
drm_property_replace_blob(&crtc_state->hw.degamma_lut,
@@ -367,15 +365,14 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
}
static void
-intel_sanitize_plane_mapping(struct drm_i915_private *i915)
+intel_sanitize_plane_mapping(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc;
- if (DISPLAY_VER(i915) >= 4)
+ if (DISPLAY_VER(display) >= 4)
return;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_crtc *plane_crtc;
@@ -387,7 +384,7 @@ intel_sanitize_plane_mapping(struct drm_i915_private *i915)
if (pipe == crtc->pipe)
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
plane->base.base.id, plane->base.name);
@@ -424,12 +421,12 @@ static bool intel_crtc_needs_link_reset(struct intel_crtc *crtc)
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
struct intel_connector *found_connector = NULL;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (&encoder->base == connector->base.encoder) {
found_connector = connector;
@@ -467,7 +464,7 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state
static bool intel_sanitize_crtc(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
bool needs_link_reset;
@@ -475,7 +472,7 @@ static bool intel_sanitize_crtc(struct intel_crtc *crtc,
struct intel_plane *plane;
/* Disable everything but the primary plane */
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -516,7 +513,7 @@ static bool intel_sanitize_crtc(struct intel_crtc *crtc,
return true;
}
-static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
+static void intel_sanitize_all_crtcs(struct intel_display *display,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_crtc *crtc;
@@ -531,7 +528,7 @@ static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
for (;;) {
u32 old_mask = crtcs_forced_off;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
u32 crtc_mask = drm_crtc_mask(&crtc->base);
if (crtcs_forced_off & crtc_mask)
@@ -544,7 +541,7 @@ static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
break;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -554,7 +551,7 @@ static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/*
* Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
@@ -566,7 +563,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
* without several WARNs, but for now let's take the easy
* road.
*/
- return IS_SANDYBRIDGE(i915) &&
+ return display->platform.sandybridge &&
crtc_state->hw.active &&
crtc_state->shared_dpll &&
crtc_state->port_clock == 0;
@@ -575,13 +572,12 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_connector *connector;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_crtc_state *crtc_state = crtc ?
to_intel_crtc_state(crtc->base.state) : NULL;
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
/*
* We need to check both for a crtc link (meaning that the encoder is
@@ -592,7 +588,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
crtc_state->hw.active;
if (crtc_state && has_bogus_dpll_config(crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"BIOS has misprogrammed the hardware. Disabling pipe %c\n",
pipe_name(crtc->pipe));
has_active_crtc = false;
@@ -600,7 +596,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
connector = intel_encoder_find_connector(encoder);
if (connector && !has_active_crtc) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
encoder->base.name);
@@ -617,7 +613,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
if (crtc_state) {
struct drm_encoder *best_encoder;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
encoder->base.name);
@@ -651,18 +647,17 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
/* notify opregion of the sanitized encoder state */
intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
- if (HAS_DDI(i915))
+ if (HAS_DDI(display))
intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
/* FIXME read out full plane state for all planes */
-static void readout_plane_state(struct drm_i915_private *i915)
+static void readout_plane_state(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
struct intel_crtc_state *crtc_state;
@@ -676,13 +671,13 @@ static void readout_plane_state(struct drm_i915_private *i915)
intel_set_plane_visible(crtc_state, plane_state, visible);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
plane->base.base.id, plane->base.name,
str_enabled_disabled(visible), pipe_name(pipe));
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -690,18 +685,17 @@ static void readout_plane_state(struct drm_i915_private *i915)
}
}
-static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
+static void intel_modeset_readout_hw_state(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -716,15 +710,15 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
crtc->base.enabled = crtc_state->hw.enable;
crtc->active = crtc_state->hw.active;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
str_enabled_disabled(crtc_state->hw.active));
}
- readout_plane_state(i915);
+ readout_plane_state(display);
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_crtc_state *crtc_state = NULL;
pipe = 0;
@@ -743,7 +737,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
/* encoder should be linked to joiner primary */
WARN_ON(intel_crtc_is_joiner_secondary(crtc_state));
- for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc,
intel_crtc_joiner_secondary_pipes(crtc_state)) {
struct intel_crtc_state *secondary_crtc_state;
@@ -766,7 +760,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
if (encoder->sync_state)
encoder->sync_state(encoder, crtc_state);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id, encoder->base.name,
str_enabled_disabled(encoder->base.crtc),
@@ -775,7 +769,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
intel_dpll_readout_hw_state(display);
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct intel_crtc_state *crtc_state = NULL;
@@ -809,37 +803,37 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
if (connector->sync_state)
connector->sync_state(connector, crtc_state);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id, connector->base.name,
str_enabled_disabled(connector->base.encoder));
}
drm_connector_list_iter_end(&conn_iter);
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
- if (crtc_state->hw.active) {
- /*
- * The initial mode needs to be set in order to keep
- * the atomic core happy. It wants a valid mode if the
- * crtc's enabled, so we do the above call.
- *
- * But we don't set all the derived state fully, hence
- * set a flag to indicate that a full recalculation is
- * needed on the next commit.
- */
- crtc_state->inherited = true;
+ /*
+ * The initial mode needs to be set in order to keep
+ * the atomic core happy. It wants a valid mode if the
+ * crtc's enabled, so we do the above call.
+ *
+ * But we don't set all the derived state fully, hence
+ * set a flag to indicate that a full recalculation is
+ * needed on the next commit.
+ */
+ crtc_state->inherited = true;
+ if (crtc_state->hw.active) {
intel_crtc_update_active_timings(crtc_state,
crtc_state->vrr.enable);
intel_crtc_copy_hw_to_uapi_state(crtc_state);
}
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -855,14 +849,14 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
* use plane->min_cdclk() :(
*/
if (plane_state->uapi.visible && plane->min_cdclk) {
- if (crtc_state->double_wide || DISPLAY_VER(i915) >= 10)
+ if (crtc_state->double_wide || DISPLAY_VER(display) >= 10)
crtc_state->min_cdclk[plane->id] =
DIV_ROUND_UP(crtc_state->pixel_rate, 2);
else
crtc_state->min_cdclk[plane->id] =
crtc_state->pixel_rate;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min_cdclk %d kHz\n",
plane->base.base.id, plane->base.name,
crtc_state->min_cdclk[plane->id]);
@@ -874,7 +868,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
/* TODO move here (or even earlier?) on all platforms */
if (DISPLAY_VER(display) >= 9)
- intel_wm_get_hw_state(i915);
+ intel_wm_get_hw_state(display);
intel_bw_update_hw_state(display);
intel_cdclk_update_hw_state(display);
@@ -883,11 +877,11 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
}
static void
-get_encoder_power_domains(struct drm_i915_private *i915)
+get_encoder_power_domains(struct intel_display *display)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_crtc_state *crtc_state;
if (!encoder->get_power_domains)
@@ -905,49 +899,51 @@ get_encoder_power_domains(struct drm_i915_private *i915)
}
}
-static void intel_early_display_was(struct drm_i915_private *i915)
+static void intel_early_display_was(struct intel_display *display)
{
/*
* Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
* Also known as Wa_14010480278.
*/
- if (IS_DISPLAY_VER(i915, 10, 12))
- intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
+ if (IS_DISPLAY_VER(display, 10, 12))
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
/*
* WaRsPkgCStateDisplayPMReq:hsw
* System hang if this isn't done before disabling all planes!
*/
- if (IS_HASWELL(i915))
- intel_de_rmw(i915, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES);
+ if (display->platform.haswell)
+ intel_de_rmw(display, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES);
- if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
+ if (display->platform.kabylake || display->platform.coffeelake ||
+ display->platform.cometlake) {
/* Display WA #1142:kbl,cfl,cml */
- intel_de_rmw(i915, CHICKEN_PAR1_1,
+ intel_de_rmw(display, CHICKEN_PAR1_1,
KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
- intel_de_rmw(i915, CHICKEN_MISC_2,
+ intel_de_rmw(display, CHICKEN_MISC_2,
KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
KBL_ARB_FILL_SPARE_14);
}
}
-void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+void intel_modeset_setup_hw_state(struct intel_display *display,
struct drm_modeset_acquire_ctx *ctx)
{
- struct intel_display *display = &i915->display;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT);
- intel_early_display_was(i915);
- intel_modeset_readout_hw_state(i915);
+ intel_early_display_was(display);
+ intel_vga_disable(display);
+
+ intel_modeset_readout_hw_state(display);
/* HW state is read out, now we need to sanitize this mess. */
- get_encoder_power_domains(i915);
+ get_encoder_power_domains(display);
- intel_pch_sanitize(i915);
+ intel_pch_sanitize(display);
intel_cmtg_sanitize(display);
@@ -955,7 +951,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
* intel_sanitize_plane_mapping() may need to do vblank
* waits, so we need vblank interrupts restored beforehand.
*/
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -969,35 +965,35 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
}
}
- intel_fbc_sanitize(&i915->display);
+ intel_fbc_sanitize(display);
- intel_sanitize_plane_mapping(i915);
+ intel_sanitize_plane_mapping(display);
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
intel_sanitize_encoder(encoder);
/*
* Sanitizing CRTCs needs their connector atomic state to be
* up-to-date, so ensure that already here.
*/
- intel_modeset_update_connector_atomic_state(i915);
+ intel_modeset_update_connector_atomic_state(display);
- intel_sanitize_all_crtcs(i915, ctx);
+ intel_sanitize_all_crtcs(display, ctx);
intel_dpll_sanitize_state(display);
/* TODO move earlier on all platforms */
if (DISPLAY_VER(display) < 9)
- intel_wm_get_hw_state(i915);
- intel_wm_sanitize(i915);
+ intel_wm_get_hw_state(display);
+ intel_wm_sanitize(display);
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_power_domain_mask put_domains;
intel_modeset_get_crtc_power_domains(crtc_state, &put_domains);
- if (drm_WARN_ON(&i915->drm, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
+ if (drm_WARN_ON(display->drm, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
intel_modeset_put_crtc_power_domains(crtc, &put_domains);
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.h b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
index 3beff67b33d0..f5e6f3ae9572 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.h
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
@@ -6,10 +6,10 @@
#ifndef __INTEL_MODESET_SETUP_H__
#define __INTEL_MODESET_SETUP_H__
-struct drm_i915_private;
struct drm_modeset_acquire_ctx;
+struct intel_display;
-void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+void intel_modeset_setup_hw_state(struct intel_display *display,
struct drm_modeset_acquire_ctx *ctx);
#endif /* __INTEL_MODESET_SETUP_H__ */
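The pattern repeated across the hunks above: helpers that used to take struct drm_i915_private now take struct intel_display, call sites that only hold a crtc, encoder or crtc_state derive it with to_intel_display() instead of to_i915(), and the drm_* helpers are passed display->drm rather than &i915->drm. A minimal sketch of the resulting shape, with a hypothetical helper name (foo_sanitize() is not part of the patch):

/* Sketch only: foo_sanitize() is a made-up name illustrating the converted shape. */
static void foo_sanitize(struct intel_display *display)
{
	struct intel_crtc *crtc;

	/* display->drm is the plain struct drm_device the drm_* helpers expect */
	for_each_intel_crtc(display->drm, crtc)
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] sanitized\n",
			    crtc->base.base.id, crtc->base.name);
}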
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index a008412fdd04..766a9983665a 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -6,13 +6,14 @@
*/
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_cx0_phy.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_modeset_verify.h"
@@ -28,9 +29,8 @@ static void intel_connector_verify_state(const struct intel_crtc_state *crtc_sta
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);
if (connector->get_hw_state(connector)) {
@@ -91,7 +91,6 @@ verify_connector_state(struct intel_atomic_state *state,
static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
if (crtc_state->has_pch_encoder) {
int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(display, crtc_state),
@@ -103,7 +102,7 @@ static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_s
* Yell if the encoder disagrees. Allow for slight
* rounding differences.
*/
- drm_WARN(&i915->drm, abs(fdi_dotclock - dotclock) > 1,
+ drm_WARN(display->drm, abs(fdi_dotclock - dotclock) > 1,
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
fdi_dotclock, dotclock);
}
@@ -113,17 +112,16 @@ static void
verify_encoder_state(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_encoder *encoder;
struct drm_connector *connector;
const struct drm_connector_state *old_conn_state, *new_conn_state;
int i;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
bool enabled = false, found = false;
enum pipe pipe;
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s]\n",
encoder->base.base.id,
encoder->base.name);
@@ -166,7 +164,6 @@ verify_crtc_state(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_crtc_state *sw_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc_state *hw_crtc_state;
@@ -185,7 +182,7 @@ verify_crtc_state(struct intel_atomic_state *state,
intel_crtc_get_pipe_config(hw_crtc_state);
/* we keep both pipes enabled on 830 */
- if (IS_I830(i915) && hw_crtc_state->hw.active)
+ if (display->platform.i830 && hw_crtc_state->hw.active)
hw_crtc_state->hw.active = sw_crtc_state->hw.active;
INTEL_DISPLAY_STATE_WARN(display,
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index aff9a3455c1b..12308495afa5 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -291,7 +291,6 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
struct intel_display *display = overlay->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe = overlay->crtc->pipe;
struct intel_frontbuffer *frontbuffer = NULL;
@@ -307,7 +306,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
intel_frontbuffer_put(overlay->frontbuffer);
overlay->frontbuffer = frontbuffer;
- intel_frontbuffer_flip_prepare(i915, INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_flip_prepare(display, INTEL_FRONTBUFFER_OVERLAY(pipe));
overlay->old_vma = overlay->vma;
if (vma)
@@ -359,14 +358,13 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
{
struct intel_display *display = overlay->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_vma *vma;
vma = fetch_and_zero(&overlay->old_vma);
if (drm_WARN_ON(display->drm, !vma))
return;
- intel_frontbuffer_flip_complete(i915, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+ intel_frontbuffer_flip_complete(display, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
i915_vma_unpin(vma);
i915_vma_put(vma);
diff --git a/drivers/gpu/drm/i915/display/intel_pch.c b/drivers/gpu/drm/i915/display/intel_pch.c
new file mode 100644
index 000000000000..469e8a3cfb49
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Intel Corporation.
+ */
+
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
+#include "intel_display_core.h"
+#include "intel_pch.h"
+
+#define INTEL_PCH_DEVICE_ID_MASK 0xff80
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
+#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
+#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
+#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
+#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
+#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
+#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
+#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
+#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
+#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
+#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
+#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
+#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
+#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
+#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
+#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
+
+/*
+ * Check for platforms where the south display is on the same PCI device or SoC
+ * die as the north display. The PCH (if it even exists) is not involved in
+ * display. Return a fake PCH type for south display handling on these
+ * platforms, without actually detecting the PCH, and PCH_NONE otherwise.
+ */
+static enum intel_pch intel_pch_fake_for_south_display(struct intel_display *display)
+{
+ enum intel_pch pch_type = PCH_NONE;
+
+ if (DISPLAY_VER(display) >= 20)
+ pch_type = PCH_LNL;
+ else if (display->platform.battlemage || display->platform.meteorlake)
+ pch_type = PCH_MTL;
+ else if (display->platform.dg2)
+ pch_type = PCH_DG2;
+ else if (display->platform.dg1)
+ pch_type = PCH_DG1;
+
+ return pch_type;
+}
+
+/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
+static enum intel_pch
+intel_pch_type(const struct intel_display *display, unsigned short id)
+{
+ switch (id) {
+ case INTEL_PCH_IBX_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Ibex Peak PCH\n");
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) != 5);
+ return PCH_IBX;
+ case INTEL_PCH_CPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found CougarPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ DISPLAY_VER(display) != 6 &&
+ !display->platform.ivybridge);
+ return PCH_CPT;
+ case INTEL_PCH_PPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found PantherPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ DISPLAY_VER(display) != 6 &&
+ !display->platform.ivybridge);
+ /* PPT is CPT compatible */
+ return PCH_CPT;
+ case INTEL_PCH_LPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found LynxPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ display->platform.haswell_ult ||
+ display->platform.broadwell_ult);
+ return PCH_LPT_H;
+ case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found LynxPoint LP PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell_ult &&
+ !display->platform.broadwell_ult);
+ return PCH_LPT_LP;
+ case INTEL_PCH_WPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found WildcatPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ display->platform.haswell_ult ||
+ display->platform.broadwell_ult);
+ /* WPT is LPT compatible */
+ return PCH_LPT_H;
+ case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found WildcatPoint LP PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell_ult &&
+ !display->platform.broadwell_ult);
+ /* WPT is LPT compatible */
+ return PCH_LPT_LP;
+ case INTEL_PCH_SPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found SunrisePoint PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake);
+ return PCH_SPT;
+ case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found SunrisePoint LP PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_SPT;
+ case INTEL_PCH_KBP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Kaby Lake PCH (KBP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ /* KBP is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_CNP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Cannon Lake PCH (CNP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_CNP;
+ case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm,
+ "Found Cannon Lake LP PCH (CNP-LP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_CNP;
+ case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+ case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Comet Lake PCH (CMP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake &&
+ !display->platform.rocketlake);
+ /* CMP is CNP compatible */
+ return PCH_CNP;
+ case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Comet Lake V PCH (CMP-V)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ /* CMP-V is based on KBP, which is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Ice Lake PCH\n");
+ drm_WARN_ON(display->drm, !display->platform.icelake);
+ return PCH_ICP;
+ case INTEL_PCH_MCC_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Mule Creek Canyon PCH\n");
+ drm_WARN_ON(display->drm, !(display->platform.jasperlake ||
+ display->platform.elkhartlake));
+ /* MCC is TGP compatible */
+ return PCH_TGP;
+ case INTEL_PCH_TGP_DEVICE_ID_TYPE:
+ case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Tiger Lake LP PCH\n");
+ drm_WARN_ON(display->drm, !display->platform.tigerlake &&
+ !display->platform.rocketlake &&
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_TGP;
+ case INTEL_PCH_JSP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Jasper Lake PCH\n");
+ drm_WARN_ON(display->drm, !(display->platform.jasperlake ||
+ display->platform.elkhartlake));
+ /* JSP is ICP compatible */
+ return PCH_ICP;
+ case INTEL_PCH_ADP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP3_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP4_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Alder Lake PCH\n");
+ drm_WARN_ON(display->drm, !display->platform.alderlake_s &&
+ !display->platform.alderlake_p);
+ return PCH_ADP;
+ default:
+ return PCH_NONE;
+ }
+}
+
+static bool intel_is_virt_pch(unsigned short id,
+ unsigned short svendor, unsigned short sdevice)
+{
+ return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+ id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
+ svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+ sdevice == PCI_SUBDEVICE_ID_QEMU));
+}
+
+static void
+intel_virt_detect_pch(const struct intel_display *display,
+ unsigned short *pch_id, enum intel_pch *pch_type)
+{
+ unsigned short id = 0;
+
+ /*
+ * In a virtualized passthrough environment we can be in a
+ * setup where the ISA bridge is not able to be passed through.
+ * In this case, a south bridge can be emulated and we have to
+ * make an educated guess as to which PCH is really there.
+ */
+
+ if (display->platform.alderlake_s || display->platform.alderlake_p)
+ id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
+ else if (display->platform.tigerlake || display->platform.rocketlake)
+ id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
+ id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
+ else if (display->platform.icelake)
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (display->platform.coffeelake ||
+ display->platform.cometlake)
+ id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (display->platform.kabylake || display->platform.skylake)
+ id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
+ else if (display->platform.haswell_ult ||
+ display->platform.broadwell_ult)
+ id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ else if (display->platform.haswell || display->platform.broadwell)
+ id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
+ else if (DISPLAY_VER(display) == 6 || display->platform.ivybridge)
+ id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ else if (DISPLAY_VER(display) == 5)
+ id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
+
+ if (id)
+ drm_dbg_kms(display->drm, "Assuming PCH ID %04x\n", id);
+ else
+ drm_dbg_kms(display->drm, "Assuming no PCH\n");
+
+ *pch_type = intel_pch_type(display, id);
+
+ /* Sanity check virtual PCH id */
+ if (drm_WARN_ON(display->drm,
+ id && *pch_type == PCH_NONE))
+ id = 0;
+
+ *pch_id = id;
+}
+
+void intel_pch_detect(struct intel_display *display)
+{
+ struct pci_dev *pch = NULL;
+ unsigned short id;
+ enum intel_pch pch_type;
+
+ pch_type = intel_pch_fake_for_south_display(display);
+ if (pch_type != PCH_NONE) {
+ display->pch_type = pch_type;
+ drm_dbg_kms(display->drm,
+ "PCH not involved in display, using fake PCH type %d for south display\n",
+ pch_type);
+ return;
+ }
+
+ /*
+ * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+ * make graphics device passthrough work easily for the VMM, which only
+ * needs to expose the ISA bridge to let the driver know the real
+ * hardware underneath. This is a requirement from the virtualization team.
+ *
+ * In some virtualized environments (e.g. XEN), there is an irrelevant
+ * ISA bridge in the system. To work reliably, we should scan through
+ * all the ISA bridge devices and check for the first match, instead
+ * of only checking the first one.
+ */
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+ if (pch->vendor != PCI_VENDOR_ID_INTEL)
+ continue;
+
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+ pch_type = intel_pch_type(display, id);
+ if (pch_type != PCH_NONE) {
+ display->pch_type = pch_type;
+ break;
+ } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
+ pch->subsystem_device)) {
+ intel_virt_detect_pch(display, &id, &pch_type);
+ display->pch_type = pch_type;
+ break;
+ }
+ }
+
+ /*
+ * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+ * display.
+ */
+ if (pch && !HAS_DISPLAY(display)) {
+ drm_dbg_kms(display->drm,
+ "Display disabled, reverting to NOP PCH\n");
+ display->pch_type = PCH_NOP;
+ } else if (!pch) {
+ if (i915_run_as_guest() && HAS_DISPLAY(display)) {
+ intel_virt_detect_pch(display, &id, &pch_type);
+ display->pch_type = pch_type;
+ } else {
+ drm_dbg_kms(display->drm, "No PCH found.\n");
+ }
+ }
+
+ pci_dev_put(pch);
+}
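In the detect loop above, the raw LPC/ISA bridge device ID is reduced with INTEL_PCH_DEVICE_ID_MASK (0xff80) before it is matched against the INTEL_PCH_*_DEVICE_ID_TYPE values, presumably because the masked-off low bits are SKU-specific. A small sketch with a made-up example ID (0xA145 is illustrative, not taken from the patch):

/*
 * Illustrative only: 0xA145 is an example device ID.
 * 0xA145 & 0xff80 = 0xA100 = INTEL_PCH_SPT_DEVICE_ID_TYPE.
 */
static enum intel_pch example_pch_lookup(const struct intel_display *display)
{
	unsigned short id = 0xA145 & INTEL_PCH_DEVICE_ID_MASK;

	return intel_pch_type(display, id);	/* PCH_SPT */
}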
diff --git a/drivers/gpu/drm/i915/display/intel_pch.h b/drivers/gpu/drm/i915/display/intel_pch.h
new file mode 100644
index 000000000000..cf4dab1b98bf
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Intel Corporation.
+ */
+
+#ifndef __INTEL_PCH__
+#define __INTEL_PCH__
+
+#include "intel_display_conversion.h"
+
+struct intel_display;
+
+/*
+ * Sorted by south display engine compatibility.
+ * If the new PCH comes with a south display engine that is not
+ * inherited from the latest item, please do not add it to the
+ * end. Instead, add it right after its "parent" PCH.
+ */
+enum intel_pch {
+ PCH_NOP = -1, /* PCH without south display */
+ PCH_NONE = 0, /* No PCH present */
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
+ PCH_LPT_H, /* Lynxpoint/Wildcatpoint H PCH */
+ PCH_LPT_LP, /* Lynxpoint/Wildcatpoint LP PCH */
+ PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
+ PCH_CNP, /* Cannon/Comet Lake PCH */
+ PCH_ICP, /* Ice Lake/Jasper Lake PCH */
+ PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
+ PCH_ADP, /* Alder Lake PCH */
+
+ /* Fake PCHs, functionality handled on the same PCI dev */
+ PCH_DG1 = 1024,
+ PCH_DG2,
+ PCH_MTL,
+ PCH_LNL,
+};
+
+#define INTEL_PCH_TYPE(_display) (__to_intel_display(_display)->pch_type)
+#define HAS_PCH_DG2(display) (INTEL_PCH_TYPE(display) == PCH_DG2)
+#define HAS_PCH_ADP(display) (INTEL_PCH_TYPE(display) == PCH_ADP)
+#define HAS_PCH_DG1(display) (INTEL_PCH_TYPE(display) == PCH_DG1)
+#define HAS_PCH_TGP(display) (INTEL_PCH_TYPE(display) == PCH_TGP)
+#define HAS_PCH_ICP(display) (INTEL_PCH_TYPE(display) == PCH_ICP)
+#define HAS_PCH_CNP(display) (INTEL_PCH_TYPE(display) == PCH_CNP)
+#define HAS_PCH_SPT(display) (INTEL_PCH_TYPE(display) == PCH_SPT)
+#define HAS_PCH_LPT_H(display) (INTEL_PCH_TYPE(display) == PCH_LPT_H)
+#define HAS_PCH_LPT_LP(display) (INTEL_PCH_TYPE(display) == PCH_LPT_LP)
+#define HAS_PCH_LPT(display) (INTEL_PCH_TYPE(display) == PCH_LPT_H || \
+ INTEL_PCH_TYPE(display) == PCH_LPT_LP)
+#define HAS_PCH_CPT(display) (INTEL_PCH_TYPE(display) == PCH_CPT)
+#define HAS_PCH_IBX(display) (INTEL_PCH_TYPE(display) == PCH_IBX)
+#define HAS_PCH_NOP(display) (INTEL_PCH_TYPE(display) == PCH_NOP)
+#define HAS_PCH_SPLIT(display) (INTEL_PCH_TYPE(display) != PCH_NONE)
+
+void intel_pch_detect(struct intel_display *display);
+
+#endif /* __INTEL_PCH__ */
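The HAS_PCH_*() macros in the new header resolve through __to_intel_display() to display->pch_type, so the converted call sites in the following files simply pass the intel_display pointer. A sketch of a converted check, using a hypothetical helper name (the transcoder A behaviour matches intel_crtc_pch_transcoder() further down):

/* Sketch only: example_uses_pch_transcoder_a() is not part of the patch. */
static bool example_uses_pch_transcoder_a(struct intel_display *display)
{
	/* LPT-H/LP route all pipes through PCH transcoder A */
	return HAS_PCH_LPT(display);
}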
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 99f6d6f53fa7..1743ebf551cb 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -3,8 +3,9 @@
* Copyright © 2021 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_crt.h"
#include "intel_crt_regs.h"
@@ -20,28 +21,27 @@
#include "intel_pps.h"
#include "intel_sdvo.h"
-bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder)
{
- return HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915) ||
- (HAS_PCH_LPT_H(i915) && pch_transcoder == PIPE_A);
+ return HAS_PCH_IBX(display) || HAS_PCH_CPT(display) ||
+ (HAS_PCH_LPT_H(display) && pch_transcoder == PIPE_A);
}
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
return PIPE_A;
else
return crtc->pipe;
}
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_dp_disabled(struct intel_display *display,
enum pipe pipe, enum port port,
i915_reg_t dp_reg)
{
- struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
bool state;
@@ -52,16 +52,15 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
port_name(port), pipe_name(pipe));
INTEL_DISPLAY_STATE_WARN(display,
- HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ HAS_PCH_IBX(display) && !state && port_pipe == PIPE_B,
"IBX PCH DP %c still using transcoder B\n",
port_name(port));
}
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_hdmi_disabled(struct intel_display *display,
enum pipe pipe, enum port port,
i915_reg_t hdmi_reg)
{
- struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
bool state;
@@ -72,20 +71,19 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
port_name(port), pipe_name(pipe));
INTEL_DISPLAY_STATE_WARN(display,
- HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ HAS_PCH_IBX(display) && !state && port_pipe == PIPE_B,
"IBX PCH HDMI %c still using transcoder B\n",
port_name(port));
}
-static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_ports_disabled(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
- assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
+ assert_pch_dp_disabled(display, pipe, PORT_B, PCH_DP_B);
+ assert_pch_dp_disabled(display, pipe, PORT_C, PCH_DP_C);
+ assert_pch_dp_disabled(display, pipe, PORT_D, PCH_DP_D);
INTEL_DISPLAY_STATE_WARN(display,
intel_crt_port_enabled(display, PCH_ADPA, &port_pipe) && port_pipe == pipe,
@@ -93,20 +91,19 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
INTEL_DISPLAY_STATE_WARN(display,
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe,
+ intel_lvds_port_enabled(display, PCH_LVDS, &port_pipe) && port_pipe == pipe,
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
/* PCH SDVOB multiplex with HDMIB */
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
+ assert_pch_hdmi_disabled(display, pipe, PORT_B, PCH_HDMIB);
+ assert_pch_hdmi_disabled(display, pipe, PORT_C, PCH_HDMIC);
+ assert_pch_hdmi_disabled(display, pipe, PORT_D, PCH_HDMID);
}
-static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_transcoder_disabled(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 val;
bool enabled;
@@ -117,45 +114,45 @@ static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+static void ibx_sanitize_pch_hdmi_port(struct intel_display *display,
enum port port, i915_reg_t hdmi_reg)
{
- u32 val = intel_de_read(dev_priv, hdmi_reg);
+ u32 val = intel_de_read(display, hdmi_reg);
if (val & SDVO_ENABLE ||
(val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Sanitizing transcoder select for HDMI %c\n",
port_name(port));
val &= ~SDVO_PIPE_SEL_MASK;
val |= SDVO_PIPE_SEL(PIPE_A);
- intel_de_write(dev_priv, hdmi_reg, val);
+ intel_de_write(display, hdmi_reg, val);
}
-static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+static void ibx_sanitize_pch_dp_port(struct intel_display *display,
enum port port, i915_reg_t dp_reg)
{
- u32 val = intel_de_read(dev_priv, dp_reg);
+ u32 val = intel_de_read(display, dp_reg);
if (val & DP_PORT_EN ||
(val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Sanitizing transcoder select for DP %c\n",
port_name(port));
val &= ~DP_PIPE_SEL_MASK;
val |= DP_PIPE_SEL(PIPE_A);
- intel_de_write(dev_priv, dp_reg, val);
+ intel_de_write(display, dp_reg, val);
}
-static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+static void ibx_sanitize_pch_ports(struct intel_display *display)
{
/*
* The BIOS may select transcoder B on some of the PCH
@@ -168,14 +165,14 @@ static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
* (see. intel_dp_link_down(), intel_disable_hdmi(),
* intel_disable_sdvo()).
*/
- ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
- ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
- ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+ ibx_sanitize_pch_dp_port(display, PORT_B, PCH_DP_B);
+ ibx_sanitize_pch_dp_port(display, PORT_C, PCH_DP_C);
+ ibx_sanitize_pch_dp_port(display, PORT_D, PCH_DP_D);
/* PCH SDVOB multiplex with HDMIB */
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+ ibx_sanitize_pch_hdmi_port(display, PORT_B, PCH_HDMIB);
+ ibx_sanitize_pch_hdmi_port(display, PORT_C, PCH_HDMIC);
+ ibx_sanitize_pch_hdmi_port(display, PORT_D, PCH_HDMID);
}
static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc,
@@ -225,32 +222,30 @@ void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
enum pipe pch_transcoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder)));
-
- intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HTOTAL(pch_transcoder),
+ intel_de_read(display, TRANS_HTOTAL(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HBLANK(pch_transcoder),
+ intel_de_read(display, TRANS_HBLANK(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HSYNC(pch_transcoder),
+ intel_de_read(display, TRANS_HSYNC(display, cpu_transcoder)));
+
+ intel_de_write(display, PCH_TRANS_VTOTAL(pch_transcoder),
+ intel_de_read(display, TRANS_VTOTAL(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VBLANK(pch_transcoder),
+ intel_de_read(display, TRANS_VBLANK(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VSYNC(pch_transcoder),
+ intel_de_read(display, TRANS_VSYNC(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+ intel_de_read(display, TRANS_VSYNCSHIFT(display, cpu_transcoder)));
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 val, pipeconf_val;
@@ -262,7 +257,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
assert_fdi_tx_enabled(display, pipe);
assert_fdi_rx_enabled(display, pipe);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
reg = TRANS_CHICKEN2(pipe);
val = intel_de_read(display, reg);
/*
@@ -280,7 +275,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val = intel_de_read(display, reg);
pipeconf_val = intel_de_read(display, TRANSCONF(display, pipe));
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
/* Configure frame start delay to match the CPU */
val &= ~TRANS_FRAME_START_DELAY_MASK;
val |= TRANS_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
@@ -299,7 +294,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_ILK) == TRANSCONF_INTERLACE_IF_ID_ILK) {
- if (HAS_PCH_IBX(dev_priv) &&
+ if (HAS_PCH_IBX(display) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX;
else
@@ -317,7 +312,6 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
@@ -326,18 +320,18 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
assert_fdi_rx_disabled(display, pipe);
/* Ports must be off as well */
- assert_pch_ports_disabled(dev_priv, pipe);
+ assert_pch_ports_disabled(display, pipe);
reg = PCH_TRANSCONF(pipe);
- intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0);
+ intel_de_rmw(display, reg, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
+ if (intel_de_wait_for_clear(display, reg, TRANS_STATE_ENABLE, 50))
+ drm_err(display->drm, "failed to disable transcoder %c\n",
pipe_name(pipe));
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
/* Workaround: Clear the timing override chicken bit again. */
- intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe),
+ intel_de_rmw(display, TRANS_CHICKEN2(pipe),
TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
@@ -366,14 +360,13 @@ void ilk_pch_pre_enable(struct intel_atomic_state *state,
void ilk_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
u32 temp;
- assert_pch_transcoder_disabled(dev_priv, pipe);
+ assert_pch_transcoder_disabled(display, pipe);
/* For PCH output, training FDI link */
intel_fdi_link_train(crtc, crtc_state);
@@ -382,7 +375,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
* We need to program the right clock selection
* before writing the pixel multiplier into the DPLL.
*/
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
u32 sel;
temp = intel_de_read(display, PCH_DPLL_SEL);
@@ -418,7 +411,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev_priv) &&
+ if (HAS_PCH_CPT(display) &&
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -459,23 +452,27 @@ void ilk_pch_disable(struct intel_atomic_state *state,
void ilk_pch_post_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
ilk_disable_pch_transcoder(crtc);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
/* disable TRANS_DP_CTL */
- intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe),
+ intel_de_rmw(display, TRANS_DP_CTL(pipe),
TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK,
TRANS_DP_PORT_SEL_NONE);
/* disable DPLL_SEL */
- intel_de_rmw(dev_priv, PCH_DPLL_SEL,
+ intel_de_rmw(display, PCH_DPLL_SEL,
TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0);
}
ilk_fdi_pll_disable(crtc);
+
+ intel_disable_shared_dpll(old_crtc_state);
}
static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
@@ -497,9 +494,8 @@ static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum pipe pipe = crtc->pipe;
enum intel_dpll_id pll_id;
@@ -518,7 +514,7 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder,
&crtc_state->fdi_m_n);
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
/*
* The pipe->pch transcoder and pch transcoder->pll
* mapping is fixed.
@@ -550,8 +546,6 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val, pipeconf_val;
@@ -559,49 +553,49 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
assert_fdi_tx_enabled(display, (enum pipe)cpu_transcoder);
assert_fdi_rx_enabled(display, PIPE_A);
- val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
+ val = intel_de_read(display, TRANS_CHICKEN2(PIPE_A));
/* Workaround: set timing override bit. */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_write(display, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
- pipeconf_val = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder));
+ pipeconf_val = intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder));
if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK)
val |= TRANS_INTERLACE_INTERLACED;
else
val |= TRANS_INTERLACE_PROGRESSIVE;
- intel_de_write(dev_priv, LPT_TRANSCONF, val);
- if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
+ intel_de_write(display, LPT_TRANSCONF, val);
+ if (intel_de_wait_for_set(display, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 100))
- drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
+ drm_err(display->drm, "Failed to enable PCH transcoder\n");
}
-static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+static void lpt_disable_pch_transcoder(struct intel_display *display)
{
- intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0);
+ intel_de_rmw(display, LPT_TRANSCONF, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
+ if (intel_de_wait_for_clear(display, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
+ drm_err(display->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
- intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
+ intel_de_rmw(display, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
void lpt_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- assert_pch_transcoder_disabled(dev_priv, PIPE_A);
+ assert_pch_transcoder_disabled(display, PIPE_A);
lpt_program_iclkip(crtc_state);
@@ -614,36 +608,36 @@ void lpt_pch_enable(struct intel_atomic_state *state,
void lpt_pch_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- lpt_disable_pch_transcoder(dev_priv);
+ lpt_disable_pch_transcoder(display);
- lpt_disable_iclkip(dev_priv);
+ lpt_disable_iclkip(display);
}
void lpt_pch_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0)
+ if ((intel_de_read(display, LPT_TRANSCONF) & TRANS_ENABLE) == 0)
return;
crtc_state->has_pch_encoder = true;
- tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ tmp = intel_de_read(display, FDI_RX_CTL(PIPE_A));
crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder,
&crtc_state->fdi_m_n);
- crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
+ crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(display);
}
-void intel_pch_sanitize(struct drm_i915_private *i915)
+void intel_pch_sanitize(struct intel_display *display)
{
- if (HAS_PCH_IBX(i915))
- ibx_sanitize_pch_ports(i915);
+ if (HAS_PCH_IBX(display))
+ ibx_sanitize_pch_ports(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h
index 35f8288af3d1..cd6b3ed05887 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.h
@@ -9,14 +9,14 @@
#include <linux/types.h>
enum pipe;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_link_m_n;
#ifdef I915
-bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@@ -41,9 +41,9 @@ void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc,
void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
-void intel_pch_sanitize(struct drm_i915_private *i915);
+void intel_pch_sanitize(struct intel_display *display);
#else
-static inline bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+static inline bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder)
{
return false;
@@ -90,7 +90,7 @@ static inline void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
}
-static inline void intel_pch_sanitize(struct drm_i915_private *i915)
+static inline void intel_pch_sanitize(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 33467de3d115..693b90e3dfc3 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -11,27 +11,28 @@
#include "intel_pch_refclk.h"
#include "intel_sbi.h"
-static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
+static void lpt_fdi_reset_mphy(struct intel_display *display)
{
- intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
+ intel_de_rmw(display, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
- if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ if (wait_for_us(intel_de_read(display, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
+ drm_err(display->drm, "FDI mPHY reset assert timeout\n");
- intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
+ intel_de_rmw(display, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
- if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ if (wait_for_us((intel_de_read(display, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
+ drm_err(display->drm, "FDI mPHY reset de-assert timeout\n");
}
/* WaMPhyProgramming:hsw */
-static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
+static void lpt_fdi_program_mphy(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
- lpt_fdi_reset_mphy(dev_priv);
+ lpt_fdi_reset_mphy(display);
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
@@ -103,11 +104,12 @@ static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+void lpt_disable_iclkip(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 temp;
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
+ intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_GATE);
intel_sbi_lock(dev_priv);
@@ -175,24 +177,25 @@ int lpt_iclkip(const struct intel_crtc_state *crtc_state)
/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int clock = crtc_state->hw.adjusted_mode.crtc_clock;
struct iclkip_params p;
u32 temp;
- lpt_disable_iclkip(dev_priv);
+ lpt_disable_iclkip(display);
lpt_compute_iclkip(&p, clock);
- drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock);
+ drm_WARN_ON(display->drm, lpt_iclkip_freq(&p) != clock);
/* This should not happen with any sane values */
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
+ drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
+ drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
~SBI_SSCDIVINTPHASE_INCVAL_MASK);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);
@@ -224,15 +227,16 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
/* Wait for initialization time */
udelay(24);
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+ intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
-int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+int lpt_get_iclkip(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct iclkip_params p;
u32 temp;
- if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+ if ((intel_de_read(display, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
return 0;
iclkip_params_init(&p);
@@ -268,15 +272,16 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
* - Sequence to enable CLKOUT_DP without spread
* - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
*/
-static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
+static void lpt_enable_clkout_dp(struct intel_display *display,
bool with_spread, bool with_fdi)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, tmp;
- if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
+ if (drm_WARN(display->drm, with_fdi && !with_spread,
"FDI requires downspread\n"))
with_spread = true;
- if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
+ if (drm_WARN(display->drm, HAS_PCH_LPT_LP(display) &&
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
@@ -295,10 +300,10 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
if (with_fdi)
- lpt_fdi_program_mphy(dev_priv);
+ lpt_fdi_program_mphy(display);
}
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(display) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -307,13 +312,14 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
}
/* Sequence to disable CLKOUT_DP */
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+void lpt_disable_clkout_dp(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, tmp;
intel_sbi_lock(dev_priv);
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(display) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -364,15 +370,16 @@ static const u16 sscdivintphase[] = {
* < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
* change in clock period = -(steps / 10) * 5.787 ps
*/
-static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
+static void lpt_bend_clkout_dp(struct intel_display *display, int steps)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
int idx = BEND_IDX(steps);
- if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
+ if (drm_WARN_ON(display->drm, steps % 5 != 0))
return;
- if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
+ if (drm_WARN_ON(display->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
intel_sbi_lock(dev_priv);
@@ -393,10 +400,10 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
#undef BEND_IDX
-static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
+static bool spll_uses_pch_ssc(struct intel_display *display)
{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
+ u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
+ u32 ctl = intel_de_read(display, SPLL_CTL);
if ((ctl & SPLL_PLL_ENABLE) == 0)
return false;
@@ -405,18 +412,17 @@ static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
- if (IS_BROADWELL(dev_priv) &&
+ if (display->platform.broadwell &&
(ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
return true;
return false;
}
-static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
- enum intel_dpll_id id)
+static bool wrpll_uses_pch_ssc(struct intel_display *display, enum intel_dpll_id id)
{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
+ u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
+ u32 ctl = intel_de_read(display, WRPLL_CTL(id));
if ((ctl & WRPLL_PLL_ENABLE) == 0)
return false;
@@ -424,7 +430,7 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
return true;
- if ((IS_BROADWELL(dev_priv) || IS_HASWELL_ULT(dev_priv)) &&
+ if ((display->platform.broadwell || display->platform.haswell_ult) &&
(ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
@@ -432,12 +438,12 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
return false;
}
-static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void lpt_init_pch_refclk(struct intel_display *display)
{
struct intel_encoder *encoder;
bool has_fdi = false;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_fdi = true;
@@ -462,37 +468,36 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
* clock hierarchy. That would also allow us to do
* clock bending finally.
*/
- dev_priv->display.dpll.pch_ssc_use = 0;
+ display->dpll.pch_ssc_use = 0;
- if (spll_uses_pch_ssc(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
+ if (spll_uses_pch_ssc(display)) {
+ drm_dbg_kms(display->drm, "SPLL using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
+ if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL1)) {
+ drm_dbg_kms(display->drm, "WRPLL1 using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
+ if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL2)) {
+ drm_dbg_kms(display->drm, "WRPLL2 using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
- if (dev_priv->display.dpll.pch_ssc_use)
+ if (display->dpll.pch_ssc_use)
return;
if (has_fdi) {
- lpt_bend_clkout_dp(dev_priv, 0);
- lpt_enable_clkout_dp(dev_priv, true, true);
+ lpt_bend_clkout_dp(display, 0);
+ lpt_enable_clkout_dp(display, true, true);
} else {
- lpt_disable_clkout_dp(dev_priv);
+ lpt_disable_clkout_dp(display);
}
}
-static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void ilk_init_pch_refclk(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
int i;
@@ -521,7 +526,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
}
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
has_ck505 = display->vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
@@ -607,7 +612,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* SSC must be turned on before enabling the CPU output */
if (intel_panel_use_ssc(display) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
+ drm_dbg_kms(display->drm, "Using SSC on panel\n");
val |= DREF_SSC1_ENABLE;
} else {
val &= ~DREF_SSC1_ENABLE;
@@ -623,7 +628,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Enable CPU source on CPU attached eDP */
if (has_cpu_edp) {
if (intel_panel_use_ssc(display) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Using SSC on eDP\n");
val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
} else {
@@ -670,10 +675,10 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/*
* Initialize reference clocks when the driver loads
*/
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+void intel_init_pch_refclk(struct intel_display *display)
{
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
- ilk_init_pch_refclk(dev_priv);
- else if (HAS_PCH_LPT(dev_priv))
- lpt_init_pch_refclk(dev_priv);
+ if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display))
+ ilk_init_pch_refclk(display);
+ else if (HAS_PCH_LPT(display))
+ lpt_init_pch_refclk(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
index ae3403c0ced8..25cc53c568bc 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
@@ -8,25 +8,25 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
#ifdef I915
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state);
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
-int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+void lpt_disable_iclkip(struct intel_display *display);
+int lpt_get_iclkip(struct intel_display *display);
int lpt_iclkip(const struct intel_crtc_state *crtc_state);
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
+void intel_init_pch_refclk(struct intel_display *display);
+void lpt_disable_clkout_dp(struct intel_display *display);
#else
static inline void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
}
-static inline void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+static inline void lpt_disable_iclkip(struct intel_display *display)
{
}
-static inline int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+static inline int lpt_get_iclkip(struct intel_display *display)
{
return 0;
}
@@ -34,10 +34,10 @@ static inline int lpt_iclkip(const struct intel_crtc_state *crtc_state)
{
return 0;
}
-static inline void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+static inline void intel_init_pch_refclk(struct intel_display *display)
{
}
-static inline void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+static inline void lpt_disable_clkout_dp(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 10e26c3db946..6182f484b5bd 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -75,7 +75,7 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+static void i9xx_pipe_crc_auto_source(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source)
{
@@ -85,8 +85,8 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
- drm_modeset_lock_all(&dev_priv->drm);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ drm_modeset_lock_all(display->drm);
+ for_each_intel_encoder(display->drm, encoder) {
if (!encoder->base.crtc)
continue;
@@ -113,7 +113,7 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_DP_D;
break;
default:
- drm_WARN(&dev_priv->drm, 1, "nonexisting DP port %c\n",
+ drm_WARN(display->drm, 1, "nonexisting DP port %c\n",
port_name(dig_port->base.port));
break;
}
@@ -122,10 +122,10 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
break;
}
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
}
-static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int vlv_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -133,7 +133,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ i9xx_pipe_crc_auto_source(display, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
@@ -148,7 +148,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_CHERRYVIEW(dev_priv))
+ if (!display->platform.cherryview)
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
need_stable_symbols = true;
@@ -170,7 +170,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
- u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X(dev_priv));
+ u32 tmp = intel_de_read(display, PORT_DFT2_G4X(display));
tmp |= DC_BALANCE_RESET_VLV;
switch (pipe) {
@@ -186,26 +186,26 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
default:
return -EINVAL;
}
- intel_de_write(dev_priv, PORT_DFT2_G4X(dev_priv), tmp);
+ intel_de_write(display, PORT_DFT2_G4X(display), tmp);
}
return 0;
}
-static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int i9xx_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ i9xx_pipe_crc_auto_source(display, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
break;
case INTEL_PIPE_CRC_SOURCE_TV:
- if (!SUPPORTS_TV(dev_priv))
+ if (!SUPPORTS_TV(display))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
break;
@@ -229,10 +229,10 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
+static void vlv_undo_pipe_scramble_reset(struct intel_display *display,
enum pipe pipe)
{
- u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X(dev_priv));
+ u32 tmp = intel_de_read(display, PORT_DFT2_G4X(display));
switch (pipe) {
case PIPE_A:
@@ -249,7 +249,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
}
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
- intel_de_write(dev_priv, PORT_DFT2_G4X(dev_priv), tmp);
+ intel_de_write(display, PORT_DFT2_G4X(display), tmp);
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -281,18 +281,18 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
static void
intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
int ret;
- if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
- i915gm_irq_cstate_wa(dev_priv, enable);
+ if (display->platform.i945gm || display->platform.i915gm)
+ i915gm_irq_cstate_wa(display, enable);
drm_modeset_acquire_init(&ctx, 0);
- state = drm_atomic_state_alloc(&dev_priv->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state) {
ret = -ENOMEM;
goto unlock;
@@ -311,7 +311,7 @@ retry:
pipe_config->uapi.mode_changed = pipe_config->has_psr;
pipe_config->crc_enabled = enable;
- if (IS_HASWELL(dev_priv) &&
+ if (display->platform.haswell &&
pipe_config->hw.active && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
pipe_config->uapi.mode_changed = true;
@@ -327,13 +327,13 @@ put_state:
drm_atomic_state_put(state);
unlock:
- drm_WARN(&dev_priv->drm, ret,
+ drm_WARN(display->drm, ret,
"Toggling workaround to %i returns %i\n", enable, ret);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
-static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int ivb_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -361,7 +361,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int skl_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -404,22 +404,22 @@ static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int get_new_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source, u32 *val)
{
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
return i8xx_pipe_crc_ctl_reg(source, val);
- else if (DISPLAY_VER(dev_priv) < 5)
- return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
- else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
+ else if (DISPLAY_VER(display) < 5)
+ return i9xx_pipe_crc_ctl_reg(display, pipe, source, val);
+ else if (display->platform.valleyview || display->platform.cherryview)
+ return vlv_pipe_crc_ctl_reg(display, pipe, source, val);
+ else if (display->platform.ironlake || display->platform.sandybridge)
return ilk_pipe_crc_ctl_reg(source, val);
- else if (DISPLAY_VER(dev_priv) < 9)
- return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ else if (DISPLAY_VER(display) < 9)
+ return ivb_pipe_crc_ctl_reg(display, pipe, source, val);
else
- return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ return skl_pipe_crc_ctl_reg(display, pipe, source, val);
}
static int
@@ -447,7 +447,7 @@ void intel_crtc_crc_init(struct intel_crtc *crtc)
spin_lock_init(&pipe_crc->lock);
}
-static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
+static int i8xx_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -459,7 +459,7 @@ static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
+static int i9xx_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -472,7 +472,7 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
+static int vlv_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -487,7 +487,7 @@ static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
+static int ilk_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -501,7 +501,7 @@ static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
+static int ivb_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -515,7 +515,7 @@ static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
+static int skl_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -535,21 +535,21 @@ static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
}
static int
-intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
+intel_is_valid_crc_source(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
- if (DISPLAY_VER(dev_priv) == 2)
- return i8xx_crc_source_valid(dev_priv, source);
- else if (DISPLAY_VER(dev_priv) < 5)
- return i9xx_crc_source_valid(dev_priv, source);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_crc_source_valid(dev_priv, source);
- else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
- return ilk_crc_source_valid(dev_priv, source);
- else if (DISPLAY_VER(dev_priv) < 9)
- return ivb_crc_source_valid(dev_priv, source);
+ if (DISPLAY_VER(display) == 2)
+ return i8xx_crc_source_valid(display, source);
+ else if (DISPLAY_VER(display) < 5)
+ return i9xx_crc_source_valid(display, source);
+ else if (display->platform.valleyview || display->platform.cherryview)
+ return vlv_crc_source_valid(display, source);
+ else if (display->platform.ironlake || display->platform.sandybridge)
+ return ilk_crc_source_valid(display, source);
+ else if (DISPLAY_VER(display) < 9)
+ return ivb_crc_source_valid(display, source);
else
- return skl_crc_source_valid(dev_priv, source);
+ return skl_crc_source_valid(display, source);
}
const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
@@ -562,16 +562,16 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum intel_pipe_crc_source source;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
+ drm_dbg_kms(display->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
if (source == INTEL_PIPE_CRC_SOURCE_AUTO ||
- intel_is_valid_crc_source(dev_priv, source) == 0) {
+ intel_is_valid_crc_source(display, source) == 0) {
*values_cnt = 5;
return 0;
}
@@ -583,7 +583,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
@@ -594,14 +593,14 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
bool enable;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
+ drm_dbg_kms(display->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
power_domain = POWER_DOMAIN_PIPE(pipe);
wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -610,17 +609,17 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
if (enable)
intel_crtc_crc_setup_workarounds(crtc, true);
- ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
+ ret = get_new_crc_ctl_reg(display, pipe, &source, &val);
if (ret != 0)
goto out;
pipe_crc->source = source;
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), val);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), val);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
if (!source) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+ if (display->platform.valleyview || display->platform.cherryview)
+ vlv_undo_pipe_scramble_reset(display, pipe);
}
pipe_crc->skipped = 0;
@@ -636,7 +635,7 @@ out:
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum pipe pipe = crtc->pipe;
u32 val = 0;
@@ -644,19 +643,20 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
if (!crtc->base.crc.opened)
return;
- if (get_new_crc_ctl_reg(dev_priv, pipe, &pipe_crc->source, &val) < 0)
+ if (get_new_crc_ctl_reg(display, pipe, &pipe_crc->source, &val) < 0)
return;
/* Don't need pipe_crc->lock here, IRQs are not generated. */
pipe_crc->skipped = 0;
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), val);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), val);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
}
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum pipe pipe = crtc->pipe;
@@ -665,7 +665,7 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
pipe_crc->skipped = INT_MIN;
spin_unlock_irq(&pipe_crc->lock);
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), 0);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), 0);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
intel_synchronize_irq(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index b1675b46e06c..c00d9184c586 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -52,44 +52,57 @@ intel_reuse_initial_plane_obj(struct intel_crtc *this,
return false;
}
+static enum intel_memory_type
+initial_plane_memory_type(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (display->platform.dgfx)
+ return INTEL_MEMORY_LOCAL;
+ else if (HAS_LMEMBAR_SMEM_STOLEN(i915))
+ return INTEL_MEMORY_STOLEN_LOCAL;
+ else
+ return INTEL_MEMORY_STOLEN_SYSTEM;
+}
+
static bool
-initial_plane_phys_lmem(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
+initial_plane_phys(struct intel_display *display,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_i915_private *i915 = to_i915(display->drm);
- gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct intel_memory_region *mem;
+ enum intel_memory_type mem_type;
+ bool is_present, is_local;
dma_addr_t dma_addr;
- gen8_pte_t pte;
u32 base;
+ mem_type = initial_plane_memory_type(display);
+ mem = intel_memory_region_by_type(i915, mem_type);
+ if (!mem) {
+ drm_dbg_kms(display->drm,
+ "Initial plane memory region (type %s) not initialized\n",
+ intel_memory_type_str(mem_type));
+ return false;
+ }
+
base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
- gte += base / I915_GTT_PAGE_SIZE;
+ dma_addr = intel_ggtt_read_entry(&ggtt->vm, base, &is_present, &is_local);
- pte = ioread64(gte);
- if (!(pte & GEN12_GGTT_PTE_LM)) {
+ if (!is_present) {
drm_err(display->drm,
- "Initial plane programming missing PTE_LM bit\n");
+ "Initial plane FB PTE not present\n");
return false;
}
- dma_addr = pte & GEN12_GGTT_PTE_ADDR_MASK;
-
- if (IS_DGFX(i915))
- mem = i915->mm.regions[INTEL_REGION_LMEM_0];
- else
- mem = i915->mm.stolen_region;
- if (!mem) {
- drm_dbg_kms(display->drm,
- "Initial plane memory region not initialized\n");
+ if (intel_memory_type_is_local(mem->type) != is_local) {
+ drm_err(display->drm,
+ "Initial plane FB PTE unsuitable for %s\n",
+ mem->region.name);
return false;
}
- /*
- * On lmem we don't currently expect this to
- * ever be placed in the stolen portion.
- */
if (dma_addr < mem->region.start || dma_addr > mem->region.end) {
drm_err(display->drm,
"Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n",
@@ -107,42 +120,6 @@ initial_plane_phys_lmem(struct intel_display *display,
return true;
}
-static bool
-initial_plane_phys_smem(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct intel_memory_region *mem;
- u32 base;
-
- base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
-
- mem = i915->mm.stolen_region;
- if (!mem) {
- drm_dbg_kms(display->drm,
- "Initial plane memory region not initialized\n");
- return false;
- }
-
- /* FIXME get and validate the dma_addr from the PTE */
- plane_config->phys_base = base;
- plane_config->mem = mem;
-
- return true;
-}
-
-static bool
-initial_plane_phys(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
- return initial_plane_phys_lmem(display, plane_config);
- else
- return initial_plane_phys_smem(display, plane_config);
-}
-
static struct i915_vma *
initial_plane_vma(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 63301a01906c..1253376c7654 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -5,6 +5,8 @@
#include <linux/bitops.h>
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
@@ -476,13 +478,34 @@ static bool intel_pmdemand_req_complete(struct intel_display *display)
XELPDP_PMDEMAND_REQ_ENABLE);
}
-static void intel_pmdemand_wait(struct intel_display *display)
+static void intel_pmdemand_poll(struct intel_display *display)
{
- if (!wait_event_timeout(display->pmdemand.waitqueue,
- intel_pmdemand_req_complete(display),
- msecs_to_jiffies_timeout(10)))
+ const unsigned int timeout_ms = 10;
+ u32 status;
+ int ret;
+
+ ret = intel_de_wait_custom(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
+ XELPDP_PMDEMAND_REQ_ENABLE, 0,
+ 50, timeout_ms, &status);
+
+ if (ret == -ETIMEDOUT)
drm_err(display->drm,
- "timed out waiting for Punit PM Demand Response\n");
+ "timed out waiting for Punit PM Demand Response within %ums (status 0x%08x)\n",
+ timeout_ms, status);
+}
+
+static void intel_pmdemand_wait(struct intel_display *display)
+{
+ /* Wa_14024400148 For lnl use polling method */
+ if (DISPLAY_VER(display) == 20) {
+ intel_pmdemand_poll(display);
+ } else {
+ if (!wait_event_timeout(display->pmdemand.waitqueue,
+ intel_pmdemand_req_complete(display),
+ msecs_to_jiffies_timeout(10)))
+ drm_err(display->drm,
+ "timed out waiting for Punit PM Demand Response\n");
+ }
}
/* Required to be programmed during Display Init Sequences. */
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 617ce4993172..05e1e5c7e8b7 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -91,7 +91,6 @@ static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
bool pll_enabled, release_cl_override = false;
@@ -134,7 +133,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
release_cl_override = display->platform.cherryview &&
!chv_phy_powergate_ch(display, phy, ch, true);
- if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(display))) {
+ if (vlv_force_pll_on(display, pipe, vlv_get_dpll(display))) {
drm_err(display->drm,
"Failed to force on PLL for pipe %c!\n",
pipe_name(pipe));
@@ -158,7 +157,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
intel_de_posting_read(display, intel_dp->output_reg);
if (!pll_enabled) {
- vlv_force_pll_off(dev_priv, pipe);
+ vlv_force_pll_off(display, pipe);
if (release_cl_override)
chv_phy_powergate_ch(display, phy, ch, false);
@@ -351,21 +350,19 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
static int intel_num_pps(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->platform.valleyview || display->platform.cherryview)
return 2;
if (display->platform.geminilake || display->platform.broxton)
return 2;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
return 2;
- if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_DG1)
return 1;
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
return 2;
return 1;
@@ -374,11 +371,10 @@ static int intel_num_pps(struct intel_display *display)
static bool intel_pps_is_valid(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (intel_dp->pps.pps_idx == 1 &&
- INTEL_PCH_TYPE(i915) >= PCH_ICP &&
- INTEL_PCH_TYPE(i915) <= PCH_ADP)
+ INTEL_PCH_TYPE(display) >= PCH_ICP &&
+ INTEL_PCH_TYPE(display) <= PCH_ADP)
return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
@@ -500,7 +496,6 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
struct pps_registers *regs)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int pps_idx;
memset(regs, 0, sizeof(*regs));
@@ -519,7 +514,7 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
if (display->platform.geminilake || display->platform.broxton ||
- INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ INTEL_PCH_TYPE(display) >= PCH_CNP)
regs->pp_div = INVALID_MMIO_REG;
else
regs->pp_div = PP_DIVISOR(display, pps_idx);
@@ -744,11 +739,11 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->pps.want_panel_vdd;
- lockdep_assert_held(&display->pps.mutex);
-
if (!intel_dp_is_edp(intel_dp))
return false;
+ lockdep_assert_held(&display->pps.mutex);
+
cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
intel_dp->pps.want_panel_vdd = true;
@@ -925,11 +920,11 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
struct intel_display *display = to_intel_display(intel_dp);
- lockdep_assert_held(&display->pps.mutex);
-
if (!intel_dp_is_edp(intel_dp))
return;
+ lockdep_assert_held(&display->pps.mutex);
+
INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd,
"[ENCODER:%d:%s] %s VDD not forced on",
dp_to_dig_port(intel_dp)->base.base.base.id,
@@ -1592,7 +1587,6 @@ static void pps_init_delays(struct intel_dp *intel_dp)
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pp_on, pp_off, port_sel = 0;
int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
struct pps_registers regs;
@@ -1639,7 +1633,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
* power sequencer any more. */
if (display->platform.valleyview || display->platform.cherryview) {
port_sel = PANEL_PORT_SELECT_VLV(port);
- } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ } else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) {
switch (port) {
case PORT_A:
port_sel = PANEL_PORT_SELECT_DPA;
@@ -1792,9 +1786,7 @@ void intel_pps_unlock_regs_wa(struct intel_display *display)
void intel_pps_setup(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (HAS_PCH_SPLIT(i915) || display->platform.geminilake || display->platform.broxton)
+ if (HAS_PCH_SPLIT(display) || display->platform.geminilake || display->platform.broxton)
display->pps.mmio_base = PCH_PPS_BASE;
else if (display->platform.valleyview || display->platform.cherryview)
display->pps.mmio_base = VLV_PPS_BASE;
@@ -1837,7 +1829,6 @@ void intel_pps_connector_debugfs_add(struct intel_connector *connector)
void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = INVALID_PIPE;
@@ -1846,7 +1837,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
if (drm_WARN_ON(display->drm, HAS_DDI(display)))
return;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
u32 port_sel;
pp_reg = PP_CONTROL(display, 0);
@@ -1855,7 +1846,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
switch (port_sel) {
case PANEL_PORT_SELECT_LVDS:
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+ intel_lvds_port_enabled(display, PCH_LVDS, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPA:
g4x_dp_port_enabled(display, DP_A, PORT_A, &panel_pipe);
@@ -1883,7 +1874,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
drm_WARN_ON(display->drm,
port_sel != PANEL_PORT_SELECT_LVDS);
- intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
+ intel_lvds_port_enabled(display, LVDS, &panel_pipe);
}
val = intel_de_read(display, pp_reg);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 4e938bad808c..430ad4ef7146 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_vblank.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -36,7 +37,9 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_frontbuffer.h"
@@ -45,6 +48,7 @@
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
#include "intel_vblank.h"
+#include "intel_vrr.h"
#include "skl_universal_plane.h"
/**
@@ -463,8 +467,8 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
if (DISPLAY_VER(display) >= 9) {
u32 val;
- val = intel_de_rmw(dev_priv,
- PSR_EVENT(dev_priv, cpu_transcoder),
+ val = intel_de_rmw(display,
+ PSR_EVENT(display, cpu_transcoder),
0, 0);
psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
@@ -689,7 +693,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 aux_clock_divider, aux_ctl;
/* write DP_SET_POWER=D0 */
@@ -704,7 +707,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
- intel_de_write(dev_priv,
+ intel_de_write(display,
psr_aux_data_reg(display, cpu_transcoder, i >> 2),
intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
@@ -794,31 +797,10 @@ static void _psr_enable_sink(struct intel_dp *intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
}
-static void intel_psr_enable_sink_alpm(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- u8 val;
-
- /*
- * eDP Panel Replay uses always ALPM
- * PSR2 uses ALPM but PSR1 doesn't
- */
- if (!intel_dp_is_edp(intel_dp) || (!crtc_state->has_panel_replay &&
- !crtc_state->has_sel_update))
- return;
-
- val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
-
- if (crtc_state->has_panel_replay)
- val |= DP_ALPM_MODE_AUX_LESS;
-
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
-}
-
static void intel_psr_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- intel_psr_enable_sink_alpm(intel_dp, crtc_state);
+ intel_alpm_enable_sink(intel_dp, crtc_state);
crtc_state->has_panel_replay ?
_panel_replay_enable_sink(intel_dp, crtc_state) :
@@ -839,7 +821,6 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val = 0;
if (DISPLAY_VER(display) >= 11)
@@ -873,7 +854,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
* WA 0479: hsw,bdw
* "Do not skip both TP1 and TP2/TP3"
*/
- if (DISPLAY_VER(dev_priv) < 9 &&
+ if (DISPLAY_VER(display) < 9 &&
connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
val |= EDP_PSR_TP2_TP3_TIME_100us;
@@ -906,10 +887,21 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
return idle_frames;
}
+static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ u32 current_dc_state = intel_display_power_get_current_dc_state(display);
+ struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
+
+ return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
+ current_dc_state != DC_STATE_EN_UPTO_DC6) ||
+ intel_dp->psr.active_non_psr_pipes ||
+ READ_ONCE(vblank->enabled);
+}
+
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 max_sleep_time = 0x1f;
u32 val = EDP_PSR_ENABLE;
@@ -919,7 +911,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (DISPLAY_VER(display) < 20)
val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (intel_dp->psr.link_standby)
@@ -935,6 +927,14 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
+
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ is_dc5_dc6_blocked(intel_dp))
+ intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
+ intel_dp->psr.pipe,
+ true);
}
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
@@ -1013,14 +1013,21 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
u32 psr_val = 0;
+ u8 idle_frames;
- val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ is_dc5_dc6_blocked(intel_dp))
+ idle_frames = 0;
+ else
+ idle_frames = psr_compute_idle_frames(intel_dp);
+ val |= EDP_PSR2_IDLE_FRAMES(idle_frames);
- if (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))
+ if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
val |= EDP_SU_TRACK_ENABLE;
if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
@@ -1038,7 +1045,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
}
/* Wa_22012278275:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
static const u8 map[] = {
2, /* 5 lines */
1, /* 6 lines */
@@ -1103,9 +1110,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
static bool
transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
else if (DISPLAY_VER(display) >= 12)
return cpu_transcoder == TRANSCODER_A;
@@ -1183,10 +1188,9 @@ dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = dig_port->base.port;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
return pipe <= PIPE_B && port <= PORT_B;
else
return pipe == PIPE_A && port == PORT_A;
@@ -1197,7 +1201,6 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
struct i915_power_domains *power_domains = &display->power.domains;
u32 exit_scanlines;
@@ -1223,7 +1226,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
return;
/* Wa_16011303918:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
return;
/*
@@ -1264,7 +1267,6 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
@@ -1286,7 +1288,7 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
* For other platforms with SW tracking we can adjust the y coordinates
* to match sink requirement if multiple of 4.
*/
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
y_granularity = intel_dp->psr.su_y_granularity;
else if (intel_dp->psr.su_y_granularity <= 2)
y_granularity = 4;
@@ -1412,7 +1414,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
@@ -1421,20 +1422,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
/* JSL and EHL only supports eDP 1.3 */
- if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
+ if (display->platform.jasperlake || display->platform.elkhartlake) {
drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
return false;
}
/* Wa_16011181250 */
- if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
- IS_DG2(dev_priv)) {
+ if (display->platform.rocketlake || display->platform.alderlake_s ||
+ display->platform.dg2) {
drm_dbg_kms(display->drm,
"PSR2 is defeatured for this platform\n");
return false;
}
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
drm_dbg_kms(display->drm,
"PSR2 not completely functional in this stepping\n");
return false;
@@ -1453,7 +1454,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable &&
- (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
+ (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
drm_dbg_kms(display->drm,
"PSR2 cannot be enabled since DSC is enabled\n");
return false;
@@ -1486,7 +1487,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
/* Wa_16011303918:adl-p */
if (crtc_state->vrr.enable &&
- IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
drm_dbg_kms(display->drm,
"PSR2 not enabled, not compatible with HW stepping + VRR\n");
return false;
@@ -1604,6 +1605,12 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
}
+ if (crtc_state->crc_enabled) {
+ drm_dbg_kms(display->drm,
+ "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
+ return false;
+ }
+
if (!intel_dp_is_edp(intel_dp))
return true;
@@ -1634,12 +1641,6 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
if (!alpm_config_valid(intel_dp, crtc_state, true))
return false;
- if (crtc_state->crc_enabled) {
- drm_dbg_kms(display->drm,
- "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
- return false;
- }
-
return true;
}
@@ -1658,6 +1659,9 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_crtc *crtc;
+ u8 active_pipes = 0;
if (!psr_global_enabled(intel_dp)) {
drm_dbg_kms(display->drm, "PSR disabled by flag\n");
@@ -1711,6 +1715,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm,
"PSR disabled to workaround PSR FSM hang issue\n");
}
+
+ /* Rest is for Wa_16025596647 */
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ /* Not needed by Panel Replay */
+ if (crtc_state->has_panel_replay)
+ return;
+
+ /* We ignore possible secondary PSR/Panel Replay capable eDP */
+ for_each_intel_crtc(display->drm, crtc)
+ active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
+
+ active_pipes = intel_calc_active_pipes(state, active_pipes);
+
+ crtc_state->active_non_psr_pipes = active_pipes &
+ ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1827,7 +1849,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask = 0;
@@ -1866,7 +1887,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* As a workaround leave LPSP unmasked to prevent PSR entry
* when external displays are active.
*/
- if (DISPLAY_VER(display) >= 8 || IS_HASWELL_ULT(dev_priv))
+ if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
mask |= EDP_PSR_DEBUG_MASK_LPSP;
if (DISPLAY_VER(display) < 20)
@@ -1880,7 +1901,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
/* allow PSR with sprite enabled */
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
}
@@ -1903,9 +1924,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
- if (intel_dp_is_edp(intel_dp))
- intel_alpm_configure(intel_dp, crtc_state);
-
/*
* Wa_16013835468
* Wa_14015648006
@@ -1925,7 +1943,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
*/
if (!intel_dp->psr.panel_replay_enabled &&
(IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
- IS_ALDERLAKE_P(dev_priv)))
+ display->platform.alderlake_p))
intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
0, ADLP_1_BASED_X_GRANULARITY);
@@ -1936,10 +1954,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
0,
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
- else if (IS_ALDERLAKE_P(dev_priv))
+ else if (display->platform.alderlake_p)
intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
CLKGATE_DIS_MISC_DMASC_GATING_DIS);
}
+
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ !intel_dp->psr.panel_replay_enabled)
+ intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);
+
+ intel_alpm_configure(intel_dp, crtc_state);
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
@@ -1995,6 +2021,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
intel_dp->psr.req_psr2_sdp_prior_scanline =
crtc_state->req_psr2_sdp_prior_scanline;
+ intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
if (!psr_interrupt_error_check(intel_dp))
return;
@@ -2006,8 +2033,9 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.sel_update_enabled ? "2" : "1");
/*
- * Enabling here only for PSR. Panel Replay enable bit is already
- * written at this point. See
+ * Enabling sink PSR/Panel Replay here only for PSR. Panel Replay enable
+ * bit is already written at this point. Sink ALPM is enabled here for
+ * PSR and Panel Replay. See
* intel_psr_panel_replay_enable_sink. Modifiers/options:
* - Selective Update
* - Region Early Transport
@@ -2024,7 +2052,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_psr_enable_source(intel_dp, crtc_state);
intel_dp->psr.enabled = true;
- intel_dp->psr.paused = false;
+ intel_dp->psr.pause_counter = 0;
/*
* Link_ok is sticky and set here on PSR enable. We can assume link
@@ -2070,6 +2098,12 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
} else {
+ if (DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
+ intel_dp->psr.pipe,
+ false);
+
val = intel_de_rmw(display,
psr_ctl_reg(display, cpu_transcoder),
EDP_PSR_ENABLE, 0);
@@ -2104,7 +2138,6 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
lockdep_assert_held(&intel_dp->psr.lock);
@@ -2136,7 +2169,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_de_rmw(display,
MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
- else if (IS_ALDERLAKE_P(dev_priv))
+ else if (display->platform.alderlake_p)
intel_de_rmw(display, CLKGATE_DIS_MISC,
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
}
@@ -2144,16 +2177,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
if (intel_dp_is_edp(intel_dp))
intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
- /* Panel Replay on eDP is always using ALPM aux less. */
- if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
- intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
- ALPM_CTL_ALPM_ENABLE |
- ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
-
- intel_de_rmw(display,
- PORT_ALPM_CTL(cpu_transcoder),
- PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
- }
+ if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
+ intel_alpm_disable(intel_dp);
/* Disable PSR on Sink */
if (!intel_dp->psr.panel_replay_enabled) {
@@ -2164,12 +2189,19 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
DP_RECEIVER_ALPM_CONFIG, 0);
}
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ !intel_dp->psr.panel_replay_enabled)
+ intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);
+
intel_dp->psr.enabled = false;
intel_dp->psr.panel_replay_enabled = false;
intel_dp->psr.sel_update_enabled = false;
intel_dp->psr.psr2_sel_fetch_enabled = false;
intel_dp->psr.su_region_et_enabled = false;
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
+ intel_dp->psr.active_non_psr_pipes = 0;
}
/**
@@ -2210,7 +2242,6 @@ void intel_psr_disable(struct intel_dp *intel_dp,
*/
void intel_psr_pause(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
@@ -2223,12 +2254,10 @@ void intel_psr_pause(struct intel_dp *intel_dp)
return;
}
- /* If we ever hit this, we will need to add refcount to pause/resume */
- drm_WARN_ON(display->drm, psr->paused);
-
- intel_psr_exit(intel_dp);
- intel_psr_wait_exit_locked(intel_dp);
- psr->paused = true;
+ if (intel_dp->psr.pause_counter++ == 0) {
+ intel_psr_exit(intel_dp);
+ intel_psr_wait_exit_locked(intel_dp);
+ }
mutex_unlock(&psr->lock);
@@ -2244,6 +2273,7 @@ void intel_psr_pause(struct intel_dp *intel_dp)
*/
void intel_psr_resume(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
@@ -2251,28 +2281,36 @@ void intel_psr_resume(struct intel_dp *intel_dp)
mutex_lock(&psr->lock);
- if (!psr->paused)
- goto unlock;
+ if (!psr->enabled)
+ goto out;
- psr->paused = false;
- intel_psr_activate(intel_dp);
+ if (!psr->pause_counter) {
+ drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
+ goto out;
+ }
-unlock:
+ if (--intel_dp->psr.pause_counter == 0)
+ intel_psr_activate(intel_dp);
+
+out:
mutex_unlock(&psr->lock);
}
/**
- * intel_psr_needs_block_dc_vblank - Check if block dc entry is needed
+ * intel_psr_needs_vblank_notification - Check if PSR needs vblank enable/disable
+ * notification.
* @crtc_state: CRTC status
*
* We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't
* prevent it in case of Panel Replay. Panel Replay switches main link off on
 * DC entry. This means vblank interrupts are not fired, which is a problem if
- * user-space is polling for vblank events.
+ * user-space is polling for vblank events. Wa_16025596647 also needs to
+ * know when vblank is enabled/disabled.
*/
-bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
+bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_encoder *encoder;
for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
@@ -2283,8 +2321,15 @@ bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp_is_edp(intel_dp) &&
- CAN_PANEL_REPLAY(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
+ continue;
+
+ if (CAN_PANEL_REPLAY(intel_dp))
+ return true;
+
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ CAN_PSR(intel_dp))
return true;
}
@@ -2312,37 +2357,76 @@ void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
CURSURFLIVE(display, crtc->pipe), 0);
}
-static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
+/**
+ * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR
+ * @crtc_state: the crtc state
+ *
+ * Return minimum vblank delay needed by PSR.
+ */
+int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!crtc_state->has_psr)
+ return 0;
+
+ /* Wa_14015401596 */
+ if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
+ return 1;
+
+ /* Rest is for SRD_STATUS needed on LunarLake and onwards */
+ if (DISPLAY_VER(display) < 20)
+ return 0;
+
+ /*
+ * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
+ *
+ * To deterministically capture the transition of the state machine
+ * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
+ * one line after the non-delayed V. Blank.
+ *
+ * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
+ * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
+ * - TRANS_VTOTAL[ Vertical Active ])
+ *
+ * SRD_STATUS is used only by PSR1 on PantherLake.
+ * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
+ */
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 0 :
+ if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay ||
+ crtc_state->has_sel_update))
+ return 0;
+ else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update ||
+ intel_crtc_has_type(crtc_state,
+ INTEL_OUTPUT_EDP)))
+ return 0;
+ else
+ return 1;
+}
+
+static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
+{
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
PSR2_MAN_TRK_CTL_ENABLE;
}
static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}
static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}
static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}
@@ -2405,8 +2489,6 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
bool full_update)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = man_trk_ctl_enable_bit_get(display);
/* SF partial frame enable has to be set even on full update */
@@ -2420,7 +2502,7 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (crtc_state->psr2_su_area.y1 == -1)
goto exit;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) {
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
} else {
@@ -2474,13 +2556,12 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u16 y_alignment;
/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
if (crtc_state->dsc.compression_enable &&
- (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14))
+ (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
y_alignment = vdsc_cfg->slice_height;
else
y_alignment = crtc_state->su_y_granularity;
@@ -2601,12 +2682,11 @@ static void
intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
/* Wa_14014971492 */
if (!crtc_state->has_panel_replay &&
((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
- IS_ALDERLAKE_P(i915) || IS_TIGERLAKE(i915))) &&
+ display->platform.alderlake_p || display->platform.tigerlake)) &&
crtc_state->splitter.enable)
crtc_state->psr2_su_area.y1 = 0;
@@ -2807,7 +2887,6 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
@@ -2839,7 +2918,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
new_crtc_state->has_sel_update != psr->sel_update_enabled ||
new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
- (DISPLAY_VER(i915) < 11 && new_crtc_state->wm_level_disabled))
+ (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
intel_psr_disable_locked(intel_dp);
else if (new_crtc_state->wm_level_disabled)
/* Wa_14015648006 */
@@ -3322,7 +3401,7 @@ void intel_psr_flush(struct intel_display *display,
* we have to ensure that the PSR is not activated until
* intel_psr_resume() is called.
*/
- if (intel_dp->psr.paused)
+ if (intel_dp->psr.pause_counter)
goto unlock;
if (origin == ORIGIN_FLIP ||
@@ -3419,29 +3498,14 @@ static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
static void psr_alpm_check(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
- struct drm_dp_aux *aux = &intel_dp->aux;
struct intel_psr *psr = &intel_dp->psr;
- u8 val;
- int r;
if (!psr->sel_update_enabled)
return;
- r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
- if (r != 1) {
- drm_err(display->drm, "Error reading ALPM status\n");
- return;
- }
-
- if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+ if (intel_alpm_get_error(intel_dp)) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- drm_dbg_kms(display->drm,
- "ALPM lock timeout error, disabling PSR\n");
-
- /* Clearing error */
- drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
}
}
@@ -3626,6 +3690,168 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
}
}
+/* Wa_16025596647 */
+static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ bool dc5_dc6_blocked;
+
+ if (!intel_dp->psr.active)
+ return;
+
+ dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
+
+ if (intel_dp->psr.sel_update_enabled)
+ psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
+ psr_compute_idle_frames(intel_dp));
+ else
+ intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
+ intel_dp->psr.pipe,
+ dc5_dc6_blocked);
+}
+
+static void psr_dc5_dc6_wa_work(struct work_struct *work)
+{
+ struct intel_display *display = container_of(work, typeof(*display),
+ psr_dc5_dc6_wa_work);
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+
+ if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled)
+ intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
+
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
+
+/**
+ * intel_psr_notify_dc5_dc6 - Notify PSR about enable/disable of DC5/DC6
+ * @display: intel display struct
+ *
+ * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to schedule
+ * psr_dc5_dc6_wa_work used for applying/removing the workaround.
+ */
+void intel_psr_notify_dc5_dc6(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ schedule_work(&display->psr_dc5_dc6_wa_work);
+}
+
+/**
+ * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa
+ * @display: intel display struct
+ *
+ * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to init
+ * psr_dc5_dc6_wa_work used for applying the workaround.
+ */
+void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
+}
+
+/**
+ * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
+ * @state: intel atomic state
+ * @crtc: intel crtc
+ * @enable: enable/disable
+ *
+ * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to
+ * apply/remove the workaround when a pipe is getting enabled/disabled.
+ */
+void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
+ struct intel_crtc *crtc, bool enable)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_encoder *encoder;
+
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u8 active_non_psr_pipes;
+
+ mutex_lock(&intel_dp->psr.lock);
+
+ if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
+ goto unlock;
+
+ active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;
+
+ if (enable)
+ active_non_psr_pipes |= BIT(crtc->pipe);
+ else
+ active_non_psr_pipes &= ~BIT(crtc->pipe);
+
+ if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
+ goto unlock;
+
+ if ((enable && intel_dp->psr.active_non_psr_pipes) ||
+ (!enable && !intel_dp->psr.active_non_psr_pipes)) {
+ intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
+ goto unlock;
+ }
+
+ intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
+
+ intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
+unlock:
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
+
+/**
+ * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
+ * @display: intel display struct
+ * @enable: enable/disable
+ *
+ * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to
+ * apply/remove the workaround when vblank is getting enabled/disabled.
+ */
+void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
+ bool enable)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (intel_dp->psr.panel_replay_enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ break;
+ }
+
+ if (intel_dp->psr.enabled)
+ intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
+
+ mutex_unlock(&intel_dp->psr.lock);
+ return;
+ }
+
+ /*
+ * NOTE: intel_display_power_set_target_dc_state is used
+ * only by PSR code for DC3CO handling. DC3CO target
+ * state is currently disabled in PSR code. If DC3CO
+ * is taken into use we need to take that into account here
+ * as well.
+ */
+ intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
+ DC_STATE_EN_UPTO_DC6);
+}
+
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
@@ -3634,8 +3860,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
const char *status = "unknown";
u32 val, status_val;
- if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
- intel_dp->psr.panel_replay_enabled)) {
+ if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
+ (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
static const char * const live_status[] = {
"IDLE",
"CAPTURE",
@@ -3728,10 +3954,9 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp,
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
bool enabled;
u32 val, psr2_ctl;
@@ -3740,7 +3965,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
if (!(psr->sink_support || psr->sink_panel_replay_support))
return 0;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&psr->lock);
intel_psr_print_mode(intel_dp, m);
@@ -3822,7 +4047,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
unlock:
mutex_unlock(&psr->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
@@ -3853,9 +4078,7 @@ static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct intel_display *display = data;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
- intel_wakeref_t wakeref;
int ret = -ENODEV;
if (!HAS_PSR(display))
@@ -3866,12 +4089,9 @@ i915_edp_psr_debug_set(void *data, u64 val)
drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
// TODO: split to each transcoder's PSR debug state
- ret = intel_psr_debug_set(intel_dp, val);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ with_intel_display_rpm(display)
+ ret = intel_psr_debug_set(intel_dp, val);
}
return ret;
@@ -4004,3 +4224,13 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
+
+bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * eDP Panel Replay always uses ALPM
+ * PSR2 uses ALPM but PSR1 doesn't
+ */
+ return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
+ crtc_state->has_panel_replay);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index a43a374cff55..73c3fa40844b 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -59,7 +59,13 @@ void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
-bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state);
+bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state);
+void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
+ struct intel_crtc *crtc, bool enable);
+void intel_psr_notify_dc5_dc6(struct intel_display *display);
+void intel_psr_dc5_dc6_wa_init(struct intel_display *display);
+void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
+ bool enable);
bool intel_psr_link_ok(struct intel_dp *intel_dp);
void intel_psr_lock(const struct intel_crtc_state *crtc_state);
@@ -67,7 +73,9 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
void intel_psr_debugfs_register(struct intel_display *display);
+bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 6e2d9929b4d7..8a38df2c0283 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -36,9 +36,9 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -214,18 +214,17 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
{
struct intel_display *display = to_intel_display(&intel_sdvo->base);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bval = val, cval = val;
int i;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
intel_de_write(display, intel_sdvo->sdvo_reg, val);
intel_de_posting_read(display, intel_sdvo->sdvo_reg);
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
intel_de_write(display, intel_sdvo->sdvo_reg, val);
intel_de_posting_read(display, intel_sdvo->sdvo_reg);
}
@@ -1360,14 +1359,13 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_display_mode *mode = &pipe_config->hw.mode;
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
pipe_config->has_pch_encoder = true;
if (!intel_fdi_compute_pipe_bpp(pipe_config))
return -EINVAL;
@@ -1527,7 +1525,6 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_encoder);
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct intel_sdvo_connector_state *sdvo_state =
@@ -1634,7 +1631,7 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
@@ -1670,13 +1667,12 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
bool intel_sdvo_port_enabled(struct intel_display *display,
i915_reg_t sdvo_reg, enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
val = intel_de_read(display, sdvo_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
*pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT;
else if (display->platform.cherryview)
*pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV;
@@ -1841,7 +1837,6 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
@@ -1861,7 +1856,7 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -2036,7 +2031,7 @@ static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
struct intel_display *display = to_intel_display(&intel_sdvo->base);
u16 hotplug;
- if (!I915_HAS_HOTPLUG(display))
+ if (!HAS_HOTPLUG(display))
return 0;
/*
@@ -3367,9 +3362,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo_ddc *ddc,
static bool is_sdvo_port_valid(struct intel_display *display, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
return port == PORT_B;
else
return port == PORT_B || port == PORT_C;
@@ -3384,7 +3377,6 @@ static bool assert_sdvo_port_valid(struct intel_display *display, enum port port
bool intel_sdvo_init(struct intel_display *display,
i915_reg_t sdvo_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
int i;
@@ -3427,7 +3419,7 @@ bool intel_sdvo_init(struct intel_display *display,
}
intel_encoder->compute_config = intel_sdvo_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
intel_encoder->disable = pch_disable_sdvo;
intel_encoder->post_disable = pch_post_disable_sdvo;
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index b9acd9fe160c..2b53ac9f4935 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -5,6 +5,8 @@
#include <linux/math.h>
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_ddi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 1ad6c8a94b3d..fd92e6b89b43 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -36,9 +36,10 @@
#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_rect.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "i9xx_plane.h"
#include "intel_atomic_plane.h"
#include "intel_de.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
index 1d0b84b464c1..4981cc34da05 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
@@ -3,21 +3,21 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_sprite_uapi.h"
-static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+static bool has_dst_key_in_primary_plane(struct intel_display *display)
{
- return DISPLAY_VER(dev_priv) >= 9;
+ return DISPLAY_VER(display) >= 9;
}
static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
const struct drm_intel_sprite_colorkey *set)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
*key = *set;
@@ -34,7 +34,7 @@ static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
* On SKL+ we want dst key enabled on
* the primary and not on the sprite.
*/
- if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+ if (DISPLAY_VER(display) >= 9 && plane->id != PLANE_PRIMARY &&
set->flags & I915_SET_COLORKEY_DESTINATION)
key->flags = 0;
}
@@ -43,7 +43,6 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct intel_display *display = to_intel_display(dev);
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
@@ -61,7 +60,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
@@ -74,7 +73,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
* Also multiple planes can't do destination keying on the same
* pipe simultaneously.
*/
- if (DISPLAY_VER(dev_priv) >= 9 &&
+ if (DISPLAY_VER(display) >= 9 &&
to_intel_plane(plane)->id >= PLANE_3 &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
@@ -99,7 +98,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
* On some platforms we have to configure
* the dst colorkey on the primary plane.
*/
- if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+ if (!ret && has_dst_key_in_primary_plane(display)) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display,
to_intel_plane(plane)->pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index b8d14ed8a56e..c1014e74791f 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -3,8 +3,10 @@
* Copyright © 2019 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -92,11 +94,6 @@ static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
return dig_port->tc;
}
-static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
-{
- return to_i915(tc->dig_port->base.base.dev);
-}
-
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
enum tc_port_mode mode)
{
@@ -219,10 +216,11 @@ __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain doma
static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
+ struct intel_display __maybe_unused *display = to_intel_display(tc->dig_port);
enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
- drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
+ drm_WARN_ON(display->drm, tc->lock_power_domain != domain);
#endif
__tc_cold_unblock(tc, domain, wakeref);
}
@@ -266,13 +264,13 @@ assert_tc_port_power_enabled(struct intel_tc_port *tc)
static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 lane_mask;
- lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
- drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
+ drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
assert_tc_cold_blocked(tc);
lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
@@ -281,13 +279,13 @@ static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 pin_mask;
- pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));
+ pin_mask = intel_de_read(display, PORT_TX_DFLEXPA1(tc->phy_fia));
- drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
+ drm_WARN_ON(display->drm, pin_mask == 0xffffffff);
assert_tc_cold_blocked(tc);
return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
@@ -297,13 +295,12 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
intel_wakeref_t wakeref;
u32 val, pin_assignment;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
pin_assignment =
REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);
@@ -369,7 +366,7 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
@@ -377,10 +374,10 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
assert_tc_cold_blocked(tc);
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
return lnl_tc_port_get_max_lane_count(dig_port);
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return mtl_tc_port_get_max_lane_count(dig_port);
return intel_tc_port_get_max_lane_count(dig_port);
@@ -389,20 +386,20 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
bool lane_reversal = dig_port->lane_reversal;
u32 val;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
lane_reversal && tc->mode != TC_PORT_LEGACY);
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);
switch (required_lanes) {
@@ -423,16 +420,16 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
MISSING_CASE(required_lanes);
}
- intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
+ intel_de_write(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
u32 live_status_mask)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 valid_hpd_mask;
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
if (hweight32(live_status_mask) != 1)
return;
@@ -447,7 +444,7 @@ static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
return;
/* If live status mismatches the VBT flag, trust the live status. */
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
tc->port_name, live_status_mask, valid_hpd_mask);
@@ -490,21 +487,20 @@ icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
- u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
+ u32 isr_bit = display->hotplug.pch_hpd[dig_port->base.hpd_pin];
intel_wakeref_t wakeref;
u32 fia_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref) {
- fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
- pch_isr = intel_de_read(i915, SDEISR);
+ fia_isr = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ pch_isr = intel_de_read(display, SDEISR);
}
if (fia_isr == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, nothing connected\n",
tc->port_name);
return mask;
@@ -531,14 +527,14 @@ static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
*/
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPPMS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assuming not ready\n",
tc->port_name);
return false;
@@ -550,14 +546,14 @@ static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, can't %s ownership\n",
tc->port_name, take ? "take" : "release");
@@ -568,21 +564,21 @@ static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
if (take)
val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
- intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);
+ intel_de_write(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);
return true;
}
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assume not owned\n",
tc->port_name);
return false;
@@ -619,30 +615,30 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
int max_lanes;
max_lanes = intel_tc_port_max_lane_count(dig_port);
if (tc->mode == TC_PORT_LEGACY) {
- drm_WARN_ON(&i915->drm, max_lanes != 4);
+ drm_WARN_ON(display->drm, max_lanes != 4);
return true;
}
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DP_ALT);
/*
* Now we have to re-check the live state, in case the port recently
* became disconnected. Not necessary for legacy mode.
*/
if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
+ drm_dbg_kms(display->drm, "Port %s: PHY sudden disconnect\n",
tc->port_name);
return false;
}
if (max_lanes < required_lanes) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY max lanes %d < required lanes %d\n",
tc->port_name,
max_lanes, required_lanes);
@@ -655,7 +651,7 @@ static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
tc->lock_wakeref = tc_cold_block(tc);
@@ -664,8 +660,8 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc,
if ((!tc_phy_is_ready(tc) ||
!icl_tc_phy_take_ownership(tc, true)) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership (ready %s)\n",
tc->port_name,
str_yes_no(tc_phy_is_ready(tc)));
goto out_unblock_tc_cold;
@@ -733,14 +729,13 @@ tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
intel_wakeref_t wakeref;
u32 val;
with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref)
- val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));
+ val = intel_de_read(display, PORT_TX_DFLEXDPSP(FIA1));
- drm_WARN_ON(&i915->drm, val == 0xffffffff);
+ drm_WARN_ON(display->drm, val == 0xffffffff);
tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
@@ -775,19 +770,18 @@ adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
- u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
- u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
+ u32 cpu_isr_bits = display->hotplug.hpd[hpd_pin];
+ u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
intel_wakeref_t wakeref;
u32 cpu_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
- cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
- pch_isr = intel_de_read(i915, SDEISR);
+ cpu_isr = intel_de_read(display, GEN11_DE_HPD_ISR);
+ pch_isr = intel_de_read(display, SDEISR);
}
if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
@@ -810,15 +804,15 @@ static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
*/
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
u32 val;
assert_display_core_power_enabled(tc);
- val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assuming not ready\n",
tc->port_name);
return false;
@@ -830,12 +824,12 @@ static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
assert_tc_port_power_enabled(tc);
- intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
+ intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);
return true;
@@ -843,13 +837,13 @@ static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
u32 val;
assert_tc_port_power_enabled(tc);
- val = intel_de_read(i915, DDI_BUF_CTL(port));
+ val = intel_de_read(display, DDI_BUF_CTL(port));
return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
@@ -872,7 +866,6 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
enum intel_display_power_domain port_power_domain =
tc_port_power_domain(tc);
intel_wakeref_t port_wakeref;
@@ -885,15 +878,15 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
port_wakeref = intel_display_power_get(display, port_power_domain);
if (!adlp_tc_phy_take_ownership(tc, true) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership\n",
tc->port_name);
goto out_put_port_power;
}
if (!tc_phy_is_ready(tc) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: PHY not ready\n",
tc->port_name);
goto out_release_phy;
}
@@ -965,19 +958,18 @@ static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
- u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin];
- u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
+ u32 pica_isr_bits = display->hotplug.hpd[hpd_pin];
+ u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
intel_wakeref_t wakeref;
u32 pica_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
- pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
- pch_isr = intel_de_read(i915, SDEISR);
+ pica_isr = intel_de_read(display, PICAINTERRUPT_ISR);
+ pch_isr = intel_de_read(display, SDEISR);
}
if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
@@ -994,22 +986,22 @@ static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, reg) & XELPDP_TCSS_POWER_STATE;
+ return intel_de_read(display, reg) & XELPDP_TCSS_POWER_STATE;
}
static bool
xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: timeout waiting for TCSS power to get %s\n",
str_enabled_disabled(enabled),
tc->port_name);
@@ -1069,7 +1061,7 @@ static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool ena
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
__xelpdp_tc_phy_enable_tcss_power(tc, enable);
@@ -1082,7 +1074,7 @@ static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enabl
return true;
out_disable:
- if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
+ if (drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY))
return false;
if (!enable)
@@ -1096,35 +1088,35 @@ out_disable:
static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, reg);
+ val = intel_de_read(display, reg);
if (take)
val |= XELPDP_TC_PHY_OWNERSHIP;
else
val &= ~XELPDP_TC_PHY_OWNERSHIP;
- intel_de_write(i915, reg, val);
+ intel_de_write(display, reg, val);
}
static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, reg) & XELPDP_TC_PHY_OWNERSHIP;
+ return intel_de_read(display, reg) & XELPDP_TC_PHY_OWNERSHIP;
}
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
intel_wakeref_t tc_cold_wref;
enum intel_display_power_domain domain;
@@ -1134,7 +1126,7 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
if (tc->mode != TC_PORT_DISCONNECTED)
tc->lock_wakeref = tc_cold_block(tc);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
(tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
!xelpdp_tc_phy_tcss_power_is_enabled(tc));
@@ -1207,13 +1199,13 @@ tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 mask;
mask = tc->phy_ops->hpd_live_status(tc);
/* The sink can be connected only in a single mode. */
- drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);
+ drm_WARN_ON_ONCE(display->drm, hweight32(mask) > 1);
return mask;
}
@@ -1236,9 +1228,9 @@ static void tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
bool phy_is_ready, bool phy_is_owned)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
- drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
+ drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
return phy_is_ready && phy_is_owned;
}
@@ -1246,8 +1238,7 @@ static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
static bool tc_phy_is_connected(struct intel_tc_port *tc,
enum icl_port_dpll_id port_pll_type)
{
- struct intel_encoder *encoder = &tc->dig_port->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(tc->dig_port);
bool phy_is_ready = tc_phy_is_ready(tc);
bool phy_is_owned = tc_phy_is_owned(tc);
bool is_connected;
@@ -1257,7 +1248,7 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
else
is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
tc->port_name,
str_yes_no(is_connected),
@@ -1270,10 +1261,10 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
if (wait_for(tc_phy_is_ready(tc), 500)) {
- drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
+ drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n",
tc->port_name);
return false;
@@ -1343,7 +1334,7 @@ get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
bool phy_is_ready;
bool phy_is_owned;
@@ -1363,11 +1354,11 @@ tc_phy_get_current_mode(struct intel_tc_port *tc)
if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
} else {
- drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
+ drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT);
mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
tc->port_name,
tc_port_mode_name(mode),
@@ -1407,7 +1398,7 @@ tc_phy_get_target_mode(struct intel_tc_port *tc)
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 live_status_mask = tc_phy_hpd_live_status(tc);
bool connected;
@@ -1421,7 +1412,7 @@ static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
connected = tc->phy_ops->connect(tc, required_lanes);
}
- drm_WARN_ON(&i915->drm, !connected);
+ drm_WARN_ON(display->drm, !connected);
}
static void tc_phy_disconnect(struct intel_tc_port *tc)
@@ -1491,12 +1482,12 @@ static void __intel_tc_port_put_link(struct intel_tc_port *tc)
static bool tc_port_is_enabled(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
assert_tc_port_power_enabled(tc);
- return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
+ return intel_de_read(display, DDI_BUF_CTL(dig_port->base.port)) &
DDI_BUF_CTL_ENABLE;
}
@@ -1509,15 +1500,15 @@ static bool tc_port_is_enabled(struct intel_tc_port *tc)
*/
void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
bool update_mode = false;
mutex_lock(&tc->lock);
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
- drm_WARN_ON(&i915->drm, tc->lock_wakeref);
- drm_WARN_ON(&i915->drm, tc->link_refcount);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->lock_wakeref);
+ drm_WARN_ON(display->drm, tc->link_refcount);
tc_phy_get_hw_state(tc);
/*
@@ -1540,8 +1531,8 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
if (!tc_port_is_enabled(tc)) {
update_mode = true;
} else if (tc->mode == TC_PORT_DISCONNECTED) {
- drm_WARN_ON(&i915->drm, !tc->legacy_port);
- drm_err(&i915->drm,
+ drm_WARN_ON(display->drm, !tc->legacy_port);
+ drm_err(display->drm,
"Port %s: PHY disconnected on enabled port, connecting it\n",
tc->port_name);
update_mode = true;
@@ -1556,28 +1547,28 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
mutex_unlock(&tc->lock);
}
-static bool tc_port_has_active_links(struct intel_tc_port *tc,
- const struct intel_crtc_state *crtc_state)
+static bool tc_port_has_active_streams(struct intel_tc_port *tc,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
- int active_links = 0;
+ int active_streams = 0;
if (dig_port->dp.is_mst) {
/* TODO: get the PLL type for MST, once HW readout is done for it. */
- active_links = intel_dp_mst_encoder_active_links(dig_port);
+ active_streams = intel_dp_mst_active_streams(&dig_port->dp);
} else if (crtc_state && crtc_state->hw.active) {
pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
- active_links = 1;
+ active_streams = 1;
}
- if (active_links && !tc_phy_is_connected(tc, pll_type))
- drm_err(&i915->drm,
- "Port %s: PHY disconnected with %d active link(s)\n",
- tc->port_name, active_links);
+ if (active_streams && !tc_phy_is_connected(tc, pll_type))
+ drm_err(display->drm,
+ "Port %s: PHY disconnected with %d active stream(s)\n",
+ tc->port_name, active_streams);
- return active_links;
+ return active_streams;
}
/**
@@ -1595,13 +1586,13 @@ static bool tc_port_has_active_links(struct intel_tc_port *tc,
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
mutex_lock(&tc->lock);
- drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
- if (!tc_port_has_active_links(tc, crtc_state)) {
+ drm_WARN_ON(display->drm, tc->link_refcount != 1);
+ if (!tc_port_has_active_streams(tc, crtc_state)) {
/*
	 * TBT-alt is the default mode in any case where PHY ownership is not
* held (regardless of the sink's connected live state), so
@@ -1610,7 +1601,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
*/
if (tc->init_mode != TC_PORT_TBT_ALT &&
tc->init_mode != TC_PORT_DISCONNECTED)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
tc->port_name,
tc_port_mode_name(tc->init_mode));
@@ -1618,7 +1609,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
__intel_tc_port_put_link(tc);
}
- drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
+ drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s)\n",
tc->port_name,
tc_port_mode_name(tc->mode));
@@ -1637,12 +1628,12 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
*/
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 mask = ~0;
- drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
+ drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
if (tc->mode != TC_PORT_DISCONNECTED)
mask = BIT(tc->mode);
@@ -1677,14 +1668,14 @@ static int reset_link_commit(struct intel_tc_port *tc,
struct intel_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
struct intel_crtc *crtc;
u8 pipe_mask;
int ret;
- ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx);
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, ctx);
if (ret)
return ret;
@@ -1695,7 +1686,7 @@ static int reset_link_commit(struct intel_tc_port *tc,
if (!pipe_mask)
return 0;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
struct intel_crtc_state *crtc_state;
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
@@ -1713,13 +1704,13 @@ static int reset_link_commit(struct intel_tc_port *tc,
static int reset_link(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *_state;
struct intel_atomic_state *state;
int ret;
- _state = drm_atomic_state_alloc(&i915->drm);
+ _state = drm_atomic_state_alloc(display->drm);
if (!_state)
return -ENOMEM;
@@ -1738,21 +1729,21 @@ static void intel_tc_port_link_reset_work(struct work_struct *work)
{
struct intel_tc_port *tc =
container_of(work, struct intel_tc_port, link_reset_work.work);
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
int ret;
if (!__intel_tc_port_link_needs_reset(tc))
return;
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: TypeC DP-alt sink disconnected, resetting link\n",
tc->port_name);
ret = reset_link(tc);
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
}
bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
@@ -1780,7 +1771,7 @@ void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
static void __intel_tc_port_lock(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
mutex_lock(&tc->lock);
@@ -1790,9 +1781,8 @@ static void __intel_tc_port_lock(struct intel_tc_port *tc,
intel_tc_port_update_mode(tc, required_lanes,
false);
- drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
- !tc_phy_is_owned(tc));
+ drm_WARN_ON(display->drm, tc->mode == TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_TBT_ALT && !tc_phy_is_owned(tc));
}
void intel_tc_port_lock(struct intel_digital_port *dig_port)
@@ -1885,12 +1875,12 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port)
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc;
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
- if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
+ if (drm_WARN_ON(display->drm, tc_port == TC_PORT_NONE))
return -EINVAL;
tc = kzalloc(sizeof(*tc), GFP_KERNEL);
@@ -1900,11 +1890,11 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
dig_port->tc = tc;
tc->dig_port = dig_port;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
tc->phy_ops = &xelpdp_tc_phy_ops;
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
tc->phy_ops = &adlp_tc_phy_ops;
- else if (DISPLAY_VER(i915) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
tc->phy_ops = &tgl_tc_phy_ops;
else
tc->phy_ops = &icl_tc_phy_ops;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 5dbe857ea85b..acf0b3733908 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -33,15 +33,15 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
-#include "intel_display_irq.h"
#include "intel_display_driver.h"
+#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
@@ -1585,19 +1585,17 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crtc *crtc = to_intel_crtc(connector->state->crtc);
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
int type;
/* Disable TV interrupts around load detect or we'll recurse */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irq(&dev_priv->irq_lock);
- i915_disable_pipestat(dev_priv, 0,
+ spin_lock_irq(&display->irq.lock);
+ i915_disable_pipestat(display, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
save_tv_dac = tv_dac = intel_de_read(display, TV_DAC);
@@ -1668,11 +1666,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, 0,
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
return type;
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 4efd4f7d497a..139fa5deba80 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -222,12 +222,15 @@ int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
* However if queried just before the start of vblank we'll get an
* answer that's slightly in the future.
*/
- if (DISPLAY_VER(display) == 2)
- return -1;
- else if (HAS_DDI(display) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return 2;
- else
+ if (DISPLAY_VER(display) >= 20 || display->platform.battlemage)
+ return 1;
+ else if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell)
+ return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ? 2 : 1;
+ else if (DISPLAY_VER(display) >= 3)
return 1;
+ else
+ return -1;
}
/*
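For reference, a minimal sketch (not part of this patch) of how a per-platform scanline offset like the one selected above is typically consumed when converting the raw hardware scanline counter into a logical scanline; the helper name and the exact wrap handling are assumptions:

static int logical_scanline(int hw_scanline, int scanline_offset, int vtotal)
{
	/* wrap so that the last hardware line plus the offset lands back on line 0 */
	return (hw_scanline + scanline_offset) % vtotal;
}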
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 3ed64c17bdff..8e799e225af1 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -9,6 +9,7 @@
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include "i915_utils.h"
#include "intel_crtc.h"
@@ -259,6 +260,15 @@ static int intel_dsc_slice_dimensions_valid(struct intel_crtc_state *pipe_config
return 0;
}
+static bool is_dsi_dsc_1_1(struct intel_crtc_state *crtc_state)
+{
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+
+ return vdsc_cfg->dsc_version_major == 1 &&
+ vdsc_cfg->dsc_version_minor == 1 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI);
+}
+
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(pipe_config);
@@ -317,8 +327,19 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
 * From XE_LPD onwards we support compression bpps in steps of 1
 * up to uncompressed bpp-1, hence add calculations for all the rc
 * parameters
+ *
+ * We don't want to calculate all rc parameters when the panel
+ * is MIPI DSI and it's using DSC 1.1, because some DSI panel
+ * vendors have hardcoded PPS params in the VBT. This causes the
+ * parameters sent from the source, which are derived through
+ * interpolation, to differ from the params the panel expects,
+ * resulting in visible noise on the display.
+ * Furthermore, for DSI panels we currently use bits_per_pixel
+ * (compressed bpp) hardcoded from the VBT (unlike other encoders,
+ * where we find the optimum compressed bpp), so we don't need to
+ * rely on interpolation, as we can get the required rc parameters
+ * from the tables.
*/
- if (DISPLAY_VER(display) >= 13) {
+ if (DISPLAY_VER(display) >= 13 && !is_dsi_dsc_1_1(pipe_config)) {
calculate_rc_params(vdsc_cfg);
} else {
if ((compressed_bpp == 8 ||
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index 684b5d1bc87c..05d140c8032d 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -4,15 +4,20 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
#include <linux/vgaarb.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include <video/vga.h>
+
#include "soc/intel_gmch.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display.h"
#include "intel_vga.h"
+#include "intel_vga_regs.h"
static i915_reg_t intel_vga_cntrl_reg(struct intel_display *display)
{
@@ -24,16 +29,42 @@ static i915_reg_t intel_vga_cntrl_reg(struct intel_display *display)
return VGACNTRL;
}
+static bool has_vga_pipe_sel(struct intel_display *display)
+{
+ if (display->platform.i845g ||
+ display->platform.i865g)
+ return false;
+
+ if (display->platform.valleyview ||
+ display->platform.cherryview)
+ return true;
+
+ return DISPLAY_VER(display) < 7;
+}
+
/* Disable the VGA plane that we never use */
void intel_vga_disable(struct intel_display *display)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
+ enum pipe pipe;
+ u32 tmp;
u8 sr1;
- if (intel_de_read(display, vga_reg) & VGA_DISP_DISABLE)
+ tmp = intel_de_read(display, vga_reg);
+ if (tmp & VGA_DISP_DISABLE)
return;
+ if (display->platform.cherryview)
+ pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK_CHV, tmp);
+ else if (has_vga_pipe_sel(display))
+ pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK, tmp);
+ else
+ pipe = PIPE_A;
+
+ drm_dbg_kms(display->drm, "Disabling VGA plane on pipe %c\n",
+ pipe_name(pipe));
+
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(0x01, VGA_SEQ_I);
@@ -46,39 +77,6 @@ void intel_vga_disable(struct intel_display *display)
intel_de_posting_read(display, vga_reg);
}
-void intel_vga_redisable_power_on(struct intel_display *display)
-{
- i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
-
- if (!(intel_de_read(display, vga_reg) & VGA_DISP_DISABLE)) {
- drm_dbg_kms(display->drm,
- "Something enabled VGA plane, disabling it\n");
- intel_vga_disable(display);
- }
-}
-
-void intel_vga_redisable(struct intel_display *display)
-{
- intel_wakeref_t wakeref;
-
- /*
- * This function can be called both from intel_modeset_setup_hw_state or
- * at a very early point in our resume sequence, where the power well
- * structures are not yet restored. Since this function is at a very
- * paranoid "someone might have enabled VGA while we were not looking"
- * level, just check if the power well is enabled instead of trying to
- * follow the "don't touch the power well if we don't need it" policy
- * the rest of the driver uses.
- */
- wakeref = intel_display_power_get_if_enabled(display, POWER_DOMAIN_VGA);
- if (!wakeref)
- return;
-
- intel_vga_redisable_power_on(display);
-
- intel_display_power_put(display, POWER_DOMAIN_VGA, wakeref);
-}
-
void intel_vga_reset_io_mem(struct intel_display *display)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h
index 824dfc32a199..16d699f3b641 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.h
+++ b/drivers/gpu/drm/i915/display/intel_vga.h
@@ -10,8 +10,6 @@ struct intel_display;
void intel_vga_reset_io_mem(struct intel_display *display);
void intel_vga_disable(struct intel_display *display);
-void intel_vga_redisable(struct intel_display *display);
-void intel_vga_redisable_power_on(struct intel_display *display);
int intel_vga_register(struct intel_display *display);
void intel_vga_unregister(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_vga_regs.h b/drivers/gpu/drm/i915/display/intel_vga_regs.h
new file mode 100644
index 000000000000..cbacced1a69f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga_regs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_VGA_REGS_H__
+#define __INTEL_VGA_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define VGACNTRL _MMIO(0x71400)
+#define VLV_VGACNTRL _MMIO(VLV_DISPLAY_BASE + 0x71400)
+#define CPU_VGACNTRL _MMIO(0x41000)
+#define VGA_DISP_DISABLE REG_BIT(31)
+#define VGA_2X_MODE REG_BIT(30) /* pre-ilk */
+#define VGA_PIPE_SEL_MASK REG_BIT(29) /* pre-ivb */
+#define VGA_PIPE_SEL(pipe) REG_FIELD_PREP(VGA_PIPE_SEL_MASK, (pipe))
+#define VGA_PIPE_SEL_MASK_CHV REG_GENMASK(29, 28) /* chv */
+#define VGA_PIPE_SEL_CHV(pipe) REG_FIELD_PREP(VGA_PIPE_SEL_MASK_CHV, (pipe))
+#define VGA_BORDER_ENABLE REG_BIT(26)
+#define VGA_PIPE_CSC_ENABLE REG_BIT(24) /* ilk+ */
+#define VGA_CENTERING_ENABLE_MASK REG_GENMASK(25, 24) /* pre-ilk */
+#define VGA_PALETTE_READ_SEL REG_BIT(23) /* pre-ivb */
+#define VGA_PALETTE_A_WRITE_DISABLE REG_BIT(22) /* pre-ivb */
+#define VGA_PALETTE_B_WRITE_DISABLE REG_BIT(21) /* pre-ivb */
+#define VGA_LEGACY_8BIT_PALETTE_ENABLE REG_BIT(20)
+#define VGA_PALETTE_BYPASS REG_BIT(19)
+#define VGA_NINE_DOT_DISABLE REG_BIT(18)
+#define VGA_PALETTE_READ_SEL_HI_CHV REG_BIT(15) /* chv */
+#define VGA_PALETTE_C_WRITE_DISABLE_CHV REG_BIT(14) /* chv */
+#define VGA_ACTIVE_THROTTLING_MASK REG_GENMASK(15, 12) /* ilk+ */
+#define VGA_BLANK_THROTTLING_MASK REG_GENMASK(11, 8) /* ilk+ */
+#define VGA_BLINK_DUTY_CYCLE_MASK REG_GENMASK(7, 6)
+#define VGA_VSYNC_BLINK_RATE_MASK REG_GENMASK(5, 0)
+
+#endif /* __INTEL_VGA_REGS_H__ */
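For reference, a minimal sketch (not part of this patch) of how the new VGA_PIPE_SEL fields are meant to pair with the existing REG_FIELD_PREP()/REG_FIELD_GET() helpers; the values are made up for illustration:

	u32 val;
	enum pipe pipe;

	/* encode pipe B into VGACNTRL bits 29:28 on CHV */
	val = VGA_PIPE_SEL_CHV(PIPE_B);
	/* decode it back, as intel_vga_disable() now does for its debug message */
	pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK_CHV, val);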
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index cac49319026d..c6565baf815a 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -4,6 +4,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -32,6 +34,8 @@ bool intel_vrr_is_capable(struct intel_connector *connector)
return false;
fallthrough;
case DRM_MODE_CONNECTOR_DisplayPort:
+ if (connector->mst.dp)
+ return false;
intel_dp = intel_attached_dp(connector);
if (!drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd))
@@ -182,7 +186,8 @@ is_cmrr_frac_required(struct intel_crtc_state *crtc_state)
int calculated_refresh_k, actual_refresh_k, pixel_clock_per_line;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- if (!HAS_CMRR(display))
+ /* Avoid CMRR for now until we have VRR with fixed timings working */
+ if (!HAS_CMRR(display) || true)
return false;
actual_refresh_k =
@@ -222,6 +227,121 @@ cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required)
return vtotal;
}
+static
+void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->cmrr.enable = true;
+ /*
+ * TODO: Compute precise target refresh rate to determine
+ * if video_mode_required should be true. Currently set to
+ * false due to uncertainty about the precise target
+ * refresh rate.
+ */
+ crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
+ crtc_state->vrr.vmin = crtc_state->vrr.vmax;
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+static
+void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->vrr.enable = true;
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+/*
+ * For fixed refresh rate mode, Vmin, Vmax and Flipline are all set to
+ * the Vtotal value.
+ */
+static
+int intel_vrr_fixed_rr_vtotal(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int crtc_vtotal = crtc_state->hw.adjusted_mode.crtc_vtotal;
+
+ if (DISPLAY_VER(display) >= 13)
+ return crtc_vtotal;
+ else
+ return crtc_vtotal -
+ intel_vrr_real_vblank_delay(crtc_state);
+}
+
+static
+int intel_vrr_fixed_rr_vmax(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_fixed_rr_vtotal(crtc_state);
+}
+
+static
+int intel_vrr_fixed_rr_vmin(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return intel_vrr_fixed_rr_vtotal(crtc_state) -
+ intel_vrr_flipline_offset(display);
+}
+
+static
+int intel_vrr_fixed_rr_flipline(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_fixed_rr_vtotal(crtc_state);
+}
+
+void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
+ intel_vrr_fixed_rr_vmin(crtc_state) - 1);
+ intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
+ intel_vrr_fixed_rr_vmax(crtc_state) - 1);
+ intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
+ intel_vrr_fixed_rr_flipline(crtc_state) - 1);
+}
+
+static
+void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state)
+{
+ /*
+ * For fixed rr, vmin = vmax = flipline.
+	 * vmin is already set to crtc_vtotal; set vmax and flipline to the same value.
+ */
+ crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ crtc_state->vrr.flipline = crtc_state->hw.adjusted_mode.crtc_vtotal;
+}
+
+static
+int intel_vrr_compute_vmin(struct intel_crtc_state *crtc_state)
+{
+ /*
+	 * To make fixed rr and vrr work seamlessly, the guardband/pipeline full
+	 * should be set such that it satisfies both the fixed and variable
+	 * timings.
+	 * For this, set vmin to crtc_vtotal. With this we never need to
+	 * change anything to do with the guardband.
+ */
+ return crtc_state->hw.adjusted_mode.crtc_vtotal;
+}
+
+static
+int intel_vrr_compute_vmax(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_info *info = &connector->base.display_info;
+ int vmax;
+
+ vmax = adjusted_mode->crtc_clock * 1000 /
+ (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+ vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+
+ return vmax;
+}
+
void
intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -232,14 +352,9 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct intel_dp *intel_dp = intel_attached_dp(connector);
bool is_edp = intel_dp_is_edp(intel_dp);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
- /*
- * FIXME all joined pipes share the same transcoder.
- * Need to account for that during VRR toggle/push/etc.
- */
- if (crtc_state->joiner_pipes)
+ if (!HAS_VRR(display))
return;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -247,28 +362,40 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
crtc_state->vrr.in_range =
intel_vrr_is_in_range(connector, drm_mode_vrefresh(adjusted_mode));
- if (!crtc_state->vrr.in_range)
- return;
-
- if (HAS_LRR(display))
- crtc_state->update_lrr = true;
- vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
- adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq);
- vmax = adjusted_mode->crtc_clock * 1000 /
- (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+ /*
+ * Allow fixed refresh rate with VRR Timing Generator.
+	 * For now, set vrr.in_range to false to allow fixed_rr but skip actual
+	 * VRR and LRR.
+	 * TODO: For actual VRR with joiner, we need to figure out how to
+ * correctly sequence transcoder level stuff vs. pipe level stuff
+ * in the commit.
+ */
+ if (crtc_state->joiner_pipes)
+ crtc_state->vrr.in_range = false;
- vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal);
- vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+ vmin = intel_vrr_compute_vmin(crtc_state);
- if (vmin >= vmax)
- return;
+ if (crtc_state->vrr.in_range) {
+ if (HAS_LRR(display))
+ crtc_state->update_lrr = true;
+ vmax = intel_vrr_compute_vmax(connector, adjusted_mode);
+ } else {
+ vmax = vmin;
+ }
crtc_state->vrr.vmin = vmin;
crtc_state->vrr.vmax = vmax;
crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+ if (crtc_state->uapi.vrr_enabled && vmin < vmax)
+ intel_vrr_compute_vrr_timings(crtc_state);
+ else if (is_cmrr_frac_required(crtc_state) && is_edp)
+ intel_vrr_compute_cmrr_timings(crtc_state);
+ else
+ intel_vrr_compute_fixed_rr_timings(crtc_state);
+
/*
* flipline determines the min vblank length the hardware will
* generate, and on ICL/TGL flipline>=vmin+1, hence we reduce
@@ -276,29 +403,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
*/
crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display);
- /*
- * When panel is VRR capable and userspace has
- * not enabled adaptive sync mode then Fixed Average
- * Vtotal mode should be enabled.
- */
- if (crtc_state->uapi.vrr_enabled) {
- crtc_state->vrr.enable = true;
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
- } else if (is_cmrr_frac_required(crtc_state) && is_edp) {
- crtc_state->vrr.enable = true;
- crtc_state->cmrr.enable = true;
- /*
- * TODO: Compute precise target refresh rate to determine
- * if video_mode_required should be true. Currently set to
- * false due to uncertainty about the precise target
- * refresh Rate.
- */
- crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
- crtc_state->vrr.vmin = crtc_state->vrr.vmax;
- crtc_state->vrr.flipline = crtc_state->vrr.vmin;
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
- }
-
if (HAS_AS_SDP(display)) {
crtc_state->vrr.vsync_start =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
@@ -340,7 +444,10 @@ static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(display) >= 13)
+ if (DISPLAY_VER(display) >= 14)
+ return VRR_CTL_FLIP_LINE_EN |
+ XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
+ else if (DISPLAY_VER(display) >= 13)
return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
else
@@ -380,14 +487,11 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
lower_32_bits(crtc_state->cmrr.cmrr_n));
}
- intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
- crtc_state->vrr.vmin - 1);
- intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
- crtc_state->vrr.vmax - 1);
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(crtc_state));
- intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
- crtc_state->vrr.flipline - 1);
+ intel_vrr_set_fixed_rr_timings(crtc_state);
+
+ if (!intel_vrr_always_use_vrr_tg(display) && !crtc_state->vrr.enable)
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(crtc_state));
if (HAS_AS_SDP(display))
intel_de_write(display,
@@ -461,6 +565,17 @@ bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
return intel_de_read(display, TRANS_PUSH(display, cpu_transcoder)) & TRANS_PUSH_SEND;
}
+bool intel_vrr_always_use_vrr_tg(struct intel_display *display)
+{
+ if (!HAS_VRR(display))
+ return false;
+
+ if (DISPLAY_VER(display) >= 30)
+ return true;
+
+ return false;
+}
+
void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -469,16 +584,25 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
if (!crtc_state->vrr.enable)
return;
+ intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
+ crtc_state->vrr.vmin - 1);
+ intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
+ crtc_state->vrr.vmax - 1);
+ intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
+ crtc_state->vrr.flipline - 1);
+
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
TRANS_PUSH_EN);
- if (crtc_state->cmrr.enable) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
- trans_vrr_ctl(crtc_state));
- } else {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ if (crtc_state->cmrr.enable) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
+ trans_vrr_ctl(crtc_state));
+ } else {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+ }
}
}
@@ -490,24 +614,77 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
if (!old_crtc_state->vrr.enable)
return;
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(old_crtc_state));
+ intel_de_wait_for_clear(display,
+ TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_STATUS_VRR_EN_LIVE, 1000);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
+ }
+
+ intel_vrr_set_fixed_rr_timings(old_crtc_state);
+}
+
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!HAS_VRR(display))
+ return;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(crtc_state));
+ return;
+ }
+
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
+ TRANS_PUSH_EN);
+
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(old_crtc_state));
- intel_de_wait_for_clear(display,
- TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+}
+
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!HAS_VRR(display))
+ return;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), 0);
+
+ intel_de_wait_for_clear(display, TRANS_VRR_STATUS(display, cpu_transcoder),
VRR_STATUS_VRR_EN_LIVE, 1000);
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
}
+bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->vrr.flipline &&
+ crtc_state->vrr.flipline == crtc_state->vrr.vmax &&
+ crtc_state->vrr.flipline == intel_vrr_vmin_flipline(crtc_state);
+}
+
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 trans_vrr_ctl, trans_vrr_vsync;
+ bool vrr_enable;
trans_vrr_ctl = intel_de_read(display,
TRANS_VRR_CTL(display, cpu_transcoder));
- crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
if (HAS_CMRR(display))
crtc_state->cmrr.enable = (trans_vrr_ctl & VRR_CTL_CMRR_ENABLE);
@@ -536,6 +713,16 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmin = intel_de_read(display,
TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;
+ /*
+ * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
+ * bits are not filled. Since for these platforms TRAN_VMIN is always
+ * filled with crtc_vtotal, use TRAN_VRR_VMIN to get the vtotal for
+ * adjusted_mode.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_state->hw.adjusted_mode.crtc_vtotal =
+ intel_vrr_vmin_vtotal(crtc_state);
+
if (HAS_AS_SDP(display)) {
trans_vrr_vsync =
intel_de_read(display,
@@ -547,6 +734,18 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
}
}
+ vrr_enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
+
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_state->vrr.enable = vrr_enable && !intel_vrr_is_fixed_rr(crtc_state);
+ else
+ crtc_state->vrr.enable = vrr_enable;
+
+ /*
+	 * TODO: For both VRR and CMRR the flag I915_MODE_FLAG_VRR is set in mode_flags.
+	 * Since CMRR is currently disabled, set this flag only for VRR for now.
+	 * Keep this in mind while re-enabling CMRR.
+ */
if (crtc_state->vrr.enable)
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
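A worked example (with assumed, illustrative panel numbers, not taken from this patch) of the vmin/vmax selection introduced above: take a 1920x1080 panel with crtc_clock = 148500 kHz, crtc_htotal = 2200, crtc_vtotal = 1125 and monitor_range.min_vfreq = 48 Hz:

	vmin     = crtc_vtotal                     = 1125
	vmax     = 148500 * 1000 / (2200 * 48)     = 1406
	flipline = vmin                            = 1125

Since 1406 >= crtc_vtotal it is kept as is. With uapi.vrr_enabled and vmin < vmax this yields a VRR configuration; otherwise vmax falls back to vmin and the fixed refresh rate path programs vmin = vmax = flipline = crtc_vtotal.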
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 514822577e8a..38bf9996b883 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -13,6 +13,7 @@ struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_dsb;
+struct intel_display;
bool intel_vrr_is_capable(struct intel_connector *connector);
bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh);
@@ -35,5 +36,10 @@ int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state);
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state);
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state);
+void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_always_use_vrr_tg(struct intel_display *display);
#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
index f00f4cfc58e5..bba82e888db2 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.c
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -5,15 +5,18 @@
#include <linux/debugfs.h>
-#include "i915_drv.h"
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
#include "i9xx_wm.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_wm.h"
#include "skl_watermark.h"
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
- * @i915: i915 device
+ * @display: display device
*
* Calculate watermark values for the various WM regs based on current mode
* and plane configuration.
@@ -44,10 +47,10 @@
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
-void intel_update_watermarks(struct drm_i915_private *i915)
+void intel_update_watermarks(struct intel_display *display)
{
- if (i915->display.funcs.wm->update_wm)
- i915->display.funcs.wm->update_wm(i915);
+ if (display->funcs.wm->update_wm)
+ display->funcs.wm->update_wm(display);
}
int intel_wm_compute(struct intel_atomic_state *state,
@@ -64,10 +67,10 @@ int intel_wm_compute(struct intel_atomic_state *state,
bool intel_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->initial_watermarks) {
- i915->display.funcs.wm->initial_watermarks(state, crtc);
+ if (display->funcs.wm->initial_watermarks) {
+ display->funcs.wm->initial_watermarks(state, crtc);
return true;
}
@@ -77,41 +80,41 @@ bool intel_initial_watermarks(struct intel_atomic_state *state,
void intel_atomic_update_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->atomic_update_watermarks)
- i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
+ if (display->funcs.wm->atomic_update_watermarks)
+ display->funcs.wm->atomic_update_watermarks(state, crtc);
}
void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->optimize_watermarks)
- i915->display.funcs.wm->optimize_watermarks(state, crtc);
+ if (display->funcs.wm->optimize_watermarks)
+ display->funcs.wm->optimize_watermarks(state, crtc);
}
int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->compute_global_watermarks)
- return i915->display.funcs.wm->compute_global_watermarks(state);
+ if (display->funcs.wm->compute_global_watermarks)
+ return display->funcs.wm->compute_global_watermarks(state);
return 0;
}
-void intel_wm_get_hw_state(struct drm_i915_private *i915)
+void intel_wm_get_hw_state(struct intel_display *display)
{
- if (i915->display.funcs.wm->get_hw_state)
- return i915->display.funcs.wm->get_hw_state(i915);
+ if (display->funcs.wm->get_hw_state)
+ return display->funcs.wm->get_hw_state(display);
}
-void intel_wm_sanitize(struct drm_i915_private *i915)
+void intel_wm_sanitize(struct intel_display *display)
{
- if (i915->display.funcs.wm->sanitize)
- return i915->display.funcs.wm->sanitize(i915);
+ if (display->funcs.wm->sanitize)
+ return display->funcs.wm->sanitize(display);
}
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
@@ -137,16 +140,16 @@ bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
return plane_state->uapi.visible;
}
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+void intel_print_wm_latency(struct intel_display *display,
const char *name, const u16 wm[])
{
int level;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
unsigned int latency = wm[level];
if (latency == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s WM%d latency not provided\n",
name, level);
continue;
@@ -156,43 +159,43 @@ void intel_print_wm_latency(struct drm_i915_private *dev_priv,
* - latencies are in us on gen9.
* - before then, WM1+ latency values are in 0.5us units
*/
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
latency *= 10;
else if (level > 0)
latency *= 5;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s WM%d latency %u (%u.%u usec)\n", name, level,
wm[level], latency / 10, latency % 10);
}
}
-void intel_wm_init(struct drm_i915_private *i915)
+void intel_wm_init(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 9)
- skl_wm_init(i915);
+ if (DISPLAY_VER(display) >= 9)
+ skl_wm_init(display);
else
- i9xx_wm_init(i915);
+ i9xx_wm_init(display);
}
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
int level;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
unsigned int latency = wm[level];
/*
* - WM1+ latency values in 0.5us units
* - latencies are in us on gen9/vlv/chv
*/
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.valleyview ||
+ display->platform.cherryview ||
+ display->platform.g4x)
latency *= 10;
else if (level > 0)
latency *= 5;
@@ -201,18 +204,18 @@ static void wm_latency_show(struct seq_file *m, const u16 wm[8])
level, wm[level], latency / 10, latency % 10);
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
}
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.pri_latency;
+ latencies = display->wm.pri_latency;
wm_latency_show(m, latencies);
@@ -221,13 +224,13 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.spr_latency;
+ latencies = display->wm.spr_latency;
wm_latency_show(m, latencies);
@@ -236,13 +239,13 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.cur_latency;
+ latencies = display->wm.cur_latency;
wm_latency_show(m, latencies);
@@ -251,39 +254,39 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) < 5 && !display->platform.g4x)
return -ENODEV;
- return single_open(file, pri_wm_latency_show, dev_priv);
+ return single_open(file, pri_wm_latency_show, display);
}
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
return -ENODEV;
- return single_open(file, spr_wm_latency_show, dev_priv);
+ return single_open(file, spr_wm_latency_show, display);
}
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
return -ENODEV;
- return single_open(file, cur_wm_latency_show, dev_priv);
+ return single_open(file, cur_wm_latency_show, display);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp, u16 wm[8])
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 new[8] = {};
int level;
int ret;
@@ -300,15 +303,15 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
&new[0], &new[1], &new[2], &new[3],
&new[4], &new[5], &new[6], &new[7]);
- if (ret != dev_priv->display.wm.num_levels)
+ if (ret != display->wm.num_levels)
return -EINVAL;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
- for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
wm[level] = new[level];
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
return len;
}
@@ -317,13 +320,13 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.pri_latency;
+ latencies = display->wm.pri_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -332,13 +335,13 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.spr_latency;
+ latencies = display->wm.spr_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -347,13 +350,13 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.cur_latency;
+ latencies = display->wm.cur_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -385,18 +388,18 @@ static const struct file_operations i915_cur_wm_latency_fops = {
.write = cur_wm_latency_write
};
-void intel_wm_debugfs_register(struct drm_i915_private *i915)
+void intel_wm_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_pri_wm_latency_fops);
+ display, &i915_pri_wm_latency_fops);
debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_spr_wm_latency_fops);
+ display, &i915_spr_wm_latency_fops);
debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_cur_wm_latency_fops);
+ display, &i915_cur_wm_latency_fops);
- skl_watermark_debugfs_register(i915);
+ skl_watermark_debugfs_register(display);
}
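A worked example (with assumed raw register values) of the latency unit handling in intel_print_wm_latency() and wm_latency_show() above:

	gen9+,    WM0 raw 5:  5 * 10 = 50  ->  printed as "5 (5.0 usec)"
	pre-gen9, WM0 raw 4:  left as 4    ->  printed as "4 (0.4 usec)"
	pre-gen9, WM1 raw 4:  4 * 5  = 20  ->  printed as "4 (2.0 usec)"

The intermediate value is normalized to 0.1 us units, so the single "latency / 10" plus "latency % 10" format covers both the 1 us (gen9+) and 0.5 us (older WM1+) register granularities, with pre-gen9 WM0 values treated as already being in 0.1 us units.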
diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h
index 7d3a447054b3..9ad4e9eae5ca 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.h
+++ b/drivers/gpu/drm/i915/display/intel_wm.h
@@ -8,13 +8,13 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_plane_state;
-void intel_update_watermarks(struct drm_i915_private *i915);
+void intel_update_watermarks(struct intel_display *display);
int intel_wm_compute(struct intel_atomic_state *state,
struct intel_crtc *crtc);
bool intel_initial_watermarks(struct intel_atomic_state *state,
@@ -24,13 +24,13 @@ void intel_atomic_update_watermarks(struct intel_atomic_state *state,
void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_compute_global_watermarks(struct intel_atomic_state *state);
-void intel_wm_get_hw_state(struct drm_i915_private *i915);
-void intel_wm_sanitize(struct drm_i915_private *i915);
+void intel_wm_get_hw_state(struct intel_display *display);
+void intel_wm_sanitize(struct intel_display *display);
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
-void intel_print_wm_latency(struct drm_i915_private *i915,
+void intel_print_wm_latency(struct intel_display *display,
const char *name, const u16 wm[]);
-void intel_wm_init(struct drm_i915_private *i915);
-void intel_wm_debugfs_register(struct drm_i915_private *i915);
+void intel_wm_init(struct intel_display *display);
+void intel_wm_debugfs_register(struct intel_display *display);
#endif /* __INTEL_WM_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index ee81220a7c88..c855426544cf 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -3,8 +3,10 @@
* Copyright © 2020 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 70e550539bb2..c7b336359a5e 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -601,7 +601,7 @@ static u32 tgl_plane_min_alignment(struct intel_plane *plane,
* Figure out what's going on here...
*/
if (display->platform.alderlake_p &&
- intel_plane_can_async_flip(plane, fb->modifier))
+ intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
return mult * 16 * 1024;
switch (fb->modifier) {
@@ -2666,6 +2666,7 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = skl_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs icl_plane_funcs = {
@@ -2675,6 +2676,7 @@ static const struct drm_plane_funcs icl_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = icl_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs tgl_plane_funcs = {
@@ -2684,28 +2686,29 @@ static const struct drm_plane_funcs tgl_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = tgl_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static void
skl_plane_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_enable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&display->irq.lock);
}
static void
skl_plane_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_disable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&display->irq.lock);
}
static bool skl_plane_has_rc_ccs(struct intel_display *display,
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 621e97943542..8080f777910a 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -19,6 +19,7 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fixed.h"
@@ -34,7 +35,7 @@
*/
#define DSB_EXE_TIME 100
-static void skl_sagv_disable(struct drm_i915_private *i915);
+static void skl_sagv_disable(struct intel_display *display);
/* Stores plane specific WM parameters */
struct skl_wm_params {
@@ -69,23 +70,21 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display)
 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
* so assume we'll always need it in order to avoid underruns.
*/
-static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
+static bool skl_needs_memory_bw_wa(struct intel_display *display)
{
- return DISPLAY_VER(i915) == 9;
+ return DISPLAY_VER(display) == 9;
}
bool
-intel_has_sagv(struct drm_i915_private *i915)
+intel_has_sagv(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
return HAS_SAGV(display) && display->sagv.status != I915_SAGV_NOT_CONTROLLED;
}
static u32
-intel_sagv_block_time(struct drm_i915_private *i915)
+intel_sagv_block_time(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (DISPLAY_VER(display) >= 14) {
u32 val;
@@ -115,10 +114,8 @@ intel_sagv_block_time(struct drm_i915_private *i915)
}
}
-static void intel_sagv_init(struct drm_i915_private *i915)
+static void intel_sagv_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
if (!HAS_SAGV(display))
display->sagv.status = I915_SAGV_NOT_CONTROLLED;
@@ -127,14 +124,14 @@ static void intel_sagv_init(struct drm_i915_private *i915)
* For icl+ this was already determined by intel_bw_init_hw().
*/
if (DISPLAY_VER(display) < 11)
- skl_sagv_disable(i915);
+ skl_sagv_disable(display);
drm_WARN_ON(display->drm, display->sagv.status == I915_SAGV_UNKNOWN);
- display->sagv.block_time_us = intel_sagv_block_time(i915);
+ display->sagv.block_time_us = intel_sagv_block_time(display);
drm_dbg_kms(display->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
- str_yes_no(intel_has_sagv(i915)), display->sagv.block_time_us);
+ str_yes_no(intel_has_sagv(display)), display->sagv.block_time_us);
/* avoid overflow when adding with wm0 latency/etc. */
if (drm_WARN(display->drm, display->sagv.block_time_us > U16_MAX,
@@ -142,7 +139,7 @@ static void intel_sagv_init(struct drm_i915_private *i915)
display->sagv.block_time_us))
display->sagv.block_time_us = 0;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
display->sagv.block_time_us = 0;
}
@@ -157,17 +154,18 @@ static void intel_sagv_init(struct drm_i915_private *i915)
* - All planes can enable watermarks for latencies >= SAGV engine block time
* - We're not using an interlaced display configuration
*/
-static void skl_sagv_enable(struct drm_i915_private *i915)
+static void skl_sagv_enable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (i915->display.sagv.status == I915_SAGV_ENABLED)
+ if (display->sagv.status == I915_SAGV_ENABLED)
return;
- drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
+ drm_dbg_kms(display->drm, "Enabling SAGV\n");
ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
@@ -177,29 +175,30 @@ static void skl_sagv_enable(struct drm_i915_private *i915)
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
*/
- if (IS_SKYLAKE(i915) && ret == -ENXIO) {
- drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ if (display->platform.skylake && ret == -ENXIO) {
+ drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
return;
} else if (ret < 0) {
- drm_err(&i915->drm, "Failed to enable SAGV\n");
+ drm_err(display->drm, "Failed to enable SAGV\n");
return;
}
- i915->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
}
-static void skl_sagv_disable(struct drm_i915_private *i915)
+static void skl_sagv_disable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (i915->display.sagv.status == I915_SAGV_DISABLED)
+ if (display->sagv.status == I915_SAGV_DISABLED)
return;
- drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
+ drm_dbg_kms(display->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
@@ -209,47 +208,47 @@ static void skl_sagv_disable(struct drm_i915_private *i915)
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
*/
- if (IS_SKYLAKE(i915) && ret == -ENXIO) {
- drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ if (display->platform.skylake && ret == -ENXIO) {
+ drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
return;
} else if (ret < 0) {
- drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
+ drm_err(display->drm, "Failed to disable SAGV (%d)\n", ret);
return;
}
- i915->display.sagv.status = I915_SAGV_DISABLED;
+ display->sagv.status = I915_SAGV_DISABLED;
}
static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state =
intel_atomic_get_new_bw_state(state);
if (!new_bw_state)
return;
- if (!intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_disable(i915);
+ if (!intel_can_enable_sagv(display, new_bw_state))
+ skl_sagv_disable(display);
}
static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state =
intel_atomic_get_new_bw_state(state);
if (!new_bw_state)
return;
- if (intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_enable(i915);
+ if (intel_can_enable_sagv(display, new_bw_state))
+ skl_sagv_enable(display);
}
static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *old_bw_state =
intel_atomic_get_old_bw_state(state);
const struct intel_bw_state *new_bw_state =
@@ -267,7 +266,7 @@ static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_bw_state->base.changed);
- drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+ drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
old_mask, new_mask);
/*
@@ -276,12 +275,12 @@ static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
* time. Also masking should be done before updating the configuration
* and unmasking afterwards.
*/
- icl_pcode_restrict_qgv_points(i915, new_mask);
+ icl_pcode_restrict_qgv_points(display, new_mask);
}
static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *old_bw_state =
intel_atomic_get_old_bw_state(state);
const struct intel_bw_state *new_bw_state =
@@ -299,7 +298,7 @@ static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_bw_state->base.changed);
- drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+ drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
old_mask, new_mask);
/*
@@ -308,12 +307,12 @@ static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
* time. Also masking should be done before updating the configuration
* and unmasking afterwards.
*/
- icl_pcode_restrict_qgv_points(i915, new_mask);
+ icl_pcode_restrict_qgv_points(display, new_mask);
}
void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
/*
* Just return if we can't control SAGV or don't have it.
@@ -322,10 +321,10 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
* disabled in a BIOS, we are not even allowed to send a PCode request,
* as it will throw an error. So have to check it here.
*/
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
icl_sagv_pre_plane_update(state);
else
skl_sagv_pre_plane_update(state);
@@ -333,7 +332,7 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
void intel_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
/*
* Just return if we can't control SAGV or don't have it.
@@ -342,10 +341,10 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
* disabled in a BIOS, we are not even allowed to send a PCode request,
* as it will throw an error. So have to check it here.
*/
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
icl_sagv_post_plane_update(state);
else
skl_sagv_post_plane_update(state);
@@ -353,12 +352,12 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum plane_id plane_id;
int max_level = INT_MAX;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return false;
if (!crtc_state->hw.active)
@@ -377,7 +376,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
continue;
/* Find the highest enabled wm level for this plane */
- for (level = i915->display.wm.num_levels - 1;
+ for (level = display->wm.num_levels - 1;
!wm->wm[level].enable; --level)
{ }
@@ -423,104 +422,37 @@ static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
return true;
}
-static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (!i915->display.params.enable_sagv)
+ if (!display->params.enable_sagv)
return false;
- if (DISPLAY_VER(i915) >= 12)
+ /*
+ * SAGV is initially forced off because its current
+ * state can't be queried from pcode. Allow SAGV to
+ * be enabled upon the first real commit.
+ */
+ if (crtc_state->inherited)
+ return false;
+
+ if (DISPLAY_VER(display) >= 12)
return tgl_crtc_can_enable_sagv(crtc_state);
else
return skl_crtc_can_enable_sagv(crtc_state);
}
-bool intel_can_enable_sagv(struct drm_i915_private *i915,
+bool intel_can_enable_sagv(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
return false;
return bw_state->pipe_sagv_reject == 0;
}
-static int intel_compute_sagv_mask(struct intel_atomic_state *state)
-{
- struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- int ret;
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- struct intel_bw_state *new_bw_state = NULL;
- const struct intel_bw_state *old_bw_state = NULL;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- new_bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(new_bw_state))
- return PTR_ERR(new_bw_state);
-
- old_bw_state = intel_atomic_get_old_bw_state(state);
-
- /*
- * We store use_sagv_wm in the crtc state rather than relying on
- * that bw state since we have no convenient way to get at the
- * latter from the plane commit hooks (especially in the legacy
- * cursor case).
- *
- * drm_atomic_check_only() gets upset if we pull more crtcs
- * into the state, so we have to calculate this based on the
- * individual intel_crtc_can_enable_sagv() rather than
- * the overall intel_can_enable_sagv(). Otherwise the
- * crtcs not included in the commit would not switch to the
- * SAGV watermarks when we are about to enable SAGV, and that
- * would lead to underruns. This does mean extra power draw
- * when only a subset of the crtcs are blocking SAGV as the
- * other crtcs can't be allowed to use the more optimal
- * normal (ie. non-SAGV) watermarks.
- */
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
- DISPLAY_VER(i915) >= 12 &&
- intel_crtc_can_enable_sagv(new_crtc_state);
-
- if (intel_crtc_can_enable_sagv(new_crtc_state))
- new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
- else
- new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
- }
-
- if (!new_bw_state)
- return 0;
-
- new_bw_state->active_pipes =
- intel_calc_active_pipes(state, old_bw_state->active_pipes);
-
- if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- if (intel_can_enable_sagv(i915, new_bw_state) !=
- intel_can_enable_sagv(i915, old_bw_state)) {
- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
u16 start, u16 end)
{
@@ -530,17 +462,17 @@ static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
return end;
}
-static int intel_dbuf_slice_size(struct drm_i915_private *i915)
+static int intel_dbuf_slice_size(struct intel_display *display)
{
- return DISPLAY_INFO(i915)->dbuf.size /
- hweight8(DISPLAY_INFO(i915)->dbuf.slice_mask);
+ return DISPLAY_INFO(display)->dbuf.size /
+ hweight8(DISPLAY_INFO(display)->dbuf.slice_mask);
}
static void
-skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
+skl_ddb_entry_for_slices(struct intel_display *display, u8 slice_mask,
struct skl_ddb_entry *ddb)
{
- int slice_size = intel_dbuf_slice_size(i915);
+ int slice_size = intel_dbuf_slice_size(display);
if (!slice_mask) {
ddb->start = 0;
@@ -552,10 +484,10 @@ skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
ddb->end = fls(slice_mask) * slice_size;
WARN_ON(ddb->start >= ddb->end);
- WARN_ON(ddb->end > DISPLAY_INFO(i915)->dbuf.size);
+ WARN_ON(ddb->end > DISPLAY_INFO(display)->dbuf.size);
}
-static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
+static unsigned int mbus_ddb_offset(struct intel_display *display, u8 slice_mask)
{
struct skl_ddb_entry ddb;
@@ -564,15 +496,15 @@ static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask
else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
slice_mask = BIT(DBUF_S3);
- skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
+ skl_ddb_entry_for_slices(display, slice_mask, &ddb);
return ddb.start;
}
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+u32 skl_ddb_dbuf_slice_mask(struct intel_display *display,
const struct skl_ddb_entry *entry)
{
- int slice_size = intel_dbuf_slice_size(i915);
+ int slice_size = intel_dbuf_slice_size(display);
enum dbuf_slice start_slice, end_slice;
u8 slice_mask = 0;
@@ -618,15 +550,14 @@ static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
unsigned int *weight_end,
unsigned int *weight_total)
{
- struct drm_i915_private *i915 =
- to_i915(dbuf_state->base.state->base.dev);
+ struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev);
enum pipe pipe;
*weight_start = 0;
*weight_end = 0;
*weight_total = 0;
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
int weight = dbuf_state->weight[pipe];
/*
@@ -652,7 +583,7 @@ static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
static int
skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
unsigned int weight_total, weight_start, weight_end;
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
@@ -674,8 +605,8 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
dbuf_slice_mask = new_dbuf_state->slices[pipe];
- skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
- mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
+ skl_ddb_entry_for_slices(display, dbuf_slice_mask, &ddb_slices);
+ mbus_offset = mbus_ddb_offset(display, dbuf_slice_mask);
ddb_range_size = skl_ddb_entry_size(&ddb_slices);
intel_crtc_dbuf_weights(new_dbuf_state, pipe,
@@ -709,7 +640,7 @@ out:
crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
crtc->base.base.id, crtc->base.name,
old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
@@ -734,10 +665,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */);
-static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
+static unsigned int skl_wm_latency(struct intel_display *display, int level,
const struct skl_wm_params *wp)
{
- unsigned int latency = i915->display.wm.skl_latency[level];
+ unsigned int latency = display->wm.skl_latency[level];
if (latency == 0)
return 0;
@@ -746,11 +677,11 @@ static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
* WaIncreaseLatencyIPCEnabled: kbl,cfl
* Display WA #1141: kbl,cfl
*/
- if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
- skl_watermark_ipc_enabled(i915))
+ if ((display->platform.kabylake || display->platform.coffeelake ||
+ display->platform.cometlake) && skl_watermark_ipc_enabled(display))
latency += 4;
- if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled)
+ if (skl_needs_memory_bw_wa(display) && wp && wp->x_tiled)
latency += 15;
return latency;
@@ -760,8 +691,8 @@ static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
int num_active)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
struct skl_wm_params wp;
@@ -772,10 +703,10 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
DRM_FORMAT_MOD_LINEAR,
DRM_MODE_ROTATE_0,
crtc_state->pixel_rate, &wp, 0, 0);
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
- for (level = 0; level < i915->display.wm.num_levels; level++) {
- unsigned int latency = skl_wm_latency(i915, level, &wp);
+ for (level = 0; level < display->wm.num_levels; level++) {
+ unsigned int latency = skl_wm_latency(display, level, &wp);
skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
if (wm.min_ddb_alloc == U16_MAX)
@@ -797,14 +728,13 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
}
static void
-skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
+skl_ddb_get_hw_plane_state(struct intel_display *display,
const enum pipe pipe,
const enum plane_id plane_id,
struct skl_ddb_entry *ddb,
struct skl_ddb_entry *ddb_y,
u16 *min_ddb, u16 *interim_ddb)
{
- struct intel_display *display = &i915->display;
u32 val;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
@@ -837,7 +767,6 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
u16 *min_ddb, u16 *interim_ddb)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
@@ -849,7 +778,7 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
return;
for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(i915, pipe,
+ skl_ddb_get_hw_plane_state(display, pipe,
plane_id,
&ddb[plane_id],
&ddb_y[plane_id],
@@ -1367,16 +1296,16 @@ static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbu
static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- if (IS_DG2(i915))
+ if (display->platform.dg2)
return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) == 12)
+ else if (DISPLAY_VER(display) == 12)
return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) == 11)
+ else if (DISPLAY_VER(display) == 11)
return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
/*
* For anything else just return one slice yet.
@@ -1416,8 +1345,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
static u64
skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum plane_id plane_id;
u64 data_rate = 0;
@@ -1427,7 +1356,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
data_rate += crtc_state->rel_data_rate[plane_id];
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
data_rate += crtc_state->rel_data_rate_y[plane_id];
}
@@ -1489,7 +1418,7 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
}
}
-static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level,
+static bool skl_need_wm_copy_wa(struct intel_display *display, int level,
const struct skl_plane_wm *wm)
{
/*
@@ -1543,7 +1472,6 @@ static int
skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_dbuf_state *dbuf_state =
@@ -1585,7 +1513,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* Find the highest watermark level for which we can satisfy the block
* requirement of active planes.
*/
- for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ for (level = display->wm.num_levels - 1; level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
@@ -1596,7 +1524,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.plane_ddb[plane_id];
if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
break;
@@ -1615,9 +1543,9 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
}
if (level < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Requested display configuration exceeds system DDB limitations");
- drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
+ drm_dbg_kms(display->drm, "minimum required %d/%d\n",
blocks, iter.size);
return -EINVAL;
}
@@ -1645,7 +1573,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
if (plane_id == PLANE_CURSOR)
continue;
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id)) {
skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
crtc_state->rel_data_rate_y[plane_id]);
@@ -1661,7 +1589,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
*interim_ddb = wm->sagv.wm0.min_ddb_alloc;
}
}
- drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
+ drm_WARN_ON(display->drm, iter.size != 0 || iter.data_rate != 0);
/*
* When we calculated watermark values we didn't know how high
@@ -1669,7 +1597,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* all levels as "enabled." Go back now and disable the ones
* that aren't actually possible.
*/
- for (level++; level < i915->display.wm.num_levels; level++) {
+ for (level++; level < display->wm.num_levels; level++) {
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
@@ -1678,7 +1606,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id))
skl_check_nv12_wm_level(&wm->wm[level],
&wm->uv_wm[level],
@@ -1686,7 +1614,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
else
skl_check_wm_level(&wm->wm[level], ddb);
- if (skl_need_wm_copy_wa(i915, level, wm)) {
+ if (skl_need_wm_copy_wa(display, level, wm)) {
wm->wm[level].blocks = wm->wm[level - 1].blocks;
wm->wm[level].lines = wm->wm[level - 1].lines;
wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
@@ -1708,7 +1636,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id)) {
skl_check_wm_level(&wm->trans_wm, ddb_y);
} else {
@@ -1734,7 +1662,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
static uint_fixed_16_16_t
-skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
+skl_wm_method1(struct intel_display *display, u32 pixel_rate,
u8 cpp, u32 latency, u32 dbuf_block_size)
{
u32 wm_intermediate_val;
@@ -1746,7 +1674,7 @@ skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
wm_intermediate_val = latency * pixel_rate * cpp;
ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 10)
ret = add_fixed16_u32(ret, 1);
return ret;
@@ -1772,7 +1700,7 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 pixel_rate;
u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
@@ -1782,7 +1710,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
pixel_rate = crtc_state->pixel_rate;
- if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
+ if (drm_WARN_ON(display->drm, pixel_rate == 0))
return u32_to_fixed16(0);
crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
@@ -1798,15 +1726,13 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u32 plane_pixel_rate, struct skl_wm_params *wp,
int color_plane, unsigned int pan_x)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 interm_pbpl;
/* only planar format has two planes */
if (color_plane == 1 &&
!intel_format_info_is_yuv_semiplanar(format, modifier)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Non planar format have single plane\n");
return -EINVAL;
}
@@ -1824,7 +1750,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->cpp = format->cpp[color_plane];
wp->plane_pixel_rate = plane_pixel_rate;
- if (DISPLAY_VER(i915) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
wp->dbuf_block_size = 256;
else
@@ -1849,7 +1775,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_min_scanlines = 4;
}
- if (skl_needs_memory_bw_wa(i915))
+ if (skl_needs_memory_bw_wa(display))
wp->y_min_scanlines *= 2;
wp->plane_bytes_per_line = wp->width * wp->cpp;
@@ -1860,7 +1786,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
if (DISPLAY_VER(display) >= 30)
interm_pbpl += (pan_x != 0);
- else if (DISPLAY_VER(i915) >= 10)
+ else if (DISPLAY_VER(display) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
@@ -1869,7 +1795,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
wp->dbuf_block_size);
- if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
+ if (!wp->x_tiled || DISPLAY_VER(display) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
@@ -1906,18 +1832,18 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
plane_state->uapi.src.x1);
}
-static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
+static bool skl_wm_has_lines(struct intel_display *display, int level)
{
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 10)
return true;
/* The number of lines are ignored for the level 0 watermark. */
return level > 0;
}
-static int skl_wm_max_lines(struct drm_i915_private *i915)
+static int skl_wm_max_lines(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return 255;
else
return 31;
@@ -1938,7 +1864,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
u32 blocks, lines, min_ddb_alloc = 0;
@@ -1950,7 +1876,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
return;
}
- method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
+ method1 = skl_wm_method1(display, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
crtc_state->hw.pipe_mode.crtc_htotal,
@@ -1965,7 +1891,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
} else if (latency >= wp->linetime_us) {
- if (DISPLAY_VER(i915) == 9)
+ if (DISPLAY_VER(display) == 9)
selected_result = min_fixed16(method1, method2);
else
selected_result = method2;
@@ -1975,7 +1901,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
blocks = fixed16_to_u32_round_up(selected_result);
- if (DISPLAY_VER(i915) < 30)
+ if (DISPLAY_VER(display) < 30)
blocks++;
/*
@@ -1994,13 +1920,13 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
* channels' impact on the level 0 memory latency and the relevant
* wm calculations.
*/
- if (skl_wm_has_lines(i915, level))
+ if (skl_wm_has_lines(display, level))
blocks = max(blocks,
fixed16_to_u32_round_up(wp->plane_blocks_per_line));
lines = div_round_up_fixed16(selected_result,
wp->plane_blocks_per_line);
- if (DISPLAY_VER(i915) == 9) {
+ if (DISPLAY_VER(display) == 9) {
/* Display WA #1125: skl,bxt,kbl */
if (level == 0 && wp->rc_surface)
blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
@@ -2025,7 +1951,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
}
- if (DISPLAY_VER(i915) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
if (wp->y_tiled) {
int extra_lines;
@@ -2042,10 +1968,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
}
- if (!skl_wm_has_lines(i915, level))
+ if (!skl_wm_has_lines(display, level))
lines = 0;
- if (lines > skl_wm_max_lines(i915)) {
+ if (lines > skl_wm_max_lines(display)) {
/* reject it */
result->min_ddb_alloc = U16_MAX;
return;
@@ -2064,8 +1990,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
result->enable = true;
result->auto_min_alloc_wm_enable = xe3_auto_min_alloc_capable(plane, level);
- if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
- result->can_sagv = latency >= i915->display.sagv.block_time_us;
+ if (DISPLAY_VER(display) < 12 && display->sagv.block_time_us)
+ result->can_sagv = latency >= display->sagv.block_time_us;
}
static void
@@ -2074,13 +2000,13 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_wm_level *result_prev = &levels[0];
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct skl_wm_level *result = &levels[level];
- unsigned int latency = skl_wm_latency(i915, level, wm_params);
+ unsigned int latency = skl_wm_latency(display, level, wm_params);
skl_compute_plane_wm(crtc_state, plane, level, latency,
wm_params, result_prev, result);
@@ -2094,21 +2020,21 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_plane_wm *plane_wm)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
struct skl_wm_level *levels = plane_wm->wm;
unsigned int latency = 0;
- if (i915->display.sagv.block_time_us)
- latency = i915->display.sagv.block_time_us +
- skl_wm_latency(i915, 0, wm_params);
+ if (display->sagv.block_time_us)
+ latency = display->sagv.block_time_us +
+ skl_wm_latency(display, 0, wm_params);
skl_compute_plane_wm(crtc_state, plane, 0, latency,
wm_params, &levels[0],
sagv_wm);
}
-static void skl_compute_transition_wm(struct drm_i915_private *i915,
+static void skl_compute_transition_wm(struct intel_display *display,
struct skl_wm_level *trans_wm,
const struct skl_wm_level *wm0,
const struct skl_wm_params *wp)
@@ -2117,23 +2043,23 @@ static void skl_compute_transition_wm(struct drm_i915_private *i915,
u16 wm0_blocks, trans_offset, blocks;
/* Transition WM don't make any sense if ipc is disabled */
- if (!skl_watermark_ipc_enabled(i915))
+ if (!skl_watermark_ipc_enabled(display))
return;
/*
* WaDisableTWM:skl,kbl,cfl,bxt
* Transition WM are not recommended by HW team for GEN9
*/
- if (DISPLAY_VER(i915) == 9)
+ if (DISPLAY_VER(display) == 9)
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
trans_min = 4;
else
trans_min = 14;
/* Display WA #1140: glk,cnl */
- if (DISPLAY_VER(i915) == 10)
+ if (DISPLAY_VER(display) == 10)
trans_amount = 0;
else
trans_amount = 10; /* This is configurable amount */
@@ -2175,8 +2101,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
struct intel_plane *plane, int color_plane)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
struct skl_wm_params wm_params;
int ret;
@@ -2188,13 +2113,13 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
- skl_compute_transition_wm(i915, &wm->trans_wm,
+ skl_compute_transition_wm(display, &wm->trans_wm,
&wm->wm[0], &wm_params);
- if (DISPLAY_VER(i915) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
- skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
+ skl_compute_transition_wm(display, &wm->sagv.trans_wm,
&wm->sagv.wm0, &wm_params);
}
@@ -2254,8 +2179,8 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
int ret;
@@ -2269,9 +2194,9 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
if (plane_state->planar_linked_plane) {
const struct drm_framebuffer *fb = plane_state->hw.fb;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!intel_wm_plane_visible(crtc_state, plane_state));
- drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
+ drm_WARN_ON(display->drm, !fb->format->is_yuv ||
fb->format->num_planes == 1);
ret = skl_build_plane_wm_single(crtc_state, plane_state,
@@ -2411,15 +2336,14 @@ static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
int wm0_lines)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
int level;
- for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ for (level = display->wm.num_levels - 1; level >= 0; level--) {
int latency;
/* FIXME should we care about the latency w/a's? */
- latency = skl_wm_latency(i915, level, NULL);
+ latency = skl_wm_latency(display, level, NULL);
if (latency == 0)
continue;
@@ -2436,8 +2360,8 @@ static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
int wm0_lines, level;
if (!crtc_state->hw.active)
@@ -2453,9 +2377,9 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
* PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_*
* based on whether we're limited by the vblank duration.
*/
- crtc_state->wm_level_disabled = level < i915->display.wm.num_levels - 1;
+ crtc_state->wm_level_disabled = level < display->wm.num_levels - 1;
- for (level++; level < i915->display.wm.num_levels; level++) {
+ for (level++; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -2471,10 +2395,10 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
}
}
- if (DISPLAY_VER(i915) >= 12 &&
- i915->display.sagv.block_time_us &&
+ if (DISPLAY_VER(display) >= 12 &&
+ display->sagv.block_time_us &&
skl_is_vblank_too_short(crtc_state, wm0_lines,
- i915->display.sagv.block_time_us)) {
+ display->sagv.block_time_us)) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -2492,7 +2416,7 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
static int skl_build_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_plane_state *plane_state;
@@ -2508,7 +2432,7 @@ static int skl_build_pipe_wm(struct intel_atomic_state *state,
if (plane->pipe != crtc->pipe)
continue;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
ret = icl_build_plane_wm(crtc_state, plane_state);
else
ret = skl_build_plane_wm(crtc_state, plane_state);
@@ -2531,11 +2455,10 @@ static bool skl_wm_level_equals(const struct skl_wm_level *l1,
l1->auto_min_alloc_wm_enable == l2->auto_min_alloc_wm_enable;
}
-static bool skl_plane_wm_equals(struct drm_i915_private *i915,
+static bool skl_plane_wm_equals(struct intel_display *display,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
- struct intel_display *display = &i915->display;
int level;
for (level = 0; level < display->wm.num_levels; level++) {
@@ -2590,14 +2513,14 @@ static int
skl_ddb_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
@@ -2608,7 +2531,7 @@ skl_ddb_add_affected_planes(struct intel_atomic_state *state,
continue;
if (new_crtc_state->do_async_flip) {
- drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n",
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -2627,7 +2550,7 @@ skl_ddb_add_affected_planes(struct intel_atomic_state *state,
static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
{
- struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
+ struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev);
u8 enabled_slices;
enum pipe pipe;
@@ -2637,7 +2560,7 @@ static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
*/
enabled_slices = BIT(DBUF_S1);
- for_each_pipe(i915, pipe)
+ for_each_pipe(display, pipe)
enabled_slices |= dbuf_state->slices[pipe];
return enabled_slices;
@@ -2647,7 +2570,6 @@ static int
skl_compute_ddb(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_dbuf_state *old_dbuf_state;
struct intel_dbuf_state *new_dbuf_state = NULL;
struct intel_crtc_state *new_crtc_state;
@@ -2686,7 +2608,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
}
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
enum pipe pipe = crtc->pipe;
new_dbuf_state->slices[pipe] =
@@ -2709,11 +2631,11 @@ skl_compute_ddb(struct intel_atomic_state *state)
if (ret)
return ret;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
- DISPLAY_INFO(i915)->dbuf.slice_mask,
+ DISPLAY_INFO(display)->dbuf.slice_mask,
str_yes_no(old_dbuf_state->joined_mbus),
str_yes_no(new_dbuf_state->joined_mbus));
}
@@ -2731,7 +2653,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
ret = skl_crtc_allocate_ddb(state, crtc);
if (ret)
return ret;
@@ -2758,7 +2680,7 @@ static char enast(bool enable)
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state;
const struct intel_crtc_state *new_crtc_state;
struct intel_plane *plane;
@@ -2775,7 +2697,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
old_pipe_wm = &old_crtc_state->wm.skl.optimal;
new_pipe_wm = &new_crtc_state->wm.skl.optimal;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
@@ -2785,24 +2707,24 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
plane->base.base.id, plane->base.name,
old->start, old->end, new->start, new->end,
skl_ddb_entry_size(old), skl_ddb_entry_size(new));
}
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_plane_wm *old_wm, *new_wm;
old_wm = &old_pipe_wm->planes[plane_id];
new_wm = &new_pipe_wm->planes[plane_id];
- if (skl_plane_wm_equals(i915, old_wm, new_wm))
+ if (skl_plane_wm_equals(display, old_wm, new_wm))
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
" -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
plane->base.base.id, plane->base.name,
@@ -2821,7 +2743,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(new_wm->sagv.wm0.enable),
enast(new_wm->sagv.trans_wm.enable));
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
" -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
plane->base.base.id, plane->base.name,
@@ -2848,7 +2770,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
" -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
@@ -2867,7 +2789,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
new_wm->sagv.wm0.blocks,
new_wm->sagv.trans_wm.blocks);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
" -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
@@ -2945,14 +2867,14 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
@@ -2971,7 +2893,7 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
continue;
if (new_crtc_state->do_async_flip) {
- drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n",
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -3002,7 +2924,6 @@ void
intel_program_dpkgc_latency(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc *crtc;
struct intel_crtc_state *new_crtc_state;
u32 latency = LNL_PKG_C_LATENCY_MASK;
@@ -3028,7 +2949,7 @@ intel_program_dpkgc_latency(struct intel_atomic_state *state)
added_wake_time = DSB_EXE_TIME +
display->sagv.block_time_us;
- latency = skl_watermark_max_latency(i915, 1);
+ latency = skl_watermark_max_latency(display, 1);
/* Wa_22020432604 */
if ((DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30) && !latency) {
@@ -3055,6 +2976,7 @@ intel_program_dpkgc_latency(struct intel_atomic_state *state)
static int
skl_compute_wm(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
struct intel_crtc_state __maybe_unused *new_crtc_state;
int ret, i;
@@ -3069,16 +2991,35 @@ skl_compute_wm(struct intel_atomic_state *state)
if (ret)
return ret;
- ret = intel_compute_sagv_mask(state);
- if (ret)
- return ret;
-
/*
* skl_compute_ddb() will have adjusted the final watermarks
* based on how much ddb is available. Now we can actually
* check if the final watermarks changed.
*/
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * that bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case).
+ *
+ * drm_atomic_check_only() gets upset if we pull more crtcs
+ * into the state, so we have to calculate this based on the
+ * individual intel_crtc_can_enable_sagv() rather than
+ * the overall intel_can_enable_sagv(). Otherwise the
+ * crtcs not included in the commit would not switch to the
+ * SAGV watermarks when we are about to enable SAGV, and that
+ * would lead to underruns. This does mean extra power draw
+ * when only a subset of the crtcs are blocking SAGV as the
+ * other crtcs can't be allowed to use the more optimal
+ * normal (ie. non-SAGV) watermarks.
+ */
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
+ DISPLAY_VER(display) >= 12 &&
+ intel_crtc_can_enable_sagv(new_crtc_state);
+
ret = skl_wm_add_affected_planes(state, crtc);
if (ret)
return ret;
@@ -3149,11 +3090,10 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
}
}
-static void skl_wm_get_hw_state(struct drm_i915_private *i915)
+static void skl_wm_get_hw_state(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ to_intel_dbuf_state(display->dbuf.obj.state);
struct intel_crtc *crtc;
if (HAS_MBUS_JOINING(display))
@@ -3193,7 +3133,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
if (!crtc_state->hw.active)
continue;
- skl_ddb_get_hw_plane_state(i915, crtc->pipe,
+ skl_ddb_get_hw_plane_state(display, crtc->pipe,
plane_id, ddb, ddb_y,
min_ddb, interim_ddb);
@@ -3209,13 +3149,13 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
*/
slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
dbuf_state->joined_mbus);
- mbus_offset = mbus_ddb_offset(i915, slices);
+ mbus_offset = mbus_ddb_offset(display, slices);
crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
/* The slices actually used by the planes on the pipe */
dbuf_state->slices[pipe] =
- skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
+ skl_ddb_dbuf_slice_mask(display, &crtc_state->wm.skl.ddb);
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
@@ -3228,49 +3168,52 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
dbuf_state->enabled_slices = display->dbuf.enabled_slices;
}
-bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
+bool skl_watermark_ipc_enabled(struct intel_display *display)
{
- return i915->display.wm.ipc_enabled;
+ return display->wm.ipc_enabled;
}
-void skl_watermark_ipc_update(struct drm_i915_private *i915)
+void skl_watermark_ipc_update(struct intel_display *display)
{
- if (!HAS_IPC(i915))
+ if (!HAS_IPC(display))
return;
- intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
- skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
+ intel_de_rmw(display, DISP_ARB_CTL2, DISP_IPC_ENABLE,
+ skl_watermark_ipc_enabled(display) ? DISP_IPC_ENABLE : 0);
}
-static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
+static bool skl_watermark_ipc_can_enable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
/* Display WA #0477 WaDisableIPC: skl */
- if (IS_SKYLAKE(i915))
+ if (display->platform.skylake)
return false;
/* Display WA #1141: SKL:all KBL:all CFL */
- if (IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) ||
- IS_COMETLAKE(i915))
+ if (display->platform.kabylake ||
+ display->platform.coffeelake ||
+ display->platform.cometlake)
return i915->dram_info.symmetric_memory;
return true;
}
-void skl_watermark_ipc_init(struct drm_i915_private *i915)
+void skl_watermark_ipc_init(struct intel_display *display)
{
- if (!HAS_IPC(i915))
+ if (!HAS_IPC(display))
return;
- i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
+ display->wm.ipc_enabled = skl_watermark_ipc_can_enable(display);
- skl_watermark_ipc_update(i915);
+ skl_watermark_ipc_update(display);
}
static void
-adjust_wm_latency(struct drm_i915_private *i915,
+adjust_wm_latency(struct intel_display *display,
u16 wm[], int num_levels, int read_latency)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
int i, level;
@@ -3311,31 +3254,32 @@ adjust_wm_latency(struct drm_i915_private *i915,
wm[0] += 1;
}
-static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void mtl_read_wm_latency(struct intel_display *display, u16 wm[])
{
- int num_levels = i915->display.wm.num_levels;
+ int num_levels = display->wm.num_levels;
u32 val;
- val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
+ val = intel_de_read(display, MTL_LATENCY_LP0_LP1);
wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
+ val = intel_de_read(display, MTL_LATENCY_LP2_LP3);
wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
+ val = intel_de_read(display, MTL_LATENCY_LP4_LP5);
wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- adjust_wm_latency(i915, wm, num_levels, 6);
+ adjust_wm_latency(display, wm, num_levels, 6);
}
-static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
{
- int num_levels = i915->display.wm.num_levels;
- int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
- int mult = IS_DG2(i915) ? 2 : 1;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ int num_levels = display->wm.num_levels;
+ int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2;
+ int mult = display->platform.dg2 ? 2 : 1;
u32 val;
int ret;
@@ -3343,7 +3287,7 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
val = 0; /* data0 to be programmed to 0 for first set */
ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
- drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -3356,7 +3300,7 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
val = 1; /* data0 to be programmed to 1 for second set */
ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
- drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -3365,24 +3309,22 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
- adjust_wm_latency(i915, wm, num_levels, read_latency);
+ adjust_wm_latency(display, wm, num_levels, read_latency);
}
-static void skl_setup_wm_latency(struct drm_i915_private *i915)
+static void skl_setup_wm_latency(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
if (HAS_HW_SAGV_WM(display))
display->wm.num_levels = 6;
else
display->wm.num_levels = 8;
if (DISPLAY_VER(display) >= 14)
- mtl_read_wm_latency(i915, display->wm.skl_latency);
+ mtl_read_wm_latency(display, display->wm.skl_latency);
else
- skl_read_wm_latency(i915, display->wm.skl_latency);
+ skl_read_wm_latency(display, display->wm.skl_latency);
- intel_print_wm_latency(i915, "Gen9 Plane", display->wm.skl_latency);
+ intel_print_wm_latency(display, "Gen9 Plane", display->wm.skl_latency);
}
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
@@ -3410,19 +3352,18 @@ static const struct intel_global_state_funcs intel_dbuf_funcs = {
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *dbuf_state;
- dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
+ dbuf_state = intel_atomic_get_global_obj_state(state, &display->dbuf.obj);
if (IS_ERR(dbuf_state))
return ERR_CAST(dbuf_state);
return to_intel_dbuf_state(dbuf_state);
}
-int intel_dbuf_init(struct drm_i915_private *i915)
+int intel_dbuf_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state;
dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
@@ -3457,34 +3398,34 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
const struct intel_dbuf_state *dbuf_state)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 val = 0;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
val |= MBUS_DBOX_I_CREDIT(2);
- if (DISPLAY_VER(i915) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
}
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8);
- else if (IS_ALDERLAKE_P(i915))
+ else if (display->platform.alderlake_p)
/* Wa_22010947358:adl-p */
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
else
val |= MBUS_DBOX_A_CREDIT(2);
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
val |= MBUS_DBOX_B_CREDIT(0xA);
- } else if (IS_ALDERLAKE_P(i915)) {
+ } else if (display->platform.alderlake_p) {
val |= MBUS_DBOX_BW_CREDIT(2);
val |= MBUS_DBOX_B_CREDIT(8);
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
val |= MBUS_DBOX_BW_CREDIT(2);
val |= MBUS_DBOX_B_CREDIT(12);
} else {
@@ -3492,7 +3433,7 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
val |= MBUS_DBOX_B_CREDIT(8);
}
- if (DISPLAY_VERx100(i915) == 1400) {
+ if (DISPLAY_VERx100(display) == 1400) {
if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, dbuf_state->active_pipes))
val |= MBUS_DBOX_BW_8CREDITS_MTL;
else
@@ -3502,22 +3443,22 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
return val;
}
-static void pipe_mbus_dbox_ctl_update(struct drm_i915_private *i915,
+static void pipe_mbus_dbox_ctl_update(struct intel_display *display,
const struct intel_dbuf_state *dbuf_state)
{
struct intel_crtc *crtc;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, dbuf_state->active_pipes)
- intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe),
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, dbuf_state->active_pipes)
+ intel_de_write(display, PIPE_MBUS_DBOX_CTL(crtc->pipe),
pipe_mbus_dbox_ctl(crtc, dbuf_state));
}
static void intel_mbus_dbox_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return;
new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
@@ -3527,7 +3468,7 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state)
new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
return;
- pipe_mbus_dbox_ctl_update(i915, new_dbuf_state);
+ pipe_mbus_dbox_ctl_update(display, new_dbuf_state);
}
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
@@ -3544,10 +3485,9 @@ int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
return intel_atomic_lock_global_state(&dbuf_state->base);
}
-void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
int ratio, bool joined_mbus)
{
- struct intel_display *display = &i915->display;
enum dbuf_slice slice;
if (!HAS_MBUS_JOINING(display))
@@ -3571,7 +3511,7 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
const struct intel_dbuf_state *new_dbuf_state =
@@ -3586,7 +3526,7 @@ static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state
mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio;
}
- intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio,
+ intel_dbuf_mdclk_cdclk_ratio_update(display, mdclk_cdclk_ratio,
new_dbuf_state->joined_mbus);
}
@@ -3594,13 +3534,12 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
const struct intel_dbuf_state *dbuf_state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
const struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus);
- drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes));
+ drm_WARN_ON(display->drm, !dbuf_state->joined_mbus);
+ drm_WARN_ON(display->drm, !is_power_of_2(dbuf_state->active_pipes));
crtc = intel_crtc_for_pipe(display, pipe);
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -3611,7 +3550,7 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
return INVALID_PIPE;
}
-static void mbus_ctl_join_update(struct drm_i915_private *i915,
+static void mbus_ctl_join_update(struct intel_display *display,
const struct intel_dbuf_state *dbuf_state,
enum pipe pipe)
{
@@ -3627,7 +3566,7 @@ static void mbus_ctl_join_update(struct drm_i915_private *i915,
else
mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE;
- intel_de_rmw(i915, MBUS_CTL,
+ intel_de_rmw(display, MBUS_CTL,
MBUS_HASHING_MODE_MASK | MBUS_JOIN |
MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
}
@@ -3635,18 +3574,18 @@ static void mbus_ctl_join_update(struct drm_i915_private *i915,
static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
const struct intel_dbuf_state *new_dbuf_state =
intel_atomic_get_new_dbuf_state(state);
- drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
+ drm_dbg_kms(display->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
str_yes_no(old_dbuf_state->joined_mbus),
str_yes_no(new_dbuf_state->joined_mbus),
pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
- mbus_ctl_join_update(i915, new_dbuf_state, pipe);
+ mbus_ctl_join_update(display, new_dbuf_state, pipe);
}
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
@@ -3751,9 +3690,8 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
gen9_dbuf_slices_update(display, new_slices);
}
-static void skl_mbus_sanitize(struct drm_i915_private *i915)
+static void skl_mbus_sanitize(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(display->dbuf.obj.state);
@@ -3768,28 +3706,28 @@ static void skl_mbus_sanitize(struct drm_i915_private *i915)
dbuf_state->active_pipes);
dbuf_state->joined_mbus = false;
- intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_dbuf_mdclk_cdclk_ratio_update(display,
dbuf_state->mdclk_cdclk_ratio,
dbuf_state->joined_mbus);
- pipe_mbus_dbox_ctl_update(i915, dbuf_state);
- mbus_ctl_join_update(i915, dbuf_state, INVALID_PIPE);
+ pipe_mbus_dbox_ctl_update(display, dbuf_state);
+ mbus_ctl_join_update(display, dbuf_state, INVALID_PIPE);
}
-static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+static bool skl_dbuf_is_misconfigured(struct intel_display *display)
{
const struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ to_intel_dbuf_state(display->dbuf.obj.state);
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
entries[crtc->pipe] = crtc_state->wm.skl.ddb;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
u8 slices;
@@ -3807,7 +3745,7 @@ static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
return false;
}
-static void skl_dbuf_sanitize(struct drm_i915_private *i915)
+static void skl_dbuf_sanitize(struct intel_display *display)
{
struct intel_crtc *crtc;
@@ -3822,12 +3760,12 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915)
* all the planes so that skl_commit_modeset_enables() can
* simply ignore them.
*/
- if (!skl_dbuf_is_misconfigured(i915))
+ if (!skl_dbuf_is_misconfigured(display))
return;
- drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+ drm_dbg_kms(display->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -3837,16 +3775,16 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915)
if (plane_state->uapi.visible)
intel_plane_disable_noatomic(crtc, plane);
- drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+ drm_WARN_ON(display->drm, crtc_state->active_planes != 0);
memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
}
}
-static void skl_wm_sanitize(struct drm_i915_private *i915)
+static void skl_wm_sanitize(struct intel_display *display)
{
- skl_mbus_sanitize(i915);
- skl_dbuf_sanitize(i915);
+ skl_mbus_sanitize(display);
+ skl_dbuf_sanitize(display);
}
void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc)
@@ -3897,7 +3835,6 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct skl_hw_state {
@@ -3912,7 +3849,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
u8 hw_enabled_slices;
int level;
- if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
+ if (DISPLAY_VER(display) < 9 || !new_crtc_state->hw.active)
return;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
@@ -3925,26 +3862,26 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
hw_enabled_slices = intel_enabled_dbuf_slices_mask(display);
- if (DISPLAY_VER(i915) >= 11 &&
- hw_enabled_slices != i915->display.dbuf.enabled_slices)
- drm_err(&i915->drm,
+ if (DISPLAY_VER(display) >= 11 &&
+ hw_enabled_slices != display->dbuf.enabled_slices)
+ drm_err(display->drm,
"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- i915->display.dbuf.enabled_slices,
+ display->dbuf.enabled_slices,
hw_enabled_slices);
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
const struct skl_wm_level *hw_wm_level, *sw_wm_level;
/* Watermarks */
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
hw_wm_level = &hw->wm.planes[plane->id].wm[level];
sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
continue;
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name, level,
sw_wm_level->enable,
@@ -3959,7 +3896,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -3975,7 +3912,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
if (HAS_HW_SAGV_WM(display) &&
!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -3991,7 +3928,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
if (HAS_HW_SAGV_WM(display) &&
!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -4007,7 +3944,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
plane->base.base.id, plane->base.name,
sw_ddb_entry->start, sw_ddb_entry->end,
@@ -4024,29 +3961,29 @@ static const struct intel_wm_funcs skl_wm_funcs = {
.sanitize = skl_wm_sanitize,
};
-void skl_wm_init(struct drm_i915_private *i915)
+void skl_wm_init(struct intel_display *display)
{
- intel_sagv_init(i915);
+ intel_sagv_init(display);
- skl_setup_wm_latency(i915);
+ skl_setup_wm_latency(display);
- i915->display.funcs.wm = &skl_wm_funcs;
+ display->funcs.wm = &skl_wm_funcs;
}
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *i915 = m->private;
+ struct intel_display *display = m->private;
seq_printf(m, "Isochronous Priority Control: %s\n",
- str_yes_no(skl_watermark_ipc_enabled(i915)));
+ str_yes_no(skl_watermark_ipc_enabled(display)));
return 0;
}
static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *i915 = inode->i_private;
+ struct intel_display *display = inode->i_private;
- return single_open(file, skl_watermark_ipc_status_show, i915);
+ return single_open(file, skl_watermark_ipc_status_show, display);
}
static ssize_t skl_watermark_ipc_status_write(struct file *file,
@@ -4054,8 +3991,7 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *i915 = m->private;
- intel_wakeref_t wakeref;
+ struct intel_display *display = m->private;
bool enable;
int ret;
@@ -4063,12 +3999,12 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file,
if (ret < 0)
return ret;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- if (!skl_watermark_ipc_enabled(i915) && enable)
- drm_info(&i915->drm,
+ with_intel_display_rpm(display) {
+ if (!skl_watermark_ipc_enabled(display) && enable)
+ drm_info(display->drm,
"Enabling IPC: WM will be proper only after next commit\n");
- i915->display.wm.ipc_enabled = enable;
- skl_watermark_ipc_update(i915);
+ display->wm.ipc_enabled = enable;
+ skl_watermark_ipc_update(display);
}
return len;
@@ -4085,7 +4021,7 @@ static const struct file_operations skl_watermark_ipc_status_fops = {
static int intel_sagv_status_show(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = m->private;
+ struct intel_display *display = m->private;
static const char * const sagv_status[] = {
[I915_SAGV_UNKNOWN] = "unknown",
[I915_SAGV_DISABLED] = "disabled",
@@ -4093,37 +4029,36 @@ static int intel_sagv_status_show(struct seq_file *m, void *unused)
[I915_SAGV_NOT_CONTROLLED] = "not controlled",
};
- seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
+ seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(display)));
seq_printf(m, "SAGV modparam: %s\n",
- str_enabled_disabled(i915->display.params.enable_sagv));
- seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
- seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
+ str_enabled_disabled(display->params.enable_sagv));
+ seq_printf(m, "SAGV status: %s\n", sagv_status[display->sagv.status]);
+ seq_printf(m, "SAGV block time: %d usec\n", display->sagv.block_time_us);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
-void skl_watermark_debugfs_register(struct drm_i915_private *i915)
+void skl_watermark_debugfs_register(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct drm_minor *minor = display->drm->primary;
if (HAS_IPC(display))
- debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
- &skl_watermark_ipc_status_fops);
+ debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root,
+ display, &skl_watermark_ipc_status_fops);
if (HAS_SAGV(display))
- debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
- &intel_sagv_status_fops);
+ debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root,
+ display, &intel_sagv_status_fops);
}
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level)
+unsigned int skl_watermark_max_latency(struct intel_display *display, int initial_wm_level)
{
int level;
- for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) {
- unsigned int latency = skl_wm_latency(i915, level, NULL);
+ for (level = display->wm.num_levels - 1; level >= initial_wm_level; level--) {
+ unsigned int latency = skl_wm_latency(display, level, NULL);
if (latency)
return latency;
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index d9cff6c54310..95b0b599d5c3 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -12,7 +12,6 @@
#include "intel_global_state.h"
#include "intel_wm_types.h"
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_bw_state;
struct intel_crtc;
@@ -27,11 +26,12 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
-bool intel_can_enable_sagv(struct drm_i915_private *i915,
+bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state);
+bool intel_can_enable_sagv(struct intel_display *display,
const struct intel_bw_state *bw_state);
-bool intel_has_sagv(struct drm_i915_private *i915);
+bool intel_has_sagv(struct intel_display *display);
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+u32 skl_ddb_dbuf_slice_mask(struct intel_display *display,
const struct skl_ddb_entry *entry);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
@@ -45,14 +45,14 @@ void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc);
void skl_wm_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane);
-void skl_watermark_ipc_init(struct drm_i915_private *i915);
-void skl_watermark_ipc_update(struct drm_i915_private *i915);
-bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
-void skl_watermark_debugfs_register(struct drm_i915_private *i915);
+void skl_watermark_ipc_init(struct intel_display *display);
+void skl_watermark_ipc_update(struct intel_display *display);
+bool skl_watermark_ipc_enabled(struct intel_display *display);
+void skl_watermark_debugfs_register(struct intel_display *display);
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915,
+unsigned int skl_watermark_max_latency(struct intel_display *display,
int initial_wm_level);
-void skl_wm_init(struct drm_i915_private *i915);
+void skl_wm_init(struct intel_display *display);
const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id,
@@ -86,13 +86,13 @@ intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
#define intel_atomic_get_new_dbuf_state(state) \
to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
-int intel_dbuf_init(struct drm_i915_private *i915);
+int intel_dbuf_init(struct intel_display *display);
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
int ratio);
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
-void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
int ratio, bool joined_mbus);
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index af717df83197..346737f15fa9 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -251,8 +251,10 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
return 0;
}
-static void band_gap_reset(struct drm_i915_private *dev_priv)
+static void band_gap_reset(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
vlv_flisdsi_get(dev_priv);
vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
@@ -269,13 +271,13 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -298,7 +300,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
else
pipe_config->pipe_bpp = 18;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
/* Enable Frame time stamp based scanline reporting */
pipe_config->mode_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
@@ -468,7 +470,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
vlv_flisdsi_put(dev_priv);
/* bandgap reset is needed after everytime we do power gate */
- band_gap_reset(dev_priv);
+ band_gap_reset(display);
for_each_dsi_port(port, intel_dsi->ports) {
@@ -495,11 +497,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
glk_dsi_device_ready(encoder);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
bxt_dsi_device_ready(encoder);
else
vlv_dsi_device_ready(encoder);
@@ -559,23 +561,22 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
glk_dsi_disable_mipi_io(encoder);
}
-static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port)
+static i915_reg_t port_ctrl_reg(struct intel_display *display, enum port port)
{
- return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ?
+ return display->platform.geminilake || display->platform.broxton ?
BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(port);
}
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
drm_dbg_kms(display->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = display->platform.broxton ?
BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(PORT_A);
intel_de_write(display, MIPI_DEVICE_READY(display, port),
@@ -594,7 +595,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI
* Port A only. MIPI Port C has no similar bit for checking.
*/
- if ((IS_BROXTON(dev_priv) || port == PORT_A) &&
+ if ((display->platform.broxton || port == PORT_A) &&
intel_de_wait_for_clear(display, port_ctrl,
AFE_LATCHOUT, 30))
drm_err(display->drm, "DSI LP not going Low\n");
@@ -612,7 +613,6 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -620,7 +620,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
u32 temp = intel_dsi->pixel_overlap;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
for_each_dsi_port(port, intel_dsi->ports)
intel_de_rmw(display, MIPI_CTRL(display, port),
BXT_PIXEL_OVERLAP_CNT_MASK,
@@ -633,7 +633,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
u32 temp;
temp = intel_de_read(display, port_ctrl);
@@ -644,7 +644,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
- if (IS_BROXTON(dev_priv))
+ if (display->platform.broxton)
temp |= LANE_CONFIGURATION_DUAL_LINK_A;
else
temp |= crtc->pipe ?
@@ -664,12 +664,11 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
/* de-assert ip_tg_enable signal */
intel_de_rmw(display, port_ctrl, DPI_ENABLE, 0);
@@ -730,7 +729,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum port port;
bool glk_cold_boot = false;
@@ -745,7 +743,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
* The BIOS may leave the PLL in a wonky state where it doesn't
* lock. It needs to be fully powered down to fix it.
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_pll_disable(encoder);
bxt_dsi_pll_enable(encoder, pipe_config);
} else {
@@ -753,7 +751,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
vlv_dsi_pll_enable(encoder, pipe_config);
}
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
/* Add MIPI IO reset programming for modeset */
intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
@@ -762,13 +760,13 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
/* Disable DPOunit clock gating, can stall pipe */
- intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
0, DPOUNIT_CLOCK_GATE_DISABLE);
}
- if (!IS_GEMINILAKE(dev_priv))
+ if (!display->platform.geminilake)
intel_dsi_prepare(encoder, pipe_config);
/* Give the panel time to power-on and then deassert its reset */
@@ -776,7 +774,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
msleep(intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
glk_cold_boot = glk_dsi_enable_io(encoder);
/* Prepare port in cold boot(s3/s4) scenario */
@@ -788,7 +786,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_device_ready(encoder);
/* Prepare port in normal boot scenario */
- if (IS_GEMINILAKE(dev_priv) && !glk_cold_boot)
+ if (display->platform.geminilake && !glk_cold_boot)
intel_dsi_prepare(encoder, pipe_config);
/* Send initialization commands in LP mode */
@@ -836,11 +834,11 @@ static void intel_dsi_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&i915->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_backlight_disable(old_conn_state);
@@ -860,9 +858,9 @@ static void intel_dsi_disable(struct intel_atomic_state *state,
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
glk_dsi_clear_device_ready(encoder);
else
vlv_dsi_clear_device_ready(encoder);
@@ -874,13 +872,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
drm_dbg_kms(display->drm, "\n");
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
intel_crtc_vblank_off(old_crtc_state);
skl_scaler_disable(old_crtc_state);
@@ -907,7 +904,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
/* Transition to LP-00 */
intel_dsi_clear_device_ready(encoder);
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
/* Power down DSI regulator to save power */
intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL,
@@ -917,12 +914,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
}
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_pll_disable(encoder);
} else {
vlv_dsi_pll_disable(encoder);
- intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
@@ -939,7 +936,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_wakeref_t wakeref;
enum port port;
@@ -957,13 +953,13 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* configuration, otherwise accessing DSI registers will hang the
* machine. See BSpec North Display Engine registers/MIPI[BXT].
*/
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- !bxt_dsi_pll_is_enabled(dev_priv))
+ if ((display->platform.geminilake || display->platform.broxton) &&
+ !bxt_dsi_pll_is_enabled(display))
goto out_put_power;
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
bool enabled = intel_de_read(display, port_ctrl) & DPI_ENABLE;
/*
@@ -971,10 +967,10 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* bit in port C control register does not get set. As a
* workaround, check pipe B conf instead.
*/
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
port == PORT_C)
enabled = intel_de_read(display,
- TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE;
+ TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
@@ -989,7 +985,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY))
continue;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
u32 tmp = intel_de_read(display, MIPI_CTRL(display, port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
@@ -1177,15 +1173,15 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 pclk;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = bxt_dsi_get_pclk(encoder, pipe_config);
} else {
@@ -1218,7 +1214,6 @@ static void set_dsi_timings(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1253,7 +1248,7 @@ static void set_dsi_timings(struct intel_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
@@ -1307,7 +1302,6 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -1327,7 +1321,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
@@ -1342,7 +1336,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
tmp &= ~READ_REQUEST_PRIORITY_MASK;
intel_de_write(display, MIPI_CTRL(display, port),
tmp | READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
enum pipe pipe = crtc->pipe;
intel_de_rmw(display, MIPI_CTRL(display, port),
@@ -1377,7 +1371,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
if (intel_dsi->clock_stop)
tmp |= CLOCKSTOP;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
tmp |= BXT_DPHY_DEFEATURE_EN;
if (!is_cmd_mode(intel_dsi))
tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
@@ -1424,7 +1418,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
intel_de_write(display, MIPI_INIT_COUNT(display, port),
txclkesc(intel_dsi->escape_clk_div, 100));
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
!intel_dsi->dual_link) {
/*
* BXT spec says write MIPI_INIT_COUNT for
@@ -1461,7 +1455,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
intel_de_write(display, MIPI_LP_BYTECLK(display, port),
intel_dsi->lp_byte_clk);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
intel_de_write(display, MIPI_TLPX_TIME_COUNT(display, port),
intel_dsi->lp_byte_clk);
/* Shadow of DPHY reg */
@@ -1513,18 +1507,17 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
static void intel_dsi_unprepare(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
return;
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x0);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
bxt_dsi_reset_clocks(encoder, port);
else
vlv_dsi_reset_clocks(encoder, port);
@@ -1596,8 +1589,8 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
struct intel_connector *connector = intel_dsi->attached_connector;
+ struct intel_display *display = to_intel_display(connector);
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns, extra_byte_count, tlpx_ui;
u32 ui_num, ui_den;
@@ -1645,7 +1638,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
* For GEMINILAKE dphy_param_reg will be programmed in terms of
* HS byte clock count for other platform in HS ddr clock count
*/
- mul = IS_GEMINILAKE(dev_priv) ? 8 : 2;
+ mul = display->platform.geminilake ? 8 : 2;
ths_prepare_ns = max(mipi_config->ths_prepare,
mipi_config->tclk_prepare);
@@ -1653,7 +1646,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
if (prepare_cnt > PREPARE_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n",
+ drm_dbg_kms(display->drm, "prepare count too high %u\n",
prepare_cnt);
prepare_cnt = PREPARE_CNT_MAX;
}
@@ -1674,7 +1667,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
exit_zero_cnt += 1;
if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n",
+ drm_dbg_kms(display->drm, "exit zero count too high %u\n",
exit_zero_cnt);
exit_zero_cnt = EXIT_ZERO_CNT_MAX;
}
@@ -1685,7 +1678,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
* ui_den, ui_num * mul);
if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n",
+ drm_dbg_kms(display->drm, "clock zero count too high %u\n",
clk_zero_cnt);
clk_zero_cnt = CLK_ZERO_CNT_MAX;
}
@@ -1695,7 +1688,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
if (trail_cnt > TRAIL_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n",
+ drm_dbg_kms(display->drm, "trail count too high %u\n",
trail_cnt);
trail_cnt = TRAIL_CNT_MAX;
}
@@ -1761,7 +1754,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
@@ -1770,7 +1763,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
* On Valleyview some DSI panels lose (v|h)sync when the clock is lower
* than 320000KHz.
*/
- if (IS_VALLEYVIEW(dev_priv))
+ if (display->platform.valleyview)
return 320000;
/*
@@ -1778,7 +1771,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
* picture gets unstable, despite that values are
* correct for DSI PLL and DE PLL.
*/
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
return 158400;
return 0;
@@ -1903,9 +1896,8 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = {
{ }
};
-void vlv_dsi_init(struct drm_i915_private *dev_priv)
+void vlv_dsi_init(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_dsi *intel_dsi;
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -1914,16 +1906,16 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
enum port port;
enum pipe pipe;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
/* There is no detection method for MIPI so rely on VBT */
if (!intel_bios_is_dsi_present(display, &port))
return;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE;
+ if (display->platform.geminilake || display->platform.broxton)
+ display->dsi.mmio_base = BXT_MIPI_BASE;
else
- dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE;
+ display->dsi.mmio_base = VLV_MIPI_BASE;
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
@@ -1938,12 +1930,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
encoder = &intel_dsi->base;
intel_dsi->attached_connector = connector;
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_dsi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_dsi_funcs,
DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
encoder->compute_config = intel_dsi_compute_config;
encoder->pre_enable = intel_dsi_pre_enable;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
encoder->enable = bxt_dsi_enable;
encoder->disable = intel_dsi_disable;
encoder->post_disable = intel_dsi_post_disable;
@@ -1963,7 +1955,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
encoder->pipe_mask = ~0;
else if (port == PORT_A)
encoder->pipe_mask = BIT(PIPE_A);
@@ -1979,10 +1971,10 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
- if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
- if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
/* Create a DSI host (and a device) for each port. */
@@ -1998,18 +1990,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
- drm_dbg_kms(&dev_priv->drm, "no device found\n");
+ drm_dbg_kms(display->drm, "no device found\n");
goto err;
}
/* Use clock read-back from current hw-state for fastboot */
current_mode = intel_encoder_current_mode(encoder);
if (current_mode) {
- drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n",
+ drm_dbg_kms(display->drm, "Calculated pclk %d GOP %d\n",
intel_dsi->pclk, current_mode->clock);
if (intel_fuzzy_clock_check(intel_dsi->pclk,
current_mode->clock)) {
- drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n");
+ drm_dbg_kms(display->drm, "Using GOP pclk\n");
intel_dsi->pclk = current_mode->clock;
}
@@ -2021,7 +2013,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi_vbt_gpio_init(intel_dsi,
intel_dsi_get_hw_state(encoder, &pipe));
- drm_connector_init(&dev_priv->drm, &connector->base, &intel_dsi_connector_funcs,
+ drm_connector_init(display->drm, &connector->base, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(&connector->base, &intel_dsi_connector_helper_funcs);
@@ -2030,12 +2022,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(connector, encoder);
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(connector);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (!intel_panel_preferred_fixed_mode(connector)) {
- drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
+ drm_dbg_kms(display->drm, "no fixed mode\n");
goto err_cleanup_connector;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h
index 277bacfbc551..ff349b5876c2 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.h
@@ -7,14 +7,14 @@
#define __VLV_DSI_H__
enum port;
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_dsi;
#ifdef I915
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state);
-void vlv_dsi_init(struct drm_i915_private *dev_priv);
+void vlv_dsi_init(struct intel_display *display);
#else
static inline void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
@@ -23,7 +23,7 @@ static inline int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
{
return 0;
}
-static inline void vlv_dsi_init(struct drm_i915_private *dev_priv)
+static inline void vlv_dsi_init(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 2ed47e7d1051..7ce924a5ef90 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -57,7 +57,7 @@ static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt,
return dsi_clk_khz;
}
-static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+static int dsi_calc_mnp(struct intel_display *display,
struct intel_crtc_state *config,
int target_dsi_clk)
{
@@ -68,11 +68,11 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
/* target_dsi_clk is expected in kHz */
if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
- drm_err(&dev_priv->drm, "DSI CLK Out of Range\n");
+ drm_err(display->drm, "DSI CLK Out of Range\n");
return -ECHRNG;
}
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
ref_clk = 100000;
n = 4;
m_min = 70;
@@ -116,13 +116,13 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
static int vlv_dsi_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 dsi_clock;
u32 pll_ctl, pll_div;
u32 m = 0, p = 0, n;
- int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
+ int refclk = display->platform.cherryview ? 100000 : 25000;
int i;
pll_ctl = config->dsi_pll.ctrl;
@@ -147,7 +147,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
p--;
if (!p) {
- drm_err(&dev_priv->drm, "wrong P1 divisor\n");
+ drm_err(display->drm, "wrong P1 divisor\n");
return 0;
}
@@ -157,7 +157,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
}
if (i == ARRAY_SIZE(lfsr_converts)) {
- drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
+ drm_err(display->drm, "wrong m_seed programmed\n");
return 0;
}
@@ -175,16 +175,16 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int pclk, dsi_clk, ret;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
- ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
+ ret = dsi_calc_mnp(display, config, dsi_clk);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n");
+ drm_dbg_kms(display->drm, "dsi_calc_mnp failed\n");
return ret;
}
@@ -196,7 +196,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
- drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n",
+ drm_dbg_kms(display->drm, "dsi pll div %08x, ctrl %08x\n",
config->dsi_pll.div, config->dsi_pll.ctrl);
pclk = vlv_dsi_pclk(encoder, config);
@@ -213,9 +213,10 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_cck_get(dev_priv);
@@ -235,20 +236,21 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
DSI_PLL_LOCK, 20)) {
vlv_cck_put(dev_priv);
- drm_err(&dev_priv->drm, "DSI PLL lock failed\n");
+ drm_err(display->drm, "DSI PLL lock failed\n");
return;
}
vlv_cck_put(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
+ drm_dbg_kms(display->drm, "DSI PLL locked\n");
}
void vlv_dsi_pll_disable(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_cck_get(dev_priv);
@@ -260,14 +262,14 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder)
vlv_cck_put(dev_priv);
}
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+bool bxt_dsi_pll_is_enabled(struct intel_display *display)
{
bool enabled;
u32 val;
u32 mask;
mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
- val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(display, BXT_DSI_PLL_ENABLE);
enabled = (val & mask) == mask;
if (!enabled)
@@ -281,17 +283,17 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
* times, and since accessing DSI registers with invalid dividers
* causes a system hang.
*/
- val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
- if (IS_GEMINILAKE(dev_priv)) {
+ val = intel_de_read(display, BXT_DSI_PLL_CTL);
+ if (display->platform.geminilake) {
if (!(val & BXT_DSIA_16X_MASK)) {
- drm_dbg(&dev_priv->drm,
- "Invalid PLL divider (%08x)\n", val);
+ drm_dbg_kms(display->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
} else {
if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
- drm_dbg(&dev_priv->drm,
- "Invalid PLL divider (%08x)\n", val);
+ drm_dbg_kms(display->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
}
@@ -301,29 +303,30 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
void bxt_dsi_pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
- intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
+ intel_de_rmw(display, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
/*
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
+ if (intel_de_wait_for_clear(display, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for PLL lock deassertion\n");
}
u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pll_ctl, pll_div;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_cck_get(dev_priv);
pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
@@ -352,14 +355,14 @@ static int bxt_dsi_pclk(struct intel_encoder *encoder,
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 pclk;
- config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
+ config->dsi_pll.ctrl = intel_de_read(display, BXT_DSI_PLL_CTL);
pclk = bxt_dsi_pclk(encoder, config);
- drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk);
+ drm_dbg_kms(display->drm, "Calculated pclk=%u\n", pclk);
return pclk;
}
@@ -375,10 +378,9 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT);
}
-static void glk_dsi_program_esc_clock(struct drm_device *dev,
- const struct intel_crtc_state *config)
+static void glk_dsi_program_esc_clock(struct intel_display *display,
+ const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dsi_rate = 0;
u32 pll_ratio = 0;
u32 ddr_clk = 0;
@@ -415,17 +417,16 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
txesc2_div = min_t(u32, div2_value, 10);
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1,
+ intel_de_write(display, MIPIO_TXESC_CLK_DIV1,
(1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2,
+ intel_de_write(display, MIPIO_TXESC_CLK_DIV2,
(1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
}
/* Program BXT Mipi clocks and dividers */
-static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
+static void bxt_dsi_program_clocks(struct intel_display *display, enum port port,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
u32 dsi_rate = 0;
u32 pll_ratio = 0;
@@ -436,7 +437,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
u32 mipi_8by3_divider;
/* Clear old configurations */
- tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
@@ -472,13 +473,13 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
- intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(display, BXT_MIPI_CLOCK_CTL, tmp);
}
int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
u32 dsi_clk;
@@ -494,7 +495,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
*/
dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
dsi_ratio_min = BXT_DSI_PLL_RATIO_MIN;
dsi_ratio_max = BXT_DSI_PLL_RATIO_MAX;
} else {
@@ -503,11 +504,11 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
}
if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Can't get a suitable ratio from DSI PLL ratios\n");
return -ECHRNG;
} else
- drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n");
+ drm_dbg_kms(display->drm, "DSI PLL calculation is Done!!\n");
/*
* Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
@@ -519,7 +520,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
/* As per recommendation from hardware team,
* Prog PVD ratio =1 if dsi ratio <= 50
*/
- if (IS_BROXTON(dev_priv) && dsi_ratio <= 50)
+ if (display->platform.broxton && dsi_ratio <= 50)
config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
pclk = bxt_dsi_pclk(encoder, config);
@@ -536,46 +537,45 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
void bxt_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
/* Configure PLL vales */
- intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
- intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL);
+ intel_de_write(display, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+ intel_de_posting_read(display, BXT_DSI_PLL_CTL);
/* Program TX, RX, Dphy clocks */
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
for_each_dsi_port(port, intel_dsi->ports)
- bxt_dsi_program_clocks(encoder->base.dev, port, config);
+ bxt_dsi_program_clocks(display, port, config);
} else {
- glk_dsi_program_esc_clock(encoder->base.dev, config);
+ glk_dsi_program_esc_clock(display, config);
}
/* Enable DSI PLL */
- intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
+ intel_de_rmw(display, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
/* Timeout and fail if PLL not locked */
- if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
+ if (intel_de_wait_for_set(display, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timed out waiting for DSI PLL to lock\n");
return;
}
- drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
+ drm_dbg_kms(display->drm, "DSI PLL locked\n");
}
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
/* Clear old configurations */
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
index f975660fa609..f26e31a7dd69 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
@@ -9,7 +9,6 @@
#include <linux/types.h>
enum port;
-struct drm_i915_private;
struct intel_crtc_state;
struct intel_display;
struct intel_encoder;
@@ -33,11 +32,11 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
#ifdef I915
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+bool bxt_dsi_pll_is_enabled(struct intel_display *display);
void assert_dsi_pll_enabled(struct intel_display *display);
void assert_dsi_pll_disabled(struct intel_display *display);
#else
-static inline bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+static inline bool bxt_dsi_pll_is_enabled(struct intel_display *display)
{
return false;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index ddda468241ef..6e4d0ce3952f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 7d97ea2a653e..c4854c5b4e0f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.h b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
index e6c382973129..9d7ee1579900 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ab1af978911b..15835952352e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2011-2012 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index e5b0f66ea1fe..6e682a6a0574 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 67ac2586a0f3..0267c924634b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 9473050ac842..05e440643aa2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright 2012 Red Hat Inc
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 75a143d996e0..7a0cc51923b3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 7796c4119ef5..ca7e9216934a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008,2010 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index ea7561ae6e13..232b984f60b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
index 28d6526e32ab..8044d34707b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 388f90784d8a..f566191d843b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -48,8 +48,7 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
i915_gem_object_evictable(obj))
assert_object_held(obj);
#endif
- return mr && (mr->type == INTEL_MEMORY_LOCAL ||
- mr->type == INTEL_MEMORY_STOLEN_LOCAL);
+ return mr && intel_memory_type_is_local(mr->type);
}
/**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c3dabb857960..f6d37dff320d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index 196417fd0f5c..946fb9825eb3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 356530b599ce..1f38e367c60b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2017 Intel Corporation
*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index a5f34542135c..c34f41605b46 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 68413c05c812..64600aa8227f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 8780aa243105..7f83f8bdc8fb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index ef85c6dc9fd5..f9e7cab140f8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 900c08337942..f0857c5c96df 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index bedf1e95941a..bd5bd2c5e7f9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index ae3343c81a64..19a3eb82dc6a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
@@ -305,36 +304,20 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
.range_end = LLONG_MAX,
.for_reclaim = 1,
};
- unsigned long i;
+ struct folio *folio = NULL;
+ int error = 0;
/*
* Leave mmapings intact (GTT will have been revoked on unbinding,
- * leaving only CPU mmapings around) and add those pages to the LRU
+ * leaving only CPU mmapings around) and add those folios to the LRU
* instead of invoking writeback so they are aged and paged out
* as normal.
*/
-
- /* Begin writeback on each dirty page */
- for (i = 0; i < size >> PAGE_SHIFT; i++) {
- struct page *page;
-
- page = find_lock_page(mapping, i);
- if (!page)
- continue;
-
- if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
- int ret;
-
- SetPageReclaim(page);
- ret = mapping->a_ops->writepage(page, &wbc);
- if (!PageWriteback(page))
- ClearPageReclaim(page);
- if (!ret)
- goto put;
- }
- unlock_page(page);
-put:
- put_page(page);
+ while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
+ if (folio_mapped(folio))
+ folio_redirty_for_writepage(&wbc, folio);
+ else
+ error = shmem_writeout(folio, &wbc);
}
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index aec41f0f098f..b81e67504bbe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008-2015 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 9d958a6f377e..3380151edfc1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008-2012 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index af85d0c28168..8814cbcde5b5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 5ac23ff3feff..5a296ba3758a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 09b68713ab32..307a18eede72 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2012-2014 Intel Corporation
*
- * Based on amdgpu_mn, which bears the following notice:
+ * Based on amdgpu_mn, which bears the following notice:
*
* Copyright 2014 Advanced Micro Devices, Inc.
* All Rights Reserved.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 1f55e62044a4..7127e90c1a8f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 46b9a17d6abc..65d84a93c525 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h
index 5d835e44c4f6..16d4333c9a4e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.h
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 804f74084bd4..9c3f17e51885 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1837,6 +1837,8 @@ static int igt_mmap_revoke(void *arg)
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
+ int ret;
+ bool unuse_mm = false;
static const struct i915_subtest tests[] = {
SUBTEST(igt_partial_tiling),
SUBTEST(igt_smoke_tiling),
@@ -1848,5 +1850,15 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_mmap_gpu),
};
- return i915_live_subtests(tests, i915);
+ if (!current->mm) {
+ kthread_use_mm(current->active_mm);
+ unuse_mm = true;
+ }
+
+ ret = i915_live_subtests(tests, i915);
+
+ if (unuse_mm)
+ kthread_unuse_mm(current->active_mm);
+
+ return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index f6c59f20832f..46a5aa4ab9c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -289,6 +289,14 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
return pte;
}
+static dma_addr_t gen8_ggtt_pte_decode(u64 pte, bool *is_present, bool *is_local)
+{
+ *is_present = pte & GEN8_PAGE_PRESENT;
+ *is_local = pte & GEN12_GGTT_PTE_LM;
+
+ return pte & GEN12_GGTT_PTE_ADDR_MASK;
+}
+
static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
{
struct intel_gt *gt = ggtt->vm.gt;
@@ -435,6 +443,11 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
writeq(pte, addr);
}
+static gen8_pte_t gen8_get_pte(void __iomem *addr)
+{
+ return readq(addr);
+}
+
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
@@ -450,6 +463,16 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
+static dma_addr_t gen8_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ return ggtt->vm.pte_decode(gen8_get_pte(pte), is_present, is_local);
+}
+
static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
dma_addr_t addr, u64 offset,
unsigned int pat_index, u32 flags)
@@ -605,6 +628,17 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
+static dma_addr_t gen6_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset,
+ bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ return vm->pte_decode(ioread32(pte), is_present, is_local);
+}
+
/*
* Binds an object into the global gtt with the specified cache level.
* The object will be accessible to the GPU via commands whose operands
@@ -769,6 +803,14 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm,
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
+dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ return ggtt->vm.read_entry(vm, offset, is_present, is_local);
+}
+
/*
* Reserve the top of the GuC address space for firmware images. Addresses
* beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
@@ -1245,6 +1287,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.scratch_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+ ggtt->vm.read_entry = gen8_ggtt_read_entry;
/*
* Serialize GTT updates with aperture access on BXT if VT-d is on,
@@ -1291,6 +1334,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+ ggtt->vm.pte_decode = gen8_ggtt_pte_decode;
+
return ggtt_probe_common(ggtt, size);
}
@@ -1390,6 +1435,14 @@ static u64 iris_pte_encode(dma_addr_t addr,
return pte;
}
+static dma_addr_t gen6_pte_decode(u64 pte, bool *is_present, bool *is_local)
+{
+ *is_present = pte & GEN6_PTE_VALID;
+ *is_local = false;
+
+ return ((pte & 0xff0) << 28) | (pte & ~0xfff);
+}
+
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
@@ -1428,6 +1481,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.scratch_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.read_entry = gen6_ggtt_read_entry;
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -1443,6 +1497,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
else
ggtt->vm.pte_encode = snb_pte_encode;
+ ggtt->vm.pte_decode = gen6_pte_decode;
+
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
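
The hunks above add pte_decode/read_entry hooks so a GGTT entry can be read back and split into its DMA address plus present/local flags. A minimal, userspace-only sketch of that bit-mask decode is below; the mask macros are stand-ins for the real i915 definitions (GEN8_PAGE_PRESENT, GEN12_GGTT_PTE_LM and the address mask), not the actual register values.

/*
 * Standalone sketch of the PTE decode added above.  The mask values are
 * illustrative placeholders, not the real i915 definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT   (1ull << 0)            /* assumed stand-in for GEN8_PAGE_PRESENT */
#define PTE_LM        (1ull << 1)            /* assumed stand-in for GEN12_GGTT_PTE_LM */
#define PTE_ADDR_MASK 0x0000fffffffff000ull  /* assumed stand-in for the address mask */

static uint64_t pte_decode(uint64_t pte, bool *is_present, bool *is_local)
{
	*is_present = pte & PTE_PRESENT;
	*is_local = pte & PTE_LM;

	return pte & PTE_ADDR_MASK;
}

int main(void)
{
	bool present, local;
	uint64_t pte = 0x0000000012345000ull | PTE_PRESENT | PTE_LM;
	uint64_t addr = pte_decode(pte, &present, &local);

	printf("addr=%#llx present=%d local=%d\n",
	       (unsigned long long)addr, present, local);
	return 0;
}
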
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
index 59eed0a0ce90..c5f5f0bdfb2c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
@@ -27,6 +27,13 @@ static void gmch_ggtt_insert_page(struct i915_address_space *vm,
intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}
+static dma_addr_t gmch_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ return intel_gmch_gtt_read_entry(offset >> PAGE_SHIFT,
+ is_present, is_local);
+}
+
static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
@@ -103,6 +110,7 @@ int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
ggtt->vm.clear_range = gmch_ggtt_clear_range;
ggtt->vm.scratch_range = gmch_ggtt_clear_range;
+ ggtt->vm.read_entry = gmch_ggtt_read_entry;
ggtt->vm.cleanup = gmch_ggtt_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 30b128b1fde7..afbc5c769308 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -176,7 +176,6 @@ static void clear_vm_list(struct list_head *list)
i915_vma_destroy_locked(vma);
i915_gem_object_put(obj);
}
-
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 0a36ea751b63..9d3a3ad567a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -312,6 +312,7 @@ struct i915_address_space {
u64 (*pte_encode)(dma_addr_t addr,
unsigned int pat_index,
u32 flags); /* Create a valid PTE */
+ dma_addr_t (*pte_decode)(u64 pte, bool *is_present, bool *is_local);
#define PTE_READ_ONLY BIT(0)
#define PTE_LM BIT(1)
@@ -340,6 +341,8 @@ struct i915_address_space {
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 flags);
+ dma_addr_t (*read_entry)(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local);
void (*cleanup)(struct i915_address_space *vm);
void (*foreach)(struct i915_address_space *vm,
@@ -590,6 +593,9 @@ void intel_ggtt_bind_vma(struct i915_address_space *vm,
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res);
+dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local);
+
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 51847a846002..c481b56fa67d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -751,7 +751,6 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
-
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
/*
* Note that the CSFE context has a dummy slot for CMD_BUF_CCTL
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index cf41d325712e..5dd8121f4b15 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -314,7 +314,6 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = {
};
static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
-
/* UC */
MOCS_ENTRY(1, 0, L3_1_UC),
/* WB - L3 */
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 9378d5901c49..9ca42589da4d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -117,21 +117,10 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN6_RC_CTL_RC6_ENABLE |
GEN6_RC_CTL_EI_MODE(1);
- /*
- * BSpec 52698 - Render powergating must be off.
- * FIXME BSpec is outdated, disabling powergating for MTL is just
- * temporary wa and should be removed after fixing real cause
- * of forcewake timeouts.
- */
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
- pg_enable =
- GEN9_MEDIA_PG_ENABLE |
- GEN11_MEDIA_SAMPLER_PG_ENABLE;
- else
- pg_enable =
- GEN9_RENDER_PG_ENABLE |
- GEN9_MEDIA_PG_ENABLE |
- GEN11_MEDIA_SAMPLER_PG_ENABLE;
+ pg_enable =
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE;
if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
for (i = 0; i < I915_MAX_VCS; i++)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 6e9977b2d180..a876a34455f1 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -365,7 +365,13 @@ static void reset_prepare(struct intel_engine_cs *engine)
ENGINE_READ_FW(engine, RING_HEAD),
ENGINE_READ_FW(engine, RING_TAIL),
ENGINE_READ_FW(engine, RING_START));
- if (!stop_ring(engine)) {
+ /*
+ * Sometimes the engine head fails to reset to zero even after it is written.
+ * Use wait_for_atomic() with a 20ms timeout so the engine resumes from the
+ * correct RING_HEAD. Different timeout values were tried; 20ms worked best
+ * in testing.
+ */
+ if (wait_for_atomic((!stop_ring(engine) == 0), 20)) {
drm_err(&engine->i915->drm,
"failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 64e9317f58fb..5abc5fcc2514 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -8,7 +8,7 @@
#include <drm/intel/i915_drm.h>
#include "display/intel_display.h"
-#include "display/intel_display_irq.h"
+#include "display/intel_display_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -550,6 +550,7 @@ static unsigned int init_emon(struct intel_uncore *uncore)
static bool gen5_rps_enable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_display *display = &i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u8 fstart, vstart;
u32 rgvmodectl;
@@ -607,9 +608,7 @@ static bool gen5_rps_enable(struct intel_rps *rps)
rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
rps->ips.last_time2 = ktime_get_raw_ns();
- spin_lock(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PCU_EVENT);
- spin_unlock(&i915->irq_lock);
+ ilk_display_rps_enable(display);
spin_unlock_irq(&mchdev_lock);
@@ -621,14 +620,13 @@ static bool gen5_rps_enable(struct intel_rps *rps)
static void gen5_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_display *display = &i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u16 rgvswctl;
spin_lock_irq(&mchdev_lock);
- spin_lock(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PCU_EVENT);
- spin_unlock(&i915->irq_lock);
+ ilk_display_rps_disable(display);
rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
@@ -1001,6 +999,10 @@ void intel_rps_dec_waiters(struct intel_rps *rps)
if (rps_uses_slpc(rps)) {
slpc = rps_to_slpc(rps);
+ /* Don't decrement num_waiters for req where increment was skipped */
+ if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
+ return;
+
intel_guc_slpc_dec_waiters(slpc);
} else {
atomic_dec(&rps->num_waiters);
@@ -1029,11 +1031,15 @@ void intel_rps_boost(struct i915_request *rq)
if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
return;
- if (slpc->min_freq_softlimit >= slpc->boost_freq)
- return;
-
/* Return if old value is non zero */
if (!atomic_fetch_inc(&slpc->num_waiters)) {
+ /*
+ * Skip queuing boost work if frequency is already boosted,
+ * but still increment num_waiters.
+ */
+ if (slpc->min_freq_softlimit >= slpc->boost_freq)
+ return;
+
GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
rq->fence.context, rq->fence.seqno);
queue_work(rps_to_gt(rps)->i915->unordered_wq,
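
The boost path above relies on atomic_fetch_inc() returning the old counter value, so only the 0 -> 1 transition queues the boost work while every caller still gets counted. A small sketch of that pattern with plain C11 atomics standing in for the kernel's atomic_t helpers:

/*
 * "Increment first, only queue work on the 0 -> 1 transition."
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int num_waiters;

static void boost(void)
{
	/* fetch_add returns the old value: zero means we are the first waiter */
	if (atomic_fetch_add(&num_waiters, 1) == 0)
		printf("queueing boost work\n");
	else
		printf("boost already pending, just counted\n");
}

int main(void)
{
	boost();
	boost();
	printf("waiters=%d\n", atomic_load(&num_waiters));
	return 0;
}
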
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 5135b90a2a40..ece445109305 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -57,7 +57,7 @@ struct intel_rps {
/*
* work, interrupts_enabled and pm_iir are protected by
- * i915->irq_lock
+ * gt->irq_lock
*/
struct timer_list timer;
struct work_struct work;
diff --git a/drivers/gpu/drm/i915/gt/intel_wopcm.h b/drivers/gpu/drm/i915/gt/intel_wopcm.h
index 17d6aa86008a..d2038b6de5e7 100644
--- a/drivers/gpu/drm/i915/gt/intel_wopcm.h
+++ b/drivers/gpu/drm/i915/gt/intel_wopcm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017-2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 116683ebe074..b37e400f74e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -156,7 +156,7 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
struct i915_wa *list;
- list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+ list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*list),
GFP_KERNEL);
if (!list) {
drm_err(&i915->drm, "No space for workaround init!\n");
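
The sizeof(*wa) -> sizeof(*list) change above ties the allocation size to the pointer being filled rather than to a type name, so the size stays correct if the element type ever changes. A toy userspace version of the same idiom, with a hypothetical struct wa:

#include <stdio.h>
#include <stdlib.h>

struct wa {
	unsigned int reg;
	unsigned int set;
	unsigned int clr;
};

int main(void)
{
	size_t count = 8;
	struct wa *list;

	/* preferred: the size derives from the destination pointer, not a type name */
	list = malloc(count * sizeof(*list));
	if (!list)
		return 1;

	printf("allocated %zu bytes\n", count * sizeof(*list));
	free(list);
	return 0;
}
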
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 22e750108c5f..23f04f6f8fba 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -7,6 +7,7 @@
#include "gem/i915_gem_internal.h"
+#include "i915_drv.h"
#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
@@ -859,6 +860,14 @@ static int live_lrc_timestamp(void *arg)
};
/*
+ * This test was designed to isolate a hardware bug.
+ * The bug was found and fixed in later generations, but
+ * the test now pollutes our CI on earlier generations.
+ */
+ if (GRAPHICS_VER(gt->i915) == 12)
+ return 0;
+
+ /*
* We want to verify that the timestamp is saved and restore across
* context switches and is monotonic.
*
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 401bee030dbc..32c762eb79ed 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -661,7 +661,7 @@ static int live_emit_pte_full_ring(void *arg)
out_rq:
i915_request_add(rq); /* GEM_BUG_ON(rq->reserved_space > ring->space)? */
timer_delete_sync(&st.timer);
- destroy_timer_on_stack(&st.timer);
+ timer_destroy_on_stack(&st.timer);
out_unpin:
intel_context_unpin(ce);
out_put:
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 908483ab0bc8..41716ed454b7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -33,15 +33,22 @@ int live_rc6_manual(void *arg)
{
struct intel_gt *gt = arg;
struct intel_rc6 *rc6 = &gt->rc6;
- u64 rc0_power, rc6_power;
+ struct intel_rps *rps = &gt->rps;
intel_wakeref_t wakeref;
+ u64 rc0_sample_energy[2];
+ u64 rc6_sample_energy[2];
+ u64 sleep_time = 1000;
+ u32 rc0_freq = 0;
+ u32 rc6_freq = 0;
+ u64 rc0_power;
+ u64 rc6_power;
bool has_power;
+ u64 threshold;
ktime_t dt;
u64 res[2];
int err = 0;
- u32 rc0_freq = 0;
- u32 rc6_freq = 0;
- struct intel_rps *rps = &gt->rps;
+ u64 diff;
+
/*
* Our claim is that we can "encourage" the GPU to enter rc6 at will.
@@ -60,14 +67,15 @@ int live_rc6_manual(void *arg)
/* Force RC6 off for starters */
__intel_rc6_disable(rc6);
- msleep(1); /* wakeup is not immediate, takes about 100us on icl */
+ /* wakeup is not immediate, takes about 100us on icl */
+ usleep_range(1000, 2000);
res[0] = rc6_residency(rc6);
dt = ktime_get();
- rc0_power = librapl_energy_uJ();
- msleep(1000);
- rc0_power = librapl_energy_uJ() - rc0_power;
+ rc0_sample_energy[0] = librapl_energy_uJ();
+ msleep(sleep_time);
+ rc0_sample_energy[1] = librapl_energy_uJ() - rc0_sample_energy[0];
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
rc0_freq = intel_rps_read_actual_frequency_fw(rps);
@@ -79,11 +87,12 @@ int live_rc6_manual(void *arg)
}
if (has_power) {
- rc0_power = div64_u64(NSEC_PER_SEC * rc0_power,
+ rc0_power = div64_u64(NSEC_PER_SEC * rc0_sample_energy[1],
ktime_to_ns(dt));
+
if (!rc0_power) {
if (rc0_freq)
- pr_debug("No power measured while in RC0! GPU Freq: %u in RC0\n",
+ pr_debug("No power measured while in RC0! GPU Freq: %uMHz in RC0\n",
rc0_freq);
else
pr_err("No power and freq measured while in RC0\n");
@@ -98,10 +107,10 @@ int live_rc6_manual(void *arg)
res[0] = rc6_residency(rc6);
intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
dt = ktime_get();
- rc6_power = librapl_energy_uJ();
- msleep(1000);
+ rc6_sample_energy[0] = librapl_energy_uJ();
+ msleep(sleep_time);
rc6_freq = intel_rps_read_actual_frequency_fw(rps);
- rc6_power = librapl_energy_uJ() - rc6_power;
+ rc6_sample_energy[1] = librapl_energy_uJ() - rc6_sample_energy[0];
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
if (res[1] == res[0]) {
@@ -113,13 +122,24 @@ int live_rc6_manual(void *arg)
}
if (has_power) {
- rc6_power = div64_u64(NSEC_PER_SEC * rc6_power,
+ rc6_power = div64_u64(NSEC_PER_SEC * rc6_sample_energy[1],
ktime_to_ns(dt));
- pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
+ pr_info("GPU consumed %lluuW in RC0 and %lluuW in RC6\n",
rc0_power, rc6_power);
+
if (2 * rc6_power > rc0_power) {
- pr_err("GPU leaked energy while in RC6! GPU Freq: %u in RC6 and %u in RC0\n",
- rc6_freq, rc0_freq);
+ pr_err("GPU leaked energy while in RC6!\n"
+ "GPU Freq: %uMHz in RC6 and %uMHz in RC0\n"
+ "RC0 energy before & after sleep respectively: %lluuJ %lluuJ\n"
+ "RC6 energy before & after sleep respectively: %lluuJ %lluuJ\n",
+ rc6_freq, rc0_freq, rc0_sample_energy[0], rc0_sample_energy[1],
+ rc6_sample_energy[0], rc6_sample_energy[1]);
+
+ diff = res[1] - res[0];
+ threshold = (9 * NSEC_PER_MSEC * sleep_time) / 10;
+ if (diff < threshold)
+ pr_err("Did not enter RC6 properly, RC6 start residency=%lluns, RC6 end residency=%lluns\n",
+ res[0], res[1]);
err = -EINVAL;
goto out_unlock;
}
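
The reworked selftest above samples RAPL energy around a fixed sleep, converts the delta to average power, and compares RC6 residency against 90% of the sleep window. A minimal sketch of that arithmetic with made-up sample values (the numbers are illustrative, not measured):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC  1000000000ull
#define NSEC_PER_MSEC 1000000ull

int main(void)
{
	uint64_t sleep_ms = 1000;
	uint64_t energy_uj[2] = { 500000, 501200 };  /* RAPL samples before/after sleep */
	uint64_t dt_ns = sleep_ms * NSEC_PER_MSEC;

	/* average power in uW = delta energy (uJ) scaled to one second */
	uint64_t power_uw = (energy_uj[1] - energy_uj[0]) * NSEC_PER_SEC / dt_ns;

	/* RC6 residency must cover at least 90% of the sleep window */
	uint64_t threshold_ns = 9 * NSEC_PER_MSEC * sleep_ms / 10;
	uint64_t residency_ns = 950 * NSEC_PER_MSEC;

	printf("power=%lluuW residency_ok=%d\n",
	       (unsigned long long)power_uw, residency_ns >= threshold_ns);
	return 0;
}
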
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
index 3941f2d6fa47..69ed946a39e5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_tlb.c
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -143,7 +143,7 @@ pte_tlbinv(struct intel_context *ce,
if (ce->engine->class == OTHER_CLASS)
msleep(200);
else
- msleep(10);
+ usleep_range(10000, 20000);
if (va == vb) {
if (!i915_request_completed(rq)) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index 5dc0ccd07636..d550eb6edfb8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -230,7 +230,7 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
gt_info(gt, "Invalid GSC firmware for MTL/ARL, got %d.%d.%d.%d but need 102.x.x.x",
gsc->release.major, gsc->release.minor,
gsc->release.patch, gsc->release.build);
- return -EINVAL;
+ return -EINVAL;
}
if (min_ver.major) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index d791f9baa11d..456d3372eef8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -317,6 +317,11 @@ void intel_huc_init_early(struct intel_huc *huc)
}
}
+void intel_huc_fini_late(struct intel_huc *huc)
+{
+ delayed_huc_load_fini(huc);
+}
+
#define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
static int check_huc_loading_mode(struct intel_huc *huc)
{
@@ -414,12 +419,6 @@ out:
void intel_huc_fini(struct intel_huc *huc)
{
- /*
- * the fence is initialized in init_early, so we need to clean it up
- * even if HuC loading is off.
- */
- delayed_huc_load_fini(huc);
-
if (huc->heci_pkt)
i915_vma_unpin_and_release(&huc->heci_pkt, 0);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index d5e441b9e08d..921ad4b1687f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -55,6 +55,7 @@ struct intel_huc {
int intel_huc_sanitize(struct intel_huc *huc);
void intel_huc_init_early(struct intel_huc *huc);
+void intel_huc_fini_late(struct intel_huc *huc);
int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 90ba1b0b4c9d..4a3493e8d433 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -136,6 +136,7 @@ void intel_uc_init_late(struct intel_uc *uc)
void intel_uc_driver_late_release(struct intel_uc *uc)
{
+ intel_huc_fini_late(&uc->huc);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index eedd1865bb98..62d14f82256f 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -46,6 +46,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
unsigned int flags;
u64 start, end, size;
struct drm_mm_node *node;
+ intel_wakeref_t wakeref;
int ret;
if (high_gm) {
@@ -63,12 +64,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&gt->ggtt->vm.mutex);
- mmio_hw_access_pre(gt);
+ wakeref = mmio_hw_access_pre(gt);
ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
- mmio_hw_access_post(gt);
+ mmio_hw_access_post(gt, wakeref);
mutex_unlock(&gt->ggtt->vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
@@ -226,7 +227,7 @@ out_free_fence:
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&gvt->gt->ggtt->vm.mutex);
- intel_runtime_pm_put_unchecked(uncore->rpm);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return -ENOSPC;
}
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index baccbf1761b7..673534f061ef 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -91,16 +91,17 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
.diff = 0,
};
struct diff_mmio *node, *next;
+ intel_wakeref_t wakeref;
INIT_LIST_HEAD(&param.diff_mmio_list);
mutex_lock(&gvt->lock);
spin_lock_bh(&gvt->scheduler.mmio_context_lock);
- mmio_hw_access_pre(gvt->gt);
+ wakeref = mmio_hw_access_pre(gvt->gt);
/* Recognize all the diff mmios to list. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
mutex_unlock(&gvt->lock);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2fa7ca19ba5d..ae9b0ded3651 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -220,9 +220,11 @@ static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
static void ggtt_invalidate(struct intel_gt *gt)
{
- mmio_hw_access_pre(gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gt);
intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- mmio_hw_access_post(gt);
+ mmio_hw_access_post(gt, wakeref);
}
static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 01d890999f25..1d10c16e6465 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -570,14 +570,15 @@ enum {
GVT_FAILSAFE_GUEST_ERR,
};
-static inline void mmio_hw_access_pre(struct intel_gt *gt)
+static inline intel_wakeref_t mmio_hw_access_pre(struct intel_gt *gt)
{
- intel_runtime_pm_get(gt->uncore->rpm);
+ return intel_runtime_pm_get(gt->uncore->rpm);
}
-static inline void mmio_hw_access_post(struct intel_gt *gt)
+static inline void mmio_hw_access_post(struct intel_gt *gt,
+ intel_wakeref_t wakeref)
{
- intel_runtime_pm_put_unchecked(gt->uncore->rpm);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
/**
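
The mmio_hw_access_pre/post change above threads an intel_wakeref_t cookie from acquire to release, replacing the unchecked put so the reference that was taken is the one dropped. A toy sketch of the acquire-returns-token / release-takes-token shape, with a plain counter standing in for the real runtime-PM tracking:

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t wakeref_t;  /* stand-in for intel_wakeref_t */

static int pm_refcount;

static wakeref_t pm_get(void)
{
	pm_refcount++;
	/* the real code hands back a tracking cookie; use a dummy token here */
	return (wakeref_t)1;
}

static void pm_put(wakeref_t wakeref)
{
	if (!wakeref) {
		fprintf(stderr, "releasing an untracked reference\n");
		return;
	}
	pm_refcount--;
}

static void hw_access(void)
{
	wakeref_t wakeref = pm_get();  /* was: void mmio_hw_access_pre() */

	/* ... touch hardware registers ... */

	pm_put(wakeref);               /* was: _put_unchecked() with no token */
}

int main(void)
{
	hw_access();
	printf("refcount=%d\n", pm_refcount);
	return 0;
}
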
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 4efee6797873..1344e6d20a34 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -56,6 +56,7 @@
#include "display/intel_pps_regs.h"
#include "display/intel_psr_regs.h"
#include "display/intel_sprite_regs.h"
+#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
@@ -264,6 +265,7 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
{
struct intel_gvt *gvt = vgpu->gvt;
unsigned int fence_num = offset_to_fence_num(off);
+ intel_wakeref_t wakeref;
int ret;
ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
@@ -271,10 +273,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
write_vreg(vgpu, off, p_data, bytes);
- mmio_hw_access_pre(gvt->gt);
+ wakeref = mmio_hw_access_pre(gvt->gt);
intel_vgpu_write_fence(vgpu, fence_num,
vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
return 0;
}
@@ -513,7 +515,7 @@ static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
switch (wrpll_ctl & WRPLL_REF_MASK) {
case WRPLL_REF_PCH_SSC:
- refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc;
+ refclk = 135000;
break;
case WRPLL_REF_LCPLL:
refclk = 2700000;
@@ -544,7 +546,7 @@ out:
static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
u32 dp_br = 0;
- int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
+ int refclk = 100000;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
struct dpll clock = {};
@@ -1975,10 +1977,12 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
vgpu == gvt->scheduler.engine_owner[engine->id] ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
vgpu_vreg(vgpu, offset) =
intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
@@ -3209,10 +3213,12 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt)
int i, id;
idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
for (i = 0; i < vgpu_fence_sz(vgpu); i++)
intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
}
@@ -3233,8 +3239,10 @@ void intel_gvt_restore_mmio(struct intel_gvt *gvt)
int id;
idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 509f9ccae3a9..dbad4d853d3a 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -222,7 +222,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
u8 *buf;
struct opregion_header *header;
struct vbt v;
- const char opregion_signature[16] = OPREGION_SIGNATURE;
gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
@@ -236,8 +235,10 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
/* emulated opregion with VBT mailbox only */
buf = (u8 *)vgpu_opregion(vgpu)->va;
header = (struct opregion_header *)buf;
- memcpy(header->signature, opregion_signature,
- sizeof(opregion_signature));
+
+ static_assert(sizeof(header->signature) == sizeof(OPREGION_SIGNATURE) - 1);
+ memcpy(header->signature, OPREGION_SIGNATURE, sizeof(header->signature));
+
header->size = 0x8;
header->opregion_ver = 0x02000000;
header->mboxes = MBOX_VBT;
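
The opregion change above copies the signature literal minus its NUL terminator and lets a compile-time assert catch any size mismatch. A standalone illustration of the same idiom; the 16-byte field and the "IntelGraphicsMem" literal restate what the real header uses, but locally:

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define OPREGION_SIGNATURE "IntelGraphicsMem"  /* 16 characters plus NUL */

struct opregion_header {
	char signature[16];
	/* ... */
};

int main(void)
{
	struct opregion_header header = { 0 };

	static_assert(sizeof(header.signature) == sizeof(OPREGION_SIGNATURE) - 1,
		      "signature field must match the literal minus its NUL");
	memcpy(header.signature, OPREGION_SIGNATURE, sizeof(header.signature));

	printf("%.16s\n", header.signature);
	return 0;
}
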
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 9f97f743aa71..6c2d68e88266 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -447,6 +447,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
if (!vgpu_data->active)
return;
@@ -465,7 +466,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
scheduler->current_vgpu = NULL;
}
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock);
for_each_engine(engine, vgpu->gvt->gt, id) {
if (scheduler->engine_owner[engine->id] == vgpu) {
@@ -474,6 +475,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&vgpu->gvt->sched_lock);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0d9e263913ff..967c0501e91e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -66,8 +66,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
struct drm_i915_private *i915 = node_to_i915(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
-
intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
intel_gt_info_print(&to_gt(i915)->info, &p);
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index ce3cc93ea211..273bc43468a0 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -62,7 +62,6 @@
#include "display/intel_pch_refclk.h"
#include "display/intel_pps.h"
#include "display/intel_sprite_uapi.h"
-#include "display/intel_vga.h"
#include "display/skl_watermark.h"
#include "gem/i915_gem_context.h"
@@ -235,7 +234,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_uncore_mmio_debug_init_early(dev_priv);
- spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
intel_sbi_init(dev_priv);
@@ -263,9 +261,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
i915_gem_init_early(dev_priv);
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(dev_priv);
-
intel_irq_init(dev_priv);
intel_display_driver_early_probe(display);
intel_clock_gating_hooks_init(dev_priv);
@@ -578,7 +573,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
*/
intel_dram_detect(dev_priv);
- intel_bw_init_hw(dev_priv);
+ intel_bw_init_hw(display);
return 0;
@@ -622,11 +617,12 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
* Perform any steps necessary to make the driver available via kernel
* internal or userspace interfaces.
*/
-static void i915_driver_register(struct drm_i915_private *dev_priv)
+static int i915_driver_register(struct drm_i915_private *dev_priv)
{
struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
unsigned int i;
+ int ret;
i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
@@ -634,10 +630,14 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
intel_vgpu_register(dev_priv);
/* Reveal our presence to userspace */
- if (drm_dev_register(&dev_priv->drm, 0)) {
- drm_err(&dev_priv->drm,
- "Failed to register driver for userspace access!\n");
- return;
+ ret = drm_dev_register(&dev_priv->drm, 0);
+ if (ret) {
+ i915_probe_error(dev_priv,
+ "Failed to register driver for userspace access!\n");
+ drm_dev_unregister(&dev_priv->drm);
+ i915_pmu_unregister(dev_priv);
+ i915_gem_driver_unregister(dev_priv);
+ return ret;
}
i915_debugfs_register(dev_priv);
@@ -660,6 +660,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
if (i915_switcheroo_register(dev_priv))
drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
+
+ return 0;
}
/**
@@ -834,7 +836,9 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_gem;
- i915_driver_register(i915);
+ ret = i915_driver_register(i915);
+ if (ret)
+ goto out_cleanup_gem;
enable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -845,6 +849,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
out_cleanup_gem:
+ intel_pxp_fini(i915);
i915_gem_suspend(i915);
i915_gem_driver_remove(i915);
i915_gem_driver_release(i915);
@@ -981,7 +986,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_dp_mst_suspend(display);
intel_irq_suspend(i915);
- intel_hpd_cancel_work(i915);
+ intel_hpd_cancel_work(display);
if (HAS_DISPLAY(i915))
intel_display_driver_suspend_access(display);
@@ -1064,7 +1069,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_display_driver_suspend(display);
intel_irq_suspend(dev_priv);
- intel_hpd_cancel_work(dev_priv);
+ intel_hpd_cancel_work(display);
if (HAS_DISPLAY(dev_priv))
intel_display_driver_suspend_access(display);
@@ -1195,13 +1200,11 @@ static int i915_drm_resume(struct drm_device *dev)
i9xx_display_sr_restore(display);
- intel_vga_redisable(display);
-
intel_gmbus_reset(display);
intel_pps_unlock_regs_wa(display);
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
/*
* Interrupts have to be enabled before any batches are run. If not the
@@ -1227,7 +1230,7 @@ static int i915_drm_resume(struct drm_device *dev)
if (HAS_DISPLAY(dev_priv))
intel_display_driver_resume_access(display);
- intel_hpd_init(dev_priv);
+ intel_hpd_init(display);
intel_display_driver_resume(display);
@@ -1235,7 +1238,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_driver_enable_user_access(display);
drm_kms_helper_poll_enable(dev);
}
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -1575,7 +1578,7 @@ static int intel_runtime_suspend(struct device *kdev)
assert_forcewakes_inactive(&dev_priv->uncore);
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
- intel_hpd_poll_enable(dev_priv);
+ intel_hpd_poll_enable(display);
drm_dbg(&dev_priv->drm, "Device suspended\n");
return 0;
@@ -1633,11 +1636,11 @@ static int intel_runtime_resume(struct device *kdev)
* everyone else do it here.
*/
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- intel_hpd_init(dev_priv);
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
}
- skl_watermark_ipc_update(dev_priv);
+ skl_watermark_ipc_update(display);
enable_rpm_wakeref_asserts(rpm);
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
index 4b67ad9a61cd..1e95ecb2a163 100644
--- a/drivers/gpu/drm/i915/i915_driver.h
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -15,7 +15,6 @@ struct drm_printer;
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_TIMESTAMP 1695980603
extern const struct dev_pm_ops i915_pm_ops;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ffc346379cc2..d0e1980dcba2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -49,8 +49,6 @@
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
-#include "soc/intel_pch.h"
-
#include "i915_drm_client.h"
#include "i915_gem.h"
#include "i915_gpu_error.h"
@@ -224,8 +222,6 @@ struct drm_i915_private {
};
unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];
- /* protects the irq masks */
- spinlock_t irq_lock;
bool irqs_enabled;
/* LPT/WPT IOSF sideband protection */
@@ -272,9 +268,6 @@ struct drm_i915_private {
/* pm private clock gating functions */
const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
- /* PCH chipset type */
- enum intel_pch pch_type;
-
unsigned long gem_quirks;
struct i915_gem_mm mm;
@@ -305,6 +298,8 @@ struct drm_i915_private {
INTEL_DRAM_DDR5,
INTEL_DRAM_LPDDR5,
INTEL_DRAM_GDDR,
+ INTEL_DRAM_GDDR_ECC,
+ __INTEL_DRAM_TYPE_MAX,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a6613eed3398..4f785cdbd155 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -726,13 +726,6 @@ static void err_print_gt_info(struct drm_i915_error_state_buf *m,
intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}
-static void err_print_gt_display(struct drm_i915_error_state_buf *m,
- struct intel_gt_coredump *gt)
-{
- err_printf(m, "IER: 0x%08x\n", gt->ier);
- err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
-}
-
static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
struct intel_gt_coredump *gt)
{
@@ -878,7 +871,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
print_guc_capture = true;
- err_print_gt_display(m, error->gt);
err_print_gt_global_nonguc(m, error->gt);
err_print_gt_fences(m, error->gt);
@@ -1767,27 +1759,6 @@ gt_record_uc(struct intel_gt_coredump *gt,
return error_uc;
}
-/* Capture display registers. */
-static void gt_record_display_regs(struct intel_gt_coredump *gt)
-{
- struct intel_uncore *uncore = gt->_gt->uncore;
- struct drm_i915_private *i915 = uncore->i915;
-
- if (DISPLAY_VER(i915) >= 6 && DISPLAY_VER(i915) < 20)
- gt->derrmr = intel_uncore_read(uncore, DERRMR);
-
- if (GRAPHICS_VER(i915) >= 8)
- gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
- else if (IS_VALLEYVIEW(i915))
- gt->ier = intel_uncore_read(uncore, VLV_IER);
- else if (HAS_PCH_SPLIT(i915))
- gt->ier = intel_uncore_read(uncore, DEIER);
- else if (GRAPHICS_VER(i915) == 2)
- gt->ier = intel_uncore_read16(uncore, GEN2_IER);
- else
- gt->ier = intel_uncore_read(uncore, GEN2_IER);
-}
-
/* Capture all other registers that GuC doesn't capture. */
static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
{
@@ -1821,9 +1792,12 @@ static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
gt->gtier[i] =
intel_uncore_read(uncore, GEN8_GT_IER(i));
gt->ngtier = 4;
- } else if (HAS_PCH_SPLIT(i915)) {
+ } else if (GRAPHICS_VER(i915) >= 5) {
gt->gtier[0] = intel_uncore_read(uncore, GTIER);
gt->ngtier = 1;
+ } else {
+ gt->gtier[0] = intel_uncore_read(uncore, GEN2_IER);
+ gt->ngtier = 1;
}
gt->eir = intel_uncore_read(uncore, EIR);
@@ -2043,7 +2017,6 @@ intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
gc->_gt = gt;
gc->awake = intel_gt_pm_is_awake(gt);
- gt_record_display_regs(gc);
gt_record_global_nonguc_regs(gc);
/*
@@ -2160,7 +2133,6 @@ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump
void i915_error_state_store(struct i915_gpu_coredump *error)
{
struct drm_i915_private *i915;
- static bool warned;
if (IS_ERR_OR_NULL(error))
return;
@@ -2174,16 +2146,8 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
i915_gpu_coredump_get(error);
- if (!xchg(&warned, true) &&
- ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
- pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
- pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
- pr_info("Please see https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html for details.\n");
- pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
- pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
- pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
- i915->drm.primary->index);
- }
+ drm_info(&i915->drm, "GPU error state saved to /sys/class/drm/card%d/error\n",
+ i915->drm.primary->index);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 749e1c55613e..182324979278 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -146,7 +146,6 @@ struct intel_gt_coredump {
/* Generic register state */
u32 eir;
u32 pgtbl_er;
- u32 ier;
u32 gtier[6], ngtier;
u32 forcewake;
u32 error; /* gen6+ */
@@ -164,8 +163,6 @@ struct intel_gt_coredump {
u32 clock_frequency;
u32 clock_period_ns;
- /* Display related */
- u32 derrmr;
u32 sfc_done[I915_MAX_SFC]; /* gen12 */
u32 nfence;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 37ca4a35daf2..95042879bec4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -277,14 +277,14 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_ack(display, &eir, &dpinvgtt);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT))
@@ -306,12 +306,12 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_handler(display, eir, dpinvgtt);
- valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
+ valleyview_pipestat_irq_handler(display, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -367,14 +367,14 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_ack(display, &eir, &dpinvgtt);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT |
@@ -392,12 +392,12 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_handler(display, eir, dpinvgtt);
- valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
+ valleyview_pipestat_irq_handler(display, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -418,6 +418,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct intel_display *display = &i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -458,9 +459,9 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
if (de_iir) {
raw_reg_write(regs, DEIIR, de_iir);
if (DISPLAY_VER(i915) >= 7)
- ivb_display_irq_handler(i915, de_iir);
+ ivb_display_irq_handler(display, de_iir);
else
- ilk_display_irq_handler(i915, de_iir);
+ ilk_display_irq_handler(display, de_iir);
ret = IRQ_HANDLED;
}
@@ -506,6 +507,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
u32 master_ctl;
@@ -524,7 +526,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- gen8_de_irq_handler(dev_priv, master_ctl);
+ gen8_de_irq_handler(display, master_ctl);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
}
@@ -556,6 +558,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct intel_display *display = &i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
struct intel_gt *gt = to_gt(i915);
u32 master_ctl;
@@ -575,13 +578,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & GEN11_DISPLAY_IRQ)
- gen11_display_irq_handler(i915);
+ gen11_display_irq_handler(display);
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);
gen11_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+ gen11_gu_misc_irq_handler(display, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
@@ -613,6 +616,7 @@ static inline void dg1_master_intr_enable(void __iomem * const regs)
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = arg;
+ struct intel_display *display = &i915->display;
struct intel_gt *gt = to_gt(i915);
void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 master_tile_ctl, master_ctl;
@@ -641,36 +645,22 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
gen11_gt_irq_handler(gt, master_ctl);
if (master_ctl & GEN11_DISPLAY_IRQ)
- gen11_display_irq_handler(i915);
+ gen11_display_irq_handler(display);
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);
dg1_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+ gen11_gu_misc_irq_handler(display, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
return IRQ_HANDLED;
}
-static void ibx_irq_reset(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- if (HAS_PCH_NOP(dev_priv))
- return;
-
- gen2_irq_reset(uncore, SDE_IRQ_REGS);
-
- if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
- intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
-}
-
-/* drm_dma.h hooks
-*/
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
gen2_irq_reset(uncore, DE_IRQ_REGS);
@@ -686,45 +676,43 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
gen5_gt_irq_reset(to_gt(dev_priv));
- ibx_irq_reset(dev_priv);
+ ibx_display_irq_reset(display);
}
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
gen5_gt_irq_reset(to_gt(dev_priv));
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_reset(display);
}
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
gen8_master_intr_disable(intel_uncore_regs(uncore));
gen8_gt_irq_reset(to_gt(dev_priv));
- gen8_display_irq_reset(dev_priv);
+ gen8_display_irq_reset(display);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
-
- if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_reset(dev_priv);
-
}
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));
gen11_gt_irq_reset(gt);
- gen11_display_irq_reset(dev_priv);
+ gen11_display_irq_reset(display);
gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
@@ -732,6 +720,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_gt *gt;
unsigned int i;
@@ -741,7 +730,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
for_each_gt(gt, dev_priv, i)
gen11_gt_irq_reset(gt);
- gen11_display_irq_reset(dev_priv);
+ gen11_display_irq_reset(display);
gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
@@ -751,6 +740,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
@@ -760,25 +750,25 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_reset(display);
}
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen5_gt_irq_postinstall(to_gt(dev_priv));
- ilk_de_irq_postinstall(dev_priv);
+ ilk_de_irq_postinstall(display);
}
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen5_gt_irq_postinstall(to_gt(dev_priv));
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_postinstall(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_postinstall(display);
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
@@ -786,20 +776,23 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen8_gt_irq_postinstall(to_gt(dev_priv));
- gen8_de_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(display);
gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
gen11_gt_irq_postinstall(gt);
- gen11_de_irq_postinstall(dev_priv);
+ gen11_de_irq_postinstall(display);
gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);
@@ -809,6 +802,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
struct intel_gt *gt;
@@ -819,7 +813,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);
- dg1_de_irq_postinstall(dev_priv);
+ dg1_de_irq_postinstall(display);
dg1_master_intr_enable(intel_uncore_regs(uncore));
intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
@@ -827,11 +821,11 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen8_gt_irq_postinstall(to_gt(dev_priv));
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_postinstall(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_postinstall(display);
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
@@ -900,9 +894,10 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
- i9xx_display_irq_reset(dev_priv);
+ i9xx_display_irq_reset(display);
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
@@ -911,6 +906,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -932,26 +928,20 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask |= I915_ASLE_INTERRUPT;
}
- if (I915_HAS_HOTPLUG(dev_priv)) {
+ if (HAS_HOTPLUG(dev_priv)) {
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
}
gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- i915_enable_asle_pipestat(dev_priv);
+ i915_display_irq_postinstall(display);
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -972,13 +962,13 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
- if (I915_HAS_HOTPLUG(dev_priv) &&
+ if (HAS_HOTPLUG(dev_priv) &&
iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
@@ -992,9 +982,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
- i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ i915_pipestat_irq_handler(display, iir, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -1006,9 +996,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
- i9xx_display_irq_reset(dev_priv);
+ i9xx_display_irq_reset(display);
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
@@ -1036,6 +1027,7 @@ static u32 i965_error_mask(struct drm_i915_private *i915)
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -1061,20 +1053,13 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- i915_enable_asle_pipestat(dev_priv);
+ i965_display_irq_postinstall(display);
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1096,11 +1081,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
@@ -1119,9 +1104,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
- i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ i965_pipestat_irq_handler(display, iir, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, IRQ_HANDLED);
@@ -1280,6 +1265,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
int irq = to_pci_dev(dev_priv->drm.dev)->irq;
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
@@ -1289,7 +1275,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
free_irq(irq, dev_priv);
- intel_hpd_cancel_work(dev_priv);
+ intel_hpd_cancel_work(display);
dev_priv->irqs_enabled = false;
}
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 76e2801619f0..c33bd3d83069 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -100,7 +100,7 @@ int remap_io_mapping(struct vm_area_struct *vma,
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
- /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */
r.mm = vma->vm_mm;
r.pfn = pfn;
r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
@@ -140,7 +140,7 @@ int remap_io_sg(struct vm_area_struct *vma,
};
int err;
- /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
while (offset >= r.sgt.max >> PAGE_SHIFT) {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index de0b413600a1..1658f1246c6f 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1666,6 +1666,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
struct i915_perf *perf = stream->perf;
struct intel_gt *gt = stream->engine->gt;
struct i915_perf_group *g = stream->engine->oa_group;
+ int m;
if (WARN_ON(stream != g->exclusive_stream))
return;
@@ -1690,10 +1691,9 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_configs(stream);
free_noa_wait(stream);
- if (perf->spurious_report_rs.missed) {
- gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n",
- perf->spurious_report_rs.missed);
- }
+ m = ratelimit_state_get_miss(&perf->spurious_report_rs);
+ if (m)
+ gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n", m);
}
static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c5064eebe063..2e4190da3e0d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -866,6 +866,7 @@
#define FP_M2_DIV_MASK 0x0000003f
#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
#define FP_M2_DIV_SHIFT 0
+
#define DPLL_TEST _MMIO(0x606c)
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
@@ -877,11 +878,13 @@
#define DPLLA_TEST_N_BYPASS (1 << 3)
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+
#define D_STATE _MMIO(0x6104)
#define DSTATE_GFX_RESET_I830 (1 << 6)
#define DSTATE_PLL_D3_OFF (1 << 3)
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
+
#define DSPCLK_GATE_D(__i915) _MMIO(DISPLAY_MMIO_BASE(__i915) + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
@@ -1050,7 +1053,6 @@
/*
* Overlay regs
*/
-
#define OVADD _MMIO(0x30000)
#define DOVSTA _MMIO(0x30008)
#define OC_BUF (0x3 << 20)
@@ -1077,6 +1079,7 @@
#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
#define BXT_GMBUS_GATING_DIS (1 << 14)
+#define DG2_DPFC_GATING_DIS REG_BIT(31)
#define GEN9_CLKGATE_DIS_5 _MMIO(0x46540)
#define DPCE_GATING_DIS REG_BIT(17)
@@ -1105,7 +1108,6 @@
/*
* Display engine regs
*/
-
/* Pipe/transcoder A timing regs */
#define _TRANS_HTOTAL_A 0x60000
#define _TRANS_HTOTAL_B 0x61000
@@ -1396,88 +1398,50 @@
#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
-#define DP_PORT_EN (1 << 31)
-#define DP_PIPE_SEL_SHIFT 30
-#define DP_PIPE_SEL_MASK (1 << 30)
-#define DP_PIPE_SEL(pipe) ((pipe) << 30)
-#define DP_PIPE_SEL_SHIFT_IVB 29
-#define DP_PIPE_SEL_MASK_IVB (3 << 29)
-#define DP_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define DP_PORT_EN REG_BIT(31)
+#define DP_PIPE_SEL_MASK REG_GENMASK(30, 30)
+#define DP_PIPE_SEL(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK, (pipe))
+#define DP_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29)
+#define DP_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_IVB, (pipe))
#define DP_PIPE_SEL_SHIFT_CHV 16
-#define DP_PIPE_SEL_MASK_CHV (3 << 16)
-#define DP_PIPE_SEL_CHV(pipe) ((pipe) << 16)
-
-/* Link training mode - select a suitable mode for each stage */
-#define DP_LINK_TRAIN_PAT_1 (0 << 28)
-#define DP_LINK_TRAIN_PAT_2 (1 << 28)
-#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
-#define DP_LINK_TRAIN_OFF (3 << 28)
-#define DP_LINK_TRAIN_MASK (3 << 28)
-#define DP_LINK_TRAIN_SHIFT 28
-
-/* CPT Link training mode */
-#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
-#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
-#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
-#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
-#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
-#define DP_LINK_TRAIN_SHIFT_CPT 8
-
-/* Signal voltages. These are mostly controlled by the other end */
-#define DP_VOLTAGE_0_4 (0 << 25)
-#define DP_VOLTAGE_0_6 (1 << 25)
-#define DP_VOLTAGE_0_8 (2 << 25)
-#define DP_VOLTAGE_1_2 (3 << 25)
-#define DP_VOLTAGE_MASK (7 << 25)
-#define DP_VOLTAGE_SHIFT 25
-
-/* Signal pre-emphasis levels, like voltages, the other end tells us what
- * they want
- */
-#define DP_PRE_EMPHASIS_0 (0 << 22)
-#define DP_PRE_EMPHASIS_3_5 (1 << 22)
-#define DP_PRE_EMPHASIS_6 (2 << 22)
-#define DP_PRE_EMPHASIS_9_5 (3 << 22)
-#define DP_PRE_EMPHASIS_MASK (7 << 22)
-#define DP_PRE_EMPHASIS_SHIFT 22
-
-/* How many wires to use. I guess 3 was too hard */
-#define DP_PORT_WIDTH(width) (((width) - 1) << 19)
-#define DP_PORT_WIDTH_MASK (7 << 19)
-#define DP_PORT_WIDTH_SHIFT 19
-
-/* Mystic DPCD version 1.1 special mode */
-#define DP_ENHANCED_FRAMING (1 << 18)
-
-/* eDP */
-#define DP_PLL_FREQ_270MHZ (0 << 16)
-#define DP_PLL_FREQ_162MHZ (1 << 16)
-#define DP_PLL_FREQ_MASK (3 << 16)
-
-/* locked once port is enabled */
-#define DP_PORT_REVERSAL (1 << 15)
-
-/* eDP */
-#define DP_PLL_ENABLE (1 << 14)
-
-/* sends the clock on lane 15 of the PEG for debug */
-#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
-
-#define DP_SCRAMBLING_DISABLE (1 << 12)
-#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
-
-/* limit RGB values to avoid confusing TVs */
-#define DP_COLOR_RANGE_16_235 (1 << 8)
-
-/* Turn on the audio link */
-#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
-
-/* vs and hs sync polarity */
-#define DP_SYNC_VS_HIGH (1 << 4)
-#define DP_SYNC_HS_HIGH (1 << 3)
-
-/* A fantasy */
-#define DP_DETECTED (1 << 2)
+#define DP_PIPE_SEL_MASK_CHV REG_GENMASK(17, 16)
+#define DP_PIPE_SEL_CHV(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_CHV, (pipe))
+#define DP_LINK_TRAIN_MASK REG_GENMASK(29, 28)
+#define DP_LINK_TRAIN_PAT_1 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 0)
+#define DP_LINK_TRAIN_PAT_2 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 1)
+#define DP_LINK_TRAIN_PAT_IDLE REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 2)
+#define DP_LINK_TRAIN_OFF REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 3)
+#define DP_LINK_TRAIN_MASK_CPT REG_GENMASK(10, 8)
+#define DP_LINK_TRAIN_PAT_1_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 0)
+#define DP_LINK_TRAIN_PAT_2_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 1)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 2)
+#define DP_LINK_TRAIN_OFF_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 3)
+#define DP_VOLTAGE_MASK REG_GENMASK(27, 25)
+#define DP_VOLTAGE_0_4 REG_FIELD_PREP(DP_VOLTAGE_MASK, 0)
+#define DP_VOLTAGE_0_6 REG_FIELD_PREP(DP_VOLTAGE_MASK, 1)
+#define DP_VOLTAGE_0_8 REG_FIELD_PREP(DP_VOLTAGE_MASK, 2)
+#define DP_VOLTAGE_1_2 REG_FIELD_PREP(DP_VOLTAGE_MASK, 3)
+#define DP_PRE_EMPHASIS_MASK REG_GENMASK(24, 22)
+#define DP_PRE_EMPHASIS_0 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 0)
+#define DP_PRE_EMPHASIS_3_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 1)
+#define DP_PRE_EMPHASIS_6 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 2)
+#define DP_PRE_EMPHASIS_9_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 3)
+#define DP_PORT_WIDTH_MASK REG_GENMASK(21, 19)
+#define DP_PORT_WIDTH(width) REG_FIELD_PREP(DP_PORT_WIDTH_MASK, (width) - 1)
+#define DP_ENHANCED_FRAMING REG_BIT(18)
+#define EDP_PLL_FREQ_MASK REG_GENMASK(17, 16)
+#define EDP_PLL_FREQ_270MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 0)
+#define EDP_PLL_FREQ_162MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 1)
+#define DP_PORT_REVERSAL REG_BIT(15)
+#define EDP_PLL_ENABLE REG_BIT(14)
+#define DP_CLOCK_OUTPUT_ENABLE REG_BIT(13)
+#define DP_SCRAMBLING_DISABLE REG_BIT(12)
+#define DP_SCRAMBLING_DISABLE_ILK REG_BIT(7)
+#define DP_COLOR_RANGE_16_235 REG_BIT(8)
+#define DP_AUDIO_OUTPUT_ENABLE REG_BIT(6)
+#define DP_SYNC_VS_HIGH REG_BIT(4)
+#define DP_SYNC_HS_HIGH REG_BIT(3)
+#define DP_DETECTED REG_BIT(2)
/*
* Computing GMCH M and N values for the Display Port link
@@ -1811,18 +1775,6 @@
#define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
-/* VBIOS regs */
-#define VGACNTRL _MMIO(0x71400)
-# define VGA_DISP_DISABLE (1 << 31)
-# define VGA_2X_MODE (1 << 30)
-# define VGA_PIPE_B_SELECT (1 << 29)
-
-#define VLV_VGACNTRL _MMIO(VLV_DISPLAY_BASE + 0x71400)
-
-/* Ironlake */
-
-#define CPU_VGACNTRL _MMIO(0x41000)
-
#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
@@ -2783,7 +2735,6 @@
* functionality covered in PCH_PORT_HOTPLUG is split into
* SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
*/
-
#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
#define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
#define SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin) (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
@@ -2863,7 +2814,6 @@
#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
/* transcoder */
-
#define _PCH_TRANS_HTOTAL_A 0xe0000
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
@@ -3794,7 +3744,6 @@ enum skl_power_gate {
/*
* SKL Clocks
*/
-
/* CDCLK_CTL */
#define CDCLK_CTL _MMIO(0x46000)
#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
@@ -4242,6 +4191,11 @@ enum skl_power_gate {
#define MTL_CLKGATE_DIS_TRANS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
+#define _MTL_PIPE_CLKGATE_DIS2_A 0x60114
+#define _MTL_PIPE_CLKGATE_DIS2_B 0x61114
+#define MTL_PIPE_CLKGATE_DIS2(pipe) _MMIO_PIPE(pipe, _MTL_PIPE_CLKGATE_DIS2_A, _MTL_PIPE_CLKGATE_DIS2_B)
+#define MTL_DPFC_GATING_DIS REG_BIT(6)
+
#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index 94a8f902689e..bfe98cb9a038 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -9,76 +9,19 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
-/**
- * REG_BIT() - Prepare a u32 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u32, with compile time checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT(__n) \
- ((u32)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 31))))
-
-/**
- * REG_BIT8() - Prepare a u8 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u8, with compile time checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT8(__n) \
- ((u8)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 7))))
-
-/**
- * REG_GENMASK() - Prepare a continuous u32 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u32, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK(__high, __low) \
- ((u32)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
-
-/**
- * REG_GENMASK64() - Prepare a continuous u64 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK_ULL() to force u64, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
+/*
+ * Wrappers over the generic fixed-width BIT_U*() and GENMASK_U*()
+ * implementations, kept for compatibility with the previous implementation.
*/
-#define REG_GENMASK64(__high, __low) \
- ((u64)(GENMASK_ULL(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 63 || (__low) > (__high)))))
+#define REG_GENMASK(high, low) GENMASK_U32(high, low)
+#define REG_GENMASK64(high, low) GENMASK_U64(high, low)
+#define REG_GENMASK16(high, low) GENMASK_U16(high, low)
+#define REG_GENMASK8(high, low) GENMASK_U8(high, low)
-/**
- * REG_GENMASK8() - Prepare a continuous u8 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u8, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK8(__high, __low) \
- ((u8)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 7 || (__low) > (__high)))))
+#define REG_BIT(n) BIT_U32(n)
+#define REG_BIT64(n) BIT_U64(n)
+#define REG_BIT16(n) BIT_U16(n)
+#define REG_BIT8(n) BIT_U8(n)
/*
* Local integer constant expression version of is_power_of_2().
@@ -143,35 +86,6 @@
*/
#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val))
-/**
- * REG_BIT16() - Prepare a u16 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u16, with compile time
- * checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT16(__n) \
- ((u16)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 15))))
-
-/**
- * REG_GENMASK16() - Prepare a continuous u8 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u16, with compile time
- * checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK16(__high, __low) \
- ((u16)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 15 || (__low) > (__high)))))
/**
* REG_FIELD_PREP16() - Prepare a u16 bitfield value
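
A note on the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() style that the register hunks above convert to: a field is described once by a contiguous mask, and values are packed into or pulled out of that field via the mask rather than via hand-written shift macros. Below is a minimal standalone C sketch of the same semantics with illustrative helper names; the kernel's own helpers live in <linux/bits.h> and <linux/bitfield.h>.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Continuous mask from bit 'high' down to bit 'low', inclusive (GENMASK-like). */
static uint32_t genmask(unsigned int high, unsigned int low)
{
	return ((~0u) >> (31 - high)) & ~((1u << low) - 1u);
}

/* Shift 'val' into the field described by 'mask' (FIELD_PREP-like). */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

/* Extract the field described by 'mask' from 'reg' (FIELD_GET-like). */
static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	/* e.g. a pipe-select field at bits 17:16, as in DP_PIPE_SEL_MASK_CHV above */
	uint32_t pipe_sel_mask = genmask(17, 16);
	uint32_t reg = field_prep(pipe_sel_mask, 2);	/* select pipe 2 */

	assert(field_get(pipe_sel_mask, reg) == 2);
	printf("reg=0x%08x pipe=%u\n", reg, field_get(pipe_sel_mask, reg));
	return 0;
}
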
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 609214231ffc..f7fb40cfdb70 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -40,8 +40,6 @@
struct drm_i915_private;
struct timer_list;
-#define FDO_BUG_URL "https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html"
-
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 76d84cbb8361..d581a9d2c063 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -21,6 +21,7 @@
#include "display/intel_pfit_regs.h"
#include "display/intel_psr_regs.h"
#include "display/intel_sprite_regs.h"
+#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index d40ee1b42110..59bd603e6deb 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -171,6 +171,17 @@ intel_memory_region_by_type(struct drm_i915_private *i915,
return NULL;
}
+bool intel_memory_type_is_local(enum intel_memory_type mem_type)
+{
+ switch (mem_type) {
+ case INTEL_MEMORY_LOCAL:
+ case INTEL_MEMORY_STOLEN_LOCAL:
+ return true;
+ default:
+ return false;
+ }
+}
+
/**
* intel_memory_region_reserve - Reserve a memory range
* @mem: The region for which we want to reserve a range.
@@ -216,7 +227,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
return err;
}
-static const char *region_type_str(u16 type)
+const char *intel_memory_type_str(enum intel_memory_type type)
{
switch (type) {
case INTEL_MEMORY_SYSTEM:
@@ -260,7 +271,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->instance = instance;
snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
- region_type_str(type), instance);
+ intel_memory_type_str(type), instance);
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 5973b6fe13cf..b3b75be9ced5 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -85,6 +85,8 @@ struct intel_memory_region {
void *region_private;
};
+bool intel_memory_type_is_local(enum intel_memory_type mem_type);
+
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
u16 class, u16 instance);
@@ -107,6 +109,7 @@ void intel_memory_regions_driver_release(struct drm_i915_private *i915);
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
enum intel_memory_type mem_type);
+const char *intel_memory_type_str(enum intel_memory_type type);
__printf(2, 3) void
intel_memory_region_set_name(struct intel_memory_region *mem,
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 48836ef52d40..a2894a56e18f 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -7,8 +7,6 @@
#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H
-#include <drm/drm_print.h>
-
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
@@ -16,11 +14,13 @@
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/ref_tracker.h>
-#include <linux/slab.h>
-#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+struct drm_printer;
+struct intel_runtime_pm;
+struct intel_wakeref;
+
typedef struct ref_tracker *intel_wakeref_t;
#define INTEL_REFTRACK_DEAD_COUNT 16
@@ -32,9 +32,6 @@ typedef struct ref_tracker *intel_wakeref_t;
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
-struct intel_runtime_pm;
-struct intel_wakeref;
-
struct intel_wakeref_ops {
int (*get)(struct intel_wakeref *wf);
int (*put)(struct intel_wakeref *wf);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
index 9aae779c4da3..4969d3de2bac 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
@@ -23,6 +23,7 @@ int intel_pxp_gsccs_init(struct intel_pxp *pxp);
int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id);
void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
+bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
#else
static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp)
@@ -34,8 +35,11 @@ static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp)
return 0;
}
-#endif
+static inline bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp)
+{
+ return false;
+}
-bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
+#endif
#endif /*__INTEL_PXP_GSCCS_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index fee76c1d2f45..889281819c5b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -23,7 +23,9 @@
#include <linux/random.h>
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_gsc_fw.h"
#include "i915_driver.h"
@@ -253,11 +255,27 @@ int i915_mock_selftests(void)
int i915_live_selftests(struct pci_dev *pdev)
{
struct drm_i915_private *i915 = pdev_to_i915(pdev);
+ struct intel_uncore *uncore = &i915->uncore;
int err;
+ u32 pg_enable;
+ intel_wakeref_t wakeref;
if (!i915_selftest.live)
return 0;
+ /*
+ * FIXME Disable render powergating; this is a temporary workaround and should
+ * be removed after fixing the real cause of the forcewake timeouts.
+ */
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 00), IP_VER(12, 74))) {
+ pg_enable = intel_uncore_read(uncore, GEN9_PG_ENABLE);
+ if (pg_enable & GEN9_RENDER_PG_ENABLE)
+ intel_uncore_write_fw(uncore, GEN9_PG_ENABLE,
+ pg_enable & ~GEN9_RENDER_PG_ENABLE);
+ }
+ }
+
__wait_gsc_proxy_completed(i915);
__wait_gsc_huc_load_completed(i915);
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index d5ecc68155da..522ad49406ce 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -77,7 +77,7 @@ void timed_fence_fini(struct timed_fence *tf)
if (timer_delete_sync(&tf->timer))
i915_sw_fence_commit(&tf->fence);
- destroy_timer_on_stack(&tf->timer);
+ timer_destroy_on_stack(&tf->timer);
i915_sw_fence_fini(&tf->fence);
}
diff --git a/drivers/gpu/drm/i915/selftests/librapl.c b/drivers/gpu/drm/i915/selftests/librapl.c
index eb03b5b28bad..25b8726b9dff 100644
--- a/drivers/gpu/drm/i915/selftests/librapl.c
+++ b/drivers/gpu/drm/i915/selftests/librapl.c
@@ -22,12 +22,12 @@ u64 librapl_energy_uJ(void)
unsigned long long power;
u32 units;
- if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
+ if (rdmsrq_safe(MSR_RAPL_POWER_UNIT, &power))
return 0;
units = (power & 0x1f00) >> 8;
- if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power))
+ if (rdmsrq_safe(MSR_PP1_ENERGY_STATUS, &power))
return 0;
return (1000000 * power) >> units; /* convert to uJ */
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index 9e310f4099f4..eee5c4f45a43 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -33,8 +33,14 @@ static const char *intel_dram_type_str(enum intel_dram_type type)
DRAM_TYPE_STR(DDR4),
DRAM_TYPE_STR(LPDDR3),
DRAM_TYPE_STR(LPDDR4),
+ DRAM_TYPE_STR(DDR5),
+ DRAM_TYPE_STR(LPDDR5),
+ DRAM_TYPE_STR(GDDR),
+ DRAM_TYPE_STR(GDDR_ECC),
};
+ BUILD_BUG_ON(ARRAY_SIZE(str) != __INTEL_DRAM_TYPE_MAX);
+
if (type >= ARRAY_SIZE(str))
type = INTEL_DRAM_UNKNOWN;
@@ -444,8 +450,6 @@ skl_get_dram_info(struct drm_i915_private *i915)
int ret;
dram_info->type = skl_get_dram_type(i915);
- drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
- intel_dram_type_str(dram_info->type));
ret = skl_dram_get_channels_info(i915);
if (ret)
@@ -560,10 +564,9 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
dram_info->type != type);
drm_dbg_kms(&i915->drm,
- "CH%u DIMM size: %u Gb, width: X%u, ranks: %u, type: %s\n",
+ "CH%u DIMM size: %u Gb, width: X%u, ranks: %u\n",
i - BXT_D_CR_DRP0_DUNIT_START,
- dimm.size, dimm.width, dimm.ranks,
- intel_dram_type_str(type));
+ dimm.size, dimm.width, dimm.ranks);
if (valid_ranks == 0)
valid_ranks = dimm.ranks;
@@ -687,6 +690,10 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915)
drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
dram_info->type = INTEL_DRAM_GDDR;
break;
+ case 9:
+ drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
+ dram_info->type = INTEL_DRAM_GDDR_ECC;
+ break;
default:
MISSING_CASE(val);
return -EINVAL;
@@ -726,6 +733,10 @@ void intel_dram_detect(struct drm_i915_private *i915)
ret = bxt_get_dram_info(i915);
else
ret = skl_get_dram_info(i915);
+
+ drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
+ intel_dram_type_str(dram_info->type));
+
if (ret)
return;
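
The intel_dram_type_str() change above pairs the name table with a BUILD_BUG_ON(ARRAY_SIZE(str) != __INTEL_DRAM_TYPE_MAX) so that adding an enum value without a matching string fails at compile time. A small standalone C sketch of the same idiom, using made-up enum values and _Static_assert in place of BUILD_BUG_ON:

#include <stdio.h>

enum dram_type {
	DRAM_UNKNOWN,
	DRAM_DDR4,
	DRAM_DDR5,
	DRAM_GDDR,
	__DRAM_TYPE_MAX,	/* keep last */
};

static const char * const dram_type_names[] = {
	[DRAM_UNKNOWN]	= "unknown",
	[DRAM_DDR4]	= "DDR4",
	[DRAM_DDR5]	= "DDR5",
	[DRAM_GDDR]	= "GDDR",
};

/* Fails to compile if a new enum value is added without a matching name. */
_Static_assert(sizeof(dram_type_names) / sizeof(dram_type_names[0]) == __DRAM_TYPE_MAX,
	       "dram_type_names[] out of sync with enum dram_type");

static const char *dram_type_str(enum dram_type type)
{
	if ((unsigned int)type >= __DRAM_TYPE_MAX)
		type = DRAM_UNKNOWN;
	return dram_type_names[type];
}

int main(void)
{
	printf("%s\n", dram_type_str(DRAM_GDDR));
	return 0;
}
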
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c
deleted file mode 100644
index 82dc7fbd1a3e..000000000000
--- a/drivers/gpu/drm/i915/soc/intel_pch.c
+++ /dev/null
@@ -1,316 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright 2019 Intel Corporation.
- */
-
-#include "i915_drv.h"
-#include "i915_utils.h"
-#include "intel_pch.h"
-
-#define INTEL_PCH_DEVICE_ID_MASK 0xff80
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
-#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
-#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
-#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
-#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
-#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
-#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
-#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
-#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
-#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
-#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
-#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
-#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
-#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
-#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
-#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
-#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
-#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
-#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
-#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
-#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
-#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
-#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
-#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
-
-/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
-static enum intel_pch
-intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
-{
- switch (id) {
- case INTEL_PCH_IBX_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
- drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) != 5);
- return PCH_IBX;
- case INTEL_PCH_CPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
- return PCH_CPT;
- case INTEL_PCH_PPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
- /* PPT is CPT compatible */
- return PCH_CPT;
- case INTEL_PCH_LPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
- return PCH_LPT_H;
- case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
- return PCH_LPT_LP;
- case INTEL_PCH_WPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
- /* WPT is LPT compatible */
- return PCH_LPT_H;
- case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
- /* WPT is LPT compatible */
- return PCH_LPT_LP;
- case INTEL_PCH_SPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_KBP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- /* KBP is SPT compatible */
- return PCH_SPT;
- case INTEL_PCH_CNP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm,
- "Found Cannon Lake LP PCH (CNP-LP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CMP_DEVICE_ID_TYPE:
- case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv) &&
- !IS_ROCKETLAKE(dev_priv));
- /* CMP is CNP compatible */
- return PCH_CNP;
- case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- /* CMP-V is based on KBP, which is SPT compatible */
- return PCH_SPT;
- case INTEL_PCH_ICP_DEVICE_ID_TYPE:
- case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- return PCH_ICP;
- case INTEL_PCH_MCC_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
- drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
- IS_ELKHARTLAKE(dev_priv)));
- /* MCC is TGP compatible */
- return PCH_TGP;
- case INTEL_PCH_TGP_DEVICE_ID_TYPE:
- case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) &&
- !IS_ROCKETLAKE(dev_priv) &&
- !IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_TGP;
- case INTEL_PCH_JSP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
- IS_ELKHARTLAKE(dev_priv)));
- /* JSP is ICP compatible */
- return PCH_ICP;
- case INTEL_PCH_ADP_DEVICE_ID_TYPE:
- case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
- case INTEL_PCH_ADP3_DEVICE_ID_TYPE:
- case INTEL_PCH_ADP4_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
- !IS_ALDERLAKE_P(dev_priv));
- return PCH_ADP;
- default:
- return PCH_NONE;
- }
-}
-
-static bool intel_is_virt_pch(unsigned short id,
- unsigned short svendor, unsigned short sdevice)
-{
- return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
- id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
- (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
- svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
- sdevice == PCI_SUBDEVICE_ID_QEMU));
-}
-
-static void
-intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
- unsigned short *pch_id, enum intel_pch *pch_type)
-{
- unsigned short id = 0;
-
- /*
- * In a virtualized passthrough environment we can be in a
- * setup where the ISA bridge is not able to be passed through.
- * In this case, a south bridge can be emulated and we have to
- * make an educated guess as to which PCH is really there.
- */
-
- if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
- id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
- else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
- id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
- id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
- else if (IS_ICELAKE(dev_priv))
- id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
- else if (IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv))
- id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
- else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
- id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
- else if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
- id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- else if (GRAPHICS_VER(dev_priv) == 6 || IS_IVYBRIDGE(dev_priv))
- id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
- else if (GRAPHICS_VER(dev_priv) == 5)
- id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
-
- if (id)
- drm_dbg_kms(&dev_priv->drm, "Assuming PCH ID %04x\n", id);
- else
- drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
-
- *pch_type = intel_pch_type(dev_priv, id);
-
- /* Sanity check virtual PCH id */
- if (drm_WARN_ON(&dev_priv->drm,
- id && *pch_type == PCH_NONE))
- id = 0;
-
- *pch_id = id;
-}
-
-void intel_detect_pch(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pch = NULL;
- unsigned short id;
- enum intel_pch pch_type;
-
- /*
- * South display engine on the same PCI device: just assign the fake
- * PCH.
- */
- if (DISPLAY_VER(dev_priv) >= 20) {
- dev_priv->pch_type = PCH_LNL;
- return;
- } else if (IS_BATTLEMAGE(dev_priv) || IS_METEORLAKE(dev_priv)) {
- /*
- * Both north display and south display are on the SoC die.
- * The real PCH (if it even exists) is uninvolved in display.
- */
- dev_priv->pch_type = PCH_MTL;
- return;
- } else if (IS_DG2(dev_priv)) {
- dev_priv->pch_type = PCH_DG2;
- return;
- } else if (IS_DG1(dev_priv)) {
- dev_priv->pch_type = PCH_DG1;
- return;
- }
-
- /*
- * The reason to probe ISA bridge instead of Dev31:Fun0 is to
- * make graphics device passthrough work easy for VMM, that only
- * need to expose ISA bridge to let driver know the real hardware
- * underneath. This is a requirement from virtualization team.
- *
- * In some virtualized environments (e.g. XEN), there is irrelevant
- * ISA bridge in the system. To work reliably, we should scan through
- * all the ISA bridge devices and check for the first match, instead
- * of only checking the first one.
- */
- while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
- if (pch->vendor != PCI_VENDOR_ID_INTEL)
- continue;
-
- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
-
- pch_type = intel_pch_type(dev_priv, id);
- if (pch_type != PCH_NONE) {
- dev_priv->pch_type = pch_type;
- break;
- } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
- pch->subsystem_device)) {
- intel_virt_detect_pch(dev_priv, &id, &pch_type);
- dev_priv->pch_type = pch_type;
- break;
- }
- }
-
- /*
- * Use PCH_NOP (PCH but no South Display) for PCH platforms without
- * display.
- */
- if (pch && !HAS_DISPLAY(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
- "Display disabled, reverting to NOP PCH\n");
- dev_priv->pch_type = PCH_NOP;
- } else if (!pch) {
- if (i915_run_as_guest() && HAS_DISPLAY(dev_priv)) {
- intel_virt_detect_pch(dev_priv, &id, &pch_type);
- dev_priv->pch_type = pch_type;
- } else {
- drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
- }
- }
-
- pci_dev_put(pch);
-}
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.h b/drivers/gpu/drm/i915/soc/intel_pch.h
deleted file mode 100644
index 635aea7a5539..000000000000
--- a/drivers/gpu/drm/i915/soc/intel_pch.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright 2019 Intel Corporation.
- */
-
-#ifndef __INTEL_PCH__
-#define __INTEL_PCH__
-
-struct drm_i915_private;
-
-/*
- * Sorted by south display engine compatibility.
- * If the new PCH comes with a south display engine that is not
- * inherited from the latest item, please do not add it to the
- * end. Instead, add it right after its "parent" PCH.
- */
-enum intel_pch {
- PCH_NOP = -1, /* PCH without south display */
- PCH_NONE = 0, /* No PCH present */
- PCH_IBX, /* Ibexpeak PCH */
- PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
- PCH_LPT_H, /* Lynxpoint/Wildcatpoint H PCH */
- PCH_LPT_LP, /* Lynxpoint/Wildcatpoint LP PCH */
- PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
- PCH_CNP, /* Cannon/Comet Lake PCH */
- PCH_ICP, /* Ice Lake/Jasper Lake PCH */
- PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
- PCH_ADP, /* Alder Lake PCH */
-
- /* Fake PCHs, functionality handled on the same PCI dev */
- PCH_DG1 = 1024,
- PCH_DG2,
- PCH_MTL,
- PCH_LNL,
-};
-
-#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
-#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
-#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
-#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
-#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
-#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
-#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
-#define HAS_PCH_LPT_H(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_H)
-#define HAS_PCH_LPT_LP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_LP)
-#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_H || \
- INTEL_PCH_TYPE(dev_priv) == PCH_LPT_LP)
-#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
-#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
-#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
-
-void intel_detect_pch(struct drm_i915_private *dev_priv);
-
-#endif /* __INTEL_PCH__ */
diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile
index 3d9d4d40fb80..7cca66f00a38 100644
--- a/drivers/gpu/drm/imagination/Makefile
+++ b/drivers/gpu/drm/imagination/Makefile
@@ -12,8 +12,10 @@ powervr-y := \
pvr_fw.o \
pvr_fw_meta.o \
pvr_fw_mips.o \
+ pvr_fw_riscv.o \
pvr_fw_startstop.o \
pvr_fw_trace.o \
+ pvr_fw_util.o \
pvr_gem.o \
pvr_hwrt.o \
pvr_job.o \
diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.c b/drivers/gpu/drm/imagination/pvr_debugfs.c
index 6b77c9b4bde8..c7ce7daaa87a 100644
--- a/drivers/gpu/drm/imagination/pvr_debugfs.c
+++ b/drivers/gpu/drm/imagination/pvr_debugfs.c
@@ -28,9 +28,8 @@ pvr_debugfs_init(struct drm_minor *minor)
struct drm_device *drm_dev = minor->dev;
struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
struct dentry *root = minor->debugfs_root;
- size_t i;
- for (i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
+ for (size_t i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
const struct pvr_debugfs_entry *entry = &pvr_debugfs_entries[i];
struct dentry *dir;
diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c
index 1704c0268589..8b9ba4983c4c 100644
--- a/drivers/gpu/drm/imagination/pvr_device.c
+++ b/drivers/gpu/drm/imagination/pvr_device.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/types.h>
@@ -120,6 +121,21 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev)
return 0;
}
+static int pvr_device_reset_init(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct reset_control *reset;
+
+ reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
+ "failed to get gpu reset line\n");
+
+ pvr_dev->reset = reset;
+
+ return 0;
+}
+
/**
* pvr_device_process_active_queues() - Process all queue related events.
* @pvr_dev: PowerVR device to check
@@ -146,9 +162,61 @@ static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
mutex_unlock(&pvr_dev->queues.lock);
}
+static bool pvr_device_safety_irq_pending(struct pvr_device *pvr_dev)
+{
+ u32 events;
+
+ WARN_ON_ONCE(!pvr_dev->has_safety_events);
+
+ events = pvr_cr_read32(pvr_dev, ROGUE_CR_EVENT_STATUS);
+
+ return (events & ROGUE_CR_EVENT_STATUS_SAFETY_EN) != 0;
+}
+
+static void pvr_device_safety_irq_clear(struct pvr_device *pvr_dev)
+{
+ WARN_ON_ONCE(!pvr_dev->has_safety_events);
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_EVENT_CLEAR,
+ ROGUE_CR_EVENT_CLEAR_SAFETY_EN);
+}
+
+static void pvr_device_handle_safety_events(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ u32 events;
+
+ WARN_ON_ONCE(!pvr_dev->has_safety_events);
+
+ events = pvr_cr_read32(pvr_dev, ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE);
+
+ /* Handle only these events on the host and leave the rest to the FW. */
+ events &= ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN |
+ ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN;
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE, events);
+
+ if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN) {
+ u32 fault_fw = pvr_cr_read32(pvr_dev, ROGUE_CR_FAULT_FW_STATUS);
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_FAULT_FW_CLEAR, fault_fw);
+
+ drm_info(drm_dev, "Safety event: FW fault (mask=0x%08x)\n", fault_fw);
+ }
+
+ if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN) {
+ /*
+ * The watchdog timer is disabled by the driver so this event
+ * should never be fired.
+ */
+ drm_info(drm_dev, "Safety event: Watchdog timeout\n");
+ }
+}
+
static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
{
struct pvr_device *pvr_dev = data;
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
irqreturn_t ret = IRQ_NONE;
/* We are in the threaded handler, we can keep dequeuing events until we
@@ -164,30 +232,76 @@ static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
pvr_device_process_active_queues(pvr_dev);
}
- pm_runtime_mark_last_busy(from_pvr_device(pvr_dev)->dev);
+ pm_runtime_mark_last_busy(drm_dev->dev);
ret = IRQ_HANDLED;
}
- /* Unmask FW irqs before returning, so new interrupts can be received. */
- pvr_fw_irq_enable(pvr_dev);
+ if (pvr_dev->has_safety_events) {
+ int err;
+
+ /*
+ * Ensure the GPU is powered on since some safety events (such
+ * as ECC faults) can happen outside of job submissions, which
+ * are otherwise the only time a power reference is held.
+ */
+ err = pvr_power_get(pvr_dev);
+ if (err) {
+ drm_err_ratelimited(drm_dev,
+ "%s: could not take power reference (%d)\n",
+ __func__, err);
+ return ret;
+ }
+
+ while (pvr_device_safety_irq_pending(pvr_dev)) {
+ pvr_device_safety_irq_clear(pvr_dev);
+ pvr_device_handle_safety_events(pvr_dev);
+
+ ret = IRQ_HANDLED;
+ }
+
+ pvr_power_put(pvr_dev);
+ }
+
return ret;
}
static irqreturn_t pvr_device_irq_handler(int irq, void *data)
{
struct pvr_device *pvr_dev = data;
+ bool safety_irq_pending = false;
+
+ if (pvr_dev->has_safety_events)
+ safety_irq_pending = pvr_device_safety_irq_pending(pvr_dev);
- if (!pvr_fw_irq_pending(pvr_dev))
+ if (!pvr_fw_irq_pending(pvr_dev) && !safety_irq_pending)
return IRQ_NONE; /* Spurious IRQ - ignore. */
- /* Mask the FW interrupts before waking up the thread. Will be unmasked
- * when the thread handler is done processing events.
- */
- pvr_fw_irq_disable(pvr_dev);
return IRQ_WAKE_THREAD;
}
+static void pvr_device_safety_irq_init(struct pvr_device *pvr_dev)
+{
+ u32 num_ecc_rams = 0;
+
+ /*
+ * Safety events are an optional feature of the RogueXE platform. They
+ * are only enabled if at least one of ECC memory or the watchdog timer
+ * is present in HW. While safety events can be generated by other
+ * systems, that will never happen if the above-mentioned hardware is
+ * not present.
+ */
+ if (!PVR_HAS_FEATURE(pvr_dev, roguexe)) {
+ pvr_dev->has_safety_events = false;
+ return;
+ }
+
+ PVR_FEATURE_VALUE(pvr_dev, ecc_rams, &num_ecc_rams);
+
+ pvr_dev->has_safety_events =
+ num_ecc_rams > 0 || PVR_HAS_FEATURE(pvr_dev, watchdog_timer);
+}
+
/**
* pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
* @pvr_dev: Target PowerVR device.
@@ -205,17 +319,25 @@ pvr_device_irq_init(struct pvr_device *pvr_dev)
init_waitqueue_head(&pvr_dev->kccb.rtn_q);
+ pvr_device_safety_irq_init(pvr_dev);
+
pvr_dev->irq = platform_get_irq(plat_dev, 0);
if (pvr_dev->irq < 0)
return pvr_dev->irq;
/* Clear any pending events before requesting the IRQ line. */
pvr_fw_irq_clear(pvr_dev);
- pvr_fw_irq_enable(pvr_dev);
+ if (pvr_dev->has_safety_events)
+ pvr_device_safety_irq_clear(pvr_dev);
+
+ /*
+ * The ONESHOT flag ensures IRQs are masked while the thread handler is
+ * running.
+ */
return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
pvr_device_irq_thread_handler,
- IRQF_SHARED, "gpu", pvr_dev);
+ IRQF_SHARED | IRQF_ONESHOT, "gpu", pvr_dev);
}
/**
@@ -509,6 +631,11 @@ pvr_device_init(struct pvr_device *pvr_dev)
if (err)
return err;
+ /* Get the reset line for the GPU */
+ err = pvr_device_reset_init(pvr_dev);
+ if (err)
+ return err;
+
/* Explicitly power the GPU so we can access control registers before the FW is booted. */
err = pm_runtime_resume_and_get(dev);
if (err)
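
The interrupt rework above drops the manual pvr_fw_irq_disable()/pvr_fw_irq_enable() dance in favour of IRQF_ONESHOT, which keeps the line masked until the threaded handler returns. A condensed kernel-style sketch of that hard/threaded split, with hypothetical my_dev_* names standing in for the driver specifics:

#include <linux/interrupt.h>
#include <linux/types.h>

struct my_dev;					/* hypothetical device type */
bool my_dev_irq_pending(struct my_dev *mdev);	/* hypothetical: reads an IRQ status reg */
void my_dev_handle_events(struct my_dev *mdev);	/* hypothetical: drains the event queue */

static irqreturn_t my_dev_hard_irq(int irq, void *data)
{
	struct my_dev *mdev = data;

	if (!my_dev_irq_pending(mdev))
		return IRQ_NONE;		/* shared line: not ours */

	return IRQ_WAKE_THREAD;			/* defer the real work to the thread */
}

static irqreturn_t my_dev_thread_irq(int irq, void *data)
{
	struct my_dev *mdev = data;

	my_dev_handle_events(mdev);		/* may sleep: runs in a kthread */

	return IRQ_HANDLED;
}

static int my_dev_irq_init(struct my_dev *mdev, int irq)
{
	/* IRQF_ONESHOT: line stays masked until my_dev_thread_irq() completes. */
	return request_threaded_irq(irq, my_dev_hard_irq, my_dev_thread_irq,
				    IRQF_SHARED | IRQF_ONESHOT, "my_dev", mdev);
}
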
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index 6d0dfacb677b..7cb01c38d2a9 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -18,6 +18,7 @@
#include <linux/bits.h>
#include <linux/compiler_attributes.h>
#include <linux/compiler_types.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -131,6 +132,22 @@ struct pvr_device {
*/
struct clk *mem_clk;
+ struct pvr_device_power {
+ struct device **domain_devs;
+ struct device_link **domain_links;
+
+ u32 domain_count;
+ } power;
+
+ /**
+ * @reset: Optional reset line.
+ *
+ * This may be used on some platforms to provide a reset line that needs to be de-asserted
+ * after the power-up procedure. It would also need to be asserted after the power-down
+ * procedure.
+ */
+ struct reset_control *reset;
+
/** @irq: IRQ number. */
int irq;
@@ -300,6 +317,9 @@ struct pvr_device {
* struct pvr_file.
*/
spinlock_t ctx_list_lock;
+
+ /** @has_safety_events: Whether this device can raise safety events. */
+ bool has_safety_events;
};
/**
@@ -728,8 +748,22 @@ pvr_ioctl_union_padding_check(void *instance, size_t union_offset,
__union_size, __member_size); \
})
-#define PVR_FW_PROCESSOR_TYPE_META 0
-#define PVR_FW_PROCESSOR_TYPE_MIPS 1
-#define PVR_FW_PROCESSOR_TYPE_RISCV 2
+/*
+ * These utility functions should more properly be placed in pvr_fw.h, but that
+ * would cause a dependency cycle between that header and this one. Since
+ * they're primarily used in pvr_device.c, let's put them here for now.
+ */
+
+static __always_inline bool
+pvr_fw_irq_pending(struct pvr_device *pvr_dev)
+{
+ return pvr_dev->fw_dev.defs->irq_pending(pvr_dev);
+}
+
+static __always_inline void
+pvr_fw_irq_clear(struct pvr_device *pvr_dev)
+{
+ pvr_dev->fw_dev.defs->irq_clear(pvr_dev);
+}
#endif /* PVR_DEVICE_H */
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index 0639502137b4..b058ec183bb3 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -44,6 +44,7 @@
* This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies:
*
* * AXE-1-16M (found in Texas Instruments AM62)
+ * * BXS-4-64 MC1 (found in Texas Instruments J721S2/AM68)
*/
/**
@@ -1411,6 +1412,10 @@ pvr_probe(struct platform_device *plat_dev)
platform_set_drvdata(plat_dev, drm_dev);
+ err = pvr_power_domains_init(pvr_dev);
+ if (err)
+ return err;
+
init_rwsem(&pvr_dev->reset_sem);
pvr_context_device_init(pvr_dev);
@@ -1450,6 +1455,8 @@ err_watchdog_fini:
err_context_fini:
pvr_context_device_fini(pvr_dev);
+ pvr_power_domains_fini(pvr_dev);
+
return err;
}
@@ -1470,9 +1477,17 @@ static void pvr_remove(struct platform_device *plat_dev)
pvr_watchdog_fini(pvr_dev);
pvr_queue_device_fini(pvr_dev);
pvr_context_device_fini(pvr_dev);
+ pvr_power_domains_fini(pvr_dev);
}
static const struct of_device_id dt_match[] = {
+ { .compatible = "img,img-rogue", .data = NULL },
+
+ /*
+ * This legacy compatible string was introduced early on before the more generic
+ * "img,img-rogue" was added. Keep it around here for compatibility, but never use
+ * "img,img-axe" in new devicetrees.
+ */
{ .compatible = "img,img-axe", .data = NULL },
{}
};
@@ -1498,3 +1513,4 @@ MODULE_DESCRIPTION(PVR_DRIVER_DESC);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");
+MODULE_FIRMWARE("powervr/rogue_36.53.104.796_v1.fw");
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c
index 5e51bc980751..5228e214491c 100644
--- a/drivers/gpu/drm/imagination/pvr_free_list.c
+++ b/drivers/gpu/drm/imagination/pvr_free_list.c
@@ -237,11 +237,10 @@ pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list,
dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
u64 dma_pfn = dma_addr >>
ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
- u32 dma_addr_offset;
BUILD_BUG_ON(ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE > PAGE_SIZE);
- for (dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
+ for (u32 dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
dma_addr_offset += ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE) {
WARN_ON_ONCE(dma_pfn >> 32);
diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c
index 3debc9870a82..b2f8cba77346 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.c
+++ b/drivers/gpu/drm/imagination/pvr_fw.c
@@ -50,9 +50,8 @@ pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id)
{
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
- u32 entry;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (u32 entry = 0; entry < num_layout_entries; entry++) {
if (layout_entries[entry].id == id)
return &layout_entries[entry];
}
@@ -65,9 +64,8 @@ pvr_fw_find_private_data(struct pvr_device *pvr_dev)
{
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
- u32 entry;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (u32 entry = 0; entry < num_layout_entries; entry++) {
if (layout_entries[entry].id == META_PRIVATE_DATA ||
layout_entries[entry].id == MIPS_PRIVATE_DATA ||
layout_entries[entry].id == RISCV_PRIVATE_DATA)
@@ -97,7 +95,6 @@ pvr_fw_validate(struct pvr_device *pvr_dev)
const u8 *fw = firmware->data;
u32 fw_offset = firmware->size - SZ_4K;
u32 layout_table_size;
- u32 entry;
if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE))
return -EINVAL;
@@ -144,7 +141,7 @@ pvr_fw_validate(struct pvr_device *pvr_dev)
return -EINVAL;
layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset];
- for (entry = 0; entry < header->layout_entry_num; entry++) {
+ for (u32 entry = 0; entry < header->layout_entry_num; entry++) {
u32 start_addr = layout_entries[entry].base_addr;
u32 end_addr = start_addr + layout_entries[entry].alloc_size;
@@ -233,13 +230,12 @@ pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
u32 end_addr = addr + size;
- int entry = 0;
/* Ensure requested range is not zero, and size is not causing addr to overflow. */
if (end_addr <= addr)
return -EINVAL;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (int entry = 0; entry < num_layout_entries; entry++) {
u32 entry_start_addr = layout_entries[entry].base_addr;
u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size;
@@ -441,6 +437,9 @@ fw_runtime_cfg_init(void *cpu_ptr, void *priv)
runtime_cfg->active_pm_latency_persistant = true;
WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters,
&runtime_cfg->default_dusts_num_init) != 0);
+
+ /* Keep watchdog timer disabled. */
+ runtime_cfg->wdg_period_us = 0;
}
static void
@@ -663,7 +662,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
return PTR_ERR(fw_code_ptr);
}
- if (pvr_dev->fw_dev.defs->has_fixed_data_addr()) {
+ if (pvr_dev->fw_dev.defs->has_fixed_data_addr) {
u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask;
fw_data_ptr =
@@ -732,7 +731,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
fw_mem->core_data, fw_mem->core_code_alloc_size);
if (err)
- goto err_free_fw_core_data_obj;
+ goto err_free_kdata;
memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size);
memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size);
@@ -742,10 +741,14 @@ pvr_fw_process(struct pvr_device *pvr_dev)
memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size);
/* We're finished with the firmware section memory on the CPU, unmap. */
- if (fw_core_data_ptr)
+ if (fw_core_data_ptr) {
pvr_fw_object_vunmap(fw_mem->core_data_obj);
- if (fw_core_code_ptr)
+ fw_core_data_ptr = NULL;
+ }
+ if (fw_core_code_ptr) {
pvr_fw_object_vunmap(fw_mem->core_code_obj);
+ fw_core_code_ptr = NULL;
+ }
pvr_fw_object_vunmap(fw_mem->data_obj);
fw_data_ptr = NULL;
pvr_fw_object_vunmap(fw_mem->code_obj);
@@ -753,7 +756,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
err = pvr_fw_create_fwif_connection_ctl(pvr_dev);
if (err)
- goto err_free_fw_core_data_obj;
+ goto err_free_kdata;
return 0;
@@ -763,13 +766,16 @@ err_free_kdata:
kfree(fw_mem->data);
kfree(fw_mem->code);
-err_free_fw_core_data_obj:
if (fw_core_data_ptr)
- pvr_fw_object_unmap_and_destroy(fw_mem->core_data_obj);
+ pvr_fw_object_vunmap(fw_mem->core_data_obj);
+ if (fw_mem->core_data_obj)
+ pvr_fw_object_destroy(fw_mem->core_data_obj);
err_free_fw_core_code_obj:
if (fw_core_code_ptr)
- pvr_fw_object_unmap_and_destroy(fw_mem->core_code_obj);
+ pvr_fw_object_vunmap(fw_mem->core_code_obj);
+ if (fw_mem->core_code_obj)
+ pvr_fw_object_destroy(fw_mem->core_code_obj);
err_free_fw_data_obj:
if (fw_data_ptr)
@@ -836,6 +842,12 @@ pvr_fw_cleanup(struct pvr_device *pvr_dev)
struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
pvr_fw_fini_fwif_connection_ctl(pvr_dev);
+
+ kfree(fw_mem->core_data);
+ kfree(fw_mem->core_code);
+ kfree(fw_mem->data);
+ kfree(fw_mem->code);
+
if (fw_mem->core_code_obj)
pvr_fw_object_destroy(fw_mem->core_code_obj);
if (fw_mem->core_data_obj)
@@ -926,18 +938,22 @@ pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev)
int
pvr_fw_init(struct pvr_device *pvr_dev)
{
+ static const struct pvr_fw_defs *fw_defs[PVR_FW_PROCESSOR_TYPE_COUNT] = {
+ [PVR_FW_PROCESSOR_TYPE_META] = &pvr_fw_defs_meta,
+ [PVR_FW_PROCESSOR_TYPE_MIPS] = &pvr_fw_defs_mips,
+ [PVR_FW_PROCESSOR_TYPE_RISCV] = &pvr_fw_defs_riscv,
+ };
+
u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb.rtn);
struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
int err;
- if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_META)
- fw_dev->defs = &pvr_fw_defs_meta;
- else if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_MIPS)
- fw_dev->defs = &pvr_fw_defs_mips;
- else
+ if (fw_dev->processor_type >= PVR_FW_PROCESSOR_TYPE_COUNT)
return -EINVAL;
+ fw_dev->defs = fw_defs[fw_dev->processor_type];
+
err = fw_dev->defs->init(pvr_dev);
if (err)
return err;
@@ -1443,6 +1459,15 @@ void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset,
*fw_addr_out = pvr_dev->fw_dev.defs->get_fw_addr_with_offset(fw_obj, offset);
}
+u64
+pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ return fw_dev->fw_heap_info.gpu_addr + fw_obj->fw_addr_offset;
+}
+
/*
* pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW
* structures
diff --git a/drivers/gpu/drm/imagination/pvr_fw.h b/drivers/gpu/drm/imagination/pvr_fw.h
index b7966bd574a9..1404dd492d7c 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.h
+++ b/drivers/gpu/drm/imagination/pvr_fw.h
@@ -167,47 +167,30 @@ struct pvr_fw_defs {
int (*wrapper_init)(struct pvr_device *pvr_dev);
/**
- * @has_fixed_data_addr:
+ * @irq_pending: Check interrupt status register for pending interrupts.
*
- * Called to check if firmware fixed data must be loaded at the address given by the
- * firmware layout table.
+ * @pvr_dev: Target PowerVR device.
*
* This function is mandatory.
+ */
+ bool (*irq_pending)(struct pvr_device *pvr_dev);
+
+ /**
+ * @irq_clear: Clear pending interrupts.
*
- * Returns:
- * * %true if firmware fixed data must be loaded at the address given by the firmware
- * layout table.
- * * %false otherwise.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function is mandatory.
*/
- bool (*has_fixed_data_addr)(void);
+ void (*irq_clear)(struct pvr_device *pvr_dev);
/**
- * @irq: FW Interrupt information.
+ * @has_fixed_data_addr: Specify whether the firmware fixed data must be loaded at the
+ * address given by the firmware layout table.
*
- * Those are processor dependent, and should be initialized by the
- * processor backend in pvr_fw_funcs::init().
+ * This value is mandatory.
*/
- struct {
- /** @enable_reg: FW interrupt enable register. */
- u32 enable_reg;
-
- /** @status_reg: FW interrupt status register. */
- u32 status_reg;
-
- /**
- * @clear_reg: FW interrupt clear register.
- *
- * If @status_reg == @clear_reg, we clear by write a bit to zero,
- * otherwise we clear by writing a bit to one.
- */
- u32 clear_reg;
-
- /** @event_mask: Bitmask of events to listen for. */
- u32 event_mask;
-
- /** @clear_mask: Value to write to the clear_reg in order to clear FW IRQs. */
- u32 clear_mask;
- } irq;
+ bool has_fixed_data_addr;
};
/**
@@ -400,26 +383,16 @@ struct pvr_fw_device {
} fw_objs;
};
-#define pvr_fw_irq_read_reg(pvr_dev, name) \
- pvr_cr_read32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg)
-
-#define pvr_fw_irq_write_reg(pvr_dev, name, value) \
- pvr_cr_write32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg, value)
-
-#define pvr_fw_irq_pending(pvr_dev) \
- (pvr_fw_irq_read_reg(pvr_dev, status) & (pvr_dev)->fw_dev.defs->irq.event_mask)
-
-#define pvr_fw_irq_clear(pvr_dev) \
- pvr_fw_irq_write_reg(pvr_dev, clear, (pvr_dev)->fw_dev.defs->irq.clear_mask)
-
-#define pvr_fw_irq_enable(pvr_dev) \
- pvr_fw_irq_write_reg(pvr_dev, enable, (pvr_dev)->fw_dev.defs->irq.event_mask)
-
-#define pvr_fw_irq_disable(pvr_dev) \
- pvr_fw_irq_write_reg(pvr_dev, enable, 0)
+enum pvr_fw_processor_type {
+ PVR_FW_PROCESSOR_TYPE_META = 0,
+ PVR_FW_PROCESSOR_TYPE_MIPS,
+ PVR_FW_PROCESSOR_TYPE_RISCV,
+ PVR_FW_PROCESSOR_TYPE_COUNT,
+};
extern const struct pvr_fw_defs pvr_fw_defs_meta;
extern const struct pvr_fw_defs pvr_fw_defs_mips;
+extern const struct pvr_fw_defs pvr_fw_defs_riscv;
int pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev);
int pvr_fw_init(struct pvr_device *pvr_dev);
@@ -506,4 +479,18 @@ pvr_fw_object_get_fw_addr(struct pvr_fw_object *fw_obj, u32 *fw_addr_out)
pvr_fw_object_get_fw_addr_offset(fw_obj, 0, fw_addr_out);
}
+u64
+pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj);
+
+static __always_inline size_t
+pvr_fw_obj_get_object_size(struct pvr_fw_object *fw_obj)
+{
+ return pvr_gem_object_size(fw_obj->gem);
+}
+
+/* Util functions defined in pvr_fw_util.c. These are intended for use in pvr_fw_<arch>.c files. */
+int
+pvr_fw_process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
+ u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr);
+
#endif /* PVR_FW_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
index 6d13864851fc..60db3668ad3c 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -370,13 +370,12 @@ configure_seg_mmu(struct pvr_device *pvr_dev, u32 **boot_conf_ptr)
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
u64 seg_out_addr_top;
- u32 i;
seg_out_addr_top =
ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV,
ROGUE_FW_SEGMMU_META_BIFDM_ID);
- for (i = 0; i < num_layout_entries; i++) {
+ for (u32 i = 0; i < num_layout_entries; i++) {
/*
* FW code is using the bootloader segment which is already
* configured on boot. FW coremem code and data don't use the
@@ -534,9 +533,17 @@ pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
}
static bool
-pvr_meta_has_fixed_data_addr(void)
+pvr_meta_irq_pending(struct pvr_device *pvr_dev)
{
- return false;
+ return pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVIRQSTATUS) &
+ ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN;
+}
+
+static void
+pvr_meta_irq_clear(struct pvr_device *pvr_dev)
+{
+ pvr_cr_write32(pvr_dev, ROGUE_CR_META_SP_MSLVIRQSTATUS,
+ ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK);
}
const struct pvr_fw_defs pvr_fw_defs_meta = {
@@ -546,12 +553,7 @@ const struct pvr_fw_defs pvr_fw_defs_meta = {
.vm_unmap = pvr_meta_vm_unmap,
.get_fw_addr_with_offset = pvr_meta_get_fw_addr_with_offset,
.wrapper_init = pvr_meta_wrapper_init,
- .has_fixed_data_addr = pvr_meta_has_fixed_data_addr,
- .irq = {
- .enable_reg = ROGUE_CR_META_SP_MSLVIRQENABLE,
- .status_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
- .clear_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
- .event_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN,
- .clear_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK,
- },
+ .irq_pending = pvr_meta_irq_pending,
+ .irq_clear = pvr_meta_irq_clear,
+ .has_fixed_data_addr = false,
};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.c b/drivers/gpu/drm/imagination/pvr_fw_mips.c
index 0bed0257e2ab..6914fc46db50 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.c
@@ -8,7 +8,6 @@
#include "pvr_rogue_mips.h"
#include "pvr_vm_mips.h"
-#include <linux/elf.h>
#include <linux/err.h>
#include <linux/types.h>
@@ -16,60 +15,6 @@
#define ROGUE_FW_HEAP_MIPS_SHIFT 24 /* 16 MB */
#define ROGUE_FW_HEAP_MIPS_RESERVED_SIZE SZ_1M
-/**
- * process_elf_command_stream() - Process ELF firmware image and populate
- * firmware sections
- * @pvr_dev: Device pointer.
- * @fw: Pointer to firmware image.
- * @fw_code_ptr: Pointer to FW code section.
- * @fw_data_ptr: Pointer to FW data section.
- * @fw_core_code_ptr: Pointer to FW coremem code section.
- * @fw_core_data_ptr: Pointer to FW coremem data section.
- *
- * Returns :
- * * 0 on success, or
- * * -EINVAL on any error in ELF command stream.
- */
-static int
-process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
- u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
-{
- struct elf32_hdr *header = (struct elf32_hdr *)fw;
- struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
- struct drm_device *drm_dev = from_pvr_device(pvr_dev);
- u32 entry;
- int err;
-
- for (entry = 0; entry < header->e_phnum; entry++, program_header++) {
- void *write_addr;
-
- /* Only consider loadable entries in the ELF segment table */
- if (program_header->p_type != PT_LOAD)
- continue;
-
- err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr,
- program_header->p_memsz, fw_code_ptr, fw_data_ptr,
- fw_core_code_ptr, fw_core_data_ptr, &write_addr);
- if (err) {
- drm_err(drm_dev,
- "Addr 0x%x (size: %d) not found in any firmware segment",
- program_header->p_vaddr, program_header->p_memsz);
- return err;
- }
-
- /* Write to FW allocation only if available */
- if (write_addr) {
- memcpy(write_addr, fw + program_header->p_offset,
- program_header->p_filesz);
-
- memset((u8 *)write_addr + program_header->p_filesz, 0,
- program_header->p_memsz - program_header->p_filesz);
- }
- }
-
- return 0;
-}
-
static int
pvr_mips_init(struct pvr_device *pvr_dev)
{
@@ -97,11 +42,10 @@ pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
const struct pvr_fw_layout_entry *stack_entry;
struct rogue_mipsfw_boot_data *boot_data;
dma_addr_t dma_addr;
- u32 page_nr;
int err;
- err = process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
- fw_core_data_ptr);
+ err = pvr_fw_process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr);
if (err)
return err;
@@ -132,7 +76,7 @@ pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
boot_data->reg_base = pvr_dev->regs_resource->start;
- for (page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) {
+ for (u32 page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) {
/* Firmware expects 4k pages, but host page size might be different. */
u32 src_page_nr = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) >> PAGE_SHIFT;
u32 page_offset = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) & ~PAGE_MASK;
@@ -228,9 +172,17 @@ pvr_mips_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
}
static bool
-pvr_mips_has_fixed_data_addr(void)
+pvr_mips_irq_pending(struct pvr_device *pvr_dev)
+{
+ return pvr_cr_read32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS) &
+ ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN;
+}
+
+static void
+pvr_mips_irq_clear(struct pvr_device *pvr_dev)
{
- return true;
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR,
+ ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN);
}
const struct pvr_fw_defs pvr_fw_defs_mips = {
@@ -241,12 +193,7 @@ const struct pvr_fw_defs pvr_fw_defs_mips = {
.vm_unmap = pvr_vm_mips_unmap,
.get_fw_addr_with_offset = pvr_mips_get_fw_addr_with_offset,
.wrapper_init = pvr_mips_wrapper_init,
- .has_fixed_data_addr = pvr_mips_has_fixed_data_addr,
- .irq = {
- .enable_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE,
- .status_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS,
- .clear_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR,
- .event_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN,
- .clear_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN,
- },
+ .irq_pending = pvr_mips_irq_pending,
+ .irq_clear = pvr_mips_irq_clear,
+ .has_fixed_data_addr = true,
};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_riscv.c b/drivers/gpu/drm/imagination/pvr_fw_riscv.c
new file mode 100644
index 000000000000..fc13d483be9a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_riscv.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2024 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_info.h"
+#include "pvr_fw_mips.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_rogue_riscv.h"
+#include "pvr_vm.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define ROGUE_FW_HEAP_RISCV_SHIFT 25 /* 32 MB */
+#define ROGUE_FW_HEAP_RISCV_SIZE (1u << ROGUE_FW_HEAP_RISCV_SHIFT)
+
+static int
+pvr_riscv_wrapper_init(struct pvr_device *pvr_dev)
+{
+ const u64 common_opts =
+ ((u64)(ROGUE_FW_HEAP_RISCV_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT)
+ << ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT) |
+ ((u64)MMU_CONTEXT_MAPPING_FWPRIV
+ << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT);
+
+ u64 code_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.code_obj);
+ u64 data_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.data_obj);
+
+ /* This condition allows us to OR the addresses into the register directly. */
+ static_assert(ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT ==
+ ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT);
+
+ WARN_ON(code_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK);
+ WARN_ON(data_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK);
+
+ pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_CODE),
+ code_addr | common_opts | ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN);
+
+ pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_DATA),
+ data_addr | common_opts |
+ ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN);
+
+ /* Garten IDLE bit controlled by RISC-V. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG,
+ ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+
+ return 0;
+}
+
+struct rogue_riscv_fw_boot_data {
+ u64 coremem_code_dev_vaddr;
+ u64 coremem_data_dev_vaddr;
+ u32 coremem_code_fw_addr;
+ u32 coremem_data_fw_addr;
+ u32 coremem_code_size;
+ u32 coremem_data_size;
+ u32 flags;
+ u32 reserved;
+};
+
+static int
+pvr_riscv_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
+ u32 core_code_alloc_size)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+ struct rogue_riscv_fw_boot_data *boot_data;
+ int err;
+
+ err = pvr_fw_process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr);
+ if (err)
+ goto err_out;
+
+ boot_data = (struct rogue_riscv_fw_boot_data *)fw_data_ptr;
+
+ if (fw_mem->core_code_obj) {
+ boot_data->coremem_code_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_code_obj);
+ pvr_fw_object_get_fw_addr(fw_mem->core_code_obj, &boot_data->coremem_code_fw_addr);
+ boot_data->coremem_code_size = pvr_fw_obj_get_object_size(fw_mem->core_code_obj);
+ }
+
+ if (fw_mem->core_data_obj) {
+ boot_data->coremem_data_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_data_obj);
+ pvr_fw_object_get_fw_addr(fw_mem->core_data_obj, &boot_data->coremem_data_fw_addr);
+ boot_data->coremem_data_size = pvr_fw_obj_get_object_size(fw_mem->core_data_obj);
+ }
+
+ return 0;
+
+err_out:
+ return err;
+}
+
+static int
+pvr_riscv_init(struct pvr_device *pvr_dev)
+{
+ pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_RISCV_SHIFT, 0);
+
+ return 0;
+}
+
+static u32
+pvr_riscv_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
+{
+ u32 fw_addr = fw_obj->fw_addr_offset + offset;
+
+ /* RISC-V cacheability is determined by address. */
+ if (fw_obj->gem->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
+ fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_UNCACHED_DATA);
+ else
+ fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_CACHED_DATA);
+
+ return fw_addr;
+}
+
+static int
+pvr_riscv_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ return pvr_vm_map(pvr_dev->kernel_vm_ctx, pvr_obj, 0, fw_obj->fw_mm_node.start,
+ pvr_gem_object_size(pvr_obj));
+}
+
+static void
+pvr_riscv_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
+ fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
+}
+
+static bool
+pvr_riscv_irq_pending(struct pvr_device *pvr_dev)
+{
+ return pvr_cr_read32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_STATUS) &
+ ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN;
+}
+
+static void
+pvr_riscv_irq_clear(struct pvr_device *pvr_dev)
+{
+ pvr_cr_write32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_CLEAR,
+ ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN);
+}
+
+const struct pvr_fw_defs pvr_fw_defs_riscv = {
+ .init = pvr_riscv_init,
+ .fw_process = pvr_riscv_fw_process,
+ .vm_map = pvr_riscv_vm_map,
+ .vm_unmap = pvr_riscv_vm_unmap,
+ .get_fw_addr_with_offset = pvr_riscv_get_fw_addr_with_offset,
+ .wrapper_init = pvr_riscv_wrapper_init,
+ .irq_pending = pvr_riscv_irq_pending,
+ .irq_clear = pvr_riscv_irq_clear,
+ .has_fixed_data_addr = false,
+};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_startstop.c b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
index 36cec227cfe3..dcbb9903e791 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_startstop.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
@@ -49,6 +49,14 @@ rogue_bif_init(struct pvr_device *pvr_dev)
pvr_cr_write64(pvr_dev, BIF_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV),
pc_addr);
+
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV) {
+ pc_addr = (((u64)pc_dma_addr >> ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT)
+ << ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) &
+ ~ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK;
+
+ pvr_cr_write64(pvr_dev, FWCORE_MEM_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV), pc_addr);
+ }
}
static int
@@ -114,6 +122,9 @@ pvr_fw_start(struct pvr_device *pvr_dev)
(void)pvr_cr_read32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE); /* Fence write */
}
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV)
+ pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 0);
+
/* Set Rogue in soft-reset. */
pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask);
if (has_reset2)
@@ -167,6 +178,12 @@ pvr_fw_start(struct pvr_device *pvr_dev)
/* ... and afterwards. */
udelay(3);
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV) {
+ /* Boot the FW. */
+ pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 1);
+ udelay(3);
+ }
+
return 0;
err_reset:
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 5dbb636d7d4f..a1098b521485 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -21,7 +21,6 @@ tracebuf_ctrl_init(void *cpu_ptr, void *priv)
{
struct rogue_fwif_tracebuf *tracebuf_ctrl = cpu_ptr;
struct pvr_fw_trace *fw_trace = priv;
- u32 thread_nr;
tracebuf_ctrl->tracebuf_size_in_dwords = ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
tracebuf_ctrl->tracebuf_flags = 0;
@@ -31,7 +30,7 @@ tracebuf_ctrl_init(void *cpu_ptr, void *priv)
else
tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct rogue_fwif_tracebuf_space *tracebuf_space =
&tracebuf_ctrl->tracebuf[thread_nr];
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
@@ -48,10 +47,9 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
- u32 thread_nr;
int err;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
trace_buffer->buf =
@@ -88,7 +86,7 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
BUILD_BUG_ON(ARRAY_SIZE(fw_trace->tracebuf_ctrl->tracebuf) !=
ARRAY_SIZE(fw_trace->buffers));
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct rogue_fwif_tracebuf_space *tracebuf_space =
&fw_trace->tracebuf_ctrl->tracebuf[thread_nr];
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
@@ -99,7 +97,7 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
return 0;
err_free_buf:
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
if (trace_buffer->buf)
@@ -112,9 +110,8 @@ err_free_buf:
void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
- u32 thread_nr;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
@@ -122,8 +119,6 @@ void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj);
}
-#if defined(CONFIG_DEBUG_FS)
-
/**
* update_logtype() - Send KCCB command to trigger FW to update logtype
* @pvr_dev: Target PowerVR device
@@ -184,9 +179,7 @@ struct pvr_fw_trace_seq_data {
static u32 find_sfid(u32 id)
{
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
+ for (u32 i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
if (stid_fmts[i].id == id)
return i;
}
@@ -285,12 +278,11 @@ static void fw_trace_get_first(struct pvr_fw_trace_seq_data *trace_seq_data)
static void *fw_trace_seq_start(struct seq_file *s, loff_t *pos)
{
struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
- u32 i;
/* Reset trace index, then advance to *pos. */
fw_trace_get_first(trace_seq_data);
- for (i = 0; i < *pos; i++) {
+ for (u32 i = 0; i < *pos; i++) {
if (!fw_trace_get_next(trace_seq_data))
return NULL;
}
@@ -447,7 +439,7 @@ static const struct file_operations pvr_fw_trace_fops = {
void
pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask, u32 new_mask)
{
- if (old_mask != new_mask)
+ if (IS_ENABLED(CONFIG_DEBUG_FS) && old_mask != new_mask)
update_logtype(pvr_dev, new_mask);
}
@@ -455,12 +447,14 @@ void
pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
- u32 thread_nr;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10,
"The filename buffer is only large enough for a single-digit thread count");
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
char filename[8];
snprintf(filename, ARRAY_SIZE(filename), "trace_%u", thread_nr);
@@ -469,4 +463,3 @@ pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
&pvr_fw_trace_fops);
}
}
-#endif
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.h b/drivers/gpu/drm/imagination/pvr_fw_trace.h
index 0074d2b18da0..1d0ef937427a 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.h
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.h
@@ -65,7 +65,6 @@ struct pvr_fw_trace {
int pvr_fw_trace_init(struct pvr_device *pvr_dev);
void pvr_fw_trace_fini(struct pvr_device *pvr_dev);
-#if defined(CONFIG_DEBUG_FS)
/* Forward declaration from <linux/dcache.h>. */
struct dentry;
@@ -73,6 +72,5 @@ void pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask,
u32 new_mask);
void pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir);
-#endif /* defined(CONFIG_DEBUG_FS) */
#endif /* PVR_FW_TRACE_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_util.c b/drivers/gpu/drm/imagination/pvr_fw_util.c
new file mode 100644
index 000000000000..377fe72d86b8
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_util.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2024 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+#include <linux/elf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/**
+ * pvr_fw_process_elf_command_stream() - Process ELF firmware image and populate
+ * firmware sections
+ * @pvr_dev: Device pointer.
+ * @fw: Pointer to firmware image.
+ * @fw_code_ptr: Pointer to FW code section.
+ * @fw_data_ptr: Pointer to FW data section.
+ * @fw_core_code_ptr: Pointer to FW coremem code section.
+ * @fw_core_data_ptr: Pointer to FW coremem data section.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -EINVAL on any error in the ELF command stream.
+ */
+int
+pvr_fw_process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr,
+ u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
+{
+ struct elf32_hdr *header = (struct elf32_hdr *)fw;
+ struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ int err;
+
+ for (u32 entry = 0; entry < header->e_phnum; entry++, program_header++) {
+ void *write_addr;
+
+ /* Only consider loadable entries in the ELF segment table */
+ if (program_header->p_type != PT_LOAD)
+ continue;
+
+ err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr,
+ program_header->p_memsz, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr, &write_addr);
+ if (err) {
+ drm_err(drm_dev,
+ "Addr 0x%x (size: %d) not found in any firmware segment",
+ program_header->p_vaddr, program_header->p_memsz);
+ return err;
+ }
+
+ /* Write to FW allocation only if available */
+ if (write_addr) {
+ memcpy(write_addr, fw + program_header->p_offset,
+ program_header->p_filesz);
+
+ memset((u8 *)write_addr + program_header->p_filesz, 0,
+ program_header->p_memsz - program_header->p_filesz);
+ }
+ }
+
+ return 0;
+}
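
For a loadable segment whose p_filesz is smaller than its p_memsz, the tail of the segment is zero-initialised (BSS-style), which is why the loop above splits the work into a copy and a memset. With hypothetical segment values it reduces to:

/* p_offset = 0x1000, p_filesz = 0x300, p_memsz = 0x400 (hypothetical) */
memcpy(write_addr, fw + 0x1000, 0x300);       /* initialised part of the segment */
memset((u8 *)write_addr + 0x300, 0, 0x100);   /* zero-fill the remaining 0x100 bytes */
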
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
index 6a8c81fe8c1e..a66cf082af24 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.c
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -19,6 +19,7 @@
#include <linux/log2.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
+#include <linux/property.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
@@ -76,8 +77,6 @@ pvr_gem_object_flags_validate(u64 flags)
DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS),
};
- int i;
-
/*
* Check for bits set in undefined regions. Reserved regions refer to
* options that can only be set by the kernel. These are explicitly
@@ -91,7 +90,7 @@ pvr_gem_object_flags_validate(u64 flags)
* Check for all combinations of flags marked as invalid in the array
* above.
*/
- for (i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
+ for (int i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
u64 combo = invalid_combinations[i];
if ((flags & combo) == combo)
@@ -203,7 +202,7 @@ pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj)
dma_resv_lock(obj->resv, NULL);
- err = drm_gem_shmem_vmap(shmem_obj, &map);
+ err = drm_gem_shmem_vmap_locked(shmem_obj, &map);
if (err)
goto err_unlock;
@@ -257,7 +256,7 @@ pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj)
dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
}
- drm_gem_shmem_vunmap(shmem_obj, &map);
+ drm_gem_shmem_vunmap_locked(shmem_obj, &map);
dma_resv_unlock(obj->resv);
}
@@ -336,6 +335,7 @@ struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t
struct pvr_gem_object *
pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
struct drm_gem_shmem_object *shmem_obj;
struct pvr_gem_object *pvr_obj;
struct sg_table *sgt;
@@ -345,7 +345,10 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
if (size == 0 || !pvr_gem_object_flags_validate(flags))
return ERR_PTR(-EINVAL);
- shmem_obj = drm_gem_shmem_create(from_pvr_device(pvr_dev), size);
+ if (device_get_dma_attr(drm_dev->dev) == DEV_DMA_COHERENT)
+ flags |= PVR_BO_CPU_CACHED;
+
+ shmem_obj = drm_gem_shmem_create(drm_dev, size);
if (IS_ERR(shmem_obj))
return ERR_CAST(shmem_obj);
@@ -360,8 +363,7 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
goto err_shmem_object_free;
}
- dma_sync_sgtable_for_device(shmem_obj->base.dev->dev, sgt,
- DMA_BIDIRECTIONAL);
+ dma_sync_sgtable_for_device(drm_dev->dev, sgt, DMA_BIDIRECTIONAL);
/*
* Do this last because pvr_gem_object_zero() requires a fully
diff --git a/drivers/gpu/drm/imagination/pvr_gem.h b/drivers/gpu/drm/imagination/pvr_gem.h
index e0e5ea509a2e..c99f30cc6208 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.h
+++ b/drivers/gpu/drm/imagination/pvr_gem.h
@@ -44,8 +44,10 @@ struct pvr_file;
* Bits not defined anywhere are "undefined".
*
* CPU mapping options
- * :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set this
- * flag to override this behaviour and map the object cached.
+ * :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set
+ * this flag to override this behaviour and map the object cached. If the "dma-coherent"
+ * property is present in the devicetree, all allocations will be mapped as if this flag were
+ * set. This does not require any additional consideration at allocation time.
*
* Firmware options
* :PVR_BO_FW_NO_CLEAR_ON_RESET: By default, all FW objects are cleared and reinitialised on hard
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.c b/drivers/gpu/drm/imagination/pvr_hwrt.c
index 54f88d6c01e5..dc0c25fa1847 100644
--- a/drivers/gpu/drm/imagination/pvr_hwrt.c
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.c
@@ -44,13 +44,12 @@ hwrt_init_kernel_structure(struct pvr_file *pvr_file,
{
struct pvr_device *pvr_dev = pvr_file->pvr_dev;
int err;
- int i;
hwrt->pvr_dev = pvr_dev;
hwrt->max_rts = args->layers;
/* Get pointers to the free lists */
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
hwrt->free_lists[i] = pvr_free_list_lookup(pvr_file, args->free_list_handles[i]);
if (!hwrt->free_lists[i]) {
err = -EINVAL;
@@ -67,7 +66,7 @@ hwrt_init_kernel_structure(struct pvr_file *pvr_file,
return 0;
err_put_free_lists:
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
pvr_free_list_put(hwrt->free_lists[i]);
hwrt->free_lists[i] = NULL;
}
@@ -78,9 +77,7 @@ err_put_free_lists:
static void
hwrt_fini_kernel_structure(struct pvr_hwrt_dataset *hwrt)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
pvr_free_list_put(hwrt->free_lists[i]);
hwrt->free_lists[i] = NULL;
}
@@ -363,13 +360,12 @@ hwrt_data_init_fw_structure(struct pvr_file *pvr_file,
struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args;
struct pvr_device *pvr_dev = pvr_file->pvr_dev;
struct rogue_fwif_rta_ctl *rta_ctl;
- int free_list_i;
int err;
pvr_fw_object_get_fw_addr(hwrt->common_fw_obj,
&hwrt_data->data.hwrt_data_common_fw_addr);
- for (free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) {
+ for (int free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) {
pvr_fw_object_get_fw_addr(hwrt->free_lists[free_list_i]->fw_obj,
&hwrt_data->data.freelists_fw_addr[free_list_i]);
}
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 1cdb3cfd058d..59b334d094fa 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -671,6 +671,13 @@ pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count)
geom_job->paired_job = frag_job;
frag_job->paired_job = geom_job;
+ /* The geometry job pvr_job structure is used when the fragment
+ * job is being prepared by the GPU scheduler. Have the fragment
+ * job hold a reference on the geometry job to prevent it being
+ * freed until the fragment job has finished with it.
+ */
+ pvr_job_get(geom_job);
+
/* Skip the fragment job we just paired to the geometry job. */
i++;
}
diff --git a/drivers/gpu/drm/imagination/pvr_mmu.c b/drivers/gpu/drm/imagination/pvr_mmu.c
index 4fe70610ed94..450d476d183f 100644
--- a/drivers/gpu/drm/imagination/pvr_mmu.c
+++ b/drivers/gpu/drm/imagination/pvr_mmu.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/kmemleak.h>
#include <linux/minmax.h>
+#include <linux/property.h>
#include <linux/sizes.h>
#define PVR_SHIFT_FROM_SIZE(size_) (__builtin_ctzll(size_))
@@ -259,6 +260,7 @@ pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page,
struct device *dev = from_pvr_device(pvr_dev)->dev;
struct page *raw_page;
+ pgprot_t prot;
int err;
dma_addr_t dma_addr;
@@ -268,7 +270,11 @@ pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page,
if (!raw_page)
return -ENOMEM;
- host_ptr = vmap(&raw_page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ prot = PAGE_KERNEL;
+ if (device_get_dma_attr(dev) != DEV_DMA_COHERENT)
+ prot = pgprot_writecombine(prot);
+
+ host_ptr = vmap(&raw_page, 1, VM_MAP, prot);
if (!host_ptr) {
err = -ENOMEM;
goto err_free_page;
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
index ba7816fd28ec..41f5d89e78b8 100644
--- a/drivers/gpu/drm/imagination/pvr_power.c
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -10,11 +10,15 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -252,6 +256,8 @@ pvr_power_device_suspend(struct device *dev)
clk_disable_unprepare(pvr_dev->sys_clk);
clk_disable_unprepare(pvr_dev->core_clk);
+ err = reset_control_assert(pvr_dev->reset);
+
err_drm_dev_exit:
drm_dev_exit(idx);
@@ -282,16 +288,33 @@ pvr_power_device_resume(struct device *dev)
if (err)
goto err_sys_clk_disable;
+ /*
+ * According to the hardware manual, a delay of at least 32 clock
+ * cycles is required between de-asserting the clkgen reset and
+ * de-asserting the GPU reset. Assuming a worst-case scenario with
+ * a very high GPU clock frequency, a delay of 1 microsecond is
+ * sufficient to ensure this requirement is met across all
+ * feasible GPU clock speeds.
+ */
+ udelay(1);
+
+ err = reset_control_deassert(pvr_dev->reset);
+ if (err)
+ goto err_mem_clk_disable;
+
if (pvr_dev->fw_dev.booted) {
err = pvr_power_fw_enable(pvr_dev);
if (err)
- goto err_mem_clk_disable;
+ goto err_reset_assert;
}
drm_dev_exit(idx);
return 0;
+err_reset_assert:
+ reset_control_assert(pvr_dev->reset);
+
err_mem_clk_disable:
clk_disable_unprepare(pvr_dev->mem_clk);
@@ -431,3 +454,114 @@ pvr_watchdog_fini(struct pvr_device *pvr_dev)
{
cancel_delayed_work_sync(&pvr_dev->watchdog.work);
}
+
+int pvr_power_domains_init(struct pvr_device *pvr_dev)
+{
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+
+ struct device_link **domain_links __free(kfree) = NULL;
+ struct device **domain_devs __free(kfree) = NULL;
+ int domain_count;
+ int link_count;
+
+ char dev_name[2] = "a";
+ int err;
+ int i;
+
+ domain_count = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ if (domain_count < 0)
+ return domain_count;
+
+ if (domain_count <= 1)
+ return 0;
+
+ link_count = domain_count + (domain_count - 1);
+
+ domain_devs = kcalloc(domain_count, sizeof(*domain_devs), GFP_KERNEL);
+ if (!domain_devs)
+ return -ENOMEM;
+
+ domain_links = kcalloc(link_count, sizeof(*domain_links), GFP_KERNEL);
+ if (!domain_links)
+ return -ENOMEM;
+
+ for (i = 0; i < domain_count; i++) {
+ struct device *domain_dev;
+
+ dev_name[0] = 'a' + i;
+ domain_dev = dev_pm_domain_attach_by_name(dev, dev_name);
+ if (IS_ERR_OR_NULL(domain_dev)) {
+ err = domain_dev ? PTR_ERR(domain_dev) : -ENODEV;
+ goto err_detach;
+ }
+
+ domain_devs[i] = domain_dev;
+ }
+
+ for (i = 0; i < domain_count; i++) {
+ struct device_link *link;
+
+ link = device_link_add(dev, domain_devs[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ if (!link) {
+ err = -ENODEV;
+ goto err_unlink;
+ }
+
+ domain_links[i] = link;
+ }
+
+ for (i = domain_count; i < link_count; i++) {
+ struct device_link *link;
+
+ link = device_link_add(domain_devs[i - domain_count + 1],
+ domain_devs[i - domain_count],
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ if (!link) {
+ err = -ENODEV;
+ goto err_unlink;
+ }
+
+ domain_links[i] = link;
+ }
+
+ pvr_dev->power = (struct pvr_device_power){
+ .domain_devs = no_free_ptr(domain_devs),
+ .domain_links = no_free_ptr(domain_links),
+ .domain_count = domain_count,
+ };
+
+ return 0;
+
+err_unlink:
+ while (--i >= 0)
+ device_link_del(domain_links[i]);
+
+ i = domain_count;
+
+err_detach:
+ while (--i >= 0)
+ dev_pm_domain_detach(domain_devs[i], true);
+
+ return err;
+}
+
+void pvr_power_domains_fini(struct pvr_device *pvr_dev)
+{
+ const int domain_count = pvr_dev->power.domain_count;
+
+ int i = domain_count + (domain_count - 1);
+
+ while (--i >= 0)
+ device_link_del(pvr_dev->power.domain_links[i]);
+
+ i = domain_count;
+
+ while (--i >= 0)
+ dev_pm_domain_detach(pvr_dev->power.domain_devs[i], true);
+
+ kfree(pvr_dev->power.domain_links);
+ kfree(pvr_dev->power.domain_devs);
+
+ pvr_dev->power = (struct pvr_device_power){ 0 };
+}
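
The link topology built by pvr_power_domains_init() is one consumer link per domain plus a chain between consecutive domains. For example, with three domains named "a", "b" and "c" (hypothetical count), link_count = 3 + (3 - 1) = 5:

/*
 * domain_links[0..2]: gpu -> "a", gpu -> "b", gpu -> "c"  (device depends on each domain)
 * domain_links[3..4]: "b" -> "a", "c" -> "b"              (later domains depend on earlier ones)
 */
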
diff --git a/drivers/gpu/drm/imagination/pvr_power.h b/drivers/gpu/drm/imagination/pvr_power.h
index 9a9312dcb2da..ada85674a7ca 100644
--- a/drivers/gpu/drm/imagination/pvr_power.h
+++ b/drivers/gpu/drm/imagination/pvr_power.h
@@ -38,4 +38,7 @@ pvr_power_put(struct pvr_device *pvr_dev)
return pm_runtime_put(drm_dev->dev);
}
+int pvr_power_domains_init(struct pvr_device *pvr_dev);
+void pvr_power_domains_fini(struct pvr_device *pvr_dev);
+
#endif /* PVR_POWER_H */
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index eba69309bb6c..5e9bc0992824 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -866,6 +866,10 @@ static void pvr_queue_free_job(struct drm_sched_job *sched_job)
struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
drm_sched_job_cleanup(sched_job);
+
+ if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job)
+ pvr_job_put(job->paired_job);
+
job->paired_job = NULL;
pvr_job_put(job);
}
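
Taken together with the pvr_job_get() added in pvr_jobs_link_geom_frag(), the reference held on the paired geometry job is balanced like this:

/*
 * pvr_jobs_link_geom_frag():       pvr_job_get(geom_job)        - taken when the pair is formed
 * pvr_queue_free_job(frag job):    pvr_job_put(job->paired_job) - dropped when the fragment
 *                                                                 job is freed
 */
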
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
index 2a90d02796d3..790c97f80a2a 100644
--- a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
+++ b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
@@ -827,6 +827,120 @@
#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_EN 0x00000001U
+/* Register ROGUE_CR_EVENT_CLEAR */
+#define ROGUE_CR_EVENT_CLEAR 0x0138U
+#define ROGUE_CR_EVENT_CLEAR__ROGUEXE__MASKFULL 0x00000000E01DFFFFULL
+#define ROGUE_CR_EVENT_CLEAR__SIGNALS__MASKFULL 0x00000000E007FFFFULL
+#define ROGUE_CR_EVENT_CLEAR_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT 31U
+#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN 0x80000000U
+#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT 30U
+#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN 0x40000000U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT 29U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN 0x20000000U
+#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_SHIFT 28U
+#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_EN 0x10000000U
+#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_SHIFT 27U
+#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_EN 0x08000000U
+#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_SHIFT 26U
+#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_EN 0x04000000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_SHIFT 25U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_EN 0x02000000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_SHIFT 24U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_CLRMSK 0xFEFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_EN 0x01000000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_SHIFT 23U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_CLRMSK 0xFF7FFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_EN 0x00800000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_SHIFT 22U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_EN 0x00400000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_SHIFT 21U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_EN 0x00200000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_SHIFT 20U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_EN 0x00100000U
+#define ROGUE_CR_EVENT_CLEAR_SAFETY_SHIFT 20U
+#define ROGUE_CR_EVENT_CLEAR_SAFETY_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_SAFETY_EN 0x00100000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_SHIFT 19U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_EN 0x00080000U
+#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_SHIFT 19U
+#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_EN 0x00080000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_SHIFT 17U
+#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_EN 0x00020000U
+#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_SHIFT 17U
+#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_EN 0x00020000U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT 16U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN 0x00010000U
+#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT 15U
+#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK 0xFFFF7FFFU
+#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_EN 0x00008000U
+#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_SHIFT 14U
+#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_CLRMSK 0xFFFFBFFFU
+#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_EN 0x00004000U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_SHIFT 13U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK 0xFFFFDFFFU
+#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_EN 0x00002000U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_SHIFT 12U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK 0xFFFFEFFFU
+#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_EN 0x00001000U
+#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_SHIFT 11U
+#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_EN 0x00000800U
+#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT 10U
+#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_EN 0x00000400U
+#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT 9U
+#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN 0x00000200U
+#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_SHIFT 8U
+#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_EN 0x00000100U
+#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT 7U
+#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN 0x00000080U
+#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT 6U
+#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_EN 0x00000040U
+#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_SHIFT 5U
+#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_EN 0x00000020U
+#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT 4U
+#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN 0x00000010U
+#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_SHIFT 3U
+#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_EN 0x00000008U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT 2U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN 0x00000004U
+#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT 1U
+#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_EN 0x00000002U
+#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_SHIFT 0U
+#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_EN 0x00000001U
+
/* Register ROGUE_CR_TIMER */
#define ROGUE_CR_TIMER 0x0160U
#define ROGUE_CR_TIMER_MASKFULL 0x8000FFFFFFFFFFFFULL
@@ -6031,25 +6145,6 @@
#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
-/* Register ROGUE_CR_ECC_RAM_ERR_INJ */
-#define ROGUE_CR_ECC_RAM_ERR_INJ 0xF340U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MASKFULL 0x000000000000001FULL
-#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT 4U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN 0x00000010U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_SHIFT 3U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_CLRMSK 0xFFFFFFF7U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_EN 0x00000008U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT 2U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN 0x00000004U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT 1U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK 0xFFFFFFFDU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_EN 0x00000002U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_SHIFT 0U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK 0xFFFFFFFEU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_EN 0x00000001U
-
/* Register ROGUE_CR_ECC_RAM_INIT_KICK */
#define ROGUE_CR_ECC_RAM_INIT_KICK 0xF348U
#define ROGUE_CR_ECC_RAM_INIT_KICK_MASKFULL 0x000000000000001FULL
@@ -6163,6 +6258,26 @@
#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+/* Register ROGUE_CR_FAULT_FW_STATUS */
+#define ROGUE_CR_FAULT_FW_STATUS 0xF3B0U
+#define ROGUE_CR_FAULT_FW_STATUS_MASKFULL 0x0000000000010001ULL
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT 16U
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_EN 0x00010000U
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT 0U
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_EN 0x00000001U
+
+/* Register ROGUE_CR_FAULT_FW_CLEAR */
+#define ROGUE_CR_FAULT_FW_CLEAR 0xF3B8U
+#define ROGUE_CR_FAULT_FW_CLEAR_MASKFULL 0x0000000000010001ULL
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_SHIFT 16U
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_EN 0x00010000U
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_SHIFT 0U
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_EN 0x00000001U
+
/* Register ROGUE_CR_MTS_SAFETY_EVENT_ENABLE */
#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE 0xF3D8U
#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_riscv.h b/drivers/gpu/drm/imagination/pvr_rogue_riscv.h
new file mode 100644
index 000000000000..9a070e24fa6a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_riscv.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2024 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_RISCV_H
+#define PVR_ROGUE_RISCV_H
+
+#include "pvr_rogue_cr_defs.h"
+
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#define ROGUE_RISCVFW_REGION_SIZE SZ_256M
+#define ROGUE_RISCVFW_REGION_SHIFT __ffs(ROGUE_RISCVFW_REGION_SIZE)
+
+enum rogue_riscvfw_region {
+ ROGUE_RISCV_REGION__RESERVED_0 = 0,
+ ROGUE_RISCV_REGION__RESERVED_1,
+ ROGUE_RISCV_REGION_SOCIF,
+ ROGUE_RISCV_REGION__RESERVED_3,
+ ROGUE_RISCV_REGION__RESERVED_4,
+ ROGUE_RISCV_REGION_BOOTLDR_DATA,
+ ROGUE_RISCV_REGION_SHARED_CACHED_DATA,
+ ROGUE_RISCV_REGION__RESERVED_7,
+ ROGUE_RISCV_REGION_COREMEM,
+ ROGUE_RISCV_REGION__RESERVED_9,
+ ROGUE_RISCV_REGION__RESERVED_A,
+ ROGUE_RISCV_REGION__RESERVED_B,
+ ROGUE_RISCV_REGION_BOOTLDR_CODE,
+ ROGUE_RISCV_REGION_SHARED_UNCACHED_DATA,
+ ROGUE_RISCV_REGION__RESERVED_E,
+ ROGUE_RISCV_REGION__RESERVED_F,
+
+ ROGUE_RISCV_REGION__COUNT,
+};
+
+#define ROGUE_RISCVFW_REGION_BASE(r) ((u32)(ROGUE_RISCV_REGION_##r) << ROGUE_RISCVFW_REGION_SHIFT)
+#define ROGUE_RISCVFW_REGION_REMAP_CR(r) \
+ (ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 + (u32)(ROGUE_RISCV_REGION_##r) * 8U)
+
+#endif /* PVR_ROGUE_RISCV_H */
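
Editor's note: the region helpers in this new header reduce to simple shifts. Every RISC-V firmware region is a 256 MiB window, so __ffs(SZ_256M) is 28 and the enum index becomes the top nibble of a 32-bit firmware address, while the remap config registers sit 8 bytes apart. A minimal sketch of how the macros expand for one region, using only values visible in the header above:

/* Illustration only: expansion for the bootloader code region.
 * ROGUE_RISCVFW_REGION_SHIFT = __ffs(SZ_256M) = 28
 * ROGUE_RISCV_REGION_BOOTLDR_CODE = 12 (0xC)
 */
#define EXAMPLE_BOOTLDR_CODE_BASE     (12U << 28)                 /* 0xC0000000 */
#define EXAMPLE_BOOTLDR_CODE_REMAP_CR \
        (ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 + 12U * 8U)           /* 13th remap config register */
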
diff --git a/drivers/gpu/drm/imagination/pvr_stream.c b/drivers/gpu/drm/imagination/pvr_stream.c
index 975336a4facf..679aa618b7a9 100644
--- a/drivers/gpu/drm/imagination/pvr_stream.c
+++ b/drivers/gpu/drm/imagination/pvr_stream.c
@@ -67,9 +67,8 @@ pvr_stream_process_1(struct pvr_device *pvr_dev, const struct pvr_stream_def *st
u8 *dest, u32 dest_size, u32 *stream_offset_out)
{
int err = 0;
- u32 i;
- for (i = 0; i < nr_entries; i++) {
+ for (u32 i = 0; i < nr_entries; i++) {
if (stream_def[i].offset >= dest_size) {
err = -EINVAL;
break;
@@ -131,7 +130,6 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
u32 musthave_masks[PVR_STREAM_EXTHDR_TYPE_MAX];
u32 ext_header;
int err = 0;
- u32 i;
/* Copy "must have" mask from device. We clear this as we process the stream. */
memcpy(musthave_masks, pvr_dev->stream_musthave_quirks[cmd_defs->type],
@@ -159,7 +157,7 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
musthave_masks[type] &= ~data;
- for (i = 0; i < header->ext_streams_num; i++) {
+ for (u32 i = 0; i < header->ext_streams_num; i++) {
const struct pvr_stream_ext_def *ext_def = &header->ext_streams[i];
if (!(ext_header & ext_def->header_mask))
@@ -181,7 +179,7 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
* Verify that "must have" mask is now zero. If it isn't then one of the "must have" quirks
* for this command was not present.
*/
- for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ for (u32 i = 0; i < cmd_defs->ext_nr_headers; i++) {
if (musthave_masks[i])
return -EINVAL;
}
@@ -245,13 +243,11 @@ pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs
if (err)
return err;
} else {
- u32 i;
-
/*
* If we don't have an extension stream then there must not be any "must have"
* quirks for this command.
*/
- for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ for (u32 i = 0; i < cmd_defs->ext_nr_headers; i++) {
if (pvr_dev->stream_musthave_quirks[cmd_defs->type][i])
return -EINVAL;
}
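
Editor's note: the pvr_stream.c hunks above are a mechanical cleanup, moving loop counters from function scope into the for statement itself, which is legal now that the kernel builds with gnu11. A sketch of the pattern with hypothetical names:

/* Sketch only: loop-scoped counter, as adopted in the hunks above. */
static void example_loop(const u32 *vals, u32 nr)
{
        for (u32 i = 0; i < nr; i++)    /* counter lives only inside the loop */
                (void)vals[i];
}
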
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
index 94af854547d6..5847a1c92bea 100644
--- a/drivers/gpu/drm/imagination/pvr_vm_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -100,10 +100,9 @@ pvr_vm_mips_fini(struct pvr_device *pvr_dev)
{
struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
- int page_nr;
vunmap(mips_data->pt);
- for (page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
+ for (int page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
dma_unmap_page(from_pvr_device(pvr_dev)->dev,
mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 9e66eb77b1eb..6d8325c76697 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -162,11 +162,12 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
}
static int imx_pd_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
- return drm_bridge_attach(bridge->encoder, imxpd->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, imxpd->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
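
Editor's note: this is the first of many identical conversions in this section. The drm_bridge_funcs ->attach() callback now receives the encoder as an explicit parameter instead of each driver dereferencing bridge->encoder. A minimal sketch of the new callback shape, with hypothetical driver names:

/* Sketch of the updated ->attach() signature used throughout these diffs. */
static int example_bridge_attach(struct drm_bridge *bridge,
                                 struct drm_encoder *encoder,
                                 enum drm_bridge_attach_flags flags)
{
        struct example_bridge *ex = container_of(bridge, struct example_bridge, bridge);

        /* Chain to the next bridge using the encoder handed in by the core. */
        return drm_bridge_attach(encoder, ex->next_bridge, bridge, flags);
}
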
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 20b93fff0239..f851e9ffdb28 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -791,11 +791,12 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
}
static int ingenic_drm_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
- struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder);
+ struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(encoder);
- return drm_bridge_attach(bridge->encoder, ib->next_bridge,
+ return drm_bridge_attach(encoder, ib->next_bridge,
&ib->bridge, flags);
}
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 9bb997dbb4b9..5deec673c11e 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -47,7 +47,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ refcount_set(&bo->base.pages_use_count, 1);
mapping_set_unevictable(mapping);
}
@@ -195,7 +195,7 @@ static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
if (bo->heap_size)
return -EINVAL;
- return drm_gem_shmem_vmap(&bo->base, map);
+ return drm_gem_shmem_vmap_locked(&bo->base, map);
}
static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
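
Editor's note: the lima changes track two shmem-helper API updates: pages_use_count is now a refcount_t (hence refcount_set() instead of a plain assignment), and the vmap helpers gained explicit _locked/unlocked naming. A short sketch of the generic refcount_t calls involved, not lima-specific code:

#include <linux/refcount.h>

/* Sketch of the refcount_t API behind the pages_use_count change. */
static void example_refcount_usage(refcount_t *users)
{
        refcount_set(users, 1);                 /* initialise, as lima_heap_alloc() now does */
        refcount_inc(users);                    /* take an additional reference */
        if (refcount_dec_and_test(users))       /* drop one; true when the count hits zero */
                ;                               /* release the backing pages here */
}
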
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 825135a26aa4..7934098e651b 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -371,7 +371,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
} else {
buffer_chunk->size = lima_bo_size(bo);
- ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
+ ret = drm_gem_vmap(&bo->base.base, &map);
if (ret) {
kvfree(et);
goto out;
@@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
- drm_gem_vunmap_unlocked(&bo->base.base, &map);
+ drm_gem_vunmap(&bo->base.base, &map);
}
buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig
index 552edfec7afb..d739d51cf54c 100644
--- a/drivers/gpu/drm/loongson/Kconfig
+++ b/drivers/gpu/drm/loongson/Kconfig
@@ -2,7 +2,7 @@
config DRM_LOONGSON
tristate "DRM support for Loongson Graphics"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on LOONGARCH || MIPS || COMPILE_TEST
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 395449a72f0a..a3423459dd7a 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1048,6 +1048,7 @@ void mcde_dsi_disable(struct drm_bridge *bridge)
}
static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
@@ -1059,7 +1060,7 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
}
/* Attach the DSI bridge to the output (panel etc) bridge */
- return drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags);
+ return drm_bridge_attach(encoder, d->bridge_out, bridge, flags);
}
static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = {
@@ -1137,7 +1138,6 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
d->bridge_out = bridge;
/* Create a bridge for this DSI channel */
- d->bridge.funcs = &mcde_dsi_bridge_funcs;
d->bridge.of_node = dev->of_node;
drm_bridge_add(&d->bridge);
@@ -1173,9 +1173,9 @@ static int mcde_dsi_probe(struct platform_device *pdev)
u32 dsi_id;
int ret;
- d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
- if (!d)
- return -ENOMEM;
+ d = devm_drm_bridge_alloc(dev, struct mcde_dsi, bridge, &mcde_dsi_bridge_funcs);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
d->dev = dev;
platform_set_drvdata(pdev, d);
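
Editor's note: mcde_dsi now allocates its container with devm_drm_bridge_alloc(), which also wires up bridge->funcs, which is why the explicit funcs assignment disappears from the bind path above. A sketch of the allocation pattern with a hypothetical driver struct and funcs table:

/* Sketch only: devm_drm_bridge_alloc() usage, assuming example_bridge_funcs exists. */
struct example_dsi {
        struct device *dev;
        struct drm_bridge bridge;       /* embedded member named in the macro below */
};

static int example_probe(struct platform_device *pdev)
{
        struct example_dsi *d;

        /* Allocates the container, sets d->bridge.funcs, returns ERR_PTR on failure. */
        d = devm_drm_bridge_alloc(&pdev->dev, struct example_dsi, bridge,
                                  &example_bridge_funcs);
        if (IS_ERR(d))
                return PTR_ERR(d);

        d->dev = &pdev->dev;
        return 0;
}
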
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 32a2ed6c0cfe..43afd0a26d14 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -21,10 +21,8 @@ mediatek-drm-y := mtk_crtc.o \
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
-mediatek-drm-hdmi-objs := mtk_cec.o \
- mtk_hdmi.o \
- mtk_hdmi_ddc.o
-
-obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_cec.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi_ddc.o
obj-$(CONFIG_DRM_MEDIATEK_DP) += mtk_dp.o
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index b42c0d87eba3..c7be530ca041 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -12,7 +12,6 @@
#include <linux/platform_device.h>
#include "mtk_cec.h"
-#include "mtk_hdmi.h"
#include "mtk_drm_drv.h"
#define TR_CONFIG 0x00
@@ -102,6 +101,7 @@ void mtk_cec_set_hpd_event(struct device *dev,
cec->hpd_event = hpd_event;
spin_unlock_irqrestore(&cec->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(mtk_cec_set_hpd_event, "DRM_MTK_HDMI_V1");
bool mtk_cec_hpd_high(struct device *dev)
{
@@ -112,6 +112,7 @@ bool mtk_cec_hpd_high(struct device *dev)
return (status & (HDMI_PORD | HDMI_HTPLG)) == (HDMI_PORD | HDMI_HTPLG);
}
+EXPORT_SYMBOL_NS_GPL(mtk_cec_hpd_high, "DRM_MTK_HDMI_V1");
static void mtk_cec_htplg_irq_init(struct mtk_cec *cec)
{
@@ -247,3 +248,7 @@ struct platform_driver mtk_cec_driver = {
.of_match_table = mtk_cec_of_ids,
},
};
+module_platform_driver(mtk_cec_driver);
+
+MODULE_DESCRIPTION("MediaTek HDMI CEC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index fa0e95dd29a0..fe97bb97e004 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -492,11 +492,6 @@ static const struct of_device_id mtk_ovl_adaptor_comp_dt_ids[] = {
{ /* sentinel */ }
};
-static int compare_of(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
static int ovl_adaptor_of_get_ddp_comp_type(struct device_node *node,
enum mtk_ovl_adaptor_comp_type *ctype)
{
@@ -567,7 +562,7 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma
priv->ovl_adaptor_comp[id] = &comp_pdev->dev;
- drm_of_component_match_add(dev, match, compare_of, node);
+ drm_of_component_match_add(dev, match, component_compare_of, node);
dev_dbg(dev, "Adding component match for %pOF\n", node);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index fed3307d3374..b2408abb9d49 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2287,6 +2287,7 @@ static void mtk_dp_poweroff(struct mtk_dp *mtk_dp)
}
static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
@@ -2310,7 +2311,7 @@ static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
goto err_aux_register;
if (mtk_dp->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, mtk_dp->next_bridge,
+ ret = drm_bridge_attach(encoder, mtk_dp->next_bridge,
&mtk_dp->bridge, flags);
if (ret) {
drm_warn(mtk_dp->drm_dev,
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 0fd13e6dd3f1..6fb85bc6487a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -59,7 +59,8 @@ enum mtk_dpi_out_channel_swap {
enum mtk_dpi_out_color_format {
MTK_DPI_COLOR_FORMAT_RGB,
- MTK_DPI_COLOR_FORMAT_YCBCR_422
+ MTK_DPI_COLOR_FORMAT_YCBCR_422,
+ MTK_DPI_COLOR_FORMAT_YCBCR_444
};
struct mtk_dpi {
@@ -450,9 +451,16 @@ static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
enum mtk_dpi_out_color_format format)
{
- mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
+ mtk_dpi_config_channel_swap(dpi, dpi->channel_swap);
- if (format == MTK_DPI_COLOR_FORMAT_YCBCR_422) {
+ switch (format) {
+ case MTK_DPI_COLOR_FORMAT_YCBCR_444:
+ mtk_dpi_config_yuv422_enable(dpi, false);
+ mtk_dpi_config_csc_enable(dpi, true);
+ if (dpi->conf->swap_input_support)
+ mtk_dpi_config_swap_input(dpi, false);
+ break;
+ case MTK_DPI_COLOR_FORMAT_YCBCR_422:
mtk_dpi_config_yuv422_enable(dpi, true);
mtk_dpi_config_csc_enable(dpi, true);
@@ -463,11 +471,14 @@ static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
mtk_dpi_mask(dpi, DPI_MATRIX_SET, dpi->mode.hdisplay <= 720 ?
MATRIX_SEL_RGB_TO_BT601 : MATRIX_SEL_RGB_TO_JPEG,
INT_MATRIX_SEL_MASK);
- } else {
+ break;
+ default:
+ case MTK_DPI_COLOR_FORMAT_RGB:
mtk_dpi_config_yuv422_enable(dpi, false);
mtk_dpi_config_csc_enable(dpi, false);
if (dpi->conf->swap_input_support)
mtk_dpi_config_swap_input(dpi, false);
+ break;
}
}
@@ -734,6 +745,65 @@ static u32 *mtk_dpi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
return input_fmts;
}
+static unsigned int mtk_dpi_bus_fmt_bit_num(unsigned int out_bus_format)
+{
+ switch (out_bus_format) {
+ default:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ return MTK_DPI_OUT_BIT_NUM_8BITS;
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ return MTK_DPI_OUT_BIT_NUM_10BITS;
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ return MTK_DPI_OUT_BIT_NUM_12BITS;
+ }
+}
+
+static unsigned int mtk_dpi_bus_fmt_channel_swap(unsigned int out_bus_format)
+{
+ switch (out_bus_format) {
+ default:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ return MTK_DPI_OUT_CHANNEL_SWAP_RGB;
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ return MTK_DPI_OUT_CHANNEL_SWAP_BGR;
+ }
+}
+
+static unsigned int mtk_dpi_bus_fmt_color_format(unsigned int out_bus_format)
+{
+ switch (out_bus_format) {
+ default:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ return MTK_DPI_COLOR_FORMAT_RGB;
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ return MTK_DPI_COLOR_FORMAT_YCBCR_422;
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ return MTK_DPI_COLOR_FORMAT_YCBCR_444;
+ }
+}
+
static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
@@ -753,18 +823,16 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
bridge_state->output_bus_cfg.format);
dpi->output_fmt = out_bus_format;
- dpi->bit_num = MTK_DPI_OUT_BIT_NUM_8BITS;
- dpi->channel_swap = MTK_DPI_OUT_CHANNEL_SWAP_RGB;
+ dpi->bit_num = mtk_dpi_bus_fmt_bit_num(out_bus_format);
+ dpi->channel_swap = mtk_dpi_bus_fmt_channel_swap(out_bus_format);
dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB;
- if (out_bus_format == MEDIA_BUS_FMT_YUYV8_1X16)
- dpi->color_format = MTK_DPI_COLOR_FORMAT_YCBCR_422;
- else
- dpi->color_format = MTK_DPI_COLOR_FORMAT_RGB;
+ dpi->color_format = mtk_dpi_bus_fmt_color_format(out_bus_format);
return 0;
}
static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
@@ -783,7 +851,7 @@ static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
"Failed to get bridge\n");
}
- return drm_bridge_attach(bridge->encoder, dpi->next_bridge,
+ return drm_bridge_attach(encoder, dpi->next_bridge,
&dpi->bridge, flags);
}
@@ -1026,9 +1094,29 @@ static const u32 mt8183_output_fmts[] = {
MEDIA_BUS_FMT_RGB888_2X12_BE,
};
-static const u32 mt8195_output_fmts[] = {
+static const u32 mt8195_dpi_output_fmts[] = {
+ MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_LE,
+ MEDIA_BUS_FMT_RGB888_2X12_BE,
+ MEDIA_BUS_FMT_RGB101010_1X30,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_YUYV12_1X24,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_YUV10_1X30,
+};
+
+static const u32 mt8195_dp_intf_output_fmts[] = {
+ MEDIA_BUS_FMT_BGR888_1X24,
MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_LE,
+ MEDIA_BUS_FMT_RGB888_2X12_BE,
+ MEDIA_BUS_FMT_RGB101010_1X30,
MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_YUV10_1X30,
};
static const struct mtk_dpi_factor dpi_factor_mt2701[] = {
@@ -1141,8 +1229,8 @@ static const struct mtk_dpi_conf mt8192_conf = {
static const struct mtk_dpi_conf mt8195_conf = {
.max_clock_khz = 594000,
- .output_fmts = mt8183_output_fmts,
- .num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
+ .output_fmts = mt8195_dpi_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8195_dpi_output_fmts),
.pixels_per_iter = 1,
.is_ck_de_pol = true,
.swap_input_support = true,
@@ -1161,8 +1249,8 @@ static const struct mtk_dpi_conf mt8195_dpintf_conf = {
.dpi_factor = dpi_factor_mt8195_dp_intf,
.num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8195_dp_intf),
.max_clock_khz = 600000,
- .output_fmts = mt8195_output_fmts,
- .num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
+ .output_fmts = mt8195_dp_intf_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8195_dp_intf_output_fmts),
.pixels_per_iter = 4,
.dimension_mask = DPINTF_HPW_MASK,
.hvsize_mask = DPINTF_HSIZE_MASK,
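
Editor's note: the three new helpers replace the old hard-coded 8-bit RGB assumption, so the negotiated output bus format now selects bit depth, channel order and colour format in one place. An illustrative trace of one of the new MT8195 formats, with results read directly from the switch statements above:

/* Illustration only: MEDIA_BUS_FMT_YUV10_1X30 through the new helpers. */
static void example_mt8195_yuv444_10bit(struct mtk_dpi *dpi)
{
        unsigned int fmt = MEDIA_BUS_FMT_YUV10_1X30;

        dpi->bit_num      = mtk_dpi_bus_fmt_bit_num(fmt);       /* MTK_DPI_OUT_BIT_NUM_10BITS     */
        dpi->channel_swap = mtk_dpi_bus_fmt_channel_swap(fmt);   /* MTK_DPI_OUT_CHANNEL_SWAP_BGR   */
        dpi->color_format = mtk_dpi_bus_fmt_color_format(fmt);   /* MTK_DPI_COLOR_FORMAT_YCBCR_444 */
}
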
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 74158b9d6503..7c0c12dde488 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -470,7 +470,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
ret = drmm_mode_config_init(drm);
if (ret)
- goto put_mutex_dev;
+ return ret;
drm->mode_config.min_width = 64;
drm->mode_config.min_height = 64;
@@ -488,8 +488,11 @@ static int mtk_drm_kms_init(struct drm_device *drm)
for (i = 0; i < private->data->mmsys_dev_num; i++) {
drm->dev_private = private->all_drm_private[i];
ret = component_bind_all(private->all_drm_private[i]->dev, drm);
- if (ret)
- goto put_mutex_dev;
+ if (ret) {
+ while (--i >= 0)
+ component_unbind_all(private->all_drm_private[i]->dev, drm);
+ return ret;
+ }
}
/*
@@ -582,9 +585,6 @@ static int mtk_drm_kms_init(struct drm_device *drm)
err_component_unbind:
for (i = 0; i < private->data->mmsys_dev_num; i++)
component_unbind_all(private->all_drm_private[i]->dev, drm);
-put_mutex_dev:
- for (i = 0; i < private->data->mmsys_dev_num; i++)
- put_device(private->all_drm_private[i]->mutex_dev);
return ret;
}
@@ -655,8 +655,10 @@ static int mtk_drm_bind(struct device *dev)
return 0;
drm = drm_dev_alloc(&mtk_drm_driver, dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
+ if (IS_ERR(drm)) {
+ ret = PTR_ERR(drm);
+ goto err_put_dev;
+ }
private->drm_master = true;
drm->dev_private = private;
@@ -682,18 +684,31 @@ err_free:
drm_dev_put(drm);
for (i = 0; i < private->data->mmsys_dev_num; i++)
private->all_drm_private[i]->drm = NULL;
+err_put_dev:
+ for (i = 0; i < private->data->mmsys_dev_num; i++) {
+ /* For device_find_child in mtk_drm_get_all_priv() */
+ put_device(private->all_drm_private[i]->dev);
+ }
+ put_device(private->mutex_dev);
return ret;
}
static void mtk_drm_unbind(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
+ int i;
/* for multi mmsys dev, unregister drm dev in mmsys master */
if (private->drm_master) {
drm_dev_unregister(private->drm);
mtk_drm_kms_deinit(private->drm);
drm_dev_put(private->drm);
+
+ for (i = 0; i < private->data->mmsys_dev_num; i++) {
+ /* For device_find_child in mtk_drm_get_all_priv() */
+ put_device(private->all_drm_private[i]->dev);
+ }
+ put_device(private->mutex_dev);
}
private->mtk_drm_bound = false;
private->drm_master = false;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index d1f407fb7eb1..4fe1f38a3c4b 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -807,12 +807,13 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
}
static int mtk_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
/* Attach the panel or bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge,
&dsi->bridge, flags);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 06e4fac152b7..8803cd4a8bc9 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -31,7 +31,6 @@
#include <drm/drm_probe_helper.h>
#include "mtk_cec.h"
-#include "mtk_hdmi.h"
#include "mtk_hdmi_regs.h"
#define NCTS_BYTES 7
@@ -165,7 +164,7 @@ struct mtk_hdmi {
bool dvi_mode;
struct regmap *sys_regmap;
unsigned int sys_offset;
- void __iomem *regs;
+ struct regmap *regs;
struct platform_device *audio_pdev;
struct hdmi_audio_param aud_param;
bool audio_enable;
@@ -181,50 +180,10 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
return container_of(b, struct mtk_hdmi, bridge);
}
-static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
-{
- return readl(hdmi->regs + offset);
-}
-
-static void mtk_hdmi_write(struct mtk_hdmi *hdmi, u32 offset, u32 val)
-{
- writel(val, hdmi->regs + offset);
-}
-
-static void mtk_hdmi_clear_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
-{
- void __iomem *reg = hdmi->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp &= ~bits;
- writel(tmp, reg);
-}
-
-static void mtk_hdmi_set_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
-{
- void __iomem *reg = hdmi->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp |= bits;
- writel(tmp, reg);
-}
-
-static void mtk_hdmi_mask(struct mtk_hdmi *hdmi, u32 offset, u32 val, u32 mask)
-{
- void __iomem *reg = hdmi->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp = (tmp & ~mask) | (val & mask);
- writel(tmp, reg);
-}
-
static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
{
- mtk_hdmi_mask(hdmi, VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH,
- VIDEO_SOURCE_SEL);
+ regmap_update_bits(hdmi->regs, VIDEO_SOURCE_SEL,
+ VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH);
}
static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
@@ -259,12 +218,12 @@ static void mtk_hdmi_hw_1p4_version_enable(struct mtk_hdmi *hdmi, bool enable)
static void mtk_hdmi_hw_aud_mute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_set_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+ regmap_set_bits(hdmi->regs, GRL_AUDIO_CFG, AUDIO_ZERO);
}
static void mtk_hdmi_hw_aud_unmute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_clear_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+ regmap_clear_bits(hdmi->regs, GRL_AUDIO_CFG, AUDIO_ZERO);
}
static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
@@ -273,25 +232,25 @@ static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
HDMI_RST, HDMI_RST);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
HDMI_RST, 0);
- mtk_hdmi_clear_bits(hdmi, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
+ regmap_clear_bits(hdmi->regs, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
ANLG_ON, ANLG_ON);
}
static void mtk_hdmi_hw_enable_notice(struct mtk_hdmi *hdmi, bool enable_notice)
{
- mtk_hdmi_mask(hdmi, GRL_CFG2, enable_notice ? CFG2_NOTICE_EN : 0,
- CFG2_NOTICE_EN);
+ regmap_update_bits(hdmi->regs, GRL_CFG2, CFG2_NOTICE_EN,
+ enable_notice ? CFG2_NOTICE_EN : 0);
}
static void mtk_hdmi_hw_write_int_mask(struct mtk_hdmi *hdmi, u32 int_mask)
{
- mtk_hdmi_write(hdmi, GRL_INT_MASK, int_mask);
+ regmap_write(hdmi->regs, GRL_INT_MASK, int_mask);
}
static void mtk_hdmi_hw_enable_dvi_mode(struct mtk_hdmi *hdmi, bool enable)
{
- mtk_hdmi_mask(hdmi, GRL_CFG1, enable ? CFG1_DVI : 0, CFG1_DVI);
+ regmap_update_bits(hdmi->regs, GRL_CFG1, CFG1_DVI, enable ? CFG1_DVI : 0);
}
static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
@@ -337,22 +296,22 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
dev_err(hdmi->dev, "Unknown infoframe type %d\n", frame_type);
return;
}
- mtk_hdmi_clear_bits(hdmi, ctrl_reg, ctrl_frame_en);
- mtk_hdmi_write(hdmi, GRL_INFOFRM_TYPE, frame_type);
- mtk_hdmi_write(hdmi, GRL_INFOFRM_VER, frame_ver);
- mtk_hdmi_write(hdmi, GRL_INFOFRM_LNG, frame_len);
+ regmap_clear_bits(hdmi->regs, ctrl_reg, ctrl_frame_en);
+ regmap_write(hdmi->regs, GRL_INFOFRM_TYPE, frame_type);
+ regmap_write(hdmi->regs, GRL_INFOFRM_VER, frame_ver);
+ regmap_write(hdmi->regs, GRL_INFOFRM_LNG, frame_len);
- mtk_hdmi_write(hdmi, GRL_IFM_PORT, checksum);
+ regmap_write(hdmi->regs, GRL_IFM_PORT, checksum);
for (i = 0; i < frame_len; i++)
- mtk_hdmi_write(hdmi, GRL_IFM_PORT, frame_data[i]);
+ regmap_write(hdmi->regs, GRL_IFM_PORT, frame_data[i]);
- mtk_hdmi_set_bits(hdmi, ctrl_reg, ctrl_frame_en);
+ regmap_set_bits(hdmi->regs, ctrl_reg, ctrl_frame_en);
}
static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
{
- mtk_hdmi_mask(hdmi, GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF,
- AUDIO_PACKET_OFF);
+ regmap_update_bits(hdmi->regs, AUDIO_PACKET_OFF,
+ GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF);
}
static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
@@ -373,44 +332,44 @@ static void mtk_hdmi_hw_set_deep_color_mode(struct mtk_hdmi *hdmi)
static void mtk_hdmi_hw_send_av_mute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+ regmap_clear_bits(hdmi->regs, GRL_CFG4, CTRL_AVMUTE);
usleep_range(2000, 4000);
- mtk_hdmi_set_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+ regmap_set_bits(hdmi->regs, GRL_CFG4, CTRL_AVMUTE);
}
static void mtk_hdmi_hw_send_av_unmute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_EN,
- CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+ regmap_update_bits(hdmi->regs, GRL_CFG4, CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET,
+ CFG4_AV_UNMUTE_EN);
usleep_range(2000, 4000);
- mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_SET,
- CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+ regmap_update_bits(hdmi->regs, GRL_CFG4, CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET,
+ CFG4_AV_UNMUTE_SET);
}
static void mtk_hdmi_hw_ncts_enable(struct mtk_hdmi *hdmi, bool on)
{
- mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, on ? 0 : CTS_CTRL_SOFT,
- CTS_CTRL_SOFT);
+ regmap_update_bits(hdmi->regs, GRL_CTS_CTRL, CTS_CTRL_SOFT,
+ on ? 0 : CTS_CTRL_SOFT);
}
static void mtk_hdmi_hw_ncts_auto_write_enable(struct mtk_hdmi *hdmi,
bool enable)
{
- mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, enable ? NCTS_WRI_ANYTIME : 0,
- NCTS_WRI_ANYTIME);
+ regmap_update_bits(hdmi->regs, GRL_CTS_CTRL, NCTS_WRI_ANYTIME,
+ enable ? NCTS_WRI_ANYTIME : 0);
}
static void mtk_hdmi_hw_msic_setting(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
- mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CFG4_MHL_MODE);
+ regmap_clear_bits(hdmi->regs, GRL_CFG4, CFG4_MHL_MODE);
if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
mode->clock == 74250 &&
mode->vdisplay == 1080)
- mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+ regmap_clear_bits(hdmi->regs, GRL_CFG2, CFG2_MHL_DE_SEL);
else
- mtk_hdmi_set_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+ regmap_set_bits(hdmi->regs, GRL_CFG2, CFG2_MHL_DE_SEL);
}
static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
@@ -438,7 +397,7 @@ static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
swap_bit = LFE_CC_SWAP;
break;
}
- mtk_hdmi_mask(hdmi, GRL_CH_SWAP, swap_bit, 0xff);
+ regmap_update_bits(hdmi->regs, GRL_CH_SWAP, 0xff, swap_bit);
}
static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
@@ -459,7 +418,7 @@ static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
break;
}
- mtk_hdmi_mask(hdmi, GRL_AOUT_CFG, val, AOUT_BNUM_SEL_MASK);
+ regmap_update_bits(hdmi->regs, GRL_AOUT_CFG, AOUT_BNUM_SEL_MASK, val);
}
static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
@@ -467,7 +426,7 @@ static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_CFG0);
+ regmap_read(hdmi->regs, GRL_CFG0, &val);
val &= ~(CFG0_W_LENGTH_MASK | CFG0_I2S_MODE_MASK);
switch (i2s_fmt) {
@@ -491,7 +450,7 @@ static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_16BIT;
break;
}
- mtk_hdmi_write(hdmi, GRL_CFG0, val);
+ regmap_write(hdmi->regs, GRL_CFG0, val);
}
static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
@@ -500,14 +459,14 @@ static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
u8 val;
/* Disable high bitrate, set DST packet normal/double */
- mtk_hdmi_clear_bits(hdmi, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
+ regmap_clear_bits(hdmi->regs, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
if (dst)
val = DST_NORMAL_DOUBLE | SACD_DST;
else
val = 0;
- mtk_hdmi_mask(hdmi, GRL_AUDIO_CFG, val, mask);
+ regmap_update_bits(hdmi->regs, GRL_AUDIO_CFG, mask, val);
}
static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
@@ -548,10 +507,10 @@ static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
i2s_uv = I2S_UV_CH_EN(0);
}
- mtk_hdmi_write(hdmi, GRL_CH_SW0, ch_switch & 0xff);
- mtk_hdmi_write(hdmi, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
- mtk_hdmi_write(hdmi, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
- mtk_hdmi_write(hdmi, GRL_I2S_UV, i2s_uv);
+ regmap_write(hdmi->regs, GRL_CH_SW0, ch_switch & 0xff);
+ regmap_write(hdmi->regs, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
+ regmap_write(hdmi->regs, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
+ regmap_write(hdmi->regs, GRL_I2S_UV, i2s_uv);
}
static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
@@ -559,7 +518,7 @@ static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_CFG1);
+ regmap_read(hdmi->regs, GRL_CFG1, &val);
if (input_type == HDMI_AUD_INPUT_I2S &&
(val & CFG1_SPDIF) == CFG1_SPDIF) {
val &= ~CFG1_SPDIF;
@@ -567,7 +526,7 @@ static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
(val & CFG1_SPDIF) == 0) {
val |= CFG1_SPDIF;
}
- mtk_hdmi_write(hdmi, GRL_CFG1, val);
+ regmap_write(hdmi->regs, GRL_CFG1, val);
}
static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
@@ -576,13 +535,13 @@ static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
int i;
for (i = 0; i < 5; i++) {
- mtk_hdmi_write(hdmi, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
- mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, channel_status[i]);
- mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, channel_status[i]);
+ regmap_write(hdmi->regs, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
+ regmap_write(hdmi->regs, GRL_L_STATUS_0 + i * 4, channel_status[i]);
+ regmap_write(hdmi->regs, GRL_R_STATUS_0 + i * 4, channel_status[i]);
}
for (; i < 24; i++) {
- mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, 0);
- mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, 0);
+ regmap_write(hdmi->regs, GRL_L_STATUS_0 + i * 4, 0);
+ regmap_write(hdmi->regs, GRL_R_STATUS_0 + i * 4, 0);
}
}
@@ -590,13 +549,13 @@ static void mtk_hdmi_hw_aud_src_reenable(struct mtk_hdmi *hdmi)
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+ regmap_read(hdmi->regs, GRL_MIX_CTRL, &val);
if (val & MIX_CTRL_SRC_EN) {
val &= ~MIX_CTRL_SRC_EN;
- mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ regmap_write(hdmi->regs, GRL_MIX_CTRL, val);
usleep_range(255, 512);
val |= MIX_CTRL_SRC_EN;
- mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ regmap_write(hdmi->regs, GRL_MIX_CTRL, val);
}
}
@@ -604,10 +563,10 @@ static void mtk_hdmi_hw_aud_src_disable(struct mtk_hdmi *hdmi)
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+ regmap_read(hdmi->regs, GRL_MIX_CTRL, &val);
val &= ~MIX_CTRL_SRC_EN;
- mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
- mtk_hdmi_write(hdmi, GRL_SHIFT_L1, 0x00);
+ regmap_write(hdmi->regs, GRL_MIX_CTRL, val);
+ regmap_write(hdmi->regs, GRL_SHIFT_L1, 0x00);
}
static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
@@ -615,7 +574,7 @@ static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_CFG5);
+ regmap_read(hdmi->regs, GRL_CFG5, &val);
val &= CFG5_CD_RATIO_MASK;
switch (mclk) {
@@ -638,7 +597,7 @@ static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
val |= CFG5_FS256;
break;
}
- mtk_hdmi_write(hdmi, GRL_CFG5, val);
+ regmap_write(hdmi->regs, GRL_CFG5, val);
}
struct hdmi_acr_n {
@@ -716,15 +675,22 @@ static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate,
128 * audio_sample_rate);
}
+static void mtk_hdmi_get_ncts(unsigned int sample_rate, unsigned int clock,
+ unsigned int *n, unsigned int *cts)
+{
+ *n = hdmi_recommended_n(sample_rate, clock);
+ *cts = hdmi_expected_cts(sample_rate, clock, *n);
+}
+
static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
unsigned int cts)
{
unsigned char val[NCTS_BYTES];
int i;
- mtk_hdmi_write(hdmi, GRL_NCTS, 0);
- mtk_hdmi_write(hdmi, GRL_NCTS, 0);
- mtk_hdmi_write(hdmi, GRL_NCTS, 0);
+ regmap_write(hdmi->regs, GRL_NCTS, 0);
+ regmap_write(hdmi->regs, GRL_NCTS, 0);
+ regmap_write(hdmi->regs, GRL_NCTS, 0);
memset(val, 0, sizeof(val));
val[0] = (cts >> 24) & 0xff;
@@ -737,7 +703,7 @@ static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
val[6] = n & 0xff;
for (i = 0; i < NCTS_BYTES; i++)
- mtk_hdmi_write(hdmi, GRL_NCTS, val[i]);
+ regmap_write(hdmi->regs, GRL_NCTS, val[i]);
}
static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
@@ -746,14 +712,12 @@ static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
{
unsigned int n, cts;
- n = hdmi_recommended_n(sample_rate, clock);
- cts = hdmi_expected_cts(sample_rate, clock, n);
+ mtk_hdmi_get_ncts(sample_rate, clock, &n, &cts);
dev_dbg(hdmi->dev, "%s: sample_rate=%u, clock=%d, cts=%u, n=%u\n",
__func__, sample_rate, clock, n, cts);
- mtk_hdmi_mask(hdmi, DUMMY_304, AUDIO_I2S_NCTS_SEL_64,
- AUDIO_I2S_NCTS_SEL);
+ regmap_update_bits(hdmi->regs, DUMMY_304, AUDIO_I2S_NCTS_SEL, AUDIO_I2S_NCTS_SEL_64);
do_hdmi_hw_aud_set_ncts(hdmi, n, cts);
}
@@ -873,7 +837,7 @@ static void mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi)
bool dst;
mtk_hdmi_hw_aud_set_channel_swap(hdmi, HDMI_AUD_SWAP_LFE_CC);
- mtk_hdmi_set_bits(hdmi, GRL_MIX_CTRL, MIX_CTRL_FLAT);
+ regmap_set_bits(hdmi->regs, GRL_MIX_CTRL, MIX_CTRL_FLAT);
if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF &&
hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST) {
@@ -905,7 +869,7 @@ static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi,
mtk_hdmi_hw_ncts_enable(hdmi, false);
mtk_hdmi_hw_aud_src_disable(hdmi);
- mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_ACLK_INV);
+ regmap_clear_bits(hdmi->regs, GRL_CFG2, CFG2_ACLK_INV);
if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_I2S) {
switch (sample_rate) {
@@ -1061,20 +1025,6 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
return 0;
}
-static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
-{
- struct hdmi_audio_param *aud_param = &hdmi->aud_param;
-
- aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
- aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
- aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
- aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
-
- return 0;
-}
-
static void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
{
mtk_hdmi_hw_send_aud_packet(hdmi, true);
@@ -1087,20 +1037,6 @@ static void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
hdmi->audio_enable = false;
}
-static int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
- struct hdmi_audio_param *param)
-{
- if (!hdmi->audio_enable) {
- dev_err(hdmi->dev, "hdmi audio is in disable state!\n");
- return -EINVAL;
- }
- dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
- param->aud_codec, param->aud_input_type,
- param->aud_input_chan_type, param->codec_params.sample_rate);
- memcpy(&hdmi->aud_param, param, sizeof(*param));
- return mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
-}
-
static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
@@ -1269,6 +1205,7 @@ static const struct drm_edid *mtk_hdmi_bridge_edid_read(struct drm_bridge *bridg
}
static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1281,7 +1218,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
}
if (hdmi->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
+ ret = drm_bridge_attach(encoder, hdmi->next_bridge,
bridge, flags);
if (ret)
return ret;
@@ -1407,30 +1344,20 @@ static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
.edid_read = mtk_hdmi_bridge_edid_read,
};
-static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
- struct platform_device *pdev)
+static int mtk_hdmi_get_cec_dev(struct mtk_hdmi *hdmi, struct device *dev, struct device_node *np)
{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct device_node *cec_np, *remote, *i2c_np;
struct platform_device *cec_pdev;
- struct regmap *regmap;
+ struct device_node *cec_np;
int ret;
ret = mtk_hdmi_get_all_clk(hdmi, np);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get clocks: %d\n", ret);
-
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
/* The CEC module handles HDMI hotplug detection */
cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec");
- if (!cec_np) {
- dev_err(dev, "Failed to find CEC node\n");
- return -EINVAL;
- }
+ if (!cec_np)
+ return dev_err_probe(dev, -EINVAL, "Failed to find CEC node\n");
cec_pdev = of_find_device_by_node(cec_np);
if (!cec_pdev) {
@@ -1440,82 +1367,77 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
return -EPROBE_DEFER;
}
of_node_put(cec_np);
- hdmi->cec_dev = &cec_pdev->dev;
/*
* The mediatek,syscon-hdmi property contains a phandle link to the
* MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG
* registers it contains.
*/
- regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,syscon-hdmi");
- ret = of_property_read_u32_index(np, "mediatek,syscon-hdmi", 1,
- &hdmi->sys_offset);
- if (IS_ERR(regmap))
- ret = PTR_ERR(regmap);
- if (ret) {
- dev_err(dev,
- "Failed to get system configuration registers: %d\n",
- ret);
- goto put_device;
- }
- hdmi->sys_regmap = regmap;
+ hdmi->sys_regmap = syscon_regmap_lookup_by_phandle_args(np, "mediatek,syscon-hdmi",
+ 1, &hdmi->sys_offset);
+ if (IS_ERR(hdmi->sys_regmap))
+ return dev_err_probe(dev, PTR_ERR(hdmi->sys_regmap),
+ "Failed to get system configuration registers\n");
- hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hdmi->regs)) {
- ret = PTR_ERR(hdmi->regs);
- goto put_device;
- }
+ hdmi->cec_dev = &cec_pdev->dev;
+ return 0;
+}
+
+static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *remote, *i2c_np;
+ int ret;
+
+ ret = mtk_hdmi_get_all_clk(hdmi, np);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ hdmi->regs = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
remote = of_graph_get_remote_node(np, 1, 0);
- if (!remote) {
- ret = -EINVAL;
- goto put_device;
- }
+ if (!remote)
+ return -EINVAL;
if (!of_device_is_compatible(remote, "hdmi-connector")) {
hdmi->next_bridge = of_drm_find_bridge(remote);
if (!hdmi->next_bridge) {
dev_err(dev, "Waiting for external bridge\n");
of_node_put(remote);
- ret = -EPROBE_DEFER;
- goto put_device;
+ return -EPROBE_DEFER;
}
}
i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0);
- if (!i2c_np) {
- dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n",
- remote);
- of_node_put(remote);
- ret = -EINVAL;
- goto put_device;
- }
of_node_put(remote);
+ if (!i2c_np)
+ return dev_err_probe(dev, -EINVAL, "No ddc-i2c-bus in connector\n");
hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
of_node_put(i2c_np);
- if (!hdmi->ddc_adpt) {
- dev_err(dev, "Failed to get ddc i2c adapter by node\n");
- ret = -EINVAL;
- goto put_device;
- }
+ if (!hdmi->ddc_adpt)
+ return dev_err_probe(dev, -EINVAL, "Failed to get ddc i2c adapter by node\n");
+
+ ret = mtk_hdmi_get_cec_dev(hdmi, dev, np);
+ if (ret)
+ return ret;
return 0;
-put_device:
- put_device(hdmi->cec_dev);
- return ret;
}
/*
* HDMI audio codec callbacks
*/
-static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
+static int mtk_hdmi_audio_params(struct mtk_hdmi *hdmi,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
- struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- struct hdmi_audio_param hdmi_params;
+ struct hdmi_audio_param aud_params = { 0 };
unsigned int chan = params->cea.channels;
dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
@@ -1526,16 +1448,16 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
switch (chan) {
case 2:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
break;
case 4:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
break;
case 6:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
break;
case 8:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
break;
default:
dev_err(hdmi->dev, "channel[%d] not supported!\n", chan);
@@ -1559,27 +1481,45 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
switch (daifmt->fmt) {
case HDMI_I2S:
- hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
- hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
- hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
+ aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_params.aud_input_type = HDMI_AUD_INPUT_I2S;
+ aud_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ aud_params.aud_mclk = HDMI_AUD_MCLK_128FS;
break;
case HDMI_SPDIF:
- hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
+ aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
break;
default:
dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__,
daifmt->fmt);
return -EINVAL;
}
+ memcpy(&aud_params.codec_params, params, sizeof(aud_params.codec_params));
+ memcpy(&hdmi->aud_param, &aud_params, sizeof(aud_params));
+
+ dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
+ aud_params.aud_codec, aud_params.aud_input_type,
+ aud_params.aud_input_chan_type, aud_params.codec_params.sample_rate);
- memcpy(&hdmi_params.codec_params, params,
- sizeof(hdmi_params.codec_params));
+ return 0;
+}
+
+static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- mtk_hdmi_audio_set_param(hdmi, &hdmi_params);
+ if (!hdmi->audio_enable) {
+ dev_err(hdmi->dev, "hdmi audio is in disable state!\n");
+ return -EINVAL;
+ }
+
+ mtk_hdmi_audio_params(hdmi, daifmt, params);
+ mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
return 0;
}
@@ -1625,17 +1565,22 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
return 0;
}
-static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data,
- hdmi_codec_plugged_cb fn,
+static void mtk_hdmi_audio_set_plugged_cb(struct mtk_hdmi *hdmi, hdmi_codec_plugged_cb fn,
struct device *codec_dev)
{
- struct mtk_hdmi *hdmi = data;
-
mutex_lock(&hdmi->update_plugged_status_lock);
hdmi->plugged_cb = fn;
hdmi->codec_dev = codec_dev;
mutex_unlock(&hdmi->update_plugged_status_lock);
+}
+
+static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct mtk_hdmi *hdmi = data;
+ mtk_hdmi_audio_set_plugged_cb(hdmi, fn, codec_dev);
mtk_hdmi_update_plugged_status(hdmi);
return 0;
@@ -1658,6 +1603,7 @@ static void mtk_hdmi_unregister_audio_driver(void *data)
static int mtk_hdmi_register_audio_driver(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+ struct hdmi_audio_param *aud_param = &hdmi->aud_param;
struct hdmi_codec_pdata codec_data = {
.ops = &mtk_hdmi_audio_codec_ops,
.max_i2s_channels = 2,
@@ -1667,6 +1613,13 @@ static int mtk_hdmi_register_audio_driver(struct device *dev)
};
int ret;
+ aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
+ aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
+ aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+
hdmi->audio_pdev = platform_device_register_data(dev,
HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO,
@@ -1708,11 +1661,6 @@ static int mtk_hdmi_probe(struct platform_device *pdev)
mutex_init(&hdmi->update_plugged_status_lock);
platform_set_drvdata(pdev, hdmi);
- ret = mtk_hdmi_output_init(hdmi);
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to initialize hdmi output\n");
-
ret = mtk_hdmi_register_audio_driver(dev);
if (ret)
return dev_err_probe(dev, ret,
@@ -1789,28 +1737,9 @@ static struct platform_driver mtk_hdmi_driver = {
.pm = &mtk_hdmi_pm_ops,
},
};
-
-static struct platform_driver * const mtk_hdmi_drivers[] = {
- &mtk_hdmi_ddc_driver,
- &mtk_cec_driver,
- &mtk_hdmi_driver,
-};
-
-static int __init mtk_hdmitx_init(void)
-{
- return platform_register_drivers(mtk_hdmi_drivers,
- ARRAY_SIZE(mtk_hdmi_drivers));
-}
-
-static void __exit mtk_hdmitx_exit(void)
-{
- platform_unregister_drivers(mtk_hdmi_drivers,
- ARRAY_SIZE(mtk_hdmi_drivers));
-}
-
-module_init(mtk_hdmitx_init);
-module_exit(mtk_hdmitx_exit);
+module_platform_driver(mtk_hdmi_driver);
MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
MODULE_DESCRIPTION("MediaTek HDMI Driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("DRM_MTK_HDMI_V1");
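
Editor's note: most of the mtk_hdmi.c diff is a one-to-one substitution of the open-coded MMIO helpers with regmap calls. The subtlety is argument order: regmap_update_bits() takes the mask before the value, whereas the removed mtk_hdmi_mask() took the value before the mask. A rough mapping, assuming the standard regmap API:

/* Approximate old-to-new mapping (regmap calls are the kernel-provided API). */
static void example_regmap_equivalents(struct regmap *map, unsigned int reg,
                                       unsigned int mask, unsigned int bits,
                                       unsigned int val)
{
        regmap_read(map, reg, &val);              /* was: val = mtk_hdmi_read(hdmi, reg)       */
        regmap_write(map, reg, val);              /* was: mtk_hdmi_write(hdmi, reg, val)       */
        regmap_set_bits(map, reg, bits);          /* was: mtk_hdmi_set_bits(hdmi, reg, bits)   */
        regmap_clear_bits(map, reg, bits);        /* was: mtk_hdmi_clear_bits(hdmi, reg, bits) */
        regmap_update_bits(map, reg, mask, val);  /* was: mtk_hdmi_mask(hdmi, reg, val, mask)  */
}
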
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h
deleted file mode 100644
index 472bf141c92b..000000000000
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014 MediaTek Inc.
- * Author: Jie Qiu <jie.qiu@mediatek.com>
- */
-#ifndef _MTK_HDMI_CTRL_H
-#define _MTK_HDMI_CTRL_H
-
-struct platform_driver;
-
-extern struct platform_driver mtk_cec_driver;
-extern struct platform_driver mtk_hdmi_ddc_driver;
-
-#endif /* _MTK_HDMI_CTRL_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
index 07db68067844..6358e1af69b4 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -20,7 +20,6 @@
#include <linux/of_platform.h>
#include "mtk_drm_drv.h"
-#include "mtk_hdmi.h"
#define SIF1_CLOK (288)
#define DDC_DDCMCTL0 (0x0)
@@ -337,6 +336,7 @@ struct platform_driver mtk_hdmi_ddc_driver = {
.of_match_table = mtk_hdmi_ddc_match,
},
};
+module_platform_driver(mtk_hdmi_ddc_driver);
MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
MODULE_DESCRIPTION("MediaTek HDMI DDC Driver");
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 81d2ee37e773..49ff9f1f16d3 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -169,7 +169,7 @@ static const struct meson_drm_soc_attr meson_drm_soc_attrs[] = {
/* S805X/S805Y HDMI PLL won't lock for HDMI PHY freq > 1,65GHz */
{
.limits = {
- .max_hdmi_phy_freq = 1650000,
+ .max_hdmi_phy_freq = 1650000000,
},
.attrs = (const struct soc_device_attribute []) {
{ .soc_id = "GXL (S805*)", },
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 3f9345c14f31..be4b0e4df6e1 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -37,7 +37,7 @@ struct meson_drm_match_data {
};
struct meson_drm_soc_limits {
- unsigned int max_hdmi_phy_freq;
+ unsigned long long max_hdmi_phy_freq;
};
struct meson_drm {
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index e79f7c3ce32e..c9678dc68fa1 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -83,12 +83,13 @@ meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
}
static int meson_encoder_cvbs_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_cvbs *meson_encoder_cvbs =
bridge_to_meson_encoder_cvbs(bridge);
- return drm_bridge_attach(bridge->encoder, meson_encoder_cvbs->next_bridge,
+ return drm_bridge_attach(encoder, meson_encoder_cvbs->next_bridge,
&meson_encoder_cvbs->bridge, flags);
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index fe204437bd65..3db518e5f95d 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -33,11 +33,12 @@ struct meson_encoder_dsi {
container_of(x, struct meson_encoder_dsi, bridge)
static int meson_encoder_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, encoder_dsi->next_bridge,
+ return drm_bridge_attach(encoder, encoder_dsi->next_bridge,
&encoder_dsi->bridge, flags);
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 6d1c9262a2cf..47136bbbe8c6 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -49,11 +49,12 @@ struct meson_encoder_hdmi {
container_of(x, struct meson_encoder_hdmi, bridge)
static int meson_encoder_hdmi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
- return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge,
+ return drm_bridge_attach(encoder, encoder_hdmi->next_bridge,
&encoder_hdmi->bridge, flags);
}
@@ -70,12 +71,12 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
{
struct meson_drm *priv = encoder_hdmi->priv;
int vic = drm_match_cea_mode(mode);
- unsigned int phy_freq;
- unsigned int vclk_freq;
- unsigned int venc_freq;
- unsigned int hdmi_freq;
+ unsigned long long phy_freq;
+ unsigned long long vclk_freq;
+ unsigned long long venc_freq;
+ unsigned long long hdmi_freq;
- vclk_freq = mode->clock;
+ vclk_freq = mode->clock * 1000ULL;
/* For 420, pixel clock is half unlike venc clock */
if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
@@ -107,7 +108,8 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- dev_dbg(priv->dev, "vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
+ dev_dbg(priv->dev,
+ "vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
phy_freq, vclk_freq, venc_freq, hdmi_freq,
priv->venc.hdmi_use_enci);
@@ -122,10 +124,11 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
struct meson_drm *priv = encoder_hdmi->priv;
bool is_hdmi2_sink = display_info->hdmi.scdc.supported;
- unsigned int phy_freq;
- unsigned int vclk_freq;
- unsigned int venc_freq;
- unsigned int hdmi_freq;
+ unsigned long long clock = mode->clock * 1000ULL;
+ unsigned long long phy_freq;
+ unsigned long long vclk_freq;
+ unsigned long long venc_freq;
+ unsigned long long hdmi_freq;
int vic = drm_match_cea_mode(mode);
enum drm_mode_status status;
@@ -144,12 +147,12 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
if (status != MODE_OK)
return status;
- return meson_vclk_dmt_supported_freq(priv, mode->clock);
+ return meson_vclk_dmt_supported_freq(priv, clock);
/* Check against supported VIC modes */
} else if (!meson_venc_hdmi_supported_vic(vic))
return MODE_BAD;
- vclk_freq = mode->clock;
+ vclk_freq = clock;
/* For 420, pixel clock is half unlike venc clock */
if (drm_mode_is_420_only(display_info, mode) ||
@@ -179,7 +182,8 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- dev_dbg(priv->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
+ dev_dbg(priv->dev,
+ "%s: vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz\n",
__func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
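
Editor's note: the meson changes move all frequency bookkeeping from unsigned int kHz to unsigned long long Hz, which is why mode->clock (kHz, per DRM convention) is scaled by 1000ULL before being compared against limits such as max_hdmi_phy_freq, and why the S805X limit earlier in this section becomes 1650000000 Hz. An illustrative check under that assumption; the real code derives the PHY frequency from the pixel clock first:

/* Sketch only: kHz-to-Hz scaling against the new Hz-based limit. */
static bool example_phy_freq_ok(const struct drm_display_mode *mode,
                                const struct meson_drm_soc_limits *limits)
{
        unsigned long long clock_hz = mode->clock * 1000ULL;   /* e.g. 148500 kHz -> 148500000 Hz */

        return clock_hz <= limits->max_hdmi_phy_freq;           /* S805X limit: 1650000000 Hz */
}
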
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 2a942dc6a6dc..3325580d885d 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -110,7 +110,10 @@
#define HDMI_PLL_LOCK BIT(31)
#define HDMI_PLL_LOCK_G12A (3 << 30)
-#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST(_freq * 1000, 1001)
+#define PIXEL_FREQ_1000_1001(_freq) \
+ DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
+#define PHY_FREQ_1000_1001(_freq) \
+ (PIXEL_FREQ_1000_1001(DIV_ROUND_DOWN_ULL(_freq, 10ULL)) * 10)
/* VID PLL Dividers */
enum {
@@ -360,11 +363,11 @@ enum {
};
struct meson_vclk_params {
- unsigned int pll_freq;
- unsigned int phy_freq;
- unsigned int vclk_freq;
- unsigned int venc_freq;
- unsigned int pixel_freq;
+ unsigned long long pll_freq;
+ unsigned long long phy_freq;
+ unsigned long long vclk_freq;
+ unsigned long long venc_freq;
+ unsigned long long pixel_freq;
unsigned int pll_od1;
unsigned int pll_od2;
unsigned int pll_od3;
@@ -372,11 +375,11 @@ struct meson_vclk_params {
unsigned int vclk_div;
} params[] = {
[MESON_VCLK_HDMI_ENCI_54000] = {
- .pll_freq = 4320000,
- .phy_freq = 270000,
- .vclk_freq = 54000,
- .venc_freq = 54000,
- .pixel_freq = 54000,
+ .pll_freq = 4320000000,
+ .phy_freq = 270000000,
+ .vclk_freq = 54000000,
+ .venc_freq = 54000000,
+ .pixel_freq = 54000000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -384,11 +387,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_54000] = {
- .pll_freq = 4320000,
- .phy_freq = 270000,
- .vclk_freq = 54000,
- .venc_freq = 54000,
- .pixel_freq = 27000,
+ .pll_freq = 4320000000,
+ .phy_freq = 270000000,
+ .vclk_freq = 54000000,
+ .venc_freq = 54000000,
+ .pixel_freq = 27000000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -396,11 +399,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_148500] = {
- .pll_freq = 2970000,
- .phy_freq = 742500,
- .vclk_freq = 148500,
- .venc_freq = 148500,
- .pixel_freq = 74250,
+ .pll_freq = 2970000000,
+ .phy_freq = 742500000,
+ .vclk_freq = 148500000,
+ .venc_freq = 148500000,
+ .pixel_freq = 74250000,
.pll_od1 = 4,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -408,11 +411,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_74250] = {
- .pll_freq = 2970000,
- .phy_freq = 742500,
- .vclk_freq = 74250,
- .venc_freq = 74250,
- .pixel_freq = 74250,
+ .pll_freq = 2970000000,
+ .phy_freq = 742500000,
+ .vclk_freq = 74250000,
+ .venc_freq = 74250000,
+ .pixel_freq = 74250000,
.pll_od1 = 2,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -420,11 +423,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_148500] = {
- .pll_freq = 2970000,
- .phy_freq = 1485000,
- .vclk_freq = 148500,
- .venc_freq = 148500,
- .pixel_freq = 148500,
+ .pll_freq = 2970000000,
+ .phy_freq = 1485000000,
+ .vclk_freq = 148500000,
+ .venc_freq = 148500000,
+ .pixel_freq = 148500000,
.pll_od1 = 1,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -432,11 +435,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_297000] = {
- .pll_freq = 5940000,
- .phy_freq = 2970000,
- .venc_freq = 297000,
- .vclk_freq = 297000,
- .pixel_freq = 297000,
+ .pll_freq = 5940000000,
+ .phy_freq = 2970000000,
+ .venc_freq = 297000000,
+ .vclk_freq = 297000000,
+ .pixel_freq = 297000000,
.pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -444,11 +447,11 @@ struct meson_vclk_params {
.vclk_div = 2,
},
[MESON_VCLK_HDMI_594000] = {
- .pll_freq = 5940000,
- .phy_freq = 5940000,
- .venc_freq = 594000,
- .vclk_freq = 594000,
- .pixel_freq = 594000,
+ .pll_freq = 5940000000,
+ .phy_freq = 5940000000,
+ .venc_freq = 594000000,
+ .vclk_freq = 594000000,
+ .pixel_freq = 594000000,
.pll_od1 = 1,
.pll_od2 = 1,
.pll_od3 = 2,
@@ -456,11 +459,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_594000_YUV420] = {
- .pll_freq = 5940000,
- .phy_freq = 2970000,
- .venc_freq = 594000,
- .vclk_freq = 594000,
- .pixel_freq = 297000,
+ .pll_freq = 5940000000,
+ .phy_freq = 2970000000,
+ .venc_freq = 594000000,
+ .vclk_freq = 594000000,
+ .pixel_freq = 297000000,
.pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -617,16 +620,16 @@ static void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
3 << 20, pll_od_to_reg(od3) << 20);
}
-#define XTAL_FREQ 24000
+#define XTAL_FREQ (24 * 1000 * 1000)
static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
- unsigned int pll_freq)
+ unsigned long long pll_freq)
{
/* The GXBB PLL has a /2 pre-multiplier */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
- pll_freq /= 2;
+ pll_freq = DIV_ROUND_DOWN_ULL(pll_freq, 2);
- return pll_freq / XTAL_FREQ;
+ return DIV_ROUND_DOWN_ULL(pll_freq, XTAL_FREQ);
}
#define HDMI_FRAC_MAX_GXBB 4096
@@ -635,12 +638,13 @@ static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
unsigned int m,
- unsigned int pll_freq)
+ unsigned long long pll_freq)
{
- unsigned int parent_freq = XTAL_FREQ;
+ unsigned long long parent_freq = XTAL_FREQ;
unsigned int frac_max = HDMI_FRAC_MAX_GXL;
unsigned int frac_m;
unsigned int frac;
+ u32 remainder;
/* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
@@ -652,11 +656,11 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
frac_max = HDMI_FRAC_MAX_G12A;
/* We can have a perfect match! */
- if (pll_freq / m == parent_freq &&
- pll_freq % m == 0)
+ if (div_u64_rem(pll_freq, m, &remainder) == parent_freq &&
+ remainder == 0)
return 0;
- frac = div_u64((u64)pll_freq * (u64)frac_max, parent_freq);
+ frac = mul_u64_u64_div_u64(pll_freq, frac_max, parent_freq);
frac_m = m * frac_max;
if (frac_m > frac)
return frac_max;
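
With frequencies in Hz, the m/frac values in the tables follow directly from meson_hdmi_pll_get_m()/meson_hdmi_pll_get_frac(): m is the integer multiple of the 24 MHz XTAL, and frac carries the fractional remainder scaled by frac_max. A standalone sketch of that arithmetic for the GXL 2.97 GHz case, using plain 64-bit division in place of DIV_ROUND_DOWN_ULL/mul_u64_u64_div_u64 and assuming frac_max = 1024 for GXL (the exact constant lives next to HDMI_FRAC_MAX_GXBB above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t xtal     = 24000000ULL;    /* XTAL_FREQ in Hz */
	uint64_t pll_freq = 2970000000ULL;  /* target PLL rate */
	uint64_t frac_max = 1024;           /* assumed HDMI_FRAC_MAX_GXL */

	uint64_t m    = pll_freq / xtal;                           /* 123 == 0x7b */
	uint64_t frac = pll_freq * frac_max / xtal - m * frac_max; /* 768 == 0x300 */

	/* matches the GXL/GXM 2970000000 entry in meson_vclk_set() below */
	printf("m=0x%llx frac=0x%llx\n",
	       (unsigned long long)m, (unsigned long long)frac);
	return 0;
}
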
@@ -666,7 +670,7 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
}
static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
- unsigned int m,
+ unsigned long long m,
unsigned int frac)
{
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
@@ -694,7 +698,7 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
}
static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
- unsigned int freq,
+ unsigned long long freq,
unsigned int *m,
unsigned int *frac,
unsigned int *od)
@@ -706,7 +710,7 @@ static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
continue;
*frac = meson_hdmi_pll_get_frac(priv, *m, freq * *od);
- DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d\n",
+ DRM_DEBUG_DRIVER("PLL params for %lluHz: m=%x frac=%x od=%d\n",
freq, *m, *frac, *od);
if (meson_hdmi_pll_validate_params(priv, *m, *frac))
@@ -718,7 +722,7 @@ static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
/* pll_freq is the frequency after the OD dividers */
enum drm_mode_status
-meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned long long freq)
{
unsigned int od, m, frac;
@@ -741,7 +745,7 @@ EXPORT_SYMBOL_GPL(meson_vclk_dmt_supported_freq);
/* pll_freq is the frequency after the OD dividers */
static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
- unsigned int pll_freq)
+ unsigned long long pll_freq)
{
unsigned int od, m, frac, od1, od2, od3;
@@ -756,7 +760,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
od1 = od / od2;
}
- DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d/%d/%d\n",
+ DRM_DEBUG_DRIVER("PLL params for %lluHz: m=%x frac=%x od=%d/%d/%d\n",
pll_freq, m, frac, od1, od2, od3);
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
@@ -764,17 +768,18 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
return;
}
- DRM_ERROR("Fatal, unable to find parameters for PLL freq %d\n",
+ DRM_ERROR("Fatal, unable to find parameters for PLL freq %lluHz\n",
pll_freq);
}
enum drm_mode_status
-meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
- unsigned int vclk_freq)
+meson_vclk_vic_supported_freq(struct meson_drm *priv,
+ unsigned long long phy_freq,
+ unsigned long long vclk_freq)
{
int i;
- DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
+ DRM_DEBUG_DRIVER("phy_freq = %lluHz vclk_freq = %lluHz\n",
phy_freq, vclk_freq);
/* Check against soc revision/package limits */
@@ -785,19 +790,19 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
}
for (i = 0 ; params[i].pixel_freq ; ++i) {
- DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
+ DRM_DEBUG_DRIVER("i = %d pixel_freq = %lluHz alt = %lluHz\n",
i, params[i].pixel_freq,
- FREQ_1000_1001(params[i].pixel_freq));
- DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
+ PIXEL_FREQ_1000_1001(params[i].pixel_freq));
+ DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
i, params[i].phy_freq,
- FREQ_1000_1001(params[i].phy_freq/1000)*1000);
+ PHY_FREQ_1000_1001(params[i].phy_freq));
/* Match strict frequency */
if (phy_freq == params[i].phy_freq &&
vclk_freq == params[i].vclk_freq)
return MODE_OK;
/* Match 1000/1001 variant */
- if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
- vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
+ if (phy_freq == PHY_FREQ_1000_1001(params[i].phy_freq) &&
+ vclk_freq == PIXEL_FREQ_1000_1001(params[i].vclk_freq))
return MODE_OK;
}
@@ -805,8 +810,9 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
}
EXPORT_SYMBOL_GPL(meson_vclk_vic_supported_freq);
-static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
- unsigned int od1, unsigned int od2, unsigned int od3,
+static void meson_vclk_set(struct meson_drm *priv,
+ unsigned long long pll_base_freq, unsigned int od1,
+ unsigned int od2, unsigned int od3,
unsigned int vid_pll_div, unsigned int vclk_div,
unsigned int hdmi_tx_div, unsigned int venc_div,
bool hdmi_use_enci, bool vic_alternate_clock)
@@ -826,15 +832,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
meson_hdmi_pll_generic_set(priv, pll_base_freq);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
switch (pll_base_freq) {
- case 2970000:
+ case 2970000000:
m = 0x3d;
frac = vic_alternate_clock ? 0xd02 : 0xe00;
break;
- case 4320000:
+ case 4320000000:
m = vic_alternate_clock ? 0x59 : 0x5a;
frac = vic_alternate_clock ? 0xe8f : 0;
break;
- case 5940000:
+ case 5940000000:
m = 0x7b;
frac = vic_alternate_clock ? 0xa05 : 0xc00;
break;
@@ -844,15 +850,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
switch (pll_base_freq) {
- case 2970000:
+ case 2970000000:
m = 0x7b;
frac = vic_alternate_clock ? 0x281 : 0x300;
break;
- case 4320000:
+ case 4320000000:
m = vic_alternate_clock ? 0xb3 : 0xb4;
frac = vic_alternate_clock ? 0x347 : 0;
break;
- case 5940000:
+ case 5940000000:
m = 0xf7;
frac = vic_alternate_clock ? 0x102 : 0x200;
break;
@@ -861,15 +867,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
switch (pll_base_freq) {
- case 2970000:
+ case 2970000000:
m = 0x7b;
frac = vic_alternate_clock ? 0x140b4 : 0x18000;
break;
- case 4320000:
+ case 4320000000:
m = vic_alternate_clock ? 0xb3 : 0xb4;
frac = vic_alternate_clock ? 0x1a3ee : 0;
break;
- case 5940000:
+ case 5940000000:
m = 0xf7;
frac = vic_alternate_clock ? 0x8148 : 0x10000;
break;
@@ -1025,14 +1031,14 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int phy_freq, unsigned int vclk_freq,
- unsigned int venc_freq, unsigned int dac_freq,
+ unsigned long long phy_freq, unsigned long long vclk_freq,
+ unsigned long long venc_freq, unsigned long long dac_freq,
bool hdmi_use_enci)
{
bool vic_alternate_clock = false;
- unsigned int freq;
- unsigned int hdmi_tx_div;
- unsigned int venc_div;
+ unsigned long long freq;
+ unsigned long long hdmi_tx_div;
+ unsigned long long venc_div;
if (target == MESON_VCLK_TARGET_CVBS) {
meson_venci_cvbs_clock_config(priv);
@@ -1052,27 +1058,27 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
return;
}
- hdmi_tx_div = vclk_freq / dac_freq;
+ hdmi_tx_div = DIV_ROUND_DOWN_ULL(vclk_freq, dac_freq);
if (hdmi_tx_div == 0) {
- pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
+ pr_err("Fatal Error, invalid HDMI-TX freq %lluHz\n",
dac_freq);
return;
}
- venc_div = vclk_freq / venc_freq;
+ venc_div = DIV_ROUND_DOWN_ULL(vclk_freq, venc_freq);
if (venc_div == 0) {
- pr_err("Fatal Error, invalid HDMI venc freq %d\n",
+ pr_err("Fatal Error, invalid HDMI venc freq %lluHz\n",
venc_freq);
return;
}
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
if ((phy_freq == params[freq].phy_freq ||
- phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
+ phy_freq == PHY_FREQ_1000_1001(params[freq].phy_freq)) &&
(vclk_freq == params[freq].vclk_freq ||
- vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
+ vclk_freq == PIXEL_FREQ_1000_1001(params[freq].vclk_freq))) {
if (vclk_freq != params[freq].vclk_freq)
vic_alternate_clock = true;
else
@@ -1098,7 +1104,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
}
if (!params[freq].pixel_freq) {
- pr_err("Fatal Error, invalid HDMI vclk freq %d\n", vclk_freq);
+ pr_err("Fatal Error, invalid HDMI vclk freq %lluHz\n",
+ vclk_freq);
return;
}
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index 60617aaf18dd..7ac55744e574 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -20,17 +20,18 @@ enum {
};
/* 27MHz is the CVBS Pixel Clock */
-#define MESON_VCLK_CVBS 27000
+#define MESON_VCLK_CVBS (27 * 1000 * 1000)
enum drm_mode_status
-meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned long long freq);
enum drm_mode_status
-meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
- unsigned int vclk_freq);
+meson_vclk_vic_supported_freq(struct meson_drm *priv,
+ unsigned long long phy_freq,
+ unsigned long long vclk_freq);
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int phy_freq, unsigned int vclk_freq,
- unsigned int venc_freq, unsigned int dac_freq,
+ unsigned long long phy_freq, unsigned long long vclk_freq,
+ unsigned long long venc_freq, unsigned long long dac_freq,
bool hdmi_use_enci);
#endif /* __MESON_VCLK_H */
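
The move from kHz in unsigned int to Hz in unsigned long long is not cosmetic: the 5.94 GHz PHY/TMDS rate used for 4K modes no longer fits in 32 bits once expressed in Hz. A tiny sketch of the truncation that would otherwise occur:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t phy32 = (uint32_t)5940000000ULL; /* 5940000000 > UINT32_MAX, wraps */
	uint64_t phy64 = 5940000000ULL;

	printf("32-bit: %u Hz (wrapped)\n", phy32);               /* 1645032704 */
	printf("64-bit: %llu Hz\n", (unsigned long long)phy64);   /* 5940000000 */
	return 0;
}
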
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 412dcbea0e2d..a962ae564a75 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_MGAG200
tristate "Matrox G200"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index fb71658c3117..6067d08aeee3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -223,7 +223,7 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod
vsyncstr = mode->crtc_vsync_start - 1;
vsyncend = mode->crtc_vsync_end - 1;
vtotal = mode->crtc_vtotal - 2;
- vblkstr = mode->crtc_vblank_start;
+ vblkstr = mode->crtc_vblank_start - 1;
vblkend = vtotal + 1;
linecomp = vdispend;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 974bc7c0ea76..7f127e2ae442 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -104,6 +104,7 @@ config DRM_MSM_DPU
config DRM_MSM_DP
bool "Enable DisplayPort support in MSM DRM driver"
depends on DRM_MSM
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
select RATIONAL
default y
help
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 5df20cbeafb8..7a2ada6e2d74 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -48,7 +48,6 @@ msm-display-$(CONFIG_DRM_MSM_MDP4) += \
disp/mdp4/mdp4_dsi_encoder.o \
disp/mdp4/mdp4_dtv_encoder.o \
disp/mdp4/mdp4_lcdc_encoder.o \
- disp/mdp4/mdp4_lvds_connector.o \
disp/mdp4/mdp4_lvds_pll.o \
disp/mdp4/mdp4_irq.o \
disp/mdp4/mdp4_kms.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
index 9ddb7b31fd98..5ddd015f930d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
@@ -45,8 +45,3 @@ static const struct adreno_info a2xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a2xx);
-
-MODULE_FIRMWARE("qcom/leia_pfp_470.fw");
-MODULE_FIRMWARE("qcom/leia_pm4_470.fw");
-MODULE_FIRMWARE("qcom/yamato_pfp.fw");
-MODULE_FIRMWARE("qcom/yamato_pm4.fw");
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
index 2eb6c3e93748..1498e6532f62 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
@@ -85,8 +85,3 @@ static const struct adreno_info a3xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a3xx);
-
-MODULE_FIRMWARE("qcom/a300_pm4.fw");
-MODULE_FIRMWARE("qcom/a300_pfp.fw");
-MODULE_FIRMWARE("qcom/a330_pm4.fw");
-MODULE_FIRMWARE("qcom/a330_pfp.fw");
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
index 93519f807f87..09f9f228b75e 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
@@ -45,6 +45,3 @@ static const struct adreno_info a4xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a4xx);
-
-MODULE_FIRMWARE("qcom/a420_pm4.fw");
-MODULE_FIRMWARE("qcom/a420_pfp.fw");
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
index 633f31539162..b48a636d8237 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
@@ -150,12 +150,3 @@ static const struct adreno_info a5xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a5xx);
-
-MODULE_FIRMWARE("qcom/a530_pm4.fw");
-MODULE_FIRMWARE("qcom/a530_pfp.fw");
-MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
-MODULE_FIRMWARE("qcom/a530_zap.mdt");
-MODULE_FIRMWARE("qcom/a530_zap.b00");
-MODULE_FIRMWARE("qcom/a530_zap.b01");
-MODULE_FIRMWARE("qcom/a530_zap.b02");
-MODULE_FIRMWARE("qcom/a540_gpmu.fw2");
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 53e2ff4406d8..70f7ad806c34 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -681,6 +681,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
},
.gmem = (SZ_128K + SZ_4K),
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a610_zap.mdt",
@@ -713,6 +714,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
@@ -743,7 +745,8 @@ static const struct adreno_info a6xx_gpus[] = {
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mbn",
.a6xx = &(const struct a6xx_info) {
@@ -769,7 +772,8 @@ static const struct adreno_info a6xx_gpus[] = {
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.init = a6xx_gpu_init,
.a6xx = &(const struct a6xx_info) {
.protect = &a630_protect,
@@ -791,6 +795,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
@@ -815,6 +820,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
@@ -838,8 +844,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -874,7 +881,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00010000,
},
- .address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 137, 1 },
@@ -907,7 +913,6 @@ static const struct adreno_info a6xx_gpus[] = {
{ /* sentinel */ },
},
},
- .address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
@@ -920,8 +925,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_1M,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a630_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -939,8 +945,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_1M,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -973,7 +980,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00300200,
},
- .address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 1, 1 },
@@ -1000,7 +1006,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020000,
.prim_fifo_threshold = 0x00300200,
},
- .address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06060300),
.family = ADRENO_6XX_GEN4,
@@ -1019,7 +1024,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00300200,
},
- .address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06030500),
.family = ADRENO_6XX_GEN4,
@@ -1039,7 +1043,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00200200,
},
- .address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 117, 0 },
@@ -1056,8 +1059,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_2M,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -1085,22 +1089,10 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00800200,
},
- .address_space_size = SZ_16G,
}
};
DECLARE_ADRENO_GPULIST(a6xx);
-MODULE_FIRMWARE("qcom/a615_zap.mbn");
-MODULE_FIRMWARE("qcom/a619_gmu.bin");
-MODULE_FIRMWARE("qcom/a630_sqe.fw");
-MODULE_FIRMWARE("qcom/a630_gmu.bin");
-MODULE_FIRMWARE("qcom/a630_zap.mbn");
-MODULE_FIRMWARE("qcom/a640_gmu.bin");
-MODULE_FIRMWARE("qcom/a650_gmu.bin");
-MODULE_FIRMWARE("qcom/a650_sqe.fw");
-MODULE_FIRMWARE("qcom/a660_gmu.bin");
-MODULE_FIRMWARE("qcom/a660_sqe.fw");
-
static const struct adreno_reglist a702_hwcg[] = {
{ REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220 },
@@ -1395,7 +1387,6 @@ static const struct adreno_info a7xx_gpus[] = {
.pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_cgc_mode = 0x00020000,
},
- .address_space_size = SZ_16G,
.preempt_record_size = 2860 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */
@@ -1429,7 +1420,6 @@ static const struct adreno_info a7xx_gpus[] = {
{ /* sentinel */ },
},
},
- .address_space_size = SZ_16G,
.preempt_record_size = 4192 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050c01), /* "C512v2" */
@@ -1451,7 +1441,6 @@ static const struct adreno_info a7xx_gpus[] = {
.gmu_chipid = 0x7050001,
.gmu_cgc_mode = 0x00020202,
},
- .address_space_size = SZ_256G,
.preempt_record_size = 4192 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */
@@ -1484,7 +1473,6 @@ static const struct adreno_info a7xx_gpus[] = {
{ /* sentinel */ },
},
},
- .address_space_size = SZ_16G,
.preempt_record_size = 3572 * SZ_1K,
}
};
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index c8711938a5f4..38c0f8ef85c3 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1064,14 +1064,6 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
gmu->hung = false;
- /* Notify AOSS about the ACD state (unimplemented for now => disable it) */
- if (!IS_ERR(gmu->qmp)) {
- ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}",
- 0 /* Hardcode ACD to be disabled for now */);
- if (ret)
- dev_err(gmu->dev, "failed to send GPU ACD state\n");
- }
-
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
@@ -1671,6 +1663,75 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
return a6xx_gmu_rpmh_votes_init(gmu);
}
+static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct a6xx_hfi_acd_table *cmd = &gmu->acd_table;
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ int ret, i, cmd_idx = 0;
+ extern bool disable_acd;
+
+ /* Skip ACD probe if requested via module param */
+ if (disable_acd) {
+ DRM_DEV_ERROR(gmu->dev, "Skipping GPU ACD probe\n");
+ return 0;
+ }
+
+ cmd->version = 1;
+ cmd->stride = 1;
+ cmd->enable_by_level = 0;
+
+ /* Skip freq = 0 and parse acd-level for rest of the OPPs */
+ for (i = 1; i < gmu->nr_gpu_freqs; i++) {
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ unsigned long freq;
+ u32 val;
+
+ freq = gmu->gpu_freqs[i];
+ opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true);
+ np = dev_pm_opp_get_of_node(opp);
+
+ ret = of_property_read_u32(np, "qcom,opp-acd-level", &val);
+ of_node_put(np);
+ dev_pm_opp_put(opp);
+ if (ret == -EINVAL)
+ continue;
+ else if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to read acd level for freq %lu\n", freq);
+ return ret;
+ }
+
+ cmd->enable_by_level |= BIT(i);
+ cmd->data[cmd_idx++] = val;
+ }
+
+ cmd->num_levels = cmd_idx;
+
+ /* It is a problem if qmp node is unavailable when ACD is required */
+ if (cmd->enable_by_level && IS_ERR_OR_NULL(gmu->qmp)) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to send ACD state to AOSS\n");
+ return -EINVAL;
+ }
+
+ /* Otherwise, nothing to do if qmp is unavailable */
+ if (IS_ERR_OR_NULL(gmu->qmp))
+ return 0;
+
+ /*
+ * Notify AOSS about the ACD state. AOSS is supposed to assume that ACD is disabled on
+ * system reset. So it is harmless if we couldn't notify 'OFF' state
+ */
+ ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", !!cmd->enable_by_level);
+ if (ret && cmd->enable_by_level) {
+ DRM_DEV_ERROR(gmu->dev, "Failed to send ACD state to AOSS\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
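
As a rough illustration of how the ACD table above ends up populated (the layout mirrors a6xx_hfi_acd_table; the per-OPP values here are invented): bit i of enable_by_level marks GPU power level i whose OPP node carries a "qcom,opp-acd-level" property, level 0 (freq == 0) is skipped, and the levels' values are packed back-to-back into data[].

#include <stdio.h>
#include <stdint.h>

#define MAX_ACD_STRIDE 2

struct acd_table {
	uint32_t enable_by_level;
	uint32_t stride;
	uint32_t num_levels;
	uint32_t data[16 * MAX_ACD_STRIDE];
};

int main(void)
{
	/* pretend only freq indexes 2 and 3 carried an acd-level property */
	uint32_t acd_level[] = { 0, 0, 0x1234, 0x5678 };   /* made-up values */
	int has_level[]      = { 0, 0, 1,      1 };
	struct acd_table t = { .stride = 1 };
	int i, idx = 0;

	for (i = 1; i < 4; i++) {	/* index 0 (freq == 0) is skipped */
		if (!has_level[i])
			continue;
		t.enable_by_level |= 1u << i;
		t.data[idx++] = acd_level[i];
	}
	t.num_levels = idx;

	/* enable_by_level=0xc num_levels=2 */
	printf("enable_by_level=0x%x num_levels=%u\n",
	       t.enable_by_level, t.num_levels);
	return 0;
}
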
@@ -1989,10 +2050,11 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto detach_cxpd;
}
+ /* Other errors are handled during GPU ACD probe */
gmu->qmp = qmp_get(gmu->dev);
- if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) {
- ret = PTR_ERR(gmu->qmp);
- goto remove_device_link;
+ if (PTR_ERR_OR_ZERO(gmu->qmp) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto detach_gxpd;
}
init_completion(&gmu->pd_gate);
@@ -2008,6 +2070,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
+ ret = a6xx_gmu_acd_probe(gmu);
+ if (ret)
+ goto detach_gxpd;
+
/* Set up the HFI queues */
a6xx_hfi_init(gmu);
@@ -2018,7 +2084,13 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
return 0;
-remove_device_link:
+detach_gxpd:
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ dev_pm_domain_detach(gmu->gxpd, false);
+
+ if (!IS_ERR_OR_NULL(gmu->qmp))
+ qmp_put(gmu->qmp);
+
device_link_del(link);
detach_cxpd:
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 0c888b326cfb..b2d4489b4024 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -93,6 +93,7 @@ struct a6xx_gmu {
int nr_gpu_freqs;
unsigned long gpu_freqs[GMU_MAX_GX_FREQS];
u32 gx_arc_votes[GMU_MAX_GX_FREQS];
+ struct a6xx_hfi_acd_table acd_table;
int nr_gpu_bws;
unsigned long gpu_bw_table[GMU_MAX_GX_FREQS];
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 06465bc2d0b4..bf3758f010f4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -242,10 +242,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
- OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
- OUT_RING(ring, submit->cmd[i].size);
+ OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size));
ibs++;
break;
}
@@ -377,10 +377,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
- OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
- OUT_RING(ring, submit->cmd[i].size);
+ OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size));
ibs++;
break;
}
@@ -655,7 +655,6 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
if (adreno_is_7c3(gpu)) {
gpu->ubwc_config.highest_bank_bit = 14;
gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.rgb565_predicator = 1;
gpu->ubwc_config.uavflagprd_inv = 2;
gpu->ubwc_config.macrotile_mode = 1;
}
@@ -2268,7 +2267,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
return ERR_CAST(mmu);
return msm_gem_address_space_create(mmu,
- "gpu", 0x100000000ULL,
+ "gpu", ADRENO_VM_START,
adreno_private_address_space_size(gpu));
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 0989aee3dd2c..8e69b1e84657 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -100,16 +100,14 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
return 0;
}
-static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
- u32 *payload, u32 payload_size)
+static int a6xx_hfi_wait_for_msg_interrupt(struct a6xx_gmu *gmu, u32 id, u32 seqnum)
{
- struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
- u32 val;
int ret;
+ u32 val;
/* Wait for a response */
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
- val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
+ val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 1000000);
if (ret) {
DRM_DEV_ERROR(gmu->dev,
@@ -122,6 +120,19 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);
+ return 0;
+}
+
+static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
+ u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
+ int ret;
+
+ ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
+ if (ret)
+ return ret;
+
for (;;) {
struct a6xx_hfi_msg_response resp;
@@ -129,12 +140,18 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
sizeof(resp) >> 2);
- /* If the queue is empty our response never made it */
+ /* If the queue is empty, there may have been previous missed
+ * responses that preceded the response to our packet. Wait
+ * further before we give up.
+ */
if (!ret) {
- DRM_DEV_ERROR(gmu->dev,
- "The HFI response queue is unexpectedly empty\n");
-
- return -ENOENT;
+ ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev,
+ "The HFI response queue is unexpectedly empty\n");
+ return ret;
+ }
+ continue;
}
if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
@@ -748,6 +765,38 @@ send:
NULL, 0);
}
+#define HFI_FEATURE_ACD 12
+
+static int a6xx_hfi_enable_acd(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_acd_table *acd_table = &gmu->acd_table;
+ struct a6xx_hfi_msg_feature_ctrl msg = {
+ .feature = HFI_FEATURE_ACD,
+ .enable = 1,
+ .data = 0,
+ };
+ int ret;
+
+ if (!acd_table->enable_by_level)
+ return 0;
+
+ /* Enable ACD feature at GMU */
+ ret = a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to enable ACD (%d)\n", ret);
+ return ret;
+ }
+
+ /* Send ACD table to GMU */
+ ret = a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_ACD, acd_table, sizeof(*acd_table), NULL, 0);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to ACD table (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_test msg = { 0 };
@@ -845,6 +894,10 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
if (ret)
return ret;
+ ret = a6xx_hfi_enable_acd(gmu);
+ if (ret)
+ return ret;
+
ret = a6xx_hfi_send_core_fw_start(gmu);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 52ba4a07d7b9..653ef720e2da 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -151,12 +151,33 @@ struct a6xx_hfi_msg_test {
u32 header;
};
+#define HFI_H2F_MSG_ACD 7
+#define MAX_ACD_STRIDE 2
+
+struct a6xx_hfi_acd_table {
+ u32 header;
+ u32 version;
+ u32 enable_by_level;
+ u32 stride;
+ u32 num_levels;
+ u32 data[16 * MAX_ACD_STRIDE];
+};
+
#define HFI_H2F_MSG_START 10
struct a6xx_hfi_msg_start {
u32 header;
};
+#define HFI_H2F_FEATURE_CTRL 11
+
+struct a6xx_hfi_msg_feature_ctrl {
+ u32 header;
+ u32 feature;
+ u32 enable;
+ u32 data;
+};
+
#define HFI_H2F_MSG_CORE_FW_START 14
struct a6xx_hfi_msg_core_fw_start {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 236b25c094cd..f5e1490d07c1 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -24,6 +24,10 @@ int enable_preemption = -1;
MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on , 0=disable, -1=auto (default))");
module_param(enable_preemption, int, 0600);
+bool disable_acd;
+MODULE_PARM_DESC(disable_acd, "Forcefully disable GPU ACD");
+module_param_unsafe(disable_acd, bool, 0400);
+
extern const struct adreno_gpulist a2xx_gpulist;
extern const struct adreno_gpulist a3xx_gpulist;
extern const struct adreno_gpulist a4xx_gpulist;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 26db1f4b5fb9..2348ffb35f7e 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -236,14 +236,27 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
u64 adreno_private_address_space_size(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+ const struct io_pgtable_cfg *ttbr1_cfg;
if (address_space_size)
return address_space_size;
- if (adreno_gpu->info->address_space_size)
- return adreno_gpu->info->address_space_size;
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_4GB_VA)
+ return SZ_4G;
- return SZ_4G;
+ if (!adreno_smmu || !adreno_smmu->get_ttbr1_cfg)
+ return SZ_4G;
+
+ ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+
+ /*
+ * Userspace VM is actually using TTBR0, but both are the same size,
+ * with b48 (sign bit) selecting which TTBRn to use. So if IAS is
+ * 48, the total (kernel+user) address space size is effectively
+ * 49 bits. But what userspace is in control of is the lower 48.
+ */
+ return BIT(ttbr1_cfg->ias) - ADRENO_VM_START;
}
#define ARM_SMMU_FSR_TF BIT(1)
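
With the fixed address_space_size fields gone, the private VA size now follows the SMMU's input address size minus the 4 GiB kept below ADRENO_VM_START, unless ADRENO_QUIRK_4GB_VA pins it to 4 GiB. A quick standalone check of that arithmetic, assuming a 48-bit IAS:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t vm_start = 0x100000000ULL;   /* ADRENO_VM_START */
	unsigned int ias  = 48;               /* assumed SMMU input address size */
	uint64_t size     = (1ULL << ias) - vm_start;

	/* 0xffff00000000 bytes == 256 TiB minus the first 4 GiB (262140 GiB) */
	printf("va size = 0x%llx bytes (%llu GiB)\n",
	       (unsigned long long)size, (unsigned long long)(size >> 30));
	return 0;
}
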
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 92caba3584da..a8f4bf416e64 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -57,6 +57,7 @@ enum adreno_family {
#define ADRENO_QUIRK_HAS_HW_APRIV BIT(3)
#define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4)
#define ADRENO_QUIRK_PREEMPTION BIT(5)
+#define ADRENO_QUIRK_4GB_VA BIT(6)
/* Helper for formatting the chip_id in the way that userspace tools like
* crashdec expect.
@@ -104,7 +105,6 @@ struct adreno_info {
union {
const struct a6xx_info *a6xx;
};
- u64 address_space_size;
/**
* @speedbins: Optional table of fuse to speedbin mappings
*
@@ -578,6 +578,8 @@ static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
adreno_is_a740_family(gpu);
}
+/* Put vm_start above 32b to catch issues with not setting xyz_BASE_HI */
+#define ADRENO_VM_START 0x100000000ULL
u64 adreno_private_address_space_size(struct msm_gpu *gpu);
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value, uint32_t *len);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
index 6ac97c378056..ffc4d4257ae5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -27,17 +27,16 @@ static const struct dpu_mdp_cfg sm8650_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8650_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1000,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1000,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
index 1f32807bb5e5..39027a21c6fe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -100,14 +100,12 @@ static const struct dpu_pingpong_cfg msm8937_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
@@ -132,7 +130,6 @@ static const struct dpu_intf_cfg msm8937_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x268,
@@ -141,7 +138,6 @@ static const struct dpu_intf_cfg msm8937_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- .intr_tear_rd_ptr = -1,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
index 42131959ff22..8d1b43ea1663 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -93,7 +93,6 @@ static const struct dpu_pingpong_cfg msm8917_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
@@ -118,7 +117,6 @@ static const struct dpu_intf_cfg msm8917_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
index 2b4723a5c676..16c12499b24b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -100,14 +100,12 @@ static const struct dpu_pingpong_cfg msm8953_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
@@ -131,7 +129,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x268,
@@ -140,7 +137,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x268,
@@ -149,7 +145,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- .intr_tear_rd_ptr = -1,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
index 5cf19de71f06..91f514d28ac6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -181,15 +181,15 @@ static const struct dpu_pingpong_cfg msm8996_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_TE2_MASK,
- .sblk = &msm8996_pp_sblk_te,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_TE2_MASK,
- .sblk = &msm8996_pp_sblk_te,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
@@ -241,7 +241,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
.prog_fetch_lines_worst_case = 25,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x268,
@@ -250,7 +249,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
.prog_fetch_lines_worst_case = 25,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x268,
@@ -259,7 +257,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
.prog_fetch_lines_worst_case = 25,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x268,
@@ -267,7 +264,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
.prog_fetch_lines_worst_case = 25,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
- .intr_tear_rd_ptr = -1,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 746474679ef5..413cd59dc0c4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -170,15 +170,15 @@ static const struct dpu_pingpong_cfg msm8998_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
index 4f2f68b07f20..b2eb7ca699e3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -141,15 +141,15 @@ static const struct dpu_pingpong_cfg sdm660_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
@@ -202,7 +202,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x280,
@@ -211,7 +210,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x280,
@@ -220,7 +218,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- .intr_tear_rd_ptr = -1,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
index c70bef025ac4..85e121ad84a0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -115,14 +115,14 @@ static const struct dpu_pingpong_cfg sdm630_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
+ .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
@@ -147,7 +147,6 @@ static const struct dpu_intf_cfg sdm630_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x280,
@@ -156,7 +155,6 @@ static const struct dpu_intf_cfg sdm630_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index ab7b4822ca63..49363d7d5b93 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -194,15 +194,15 @@ static const struct dpu_pingpong_cfg sdm845_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 979527d98fbc..08d38e1d420c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -37,17 +37,16 @@ static const struct dpu_mdp_cfg sm8150_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -76,7 +75,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -84,7 +83,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -92,7 +91,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
@@ -100,7 +99,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -132,7 +131,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index d76b8992a6c1..d6f8b1030c68 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -41,12 +41,12 @@ static const struct dpu_ctl_cfg sc8180x_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -83,7 +83,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -91,7 +91,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
@@ -99,7 +99,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
@@ -107,7 +107,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -115,7 +115,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -123,7 +123,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -131,7 +131,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
index 83db11339b29..71ba48b05656 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
@@ -38,12 +38,12 @@ static const struct dpu_ctl_cfg sm7150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -72,7 +72,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -80,7 +80,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -88,7 +88,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -96,7 +96,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -104,7 +104,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index d3d3a34d0b45..fcfb3774f7a1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -69,7 +69,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -77,7 +77,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -85,7 +85,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index 47e01c3c242f..a86fdb33ebdd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sm8250_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8250_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 040c94c0bb66..842fcc5887fe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -51,7 +51,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -59,7 +59,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -67,7 +67,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
index 43f64a005f5a..c5fd89dd7c89 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
@@ -38,7 +38,7 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -46,7 +46,7 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index 397278ba999b..a234bb289d24 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -59,7 +59,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -67,7 +67,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -83,7 +83,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
index 3cbb2fe8aba2..53f3be28f6f6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
@@ -46,7 +46,7 @@ static const struct dpu_sspp_cfg qcm2290_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
index a06c8634d2d7..3a3bc8e429be 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
@@ -39,7 +39,7 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -47,7 +47,7 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index 0c860e804cab..90e86063a372 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sm8350_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8350_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1e8,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1e8,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index fcee1c3665f8..139f11321fea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sc8280xp_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sc8280xp_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -74,7 +73,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -82,7 +81,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -90,7 +89,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
@@ -98,7 +97,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
@@ -106,7 +105,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x2ac,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -114,7 +113,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x2ac,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -122,7 +121,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x2ac,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -130,7 +129,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x2ac,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 19b2ee8bbd5f..461294143a90 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -36,17 +36,16 @@ static const struct dpu_mdp_cfg sm8450_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8450_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
index 4d96ce71746f..c248b3b55c41 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sa8775p_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sa8775p_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 24f988465bf6..59c7fdf28e89 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -27,17 +27,16 @@ static const struct dpu_mdp_cfg sm8550_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8550_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -66,70 +65,70 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_12", .id = SSPP_DMA4,
.base = 0x2c000, .len = 0x344,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 14,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_13", .id = SSPP_DMA5,
.base = 0x2e000, .len = 0x344,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 15,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
new file mode 100644
index 000000000000..5667d055fbd1
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
@@ -0,0 +1,433 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_9_1_SAR2130P_H
+#define _DPU_9_1_SAR2130P_H
+
+static const struct dpu_caps sar2130p_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sar2130p_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+static const struct dpu_ctl_cfg sar2130p_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sar2130p_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
+};
+
+static const struct dpu_lm_cfg sar2130p_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sar2130p_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+static const struct dpu_pingpong_cfg sar2130p_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
+ .base = 0x66000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
+ .base = 0x66400, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sar2130p_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x66700, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains two hard-slice
+ * DSC encoders, so both share the same base address but each has its
+ * own sub-block address.
+ */
+static const struct dpu_dsc_cfg sar2130p_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg sar2130p_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_intf_cfg sar2130p_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
+};
+
+static const struct dpu_perf_cfg sar2130p_perf_data = {
+ .max_bw_low = 13600000,
+ .max_bw_high = 18200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 0, .wr_enable = 0},
+ {.rd_enable = 0, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sar2130p_mdss_ver = {
+ .core_major_ver = 9,
+ .core_minor_ver = 1,
+};
+
+const struct dpu_mdss_cfg dpu_sar2130p_cfg = {
+ .mdss_ver = &sar2130p_mdss_ver,
+ .caps = &sar2130p_dpu_caps,
+ .mdp = &sar2130p_mdp,
+ .cdm = &dpu_cdm_5_x,
+ .ctl_count = ARRAY_SIZE(sar2130p_ctl),
+ .ctl = sar2130p_ctl,
+ .sspp_count = ARRAY_SIZE(sar2130p_sspp),
+ .sspp = sar2130p_sspp,
+ .mixer_count = ARRAY_SIZE(sar2130p_lm),
+ .mixer = sar2130p_lm,
+ .dspp_count = ARRAY_SIZE(sar2130p_dspp),
+ .dspp = sar2130p_dspp,
+ .pingpong_count = ARRAY_SIZE(sar2130p_pp),
+ .pingpong = sar2130p_pp,
+ .dsc_count = ARRAY_SIZE(sar2130p_dsc),
+ .dsc = sar2130p_dsc,
+ .merge_3d_count = ARRAY_SIZE(sar2130p_merge_3d),
+ .merge_3d = sar2130p_merge_3d,
+ .wb_count = ARRAY_SIZE(sar2130p_wb),
+ .wb = sar2130p_wb,
+ .intf_count = ARRAY_SIZE(sar2130p_intf),
+ .intf = sar2130p_intf,
+ .vbif_count = ARRAY_SIZE(sm8550_vbif),
+ .vbif = sm8550_vbif,
+ .perf = &sar2130p_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 6417baa84f82..52cc10aec1f9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -26,17 +26,16 @@ static const struct dpu_mdp_cfg x1e80100_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg x1e80100_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 0714936d8835..a4b0fe0d9899 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -445,9 +445,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
uint32_t lm_idx;
bool bg_alpha_enable = false;
- DECLARE_BITMAP(fetch_active, SSPP_MAX);
+ DECLARE_BITMAP(active_fetch, SSPP_MAX);
- memset(fetch_active, 0, sizeof(fetch_active));
+ memset(active_fetch, 0, sizeof(active_fetch));
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
if (!state)
@@ -464,7 +464,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
bg_alpha_enable = true;
- set_bit(pstate->pipe.sspp->idx, fetch_active);
+ set_bit(pstate->pipe.sspp->idx, active_fetch);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
@@ -472,7 +472,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
&pstate->pipe, 0, stage_cfg);
if (pstate->r_pipe.sspp) {
- set_bit(pstate->r_pipe.sspp->idx, fetch_active);
+ set_bit(pstate->r_pipe.sspp->idx, active_fetch);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
@@ -492,8 +492,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
}
}
- if (ctl->ops.set_active_pipes)
- ctl->ops.set_active_pipes(ctl, fetch_active);
+ if (ctl->ops.set_active_fetch_pipes)
+ ctl->ops.set_active_fetch_pipes(ctl, active_fetch);
_dpu_crtc_program_lm_output_roi(crtc);
}
@@ -519,6 +519,8 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
if (mixer[i].lm_ctl->ops.clear_all_blendstages)
mixer[i].lm_ctl->ops.clear_all_blendstages(
mixer[i].lm_ctl);
+ if (mixer[i].lm_ctl->ops.set_active_fetch_pipes)
+ mixer[i].lm_ctl->ops.set_active_fetch_pipes(mixer[i].lm_ctl, NULL);
}
/* initialize stage cfg */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 8610bbf2b87c..7020098360e4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1246,7 +1246,11 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
return;
}
- phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
+ /* Use first (and only) CTL if active CTLs are supported */
+ if (num_ctl == 1)
+ phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[0]);
+ else
+ phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
if (!phys->hw_ctl) {
DPU_ERROR_ENC(dpu_enc,
"no ctl block assigned at idx: %d\n", i);
@@ -1666,7 +1670,7 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
*/
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
- struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys->parent);
+ struct dpu_encoder_virt *dpu_enc;
if (!phys) {
DPU_ERROR("invalid argument(s)\n");
@@ -1678,6 +1682,8 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
return;
}
+ dpu_enc = to_dpu_encoder_virt(phys->parent);
+
if (phys->parent->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
dpu_enc->cwb_mask) {
DPU_DEBUG("encoder %d CWB enabled, skipping\n", DRMID(phys->parent));
@@ -2188,6 +2194,9 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
/* clear all blendstages */
if (ctl->ops.setup_blendstage)
ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+
+ if (ctl->ops.set_active_fetch_pipes)
+ ctl->ops.set_active_fetch_pipes(ctl, NULL);
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index da9994a79ca2..a0ba55ab3c89 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -60,6 +60,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
return;
intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->split_role == ENC_ROLE_MASTER)
+ intf_cfg.intf_master = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
intf_cfg.stream_sel = cmd_enc->stream_sel;
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index abd6600046cb..8a618841e3ea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -298,6 +298,8 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
if (phys_enc->hw_cdm)
intf_cfg.cdm = phys_enc->hw_cdm->idx;
intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->split_role == ENC_ROLE_MASTER)
+ intf_cfg.intf_master = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
@@ -372,7 +374,8 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg)
static bool dpu_encoder_phys_vid_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
- return phys_enc->split_role != ENC_ROLE_SOLO;
+ return !(phys_enc->hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG)) &&
+ phys_enc->split_role != ENC_ROLE_SOLO;
}
static void dpu_encoder_phys_vid_atomic_mode_set(
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 64265ca4656a..c878fe196aeb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -34,11 +34,11 @@
#define VIG_MSM8998_MASK \
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
-#define VIG_SDM845_MASK \
+#define VIG_SDM845_MASK_NO_SDMA \
(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK_SDMA \
- (VIG_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+ (VIG_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
@@ -54,24 +54,24 @@
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
#define VIG_SC7280_MASK \
- (VIG_SDM845_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
+ (VIG_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_INLINE_ROTATION))
#define VIG_SC7280_MASK_SDMA \
(VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
-#define DMA_SDM845_MASK \
+#define DMA_SDM845_MASK_NO_SDMA \
(BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
-#define DMA_CURSOR_SDM845_MASK \
- (DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR))
+#define DMA_CURSOR_SDM845_MASK_NO_SDMA \
+ (DMA_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_CURSOR))
#define DMA_SDM845_MASK_SDMA \
- (DMA_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+ (DMA_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_CURSOR_SDM845_MASK_SDMA \
- (DMA_CURSOR_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+ (DMA_CURSOR_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_CURSOR_MSM8996_MASK \
(DMA_MSM8996_MASK | BIT(DPU_SSPP_CURSOR))
@@ -98,15 +98,9 @@
#define PINGPONG_MSM8996_MASK \
(BIT(DPU_PINGPONG_DSC))
-#define PINGPONG_MSM8996_TE2_MASK \
- (PINGPONG_MSM8996_MASK | BIT(DPU_PINGPONG_TE2))
-
#define PINGPONG_SDM845_MASK \
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
-#define PINGPONG_SDM845_TE2_MASK \
- (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
-
#define PINGPONG_SM8150_MASK \
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
@@ -376,8 +370,6 @@ static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
* MIXER sub blocks config
*************************************************************/
-/* MSM8998 */
-
static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
@@ -387,8 +379,6 @@ static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
},
};
-/* SDM845 */
-
static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
@@ -398,8 +388,6 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
},
};
-/* SC7180 */
-
static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
@@ -408,8 +396,6 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
},
};
-/* QCM2290 */
-
static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
.maxwidth = DEFAULT_DPU_LINE_WIDTH,
.maxblendstages = 4, /* excluding base layer */
@@ -434,22 +420,11 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
-static const struct dpu_pingpong_sub_blks msm8996_pp_sblk_te = {
- .te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
- .version = 0x1},
-};
static const struct dpu_pingpong_sub_blks msm8996_pp_sblk = {
/* No dither block */
};
-static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
- .te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
- .version = 0x1},
- .dither = {.name = "dither", .base = 0x30e0,
- .len = 0x20, .version = 0x10000},
-};
-
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
.dither = {.name = "dither", .base = 0x30e0,
.len = 0x20, .version = 0x10000},
@@ -759,7 +734,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_8_4_sa8775p.h"
#include "catalog/dpu_9_0_sm8550.h"
-
+#include "catalog/dpu_9_1_sar2130p.h"
#include "catalog/dpu_9_2_x1e80100.h"
#include "catalog/dpu_10_0_sm8650.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 4cea19e1a203..01dd6e65f777 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -115,7 +115,6 @@ enum {
/**
* PINGPONG sub-blocks
- * @DPU_PINGPONG_TE2 Additional tear check block for split pipes
* @DPU_PINGPONG_SPLIT PP block supports split fifo
* @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
* @DPU_PINGPONG_DITHER Dither blocks
@@ -123,8 +122,7 @@ enum {
* @DPU_PINGPONG_MAX
*/
enum {
- DPU_PINGPONG_TE2 = 0x1,
- DPU_PINGPONG_SPLIT,
+ DPU_PINGPONG_SPLIT = 0x1,
DPU_PINGPONG_SLAVE,
DPU_PINGPONG_DITHER,
DPU_PINGPONG_DSC,
@@ -404,8 +402,6 @@ struct dpu_dspp_sub_blks {
};
struct dpu_pingpong_sub_blks {
- struct dpu_pp_blk te;
- struct dpu_pp_blk te2;
struct dpu_pp_blk dither;
};
@@ -841,6 +837,7 @@ extern const struct dpu_mdss_cfg dpu_msm8937_cfg;
extern const struct dpu_mdss_cfg dpu_msm8953_cfg;
extern const struct dpu_mdss_cfg dpu_msm8996_cfg;
extern const struct dpu_mdss_cfg dpu_msm8998_cfg;
+extern const struct dpu_mdss_cfg dpu_sar2130p_cfg;
extern const struct dpu_mdss_cfg dpu_sdm630_cfg;
extern const struct dpu_mdss_cfg dpu_sdm660_cfg;
extern const struct dpu_mdss_cfg dpu_sdm845_cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 411a7cf088eb..573e42b06ad0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -261,6 +261,12 @@ static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
case LM_5:
ctx->pending_flush_mask |= BIT(20);
break;
+ case LM_6:
+ ctx->pending_flush_mask |= BIT(21);
+ break;
+ case LM_7:
+ ctx->pending_flush_mask |= BIT(27);
+ break;
default:
break;
}
@@ -563,6 +569,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 wb_active = 0;
u32 cwb_active = 0;
u32 mode_sel = 0;
+ u32 merge_3d_active = 0;
/* CTL_TOP[31:28] carries group_id to collate CTL paths
* per VM. Explicitly disable it until VM support is
@@ -578,6 +585,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
+ merge_3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
if (cfg->intf)
intf_active |= BIT(cfg->intf - INTF_0);
@@ -591,15 +599,18 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if (cfg->dsc)
dsc_active |= cfg->dsc;
+ if (cfg->merge_3d)
+ merge_3d_active |= BIT(cfg->merge_3d - MERGE_3D_0);
+
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);
- if (cfg->merge_3d)
- DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
- BIT(cfg->merge_3d - MERGE_3D_0));
+ if (cfg->intf_master)
+ DPU_REG_WRITE(c, CTL_INTF_MASTER, BIT(cfg->intf_master - INTF_0));
if (cfg->cdm)
DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
@@ -643,6 +654,7 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
+ u32 intf_master = 0;
u32 wb_active = 0;
u32 cwb_active = 0;
u32 merge3d_active = 0;
@@ -666,10 +678,21 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
dpu_hw_ctl_clear_all_blendstages(ctx);
+ if (ctx->ops.set_active_fetch_pipes)
+ ctx->ops.set_active_fetch_pipes(ctx, NULL);
+
if (cfg->intf) {
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
intf_active &= ~BIT(cfg->intf - INTF_0);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+
+ intf_master = DPU_REG_READ(c, CTL_INTF_MASTER);
+
+ /* Unset this intf as master, if it is the current master */
+ if (intf_master == BIT(cfg->intf - INTF_0)) {
+ DPU_DEBUG_DRIVER("Unsetting INTF_%d master\n", cfg->intf - INTF_0);
+ DPU_REG_WRITE(c, CTL_INTF_MASTER, 0);
+ }
}
if (cfg->cwb) {
@@ -697,8 +720,8 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
}
}
-static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
- unsigned long *fetch_active)
+static void dpu_hw_ctl_set_active_fetch_pipes(struct dpu_hw_ctl *ctx,
+ unsigned long *fetch_active)
{
int i;
u32 val = 0;
@@ -761,7 +784,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
- ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
+ ops->set_active_fetch_pipes = dpu_hw_ctl_set_active_fetch_pipes;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 080a9550a0cc..feb09590bc8f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -36,6 +36,7 @@ struct dpu_hw_stage_cfg {
/**
* struct dpu_hw_intf_cfg :Describes how the DPU writes data to output interface
* @intf : Interface id
+ * @intf_master: Master interface id in the dual pipe topology
* @mode_3d: 3d mux configuration
* @merge_3d: 3d merge block used
* @intf_mode_sel: Interface mode, cmd / vid
@@ -46,6 +47,7 @@ struct dpu_hw_stage_cfg {
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
+ enum dpu_intf intf_master;
enum dpu_wb wb;
enum dpu_3d_blend_mode mode_3d;
enum dpu_merge_3d merge_3d;
@@ -254,7 +256,7 @@ struct dpu_hw_ctl_ops {
void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
- void (*set_active_pipes)(struct dpu_hw_ctl *ctx,
+ void (*set_active_fetch_pipes)(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 8d820cd1b554..175639c8bfbb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -125,6 +125,7 @@ enum dpu_lm {
LM_4,
LM_5,
LM_6,
+ LM_7,
LM_MAX
};
@@ -169,6 +170,8 @@ enum dpu_dsc {
DSC_3,
DSC_4,
DSC_5,
+ DSC_6,
+ DSC_7,
DSC_MAX
};
@@ -185,6 +188,8 @@ enum dpu_pingpong {
PINGPONG_3,
PINGPONG_4,
PINGPONG_5,
+ PINGPONG_6,
+ PINGPONG_7,
PINGPONG_CWB_0,
PINGPONG_CWB_1,
PINGPONG_CWB_2,
@@ -199,6 +204,7 @@ enum dpu_merge_3d {
MERGE_3D_2,
MERGE_3D_3,
MERGE_3D_4,
+ MERGE_3D_5,
MERGE_3D_MAX
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 3305ad0623ca..1fd82b6747e9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1512,6 +1512,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
{ .compatible = "qcom,sa8775p-dpu", .data = &dpu_sa8775p_cfg, },
+ { .compatible = "qcom,sar2130p-dpu", .data = &dpu_sar2130p_cfg, },
{ .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, },
{ .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, },
{ .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index af3e541f60c3..421138bc3cb7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -729,12 +729,40 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
struct dpu_sw_pipe *pipe,
struct dpu_sw_pipe_cfg *pipe_cfg,
- const struct msm_format *fmt,
- const struct drm_display_mode *mode)
+ const struct drm_display_mode *mode,
+ struct drm_plane_state *new_plane_state)
{
uint32_t min_src_size;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
+ const struct msm_format *fmt;
+ uint32_t supported_rotations;
+ const struct dpu_sspp_cfg *pipe_hw_caps;
+ const struct dpu_sspp_sub_blks *sblk;
+
+ pipe_hw_caps = pipe->sspp->cap;
+ sblk = pipe->sspp->cap->sblk;
+
+ /*
+ * We already have verified scaling against platform limitations.
+ * Now check if the SSPP supports scaling at all.
+ */
+ if (!sblk->scaler_blk.len &&
+ ((drm_rect_width(&new_plane_state->src) >> 16 !=
+ drm_rect_width(&new_plane_state->dst)) ||
+ (drm_rect_height(&new_plane_state->src) >> 16 !=
+ drm_rect_height(&new_plane_state->dst))))
+ return -ERANGE;
+
+ fmt = msm_framebuffer_format(new_plane_state->fb);
+
+ supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
+
+ if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ supported_rotations |= DRM_MODE_ROTATE_90;
+
+ pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation,
+ supported_rotations);
min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 2 : 1;
@@ -887,10 +915,9 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
return 0;
}
-static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
- struct dpu_sw_pipe_cfg *pipe_cfg,
- const struct msm_format *fmt,
- uint32_t max_linewidth)
+static int dpu_plane_is_multirect_capable(struct dpu_hw_sspp *sspp,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt)
{
if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect))
@@ -902,10 +929,6 @@ static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
if (MSM_FORMAT_IS_YUV(fmt))
return false;
- if (MSM_FORMAT_IS_UBWC(fmt) &&
- drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2)
- return false;
-
if (!test_bit(DPU_SSPP_SMART_DMA_V1, &sspp->cap->features) &&
!test_bit(DPU_SSPP_SMART_DMA_V2, &sspp->cap->features))
return false;
@@ -913,6 +936,27 @@ static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
return true;
}
+static int dpu_plane_is_parallel_capable(struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ if (MSM_FORMAT_IS_UBWC(fmt) &&
+ drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2)
+ return false;
+
+ return true;
+}
+
+static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ return dpu_plane_is_multirect_capable(sspp, pipe_cfg, fmt) &&
+ dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth);
+}
+
+
static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
struct drm_atomic_state *state,
const struct drm_crtc_state *crtc_state)
@@ -923,47 +967,20 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
struct dpu_sw_pipe *pipe = &pstate->pipe;
struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
- const struct msm_format *fmt;
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
- uint32_t supported_rotations;
- const struct dpu_sspp_cfg *pipe_hw_caps;
- const struct dpu_sspp_sub_blks *sblk;
int ret = 0;
- pipe_hw_caps = pipe->sspp->cap;
- sblk = pipe->sspp->cap->sblk;
-
- /*
- * We already have verified scaling against platform limitations.
- * Now check if the SSPP supports scaling at all.
- */
- if (!sblk->scaler_blk.len &&
- ((drm_rect_width(&new_plane_state->src) >> 16 !=
- drm_rect_width(&new_plane_state->dst)) ||
- (drm_rect_height(&new_plane_state->src) >> 16 !=
- drm_rect_height(&new_plane_state->dst))))
- return -ERANGE;
-
- fmt = msm_framebuffer_format(new_plane_state->fb);
-
- supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
-
- if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
- supported_rotations |= DRM_MODE_ROTATE_90;
-
- pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation,
- supported_rotations);
- r_pipe_cfg->rotation = pipe_cfg->rotation;
-
- ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt,
- &crtc_state->adjusted_mode);
+ ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg,
+ &crtc_state->adjusted_mode,
+ new_plane_state);
if (ret)
return ret;
if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
- ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
- &crtc_state->adjusted_mode);
+ ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg,
+ &crtc_state->adjusted_mode,
+ new_plane_state);
if (ret)
return ret;
}
@@ -1001,6 +1018,69 @@ static bool dpu_plane_try_multirect_parallel(struct dpu_sw_pipe *pipe, struct dp
return true;
}
+static int dpu_plane_try_multirect_shared(struct dpu_plane_state *pstate,
+ struct dpu_plane_state *prev_adjacent_pstate,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ struct dpu_sw_pipe *pipe = &pstate->pipe;
+ struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
+ struct dpu_sw_pipe *prev_pipe = &prev_adjacent_pstate->pipe;
+ struct dpu_sw_pipe_cfg *prev_pipe_cfg = &prev_adjacent_pstate->pipe_cfg;
+ const struct msm_format *prev_fmt = msm_framebuffer_format(prev_adjacent_pstate->base.fb);
+ u16 max_tile_height = 1;
+
+ if (prev_adjacent_pstate->r_pipe.sspp != NULL ||
+ prev_pipe->multirect_mode != DPU_SSPP_MULTIRECT_NONE)
+ return false;
+
+ if (!dpu_plane_is_multirect_capable(pipe->sspp, pipe_cfg, fmt) ||
+ !dpu_plane_is_multirect_capable(prev_pipe->sspp, prev_pipe_cfg, prev_fmt))
+ return false;
+
+ if (MSM_FORMAT_IS_UBWC(fmt))
+ max_tile_height = max(max_tile_height, fmt->tile_height);
+
+ if (MSM_FORMAT_IS_UBWC(prev_fmt))
+ max_tile_height = max(max_tile_height, prev_fmt->tile_height);
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->sspp = NULL;
+
+ if (dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth) &&
+ dpu_plane_is_parallel_capable(prev_pipe_cfg, prev_fmt, max_linewidth) &&
+ (pipe_cfg->dst_rect.x1 >= prev_pipe_cfg->dst_rect.x2 ||
+ prev_pipe_cfg->dst_rect.x1 >= pipe_cfg->dst_rect.x2)) {
+ pipe->sspp = prev_pipe->sspp;
+
+ pipe->multirect_index = DPU_SSPP_RECT_1;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ prev_pipe->multirect_index = DPU_SSPP_RECT_0;
+ prev_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ return true;
+ }
+
+ if (pipe_cfg->dst_rect.y1 >= prev_pipe_cfg->dst_rect.y2 + 2 * max_tile_height ||
+ prev_pipe_cfg->dst_rect.y1 >= pipe_cfg->dst_rect.y2 + 2 * max_tile_height) {
+ pipe->sspp = prev_pipe->sspp;
+
+ pipe->multirect_index = DPU_SSPP_RECT_1;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+
+ prev_pipe->multirect_index = DPU_SSPP_RECT_0;
+ prev_pipe->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+
+ return true;
+ }
+
+ return false;
+}
+
static int dpu_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -1059,6 +1139,9 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *crtc_state;
int ret;
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
if (plane_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
plane_state->crtc);
@@ -1098,13 +1181,14 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
struct dpu_global_state *global_state,
struct drm_atomic_state *state,
- struct drm_plane_state *plane_state)
+ struct drm_plane_state *plane_state,
+ struct drm_plane_state *prev_adjacent_plane_state)
{
const struct drm_crtc_state *crtc_state = NULL;
struct drm_plane *plane = plane_state->plane;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
struct dpu_rm_sspp_requirements reqs;
- struct dpu_plane_state *pstate;
+ struct dpu_plane_state *pstate, *prev_adjacent_pstate;
struct dpu_sw_pipe *pipe;
struct dpu_sw_pipe *r_pipe;
struct dpu_sw_pipe_cfg *pipe_cfg;
@@ -1116,6 +1200,8 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
plane_state->crtc);
pstate = to_dpu_plane_state(plane_state);
+ prev_adjacent_pstate = prev_adjacent_plane_state ?
+ to_dpu_plane_state(prev_adjacent_plane_state) : NULL;
pipe = &pstate->pipe;
r_pipe = &pstate->r_pipe;
pipe_cfg = &pstate->pipe_cfg;
@@ -1134,24 +1220,42 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
reqs.rot90 = drm_rotation_90_or_270(plane_state->rotation);
- pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
- if (!pipe->sspp)
- return -ENODEV;
+ if (drm_rect_width(&r_pipe_cfg->src_rect) == 0) {
+ if (!prev_adjacent_pstate ||
+ !dpu_plane_try_multirect_shared(pstate, prev_adjacent_pstate, fmt,
+ dpu_kms->catalog->caps->max_linewidth)) {
+ pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!pipe->sspp)
+ return -ENODEV;
- if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
- pipe->sspp,
- msm_framebuffer_format(plane_state->fb),
- dpu_kms->catalog->caps->max_linewidth)) {
- /* multirect is not possible, use two SSPP blocks */
- r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
- if (!r_pipe->sspp)
+ r_pipe->sspp = NULL;
+
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ }
+ } else {
+ pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!pipe->sspp)
return -ENODEV;
- pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
+ pipe->sspp,
+ msm_framebuffer_format(plane_state->fb),
+ dpu_kms->catalog->caps->max_linewidth)) {
+ /* multirect is not possible, use two SSPP blocks */
+ r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!r_pipe->sspp)
+ return -ENODEV;
- r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ }
}
return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
@@ -1164,6 +1268,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
unsigned int num_planes)
{
unsigned int i;
+ struct drm_plane_state *prev_adjacent_plane_state = NULL;
for (i = 0; i < num_planes; i++) {
struct drm_plane_state *plane_state = states[i];
@@ -1173,9 +1278,12 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
continue;
int ret = dpu_plane_virtual_assign_resources(crtc, global_state,
- state, plane_state);
+ state, plane_state,
+ prev_adjacent_plane_state);
if (ret)
- return ret;
+ break;
+
+ prev_adjacent_plane_state = plane_state;
}
return 0;
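
The shared-SSPP decision in dpu_plane_try_multirect_shared() above is driven by two placement tests: parallel multirect when the two destination rectangles do not overlap horizontally, and time-multiplexed multirect when they are separated vertically by at least two tile heights. A minimal stand-alone sketch of just those tests follows; the struct rect is a simplified stand-in for struct drm_rect, and whether a rectangle is "parallel capable" at all (format and line-width limits checked by the real dpu_plane_is_parallel_capable()) is treated as a separate, opaque predicate.

#include <stdbool.h>

struct rect { int x1, y1, x2, y2; };

/* Parallel multirect placement test: no horizontal overlap between the rects. */
static bool rects_disjoint_horizontally(struct rect a, struct rect b)
{
	return a.x1 >= b.x2 || b.x1 >= a.x2;
}

/* Time-multiplexed multirect placement test: at least two tile heights apart vertically. */
static bool rects_time_mx_separated(struct rect a, struct rect b, int max_tile_height)
{
	return a.y1 >= b.y2 + 2 * max_tile_height ||
	       b.y1 >= a.y2 + 2 * max_tile_height;
}
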
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 3efbba425ca6..2e296f79cba1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -53,6 +53,8 @@ int dpu_rm_init(struct drm_device *dev,
/* Clear, setup lists */
memset(rm, 0, sizeof(*rm));
+ rm->has_legacy_ctls = (cat->mdss_ver->core_major_ver < 5);
+
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
struct dpu_hw_mixer *hw;
@@ -434,20 +436,19 @@ static int _dpu_rm_reserve_ctls(
int i = 0, j, num_ctls;
bool needs_split_display;
- /*
- * For non-CWB mode, each hw_intf needs its own hw_ctl to program its
- * control path.
- *
- * Hardcode num_ctls to 1 if CWB is enabled because in CWB, both the
- * writeback and real-time encoders must be driven by the same control
- * path
- */
- if (top->cwb_enabled)
- num_ctls = 1;
- else
+ if (rm->has_legacy_ctls) {
+ /*
+ * TODO: check if there is a need for special handling if
+ * DPU < 5.0 get CWB support.
+ */
num_ctls = top->num_intf;
- needs_split_display = _dpu_rm_needs_split_display(top);
+ needs_split_display = _dpu_rm_needs_split_display(top);
+ } else {
+ /* use single CTL */
+ num_ctls = 1;
+ needs_split_display = false;
+ }
for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
const struct dpu_hw_ctl *ctl;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index a19dbdb1b6f4..aa62966056d4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -24,6 +24,7 @@ struct dpu_global_state;
* @dspp_blks: array of dspp hardware resources
* @hw_sspp: array of sspp hardware resources
* @cdm_blk: cdm hardware resource
+ * @has_legacy_ctls: DPU uses pre-ACTIVE CTL blocks.
*/
struct dpu_rm {
struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
@@ -37,6 +38,7 @@ struct dpu_rm {
struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
struct dpu_hw_sspp *hw_sspp[SSPP_MAX - SSPP_NONE];
struct dpu_hw_blk *cdm_blk;
+ bool has_legacy_ctls;
};
struct dpu_rm_sspp_requirements {
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index c469e66cfc11..7e942c1337b3 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -6,6 +6,8 @@
#include <linux/delay.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_vblank.h>
#include "msm_drv.h"
@@ -189,7 +191,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
struct drm_connector *connector;
- struct device_node *panel_node;
+ struct drm_bridge *next_bridge;
int dsi_id;
int ret;
@@ -199,27 +201,43 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
* bail out early if there is no panel node (no need to
* initialize LCDC encoder and LVDS connector)
*/
- panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
- if (!panel_node)
- return 0;
+ next_bridge = devm_drm_of_get_bridge(dev->dev, dev->dev->of_node, 0, 0);
+ if (IS_ERR(next_bridge)) {
+ ret = PTR_ERR(next_bridge);
+ if (ret == -ENODEV)
+ return 0;
+ return ret;
+ }
- encoder = mdp4_lcdc_encoder_init(dev, panel_node);
+ encoder = mdp4_lcdc_encoder_init(dev);
if (IS_ERR(encoder)) {
DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
- of_node_put(panel_node);
return PTR_ERR(encoder);
}
/* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
encoder->possible_crtcs = 1 << DMA_P;
- connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
+ ret = drm_bridge_attach(encoder, next_bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to attach LVDS panel/bridge: %d\n", ret);
+
+ return ret;
+ }
+
+ connector = drm_bridge_connector_init(dev, encoder);
if (IS_ERR(connector)) {
DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
- of_node_put(panel_node);
return PTR_ERR(connector);
}
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to attach LVDS connector: %d\n", ret);
+
+ return ret;
+ }
+
break;
case DRM_MODE_ENCODER_TMDS:
encoder = mdp4_dtv_encoder_init(dev);
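
The LVDS path above now follows the standard bridge-chain pattern: look up the downstream panel/bridge from the OF graph, attach it with DRM_BRIDGE_ATTACH_NO_CONNECTOR, and let the bridge-connector helper create the single connector for the whole chain. A condensed sketch of that pattern is below; the helper name lvds_output_init() is illustrative and error reporting is trimmed.

static int lvds_output_init(struct drm_device *dev, struct drm_encoder *encoder)
{
	struct drm_bridge *next_bridge;
	struct drm_connector *connector;
	int ret;

	/* Panel or bridge described at port 0, endpoint 0 of the display node. */
	next_bridge = devm_drm_of_get_bridge(dev->dev, dev->dev->of_node, 0, 0);
	if (IS_ERR(next_bridge))
		return PTR_ERR(next_bridge) == -ENODEV ? 0 : PTR_ERR(next_bridge);

	/* The bridge must not create its own connector ... */
	ret = drm_bridge_attach(encoder, next_bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	/* ... the bridge-connector helper builds one connector for the chain. */
	connector = drm_bridge_connector_init(dev, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}
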
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index 94b1ba92785f..f9d988076337 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -191,12 +191,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
-long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
-struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
- struct device_node *panel_node);
-
-struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
- struct device_node *panel_node, struct drm_encoder *encoder);
+struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev);
#ifdef CONFIG_DRM_MSM_DSI
struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev);
@@ -207,13 +202,6 @@ static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
}
#endif
-#ifdef CONFIG_COMMON_CLK
-struct clk *mpd4_lvds_pll_init(struct drm_device *dev);
-#else
-static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
-{
- return ERR_PTR(-ENODEV);
-}
-#endif
+struct clk *mpd4_get_lcdc_clock(struct drm_device *dev);
#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 8bbc7fb881d5..06a307c1272d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -14,7 +14,6 @@
struct mdp4_lcdc_encoder {
struct drm_encoder base;
- struct device_node *panel_node;
struct drm_panel *panel;
struct clk *lcdc_clk;
unsigned long int pixclock;
@@ -262,19 +261,12 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
- struct drm_panel *panel;
if (WARN_ON(!mdp4_lcdc_encoder->enabled))
return;
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
- panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (!IS_ERR(panel)) {
- drm_panel_disable(panel);
- drm_panel_unprepare(panel);
- }
-
/*
* Wait for a vsync so we know the ENABLE=0 latched before
* the (connector) source of the vsync's gets disabled,
@@ -300,7 +292,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
to_mdp4_lcdc_encoder(encoder);
unsigned long pc = mdp4_lcdc_encoder->pixclock;
struct mdp4_kms *mdp4_kms = get_kms(encoder);
- struct drm_panel *panel;
uint32_t config;
int ret;
@@ -335,12 +326,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
- panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (!IS_ERR(panel)) {
- drm_panel_prepare(panel);
- drm_panel_enable(panel);
- }
-
setup_phy(encoder);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1);
@@ -348,22 +333,34 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
mdp4_lcdc_encoder->enabled = true;
}
+static enum drm_mode_status
+mdp4_lcdc_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ long actual, requested;
+
+ requested = 1000 * mode->clock;
+ actual = clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, requested);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = {
.mode_set = mdp4_lcdc_encoder_mode_set,
.disable = mdp4_lcdc_encoder_disable,
.enable = mdp4_lcdc_encoder_enable,
+ .mode_valid = mdp4_lcdc_encoder_mode_valid,
};
-long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
-{
- struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
- to_mdp4_lcdc_encoder(encoder);
- return clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, rate);
-}
-
/* initialize encoder */
-struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
- struct device_node *panel_node)
+struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
@@ -374,14 +371,11 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
if (IS_ERR(mdp4_lcdc_encoder))
return ERR_CAST(mdp4_lcdc_encoder);
- mdp4_lcdc_encoder->panel_node = panel_node;
-
encoder = &mdp4_lcdc_encoder->base;
drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
- /* TODO: do we need different pll in other cases? */
- mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
+ mdp4_lcdc_encoder->lcdc_clk = mpd4_get_lcdc_clock(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
return ERR_CAST(mdp4_lcdc_encoder->lcdc_clk);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
deleted file mode 100644
index 52e728181b52..000000000000
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2014 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
- */
-
-#include "mdp4_kms.h"
-
-struct mdp4_lvds_connector {
- struct drm_connector base;
- struct drm_encoder *encoder;
- struct device_node *panel_node;
- struct drm_panel *panel;
-};
-#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base)
-
-static enum drm_connector_status mdp4_lvds_connector_detect(
- struct drm_connector *connector, bool force)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
-
- if (!mdp4_lvds_connector->panel) {
- mdp4_lvds_connector->panel =
- of_drm_find_panel(mdp4_lvds_connector->panel_node);
- if (IS_ERR(mdp4_lvds_connector->panel))
- mdp4_lvds_connector->panel = NULL;
- }
-
- return mdp4_lvds_connector->panel ?
- connector_status_connected :
- connector_status_disconnected;
-}
-
-static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
-
- drm_connector_cleanup(connector);
-
- kfree(mdp4_lvds_connector);
-}
-
-static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
- struct drm_panel *panel = mdp4_lvds_connector->panel;
- int ret = 0;
-
- if (panel)
- ret = drm_panel_get_modes(panel, connector);
-
- return ret;
-}
-
-static enum drm_mode_status
-mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
- struct drm_encoder *encoder = mdp4_lvds_connector->encoder;
- long actual, requested;
-
- requested = 1000 * mode->clock;
- actual = mdp4_lcdc_round_pixclk(encoder, requested);
-
- DBG("requested=%ld, actual=%ld", requested, actual);
-
- if (actual != requested)
- return MODE_CLOCK_RANGE;
-
- return MODE_OK;
-}
-
-static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
- .detect = mdp4_lvds_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = mdp4_lvds_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
- .get_modes = mdp4_lvds_connector_get_modes,
- .mode_valid = mdp4_lvds_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
- struct device_node *panel_node, struct drm_encoder *encoder)
-{
- struct drm_connector *connector = NULL;
- struct mdp4_lvds_connector *mdp4_lvds_connector;
-
- mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL);
- if (!mdp4_lvds_connector)
- return ERR_PTR(-ENOMEM);
-
- mdp4_lvds_connector->encoder = encoder;
- mdp4_lvds_connector->panel_node = panel_node;
-
- connector = &mdp4_lvds_connector->base;
-
- drm_connector_init(dev, connector, &mdp4_lvds_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- drm_connector_helper_add(connector, &mdp4_lvds_connector_helper_funcs);
-
- connector->polled = 0;
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- drm_connector_attach_encoder(connector, encoder);
-
- return connector;
-}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
index ab8c0c187fb2..fa2c29470510 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
@@ -122,40 +122,59 @@ static const struct clk_ops mpd4_lvds_pll_ops = {
.set_rate = mpd4_lvds_pll_set_rate,
};
-static const char *mpd4_lvds_pll_parents[] = {
- "pxo",
+static const struct clk_parent_data mpd4_lvds_pll_parents[] = {
+ { .fw_name = "pxo", .name = "pxo", },
};
static struct clk_init_data pll_init = {
.name = "mpd4_lvds_pll",
.ops = &mpd4_lvds_pll_ops,
- .parent_names = mpd4_lvds_pll_parents,
+ .parent_data = mpd4_lvds_pll_parents,
.num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents),
};
-struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
+static struct clk_hw *mpd4_lvds_pll_init(struct drm_device *dev)
{
struct mdp4_lvds_pll *lvds_pll;
- struct clk *clk;
int ret;
lvds_pll = devm_kzalloc(dev->dev, sizeof(*lvds_pll), GFP_KERNEL);
- if (!lvds_pll) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!lvds_pll)
+ return ERR_PTR(-ENOMEM);
lvds_pll->dev = dev;
lvds_pll->pll_hw.init = &pll_init;
- clk = devm_clk_register(dev->dev, &lvds_pll->pll_hw);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto fail;
+ ret = devm_clk_hw_register(dev->dev, &lvds_pll->pll_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = devm_of_clk_add_hw_provider(dev->dev, of_clk_hw_simple_get, &lvds_pll->pll_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &lvds_pll->pll_hw;
+}
+
+struct clk *mpd4_get_lcdc_clock(struct drm_device *dev)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+
+ /* TODO: do we need different pll in other cases? */
+ hw = mpd4_lvds_pll_init(dev);
+ if (IS_ERR(hw)) {
+ DRM_DEV_ERROR(dev->dev, "failed to register LVDS PLL\n");
+ return ERR_CAST(hw);
}
- return clk;
+ clk = devm_clk_get(dev->dev, "lcdc_clk");
+ if (clk == ERR_PTR(-ENOENT)) {
+ drm_warn(dev, "can't get LCDC clock, using PLL directly\n");
-fail:
- return ERR_PTR(ret);
+ return devm_clk_hw_get_clk(dev->dev, hw, "lcdc_clk");
+ }
+
+ return clk;
}
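
The clock lookup above prefers a board-described "lcdc_clk" and only falls back to the freshly registered LVDS PLL when devm_clk_get() reports -ENOENT, i.e. when no such clock is wired up in DT; any other error, including -EPROBE_DEFER for a clock that is described but not yet available, is still propagated to the caller. A compressed sketch of that fallback, assuming hw is the already-registered PLL's clk_hw and dev the drm_device, as in the patch:

	struct clk *clk;

	clk = devm_clk_get(dev->dev, "lcdc_clk");	/* DT-provided clock wins */
	if (clk == ERR_PTR(-ENOENT)) {
		/* No "lcdc_clk" in DT: hand out a clk handle backed by the PLL itself. */
		clk = devm_clk_hw_get_clk(dev->dev, hw, "lcdc_clk");
	}

	return clk;	/* may still be an ERR_PTR from either lookup */
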
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 70fdc9fe228a..f8bfb908f9b4 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -13,13 +13,13 @@
#include "dp_catalog.h"
#include "dp_audio.h"
+#include "dp_drm.h"
#include "dp_panel.h"
#include "dp_reg.h"
#include "dp_display.h"
#include "dp_utils.h"
struct msm_dp_audio_private {
- struct platform_device *audio_pdev;
struct platform_device *pdev;
struct drm_device *drm_dev;
struct msm_dp_catalog *catalog;
@@ -160,24 +160,11 @@ static void msm_dp_audio_enable(struct msm_dp_audio_private *audio, bool enable)
msm_dp_catalog_audio_enable(catalog, enable);
}
-static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device *pdev)
+static struct msm_dp_audio_private *msm_dp_audio_get_data(struct msm_dp *msm_dp_display)
{
struct msm_dp_audio *msm_dp_audio;
- struct msm_dp *msm_dp_display;
-
- if (!pdev) {
- DRM_ERROR("invalid input\n");
- return ERR_PTR(-ENODEV);
- }
-
- msm_dp_display = platform_get_drvdata(pdev);
- if (!msm_dp_display) {
- DRM_ERROR("invalid input\n");
- return ERR_PTR(-ENODEV);
- }
msm_dp_audio = msm_dp_display->msm_dp_audio;
-
if (!msm_dp_audio) {
DRM_ERROR("invalid msm_dp_audio data\n");
return ERR_PTR(-EINVAL);
@@ -186,68 +173,16 @@ static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device
return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
}
-static int msm_dp_audio_hook_plugged_cb(struct device *dev, void *data,
- hdmi_codec_plugged_cb fn,
- struct device *codec_dev)
-{
-
- struct platform_device *pdev;
- struct msm_dp *msm_dp_display;
-
- pdev = to_platform_device(dev);
- if (!pdev) {
- pr_err("invalid input\n");
- return -ENODEV;
- }
-
- msm_dp_display = platform_get_drvdata(pdev);
- if (!msm_dp_display) {
- pr_err("invalid input\n");
- return -ENODEV;
- }
-
- return msm_dp_display_set_plugged_cb(msm_dp_display, fn, codec_dev);
-}
-
-static int msm_dp_audio_get_eld(struct device *dev,
- void *data, uint8_t *buf, size_t len)
-{
- struct platform_device *pdev;
- struct msm_dp *msm_dp_display;
-
- pdev = to_platform_device(dev);
-
- if (!pdev) {
- DRM_ERROR("invalid input\n");
- return -ENODEV;
- }
-
- msm_dp_display = platform_get_drvdata(pdev);
- if (!msm_dp_display) {
- DRM_ERROR("invalid input\n");
- return -ENODEV;
- }
-
- mutex_lock(&msm_dp_display->connector->eld_mutex);
- memcpy(buf, msm_dp_display->connector->eld,
- min(sizeof(msm_dp_display->connector->eld), len));
- mutex_unlock(&msm_dp_display->connector->eld_mutex);
-
- return 0;
-}
-
-int msm_dp_audio_hw_params(struct device *dev,
- void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
+int msm_dp_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
int rc = 0;
struct msm_dp_audio_private *audio;
- struct platform_device *pdev;
struct msm_dp *msm_dp_display;
- pdev = to_platform_device(dev);
- msm_dp_display = platform_get_drvdata(pdev);
+ msm_dp_display = to_dp_bridge(bridge)->msm_dp_display;
/*
* there could be cases where sound card can be opened even
@@ -262,7 +197,7 @@ int msm_dp_audio_hw_params(struct device *dev,
goto end;
}
- audio = msm_dp_audio_get_data(pdev);
+ audio = msm_dp_audio_get_data(msm_dp_display);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
@@ -281,15 +216,14 @@ end:
return rc;
}
-static void msm_dp_audio_shutdown(struct device *dev, void *data)
+void msm_dp_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge)
{
struct msm_dp_audio_private *audio;
- struct platform_device *pdev;
struct msm_dp *msm_dp_display;
- pdev = to_platform_device(dev);
- msm_dp_display = platform_get_drvdata(pdev);
- audio = msm_dp_audio_get_data(pdev);
+ msm_dp_display = to_dp_bridge(bridge)->msm_dp_display;
+ audio = msm_dp_audio_get_data(msm_dp_display);
if (IS_ERR(audio)) {
DRM_ERROR("failed to get audio data\n");
return;
@@ -311,47 +245,6 @@ static void msm_dp_audio_shutdown(struct device *dev, void *data)
msm_dp_display_signal_audio_complete(msm_dp_display);
}
-static const struct hdmi_codec_ops msm_dp_audio_codec_ops = {
- .hw_params = msm_dp_audio_hw_params,
- .audio_shutdown = msm_dp_audio_shutdown,
- .get_eld = msm_dp_audio_get_eld,
- .hook_plugged_cb = msm_dp_audio_hook_plugged_cb,
-};
-
-static struct hdmi_codec_pdata codec_data = {
- .ops = &msm_dp_audio_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
-};
-
-void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio)
-{
- struct msm_dp_audio_private *audio_priv;
-
- audio_priv = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
-
- if (audio_priv->audio_pdev) {
- platform_device_unregister(audio_priv->audio_pdev);
- audio_priv->audio_pdev = NULL;
- }
-}
-
-int msm_dp_register_audio_driver(struct device *dev,
- struct msm_dp_audio *msm_dp_audio)
-{
- struct msm_dp_audio_private *audio_priv;
-
- audio_priv = container_of(msm_dp_audio,
- struct msm_dp_audio_private, msm_dp_audio);
-
- audio_priv->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- return PTR_ERR_OR_ZERO(audio_priv->audio_pdev);
-}
-
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
struct msm_dp_catalog *catalog)
{
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
index beea34cbab77..58fc14693e48 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -36,23 +36,6 @@ struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
struct msm_dp_catalog *catalog);
/**
- * msm_dp_register_audio_driver()
- *
- * Registers DP device with hdmi_codec interface.
- *
- * @dev: DP device instance.
- * @msm_dp_audio: an instance of msm_dp_audio module.
- *
- *
- * Returns the error code in case of failure, otherwise
- * zero on success.
- */
-int msm_dp_register_audio_driver(struct device *dev,
- struct msm_dp_audio *msm_dp_audio);
-
-void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio);
-
-/**
* msm_dp_audio_put()
*
* Cleans the msm_dp_audio instance.
@@ -61,10 +44,12 @@ void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm
*/
void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio);
-int msm_dp_audio_hw_params(struct device *dev,
- void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params);
+int msm_dp_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params);
+void msm_dp_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge);
#endif /* _DP_AUDIO_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index d8633a596f8d..a50bfafbb4ea 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1034,10 +1034,12 @@ static int msm_dp_ctrl_set_vx_px(struct msm_dp_ctrl_private *ctrl,
return 0;
}
-static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_update_phy_vx_px(struct msm_dp_ctrl_private *ctrl,
+ enum drm_dp_phy dp_phy)
{
struct msm_dp_link *link = ctrl->link;
- int ret = 0, lane, lane_cnt;
+ int lane, lane_cnt, reg;
+ int ret = 0;
u8 buf[4];
u32 max_level_reached = 0;
u32 voltage_swing_level = link->phy_params.v_level;
@@ -1075,8 +1077,13 @@ static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
drm_dbg_dp(ctrl->drm_dev, "sink: p|v=0x%x\n",
voltage_swing_level | pre_emphasis_level);
- ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET,
- buf, lane_cnt);
+
+ if (dp_phy == DP_PHY_DPRX)
+ reg = DP_TRAINING_LANE0_SET;
+ else
+ reg = DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
+
+ ret = drm_dp_dpcd_write(ctrl->aux, reg, buf, lane_cnt);
if (ret == lane_cnt)
ret = 0;
@@ -1084,9 +1091,10 @@ static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
}
static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl,
- u8 pattern)
+ u8 pattern, enum drm_dp_phy dp_phy)
{
u8 buf;
+ int reg;
int ret = 0;
drm_dbg_dp(ctrl->drm_dev, "sink: pattern=%x\n", pattern);
@@ -1096,31 +1104,26 @@ static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl,
if (pattern && pattern != DP_TRAINING_PATTERN_4)
buf |= DP_LINK_SCRAMBLING_DISABLE;
- ret = drm_dp_dpcd_writeb(ctrl->aux, DP_TRAINING_PATTERN_SET, buf);
- return ret == 1;
-}
-
-static int msm_dp_ctrl_read_link_status(struct msm_dp_ctrl_private *ctrl,
- u8 *link_status)
-{
- int ret = 0, len;
-
- len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
- if (len != DP_LINK_STATUS_SIZE) {
- DRM_ERROR("DP link status read failed, err: %d\n", len);
- ret = -EINVAL;
- }
+ if (dp_phy == DP_PHY_DPRX)
+ reg = DP_TRAINING_PATTERN_SET;
+ else
+ reg = DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
- return ret;
+ ret = drm_dp_dpcd_writeb(ctrl->aux, reg, buf);
+ return ret == 1;
}
static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
- int *training_step)
+ int *training_step, enum drm_dp_phy dp_phy)
{
+ int delay_us;
int tries, old_v_level, ret = 0;
u8 link_status[DP_LINK_STATUS_SIZE];
int const maximum_retries = 4;
+ delay_us = drm_dp_read_clock_recovery_delay(ctrl->aux,
+ ctrl->panel->dpcd, dp_phy, false);
+
msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
*training_step = DP_TRAINING_1;
@@ -1129,18 +1132,19 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
if (ret)
return ret;
msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
- DP_LINK_SCRAMBLING_DISABLE);
+ DP_LINK_SCRAMBLING_DISABLE, dp_phy);
- ret = msm_dp_ctrl_update_vx_px(ctrl);
+ msm_dp_link_reset_phy_params_vx_px(ctrl->link);
+ ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy);
if (ret)
return ret;
tries = 0;
old_v_level = ctrl->link->phy_params.v_level;
for (tries = 0; tries < maximum_retries; tries++) {
- drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd);
+ fsleep(delay_us);
- ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
+ ret = drm_dp_dpcd_read_phy_link_status(ctrl->aux, dp_phy, link_status);
if (ret)
return ret;
@@ -1161,7 +1165,7 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
}
msm_dp_link_adjust_levels(ctrl->link, link_status);
- ret = msm_dp_ctrl_update_vx_px(ctrl);
+ ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy);
if (ret)
return ret;
}
@@ -1213,21 +1217,31 @@ static int msm_dp_ctrl_link_lane_down_shift(struct msm_dp_ctrl_private *ctrl)
return 0;
}
-static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl)
+static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl,
+ enum drm_dp_phy dp_phy)
{
- msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
- drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
+ int delay_us;
+
+ msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE, dp_phy);
+
+ delay_us = drm_dp_read_channel_eq_delay(ctrl->aux,
+ ctrl->panel->dpcd, dp_phy, false);
+ fsleep(delay_us);
}
static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
- int *training_step)
+ int *training_step, enum drm_dp_phy dp_phy)
{
+ int delay_us;
int tries = 0, ret = 0;
u8 pattern;
u32 state_ctrl_bit;
int const maximum_retries = 5;
u8 link_status[DP_LINK_STATUS_SIZE];
+ delay_us = drm_dp_read_channel_eq_delay(ctrl->aux,
+ ctrl->panel->dpcd, dp_phy, false);
+
msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
*training_step = DP_TRAINING_2;
@@ -1247,12 +1261,12 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
if (ret)
return ret;
- msm_dp_ctrl_train_pattern_set(ctrl, pattern);
+ msm_dp_ctrl_train_pattern_set(ctrl, pattern, dp_phy);
for (tries = 0; tries <= maximum_retries; tries++) {
- drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
+ fsleep(delay_us);
- ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
+ ret = drm_dp_dpcd_read_phy_link_status(ctrl->aux, dp_phy, link_status);
if (ret)
return ret;
@@ -1262,7 +1276,7 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
}
msm_dp_link_adjust_levels(ctrl->link, link_status);
- ret = msm_dp_ctrl_update_vx_px(ctrl);
+ ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy);
if (ret)
return ret;
@@ -1271,9 +1285,32 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
return -ETIMEDOUT;
}
+static int msm_dp_ctrl_link_train_1_2(struct msm_dp_ctrl_private *ctrl,
+ int *training_step, enum drm_dp_phy dp_phy)
+{
+ int ret;
+
+ ret = msm_dp_ctrl_link_train_1(ctrl, training_step, dp_phy);
+ if (ret) {
+ DRM_ERROR("link training #1 on phy %d failed. ret=%d\n", dp_phy, ret);
+ return ret;
+ }
+ drm_dbg_dp(ctrl->drm_dev, "link training #1 on phy %d successful\n", dp_phy);
+
+ ret = msm_dp_ctrl_link_train_2(ctrl, training_step, dp_phy);
+ if (ret) {
+ DRM_ERROR("link training #2 on phy %d failed. ret=%d\n", dp_phy, ret);
+ return ret;
+ }
+ drm_dbg_dp(ctrl->drm_dev, "link training #2 on phy %d successful\n", dp_phy);
+
+ return 0;
+}
+
static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
int *training_step)
{
+ int i;
int ret = 0;
const u8 *dpcd = ctrl->panel->dpcd;
u8 encoding[] = { 0, DP_SET_ANSI_8B10B };
@@ -1286,8 +1323,6 @@ static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
link_info.rate = ctrl->link->link_params.rate;
link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
- msm_dp_link_reset_phy_params_vx_px(ctrl->link);
-
msm_dp_aux_link_configure(ctrl->aux, &link_info);
if (drm_dp_max_downspread(dpcd))
@@ -1302,24 +1337,27 @@ static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
&assr, 1);
}
- ret = msm_dp_ctrl_link_train_1(ctrl, training_step);
+ for (i = ctrl->link->lttpr_count - 1; i >= 0; i--) {
+ enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
+
+ ret = msm_dp_ctrl_link_train_1_2(ctrl, training_step, dp_phy);
+ msm_dp_ctrl_clear_training_pattern(ctrl, dp_phy);
+
+ if (ret)
+ break;
+ }
+
if (ret) {
- DRM_ERROR("link training #1 failed. ret=%d\n", ret);
+ DRM_ERROR("link training of LTTPR(s) failed. ret=%d\n", ret);
goto end;
}
- /* print success info as this is a result of user initiated action */
- drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n");
-
- ret = msm_dp_ctrl_link_train_2(ctrl, training_step);
+ ret = msm_dp_ctrl_link_train_1_2(ctrl, training_step, DP_PHY_DPRX);
if (ret) {
- DRM_ERROR("link training #2 failed. ret=%d\n", ret);
+ DRM_ERROR("link training on sink failed. ret=%d\n", ret);
goto end;
}
- /* print success info as this is a result of user initiated action */
- drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n");
-
end:
msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
@@ -1636,7 +1674,7 @@ static int msm_dp_ctrl_link_maintenance(struct msm_dp_ctrl_private *ctrl)
if (ret)
goto end;
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
@@ -1660,7 +1698,7 @@ static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl)
return false;
}
msm_dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
- msm_dp_ctrl_update_vx_px(ctrl);
+ msm_dp_ctrl_update_phy_vx_px(ctrl, DP_PHY_DPRX);
msm_dp_link_send_test_response(ctrl->link);
pattern_sent = msm_dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
@@ -1805,7 +1843,7 @@ static bool msm_dp_ctrl_channel_eq_ok(struct msm_dp_ctrl_private *ctrl)
u8 link_status[DP_LINK_STATUS_SIZE];
int num_lanes = ctrl->link->link_params.num_lanes;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
return drm_dp_channel_eq_ok(link_status, num_lanes);
}
@@ -1863,7 +1901,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
break;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
rc = msm_dp_ctrl_link_rate_down_shift(ctrl);
if (rc < 0) { /* already in RBR = 1.6G */
@@ -1888,7 +1926,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
break;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
if (!drm_dp_clock_recovery_ok(link_status,
ctrl->link->link_params.num_lanes))
@@ -1902,7 +1940,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
}
/* stop link training before start re training */
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
}
rc = msm_dp_ctrl_reinitialize_mainlink(ctrl);
@@ -1926,7 +1964,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
* link training failed
* end txing train pattern here
*/
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
msm_dp_ctrl_deinitialize_mainlink(ctrl);
rc = -ECONNRESET;
@@ -1997,7 +2035,7 @@ int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train
msm_dp_ctrl_link_retrain(ctrl);
/* stop txing train pattern to end link training */
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
/*
* Set up transfer unit values and set controller state to send
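
The training flow in dp_ctrl.c now runs per PHY: each LTTPR gets a full clock-recovery plus channel-equalization pass (with its training pattern cleared afterwards), and the sink itself (DP_PHY_DPRX) is trained last. Register selection follows one pattern throughout: writes that used to target the sink's DPCD training registers are redirected to the per-repeater variants when an LTTPR is being trained. A minimal sketch of that selection, using the existing DPCD macros; the helper names are illustrative.

#include <drm/display/drm_dp.h>

/* Pick the TRAINING_PATTERN_SET register for the PHY currently being trained. */
static unsigned int training_pattern_reg(enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX)
		return DP_TRAINING_PATTERN_SET;

	return DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
}

/* Same idea for the per-lane drive settings. */
static unsigned int training_lane0_reg(enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX)
		return DP_TRAINING_LANE0_SET;

	return DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
}
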
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index bbc47d86ae9e..386c4669c831 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/string_choices.h>
#include <drm/display/drm_dp_aux_bus.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/drm_edid.h>
#include "msm_drv.h"
@@ -288,13 +289,6 @@ static int msm_dp_display_bind(struct device *dev, struct device *master,
goto end;
}
-
- rc = msm_dp_register_audio_driver(dev, dp->audio);
- if (rc) {
- DRM_ERROR("Audio registration Dp failed\n");
- goto end;
- }
-
rc = msm_dp_hpd_event_thread_start(dp);
if (rc) {
DRM_ERROR("Event thread create failed\n");
@@ -316,7 +310,6 @@ static void msm_dp_display_unbind(struct device *dev, struct device *master,
of_dp_aux_depopulate_bus(dp->aux);
- msm_dp_unregister_audio_driver(dev, dp->audio);
msm_dp_aux_unregister(dp->aux);
dp->drm_dev = NULL;
dp->aux->drm_dev = NULL;
@@ -367,17 +360,21 @@ static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *d
return 0;
}
-static void msm_dp_display_lttpr_init(struct msm_dp_display_private *dp)
+static int msm_dp_display_lttpr_init(struct msm_dp_display_private *dp, u8 *dpcd)
{
- u8 lttpr_caps[DP_LTTPR_COMMON_CAP_SIZE];
- int rc;
+ int rc, lttpr_count;
- if (drm_dp_read_lttpr_common_caps(dp->aux, dp->panel->dpcd, lttpr_caps))
- return;
+ if (drm_dp_read_lttpr_common_caps(dp->aux, dpcd, dp->link->lttpr_common_caps))
+ return 0;
- rc = drm_dp_lttpr_init(dp->aux, drm_dp_lttpr_count(lttpr_caps));
- if (rc)
+ lttpr_count = drm_dp_lttpr_count(dp->link->lttpr_common_caps);
+ rc = drm_dp_lttpr_init(dp->aux, lttpr_count);
+ if (rc) {
DRM_ERROR("failed to set LTTPRs transparency mode, rc=%d\n", rc);
+ return 0;
+ }
+
+ return lttpr_count;
}
static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
@@ -385,12 +382,17 @@ static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
struct drm_connector *connector = dp->msm_dp_display.connector;
const struct drm_display_info *info = &connector->display_info;
int rc = 0;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
- rc = msm_dp_panel_read_sink_caps(dp->panel, connector);
+ rc = drm_dp_read_dpcd_caps(dp->aux, dpcd);
if (rc)
goto end;
- msm_dp_display_lttpr_init(dp);
+ dp->link->lttpr_count = msm_dp_display_lttpr_init(dp, dpcd);
+
+ rc = msm_dp_panel_read_sink_caps(dp->panel, connector);
+ if (rc)
+ goto end;
msm_dp_link_process_request(dp->link);
@@ -626,9 +628,9 @@ static void msm_dp_display_handle_plugged_change(struct msm_dp *msm_dp_display,
struct msm_dp_display_private, msm_dp_display);
/* notify audio subsystem only if sink supports audio */
- if (msm_dp_display->plugged_cb && msm_dp_display->codec_dev &&
- dp->audio_supported)
- msm_dp_display->plugged_cb(msm_dp_display->codec_dev, plugged);
+ if (dp->audio_supported)
+ drm_connector_hdmi_audio_plugged_notify(msm_dp_display->connector,
+ plugged);
}
static int msm_dp_hpd_unplug_handle(struct msm_dp_display_private *dp, u32 data)
@@ -907,19 +909,6 @@ static int msm_dp_display_disable(struct msm_dp_display_private *dp)
return 0;
}
-int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display,
- hdmi_codec_plugged_cb fn, struct device *codec_dev)
-{
- bool plugged;
-
- msm_dp_display->plugged_cb = fn;
- msm_dp_display->codec_dev = codec_dev;
- plugged = msm_dp_display->link_ready;
- msm_dp_display_handle_plugged_change(msm_dp_display, plugged);
-
- return 0;
-}
-
/**
* msm_dp_bridge_mode_valid - callback to determine if specified mode is valid
* @bridge: Pointer to drm bridge structure
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index ecbc2d92f546..cc6e2cab36e9 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -7,7 +7,6 @@
#define _DP_DISPLAY_H_
#include "dp_panel.h"
-#include <sound/hdmi-codec.h>
#include "disp/msm_disp_snapshot.h"
#define DP_MAX_PIXEL_CLK_KHZ 675000
@@ -15,7 +14,6 @@
struct msm_dp {
struct drm_device *drm_dev;
struct platform_device *pdev;
- struct device *codec_dev;
struct drm_connector *connector;
struct drm_bridge *next_bridge;
bool link_ready;
@@ -25,14 +23,10 @@ struct msm_dp {
bool is_edp;
bool internal_hpd;
- hdmi_codec_plugged_cb plugged_cb;
-
struct msm_dp_audio *msm_dp_audio;
bool psr_supported;
};
-int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display,
- hdmi_codec_plugged_cb fn, struct device *codec_dev);
int msm_dp_display_get_modes(struct msm_dp *msm_dp_display);
bool msm_dp_display_check_video_test(struct msm_dp *msm_dp_display);
int msm_dp_display_get_test_bpp(struct msm_dp *msm_dp_display);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index cca57e56c906..f222d7ccaa88 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -12,6 +12,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
+#include "dp_audio.h"
#include "dp_drm.h"
/**
@@ -114,6 +115,9 @@ static const struct drm_bridge_funcs msm_dp_bridge_ops = {
.hpd_disable = msm_dp_bridge_hpd_disable,
.hpd_notify = msm_dp_bridge_hpd_notify,
.debugfs_init = msm_dp_bridge_debugfs_init,
+
+ .dp_audio_prepare = msm_dp_audio_prepare,
+ .dp_audio_shutdown = msm_dp_audio_shutdown,
};
static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
@@ -296,14 +300,15 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
struct msm_dp_bridge *msm_dp_bridge;
struct drm_bridge *bridge;
- msm_dp_bridge = devm_kzalloc(dev->dev, sizeof(*msm_dp_bridge), GFP_KERNEL);
- if (!msm_dp_bridge)
- return -ENOMEM;
+ msm_dp_bridge = devm_drm_bridge_alloc(dev->dev, struct msm_dp_bridge, bridge,
+ msm_dp_display->is_edp ? &msm_edp_bridge_ops :
+ &msm_dp_bridge_ops);
+ if (IS_ERR(msm_dp_bridge))
+ return PTR_ERR(msm_dp_bridge);
msm_dp_bridge->msm_dp_display = msm_dp_display;
bridge = &msm_dp_bridge->bridge;
- bridge->funcs = msm_dp_display->is_edp ? &msm_edp_bridge_ops : &msm_dp_bridge_ops;
bridge->type = msm_dp_display->connector_type;
bridge->ycbcr_420_allowed = yuv_supported;
@@ -320,9 +325,13 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
*/
if (!msm_dp_display->is_edp) {
bridge->ops =
+ DRM_BRIDGE_OP_DP_AUDIO |
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_MODES;
+ bridge->hdmi_audio_dev = &msm_dp_display->pdev->dev;
+ bridge->hdmi_audio_max_i2s_playback_channels = 8;
+ bridge->hdmi_audio_dai_port = -1;
}
rc = devm_drm_bridge_add(dev->dev, bridge);
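
With the hand-rolled hdmi-codec platform device gone, DP audio is now advertised entirely through the bridge: it declares DRM_BRIDGE_OP_DP_AUDIO, points hdmi_audio_dev at the DP platform device, and supplies the dp_audio_prepare/dp_audio_shutdown callbacks; the DRM HDMI-audio helper then creates the codec device and routes plug notifications through drm_connector_hdmi_audio_plugged_notify(). A condensed sketch of the wiring for the external-DP case only:

	/* In msm_dp_bridge_init(), external DP only: advertise audio through the bridge. */
	bridge->ops |= DRM_BRIDGE_OP_DP_AUDIO;
	bridge->hdmi_audio_dev = &msm_dp_display->pdev->dev;	/* device the codec binds to */
	bridge->hdmi_audio_max_i2s_playback_channels = 8;
	bridge->hdmi_audio_dai_port = -1;			/* no dedicated DAI port */

	/* ... while the drm_bridge_funcs carry the actual audio hooks:
	 *	.dp_audio_prepare  = msm_dp_audio_prepare,
	 *	.dp_audio_shutdown = msm_dp_audio_shutdown,
	 */
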
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 1a1fbb2d7d4f..92a9077959b3 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -714,21 +714,21 @@ end:
static int msm_dp_link_parse_sink_status_field(struct msm_dp_link_private *link)
{
- int len;
+ int ret;
link->prev_sink_count = link->msm_dp_link.sink_count;
- len = drm_dp_read_sink_count(link->aux);
- if (len < 0) {
+ ret = drm_dp_read_sink_count(link->aux);
+ if (ret < 0) {
DRM_ERROR("DP parse sink count failed\n");
- return len;
+ return ret;
}
- link->msm_dp_link.sink_count = len;
+ link->msm_dp_link.sink_count = ret;
- len = drm_dp_dpcd_read_link_status(link->aux,
- link->link_status);
- if (len < DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(link->aux,
+ link->link_status);
+ if (ret < 0) {
DRM_ERROR("DP link status read failed\n");
- return len;
+ return ret;
}
return msm_dp_link_parse_request(link);
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 8db5d5698a97..ba47c6d19fbf 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -7,6 +7,7 @@
#define _DP_LINK_H_
#include "dp_aux.h"
+#include <drm/display/drm_dp_helper.h>
#define DS_PORT_STATUS_CHANGED 0x200
#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
@@ -60,6 +61,9 @@ struct msm_dp_link_phy_params {
};
struct msm_dp_link {
+ u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
+ int lttpr_count;
+
u32 sink_request;
u32 test_response;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 92415bf8aa16..4e8ab75c771b 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -47,7 +47,7 @@ static void msm_dp_panel_read_psr_cap(struct msm_dp_panel_private *panel)
static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel)
{
- int rc;
+ int rc, max_lttpr_lanes, max_lttpr_rate;
struct msm_dp_panel_private *panel;
struct msm_dp_link_info *link_info;
u8 *dpcd, major, minor;
@@ -75,6 +75,16 @@ static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel)
if (link_info->rate > msm_dp_panel->max_dp_link_rate)
link_info->rate = msm_dp_panel->max_dp_link_rate;
+ /* Limit data lanes from LTTPR capabilities, if any */
+ max_lttpr_lanes = drm_dp_lttpr_max_lane_count(panel->link->lttpr_common_caps);
+ if (max_lttpr_lanes && max_lttpr_lanes < link_info->num_lanes)
+ link_info->num_lanes = max_lttpr_lanes;
+
+ /* Limit link rate from LTTPR capabilities, if any */
+ max_lttpr_rate = drm_dp_lttpr_max_link_rate(panel->link->lttpr_common_caps);
+ if (max_lttpr_rate && max_lttpr_rate < link_info->rate)
+ link_info->rate = max_lttpr_rate;
+
drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor);
drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate);
drm_dbg_dp(panel->drm_dev, "lane_count=%d\n", link_info->num_lanes);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 7754dcec33d0..7675558ae2e5 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -221,6 +221,22 @@ static const struct msm_dsi_config sc7280_dsi_cfg = {
},
};
+static const struct regulator_bulk_data sa8775p_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 8300 }, /* 1.2 V */
+ { .supply = "refgen" },
+};
+
+static const struct msm_dsi_config sa8775p_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = sa8775p_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sa8775p_dsi_regulators),
+ .bus_clk_names = dsi_v2_4_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names),
+ .io_start = {
+ { 0xae94000, 0xae96000 },
+ },
+};
+
static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_v2,
.link_clk_enable = dsi_link_clk_enable_v2,
@@ -294,6 +310,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0,
&sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_1,
+ &sa8775p_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_6_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_7_0,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 120cb65164c1..65b0705fac0e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -27,6 +27,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
+#define MSM_DSI_6G_VER_MINOR_V2_5_1 0x20050001
#define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000
#define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000
#define MSM_DSI_6G_VER_MINOR_V2_8_0 0x20080000
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4fabb01345aa..ca400924d4ee 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -434,12 +434,13 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dsi_mgr_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- return drm_bridge_attach(bridge->encoder, msm_dsi->next_bridge,
+ return drm_bridge_attach(encoder, msm_dsi->next_bridge,
bridge, flags);
}
@@ -461,15 +462,14 @@ int msm_dsi_manager_connector_init(struct msm_dsi *msm_dsi,
struct drm_connector *connector;
int ret;
- dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
- sizeof(*dsi_bridge), GFP_KERNEL);
- if (!dsi_bridge)
- return -ENOMEM;
+ dsi_bridge = devm_drm_bridge_alloc(msm_dsi->dev->dev, struct dsi_bridge, base,
+ &dsi_mgr_bridge_funcs);
+ if (IS_ERR(dsi_bridge))
+ return PTR_ERR(dsi_bridge);
dsi_bridge->id = msm_dsi->id;
bridge = &dsi_bridge->base;
- bridge->funcs = &dsi_mgr_bridge_funcs;
ret = devm_drm_bridge_add(msm_dsi->dev->dev, bridge);
if (ret)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index c0bcc6828963..5973d7325699 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -581,6 +581,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_7nm_cfgs },
{ .compatible = "qcom,dsi-phy-7nm-8150",
.data = &dsi_phy_7nm_8150_cfgs },
+ { .compatible = "qcom,sa8775p-dsi-phy-5nm",
+ .data = &dsi_phy_5nm_8775p_cfgs },
+ { .compatible = "qcom,sar2130p-dsi-phy-5nm",
+ .data = &dsi_phy_5nm_sar2130p_cfgs },
{ .compatible = "qcom,sc7280-dsi-phy-7nm",
.data = &dsi_phy_7nm_7280_cfgs },
{ .compatible = "qcom,sm6375-dsi-phy-7nm",
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 1925418d9999..7ea608f620fe 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -59,6 +59,8 @@ extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index a92decbee5b5..c19890358b74 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -1147,6 +1147,10 @@ static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 37550 },
};
+static const struct regulator_bulk_data dsi_phy_7nm_48000uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 48000 },
+};
+
static const struct regulator_bulk_data dsi_phy_7nm_98000uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 98000 },
};
@@ -1289,6 +1293,52 @@ const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs = {
.quirks = DSI_PHY_7NM_QUIRK_V4_3,
};
+const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_48000uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_48000uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_97800uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_97800uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V5_2,
+};
+
const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_98400uA_regulators,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 248541ff4492..2fd388b892dc 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -8,6 +8,7 @@
#include <linux/gpio/consumer.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <drm/drm_bridge_connector.h>
@@ -199,12 +200,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- ret = msm_hdmi_hpd_enable(hdmi->bridge);
- if (ret < 0) {
- DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
- goto fail;
- }
-
return 0;
fail:
@@ -220,28 +215,24 @@ fail:
* The hdmi device:
*/
-#define HDMI_CFG(item, entry) \
- .item ## _names = item ##_names_ ## entry, \
- .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry)
-
-static const char *hpd_reg_names_8960[] = {"core-vdda"};
-static const char *hpd_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
+static const char * const pwr_reg_names_8960[] = {"core-vdda"};
+static const char * const pwr_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
static const struct hdmi_platform_config hdmi_tx_8960_config = {
- HDMI_CFG(hpd_reg, 8960),
- HDMI_CFG(hpd_clk, 8960),
+ .pwr_reg_names = pwr_reg_names_8960,
+ .pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names_8960),
+ .pwr_clk_names = pwr_clk_names_8960,
+ .pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names_8960),
};
-static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
-static const char *pwr_clk_names_8x74[] = {"extp", "alt_iface"};
-static const char *hpd_clk_names_8x74[] = {"iface", "core", "mdp_core"};
-static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
+static const char * const pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
+static const char * const pwr_clk_names_8x74[] = {"iface", "core", "mdp_core", "alt_iface"};
static const struct hdmi_platform_config hdmi_tx_8974_config = {
- HDMI_CFG(pwr_reg, 8x74),
- HDMI_CFG(pwr_clk, 8x74),
- HDMI_CFG(hpd_clk, 8x74),
- .hpd_freq = hpd_clk_freq_8x74,
+ .pwr_reg_names = pwr_reg_names_8x74,
+ .pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names_8x74),
+ .pwr_clk_names = pwr_clk_names_8x74,
+ .pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names_8x74),
};
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
@@ -264,9 +255,6 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
struct msm_drm_private *priv = dev_get_drvdata(master);
if (priv->hdmi) {
- if (priv->hdmi->bridge)
- msm_hdmi_hpd_disable(priv->hdmi);
-
msm_hdmi_destroy(priv->hdmi);
priv->hdmi = NULL;
}
@@ -296,6 +284,7 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
hdmi->pdev = pdev;
hdmi->config = config;
spin_lock_init(&hdmi->reg_lock);
+ mutex_init(&hdmi->state_mutex);
ret = drm_of_find_panel_or_bridge(pdev->dev.of_node, 1, 0, NULL, &hdmi->next_bridge);
if (ret && ret != -ENODEV)
@@ -322,20 +311,6 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
if (hdmi->irq < 0)
return hdmi->irq;
- hdmi->hpd_regs = devm_kcalloc(&pdev->dev,
- config->hpd_reg_cnt,
- sizeof(hdmi->hpd_regs[0]),
- GFP_KERNEL);
- if (!hdmi->hpd_regs)
- return -ENOMEM;
-
- for (i = 0; i < config->hpd_reg_cnt; i++)
- hdmi->hpd_regs[i].supply = config->hpd_reg_names[i];
-
- ret = devm_regulator_bulk_get(&pdev->dev, config->hpd_reg_cnt, hdmi->hpd_regs);
- if (ret)
- return dev_err_probe(dev, ret, "failed to get hpd regulators\n");
-
hdmi->pwr_regs = devm_kcalloc(&pdev->dev,
config->pwr_reg_cnt,
sizeof(hdmi->pwr_regs[0]),
@@ -350,25 +325,6 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to get pwr regulators\n");
- hdmi->hpd_clks = devm_kcalloc(&pdev->dev,
- config->hpd_clk_cnt,
- sizeof(hdmi->hpd_clks[0]),
- GFP_KERNEL);
- if (!hdmi->hpd_clks)
- return -ENOMEM;
-
- for (i = 0; i < config->hpd_clk_cnt; i++) {
- struct clk *clk;
-
- clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
- if (IS_ERR(clk))
- return dev_err_probe(dev, PTR_ERR(clk),
- "failed to get hpd clk: %s\n",
- config->hpd_clk_names[i]);
-
- hdmi->hpd_clks[i] = clk;
- }
-
hdmi->pwr_clks = devm_kcalloc(&pdev->dev,
config->pwr_clk_cnt,
sizeof(hdmi->pwr_clks[0]),
@@ -376,17 +332,17 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
if (!hdmi->pwr_clks)
return -ENOMEM;
- for (i = 0; i < config->pwr_clk_cnt; i++) {
- struct clk *clk;
+ for (i = 0; i < config->pwr_clk_cnt; i++)
+ hdmi->pwr_clks[i].id = config->pwr_clk_names[i];
- clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
- if (IS_ERR(clk))
- return dev_err_probe(dev, PTR_ERR(clk),
- "failed to get pwr clk: %s\n",
- config->pwr_clk_names[i]);
+ ret = devm_clk_bulk_get(&pdev->dev, config->pwr_clk_cnt, hdmi->pwr_clks);
+ if (ret)
+ return ret;
- hdmi->pwr_clks[i] = clk;
- }
+ hdmi->extp_clk = devm_clk_get_optional(&pdev->dev, "extp");
+ if (IS_ERR(hdmi->extp_clk))
+ return dev_err_probe(dev, PTR_ERR(hdmi->extp_clk),
+ "failed to get extp clock\n");
hdmi->hpd_gpiod = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
/* This will catch e.g. -EPROBE_DEFER */
@@ -432,6 +388,48 @@ static void msm_hdmi_dev_remove(struct platform_device *pdev)
msm_hdmi_put_phy(hdmi);
}
+static int msm_hdmi_runtime_suspend(struct device *dev)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+ const struct hdmi_platform_config *config = hdmi->config;
+
+ clk_bulk_disable_unprepare(config->pwr_clk_cnt, hdmi->pwr_clks);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs);
+
+ return 0;
+}
+
+static int msm_hdmi_runtime_resume(struct device *dev)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+ const struct hdmi_platform_config *config = hdmi->config;
+ int ret;
+
+ ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ goto fail;
+
+ ret = clk_bulk_prepare_enable(config->pwr_clk_cnt, hdmi->pwr_clks);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ pinctrl_pm_select_sleep_state(dev);
+
+ return ret;
+}
+
+DEFINE_RUNTIME_DEV_PM_OPS(msm_hdmi_pm_ops, msm_hdmi_runtime_suspend, msm_hdmi_runtime_resume, NULL);
+
static const struct of_device_id msm_hdmi_dt_match[] = {
{ .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8974_config },
@@ -449,6 +447,7 @@ static struct platform_driver msm_hdmi_driver = {
.driver = {
.name = "hdmi_msm",
.of_match_table = msm_hdmi_dt_match,
+ .pm = &msm_hdmi_pm_ops,
},
};
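
The probe conversion above replaces the hand-rolled array of struct clk pointers with the clk_bulk consumer API. A minimal sketch of that pattern for reference; the clock names and my_enable_clocks() are illustrative placeholders, not taken from this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

static struct clk_bulk_data my_clks[] = {
	{ .id = "core" },
	{ .id = "iface" },
};

static int my_enable_clocks(struct device *dev)
{
	int ret;

	/* One devm-managed lookup replaces a loop of individual clk_get() calls. */
	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(my_clks), my_clks);
	if (ret)
		return ret;

	/* Prepare and enable the whole set; a failure rolls back already-enabled clocks. */
	return clk_bulk_prepare_enable(ARRAY_SIZE(my_clks), my_clks);
}

The matching teardown is clk_bulk_disable_unprepare(), as used in the new runtime-suspend callback above.
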
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index a5f481c39277..d5e572d10d6a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -41,16 +41,17 @@ struct hdmi {
/* video state: */
bool power_on;
+ bool hpd_enabled;
+ struct mutex state_mutex; /* protects power_on and hpd_enabled */
unsigned long int pixclock;
void __iomem *mmio;
void __iomem *qfprom_mmio;
phys_addr_t mmio_phy_addr;
- struct regulator_bulk_data *hpd_regs;
struct regulator_bulk_data *pwr_regs;
- struct clk **hpd_clks;
- struct clk **pwr_clks;
+ struct clk_bulk_data *pwr_clks;
+ struct clk *extp_clk;
struct gpio_desc *hpd_gpiod;
@@ -83,21 +84,12 @@ struct hdmi {
/* platform config data (ie. from DT, or pdata) */
struct hdmi_platform_config {
- /* regulators that need to be on for hpd: */
- const char **hpd_reg_names;
- int hpd_reg_cnt;
-
/* regulators that need to be on for screen pwr: */
- const char **pwr_reg_names;
+ const char * const *pwr_reg_names;
int pwr_reg_cnt;
- /* clks that need to be on for hpd: */
- const char **hpd_clk_names;
- const long unsigned *hpd_freq;
- int hpd_clk_cnt;
-
- /* clks that need to be on for screen pwr (ie pixel clk): */
- const char **pwr_clk_names;
+ /* clks that need to be on: */
+ const char * const *pwr_clk_names;
int pwr_clk_cnt;
};
@@ -224,8 +216,8 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi);
void msm_hdmi_hpd_irq(struct drm_bridge *bridge);
enum drm_connector_status msm_hdmi_bridge_detect(
struct drm_bridge *bridge);
-int msm_hdmi_hpd_enable(struct drm_bridge *bridge);
-void msm_hdmi_hpd_disable(struct hdmi *hdmi);
+void msm_hdmi_hpd_enable(struct drm_bridge *bridge);
+void msm_hdmi_hpd_disable(struct drm_bridge *bridge);
/*
* i2c adapter for ddc:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 8bb975e82c17..b9ec14ef2c20 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -4,6 +4,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <linux/hdmi.h>
@@ -12,71 +13,9 @@
#include "hdmi.h"
-/* Supported HDMI Audio sample rates */
-#define MSM_HDMI_SAMPLE_RATE_32KHZ 0
-#define MSM_HDMI_SAMPLE_RATE_44_1KHZ 1
-#define MSM_HDMI_SAMPLE_RATE_48KHZ 2
-#define MSM_HDMI_SAMPLE_RATE_88_2KHZ 3
-#define MSM_HDMI_SAMPLE_RATE_96KHZ 4
-#define MSM_HDMI_SAMPLE_RATE_176_4KHZ 5
-#define MSM_HDMI_SAMPLE_RATE_192KHZ 6
-#define MSM_HDMI_SAMPLE_RATE_MAX 7
-
-
-struct hdmi_msm_audio_acr {
- uint32_t n; /* N parameter for clock regeneration */
- uint32_t cts; /* CTS parameter for clock regeneration */
-};
-
-struct hdmi_msm_audio_arcs {
- unsigned long int pixclock;
- struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX];
-};
-
-#define HDMI_MSM_AUDIO_ARCS(pclk, ...) { (1000 * (pclk)), __VA_ARGS__ }
-
-/* Audio constants lookup table for hdmi_msm_audio_acr_setup */
-/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
-static const struct hdmi_msm_audio_arcs acr_lut[] = {
- /* 25.200MHz */
- HDMI_MSM_AUDIO_ARCS(25200, {
- {4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
- {12288, 25200}, {25088, 28000}, {24576, 25200} }),
- /* 27.000MHz */
- HDMI_MSM_AUDIO_ARCS(27000, {
- {4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
- {12288, 27000}, {25088, 30000}, {24576, 27000} }),
- /* 27.027MHz */
- HDMI_MSM_AUDIO_ARCS(27030, {
- {4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030},
- {12288, 27027}, {25088, 30030}, {24576, 27027} }),
- /* 74.250MHz */
- HDMI_MSM_AUDIO_ARCS(74250, {
- {4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
- {12288, 74250}, {25088, 82500}, {24576, 74250} }),
- /* 148.500MHz */
- HDMI_MSM_AUDIO_ARCS(148500, {
- {4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000},
- {12288, 148500}, {25088, 165000}, {24576, 148500} }),
-};
-
-static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(acr_lut); i++) {
- const struct hdmi_msm_audio_arcs *arcs = &acr_lut[i];
- if (arcs->pixclock == pixclock)
- return arcs;
- }
-
- return NULL;
-}
-
int msm_hdmi_audio_update(struct hdmi *hdmi)
{
struct hdmi_audio *audio = &hdmi->audio;
- const struct hdmi_msm_audio_arcs *arcs = NULL;
bool enabled = audio->enabled;
uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
uint32_t audio_config;
@@ -94,15 +33,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
enabled = false;
}
- if (enabled) {
- arcs = get_arcs(hdmi->pixclock);
- if (!arcs) {
- DBG("disabling audio: unsupported pixclock: %lu",
- hdmi->pixclock);
- enabled = false;
- }
- }
-
/* Read first before writing */
acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
@@ -116,15 +46,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
uint32_t n, cts, multiplier;
enum hdmi_acr_cts select;
- n = arcs->lut[audio->rate].n;
- cts = arcs->lut[audio->rate].cts;
+ drm_hdmi_acr_get_n_cts(hdmi->pixclock, audio->rate, &n, &cts);
- if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate)) {
+ if (audio->rate == 192000 || audio->rate == 176400) {
multiplier = 4;
n >>= 2; /* divide N by 4 and use multiplier */
- } else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate)) {
+ } else if (audio->rate == 96000 || audio->rate == 88200) {
multiplier = 2;
n >>= 1; /* divide N by 2 and use multiplier */
} else {
@@ -137,13 +64,11 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY;
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_N_MULTIPLIER(multiplier);
- if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate))
+ if (audio->rate == 48000 || audio->rate == 96000 ||
+ audio->rate == 192000)
select = ACR_48;
- else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate))
+ else if (audio->rate == 44100 || audio->rate == 88200 ||
+ audio->rate == 176400)
select = ACR_44;
else /* default to 32k */
select = ACR_32;
@@ -204,7 +129,6 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- unsigned int rate;
int ret;
drm_dbg_driver(bridge->dev, "%u Hz, %d bit, %d channels\n",
@@ -214,25 +138,12 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
switch (params->sample_rate) {
case 32000:
- rate = MSM_HDMI_SAMPLE_RATE_32KHZ;
- break;
case 44100:
- rate = MSM_HDMI_SAMPLE_RATE_44_1KHZ;
- break;
case 48000:
- rate = MSM_HDMI_SAMPLE_RATE_48KHZ;
- break;
case 88200:
- rate = MSM_HDMI_SAMPLE_RATE_88_2KHZ;
- break;
case 96000:
- rate = MSM_HDMI_SAMPLE_RATE_96KHZ;
- break;
case 176400:
- rate = MSM_HDMI_SAMPLE_RATE_176_4KHZ;
- break;
case 192000:
- rate = MSM_HDMI_SAMPLE_RATE_192KHZ;
break;
default:
drm_err(bridge->dev, "rate[%d] not supported!\n",
@@ -245,7 +156,7 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
if (ret)
return ret;
- hdmi->audio.rate = rate;
+ hdmi->audio.rate = params->sample_rate;
hdmi->audio.channels = params->cea.channels;
hdmi->audio.enabled = true;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 1456354c8af4..53a7ce8cc7bc 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -18,52 +18,34 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
struct drm_device *dev = bridge->dev;
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
- int i, ret;
-
- pm_runtime_get_sync(&hdmi->pdev->dev);
+ int ret;
- ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %d\n", ret);
+ pm_runtime_resume_and_get(&hdmi->pdev->dev);
- if (config->pwr_clk_cnt > 0) {
+ if (hdmi->extp_clk) {
DBG("pixclock: %lu", hdmi->pixclock);
- ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n",
- config->pwr_clk_names[0], ret);
- }
- }
+ ret = clk_set_rate(hdmi->extp_clk, hdmi->pixclock);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to set extp clk rate: %d\n", ret);
- for (i = 0; i < config->pwr_clk_cnt; i++) {
- ret = clk_prepare_enable(hdmi->pwr_clks[i]);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n",
- config->pwr_clk_names[i], ret);
- }
+ ret = clk_prepare_enable(hdmi->extp_clk);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable extp clk: %d\n", ret);
}
}
static void power_off(struct drm_bridge *bridge)
{
- struct drm_device *dev = bridge->dev;
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
- int i, ret;
/* TODO do we need to wait for final vblank somewhere before
* cutting the clocks?
*/
mdelay(16 + 4);
- for (i = 0; i < config->pwr_clk_cnt; i++)
- clk_disable_unprepare(hdmi->pwr_clks[i]);
-
- ret = regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret);
+ if (hdmi->extp_clk)
+ clk_disable_unprepare(hdmi->extp_clk);
pm_runtime_put(&hdmi->pdev->dev);
}
@@ -320,13 +302,16 @@ static void msm_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
msm_hdmi_set_timings(hdmi, &crtc_state->adjusted_mode);
+ mutex_lock(&hdmi->state_mutex);
if (!hdmi->power_on) {
msm_hdmi_phy_resource_enable(phy);
msm_hdmi_power_on(bridge);
hdmi->power_on = true;
- if (connector->display_info.is_hdmi)
- msm_hdmi_audio_update(hdmi);
}
+ mutex_unlock(&hdmi->state_mutex);
+
+ if (connector->display_info.is_hdmi)
+ msm_hdmi_audio_update(hdmi);
drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
@@ -349,7 +334,10 @@ static void msm_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
msm_hdmi_hdcp_off(hdmi->hdcp_ctrl);
DBG("power down");
- msm_hdmi_set_mode(hdmi, false);
+
+ /* Keep the HDMI enabled if the HPD is enabled */
+ mutex_lock(&hdmi->state_mutex);
+ msm_hdmi_set_mode(hdmi, hdmi->hpd_enabled);
msm_hdmi_phy_powerdown(phy);
@@ -360,6 +348,7 @@ static void msm_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
msm_hdmi_audio_update(hdmi);
msm_hdmi_phy_resource_disable(phy);
}
+ mutex_unlock(&hdmi->state_mutex);
}
static void msm_hdmi_set_timings(struct hdmi *hdmi,
@@ -411,9 +400,6 @@ static void msm_hdmi_set_timings(struct hdmi *hdmi,
frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
DBG("frame_ctrl=%08x", frame_ctrl);
hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
-
- if (hdmi->connector->display_info.is_hdmi)
- msm_hdmi_audio_update(hdmi);
}
static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridge,
@@ -440,7 +426,6 @@ static enum drm_mode_status msm_hdmi_bridge_tmds_char_rate_valid(const struct dr
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
struct msm_drm_private *priv = bridge->dev->dev_private;
struct msm_kms *kms = priv->kms;
long actual;
@@ -453,8 +438,8 @@ static enum drm_mode_status msm_hdmi_bridge_tmds_char_rate_valid(const struct dr
actual = kms->funcs->round_pixclk(kms,
tmds_rate,
hdmi_bridge->hdmi->encoder);
- else if (config->pwr_clk_cnt > 0)
- actual = clk_round_rate(hdmi->pwr_clks[0], tmds_rate);
+ else if (hdmi->extp_clk)
+ actual = clk_round_rate(hdmi->extp_clk, tmds_rate);
else
actual = tmds_rate;
@@ -474,6 +459,8 @@ static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
.atomic_post_disable = msm_hdmi_bridge_atomic_post_disable,
.edid_read = msm_hdmi_bridge_edid_read,
.detect = msm_hdmi_bridge_detect,
+ .hpd_enable = msm_hdmi_hpd_enable,
+ .hpd_disable = msm_hdmi_hpd_disable,
.hdmi_tmds_char_rate_valid = msm_hdmi_bridge_tmds_char_rate_valid,
.hdmi_clear_infoframe = msm_hdmi_bridge_clear_infoframe,
.hdmi_write_infoframe = msm_hdmi_bridge_write_infoframe,
@@ -498,16 +485,15 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
struct hdmi_bridge *hdmi_bridge;
int ret;
- hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
- sizeof(*hdmi_bridge), GFP_KERNEL);
- if (!hdmi_bridge)
- return -ENOMEM;
+ hdmi_bridge = devm_drm_bridge_alloc(hdmi->dev->dev, struct hdmi_bridge, base,
+ &msm_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi_bridge))
+ return PTR_ERR(hdmi_bridge);
hdmi_bridge->hdmi = hdmi;
INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work);
bridge = &hdmi_bridge->base;
- bridge->funcs = &msm_hdmi_bridge_funcs;
bridge->ddc = hdmi->i2c;
bridge->type = DRM_MODE_CONNECTOR_HDMIA;
bridge->vendor = "Qualcomm";
@@ -515,6 +501,7 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge->ops = DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HDMI_AUDIO |
DRM_BRIDGE_OP_EDID;
bridge->hdmi_audio_max_i2s_playback_channels = 8;
bridge->hdmi_audio_dev = &hdmi->pdev->dev;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
index 9ce0ffa35417..407e6c449ee0 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
@@ -60,68 +60,30 @@ static void msm_hdmi_phy_reset(struct hdmi *hdmi)
}
}
-static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
-{
- const struct hdmi_platform_config *config = hdmi->config;
- struct device *dev = &hdmi->pdev->dev;
- int i, ret;
-
- if (enable) {
- for (i = 0; i < config->hpd_clk_cnt; i++) {
- if (config->hpd_freq && config->hpd_freq[i]) {
- ret = clk_set_rate(hdmi->hpd_clks[i],
- config->hpd_freq[i]);
- if (ret)
- dev_warn(dev,
- "failed to set clk %s (%d)\n",
- config->hpd_clk_names[i], ret);
- }
-
- ret = clk_prepare_enable(hdmi->hpd_clks[i]);
- if (ret) {
- DRM_DEV_ERROR(dev,
- "failed to enable hpd clk: %s (%d)\n",
- config->hpd_clk_names[i], ret);
- }
- }
- } else {
- for (i = config->hpd_clk_cnt - 1; i >= 0; i--)
- clk_disable_unprepare(hdmi->hpd_clks[i]);
- }
-}
-
-int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
+void msm_hdmi_hpd_enable(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
uint32_t hpd_ctrl;
int ret;
unsigned long flags;
- ret = regulator_bulk_enable(config->hpd_reg_cnt, hdmi->hpd_regs);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to enable hpd regulators: %d\n", ret);
- goto fail;
- }
-
- ret = pinctrl_pm_select_default_state(dev);
- if (ret) {
- DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret);
- goto fail;
- }
-
if (hdmi->hpd_gpiod)
gpiod_set_value_cansleep(hdmi->hpd_gpiod, 1);
- pm_runtime_get_sync(dev);
- enable_hpd_clocks(hdmi, true);
+ ret = pm_runtime_resume_and_get(dev);
+ if (WARN_ON(ret))
+ return;
+ mutex_lock(&hdmi->state_mutex);
msm_hdmi_set_mode(hdmi, false);
msm_hdmi_phy_reset(hdmi);
msm_hdmi_set_mode(hdmi, true);
+ hdmi->hpd_enabled = true;
+ mutex_unlock(&hdmi->state_mutex);
+
hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
/* enable HPD events: */
@@ -140,34 +102,23 @@ int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
-
- return 0;
-
-fail:
- return ret;
}
-void msm_hdmi_hpd_disable(struct hdmi *hdmi)
+void msm_hdmi_hpd_disable(struct drm_bridge *bridge)
{
- const struct hdmi_platform_config *config = hdmi->config;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
struct device *dev = &hdmi->pdev->dev;
- int ret;
/* Disable HPD interrupt */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
- msm_hdmi_set_mode(hdmi, false);
+ mutex_lock(&hdmi->state_mutex);
+ hdmi->hpd_enabled = false;
+ msm_hdmi_set_mode(hdmi, hdmi->power_on);
+ mutex_unlock(&hdmi->state_mutex);
- enable_hpd_clocks(hdmi, false);
pm_runtime_put(dev);
-
- ret = pinctrl_pm_select_sleep_state(dev);
- if (ret)
- dev_warn(dev, "pinctrl state chg failed: %d\n", ret);
-
- ret = regulator_bulk_disable(config->hpd_reg_cnt, hdmi->hpd_regs);
- if (ret)
- dev_warn(dev, "failed to disable hpd regulator: %d\n", ret);
}
void msm_hdmi_hpd_irq(struct drm_bridge *bridge)
@@ -202,14 +153,16 @@ void msm_hdmi_hpd_irq(struct drm_bridge *bridge)
static enum drm_connector_status detect_reg(struct hdmi *hdmi)
{
- uint32_t hpd_int_status;
+ u32 hpd_int_status = 0;
+ int ret;
- pm_runtime_get_sync(&hdmi->pdev->dev);
- enable_hpd_clocks(hdmi, true);
+ ret = pm_runtime_resume_and_get(&hdmi->pdev->dev);
+ if (ret)
+ goto out;
hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
- enable_hpd_clocks(hdmi, false);
+out:
pm_runtime_put(&hdmi->pdev->dev);
return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index 7aa500d24240..ebefea4fb408 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -107,11 +107,15 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
if (num == 0)
return num;
+ ret = pm_runtime_resume_and_get(&hdmi->pdev->dev);
+ if (ret)
+ return ret;
+
init_ddc(hdmi_i2c);
ret = ddc_clear_irq(hdmi_i2c);
if (ret)
- return ret;
+ goto fail;
for (i = 0; i < num; i++) {
struct i2c_msg *p = &msgs[i];
@@ -169,7 +173,7 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
- return ret;
+ goto fail;
}
ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS);
@@ -202,7 +206,13 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
}
}
+ pm_runtime_put(&hdmi->pdev->dev);
+
return i;
+
+fail:
+ pm_runtime_put(&hdmi->pdev->dev);
+ return ret;
}
static u32 msm_hdmi_i2c_func(struct i2c_adapter *adapter)
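
The DDC transfer above now brackets the register access with runtime-PM calls. A minimal sketch of that bracketing pattern, with my_do_io() as a placeholder body rather than anything from the patch:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int my_do_io(struct device *dev)
{
	/* placeholder for the actual hardware access */
	return 0;
}

static int my_transfer(struct device *dev)
{
	int ret;

	/* Unlike pm_runtime_get_sync(), this drops the usage count on failure. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	ret = my_do_io(dev);

	pm_runtime_put(dev);
	return ret;
}
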
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 03120c54ced6..667573f1db7c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -58,7 +58,11 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
struct device *dev = &phy->pdev->dev;
int i, ret = 0;
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "runtime resume failed: %d\n", ret);
+ return ret;
+ }
ret = regulator_bulk_enable(cfg->num_regs, phy->regs);
if (ret) {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c3588dc9e537..f316e6776f67 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -671,7 +671,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
break;
case MSM_INFO_GET_FLAGS:
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
ret = -EINVAL;
break;
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index ebc9ba66efb8..2995e80fec3b 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -735,7 +735,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
msm_gem_assert_locked(obj);
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return ERR_PTR(-ENODEV);
pages = msm_gem_get_pages_locked(obj, madv);
@@ -1074,7 +1074,7 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
put_iova_spaces(obj, true);
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
GEM_WARN_ON(msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 85f0257e83da..ba5c4ff76292 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -224,7 +224,7 @@ msm_gem_assert_locked(struct drm_gem_object *obj)
/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
- return msm_obj->base.import_attach || msm_obj->pin_count;
+ return drm_gem_is_imported(&msm_obj->base) || msm_obj->pin_count;
}
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index ee267490c935..2e37913d5a6a 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -50,7 +50,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
struct page **pages;
int ret = 0;
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return 0;
pages = msm_gem_pin_pages_locked(obj);
@@ -62,7 +62,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return;
msm_gem_unpin_pages_locked(obj);

diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 6970b0f7f457..2e1d5c343272 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -156,6 +156,7 @@ void msm_devfreq_init(struct msm_gpu *gpu)
priv->gpu_devfreq_config.downdifferential = 10;
mutex_init(&df->lock);
+ df->suspended = true;
ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
DEV_PM_QOS_MIN_FREQUENCY, 0);
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index dcb49fd30402..709979fcfab6 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -150,7 +150,7 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
dev = msm_mdss->dev;
- domain = irq_domain_add_linear(dev->of_node, 32,
+ domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), 32,
&msm_mdss_irqdomain_ops, msm_mdss);
if (!domain) {
dev_err(dev, "failed to add irq_domain\n");
@@ -592,6 +592,16 @@ static const struct msm_mdss_data sa8775p_data = {
.reg_bus_bw = 74000,
};
+static const struct msm_mdss_data sar2130p_data = {
+ .ubwc_enc_version = UBWC_3_0, /* 4.0.2 in hw */
+ .ubwc_dec_version = UBWC_4_3,
+ .ubwc_swizzle = 6,
+ .ubwc_bank_spread = true,
+ .highest_bank_bit = 0,
+ .macrotile_mode = 1,
+ .reg_bus_bw = 74000,
+};
+
static const struct msm_mdss_data sc7180_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
@@ -738,6 +748,7 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
{ .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
{ .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data },
+ { .compatible = "qcom,sar2130p-mdss", .data = &sar2130p_data },
{ .compatible = "qcom,sdm670-mdss", .data = &sdm670_data },
{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index c5651c39ac2a..89dce15eed3b 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -93,7 +93,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
}
msm_gem_object_set_name(ring->bo, "ring%d", id);
- args.name = to_msm_bo(ring->bo)->name,
+ args.name = to_msm_bo(ring->bo)->name;
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index 55a35182858c..5a6ae9fc3194 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -2259,5 +2259,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
+<domain name="CP_INDIRECT_BUFFER" width="32" varset="chip" prefix="chip" variants="A5XX-">
+ <reg64 offset="0" name="IB_BASE" type="address"/>
+ <reg32 offset="2" name="2">
+ <bitfield name="IB_SIZE" low="0" high="19"/>
+ </reg32>
+</domain>
+
</database>
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index 8ee00f59ca82..fcb2a7517377 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -134,7 +134,6 @@ static int lcdif_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct lcdif_drm_private *lcdif;
- struct resource *res;
int ret;
lcdif = devm_kzalloc(&pdev->dev, sizeof(*lcdif), GFP_KERNEL);
@@ -144,8 +143,7 @@ static int lcdif_load(struct drm_device *drm)
lcdif->drm = drm;
drm->dev_private = lcdif;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lcdif->base = devm_ioremap_resource(drm->dev, res);
+ lcdif->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lcdif->base))
return PTR_ERR(lcdif->base);
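
The lcdif change above folds platform_get_resource() plus devm_ioremap_resource() into a single call. A small probe sketch under the same pattern; struct my_priv and my_probe() are illustrative, not from the patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_priv {
	void __iomem *base;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Looks up IORESOURCE_MEM index 0, requests and maps it, all devm-managed. */
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	return 0;
}
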
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 59020862cf65..c183b1112bc4 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -8,6 +8,7 @@
* Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
*/
+#include <linux/aperture.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -215,7 +216,6 @@ static int mxsfb_load(struct drm_device *drm,
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct mxsfb_drm_private *mxsfb;
- struct resource *res;
int ret;
mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL);
@@ -226,8 +226,7 @@ static int mxsfb_load(struct drm_device *drm,
drm->dev_private = mxsfb;
mxsfb->devdata = devdata;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mxsfb->base = devm_ioremap_resource(drm->dev, res);
+ mxsfb->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxsfb->base))
return PTR_ERR(mxsfb->base);
@@ -361,6 +360,15 @@ static int mxsfb_probe(struct platform_device *pdev)
if (ret)
goto err_free;
+ /*
+ * Remove early framebuffers (i.e. simplefb). The framebuffer can be
+ * located anywhere in RAM.
+ */
+ ret = aperture_remove_all_conflicting_devices(mxsfb_driver.name);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't kick out existing framebuffers\n");
+
ret = drm_dev_register(drm, 0);
if (ret)
goto err_unload;
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 7b863355c5c6..385d24530d1e 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -2,6 +2,7 @@
ccflags-y += -I $(src)/include
ccflags-y += -I $(src)/include/nvkm
ccflags-y += -I $(src)/nvkm
+ccflags-y += -I $(src)/nvkm/subdev/gsp
ccflags-y += -I $(src)
# NVKM - HW resource manager
@@ -68,5 +69,6 @@ nouveau-y += nv17_fence.o
nouveau-y += nv50_fence.o
nouveau-y += nv84_fence.o
nouveau-y += nvc0_fence.o
+nouveau-y += gv100_fence.o
obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 7b3e979c51ec..d1587639ebb0 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_NOUVEAU
tristate "Nouveau (NVIDIA) cards"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select IOMMU_API
select FW_LOADER
select FW_CACHE if PM_SLEEP
@@ -94,7 +94,6 @@ config DRM_NOUVEAU_SVM
bool "(EXPERIMENTAL) Enable SVM (Shared Virtual Memory) support"
depends on DEVICE_PRIVATE
depends on DRM_NOUVEAU
- depends on MMU
depends on STAGING
select HMM_MIRROR
select MMU_NOTIFIER
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 67146f1e8482..c063756eaea3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -768,9 +768,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
disp->image[nv_crtc->index] = NULL;
}
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_fini(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin_del(&nv_crtc->cursor.nvbo);
nvif_event_dtor(&nv_crtc->vblank);
nvif_head_dtor(&nv_crtc->head);
kfree(nv_crtc);
@@ -1303,6 +1301,7 @@ nv04_crtc_vblank_handler(struct nvif_event *event, void *repv, u32 repc)
int
nv04_crtc_create(struct drm_device *dev, int crtc_num)
{
+ struct nouveau_cli *cli = &nouveau_drm(dev)->client;
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_crtc *nv_crtc;
struct drm_plane *primary;
@@ -1336,20 +1335,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
- ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100,
- NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL,
- &nv_crtc->cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
- NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (!ret) {
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- }
- if (ret)
- nouveau_bo_fini(nv_crtc->cursor.nvbo);
- }
+ ret = nouveau_bo_new_map(cli, NOUVEAU_GEM_DOMAIN_VRAM, 64 * 64 * 4, &nv_crtc->cursor.nvbo);
+ if (ret)
+ return ret;
nv04_cursor_init(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 28be2912ff74..d5049dee4b8c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -9,11 +9,13 @@ nouveau-y += dispnv50/core907d.o
nouveau-y += dispnv50/core917d.o
nouveau-y += dispnv50/corec37d.o
nouveau-y += dispnv50/corec57d.o
+nouveau-y += dispnv50/coreca7d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc907d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc37d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc57d.o
+nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcca7d.o
nouveau-y += dispnv50/dac507d.o
nouveau-y += dispnv50/dac907d.o
@@ -31,6 +33,7 @@ nouveau-y += dispnv50/head907d.o
nouveau-y += dispnv50/head917d.o
nouveau-y += dispnv50/headc37d.o
nouveau-y += dispnv50/headc57d.o
+nouveau-y += dispnv50/headca7d.o
nouveau-y += dispnv50/wimm.o
nouveau-y += dispnv50/wimmc37b.o
@@ -39,6 +42,7 @@ nouveau-y += dispnv50/wndw.o
nouveau-y += dispnv50/wndwc37e.o
nouveau-y += dispnv50/wndwc57e.o
nouveau-y += dispnv50/wndwc67e.o
+nouveau-y += dispnv50/wndwca7e.o
nouveau-y += dispnv50/base.o
nouveau-y += dispnv50/base507c.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index f045515696cb..c6331bf97582 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
+ { GB202_DISP_CORE_CHANNEL_DMA, 0, coreca7d_new },
{ AD102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index f75088186fba..aa07a3ad5dfd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -7,7 +7,10 @@
struct nv50_core {
const struct nv50_core_func *func;
+ struct nv50_disp *disp;
+
struct nv50_dmac chan;
+
bool assign_windows;
};
@@ -18,6 +21,7 @@ struct nv50_core_func {
int (*init)(struct nv50_core *);
void (*ntfy_init)(struct nouveau_bo *, u32 offset);
int (*caps_init)(struct nouveau_drm *, struct nv50_disp *);
+ u32 caps_class;
int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
int (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
@@ -70,4 +74,6 @@ int corec37d_wndw_owner(struct nv50_core *);
extern const struct nv50_outp_func sorc37d;
int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
+
+int coreca7d_new(struct nouveau_drm *, s32, struct nv50_core **);
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index ce2cb78bbdd3..4b947b67a844 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -165,6 +165,7 @@ core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL)))
return -ENOMEM;
core->func = func;
+ core->disp = disp;
ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args),
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 7f637b8830be..83eec2f091f0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -105,7 +105,7 @@ int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
int ret;
ret = nvif_object_ctor(&disp->disp->object, "dispCaps", 0,
- GV100_DISP_CAPS, NULL, 0, &disp->caps);
+ disp->core->func->caps_class, NULL, 0, &disp->caps);
if (ret) {
NV_ERROR(drm,
"Failed to init notifier caps region: %d\n",
@@ -162,6 +162,7 @@ corec37d = {
.init = corec37d_init,
.ntfy_init = corec37d_ntfy_init,
.caps_init = corec37d_caps_init,
+ .caps_class = GV100_DISP_CAPS,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index 421d0d57e1d8..39be576eadcb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -22,6 +22,7 @@
#include "core.h"
#include "head.h"
+#include <nvif/class.h>
#include <nvif/pushc37b.h>
#include <nvhw/class/clc57d.h>
@@ -63,6 +64,7 @@ corec57d = {
.init = corec57d_init,
.ntfy_init = corec37d_ntfy_init,
.caps_init = corec37d_caps_init,
+ .caps_class = GV100_DISP_CAPS,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c b/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c
new file mode 100644
index 000000000000..171727be400e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "core.h"
+#include "head.h"
+
+#include <nvif/class.h>
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7d.h>
+
+#include <nouveau_bo.h>
+
+static int
+coreca7d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
+{
+ const u64 ntfy_addr = core->disp->sync->offset + NV50_DISP_CORE_NTFY;
+ const u32 ntfy_hi = upper_32_bits(ntfy_addr);
+ const u32 ntfy_lo = lower_32_bits(ntfy_addr);
+ struct nvif_push *push = &core->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5 + (ntfy ? 5 + 2 : 0));
+ if (ret)
+ return ret;
+
+ if (ntfy) {
+ PUSH_MTHD(push, NVCA7D, SET_SURFACE_ADDRESS_HI_NOTIFIER, ntfy_hi,
+
+ SET_SURFACE_ADDRESS_LO_NOTIFIER,
+ NVVAL(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, ADDRESS_LO, ntfy_lo >> 4) |
+ NVDEF(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7D, SET_NOTIFIER_CONTROL,
+ NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
+ NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
+ }
+
+ PUSH_MTHD(push, NVCA7D, SET_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_CURS],
+ SET_WINDOW_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_WNDW]);
+
+ PUSH_MTHD(push, NVCA7D, UPDATE,
+ NVDEF(NVCA7D, UPDATE, RELEASE_ELV, TRUE) |
+ NVDEF(NVCA7D, UPDATE, SPECIAL_HANDLING, NONE) |
+ NVDEF(NVCA7D, UPDATE, INHIBIT_INTERRUPTS, FALSE));
+
+ if (ntfy) {
+ PUSH_MTHD(push, NVCA7D, SET_NOTIFIER_CONTROL,
+ NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
+ }
+
+ return PUSH_KICK(push);
+}
+
+static int
+coreca7d_init(struct nv50_core *core)
+{
+ struct nvif_push *push = &core->chan.push;
+ const u32 windows = 8, heads = 4;
+ int ret, i;
+
+ ret = PUSH_WAIT(push, windows * 6 + heads * 6);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < windows; i++) {
+ PUSH_MTHD(push, NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(i),
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED1BPP, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED2BPP, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED4BPP, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED8BPP, TRUE),
+
+ WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(i), 0x00000000);
+
+ PUSH_MTHD(push, NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS(i),
+ NVVAL(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, MAX_PIXELS_FETCHED_PER_LINE, 0x7fff) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, ILUT_ALLOWED, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, UPSCALING_ALLOWED, FALSE),
+
+ WINDOW_SET_PHYSICAL(i), BIT(i));
+ }
+
+ for (i = 0; i < heads; i++) {
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS(i),
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, CURSOR, USAGE_W256_H256) |
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, OLUT_ALLOWED, TRUE) |
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, OUTPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, UPSCALING_ALLOWED, TRUE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_TILE_MASK(i), BIT(i));
+
+ PUSH_MTHD(push, NVCA7D, TILE_SET_TILE_SIZE(i), 0);
+ }
+
+ core->assign_windows = true;
+ return PUSH_KICK(push);
+}
+
+static const struct nv50_core_func
+coreca7d = {
+ .init = coreca7d_init,
+ .ntfy_init = corec37d_ntfy_init,
+ .caps_init = corec37d_caps_init,
+ .caps_class = GB202_DISP_CAPS,
+ .ntfy_wait_done = corec37d_ntfy_wait_done,
+ .update = coreca7d_update,
+ .wndw.owner = corec37d_wndw_owner,
+ .head = &headca7d,
+ .sor = &sorc37d,
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ .crc = &crcca7d,
+#endif
+};
+
+int
+coreca7d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+ return core507d_new_(&coreca7d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
index 5936b6b3b15d..deb6af40ef32 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -509,6 +509,10 @@ nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
if (ret)
return ret;
+ /* No CTXDMAs on Blackwell. */
+ if (core->chan.base.user.oclass >= GB202_DISP_CORE_CHANNEL_DMA)
+ return 0;
+
ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma",
NV50_DISP_HANDLE_CRC_CTX(head, idx),
NV_DMA_IN_MEMORY,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.h b/drivers/gpu/drm/nouveau/dispnv50/crc.h
index 4823f1fde2dd..75a2009e8193 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.h
@@ -94,6 +94,7 @@ void nv50_crc_atomic_clr(struct nv50_head *);
extern const struct nv50_crc_func crc907d;
extern const struct nv50_crc_func crcc37d;
extern const struct nv50_crc_func crcc57d;
+extern const struct nv50_crc_func crcca7d;
#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
struct nv50_crc {};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c b/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c
new file mode 100644
index 000000000000..912f59aebe87
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "crcc37d.h"
+#include "core.h"
+#include "head.h"
+
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7d.h>
+
+static int
+crcca7d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, ctx ? 3 : 2);
+ if (ret)
+ return ret;
+
+ if (ctx) {
+ const u32 crc_hi = upper_32_bits(ctx->mem.addr);
+ const u32 crc_lo = lower_32_bits(ctx->mem.addr);
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_CRC(i), crc_hi,
+
+ HEAD_SET_SURFACE_ADDRESS_LO_CRC(i),
+ NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ADDRESS_LO, crc_lo >> 4) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ENABLE, ENABLE));
+ } else {
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC(i),
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ENABLE, DISABLE));
+ }
+
+ return 0;
+}
+
+static int
+crcca7d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
+ struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int primary_crc, ret;
+
+ if (!source) {
+ ret = PUSH_WAIT(push, 1);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CRC_CONTROL(i), 0);
+
+ return crcca7d_set_ctx(head, NULL);
+ }
+
+ switch (source) {
+ case NV50_CRC_SOURCE_TYPE_SOR:
+ primary_crc = NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(or);
+ break;
+ case NV50_CRC_SOURCE_TYPE_SF:
+ primary_crc = NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF;
+ break;
+ default:
+ break;
+ }
+
+ ret = crcca7d_set_ctx(head, ctx);
+ if (ret)
+ return ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CRC_CONTROL(i),
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+ NVVAL(NVCA7D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, primary_crc) |
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) |
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE));
+
+ return 0;
+}
+
+const struct nv50_crc_func
+crcca7d = {
+ .set_src = crcca7d_set_src,
+ .set_ctx = crcca7d_set_ctx,
+ .get_entry = crcc37d_get_entry,
+ .ctx_finished = crcc37d_ctx_finished,
+ .flip_threshold = CRCC37D_FLIP_THRESHOLD,
+ .num_entries = CRCC37D_MAX_ENTRIES,
+ .notifier_len = sizeof(struct crcc37d_notifier),
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index 31d8b2e4791d..557bd05240fa 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} curses[] = {
+ { GB202_DISP_CURSOR, 0, cursc37a_new },
{ GA102_DISP_CURSOR, 0, cursc37a_new },
{ TU102_DISP_CURSOR, 0, cursc37a_new },
{ GV100_DISP_CURSOR, 0, cursc37a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 504cb3f2054b..e5d37eee4301 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -279,6 +279,16 @@ nv50_dmac_create(struct nouveau_drm *drm,
if (syncbuf < 0)
return 0;
+ /* No CTXDMAs on Blackwell. */
+ if (disp->oclass >= GB202_DISP) {
+ /* "handle != NULL_HANDLE" is used to determine enable status
+ * in a number of places, so fill in some fake object handles.
+ */
+ dmac->sync.handle = NV50_DISP_HANDLE_SYNCBUF;
+ dmac->vram.handle = NV50_DISP_HANDLE_VRAM;
+ return 0;
+ }
+
ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
@@ -775,10 +785,8 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
union hdmi_infoframe infoframe = { 0 };
const u8 rekey = 56; /* binary driver, and tegra, constant */
u32 max_ac_packet;
- struct {
- struct nvif_outp_infoframe_v0 infoframe;
- u8 data[17];
- } args = { 0 };
+ DEFINE_RAW_FLEX(struct nvif_outp_infoframe_v0, args, data, 17);
+ const u8 data_len = __member_size(args->data);
int ret, size;
max_ac_packet = mode->htotal - mode->hdisplay;
@@ -815,29 +823,29 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
return;
/* AVI InfoFrame. */
- args.infoframe.version = 0;
- args.infoframe.head = nv_crtc->index;
+ args->version = 0;
+ args->head = nv_crtc->index;
if (!drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, &nv_connector->base, mode)) {
drm_hdmi_avi_infoframe_quant_range(&infoframe.avi, &nv_connector->base, mode,
HDMI_QUANTIZATION_RANGE_FULL);
- size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
+ size = hdmi_infoframe_pack(&infoframe, args->data, data_len);
} else {
size = 0;
}
- nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, &args.infoframe, size);
+ nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, args, size);
/* Vendor InfoFrame. */
- memset(&args.data, 0, sizeof(args.data));
+ memset(args->data, 0, data_len);
if (!drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi,
&nv_connector->base, mode))
- size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
+ size = hdmi_infoframe_pack(&infoframe, args->data, data_len);
else
size = 0;
- nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, &args.infoframe, size);
+ nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, args, size);
nv_encoder->hdmi.enabled = true;
}
@@ -2810,10 +2818,7 @@ nv50_display_destroy(struct drm_device *dev)
nvif_object_dtor(&disp->caps);
nv50_core_del(&disp->core);
- nouveau_bo_unmap(disp->sync);
- if (disp->sync)
- nouveau_bo_unpin(disp->sync);
- nouveau_bo_fini(disp->sync);
+ nouveau_bo_unpin_del(&disp->sync);
nouveau_display(dev)->priv = NULL;
kfree(disp);
@@ -2845,20 +2850,7 @@ nv50_display_create(struct drm_device *dev)
dev->mode_config.normalize_zpos = true;
/* small shared memory area we use for notifiers and semaphores */
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
- NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, NULL, NULL, &disp->sync);
- if (!ret) {
- ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
- if (!ret) {
- ret = nouveau_bo_map(disp->sync);
- if (ret)
- nouveau_bo_unpin(disp->sync);
- }
- if (ret)
- nouveau_bo_fini(disp->sync);
- }
-
+ ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &disp->sync);
if (ret)
goto out;
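
The infoframe change earlier in this file replaces an open-coded struct-plus-trailing-buffer with DEFINE_RAW_FLEX(), which declares on-stack storage for a structure ending in a flexible-array member. A minimal sketch of that macro (declared in <linux/overflow.h> in recent kernels) with an illustrative my_hdr type that is not part of the patch:

#include <linux/overflow.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_hdr {
	u8 version;
	u8 len;
	u8 data[];	/* flexible array member */
};

static void my_fill(void)
{
	/* Stack storage for struct my_hdr plus 16 bytes of data[]; msg is a pointer. */
	DEFINE_RAW_FLEX(struct my_hdr, msg, data, 16);

	msg->version = 1;
	msg->len = 16;
	memset(msg->data, 0, msg->len);
}

In the patch, __member_size(args->data) then recovers the trailing-buffer size at compile time instead of a separate ARRAY_SIZE() on a named array.
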
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index d7c74cc43ba5..3dd742b4f823 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -577,6 +577,7 @@ nv50_head_create(struct drm_device *dev, int index)
return ERR_PTR(-ENOMEM);
head->func = disp->core->func->head;
+ head->disp = disp;
head->base.index = index;
if (disp->disp->object.oclass < GF110_DISP)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index e9d17037ffcf..8bd2fcb1eff5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -13,6 +13,8 @@
struct nv50_head {
const struct nv50_head_func *func;
+ struct nv50_disp *disp;
+
struct nouveau_crtc base;
struct nv50_crc crc;
struct nv50_lut olut;
@@ -98,4 +100,7 @@ int headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
void headc37d_static_wndw_map(struct nv50_head *, struct nv50_head_atom *);
extern const struct nv50_head_func headc57d;
+bool headc57d_olut(struct nv50_head *, struct nv50_head_atom *, int size);
+
+extern const struct nv50_head_func headca7d;
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index fde4087e7691..3f8ba495de8f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -182,7 +182,7 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-static bool
+bool
headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
if (size != 0 && size != 256 && size != 1024)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headca7d.c b/drivers/gpu/drm/nouveau/dispnv50/headca7d.c
new file mode 100644
index 000000000000..eeaeb15aa664
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/headca7d.c
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "head.h"
+#include "atom.h"
+#include "core.h"
+
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7d.h>
+
+static int
+headca7d_display_id(struct nv50_head *head, u32 display_id)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_DISPLAY_ID(i, 0), display_id);
+
+ return 0;
+}
+
+static int
+headca7d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ u8 depth;
+ int ret;
+
+ switch (asyh->or.depth) {
+ case 6:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444;
+ break;
+ case 5:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444;
+ break;
+ case 2:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444;
+ break;
+ case 0:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, depth) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, COLOR_SPACE_OVERRIDE, DISABLE) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, EXT_PACKET_WIN, NONE));
+
+ return 0;
+}
+
+static int
+headca7d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_PROCAMP(i),
+ NVDEF(NVCA7D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+ NVDEF(NVCA7D, HEAD_SET_PROCAMP, CHROMA_LPF, DISABLE) |
+ NVDEF(NVCA7D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA));
+
+ return 0;
+}
+
+static int
+headca7d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_DITHER_CONTROL(i),
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+ NVDEF(NVCA7D, HEAD_SET_DITHER_CONTROL, OFFSET_ENABLE, DISABLE) |
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+
+ return 0;
+}
+
+static int
+headca7d_curs_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(i, 0),
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+headca7d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const u32 curs_hi = upper_32_bits(asyh->curs.offset);
+ const u32 curs_lo = lower_32_bits(asyh->curs.offset);
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 7);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_CURSOR(i, 0), curs_hi);
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(i, 0),
+ NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ADDRESS_LO, curs_lo >> 4) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0),
+
+ HEAD_SET_CONTROL_CURSOR_COMPOSITION(i),
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, K1, 0xff) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, CURSOR_COLOR_FACTOR_SELECT,
+ K1) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, VIEWPORT_COLOR_FACTOR_SELECT,
+ NEG_K1_TIMES_SRC) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, MODE, BLEND));
+
+ return 0;
+}
+
+static int
+headca7d_olut_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT(i),
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+headca7d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const u32 olut_hi = upper_32_bits(asyh->olut.offset);
+ const u32 olut_lo = lower_32_bits(asyh->olut.offset);
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 6);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_OLUT(i), olut_hi,
+
+ HEAD_SET_SURFACE_ADDRESS_LO_OLUT(i),
+ NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ADDRESS_LO, olut_lo >> 4) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_OLUT_CONTROL(i),
+ NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, INTERPOLATE, asyh->olut.output_mode) |
+ NVDEF(NVCA7D, HEAD_SET_OLUT_CONTROL, MIRROR, DISABLE) |
+ NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, MODE, asyh->olut.mode) |
+ NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, SIZE, asyh->olut.size),
+
+ HEAD_SET_OLUT_FP_NORM_SCALE(i), 0xffffffff);
+
+ return 0;
+}
+
+static int
+headca7d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ struct nv50_head_mode *m = &asyh->mode;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 11);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_RASTER_SIZE(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+ HEAD_SET_RASTER_SYNC_END(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+ HEAD_SET_RASTER_BLANK_END(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+ HEAD_SET_RASTER_BLANK_START(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL(i),
+ NVDEF(NVCA7D, HEAD_SET_CONTROL, STRUCTURE, PROGRESSIVE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
+ NVVAL(NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
+ NVVAL(NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000));
+
+ return 0;
+}
+
+static int
+headca7d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN(i),
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH));
+ return 0;
+}
+
+const struct nv50_head_func
+headca7d = {
+ .view = headca7d_view,
+ .mode = headca7d_mode,
+ .olut = headc57d_olut,
+ .ilut_check = head907d_ilut_check,
+ .olut_identity = true,
+ .olut_size = 1024,
+ .olut_set = headca7d_olut_set,
+ .olut_clr = headca7d_olut_clr,
+ .curs_layout = head917d_curs_layout,
+ .curs_format = headc37d_curs_format,
+ .curs_set = headca7d_curs_set,
+ .curs_clr = headca7d_curs_clr,
+ .dither = headca7d_dither,
+ .procamp = headca7d_procamp,
+ .or = headca7d_or,
+ .static_wndw_map = headc37d_static_wndw_map,
+ .display_id = headca7d_display_id,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
index 566fbddfc8d7..53c9ab6c138b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimm.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} wimms[] = {
+ { GB202_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index f6be426dd525..11d5b923d6e7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -556,14 +556,24 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
return ret;
if (wndw->ctxdma.parent) {
- ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
- if (IS_ERR(ctxdma)) {
- nouveau_bo_unpin(nvbo);
- return PTR_ERR(ctxdma);
+ if (wndw->wndw.base.user.oclass < GB202_DISP_WINDOW_CHANNEL_DMA) {
+ ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+ if (IS_ERR(ctxdma)) {
+ nouveau_bo_unpin(nvbo);
+ return PTR_ERR(ctxdma);
+ }
+
+ if (asyw->visible)
+ asyw->image.handle[0] = ctxdma->object.handle;
+ } else {
+ /* No CTXDMAs on Blackwell. */
+ if (asyw->visible) {
+ /* "handle != NULL_HANDLE" is used to determine enable status
+ * in a number of places, so fill in a fake object handle.
+ */
+ asyw->image.handle[0] = NV50_DISP_HANDLE_WNDW_CTX(0);
+ }
}
-
- if (asyw->visible)
- asyw->image.handle[0] = ctxdma->object.handle;
}
ret = drm_gem_plane_helper_prepare_fb(plane, state);
@@ -901,6 +911,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
int (*new)(struct nouveau_drm *, enum drm_plane_type,
int, s32, struct nv50_wndw **);
} wndws[] = {
+ { GB202_DISP_WINDOW_CHANNEL_DMA, 0, wndwca7e_new },
{ GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new },
{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 76a6ae5d5652..90d100514bef 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -134,6 +134,9 @@ int wndwc57e_csc_clr(struct nv50_wndw *);
int wndwc67e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
+int wndwca7e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+ struct nv50_wndw **);
+
int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
struct nv50_wndw **);
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index 50a7b97d37a2..554c4f91f8be 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -25,6 +25,7 @@
#include <drm/drm_atomic_helper.h>
#include <nouveau_bo.h>
+#include <nvif/class.h>
#include <nvif/if0014.h>
#include <nvif/pushc37b.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
new file mode 100644
index 000000000000..0d8e9a9d1a57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7e.h>
+
+#include <nouveau_bo.h>
+
+static int
+wndwca7e_image_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_PRESENT_CONTROL,
+ NVVAL(NVCA7E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, 0) |
+ NVDEF(NVCA7E, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING));
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ISO(0),
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+wndwca7e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ const u32 iso0_hi = upper_32_bits(asyw->image.offset[0]);
+ const u32 iso0_lo = lower_32_bits(asyw->image.offset[0]);
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret, kind;
+
+ if (asyw->image.kind)
+ kind = NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_BLOCKLINEAR;
+ else
+ kind = NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_PITCH;
+
+ ret = PUSH_WAIT(push, 17);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_ISO(0), iso0_hi);
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ISO(0),
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ADDRESS_LO, iso0_lo >> 4) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, TARGET, PHYSICAL_NVM) |
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, KIND, kind) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_PRESENT_CONTROL,
+ NVVAL(NVCA7E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+ NVVAL(NVCA7E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NVCA7E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_SIZE,
+ NVVAL(NVCA7E, SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NVCA7E, SET_SIZE, HEIGHT, asyw->image.h),
+
+ SET_STORAGE,
+ NVVAL(NVCA7E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+ SET_PARAMS,
+ NVVAL(NVCA7E, SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NVCA7E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+ NVDEF(NVCA7E, SET_PARAMS, SWAP_UV, DISABLE) |
+ NVDEF(NVCA7E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
+ SET_PLANAR_STORAGE(0),
+ NVVAL(NVCA7E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NVCA7E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+ PUSH_MTHD(push, NVCA7E, SET_POINT_IN(0),
+ NVVAL(NVCA7E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+ NVVAL(NVCA7E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+ PUSH_MTHD(push, NVCA7E, SET_SIZE_IN,
+ NVVAL(NVCA7E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+ NVVAL(NVCA7E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+ PUSH_MTHD(push, NVCA7E, SET_SIZE_OUT,
+ NVVAL(NVCA7E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+ NVVAL(NVCA7E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+
+ return 0;
+}
+
+static int
+wndwca7e_ilut_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT,
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+wndwca7e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ const u32 ilut_hi = upper_32_bits(asyw->xlut.i.offset);
+ const u32 ilut_lo = lower_32_bits(asyw->xlut.i.offset);
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_ILUT, ilut_hi,
+
+ SET_SURFACE_ADDRESS_LO_ILUT,
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ADDRESS_LO, ilut_lo >> 4) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_ILUT_CONTROL,
+ NVVAL(NVCA7E, SET_ILUT_CONTROL, SIZE, asyw->xlut.i.size) |
+ NVVAL(NVCA7E, SET_ILUT_CONTROL, MODE, asyw->xlut.i.mode) |
+ NVVAL(NVCA7E, SET_ILUT_CONTROL, INTERPOLATE, asyw->xlut.i.output_mode));
+
+ return 0;
+}
+
+static int
+wndwca7e_ntfy_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER,
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+wndwca7e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+ const u64 ntfy_addr = disp->sync->offset + asyw->ntfy.offset;
+ const u32 ntfy_hi = upper_32_bits(ntfy_addr);
+ const u32 ntfy_lo = lower_32_bits(ntfy_addr);
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_NOTIFIER, ntfy_hi,
+
+ SET_SURFACE_ADDRESS_LO_NOTIFIER,
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ADDRESS_LO, ntfy_lo >> 4) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_NOTIFIER_CONTROL,
+ NVVAL(NVCA7E, SET_NOTIFIER_CONTROL, MODE, asyw->ntfy.awaken));
+
+ return 0;
+}
+
+static const struct nv50_wndw_func
+wndwca7e = {
+ .acquire = wndwc37e_acquire,
+ .release = wndwc37e_release,
+ .ntfy_set = wndwca7e_ntfy_set,
+ .ntfy_clr = wndwca7e_ntfy_clr,
+ .ntfy_reset = corec37d_ntfy_init,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = wndwc57e_ilut,
+ .ilut_identity = true,
+ .ilut_size = 1024,
+ .xlut_set = wndwca7e_ilut_set,
+ .xlut_clr = wndwca7e_ilut_clr,
+ .csc = base907c_csc,
+ .csc_set = wndwc57e_csc_set,
+ .csc_clr = wndwc57e_csc_clr,
+ .image_set = wndwca7e_image_set,
+ .image_clr = wndwca7e_image_clr,
+ .blend_set = wndwc37e_blend_set,
+ .update = wndwc37e_update,
+};
+
+int
+wndwca7e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ s32 oclass, struct nv50_wndw **pwndw)
+{
+ return wndwc37e_new_(&wndwca7e, drm, type, index, oclass, BIT(index >> 1), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/gv100_fence.c b/drivers/gpu/drm/nouveau/gv100_fence.c
new file mode 100644
index 000000000000..cccdeca72002
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/gv100_fence.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fence.h"
+
+#include "nv50_display.h"
+
+#include <nvif/push906f.h>
+
+#include <nvhw/class/clc36f.h>
+
+static int
+gv100_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
+{
+ struct nvif_push *push = &chan->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 8);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(virtual),
+ SEM_ADDR_HI, upper_32_bits(virtual),
+ SEM_PAYLOAD_LO, sequence);
+
+ PUSH_MTHD(push, NVC36F, SEM_EXECUTE,
+ NVDEF(NVC36F, SEM_EXECUTE, OPERATION, RELEASE) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_WFI, EN) |
+ NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS));
+
+ PUSH_MTHD(push, NVC36F, NON_STALL_INTERRUPT, 0);
+
+ PUSH_KICK(push);
+ return 0;
+}
+
+static int
+gv100_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
+{
+ struct nvif_push *push = &chan->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 6);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(virtual),
+ SEM_ADDR_HI, upper_32_bits(virtual),
+ SEM_PAYLOAD_LO, sequence);
+
+ PUSH_MTHD(push, NVC36F, SEM_EXECUTE,
+ NVDEF(NVC36F, SEM_EXECUTE, OPERATION, ACQ_CIRC_GEQ) |
+ NVDEF(NVC36F, SEM_EXECUTE, ACQUIRE_SWITCH_TSG, EN) |
+ NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT));
+
+ PUSH_KICK(push);
+ return 0;
+}
+
+static int
+gv100_fence_context_new(struct nouveau_channel *chan)
+{
+ struct nv84_fence_chan *fctx;
+ int ret;
+
+ ret = nv84_fence_context_new(chan);
+ if (ret)
+ return ret;
+
+ fctx = chan->fence;
+ fctx->base.emit32 = gv100_fence_emit32;
+ fctx->base.sync32 = gv100_fence_sync32;
+ return 0;
+}
+
+int
+gv100_fence_create(struct nouveau_drm *drm)
+{
+ struct nv84_fence_priv *priv;
+ int ret;
+
+ ret = nv84_fence_create(drm);
+ if (ret)
+ return ret;
+
+ priv = drm->fence;
+ priv->base.context_new = gv100_fence_context_new;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
new file mode 100644
index 000000000000..8735dda4c8a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clc36f_h_
+#define _clc36f_h_
+
+#define NVC36F_NON_STALL_INTERRUPT (0x00000020)
+#define NVC36F_NON_STALL_INTERRUPT_HANDLE 31:0
+#define NVC36F_SEM_ADDR_LO (0x0000005c)
+#define NVC36F_SEM_ADDR_LO_OFFSET 31:2
+#define NVC36F_SEM_ADDR_HI (0x00000060)
+#define NVC36F_SEM_ADDR_HI_OFFSET 7:0
+#define NVC36F_SEM_PAYLOAD_LO (0x00000064)
+#define NVC36F_SEM_PAYLOAD_LO_PAYLOAD 31:0
+#define NVC36F_SEM_PAYLOAD_HI (0x00000068)
+#define NVC36F_SEM_PAYLOAD_HI_PAYLOAD 31:0
+#define NVC36F_SEM_EXECUTE (0x0000006c)
+#define NVC36F_SEM_EXECUTE_OPERATION 2:0
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000
+#define NVC36F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005
+#define NVC36F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006
+#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12
+#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000
+#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001
+#define NVC36F_SEM_EXECUTE_RELEASE_WFI 20:20
+#define NVC36F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000
+#define NVC36F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001
+#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE 24:24
+#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000
+#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001
+#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25
+#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000
+#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001
+#define NVC36F_SEM_EXECUTE_REDUCTION 30:27
+#define NVC36F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000
+#define NVC36F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001
+#define NVC36F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002
+#define NVC36F_SEM_EXECUTE_REDUCTION_IAND 0x00000003
+#define NVC36F_SEM_EXECUTE_REDUCTION_IOR 0x00000004
+#define NVC36F_SEM_EXECUTE_REDUCTION_IADD 0x00000005
+#define NVC36F_SEM_EXECUTE_REDUCTION_INC 0x00000006
+#define NVC36F_SEM_EXECUTE_REDUCTION_DEC 0x00000007
+#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT 31:31
+#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000
+#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h
new file mode 100644
index 000000000000..092aebe9551c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clc97b_h_
+#define _clc97b_h_
+
+// dma opcode instructions
+#define NVC97B_DMA
+#define NVC97B_DMA_OPCODE 31:29
+#define NVC97B_DMA_OPCODE_METHOD 0x00000000
+#define NVC97B_DMA_OPCODE_JUMP 0x00000001
+#define NVC97B_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC97B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC97B_DMA_METHOD_COUNT 27:18
+#define NVC97B_DMA_METHOD_OFFSET 15:2
+#define NVC97B_DMA_DATA 31:0
+#define NVC97B_DMA_DATA_NOP 0x00000000
+#define NVC97B_DMA_JUMP_OFFSET 15:2
+#define NVC97B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+#endif // _clc97b_h_
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h
new file mode 100644
index 000000000000..0fec6fc21d44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h
@@ -0,0 +1,868 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clca7d_h_
+#define _clca7d_h_
+
+// class methods
+#define NVCA7D_UPDATE (0x00000200)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING 21:20
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_REASON 19:12
+#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS 24:24
+#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NVCA7D_UPDATE_RELEASE_ELV 0:0
+#define NVCA7D_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVCA7D_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN 8:4
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_UPDATE_FORCE_FULLSCREEN 28:28
+#define NVCA7D_UPDATE_FORCE_FULLSCREEN_FALSE (0x00000000)
+#define NVCA7D_UPDATE_FORCE_FULLSCREEN_TRUE (0x00000001)
+#define NVCA7D_SET_NOTIFIER_CONTROL (0x0000020C)
+#define NVCA7D_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVCA7D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVCA7D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY 12:12
+#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS (0x00000218)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+#define NVCA7D_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000260)
+#define NVCA7D_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000264)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001)
+
+#define NVCA7D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK 7:0
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL (0x0000000C)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16
+#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+
+#define NVCA7D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER 3:0
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i))
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F)
+#define NVCA7D_WINDOW_SET_CONTROL_HIDE 8:8
+#define NVCA7D_WINDOW_SET_CONTROL_HIDE_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_HIDE_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS 9:9
+#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME 10:10
+#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED 30:30
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT 26:25
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_PITCH_BLOCKLINEAR (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_PITCH (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_BLOCKLINEAR (0x00000002)
+#define NVCA7D_WINDOW_SET_PHYSICAL(a) (0x00001014 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW 31:0
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_NONE (0x00000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW0 (0x00000001)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW1 (0x00000002)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW2 (0x00000004)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW3 (0x00000008)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW4 (0x00000010)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW5 (0x00000020)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW6 (0x00000040)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW7 (0x00000080)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW8 (0x00000100)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW9 (0x00000200)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW10 (0x00000400)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW11 (0x00000800)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW12 (0x00001000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW13 (0x00002000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW14 (0x00004000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW15 (0x00008000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW16 (0x00010000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW17 (0x00020000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW18 (0x00040000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW19 (0x00080000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW20 (0x00100000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW21 (0x00200000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW22 (0x00400000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW23 (0x00800000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW24 (0x01000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW25 (0x02000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW26 (0x04000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW27 (0x08000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW28 (0x10000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW29 (0x20000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW30 (0x40000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW31 (0x80000000)
+
+#define NVCA7D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V 4:4
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28
+#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_444 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444NP (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F)
+#define NVCA7D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_STRUCTURE 1:0
+#define NVCA7D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2
+#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER 3:3
+#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE 11:10
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN 8:4
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCKOUT_WINDOW 15:12
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE 23:22
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN 20:16
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN 28:24
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE 30:30
+#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE 31:31
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS 5:4
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE 10:8
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_PHASE 13:12
+#define NVCA7D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000800 + (b)*0x00000004)
+#define NVCA7D_HEAD_SET_DISPLAY_ID_CODE 31:0
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED 5:5
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED 16:16
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_ELV_START 31:17
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NVCA7D_HEAD_SET_TILE_MASK(a) (0x00002060 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE 7:0
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE0 (0x00000001)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE1 (0x00000002)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE2 (0x00000004)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE3 (0x00000008)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE4 (0x00000010)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE5 (0x00000020)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE6 (0x00000040)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE7 (0x00000080)
+#define NVCA7D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_SIZE_WIDTH 15:0
+#define NVCA7D_HEAD_SET_RASTER_SIZE_HEIGHT 31:16
+#define NVCA7D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NVCA7D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NVCA7D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NVCA7D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NVCA7D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NVCA7D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CRC(a) (0x00002150 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CRC_ADDRESS_HI 31:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC(a) (0x00002154 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ADDRESS_LO 31:4
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET 3:2
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_IOVA (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE 0:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT(a) (0x00002158 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT_ADDRESS_HI 31:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT(a) (0x0000215C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ADDRESS_LO 31:4
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET 3:2
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_IOVA (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE 0:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR(a,b) (0x00002170 + (a)*0x00000800 + (b)*0x00000004)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR_ADDRESS_HI 31:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(a,b) (0x00002178 + (a)*0x00000800 + (b)*0x00000004)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ADDRESS_LO 31:4
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET 3:2
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_IOVA (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND 1:1
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_PITCH (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_BLOCKLINEAR (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE 0:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8
+#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE 3:2
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SIZE 18:8
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND 4:4
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_LEVEL 25:20
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS 5:5
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS_SIZE_3BITS (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS_SIZE_4BITS (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0
+
+#define NVCA7D_TILE_SET_TILE_SIZE(a) (0x00006000 + (a)*0x00000200)
+#define NVCA7D_TILE_SET_TILE_SIZE_START 14:0
+#define NVCA7D_TILE_SET_TILE_SIZE_WIDTH 30:16
+
+#endif // _clca7d_h
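
Editorial aside, not part of the patch: the "HI:LO" tokens in the class headers above (e.g. 31:31, 9:8) follow NVIDIA's DRF convention, where a single macro encodes both field bounds and a ternary expression is used to pick either bound apart. The minimal sketch below shows that trick with hypothetical FIELD_* helpers (nouveau's real accessors live in include/nvhw/drf.h and differ in naming); the field and value defines are copied from clca7d.h above.

#include <stdint.h>
#include <stdio.h>

/* Field definitions copied from clca7d.h above. */
#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE                   31:31
#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE             (0x00000001)
#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE                      9:8
#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64              (0x00000001)
#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT                    7:0
#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8           (0x000000CF)

/* Hypothetical DRF-style helpers: "(1 ? HI:LO)" yields HI, "(0 ? HI:LO)" yields LO. */
#define FIELD_LO(f)      (0 ? f)
#define FIELD_HI(f)      (1 ? f)
#define FIELD_MASK(f)    ((0xffffffffU >> (31 - FIELD_HI(f) + FIELD_LO(f))) << FIELD_LO(f))
#define FIELD_SET(f, v)  (((uint32_t)(v) << FIELD_LO(f)) & FIELD_MASK(f))

int main(void)
{
	uint32_t ctrl = 0;

	/* Pack a HEAD_SET_CONTROL_CURSOR method word: enabled, 64x64, A8R8G8B8. */
	ctrl |= FIELD_SET(NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE,
			  NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE);
	ctrl |= FIELD_SET(NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE,
			  NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64);
	ctrl |= FIELD_SET(NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT,
			  NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8);

	printf("HEAD_SET_CONTROL_CURSOR = 0x%08x\n", ctrl);   /* 0x800001cf */
	return 0;
}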
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h
new file mode 100644
index 000000000000..ebfb2e48a4f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clca7e_h_
+#define _clca7e_h_
+
+// class methods
+#define NVCA7E_SET_NOTIFIER_CONTROL (0x00000220)
+#define NVCA7E_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVCA7E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVCA7E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVCA7E_SET_SIZE (0x00000224)
+#define NVCA7E_SET_SIZE_WIDTH 15:0
+#define NVCA7E_SET_SIZE_HEIGHT 31:16
+#define NVCA7E_SET_STORAGE (0x00000228)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NVCA7E_SET_PARAMS (0x0000022C)
+#define NVCA7E_SET_PARAMS_FORMAT 7:0
+#define NVCA7E_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NVCA7E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F)
+#define NVCA7E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NVCA7E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVCA7E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E)
+#define NVCA7E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVCA7E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6)
+#define NVCA7E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NVCA7E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9)
+#define NVCA7E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NVCA7E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NVCA7E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NVCA7E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NVCA7E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028)
+#define NVCA7E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B)
+#define NVCA7E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055)
+#define NVCA7E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056)
+#define NVCA7E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058)
+#define NVCA7E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075)
+#define NVCA7E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076)
+#define NVCA7E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078)
+#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18
+#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000)
+#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001)
+#define NVCA7E_SET_PARAMS_SWAP_UV 19:19
+#define NVCA7E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000)
+#define NVCA7E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001)
+#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE 22:22
+#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000)
+#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001)
+#define NVCA7E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004)
+#define NVCA7E_SET_PLANAR_STORAGE_PITCH 12:0
+#define NVCA7E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004)
+#define NVCA7E_SET_POINT_IN_X 15:0
+#define NVCA7E_SET_POINT_IN_Y 31:16
+#define NVCA7E_SET_SIZE_IN (0x00000298)
+#define NVCA7E_SET_SIZE_IN_WIDTH 15:0
+#define NVCA7E_SET_SIZE_IN_HEIGHT 31:16
+#define NVCA7E_SET_SIZE_OUT (0x000002A4)
+#define NVCA7E_SET_SIZE_OUT_WIDTH 15:0
+#define NVCA7E_SET_SIZE_OUT_HEIGHT 31:16
+#define NVCA7E_SET_PRESENT_CONTROL (0x00000308)
+#define NVCA7E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4
+#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000)
+#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001)
+#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8
+#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000)
+#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001)
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE 13:12
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000)
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001)
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002)
+#define NVCA7E_SET_ILUT_CONTROL (0x00000440)
+#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE 0:0
+#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVCA7E_SET_ILUT_CONTROL_MIRROR 1:1
+#define NVCA7E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVCA7E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVCA7E_SET_ILUT_CONTROL_MODE 3:2
+#define NVCA7E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVCA7E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVCA7E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVCA7E_SET_ILUT_CONTROL_SIZE 18:8
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000650)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000654)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ISO(b) (0x00000658 + (b)*0x00000004)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ISO_ADDRESS_HI 31:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO(b) (0x00000670 + (b)*0x00000004)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ADDRESS_LO 31:4
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET 3:2
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_IOVA (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND 1:1
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_PITCH (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_BLOCKLINEAR (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE 0:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_DISABLE (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_ENABLE (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ILUT (0x00000688)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ILUT_ADDRESS_HI 31:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT (0x0000068C)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ADDRESS_LO 31:4
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET 3:2
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_IOVA (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE 0:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_DISABLE (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_ENABLE (0x00000001)
+
+#endif // _clca7e_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h
new file mode 100644
index 000000000000..c9d74bd95e0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb100_dev_hshub_base_h__
+#define __gb100_dev_hshub_base_h__
+
+#define NV_PFB_HSHUB0 0x00870fff:0x00870000
+
+#define NV_PFB_HSHUB 0x00000FFF:0x00000000 /* RW--D */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO 0x00000E50 /* RW-4R */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xFFFFFF00 /* ----V */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI 0x00000E54 /* RW-4R */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO 0x000006C0 /* RW-4R */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xFFFFFF00 /* ----V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI 0x000006C4 /* RW-4R */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */
+
+#endif // __gb100_dev_hshub_base_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h
new file mode 100644
index 000000000000..4d0bb8e14298
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb10b_dev_fb_h__
+#define __gb10b_dev_fb_h__
+
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO 0x008a1d58 /* RW-4R */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xffffff00 /* RW--V */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI 0x008a1d5c /* RW-4R */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000fffff /* RW--V */
+
+#endif // __gb10b_dev_fb_h__
+
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h
new file mode 100644
index 000000000000..b09f04b31738
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb202_dev_ce_h__
+#define __gb202_dev_ce_h__
+
+#define NV_CE_GRCE_MASK 0x001040d8 /* C--4R */
+#define NV_CE_GRCE_MASK_VALUE 9:0 /* C--VF */
+#define NV_CE_GRCE_MASK_VALUE_INIT 0x00f /* C---V */
+
+#endif // __gb202_dev_ce_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h
new file mode 100644
index 000000000000..ed359cb528fb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb202_dev_therm_h__
+#define __gb202_dev_therm_h__
+
+#define NV_THERM_I2CS_SCRATCH 0x00ad00bc /* RW-4R */
+#define NV_THERM_I2CS_SCRATCH_DATA 31:0 /* RWIVF */
+#define NV_THERM_I2CS_SCRATCH_DATA_INIT 0x00000000 /* RWI-V */
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE NV_THERM_I2CS_SCRATCH
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS 31:0
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS 0x000000FF
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_FAILED 0x00000000
+
+#endif // __gb202_dev_therm_h__
+
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h
new file mode 100644
index 000000000000..52171b412aa1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_falcon_v4_h__
+#define __gh100_dev_falcon_v4_h__
+
+#define NV_PFALCON_FALCON_MAILBOX0 0x00000040 /* RW-4R */
+#define NV_PFALCON_FALCON_MAILBOX0_DATA 31:0 /* RWIVF */
+#define NV_PFALCON_FALCON_MAILBOX0_DATA_INIT 0x00000000 /* RWI-V */
+#define NV_PFALCON_FALCON_MAILBOX1 0x00000044 /* RW-4R */
+#define NV_PFALCON_FALCON_MAILBOX1_DATA 31:0 /* RWIVF */
+#define NV_PFALCON_FALCON_MAILBOX1_DATA_INIT 0x00000000 /* RWI-V */
+
+#define NV_PFALCON_FALCON_HWCFG2 0x000000f4 /* R--4R */
+#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN 13:13 /* R--VF */
+#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN_LOCK 0x00000001 /* R---V */
+#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN_UNLOCK 0x00000000 /* R---V */
+
+#endif // __gh100_dev_falcon_v4_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h
new file mode 100644
index 000000000000..819f09465952
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_fb_h_
+#define __gh100_dev_fb_h_
+
+#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT 8 /* */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO 0x00100A34 /* RW-4R */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI 0x00100A38 /* RW-4R */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */
+
+#endif // __gh100_dev_fb_h_
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h
new file mode 100644
index 000000000000..e9507242cae5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_fsp_pri_h__
+#define __gh100_dev_fsp_pri_h__
+
+#define NV_PFSP 0x8F3FFF:0x8F0000 /* RW--D */
+
+#define NV_PFSP_MSGQ_HEAD(i) (0x008F2c80+(i)*8) /* RW-4A */
+#define NV_PFSP_MSGQ_HEAD__SIZE_1 8 /* */
+#define NV_PFSP_MSGQ_HEAD_VAL 31:0 /* RWIUF */
+#define NV_PFSP_MSGQ_HEAD_VAL_INIT 0x00000000 /* RWI-V */
+#define NV_PFSP_MSGQ_TAIL(i) (0x008F2c84+(i)*8) /* RW-4A */
+#define NV_PFSP_MSGQ_TAIL__SIZE_1 8 /* */
+#define NV_PFSP_MSGQ_TAIL_VAL 31:0 /* RWIUF */
+#define NV_PFSP_MSGQ_TAIL_VAL_INIT 0x00000000 /* RWI-V */
+
+#define NV_PFSP_QUEUE_HEAD(i) (0x008F2c00+(i)*8) /* RW-4A */
+#define NV_PFSP_QUEUE_HEAD__SIZE_1 8 /* */
+#define NV_PFSP_QUEUE_HEAD_ADDRESS 31:0 /* RWIVF */
+#define NV_PFSP_QUEUE_HEAD_ADDRESS_INIT 0x00000000 /* RWI-V */
+#define NV_PFSP_QUEUE_TAIL(i) (0x008F2c04+(i)*8) /* RW-4A */
+#define NV_PFSP_QUEUE_TAIL__SIZE_1 8 /* */
+#define NV_PFSP_QUEUE_TAIL_ADDRESS 31:0 /* RWIVF */
+#define NV_PFSP_QUEUE_TAIL_ADDRESS_INIT 0x00000000 /* RWI-V */
+
+#endif // __gh100_dev_fsp_pri_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h
new file mode 100644
index 000000000000..6707e0e3b96b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_mmu_h__
+#define __gh100_dev_mmu_h__
+
+#define NV_MMU_PTE /* ----G */
+#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */
+#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */
+#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_PTE_KIND (1*32+7):(1*32+4) /* RWXVF */
+#define NV_MMU_PTE_KIND_INVALID 0x07 /* R---V */
+#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */
+#define NV_MMU_PTE_KIND_GENERIC_MEMORY 0x6 /* R---V */
+#define NV_MMU_PTE_KIND_Z16 0x1 /* R---V */
+#define NV_MMU_PTE_KIND_S8 0x2 /* R---V */
+#define NV_MMU_PTE_KIND_S8Z24 0x3 /* R---V */
+#define NV_MMU_PTE_KIND_ZF32_X24S8 0x4 /* R---V */
+#define NV_MMU_PTE_KIND_Z24S8 0x5 /* R---V */
+#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE 0x8 /* R---V */
+#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC 0x9 /* R---V */
+#define NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC 0xA /* R---V */
+#define NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC 0xB /* R---V */
+#define NV_MMU_PTE_KIND_S8Z24_COMPRESSIBLE_DISABLE_PLC 0xC /* R---V */
+#define NV_MMU_PTE_KIND_ZF32_X24S8_COMPRESSIBLE_DISABLE_PLC 0xD /* R---V */
+#define NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC 0xE /* R---V */
+#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0xF /* R---V */
+
+#define NV_MMU_VER3_PDE /* ----G */
+#define NV_MMU_VER3_PDE_IS_PTE 0:0 /* RWXVF */
+#define NV_MMU_VER3_PDE_IS_PTE_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_PDE_IS_PTE_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_PDE_VALID 0:0 /* RWXVF */
+#define NV_MMU_VER3_PDE_VALID_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_PDE_VALID_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE 2:1 /* RWXVF */
+#define NV_MMU_VER3_PDE_APERTURE_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF 5:3 /* RWXVF */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_ADDRESS 51:12 /* RWXVF */
+#define NV_MMU_VER3_PDE_ADDRESS_SHIFT 0x0000000c /* */
+#define NV_MMU_VER3_PDE__SIZE 8
+
+#define NV_MMU_VER3_DUAL_PDE /* ----G */
+#define NV_MMU_VER3_DUAL_PDE_IS_PTE 0:0 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_VALID 0:0 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG 5:3 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_BIG 51:8 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL 69:67 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_SMALL 115:76 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */
+#define NV_MMU_VER3_DUAL_PDE__SIZE 16
+
+#define NV_MMU_VER3_PTE /* ----G */
+#define NV_MMU_VER3_PTE_VALID 0:0 /* RWXVF */
+#define NV_MMU_VER3_PTE_VALID_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_PTE_VALID_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE 2:1 /* RWXVF */
+#define NV_MMU_VER3_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF 7:3 /* RWXVF */
+#define NV_MMU_VER3_PTE_PCF_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_SPARSE 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_MAPPING_NOWHERE 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_NO_VALID_4KB_PAGE 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACE 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACE 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACE 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACE 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACE 0x00000004 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACE 0x00000005 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACE 0x00000006 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACE 0x00000007 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACE 0x00000008 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACE 0x00000009 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACE 0x0000000A /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACE 0x0000000B /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACE 0x0000000C /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACE 0x0000000D /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACE 0x0000000E /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACE 0x0000000F /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD 0x00000010 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD 0x00000011 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD 0x00000012 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD 0x00000013 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD 0x00000014 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD 0x00000015 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACD 0x00000016 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACD 0x00000017 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACD 0x00000018 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACD 0x00000019 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACD 0x0000001A /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACD 0x0000001B /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACD 0x0000001C /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACD 0x0000001D /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACD 0x0000001E /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACD 0x0000001F /* RW--V */
+#define NV_MMU_VER3_PTE_KIND 11:8 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS 51:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS_SYS 51:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS_PEER 51:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS_VID 39:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_PEER_ID 63:(64-3) /* RWXVF */
+#define NV_MMU_VER3_PTE_PEER_ID_0 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_1 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_2 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_3 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_4 0x00000004 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_5 0x00000005 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_6 0x00000006 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_7 0x00000007 /* RW--V */
+#define NV_MMU_VER3_PTE_ADDRESS_SHIFT 0x0000000c /* */
+#define NV_MMU_VER3_PTE__SIZE 8
+
+#endif // __gh100_dev_mmu_h__
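
Editorial aside, not part of the patch: the VER3 page-table entry above is an 8-byte descriptor, so its field ranges (e.g. ADDRESS 51:12) extend past bit 31. A minimal sketch of packing one such entry with hypothetical 64-bit FIELD64_* helpers follows; the field and value defines are copied from dev_mmu.h above, and the physical address is an arbitrary page-aligned example.

#include <stdint.h>
#include <stdio.h>

/* Field definitions copied from dev_mmu.h above. */
#define NV_MMU_VER3_PTE_VALID                            0:0
#define NV_MMU_VER3_PTE_VALID_TRUE                       0x1
#define NV_MMU_VER3_PTE_APERTURE                         2:1
#define NV_MMU_VER3_PTE_APERTURE_SYSTEM_COHERENT_MEMORY  0x00000002
#define NV_MMU_VER3_PTE_KIND                             11:8
#define NV_MMU_PTE_KIND_GENERIC_MEMORY                   0x6
#define NV_MMU_VER3_PTE_ADDRESS                          51:12
#define NV_MMU_VER3_PTE_ADDRESS_SHIFT                    0x0000000c

/* Hypothetical 64-bit DRF-style helpers for "HI:LO" field tokens. */
#define FIELD64_LO(f)      ((uint64_t)(0 ? f))
#define FIELD64_HI(f)      ((uint64_t)(1 ? f))
#define FIELD64_MASK(f)    (((~0ULL) >> (63 - FIELD64_HI(f) + FIELD64_LO(f))) << FIELD64_LO(f))
#define FIELD64_SET(f, v)  (((uint64_t)(v) << FIELD64_LO(f)) & FIELD64_MASK(f))

int main(void)
{
	uint64_t pa = 0x12345000ULL;   /* hypothetical page-aligned system address */
	uint64_t pte = 0;

	pte |= FIELD64_SET(NV_MMU_VER3_PTE_VALID, NV_MMU_VER3_PTE_VALID_TRUE);
	pte |= FIELD64_SET(NV_MMU_VER3_PTE_APERTURE,
			   NV_MMU_VER3_PTE_APERTURE_SYSTEM_COHERENT_MEMORY);
	pte |= FIELD64_SET(NV_MMU_VER3_PTE_KIND, NV_MMU_PTE_KIND_GENERIC_MEMORY);
	pte |= FIELD64_SET(NV_MMU_VER3_PTE_ADDRESS,
			   pa >> NV_MMU_VER3_PTE_ADDRESS_SHIFT);

	printf("VER3 PTE = 0x%016llx\n", (unsigned long long)pte);
	return 0;
}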
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h
new file mode 100644
index 000000000000..8ff4663168d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_riscv_pri_h__
+#define __gh100_dev_riscv_pri_h__
+
+#define NV_PRISCV_RISCV_CPUCTL 0x00000388 /* RW-4R */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED 4:4 /* R-IVF */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED_INIT 0x00000001 /* R-I-V */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED_TRUE 0x00000001 /* R---V */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED_FALSE 0x00000000 /* R---V */
+
+#endif // __gh100_dev_riscv_pri_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h
new file mode 100644
index 000000000000..49b4816cb00b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_therm_h__
+#define __gh100_dev_therm_h__
+
+#define NV_THERM_I2CS_SCRATCH 0x000200bc /* RW-4R */
+#define NV_THERM_I2CS_SCRATCH_DATA 31:0 /* RWIVF */
+#define NV_THERM_I2CS_SCRATCH_DATA_INIT 0x00000000 /* RWI-V */
+
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE NV_THERM_I2CS_SCRATCH
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS 31:0
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS 0x000000FF
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_FAILED 0x00000000
+
+#endif // __gh100_dev_therm_h__
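
Editorial aside, not part of the patch: the FSP_BOOT_COMPLETE defines above alias the I2CS scratch register, which suggests firmware reports boot completion by placing the SUCCESS pattern there. A minimal sketch of that reading follows; gpu_read32() is a stand-in stubbed so the example runs on its own, and the driver's actual polling logic may differ.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Register offset and status values copied from dev_therm.h above. */
#define NV_THERM_I2CS_SCRATCH                                   0x000200bc
#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS  0x000000FF

/* Placeholder for a BAR0 MMIO read; stubbed to the success pattern here. */
static uint32_t gpu_read32(uint32_t offset)
{
	(void)offset;
	return NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS;
}

/* Compare the scratch register contents against the SUCCESS pattern. */
static bool fsp_boot_complete(void)
{
	return gpu_read32(NV_THERM_I2CS_SCRATCH) ==
	       NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS;
}

int main(void)
{
	printf("FSP boot complete: %s\n", fsp_boot_complete() ? "yes" : "no");
	return 0;
}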
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h
new file mode 100644
index 000000000000..12b49e9894a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_xtl_ep_pri_h__
+#define __gh100_dev_xtl_ep_pri_h__
+
+#define NV_EP_PCFGM 0x92FFF:0x92000 /* RW--D */
+
+#endif // __gh100_dev_xtl_ep_pri_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h
new file mode 100644
index 000000000000..1a891bd33fa3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_pri_nv_xal_ep_h__
+#define __gh100_pri_nv_xal_ep_h__
+
+#define NV_XAL_EP_BAR0_WINDOW_BASE_SHIFT 0x000010
+#define NV_XAL_EP_BAR0_WINDOW_BASE 21:0
+#define NV_XAL_EP_BAR0_WINDOW 0x0010fd40
+
+#endif // __gh100_pri_nv_xal_ep_h__
+
diff --git a/drivers/gpu/drm/nouveau/include/nvif/chan.h b/drivers/gpu/drm/nouveau/include/nvif/chan.h
new file mode 100644
index 000000000000..c329a29068d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/chan.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVIF_CHAN_H__
+#define __NVIF_CHAN_H__
+#include "push.h"
+
+struct nvif_chan {
+ const struct nvif_chan_func {
+ struct {
+ u32 (*read_get)(struct nvif_chan *);
+ } push;
+
+ struct {
+ u32 (*read_get)(struct nvif_chan *);
+ void (*push)(struct nvif_chan *, bool main, u64 addr, u32 size,
+ bool no_prefetch);
+ void (*kick)(struct nvif_chan *);
+ int (*post)(struct nvif_chan *, u32 gpptr, u32 pbptr);
+ u32 post_size;
+ } gpfifo;
+
+ struct {
+ int (*release)(struct nvif_chan *, u64 addr, u32 data);
+ } sem;
+ } *func;
+
+ struct {
+ struct nvif_map map;
+ } userd;
+
+ struct {
+ struct nvif_map map;
+ u32 cur;
+ u32 max;
+ int free;
+ } gpfifo;
+
+ struct {
+ struct nvif_map map;
+ u64 addr;
+ } sema;
+
+ struct nvif_push push;
+
+ struct nvif_user *usermode;
+ u32 doorbell_token;
+};
+
+int nvif_chan_dma_wait(struct nvif_chan *, u32 push_nr);
+
+void nvif_chan_gpfifo_ctor(const struct nvif_chan_func *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, struct nvif_chan *);
+int nvif_chan_gpfifo_wait(struct nvif_chan *, u32 gpfifo_nr, u32 push_nr);
+void nvif_chan_gpfifo_push(struct nvif_chan *, u64 addr, u32 size, bool no_prefetch);
+int nvif_chan_gpfifo_post(struct nvif_chan *);
+
+void nvif_chan506f_gpfifo_push(struct nvif_chan *, bool main, u64 addr, u32 size, bool no_prefetch);
+void nvif_chan506f_gpfifo_kick(struct nvif_chan *);
+
+int nvif_chan906f_ctor_(const struct nvif_chan_func *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_chan *);
+u32 nvif_chan906f_read_get(struct nvif_chan *);
+u32 nvif_chan906f_gpfifo_read_get(struct nvif_chan *);
+int nvif_chan906f_gpfifo_post(struct nvif_chan *, u32 gpptr, u32 pbptr);
+
+int nvif_chan506f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size);
+int nvif_chan906f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr);
+int nvif_chanc36f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_user *usermode, u32 doorbell_token);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index ea937fa7bc55..ea8267e0d8da 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -29,6 +29,8 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_TURING 0x0c
#define NV_DEVICE_INFO_V0_AMPERE 0x0d
#define NV_DEVICE_INFO_V0_ADA 0x0e
+#define NV_DEVICE_INFO_V0_HOPPER 0x0f
+#define NV_DEVICE_INFO_V0_BLACKWELL 0x10
__u8 family;
__u8 pad06[2];
__u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 824e052dcc25..ff6823cb2cd8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -57,12 +57,15 @@
#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
#define KEPLER_INLINE_TO_MEMORY_B 0x0000a140
+#define BLACKWELL_INLINE_TO_MEMORY_A 0x0000cd40
#define NV04_DISP /* cl0046.h */ 0x00000046
#define VOLTA_USERMODE_A 0x0000c361
#define TURING_USERMODE_A 0x0000c461
#define AMPERE_USERMODE_A 0x0000c561
+#define HOPPER_USERMODE_A 0x0000c661
+#define BLACKWELL_USERMODE_A 0x0000c761
#define MAXWELL_FAULT_BUFFER_A /* clb069.h */ 0x0000b069
#define VOLTA_FAULT_BUFFER_A /* clb069.h */ 0x0000c369
@@ -85,6 +88,9 @@
#define TURING_CHANNEL_GPFIFO_A /* if0020.h */ 0x0000c46f
#define AMPERE_CHANNEL_GPFIFO_A /* if0020.h */ 0x0000c56f
#define AMPERE_CHANNEL_GPFIFO_B /* if0020.h */ 0x0000c76f
+#define HOPPER_CHANNEL_GPFIFO_A 0x0000c86f
+#define BLACKWELL_CHANNEL_GPFIFO_A 0x0000c96f
+#define BLACKWELL_CHANNEL_GPFIFO_B 0x0000ca6f
#define NV50_DISP /* if0010.h */ 0x00005070
#define G82_DISP /* if0010.h */ 0x00008270
@@ -102,8 +108,10 @@
#define TU102_DISP /* if0010.h */ 0x0000c570
#define GA102_DISP /* if0010.h */ 0x0000c670
#define AD102_DISP /* if0010.h */ 0x0000c770
+#define GB202_DISP 0x0000ca70
#define GV100_DISP_CAPS 0x0000c373
+#define GB202_DISP_CAPS 0x0000ca73
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -118,6 +126,7 @@
#define GV100_DISP_CURSOR /* if0014.h */ 0x0000c37a
#define TU102_DISP_CURSOR /* if0014.h */ 0x0000c57a
#define GA102_DISP_CURSOR /* if0014.h */ 0x0000c67a
+#define GB202_DISP_CURSOR 0x0000ca7a
#define NV50_DISP_OVERLAY /* if0014.h */ 0x0000507b
#define G82_DISP_OVERLAY /* if0014.h */ 0x0000827b
@@ -128,6 +137,7 @@
#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c37b
#define TU102_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c57b
#define GA102_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c67b
+#define GB202_DISP_WINDOW_IMM_CHANNEL_DMA 0x0000ca7b
#define NV50_DISP_BASE_CHANNEL_DMA /* if0014.h */ 0x0000507c
#define G82_DISP_BASE_CHANNEL_DMA /* if0014.h */ 0x0000827c
@@ -153,6 +163,7 @@
#define TU102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c57d
#define GA102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c67d
#define AD102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c77d
+#define GB202_DISP_CORE_CHANNEL_DMA 0x0000ca7d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000827e
@@ -164,6 +175,7 @@
#define GV100_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c37e
#define TU102_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c57e
#define GA102_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c67e
+#define GB202_DISP_WINDOW_CHANNEL_DMA 0x0000ca7e
#define NV50_TESLA 0x00005097
#define G82_TESLA 0x00008297
@@ -189,16 +201,25 @@
#define TURING_A /* cl9097.h */ 0x0000c597
+#define AMPERE_A 0x0000c697
#define AMPERE_B /* cl9097.h */ 0x0000c797
#define ADA_A /* cl9097.h */ 0x0000c997
+#define HOPPER_A 0x0000cb97
+
+#define BLACKWELL_A 0x0000cd97
+#define BLACKWELL_B 0x0000ce97
+
#define NV74_BSP 0x000074b0
+#define NVB8B0_VIDEO_DECODER 0x0000b8b0
#define NVC4B0_VIDEO_DECODER 0x0000c4b0
#define NVC6B0_VIDEO_DECODER 0x0000c6b0
#define NVC7B0_VIDEO_DECODER 0x0000c7b0
#define NVC9B0_VIDEO_DECODER 0x0000c9b0
+#define NVCDB0_VIDEO_DECODER 0x0000cdb0
+#define NVCFB0_VIDEO_DECODER 0x0000cfb0
#define GT212_MSVLD 0x000085b1
#define IGT21A_MSVLD 0x000086b1
@@ -227,10 +248,14 @@
#define TURING_DMA_COPY_A 0x0000c5b5
#define AMPERE_DMA_COPY_A 0x0000c6b5
#define AMPERE_DMA_COPY_B 0x0000c7b5
+#define HOPPER_DMA_COPY_A 0x0000c8b5
+#define BLACKWELL_DMA_COPY_A 0x0000c9b5
+#define BLACKWELL_DMA_COPY_B 0x0000cab5
#define NVC4B7_VIDEO_ENCODER 0x0000c4b7
#define NVC7B7_VIDEO_ENCODER 0x0000c7b7
#define NVC9B7_VIDEO_ENCODER 0x0000c9b7
+#define NVCFB7_VIDEO_ENCODER 0x0000cfb7
#define FERMI_DECOMPRESS 0x000090b8
@@ -246,15 +271,25 @@
#define PASCAL_COMPUTE_B 0x0000c1c0
#define VOLTA_COMPUTE_A 0x0000c3c0
#define TURING_COMPUTE_A 0x0000c5c0
+#define AMPERE_COMPUTE_A 0x0000c6c0
#define AMPERE_COMPUTE_B 0x0000c7c0
#define ADA_COMPUTE_A 0x0000c9c0
+#define HOPPER_COMPUTE_A 0x0000cbc0
+#define BLACKWELL_COMPUTE_A 0x0000cdc0
+#define BLACKWELL_COMPUTE_B 0x0000cec0
#define NV74_CIPHER 0x000074c1
+#define NVB8D1_VIDEO_NVJPG 0x0000b8d1
#define NVC4D1_VIDEO_NVJPG 0x0000c4d1
#define NVC9D1_VIDEO_NVJPG 0x0000c9d1
+#define NVCDD1_VIDEO_NVJPG 0x0000cdd1
+#define NVCFD1_VIDEO_NVJPG 0x0000cfd1
+#define NVB8FA_VIDEO_OFA 0x0000b8fa
#define NVC6FA_VIDEO_OFA 0x0000c6fa
#define NVC7FA_VIDEO_OFA 0x0000c7fa
#define NVC9FA_VIDEO_OFA 0x0000c9fa
+#define NVCDFA_VIDEO_OFA 0x0000cdfa
+#define NVCFFA_VIDEO_OFA 0x0000cffa
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 8d205b6af46a..1b32dc701f61 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -16,7 +16,7 @@ struct nvif_object {
u32 handle;
s32 oclass;
void *priv; /*XXX: hack */
- struct {
+ struct nvif_map {
void __iomem *ptr;
u64 size;
} map;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h
index 6d3a8a3d2087..a493fababe3c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/push.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/push.h
@@ -31,6 +31,12 @@ struct nvif_push {
void (*kick)(struct nvif_push *push);
struct nvif_mem mem;
+ u64 addr;
+
+ struct {
+ u32 get;
+ u32 max;
+ } hw;
u32 *bgn;
u32 *cur;
@@ -41,7 +47,7 @@ struct nvif_push {
static inline __must_check int
PUSH_WAIT(struct nvif_push *push, u32 size)
{
- if (push->cur + size >= push->end) {
+ if (push->cur + size > push->end) {
int ret = push->wait(push, size);
if (ret)
return ret;
@@ -55,7 +61,11 @@ PUSH_WAIT(struct nvif_push *push, u32 size)
static inline int
PUSH_KICK(struct nvif_push *push)
{
- push->kick(push);
+ if (push->cur != push->bgn) {
+ push->kick(push);
+ push->bgn = push->cur;
+ }
+
return 0;
}
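
A minimal illustration of the boundary case the relaxed PUSH_WAIT test above covers, assuming a request that exactly fills the space left in the push buffer:

	/* cur + size == end: exactly enough room remains.              */
	/* old test:  cur + size >= end  -> true,  wait() runs anyway   */
	/* new test:  cur + size >  end  -> false, the space is used    */

The PUSH_KICK change is the complementary fix on the submit side: the kick callback only fires when dwords have actually been written since the last kick, so an empty kick becomes a no-op.
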
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push906f.h b/drivers/gpu/drm/nouveau/include/nvif/push906f.h
index cc2866bc8b0a..79df71de98d2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/push906f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/push906f.h
@@ -7,6 +7,7 @@
#ifndef PUSH906F_SUBC
// Host methods
#define PUSH906F_SUBC_NV906F 0
+#define PUSH906F_SUBC_NVC36F 0
// Twod
#define PUSH906F_SUBC_NV902D 3
diff --git a/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h b/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h
new file mode 100644
index 000000000000..c8d6b6319134
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h
@@ -0,0 +1,18 @@
+#ifndef __NVIF_PUSHC97B_H__
+#define __NVIF_PUSHC97B_H__
+#include <nvif/push.h>
+
+#include <nvhw/class/clc97b.h>
+
+#define PUSH_HDR(p,m,c) do { \
+ PUSH_ASSERT(!((m) & ~DRF_SMASK(NVC97B_DMA_METHOD_OFFSET)), "mthd"); \
+ PUSH_ASSERT(!((c) & ~DRF_MASK(NVC97B_DMA_METHOD_COUNT)), "size"); \
+ PUSH_DATA__((p), NVDEF(NVC97B, DMA, OPCODE, METHOD) | \
+ NVVAL(NVC97B, DMA, METHOD_COUNT, (c)) | \
+ NVVAL(NVC97B, DMA, METHOD_OFFSET, (m) >> 2), \
+ " mthd 0x%04x size %d - %s", (u32)(m), (u32)(c), __func__); \
+} while(0)
+
+#define PUSH_MTHD_HDR(p,s,m,c) PUSH_HDR(p,m,c)
+#define PUSH_MTHD_INC 4:4
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 46afb877a296..99579e7b9376 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -46,7 +46,10 @@ struct nvkm_device {
GV100 = 0x140,
TU100 = 0x160,
GA100 = 0x170,
+ GH100 = 0x180,
AD100 = 0x190,
+ GB10x = 0x1a0,
+ GB20x = 0x1b0,
} card_type;
u32 chipset;
u8 chiprev;
@@ -77,6 +80,13 @@ struct nvkm_device {
struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int type, int inst);
struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int type, int inst);
+enum nvkm_bar_id {
+ NVKM_BAR_INVALID = 0,
+ NVKM_BAR0_PRI,
+ NVKM_BAR1_FB,
+ NVKM_BAR2_INST,
+};
+
struct nvkm_device_func {
struct nvkm_device_pci *(*pci)(struct nvkm_device *);
struct nvkm_device_tegra *(*tegra)(struct nvkm_device *);
@@ -85,8 +95,8 @@ struct nvkm_device_func {
int (*init)(struct nvkm_device *);
void (*fini)(struct nvkm_device *, bool suspend);
int (*irq)(struct nvkm_device *);
- resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
- resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+ resource_size_t (*resource_addr)(struct nvkm_device *, enum nvkm_bar_id);
+ resource_size_t (*resource_size)(struct nvkm_device *, enum nvkm_bar_id);
bool cpu_coherent;
};
@@ -124,6 +134,9 @@ struct nvkm_device *nvkm_device_find(u64 name);
_temp; \
})
+#define NVKM_RD32_(p,o,dr) nvkm_rd32((p), (o) + (dr))
+#define NVKM_RD32(p,A...) DRF_RV(NVKM_RD32_, (p), 0, ##A)
+
void nvkm_device_del(struct nvkm_device **);
struct nvkm_device_oclass {
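
A small sketch of what the resource_addr/resource_size signature change above implies for callers (illustrative only; the surrounding variable names are assumptions):

	/* BAR1 (framebuffer aperture) base and size, now requested by a
	 * named BAR id instead of a raw BAR index. */
	resource_size_t fb_addr = device->func->resource_addr(device, NVKM_BAR1_FB);
	resource_size_t fb_size = device->func->resource_size(device, NVKM_BAR1_FB);
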
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
index 9d2a1abf64f9..d92ffd17b729 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: MIT */
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FSP , struct nvkm_fsp , fsp)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GSP , struct nvkm_gsp , gsp)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP , struct nvkm_top , top)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VFN , struct nvkm_vfn , vfn)
@@ -29,7 +30,7 @@ NVKM_LAYOUT_INST(NVKM_SUBDEV_IOCTRL , struct nvkm_subdev , ioctrl, 3)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FLA , struct nvkm_subdev , fla)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_BSP , struct nvkm_engine , bsp)
-NVKM_LAYOUT_INST(NVKM_ENGINE_CE , struct nvkm_engine , ce, 10)
+NVKM_LAYOUT_INST(NVKM_ENGINE_CE , struct nvkm_engine , ce, 20)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_CIPHER , struct nvkm_engine , cipher)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_DISP , struct nvkm_disp , disp)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_DMAOBJ , struct nvkm_dma , dma)
@@ -43,9 +44,9 @@ NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPDEC , struct nvkm_engine , mspdec)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPPP , struct nvkm_engine , msppp)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSVLD , struct nvkm_engine , msvld)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 8)
-NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 3)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 4)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg, 8)
-NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA , struct nvkm_engine , ofa)
+NVKM_LAYOUT_INST(NVKM_ENGINE_OFA , struct nvkm_engine , ofa, 2)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC , struct nvkm_engine , sec)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC2 , struct nvkm_sec2 , sec2)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SW , struct nvkm_sw , sw)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 3e8db8280e2a..7903d7470d19 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -87,5 +87,4 @@ int gp102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gv100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
int tu102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
int ga102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
-int ad102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index be508f65b280..96c16cfccf16 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -78,9 +78,6 @@ struct nvkm_fifo {
struct {
struct nvkm_memory *mem;
struct nvkm_vma *bar1;
-
- struct mutex mutex;
- struct list_head list;
} userd;
struct {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 8145796ffc61..a2333cfe6955 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -55,5 +55,4 @@ int gp10b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
int gv100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
int tu102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
int ga102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
-int ad102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 8d2e170883e1..ca83caa55157 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -13,7 +13,5 @@ struct nvkm_nvdec {
int gm107_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
int tu102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
-int ga100_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
int ga102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
-int ad102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 018c58fc32ba..1f6eef13f872 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -13,6 +13,4 @@ struct nvkm_nvenc {
int gm107_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
int tu102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
-int ga102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
-int ad102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h
deleted file mode 100644
index 80b7933a789e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_NVJPG_H__
-#define __NVKM_NVJPG_H__
-#include <core/engine.h>
-
-int ga100_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-int ad102_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h
deleted file mode 100644
index e72e2115333b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_OFA_H__
-#define __NVKM_OFA_H__
-#include <core/engine.h>
-
-int ga100_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-int ga102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-int ad102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 5b798a1a313d..e0d777a933e1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -102,6 +102,9 @@ int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
int tu102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gh100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gb100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gb202_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h
new file mode 100644
index 000000000000..8a3dbb1cbb46
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_FSP_H__
+#define __NVKM_FSP_H__
+#include <core/subdev.h>
+#include <core/falcon.h>
+
+struct nvkm_fsp {
+ const struct nvkm_fsp_func *func;
+ struct nvkm_subdev subdev;
+
+ struct nvkm_falcon falcon;
+};
+
+bool nvkm_fsp_verify_gsp_fmc(struct nvkm_fsp *, u32 hash_size, u32 pkey_size, u32 sig_size);
+int nvkm_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig);
+
+int gh100_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+int gb100_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+int gb202_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 746e126c3ecf..226c7ec56b8e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -17,6 +17,9 @@ struct nvkm_gsp_mem {
dma_addr_t addr;
};
+int nvkm_gsp_mem_ctor(struct nvkm_gsp *, size_t size, struct nvkm_gsp_mem *);
+void nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *);
+
struct nvkm_gsp_radix3 {
struct nvkm_gsp_mem lvl0;
struct nvkm_gsp_mem lvl1;
@@ -31,6 +34,29 @@ typedef int (*nvkm_gsp_msg_ntfy_func)(void *priv, u32 fn, void *repv, u32 repc);
struct nvkm_gsp_event;
typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 repc);
+/**
+ * DOC: GSP message handling policy
+ *
+ * When sending a GSP RPC command, the reply (the GSP RPC message) can be
+ * handled in several different ways, depending on what the caller needs
+ * and on the nature of the command being sent.
+ *
+ * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, immediately return to the
+ * caller after the GSP RPC command is issued.
+ *
+ * NVKM_GSP_RPC_REPLY_RECV - If specified, wait and receive the entire GSP
+ * RPC message after the GSP RPC command is issued.
+ *
+ * NVKM_GSP_RPC_REPLY_POLL - If specified, wait for the reply, then discard
+ * it before returning to the caller.
+ *
+ */
+enum nvkm_gsp_rpc_reply_policy {
+ NVKM_GSP_RPC_REPLY_NOWAIT = 0,
+ NVKM_GSP_RPC_REPLY_RECV,
+ NVKM_GSP_RPC_REPLY_POLL,
+};
+
struct nvkm_gsp {
const struct nvkm_gsp_func *func;
struct nvkm_subdev subdev;
@@ -42,6 +68,9 @@ struct nvkm_gsp {
const struct firmware *load;
const struct firmware *unload;
} booter;
+
+ const struct firmware *fmc;
+
const struct firmware *bl;
const struct firmware *rm;
} fws;
@@ -89,6 +118,15 @@ struct nvkm_gsp {
struct {
struct nvkm_gsp_mem fw;
+ u8 *hash;
+ u8 *pkey;
+ u8 *sig;
+
+ struct nvkm_gsp_mem args;
+ } fmc;
+
+ struct {
+ struct nvkm_gsp_mem fw;
u32 code_offset;
u32 data_offset;
u32 manifest_offset;
@@ -107,6 +145,7 @@ struct nvkm_gsp {
struct sg_table sgt;
struct nvkm_gsp_radix3 radix3;
struct nvkm_gsp_mem meta;
+ struct sg_table fbsr;
} sr;
struct {
@@ -186,31 +225,7 @@ struct nvkm_gsp {
u8 tpcs;
} gr;
- const struct nvkm_gsp_rm {
- void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc);
- void *(*rpc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc);
- void (*rpc_done)(struct nvkm_gsp *gsp, void *repv);
-
- void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc);
- int (*rm_ctrl_push)(struct nvkm_gsp_object *, void **argv, u32 repc);
- void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv);
-
- void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc);
- void *(*rm_alloc_push)(struct nvkm_gsp_object *, void *argv);
- void (*rm_alloc_done)(struct nvkm_gsp_object *, void *repv);
-
- int (*rm_free)(struct nvkm_gsp_object *);
-
- int (*client_ctor)(struct nvkm_gsp *, struct nvkm_gsp_client *);
- void (*client_dtor)(struct nvkm_gsp_client *);
-
- int (*device_ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
- void (*device_dtor)(struct nvkm_gsp_device *);
-
- int (*event_ctor)(struct nvkm_gsp_device *, u32 handle, u32 id,
- nvkm_gsp_event_func, struct nvkm_gsp_event *);
- void (*event_dtor)(struct nvkm_gsp_event *);
- } *rm;
+ struct nvkm_rm *rm;
struct {
struct mutex mutex;
@@ -248,16 +263,19 @@ nvkm_gsp_rm(struct nvkm_gsp *gsp)
return gsp && (gsp->fws.rm || gsp->fw.img);
}
+#include <rm/rm.h>
+
static inline void *
nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
{
- return gsp->rm->rpc_get(gsp, fn, argc);
+ return gsp->rm->api->rpc->get(gsp, fn, argc);
}
static inline void *
-nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 repc)
{
- return gsp->rm->rpc_push(gsp, argv, wait, repc);
+ return gsp->rm->api->rpc->push(gsp, argv, policy, repc);
}
static inline void *
@@ -268,13 +286,14 @@ nvkm_gsp_rpc_rd(struct nvkm_gsp *gsp, u32 fn, u32 argc)
if (IS_ERR_OR_NULL(argv))
return argv;
- return nvkm_gsp_rpc_push(gsp, argv, true, argc);
+ return nvkm_gsp_rpc_push(gsp, argv, NVKM_GSP_RPC_REPLY_RECV, argc);
}
static inline int
-nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
+nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy)
{
- void *repv = nvkm_gsp_rpc_push(gsp, argv, wait, 0);
+ void *repv = nvkm_gsp_rpc_push(gsp, argv, policy, 0);
if (IS_ERR(repv))
return PTR_ERR(repv);
@@ -285,19 +304,19 @@ nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
static inline void
nvkm_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
{
- gsp->rm->rpc_done(gsp, repv);
+ gsp->rm->api->rpc->done(gsp, repv);
}
static inline void *
nvkm_gsp_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
{
- return object->client->gsp->rm->rm_ctrl_get(object, cmd, argc);
+ return object->client->gsp->rm->api->ctrl->get(object, cmd, argc);
}
static inline int
nvkm_gsp_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
{
- return object->client->gsp->rm->rm_ctrl_push(object, argv, repc);
+ return object->client->gsp->rm->api->ctrl->push(object, argv, repc);
}
static inline void *
@@ -328,7 +347,7 @@ nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp_object *object, void *argv)
static inline void
nvkm_gsp_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
{
- object->client->gsp->rm->rm_ctrl_done(object, repv);
+ object->client->gsp->rm->api->ctrl->done(object, repv);
}
static inline void *
@@ -343,7 +362,7 @@ nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u3
object->parent = parent;
object->handle = handle;
- argv = gsp->rm->rm_alloc_get(object, oclass, argc);
+ argv = gsp->rm->api->alloc->get(object, oclass, argc);
if (IS_ERR_OR_NULL(argv)) {
object->client = NULL;
return argv;
@@ -355,7 +374,7 @@ nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u3
static inline void *
nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv)
{
- void *repv = object->client->gsp->rm->rm_alloc_push(object, argv);
+ void *repv = object->client->gsp->rm->api->alloc->push(object, argv);
if (IS_ERR(repv))
object->client = NULL;
@@ -377,7 +396,7 @@ nvkm_gsp_rm_alloc_wr(struct nvkm_gsp_object *object, void *argv)
static inline void
nvkm_gsp_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
{
- object->client->gsp->rm->rm_alloc_done(object, repv);
+ object->client->gsp->rm->api->alloc->done(object, repv);
}
static inline int
@@ -395,39 +414,29 @@ nvkm_gsp_rm_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u32 ar
static inline int
nvkm_gsp_rm_free(struct nvkm_gsp_object *object)
{
- if (object->client)
- return object->client->gsp->rm->rm_free(object);
+ if (object->client) {
+ int ret = object->client->gsp->rm->api->alloc->free(object);
+ object->client = NULL;
+ return ret;
+ }
return 0;
}
-static inline int
-nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
-{
- if (WARN_ON(!gsp->rm))
- return -ENOSYS;
-
- return gsp->rm->client_ctor(gsp, client);
-}
-
-static inline void
-nvkm_gsp_client_dtor(struct nvkm_gsp_client *client)
-{
- if (client->gsp)
- client->gsp->rm->client_dtor(client);
-}
+int nvkm_gsp_client_ctor(struct nvkm_gsp *, struct nvkm_gsp_client *);
+void nvkm_gsp_client_dtor(struct nvkm_gsp_client *);
static inline int
nvkm_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
{
- return client->gsp->rm->device_ctor(client, device);
+ return client->gsp->rm->api->device->ctor(client, device);
}
static inline void
nvkm_gsp_device_dtor(struct nvkm_gsp_device *device)
{
if (device->object.client)
- device->object.client->gsp->rm->device_dtor(device);
+ device->object.client->gsp->rm->api->device->dtor(device);
}
static inline int
@@ -459,7 +468,9 @@ static inline int
nvkm_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
{
- return device->object.client->gsp->rm->event_ctor(device, handle, id, func, event);
+ struct nvkm_rm *rm = device->object.client->gsp->rm;
+
+ return rm->api->device->event.ctor(device, handle, id, func, event);
}
static inline void
@@ -468,7 +479,7 @@ nvkm_gsp_event_dtor(struct nvkm_gsp_event *event)
struct nvkm_gsp_device *device = event->device;
if (device)
- device->object.client->gsp->rm->event_dtor(event);
+ device->object.client->gsp->rm->api->device->event.dtor(event);
}
int nvkm_gsp_intr_stall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
@@ -479,5 +490,8 @@ int tu102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_
int tu116_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ga100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ga102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int gh100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ad102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int gb100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int gb202_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
#endif
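
As a rough illustration of the reply-policy plumbing above (a sketch only: the function name is hypothetical and the fn/argc values and payload handling are placeholders, not taken from this diff), a caller could issue a fire-and-forget command versus a blocking query like this, using only the inlines declared in the hunk:

	static int
	example_gsp_rpc_calls(struct nvkm_gsp *gsp, u32 fn, u32 argc)
	{
		void *argv, *repv;
		int ret;

		/* Fire-and-forget: issue the command, do not wait for the reply. */
		argv = nvkm_gsp_rpc_get(gsp, fn, argc);
		if (IS_ERR_OR_NULL(argv))
			return argv ? PTR_ERR(argv) : -ENOMEM;
		/* ... fill in the command payload at argv ... */
		ret = nvkm_gsp_rpc_wr(gsp, argv, NVKM_GSP_RPC_REPLY_NOWAIT);
		if (ret)
			return ret;

		/* Blocking query: wait for, consume, then release the full reply. */
		repv = nvkm_gsp_rpc_rd(gsp, fn, argc);
		if (IS_ERR_OR_NULL(repv))
			return repv ? PTR_ERR(repv) : -ENOMEM;
		/* ... read the reply at repv ... */
		nvkm_gsp_rpc_done(gsp, repv);
		return 0;
	}
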
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index e10cbd9203ec..db835cf7b8ac 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -24,11 +24,6 @@ struct nvkm_instmem {
struct nvkm_ramht *ramht;
struct nvkm_memory *ramro;
struct nvkm_memory *ramfc;
-
- struct {
- struct sg_table fbsr;
- bool fbsr_valid;
- } rm;
};
u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
@@ -41,4 +36,5 @@ int nv04_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nv
int nv40_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
int nv50_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
int gk20a_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+int gh100_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 935b1cacd528..abcb0dbcde70 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -8,7 +8,7 @@ struct nvkm_vma {
struct list_head head;
struct rb_node tree;
u64 addr;
- u64 size:50;
+ u64 size;
bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
@@ -73,6 +73,7 @@ struct nvkm_vmm {
struct nvkm_gsp_object object;
struct nvkm_vma *rsvd;
+ bool external;
} rm;
};
@@ -165,4 +166,5 @@ int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gh100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index 3c103101d5fc..112b674ed9c8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -50,6 +50,7 @@ int gf100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gf106_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
int gk104_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
int gp100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gh100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
/* pcie functions */
int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h
deleted file mode 100644
index 7a3fc023072d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl0000_h__
-#define __src_common_sdk_nvidia_inc_class_cl0000_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
-
-typedef struct NV0000_ALLOC_PARAMETERS {
- NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
- NvU32 processID;
- char processName[NV_PROC_NAME_MAX_LENGTH];
-} NV0000_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h
deleted file mode 100644
index e4de36d63666..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl0005_h__
-#define __src_common_sdk_nvidia_inc_class_cl0005_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV0005_ALLOC_PARAMETERS {
- NvHandle hParentClient;
- NvHandle hSrcResource;
-
- NvV32 hClass;
- NvV32 notifyIndex;
- NV_DECLARE_ALIGNED(NvP64 data, 8);
-} NV0005_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h
deleted file mode 100644
index 8868118e47d6..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl0080_h__
-#define __src_common_sdk_nvidia_inc_class_cl0080_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */
-
-typedef struct NV0080_ALLOC_PARAMETERS {
- NvU32 deviceId;
- NvHandle hClientShare;
- NvHandle hTargetClient;
- NvHandle hTargetDevice;
- NvV32 flags;
- NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8);
- NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8);
- NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8);
- NvV32 vaMode;
-} NV0080_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h
deleted file mode 100644
index 9040ea5608a0..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl2080_h__
-#define __src_common_sdk_nvidia_inc_class_cl2080_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2002-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */
-
-typedef struct NV2080_ALLOC_PARAMETERS {
- NvU32 subDeviceId;
-} NV2080_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h
deleted file mode 100644
index ba659d6477d3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
-#define __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_NOTIFIERS_HOTPLUG (1)
-
-#define NV2080_NOTIFIERS_DP_IRQ (7)
-
-#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
-#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
-
-#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
-
-#define NV2080_ENGINE_TYPE_BSP (0x00000013)
-#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
-
-#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
-#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
-
-#define NV2080_ENGINE_TYPE_SW (0x00000022)
-
-#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
-
-#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
-#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
-
-#define NV2080_ENGINE_TYPE_OFA (0x00000033)
-
-typedef struct {
- NvU32 plugDisplayMask;
- NvU32 unplugDisplayMask;
-} Nv2080HotplugNotification;
-
-typedef struct Nv2080DpIrqNotificationRec {
- NvU32 displayId;
-} Nv2080DpIrqNotification;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h
deleted file mode 100644
index 9eb780a1ac72..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl84a0_h__
-#define __src_common_sdk_nvidia_inc_class_cl84a0_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_MEMORY_LIST_SYSTEM (0x00000081)
-
-#define NV01_MEMORY_LIST_FBMEM (0x00000082)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h
deleted file mode 100644
index f1d21776e395..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl90f1_h__
-#define __src_common_sdk_nvidia_inc_class_cl90f1_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define FERMI_VASPACE_A (0x000090f1)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h
deleted file mode 100644
index b8f32576cfaa..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
-#define __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NVC0B5_ALLOCATION_PARAMETERS {
- NvU32 version;
- NvU32 engineType;
-} NVC0B5_ALLOCATION_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
deleted file mode 100644
index 58b3ba7badf1..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
- NvBool bDscSupported;
- NvU32 encoderColorFormatMask;
- NvU32 lineBufferSizeKB;
- NvU32 rateBufferSizeKB;
- NvU32 bitsPerPixelPrecision;
- NvU32 maxNumHztSlices;
- NvU32 lineBufferBitDepth;
-} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
deleted file mode 100644
index 596f2ea8344e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 flags;
- NvU32 flags2;
-} NV0073_CTRL_DFP_GET_INFO_PARAMS;
-
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
-#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
-#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
-#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
-#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
-#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
-#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
-#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
-#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
-#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
-#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
-#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
-#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
-#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
-#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
-#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
-#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
-
-#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U
-
-typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 numELDSize;
- NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
- NvU32 maxFreqSupported;
- NvU32 ctrl;
- NvU32 deviceEntry;
-} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
-
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U)
-
-#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvBool enable;
-} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
-
-typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
-
-typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
- NvU32 displayMask;
- NvU32 sorType;
-} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
-
-#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U
-
-typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU8 sorExcludeMask;
- NvU32 slaveDisplayId;
- NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
- NvBool bIs2Head1Or;
- NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
- NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
- NvU8 reservedSorMask;
- NvU32 flags;
-} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
-
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U)
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U)
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U)
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
deleted file mode 100644
index bae4b1997736..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
+++ /dev/null
@@ -1,335 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
-
-typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvBool bAddrOnly;
- NvU32 cmd;
- NvU32 addr;
- NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
- NvU32 size;
- NvU32 replyType;
- NvU32 retryTimeMs;
-} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
-
-#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3
-#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U)
-#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U)
-#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2
-#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U)
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U)
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U)
-
-#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 cmd;
- NvU32 data;
- NvU32 err;
- NvU32 retryTimeMs;
- NvU32 eightLaneDpcdBaseAddr;
-} NV0073_CTRL_DP_CTRL_PARAMS;
-
-#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0
-#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1
-#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2
-#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_UNUSED 3:3
-#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4
-#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5
-#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6
-#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7
-#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8
-#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U)
-#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9
-#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10
-#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U)
-#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13
-#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14
-#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15
-#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U)
-
-#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29
-#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30
-#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31
-#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U)
-
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU)
-#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18
-#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U)
-#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U)
-#define NV0073_CTRL_DP_DATA_TARGET 22:19
-#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U)
-
-#define NV0073_CTRL_MAX_LANES 8U
-
-typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 numLanes;
- NvU32 data[NV0073_CTRL_MAX_LANES];
-} NV0073_CTRL_DP_LANE_DATA_PARAMS;
-
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U)
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U)
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U)
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
-
-#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 mute;
-} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 preferredDisplayId;
-
- NvBool force;
- NvBool useBFM;
-
- NvU32 displayIdAssigned;
- NvU32 allDisplayMask;
-} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
-} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 head;
- NvU32 sorIndex;
- NvU32 dpLink;
-
- NvBool bEnableOverride;
- NvBool bMST;
- NvU32 singleHeadMultistreamMode;
- NvU32 hBlankSym;
- NvU32 vBlankSym;
- NvU32 colorFormat;
- NvBool bEnableTwoHeadOneOr;
-
- struct {
- NvU32 slotStart;
- NvU32 slotEnd;
- NvU32 PBN;
- NvU32 Timeslice;
- NvBool sendACT; // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT
- NvU32 singleHeadMSTPipeline;
- NvBool bEnableAudioOverRightPanel;
- } MST;
-
- struct {
- NvBool bEnhancedFraming;
- NvU32 tuSize;
- NvU32 waterMark;
- NvU32 actualPclkHz; // deprecated -Use MvidWarParams
- NvU32 linkClkFreqHz; // deprecated -Use MvidWarParams
- NvBool bEnableAudioOverRightPanel;
- struct {
- NvU32 activeCnt;
- NvU32 activeFrac;
- NvU32 activePolarity;
- NvBool mvidWarEnabled;
- struct {
- NvU32 actualPclkHz;
- NvU32 linkClkFreqHz;
- } MvidWarParams;
- } Legacy;
- } SST;
-} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
- NvU32 subDeviceInstance;
-} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
-
-typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 sorIndex;
- NvU32 maxLinkRate;
- NvU32 dpVersionsSupported;
- NvU32 UHBRSupported;
- NvBool bIsMultistreamSupported;
- NvBool bIsSCEnabled;
- NvBool bHasIncreasedWatermarkLimits;
- NvBool bIsPC2Disabled;
- NvBool isSingleHeadMSTSupported;
- NvBool bFECSupported;
- NvBool bIsTrainPhyRepeater;
- NvBool bOverrideLinkBw;
- NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
-} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
-
-#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
-
-typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
- // In
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
-
- // Out
- NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
- NvU8 linkBwCount;
-} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
-
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U)
-
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
deleted file mode 100644
index 954958dcf834..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
+++ /dev/null
@@ -1,216 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U
-
-typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 bufferSize;
- NvU32 flags;
- NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
-} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_MAX_CONNECTORS 4U
-
-typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 flags;
- NvU32 DDCPartners;
- NvU32 count;
- struct {
- NvU32 index;
- NvU32 type;
- NvU32 location;
- } data[NV0073_CTRL_MAX_CONNECTORS];
- NvU32 platform;
-} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
- NvU8 subDeviceInstance;
- NvU32 displayId;
- NvU8 enable;
-} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
- NvU8 subDeviceInstance;
- NvU32 displayId;
- NvU8 mute;
-} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 headMask;
-} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
-
-typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 transmitControl;
- NvU32 packetSize;
- NvU32 targetHead;
- NvBool bUsePsrHeadforSdp;
- NvU8 aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
-} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
-
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U)
-
-#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 index;
- NvU32 type;
- NvU32 protocol;
- NvU32 ditherType;
- NvU32 ditherAlgo;
- NvU32 location;
- NvU32 rootPortId;
- NvU32 dcbIndex;
- NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
- NvBool bIsLitByVbios;
- NvBool bIsDispDynamic;
-} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
-
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U)
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U)
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U)
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U)
-
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU)
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 brightness;
- NvBool bUncalibrated;
-} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 caps;
-} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
deleted file mode 100644
index d69cef3c01fd..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 flags;
- NvU32 numHeads;
-} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayMask;
- NvU32 displayMaskDDC;
-} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 flags;
- NvU32 displayMask;
- NvU32 retryTimeMs;
-} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 head;
- NvU32 flags;
- NvU32 displayId;
-} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
-
-#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
deleted file mode 100644
index 3db099e62364..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
- NvU32 totalVFs;
- NvU32 firstVfOffset;
- NvU32 vfFeatureMask;
- NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
- NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
- NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
- NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
- NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
- NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
- NvBool b64bitBar0;
- NvBool b64bitBar1;
- NvBool b64bitBar2;
- NvBool bSriovEnabled;
- NvBool bSriovHeavyEnabled;
- NvBool bEmulateVFBar0TlbInvalidationRegister;
- NvBool bClientRmAllocatedCtxBuffer;
-} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
deleted file mode 100644
index ed01df925573..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
deleted file mode 100644
index b5b7631de99b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
- NvU32 BoardID;
- char chipSKU[4];
- char chipSKUMod[2];
- char project[5];
- char projectSKU[5];
- char CDP[6];
- char projectSKUMod[2];
- NvU32 businessCycle;
-} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
deleted file mode 100644
index fe912d2bd183..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
- NvU32 size;
-} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
-
-#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
deleted file mode 100644
index 87bc4ff92ce1..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
- NvU32 event;
- NvU32 action;
- NvBool bNotifyState;
- NvU32 info32;
- NvU16 info16;
-} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
-
-#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
deleted file mode 100644
index 68c81f9f803c..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
-
-typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
-
-typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
- NV_DECLARE_ALIGNED(NvU64 base, 8);
- NV_DECLARE_ALIGNED(NvU64 limit, 8);
- NV_DECLARE_ALIGNED(NvU64 reserved, 8);
- NvU32 performance;
- NvBool supportCompressed;
- NvBool supportISO;
- NvBool bProtected;
- NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
-} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
-
-#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
-
-typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
- NvU32 numFBRegions;
- NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
-} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
deleted file mode 100644
index bc0f63699b06..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
-
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
-
-typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
- NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
- NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
- NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
- NvU32 numPbdmas;
- char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
-} NV2080_CTRL_FIFO_DEVICE_ENTRY;
-
-typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
- NvU32 baseIndex;
- NvU32 numEntries;
- NvBool bMore;
- // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
- NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
-} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
deleted file mode 100644
index 29d7a1052142..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
-
-#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U)
-
-#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
-
-typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY {
- NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 size, 8);
- NvU32 physAttr;
- NvU16 bufferId;
- NvU8 bInitialize;
- NvU8 bNonmapped;
-} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY;
-
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U
-
-#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U
-
-#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
- NvU32 engineType;
- NvHandle hClient;
- NvU32 ChID;
- NvHandle hChanClient;
- NvHandle hObject;
- NvHandle hVirtMemory;
- NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
- NV_DECLARE_ALIGNED(NvU64 size, 8);
- NvU32 entryCount;
- // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
- NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
-} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
-
-typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
- NvU32 gpcMask;
-} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
-
-typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
- NvU32 gpcId;
- NvU32 tpcMask;
-} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
-
-typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
- NvU32 gpcId;
- NvU32 zcullMask;
-} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
-
-#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
-
-typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
- NvU32 index;
- NvU32 flags;
- NvU32 length;
- NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
-} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
deleted file mode 100644
index 59f8895bc5d7..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS {
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8,
-} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
deleted file mode 100644
index e11b2dbe5288..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
+++ /dev/null
@@ -1,162 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
- NvU32 feHwSysCap;
- NvU32 windowPresentMask;
- NvBool bFbRemapperEnabled;
- NvU32 numHeads;
- NvBool bPrimaryVga;
- NvU32 i2cPort;
- NvU32 internalDispActiveMask;
-} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
-
-#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
-
-#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19
-
-typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
- NvU32 size;
- NvU32 alignment;
-} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
-
-typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
- NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
-} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
-
-typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
- NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
-} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO {
- NvU32 engDesc;
- NvU32 ctxAttr;
- NvU32 ctxBufferSize;
- NvU32 addrSpaceList;
- NvU32 registerBase;
-} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO;
-#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40
-
-#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
- NvU32 numConstructedFalcons;
- NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
-} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
- NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
- NvU32 instMemAddrSpace;
- NvU32 instMemCpuCacheAttr;
-} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
- NvU32 addressSpace;
- NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 limit, 8);
- NvU32 cacheSnoop;
- NvU32 hclass;
- NvU32 channelInstance;
- NvBool valid;
-} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
-
-#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128
-
-typedef enum NV2080_INTR_CATEGORY {
- NV2080_INTR_CATEGORY_DEFAULT = 0,
- NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1,
- NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2,
- NV2080_INTR_CATEGORY_RUNLIST = 3,
- NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4,
- NV2080_INTR_CATEGORY_UVM_OWNED = 5,
- NV2080_INTR_CATEGORY_UVM_SHARED = 6,
- NV2080_INTR_CATEGORY_ENUM_COUNT = 7,
-} NV2080_INTR_CATEGORY;
-
-typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP {
- NvU8 subtreeStart;
- NvU8 subtreeEnd;
-} NV2080_INTR_CATEGORY_SUBTREE_MAP;
-
-typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
- NvU16 engineIdx;
- NvU32 pmcIntrMask;
- NvU32 vectorStall;
- NvU32 vectorNonStall;
-} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
-
-typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
- NvU32 tableLen;
- NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
- NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT];
-} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
- NvU32 fbsrType;
- NvU32 numRegions;
- NvHandle hClient;
- NvHandle hSysMem;
- NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8);
- NvBool bEnteringGcoffState;
-} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
- NvU32 fbsrType;
- NvHandle hClient;
- NvHandle hVidMem;
- NV_DECLARE_ALIGNED(NvU64 vidOffset, 8);
- NV_DECLARE_ALIGNED(NvU64 sysOffset, 8);
- NV_DECLARE_ALIGNED(NvU64 size, 8);
-} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
-
-#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */
-
-typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
- NvU32 status;
- NvU16 backLightDataSize;
- NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
-} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
deleted file mode 100644
index 977e59818533..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define GMMU_FMT_MAX_LEVELS 6U
-
-#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
-
-typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
- /*!
- * [in] GPU sub-device handle - this API only supports unicast.
- * Pass 0 to use subDeviceId instead.
- */
- NvHandle hSubDevice;
-
- /*!
- * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
- */
- NvU32 subDeviceId;
-
- /*!
- * [in] Page size (VA coverage) of the level to reserve.
- * This need not be a leaf (page table) page size - it can be
- * the coverage of an arbitrary level (including root page directory).
- */
- NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
-
- /*!
- * [in] First GPU virtual address of the range to reserve.
- * This must be aligned to pageSize.
- */
- NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
-
- /*!
- * [in] Last GPU virtual address of the range to reserve.
- * This (+1) must be aligned to pageSize.
- */
- NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
-
- /*!
- * [in] Number of PDE levels to copy.
- */
- NvU32 numLevelsToCopy;
-
- /*!
- * [in] Per-level information.
- */
- struct {
- /*!
- * Physical address of this page level instance.
- */
- NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
-
- /*!
- * Size in bytes allocated for this level instance.
- */
- NV_DECLARE_ALIGNED(NvU64 size, 8);
-
- /*!
- * Aperture in which this page level instance resides.
- */
- NvU32 aperture;
-
- /*!
- * Page shift corresponding to the level
- */
- NvU8 pageShift;
- } levels[GMMU_FMT_MAX_LEVELS];
-} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
deleted file mode 100644
index 684045796232..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
-
-typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS {
- NvBool bEnable;
- NvBool bSkipSubmit;
-} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS;
-
-#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */
-
-typedef struct NVA06F_CTRL_BIND_PARAMS {
- NvU32 engineType;
-} NVA06F_CTRL_BIND_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h
deleted file mode 100644
index 5c5a004a8031..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_nvlimits_h__
-#define __src_common_sdk_nvidia_inc_nvlimits_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV_MAX_SUBDEVICES 8
-
-#define NV_PROC_NAME_MAX_LENGTH 100U
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h
deleted file mode 100644
index 51b5591c603e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_nvos_h__
-#define __src_common_sdk_nvidia_inc_nvos_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NVOS02_FLAGS_PHYSICALITY 7:4
-#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000)
-#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001)
-#define NVOS02_FLAGS_LOCATION 11:8
-#define NVOS02_FLAGS_LOCATION_PCI (0x00000000)
-#define NVOS02_FLAGS_LOCATION_AGP (0x00000001)
-#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002)
-#define NVOS02_FLAGS_COHERENCY 15:12
-#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000)
-#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001)
-#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002)
-#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003)
-#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004)
-#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005)
-#define NVOS02_FLAGS_ALLOC 17:16
-#define NVOS02_FLAGS_ALLOC_NONE (0x00000001)
-#define NVOS02_FLAGS_GPU_CACHEABLE 18:18
-#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000)
-#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001)
-
-#define NVOS02_FLAGS_KERNEL_MAPPING 19:19
-#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000)
-#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001)
-#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20
-#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000)
-#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001)
-
-#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21
-#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000)
-#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001)
-
-#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22
-#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000)
-#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001)
-
-#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23
-#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000)
-#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001)
-
-#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24
-#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001)
-
-#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25
-#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000)
-#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001)
-#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002)
-
-#define NVOS02_FLAGS_MAPPING 31:30
-#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000)
-#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001)
-#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002)
-
-#define NV01_EVENT_CLIENT_RM (0x04000000)
-
-typedef struct
-{
- NvV32 channelInstance; // One of the n channel instances of a given channel type.
- // Note that core channel has only one instance
- // while all others have two (one per head).
- NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
- NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
- NvU32 offset; // Initial offset for put/get, usually zero.
- NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
-
- NvU32 flags;
-#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
-#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
-#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
-
-} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvV32 channelInstance; // One of the n channel instances of a given channel type.
- // All PIO channels have two instances (one per head).
- NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
- NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
-} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances;
- NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2
-} NV_BSP_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC?
- NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2
-} NV_MSENC_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG?
- NvU32 engineInstance;
-} NV_NVJPG_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
-} NV_OFA_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 index;
- NvV32 flags;
- NvU64 vaSize NV_ALIGN_BYTES(8);
- NvU64 vaStartInternal NV_ALIGN_BYTES(8);
- NvU64 vaLimitInternal NV_ALIGN_BYTES(8);
- NvU32 bigPageSize;
- NvU64 vaBase NV_ALIGN_BYTES(8);
-} NV_VASPACE_ALLOCATION_PARAMETERS;
-
-#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 //<! Create new VASpace, by default
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
deleted file mode 100644
index 0e32e71e123f..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
+++ /dev/null
@@ -1,97 +0,0 @@
-#ifndef __src_common_shared_msgq_inc_msgq_msgq_priv_h__
-#define __src_common_shared_msgq_inc_msgq_msgq_priv_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/**
- * msgqTxHeader -- TX queue data structure
- * @version: the version of this structure, must be 0
- * @size: the size of the entire queue, including this header
- * @msgSize: the padded size of queue element, 16 is minimum
- * @msgCount: the number of elements in this queue
- * @writePtr: head index of this queue
- * @flags: 1 = swap the RX pointers
- * @rxHdrOff: offset of readPtr in this structure
- * @entryOff: offset of beginning of queue (msgqRxHeader), relative to
- * beginning of this structure
- *
- * The command queue is a queue of RPCs that are sent from the driver to the
- * GSP. The status queue is a queue of messages/responses from GSP-RM to the
- * driver. Although the driver allocates memory for both queues, the command
- * queue is owned by the driver and the status queue is owned by GSP-RM. In
- * addition, the headers of the two queues must not share the same 4K page.
- *
- * Each queue is prefixed with this data structure. The idea is that a queue
- * and its header are written to only by their owner. That is, only the
- * driver writes to the command queue and command queue header, and only the
- * GSP writes to the status (receive) queue and its header.
- *
- * This is enforced by the concept of "swapping" the RX pointers. This is
- * why the 'flags' field must be set to 1. 'rxHdrOff' is how the GSP knows
- * where the tail pointer of its status queue is.
- *
- * When the driver writes a new RPC to the command queue, it updates writePtr.
- * When it reads a new message from the status queue, it updates readPtr. In
- * this way, the GSP knows when a new command is in the queue (it polls
- * writePtr) and it knows how much free space is in the status queue (it
- * checks readPtr). The driver never cares about how much free space is in
- * the status queue.
- *
- * As usual, producers write to the head pointer, and consumers read from the
- * tail pointer. When head == tail, the queue is empty.
- *
- * So to summarize:
- * command.writePtr = head of command queue
- * command.readPtr = tail of status queue
- * status.writePtr = head of status queue
- * status.readPtr = tail of command queue
- */
-typedef struct
-{
- NvU32 version; // queue version
- NvU32 size; // bytes, page aligned
- NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum
- NvU32 msgCount; // number of entries in queue
- NvU32 writePtr; // message id of next slot
- NvU32 flags; // if set it means "i want to swap RX"
- NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store.
- NvU32 entryOff; // Offset of entries from start of backing store.
-} msgqTxHeader;
-
-/**
- * msgqRxHeader - RX queue data structure
- * @readPtr: tail index of the other queue
- *
- * Although this is a separate struct, it could easily be merged into
- * msgqTxHeader. msgqTxHeader.rxHdrOff is simply the offset of readPtr
- * from the beginning of msgqTxHeader.
- */
-typedef struct
-{
- NvU32 readPtr; // message id of last message read
-} msgqRxHeader;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h
deleted file mode 100644
index 83cf1b2c15a3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef __src_common_uproc_os_common_include_libos_init_args_h__
-#define __src_common_uproc_os_common_include_libos_init_args_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef NvU64 LibosAddress;
-
-typedef enum {
- LIBOS_MEMORY_REGION_NONE,
- LIBOS_MEMORY_REGION_CONTIGUOUS,
- LIBOS_MEMORY_REGION_RADIX3
-} LibosMemoryRegionKind;
-
-typedef enum {
- LIBOS_MEMORY_REGION_LOC_NONE,
- LIBOS_MEMORY_REGION_LOC_SYSMEM,
- LIBOS_MEMORY_REGION_LOC_FB
-} LibosMemoryRegionLoc;
-
-typedef struct
-{
- LibosAddress id8; // Id tag.
- LibosAddress pa; // Physical address.
- LibosAddress size; // Size of memory area.
- NvU8 kind; // See LibosMemoryRegionKind above.
- NvU8 loc; // See LibosMemoryRegionLoc above.
-} LibosMemoryRegionInitArgument;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h
deleted file mode 100644
index 73213bdfcbda..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
-#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
-#define GSP_FW_SR_META_REVISION 2
-
-typedef struct
-{
- //
- // Magic
- // Use for verification by Booter
- //
- NvU64 magic; // = GSP_FW_SR_META_MAGIC;
-
- //
- // Revision number
- // Bumped up when we change this interface so it is not backward compatible.
- // Bumped up when we revoke GSP-RM ucode
- //
-    NvU64 revision;  // = GSP_FW_SR_META_REVISION;
-
- //
- // ---- Members regarding data in SYSMEM ----------------------------
- // Consumed by Booter for DMA
- //
- NvU64 sysmemAddrOfSuspendResumeData;
- NvU64 sizeOfSuspendResumeData;
-
- // ---- Members for crypto ops across S/R ---------------------------
-
- //
- // HMAC over the entire GspFwSRMeta structure (including padding)
- // with the hmac field itself zeroed.
- //
- NvU8 hmac[32];
-
- // Hash over GspFwWprMeta structure
- NvU8 wprMetaHash[32];
-
- // Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
- NvU8 heapFreeListHash[32];
-
- // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
- NvU8 dataHash[32];
-
- //
- // Pad structure to exactly 256 bytes (1 DMA chunk).
- // Padding initialized to zero.
- //
- NvU32 padding[24];
-
-} GspFwSRMeta;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h
deleted file mode 100644
index a2e141e4b459..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h
+++ /dev/null
@@ -1,170 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
-#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct
-{
- // Magic
- // BL to use for verification (i.e. Booter locked it in WPR2)
- NvU64 magic; // = 0xdc3aae21371a60b3;
-
- // Revision number of Booter-BL-Sequencer handoff interface
- // Bumped up when we change this interface so it is not backward compatible.
- // Bumped up when we revoke GSP-RM ucode
- NvU64 revision; // = 1;
-
- // ---- Members regarding data in SYSMEM ----------------------------
- // Consumed by Booter for DMA
-
- NvU64 sysmemAddrOfRadix3Elf;
- NvU64 sizeOfRadix3Elf;
-
- NvU64 sysmemAddrOfBootloader;
- NvU64 sizeOfBootloader;
-
- // Offsets inside bootloader image needed by Booter
- NvU64 bootloaderCodeOffset;
- NvU64 bootloaderDataOffset;
- NvU64 bootloaderManifestOffset;
-
- union
- {
- // Used only at initial boot
- struct
- {
- NvU64 sysmemAddrOfSignature;
- NvU64 sizeOfSignature;
- };
-
- //
- // Used at suspend/resume to read GspFwHeapFreeList
- // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
- //
- struct
- {
- NvU32 gspFwHeapFreeListWprOffset;
- NvU32 unused0;
- NvU64 unused1;
- };
- };
-
- // ---- Members describing FB layout --------------------------------
- NvU64 gspFwRsvdStart;
-
- NvU64 nonWprHeapOffset;
- NvU64 nonWprHeapSize;
-
- NvU64 gspFwWprStart;
-
- // GSP-RM to use to setup heap.
- NvU64 gspFwHeapOffset;
- NvU64 gspFwHeapSize;
-
- // BL to use to find ELF for jump
- NvU64 gspFwOffset;
- // Size is sizeOfRadix3Elf above.
-
- NvU64 bootBinOffset;
- // Size is sizeOfBootloader above.
-
- NvU64 frtsOffset;
- NvU64 frtsSize;
-
- NvU64 gspFwWprEnd;
-
- // GSP-RM to use for fbRegionInfo?
- NvU64 fbSize;
-
- // ---- Other members -----------------------------------------------
-
- // GSP-RM to use for fbRegionInfo?
- NvU64 vgaWorkspaceOffset;
- NvU64 vgaWorkspaceSize;
-
- // Boot count. Used to determine whether to load the firmware image.
- NvU64 bootCount;
-
- // TODO: the partitionRpc* fields below do not really belong in this
- // structure. The values are patched in by the partition bootstrapper
- // when GSP-RM is booted in a partition, and this structure was a
- // convenient place for the bootstrapper to access them. These should
- // be moved to a different comm. mechanism between the bootstrapper
- // and the GSP-RM tasks.
-
- union
- {
- struct
- {
- // Shared partition RPC memory (physical address)
- NvU64 partitionRpcAddr;
-
- // Offsets relative to partitionRpcAddr
- NvU16 partitionRpcRequestOffset;
- NvU16 partitionRpcReplyOffset;
-
- // Code section and dataSection offset and size.
- NvU32 elfCodeOffset;
- NvU32 elfDataOffset;
- NvU32 elfCodeSize;
- NvU32 elfDataSize;
-
- // Used during GSP-RM resume to check for revocation
- NvU32 lsUcodeVersion;
- };
-
- struct
- {
- // Pad for the partitionRpc* fields, plus 4 bytes
- NvU32 partitionRpcPadding[4];
-
- // CrashCat (contiguous) buffer size/location - occupies same bytes as the
- // elf(Code|Data)(Offset|Size) fields above.
- // TODO: move to GSP_FMC_INIT_PARAMS
- NvU64 sysmemAddrOfCrashReportQueue;
- NvU32 sizeOfCrashReportQueue;
-
- // Pad for the lsUcodeVersion field
- NvU32 lsUcodeVersionPadding[1];
- };
- };
-
- // Number of VF partitions allocating sub-heaps from the WPR heap
- // Used during boot to ensure the heap is adequately sized
- NvU8 gspFwHeapVfPartitionCount;
-
- // Pad structure to exactly 256 bytes. Can replace padding with additional
- // fields without incrementing revision. Padding initialized to 0.
- NvU8 padding[7];
-
- // BL to use for verification (i.e. Booter says OK to boot)
- NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
-} GspFwWprMeta;
-
-#define GSP_FW_WPR_META_REVISION 1
-#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h
deleted file mode 100644
index 4eff473e8990..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
-#define __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct {
- //
- // Version 1
- // Version 2
- // Version 3 = for Partition boot
- // Version 4 = for eb riscv boot
- // Version 5 = Support signing entire RISC-V image as "code" in code section for hopper and later.
- //
- NvU32 version; // structure version
- NvU32 bootloaderOffset;
- NvU32 bootloaderSize;
- NvU32 bootloaderParamOffset;
- NvU32 bootloaderParamSize;
- NvU32 riscvElfOffset;
- NvU32 riscvElfSize;
- NvU32 appVersion; // Changelist number associated with the image
- //
- // Manifest contains information about Monitor and it is
- // input to BR
- //
- NvU32 manifestOffset;
- NvU32 manifestSize;
- //
- // Monitor Data offset within RISCV image and size
- //
- NvU32 monitorDataOffset;
- NvU32 monitorDataSize;
- //
- * Monitor Code offset within RISCV image and size
- //
- NvU32 monitorCodeOffset;
- NvU32 monitorCodeSize;
- NvU32 bIsMonitorEnabled;
- //
- // Swbrom Code offset within RISCV image and size
- //
- NvU32 swbromCodeOffset;
- NvU32 swbromCodeSize;
- //
- // Swbrom Data offset within RISCV image and size
- //
- NvU32 swbromDataOffset;
- NvU32 swbromDataSize;
- //
- // Total size of FB carveout (image and reserved space).
- //
- NvU32 fbReservedSize;
- //
- // Indicates whether the entire RISC-V image is signed as "code" in code section.
- //
- NvU32 bSignedAsCode;
-} RM_RISCV_UCODE_DESC;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h
deleted file mode 100644
index 341ab0dbeaf2..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
-#define __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum GSP_SEQ_BUF_OPCODE
-{
- GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
- GSP_SEQ_BUF_OPCODE_REG_MODIFY,
- GSP_SEQ_BUF_OPCODE_REG_POLL,
- GSP_SEQ_BUF_OPCODE_DELAY_US,
- GSP_SEQ_BUF_OPCODE_REG_STORE,
- GSP_SEQ_BUF_OPCODE_CORE_RESET,
- GSP_SEQ_BUF_OPCODE_CORE_START,
- GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
- GSP_SEQ_BUF_OPCODE_CORE_RESUME,
-} GSP_SEQ_BUF_OPCODE;
-
-#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \
- ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \
- /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \
- /* GSP_SEQ_BUF_OPCODE_CORE_START */ \
- /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \
- /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \
- 0)
-
-typedef struct
-{
- NvU32 addr;
- NvU32 val;
-} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
-
-typedef struct
-{
- NvU32 addr;
- NvU32 mask;
- NvU32 val;
-} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
-
-typedef struct
-{
- NvU32 addr;
- NvU32 mask;
- NvU32 val;
- NvU32 timeout;
- NvU32 error;
-} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
-
-typedef struct
-{
- NvU32 val;
-} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
-
-typedef struct
-{
- NvU32 addr;
- NvU32 index;
-} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
-
-typedef struct GSP_SEQUENCER_BUFFER_CMD
-{
- GSP_SEQ_BUF_OPCODE opCode;
- union
- {
- GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
- GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
- GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
- GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
- GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
- } payload;
-} GSP_SEQUENCER_BUFFER_CMD;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h
deleted file mode 100644
index 3144e9beac61..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_nvidia_generated_g_allclasses_h__
-#define __src_nvidia_generated_g_allclasses_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
-
-#define NV04_DISPLAY_COMMON (0x00000073)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h
deleted file mode 100644
index 6b8921138c7d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __src_nvidia_generated_g_chipset_nvoc_h__
-#define __src_nvidia_generated_g_chipset_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct
-{
- NvU16 deviceID; // deviceID
- NvU16 vendorID; // vendorID
- NvU16 subdeviceID; // subsystem deviceID
- NvU16 subvendorID; // subsystem vendorID
- NvU8 revisionID; // revision ID
-} BUSINFO;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h
deleted file mode 100644
index a5128f00225b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_nvidia_generated_g_fbsr_nvoc_h__
-#define __src_nvidia_generated_g_fbsr_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest.
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h
deleted file mode 100644
index 5641a21cacca..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __src_nvidia_generated_g_gpu_nvoc_h__
-#define __src_nvidia_generated_g_gpu_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum
-{
- COMPUTE_BRANDING_TYPE_NONE,
- COMPUTE_BRANDING_TYPE_TESLA,
-} COMPUTE_BRANDING_TYPE;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h
deleted file mode 100644
index b5ad55f854dc..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __src_nvidia_generated_g_kernel_channel_nvoc_h__
-#define __src_nvidia_generated_g_kernel_channel_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum {
- /*!
- * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
- * kernel CPU-RM clients.
- */
- ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
- /*! @brief Error notifier is explicitly not set.
- *
- * The corresponding hErrorContext or hEccErrorContext must be
- * NV01_NULL_OBJECT.
- */
- ERROR_NOTIFIER_TYPE_NONE,
- /*! @brief Error notifier is a ContextDma */
- ERROR_NOTIFIER_TYPE_CTXDMA,
- /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
- ERROR_NOTIFIER_TYPE_MEMORY
-} ErrorNotifierType;
-
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h
deleted file mode 100644
index 946954ac5b3d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h
+++ /dev/null
@@ -1,119 +0,0 @@
-#ifndef __src_nvidia_generated_g_kernel_fifo_nvoc_h__
-#define __src_nvidia_generated_g_kernel_fifo_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum
-{
- /* *************************************************************************
- * Bug 3820969
- * THINK BEFORE CHANGING ENUM ORDER HERE.
- * VGPU-guest uses this same ordering. Because this enum is not versioned,
- * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
- * ************************************************************************/
-
- // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc.,
- ENGINE_INFO_TYPE_ENG_DESC = 0,
-
- // HW engine ID
- ENGINE_INFO_TYPE_FIFO_TAG,
-
- // RM_ENGINE_TYPE_*
- ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
-
- //
- // runlist id (meaning varies by GPU)
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_RUNLIST,
-
- // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_*
- ENGINE_INFO_TYPE_MMU_FAULT_ID,
-
- // ROBUST_CHANNEL_*
- ENGINE_INFO_TYPE_RC_MASK,
-
- // Reset Bit Position. On Ampere, only valid if not _INVALID
- ENGINE_INFO_TYPE_RESET,
-
- // Interrupt Bit Position
- ENGINE_INFO_TYPE_INTR,
-
- // log2(MC_ENGINE_*)
- ENGINE_INFO_TYPE_MC,
-
- // The DEV_TYPE_ENUM for this engine
- ENGINE_INFO_TYPE_DEV_TYPE_ENUM,
-
- // The particular instance of this engine type
- ENGINE_INFO_TYPE_INSTANCE_ID,
-
- //
- // The base address for this engine's NV_RUNLIST. Valid only on Ampere+
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_RUNLIST_PRI_BASE,
-
- //
- // If this entry is a host-driven engine.
- // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry.
- //
- ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE,
-
- //
- // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID,
-
- //
- // The base address for this engine's NV_CHRAM registers. Valid only on
- // Ampere+
- //
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_CHRAM_PRI_BASE,
-
- // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM
- ENGINE_INFO_TYPE_KERNEL_RM_MAX,
- // Used for iterating the engine info table by the index passed.
- ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX,
-
- // Size of FIFO_ENGINE_LIST.engineData
- ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID,
-
- // Input-only parameter for kfifoEngineInfoXlate.
- ENGINE_INFO_TYPE_PBDMA_ID
-
- /* *************************************************************************
- * Bug 3820969
- * THINK BEFORE CHANGING ENUM ORDER HERE.
- * VGPU-guest uses this same ordering. Because this enum is not versioned,
- * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
- * ************************************************************************/
-} ENGINE_INFO_TYPE;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h
deleted file mode 100644
index daabaee41c87..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __src_nvidia_generated_g_mem_desc_nvoc_h__
-#define __src_nvidia_generated_g_mem_desc_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define ADDR_SYSMEM 1 // System memory (PCI)
-#define ADDR_FBMEM 2 // Frame buffer memory space
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
deleted file mode 100644
index 10121218f4d3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef __src_nvidia_generated_g_os_nvoc_h__
-#define __src_nvidia_generated_g_os_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct PACKED_REGISTRY_ENTRY
-{
- NvU32 nameOffset;
- NvU8 type;
- NvU32 data;
- NvU32 length;
-} PACKED_REGISTRY_ENTRY;
-
-typedef struct PACKED_REGISTRY_TABLE
-{
- NvU32 size;
- NvU32 numEntries;
- PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
-} PACKED_REGISTRY_TABLE;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h
deleted file mode 100644
index 8d925e24faea..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h
+++ /dev/null
@@ -1,124 +0,0 @@
-#ifndef __src_nvidia_generated_g_rpc_structures_h__
-#define __src_nvidia_generated_g_rpc_structures_h__
-#include <nvrm/535.113.01/nvidia/generated/g_sdk-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct rpc_alloc_memory_v13_01
-{
- NvHandle hClient;
- NvHandle hDevice;
- NvHandle hMemory;
- NvU32 hClass;
- NvU32 flags;
- NvU32 pteAdjust;
- NvU32 format;
- NvU64 length NV_ALIGN_BYTES(8);
- NvU32 pageCount;
- struct pte_desc pteDesc;
-} rpc_alloc_memory_v13_01;
-
-typedef struct rpc_free_v03_00
-{
- NVOS00_PARAMETERS_v03_00 params;
-} rpc_free_v03_00;
-
-typedef struct rpc_unloading_guest_driver_v1F_07
-{
- NvBool bInPMTransition;
- NvBool bGc6Entering;
- NvU32 newLevel;
-} rpc_unloading_guest_driver_v1F_07;
-
-typedef struct rpc_update_bar_pde_v15_00
-{
- UpdateBarPde_v15_00 info;
-} rpc_update_bar_pde_v15_00;
-
-typedef struct rpc_gsp_rm_alloc_v03_00
-{
- NvHandle hClient;
- NvHandle hParent;
- NvHandle hObject;
- NvU32 hClass;
- NvU32 status;
- NvU32 paramsSize;
- NvU32 flags;
- NvU8 reserved[4];
- NvU8 params[];
-} rpc_gsp_rm_alloc_v03_00;
-
-typedef struct rpc_gsp_rm_control_v03_00
-{
- NvHandle hClient;
- NvHandle hObject;
- NvU32 cmd;
- NvU32 status;
- NvU32 paramsSize;
- NvU32 flags;
- NvU8 params[];
-} rpc_gsp_rm_control_v03_00;
-
-typedef struct rpc_run_cpu_sequencer_v17_00
-{
- NvU32 bufferSizeDWord;
- NvU32 cmdIndex;
- NvU32 regSaveArea[8];
- NvU32 commandBuffer[];
-} rpc_run_cpu_sequencer_v17_00;
-
-typedef struct rpc_post_event_v17_00
-{
- NvHandle hClient;
- NvHandle hEvent;
- NvU32 notifyIndex;
- NvU32 data;
- NvU16 info16;
- NvU32 status;
- NvU32 eventDataSize;
- NvBool bNotifyList;
- NvU8 eventData[];
-} rpc_post_event_v17_00;
-
-typedef struct rpc_rc_triggered_v17_02
-{
- NvU32 nv2080EngineType;
- NvU32 chid;
- NvU32 exceptType;
- NvU32 scope;
- NvU16 partitionAttributionId;
-} rpc_rc_triggered_v17_02;
-
-typedef struct rpc_os_error_log_v17_00
-{
- NvU32 exceptType;
- NvU32 runlistId;
- NvU32 chid;
- char errString[0x100];
-} rpc_os_error_log_v17_00;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h
deleted file mode 100644
index e9fed4140468..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef __src_nvidia_generated_g_sdk_structures_h__
-#define __src_nvidia_generated_g_sdk_structures_h__
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NVOS00_PARAMETERS_v03_00
-{
- NvHandle hRoot;
- NvHandle hObjectParent;
- NvHandle hObjectOld;
- NvV32 status;
-} NVOS00_PARAMETERS_v03_00;
-
-typedef struct UpdateBarPde_v15_00
-{
- NV_RPC_UPDATE_PDE_BAR_TYPE barType;
- NvU64 entryValue NV_ALIGN_BYTES(8);
- NvU64 entryLevelShift NV_ALIGN_BYTES(8);
-} UpdateBarPde_v15_00;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h
deleted file mode 100644
index af50b11ec3b4..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h
+++ /dev/null
@@ -1,74 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
-#define __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct DOD_METHOD_DATA
-{
- NV_STATUS status;
- NvU32 acpiIdListLen;
- NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
-} DOD_METHOD_DATA;
-
-typedef struct JT_METHOD_DATA
-{
- NV_STATUS status;
- NvU32 jtCaps;
- NvU16 jtRevId;
- NvBool bSBIOSCaps;
-} JT_METHOD_DATA;
-
-typedef struct MUX_METHOD_DATA_ELEMENT
-{
- NvU32 acpiId;
- NvU32 mode;
- NV_STATUS status;
-} MUX_METHOD_DATA_ELEMENT;
-
-typedef struct MUX_METHOD_DATA
-{
- NvU32 tableLen;
- MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
- MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
-} MUX_METHOD_DATA;
-
-typedef struct CAPS_METHOD_DATA
-{
- NV_STATUS status;
- NvU32 optimusCaps;
-} CAPS_METHOD_DATA;
-
-typedef struct ACPI_METHOD_DATA
-{
- NvBool bValid;
- DOD_METHOD_DATA dodMethodData;
- JT_METHOD_DATA jtMethodData;
- MUX_METHOD_DATA muxMethodData;
- CAPS_METHOD_DATA capsMethodData;
-} ACPI_METHOD_DATA;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h
deleted file mode 100644
index e3160c60036d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
-#define __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum
-{
- RM_ENGINE_TYPE_NULL = (0x00000000),
- RM_ENGINE_TYPE_GR0 = (0x00000001),
- RM_ENGINE_TYPE_GR1 = (0x00000002),
- RM_ENGINE_TYPE_GR2 = (0x00000003),
- RM_ENGINE_TYPE_GR3 = (0x00000004),
- RM_ENGINE_TYPE_GR4 = (0x00000005),
- RM_ENGINE_TYPE_GR5 = (0x00000006),
- RM_ENGINE_TYPE_GR6 = (0x00000007),
- RM_ENGINE_TYPE_GR7 = (0x00000008),
- RM_ENGINE_TYPE_COPY0 = (0x00000009),
- RM_ENGINE_TYPE_COPY1 = (0x0000000a),
- RM_ENGINE_TYPE_COPY2 = (0x0000000b),
- RM_ENGINE_TYPE_COPY3 = (0x0000000c),
- RM_ENGINE_TYPE_COPY4 = (0x0000000d),
- RM_ENGINE_TYPE_COPY5 = (0x0000000e),
- RM_ENGINE_TYPE_COPY6 = (0x0000000f),
- RM_ENGINE_TYPE_COPY7 = (0x00000010),
- RM_ENGINE_TYPE_COPY8 = (0x00000011),
- RM_ENGINE_TYPE_COPY9 = (0x00000012),
- RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
- RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
- RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
- RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
- RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
- RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
- RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
- RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
- RM_ENGINE_TYPE_NVENC0 = (0x00000025),
- RM_ENGINE_TYPE_NVENC1 = (0x00000026),
- RM_ENGINE_TYPE_NVENC2 = (0x00000027),
- RM_ENGINE_TYPE_VP = (0x00000028),
- RM_ENGINE_TYPE_ME = (0x00000029),
- RM_ENGINE_TYPE_PPP = (0x0000002a),
- RM_ENGINE_TYPE_MPEG = (0x0000002b),
- RM_ENGINE_TYPE_SW = (0x0000002c),
- RM_ENGINE_TYPE_TSEC = (0x0000002d),
- RM_ENGINE_TYPE_VIC = (0x0000002e),
- RM_ENGINE_TYPE_MP = (0x0000002f),
- RM_ENGINE_TYPE_SEC2 = (0x00000030),
- RM_ENGINE_TYPE_HOST = (0x00000031),
- RM_ENGINE_TYPE_DPU = (0x00000032),
- RM_ENGINE_TYPE_PMU = (0x00000033),
- RM_ENGINE_TYPE_FBFLCN = (0x00000034),
- RM_ENGINE_TYPE_NVJPEG0 = (0x00000035),
- RM_ENGINE_TYPE_NVJPEG1 = (0x00000036),
- RM_ENGINE_TYPE_NVJPEG2 = (0x00000037),
- RM_ENGINE_TYPE_NVJPEG3 = (0x00000038),
- RM_ENGINE_TYPE_NVJPEG4 = (0x00000039),
- RM_ENGINE_TYPE_NVJPEG5 = (0x0000003a),
- RM_ENGINE_TYPE_NVJPEG6 = (0x0000003b),
- RM_ENGINE_TYPE_NVJPEG7 = (0x0000003c),
- RM_ENGINE_TYPE_OFA = (0x0000003d),
- RM_ENGINE_TYPE_LAST = (0x0000003e),
-} RM_ENGINE_TYPE;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h
deleted file mode 100644
index 3abec59f0cc4..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
-#define __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures
-
-#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h
deleted file mode 100644
index 4033a6f85a76..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
-#define __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct {
- RmPhysAddr sharedMemPhysAddr;
- NvU32 pageTableEntryCount;
- NvLength cmdQueueOffset;
- NvLength statQueueOffset;
- NvLength locklessCmdQueueOffset;
- NvLength locklessStatQueueOffset;
-} MESSAGE_QUEUE_INIT_ARGUMENTS;
-
-typedef struct {
- NvU32 oldLevel;
- NvU32 flags;
- NvBool bInPMTransition;
-} GSP_SR_INIT_ARGUMENTS;
-
-typedef struct
-{
- MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
- GSP_SR_INIT_ARGUMENTS srInitArguments;
- NvU32 gpuInstance;
-
- struct
- {
- NvU64 pa;
- NvU64 size;
- } profilerArgs;
-} GSP_ARGUMENTS_CACHED;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h
deleted file mode 100644
index eeab25a5e290..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h
+++ /dev/null
@@ -1,174 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
-#define __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h>
-#include <nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct GSP_VF_INFO
-{
- NvU32 totalVFs;
- NvU32 firstVFOffset;
- NvU64 FirstVFBar0Address;
- NvU64 FirstVFBar1Address;
- NvU64 FirstVFBar2Address;
- NvBool b64bitBar0;
- NvBool b64bitBar1;
- NvBool b64bitBar2;
-} GSP_VF_INFO;
-
-typedef struct GspSMInfo_t
-{
- NvU32 version;
- NvU32 regBankCount;
- NvU32 regBankRegCount;
- NvU32 maxWarpsPerSM;
- NvU32 maxThreadsPerWarp;
- NvU32 geomGsObufEntries;
- NvU32 geomXbufEntries;
- NvU32 maxSPPerSM;
- NvU32 rtCoreCount;
-} GspSMInfo;
-
-typedef struct GspStaticConfigInfo_t
-{
- NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
- NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
- NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo;
- NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT];
- NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT];
- NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
- NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
- COMPUTE_BRANDING_TYPE computeBranding;
-
- NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
- NvU32 sriovMaxGfid;
-
- NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
-
- GspSMInfo SM_info;
-
- NvBool poisonFuseEnabled;
-
- NvU64 fb_length;
- NvU32 fbio_mask;
- NvU32 fb_bus_width;
- NvU32 fb_ram_type;
- NvU32 fbp_mask;
- NvU32 l2_cache_size;
-
- NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
- NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
-
- NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
- NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
- NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
- NvBool bGpuInternalSku;
- NvBool bIsQuadroGeneric;
- NvBool bIsQuadroAd;
- NvBool bIsNvidiaNvs;
- NvBool bIsVgx;
- NvBool bGeforceSmb;
- NvBool bIsTitan;
- NvBool bIsTesla;
- NvBool bIsMobile;
- NvBool bIsGc6Rtd3Allowed;
- NvBool bIsGcOffRtd3Allowed;
- NvBool bIsGcoffLegacyAllowed;
-
- NvU64 bar1PdeBase;
- NvU64 bar2PdeBase;
-
- NvBool bVbiosValid;
- NvU32 vbiosSubVendor;
- NvU32 vbiosSubDevice;
-
- NvBool bPageRetirementSupported;
-
- NvBool bSplitVasBetweenServerClientRm;
-
- NvBool bClRootportNeedsNosnoopWAR;
-
- VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
- VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
- NvU64 displaylessMaxPixels;
-
- // Client handle for internal RMAPI control.
- NvHandle hInternalClient;
-
- // Device handle for internal RMAPI control.
- NvHandle hInternalDevice;
-
- // Subdevice handle for internal RMAPI control.
- NvHandle hInternalSubdevice;
-
- NvBool bSelfHostedMode;
- NvBool bAtsSupported;
-
- NvBool bIsGpuUefi;
-} GspStaticConfigInfo;
-
-typedef struct GspSystemInfo
-{
- NvU64 gpuPhysAddr;
- NvU64 gpuPhysFbAddr;
- NvU64 gpuPhysInstAddr;
- NvU64 nvDomainBusDeviceFunc;
- NvU64 simAccessBufPhysAddr;
- NvU64 pcieAtomicsOpMask;
- NvU64 consoleMemSize;
- NvU64 maxUserVa;
- NvU32 pciConfigMirrorBase;
- NvU32 pciConfigMirrorSize;
- NvU8 oorArch;
- NvU64 clPdbProperties;
- NvU32 Chipset;
- NvBool bGpuBehindBridge;
- NvBool bMnocAvailable;
- NvBool bUpstreamL0sUnsupported;
- NvBool bUpstreamL1Unsupported;
- NvBool bUpstreamL1PorSupported;
- NvBool bUpstreamL1PorMobileOnly;
- NvU8 upstreamAddressValid;
- BUSINFO FHBBusInfo;
- BUSINFO chipsetIDInfo;
- ACPI_METHOD_DATA acpiMethodData;
- NvU32 hypervisorType;
- NvBool bIsPassthru;
- NvU64 sysTimerOffsetNs;
- GSP_VF_INFO gspVFInfo;
-} GspSystemInfo;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h
deleted file mode 100644
index bd5e01f9814b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
-#define __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define MC_ENGINE_IDX_DISP 2
-
-#define MC_ENGINE_IDX_CE0 15
-
-#define MC_ENGINE_IDX_CE9 24
-
-#define MC_ENGINE_IDX_MSENC 38
-
-#define MC_ENGINE_IDX_MSENC2 40
-
-#define MC_ENGINE_IDX_GSP 49
-#define MC_ENGINE_IDX_NVJPG 50
-#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
-#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
-
-#define MC_ENGINE_IDX_NVJPEG7 57
-
-#define MC_ENGINE_IDX_BSP 64
-#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
-#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
-
-#define MC_ENGINE_IDX_NVDEC7 71
-
-#define MC_ENGINE_IDX_OFA0 80
-
-#define MC_ENGINE_IDX_GR 82
-#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h
deleted file mode 100644
index 366447a368bf..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_nvbitmask_h__
-#define __src_nvidia_inc_kernel_gpu_nvbitmask_h__
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NVGPU_ENGINE_CAPS_MASK_BITS 32
-#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h
deleted file mode 100644
index 4a850dad4776..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_os_nv_memory_type_h__
-#define __src_nvidia_inc_kernel_os_nv_memory_type_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV_MEMORY_WRITECOMBINED 2
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h
deleted file mode 100644
index f14b23852456..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
-#define __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define MAX_GPC_COUNT 32
-
-typedef enum
-{
- NV_RPC_UPDATE_PDE_BAR_1,
- NV_RPC_UPDATE_PDE_BAR_2,
- NV_RPC_UPDATE_PDE_BAR_INVALID,
-} NV_RPC_UPDATE_PDE_BAR_TYPE;
-
-typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
-{
- NvU32 headIndex;
- NvU32 maxHResolution;
- NvU32 maxVResolution;
-} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
-
-typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
-{
- NvU32 numHeads;
- NvU32 maxNumHeads;
-} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h
deleted file mode 100644
index 7801af232dff..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
-#define __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-struct pte_desc
-{
- NvU32 idr:2;
- NvU32 reserved1:14;
- NvU32 length:16;
- union {
- NvU64 pte; // PTE when IDR==0; PDE when IDR > 0
- NvU64 pde; // PTE when IDR==0; PDE when IDR > 0
- } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0
-};
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
index e6833df1ccc7..af11648ad9c8 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
+++ b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
@@ -21,4 +21,6 @@ typedef NvU64 NvLength;
typedef NvU64 RmPhysAddr;
typedef NvU32 NV_STATUS;
+
+typedef union {} rpc_generic_union;
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 2a0617e5fe2a..a3ba07fc48a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -315,7 +315,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
break;
}
case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
- getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
+ getparam->value = nvkm_device->func->resource_size(nvkm_device, NVKM_BAR1_FB);
break;
case NOUVEAU_GETPARAM_VRAM_USED: {
struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
@@ -416,7 +416,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
*/
if (nouveau_cli_uvmm(cli)) {
ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
- chan->chan->dma.ib_max);
+ chan->chan->chan.gpfifo.max);
if (ret)
goto done;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db961eade225..b96f0555ca14 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -144,6 +144,9 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
+ if (bo->base.import_attach)
+ drm_prime_gem_destroy(&bo->base, bo->sg);
+
/*
* If nouveau_bo_new() allocated this buffer, the GEM object was never
* initialized, so don't attempt to release it.
@@ -398,6 +401,83 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
return 0;
}
+void
+nouveau_bo_unpin_del(struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo = *pnvbo;
+
+ if (!nvbo)
+ return;
+
+ nouveau_bo_unmap(nvbo);
+ nouveau_bo_unpin(nvbo);
+ nouveau_bo_fini(nvbo);
+
+ *pnvbo = NULL;
+}
+
+int
+nouveau_bo_new_pin(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new(cli, size, 0, domain, 0, 0, NULL, NULL, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_pin(nvbo, domain, false);
+ if (ret) {
+ nouveau_bo_fini(nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
+int
+nouveau_bo_new_map(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new_pin(cli, domain, size, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_map(nvbo);
+ if (ret) {
+ nouveau_bo_unpin_del(&nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
+int
+nouveau_bo_new_map_gpu(struct nouveau_cli *cli, u32 domain, u32 size,
+ struct nouveau_bo **pnvbo, struct nouveau_vma **pvma)
+{
+ struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new_map(cli, domain, size, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_vma_new(nvbo, vmm, pvma);
+ if (ret) {
+ nouveau_bo_unpin_del(&nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
@@ -920,6 +1000,9 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_resource *, struct ttm_resource *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xcab5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "COPY", 4, 0xc9b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "COPY", 4, 0xc8b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc7b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc6b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -1201,7 +1284,7 @@ retry:
fallthrough; /* tiled memory */
case TTM_PL_VRAM:
reg->bus.offset = (reg->start << PAGE_SHIFT) +
- device->func->resource_addr(device, 1);
+ device->func->resource_addr(device, NVKM_BAR1_FB);
reg->bus.is_iomem = true;
/* Some BARs do not support being ioremapped WC */
@@ -1292,7 +1375,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_device *device = nvxx_device(drm);
- u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
+ u32 mappable = device->func->resource_size(device, NVKM_BAR1_FB) >> PAGE_SHIFT;
int i, ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 596a63a50a20..d59fd12268b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -9,6 +9,7 @@ struct nouveau_channel;
struct nouveau_cli;
struct nouveau_drm;
struct nouveau_fence;
+struct nouveau_vma;
struct nouveau_bo {
struct ttm_buffer_object bo;
@@ -89,6 +90,12 @@ void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo);
void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo);
+int nouveau_bo_new_pin(struct nouveau_cli *, u32 domain, u32 size, struct nouveau_bo **);
+int nouveau_bo_new_map(struct nouveau_cli *, u32 domain, u32 size, struct nouveau_bo **);
+int nouveau_bo_new_map_gpu(struct nouveau_cli *, u32 domain, u32 size,
+ struct nouveau_bo **, struct nouveau_vma **);
+void nouveau_bo_unpin_del(struct nouveau_bo **);
+
/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index cd659b9fd1d9..b1e92b1f7a26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -103,12 +103,11 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nvif_event_dtor(&chan->kill);
nvif_object_dtor(&chan->user);
nvif_mem_dtor(&chan->mem_userd);
+ nouveau_vma_del(&chan->sema.vma);
+ nouveau_bo_unpin_del(&chan->sema.bo);
nvif_object_dtor(&chan->push.ctxdma);
nouveau_vma_del(&chan->push.vma);
- nouveau_bo_unmap(chan->push.buffer);
- if (chan->push.buffer && chan->push.buffer->bo.pin_count)
- nouveau_bo_unpin(chan->push.buffer);
- nouveau_bo_fini(chan->push.buffer);
+ nouveau_bo_unpin_del(&chan->push.buffer);
kfree(chan);
}
*pchan = NULL;
@@ -163,14 +162,7 @@ nouveau_channel_prep(struct nouveau_cli *cli,
if (nouveau_vram_pushbuf)
target = NOUVEAU_GEM_DOMAIN_VRAM;
- ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
- &chan->push.buffer);
- if (ret == 0) {
- ret = nouveau_bo_pin(chan->push.buffer, target, false);
- if (ret == 0)
- ret = nouveau_bo_map(chan->push.buffer);
- }
-
+ ret = nouveau_bo_new_map(cli, target, size, &chan->push.buffer);
if (ret) {
nouveau_channel_del(pchan);
return ret;
@@ -199,8 +191,10 @@ nouveau_channel_prep(struct nouveau_cli *cli,
chan->push.addr = chan->push.vma->addr;
- if (device->info.family >= NV_DEVICE_INFO_V0_FERMI)
- return 0;
+ if (device->info.family >= NV_DEVICE_INFO_V0_FERMI) {
+ return nouveau_bo_new_map_gpu(cli, NOUVEAU_GEM_DOMAIN_GART, PAGE_SIZE,
+ &chan->sema.bo, &chan->sema.vma);
+ }
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
@@ -209,13 +203,15 @@ nouveau_channel_prep(struct nouveau_cli *cli,
} else
if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
+ struct nvkm_device *nvkm_device = nvxx_device(drm);
+
/* nv04 vram pushbuf hack, retarget to its location in
* the framebuffer bar rather than direct vram access..
* nfi why this exists, it came from the -nv ddx.
*/
args.target = NV_DMA_V0_TARGET_PCI;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = nvxx_device(drm)->func->resource_addr(nvxx_device(drm), 1);
+ args.start = nvkm_device->func->resource_addr(nvkm_device, NVKM_BAR1_FB);
args.limit = args.start + device->info.ram_user - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
@@ -253,27 +249,27 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
struct nouveau_channel **pchan)
{
const struct nvif_mclass hosts[] = {
- { AMPERE_CHANNEL_GPFIFO_B, 0 },
- { AMPERE_CHANNEL_GPFIFO_A, 0 },
- { TURING_CHANNEL_GPFIFO_A, 0 },
- { VOLTA_CHANNEL_GPFIFO_A, 0 },
- { PASCAL_CHANNEL_GPFIFO_A, 0 },
- { MAXWELL_CHANNEL_GPFIFO_A, 0 },
- { KEPLER_CHANNEL_GPFIFO_B, 0 },
- { KEPLER_CHANNEL_GPFIFO_A, 0 },
- { FERMI_CHANNEL_GPFIFO , 0 },
- { G82_CHANNEL_GPFIFO , 0 },
- { NV50_CHANNEL_GPFIFO , 0 },
- { NV40_CHANNEL_DMA , 0 },
- { NV17_CHANNEL_DMA , 0 },
- { NV10_CHANNEL_DMA , 0 },
- { NV03_CHANNEL_DMA , 0 },
+ { BLACKWELL_CHANNEL_GPFIFO_B, 0 },
+ { BLACKWELL_CHANNEL_GPFIFO_A, 0 },
+ { HOPPER_CHANNEL_GPFIFO_A, 0 },
+ { AMPERE_CHANNEL_GPFIFO_B, 0 },
+ { AMPERE_CHANNEL_GPFIFO_A, 0 },
+ { TURING_CHANNEL_GPFIFO_A, 0 },
+ { VOLTA_CHANNEL_GPFIFO_A, 0 },
+ { PASCAL_CHANNEL_GPFIFO_A, 0 },
+ { MAXWELL_CHANNEL_GPFIFO_A, 0 },
+ { KEPLER_CHANNEL_GPFIFO_B, 0 },
+ { KEPLER_CHANNEL_GPFIFO_A, 0 },
+ { FERMI_CHANNEL_GPFIFO , 0 },
+ { G82_CHANNEL_GPFIFO , 0 },
+ { NV50_CHANNEL_GPFIFO , 0 },
+ { NV40_CHANNEL_DMA , 0 },
+ { NV17_CHANNEL_DMA , 0 },
+ { NV10_CHANNEL_DMA , 0 },
+ { NV03_CHANNEL_DMA , 0 },
{}
};
- struct {
- struct nvif_chan_v0 chan;
- char name[TASK_COMM_LEN+16];
- } args;
+ DEFINE_RAW_FLEX(struct nvif_chan_v0, args, name, TASK_COMM_LEN + 16);
struct nvif_device *device = &cli->device;
struct nouveau_channel *chan;
const u64 plength = 0x10000;
@@ -298,28 +294,28 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
return ret;
/* create channel object */
- args.chan.version = 0;
- args.chan.namelen = sizeof(args.name);
- args.chan.runlist = __ffs64(runm);
- args.chan.runq = 0;
- args.chan.priv = priv;
- args.chan.devm = BIT(0);
+ args->version = 0;
+ args->namelen = __member_size(args->name);
+ args->runlist = __ffs64(runm);
+ args->runq = 0;
+ args->priv = priv;
+ args->devm = BIT(0);
if (hosts[cid].oclass < NV50_CHANNEL_GPFIFO) {
- args.chan.vmm = 0;
- args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
- args.chan.offset = chan->push.addr;
- args.chan.length = 0;
+ args->vmm = 0;
+ args->ctxdma = nvif_handle(&chan->push.ctxdma);
+ args->offset = chan->push.addr;
+ args->length = 0;
} else {
- args.chan.vmm = nvif_handle(&chan->vmm->vmm.object);
+ args->vmm = nvif_handle(&chan->vmm->vmm.object);
if (hosts[cid].oclass < FERMI_CHANNEL_GPFIFO)
- args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
+ args->ctxdma = nvif_handle(&chan->push.ctxdma);
else
- args.chan.ctxdma = 0;
- args.chan.offset = ioffset + chan->push.addr;
- args.chan.length = ilength;
+ args->ctxdma = 0;
+ args->offset = ioffset + chan->push.addr;
+ args->length = ilength;
}
- args.chan.huserd = 0;
- args.chan.ouserd = 0;
+ args->huserd = 0;
+ args->ouserd = 0;
/* allocate userd */
if (hosts[cid].oclass >= VOLTA_CHANNEL_GPFIFO_A) {
@@ -329,27 +325,28 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
if (ret)
return ret;
- args.chan.huserd = nvif_handle(&chan->mem_userd.object);
- args.chan.ouserd = 0;
+ args->huserd = nvif_handle(&chan->mem_userd.object);
+ args->ouserd = 0;
chan->userd = &chan->mem_userd.object;
} else {
chan->userd = &chan->user;
}
- snprintf(args.name, sizeof(args.name), "%s[%d]", current->comm, task_pid_nr(current));
+ snprintf(args->name, __member_size(args->name), "%s[%d]",
+ current->comm, task_pid_nr(current));
ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass,
- &args, sizeof(args), &chan->user);
+ args, __struct_size(args), &chan->user);
if (ret) {
nouveau_channel_del(pchan);
return ret;
}
- chan->runlist = args.chan.runlist;
- chan->chid = args.chan.chid;
- chan->inst = args.chan.inst;
- chan->token = args.chan.token;
+ chan->runlist = args->runlist;
+ chan->chid = args->chid;
+ chan->inst = args->inst;
+ chan->token = args->token;
return 0;
}
@@ -367,17 +364,17 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
return ret;
if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
- struct {
- struct nvif_event_v0 base;
- struct nvif_chan_event_v0 host;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_chan_event_v0));
+ struct nvif_chan_event_v0 *host =
+ (struct nvif_chan_event_v0 *)args->data;
- args.host.version = 0;
- args.host.type = NVIF_CHAN_EVENT_V0_KILLED;
+ host->version = 0;
+ host->type = NVIF_CHAN_EVENT_V0_KILLED;
ret = nvif_event_ctor(&chan->user, "abi16ChanKilled", chan->chid,
nouveau_channel_killed, false,
- &args.base, sizeof(args), &chan->kill);
+ args, __struct_size(args), &chan->kill);
if (ret == 0)
ret = nvif_event_allow(&chan->kill);
if (ret) {
@@ -433,25 +430,33 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
}
/* initialise dma tracking parameters */
- switch (chan->user.oclass) {
- case NV03_CHANNEL_DMA:
- case NV10_CHANNEL_DMA:
- case NV17_CHANNEL_DMA:
- case NV40_CHANNEL_DMA:
+ if (chan->user.oclass < NV50_CHANNEL_GPFIFO) {
chan->user_put = 0x40;
chan->user_get = 0x44;
chan->dma.max = (0x10000 / 4) - 2;
- break;
- default:
- chan->user_put = 0x40;
- chan->user_get = 0x44;
- chan->user_get_hi = 0x60;
- chan->dma.ib_base = 0x10000 / 4;
- chan->dma.ib_max = NV50_DMA_IB_MAX;
- chan->dma.ib_put = 0;
- chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
- chan->dma.max = chan->dma.ib_base;
- break;
+ } else
+ if (chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
+ ret = nvif_chan506f_ctor(&chan->chan, chan->userd->map.ptr,
+ (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000,
+ chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000);
+ if (ret)
+ return ret;
+ } else
+ if (chan->user.oclass < VOLTA_CHANNEL_GPFIFO_A) {
+ ret = nvif_chan906f_ctor(&chan->chan, chan->userd->map.ptr,
+ (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000,
+ chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000,
+ chan->sema.bo->kmap.virtual, chan->sema.vma->addr);
+ if (ret)
+ return ret;
+ } else {
+ ret = nvif_chanc36f_ctor(&chan->chan, chan->userd->map.ptr,
+ (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000,
+ chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000,
+ chan->sema.bo->kmap.virtual, chan->sema.vma->addr,
+ &drm->client.device.user, chan->token);
+ if (ret)
+ return ret;
}
chan->dma.put = 0;
@@ -520,46 +525,44 @@ nouveau_channels_fini(struct nouveau_drm *drm)
int
nouveau_channels_init(struct nouveau_drm *drm)
{
- struct {
- struct nv_device_info_v1 m;
- struct {
- struct nv_device_info_v1_data channels;
- struct nv_device_info_v1_data runlists;
- } v;
- } args = {
- .m.version = 1,
- .m.count = sizeof(args.v) / sizeof(args.v.channels),
- .v.channels.mthd = NV_DEVICE_HOST_CHANNELS,
- .v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS,
- };
+ DEFINE_RAW_FLEX(struct nv_device_info_v1, args, data, 2);
+ struct nv_device_info_v1_data *channels = &args->data[0];
+ struct nv_device_info_v1_data *runlists = &args->data[1];
struct nvif_object *device = &drm->client.device.object;
int ret, i;
- ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
+ args->version = 1;
+ args->count = __member_size(args->data) / sizeof(*args->data);
+ channels->mthd = NV_DEVICE_HOST_CHANNELS;
+ runlists->mthd = NV_DEVICE_HOST_RUNLISTS;
+
+ ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, args,
+ __struct_size(args));
if (ret ||
- args.v.runlists.mthd == NV_DEVICE_INFO_INVALID || !args.v.runlists.data ||
- args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
+ runlists->mthd == NV_DEVICE_INFO_INVALID || !runlists->data ||
+ channels->mthd == NV_DEVICE_INFO_INVALID)
return -ENODEV;
- drm->chan_nr = drm->chan_total = args.v.channels.data;
- drm->runl_nr = fls64(args.v.runlists.data);
+ drm->chan_nr = drm->chan_total = channels->data;
+ drm->runl_nr = fls64(runlists->data);
drm->runl = kcalloc(drm->runl_nr, sizeof(*drm->runl), GFP_KERNEL);
if (!drm->runl)
return -ENOMEM;
if (drm->chan_nr == 0) {
for (i = 0; i < drm->runl_nr; i++) {
- if (!(args.v.runlists.data & BIT(i)))
+ if (!(runlists->data & BIT(i)))
continue;
- args.v.channels.mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS;
- args.v.channels.data = i;
+ channels->mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS;
+ channels->data = i;
- ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
- if (ret || args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
+ ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, args,
+ __struct_size(args));
+ if (ret || channels->mthd == NV_DEVICE_INFO_INVALID)
return -ENODEV;
- drm->runl[i].chan_nr = args.v.channels.data;
+ drm->runl[i].chan_nr = channels->data;
drm->runl[i].chan_id_base = drm->chan_total;
drm->runl[i].context_base = dma_fence_context_alloc(drm->runl[i].chan_nr);
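
The conversions above (and the matching ones in nouveau_fence.c, nouveau_svm.c, nvif/conn.c and nvif/outp.c below) all follow one pattern: an on-stack wrapper struct with a fixed trailing buffer is replaced by DEFINE_RAW_FLEX(), which reserves storage for a flexible-array structure plus a compile-time count of trailing elements and yields a pointer to it, with __member_size()/__struct_size() recovering the trailing-member and total sizes. A minimal sketch of the pattern, using a hypothetical struct foo_v0 and foo_send() that are not part of this series:

/* Sketch only -- struct foo_v0 and foo_send() are hypothetical stand-ins. */
#include <linux/kernel.h>
#include <linux/overflow.h>	/* DEFINE_RAW_FLEX() */
#include <linux/types.h>

struct foo_v0 {
	__u8 version;
	__u8 namelen;
	char name[];		/* flexible array member */
};

int foo_send(const void *argv, u32 argc);	/* stand-in for nvif_object_ctor() etc. */

static int foo_example(void)
{
	/* Storage for struct foo_v0 plus 16 bytes of "name"; args points at it. */
	DEFINE_RAW_FLEX(struct foo_v0, args, name, 16);

	args->version = 0;
	args->namelen = __member_size(args->name);		/* 16 */
	snprintf(args->name, __member_size(args->name), "example");

	/* __struct_size(args) == sizeof(struct foo_v0) + 16 */
	return foo_send(args, __struct_size(args));
}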
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 016f668c0bc1..561877725aac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -3,13 +3,11 @@
#define __NOUVEAU_CHAN_H__
#include <nvif/object.h>
#include <nvif/event.h>
-#include <nvif/push.h>
+#include <nvif/chan.h>
struct nvif_device;
struct nouveau_channel {
- struct {
- struct nvif_push push;
- } chan;
+ struct nvif_chan chan;
struct nouveau_cli *cli;
struct nouveau_vmm *vmm;
@@ -41,15 +39,15 @@ struct nouveau_channel {
int free;
int cur;
int put;
- int ib_base;
- int ib_max;
- int ib_free;
- int ib_put;
} dma;
- u32 user_get_hi;
u32 user_get;
u32 user_put;
+ struct {
+ struct nouveau_bo *bo;
+ struct nouveau_vma *vma;
+ } sema;
+
struct nvif_object user;
struct nvif_object blit;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1b10c6c12f46..63621b1510f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1401,6 +1401,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
nv_connector->aux.drm_dev = dev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
nv_connector->aux.name = connector->name;
+ if (disp->disp.object.oclass >= GB202_DISP)
+ nv_connector->aux.no_zero_sized = true;
drm_dp_aux_init(&nv_connector->aux);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index a1f329ef0641..017a803121d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -43,8 +43,6 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
uint64_t val;
val = nvif_rd32(chan->userd, chan->user_get);
- if (chan->user_get_hi)
- val |= (uint64_t)nvif_rd32(chan->userd, chan->user_get_hi) << 32;
/* reset counter as long as GET is still advancing, this is
* to avoid misdetecting a GPU lockup if the GPU happens to
@@ -68,111 +66,12 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
return (val - chan->push.addr) >> 2;
}
-void
-nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
- bool no_prefetch)
-{
- struct nvif_user *user = &chan->cli->drm->client.device.user;
- struct nouveau_bo *pb = chan->push.buffer;
- int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
-
- BUG_ON(chan->dma.ib_free < 1);
- WARN_ON(length > NV50_DMA_PUSH_MAX_LENGTH);
-
- nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
- nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8 |
- (no_prefetch ? (1 << 31) : 0));
-
- chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
-
- mb();
- /* Flush writes. */
- nouveau_bo_rd32(pb, 0);
-
- nvif_wr32(chan->userd, 0x8c, chan->dma.ib_put);
- if (user->func && user->func->doorbell)
- user->func->doorbell(user, chan->token);
- chan->dma.ib_free--;
-}
-
-static int
-nv50_dma_push_wait(struct nouveau_channel *chan, int count)
-{
- uint32_t cnt = 0, prev_get = 0;
-
- while (chan->dma.ib_free < count) {
- uint32_t get = nvif_rd32(chan->userd, 0x88);
- if (get != prev_get) {
- prev_get = get;
- cnt = 0;
- }
-
- if ((++cnt & 0xff) == 0) {
- udelay(1);
- if (cnt > 100000)
- return -EBUSY;
- }
-
- chan->dma.ib_free = get - chan->dma.ib_put;
- if (chan->dma.ib_free <= 0)
- chan->dma.ib_free += chan->dma.ib_max;
- }
-
- return 0;
-}
-
-static int
-nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
-{
- uint64_t prev_get = 0;
- int ret, cnt = 0;
-
- ret = nv50_dma_push_wait(chan, slots + 1);
- if (unlikely(ret))
- return ret;
-
- while (chan->dma.free < count) {
- int get = READ_GET(chan, &prev_get, &cnt);
- if (unlikely(get < 0)) {
- if (get == -EINVAL)
- continue;
-
- return get;
- }
-
- if (get <= chan->dma.cur) {
- chan->dma.free = chan->dma.max - chan->dma.cur;
- if (chan->dma.free >= count)
- break;
-
- FIRE_RING(chan);
- do {
- get = READ_GET(chan, &prev_get, &cnt);
- if (unlikely(get < 0)) {
- if (get == -EINVAL)
- continue;
- return get;
- }
- } while (get == 0);
- chan->dma.cur = 0;
- chan->dma.put = 0;
- }
-
- chan->dma.free = get - chan->dma.cur - 1;
- }
-
- return 0;
-}
-
int
-nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
+nouveau_dma_wait(struct nouveau_channel *chan, int size)
{
uint64_t prev_get = 0;
int cnt = 0, get;
- if (chan->dma.ib_max)
- return nv50_dma_wait(chan, slots, size);
-
while (chan->dma.free < size) {
get = READ_GET(chan, &prev_get, &cnt);
if (unlikely(get == -EBUSY))
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index c52cda82353e..0e27b76d1e1c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -30,9 +30,7 @@
#include "nouveau_bo.h"
#include "nouveau_chan.h"
-int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length,
- bool no_prefetch);
+int nouveau_dma_wait(struct nouveau_channel *, int size);
/*
* There's a hw race condition where you can't jump to your PUT offset,
@@ -67,7 +65,7 @@ RING_SPACE(struct nouveau_channel *chan, int size)
{
int ret;
- ret = nouveau_dma_wait(chan, 1, size);
+ ret = nouveau_dma_wait(chan, size);
if (ret)
return ret;
@@ -94,12 +92,7 @@ FIRE_RING(struct nouveau_channel *chan)
return;
chan->accel_done = true;
- if (chan->dma.ib_max) {
- nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2),
- (chan->dma.cur - chan->dma.put) << 2, false);
- } else {
- WRITE_PUT(chan->dma.cur);
- }
+ WRITE_PUT(chan->dma.cur);
chan->dma.put = chan->dma.cur;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 61d0f411ef84..ca4932a150e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -256,20 +256,15 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
chunk->pagemap.owner = drm->dev;
- ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
- NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
- &chunk->bo);
+ ret = nouveau_bo_new_pin(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, DMEM_CHUNK_SIZE,
+ &chunk->bo);
if (ret)
goto out_release;
- ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (ret)
- goto out_bo_free;
-
ptr = memremap_pages(&chunk->pagemap, numa_node_id());
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
- goto out_bo_unpin;
+ goto out_bo_free;
}
mutex_lock(&drm->dmem->mutex);
@@ -292,10 +287,8 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
return 0;
-out_bo_unpin:
- nouveau_bo_unpin(chunk->bo);
out_bo_free:
- nouveau_bo_fini(chunk->bo);
+ nouveau_bo_unpin_del(&chunk->bo);
out_release:
release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
@@ -426,8 +419,7 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
nouveau_dmem_evict_chunk(chunk);
- nouveau_bo_unpin(chunk->bo);
- nouveau_bo_fini(chunk->bo);
+ nouveau_bo_unpin_del(&chunk->bo);
WARN_ON(chunk->callocated);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e154d08857c5..0c82a63cd49d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -503,11 +503,16 @@ nouveau_accel_init(struct nouveau_drm *drm)
case KEPLER_CHANNEL_GPFIFO_B:
case MAXWELL_CHANNEL_GPFIFO_A:
case PASCAL_CHANNEL_GPFIFO_A:
+ ret = nvc0_fence_create(drm);
+ break;
case VOLTA_CHANNEL_GPFIFO_A:
case TURING_CHANNEL_GPFIFO_A:
case AMPERE_CHANNEL_GPFIFO_A:
case AMPERE_CHANNEL_GPFIFO_B:
- ret = nvc0_fence_create(drm);
+ case HOPPER_CHANNEL_GPFIFO_A:
+ case BLACKWELL_CHANNEL_GPFIFO_A:
+ case BLACKWELL_CHANNEL_GPFIFO_B:
+ ret = gv100_fence_create(drm);
break;
default:
break;
@@ -1079,6 +1084,10 @@ nouveau_pmops_freeze(struct device *dev)
{
struct nouveau_drm *drm = dev_get_drvdata(dev);
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return 0;
+
return nouveau_do_suspend(drm, false);
}
@@ -1087,6 +1096,10 @@ nouveau_pmops_thaw(struct device *dev)
{
struct nouveau_drm *drm = dev_get_drvdata(dev);
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return 0;
+
return nouveau_do_resume(drm, false);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index a0b5f1b16e8b..41b7c608c905 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -10,6 +10,8 @@
#include "nouveau_sched.h"
#include "nouveau_uvmm.h"
+#include <nvif/class.h>
+
/**
* DOC: Overview
*
@@ -131,7 +133,7 @@ nouveau_exec_job_run(struct nouveau_job *job)
struct nouveau_fence *fence = exec_job->fence;
int i, ret;
- ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16);
+ ret = nvif_chan_gpfifo_wait(&chan->chan, exec_job->push.count + 1, 16);
if (ret) {
NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
return ERR_PTR(ret);
@@ -141,9 +143,11 @@ nouveau_exec_job_run(struct nouveau_job *job)
struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;
- nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
+ nvif_chan_gpfifo_push(&chan->chan, p->va, p->va_len, no_prefetch);
}
+ nvif_chan_gpfifo_post(&chan->chan);
+
ret = nouveau_fence_emit(fence);
if (ret) {
nouveau_fence_unref(&exec_job->fence);
@@ -375,10 +379,10 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
if (unlikely(atomic_read(&chan->killed)))
return nouveau_abi16_put(abi16, -ENODEV);
- if (!chan->dma.ib_max)
+ if (chan->user.oclass < NV50_CHANNEL_GPFIFO)
return nouveau_abi16_put(abi16, -ENOSYS);
- push_max = nouveau_exec_push_max_from_ib_max(chan->dma.ib_max);
+ push_max = nouveau_exec_push_max_from_ib_max(chan->chan.gpfifo.max);
if (unlikely(req->push_count > push_max)) {
NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->push_count, push_max);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 7cc84472cece..6ded8c2b6d3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -90,7 +90,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
- if (error)
+ if (error && !dma_fence_is_signaled_locked(&fence->base))
dma_fence_set_error(&fence->base, error);
if (nouveau_fence_signal(fence))
@@ -184,10 +184,10 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
struct nouveau_cli *cli = chan->cli;
struct nouveau_drm *drm = cli->drm;
struct nouveau_fence_priv *priv = (void*)drm->fence;
- struct {
- struct nvif_event_v0 base;
- struct nvif_chan_event_v0 host;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_chan_event_v0));
+ struct nvif_chan_event_v0 *host =
+ (struct nvif_chan_event_v0 *)args->data;
int ret;
INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);
@@ -207,12 +207,12 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
if (!priv->uevent)
return;
- args.host.version = 0;
- args.host.type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR;
+ host->version = 0;
+ host->type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR;
ret = nvif_event_ctor(&chan->user, "fenceNonStallIntr", (chan->runlist << 16) | chan->chid,
nouveau_fence_wait_uevent_handler, false,
- &args.base, sizeof(args), &fctx->event);
+ args, __struct_size(args), &fctx->event);
WARN_ON(ret);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 8bc065acfe35..6a983dd9f7b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -83,6 +83,7 @@ void nv17_fence_resume(struct nouveau_drm *drm);
int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);
int nvc0_fence_create(struct nouveau_drm *);
+int gv100_fence_create(struct nouveau_drm *);
struct nv84_fence_chan {
struct nouveau_fence_chan base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9ae2cee1c7c5..690e10fbf0bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -87,9 +87,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
}
- if (gem->import_attach)
- drm_prime_gem_destroy(gem, nvbo->bo.sg);
-
ttm_bo_put(&nvbo->bo);
pm_runtime_mark_last_busy(dev);
@@ -853,8 +850,8 @@ revalidate:
}
}
- if (chan->dma.ib_max) {
- ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
+ if (chan->user.oclass >= NV50_CHANNEL_GPFIFO) {
+ ret = nvif_chan_gpfifo_wait(&chan->chan, req->nr_push + 1, 16);
if (ret) {
NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
goto out;
@@ -867,8 +864,10 @@ revalidate:
u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
- nv50_dma_push(chan, addr, length, no_prefetch);
+ nvif_chan_gpfifo_push(&chan->chan, addr, length, no_prefetch);
}
+
+ nvif_chan_gpfifo_post(&chan->chan);
} else
if (drm->client.device.info.chipset >= 0x25) {
ret = PUSH_WAIT(&chan->chan.push, req->nr_push * 2);
@@ -961,7 +960,7 @@ out_prevalid:
u_free(push);
out_next:
- if (chan->dma.ib_max) {
+ if (chan->user.oclass >= NV50_CHANNEL_GPFIFO) {
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index e12e2596ed84..6fa387da0637 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -720,10 +720,7 @@ nouveau_svm_fault(struct work_struct *work)
struct nouveau_svm *svm = container_of(buffer, typeof(*svm), buffer[buffer->id]);
struct nvif_object *device = &svm->drm->client.device.object;
struct nouveau_svmm *svmm;
- struct {
- struct nouveau_pfnmap_args i;
- u64 phys[1];
- } args;
+ DEFINE_RAW_FLEX(struct nouveau_pfnmap_args, args, p.phys, 1);
unsigned long hmm_flags;
u64 inst, start, limit;
int fi, fn;
@@ -772,11 +769,11 @@ nouveau_svm_fault(struct work_struct *work)
mutex_unlock(&svm->mutex);
/* Process list of faults. */
- args.i.i.version = 0;
- args.i.i.type = NVIF_IOCTL_V0_MTHD;
- args.i.m.version = 0;
- args.i.m.method = NVIF_VMM_V0_PFNMAP;
- args.i.p.version = 0;
+ args->i.version = 0;
+ args->i.type = NVIF_IOCTL_V0_MTHD;
+ args->m.version = 0;
+ args->m.method = NVIF_VMM_V0_PFNMAP;
+ args->p.version = 0;
for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
struct svm_notifier notifier;
@@ -802,9 +799,9 @@ nouveau_svm_fault(struct work_struct *work)
* fault window, determining required pages and access
* permissions based on pending faults.
*/
- args.i.p.addr = start;
- args.i.p.page = PAGE_SHIFT;
- args.i.p.size = PAGE_SIZE;
+ args->p.addr = start;
+ args->p.page = PAGE_SHIFT;
+ args->p.size = PAGE_SIZE;
/*
* Determine required permissions based on GPU fault
* access flags.
@@ -832,16 +829,16 @@ nouveau_svm_fault(struct work_struct *work)
notifier.svmm = svmm;
if (atomic)
- ret = nouveau_atomic_range_fault(svmm, svm->drm,
- &args.i, sizeof(args),
+ ret = nouveau_atomic_range_fault(svmm, svm->drm, args,
+ __struct_size(args),
&notifier);
else
- ret = nouveau_range_fault(svmm, svm->drm, &args.i,
- sizeof(args), hmm_flags,
- &notifier);
+ ret = nouveau_range_fault(svmm, svm->drm, args,
+ __struct_size(args),
+ hmm_flags, &notifier);
mmput(mm);
- limit = args.i.p.addr + args.i.p.size;
+ limit = args->p.addr + args->p.size;
for (fn = fi; ++fn < buffer->fault_nr; ) {
/* It's okay to skip over duplicate addresses from the
* same SVMM as faults are ordered by access type such
@@ -855,14 +852,14 @@ nouveau_svm_fault(struct work_struct *work)
if (buffer->fault[fn]->svmm != svmm ||
buffer->fault[fn]->addr >= limit ||
(buffer->fault[fi]->access == FAULT_ACCESS_READ &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
(buffer->fault[fi]->access != FAULT_ACCESS_READ &&
buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
(buffer->fault[fi]->access != FAULT_ACCESS_READ &&
buffer->fault[fi]->access != FAULT_ACCESS_WRITE &&
buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index e244927eb5d4..7d2436e5d50d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -312,8 +312,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* VRAM init */
drm->gem.vram_available = drm->client.device.info.ram_user;
- arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
- device->func->resource_size(device, 1));
+ arch_io_reserve_memtype_wc(device->func->resource_addr(device, NVKM_BAR1_FB),
+ device->func->resource_size(device, NVKM_BAR1_FB));
ret = nouveau_ttm_init_vram(drm);
if (ret) {
@@ -321,8 +321,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
return ret;
}
- drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
- device->func->resource_size(device, 1));
+ drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, NVKM_BAR1_FB),
+ device->func->resource_size(device, NVKM_BAR1_FB));
/* GART init */
if (!drm->agp.bridge) {
@@ -357,7 +357,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
- arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
- device->func->resource_size(device, 1));
+ arch_io_free_memtype_wc(device->func->resource_addr(device, NVKM_BAR1_FB),
+ device->func->resource_size(device, NVKM_BAR1_FB));
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 8c73f40e3bda..40ee95340814 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -85,10 +85,8 @@ void
nv10_fence_destroy(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv = drm->fence;
- nouveau_bo_unmap(priv->bo);
- if (priv->bo)
- nouveau_bo_unpin(priv->bo);
- nouveau_bo_fini(priv->bo);
+
+ nouveau_bo_unpin_del(&priv->bo);
drm->fence = NULL;
kfree(priv);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index d09bfd11369f..1b0c0aa3c305 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -130,20 +130,7 @@ nv17_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
- NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, NULL, NULL, &priv->bo);
- if (!ret) {
- ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (!ret) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_fini(priv->bo);
- }
-
+ ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &priv->bo);
if (ret) {
nv10_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 62e28dddf87c..e1f0e8adf313 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -81,20 +81,7 @@ nv50_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
- NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, NULL, NULL, &priv->bo);
- if (!ret) {
- ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (!ret) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_fini(priv->bo);
- }
-
+ ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &priv->bo);
if (ret) {
nv10_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index aa7dd0c5d917..1765b2cedaf9 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -185,10 +185,8 @@ static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
- nouveau_bo_unmap(priv->bo);
- if (priv->bo)
- nouveau_bo_unpin(priv->bo);
- nouveau_bo_fini(priv->bo);
+
+ nouveau_bo_unpin_del(&priv->bo);
drm->fence = NULL;
kfree(priv);
}
@@ -222,19 +220,8 @@ nv84_fence_create(struct nouveau_drm *drm)
* will lose CPU/GPU coherency!
*/
NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
- ret = nouveau_bo_new(&drm->client, 16 * drm->chan_total, 0,
- domain, 0, 0, NULL, NULL, &priv->bo);
- if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo, domain, false);
- if (ret == 0) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_fini(priv->bo);
- }
+ ret = nouveau_bo_new_map(&drm->client, domain, 16 * drm->chan_total, &priv->bo);
if (ret)
nv84_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index b7963a39dd91..198889c20ce1 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -14,6 +14,12 @@ nvif-y += nvif/outp.o
nvif-y += nvif/timer.o
nvif-y += nvif/vmm.o
+# Channel classes
+nvif-y += nvif/chan.o
+nvif-y += nvif/chan506f.o
+nvif-y += nvif/chan906f.o
+nvif-y += nvif/chanc36f.o
+
# Usermode classes
nvif-y += nvif/user.o
nvif-y += nvif/userc361.o
diff --git a/drivers/gpu/drm/nouveau/nvif/chan.c b/drivers/gpu/drm/nouveau/nvif/chan.c
new file mode 100644
index 000000000000..baa10227d51a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chan.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+
+static void
+nvif_chan_gpfifo_push_kick(struct nvif_push *push)
+{
+ struct nvif_chan *chan = container_of(push, typeof(*chan), push);
+ u32 put = push->bgn - (u32 *)chan->push.mem.object.map.ptr;
+ u32 cnt;
+
+ if (chan->func->gpfifo.post) {
+ if (push->end - push->cur < chan->func->gpfifo.post_size)
+ push->end = push->cur + chan->func->gpfifo.post_size;
+
+ WARN_ON(nvif_chan_gpfifo_post(chan));
+ }
+
+ cnt = push->cur - push->bgn;
+
+ chan->func->gpfifo.push(chan, true, chan->push.addr + (put << 2), cnt << 2, false);
+ chan->func->gpfifo.kick(chan);
+}
+
+static int
+nvif_chan_gpfifo_push_wait(struct nvif_push *push, u32 push_nr)
+{
+ struct nvif_chan *chan = container_of(push, typeof(*chan), push);
+
+ return nvif_chan_gpfifo_wait(chan, 1, push_nr);
+}
+
+int
+nvif_chan_gpfifo_post(struct nvif_chan *chan)
+{
+ const u32 *map = chan->push.mem.object.map.ptr;
+ const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size;
+ const u32 gpptr = (chan->gpfifo.cur + 1) & chan->gpfifo.max;
+
+ return chan->func->gpfifo.post(chan, gpptr, pbptr);
+}
+
+void
+nvif_chan_gpfifo_push(struct nvif_chan *chan, u64 addr, u32 size, bool no_prefetch)
+{
+ chan->func->gpfifo.push(chan, false, addr, size, no_prefetch);
+}
+
+int
+nvif_chan_gpfifo_wait(struct nvif_chan *chan, u32 gpfifo_nr, u32 push_nr)
+{
+ struct nvif_push *push = &chan->push;
+ int ret = 0, time = 1000000;
+
+ if (gpfifo_nr) {
+ /* Account for pushbuf space needed by nvif_chan_gpfifo_post(),
+ * if used after pushing userspace GPFIFO entries.
+ */
+ if (chan->func->gpfifo.post)
+ push_nr += chan->func->gpfifo.post_size;
+ }
+
+ /* Account for the GPFIFO entry needed to submit pushbuf. */
+ if (push_nr)
+ gpfifo_nr++;
+
+ /* Wait for space in main push buffer. */
+ if (push->cur + push_nr > push->end) {
+ ret = nvif_chan_dma_wait(chan, push_nr);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait for GPFIFO space. */
+ while (chan->gpfifo.free < gpfifo_nr) {
+ chan->gpfifo.free = chan->func->gpfifo.read_get(chan) - chan->gpfifo.cur - 1;
+ if (chan->gpfifo.free < 0)
+ chan->gpfifo.free += chan->gpfifo.max + 1;
+
+ if (chan->gpfifo.free < gpfifo_nr) {
+ if (!time--)
+ return -ETIMEDOUT;
+ udelay(1);
+ }
+ }
+
+ return 0;
+}
+
+void
+nvif_chan_gpfifo_ctor(const struct nvif_chan_func *func, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, struct nvif_chan *chan)
+{
+ chan->func = func;
+
+ chan->userd.map.ptr = userd;
+
+ chan->gpfifo.map.ptr = gpfifo;
+ chan->gpfifo.max = (gpfifo_size >> 3) - 1;
+ chan->gpfifo.free = chan->gpfifo.max;
+
+ chan->push.mem.object.map.ptr = push;
+ chan->push.wait = nvif_chan_gpfifo_push_wait;
+ chan->push.kick = nvif_chan_gpfifo_push_kick;
+ chan->push.addr = push_addr;
+ chan->push.hw.max = push_size >> 2;
+ chan->push.bgn = chan->push.cur = chan->push.end = push;
+}
+
+int
+nvif_chan_dma_wait(struct nvif_chan *chan, u32 nr)
+{
+ struct nvif_push *push = &chan->push;
+ u32 cur = push->cur - (u32 *)push->mem.object.map.ptr;
+ u32 free, time = 1000000;
+
+ nr += chan->func->gpfifo.post_size;
+
+ do {
+ u32 get = chan->func->push.read_get(chan);
+
+ if (get <= cur) {
+ free = push->hw.max - cur;
+ if (free >= nr)
+ break;
+
+ PUSH_KICK(push);
+
+ while (get == 0) {
+ get = chan->func->push.read_get(chan);
+ if (get == 0) {
+ if (!time--)
+ return -ETIMEDOUT;
+ udelay(1);
+ }
+ }
+
+ cur = 0;
+ }
+
+ free = get - cur - 1;
+
+ if (free < nr) {
+ if (!time--)
+ return -ETIMEDOUT;
+ udelay(1);
+ }
+ } while (free < nr);
+
+ push->bgn = (u32 *)push->mem.object.map.ptr + cur;
+ push->cur = push->bgn;
+ push->end = push->bgn + free - chan->func->gpfifo.post_size;
+ return 0;
+}
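
The GPFIFO space check in nvif_chan_gpfifo_wait() above is standard ring-buffer arithmetic: one slot is always left unused so that GET == PUT means "empty", and the free count wraps modulo the ring size. A self-contained sketch of that computation (names and values are illustrative, not taken from the driver):

#include <linux/types.h>

/* Illustrative only: free entries in a GPFIFO ring of (max + 1) slots,
 * with one slot kept unused so GET == PUT always means "empty".
 */
static inline s32 gpfifo_free_entries(u32 get, u32 cur, u32 max)
{
	s32 free = (s32)(get - cur - 1);

	if (free < 0)
		free += max + 1;	/* wrap around the ring */
	return free;
}

/* e.g. an 8-entry ring (max = 7), cur = 6, get = 2:
 * 2 - 6 - 1 wraps to -5, and -5 + 8 = 3 entries are still available.
 */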
diff --git a/drivers/gpu/drm/nouveau/nvif/chan506f.c b/drivers/gpu/drm/nouveau/nvif/chan506f.c
new file mode 100644
index 000000000000..d3900887c4a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chan506f.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+
+void
+nvif_chan506f_gpfifo_kick(struct nvif_chan *chan)
+{
+ wmb();
+ nvif_wr32(&chan->userd, 0x8c, chan->gpfifo.cur);
+}
+
+void
+nvif_chan506f_gpfifo_push(struct nvif_chan *chan, bool main, u64 addr, u32 size, bool no_prefetch)
+{
+ u32 gpptr = chan->gpfifo.cur << 3;
+
+ if (WARN_ON(!chan->gpfifo.free))
+ return;
+
+ nvif_wr32(&chan->gpfifo, gpptr + 0, lower_32_bits(addr));
+ nvif_wr32(&chan->gpfifo, gpptr + 4, upper_32_bits(addr) |
+ (main ? 0 : BIT(9)) |
+ (size >> 2) << 10 |
+ (no_prefetch ? BIT(31) : 0));
+
+ chan->gpfifo.cur = (chan->gpfifo.cur + 1) & chan->gpfifo.max;
+ chan->gpfifo.free--;
+ if (!chan->gpfifo.free)
+ chan->push.end = chan->push.cur;
+}
+
+static u32
+nvif_chan506f_gpfifo_read_get(struct nvif_chan *chan)
+{
+ return nvif_rd32(&chan->userd, 0x88);
+}
+
+static u32
+nvif_chan506f_read_get(struct nvif_chan *chan)
+{
+ u32 tlgetlo = nvif_rd32(&chan->userd, 0x58);
+ u32 tlgethi = nvif_rd32(&chan->userd, 0x5c);
+ struct nvif_push *push = &chan->push;
+
+ /* Update cached GET pointer if TOP_LEVEL_GET is valid. */
+ if (tlgethi & BIT(31)) {
+ u64 tlget = ((u64)(tlgethi & 0xff) << 32) | tlgetlo;
+
+ push->hw.get = (tlget - push->addr) >> 2;
+ }
+
+ return push->hw.get;
+}
+
+static const struct nvif_chan_func
+nvif_chan506f = {
+ .push.read_get = nvif_chan506f_read_get,
+ .gpfifo.read_get = nvif_chan506f_gpfifo_read_get,
+ .gpfifo.push = nvif_chan506f_gpfifo_push,
+ .gpfifo.kick = nvif_chan506f_gpfifo_kick,
+};
+
+int
+nvif_chan506f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size)
+{
+ nvif_chan_gpfifo_ctor(&nvif_chan506f, userd, gpfifo, gpfifo_size,
+ push, push_addr, push_size, chan);
+ return 0;
+}
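
For reference, nvif_chan506f_gpfifo_push() above writes one 64-bit GPFIFO entry per submission; a worked example of the encoding, using made-up values that are not taken from the patch:

/* Illustrative values only:
 *
 *   addr = 0x20001000, size = 0x40 bytes, main = false, no_prefetch = false
 *
 *   word0 = lower_32_bits(addr)                        = 0x20001000
 *   word1 = upper_32_bits(addr)                        = 0x00000000
 *         | BIT(9)               (entry pushed with main == false)
 *         | (size >> 2) << 10    (length in 32-bit words: 0x10 << 10 = 0x4000)
 *                                                       => 0x00004200
 */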
diff --git a/drivers/gpu/drm/nouveau/nvif/chan906f.c b/drivers/gpu/drm/nouveau/nvif/chan906f.c
new file mode 100644
index 000000000000..c9cfb85179b0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chan906f.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+#include <nvif/user.h>
+#include <nvif/push906f.h>
+
+#include <nvhw/class/cl906f.h>
+
+/* Limits GPFIFO size to 1MiB, and "main" push buffer size to 64KiB. */
+#define NVIF_CHAN906F_PBPTR_BITS 15
+#define NVIF_CHAN906F_PBPTR_MASK ((1 << NVIF_CHAN906F_PBPTR_BITS) - 1)
+
+#define NVIF_CHAN906F_GPPTR_SHIFT NVIF_CHAN906F_PBPTR_BITS
+#define NVIF_CHAN906F_GPPTR_BITS (32 - NVIF_CHAN906F_PBPTR_BITS)
+#define NVIF_CHAN906F_GPPTR_MASK ((1 << NVIF_CHAN906F_GPPTR_BITS) - 1)
+
+#define NVIF_CHAN906F_SEM_RELEASE_SIZE 5
+
+static int
+nvif_chan906f_sem_release(struct nvif_chan *chan, u64 addr, u32 data)
+{
+ struct nvif_push *push = &chan->push;
+ int ret;
+
+ ret = PUSH_WAIT(push, NVIF_CHAN906F_SEM_RELEASE_SIZE);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV906F, SEMAPHOREA,
+ NVVAL(NV906F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(addr)),
+
+ SEMAPHOREB, lower_32_bits(addr),
+
+ SEMAPHOREC, data,
+
+ SEMAPHORED,
+ NVDEF(NV906F, SEMAPHORED, OPERATION, RELEASE) |
+ NVDEF(NV906F, SEMAPHORED, RELEASE_WFI, DIS) |
+ NVDEF(NV906F, SEMAPHORED, RELEASE_SIZE, 16BYTE));
+
+ return 0;
+}
+
+int
+nvif_chan906f_gpfifo_post(struct nvif_chan *chan, u32 gpptr, u32 pbptr)
+{
+ return chan->func->sem.release(chan, chan->sema.addr,
+ (gpptr << NVIF_CHAN906F_GPPTR_SHIFT) | pbptr);
+}
+
+u32
+nvif_chan906f_gpfifo_read_get(struct nvif_chan *chan)
+{
+ return nvif_rd32(&chan->sema, 0) >> NVIF_CHAN906F_GPPTR_SHIFT;
+}
+
+u32
+nvif_chan906f_read_get(struct nvif_chan *chan)
+{
+ return nvif_rd32(&chan->sema, 0) & NVIF_CHAN906F_PBPTR_MASK;
+}
+
+static const struct nvif_chan_func
+nvif_chan906f = {
+ .push.read_get = nvif_chan906f_read_get,
+ .gpfifo.read_get = nvif_chan906f_gpfifo_read_get,
+ .gpfifo.push = nvif_chan506f_gpfifo_push,
+ .gpfifo.kick = nvif_chan506f_gpfifo_kick,
+ .gpfifo.post = nvif_chan906f_gpfifo_post,
+ .gpfifo.post_size = NVIF_CHAN906F_SEM_RELEASE_SIZE,
+ .sem.release = nvif_chan906f_sem_release,
+};
+
+int
+nvif_chan906f_ctor_(const struct nvif_chan_func *func, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_chan *chan)
+{
+ nvif_chan_gpfifo_ctor(func, userd, gpfifo, gpfifo_size, push, push_addr, push_size, chan);
+ chan->sema.map.ptr = sema;
+ chan->sema.addr = sema_addr;
+ return 0;
+}
+
+int
+nvif_chan906f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr)
+{
+ return nvif_chan906f_ctor_(&nvif_chan906f, userd, gpfifo, gpfifo_size,
+ push, push_addr, push_size, sema, sema_addr, chan);
+}
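
The post/read_get pairing above works because the semaphore release payload packs both ring positions into a single 32-bit word, per the PBPTR/GPPTR defines at the top of the file: nvif_chan906f_gpfifo_post() releases (gpptr << 15) | pbptr, and the two read_get callbacks simply unpack it. A minimal sketch of the decode side (the helper name is illustrative, not part of the patch):

/* Sketch only; assumes the NVIF_CHAN906F_* defines above are in scope. */
static inline void chan906f_decode_get(u32 payload, u32 *pb_get, u32 *gp_get)
{
	*pb_get = payload & NVIF_CHAN906F_PBPTR_MASK;	/* pushbuf GET, low 15 bits  */
	*gp_get = payload >> NVIF_CHAN906F_GPPTR_SHIFT;	/* GPFIFO GET, high 17 bits */
}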
diff --git a/drivers/gpu/drm/nouveau/nvif/chanc36f.c b/drivers/gpu/drm/nouveau/nvif/chanc36f.c
new file mode 100644
index 000000000000..ca02b939c3fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chanc36f.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+#include <nvif/user.h>
+
+#include <nvif/push906f.h>
+#include <nvhw/class/clc36f.h>
+
+static void
+nvif_chanc36f_gpfifo_kick(struct nvif_chan *chan)
+{
+ struct nvif_user *usermode = chan->usermode;
+
+ nvif_wr32(&chan->userd, 0x8c, chan->gpfifo.cur);
+
+ wmb(); /* ensure CPU writes are flushed to BAR1 */
+ nvif_rd32(&chan->userd, 0); /* ensure BAR1 writes are flushed to vidmem */
+
+ usermode->func->doorbell(usermode, chan->doorbell_token);
+}
+
+#define NVIF_CHANC36F_SEM_RELEASE_SIZE 6
+
+static int
+nvif_chanc36f_sem_release(struct nvif_chan *chan, u64 addr, u32 data)
+{
+ struct nvif_push *push = &chan->push;
+ int ret;
+
+ ret = PUSH_WAIT(push, NVIF_CHANC36F_SEM_RELEASE_SIZE);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(addr),
+
+ SEM_ADDR_HI, upper_32_bits(addr),
+
+ SEM_PAYLOAD_LO, data);
+
+ PUSH_MTHD(push, NVC36F, SEM_EXECUTE,
+ NVDEF(NVC36F, SEM_EXECUTE, OPERATION, RELEASE) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_WFI, DIS) |
+ NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS));
+
+ return 0;
+}
+
+static const struct nvif_chan_func
+nvif_chanc36f = {
+ .push.read_get = nvif_chan906f_read_get,
+ .gpfifo.read_get = nvif_chan906f_gpfifo_read_get,
+ .gpfifo.push = nvif_chan506f_gpfifo_push,
+ .gpfifo.kick = nvif_chanc36f_gpfifo_kick,
+ .gpfifo.post = nvif_chan906f_gpfifo_post,
+ .gpfifo.post_size = NVIF_CHANC36F_SEM_RELEASE_SIZE,
+ .sem.release = nvif_chanc36f_sem_release,
+};
+
+int
+nvif_chanc36f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_user *usermode, u32 doorbell_token)
+{
+ int ret;
+
+ ret = nvif_chan906f_ctor_(&nvif_chanc36f, userd, gpfifo, gpfifo_size,
+ push, push_addr, push_size, sema, sema_addr, chan);
+ if (ret)
+ return ret;
+
+ chan->usermode = usermode;
+ chan->doorbell_token = doorbell_token;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvif/conn.c b/drivers/gpu/drm/nouveau/nvif/conn.c
index 9ee18cb99264..5a1a83c62a2a 100644
--- a/drivers/gpu/drm/nouveau/nvif/conn.c
+++ b/drivers/gpu/drm/nouveau/nvif/conn.c
@@ -30,17 +30,17 @@ int
nvif_conn_event_ctor(struct nvif_conn *conn, const char *name, nvif_event_func func, u8 types,
struct nvif_event *event)
{
- struct {
- struct nvif_event_v0 base;
- struct nvif_conn_event_v0 conn;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_conn_event_v0));
+ struct nvif_conn_event_v0 *args_conn =
+ (struct nvif_conn_event_v0 *)args->data;
int ret;
- args.conn.version = 0;
- args.conn.types = types;
+ args_conn->version = 0;
+ args_conn->types = types;
ret = nvif_event_ctor_(&conn->object, name ?: "nvifConnHpd", nvif_conn_id(conn),
- func, true, &args.base, sizeof(args), false, event);
+ func, true, args, __struct_size(args), false, event);
NVIF_DEBUG(&conn->object, "[NEW EVENT:HPD types:%02x]", types);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 14da22fa3b5b..fa42146252da 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -36,6 +36,7 @@ int
nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass, struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
+ { GB202_DISP, 0 },
{ AD102_DISP, 0 },
{ GA102_DISP, 0 },
{ TU102_DISP, 0 },
diff --git a/drivers/gpu/drm/nouveau/nvif/outp.c b/drivers/gpu/drm/nouveau/nvif/outp.c
index 6daeb7f0b09b..32f6c5eb92af 100644
--- a/drivers/gpu/drm/nouveau/nvif/outp.c
+++ b/drivers/gpu/drm/nouveau/nvif/outp.c
@@ -195,20 +195,17 @@ nvif_outp_dp_aux_pwr(struct nvif_outp *outp, bool enable)
int
nvif_outp_hda_eld(struct nvif_outp *outp, int head, void *data, u32 size)
{
- struct {
- struct nvif_outp_hda_eld_v0 mthd;
- u8 data[128];
- } args;
+ DEFINE_RAW_FLEX(struct nvif_outp_hda_eld_v0, mthd, data, 128);
int ret;
- if (WARN_ON(size > ARRAY_SIZE(args.data)))
+ if (WARN_ON(size > __member_size(mthd->data)))
return -EINVAL;
- args.mthd.version = 0;
- args.mthd.head = head;
+ mthd->version = 0;
+ mthd->head = head;
- memcpy(args.data, data, size);
- ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDA_ELD, &args, sizeof(args.mthd) + size);
+ memcpy(mthd->data, data, size);
+ ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDA_ELD, mthd, sizeof(*mthd) + size);
NVIF_ERRON(ret, &outp->object, "[HDA_ELD head:%d size:%d]", head, size);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/user.c b/drivers/gpu/drm/nouveau/nvif/user.c
index b648a5e036af..53f03fa1c9c2 100644
--- a/drivers/gpu/drm/nouveau/nvif/user.c
+++ b/drivers/gpu/drm/nouveau/nvif/user.c
@@ -41,9 +41,11 @@ nvif_user_ctor(struct nvif_device *device, const char *name)
int version;
const struct nvif_user_func *func;
} users[] = {
- { AMPERE_USERMODE_A, -1, &nvif_userc361 },
- { TURING_USERMODE_A, -1, &nvif_userc361 },
- { VOLTA_USERMODE_A, -1, &nvif_userc361 },
+ { BLACKWELL_USERMODE_A, -1, &nvif_userc361 },
+ { HOPPER_USERMODE_A, -1, &nvif_userc361 },
+ { AMPERE_USERMODE_A, -1, &nvif_userc361 },
+ { TURING_USERMODE_A, -1, &nvif_userc361 },
+ { VOLTA_USERMODE_A, -1, &nvif_userc361 },
{}
};
int cid, ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index 2e48b0816670..ddcf8782d6b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -17,8 +17,6 @@ include $(src)/nvkm/engine/msppp/Kbuild
include $(src)/nvkm/engine/msvld/Kbuild
include $(src)/nvkm/engine/nvenc/Kbuild
include $(src)/nvkm/engine/nvdec/Kbuild
-include $(src)/nvkm/engine/nvjpg/Kbuild
-include $(src)/nvkm/engine/ofa/Kbuild
include $(src)/nvkm/engine/sec/Kbuild
include $(src)/nvkm/engine/sec2/Kbuild
include $(src)/nvkm/engine/sw/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 165d61fc5d6c..9754bac65df7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -10,5 +10,4 @@ nvkm-y += nvkm/engine/ce/gv100.o
nvkm-y += nvkm/engine/ce/tu102.o
nvkm-y += nvkm/engine/ce/ga100.o
nvkm-y += nvkm/engine/ce/ga102.o
-
-nvkm-y += nvkm/engine/ce/r535.o
+nvkm-y += nvkm/engine/ce/gb202.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
index 9427a592bd16..1c0c60138706 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
@@ -90,7 +90,7 @@ ga100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_ce_new(&ga100_ce, device, type, inst, pengine);
+ return -ENODEV;
return nvkm_engine_new_(&ga100_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
index ce56ede7c2e9..9359c5e7aa3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
@@ -44,7 +44,7 @@ ga102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_ce_new(&ga102_ce, device, type, inst, pengine);
+ return -ENODEV;
return nvkm_engine_new_(&ga102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c
new file mode 100644
index 000000000000..37c3c619c71b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb202/dev_ce.h>
+
+u32
+gb202_ce_grce_mask(struct nvkm_device *device)
+{
+ u32 data = nvkm_rd32(device, NV_CE_GRCE_MASK);
+
+ return NVVAL_GET(data, NV_CE, GRCE_MASK, VALUE);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index 806a76a72249..34fd2657134b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -16,4 +16,6 @@ int ga100_ce_oneinit(struct nvkm_engine *);
int ga100_ce_init(struct nvkm_engine *);
int ga100_ce_fini(struct nvkm_engine *, bool);
int ga100_ce_nonstall(struct nvkm_engine *);
+
+u32 gb202_ce_grce_mask(struct nvkm_device *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
deleted file mode 100644
index bd0d435dbbd3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h>
-
-struct r535_ce_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_ce_obj_dtor(struct nvkm_object *object)
-{
- struct r535_ce_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_ce_obj = {
- .dtor = r535_ce_obj_dtor,
-};
-
-static int
-r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_ce_obj *obj;
- NVC0B5_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->version = 1;
- args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_ce_dtor(struct nvkm_engine *engine)
-{
- kfree(engine->func);
- return engine;
-}
-
-int
-r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
- struct nvkm_engine_func *rm;
- int nclass, ret;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_ce_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_ce_obj_ctor;
- }
-
- ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
- if (ret)
- kfree(rm);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
index 7c8647dcb349..67d0545cf902 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
@@ -40,7 +40,7 @@ tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_ce_new(&tu102_ce, device, type, inst, pengine);
+ return -ENODEV;
return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 9093d89b16f3..3375a59ebf1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2529,9 +2529,6 @@ nv170_chipset = {
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x000003ff, ga100_ce_new },
.fifo = { 0x00000001, ga100_fifo_new },
- .nvdec = { 0x0000001f, ga100_nvdec_new },
- .nvjpg = { 0x00000001, ga100_nvjpg_new },
- .ofa = { 0x00000001, ga100_ofa_new },
};
static const struct nvkm_device_chip
@@ -2561,8 +2558,6 @@ nv172_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2593,8 +2588,6 @@ nv173_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2625,8 +2618,6 @@ nv174_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2657,8 +2648,6 @@ nv176_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2689,12 +2678,26 @@ nv177_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
static const struct nvkm_device_chip
+nv180_chipset = {
+ .name = "GH100",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gh100_fb_new },
+ .fsp = { 0x00000001, gh100_fsp_new },
+ .gsp = { 0x00000001, gh100_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
nv192_chipset = {
.name = "AD102",
.bar = { 0x00000001, tu102_bar_new },
@@ -2709,14 +2712,9 @@ nv192_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2735,14 +2733,9 @@ nv193_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2761,14 +2754,9 @@ nv194_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2787,14 +2775,9 @@ nv196_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2813,17 +2796,122 @@ nv197_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
+static const struct nvkm_device_chip
+nv1a0_chipset = {
+ .name = "GB100",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb100_fb_new },
+ .fsp = { 0x00000001, gb100_fsp_new },
+ .gsp = { 0x00000001, gb100_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1a2_chipset = {
+ .name = "GB102",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb100_fb_new },
+ .fsp = { 0x00000001, gb100_fsp_new },
+ .gsp = { 0x00000001, gb100_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b2_chipset = {
+ .name = "GB202",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b3_chipset = {
+ .name = "GB203",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b5_chipset = {
+ .name = "GB205",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b6_chipset = {
+ .name = "GB206",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b7_chipset = {
+ .name = "GB207",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
struct nvkm_subdev *
nvkm_device_subdev(struct nvkm_device *device, int type, int inst)
{
@@ -3065,8 +3153,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
device->debug = nvkm_dbgopt(device->dbgopt, "device");
INIT_LIST_HEAD(&device->subdev);
- mmio_base = device->func->resource_addr(device, 0);
- mmio_size = device->func->resource_size(device, 0);
+ mmio_base = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ mmio_size = device->func->resource_size(device, NVKM_BAR0_PRI);
device->pri = ioremap(mmio_base, mmio_size);
if (device->pri == NULL) {
@@ -3139,7 +3227,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x140: device->card_type = GV100; break;
case 0x160: device->card_type = TU100; break;
case 0x170: device->card_type = GA100; break;
+ case 0x180: device->card_type = GH100; break;
case 0x190: device->card_type = AD100; break;
+ case 0x1a0: device->card_type = GB10x; break;
+ case 0x1b0: device->card_type = GB20x; break;
default:
break;
}
@@ -3242,11 +3333,19 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
+ case 0x180: device->chip = &nv180_chipset; break;
case 0x192: device->chip = &nv192_chipset; break;
case 0x193: device->chip = &nv193_chipset; break;
case 0x194: device->chip = &nv194_chipset; break;
case 0x196: device->chip = &nv196_chipset; break;
case 0x197: device->chip = &nv197_chipset; break;
+ case 0x1a0: device->chip = &nv1a0_chipset; break;
+ case 0x1a2: device->chip = &nv1a2_chipset; break;
+ case 0x1b2: device->chip = &nv1b2_chipset; break;
+ case 0x1b3: device->chip = &nv1b3_chipset; break;
+ case 0x1b5: device->chip = &nv1b5_chipset; break;
+ case 0x1b6: device->chip = &nv1b6_chipset; break;
+ case 0x1b7: device->chip = &nv1b7_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
switch (device->chipset) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 3ff6436007fa..8f0261a0d618 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1560,18 +1560,42 @@ nvkm_device_pci(struct nvkm_device *device)
return container_of(device, struct nvkm_device_pci, device);
}
+static int
+nvkm_device_pci_resource_idx(struct nvkm_device_pci *pdev, enum nvkm_bar_id bar)
+{
+ int idx = 0;
+
+ if (bar == NVKM_BAR0_PRI)
+ return idx;
+
+ idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
+ if (bar == NVKM_BAR1_FB)
+ return idx;
+
+ idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
+ if (bar == NVKM_BAR2_INST)
+ return idx;
+
+ WARN_ON(1);
+ return -1;
+}
+
static resource_size_t
-nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar)
+nvkm_device_pci_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct nvkm_device_pci *pdev = nvkm_device_pci(device);
- return pci_resource_start(pdev->pdev, bar);
+ int idx = nvkm_device_pci_resource_idx(pdev, bar);
+
+ return idx >= 0 ? pci_resource_start(pdev->pdev, idx) : 0;
}
static resource_size_t
-nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
+nvkm_device_pci_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct nvkm_device_pci *pdev = nvkm_device_pci(device);
- return pci_resource_len(pdev->pdev, bar);
+ int idx = nvkm_device_pci_resource_idx(pdev, bar);
+
+ return idx >= 0 ? pci_resource_len(pdev->pdev, idx) : 0;
}
static int
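
A note on the resource-index walk introduced above: a 64-bit memory BAR occupies two consecutive PCI BAR registers, which is why nvkm_device_pci_resource_idx() advances by 2 whenever IORESOURCE_MEM_64 is set before testing the next logical BAR. The following is a minimal userspace-style sketch of the same walk, not part of the patch; the names and the "BAR0 32-bit, BAR1/BAR2 64-bit" layout in main() are illustrative assumptions only.

	#include <stdio.h>
	#include <stdbool.h>

	/* Illustrative stand-ins for the logical BAR ids used in the hunk above. */
	enum bar_id { BAR0_PRI, BAR1_FB, BAR2_INST };

	/* Walk the BAR slots the same way the helper above does:
	 * a 64-bit memory BAR consumes two consecutive slots. */
	static int resource_idx(const bool *is_64bit, enum bar_id bar)
	{
		int idx = 0;

		if (bar == BAR0_PRI)
			return idx;
		idx += is_64bit[idx] ? 2 : 1;
		if (bar == BAR1_FB)
			return idx;
		idx += is_64bit[idx] ? 2 : 1;
		if (bar == BAR2_INST)
			return idx;
		return -1;
	}

	int main(void)
	{
		/* Assumed example layout: BAR0 32-bit, BAR1 and BAR2 64-bit. */
		bool is_64bit[6] = { false, true, false, true, false, false };

		printf("PRI  -> resource %d\n", resource_idx(is_64bit, BAR0_PRI));  /* 0 */
		printf("FB   -> resource %d\n", resource_idx(is_64bit, BAR1_FB));   /* 1 */
		printf("INST -> resource %d\n", resource_idx(is_64bit, BAR2_INST)); /* 3 */
		return 0;
	}
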
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index e42b18820a95..75ee7506d443 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -11,6 +11,7 @@
#include <subdev/devinit.h>
#include <subdev/fault.h>
#include <subdev/fb.h>
+#include <subdev/fsp.h>
#include <subdev/fuse.h>
#include <subdev/gpio.h>
#include <subdev/gsp.h>
@@ -43,8 +44,6 @@
#include <engine/msvld.h>
#include <engine/nvenc.h>
#include <engine/nvdec.h>
-#include <engine/nvjpg.h>
-#include <engine/ofa.h>
#include <engine/sec.h>
#include <engine/sec2.h>
#include <engine/sw.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 78a83f904bbd..114e50ca1827 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -186,21 +186,31 @@ nvkm_device_tegra(struct nvkm_device *device)
}
static struct resource *
-nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
- return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
+ int idx;
+
+ switch (bar) {
+ case NVKM_BAR0_PRI: idx = 0; break;
+ case NVKM_BAR1_FB : idx = 1; break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return platform_get_resource(tdev->pdev, IORESOURCE_MEM, idx);
}
static resource_size_t
-nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct resource *res = nvkm_device_tegra_resource(device, bar);
return res ? res->start : 0;
}
static resource_size_t
-nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct resource *res = nvkm_device_tegra_resource(device, bar);
return res ? resource_size(res) : 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index d7f75b3a43c8..58191b7a0494 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -148,6 +148,9 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
case AD100: args->v0.family = NV_DEVICE_INFO_V0_ADA; break;
+ case GH100: args->v0.family = NV_DEVICE_INFO_V0_HOPPER; break;
+ case GB10x: args->v0.family = NV_DEVICE_INFO_V0_BLACKWELL; break;
+ case GB20x: args->v0.family = NV_DEVICE_INFO_V0_BLACKWELL; break;
default:
args->v0.family = 0;
break;
@@ -209,8 +212,8 @@ nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_udevice *udev = nvkm_udevice(object);
struct nvkm_device *device = udev->device;
*type = NVKM_OBJECT_MAP_IO;
- *addr = device->func->resource_addr(device, 0);
- *size = device->func->resource_size(device, 0);
+ *addr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ *size = device->func->resource_size(device, NVKM_BAR0_PRI);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index e346e924fee8..e1aecd3fe96c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -27,9 +27,6 @@ nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/gv100.o
nvkm-y += nvkm/engine/disp/tu102.o
nvkm-y += nvkm/engine/disp/ga102.o
-nvkm-y += nvkm/engine/disp/ad102.o
-
-nvkm-y += nvkm/engine/disp/r535.o
nvkm-y += nvkm/engine/disp/udisp.o
nvkm-y += nvkm/engine/disp/uconn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
deleted file mode 100644
index 7f300a79aa29..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-#include "chan.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_disp_func
-ad102_disp = {
- .uevent = &gv100_disp_chan_uevent,
- .ramht_size = 0x2000,
- .root = { 0, 0,AD102_DISP },
- .user = {
- {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
- {{ 0, 0,GA102_DISP_CURSOR }, nvkm_disp_chan_new, &gv100_disp_curs },
- {{ 0, 0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm },
- {{ 0, 0,AD102_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gv100_disp_core },
- {{ 0, 0,GA102_DISP_WINDOW_CHANNEL_DMA }, nvkm_disp_wndw_new, &gv100_disp_wndw },
- {}
- },
-};
-
-int
-ad102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_disp **pdisp)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_disp_new(&ad102_disp, device, type, inst, pdisp);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
index 4e43ee383c34..9b84e357d354 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
@@ -49,7 +49,7 @@ nvkm_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_device *device = chan->disp->engine.subdev.device;
- const u64 base = device->func->resource_addr(device, 0);
+ const u64 base = device->func->resource_addr(device, NVKM_BAR0_PRI);
*type = NVKM_OBJECT_MAP_IO;
*addr = base + chan->func->user(chan, size);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index cfa3698d3a2f..614921166fba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -805,7 +805,7 @@ gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc,
struct gv100_disp_caps *caps = gv100_disp_caps(object);
struct nvkm_device *device = caps->disp->engine.subdev.device;
*type = NVKM_OBJECT_MAP_IO;
- *addr = 0x640000 + device->func->resource_addr(device, 0);
+ *addr = 0x640000 + device->func->resource_addr(device, NVKM_BAR0_PRI);
*size = 0x1000;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index aff92848abfe..376e9c3bcb1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -25,8 +25,7 @@ nvkm-y += nvkm/engine/fifo/gv100.o
nvkm-y += nvkm/engine/fifo/tu102.o
nvkm-y += nvkm/engine/fifo/ga100.o
nvkm-y += nvkm/engine/fifo/ga102.o
-
-nvkm-y += nvkm/engine/fifo/r535.o
+nvkm-y += nvkm/engine/fifo/gb202.o
nvkm-y += nvkm/engine/fifo/ucgrp.o
nvkm-y += nvkm/engine/fifo/uchan.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 22443fe4a39f..fdffa0391b31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -303,7 +303,7 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
}
/* Allocate USERD + BAR1 polling area. */
- if (fifo->func->chan.func->userd->bar == 1) {
+ if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB) {
struct nvkm_vmm *bar1 = nvkm_bar_bar1_vmm(device);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, fifo->chid->nr *
@@ -349,8 +349,6 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
nvkm_chid_unref(&fifo->cgid);
nvkm_chid_unref(&fifo->chid);
- mutex_destroy(&fifo->userd.mutex);
-
nvkm_event_fini(&fifo->nonstall.event);
mutex_destroy(&fifo->mutex);
@@ -391,8 +389,5 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
spin_lock_init(&fifo->lock);
mutex_init(&fifo->mutex);
- INIT_LIST_HEAD(&fifo->userd.list);
- mutex_init(&fifo->userd.mutex);
-
return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index 7d4716dcd512..4e09985424b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -275,11 +275,7 @@ nvkm_chan_del(struct nvkm_chan **pchan)
nvkm_gpuobj_del(&chan->ramfc);
if (chan->cgrp) {
- if (!chan->func->id_put)
- nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
- else
- chan->func->id_put(chan);
-
+ nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
nvkm_cgrp_unref(&chan->cgrp);
}
@@ -359,14 +355,14 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
/* Validate arguments against class requirements. */
if ((runq && runq >= runl->func->runqs) ||
(!func->inst->vmm != !vmm) ||
- ((func->userd->bar < 0) == !userd) ||
+ (!func->userd->bar == !userd) ||
(!func->ramfc->ctxdma != !dmaobj) ||
((func->ramfc->devm < devm) && devm != BIT(0)) ||
(!func->ramfc->priv && priv)) {
RUNL_DEBUG(runl, "args runq:%d:%d vmm:%d:%p userd:%d:%p "
"push:%d:%p devm:%08x:%08x priv:%d:%d",
runl->func->runqs, runq, func->inst->vmm, vmm,
- func->userd->bar < 0, userd, func->ramfc->ctxdma, dmaobj,
+ func->userd->bar, userd, func->ramfc->ctxdma, dmaobj,
func->ramfc->devm, devm, func->ramfc->priv, priv);
return -EINVAL;
}
@@ -441,30 +437,26 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
}
/* Allocate channel ID. */
- if (!chan->func->id_get) {
- chan->id = nvkm_chid_get(runl->chid, chan);
- if (chan->id >= 0) {
- if (func->userd->bar < 0) {
- if (ouserd + chan->func->userd->size >=
- nvkm_memory_size(userd)) {
- RUNL_DEBUG(runl, "ouserd %llx", ouserd);
- return -EINVAL;
- }
-
- ret = nvkm_memory_kmap(userd, &chan->userd.mem);
- if (ret) {
- RUNL_DEBUG(runl, "userd %d", ret);
- return ret;
- }
-
- chan->userd.base = ouserd;
- } else {
- chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
- chan->userd.base = chan->id * chan->func->userd->size;
+ chan->id = nvkm_chid_get(runl->chid, chan);
+ if (chan->id >= 0) {
+ if (!func->userd->bar) {
+ if (ouserd + chan->func->userd->size >=
+ nvkm_memory_size(userd)) {
+ RUNL_DEBUG(runl, "ouserd %llx", ouserd);
+ return -EINVAL;
+ }
+
+ ret = nvkm_memory_kmap(userd, &chan->userd.mem);
+ if (ret) {
+ RUNL_DEBUG(runl, "userd %d", ret);
+ return ret;
}
+
+ chan->userd.base = ouserd;
+ } else {
+ chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
+ chan->userd.base = chan->id * chan->func->userd->size;
}
- } else {
- chan->id = chan->func->id_get(chan, userd, ouserd);
}
if (chan->id < 0) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 013682a709d5..445db5dfd1e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -17,9 +17,6 @@ struct nvkm_cctx {
};
struct nvkm_chan_func {
- int (*id_get)(struct nvkm_chan *, struct nvkm_memory *userd, u64 ouserd);
- void (*id_put)(struct nvkm_chan *);
-
const struct nvkm_chan_func_inst {
u32 size;
bool zero;
@@ -27,7 +24,7 @@ struct nvkm_chan_func {
} *inst;
const struct nvkm_chan_func_userd {
- int bar;
+ enum nvkm_bar_id bar;
u32 base;
u32 size;
void (*clear)(struct nvkm_chan *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c
new file mode 100644
index 000000000000..b469e8afeb0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "runl.h"
+
+u32
+gb202_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+ return BIT(30) | (chan->cgrp->runl->id << 16) | chan->id;
+}
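
For context, the GB202 doorbell handle added above packs the runlist id into bits 16 and up and the channel id into the low bits, as the Turing helper further down does, plus an extra flag in bit 30. A small illustrative sketch of that bit layout follows; it is plain standalone C, not kernel code, and the helper name is hypothetical.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirror of the handle layout in the hunks above/below:
	 *   bit 30   : set for GB202, absent on Turing (tu102)
	 *   bits 16+ : runlist id
	 *   low bits : channel id
	 */
	static uint32_t doorbell_handle(unsigned runl_id, unsigned chan_id, int gb202)
	{
		uint32_t handle = ((uint32_t)runl_id << 16) | chan_id;

		if (gb202)
			handle |= 1u << 30;
		return handle;
	}

	int main(void)
	{
		printf("tu102: 0x%08x\n", doorbell_handle(3, 42, 0)); /* 0x0003002a */
		printf("gb202: 0x%08x\n", doorbell_handle(3, 42, 1)); /* 0x4003002a */
		return 0;
	}
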
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 6c94451d0faa..e4a4fad2eafc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -133,7 +133,7 @@ gf100_chan_userd_clear(struct nvkm_chan *chan)
static const struct nvkm_chan_func_userd
gf100_chan_userd = {
- .bar = 1,
+ .bar = NVKM_BAR1_FB,
.size = 0x1000,
.clear = gf100_chan_userd_clear,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index d8a4d773a58c..5655eda52a7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -113,7 +113,7 @@ gk104_chan_ramfc = {
const struct nvkm_chan_func_userd
gk104_chan_userd = {
- .bar = 1,
+ .bar = NVKM_BAR1_FB,
.size = 0x200,
.clear = gf100_chan_userd_clear,
};
@@ -745,7 +745,7 @@ gk104_fifo_init(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
- if (fifo->func->chan.func->userd->bar == 1)
+ if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB)
nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12);
nvkm_wr32(device, 0x002100, 0xffffffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
index 33066c8cdc64..d7f046c03cfd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -70,7 +70,6 @@ gv100_chan_ramfc = {
const struct nvkm_chan_func_userd
gv100_chan_userd = {
- .bar = -1,
.size = 0x200,
.clear = gf100_chan_userd_clear,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index 674faf002b20..c4b8e567d86f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -154,7 +154,7 @@ nv04_chan_ramfc = {
const struct nvkm_chan_func_userd
nv04_chan_userd = {
- .bar = 0,
+ .bar = NVKM_BAR0_PRI,
.base = 0x800000,
.size = 0x010000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index e50a94b6d7f8..084ca5561ee1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -93,7 +93,7 @@ nv40_chan_ramfc = {
static const struct nvkm_chan_func_userd
nv40_chan_userd = {
- .bar = 0,
+ .bar = NVKM_BAR0_PRI,
.base = 0xc00000,
.size = 0x001000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index 954b5f3a7d57..7bf77661157d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -124,7 +124,7 @@ nv50_chan_ramfc = {
const struct nvkm_chan_func_userd
nv50_chan_userd = {
- .bar = 0,
+ .bar = NVKM_BAR0_PRI,
.base = 0xc00000,
.size = 0x002000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index a0f3277605a5..5e81ae195329 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -6,6 +6,7 @@
#include <core/enum.h>
struct nvkm_cctx;
struct nvkm_cgrp;
+struct nvkm_chan;
struct nvkm_engn;
struct nvkm_memory;
struct nvkm_runl;
@@ -195,6 +196,7 @@ extern const struct nvkm_chan_func_ramfc gv100_chan_ramfc;
void tu102_fifo_intr_ctxsw_timeout_info(struct nvkm_engn *, u32 info);
extern const struct nvkm_fifo_func_mmu_fault tu102_fifo_mmu_fault;
+u32 tu102_chan_doorbell_handle(struct nvkm_chan *);
int ga100_fifo_runl_ctor(struct nvkm_fifo *);
int ga100_fifo_nonstall_ctor(struct nvkm_fifo *);
@@ -206,6 +208,8 @@ extern const struct nvkm_engn_func ga100_engn_ce;
extern const struct nvkm_cgrp_func ga100_cgrp;
extern const struct nvkm_chan_func ga100_chan;
+u32 gb202_chan_doorbell_handle(struct nvkm_chan *);
+
int nvkm_uchan_new(struct nvkm_fifo *, struct nvkm_cgrp *, const struct nvkm_oclass *,
void *argv, u32 argc, struct nvkm_object **);
int nvkm_ucgrp_new(struct nvkm_fifo *, const struct nvkm_oclass *, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index 1d39a6840a40..c5a03298e88c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -31,7 +31,7 @@
#include <nvif/class.h>
-static u32
+u32
tu102_chan_doorbell_handle(struct nvkm_chan *chan)
{
return (chan->cgrp->runl->id << 16) | chan->id;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
index 9e56bcc166ed..52420a1edca5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
@@ -258,7 +258,7 @@ nvkm_uchan_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_chan *chan = nvkm_uchan(object)->chan;
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
- if (chan->func->userd->bar < 0)
+ if (!chan->func->userd->bar)
return -ENOSYS;
*type = NVKM_OBJECT_MAP_IO;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 1555f8c40b4f..b5418f05ccd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -41,9 +41,6 @@ nvkm-y += nvkm/engine/gr/gp10b.o
nvkm-y += nvkm/engine/gr/gv100.o
nvkm-y += nvkm/engine/gr/tu102.o
nvkm-y += nvkm/engine/gr/ga102.o
-nvkm-y += nvkm/engine/gr/ad102.o
-
-nvkm-y += nvkm/engine/gr/r535.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
index d285c597aff9..2b51f1d0c281 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
@@ -352,7 +352,7 @@ int
ga102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_gr_new(&ga102_gr, device, type, inst, pgr);
+ return -ENODEV;
return gf100_gr_new_(ga102_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index b0e0c9305034..54f686ba39ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -445,6 +445,4 @@ void gp108_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gr **);
-int r535_gr_new(const struct gf100_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 02a8c62a0a32..13407fafe947 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -297,7 +297,7 @@ nv20_gr_init(struct nvkm_gr *base)
nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
/* begin RAM config */
- vramsz = device->func->resource_size(device, 1) - 1;
+ vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1;
nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index a5e1f02791b4..b609b0150ba1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -386,7 +386,7 @@ nv40_gr_init(struct nvkm_gr *base)
}
/* begin RAM config */
- vramsz = device->func->resource_size(device, 1) - 1;
+ vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1;
switch (device->chipset) {
case 0x40:
nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
deleted file mode 100644
index f4bed3eb1ec2..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "gf100.h"
-
-#include <core/memory.h>
-#include <subdev/gsp.h>
-#include <subdev/mmu/vmm.h>
-#include <engine/fifo/priv.h>
-
-#include <nvif/if900d.h>
-
-#include <nvhw/drf.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
-
-#define r535_gr(p) container_of((p), struct r535_gr, base)
-
-#define R515_GR_MAX_CTXBUFS 9
-
-struct r535_gr {
- struct nvkm_gr base;
-
- struct {
- u16 bufferId;
- u32 size;
- u8 page;
- u8 align;
- bool global;
- bool init;
- bool ro;
- } ctxbuf[R515_GR_MAX_CTXBUFS];
- int ctxbuf_nr;
-
- struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS];
-};
-
-struct r535_gr_chan {
- struct nvkm_object object;
- struct r535_gr *gr;
-
- struct nvkm_vmm *vmm;
- struct nvkm_chan *chan;
-
- struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
- struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
-};
-
-struct r535_gr_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_gr_obj_dtor(struct nvkm_object *object)
-{
- struct r535_gr_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_gr_obj = {
- .dtor = r535_gr_obj_dtor,
-};
-
-static int
-r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
- struct r535_gr_obj *obj;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0,
- &obj->rm);
-}
-
-static void *
-r535_gr_chan_dtor(struct nvkm_object *object)
-{
- struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
- struct r535_gr *gr = grc->gr;
-
- for (int i = 0; i < gr->ctxbuf_nr; i++) {
- nvkm_vmm_put(grc->vmm, &grc->vma[i]);
- nvkm_memory_unref(&grc->mem[i]);
- }
-
- nvkm_vmm_unref(&grc->vmm);
- return grc;
-}
-
-static const struct nvkm_object_func
-r535_gr_chan = {
- .dtor = r535_gr_chan_dtor,
-};
-
-static int
-r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
- struct nvkm_memory **pmem, struct nvkm_vma **pvma,
- struct nvkm_gsp_object *chan)
-{
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
- NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
- if (WARN_ON(IS_ERR(ctrl)))
- return PTR_ERR(ctrl);
-
- ctrl->engineType = 1;
- ctrl->hChanClient = vmm->rm.client.object.handle;
- ctrl->hObject = chan->handle;
-
- for (int i = 0; i < gr->ctxbuf_nr; i++) {
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
- &ctrl->promoteEntry[ctrl->entryCount];
- const bool alloc = golden || !gr->ctxbuf[i].global;
- int ret;
-
- entry->bufferId = gr->ctxbuf[i].bufferId;
- entry->bInitialize = gr->ctxbuf[i].init && alloc;
-
- if (alloc) {
- ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
- NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
- gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
- gr->ctxbuf[i].init, &pmem[i]);
- if (WARN_ON(ret))
- return ret;
-
- if (gr->ctxbuf[i].bufferId ==
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
- entry->bNonmapped = 1;
- } else {
- if (gr->ctxbuf[i].bufferId ==
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
- continue;
-
- pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
- }
-
- if (!entry->bNonmapped) {
- struct gf100_vmm_map_v0 args = {
- .priv = 1,
- .ro = gr->ctxbuf[i].ro,
- };
-
- mutex_lock(&vmm->mutex.vmm);
- ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
- nvkm_memory_size(pmem[i]), &pvma[i]);
- mutex_unlock(&vmm->mutex.vmm);
- if (ret)
- return ret;
-
- ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
- if (ret)
- return ret;
-
- entry->gpuVirtAddr = pvma[i]->addr;
- }
-
- if (entry->bInitialize) {
- entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
- entry->size = gr->ctxbuf[i].size;
- entry->physAttr = 4;
- }
-
- nvkm_debug(subdev,
- "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
- entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
- entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
-
- ctrl->entryCount++;
- }
-
- return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
-}
-
-static int
-r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
- struct nvkm_object **pobject)
-{
- struct r535_gr *gr = r535_gr(base);
- struct r535_gr_chan *grc;
- int ret;
-
- if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
- grc->gr = gr;
- grc->vmm = nvkm_vmm_ref(chan->vmm);
- grc->chan = chan;
- *pobject = &grc->object;
-
- ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static u64
-r535_gr_units(struct nvkm_gr *gr)
-{
- struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
-
- return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
-}
-
-static int
-r535_gr_oneinit(struct nvkm_gr *base)
-{
- NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
- struct r535_gr *gr = container_of(base, typeof(*gr), base);
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_gsp *gsp = device->gsp;
- struct nvkm_mmu *mmu = device->mmu;
- struct {
- struct nvkm_memory *inst;
- struct nvkm_vmm *vmm;
- struct nvkm_gsp_object chan;
- struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
- } golden = {};
- int ret;
-
- /* Allocate a channel to use for golden context init. */
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
- if (ret)
- goto done;
-
- ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
- if (ret)
- goto done;
-
- ret = mmu->func->promote_vmm(golden.vmm);
- if (ret)
- goto done;
-
- {
- NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
-
- args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000,
- device->fifo->func->chan.user.oclass,
- sizeof(*args), &golden.chan);
- if (IS_ERR(args)) {
- ret = PTR_ERR(args);
- goto done;
- }
-
- args->gpFifoOffset = 0;
- args->gpFifoEntries = 0x1000 / 8;
- args->flags =
- NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) |
- NVDEF(NVOS04, FLAGS, VPR, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) |
- NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) |
- NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) |
- NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) |
- NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) |
- NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) |
- NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) |
- NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) |
- NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) |
- NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) |
- NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) |
- NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
- args->hVASpace = golden.vmm->rm.object.handle;
- args->engineType = 1;
- args->instanceMem.base = nvkm_memory_addr(golden.inst);
- args->instanceMem.size = 0x1000;
- args->instanceMem.addressSpace = 2;
- args->instanceMem.cacheAttrib = 1;
- args->ramfcMem.base = nvkm_memory_addr(golden.inst);
- args->ramfcMem.size = 0x200;
- args->ramfcMem.addressSpace = 2;
- args->ramfcMem.cacheAttrib = 1;
- args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000;
- args->userdMem.size = 0x200;
- args->userdMem.addressSpace = 2;
- args->userdMem.cacheAttrib = 1;
- args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000;
- args->mthdbufMem.size = 0x5000;
- args->mthdbufMem.addressSpace = 2;
- args->mthdbufMem.cacheAttrib = 1;
- args->internalFlags =
- NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) |
- NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) |
- NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
-
- ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args);
- if (ret)
- goto done;
- }
-
- /* Fetch context buffer info from RM and allocate each of them here to use
- * during golden context init (or later as a global context buffer).
- *
- * Also build the information that'll be used to create channel contexts.
- */
- info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
- NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
- sizeof(*info));
- if (WARN_ON(IS_ERR(info))) {
- ret = PTR_ERR(info);
- goto done;
- }
-
- for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) {
- static const struct {
- u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
- u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
- bool global;
- bool init;
- bool ro;
- } map[] = {
-#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
- .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
- .global = (G), .init = (I), .ro = (R) }
-#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
- /* global init ro */
- _A( GRAPHICS, MAIN, false, true, false),
- _B( PATCH, false, true, false),
- _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false),
- _B( PAGEPOOL, true, false, false),
- _B( ATTRIBUTE_CB, true, false, false),
- _B( RTV_CB_GLOBAL, true, false, false),
- _B( FECS_EVENT, true, true, false),
- _B( PRIV_ACCESS_MAP, true, true, true),
-#undef _B
-#undef _A
- };
- u32 size = info->engineContextBuffersInfo[0].engine[i].size;
- u8 align, page;
- int id;
-
- for (id = 0; id < ARRAY_SIZE(map); id++) {
- if (map[id].id0 == i)
- break;
- }
-
- nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
- size, (id < ARRAY_SIZE(map)) ? "*" : "");
- if (id >= ARRAY_SIZE(map))
- continue;
-
- if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
- size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
-
- if (size >= 1 << 21) page = 21;
- else if (size >= 1 << 16) page = 16;
- else page = 12;
-
- if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
- align = order_base_2(size);
- else
- align = page;
-
- if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
- continue;
-
- gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
- gr->ctxbuf[gr->ctxbuf_nr].size = size;
- gr->ctxbuf[gr->ctxbuf_nr].page = page;
- gr->ctxbuf[gr->ctxbuf_nr].align = align;
- gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global;
- gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init;
- gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro;
- gr->ctxbuf_nr++;
-
- if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
- if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
- continue;
-
- gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
- gr->ctxbuf[gr->ctxbuf_nr].bufferId =
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
- gr->ctxbuf_nr++;
- }
- }
-
- nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
-
- /* Promote golden context to RM. */
- ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
- if (ret)
- goto done;
-
- /* Allocate 3D class on channel to trigger golden context init in RM. */
- {
- int i;
-
- for (i = 0; gr->base.func->sclass[i].ctor; i++) {
- if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) {
- struct nvkm_gsp_object threed;
-
- ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000,
- gr->base.func->sclass[i].oclass, 0,
- &threed);
- if (ret)
- goto done;
-
- nvkm_gsp_rm_free(&threed);
- break;
- }
- }
-
- if (WARN_ON(!gr->base.func->sclass[i].ctor)) {
- ret = -EINVAL;
- goto done;
- }
- }
-
-done:
- nvkm_gsp_rm_free(&golden.chan);
- for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
- nvkm_vmm_put(golden.vmm, &golden.vma[i]);
- nvkm_vmm_unref(&golden.vmm);
- nvkm_memory_unref(&golden.inst);
- return ret;
-
-}
-
-static void *
-r535_gr_dtor(struct nvkm_gr *base)
-{
- struct r535_gr *gr = r535_gr(base);
-
- while (gr->ctxbuf_nr)
- nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
-
- kfree(gr->base.func);
- return gr;
-}
-
-int
-r535_gr_new(const struct gf100_gr_func *hw,
- struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
-{
- struct nvkm_gr_func *rm;
- struct r535_gr *gr;
- int nclass;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_gr_dtor;
- rm->oneinit = r535_gr_oneinit;
- rm->units = r535_gr_units;
- rm->chan_new = r535_gr_chan_new;
-
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_gr_obj_ctor;
- }
-
- if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) {
- kfree(rm);
- return -ENOMEM;
- }
-
- *pgr = &gr->base;
-
- return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index b7a458e9040a..bda8054c6b59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -219,7 +219,7 @@ int
tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_gr_new(&tu102_gr, device, type, inst, pgr);
+ return -ENODEV;
return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
index 2b0e923cb755..37b0cdc760c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -2,8 +2,4 @@
nvkm-y += nvkm/engine/nvdec/base.o
nvkm-y += nvkm/engine/nvdec/gm107.o
nvkm-y += nvkm/engine/nvdec/tu102.o
-nvkm-y += nvkm/engine/nvdec/ga100.o
nvkm-y += nvkm/engine/nvdec/ga102.o
-nvkm-y += nvkm/engine/nvdec/ad102.o
-
-nvkm-y += nvkm/engine/nvdec/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
index 022a9c824304..eea6368adae2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
@@ -23,16 +23,6 @@
#include <subdev/gsp.h>
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga102_nvdec_gsp = {
- .sclass = {
- { -1, -1, NVC7B0_VIDEO_DECODER },
- {}
- }
-};
-
static const struct nvkm_falcon_func
ga102_nvdec_flcn = {
.disable = gm200_flcn_disable,
@@ -67,7 +57,7 @@ ga102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst
struct nvkm_nvdec **pnvdec)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&ga102_nvdec_gsp, device, type, inst, pnvdec);
+ return -ENODEV;
return nvkm_nvdec_new_(ga102_nvdec_fwif, device, type, inst, 0x848000, pnvdec);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index f506ae83bfd7..f8d43e913093 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -20,7 +20,4 @@ extern const struct nvkm_nvdec_fwif gm107_nvdec_fwif[];
int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *,
enum nvkm_subdev_type, int, u32 addr, struct nvkm_nvdec **);
-
-int r535_nvdec_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
deleted file mode 100644
index 75a24f3e6617..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvdec_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvdec_obj_dtor(struct nvkm_object *object)
-{
- struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvdec_obj = {
- .dtor = r535_nvdec_obj_dtor,
-};
-
-static int
-r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_nvdec_obj *obj;
- NV_BSP_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
- args->engineInstance = oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvdec_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
-
- kfree(nvdec->engine.func);
- return nvdec;
-}
-
-int
-r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec)
-{
- struct nvkm_engine_func *rm;
- int nclass;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_nvdec_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_nvdec_obj_ctor;
- }
-
- if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) {
- kfree(rm);
- return -ENOMEM;
- }
-
- return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
index 808c8e010b9e..fe95b6e22f21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
@@ -23,22 +23,12 @@
#include <subdev/gsp.h>
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-tu102_nvdec = {
- .sclass = {
- { -1, -1, NVC4B0_VIDEO_DECODER },
- {}
- }
-};
-
int
tu102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvdec **pnvdec)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&tu102_nvdec, device, type, inst, pnvdec);
+ return -ENODEV;
return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, 0, pnvdec);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
index 2c1495b730f3..6dcb20d1d156 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
@@ -2,7 +2,3 @@
nvkm-y += nvkm/engine/nvenc/base.o
nvkm-y += nvkm/engine/nvenc/gm107.o
nvkm-y += nvkm/engine/nvenc/tu102.o
-nvkm-y += nvkm/engine/nvenc/ga102.o
-nvkm-y += nvkm/engine/nvenc/ad102.o
-
-nvkm-y += nvkm/engine/nvenc/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c
deleted file mode 100644
index 6463ab8e5871..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga102_nvenc = {
- .sclass = {
- { -1, -1, NVC7B7_VIDEO_ENCODER },
- {}
- }
-};
-
-int
-ga102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvenc **pnvenc)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvenc_new(&ga102_nvenc, device, type, inst, pnvenc);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
index 7917affc6505..b097e3f2867b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
@@ -18,7 +18,4 @@ extern const struct nvkm_nvenc_fwif gm107_nvenc_fwif[];
int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
int, struct nvkm_nvenc **pnvenc);
-
-int r535_nvenc_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c
deleted file mode 100644
index c8a2a9196ce5..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvenc_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvenc_obj_dtor(struct nvkm_object *object)
-{
- struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvenc_obj = {
- .dtor = r535_nvenc_obj_dtor,
-};
-
-static int
-r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_nvenc_obj *obj;
- NV_MSENC_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
- args->engineInstance = oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvenc_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
-
- kfree(nvenc->engine.func);
- return nvenc;
-}
-
-int
-r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc)
-{
- struct nvkm_engine_func *rm;
- int nclass;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_nvenc_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_nvenc_obj_ctor;
- }
-
- if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) {
- kfree(rm);
- return -ENOMEM;
- }
-
- return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
index 933864423bb3..8a436b398749 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
@@ -23,22 +23,12 @@
#include <subdev/gsp.h>
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-tu102_nvenc = {
- .sclass = {
- { -1, -1, NVC4B7_VIDEO_ENCODER },
- {}
- }
-};
-
int
tu102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvenc **pnvenc)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_nvenc_new(&tu102_nvenc, device, type, inst, pnvenc);
+ return -ENODEV;
return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild
deleted file mode 100644
index 1408f664add6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/engine/nvjpg/ga100.o
-nvkm-y += nvkm/engine/nvjpg/ad102.o
-
-nvkm-y += nvkm/engine/nvjpg/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c
deleted file mode 100644
index 62705dc6494c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ad102_nvjpg = {
- .sclass = {
- { -1, -1, NVC9D1_VIDEO_NVJPG },
- {}
- }
-};
-
-int
-ad102_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvjpg_new(&ad102_nvjpg, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c
deleted file mode 100644
index f550eb07da5a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga100_nvjpg = {
- .sclass = {
- { -1, -1, NVC4D1_VIDEO_NVJPG },
- {}
- }
-};
-
-int
-ga100_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvjpg_new(&ga100_nvjpg, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h
deleted file mode 100644
index 1e80cf70033a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_NVJPG_PRIV_H__
-#define __NVKM_NVJPG_PRIV_H__
-#include <engine/nvjpg.h>
-
-int r535_nvjpg_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c
deleted file mode 100644
index 1babddc4eb80..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvjpg_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvjpg_obj_dtor(struct nvkm_object *object)
-{
- struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvjpg_obj = {
- .dtor = r535_nvjpg_obj_dtor,
-};
-
-static int
-r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_nvjpg_obj *obj;
- NV_NVJPG_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
- args->engineInstance = oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvjpg_dtor(struct nvkm_engine *engine)
-{
- kfree(engine->func);
- return engine;
-}
-
-int
-r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
- struct nvkm_engine_func *rm;
- int nclass, ret;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_nvjpg_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_nvjpg_obj_ctor;
- }
-
- ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
- if (ret)
- kfree(rm);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild
deleted file mode 100644
index 99f1713d7e51..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/engine/ofa/ga100.o
-nvkm-y += nvkm/engine/ofa/ga102.o
-nvkm-y += nvkm/engine/ofa/ad102.o
-
-nvkm-y += nvkm/engine/ofa/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c
deleted file mode 100644
index ef474f61a1b5..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga100_ofa = {
- .sclass = {
- { -1, -1, NVC6FA_VIDEO_OFA },
- {}
- }
-};
-
-int
-ga100_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ga100_ofa, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c
deleted file mode 100644
index bea255529993..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga102_ofa = {
- .sclass = {
- { -1, -1, NVC7FA_VIDEO_OFA },
- {}
- }
-};
-
-int
-ga102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ga102_ofa, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h
deleted file mode 100644
index caf29e6bddb4..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_OFA_PRIV_H__
-#define __NVKM_OFA_PRIV_H__
-#include <engine/ofa.h>
-
-int r535_ofa_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c
deleted file mode 100644
index 438dc692eefe..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <subdev/mmu.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_ofa_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_ofa_obj_dtor(struct nvkm_object *object)
-{
- struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_ofa_obj = {
- .dtor = r535_ofa_obj_dtor,
-};
-
-static int
-r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_ofa_obj *obj;
- NV_OFA_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_ofa_dtor(struct nvkm_engine *engine)
-{
- kfree(engine->func);
- return engine;
-}
-
-int
-r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
- struct nvkm_engine_func *rm;
- int nclass, ret;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_ofa_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_ofa_obj_ctor;
- }
-
- ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
- if (ret)
- kfree(rm);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 4c2f6fc4ef58..c19ea4ea9bd3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -9,6 +9,7 @@ include $(src)/nvkm/subdev/fault/Kbuild
include $(src)/nvkm/subdev/fb/Kbuild
include $(src)/nvkm/subdev/fuse/Kbuild
include $(src)/nvkm/subdev/gpio/Kbuild
+include $(src)/nvkm/subdev/fsp/Kbuild
include $(src)/nvkm/subdev/gsp/Kbuild
include $(src)/nvkm/subdev/i2c/Kbuild
include $(src)/nvkm/subdev/iccsense/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 9754c6872543..8faee3317a74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -7,5 +7,3 @@ nvkm-y += nvkm/subdev/bar/gk20a.o
nvkm-y += nvkm/subdev/bar/gm107.o
nvkm-y += nvkm/subdev/bar/gm20b.o
nvkm-y += nvkm/subdev/bar/tu102.o
-
-nvkm-y += nvkm/subdev/bar/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index 51070b7dda85..e5e60915029c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -82,7 +82,7 @@ gf100_bar_bar2_init(struct nvkm_bar *base)
static int
gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
- struct lock_class_key *key, int bar_nr)
+ struct lock_class_key *key, enum nvkm_bar_id bar_id)
{
struct nvkm_device *device = bar->base.subdev.device;
resource_size_t bar_len;
@@ -93,14 +93,14 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
if (ret)
return ret;
- bar_len = device->func->resource_size(device, bar_nr);
+ bar_len = device->func->resource_size(device, bar_id);
if (!bar_len)
return -ENOMEM;
- if (bar_nr == 3 && bar->bar2_halve)
+ if (bar_id == NVKM_BAR2_INST && bar->bar2_halve)
bar_len >>= 1;
ret = nvkm_vmm_new(device, 0, bar_len, NULL, 0, key,
- (bar_nr == 3) ? "bar2" : "bar1", &bar_vm->vmm);
+ (bar_id == NVKM_BAR2_INST) ? "bar2" : "bar1", &bar_vm->vmm);
if (ret)
return ret;
@@ -110,7 +110,7 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
/*
* Bootstrap page table lookup.
*/
- if (bar_nr == 3) {
+ if (bar_id == NVKM_BAR2_INST) {
ret = nvkm_vmm_boot(bar_vm->vmm);
if (ret)
return ret;
@@ -129,7 +129,7 @@ gf100_bar_oneinit(struct nvkm_bar *base)
/* BAR2 */
if (bar->base.func->bar2.init) {
- ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, 3);
+ ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, NVKM_BAR2_INST);
if (ret)
return ret;
@@ -138,7 +138,7 @@ gf100_bar_oneinit(struct nvkm_bar *base)
}
/* BAR1 */
- ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, 1);
+ ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, NVKM_BAR1_FB);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 27d8a1be43e4..6a881becb02c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -127,7 +127,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR2 */
start = 0x0100000000ULL;
- size = device->func->resource_size(device, 3);
+ size = device->func->resource_size(device, NVKM_BAR2_INST);
if (!size)
return -ENOMEM;
limit = start + size;
@@ -167,7 +167,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR1 */
start = 0x0000000000ULL;
- size = device->func->resource_size(device, 1);
+ size = device->func->resource_size(device, NVKM_BAR1_FB);
if (!size)
return -ENOMEM;
limit = start + size;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
index 6c5bbff12eb4..b918e22df5a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
@@ -47,8 +47,8 @@
static inline struct io_mapping *
fbmem_init(struct nvkm_device *dev)
{
- return io_mapping_create_wc(dev->func->resource_addr(dev, 1),
- dev->func->resource_size(dev, 1));
+ return io_mapping_create_wc(dev->func->resource_addr(dev, NVKM_BAR1_FB),
+ dev->func->resource_size(dev, NVKM_BAR1_FB));
}
static inline void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
index c123e5893d76..cd2fbc0472d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
@@ -50,7 +50,7 @@ nvkm_ufault_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
struct nvkm_device *device = buffer->fault->subdev.device;
*type = NVKM_OBJECT_MAP_IO;
- *addr = device->func->resource_addr(device, 3) + buffer->addr;
+ *addr = device->func->resource_addr(device, NVKM_BAR2_INST) + buffer->addr;
*size = nvkm_memory_size(buffer->mem);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index d1611ad3bf81..8d8a5382d1b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -35,6 +35,9 @@ nvkm-y += nvkm/subdev/fb/gv100.o
nvkm-y += nvkm/subdev/fb/tu102.o
nvkm-y += nvkm/subdev/fb/ga100.o
nvkm-y += nvkm/subdev/fb/ga102.o
+nvkm-y += nvkm/subdev/fb/gh100.o
+nvkm-y += nvkm/subdev/fb/gb100.o
+nvkm-y += nvkm/subdev/fb/gb202.o
nvkm-y += nvkm/subdev/fb/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
index 25f82b372bca..2819780050d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -25,7 +25,7 @@
#include <subdev/gsp.h>
#include <engine/nvdec.h>
-static u64
+u64
ga102_fb_vidmem_size(struct nvkm_fb *fb)
{
return (u64)nvkm_rd32(fb->subdev.device, 0x1183a4) << 20;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c
new file mode 100644
index 000000000000..1c78c8853617
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb100/dev_hshub_base.h>
+
+static void
+gb100_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+ const u32 addr_hi = upper_32_bits(fb->sysmem.flush_page_addr);
+ const u32 addr_lo = lower_32_bits(fb->sysmem.flush_page_addr);
+ const u32 hshub = DRF_LO(NV_PFB_HSHUB0);
+ struct nvkm_device *device = fb->subdev.device;
+
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi);
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo);
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi);
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo);
+}
+
+static const struct nvkm_fb_func
+gb100_fb = {
+ .sysmem.flush_page_init = gb100_fb_sysmem_flush_page_init,
+ .vidmem.size = ga102_fb_vidmem_size,
+};
+
+int
+gb100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return r535_fb_new(&gb100_fb, device, type, inst, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c
new file mode 100644
index 000000000000..848505026d02
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb10b/dev_fbhub.h>
+
+static void
+gb202_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ const u64 addr = fb->sysmem.flush_page_addr;
+
+ nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr));
+ nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr));
+}
+
+static const struct nvkm_fb_func
+gb202_fb = {
+ .sysmem.flush_page_init = gb202_fb_sysmem_flush_page_init,
+ .vidmem.size = ga102_fb_vidmem_size,
+};
+
+int
+gb202_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return r535_fb_new(&gb202_fb, device, type, inst, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c
new file mode 100644
index 000000000000..2d8c51f882d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_fb.h>
+
+static void
+gh100_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+ const u64 addr = fb->sysmem.flush_page_addr >> NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT;
+ struct nvkm_device *device = fb->subdev.device;
+
+ nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr));
+ nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr));
+}
+
+static const struct nvkm_fb_func
+gh100_fb = {
+ .sysmem.flush_page_init = gh100_fb_sysmem_flush_page_init,
+ .vidmem.size = ga102_fb_vidmem_size,
+};
+
+int
+gh100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return r535_fb_new(&gh100_fb, device, type, inst, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 35c55dfba23d..ebe996503ab2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -98,4 +98,6 @@ int gp102_fb_vpr_scrub(struct nvkm_fb *);
int gv100_fb_init_page(struct nvkm_fb *);
bool tu102_fb_vpr_scrub_required(struct nvkm_fb *);
+
+u64 ga102_fb_vidmem_size(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild
new file mode 100644
index 000000000000..1a9ded3a86f8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+
+nvkm-y += nvkm/subdev/fsp/base.o
+nvkm-y += nvkm/subdev/fsp/gh100.o
+nvkm-y += nvkm/subdev/fsp/gb100.o
+nvkm-y += nvkm/subdev/fsp/gb202.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c
new file mode 100644
index 000000000000..e366a980baa9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+int
+nvkm_fsp_boot_gsp_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig)
+{
+ return fsp->func->cot.boot_gsp_fmc(fsp, args_addr, rsvd_size, resume,
+ img_addr, hash, pkey, sig);
+}
+
+bool
+nvkm_fsp_verify_gsp_fmc(struct nvkm_fsp *fsp, u32 hash_size, u32 pkey_size, u32 sig_size)
+{
+ return hash_size == fsp->func->cot.size_hash &&
+ pkey_size == fsp->func->cot.size_pkey &&
+ sig_size == fsp->func->cot.size_sig;
+}
+
+static int
+nvkm_fsp_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_fsp *fsp = nvkm_fsp(subdev);
+
+ return fsp->func->wait_secure_boot(fsp);
+}
+
+static void *
+nvkm_fsp_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_fsp *fsp = nvkm_fsp(subdev);
+
+ nvkm_falcon_dtor(&fsp->falcon);
+ return fsp;
+}
+
+static const struct nvkm_falcon_func
+nvkm_fsp_flcn = {
+ .emem_pio = &gp102_flcn_emem_pio,
+};
+
+static const struct nvkm_subdev_func
+nvkm_fsp = {
+ .dtor = nvkm_fsp_dtor,
+ .preinit = nvkm_fsp_preinit,
+};
+
+int
+nvkm_fsp_new_(const struct nvkm_fsp_func *func,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fsp **pfsp)
+{
+ struct nvkm_fsp *fsp;
+
+ fsp = *pfsp = kzalloc(sizeof(*fsp), GFP_KERNEL);
+ if (!fsp)
+ return -ENOMEM;
+
+ fsp->func = func;
+ nvkm_subdev_ctor(&nvkm_fsp, device, type, inst, &fsp->subdev);
+
+ return nvkm_falcon_ctor(&nvkm_fsp_flcn, &fsp->subdev, "fsp", 0x8f2000, &fsp->falcon);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c
new file mode 100644
index 000000000000..e06636bf54b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_fsp_func
+gb100_fsp = {
+ .wait_secure_boot = gh100_fsp_wait_secure_boot,
+ .cot = {
+ .version = 2,
+ .size_hash = 48,
+ .size_pkey = 97,
+ .size_sig = 96,
+ .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc,
+ },
+};
+
+int
+gb100_fsp_new(struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp)
+{
+ return nvkm_fsp_new_(&gb100_fsp, device, type, inst, pfsp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c
new file mode 100644
index 000000000000..3438aac6383e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb202/dev_therm.h>
+
+static int
+gb202_fsp_wait_secure_boot(struct nvkm_fsp *fsp)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ unsigned timeout_ms = 4000;
+
+ do {
+ u32 status = NVKM_RD32(device, NV_THERM, I2CS_SCRATCH, FSP_BOOT_COMPLETE_STATUS);
+
+ if (status == NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout_ms--);
+
+ return -ETIMEDOUT;
+}
+
+static const struct nvkm_fsp_func
+gb202_fsp = {
+ .wait_secure_boot = gb202_fsp_wait_secure_boot,
+ .cot = {
+ .version = 2,
+ .size_hash = 48,
+ .size_pkey = 97,
+ .size_sig = 96,
+ .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc,
+ },
+};
+
+int
+gb202_fsp_new(struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp)
+{
+ return nvkm_fsp_new_(&gb202_fsp, device, type, inst, pfsp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c
new file mode 100644
index 000000000000..2815be4bf5de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_fsp_pri.h>
+#include <nvhw/ref/gh100/dev_therm.h>
+
+#include <nvrm/nvtypes.h>
+
+#define MCTP_HEADER_VERSION 3:0
+#define MCTP_HEADER_RSVD 7:4
+
+#define MCTP_HEADER_DEID 15:8
+#define MCTP_HEADER_SEID 23:16
+
+#define MCTP_HEADER_TAG 26:24
+#define MCTP_HEADER_TO 27:27
+#define MCTP_HEADER_SEQ 29:28
+#define MCTP_HEADER_EOM 30:30
+#define MCTP_HEADER_SOM 31:31
+
+#define MCTP_MSG_HEADER_TYPE 6:0
+#define MCTP_MSG_HEADER_IC 7:7
+
+#define MCTP_MSG_HEADER_VENDOR_ID 23:8
+#define MCTP_MSG_HEADER_NVDM_TYPE 31:24
+
+#define MCTP_MSG_HEADER_TYPE_VENDOR_PCI 0x7e
+#define MCTP_MSG_HEADER_VENDOR_ID_NV 0x10de
+
+#define NVDM_TYPE_COT 0x14
+#define NVDM_TYPE_FSP_RESPONSE 0x15
+
+#pragma pack(1)
+typedef struct nvdm_payload_cot
+{
+ NvU16 version;
+ NvU16 size;
+ NvU64 gspFmcSysmemOffset;
+ NvU64 frtsSysmemOffset;
+ NvU32 frtsSysmemSize;
+
+ // Note this is an offset from the end of FB
+ NvU64 frtsVidmemOffset;
+ NvU32 frtsVidmemSize;
+
+ // Authentication related fields
+ NvU32 hash384[12];
+ NvU32 publicKey[96];
+ NvU32 signature[96];
+
+ NvU64 gspBootArgsSysmemOffset;
+} NVDM_PAYLOAD_COT;
+#pragma pack()
+
+#pragma pack(1)
+typedef struct
+{
+ NvU32 taskId;
+ NvU32 commandNvdmType;
+ NvU32 errorCode;
+} NVDM_PAYLOAD_COMMAND_RESPONSE;
+#pragma pack()
+
+static u32
+gh100_fsp_poll(struct nvkm_fsp *fsp)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ u32 head, tail;
+
+ head = nvkm_rd32(device, NV_PFSP_MSGQ_HEAD(0));
+ tail = nvkm_rd32(device, NV_PFSP_MSGQ_TAIL(0));
+
+ if (head == tail)
+ return 0;
+
+ return (tail - head) + sizeof(u32); /* TAIL points at last DWORD written. */
+}
+
+static int
+gh100_fsp_recv(struct nvkm_fsp *fsp, u8 *packet, u32 max_packet_size)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ u32 packet_size;
+ int ret;
+
+ packet_size = gh100_fsp_poll(fsp);
+ if (!packet_size || WARN_ON(packet_size % 4 || packet_size > max_packet_size))
+ return -EINVAL;
+
+ ret = nvkm_falcon_pio_rd(&fsp->falcon, 0, EMEM, 0, packet, 0, packet_size);
+ if (ret)
+ return ret;
+
+ nvkm_wr32(device, NV_PFSP_MSGQ_TAIL(0), 0);
+ nvkm_wr32(device, NV_PFSP_MSGQ_HEAD(0), 0);
+
+ return packet_size;
+}
+
+static int
+gh100_fsp_wait(struct nvkm_fsp *fsp)
+{
+ int time = 1000;
+
+ do {
+ if (gh100_fsp_poll(fsp))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while(time--);
+
+ return -ETIMEDOUT;
+}
+
+static int
+gh100_fsp_send(struct nvkm_fsp *fsp, const u8 *packet, u32 packet_size)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ int time = 1000, ret;
+
+ if (WARN_ON(packet_size % sizeof(u32)))
+ return -EINVAL;
+
+ /* Ensure any previously sent message has been consumed. */
+ do {
+ u32 head = nvkm_rd32(device, NV_PFSP_QUEUE_HEAD(0));
+ u32 tail = nvkm_rd32(device, NV_PFSP_QUEUE_TAIL(0));
+
+ if (tail == head)
+ break;
+
+ usleep_range(1000, 2000);
+ } while(time--);
+
+ if (time < 0)
+ return -ETIMEDOUT;
+
+ /* Write message to EMEM. */
+ ret = nvkm_falcon_pio_wr(&fsp->falcon, packet, 0, 0, EMEM, 0, packet_size, 0, false);
+ if (ret)
+ return ret;
+
+ /* Update queue pointers - TAIL points at last DWORD written. */
+ nvkm_wr32(device, NV_PFSP_QUEUE_TAIL(0), packet_size - sizeof(u32));
+ nvkm_wr32(device, NV_PFSP_QUEUE_HEAD(0), 0);
+ return 0;
+}
+
+static int
+gh100_fsp_send_sync(struct nvkm_fsp *fsp, u8 nvdm_type, const u8 *packet, u32 packet_size)
+{
+ struct nvkm_subdev *subdev = &fsp->subdev;
+ struct {
+ u32 mctp_header;
+ u32 nvdm_header;
+ NVDM_PAYLOAD_COMMAND_RESPONSE response;
+ } reply;
+ int ret;
+
+ ret = gh100_fsp_send(fsp, packet, packet_size);
+ if (ret)
+ return ret;
+
+ ret = gh100_fsp_wait(fsp);
+ if (ret)
+ return ret;
+
+ ret = gh100_fsp_recv(fsp, (u8 *)&reply, sizeof(reply));
+ if (ret < 0)
+ return ret;
+
+ if (NVVAL_TEST(reply.mctp_header, MCTP, HEADER, SOM, !=, 1) ||
+ NVVAL_TEST(reply.mctp_header, MCTP, HEADER, EOM, !=, 1)) {
+ nvkm_error(subdev, "unexpected MCTP header in reply: 0x%08x\n", reply.mctp_header);
+ return -EIO;
+ }
+
+ if (NVDEF_TEST(reply.nvdm_header, MCTP, MSG_HEADER, TYPE, !=, VENDOR_PCI) ||
+ NVDEF_TEST(reply.nvdm_header, MCTP, MSG_HEADER, VENDOR_ID, !=, NV) ||
+ NVVAL_TEST(reply.nvdm_header, MCTP, MSG_HEADER, NVDM_TYPE, !=, NVDM_TYPE_FSP_RESPONSE)) {
+ nvkm_error(subdev, "unexpected NVDM header in reply: 0x%08x\n", reply.nvdm_header);
+ return -EIO;
+ }
+
+ if (reply.response.commandNvdmType != nvdm_type) {
+ nvkm_error(subdev, "expected NVDM type 0x%02x in reply, got 0x%02x\n",
+ nvdm_type, reply.response.commandNvdmType);
+ return -EIO;
+ }
+
+ if (reply.response.errorCode) {
+ nvkm_error(subdev, "NVDM command 0x%02x failed with error 0x%08x\n",
+ nvdm_type, reply.response.errorCode);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig)
+{
+ struct {
+ u32 mctp_header;
+ u32 nvdm_header;
+ NVDM_PAYLOAD_COT cot;
+ } msg = {};
+
+ msg.mctp_header = NVVAL(MCTP, HEADER, SOM, 1) |
+ NVVAL(MCTP, HEADER, EOM, 1) |
+ NVVAL(MCTP, HEADER, SEID, 0) |
+ NVVAL(MCTP, HEADER, SEQ, 0);
+
+ msg.nvdm_header = NVDEF(MCTP, MSG_HEADER, TYPE, VENDOR_PCI) |
+ NVDEF(MCTP, MSG_HEADER, VENDOR_ID, NV) |
+ NVVAL(MCTP, MSG_HEADER, NVDM_TYPE, NVDM_TYPE_COT);
+
+ msg.cot.version = fsp->func->cot.version;
+ msg.cot.size = sizeof(msg.cot);
+ msg.cot.gspFmcSysmemOffset = img_addr;
+ if (!resume) {
+ msg.cot.frtsVidmemOffset = ALIGN(rsvd_size, 0x200000);
+ msg.cot.frtsVidmemSize = 0x100000;
+ }
+
+ memcpy(msg.cot.hash384, hash, fsp->func->cot.size_hash);
+ memcpy(msg.cot.publicKey, pkey, fsp->func->cot.size_pkey);
+ memcpy(msg.cot.signature, sig, fsp->func->cot.size_sig);
+
+ msg.cot.gspBootArgsSysmemOffset = args_addr;
+
+ return gh100_fsp_send_sync(fsp, NVDM_TYPE_COT, (const u8 *)&msg, sizeof(msg));
+}
+
+int
+gh100_fsp_wait_secure_boot(struct nvkm_fsp *fsp)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ unsigned timeout_ms = 4000;
+
+ do {
+ u32 status = NVKM_RD32(device, NV_THERM, I2CS_SCRATCH, FSP_BOOT_COMPLETE_STATUS);
+
+ if (status == NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout_ms--);
+
+ return -ETIMEDOUT;
+}
+
+static const struct nvkm_fsp_func
+gh100_fsp = {
+ .wait_secure_boot = gh100_fsp_wait_secure_boot,
+ .cot = {
+ .version = 1,
+ .size_hash = 48,
+ .size_pkey = 384,
+ .size_sig = 384,
+ .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc,
+ },
+};
+
+int
+gh100_fsp_new(struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp)
+{
+ return nvkm_fsp_new_(&gh100_fsp, device, type, inst, pfsp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h
new file mode 100644
index 000000000000..f0b2c605c33d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_FSP_PRIV_H__
+#define __NVKM_FSP_PRIV_H__
+#define nvkm_fsp(p) container_of((p), struct nvkm_fsp, subdev)
+#include <subdev/fsp.h>
+
+struct nvkm_fsp_func {
+ int (*wait_secure_boot)(struct nvkm_fsp *);
+
+ struct {
+ u32 version;
+ u32 size_hash;
+ u32 size_pkey;
+ u32 size_sig;
+ int (*boot_gsp_fmc)(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig);
+ } cot;
+};
+
+int nvkm_fsp_new_(const struct nvkm_fsp_func *,
+ struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+
+int gh100_fsp_wait_secure_boot(struct nvkm_fsp *);
+int gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
index 16bf2f1bb780..e9c948b67bbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
@@ -7,6 +7,9 @@ nvkm-y += nvkm/subdev/gsp/tu102.o
nvkm-y += nvkm/subdev/gsp/tu116.o
nvkm-y += nvkm/subdev/gsp/ga100.o
nvkm-y += nvkm/subdev/gsp/ga102.o
+nvkm-y += nvkm/subdev/gsp/gh100.o
nvkm-y += nvkm/subdev/gsp/ad102.o
+nvkm-y += nvkm/subdev/gsp/gb100.o
+nvkm-y += nvkm/subdev/gsp/gb202.o
-nvkm-y += nvkm/subdev/gsp/r535.o
+include $(src)/nvkm/subdev/gsp/rm/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
index c849c6299c52..eb765da0876e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
@@ -22,30 +22,27 @@
#include "priv.h"
static const struct nvkm_gsp_func
-ad102_gsp_r535_113_01 = {
+ad102_gsp = {
.flcn = &ga102_gsp_flcn,
.fwsec = &ga102_gsp_fwsec,
.sig_section = ".fwsignature_ad10x",
- .wpr_heap.os_carveout_size = 20 << 20,
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 84 << 20,
-
.booter.ctor = ga102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = ga102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ad10x_gpu,
};
static struct nvkm_gsp_fwif
ad102_gsps[] = {
- { 0, r535_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true },
+ { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144", true },
+ { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01", true },
{}
};
@@ -55,3 +52,15 @@ ad102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ad102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ad102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad103, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad106, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad107, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(ad102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad103, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad106, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad107, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index da1bebb896f7..d23243a83a4c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -52,7 +52,7 @@ nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_gsp *gsp = nvkm_gsp(subdev);
- if (!gsp->func->fini)
+ if (!gsp->func->fini || !gsp->running)
return 0;
return gsp->func->fini(gsp, suspend);
@@ -80,6 +80,21 @@ nvkm_gsp_oneinit(struct nvkm_subdev *subdev)
return gsp->func->oneinit(gsp);
}
+void
+nvkm_gsp_dtor_fws(struct nvkm_gsp *gsp)
+{
+ nvkm_firmware_put(gsp->fws.fmc);
+ gsp->fws.fmc = NULL;
+ nvkm_firmware_put(gsp->fws.bl);
+ gsp->fws.bl = NULL;
+ nvkm_firmware_put(gsp->fws.booter.unload);
+ gsp->fws.booter.unload = NULL;
+ nvkm_firmware_put(gsp->fws.booter.load);
+ gsp->fws.booter.load = NULL;
+ nvkm_firmware_put(gsp->fws.rm);
+ gsp->fws.rm = NULL;
+}
+
static void *
nvkm_gsp_dtor(struct nvkm_subdev *subdev)
{
@@ -89,6 +104,7 @@ nvkm_gsp_dtor(struct nvkm_subdev *subdev)
gsp->func->dtor(gsp);
nvkm_falcon_dtor(&gsp->falcon);
+ kfree(gsp->rm);
return gsp;
}
@@ -101,6 +117,16 @@ nvkm_gsp = {
};
int
+nvkm_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
+ const struct firmware **pfw)
+{
+ char fwname[64];
+
+ snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
+ return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
+}
+
+int
nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_gsp **pgsp)
{
@@ -116,7 +142,19 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
return PTR_ERR(fwif);
gsp->func = fwif->func;
- gsp->rm = gsp->func->rm;
+
+ if (fwif->rm) {
+ nvkm_info(&gsp->subdev, "RM version: %s\n", fwif->ver);
+
+ gsp->rm = kzalloc(sizeof(*gsp->rm), GFP_KERNEL);
+ if (!gsp->rm)
+ return -ENOMEM;
+
+ gsp->rm->device = device;
+ gsp->rm->gpu = fwif->func->rm.gpu;
+ gsp->rm->wpr = fwif->rm->wpr;
+ gsp->rm->api = fwif->rm->api;
+ }
return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0x110000,
&gsp->falcon);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
index 223f68b532ef..d201e8697226 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
@@ -39,29 +39,27 @@ ga100_gsp_flcn = {
};
static const struct nvkm_gsp_func
-ga100_gsp_r535_113_01 = {
+ga100_gsp = {
.flcn = &ga100_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_ga100",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ga100_gpu,
};
static struct nvkm_gsp_fwif
ga100_gsps[] = {
- { 0, r535_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &ga100_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &ga100_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -72,3 +70,6 @@ ga100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ga100_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ga100, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga100, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
index 4c4b4168a266..917f7e2f6c46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
@@ -150,25 +150,21 @@ ga102_gsp_flcn = {
};
static const struct nvkm_gsp_func
-ga102_gsp_r535_113_01 = {
+ga102_gsp_r535 = {
.flcn = &ga102_gsp_flcn,
.fwsec = &ga102_gsp_fwsec,
.sig_section = ".fwsignature_ga10x",
- .wpr_heap.os_carveout_size = 20 << 20,
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 84 << 20,
-
.booter.ctor = ga102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = ga102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ga1xx_gpu,
};
static const struct nvkm_gsp_func
@@ -178,7 +174,8 @@ ga102_gsp = {
static struct nvkm_gsp_fwif
ga102_gsps[] = {
- { 0, r535_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &ga102_gsp_r535, &r570_rm_ga102, "570.144" },
+ { 0, tu102_gsp_load, &ga102_gsp_r535, &r535_rm_ga102, "535.113.01" },
{ -1, gv100_gsp_nofw, &ga102_gsp },
{}
};
@@ -189,3 +186,15 @@ ga102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ga102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ga102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga103, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga106, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga107, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(ga102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga103, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga106, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga107, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
new file mode 100644
index 000000000000..12a3f2c1ed82
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+gb100_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gb10x",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gb10x_gpu,
+};
+
+static struct nvkm_gsp_fwif
+gb100_gsps[] = {
+ { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144", true },
+ {}
+};
+
+int
+gb100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gb100_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gb100, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb102, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
new file mode 100644
index 000000000000..c1d718172ddf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+gb202_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gb20x",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gb20x_gpu,
+};
+
+static struct nvkm_gsp_fwif
+gb202_gsps[] = {
+ { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144", true },
+ {}
+};
+
+int
+gb202_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gb202_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gb202, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb203, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb205, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb206, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb207, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
new file mode 100644
index 000000000000..ce31e8248807
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <linux/elf.h>
+#include <linux/crc32.h>
+
+#include <subdev/fb.h>
+#include <subdev/fsp.h>
+
+#include <rm/r570/nvrm/gsp.h>
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_falcon_v4.h>
+#include <nvhw/ref/gh100/dev_riscv_pri.h>
+
+int
+gh100_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+ struct nvkm_falcon *falcon = &gsp->falcon;
+ int ret, time = 4000;
+
+ /* Shutdown RM. */
+ ret = r535_gsp_fini(gsp, suspend);
+ if (ret && suspend)
+ return ret;
+
+ /* Wait for RISC-V to halt. */
+ do {
+ u32 data = nvkm_falcon_rd32(falcon, falcon->addr2 + NV_PRISCV_RISCV_CPUCTL);
+
+ if (NVVAL_GET(data, NV_PRISCV, RISCV_CPUCTL, HALTED))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while(time--);
+
+ return -ETIMEDOUT;
+}
+
+static bool
+gh100_gsp_lockdown_released(struct nvkm_gsp *gsp, u32 *mbox0)
+{
+ u32 data;
+
+ /* Wait for GSP access via BAR0 to be allowed. */
+ *mbox0 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX0);
+
+ if (*mbox0 && (*mbox0 & 0xffffff00) == 0xbadf4100)
+ return false;
+
+ /* Check if an error code has been reported. */
+ if (*mbox0) {
+ u32 mbox1 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX1);
+
+ /* Any value that's not GSP_FMC_BOOT_PARAMS addr is an error. */
+ if ((((u64)mbox1 << 32) | *mbox0) != gsp->fmc.args.addr)
+ return true;
+ }
+
+ /* Check if lockdown has been released. */
+ data = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_HWCFG2);
+ return !NVVAL_GET(data, NV_PFALCON, FALCON_HWCFG2, RISCV_BR_PRIV_LOCKDOWN);
+}
+
+int
+gh100_gsp_init(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ const bool resume = gsp->sr.meta.data != NULL;
+ struct nvkm_gsp_mem *meta;
+ GSP_FMC_BOOT_PARAMS *args;
+ int ret, time = 4000;
+ u32 rsvd_size;
+ u32 mbox0;
+
+ if (!resume) {
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*args), &gsp->fmc.args);
+ if (ret)
+ return ret;
+
+ meta = &gsp->wpr_meta;
+ } else {
+ gsp->rm->api->gsp->set_rmargs(gsp, true);
+ meta = &gsp->sr.meta;
+ }
+
+ args = gsp->fmc.args.data;
+
+ args->bootGspRmParams.gspRmDescOffset = meta->addr;
+ args->bootGspRmParams.gspRmDescSize = meta->size;
+ args->bootGspRmParams.target = GSP_DMA_TARGET_COHERENT_SYSTEM;
+ args->bootGspRmParams.bIsGspRmBoot = 1;
+
+ args->gspRmParams.target = GSP_DMA_TARGET_NONCOHERENT_SYSTEM;
+ args->gspRmParams.bootArgsOffset = gsp->libos.addr;
+
+ rsvd_size = gsp->fb.heap.size;
+ if (gsp->rm->wpr->rsvd_size_pmu)
+ rsvd_size = ALIGN(rsvd_size + gsp->rm->wpr->rsvd_size_pmu, 0x200000);
+
+ ret = nvkm_fsp_boot_gsp_fmc(device->fsp, gsp->fmc.args.addr, rsvd_size, resume,
+ gsp->fmc.fw.addr, gsp->fmc.hash, gsp->fmc.pkey, gsp->fmc.sig);
+ if (ret)
+ return ret;
+
+ do {
+ if (gh100_gsp_lockdown_released(gsp, &mbox0))
+ break;
+
+ usleep_range(1000, 2000);
+ } while(time--);
+
+ if (time < 0) {
+ nvkm_error(subdev, "GSP-FMC boot timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mbox0) {
+ nvkm_error(subdev, "GSP-FMC boot failed (mbox: 0x%08x)\n", mbox0);
+ return -EIO;
+ }
+
+ return r535_gsp_init(gsp);
+}
+
+static int
+gh100_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta;
+ int ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta);
+ if (ret)
+ return ret;
+
+ gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+ gsp->fb.bios.vga_workspace.size = 128 * 1024;
+ gsp->fb.heap.size = gsp->rm->wpr->heap_size_non_wpr;
+
+ meta = gsp->wpr_meta.data;
+
+ meta->magic = GSP_FW_WPR_META_MAGIC;
+ meta->revision = GSP_FW_WPR_META_REVISION;
+
+ meta->sizeOfRadix3Elf = gsp->fw.len;
+ meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
+
+ meta->sizeOfBootloader = gsp->boot.fw.size;
+ meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+ meta->bootloaderCodeOffset = gsp->boot.code_offset;
+ meta->bootloaderDataOffset = gsp->boot.data_offset;
+ meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+ meta->sysmemAddrOfSignature = gsp->sig.addr;
+ meta->sizeOfSignature = gsp->sig.size;
+
+ meta->nonWprHeapSize = gsp->fb.heap.size;
+ meta->gspFwHeapSize = tu102_gsp_wpr_heap_size(gsp);
+ meta->frtsSize = 0x100000;
+ meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+ meta->pmuReservedSize = gsp->rm->wpr->rsvd_size_pmu;
+ return 0;
+}
+
+/* The sh_flags value for the binary blobs in the ELF image */
+#define FMC_SHF_FLAGS (SHF_MASKPROC | SHF_MASKOS | SHF_OS_NONCONFORMING | SHF_ALLOC)
+
+#define ELF_HDR_SIZE ((u8)sizeof(struct elf32_hdr))
+#define ELF_SHDR_SIZE ((u8)sizeof(struct elf32_shdr))
+
+/* The FMC ELF header must be exactly this */
+static const u8 elf_header[] = {
+ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 1, 0, 0, 0, /* e_type, e_machine, e_version */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* e_entry, e_phoff */
+
+ ELF_HDR_SIZE, 0, 0, 0, 0, 0, 0, 0, /* e_shoff, e_flags */
+ ELF_HDR_SIZE, 0, 0, 0, /* e_ehsize, e_phentsize */
+ 0, 0, ELF_SHDR_SIZE, 0, /* e_phnum, e_shentsize */
+
+ 6, 0, 1, 0, /* e_shnum, e_shstrndx */
+};
+
+/**
+ * elf_validate_sections - validate each section in the FMC ELF image
+ * @elf: ELF image
+ * @length: size of the entire ELF image
+ */
+static bool
+elf_validate_sections(const void *elf, size_t length)
+{
+ const struct elf32_hdr *ehdr = elf;
+ const struct elf32_shdr *shdr = elf + ehdr->e_shoff;
+
+ /* The offset of the first section */
+ Elf32_Off section_begin = ehdr->e_shoff + ehdr->e_shnum * ehdr->e_shentsize;
+
+ if (section_begin > length)
+ return false;
+
+ /* The first section header is the null section, so skip it */
+ for (unsigned int i = 1; i < ehdr->e_shnum; i++) {
+ if (i == ehdr->e_shstrndx) {
+ if (shdr[i].sh_type != SHT_STRTAB)
+ return false;
+ if (shdr[i].sh_flags != SHF_STRINGS)
+ return false;
+ } else {
+ if (shdr[i].sh_type != SHT_PROGBITS)
+ return false;
+ if (shdr[i].sh_flags != FMC_SHF_FLAGS)
+ return false;
+ }
+
+ /* Ensure that each section is inside the image */
+ if (shdr[i].sh_offset < section_begin ||
+ (u64)shdr[i].sh_offset + shdr[i].sh_size > length)
+ return false;
+
+ /* Non-zero sh_info is a CRC */
+ if (shdr[i].sh_info) {
+ /* The kernel's CRC32 needs a pre- and post-xor to match standard CRCs */
+ u32 crc32 = crc32_le(~0, elf + shdr[i].sh_offset, shdr[i].sh_size) ^ ~0;
+
+ if (shdr[i].sh_info != crc32)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * elf_section - return a pointer to the data for a given section
+ * @elf: ELF image
+ * @name: section name to search for
+ * @len: pointer to returned length of found section
+ */
+static const void *
+elf_section(const void *elf, const char *name, unsigned int *len)
+{
+ const struct elf32_hdr *ehdr = elf;
+ const struct elf32_shdr *shdr = elf + ehdr->e_shoff;
+ const char *names = elf + shdr[ehdr->e_shstrndx].sh_offset;
+
+ for (unsigned int i = 1; i < ehdr->e_shnum; i++) {
+ if (!strcmp(&names[shdr[i].sh_name], name)) {
+ *len = shdr[i].sh_size;
+ return elf + shdr[i].sh_offset;
+ }
+ }
+
+ return NULL;
+}
+
+int
+gh100_gsp_oneinit(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fsp *fsp = device->fsp;
+ const void *fw = gsp->fws.fmc->data;
+ const void *hash, *sig, *pkey, *img;
+ unsigned int img_len = 0, hash_len = 0, pkey_len = 0, sig_len = 0;
+ int ret;
+
+ if (gsp->fws.fmc->size < ELF_HDR_SIZE ||
+ memcmp(fw, elf_header, sizeof(elf_header)) ||
+ !elf_validate_sections(fw, gsp->fws.fmc->size)) {
+ nvkm_error(subdev, "fmc firmware image is invalid\n");
+ return -ENODATA;
+ }
+
+ hash = elf_section(fw, "hash", &hash_len);
+ sig = elf_section(fw, "signature", &sig_len);
+ pkey = elf_section(fw, "publickey", &pkey_len);
+ img = elf_section(fw, "image", &img_len);
+
+ if (!hash || !sig || !pkey || !img) {
+ nvkm_error(subdev, "fmc firmware image is invalid\n");
+ return -ENODATA;
+ }
+
+ if (!nvkm_fsp_verify_gsp_fmc(fsp, hash_len, pkey_len, sig_len))
+ return -EINVAL;
+
+ /* Load GSP-FMC FW into memory. */
+ ret = nvkm_gsp_mem_ctor(gsp, img_len, &gsp->fmc.fw);
+ if (ret)
+ return ret;
+
+ memcpy(gsp->fmc.fw.data, img, img_len);
+
+ gsp->fmc.hash = kmemdup(hash, hash_len, GFP_KERNEL);
+ gsp->fmc.pkey = kmemdup(pkey, pkey_len, GFP_KERNEL);
+ gsp->fmc.sig = kmemdup(sig, sig_len, GFP_KERNEL);
+ if (!gsp->fmc.hash || !gsp->fmc.pkey || !gsp->fmc.sig)
+ return -ENOMEM;
+
+ ret = r535_gsp_oneinit(gsp);
+ if (ret)
+ return ret;
+
+ return gh100_gsp_wpr_meta_init(gsp);
+}
+
+static const struct nvkm_gsp_func
+gh100_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gh100",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gh100_gpu,
+};
+
+int
+gh100_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
+{
+ int ret;
+
+ ret = tu102_gsp_load_rm(gsp, fwif);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "fmc", fwif->ver, &gsp->fws.fmc);
+
+done:
+ if (ret)
+ nvkm_gsp_dtor_fws(gsp);
+
+ return ret;
+}
+
+static struct nvkm_gsp_fwif
+gh100_gsps[] = {
+ { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144", true },
+ {}
+};
+
+int
+gh100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gh100_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gh100, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 9f4a62375a27..4f14e85fc69e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_GSP_PRIV_H__
#define __NVKM_GSP_PRIV_H__
#include <subdev/gsp.h>
+#include <rm/gpu.h>
enum nvkm_acr_lsf_id;
int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
@@ -11,12 +12,32 @@ struct nvkm_gsp_fwif {
int version;
int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *);
const struct nvkm_gsp_func *func;
+ const struct nvkm_rm_impl *rm;
const char *ver;
bool enable;
};
+int nvkm_gsp_load_fw(struct nvkm_gsp *, const char *name, const char *ver,
+ const struct firmware **);
+void nvkm_gsp_dtor_fws(struct nvkm_gsp *);
+
int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
-int r535_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+
+int tu102_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+int tu102_gsp_load_rm(struct nvkm_gsp *, const struct nvkm_gsp_fwif *);
+
+int gh100_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+
+#define NVKM_GSP_FIRMWARE_BOOTER(chip,vers) \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin")
+
+#define NVKM_GSP_FIRMWARE_FMC(chip,vers) \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/fmc-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin")
struct nvkm_gsp_func {
const struct nvkm_falcon_func *flcn;
@@ -25,12 +46,6 @@ struct nvkm_gsp_func {
char *sig_section;
struct {
- u32 os_carveout_size;
- u32 base_size;
- u64 min_size;
- } wpr_heap;
-
- struct {
int (*ctor)(struct nvkm_gsp *, const char *name, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
} booter;
@@ -41,7 +56,9 @@ struct nvkm_gsp_func {
int (*fini)(struct nvkm_gsp *, bool suspend);
int (*reset)(struct nvkm_gsp *);
- const struct nvkm_gsp_rm *rm;
+ struct {
+ const struct nvkm_rm_gpu *gpu;
+ } rm;
};
extern const struct nvkm_falcon_func tu102_gsp_flcn;
@@ -49,7 +66,10 @@ extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int tu102_gsp_oneinit(struct nvkm_gsp *);
+int tu102_gsp_init(struct nvkm_gsp *);
+int tu102_gsp_fini(struct nvkm_gsp *, bool suspend);
int tu102_gsp_reset(struct nvkm_gsp *);
+u64 tu102_gsp_wpr_heap_size(struct nvkm_gsp *);
extern const struct nvkm_falcon_func ga102_gsp_flcn;
extern const struct nvkm_falcon_fw_func ga102_gsp_fwsec;
@@ -57,11 +77,14 @@ int ga102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int ga102_gsp_reset(struct nvkm_gsp *);
+int gh100_gsp_oneinit(struct nvkm_gsp *);
+int gh100_gsp_init(struct nvkm_gsp *);
+int gh100_gsp_fini(struct nvkm_gsp *, bool suspend);
+
void r535_gsp_dtor(struct nvkm_gsp *);
int r535_gsp_oneinit(struct nvkm_gsp *);
int r535_gsp_init(struct nvkm_gsp *);
int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
-extern const struct nvkm_gsp_rm r535_gsp_rm;
int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild
new file mode 100644
index 000000000000..04037394a2da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+nvkm-y += nvkm/subdev/gsp/rm/client.o
+nvkm-y += nvkm/subdev/gsp/rm/engine.o
+nvkm-y += nvkm/subdev/gsp/rm/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/nvdec.o
+nvkm-y += nvkm/subdev/gsp/rm/nvenc.o
+
+nvkm-y += nvkm/subdev/gsp/rm/tu1xx.o
+nvkm-y += nvkm/subdev/gsp/rm/ga100.o
+nvkm-y += nvkm/subdev/gsp/rm/ga1xx.o
+nvkm-y += nvkm/subdev/gsp/rm/ad10x.o
+nvkm-y += nvkm/subdev/gsp/rm/gh100.o
+nvkm-y += nvkm/subdev/gsp/rm/gb10x.o
+nvkm-y += nvkm/subdev/gsp/rm/gb20x.o
+
+include $(src)/nvkm/subdev/gsp/rm/r535/Kbuild
+include $(src)/nvkm/subdev/gsp/rm/r570/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c
new file mode 100644
index 000000000000..e1ce6355c35f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ad10x_gpu = {
+ .disp.class = {
+ .root = AD102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = AD102_DISP_CORE_CHANNEL_DMA,
+ .wndw = GA102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GA102_DISP_CURSOR,
+ },
+
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_B,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = ADA_A,
+ .compute = ADA_COMPUTE_A,
+ },
+ .nvdec.class = NVC9B0_VIDEO_DECODER,
+ .nvenc.class = NVC9B7_VIDEO_ENCODER,
+ .ofa.class = NVC9FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c
new file mode 100644
index 000000000000..72d3e3ca84c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "rm.h"
+
+void
+nvkm_gsp_client_dtor(struct nvkm_gsp_client *client)
+{
+ const unsigned int id = client->object.handle - NVKM_RM_CLIENT(0);
+ struct nvkm_gsp *gsp = client->gsp;
+
+ if (!gsp)
+ return;
+
+ if (client->object.client)
+ nvkm_gsp_rm_free(&client->object);
+
+ mutex_lock(&gsp->client_id.mutex);
+ idr_remove(&gsp->client_id.idr, id);
+ mutex_unlock(&gsp->client_id.mutex);
+
+ client->gsp = NULL;
+}
+
+int
+nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
+{
+ int id, ret;
+
+ if (WARN_ON(!gsp->rm))
+ return -ENOSYS;
+
+ mutex_lock(&gsp->client_id.mutex);
+ id = idr_alloc(&gsp->client_id.idr, client, 0, NVKM_RM_CLIENT_MASK + 1, GFP_KERNEL);
+ mutex_unlock(&gsp->client_id.mutex);
+ if (id < 0)
+ return id;
+
+ client->gsp = gsp;
+ client->object.client = client;
+ INIT_LIST_HEAD(&client->events);
+
+ ret = gsp->rm->api->client->ctor(client, NVKM_RM_CLIENT(id));
+ if (ret)
+ nvkm_gsp_client_dtor(client);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c
new file mode 100644
index 000000000000..3b0e83b2f57f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include "gpu.h"
+
+#include <core/object.h>
+#include <engine/fifo/chan.h>
+
+struct nvkm_rm_engine {
+ struct nvkm_engine engine;
+
+ struct nvkm_engine_func func;
+};
+
+struct nvkm_rm_engine_obj {
+ struct nvkm_object object;
+ struct nvkm_gsp_object rm;
+};
+
+static void *
+nvkm_rm_engine_obj_dtor(struct nvkm_object *object)
+{
+ struct nvkm_rm_engine_obj *obj = container_of(object, typeof(*obj), object);
+
+ nvkm_gsp_rm_free(&obj->rm);
+ return obj;
+}
+
+static const struct nvkm_object_func
+nvkm_rm_engine_obj = {
+ .dtor = nvkm_rm_engine_obj_dtor,
+};
+
+int
+nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_rm *rm = chan->client->gsp->rm;
+ const int inst = oclass->engine->subdev.inst;
+ const u32 class = oclass->base.oclass;
+ const u32 handle = oclass->handle;
+ struct nvkm_rm_engine_obj *obj;
+ int ret;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ switch (oclass->engine->subdev.type) {
+ case NVKM_ENGINE_CE:
+ ret = rm->api->ce->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_GR:
+ ret = nvkm_gsp_rm_alloc(chan, handle, class, 0, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVDEC:
+ ret = rm->api->nvdec->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVENC:
+ ret = rm->api->nvenc->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVJPG:
+ ret = rm->api->nvjpg->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_OFA:
+ ret = rm->api->ofa->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ default:
+ ret = -EINVAL;
+ WARN_ON(1);
+ break;
+ }
+
+ if (ret) {
+ kfree(obj);
+ return ret;
+ }
+
+ nvkm_object_ctor(&nvkm_rm_engine_obj, oclass, &obj->object);
+ *pobject = &obj->object;
+ return 0;
+}
+
+static int
+nvkm_rm_engine_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+
+ return nvkm_rm_engine_obj_new(&chan->rm.object, chan->id, oclass, pobject);
+}
+
+static void *
+nvkm_rm_engine_dtor(struct nvkm_engine *engine)
+{
+ kfree(engine->func);
+ return engine;
+}
+
+int
+nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *rm,
+ enum nvkm_subdev_type type, int inst,
+ const u32 *class, int nclass, struct nvkm_engine *engine)
+{
+ struct nvkm_engine_func *func;
+
+ func = kzalloc(struct_size(func, sclass, nclass + 1), GFP_KERNEL);
+ if (!func)
+ return -ENOMEM;
+
+ func->dtor = dtor;
+
+ for (int i = 0; i < nclass; i++) {
+ func->sclass[i].oclass = class[i];
+ func->sclass[i].minver = -1;
+ func->sclass[i].maxver = 0;
+ func->sclass[i].ctor = nvkm_rm_engine_obj_ctor;
+ }
+
+ nvkm_engine_ctor(func, rm->device, type, inst, true, engine);
+ return 0;
+}
+
+static int
+nvkm_rm_engine_new_(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst, u32 class,
+ struct nvkm_engine **pengine)
+{
+ struct nvkm_engine *engine;
+ int ret;
+
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_engine_dtor, rm, type, inst, &class, 1, engine);
+ if (ret) {
+ kfree(engine);
+ return ret;
+ }
+
+ *pengine = engine;
+ return 0;
+}
+
+int
+nvkm_rm_engine_new(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst)
+{
+ const struct nvkm_rm_gpu *gpu = rm->gpu;
+ struct nvkm_device *device = rm->device;
+
+ switch (type) {
+ case NVKM_ENGINE_CE:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->ce)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->ce.class, &device->ce[inst]);
+ case NVKM_ENGINE_GR:
+ if (inst != 0)
+		return -ENODEV; /* MIG not supported, just ignore. */
+
+ return nvkm_rm_gr_new(rm);
+ case NVKM_ENGINE_NVDEC:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvdec)))
+ return -EINVAL;
+
+ return nvkm_rm_nvdec_new(rm, inst);
+ case NVKM_ENGINE_NVENC:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvenc)))
+ return -EINVAL;
+
+ return nvkm_rm_nvenc_new(rm, inst);
+ case NVKM_ENGINE_NVJPG:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvjpg)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->nvjpg.class, &device->nvjpg[inst]);
+ case NVKM_ENGINE_OFA:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->ofa)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->ofa.class, &device->ofa[inst]);
+ default:
+ break;
+ }
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h
new file mode 100644
index 000000000000..5b8c9c3901d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_ENGINE_H__
+#define __NVKM_RM_ENGINE_H__
+#include "gpu.h"
+
+int nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *,
+ enum nvkm_subdev_type type, int inst,
+ const u32 *class, int nclass, struct nvkm_engine *);
+int nvkm_rm_engine_new(struct nvkm_rm *, enum nvkm_subdev_type, int inst);
+
+int nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *,
+ struct nvkm_object **);
+
+int nvkm_rm_gr_new(struct nvkm_rm *);
+int nvkm_rm_nvdec_new(struct nvkm_rm *, int inst);
+int nvkm_rm_nvenc_new(struct nvkm_rm *, int inst);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c
new file mode 100644
index 000000000000..a48c6134075d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ga100_gpu = {
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = AMPERE_A,
+ .compute = AMPERE_COMPUTE_A,
+ },
+ .nvdec.class = NVC6B0_VIDEO_DECODER,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c
new file mode 100644
index 000000000000..50536ad7f85d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ga1xx_gpu = {
+ .disp.class = {
+ .root = GA102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = GA102_DISP_CORE_CHANNEL_DMA,
+ .wndw = GA102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GA102_DISP_CURSOR,
+ },
+
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_B,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = AMPERE_B,
+ .compute = AMPERE_COMPUTE_B,
+ },
+ .nvdec.class = NVC7B0_VIDEO_DECODER,
+ .nvenc.class = NVC7B7_VIDEO_ENCODER,
+ .ofa.class = NVC7FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c
new file mode 100644
index 000000000000..2f517dcd721a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gb10x_gpu = {
+ .usermode.class = HOPPER_USERMODE_A,
+
+ .fifo.chan = {
+ .class = BLACKWELL_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = BLACKWELL_DMA_COPY_A,
+ .gr.class = {
+ .i2m = BLACKWELL_INLINE_TO_MEMORY_A,
+ .twod = FERMI_TWOD_A,
+ .threed = BLACKWELL_A,
+ .compute = BLACKWELL_COMPUTE_A,
+ },
+ .nvdec.class = NVCDB0_VIDEO_DECODER,
+ .nvjpg.class = NVCDD1_VIDEO_NVJPG,
+ .ofa.class = NVCDFA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c
new file mode 100644
index 000000000000..950471d9996e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/ce/priv.h>
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gb20x_gpu = {
+ .disp.class = {
+ .root = GB202_DISP,
+ .caps = GB202_DISP_CAPS,
+ .core = GB202_DISP_CORE_CHANNEL_DMA,
+ .wndw = GB202_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GB202_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GB202_DISP_CURSOR,
+ },
+
+ .usermode.class = BLACKWELL_USERMODE_A,
+
+ .fifo.chan = {
+ .class = BLACKWELL_CHANNEL_GPFIFO_B,
+ .doorbell_handle = gb202_chan_doorbell_handle,
+ },
+
+ .ce = {
+ .class = BLACKWELL_DMA_COPY_B,
+ .grce_mask = gb202_ce_grce_mask,
+ },
+ .gr.class = {
+ .i2m = BLACKWELL_INLINE_TO_MEMORY_A,
+ .twod = FERMI_TWOD_A,
+ .threed = BLACKWELL_B,
+ .compute = BLACKWELL_COMPUTE_B,
+ },
+ .nvdec.class = NVCFB0_VIDEO_DECODER,
+ .nvenc.class = NVCFB7_VIDEO_ENCODER,
+ .nvjpg.class = NVCFD1_VIDEO_NVJPG,
+ .ofa.class = NVCFFA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c
new file mode 100644
index 000000000000..49e2c54e1aa8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gh100_gpu = {
+ .usermode.class = HOPPER_USERMODE_A,
+
+ .fifo.chan = {
+ .class = HOPPER_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = HOPPER_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = HOPPER_A,
+ .compute = HOPPER_COMPUTE_A,
+ },
+ .nvdec.class = NVB8B0_VIDEO_DECODER,
+ .nvjpg.class = NVB8D1_VIDEO_NVJPG,
+ .ofa.class = NVB8FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h
new file mode 100644
index 000000000000..46a6325641b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_GPU_H__
+#define __NVKM_RM_GPU_H__
+#include "rm.h"
+
+struct nvkm_rm_gpu {
+ struct {
+ struct {
+ u32 root;
+ u32 caps;
+ u32 core;
+ u32 wndw;
+ u32 wimm;
+ u32 curs;
+ } class;
+ } disp;
+
+ struct {
+ u32 class;
+ } usermode;
+
+ struct {
+ struct {
+ u32 class;
+ u32 (*doorbell_handle)(struct nvkm_chan *);
+ } chan;
+ } fifo;
+
+ struct {
+ u32 class;
+ u32 (*grce_mask)(struct nvkm_device *);
+ } ce;
+
+ struct {
+ struct {
+ u32 i2m;
+ u32 twod;
+ u32 threed;
+ u32 compute;
+ } class;
+ } gr;
+
+ struct {
+ u32 class;
+ } nvdec;
+
+ struct {
+ u32 class;
+ } nvenc;
+
+ struct {
+ u32 class;
+ } nvjpg;
+
+ struct {
+ u32 class;
+ } ofa;
+};
+
+extern const struct nvkm_rm_gpu tu1xx_gpu;
+extern const struct nvkm_rm_gpu ga100_gpu;
+extern const struct nvkm_rm_gpu ga1xx_gpu;
+extern const struct nvkm_rm_gpu ad10x_gpu;
+extern const struct nvkm_rm_gpu gh100_gpu;
+extern const struct nvkm_rm_gpu gb10x_gpu;
+extern const struct nvkm_rm_gpu gb20x_gpu;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c
new file mode 100644
index 000000000000..f40b8fcc2bcb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gr.h"
+
+#include <engine/fifo.h>
+#include <engine/gr/priv.h>
+
+static int
+nvkm_rm_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
+
+ return nvkm_rm_engine_obj_new(&chan->chan->rm.object, chan->chan->id, oclass, pobject);
+}
+
+static int
+nvkm_rm_gr_fini(struct nvkm_gr *base, bool suspend)
+{
+ struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm;
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+
+ if (rm->api->gr->scrubber.fini)
+ rm->api->gr->scrubber.fini(gr);
+
+ return 0;
+}
+
+static int
+nvkm_rm_gr_init(struct nvkm_gr *base)
+{
+ struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm;
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+ int ret;
+
+ if (rm->api->gr->scrubber.init) {
+ ret = rm->api->gr->scrubber.init(gr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nvkm_rm_gr_new(struct nvkm_rm *rm)
+{
+ const u32 classes[] = {
+ rm->gpu->gr.class.i2m,
+ rm->gpu->gr.class.twod,
+ rm->gpu->gr.class.threed,
+ rm->gpu->gr.class.compute,
+ };
+ struct nvkm_gr_func *func;
+ struct r535_gr *gr;
+
+ func = kzalloc(struct_size(func, sclass, ARRAY_SIZE(classes) + 1), GFP_KERNEL);
+ if (!func)
+ return -ENOMEM;
+
+ func->dtor = r535_gr_dtor;
+ func->oneinit = r535_gr_oneinit;
+ func->init = nvkm_rm_gr_init;
+ func->fini = nvkm_rm_gr_fini;
+ func->units = r535_gr_units;
+ func->chan_new = r535_gr_chan_new;
+
+ for (int i = 0; i < ARRAY_SIZE(classes); i++) {
+ func->sclass[i].oclass = classes[i];
+ func->sclass[i].minver = -1;
+ func->sclass[i].maxver = 0;
+ func->sclass[i].ctor = nvkm_rm_gr_obj_ctor;
+ }
+
+ gr = kzalloc(sizeof(*gr), GFP_KERNEL);
+ if (!gr) {
+ kfree(func);
+ return -ENOMEM;
+ }
+
+ nvkm_gr_ctor(func, rm->device, NVKM_ENGINE_GR, 0, true, &gr->base);
+ gr->scrubber.chid = -1;
+ rm->device->gr = &gr->base;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h
new file mode 100644
index 000000000000..24980f23aab9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_RM_GR_H__
+#define __NVKM_RM_GR_H__
+#include "engine.h"
+
+#include <core/object.h>
+#include <engine/gr.h>
+
+#define R515_GR_MAX_CTXBUFS 9
+
+struct r535_gr_chan {
+ struct nvkm_object object;
+ struct r535_gr *gr;
+
+ struct nvkm_vmm *vmm;
+ struct nvkm_chan *chan;
+
+ struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+};
+
+struct r535_gr {
+ struct nvkm_gr base;
+
+ struct {
+ u16 bufferId;
+ u32 size;
+ u8 page;
+ u8 align;
+ bool global;
+ bool init;
+ bool ro;
+ } ctxbuf[R515_GR_MAX_CTXBUFS];
+ int ctxbuf_nr;
+
+ struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS];
+
+ struct {
+ int chid;
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+ struct nvkm_gsp_object chan;
+ struct nvkm_gsp_object threed;
+ struct {
+ struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+ } ctxbuf;
+ bool enabled;
+ } scrubber;
+};
+
+struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+void r535_gr_get_ctxbuf_info(struct r535_gr *, int i,
+ struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h
new file mode 100644
index 000000000000..3bdb5ad320d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_HANDLES_H__
+#define __NVKM_RM_HANDLES_H__
+
+/* RMAPI handles for various objects allocated from GSP-RM with RM_ALLOC. */
+
+#define NVKM_RM_CLIENT(id) (0xc1d00000 | (id))
+#define NVKM_RM_CLIENT_MASK 0x0000ffff
+#define NVKM_RM_DEVICE 0xde1d0000
+#define NVKM_RM_SUBDEVICE 0x5d1d0000
+#define NVKM_RM_DISP 0x00730000
+#define NVKM_RM_VASPACE 0x90f10000
+#define NVKM_RM_CHAN(chid) (0xf1f00000 | (chid))
+#define NVKM_RM_THREED 0x97000000
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c
new file mode 100644
index 000000000000..d9fbfc377864
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include <engine/nvdec.h>
+
+static void *
+nvkm_rm_nvdec_dtor(struct nvkm_engine *engine)
+{
+ return container_of(engine, struct nvkm_nvdec, engine);
+}
+
+int
+nvkm_rm_nvdec_new(struct nvkm_rm *rm, int inst)
+{
+ struct nvkm_nvdec *nvdec;
+ int ret;
+
+ nvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL);
+ if (!nvdec)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_nvdec_dtor, rm, NVKM_ENGINE_NVDEC, inst,
+ &rm->gpu->nvdec.class, 1, &nvdec->engine);
+ if (ret) {
+ kfree(nvdec);
+ return ret;
+ }
+
+ rm->device->nvdec[inst] = nvdec;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c
new file mode 100644
index 000000000000..6dfa7b789e07
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include <engine/nvenc.h>
+
+static void *
+nvkm_rm_nvenc_dtor(struct nvkm_engine *engine)
+{
+ return container_of(engine, struct nvkm_nvenc, engine);
+}
+
+int
+nvkm_rm_nvenc_new(struct nvkm_rm *rm, int inst)
+{
+ struct nvkm_nvenc *nvenc;
+ int ret;
+
+ nvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL);
+ if (!nvenc)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_nvenc_dtor, rm, NVKM_ENGINE_NVENC, inst,
+ &rm->gpu->nvenc.class, 1, &nvenc->engine);
+ if (ret) {
+ kfree(nvenc);
+ return ret;
+ }
+
+ rm->device->nvenc[inst] = nvenc;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild
new file mode 100644
index 000000000000..a5f6b2abfd33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/rm.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/gsp.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/rpc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ctrl.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/alloc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/client.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/device.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/bar.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/fbsr.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/vmm.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/disp.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/fifo.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ce.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvdec.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvenc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvjpg.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ofa.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c
new file mode 100644
index 000000000000..46e3a29f2ad7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/alloc.h"
+#include "nvrm/rpcfn.h"
+
+static int
+r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_free_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
+ client->object.handle, object->handle);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
+ if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+ return -EIO;
+
+ rpc->params.hRoot = client->object.handle;
+ rpc->params.hObjectParent = 0;
+ rpc->params.hObjectOld = object->handle;
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
+}
+
+static void
+r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
+
+ nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
+ struct nvkm_gsp *gsp = object->client->gsp;
+ void *ret = NULL;
+
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, sizeof(*rpc));
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->status) {
+ ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
+ if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
+ nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
+ }
+
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass,
+ u32 params_size)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_gsp_rm_alloc_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n",
+ client->object.handle, object->parent->handle,
+ object->handle);
+
+ nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass,
+ params_size);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC,
+ sizeof(*rpc) + params_size);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client->object.handle;
+ rpc->hParent = object->parent->handle;
+ rpc->hObject = object->handle;
+ rpc->hClass = oclass;
+ rpc->status = 0;
+ rpc->paramsSize = params_size;
+ return rpc->params;
+}
+
+const struct nvkm_rm_api_alloc
+r535_alloc = {
+ .get = r535_gsp_rpc_rm_alloc_get,
+ .push = r535_gsp_rpc_rm_alloc_push,
+ .done = r535_gsp_rpc_rm_alloc_done,
+ .free = r535_gsp_rpc_rm_free,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c
index 3a30bea30e36..d06bf95b9a4a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c
@@ -19,7 +19,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "gf100.h"
+#include <subdev/bar/gf100.h>
#include <core/mm.h>
#include <subdev/fb.h>
@@ -27,14 +27,20 @@
#include <subdev/instmem.h>
#include <subdev/mmu/vmm.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+#include "nvrm/bar.h"
+#include "nvrm/rpcfn.h"
static void
r535_bar_flush(struct nvkm_bar *bar)
{
+ /* Use NV_PFLUSH in resume path - needed on R570 to flush writes before
+ * BAR2 page tables have been restored.
+ */
+ if (unlikely(!bar->bar2)) {
+ g84_bar_flush(bar);
+ return;
+ }
+
ioread32_native(bar->flushBAR2);
}
@@ -44,7 +50,7 @@ r535_bar_bar2_wait(struct nvkm_bar *base)
}
static int
-r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
+r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u8 page_shift, u64 pdbe)
{
rpc_update_bar_pde_v15_00 *rpc;
@@ -53,21 +59,22 @@ r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
return -EIO;
rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2;
- rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */
- rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu!
+ rpc->info.entryValue = pdbe;
+ rpc->info.entryLevelShift = page_shift;
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
}
static void
r535_bar_bar2_fini(struct nvkm_bar *bar)
{
+ struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
struct nvkm_gsp *gsp = bar->subdev.device->gsp;
bar->flushBAR2 = bar->flushBAR2PhysMode;
nvkm_done(bar->flushFBZero);
- WARN_ON(r535_bar_bar2_update_pde(gsp, 0));
+ WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, 0));
}
static void
@@ -76,8 +83,18 @@ r535_bar_bar2_init(struct nvkm_bar *bar)
struct nvkm_device *device = bar->subdev.device;
struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
struct nvkm_gsp *gsp = device->gsp;
-
- WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr));
+ struct nvkm_memory *pdb = vmm->pd->pt[0]->memory;
+ u32 pdb_offset = vmm->pd->pt[0]->base;
+ u32 pdbe_lo, pdbe_hi;
+ u64 pdbe;
+
+ nvkm_kmap(pdb);
+ pdbe_lo = nvkm_ro32(pdb, pdb_offset + 0);
+ pdbe_hi = nvkm_ro32(pdb, pdb_offset + 4);
+ pdbe = ((u64)pdbe_hi << 32) | pdbe_lo;
+ nvkm_done(pdb);
+
+ WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, pdbe));
vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb;
if (!bar->flushFBZero) {
@@ -174,7 +191,7 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
}
*pbar = bar;
- bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
+ bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, NVKM_BAR2_INST), PAGE_SIZE);
if (!bar->flushBAR2PhysMode)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c
new file mode 100644
index 000000000000..2d1ce9db2dcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/ce.h"
+#include "nvrm/engine.h"
+
+static int
+r535_ce_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *ce)
+{
+ NVC0B5_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ce);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->version = 1;
+ args->engineType = NV2080_ENGINE_TYPE_COPY0 + inst;
+
+ return nvkm_gsp_rm_alloc_wr(ce, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_ce = {
+ .alloc = r535_ce_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c
index 932934227b9c..ec71f683e609 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c
@@ -19,26 +19,27 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
+#include <rm/rm.h>
-#include <subdev/gsp.h>
+#include "nvrm/client.h"
-#include <nvif/class.h>
+static int
+r535_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle)
+{
+ NV0000_ALLOC_PARAMETERS *args;
-static const struct nvkm_engine_func
-ga100_nvdec = {
- .sclass = {
- { -1, -1, NVC6B0_VIDEO_DECODER },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args),
+ &client->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
-int
-ga100_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvdec **pnvdec)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&ga100_nvdec, device, type, inst, pnvdec);
+ args->hClient = client->object.handle;
+ args->processID = ~0;
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(&client->object, args);
}
+
+const struct nvkm_rm_api_client
+r535_client = {
+ .ctor = r535_gsp_client_ctor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c
new file mode 100644
index 000000000000..70b9ee911c5e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/ctrl.h"
+#include "nvrm/rpcfn.h"
+
+static void
+r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc);
+
+ if (!params)
+ return;
+ nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static int
+r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc)
+{
+ rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc);
+ struct nvkm_gsp *gsp = object->client->gsp;
+ int ret = 0;
+
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, repc);
+ if (IS_ERR_OR_NULL(rpc)) {
+ *params = NULL;
+ return PTR_ERR(rpc);
+ }
+
+ if (rpc->status) {
+ ret = r535_rpc_status_to_errno(rpc->status);
+ if (ret != -EAGAIN && ret != -EBUSY)
+ nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
+ object->client->object.handle, object->handle, rpc->cmd, rpc->status);
+ }
+
+ if (repc)
+ *params = rpc->params;
+ else
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_gsp_rm_control_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n",
+ client->object.handle, object->handle, cmd, params_size);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
+ sizeof(*rpc) + params_size);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client->object.handle;
+ rpc->hObject = object->handle;
+ rpc->cmd = cmd;
+ rpc->status = 0;
+ rpc->paramsSize = params_size;
+ return rpc->params;
+}
+
+const struct nvkm_rm_api_ctrl
+r535_ctrl = {
+ .get = r535_gsp_rpc_rm_ctrl_get,
+ .push = r535_gsp_rpc_rm_ctrl_push,
+ .done = r535_gsp_rpc_rm_ctrl_done,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c
new file mode 100644
index 000000000000..f830e12a8f6e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/device.h"
+#include "nvrm/event.h"
+
+static void
+r535_gsp_event_dtor(struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_device *device = event->device;
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+
+ mutex_lock(&gsp->client_id.mutex);
+ if (event->func) {
+ list_del(&event->head);
+ event->func = NULL;
+ }
+ mutex_unlock(&gsp->client_id.mutex);
+
+ nvkm_gsp_rm_free(&event->object);
+ event->device = NULL;
+}
+
+static int
+r535_gsp_device_event_get(struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_device *device = event->device;
+ NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
+ NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->event = event->id;
+ ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
+ return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
+}
+
+static int
+r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
+ nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+ NV0005_ALLOC_PARAMETERS *args;
+ int ret;
+
+ args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
+ NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
+ &event->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hParentClient = client->object.handle;
+ args->hSrcResource = 0;
+ args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
+ args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
+ args->data = NULL;
+
+ ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
+ if (ret)
+ return ret;
+
+ event->device = device;
+ event->id = id;
+
+ ret = r535_gsp_device_event_get(event);
+ if (ret) {
+ nvkm_gsp_event_dtor(event);
+ return ret;
+ }
+
+ mutex_lock(&gsp->client_id.mutex);
+ event->func = func;
+ list_add(&event->head, &client->events);
+ mutex_unlock(&gsp->client_id.mutex);
+ return 0;
+}
+
+static void
+r535_gsp_device_dtor(struct nvkm_gsp_device *device)
+{
+ nvkm_gsp_rm_free(&device->subdevice);
+ nvkm_gsp_rm_free(&device->object);
+}
+
+static int
+r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
+{
+ NV2080_ALLOC_PARAMETERS *args;
+
+ return nvkm_gsp_rm_alloc(&device->object, NVKM_RM_SUBDEVICE, NV20_SUBDEVICE_0,
+ sizeof(*args), &device->subdevice);
+}
+
+static int
+r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+ NV0080_ALLOC_PARAMETERS *args;
+ int ret;
+
+ args = nvkm_gsp_rm_alloc_get(&client->object, NVKM_RM_DEVICE, NV01_DEVICE_0, sizeof(*args),
+ &device->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClientShare = client->object.handle;
+
+ ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
+ if (ret)
+ return ret;
+
+ ret = r535_gsp_subdevice_ctor(device);
+ if (ret)
+ nvkm_gsp_rm_free(&device->object);
+
+ return ret;
+}
+
+const struct nvkm_rm_api_device
+r535_device = {
+ .ctor = r535_gsp_device_ctor,
+ .dtor = r535_gsp_device_dtor,
+ .event.ctor = r535_gsp_device_event_ctor,
+ .event.dtor = r535_gsp_event_dtor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
index 99110ab2f44d..7e9e2d3564da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
@@ -19,13 +19,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
-#include "chan.h"
-#include "conn.h"
-#include "dp.h"
-#include "head.h"
-#include "ior.h"
-#include "outp.h"
+#include <engine/disp/priv.h>
+#include <engine/disp/chan.h>
+#include <engine/disp/conn.h>
+#include <engine/disp/dp.h>
+#include <engine/disp/head.h>
+#include <engine/disp/ior.h>
+#include <engine/disp/outp.h>
#include <core/ramht.h>
#include <subdev/bios.h>
@@ -34,19 +34,11 @@
#include <subdev/mmu.h>
#include <subdev/vfn.h>
+#include <rm/gpu.h>
+
#include <nvhw/drf.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
-#include <nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h>
+#include "nvrm/disp.h"
#include <linux/acpi.h>
@@ -78,9 +70,9 @@ r535_chan_fini(struct nvkm_disp_chan *chan)
}
static int
-r535_chan_push(struct nvkm_disp_chan *chan)
+r535_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory)
{
- struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp;
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
@@ -89,8 +81,8 @@ r535_chan_push(struct nvkm_disp_chan *chan)
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- if (chan->memory) {
- switch (nvkm_memory_target(chan->memory)) {
+ if (memory) {
+ switch (nvkm_memory_target(memory)) {
case NVKM_MEM_TARGET_NCOH:
ctrl->addressSpace = ADDR_SYSMEM;
ctrl->cacheSnoop = 0;
@@ -107,13 +99,13 @@ r535_chan_push(struct nvkm_disp_chan *chan)
return -EINVAL;
}
- ctrl->physicalAddr = nvkm_memory_addr(chan->memory);
- ctrl->limit = nvkm_memory_size(chan->memory) - 1;
+ ctrl->physicalAddr = nvkm_memory_addr(memory);
+ ctrl->limit = nvkm_memory_size(memory) - 1;
}
- ctrl->hclass = chan->object.oclass;
- ctrl->channelInstance = chan->head;
- ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 1 : 0;
+ ctrl->hclass = oclass;
+ ctrl->channelInstance = inst;
+ ctrl->valid = ((oclass & 0xff) != 0x7a) ? 1 : 0;
return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
}
@@ -121,10 +113,11 @@ r535_chan_push(struct nvkm_disp_chan *chan)
static int
r535_curs_init(struct nvkm_disp_chan *chan)
{
+ const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api;
NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args;
int ret;
- ret = r535_chan_push(chan);
+ ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, NULL);
if (ret)
return ret;
@@ -172,25 +165,34 @@ r535_dmac_fini(struct nvkm_disp_chan *chan)
}
static int
-r535_dmac_init(struct nvkm_disp_chan *chan)
+r535_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *dmac)
{
NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
- int ret;
-
- ret = r535_chan_push(chan);
- if (ret)
- return ret;
- args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
- (chan->object.oclass << 16) | chan->head,
- chan->object.oclass, sizeof(*args), &chan->rm.object);
+ args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass,
+ sizeof(*args), dmac);
if (IS_ERR(args))
return PTR_ERR(args);
- args->channelInstance = chan->head;
- args->offset = chan->suspend_put;
+ args->channelInstance = inst;
+ args->offset = put_offset;
- return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+ return nvkm_gsp_rm_alloc_wr(dmac, args);
+}
+
+static int
+r535_dmac_init(struct nvkm_disp_chan *chan)
+{
+ const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api;
+ int ret;
+
+ ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, chan->memory);
+ if (ret)
+ return ret;
+
+ return rmapi->disp->chan.dmac_alloc(chan->disp, chan->object.oclass, chan->head,
+ chan->suspend_put, &chan->rm.object);
}
static int
@@ -260,47 +262,47 @@ r535_core = {
};
static int
-r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
+r535_bl_ctrl(struct nvkm_disp *disp, unsigned display_id, bool set, int *pval)
{
- struct nvkm_disp *disp = sor->disp;
+ u32 cmd = set ? NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS :
+ NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS;
NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+ int ret;
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
- NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS,
- sizeof(*ctrl));
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl));
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- ctrl->displayId = BIT(sor->asy.outp->index);
- ctrl->brightness = lvl;
+ ctrl->displayId = BIT(display_id);
+ ctrl->brightness = *pval;
- return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pval = ctrl->brightness;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
}
static int
-r535_sor_bl_get(struct nvkm_ior *sor)
+r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
{
struct nvkm_disp *disp = sor->disp;
- NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
- int ret, lvl;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
- NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
- sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api;
- ctrl->displayId = BIT(sor->asy.outp->index);
+ return rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, true, &lvl);
+}
- ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return ret;
- }
+static int
+r535_sor_bl_get(struct nvkm_ior *sor)
+{
+ struct nvkm_disp *disp = sor->disp;
+ const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api;
+ int lvl, ret = rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, false, &lvl);
- lvl = ctrl->brightness;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return lvl;
+ return (ret == 0) ? lvl : ret;
}
static const struct nvkm_ior_func_bl
@@ -730,7 +732,7 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda)
}
static int
-r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
+r535_disp_get_active(struct nvkm_disp *disp, unsigned head, u32 *displayid)
{
NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
int ret;
@@ -763,7 +765,9 @@ r535_outp_inherit(struct nvkm_outp *outp)
int ret;
list_for_each_entry(head, &disp->heads, head) {
- ret = r535_disp_head_displayid(disp, head->id, &displayid);
+ const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api;
+
+ ret = rmapi->disp->get_active(disp, head->id, &displayid);
if (WARN_ON(ret))
return NULL;
@@ -858,10 +862,9 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp)
}
static int
-r535_outp_detect(struct nvkm_outp *outp)
+r535_disp_get_connect_state(struct nvkm_disp *disp, unsigned display_id)
{
NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
- struct nvkm_disp *disp = outp->disp;
int ret;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
@@ -870,23 +873,29 @@ r535_outp_detect(struct nvkm_outp *outp)
return PTR_ERR(ctrl);
ctrl->subDeviceInstance = 0;
- ctrl->displayMask = BIT(outp->index);
+ ctrl->displayMask = BIT(display_id);
ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return ret;
- }
+ if (ret == 0 && (ctrl->displayMask & BIT(display_id)))
+ ret = 1;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
- if (ctrl->displayMask & BIT(outp->index)) {
+static int
+r535_outp_detect(struct nvkm_outp *outp)
+{
+ const struct nvkm_rm_api *rmapi = outp->disp->rm.objcom.client->gsp->rm->api;
+ int ret;
+
+ ret = rmapi->disp->get_connect_state(outp->disp, outp->index);
+ if (ret == 1) {
ret = r535_outp_dfp_get_info(outp);
if (ret == 0)
ret = 1;
- } else {
- ret = 0;
}
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return ret;
}
@@ -1029,15 +1038,11 @@ r535_dp_train(struct nvkm_outp *outp, bool retrain)
}
static int
-r535_dp_rates(struct nvkm_outp *outp)
+r535_dp_set_indexed_link_rates(struct nvkm_outp *outp)
{
NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
struct nvkm_disp *disp = outp->disp;
- if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
- !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
- return 0;
-
if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
return -EINVAL;
@@ -1054,6 +1059,18 @@ r535_dp_rates(struct nvkm_outp *outp)
}
static int
+r535_dp_rates(struct nvkm_outp *outp)
+{
+ struct nvkm_rm *rm = outp->disp->rm.objcom.client->gsp->rm;
+
+ if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
+ !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
+ return 0;
+
+ return rm->api->disp->dp.set_indexed_link_rates(outp);
+}
+
+static int
r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
{
struct nvkm_disp *disp = outp->disp;
@@ -1151,6 +1168,49 @@ r535_dp = {
};
static int
+r535_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm)
+{
+ NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->sorIndex = ~0;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+ *plink_bw = 0x06;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+ *plink_bw = 0x0a;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+ *plink_bw = 0x14;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+ *plink_bw = 0x1e;
+ break;
+ default:
+ *plink_bw = 0x00;
+ break;
+ }
+
+ *pmst = ctrl->bIsMultistreamSupported;
+ *pwm = ctrl->bHasIncreasedWatermarkLimits;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize)
{
NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl;
@@ -1194,6 +1254,7 @@ r535_tmds = {
static int
r535_outp_new(struct nvkm_disp *disp, u32 id)
{
+ const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api;
NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
enum nvkm_ior_proto proto;
struct dcb_output dcbE = {};
@@ -1278,43 +1339,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id)
if (ret)
return ret;
} else {
- NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
bool mst, wm;
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
- NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- ctrl->sorIndex = ~0;
-
- ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ ret = rmapi->disp->dp.get_caps(disp, &dcbE.dpconf.link_bw, &mst, &wm);
+ if (ret)
return ret;
- }
-
- switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
- dcbE.dpconf.link_bw = 0x06;
- break;
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
- dcbE.dpconf.link_bw = 0x0a;
- break;
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
- dcbE.dpconf.link_bw = 0x14;
- break;
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
- dcbE.dpconf.link_bw = 0x1e;
- break;
- default:
- dcbE.dpconf.link_bw = 0x00;
- break;
- }
-
- mst = ctrl->bIsMultistreamSupported;
- wm = ctrl->bHasIncreasedWatermarkLimits;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
if (WARN_ON(!dcbE.dpconf.link_bw))
return -EINVAL;
@@ -1441,11 +1470,47 @@ r535_disp_init(struct nvkm_disp *disp)
}
static int
+r535_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->displayMask;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r535_disp_get_static_info(struct nvkm_disp *disp)
+{
+ NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->wndw.mask = ctrl->windowPresentMask;
+ disp->wndw.nr = fls(disp->wndw.mask);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
r535_disp_oneinit(struct nvkm_disp *disp)
{
struct nvkm_device *device = disp->engine.subdev.device;
struct nvkm_gsp *gsp = device->gsp;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl;
+ unsigned long mask;
int ret, i;
/* RAMIN. */
@@ -1476,24 +1541,14 @@ r535_disp_oneinit(struct nvkm_disp *disp)
if (ret)
return ret;
- ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0,
+ ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, NVKM_RM_DISP, NV04_DISPLAY_COMMON, 0,
&disp->rm.objcom);
if (ret)
return ret;
- {
- NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
-
- ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
- NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
- sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- disp->wndw.mask = ctrl->windowPresentMask;
- disp->wndw.nr = fls(disp->wndw.mask);
- nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
- }
+ ret = rmapi->disp->get_static_info(disp);
+ if (ret)
+ return ret;
/* */
{
@@ -1622,25 +1677,14 @@ r535_disp_oneinit(struct nvkm_disp *disp)
return ret;
}
- /* */
- {
- NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
- unsigned long mask;
- int i;
-
- ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
- NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- mask = ctrl->displayMask;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ ret = rmapi->disp->get_supported(disp, &mask);
+ if (ret)
+ return ret;
- for_each_set_bit(i, &mask, 32) {
- ret = r535_outp_new(disp, i);
- if (ret)
- return ret;
- }
+ for_each_set_bit(i, &mask, 32) {
+ ret = r535_outp_new(disp, i);
+ if (ret)
+ return ret;
}
ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event);
@@ -1686,6 +1730,7 @@ int
r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
struct nvkm_disp_func *rm;
int ret;
@@ -1701,20 +1746,26 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
rm->sor.new = r535_sor_new;
rm->ramht_size = hw->ramht_size;
- rm->root = hw->root;
+ rm->root.oclass = gpu->disp.class.root;
- for (int i = 0; hw->user[i].ctor; i++) {
- switch (hw->user[i].base.oclass & 0xff) {
- case 0x73: rm->user[i] = hw->user[i]; break;
- case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_core; break;
- case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break;
- case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break;
- case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break;
- default:
- WARN_ON(1);
- continue;
- }
- }
+ rm->user[0].base.oclass = gpu->disp.class.caps;
+ rm->user[0].ctor = gv100_disp_caps_new;
+
+ rm->user[1].base.oclass = gpu->disp.class.core;
+ rm->user[1].ctor = nvkm_disp_core_new;
+ rm->user[1].chan = &r535_core;
+
+ rm->user[2].base.oclass = gpu->disp.class.wndw;
+ rm->user[2].ctor = nvkm_disp_wndw_new;
+ rm->user[2].chan = &r535_wndw;
+
+ rm->user[3].base.oclass = gpu->disp.class.wimm;
+ rm->user[3].ctor = nvkm_disp_wndw_new;
+ rm->user[3].chan = &r535_wimm;
+
+ rm->user[4].base.oclass = gpu->disp.class.curs;
+ rm->user[4].ctor = nvkm_disp_chan_new;
+ rm->user[4].chan = &r535_curs;
ret = nvkm_disp_new_(rm, device, type, inst, pdisp);
if (ret)
@@ -1723,3 +1774,20 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
mutex_init(&(*pdisp)->super.mutex); //XXX
return ret;
}
+
+const struct nvkm_rm_api_disp
+r535_disp = {
+ .get_static_info = r535_disp_get_static_info,
+ .get_supported = r535_disp_get_supported,
+ .get_connect_state = r535_disp_get_connect_state,
+ .get_active = r535_disp_get_active,
+ .bl_ctrl = r535_bl_ctrl,
+ .dp = {
+ .get_caps = r535_dp_get_caps,
+ .set_indexed_link_rates = r535_dp_set_indexed_link_rates,
+ },
+ .chan = {
+ .set_pushbuf = r535_disp_chan_set_pushbuf,
+ .dmac_alloc = r535_dmac_alloc,
+ }
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c
index 5f3c9c02a4c0..150e22fde2ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c
@@ -19,19 +19,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
-
+#include <subdev/instmem/priv.h>
#include <subdev/gsp.h>
#include <nvhw/drf.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+#include "nvrm/fbsr.h"
+#include "nvrm/rpcfn.h"
struct fbsr_item {
const char *type;
@@ -54,9 +48,9 @@ struct fbsr {
u64 sys_offset;
};
-static int
-fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
- u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
+int
+r535_fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
+ u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
{
struct nvkm_gsp_client *client = device->object.client;
struct nvkm_gsp *gsp = client->gsp;
@@ -105,7 +99,7 @@ fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target
rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
}
- ret = nvkm_gsp_rpc_wr(gsp, rpc, true);
+ ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL);
if (ret)
return ret;
@@ -123,8 +117,8 @@ fbsr_send(struct fbsr *fbsr, struct fbsr_item *item)
struct nvkm_gsp_object memlist;
int ret;
- ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
- item->addr, item->size, NULL, &memlist);
+ ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
+ item->addr, item->size, NULL, &memlist);
if (ret)
return ret;
@@ -161,8 +155,8 @@ fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size)
struct nvkm_gsp_object memlist;
int ret;
- ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
- 0, fbsr->size, sgt, &memlist);
+ ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
+ 0, fbsr->size, sgt, &memlist);
if (ret)
return ret;
@@ -206,22 +200,19 @@ fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory)
return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory));
}
-static void
-r535_instmem_resume(struct nvkm_instmem *imem)
+void
+r535_fbsr_resume(struct nvkm_gsp *gsp)
{
/* RM has restored VRAM contents already, so just need to free the sysmem buffer. */
- if (imem->rm.fbsr_valid) {
- nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr);
- imem->rm.fbsr_valid = false;
- }
+ nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.fbsr);
}
static int
-r535_instmem_suspend(struct nvkm_instmem *imem)
+r535_fbsr_suspend(struct nvkm_gsp *gsp)
{
- struct nvkm_subdev *subdev = &imem->subdev;
+ struct nvkm_subdev *subdev = &gsp->subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_instmem *imem = device->imem;
struct nvkm_instobj *iobj;
struct fbsr fbsr = {};
struct fbsr_item *item, *temp;
@@ -262,7 +253,7 @@ r535_instmem_suspend(struct nvkm_instmem *imem)
fbsr.size += gsp->fb.bios.vga_workspace.size;
nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size);
- ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr);
+ ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &gsp->sr.fbsr);
if (ret)
goto done;
@@ -271,7 +262,7 @@ r535_instmem_suspend(struct nvkm_instmem *imem)
if (ret)
goto done_sgt;
- ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size);
+ ret = fbsr_init(&fbsr, &gsp->sr.fbsr, items_size);
if (WARN_ON(ret))
goto done_sgt;
@@ -282,12 +273,10 @@ r535_instmem_suspend(struct nvkm_instmem *imem)
goto done_sgt;
}
- imem->rm.fbsr_valid = true;
-
/* Cleanup everything except the sysmem backup, which will be removed after resume. */
done_sgt:
if (ret) /* ... unless we failed already. */
- nvkm_gsp_sg_free(device, &imem->rm.fbsr);
+ nvkm_gsp_sg_free(device, &gsp->sr.fbsr);
done:
list_for_each_entry_safe(item, temp, &fbsr.items, head) {
list_del(&item->head);
@@ -299,6 +288,12 @@ done:
return ret;
}
+const struct nvkm_rm_api_fbsr
+r535_fbsr = {
+ .suspend = r535_fbsr_suspend,
+ .resume = r535_fbsr_resume,
+};
+
static void *
r535_instmem_dtor(struct nvkm_instmem *imem)
{
@@ -319,11 +314,10 @@ r535_instmem_new(const struct nvkm_instmem_func *hw,
rm->dtor = r535_instmem_dtor;
rm->fini = hw->fini;
- rm->suspend = r535_instmem_suspend;
- rm->resume = r535_instmem_resume;
rm->memory_new = hw->memory_new;
rm->memory_wrap = hw->memory_wrap;
rm->zero = false;
+ rm->set_bar0_window_addr = hw->set_bar0_window_addr;
ret = nv50_instmem_new_(rm, device, type, inst, pinstmem);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
index 3454c7d29502..1ac5628c5140 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
@@ -19,11 +19,11 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
-#include "cgrp.h"
-#include "chan.h"
-#include "chid.h"
-#include "runl.h"
+#include <engine/fifo/priv.h>
+#include <engine/fifo/cgrp.h>
+#include <engine/fifo/chan.h>
+#include <engine/fifo/chid.h>
+#include <engine/fifo/runl.h>
#include <core/gpuobj.h>
#include <subdev/gsp.h>
@@ -31,24 +31,19 @@
#include <subdev/vfn.h>
#include <engine/gr.h>
+#include <rm/engine.h>
+
#include <nvhw/drf.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
+#include "nvrm/fifo.h"
+#include "nvrm/engine.h"
static u32
r535_chan_doorbell_handle(struct nvkm_chan *chan)
{
- return (chan->cgrp->runl->id << 16) | chan->id;
+ struct nvkm_gsp *gsp = chan->rm.object.client->gsp;
+
+ return gsp->rm->gpu->fifo.chan.doorbell_handle(chan);
}
static void
@@ -77,50 +72,29 @@ r535_chan_ramfc_clear(struct nvkm_chan *chan)
#define CHID_PER_USERD 8
static int
-r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+r535_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq,
+ bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *chan)
{
- struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
- struct nvkm_engn *engn;
- struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_gsp *gsp = device->object.client->gsp;
+ struct nvkm_fifo *fifo = gsp->subdev.device->fifo;
+ const int userd_p = chid / CHID_PER_USERD;
+ const int userd_i = chid % CHID_PER_USERD;
NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
- const int userd_p = chan->id / CHID_PER_USERD;
- const int userd_i = chan->id % CHID_PER_USERD;
- u32 eT = ~0;
- int ret;
-
- if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
- ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
- if (ret)
- return ret;
- }
-
- nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
- eT = engn->id;
- break;
- }
-
- if (WARN_ON(eT == ~0))
- return -EINVAL;
- chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
- fifo->rm.mthdbuf_size,
- &chan->rm.mthdbuf.addr, GFP_KERNEL);
- if (!chan->rm.mthdbuf.ptr)
- return -ENOMEM;
-
- args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id,
- fifo->func->chan.user.oclass, sizeof(*args),
- &chan->rm.object);
+ args = nvkm_gsp_rm_alloc_get(&device->object, handle,
+ fifo->func->chan.user.oclass, sizeof(*args), chan);
if (WARN_ON(IS_ERR(args)))
return PTR_ERR(args);
- args->gpFifoOffset = offset;
- args->gpFifoEntries = length / 8;
+ args->gpFifoOffset = gpfifo_offset;
+ args->gpFifoEntries = gpfifo_length / 8;
args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
- args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq);
+ args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq);
if (!priv)
args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
else
@@ -143,25 +117,25 @@ r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm,
args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
- args->hVASpace = chan->vmm->rm.object.handle;
- args->engineType = eT;
+ args->hVASpace = vmm->rm.object.handle;
+ args->engineType = nv2080_engine_type;
- args->instanceMem.base = chan->inst->addr;
- args->instanceMem.size = chan->inst->size;
+ args->instanceMem.base = inst_addr;
+ args->instanceMem.size = fifo->func->chan.func->inst->size;
args->instanceMem.addressSpace = 2;
args->instanceMem.cacheAttrib = 1;
- args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
+ args->userdMem.base = userd_addr;
args->userdMem.size = fifo->func->chan.func->userd->size;
args->userdMem.addressSpace = 2;
args->userdMem.cacheAttrib = 1;
- args->ramfcMem.base = chan->inst->addr + 0;
+ args->ramfcMem.base = inst_addr;
args->ramfcMem.size = 0x200;
args->ramfcMem.addressSpace = 2;
args->ramfcMem.cacheAttrib = 1;
- args->mthdbufMem.base = chan->rm.mthdbuf.addr;
+ args->mthdbufMem.base = mthdbuf_addr;
args->mthdbufMem.size = fifo->rm.mthdbuf_size;
args->mthdbufMem.addressSpace = 1;
args->mthdbufMem.cacheAttrib = 0;
@@ -173,7 +147,44 @@ r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm,
args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
- ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+ return nvkm_gsp_rm_alloc_wr(chan, args);
+}
+
+static int
+r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ struct nvkm_engn *engn;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ const struct nvkm_rm_api *rmapi = device->gsp->rm->api;
+ u32 eT = ~0;
+ int ret;
+
+ if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
+ ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
+ if (ret)
+ return ret;
+ }
+
+ nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
+ eT = engn->id;
+ break;
+ }
+
+ if (WARN_ON(eT == ~0))
+ return -EINVAL;
+
+ chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
+ fifo->rm.mthdbuf_size,
+ &chan->rm.mthdbuf.addr, GFP_KERNEL);
+ if (!chan->rm.mthdbuf.ptr)
+ return -ENOMEM;
+
+ ret = rmapi->fifo->chan.alloc(&chan->vmm->rm.device, NVKM_RM_CHAN(chan->id),
+ eT, chan->runq, priv, chan->id, chan->inst->addr,
+ nvkm_memory_addr(chan->userd.mem) + chan->userd.base,
+ chan->rm.mthdbuf.addr, chan->vmm, offset, length,
+ &chan->rm.object);
if (ret)
return ret;
@@ -215,123 +226,8 @@ r535_chan_ramfc = {
.priv = true,
};
-struct r535_chan_userd {
- struct nvkm_memory *mem;
- struct nvkm_memory *map;
- int chid;
- u32 used;
-
- struct list_head head;
-} *userd;
-
-static void
-r535_chan_id_put(struct nvkm_chan *chan)
-{
- struct nvkm_runl *runl = chan->cgrp->runl;
- struct nvkm_fifo *fifo = runl->fifo;
- struct r535_chan_userd *userd;
-
- mutex_lock(&fifo->userd.mutex);
- list_for_each_entry(userd, &fifo->userd.list, head) {
- if (userd->map == chan->userd.mem) {
- u32 chid = chan->userd.base / chan->func->userd->size;
-
- userd->used &= ~BIT(chid);
- if (!userd->used) {
- nvkm_memory_unref(&userd->map);
- nvkm_memory_unref(&userd->mem);
- nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
- list_del(&userd->head);
- kfree(userd);
- }
-
- break;
- }
- }
- mutex_unlock(&fifo->userd.mutex);
-
-}
-
-static int
-r535_chan_id_get_locked(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
-{
- const u32 userd_size = CHID_PER_USERD * chan->func->userd->size;
- struct nvkm_runl *runl = chan->cgrp->runl;
- struct nvkm_fifo *fifo = runl->fifo;
- struct r535_chan_userd *userd;
- u32 chid;
- int ret;
-
- if (ouserd + chan->func->userd->size >= userd_size ||
- (ouserd & (chan->func->userd->size - 1))) {
- RUNL_DEBUG(runl, "ouserd %llx", ouserd);
- return -EINVAL;
- }
-
- chid = div_u64(ouserd, chan->func->userd->size);
-
- list_for_each_entry(userd, &fifo->userd.list, head) {
- if (userd->mem == muserd) {
- if (userd->used & BIT(chid))
- return -EBUSY;
- break;
- }
- }
-
- if (&userd->head == &fifo->userd.list) {
- if (nvkm_memory_size(muserd) < userd_size) {
- RUNL_DEBUG(runl, "userd too small");
- return -EINVAL;
- }
-
- userd = kzalloc(sizeof(*userd), GFP_KERNEL);
- if (!userd)
- return -ENOMEM;
-
- userd->chid = nvkm_chid_get(runl->chid, chan);
- if (userd->chid < 0) {
- ret = userd->chid;
- kfree(userd);
- return ret;
- }
-
- userd->mem = nvkm_memory_ref(muserd);
-
- ret = nvkm_memory_kmap(userd->mem, &userd->map);
- if (ret) {
- nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
- kfree(userd);
- return ret;
- }
-
-
- list_add(&userd->head, &fifo->userd.list);
- }
-
- userd->used |= BIT(chid);
-
- chan->userd.mem = nvkm_memory_ref(userd->map);
- chan->userd.base = ouserd;
-
- return (userd->chid * CHID_PER_USERD) + chid;
-}
-
-static int
-r535_chan_id_get(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
-{
- struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
- int ret;
-
- mutex_lock(&fifo->userd.mutex);
- ret = r535_chan_id_get_locked(chan, muserd, ouserd);
- mutex_unlock(&fifo->userd.mutex);
- return ret;
-}
-
static const struct nvkm_chan_func
r535_chan = {
- .id_get = r535_chan_id_get,
- .id_put = r535_chan_id_put,
.inst = &gf100_chan_inst,
.userd = &gv100_chan_userd,
.ramfc = &r535_chan_ramfc,
@@ -340,10 +236,6 @@ r535_chan = {
.doorbell_handle = r535_chan_doorbell_handle,
};
-static const struct nvkm_cgrp_func
-r535_cgrp = {
-};
-
static int
r535_engn_nonstall(struct nvkm_engn *engn)
{
@@ -356,7 +248,7 @@ r535_engn_nonstall(struct nvkm_engn *engn)
}
static const struct nvkm_engn_func
-r535_ce = {
+r535_engn_ce = {
.nonstall = r535_engn_nonstall,
};
@@ -376,7 +268,7 @@ r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *c
}
static const struct nvkm_engn_func
-r535_gr = {
+r535_engn_gr = {
.nonstall = r535_engn_nonstall,
.ctor2 = r535_gr_ctor,
};
@@ -449,57 +341,86 @@ r535_runl = {
.allow = r535_runl_allow,
};
-static int
-r535_fifo_2080_type(enum nvkm_subdev_type type, int inst)
+void
+r535_fifo_rc_chid(struct nvkm_fifo *fifo, int chid)
{
- switch (type) {
- case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0;
- case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst;
- case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2;
- case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst;
- case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst;
- case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst;
- case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA;
- case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW;
- default:
- break;
+ struct nvkm_chan *chan;
+ unsigned long flags;
+
+ chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags);
+ if (!chan) {
+ nvkm_error(&fifo->engine.subdev, "rc: chid %d not found!\n", chid);
+ return;
}
- WARN_ON(1);
- return -EINVAL;
+ nvkm_chan_error(chan, false);
+ nvkm_chan_put(&chan, flags);
}
static int
-r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype)
+r535_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
{
+ rpc_rc_triggered_v17_02 *msg = repv;
+ struct nvkm_gsp *gsp = priv;
+
+ if (WARN_ON(repc < sizeof(*msg)))
+ return -EINVAL;
+
+ nvkm_error(&gsp->subdev, "rc: engn:%08x chid:%d type:%d scope:%d part:%d\n",
+ msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
+ msg->partitionAttributionId);
+
+ r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid);
+ return 0;
+}
+
+static int
+r535_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080)
+{
+#define RM_ENGINE_TYPE(RM,NVKM,INST) \
+ RM_ENGINE_TYPE_##RM: \
+ *ptype = NVKM_ENGINE_##NVKM; \
+ *p2080 = NV2080_ENGINE_TYPE_##RM; \
+ return INST
+
switch (rm) {
- case RM_ENGINE_TYPE_GR0:
- *ptype = NVKM_ENGINE_GR;
- return 0;
- case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9:
- *ptype = NVKM_ENGINE_CE;
- return rm - RM_ENGINE_TYPE_COPY0;
- case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7:
- *ptype = NVKM_ENGINE_NVDEC;
- return rm - RM_ENGINE_TYPE_NVDEC0;
- case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2:
- *ptype = NVKM_ENGINE_NVENC;
- return rm - RM_ENGINE_TYPE_NVENC0;
- case RM_ENGINE_TYPE_SW:
- *ptype = NVKM_ENGINE_SW;
- return 0;
- case RM_ENGINE_TYPE_SEC2:
- *ptype = NVKM_ENGINE_SEC2;
- return 0;
- case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7:
- *ptype = NVKM_ENGINE_NVJPG;
- return rm - RM_ENGINE_TYPE_NVJPEG0;
- case RM_ENGINE_TYPE_OFA:
- *ptype = NVKM_ENGINE_OFA;
- return 0;
+ case RM_ENGINE_TYPE( GR0, GR, 0);
+ case RM_ENGINE_TYPE( COPY0, CE, 0);
+ case RM_ENGINE_TYPE( COPY1, CE, 1);
+ case RM_ENGINE_TYPE( COPY2, CE, 2);
+ case RM_ENGINE_TYPE( COPY3, CE, 3);
+ case RM_ENGINE_TYPE( COPY4, CE, 4);
+ case RM_ENGINE_TYPE( COPY5, CE, 5);
+ case RM_ENGINE_TYPE( COPY6, CE, 6);
+ case RM_ENGINE_TYPE( COPY7, CE, 7);
+ case RM_ENGINE_TYPE( COPY8, CE, 8);
+ case RM_ENGINE_TYPE( COPY9, CE, 9);
+ case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0);
+ case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1);
+ case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2);
+ case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3);
+ case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4);
+ case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5);
+ case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6);
+ case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7);
+ case RM_ENGINE_TYPE( NVENC0, NVENC, 0);
+ case RM_ENGINE_TYPE( NVENC1, NVENC, 1);
+ case RM_ENGINE_TYPE( NVENC2, NVENC, 2);
+ case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0);
+ case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1);
+ case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2);
+ case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3);
+ case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4);
+ case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5);
+ case RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6);
+ case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7);
+ case RM_ENGINE_TYPE( SW, SW, 0);
+ case RM_ENGINE_TYPE( SEC2, SEC2, 0);
+ case RM_ENGINE_TYPE( OFA, OFA, 0);
default:
return -EINVAL;
}
+#undef RM_ENGINE_TYPE
}
static int
@@ -536,16 +457,19 @@ static int
r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
- struct nvkm_gsp *gsp = subdev->device->gsp;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
struct nvkm_runl *runl;
struct nvkm_engn *engn;
- u32 cgids = 2048;
u32 chids = 2048;
+ u32 first = rm->api->fifo->rsvd_chids;
+ u32 count = chids - first;
int ret;
NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;
- if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) ||
- (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid)))
+ if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->cgid)) ||
+ (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->chid)))
return ret;
ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
@@ -576,25 +500,43 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
if (!runl)
continue;
- inst = r535_fifo_engn_type(rmid, &type);
+ inst = rm->api->fifo->xlat_rm_engine_type(rmid, &type, &nv2080);
if (inst < 0) {
nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid);
nvkm_runl_del(runl);
continue;
}
- nv2080 = r535_fifo_2080_type(type, inst);
- if (nv2080 < 0) {
+ /* Skip SW engine - there's currently no support for NV SW classes. */
+ if (type == NVKM_ENGINE_SW)
+ continue;
+
+ /* Skip lone GRCEs (ones not paired with GR on a runlist), as they
+ * don't appear to function as async copy engines.
+ */
+ if (type == NVKM_ENGINE_CE &&
+ rm->gpu->ce.grce_mask &&
+ (rm->gpu->ce.grce_mask(device) & BIT(inst)) &&
+ !nvkm_runl_find_engn(engn, runl, engn->engine->subdev.type == NVKM_ENGINE_GR)) {
+ RUNL_DEBUG(runl, "skip LCE %d - GRCE without GR", inst);
+ nvkm_runl_del(runl);
+ continue;
+ }
+
+ ret = nvkm_rm_engine_new(gsp->rm, type, inst);
+ if (ret) {
nvkm_runl_del(runl);
continue;
}
+ engn = NULL;
+
switch (type) {
case NVKM_ENGINE_CE:
- engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst);
+ engn = nvkm_runl_add(runl, nv2080, &r535_engn_ce, type, inst);
break;
case NVKM_ENGINE_GR:
- engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst);
+ engn = nvkm_runl_add(runl, nv2080, &r535_engn_gr, type, inst);
break;
case NVKM_ENGINE_NVDEC:
case NVKM_ENGINE_NVENC:
@@ -633,7 +575,7 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
}
- return r535_fifo_ectx_size(fifo);
+ return rm->api->fifo->ectx_size(fifo);
}
static void
@@ -646,6 +588,7 @@ int
r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
struct nvkm_fifo_func *rm;
if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
@@ -654,12 +597,20 @@ r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
rm->dtor = r535_fifo_dtor;
rm->runl_ctor = r535_fifo_runl_ctor;
rm->runl = &r535_runl;
- rm->cgrp = hw->cgrp;
- rm->cgrp.func = &r535_cgrp;
- rm->chan = hw->chan;
+ rm->chan.user.oclass = gpu->fifo.chan.class;
rm->chan.func = &r535_chan;
rm->nonstall = &ga100_fifo_nonstall;
rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
return nvkm_fifo_new_(rm, device, type, inst, pfifo);
}
+
+const struct nvkm_rm_api_fifo
+r535_fifo = {
+ .xlat_rm_engine_type = r535_fifo_xlat_rm_engine_type,
+ .ectx_size = r535_fifo_ectx_size,
+ .rc_triggered = r535_fifo_rc_triggered,
+ .chan = {
+ .alloc = r535_chan_alloc,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
new file mode 100644
index 000000000000..ddb57d5e73d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/gr.h>
+
+#include <core/memory.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+#include <engine/fifo/priv.h>
+#include <engine/gr/priv.h>
+
+#include <nvif/if900d.h>
+
+#include <nvhw/drf.h>
+
+#include "nvrm/gr.h"
+#include "nvrm/vmm.h"
+
+#define r535_gr(p) container_of((p), struct r535_gr, base)
+
+static void *
+r535_gr_chan_dtor(struct nvkm_object *object)
+{
+ struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
+ struct r535_gr *gr = grc->gr;
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ nvkm_vmm_put(grc->vmm, &grc->vma[i]);
+ nvkm_memory_unref(&grc->mem[i]);
+ }
+
+ nvkm_vmm_unref(&grc->vmm);
+ return grc;
+}
+
+static const struct nvkm_object_func
+r535_gr_chan = {
+ .dtor = r535_gr_chan_dtor,
+};
+
+int
+r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
+ struct nvkm_memory **pmem, struct nvkm_vma **pvma,
+ struct nvkm_gsp_object *chan)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
+ NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ ctrl->engineType = 1;
+ ctrl->hChanClient = vmm->rm.client.object.handle;
+ ctrl->hObject = chan->handle;
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
+ &ctrl->promoteEntry[ctrl->entryCount];
+ const bool alloc = golden || !gr->ctxbuf[i].global;
+ int ret;
+
+ entry->bufferId = gr->ctxbuf[i].bufferId;
+ entry->bInitialize = gr->ctxbuf[i].init && alloc;
+
+ if (alloc) {
+ ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
+ NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
+ gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
+ gr->ctxbuf[i].init, &pmem[i]);
+ if (WARN_ON(ret))
+ return ret;
+
+ if (gr->ctxbuf[i].bufferId ==
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
+ entry->bNonmapped = 1;
+ } else {
+ if (gr->ctxbuf[i].bufferId ==
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
+ continue;
+
+ pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
+ }
+
+ if (!entry->bNonmapped) {
+ struct gf100_vmm_map_v0 args = {
+ .priv = 1,
+ .ro = gr->ctxbuf[i].ro,
+ };
+
+ mutex_lock(&vmm->mutex.vmm);
+ ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
+ nvkm_memory_size(pmem[i]), &pvma[i]);
+ mutex_unlock(&vmm->mutex.vmm);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
+ if (ret)
+ return ret;
+
+ entry->gpuVirtAddr = pvma[i]->addr;
+ }
+
+ if (entry->bInitialize) {
+ entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
+ entry->size = gr->ctxbuf[i].size;
+ entry->physAttr = 4;
+ }
+
+ nvkm_debug(subdev,
+ "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
+ entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
+ entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
+
+ ctrl->entryCount++;
+ }
+
+ return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
+}
+
+int
+r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct r535_gr *gr = r535_gr(base);
+ struct r535_gr_chan *grc;
+ int ret;
+
+ if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
+ grc->gr = gr;
+ grc->vmm = nvkm_vmm_ref(chan->vmm);
+ grc->chan = chan;
+ *pobject = &grc->object;
+
+ ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+u64
+r535_gr_units(struct nvkm_gr *gr)
+{
+ struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
+
+ return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
+}
+
+void
+r535_gr_get_ctxbuf_info(struct r535_gr *gr, int i,
+ struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *info)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ static const struct {
+ u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
+ u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
+ bool global;
+ bool init;
+ bool ro;
+ } map[] = {
+#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
+ .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
+ .global = (G), .init = (I), .ro = (R) }
+#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
+ /* global init ro */
+ _A( GRAPHICS, MAIN, false, true, false),
+ _B( PATCH, false, true, false),
+ _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false),
+ _B( PAGEPOOL, true, false, false),
+ _B( ATTRIBUTE_CB, true, false, false),
+ _B( RTV_CB_GLOBAL, true, false, false),
+ _B( FECS_EVENT, true, true, false),
+ _B( PRIV_ACCESS_MAP, true, true, true),
+#undef _B
+#undef _A
+ };
+ u32 size = info->size;
+ u8 align, page;
+ int id;
+
+ for (id = 0; id < ARRAY_SIZE(map); id++) {
+ if (map[id].id0 == i)
+ break;
+ }
+
+ nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
+ size, (id < ARRAY_SIZE(map)) ? "*" : "");
+ if (id >= ARRAY_SIZE(map))
+ return;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
+ size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
+
+ if (size >= 1 << 21) page = 21;
+ else if (size >= 1 << 16) page = 16;
+ else page = 12;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
+ align = order_base_2(size);
+ else
+ align = page;
+
+ if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+ return;
+
+ gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
+ gr->ctxbuf[gr->ctxbuf_nr].size = size;
+ gr->ctxbuf[gr->ctxbuf_nr].page = page;
+ gr->ctxbuf[gr->ctxbuf_nr].align = align;
+ gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global;
+ gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init;
+ gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro;
+ gr->ctxbuf_nr++;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
+ if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+ return;
+
+ gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
+ gr->ctxbuf[gr->ctxbuf_nr].bufferId =
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
+ gr->ctxbuf_nr++;
+ }
+}
+
+static int
+r535_gr_get_ctxbufs_info(struct r535_gr *gr)
+{
+ NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_gsp *gsp = subdev->device->gsp;
+
+ info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+ sizeof(*info));
+ if (WARN_ON(IS_ERR(info)))
+ return PTR_ERR(info);
+
+ for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++)
+ r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+ return 0;
+}
+
+int
+r535_gr_oneinit(struct nvkm_gr *base)
+{
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
+ struct {
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+ struct nvkm_gsp_object chan;
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+ } golden = {};
+ struct nvkm_gsp_object threed;
+ int ret;
+
+ /* Allocate a channel to use for golden context init. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
+ if (ret)
+ goto done;
+
+ ret = r535_mmu_vaspace_new(golden.vmm, NVKM_RM_VASPACE, false);
+ if (ret)
+ goto done;
+
+ ret = rm->api->fifo->chan.alloc(&golden.vmm->rm.device, NVKM_RM_CHAN(0),
+ 1, 0, true, rm->api->fifo->rsvd_chids,
+ nvkm_memory_addr(golden.inst),
+ nvkm_memory_addr(golden.inst) + 0x1000,
+ nvkm_memory_addr(golden.inst) + 0x2000,
+ golden.vmm, 0, 0x1000, &golden.chan);
+ if (ret)
+ goto done;
+
+ /* Fetch context buffer info from RM and allocate each of them here to use
+ * during golden context init (or later as a global context buffer).
+ *
+ * Also build the information that'll be used to create channel contexts.
+ */
+ ret = rm->api->gr->get_ctxbufs_info(gr);
+ if (ret)
+ goto done;
+
+ /* Promote golden context to RM. */
+ ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
+ if (ret)
+ goto done;
+
+ /* Allocate 3D class on channel to trigger golden context init in RM. */
+ ret = nvkm_gsp_rm_alloc(&golden.chan, NVKM_RM_THREED, rm->gpu->gr.class.threed, 0, &threed);
+ if (ret)
+ goto done;
+
+ /* There's no need to keep the golden channel around, as RM caches the context. */
+ nvkm_gsp_rm_free(&threed);
+done:
+ nvkm_gsp_rm_free(&golden.chan);
+ for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
+ nvkm_vmm_put(golden.vmm, &golden.vma[i]);
+ nvkm_vmm_unref(&golden.vmm);
+ nvkm_memory_unref(&golden.inst);
+ return ret;
+
+}
+
+void *
+r535_gr_dtor(struct nvkm_gr *base)
+{
+ struct r535_gr *gr = r535_gr(base);
+
+ while (gr->ctxbuf_nr)
+ nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
+
+ kfree(gr->base.func);
+ return gr;
+}
+
+const struct nvkm_rm_api_gr
+r535_gr = {
+ .get_ctxbufs_info = r535_gr_get_ctxbufs_info,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
index db2602e88006..baf42339f93e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
@@ -19,9 +19,12 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <rm/rpc.h>
+
#include "priv.h"
#include <core/pci.h>
+#include <subdev/pci/priv.h>
#include <subdev/timer.h>
#include <subdev/vfn.h>
#include <engine/fifo/chan.h>
@@ -30,29 +33,11 @@
#include <nvfw/fw.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
-#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
-#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
-#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+#include "nvrm/gsp.h"
+#include "nvrm/rpcfn.h"
+#include "nvrm/msgfn.h"
+#include "nvrm/event.h"
+#include "nvrm/fifo.h"
#include <linux/acpi.h>
#include <linux/ctype.h>
@@ -60,990 +45,6 @@
extern struct dentry *nouveau_debugfs_root;
-#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
-#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
-
-/**
- * DOC: GSP message queue element
- *
- * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
- *
- * The GSP command queue and status queue are message queues for the
- * communication between software and GSP. The software submits a GSP
- * RPC via the GSP command queue, and GSP writes the status of the
- * submitted RPC to the status queue.
- *
- * A GSP message queue element consists of three parts:
- *
- * - message element header (struct r535_gsp_msg), which mostly maintains
- * the metadata for queuing the element.
- *
- * - RPC message header (struct nvfw_gsp_rpc), which maintains the info
- * of the RPC. E.g., the RPC function number.
- *
- * - The payload, where the RPC message stays. E.g. the params of a
- * specific RPC function. Some RPC functions also have their headers
- * in the payload. E.g. rm_alloc, rm_control.
- *
- * The memory layout of a GSP message element can be illustrated below::
- *
- * +------------------------+
- * | Message Element Header |
- * | (r535_gsp_msg) |
- * | |
- * | (r535_gsp_msg.data) |
- * | | |
- * |----------V-------------|
- * | GSP RPC Header |
- * | (nvfw_gsp_rpc) |
- * | |
- * | (nvfw_gsp_rpc.data) |
- * | | |
- * |----------V-------------|
- * | Payload |
- * | |
- * | header(optional) |
- * | params |
- * +------------------------+
- *
- * The max size of a message queue element is 16 pages (including the
- * headers). When a GSP message to be sent is larger than 16 pages, the
- * message should be split into multiple elements and sent accordingly.
- *
- * Among the split elements, the first element carries the expected
- * function number, while the rest of the elements are sent with the
- * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
- *
- * GSP consumes the elements from the cmdq and always writes the result
- * back to the msgq. The result is also formed as split elements.
- *
- * Terminology:
- *
- * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
- * payload)
- * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
- * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
- * - gsp_rpc_len: size of (GSP RPC header + payload)
- * - params_size: size of params in the payload
- * - payload_size: size of (header if exists + params) in the payload
- */
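
The layout described above maps onto the two structs that follow and onto the size bookkeeping used later in this file (GSP_MSG_HDR_SIZE is offsetof(struct r535_gsp_msg, data), and r535_gsp_cmdq_push() page-aligns the header plus gsp_rpc_len). A minimal stand-alone sketch of that arithmetic, assuming the 4 KiB GSP page size; the struct names prefixed sketch_ and the helper gsp_msg_footprint() are illustrative only and not part of the commit:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define GSP_PAGE_SIZE 0x1000u	/* 4 KiB, as used by this code */

/* Local copies of the element header and RPC header declared just below,
 * included here only so the sketch compiles on its own.
 */
struct sketch_gsp_msg {		/* mirrors struct r535_gsp_msg */
	uint8_t  auth_tag_buffer[16];
	uint8_t  aad_buffer[16];
	uint32_t checksum;
	uint32_t sequence;
	uint32_t elem_count;
	uint32_t pad;
	uint8_t  data[];
};

struct sketch_gsp_rpc {		/* mirrors struct nvfw_gsp_rpc (union collapsed) */
	uint32_t header_version, signature, length, function;
	uint32_t rpc_result, rpc_result_private, sequence, spare;
	uint8_t  data[];
};

/* Illustrative helper: total queue footprint of one RPC whose payload is
 * params_size bytes and has no extra payload header, i.e.
 *   gsp_rpc_len = sizeof(RPC header) + params_size
 *   footprint   = ALIGN(element header + gsp_rpc_len, GSP_PAGE_SIZE)
 */
static size_t gsp_msg_footprint(size_t params_size)
{
	size_t hdr = offsetof(struct sketch_gsp_msg, data);	/* GSP_MSG_HDR_SIZE */
	size_t gsp_rpc_len = sizeof(struct sketch_gsp_rpc) + params_size;
	size_t len = hdr + gsp_rpc_len;

	return (len + GSP_PAGE_SIZE - 1) & ~(size_t)(GSP_PAGE_SIZE - 1);
}

int main(void)
{
	/* A 100-byte control call still occupies one full 4 KiB page. */
	printf("small RPC: %zu bytes\n", gsp_msg_footprint(100));
	/* A 20 KiB payload spans six pages of the command queue. */
	printf("large RPC: %zu bytes\n", gsp_msg_footprint(20 * 1024));
	return 0;
}
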
-
-struct r535_gsp_msg {
- u8 auth_tag_buffer[16];
- u8 aad_buffer[16];
- u32 checksum;
- u32 sequence;
- u32 elem_count;
- u32 pad;
- u8 data[];
-};
-
-struct nvfw_gsp_rpc {
- u32 header_version;
- u32 signature;
- u32 length;
- u32 function;
- u32 rpc_result;
- u32 rpc_result_private;
- u32 sequence;
- union {
- u32 spare;
- u32 cpuRmGfid;
- };
- u8 data[];
-};
-
-#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
-
-#define to_gsp_hdr(p, header) \
- container_of((void *)p, typeof(*header), data)
-
-#define to_payload_hdr(p, header) \
- container_of((void *)p, typeof(*header), params)
-
-static int
-r535_rpc_status_to_errno(uint32_t rpc_status)
-{
- switch (rpc_status) {
- case 0x55: /* NV_ERR_NOT_READY */
- case 0x66: /* NV_ERR_TIMEOUT_RETRY */
- return -EBUSY;
- case 0x51: /* NV_ERR_NO_MEMORY */
- return -ENOMEM;
- default:
- return -EINVAL;
- }
-}
-
-static int
-r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
-{
- u32 size, rptr = *gsp->msgq.rptr;
- int used;
-
- size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
- GSP_PAGE_SIZE);
- if (WARN_ON(!size || size >= gsp->msgq.cnt))
- return -EINVAL;
-
- do {
- u32 wptr = *gsp->msgq.wptr;
-
- used = wptr + gsp->msgq.cnt - rptr;
- if (used >= gsp->msgq.cnt)
- used -= gsp->msgq.cnt;
- if (used >= size)
- break;
-
- usleep_range(1, 2);
- } while (--(*ptime));
-
- if (WARN_ON(!*ptime))
- return -ETIMEDOUT;
-
- return used;
-}
-
-static struct r535_gsp_msg *
-r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
-{
- u32 rptr = *gsp->msgq.rptr;
-
- /* Skip the first page, which is the message queue info */
- return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
- rptr * GSP_PAGE_SIZE);
-}
-
-/**
- * DOC: Receive a GSP message queue element
- *
- * Receiving a GSP message queue element from the message queue consists of
- * the following steps:
- *
- * - Peek the element from the queue: r535_gsp_msgq_peek().
- * Peek the first page of the element to determine the total size of the
- * message before allocating the proper memory.
- *
- * - Allocate memory for the message.
- * Once the total size of the message is determined from the GSP message
- * queue element, the caller of r535_gsp_msgq_recv() allocates the
- * required memory.
- *
- * - Receive the message: r535_gsp_msgq_recv().
- * Copy the message into the allocated memory. Advance the read pointer.
- * If the message is a large GSP message, r535_gsp_msgq_recv() calls
- * r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
- * until the complete message is received.
- * r535_gsp_msgq_recv() assembles the payloads of the continuation parts
- * into the buffer returned for the large GSP message.
- *
- * - Free the allocated memory: r535_gsp_msg_done().
- * The user is responsible for freeing the memory allocated for the GSP
- * message pages after they have been processed.
- */
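
The peek step above relies on a wrap-around occupancy check against the queue read/write pointers (r535_gsp_msgq_wait() in the removed code, with the mirror-image free-space check in r535_gsp_cmdq_push() further down). A small user-space sketch of that pointer arithmetic; the helper names msgq_used() and cmdq_free() are illustrative only:

#include <stdio.h>

/* Both queues are rings of page-sized slots; cnt is the ring size in pages.
 * These helpers reproduce the pointer arithmetic from the functions named
 * above, outside of any kernel context, purely to show the wrap-around.
 */

/* Pages currently pending in the status queue (cf. r535_gsp_msgq_wait()). */
static unsigned int msgq_used(unsigned int rptr, unsigned int wptr, unsigned int cnt)
{
	unsigned int used = wptr + cnt - rptr;

	if (used >= cnt)
		used -= cnt;
	return used;
}

/* Pages still free in the command queue (cf. r535_gsp_cmdq_push()); one slot
 * is kept unused so a full ring can be told apart from an empty one.
 */
static unsigned int cmdq_free(unsigned int rptr, unsigned int wptr, unsigned int cnt)
{
	unsigned int free = rptr + cnt - wptr - 1;

	if (free >= cnt)
		free -= cnt;
	return free;
}

int main(void)
{
	/* Writer has wrapped past the end of a 0x40-page ring: 4 pages pending. */
	printf("msgq used: %u\n", msgq_used(0x3e, 0x02, 0x40));
	/* Reader at 0x10, writer at 0x30: 0x1f pages left for new commands. */
	printf("cmdq free: %u\n", cmdq_free(0x10, 0x30, 0x40));
	return 0;
}
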
-static void *
-r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
-{
- struct r535_gsp_msg *mqe;
- int ret;
-
- ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
- if (ret < 0)
- return ERR_PTR(ret);
-
- mqe = r535_gsp_msgq_get_entry(gsp);
-
- return mqe->data;
-}
-
-struct r535_gsp_msg_info {
- int *retries;
- u32 gsp_rpc_len;
- void *gsp_rpc_buf;
- bool continuation;
-};
-
-static void
-r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
-
-static void *
-r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
- struct r535_gsp_msg_info *info)
-{
- u8 *buf = info->gsp_rpc_buf;
- u32 rptr = *gsp->msgq.rptr;
- struct r535_gsp_msg *mqe;
- u32 size, expected, len;
- int ret;
-
- expected = info->gsp_rpc_len;
-
- ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
- if (ret < 0)
- return ERR_PTR(ret);
-
- mqe = r535_gsp_msgq_get_entry(gsp);
-
- if (info->continuation) {
- struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
-
- if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
- nvkm_error(&gsp->subdev,
- "Not a continuation of a large RPC\n");
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- return ERR_PTR(-EIO);
- }
- }
-
- size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
-
- len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
- len = min_t(u32, expected, len);
-
- if (info->continuation)
- memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
- len - sizeof(struct nvfw_gsp_rpc));
- else
- memcpy(buf, mqe->data, len);
-
- expected -= len;
-
- if (expected) {
- mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
- memcpy(buf + len, mqe, expected);
- }
-
- rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
-
- mb();
- (*gsp->msgq.rptr) = rptr;
- return buf;
-}
-
-static void *
-r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
-{
- struct r535_gsp_msg *mqe;
- const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
- struct nvfw_gsp_rpc *rpc;
- struct r535_gsp_msg_info info = {0};
- u32 expected = gsp_rpc_len;
- void *buf;
-
- mqe = r535_gsp_msgq_get_entry(gsp);
- rpc = (struct nvfw_gsp_rpc *)mqe->data;
-
- if (WARN_ON(rpc->length > max_rpc_size))
- return NULL;
-
- buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- info.gsp_rpc_buf = buf;
- info.retries = retries;
- info.gsp_rpc_len = rpc->length;
-
- buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
- if (IS_ERR(buf)) {
- kvfree(info.gsp_rpc_buf);
- info.gsp_rpc_buf = NULL;
- return buf;
- }
-
- if (expected <= max_rpc_size)
- return buf;
-
- info.gsp_rpc_buf += info.gsp_rpc_len;
- expected -= info.gsp_rpc_len;
-
- while (expected) {
- u32 size;
-
- rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
- if (IS_ERR_OR_NULL(rpc)) {
- kfree(buf);
- return rpc;
- }
-
- info.gsp_rpc_len = rpc->length;
- info.continuation = true;
-
- rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
- if (IS_ERR_OR_NULL(rpc)) {
- kfree(buf);
- return rpc;
- }
-
- size = info.gsp_rpc_len - sizeof(*rpc);
- expected -= size;
- info.gsp_rpc_buf += size;
- }
-
- rpc = buf;
- rpc->length = gsp_rpc_len;
- return buf;
-}
-
-static int
-r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
-{
- struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
- struct r535_gsp_msg *cqe;
- u32 gsp_rpc_len = msg->checksum;
- u64 *ptr = (void *)msg;
- u64 *end;
- u64 csum = 0;
- int free, time = 1000000;
- u32 wptr, size, step, len;
- u32 off = 0;
-
- len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
-
- end = (u64 *)((char *)ptr + len);
- msg->pad = 0;
- msg->checksum = 0;
- msg->sequence = gsp->cmdq.seq++;
- msg->elem_count = DIV_ROUND_UP(len, 0x1000);
-
- while (ptr < end)
- csum ^= *ptr++;
-
- msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
-
- wptr = *gsp->cmdq.wptr;
- do {
- do {
- free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
- if (free >= gsp->cmdq.cnt)
- free -= gsp->cmdq.cnt;
- if (free >= 1)
- break;
-
- usleep_range(1, 2);
- } while(--time);
-
- if (WARN_ON(!time)) {
- kvfree(msg);
- return -ETIMEDOUT;
- }
-
- cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
- step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
- size = min_t(u32, len, step * GSP_PAGE_SIZE);
-
- memcpy(cqe, (u8 *)msg + off, size);
-
- wptr += DIV_ROUND_UP(size, 0x1000);
- if (wptr == gsp->cmdq.cnt)
- wptr = 0;
-
- off += size;
- len -= size;
- } while (len);
-
- nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
- wmb();
- (*gsp->cmdq.wptr) = wptr;
- mb();
-
- nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
-
- kvfree(msg);
- return 0;
-}
-
-static void *
-r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
-{
- struct r535_gsp_msg *msg;
- u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;
-
- size = ALIGN(size, GSP_MSG_MIN_SIZE);
- msg = kvzalloc(size, GFP_KERNEL);
- if (!msg)
- return ERR_PTR(-ENOMEM);
-
- msg->checksum = gsp_rpc_len;
- return msg->data;
-}
-
-static void
-r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
-{
- kvfree(msg);
-}
-
-static void
-r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
-{
- if (gsp->subdev.debug >= lvl) {
- nvkm_printk__(&gsp->subdev, lvl, info,
- "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
- msg->function, msg->length, msg->length - sizeof(*msg),
- msg->rpc_result, msg->rpc_result_private);
- print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
- msg->data, msg->length - sizeof(*msg), true);
- }
-}
-
-static struct nvfw_gsp_rpc *
-r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvfw_gsp_rpc *rpc;
- int retries = 4000000, i;
-
-retry:
- rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- if (rpc->rpc_result) {
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, rpc);
- return ERR_PTR(-EINVAL);
- }
-
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);
-
- if (fn && rpc->function == fn) {
- if (gsp_rpc_len) {
- if (rpc->length < gsp_rpc_len) {
- nvkm_error(subdev, "rpc len %d < %d\n",
- rpc->length, gsp_rpc_len);
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, rpc);
- return ERR_PTR(-EIO);
- }
-
- return rpc;
- }
-
- r535_gsp_msg_done(gsp, rpc);
- return NULL;
- }
-
- for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
- struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
-
- if (ntfy->fn == rpc->function) {
- if (ntfy->func)
- ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
- rpc->length - sizeof(*rpc));
- break;
- }
- }
-
- if (i == gsp->msgq.ntfy_nr)
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);
-
- r535_gsp_msg_done(gsp, rpc);
- if (fn)
- goto retry;
-
- if (*gsp->msgq.rptr != *gsp->msgq.wptr)
- goto retry;
-
- return NULL;
-}
-
-static int
-r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
-{
- int ret = 0;
-
- mutex_lock(&gsp->msgq.mutex);
- if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
- ret = -ENOSPC;
- } else {
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
- gsp->msgq.ntfy_nr++;
- }
- mutex_unlock(&gsp->msgq.mutex);
- return ret;
-}
-
-static int
-r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
-{
- void *repv;
-
- mutex_lock(&gsp->cmdq.mutex);
- repv = r535_gsp_msg_recv(gsp, fn, 0);
- mutex_unlock(&gsp->cmdq.mutex);
- if (IS_ERR(repv))
- return PTR_ERR(repv);
-
- return 0;
-}
-
-static void *
-r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait,
- u32 gsp_rpc_len)
-{
- struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
- struct nvfw_gsp_rpc *msg;
- u32 fn = rpc->function;
- void *repv = NULL;
- int ret;
-
- if (gsp->subdev.debug >= NV_DBG_TRACE) {
- nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
- rpc->length, rpc->length - sizeof(*rpc));
- print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
- rpc->data, rpc->length - sizeof(*rpc), true);
- }
-
- ret = r535_gsp_cmdq_push(gsp, rpc);
- if (ret)
- return ERR_PTR(ret);
-
- if (wait) {
- msg = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
- if (!IS_ERR_OR_NULL(msg))
- repv = msg->data;
- else
- repv = msg;
- }
-
- return repv;
-}
-
-static void
-r535_gsp_event_dtor(struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_device *device = event->device;
- struct nvkm_gsp_client *client = device->object.client;
- struct nvkm_gsp *gsp = client->gsp;
-
- mutex_lock(&gsp->client_id.mutex);
- if (event->func) {
- list_del(&event->head);
- event->func = NULL;
- }
- mutex_unlock(&gsp->client_id.mutex);
-
- nvkm_gsp_rm_free(&event->object);
- event->device = NULL;
-}
-
-static int
-r535_gsp_device_event_get(struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_device *device = event->device;
- NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
- NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- ctrl->event = event->id;
- ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
- return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
-}
-
-static int
-r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
- nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_client *client = device->object.client;
- struct nvkm_gsp *gsp = client->gsp;
- NV0005_ALLOC_PARAMETERS *args;
- int ret;
-
- args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
- NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
- &event->object);
- if (IS_ERR(args))
- return PTR_ERR(args);
-
- args->hParentClient = client->object.handle;
- args->hSrcResource = 0;
- args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
- args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
- args->data = NULL;
-
- ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
- if (ret)
- return ret;
-
- event->device = device;
- event->id = id;
-
- ret = r535_gsp_device_event_get(event);
- if (ret) {
- nvkm_gsp_event_dtor(event);
- return ret;
- }
-
- mutex_lock(&gsp->client_id.mutex);
- event->func = func;
- list_add(&event->head, &client->events);
- mutex_unlock(&gsp->client_id.mutex);
- return 0;
-}
-
-static void
-r535_gsp_device_dtor(struct nvkm_gsp_device *device)
-{
- nvkm_gsp_rm_free(&device->subdevice);
- nvkm_gsp_rm_free(&device->object);
-}
-
-static int
-r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
-{
- NV2080_ALLOC_PARAMETERS *args;
-
- return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args),
- &device->subdevice);
-}
-
-static int
-r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
-{
- NV0080_ALLOC_PARAMETERS *args;
- int ret;
-
- args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args),
- &device->object);
- if (IS_ERR(args))
- return PTR_ERR(args);
-
- args->hClientShare = client->object.handle;
-
- ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
- if (ret)
- return ret;
-
- ret = r535_gsp_subdevice_ctor(device);
- if (ret)
- nvkm_gsp_rm_free(&device->object);
-
- return ret;
-}
-
-static void
-r535_gsp_client_dtor(struct nvkm_gsp_client *client)
-{
- struct nvkm_gsp *gsp = client->gsp;
-
- nvkm_gsp_rm_free(&client->object);
-
- mutex_lock(&gsp->client_id.mutex);
- idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff);
- mutex_unlock(&gsp->client_id.mutex);
-
- client->gsp = NULL;
-}
-
-static int
-r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
-{
- NV0000_ALLOC_PARAMETERS *args;
- int ret;
-
- mutex_lock(&gsp->client_id.mutex);
- ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL);
- mutex_unlock(&gsp->client_id.mutex);
- if (ret < 0)
- return ret;
-
- client->gsp = gsp;
- client->object.client = client;
- INIT_LIST_HEAD(&client->events);
-
- args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args),
- &client->object);
- if (IS_ERR(args)) {
- r535_gsp_client_dtor(client);
- return ret;
- }
-
- args->hClient = client->object.handle;
- args->processID = ~0;
-
- ret = nvkm_gsp_rm_alloc_wr(&client->object, args);
- if (ret) {
- r535_gsp_client_dtor(client);
- return ret;
- }
-
- return 0;
-}
-
-static int
-r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_free_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
- client->object.handle, object->handle);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
- if (WARN_ON(IS_ERR_OR_NULL(rpc)))
- return -EIO;
-
- rpc->params.hRoot = client->object.handle;
- rpc->params.hObjectParent = 0;
- rpc->params.hObjectOld = object->handle;
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
-}
-
-static void
-r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
-
- nvkm_gsp_rpc_done(object->client->gsp, rpc);
-}
-
-static void *
-r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
- struct nvkm_gsp *gsp = object->client->gsp;
- void *ret = NULL;
-
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc));
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- if (rpc->status) {
- ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
- if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
- nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
- }
-
- nvkm_gsp_rpc_done(gsp, rpc);
-
- return ret;
-}
-
-static void *
-r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass,
- u32 params_size)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_gsp_rm_alloc_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n",
- client->object.handle, object->parent->handle,
- object->handle);
-
- nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass,
- params_size);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC,
- sizeof(*rpc) + params_size);
- if (IS_ERR(rpc))
- return rpc;
-
- rpc->hClient = client->object.handle;
- rpc->hParent = object->parent->handle;
- rpc->hObject = object->handle;
- rpc->hClass = oclass;
- rpc->status = 0;
- rpc->paramsSize = params_size;
- return rpc->params;
-}
-
-static void
-r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc);
-
- if (!params)
- return;
- nvkm_gsp_rpc_done(object->client->gsp, rpc);
-}
-
-static int
-r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc)
-{
- rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc);
- struct nvkm_gsp *gsp = object->client->gsp;
- int ret = 0;
-
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
- if (IS_ERR_OR_NULL(rpc)) {
- *params = NULL;
- return PTR_ERR(rpc);
- }
-
- if (rpc->status) {
- ret = r535_rpc_status_to_errno(rpc->status);
- if (ret != -EAGAIN && ret != -EBUSY)
- nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
- object->client->object.handle, object->handle, rpc->cmd, rpc->status);
- }
-
- if (repc)
- *params = rpc->params;
- else
- nvkm_gsp_rpc_done(gsp, rpc);
-
- return ret;
-}
-
-static void *
-r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_gsp_rm_control_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n",
- client->object.handle, object->handle, cmd, params_size);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
- sizeof(*rpc) + params_size);
- if (IS_ERR(rpc))
- return rpc;
-
- rpc->hClient = client->object.handle;
- rpc->hObject = object->handle;
- rpc->cmd = cmd;
- rpc->status = 0;
- rpc->paramsSize = params_size;
- return rpc->params;
-}
-
-static void
-r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
-{
- struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);
-
- r535_gsp_msg_done(gsp, rpc);
-}
-
-static void *
-r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
-{
- struct nvfw_gsp_rpc *rpc;
-
- rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size,
- sizeof(u64)));
- if (IS_ERR(rpc))
- return ERR_CAST(rpc);
-
- rpc->header_version = 0x03000000;
- rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
- rpc->function = fn;
- rpc->rpc_result = 0xffffffff;
- rpc->rpc_result_private = 0xffffffff;
- rpc->length = sizeof(*rpc) + payload_size;
- return rpc->data;
-}
-
-static void *
-r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait,
- u32 gsp_rpc_len)
-{
- struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
- struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
- const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg);
- const u32 max_payload_size = max_rpc_size - sizeof(*rpc);
- u32 payload_size = rpc->length - sizeof(*rpc);
- void *repv;
-
- mutex_lock(&gsp->cmdq.mutex);
- if (payload_size > max_payload_size) {
- const u32 fn = rpc->function;
- u32 remain_payload_size = payload_size;
-
- /* Adjust length, and send initial RPC. */
- rpc->length = sizeof(*rpc) + max_payload_size;
- msg->checksum = rpc->length;
-
- repv = r535_gsp_rpc_send(gsp, payload, false, 0);
- if (IS_ERR(repv))
- goto done;
-
- payload += max_payload_size;
- remain_payload_size -= max_payload_size;
-
- /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
- while (remain_payload_size) {
- u32 size = min(remain_payload_size,
- max_payload_size);
- void *next;
-
- next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
- if (IS_ERR(next)) {
- repv = next;
- goto done;
- }
-
- memcpy(next, payload, size);
-
- repv = r535_gsp_rpc_send(gsp, next, false, 0);
- if (IS_ERR(repv))
- goto done;
-
- payload += size;
- remain_payload_size -= size;
- }
-
- /* Wait for reply. */
- rpc = r535_gsp_msg_recv(gsp, fn, payload_size +
- sizeof(*rpc));
- if (!IS_ERR_OR_NULL(rpc)) {
- if (wait) {
- repv = rpc->data;
- } else {
- nvkm_gsp_rpc_done(gsp, rpc);
- repv = NULL;
- }
- } else {
- repv = wait ? rpc : NULL;
- }
- } else {
- repv = r535_gsp_rpc_send(gsp, payload, wait, gsp_rpc_len);
- }
-
-done:
- mutex_unlock(&gsp->cmdq.mutex);
- return repv;
-}
-
-const struct nvkm_gsp_rm
-r535_gsp_rm = {
- .rpc_get = r535_gsp_rpc_get,
- .rpc_push = r535_gsp_rpc_push,
- .rpc_done = r535_gsp_rpc_done,
-
- .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get,
- .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push,
- .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done,
-
- .rm_alloc_get = r535_gsp_rpc_rm_alloc_get,
- .rm_alloc_push = r535_gsp_rpc_rm_alloc_push,
- .rm_alloc_done = r535_gsp_rpc_rm_alloc_done,
-
- .rm_free = r535_gsp_rpc_rm_free,
-
- .client_ctor = r535_gsp_client_ctor,
- .client_dtor = r535_gsp_client_dtor,
-
- .device_ctor = r535_gsp_device_ctor,
- .device_dtor = r535_gsp_device_dtor,
-
- .event_ctor = r535_gsp_device_event_ctor,
- .event_dtor = r535_gsp_event_dtor,
-};
-
static void
r535_gsp_msgq_work(struct work_struct *work)
{
@@ -1086,10 +87,52 @@ r535_gsp_intr(struct nvkm_inth *inth)
return IRQ_HANDLED;
}
+static bool
+r535_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst)
+{
+ switch (mc_engine_idx) {
+ case MC_ENGINE_IDX_GSP:
+ *ptype = NVKM_SUBDEV_GSP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_DISP:
+ *ptype = NVKM_ENGINE_DISP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
+ *ptype = NVKM_ENGINE_CE;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0;
+ return true;
+ case MC_ENGINE_IDX_GR0:
+ *ptype = NVKM_ENGINE_GR;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+ *ptype = NVKM_ENGINE_NVDEC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0;
+ return true;
+ case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
+ *ptype = NVKM_ENGINE_NVENC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_MSENC;
+ return true;
+ case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+ *ptype = NVKM_ENGINE_NVJPG;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0;
+ return true;
+ case MC_ENGINE_IDX_OFA0:
+ *ptype = NVKM_ENGINE_OFA;
+ *pinst = 0;
+ return true;
+ default:
+ return false;
+ }
+}
+
static int
r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
{
NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
int ret = 0;
ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
@@ -1112,42 +155,8 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);
- switch (ctrl->table[i].engineIdx) {
- case MC_ENGINE_IDX_GSP:
- type = NVKM_SUBDEV_GSP;
- inst = 0;
- break;
- case MC_ENGINE_IDX_DISP:
- type = NVKM_ENGINE_DISP;
- inst = 0;
- break;
- case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
- type = NVKM_ENGINE_CE;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0;
- break;
- case MC_ENGINE_IDX_GR0:
- type = NVKM_ENGINE_GR;
- inst = 0;
- break;
- case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
- type = NVKM_ENGINE_NVDEC;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0;
- break;
- case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
- type = NVKM_ENGINE_NVENC;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC;
- break;
- case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
- type = NVKM_ENGINE_NVJPG;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0;
- break;
- case MC_ENGINE_IDX_OFA0:
- type = NVKM_ENGINE_OFA;
- inst = 0;
- break;
- default:
+ if (!rmapi->gsp->xlat_mc_engine_idx(ctrl->table[i].engineIdx, &type, &inst))
continue;
- }
if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) {
ret = -ENOSPC;
@@ -1165,35 +174,14 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
return ret;
}
-static int
-r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
+void
+r535_gsp_get_static_info_fb(struct nvkm_gsp *gsp,
+ const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *info)
{
- GspStaticConfigInfo *rpc;
int last_usable = -1;
- rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
- if (IS_ERR(rpc))
- return PTR_ERR(rpc);
-
- gsp->internal.client.object.client = &gsp->internal.client;
- gsp->internal.client.object.parent = NULL;
- gsp->internal.client.object.handle = rpc->hInternalClient;
- gsp->internal.client.gsp = gsp;
-
- gsp->internal.device.object.client = &gsp->internal.client;
- gsp->internal.device.object.parent = &gsp->internal.client.object;
- gsp->internal.device.object.handle = rpc->hInternalDevice;
-
- gsp->internal.device.subdevice.client = &gsp->internal.client;
- gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
- gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
-
- gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
- gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
-
- for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) {
- NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg =
- &rpc->fbRegionInfoParams.fbRegion[i];
+ for (int i = 0; i < info->numFBRegions; i++) {
+ const NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = &info->fbRegion[i];
nvkm_debug(&gsp->subdev, "fb region %d: "
"%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i,
@@ -1215,10 +203,38 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
}
if (last_usable >= 0) {
- u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1;
+ u32 rsvd_base = info->fbRegion[last_usable].limit + 1;
gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base;
}
+}
+
+static int
+r535_gsp_get_static_info(struct nvkm_gsp *gsp)
+{
+ GspStaticConfigInfo *rpc;
+
+ rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ gsp->internal.client.object.client = &gsp->internal.client;
+ gsp->internal.client.object.parent = NULL;
+ gsp->internal.client.object.handle = rpc->hInternalClient;
+ gsp->internal.client.gsp = gsp;
+
+ gsp->internal.device.object.client = &gsp->internal.client;
+ gsp->internal.device.object.parent = &gsp->internal.client.object;
+ gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+ gsp->internal.device.subdevice.client = &gsp->internal.client;
+ gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+ gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+ gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+ gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+ r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams);
for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) {
if (rpc->gpcInfo.gpcMask & BIT(gpc)) {
@@ -1231,7 +247,7 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
return 0;
}
-static void
+void
nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
{
if (mem->data) {
@@ -1260,7 +276,7 @@ nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
* so we take a device reference to ensure its lifetime. The reference is
* dropped in the destructor.
*/
-static int
+int
nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
{
mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
@@ -1277,9 +293,10 @@ static int
r535_gsp_postinit(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
int ret;
- ret = r535_gsp_rpc_get_gsp_static_info(gsp);
+ ret = rmapi->gsp->get_static_info(gsp);
if (WARN_ON(ret))
return ret;
@@ -1327,7 +344,7 @@ r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend)
rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;
}
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
}
enum registry_type {
@@ -1684,7 +701,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
build_registry(gsp, rpc);
- return nvkm_gsp_rpc_wr(gsp, rpc, false);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT);
fail:
clean_registry(gsp);
@@ -1692,7 +709,7 @@ fail:
}
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-static void
+void
r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
{
const guid_t NVOP_DSM_GUID =
@@ -1726,7 +743,7 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
kfree(argv4.buffer.pointer);
}
-static void
+void
r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
{
const guid_t JT_DSM_GUID =
@@ -1818,7 +835,7 @@ r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux
}
}
-static void
+void
r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
{
acpi_status status;
@@ -1871,7 +888,7 @@ r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
}
static int
-r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
+r535_gsp_set_system_info(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device);
@@ -1884,16 +901,16 @@ r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
if (IS_ERR(info))
return PTR_ERR(info);
- info->gpuPhysAddr = device->func->resource_addr(device, 0);
- info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
- info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
+ info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB);
+ info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST);
info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev);
info->maxUserVa = TASK_SIZE;
- info->pciConfigMirrorBase = 0x088000;
- info->pciConfigMirrorSize = 0x001000;
+ info->pciConfigMirrorBase = device->pci->func->cfg.addr;
+ info->pciConfigMirrorSize = device->pci->func->cfg.size;
r535_gsp_acpi_info(gsp, &info->acpiMethodData);
- return nvkm_gsp_rpc_wr(gsp, info, false);
+ return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
}
static int
@@ -1911,33 +928,6 @@ r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc)
}
static int
-r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
-{
- rpc_rc_triggered_v17_02 *msg = repv;
- struct nvkm_gsp *gsp = priv;
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvkm_chan *chan;
- unsigned long flags;
-
- if (WARN_ON(repc < sizeof(*msg)))
- return -EINVAL;
-
- nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n",
- msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
- msg->partitionAttributionId);
-
- chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags);
- if (!chan) {
- nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid);
- return 0;
- }
-
- nvkm_chan_error(chan, false);
- nvkm_chan_put(&chan, flags);
- return 0;
-}
-
-static int
r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
{
struct nvkm_gsp *gsp = priv;
@@ -2130,97 +1120,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
}
static int
-r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvkm_device *device = subdev->device;
- u32 wpr2_hi;
- int ret;
-
- wpr2_hi = nvkm_rd32(device, 0x1fa828);
- if (!wpr2_hi) {
- nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
- return 0;
- }
-
- ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
- if (WARN_ON(ret))
- return ret;
-
- wpr2_hi = nvkm_rd32(device, 0x1fa828);
- if (WARN_ON(wpr2_hi))
- return -EIO;
-
- return 0;
-}
-
-static int
-r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
-{
- int ret;
-
- ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
- if (ret)
- return ret;
-
- nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
-
- if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
- return -EIO;
-
- return 0;
-}
-
-static int
-r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
-{
- GspFwWprMeta *meta;
- int ret;
-
- ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta);
- if (ret)
- return ret;
-
- meta = gsp->wpr_meta.data;
-
- meta->magic = GSP_FW_WPR_META_MAGIC;
- meta->revision = GSP_FW_WPR_META_REVISION;
-
- meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
- meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
-
- meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
- meta->sizeOfBootloader = gsp->boot.fw.size;
- meta->bootloaderCodeOffset = gsp->boot.code_offset;
- meta->bootloaderDataOffset = gsp->boot.data_offset;
- meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
-
- meta->sysmemAddrOfSignature = gsp->sig.addr;
- meta->sizeOfSignature = gsp->sig.size;
-
- meta->gspFwRsvdStart = gsp->fb.heap.addr;
- meta->nonWprHeapOffset = gsp->fb.heap.addr;
- meta->nonWprHeapSize = gsp->fb.heap.size;
- meta->gspFwWprStart = gsp->fb.wpr2.addr;
- meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
- meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
- meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
- meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
- meta->frtsOffset = gsp->fb.wpr2.frts.addr;
- meta->frtsSize = gsp->fb.wpr2.frts.size;
- meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
- meta->fbSize = gsp->fb.size;
- meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
- meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
- meta->bootCount = 0;
- meta->partitionRpcAddr = 0;
- meta->partitionRpcRequestOffset = 0;
- meta->partitionRpcReplyOffset = 0;
- meta->verified = 0;
- return 0;
-}
-
-static int
r535_gsp_shared_init(struct nvkm_gsp *gsp)
{
struct {
@@ -2271,23 +1170,11 @@ r535_gsp_shared_init(struct nvkm_gsp *gsp)
return 0;
}
-static int
-r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+static void
+r535_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume)
{
- GSP_ARGUMENTS_CACHED *args;
- int ret;
-
- if (!resume) {
- ret = r535_gsp_shared_init(gsp);
- if (ret)
- return ret;
+ GSP_ARGUMENTS_CACHED *args = gsp->rmargs.data;
- ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
- if (ret)
- return ret;
- }
-
- args = gsp->rmargs.data;
args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
args->messageQueueInitArguments.cmdQueueOffset =
@@ -2304,7 +1191,24 @@ r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
args->srInitArguments.flags = 0;
args->srInitArguments.bInPMTransition = 1;
}
+}
+static int
+r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+{
+ int ret;
+
+ if (!resume) {
+ ret = r535_gsp_shared_init(gsp);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
+ if (ret)
+ return ret;
+ }
+
+ gsp->rm->api->gsp->set_rmargs(gsp, resume);
return 0;
}
@@ -2797,18 +1701,22 @@ lvl1_fail:
return ret;
}
+static u32
+r535_gsp_sr_data_size(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
+ return meta->gspFwWprEnd - meta->gspFwWprStart;
+}
+
int
r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
{
- u32 mbox0 = 0xff, mbox1 = 0xff;
+ struct nvkm_rm *rm = gsp->rm;
int ret;
- if (!gsp->running)
- return 0;
-
if (suspend) {
- GspFwWprMeta *meta = gsp->wpr_meta.data;
- u64 len = meta->gspFwWprEnd - meta->gspFwWprStart;
+ u32 len = rm->api->gsp->sr_data_size(gsp);
GspFwSRMeta *sr;
ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt);
@@ -2829,8 +1737,13 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr;
sr->sizeOfSuspendResumeData = len;
- mbox0 = lower_32_bits(gsp->sr.meta.addr);
- mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ ret = rm->api->fbsr->suspend(gsp);
+ if (ret) {
+ nvkm_gsp_mem_dtor(&gsp->sr.meta);
+ nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
+ nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
+ return ret;
+ }
}
ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend);
@@ -2838,18 +1751,10 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
return ret;
nvkm_msec(gsp->subdev.device, 2000,
- if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000)
+ if (nvkm_falcon_rd32(&gsp->falcon, 0x040) == 0x80000000)
break;
);
- nvkm_falcon_reset(&gsp->falcon);
-
- ret = nvkm_gsp_fwsec_sb(gsp);
- WARN_ON(ret);
-
- ret = r535_gsp_booter_unload(gsp, mbox0, mbox1);
- WARN_ON(ret);
-
gsp->running = false;
return 0;
}
@@ -2857,23 +1762,12 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
int
r535_gsp_init(struct nvkm_gsp *gsp)
{
- u32 mbox0, mbox1;
int ret;
- if (!gsp->sr.meta.data) {
- mbox0 = lower_32_bits(gsp->wpr_meta.addr);
- mbox1 = upper_32_bits(gsp->wpr_meta.addr);
- } else {
- r535_gsp_rmargs_init(gsp, true);
-
- mbox0 = lower_32_bits(gsp->sr.meta.addr);
- mbox1 = upper_32_bits(gsp->sr.meta.addr);
- }
+ nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
- /* Execute booter to handle (eventually...) booting GSP-RM. */
- ret = r535_gsp_booter_load(gsp, mbox0, mbox1);
- if (WARN_ON(ret))
- goto done;
+ if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
+ return -EIO;
ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE);
if (ret)
@@ -2883,6 +1777,8 @@ r535_gsp_init(struct nvkm_gsp *gsp)
done:
if (gsp->sr.meta.data) {
+ gsp->rm->api->fbsr->resume(gsp);
+
nvkm_gsp_mem_dtor(&gsp->sr.meta);
nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
@@ -2944,19 +1840,6 @@ r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u
return -ENOENT;
}
-static void
-r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
-{
- nvkm_firmware_put(gsp->fws.bl);
- gsp->fws.bl = NULL;
- nvkm_firmware_put(gsp->fws.booter.unload);
- gsp->fws.booter.unload = NULL;
- nvkm_firmware_put(gsp->fws.booter.load);
- gsp->fws.booter.load = NULL;
- nvkm_firmware_put(gsp->fws.rm);
- gsp->fws.rm = NULL;
-}
-
#ifdef CONFIG_DEBUG_FS
struct r535_gsp_log {
@@ -3190,10 +2073,16 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
nvkm_falcon_fw_dtor(&gsp->booter.unload);
nvkm_falcon_fw_dtor(&gsp->booter.load);
+ nvkm_gsp_mem_dtor(&gsp->fmc.args);
+ kfree(gsp->fmc.sig);
+ kfree(gsp->fmc.pkey);
+ kfree(gsp->fmc.hash);
+ nvkm_gsp_mem_dtor(&gsp->fmc.fw);
+
mutex_destroy(&gsp->msgq.mutex);
mutex_destroy(&gsp->cmdq.mutex);
- r535_gsp_dtor_fws(gsp);
+ nvkm_gsp_dtor_fws(gsp);
nvkm_gsp_mem_dtor(&gsp->rmargs);
nvkm_gsp_mem_dtor(&gsp->wpr_meta);
@@ -3206,10 +2095,17 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
nvkm_gsp_mem_dtor(&gsp->logrm);
}
+static void
+r535_gsp_drop_send_user_shared_data(struct nvkm_gsp *gsp)
+{
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
+}
+
int
r535_gsp_oneinit(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
const u8 *data;
u64 size;
int ret;
@@ -3217,16 +2113,6 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
mutex_init(&gsp->cmdq.mutex);
mutex_init(&gsp->msgq.mutex);
- ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
- &device->sec2->falcon, &gsp->booter.load);
- if (ret)
- return ret;
-
- ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
- &device->sec2->falcon, &gsp->booter.unload);
- if (ret)
- return ret;
-
/* Load GSP firmware from ELF image into DMA-accessible memory. */
ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);
if (ret)
@@ -3255,65 +2141,29 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
r535_gsp_msg_run_cpu_sequencer, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
- r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED,
- r535_gsp_msg_rc_triggered, gsp);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, rmapi->fifo->rc_triggered, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
r535_gsp_msg_mmu_fault_queued, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL);
- r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
+ if (rmapi->gsp->drop_send_user_shared_data)
+ rmapi->gsp->drop_send_user_shared_data(gsp);
+ if (rmapi->gsp->drop_post_nocat_record)
+ rmapi->gsp->drop_post_nocat_record(gsp);
+
ret = r535_gsp_rm_boot_ctor(gsp);
if (ret)
return ret;
/* Release FW images - we've copied them to DMA buffers now. */
- r535_gsp_dtor_fws(gsp);
-
- /* Calculate FB layout. */
- gsp->fb.wpr2.frts.size = 0x100000;
- gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
-
- gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
- gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
-
- gsp->fb.wpr2.elf.size = gsp->fw.len;
- gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
-
- {
- u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
-
- gsp->fb.wpr2.heap.size =
- gsp->func->wpr_heap.os_carveout_size +
- gsp->func->wpr_heap.base_size +
- ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
- ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
-
- gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);
- }
-
- gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
- gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
-
- gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
- gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
-
- gsp->fb.heap.size = 0x100000;
- gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
-
- ret = nvkm_gsp_fwsec_frts(gsp);
- if (WARN_ON(ret))
- return ret;
+ nvkm_gsp_dtor_fws(gsp);
ret = r535_gsp_libos_init(gsp);
if (WARN_ON(ret))
return ret;
- ret = r535_gsp_wpr_meta_init(gsp);
- if (WARN_ON(ret))
- return ret;
-
- ret = r535_gsp_rpc_set_system_info(gsp);
+ ret = rmapi->gsp->set_system_info(gsp);
if (WARN_ON(ret))
return ret;
@@ -3321,76 +2171,17 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
if (WARN_ON(ret))
return ret;
- /* Reset GSP into RISC-V mode. */
- ret = gsp->func->reset(gsp);
- if (WARN_ON(ret))
- return ret;
-
- nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
- nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
-
mutex_init(&gsp->client_id.mutex);
idr_init(&gsp->client_id.idr);
return 0;
}
-static int
-r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
- const struct firmware **pfw)
-{
- char fwname[64];
-
- snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
- return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
-}
-
-int
-r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- int ret;
- bool enable_gsp = fwif->enable;
-
-#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
- enable_gsp = true;
-#endif
- if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
- return -EINVAL;
-
- if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
- (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) ||
- (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) ||
- (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) {
- r535_gsp_dtor_fws(gsp);
- return ret;
- }
-
- return 0;
-}
-
-#define NVKM_GSP_FIRMWARE(chip) \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin")
-
-NVKM_GSP_FIRMWARE(tu102);
-NVKM_GSP_FIRMWARE(tu104);
-NVKM_GSP_FIRMWARE(tu106);
-
-NVKM_GSP_FIRMWARE(tu116);
-NVKM_GSP_FIRMWARE(tu117);
-
-NVKM_GSP_FIRMWARE(ga100);
-
-NVKM_GSP_FIRMWARE(ga102);
-NVKM_GSP_FIRMWARE(ga103);
-NVKM_GSP_FIRMWARE(ga104);
-NVKM_GSP_FIRMWARE(ga106);
-NVKM_GSP_FIRMWARE(ga107);
-
-NVKM_GSP_FIRMWARE(ad102);
-NVKM_GSP_FIRMWARE(ad103);
-NVKM_GSP_FIRMWARE(ad104);
-NVKM_GSP_FIRMWARE(ad106);
-NVKM_GSP_FIRMWARE(ad107);
+const struct nvkm_rm_api_gsp
+r535_gsp = {
+ .set_rmargs = r535_gsp_set_rmargs,
+ .set_system_info = r535_gsp_set_system_info,
+ .get_static_info = r535_gsp_get_static_info,
+ .xlat_mc_engine_idx = r535_gsp_xlat_mc_engine_idx,
+ .drop_send_user_shared_data = r535_gsp_drop_send_user_shared_data,
+ .sr_data_size = r535_gsp_sr_data_size,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c
index 1b4619ff9e8e..a8c42ec0367b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c
@@ -19,26 +19,27 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
+#include <rm/engine.h>
-#include <subdev/gsp.h>
+#include "nvrm/nvdec.h"
-#include <nvif/class.h>
+static int
+r535_nvdec_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvdec)
+{
+ NV_BSP_ALLOCATION_PARAMETERS *args;
-static const struct nvkm_engine_func
-ad102_nvenc = {
- .sclass = {
- { -1, -1, NVC9B7_VIDEO_ENCODER },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvdec);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
-int
-ad102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvenc **pnvenc)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvenc_new(&ad102_nvenc, device, type, inst, pnvenc);
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(nvdec, args);
}
+
+const struct nvkm_rm_api_engine
+r535_nvdec = {
+ .alloc = r535_nvdec_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c
index 7bfa6240d283..acb3ce8bb9de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c
@@ -19,28 +19,27 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "gf100.h"
+#include <rm/engine.h>
-#include <subdev/gsp.h>
+#include "nvrm/nvenc.h"
-#include <nvif/class.h>
+static int
+r535_nvenc_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvenc)
+{
+ NV_MSENC_ALLOCATION_PARAMETERS *args;
-static const struct gf100_gr_func
-ad102_gr = {
- .sclass = {
- { -1, -1, FERMI_TWOD_A },
- { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
- { -1, -1, ADA_A },
- { -1, -1, ADA_COMPUTE_A },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvenc);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
-int
-ad102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_gr_new(&ad102_gr, device, type, inst, pgr);
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(nvenc, args);
}
+
+const struct nvkm_rm_api_engine
+r535_nvenc = {
+ .alloc = r535_nvenc_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c
new file mode 100644
index 000000000000..fbc4080ad8d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/nvjpg.h"
+
+static int
+r535_nvjpg_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvjpg)
+{
+ NV_NVJPG_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvjpg);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(nvjpg, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_nvjpg = {
+ .alloc = r535_nvjpg_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h
new file mode 100644
index 000000000000..cbc7e611fbda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ALLOC_H__
+#define __NVRM_ALLOC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct rpc_gsp_rm_alloc_v03_00
+{
+ NvHandle hClient;
+ NvHandle hParent;
+ NvHandle hObject;
+ NvU32 hClass;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvU32 flags;
+ NvU8 reserved[4];
+ NvU8 params[];
+} rpc_gsp_rm_alloc_v03_00;
+
+typedef struct NVOS00_PARAMETERS_v03_00
+{
+ NvHandle hRoot;
+ NvHandle hObjectParent;
+ NvHandle hObjectOld;
+ NvV32 status;
+} NVOS00_PARAMETERS_v03_00;
+
+typedef struct rpc_free_v03_00
+{
+ NVOS00_PARAMETERS_v03_00 params;
+} rpc_free_v03_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h
new file mode 100644
index 000000000000..60b0b08491ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_BAR_H__
+#define __NVRM_BAR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef enum
+{
+ NV_RPC_UPDATE_PDE_BAR_1,
+ NV_RPC_UPDATE_PDE_BAR_2,
+ NV_RPC_UPDATE_PDE_BAR_INVALID,
+} NV_RPC_UPDATE_PDE_BAR_TYPE;
+
+typedef struct UpdateBarPde_v15_00
+{
+ NV_RPC_UPDATE_PDE_BAR_TYPE barType;
+ NvU64 entryValue NV_ALIGN_BYTES(8);
+ NvU64 entryLevelShift NV_ALIGN_BYTES(8);
+} UpdateBarPde_v15_00;
+
+typedef struct rpc_update_bar_pde_v15_00
+{
+ UpdateBarPde_v15_00 info;
+} rpc_update_bar_pde_v15_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h
new file mode 100644
index 000000000000..90b0325203d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CE_H__
+#define __NVRM_CE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct NVC0B5_ALLOCATION_PARAMETERS {
+ NvU32 version;
+ NvU32 engineType;
+} NVC0B5_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h
new file mode 100644
index 000000000000..df0e63c0cb6b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CLIENT_H__
+#define __NVRM_CLIENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+ NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+ NvU32 processID;
+ char processName[NV_PROC_NAME_MAX_LENGTH];
+} NV0000_ALLOC_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h
new file mode 100644
index 000000000000..77f10acd82c9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CTRL_H__
+#define __NVRM_CTRL_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct rpc_gsp_rm_control_v03_00
+{
+ NvHandle hClient;
+ NvHandle hObject;
+ NvU32 cmd;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvU32 flags;
+ NvU8 params[];
+} rpc_gsp_rm_control_v03_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h
new file mode 100644
index 000000000000..3933b9ad61ce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DEVICE_H__
+#define __NVRM_DEVICE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV0080_ALLOC_PARAMETERS {
+ NvU32 deviceId;
+ NvHandle hClientShare;
+ NvHandle hTargetClient;
+ NvHandle hTargetDevice;
+ NvV32 flags;
+ NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8);
+ NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8);
+ NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8);
+ NvV32 vaMode;
+} NV0080_ALLOC_PARAMETERS;
+
+#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV2080_ALLOC_PARAMETERS {
+ NvU32 subDeviceId;
+} NV2080_ALLOC_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h
new file mode 100644
index 000000000000..7b7539639540
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h
@@ -0,0 +1,741 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DISP_H__
+#define __NVRM_DISP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
+ NvU32 instMemAddrSpace;
+ NvU32 instMemCpuCacheAttr;
+} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
+
+#define NV_MEMORY_WRITECOMBINED 2
+
+#define NV04_DISPLAY_COMMON (0x00000073)
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+ NvU32 feHwSysCap;
+ NvU32 windowPresentMask;
+ NvBool bFbRemapperEnabled;
+ NvU32 numHeads;
+ NvBool bPrimaryVga;
+ NvU32 i2cPort;
+ NvU32 internalDispActiveMask;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */
+
+#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
+ NvU32 status;
+ NvU16 backLightDataSize;
+ NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
+} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
+ NvU32 subDeviceInstance;
+} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 numHeads;
+} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 headMask;
+} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayMask;
+ NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_MAX_CONNECTORS 4U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 DDCPartners;
+ NvU32 count;
+ struct {
+ NvU32 index;
+ NvU32 type;
+ NvU32 location;
+ } data[NV0073_CTRL_MAX_CONNECTORS];
+ NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 index;
+ NvU32 type;
+ NvU32 protocol;
+ NvU32 ditherType;
+ NvU32 ditherAlgo;
+ NvU32 location;
+ NvU32 rootPortId;
+ NvU32 dcbIndex;
+ NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
+ NvBool bIsLitByVbios;
+ NvBool bIsDispDynamic;
+} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+ NvBool bDscSupported;
+ NvU32 encoderColorFormatMask;
+ NvU32 lineBufferSizeKB;
+ NvU32 rateBufferSizeKB;
+ NvU32 bitsPerPixelPrecision;
+ NvU32 maxNumHztSlices;
+ NvU32 lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 sorIndex;
+ NvU32 maxLinkRate;
+ NvU32 dpVersionsSupported;
+ NvU32 UHBRSupported;
+ NvBool bIsMultistreamSupported;
+ NvBool bIsSCEnabled;
+ NvBool bHasIncreasedWatermarkLimits;
+ NvBool bIsPC2Disabled;
+ NvBool isSingleHeadMSTSupported;
+ NvBool bFECSupported;
+ NvBool bIsTrainPhyRepeater;
+ NvBool bOverrideLinkBw;
+ NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
+
+#define NV2080_NOTIFIERS_HOTPLUG (1)
+
+typedef struct {
+ NvU32 plugDisplayMask;
+ NvU32 unplugDisplayMask;
+} Nv2080HotplugNotification;
+
+#define NV2080_NOTIFIERS_DP_IRQ (7)
+
+typedef struct Nv2080DpIrqNotificationRec {
+ NvU32 displayId;
+} Nv2080DpIrqNotification;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 displayMask;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 flags2;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 flags;
+ NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
+ NvU32 displayMask;
+ NvU32 sorType;
+} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 sorExcludeMask;
+ NvU32 slaveDisplayId;
+ NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
+ NvBool bIs2Head1Or;
+ NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+ NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+ NvU8 reservedSorMask;
+ NvU32 flags;
+} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 brightness;
+ NvBool bUncalibrated;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U
+
+typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 numELDSize;
+ NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
+ NvU32 maxFreqSupported;
+ NvU32 ctrl;
+ NvU32 deviceEntry;
+} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U)
+
+#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 bufferSize;
+ NvU32 flags;
+ NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
+} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
+ NvU8 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 enable;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 caps;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+
+#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 transmitControl;
+ NvU32 packetSize;
+ NvU32 targetHead;
+ NvBool bUsePsrHeadforSdp;
+ NvU8 aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
+ NvU8 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 mute;
+} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
+
+#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvBool bAddrOnly;
+ NvU32 cmd;
+ NvU32 addr;
+ NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
+ NvU32 size;
+ NvU32 replyType;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U)
+
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+ // In
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+ // Out
+ NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+ NvU8 linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 cmd;
+ NvU32 data;
+ NvU32 err;
+ NvU32 retryTimeMs;
+ NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_UNUSED 3:3
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U)
+
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET 22:19
+#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U)
+
+#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_MAX_LANES 8U
+
+typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 numLanes;
+ NvU32 data[NV0073_CTRL_MAX_LANES];
+} NV0073_CTRL_DP_LANE_DATA_PARAMS;
+
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 preferredDisplayId;
+
+ NvBool force;
+ NvBool useBFM;
+
+ NvU32 displayIdAssigned;
+ NvU32 allDisplayMask;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 sorIndex;
+ NvU32 dpLink;
+
+ NvBool bEnableOverride;
+ NvBool bMST;
+ NvU32 singleHeadMultistreamMode;
+ NvU32 hBlankSym;
+ NvU32 vBlankSym;
+ NvU32 colorFormat;
+ NvBool bEnableTwoHeadOneOr;
+
+ struct {
+ NvU32 slotStart;
+ NvU32 slotEnd;
+ NvU32 PBN;
+ NvU32 Timeslice;
+ NvBool sendACT; // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT
+ NvU32 singleHeadMSTPipeline;
+ NvBool bEnableAudioOverRightPanel;
+ } MST;
+
+ struct {
+ NvBool bEnhancedFraming;
+ NvU32 tuSize;
+ NvU32 waterMark;
+ NvU32 actualPclkHz; // deprecated -Use MvidWarParams
+ NvU32 linkClkFreqHz; // deprecated -Use MvidWarParams
+ NvBool bEnableAudioOverRightPanel;
+ struct {
+ NvU32 activeCnt;
+ NvU32 activeFrac;
+ NvU32 activePolarity;
+ NvBool mvidWarEnabled;
+ struct {
+ NvU32 actualPclkHz;
+ NvU32 linkClkFreqHz;
+ } MvidWarParams;
+ } Legacy;
+ } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvBool enable;
+} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+ NvU32 addressSpace;
+ NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NvU32 cacheSnoop;
+ NvU32 hclass;
+ NvU32 channelInstance;
+ NvBool valid;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define ADDR_SYSMEM (1) // System memory (PCI)
+
+#define ADDR_FBMEM 2 // Frame buffer memory space
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // All PIO channels have two instances (one per head).
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
+} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // Note that core channel has only one instance
+ // while all others have two (one per head).
+ NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+ NvU32 offset; // Initial offset for put/get, usually zero.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+ NvU32 flags;
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
+
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+#endif
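
Note on notation: the bare HI:LO tokens in the definitions above (for example, NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17) are DRF-style bit ranges, not values. A minimal sketch of how such a range can be turned into a mask and a shifted field value follows; the FLD_* helper names are invented for illustration and are not part of these headers.

/* "1 ? 19:17" selects the high bit of a range, "0 ? 19:17" the low bit. */
#define FLD_HI(range)     (1 ? range)
#define FLD_LO(range)     (0 ? range)
#define FLD_MASK(range)   ((~0U >> (31 - FLD_HI(range))) & (~0U << FLD_LO(range)))
#define FLD_SET(range, v) (((NvU32)(v) << FLD_LO(range)) & FLD_MASK(range))
#define FLD_GET(range, x) (((NvU32)(x) & FLD_MASK(range)) >> FLD_LO(range))

/* Example: request a 2.70 Gb/s DP link in the DFP flags word. */
NvU32 dfp_flags = FLD_SET(NV0073_CTRL_DFP_FLAGS_DP_LINK_BW,
			  NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS);
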
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h
new file mode 100644
index 000000000000..b26dfc8f8087
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ENGINE_H__
+#define __NVRM_ENGINE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define MC_ENGINE_IDX_NULL 0 // This must be 0
+#define MC_ENGINE_IDX_TMR 1
+#define MC_ENGINE_IDX_DISP 2
+#define MC_ENGINE_IDX_FB 3
+#define MC_ENGINE_IDX_FIFO 4
+#define MC_ENGINE_IDX_VIDEO 5
+#define MC_ENGINE_IDX_MD 6
+#define MC_ENGINE_IDX_BUS 7
+#define MC_ENGINE_IDX_PMGR 8
+#define MC_ENGINE_IDX_VP2 9
+#define MC_ENGINE_IDX_CIPHER 10
+#define MC_ENGINE_IDX_BIF 11
+#define MC_ENGINE_IDX_PPP 12
+#define MC_ENGINE_IDX_PRIVRING 13
+#define MC_ENGINE_IDX_PMU 14
+#define MC_ENGINE_IDX_CE0 15
+#define MC_ENGINE_IDX_CE1 16
+#define MC_ENGINE_IDX_CE2 17
+#define MC_ENGINE_IDX_CE3 18
+#define MC_ENGINE_IDX_CE4 19
+#define MC_ENGINE_IDX_CE5 20
+#define MC_ENGINE_IDX_CE6 21
+#define MC_ENGINE_IDX_CE7 22
+#define MC_ENGINE_IDX_CE8 23
+#define MC_ENGINE_IDX_CE9 24
+#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE9
+#define MC_ENGINE_IDX_VIC 35
+#define MC_ENGINE_IDX_ISOHUB 36
+#define MC_ENGINE_IDX_VGPU 37
+#define MC_ENGINE_IDX_MSENC 38
+#define MC_ENGINE_IDX_MSENC1 39
+#define MC_ENGINE_IDX_MSENC2 40
+#define MC_ENGINE_IDX_C2C 41
+#define MC_ENGINE_IDX_LTC 42
+#define MC_ENGINE_IDX_FBHUB 43
+#define MC_ENGINE_IDX_HDACODEC 44
+#define MC_ENGINE_IDX_GMMU 45
+#define MC_ENGINE_IDX_SEC2 46
+#define MC_ENGINE_IDX_FSP 47
+#define MC_ENGINE_IDX_NVLINK 48
+#define MC_ENGINE_IDX_GSP 49
+#define MC_ENGINE_IDX_NVJPG 50
+#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
+#define MC_ENGINE_IDX_NVJPEG1 51
+#define MC_ENGINE_IDX_NVJPEG2 52
+#define MC_ENGINE_IDX_NVJPEG3 53
+#define MC_ENGINE_IDX_NVJPEG4 54
+#define MC_ENGINE_IDX_NVJPEG5 55
+#define MC_ENGINE_IDX_NVJPEG6 56
+#define MC_ENGINE_IDX_NVJPEG7 57
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT 58
+#define MC_ENGINE_IDX_ACCESS_CNTR 59
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 60
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 61
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 62
+#define MC_ENGINE_IDX_INFO_FAULT 63
+#define MC_ENGINE_IDX_BSP 64
+#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
+#define MC_ENGINE_IDX_NVDEC1 65
+#define MC_ENGINE_IDX_NVDEC2 66
+#define MC_ENGINE_IDX_NVDEC3 67
+#define MC_ENGINE_IDX_NVDEC4 68
+#define MC_ENGINE_IDX_NVDEC5 69
+#define MC_ENGINE_IDX_NVDEC6 70
+#define MC_ENGINE_IDX_NVDEC7 71
+#define MC_ENGINE_IDX_CPU_DOORBELL 72
+#define MC_ENGINE_IDX_PRIV_DOORBELL 73
+#define MC_ENGINE_IDX_MMU_ECC_ERROR 74
+#define MC_ENGINE_IDX_BLG 75
+#define MC_ENGINE_IDX_PERFMON 76
+#define MC_ENGINE_IDX_BUF_RESET 77
+#define MC_ENGINE_IDX_XBAR 78
+#define MC_ENGINE_IDX_ZPW 79
+#define MC_ENGINE_IDX_OFA0 80
+#define MC_ENGINE_IDX_TEGRA 81
+#define MC_ENGINE_IDX_GR 82
+#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
+#define MC_ENGINE_IDX_GR1 83
+#define MC_ENGINE_IDX_GR2 84
+#define MC_ENGINE_IDX_GR3 85
+#define MC_ENGINE_IDX_GR4 86
+#define MC_ENGINE_IDX_GR5 87
+#define MC_ENGINE_IDX_GR6 88
+#define MC_ENGINE_IDX_GR7 89
+#define MC_ENGINE_IDX_ESCHED 90
+#define MC_ENGINE_IDX_ESCHED__SIZE 64
+#define MC_ENGINE_IDX_GR_FECS_LOG 154
+#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG
+#define MC_ENGINE_IDX_GR1_FECS_LOG 155
+#define MC_ENGINE_IDX_GR2_FECS_LOG 156
+#define MC_ENGINE_IDX_GR3_FECS_LOG 157
+#define MC_ENGINE_IDX_GR4_FECS_LOG 158
+#define MC_ENGINE_IDX_GR5_FECS_LOG 159
+#define MC_ENGINE_IDX_GR6_FECS_LOG 160
+#define MC_ENGINE_IDX_GR7_FECS_LOG 161
+#define MC_ENGINE_IDX_TMR_SWRL 162
+#define MC_ENGINE_IDX_DISP_GSP 163
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 164
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 165
+#define MC_ENGINE_IDX_PXUC 166
+#define MC_ENGINE_IDX_MAX 167 // This must be kept as the max bit if
+#define MC_ENGINE_IDX_INVALID 0xFFFFFFFF
+#define MC_ENGINE_IDX_GRn(x) (MC_ENGINE_IDX_GR0 + (x))
+#define MC_ENGINE_IDX_GRn_FECS_LOG(x) (MC_ENGINE_IDX_GR0_FECS_LOG + (x))
+#define MC_ENGINE_IDX_CE(x) (MC_ENGINE_IDX_CE0 + (x))
+#define MC_ENGINE_IDX_MSENCn(x) (MC_ENGINE_IDX_MSENC + (x))
+#define MC_ENGINE_IDX_NVDECn(x) (MC_ENGINE_IDX_NVDEC + (x))
+#define MC_ENGINE_IDX_NVJPEGn(x) (MC_ENGINE_IDX_NVJPEG + (x))
+#define MC_ENGINE_IDX_ESCHEDn(x) (MC_ENGINE_IDX_ESCHED + (x))
+
+typedef enum
+{
+ RM_ENGINE_TYPE_NULL = (0x00000000),
+ RM_ENGINE_TYPE_GR0 = (0x00000001),
+ RM_ENGINE_TYPE_GR1 = (0x00000002),
+ RM_ENGINE_TYPE_GR2 = (0x00000003),
+ RM_ENGINE_TYPE_GR3 = (0x00000004),
+ RM_ENGINE_TYPE_GR4 = (0x00000005),
+ RM_ENGINE_TYPE_GR5 = (0x00000006),
+ RM_ENGINE_TYPE_GR6 = (0x00000007),
+ RM_ENGINE_TYPE_GR7 = (0x00000008),
+ RM_ENGINE_TYPE_COPY0 = (0x00000009),
+ RM_ENGINE_TYPE_COPY1 = (0x0000000a),
+ RM_ENGINE_TYPE_COPY2 = (0x0000000b),
+ RM_ENGINE_TYPE_COPY3 = (0x0000000c),
+ RM_ENGINE_TYPE_COPY4 = (0x0000000d),
+ RM_ENGINE_TYPE_COPY5 = (0x0000000e),
+ RM_ENGINE_TYPE_COPY6 = (0x0000000f),
+ RM_ENGINE_TYPE_COPY7 = (0x00000010),
+ RM_ENGINE_TYPE_COPY8 = (0x00000011),
+ RM_ENGINE_TYPE_COPY9 = (0x00000012),
+ RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
+ RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
+ RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
+ RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
+ RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
+ RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
+ RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
+ RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
+ RM_ENGINE_TYPE_NVENC0 = (0x00000025),
+ RM_ENGINE_TYPE_NVENC1 = (0x00000026),
+ RM_ENGINE_TYPE_NVENC2 = (0x00000027),
+ RM_ENGINE_TYPE_VP = (0x00000028),
+ RM_ENGINE_TYPE_ME = (0x00000029),
+ RM_ENGINE_TYPE_PPP = (0x0000002a),
+ RM_ENGINE_TYPE_MPEG = (0x0000002b),
+ RM_ENGINE_TYPE_SW = (0x0000002c),
+ RM_ENGINE_TYPE_TSEC = (0x0000002d),
+ RM_ENGINE_TYPE_VIC = (0x0000002e),
+ RM_ENGINE_TYPE_MP = (0x0000002f),
+ RM_ENGINE_TYPE_SEC2 = (0x00000030),
+ RM_ENGINE_TYPE_HOST = (0x00000031),
+ RM_ENGINE_TYPE_DPU = (0x00000032),
+ RM_ENGINE_TYPE_PMU = (0x00000033),
+ RM_ENGINE_TYPE_FBFLCN = (0x00000034),
+ RM_ENGINE_TYPE_NVJPEG0 = (0x00000035),
+ RM_ENGINE_TYPE_NVJPEG1 = (0x00000036),
+ RM_ENGINE_TYPE_NVJPEG2 = (0x00000037),
+ RM_ENGINE_TYPE_NVJPEG3 = (0x00000038),
+ RM_ENGINE_TYPE_NVJPEG4 = (0x00000039),
+ RM_ENGINE_TYPE_NVJPEG5 = (0x0000003a),
+ RM_ENGINE_TYPE_NVJPEG6 = (0x0000003b),
+ RM_ENGINE_TYPE_NVJPEG7 = (0x0000003c),
+ RM_ENGINE_TYPE_OFA = (0x0000003d),
+ RM_ENGINE_TYPE_LAST = (0x0000003e),
+} RM_ENGINE_TYPE;
+
+#define NV2080_ENGINE_TYPE_NULL (0x00000000)
+#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
+#define NV2080_ENGINE_TYPE_GR1 (0x00000002)
+#define NV2080_ENGINE_TYPE_GR2 (0x00000003)
+#define NV2080_ENGINE_TYPE_GR3 (0x00000004)
+#define NV2080_ENGINE_TYPE_GR4 (0x00000005)
+#define NV2080_ENGINE_TYPE_GR5 (0x00000006)
+#define NV2080_ENGINE_TYPE_GR6 (0x00000007)
+#define NV2080_ENGINE_TYPE_GR7 (0x00000008)
+#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
+#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a)
+#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b)
+#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c)
+#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d)
+#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e)
+#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f)
+#define NV2080_ENGINE_TYPE_COPY7 (0x00000010)
+#define NV2080_ENGINE_TYPE_COPY8 (0x00000011)
+#define NV2080_ENGINE_TYPE_COPY9 (0x00000012)
+#define NV2080_ENGINE_TYPE_BSP (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
+#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014)
+#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015)
+#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016)
+#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017)
+#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018)
+#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019)
+#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a)
+#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
+#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c)
+#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d)
+#define NV2080_ENGINE_TYPE_VP (0x0000001e)
+#define NV2080_ENGINE_TYPE_ME (0x0000001f)
+#define NV2080_ENGINE_TYPE_PPP (0x00000020)
+#define NV2080_ENGINE_TYPE_MPEG (0x00000021)
+#define NV2080_ENGINE_TYPE_SW (0x00000022)
+#define NV2080_ENGINE_TYPE_CIPHER (0x00000023)
+#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER
+#define NV2080_ENGINE_TYPE_VIC (0x00000024)
+#define NV2080_ENGINE_TYPE_MP (0x00000025)
+#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
+#define NV2080_ENGINE_TYPE_HOST (0x00000027)
+#define NV2080_ENGINE_TYPE_DPU (0x00000028)
+#define NV2080_ENGINE_TYPE_PMU (0x00000029)
+#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a)
+#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
+#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c)
+#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d)
+#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e)
+#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f)
+#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030)
+#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031)
+#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032)
+#define NV2080_ENGINE_TYPE_OFA (0x00000033)
+#define NV2080_ENGINE_TYPE_LAST (0x0000003e)
+#define NV2080_ENGINE_TYPE_ALLENGINES (0xffffffff)
+#define NV2080_ENGINE_TYPE_COPY_SIZE 10
+#define NV2080_ENGINE_TYPE_NVENC_SIZE 3
+#define NV2080_ENGINE_TYPE_NVJPEG_SIZE 8
+#define NV2080_ENGINE_TYPE_NVDEC_SIZE 8
+#define NV2080_ENGINE_TYPE_GR_SIZE 8
+#define NV2080_ENGINE_TYPE_COPY(i) (NV2080_ENGINE_TYPE_COPY0+(i))
+#define NV2080_ENGINE_TYPE_IS_COPY(i) (((i) >= NV2080_ENGINE_TYPE_COPY0) && ((i) <= NV2080_ENGINE_TYPE_COPY9))
+#define NV2080_ENGINE_TYPE_COPY_IDX(i) ((i) - NV2080_ENGINE_TYPE_COPY0)
+#define NV2080_ENGINE_TYPE_NVENC(i) (NV2080_ENGINE_TYPE_NVENC0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVENC(i) (((i) >= NV2080_ENGINE_TYPE_NVENC0) && ((i) < NV2080_ENGINE_TYPE_NVENC(NV2080_ENGINE_TYPE_NVENC_SIZE)))
+#define NV2080_ENGINE_TYPE_NVENC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVENC0)
+#define NV2080_ENGINE_TYPE_NVDEC(i) (NV2080_ENGINE_TYPE_NVDEC0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVDEC(i) (((i) >= NV2080_ENGINE_TYPE_NVDEC0) && ((i) < NV2080_ENGINE_TYPE_NVDEC(NV2080_ENGINE_TYPE_NVDEC_SIZE)))
+#define NV2080_ENGINE_TYPE_NVDEC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVDEC0)
+#define NV2080_ENGINE_TYPE_NVJPEG(i) (NV2080_ENGINE_TYPE_NVJPEG0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVJPEG(i) (((i) >= NV2080_ENGINE_TYPE_NVJPEG0) && ((i) < NV2080_ENGINE_TYPE_NVJPEG(NV2080_ENGINE_TYPE_NVJPEG_SIZE)))
+#define NV2080_ENGINE_TYPE_NVJPEG_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVJPEG0)
+#define NV2080_ENGINE_TYPE_GR(i) (NV2080_ENGINE_TYPE_GR0 + (i))
+#define NV2080_ENGINE_TYPE_IS_GR(i) (((i) >= NV2080_ENGINE_TYPE_GR0) && ((i) < NV2080_ENGINE_TYPE_GR(NV2080_ENGINE_TYPE_GR_SIZE)))
+#define NV2080_ENGINE_TYPE_GR_IDX(i) ((i) - NV2080_ENGINE_TYPE_GR0)
+#define NV2080_ENGINE_TYPE_IS_VALID(i) (((i) > (NV2080_ENGINE_TYPE_NULL)) && ((i) < (NV2080_ENGINE_TYPE_LAST)))
+#endif
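
The NV2080_ENGINE_TYPE_COPY/NVDEC/NVENC/NVJPEG/GR helper macros above convert between an absolute engine type and a zero-based per-class index. A small sketch of how they compose; the function name is invented for illustration:

static inline int
nv2080_copy_engine_index(NvU32 type)
{
	if (!NV2080_ENGINE_TYPE_IS_COPY(type))
		return -1;				/* not a copy engine */
	return NV2080_ENGINE_TYPE_COPY_IDX(type);	/* CE0..CE9 -> 0..9 */
}
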
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h
new file mode 100644
index 000000000000..057f7220c225
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_EVENT_H__
+#define __NVRM_EVENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
+
+typedef struct NV0005_ALLOC_PARAMETERS {
+ NvHandle hParentClient;
+ NvHandle hSrcResource;
+
+ NvV32 hClass;
+ NvV32 notifyIndex;
+ NV_DECLARE_ALIGNED(NvP64 data, 8);
+} NV0005_ALLOC_PARAMETERS;
+
+#define NV01_EVENT_CLIENT_RM (0x04000000)
+
+#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
+ NvU32 event;
+ NvU32 action;
+ NvBool bNotifyState;
+ NvU32 info32;
+ NvU16 info16;
+} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
+
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
+
+typedef struct rpc_post_event_v17_00
+{
+ NvHandle hClient;
+ NvHandle hEvent;
+ NvU32 notifyIndex;
+ NvU32 data;
+ NvU16 info16;
+ NvU32 status;
+ NvU32 eventDataSize;
+ NvBool bNotifyList;
+ NvU8 eventData[];
+} rpc_post_event_v17_00;
+#endif
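
A rough sketch of how the two structures above fit together when arming a GSP-RM event: an NV01_EVENT_KERNEL_CALLBACK_EX object is allocated with NV0005_ALLOC_PARAMETERS, then delivery is enabled via NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION. The handles and the event index below are placeholders, not values defined in this header.

NV0005_ALLOC_PARAMETERS event_alloc = {
	.hParentClient = 0xc1d00001,			/* placeholder client handle */
	.hSrcResource  = 0xc1d00002,			/* placeholder subdevice handle */
	.hClass        = NV01_EVENT_KERNEL_CALLBACK_EX,
	.notifyIndex   = NV01_EVENT_CLIENT_RM | 0x00,	/* RM-side event, index 0 */
};

NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS event_notify = {
	.event  = 0x00,					/* same event index */
	.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT,
};
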
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h
new file mode 100644
index 000000000000..28786ef013a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FBSR_H__
+#define __NVRM_FBSR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_MEMORY_LIST_FBMEM (0x00000082)
+
+#define NV01_MEMORY_LIST_SYSTEM (0x00000081)
+
+#define NVOS02_FLAGS_PHYSICALITY 7:4
+#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000)
+#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001)
+#define NVOS02_FLAGS_LOCATION 11:8
+#define NVOS02_FLAGS_LOCATION_PCI (0x00000000)
+#define NVOS02_FLAGS_LOCATION_AGP (0x00000001)
+#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002)
+#define NVOS02_FLAGS_COHERENCY 15:12
+#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000)
+#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001)
+#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002)
+#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003)
+#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004)
+#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005)
+#define NVOS02_FLAGS_ALLOC 17:16
+#define NVOS02_FLAGS_ALLOC_NONE (0x00000001)
+#define NVOS02_FLAGS_GPU_CACHEABLE 18:18
+#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000)
+#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001)
+#define NVOS02_FLAGS_KERNEL_MAPPING 19:19
+#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000)
+#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001)
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25
+#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002)
+#define NVOS02_FLAGS_MAPPING 31:30
+#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001)
+#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002)
+
+struct pte_desc
+{
+ NvU32 idr:2;
+ NvU32 reserved1:14;
+ NvU32 length:16;
+ union {
+ NvU64 pte; // PTE when IDR==0; PDE when IDR > 0
+ NvU64 pde; // PTE when IDR==0; PDE when IDR > 0
+ } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0
+};
+
+typedef struct rpc_alloc_memory_v13_01
+{
+ NvHandle hClient;
+ NvHandle hDevice;
+ NvHandle hMemory;
+ NvU32 hClass;
+ NvU32 flags;
+ NvU32 pteAdjust;
+ NvU32 format;
+ NvU64 length NV_ALIGN_BYTES(8);
+ NvU32 pageCount;
+ struct pte_desc pteDesc;
+} rpc_alloc_memory_v13_01;
+
+#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest.
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+ NvU32 fbsrType;
+ NvU32 numRegions;
+ NvHandle hClient;
+ NvHandle hSysMem;
+ NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8);
+ NvBool bEnteringGcoffState;
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
+ NvU32 fbsrType;
+ NvHandle hClient;
+ NvHandle hVidMem;
+ NV_DECLARE_ALIGNED(NvU64 vidOffset, 8);
+ NV_DECLARE_ALIGNED(NvU64 sysOffset, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
+#endif
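
The pteDesc member at the end of rpc_alloc_memory_v13_01 carries a variable-length tail of 8-byte pte_pde entries after the fixed part of the message. A sizing helper along these lines (the function name is illustrative) shows the intended layout, assuming one PTE per page:

static inline size_t
rpc_alloc_memory_msg_size(NvU32 page_count)
{
	rpc_alloc_memory_v13_01 *rpc = NULL;

	/* sizeof() does not evaluate its operand, so the NULL pointer is safe. */
	return sizeof(*rpc) + (size_t)page_count * sizeof(rpc->pteDesc.pte_pde[0]);
}
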
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h
new file mode 100644
index 000000000000..325fdd8b6090
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FIFO_H__
+#define __NVRM_FIFO_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
+
+typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
+ NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
+ NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+ NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+ NvU32 numPbdmas;
+ char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
+} NV2080_CTRL_FIFO_DEVICE_ENTRY;
+
+#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
+ NvU32 baseIndex;
+ NvU32 numEntries;
+ NvBool bMore;
+ // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+ NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
+
+typedef enum
+{
+ /* *************************************************************************
+ * Bug 3820969
+ * THINK BEFORE CHANGING ENUM ORDER HERE.
+ * VGPU-guest uses this same ordering. Because this enum is not versioned,
+ * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+ * ************************************************************************/
+
+ // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc.,
+ ENGINE_INFO_TYPE_ENG_DESC = 0,
+
+ // HW engine ID
+ ENGINE_INFO_TYPE_FIFO_TAG,
+
+ // RM_ENGINE_TYPE_*
+ ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
+
+ //
+ // runlist id (meaning varies by GPU)
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST,
+
+ // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_*
+ ENGINE_INFO_TYPE_MMU_FAULT_ID,
+
+ // ROBUST_CHANNEL_*
+ ENGINE_INFO_TYPE_RC_MASK,
+
+ // Reset Bit Position. On Ampere, only valid if not _INVALID
+ ENGINE_INFO_TYPE_RESET,
+
+ // Interrupt Bit Position
+ ENGINE_INFO_TYPE_INTR,
+
+ // log2(MC_ENGINE_*)
+ ENGINE_INFO_TYPE_MC,
+
+ // The DEV_TYPE_ENUM for this engine
+ ENGINE_INFO_TYPE_DEV_TYPE_ENUM,
+
+ // The particular instance of this engine type
+ ENGINE_INFO_TYPE_INSTANCE_ID,
+
+ //
+ // The base address for this engine's NV_RUNLIST. Valid only on Ampere+
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST_PRI_BASE,
+
+ //
+ // If this entry is a host-driven engine.
+ // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry.
+ //
+ ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE,
+
+ //
+ // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID,
+
+ //
+ // The base address for this engine's NV_CHRAM registers. Valid only on
+ // Ampere+
+ //
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_CHRAM_PRI_BASE,
+
+ // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM
+ ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+ // Used for iterating the engine info table by the index passed.
+ ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+
+ // Size of FIFO_ENGINE_LIST.engineData
+ ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID,
+
+ // Input-only parameter for kfifoEngineInfoXlate.
+ ENGINE_INFO_TYPE_PBDMA_ID
+
+ /* *************************************************************************
+ * Bug 3820969
+ * THINK BEFORE CHANGING ENUM ORDER HERE.
+ * VGPU-guest uses this same ordering. Because this enum is not versioned,
+ * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+ * ************************************************************************/
+} ENGINE_INFO_TYPE;
+
+#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
+ NvU32 size;
+} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40
+
+typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO {
+ NvU32 engDesc;
+ NvU32 ctxAttr;
+ NvU32 ctxBufferSize;
+ NvU32 addrSpaceList;
+ NvU32 registerBase;
+} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO;
+
+#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
+ NvU32 numConstructedFalcons;
+ NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
+} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
+
+#define NV_MAX_SUBDEVICES 8
+
+typedef struct NV_MEMORY_DESC_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 addressSpace;
+ NvU32 cacheAttrib;
+} NV_MEMORY_DESC_PARAMS;
+
+#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
+
+#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
+
+typedef struct NV_CHANNEL_ALLOC_PARAMS {
+
+ NvHandle hObjectError; // error context DMA
+ NvHandle hObjectBuffer; // no longer used
+ NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
+ NvU32 gpFifoEntries; // number of GP FIFO entries
+
+ NvU32 flags;
+
+
+ NvHandle hContextShare; // context share handle
+ NvHandle hVASpace; // VASpace for the channel
+
+ // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+ NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
+
+ // offset to beginning of UserD within hUserdMemory[x]
+ NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
+
+ // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated
+ NvU32 engineType;
+ // Channel identifier that is unique for the duration of a RM session
+ NvU32 cid;
+ // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+ NvU32 subDeviceId;
+ NvHandle hObjectEccError; // ECC error context DMA
+
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
+
+ NvHandle hPhysChannelGroup; // reserved
+ NvU32 internalFlags; // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
+ NvU32 ProcessID; // reserved
+ NvU32 SubProcessID; // reserved
+
+ // IV used for CPU-side encryption / GPU-side decryption.
+ NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // IV used for CPU-side decryption / GPU-side encryption.
+ NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+    // Nonce used for CPU-side signing / GPU-side signature verification.
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
+} NV_CHANNEL_ALLOC_PARAMS;
+
+typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
+
+#define NVOS04_FLAGS_CHANNEL_TYPE 1:0
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000
+#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE
+#define NVOS04_FLAGS_VPR 2:2
+#define NVOS04_FLAGS_VPR_FALSE 0x00000000
+#define NVOS04_FLAGS_VPR_TRUE 0x00000001
+#define NVOS04_FLAGS_CC_SECURE 2:2
+#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000
+#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002
+#define NVOS04_FLAGS_MAP_CHANNEL 30:30
+#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001
+
+typedef enum {
+ /*!
+ * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
+ * kernel CPU-RM clients.
+ */
+ ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
+ /*! @brief Error notifier is explicitly not set.
+ *
+ * The corresponding hErrorContext or hEccErrorContext must be
+ * NV01_NULL_OBJECT.
+ */
+ ERROR_NOTIFIER_TYPE_NONE,
+ /*! @brief Error notifier is a ContextDma */
+ ERROR_NOTIFIER_TYPE_CTXDMA,
+ /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
+ ERROR_NOTIFIER_TYPE_MEMORY
+} ErrorNotifierType;
+
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+
+#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */
+typedef struct NVA06F_CTRL_BIND_PARAMS {
+ NvU32 engineType;
+} NVA06F_CTRL_BIND_PARAMS;
+
+#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
+typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS {
+ NvBool bEnable;
+ NvBool bSkipSubmit;
+} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS;
+
+#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U
+
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY {
+ NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 physAttr;
+ NvU16 bufferId;
+ NvU8 bInitialize;
+ NvU8 bNonmapped;
+} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY;
+
+#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
+ NvU32 engineType;
+ NvHandle hClient;
+ NvU32 ChID;
+ NvHandle hChanClient;
+ NvHandle hObject;
+ NvHandle hVirtMemory;
+ NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 entryCount;
+ // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
+ NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
+} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
+
+typedef struct rpc_rc_triggered_v17_02
+{
+ NvU32 nv2080EngineType;
+ NvU32 chid;
+ NvU32 exceptType;
+ NvU32 scope;
+ NvU16 partitionAttributionId;
+} rpc_rc_triggered_v17_02;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h
index 6acb3f73242d..82c5ec727bb4 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h
@@ -1,30 +1,31 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GR_H__
+#define __NVRM_GR_H__
+#include <nvrm/nvtypes.h>
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
+
+#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19
+
+typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
+ NvU32 size;
+ NvU32 alignment;
+} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
+ NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
+} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
+ NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000)
@@ -54,4 +55,19 @@
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x00000019)
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U
+
+#include "fifo.h"
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h
new file mode 100644
index 000000000000..b6683a5bf870
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GSP_H__
+#define __NVRM_GSP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+ NvU32 performance;
+ NvBool supportCompressed;
+ NvBool supportISO;
+ NvBool bProtected;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+ NvU32 numFBRegions;
+ NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
+
+#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+ NvU32 index;
+ NvU32 flags;
+ NvU32 length;
+ NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+ NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 zcullMask;
+} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+ NvU32 BoardID;
+ char chipSKU[4];
+ char chipSKUMod[2];
+ char project[5];
+ char projectSKU[5];
+ char CDP[6];
+ char projectSKUMod[2];
+ NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+typedef enum
+{
+ COMPUTE_BRANDING_TYPE_NONE,
+ COMPUTE_BRANDING_TYPE_TESLA,
+} COMPUTE_BRANDING_TYPE;
+
+#define MAX_GPC_COUNT 32
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+ NvU32 totalVFs;
+ NvU32 firstVfOffset;
+ NvU32 vfFeatureMask;
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+ NvBool bSriovEnabled;
+ NvBool bSriovHeavyEnabled;
+ NvBool bEmulateVFBar0TlbInvalidationRegister;
+ NvBool bClientRmAllocatedCtxBuffer;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+#include "engine.h"
+
+#define NVGPU_ENGINE_CAPS_MASK_BITS 32
+
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
+
+typedef struct GspSMInfo_t
+{
+ NvU32 version;
+ NvU32 regBankCount;
+ NvU32 regBankRegCount;
+ NvU32 maxWarpsPerSM;
+ NvU32 maxThreadsPerWarp;
+ NvU32 geomGsObufEntries;
+ NvU32 geomXbufEntries;
+ NvU32 maxSPPerSM;
+ NvU32 rtCoreCount;
+} GspSMInfo;
+
+typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS {
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8,
+} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS;
+
+#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
+{
+ NvU32 numHeads;
+ NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
+{
+ NvU32 headIndex;
+ NvU32 maxHResolution;
+ NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+typedef struct GspStaticConfigInfo_t
+{
+ NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
+ NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
+ NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo;
+ NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT];
+ NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT];
+ NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
+ COMPUTE_BRANDING_TYPE computeBranding;
+
+ NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
+ NvU32 sriovMaxGfid;
+
+ NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
+
+ GspSMInfo SM_info;
+
+ NvBool poisonFuseEnabled;
+
+ NvU64 fb_length;
+ NvU32 fbio_mask;
+ NvU32 fb_bus_width;
+ NvU32 fb_ram_type;
+ NvU32 fbp_mask;
+ NvU32 l2_cache_size;
+
+ NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+ NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+
+ NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvBool bGpuInternalSku;
+ NvBool bIsQuadroGeneric;
+ NvBool bIsQuadroAd;
+ NvBool bIsNvidiaNvs;
+ NvBool bIsVgx;
+ NvBool bGeforceSmb;
+ NvBool bIsTitan;
+ NvBool bIsTesla;
+ NvBool bIsMobile;
+ NvBool bIsGc6Rtd3Allowed;
+ NvBool bIsGcOffRtd3Allowed;
+ NvBool bIsGcoffLegacyAllowed;
+
+ NvU64 bar1PdeBase;
+ NvU64 bar2PdeBase;
+
+ NvBool bVbiosValid;
+ NvU32 vbiosSubVendor;
+ NvU32 vbiosSubDevice;
+
+ NvBool bPageRetirementSupported;
+
+ NvBool bSplitVasBetweenServerClientRm;
+
+ NvBool bClRootportNeedsNosnoopWAR;
+
+ VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
+ VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
+ NvU64 displaylessMaxPixels;
+
+ // Client handle for internal RMAPI control.
+ NvHandle hInternalClient;
+
+ // Device handle for internal RMAPI control.
+ NvHandle hInternalDevice;
+
+ // Subdevice handle for internal RMAPI control.
+ NvHandle hInternalSubdevice;
+
+ NvBool bSelfHostedMode;
+ NvBool bAtsSupported;
+
+ NvBool bIsGpuUefi;
+} GspStaticConfigInfo;
+
+typedef struct rpc_unloading_guest_driver_v1F_07
+{
+ NvBool bInPMTransition;
+ NvBool bGc6Entering;
+ NvU32 newLevel;
+} rpc_unloading_guest_driver_v1F_07;
+
+typedef struct PACKED_REGISTRY_ENTRY
+{
+ NvU32 nameOffset;
+ NvU8 type;
+ NvU32 data;
+ NvU32 length;
+} PACKED_REGISTRY_ENTRY;
+
+typedef struct PACKED_REGISTRY_TABLE
+{
+ NvU32 size;
+ NvU32 numEntries;
+ PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
+} PACKED_REGISTRY_TABLE;
+
+typedef struct
+{
+ NvU16 deviceID; // deviceID
+ NvU16 vendorID; // vendorID
+ NvU16 subdeviceID; // subsystem deviceID
+ NvU16 subvendorID; // subsystem vendorID
+ NvU8 revisionID; // revision ID
+} BUSINFO;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct DOD_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 acpiIdListLen;
+ NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} DOD_METHOD_DATA;
+
+typedef struct JT_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 jtCaps;
+ NvU16 jtRevId;
+ NvBool bSBIOSCaps;
+} JT_METHOD_DATA;
+
+typedef struct MUX_METHOD_DATA_ELEMENT
+{
+ NvU32 acpiId;
+ NvU32 mode;
+ NV_STATUS status;
+} MUX_METHOD_DATA_ELEMENT;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct MUX_METHOD_DATA
+{
+ NvU32 tableLen;
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} MUX_METHOD_DATA;
+
+typedef struct CAPS_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 optimusCaps;
+} CAPS_METHOD_DATA;
+
+typedef struct ACPI_METHOD_DATA
+{
+ NvBool bValid;
+ DOD_METHOD_DATA dodMethodData;
+ JT_METHOD_DATA jtMethodData;
+ MUX_METHOD_DATA muxMethodData;
+ CAPS_METHOD_DATA capsMethodData;
+} ACPI_METHOD_DATA;
+
+typedef struct GSP_VF_INFO
+{
+ NvU32 totalVFs;
+ NvU32 firstVFOffset;
+ NvU64 FirstVFBar0Address;
+ NvU64 FirstVFBar1Address;
+ NvU64 FirstVFBar2Address;
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+} GSP_VF_INFO;
+
+typedef struct GspSystemInfo
+{
+ NvU64 gpuPhysAddr;
+ NvU64 gpuPhysFbAddr;
+ NvU64 gpuPhysInstAddr;
+ NvU64 nvDomainBusDeviceFunc;
+ NvU64 simAccessBufPhysAddr;
+ NvU64 pcieAtomicsOpMask;
+ NvU64 consoleMemSize;
+ NvU64 maxUserVa;
+ NvU32 pciConfigMirrorBase;
+ NvU32 pciConfigMirrorSize;
+ NvU8 oorArch;
+ NvU64 clPdbProperties;
+ NvU32 Chipset;
+ NvBool bGpuBehindBridge;
+ NvBool bMnocAvailable;
+ NvBool bUpstreamL0sUnsupported;
+ NvBool bUpstreamL1Unsupported;
+ NvBool bUpstreamL1PorSupported;
+ NvBool bUpstreamL1PorMobileOnly;
+ NvU8 upstreamAddressValid;
+ BUSINFO FHBBusInfo;
+ BUSINFO chipsetIDInfo;
+ ACPI_METHOD_DATA acpiMethodData;
+ NvU32 hypervisorType;
+ NvBool bIsPassthru;
+ NvU64 sysTimerOffsetNs;
+ GSP_VF_INFO gspVFInfo;
+} GspSystemInfo;
+
+typedef struct rpc_os_error_log_v17_00
+{
+ NvU32 exceptType;
+ NvU32 runlistId;
+ NvU32 chid;
+ char errString[0x100];
+} rpc_os_error_log_v17_00;
+
+typedef struct rpc_run_cpu_sequencer_v17_00
+{
+ NvU32 bufferSizeDWord;
+ NvU32 cmdIndex;
+ NvU32 regSaveArea[8];
+ NvU32 commandBuffer[];
+} rpc_run_cpu_sequencer_v17_00;
+
+typedef enum GSP_SEQ_BUF_OPCODE
+{
+ GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
+ GSP_SEQ_BUF_OPCODE_REG_MODIFY,
+ GSP_SEQ_BUF_OPCODE_REG_POLL,
+ GSP_SEQ_BUF_OPCODE_DELAY_US,
+ GSP_SEQ_BUF_OPCODE_REG_STORE,
+ GSP_SEQ_BUF_OPCODE_CORE_RESET,
+ GSP_SEQ_BUF_OPCODE_CORE_START,
+ GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
+ GSP_SEQ_BUF_OPCODE_CORE_RESUME,
+} GSP_SEQ_BUF_OPCODE;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 mask;
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 mask;
+ NvU32 val;
+ NvU32 timeout;
+ NvU32 error;
+} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
+
+typedef struct
+{
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 index;
+} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
+
+typedef struct GSP_SEQUENCER_BUFFER_CMD
+{
+ GSP_SEQ_BUF_OPCODE opCode;
+ union
+ {
+ GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
+ GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
+ GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
+ GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
+ GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
+ } payload;
+} GSP_SEQUENCER_BUFFER_CMD;
+
+typedef struct
+{
+ // Magic
+ // BL to use for verification (i.e. Booter locked it in WPR2)
+ NvU64 magic; // = 0xdc3aae21371a60b3;
+
+ // Revision number of Booter-BL-Sequencer handoff interface
+ // Bumped up when we change this interface so it is not backward compatible.
+ // Bumped up when we revoke GSP-RM ucode
+ NvU64 revision; // = 1;
+
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+
+ NvU64 sysmemAddrOfRadix3Elf;
+ NvU64 sizeOfRadix3Elf;
+
+ NvU64 sysmemAddrOfBootloader;
+ NvU64 sizeOfBootloader;
+
+ // Offsets inside bootloader image needed by Booter
+ NvU64 bootloaderCodeOffset;
+ NvU64 bootloaderDataOffset;
+ NvU64 bootloaderManifestOffset;
+
+ union
+ {
+ // Used only at initial boot
+ struct
+ {
+ NvU64 sysmemAddrOfSignature;
+ NvU64 sizeOfSignature;
+ };
+
+ //
+ // Used at suspend/resume to read GspFwHeapFreeList
+ // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
+ //
+ struct
+ {
+ NvU32 gspFwHeapFreeListWprOffset;
+ NvU32 unused0;
+ NvU64 unused1;
+ };
+ };
+
+ // ---- Members describing FB layout --------------------------------
+ NvU64 gspFwRsvdStart;
+
+ NvU64 nonWprHeapOffset;
+ NvU64 nonWprHeapSize;
+
+ NvU64 gspFwWprStart;
+
+ // GSP-RM to use to setup heap.
+ NvU64 gspFwHeapOffset;
+ NvU64 gspFwHeapSize;
+
+ // BL to use to find ELF for jump
+ NvU64 gspFwOffset;
+ // Size is sizeOfRadix3Elf above.
+
+ NvU64 bootBinOffset;
+ // Size is sizeOfBootloader above.
+
+ NvU64 frtsOffset;
+ NvU64 frtsSize;
+
+ NvU64 gspFwWprEnd;
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 fbSize;
+
+ // ---- Other members -----------------------------------------------
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 vgaWorkspaceOffset;
+ NvU64 vgaWorkspaceSize;
+
+ // Boot count. Used to determine whether to load the firmware image.
+ NvU64 bootCount;
+
+ // This union is organized the way it is to start at an 8-byte boundary and achieve natural
+ // packing of the internal struct fields.
+ union
+ {
+ struct
+ {
+ // TODO: the partitionRpc* fields below do not really belong in this
+ // structure. The values are patched in by the partition bootstrapper
+ // when GSP-RM is booted in a partition, and this structure was a
+ // convenient place for the bootstrapper to access them. These should
+ // be moved to a different comm. mechanism between the bootstrapper
+ // and the GSP-RM tasks.
+
+ // Shared partition RPC memory (physical address)
+ NvU64 partitionRpcAddr;
+
+ // Offsets relative to partitionRpcAddr
+ NvU16 partitionRpcRequestOffset;
+ NvU16 partitionRpcReplyOffset;
+
+ // Code section and dataSection offset and size.
+ NvU32 elfCodeOffset;
+ NvU32 elfDataOffset;
+ NvU32 elfCodeSize;
+ NvU32 elfDataSize;
+
+ // Used during GSP-RM resume to check for revocation
+ NvU32 lsUcodeVersion;
+ };
+
+ struct
+ {
+ // Pad for the partitionRpc* fields, plus 4 bytes
+ NvU32 partitionRpcPadding[4];
+
+ // CrashCat (contiguous) buffer size/location - occupies same bytes as the
+ // elf(Code|Data)(Offset|Size) fields above.
+ // TODO: move to GSP_FMC_INIT_PARAMS
+ NvU64 sysmemAddrOfCrashReportQueue;
+ NvU32 sizeOfCrashReportQueue;
+
+ // Pad for the lsUcodeVersion field
+ NvU32 lsUcodeVersionPadding[1];
+ };
+ };
+
+ // Number of VF partitions allocating sub-heaps from the WPR heap
+ // Used during boot to ensure the heap is adequately sized
+ NvU8 gspFwHeapVfPartitionCount;
+
+ // Pad structure to exactly 256 bytes. Can replace padding with additional
+ // fields without incrementing revision. Padding initialized to 0.
+ NvU8 padding[7];
+
+ // BL to use for verification (i.e. Booter says OK to boot)
+ NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
+} GspFwWprMeta;
+
+#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
+
+#define GSP_FW_WPR_META_REVISION 1
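The comments inside GspFwWprMeta above make two layout claims: the partitionRpc overlay starts on an 8-byte boundary, and the structure is padded to exactly 256 bytes (one DMA chunk). A minimal sketch of compile-time checks for those claims, assuming the kernel's static_assert and offsetof helpers (<linux/build_bug.h>, <linux/stddef.h>) and not part of the patch itself:

/* Sketch only: verifies the layout claims made in the GspFwWprMeta comments. */
static_assert(sizeof(GspFwWprMeta) == 256,
	      "GspFwWprMeta is padded to exactly 256 bytes");
static_assert(offsetof(GspFwWprMeta, partitionRpcAddr) % 8 == 0,
	      "partitionRpc overlay starts on an 8-byte boundary");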
+
+typedef struct
+{
+ NvU32 version; // queue version
+ NvU32 size; // bytes, page aligned
+ NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum
+ NvU32 msgCount; // number of entries in queue
+ NvU32 writePtr; // message id of next slot
+ NvU32 flags; // if set it means "i want to swap RX"
+ NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store.
+ NvU32 entryOff; // Offset of entries from start of backing store.
+} msgqTxHeader;
+
+typedef struct
+{
+ NvU32 readPtr; // message id of last message read
+} msgqRxHeader;
+
+typedef struct {
+ RmPhysAddr sharedMemPhysAddr;
+ NvU32 pageTableEntryCount;
+ NvLength cmdQueueOffset;
+ NvLength statQueueOffset;
+ NvLength locklessCmdQueueOffset;
+ NvLength locklessStatQueueOffset;
+} MESSAGE_QUEUE_INIT_ARGUMENTS;
+
+typedef struct {
+ NvU32 oldLevel;
+ NvU32 flags;
+ NvBool bInPMTransition;
+} GSP_SR_INIT_ARGUMENTS;
+
+typedef struct
+{
+ MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
+ GSP_SR_INIT_ARGUMENTS srInitArguments;
+ NvU32 gpuInstance;
+
+ struct
+ {
+ NvU64 pa;
+ NvU64 size;
+ } profilerArgs;
+} GSP_ARGUMENTS_CACHED;
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U)
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
+
+typedef NvU64 LibosAddress;
+
+typedef struct
+{
+ LibosAddress id8; // Id tag.
+ LibosAddress pa; // Physical address.
+ LibosAddress size; // Size of memory area.
+ NvU8 kind; // See LibosMemoryRegionKind above.
+ NvU8 loc; // See LibosMemoryRegionLoc above.
+} LibosMemoryRegionInitArgument;
+
+typedef enum {
+ LIBOS_MEMORY_REGION_NONE,
+ LIBOS_MEMORY_REGION_CONTIGUOUS,
+ LIBOS_MEMORY_REGION_RADIX3
+} LibosMemoryRegionKind;
+
+typedef enum {
+ LIBOS_MEMORY_REGION_LOC_NONE,
+ LIBOS_MEMORY_REGION_LOC_SYSMEM,
+ LIBOS_MEMORY_REGION_LOC_FB
+} LibosMemoryRegionLoc;
+
+typedef struct
+{
+ //
+ // Magic
+ // Use for verification by Booter
+ //
+ NvU64 magic; // = GSP_FW_SR_META_MAGIC;
+
+ //
+ // Revision number
+ // Bumped up when we change this interface so it is not backward compatible.
+ // Bumped up when we revoke GSP-RM ucode
+ //
+ NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
+
+ //
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+ //
+ NvU64 sysmemAddrOfSuspendResumeData;
+ NvU64 sizeOfSuspendResumeData;
+
+ // ---- Members for crypto ops across S/R ---------------------------
+
+ //
+ // HMAC over the entire GspFwSRMeta structure (including padding)
+ // with the hmac field itself zeroed.
+ //
+ NvU8 hmac[32];
+
+ // Hash over GspFwWprMeta structure
+ NvU8 wprMetaHash[32];
+
+ // Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
+ NvU8 heapFreeListHash[32];
+
+ // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
+ NvU8 dataHash[32];
+
+ //
+ // Pad structure to exactly 256 bytes (1 DMA chunk).
+ // Padding initialized to zero.
+ //
+ NvU32 padding[24];
+
+} GspFwSRMeta;
+
+#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
+
+#define GSP_FW_SR_META_REVISION 2
+
+#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \
+ ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \
+ /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_START */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \
+ 0)
+
+typedef struct {
+ //
+ // Version 1
+ // Version 2
+ // Version 3 = for Partition boot
+ // Version 4 = for eb riscv boot
+ // Version 5 = Support signing entire RISC-V image as "code" in code section for hopper and later.
+ //
+ NvU32 version; // structure version
+ NvU32 bootloaderOffset;
+ NvU32 bootloaderSize;
+ NvU32 bootloaderParamOffset;
+ NvU32 bootloaderParamSize;
+ NvU32 riscvElfOffset;
+ NvU32 riscvElfSize;
+ NvU32 appVersion; // Changelist number associated with the image
+ //
+ // Manifest contains information about Monitor and it is
+ // input to BR
+ //
+ NvU32 manifestOffset;
+ NvU32 manifestSize;
+ //
+ // Monitor Data offset within RISCV image and size
+ //
+ NvU32 monitorDataOffset;
+ NvU32 monitorDataSize;
+ //
+ // Monitor Code offset within RISCV image and size
+ //
+ NvU32 monitorCodeOffset;
+ NvU32 monitorCodeSize;
+ NvU32 bIsMonitorEnabled;
+ //
+ // Swbrom Code offset within RISCV image and size
+ //
+ NvU32 swbromCodeOffset;
+ NvU32 swbromCodeSize;
+ //
+ // Swbrom Data offset within RISCV image and size
+ //
+ NvU32 swbromDataOffset;
+ NvU32 swbromDataSize;
+ //
+ // Total size of FB carveout (image and reserved space).
+ //
+ NvU32 fbReservedSize;
+ //
+ // Indicates whether the entire RISC-V image is signed as "code" in code section.
+ //
+ NvU32 bSignedAsCode;
+} RM_RISCV_UCODE_DESC;
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
+ NvU16 engineIdx;
+ NvU32 pmcIntrMask;
+ NvU32 vectorStall;
+ NvU32 vectorNonStall;
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
+
+typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP {
+ NvU8 subtreeStart;
+ NvU8 subtreeEnd;
+} NV2080_INTR_CATEGORY_SUBTREE_MAP;
+
+#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128
+
+typedef enum NV2080_INTR_CATEGORY {
+ NV2080_INTR_CATEGORY_DEFAULT = 0,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2,
+ NV2080_INTR_CATEGORY_RUNLIST = 3,
+ NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4,
+ NV2080_INTR_CATEGORY_UVM_OWNED = 5,
+ NV2080_INTR_CATEGORY_UVM_SHARED = 6,
+ NV2080_INTR_CATEGORY_ENUM_COUNT = 7,
+} NV2080_INTR_CATEGORY;
+
+#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
+ NvU32 tableLen;
+ NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
+ NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT];
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
+
+#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures
+
+#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels
+
+typedef union rpc_message_rpc_union_field_v03_00
+{
+ NvU32 spare;
+ NvU32 cpuRmGfid;
+} rpc_message_rpc_union_field_v03_00;
+
+typedef rpc_message_rpc_union_field_v03_00 rpc_message_rpc_union_field_v;
+
+typedef struct rpc_message_header_v03_00
+{
+ NvU32 header_version;
+ NvU32 signature;
+ NvU32 length;
+ NvU32 function;
+ NvU32 rpc_result;
+ NvU32 rpc_result_private;
+ NvU32 sequence;
+ rpc_message_rpc_union_field_v u;
+ rpc_generic_union rpc_message_data[];
+} rpc_message_header_v03_00;
+
+typedef rpc_message_header_v03_00 rpc_message_header_v;
+
+typedef struct GSP_MSG_QUEUE_ELEMENT
+{
+ NvU8 authTagBuffer[16]; // Authentication tag buffer.
+ NvU8 aadBuffer[16]; // AAD buffer.
+ NvU32 checkSum; // Set to value needed to make checksum always zero.
+ NvU32 seqNum; // Sequence number maintained by the message queue.
+ NvU32 elemCount; // Number of message queue elements this message has.
+ NV_DECLARE_ALIGNED(rpc_message_header_v rpc, 8);
+} GSP_MSG_QUEUE_ELEMENT;
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3 (20 << 20)
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing thru Ada
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u)
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB (84u)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h
new file mode 100644
index 000000000000..642c13aec325
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_MSGFN_H__
+#define __NVRM_MSGFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#ifndef E
+# define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
+# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ E(FIRST_EVENT = 0x1000) // 0x1000
+ E(GSP_INIT_DONE) // 0x1001
+ E(GSP_RUN_CPU_SEQUENCER) // 0x1002
+ E(POST_EVENT) // 0x1003
+ E(RC_TRIGGERED) // 0x1004
+ E(MMU_FAULT_QUEUED) // 0x1005
+ E(OS_ERROR_LOG) // 0x1006
+ E(RG_LINE_INTR) // 0x1007
+ E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008
+ E(SIM_READ) // 0x1009
+ E(SIM_WRITE) // 0x100a
+ E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b
+ E(UCODE_LIBOS_PRINT) // 0x100c
+ E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d
+ E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e
+ E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f
+ E(VGPU_CONFIG) // 0x1010
+ E(DISPLAY_MODESET) // 0x1011
+ E(EXTDEV_INTR_SERVICE) // 0x1012
+ E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013
+ E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014
+ E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015
+ E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016
+ E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017
+ E(TIMED_SEMAPHORE_RELEASE) // 0x1018
+ E(NVLINK_IS_GPU_DEGRADED) // 0x1019
+ E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a
+ E(GSP_SEND_USER_SHARED_DATA) // 0x101b
+ E(NVLINK_FAULT_UP) // 0x101c
+ E(GSP_LOCKDOWN_NOTICE) // 0x101d
+ E(MIG_CI_CONFIG_UPDATE) // 0x101e
+ E(NUM_EVENTS) // END
+#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef E
+# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+#endif
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h
new file mode 100644
index 000000000000..3a04e702677f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVDEC_H__
+#define __NVRM_NVDEC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances;
+ NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2
+} NV_BSP_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h
new file mode 100644
index 000000000000..203c1d5304d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVENC_H__
+#define __NVRM_NVENC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC?
+ NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2
+} NV_MSENC_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h
new file mode 100644
index 000000000000..71fc53889ec7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVJPG_H__
+#define __NVRM_NVJPG_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG?
+ NvU32 engineInstance;
+} NV_NVJPG_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h
new file mode 100644
index 000000000000..49d81c7673d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_OFA_H__
+#define __NVRM_OFA_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
+} NV_OFA_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h
index 73c57f235f6a..2a037acc6b1e 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h
@@ -1,5 +1,10 @@
-#ifndef __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
-#define __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_RPCFN_H__
+#define __NVRM_RPCFN_H__
+#include <nvrm/nvtypes.h>
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
@@ -209,54 +214,12 @@ enum {
X(RM, CTRL_SET_HS_CREDITS) // 198
X(RM, CTRL_PM_AREA_PC_SAMPLER) // 199
X(RM, INVALIDATE_TLB) // 200
+ X(RM, RESERVED_201) // 201
+ X(RM, ECC_NOTIFIER_WRITE_ACK) // 202
X(RM, NUM_FUNCTIONS) //END
#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
};
# undef X
# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
#endif
-
-#ifndef E
-# define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
-# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
-enum {
-#endif
- E(FIRST_EVENT = 0x1000) // 0x1000
- E(GSP_INIT_DONE) // 0x1001
- E(GSP_RUN_CPU_SEQUENCER) // 0x1002
- E(POST_EVENT) // 0x1003
- E(RC_TRIGGERED) // 0x1004
- E(MMU_FAULT_QUEUED) // 0x1005
- E(OS_ERROR_LOG) // 0x1006
- E(RG_LINE_INTR) // 0x1007
- E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008
- E(SIM_READ) // 0x1009
- E(SIM_WRITE) // 0x100a
- E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b
- E(UCODE_LIBOS_PRINT) // 0x100c
- E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d
- E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e
- E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f
- E(VGPU_CONFIG) // 0x1010
- E(DISPLAY_MODESET) // 0x1011
- E(EXTDEV_INTR_SERVICE) // 0x1012
- E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013
- E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014
- E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015
- E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016
- E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017
- E(TIMED_SEMAPHORE_RELEASE) // 0x1018
- E(NVLINK_IS_GPU_DEGRADED) // 0x1019
- E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a
- E(GSP_SEND_USER_SHARED_DATA) // 0x101b
- E(NVLINK_FAULT_UP) // 0x101c
- E(GSP_LOCKDOWN_NOTICE) // 0x101d
- E(MIG_CI_CONFIG_UPDATE) // 0x101e
- E(NUM_EVENTS) // END
-#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
-};
-# undef E
-# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
-#endif
-
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h
new file mode 100644
index 000000000000..f6ec04efd119
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_VMM_H__
+#define __NVRM_VMM_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define FERMI_VASPACE_A (0x000090f1)
+
+typedef struct
+{
+ NvU32 index;
+ NvV32 flags;
+ NvU64 vaSize NV_ALIGN_BYTES(8);
+ NvU64 vaStartInternal NV_ALIGN_BYTES(8);
+ NvU64 vaLimitInternal NV_ALIGN_BYTES(8);
+ NvU32 bigPageSize;
+ NvU64 vaBase NV_ALIGN_BYTES(8);
+} NV_VASPACE_ALLOCATION_PARAMETERS;
+
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 //<! Create new VASpace, by default
+
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED BIT(3)
+
+#define SPLIT_VAS_SERVER_RM_MANAGED_VA_START 0x100000000ULL // 4GB
+#define SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE 0x20000000ULL // 512MB
+
+#define GMMU_FMT_MAX_LEVELS 6U
+
+#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
+typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
+ /*!
+ * [in] GPU sub-device handle - this API only supports unicast.
+ * Pass 0 to use subDeviceId instead.
+ */
+ NvHandle hSubDevice;
+
+ /*!
+ * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
+ */
+ NvU32 subDeviceId;
+
+ /*!
+ * [in] Page size (VA coverage) of the level to reserve.
+ * This need not be a leaf (page table) page size - it can be
+ * the coverage of an arbitrary level (including root page directory).
+ */
+ NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
+
+ /*!
+ * [in] First GPU virtual address of the range to reserve.
+ * This must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
+
+ /*!
+ * [in] Last GPU virtual address of the range to reserve.
+ * This (+1) must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
+
+ /*!
+ * [in] Number of PDE levels to copy.
+ */
+ NvU32 numLevelsToCopy;
+
+ /*!
+ * [in] Per-level information.
+ */
+ struct {
+ /*!
+ * Physical address of this page level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+
+ /*!
+ * Size in bytes allocated for this level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+
+ /*!
+ * Aperture in which this page level instance resides.
+ */
+ NvU32 aperture;
+
+ /*!
+ * Page shift corresponding to the level
+ */
+ NvU8 pageShift;
+ } levels[GMMU_FMT_MAX_LEVELS];
+} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
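Per the field comments above, virtAddrLo must be pageSize-aligned and virtAddrHi + 1 must be pageSize-aligned. A hedged sketch of deriving a conforming range; the helper name is illustrative and it assumes a power-of-two page size, neither of which comes from the patch:

/* Sketch only: align an arbitrary [start, end] byte range (end inclusive)
 * to the level's page size, as the virtAddrLo/virtAddrHi comments require.
 */
static void
example_fill_reserved_pdes_range(NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *p,
				 NvU64 start, NvU64 end, NvU64 page_size)
{
	p->pageSize   = page_size;
	p->virtAddrLo = start & ~(page_size - 1);                    /* round down */
	p->virtAddrHi = ((end + page_size) & ~(page_size - 1)) - 1;  /* last byte of rounded-up range */
}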
+
+#define NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY (0x801813U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+ NvU32 numEntries;
+ NvU32 flags;
+ NvHandle hVASpace;
+ NvU32 chId;
+ NvU32 subDeviceId; // ID+1, 0 for BC
+ NvU32 pasid;
+} NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS;
+
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE 1:0
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_VIDMEM (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_COH (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_NONCOH (0x00000002U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES 2:2
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS 3:3
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY 4:4
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE 5:5
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_TRUE (0x00000001U)
+
+#define NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY (0x801814U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS {
+ NvHandle hVASpace;
+ NvU32 subDeviceId; // ID+1, 0 for BC
+} NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c
index d72b3aae9a2b..2156808cba4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c
@@ -19,26 +19,26 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
+#include <rm/engine.h>
-#include <subdev/gsp.h>
+#include "nvrm/ofa.h"
-#include <nvif/class.h>
+static int
+r535_ofa_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *ofa)
+{
+ NV_OFA_ALLOCATION_PARAMETERS *args;
-static const struct nvkm_engine_func
-ad102_nvdec = {
- .sclass = {
- { -1, -1, NVC9B0_VIDEO_DECODER },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ofa);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
-int
-ad102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvdec **pnvdec)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&ad102_nvdec, device, type, inst, pnvdec);
+ args->size = sizeof(*args);
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(ofa, args);
}
+
+const struct nvkm_rm_api_engine
+r535_ofa = {
+ .alloc = r535_ofa_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c
new file mode 100644
index 000000000000..a4190676e1ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/gsp.h"
+
+static const struct nvkm_rm_wpr
+r535_wpr_libos2 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r535_wpr_libos3 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+};
+
+static const struct nvkm_rm_api
+r535_api = {
+ .gsp = &r535_gsp,
+ .rpc = &r535_rpc,
+ .ctrl = &r535_ctrl,
+ .alloc = &r535_alloc,
+ .client = &r535_client,
+ .device = &r535_device,
+ .fbsr = &r535_fbsr,
+ .disp = &r535_disp,
+ .fifo = &r535_fifo,
+ .ce = &r535_ce,
+ .gr = &r535_gr,
+ .nvdec = &r535_nvdec,
+ .nvenc = &r535_nvenc,
+ .nvjpg = &r535_nvjpg,
+ .ofa = &r535_ofa,
+};
+
+const struct nvkm_rm_impl
+r535_rm_tu102 = {
+ .wpr = &r535_wpr_libos2,
+ .api = &r535_api,
+};
+
+const struct nvkm_rm_impl
+r535_rm_ga102 = {
+ .wpr = &r535_wpr_libos3,
+ .api = &r535_api,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
new file mode 100644
index 000000000000..5acb98d137bd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/rpcfn.h"
+
+#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
+#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
+
+/**
+ * DOC: GSP message queue element
+ *
+ * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
+ *
+ * The GSP command queue and status queue are message queues for the
+ * communication between software and GSP. The software submits the GSP
+ * RPC via the GSP command queue, and GSP writes the status of the submitted
+ * RPC in the status queue.
+ *
+ * A GSP message queue element consists of three parts:
+ *
+ * - message element header (struct r535_gsp_msg), which mostly maintains
+ * the metadata for queuing the element.
+ *
+ * - RPC message header (struct nvfw_gsp_rpc), which maintains the info
+ * of the RPC. E.g., the RPC function number.
+ *
+ * - The payload, where the RPC message stays. E.g. the params of a
+ * specific RPC function. Some RPC functions also have their headers
+ * in the payload. E.g. rm_alloc, rm_control.
+ *
+ * The memory layout of a GSP message element can be illustrated below::
+ *
+ * +------------------------+
+ * | Message Element Header |
+ * | (r535_gsp_msg) |
+ * | |
+ * | (r535_gsp_msg.data) |
+ * | | |
+ * |----------V-------------|
+ * | GSP RPC Header |
+ * | (nvfw_gsp_rpc) |
+ * | |
+ * | (nvfw_gsp_rpc.data) |
+ * | | |
+ * |----------V-------------|
+ * | Payload |
+ * | |
+ * | header(optional) |
+ * | params |
+ * +------------------------+
+ *
+ * The max size of a message queue element is 16 pages (including the
+ * headers). When a GSP message to be sent is larger than 16 pages, the
+ * message should be split into multiple elements and sent accordingly.
+ *
+ * Among the split elements, the first element has the expected
+ * function number, while the rest of the elements are sent with the
+ * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
+ *
+ * GSP consumes the elements from the cmdq and always writes the result
+ * back to the msgq. The result is also formed as split elements.
+ *
+ * Terminology:
+ *
+ * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
+ * payload)
+ * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
+ * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
+ * - gsp_rpc_len: size of (GSP RPC header + payload)
+ * - params_size: size of params in the payload
+ * - payload_size: size of (header if exists + params) in the payload
+ */
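As a worked example of the size rule above: an RPC of gsp_rpc_len bytes (GSP RPC header plus payload) occupies DIV_ROUND_UP(header + length, page) queue elements, and must be split into continuation records once it exceeds the 16-page element limit. A minimal sketch, assuming the GSP_MSG_* constants, GSP_PAGE_SIZE and struct r535_gsp_msg used in this file (GSP_MSG_HDR_SIZE and the struct are defined just below); the helper names are illustrative, not part of the patch:

/* Sketch only, not part of the patch. */
static inline u32
example_rpc_elem_count(u32 gsp_rpc_len)
{
	/* Same arithmetic r535_gsp_cmdq_push() uses for msg->elem_count. */
	return DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
}

static inline bool
example_rpc_needs_split(u32 gsp_rpc_len)
{
	/* One element may span at most 16 pages, headers included. */
	return GSP_MSG_HDR_SIZE + gsp_rpc_len > GSP_MSG_MAX_SIZE;
}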
+
+struct r535_gsp_msg {
+ u8 auth_tag_buffer[16];
+ u8 aad_buffer[16];
+ u32 checksum;
+ u32 sequence;
+ u32 elem_count;
+ u32 pad;
+ u8 data[];
+};
+
+struct nvfw_gsp_rpc {
+ u32 header_version;
+ u32 signature;
+ u32 length;
+ u32 function;
+ u32 rpc_result;
+ u32 rpc_result_private;
+ u32 sequence;
+ union {
+ u32 spare;
+ u32 cpuRmGfid;
+ };
+ u8 data[];
+};
+
+#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
+
+#define to_gsp_hdr(p, header) \
+ container_of((void *)p, typeof(*header), data)
+
+#define to_payload_hdr(p, header) \
+ container_of((void *)p, typeof(*header), params)
+
+int
+r535_rpc_status_to_errno(uint32_t rpc_status)
+{
+ switch (rpc_status) {
+ case 0x55: /* NV_ERR_NOT_READY */
+ case 0x66: /* NV_ERR_TIMEOUT_RETRY */
+ return -EBUSY;
+ case 0x51: /* NV_ERR_NO_MEMORY */
+ return -ENOMEM;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
+{
+ u32 size, rptr = *gsp->msgq.rptr;
+ int used;
+
+ size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
+ GSP_PAGE_SIZE);
+ if (WARN_ON(!size || size >= gsp->msgq.cnt))
+ return -EINVAL;
+
+ do {
+ u32 wptr = *gsp->msgq.wptr;
+
+ used = wptr + gsp->msgq.cnt - rptr;
+ if (used >= gsp->msgq.cnt)
+ used -= gsp->msgq.cnt;
+ if (used >= size)
+ break;
+
+ usleep_range(1, 2);
+ } while (--(*ptime));
+
+ if (WARN_ON(!*ptime))
+ return -ETIMEDOUT;
+
+ return used;
+}
+
+static struct r535_gsp_msg *
+r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
+{
+ u32 rptr = *gsp->msgq.rptr;
+
+ /* Skip the first page, which is the message queue info */
+ return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
+ rptr * GSP_PAGE_SIZE);
+}
+
+/**
+ * DOC: Receive a GSP message queue element
+ *
+ * Receiving a GSP message queue element from the message queue consists of
+ * the following steps:
+ *
+ * - Peek the element from the queue: r535_gsp_msgq_peek().
+ * Peek the first page of the element to determine the total size of the
+ * message before allocating the proper memory.
+ *
+ * - Allocate memory for the message.
+ * Once the total size of the message is determined from the GSP message
+ * queue element, the caller of r535_gsp_msgq_recv() allocates the
+ * required memory.
+ *
+ * - Receive the message: r535_gsp_msgq_recv().
+ * Copy the message into the allocated memory. Advance the read pointer.
+ * If the message is a large GSP message, r535_gsp_msgq_recv() calls
+ * r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
+ * until the complete message is received.
+ *   r535_gsp_msgq_recv() assembles the payloads of continuation parts into
+ *   the returned buffer for the large GSP message.
+ *
+ * - Free the allocated memory: r535_gsp_msg_done().
+ * The user is responsible for freeing the memory allocated for the GSP
+ * message pages after they have been processed.
+ */
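To make the four steps above concrete, here is a caller-side sketch of the single-element receive path; it is illustrative only, and r535_gsp_msg_recv() later in this file is the real implementation of this sequence, including the continuation handling the sketch skips:

/* Sketch only, not part of the patch: single-element receive path. */
static struct nvfw_gsp_rpc *
example_recv_one_rpc(struct nvkm_gsp *gsp)
{
	int retries = 4000000; /* same retry budget as r535_gsp_msg_recv() */
	struct nvfw_gsp_rpc *rpc;

	/* Step 1: peek the RPC header to learn the message size. */
	rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	/* Steps 2 and 3: allocate, copy the message out of the queue and
	 * advance the read pointer. */
	rpc = r535_gsp_msgq_recv(gsp, rpc->length, &retries);
	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	/* ... use rpc->function / rpc->data here ... */

	/* Step 4: the caller frees the message once processed. */
	r535_gsp_msg_done(gsp, rpc);
	return NULL;
}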
+static void *
+r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+ struct r535_gsp_msg *mqe;
+ int ret;
+
+ ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ return mqe->data;
+}
+
+struct r535_gsp_msg_info {
+ int *retries;
+ u32 gsp_rpc_len;
+ void *gsp_rpc_buf;
+ bool continuation;
+};
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
+
+static void *
+r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
+ struct r535_gsp_msg_info *info)
+{
+ u8 *buf = info->gsp_rpc_buf;
+ u32 rptr = *gsp->msgq.rptr;
+ struct r535_gsp_msg *mqe;
+ u32 size, expected, len;
+ int ret;
+
+ expected = info->gsp_rpc_len;
+
+ ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ if (info->continuation) {
+ struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
+ nvkm_error(&gsp->subdev,
+ "Not a continuation of a large RPC\n");
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ return ERR_PTR(-EIO);
+ }
+ }
+
+ size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
+
+ len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
+ len = min_t(u32, expected, len);
+
+ if (info->continuation)
+ memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
+ len - sizeof(struct nvfw_gsp_rpc));
+ else
+ memcpy(buf, mqe->data, len);
+
+ expected -= len;
+
+ if (expected) {
+ mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
+ memcpy(buf + len, mqe, expected);
+ }
+
+ rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
+
+ mb();
+ (*gsp->msgq.rptr) = rptr;
+ return buf;
+}
+
+static void *
+r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+ struct r535_gsp_msg *mqe;
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
+ struct nvfw_gsp_rpc *rpc;
+ struct r535_gsp_msg_info info = {0};
+ u32 expected = gsp_rpc_len;
+ void *buf;
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+ rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (WARN_ON(rpc->length > max_rpc_size))
+ return NULL;
+
+ buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ info.gsp_rpc_buf = buf;
+ info.retries = retries;
+ info.gsp_rpc_len = rpc->length;
+
+ buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR(buf)) {
+ kvfree(info.gsp_rpc_buf);
+ info.gsp_rpc_buf = NULL;
+ return buf;
+ }
+
+ if (expected <= max_rpc_size)
+ return buf;
+
+ info.gsp_rpc_buf += info.gsp_rpc_len;
+ expected -= info.gsp_rpc_len;
+
+ while (expected) {
+ u32 size;
+
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
+ if (IS_ERR_OR_NULL(rpc)) {
+ kvfree(buf);
+ return rpc;
+ }
+
+ info.gsp_rpc_len = rpc->length;
+ info.continuation = true;
+
+ rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR_OR_NULL(rpc)) {
+ kvfree(buf);
+ return rpc;
+ }
+
+ size = info.gsp_rpc_len - sizeof(*rpc);
+ expected -= size;
+ info.gsp_rpc_buf += size;
+ }
+
+ rpc = buf;
+ rpc->length = gsp_rpc_len;
+ return buf;
+}
+
+static int
+r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
+{
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+ struct r535_gsp_msg *cqe;
+ u32 gsp_rpc_len = msg->checksum;
+ u64 *ptr = (void *)msg;
+ u64 *end;
+ u64 csum = 0;
+ int free, time = 1000000;
+ u32 wptr, size, step, len;
+ u32 off = 0;
+
+ len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
+
+ end = (u64 *)((char *)ptr + len);
+ msg->pad = 0;
+ msg->checksum = 0;
+ msg->sequence = gsp->cmdq.seq++;
+ msg->elem_count = DIV_ROUND_UP(len, 0x1000);
+
+ while (ptr < end)
+ csum ^= *ptr++;
+
+ msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
+
+ wptr = *gsp->cmdq.wptr;
+ do {
+ do {
+ free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
+ if (free >= gsp->cmdq.cnt)
+ free -= gsp->cmdq.cnt;
+ if (free >= 1)
+ break;
+
+ usleep_range(1, 2);
+ } while(--time);
+
+ if (WARN_ON(!time)) {
+ kvfree(msg);
+ return -ETIMEDOUT;
+ }
+
+ cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
+ step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
+ size = min_t(u32, len, step * GSP_PAGE_SIZE);
+
+ memcpy(cqe, (u8 *)msg + off, size);
+
+ wptr += DIV_ROUND_UP(size, 0x1000);
+ if (wptr == gsp->cmdq.cnt)
+ wptr = 0;
+
+ off += size;
+ len -= size;
+ } while (len);
+
+ nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
+ wmb();
+ (*gsp->cmdq.wptr) = wptr;
+ mb();
+
+ nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
+
+ kvfree(msg);
+ return 0;
+}
+
+static void *
+r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
+{
+ struct r535_gsp_msg *msg;
+ u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;
+
+ size = ALIGN(size, GSP_MSG_MIN_SIZE);
+ msg = kvzalloc(size, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+ msg->checksum = gsp_rpc_len;
+ return msg->data;
+}
+
+static void
+r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
+{
+ kvfree(msg);
+}
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
+{
+ if (gsp->subdev.debug >= lvl) {
+ nvkm_printk__(&gsp->subdev, lvl, info,
+ "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
+ msg->function, msg->length, msg->length - sizeof(*msg),
+ msg->rpc_result, msg->rpc_result_private);
+ print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg->data, msg->length - sizeof(*msg), true);
+ }
+}
+
+struct nvfw_gsp_rpc *
+r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvfw_gsp_rpc *rpc;
+ int retries = 4000000, i;
+
+retry:
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->rpc_result) {
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
+ return ERR_PTR(-EINVAL);
+ }
+
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);
+
+ if (fn && rpc->function == fn) {
+ if (gsp_rpc_len) {
+ if (rpc->length < gsp_rpc_len) {
+ nvkm_error(subdev, "rpc len %d < %d\n",
+ rpc->length, gsp_rpc_len);
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
+ return ERR_PTR(-EIO);
+ }
+
+ return rpc;
+ }
+
+ r535_gsp_msg_done(gsp, rpc);
+ return NULL;
+ }
+
+ for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
+ struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
+
+ if (ntfy->fn == rpc->function) {
+ if (ntfy->func)
+ ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
+ rpc->length - sizeof(*rpc));
+ break;
+ }
+ }
+
+ if (i == gsp->msgq.ntfy_nr)
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);
+
+ r535_gsp_msg_done(gsp, rpc);
+ if (fn)
+ goto retry;
+
+ if (*gsp->msgq.rptr != *gsp->msgq.wptr)
+ goto retry;
+
+ return NULL;
+}
+
+int
+r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
+{
+ int ret = 0;
+
+ mutex_lock(&gsp->msgq.mutex);
+ if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
+ ret = -ENOSPC;
+ } else {
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
+ gsp->msgq.ntfy_nr++;
+ }
+ mutex_unlock(&gsp->msgq.mutex);
+ return ret;
+}
+
+int
+r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
+{
+ void *repv;
+
+ mutex_lock(&gsp->cmdq.mutex);
+ repv = r535_gsp_msg_recv(gsp, fn, 0);
+ mutex_unlock(&gsp->cmdq.mutex);
+ if (IS_ERR(repv))
+ return PTR_ERR(repv);
+
+ return 0;
+}
+
+static void *
+r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn,
+ enum nvkm_gsp_rpc_reply_policy policy,
+ u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *reply;
+ void *repv = NULL;
+
+ switch (policy) {
+ case NVKM_GSP_RPC_REPLY_NOWAIT:
+ break;
+ case NVKM_GSP_RPC_REPLY_RECV:
+ reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
+ if (!IS_ERR_OR_NULL(reply))
+ repv = reply->data;
+ else
+ repv = reply;
+ break;
+ case NVKM_GSP_RPC_REPLY_POLL:
+ repv = r535_gsp_msg_recv(gsp, fn, 0);
+ break;
+ }
+
+ return repv;
+}
+
+static void *
+r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+ u32 fn = rpc->function;
+ int ret;
+
+ if (gsp->subdev.debug >= NV_DBG_TRACE) {
+ nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
+ rpc->length, rpc->length - sizeof(*rpc));
+ print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
+ rpc->data, rpc->length - sizeof(*rpc), true);
+ }
+
+ ret = r535_gsp_cmdq_push(gsp, rpc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return r535_gsp_rpc_handle_reply(gsp, fn, policy, gsp_rpc_len);
+}
+
+static void
+r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
+{
+ struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);
+
+ r535_gsp_msg_done(gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
+{
+ struct nvfw_gsp_rpc *rpc;
+
+ rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size,
+ sizeof(u64)));
+ if (IS_ERR(rpc))
+ return ERR_CAST(rpc);
+
+ rpc->header_version = 0x03000000;
+ rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
+ rpc->function = fn;
+ rpc->rpc_result = 0xffffffff;
+ rpc->rpc_result_private = 0xffffffff;
+ rpc->length = sizeof(*rpc) + payload_size;
+ return rpc->data;
+}
+
+static void *
+r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg);
+ const u32 max_payload_size = max_rpc_size - sizeof(*rpc);
+ u32 payload_size = rpc->length - sizeof(*rpc);
+ void *repv;
+
+ mutex_lock(&gsp->cmdq.mutex);
+ if (payload_size > max_payload_size) {
+ const u32 fn = rpc->function;
+ u32 remain_payload_size = payload_size;
+
+ /* Adjust length, and send initial RPC. */
+ rpc->length = sizeof(*rpc) + max_payload_size;
+ msg->checksum = rpc->length;
+
+ repv = r535_gsp_rpc_send(gsp, payload, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
+ if (IS_ERR(repv))
+ goto done;
+
+ payload += max_payload_size;
+ remain_payload_size -= max_payload_size;
+
+ /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
+ while (remain_payload_size) {
+ u32 size = min(remain_payload_size,
+ max_payload_size);
+ void *next;
+
+ next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
+ if (IS_ERR(next)) {
+ repv = next;
+ goto done;
+ }
+
+ memcpy(next, payload, size);
+
+ repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
+ if (IS_ERR(repv))
+ goto done;
+
+ payload += size;
+ remain_payload_size -= size;
+ }
+
+ /* Wait for reply. */
+ repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size +
+ sizeof(*rpc));
+ } else {
+ repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len);
+ }
+
+done:
+ mutex_unlock(&gsp->cmdq.mutex);
+ return repv;
+}
+
+const struct nvkm_rm_api_rpc
+r535_rpc = {
+ .get = r535_gsp_rpc_get,
+ .push = r535_gsp_rpc_push,
+ .done = r535_gsp_rpc_done,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
index d3e95453f25d..52f2e5f14517 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
@@ -19,15 +19,38 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "vmm.h"
+#include <subdev/mmu/vmm.h>
 
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvhw/drf.h>
+#include "nvrm/vmm.h"
-static int
-r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
+void
+r535_mmu_vaspace_del(struct nvkm_vmm *vmm)
+{
+ if (vmm->rm.external) {
+ NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object,
+ NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY,
+ sizeof(*ctrl));
+ if (!IS_ERR(ctrl)) {
+ ctrl->hVASpace = vmm->rm.object.handle;
+
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl));
+ }
+
+ vmm->rm.external = false;
+ }
+
+ nvkm_gsp_rm_free(&vmm->rm.object);
+ nvkm_gsp_device_dtor(&vmm->rm.device);
+ nvkm_gsp_client_dtor(&vmm->rm.client);
+
+ nvkm_vmm_put(vmm, &vmm->rm.rsvd);
+}
+
+int
+r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle, bool external)
{
NV_VASPACE_ALLOCATION_PARAMETERS *args;
int ret;
@@ -37,58 +60,103 @@ r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
if (ret)
return ret;
 
- args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A,
+ args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, handle, FERMI_VASPACE_A,
sizeof(*args), &vmm->rm.object);
if (IS_ERR(args))
return PTR_ERR(args);
 
args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
+ if (external)
+ args->flags = NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED;
 
ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args);
if (ret)
return ret;
 
- {
+ if (!external) {
NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl;
+ u8 page_shift = 29; /* 512MiB */
+ const u64 page_size = BIT_ULL(page_shift);
+ const struct nvkm_vmm_page *page;
+ const struct nvkm_vmm_desc *desc;
+ struct nvkm_vmm_pt *pd = vmm->pd;
+
+ for (page = vmm->func->page; page->shift; page++) {
+ if (page->shift == page_shift)
+ break;
+ }
+
+ if (WARN_ON(!page->shift))
+ return -EINVAL;
 
mutex_lock(&vmm->mutex.vmm);
- ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000,
+ ret = nvkm_vmm_get_locked(vmm, true, false, false, page_shift, 32, page_size,
&vmm->rm.rsvd);
mutex_unlock(&vmm->mutex.vmm);
if (ret)
return ret;
 
+ /* Some parts of RM expect the server-reserved area to be in a specific location. */
+ if (WARN_ON(vmm->rm.rsvd->addr != SPLIT_VAS_SERVER_RM_MANAGED_VA_START ||
+ vmm->rm.rsvd->size != SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE))
+ return -EINVAL;
+
ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object,
NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
sizeof(*ctrl));
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
 
- ctrl->pageSize = 0x20000000;
+ ctrl->pageSize = page_size;
ctrl->virtAddrLo = vmm->rm.rsvd->addr;
ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1;
- ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2;
- ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr;
- ctrl->levels[0].size = 0x20;
- ctrl->levels[0].aperture = 1;
- ctrl->levels[0].pageShift = 0x2f;
- ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr;
- ctrl->levels[1].size = 0x1000;
- ctrl->levels[1].aperture = 1;
- ctrl->levels[1].pageShift = 0x26;
- if (vmm->pd->pde[0]->pde[0]) {
- ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr;
- ctrl->levels[2].size = 0x1000;
- ctrl->levels[2].aperture = 1;
- ctrl->levels[2].pageShift = 0x1d;
+
+ for (desc = page->desc; desc->bits; desc++) {
+ ctrl->numLevelsToCopy++;
+ page_shift += desc->bits;
+ }
+ desc--;
+
+ for (int i = 0; i < ctrl->numLevelsToCopy; i++, desc--) {
+ page_shift -= desc->bits;
+
+ ctrl->levels[i].physAddress = pd->pt[0]->addr;
+ ctrl->levels[i].size = (1 << desc->bits) * desc->size;
+ ctrl->levels[i].aperture = 1;
+ ctrl->levels[i].pageShift = page_shift;
+
+ pd = pd->pde[0];
}
 
ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl);
+ } else {
+ NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object,
+ NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->physAddress = vmm->pd->pt[0]->addr;
+ ctrl->numEntries = 1 << vmm->func->page[0].desc->bits;
+ ctrl->flags = NVDEF(NV0080_CTRL_DMA_SET_PAGE_DIRECTORY, FLAGS, APERTURE, VIDMEM);
+ ctrl->hVASpace = vmm->rm.object.handle;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl);
+ if (ret == 0)
+ vmm->rm.external = true;
}
 
return ret;
}
 
+static int
+r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
+{
+ return r535_mmu_vaspace_new(vmm, NVKM_RM_VASPACE, true);
+}
+
static void
r535_mmu_dtor(struct nvkm_mmu *mmu)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild
new file mode 100644
index 000000000000..5db0e7009e1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/gsp/rm/r570/rm.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/gsp.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/client.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/fbsr.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/disp.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/fifo.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/ofa.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c
new file mode 100644
index 000000000000..87e6240662ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/client.h"
+
+static int
+r570_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle)
+{
+ NV0000_ALLOC_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args),
+ &client->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClient = client->object.handle;
+ args->processID = ~0;
+
+ return nvkm_gsp_rm_alloc_wr(&client->object, args);
+}
+
+const struct nvkm_rm_api_client
+r570_client = {
+ .ctor = r570_gsp_client_ctor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c
new file mode 100644
index 000000000000..a96e31c2d80b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include <engine/disp.h>
+#include <engine/disp/outp.h>
+
+#include "nvhw/drf.h"
+
+#include "nvrm/disp.h"
+
+static int
+r570_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *dmac)
+{
+ NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass,
+ sizeof(*args), dmac);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->channelInstance = inst;
+ args->offset = put_offset;
+ args->subDeviceId = BIT(0);
+
+ return nvkm_gsp_rm_alloc_wr(dmac, args);
+}
+
+static int
+r570_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory)
+{
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
+ NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ if (memory) {
+ switch (nvkm_memory_target(memory)) {
+ case NVKM_MEM_TARGET_NCOH:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 0;
+ ctrl->pbTargetAperture = PHYS_PCI;
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 1;
+ ctrl->pbTargetAperture = PHYS_PCI_COHERENT;
+ break;
+ case NVKM_MEM_TARGET_VRAM:
+ ctrl->addressSpace = ADDR_FBMEM;
+ ctrl->pbTargetAperture = PHYS_NVM;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ctrl->physicalAddr = nvkm_memory_addr(memory);
+ ctrl->limit = nvkm_memory_size(memory) - 1;
+ }
+
+ ctrl->hclass = oclass;
+ ctrl->channelInstance = inst;
+ ctrl->valid = ((oclass & 0xff) != 0x7a) ? 1 : 0;
+ ctrl->subDeviceId = BIT(0);
+
+ return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static int
+r570_dp_set_indexed_link_rates(struct nvkm_outp *outp)
+{
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+
+ if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
+ return -EINVAL;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(outp->index);
+ for (int i = 0; i < outp->dp.rates; i++)
+ ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200;
+
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r570_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm)
+{
+ NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->sorIndex = ~0;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+ *plink_bw = 0x06;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+ *plink_bw = 0x0a;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+ *plink_bw = 0x14;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+ *plink_bw = 0x1e;
+ break;
+ default:
+ *plink_bw = 0x00;
+ break;
+ }
+
+ *pmst = ctrl->bIsMultistreamSupported;
+ *pwm = ctrl->bHasIncreasedWatermarkLimits;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_bl_ctrl(struct nvkm_disp *disp, unsigned display_id, bool set, int *pval)
+{
+ u32 cmd = set ? NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS :
+ NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS;
+ NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(display_id);
+ ctrl->brightness = *pval;
+ ctrl->brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pval = ctrl->brightness;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_active(struct nvkm_disp *disp, unsigned head, u32 *displayid)
+{
+ NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->head = head;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ *displayid = ctrl->displayId;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+static int
+r570_disp_get_connect_state(struct nvkm_disp *disp, unsigned display_id)
+{
+ NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayMask = BIT(display_id);
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret == 0 && (ctrl->displayMask & BIT(display_id)))
+ ret = 1;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
+
+static int
+r570_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->displayMask;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_static_info(struct nvkm_disp *disp)
+{
+ NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = disp->engine.subdev.device->gsp;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->wndw.mask = ctrl->windowPresentMask;
+ disp->wndw.nr = fls(disp->wndw.mask);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+const struct nvkm_rm_api_disp
+r570_disp = {
+ .get_static_info = r570_disp_get_static_info,
+ .get_supported = r570_disp_get_supported,
+ .get_connect_state = r570_disp_get_connect_state,
+ .get_active = r570_disp_get_active,
+ .bl_ctrl = r570_bl_ctrl,
+ .dp = {
+ .get_caps = r570_dp_get_caps,
+ .set_indexed_link_rates = r570_dp_set_indexed_link_rates,
+ },
+ .chan = {
+ .set_pushbuf = r570_disp_chan_set_pushbuf,
+ .dmac_alloc = r570_dmac_alloc,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c
new file mode 100644
index 000000000000..2945d5b4e570
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <subdev/instmem/priv.h>
+#include <subdev/bar.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+
+#include "nvrm/fbsr.h"
+#include "nvrm/fifo.h"
+
+static int
+r570_fbsr_suspend_channels(struct nvkm_gsp *gsp, bool suspend)
+{
+ NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->bDisableActiveChannels = suspend;
+
+ return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static void
+r570_fbsr_resume(struct nvkm_gsp *gsp)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_instobj *iobj;
+ struct nvkm_vmm *vmm;
+
+ /* Restore BAR2 page tables via BAR0 window, and re-enable BAR2. */
+ list_for_each_entry(iobj, &imem->boot, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+
+ device->bar->bar2 = true;
+
+ vmm = nvkm_bar_bar2_vmm(device);
+ vmm->func->flush(vmm, 0);
+
+ /* Restore remaining BAR2 allocations (including BAR1 page tables) via BAR2. */
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+
+ vmm = nvkm_bar_bar1_vmm(device);
+ vmm->func->flush(vmm, 0);
+
+ /* Resume channel scheduling. */
+ r570_fbsr_suspend_channels(device->gsp, false);
+
+ /* Finish cleaning up. */
+ r535_fbsr_resume(gsp);
+}
+
+static int
+r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size)
+{
+ NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
+ struct nvkm_gsp_object memlist;
+ int ret;
+
+ ret = r535_fbsr_memlist(&gsp->internal.device, 0xcaf00003, NVKM_MEM_TARGET_HOST,
+ 0, size, sgt, &memlist);
+ if (ret)
+ return ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->hClient = gsp->internal.client.object.handle;
+ ctrl->hSysMem = memlist.handle;
+ ctrl->sysmemAddrOfSuspendResumeData = gsp->sr.meta.addr;
+ ctrl->bEnteringGcoffState = 1;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+ if (ret)
+ return ret;
+
+ nvkm_gsp_rm_free(&memlist);
+ return 0;
+}
+
+static int
+r570_fbsr_suspend(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_instobj *iobj;
+ u64 size;
+ int ret;
+
+ /* Stop channel scheduling. */
+ r570_fbsr_suspend_channels(gsp, true);
+
+ /* Save BAR2 allocations to system memory. */
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->preserve) {
+ ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
+ }
+
+ list_for_each_entry(iobj, &imem->boot, head) {
+ ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
+
+ /* Disable BAR2 access. */
+ device->bar->bar2 = false;
+
+ /* Allocate system memory to hold RM's VRAM allocations across suspend. */
+ size = gsp->fb.heap.size;
+ size += gsp->fb.rsvd_size;
+ size += gsp->fb.bios.vga_workspace.size;
+ nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", size);
+
+ ret = nvkm_gsp_sg(device, size, &gsp->sr.fbsr);
+ if (ret)
+ return ret;
+
+ /* Initialise FBSR on RM. */
+ ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size);
+ if (ret) {
+ nvkm_gsp_sg_free(device, &gsp->sr.fbsr);
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct nvkm_rm_api_fbsr
+r570_fbsr = {
+ .suspend = r570_fbsr_suspend,
+ .resume = r570_fbsr_resume,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c
new file mode 100644
index 000000000000..79132805cfcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include <subdev/mmu.h>
+#include <engine/fifo/priv.h>
+#include <engine/fifo/chan.h>
+#include <engine/fifo/runl.h>
+
+#include "nvhw/drf.h"
+
+#include "nvrm/fifo.h"
+#include "nvrm/engine.h"
+
+#define CHID_PER_USERD 8
+
+static int
+r570_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq,
+ bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *chan)
+{
+ struct nvkm_gsp *gsp = device->object.client->gsp;
+ struct nvkm_fifo *fifo = gsp->subdev.device->fifo;
+ const int userd_p = chid / CHID_PER_USERD;
+ const int userd_i = chid % CHID_PER_USERD;
+ NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&device->object, handle,
+ fifo->func->chan.user.oclass, sizeof(*args), chan);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->gpFifoOffset = gpfifo_offset;
+ args->gpFifoEntries = gpfifo_length / 8;
+
+ args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
+ args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq);
+ if (!priv)
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
+ else
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
+ args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);
+
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);
+
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
+ args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+
+ args->hVASpace = vmm->rm.object.handle;
+ args->engineType = nv2080_engine_type;
+
+ args->instanceMem.base = inst_addr;
+ args->instanceMem.size = fifo->func->chan.func->inst->size;
+ args->instanceMem.addressSpace = 2;
+ args->instanceMem.cacheAttrib = 1;
+
+ args->userdMem.base = userd_addr;
+ args->userdMem.size = fifo->func->chan.func->userd->size;
+ args->userdMem.addressSpace = 2;
+ args->userdMem.cacheAttrib = 1;
+
+ args->ramfcMem.base = inst_addr;
+ args->ramfcMem.size = 0x200;
+ args->ramfcMem.addressSpace = 2;
+ args->ramfcMem.cacheAttrib = 1;
+
+ args->mthdbufMem.base = mthdbuf_addr;
+ args->mthdbufMem.size = fifo->rm.mthdbuf_size;
+ args->mthdbufMem.addressSpace = 1;
+ args->mthdbufMem.cacheAttrib = 0;
+
+ if (!priv)
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
+ else
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+ return nvkm_gsp_rm_alloc_wr(chan, args);
+}
+
+static int
+r570_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
+{
+ rpc_rc_triggered_v17_02 *msg = repv;
+ struct nvkm_gsp *gsp = priv;
+
+ if (WARN_ON(repc < sizeof(*msg)))
+ return -EINVAL;
+
+ nvkm_error(&gsp->subdev, "rc engn:%08x chid:%d gfid:%d level:%d type:%d scope:%d part:%d "
+ "fault_addr:%08x%08x fault_type:%08x\n",
+ msg->nv2080EngineType, msg->chid, msg->gfid, msg->exceptLevel, msg->exceptType,
+ msg->scope, msg->partitionAttributionId,
+ msg->mmuFaultAddrHi, msg->mmuFaultAddrLo, msg->mmuFaultType);
+
+ r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid);
+ return 0;
+}
+
+static int
+r570_fifo_ectx_size(struct nvkm_fifo *fifo)
+{
+ NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO,
+ sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
+ nvkm_runl_foreach(runl, fifo) {
+ nvkm_runl_foreach_engn(engn, runl) {
+ if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
+ engn->rm.size =
+ ctrl->constructedFalconsTable[i].ctxBufferSize;
+ break;
+ }
+ }
+ }
+ }
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
+r570_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080)
+{
+#define RM_ENGINE_TYPE(RM,NVKM,INST) \
+ RM_ENGINE_TYPE_##RM: \
+ *ptype = NVKM_ENGINE_##NVKM; \
+ *p2080 = NV2080_ENGINE_TYPE_##RM; \
+ return INST
+
+ switch (rm) {
+ case RM_ENGINE_TYPE( GR0, GR, 0);
+ case RM_ENGINE_TYPE( COPY0, CE, 0);
+ case RM_ENGINE_TYPE( COPY1, CE, 1);
+ case RM_ENGINE_TYPE( COPY2, CE, 2);
+ case RM_ENGINE_TYPE( COPY3, CE, 3);
+ case RM_ENGINE_TYPE( COPY4, CE, 4);
+ case RM_ENGINE_TYPE( COPY5, CE, 5);
+ case RM_ENGINE_TYPE( COPY6, CE, 6);
+ case RM_ENGINE_TYPE( COPY7, CE, 7);
+ case RM_ENGINE_TYPE( COPY8, CE, 8);
+ case RM_ENGINE_TYPE( COPY9, CE, 9);
+ case RM_ENGINE_TYPE( COPY10, CE, 10);
+ case RM_ENGINE_TYPE( COPY11, CE, 11);
+ case RM_ENGINE_TYPE( COPY12, CE, 12);
+ case RM_ENGINE_TYPE( COPY13, CE, 13);
+ case RM_ENGINE_TYPE( COPY14, CE, 14);
+ case RM_ENGINE_TYPE( COPY15, CE, 15);
+ case RM_ENGINE_TYPE( COPY16, CE, 16);
+ case RM_ENGINE_TYPE( COPY17, CE, 17);
+ case RM_ENGINE_TYPE( COPY18, CE, 18);
+ case RM_ENGINE_TYPE( COPY19, CE, 19);
+ case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0);
+ case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1);
+ case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2);
+ case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3);
+ case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4);
+ case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5);
+ case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6);
+ case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7);
+ case RM_ENGINE_TYPE( NVENC0, NVENC, 0);
+ case RM_ENGINE_TYPE( NVENC1, NVENC, 1);
+ case RM_ENGINE_TYPE( NVENC2, NVENC, 2);
+ case RM_ENGINE_TYPE( NVENC3, NVENC, 3);
+ case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0);
+ case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1);
+ case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2);
+ case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3);
+ case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4);
+ case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5);
+ case RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6);
+ case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7);
+ case RM_ENGINE_TYPE( SW, SW, 0);
+ case RM_ENGINE_TYPE( SEC2, SEC2, 0);
+ case RM_ENGINE_TYPE( OFA0, OFA, 0);
+ case RM_ENGINE_TYPE( OFA1, OFA, 1);
+ default:
+ return -EINVAL;
+ }
+#undef RM_ENGINE_TYPE
+}
+
+const struct nvkm_rm_api_fifo
+r570_fifo = {
+ .xlat_rm_engine_type = r570_fifo_xlat_rm_engine_type,
+ .ectx_size = r570_fifo_ectx_size,
+ .rsvd_chids = 1,
+ .rc_triggered = r570_fifo_rc_triggered,
+ .chan = {
+ .alloc = r570_chan_alloc,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c
new file mode 100644
index 000000000000..b6cced9b8aa1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/gr.h>
+
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+#include <engine/fifo/chid.h>
+#include <engine/gr/priv.h>
+
+#include "nvrm/gr.h"
+#include "nvrm/engine.h"
+
+int
+r570_gr_tpc_mask(struct nvkm_gsp *gsp, int gpc, u32 *pmask)
+{
+ NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->gpcId = gpc;
+
+ ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pmask = ctrl->tpcMask;
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+int
+r570_gr_gpc_mask(struct nvkm_gsp *gsp, u32 *pmask)
+{
+ NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->gpcMask;
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
+r570_gr_scrubber_ctrl(struct r535_gr *gr, bool teardown)
+{
+ NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gr->scrubber.vmm->rm.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->bTeardown = teardown;
+
+ return nvkm_gsp_rm_ctrl_wr(&gr->scrubber.vmm->rm.device.subdevice, ctrl);
+}
+
+static void
+r570_gr_scrubber_fini(struct r535_gr *gr)
+{
+ /* Teardown scrubber channel on RM. */
+ if (gr->scrubber.enabled) {
+ WARN_ON(r570_gr_scrubber_ctrl(gr, true));
+ gr->scrubber.enabled = false;
+ }
+
+ /* Free scrubber channel. */
+ nvkm_gsp_rm_free(&gr->scrubber.threed);
+ nvkm_gsp_rm_free(&gr->scrubber.chan);
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ nvkm_vmm_put(gr->scrubber.vmm, &gr->scrubber.ctxbuf.vma[i]);
+ nvkm_memory_unref(&gr->scrubber.ctxbuf.mem[i]);
+ }
+
+ nvkm_vmm_unref(&gr->scrubber.vmm);
+ nvkm_memory_unref(&gr->scrubber.inst);
+}
+
+static int
+r570_gr_scrubber_init(struct r535_gr *gr)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
+ int ret;
+
+ /* Scrubber channel only required on TU10x. */
+ switch (device->chipset) {
+ case 0x162:
+ case 0x164:
+ case 0x166:
+ break;
+ default:
+ return 0;
+ }
+
+ if (gr->scrubber.chid < 0) {
+ gr->scrubber.chid = nvkm_chid_get(device->fifo->chid, NULL);
+ if (gr->scrubber.chid < 0)
+ return gr->scrubber.chid;
+ }
+
+ /* Allocate scrubber channel. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ 0x2000 + device->fifo->rm.mthdbuf_size, 0, true,
+ &gr->scrubber.inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grScrubberVmm",
+ &gr->scrubber.vmm);
+ if (ret)
+ goto done;
+
+ ret = r535_mmu_vaspace_new(gr->scrubber.vmm, KGRAPHICS_SCRUBBER_HANDLE_VAS, false);
+ if (ret)
+ goto done;
+
+ ret = rm->api->fifo->chan.alloc(&gr->scrubber.vmm->rm.device, KGRAPHICS_SCRUBBER_HANDLE_CHANNEL,
+ NV2080_ENGINE_TYPE_GR0, 0, false, gr->scrubber.chid,
+ nvkm_memory_addr(gr->scrubber.inst),
+ nvkm_memory_addr(gr->scrubber.inst) + 0x1000,
+ nvkm_memory_addr(gr->scrubber.inst) + 0x2000,
+ gr->scrubber.vmm, 0, 0x1000, &gr->scrubber.chan);
+ if (ret)
+ goto done;
+
+ ret = r535_gr_promote_ctx(gr, false, gr->scrubber.vmm, gr->scrubber.ctxbuf.mem,
+ gr->scrubber.ctxbuf.vma, &gr->scrubber.chan);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_rm_alloc(&gr->scrubber.chan, KGRAPHICS_SCRUBBER_HANDLE_3DOBJ,
+ rm->gpu->gr.class.threed, 0, &gr->scrubber.threed);
+ if (ret)
+ goto done;
+
+ /* Initialise scrubber channel on RM. */
+ ret = r570_gr_scrubber_ctrl(gr, false);
+ if (ret)
+ goto done;
+
+ gr->scrubber.enabled = true;
+
+done:
+ if (ret)
+ r570_gr_scrubber_fini(gr);
+
+ return ret;
+}
+
+static int
+r570_gr_get_ctxbufs_info(struct r535_gr *gr)
+{
+ NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_gsp *gsp = subdev->device->gsp;
+
+ info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+ sizeof(*info));
+ if (WARN_ON(IS_ERR(info)))
+ return PTR_ERR(info);
+
+ for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++)
+ r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+ return 0;
+}
+
+const struct nvkm_rm_api_gr
+r570_gr = {
+ .get_ctxbufs_info = r570_gr_get_ctxbufs_info,
+ .scrubber.init = r570_gr_scrubber_init,
+ .scrubber.fini = r570_gr_scrubber_fini,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c
new file mode 100644
index 000000000000..9d2fa4e66d59
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+#include <rm/rpc.h>
+
+#include <asm-generic/video.h>
+
+#include "nvrm/gsp.h"
+#include "nvrm/rpcfn.h"
+#include "nvrm/msgfn.h"
+
+#include <core/pci.h>
+#include <subdev/pci/priv.h>
+
+static u32
+r570_gsp_sr_data_size(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
+ return (meta->frtsOffset + meta->frtsSize) -
+ (meta->nonWprHeapOffset + meta->nonWprHeapSize);
+}
+
+static void
+r570_gsp_drop_post_nocat_record(struct nvkm_gsp *gsp)
+{
+ if (gsp->subdev.debug < NV_DBG_DEBUG) {
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD, NULL, NULL);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE, NULL, NULL);
+ }
+}
+
+static bool
+r570_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst)
+{
+ switch (mc_engine_idx) {
+ case MC_ENGINE_IDX_GSP:
+ *ptype = NVKM_SUBDEV_GSP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_DISP:
+ *ptype = NVKM_ENGINE_DISP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE19:
+ *ptype = NVKM_ENGINE_CE;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0;
+ return true;
+ case MC_ENGINE_IDX_GR0:
+ *ptype = NVKM_ENGINE_GR;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+ *ptype = NVKM_ENGINE_NVDEC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0;
+ return true;
+ case MC_ENGINE_IDX_NVENC ... MC_ENGINE_IDX_NVENC3:
+ *ptype = NVKM_ENGINE_NVENC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVENC;
+ return true;
+ case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+ *ptype = NVKM_ENGINE_NVJPG;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0;
+ return true;
+ case MC_ENGINE_IDX_OFA0 ... MC_ENGINE_IDX_OFA1:
+ *ptype = NVKM_ENGINE_OFA;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_OFA0;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int
+r570_gsp_get_static_info(struct nvkm_gsp *gsp)
+{
+ GspStaticConfigInfo *rpc;
+ u32 gpc_mask;
+ u32 tpc_mask;
+ int ret;
+
+ rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ gsp->internal.client.object.client = &gsp->internal.client;
+ gsp->internal.client.object.parent = NULL;
+ gsp->internal.client.object.handle = rpc->hInternalClient;
+ gsp->internal.client.gsp = gsp;
+ INIT_LIST_HEAD(&gsp->internal.client.events);
+
+ gsp->internal.device.object.client = &gsp->internal.client;
+ gsp->internal.device.object.parent = &gsp->internal.client.object;
+ gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+ gsp->internal.device.subdevice.client = &gsp->internal.client;
+ gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+ gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+ gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+ gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+ r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams);
+
+ if (gsp->rm->wpr->offset_set_by_acr) {
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
+ meta->nonWprHeapOffset = rpc->fwWprLayoutOffset.nonWprHeapOffset;
+ meta->frtsOffset = rpc->fwWprLayoutOffset.frtsOffset;
+ }
+
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ ret = r570_gr_gpc_mask(gsp, &gpc_mask);
+ if (ret)
+ return ret;
+
+ for (int gpc = 0; gpc < 32; gpc++) {
+ if (gpc_mask & BIT(gpc)) {
+ ret = r570_gr_tpc_mask(gsp, gpc, &tpc_mask);
+ if (ret)
+ return ret;
+
+ gsp->gr.tpcs += hweight32(tpc_mask);
+ gsp->gr.gpcs++;
+ }
+ }
+
+ return 0;
+}
+
+static void
+r570_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
+{
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev);
+
+ if (!handle)
+ return;
+
+ acpi->bValid = 1;
+
+ r535_gsp_acpi_dod(handle, &acpi->dodMethodData);
+ r535_gsp_acpi_jt(handle, &acpi->jtMethodData);
+ r535_gsp_acpi_caps(handle, &acpi->capsMethodData);
+#endif
+}
+
+static int
+r570_gsp_set_system_info(struct nvkm_gsp *gsp)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ struct pci_dev *pdev = container_of(device, struct nvkm_device_pci, device)->pdev;
+ GspSystemInfo *info;
+
+ if (WARN_ON(device->type == NVKM_DEVICE_TEGRA))
+ return -ENOSYS;
+
+ info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info));
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB);
+ info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST);
+ info->nvDomainBusDeviceFunc = pci_dev_id(pdev);
+ info->maxUserVa = TASK_SIZE;
+ info->pciConfigMirrorBase = device->pci->func->cfg.addr;
+ info->pciConfigMirrorSize = device->pci->func->cfg.size;
+ info->PCIDeviceID = (pdev->device << 16) | pdev->vendor;
+ info->PCISubDeviceID = (pdev->subsystem_device << 16) | pdev->subsystem_vendor;
+ info->PCIRevisionID = pdev->revision;
+ r570_gsp_acpi_info(gsp, &info->acpiMethodData);
+ info->bIsPrimary = video_is_primary_device(device->dev);
+ info->bPreserveVideoMemoryAllocations = false;
+
+ return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
+}
+
+static void
+r570_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume)
+{
+ GSP_ARGUMENTS_CACHED *args;
+
+ args = gsp->rmargs.data;
+ args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
+ args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
+ args->messageQueueInitArguments.cmdQueueOffset =
+ (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
+ args->messageQueueInitArguments.statQueueOffset =
+ (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;
+
+ if (!resume) {
+ args->srInitArguments.oldLevel = 0;
+ args->srInitArguments.flags = 0;
+ args->srInitArguments.bInPMTransition = 0;
+ } else {
+ args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
+ args->srInitArguments.flags = 0;
+ args->srInitArguments.bInPMTransition = 1;
+ }
+
+ args->bDmemStack = 1;
+}
+
+const struct nvkm_rm_api_gsp
+r570_gsp = {
+ .set_rmargs = r570_gsp_set_rmargs,
+ .set_system_info = r570_gsp_set_system_info,
+ .get_static_info = r570_gsp_get_static_info,
+ .xlat_mc_engine_idx = r570_gsp_xlat_mc_engine_idx,
+ .drop_post_nocat_record = r570_gsp_drop_post_nocat_record,
+ .sr_data_size = r570_gsp_sr_data_size,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h
new file mode 100644
index 000000000000..e8714e0abc37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CLIENT_H__
+#define __NVRM_CLIENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+ NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+ NvU32 processID;
+ char processName[NV_PROC_NAME_MAX_LENGTH];
+ NV_DECLARE_ALIGNED(NvP64 pOsPidInfo, 8);
+} NV0000_ALLOC_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h
new file mode 100644
index 000000000000..06e972835d77
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DISP_H__
+#define __NVRM_DISP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+ NvU32 feHwSysCap;
+ NvU32 windowPresentMask;
+ NvBool bFbRemapperEnabled;
+ NvU32 numHeads;
+ NvU32 i2cPort;
+ NvU32 internalDispActiveMask;
+ NvU32 embeddedDisplayPortMask;
+ NvBool bExternalMuxSupported;
+ NvBool bInternalMuxSupported;
+ NvU32 numDispChannels;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730107U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayMask;
+ NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_MAX_CONNECTORS 4U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 DDCPartners;
+ NvU32 count;
+ struct {
+ NvU32 index;
+ NvU32 type;
+ NvU32 location;
+ } data[NV0073_CTRL_MAX_CONNECTORS];
+ NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+ NvBool bDscSupported;
+ NvU32 encoderColorFormatMask;
+ NvU32 lineBufferSizeKB;
+ NvU32 rateBufferSizeKB;
+ NvU32 bitsPerPixelPrecision;
+ NvU32 maxNumHztSlices;
+ NvU32 lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 sorIndex;
+ NvU32 maxLinkRate;
+ NvU32 dpVersionsSupported;
+ NvU32 UHBRSupportedByGpu;
+ NvU32 minPClkForCompressed;
+ NvBool bIsMultistreamSupported;
+ NvBool bIsSCEnabled;
+ NvBool bHasIncreasedWatermarkLimits;
+ NvBool bIsPC2Disabled;
+ NvBool isSingleHeadMSTSupported;
+ NvBool bFECSupported;
+ NvBool bIsTrainPhyRepeater;
+ NvBool bOverrideLinkBw;
+ NvBool bUseRgFlushSequence;
+ NvBool bSupportDPDownSpread;
+ NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0 2:2
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0 2:2
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730108U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 displayMask;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 UHBRSupportedByDfp;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR 13:13
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS 0:0
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS 1:1
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS 2:2
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_TRUE (0x00000001U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x73010cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 flags;
+ NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 brightness;
+ NvBool bUncalibrated;
+ NvU8 brightnessType;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+ // In
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+ // Out
+ NvU16 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+ NvU8 linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 cmd;
+ NvU32 data;
+ NvU32 err;
+ NvU32 retryTimeMs;
+ NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 sorIndex;
+ NvU32 dpLink;
+
+ NvBool bEnableOverride;
+ NvBool bMST;
+ NvU32 singleHeadMultistreamMode;
+ NvU32 hBlankSym;
+ NvU32 vBlankSym;
+ NvU32 colorFormat;
+ NvBool bEnableTwoHeadOneOr;
+
+ struct {
+ NvU32 slotStart;
+ NvU32 slotEnd;
+ NvU32 PBN;
+ NvU32 Timeslice;
+ NvBool sendACT; // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT
+ NvU32 singleHeadMSTPipeline;
+ NvBool bEnableAudioOverRightPanel;
+ } MST;
+
+ struct {
+ NvBool bEnhancedFraming;
+ NvU32 tuSize;
+ NvU32 waterMark;
+ NvBool bEnableAudioOverRightPanel;
+ } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+ NvU32 addressSpace;
+ NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NvU32 cacheSnoop;
+ NvU32 hclass;
+ NvU32 channelInstance;
+ NvBool valid;
+ NvU32 pbTargetAperture;
+ NvU32 channelPBSize;
+ NvU32 subDeviceId;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define ADDR_SYSMEM (1)
+
+#define ADDR_FBMEM 2 // Frame buffer memory space
+
+typedef enum
+{
+ PB_SIZE_4KB = 0,
+ PB_SIZE_8KB,
+ PB_SIZE_16KB,
+ PB_SIZE_32KB,
+ PB_SIZE_64KB
+} ChannelPBSize;
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // Note that core channel has only one instance
+ // while all others have two (one per head).
+ NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+ NvU32 offset; // Initial offset for put/get, usually zero.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+ NvU32 flags;
+ ChannelPBSize channelPBSize; // Size of Push Buffer requested by client (allowed values in enum)
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
+
+ NvU32 subDeviceId; // One-hot encoded subDeviceId (i.e. SDM) that will be used to address the channel in the pushbuffer stream (via SSDM method)
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100 1
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT1000 2
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_NITS 3
+
+typedef enum
+{
+ IOVA,
+ PHYS_NVM,
+ PHYS_PCI,
+ PHYS_PCI_COHERENT
+} PBTARGETAPERTURE;
+#endif
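
The flag macros above that expand to a bare HI:LO pair (for example, NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT expands to 29:26) follow the usual RM field-range convention: consumers split the pair with the conditional operator to recover the high and low bit. A minimal sketch of such helpers; the DRF_* names and the wrapper function are illustrative, not part of this patch:

/* 1 ? 29:26 evaluates to 29 and 0 ? 29:26 evaluates to 26, which is how a
 * "HI:LO" field macro is split into its high and low bit. */
#define DRF_HI(r)      (1 ? r)
#define DRF_LO(r)      (0 ? r)
#define DRF_MASK(r)    ((~0U >> (31 - DRF_HI(r))) & (~0U << DRF_LO(r)))
#define DRF_GET(r, v)  (((v) & DRF_MASK(r)) >> DRF_LO(r))

/* e.g. pull the DP PHY repeater count field out of a DFP flags word */
static inline NvU32 dfp_phy_repeater_count(NvU32 flags)
{
	return DRF_GET(NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT, flags);
}
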
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h
new file mode 100644
index 000000000000..7997050a4f29
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ENGINE_H__
+#define __NVRM_ENGINE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define MC_ENGINE_IDX_NULL 0 // This must be 0
+#define MC_ENGINE_IDX_TMR 1
+#define MC_ENGINE_IDX_DISP 2
+#define MC_ENGINE_IDX_FB 3
+#define MC_ENGINE_IDX_FIFO 4
+#define MC_ENGINE_IDX_VIDEO 5
+#define MC_ENGINE_IDX_MD 6
+#define MC_ENGINE_IDX_BUS 7
+#define MC_ENGINE_IDX_PMGR 8
+#define MC_ENGINE_IDX_VP2 9
+#define MC_ENGINE_IDX_CIPHER 10
+#define MC_ENGINE_IDX_BIF 11
+#define MC_ENGINE_IDX_PPP 12
+#define MC_ENGINE_IDX_PRIVRING 13
+#define MC_ENGINE_IDX_PMU 14
+#define MC_ENGINE_IDX_CE0 15
+#define MC_ENGINE_IDX_CE1 16
+#define MC_ENGINE_IDX_CE2 17
+#define MC_ENGINE_IDX_CE3 18
+#define MC_ENGINE_IDX_CE4 19
+#define MC_ENGINE_IDX_CE5 20
+#define MC_ENGINE_IDX_CE6 21
+#define MC_ENGINE_IDX_CE7 22
+#define MC_ENGINE_IDX_CE8 23
+#define MC_ENGINE_IDX_CE9 24
+#define MC_ENGINE_IDX_CE10 25
+#define MC_ENGINE_IDX_CE11 26
+#define MC_ENGINE_IDX_CE12 27
+#define MC_ENGINE_IDX_CE13 28
+#define MC_ENGINE_IDX_CE14 29
+#define MC_ENGINE_IDX_CE15 30
+#define MC_ENGINE_IDX_CE16 31
+#define MC_ENGINE_IDX_CE17 32
+#define MC_ENGINE_IDX_CE18 33
+#define MC_ENGINE_IDX_CE19 34
+#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE19
+#define MC_ENGINE_IDX_VIC 35
+#define MC_ENGINE_IDX_ISOHUB 36
+#define MC_ENGINE_IDX_VGPU 37
+#define MC_ENGINE_IDX_NVENC 38
+#define MC_ENGINE_IDX_NVENC1 39
+#define MC_ENGINE_IDX_NVENC2 40
+#define MC_ENGINE_IDX_NVENC3 41
+#define MC_ENGINE_IDX_C2C 42
+#define MC_ENGINE_IDX_LTC 43
+#define MC_ENGINE_IDX_FBHUB 44
+#define MC_ENGINE_IDX_HDACODEC 45
+#define MC_ENGINE_IDX_GMMU 46
+#define MC_ENGINE_IDX_SEC2 47
+#define MC_ENGINE_IDX_FSP 48
+#define MC_ENGINE_IDX_NVLINK 49
+#define MC_ENGINE_IDX_GSP 50
+#define MC_ENGINE_IDX_NVJPG 51
+#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
+#define MC_ENGINE_IDX_NVJPEG1 52
+#define MC_ENGINE_IDX_NVJPEG2 53
+#define MC_ENGINE_IDX_NVJPEG3 54
+#define MC_ENGINE_IDX_NVJPEG4 55
+#define MC_ENGINE_IDX_NVJPEG5 56
+#define MC_ENGINE_IDX_NVJPEG6 57
+#define MC_ENGINE_IDX_NVJPEG7 58
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT 59
+#define MC_ENGINE_IDX_ACCESS_CNTR 60
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 61
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 62
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 63
+#define MC_ENGINE_IDX_INFO_FAULT 64
+#define MC_ENGINE_IDX_BSP 65
+#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
+#define MC_ENGINE_IDX_NVDEC1 66
+#define MC_ENGINE_IDX_NVDEC2 67
+#define MC_ENGINE_IDX_NVDEC3 68
+#define MC_ENGINE_IDX_NVDEC4 69
+#define MC_ENGINE_IDX_NVDEC5 70
+#define MC_ENGINE_IDX_NVDEC6 71
+#define MC_ENGINE_IDX_NVDEC7 72
+#define MC_ENGINE_IDX_CPU_DOORBELL 73
+#define MC_ENGINE_IDX_PRIV_DOORBELL 74
+#define MC_ENGINE_IDX_MMU_ECC_ERROR 75
+#define MC_ENGINE_IDX_BLG 76
+#define MC_ENGINE_IDX_PERFMON 77
+#define MC_ENGINE_IDX_BUF_RESET 78
+#define MC_ENGINE_IDX_XBAR 79
+#define MC_ENGINE_IDX_ZPW 80
+#define MC_ENGINE_IDX_OFA0 81
+#define MC_ENGINE_IDX_OFA1 82
+#define MC_ENGINE_IDX_TEGRA 83
+#define MC_ENGINE_IDX_GR 84
+#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
+#define MC_ENGINE_IDX_GR1 85
+#define MC_ENGINE_IDX_GR2 86
+#define MC_ENGINE_IDX_GR3 87
+#define MC_ENGINE_IDX_GR4 88
+#define MC_ENGINE_IDX_GR5 89
+#define MC_ENGINE_IDX_GR6 90
+#define MC_ENGINE_IDX_GR7 91
+#define MC_ENGINE_IDX_ESCHED 92
+#define MC_ENGINE_IDX_ESCHED__SIZE 64
+#define MC_ENGINE_IDX_GR_FECS_LOG 156
+#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG
+#define MC_ENGINE_IDX_GR1_FECS_LOG 157
+#define MC_ENGINE_IDX_GR2_FECS_LOG 158
+#define MC_ENGINE_IDX_GR3_FECS_LOG 159
+#define MC_ENGINE_IDX_GR4_FECS_LOG 160
+#define MC_ENGINE_IDX_GR5_FECS_LOG 161
+#define MC_ENGINE_IDX_GR6_FECS_LOG 162
+#define MC_ENGINE_IDX_GR7_FECS_LOG 163
+#define MC_ENGINE_IDX_TMR_SWRL 164
+#define MC_ENGINE_IDX_DISP_GSP 165
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 166
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 167
+#define MC_ENGINE_IDX_PXUC 168
+#define MC_ENGINE_IDX_SYSLTC 169
+#define MC_ENGINE_IDX_LRCC 170
+#define MC_ENGINE_IDX_GSPLITE 171
+#define MC_ENGINE_IDX_GSPLITE0 MC_ENGINE_IDX_GSPLITE
+#define MC_ENGINE_IDX_GSPLITE1 172
+#define MC_ENGINE_IDX_GSPLITE2 173
+#define MC_ENGINE_IDX_GSPLITE3 174
+#define MC_ENGINE_IDX_GSPLITE_MAX MC_ENGINE_IDX_GSPLITE3
+#define MC_ENGINE_IDX_DPAUX 175
+#define MC_ENGINE_IDX_DISP_LOW 176
+#define MC_ENGINE_IDX_MAX 177
+
+typedef enum
+{
+ RM_ENGINE_TYPE_NULL = (0x00000000),
+ RM_ENGINE_TYPE_GR0 = (0x00000001),
+ RM_ENGINE_TYPE_GR1 = (0x00000002),
+ RM_ENGINE_TYPE_GR2 = (0x00000003),
+ RM_ENGINE_TYPE_GR3 = (0x00000004),
+ RM_ENGINE_TYPE_GR4 = (0x00000005),
+ RM_ENGINE_TYPE_GR5 = (0x00000006),
+ RM_ENGINE_TYPE_GR6 = (0x00000007),
+ RM_ENGINE_TYPE_GR7 = (0x00000008),
+ RM_ENGINE_TYPE_COPY0 = (0x00000009),
+ RM_ENGINE_TYPE_COPY1 = (0x0000000a),
+ RM_ENGINE_TYPE_COPY2 = (0x0000000b),
+ RM_ENGINE_TYPE_COPY3 = (0x0000000c),
+ RM_ENGINE_TYPE_COPY4 = (0x0000000d),
+ RM_ENGINE_TYPE_COPY5 = (0x0000000e),
+ RM_ENGINE_TYPE_COPY6 = (0x0000000f),
+ RM_ENGINE_TYPE_COPY7 = (0x00000010),
+ RM_ENGINE_TYPE_COPY8 = (0x00000011),
+ RM_ENGINE_TYPE_COPY9 = (0x00000012),
+ RM_ENGINE_TYPE_COPY10 = (0x00000013),
+ RM_ENGINE_TYPE_COPY11 = (0x00000014),
+ RM_ENGINE_TYPE_COPY12 = (0x00000015),
+ RM_ENGINE_TYPE_COPY13 = (0x00000016),
+ RM_ENGINE_TYPE_COPY14 = (0x00000017),
+ RM_ENGINE_TYPE_COPY15 = (0x00000018),
+ RM_ENGINE_TYPE_COPY16 = (0x00000019),
+ RM_ENGINE_TYPE_COPY17 = (0x0000001a),
+ RM_ENGINE_TYPE_COPY18 = (0x0000001b),
+ RM_ENGINE_TYPE_COPY19 = (0x0000001c),
+ RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
+ RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
+ RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
+ RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
+ RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
+ RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
+ RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
+ RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
+ RM_ENGINE_TYPE_NVENC0 = (0x00000025),
+ RM_ENGINE_TYPE_NVENC1 = (0x00000026),
+ RM_ENGINE_TYPE_NVENC2 = (0x00000027),
+ // Bug 4175886 - Use this new value for all chips once GB20X is released
+ RM_ENGINE_TYPE_NVENC3 = (0x00000028),
+ RM_ENGINE_TYPE_VP = (0x00000029),
+ RM_ENGINE_TYPE_ME = (0x0000002a),
+ RM_ENGINE_TYPE_PPP = (0x0000002b),
+ RM_ENGINE_TYPE_MPEG = (0x0000002c),
+ RM_ENGINE_TYPE_SW = (0x0000002d),
+ RM_ENGINE_TYPE_TSEC = (0x0000002e),
+ RM_ENGINE_TYPE_VIC = (0x0000002f),
+ RM_ENGINE_TYPE_MP = (0x00000030),
+ RM_ENGINE_TYPE_SEC2 = (0x00000031),
+ RM_ENGINE_TYPE_HOST = (0x00000032),
+ RM_ENGINE_TYPE_DPU = (0x00000033),
+ RM_ENGINE_TYPE_PMU = (0x00000034),
+ RM_ENGINE_TYPE_FBFLCN = (0x00000035),
+ RM_ENGINE_TYPE_NVJPEG0 = (0x00000036),
+ RM_ENGINE_TYPE_NVJPEG1 = (0x00000037),
+ RM_ENGINE_TYPE_NVJPEG2 = (0x00000038),
+ RM_ENGINE_TYPE_NVJPEG3 = (0x00000039),
+ RM_ENGINE_TYPE_NVJPEG4 = (0x0000003a),
+ RM_ENGINE_TYPE_NVJPEG5 = (0x0000003b),
+ RM_ENGINE_TYPE_NVJPEG6 = (0x0000003c),
+ RM_ENGINE_TYPE_NVJPEG7 = (0x0000003d),
+ RM_ENGINE_TYPE_OFA0 = (0x0000003e),
+ RM_ENGINE_TYPE_OFA1 = (0x0000003f),
+ RM_ENGINE_TYPE_RESERVED40 = (0x00000040),
+ RM_ENGINE_TYPE_RESERVED41 = (0x00000041),
+ RM_ENGINE_TYPE_RESERVED42 = (0x00000042),
+ RM_ENGINE_TYPE_RESERVED43 = (0x00000043),
+ RM_ENGINE_TYPE_RESERVED44 = (0x00000044),
+ RM_ENGINE_TYPE_RESERVED45 = (0x00000045),
+ RM_ENGINE_TYPE_RESERVED46 = (0x00000046),
+ RM_ENGINE_TYPE_RESERVED47 = (0x00000047),
+ RM_ENGINE_TYPE_RESERVED48 = (0x00000048),
+ RM_ENGINE_TYPE_RESERVED49 = (0x00000049),
+ RM_ENGINE_TYPE_RESERVED4a = (0x0000004a),
+ RM_ENGINE_TYPE_RESERVED4b = (0x0000004b),
+ RM_ENGINE_TYPE_RESERVED4c = (0x0000004c),
+ RM_ENGINE_TYPE_RESERVED4d = (0x0000004d),
+ RM_ENGINE_TYPE_RESERVED4e = (0x0000004e),
+ RM_ENGINE_TYPE_RESERVED4f = (0x0000004f),
+ RM_ENGINE_TYPE_RESERVED50 = (0x00000050),
+ RM_ENGINE_TYPE_RESERVED51 = (0x00000051),
+ RM_ENGINE_TYPE_RESERVED52 = (0x00000052),
+ RM_ENGINE_TYPE_RESERVED53 = (0x00000053),
+ RM_ENGINE_TYPE_LAST = (0x00000054),
+} RM_ENGINE_TYPE;
+
+#define NV2080_ENGINE_TYPE_NULL (0x00000000)
+#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
+#define NV2080_ENGINE_TYPE_GR1 (0x00000002)
+#define NV2080_ENGINE_TYPE_GR2 (0x00000003)
+#define NV2080_ENGINE_TYPE_GR3 (0x00000004)
+#define NV2080_ENGINE_TYPE_GR4 (0x00000005)
+#define NV2080_ENGINE_TYPE_GR5 (0x00000006)
+#define NV2080_ENGINE_TYPE_GR6 (0x00000007)
+#define NV2080_ENGINE_TYPE_GR7 (0x00000008)
+#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
+#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a)
+#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b)
+#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c)
+#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d)
+#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e)
+#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f)
+#define NV2080_ENGINE_TYPE_COPY7 (0x00000010)
+#define NV2080_ENGINE_TYPE_COPY8 (0x00000011)
+#define NV2080_ENGINE_TYPE_COPY9 (0x00000012)
+#define NV2080_ENGINE_TYPE_BSP (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
+#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014)
+#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015)
+#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016)
+#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017)
+#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018)
+#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019)
+#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a)
+#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
+#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c)
+#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d)
+#define NV2080_ENGINE_TYPE_VP (0x0000001e)
+#define NV2080_ENGINE_TYPE_ME (0x0000001f)
+#define NV2080_ENGINE_TYPE_PPP (0x00000020)
+#define NV2080_ENGINE_TYPE_MPEG (0x00000021)
+#define NV2080_ENGINE_TYPE_SW (0x00000022)
+#define NV2080_ENGINE_TYPE_CIPHER (0x00000023)
+#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER
+#define NV2080_ENGINE_TYPE_VIC (0x00000024)
+#define NV2080_ENGINE_TYPE_MP (0x00000025)
+#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
+#define NV2080_ENGINE_TYPE_HOST (0x00000027)
+#define NV2080_ENGINE_TYPE_DPU (0x00000028)
+#define NV2080_ENGINE_TYPE_PMU (0x00000029)
+#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a)
+#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
+#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c)
+#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d)
+#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e)
+#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f)
+#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030)
+#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031)
+#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032)
+#define NV2080_ENGINE_TYPE_OFA (0x00000033)
+#define NV2080_ENGINE_TYPE_OFA0 NV2080_ENGINE_TYPE_OFA
+#define NV2080_ENGINE_TYPE_COPY10 (0x00000034)
+#define NV2080_ENGINE_TYPE_COPY11 (0x00000035)
+#define NV2080_ENGINE_TYPE_COPY12 (0x00000036)
+#define NV2080_ENGINE_TYPE_COPY13 (0x00000037)
+#define NV2080_ENGINE_TYPE_COPY14 (0x00000038)
+#define NV2080_ENGINE_TYPE_COPY15 (0x00000039)
+#define NV2080_ENGINE_TYPE_COPY16 (0x0000003a)
+#define NV2080_ENGINE_TYPE_COPY17 (0x0000003b)
+#define NV2080_ENGINE_TYPE_COPY18 (0x0000003c)
+#define NV2080_ENGINE_TYPE_COPY19 (0x0000003d)
+#define NV2080_ENGINE_TYPE_OFA1 (0x0000003e)
+#define NV2080_ENGINE_TYPE_NVENC3 (0x0000003f)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY0 (0x00000040)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY1 (0x00000041)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY2 (0x00000042)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY3 (0x00000043)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY4 (0x00000044)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY5 (0x00000045)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY6 (0x00000046)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY7 (0x00000047)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY8 (0x00000048)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY9 (0x00000049)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY10 (0x0000004a)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY11 (0x0000004b)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY12 (0x0000004c)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY13 (0x0000004d)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY14 (0x0000004e)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY15 (0x0000004f)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY16 (0x00000050)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY17 (0x00000051)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY18 (0x00000052)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY19 (0x00000053)
+#define NV2080_ENGINE_TYPE_LAST (0x00000054)
+#endif
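
The MC_ENGINE_IDX_CE0 through MC_ENGINE_IDX_CE_MAX values above form a contiguous block, so an interrupt-tree index can be mapped back to a copy-engine instance with plain arithmetic. A sketch under that assumption; the helper name is illustrative and not taken from the patch:

static inline int mc_engine_idx_to_ce(unsigned int idx)
{
	if (idx < MC_ENGINE_IDX_CE0 || idx > MC_ENGINE_IDX_CE_MAX)
		return -1;			/* not a copy engine */
	return idx - MC_ENGINE_IDX_CE0;		/* CE instance, 0..19 */
}
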
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h
new file mode 100644
index 000000000000..8af432375f7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FBSR_H__
+#define __NVRM_FBSR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+ NvHandle hClient;
+ NvHandle hSysMem;
+ NvBool bEnteringGcoffState;
+ NV_DECLARE_ALIGNED(NvU64 sysmemAddrOfSuspendResumeData, 8);
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h
index 7157c7757698..2b002ca64e0f 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h
@@ -1,31 +1,14 @@
-#ifndef __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
-#define __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FIFO_H__
+#define __NVRM_FIFO_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV_MAX_SUBDEVICES 8
typedef struct NV_MEMORY_DESC_PARAMS {
NV_DECLARE_ALIGNED(NvU64 base, 8);
@@ -34,137 +17,197 @@ typedef struct NV_MEMORY_DESC_PARAMS {
NvU32 cacheAttrib;
} NV_MEMORY_DESC_PARAMS;
+#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
+
+#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
+
+typedef struct NV_CHANNEL_ALLOC_PARAMS {
+
+ NvHandle hObjectError; // error context DMA
+ NvHandle hObjectBuffer; // no longer used
+ NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
+ NvU32 gpFifoEntries; // number of GP FIFO entries
+
+ NvU32 flags;
+
+
+ NvHandle hContextShare; // context share handle
+ NvHandle hVASpace; // VASpace for the channel
+
+ // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+ NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
+
+ // offset to beginning of UserD within hUserdMemory[x]
+ NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
+
+ // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated
+ NvU32 engineType;
+ // Channel identifier that is unique for the duration of a RM session
+ NvU32 cid;
+ // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+ NvU32 subDeviceId;
+ NvHandle hObjectEccError; // ECC error context DMA
+
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
+
+ NvHandle hPhysChannelGroup; // reserved
+ NvU32 internalFlags; // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
+ NvU32 ProcessID; // reserved
+ NvU32 SubProcessID; // reserved
+
+ // IV used for CPU-side encryption / GPU-side decryption.
+ NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // IV used for CPU-side decryption / GPU-side encryption.
+ NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // Nonce used for CPU-side signing / GPU-side signature verification.
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
+ NvU32 tpcConfigID; // TPC Configuration Id as supported by DTD-PG Feature
+} NV_CHANNEL_ALLOC_PARAMS;
+
+typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
+
#define NVOS04_FLAGS_CHANNEL_TYPE 1:0
#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000
#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE
#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE
-
#define NVOS04_FLAGS_VPR 2:2
#define NVOS04_FLAGS_VPR_FALSE 0x00000000
#define NVOS04_FLAGS_VPR_TRUE 0x00000001
-
#define NVOS04_FLAGS_CC_SECURE 2:2
#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000
#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3
#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001
-
#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4
#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000
#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001
-
#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5
#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000
#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001
-
#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6
#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000
#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7
#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22
#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23
#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24
#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001
-
#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25
#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000
#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26
#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27
#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001
-
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002
-
#define NVOS04_FLAGS_MAP_CHANNEL 30:30
#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000
#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001
-
#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31
#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000
#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001
-#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
-#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
-
-typedef struct NV_CHANNEL_ALLOC_PARAMS {
-
- NvHandle hObjectError; // error context DMA
- NvHandle hObjectBuffer; // no longer used
- NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
- NvU32 gpFifoEntries; // number of GP FIFO entries
-
- NvU32 flags;
-
-
- NvHandle hContextShare; // context share handle
- NvHandle hVASpace; // VASpace for the channel
-
- // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
- NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
-
- // offset to beginning of UserD within hUserdMemory[x]
- NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
-
- // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated
- NvU32 engineType;
- // Channel identifier that is unique for the duration of a RM session
- NvU32 cid;
- // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
- NvU32 subDeviceId;
- NvHandle hObjectEccError; // ECC error context DMA
-
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
-
- NvHandle hPhysChannelGroup; // reserved
- NvU32 internalFlags; // reserved
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
- NvU32 ProcessID; // reserved
- NvU32 SubProcessID; // reserved
- // IV used for CPU-side encryption / GPU-side decryption.
- NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
- // IV used for CPU-side decryption / GPU-side encryption.
- NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
- // Nonce used CPU-side signing / GPU-side signature verification.
- NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
-} NV_CHANNEL_ALLOC_PARAMS;
-
-typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
-
+typedef enum {
+ /*!
+ * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
+ * kernel CPU-RM clients.
+ */
+ ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
+ /*! @brief Error notifier is explicitly not set.
+ *
+ * The corresponding hErrorContext or hEccErrorContext must be
+ * NV01_NULL_OBJECT.
+ */
+ ERROR_NOTIFIER_TYPE_NONE,
+ /*! @brief Error notifier is a ContextDma */
+ ERROR_NOTIFIER_TYPE_CTXDMA,
+ /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
+ ERROR_NOTIFIER_TYPE_MEMORY
+} ErrorNotifierType;
+
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED 6:6
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_NO 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_YES 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED 7:7
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_NO 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_YES 0x1
+
+typedef struct rpc_rc_triggered_v17_02
+{
+ NvU32 nv2080EngineType;
+ NvU32 chid;
+ NvU32 gfid;
+ NvU32 exceptLevel;
+ NvU32 exceptType;
+ NvU32 scope;
+ NvU16 partitionAttributionId;
+ NvU32 mmuFaultAddrLo;
+ NvU32 mmuFaultAddrHi;
+ NvU32 mmuFaultType;
+ NvBool bCallbackNeeded;
+ NvU32 rcJournalBufferSize;
+ NvU8 rcJournalBuffer[];
+} rpc_rc_triggered_v17_02;
+
+#define NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS 0x40
+
+typedef struct NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO {
+ NvU32 engDesc;
+ NvU32 ctxAttr;
+ NvU32 ctxBufferSize;
+ NvU32 addrSpaceList;
+ NvU32 registerBase;
+} NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO;
+
+#define NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO (0x208001b0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
+ NvU32 numConstructedFalcons;
+ NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS];
+} NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS {
+ NvBool bDisableActiveChannels;
+} NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS_MESSAGE_ID" */
#endif
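
The NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_* ranges above describe how the privilege level and the two ErrorNotifierType values are packed into the internalFlags word of NV_CHANNEL_ALLOC_PARAMS. A sketch of that packing; the FLD_* helpers are illustrative stand-ins, not macros from this patch:

#define FLD_LO(r)         (0 ? r)
#define FLD_MASK(r)       ((~0U >> (31 - (1 ? r))) & (~0U << FLD_LO(r)))
#define FLD_SET(r, v, f)  (((f) & ~FLD_MASK(r)) | (((v) << FLD_LO(r)) & FLD_MASK(r)))

static inline NvU32
channel_internal_flags(ErrorNotifierType err, ErrorNotifierType ecc)
{
	NvU32 flags = 0;

	flags = FLD_SET(NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE,
			NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL, flags);
	flags = FLD_SET(NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE,
			(NvU32)err, flags);
	flags = FLD_SET(NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE,
			(NvU32)ecc, flags);
	return flags;
}
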
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h
new file mode 100644
index 000000000000..feed1dabd9d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GR_H__
+#define __NVRM_GR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
+
+#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x1a
+
+typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
+ NvU32 size;
+ NvU32 alignment;
+} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
+ NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
+} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
+ NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
+
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SETUP (0x00000019)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x0000001a)
+
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO (0x20800137U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+ NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO (0x20800138U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+#define KGRAPHICS_SCRUBBER_HANDLE_VAS 0xdada0042
+#define KGRAPHICS_SCRUBBER_HANDLE_CHANNEL (KGRAPHICS_SCRUBBER_HANDLE_VAS + 3)
+#define KGRAPHICS_SCRUBBER_HANDLE_3DOBJ (KGRAPHICS_SCRUBBER_HANDLE_VAS + 4)
+
+typedef struct NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS {
+ NvBool bTeardown;
+} NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR (0x20800a46) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS_MESSAGE_ID" */
+#endif
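
The data behind NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO is a two-level table: one entry per GR engine, each holding a size/alignment pair for every ENGINE_ID_* context-buffer kind listed above. A sketch of how it can be indexed; the helper is illustrative, not part of the patch:

static inline NvU32
gr_ctx_buffer_size(const NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *p,
		   unsigned int grIdx, unsigned int bufferId)
{
	/* e.g. bufferId == NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH */
	return p->engineContextBuffersInfo[grIdx].engine[bufferId].size;
}
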
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h
new file mode 100644
index 000000000000..b6075021e74f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GSP_H__
+#define __NVRM_GSP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+ NvU32 performance;
+ NvBool supportCompressed;
+ NvBool supportISO;
+ NvBool bProtected;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+ NvU32 numFBRegions;
+ NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
+
+#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+ NvU32 index;
+ NvU32 flags;
+ NvU32 length;
+ NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+ NvU32 BoardID;
+ char chipSKU[9];
+ char chipSKUMod[5];
+ NvU32 skuConfigVersion;
+ char project[5];
+ char projectSKU[5];
+ char CDP[6];
+ char projectSKUMod[2];
+ NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+#define MAX_GPC_COUNT 32
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+ NvU32 totalVFs;
+ NvU32 firstVfOffset;
+ NvU32 vfFeatureMask;
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+ NvBool bSriovEnabled;
+ NvBool bSriovHeavyEnabled;
+ NvBool bEmulateVFBar0TlbInvalidationRegister;
+ NvBool bClientRmAllocatedCtxBuffer;
+ NvBool bNonPowerOf2ChannelCountSupported;
+ NvBool bVfResizableBAR1Supported;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+#include "engine.h"
+
+#define NVGPU_ENGINE_CAPS_MASK_BITS 32
+
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
+
+#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
+{
+ NvU32 numHeads;
+ NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
+{
+ NvU32 headIndex;
+ NvU32 maxHResolution;
+ NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+#define MAX_GROUP_COUNT 2
+
+typedef struct
+{
+ NvU32 ecidLow;
+ NvU32 ecidHigh;
+ NvU32 ecidExtended;
+} EcidManufacturingInfo;
+
+typedef struct
+{
+ NvU64 nonWprHeapOffset;
+ NvU64 frtsOffset;
+} FW_WPR_LAYOUT_OFFSET;
+
+typedef struct GspStaticConfigInfo_t
+{
+ NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
+ NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
+ NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
+
+ NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
+ NvU32 sriovMaxGfid;
+
+ NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
+
+ NvBool poisonFuseEnabled;
+
+ NvU64 fb_length;
+ NvU64 fbio_mask;
+ NvU32 fb_bus_width;
+ NvU32 fb_ram_type;
+ NvU64 fbp_mask;
+ NvU32 l2_cache_size;
+
+ NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvBool bGpuInternalSku;
+ NvBool bIsQuadroGeneric;
+ NvBool bIsQuadroAd;
+ NvBool bIsNvidiaNvs;
+ NvBool bIsVgx;
+ NvBool bGeforceSmb;
+ NvBool bIsTitan;
+ NvBool bIsTesla;
+ NvBool bIsMobile;
+ NvBool bIsGc6Rtd3Allowed;
+ NvBool bIsGc8Rtd3Allowed;
+ NvBool bIsGcOffRtd3Allowed;
+ NvBool bIsGcoffLegacyAllowed;
+ NvBool bIsMigSupported;
+
+ /* "Total Board Power" refers to the power requirement of the GPU
+ * while in the GC6 state. The majority of this power is used
+ * to keep V-RAM active to preserve its content.
+ * Some energy may be consumed by always-on components on the GPU chip.
+ * This power is provided by the 3.3V voltage rail.
+ */
+ NvU16 RTD3GC6TotalBoardPower;
+
+ /* PERST# (i.e. PCI Express Reset) is a sideband signal
+ * generated by the PCIe host to indicate to the PCIe devices
+ * that the power rails and the reference clock are stable.
+ * The endpoint device typically uses this signal as a global reset.
+ */
+ NvU16 RTD3GC6PerstDelay;
+
+ NvU64 bar1PdeBase;
+ NvU64 bar2PdeBase;
+
+ NvBool bVbiosValid;
+ NvU32 vbiosSubVendor;
+ NvU32 vbiosSubDevice;
+
+ NvBool bPageRetirementSupported;
+
+ NvBool bSplitVasBetweenServerClientRm;
+
+ NvBool bClRootportNeedsNosnoopWAR;
+
+ VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
+ VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
+ NvU64 displaylessMaxPixels;
+
+ // Client handle for internal RMAPI control.
+ NvHandle hInternalClient;
+
+ // Device handle for internal RMAPI control.
+ NvHandle hInternalDevice;
+
+ // Subdevice handle for internal RMAPI control.
+ NvHandle hInternalSubdevice;
+
+ NvBool bSelfHostedMode;
+ NvBool bAtsSupported;
+
+ NvBool bIsGpuUefi;
+ NvBool bIsEfiInit;
+
+ EcidManufacturingInfo ecidInfo[MAX_GROUP_COUNT];
+
+ FW_WPR_LAYOUT_OFFSET fwWprLayoutOffset;
+} GspStaticConfigInfo;
+
+typedef struct
+{
+ NvU16 deviceID; // deviceID
+ NvU16 vendorID; // vendorID
+ NvU16 subdeviceID; // subsystem deviceID
+ NvU16 subvendorID; // subsystem vendorID
+ NvU8 revisionID; // revision ID
+} BUSINFO;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct DOD_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 acpiIdListLen;
+ NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} DOD_METHOD_DATA;
+
+typedef struct JT_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 jtCaps;
+ NvU16 jtRevId;
+ NvBool bSBIOSCaps;
+} JT_METHOD_DATA;
+
+typedef struct MUX_METHOD_DATA_ELEMENT
+{
+ NvU32 acpiId;
+ NvU32 mode;
+ NV_STATUS status;
+} MUX_METHOD_DATA_ELEMENT;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct MUX_METHOD_DATA
+{
+ NvU32 tableLen;
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxStateTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} MUX_METHOD_DATA;
+
+typedef struct CAPS_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 optimusCaps;
+} CAPS_METHOD_DATA;
+
+typedef struct ACPI_METHOD_DATA
+{
+ NvBool bValid;
+ DOD_METHOD_DATA dodMethodData;
+ JT_METHOD_DATA jtMethodData;
+ MUX_METHOD_DATA muxMethodData;
+ CAPS_METHOD_DATA capsMethodData;
+} ACPI_METHOD_DATA;
+
+typedef struct GSP_VF_INFO
+{
+ NvU32 totalVFs;
+ NvU32 firstVFOffset;
+ NvU64 FirstVFBar0Address;
+ NvU64 FirstVFBar1Address;
+ NvU64 FirstVFBar2Address;
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+} GSP_VF_INFO;
+
+typedef struct
+{
+ // Link capabilities
+ NvU32 linkCap;
+} GSP_PCIE_CONFIG_REG;
+
+typedef struct GspSystemInfo
+{
+ NvU64 gpuPhysAddr;
+ NvU64 gpuPhysFbAddr;
+ NvU64 gpuPhysInstAddr;
+ NvU64 gpuPhysIoAddr;
+ NvU64 nvDomainBusDeviceFunc;
+ NvU64 simAccessBufPhysAddr;
+ NvU64 notifyOpSharedSurfacePhysAddr;
+ NvU64 pcieAtomicsOpMask;
+ NvU64 consoleMemSize;
+ NvU64 maxUserVa;
+ NvU32 pciConfigMirrorBase;
+ NvU32 pciConfigMirrorSize;
+ NvU32 PCIDeviceID;
+ NvU32 PCISubDeviceID;
+ NvU32 PCIRevisionID;
+ NvU32 pcieAtomicsCplDeviceCapMask;
+ NvU8 oorArch;
+ NvU64 clPdbProperties;
+ NvU32 Chipset;
+ NvBool bGpuBehindBridge;
+ NvBool bFlrSupported;
+ NvBool b64bBar0Supported;
+ NvBool bMnocAvailable;
+ NvU32 chipsetL1ssEnable;
+ NvBool bUpstreamL0sUnsupported;
+ NvBool bUpstreamL1Unsupported;
+ NvBool bUpstreamL1PorSupported;
+ NvBool bUpstreamL1PorMobileOnly;
+ NvBool bSystemHasMux;
+ NvU8 upstreamAddressValid;
+ BUSINFO FHBBusInfo;
+ BUSINFO chipsetIDInfo;
+ ACPI_METHOD_DATA acpiMethodData;
+ NvU32 hypervisorType;
+ NvBool bIsPassthru;
+ NvU64 sysTimerOffsetNs;
+ GSP_VF_INFO gspVFInfo;
+ NvBool bIsPrimary;
+ NvBool isGridBuild;
+ GSP_PCIE_CONFIG_REG pcieConfigReg;
+ NvU32 gridBuildCsp;
+ NvBool bPreserveVideoMemoryAllocations;
+ NvBool bTdrEventSupported;
+ NvBool bFeatureStretchVblankCapable;
+ NvBool bEnableDynamicGranularityPageArrays;
+ NvBool bClockBoostSupported;
+ NvBool bRouteDispIntrsToCPU;
+ NvU64 hostPageSize;
+} GspSystemInfo;
+
+typedef struct rpc_os_error_log_v17_00
+{
+ NvU32 exceptType;
+ NvU32 runlistId;
+ NvU32 chid;
+ char errString[0x100];
+ NvU32 preemptiveRemovalPreviousXid;
+} rpc_os_error_log_v17_00;
+
+typedef struct
+{
+ // Magic
+ // BL to use for verification (i.e. Booter locked it in WPR2)
+ NvU64 magic; // = 0xdc3aae21371a60b3;
+
+ // Revision number of Booter-BL-Sequencer handoff interface
+ // Bumped up when we change this interface so it is not backward compatible.
+ // Bumped up when we revoke GSP-RM ucode
+ NvU64 revision; // = 1;
+
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+
+ NvU64 sysmemAddrOfRadix3Elf;
+ NvU64 sizeOfRadix3Elf;
+
+ NvU64 sysmemAddrOfBootloader;
+ NvU64 sizeOfBootloader;
+
+ // Offsets inside bootloader image needed by Booter
+ NvU64 bootloaderCodeOffset;
+ NvU64 bootloaderDataOffset;
+ NvU64 bootloaderManifestOffset;
+
+ union
+ {
+ // Used only at initial boot
+ struct
+ {
+ NvU64 sysmemAddrOfSignature;
+ NvU64 sizeOfSignature;
+ };
+
+ //
+ // Used at suspend/resume to read GspFwHeapFreeList
+ // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
+ //
+ struct
+ {
+ NvU32 gspFwHeapFreeListWprOffset;
+ NvU32 unused0;
+ NvU64 unused1;
+ };
+ };
+
+ // ---- Members describing FB layout --------------------------------
+ NvU64 gspFwRsvdStart;
+
+ NvU64 nonWprHeapOffset;
+ NvU64 nonWprHeapSize;
+
+ NvU64 gspFwWprStart;
+
+ // GSP-RM to use to setup heap.
+ NvU64 gspFwHeapOffset;
+ NvU64 gspFwHeapSize;
+
+ // BL to use to find ELF for jump
+ NvU64 gspFwOffset;
+ // Size is sizeOfRadix3Elf above.
+
+ NvU64 bootBinOffset;
+ // Size is sizeOfBootloader above.
+
+ NvU64 frtsOffset;
+ NvU64 frtsSize;
+
+ NvU64 gspFwWprEnd;
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 fbSize;
+
+ // ---- Other members -----------------------------------------------
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 vgaWorkspaceOffset;
+ NvU64 vgaWorkspaceSize;
+
+ // Boot count. Used to determine whether to load the firmware image.
+ NvU64 bootCount;
+
+ // This union is organized the way it is to start at an 8-byte boundary and achieve natural
+ // packing of the internal struct fields.
+ union
+ {
+ struct
+ {
+ // TODO: the partitionRpc* fields below do not really belong in this
+ // structure. The values are patched in by the partition bootstrapper
+ // when GSP-RM is booted in a partition, and this structure was a
+ // convenient place for the bootstrapper to access them. These should
+ // be moved to a different comm. mechanism between the bootstrapper
+ // and the GSP-RM tasks.
+
+ // Shared partition RPC memory (physical address)
+ NvU64 partitionRpcAddr;
+
+ // Offsets relative to partitionRpcAddr
+ NvU16 partitionRpcRequestOffset;
+ NvU16 partitionRpcReplyOffset;
+
+ // Code section and dataSection offset and size.
+ NvU32 elfCodeOffset;
+ NvU32 elfDataOffset;
+ NvU32 elfCodeSize;
+ NvU32 elfDataSize;
+
+ // Used during GSP-RM resume to check for revocation
+ NvU32 lsUcodeVersion;
+ };
+
+ struct
+ {
+ // Pad for the partitionRpc* fields, plus 4 bytes
+ NvU32 partitionRpcPadding[4];
+
+ // CrashCat (contiguous) buffer size/location - occupies same bytes as the
+ // elf(Code|Data)(Offset|Size) fields above.
+ // TODO: move to GSP_FMC_INIT_PARAMS
+ NvU64 sysmemAddrOfCrashReportQueue;
+ NvU32 sizeOfCrashReportQueue;
+
+ // Pad for the lsUcodeVersion field
+ NvU32 lsUcodeVersionPadding[1];
+ };
+ };
+
+ // Number of VF partitions allocating sub-heaps from the WPR heap
+ // Used during boot to ensure the heap is adequately sized
+ NvU8 gspFwHeapVfPartitionCount;
+
+ // Flags to help decide GSP-FW flow.
+ NvU8 flags;
+
+ // Pad structure to exactly 256 bytes. Can replace padding with additional
+ // fields without incrementing revision. Padding initialized to 0.
+ NvU8 padding[2];
+
+ //
+ // Starts at gspFwWprEnd+frtsSize because FRTS is positioned
+ // to end where this allocation starts (when RM requests FSP to create
+ // FRTS).
+ //
+ NvU32 pmuReservedSize;
+
+ // BL to use for verification (i.e. Booter says OK to boot)
+ NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
+} GspFwWprMeta;
+
+#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
+
+#define GSP_FW_WPR_META_REVISION 1
+
+typedef struct {
+ NvU64 sharedMemPhysAddr;
+ NvU32 pageTableEntryCount;
+ NvLength cmdQueueOffset;
+ NvLength statQueueOffset;
+} MESSAGE_QUEUE_INIT_ARGUMENTS;
+
+typedef struct {
+ NvU32 oldLevel;
+ NvU32 flags;
+ NvBool bInPMTransition;
+} GSP_SR_INIT_ARGUMENTS;
+
+typedef struct
+{
+ MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
+ GSP_SR_INIT_ARGUMENTS srInitArguments;
+ NvU32 gpuInstance;
+ NvBool bDmemStack;
+
+ struct
+ {
+ NvU64 pa;
+ NvU64 size;
+ } profilerArgs;
+} GSP_ARGUMENTS_CACHED;
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
+
+typedef struct
+{
+ // Magic for verification by secure ucode
+ NvU64 magic; // = GSP_FW_SR_META_MAGIC;
+
+ //
+ // Revision number
+ // Bumped up when we change this interface so it is not backward compatible.
+ //
+ NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
+
+ // Members regarding data in SYSMEM
+ NvU64 sysmemAddrOfSuspendResumeData;
+ NvU64 sizeOfSuspendResumeData;
+
+ //
+ // Internal members for use by secure ucode
+ // Must be exactly GSP_FW_SR_META_INTERNAL_SIZE bytes.
+ //
+ NvU32 internal[32];
+
+ // Same as flags of GspFwWprMeta
+ NvU32 flags;
+
+ // Subrevision number used by secure ucode
+ NvU32 subrevision;
+
+ //
+ // Pad structure to exactly 256 bytes (1 DMA chunk).
+ // Padding initialized to zero.
+ //
+ NvU32 padding[22];
+} GspFwSRMeta;
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL (22 << 20)
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing thru Ada
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u)
+
+#define BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA (12u)
+
+#define BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA (70u)
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB \
+ (88u + (BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA) + \
+ (BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA))
+
+typedef struct GSP_FMC_INIT_PARAMS
+{
+ // CC initialization "registry keys"
+ NvU32 regkeys;
+} GSP_FMC_INIT_PARAMS;
+
+typedef enum {
+ GSP_DMA_TARGET_LOCAL_FB,
+ GSP_DMA_TARGET_COHERENT_SYSTEM,
+ GSP_DMA_TARGET_NONCOHERENT_SYSTEM,
+ GSP_DMA_TARGET_COUNT
+} GSP_DMA_TARGET;
+
+typedef struct GSP_ACR_BOOT_GSP_RM_PARAMS
+{
+ // Physical memory aperture through which gspRmDescPa is accessed
+ GSP_DMA_TARGET target;
+ // Size in bytes of the GSP-RM descriptor structure
+ NvU32 gspRmDescSize;
+ // Physical offset in the target aperture of the GSP-RM descriptor structure
+ NvU64 gspRmDescOffset;
+ // Physical offset in FB to set the start of the WPR containing GSP-RM
+ NvU64 wprCarveoutOffset;
+ // Size in bytes of the WPR containing GSP-RM
+ NvU32 wprCarveoutSize;
+ // Whether to boot GSP-RM or GSP-Proxy through ACR
+ NvBool bIsGspRmBoot;
+} GSP_ACR_BOOT_GSP_RM_PARAMS;
+
+typedef struct GSP_RM_PARAMS
+{
+ // Physical memory aperture through which bootArgsOffset is accessed
+ GSP_DMA_TARGET target;
+ // Physical offset in the memory aperture that will be passed to GSP-RM
+ NvU64 bootArgsOffset;
+} GSP_RM_PARAMS;
+
+typedef struct GSP_SPDM_PARAMS
+{
+ // Physical Memory Aperture through which all addresses are accessed
+ GSP_DMA_TARGET target;
+
+ // Physical offset in the memory aperture where SPDM payload is stored
+ NvU64 payloadBufferOffset;
+
+ // Size of the above payload buffer
+ NvU32 payloadBufferSize;
+} GSP_SPDM_PARAMS;
+
+typedef struct GSP_FMC_BOOT_PARAMS
+{
+ GSP_FMC_INIT_PARAMS initParams;
+ GSP_ACR_BOOT_GSP_RM_PARAMS bootGspRmParams;
+ GSP_RM_PARAMS gspRmParams;
+ GSP_SPDM_PARAMS gspSpdmParams;
+} GSP_FMC_BOOT_PARAMS;
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100 (14 << 20) // Hopper+
+#endif
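
GspFwWprMeta is the block handed to the booter/GSP firmware, and the magic and revision fields exist so the consumer can sanity-check it before use, as the comments above note. A minimal sketch of such a check, using only fields and constants defined in this header:

static inline NvBool gsp_fw_wpr_meta_valid(const GspFwWprMeta *meta)
{
	return meta->magic == GSP_FW_WPR_META_MAGIC &&
	       meta->revision == GSP_FW_WPR_META_REVISION;
}
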
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h
new file mode 100644
index 000000000000..e06643f57695
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_MSGFN_H__
+#define __NVRM_MSGFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#ifndef E
+# define E(RPC, VAL) NV_VGPU_MSG_EVENT_##RPC = VAL,
+# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ E(FIRST_EVENT, 0x1000)
+ E(GSP_INIT_DONE, 0x1001)
+ E(GSP_RUN_CPU_SEQUENCER, 0x1002)
+ E(POST_EVENT, 0x1003)
+ E(RC_TRIGGERED, 0x1004)
+ E(MMU_FAULT_QUEUED, 0x1005)
+ E(OS_ERROR_LOG, 0x1006)
+ E(RG_LINE_INTR, 0x1007)
+ E(GPUACCT_PERFMON_UTIL_SAMPLES, 0x1008)
+ E(SIM_READ, 0x1009)
+ E(SIM_WRITE, 0x100a)
+ E(SEMAPHORE_SCHEDULE_CALLBACK, 0x100b)
+ E(UCODE_LIBOS_PRINT, 0x100c)
+ E(VGPU_GSP_PLUGIN_TRIGGERED, 0x100d)
+ E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK, 0x100e)
+ E(PERF_BRIDGELESS_INFO_UPDATE, 0x100f)
+ E(VGPU_CONFIG, 0x1010)
+ E(DISPLAY_MODESET, 0x1011)
+ E(EXTDEV_INTR_SERVICE, 0x1012)
+ E(NVLINK_INBAND_RECEIVED_DATA_256, 0x1013)
+ E(NVLINK_INBAND_RECEIVED_DATA_512, 0x1014)
+ E(NVLINK_INBAND_RECEIVED_DATA_1024, 0x1015)
+ E(NVLINK_INBAND_RECEIVED_DATA_2048, 0x1016)
+ E(NVLINK_INBAND_RECEIVED_DATA_4096, 0x1017)
+ E(TIMED_SEMAPHORE_RELEASE, 0x1018)
+ E(NVLINK_IS_GPU_DEGRADED, 0x1019)
+ E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK, 0x101a)
+ E(NVLINK_FAULT_UP, 0x101b)
+ E(GSP_LOCKDOWN_NOTICE, 0x101c)
+ E(MIG_CI_CONFIG_UPDATE, 0x101d)
+ E(UPDATE_GSP_TRACE, 0x101e)
+ E(NVLINK_FATAL_ERROR_RECOVERY, 0x101f)
+ E(GSP_POST_NOCAT_RECORD, 0x1020)
+ E(FECS_ERROR, 0x1021)
+ E(RECOVERY_ACTION, 0x1022)
+ E(NUM_EVENTS, 0x1023)
+#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef E
+# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+#endif
+#endif
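
msgfn.h is an X-macro list: when E is left undefined the header defines the NV_VGPU_MSG_EVENT_* enum itself, and when the includer defines E before including it, the event list expands with the includer's macro instead. A sketch of a second consumer that builds a name table for debug logging; this is illustrative only, and assumes it is the first inclusion in the translation unit (the include guard otherwise suppresses the list):

#define E(RPC, VAL) [VAL - 0x1000] = #RPC,
static const char *const nv_vgpu_msg_event_names[] = {
#include "msgfn.h"
};
#undef E
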
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h
new file mode 100644
index 000000000000..fcaef7f553a6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_OFA_H__
+#define __NVRM_OFA_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
+ NvU32 engineInstance;
+} NV_OFA_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h
new file mode 100644
index 000000000000..2d67b598c58b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_RPCFN_H__
+#define __NVRM_RPCFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#ifndef X
+# define X(UNIT, RPC, VAL) NV_VGPU_MSG_FUNCTION_##RPC = VAL,
+# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ X(RM, NOP, 0)
+ X(RM, SET_GUEST_SYSTEM_INFO, 1)
+ X(RM, ALLOC_ROOT, 2)
+ X(RM, ALLOC_DEVICE, 3) // deprecated
+ X(RM, ALLOC_MEMORY, 4)
+ X(RM, ALLOC_CTX_DMA, 5)
+ X(RM, ALLOC_CHANNEL_DMA, 6)
+ X(RM, MAP_MEMORY, 7)
+ X(RM, BIND_CTX_DMA, 8) // deprecated
+ X(RM, ALLOC_OBJECT, 9)
+ X(RM, FREE, 10)
+ X(RM, LOG, 11)
+ X(RM, ALLOC_VIDMEM, 12)
+ X(RM, UNMAP_MEMORY, 13)
+ X(RM, MAP_MEMORY_DMA, 14)
+ X(RM, UNMAP_MEMORY_DMA, 15)
+ X(RM, GET_EDID, 16) // deprecated
+ X(RM, ALLOC_DISP_CHANNEL, 17)
+ X(RM, ALLOC_DISP_OBJECT, 18)
+ X(RM, ALLOC_SUBDEVICE, 19)
+ X(RM, ALLOC_DYNAMIC_MEMORY, 20)
+ X(RM, DUP_OBJECT, 21)
+ X(RM, IDLE_CHANNELS, 22)
+ X(RM, ALLOC_EVENT, 23)
+ X(RM, SEND_EVENT, 24) // deprecated
+ X(RM, REMAPPER_CONTROL, 25) // deprecated
+ X(RM, DMA_CONTROL, 26) // deprecated
+ X(RM, DMA_FILL_PTE_MEM, 27)
+ X(RM, MANAGE_HW_RESOURCE, 28)
+ X(RM, BIND_ARBITRARY_CTX_DMA, 29) // deprecated
+ X(RM, CREATE_FB_SEGMENT, 30)
+ X(RM, DESTROY_FB_SEGMENT, 31)
+ X(RM, ALLOC_SHARE_DEVICE, 32)
+ X(RM, DEFERRED_API_CONTROL, 33)
+ X(RM, REMOVE_DEFERRED_API, 34)
+ X(RM, SIM_ESCAPE_READ, 35)
+ X(RM, SIM_ESCAPE_WRITE, 36)
+ X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA, 37)
+ X(RM, FREE_VIDMEM_VIRT, 38)
+ X(RM, PERF_GET_PSTATE_INFO, 39) // deprecated
+ X(RM, PERF_GET_PERFMON_SAMPLE, 40)
+ X(RM, PERF_GET_VIRTUAL_PSTATE_INFO, 41) // deprecated
+ X(RM, PERF_GET_LEVEL_INFO, 42)
+ X(RM, MAP_SEMA_MEMORY, 43)
+ X(RM, UNMAP_SEMA_MEMORY, 44)
+ X(RM, SET_SURFACE_PROPERTIES, 45)
+ X(RM, CLEANUP_SURFACE, 46)
+ X(RM, UNLOADING_GUEST_DRIVER, 47)
+ X(RM, TDR_SET_TIMEOUT_STATE, 48)
+ X(RM, SWITCH_TO_VGA, 49)
+ X(RM, GPU_EXEC_REG_OPS, 50)
+ X(RM, GET_STATIC_INFO, 51)
+ X(RM, ALLOC_VIRTMEM, 52)
+ X(RM, UPDATE_PDE_2, 53)
+ X(RM, SET_PAGE_DIRECTORY, 54)
+ X(RM, GET_STATIC_PSTATE_INFO, 55)
+ X(RM, TRANSLATE_GUEST_GPU_PTES, 56)
+ X(RM, RESERVED_57, 57)
+ X(RM, RESET_CURRENT_GR_CONTEXT, 58)
+ X(RM, SET_SEMA_MEM_VALIDATION_STATE, 59)
+ X(RM, GET_ENGINE_UTILIZATION, 60)
+ X(RM, UPDATE_GPU_PDES, 61)
+ X(RM, GET_ENCODER_CAPACITY, 62)
+ X(RM, VGPU_PF_REG_READ32, 63) // deprecated
+ X(RM, SET_GUEST_SYSTEM_INFO_EXT, 64)
+ X(GSP, GET_GSP_STATIC_INFO, 65)
+ X(RM, RMFS_INIT, 66) // deprecated
+ X(RM, RMFS_CLOSE_QUEUE, 67) // deprecated
+ X(RM, RMFS_CLEANUP, 68) // deprecated
+ X(RM, RMFS_TEST, 69) // deprecated
+ X(RM, UPDATE_BAR_PDE, 70)
+ X(RM, CONTINUATION_RECORD, 71)
+ X(RM, GSP_SET_SYSTEM_INFO, 72)
+ X(RM, SET_REGISTRY, 73)
+ X(GSP, GSP_INIT_POST_OBJGPU, 74) // deprecated
+ X(RM, SUBDEV_EVENT_SET_NOTIFICATION, 75) // deprecated
+ X(GSP, GSP_RM_CONTROL, 76)
+ X(RM, GET_STATIC_INFO2, 77)
+ X(RM, DUMP_PROTOBUF_COMPONENT, 78)
+ X(RM, UNSET_PAGE_DIRECTORY, 79)
+ X(RM, GET_CONSOLIDATED_STATIC_INFO, 80) // deprecated
+ X(RM, GMMU_REGISTER_FAULT_BUFFER, 81) // deprecated
+ X(RM, GMMU_UNREGISTER_FAULT_BUFFER, 82) // deprecated
+ X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER, 83) // deprecated
+ X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER, 84) // deprecated
+ X(RM, CTRL_SET_VGPU_FB_USAGE, 85)
+ X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO, 86)
+ X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO, 87)
+ X(RM, CTRL_RESET_CHANNEL, 88)
+ X(RM, CTRL_RESET_ISOLATED_CHANNEL, 89)
+ X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT, 90)
+ X(RM, CTRL_CLK_GET_EXTENDED_INFO, 91)
+ X(RM, CTRL_PERF_BOOST, 92)
+ X(RM, CTRL_PERF_VPSTATES_GET_CONTROL, 93)
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE, 94)
+ X(RM, CTRL_SET_ZBC_COLOR_CLEAR, 95)
+ X(RM, CTRL_SET_ZBC_DEPTH_CLEAR, 96)
+ X(RM, CTRL_GPFIFO_SCHEDULE, 97)
+ X(RM, CTRL_SET_TIMESLICE, 98)
+ X(RM, CTRL_PREEMPT, 99)
+ X(RM, CTRL_FIFO_DISABLE_CHANNELS, 100)
+ X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL, 101)
+ X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL, 102)
+ X(GSP, GSP_RM_ALLOC, 103)
+ X(RM, CTRL_GET_P2P_CAPS_V2, 104)
+ X(RM, CTRL_CIPHER_AES_ENCRYPT, 105)
+ X(RM, CTRL_CIPHER_SESSION_KEY, 106)
+ X(RM, CTRL_CIPHER_SESSION_KEY_STATUS, 107)
+ X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES, 108)
+ X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES, 109)
+ X(RM, CTRL_DBG_SET_EXCEPTION_MASK, 110)
+ X(RM, CTRL_GPU_PROMOTE_CTX, 111)
+ X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND, 112)
+ X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE, 113)
+ X(RM, CTRL_GR_CTXSW_ZCULL_BIND, 114)
+ X(RM, CTRL_GPU_INITIALIZE_CTX, 115)
+ X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES, 116)
+ X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT, 117)
+ X(RM, CTRL_GET_LATEST_ECC_ADDRESSES, 118)
+ X(RM, CTRL_MC_SERVICE_INTERRUPTS, 119)
+ X(RM, CTRL_DMA_SET_DEFAULT_VASPACE, 120)
+ X(RM, CTRL_GET_CE_PCE_MASK, 121)
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY, 122)
+ X(RM, CTRL_GET_NVLINK_PEER_ID_MASK, 123) // deprecated
+ X(RM, CTRL_GET_NVLINK_STATUS, 124)
+ X(RM, CTRL_GET_P2P_CAPS, 125)
+ X(RM, CTRL_GET_P2P_CAPS_MATRIX, 126)
+ X(RM, RESERVED_0, 127)
+ X(RM, CTRL_RESERVE_PM_AREA_SMPC, 128)
+ X(RM, CTRL_RESERVE_HWPM_LEGACY, 129)
+ X(RM, CTRL_B0CC_EXEC_REG_OPS, 130)
+ X(RM, CTRL_BIND_PM_RESOURCES, 131)
+ X(RM, CTRL_DBG_SUSPEND_CONTEXT, 132)
+ X(RM, CTRL_DBG_RESUME_CONTEXT, 133)
+ X(RM, CTRL_DBG_EXEC_REG_OPS, 134)
+ X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG, 135)
+ X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE, 136)
+ X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE, 137)
+ X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG, 138)
+ X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE, 139)
+ X(RM, CTRL_ALLOC_PMA_STREAM, 140)
+ X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT, 141)
+ X(RM, CTRL_FB_GET_INFO_V2, 142)
+ X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES, 143)
+ X(RM, CTRL_GR_GET_CTX_BUFFER_INFO, 144)
+ X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES, 145)
+ X(RM, CTRL_GPU_EVICT_CTX, 146)
+ X(RM, CTRL_FB_GET_FS_INFO, 147)
+ X(RM, CTRL_GRMGR_GET_GR_FS_INFO, 148)
+ X(RM, CTRL_STOP_CHANNEL, 149)
+ X(RM, CTRL_GR_PC_SAMPLING_MODE, 150)
+ X(RM, CTRL_PERF_RATED_TDP_GET_STATUS, 151)
+ X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL, 152)
+ X(RM, CTRL_FREE_PMA_STREAM, 153)
+ X(RM, CTRL_TIMER_SET_GR_TICK_FREQ, 154)
+ X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB, 155)
+ X(RM, GET_CONSOLIDATED_GR_STATIC_INFO, 156)
+ X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP, 157)
+ X(RM, CTRL_GR_GET_TPC_PARTITION_MODE, 158)
+ X(RM, CTRL_GR_SET_TPC_PARTITION_MODE, 159)
+ X(UVM, UVM_PAGING_CHANNEL_ALLOCATE, 160)
+ X(UVM, UVM_PAGING_CHANNEL_DESTROY, 161)
+ X(UVM, UVM_PAGING_CHANNEL_MAP, 162)
+ X(UVM, UVM_PAGING_CHANNEL_UNMAP, 163)
+ X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM, 164)
+ X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES, 165)
+ X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION, 166)
+ X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL, 167)
+ X(RM, DCE_RM_INIT, 168)
+ X(RM, REGISTER_VIRTUAL_EVENT_BUFFER, 169)
+ X(RM, CTRL_EVENT_BUFFER_UPDATE_GET, 170)
+ X(RM, GET_PLCABLE_ADDRESS_KIND, 171)
+ X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2, 172)
+ X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM, 173)
+ X(RM, CTRL_GET_MMU_DEBUG_MODE, 174)
+ X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS, 175)
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE, 176)
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO, 177)
+ X(RM, DISABLE_CHANNELS, 178)
+ X(RM, CTRL_FABRIC_MEMORY_DESCRIBE, 179)
+ X(RM, CTRL_FABRIC_MEM_STATS, 180)
+ X(RM, SAVE_HIBERNATION_DATA, 181)
+ X(RM, RESTORE_HIBERNATION_DATA, 182)
+ X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED, 183)
+ X(RM, CTRL_EXEC_PARTITIONS_CREATE, 184)
+ X(RM, CTRL_EXEC_PARTITIONS_DELETE, 185)
+ X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN, 186)
+ X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX, 187)
+ X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION, 188)
+ X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK, 189)
+ X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER, 190)
+ X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS, 191)
+ X(RM, CTRL_BUS_SET_P2P_MAPPING, 192)
+ X(RM, CTRL_BUS_UNSET_P2P_MAPPING, 193)
+ X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK, 194)
+ X(RM, CTRL_GPU_MIGRATABLE_OPS, 195)
+ X(RM, CTRL_GET_TOTAL_HS_CREDITS, 196)
+ X(RM, CTRL_GET_HS_CREDITS, 197)
+ X(RM, CTRL_SET_HS_CREDITS, 198)
+ X(RM, CTRL_PM_AREA_PC_SAMPLER, 199)
+ X(RM, INVALIDATE_TLB, 200)
+ X(RM, CTRL_GPU_QUERY_ECC_STATUS, 201) // deprecated
+ X(RM, ECC_NOTIFIER_WRITE_ACK, 202)
+ X(RM, CTRL_DBG_GET_MODE_MMU_DEBUG, 203)
+ X(RM, RM_API_CONTROL, 204)
+ X(RM, CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE, 205)
+ X(RM, CTRL_NVLINK_GET_INBAND_RECEIVED_DATA, 206)
+ X(RM, GET_STATIC_DATA, 207)
+ X(RM, RESERVED_208, 208)
+ X(RM, CTRL_GPU_GET_INFO_V2, 209)
+ X(RM, GET_BRAND_CAPS, 210)
+ X(RM, CTRL_CMD_NVLINK_INBAND_SEND_DATA, 211)
+ X(RM, UPDATE_GPM_GUEST_BUFFER_INFO, 212)
+ X(RM, CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE, 213)
+ X(RM, CTRL_SET_ZBC_STENCIL_CLEAR, 214)
+ X(RM, CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS, 215)
+ X(RM, CTRL_SUBDEVICE_GET_LIBOS_HEAP_STATS, 216)
+ X(RM, CTRL_DBG_SET_MODE_MMU_GCC_DEBUG, 217)
+ X(RM, CTRL_DBG_GET_MODE_MMU_GCC_DEBUG, 218)
+ X(RM, CTRL_RESERVE_HES, 219)
+ X(RM, CTRL_RELEASE_HES, 220)
+ X(RM, CTRL_RESERVE_CCU_PROF, 221)
+ X(RM, CTRL_RELEASE_CCU_PROF, 222)
+ X(RM, RESERVED, 223)
+ X(RM, CTRL_CMD_GET_CHIPLET_HS_CREDIT_POOL, 224)
+ X(RM, CTRL_CMD_GET_HS_CREDITS_MAPPING, 225)
+ X(RM, CTRL_EXEC_PARTITIONS_EXPORT, 226)
+ X(RM, NUM_FUNCTIONS, 227)
+#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef X
+# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+#endif
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c
new file mode 100644
index 000000000000..6fb3083edde3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/ofa.h"
+
+static int
+r570_ofa_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, int inst,
+ struct nvkm_gsp_object *ofa)
+{
+ NV_OFA_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(parent, handle, oclass, sizeof(*args), ofa);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(ofa, args);
+}
+
+const struct nvkm_rm_api_engine
+r570_ofa = {
+ .alloc = r570_ofa_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c
new file mode 100644
index 000000000000..498658d0c60c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/gsp.h"
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos2 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gh100 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x200000,
+ .offset_set_by_acr = true,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gb10x = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x200000,
+ .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000),
+ .offset_set_by_acr = true,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gb20x = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x220000,
+ .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000),
+ .offset_set_by_acr = true,
+};
+
+static const struct nvkm_rm_api
+r570_api = {
+ .gsp = &r570_gsp,
+ .rpc = &r535_rpc,
+ .ctrl = &r535_ctrl,
+ .alloc = &r535_alloc,
+ .client = &r570_client,
+ .device = &r535_device,
+ .fbsr = &r570_fbsr,
+ .disp = &r570_disp,
+ .fifo = &r570_fifo,
+ .ce = &r535_ce,
+ .gr = &r570_gr,
+ .nvdec = &r535_nvdec,
+ .nvenc = &r535_nvenc,
+ .nvjpg = &r535_nvjpg,
+ .ofa = &r570_ofa,
+};
+
+const struct nvkm_rm_impl
+r570_rm_tu102 = {
+ .wpr = &r570_wpr_libos2,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_ga102 = {
+ .wpr = &r570_wpr_libos3,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gh100 = {
+ .wpr = &r570_wpr_libos3_gh100,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gb10x = {
+ .wpr = &r570_wpr_libos3_gb10x,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gb20x = {
+ .wpr = &r570_wpr_libos3_gb20x,
+ .api = &r570_api,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h
new file mode 100644
index 000000000000..393ea775941f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <subdev/gsp.h>
+#ifndef __NVKM_RM_H__
+#define __NVKM_RM_H__
+#include "handles.h"
+struct nvkm_outp;
+struct r535_gr;
+
+struct nvkm_rm_impl {
+ const struct nvkm_rm_wpr *wpr;
+ const struct nvkm_rm_api *api;
+};
+
+struct nvkm_rm {
+ struct nvkm_device *device;
+ const struct nvkm_rm_gpu *gpu;
+ const struct nvkm_rm_wpr *wpr;
+ const struct nvkm_rm_api *api;
+};
+
+struct nvkm_rm_wpr {
+ u32 os_carveout_size;
+ u32 base_size;
+ u64 heap_size_min;
+ u32 heap_size_non_wpr;
+ u32 rsvd_size_pmu;
+ bool offset_set_by_acr;
+};
+
+struct nvkm_rm_api {
+ const struct nvkm_rm_api_gsp {
+ void (*set_rmargs)(struct nvkm_gsp *, bool resume);
+ int (*set_system_info)(struct nvkm_gsp *);
+ int (*get_static_info)(struct nvkm_gsp *);
+ bool (*xlat_mc_engine_idx)(u32 mc_engine_idx, enum nvkm_subdev_type *, int *inst);
+ void (*drop_send_user_shared_data)(struct nvkm_gsp *);
+ void (*drop_post_nocat_record)(struct nvkm_gsp *);
+ u32 (*sr_data_size)(struct nvkm_gsp *);
+ } *gsp;
+
+ const struct nvkm_rm_api_rpc {
+ void *(*get)(struct nvkm_gsp *, u32 fn, u32 argc);
+ void *(*push)(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 repc);
+ void (*done)(struct nvkm_gsp *gsp, void *repv);
+ } *rpc;
+
+ const struct nvkm_rm_api_ctrl {
+ void *(*get)(struct nvkm_gsp_object *, u32 cmd, u32 params_size);
+ int (*push)(struct nvkm_gsp_object *, void **params, u32 repc);
+ void (*done)(struct nvkm_gsp_object *, void *params);
+ } *ctrl;
+
+ const struct nvkm_rm_api_alloc {
+ void *(*get)(struct nvkm_gsp_object *, u32 oclass, u32 params_size);
+ void *(*push)(struct nvkm_gsp_object *, void *params);
+ void (*done)(struct nvkm_gsp_object *, void *params);
+
+ int (*free)(struct nvkm_gsp_object *);
+ } *alloc;
+
+ const struct nvkm_rm_api_client {
+ int (*ctor)(struct nvkm_gsp_client *, u32 handle);
+ } *client;
+
+ const struct nvkm_rm_api_device {
+ int (*ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
+ void (*dtor)(struct nvkm_gsp_device *);
+
+ struct {
+ int (*ctor)(struct nvkm_gsp_device *, u32 handle, u32 id,
+ nvkm_gsp_event_func, struct nvkm_gsp_event *);
+ void (*dtor)(struct nvkm_gsp_event *);
+ } event;
+ } *device;
+
+ const struct nvkm_rm_api_fbsr {
+ int (*suspend)(struct nvkm_gsp *);
+ void (*resume)(struct nvkm_gsp *);
+ } *fbsr;
+
+ const struct nvkm_rm_api_disp {
+ int (*get_static_info)(struct nvkm_disp *);
+ int (*get_supported)(struct nvkm_disp *, unsigned long *display_mask);
+ int (*get_connect_state)(struct nvkm_disp *, unsigned display_id);
+ int (*get_active)(struct nvkm_disp *, unsigned head, u32 *display_id);
+
+ int (*bl_ctrl)(struct nvkm_disp *, unsigned display_id, bool set, int *val);
+
+ struct {
+ int (*get_caps)(struct nvkm_disp *, int *link_bw, bool *mst, bool *wm);
+ int (*set_indexed_link_rates)(struct nvkm_outp *);
+ } dp;
+
+ struct {
+ int (*set_pushbuf)(struct nvkm_disp *, s32 oclass, int inst,
+ struct nvkm_memory *);
+ int (*dmac_alloc)(struct nvkm_disp *, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *);
+ } chan;
+ } *disp;
+
+ const struct nvkm_rm_api_fifo {
+ int (*xlat_rm_engine_type)(u32 rm_engine_type,
+ enum nvkm_subdev_type *, int *nv2080_type);
+ int (*ectx_size)(struct nvkm_fifo *);
+ unsigned rsvd_chids;
+ int (*rc_triggered)(void *priv, u32 fn, void *repv, u32 repc);
+ struct {
+ int (*alloc)(struct nvkm_gsp_device *, u32 handle,
+ u32 nv2080_engine_type, u8 runq, bool priv, int chid,
+ u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *);
+ } chan;
+ } *fifo;
+
+ const struct nvkm_rm_api_engine {
+ int (*alloc)(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *);
+ } *ce, *nvdec, *nvenc, *nvjpg, *ofa;
+
+ const struct nvkm_rm_api_gr {
+ int (*get_ctxbufs_info)(struct r535_gr *);
+ struct {
+ int (*init)(struct r535_gr *);
+ void (*fini)(struct r535_gr *);
+ } scrubber;
+ } *gr;
+};
+
+extern const struct nvkm_rm_impl r535_rm_tu102;
+extern const struct nvkm_rm_impl r535_rm_ga102;
+extern const struct nvkm_rm_api_gsp r535_gsp;
+typedef struct DOD_METHOD_DATA DOD_METHOD_DATA;
+typedef struct JT_METHOD_DATA JT_METHOD_DATA;
+typedef struct CAPS_METHOD_DATA CAPS_METHOD_DATA;
+void r535_gsp_acpi_dod(acpi_handle, DOD_METHOD_DATA *);
+void r535_gsp_acpi_jt(acpi_handle, JT_METHOD_DATA *);
+void r535_gsp_acpi_caps(acpi_handle, CAPS_METHOD_DATA *);
+struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+void r535_gsp_get_static_info_fb(struct nvkm_gsp *,
+ const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *);
+extern const struct nvkm_rm_api_rpc r535_rpc;
+extern const struct nvkm_rm_api_ctrl r535_ctrl;
+extern const struct nvkm_rm_api_alloc r535_alloc;
+extern const struct nvkm_rm_api_client r535_client;
+void r535_gsp_client_dtor(struct nvkm_gsp_client *);
+extern const struct nvkm_rm_api_device r535_device;
+int r535_mmu_vaspace_new(struct nvkm_vmm *, u32 handle, bool external);
+void r535_mmu_vaspace_del(struct nvkm_vmm *);
+extern const struct nvkm_rm_api_fbsr r535_fbsr;
+void r535_fbsr_resume(struct nvkm_gsp *);
+int r535_fbsr_memlist(struct nvkm_gsp_device *, u32 handle, enum nvkm_memory_target,
+ u64 phys, u64 size, struct sg_table *, struct nvkm_gsp_object *);
+extern const struct nvkm_rm_api_disp r535_disp;
+extern const struct nvkm_rm_api_fifo r535_fifo;
+void r535_fifo_rc_chid(struct nvkm_fifo *, int chid);
+extern const struct nvkm_rm_api_engine r535_ce;
+extern const struct nvkm_rm_api_gr r535_gr;
+void *r535_gr_dtor(struct nvkm_gr *);
+int r535_gr_oneinit(struct nvkm_gr *);
+u64 r535_gr_units(struct nvkm_gr *);
+int r535_gr_chan_new(struct nvkm_gr *, struct nvkm_chan *, const struct nvkm_oclass *,
+ struct nvkm_object **);
+int r535_gr_promote_ctx(struct r535_gr *, bool golden, struct nvkm_vmm *,
+ struct nvkm_memory **pctxbuf_mem, struct nvkm_vma **pctxbuf_vma,
+ struct nvkm_gsp_object *chan);
+extern const struct nvkm_rm_api_engine r535_nvdec;
+extern const struct nvkm_rm_api_engine r535_nvenc;
+extern const struct nvkm_rm_api_engine r535_nvjpg;
+extern const struct nvkm_rm_api_engine r535_ofa;
+
+extern const struct nvkm_rm_impl r570_rm_tu102;
+extern const struct nvkm_rm_impl r570_rm_ga102;
+extern const struct nvkm_rm_impl r570_rm_gh100;
+extern const struct nvkm_rm_impl r570_rm_gb10x;
+extern const struct nvkm_rm_impl r570_rm_gb20x;
+extern const struct nvkm_rm_api_gsp r570_gsp;
+extern const struct nvkm_rm_api_client r570_client;
+extern const struct nvkm_rm_api_fbsr r570_fbsr;
+extern const struct nvkm_rm_api_disp r570_disp;
+extern const struct nvkm_rm_api_fifo r570_fifo;
+extern const struct nvkm_rm_api_gr r570_gr;
+int r570_gr_gpc_mask(struct nvkm_gsp *, u32 *mask);
+int r570_gr_tpc_mask(struct nvkm_gsp *, int gpc, u32 *mask);
+extern const struct nvkm_rm_api_engine r570_ofa;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h
new file mode 100644
index 000000000000..4431e33b3304
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_RPC_H__
+#define __NVKM_RM_RPC_H__
+#include "rm.h"
+
+#define to_payload_hdr(p, header) \
+ container_of((void *)p, typeof(*header), params)
+
+int r535_gsp_rpc_poll(struct nvkm_gsp *, u32 fn);
+
+struct nvfw_gsp_rpc *r535_gsp_msg_recv(struct nvkm_gsp *, int fn, u32 gsp_rpc_len);
+int r535_gsp_msg_ntfy_add(struct nvkm_gsp *, u32 fn, nvkm_gsp_msg_ntfy_func, void *priv);
+
+int r535_rpc_status_to_errno(uint32_t rpc_status);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c
new file mode 100644
index 000000000000..423502f870db
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+tu1xx_gpu = {
+ .disp.class = {
+ .root = TU102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = TU102_DISP_CORE_CHANNEL_DMA,
+ .wndw = TU102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = TU102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = TU102_DISP_CURSOR,
+ },
+
+ .usermode.class = TURING_USERMODE_A,
+
+ .fifo.chan = {
+ .class = TURING_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = TURING_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = TURING_A,
+ .compute = TURING_COMPUTE_A,
+ },
+ .nvdec.class = NVC4B0_VIDEO_DECODER,
+ .nvenc.class = NVC4B7_VIDEO_ENCODER,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
index 59c5f2b9172a..58e233bc53b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -22,11 +22,45 @@
#include "priv.h"
#include <subdev/fb.h>
+#include <engine/sec2.h>
+
+#include <rm/r535/nvrm/gsp.h>
#include <nvfw/flcn.h>
#include <nvfw/fw.h>
#include <nvfw/hs.h>
+static int
+tu102_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 wpr2_hi;
+ int ret;
+
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ if (!wpr2_hi) {
+ nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
+ return 0;
+ }
+
+ ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+ if (WARN_ON(ret))
+ return ret;
+
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ if (WARN_ON(wpr2_hi))
+ return -EIO;
+
+ return 0;
+}
+
+static int
+tu102_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+ return nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+}
+
int
tu102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
@@ -114,6 +148,118 @@ tu102_gsp_reset(struct nvkm_gsp *gsp)
return gsp->falcon.func->reset_eng(&gsp->falcon);
}
+int
+tu102_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+ u32 mbox0 = 0xff, mbox1 = 0xff;
+ int ret;
+
+ ret = r535_gsp_fini(gsp, suspend);
+ if (ret && suspend)
+ return ret;
+
+ nvkm_falcon_reset(&gsp->falcon);
+
+ ret = nvkm_gsp_fwsec_sb(gsp);
+ WARN_ON(ret);
+
+ if (suspend) {
+ mbox0 = lower_32_bits(gsp->sr.meta.addr);
+ mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ }
+
+ ret = tu102_gsp_booter_unload(gsp, mbox0, mbox1);
+ WARN_ON(ret);
+ return 0;
+}
+
+int
+tu102_gsp_init(struct nvkm_gsp *gsp)
+{
+ u32 mbox0, mbox1;
+ int ret;
+
+ if (!gsp->sr.meta.data) {
+ mbox0 = lower_32_bits(gsp->wpr_meta.addr);
+ mbox1 = upper_32_bits(gsp->wpr_meta.addr);
+ } else {
+ gsp->rm->api->gsp->set_rmargs(gsp, true);
+
+ mbox0 = lower_32_bits(gsp->sr.meta.addr);
+ mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ }
+
+ /* Execute booter to handle (eventually...) booting GSP-RM. */
+ ret = tu102_gsp_booter_load(gsp, mbox0, mbox1);
+ if (WARN_ON(ret))
+ return ret;
+
+ return r535_gsp_init(gsp);
+}
+
+static int
+tu102_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta;
+ int ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta);
+ if (ret)
+ return ret;
+
+ meta = gsp->wpr_meta.data;
+
+ meta->magic = GSP_FW_WPR_META_MAGIC;
+ meta->revision = GSP_FW_WPR_META_REVISION;
+
+ meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
+ meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
+
+ meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+ meta->sizeOfBootloader = gsp->boot.fw.size;
+ meta->bootloaderCodeOffset = gsp->boot.code_offset;
+ meta->bootloaderDataOffset = gsp->boot.data_offset;
+ meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+ meta->sysmemAddrOfSignature = gsp->sig.addr;
+ meta->sizeOfSignature = gsp->sig.size;
+
+ meta->gspFwRsvdStart = gsp->fb.heap.addr;
+ meta->nonWprHeapOffset = gsp->fb.heap.addr;
+ meta->nonWprHeapSize = gsp->fb.heap.size;
+ meta->gspFwWprStart = gsp->fb.wpr2.addr;
+ meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
+ meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
+ meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
+ meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
+ meta->frtsOffset = gsp->fb.wpr2.frts.addr;
+ meta->frtsSize = gsp->fb.wpr2.frts.size;
+ meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
+ meta->fbSize = gsp->fb.size;
+ meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
+ meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+ meta->bootCount = 0;
+ meta->partitionRpcAddr = 0;
+ meta->partitionRpcRequestOffset = 0;
+ meta->partitionRpcReplyOffset = 0;
+ meta->verified = 0;
+ return 0;
+}
+
+u64
+tu102_gsp_wpr_heap_size(struct nvkm_gsp *gsp)
+{
+ u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
+ u64 heap_size;
+
+ heap_size = gsp->rm->wpr->os_carveout_size +
+ gsp->rm->wpr->base_size +
+ ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
+ ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
+
+ return max(heap_size, gsp->rm->wpr->heap_size_min);
+}
+
static u64
tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
{
@@ -136,14 +282,67 @@ tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
int
tu102_gsp_oneinit(struct nvkm_gsp *gsp)
{
- gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+ struct nvkm_device *device = gsp->subdev.device;
+ int ret;
+
+ gsp->fb.size = nvkm_fb_vidmem_size(device);
gsp->fb.bios.vga_workspace.addr = tu102_gsp_vga_workspace_addr(gsp, gsp->fb.size);
gsp->fb.bios.vga_workspace.size = gsp->fb.size - gsp->fb.bios.vga_workspace.addr;
gsp->fb.bios.addr = gsp->fb.bios.vga_workspace.addr;
gsp->fb.bios.size = gsp->fb.bios.vga_workspace.size;
- return r535_gsp_oneinit(gsp);
+ ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
+ &device->sec2->falcon, &gsp->booter.load);
+ if (ret)
+ return ret;
+
+ ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
+ &device->sec2->falcon, &gsp->booter.unload);
+ if (ret)
+ return ret;
+
+ ret = r535_gsp_oneinit(gsp);
+ if (ret)
+ return ret;
+
+ /* Calculate FB layout. */
+ gsp->fb.wpr2.frts.size = 0x100000;
+ gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
+
+ gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
+ gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
+
+ gsp->fb.wpr2.elf.size = gsp->fw.len;
+ gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
+
+ gsp->fb.wpr2.heap.size = tu102_gsp_wpr_heap_size(gsp);
+
+ gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
+ gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
+
+ gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
+ gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
+
+ gsp->fb.heap.size = 0x100000;
+ gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
+
+ ret = tu102_gsp_wpr_meta_init(gsp);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_fwsec_frts(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
+ /* Reset GSP into RISC-V mode. */
+ ret = gsp->func->reset(gsp);
+ if (ret)
+ return ret;
+
+ nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
+ nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
+ return 0;
}
const struct nvkm_falcon_func
@@ -163,29 +362,73 @@ tu102_gsp_flcn = {
};
static const struct nvkm_gsp_func
-tu102_gsp_r535_113_01 = {
+tu102_gsp = {
.flcn = &tu102_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_tu10x",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &tu1xx_gpu,
};
+int
+tu102_gsp_load_rm(struct nvkm_gsp *gsp, const struct nvkm_gsp_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ bool enable_gsp = fwif->enable;
+ int ret;
+
+#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
+ enable_gsp = true;
+#endif
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
+ return -EINVAL;
+
+ ret = nvkm_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+tu102_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
+{
+ int ret;
+
+ ret = tu102_gsp_load_rm(gsp, fwif);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload);
+
+done:
+ if (ret)
+ nvkm_gsp_dtor_fws(gsp);
+
+ return ret;
+}
+
static struct nvkm_gsp_fwif
tu102_gsps[] = {
- { 0, r535_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &tu102_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &tu102_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -196,3 +439,11 @@ tu102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(tu102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(tu102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu106, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(tu102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu106, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
index 04fbd9ed28b1..97eb046c25d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
@@ -22,29 +22,27 @@
#include "priv.h"
static const struct nvkm_gsp_func
-tu116_gsp_r535_113_01 = {
+tu116_gsp = {
.flcn = &tu102_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_tu11x",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &tu1xx_gpu,
};
static struct nvkm_gsp_fwif
tu116_gsps[] = {
- { 0, r535_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &tu116_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &tu116_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -55,3 +53,9 @@ tu116_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(tu116_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(tu116, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu117, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(tu116, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu117, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
index 553d540f2736..fa7a2862dd1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
@@ -4,5 +4,4 @@ nvkm-y += nvkm/subdev/instmem/nv04.o
nvkm-y += nvkm/subdev/instmem/nv40.o
nvkm-y += nvkm/subdev/instmem/nv50.o
nvkm-y += nvkm/subdev/instmem/gk20a.o
-
-nvkm-y += nvkm/subdev/instmem/r535.o
+nvkm-y += nvkm/subdev/instmem/gh100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index a2cd3330efc6..2f55bab8e132 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -182,9 +182,11 @@ nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
int ret;
if (suspend) {
- ret = imem->func->suspend(imem);
- if (ret)
- return ret;
+ if (imem->func->suspend) {
+ ret = imem->func->suspend(imem);
+ if (ret)
+ return ret;
+ }
imem->suspend = true;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c
new file mode 100644
index 000000000000..8d8dd5f8a6c7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/ref/gh100/pri_nv_xal_ep.h>
+
+static void
+gh100_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr)
+{
+ nvkm_wr32(device, NV_XAL_EP_BAR0_WINDOW, addr >> NV_XAL_EP_BAR0_WINDOW_BASE_SHIFT);
+}
+
+static const struct nvkm_instmem_func
+gh100_instmem = {
+ .fini = nv50_instmem_fini,
+ .memory_new = nv50_instobj_new,
+ .memory_wrap = nv50_instobj_wrap,
+ .set_bar0_window_addr = gh100_instmem_set_bar0_window_addr,
+};
+
+int
+gh100_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_instmem **pimem)
+{
+ return r535_instmem_new(&gh100_instmem, device, type, inst, pimem);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index 6b462f960922..2544b9f0ec85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -239,7 +239,6 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins
struct nvkm_instmem **pimem)
{
struct nv40_instmem *imem;
- int bar;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
@@ -247,13 +246,8 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins
*pimem = &imem->base;
/* map bar */
- if (device->func->resource_size(device, 2))
- bar = 2;
- else
- bar = 3;
-
- imem->iomem = ioremap_wc(device->func->resource_addr(device, bar),
- device->func->resource_size(device, bar));
+ imem->iomem = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST),
+ device->func->resource_size(device, NVKM_BAR2_INST));
if (!imem->iomem) {
nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
return -EFAULT;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index dd5b5a17ece0..4ca6fb30743d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -65,7 +65,7 @@ nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
spin_lock_irqsave(&imem->base.lock, flags);
if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
+ imem->base.func->set_bar0_window_addr(device, base);
imem->addr = base;
}
nvkm_wr32(device, 0x700000 + addr, data);
@@ -85,7 +85,7 @@ nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
spin_lock_irqsave(&imem->base.lock, flags);
if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
+ imem->base.func->set_bar0_window_addr(device, base);
imem->addr = base;
}
data = nvkm_rd32(device, 0x700000 + addr);
@@ -172,7 +172,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
/* Make the mapping visible to the host. */
iobj->bar = bar;
- iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
+ iobj->map = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST) +
(u32)iobj->bar->addr, size);
if (!iobj->map) {
nvkm_warn(subdev, "PRAMIN ioremap failed\n");
@@ -353,7 +353,7 @@ nv50_instobj_func = {
.map = nv50_instobj_map,
};
-static int
+int
nv50_instobj_wrap(struct nvkm_instmem *base,
struct nvkm_memory *memory, struct nvkm_memory **pmemory)
{
@@ -373,7 +373,7 @@ nv50_instobj_wrap(struct nvkm_instmem *base,
return 0;
}
-static int
+int
nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
struct nvkm_memory **pmemory)
{
@@ -395,6 +395,12 @@ nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
*****************************************************************************/
static void
+nv50_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr)
+{
+ nvkm_wr32(device, 0x001700, addr >> 16);
+}
+
+void
nv50_instmem_fini(struct nvkm_instmem *base)
{
nv50_instmem(base)->addr = ~0ULL;
@@ -415,6 +421,7 @@ nv50_instmem = {
.memory_new = nv50_instobj_new,
.memory_wrap = nv50_instobj_wrap,
.zero = false,
+ .set_bar0_window_addr = nv50_instmem_set_bar0_window_addr,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index 4c14c96fb60a..87bbdd786eaa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -16,10 +16,16 @@ struct nvkm_instmem_func {
bool zero, struct nvkm_memory **);
int (*memory_wrap)(struct nvkm_instmem *, struct nvkm_memory *, struct nvkm_memory **);
bool zero;
+ void (*set_bar0_window_addr)(struct nvkm_device *, u64 addr);
};
int nv50_instmem_new_(const struct nvkm_instmem_func *, struct nvkm_device *,
enum nvkm_subdev_type, int, struct nvkm_instmem **);
+void nv50_instmem_fini(struct nvkm_instmem *);
+int nv50_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
+ struct nvkm_memory **);
+int nv50_instobj_wrap(struct nvkm_instmem *, struct nvkm_memory *vram,
+ struct nvkm_memory **bar2);
void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
enum nvkm_subdev_type, int, struct nvkm_instmem *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 7ba35ea59c06..ea4848931540 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -15,8 +15,7 @@ nvkm-y += nvkm/subdev/mmu/gp100.o
nvkm-y += nvkm/subdev/mmu/gp10b.o
nvkm-y += nvkm/subdev/mmu/gv100.o
nvkm-y += nvkm/subdev/mmu/tu102.o
-
-nvkm-y += nvkm/subdev/mmu/r535.o
+nvkm-y += nvkm/subdev/mmu/gh100.o
nvkm-y += nvkm/subdev/mmu/mem.o
nvkm-y += nvkm/subdev/mmu/memnv04.o
@@ -38,6 +37,7 @@ nvkm-y += nvkm/subdev/mmu/vmmgp100.o
nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
nvkm-y += nvkm/subdev/mmu/vmmgv100.o
nvkm-y += nvkm/subdev/mmu/vmmtu102.o
+nvkm-y += nvkm/subdev/mmu/vmmgh100.o
nvkm-y += nvkm/subdev/mmu/umem.o
nvkm-y += nvkm/subdev/mmu/ummu.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c
new file mode 100644
index 000000000000..2918fb32cc91
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gh100_mmu = {
+ .dma_bits = 52,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gh100_vmm_new },
+ .kind = tu102_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gh100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ return r535_mmu_new(&gh100_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
index d9c9bee45222..160a5749a29f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
@@ -60,7 +60,7 @@ gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
if (ret)
return ret;
- *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr;
*psize = (*pvma)->size;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
index 79a3b0cc9f5b..1e3db52de6cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
@@ -41,7 +41,7 @@ nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
return ret;
- *paddr = device->func->resource_addr(device, 1) + addr;
+ *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + addr;
*psize = nvkm_memory_size(memory);
*pvma = ERR_PTR(-ENODEV);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
index 46759b89fc1f..33b2321e9d87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
@@ -57,7 +57,7 @@ nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
if (ret)
return ret;
- *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr;
*psize = (*pvma)->size;
return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index e9ca6537778c..90efef8f0b54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -53,6 +53,8 @@ const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *);
+const u8 *tu102_mmu_kind(struct nvkm_mmu *, int *, u8 *);
+
struct nvkm_mmu_pt {
union {
struct nvkm_mmu_ptc *ptc;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index df662ce4a4b0..7acff3642e20 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -28,7 +28,7 @@
#include <nvif/class.h>
-static const u8 *
+const u8 *
tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 9c97800fe037..f95c58b67633 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -19,7 +19,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#define NVKM_VMM_LEVELS_MAX 5
+#define NVKM_VMM_LEVELS_MAX 6
#include "vmm.h"
#include <subdev/fb.h>
@@ -1030,12 +1030,8 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm)
struct nvkm_vma *vma;
struct rb_node *node;
- if (vmm->rm.client.gsp) {
- nvkm_gsp_rm_free(&vmm->rm.object);
- nvkm_gsp_device_dtor(&vmm->rm.device);
- nvkm_gsp_client_dtor(&vmm->rm.client);
- nvkm_vmm_put(vmm, &vmm->rm.rsvd);
- }
+ if (vmm->rm.client.gsp)
+ r535_mmu_vaspace_del(vmm);
if (0)
nvkm_vmm_dump(vmm);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index f9bc30cdb2b3..4586a425dbe4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -143,6 +143,8 @@ struct nvkm_vmm_func {
int (*aper)(enum nvkm_memory_target);
int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
struct nvkm_vmm_map *);
+ int (*valid2)(struct nvkm_vmm *, bool ro, bool priv, u8 kind, u8 comp,
+ struct nvkm_vmm_map *);
void (*flush)(struct nvkm_vmm *, int depth);
int (*mthd)(struct nvkm_vmm *, struct nvkm_client *,
@@ -254,6 +256,8 @@ void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);
int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void tu102_vmm_flush(struct nvkm_vmm *, int depth);
+
int nv04_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv41_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
@@ -296,6 +300,9 @@ int gv100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
+int gh100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
#define VMM_PRINT(l,v,p,f,a...) do { \
struct nvkm_vmm *_vmm = (v); \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
new file mode 100644
index 000000000000..5614df3432da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_mmu.h>
+
+static inline void
+gh100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = addr | map->type;
+
+ while (ptes--) {
+ VMM_WO064(pt, vmm, ptei++ * NV_MMU_VER3_PTE__SIZE, data);
+ data += map->next;
+ }
+}
+
+static void
+gh100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map)
+{
+ if (map->page->shift == PAGE_SHIFT) {
+ VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u64 data = *map->dma++ | map->type;
+
+ VMM_WO064(pt, vmm, ptei++ * NV_MMU_VER3_PTE__SIZE, data);
+ }
+ nvkm_done(pt->memory);
+ return;
+ }
+
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ const u64 data = NVDEF(NV_MMU, VER3_PTE, PCF, SPARSE);
+
+ VMM_FO064(pt, vmm, ptei * NV_MMU_VER3_PTE__SIZE, data, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_spt = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gh100_vmm_pgt_sparse,
+ .mem = gh100_vmm_pgt_mem,
+ .dma = gh100_vmm_pgt_dma,
+ .sgl = gh100_vmm_pgt_sgl,
+};
+
+static void
+gh100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ const u64 data = NVDEF(NV_MMU, VER3_PTE, PCF, NO_VALID_4KB_PAGE);
+
+ VMM_FO064(pt, vmm, ptei * NV_MMU_VER3_PTE__SIZE, data, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_lpt = {
+ .invalid = gh100_vmm_lpt_invalid,
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gh100_vmm_pgt_sparse,
+ .mem = gh100_vmm_pgt_mem,
+};
+
+static inline void
+gh100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = addr | map->type;
+
+ while (ptes--) {
+ VMM_WO128(pt, vmm, ptei++ * NV_MMU_VER3_DUAL_PDE__SIZE, data, 0ULL);
+ data += map->next;
+ }
+}
+
+static void
+gh100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pd0_pte);
+}
+
+static inline bool
+gh100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
+{
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM:
+ *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, VIDEO_MEMORY);
+ *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_CACHED_ATS_NOT_ALLOWED);
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, SYSTEM_COHERENT_MEMORY);
+ *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_UNCACHED_ATS_ALLOWED);
+ break;
+ case NVKM_MEM_TARGET_NCOH:
+ *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, SYSTEM_NON_COHERENT_MEMORY);
+ *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_CACHED_ATS_ALLOWED);
+ break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ *data |= pt->addr;
+ return true;
+}
+
+static void
+gh100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data[2] = {};
+
+ if (pgt->pt[0] && !gh100_vmm_pde(pgt->pt[0], &data[0]))
+ return;
+ if (pgt->pt[1] && !gh100_vmm_pde(pgt->pt[1], &data[1]))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO128(pd, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, data[0], data[1]);
+ nvkm_done(pd->memory);
+}
+
+static void
+gh100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ const u64 data = NVDEF(NV_MMU, VER3_DUAL_PDE, PCF_BIG, SPARSE_ATS_ALLOWED);
+
+ VMM_FO128(pt, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, data, 0ULL, pdes);
+}
+
+static void
+gh100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ VMM_FO128(pt, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, 0ULL, 0ULL, pdes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_pd0 = {
+ .unmap = gh100_vmm_pd0_unmap,
+ .sparse = gh100_vmm_pd0_sparse,
+ .pde = gh100_vmm_pd0_pde,
+ .mem = gh100_vmm_pd0_mem,
+};
+
+static void
+gh100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data = 0;
+
+ if (!gh100_vmm_pde(pgt->pt[0], &data))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO064(pd, vmm, pdei * NV_MMU_VER3_PDE__SIZE, data);
+ nvkm_done(pd->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_pd1 = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gh100_vmm_pgt_sparse,
+ .pde = gh100_vmm_pd1_pde,
+};
+
+static const struct nvkm_vmm_desc
+gh100_vmm_desc_16[] = {
+ { LPT, 5, 8, 0x0100, &gh100_vmm_desc_lpt },
+ { PGD, 8, 16, 0x1000, &gh100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 1, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+gh100_vmm_desc_12[] = {
+ { SPT, 9, 8, 0x1000, &gh100_vmm_desc_spt },
+ { PGD, 8, 16, 0x1000, &gh100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 1, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ {}
+};
+
+static int
+gh100_vmm_valid(struct nvkm_vmm *vmm, bool ro, bool priv, u8 kind, u8 comp,
+ struct nvkm_vmm_map *map)
+{
+ const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
+ const bool vol = target == NVKM_MEM_TARGET_HOST;
+ const struct nvkm_vmm_page *page = map->page;
+ u8 kind_inv, pcf;
+ int kindn, aper;
+ const u8 *kindm;
+
+ map->next = 1ULL << page->shift;
+ map->type = 0;
+
+ aper = vmm->func->aper(target);
+ if (WARN_ON(aper < 0))
+ return aper;
+
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
+ VMM_DEBUG(vmm, "kind %02x", kind);
+ return -EINVAL;
+ }
+
+ if (priv) {
+ if (ro) {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACD;
+ } else {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD;
+ }
+ } else {
+ if (ro) {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD;
+ } else {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD;
+ }
+ }
+
+ map->type |= NVDEF(NV_MMU, VER3_PTE, VALID, TRUE);
+ map->type |= NVVAL(NV_MMU, VER3_PTE, APERTURE, aper);
+ map->type |= NVVAL(NV_MMU, VER3_PTE, PCF, pcf);
+ map->type |= NVVAL(NV_MMU, VER3_PTE, KIND, kind);
+ return 0;
+}
+
+static const struct nvkm_vmm_func
+gh100_vmm = {
+ .join = gv100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .valid2 = gh100_vmm_valid,
+ .flush = tu102_vmm_flush,
+ .page = {
+ { 56, &gh100_vmm_desc_16[5], NVKM_VMM_PAGE_Sxxx },
+ { 47, &gh100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gh100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gh100_vmm_desc_16[2], NVKM_VMM_PAGE_SVxC },
+ { 21, &gh100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+ { 16, &gh100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gh100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+gh100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gp100_vmm_new_(&gh100_vmm, mmu, managed, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index bddac77f48f0..851fd847a2a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -436,6 +436,9 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return ret;
}
+ if (vmm->func->valid2)
+ return vmm->func->valid2(vmm, ro, priv, kind, 0, map);
+
aper = vmm->func->aper(target);
if (WARN_ON(aper < 0))
return aper;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index 8379e72d77ab..4b30eab40bba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -23,7 +23,7 @@
#include <subdev/timer.h>
-static void
+void
tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
struct nvkm_device *device = vmm->mmu->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
index 174bdf995271..a14ea0f7b1c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -13,3 +13,4 @@ nvkm-y += nvkm/subdev/pci/gf100.o
nvkm-y += nvkm/subdev/pci/gf106.o
nvkm-y += nvkm/subdev/pci/gk104.o
nvkm-y += nvkm/subdev/pci/gp100.o
+nvkm-y += nvkm/subdev/pci/gh100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index 5a0de45d36ce..6867934256a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -39,26 +39,26 @@ nvkm_pci_msi_rearm(struct nvkm_device *device)
u32
nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
{
- return pci->func->rd32(pci, addr);
+ return nvkm_rd32(pci->subdev.device, pci->func->cfg.addr + addr);
}
void
nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
{
- pci->func->wr08(pci, addr, data);
+ nvkm_wr08(pci->subdev.device, pci->func->cfg.addr + addr, data);
}
void
nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
{
- pci->func->wr32(pci, addr, data);
+ nvkm_wr32(pci->subdev.device, pci->func->cfg.addr + addr, data);
}
u32
nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value)
{
- u32 data = pci->func->rd32(pci, addr);
- pci->func->wr32(pci, addr, (data & ~mask) | value);
+ u32 data = nvkm_pci_rd32(pci, addr);
+ nvkm_pci_wr32(pci, addr, (data & ~mask) | value);
return data;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
index 5b29aacedef3..5308f6539a3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
@@ -132,10 +132,9 @@ g84_pcie_init(struct nvkm_pci *pci)
static const struct nvkm_pci_func
g84_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv46_pci_msi_rearm,
.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
index a9e0674009c6..8ae7aa02e675 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
@@ -33,10 +33,9 @@ g92_pcie_version_supported(struct nvkm_pci *pci)
static const struct nvkm_pci_func
g92_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv46_pci_msi_rearm,
.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
index 7bacd0693283..df745d0690ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
@@ -25,10 +25,9 @@
static const struct nvkm_pci_func
g94_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
index 099906092fe1..6ce941df87b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -78,10 +78,9 @@ gf100_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
static const struct nvkm_pci_func
gf100_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = gf100_pci_msi_rearm,
.pcie.init = gf100_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
index bcde609ba866..712ca7e0959a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
@@ -25,10 +25,9 @@
static const struct nvkm_pci_func
gf106_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = gf100_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c
new file mode 100644
index 000000000000..42da92d7a5fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_xtl_ep_pri.h>
+
+static void
+gh100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+ /* Handled by top-level intr ACK. */
+}
+
+static const struct nvkm_pci_func
+gh100_pci = {
+ .cfg = {
+ .addr = DRF_LO(NV_EP_PCFGM),
+ .size = DRF_HI(NV_EP_PCFGM) - DRF_LO(NV_EP_PCFGM) + 1,
+ },
+ .msi_rearm = gh100_pci_msi_rearm,
+};
+
+int
+gh100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
+{
+ return nvkm_pci_new_(&gh100_pci, device, type, inst, ppci);
+}
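
GH100 is the one entry that does not hard-code its window: the base and size are derived from the NV_EP_PCFGM range macro in the generated hardware header via DRF_LO()/DRF_HI(). A hedged sketch of the idea with a made-up range (the real NV_EP_PCFGM value lives in dev_xtl_ep_pri.h and is not reproduced here):

/* Hypothetical example range; only the size arithmetic mirrors the code above. */
#define EXAMPLE_WINDOW_LO	0x92000
#define EXAMPLE_WINDOW_HI	0x93fff
#define EXAMPLE_WINDOW_SIZE	(EXAMPLE_WINDOW_HI - EXAMPLE_WINDOW_LO + 1)	/* 0x2000 */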
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
index 6be87ecffc89..ec6d0a7de995 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
@@ -204,10 +204,9 @@ gk104_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
static const struct nvkm_pci_func
gk104_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = gk104_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
index a5fafda0014d..4204316a544f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
@@ -31,9 +31,7 @@ gp100_pci_msi_rearm(struct nvkm_pci *pci)
static const struct nvkm_pci_func
gp100_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
.msi_rearm = gp100_pci_msi_rearm,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
index 9ab64194b185..b8a3f6850fa7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
@@ -23,32 +23,9 @@
*/
#include "priv.h"
-static u32
-nv04_pci_rd32(struct nvkm_pci *pci, u16 addr)
-{
- struct nvkm_device *device = pci->subdev.device;
- return nvkm_rd32(device, 0x001800 + addr);
-}
-
-static void
-nv04_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr08(device, 0x001800 + addr, data);
-}
-
-static void
-nv04_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr32(device, 0x001800 + addr, data);
-}
-
static const struct nvkm_pci_func
nv04_pci_func = {
- .rd32 = nv04_pci_rd32,
- .wr08 = nv04_pci_wr08,
- .wr32 = nv04_pci_wr32,
+ .cfg = { .addr = 0x001800, .size = 0x1000 },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
index 6a3c31cf0200..1971dbbdeb2b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -23,27 +23,6 @@
*/
#include "priv.h"
-u32
-nv40_pci_rd32(struct nvkm_pci *pci, u16 addr)
-{
- struct nvkm_device *device = pci->subdev.device;
- return nvkm_rd32(device, 0x088000 + addr);
-}
-
-void
-nv40_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr08(device, 0x088000 + addr, data);
-}
-
-void
-nv40_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr32(device, 0x088000 + addr, data);
-}
-
void
nv40_pci_msi_rearm(struct nvkm_pci *pci)
{
@@ -52,9 +31,7 @@ nv40_pci_msi_rearm(struct nvkm_pci *pci)
static const struct nvkm_pci_func
nv40_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
.msi_rearm = nv40_pci_msi_rearm,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
index 9cad17f178ec..0093eabac9ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
@@ -38,9 +38,7 @@ nv46_pci_msi_rearm(struct nvkm_pci *pci)
static const struct nvkm_pci_func
nv46_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
.msi_rearm = nv46_pci_msi_rearm,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
index 741e34bf307c..b445081bb80e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
@@ -25,9 +25,7 @@
static const struct nvkm_pci_func
nv4c_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index 9b7583532962..988eeee1471c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -8,10 +8,12 @@ int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *, enum nvkm_
struct nvkm_pci **);
struct nvkm_pci_func {
+ struct {
+ u32 addr;
+ u16 size;
+ } cfg;
+
void (*init)(struct nvkm_pci *);
- u32 (*rd32)(struct nvkm_pci *, u16 addr);
- void (*wr08)(struct nvkm_pci *, u16 addr, u8 data);
- void (*wr32)(struct nvkm_pci *, u16 addr, u32 data);
void (*msi_rearm)(struct nvkm_pci *);
struct {
@@ -27,9 +29,6 @@ struct nvkm_pci_func {
} pcie;
};
-u32 nv40_pci_rd32(struct nvkm_pci *, u16);
-void nv40_pci_wr08(struct nvkm_pci *, u16, u8);
-void nv40_pci_wr32(struct nvkm_pci *, u16, u32);
void nv40_pci_msi_rearm(struct nvkm_pci *);
void nv46_pci_msi_rearm(struct nvkm_pci *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
index dce337306cab..9446049642e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <rm/gpu.h>
+
static void
r535_vfn_dtor(struct nvkm_vfn *vfn)
{
@@ -32,6 +34,7 @@ r535_vfn_new(const struct nvkm_vfn_func *hw,
struct nvkm_device *device, enum nvkm_subdev_type type, int inst, u32 addr,
struct nvkm_vfn **pvfn)
{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
struct nvkm_vfn_func *rm;
int ret;
@@ -39,8 +42,12 @@ r535_vfn_new(const struct nvkm_vfn_func *hw,
return -ENOMEM;
rm->dtor = r535_vfn_dtor;
- rm->intr = hw->intr;
- rm->user = hw->user;
+	rm->intr = &tu102_vfn_intr;
+ rm->user.addr = 0x030000;
+ rm->user.size = 0x010000;
+ rm->user.base.minver = -1;
+ rm->user.base.maxver = -1;
+ rm->user.base.oclass = gpu->usermode.class;
ret = nvkm_vfn_new_(rm, device, type, inst, addr, pvfn);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
index c5460a14c541..4e64d8843373 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
@@ -36,7 +36,7 @@ nvkm_uvfn_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_vfn *vfn = nvkm_uvfn(object)->vfn;
struct nvkm_device *device = vfn->subdev.device;
- *addr = device->func->resource_addr(device, 0) + vfn->addr.user;
+ *addr = device->func->resource_addr(device, NVKM_BAR0_PRI) + vfn->addr.user;
*size = vfn->func->user.size;
*type = NVKM_OBJECT_MAP_IO;
return 0;
diff --git a/drivers/gpu/drm/nova/Kconfig b/drivers/gpu/drm/nova/Kconfig
new file mode 100644
index 000000000000..cca6a3fea879
--- /dev/null
+++ b/drivers/gpu/drm/nova/Kconfig
@@ -0,0 +1,14 @@
+config DRM_NOVA
+ tristate "Nova DRM driver"
+ depends on DRM=y
+ depends on PCI
+ depends on RUST
+ select AUXILIARY_BUS
+ default n
+ help
+ Choose this if you want to build the Nova DRM driver for Nvidia
+ GSP-based GPUs.
+
+	  This driver is a work in progress and may not be functional.
+
+ If M is selected, the module will be called nova.
diff --git a/drivers/gpu/drm/nova/Makefile b/drivers/gpu/drm/nova/Makefile
new file mode 100644
index 000000000000..42019bff3173
--- /dev/null
+++ b/drivers/gpu/drm/nova/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DRM_NOVA) += nova.o
diff --git a/drivers/gpu/drm/nova/driver.rs b/drivers/gpu/drm/nova/driver.rs
new file mode 100644
index 000000000000..b28b2e05cc15
--- /dev/null
+++ b/drivers/gpu/drm/nova/driver.rs
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{auxiliary, c_str, device::Core, drm, drm::gem, drm::ioctl, prelude::*, types::ARef};
+
+use crate::file::File;
+use crate::gem::NovaObject;
+
+pub(crate) struct NovaDriver {
+ #[expect(unused)]
+ drm: ARef<drm::Device<Self>>,
+}
+
+/// Convenience type alias for the DRM device type for this driver
+pub(crate) type NovaDevice = drm::Device<NovaDriver>;
+
+#[pin_data]
+pub(crate) struct NovaData {
+ pub(crate) adev: ARef<auxiliary::Device>,
+}
+
+const INFO: drm::DriverInfo = drm::DriverInfo {
+ major: 0,
+ minor: 0,
+ patchlevel: 0,
+ name: c_str!("nova"),
+ desc: c_str!("Nvidia Graphics"),
+};
+
+const NOVA_CORE_MODULE_NAME: &CStr = c_str!("NovaCore");
+const AUXILIARY_NAME: &CStr = c_str!("nova-drm");
+
+kernel::auxiliary_device_table!(
+ AUX_TABLE,
+ MODULE_AUX_TABLE,
+ <NovaDriver as auxiliary::Driver>::IdInfo,
+ [(
+ auxiliary::DeviceId::new(NOVA_CORE_MODULE_NAME, AUXILIARY_NAME),
+ ()
+ )]
+);
+
+impl auxiliary::Driver for NovaDriver {
+ type IdInfo = ();
+ const ID_TABLE: auxiliary::IdTable<Self::IdInfo> = &AUX_TABLE;
+
+ fn probe(adev: &auxiliary::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ let data = try_pin_init!(NovaData { adev: adev.into() });
+
+ let drm = drm::Device::<Self>::new(adev.as_ref(), data)?;
+ drm::Registration::new_foreign_owned(&drm, adev.as_ref(), 0)?;
+
+ Ok(KBox::new(Self { drm }, GFP_KERNEL)?.into())
+ }
+}
+
+#[vtable]
+impl drm::Driver for NovaDriver {
+ type Data = NovaData;
+ type File = File;
+ type Object = gem::Object<NovaObject>;
+
+ const INFO: drm::DriverInfo = INFO;
+
+ kernel::declare_drm_ioctls! {
+ (NOVA_GETPARAM, drm_nova_getparam, ioctl::RENDER_ALLOW, File::get_param),
+ (NOVA_GEM_CREATE, drm_nova_gem_create, ioctl::AUTH | ioctl::RENDER_ALLOW, File::gem_create),
+ (NOVA_GEM_INFO, drm_nova_gem_info, ioctl::AUTH | ioctl::RENDER_ALLOW, File::gem_info),
+ }
+}
diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs
new file mode 100644
index 000000000000..7e59a34b830d
--- /dev/null
+++ b/drivers/gpu/drm/nova/file.rs
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::driver::{NovaDevice, NovaDriver};
+use crate::gem::NovaObject;
+use crate::uapi::{GemCreate, GemInfo, Getparam};
+use kernel::{
+ alloc::flags::*,
+ drm::{self, gem::BaseObject},
+ pci,
+ prelude::*,
+ types::Opaque,
+ uapi,
+};
+
+pub(crate) struct File;
+
+impl drm::file::DriverFile for File {
+ type Driver = NovaDriver;
+
+ fn open(_dev: &NovaDevice) -> Result<Pin<KBox<Self>>> {
+ Ok(KBox::new(Self, GFP_KERNEL)?.into())
+ }
+}
+
+impl File {
+ /// IOCTL: get_param: Query GPU / driver metadata.
+ pub(crate) fn get_param(
+ dev: &NovaDevice,
+ getparam: &Opaque<uapi::drm_nova_getparam>,
+ _file: &drm::File<File>,
+ ) -> Result<u32> {
+ let adev = &dev.adev;
+ let parent = adev.parent().ok_or(ENOENT)?;
+ let pdev: &pci::Device = parent.try_into()?;
+ let getparam: &Getparam = getparam.into();
+
+ let value = match getparam.param() as u32 {
+ uapi::NOVA_GETPARAM_VRAM_BAR_SIZE => pdev.resource_len(1)?,
+ _ => return Err(EINVAL),
+ };
+
+ getparam.set_value(value);
+
+ Ok(0)
+ }
+
+ /// IOCTL: gem_create: Create a new DRM GEM object.
+ pub(crate) fn gem_create(
+ dev: &NovaDevice,
+ req: &Opaque<uapi::drm_nova_gem_create>,
+ file: &drm::File<File>,
+ ) -> Result<u32> {
+ let req: &GemCreate = req.into();
+ let obj = NovaObject::new(dev, req.size().try_into()?)?;
+
+ req.set_handle(obj.create_handle(file)?);
+
+ Ok(0)
+ }
+
+ /// IOCTL: gem_info: Query GEM metadata.
+ pub(crate) fn gem_info(
+ _dev: &NovaDevice,
+ req: &Opaque<uapi::drm_nova_gem_info>,
+ file: &drm::File<File>,
+ ) -> Result<u32> {
+ let req: &GemInfo = req.into();
+ let bo = NovaObject::lookup_handle(file, req.handle())?;
+
+ req.set_size(bo.size().try_into()?);
+
+ Ok(0)
+ }
+}
diff --git a/drivers/gpu/drm/nova/gem.rs b/drivers/gpu/drm/nova/gem.rs
new file mode 100644
index 000000000000..33b62d21400c
--- /dev/null
+++ b/drivers/gpu/drm/nova/gem.rs
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{
+ drm,
+ drm::{gem, gem::BaseObject},
+ prelude::*,
+ types::ARef,
+};
+
+use crate::{
+ driver::{NovaDevice, NovaDriver},
+ file::File,
+};
+
+/// GEM Object inner driver data
+#[pin_data]
+pub(crate) struct NovaObject {}
+
+impl gem::BaseDriverObject<gem::Object<NovaObject>> for NovaObject {
+ fn new(_dev: &NovaDevice, _size: usize) -> impl PinInit<Self, Error> {
+ try_pin_init!(NovaObject {})
+ }
+}
+
+impl gem::DriverObject for NovaObject {
+ type Driver = NovaDriver;
+}
+
+impl NovaObject {
+ /// Create a new DRM GEM object.
+ pub(crate) fn new(dev: &NovaDevice, size: usize) -> Result<ARef<gem::Object<Self>>> {
+ let aligned_size = size.next_multiple_of(1 << 12);
+
+ if size == 0 || size > aligned_size {
+ return Err(EINVAL);
+ }
+
+ gem::Object::new(dev, aligned_size)
+ }
+
+ /// Look up a GEM object handle for a `File` and return an `ObjectRef` for it.
+ #[inline]
+ pub(crate) fn lookup_handle(
+ file: &drm::File<File>,
+ handle: u32,
+ ) -> Result<ARef<gem::Object<Self>>> {
+ gem::Object::lookup_handle(file, handle)
+ }
+}
diff --git a/drivers/gpu/drm/nova/nova.rs b/drivers/gpu/drm/nova/nova.rs
new file mode 100644
index 000000000000..902876aa14d1
--- /dev/null
+++ b/drivers/gpu/drm/nova/nova.rs
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Nova DRM Driver
+
+mod driver;
+mod file;
+mod gem;
+mod uapi;
+
+use crate::driver::NovaDriver;
+
+kernel::module_auxiliary_driver! {
+ type: NovaDriver,
+ name: "Nova",
+ author: "Danilo Krummrich",
+ description: "Nova GPU driver",
+ license: "GPL v2",
+}
diff --git a/drivers/gpu/drm/nova/uapi.rs b/drivers/gpu/drm/nova/uapi.rs
new file mode 100644
index 000000000000..eb228a58d423
--- /dev/null
+++ b/drivers/gpu/drm/nova/uapi.rs
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::uapi;
+
+// TODO Work out some common infrastructure to avoid boilerplate code for uAPI abstractions.
+
+macro_rules! define_uapi_abstraction {
+ ($name:ident <= $inner:ty) => {
+ #[repr(transparent)]
+ pub struct $name(::kernel::types::Opaque<$inner>);
+
+ impl ::core::convert::From<&::kernel::types::Opaque<$inner>> for &$name {
+ fn from(value: &::kernel::types::Opaque<$inner>) -> Self {
+ // SAFETY: `Self` is a transparent wrapper of `$inner`.
+ unsafe { ::core::mem::transmute(value) }
+ }
+ }
+ };
+}
+
+define_uapi_abstraction!(Getparam <= uapi::drm_nova_getparam);
+
+impl Getparam {
+ pub fn param(&self) -> u64 {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
+ unsafe { (*self.0.get()).param }
+ }
+
+ pub fn set_value(&self, v: u64) {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
+ unsafe { (*self.0.get()).value = v };
+ }
+}
+
+define_uapi_abstraction!(GemCreate <= uapi::drm_nova_gem_create);
+
+impl GemCreate {
+ pub fn size(&self) -> u64 {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
+ unsafe { (*self.0.get()).size }
+ }
+
+ pub fn set_handle(&self, handle: u32) {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
+ unsafe { (*self.0.get()).handle = handle };
+ }
+}
+
+define_uapi_abstraction!(GemInfo <= uapi::drm_nova_gem_info);
+
+impl GemInfo {
+ pub fn handle(&self) -> u32 {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
+ unsafe { (*self.0.get()).handle }
+ }
+
+ pub fn set_size(&self, size: u64) {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
+ unsafe { (*self.0.get()).size = size };
+ }
+}
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index b17e77f700dd..6eff97a09160 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -420,6 +420,7 @@ static void dpi_init_pll(struct dpi_data *dpi)
*/
static int dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
@@ -429,7 +430,7 @@ static int dpi_bridge_attach(struct drm_bridge *bridge,
dpi_init_pll(dpi);
- return drm_bridge_attach(bridge->encoder, dpi->output.next_bridge,
+ return drm_bridge_attach(encoder, dpi->output.next_bridge,
bridge, flags);
}
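
This and the following omapdrm hunks adapt to the drm_bridge ->attach() callback now receiving the encoder as an explicit argument instead of the bridge dereferencing bridge->encoder itself. A minimal sketch of the updated callback shape, using hypothetical my_bridge_* names:

/* Sketch only; struct my_bridge and my_bridge_attach are hypothetical. */
struct my_bridge {
	struct drm_bridge bridge;
	struct drm_bridge *next_bridge;
};

static int my_bridge_attach(struct drm_bridge *bridge,
			    struct drm_encoder *encoder,
			    enum drm_bridge_attach_flags flags)
{
	struct my_bridge *ctx = container_of(bridge, struct my_bridge, bridge);

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
		return -EINVAL;

	/* Chain to the next bridge using the encoder passed in by the core. */
	return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
}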
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 9b9cc593790c..91ee63bfe0bc 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4617,6 +4617,7 @@ static const struct component_ops dsi_component_ops = {
*/
static int dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
@@ -4624,7 +4625,7 @@ static int dsi_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge,
+ return drm_bridge_attach(encoder, dsi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index e1ac447221ee..a3b22952fdc3 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -314,6 +314,7 @@ void hdmi4_core_disable(struct hdmi_core_data *core)
*/
static int hdmi4_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
@@ -321,7 +322,7 @@ static int hdmi4_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ return drm_bridge_attach(encoder, hdmi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index fa9904e4c218..0c98444d39a9 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -312,6 +312,7 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi)
*/
static int hdmi5_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
@@ -319,7 +320,7 @@ static int hdmi5_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ return drm_bridge_attach(encoder, hdmi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index f9ae358e8e52..e78826e4b560 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -128,6 +128,7 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
*/
static int sdi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
@@ -135,7 +136,7 @@ static int sdi_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, sdi->output.next_bridge,
+ return drm_bridge_attach(encoder, sdi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index aaeef603682c..50349518eda1 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -538,6 +538,7 @@ static int venc_get_clocks(struct venc_device *venc)
*/
static int venc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct venc_device *venc = drm_bridge_to_venc(bridge);
@@ -545,7 +546,7 @@ static int venc_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, venc->output.next_bridge,
+ return drm_bridge_attach(encoder, venc->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index e059b06e0239..721581d425b4 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -67,6 +67,15 @@ config DRM_PANEL_BOE_HIMAX8279D
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_BOE_TD4320
+ tristate "BOE TD4320 DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  Say Y here if you want to enable support for the BOE TD4320 1080x2340
+	  video mode panel found in Xiaomi Redmi Note 7 smartphones.
+
config DRM_PANEL_BOE_TH101MB31UIG002_28A
tristate "Boe TH101MB31UIG002-28A panel"
depends on OF
@@ -154,6 +163,17 @@ config DRM_PANEL_LVDS
handling of power supplies or control signals. It implements automatic
backlight handling if the panel is attached to a backlight controller.
+config DRM_PANEL_HIMAX_HX8279
+ tristate "Himax HX8279-based panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+ Himax HX8279 controller, such as the Startek KD070FHFID078
+ 7.0" 1200x1920 IPS LCD panel that uses a MIPI-DSI interface
+ and others.
+
config DRM_PANEL_HIMAX_HX83102
tristate "Himax HX83102-based panels"
depends on OF
@@ -497,6 +517,16 @@ config DRM_PANEL_NOVATEK_NT36672E
LCD panel module. The panel has a resolution of 1080x2408 and uses 24 bit
RGB per pixel.
+config DRM_PANEL_NOVATEK_NT37801
+ tristate "Novatek NT37801/NT37810 AMOLED DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Novatek NT37801 (or
+ NT37810) AMOLED DSI Video Mode LCD panel module with 1440x3200
+ resolution.
+
config DRM_PANEL_NOVATEK_NT39016
tristate "Novatek NT39016 RGB/SPI panel"
depends on OF && SPI
@@ -996,6 +1026,15 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
Video Mode panel
+config DRM_PANEL_VISIONOX_G2647FB105
+ tristate "Visionox G2647FB105"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Visionox
+ G2647FB105 (2340x1080@60Hz) AMOLED DSI cmd mode panel.
+
config DRM_PANEL_VISIONOX_R66451
tristate "Visionox R66451"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 1bb8ae46b59b..714cbac830e3 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.
obj-$(CONFIG_DRM_PANEL_AUO_A030JTN01) += panel-auo-a030jtn01.o
obj-$(CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0) += panel-boe-bf060y8m-aj0.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
+obj-$(CONFIG_DRM_PANEL_BOE_TD4320) += panel-boe-td4320.o
obj-$(CONFIG_DRM_PANEL_BOE_TH101MB31UIG002_28A) += panel-boe-th101mb31ig002-28a.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_LL2) += panel-boe-tv101wum-ll2.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
@@ -16,6 +17,7 @@ obj-$(CONFIG_DRM_PANEL_EBBG_FT8719) += panel-ebbg-ft8719.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
+obj-$(CONFIG_DRM_PANEL_HIMAX_HX8279) += panel-himax-hx8279.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83102) += panel-himax-hx83102.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
@@ -49,6 +51,7 @@ obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35950) += panel-novatek-nt35950.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36523) += panel-novatek-nt36523.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672E) += panel-novatek-nt36672e.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT37801) += panel-novatek-nt37801.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
@@ -101,6 +104,7 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_G2647FB105) += panel-visionox-g2647fb105.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM692E5) += panel-visionox-rm692e5.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_VTDR6130) += panel-visionox-vtdr6130.o
diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
index 4692c36fe217..87fb0fd29658 100644
--- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
+++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
@@ -279,9 +279,10 @@ static int y030xx067a_probe(struct spi_device *spi)
struct y030xx067a *priv;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct y030xx067a, panel,
+ &y030xx067a_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -306,9 +307,6 @@ static int y030xx067a_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
"Failed to get reset GPIO\n");
- drm_panel_init(&priv->panel, dev, &y030xx067a_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
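
The panel probe conversions that follow all use the same pattern: the separate devm_kzalloc() and drm_panel_init() calls are folded into devm_drm_panel_alloc(), which allocates the driver structure with its embedded drm_panel already initialised and returns an ERR_PTR on failure. A hedged sketch of the resulting probe shape, with hypothetical my_panel names:

/* Sketch only; struct my_panel, my_panel_funcs and my_panel_probe are
 * hypothetical stand-ins for the per-driver types in these hunks.
 */
struct my_panel {
	struct drm_panel panel;
};

static const struct drm_panel_funcs my_panel_funcs = { /* ... */ };

static int my_panel_probe(struct spi_device *spi)
{
	struct my_panel *priv;

	priv = devm_drm_panel_alloc(&spi->dev, struct my_panel, panel,
				    &my_panel_funcs, DRM_MODE_CONNECTOR_DPI);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	drm_panel_add(&priv->panel);
	return 0;
}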
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index 503ecea72c5e..ea5119018df4 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -306,9 +306,11 @@ static int versatile_panel_probe(struct platform_device *pdev)
return PTR_ERR(map);
}
- vpanel = devm_kzalloc(dev, sizeof(*vpanel), GFP_KERNEL);
- if (!vpanel)
- return -ENOMEM;
+ vpanel = devm_drm_panel_alloc(dev, struct versatile_panel, panel,
+ &versatile_panel_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(vpanel))
+ return PTR_ERR(vpanel);
ret = regmap_read(map, SYS_CLCD, &val);
if (ret) {
@@ -348,9 +350,6 @@ static int versatile_panel_probe(struct platform_device *pdev)
dev_info(dev, "panel mounted on IB2 daughterboard\n");
}
- drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&vpanel->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
index b05a663c134c..db006576d704 100644
--- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -224,9 +224,11 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
struct tm5p5_nt35596 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct tm5p5_nt35596, panel,
+ &tm5p5_nt35596_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vdd";
ctx->supplies[1].supply = "vddio";
@@ -253,9 +255,6 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &tm5p5_nt35596_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = tm5p5_nt35596_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight)) {
ret = PTR_ERR(ctx->panel.backlight);
diff --git a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
index 77604d6a4e72..6e52bf6830e1 100644
--- a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
+++ b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
@@ -200,9 +200,10 @@ static int a030jtn01_probe(struct spi_device *spi)
spi->mode |= SPI_MODE_3 | SPI_3WIRE;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct a030jtn01, panel,
+ &a030jtn01_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -223,9 +224,6 @@ static int a030jtn01_probe(struct spi_device *spi)
if (IS_ERR(priv->reset_gpio))
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), "Failed to get reset GPIO");
- drm_panel_init(&priv->panel, dev, &a030jtn01_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
index 7e66db4a88bb..84c21c62a43e 100644
--- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -55,77 +55,56 @@ static void boe_bf060y8m_aj0_reset(struct boe_bf060y8m_aj0 *boe)
static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe)
{
struct mipi_dsi_device *dsi = boe->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
- mipi_dsi_dcs_write_seq(dsi, 0xf8,
- 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(30);
-
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc0,
- 0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
- 0x2a, 0x31, 0x39, 0x20, 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
- 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
- 0x5c, 0x5c, 0x5c);
- mipi_dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
-
- msleep(30);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(50);
-
- return 0;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x00, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_3D_CONTROL, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf8,
+ 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 30);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0,
+ 0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
+ 0x2a, 0x31, 0x39, 0x20, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
+ 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
+ 0x5c, 0x5c, 0x5c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
+
+ mipi_dsi_msleep(&dsi_ctx, 30);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
+
+ return dsi_ctx.accum_err;
}
-static int boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe)
+static void boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe)
{
struct mipi_dsi_device *dsi = boe->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
/* OFF commands sent in HS mode */
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(20);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- usleep_range(1000, 2000);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000);
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
-
- return 0;
}
static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel)
{
struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel);
- struct device *dev = &boe->dsi->dev;
int ret;
/*
@@ -157,13 +136,14 @@ static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel)
ret = boe_bf060y8m_aj0_on(boe);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(boe->reset_gpio, 1);
- return ret;
+ goto err_on;
}
return 0;
+err_on:
+ regulator_disable(boe->vregs[BF060Y8M_VREG_VCI].consumer);
err_vci:
regulator_disable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer);
err_vddio:
@@ -178,15 +158,11 @@ err_elvss:
static int boe_bf060y8m_aj0_unprepare(struct drm_panel *panel)
{
struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel);
- struct device *dev = &boe->dsi->dev;
- int ret;
- ret = boe_bf060y8m_aj0_off(boe);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ boe_bf060y8m_aj0_off(boe);
gpiod_set_value_cansleep(boe->reset_gpio, 1);
- ret = regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs);
+ regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs);
return 0;
}
@@ -234,13 +210,11 @@ static int boe_bf060y8m_aj0_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
u16 brightness = backlight_get_brightness(bl);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, brightness);
- return 0;
+ return dsi_ctx.accum_err;
}
static int boe_bf060y8m_aj0_bl_get_brightness(struct backlight_device *bl)
@@ -350,9 +324,11 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
struct boe_bf060y8m_aj0 *boe;
int ret;
- boe = devm_kzalloc(dev, sizeof(*boe), GFP_KERNEL);
- if (!boe)
- return -ENOMEM;
+ boe = devm_drm_panel_alloc(dev, struct boe_bf060y8m_aj0, panel,
+ &boe_bf060y8m_aj0_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(boe))
+ return PTR_ERR(boe);
ret = boe_bf060y8m_aj0_init_vregs(boe, dev);
if (ret)
@@ -374,9 +350,6 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&boe->panel, dev, &boe_bf060y8m_aj0_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
boe->panel.prepare_prev_first = true;
boe->panel.backlight = boe_bf060y8m_aj0_create_backlight(dsi);
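
The bf060y8m-aj0 hunks above switch to the mipi_dsi_multi_context helpers: each *_multi() call (and the mipi_dsi_msleep()/mipi_dsi_usleep_range() wrappers) becomes a no-op once ctx.accum_err is set, so per-call error handling collapses into a single accum_err check at the end. A minimal sketch of the pattern; the command bytes are placeholders, not a real panel init sequence:

static int example_panel_on(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00);	/* skipped if an earlier call failed */
	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
	mipi_dsi_msleep(&dsi_ctx, 30);					/* delay also skipped on error */
	mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

	return dsi_ctx.accum_err;	/* first error, or 0 on success */
}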
diff --git a/drivers/gpu/drm/panel/panel-boe-td4320.c b/drivers/gpu/drm/panel/panel-boe-td4320.c
new file mode 100644
index 000000000000..1956daa2c71b
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-td4320.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2024 Barnabas Czeman <barnabas.czeman@mainlining.org>
+// Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+// Copyright (c) 2013, The Linux Foundation. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct boe_td4320 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data *supplies;
+ struct gpio_desc *reset_gpio;
+};
+
+static const struct regulator_bulk_data boe_td4320_supplies[] = {
+ { .supply = "iovcc" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline struct boe_td4320 *to_boe_td4320(struct drm_panel *panel)
+{
+ return container_of(panel, struct boe_td4320, panel);
+}
+
+static void boe_td4320_reset(struct boe_td4320 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(30);
+}
+
+static int boe_td4320_on(struct boe_td4320 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x04);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xd6, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb8,
+ 0x19, 0x55, 0x00, 0xbe, 0x00, 0x00,
+ 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb9,
+ 0x4d, 0x55, 0x05, 0xe6, 0x00, 0x02,
+ 0x03);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xba,
+ 0x9b, 0x5b, 0x07, 0xe6, 0x00, 0x13,
+ 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf9,
+ 0x44, 0x3f, 0x00, 0x8d, 0xbf);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xce,
+ 0x5d, 0x00, 0x0f, 0x1f, 0x2f, 0x3f,
+ 0x4f, 0x5f, 0x6f, 0x7f, 0x8f, 0x9f,
+ 0xaf, 0xbf, 0xcf, 0xdf, 0xef, 0xff,
+ 0x04, 0x00, 0x02, 0x02, 0x42, 0x01,
+ 0x69, 0x5a, 0x40, 0x40, 0x00, 0x00,
+ 0x04, 0xfa, 0x00);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x00b8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x2c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x00);
+ mipi_dsi_msleep(&dsi_ctx, 96);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static int boe_td4320_off(struct boe_td4320 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ ctx->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ return dsi_ctx.accum_err;
+}
+
+static int boe_td4320_prepare(struct drm_panel *panel)
+{
+ struct boe_td4320 *ctx = to_boe_td4320(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(boe_td4320_supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ boe_td4320_reset(ctx);
+
+ ret = boe_td4320_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(boe_td4320_supplies), ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int boe_td4320_unprepare(struct drm_panel *panel)
+{
+ struct boe_td4320 *ctx = to_boe_td4320(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = boe_td4320_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(boe_td4320_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode boe_td4320_mode = {
+ .clock = (1080 + 86 + 2 + 100) * (2340 + 4 + 4 + 60) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 86,
+ .hsync_end = 1080 + 86 + 2,
+ .htotal = 1080 + 86 + 2 + 100,
+ .vdisplay = 2340,
+ .vsync_start = 2340 + 4,
+ .vsync_end = 2340 + 4 + 4,
+ .vtotal = 2340 + 4 + 4 + 60,
+ .width_mm = 67,
+ .height_mm = 145,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
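
For reference, the mode above gives htotal = 1080 + 86 + 2 + 100 = 1268 and vtotal = 2340 + 4 + 4 + 60 = 2408, so the .clock expression evaluates to 1268 * 2408 * 60 / 1000 = 183200 kHz (about 183.2 MHz) for a 60 Hz refresh rate.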
+
+static int boe_td4320_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &boe_td4320_mode);
+}
+
+static const struct drm_panel_funcs boe_td4320_panel_funcs = {
+ .prepare = boe_td4320_prepare,
+ .unprepare = boe_td4320_unprepare,
+ .get_modes = boe_td4320_get_modes,
+};
+
+static int boe_td4320_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct boe_td4320 *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct boe_td4320, panel,
+ &boe_td4320_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(boe_td4320_supplies),
+ boe_td4320_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ctx->panel.prepare_prev_first = true;
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void boe_td4320_remove(struct mipi_dsi_device *dsi)
+{
+ struct boe_td4320 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id boe_td4320_of_match[] = {
+ { .compatible = "boe,td4320" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_td4320_of_match);
+
+static struct mipi_dsi_driver boe_td4320_driver = {
+ .probe = boe_td4320_probe,
+ .remove = boe_td4320_remove,
+ .driver = {
+ .name = "panel-boe-td4320",
+ .of_match_table = boe_td4320_of_match,
+ },
+};
+module_mipi_dsi_driver(boe_td4320_driver);
+
+MODULE_AUTHOR("Barnabas Czeman <barnabas.czeman@mainlining.org>");
+MODULE_DESCRIPTION("DRM driver for boe td4320 fhdplus video mode dsi panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
index 0b87f1e6ecae..f33d4f855929 100644
--- a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
+++ b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
@@ -349,9 +349,11 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
const struct panel_desc *desc;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct boe_th101mb31ig002, panel,
+ &boe_th101mb31ig002_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
@@ -383,9 +385,6 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(&dsi->dev, ret,
"Failed to get orientation\n");
- drm_panel_init(&ctx->panel, &dsi->dev, &boe_th101mb31ig002_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
index 50e4a5341bc6..20b6e11a7d84 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
@@ -166,9 +166,11 @@ static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi)
struct boe_tv101wum_ll2 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct boe_tv101wum_ll2, panel,
+ &boe_tv101wum_ll2_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(boe_tv101wum_ll2_supplies),
@@ -190,8 +192,6 @@ static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_VIDEO_HSE;
- drm_panel_init(&ctx->panel, dev, &boe_tv101wum_ll2_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
index 6b3f4d664d2a..ae6e9ffc46cb 100644
--- a/drivers/gpu/drm/panel/panel-dsi-cm.c
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -511,9 +511,10 @@ static int dsicm_probe(struct mipi_dsi_device *dsi)
dev_dbg(dev, "probe\n");
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
+ ddata = devm_drm_panel_alloc(dev, struct panel_drv_data, panel,
+ &dsicm_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ddata))
+ return PTR_ERR(ddata);
mipi_dsi_set_drvdata(dsi, ddata);
ddata->dsi = dsi;
@@ -530,9 +531,6 @@ static int dsicm_probe(struct mipi_dsi_device *dsi)
dsicm_hw_reset(ddata);
- drm_panel_init(&ddata->panel, dev, &dsicm_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
if (ddata->use_dsi_backlight) {
struct backlight_properties props = { 0 };
props.max_brightness = 255;
diff --git a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
index 0bfed0ec0bbc..fb9f9f42be4f 100644
--- a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
+++ b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
@@ -163,9 +163,11 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
struct ebbg_ft8719 *ctx;
int i, ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ebbg_ft8719, panel,
+ &ebbg_ft8719_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
ctx->supplies[i].supply = regulator_names[i];
@@ -196,9 +198,6 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &ebbg_ft8719_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 52028c8f8988..90e8c154a978 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -839,9 +839,10 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
struct device_node *ddc;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct panel_edp, base,
+ &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
panel->prepared_time = 0;
panel->desc = desc;
@@ -886,8 +887,6 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
dev_set_drvdata(dev, panel);
- drm_panel_init(&panel->base, dev, &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP);
-
err = drm_panel_of_backlight(&panel->base);
if (err)
goto err_finished_ddc_init;
@@ -1763,6 +1762,13 @@ static const struct panel_delay delay_80_500_e50 = {
.enable = 50,
};
+static const struct panel_delay delay_80_500_e80_p2e200 = {
+ .hpd_absent = 80,
+ .unprepare = 500,
+ .enable = 80,
+ .prepare_to_enable = 200,
+};
+
static const struct panel_delay delay_100_500_e200 = {
.hpd_absent = 100,
.unprepare = 500,
@@ -1878,6 +1884,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xc9a8, &delay_200_500_e50, "B140QAN08.H"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
@@ -1938,6 +1945,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d73, &delay_200_500_e80, "NE140WUM-N6S"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
@@ -1973,6 +1981,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1103, &delay_200_500_e80_d50, "MNB601LS1-3"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"),
EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"),
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8279.c b/drivers/gpu/drm/panel/panel-himax-hx8279.c
new file mode 100644
index 000000000000..fb302d1f91b9
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-himax-hx8279.c
@@ -0,0 +1,1296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Himax HX8279 DriverIC panels driver
+ *
+ * Copyright (c) 2025 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+/* Page selection */
+#define HX8279_REG_PAGE 0xb0
+ #define HX8279_PAGE_SEL GENMASK(3, 0)
+
+/* Page 0 - Driver/Module Configuration */
+#define HX8279_P0_VGHS 0xbf
+#define HX8279_P0_VGLS 0xc0
+#define HX8279_P0_VGPHS 0xc2
+#define HX8279_P0_VGNHS 0xc4
+ #define HX8279_P0_VG_SEL GENMASK(4, 0)
+ #define HX8279_VGH_MIN_MV 8700
+ #define HX8279_VGH_STEP_MV 300
+ #define HX8279_VGL_MIN_MV 6700
+ #define HX8279_VGL_STEP_MV 300
+ #define HX8279_VGPNH_MIN_MV 4000
+ #define HX8279_VGPNX_STEP_MV 50
+ #define HX8279_VGH_VOLT_SEL(x) ((x - HX8279_VGH_MIN_MV) / HX8279_VGH_STEP_MV)
+ #define HX8279_VGL_VOLT_SEL(x) ((x - HX8279_VGL_MIN_MV) / HX8279_VGL_STEP_MV)
+ #define HX8279_VGPN_VOLT_SEL(x) ((x - HX8279_VGPNH_MIN_MV) / HX8279_VGPNX_STEP_MV)
+
+/* Page 1 - Gate driver On Array (GOA) Mux */
+#define HX8279_P1_REG_GOA_L 0xc0
+#define HX8279_P1_REG_GOUTL(x) (HX8279_P1_REG_GOA_L + (x))
+#define HX8279_P1_REG_GOA_R 0xd4
+#define HX8279_P1_REG_GOUTR(x) (HX8279_P1_REG_GOA_R + (x))
+ #define HX8279_GOUT_STB GENMASK(7, 6)
+ #define HX8279_GOUT_SEL GENMASK(5, 0)
+
+/* Page 2 - Analog Gamma Configuration */
+#define HX8279_P2_REG_ANALOG_GAMMA 0xc0
+ #define HX8279_P2_REG_GAMMA_T_PVP(x) (HX8279_P2_REG_ANALOG_GAMMA + (x)) /* 0..16 */
+ #define HX8279_P2_REG_GAMMA_T_PVN(x) (HX8279_P2_REG_GAMMA_T_PVP(17) + (x)) /* 0..16 */
+
+/* Page 3 - Gate driver On Array (GOA) Configuration */
+#define HX8279_P3_REG_UNKNOWN_BA 0xba
+#define HX8279_P3_REG_GOA_CKV_FALL_PREC 0xbc
+#define HX8279_P3_REG_GOA_TIMING_ODD 0xc2
+ #define HX8279_P3_REG_GOA_TO(x) (HX8279_P3_REG_GOA_TIMING_ODD + x) /* GOA_T0..5 */
+#define HX8279_P3_REG_GOA_STVL 0xc8
+ #define HX8279_P3_GOA_STV_LEAD GENMASK(4, 0)
+#define HX8279_P3_REG_GOA_CKVL 0xc9
+ #define HX8279_P3_GOA_CKV_LEAD GENMASK(4, 0)
+#define HX8279_P3_REG_GOA_CKVD 0xca
+ #define HX8279_P3_GOA_CKV_NONOVERLAP BIT(7)
+ #define HX8279_P3_GOA_CKV_RESERVED BIT(6)
+ #define HX8279_P3_GOA_CKV_DUMMY GENMASK(5, 0)
+#define HX8279_P3_REG_GOA_CKV_RISE_PREC 0xcb
+#define HX8279_P3_REG_GOA_CLR1_W_ADJ 0xd2
+#define HX8279_P3_REG_GOA_CLR234_W_ADJ 0xd3
+#define HX8279_P3_REG_GOA_CLR1_CFG 0xd4
+#define HX8279_P3_REG_GOA_CLR_CFG(x) (HX8279_P3_REG_GOA_CLR1_CFG + (x)) /* CLR1..4 */
+ #define HX8279_P3_GOA_CLR_CFG_POLARITY BIT(7)
+ #define HX8279_P3_GOA_CLR_CFG_STARTPOS GENMASK(6, 0)
+#define HX8279_P3_REG_GOA_TIMING_EVEN 0xdd
+ #define HX8279_P3_REG_GOA_TE(x) (HX8279_P3_REG_GOA_TIMING_EVEN + x)
+#define HX8279_P3_REG_UNKNOWN_E4 0xe4
+#define HX8279_P3_REG_UNKNOWN_E5 0xe5
+
+/* Page 5 - MIPI */
+#define HX8279_P5_REG_TIMING 0xb3
+ #define HX8279_P5_TIMING_THS_SETTLE GENMASK(7, 5)
+ #define HX8279_P5_TIMING_LHS_SETTLE BIT(4)
+ #define HX8279_P5_TIMING_TLPX GENMASK(3, 0)
+#define HX8279_P5_REG_UNKNOWN_B8 0xb8
+#define HX8279_P5_REG_UNKNOWN_BC 0xbc
+#define HX8279_P5_REG_UNKNOWN_D6 0xd6
+
+/* Page 6 - Engineer */
+#define HX8279_P6_REG_ENGINEER_PWD 0xb8
+#define HX8279_P6_REG_INHOUSE_FUNC 0xc0
+ #define HX8279_P6_ENG_UNLOCK_WORD 0xa5
+#define HX8279_P6_REG_GAMMA_CHOPPER 0xbc
+ #define HX8279_P6_GAMMA_POCGM_CTL GENMASK(6, 4)
+ #define HX8279_P6_GAMMA_POGCMD_CTL GENMASK(2, 0)
+#define HX8279_P6_REG_VOLT_ADJ 0xc7
+	/* For VCCIFS and VCCS - 0: 1450, 1: 1500, 2: 1550, 3: 1600 mV */
+ #define HX8279_P6_VOLT_ADJ_VCCIFS GENMASK(3, 2)
+ #define HX8279_P6_VOLT_ADJ_VCCS GENMASK(1, 0)
+#define HX8279_P6_REG_DLY_TIME_ADJ 0xd5
+
+/* Page 7...12 - Digital Gamma Adjustment */
+#define HX8279_PG_DIGITAL_GAMMA 0xb1
+#define HX8279_DGAMMA_DGMA1_HI GENMASK(7, 6)
+#define HX8279_DGAMMA_DGMA2_HI GENMASK(5, 4)
+#define HX8279_DGAMMA_DGMA3_HI GENMASK(3, 2)
+#define HX8279_DGAMMA_DGMA4_HI GENMASK(1, 0)
+#define HX8279_PG_DGAMMA_NUM_LO_BYTES 24
+#define HX8279_PG_DGAMMA_NUM_HI_BYTES 6
+
+struct hx8279 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi[2];
+ struct regulator_bulk_data vregs[2];
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *reset_gpio;
+ const struct hx8279_panel_desc *desc;
+ u8 last_page;
+ bool skip_voltage_config;
+ bool skip_goa_config;
+ bool skip_goa_timing;
+ bool skip_goa_even_timing;
+ bool skip_mipi_timing;
+};
+
+struct hx8279_panel_mode {
+ const struct drm_display_mode mode;
+ u8 bpc;
+ bool is_video_mode;
+};
+
+/**
+ * struct hx8279_goa_mux - Gate driver On Array Muxer
+ * @gout_l: Mux GOA signal to GOUT Left pin
+ * @gout_r: Mux GOA signal to GOUT Right pin
+ */
+struct hx8279_goa_mux {
+ u8 gout_l[20];
+ u8 gout_r[20];
+};
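+/*
+ * Illustrative note (not a datasheet quote): each gout_l/gout_r entry is a
+ * raw register value, so an entry of 0x05 encodes HX8279_GOUT_STB = 0 and
+ * HX8279_GOUT_SEL = 5.
+ */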
+
+/**
+ * struct hx8279_analog_gamma - Analog Gamma Adjustment
+ * @pos: Positive gamma op's input voltage, adjusted by VGP(H/L)
+ * @neg: Negative gamma op's input voltage, adjusted by VGN(H/L)
+ *
+ * Analog Gamma correction is performed with 17+17 reference voltages,
+ * generated through resistor strings, and defined with 17 register
+ * values for the positive polarity and 17 for the negative one.
+ *
+ * Each register holds a resistance value, in units of 8.5 ohms, for the
+ * following gamma levels:
+ * 0, 8, 16, 28, 40, 56, 80, 128, 176, 200, 216, 228, 240, 248, 252, 255.
+ */
+struct hx8279_analog_gamma {
+ u8 pos[17];
+ u8 neg[17];
+};
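+/*
+ * Example (illustration only, register value taken from the descriptors
+ * below): an analog gamma entry of 0x31 (49 units) corresponds to
+ * 49 * 8.5 = 416.5 ohms in the resistor string for that gamma level.
+ */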
+
+/**
+ * struct hx8279_digital_gamma - Digital Gamma Adjustment
+ * @r: Adjustment for red component
+ * @g: Adjustment for green component
+ * @b: Adjustment for blue component
+ *
+ * The layout of this structure follows the register layout to simplify
+ * both the handling and the declaration of those values in the driver.
+ * Gamma correction is internally done with a 24 segment piecewise
+ * linear interpolation; those segments are defined with 24 ten-bit
+ * values, of which:
+ * - The LOW eight bits of each of the 24 values occupy the first 24
+ * registers, starting at the first register (0xb1) of the Digital
+ * Gamma Adjustment page;
+ * - The HIGH two bits of each of the 24 values are packed into the
+ * last six registers;
+ * - Each of those last six registers holds the two-bit HI values of a
+ * group of four LOW registers, in inverted order: bits [1:0] relate
+ * to the last register of the group and bits [7:6] to the first one.
+ *
+ * The 24 segments refer to the following gamma levels:
+ * 0, 1, 3, 7, 11, 15, 23, 31, 47, 63, 95, 127, 128, 160,
+ * 192, 208, 224, 232, 240, 244, 248, 252, 254, 255
+ */
+struct hx8279_digital_gamma {
+ u8 r[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES];
+ u8 g[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES];
+ u8 b[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES];
+};
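+/*
+ * Illustrative sketch (the array names lo[]/hi[] are placeholders, not
+ * driver symbols): the full 10-bit value of the first segment of a
+ * component can be recovered as
+ *   dgma1 = (FIELD_GET(HX8279_DGAMMA_DGMA1_HI, hi[0]) << 8) | lo[0];
+ * where lo[] are the 24 LOW bytes and hi[] the 6 HI bytes of that
+ * component array; hx8279_check_dig_gamma() walks the arrays in this
+ * fashion to validate that the resulting curve never decreases.
+ */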
+
+struct hx8279_panel_desc {
+ const struct mipi_dsi_device_info dsi_info;
+ const struct hx8279_panel_mode *mode_data;
+ u8 num_lanes;
+ u8 num_modes;
+
+ /* Page 0 */
+ unsigned int vgh_mv;
+ unsigned int vgl_mv;
+ unsigned int vgph_mv;
+ unsigned int vgnh_mv;
+
+ /* Page 1 */
+ const struct hx8279_goa_mux *gmux;
+
+ /* Page 2 */
+ const struct hx8279_analog_gamma *agamma;
+
+ /* Page 3 */
+ u8 goa_unk_ba;
+ u8 goa_odd_timing[6];
+ u8 goa_even_timing[6];
+ u8 goa_stv_lead_time_ck;
+ u8 goa_ckv_lead_time_ck;
+ u8 goa_ckv_dummy_vblank_num;
+ u8 goa_ckv_rise_precharge;
+ u8 goa_ckv_fall_precharge;
+ bool goa_ckv_non_overlap_ctl;
+ u8 goa_clr1_width_adj;
+ u8 goa_clr234_width_adj;
+ s8 goa_clr_polarity[4];
+ int goa_clr_start_pos[4];
+ u8 goa_unk_e4;
+ u8 goa_unk_e5;
+
+ /* Page 5 */
+ u8 bta_tlpx;
+ bool lhs_settle_time_by_osc25;
+ u8 ths_settle_time;
+ u8 timing_unk_b8;
+ u8 timing_unk_bc;
+ u8 timing_unk_d6;
+
+ /* Page 6 */
+ u8 gamma_ctl;
+ u8 volt_adj;
+ u8 src_delay_time_adj_ck;
+
+ /* Page 7..12 */
+ const struct hx8279_digital_gamma *dgamma;
+};
+
+static inline struct hx8279 *to_hx8279(struct drm_panel *panel)
+{
+ return container_of(panel, struct hx8279, panel);
+}
+
+static void hx8279_set_page(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx, u8 page)
+{
+ const u8 cmd_set_page[] = { HX8279_REG_PAGE, page };
+
+ if (hx->last_page == page)
+ return;
+
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_page, ARRAY_SIZE(cmd_set_page));
+ if (!dsi_ctx->accum_err)
+ hx->last_page = page;
+}
+
+static void hx8279_set_module_config(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_voltage[2];
+
+ if (hx->skip_voltage_config)
+ return;
+
+ /* Page 0 - Driver/Module Configuration */
+ hx8279_set_page(hx, dsi_ctx, 0);
+
+ if (desc->vgh_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGHS;
+ cmd_set_voltage[1] = HX8279_VGH_VOLT_SEL(desc->vgh_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+
+ if (desc->vgl_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGLS;
+ cmd_set_voltage[1] = HX8279_VGL_VOLT_SEL(desc->vgl_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+
+ if (desc->vgph_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGPHS;
+ cmd_set_voltage[1] = HX8279_VGPN_VOLT_SEL(desc->vgph_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+
+ if (desc->vgnh_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGNHS;
+ cmd_set_voltage[1] = HX8279_VGPN_VOLT_SEL(desc->vgnh_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+}
+
+static void hx8279_set_gmux(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_goa_mux *gmux = hx->desc->gmux;
+ u8 cmd_set_gmux[2];
+ int i;
+
+ if (!gmux)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 1);
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_l); i++) {
+ cmd_set_gmux[0] = HX8279_P1_REG_GOUTL(i);
+ cmd_set_gmux[1] = gmux->gout_l[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gmux,
+ ARRAY_SIZE(cmd_set_gmux));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_r); i++) {
+ cmd_set_gmux[0] = HX8279_P1_REG_GOUTR(i);
+ cmd_set_gmux[1] = gmux->gout_r[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gmux,
+ ARRAY_SIZE(cmd_set_gmux));
+ }
+}
+
+static void hx8279_set_analog_gamma(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_analog_gamma *agamma = hx->desc->agamma;
+ u8 cmd_set_ana_gamma[2];
+ int i;
+
+ if (!agamma)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 2);
+
+ for (i = 0; i < ARRAY_SIZE(agamma->pos); i++) {
+ cmd_set_ana_gamma[0] = HX8279_P2_REG_GAMMA_T_PVP(i);
+ cmd_set_ana_gamma[1] = agamma->pos[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_ana_gamma,
+ ARRAY_SIZE(cmd_set_ana_gamma));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(agamma->neg); i++) {
+ cmd_set_ana_gamma[0] = HX8279_P2_REG_GAMMA_T_PVN(i);
+ cmd_set_ana_gamma[1] = agamma->neg[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_ana_gamma,
+ ARRAY_SIZE(cmd_set_ana_gamma));
+ }
+}
+
+static void hx8279_set_goa_timing(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_goa_t[2];
+ int i;
+
+ if (hx->skip_goa_timing)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 3);
+
+ for (i = 0; i < ARRAY_SIZE(desc->goa_odd_timing); i++) {
+ cmd_set_goa_t[0] = HX8279_P3_REG_GOA_TO(i);
+ cmd_set_goa_t[1] = desc->goa_odd_timing[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa_t,
+ ARRAY_SIZE(cmd_set_goa_t));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(desc->goa_even_timing); i++) {
+ cmd_set_goa_t[0] = HX8279_P3_REG_GOA_TE(i);
+ cmd_set_goa_t[1] = desc->goa_even_timing[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa_t,
+ ARRAY_SIZE(cmd_set_goa_t));
+ }
+}
+
+static void hx8279_set_goa_cfg(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_goa[2];
+ int i;
+
+ if (hx->skip_goa_config)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 3);
+
+ if (desc->goa_unk_ba) {
+ cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_BA;
+ cmd_set_goa[1] = desc->goa_unk_ba;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_stv_lead_time_ck) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_STVL;
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_STV_LEAD,
+ desc->goa_stv_lead_time_ck);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_ckv_lead_time_ck) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKVL;
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CKV_LEAD,
+ desc->goa_ckv_lead_time_ck);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_ckv_dummy_vblank_num) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKVD;
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CKV_DUMMY,
+ desc->goa_ckv_dummy_vblank_num);
+ cmd_set_goa[1] |= FIELD_PREP(HX8279_P3_GOA_CKV_NONOVERLAP,
+ desc->goa_ckv_non_overlap_ctl);
+ /* The RESERVED bit must always be set */
+ cmd_set_goa[1] |= HX8279_P3_GOA_CKV_RESERVED;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ /*
+ * If either value is non-zero, write both registers: writing a zero
+ * simply restates the register default for the other one.
+ */
+ if (desc->goa_ckv_rise_precharge || desc->goa_ckv_fall_precharge) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKV_RISE_PREC;
+ cmd_set_goa[1] = desc->goa_ckv_rise_precharge;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKV_FALL_PREC;
+ cmd_set_goa[1] = desc->goa_ckv_fall_precharge;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_clr1_width_adj) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR1_W_ADJ;
+ cmd_set_goa[1] = desc->goa_clr1_width_adj;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_clr234_width_adj) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR234_W_ADJ;
+ cmd_set_goa[1] = desc->goa_clr234_width_adj;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ /* Polarity and Start Position arrays are of the same size */
+ for (i = 0; i < ARRAY_SIZE(desc->goa_clr_polarity); i++) {
+ if (desc->goa_clr_polarity[i] < 0 || desc->goa_clr_start_pos[i] < 0)
+ continue;
+
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR_CFG(i);
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CLR_CFG_STARTPOS,
+ desc->goa_clr_start_pos[i]);
+ cmd_set_goa[1] |= FIELD_PREP(HX8279_P3_GOA_CLR_CFG_POLARITY,
+ desc->goa_clr_polarity[i]);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_unk_e4) {
+ cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_E4;
+ cmd_set_goa[1] = desc->goa_unk_e4;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_E5;
+ cmd_set_goa[1] = desc->goa_unk_e5;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+}
+
+static void hx8279_set_mipi_cfg(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_mipi[2];
+
+ if (hx->skip_mipi_timing)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 5);
+
+ if (desc->bta_tlpx || desc->ths_settle_time || desc->lhs_settle_time_by_osc25) {
+ cmd_set_mipi[0] = HX8279_P5_REG_TIMING;
+ cmd_set_mipi[1] = FIELD_PREP(HX8279_P5_TIMING_TLPX, desc->bta_tlpx);
+ cmd_set_mipi[1] |= FIELD_PREP(HX8279_P5_TIMING_THS_SETTLE,
+ desc->ths_settle_time);
+ cmd_set_mipi[1] |= FIELD_PREP(HX8279_P5_TIMING_LHS_SETTLE,
+ desc->lhs_settle_time_by_osc25);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+
+ if (desc->timing_unk_b8) {
+ cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_B8;
+ cmd_set_mipi[1] = desc->timing_unk_b8;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+
+ if (desc->timing_unk_bc) {
+ cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_BC;
+ cmd_set_mipi[1] = desc->timing_unk_bc;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+
+ if (desc->timing_unk_d6) {
+ cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_D6;
+ cmd_set_mipi[1] = desc->timing_unk_d6;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+}
+
+static void hx8279_set_adv_cfg(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ const u8 cmd_set_dly[] = { HX8279_P6_REG_DLY_TIME_ADJ, desc->src_delay_time_adj_ck };
+ const u8 cmd_set_gamma[] = { HX8279_P6_REG_GAMMA_CHOPPER, desc->gamma_ctl };
+ const u8 cmd_set_volt_adj[] = { HX8279_P6_REG_VOLT_ADJ, desc->volt_adj };
+ u8 cmd_set_eng[] = { HX8279_P6_REG_ENGINEER_PWD, HX8279_P6_ENG_UNLOCK_WORD };
+
+ if (!desc->gamma_ctl && !desc->src_delay_time_adj_ck && !desc->volt_adj)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 6);
+
+ /* Unlock ENG settings: write same word to both ENGINEER_PWD and INHOUSE_FUNC */
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+
+ cmd_set_eng[0] = HX8279_P6_REG_INHOUSE_FUNC;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+
+ /* Set Gamma Chopper and Gamma buffer Chopper control */
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gamma, ARRAY_SIZE(cmd_set_gamma));
+
+ /* Set Source delay time adjustment (CKV falling to Source off) */
+ if (desc->src_delay_time_adj_ck)
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dly,
+ ARRAY_SIZE(cmd_set_dly));
+
+ /* Set voltage adjustment */
+ if (desc->volt_adj)
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_volt_adj,
+ ARRAY_SIZE(cmd_set_volt_adj));
+
+ /* Lock ENG settings again */
+ cmd_set_eng[0] = HX8279_P6_REG_ENGINEER_PWD;
+ cmd_set_eng[1] = 0;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+
+ cmd_set_eng[0] = HX8279_P6_REG_INHOUSE_FUNC;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+}
+
+static void hx8279_set_digital_gamma(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_digital_gamma *dgamma = hx->desc->dgamma;
+ u8 cmd_set_dig_gamma[2];
+ int i, j;
+
+ if (!dgamma)
+ return;
+
+ /*
+ * Pages 7..9 are for RGB Positive, 10..12 are for RGB Negative:
+ * The first iteration sets all positive component registers,
+ * the second one sets all negatives.
+ */
+ for (i = 0; i < 2; i++) {
+ u8 pg_neg = i * 3;
+
+ hx8279_set_page(hx, dsi_ctx, 7 + pg_neg);
+
+ for (j = 0; j < ARRAY_SIZE(dgamma->r); j++) {
+ cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + j;
+ cmd_set_dig_gamma[1] = dgamma->r[j];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma,
+ ARRAY_SIZE(cmd_set_dig_gamma));
+ }
+
+ hx8279_set_page(hx, dsi_ctx, 8 + pg_neg);
+
+ for (j = 0; j < ARRAY_SIZE(dgamma->g); j++) {
+ cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + j;
+ cmd_set_dig_gamma[1] = dgamma->g[j];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma,
+ ARRAY_SIZE(cmd_set_dig_gamma));
+ }
+
+ hx8279_set_page(hx, dsi_ctx, 9 + pg_neg);
+
+ for (j = 0; j < ARRAY_SIZE(dgamma->b); j++) {
+ cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + j;
+ cmd_set_dig_gamma[1] = dgamma->b[j];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma,
+ ARRAY_SIZE(cmd_set_dig_gamma));
+ }
+ }
+}
+
+static int hx8279_on(struct hx8279 *hx)
+{
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ /* Page 5 */
+ hx8279_set_mipi_cfg(hx, &dsi_ctx);
+
+ /* Page 1 */
+ hx8279_set_gmux(hx, &dsi_ctx);
+
+ /* Page 2 */
+ hx8279_set_analog_gamma(hx, &dsi_ctx);
+
+ /* Page 3 */
+ hx8279_set_goa_cfg(hx, &dsi_ctx);
+ hx8279_set_goa_timing(hx, &dsi_ctx);
+
+ /* Page 0 - Driver/Module Configuration */
+ hx8279_set_module_config(hx, &dsi_ctx);
+
+ /* Page 6 */
+ hx8279_set_adv_cfg(hx, &dsi_ctx);
+
+ /* Pages 7..12 */
+ hx8279_set_digital_gamma(hx, &dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static void hx8279_power_off(struct hx8279 *hx)
+{
+ gpiod_set_value_cansleep(hx->reset_gpio, 0);
+ usleep_range(100, 500);
+ gpiod_set_value_cansleep(hx->enable_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(hx->vregs), hx->vregs);
+}
+
+static int hx8279_disable(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+
+ return 0;
+}
+
+static int hx8279_enable(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return 0;
+}
+
+static int hx8279_prepare(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hx->vregs), hx->vregs);
+ if (ret)
+ return ret;
+
+ gpiod_set_value_cansleep(hx->enable_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(hx->reset_gpio, 1);
+ usleep_range(6000, 7000);
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+ if (hx->dsi[1])
+ hx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = hx8279_on(hx);
+ if (ret) {
+ hx8279_power_off(hx);
+ return ret;
+ }
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 130);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx8279_unprepare(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 130);
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ if (hx->dsi[1])
+ hx->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ hx8279_power_off(hx);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx8279_get_modes(struct drm_panel *panel, struct drm_connector *connector)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ int i;
+
+ for (i = 0; i < hx->desc->num_modes; i++) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &hx->desc->mode_data[i].mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+ if (hx->desc->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ connector->display_info.bpc = hx->desc->mode_data[0].bpc;
+ connector->display_info.height_mm = hx->desc->mode_data[0].mode.height_mm;
+ connector->display_info.width_mm = hx->desc->mode_data[0].mode.width_mm;
+
+ return hx->desc->num_modes;
+}
+
+static const struct drm_panel_funcs hx8279_panel_funcs = {
+ .disable = hx8279_disable,
+ .unprepare = hx8279_unprepare,
+ .prepare = hx8279_prepare,
+ .enable = hx8279_enable,
+ .get_modes = hx8279_get_modes,
+};
+
+static int hx8279_init_vregs(struct hx8279 *hx, struct device *dev)
+{
+ int ret;
+
+ hx->vregs[0].supply = "vdd";
+ hx->vregs[1].supply = "iovcc";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hx->vregs),
+ hx->vregs);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_is_supported_voltage(hx->vregs[0].consumer,
+ 3000000, 5000000);
+ if (!ret)
+ return -EINVAL;
+
+ ret = regulator_is_supported_voltage(hx->vregs[1].consumer,
+ 1700000, 1900000);
+ if (!ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hx8279_check_gmux_config(struct hx8279 *hx, struct device *dev)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ const struct hx8279_goa_mux *gmux = desc->gmux;
+ int i;
+
+ /* No gmux defined means we simply skip the GOA mux configuration */
+ if (!gmux)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_l); i++) {
+ if (gmux->gout_l[i] > (HX8279_GOUT_STB | HX8279_GOUT_SEL))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid value found in gout_l[%d]\n", i);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_r); i++) {
+ if (gmux->gout_r[i] > (HX8279_GOUT_STB | HX8279_GOUT_SEL))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid value found in gout_r[%d]\n", i);
+ }
+
+ return 0;
+}
+
+static int hx8279_check_goa_config(struct hx8279 *hx, struct device *dev)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ bool goa_odd_valid, goa_even_valid;
+ int i, num_zero, num_clr = 0;
+
+ /* Up to 4 zero values is a valid configuration. Check them all. */
+ num_zero = 1;
+ for (i = 0; i < ARRAY_SIZE(desc->goa_odd_timing); i++) {
+ if (desc->goa_odd_timing[i])
+ num_zero++;
+ }
+
+ goa_odd_valid = (num_zero != ARRAY_SIZE(desc->goa_odd_timing));
+
+ /* Up to 3 zeroes is a valid config. Check them all. */
+ num_zero = 1;
+ for (i = 0; i < ARRAY_SIZE(desc->goa_even_timing); i++) {
+ if (desc->goa_even_timing[i])
+ num_zero++;
+ }
+
+ goa_even_valid = (num_zero != ARRAY_SIZE(desc->goa_even_timing));
+
+ /* Programming one without the other would make no sense! */
+ if (goa_odd_valid != goa_even_valid)
+ return -EINVAL;
+
+ /* We know that both are either true or false now, check just one */
+ if (!goa_odd_valid)
+ hx->skip_goa_timing = true;
+
+ if (!desc->goa_unk_ba && !desc->goa_stv_lead_time_ck &&
+ !desc->goa_ckv_lead_time_ck && !desc->goa_ckv_dummy_vblank_num &&
+ !desc->goa_ckv_rise_precharge && !desc->goa_ckv_fall_precharge &&
+ !desc->goa_clr1_width_adj && !desc->goa_clr234_width_adj &&
+ !desc->goa_unk_e4 && !desc->goa_unk_e5) {
+ hx->skip_goa_config = true;
+ return 0;
+ }
+
+ if ((desc->goa_stv_lead_time_ck > HX8279_P3_GOA_STV_LEAD) ||
+ (desc->goa_ckv_lead_time_ck > HX8279_P3_GOA_CKV_LEAD) ||
+ (desc->goa_ckv_dummy_vblank_num > HX8279_P3_GOA_CKV_DUMMY))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid lead timings in GOA config\n");
+
+ /*
+ * Don't perform zero check for polarity and start position, as
+ * both pol=0 and start=0 are valid configuration values.
+ */
+ for (i = 0; i < ARRAY_SIZE(desc->goa_clr_start_pos); i++) {
+ if (desc->goa_clr_start_pos[i] < 0)
+ continue;
+ else if (desc->goa_clr_start_pos[i] > HX8279_P3_GOA_CLR_CFG_STARTPOS)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid start position for CLR%d\n", i + 1);
+ else
+ num_clr++;
+ }
+ if (!num_clr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(desc->goa_clr_polarity); i++) {
+ if (num_clr < 0)
+ return -EINVAL;
+
+ if (desc->goa_clr_polarity[i] < 0)
+ continue;
+ else if (desc->goa_clr_polarity[i] > 1)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid polarity for CLR%d\n", i + 1);
+ else
+ num_clr--;
+ }
+
+ return 0;
+}
+
+static int hx8279_check_dig_gamma(struct hx8279 *hx, struct device *dev, const u8 *component)
+{
+ u8 gamma_high_bits[4];
+ u16 prev_val = 0;
+ int i, j, k, x;
+
+ /*
+ * The gamma values are 10 bits long and must be monotonically
+ * non-decreasing to form a digital gamma correction reference curve.
+ *
+ * As for the register format: the first 24 bytes each contain the
+ * lowest 8 bits of one gamma level reference, and the last 6 bytes
+ * each contain the high two bits of 4 of those values, where the
+ * lowest two bits relate to the last value of a group of four and
+ * the highest two bits to the first one.
+ *
+ * Put another way, these are the first four LOW registers:
+ * DGMA1_LO = 0xb1, DGMA2_LO = 0xb2, DGMA3_LO = 0xb3, DGMA4_LO = 0xb4
+ *
+ * The high bits for those four are at DGMA1_4_HI = 0xc9;
+ * ...and DGMA1_4_HI's data contains the following bits:
+ * [1:0] = DGMA4_HI, [3:2] = DGMA3_HI, [5:4] = DGMA2_HI, [7:6] = DGMA1_HI
+ */
+ for (i = 0; i < HX8279_PG_DGAMMA_NUM_HI_BYTES; i++) {
+ k = HX8279_PG_DGAMMA_NUM_LO_BYTES + i;
+ j = i * 4;
+ x = 0;
+
+ gamma_high_bits[0] = FIELD_GET(HX8279_DGAMMA_DGMA1_HI, component[k]);
+ gamma_high_bits[1] = FIELD_GET(HX8279_DGAMMA_DGMA2_HI, component[k]);
+ gamma_high_bits[2] = FIELD_GET(HX8279_DGAMMA_DGMA3_HI, component[k]);
+ gamma_high_bits[3] = FIELD_GET(HX8279_DGAMMA_DGMA4_HI, component[k]);
+
+ do {
+ u16 cur_val = component[j] | (gamma_high_bits[x] << 8);
+
+ if (cur_val < prev_val)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid dgamma values: %u < %u!\n",
+ cur_val, prev_val);
+ prev_val = cur_val;
+ j++;
+ x++;
+ } while (x < 4);
+ }
+
+ return 0;
+}
+
+static int hx8279_check_params(struct hx8279 *hx, struct device *dev)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ int ret;
+
+ /* Voltages config validation */
+ if (!desc->vgh_mv && !desc->vgl_mv && !desc->vgph_mv && !desc->vgnh_mv)
+ hx->skip_voltage_config = true;
+ else if ((desc->vgh_mv && desc->vgh_mv < HX8279_VGH_MIN_MV) ||
+ (desc->vgl_mv && desc->vgl_mv < HX8279_VGL_MIN_MV) ||
+ (desc->vgph_mv && desc->vgph_mv < HX8279_VGPNH_MIN_MV) ||
+ (desc->vgnh_mv && desc->vgnh_mv < HX8279_VGPNH_MIN_MV))
+ return -EINVAL;
+
+ /* GOA Muxing validation */
+ ret = hx8279_check_gmux_config(hx, dev);
+ if (ret)
+ return ret;
+
+ /* GOA Configuration validation */
+ ret = hx8279_check_goa_config(hx, dev);
+ if (ret)
+ return ret;
+
+ /* MIPI Configuration validation */
+ if (!desc->bta_tlpx && !desc->lhs_settle_time_by_osc25 &&
+ !desc->ths_settle_time && !desc->timing_unk_b8 &&
+ !desc->timing_unk_bc && !desc->timing_unk_d6)
+ hx->skip_mipi_timing = true;
+
+ /* ENG/Gamma Configuration validation */
+ if (desc->gamma_ctl > (HX8279_P6_GAMMA_POCGM_CTL | HX8279_P6_GAMMA_POGCMD_CTL))
+ return -EINVAL;
+
+ /* Digital Gamma values validation */
+ if (desc->dgamma) {
+ ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->r);
+ if (ret)
+ return ret;
+
+ ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->g);
+ if (ret)
+ return ret;
+
+ ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->b);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hx8279_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct device_node *dsi_r;
+ struct hx8279 *hx;
+ int i, ret;
+
+ hx = devm_drm_panel_alloc(dev, struct hx8279, panel,
+ &hx8279_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(hx))
+ return PTR_ERR(hx);
+
+ ret = hx8279_init_vregs(hx, dev);
+ if (ret)
+ return ret;
+
+ hx->desc = device_get_match_data(dev);
+ if (!hx->desc)
+ return -ENODEV;
+
+ /*
+ * In some DriverICs some or all fields may be OTP: perform a
+ * basic configuration check before writing to help avoid
+ * irreparable mistakes.
+ *
+ * Please note that this is not perfect and will only check whether
+ * the values are plausible; values that are wrong for a specific
+ * display, but still plausible for the DriverIC configuration, will
+ * be accepted.
+ */
+ ret = hx8279_check_params(hx, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Invalid DriverIC configuration\n");
+
+ /* The enable line may be always tied to VCCIO, so this is optional */
+ hx->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_ASIS);
+ if (IS_ERR(hx->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(hx->enable_gpio),
+ "Failed to get enable GPIO\n");
+
+ hx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(hx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(hx->reset_gpio),
+ "Failed to get reset GPIO\n");
+
+ /* If the panel is connected on two DSIs then DSI0 left, DSI1 right */
+ dsi_r = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
+ if (dsi_r) {
+ const struct mipi_dsi_device_info *info = &hx->desc->dsi_info;
+ struct mipi_dsi_host *dsi_r_host;
+
+ dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r);
+ of_node_put(dsi_r);
+ if (!dsi_r_host)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "Cannot get secondary DSI host\n");
+
+ hx->dsi[1] = devm_mipi_dsi_device_register_full(dev, dsi_r_host, info);
+ if (IS_ERR(hx->dsi[1]))
+ return dev_err_probe(dev, PTR_ERR(hx->dsi[1]),
+ "Cannot get secondary DSI node\n");
+ mipi_dsi_set_drvdata(hx->dsi[1], hx);
+ }
+
+ hx->dsi[0] = dsi;
+ mipi_dsi_set_drvdata(dsi, hx);
+
+ ret = drm_panel_of_backlight(&hx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&hx->panel);
+
+ for (i = 0; i < 2; i++) {
+ if (!hx->dsi[i])
+ continue;
+
+ hx->dsi[i]->lanes = hx->desc->num_lanes;
+ hx->dsi[i]->format = MIPI_DSI_FMT_RGB888;
+
+ hx->dsi[i]->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_LPM;
+
+ if (hx->desc->mode_data[0].is_video_mode)
+ hx->dsi[i]->mode_flags |= MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+
+ ret = devm_mipi_dsi_attach(dev, hx->dsi[i]);
+ if (ret < 0) {
+ drm_panel_remove(&hx->panel);
+ return dev_err_probe(dev, ret,
+ "Cannot attach to DSI%d host.\n", i);
+ }
+ }
+
+ return 0;
+}
+
+static void hx8279_remove(struct mipi_dsi_device *dsi)
+{
+ struct hx8279 *hx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&hx->panel);
+}
+
+static const struct hx8279_panel_mode aoly_sl101pm1794fog_v15_modes[] = {
+ {
+ .mode = {
+ .clock = 159420,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 60,
+ .htotal = 1200 + 80 + 60 + 24,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 10,
+ .vsync_end = 1920 + 10 + 14,
+ .vtotal = 1920 + 10 + 14 + 4,
+ .width_mm = 136,
+ .height_mm = 217,
+ .type = DRM_MODE_TYPE_DRIVER
+ },
+ .bpc = 8,
+ .is_video_mode = true,
+ },
+};
+
+static const struct hx8279_panel_mode startek_kd070fhfid078_modes[] = {
+ {
+ .mode = {
+ .clock = 156458,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 50,
+ .hsync_end = 1200 + 50 + 24,
+ .htotal = 1200 + 50 + 24 + 66,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 14,
+ .vsync_end = 1920 + 14 + 2,
+ .vtotal = 1920 + 14 + 2 + 10,
+ .width_mm = 95,
+ .height_mm = 151,
+ .type = DRM_MODE_TYPE_DRIVER
+ },
+ .bpc = 8,
+ .is_video_mode = true,
+ },
+};
+
+static const struct hx8279_goa_mux aoly_sl101pm1794fog_v15_gmux = {
+ .gout_l = { 0x5, 0x5, 0xb, 0xb, 0x9, 0x9, 0x16, 0x16, 0xe, 0xe,
+ 0x7, 0x7, 0x26, 0x26, 0x15, 0x15, 0x1, 0x1, 0x3, 0x3 },
+ .gout_r = { 0x6, 0x6, 0xc, 0xc, 0xa, 0xa, 0x16, 0x16, 0xe, 0xe,
+ 0x8, 0x8, 0x26, 0x26, 0x15, 0x15, 0x2, 0x2, 0x4, 0x4 },
+};
+
+static const struct hx8279_analog_gamma aoly_sl101pm1794fog_v15_ana_gamma = {
+ .pos = { 0x0, 0xd, 0x17, 0x26, 0x31, 0x1c, 0x2c, 0x33, 0x31,
+ 0x37, 0x37, 0x37, 0x39, 0x2e, 0x2f, 0x2f, 0x7 },
+ .neg = { 0x0, 0xd, 0x17, 0x26, 0x31, 0x3f, 0x3f, 0x3f, 0x3f,
+ 0x37, 0x37, 0x37, 0x39, 0x2e, 0x2f, 0x2f, 0x7 },
+};
+
+static const struct hx8279_digital_gamma aoly_sl101pm1794fog_v15_dig_gamma = {
+ .r = { 0x0, 0x5, 0x10, 0x22, 0x36, 0x4a, 0x6c, 0x9a, 0xd7, 0x17,
+ 0x92, 0x15, 0x18, 0x8c, 0x0, 0x3a, 0x72, 0x8c, 0xa5, 0xb1,
+ 0xbe, 0xca, 0xd1, 0xd4, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff },
+ .g = { 0x4, 0x5, 0x11, 0x24, 0x39, 0x4e, 0x72, 0xa3, 0xe1, 0x25,
+ 0xa8, 0x2e, 0x32, 0xad, 0x28, 0x63, 0x9b, 0xb5, 0xcf, 0xdb,
+ 0xe8, 0xf5, 0xfa, 0xfc, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff },
+ .b = { 0x4, 0x4, 0xf, 0x22, 0x37, 0x4d, 0x71, 0xa2, 0xe1, 0x26,
+ 0xa9, 0x2f, 0x33, 0xac, 0x24, 0x5d, 0x94, 0xac, 0xc5, 0xd1,
+ 0xdc, 0xe8, 0xed, 0xf0, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff },
+};
+
+static const struct hx8279_panel_desc aoly_sl101pm1794fog_v15 = {
+ .dsi_info = {
+ .type = "L101PM1794FOG-V15",
+ .channel = 0,
+ .node = NULL,
+ },
+ .mode_data = aoly_sl101pm1794fog_v15_modes,
+ .num_modes = ARRAY_SIZE(aoly_sl101pm1794fog_v15_modes),
+ .num_lanes = 4,
+
+ /* Driver/Module Configuration: LC Matrix voltages */
+ .vgh_mv = 16500,
+ .vgl_mv = 11200,
+ .vgph_mv = 4600,
+ .vgnh_mv = 4600,
+
+ /* Analog Gamma correction */
+ .agamma = &aoly_sl101pm1794fog_v15_ana_gamma,
+
+ /* Gate driver On Array (GOA) Muxing */
+ .gmux = &aoly_sl101pm1794fog_v15_gmux,
+
+ /* Gate driver On Array (GOA) Mux Config */
+ .goa_unk_ba = 0xf0,
+ .goa_odd_timing = { 0, 0, 0, 42, 0, 0 },
+ .goa_even_timing = { 1, 42, 0, 0 },
+ .goa_stv_lead_time_ck = 11,
+ .goa_ckv_lead_time_ck = 7,
+ .goa_ckv_dummy_vblank_num = 3,
+ .goa_ckv_rise_precharge = 1,
+ .goa_clr1_width_adj = 0,
+ .goa_clr234_width_adj = 0,
+ .goa_clr_polarity = { 1, 0, 0, 0 },
+ .goa_clr_start_pos = { 8, 9, 3, 4 },
+ .goa_unk_e4 = 0xc0,
+ .goa_unk_e5 = 0x0d,
+
+ /* MIPI Configuration */
+ .bta_tlpx = 2,
+ .lhs_settle_time_by_osc25 = true,
+ .ths_settle_time = 2,
+ .timing_unk_b8 = 0xa5,
+ .timing_unk_bc = 0x20,
+ .timing_unk_d6 = 0x7f,
+
+ /* ENG/Gamma Configuration */
+ .gamma_ctl = 0,
+ .volt_adj = FIELD_PREP_CONST(HX8279_P6_VOLT_ADJ_VCCIFS, 3) |
+ FIELD_PREP_CONST(HX8279_P6_VOLT_ADJ_VCCS, 3),
+ .src_delay_time_adj_ck = 50,
+
+ /* Digital Gamma Adjustment */
+ .dgamma = &aoly_sl101pm1794fog_v15_dig_gamma,
+};
+
+static const struct hx8279_goa_mux startek_kd070fhfid078_gmux = {
+ .gout_l = { 0xd, 0xd, 0x6, 0x6, 0x8, 0x8, 0xa, 0xa, 0xc, 0xc,
+ 0x0, 0x0, 0xe, 0xe, 0x1, 0x1, 0x4, 0x4, 0x0, 0x0 },
+ .gout_r = { 0xd, 0xd, 0x5, 0x5, 0x7, 0x7, 0x9, 0x9, 0xb, 0xb,
+ 0x0, 0x0, 0xe, 0xe, 0x1, 0x1, 0x3, 0x3, 0x0, 0x0 },
+};
+
+static const struct hx8279_panel_desc startek_kd070fhfid078 = {
+ .dsi_info = {
+ .type = "KD070FHFID078",
+ .channel = 0,
+ .node = NULL,
+ },
+ .mode_data = startek_kd070fhfid078_modes,
+ .num_modes = ARRAY_SIZE(startek_kd070fhfid078_modes),
+ .num_lanes = 4,
+
+ /* Driver/Module Configuration: LC Matrix voltages */
+ .vgh_mv = 18000,
+ .vgl_mv = 12100,
+ .vgph_mv = 5500,
+ .vgnh_mv = 5500,
+
+ /* Gate driver On Array (GOA) Mux Config */
+ .gmux = &startek_kd070fhfid078_gmux,
+
+ /* Gate driver On Array (GOA) Configuration */
+ .goa_unk_ba = 0xf0,
+ .goa_stv_lead_time_ck = 7,
+ .goa_ckv_lead_time_ck = 3,
+ .goa_ckv_dummy_vblank_num = 1,
+ .goa_ckv_rise_precharge = 0,
+ .goa_ckv_fall_precharge = 0,
+ .goa_clr1_width_adj = 1,
+ .goa_clr234_width_adj = 5,
+ .goa_clr_polarity = { 0, 1, -1, -1 },
+ .goa_clr_start_pos = { 5, 10, -1, -1 },
+ .goa_unk_e4 = 0xc0,
+ .goa_unk_e5 = 0x00,
+
+ /* MIPI Configuration */
+ .bta_tlpx = 2,
+ .lhs_settle_time_by_osc25 = true,
+ .ths_settle_time = 2,
+ .timing_unk_b8 = 0x7f,
+ .timing_unk_bc = 0x20,
+ .timing_unk_d6 = 0x7f,
+
+ /* ENG/Gamma Configuration */
+ .gamma_ctl = FIELD_PREP_CONST(HX8279_P6_GAMMA_POCGM_CTL, 1) |
+ FIELD_PREP_CONST(HX8279_P6_GAMMA_POGCMD_CTL, 1),
+ .src_delay_time_adj_ck = 72,
+};
+
+static const struct of_device_id hx8279_of_match[] = {
+ { .compatible = "aoly,sl101pm1794fog-v15", .data = &aoly_sl101pm1794fog_v15 },
+ { .compatible = "startek,kd070fhfid078", .data = &startek_kd070fhfid078 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hx8279_of_match);
+
+static struct mipi_dsi_driver hx8279_driver = {
+ .probe = hx8279_probe,
+ .remove = hx8279_remove,
+ .driver = {
+ .name = "panel-himax-hx8279",
+ .of_match_table = hx8279_of_match,
+ },
+};
+module_mipi_dsi_driver(hx8279_driver);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("Himax HX8279 DriverIC panels driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index 92b03a2f65a3..ff994bf0e3cc 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -80,7 +80,7 @@ struct hx8394_panel_desc {
unsigned int lanes;
unsigned long mode_flags;
enum mipi_dsi_pixel_format format;
- int (*init_sequence)(struct hx8394 *ctx);
+ void (*init_sequence)(struct mipi_dsi_multi_context *dsi_ctx);
};
static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel)
@@ -88,98 +88,94 @@ static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel)
return container_of(panel, struct hx8394, panel);
}
-static int hsd060bhw4_init_sequence(struct hx8394 *ctx)
+static void hsd060bhw4_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x78, 0x0c, 0x07);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x78, 0x0c, 0x07);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55,
- 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c,
- 0x7c);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55,
+ 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c,
+ 0x7c);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10,
- 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00,
- 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00,
- 0x00, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10,
+ 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00,
+ 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00,
+ 0x00, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06,
- 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f,
- 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a,
- 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00,
- 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31,
- 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f,
- 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b,
- 0x4a, 0x4c, 0x4b, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f,
+ 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a,
+ 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00,
+ 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31,
+ 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f,
+ 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b,
+ 0x4a, 0x4c, 0x4b, 0x7f);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x7d, 0x7d);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x7d, 0x7d);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0xed);
}
static const struct drm_display_mode hsd060bhw4_mode = {
@@ -205,114 +201,110 @@ static const struct hx8394_panel_desc hsd060bhw4_desc = {
.init_sequence = hsd060bhw4_init_sequence,
};
-static int powkiddy_x55_init_sequence(struct hx8394 *ctx)
+static void powkiddy_x55_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
- 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
- 0x86);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
+ 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
+ 0x86);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x6e, 0x6e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
- 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
- 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
- 0x07, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
+ 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
+ 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
- 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
- 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
- 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
+ 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
+ 0x18, 0x18, 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
- 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
- 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
- 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
+ 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
+ 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
+ 0x18, 0x18, 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
- 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
- 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
- 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
- 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
- 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
+ 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
+ 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
+ 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
+ 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN5,
- 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN5,
+ 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xed);
}
static const struct drm_display_mode powkiddy_x55_mode = {
@@ -339,131 +331,127 @@ static const struct hx8394_panel_desc powkiddy_x55_desc = {
.init_sequence = powkiddy_x55_init_sequence,
};
-static int mchp_ac40t08a_init_sequence(struct hx8394 *ctx)
+static void mchp_ac40t08a_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
/* DCS commands do not seem to be sent correclty without this delay */
- msleep(20);
+ mipi_dsi_msleep(dsi_ctx, 20);
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x12, 0x72, 0x09, 0x32, 0x54,
- 0x71, 0x71, 0x57, 0x47);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54,
+ 0x71, 0x71, 0x57, 0x47);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
- 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
- 0x01, 0x0c, 0x86);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x6e, 0x6e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x07, 0x07, 0x40, 0x07,
- 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00,
- 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a,
- 0x02, 0x15, 0x06, 0x05, 0x06, 0x47,
- 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
- 0x07, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07,
+ 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00,
+ 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a,
+ 0x02, 0x15, 0x06, 0x05, 0x06, 0x47,
+ 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25,
- 0x18, 0x18, 0x26, 0x27, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x20, 0x21, 0x18, 0x18,
- 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25,
+ 0x18, 0x18, 0x26, 0x27, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06,
- 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
- 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20,
- 0x18, 0x18, 0x27, 0x26, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x25, 0x24, 0x18, 0x18,
- 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+ 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20,
+ 0x18, 0x18, 0x27, 0x26, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21,
- 0x24, 0x22, 0x47, 0x56, 0x65, 0x66,
- 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d,
- 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61,
- 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
- 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24,
- 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e,
- 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99,
- 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67,
- 0x6b, 0x72, 0x7f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21,
+ 0x24, 0x22, 0x47, 0x56, 0x65, 0x66,
+ 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d,
+ 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61,
+ 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24,
+ 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e,
+ 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99,
+ 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67,
+ 0x6b, 0x72, 0x7f, 0x7f);
/* Unknown command, not listed in the HX8394-F datasheet (C0H) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x73);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x73);
/* Set CABC control (C9h)*/
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCABC,
- 0x76, 0x00, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCABC,
+ 0x76, 0x00, 0x30);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet (D4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
/* 5.19.11 Set register bank (D8h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet (C6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xed);
}
static const struct drm_display_mode mchp_ac40t08a_mode = {
@@ -493,35 +481,31 @@ static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
int ret;
- ret = ctx->desc->init_sequence(ctx);
- if (ret) {
- dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
- return ret;
- }
+ ctx->desc->init_sequence(&dsi_ctx);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
+ return dsi_ctx.accum_err;
/* Panel is operational 120 msec after reset */
msleep(120);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to turn on the display: %d\n", ret);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
goto sleep_in;
- }
return 0;
sleep_in:
+ ret = dsi_ctx.accum_err;
+ dsi_ctx.accum_err = 0;
+
/* This will probably fail, but let's try orderly power off anyway. */
- if (!mipi_dsi_dcs_enter_sleep_mode(dsi))
- msleep(50);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
return ret;
}
@@ -530,17 +514,12 @@ static int hx8394_disable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- msleep(50); /* about 3 frames */
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50); /* about 3 frames */
- return 0;
+ return dsi_ctx.accum_err;
}
static int hx8394_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index 7d68a8acfe2e..eb0f8373258c 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -129,11 +129,11 @@ static int jadard_unprepare(struct drm_panel *panel)
{
struct jadard *jadard = panel_to_jadard(panel);
- gpiod_set_value(jadard->reset, 1);
+ gpiod_set_value(jadard->reset, 0);
msleep(120);
if (jadard->desc->reset_before_power_off_vcioo) {
- gpiod_set_value(jadard->reset, 0);
+ gpiod_set_value(jadard->reset, 1);
usleep_range(1000, 2000);
}
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
index 5d115ecd5dd4..b6429795e8f5 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
@@ -413,15 +413,10 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
static void panel_nv3051d_shutdown(struct mipi_dsi_device *dsi)
{
struct panel_nv3051d *ctx = mipi_dsi_get_drvdata(dsi);
- int ret;
- ret = drm_panel_unprepare(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
+ drm_panel_unprepare(&ctx->panel);
- ret = drm_panel_disable(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
+ drm_panel_disable(&ctx->panel);
}
static void panel_nv3051d_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 04f1d2676c78..116d67bfa114 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -23,10 +23,12 @@
#define DSI_NUM_MIN 1
-#define mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, cmd, seq...) \
- do { \
- mipi_dsi_dcs_write_seq(dsi0, cmd, seq); \
- mipi_dsi_dcs_write_seq(dsi1, cmd, seq); \
+#define mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, cmd, seq...) \
+ do { \
+ dsi_ctx.dsi = dsi0; \
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
+ dsi_ctx.dsi = dsi1; \
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
} while (0)
struct panel_info {
@@ -67,868 +69,829 @@ static int elish_boe_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd2, 0x30);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x76, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x77, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x59);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x48);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd7, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdc, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdd, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe1, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe2, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf3, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf4, 0x48);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x13, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x97, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x98, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x99, 0x95);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9b, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9c, 0x0b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9d, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9e, 0x90);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa3, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x60);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
- msleep(70);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
-
- return 0;
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd2, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x76, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x77, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x59);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdc, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdd, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe1, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe2, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf3, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf4, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x13, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x97, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x98, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x99, 0x95);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9b, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9c, 0x0b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9d, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9e, 0x90);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa3, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x60);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_msleep(&dsi_ctx, 70);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+
+ return dsi_ctx.accum_err;
}
static int elish_csot_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x30);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x55, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x4d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x96);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x07);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x07);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x5c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x1c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
- msleep(70);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
-
- return 0;
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x55, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x4d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x96);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x5c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x1c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_msleep(&dsi_ctx, 70);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+
+ return dsi_ctx.accum_err;
}
static int j606f_boe_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi = pinfo->dsi[0];
- struct device *dev = &dsi->dev;
- int ret;
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0xd9);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x78);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x63);
- mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x91);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0x95, 0xeb);
- mipi_dsi_dcs_write_seq(dsi, 0x96, 0xeb);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x66);
- mipi_dsi_dcs_write_seq(dsi, 0x75, 0xa2);
- mipi_dsi_dcs_write_seq(dsi, 0x77, 0xb3);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x23);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x77);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x01, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x03, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x09, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x0a, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x0b, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x10, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x13, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x17, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x21, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x28, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x32);
- mipi_dsi_dcs_write_seq(dsi, 0x37, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x38, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x9a);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x42);
- mipi_dsi_dcs_write_seq(dsi, 0x3f, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x43, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x47, 0x66);
- mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x9a);
- mipi_dsi_dcs_write_seq(dsi, 0x4b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x4c, 0x91);
- mipi_dsi_dcs_write_seq(dsi, 0x4d, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0x4e, 0x43);
-
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 18);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x52, 0x34);
- mipi_dsi_dcs_write_seq(dsi, 0x55, 0x82, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x56, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x58, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x7f, 0x3c);
- mipi_dsi_dcs_write_seq(dsi, 0x82, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x97, 0xc0);
- mipi_dsi_dcs_write_seq(dsi, 0xb6,
- 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
- 0x05, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x92, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0x93, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x94, 0x5f);
- mipi_dsi_dcs_write_seq(dsi, 0xd7, 0x55);
- mipi_dsi_dcs_write_seq(dsi, 0xda, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0xde, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xdc, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe0, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe6, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x88);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x88);
- mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x90);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x3f, 0xe0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_VSYNC_TIMING, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x44, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_GET_SCANLINE, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0x48, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0xf1, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x64, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x67, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x6a, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x70, 0x30);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_START, 0xf3);
- mipi_dsi_dcs_write_seq(dsi, 0xa3, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xa4, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xa5, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0xa1);
- mipi_dsi_dcs_write_seq(dsi, 0x0a, 0xf2);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x50);
- mipi_dsi_dcs_write_seq(dsi, 0x13, 0x51);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x65);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x17, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x86);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0xbb);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x05);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x32, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0xc3);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x78);
- mipi_dsi_dcs_write_seq(dsi, 0x35, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xc9, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0xca, 0x4e);
- mipi_dsi_dcs_write_seq(dsi, 0xcb, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_CONTINUE, 0x4c);
- mipi_dsi_dcs_write_seq(dsi, 0xaa, 0x47);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x27);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x56, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x58, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0x53);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x14);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x01);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x60, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x63, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x64, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x66, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x67, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x68, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x78, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x2f);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xf8);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x28, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0xe0);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0xc0);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x08);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x5d);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x5d);
- mipi_dsi_dcs_write_seq(dsi, 0x4b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x91, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x92, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xdc, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe6, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x60);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x48, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7f);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7f);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x75);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0x32, 0x8d);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x75);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x02);
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- usleep_range(10000, 11000);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x68, 0x05, 0x01);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(100);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(30);
-
- return 0;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0xd9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x63);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x91);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x95, 0xeb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x96, 0xeb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x66);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0xa2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0xb3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0b, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x10, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x9a);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_3D_CONTROL, 0x42);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x47, 0x66);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x9a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4c, 0x91);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4e, 0x43);
+
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 18);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x34);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x82, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7e, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0x3c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x97, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6,
+ 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x05, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x93, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x94, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd7, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xda, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x90);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0xe0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_VSYNC_TIMING, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_GET_SCANLINE, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_START, 0xf3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa3, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa4, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa5, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd6, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0xa1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0xf2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x51);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x65);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x86);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0xc3);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc8, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc9, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0x4e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcb, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_CONTINUE, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xaa, 0x47);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x27);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x53);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc3, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd1, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd2, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xe0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x08);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x5d);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x5d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0x8d);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x75);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x02);
+
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x05, 0x01);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 100);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 30);
+
+ return dsi_ctx.accum_err;
}
static const struct drm_display_mode elish_boe_modes[] = {
@@ -1063,18 +1026,18 @@ static int nt36523_prepare(struct drm_panel *panel)
static int nt36523_disable(struct drm_panel *panel)
{
struct panel_info *pinfo = to_panel_info(panel);
- int i, ret;
+ int i;
for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
- ret = mipi_dsi_dcs_set_display_off(pinfo->dsi[i]);
- if (ret < 0)
- dev_err(&pinfo->dsi[i]->dev, "failed to set display off: %d\n", ret);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i] };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
}
for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
- ret = mipi_dsi_dcs_enter_sleep_mode(pinfo->dsi[i]);
- if (ret < 0)
- dev_err(&pinfo->dsi[i]->dev, "failed to enter sleep mode: %d\n", ret);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i] };
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
}
msleep(70);
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt37801.c b/drivers/gpu/drm/panel/panel-novatek-nt37801.c
new file mode 100644
index 000000000000..84d367eab058
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt37801.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2024 Linaro Limited
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include <drm/display/drm_dsc.h>
+#include <drm/display/drm_dsc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/mipi_display.h>
+
+struct novatek_nt37801 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct drm_dsc_config dsc;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data novatek_nt37801_supplies[] = {
+ { .supply = "vddio" },
+ { .supply = "vci" },
+ { .supply = "vdd" },
+};
+
+static inline struct novatek_nt37801 *to_novatek_nt37801(struct drm_panel *panel)
+{
+ return container_of(panel, struct novatek_nt37801, panel);
+}
+
+static void novatek_nt37801_reset(struct novatek_nt37801 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 21000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 21000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 21000);
+}
+
+#define NT37801_DCS_SWITCH_PAGE 0xf0
+
+#define novatek_nt37801_switch_page(dsi_ctx, page) \
+ mipi_dsi_dcs_write_seq_multi((dsi_ctx), NT37801_DCS_SWITCH_PAGE, \
+ 0x55, 0xaa, 0x52, 0x08, (page))
+
+static int novatek_nt37801_on(struct novatek_nt37801 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ novatek_nt37801_switch_page(&dsi_ctx, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc5, 0x0b, 0x0b, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xaa, 0x55, 0xa5, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf5, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x1b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf8, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfc, 0x00);
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 0x059f);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x0c7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x90, 0x03, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91,
+ 0x89, 0x28, 0x00, 0x28, 0xc2, 0x00, 0x02,
+ 0x68, 0x04, 0x6c, 0x00, 0x0a, 0x02, 0x77,
+ 0x01, 0xe9, 0x10, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xaa, 0x55, 0xa5, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb,
+ 0x00, 0x01, 0x00, 0x11, 0x33, 0x33, 0x33,
+ 0x55, 0x57, 0xd0, 0x00, 0x00, 0x44, 0x56,
+ 0x77, 0x78, 0x9a, 0xbc, 0xdd, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0xdc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x00);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x00, 0x18, 0x00, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x51,
+ 0x07, 0xff, 0x07, 0xff, 0x0f, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9c, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_MEMORY_START);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x00);
+
+ novatek_nt37801_switch_page(&dsi_ctx, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x55, 0x01, 0xff, 0x03);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static int novatek_nt37801_off(struct novatek_nt37801 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ return dsi_ctx.accum_err;
+}
+
+static int novatek_nt37801_prepare(struct drm_panel *panel)
+{
+ struct novatek_nt37801 *ctx = to_novatek_nt37801(panel);
+ struct device *dev = &ctx->dsi->dev;
+ struct drm_dsc_picture_parameter_set pps;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(novatek_nt37801_supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ novatek_nt37801_reset(ctx);
+
+ ret = novatek_nt37801_on(ctx);
+ if (ret < 0)
+ goto err;
+
+ drm_dsc_pps_payload_pack(&pps, &ctx->dsc);
+
+ ret = mipi_dsi_picture_parameter_set(ctx->dsi, &pps);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to transmit PPS: %d\n", ret);
+ goto err;
+ }
+
+ ret = mipi_dsi_compression_mode(ctx->dsi, true);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable compression mode: %d\n", ret);
+ goto err;
+ }
+
+ msleep(28);
+
+ return 0;
+
+err:
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(novatek_nt37801_supplies),
+ ctx->supplies);
+
+ return ret;
+}
+
+static int novatek_nt37801_unprepare(struct drm_panel *panel)
+{
+ struct novatek_nt37801 *ctx = to_novatek_nt37801(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = novatek_nt37801_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ regulator_bulk_disable(ARRAY_SIZE(novatek_nt37801_supplies),
+ ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode novatek_nt37801_mode = {
+ .clock = (1440 + 20 + 4 + 20) * (3200 + 20 + 2 + 18) * 120 / 1000,
+ .hdisplay = 1440,
+ .hsync_start = 1440 + 20,
+ .hsync_end = 1440 + 20 + 4,
+ .htotal = 1440 + 20 + 4 + 20,
+ .vdisplay = 3200,
+ .vsync_start = 3200 + 20,
+ .vsync_end = 3200 + 20 + 2,
+ .vtotal = 3200 + 20 + 2 + 18,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int novatek_nt37801_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector,
+ &novatek_nt37801_mode);
+}
+
+static const struct drm_panel_funcs novatek_nt37801_panel_funcs = {
+ .prepare = novatek_nt37801_prepare,
+ .unprepare = novatek_nt37801_unprepare,
+ .get_modes = novatek_nt37801_get_modes,
+};
+
+static int novatek_nt37801_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops novatek_nt37801_bl_ops = {
+ .update_status = novatek_nt37801_bl_update_status,
+};
+
+static struct backlight_device *
+novatek_nt37801_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 4095,
+ .max_brightness = 4095,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &novatek_nt37801_bl_ops, &props);
+}
+
+static int novatek_nt37801_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct novatek_nt37801 *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct novatek_nt37801, panel,
+ &novatek_nt37801_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(novatek_nt37801_supplies),
+ novatek_nt37801_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ctx->panel.prepare_prev_first = true;
+ ctx->panel.backlight = novatek_nt37801_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ /* This panel only supports DSC; unconditionally enable it */
+ dsi->dsc = &ctx->dsc;
+ ctx->dsc.dsc_version_major = 1;
+ ctx->dsc.dsc_version_minor = 1;
+ ctx->dsc.slice_height = 40;
+ ctx->dsc.slice_width = 720;
+ ctx->dsc.slice_count = 1440 / ctx->dsc.slice_width;
+ ctx->dsc.bits_per_component = 8;
+ ctx->dsc.bits_per_pixel = 8 << 4; /* 4 fractional bits */
+ ctx->dsc.block_pred_enable = true;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void novatek_nt37801_remove(struct mipi_dsi_device *dsi)
+{
+ struct novatek_nt37801 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id novatek_nt37801_of_match[] = {
+ { .compatible = "novatek,nt37801" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, novatek_nt37801_of_match);
+
+static struct mipi_dsi_driver novatek_nt37801_driver = {
+ .probe = novatek_nt37801_probe,
+ .remove = novatek_nt37801_remove,
+ .driver = {
+ .name = "panel-novatek-nt37801",
+ .of_match_table = novatek_nt37801_of_match,
+ },
+};
+module_mipi_dsi_driver(novatek_nt37801_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>");
+MODULE_DESCRIPTION("Panel driver for the Novatek NT37801/NT37810 AMOLED DSI panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
index f23d8832a1ad..93f11e2e9398 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
@@ -34,8 +34,8 @@ struct s6d7aa0 {
struct s6d7aa0_panel_desc {
unsigned int panel_type;
- int (*init_func)(struct s6d7aa0 *ctx);
- int (*off_func)(struct s6d7aa0 *ctx);
+ void (*init_func)(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx);
+ void (*off_func)(struct mipi_dsi_multi_context *dsi_ctx);
const struct drm_display_mode *drm_mode;
unsigned long mode_flags;
u32 bus_flags;
@@ -62,93 +62,61 @@ static void s6d7aa0_reset(struct s6d7aa0 *ctx)
msleep(50);
}
-static int s6d7aa0_lock(struct s6d7aa0 *ctx, bool lock)
+static void s6d7aa0_lock(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx, bool lock)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
if (lock) {
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0xa5, 0xa5);
if (ctx->desc->use_passwd3)
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0x5a, 0x5a);
} else {
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0x5a, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0x5a, 0x5a);
if (ctx->desc->use_passwd3)
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0xa5, 0xa5);
}
-
- return 0;
}
static int s6d7aa0_on(struct s6d7aa0 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = ctx->desc->init_func(ctx);
- if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
- gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- return ret;
- }
+ ctx->desc->init_func(ctx, &dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
-static int s6d7aa0_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_off(struct s6d7aa0 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = ctx->desc->off_func(ctx);
- if (ret < 0) {
- dev_err(dev, "Panel-specific off function failed: %d\n", ret);
- return ret;
- }
+ ctx->desc->off_func(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(64);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 64);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return 0;
+ mipi_dsi_msleep(&dsi_ctx, 120);
}
static int s6d7aa0_prepare(struct drm_panel *panel)
{
struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ if (ret < 0)
return ret;
- }
s6d7aa0_reset(ctx);
ret = s6d7aa0_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
return ret;
}
@@ -159,12 +127,8 @@ static int s6d7aa0_prepare(struct drm_panel *panel)
static int s6d7aa0_disable(struct drm_panel *panel)
{
struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = s6d7aa0_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ s6d7aa0_off(ctx);
return 0;
}
@@ -185,13 +149,11 @@ static int s6d7aa0_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
u16 brightness = backlight_get_brightness(bl);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, brightness);
- return 0;
+ return dsi_ctx.accum_err;
}
static int s6d7aa0_bl_get_brightness(struct backlight_device *bl)
@@ -228,65 +190,39 @@ s6d7aa0_create_backlight(struct mipi_dsi_device *dsi)
/* Initialization code and structures for LSL080AL02 panel */
-static int s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ mipi_dsi_usleep_range(dsi_ctx, 20000, 25000);
- usleep_range(20000, 25000);
+ s6d7aa0_lock(ctx, dsi_ctx, false);
- ret = s6d7aa0_lock(ctx, false);
- if (ret < 0) {
- dev_err(dev, "Failed to unlock registers: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, MCS_OTP_RELOAD, 0x00, 0x10);
- usleep_range(1000, 1500);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_OTP_RELOAD, 0x00, 0x10);
+ mipi_dsi_usleep_range(dsi_ctx, 1000, 1500);
/* SEQ_B6_PARAM_8_R01 */
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x10);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb6, 0x10);
/* BL_CTL_ON */
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x28);
-
- usleep_range(5000, 6000);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x28);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x04);
+ mipi_dsi_usleep_range(dsi_ctx, 5000, 6000);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x04);
- msleep(120);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx);
- ret = s6d7aa0_lock(ctx, true);
- if (ret < 0) {
- dev_err(dev, "Failed to lock registers: %d\n", ret);
- return ret;
- }
+ mipi_dsi_msleep(dsi_ctx, 120);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ s6d7aa0_lock(ctx, dsi_ctx, true);
- return 0;
+ mipi_dsi_dcs_set_display_on_multi(dsi_ctx);
}
-static int s6d7aa0_lsl080al02_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al02_off(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
/* BL_CTL_OFF */
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x20);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x20);
}
static const struct drm_display_mode s6d7aa0_lsl080al02_mode = {
@@ -317,79 +253,51 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
/* Initialization code and structures for LSL080AL03 panel */
-static int s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- usleep_range(20000, 25000);
+ mipi_dsi_usleep_range(dsi_ctx, 20000, 25000);
- ret = s6d7aa0_lock(ctx, false);
- if (ret < 0) {
- dev_err(dev, "Failed to unlock registers: %d\n", ret);
- return ret;
- }
+ s6d7aa0_lock(ctx, dsi_ctx, false);
if (ctx->desc->panel_type == S6D7AA0_PANEL_LSL080AL03) {
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0xc7, 0x00, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23,
- 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21,
- 0x80, 0x78);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0xc7, 0x00, 0x29);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23,
+ 0x09);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21,
+ 0x80, 0x78);
} else if (ctx->desc->panel_type == S6D7AA0_PANEL_LTL101AT01) {
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0x0b);
- mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23,
- 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21,
- 0x80, 0x68);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x08);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23,
+ 0x09);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21,
+ 0x80, 0x68);
}
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x51);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x02, 0x08, 0x08);
-
- usleep_range(10000, 11000);
-
- mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x80, 0x80, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xcd,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e);
- mipi_dsi_dcs_write_seq(dsi, 0xce,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x03);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb3, 0x51);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xf2, 0x02, 0x08, 0x08);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
-
- ret = s6d7aa0_lock(ctx, true);
- if (ret < 0) {
- dev_err(dev, "Failed to lock registers: %d\n", ret);
- return ret;
- }
+ mipi_dsi_usleep_range(dsi_ctx, 10000, 11000);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x80, 0x80, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xcd,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x2e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xce,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x03);
- return 0;
+ mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx);
+ s6d7aa0_lock(ctx, dsi_ctx, true);
+ mipi_dsi_dcs_set_display_on_multi(dsi_ctx);
}
-static int s6d7aa0_lsl080al03_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al03_off(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0x22, 0x00);
}
static const struct drm_display_mode s6d7aa0_lsl080al03_mode = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index 04ce925b3d9d..d92ae6b6100f 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2020 Caleb Connolly <caleb@connolly.tech>
+/* Copyright (c) 2020 Casey Connolly <casey.connolly@linaro.org>
* Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
@@ -22,7 +22,6 @@ struct sofef00_panel {
struct mipi_dsi_device *dsi;
struct regulator *supply;
struct gpio_desc *reset_gpio;
- const struct drm_display_mode *mode;
};
static inline
@@ -44,66 +43,44 @@ static void sofef00_panel_reset(struct sofef00_panel *ctx)
static int sofef00_panel_on(struct sofef00_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- usleep_range(10000, 11000);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x12);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
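
[Editor's note] The conversions in this series all lean on the multi-command context from <drm/drm_mipi_dsi.h>. A minimal sketch of the pattern, using only the helpers visible in this diff (the panel name and command bytes are placeholders):

#include <drm/drm_mipi_dsi.h>

static int example_panel_on(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

	/* Each *_multi() call records the first failure in accum_err and
	 * becomes a no-op afterwards, so a linear init sequence needs no
	 * per-call error handling. */
	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
	mipi_dsi_msleep(&dsi_ctx, 120);
	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
	mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

	return dsi_ctx.accum_err;	/* 0 on success, first error otherwise */
}
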
static int sofef00_panel_off(struct sofef00_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(40);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 40);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(160);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 160);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sofef00_panel_prepare(struct drm_panel *panel)
@@ -122,7 +99,6 @@ static int sofef00_panel_prepare(struct drm_panel *panel)
ret = sofef00_panel_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
return ret;
}
@@ -133,13 +109,8 @@ static int sofef00_panel_prepare(struct drm_panel *panel)
static int sofef00_panel_unprepare(struct drm_panel *panel)
{
struct sofef00_panel *ctx = to_sofef00_panel(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
-
- ret = sofef00_panel_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ sofef00_panel_off(ctx);
regulator_disable(ctx->supply);
return 0;
@@ -159,26 +130,11 @@ static const struct drm_display_mode enchilada_panel_mode = {
.height_mm = 145,
};
-static const struct drm_display_mode fajita_panel_mode = {
- .clock = (1080 + 72 + 16 + 36) * (2340 + 32 + 4 + 18) * 60 / 1000,
- .hdisplay = 1080,
- .hsync_start = 1080 + 72,
- .hsync_end = 1080 + 72 + 16,
- .htotal = 1080 + 72 + 16 + 36,
- .vdisplay = 2340,
- .vsync_start = 2340 + 32,
- .vsync_end = 2340 + 32 + 4,
- .vtotal = 2340 + 32 + 4 + 18,
- .width_mm = 68,
- .height_mm = 145,
-};
-
static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector)
{
struct drm_display_mode *mode;
- struct sofef00_panel *ctx = to_sofef00_panel(panel);
- mode = drm_mode_duplicate(connector->dev, ctx->mode);
+ mode = drm_mode_duplicate(connector->dev, &enchilada_panel_mode);
if (!mode)
return -ENOMEM;
@@ -239,13 +195,6 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
if (!ctx)
return -ENOMEM;
- ctx->mode = of_device_get_match_data(dev);
-
- if (!ctx->mode) {
- dev_err(dev, "Missing device mode\n");
- return -ENODEV;
- }
-
ctx->supply = devm_regulator_get(dev, "vddio");
if (IS_ERR(ctx->supply))
return dev_err_probe(dev, PTR_ERR(ctx->supply),
@@ -295,14 +244,7 @@ static void sofef00_panel_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id sofef00_panel_of_match[] = {
- { // OnePlus 6 / enchilada
- .compatible = "samsung,sofef00",
- .data = &enchilada_panel_mode,
- },
- { // OnePlus 6T / fajita
- .compatible = "samsung,s6e3fc2x01",
- .data = &fajita_panel_mode,
- },
+ { .compatible = "samsung,sofef00" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sofef00_panel_of_match);
@@ -318,6 +260,6 @@ static struct mipi_dsi_driver sofef00_panel_driver = {
module_mipi_dsi_driver(sofef00_panel_driver);
-MODULE_AUTHOR("Caleb Connolly <caleb@connolly.tech>");
+MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index 729cbb0d8403..36abfa2e65e9 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -36,60 +36,49 @@ static inline struct sharp_nt_panel *to_sharp_nt_panel(struct drm_panel *panel)
static int sharp_nt_panel_init(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
/* Novatek two-lane operation */
- ret = mipi_dsi_dcs_write(dsi, 0xae, (u8[]){ 0x03 }, 1);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xae, 0x03);
/* Set both MCU and RGB I/F to 24bpp */
- ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT |
- (MIPI_DCS_PIXEL_FMT_24BIT << 4));
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx,
+ MIPI_DCS_PIXEL_FMT_24BIT |
+ (MIPI_DCS_PIXEL_FMT_24BIT << 4));
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_on(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_off(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 232b03c1a259..82ee2f12b8d2 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -579,9 +579,10 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
u32 bus_flags;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct panel_simple, base,
+ &panel_simple_funcs, desc->connector_type);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
panel->desc = desc;
@@ -694,8 +695,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
- drm_panel_init(&panel->base, dev, &panel_simple_funcs, connector_type);
-
err = drm_panel_of_backlight(&panel->base);
if (err) {
dev_err_probe(dev, err, "Could not find backlight\n");
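
[Editor's note] For reference, a minimal sketch of the allocation pattern the two hunks above switch panel-simple to, assuming the devm_drm_panel_alloc() helper used in the diff; the driver struct and funcs table below are hypothetical placeholders:

#include <drm/drm_panel.h>

struct my_panel {				/* hypothetical driver struct */
	struct drm_panel base;			/* must be embedded, not a pointer */
	/* driver-private fields ... */
};

static const struct drm_panel_funcs my_panel_funcs;	/* placeholder ops table */

static int my_panel_probe_sketch(struct device *dev)
{
	struct my_panel *panel;

	/* Allocates the container struct and initializes panel->base in one
	 * go, replacing the old devm_kzalloc() + drm_panel_init() pair. */
	panel = devm_drm_panel_alloc(dev, struct my_panel, base,
				     &my_panel_funcs, DRM_MODE_CONNECTOR_LVDS);
	if (IS_ERR(panel))
		return PTR_ERR(panel);

	return 0;
}
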
@@ -1027,27 +1026,28 @@ static const struct panel_desc auo_g070vvn01 = {
},
};
-static const struct drm_display_mode auo_g101evn010_mode = {
- .clock = 68930,
- .hdisplay = 1280,
- .hsync_start = 1280 + 82,
- .hsync_end = 1280 + 82 + 2,
- .htotal = 1280 + 82 + 2 + 84,
- .vdisplay = 800,
- .vsync_start = 800 + 8,
- .vsync_end = 800 + 8 + 2,
- .vtotal = 800 + 8 + 2 + 6,
+static const struct display_timing auo_g101evn010_timing = {
+ .pixelclock = { 64000000, 68930000, 85000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 8, 64, 256 },
+ .hback_porch = { 8, 64, 256 },
+ .hsync_len = { 40, 168, 767 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 8, 100 },
+ .vback_porch = { 4, 8, 100 },
+ .vsync_len = { 8, 16, 223 },
};
static const struct panel_desc auo_g101evn010 = {
- .modes = &auo_g101evn010_mode,
- .num_modes = 1,
+ .timings = &auo_g101evn010_timing,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 216,
.height = 135,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
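
[Editor's note] On the auo_g101evn010 conversion above: a struct display_timing expresses each parameter as a { min, typ, max } range rather than one fixed value, so the old 68.93 MHz mode survives as the typical pixel clock inside a 64-85 MHz window, with porch and sync ranges describing what the panel's timing controller accepts. As panel-simple converts timings to modes, the advertised mode is built from the typ values, while the min/max bounds remain available to controller drivers that can adjust their timings.
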
@@ -3527,6 +3527,30 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct drm_display_mode nlt_nl13676bc25_03f_mode = {
+ .clock = 75400,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 14,
+ .hsync_end = 1366 + 14 + 56,
+ .htotal = 1366 + 14 + 56 + 64,
+ .vdisplay = 768,
+ .vsync_start = 768 + 1,
+ .vsync_end = 768 + 1 + 3,
+ .vtotal = 768 + 1 + 3 + 22,
+};
+
+static const struct panel_desc nlt_nl13676bc25_03f = {
+ .modes = &nlt_nl13676bc25_03f_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 363,
+ .height = 215,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing nlt_nl192108ac18_02d_timing = {
.pixelclock = { 130000000, 148350000, 163000000 },
.hactive = { 1920, 1920, 1920 },
@@ -3796,6 +3820,32 @@ static const struct panel_desc pda_91_00156_a0 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode powertip_ph128800t004_zza01_mode = {
+ .clock = 71150,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 9,
+ .vsync_end = 800 + 9 + 8,
+ .vtotal = 800 + 9 + 8 + 6,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc powertip_ph128800t004_zza01 = {
+ .modes = &powertip_ph128800t004_zza01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode powertip_ph128800t006_zhc01_mode = {
.clock = 66500,
.hdisplay = 1280,
@@ -4393,10 +4443,10 @@ static const struct panel_desc tianma_tm070jvhg33 = {
};
/*
- * The datasheet computes total blanking as back porch + front porch, not
- * including sync pulse width. This is for both H and V. To make the total
- * blanking and period correct, subtract the pulse width from the front
- * porch.
+ * The TM070JDHG34-00 datasheet computes total blanking as back porch +
+ * front porch, not including sync pulse width. This is for both H and
+ * V. To make the total blanking and period correct, subtract the pulse
+ * width from the front porch.
*
* This works well for the Min and Typ values, but for Max values the sync
* pulse width is higher than back porch + front porch, so work around that
@@ -4405,6 +4455,10 @@ static const struct panel_desc tianma_tm070jvhg33 = {
*
* Exact datasheet values are added as a comment where they differ from the
* ones implemented for the above reason.
+ *
+ * The P0700WXF1MBAA datasheet is even less detailed, only listing the
+ * period and total blanking time; however, the resulting values are the
+ * same as for the TM070JDHG34-00.
*/
static const struct display_timing tianma_tm070jdhg34_00_timing = {
.pixelclock = { 68400000, 71900000, 78100000 },
@@ -4427,6 +4481,30 @@ static const struct panel_desc tianma_tm070jdhg34_00 = {
.width = 150, /* 149.76 */
.height = 94, /* 93.60 */
},
+ .delay = {
+ .prepare = 15, /* Tp1 */
+ .enable = 150, /* Tp2 */
+ .disable = 150, /* Tp4 */
+ .unprepare = 120, /* Tp3 */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
+static const struct panel_desc tianma_p0700wxf1mbaa = {
+ .timings = &tianma_tm070jdhg34_00_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 150, /* 149.76 */
+ .height = 94, /* 93.60 */
+ },
+ .delay = {
+ .prepare = 18, /* Tr + Tp1 */
+ .enable = 152, /* Tp2 + Tp5 */
+ .disable = 152, /* Tp6 + Tp4 */
+ .unprepare = 120, /* Tp3 */
+ },
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
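
[Editor's note] The new tianma_p0700wxf1mbaa entry reuses tianma_tm070jdhg34_00_timing wholesale and differs only in the power-sequencing delays: per the inline comments, its 18 ms prepare delay covers Tr + Tp1 rather than Tp1 alone (15 ms on the TM070JDHG34-00), and enable/disable grow from 150 ms to 152 ms to fold in Tp5 and Tp6.
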
@@ -5121,6 +5199,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "newhaven,nhd-4.3-480272ef-atxl",
.data = &newhaven_nhd_43_480272ef_atxl,
}, {
+ .compatible = "nlt,nl13676bc25-03f",
+ .data = &nlt_nl13676bc25_03f,
+ }, {
.compatible = "nlt,nl192108ac18-02d",
.data = &nlt_nl192108ac18_02d,
}, {
@@ -5154,6 +5235,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "pda,91-00156-a0",
.data = &pda_91_00156_a0,
}, {
+ .compatible = "powertip,ph128800t004-zza01",
+ .data = &powertip_ph128800t004_zza01,
+ }, {
.compatible = "powertip,ph128800t006-zhc01",
.data = &powertip_ph128800t006_zhc01,
}, {
@@ -5214,6 +5298,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "tfc,s9700rtwv43tr-01b",
.data = &tfc_s9700rtwv43tr_01b,
}, {
+ .compatible = "tianma,p0700wxf1mbaa",
+ .data = &tianma_p0700wxf1mbaa,
+ }, {
.compatible = "tianma,tm070jdhg30",
.data = &tianma_tm070jdhg30,
}, {
diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
index 17349825543f..b148e6cba9bd 100644
--- a/drivers/gpu/drm/panel/panel-synaptics-r63353.c
+++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
@@ -106,53 +106,34 @@ static int r63353_panel_power_off(struct r63353_panel *rpanel)
static int r63353_panel_activate(struct r63353_panel *rpanel)
{
struct mipi_dsi_device *dsi = rpanel->dsi;
- struct device *dev = &dsi->dev;
- int i, ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+ int i;
- ret = mipi_dsi_dcs_soft_reset(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to do Software Reset (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_dcs_soft_reset_multi(&dsi_ctx);
- usleep_range(15000, 17000);
+ mipi_dsi_usleep_range(&dsi_ctx, 15000, 17000);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
for (i = 0; i < rpanel->pdata->init_length; i++) {
const struct r63353_instr *instr = &rpanel->pdata->init[i];
- ret = mipi_dsi_dcs_write_buffer(dsi, instr->data, instr->len);
- if (ret < 0)
- goto fail;
+ mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, instr->data,
+ instr->len);
}
- msleep(120);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_msleep(&dsi_ctx, 120);
- usleep_range(5000, 10000);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display ON (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000);
- return 0;
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
-fail:
- gpiod_set_value(rpanel->reset_gpio, 0);
+ if (dsi_ctx.accum_err)
+ gpiod_set_value(rpanel->reset_gpio, 0);
- return ret;
+ return dsi_ctx.accum_err;
}
static int r63353_panel_prepare(struct drm_panel *panel)
@@ -178,27 +159,16 @@ static int r63353_panel_prepare(struct drm_panel *panel)
return 0;
}
-static int r63353_panel_deactivate(struct r63353_panel *rpanel)
+static void r63353_panel_deactivate(struct r63353_panel *rpanel)
{
struct mipi_dsi_device *dsi = rpanel->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display OFF (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
- usleep_range(5000, 10000);
+ mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
- return ret;
- }
-
- return 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
}
static int r63353_panel_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index 4dbf8b88f264..11d460d2ea19 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -86,11 +86,7 @@ struct td028ttec1_panel {
#define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
-/*
- * noinline_for_stack so we don't get multiple copies of tx_buf
- * on the stack in case of gcc-plugin-structleak
- */
-static int noinline_for_stack
+static int
jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
{
struct spi_device *spi = lcd->spi;
diff --git a/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c b/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c
new file mode 100644
index 000000000000..413849f7b4de
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025, Alexander Baransky <sanyapilot496@gmail.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct visionox_g2647fb105 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data visionox_g2647fb105_supplies[] = {
+ { .supply = "vdd3p3" },
+ { .supply = "vddio" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline
+struct visionox_g2647fb105 *to_visionox_g2647fb105(struct drm_panel *panel)
+{
+ return container_of(panel, struct visionox_g2647fb105, panel);
+}
+
+static void visionox_g2647fb105_reset(struct visionox_g2647fb105 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int visionox_g2647fb105_on(struct visionox_g2647fb105 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbe, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbf, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0, 0xdd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00);
+
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x0000);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 100);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static int visionox_g2647fb105_off(struct visionox_g2647fb105 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static int visionox_g2647fb105_prepare(struct drm_panel *panel)
+{
+ struct visionox_g2647fb105 *ctx = to_visionox_g2647fb105(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(visionox_g2647fb105_supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ visionox_g2647fb105_reset(ctx);
+
+ ret = visionox_g2647fb105_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int visionox_g2647fb105_unprepare(struct drm_panel *panel)
+{
+ struct visionox_g2647fb105 *ctx = to_visionox_g2647fb105(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = visionox_g2647fb105_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(visionox_g2647fb105_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode visionox_g2647fb105_mode = {
+ .clock = (1080 + 28 + 4 + 36) * (2340 + 8 + 4 + 4) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 28,
+ .hsync_end = 1080 + 28 + 4,
+ .htotal = 1080 + 28 + 4 + 36,
+ .vdisplay = 2340,
+ .vsync_start = 2340 + 8,
+ .vsync_end = 2340 + 8 + 4,
+ .vtotal = 2340 + 8 + 4 + 4,
+ .width_mm = 69,
+ .height_mm = 149,
+};
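+
+[Editor's note] Quick sanity check of the generated mode above: htotal = 1080 + 28 + 4 + 36 = 1148 and vtotal = 2340 + 8 + 4 + 4 = 2356, so the .clock expression evaluates to 1148 * 2356 * 60 / 1000 = 162281 kHz, i.e. roughly a 162.3 MHz pixel clock for 1080x2340 at 60 Hz.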
+
+static int visionox_g2647fb105_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &visionox_g2647fb105_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs visionox_g2647fb105_panel_funcs = {
+ .prepare = visionox_g2647fb105_prepare,
+ .unprepare = visionox_g2647fb105_unprepare,
+ .get_modes = visionox_g2647fb105_get_modes,
+};
+
+static int visionox_g2647fb105_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
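+
+[Editor's note] Two details worth noting in the backlight path above: the driver drops MIPI_DSI_MODE_LPM around the write so the brightness update goes out in HS mode, then restores it; and it uses mipi_dsi_dcs_set_display_brightness_large(), the variant that sends the 16-bit brightness MSB-first, which is what panels with more than 8 significant brightness bits (max_brightness is 2047 here) generally expect.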
+
+static const struct backlight_ops visionox_g2647fb105_bl_ops = {
+ .update_status = visionox_g2647fb105_bl_update_status,
+};
+
+static struct backlight_device *
+visionox_g2647fb105_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 1023,
+ .max_brightness = 2047,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &visionox_g2647fb105_bl_ops, &props);
+}
+
+static int visionox_g2647fb105_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct visionox_g2647fb105 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(visionox_g2647fb105_supplies),
+ visionox_g2647fb105_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ ctx->panel.prepare_prev_first = true;
+
+ drm_panel_init(&ctx->panel, dev, &visionox_g2647fb105_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = visionox_g2647fb105_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void visionox_g2647fb105_remove(struct mipi_dsi_device *dsi)
+{
+ struct visionox_g2647fb105 *ctx = mipi_dsi_get_drvdata(dsi);
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id visionox_g2647fb105_of_match[] = {
+ { .compatible = "visionox,g2647fb105" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_g2647fb105_of_match);
+
+static struct mipi_dsi_driver visionox_g2647fb105_driver = {
+ .probe = visionox_g2647fb105_probe,
+ .remove = visionox_g2647fb105_remove,
+ .driver = {
+ .name = "panel-visionox-g2647fb105",
+ .of_match_table = visionox_g2647fb105_of_match,
+ },
+};
+module_mipi_dsi_driver(visionox_g2647fb105_driver);
+
+MODULE_AUTHOR("Alexander Baransky <sanyapilot496@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for Visionox G2647FB105 AMOLED DSI panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index a45e4addcc19..5d35076b2e6d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -209,10 +209,20 @@ int panfrost_device_init(struct panfrost_device *pfdev)
spin_lock_init(&pfdev->cycle_counter.lock);
+ err = panfrost_pm_domain_init(pfdev);
+ if (err)
+ return err;
+
+ err = panfrost_reset_init(pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "reset init failed %d\n", err);
+ goto out_pm_domain;
+ }
+
err = panfrost_clk_init(pfdev);
if (err) {
dev_err(pfdev->dev, "clk init failed %d\n", err);
- return err;
+ goto out_reset;
}
err = panfrost_devfreq_init(pfdev);
@@ -229,25 +239,15 @@ int panfrost_device_init(struct panfrost_device *pfdev)
goto out_devfreq;
}
- err = panfrost_reset_init(pfdev);
- if (err) {
- dev_err(pfdev->dev, "reset init failed %d\n", err);
- goto out_regulator;
- }
-
- err = panfrost_pm_domain_init(pfdev);
- if (err)
- goto out_reset;
-
pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
if (IS_ERR(pfdev->iomem)) {
err = PTR_ERR(pfdev->iomem);
- goto out_pm_domain;
+ goto out_regulator;
}
err = panfrost_gpu_init(pfdev);
if (err)
- goto out_pm_domain;
+ goto out_regulator;
err = panfrost_mmu_init(pfdev);
if (err)
@@ -268,16 +268,16 @@ out_mmu:
panfrost_mmu_fini(pfdev);
out_gpu:
panfrost_gpu_fini(pfdev);
-out_pm_domain:
- panfrost_pm_domain_fini(pfdev);
-out_reset:
- panfrost_reset_fini(pfdev);
out_regulator:
panfrost_regulator_fini(pfdev);
out_devfreq:
panfrost_devfreq_fini(pfdev);
out_clk:
panfrost_clk_fini(pfdev);
+out_reset:
+ panfrost_reset_fini(pfdev);
+out_pm_domain:
+ panfrost_pm_domain_fini(pfdev);
return err;
}
@@ -287,11 +287,11 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
panfrost_job_fini(pfdev);
panfrost_mmu_fini(pfdev);
panfrost_gpu_fini(pfdev);
- panfrost_pm_domain_fini(pfdev);
- panfrost_reset_fini(pfdev);
panfrost_devfreq_fini(pfdev);
panfrost_regulator_fini(pfdev);
panfrost_clk_fini(pfdev);
+ panfrost_reset_fini(pfdev);
+ panfrost_pm_domain_fini(pfdev);
}
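
[Editor's note] With this reordering, panfrost_device_fini() tears things down in strict reverse order of the new panfrost_device_init() sequence: clocks first, then the reset line, then the PM domains last, mirroring the fact that the PM domain and reset controller are now brought up before the clocks during init.
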
#define PANFROST_EXCEPTION(id) \
@@ -406,11 +406,36 @@ void panfrost_device_reset(struct panfrost_device *pfdev)
static int panfrost_device_runtime_resume(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) {
+ ret = reset_control_deassert(pfdev->rstc);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(pfdev->clock);
+ if (ret)
+ goto err_clk;
+
+ if (pfdev->bus_clock) {
+ ret = clk_enable(pfdev->bus_clock);
+ if (ret)
+ goto err_bus_clk;
+ }
+ }
panfrost_device_reset(pfdev);
panfrost_devfreq_resume(pfdev);
return 0;
+
+err_bus_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT))
+ clk_disable(pfdev->clock);
+err_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT))
+ reset_control_assert(pfdev->rstc);
+ return ret;
}
static int panfrost_device_runtime_suspend(struct device *dev)
@@ -426,6 +451,14 @@ static int panfrost_device_runtime_suspend(struct device *dev)
panfrost_gpu_suspend_irq(pfdev);
panfrost_gpu_power_off(pfdev);
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) {
+ if (pfdev->bus_clock)
+ clk_disable(pfdev->bus_clock);
+
+ clk_disable(pfdev->clock);
+ reset_control_assert(pfdev->rstc);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index cffcb0ac7c11..dcff70f905cd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -36,10 +36,21 @@ enum panfrost_drv_comp_bits {
* enum panfrost_gpu_pm - Supported kernel power management features
* @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend
* @GPU_PM_VREG_OFF: Allow turning off regulators during system suspend
+ * @GPU_PM_RT: Allow disabling clocks and asserting the reset control during
+ * system runtime suspend
*/
enum panfrost_gpu_pm {
GPU_PM_CLK_DIS,
GPU_PM_VREG_OFF,
+ GPU_PM_RT
+};
+
+/**
+ * enum panfrost_gpu_quirks - GPU optional quirks
+ * @GPU_QUIRK_FORCE_AARCH64_PGTABLE: Use AARCH64_4K page table format
+ */
+enum panfrost_gpu_quirks {
+ GPU_QUIRK_FORCE_AARCH64_PGTABLE,
};
struct panfrost_features {
@@ -95,6 +106,9 @@ struct panfrost_compatible {
/* Allowed PM features */
u8 pm_features;
+
+ /* GPU configuration quirks */
+ u8 gpu_quirks;
};
struct panfrost_device {
@@ -162,6 +176,11 @@ struct panfrost_mmu {
int as;
atomic_t as_count;
struct list_head list;
+ struct {
+ u64 transtab;
+ u64 memattr;
+ u64 transcfg;
+ } cfg;
};
struct panfrost_engine_usage {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 0f3935556ac7..f1ec3b02f15a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -476,7 +476,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
}
}
- args->retained = drm_gem_shmem_madvise(&bo->base, args->madv);
+ args->retained = drm_gem_shmem_madvise_locked(&bo->base, args->madv);
if (args->retained) {
if (args->madv == PANFROST_MADV_DONTNEED)
@@ -776,6 +776,13 @@ static const struct panfrost_compatible default_data = {
.pm_domain_names = NULL,
};
+static const struct panfrost_compatible allwinner_h616_data = {
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 1,
+ .pm_features = BIT(GPU_PM_RT),
+};
+
static const struct panfrost_compatible amlogic_data = {
.num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
@@ -824,6 +831,7 @@ static const struct panfrost_compatible mediatek_mt8188_data = {
.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
.pm_domain_names = mediatek_mt8183_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
+ .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
static const char * const mediatek_mt8192_supplies[] = { "mali", NULL };
@@ -835,6 +843,7 @@ static const struct panfrost_compatible mediatek_mt8192_data = {
.num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains),
.pm_domain_names = mediatek_mt8192_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
+ .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
static const struct of_device_id dt_match[] = {
@@ -859,6 +868,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data },
{ .compatible = "mediatek,mt8188-mali", .data = &mediatek_mt8188_data },
{ .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data },
+ { .compatible = "allwinner,sun50i-h616-mali", .data = &allwinner_h616_data },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
index 47751302f1bc..4042afe2fbf4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_dump.c
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -209,7 +209,7 @@ void panfrost_core_dump(struct panfrost_job *job)
goto dump_header;
}
- ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
+ ret = drm_gem_vmap(&bo->base.base, &map);
if (ret) {
dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
iter.hdr->bomap.valid = 0;
@@ -228,7 +228,7 @@ void panfrost_core_dump(struct panfrost_job *job)
vaddr = map.vaddr;
memcpy(iter.data, vaddr, bo->base.base.size);
- drm_gem_vunmap_unlocked(&bo->base.base, &map);
+ drm_gem_vunmap(&bo->base.base, &map);
iter.hdr->bomap.valid = 1;
diff --git a/drivers/gpu/drm/panfrost/panfrost_features.h b/drivers/gpu/drm/panfrost/panfrost_features.h
index 7ed0cd3ea2d4..52f9d69f6db9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_features.h
+++ b/drivers/gpu/drm/panfrost/panfrost_features.h
@@ -54,6 +54,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g72 (\
@@ -64,6 +65,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g51 hw_features_g72
@@ -77,6 +79,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
BIT_ULL(HW_FEATURE_IDVS_GROUP_SIZE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g76 (\
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 8e0ff3efede7..963f04ba2de6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -200,7 +200,7 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
enum drm_gem_object_status res = 0;
- if (bo->base.base.import_attach || bo->base.pages)
+ if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
res |= DRM_GEM_OBJECT_RESIDENT;
if (bo->base.madv == PANFROST_MADV_DONTNEED)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 3d9f51bd48b6..02b60ea1433a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -51,7 +51,7 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
goto unlock_mappings;
panfrost_gem_teardown_mappings_locked(bo);
- drm_gem_shmem_purge(&bo->base);
+ drm_gem_shmem_purge_locked(&bo->base);
ret = true;
dma_resv_unlock(shmem->base.resv);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index b91019cd5acb..f6b91c052cfb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -26,6 +26,48 @@
#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)
+static u64 mair_to_memattr(u64 mair, bool coherent)
+{
+ u64 memattr = 0;
+ u32 i;
+
+ for (i = 0; i < 8; i++) {
+ u8 in_attr = mair >> (8 * i), out_attr;
+ u8 outer = in_attr >> 4, inner = in_attr & 0xf;
+
+ /* For caching to be enabled, the inner and outer caching policies
+ * both have to be write-back; if either of them is write-through
+ * or non-cacheable, we just choose non-cacheable. Device
+ * memory is also translated to non-cacheable.
+ */
+ if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
+ AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
+ } else {
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
+ /* Use SH_MIDGARD_INNER mode when the device isn't coherent,
+ * so SH_IS, which is used when IOMMU_CACHE is set, maps
+ * to Mali's internal-shareable mode. As per the Mali
+ * Spec, inner and outer-shareable modes aren't allowed
+ * for WB memory when coherency is disabled.
+ * Use SH_CPU_INNER mode when coherency is enabled, so
+ * that SH_IS actually maps to the standard definition of
+ * inner-shareable.
+ */
+ if (!coherent)
+ out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;
+ else
+ out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
+ }
+
+ memattr |= (u64)out_attr << (8 * i);
+ }
+
+ return memattr;
+}
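+
+[Editor's note] Worked example for mair_to_memattr(), using the AS_MEMATTR_AARCH64_* encodings added further down in panfrost_regs.h: a normal write-back, read/write-allocate MAIR byte of 0xff (outer nibble 0xf, inner nibble 0xf) takes the write-back branch and becomes INNER_OUTER_WB (2 << 6) | INNER_ALLOC_EXPL(1, 2) = 0x8f, plus SH_MIDGARD_INNER (0x00) on a non-coherent GPU or SH_CPU_INNER (0x10) on a coherent one, i.e. 0x8f or 0x9f. A device-memory byte such as 0x04 fails the (outer & 3) test and collapses to the non-cacheable encoding 0x40 | 0x0c = 0x4c.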
+
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
int ret;
@@ -124,9 +166,9 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev,
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as_nr = mmu->as;
- struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
- u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
- u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
+ u64 transtab = mmu->cfg.transtab;
+ u64 memattr = mmu->cfg.memattr;
+ u64 transcfg = mmu->cfg.transcfg;
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
@@ -139,6 +181,9 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
+ mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
+ mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
+
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -152,9 +197,67 @@ static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);
+ mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
+ mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), 0);
+
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
+static int mmu_cfg_init_mali_lpae(struct panfrost_mmu *mmu)
+{
+ struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
+
+ /* TODO: The following fields are duplicated between the MMU and Page
+ * Table config structs. Ideally, they should be kept in one place.
+ */
+ mmu->cfg.transtab = pgtbl_cfg->arm_mali_lpae_cfg.transtab;
+ mmu->cfg.memattr = pgtbl_cfg->arm_mali_lpae_cfg.memattr;
+ mmu->cfg.transcfg = AS_TRANSCFG_ADRMODE_LEGACY;
+
+ return 0;
+}
+
+static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu)
+{
+ struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
+ struct panfrost_device *pfdev = mmu->pfdev;
+
+ if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
+ ~AS_TRANSTAB_AARCH64_4K_ADDR_MASK))
+ return -EINVAL;
+
+ mmu->cfg.transtab = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+
+ mmu->cfg.memattr = mair_to_memattr(pgtbl_cfg->arm_lpae_s1_cfg.mair,
+ pgtbl_cfg->coherent_walk);
+
+ mmu->cfg.transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
+ AS_TRANSCFG_PTW_RA |
+ AS_TRANSCFG_ADRMODE_AARCH64_4K |
+ AS_TRANSCFG_INA_BITS(55 - pgtbl_cfg->ias);
+ if (pgtbl_cfg->coherent_walk)
+ mmu->cfg.transcfg |= AS_TRANSCFG_PTW_SH_OS;
+
+ return 0;
+}
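+
+[Editor's note] In the AARCH64_4K path above, the input-address size is encoded as 55 minus the VA width reported by the GPU: with the common 48-bit VA configuration (pgtbl_cfg->ias = 48) this programs AS_TRANSCFG_INA_BITS(7), alongside the AARCH64_4K address mode, a write-back page-table-walker memattr with the read-allocate hint, and PTW_SH_OS only when the walker is cache-coherent (pgtbl_cfg->coherent_walk).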
+
+static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu,
+ enum io_pgtable_fmt fmt)
+{
+ struct panfrost_device *pfdev = mmu->pfdev;
+
+ switch (fmt) {
+ case ARM_64_LPAE_S1:
+ return mmu_cfg_init_aarch64_4k(mmu);
+ case ARM_MALI_LPAE:
+ return mmu_cfg_init_mali_lpae(mmu);
+ default:
+ /* This should never happen */
+ drm_WARN(pfdev->ddev, 1, "Invalid pgtable format");
+ return -EINVAL;
+ }
+}
+
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as;
@@ -327,7 +430,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
struct drm_gem_object *obj = &shmem->base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
- int prot = IOMMU_READ | IOMMU_WRITE;
+ int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
if (WARN_ON(mapping->active))
return 0;
@@ -489,7 +592,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_unlock;
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ refcount_set(&bo->base.pages_use_count, 1);
} else {
pages = bo->base.pages;
if (pages[page_offset]) {
@@ -528,7 +631,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_map;
mmu_map_sg(pfdev, bomapping->mmu, addr,
- IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+ IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt);
bomapping->active = true;
bo->heap_rss_size += SZ_2M;
@@ -615,7 +718,22 @@ static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
+ u32 va_bits = GPU_MMU_FEATURES_VA_BITS(pfdev->features.mmu_features);
+ u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(pfdev->features.mmu_features);
struct panfrost_mmu *mmu;
+ enum io_pgtable_fmt fmt;
+ int ret;
+
+ if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) {
+ if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) {
+ dev_err_once(pfdev->dev,
+ "AARCH64_4K page table not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+ fmt = ARM_64_LPAE_S1;
+ } else {
+ fmt = ARM_MALI_LPAE;
+ }
mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
if (!mmu)
@@ -633,23 +751,33 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = SZ_4K | SZ_2M,
- .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
- .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
+ .ias = va_bits,
+ .oas = pa_bits,
.coherent_walk = pfdev->coherent,
.tlb = &mmu_tlb_ops,
.iommu_dev = pfdev->dev,
};
- mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
- mmu);
+ mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu);
if (!mmu->pgtbl_ops) {
- kfree(mmu);
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto err_free_mmu;
}
+ ret = panfrost_mmu_cfg_init(mmu, fmt);
+ if (ret)
+ goto err_free_io_pgtable;
+
kref_init(&mmu->refcount);
return mmu;
+
+err_free_io_pgtable:
+ free_io_pgtable_ops(mmu->pgtbl_ops);
+
+err_free_mmu:
+ kfree(mmu);
+ return ERR_PTR(ret);
}
static const char *access_type_name(struct panfrost_device *pfdev,
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index ba9b6e2b2636..52befead08c6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -106,7 +106,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_close_bo;
}
- ret = drm_gem_vmap_unlocked(&bo->base, &map);
+ ret = drm_gem_vmap(&bo->base, &map);
if (ret)
goto err_put_mapping;
perfcnt->buf = map.vaddr;
@@ -165,7 +165,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
return 0;
err_vunmap:
- drm_gem_vunmap_unlocked(&bo->base, &map);
+ drm_gem_vunmap(&bo->base, &map);
err_put_mapping:
panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
@@ -195,7 +195,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
- drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map);
+ drm_gem_vunmap(&perfcnt->mapping->obj->base.base, &map);
perfcnt->buf = NULL;
panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index c7bba476ab3f..2b8f1617b836 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -16,6 +16,8 @@
#define GROUPS_L2_COHERENT BIT(0) /* Cores groups are l2 coherent */
#define GPU_MMU_FEATURES 0x014 /* (RO) MMU features */
+#define GPU_MMU_FEATURES_VA_BITS(x) ((x) & GENMASK(7, 0))
+#define GPU_MMU_FEATURES_PA_BITS(x) (((x) >> 8) & GENMASK(7, 0))
#define GPU_AS_PRESENT 0x018 /* (RO) Address space slots present */
#define GPU_JS_PRESENT 0x01C /* (RO) Job slots present */
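
[Editor's note] Example decode of the new helpers (the register value is hypothetical): a GPU_MMU_FEATURES readback of 0x00002830 yields GPU_MMU_FEATURES_VA_BITS() = 0x30 = 48 and GPU_MMU_FEATURES_PA_BITS() = 0x28 = 40, which are the values panfrost_mmu_ctx_create() now feeds into pgtbl_cfg.ias and .oas in place of the open-coded FIELD_GET() masks.
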
@@ -299,6 +301,17 @@
#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x04) /* (RW) Translation Table Base Address for address space n, high word */
#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x08) /* (RW) Memory attributes for address space n, low word. */
#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0x0C) /* (RW) Memory attributes for address space n, high word. */
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_IMPL (2 << 2)
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(w, r) ((3 << 2) | \
+ ((w) ? BIT(0) : 0) | \
+ ((r) ? BIT(1) : 0))
+#define AS_MEMATTR_AARCH64_SH_MIDGARD_INNER (0 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER (1 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER_SHADER_COH (2 << 4)
+#define AS_MEMATTR_AARCH64_SHARED (0 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_NC (1 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_WB (2 << 6)
+#define AS_MEMATTR_AARCH64_FAULT (3 << 6)
#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10) /* (RW) Lock region address for address space n, low word */
#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14) /* (RW) Lock region address for address space n, high word */
#define AS_COMMAND(as) (MMU_AS(as) + 0x18) /* (WO) MMU command register for address space n */
@@ -309,6 +322,24 @@
/* Additional Bifrost AS registers */
#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30) /* (RW) Translation table configuration for address space n, low word */
#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34) /* (RW) Translation table configuration for address space n, high word */
+#define AS_TRANSCFG_ADRMODE_LEGACY (0 << 0)
+#define AS_TRANSCFG_ADRMODE_UNMAPPED (1 << 0)
+#define AS_TRANSCFG_ADRMODE_IDENTITY (2 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_4K (6 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_64K (8 << 0)
+#define AS_TRANSCFG_INA_BITS(x) ((x) << 6)
+#define AS_TRANSCFG_OUTA_BITS(x) ((x) << 14)
+#define AS_TRANSCFG_SL_CONCAT BIT(22)
+#define AS_TRANSCFG_PTW_MEMATTR_NC (1 << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_WB (2 << 24)
+#define AS_TRANSCFG_PTW_SH_NS (0 << 28)
+#define AS_TRANSCFG_PTW_SH_OS (2 << 28)
+#define AS_TRANSCFG_PTW_SH_IS (3 << 28)
+#define AS_TRANSCFG_PTW_RA BIT(30)
+#define AS_TRANSCFG_DISABLE_HIER_AP BIT(33)
+#define AS_TRANSCFG_DISABLE_AF_FAULT BIT(34)
+#define AS_TRANSCFG_WXN BIT(35)
+#define AS_TRANSCFG_XREADABLE BIT(36)
#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38) /* (RO) Secondary fault address for address space n, low word */
#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C) /* (RO) Secondary fault address for address space n, high word */
@@ -324,6 +355,11 @@
#define AS_TRANSTAB_LPAE_READ_INNER BIT(2)
#define AS_TRANSTAB_LPAE_SHARE_OUTER BIT(4)
+/*
+ * Begin AARCH64_4K MMU TRANSTAB register values
+ */
+#define AS_TRANSTAB_AARCH64_4K_ADDR_MASK 0xfffffffffffffff0
+
#define AS_STATUS_AS_ACTIVE 0x01
#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3 << 8)
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index a9da1d1eeb70..f0b2da5b2b96 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -171,10 +171,6 @@ int panthor_device_init(struct panthor_device *ptdev)
struct page *p;
int ret;
- ret = panthor_gpu_coherency_init(ptdev);
- if (ret)
- return ret;
-
init_completion(&ptdev->unplug.done);
ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
if (ret)
@@ -184,6 +180,11 @@ int panthor_device_init(struct panthor_device *ptdev)
if (ret)
return ret;
+#ifdef CONFIG_DEBUG_FS
+ drmm_mutex_init(&ptdev->base, &ptdev->gems.lock);
+ INIT_LIST_HEAD(&ptdev->gems.node);
+#endif
+
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
p = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!p)
@@ -247,6 +248,10 @@ int panthor_device_init(struct panthor_device *ptdev)
if (ret)
goto err_rpm_put;
+ ret = panthor_gpu_coherency_init(ptdev);
+ if (ret)
+ goto err_unplug_gpu;
+
ret = panthor_mmu_init(ptdev);
if (ret)
goto err_unplug_gpu;
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index da6574021664..465d3ab1b79e 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -205,6 +205,17 @@ struct panthor_device {
/** @fast_rate: Maximum device clock frequency. Set by DVFS */
unsigned long fast_rate;
+
+#ifdef CONFIG_DEBUG_FS
+ /** @gems: Device-wide list of GEM objects owned by at least one file. */
+ struct {
+ /** @gems.lock: Protects the device-wide list of GEM objects. */
+ struct mutex lock;
+
+ /** @node: Used to keep track of all the device's DRM objects */
+ struct list_head node;
+ } gems;
+#endif
};
struct panthor_gpu_usage {
@@ -383,8 +394,6 @@ static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *da
if (!status) \
break; \
\
- gpu_write(ptdev, __reg_prefix ## _INT_CLEAR, status); \
- \
__handler(ptdev, status); \
ret = IRQ_HANDLED; \
} \
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 06fe46e32073..6200cad22563 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -940,6 +940,7 @@ static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
struct drm_file *file)
{
struct drm_panthor_bo_mmap_offset *args = data;
+ struct panthor_gem_object *bo;
struct drm_gem_object *obj;
int ret;
@@ -950,6 +951,12 @@ static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
if (!obj)
return -ENOENT;
+ bo = to_panthor_bo(obj);
+ if (bo->flags & DRM_PANTHOR_BO_NO_MMAP) {
+ ret = -EPERM;
+ goto out;
+ }
+
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
@@ -1331,6 +1338,46 @@ static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data,
return 0;
}
+static int panthor_ioctl_bo_set_label(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_panthor_bo_set_label *args = data;
+ struct drm_gem_object *obj;
+ const char *label = NULL;
+ int ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (args->label) {
+ label = strndup_user((const char __user *)(uintptr_t)args->label,
+ PANTHOR_BO_LABEL_MAXLEN);
+ if (IS_ERR(label)) {
+ ret = PTR_ERR(label);
+ if (ret == -EINVAL)
+ ret = -E2BIG;
+ goto err_put_obj;
+ }
+ }
+
+ /*
+ * We treat passing a label of length 0 and passing a NULL label
+ * differently, because even though they might seem conceptually
+ * similar, future uses of the BO label might expect a different
+ * behaviour in each case.
+ */
+ panthor_gem_bo_set_label(obj, label);
+
+err_put_obj:
+ drm_gem_object_put(obj);
+
+ return ret;
+}
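+
+[Editor's note] A hypothetical userspace sketch of the new ioctl follows; the struct and field names (handle, pad, label) match what the handler above dereferences, but the exact uapi layout is assumed here, not shown in this hunk.
+
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <drm/panthor_drm.h>
+
+static int panthor_label_bo(int drm_fd, uint32_t handle, const char *name)
+{
+	struct drm_panthor_bo_set_label args = {
+		.handle = handle,
+		/* NUL-terminated user string; the kernel copies it with
+		 * strndup_user() and rejects anything longer than
+		 * PANTHOR_BO_LABEL_MAXLEN with -E2BIG. */
+		.label = (uint64_t)(uintptr_t)name,
+	};
+
+	return ioctl(drm_fd, DRM_IOCTL_PANTHOR_BO_SET_LABEL, &args);
+}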
+
static int
panthor_open(struct drm_device *ddev, struct drm_file *file)
{
@@ -1400,6 +1447,7 @@ static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
PANTHOR_IOCTL(TILER_HEAP_CREATE, tiler_heap_create, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(BO_SET_LABEL, bo_set_label, DRM_RENDER_ALLOW),
};
static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -1496,9 +1544,34 @@ static const struct file_operations panthor_drm_driver_fops = {
};
#ifdef CONFIG_DEBUG_FS
+static int panthor_gems_show(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
+
+ panthor_gem_debugfs_print_bos(ptdev, m);
+
+ return 0;
+}
+
+static struct drm_info_list panthor_debugfs_list[] = {
+ {"gems", panthor_gems_show, 0, NULL},
+};
+
+static int panthor_gems_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(panthor_debugfs_list,
+ ARRAY_SIZE(panthor_debugfs_list),
+ minor->debugfs_root, minor);
+
+ return 0;
+}
+
static void panthor_debugfs_init(struct drm_minor *minor)
{
panthor_mmu_debugfs_init(minor);
+ panthor_gems_debugfs_init(minor);
}
#endif
@@ -1509,6 +1582,7 @@ static void panthor_debugfs_init(struct drm_minor *minor)
* - 1.2 - adds DEV_QUERY_GROUP_PRIORITIES_INFO query
* - adds PANTHOR_GROUP_PRIORITY_REALTIME priority
* - 1.3 - adds DRM_PANTHOR_GROUP_STATE_INNOCENT flag
+ * - 1.4 - adds DRM_IOCTL_PANTHOR_BO_SET_LABEL ioctl
*/
static const struct drm_driver panthor_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
@@ -1522,7 +1596,7 @@ static const struct drm_driver panthor_drm_driver = {
.name = "panthor",
.desc = "Panthor DRM driver",
.major = 1,
- .minor = 3,
+ .minor = 4,
.gem_create_object = panthor_gem_create_object,
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
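
For context, a minimal userspace sketch of the new label ioctl follows. It is an illustration only: it assumes the drm_panthor_bo_set_label uAPI struct exported by panthor_drm.h carries exactly the handle/pad/label fields dereferenced in panthor_ioctl_bo_set_label() above, and the helper name is hypothetical.

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/panthor_drm.h>   /* uAPI header; include path depends on the libdrm setup */

    /* Hypothetical helper: attach a debug label to a panthor BO handle. */
    static int panthor_bo_label(int fd, uint32_t handle, const char *label)
    {
            struct drm_panthor_bo_set_label args = {
                    .handle = handle,
                    /* .pad stays zeroed; the kernel returns -EINVAL otherwise */
                    .label = (uint64_t)(uintptr_t)label,
            };

            /* errno carries EPERM, E2BIG, ENOENT, ... on failure */
            return drmIoctl(fd, DRM_IOCTL_PANTHOR_BO_SET_LABEL, &args);
    }

Passing a NULL label clears any previously set label, matching the NULL-vs-empty distinction the ioctl handler documents.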
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 0f52766a3120..7bc38e635329 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -449,7 +449,8 @@ panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Queue FW interface");
if (IS_ERR(mem))
return mem;
@@ -481,7 +482,8 @@ panthor_fw_alloc_suspend_buf_mem(struct panthor_device *ptdev, size_t size)
return panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), size,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "FW suspend buffer");
}
static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
@@ -601,7 +603,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
section_size,
DRM_PANTHOR_BO_NO_MMAP,
- vm_map_flags, va);
+ vm_map_flags, va, "FW section");
if (IS_ERR(section->mem))
return PTR_ERR(section->mem);
@@ -1008,6 +1010,8 @@ static void panthor_fw_init_global_iface(struct panthor_device *ptdev)
static void panthor_job_irq_handler(struct panthor_device *ptdev, u32 status)
{
+ gpu_write(ptdev, JOB_INT_CLEAR, status);
+
if (!ptdev->fw->booted && (status & JOB_INT_GLOBAL_IF))
ptdev->fw->booted = true;
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 8244a4e6c2a2..7c00fd77758b 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -2,6 +2,7 @@
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */
+#include <linux/cleanup.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
@@ -10,14 +11,64 @@
#include <drm/panthor_drm.h>
#include "panthor_device.h"
+#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_mmu.h"
+#ifdef CONFIG_DEBUG_FS
+static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev,
+ struct panthor_gem_object *bo)
+{
+ INIT_LIST_HEAD(&bo->debugfs.node);
+
+ bo->debugfs.creator.tgid = current->group_leader->pid;
+ get_task_comm(bo->debugfs.creator.process_name, current->group_leader);
+
+ mutex_lock(&ptdev->gems.lock);
+ list_add_tail(&bo->debugfs.node, &ptdev->gems.node);
+ mutex_unlock(&ptdev->gems.lock);
+}
+
+static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo)
+{
+ struct panthor_device *ptdev = container_of(bo->base.base.dev,
+ struct panthor_device, base);
+
+ if (list_empty(&bo->debugfs.node))
+ return;
+
+ mutex_lock(&ptdev->gems.lock);
+ list_del_init(&bo->debugfs.node);
+ mutex_unlock(&ptdev->gems.lock);
+}
+
+static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags)
+{
+ bo->debugfs.flags = usage_flags | PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED;
+}
+#else
+static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev,
+ struct panthor_gem_object *bo)
+{}
+static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {}
+static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {}
+#endif
+
static void panthor_gem_free_object(struct drm_gem_object *obj)
{
struct panthor_gem_object *bo = to_panthor_bo(obj);
struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem;
+ panthor_gem_debugfs_bo_rm(bo);
+
+ /*
+ * Label might have been allocated with kstrdup_const(),
+ * we need to take that into account when freeing the memory
+ */
+ kfree_const(bo->label.str);
+
+ mutex_destroy(&bo->label.lock);
+
drm_gem_free_mmap_offset(&bo->base.base);
mutex_destroy(&bo->gpuva_list_lock);
drm_gem_shmem_free(&bo->base);
@@ -67,17 +118,19 @@ out_free_bo:
* @gpu_va: GPU address assigned when mapping to the VM.
* If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
* automatically allocated.
+ * @name: Descriptive label of the BO's contents
*
* Return: A valid pointer in case of success, an ERR_PTR() otherwise.
*/
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
size_t size, u32 bo_flags, u32 vm_map_flags,
- u64 gpu_va)
+ u64 gpu_va, const char *name)
{
struct drm_gem_shmem_object *obj;
struct panthor_kernel_bo *kbo;
struct panthor_gem_object *bo;
+ u32 debug_flags = PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL;
int ret;
if (drm_WARN_ON(&ptdev->base, !vm))
@@ -97,6 +150,12 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
kbo->obj = &obj->base;
bo->flags = bo_flags;
+ if (vm == panthor_fw_vm(ptdev))
+ debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED;
+
+ panthor_gem_kernel_bo_set_label(kbo, name);
+ panthor_gem_debugfs_set_usage_flags(to_panthor_bo(kbo->obj), debug_flags);
+
/* The system and GPU MMU page size might differ, which becomes a
* problem for FW sections that need to be mapped at explicit address
* since our PAGE_SIZE alignment might cover a VA range that's
@@ -129,17 +188,6 @@ err_free_bo:
return ERR_PTR(ret);
}
-static int panthor_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- struct panthor_gem_object *bo = to_panthor_bo(obj);
-
- /* Don't allow mmap on objects that have the NO_MMAP flag set. */
- if (bo->flags & DRM_PANTHOR_BO_NO_MMAP)
- return -EINVAL;
-
- return drm_gem_shmem_object_mmap(obj, vma);
-}
-
static struct dma_buf *
panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
{
@@ -155,7 +203,7 @@ static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj)
struct panthor_gem_object *bo = to_panthor_bo(obj);
enum drm_gem_object_status res = 0;
- if (bo->base.base.import_attach || bo->base.pages)
+ if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
res |= DRM_GEM_OBJECT_RESIDENT;
return res;
@@ -169,7 +217,7 @@ static const struct drm_gem_object_funcs panthor_gem_funcs = {
.get_sg_table = drm_gem_shmem_object_get_sg_table,
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
- .mmap = panthor_gem_mmap,
+ .mmap = drm_gem_shmem_object_mmap,
.status = panthor_gem_status,
.export = panthor_gem_prime_export,
.vm_ops = &drm_gem_shmem_vm_ops,
@@ -196,6 +244,9 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
obj->base.map_wc = !ptdev->coherent;
mutex_init(&obj->gpuva_list_lock);
drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
+ mutex_init(&obj->label.lock);
+
+ panthor_gem_debugfs_bo_add(ptdev, obj);
return &obj->base.base;
}
@@ -245,5 +296,153 @@ panthor_gem_create_with_handle(struct drm_file *file,
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(&shmem->base);
+ /*
+ * No explicit flags are needed in the call below, since the
+ * function internally sets the INITIALIZED bit for us.
+ */
+ panthor_gem_debugfs_set_usage_flags(bo, 0);
+
return ret;
}
+
+void
+panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(obj);
+ const char *old_label;
+
+ scoped_guard(mutex, &bo->label.lock) {
+ old_label = bo->label.str;
+ bo->label.str = label;
+ }
+
+ kfree_const(old_label);
+}
+
+void
+panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label)
+{
+ const char *str;
+
+ /* We should never attempt labelling a UM-exposed GEM object */
+ if (drm_WARN_ON(bo->obj->dev, bo->obj->handle_count > 0))
+ return;
+
+ if (!label)
+ return;
+
+ str = kstrdup_const(label, GFP_KERNEL);
+ if (!str) {
+ /* Failing to allocate memory for a label isn't a fatal condition */
+ drm_warn(bo->obj->dev, "Not enough memory to allocate BO label");
+ return;
+ }
+
+ panthor_gem_bo_set_label(bo->obj, str);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct gem_size_totals {
+ size_t size;
+ size_t resident;
+ size_t reclaimable;
+};
+
+static void panthor_gem_debugfs_print_flag_names(struct seq_file *m)
+{
+ int len;
+ int i;
+
+ static const char * const gem_state_flags_names[] = {
+ [PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT] = "imported",
+ [PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT] = "exported",
+ };
+
+ static const char * const gem_usage_flags_names[] = {
+ [PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT] = "kernel",
+ [PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT] = "fw-mapped",
+ };
+
+ seq_puts(m, "GEM state flags: ");
+ for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
+ if (!gem_state_flags_names[i])
+ continue;
+ seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i],
+ (u32)BIT(i), (i < len - 1) ? ", " : "\n");
+ }
+
+ seq_puts(m, "GEM usage flags: ");
+ for (i = 0, len = ARRAY_SIZE(gem_usage_flags_names); i < len; i++) {
+ if (!gem_usage_flags_names[i])
+ continue;
+ seq_printf(m, "%s (0x%x)%s", gem_usage_flags_names[i],
+ (u32)BIT(i), (i < len - 1) ? ", " : "\n\n");
+ }
+}
+
+static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo,
+ struct seq_file *m,
+ struct gem_size_totals *totals)
+{
+ unsigned int refcount = kref_read(&bo->base.base.refcount);
+ char creator_info[32] = {};
+ size_t resident_size;
+ u32 gem_usage_flags = bo->debugfs.flags & (u32)~PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED;
+ u32 gem_state_flags = 0;
+
+ /* Skip BOs being destroyed. */
+ if (!refcount)
+ return;
+
+ resident_size = bo->base.pages ? bo->base.base.size : 0;
+
+ snprintf(creator_info, sizeof(creator_info),
+ "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
+ seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
+ creator_info,
+ bo->base.base.name,
+ refcount,
+ bo->base.base.size,
+ resident_size,
+ drm_vma_node_start(&bo->base.base.vma_node));
+
+ if (bo->base.base.import_attach)
+ gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
+ if (bo->base.base.dma_buf)
+ gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED;
+
+ seq_printf(m, "0x%-8x 0x%-10x", gem_state_flags, gem_usage_flags);
+
+ scoped_guard(mutex, &bo->label.lock) {
+ seq_printf(m, "%s\n", bo->label.str ? : "");
+ }
+
+ totals->size += bo->base.base.size;
+ totals->resident += resident_size;
+ if (bo->base.madv > 0)
+ totals->reclaimable += resident_size;
+}
+
+void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev,
+ struct seq_file *m)
+{
+ struct gem_size_totals totals = {0};
+ struct panthor_gem_object *bo;
+
+ panthor_gem_debugfs_print_flag_names(m);
+
+ seq_puts(m, "created-by global-name refcount size resident-size file-offset state usage label\n");
+ seq_puts(m, "----------------------------------------------------------------------------------------------------------------------------------------------\n");
+
+ scoped_guard(mutex, &ptdev->gems.lock) {
+ list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) {
+ if (bo->debugfs.flags & PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED)
+ panthor_gem_debugfs_bo_print(bo, m, &totals);
+ }
+ }
+
+ seq_puts(m, "==============================================================================================================================================\n");
+ seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
+ totals.size, totals.resident, totals.reclaimable);
+}
+#endif
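
The free path in panthor_gem_free_object() above relies on the kstrdup_const()/kfree_const() pairing: labels handed in by kernel callers are usually string literals, which kstrdup_const() does not copy, so the matching free must be const-aware. A minimal sketch of that pairing (helper names hypothetical):

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Duplicate a label; literals living in .rodata are returned as-is. */
    static const char *label_dup(const char *label)
    {
            return kstrdup_const(label, GFP_KERNEL);
    }

    /* Const-aware free: a no-op for .rodata pointers, kfree() for real copies. */
    static void label_free(const char *label)
    {
            kfree_const(label);
    }

This is also why panthor_gem_bo_set_label() takes ownership of the string, and why the UM path goes through strndup_user() first: that always yields a heap copy, which kfree_const() releases like a normal kfree().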
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index 5749ef2ebe03..4dd732dcd59f 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -13,6 +13,56 @@
struct panthor_vm;
+#define PANTHOR_BO_LABEL_MAXLEN 4096
+
+enum panthor_debugfs_gem_state_flags {
+ PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT = 0,
+ PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT = 1,
+
+ /** @PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED: GEM BO is PRIME imported. */
+ PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED = BIT(PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT),
+
+ /** @PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED: GEM BO is PRIME exported. */
+ PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED = BIT(PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT),
+};
+
+enum panthor_debugfs_gem_usage_flags {
+ PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT = 0,
+ PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT = 1,
+
+ /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL: BO is for kernel use only. */
+ PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL = BIT(PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT),
+
+ /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED: BO is mapped on the FW VM. */
+ PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED = BIT(PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT),
+
+ /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED: BO is ready for DebugFS display. */
+ PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED = BIT(31),
+};
+
+/**
+ * struct panthor_gem_debugfs - GEM object's DebugFS list information
+ */
+struct panthor_gem_debugfs {
+ /**
+ * @node: Node used to insert the object in the device-wide list of
+ * GEM objects, to display information about it through a DebugFS file.
+ */
+ struct list_head node;
+
+ /** @creator: Information about the UM process which created the GEM. */
+ struct {
+ /** @creator.process_name: Name of the group leader of the creating process. */
+ char process_name[TASK_COMM_LEN];
+
+ /** @creator.tgid: TGID (group leader PID) of the creating process. */
+ pid_t tgid;
+ } creator;
+
+ /** @flags: Combination of panthor_debugfs_gem_usage_flags flags */
+ u32 flags;
+};
+
/**
* struct panthor_gem_object - Driver specific GEM object.
*/
@@ -46,6 +96,24 @@ struct panthor_gem_object {
/** @flags: Combination of drm_panthor_bo_flags flags. */
u32 flags;
+
+ /**
+ * @label: BO tagging fields. The label can be assigned within the
+ * driver itself or through a specific IOCTL.
+ */
+ struct {
+ /**
+ * @label.str: Pointer to a NUL-terminated label string.
+ */
+ const char *str;
+
+ /** @label.lock: Protects access to the @label.str field. */
+ struct mutex lock;
+ } label;
+
+#ifdef CONFIG_DEBUG_FS
+ struct panthor_gem_debugfs debugfs;
+#endif
};
/**
@@ -91,6 +159,9 @@ panthor_gem_create_with_handle(struct drm_file *file,
struct panthor_vm *exclusive_vm,
u64 *size, u32 flags, uint32_t *handle);
+void panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label);
+void panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label);
+
static inline u64
panthor_kernel_bo_gpuva(struct panthor_kernel_bo *bo)
{
@@ -112,7 +183,7 @@ panthor_kernel_bo_vmap(struct panthor_kernel_bo *bo)
if (bo->kmap)
return 0;
- ret = drm_gem_vmap_unlocked(bo->obj, &map);
+ ret = drm_gem_vmap(bo->obj, &map);
if (ret)
return ret;
@@ -126,7 +197,7 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
if (bo->kmap) {
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap);
- drm_gem_vunmap_unlocked(bo->obj, &map);
+ drm_gem_vunmap(bo->obj, &map);
bo->kmap = NULL;
}
}
@@ -134,8 +205,13 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
size_t size, u32 bo_flags, u32 vm_map_flags,
- u64 gpu_va);
+ u64 gpu_va, const char *name);
void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo);
+#ifdef CONFIG_DEBUG_FS
+void panthor_gem_debugfs_print_bos(struct panthor_device *pfdev,
+ struct seq_file *m);
+#endif
+
#endif /* __PANTHOR_GEM_H__ */
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index 671049020afa..32d678a0114e 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -150,6 +150,8 @@ static void panthor_gpu_init_info(struct panthor_device *ptdev)
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
+ gpu_write(ptdev, GPU_INT_CLEAR, status);
+
if (status & GPU_IRQ_FAULT) {
u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
u64 address = ((u64)gpu_read(ptdev, GPU_FAULT_ADDR_HI) << 32) |
diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
index 3bdf61c14264..d236e9ceade4 100644
--- a/drivers/gpu/drm/panthor/panthor_heap.c
+++ b/drivers/gpu/drm/panthor/panthor_heap.c
@@ -151,7 +151,8 @@ static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool,
chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Tiler heap chunk");
if (IS_ERR(chunk->bo)) {
ret = PTR_ERR(chunk->bo);
goto err_free_chunk;
@@ -555,7 +556,8 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Heap pool");
if (IS_ERR(pool->gpu_contexts)) {
ret = PTR_ERR(pool->gpu_contexts);
goto err_destroy_pool;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 12a02e28f50f..6ca9a2642a4e 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -781,6 +781,7 @@ out_enable_as:
if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
+ ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as);
gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
}
@@ -1103,7 +1104,7 @@ static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
/* If the vm_bo object was destroyed, release the pin reference that
* was hold by this object.
*/
- if (unpin && !bo->base.base.import_attach)
+ if (unpin && !drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
drm_gpuvm_put(vm);
@@ -1234,7 +1235,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
if (ret)
goto err_cleanup;
- if (!bo->base.base.import_attach) {
+ if (!drm_gem_is_imported(&bo->base.base)) {
/* Pre-reserve the BO pages, so the map operation doesn't have to
* allocate.
*/
@@ -1245,7 +1246,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
if (IS_ERR(sgt)) {
- if (!bo->base.base.import_attach)
+ if (!drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
ret = PTR_ERR(sgt);
@@ -1256,7 +1257,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base);
if (!preallocated_vm_bo) {
- if (!bo->base.base.import_attach)
+ if (!drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
ret = -ENOMEM;
@@ -1282,7 +1283,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
* which will be released in panthor_vm_bo_put().
*/
if (preallocated_vm_bo != op_ctx->map.vm_bo &&
- !bo->base.base.import_attach)
+ !drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
op_ctx->map.bo_offset = offset;
@@ -1709,11 +1710,17 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
access_type, access_type_name(ptdev, fault_status),
source_id);
+ /* We don't handle VM faults at the moment, so let's just clear the
+ * interrupt and let the writer/reader crash.
+ * Note that COMPLETED irqs are never cleared, but this is fine
+ * because they are always masked.
+ */
+ gpu_write(ptdev, MMU_INT_CLEAR, mask);
+
/* Ignore MMU interrupts on this AS until it's been
* re-enabled.
*/
ptdev->mmu->irq.mask = new_int_mask;
- gpu_write(ptdev, MMU_INT_MASK, new_int_mask);
if (ptdev->mmu->as.slots[as].vm)
ptdev->mmu->as.slots[as].vm->unhandled_fault = true;
diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
index b7b3b3add166..a7a323dc5cf9 100644
--- a/drivers/gpu/drm/panthor/panthor_regs.h
+++ b/drivers/gpu/drm/panthor/panthor_regs.h
@@ -133,8 +133,8 @@
#define GPU_COHERENCY_PROT_BIT(name) BIT(GPU_COHERENCY_ ## name)
#define GPU_COHERENCY_PROTOCOL 0x304
-#define GPU_COHERENCY_ACE 0
-#define GPU_COHERENCY_ACE_LITE 1
+#define GPU_COHERENCY_ACE_LITE 0
+#define GPU_COHERENCY_ACE 1
#define GPU_COHERENCY_NONE 31
#define MCU_CONTROL 0x700
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 4d31d1967716..43ee57728de5 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -840,7 +840,7 @@ panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
if (queue->syncwait.kmap) {
struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
- drm_gem_vunmap_unlocked(queue->syncwait.obj, &map);
+ drm_gem_vunmap(queue->syncwait.obj, &map);
queue->syncwait.kmap = NULL;
}
@@ -866,7 +866,7 @@ panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue
goto err_put_syncwait_obj;
queue->syncwait.obj = &bo->base.base;
- ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map);
+ ret = drm_gem_vmap(queue->syncwait.obj, &map);
if (drm_WARN_ON(&ptdev->base, ret))
goto err_put_syncwait_obj;
@@ -3332,7 +3332,8 @@ group_create_queue(struct panthor_group *group,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "CS ring buffer");
if (IS_ERR(queue->ringbuf)) {
ret = PTR_ERR(queue->ringbuf);
goto err_free_queue;
@@ -3362,7 +3363,8 @@ group_create_queue(struct panthor_group *group,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Group job stats");
if (IS_ERR(queue->profiling.slots)) {
ret = PTR_ERR(queue->profiling.slots);
@@ -3493,7 +3495,8 @@ int panthor_group_create(struct panthor_file *pfile,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Group sync objects");
if (IS_ERR(group->syncobjs)) {
ret = PTR_ERR(group->syncobjs);
goto err_put_group;
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 1e4b28d03f4d..5f460b296c0c 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -501,7 +501,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
* if we find it, it will take precedence. This is on the Integrator/AP
* which only has this option for PL110 graphics.
*/
- if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
+ if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
np = of_find_matching_node_and_match(NULL, impd1_clcd_of_match,
&clcd_id);
if (np)
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 69427eb8bed2..d8f24bcae34b 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_QXL
tristate "QXL virtual GPU"
- depends on DRM && PCI && MMU && HAS_IOPORT
+ depends on DRM && PCI && HAS_IOPORT
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index f51bace9555d..c479f0c0dd5c 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -2,7 +2,7 @@
config DRM_RADEON
tristate "ATI Radeon"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on AGP || !AGP
select FW_LOADER
select DRM_CLIENT_SELECTION
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 2db40789235c..a7caac5b8ac8 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -3825,8 +3825,7 @@ typedef struct _ATOM_DPCD_INFO
// note2: From RV770, the memory is more than 32bit addressable, so we will change
// ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains
// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
-// (in offset to start of memory address) is KB aligned instead of byte aligend.
-/***********************************************************************************/
+// (in offset to start of memory address) is KB aligned instead of byte aligned.
// Note3:
/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA or non VGA adapter,
for CAIL, The size of FB access area is known, only thing missing is the Offset of FB Access area, so we can have:
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index fa78824931cc..3f3c360dce4b 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -501,8 +501,8 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
u8 link_status[DP_LINK_STATUS_SIZE];
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
- if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status)
- <= 0)
+ if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux,
+ link_status) < 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
@@ -678,7 +678,7 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -741,7 +741,7 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 11a492f21157..51a3e0fc2f56 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -8548,7 +8548,7 @@ int cik_suspend(struct radeon_device *rdev)
*/
int cik_init(struct radeon_device *rdev)
{
- struct radeon_ring *ring;
+ struct radeon_ring *ring, *ring_cp1, *ring_cp2;
int r;
/* Read BIOS */
@@ -8623,19 +8623,22 @@ int cik_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_index);
+ ring_cp1 = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ ring_cp2 = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ ring_cp1->ring_obj = NULL;
+ ring_cp2->ring_obj = NULL;
+ ring_cp1->doorbell_index = RADEON_MAX_DOORBELLS;
+ ring_cp2->doorbell_index = RADEON_MAX_DOORBELLS;
+
+ r600_ring_init(rdev, ring_cp1, 1024 * 1024);
+ r = radeon_doorbell_get(rdev, &ring_cp1->doorbell_index);
if (r)
return r;
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_index);
+ r600_ring_init(rdev, ring_cp2, 1024 * 1024);
+ r = radeon_doorbell_get(rdev, &ring_cp2->doorbell_index);
if (r)
- return r;
+ goto out;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring->ring_obj = NULL;
@@ -8653,12 +8656,16 @@ int cik_init(struct radeon_device *rdev)
r = r600_pcie_gart_init(rdev);
if (r)
- return r;
+ goto out;
rdev->accel_working = true;
r = cik_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
+ radeon_doorbell_free(rdev, ring_cp1->doorbell_index);
+ radeon_doorbell_free(rdev, ring_cp2->doorbell_index);
+ ring_cp1->doorbell_index = RADEON_MAX_DOORBELLS;
+ ring_cp2->doorbell_index = RADEON_MAX_DOORBELLS;
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
cik_irq_fini(rdev);
@@ -8678,10 +8685,16 @@ int cik_init(struct radeon_device *rdev)
*/
if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
DRM_ERROR("radeon: MC ucode required for NI+.\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
return 0;
+
+out:
+ radeon_doorbell_free(rdev, ring_cp1->doorbell_index);
+ radeon_doorbell_free(rdev, ring_cp2->doorbell_index);
+ return r;
}
/**
@@ -8695,6 +8708,7 @@ int cik_init(struct radeon_device *rdev)
*/
void cik_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring;
radeon_pm_fini(rdev);
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
@@ -8708,6 +8722,10 @@ void cik_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
uvd_v1_0_fini(rdev);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ radeon_doorbell_free(rdev, ring->doorbell_index);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ radeon_doorbell_free(rdev, ring->doorbell_index);
radeon_uvd_fini(rdev);
radeon_vce_fini(rdev);
cik_pcie_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 661f374f5f27..9758f3a9df75 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -290,28 +290,6 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
return result;
}
-/*
- * write the audio workaround status to the hardware
- */
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset = dig->afmt->offset;
- bool hdmi_audio_workaround = false; /* FIXME */
- u32 value;
-
- if (!hdmi_audio_workaround ||
- r600_hdmi_is_audio_buffer_filled(encoder))
- value = 0; /* disable workaround */
- else
- value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
- WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
- value, ~HDMI0_AUDIO_TEST_EN);
-}
-
void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
struct radeon_crtc *crtc, unsigned int clock)
{
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8605c074d9f7..63c47585afbc 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -394,9 +394,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, l
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_any(struct radeon_device *rdev,
- struct radeon_fence **fences,
- bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 8f5e07834fcc..9e697f10f9ca 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -401,7 +401,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
size_t size);
void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock);
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
u32 r600_get_xclk(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 64b26bfeafc9..b8e6202f1d5b 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -409,7 +409,6 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
* radeon_cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
* @error: error number
- * @backoff: indicator to backoff the reservation
*
* If error is set then unvalidate buffer, otherwise just free memory
* used by parsing context.
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 8ff4f18b51a9..5b5b54e876d4 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -575,48 +575,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
}
/**
- * radeon_fence_wait_any - wait for a fence to signal on any ring
- *
- * @rdev: radeon device pointer
- * @fences: radeon fence object(s)
- * @intr: use interruptable sleep
- *
- * Wait for any requested fence to signal (all asics). Fence
- * array is indexed by ring id. @intr selects whether to use
- * interruptable (true) or non-interruptable (false) sleep when
- * waiting for the fences. Used by the suballocator.
- * Returns 0 if any fence has passed, error for all other cases.
- */
-int radeon_fence_wait_any(struct radeon_device *rdev,
- struct radeon_fence **fences,
- bool intr)
-{
- uint64_t seq[RADEON_NUM_RINGS];
- unsigned int i, num_rings = 0;
- long r;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- seq[i] = 0;
-
- if (!fences[i])
- continue;
-
- seq[i] = fences[i]->seq;
- ++num_rings;
- }
-
- /* nothing to wait for ? */
- if (num_rings == 0)
- return -ENOENT;
-
- r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- return r;
-
- return 0;
-}
-
-/**
* radeon_fence_wait_next - wait for the next fence to signal
*
* @rdev: radeon device pointer
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 65a911ddd509..f9267b026f8d 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1109,7 +1109,7 @@
#define MIN_POWER_SHIFT 0
#define MAX_POWER(x) ((x) << 16)
#define MAX_POWER_MASK (0x3fff << 16)
-#define MAX_POWER_SHIFT 0
+#define MAX_POWER_SHIFT 16
#define SQ_POWER_THROTTLE2 0x8e5c
#define MAX_POWER_DELTA(x) ((x) << 0)
#define MAX_POWER_DELTA_MASK (0x3fff << 0)
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
index 79b67c406bd6..93ba115d654f 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
@@ -32,11 +32,6 @@ struct rcar_cmm {
} lut;
};
-static inline int rcar_cmm_read(struct rcar_cmm *rcmm, u32 reg)
-{
- return ioread32(rcmm->base + reg);
-}
-
static inline void rcar_cmm_write(struct rcar_cmm *rcmm, u32 reg, u32 data)
{
iowrite32(data, rcmm->base + reg);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
index 70d8ad065bfa..4c8fe83dd610 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
@@ -705,7 +705,7 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name,
cells, i, &args);
if (ret < 0)
- goto error;
+ goto done;
/*
* Add the VSP to the list or update the corresponding existing
@@ -743,13 +743,11 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
vsp->dev = rcdu;
ret = rcar_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask);
- if (ret < 0)
- goto error;
+ if (ret)
+ goto done;
}
- return 0;
-
-error:
+done:
for (i = 0; i < ARRAY_SIZE(vsps); ++i)
of_node_put(vsps[i].np);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index 380a855b832a..a9145253294f 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -634,6 +634,7 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
}
static int rcar_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -641,7 +642,7 @@ static int rcar_lvds_attach(struct drm_bridge *bridge,
if (!lvds->next_bridge)
return 0;
- return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge,
+ return drm_bridge_attach(encoder, lvds->next_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index d1e626068065..7ab8be46c7f6 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -799,11 +799,12 @@ static void rcar_mipi_dsi_stop_video(struct rcar_mipi_dsi *dsi)
*/
static int rcar_mipi_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
index 7c1817240846..e57536fd6f4d 100644
--- a/drivers/gpu/drm/renesas/rz-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -14,10 +14,15 @@ config DRM_RZG2L_DU
Choose this option if you have an RZ/G2L alike chipset.
If M is selected the module will be called rzg2l-du-drm.
-config DRM_RZG2L_MIPI_DSI
- tristate "RZ/G2L MIPI DSI Encoder Support"
- depends on DRM && DRM_BRIDGE && OF
- depends on ARCH_RENESAS || COMPILE_TEST
- select DRM_MIPI_DSI
+config DRM_RZG2L_USE_MIPI_DSI
+ bool "RZ/G2L MIPI DSI Encoder Support"
+ depends on DRM_BRIDGE && OF
+ depends on DRM_RZG2L_DU || COMPILE_TEST
+ default DRM_RZG2L_DU
help
Enable support for the RZ/G2L Display Unit embedded MIPI DSI encoders.
+
+config DRM_RZG2L_MIPI_DSI
+ def_tristate DRM_RZG2L_DU
+ depends on DRM_RZG2L_USE_MIPI_DSI
+ select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index cbd9b9841267..5e40f0c1e7b0 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -79,7 +79,7 @@ DEFINE_DRM_GEM_DMA_FOPS(rzg2l_du_fops);
static const struct drm_driver rzg2l_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .dumb_create = rzg2l_du_dumb_create,
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(rzg2l_du_dumb_create),
DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &rzg2l_du_fops,
.name = "rzg2l-du",
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
index 90c6269ccd29..55a97691e9b2 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -36,23 +36,129 @@
static const struct rzg2l_du_format_info rzg2l_du_format_infos[] = {
{
- .fourcc = DRM_FORMAT_XRGB8888,
- .v4l2 = V4L2_PIX_FMT_XBGR32,
- .bpp = 32,
+ .fourcc = DRM_FORMAT_RGB332,
+ .v4l2 = V4L2_PIX_FMT_RGB332,
.planes = 1,
.hsub = 1,
}, {
- .fourcc = DRM_FORMAT_ARGB8888,
- .v4l2 = V4L2_PIX_FMT_ABGR32,
- .bpp = 32,
+ .fourcc = DRM_FORMAT_ARGB4444,
+ .v4l2 = V4L2_PIX_FMT_ARGB444,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB4444,
+ .v4l2 = V4L2_PIX_FMT_XRGB444,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB1555,
+ .v4l2 = V4L2_PIX_FMT_ARGB555,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB1555,
+ .v4l2 = V4L2_PIX_FMT_XRGB555,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_RGB565,
+ .v4l2 = V4L2_PIX_FMT_RGB565,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGR888,
+ .v4l2 = V4L2_PIX_FMT_RGB24,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGB888,
.v4l2 = V4L2_PIX_FMT_BGR24,
- .bpp = 24,
.planes = 1,
.hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRA8888,
+ .v4l2 = V4L2_PIX_FMT_ARGB32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRX8888,
+ .v4l2 = V4L2_PIX_FMT_XRGB32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB8888,
+ .v4l2 = V4L2_PIX_FMT_ABGR32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB8888,
+ .v4l2 = V4L2_PIX_FMT_XBGR32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_UYVY,
+ .v4l2 = V4L2_PIX_FMT_UYVY,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUYV,
+ .v4l2 = V4L2_PIX_FMT_YUYV,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVYU,
+ .v4l2 = V4L2_PIX_FMT_YVYU,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV12,
+ .v4l2 = V4L2_PIX_FMT_NV12M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV21,
+ .v4l2 = V4L2_PIX_FMT_NV21M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV16,
+ .v4l2 = V4L2_PIX_FMT_NV16M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV61,
+ .v4l2 = V4L2_PIX_FMT_NV61M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV420,
+ .v4l2 = V4L2_PIX_FMT_YUV420M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVU420,
+ .v4l2 = V4L2_PIX_FMT_YVU420M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV422,
+ .v4l2 = V4L2_PIX_FMT_YUV422M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVU422,
+ .v4l2 = V4L2_PIX_FMT_YVU422M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV444,
+ .v4l2 = V4L2_PIX_FMT_YUV444M,
+ .planes = 3,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_YVU444,
+ .v4l2 = V4L2_PIX_FMT_YVU444M,
+ .planes = 3,
+ .hsub = 1,
}
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
index 876e97cfbf45..e2c599f115c6 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
@@ -23,7 +23,6 @@ struct sg_table;
struct rzg2l_du_format_info {
u32 fourcc;
u32 v4l2;
- unsigned int bpp;
unsigned int planes;
unsigned int hsub;
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
index 8643ff2eec46..040d4e4aff00 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
@@ -340,6 +340,15 @@ int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np,
drm_plane_helper_add(&plane->plane,
&rzg2l_du_vsp_plane_helper_funcs);
+
+ drm_plane_create_alpha_property(&plane->plane);
+ drm_plane_create_zpos_property(&plane->plane, i, 0,
+ num_planes - 1);
+
+ drm_plane_create_blend_mode_property(&plane->plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
}
return 0;
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index 4550c6d84796..dc6ab012cdb6 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -479,7 +479,7 @@ static int rzg2l_mipi_dsi_start_video(struct rzg2l_mipi_dsi *dsi)
u32 status;
int ret;
- /* Configuration for Blanking sequence and start video input*/
+ /* Configuration for Blanking sequence and start video input */
vich1set0r = VICH1SET0R_HFPNOLP | VICH1SET0R_HBPNOLP |
VICH1SET0R_HSANOLP | VICH1SET0R_VSTART;
rzg2l_mipi_dsi_link_write(dsi, VICH1SET0R, vich1set0r);
@@ -523,11 +523,12 @@ err:
*/
static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 26c4410b2407..ab525668939a 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -2,12 +2,14 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
+ depends on OF
select DRM_CLIENT_SELECTION
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
+ select DRM_DISPLAY_DP_AUX_BUS if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index a8265a1bf9ff..d30f0983a53a 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -21,6 +21,7 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -51,11 +52,13 @@ struct rockchip_grf_reg_field {
/**
* struct rockchip_dp_chip_data - split out the GRF settings for the various chips
* @lcdc_sel: grf register field of lcdc_sel
+ * @edp_mode: grf register field of edp_mode
* @chip_type: specific chip type
* @reg: register base address
*/
struct rockchip_dp_chip_data {
const struct rockchip_grf_reg_field lcdc_sel;
+ const struct rockchip_grf_reg_field edp_mode;
u32 chip_type;
u32 reg;
};
@@ -70,6 +73,7 @@ struct rockchip_dp_device {
struct clk *grfclk;
struct regmap *grf;
struct reset_control *rst;
+ struct reset_control *apbrst;
const struct rockchip_dp_chip_data *data;
@@ -115,6 +119,10 @@ static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
usleep_range(10, 20);
reset_control_deassert(dp->rst);
+ reset_control_assert(dp->apbrst);
+ usleep_range(10, 20);
+ reset_control_deassert(dp->apbrst);
+
return 0;
}
@@ -136,12 +144,21 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
return ret;
}
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->edp_mode, 1);
+ if (ret != 0)
+ DRM_DEV_ERROR(dp->dev, "failed to set edp mode %d\n", ret);
+
return ret;
}
static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
{
struct rockchip_dp_device *dp = pdata_encoder_to_dp(plat_data);
+ int ret;
+
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->edp_mode, 0);
+ if (ret != 0)
+ DRM_DEV_ERROR(dp->dev, "failed to set edp mode %d\n", ret);
clk_disable_unprepare(dp->pclk);
@@ -205,6 +222,10 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
struct rockchip_dp_device *dp = encoder_to_dp(encoder);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
+ struct of_endpoint endpoint;
+ struct device_node *remote_port, *remote_port_parent;
+ char name[32];
+ u32 port_id;
int ret;
crtc = rockchip_dp_drm_get_new_crtc(encoder, state);
@@ -222,13 +243,27 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
return;
}
- ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+ ret = drm_of_encoder_active_endpoint(dp->dev->of_node, encoder, &endpoint);
if (ret < 0)
return;
- DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
+ remote_port_parent = of_graph_get_remote_port_parent(endpoint.local_node);
+ if (remote_port_parent) {
+ if (of_get_child_by_name(remote_port_parent, "ports")) {
+ remote_port = of_graph_get_remote_port(endpoint.local_node);
+ of_property_read_u32(remote_port, "reg", &port_id);
+ of_node_put(remote_port);
+ sprintf(name, "%s vp%d", remote_port_parent->full_name, port_id);
+ } else {
+ sprintf(name, "%s %s",
+ remote_port_parent->full_name, endpoint.id ? "vopl" : "vopb");
+ }
+ of_node_put(remote_port_parent);
+
+ DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
+ }
- ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, ret);
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, endpoint.id);
if (ret != 0)
DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
@@ -322,6 +357,12 @@ static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
return PTR_ERR(dp->rst);
}
+ dp->apbrst = devm_reset_control_get_optional(dev, "apb");
+ if (IS_ERR(dp->apbrst)) {
+ DRM_DEV_ERROR(dev, "failed to get apb reset control\n");
+ return PTR_ERR(dp->apbrst);
+ }
+
return 0;
}
@@ -392,11 +433,28 @@ static const struct component_ops rockchip_dp_component_ops = {
.unbind = rockchip_dp_unbind,
};
+static int rockchip_dp_link_panel(struct drm_dp_aux *aux)
+{
+ struct analogix_dp_plat_data *plat_data = analogix_dp_aux_to_plat_data(aux);
+ struct rockchip_dp_device *dp = pdata_encoder_to_dp(plat_data);
+ int ret;
+
+ /*
+ * If drm_of_find_panel_or_bridge() returns -ENODEV, there is no valid panel
+ * or bridge node. Keep going anyway, so that bridges without a dedicated
+ * driver and plain DP use cases still work.
+ */
+ ret = drm_of_find_panel_or_bridge(dp->dev->of_node, 1, 0, &plat_data->panel, NULL);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ return component_add(dp->dev, &rockchip_dp_component_ops);
+}
+
static int rockchip_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct rockchip_dp_chip_data *dp_data;
- struct drm_panel *panel = NULL;
struct rockchip_dp_device *dp;
struct resource *res;
int i;
@@ -406,10 +464,6 @@ static int rockchip_dp_probe(struct platform_device *pdev)
if (!dp_data)
return -ENODEV;
- ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
- if (ret < 0 && ret != -ENODEV)
- return ret;
-
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
@@ -432,7 +486,6 @@ static int rockchip_dp_probe(struct platform_device *pdev)
dp->dev = dev;
dp->adp = ERR_PTR(-ENODEV);
- dp->plat_data.panel = panel;
dp->plat_data.dev_type = dp->data->chip_type;
dp->plat_data.power_on = rockchip_dp_poweron;
dp->plat_data.power_off = rockchip_dp_powerdown;
@@ -448,9 +501,20 @@ static int rockchip_dp_probe(struct platform_device *pdev)
if (IS_ERR(dp->adp))
return PTR_ERR(dp->adp);
- ret = component_add(dev, &rockchip_dp_component_ops);
- if (ret)
- return ret;
+ ret = devm_of_dp_aux_populate_bus(analogix_dp_get_aux(dp->adp), rockchip_dp_link_panel);
+ if (ret) {
+ /*
+ * If devm_of_dp_aux_populate_bus() returns -ENODEV, done_probing() is never
+ * called because the AUX bus has no child devices. In that case, call
+ * rockchip_dp_link_panel() directly so the other valid DT configurations
+ * keep working.
+ *
+ * NOTE: devm_of_dp_aux_populate_bus() may also return -EPROBE_DEFER.
+ */
+ if (ret != -ENODEV)
+ return dev_err_probe(dp->dev, ret, "failed to populate aux bus\n");
+
+ return rockchip_dp_link_panel(analogix_dp_get_aux(dp->adp));
+ }
return 0;
}
@@ -501,9 +565,24 @@ static const struct rockchip_dp_chip_data rk3288_dp[] = {
{ /* sentinel */ }
};
+static const struct rockchip_dp_chip_data rk3588_edp[] = {
+ {
+ .edp_mode = GRF_REG_FIELD(0x0000, 0, 0),
+ .chip_type = RK3588_EDP,
+ .reg = 0xfdec0000,
+ },
+ {
+ .edp_mode = GRF_REG_FIELD(0x0004, 0, 0),
+ .chip_type = RK3588_EDP,
+ .reg = 0xfded0000,
+ },
+ { /* sentinel */ }
+};
+
static const struct of_device_id rockchip_dp_dt_ids[] = {
{.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp },
{.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp },
+ {.compatible = "rockchip,rk3588-edp", .data = &rk3588_edp },
{}
};
MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
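
The probe rework above follows the common DP AUX-bus pattern: populate the AUX bus first so panels described under an aux-bus DT node can probe against the eDP AUX channel, and only then register the DRM component. A condensed sketch of that ordering in a generic driver (all my_dp_* names are hypothetical, error handling trimmed):

    #include <linux/component.h>
    #include <linux/device.h>
    #include <drm/display/drm_dp_aux_bus.h>
    #include <drm/display/drm_dp_helper.h>

    struct my_dp {
            struct device *dev;
            struct drm_dp_aux aux;
    };

    static const struct component_ops my_dp_component_ops = { };   /* bind/unbind elided */

    /* Runs once every aux-bus child has probed (or directly when there are none). */
    static int my_dp_done_probing(struct drm_dp_aux *aux)
    {
            struct my_dp *dp = container_of(aux, struct my_dp, aux);

            return component_add(dp->dev, &my_dp_component_ops);
    }

    static int my_dp_register(struct my_dp *dp)
    {
            int ret = devm_of_dp_aux_populate_bus(&dp->aux, my_dp_done_probing);

            /* -ENODEV means no aux-bus children, so the callback never fires:
             * register directly in that case, as rockchip_dp_probe() does.
             */
            if (ret == -ENODEV)
                    return my_dp_done_probing(&dp->aux);

            return ret;     /* 0 on success, or -EPROBE_DEFER and friends */
    }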
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index 3d1dddb34603..7d531b6f4c09 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -94,6 +94,7 @@ struct rockchip_hdmi_qp {
struct gpio_desc *enable_gpio;
struct delayed_work hpd_work;
int port_id;
+ const struct rockchip_hdmi_qp_ctrl_ops *ctrl_ops;
};
struct rockchip_hdmi_qp_ctrl_ops {
@@ -461,6 +462,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
return -ENODEV;
}
+ hdmi->ctrl_ops = cfg->ctrl_ops;
hdmi->dev = &pdev->dev;
hdmi->port_id = -ENODEV;
@@ -600,27 +602,8 @@ static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev)
static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
{
struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
- u32 val;
- val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
- HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
- HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
- HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
- regmap_write(hdmi->vo_regmap,
- hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
- val);
-
- val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
- RK3588_SET_HPD_PATH_MASK);
- regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
-
- if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
- RK3588_HDMI1_GRANT_SEL);
- else
- val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
- RK3588_HDMI0_GRANT_SEL);
- regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
+ hdmi->ctrl_ops->io_init(hdmi);
dw_hdmi_qp_resume(dev, hdmi->hdmi);
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 483ecfeaebb0..db4b4038e51d 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -10,10 +10,12 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hdmi.h>
+#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -29,8 +31,19 @@
#include "inno_hdmi.h"
+#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
+
#define INNO_HDMI_MIN_TMDS_CLOCK 25000000U
+#define RK3036_GRF_SOC_CON2 0x148
+#define RK3036_HDMI_PHSYNC BIT(4)
+#define RK3036_HDMI_PVSYNC BIT(5)
+
+enum inno_hdmi_dev_type {
+ RK3036_HDMI,
+ RK3128_HDMI,
+};
+
struct inno_hdmi_phy_config {
unsigned long pixelclock;
u8 pre_emphasis;
@@ -38,6 +51,7 @@ struct inno_hdmi_phy_config {
};
struct inno_hdmi_variant {
+ enum inno_hdmi_dev_type dev_type;
struct inno_hdmi_phy_config *phy_configs;
struct inno_hdmi_phy_config *default_phy_config;
};
@@ -58,6 +72,7 @@ struct inno_hdmi {
struct clk *pclk;
struct clk *refclk;
void __iomem *regs;
+ struct regmap *grf;
struct drm_connector connector;
struct rockchip_encoder encoder;
@@ -374,7 +389,15 @@ static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi)
static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
struct drm_display_mode *mode)
{
- int value;
+ int value, psync;
+
+ if (hdmi->variant->dev_type == RK3036_HDMI) {
+ psync = mode->flags & DRM_MODE_FLAG_PHSYNC ? RK3036_HDMI_PHSYNC : 0;
+ value = HIWORD_UPDATE(psync, RK3036_HDMI_PHSYNC);
+ psync = mode->flags & DRM_MODE_FLAG_PVSYNC ? RK3036_HDMI_PVSYNC : 0;
+ value |= HIWORD_UPDATE(psync, RK3036_HDMI_PVSYNC);
+ regmap_write(hdmi->grf, RK3036_GRF_SOC_CON2, value);
+ }
/* Set detail external video timing polarity and interlace mode */
value = v_EXTERANL_VIDEO(1);
@@ -885,32 +908,34 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(hdmi->regs);
hdmi->pclk = devm_clk_get(hdmi->dev, "pclk");
- if (IS_ERR(hdmi->pclk)) {
- DRM_DEV_ERROR(hdmi->dev, "Unable to get HDMI pclk clk\n");
- return PTR_ERR(hdmi->pclk);
- }
+ if (IS_ERR(hdmi->pclk))
+ return dev_err_probe(dev, PTR_ERR(hdmi->pclk), "Unable to get HDMI pclk\n");
ret = clk_prepare_enable(hdmi->pclk);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev,
- "Cannot enable HDMI pclk clock: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable HDMI pclk: %d\n", ret);
hdmi->refclk = devm_clk_get_optional(hdmi->dev, "ref");
if (IS_ERR(hdmi->refclk)) {
- DRM_DEV_ERROR(hdmi->dev, "Unable to get HDMI reference clock\n");
- ret = PTR_ERR(hdmi->refclk);
+ ret = dev_err_probe(dev, PTR_ERR(hdmi->refclk), "Unable to get HDMI refclk\n");
goto err_disable_pclk;
}
ret = clk_prepare_enable(hdmi->refclk);
if (ret) {
- DRM_DEV_ERROR(hdmi->dev,
- "Cannot enable HDMI reference clock: %d\n", ret);
+ ret = dev_err_probe(dev, ret, "Cannot enable HDMI refclk: %d\n", ret);
goto err_disable_pclk;
}
+ if (hdmi->variant->dev_type == RK3036_HDMI) {
+ hdmi->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (IS_ERR(hdmi->grf)) {
+ ret = dev_err_probe(dev, PTR_ERR(hdmi->grf),
+ "Unable to get rockchip,grf\n");
+ goto err_disable_clk;
+ }
+ }
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
@@ -995,11 +1020,13 @@ static void inno_hdmi_remove(struct platform_device *pdev)
}
static const struct inno_hdmi_variant rk3036_inno_hdmi_variant = {
+ .dev_type = RK3036_HDMI,
.phy_configs = rk3036_hdmi_phy_configs,
.default_phy_config = &rk3036_hdmi_phy_configs[1],
};
static const struct inno_hdmi_variant rk3128_inno_hdmi_variant = {
+ .dev_type = RK3128_HDMI,
.phy_configs = rk3128_hdmi_phy_configs,
.default_phy_config = &rk3128_hdmi_phy_configs[1],
};
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index f7a460190313..e7875b52f298 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -5,6 +5,9 @@
*/
#include <drm/drm_atomic.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -46,27 +49,20 @@ struct rk3066_hdmi {
struct clk *hclk;
void __iomem *regs;
- struct drm_connector connector;
+ struct drm_bridge bridge;
+ struct drm_connector *connector;
struct rockchip_encoder encoder;
struct rk3066_hdmi_i2c *i2c;
- struct i2c_adapter *ddc;
unsigned int tmdsclk;
struct hdmi_data_info hdmi_data;
};
-static struct rk3066_hdmi *encoder_to_rk3066_hdmi(struct drm_encoder *encoder)
+static struct rk3066_hdmi *bridge_to_rk3066_hdmi(struct drm_bridge *bridge)
{
- struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
-
- return container_of(rkencoder, struct rk3066_hdmi, encoder);
-}
-
-static struct rk3066_hdmi *connector_to_rk3066_hdmi(struct drm_connector *connector)
-{
- return container_of(connector, struct rk3066_hdmi, connector);
+ return container_of(bridge, struct rk3066_hdmi, bridge);
}
static inline u8 hdmi_readb(struct rk3066_hdmi *hdmi, u16 offset)
@@ -161,57 +157,40 @@ static void rk3066_hdmi_set_power_mode(struct rk3066_hdmi *hdmi, int mode)
hdmi->tmdsclk = DEFAULT_PLLA_RATE;
}
-static int
-rk3066_hdmi_upload_frame(struct rk3066_hdmi *hdmi, int setup_rc,
- union hdmi_infoframe *frame, u32 frame_index,
- u32 mask, u32 disable, u32 enable)
+static int rk3066_hdmi_bridge_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
{
- if (mask)
- hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, disable);
-
- hdmi_writeb(hdmi, HDMI_CP_BUF_INDEX, frame_index);
-
- if (setup_rc >= 0) {
- u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
- ssize_t rc, i;
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
- rc = hdmi_infoframe_pack(frame, packed_frame,
- sizeof(packed_frame));
- if (rc < 0)
- return rc;
-
- for (i = 0; i < rc; i++)
- hdmi_writeb(hdmi, HDMI_CP_BUF_ACC_HB0 + i * 4,
- packed_frame[i]);
-
- if (mask)
- hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, enable);
+ if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ drm_err(bridge->dev, "Unsupported infoframe type: %u\n", type);
+ return 0;
}
- return setup_rc;
+ hdmi_writeb(hdmi, HDMI_CP_BUF_INDEX, HDMI_INFOFRAME_AVI);
+
+ return 0;
}
-static int rk3066_hdmi_config_avi(struct rk3066_hdmi *hdmi,
- struct drm_display_mode *mode)
+static int
+rk3066_hdmi_bridge_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
{
- union hdmi_infoframe frame;
- int rc;
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
+ ssize_t i;
- rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- &hdmi->connector, mode);
+ if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ drm_err(bridge->dev, "Unsupported infoframe type: %u\n", type);
+ return 0;
+ }
- if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
- else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
- else
- frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+ rk3066_hdmi_bridge_clear_infoframe(bridge, type);
- frame.avi.colorimetry = hdmi->hdmi_data.colorimetry;
- frame.avi.scan_mode = HDMI_SCAN_MODE_NONE;
+ for (i = 0; i < len; i++)
+ hdmi_writeb(hdmi, HDMI_CP_BUF_ACC_HB0 + i * 4, buffer[i]);
- return rk3066_hdmi_upload_frame(hdmi, rc, &frame,
- HDMI_INFOFRAME_AVI, 0, 0, 0);
+ return 0;
}
static int rk3066_hdmi_config_video_timing(struct rk3066_hdmi *hdmi,
@@ -324,9 +303,27 @@ static void rk3066_hdmi_config_phy(struct rk3066_hdmi *hdmi)
}
static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
- struct drm_display_mode *mode)
+ struct drm_atomic_state *state)
{
- struct drm_display_info *display = &hdmi->connector.display_info;
+ struct drm_bridge *bridge = &hdmi->bridge;
+ struct drm_connector *connector;
+ struct drm_display_info *display;
+ struct drm_display_mode *mode;
+ struct drm_connector_state *new_conn_state;
+ struct drm_crtc_state *new_crtc_state;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+
+ new_conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!new_conn_state))
+ return -EINVAL;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+ if (WARN_ON(!new_crtc_state))
+ return -EINVAL;
+
+ display = &connector->display_info;
+ mode = &new_crtc_state->adjusted_mode;
hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;
@@ -363,7 +360,7 @@ static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
if (display->is_hdmi) {
hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK,
HDMI_VIDEO_MODE_HDMI);
- rk3066_hdmi_config_avi(hdmi, mode);
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
} else {
hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK, 0);
}
@@ -386,15 +383,15 @@ static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
return 0;
}
-static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void rk3066_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
- struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
int mux, val;
- conn_state = drm_atomic_get_new_connector_state(state, &hdmi->connector);
+ conn_state = drm_atomic_get_new_connector_state(state, hdmi->connector);
if (WARN_ON(!conn_state))
return;
@@ -402,7 +399,7 @@ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
if (WARN_ON(!crtc_state))
return;
- mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
+ mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, &hdmi->encoder.encoder);
if (mux)
val = (HDMI_VIDEO_SEL << 16) | HDMI_VIDEO_SEL;
else
@@ -413,13 +410,13 @@ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder enable select: vop%s\n",
(mux) ? "1" : "0");
- rk3066_hdmi_setup(hdmi, &crtc_state->adjusted_mode);
+ rk3066_hdmi_setup(hdmi, state);
}
-static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void rk3066_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
- struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder disable\n");
@@ -450,39 +447,34 @@ rk3066_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
static const
struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
.atomic_check = rk3066_hdmi_encoder_atomic_check,
- .atomic_enable = rk3066_hdmi_encoder_enable,
- .atomic_disable = rk3066_hdmi_encoder_disable,
};
static enum drm_connector_status
-rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
+rk3066_hdmi_bridge_detect(struct drm_bridge *bridge)
{
- struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
return (hdmi_readb(hdmi, HDMI_HPG_MENS_STA) & HDMI_HPG_IN_STATUS_HIGH) ?
connector_status_connected : connector_status_disconnected;
}
-static int rk3066_hdmi_connector_get_modes(struct drm_connector *connector)
+static const struct drm_edid *
+rk3066_hdmi_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector)
{
- struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
const struct drm_edid *drm_edid;
- int ret = 0;
-
- if (!hdmi->ddc)
- return 0;
- drm_edid = drm_edid_read_ddc(connector, hdmi->ddc);
- drm_edid_connector_update(connector, drm_edid);
- ret = drm_edid_connector_add_modes(connector);
- drm_edid_free(drm_edid);
+ drm_edid = drm_edid_read_ddc(connector, bridge->ddc);
+ if (!drm_edid)
+ dev_dbg(hdmi->dev, "failed to get edid\n");
- return ret;
+ return drm_edid;
}
static enum drm_mode_status
-rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
+rk3066_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
{
u32 vic = drm_match_cea_mode(mode);
@@ -492,82 +484,19 @@ rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
return MODE_BAD;
}
-static struct drm_encoder *
-rk3066_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
- struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
-
- return &hdmi->encoder.encoder;
-}
-
-static int
-rk3066_hdmi_probe_single_connector_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
-{
- if (maxX > 1920)
- maxX = 1920;
- if (maxY > 1080)
- maxY = 1080;
-
- return drm_helper_probe_single_connector_modes(connector, maxX, maxY);
-}
-
-static void rk3066_hdmi_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs rk3066_hdmi_connector_funcs = {
- .fill_modes = rk3066_hdmi_probe_single_connector_modes,
- .detect = rk3066_hdmi_connector_detect,
- .destroy = rk3066_hdmi_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const
-struct drm_connector_helper_funcs rk3066_hdmi_connector_helper_funcs = {
- .get_modes = rk3066_hdmi_connector_get_modes,
- .mode_valid = rk3066_hdmi_connector_mode_valid,
- .best_encoder = rk3066_hdmi_connector_best_encoder,
+static const struct drm_bridge_funcs rk3066_hdmi_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = rk3066_hdmi_bridge_atomic_enable,
+ .atomic_disable = rk3066_hdmi_bridge_atomic_disable,
+ .detect = rk3066_hdmi_bridge_detect,
+ .edid_read = rk3066_hdmi_bridge_edid_read,
+ .hdmi_clear_infoframe = rk3066_hdmi_bridge_clear_infoframe,
+ .hdmi_write_infoframe = rk3066_hdmi_bridge_write_infoframe,
+ .mode_valid = rk3066_hdmi_bridge_mode_valid,
};
-static int
-rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
-{
- struct drm_encoder *encoder = &hdmi->encoder.encoder;
- struct device *dev = hdmi->dev;
-
- encoder->possible_crtcs =
- drm_of_find_possible_crtcs(drm, dev->of_node);
-
- /*
- * If we failed to find the CRTC(s) which this encoder is
- * supposed to be connected to, it's because the CRTC has
- * not been registered yet. Defer probing, and hope that
- * the required CRTC is added later.
- */
- if (encoder->possible_crtcs == 0)
- return -EPROBE_DEFER;
-
- drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
-
- hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
- drm_connector_helper_add(&hdmi->connector,
- &rk3066_hdmi_connector_helper_funcs);
- drm_connector_init_with_ddc(drm, &hdmi->connector,
- &rk3066_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- hdmi->ddc);
-
- drm_connector_attach_encoder(&hdmi->connector, encoder);
-
- return 0;
-}
static irqreturn_t rk3066_hdmi_hardirq(int irq, void *dev_id)
{
@@ -597,7 +526,7 @@ static irqreturn_t rk3066_hdmi_irq(int irq, void *dev_id)
{
struct rk3066_hdmi *hdmi = dev_id;
- drm_helper_hpd_irq_event(hdmi->connector.dev);
+ drm_helper_hpd_irq_event(hdmi->connector->dev);
return IRQ_HANDLED;
}
@@ -720,7 +649,7 @@ static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
strscpy(adap->name, "RK3066 HDMI", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
- ret = i2c_add_adapter(adap);
+ ret = devm_i2c_add_adapter(hdmi->dev, adap);
if (ret) {
DRM_DEV_ERROR(hdmi->dev, "cannot add %s I2C adapter\n",
adap->name);
@@ -735,6 +664,66 @@ static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
return adap;
}
+static int
+rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
+{
+ struct drm_encoder *encoder = &hdmi->encoder.encoder;
+ struct device *dev = hdmi->dev;
+ int ret;
+
+ encoder->possible_crtcs =
+ drm_of_find_possible_crtcs(drm, dev->of_node);
+
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (encoder->possible_crtcs == 0)
+ return -EPROBE_DEFER;
+
+ drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
+
+ hdmi->bridge.driver_private = hdmi;
+ hdmi->bridge.funcs = &rk3066_hdmi_bridge_funcs;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HPD;
+ hdmi->bridge.of_node = hdmi->dev->of_node;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ hdmi->bridge.vendor = "Rockchip";
+ hdmi->bridge.product = "RK3066 HDMI";
+
+ hdmi->bridge.ddc = rk3066_hdmi_i2c_adapter(hdmi);
+ if (IS_ERR(hdmi->bridge.ddc))
+ return PTR_ERR(hdmi->bridge.ddc);
+
+ ret = devm_drm_bridge_add(dev, &hdmi->bridge);
+ if (ret)
+ return ret;
+
+ ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ret;
+
+ hdmi->connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(hdmi->connector)) {
+ ret = PTR_ERR(hdmi->connector);
+ dev_err(hdmi->dev, "failed to init bridge connector: %d\n", ret);
+ return ret;
+ }
+
+ drm_connector_attach_encoder(hdmi->connector, encoder);
+
+ return 0;
+}
+
static int rk3066_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -781,13 +770,6 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
/* internal hclk = hdmi_hclk / 25 */
hdmi_writeb(hdmi, HDMI_INTERNAL_CLK_DIVIDER, 25);
- hdmi->ddc = rk3066_hdmi_i2c_adapter(hdmi);
- if (IS_ERR(hdmi->ddc)) {
- ret = PTR_ERR(hdmi->ddc);
- hdmi->ddc = NULL;
- goto err_disable_hclk;
- }
-
rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);
usleep_range(999, 1000);
hdmi_writeb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_HOTPLUG);
@@ -798,7 +780,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
ret = rk3066_hdmi_register(drm, hdmi);
if (ret)
- goto err_disable_i2c;
+ goto err_disable_hclk;
dev_set_drvdata(dev, hdmi);
@@ -813,10 +795,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
return 0;
err_cleanup_hdmi:
- hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
-err_disable_i2c:
- i2c_put_adapter(hdmi->ddc);
err_disable_hclk:
clk_disable_unprepare(hdmi->hclk);
@@ -828,10 +807,8 @@ static void rk3066_hdmi_unbind(struct device *dev, struct device *master,
{
struct rk3066_hdmi *hdmi = dev_get_drvdata(dev);
- hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
- i2c_put_adapter(hdmi->ddc);
clk_disable_unprepare(hdmi->hclk);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index e3596e2b557d..ba6b0528d1e5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -733,11 +733,10 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
WARN_ON(vop->event);
- if (crtc->state->self_refresh_active)
+ if (crtc->state->self_refresh_active) {
rockchip_drm_set_win_enabled(crtc, false);
-
- if (crtc->state->self_refresh_active)
goto out;
+ }
mutex_lock(&vop->vop_lock);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index 680bedbb770e..fc3ecb9fcd95 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -710,6 +710,7 @@ enum dst_factor_mode {
#define VOP2_COLOR_KEY_MASK BIT(31)
+#define RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL GENMASK(31, 30)
#define RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD BIT(28)
#define RK3568_OVL_CTRL__YUV_MODE(vp) BIT(vp)
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 14958d6b3d2e..32c4ed685739 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -1754,9 +1754,9 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_DP1:
- die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_DP1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_DP1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP1_MUX, vp->id);
dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL;
dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags);
break;
@@ -2070,7 +2070,10 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL);
- ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
+ ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
+ ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL;
+ ovl_ctrl |= FIELD_PREP(RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL, vp->id);
+
if (vcstate->yuv_overlay)
ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id);
else
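The layer-mixer hunk above stops latching layer-select updates immediately (LAYERSEL_REGDONE_IMD) and instead selects the latching video port through the new two-bit LAYERSEL_REGDONE_SEL field. A minimal sketch of the GENMASK()/FIELD_PREP() pattern it relies on; the EXAMPLE_* names are illustrative, not from the patch.

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Illustrative field, mirroring the layout of RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL above. */
#define EXAMPLE_REGDONE_SEL	GENMASK(31, 30)

/* Clear the old selection, then encode the video port id into the two-bit field. */
static u32 example_set_regdone_port(u32 ovl_ctrl, unsigned int port_id)
{
	ovl_ctrl &= ~EXAMPLE_REGDONE_SEL;
	ovl_ctrl |= FIELD_PREP(EXAMPLE_REGDONE_SEL, port_id);

	return ovl_ctrl;
}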
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 4e2099d86517..d1f788763318 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -906,21 +906,21 @@ static const struct vop_data rk3366_vop = {
static const struct vop_output rk3399_output = {
.dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
- .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
- .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
- .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
- .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .rgb_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 31),
.dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
+ .rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 28),
.dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
- .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
- .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
- .edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
- .mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
- .mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
+ .rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12),
+ .hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13),
+ .edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14),
+ .mipi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 15),
+ .mipi_dual_channel_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 3),
};
static const struct vop_common rk3399_common = {
@@ -975,23 +975,23 @@ static const struct vop_win_phy rk3399_win0_data = {
.data_formats = formats_win_full_10,
.nformats = ARRAY_SIZE(formats_win_full_10),
.format_modifiers = format_modifiers_win_full_afbc,
- .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
- .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
- .fmt_10 = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 4),
- .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
- .uv_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 15),
- .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
- .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
- .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
- .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
- .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
- .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
- .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
- .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
- .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
- .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
- .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
- .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
+ .enable = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(RK3399_WIN0_CTRL0, 0x7, 1),
+ .fmt_10 = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 4),
+ .rb_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 12),
+ .uv_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 15),
+ .x_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 21),
+ .y_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 22),
+ .act_info = VOP_REG(RK3399_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3399_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3399_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3399_WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3399_WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(RK3399_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(RK3399_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3399_WIN0_CTRL2, 0xff, 0),
};
static const struct vop_win_phy rk3399_win1_data = {
@@ -999,23 +999,23 @@ static const struct vop_win_phy rk3399_win1_data = {
.data_formats = formats_win_full_10,
.nformats = ARRAY_SIZE(formats_win_full_10),
.format_modifiers = format_modifiers_win_full,
- .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
- .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
- .fmt_10 = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 4),
- .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
- .uv_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 15),
- .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
- .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
- .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
- .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
- .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
- .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
- .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
- .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
- .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
- .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
- .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
- .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
+ .enable = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(RK3399_WIN0_CTRL0, 0x7, 1),
+ .fmt_10 = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 4),
+ .rb_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 12),
+ .uv_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 15),
+ .x_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 21),
+ .y_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 22),
+ .act_info = VOP_REG(RK3399_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3399_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3399_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3399_WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3399_WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(RK3399_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(RK3399_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3399_WIN0_CTRL2, 0xff, 0),
};
/*
diff --git a/drivers/gpu/drm/scheduler/.kunitconfig b/drivers/gpu/drm/scheduler/.kunitconfig
new file mode 100644
index 000000000000..cece53609fcf
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/.kunitconfig
@@ -0,0 +1,12 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_SCHED_KUNIT_TEST=y
+CONFIG_EXPERT=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCKDEP=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_LIST=y
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
index 53863621829f..6e13e4c63e9d 100644
--- a/drivers/gpu/drm/scheduler/Makefile
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -23,3 +23,5 @@
gpu-sched-y := sched_main.o sched_fence.o sched_entity.o
obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
+
+obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index bfea608a7106..829579c41c6b 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -828,11 +828,15 @@ EXPORT_SYMBOL(drm_sched_job_init);
*
* This arms a scheduler job for execution. Specifically it initializes the
* &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
- * or other places that need to track the completion of this job.
+ * or other places that need to track the completion of this job. It also
+ * initializes sequence numbers, which are fundamental for fence ordering.
*
* Refer to drm_sched_entity_push_job() documentation for locking
* considerations.
*
+ * Once this function has been called, you *must* submit @job with
+ * drm_sched_entity_push_job().
+ *
* This can only be called if drm_sched_job_init() succeeded.
*/
void drm_sched_job_arm(struct drm_sched_job *job)
@@ -1015,13 +1019,14 @@ EXPORT_SYMBOL(drm_sched_job_has_dependency);
* Cleans up the resources allocated with drm_sched_job_init().
*
* Drivers should call this from their error unwind code if @job is aborted
- * before it was submitted to an entity with drm_sched_entity_push_job().
+ * before drm_sched_job_arm() is called.
*
- * Since calling drm_sched_job_arm() causes the job's fences to be initialized,
- * it is up to the driver to ensure that fences that were exposed to external
- * parties get signaled. drm_sched_job_cleanup() does not ensure this.
+ * drm_sched_job_arm() is a point of no return, since it initializes the fences
+ * and their sequence numbers. Once that function has been called, the job
+ * *must* be submitted with drm_sched_entity_push_job() and can no longer be
+ * aborted by calling drm_sched_job_cleanup().
*
- * This function must also be called in &struct drm_sched_backend_ops.free_job
+ * This function should be called in the &drm_sched_backend_ops.free_job callback.
*/
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
@@ -1029,10 +1034,15 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
unsigned long index;
if (kref_read(&job->s_fence->finished.refcount)) {
- /* drm_sched_job_arm() has been called */
+ /* The job has been processed by the scheduler, i.e.,
+ * drm_sched_job_arm() and drm_sched_entity_push_job() have
+ * been called.
+ */
dma_fence_put(&job->s_fence->finished);
} else {
- /* aborted job before arming */
+ /* The job was aborted before it has been committed to be run;
+ * notably, drm_sched_job_arm() has not been called.
+ */
drm_sched_fence_free(job->s_fence);
}
@@ -1220,20 +1230,23 @@ static void drm_sched_run_job_work(struct work_struct *w)
drm_sched_job_begin(sched_job);
trace_drm_run_job(sched_job, entity);
+ /*
+ * The run_job() callback must by definition return a fence whose
+ * refcount has been incremented for the scheduler already.
+ */
fence = sched->ops->run_job(sched_job);
complete_all(&entity->entity_idle);
drm_sched_fence_scheduled(s_fence, fence);
if (!IS_ERR_OR_NULL(fence)) {
- /* Drop for original kref_init of the fence */
- dma_fence_put(fence);
-
r = dma_fence_add_callback(fence, &sched_job->cb,
drm_sched_job_done_cb);
if (r == -ENOENT)
drm_sched_job_done(sched_job, fence->error);
else if (r)
DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
+
+ dma_fence_put(fence);
} else {
drm_sched_job_done(sched_job, IS_ERR(fence) ?
PTR_ERR(fence) : 0);
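The documentation hunks above tighten the job lifecycle contract: a job that fails after drm_sched_job_init() but before drm_sched_job_arm() may still be unwound with drm_sched_job_cleanup(), whereas an armed job must be pushed with drm_sched_entity_push_job() and is then cleaned up from the free_job callback. The run_job hunk makes the related reference convention explicit: the fence returned by run_job() carries a reference owned by the scheduler, which is now dropped only after the completion callback has been installed. A minimal driver-side sketch of that flow; struct my_job, my_submit(), my_begin_job() and my_free_job() are hypothetical names, not from the patch.

#include <drm/gpu_scheduler.h>
#include <linux/container_of.h>
#include <linux/slab.h>

struct my_job {
	struct drm_sched_job base;
	/* driver-specific payload would live here */
};

/* Hypothetical driver-side setup step; a stub so the sketch is self-contained. */
static int my_begin_job(struct my_job *job)
{
	return 0;
}

static int my_submit(struct my_job *job, struct drm_sched_entity *entity)
{
	int ret;

	ret = drm_sched_job_init(&job->base, entity, 1, NULL);
	if (ret)
		return ret;

	ret = my_begin_job(job);
	if (ret) {
		/* Not yet armed: unwinding with cleanup is still allowed here. */
		drm_sched_job_cleanup(&job->base);
		return ret;
	}

	/* Point of no return: once armed, the job must be pushed. */
	drm_sched_job_arm(&job->base);
	drm_sched_entity_push_job(&job->base);

	return 0;
}

/* For jobs that were pushed, cleanup happens from the free_job callback. */
static void my_free_job(struct drm_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	drm_sched_job_cleanup(sched_job);
	kfree(job);
}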
diff --git a/drivers/gpu/drm/scheduler/tests/Makefile b/drivers/gpu/drm/scheduler/tests/Makefile
new file mode 100644
index 000000000000..5bf707bad373
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+drm-sched-tests-y := \
+ mock_scheduler.o \
+ tests_basic.o
+
+obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += drm-sched-tests.o
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
new file mode 100644
index 000000000000..f999c8859cf7
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Valve Corporation */
+
+#include "sched_tests.h"
+
+/*
+ * Here we implement the mock "GPU" (or the scheduler backend) which is used by
+ * the DRM scheduler unit tests in order to exercise the core functionality.
+ *
+ * Test cases are implemented in a separate file.
+ */
+
+/**
+ * drm_mock_sched_entity_new - Create a new mock scheduler entity
+ *
+ * @test: KUnit test owning the entity
+ * @priority: Scheduling priority
+ * @sched: Mock scheduler on which the entity can be scheduled
+ *
+ * Returns: New mock scheduler entity with allocation managed by the test
+ */
+struct drm_mock_sched_entity *
+drm_mock_sched_entity_new(struct kunit *test,
+ enum drm_sched_priority priority,
+ struct drm_mock_scheduler *sched)
+{
+ struct drm_mock_sched_entity *entity;
+ struct drm_gpu_scheduler *drm_sched;
+ int ret;
+
+ entity = kunit_kzalloc(test, sizeof(*entity), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, entity);
+
+ drm_sched = &sched->base;
+ ret = drm_sched_entity_init(&entity->base,
+ priority,
+ &drm_sched, 1,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ entity->test = test;
+
+ return entity;
+}
+
+/**
+ * drm_mock_sched_entity_free - Destroys a mock scheduler entity
+ *
+ * @entity: Entity to destroy
+ *
+ * To be used from the test cases once done with the entity.
+ */
+void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity)
+{
+ drm_sched_entity_destroy(&entity->base);
+}
+
+static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(job->base.sched);
+
+ lockdep_assert_held(&sched->lock);
+
+ job->flags |= DRM_MOCK_SCHED_JOB_DONE;
+ list_move_tail(&job->link, &sched->done_list);
+ dma_fence_signal(&job->hw_fence);
+ complete(&job->done);
+}
+
+static enum hrtimer_restart
+drm_mock_sched_job_signal_timer(struct hrtimer *hrtimer)
+{
+ struct drm_mock_sched_job *job =
+ container_of(hrtimer, typeof(*job), timer);
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(job->base.sched);
+ struct drm_mock_sched_job *next;
+ ktime_t now = ktime_get();
+ unsigned long flags;
+ LIST_HEAD(signal);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &sched->job_list, link) {
+ if (!job->duration_us)
+ break;
+
+ if (ktime_before(now, job->finish_at))
+ break;
+
+ sched->hw_timeline.cur_seqno = job->hw_fence.seqno;
+ drm_mock_sched_job_complete(job);
+ }
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * drm_mock_sched_job_new - Create a new mock scheduler job
+ *
+ * @test: KUnit test owning the job
+ * @entity: Scheduler entity of the job
+ *
+ * Returns: New mock scheduler job with allocation managed by the test
+ */
+struct drm_mock_sched_job *
+drm_mock_sched_job_new(struct kunit *test,
+ struct drm_mock_sched_entity *entity)
+{
+ struct drm_mock_sched_job *job;
+ int ret;
+
+ job = kunit_kzalloc(test, sizeof(*job), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, job);
+
+ ret = drm_sched_job_init(&job->base,
+ &entity->base,
+ 1,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ job->test = test;
+
+ init_completion(&job->done);
+ spin_lock_init(&job->lock);
+ INIT_LIST_HEAD(&job->link);
+ hrtimer_setup(&job->timer, drm_mock_sched_job_signal_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
+ return job;
+}
+
+static const char *drm_mock_sched_hw_fence_driver_name(struct dma_fence *fence)
+{
+ return "drm_mock_sched";
+}
+
+static const char *
+drm_mock_sched_hw_fence_timeline_name(struct dma_fence *fence)
+{
+ struct drm_mock_sched_job *job =
+ container_of(fence, typeof(*job), hw_fence);
+
+ return (const char *)job->base.sched->name;
+}
+
+static void drm_mock_sched_hw_fence_release(struct dma_fence *fence)
+{
+ struct drm_mock_sched_job *job =
+ container_of(fence, typeof(*job), hw_fence);
+
+ hrtimer_cancel(&job->timer);
+
+ /* Containing job is freed by the kunit framework */
+}
+
+static const struct dma_fence_ops drm_mock_sched_hw_fence_ops = {
+ .get_driver_name = drm_mock_sched_hw_fence_driver_name,
+ .get_timeline_name = drm_mock_sched_hw_fence_timeline_name,
+ .release = drm_mock_sched_hw_fence_release,
+};
+
+static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(sched_job->sched);
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+
+ dma_fence_init(&job->hw_fence,
+ &drm_mock_sched_hw_fence_ops,
+ &job->lock,
+ sched->hw_timeline.context,
+ atomic_inc_return(&sched->hw_timeline.next_seqno));
+
+ dma_fence_get(&job->hw_fence); /* Reference for the job_list */
+
+ spin_lock_irq(&sched->lock);
+ if (job->duration_us) {
+ ktime_t prev_finish_at = 0;
+
+ if (!list_empty(&sched->job_list)) {
+ struct drm_mock_sched_job *prev =
+ list_last_entry(&sched->job_list, typeof(*prev),
+ link);
+
+ prev_finish_at = prev->finish_at;
+ }
+
+ if (!prev_finish_at)
+ prev_finish_at = ktime_get();
+
+ job->finish_at = ktime_add_us(prev_finish_at, job->duration_us);
+ }
+ list_add_tail(&job->link, &sched->job_list);
+ if (job->finish_at)
+ hrtimer_start(&job->timer, job->finish_at, HRTIMER_MODE_ABS);
+ spin_unlock_irq(&sched->lock);
+
+ return &job->hw_fence;
+}
+
+static enum drm_gpu_sched_stat
+mock_sched_timedout_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+
+ job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT;
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+static void mock_sched_free_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(sched_job->sched);
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+ unsigned long flags;
+
+ /* Remove from the scheduler done list. */
+ spin_lock_irqsave(&sched->lock, flags);
+ list_del(&job->link);
+ spin_unlock_irqrestore(&sched->lock, flags);
+ dma_fence_put(&job->hw_fence);
+
+ drm_sched_job_cleanup(sched_job);
+
+ /* Mock job itself is freed by the kunit framework. */
+}
+
+static const struct drm_sched_backend_ops drm_mock_scheduler_ops = {
+ .run_job = mock_sched_run_job,
+ .timedout_job = mock_sched_timedout_job,
+ .free_job = mock_sched_free_job
+};
+
+/**
+ * drm_mock_sched_new - Create a new mock scheduler
+ *
+ * @test: KUnit test owning the job
+ * @timeout: Job timeout to set
+ *
+ * Returns: New mock scheduler with allocation managed by the test
+ */
+struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
+{
+ struct drm_sched_init_args args = {
+ .ops = &drm_mock_scheduler_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = U32_MAX,
+ .hang_limit = 1,
+ .timeout = timeout,
+ .name = "drm-mock-scheduler",
+ };
+ struct drm_mock_scheduler *sched;
+ int ret;
+
+ sched = kunit_kzalloc(test, sizeof(*sched), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, sched);
+
+ ret = drm_sched_init(&sched->base, &args);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ sched->test = test;
+ sched->hw_timeline.context = dma_fence_context_alloc(1);
+ atomic_set(&sched->hw_timeline.next_seqno, 0);
+ INIT_LIST_HEAD(&sched->job_list);
+ INIT_LIST_HEAD(&sched->done_list);
+ spin_lock_init(&sched->lock);
+
+ return sched;
+}
+
+/**
+ * drm_mock_sched_fini - Destroys a mock scheduler
+ *
+ * @sched: Scheduler to destroy
+ *
+ * To be used from the test cases once done with the scheduler.
+ */
+void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
+{
+ struct drm_mock_sched_job *job, *next;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ drm_sched_wqueue_stop(&sched->base);
+
+ /* Force complete all unfinished jobs. */
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &sched->job_list, link)
+ list_move_tail(&job->link, &list);
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ list_for_each_entry(job, &list, link)
+ hrtimer_cancel(&job->timer);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &list, link)
+ drm_mock_sched_job_complete(job);
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ /*
+ * Free completed jobs and jobs not yet processed by the DRM scheduler
+ * free worker.
+ */
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &sched->done_list, link)
+ list_move_tail(&job->link, &list);
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ list_for_each_entry_safe(job, next, &list, link)
+ mock_sched_free_job(&job->base);
+
+ drm_sched_fini(&sched->base);
+}
+
+/**
+ * drm_mock_sched_advance - Advances the mock scheduler timeline
+ *
+ * @sched: Scheduler timeline to advance
+ * @num: By how many jobs to advance
+ *
+ * Advancing the scheduler timeline by a number of seqnos will trigger
+ * signalling of the hardware fences and unlinking the jobs from the internal
+ * scheduler tracking.
+ *
+ * This can be used from test cases which want complete control of the simulated
+ * job execution timing. For example, a single job submitted with no set
+ * duration will never complete until the test case advances the timeline by
+ * one.
+ */
+unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
+ unsigned int num)
+{
+ struct drm_mock_sched_job *job, *next;
+ unsigned int found = 0;
+ unsigned long flags;
+ LIST_HEAD(signal);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ if (WARN_ON_ONCE(sched->hw_timeline.cur_seqno + num <
+ sched->hw_timeline.cur_seqno))
+ goto unlock;
+ sched->hw_timeline.cur_seqno += num;
+ list_for_each_entry_safe(job, next, &sched->job_list, link) {
+ if (sched->hw_timeline.cur_seqno < job->hw_fence.seqno)
+ break;
+
+ drm_mock_sched_job_complete(job);
+ found++;
+ }
+unlock:
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ return found;
+}
+
+MODULE_DESCRIPTION("DRM mock scheduler and tests");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
new file mode 100644
index 000000000000..27caf8285fb7
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _SCHED_TESTS_H_
+#define _SCHED_TESTS_H_
+
+#include <kunit/test.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/dma-fence.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include <drm/gpu_scheduler.h>
+
+/*
+ * DOC: Mock DRM scheduler data structures
+ *
+ * drm_mock_* data structures are used to implement a mock "GPU".
+ *
+ * They subclass the core DRM scheduler objects and add their data on top, which
+ * enables tracking the submitted jobs and simulating their execution with the
+ * attributes as specified by the test case.
+ */
+
+/**
+ * struct drm_mock_scheduler - implements a trivial mock GPU execution engine
+ *
+ * @base: DRM scheduler base class
+ * @test: Backpointer to the owning kunit test case
+ * @lock: Lock to protect the simulated @hw_timeline, @job_list and @done_list
+ * @job_list: List of jobs submitted to the mock GPU
+ * @done_list: List of jobs completed by the mock GPU
+ * @hw_timeline: Simulated hardware timeline with a @context, @next_seqno and
+ * @cur_seqno, used to implement the struct dma_fence that signals
+ * simulated job completion.
+ *
+ * Trivial mock GPU execution engine tracks submitted jobs and enables
+ * completing them strictly in submission order.
+ */
+struct drm_mock_scheduler {
+ struct drm_gpu_scheduler base;
+
+ struct kunit *test;
+
+ spinlock_t lock;
+ struct list_head job_list;
+ struct list_head done_list;
+
+ struct {
+ u64 context;
+ atomic_t next_seqno;
+ unsigned int cur_seqno;
+ } hw_timeline;
+};
+
+/**
+ * struct drm_mock_sched_entity - implements a mock GPU sched entity
+ *
+ * @base: DRM scheduler entity base class
+ * @test: Backpointer to the owning kunit test case
+ *
+ * Mock GPU sched entity is used by the test cases to submit jobs to the mock
+ * scheduler.
+ */
+struct drm_mock_sched_entity {
+ struct drm_sched_entity base;
+
+ struct kunit *test;
+};
+
+/**
+ * struct drm_mock_sched_job - implements a mock GPU job
+ *
+ * @base: DRM sched job base class
+ * @done: Completion signaled when the job finishes.
+ * @flags: Flags designating job state.
+ * @link: List head element used by job tracking by the drm_mock_scheduler
+ * @timer: Timer used for simulating job execution duration
+ * @duration_us: Simulated job duration in microseconds, or zero if in manual
+ * timeline advance mode
+ * @finish_at: Absolute time when the jobs with set duration will complete
+ * @lock: Lock used for @hw_fence
+ * @hw_fence: Fence returned to DRM scheduler as the hardware fence
+ * @test: Backpointer to the owning kunit test case
+ *
+ * Mock GPU sched job is used by the test cases to submit jobs to the mock
+ * scheduler.
+ */
+struct drm_mock_sched_job {
+ struct drm_sched_job base;
+
+ struct completion done;
+
+#define DRM_MOCK_SCHED_JOB_DONE 0x1
+#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
+ unsigned long flags;
+
+ struct list_head link;
+ struct hrtimer timer;
+
+ unsigned int duration_us;
+ ktime_t finish_at;
+
+ spinlock_t lock;
+ struct dma_fence hw_fence;
+
+ struct kunit *test;
+};
+
+static inline struct drm_mock_scheduler *
+drm_sched_to_mock_sched(struct drm_gpu_scheduler *sched)
+{
+ return container_of(sched, struct drm_mock_scheduler, base);
+};
+
+static inline struct drm_mock_sched_entity *
+drm_sched_entity_to_mock_entity(struct drm_sched_entity *sched_entity)
+{
+ return container_of(sched_entity, struct drm_mock_sched_entity, base);
+};
+
+static inline struct drm_mock_sched_job *
+drm_sched_job_to_mock_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct drm_mock_sched_job, base);
+};
+
+struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test,
+ long timeout);
+void drm_mock_sched_fini(struct drm_mock_scheduler *sched);
+unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
+ unsigned int num);
+
+struct drm_mock_sched_entity *
+drm_mock_sched_entity_new(struct kunit *test,
+ enum drm_sched_priority priority,
+ struct drm_mock_scheduler *sched);
+void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity);
+
+struct drm_mock_sched_job *
+drm_mock_sched_job_new(struct kunit *test,
+ struct drm_mock_sched_entity *entity);
+
+/**
+ * drm_mock_sched_job_submit - Arm and submit a job in one go
+ *
+ * @job: Job to arm and submit
+ */
+static inline void drm_mock_sched_job_submit(struct drm_mock_sched_job *job)
+{
+ drm_sched_job_arm(&job->base);
+ drm_sched_entity_push_job(&job->base);
+}
+
+/**
+ * drm_mock_sched_job_set_duration_us - Set a job duration
+ *
+ * @job: Job to set the duration for
+ * @duration_us: Duration in microseconds
+ *
+ * Jobs with duration set will be automatically completed by the mock scheduler
+ * as the timeline progresses, unless a job without a set duration is
+ * encountered in the timeline, in which case calling drm_mock_sched_advance()
+ * will be required to bump the timeline.
+ */
+static inline void
+drm_mock_sched_job_set_duration_us(struct drm_mock_sched_job *job,
+ unsigned int duration_us)
+{
+ job->duration_us = duration_us;
+}
+
+/**
+ * drm_mock_sched_job_is_finished - Check if a job is finished
+ *
+ * @job: Job to check
+ *
+ * Returns: true if finished
+ */
+static inline bool
+drm_mock_sched_job_is_finished(struct drm_mock_sched_job *job)
+{
+ return job->flags & DRM_MOCK_SCHED_JOB_DONE;
+}
+
+/**
+ * drm_mock_sched_job_wait_finished - Wait until a job is finished
+ *
+ * @job: Job to wait for
+ * @timeout: Wait time in jiffies
+ *
+ * Returns: true if finished within the timeout provided, otherwise false
+ */
+static inline bool
+drm_mock_sched_job_wait_finished(struct drm_mock_sched_job *job, long timeout)
+{
+ if (job->flags & DRM_MOCK_SCHED_JOB_DONE)
+ return true;
+
+ return wait_for_completion_timeout(&job->done, timeout) != 0;
+}
+
+/**
+ * drm_mock_sched_job_wait_scheduled - Wait until a job is scheduled
+ *
+ * @job: Job to wait for
+ * @timeout: Wait time in jiffies
+ *
+ * Returns: true if scheduled within the timeout provided, otherwise false
+ */
+static inline bool
+drm_mock_sched_job_wait_scheduled(struct drm_mock_sched_job *job, long timeout)
+{
+ KUNIT_ASSERT_EQ(job->test, job->flags & DRM_MOCK_SCHED_JOB_DONE, 0);
+
+ return dma_fence_wait_timeout(&job->base.s_fence->scheduled,
+ false,
+ timeout) != 0;
+}
+
+#endif
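The helpers documented in this header compose into tests as sketched below; this is illustrative only, and the basic tests added in the next file follow essentially the same shape: create a mock scheduler and entity, build a job, give it a simulated duration (or drive the timeline manually with drm_mock_sched_advance()), submit it, and wait for completion.

#include <linux/jiffies.h>
#include <linux/sched.h>

#include "sched_tests.h"

/* Illustrative KUnit case, not part of the patch. */
static void example_mock_sched_smoke(struct kunit *test)
{
	struct drm_mock_scheduler *sched;
	struct drm_mock_sched_entity *entity;
	struct drm_mock_sched_job *job;

	sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
	entity = drm_mock_sched_entity_new(test, DRM_SCHED_PRIORITY_NORMAL, sched);
	job = drm_mock_sched_job_new(test, entity);

	/* A 1ms simulated duration lets the mock backend complete the job on its own. */
	drm_mock_sched_job_set_duration_us(job, 1000);
	drm_mock_sched_job_submit(job);

	KUNIT_EXPECT_TRUE(test, drm_mock_sched_job_wait_finished(job, HZ));

	drm_mock_sched_entity_free(entity);
	drm_mock_sched_fini(sched);
}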
diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c
new file mode 100644
index 000000000000..7230057e0594
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Valve Corporation */
+
+#include <linux/delay.h>
+
+#include "sched_tests.h"
+
+/*
+ * DRM scheduler basic tests should check the basic functional correctness of
+ * the scheduler, including some very light smoke testing. More targeted tests,
+ * for example focusing on testing specific bugs and other more complicated test
+ * scenarios, should be implemented in separate source units.
+ */
+
+static int drm_sched_basic_init(struct kunit *test)
+{
+ test->priv = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+
+ return 0;
+}
+
+static void drm_sched_basic_exit(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+
+ drm_mock_sched_fini(sched);
+}
+
+static int drm_sched_timeout_init(struct kunit *test)
+{
+ test->priv = drm_mock_sched_new(test, HZ);
+
+ return 0;
+}
+
+static void drm_sched_basic_submit(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_sched_job *job;
+ unsigned int i;
+ bool done;
+
+ /*
+ * Submit one job to the scheduler and verify that it gets scheduled
+ * and completed only when the mock hw backend processes it.
+ */
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+ job = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ / 2);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+}
+
+struct drm_sched_basic_params {
+ const char *description;
+ unsigned int queue_depth;
+ unsigned int num_entities;
+ unsigned int job_us;
+ bool dep_chain;
+};
+
+static const struct drm_sched_basic_params drm_sched_basic_cases[] = {
+ {
+ .description = "A queue of jobs in a single entity",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 1,
+ },
+ {
+ .description = "A chain of dependent jobs across multiple entities",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 1,
+ .dep_chain = true,
+ },
+ {
+ .description = "Multiple independent job queues",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 4,
+ },
+ {
+ .description = "Multiple inter-dependent job queues",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 4,
+ .dep_chain = true,
+ },
+};
+
+static void
+drm_sched_basic_desc(const struct drm_sched_basic_params *params, char *desc)
+{
+ strscpy(desc, params->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(drm_sched_basic, drm_sched_basic_cases, drm_sched_basic_desc);
+
+static void drm_sched_basic_test(struct kunit *test)
+{
+ const struct drm_sched_basic_params *params = test->param_value;
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job, *prev = NULL;
+ struct drm_mock_sched_entity **entity;
+ unsigned int i, cur_ent = 0;
+ bool done;
+
+ entity = kunit_kcalloc(test, params->num_entities, sizeof(*entity),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, entity);
+
+ for (i = 0; i < params->num_entities; i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ for (i = 0; i < params->queue_depth; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= params->num_entities;
+ drm_mock_sched_job_set_duration_us(job, params->job_us);
+ if (params->dep_chain && prev)
+ drm_sched_job_add_dependency(&job->base,
+ dma_fence_get(&prev->base.s_fence->finished));
+ drm_mock_sched_job_submit(job);
+ prev = job;
+ }
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ for (i = 0; i < params->num_entities; i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static void drm_sched_basic_entity_cleanup(struct kunit *test)
+{
+ struct drm_mock_sched_job *job, *mid, *prev = NULL;
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity[4];
+ const unsigned int qd = 100;
+ unsigned int i, cur_ent = 0;
+ bool done;
+
+ /*
+ * Submit a queue of jobs across different entities with an explicit
+ * chain of dependencies between them and trigger entity cleanup while
+ * the queue is still being processed.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ if (prev)
+ drm_sched_job_add_dependency(&job->base,
+ dma_fence_get(&prev->base.s_fence->finished));
+ drm_mock_sched_job_submit(job);
+ if (i == qd / 2)
+ mid = job;
+ prev = job;
+ }
+
+ done = drm_mock_sched_job_wait_finished(mid, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ /* Exit with half of the queue still pending to be executed. */
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static struct kunit_case drm_sched_basic_tests[] = {
+ KUNIT_CASE(drm_sched_basic_submit),
+ KUNIT_CASE_PARAM(drm_sched_basic_test, drm_sched_basic_gen_params),
+ KUNIT_CASE(drm_sched_basic_entity_cleanup),
+ {}
+};
+
+static struct kunit_suite drm_sched_basic = {
+ .name = "drm_sched_basic_tests",
+ .init = drm_sched_basic_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_basic_tests,
+};
+
+static void drm_sched_basic_timeout(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_sched_job *job;
+ bool done;
+
+ /*
+ * Submit a single job against a scheduler with the timeout configured
+ * and verify that the timeout handling will run if the backend fails
+ * to complete it in time.
+ */
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+ job = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ / 2);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ KUNIT_ASSERT_EQ(test,
+ job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT,
+ 0);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ KUNIT_ASSERT_EQ(test,
+ job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT,
+ DRM_MOCK_SCHED_JOB_TIMEDOUT);
+
+ drm_mock_sched_entity_free(entity);
+}
+
+static struct kunit_case drm_sched_timeout_tests[] = {
+ KUNIT_CASE(drm_sched_basic_timeout),
+ {}
+};
+
+static struct kunit_suite drm_sched_timeout = {
+ .name = "drm_sched_basic_timeout_tests",
+ .init = drm_sched_timeout_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_timeout_tests,
+};
+
+static void drm_sched_priorities(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT];
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 100;
+ unsigned int i, cur_ent = 0;
+ enum drm_sched_priority p;
+ bool done;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * priorities.
+ */
+
+ BUILD_BUG_ON(DRM_SCHED_PRIORITY_KERNEL > DRM_SCHED_PRIORITY_LOW);
+ BUILD_BUG_ON(ARRAY_SIZE(entity) != DRM_SCHED_PRIORITY_COUNT);
+
+ for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++)
+ entity[p] = drm_mock_sched_entity_new(test, p, sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static void drm_sched_change_priority(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT];
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 1000;
+ unsigned int i, cur_ent = 0;
+ enum drm_sched_priority p;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * priorities and while waiting for them to complete, periodically keep
+ * changing their priorities.
+ *
+ * We set up the queue-depth (qd) and job duration so the priority
+ * changing loop has some time to interact with submissions to the
+ * backend and job completions as they progress.
+ */
+
+ for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++)
+ entity[p] = drm_mock_sched_entity_new(test, p, sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ do {
+ drm_sched_entity_set_priority(&entity[cur_ent]->base,
+ (entity[cur_ent]->base.priority + 1) %
+ DRM_SCHED_PRIORITY_COUNT);
+ cur_ent++;
+ cur_ent %= ARRAY_SIZE(entity);
+ usleep_range(200, 500);
+ } while (!drm_mock_sched_job_is_finished(job));
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static struct kunit_case drm_sched_priority_tests[] = {
+ KUNIT_CASE(drm_sched_priorities),
+ KUNIT_CASE(drm_sched_change_priority),
+ {}
+};
+
+static struct kunit_suite drm_sched_priority = {
+ .name = "drm_sched_basic_priority_tests",
+ .init = drm_sched_basic_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_priority_tests,
+};
+
+static void drm_sched_test_modify_sched(struct kunit *test)
+{
+ unsigned int i, cur_ent = 0, cur_sched = 0;
+ struct drm_mock_sched_entity *entity[13];
+ struct drm_mock_scheduler *sched[3];
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 1000;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * schedulers and while waiting for them to complete, periodically keep
+ * changing schedulers associated with each entity.
+ *
+ * We set up the queue-depth (qd) and job duration so the sched modify
+ * loop has some time to interact with submissions to the backend and
+ * job completions as they progress.
+ *
+ * For the number of schedulers and entities we use primes in order to
+ * perturb the entity->sched assignments with less of a regular pattern.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(sched); i++)
+ sched[i] = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched[i % ARRAY_SIZE(sched)]);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ do {
+ struct drm_gpu_scheduler *modify;
+
+ usleep_range(200, 500);
+ cur_ent++;
+ cur_ent %= ARRAY_SIZE(entity);
+ cur_sched++;
+ cur_sched %= ARRAY_SIZE(sched);
+ modify = &sched[cur_sched]->base;
+ drm_sched_entity_modify_sched(&entity[cur_ent]->base, &modify,
+ 1);
+ } while (!drm_mock_sched_job_is_finished(job));
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+
+ for (i = 0; i < ARRAY_SIZE(sched); i++)
+ drm_mock_sched_fini(sched[i]);
+}
+
+static struct kunit_case drm_sched_modify_sched_tests[] = {
+ KUNIT_CASE(drm_sched_test_modify_sched),
+ {}
+};
+
+static struct kunit_suite drm_sched_modify_sched = {
+ .name = "drm_sched_basic_modify_sched_tests",
+ .test_cases = drm_sched_modify_sched_tests,
+};
+
+static void drm_sched_test_credits(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_scheduler *sched;
+ struct drm_mock_sched_job *job[2];
+ bool done;
+ int i;
+
+ /*
+ * Check that the configured credit limit is respected.
+ */
+
+ sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+ sched->base.credit_limit = 1;
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ job[0] = drm_mock_sched_job_new(test, entity);
+ job[1] = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job[0]);
+ drm_mock_sched_job_submit(job[1]);
+
+ done = drm_mock_sched_job_wait_scheduled(job[0], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_scheduled(job[1], HZ);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_scheduled(job[1], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_finished(job[1], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+ drm_mock_sched_fini(sched);
+}
+
+static struct kunit_case drm_sched_credits_tests[] = {
+ KUNIT_CASE(drm_sched_test_credits),
+ {}
+};
+
+static struct kunit_suite drm_sched_credits = {
+ .name = "drm_sched_basic_credits_tests",
+ .test_cases = drm_sched_credits_tests,
+};
+
+kunit_test_suites(&drm_sched_basic,
+ &drm_sched_timeout,
+ &drm_sched_priority,
+ &drm_sched_modify_sched,
+ &drm_sched_credits);
diff --git a/drivers/gpu/drm/sitronix/Kconfig b/drivers/gpu/drm/sitronix/Kconfig
new file mode 100644
index 000000000000..c069d0d41775
--- /dev/null
+++ b/drivers/gpu/drm/sitronix/Kconfig
@@ -0,0 +1,51 @@
+config DRM_ST7571_I2C
+ tristate "DRM support for Sitronix ST7571 display panels (I2C)"
+ depends on DRM && I2C && MMU
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ help
+ DRM driver for Sitronix ST7571 panels controlled over I2C.
+
+	  If M is selected the module will be called st7571-i2c.
+
+config TINYDRM_ST7586
+ tristate
+ default n
+
+config DRM_ST7586
+ tristate "DRM support for Sitronix ST7586 display panels"
+ depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
+ select DRM_KMS_HELPER
+ select DRM_GEM_DMA_HELPER
+ select DRM_MIPI_DBI
+ default TINYDRM_ST7586
+ help
+ DRM driver for the following Sitronix ST7586 panels:
+ * LEGO MINDSTORMS EV3
+
+ If M is selected the module will be called st7586.
+
+config TINYDRM_ST7735R
+ tristate
+ default n
+
+config DRM_ST7735R
+ tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
+ depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
+ select DRM_KMS_HELPER
+ select DRM_GEM_DMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
+ default TINYDRM_ST7735R
+ help
+ DRM driver for Sitronix ST7715R/ST7735R with one of the following
+ LCDs:
+ * Jianda JD-T18003-T01 1.8" 128x160 TFT
+ * Okaya RH128128T 1.44" 128x128 TFT
+
+ If M is selected the module will be called st7735r.
+
diff --git a/drivers/gpu/drm/sitronix/Makefile b/drivers/gpu/drm/sitronix/Makefile
new file mode 100644
index 000000000000..bd139e5a6995
--- /dev/null
+++ b/drivers/gpu/drm/sitronix/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_DRM_ST7571_I2C) += st7571-i2c.o
+obj-$(CONFIG_DRM_ST7586) += st7586.o
+obj-$(CONFIG_DRM_ST7735R) += st7735r.o
diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c
new file mode 100644
index 000000000000..eec846892962
--- /dev/null
+++ b/drivers/gpu/drm/sitronix/st7571-i2c.c
@@ -0,0 +1,1000 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Sitronix ST7571, a 4 level gray scale dot matrix LCD controller
+ *
+ * Copyright (C) 2025 Marcus Folkesson <marcus.folkesson@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_module.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+
+#define ST7571_COMMAND_MODE (0x00)
+#define ST7571_DATA_MODE (0x40)
+
+/* Normal mode command set */
+#define ST7571_DISPLAY_OFF (0xae)
+#define ST7571_DISPLAY_ON (0xaf)
+#define ST7571_OSC_ON (0xab)
+#define ST7571_SET_COLUMN_LSB(c) (0x00 | FIELD_PREP(GENMASK(3, 0), (c)))
+#define ST7571_SET_COLUMN_MSB(c) (0x10 | FIELD_PREP(GENMASK(2, 0), (c) >> 4))
+#define ST7571_SET_COM0_LSB(x) (FIELD_PREP(GENMASK(6, 0), (x)))
+#define ST7571_SET_COM0_MSB (0x44)
+#define ST7571_SET_COM_SCAN_DIR(d) (0xc0 | FIELD_PREP(GENMASK(3, 3), (d)))
+#define ST7571_SET_CONTRAST_LSB(c) (FIELD_PREP(GENMASK(5, 0), (c)))
+#define ST7571_SET_CONTRAST_MSB (0x81)
+#define ST7571_SET_DISPLAY_DUTY_LSB(d) (FIELD_PREP(GENMASK(7, 0), (d)))
+#define ST7571_SET_DISPLAY_DUTY_MSB (0x48)
+#define ST7571_SET_ENTIRE_DISPLAY_ON(p) (0xa4 | FIELD_PREP(GENMASK(0, 0), (p)))
+#define ST7571_SET_LCD_BIAS(b) (0x50 | FIELD_PREP(GENMASK(2, 0), (b)))
+#define ST7571_SET_MODE_LSB(m) (FIELD_PREP(GENMASK(7, 2), (m)))
+#define ST7571_SET_MODE_MSB (0x38)
+#define ST7571_SET_PAGE(p) (0xb0 | FIELD_PREP(GENMASK(3, 0), (p)))
+#define ST7571_SET_POWER(p) (0x28 | FIELD_PREP(GENMASK(2, 0), (p)))
+#define ST7571_SET_REGULATOR_REG(r) (0x20 | FIELD_PREP(GENMASK(2, 0), (r)))
+#define ST7571_SET_REVERSE(r) (0xa6 | FIELD_PREP(GENMASK(0, 0), (r)))
+#define ST7571_SET_SEG_SCAN_DIR(d) (0xa0 | FIELD_PREP(GENMASK(0, 0), (d)))
+#define ST7571_SET_START_LINE_LSB(l) (FIELD_PREP(GENMASK(6, 0), (l)))
+#define ST7571_SET_START_LINE_MSB (0x40)
+
+/* Extension command set 3 */
+#define ST7571_COMMAND_SET_3 (0x7b)
+#define ST7571_SET_COLOR_MODE(c) (0x10 | FIELD_PREP(GENMASK(0, 0), (c)))
+#define ST7571_COMMAND_SET_NORMAL (0x00)
+
+#define ST7571_PAGE_HEIGHT 8
+
+#define DRIVER_NAME "st7571"
+#define DRIVER_DESC "ST7571 DRM driver"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+enum st7571_color_mode {
+ ST7571_COLOR_MODE_GRAY = 0,
+ ST7571_COLOR_MODE_BLACKWHITE = 1,
+};
+
+struct st7571_device;
+
+struct st7571_panel_constraints {
+ u32 min_nlines;
+ u32 max_nlines;
+ u32 min_ncols;
+ u32 max_ncols;
+ bool support_grayscale;
+};
+
+struct st7571_panel_data {
+ int (*init)(struct st7571_device *st7571);
+ struct st7571_panel_constraints constraints;
+};
+
+struct st7571_panel_format {
+ void (*prepare_buffer)(struct st7571_device *st7571,
+ const struct iosys_map *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state);
+ int (*update_rect)(struct drm_framebuffer *fb, struct drm_rect *rect);
+ enum st7571_color_mode mode;
+ const u8 nformats;
+ const u32 formats[];
+};
+
+struct st7571_device {
+ struct drm_device dev;
+
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+
+ struct drm_display_mode mode;
+
+ const struct st7571_panel_format *pformat;
+ const struct st7571_panel_data *pdata;
+ struct i2c_client *client;
+ struct gpio_desc *reset;
+ struct regmap *regmap;
+
+ /*
+ * Depending on the hardware design, the acknowledge signal may be hard to
+ * recognize as a valid logic "0" level.
+	 * Therefore, ignore NAK if possible to stay compatible with most hardware designs
+ * and off-the-shelf panels out there.
+ *
+	 * From section 6.4 MICROPROCESSOR INTERFACE in the datasheet:
+ *
+ * "By connecting SDA_OUT to SDA_IN externally, the SDA line becomes fully
+ * I2C interface compatible.
+ * Separating acknowledge-output from serial data
+ * input is advantageous for chip-on-glass (COG) applications. In COG
+ * applications, the ITO resistance and the pull-up resistor will form a
+ * voltage divider, which affects acknowledge-signal level. Larger ITO
+ * resistance will raise the acknowledged-signal level and system cannot
+	 * recognize this level as a valid logic "0" level. By separating SDA_IN from
+ * SDA_OUT, the IC can be used in a mode that ignores the acknowledge-bit.
+ * For applications which check acknowledge-bit, it is necessary to minimize
+ * the ITO resistance of the SDA_OUT trace to guarantee a valid low level."
+ *
+ */
+ bool ignore_nak;
+
+ bool grayscale;
+ u32 height_mm;
+ u32 width_mm;
+ u32 startline;
+ u32 nlines;
+ u32 ncols;
+ u32 bpp;
+
+ /* Intermediate buffer in LCD friendly format */
+ u8 *hwbuf;
+
+ /* Row of (transformed) pixels ready to be written to the display */
+ u8 *row;
+};
+
+static inline struct st7571_device *drm_to_st7571(struct drm_device *dev)
+{
+ return container_of(dev, struct st7571_device, dev);
+}
+
+static int st7571_regmap_write(void *context, const void *data, size_t count)
+{
+ struct i2c_client *client = context;
+ struct st7571_device *st7571 = i2c_get_clientdata(client);
+ int ret;
+
+ struct i2c_msg msg = {
+ .addr = st7571->client->addr,
+ .flags = st7571->ignore_nak ? I2C_M_IGNORE_NAK : 0,
+ .len = count,
+ .buf = (u8 *)data
+ };
+
+ ret = i2c_transfer(st7571->client->adapter, &msg, 1);
+
+ /*
+ * Unfortunately, there is no way to check if the transfer failed because of
+ * a NAK or something else as I2C bus drivers use different return values for NAK.
+ *
+ * However, if the transfer fails and ignore_nak is set, we know it is an error.
+ */
+ if (ret < 0 && st7571->ignore_nak)
+ return ret;
+
+ return 0;
+}
+
+/* The st7571 driver does not read registers but regmap expects a .read */
+static int st7571_regmap_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf, size_t val_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static int st7571_send_command_list(struct st7571_device *st7571,
+ const u8 *cmd_list, size_t len)
+{
+	int ret = 0;
+
+ for (int i = 0; i < len; i++) {
+ ret = regmap_write(st7571->regmap, ST7571_COMMAND_MODE, cmd_list[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static inline u8 st7571_transform_xy(const char *p, int x, int y)
+{
+ int xrest = x % 8;
+ u8 result = 0;
+
+ /*
+ * Transforms an (x, y) pixel coordinate into a vertical 8-bit
+ * column from the framebuffer. It calculates the corresponding byte in the
+ * framebuffer, extracts the bit at the given x position across 8 consecutive
+ * rows, and packs those bits into a single byte.
+ *
+ * Return an 8-bit value representing a vertical column of pixels.
+ */
+ x = x / 8;
+ y = (y / 8) * 8;
+
+ for (int i = 0; i < 8; i++) {
+ int row_idx = y + i;
+ u8 byte = p[row_idx * 16 + x];
+ u8 bit = (byte >> xrest) & 1;
+
+ result |= (bit << i);
+ }
+
+ return result;
+}
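+
+/*
+ * Worked example for st7571_transform_xy(): with the 16-byte row pitch used
+ * above (one bit per pixel across 128 columns), a call with x = 10 and y = 5
+ * gives xrest = 2, byte column 1 and page start 0, so bit 2 of p[1], p[17],
+ * ..., p[113] is packed LSB-first into the returned byte, one bit per row of
+ * the 8-pixel-high page.
+ */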
+
+static int st7571_set_position(struct st7571_device *st7571, int x, int y)
+{
+ u8 cmd_list[] = {
+ ST7571_SET_COLUMN_LSB(x),
+ ST7571_SET_COLUMN_MSB(x),
+ ST7571_SET_PAGE(y / ST7571_PAGE_HEIGHT),
+ };
+
+ return st7571_send_command_list(st7571, cmd_list, ARRAY_SIZE(cmd_list));
+}
+
+static int st7571_fb_clear_screen(struct st7571_device *st7571)
+{
+ u32 npixels = st7571->ncols * round_up(st7571->nlines, ST7571_PAGE_HEIGHT) * st7571->bpp;
+ char pixelvalue = 0x00;
+
+ for (int i = 0; i < npixels; i++)
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, &pixelvalue, 1);
+
+ return 0;
+}
+
+static void st7571_prepare_buffer_monochrome(struct st7571_device *st7571,
+ const struct iosys_map *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state)
+{
+ unsigned int dst_pitch;
+ struct iosys_map dst;
+ u32 size;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB8888:
+ dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
+ iosys_map_set_vaddr(&dst, st7571->hwbuf);
+
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
+ break;
+
+ case DRM_FORMAT_R1:
+ size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 8;
+ memcpy(st7571->hwbuf, vmap->vaddr, size);
+ break;
+ }
+}
+
+static void st7571_prepare_buffer_grayscale(struct st7571_device *st7571,
+ const struct iosys_map *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state)
+{
+ u32 size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 8;
+ unsigned int dst_pitch;
+ struct iosys_map dst;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB8888: /* Only support XRGB8888 in monochrome mode */
+ dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
+ iosys_map_set_vaddr(&dst, st7571->hwbuf);
+
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
+ break;
+
+ case DRM_FORMAT_R1:
+ size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 8;
+ memcpy(st7571->hwbuf, vmap->vaddr, size);
+ break;
+
+ case DRM_FORMAT_R2:
+ size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 4;
+ memcpy(st7571->hwbuf, vmap->vaddr, size);
+ break;
+	}
+}
+
+static int st7571_fb_update_rect_monochrome(struct drm_framebuffer *fb, struct drm_rect *rect)
+{
+ struct st7571_device *st7571 = drm_to_st7571(fb->dev);
+ char *row = st7571->row;
+
+ /* Align y to display page boundaries */
+ rect->y1 = round_down(rect->y1, ST7571_PAGE_HEIGHT);
+ rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines);
+
+ for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
+ for (int x = rect->x1; x < rect->x2; x++)
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+
+ st7571_set_position(st7571, rect->x1, y);
+
+		/* TODO: Investigate why we can't write multiple bytes at once */
+ for (int x = rect->x1; x < rect->x2; x++)
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
+ }
+
+ return 0;
+}
+
+static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct drm_rect *rect)
+{
+ struct st7571_device *st7571 = drm_to_st7571(fb->dev);
+ u32 format = fb->format->format;
+ char *row = st7571->row;
+ int x1;
+ int x2;
+
+ /* Align y to display page boundaries */
+ rect->y1 = round_down(rect->y1, ST7571_PAGE_HEIGHT);
+ rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines);
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+		/* Treated as monochrome (R1) */
+ fallthrough;
+ case DRM_FORMAT_R1:
+ x1 = rect->x1;
+ x2 = rect->x2;
+ break;
+ case DRM_FORMAT_R2:
+ x1 = rect->x1 * 2;
+ x2 = rect->x2 * 2;
+ break;
+ }
+
+ for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
+ for (int x = x1; x < x2; x++)
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+
+ st7571_set_position(st7571, rect->x1, y);
+
+		/* TODO: Investigate why we can't write multiple bytes at once */
+ for (int x = x1; x < x2; x++) {
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
+
+ /*
+ * As the display supports grayscale, all pixels must be written as two bits
+ * even if the format is monochrome.
+ *
+ * The bit values maps to the following grayscale:
+ * 0 0 = White
+ * 0 1 = Light gray
+ * 1 0 = Dark gray
+ * 1 1 = Black
+ *
+ * For monochrome formats, write the same value twice to get
+ * either a black or white pixel.
+ */
+ if (format == DRM_FORMAT_R1 || format == DRM_FORMAT_XRGB8888)
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int st7571_connector_get_modes(struct drm_connector *conn)
+{
+ struct st7571_device *st7571 = drm_to_st7571(conn->dev);
+
+ return drm_connector_helper_get_modes_fixed(conn, &st7571->mode);
+}
+
+static const struct drm_connector_helper_funcs st7571_connector_helper_funcs = {
+ .get_modes = st7571_connector_get_modes,
+};
+
+static const struct st7571_panel_format st7571_monochrome = {
+ .prepare_buffer = st7571_prepare_buffer_monochrome,
+ .update_rect = st7571_fb_update_rect_monochrome,
+ .mode = ST7571_COLOR_MODE_BLACKWHITE,
+ .formats = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_R1,
+ },
+ .nformats = 2,
+};
+
+static const struct st7571_panel_format st7571_grayscale = {
+ .prepare_buffer = st7571_prepare_buffer_grayscale,
+ .update_rect = st7571_fb_update_rect_grayscale,
+ .mode = ST7571_COLOR_MODE_GRAY,
+ .formats = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_R1,
+ DRM_FORMAT_R2,
+ },
+ .nformats = 3,
+};
+
+static const u64 st7571_primary_plane_fmtmods[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static int st7571_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+}
+
+static void st7571_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_device *dev = plane->dev;
+ struct drm_rect damage;
+ struct st7571_device *st7571 = drm_to_st7571(plane->dev);
+ int ret, idx;
+
+ if (!fb)
+ return; /* no framebuffer; plane is disabled */
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ st7571->pformat->prepare_buffer(st7571,
+ &shadow_plane_state->data[0],
+ fb, &damage,
+ &shadow_plane_state->fmtcnv_state);
+
+ st7571->pformat->update_rect(fb, &damage);
+ }
+
+ drm_dev_exit(idx);
+
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+}
+
+static void st7571_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct st7571_device *st7571 = drm_to_st7571(plane->dev);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ st7571_fb_clear_screen(st7571);
+ drm_dev_exit(idx);
+}
+
+static const struct drm_plane_helper_funcs st7571_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = st7571_primary_plane_helper_atomic_check,
+ .atomic_update = st7571_primary_plane_helper_atomic_update,
+ .atomic_disable = st7571_primary_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs st7571_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+/*
+ * CRTC
+ */
+
+static enum drm_mode_status st7571_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct st7571_device *st7571 = drm_to_st7571(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &st7571->mode);
+}
+
+static const struct drm_crtc_helper_funcs st7571_crtc_helper_funcs = {
+ .atomic_check = drm_crtc_helper_atomic_check,
+ .mode_valid = st7571_crtc_mode_valid,
+};
+
+static const struct drm_crtc_funcs st7571_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+/*
+ * Encoder
+ */
+
+static void st7571_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = encoder->dev;
+ struct st7571_device *st7571 = drm_to_st7571(drm);
+ u8 command = ST7571_DISPLAY_ON;
+ int ret;
+
+ ret = st7571->pdata->init(st7571);
+ if (ret)
+ return;
+
+ st7571_send_command_list(st7571, &command, 1);
+}
+
+static void st7571_encoder_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = encoder->dev;
+ struct st7571_device *st7571 = drm_to_st7571(drm);
+ u8 command = ST7571_DISPLAY_OFF;
+
+ st7571_send_command_list(st7571, &command, 1);
+}
+
+static const struct drm_encoder_funcs st7571_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_encoder_helper_funcs st7571_encoder_helper_funcs = {
+	.atomic_enable = st7571_encoder_atomic_enable,
+	.atomic_disable = st7571_encoder_atomic_disable,
+};
+
+/*
+ * Connector
+ */
+
+static const struct drm_connector_funcs st7571_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_mode_config_funcs st7571_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static struct drm_display_mode st7571_mode(struct st7571_device *st7571)
+{
+ struct drm_display_mode mode = {
+ DRM_SIMPLE_MODE(st7571->ncols, st7571->nlines,
+ st7571->width_mm, st7571->height_mm),
+ };
+
+ return mode;
+}
+
+static int st7571_mode_config_init(struct st7571_device *st7571)
+{
+ struct drm_device *dev = &st7571->dev;
+ const struct st7571_panel_constraints *constraints = &st7571->pdata->constraints;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
+
+ dev->mode_config.min_width = constraints->min_ncols;
+ dev->mode_config.min_height = constraints->min_nlines;
+ dev->mode_config.max_width = constraints->max_ncols;
+ dev->mode_config.max_height = constraints->max_nlines;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.funcs = &st7571_mode_config_funcs;
+
+ return 0;
+}
+
+static int st7571_plane_init(struct st7571_device *st7571,
+ const struct st7571_panel_format *pformat)
+{
+ struct drm_plane *primary_plane = &st7571->primary_plane;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &st7571_primary_plane_funcs,
+ pformat->formats,
+ pformat->nformats,
+ st7571_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(primary_plane, &st7571_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ return 0;
+}
+
+static int st7571_crtc_init(struct st7571_device *st7571)
+{
+ struct drm_plane *primary_plane = &st7571->primary_plane;
+ struct drm_crtc *crtc = &st7571->crtc;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &st7571_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &st7571_crtc_helper_funcs);
+
+ return 0;
+}
+
+static int st7571_encoder_init(struct st7571_device *st7571)
+{
+ struct drm_encoder *encoder = &st7571->encoder;
+ struct drm_crtc *crtc = &st7571->crtc;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder, &st7571_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &st7571_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ return 0;
+}
+
+static int st7571_connector_init(struct st7571_device *st7571)
+{
+ struct drm_connector *connector = &st7571->connector;
+ struct drm_encoder *encoder = &st7571->encoder;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_connector_init(dev, connector, &st7571_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &st7571_connector_helper_funcs);
+
+ return drm_connector_attach_encoder(connector, encoder);
+}
+
+DEFINE_DRM_GEM_FOPS(st7571_fops);
+
+static const struct drm_driver st7571_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+
+ .fops = &st7571_fops,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+};
+
+static const struct regmap_bus st7571_regmap_bus = {
+ .read = st7571_regmap_read,
+ .write = st7571_regmap_write,
+};
+
+static const struct regmap_config st7571_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_write = true,
+};
+
+static int st7571_validate_parameters(struct st7571_device *st7571)
+{
+ struct device *dev = st7571->dev.dev;
+ const struct st7571_panel_constraints *constraints = &st7571->pdata->constraints;
+
+ if (st7571->width_mm == 0) {
+ dev_err(dev, "Invalid panel width\n");
+ return -EINVAL;
+ }
+
+ if (st7571->height_mm == 0) {
+ dev_err(dev, "Invalid panel height\n");
+ return -EINVAL;
+ }
+
+ if (st7571->nlines < constraints->min_nlines ||
+ st7571->nlines > constraints->max_nlines) {
+ dev_err(dev, "Invalid timing configuration.\n");
+ return -EINVAL;
+ }
+
+ if (st7571->startline + st7571->nlines > constraints->max_nlines) {
+ dev_err(dev, "Invalid timing configuration.\n");
+ return -EINVAL;
+ }
+
+ if (st7571->ncols < constraints->min_ncols ||
+ st7571->ncols > constraints->max_ncols) {
+ dev_err(dev, "Invalid timing configuration.\n");
+ return -EINVAL;
+ }
+
+ if (st7571->grayscale && !constraints->support_grayscale) {
+ dev_err(dev, "Grayscale not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int st7571_parse_dt(struct st7571_device *st7571)
+{
+ struct device *dev = &st7571->client->dev;
+ struct device_node *np = dev->of_node;
+ struct display_timing dt;
+ int ret;
+
+ ret = of_get_display_timing(np, "panel-timing", &dt);
+ if (ret) {
+ dev_err(dev, "Failed to get display timing from DT\n");
+ return ret;
+ }
+
+ of_property_read_u32(np, "width-mm", &st7571->width_mm);
+ of_property_read_u32(np, "height-mm", &st7571->height_mm);
+ st7571->grayscale = of_property_read_bool(np, "sitronix,grayscale");
+
+ if (st7571->grayscale) {
+ st7571->pformat = &st7571_grayscale;
+ st7571->bpp = 2;
+ } else {
+ st7571->pformat = &st7571_monochrome;
+ st7571->bpp = 1;
+ }
+
+ st7571->startline = dt.vfront_porch.typ;
+ st7571->nlines = dt.vactive.typ;
+ st7571->ncols = dt.hactive.typ;
+
+ st7571->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(st7571->reset))
+ return PTR_ERR(st7571->reset);
+
+ return 0;
+}
+
+static void st7571_reset(struct st7571_device *st7571)
+{
+ gpiod_set_value_cansleep(st7571->reset, 1);
+ fsleep(20);
+ gpiod_set_value_cansleep(st7571->reset, 0);
+}
+
+static int st7571_lcd_init(struct st7571_device *st7571)
+{
+ /*
+ * Most of the initialization sequence is taken directly from the
+	 * reference initial code in the ST7571 datasheet.
+ */
+ u8 commands[] = {
+ ST7571_DISPLAY_OFF,
+
+ ST7571_SET_MODE_MSB,
+ ST7571_SET_MODE_LSB(0x2e),
+
+ ST7571_SET_SEG_SCAN_DIR(0),
+ ST7571_SET_COM_SCAN_DIR(1),
+
+ ST7571_SET_COM0_MSB,
+ ST7571_SET_COM0_LSB(0x00),
+
+ ST7571_SET_START_LINE_MSB,
+ ST7571_SET_START_LINE_LSB(st7571->startline),
+
+ ST7571_OSC_ON,
+ ST7571_SET_REGULATOR_REG(5),
+ ST7571_SET_CONTRAST_MSB,
+ ST7571_SET_CONTRAST_LSB(0x33),
+ ST7571_SET_LCD_BIAS(0x04),
+ ST7571_SET_DISPLAY_DUTY_MSB,
+ ST7571_SET_DISPLAY_DUTY_LSB(st7571->nlines),
+
+ ST7571_SET_POWER(0x4), /* Power Control, VC: ON, VR: OFF, VF: OFF */
+ ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */
+ ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */
+
+ ST7571_COMMAND_SET_3,
+ ST7571_SET_COLOR_MODE(st7571->pformat->mode),
+ ST7571_COMMAND_SET_NORMAL,
+
+ ST7571_SET_REVERSE(0),
+ ST7571_SET_ENTIRE_DISPLAY_ON(0),
+ };
+
+ /* Perform a reset before initializing the controller */
+ st7571_reset(st7571);
+
+ return st7571_send_command_list(st7571, commands, ARRAY_SIZE(commands));
+}
+
+static int st7571_probe(struct i2c_client *client)
+{
+ struct st7571_device *st7571;
+ struct drm_device *dev;
+ int ret;
+
+ st7571 = devm_drm_dev_alloc(&client->dev, &st7571_driver,
+ struct st7571_device, dev);
+ if (IS_ERR(st7571))
+ return PTR_ERR(st7571);
+
+ dev = &st7571->dev;
+ st7571->client = client;
+ i2c_set_clientdata(client, st7571);
+ st7571->pdata = device_get_match_data(&client->dev);
+
+ ret = st7571_parse_dt(st7571);
+ if (ret)
+ return ret;
+
+ ret = st7571_validate_parameters(st7571);
+ if (ret)
+ return ret;
+
+ st7571->mode = st7571_mode(st7571);
+
+ /*
+ * The hardware design could make it hard to detect a NAK on the I2C bus.
+	 * If the adapter does not support protocol mangling, do not set the
+	 * I2C_M_IGNORE_NAK flag, at the expense of possible cruft in the logs.
+ */
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+ st7571->ignore_nak = true;
+
+ st7571->regmap = devm_regmap_init(&client->dev, &st7571_regmap_bus,
+ client, &st7571_regmap_config);
+ if (IS_ERR(st7571->regmap)) {
+ return dev_err_probe(&client->dev, PTR_ERR(st7571->regmap),
+ "Failed to initialize regmap\n");
+ }
+
+ st7571->hwbuf = devm_kzalloc(&client->dev,
+ (st7571->nlines * st7571->ncols * st7571->bpp) / 8,
+ GFP_KERNEL);
+ if (!st7571->hwbuf)
+ return -ENOMEM;
+
+ st7571->row = devm_kzalloc(&client->dev,
+ (st7571->ncols * st7571->bpp),
+ GFP_KERNEL);
+ if (!st7571->row)
+ return -ENOMEM;
+
+ ret = st7571_mode_config_init(st7571);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize mode config\n");
+
+ ret = st7571_plane_init(st7571, st7571->pformat);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize primary plane\n");
+
+ ret = st7571_crtc_init(st7571);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize CRTC\n");
+
+ ret = st7571_encoder_init(st7571);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize encoder\n");
+
+ ret = st7571_connector_init(st7571);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize connector\n");
+
+ drm_mode_config_reset(dev);
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to register DRM device\n");
+
+ drm_client_setup(dev, NULL);
+ return 0;
+}
+
+static void st7571_remove(struct i2c_client *client)
+{
+ struct st7571_device *st7571 = i2c_get_clientdata(client);
+
+ drm_dev_unplug(&st7571->dev);
+}
+
+struct st7571_panel_data st7571_config = {
+ .init = st7571_lcd_init,
+ .constraints = {
+ .min_nlines = 1,
+ .max_nlines = 128,
+ .min_ncols = 128,
+ .max_ncols = 128,
+ .support_grayscale = true,
+ },
+};
+
+static const struct of_device_id st7571_of_match[] = {
+ { .compatible = "sitronix,st7571", .data = &st7571_config },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st7571_of_match);
+
+static const struct i2c_device_id st7571_id[] = {
+ { "st7571", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, st7571_id);
+
+static struct i2c_driver st7571_i2c_driver = {
+ .driver = {
+ .name = "st7571",
+ .of_match_table = st7571_of_match,
+ },
+ .probe = st7571_probe,
+ .remove = st7571_remove,
+ .id_table = st7571_id,
+};
+
+module_i2c_driver(st7571_i2c_driver);
+
+MODULE_AUTHOR("Marcus Folkesson <marcus.folkesson@gmail.com>");
+MODULE_DESCRIPTION("DRM Driver for Sitronix ST7571 LCD controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/sitronix/st7586.c
index a29672d84ede..a29672d84ede 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/sitronix/st7586.c
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/sitronix/st7735r.c
index 1d60f6e5b3bc..1d60f6e5b3bc 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/sitronix/st7735r.c
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index cb2816985305..a3447622a33c 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -784,19 +784,12 @@ static int sprd_dpu_context_init(struct sprd_dpu *dpu,
{
struct platform_device *pdev = to_platform_device(dev);
struct dpu_context *ctx = &dpu->ctx;
- struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O resource\n");
- return -EINVAL;
- }
-
- ctx->base = devm_ioremap(dev, res->start, resource_size(res));
- if (!ctx->base) {
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->base)) {
dev_err(dev, "failed to map dpu registers\n");
- return -EFAULT;
+ return PTR_ERR(ctx->base);
}
ctx->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
index 8fc26479bb6b..23b0e1dc547a 100644
--- a/drivers/gpu/drm/sprd/sprd_dsi.c
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -901,18 +901,11 @@ static int sprd_dsi_context_init(struct sprd_dsi *dsi,
{
struct platform_device *pdev = to_platform_device(dev);
struct dsi_context *ctx = &dsi->ctx;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O resource\n");
- return -EINVAL;
- }
-
- ctx->base = devm_ioremap(dev, res->start, resource_size(res));
- if (!ctx->base) {
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->base)) {
drm_err(dsi->drm, "failed to map dsi host registers\n");
- return -ENXIO;
+ return PTR_ERR(ctx->base);
}
ctx->regmap = devm_regmap_init(dev, &regmap_tst_io, dsi, &byte_config);
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index f203ac5514ae..f778a4eee7c9 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -7,8 +7,6 @@ sti-drm-y := \
sti_compositor.o \
sti_crtc.o \
sti_plane.o \
- sti_crtc.o \
- sti_plane.o \
sti_hdmi.o \
sti_hdmi_tx3g4c28phy.o \
sti_dvo.o \
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 063f82d23d80..8c529b0cca8b 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -177,7 +177,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct device_node *vtg_np;
struct sti_compositor *compo;
- struct resource *res;
unsigned int i;
compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
@@ -194,17 +193,10 @@ static int sti_compositor_probe(struct platform_device *pdev)
memcpy(&compo->data, of_match_node(compositor_of_match, np)->data,
sizeof(struct sti_compositor_data));
-
- /* Get Memory ressources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENXIO;
- }
- compo->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (compo->regs == NULL) {
+ compo->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(compo->regs)) {
DRM_ERROR("Register mapping failed\n");
- return -ENXIO;
+ return PTR_ERR(compo->regs);
}
/* Get clock resources */
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 4dcddd02629b..74a1eef4674e 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -511,7 +511,6 @@ static int sti_dvo_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_dvo *dvo;
- struct resource *res;
struct device_node *np = dev->of_node;
DRM_INFO("%s\n", __func__);
@@ -523,16 +522,9 @@ static int sti_dvo_probe(struct platform_device *pdev)
}
dvo->dev = pdev->dev;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvo-reg");
- if (!res) {
- DRM_ERROR("Invalid dvo resource\n");
- return -ENOMEM;
- }
- dvo->regs = devm_ioremap(dev, res->start,
- resource_size(res));
- if (!dvo->regs)
- return -ENOMEM;
+ dvo->regs = devm_platform_ioremap_resource_byname(pdev, "dvo-reg");
+ if (IS_ERR(dvo->regs))
+ return PTR_ERR(dvo->regs);
dvo->clk_pix = devm_clk_get(dev, "dvo_pix");
if (IS_ERR(dvo->clk_pix)) {
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 14fdc00d2ba0..d202b6c1eb8f 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -693,7 +693,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
connector->hda = hda;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return -ENOMEM;
@@ -750,16 +750,9 @@ static int sti_hda_probe(struct platform_device *pdev)
return -ENOMEM;
hda->dev = pdev->dev;
-
- /* Get resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hda-reg");
- if (!res) {
- DRM_ERROR("Invalid hda resource\n");
- return -ENOMEM;
- }
- hda->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hda->regs)
- return -ENOMEM;
+ hda->regs = devm_platform_ioremap_resource_byname(pdev, "hda-reg");
+ if (IS_ERR(hda->regs))
+ return PTR_ERR(hda->regs);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"video-dacs-ctrl");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 164a34d793d8..37b8d619066e 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1380,7 +1380,6 @@ static int sti_hdmi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sti_hdmi *hdmi;
struct device_node *np = dev->of_node;
- struct resource *res;
struct device_node *ddc;
int ret;
@@ -1399,17 +1398,9 @@ static int sti_hdmi_probe(struct platform_device *pdev)
}
hdmi->dev = pdev->dev;
-
- /* Get resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi-reg");
- if (!res) {
- DRM_ERROR("Invalid hdmi resource\n");
- ret = -ENOMEM;
- goto release_adapter;
- }
- hdmi->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hdmi->regs) {
- ret = -ENOMEM;
+ hdmi->regs = devm_platform_ioremap_resource_byname(pdev, "hdmi-reg");
+ if (IS_ERR(hdmi->regs)) {
+ ret = PTR_ERR(hdmi->regs);
goto release_adapter;
}
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 0f658709c9d0..03684062309b 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1356,7 +1356,6 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *vtg_np;
struct sti_hqvdp *hqvdp;
- struct resource *res;
DRM_DEBUG_DRIVER("\n");
@@ -1367,17 +1366,10 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
}
hqvdp->dev = dev;
-
- /* Get Memory resources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENXIO;
- }
- hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hqvdp->regs) {
+ hqvdp->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hqvdp->regs)) {
DRM_ERROR("Register mapping failed\n");
- return -ENXIO;
+ return PTR_ERR(hqvdp->regs);
}
/* Get clock resources */
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index af6c06f448c4..6a464b035de8 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -838,7 +838,6 @@ static int sti_tvout_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct sti_tvout *tvout;
- struct resource *res;
DRM_INFO("%s\n", __func__);
@@ -850,16 +849,9 @@ static int sti_tvout_probe(struct platform_device *pdev)
return -ENOMEM;
tvout->dev = dev;
-
- /* get memory resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg");
- if (!res) {
- DRM_ERROR("Invalid glue resource\n");
- return -ENOMEM;
- }
- tvout->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!tvout->regs)
- return -ENOMEM;
+ tvout->regs = devm_platform_ioremap_resource_byname(pdev, "tvout-reg");
+ if (IS_ERR(tvout->regs))
+ return PTR_ERR(tvout->regs);
/* get reset resources */
tvout->reset = devm_reset_control_get(dev, "tvout");
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 5ba469b711b5..ee81691b3203 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -380,23 +380,15 @@ static int vtg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_vtg *vtg;
- struct resource *res;
int ret;
vtg = devm_kzalloc(dev, sizeof(*vtg), GFP_KERNEL);
if (!vtg)
return -ENOMEM;
-
- /* Get Memory ressources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENOMEM;
- }
- vtg->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vtg->regs) {
+ vtg->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vtg->regs)) {
DRM_ERROR("failed to remap I/O memory\n");
- return -ENOMEM;
+ return PTR_ERR(vtg->regs);
}
vtg->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index 4613e8e3b8fd..a3ae9a93ce66 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -934,28 +934,27 @@ static const struct drm_connector_funcs lvds_conn_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int lvds_attach(struct drm_bridge *bridge,
+static int lvds_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct stm_lvds *lvds = bridge_to_stm_lvds(bridge);
struct drm_connector *connector = &lvds->connector;
- struct drm_encoder *encoder = bridge->encoder;
int ret;
- if (!bridge->encoder) {
+ if (!encoder) {
drm_err(bridge->dev, "Parent encoder object not found\n");
return -ENODEV;
}
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
/* No cloning support */
- bridge->encoder->possible_clones = 0;
+ encoder->possible_clones = 0;
/* If we have a next bridge just attach it. */
if (lvds->next_bridge)
- return drm_bridge_attach(bridge->encoder, lvds->next_bridge,
+ return drm_bridge_attach(encoder, lvds->next_bridge,
bridge, flags);
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
diff --git a/drivers/gpu/drm/sysfb/Kconfig b/drivers/gpu/drm/sysfb/Kconfig
new file mode 100644
index 000000000000..9c9884c7efc6
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/Kconfig
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menu "Drivers for system framebuffers"
+ depends on DRM
+
+config DRM_SYSFB_HELPER
+ tristate
+ depends on DRM
+
+config DRM_EFIDRM
+ tristate "EFI framebuffer driver"
+ depends on DRM && MMU && EFI && (!SYSFB_SIMPLEFB || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ select SYSFB
+ help
+ DRM driver for EFI framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via EFI interfaces.
+
+config DRM_OFDRM
+ tristate "Open Firmware display driver"
+ depends on DRM && MMU && OF && (PPC || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ help
+ DRM driver for Open Firmware framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the Open Firmware before the kernel boots. Scanout buffer, size,
+ and display format must be provided via device tree.
+
+config DRM_SIMPLEDRM
+ tristate "Simple framebuffer driver"
+ depends on DRM && MMU
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ help
+ DRM driver for simple platform-provided framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via device tree,
+ UEFI, VESA, etc.
+
+ On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB
+ to use UEFI and VESA framebuffers.
+
+config DRM_VESADRM
+ tristate "VESA framebuffer driver"
+ depends on DRM && MMU && X86 && (!SYSFB_SIMPLEFB || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ select SYSFB
+ help
+ DRM driver for VESA framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via VBE interfaces.
+
+endmenu
diff --git a/drivers/gpu/drm/sysfb/Makefile b/drivers/gpu/drm/sysfb/Makefile
new file mode 100644
index 000000000000..a156c496413d
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+drm_sysfb_helper-y := \
+ drm_sysfb.o \
+ drm_sysfb_modeset.o
+drm_sysfb_helper-$(CONFIG_SCREEN_INFO) += drm_sysfb_screen_info.o
+obj-$(CONFIG_DRM_SYSFB_HELPER) += drm_sysfb_helper.o
+
+obj-$(CONFIG_DRM_EFIDRM) += efidrm.o
+obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
+obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
+obj-$(CONFIG_DRM_VESADRM) += vesadrm.o
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb.c b/drivers/gpu/drm/sysfb/drm_sysfb.c
new file mode 100644
index 000000000000..308f82153b15
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/limits.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+
+#include <drm/drm_print.h>
+
+#include "drm_sysfb_helper.h"
+
+int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
+ u64 value, u32 max)
+{
+ if (value > min(max, INT_MAX)) {
+ drm_warn(dev, "%s of %llu exceeds maximum of %u\n", name, value, max);
+ return -EINVAL;
+ }
+ return value;
+}
+EXPORT_SYMBOL(drm_sysfb_get_validated_int);
+
+int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
+ u64 value, u32 max)
+{
+ if (!value) {
+ drm_warn(dev, "%s of 0 not allowed\n", name);
+ return -EINVAL;
+ }
+ return drm_sysfb_get_validated_int(dev, name, value, max);
+}
+EXPORT_SYMBOL(drm_sysfb_get_validated_int0);
+
+MODULE_DESCRIPTION("Helpers for DRM sysfb drivers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
new file mode 100644
index 000000000000..cb08a88242cc
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef DRM_SYSFB_HELPER_H
+#define DRM_SYSFB_HELPER_H
+
+#include <linux/container_of.h>
+#include <linux/iosys-map.h>
+
+#include <video/pixel_format.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_modes.h>
+
+struct drm_format_info;
+struct drm_scanout_buffer;
+struct screen_info;
+
+/*
+ * Input parsing
+ */
+
+struct drm_sysfb_format {
+ struct pixel_format pixel;
+ u32 fourcc;
+};
+
+int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
+ u64 value, u32 max);
+int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
+ u64 value, u32 max);
+
+#if defined(CONFIG_SCREEN_INFO)
+int drm_sysfb_get_width_si(struct drm_device *dev, const struct screen_info *si);
+int drm_sysfb_get_height_si(struct drm_device *dev, const struct screen_info *si);
+struct resource *drm_sysfb_get_memory_si(struct drm_device *dev,
+ const struct screen_info *si,
+ struct resource *res);
+int drm_sysfb_get_stride_si(struct drm_device *dev, const struct screen_info *si,
+ const struct drm_format_info *format,
+ unsigned int width, unsigned int height, u64 size);
+u64 drm_sysfb_get_visible_size_si(struct drm_device *dev, const struct screen_info *si,
+ unsigned int height, unsigned int stride, u64 size);
+const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
+ const struct drm_sysfb_format *formats,
+ size_t nformats,
+ const struct screen_info *si);
+#endif
+
+/*
+ * Display modes
+ */
+
+struct drm_display_mode drm_sysfb_mode(unsigned int width,
+ unsigned int height,
+ unsigned int width_mm,
+ unsigned int height_mm);
+
+/*
+ * Device
+ */
+
+struct drm_sysfb_device {
+ struct drm_device dev;
+
+ const u8 *edid; /* can be NULL */
+
+ /* hardware settings */
+ struct drm_display_mode fb_mode;
+ const struct drm_format_info *fb_format;
+ unsigned int fb_pitch;
+ unsigned int fb_gamma_lut_size;
+
+ /* hardware-framebuffer kernel address */
+ struct iosys_map fb_addr;
+};
+
+static inline struct drm_sysfb_device *to_drm_sysfb_device(struct drm_device *dev)
+{
+ return container_of(dev, struct drm_sysfb_device, dev);
+}
+
+/*
+ * Plane
+ */
+
+int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state);
+void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void drm_sysfb_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
+#define DRM_SYSFB_PLANE_NFORMATS(_num_native) \
+ ((_num_native) + 1)
+
+#define DRM_SYSFB_PLANE_FORMAT_MODIFIERS \
+ DRM_FORMAT_MOD_LINEAR, \
+ DRM_FORMAT_MOD_INVALID
+
+#define DRM_SYSFB_PLANE_HELPER_FUNCS \
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
+ .atomic_check = drm_sysfb_plane_helper_atomic_check, \
+ .atomic_update = drm_sysfb_plane_helper_atomic_update, \
+ .atomic_disable = drm_sysfb_plane_helper_atomic_disable, \
+ .get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer
+
+#define DRM_SYSFB_PLANE_FUNCS \
+ .update_plane = drm_atomic_helper_update_plane, \
+ .disable_plane = drm_atomic_helper_disable_plane, \
+ DRM_GEM_SHADOW_PLANE_FUNCS
+
+/*
+ * CRTC
+ */
+
+struct drm_sysfb_crtc_state {
+ struct drm_crtc_state base;
+
+ /* Primary-plane format; required for color mgmt. */
+ const struct drm_format_info *format;
+};
+
+static inline struct drm_sysfb_crtc_state *
+to_drm_sysfb_crtc_state(struct drm_crtc_state *base)
+{
+ return container_of(base, struct drm_sysfb_crtc_state, base);
+}
+
+enum drm_mode_status drm_sysfb_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+int drm_sysfb_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state);
+
+#define DRM_SYSFB_CRTC_HELPER_FUNCS \
+ .mode_valid = drm_sysfb_crtc_helper_mode_valid, \
+ .atomic_check = drm_sysfb_crtc_helper_atomic_check
+
+void drm_sysfb_crtc_reset(struct drm_crtc *crtc);
+struct drm_crtc_state *drm_sysfb_crtc_atomic_duplicate_state(struct drm_crtc *crtc);
+void drm_sysfb_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state);
+
+#define DRM_SYSFB_CRTC_FUNCS \
+ .reset = drm_sysfb_crtc_reset, \
+ .set_config = drm_atomic_helper_set_config, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = drm_sysfb_crtc_atomic_duplicate_state, \
+ .atomic_destroy_state = drm_sysfb_crtc_atomic_destroy_state
+
+/*
+ * Connector
+ */
+
+int drm_sysfb_connector_helper_get_modes(struct drm_connector *connector);
+
+#define DRM_SYSFB_CONNECTOR_HELPER_FUNCS \
+ .get_modes = drm_sysfb_connector_helper_get_modes
+
+#define DRM_SYSFB_CONNECTOR_FUNCS \
+ .reset = drm_atomic_helper_connector_reset, \
+ .fill_modes = drm_helper_probe_single_connector_modes, \
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, \
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state
+
+/*
+ * Mode config
+ */
+
+#define DRM_SYSFB_MODE_CONFIG_FUNCS \
+ .fb_create = drm_gem_fb_create_with_dirty, \
+ .atomic_check = drm_atomic_helper_check, \
+ .atomic_commit = drm_atomic_helper_commit
+
+#endif
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
new file mode 100644
index 000000000000..ffaa2522ab96
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "drm_sysfb_helper.h"
+
+struct drm_display_mode drm_sysfb_mode(unsigned int width,
+ unsigned int height,
+ unsigned int width_mm,
+ unsigned int height_mm)
+{
+ /*
+ * Assume a monitor resolution of 96 dpi to
+ * get a somewhat reasonable screen size.
+ */
+ if (!width_mm)
+ width_mm = DRM_MODE_RES_MM(width, 96ul);
+ if (!height_mm)
+ height_mm = DRM_MODE_RES_MM(height, 96ul);
+
+ {
+ const struct drm_display_mode mode = {
+ DRM_MODE_INIT(60, width, height, width_mm, height_mm)
+ };
+
+ return mode;
+ }
+}
+EXPORT_SYMBOL(drm_sysfb_mode);
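+
+/*
+ * For example, a 1024x768 scanout with no physical size reported falls back
+ * to the 96 dpi assumption above, i.e. roughly 1024 * 25.4 / 96 ~= 271 mm
+ * wide by 768 * 25.4 / 96 ~= 203 mm high.
+ */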
+
+/*
+ * Plane
+ */
+
+int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
+ struct drm_shadow_plane_state *new_shadow_plane_state =
+ to_drm_shadow_plane_state(new_plane_state);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ struct drm_sysfb_crtc_state *new_sysfb_crtc_state;
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ if (new_fb->format != sysfb->fb_format) {
+ void *buf;
+
+ /* format conversion necessary; reserve buffer */
+ buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
+ sysfb->fb_pitch, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
+ new_sysfb_crtc_state->format = new_fb->format;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_check);
+
+void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ unsigned int dst_pitch = sysfb->fb_pitch;
+ const struct drm_format_info *dst_format = sysfb->fb_format;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ int ret, idx;
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ struct iosys_map dst = sysfb->fb_addr;
+ struct drm_rect dst_clip = plane_state->dst;
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
+ drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
+ &damage, &shadow_plane_state->fmtcnv_state);
+ }
+
+ drm_dev_exit(idx);
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_update);
+
+void drm_sysfb_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct iosys_map dst = sysfb->fb_addr;
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */
+ unsigned int dst_pitch = sysfb->fb_pitch;
+ const struct drm_format_info *dst_format = sysfb->fb_format;
+ struct drm_rect dst_clip;
+ unsigned long lines, linepixels, i;
+ int idx;
+
+ drm_rect_init(&dst_clip,
+ plane_state->src_x >> 16, plane_state->src_y >> 16,
+ plane_state->src_w >> 16, plane_state->src_h >> 16);
+
+ lines = drm_rect_height(&dst_clip);
+ linepixels = drm_rect_width(&dst_clip);
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ /* Clear buffer to black if disabled */
+ dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip);
+ for (i = 0; i < lines; ++i) {
+ memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]);
+ dst_vmap += dst_pitch;
+ }
+
+ drm_dev_exit(idx);
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_disable);
+
+int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev);
+
+ sb->width = sysfb->fb_mode.hdisplay;
+ sb->height = sysfb->fb_mode.vdisplay;
+ sb->format = sysfb->fb_format;
+ sb->pitch[0] = sysfb->fb_pitch;
+ sb->map[0] = sysfb->fb_addr;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_get_scanout_buffer);
+
+/*
+ * CRTC
+ */
+
+static void drm_sysfb_crtc_state_destroy(struct drm_sysfb_crtc_state *sysfb_crtc_state)
+{
+ __drm_atomic_helper_crtc_destroy_state(&sysfb_crtc_state->base);
+
+ kfree(sysfb_crtc_state);
+}
+
+enum drm_mode_status drm_sysfb_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sysfb->fb_mode);
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_helper_mode_valid);
+
+int drm_sysfb_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ int ret;
+
+ if (!new_crtc_state->enable)
+ return 0;
+
+ ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
+ if (ret)
+ return ret;
+
+ if (new_crtc_state->color_mgmt_changed) {
+ const size_t gamma_lut_length =
+ sysfb->fb_gamma_lut_size * sizeof(struct drm_color_lut);
+ const struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut;
+
+ if (gamma_lut && (gamma_lut->length != gamma_lut_length)) {
+ drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_helper_atomic_check);
+
+void drm_sysfb_crtc_reset(struct drm_crtc *crtc)
+{
+ struct drm_sysfb_crtc_state *sysfb_crtc_state;
+
+ if (crtc->state)
+ drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc->state));
+
+ sysfb_crtc_state = kzalloc(sizeof(*sysfb_crtc_state), GFP_KERNEL);
+ if (sysfb_crtc_state)
+ __drm_atomic_helper_crtc_reset(crtc, &sysfb_crtc_state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_reset);
+
+struct drm_crtc_state *drm_sysfb_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_sysfb_crtc_state *new_sysfb_crtc_state;
+ struct drm_sysfb_crtc_state *sysfb_crtc_state;
+
+ if (drm_WARN_ON(dev, !crtc_state))
+ return NULL;
+
+ new_sysfb_crtc_state = kzalloc(sizeof(*new_sysfb_crtc_state), GFP_KERNEL);
+ if (!new_sysfb_crtc_state)
+ return NULL;
+
+ sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &new_sysfb_crtc_state->base);
+ new_sysfb_crtc_state->format = sysfb_crtc_state->format;
+
+ return &new_sysfb_crtc_state->base;
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_atomic_duplicate_state);
+
+void drm_sysfb_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
+{
+ drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc_state));
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_atomic_destroy_state);
+
+/*
+ * Connector
+ */
+
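+/*
+ * Serve the firmware-provided EDID to drm_edid_read_custom(). Only the
+ * base block is stored, so requests beyond it are rejected.
+ */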
+static int drm_sysfb_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
+{
+ struct drm_sysfb_device *sysfb = data;
+ const u8 *edid = sysfb->edid;
+ size_t off = block * EDID_LENGTH;
+ size_t end = off + len;
+
+ if (!edid)
+ return -EINVAL;
+ if (end > EDID_LENGTH)
+ return -EINVAL;
+ memcpy(buf, &edid[off], len);
+
+ /*
+ * EDID extension blocks are not available, and advertising them
+ * would confuse the DRM EDID helpers. Clear the extension-count
+ * field and keep the block checksum valid by adding the old count
+ * to the checksum byte.
+ */
+ buf[127] += buf[126];
+ buf[126] = 0;
+
+ return 0;
+}
+
+int drm_sysfb_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(connector->dev);
+ const struct drm_edid *drm_edid;
+
+ if (sysfb->edid) {
+ drm_edid = drm_edid_read_custom(connector, drm_sysfb_get_edid_block, sysfb);
+ drm_edid_connector_update(connector, drm_edid);
+ drm_edid_free(drm_edid);
+ }
+
+ /* Return the fixed mode even when an EDID is present */
+ return drm_connector_helper_get_modes_fixed(connector, &sysfb->fb_mode);
+}
+EXPORT_SYMBOL(drm_sysfb_connector_helper_get_modes);
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
new file mode 100644
index 000000000000..0b3fb874a51f
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/limits.h>
+#include <linux/minmax.h>
+#include <linux/screen_info.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
+
+#include "drm_sysfb_helper.h"
+
+static s64 drm_sysfb_get_validated_size0(struct drm_device *dev, const char *name,
+ u64 value, u64 max)
+{
+ if (!value) {
+ drm_warn(dev, "%s of 0 not allowed\n", name);
+ return -EINVAL;
+ } else if (value > min(max, S64_MAX)) {
+ drm_warn(dev, "%s of %llu exceeds maximum of %llu\n", name, value, max);
+ return -EINVAL;
+ }
+ return value;
+}
+
+int drm_sysfb_get_width_si(struct drm_device *dev, const struct screen_info *si)
+{
+ return drm_sysfb_get_validated_int0(dev, "width", si->lfb_width, U16_MAX);
+}
+EXPORT_SYMBOL(drm_sysfb_get_width_si);
+
+int drm_sysfb_get_height_si(struct drm_device *dev, const struct screen_info *si)
+{
+ return drm_sysfb_get_validated_int0(dev, "height", si->lfb_height, U16_MAX);
+}
+EXPORT_SYMBOL(drm_sysfb_get_height_si);
+
+struct resource *drm_sysfb_get_memory_si(struct drm_device *dev,
+ const struct screen_info *si,
+ struct resource *res)
+{
+ ssize_t num;
+
+ num = screen_info_resources(si, res, 1);
+ if (!num) {
+ drm_warn(dev, "memory resource not found\n");
+ return NULL;
+ }
+
+ return res;
+}
+EXPORT_SYMBOL(drm_sysfb_get_memory_si);
+
+int drm_sysfb_get_stride_si(struct drm_device *dev, const struct screen_info *si,
+ const struct drm_format_info *format,
+ unsigned int width, unsigned int height, u64 size)
+{
+ u64 lfb_linelength = si->lfb_linelength;
+
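+ /* Fall back to the format's minimum pitch if the firmware did not report one. */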
+ if (!lfb_linelength)
+ lfb_linelength = drm_format_info_min_pitch(format, 0, width);
+
+ return drm_sysfb_get_validated_int0(dev, "stride", lfb_linelength, div64_u64(size, height));
+}
+EXPORT_SYMBOL(drm_sysfb_get_stride_si);
+
+u64 drm_sysfb_get_visible_size_si(struct drm_device *dev, const struct screen_info *si,
+ unsigned int height, unsigned int stride, u64 size)
+{
+ u64 vsize = PAGE_ALIGN(height * stride);
+
+ return drm_sysfb_get_validated_size0(dev, "visible size", vsize, size);
+}
+EXPORT_SYMBOL(drm_sysfb_get_visible_size_si);
+
+const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
+ const struct drm_sysfb_format *formats,
+ size_t nformats,
+ const struct screen_info *si)
+{
+ const struct drm_format_info *format = NULL;
+ u32 bits_per_pixel;
+ size_t i;
+
+ bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
+
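+ /* Match the firmware-reported color layout against the driver's format table. */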
+ for (i = 0; i < nformats; ++i) {
+ const struct pixel_format *f = &formats[i].pixel;
+
+ if (bits_per_pixel == f->bits_per_pixel &&
+ si->red_size == f->red.length &&
+ si->red_pos == f->red.offset &&
+ si->green_size == f->green.length &&
+ si->green_pos == f->green.offset &&
+ si->blue_size == f->blue.length &&
+ si->blue_pos == f->blue.offset) {
+ format = drm_format_info(formats[i].fourcc);
+ break;
+ }
+ }
+
+ if (!format)
+ drm_warn(dev, "No compatible color format found\n");
+
+ return format;
+}
+EXPORT_SYMBOL(drm_sysfb_get_format_si);
diff --git a/drivers/gpu/drm/sysfb/efidrm.c b/drivers/gpu/drm/sysfb/efidrm.c
new file mode 100644
index 000000000000..46912924636a
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/efidrm.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/aperture.h>
+#include <linux/efi.h>
+#include <linux/limits.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/edid.h>
+#include <video/pixel_format.h>
+
+#include "drm_sysfb_helper.h"
+
+#define DRIVER_NAME "efidrm"
+#define DRIVER_DESC "DRM driver for EFI platform devices"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static const struct drm_format_info *efidrm_get_format_si(struct drm_device *dev,
+ const struct screen_info *si)
+{
+ static const struct drm_sysfb_format formats[] = {
+ { PIXEL_FORMAT_XRGB1555, DRM_FORMAT_XRGB1555, },
+ { PIXEL_FORMAT_RGB565, DRM_FORMAT_RGB565, },
+ { PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, },
+ { PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, },
+ { PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, },
+ { PIXEL_FORMAT_XRGB2101010, DRM_FORMAT_XRGB2101010, },
+ };
+
+ return drm_sysfb_get_format_si(dev, formats, ARRAY_SIZE(formats), si);
+}
+
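+/*
+ * Restrict the framebuffer mapping flags to what the EFI memory map allows
+ * for the region; default to WC/UC if no memory descriptor covers it.
+ */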
+static u64 efidrm_get_mem_flags(struct drm_device *dev, resource_size_t start,
+ resource_size_t len)
+{
+ u64 attribute = EFI_MEMORY_UC | EFI_MEMORY_WC |
+ EFI_MEMORY_WT | EFI_MEMORY_WB;
+ u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
+ resource_size_t end = start + len;
+ efi_memory_desc_t md;
+ u64 md_end;
+
+ if (!efi_enabled(EFI_MEMMAP) || efi_mem_desc_lookup(start, &md))
+ goto out;
+
+ md_end = md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT);
+ if (end > md_end)
+ goto out;
+
+ attribute &= md.attribute;
+ if (attribute) {
+ mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+ mem_flags &= attribute;
+ }
+
+out:
+ return mem_flags;
+}
+
+/*
+ * EFI device
+ */
+
+struct efidrm_device {
+ struct drm_sysfb_device sysfb;
+
+ /* modesetting */
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+/*
+ * Modesetting
+ */
+
+static const u64 efidrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
+};
+
+static const struct drm_plane_helper_funcs efidrm_primary_plane_helper_funcs = {
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs efidrm_primary_plane_funcs = {
+ DRM_SYSFB_PLANE_FUNCS,
+ .destroy = drm_plane_cleanup,
+};
+
+static const struct drm_crtc_helper_funcs efidrm_crtc_helper_funcs = {
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs efidrm_crtc_funcs = {
+ DRM_SYSFB_CRTC_FUNCS,
+ .destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_encoder_funcs efidrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_connector_helper_funcs efidrm_connector_helper_funcs = {
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs efidrm_connector_funcs = {
+ DRM_SYSFB_CONNECTOR_FUNCS,
+ .destroy = drm_connector_cleanup,
+};
+
+static const struct drm_mode_config_funcs efidrm_mode_config_funcs = {
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
+};
+
+/*
+ * Init / Cleanup
+ */
+
+static struct efidrm_device *efidrm_device_create(struct drm_driver *drv,
+ struct platform_device *pdev)
+{
+ const struct screen_info *si;
+ const struct drm_format_info *format;
+ int width, height, stride;
+ u64 vsize, mem_flags;
+ struct resource resbuf;
+ struct resource *res;
+ struct efidrm_device *efi;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ struct resource *mem = NULL;
+ void __iomem *screen_base = NULL;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned long max_width, max_height;
+ size_t nformats;
+ int ret;
+
+ si = dev_get_platdata(&pdev->dev);
+ if (!si)
+ return ERR_PTR(-ENODEV);
+ if (screen_info_video_type(si) != VIDEO_TYPE_EFI)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * EFI DRM driver
+ */
+
+ efi = devm_drm_dev_alloc(&pdev->dev, drv, struct efidrm_device, sysfb.dev);
+ if (IS_ERR(efi))
+ return ERR_CAST(efi);
+ sysfb = &efi->sysfb;
+ dev = &sysfb->dev;
+ platform_set_drvdata(pdev, dev);
+
+ /*
+ * Hardware settings
+ */
+
+ format = efidrm_get_format_si(dev, si);
+ if (!format)
+ return ERR_PTR(-EINVAL);
+ width = drm_sysfb_get_width_si(dev, si);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = drm_sysfb_get_height_si(dev, si);
+ if (height < 0)
+ return ERR_PTR(height);
+ res = drm_sysfb_get_memory_si(dev, si, &resbuf);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+ stride = drm_sysfb_get_stride_si(dev, si, format, width, height, resource_size(res));
+ if (stride < 0)
+ return ERR_PTR(stride);
+ vsize = drm_sysfb_get_visible_size_si(dev, si, height, stride, resource_size(res));
+ if (!vsize)
+ return ERR_PTR(-EINVAL);
+
+ drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n",
+ &format->format, width, height, stride);
+
+#ifdef CONFIG_X86
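+ /* Use the EDID passed in by the boot firmware if all 8 header bytes are valid. */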
+ if (drm_edid_header_is_valid(edid_info.dummy) == 8)
+ sysfb->edid = edid_info.dummy;
+#endif
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
+
+ /*
+ * Memory management
+ */
+
+ ret = devm_aperture_acquire_for_platform_device(pdev, res->start, vsize);
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res);
+
+ mem = devm_request_mem_region(&pdev->dev, res->start, vsize, drv->name);
+ if (!mem) {
+ /*
+ * This cannot be fatal. The framebuffer sometimes lives in address
+ * ranges that the resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
+
+ mem_flags = efidrm_get_mem_flags(dev, res->start, vsize);
+
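+ /* Prefer a write-combined mapping; otherwise fall back to one of the modes the firmware allows. */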
+ if (mem_flags & EFI_MEMORY_WC)
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
+ else if (mem_flags & EFI_MEMORY_UC)
+ screen_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ else if (mem_flags & EFI_MEMORY_WT)
+ screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem),
+ MEMREMAP_WT);
+ else if (mem_flags & EFI_MEMORY_WB)
+ screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem),
+ MEMREMAP_WB);
+ else
+ drm_err(dev, "invalid mem_flags: 0x%llx\n", mem_flags);
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+
+ /*
+ * Modesetting
+ */
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
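+ /* Allow framebuffers up to the shadow-plane limits, independent of the native mode. */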
+ max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
+
+ dev->mode_config.min_width = width;
+ dev->mode_config.max_width = max_width;
+ dev->mode_config.min_height = height;
+ dev->mode_config.max_height = max_height;
+ dev->mode_config.preferred_depth = format->depth;
+ dev->mode_config.funcs = &efidrm_mode_config_funcs;
+
+ /* Primary plane */
+
+ nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
+ efi->formats, ARRAY_SIZE(efi->formats));
+
+ primary_plane = &efi->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0, &efidrm_primary_plane_funcs,
+ efi->formats, nformats,
+ efidrm_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_plane_helper_add(primary_plane, &efidrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &efi->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &efidrm_crtc_funcs, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_crtc_helper_add(crtc, &efidrm_crtc_helper_funcs);
+
+ /* Encoder */
+
+ encoder = &efi->encoder;
+ ret = drm_encoder_init(dev, encoder, &efidrm_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &efi->connector;
+ ret = drm_connector_init(dev, connector, &efidrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_connector_helper_add(connector, &efidrm_connector_helper_funcs);
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ width, height);
+ if (sysfb->edid)
+ drm_connector_attach_edid_property(connector);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
+ return efi;
+}
+
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(efidrm_fops);
+
+static struct drm_driver efidrm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &efidrm_fops,
+};
+
+/*
+ * Platform driver
+ */
+
+static int efidrm_probe(struct platform_device *pdev)
+{
+ struct efidrm_device *efi;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ int ret;
+
+ efi = efidrm_device_create(&efidrm_driver, pdev);
+ if (IS_ERR(efi))
+ return PTR_ERR(efi);
+ sysfb = &efi->sysfb;
+ dev = &sysfb->dev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return ret;
+
+ drm_client_setup(dev, sysfb->fb_format);
+
+ return 0;
+}
+
+static void efidrm_remove(struct platform_device *pdev)
+{
+ struct drm_device *dev = platform_get_drvdata(pdev);
+
+ drm_dev_unplug(dev);
+}
+
+static struct platform_driver efidrm_platform_driver = {
+ .driver = {
+ .name = "efi-framebuffer",
+ },
+ .probe = efidrm_probe,
+ .remove = efidrm_remove,
+};
+
+module_platform_driver(efidrm_platform_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/sysfb/ofdrm.c
index 13491c0e704a..fddfe8bea9f7 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/sysfb/ofdrm.c
@@ -12,6 +12,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
@@ -21,7 +22,8 @@
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
+
+#include "drm_sysfb_helper.h"
#define DRIVER_NAME "ofdrm"
#define DRIVER_DESC "DRM driver for OF platform devices"
@@ -76,20 +78,12 @@ enum ofdrm_model {
static int display_get_validated_int(struct drm_device *dev, const char *name, uint32_t value)
{
- if (value > INT_MAX) {
- drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
- return -EINVAL;
- }
- return (int)value;
+ return drm_sysfb_get_validated_int(dev, name, value, INT_MAX);
}
static int display_get_validated_int0(struct drm_device *dev, const char *name, uint32_t value)
{
- if (!value) {
- drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
- return -EINVAL;
- }
- return display_get_validated_int(dev, name, value);
+ return drm_sysfb_get_validated_int0(dev, name, value, INT_MAX);
}
static const struct drm_format_info *display_get_validated_format(struct drm_device *dev,
@@ -226,6 +220,16 @@ static u64 display_get_address_of(struct drm_device *dev, struct device_node *of
return address;
}
+static const u8 *display_get_edid_of(struct drm_device *dev, struct device_node *of_node,
+ u8 buf[EDID_LENGTH])
+{
+ int ret = of_property_read_u8_array(of_node, "EDID", buf, EDID_LENGTH);
+
+ if (ret)
+ return NULL;
+ return buf;
+}
+
static bool is_avivo(u32 vendor, u32 device)
{
/* This will match most R5xx */
@@ -290,22 +294,17 @@ struct ofdrm_device_funcs {
};
struct ofdrm_device {
- struct drm_device dev;
- struct platform_device *pdev;
+ struct drm_sysfb_device sysfb;
const struct ofdrm_device_funcs *funcs;
- /* firmware-buffer settings */
- struct iosys_map screen_base;
- struct drm_display_mode mode;
- const struct drm_format_info *format;
- unsigned int pitch;
-
/* colormap */
void __iomem *cmap_base;
+ u8 edid[EDID_LENGTH];
+
/* modesetting */
- uint32_t formats[8];
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
@@ -314,7 +313,7 @@ struct ofdrm_device {
static struct ofdrm_device *ofdrm_device_of_dev(struct drm_device *dev)
{
- return container_of(dev, struct ofdrm_device, dev);
+ return container_of(to_drm_sysfb_device(dev), struct ofdrm_device, sysfb);
}
/*
@@ -354,7 +353,7 @@ static void ofdrm_pci_release(void *data)
static int ofdrm_device_init_pci(struct ofdrm_device *odev)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct pci_dev *pcidev;
@@ -397,7 +396,7 @@ static int ofdrm_device_init_pci(struct ofdrm_device *odev)
static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev,
struct resource *fb_res)
{
- struct platform_device *pdev = to_platform_device(odev->dev.dev);
+ struct platform_device *pdev = to_platform_device(odev->sysfb.dev.dev);
struct resource *res, *max_res = NULL;
u32 i;
@@ -423,7 +422,7 @@ static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev,
static void __iomem *get_cmap_address_of(struct ofdrm_device *odev, struct device_node *of_node,
int bar_no, unsigned long offset, unsigned long size)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
const __be32 *addr_p;
u64 max_size, address;
unsigned int flags;
@@ -456,7 +455,7 @@ static void __iomem *ofdrm_mach64_cmap_ioremap(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_base)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
u64 address;
void __iomem *cmap_base;
@@ -618,7 +617,7 @@ static void __iomem *ofdrm_qemu_cmap_ioremap(struct ofdrm_device *odev,
cpu_to_be32(0x00),
};
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
u64 address;
void __iomem *cmap_base;
@@ -648,7 +647,7 @@ static void ofdrm_qemu_cmap_write(struct ofdrm_device *odev, unsigned char index
static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
const struct drm_format_info *format)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
int i;
switch (format->format) {
@@ -687,7 +686,7 @@ static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
const struct drm_format_info *format,
struct drm_color_lut *lut)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
int i;
switch (format->format) {
@@ -731,205 +730,27 @@ static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
* Modesetting
*/
-struct ofdrm_crtc_state {
- struct drm_crtc_state base;
-
- /* Primary-plane format; required for color mgmt. */
- const struct drm_format_info *format;
+static const u64 ofdrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
};
-static struct ofdrm_crtc_state *to_ofdrm_crtc_state(struct drm_crtc_state *base)
-{
- return container_of(base, struct ofdrm_crtc_state, base);
-}
-
-static void ofdrm_crtc_state_destroy(struct ofdrm_crtc_state *ofdrm_crtc_state)
-{
- __drm_atomic_helper_crtc_destroy_state(&ofdrm_crtc_state->base);
- kfree(ofdrm_crtc_state);
-}
-
-static const uint64_t ofdrm_primary_plane_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *new_state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
- struct drm_shadow_plane_state *new_shadow_plane_state =
- to_drm_shadow_plane_state(new_plane_state);
- struct drm_framebuffer *new_fb = new_plane_state->fb;
- struct drm_crtc *new_crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state = NULL;
- struct ofdrm_crtc_state *new_ofdrm_crtc_state;
- int ret;
-
- if (new_crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
- if (ret)
- return ret;
- else if (!new_plane_state->visible)
- return 0;
-
- if (new_fb->format != odev->format) {
- void *buf;
-
- /* format conversion necessary; reserve buffer */
- buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
- odev->pitch, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
-
- new_ofdrm_crtc_state = to_ofdrm_crtc_state(new_crtc_state);
- new_ofdrm_crtc_state->format = new_fb->format;
-
- return 0;
-}
-
-static void ofdrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- unsigned int dst_pitch = odev->pitch;
- const struct drm_format_info *dst_format = odev->format;
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect damage;
- int ret, idx;
-
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return;
-
- if (!drm_dev_enter(dev, &idx))
- goto out_drm_gem_fb_end_cpu_access;
-
- drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- struct iosys_map dst = odev->screen_base;
- struct drm_rect dst_clip = plane_state->dst;
-
- if (!drm_rect_intersect(&dst_clip, &damage))
- continue;
-
- iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
- drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
- &damage, &shadow_plane_state->fmtcnv_state);
- }
-
- drm_dev_exit(idx);
-out_drm_gem_fb_end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-}
-
-static void ofdrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct iosys_map dst = odev->screen_base;
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */
- unsigned int dst_pitch = odev->pitch;
- const struct drm_format_info *dst_format = odev->format;
- struct drm_rect dst_clip;
- unsigned long lines, linepixels, i;
- int idx;
-
- drm_rect_init(&dst_clip,
- plane_state->src_x >> 16, plane_state->src_y >> 16,
- plane_state->src_w >> 16, plane_state->src_h >> 16);
-
- lines = drm_rect_height(&dst_clip);
- linepixels = drm_rect_width(&dst_clip);
-
- if (!drm_dev_enter(dev, &idx))
- return;
-
- /* Clear buffer to black if disabled */
- dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip);
- for (i = 0; i < lines; ++i) {
- memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]);
- dst_vmap += dst_pitch;
- }
-
- drm_dev_exit(idx);
-}
-
static const struct drm_plane_helper_funcs ofdrm_primary_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = ofdrm_primary_plane_helper_atomic_check,
- .atomic_update = ofdrm_primary_plane_helper_atomic_update,
- .atomic_disable = ofdrm_primary_plane_helper_atomic_disable,
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
};
static const struct drm_plane_funcs ofdrm_primary_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_SYSFB_PLANE_FUNCS,
.destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static enum drm_mode_status ofdrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
-
- return drm_crtc_helper_mode_valid_fixed(crtc, mode, &odev->mode);
-}
-
-static int ofdrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
- struct drm_atomic_state *new_state)
-{
- static const size_t gamma_lut_length = OFDRM_GAMMA_LUT_SIZE * sizeof(struct drm_color_lut);
-
- struct drm_device *dev = crtc->dev;
- struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
- int ret;
-
- if (!new_crtc_state->enable)
- return 0;
-
- ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
- if (ret)
- return ret;
-
- if (new_crtc_state->color_mgmt_changed) {
- struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut;
-
- if (gamma_lut && (gamma_lut->length != gamma_lut_length)) {
- drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
- struct ofdrm_crtc_state *ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
+ struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
- const struct drm_format_info *format = ofdrm_crtc_state->format;
+ const struct drm_format_info *format = sysfb_crtc_state->format;
if (crtc_state->gamma_lut)
ofdrm_device_set_gamma(odev, format, crtc_state->gamma_lut->data);
@@ -938,91 +759,31 @@ static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_ato
}
}
-/*
- * The CRTC is always enabled. Screen updates are performed by
- * the primary plane's atomic_update function. Disabling clears
- * the screen in the primary plane's atomic_disable function.
- */
static const struct drm_crtc_helper_funcs ofdrm_crtc_helper_funcs = {
- .mode_valid = ofdrm_crtc_helper_mode_valid,
- .atomic_check = ofdrm_crtc_helper_atomic_check,
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
.atomic_flush = ofdrm_crtc_helper_atomic_flush,
};
-static void ofdrm_crtc_reset(struct drm_crtc *crtc)
-{
- struct ofdrm_crtc_state *ofdrm_crtc_state =
- kzalloc(sizeof(*ofdrm_crtc_state), GFP_KERNEL);
-
- if (crtc->state)
- ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc->state));
-
- if (ofdrm_crtc_state)
- __drm_atomic_helper_crtc_reset(crtc, &ofdrm_crtc_state->base);
- else
- __drm_atomic_helper_crtc_reset(crtc, NULL);
-}
-
-static struct drm_crtc_state *ofdrm_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_crtc_state *crtc_state = crtc->state;
- struct ofdrm_crtc_state *new_ofdrm_crtc_state;
- struct ofdrm_crtc_state *ofdrm_crtc_state;
-
- if (drm_WARN_ON(dev, !crtc_state))
- return NULL;
-
- new_ofdrm_crtc_state = kzalloc(sizeof(*new_ofdrm_crtc_state), GFP_KERNEL);
- if (!new_ofdrm_crtc_state)
- return NULL;
-
- ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
-
- __drm_atomic_helper_crtc_duplicate_state(crtc, &new_ofdrm_crtc_state->base);
- new_ofdrm_crtc_state->format = ofdrm_crtc_state->format;
-
- return &new_ofdrm_crtc_state->base;
-}
-
-static void ofdrm_crtc_atomic_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state)
-{
- ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc_state));
-}
-
static const struct drm_crtc_funcs ofdrm_crtc_funcs = {
- .reset = ofdrm_crtc_reset,
+ DRM_SYSFB_CRTC_FUNCS,
.destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = ofdrm_crtc_atomic_duplicate_state,
- .atomic_destroy_state = ofdrm_crtc_atomic_destroy_state,
};
-static int ofdrm_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct ofdrm_device *odev = ofdrm_device_of_dev(connector->dev);
-
- return drm_connector_helper_get_modes_fixed(connector, &odev->mode);
-}
+static const struct drm_encoder_funcs ofdrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
static const struct drm_connector_helper_funcs ofdrm_connector_helper_funcs = {
- .get_modes = ofdrm_connector_helper_get_modes,
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
};
static const struct drm_connector_funcs ofdrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ DRM_SYSFB_CONNECTOR_FUNCS,
.destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs ofdrm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
};
/*
@@ -1072,32 +833,19 @@ static const struct ofdrm_device_funcs ofdrm_qemu_device_funcs = {
.cmap_write = ofdrm_qemu_cmap_write,
};
-static struct drm_display_mode ofdrm_mode(unsigned int width, unsigned int height)
-{
- /*
- * Assume a monitor resolution of 96 dpi to
- * get a somewhat reasonable screen size.
- */
- const struct drm_display_mode mode = {
- DRM_MODE_INIT(60, width, height,
- DRM_MODE_RES_MM(width, 96ul),
- DRM_MODE_RES_MM(height, 96ul))
- };
-
- return mode;
-}
-
static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
struct platform_device *pdev)
{
struct device_node *of_node = pdev->dev.of_node;
struct ofdrm_device *odev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
enum ofdrm_model model;
bool big_endian;
int width, height, depth, linebytes;
const struct drm_format_info *format;
u64 address;
+ const u8 *edid;
resource_size_t fb_size, fb_base, fb_pgbase, fb_pgsize;
struct resource *res, *mem;
void __iomem *screen_base;
@@ -1109,10 +857,11 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
size_t nformats;
int ret;
- odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, dev);
+ odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, sysfb.dev);
if (IS_ERR(odev))
return ERR_CAST(odev);
- dev = &odev->dev;
+ sysfb = &odev->sysfb;
+ dev = &sysfb->dev;
platform_set_drvdata(pdev, dev);
ret = ofdrm_device_init_pci(odev);
@@ -1246,16 +995,22 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
}
}
+ /* EDID is optional */
+ edid = display_get_edid_of(dev, of_node, odev->edid);
+
/*
* Firmware framebuffer
*/
- iosys_map_set_vaddr_iomem(&odev->screen_base, screen_base);
- odev->mode = ofdrm_mode(width, height);
- odev->format = format;
- odev->pitch = linebytes;
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = linebytes;
+ if (odev->cmap_base)
+ sysfb->fb_gamma_lut_size = OFDRM_GAMMA_LUT_SIZE;
+ sysfb->edid = edid;
- drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&odev->mode));
+ drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sysfb->fb_mode));
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, linebytes=%d byte\n",
&format->format, width, height, linebytes);
@@ -1302,15 +1057,16 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
return ERR_PTR(ret);
drm_crtc_helper_add(crtc, &ofdrm_crtc_helper_funcs);
- if (odev->cmap_base) {
- drm_mode_crtc_set_gamma_size(crtc, OFDRM_GAMMA_LUT_SIZE);
- drm_crtc_enable_color_mgmt(crtc, 0, false, OFDRM_GAMMA_LUT_SIZE);
+ if (sysfb->fb_gamma_lut_size) {
+ ret = drm_mode_crtc_set_gamma_size(crtc, sysfb->fb_gamma_lut_size);
+ if (!ret)
+ drm_crtc_enable_color_mgmt(crtc, 0, false, sysfb->fb_gamma_lut_size);
}
/* Encoder */
encoder = &odev->encoder;
- ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
+ ret = drm_encoder_init(dev, encoder, &ofdrm_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL);
if (ret)
return ERR_PTR(ret);
encoder->possible_crtcs = drm_crtc_mask(crtc);
@@ -1326,6 +1082,8 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
drm_connector_set_panel_orientation_with_quirk(connector,
DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
width, height);
+ if (edid)
+ drm_connector_attach_edid_property(connector);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
@@ -1360,19 +1118,21 @@ static struct drm_driver ofdrm_driver = {
static int ofdrm_probe(struct platform_device *pdev)
{
struct ofdrm_device *odev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int ret;
odev = ofdrm_device_create(&ofdrm_driver, pdev);
if (IS_ERR(odev))
return PTR_ERR(odev);
- dev = &odev->dev;
+ sysfb = &odev->sysfb;
+ dev = &sysfb->dev;
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
- drm_client_setup(dev, odev->format);
+ drm_client_setup(dev, sysfb->fb_format);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c
index 5d9ab8adf800..a1c3119330de 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/sysfb/simpledrm.c
@@ -14,7 +14,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -26,9 +25,10 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_panic.h>
#include <drm/drm_probe_helper.h>
+#include "drm_sysfb_helper.h"
+
#define DRIVER_NAME "simpledrm"
#define DRIVER_DESC "DRM driver for simple-framebuffer platform devices"
#define DRIVER_MAJOR 1
@@ -42,24 +42,14 @@ static int
simplefb_get_validated_int(struct drm_device *dev, const char *name,
uint32_t value)
{
- if (value > INT_MAX) {
- drm_err(dev, "simplefb: invalid framebuffer %s of %u\n",
- name, value);
- return -EINVAL;
- }
- return (int)value;
+ return drm_sysfb_get_validated_int(dev, name, value, INT_MAX);
}
static int
simplefb_get_validated_int0(struct drm_device *dev, const char *name,
uint32_t value)
{
- if (!value) {
- drm_err(dev, "simplefb: invalid framebuffer %s of %u\n",
- name, value);
- return -EINVAL;
- }
- return simplefb_get_validated_int(dev, name, value);
+ return drm_sysfb_get_validated_int0(dev, name, value, INT_MAX);
}
static const struct drm_format_info *
@@ -217,7 +207,7 @@ simplefb_get_memory_of(struct drm_device *dev, struct device_node *of_node)
*/
struct simpledrm_device {
- struct drm_device dev;
+ struct drm_sysfb_device sysfb;
/* clocks */
#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
@@ -236,28 +226,14 @@ struct simpledrm_device {
struct device_link **pwr_dom_links;
#endif
- /* simplefb settings */
- struct drm_display_mode mode;
- const struct drm_format_info *format;
- unsigned int pitch;
-
- /* memory management */
- struct iosys_map screen_base;
-
/* modesetting */
- uint32_t formats[8];
- size_t nformats;
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
};
-static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
-{
- return container_of(dev, struct simpledrm_device, dev);
-}
-
/*
* Hardware
*/
@@ -284,7 +260,7 @@ static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
static void simpledrm_device_release_clocks(void *res)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
+ struct simpledrm_device *sdev = res;
unsigned int i;
for (i = 0; i < sdev->clk_count; ++i) {
@@ -297,7 +273,7 @@ static void simpledrm_device_release_clocks(void *res)
static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
{
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct clk *clock;
@@ -382,7 +358,7 @@ static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
static void simpledrm_device_release_regulators(void *res)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
+ struct simpledrm_device *sdev = res;
unsigned int i;
for (i = 0; i < sdev->regulator_count; ++i) {
@@ -395,7 +371,7 @@ static void simpledrm_device_release_regulators(void *res)
static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
{
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct property *prop;
@@ -516,7 +492,7 @@ static void simpledrm_device_detach_genpd(void *res)
static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
{
- struct device *dev = sdev->dev.dev;
+ struct device *dev = sdev->sysfb.dev.dev;
int i;
sdev->pwr_dom_count = of_count_phandle_with_args(dev->of_node, "power-domains",
@@ -548,7 +524,7 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
simpledrm_device_detach_genpd(sdev);
return ret;
}
- drm_warn(&sdev->dev,
+ drm_warn(&sdev->sysfb.dev,
"pm_domain_attach_by_id(%u) failed: %d\n", i, ret);
continue;
}
@@ -559,7 +535,7 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!sdev->pwr_dom_links[i])
- drm_warn(&sdev->dev, "failed to link power-domain %d\n", i);
+ drm_warn(&sdev->sysfb.dev, "failed to link power-domain %d\n", i);
}
return devm_add_action_or_reset(dev, simpledrm_device_detach_genpd, sdev);
@@ -575,210 +551,56 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
* Modesetting
*/
-static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
+static const u64 simpledrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
};
-static int simpledrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_shadow_plane_state *new_shadow_plane_state =
- to_drm_shadow_plane_state(new_plane_state);
- struct drm_framebuffer *new_fb = new_plane_state->fb;
- struct drm_crtc *new_crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state = NULL;
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- int ret;
-
- if (new_crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
- if (ret)
- return ret;
- else if (!new_plane_state->visible)
- return 0;
-
- if (new_fb->format != sdev->format) {
- void *buf;
-
- /* format conversion necessary; reserve buffer */
- buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
- sdev->pitch, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect damage;
- int ret, idx;
-
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return;
-
- if (!drm_dev_enter(dev, &idx))
- goto out_drm_gem_fb_end_cpu_access;
-
- drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- struct drm_rect dst_clip = plane_state->dst;
- struct iosys_map dst = sdev->screen_base;
-
- if (!drm_rect_intersect(&dst_clip, &damage))
- continue;
-
- iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip));
- drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data,
- fb, &damage, &shadow_plane_state->fmtcnv_state);
- }
-
- drm_dev_exit(idx);
-out_drm_gem_fb_end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-}
-
-static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- int idx;
-
- if (!drm_dev_enter(dev, &idx))
- return;
-
- /* Clear screen to black if disabled */
- iosys_map_memset(&sdev->screen_base, 0, 0, sdev->pitch * sdev->mode.vdisplay);
-
- drm_dev_exit(idx);
-}
-
-static int simpledrm_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
- struct drm_scanout_buffer *sb)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(plane->dev);
-
- sb->width = sdev->mode.hdisplay;
- sb->height = sdev->mode.vdisplay;
- sb->format = sdev->format;
- sb->pitch[0] = sdev->pitch;
- sb->map[0] = sdev->screen_base;
-
- return 0;
-}
-
static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = simpledrm_primary_plane_helper_atomic_check,
- .atomic_update = simpledrm_primary_plane_helper_atomic_update,
- .atomic_disable = simpledrm_primary_plane_helper_atomic_disable,
- .get_scanout_buffer = simpledrm_primary_plane_helper_get_scanout_buffer,
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
};
static const struct drm_plane_funcs simpledrm_primary_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_SYSFB_PLANE_FUNCS,
.destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static enum drm_mode_status simpledrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(crtc->dev);
-
- return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sdev->mode);
-}
-
-/*
- * The CRTC is always enabled. Screen updates are performed by
- * the primary plane's atomic_update function. Disabling clears
- * the screen in the primary plane's atomic_disable function.
- */
static const struct drm_crtc_helper_funcs simpledrm_crtc_helper_funcs = {
- .mode_valid = simpledrm_crtc_helper_mode_valid,
- .atomic_check = drm_crtc_helper_atomic_check,
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
};
static const struct drm_crtc_funcs simpledrm_crtc_funcs = {
- .reset = drm_atomic_helper_crtc_reset,
+ DRM_SYSFB_CRTC_FUNCS,
.destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_encoder_funcs simpledrm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static int simpledrm_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(connector->dev);
-
- return drm_connector_helper_get_modes_fixed(connector, &sdev->mode);
-}
-
static const struct drm_connector_helper_funcs simpledrm_connector_helper_funcs = {
- .get_modes = simpledrm_connector_helper_get_modes,
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
};
static const struct drm_connector_funcs simpledrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ DRM_SYSFB_CONNECTOR_FUNCS,
.destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
};
/*
* Init / Cleanup
*/
-static struct drm_display_mode simpledrm_mode(unsigned int width,
- unsigned int height,
- unsigned int width_mm,
- unsigned int height_mm)
-{
- const struct drm_display_mode mode = {
- DRM_MODE_INIT(60, width, height, width_mm, height_mm)
- };
-
- return mode;
-}
-
static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
struct platform_device *pdev)
{
const struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct simpledrm_device *sdev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int width, height, stride;
int width_mm = 0, height_mm = 0;
@@ -793,10 +615,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
size_t nformats;
int ret;
- sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, dev);
+ sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, sysfb.dev);
if (IS_ERR(sdev))
return ERR_CAST(sdev);
- dev = &sdev->dev;
+ sysfb = &sdev->sysfb;
+ dev = &sysfb->dev;
platform_set_drvdata(pdev, sdev);
/*
@@ -858,20 +681,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
return ERR_PTR(-EINVAL);
}
- /*
- * Assume a monitor resolution of 96 dpi if physical dimensions
- * are not specified to get a somewhat reasonable screen size.
- */
- if (!width_mm)
- width_mm = DRM_MODE_RES_MM(width, 96ul);
- if (!height_mm)
- height_mm = DRM_MODE_RES_MM(height, 96ul);
-
- sdev->mode = simpledrm_mode(width, height, width_mm, height_mm);
- sdev->format = format;
- sdev->pitch = stride;
+ sysfb->fb_mode = drm_sysfb_mode(width, height, width_mm, height_mm);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
- drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sdev->mode));
+ drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sysfb->fb_mode));
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d byte\n",
&format->format, width, height, stride);
@@ -895,7 +709,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
if (IS_ERR(screen_base))
return screen_base;
- iosys_map_set_vaddr(&sdev->screen_base, screen_base);
+ iosys_map_set_vaddr(&sysfb->fb_addr, screen_base);
} else {
void __iomem *screen_base;
@@ -928,7 +742,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
if (!screen_base)
return ERR_PTR(-ENOMEM);
- iosys_map_set_vaddr_iomem(&sdev->screen_base, screen_base);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
}
/*
@@ -1027,19 +841,21 @@ static struct drm_driver simpledrm_driver = {
static int simpledrm_probe(struct platform_device *pdev)
{
struct simpledrm_device *sdev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int ret;
sdev = simpledrm_device_create(&simpledrm_driver, pdev);
if (IS_ERR(sdev))
return PTR_ERR(sdev);
- dev = &sdev->dev;
+ sysfb = &sdev->sysfb;
+ dev = &sysfb->dev;
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
- drm_client_setup(dev, sdev->format);
+ drm_client_setup(dev, sdev->sysfb.fb_format);
return 0;
}
@@ -1047,7 +863,7 @@ static int simpledrm_probe(struct platform_device *pdev)
static void simpledrm_remove(struct platform_device *pdev)
{
struct simpledrm_device *sdev = platform_get_drvdata(pdev);
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
drm_dev_unplug(dev);
}
diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c
new file mode 100644
index 000000000000..4d62c78e7d1e
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/vesadrm.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/aperture.h>
+#include <linux/ioport.h>
+#include <linux/limits.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/edid.h>
+#include <video/pixel_format.h>
+#include <video/vga.h>
+
+#include "drm_sysfb_helper.h"
+
+#define DRIVER_NAME "vesadrm"
+#define DRIVER_DESC "DRM driver for VESA platform devices"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define VESADRM_GAMMA_LUT_SIZE 256
+
+static const struct drm_format_info *vesadrm_get_format_si(struct drm_device *dev,
+ const struct screen_info *si)
+{
+ static const struct drm_sysfb_format formats[] = {
+ { PIXEL_FORMAT_XRGB1555, DRM_FORMAT_XRGB1555, },
+ { PIXEL_FORMAT_RGB565, DRM_FORMAT_RGB565, },
+ { PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, },
+ { PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, },
+ { PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, },
+ };
+
+ return drm_sysfb_get_format_si(dev, formats, ARRAY_SIZE(formats), si);
+}
+
+/*
+ * VESA device
+ */
+
+struct vesadrm_device {
+ struct drm_sysfb_device sysfb;
+
+#if defined(CONFIG_X86_32)
+ /* VESA Protected Mode interface */
+ struct {
+ const u8 *PrimaryPalette;
+ } pmi;
+#endif
+
+ void (*cmap_write)(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue);
+
+ /* modesetting */
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static struct vesadrm_device *to_vesadrm_device(struct drm_device *dev)
+{
+ return container_of(to_drm_sysfb_device(dev), struct vesadrm_device, sysfb);
+}
+
+/*
+ * Palette
+ */
+
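+/* Program one palette entry through the standard VGA DAC registers. */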
+static void vesadrm_vga_cmap_write(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ u8 i8, r8, g8, b8;
+
+ if (index > 255)
+ return;
+
+ i8 = index;
+ r8 = red >> 8;
+ g8 = green >> 8;
+ b8 = blue >> 8;
+
+ outb_p(i8, VGA_PEL_IW);
+ outb_p(r8, VGA_PEL_D);
+ outb_p(g8, VGA_PEL_D);
+ outb_p(b8, VGA_PEL_D);
+}
+
+#if defined(CONFIG_X86_32)
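+/* Program one palette entry via VBE function 4F09h through the protected-mode interface. */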
+static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ u32 i32 = index;
+ struct {
+ u8 b8;
+ u8 g8;
+ u8 r8;
+ u8 x8;
+ } PaletteEntry = {
+ blue >> 8,
+ green >> 8,
+ red >> 8,
+ 0x00,
+ };
+
+ if (index > 255)
+ return;
+
+ __asm__ __volatile__ (
+ "call *(%%esi)"
+ : /* no return value */
+ : "a" (0x4f09),
+ "b" (0),
+ "c" (1),
+ "d" (i32),
+ "D" (&PaletteEntry),
+ "S" (&vesa->pmi.PrimaryPalette));
+}
+#endif
+
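+/*
+ * Load an identity gamma ramp into the palette, expanding each channel
+ * index to a 16-bit intensity value.
+ */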
+static void vesadrm_set_gamma_linear(struct vesadrm_device *vesa,
+ const struct drm_format_info *format)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ size_t i;
+ u16 r16, g16, b16;
+
+ switch (format->format) {
+ case DRM_FORMAT_XRGB1555:
+ for (i = 0; i < 32; ++i) {
+ r16 = i * 8 + i / 4;
+ r16 |= (r16 << 8) | r16;
+ vesa->cmap_write(vesa, i, r16, r16, r16);
+ }
+ break;
+ case DRM_FORMAT_RGB565:
+ for (i = 0; i < 32; ++i) {
+ r16 = i * 8 + i / 4;
+ r16 |= (r16 << 8) | r16;
+ g16 = i * 4 + i / 16;
+ g16 |= (g16 << 8) | g16;
+ b16 = r16;
+ vesa->cmap_write(vesa, i, r16, g16, b16);
+ }
+ for (i = 32; i < 64; ++i) {
+ g16 = i * 4 + i / 16;
+ g16 |= (g16 << 8) | g16;
+ vesa->cmap_write(vesa, i, 0, g16, 0);
+ }
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
+ for (i = 0; i < 256; ++i) {
+ r16 = (i << 8) | i;
+ vesa->cmap_write(vesa, i, r16, r16, r16);
+ }
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
+
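+/* Load the CRTC gamma LUT into the palette, subsampled to the format's per-channel depth. */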
+static void vesadrm_set_gamma_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ size_t i;
+ u16 r16, g16, b16;
+
+ switch (format->format) {
+ case DRM_FORMAT_XRGB1555:
+ for (i = 0; i < 32; ++i) {
+ r16 = lut[i * 8 + i / 4].red;
+ g16 = lut[i * 8 + i / 4].green;
+ b16 = lut[i * 8 + i / 4].blue;
+ vesa->cmap_write(vesa, i, r16, g16, b16);
+ }
+ break;
+ case DRM_FORMAT_RGB565:
+ for (i = 0; i < 32; ++i) {
+ r16 = lut[i * 8 + i / 4].red;
+ g16 = lut[i * 4 + i / 16].green;
+ b16 = lut[i * 8 + i / 4].blue;
+ vesa->cmap_write(vesa, i, r16, g16, b16);
+ }
+ for (i = 32; i < 64; ++i)
+ vesa->cmap_write(vesa, i, 0, lut[i * 4 + i / 16].green, 0);
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
+ for (i = 0; i < 256; ++i)
+ vesa->cmap_write(vesa, i, lut[i].red, lut[i].green, lut[i].blue);
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
+
+/*
+ * Modesetting
+ */
+
+static const u64 vesadrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
+};
+
+static const struct drm_plane_helper_funcs vesadrm_primary_plane_helper_funcs = {
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs vesadrm_primary_plane_funcs = {
+ DRM_SYSFB_PLANE_FUNCS,
+ .destroy = drm_plane_cleanup,
+};
+
+static void vesadrm_crtc_helper_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct vesadrm_device *vesa = to_vesadrm_device(dev);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+
+ /*
+ * The gamma LUT has to be reloaded after changing the primary
+ * plane's color format.
+ */
+ if (crtc_state->enable && crtc_state->color_mgmt_changed) {
+ if (sysfb_crtc_state->format == sysfb->fb_format) {
+ if (crtc_state->gamma_lut)
+ vesadrm_set_gamma_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
+ else
+ vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ } else {
+ vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ }
+ }
+}
+
+static const struct drm_crtc_helper_funcs vesadrm_crtc_helper_funcs = {
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
+ .atomic_flush = vesadrm_crtc_helper_atomic_flush,
+};
+
+static const struct drm_crtc_funcs vesadrm_crtc_funcs = {
+ DRM_SYSFB_CRTC_FUNCS,
+ .destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_encoder_funcs vesadrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_connector_helper_funcs vesadrm_connector_helper_funcs = {
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs vesadrm_connector_funcs = {
+ DRM_SYSFB_CONNECTOR_FUNCS,
+ .destroy = drm_connector_cleanup,
+};
+
+static const struct drm_mode_config_funcs vesadrm_mode_config_funcs = {
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
+};
+
+/*
+ * Init / Cleanup
+ */
+
+static struct vesadrm_device *vesadrm_device_create(struct drm_driver *drv,
+ struct platform_device *pdev)
+{
+ const struct screen_info *si;
+ const struct drm_format_info *format;
+ int width, height, stride;
+ u64 vsize;
+ struct resource resbuf;
+ struct resource *res;
+ struct vesadrm_device *vesa;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ struct resource *mem = NULL;
+ void __iomem *screen_base;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned long max_width, max_height;
+ size_t nformats;
+ int ret;
+
+ si = dev_get_platdata(&pdev->dev);
+ if (!si)
+ return ERR_PTR(-ENODEV);
+ if (screen_info_video_type(si) != VIDEO_TYPE_VLFB)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * VESA DRM driver
+ */
+
+ vesa = devm_drm_dev_alloc(&pdev->dev, drv, struct vesadrm_device, sysfb.dev);
+ if (IS_ERR(vesa))
+ return ERR_CAST(vesa);
+ sysfb = &vesa->sysfb;
+ dev = &sysfb->dev;
+ platform_set_drvdata(pdev, dev);
+
+ /*
+ * Hardware settings
+ */
+
+ format = vesadrm_get_format_si(dev, si);
+ if (!format)
+ return ERR_PTR(-EINVAL);
+ width = drm_sysfb_get_width_si(dev, si);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = drm_sysfb_get_height_si(dev, si);
+ if (height < 0)
+ return ERR_PTR(height);
+ res = drm_sysfb_get_memory_si(dev, si, &resbuf);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+ stride = drm_sysfb_get_stride_si(dev, si, format, width, height, resource_size(res));
+ if (stride < 0)
+ return ERR_PTR(stride);
+ vsize = drm_sysfb_get_visible_size_si(dev, si, height, stride, resource_size(res));
+ if (!vsize)
+ return ERR_PTR(-EINVAL);
+
+ drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n",
+ &format->format, width, height, stride);
+
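+ /* Palette updates use the VGA DAC for VGA-compatible modes; non-VGA modes use the VBE PMI on x86-32. */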
+ if (!__screen_info_vbe_mode_nonvga(si)) {
+ vesa->cmap_write = vesadrm_vga_cmap_write;
+#if defined(CONFIG_X86_32)
+ } else {
+ phys_addr_t pmi_base = __screen_info_vesapm_info_base(si);
+ const u16 *pmi_addr = phys_to_virt(pmi_base);
+
+ vesa->pmi.PrimaryPalette = (u8 *)pmi_addr + pmi_addr[2];
+ vesa->cmap_write = vesadrm_pmi_cmap_write;
+#endif
+ }
+
+#ifdef CONFIG_X86
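+ /* Use the EDID passed in from the boot firmware if its header is valid. */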
+ if (drm_edid_header_is_valid(edid_info.dummy) == 8)
+ sysfb->edid = edid_info.dummy;
+#endif
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
+ if (vesa->cmap_write)
+ sysfb->fb_gamma_lut_size = VESADRM_GAMMA_LUT_SIZE;
+
+ /*
+ * Memory management
+ */
+
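+ /* Acquire ownership of the framebuffer aperture on behalf of the platform device. */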
+ ret = devm_aperture_acquire_for_platform_device(pdev, res->start, vsize);
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res);
+
+ mem = devm_request_mem_region(&pdev->dev, res->start, vsize, drv->name);
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
+
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+
+ /*
+ * Modesetting
+ */
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
+
+ dev->mode_config.min_width = width;
+ dev->mode_config.max_width = max_width;
+ dev->mode_config.min_height = height;
+ dev->mode_config.max_height = max_height;
+ dev->mode_config.preferred_depth = format->depth;
+ dev->mode_config.funcs = &vesadrm_mode_config_funcs;
+
+ /* Primary plane */
+
+ nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
+ vesa->formats, ARRAY_SIZE(vesa->formats));
+
+ primary_plane = &vesa->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0, &vesadrm_primary_plane_funcs,
+ vesa->formats, nformats,
+ vesadrm_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_plane_helper_add(primary_plane, &vesadrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &vesa->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &vesadrm_crtc_funcs, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_crtc_helper_add(crtc, &vesadrm_crtc_helper_funcs);
+
+ if (sysfb->fb_gamma_lut_size) {
+ ret = drm_mode_crtc_set_gamma_size(crtc, sysfb->fb_gamma_lut_size);
+ if (!ret)
+ drm_crtc_enable_color_mgmt(crtc, 0, false, sysfb->fb_gamma_lut_size);
+ }
+
+ /* Encoder */
+
+ encoder = &vesa->encoder;
+ ret = drm_encoder_init(dev, encoder, &vesadrm_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &vesa->connector;
+ ret = drm_connector_init(dev, connector, &vesadrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_connector_helper_add(connector, &vesadrm_connector_helper_funcs);
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ width, height);
+ if (sysfb->edid)
+ drm_connector_attach_edid_property(connector);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
+ return vesa;
+}
+
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(vesadrm_fops);
+
+static struct drm_driver vesadrm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &vesadrm_fops,
+};
+
+/*
+ * Platform driver
+ */
+
+static int vesadrm_probe(struct platform_device *pdev)
+{
+ struct vesadrm_device *vesa;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ int ret;
+
+ vesa = vesadrm_device_create(&vesadrm_driver, pdev);
+ if (IS_ERR(vesa))
+ return PTR_ERR(vesa);
+ sysfb = &vesa->sysfb;
+ dev = &sysfb->dev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return ret;
+
+ drm_client_setup(dev, sysfb->fb_format);
+
+ return 0;
+}
+
+static void vesadrm_remove(struct platform_device *pdev)
+{
+ struct drm_device *dev = platform_get_drvdata(pdev);
+
+ drm_dev_unplug(dev);
+}
+
+static struct platform_driver vesadrm_platform_driver = {
+ .driver = {
+ .name = "vesa-framebuffer",
+ },
+ .probe = vesadrm_probe,
+ .remove = vesadrm_remove,
+};
+
+module_platform_driver(vesadrm_platform_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 798507a8ae56..59d5c1ba145a 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1321,10 +1321,16 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
if (wgrp->dc == dc->pipe) {
for (j = 0; j < wgrp->num_windows; j++) {
unsigned int index = wgrp->windows[j];
+ enum drm_plane_type type;
+
+ if (primary)
+ type = DRM_PLANE_TYPE_OVERLAY;
+ else
+ type = DRM_PLANE_TYPE_PRIMARY;
plane = tegra_shared_plane_create(drm, dc,
wgrp->index,
- index);
+ index, type);
if (IS_ERR(plane))
return plane;
@@ -1332,10 +1338,8 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
* Choose the first shared plane owned by this
* head as the primary plane.
*/
- if (!primary) {
- plane->type = DRM_PLANE_TYPE_PRIMARY;
+ if (!primary)
primary = plane;
- }
}
}
}
@@ -1389,7 +1393,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
tegra_crtc_atomic_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ if (state)
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
static struct drm_crtc_state *
diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
index 08fbd8f151a1..990e744b0923 100644
--- a/drivers/gpu/drm/tegra/dp.c
+++ b/drivers/gpu/drm/tegra/dp.c
@@ -256,73 +256,6 @@ int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
}
/**
- * drm_dp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-/**
- * drm_dp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-/**
* drm_dp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h
index cb12ed0c54e7..695060cafac0 100644
--- a/drivers/gpu/drm/tegra/dp.h
+++ b/drivers/gpu/drm/tegra/dp.h
@@ -164,8 +164,6 @@ int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate);
void drm_dp_link_update_rates(struct drm_dp_link *link);
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_choose(struct drm_dp_link *link,
const struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 2cd8dcb959c0..e5297ac5c0fc 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -501,14 +501,9 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
dpaux->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
if (IS_ERR(dpaux->vdd)) {
- if (PTR_ERR(dpaux->vdd) != -ENODEV) {
- if (PTR_ERR(dpaux->vdd) != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get VDD supply: %ld\n",
- PTR_ERR(dpaux->vdd));
-
- return PTR_ERR(dpaux->vdd);
- }
+ if (PTR_ERR(dpaux->vdd) != -ENODEV)
+ return dev_err_probe(&pdev->dev, PTR_ERR(dpaux->vdd),
+ "failed to get VDD supply\n");
dpaux->vdd = NULL;
}
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 9bb077558167..b5089b772267 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1564,7 +1564,6 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
static int tegra_dsi_probe(struct platform_device *pdev)
{
struct tegra_dsi *dsi;
- struct resource *regs;
int err;
dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
@@ -1636,8 +1635,7 @@ static int tegra_dsi_probe(struct platform_device *pdev)
goto remove;
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ dsi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->regs)) {
err = PTR_ERR(dsi->regs);
goto remove;
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index c0d85463eb1a..17f616bbcb45 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -30,6 +30,14 @@ int falcon_wait_idle(struct falcon *falcon)
(value == 0), 10, 100000);
}
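+/* Poll until the DMA request queue can accept another transfer. */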
+static int falcon_dma_wait_not_full(struct falcon *falcon)
+{
+ u32 value;
+
+ return readl_poll_timeout(falcon->regs + FALCON_DMATRFCMD, value,
+ !(value & FALCON_DMATRFCMD_FULL), 10, 100000);
+}
+
static int falcon_dma_wait_idle(struct falcon *falcon)
{
u32 value;
@@ -44,6 +52,7 @@ static int falcon_copy_chunk(struct falcon *falcon,
enum falcon_memory target)
{
u32 cmd = FALCON_DMATRFCMD_SIZE_256B;
+ int err;
if (target == FALCON_MEMORY_IMEM)
cmd |= FALCON_DMATRFCMD_IMEM;
@@ -56,11 +65,15 @@ static int falcon_copy_chunk(struct falcon *falcon,
*/
cmd |= FALCON_DMATRFCMD_DMACTX(1);
+ err = falcon_dma_wait_not_full(falcon);
+ if (err < 0)
+ return err;
+
falcon_writel(falcon, offset, FALCON_DMATRFMOFFS);
falcon_writel(falcon, base, FALCON_DMATRFFBOFFS);
falcon_writel(falcon, cmd, FALCON_DMATRFCMD);
- return falcon_dma_wait_idle(falcon);
+ return 0;
}
static void falcon_copy_firmware_image(struct falcon *falcon,
@@ -191,6 +204,11 @@ int falcon_boot(struct falcon *falcon)
falcon_copy_chunk(falcon, falcon->firmware.code.offset + offset,
offset, FALCON_MEMORY_IMEM);
+ /* wait for DMA to complete */
+ err = falcon_dma_wait_idle(falcon);
+ if (err < 0)
+ return err;
+
/* setup falcon interrupts */
falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
FALCON_IRQMSET_SWGEN1 |
diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h
index 1955cf11a8a6..902bb7e4fd0f 100644
--- a/drivers/gpu/drm/tegra/falcon.h
+++ b/drivers/gpu/drm/tegra/falcon.h
@@ -47,6 +47,7 @@
#define FALCON_DMATRFMOFFS 0x00001114
#define FALCON_DMATRFCMD 0x00001118
+#define FALCON_DMATRFCMD_FULL (1 << 0)
#define FALCON_DMATRFCMD_IDLE (1 << 1)
#define FALCON_DMATRFCMD_IMEM (1 << 4)
#define FALCON_DMATRFCMD_SIZE_256B (6 << 8)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index ace3e5a805cf..dbc1394f96b8 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -17,7 +17,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
-#include <drm/tegra_drm.h>
#include "drm.h"
#include "gem.h"
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index fa6140fc37fb..8f779f23dc09 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -755,9 +755,9 @@ static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int wgrp,
- unsigned int index)
+ unsigned int index,
+ enum drm_plane_type type)
{
- enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub *hub = tegra->hub;
struct tegra_shared_plane *plane;
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 23c4b2115ed1..a66f18c4facc 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -80,7 +80,8 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub);
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int wgrp,
- unsigned int index);
+ unsigned int index,
+ enum drm_plane_type type);
int tegra_display_hub_atomic_check(struct drm_device *drm,
struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 1e8ec50b759e..ff5a749710db 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -200,6 +200,11 @@ static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
.atomic_check = tegra_rgb_encoder_atomic_check,
};
+static void tegra_dc_of_node_put(void *data)
+{
+ of_node_put(data);
+}
+
int tegra_dc_rgb_probe(struct tegra_dc *dc)
{
struct device_node *np;
@@ -207,7 +212,14 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
int err;
np = of_get_child_by_name(dc->dev->of_node, "rgb");
- if (!np || !of_device_is_available(np))
+ if (!np)
+ return -ENODEV;
+
+ err = devm_add_action_or_reset(dc->dev, tegra_dc_of_node_put, np);
+ if (err < 0)
+ return err;
+
+ if (!of_device_is_available(np))
return -ENODEV;
rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index f98f70eda906..21f3dfdcc5c9 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2666,7 +2666,7 @@ static void tegra_sor_dp_disable(struct drm_encoder *encoder)
* the AUX transactions would just be timing out.
*/
if (output->connector.status != connector_status_disconnected) {
- err = drm_dp_link_power_down(sor->aux, &sor->link);
+ err = drm_dp_link_power_down(sor->aux, sor->link.revision);
if (err < 0)
dev_err(sor->dev, "failed to power down link: %d\n",
err);
@@ -2882,7 +2882,7 @@ static void tegra_sor_dp_enable(struct drm_encoder *encoder)
else
dev_dbg(sor->dev, "link training succeeded\n");
- err = drm_dp_link_power_up(sor->aux, &sor->link);
+ err = drm_dp_link_power_up(sor->aux, sor->link.revision);
if (err < 0)
dev_err(sor->dev, "failed to power up DP link: %d\n", err);
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 0109bcf7faa5..3afd6587df08 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -4,7 +4,9 @@ obj-$(CONFIG_DRM_KUNIT_TEST_HELPERS) += \
drm_kunit_helpers.o
obj-$(CONFIG_DRM_KUNIT_TEST) += \
+ drm_atomic_test.o \
drm_atomic_state_test.o \
+ drm_bridge_test.o \
drm_buddy_test.o \
drm_cmdline_parser_test.o \
drm_connector_test.o \
diff --git a/drivers/gpu/drm/tests/drm_atomic_test.c b/drivers/gpu/drm/tests/drm_atomic_test.c
new file mode 100644
index 000000000000..ea91bec6569e
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_atomic_test.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_atomic functions
+ */
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
+#include <kunit/test.h>
+
+struct drm_atomic_test_priv {
+ struct drm_device drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static const struct drm_connector_helper_funcs drm_atomic_init_connector_helper_funcs = {
+};
+
+static const struct drm_connector_funcs drm_atomic_init_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
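+/* Build a minimal plane -> CRTC -> encoder -> connector pipeline for the tests below. */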
+static struct drm_atomic_test_priv *create_device(struct kunit *test)
+{
+ struct drm_atomic_test_priv *priv;
+ struct drm_connector *connector;
+ struct drm_encoder *enc;
+ struct drm_device *drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+
+ priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct drm_atomic_test_priv, drm,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ drm = &priv->drm;
+ plane = drm_kunit_helper_create_primary_plane(test, drm,
+ NULL,
+ NULL,
+ NULL, 0,
+ NULL);
+ if (IS_ERR(plane))
+ return ERR_CAST(plane);
+ priv->plane = plane;
+
+ crtc = drm_kunit_helper_create_crtc(test, drm,
+ plane, NULL,
+ NULL,
+ NULL);
+ if (IS_ERR(crtc))
+ return ERR_CAST(crtc);
+ priv->crtc = crtc;
+
+ enc = &priv->encoder;
+ ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ enc->possible_crtcs = drm_crtc_mask(crtc);
+
+ connector = &priv->connector;
+ ret = drmm_connector_init(drm, connector,
+ &drm_atomic_init_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_helper_add(connector, &drm_atomic_init_connector_helper_funcs);
+
+ drm_connector_attach_encoder(connector, enc);
+
+ drm_mode_config_reset(drm);
+
+ return priv;
+}
+
+static void drm_test_drm_atomic_get_connector_for_encoder(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_test_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_connector *curr_connector;
+ int ret;
+
+ priv = create_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, &priv->drm,
+ priv->crtc, &priv->connector,
+ mode, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_enable;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn:
+ curr_connector = drm_atomic_get_connector_for_encoder(&priv->encoder,
+ &ctx);
+ if (PTR_ERR(curr_connector) == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_conn;
+ }
+ KUNIT_EXPECT_PTR_EQ(test, curr_connector, &priv->connector);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+static struct kunit_case drm_atomic_get_connector_for_encoder_tests[] = {
+ KUNIT_CASE(drm_test_drm_atomic_get_connector_for_encoder),
+ { }
+};
+
+static struct kunit_suite drm_atomic_get_connector_for_encoder_test_suite = {
+ .name = "drm_test_atomic_get_connector_for_encoder",
+ .test_cases = drm_atomic_get_connector_for_encoder_tests,
+};
+
+kunit_test_suite(drm_atomic_get_connector_for_encoder_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_DESCRIPTION("Kunit test for drm_atomic functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_bridge_test.c b/drivers/gpu/drm/tests/drm_bridge_test.c
new file mode 100644
index 000000000000..ff88ec2e911c
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_bridge_test.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_bridge functions
+ */
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_bridge_helper.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include <kunit/test.h>
+
+struct drm_bridge_init_priv {
+ struct drm_device drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder encoder;
+ struct drm_bridge bridge;
+ struct drm_connector *connector;
+ unsigned int enable_count;
+ unsigned int disable_count;
+};
+
+static void drm_test_bridge_enable(struct drm_bridge *bridge)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->enable_count++;
+}
+
+static void drm_test_bridge_disable(struct drm_bridge *bridge)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->disable_count++;
+}
+
+static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = {
+ .enable = drm_test_bridge_enable,
+ .disable = drm_test_bridge_disable,
+};
+
+static void drm_test_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->enable_count++;
+}
+
+static void drm_test_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->disable_count++;
+}
+
+static const struct drm_bridge_funcs drm_test_bridge_atomic_funcs = {
+ .atomic_enable = drm_test_bridge_atomic_enable,
+ .atomic_disable = drm_test_bridge_atomic_disable,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+KUNIT_DEFINE_ACTION_WRAPPER(drm_bridge_remove_wrapper,
+ drm_bridge_remove,
+ struct drm_bridge *);
+
+static int drm_kunit_bridge_add(struct kunit *test,
+ struct drm_bridge *bridge)
+{
+ drm_bridge_add(bridge);
+
+ return kunit_add_action_or_reset(test,
+ drm_bridge_remove_wrapper,
+ bridge);
+}
+
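+/* Build a full display pipeline with the bridge under test attached to its encoder. */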
+static struct drm_bridge_init_priv *
+drm_test_bridge_init(struct kunit *test, const struct drm_bridge_funcs *funcs)
+{
+ struct drm_bridge_init_priv *priv;
+ struct drm_encoder *enc;
+ struct drm_bridge *bridge;
+ struct drm_device *drm;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+
+ priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct drm_bridge_init_priv, drm,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ drm = &priv->drm;
+ priv->plane = drm_kunit_helper_create_primary_plane(test, drm,
+ NULL,
+ NULL,
+ NULL, 0,
+ NULL);
+ if (IS_ERR(priv->plane))
+ return ERR_CAST(priv->plane);
+
+ priv->crtc = drm_kunit_helper_create_crtc(test, drm,
+ priv->plane, NULL,
+ NULL,
+ NULL);
+ if (IS_ERR(priv->crtc))
+ return ERR_CAST(priv->crtc);
+
+ enc = &priv->encoder;
+ ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ enc->possible_crtcs = drm_crtc_mask(priv->crtc);
+
+ bridge = &priv->bridge;
+ bridge->type = DRM_MODE_CONNECTOR_VIRTUAL;
+ bridge->funcs = funcs;
+
+ ret = drm_kunit_bridge_add(test, bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_bridge_attach(enc, bridge, NULL, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv->connector = drm_bridge_connector_init(drm, enc);
+ if (IS_ERR(priv->connector))
+ return ERR_CAST(priv->connector);
+
+ drm_connector_attach_encoder(priv->connector, enc);
+
+ drm_mode_config_reset(drm);
+
+ return priv;
+}
+
+/*
+ * Test that drm_bridge_get_current_state() returns the last committed
+ * state for an atomic bridge.
+ */
+static void drm_test_drm_bridge_get_current_state_atomic(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_bridge_state *curr_bridge_state;
+ struct drm_bridge_state *bridge_state;
+ struct drm_atomic_state *state;
+ struct drm_bridge *bridge;
+ struct drm_device *drm;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ drm = &priv->drm;
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+retry_commit:
+ bridge = &priv->bridge;
+ bridge_state = drm_atomic_get_bridge_state(state, bridge);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bridge_state);
+
+ ret = drm_atomic_commit(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_state:
+ ret = drm_modeset_lock(&bridge->base.lock, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_state;
+ }
+
+ curr_bridge_state = drm_bridge_get_current_state(bridge);
+ KUNIT_EXPECT_PTR_EQ(test, curr_bridge_state, bridge_state);
+
+ drm_modeset_unlock(&bridge->base.lock);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
+ * Test that drm_bridge_get_current_state() returns NULL for a
+ * non-atomic bridge.
+ */
+static void drm_test_drm_bridge_get_current_state_legacy(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv;
+ struct drm_bridge *bridge;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ /*
+ * NOTE: Strictly speaking, we should take the bridge->base.lock
+ * before calling that function. However, bridge->base is only
+ * initialized if the bridge is atomic, while we explicitly
+ * initialize one that isn't there.
+ *
+ * In order to avoid unnecessary warnings, let's skip the
+ * locking. The function would return NULL in all cases anyway,
+ * so we don't really have any concurrency to worry about.
+ */
+ bridge = &priv->bridge;
+ KUNIT_EXPECT_NULL(test, drm_bridge_get_current_state(bridge));
+}
+
+static struct kunit_case drm_bridge_get_current_state_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_get_current_state_atomic),
+ KUNIT_CASE(drm_test_drm_bridge_get_current_state_legacy),
+ { }
+};
+
+static struct kunit_suite drm_bridge_get_current_state_test_suite = {
+ .name = "drm_test_bridge_get_current_state",
+ .test_cases = drm_bridge_get_current_state_tests,
+};
+
+/*
+ * Test that an atomic bridge is properly power-cycled when calling
+ * drm_bridge_helper_reset_crtc().
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_atomic(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge *bridge;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_commit:
+ ret = drm_kunit_helper_enable_crtc_connector(test,
+ &priv->drm, priv->crtc,
+ priv->connector,
+ mode,
+ &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ bridge = &priv->bridge;
+ KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+}
+
+/*
+ * Test that calling drm_bridge_helper_reset_crtc() on a disabled atomic
+ * bridge will fail and not call the enable / disable callbacks
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_atomic_disabled(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge *bridge;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ bridge = &priv->bridge;
+ KUNIT_ASSERT_EQ(test, priv->enable_count, 0);
+ KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_EXPECT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, priv->enable_count, 0);
+ KUNIT_EXPECT_EQ(test, priv->disable_count, 0);
+}
+
+/*
+ * Test that a non-atomic bridge is properly power-cycled when calling
+ * drm_bridge_helper_reset_crtc().
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_legacy(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge *bridge;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_commit:
+ ret = drm_kunit_helper_enable_crtc_connector(test,
+ &priv->drm, priv->crtc,
+ priv->connector,
+ mode,
+ &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ bridge = &priv->bridge;
+ KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+}
+
+static struct kunit_case drm_bridge_helper_reset_crtc_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic),
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic_disabled),
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_legacy),
+ { }
+};
+
+static struct kunit_suite drm_bridge_helper_reset_crtc_test_suite = {
+ .name = "drm_test_bridge_helper_reset_crtc",
+ .test_cases = drm_bridge_helper_reset_crtc_tests,
+};
+
+kunit_test_suites(
+ &drm_bridge_get_current_state_test_suite,
+ &drm_bridge_helper_reset_crtc_test_suite,
+);
+
+MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_DESCRIPTION("Kunit test for drm_bridge functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c
index 7516f6cb36e4..3f44fe5e92e4 100644
--- a/drivers/gpu/drm/tests/drm_client_modeset_test.c
+++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c
@@ -88,13 +88,17 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
struct drm_device *drm = priv->drm;
struct drm_connector *connector = &priv->connector;
struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
- struct drm_display_mode *expected_mode, *mode;
+ struct drm_display_mode *expected_mode;
+ const struct drm_display_mode *mode;
const char *cmdline = "1920x1080@60";
int ret;
expected_mode = drm_mode_find_dmt(priv->drm, 1920, 1080, 60, false);
KUNIT_ASSERT_NOT_NULL(test, expected_mode);
+ ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_ASSERT_TRUE(test,
drm_mode_parse_command_line_for_connector(cmdline,
connector,
@@ -129,7 +133,8 @@ static void drm_test_pick_cmdline_named(struct kunit *test)
struct drm_device *drm = priv->drm;
struct drm_connector *connector = &priv->connector;
struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
- const struct drm_display_mode *expected_mode, *mode;
+ const struct drm_display_mode *mode;
+ struct drm_display_mode *expected_mode;
const char *cmdline = params->cmdline;
int ret;
@@ -149,6 +154,9 @@ static void drm_test_pick_cmdline_named(struct kunit *test)
expected_mode = params->func(drm);
KUNIT_ASSERT_NOT_NULL(test, expected_mode);
+ ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected_mode, mode));
}
diff --git a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
index 59c8408c453c..1cfcb597b088 100644
--- a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
+++ b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
@@ -7,6 +7,7 @@
#include <kunit/test.h>
#include <drm/drm_connector.h>
+#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modes.h>
static const struct drm_connector no_connector = {};
@@ -955,8 +956,15 @@ struct drm_cmdline_tv_option_test {
static void drm_test_cmdline_tv_options(struct kunit *test)
{
const struct drm_cmdline_tv_option_test *params = test->param_value;
- const struct drm_display_mode *expected_mode = params->mode_fn(NULL);
+ struct drm_display_mode *expected_mode;
struct drm_cmdline_mode mode = { };
+ int ret;
+
+ expected_mode = params->mode_fn(NULL);
+ KUNIT_ASSERT_NOT_NULL(test, expected_mode);
+
+ ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_EXPECT_TRUE(test, drm_mode_parse_command_line_for_connector(params->cmdline,
&no_connector, &mode));
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
index fd4215e2f982..68f2c3162354 100644
--- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -134,7 +134,7 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test)
shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -142,14 +142,14 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test)
ret = drm_gem_shmem_pin(shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);
for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);
drm_gem_shmem_unpin(shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);
}
/*
@@ -168,24 +168,24 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
KUNIT_EXPECT_NULL(test, shmem->vaddr);
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
- ret = drm_gem_shmem_vmap(shmem, &map);
+ ret = drm_gem_shmem_vmap_locked(shmem, &map);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 1);
iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
for (i = 0; i < TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);
- drm_gem_shmem_vunmap(shmem, &map);
+ drm_gem_shmem_vunmap_locked(shmem, &map);
KUNIT_EXPECT_NULL(test, shmem->vaddr);
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
}
/*
@@ -216,6 +216,9 @@ static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
KUNIT_EXPECT_NULL(test, shmem->sgt);
+ ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -251,7 +254,7 @@ static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
sgt = drm_gem_shmem_get_pages_sgt(shmem);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);
KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);
for_each_sgtable_sg(sgt, sg, si) {
@@ -281,17 +284,17 @@ static void drm_gem_shmem_test_madvise(struct kunit *test)
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
- ret = drm_gem_shmem_madvise(shmem, 1);
+ ret = drm_gem_shmem_madvise_locked(shmem, 1);
KUNIT_EXPECT_TRUE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, 1);
/* Set madv to a negative value */
- ret = drm_gem_shmem_madvise(shmem, -1);
+ ret = drm_gem_shmem_madvise_locked(shmem, -1);
KUNIT_EXPECT_FALSE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, -1);
/* Check that madv cannot be set back to a positive value */
- ret = drm_gem_shmem_madvise(shmem, 0);
+ ret = drm_gem_shmem_madvise_locked(shmem, 0);
KUNIT_EXPECT_FALSE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, -1);
}
@@ -319,7 +322,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test)
ret = drm_gem_shmem_is_purgeable(shmem);
KUNIT_EXPECT_FALSE(test, ret);
- ret = drm_gem_shmem_madvise(shmem, 1);
+ ret = drm_gem_shmem_madvise_locked(shmem, 1);
KUNIT_EXPECT_TRUE(test, ret);
/* The scatter/gather table will be freed by drm_gem_shmem_free */
@@ -329,7 +332,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test)
ret = drm_gem_shmem_is_purgeable(shmem);
KUNIT_EXPECT_TRUE(test, ret);
- drm_gem_shmem_purge(shmem);
+ drm_gem_shmem_purge_locked(shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
KUNIT_EXPECT_NULL(test, shmem->sgt);
KUNIT_EXPECT_EQ(test, shmem->madv, -1);
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index e97efd3af9ed..7ffd666753b1 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -55,49 +55,6 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec
return preferred;
}
-static int light_up_connector(struct kunit *test,
- struct drm_device *drm,
- struct drm_crtc *crtc,
- struct drm_connector *connector,
- struct drm_display_mode *mode,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_atomic_state *state;
- struct drm_connector_state *conn_state;
- struct drm_crtc_state *crtc_state;
- int ret;
-
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
-
-retry:
- conn_state = drm_atomic_get_connector_state(state, connector);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
-
- ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
- if (ret == -EDEADLK) {
- drm_atomic_state_clear(state);
- ret = drm_modeset_backoff(ctx);
- if (!ret)
- goto retry;
- }
- KUNIT_EXPECT_EQ(test, ret, 0);
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
-
- ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
- KUNIT_EXPECT_EQ(test, ret, 0);
-
- crtc_state->enable = true;
- crtc_state->active = true;
-
- ret = drm_atomic_commit(state);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- return 0;
-}
-
static int set_connector_edid(struct kunit *test, struct drm_connector *connector,
const char *edid, size_t edid_len)
{
@@ -298,7 +255,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -364,7 +324,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -432,7 +395,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -489,7 +455,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -547,7 +516,10 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -606,7 +578,10 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -666,7 +641,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -725,7 +703,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -789,7 +770,10 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -865,7 +849,10 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -941,7 +928,10 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -988,7 +978,10 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1037,7 +1030,10 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1086,7 +1082,10 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1134,7 +1133,10 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
/* You shouldn't be doing that at home. */
@@ -1208,7 +1210,10 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1282,7 +1287,10 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1347,7 +1355,10 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1414,7 +1425,10 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1483,7 +1497,10 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1543,7 +1560,10 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1605,7 +1625,10 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1645,7 +1668,10 @@ static void drm_test_check_disable_connector(struct kunit *test)
drm = &priv->drm;
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index a4eb68f0decc..5f7257840d8e 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -2,6 +2,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
@@ -271,6 +272,66 @@ drm_kunit_helper_create_crtc(struct kunit *test,
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc);
+/**
+ * drm_kunit_helper_enable_crtc_connector - Enables a CRTC -> Connector output
+ * @test: The test context object
+ * @drm: The DRM device to commit the atomic update on
+ * @crtc: The CRTC to enable
+ * @connector: The Connector to enable
+ * @mode: The display mode to configure the CRTC with
+ * @ctx: Locking context
+ *
+ * This function creates an atomic update to enable the route from @crtc
+ * to @connector, with the given @mode.
+ *
+ * Returns:
+ *
+ * 0 on success, or a negative error code otherwise. If the error
+ * returned is -EDEADLK, the entire atomic sequence must be restarted.
+ */
+int drm_kunit_helper_enable_crtc_connector(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+ if (ret)
+ return ret;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
+ if (ret)
+ return ret;
+
+ crtc_state->enable = true;
+ crtc_state->active = true;
+
+ ret = drm_atomic_commit(state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_enable_crtc_connector);
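[Editorial note: for orientation, a minimal sketch of how a test might call the new helper. Only the helper's signature documented above comes from this patch; the fixture layout (example_priv and its members) is an assumption made up for illustration.]

	/*
	 * Hypothetical KUnit test body using drm_kunit_helper_enable_crtc_connector().
	 * Only the helper itself comes from this patch; the fixture is invented.
	 */
	static void example_enable_output(struct kunit *test)
	{
		struct example_priv *priv = test->priv;	/* hypothetical fixture */
		struct drm_modeset_acquire_ctx ctx;
		struct drm_device *drm = &priv->drm;
		struct drm_crtc *crtc = priv->crtc;
		struct drm_connector *conn = &priv->connector;
		const struct drm_display_mode *mode = priv->preferred;
		int ret;

		drm_modeset_acquire_init(&ctx, 0);

		/* Builds and commits one atomic state routing crtc -> conn with mode. */
		ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
							     mode, &ctx);
		KUNIT_ASSERT_EQ(test, ret, 0);

		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);
	}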
+
static void kunit_action_drm_mode_destroy(void *ptr)
{
struct drm_display_mode *mode = ptr;
@@ -279,6 +340,28 @@ static void kunit_action_drm_mode_destroy(void *ptr)
}
/**
+ * drm_kunit_add_mode_destroy_action() - Add a drm_mode_destroy() kunit action
+ * @test: The test context object
+ * @mode: The drm_display_mode to destroy eventually
+ *
+ * Registers a kunit action that will destroy the drm_display_mode at
+ * the end of the test.
+ *
+ * If registering the action fails, the drm_display_mode is destroyed immediately.
+ *
+ * Returns:
+ * 0 on success, an error code otherwise.
+ */
+int drm_kunit_add_mode_destroy_action(struct kunit *test,
+ struct drm_display_mode *mode)
+{
+ return kunit_add_action_or_reset(test,
+ kunit_action_drm_mode_destroy,
+ mode);
+}
+EXPORT_SYMBOL_GPL(drm_kunit_add_mode_destroy_action);
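[Editorial note: a compact usage sketch of the new action helper; the test-suite hunks below do the same thing. The allocator named here is hypothetical, standing in for drm_analog_tv_mode() and similar helpers.]

	struct drm_display_mode *mode;
	int ret;

	/* 'example_allocate_mode' is a stand-in for any helper that returns a
	 * drm_display_mode the test must eventually free. */
	mode = example_allocate_mode(priv->drm);
	KUNIT_ASSERT_NOT_NULL(test, mode);

	/* Destroy the mode automatically at the end of the test; if registering
	 * the action fails, the mode has already been destroyed. */
	ret = drm_kunit_add_mode_destroy_action(test, mode);
	KUNIT_ASSERT_EQ(test, ret, 0);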
+
+/**
* drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
* @test: The test context object
* @dev: DRM device
diff --git a/drivers/gpu/drm/tests/drm_modes_test.c b/drivers/gpu/drm/tests/drm_modes_test.c
index 6ed51f99e133..f5b20f92df8b 100644
--- a/drivers/gpu/drm/tests/drm_modes_test.c
+++ b/drivers/gpu/drm/tests/drm_modes_test.c
@@ -40,6 +40,7 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *mode;
+ int ret;
mode = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_NTSC,
@@ -47,6 +48,9 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 60);
KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
@@ -70,6 +74,7 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *expected, *mode;
+ int ret;
expected = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_NTSC,
@@ -77,9 +82,15 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, expected);
+ ret = drm_kunit_add_mode_destroy_action(test, expected);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
mode = drm_mode_analog_ntsc_480i(priv->drm);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
}
@@ -87,6 +98,7 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *mode;
+ int ret;
mode = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_PAL,
@@ -94,6 +106,9 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50);
KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
@@ -117,6 +132,7 @@ static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *expected, *mode;
+ int ret;
expected = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_PAL,
@@ -124,9 +140,15 @@ static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, expected);
+ ret = drm_kunit_add_mode_destroy_action(test, expected);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
mode = drm_mode_analog_pal_576i(priv->drm);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
}
@@ -134,6 +156,7 @@ static void drm_test_modes_analog_tv_mono_576i(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *mode;
+ int ret;
mode = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_MONOCHROME,
@@ -141,6 +164,9 @@ static void drm_test_modes_analog_tv_mono_576i(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50);
KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
diff --git a/drivers/gpu/drm/tests/drm_probe_helper_test.c b/drivers/gpu/drm/tests/drm_probe_helper_test.c
index bc09ff38aca1..db0e4f5df275 100644
--- a/drivers/gpu/drm/tests/drm_probe_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_probe_helper_test.c
@@ -98,7 +98,7 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
struct drm_connector *connector = &priv->connector;
struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
struct drm_display_mode *mode;
- const struct drm_display_mode *expected;
+ struct drm_display_mode *expected;
size_t len;
int ret;
@@ -134,6 +134,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
KUNIT_EXPECT_TRUE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
+
+ ret = drm_kunit_add_mode_destroy_action(test, expected);
+ KUNIT_ASSERT_EQ(test, ret, 0);
}
if (params->num_expected_modes >= 2) {
@@ -145,6 +148,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
KUNIT_EXPECT_FALSE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
+
+ ret = drm_kunit_add_mode_destroy_action(test, expected);
+ KUNIT_ASSERT_EQ(test, ret, 0);
}
mutex_unlock(&priv->drm->mode_config.mutex);
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
index 17a86bed8054..95b4aeff2775 100644
--- a/drivers/gpu/drm/tidss/tidss_encoder.c
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -34,11 +34,12 @@ static inline struct tidss_encoder
}
static int tidss_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tidss_encoder *t_enc = bridge_to_tidss_encoder(bridge);
- return drm_bridge_attach(bridge->encoder, t_enc->next_bridge,
+ return drm_bridge_attach(encoder, t_enc->next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 54c84c9801c1..06e54694a7f2 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -3,6 +3,7 @@
config DRM_APPLETBDRM
tristate "DRM support for Apple Touch Bars"
depends on DRM && USB && MMU
+ depends on X86 || COMPILE_TEST
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
@@ -37,7 +38,7 @@ config DRM_BOCHS
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
@@ -65,20 +66,6 @@ config DRM_GM12U320
This is a KMS driver for projectors which use the GM12U320 chipset
for video transfer over USB2/3, such as the Acer C120 mini projector.
-config DRM_OFDRM
- tristate "Open Firmware display driver"
- depends on DRM && MMU && OF && (PPC || COMPILE_TEST)
- select APERTURE_HELPERS
- select DRM_CLIENT_SELECTION
- select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
- help
- DRM driver for Open Firmware framebuffers.
-
- This driver assumes that the display hardware has been initialized
- by the Open Firmware before the kernel boots. Scanout buffer, size,
- and display format must be provided via device tree.
-
config DRM_PANEL_MIPI_DBI
tristate "DRM support for MIPI DBI compatible panels"
depends on DRM && SPI
@@ -95,24 +82,6 @@ config DRM_PANEL_MIPI_DBI
https://github.com/notro/panel-mipi-dbi/wiki.
To compile this driver as a module, choose M here.
-config DRM_SIMPLEDRM
- tristate "Simple framebuffer driver"
- depends on DRM && MMU
- select APERTURE_HELPERS
- select DRM_CLIENT_SELECTION
- select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
- help
- DRM driver for simple platform-provided framebuffers.
-
- This driver assumes that the display hardware has been initialized
- by the firmware or bootloader before the kernel boots. Scanout
- buffer, size, and display format must be provided via device tree,
- UEFI, VESA, etc.
-
- On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB
- to use UEFI and VESA framebuffers.
-
config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
@@ -230,32 +199,3 @@ config TINYDRM_SHARP_MEMORY
* 4.40" Sharp Memory LCD (LS044Q7DH01)
If M is selected the module will be called sharp_memory.
-
-config TINYDRM_ST7586
- tristate "DRM support for Sitronix ST7586 display panels"
- depends on DRM && SPI
- select DRM_CLIENT_SELECTION
- select DRM_KMS_HELPER
- select DRM_GEM_DMA_HELPER
- select DRM_MIPI_DBI
- help
- DRM driver for the following Sitronix ST7586 panels:
- * LEGO MINDSTORMS EV3
-
- If M is selected the module will be called st7586.
-
-config TINYDRM_ST7735R
- tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
- depends on DRM && SPI
- select DRM_CLIENT_SELECTION
- select DRM_KMS_HELPER
- select DRM_GEM_DMA_HELPER
- select DRM_MIPI_DBI
- select BACKLIGHT_CLASS_DEVICE
- help
- DRM driver for Sitronix ST7715R/ST7735R with one of the following
- LCDs:
- * Jianda JD-T18003-T01 1.8" 128x160 TFT
- * Okaya RH128128T 1.44" 128x128 TFT
-
- If M is selected the module will be called st7735r.
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 0a3a7837a58b..4a9ff61ec254 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -5,9 +5,7 @@ obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_BOCHS) += bochs.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
-obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o
-obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9163) += ili9163.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
@@ -16,5 +14,3 @@ obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
obj-$(CONFIG_TINYDRM_SHARP_MEMORY) += sharp-memory.o
-obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
-obj-$(CONFIG_TINYDRM_ST7735R) += st7735r.o
diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c
index 4370ba22dd88..751b05753c94 100644
--- a/drivers/gpu/drm/tiny/appletbdrm.c
+++ b/drivers/gpu/drm/tiny/appletbdrm.c
@@ -45,7 +45,7 @@
#define APPLETBDRM_BULK_MSG_TIMEOUT 1000
#define drm_to_adev(_drm) container_of(_drm, struct appletbdrm_device, drm)
-#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface(adev->dmadev))
+#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface((adev)->drm.dev))
struct appletbdrm_msg_request_header {
__le16 unk_00;
@@ -123,8 +123,6 @@ struct appletbdrm_fb_request_response {
} __packed;
struct appletbdrm_device {
- struct device *dmadev;
-
unsigned int in_ep;
unsigned int out_ep;
@@ -214,7 +212,7 @@ retry:
}
if (response->msg != expected_response) {
- drm_err(drm, "Unexpected response from device (expected %p4cc found %p4cc)\n",
+ drm_err(drm, "Unexpected response from device (expected %p4cl found %p4cl)\n",
&expected_response, &response->msg);
return -EIO;
}
@@ -288,7 +286,7 @@ static int appletbdrm_get_information(struct appletbdrm_device *adev)
}
if (pixel_format != APPLETBDRM_PIXEL_FORMAT) {
- drm_err(drm, "Encountered unknown pixel format (%p4cc)\n", &pixel_format);
+ drm_err(drm, "Encountered unknown pixel format (%p4cl)\n", &pixel_format);
ret = -EINVAL;
goto free_info;
}
@@ -612,22 +610,10 @@ static const struct drm_encoder_funcs appletbdrm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static struct drm_gem_object *appletbdrm_driver_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct appletbdrm_device *adev = drm_to_adev(dev);
-
- if (!adev->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, adev->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(appletbdrm_drm_fops);
static const struct drm_driver appletbdrm_drm_driver = {
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = appletbdrm_driver_gem_prime_import,
.name = "appletbdrm",
.desc = "Apple Touch Bar DRM Driver",
.major = 1,
@@ -747,6 +733,7 @@ static int appletbdrm_probe(struct usb_interface *intf,
struct device *dev = &intf->dev;
struct appletbdrm_device *adev;
struct drm_device *drm = NULL;
+ struct device *dma_dev;
int ret;
ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL);
@@ -761,12 +748,19 @@ static int appletbdrm_probe(struct usb_interface *intf,
adev->in_ep = bulk_in->bEndpointAddress;
adev->out_ep = bulk_out->bEndpointAddress;
- adev->dmadev = dev;
drm = &adev->drm;
usb_set_intfdata(intf, adev);
+ dma_dev = usb_intf_get_dma_device(intf);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(drm, dma_dev);
+ put_device(dma_dev);
+ } else {
+ drm_warn(drm, "buffer sharing not supported"); /* not an error */
+ }
+
ret = appletbdrm_get_information(adev);
if (ret) {
drm_err(drm, "Failed to get display information\n");
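[Editorial note: the same probe-time pattern recurs for gm12u320 and udl further down. A hedged sketch of its general shape, using only calls visible in this patch; the immediate put_device() after drm_dev_set_dma_dev() mirrors the patch and suggests the DRM core keeps its own reference.]

	/* Sketch: point the DRM device at the USB interface's DMA-capable
	 * parent once during probe, then drop the local reference. */
	static void example_setup_dma_dev(struct drm_device *drm,
					  struct usb_interface *intf)
	{
		struct device *dma_dev;

		dma_dev = usb_intf_get_dma_device(intf);
		if (dma_dev) {
			drm_dev_set_dma_dev(drm, dma_dev);
			put_device(dma_dev);
		} else {
			drm_warn(drm, "buffer sharing not supported"); /* not an error */
		}
	}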
diff --git a/drivers/gpu/drm/tiny/cirrus-qemu.c b/drivers/gpu/drm/tiny/cirrus-qemu.c
index 52ec1e4ea9e5..97a93adc5669 100644
--- a/drivers/gpu/drm/tiny/cirrus-qemu.c
+++ b/drivers/gpu/drm/tiny/cirrus-qemu.c
@@ -70,20 +70,6 @@ struct cirrus_device {
#define to_cirrus(_dev) container_of(_dev, struct cirrus_device, dev)
-struct cirrus_primary_plane_state {
- struct drm_shadow_plane_state base;
-
- /* HW scanout buffer */
- const struct drm_format_info *format;
- unsigned int pitch;
-};
-
-static inline struct cirrus_primary_plane_state *
-to_cirrus_primary_plane_state(struct drm_plane_state *plane_state)
-{
- return container_of(plane_state, struct cirrus_primary_plane_state, base.base);
-};
-
/* ------------------------------------------------------------------ */
/*
* The meat of this driver. The core passes us a mode and we have to program
@@ -144,37 +130,6 @@ static void wreg_hdr(struct cirrus_device *cirrus, u8 val)
iowrite8(val, cirrus->mmio + VGA_DAC_MASK);
}
-static const struct drm_format_info *cirrus_convert_to(struct drm_framebuffer *fb)
-{
- if (fb->format->format == DRM_FORMAT_XRGB8888 && fb->pitches[0] > CIRRUS_MAX_PITCH) {
- if (fb->width * 3 <= CIRRUS_MAX_PITCH)
- /* convert from XR24 to RG24 */
- return drm_format_info(DRM_FORMAT_RGB888);
- else
- /* convert from XR24 to RG16 */
- return drm_format_info(DRM_FORMAT_RGB565);
- }
- return NULL;
-}
-
-static const struct drm_format_info *cirrus_format(struct drm_framebuffer *fb)
-{
- const struct drm_format_info *format = cirrus_convert_to(fb);
-
- if (format)
- return format;
- return fb->format;
-}
-
-static int cirrus_pitch(struct drm_framebuffer *fb)
-{
- const struct drm_format_info *format = cirrus_convert_to(fb);
-
- if (format)
- return drm_format_info_min_pitch(format, 0, fb->width);
- return fb->pitches[0];
-}
-
static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
{
u32 addr;
@@ -318,7 +273,6 @@ static void cirrus_pitch_set(struct cirrus_device *cirrus, unsigned int pitch)
/* Enable extended blanking and pitch bits, and enable full memory */
cr1b = 0x22;
cr1b |= (pitch >> 7) & 0x10;
- cr1b |= (pitch >> 6) & 0x40;
wreg_crt(cirrus, 0x1b, cr1b);
cirrus_set_start_address(cirrus, 0);
@@ -342,13 +296,10 @@ static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct cirrus_primary_plane_state *new_primary_plane_state =
- to_cirrus_primary_plane_state(new_plane_state);
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc *new_crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state = NULL;
int ret;
- unsigned int pitch;
if (new_crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
@@ -362,17 +313,12 @@ static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane,
else if (!new_plane_state->visible)
return 0;
- pitch = cirrus_pitch(fb);
-
/* validate size constraints */
- if (pitch > CIRRUS_MAX_PITCH)
+ if (fb->pitches[0] > CIRRUS_MAX_PITCH)
return -EINVAL;
- else if (pitch * fb->height > CIRRUS_VRAM_SIZE)
+ else if (fb->pitches[0] > CIRRUS_VRAM_SIZE / fb->height)
return -EINVAL;
- new_primary_plane_state->format = cirrus_format(fb);
- new_primary_plane_state->pitch = pitch;
-
return 0;
}
@@ -381,15 +327,10 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
{
struct cirrus_device *cirrus = to_cirrus(plane->dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
- const struct drm_format_info *format = primary_plane_state->format;
- unsigned int pitch = primary_plane_state->pitch;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct cirrus_primary_plane_state *old_primary_plane_state =
- to_cirrus_primary_plane_state(old_plane_state);
+ struct drm_framebuffer *old_fb = old_plane_state->fb;
struct iosys_map vaddr = IOSYS_MAP_INIT_VADDR_IOMEM(cirrus->vram);
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
@@ -401,18 +342,17 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
if (!drm_dev_enter(&cirrus->dev, &idx))
return;
- if (old_primary_plane_state->format != format)
- cirrus_format_set(cirrus, format);
- if (old_primary_plane_state->pitch != pitch)
- cirrus_pitch_set(cirrus, pitch);
+ if (!old_fb || old_fb->format != fb->format)
+ cirrus_format_set(cirrus, fb->format);
+ if (!old_fb || old_fb->pitches[0] != fb->pitches[0])
+ cirrus_pitch_set(cirrus, fb->pitches[0]);
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
- unsigned int offset = drm_fb_clip_offset(pitch, format, &damage);
+ unsigned int offset = drm_fb_clip_offset(fb->pitches[0], fb->format, &damage);
struct iosys_map dst = IOSYS_MAP_INIT_OFFSET(&vaddr, offset);
- drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb,
- &damage, &shadow_plane_state->fmtcnv_state);
+ drm_fb_memcpy(&dst, fb->pitches, shadow_plane_state->data, fb, &damage);
}
drm_dev_exit(idx);
@@ -424,62 +364,11 @@ static const struct drm_plane_helper_funcs cirrus_primary_plane_helper_funcs = {
.atomic_update = cirrus_primary_plane_helper_atomic_update,
};
-static struct drm_plane_state *
-cirrus_primary_plane_atomic_duplicate_state(struct drm_plane *plane)
-{
- struct drm_plane_state *plane_state = plane->state;
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
- struct cirrus_primary_plane_state *new_primary_plane_state;
- struct drm_shadow_plane_state *new_shadow_plane_state;
-
- if (!plane_state)
- return NULL;
-
- new_primary_plane_state = kzalloc(sizeof(*new_primary_plane_state), GFP_KERNEL);
- if (!new_primary_plane_state)
- return NULL;
- new_shadow_plane_state = &new_primary_plane_state->base;
-
- __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
- new_primary_plane_state->format = primary_plane_state->format;
- new_primary_plane_state->pitch = primary_plane_state->pitch;
-
- return &new_shadow_plane_state->base;
-}
-
-static void cirrus_primary_plane_atomic_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *plane_state)
-{
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
-
- __drm_gem_destroy_shadow_plane_state(&primary_plane_state->base);
- kfree(primary_plane_state);
-}
-
-static void cirrus_reset_primary_plane(struct drm_plane *plane)
-{
- struct cirrus_primary_plane_state *primary_plane_state;
-
- if (plane->state) {
- cirrus_primary_plane_atomic_destroy_state(plane, plane->state);
- plane->state = NULL; /* must be set to NULL here */
- }
-
- primary_plane_state = kzalloc(sizeof(*primary_plane_state), GFP_KERNEL);
- if (!primary_plane_state)
- return;
- __drm_gem_reset_shadow_plane(plane, &primary_plane_state->base);
-}
-
static const struct drm_plane_funcs cirrus_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .reset = cirrus_reset_primary_plane,
- .atomic_duplicate_state = cirrus_primary_plane_atomic_duplicate_state,
- .atomic_destroy_state = cirrus_primary_plane_atomic_destroy_state,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
static int cirrus_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
@@ -614,9 +503,17 @@ static enum drm_mode_status cirrus_mode_config_mode_valid(struct drm_device *dev
const struct drm_display_mode *mode)
{
const struct drm_format_info *format = drm_format_info(DRM_FORMAT_XRGB8888);
- uint64_t pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
+ u64 pitch;
- if (pitch * mode->vdisplay > CIRRUS_VRAM_SIZE)
+ if (drm_WARN_ON_ONCE(dev, !format))
+ return MODE_ERROR; /* driver bug */
+
+ pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
+ if (!pitch)
+ return MODE_BAD_WIDTH;
+ if (pitch > CIRRUS_MAX_PITCH)
+ return MODE_BAD_WIDTH; /* maximum programmable pitch */
+ if (pitch > CIRRUS_VRAM_SIZE / mode->vdisplay)
return MODE_MEM;
return MODE_OK;
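[Editorial note: a hedged restatement of the size check above as a tiny self-contained helper. The VRAM constant is a placeholder, and phrasing the bound as a division keeps the comparison free of any multiplication overflow regardless of pitch and vdisplay.]

	#define DEMO_VRAM_SIZE	(4 * 1024 * 1024)	/* placeholder, not the driver's value */

	/* True if a framebuffer of 'pitch' bytes per line and 'vdisplay' lines
	 * fits into the demo VRAM size. */
	static bool demo_mode_fits_vram(u64 pitch, unsigned int vdisplay)
	{
		if (!vdisplay)
			return false;
		return pitch <= DEMO_VRAM_SIZE / vdisplay;
	}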
@@ -681,7 +578,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_request_regions(pdev, DRIVER_NAME);
+ ret = pcim_request_all_regions(pdev, DRIVER_NAME);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 41e9bfb2e2ff..fb0004166f4a 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -86,7 +86,6 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
struct gm12u320_device {
struct drm_device dev;
- struct device *dmadev;
struct drm_simple_display_pipe pipe;
struct drm_connector conn;
unsigned char *cmd_buf;
@@ -602,22 +601,6 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct gm12u320_device *gm12u320 = to_gm12u320(dev);
-
- if (!gm12u320->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static const struct drm_driver gm12u320_drm_driver = {
@@ -630,7 +613,6 @@ static const struct drm_driver gm12u320_drm_driver = {
.fops = &gm12u320_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = gm12u320_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
};
@@ -645,6 +627,7 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
{
struct gm12u320_device *gm12u320;
struct drm_device *dev;
+ struct device *dma_dev;
int ret;
/*
@@ -660,16 +643,20 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
return PTR_ERR(gm12u320);
dev = &gm12u320->dev;
- gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
- if (!gm12u320->dmadev)
+ dma_dev = usb_intf_get_dma_device(interface);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(dev, dma_dev);
+ put_device(dma_dev);
+ } else {
drm_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
mutex_init(&gm12u320->fb_update.lock);
ret = drmm_mode_config_init(dev);
if (ret)
- goto err_put_device;
+ return ret;
dev->mode_config.min_width = GM12U320_USER_WIDTH;
dev->mode_config.max_width = GM12U320_USER_WIDTH;
@@ -679,15 +666,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
ret = gm12u320_usb_alloc(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = gm12u320_set_ecomode(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = gm12u320_conn_init(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = drm_simple_display_pipe_init(&gm12u320->dev,
&gm12u320->pipe,
@@ -697,31 +684,24 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
gm12u320_pipe_modifiers,
&gm12u320->conn);
if (ret)
- goto err_put_device;
+ return ret;
drm_mode_config_reset(dev);
usb_set_intfdata(interface, dev);
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_put_device;
+ return ret;
drm_client_setup(dev, NULL);
return 0;
-
-err_put_device:
- put_device(gm12u320->dmadev);
- return ret;
}
static void gm12u320_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = to_gm12u320(dev);
- put_device(gm12u320->dmadev);
- gm12u320->dmadev = NULL;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
}
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index 0460ecaef4bd..23914a9f7fd3 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -390,7 +390,10 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- drm_client_setup(drm, NULL);
+ if (bpp == 16)
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565);
+ else
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888);
return 0;
}
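[Editorial note: a hedged sketch of the format choice just introduced; the client format is derived from the panel's bpp instead of being left unset. Only the 16- and 24-bpp cases handled by the driver are assumed.]

	#include <drm/drm_fourcc.h>

	/* Mirror of the bpp -> fourcc decision above; other depths are not
	 * handled by the driver and are out of scope here. */
	static u32 demo_client_fourcc(unsigned int bpp)
	{
		return (bpp == 16) ? DRM_FORMAT_RGB565 : DRM_FORMAT_RGB888;
	}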
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index f8f20d2f6174..560d12e50e9e 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -201,7 +201,7 @@ static int threaded_ttm_bo_reserve(void *arg)
err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
timer_delete_sync(&s_timer.timer);
- destroy_timer_on_stack(&s_timer.timer);
+ timer_destroy_on_stack(&s_timer.timer);
ww_acquire_fini(&ctx);
@@ -340,7 +340,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+ KUNIT_ASSERT_NOT_NULL(test, resv);
err = ttm_device_kunit_init(priv, ttm_dev, false, false);
KUNIT_ASSERT_EQ(test, err, 0);
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
index 93c007f18855..ffaab68bd5dd 100644
--- a/drivers/gpu/drm/ttm/ttm_backup.c
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -8,20 +8,6 @@
#include <linux/swap.h>
/*
- * Casting from randomized struct file * to struct ttm_backup * is fine since
- * struct ttm_backup is never defined nor dereferenced.
- */
-static struct file *ttm_backup_to_file(struct ttm_backup *backup)
-{
- return (void *)backup;
-}
-
-static struct ttm_backup *ttm_file_to_backup(struct file *file)
-{
- return (void *)file;
-}
-
-/*
* Need to map shmem indices to handle since a handle value
* of 0 means error, following the swp_entry_t convention.
*/
@@ -40,12 +26,12 @@ static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
* @backup: The struct backup pointer used to obtain the handle
* @handle: The handle obtained from the @backup_page function.
*/
-void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
+void ttm_backup_drop(struct file *backup, pgoff_t handle)
{
loff_t start = ttm_backup_handle_to_shmem_idx(handle);
start <<= PAGE_SHIFT;
- shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start,
+ shmem_truncate_range(file_inode(backup), start,
start + PAGE_SIZE - 1);
}
@@ -55,16 +41,15 @@ void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
* @backup: The struct backup pointer used to back up the page.
* @dst: The struct page to copy into.
* @handle: The handle returned when the page was backed up.
- * @intr: Try to perform waits interruptable or at least killable.
+ * @intr: Try to perform waits interruptible or at least killable.
*
* Return: 0 on success, Negative error code on failure, notably
* -EINTR if @intr was set to true and a signal is pending.
*/
-int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
pgoff_t handle, bool intr)
{
- struct file *filp = ttm_backup_to_file(backup);
- struct address_space *mapping = filp->f_mapping;
+ struct address_space *mapping = backup->f_mapping;
struct folio *from_folio;
pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
@@ -106,12 +91,11 @@ int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
* the folio size- and usage.
*/
s64
-ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
+ttm_backup_backup_page(struct file *backup, struct page *page,
bool writeback, pgoff_t idx, gfp_t page_gfp,
gfp_t alloc_gfp)
{
- struct file *filp = ttm_backup_to_file(backup);
- struct address_space *mapping = filp->f_mapping;
+ struct address_space *mapping = backup->f_mapping;
unsigned long handle = 0;
struct folio *to_folio;
int ret;
@@ -136,13 +120,13 @@ ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
.for_reclaim = 1,
};
folio_set_reclaim(to_folio);
- ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc);
+ ret = shmem_writeout(to_folio, &wbc);
if (!folio_test_writeback(to_folio))
folio_clear_reclaim(to_folio);
/*
- * If writepage succeeds, it unlocks the folio.
- * writepage() errors are otherwise dropped, since writepage()
- * is only best effort here.
+ * If writeout succeeds, it unlocks the folio. Errors
+ * are otherwise dropped, since writeout is only best
+ * effort here.
*/
if (ret)
folio_unlock(to_folio);
@@ -161,9 +145,9 @@ ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
*
* After a call to this function, it's illegal to use the @backup pointer.
*/
-void ttm_backup_fini(struct ttm_backup *backup)
+void ttm_backup_fini(struct file *backup)
{
- fput(ttm_backup_to_file(backup));
+ fput(backup);
}
/**
@@ -194,14 +178,10 @@ EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
*
* Create a backup utilizing shmem objects.
*
- * Return: A pointer to a struct ttm_backup on success,
+ * Return: A pointer to a struct file on success,
* an error pointer on error.
*/
-struct ttm_backup *ttm_backup_shmem_create(loff_t size)
+struct file *ttm_backup_shmem_create(loff_t size)
{
- struct file *filp;
-
- filp = shmem_file_setup("ttm shmem backup", size, 0);
-
- return ttm_file_to_backup(filp);
+ return shmem_file_setup("ttm shmem backup", size, 0);
}
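[Editorial note: with struct ttm_backup gone, the backup API now deals in plain struct file pointers. A hedged round-trip sketch using only the functions touched in this file; error handling is minimal and the index/flags are placeholders.]

	static int demo_backup_roundtrip(struct page *page)
	{
		struct file *backup;
		s64 handle;
		int ret = 0;

		backup = ttm_backup_shmem_create(PAGE_SIZE);
		if (IS_ERR(backup))
			return PTR_ERR(backup);

		/* Copy the page out to shmem: no writeback, shmem index 0. */
		handle = ttm_backup_backup_page(backup, page, false, 0,
						GFP_KERNEL, GFP_KERNEL);
		if (handle < 0) {
			ret = handle;
			goto out;
		}

		/* Copy it back, allowing interruptible waits, then drop the copy. */
		ret = ttm_backup_copy_page(backup, page, handle, true);
		if (!ret)
			ttm_backup_drop(backup, handle);
	out:
		ttm_backup_fini(backup);
		return ret;
	}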
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 95b86003c50d..08a23ab037cb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -235,7 +235,7 @@ static void ttm_bo_delayed_delete(struct work_struct *work)
bo = container_of(work, typeof(*bo), delayed_delete);
- dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ dma_resv_wait_timeout(&bo->base._resv, DMA_RESV_USAGE_BOOKKEEP, false,
MAX_SCHEDULE_TIMEOUT);
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_cleanup_memtype_use(bo);
@@ -270,7 +270,7 @@ static void ttm_bo_release(struct kref *kref)
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_free(bdev, bo->resource);
- if (!dma_resv_test_signaled(bo->base.resv,
+ if (!dma_resv_test_signaled(&bo->base._resv,
DMA_RESV_USAGE_BOOKKEEP) ||
(want_init_on_free() && (bo->ttm != NULL)) ||
bo->type == ttm_bo_type_sg ||
@@ -1093,7 +1093,8 @@ struct ttm_bo_swapout_walk {
struct ttm_lru_walk walk;
/** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
gfp_t gfp_flags;
-
+ /** @hit_low: Whether we should attempt to swap BOs with the low watermark threshold */
+ /** @evict_low: If we cannot swap a bo when @hit_low is false (first pass) */
bool hit_low, evict_low;
};
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a194db83421d..bdfa6ecfef05 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -220,7 +220,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
- .force_alloc = true
};
ttm = bo->ttm;
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 83b10706ba89..c2ea865be657 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -506,7 +506,7 @@ static void ttm_pool_allocated_page_commit(struct page *allocated,
* if successful, populate the page-table and dma-address arrays.
*/
static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
- struct ttm_backup *backup,
+ struct file *backup,
const struct ttm_operation_ctx *ctx,
struct ttm_pool_alloc_state *alloc)
@@ -655,7 +655,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
pgoff_t start_page, pgoff_t end_page)
{
struct page **pages = &tt->pages[start_page];
- struct ttm_backup *backup = tt->backup;
+ struct file *backup = tt->backup;
pgoff_t i, nr;
for (i = start_page; i < end_page; i += nr, pages += nr) {
@@ -963,7 +963,7 @@ void ttm_pool_drop_backed_up(struct ttm_tt *tt)
long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
const struct ttm_backup_flags *flags)
{
- struct ttm_backup *backup = tt->backup;
+ struct file *backup = tt->backup;
struct page *page;
unsigned long handle;
gfp_t alloc_gfp;
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 7e5a60c55813..769b0ca9be47 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -548,7 +548,6 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
- .force_alloc = true
};
struct dma_fence *fence;
int ret;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index df0aa6c4b8b8..698cd4bf5e46 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -544,7 +544,7 @@ EXPORT_SYMBOL(ttm_tt_pages_limit);
*/
int ttm_tt_setup_backup(struct ttm_tt *tt)
{
- struct ttm_backup *backup =
+ struct file *backup =
ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 05b3a152cc33..1922988625eb 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -22,13 +22,14 @@ static int udl_usb_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ struct udl_device *udl = to_udl(dev);
int ret;
ret = drm_mode_config_helper_suspend(dev);
if (ret)
return ret;
- udl_sync_pending_urbs(dev);
+ udl_sync_pending_urbs(udl);
return 0;
}
@@ -49,22 +50,6 @@ static int udl_usb_reset_resume(struct usb_interface *interface)
return drm_mode_config_helper_resume(dev);
}
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct udl_device *udl = to_udl(dev);
-
- if (!udl->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(udl_driver_fops);
static const struct drm_driver driver = {
@@ -73,7 +58,6 @@ static const struct drm_driver driver = {
/* GEM hooks */
.fops = &udl_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = udl_driver_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
@@ -126,10 +110,10 @@ static int udl_usb_probe(struct usb_interface *interface,
static void udl_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ struct udl_device *udl = to_udl(dev);
- drm_kms_helper_poll_fini(dev);
- udl_drop_usb(dev);
drm_dev_unplug(dev);
+ udl_drop_usb(udl);
}
/*
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index be00dc1d87a1..145bb95ccc48 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -50,18 +50,14 @@ struct urb_list {
struct udl_device {
struct drm_device drm;
- struct device *dev;
- struct device *dmadev;
+
+ unsigned long sku_pixel_limit;
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
- struct mutex gem_lock;
-
- int sku_pixel_limit;
-
struct urb_list urbs;
};
@@ -73,22 +69,22 @@ static inline struct usb_device *udl_to_usb_device(struct udl_device *udl)
}
/* modeset */
-int udl_modeset_init(struct drm_device *dev);
+int udl_modeset_init(struct udl_device *udl);
struct drm_connector *udl_connector_init(struct drm_device *dev);
-struct urb *udl_get_urb(struct drm_device *dev);
+struct urb *udl_get_urb(struct udl_device *udl);
-int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
-void udl_sync_pending_urbs(struct drm_device *dev);
+int udl_submit_urb(struct udl_device *udl, struct urb *urb, size_t len);
+void udl_sync_pending_urbs(struct udl_device *udl);
void udl_urb_completion(struct urb *urb);
int udl_init(struct udl_device *udl);
-int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
+int udl_render_hline(struct udl_device *udl, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset, u32 byte_width);
-int udl_drop_usb(struct drm_device *dev);
+int udl_drop_usb(struct udl_device *udl);
int udl_select_std_channel(struct udl_device *udl);
#endif
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 3ebe2ce55dfd..bc58991a6f14 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -8,6 +8,8 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
+#include <linux/unaligned.h>
+
#include <drm/drm.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -23,72 +25,99 @@
#define WRITES_IN_FLIGHT (20)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256
+#define UDL_SKU_PIXEL_LIMIT_DEFAULT 2080000
+
static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);
+/*
+ * Try to make sense of whatever we parse. On malformed input, return
+ * @end to stop parsing, but don't fail hard.
+ */
+static const u8 *udl_parse_key_value_pair(struct udl_device *udl, const u8 *pos, const u8 *end)
+{
+ u16 key;
+ u8 len;
+
+ /* read key */
+ if (pos >= end - 2)
+ return end;
+ key = get_unaligned_le16(pos);
+ pos += 2;
+
+ /* read value length */
+ if (pos >= end - 1)
+ return end;
+ len = *pos++;
+
+ /* read value */
+ if (pos >= end - len)
+ return end;
+ switch (key) {
+ case 0x0200: { /* maximum number of pixels */
+ unsigned int sku_pixel_limit;
+
+ if (len < sizeof(__le32))
+ break;
+ sku_pixel_limit = get_unaligned_le32(pos);
+ if (sku_pixel_limit >= 16 * UDL_SKU_PIXEL_LIMIT_DEFAULT)
+ break; /* almost 100 MiB, so probably bogus */
+ udl->sku_pixel_limit = sku_pixel_limit;
+ break;
+ }
+ default:
+ break;
+ }
+ pos += len;
+
+ return pos;
+}
+
static int udl_parse_vendor_descriptor(struct udl_device *udl)
{
+ struct drm_device *dev = &udl->drm;
struct usb_device *udev = udl_to_usb_device(udl);
- char *desc;
- char *buf;
- char *desc_end;
-
- u8 total_len = 0;
+ bool detected = false;
+ void *buf;
+ int ret;
+ unsigned int len;
+ const u8 *desc;
+ const u8 *desc_end;
buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
if (!buf)
- return false;
+ return -ENOMEM;
+
+ ret = usb_get_descriptor(udev, 0x5f, /* vendor specific */
+ 0, buf, MAX_VENDOR_DESCRIPTOR_SIZE);
+ if (ret < 0)
+ goto out;
+ len = ret;
+
+ if (len < 5)
+ goto out;
+
desc = buf;
+ desc_end = desc + len;
- total_len = usb_get_descriptor(udev, 0x5f, /* vendor specific */
- 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
- if (total_len > 5) {
- DRM_INFO("vendor descriptor length:%x data:%11ph\n",
- total_len, desc);
-
- if ((desc[0] != total_len) || /* descriptor length */
- (desc[1] != 0x5f) || /* vendor descriptor type */
- (desc[2] != 0x01) || /* version (2 bytes) */
- (desc[3] != 0x00) ||
- (desc[4] != total_len - 2)) /* length after type */
- goto unrecognized;
-
- desc_end = desc + total_len;
- desc += 5; /* the fixed header we've already parsed */
-
- while (desc < desc_end) {
- u8 length;
- u16 key;
-
- key = le16_to_cpu(*((u16 *) desc));
- desc += sizeof(u16);
- length = *desc;
- desc++;
-
- switch (key) {
- case 0x0200: { /* max_area */
- u32 max_area;
- max_area = le32_to_cpu(*((u32 *)desc));
- DRM_DEBUG("DL chip limited to %d pixel modes\n",
- max_area);
- udl->sku_pixel_limit = max_area;
- break;
- }
- default:
- break;
- }
- desc += length;
- }
- }
+ if ((desc[0] != len) || /* descriptor length */
+ (desc[1] != 0x5f) || /* vendor descriptor type */
+ (desc[2] != 0x01) || /* version (2 bytes) */
+ (desc[3] != 0x00) ||
+ (desc[4] != len - 2)) /* length after type */
+ goto out;
+ desc += 5;
- goto success;
+ detected = true;
-unrecognized:
- /* allow udlfb to load for now even if firmware unrecognized */
- DRM_ERROR("Unrecognized vendor firmware descriptor\n");
+ while (desc < desc_end)
+ desc = udl_parse_key_value_pair(udl, desc, desc_end);
-success:
+out:
+ if (!detected)
+ drm_warn(dev, "Unrecognized vendor firmware descriptor\n");
kfree(buf);
- return true;
+
+ return 0;
}
/*
@@ -145,9 +174,8 @@ void udl_urb_completion(struct urb *urb)
wake_up(&udl->urbs.sleep);
}
-static void udl_free_urb_list(struct drm_device *dev)
+static void udl_free_urb_list(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
struct urb_node *unode;
struct urb *urb;
@@ -172,9 +200,8 @@ static void udl_free_urb_list(struct drm_device *dev)
wake_up_all(&udl->urbs.sleep);
}
-static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
+static int udl_alloc_urb_list(struct udl_device *udl, int count, size_t size)
{
- struct udl_device *udl = to_udl(dev);
struct urb *urb;
struct urb_node *unode;
char *buf;
@@ -210,7 +237,7 @@ retry:
usb_free_urb(urb);
if (size > PAGE_SIZE) {
size /= 2;
- udl_free_urb_list(dev);
+ udl_free_urb_list(udl);
goto retry;
}
break;
@@ -259,9 +286,8 @@ static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
}
#define GET_URB_TIMEOUT HZ
-struct urb *udl_get_urb(struct drm_device *dev)
+struct urb *udl_get_urb(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
struct urb *urb;
spin_lock_irq(&udl->urbs.lock);
@@ -270,9 +296,8 @@ struct urb *udl_get_urb(struct drm_device *dev)
return urb;
}
-int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
+int udl_submit_urb(struct udl_device *udl, struct urb *urb, size_t len)
{
- struct udl_device *udl = to_udl(dev);
int ret;
if (WARN_ON(len > udl->urbs.size)) {
@@ -290,9 +315,9 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
}
/* wait until all pending URBs have been processed */
-void udl_sync_pending_urbs(struct drm_device *dev)
+void udl_sync_pending_urbs(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
+ struct drm_device *dev = &udl->drm;
spin_lock_irq(&udl->urbs.lock);
/* 2 seconds as a sane timeout */
@@ -308,53 +333,55 @@ int udl_init(struct udl_device *udl)
{
struct drm_device *dev = &udl->drm;
int ret = -ENOMEM;
+ struct device *dma_dev;
DRM_DEBUG("\n");
- udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
- if (!udl->dmadev)
+ dma_dev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
+ if (dma_dev) {
+ drm_dev_set_dma_dev(dev, dma_dev);
+ put_device(dma_dev);
+ } else {
drm_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
- mutex_init(&udl->gem_lock);
+ /*
+ * Not all devices provide a vendor descriptor with device
+ * information. Initialize to a default value taken from
+ * real-world devices; it is just enough memory for Full HD.
+ */
+ udl->sku_pixel_limit = UDL_SKU_PIXEL_LIMIT_DEFAULT;
- if (!udl_parse_vendor_descriptor(udl)) {
- ret = -ENODEV;
- DRM_ERROR("firmware not recognized. Assume incompatible device\n");
+ ret = udl_parse_vendor_descriptor(udl);
+ if (ret)
goto err;
- }
if (udl_select_std_channel(udl))
DRM_ERROR("Selecting channel failed\n");
- if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
+ if (!udl_alloc_urb_list(udl, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
DRM_ERROR("udl_alloc_urb_list failed\n");
+ ret = -ENOMEM;
goto err;
}
DRM_DEBUG("\n");
- ret = udl_modeset_init(dev);
+ ret = udl_modeset_init(udl);
if (ret)
goto err;
- drm_kms_helper_poll_init(dev);
-
return 0;
err:
if (udl->urbs.count)
- udl_free_urb_list(dev);
- put_device(udl->dmadev);
+ udl_free_urb_list(udl);
DRM_ERROR("%d\n", ret);
return ret;
}
-int udl_drop_usb(struct drm_device *dev)
+int udl_drop_usb(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
-
- udl_free_urb_list(dev);
- put_device(udl->dmadev);
- udl->dmadev = NULL;
+ udl_free_urb_list(udl);
return 0;
}
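[Editorial note: for reference, the records that udl_parse_key_value_pair() walks are a little-endian 16-bit key, an 8-bit value length, then the value bytes. Below is a made-up example record for key 0x0200 (pixel limit) encoding 2073600 pixels.]

	/* Illustrative only; real descriptors come from the device. */
	static const u8 demo_vendor_record[] = {
		0x00, 0x02,		/* key 0x0200, little endian */
		0x04,			/* value length: 4 bytes */
		0x00, 0xa4, 0x1f, 0x00,	/* 0x001fa400 = 2073600, little endian */
	};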
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index bbb04f98886a..231e829bd709 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -205,6 +205,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
struct drm_device *dev = fb->dev;
+ struct udl_device *udl = to_udl(dev);
void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */
int i, ret;
char *cmd;
@@ -216,7 +217,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
return ret;
log_bpp = ret;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
return -ENOMEM;
cmd = urb->transfer_buffer;
@@ -226,7 +227,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
const int byte_offset = line_offset + (clip->x1 << log_bpp);
const int dev_byte_offset = (fb->width * i + clip->x1) << log_bpp;
const int byte_width = drm_rect_width(clip) << log_bpp;
- ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr,
+ ret = udl_render_hline(udl, log_bpp, &urb, (char *)vaddr,
&cmd, byte_offset, dev_byte_offset,
byte_width);
if (ret)
@@ -239,7 +240,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
if (cmd < (char *)urb->transfer_buffer + urb->transfer_buffer_length)
*cmd++ = UDL_MSG_BULK;
len = cmd - (char *)urb->transfer_buffer;
- ret = udl_submit_urb(dev, urb, len);
+ ret = udl_submit_urb(udl, urb, len);
} else {
udl_urb_completion(urb);
}
@@ -330,6 +331,7 @@ static const struct drm_plane_funcs udl_primary_plane_funcs = {
static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
+ struct udl_device *udl = to_udl(dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct drm_display_mode *mode = &crtc_state->mode;
struct urb *urb;
@@ -339,7 +341,7 @@ static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atom
if (!drm_dev_enter(dev, &idx))
return;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
goto out;
@@ -355,7 +357,7 @@ static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atom
buf = udl_vidreg_unlock(buf);
buf = udl_dummy_render(buf);
- udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
+ udl_submit_urb(udl, urb, buf - (char *)urb->transfer_buffer);
out:
drm_dev_exit(idx);
@@ -364,6 +366,7 @@ out:
static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
+ struct udl_device *udl = to_udl(dev);
struct urb *urb;
char *buf;
int idx;
@@ -371,7 +374,7 @@ static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato
if (!drm_dev_enter(dev, &idx))
return;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
goto out;
@@ -381,7 +384,7 @@ static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato
buf = udl_vidreg_unlock(buf);
buf = udl_dummy_render(buf);
- udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
+ udl_submit_urb(udl, urb, buf - (char *)urb->transfer_buffer);
out:
drm_dev_exit(idx);
@@ -476,9 +479,9 @@ static const struct drm_mode_config_funcs udl_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-int udl_modeset_init(struct drm_device *dev)
+int udl_modeset_init(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
+ struct drm_device *dev = &udl->drm;
struct drm_plane *primary_plane;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
@@ -535,6 +538,7 @@ int udl_modeset_init(struct drm_device *dev)
return ret;
drm_mode_config_reset(dev);
+ drmm_kms_helper_poll_init(dev);
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 62224992988f..7d670b3a5293 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -170,7 +170,7 @@ static void udl_compress_hline16(
* (that we can only write to, slowly, and can never read), and (optionally)
* our shadow copy that tracks what's been sent to that hardware buffer.
*/
-int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
+int udl_render_hline(struct udl_device *udl, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset,
u32 byte_width)
@@ -199,10 +199,10 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
if (cmd >= cmd_end) {
int len = cmd - (u8 *) urb->transfer_buffer;
- int ret = udl_submit_urb(dev, urb, len);
+ int ret = udl_submit_urb(udl, urb, len);
if (ret)
return ret;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
return -EAGAIN;
*urb_ptr = urb;
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 76816f2551c1..7e789e181af0 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -21,74 +21,74 @@ struct v3d_reg_def {
};
static const struct v3d_reg_def v3d_hub_reg_defs[] = {
- REGDEF(33, 42, V3D_HUB_AXICFG),
- REGDEF(33, 71, V3D_HUB_UIFCFG),
- REGDEF(33, 71, V3D_HUB_IDENT0),
- REGDEF(33, 71, V3D_HUB_IDENT1),
- REGDEF(33, 71, V3D_HUB_IDENT2),
- REGDEF(33, 71, V3D_HUB_IDENT3),
- REGDEF(33, 71, V3D_HUB_INT_STS),
- REGDEF(33, 71, V3D_HUB_INT_MSK_STS),
-
- REGDEF(33, 71, V3D_MMU_CTL),
- REGDEF(33, 71, V3D_MMU_VIO_ADDR),
- REGDEF(33, 71, V3D_MMU_VIO_ID),
- REGDEF(33, 71, V3D_MMU_DEBUG_INFO),
-
- REGDEF(71, 71, V3D_GMP_STATUS(71)),
- REGDEF(71, 71, V3D_GMP_CFG(71)),
- REGDEF(71, 71, V3D_GMP_VIO_ADDR(71)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_HUB_AXICFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_UIFCFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT0),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT1),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT2),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT3),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_MSK_STS),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_CTL),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ADDR),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ID),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_DEBUG_INFO),
+
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_STATUS(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_CFG(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_VIO_ADDR(71)),
};
static const struct v3d_reg_def v3d_gca_reg_defs[] = {
- REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN),
- REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN_ACK),
+ REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN),
+ REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN_ACK),
};
static const struct v3d_reg_def v3d_core_reg_defs[] = {
- REGDEF(33, 71, V3D_CTL_IDENT0),
- REGDEF(33, 71, V3D_CTL_IDENT1),
- REGDEF(33, 71, V3D_CTL_IDENT2),
- REGDEF(33, 71, V3D_CTL_MISCCFG),
- REGDEF(33, 71, V3D_CTL_INT_STS),
- REGDEF(33, 71, V3D_CTL_INT_MSK_STS),
- REGDEF(33, 71, V3D_CLE_CT0CS),
- REGDEF(33, 71, V3D_CLE_CT0CA),
- REGDEF(33, 71, V3D_CLE_CT0EA),
- REGDEF(33, 71, V3D_CLE_CT1CS),
- REGDEF(33, 71, V3D_CLE_CT1CA),
- REGDEF(33, 71, V3D_CLE_CT1EA),
-
- REGDEF(33, 71, V3D_PTB_BPCA),
- REGDEF(33, 71, V3D_PTB_BPCS),
-
- REGDEF(33, 42, V3D_GMP_STATUS(33)),
- REGDEF(33, 42, V3D_GMP_CFG(33)),
- REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)),
-
- REGDEF(33, 71, V3D_ERR_FDBGO),
- REGDEF(33, 71, V3D_ERR_FDBGB),
- REGDEF(33, 71, V3D_ERR_FDBGS),
- REGDEF(33, 71, V3D_ERR_STAT),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT0),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT1),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT2),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_MISCCFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_MSK_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0EA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1EA),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCS),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_STATUS(33)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_CFG(33)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_VIO_ADDR(33)),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGO),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGB),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_STAT),
};
static const struct v3d_reg_def v3d_csd_reg_defs[] = {
- REGDEF(41, 71, V3D_CSD_STATUS),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG3(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG4(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG5(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG6(71)),
- REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG7),
+ REGDEF(V3D_GEN_41, V3D_GEN_71, V3D_CSD_STATUS),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG0(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG1(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG2(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG3(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG4(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG5(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG6(41)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG0(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG1(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG2(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG3(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG4(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG5(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG6(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG7),
};
static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
@@ -164,7 +164,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU));
seq_printf(m, "TFU: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU));
- if (v3d->ver <= 42) {
+ if (v3d->ver <= V3D_GEN_42) {
seq_printf(m, "TSY: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY));
}
@@ -196,11 +196,11 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
seq_printf(m, " QPUs: %d\n", nslc * qups);
seq_printf(m, " Semaphores: %d\n",
V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM));
- if (v3d->ver <= 42) {
+ if (v3d->ver <= V3D_GEN_42) {
seq_printf(m, " BCG int: %d\n",
(ident2 & V3D_IDENT2_BCG_INT) != 0);
}
- if (v3d->ver < 40) {
+ if (v3d->ver < V3D_GEN_41) {
seq_printf(m, " Override TMU: %d\n",
(misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
}
@@ -234,7 +234,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
int core = 0;
int measure_ms = 1000;
- if (v3d->ver >= 40) {
+ if (v3d->ver >= V3D_GEN_41) {
int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
V3D_SET_FIELD_VER(cycle_count_reg,
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 852015214e97..5e997ae8bc9c 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched/clock.h>
@@ -92,7 +93,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
args->value = 1;
return 0;
case DRM_V3D_PARAM_SUPPORTS_PERFMON:
- args->value = (v3d->ver >= 40);
+ args->value = (v3d->ver >= V3D_GEN_41);
return 0;
case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
args->value = 1;
@@ -254,14 +255,44 @@ static const struct drm_driver v3d_drm_driver = {
};
static const struct of_device_id v3d_of_match[] = {
- { .compatible = "brcm,2711-v3d" },
- { .compatible = "brcm,2712-v3d" },
- { .compatible = "brcm,7268-v3d" },
- { .compatible = "brcm,7278-v3d" },
+ { .compatible = "brcm,2711-v3d", .data = (void *)V3D_GEN_42 },
+ { .compatible = "brcm,2712-v3d", .data = (void *)V3D_GEN_71 },
+ { .compatible = "brcm,7268-v3d", .data = (void *)V3D_GEN_33 },
+ { .compatible = "brcm,7278-v3d", .data = (void *)V3D_GEN_41 },
{},
};
MODULE_DEVICE_TABLE(of, v3d_of_match);
+static void
+v3d_idle_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_CLEAR_POWER_OFF);
+
+ if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS),
+ V3D_SMS_STATE) == V3D_SMS_IDLE), 100)) {
+ DRM_ERROR("Failed to power up SMS\n");
+ }
+
+ v3d_reset_sms(v3d);
+}
+
+static void
+v3d_power_off_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_POWER_OFF);
+
+ if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS),
+ V3D_SMS_STATE) == V3D_SMS_POWER_OFF_STATE), 100)) {
+ DRM_ERROR("Failed to power off SMS\n");
+ }
+}
+
static int
map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
{
@@ -274,6 +305,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct drm_device *drm;
struct v3d_dev *v3d;
+ enum v3d_gen gen;
int ret;
u32 mmu_debug;
u32 ident1, ident3;
@@ -287,6 +319,9 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drm);
+ gen = (uintptr_t)of_device_get_match_data(dev);
+ v3d->ver = gen;
+
ret = map_regs(v3d, &v3d->hub_regs, "hub");
if (ret)
return ret;
@@ -295,6 +330,12 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (v3d->ver >= V3D_GEN_71) {
+ ret = map_regs(v3d, &v3d->sms_regs, "sms");
+ if (ret)
+ return ret;
+ }
+
v3d->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(v3d->clk))
return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n");
@@ -305,6 +346,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
return ret;
}
+ v3d_idle_sms(v3d);
+
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
ret = dma_set_mask_and_coherent(dev, mask);
@@ -316,6 +359,11 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ident1 = V3D_READ(V3D_HUB_IDENT1);
v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 +
V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV));
+ /* Make sure that the V3D tech version retrieved from the HW is equal
+ * to the one advertised by the device tree.
+ */
+ WARN_ON(v3d->ver != gen);
+
v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
@@ -340,7 +388,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
}
}
- if (v3d->ver < 41) {
+ if (v3d->ver < V3D_GEN_41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
goto clk_disable;
@@ -400,6 +448,8 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
v3d->mmu_scratch_paddr);
+ v3d_power_off_sms(v3d);
+
clk_disable_unprepare(v3d->clk);
}
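Editor's note: the v3d_drv.c hunks above ship the hardware generation through the OF match table as a pointer-sized value ((void *)V3D_GEN_42) and recover it in probe via (uintptr_t)of_device_get_match_data(dev). A minimal standalone sketch of that round-trip is below; struct of_match, match_table and main() are hypothetical stand-ins, not kernel code.

#include <assert.h>
#include <stdint.h>

enum v3d_gen { V3D_GEN_33 = 33, V3D_GEN_41 = 41, V3D_GEN_42 = 42, V3D_GEN_71 = 71 };

/* Hypothetical, simplified model of an OF match entry. */
struct of_match {
	const char *compatible;
	const void *data;
};

static const struct of_match match_table[] = {
	{ "brcm,2711-v3d", (const void *)(uintptr_t)V3D_GEN_42 },
	{ "brcm,2712-v3d", (const void *)(uintptr_t)V3D_GEN_71 },
};

int main(void)
{
	/* Models what of_device_get_match_data() returns for "brcm,2712-v3d":
	 * the enum value is recovered by casting the pointer back through uintptr_t.
	 */
	enum v3d_gen gen = (enum v3d_gen)(uintptr_t)match_table[1].data;

	assert(gen == V3D_GEN_71);
	return 0;
}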
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 9deaefa0f95b..b51f0b648a08 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -94,11 +94,18 @@ struct v3d_perfmon {
u64 values[] __counted_by(ncounters);
};
+enum v3d_gen {
+ V3D_GEN_33 = 33,
+ V3D_GEN_41 = 41,
+ V3D_GEN_42 = 42,
+ V3D_GEN_71 = 71,
+};
+
struct v3d_dev {
struct drm_device drm;
/* Short representation (e.g. 33, 41) of the V3D tech version */
- int ver;
+ enum v3d_gen ver;
/* Short representation (e.g. 5, 6) of the V3D tech revision */
int rev;
@@ -111,6 +118,7 @@ struct v3d_dev {
void __iomem *core_regs[3];
void __iomem *bridge_regs;
void __iomem *gca_regs;
+ void __iomem *sms_regs;
struct clk *clk;
struct reset_control *reset;
@@ -199,7 +207,7 @@ to_v3d_dev(struct drm_device *dev)
static inline bool
v3d_has_csd(struct v3d_dev *v3d)
{
- return v3d->ver >= 41;
+ return v3d->ver >= V3D_GEN_41;
}
#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)
@@ -261,6 +269,15 @@ to_v3d_fence(struct dma_fence *fence)
#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)
+#define V3D_SMS_IDLE 0x0
+#define V3D_SMS_ISOLATING_FOR_RESET 0xa
+#define V3D_SMS_RESETTING 0xb
+#define V3D_SMS_ISOLATING_FOR_POWER_OFF 0xc
+#define V3D_SMS_POWER_OFF_STATE 0xd
+
+#define V3D_SMS_READ(offset) readl(v3d->sms_regs + (offset))
+#define V3D_SMS_WRITE(offset, val) writel(val, v3d->sms_regs + (offset))
+
#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
@@ -539,6 +556,7 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
+void v3d_reset_sms(struct v3d_dev *v3d);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_clean_caches(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b1e681630ded..d7d16da78db3 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -25,7 +25,7 @@ v3d_init_core(struct v3d_dev *v3d, int core)
* type. If you want the default behavior, you can still put
* "2" in the indirect texture state's output_type field.
*/
- if (v3d->ver < 40)
+ if (v3d->ver < V3D_GEN_41)
V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
/* Whenever we flush the L2T cache, we always want to flush
@@ -58,7 +58,7 @@ v3d_idle_axi(struct v3d_dev *v3d, int core)
static void
v3d_idle_gca(struct v3d_dev *v3d)
{
- if (v3d->ver >= 41)
+ if (v3d->ver >= V3D_GEN_41)
return;
V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);
@@ -105,6 +105,22 @@ v3d_reset_v3d(struct v3d_dev *v3d)
}
void
+v3d_reset_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_REE_CS, V3D_SET_FIELD(0x4, V3D_SMS_STATE));
+
+ if (wait_for(!(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS),
+ V3D_SMS_STATE) == V3D_SMS_ISOLATING_FOR_RESET) &&
+ !(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS),
+ V3D_SMS_STATE) == V3D_SMS_RESETTING), 100)) {
+ DRM_ERROR("Failed to wait for SMS reset\n");
+ }
+}
+
+void
v3d_reset(struct v3d_dev *v3d)
{
struct drm_device *dev = &v3d->drm;
@@ -119,6 +135,7 @@ v3d_reset(struct v3d_dev *v3d)
v3d_idle_axi(v3d, 0);
v3d_idle_gca(v3d);
+ v3d_reset_sms(v3d);
v3d_reset_v3d(v3d);
v3d_mmu_set_page_table(v3d);
@@ -132,13 +149,13 @@ v3d_reset(struct v3d_dev *v3d)
static void
v3d_flush_l3(struct v3d_dev *v3d)
{
- if (v3d->ver < 41) {
+ if (v3d->ver < V3D_GEN_41) {
u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);
V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);
- if (v3d->ver < 33) {
+ if (v3d->ver < V3D_GEN_33) {
V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
}
@@ -151,7 +168,7 @@ v3d_flush_l3(struct v3d_dev *v3d)
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
- if (v3d->ver > 32)
+ if (v3d->ver >= V3D_GEN_33)
return;
V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 72b6a119412f..2cca5d3a26a2 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -143,7 +143,7 @@ v3d_irq(int irq, void *arg)
/* We shouldn't be triggering these if we have GMP in
* always-allowed mode.
*/
- if (v3d->ver < 71 && (intsts & V3D_INT_GMPV))
+ if (v3d->ver < V3D_GEN_71 && (intsts & V3D_INT_GMPV))
dev_err(v3d->drm.dev, "GMP violation\n");
/* V3D 4.2 wires the hub and core IRQs together, so if we &
@@ -186,27 +186,59 @@ v3d_hub_irq(int irq, void *arg)
u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) <<
(v3d->va_width - 32));
- static const char *const v3d41_axi_ids[] = {
- "L2T",
- "PTB",
- "PSE",
- "TLB",
- "CLE",
- "TFU",
- "MMU",
- "GMP",
+ static const struct {
+ u32 begin;
+ u32 end;
+ const char *client;
+ } v3d41_axi_ids[] = {
+ {0x00, 0x20, "L2T"},
+ {0x20, 0x21, "PTB"},
+ {0x40, 0x41, "PSE"},
+ {0x60, 0x80, "TLB"},
+ {0x80, 0x88, "CLE"},
+ {0xA0, 0xA1, "TFU"},
+ {0xC0, 0xE0, "MMU"},
+ {0xE0, 0xE1, "GMP"},
+ }, v3d71_axi_ids[] = {
+ {0x00, 0x30, "L2T"},
+ {0x30, 0x38, "CLE"},
+ {0x38, 0x39, "PTB"},
+ {0x39, 0x3A, "PSE"},
+ {0x3A, 0x3B, "CSD"},
+ {0x40, 0x60, "TLB"},
+ {0x60, 0x70, "MMU"},
+ {0x7C, 0x7E, "TFU"},
+ {0x7F, 0x80, "GMP"},
};
const char *client = "?";
V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
- if (v3d->ver >= 41) {
- axi_id = axi_id >> 5;
- if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
- client = v3d41_axi_ids[axi_id];
+ if (v3d->ver >= V3D_GEN_71) {
+ size_t i;
+
+ axi_id = axi_id & 0x7F;
+ for (i = 0; i < ARRAY_SIZE(v3d71_axi_ids); i++) {
+ if (axi_id >= v3d71_axi_ids[i].begin &&
+ axi_id < v3d71_axi_ids[i].end) {
+ client = v3d71_axi_ids[i].client;
+ break;
+ }
+ }
+ } else if (v3d->ver >= V3D_GEN_41) {
+ size_t i;
+
+ axi_id = axi_id & 0xFF;
+ for (i = 0; i < ARRAY_SIZE(v3d41_axi_ids); i++) {
+ if (axi_id >= v3d41_axi_ids[i].begin &&
+ axi_id < v3d41_axi_ids[i].end) {
+ client = v3d41_axi_ids[i].client;
+ break;
+ }
+ }
}
- dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
+ dev_err(v3d->drm.dev, "MMU error from client %s (0x%x) at 0x%llx%s%s%s\n",
client, axi_id, (long long)vio_addr,
((intsts & V3D_HUB_INT_MMU_WRV) ?
", write violation" : ""),
@@ -217,7 +249,7 @@ v3d_hub_irq(int irq, void *arg)
status = IRQ_HANDLED;
}
- if (v3d->ver >= 71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
+ if (v3d->ver >= V3D_GEN_71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
dev_err(v3d->drm.dev, "GMP Violation\n");
status = IRQ_HANDLED;
}
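Editor's note: the new v3d_hub_irq() tables above replace the index-based AXI client names with half-open [begin, end) ranges. The standalone sketch below models that lookup for the 7.1 table; axi_id_to_client() and main() are illustrative helpers, not driver code.

#include <stdio.h>
#include <stdint.h>

struct axi_range { uint32_t begin, end; const char *client; };

static const struct axi_range v3d71_axi_ids[] = {
	{0x00, 0x30, "L2T"}, {0x30, 0x38, "CLE"}, {0x38, 0x39, "PTB"},
	{0x39, 0x3A, "PSE"}, {0x3A, 0x3B, "CSD"}, {0x40, 0x60, "TLB"},
	{0x60, 0x70, "MMU"}, {0x7C, 0x7E, "TFU"}, {0x7F, 0x80, "GMP"},
};

static const char *axi_id_to_client(uint32_t axi_id)
{
	axi_id &= 0x7F; /* V3D 7.1 uses the low 7 bits of the violation ID */

	for (size_t i = 0; i < sizeof(v3d71_axi_ids) / sizeof(v3d71_axi_ids[0]); i++) {
		if (axi_id >= v3d71_axi_ids[i].begin &&
		    axi_id < v3d71_axi_ids[i].end)
			return v3d71_axi_ids[i].client;
	}

	return "?";
}

int main(void)
{
	/* 0x65 falls inside [0x60, 0x70), so it decodes to "MMU". */
	printf("0x65 -> %s\n", axi_id_to_client(0x65));
	return 0;
}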
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index 3ebda2fa46fc..9a3fe5255874 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -200,10 +200,10 @@ void v3d_perfmon_init(struct v3d_dev *v3d)
const struct v3d_perf_counter_desc *counters = NULL;
unsigned int max = 0;
- if (v3d->ver >= 71) {
+ if (v3d->ver >= V3D_GEN_71) {
counters = v3d_v71_performance_counters;
max = ARRAY_SIZE(v3d_v71_performance_counters);
- } else if (v3d->ver >= 42) {
+ } else if (v3d->ver >= V3D_GEN_42) {
counters = v3d_v42_performance_counters;
max = ARRAY_SIZE(v3d_v42_performance_counters);
}
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 6da3c69082bd..c1870265eaee 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -515,4 +515,30 @@
# define V3D_ERR_VPAERGS BIT(1)
# define V3D_ERR_VPAEABB BIT(0)
+#define V3D_SMS_REE_CS 0x00000
+#define V3D_SMS_TEE_CS 0x00400
+# define V3D_SMS_INTERRUPT BIT(31)
+# define V3D_SMS_POWER_OFF BIT(30)
+# define V3D_SMS_CLEAR_POWER_OFF BIT(29)
+# define V3D_SMS_LOCK BIT(28)
+# define V3D_SMS_CLEAR_LOCK BIT(27)
+# define V3D_SMS_SVP_MODE_EXIT BIT(26)
+# define V3D_SMS_CLEAR_SVP_MODE_EXIT BIT(25)
+# define V3D_SMS_SVP_MODE_ENTER BIT(24)
+# define V3D_SMS_CLEAR_SVP_MODE_ENTER BIT(23)
+# define V3D_SMS_THEIR_MODE_EXIT BIT(22)
+# define V3D_SMS_THEIR_MODE_ENTER BIT(21)
+# define V3D_SMS_OUR_MODE_EXIT BIT(20)
+# define V3D_SMS_CLEAR_OUR_MODE_EXIT BIT(19)
+# define V3D_SMS_SEQ_PC_MASK V3D_MASK(16, 10)
+# define V3D_SMS_SEQ_PC_SHIFT 10
+# define V3D_SMS_HUBCORE_STATUS_MASK V3D_MASK(9, 8)
+# define V3D_SMS_HUBCORE_STATUS_SHIFT 8
+# define V3D_SMS_NEW_MODE_MASK V3D_MASK(7, 6)
+# define V3D_SMS_NEW_MODE_SHIFT 6
+# define V3D_SMS_OLD_MODE_MASK V3D_MASK(5, 4)
+# define V3D_SMS_OLD_MODE_SHIFT 4
+# define V3D_SMS_STATE_MASK V3D_MASK(3, 0)
+# define V3D_SMS_STATE_SHIFT 0
+
#endif /* V3D_REGS_H */
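Editor's note: the V3D_SMS_*_MASK/_SHIFT pairs added above are consumed through the driver's V3D_GET_FIELD() helper (defined elsewhere in the driver). A minimal standalone sketch of the implied mask-and-shift decode follows; get_field() and the sample register value are hypothetical stand-ins for illustration only.

#include <assert.h>
#include <stdint.h>

#define SMS_STATE_MASK  0xFu /* bits 3:0, matching V3D_MASK(3, 0) */
#define SMS_STATE_SHIFT 0

static uint32_t get_field(uint32_t word, uint32_t mask, uint32_t shift)
{
	return (word & mask) >> shift;
}

int main(void)
{
	uint32_t tee_cs = 0x0000040d; /* hypothetical V3D_SMS_TEE_CS readback */

	/* 0xd corresponds to V3D_SMS_POWER_OFF_STATE in the new definitions. */
	assert(get_field(tee_cs, SMS_STATE_MASK, SMS_STATE_SHIFT) == 0xd);
	return 0;
}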
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 34c42d6e12cd..35f131a46d07 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -357,11 +357,11 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica);
V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua);
V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa);
- if (v3d->ver >= 71)
+ if (v3d->ver >= V3D_GEN_71)
V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios);
V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]);
- if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
+ if (v3d->ver >= V3D_GEN_71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]);
V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]);
V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]);
@@ -412,7 +412,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
*
* XXX: Set the CFG7 register
*/
- if (v3d->ver >= 71)
+ if (v3d->ver >= V3D_GEN_71)
V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0);
/* CFG0 write kicks off the job. */
@@ -428,7 +428,8 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
- u32 *wg_counts;
+ struct v3d_dev *v3d = job->base.v3d;
+ u32 num_batches, *wg_counts;
v3d_get_bo_vaddr(bo);
v3d_get_bo_vaddr(indirect);
@@ -441,8 +442,17 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
- args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
- (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
+
+ num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
+ (wg_counts[0] * wg_counts[1] * wg_counts[2]);
+
+ /* V3D 7.1.6 and later don't subtract 1 from the number of batches */
+ if (v3d->ver < 71 || (v3d->ver == 71 && v3d->rev < 6))
+ args->cfg[4] = num_batches - 1;
+ else
+ args->cfg[4] = num_batches;
+
+ WARN_ON(args->cfg[4] == ~0);
for (int i = 0; i < 3; i++) {
/* 0xffffffff indicates that the uniform rewrite is not needed */
@@ -734,11 +744,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
return DRM_GPU_SCHED_STAT_NOMINAL;
}
-/* If the current address or return address have changed, then the GPU
- * has probably made progress and we should delay the reset. This
- * could fail if the GPU got in an infinite loop in the CL, but that
- * is pretty unlikely outside of an i-g-t testcase.
- */
+static void
+v3d_sched_skip_reset(struct drm_sched_job *sched_job)
+{
+ struct drm_gpu_scheduler *sched = sched_job->sched;
+
+ spin_lock(&sched->job_list_lock);
+ list_add(&sched_job->list, &sched->pending_list);
+ spin_unlock(&sched->job_list_lock);
+}
+
static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 *timedout_ctca, u32 *timedout_ctra)
@@ -748,9 +763,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
+ /* If the current address or return address have changed, then the GPU
+ * has probably made progress and we should delay the reset. This
+ * could fail if the GPU got in an infinite loop in the CL, but that
+ * is pretty unlikely outside of an i-g-t testcase.
+ */
if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
*timedout_ctca = ctca;
*timedout_ctra = ctra;
+
+ v3d_sched_skip_reset(sched_job);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -790,11 +812,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
struct v3d_dev *v3d = job->base.v3d;
u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));
- /* If we've made progress, skip reset and let the timer get
- * rearmed.
+ /* If we've made progress, skip reset, add the job to the pending
+ * list, and let the timer get rearmed.
*/
if (job->timedout_batches != batches) {
job->timedout_batches = batches;
+
+ v3d_sched_skip_reset(sched_job);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
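Editor's note: the indirect-CSD hunk above computes the CFG4 batch count as DIV_ROUND_UP(wg_size, 16) times the product of the workgroup counts, subtracting 1 only on hardware older than V3D 7.1.6. The standalone sketch below reproduces that arithmetic with hypothetical example numbers; csd_cfg4() and div_round_up() are illustrative helpers, not the driver functions.

#include <assert.h>
#include <stdint.h>

static uint32_t div_round_up(uint32_t n, uint32_t d)
{
	return (n + d - 1) / d;
}

static uint32_t csd_cfg4(uint32_t wg_size, const uint32_t wg_counts[3],
			 int ver, int rev)
{
	uint32_t num_batches = div_round_up(wg_size, 16) *
			       (wg_counts[0] * wg_counts[1] * wg_counts[2]);

	/* Pre-7.1.6 hardware expects the batch count minus one. */
	if (ver < 71 || (ver == 71 && rev < 6))
		return num_batches - 1;
	return num_batches;
}

int main(void)
{
	const uint32_t wg[3] = { 4, 2, 1 }; /* hypothetical dispatch: 8 workgroups */

	assert(csd_cfg4(33, wg, 42, 0) == 3 * 8 - 1); /* V3D 4.2: 23 */
	assert(csd_cfg4(33, wg, 71, 6) == 3 * 8);     /* V3D 7.1.6: 24 */
	return 0;
}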
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
index e70d7c3076ac..577d9a956369 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
@@ -61,6 +61,19 @@ static const struct drm_display_mode default_mode = {
DRM_SIMPLE_MODE(640, 480, 64, 48)
};
+/**
+ * vc4_mock_atomic_add_output() - Enables an output in a state
+ * @test: The test context object
+ * @state: Atomic state to enable the output in.
+ * @type: Type of the output encoder
+ *
+ * Adds an output CRTC and connector to a state, and enables them.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure. If the error is
+ * EDEADLK, the entire atomic sequence must be restarted. All other
+ * errors are fatal.
+ */
int vc4_mock_atomic_add_output(struct kunit *test,
struct drm_atomic_state *state,
enum vc4_encoder_type type)
@@ -75,30 +88,49 @@ int vc4_mock_atomic_add_output(struct kunit *test,
int ret;
encoder = vc4_find_encoder_by_type(drm, type);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+ if (!encoder)
+ return -ENODEV;
crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+ if (!crtc)
+ return -ENODEV;
output = encoder_to_vc4_dummy_output(encoder);
conn = &output->connector;
conn_state = drm_atomic_get_connector_state(state, conn);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
ret = drm_atomic_set_mode_for_crtc(crtc_state, &default_mode);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
crtc_state->active = true;
return 0;
}
+/**
+ * vc4_mock_atomic_del_output() - Disables an output in a state
+ * @test: The test context object
+ * @state: Atomic state to disable the output in.
+ * @type: Type of the output encoder
+ *
+ * Adds an output CRTC and connector to a state, and disables them.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure. If the error is
+ * EDEADLK, the entire atomic sequence must be restarted. All other
+ * errors are fatal.
+ */
int vc4_mock_atomic_del_output(struct kunit *test,
struct drm_atomic_state *state,
enum vc4_encoder_type type)
@@ -113,26 +145,32 @@ int vc4_mock_atomic_del_output(struct kunit *test,
int ret;
encoder = vc4_find_encoder_by_type(drm, type);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+ if (!encoder)
+ return -ENODEV;
crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+ if (!crtc)
+ return -ENODEV;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
crtc_state->active = false;
ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
- KUNIT_ASSERT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
output = encoder_to_vc4_dummy_output(encoder);
conn = &output->connector;
conn_state = drm_atomic_get_connector_state(state, conn);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
- KUNIT_ASSERT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
return 0;
}
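Editor's note: with the asserts replaced by error returns above, callers own the EDEADLK contract documented in the new kernel-doc: on -EDEADLK the whole atomic sequence must be cleared, backed off, and restarted, which is exactly what the vc4_test_pv_muxing.c hunks that follow do with drm_atomic_state_clear() and drm_modeset_backoff(). The standalone toy below only models that restart-from-the-top control flow; add_output() and the attempts counter are hypothetical.

#include <assert.h>
#include <errno.h>

static int attempts;

/* Hypothetical step that "contends" on its first call and succeeds on retry. */
static int add_output(void)
{
	return (attempts++ == 0) ? -EDEADLK : 0;
}

int main(void)
{
	int ret;

retry:
	ret = add_output();
	if (ret == -EDEADLK)
		goto retry; /* real tests also clear the state and back off here */

	assert(ret == 0 && attempts == 2);
	return 0;
}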
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
index 992e8f5c5c6e..d1f694029169 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -20,7 +20,6 @@
struct pv_muxing_priv {
struct vc4_dev *vc4;
- struct drm_atomic_state *state;
};
static bool check_fifo_conflict(struct kunit *test,
@@ -677,18 +676,41 @@ static void drm_vc4_test_pv_muxing(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
const struct pv_muxing_priv *priv = test->priv;
- struct drm_atomic_state *state = priv->state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
unsigned int i;
int ret;
+ drm_modeset_acquire_init(&ctx, 0);
+
+ vc4 = priv->vc4;
+ drm = &vc4->base;
+
+retry:
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
for (i = 0; i < params->nencoders; i++) {
enum vc4_encoder_type enc_type = params->encoders[i];
ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
}
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
KUNIT_EXPECT_TRUE(test,
@@ -700,33 +722,61 @@ static void drm_vc4_test_pv_muxing(struct kunit *test)
KUNIT_EXPECT_TRUE(test, check_channel_for_encoder(test, state, enc_type,
params->check_fn));
}
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
static void drm_vc4_test_pv_muxing_invalid(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
const struct pv_muxing_priv *priv = test->priv;
- struct drm_atomic_state *state = priv->state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
unsigned int i;
int ret;
+ drm_modeset_acquire_init(&ctx, 0);
+
+ vc4 = priv->vc4;
+ drm = &vc4->base;
+
+retry:
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
for (i = 0; i < params->nencoders; i++) {
enum vc4_encoder_type enc_type = params->encoders[i];
ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
}
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_EXPECT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
static int vc4_pv_muxing_test_init(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
- struct drm_modeset_acquire_ctx ctx;
struct pv_muxing_priv *priv;
- struct drm_device *drm;
struct vc4_dev *vc4;
priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
@@ -737,15 +787,6 @@ static int vc4_pv_muxing_test_init(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
priv->vc4 = vc4;
- drm_modeset_acquire_init(&ctx, 0);
-
- drm = &vc4->base;
- priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
return 0;
}
@@ -800,13 +841,26 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -823,13 +877,26 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -874,16 +941,35 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -908,13 +994,26 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -968,25 +1067,50 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
-
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 779b22efe27b..458e5d987964 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -552,8 +552,6 @@ struct vc4_dsi {
struct vc4_encoder encoder;
struct mipi_dsi_host dsi_host;
- struct kref kref;
-
struct platform_device *pdev;
struct drm_bridge *out_bridge;
@@ -1160,12 +1158,13 @@ static void vc4_dsi_bridge_enable(struct drm_bridge *bridge,
}
static int vc4_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
/* Attach the panel or bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge,
+ return drm_bridge_attach(encoder, dsi->out_bridge,
&dsi->bridge, flags);
}
@@ -1621,29 +1620,11 @@ static void vc4_dsi_dma_chan_release(void *ptr)
dsi->reg_dma_chan = NULL;
}
-static void vc4_dsi_release(struct kref *kref)
-{
- struct vc4_dsi *dsi =
- container_of(kref, struct vc4_dsi, kref);
-
- kfree(dsi);
-}
-
-static void vc4_dsi_get(struct vc4_dsi *dsi)
-{
- kref_get(&dsi->kref);
-}
-
-static void vc4_dsi_put(struct vc4_dsi *dsi)
-{
- kref_put(&dsi->kref, &vc4_dsi_release);
-}
-
static void vc4_dsi_release_action(struct drm_device *drm, void *ptr)
{
struct vc4_dsi *dsi = ptr;
- vc4_dsi_put(dsi);
+ drm_bridge_put(&dsi->bridge);
}
static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
@@ -1654,7 +1635,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
struct drm_encoder *encoder = &dsi->encoder.base;
int ret;
- vc4_dsi_get(dsi);
+ drm_bridge_get(&dsi->bridge);
ret = drmm_add_action_or_reset(drm, vc4_dsi_release_action, dsi);
if (ret)
@@ -1809,15 +1790,12 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct vc4_dsi *dsi;
- dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct vc4_dsi, bridge, &vc4_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dev_set_drvdata(dev, dsi);
- kref_init(&dsi->kref);
-
dsi->pdev = pdev;
- dsi->bridge.funcs = &vc4_dsi_bridge_funcs;
#ifdef CONFIG_OF
dsi->bridge.of_node = dev->of_node;
#endif
@@ -1835,7 +1813,6 @@ static void vc4_dsi_dev_remove(struct platform_device *pdev)
struct vc4_dsi *dsi = dev_get_drvdata(dev);
mipi_dsi_host_unregister(&dsi->dsi_host);
- vc4_dsi_put(dsi);
}
struct platform_driver vc4_dsi_driver = {
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 37238a12baa5..a29a6ef266f9 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -51,6 +51,7 @@
#include <linux/reset.h>
#include <sound/dmaengine_pcm.h>
#include <sound/hdmi-codec.h>
+#include <sound/jack.h>
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -372,13 +373,13 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
* the lock for now.
*/
+ drm_atomic_helper_connector_hdmi_hotplug(connector, status);
+
if (status == connector_status_disconnected) {
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
return;
}
- drm_atomic_helper_connector_hdmi_hotplug(connector, status);
-
cec_s_phys_addr(vc4_hdmi->cec_adap,
connector->display_info.source_physical_address, false);
@@ -2175,6 +2176,22 @@ static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs = {
.shutdown = vc4_hdmi_audio_shutdown,
};
+static int vc4_hdmi_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct vc4_hdmi *vc4_hdmi = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component;
+ int ret;
+
+ ret = snd_soc_card_jack_new(rtd->card, "HDMI Jack", SND_JACK_LINEOUT,
+ &vc4_hdmi->hdmi_jack);
+ if (ret) {
+ dev_err(rtd->dev, "HDMI Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ return snd_soc_component_set_jack(component, &vc4_hdmi->hdmi_jack, NULL);
+}
+
static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
{
const struct vc4_hdmi_register *mai_data =
@@ -2288,6 +2305,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
dai_link->cpus->dai_name = dev_name(dev);
dai_link->codecs->name = dev_name(&vc4_hdmi->connector.hdmi_audio.codec_pdev->dev);
dai_link->platforms->name = dev_name(dev);
+ dai_link->init = vc4_hdmi_codec_init;
card->dai_link = dai_link;
card->num_links = 1;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index e3d989ca302b..a31157c99bee 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -4,6 +4,7 @@
#include <drm/drm_connector.h>
#include <media/cec.h>
#include <sound/dmaengine_pcm.h>
+#include <sound/hdmi-codec.h>
#include <sound/soc.h>
#include "vc4_drv.h"
@@ -211,6 +212,12 @@ struct vc4_hdmi {
* KMS hooks. Protected by @mutex.
*/
enum hdmi_colorspace output_format;
+
+ /**
+ * @hdmi_jack: Represents the connection state of the HDMI plug, for
+ * ALSA jack detection.
+ */
+ struct snd_soc_jack hdmi_jack;
};
#define connector_to_vc4_hdmi(_connector) \
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index c5e84d3494d2..056d344c5411 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -2080,7 +2080,7 @@ static int vc6_plane_mode_set(struct drm_plane *plane,
/* HPPF plane 1 */
vc4_dlist_write(vc4_state, kernel);
/* VPPF plane 1 */
- vc4_dlist_write(vc4_state, kernel);
+ vc4_dlist_write(vc4_state, kernel);
}
}
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 37bb1fb58cf9..b611c7c8ca2d 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -53,25 +53,10 @@ static void vgem_fence_release(struct dma_fence *base)
dma_fence_free(&fence->base);
}
-static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
-{
- snprintf(str, size, "%llu", fence->seqno);
-}
-
-static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
- int size)
-{
- snprintf(str, size, "%llu",
- dma_fence_is_signaled(fence) ? fence->seqno : 0);
-}
-
static const struct dma_fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
.release = vgem_fence_release,
-
- .fence_value_str = vgem_fence_value_str,
- .timeline_value_str = vgem_fence_timeline_value_str,
};
static void vgem_fence_timeout(struct timer_list *t)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 2d88e390feb4..e32e680c7197 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -128,6 +128,14 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
drm_dev_put(dev);
}
+static void virtio_gpu_shutdown(struct virtio_device *vdev)
+{
+ /*
+ * drm does its own synchronization on shutdown.
+ * Do nothing here, opt out of device reset.
+ */
+}
+
static void virtio_gpu_config_changed(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
@@ -162,6 +170,7 @@ static struct virtio_driver virtio_gpu_driver = {
.id_table = id_table,
.probe = virtio_gpu_probe,
.remove = virtio_gpu_remove,
+ .shutdown = virtio_gpu_shutdown,
.config_changed = virtio_gpu_config_changed
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f28357dbde35..44c1d8ef3c4d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -49,26 +49,10 @@ static bool virtio_gpu_fence_signaled(struct dma_fence *f)
return false;
}
-static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
-{
- snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
-}
-
-static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
- int size)
-{
- struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
-
- snprintf(str, size, "%llu",
- (u64)atomic64_read(&fence->drv->last_fence_id));
-}
-
static const struct dma_fence_ops virtio_gpu_fence_ops = {
.get_driver_name = virtio_gpu_get_driver_name,
.get_timeline_name = virtio_gpu_get_timeline_name,
.signaled = virtio_gpu_fence_signaled,
- .fence_value_str = virtio_gpu_fence_value_str,
- .timeline_value_str = virtio_gpu_timeline_value_str,
};
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index dde8fc1a3689..90c99d83c4cf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -115,13 +115,14 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
if (!vgdev->has_context_init)
virtio_gpu_create_context(obj->dev, file);
- objs = virtio_gpu_array_alloc(1);
- if (!objs)
- return -ENOMEM;
- virtio_gpu_array_add_obj(objs, obj);
+ if (vfpriv->context_created) {
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, obj);
- if (vfpriv->ctx_id)
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
+ }
out_notify:
virtio_gpu_notify(vgdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a6f5a78f436a..698ea7adb951 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -366,13 +366,7 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return 0;
obj = new_state->fb->obj[0];
- if (obj->import_attach) {
- ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
- if (ret)
- return ret;
- }
-
- if (bo->dumb || obj->import_attach) {
+ if (bo->dumb || drm_gem_is_imported(obj)) {
vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
vgdev->fence_drv.context,
0);
@@ -380,7 +374,21 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return -ENOMEM;
}
+ if (drm_gem_is_imported(obj)) {
+ ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
+ if (ret)
+ goto err_fence;
+ }
+
return 0;
+
+err_fence:
+ if (vgplane_st->fence) {
+ dma_fence_put(&vgplane_st->fence->f);
+ vgplane_st->fence = NULL;
+ }
+
+ return ret;
}
static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
@@ -409,7 +417,7 @@ static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
}
obj = state->fb->obj[0];
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
virtio_gpu_cleanup_imported_obj(obj);
}
@@ -500,11 +508,19 @@ static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
- /* Only support mapped shmem bo */
- if (virtio_gpu_is_vram(bo) || bo->base.base.import_attach || !bo->base.vaddr)
+ if (virtio_gpu_is_vram(bo) || drm_gem_is_imported(&bo->base.base))
return -ENODEV;
- iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+ if (bo->base.vaddr) {
+ iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+ } else {
+ struct drm_gem_shmem_object *shmem = &bo->base;
+
+ if (!shmem->pages)
+ return -ENODEV;
+ /* map scanout buffer later */
+ sb->pages = shmem->pages;
+ }
sb->format = plane->state->fb->format;
sb->height = plane->state->fb->height;
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index fe6a0b018571..1118a0250279 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -75,7 +75,6 @@ static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
.ops = {
- .cache_sgt_mapping = true,
.attach = virtio_dma_buf_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = virtgpu_gem_map_dma_buf,
@@ -205,16 +204,15 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
- struct dma_buf_attachment *attach = obj->import_attach;
- if (attach) {
- struct dma_buf *dmabuf = attach->dmabuf;
+ if (drm_gem_is_imported(obj)) {
+ struct dma_buf *dmabuf = obj->dma_buf;
dma_resv_lock(dmabuf->resv, NULL);
virtgpu_dma_buf_unmap(bo);
dma_resv_unlock(dmabuf->resv);
- dma_buf_detach(dmabuf, attach);
+ dma_buf_detach(dmabuf, obj->import_attach);
dma_buf_put(dmabuf);
}
@@ -321,6 +319,7 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
obj = &bo->base.base;
+ obj->resv = buf->resv;
obj->funcs = &virtgpu_gem_dma_buf_funcs;
drm_gem_private_object_init(dev, obj, buf->size);
diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig
index 9def079f685b..3c02f928ffe6 100644
--- a/drivers/gpu/drm/vkms/Kconfig
+++ b/drivers/gpu/drm/vkms/Kconfig
@@ -14,3 +14,18 @@ config DRM_VKMS
a VKMS.
If M is selected the module will be called vkms.
+
+config DRM_VKMS_KUNIT_TEST
+ tristate "KUnit tests for VKMS" if !KUNIT_ALL_TESTS
+ depends on DRM_VKMS && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for VKMS. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on VKMS.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index 1b28a6a32948..d657865e573f 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -6,6 +6,9 @@ vkms-y := \
vkms_formats.o \
vkms_crtc.o \
vkms_composer.o \
- vkms_writeback.o
+ vkms_writeback.o \
+ vkms_connector.o \
+ vkms_config.o
obj-$(CONFIG_DRM_VKMS) += vkms.o
+obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/vkms/tests/.kunitconfig b/drivers/gpu/drm/vkms/tests/.kunitconfig
new file mode 100644
index 000000000000..6a2d87068edc
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/.kunitconfig
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_VKMS=y
+CONFIG_DRM_VKMS_KUNIT_TEST=y
diff --git a/drivers/gpu/drm/vkms/tests/Makefile b/drivers/gpu/drm/vkms/tests/Makefile
new file mode 100644
index 000000000000..9ded37b67a46
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms_config_test.o
diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
new file mode 100644
index 000000000000..ff4566cf9925
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
@@ -0,0 +1,951 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include "../vkms_config.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static size_t vkms_config_get_num_planes(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ count++;
+
+ return count;
+}
+
+static size_t vkms_config_get_num_encoders(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ count++;
+
+ return count;
+}
+
+static size_t vkms_config_get_num_connectors(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ count++;
+
+ return count;
+}
+
+static struct vkms_config_plane *get_first_plane(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ return plane_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_crtc *get_first_crtc(struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ vkms_config_for_each_crtc(config, crtc_cfg)
+ return crtc_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_encoder *get_first_encoder(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ return encoder_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_connector *get_first_connector(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ return connector_cfg;
+
+ return NULL;
+}
+
+struct default_config_case {
+ bool enable_cursor;
+ bool enable_writeback;
+ bool enable_overlay;
+};
+
+static void vkms_config_test_empty_config(struct kunit *test)
+{
+ struct vkms_config *config;
+ const char *dev_name = "test";
+
+ config = vkms_config_create(dev_name);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* The dev_name string and the config have different lifetimes */
+ dev_name = NULL;
+ KUNIT_EXPECT_STREQ(test, vkms_config_get_device_name(config), "test");
+
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_planes(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static struct default_config_case default_config_cases[] = {
+ { false, false, false },
+ { true, false, false },
+ { true, true, false },
+ { true, false, true },
+ { false, true, false },
+ { false, true, true },
+ { false, false, true },
+ { true, true, true },
+};
+
+KUNIT_ARRAY_PARAM(default_config, default_config_cases, NULL);
+
+static void vkms_config_test_default_config(struct kunit *test)
+{
+ const struct default_config_case *params = test->param_value;
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ int n_primaries = 0;
+ int n_cursors = 0;
+ int n_overlays = 0;
+
+ config = vkms_config_default_create(params->enable_cursor,
+ params->enable_writeback,
+ params->enable_overlay);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Planes */
+ vkms_config_for_each_plane(config, plane_cfg) {
+ switch (vkms_config_plane_get_type(plane_cfg)) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ n_primaries++;
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ n_cursors++;
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ n_overlays++;
+ break;
+ default:
+ KUNIT_FAIL_AND_ABORT(test, "Unknown plane type");
+ }
+ }
+ KUNIT_EXPECT_EQ(test, n_primaries, 1);
+ KUNIT_EXPECT_EQ(test, n_cursors, params->enable_cursor ? 1 : 0);
+ KUNIT_EXPECT_EQ(test, n_overlays, params->enable_overlay ? 8 : 0);
+
+ /* CRTCs */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+
+ crtc_cfg = get_first_crtc(config);
+ KUNIT_EXPECT_EQ(test, vkms_config_crtc_get_writeback(crtc_cfg),
+ params->enable_writeback);
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ int n_possible_crtcs = 0;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ KUNIT_EXPECT_PTR_EQ(test, crtc_cfg, possible_crtc);
+ n_possible_crtcs++;
+ }
+ KUNIT_EXPECT_EQ(test, n_possible_crtcs, 1);
+ }
+
+ /* Encoders */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 1);
+
+ /* Connectors */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 1);
+
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_planes(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ int n_planes = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ n_planes++;
+ KUNIT_ASSERT_EQ(test, n_planes, 0);
+
+ plane_cfg1 = vkms_config_create_plane(config);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg1)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 1);
+ n_planes = 0;
+
+ plane_cfg2 = vkms_config_create_plane(config);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg1 && plane_cfg != plane_cfg2)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 2);
+ n_planes = 0;
+
+ vkms_config_destroy_plane(plane_cfg1);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg2)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 0);
+ vkms_config_for_each_crtc(config, crtc_cfg)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 2);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1 && crtc_cfg != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ vkms_config_destroy_crtc(config, crtc_cfg2);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ int n_encoders = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ n_encoders++;
+ KUNIT_ASSERT_EQ(test, n_encoders, 0);
+
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ encoder_cfg2 = vkms_config_create_encoder(config);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1 && encoder_cfg != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 2);
+ n_encoders = 0;
+
+ vkms_config_destroy_encoder(config, encoder_cfg2);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_connectors(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ int n_connectors = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ n_connectors++;
+ KUNIT_ASSERT_EQ(test, n_connectors, 0);
+
+ connector_cfg1 = vkms_config_create_connector(config);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 1);
+ n_connectors = 0;
+
+ connector_cfg2 = vkms_config_create_connector(config);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1 &&
+ connector_cfg != connector_cfg2)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 2);
+ n_connectors = 0;
+
+ vkms_config_destroy_connector(connector_cfg2);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 1);
+ n_connectors = 0;
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_plane_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No planes */
+ plane_cfg = get_first_plane(config);
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many planes */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_plane(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_plane_type(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ int err;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg = get_first_plane(config);
+ vkms_config_destroy_plane(plane_cfg);
+
+ crtc_cfg = get_first_crtc(config);
+
+ /* Invalid: No primary plane */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Multiple primary planes */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: One primary plane */
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Multiple cursor planes */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: One primary and one cursor plane */
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Second CRTC without primary plane */
+ crtc_cfg = vkms_config_create_crtc(config);
+ encoder_cfg = vkms_config_create_encoder(config);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: Second CRTC with a primary plane */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_plane_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg = get_first_plane(config);
+ crtc_cfg = get_first_crtc(config);
+
+ /* Invalid: Primary plane without a possible CRTC */
+ vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_crtc_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_crtc *crtc_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No CRTCs */
+ crtc_cfg = get_first_crtc(config);
+ vkms_config_destroy_crtc(config, crtc_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many CRTCs */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_crtc(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_encoder_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No encoders */
+ encoder_cfg = get_first_encoder(config);
+ vkms_config_destroy_encoder(config, encoder_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many encoders */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_encoder(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_encoder *encoder_cfg;
+ int err;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ crtc_cfg1 = get_first_crtc(config);
+
+ /* Invalid: Encoder without a possible CRTC */
+ encoder_cfg = vkms_config_create_encoder(config);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: Second CRTC with shared encoder */
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Second CRTC without encoders */
+ vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg2);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: First CRTC with 2 possible encoders */
+ vkms_config_destroy_plane(plane_cfg);
+ vkms_config_destroy_crtc(config, crtc_cfg2);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_connector_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No connectors */
+ connector_cfg = get_first_connector(config);
+ vkms_config_destroy_connector(connector_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many connectors */
+ for (n = 0; n <= 32; n++)
+ connector_cfg = vkms_config_create_connector(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_connector_possible_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ encoder_cfg = get_first_encoder(config);
+ connector_cfg = get_first_connector(config);
+
+ /* Invalid: Connector without a possible encoder */
+ vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_attach_different_configs(struct kunit *test)
+{
+ struct vkms_config *config1, *config2;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ int err;
+
+ config1 = vkms_config_create("test1");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config1);
+
+ config2 = vkms_config_create("test2");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config2);
+
+ plane_cfg1 = vkms_config_create_plane(config1);
+ crtc_cfg1 = vkms_config_create_crtc(config1);
+ encoder_cfg1 = vkms_config_create_encoder(config1);
+ connector_cfg1 = vkms_config_create_connector(config1);
+
+ plane_cfg2 = vkms_config_create_plane(config2);
+ crtc_cfg2 = vkms_config_create_crtc(config2);
+ encoder_cfg2 = vkms_config_create_encoder(config2);
+ connector_cfg2 = vkms_config_create_connector(config2);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2);
+
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ vkms_config_destroy(config1);
+ vkms_config_destroy(config2);
+}
+
+static void vkms_config_test_plane_attach_crtc(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *overlay_cfg;
+ struct vkms_config_plane *primary_cfg;
+ struct vkms_config_plane *cursor_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ overlay_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(overlay_cfg, DRM_PLANE_TYPE_OVERLAY);
+ primary_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(primary_cfg, DRM_PLANE_TYPE_PRIMARY);
+ cursor_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(cursor_cfg, DRM_PLANE_TYPE_CURSOR);
+
+ crtc_cfg = vkms_config_create_crtc(config);
+
+ /* No primary or cursor planes */
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Overlay plane, but no primary or cursor planes */
+ err = vkms_config_plane_attach_crtc(overlay_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Primary plane, attaching it twice must fail */
+ err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg);
+ KUNIT_EXPECT_NE(test, err, 0);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_primary_plane(config, crtc_cfg),
+ primary_cfg);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Primary and cursor planes */
+ err = vkms_config_plane_attach_crtc(cursor_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_primary_plane(config, crtc_cfg),
+ primary_cfg);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_cursor_plane(config, crtc_cfg),
+ cursor_cfg);
+
+ /* Detach the overlay and primary planes and destroy the cursor plane */
+ vkms_config_plane_detach_crtc(overlay_cfg, crtc_cfg);
+ vkms_config_plane_detach_crtc(primary_cfg, crtc_cfg);
+ vkms_config_destroy_plane(cursor_cfg);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_plane_get_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ int n_crtcs = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg1 = vkms_config_create_plane(config);
+ plane_cfg2 = vkms_config_create_plane(config);
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ /* No possible CRTCs */
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Plane 1 attached to CRTC 1 and 2 */
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 2);
+ n_crtcs = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Plane 1 attached to CRTC 1 and plane 2 to CRTC 2 */
+ vkms_config_plane_detach_crtc(plane_cfg1, crtc_cfg2);
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+ n_crtcs = 0;
+
+ err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_encoder_get_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ int n_crtcs = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ encoder_cfg2 = vkms_config_create_encoder(config);
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ /* No possible CRTCs */
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Encoder 1 attached to CRTC 1 and 2 */
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 2);
+ n_crtcs = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Encoder 1 attached to CRTC 1 and encoder 2 to CRTC 2 */
+ vkms_config_encoder_detach_crtc(encoder_cfg1, crtc_cfg2);
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+ n_crtcs = 0;
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_connector_get_possible_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+ int n_encoders = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ connector_cfg1 = vkms_config_create_connector(config);
+ connector_cfg2 = vkms_config_create_connector(config);
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ encoder_cfg2 = vkms_config_create_encoder(config);
+
+ /* No possible encoders */
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ /* Connector 1 attached to encoders 1 and 2 */
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg1 &&
+ possible_encoder != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 2);
+ n_encoders = 0;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ /* Connector 1 attached to encoder 1 and connector 2 to encoder 2 */
+ vkms_config_connector_detach_encoder(connector_cfg1, encoder_cfg2);
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+
+ vkms_config_destroy(config);
+}
+
+static struct kunit_case vkms_config_test_cases[] = {
+ KUNIT_CASE(vkms_config_test_empty_config),
+ KUNIT_CASE_PARAM(vkms_config_test_default_config,
+ default_config_gen_params),
+ KUNIT_CASE(vkms_config_test_get_planes),
+ KUNIT_CASE(vkms_config_test_get_crtcs),
+ KUNIT_CASE(vkms_config_test_get_encoders),
+ KUNIT_CASE(vkms_config_test_get_connectors),
+ KUNIT_CASE(vkms_config_test_invalid_plane_number),
+ KUNIT_CASE(vkms_config_test_valid_plane_type),
+ KUNIT_CASE(vkms_config_test_valid_plane_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_invalid_crtc_number),
+ KUNIT_CASE(vkms_config_test_invalid_encoder_number),
+ KUNIT_CASE(vkms_config_test_valid_encoder_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_invalid_connector_number),
+ KUNIT_CASE(vkms_config_test_valid_connector_possible_encoders),
+ KUNIT_CASE(vkms_config_test_attach_different_configs),
+ KUNIT_CASE(vkms_config_test_plane_attach_crtc),
+ KUNIT_CASE(vkms_config_test_plane_get_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_encoder_get_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_connector_get_possible_encoders),
+ {}
+};
+
+static struct kunit_suite vkms_config_test_suite = {
+ .name = "vkms-config",
+ .test_cases = vkms_config_test_cases,
+};
+
+kunit_test_suite(vkms_config_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kunit test for vkms config utility");
diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c
new file mode 100644
index 000000000000..a1df5659b0fb
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_config.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/slab.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_debugfs.h>
+#include <kunit/visibility.h>
+
+#include "vkms_config.h"
+
+struct vkms_config *vkms_config_create(const char *dev_name)
+{
+ struct vkms_config *config;
+
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return ERR_PTR(-ENOMEM);
+
+ config->dev_name = kstrdup_const(dev_name, GFP_KERNEL);
+ if (!config->dev_name) {
+ kfree(config);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_LIST_HEAD(&config->planes);
+ INIT_LIST_HEAD(&config->crtcs);
+ INIT_LIST_HEAD(&config->encoders);
+ INIT_LIST_HEAD(&config->connectors);
+
+ return config;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create);
+
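+/*
+ * The default configuration mirrors the legacy module parameters: one primary
+ * plane, one CRTC, one encoder and one connector are always created, plus an
+ * optional cursor plane and, optionally, NUM_OVERLAY_PLANES overlay planes.
+ */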
+struct vkms_config *vkms_config_default_create(bool enable_cursor,
+ bool enable_writeback,
+ bool enable_overlay)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+ int n;
+
+ config = vkms_config_create(DEFAULT_DEVICE_NAME);
+ if (IS_ERR(config))
+ return config;
+
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+
+ crtc_cfg = vkms_config_create_crtc(config);
+ if (IS_ERR(crtc_cfg))
+ goto err_alloc;
+ vkms_config_crtc_set_writeback(crtc_cfg, enable_writeback);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+
+ if (enable_overlay) {
+ for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+
+ vkms_config_plane_set_type(plane_cfg,
+ DRM_PLANE_TYPE_OVERLAY);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+ }
+ }
+
+ if (enable_cursor) {
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+ }
+
+ encoder_cfg = vkms_config_create_encoder(config);
+ if (IS_ERR(encoder_cfg))
+ goto err_alloc;
+
+ if (vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg))
+ goto err_alloc;
+
+ connector_cfg = vkms_config_create_connector(config);
+ if (IS_ERR(connector_cfg))
+ goto err_alloc;
+
+ if (vkms_config_connector_attach_encoder(connector_cfg, encoder_cfg))
+ goto err_alloc;
+
+ return config;
+
+err_alloc:
+ vkms_config_destroy(config);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_default_create);
+
+void vkms_config_destroy(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg, *plane_tmp;
+ struct vkms_config_crtc *crtc_cfg, *crtc_tmp;
+ struct vkms_config_encoder *encoder_cfg, *encoder_tmp;
+ struct vkms_config_connector *connector_cfg, *connector_tmp;
+
+ list_for_each_entry_safe(plane_cfg, plane_tmp, &config->planes, link)
+ vkms_config_destroy_plane(plane_cfg);
+
+ list_for_each_entry_safe(crtc_cfg, crtc_tmp, &config->crtcs, link)
+ vkms_config_destroy_crtc(config, crtc_cfg);
+
+ list_for_each_entry_safe(encoder_cfg, encoder_tmp, &config->encoders, link)
+ vkms_config_destroy_encoder(config, encoder_cfg);
+
+ list_for_each_entry_safe(connector_cfg, connector_tmp, &config->connectors, link)
+ vkms_config_destroy_connector(connector_cfg);
+
+ kfree_const(config->dev_name);
+ kfree(config);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy);
+
+static bool valid_plane_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_planes;
+
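+ /* list_count_nodes() takes a non-const list head, hence the cast */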
+ n_planes = list_count_nodes((struct list_head *)&config->planes);
+ if (n_planes <= 0 || n_planes >= 32) {
+ drm_info(dev, "The number of planes must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_planes_for_crtc(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_plane *plane_cfg;
+ bool has_primary_plane = false;
+ bool has_cursor_plane = false;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ enum drm_plane_type type;
+
+ type = vkms_config_plane_get_type(plane_cfg);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc != crtc_cfg)
+ continue;
+
+ if (type == DRM_PLANE_TYPE_PRIMARY) {
+ if (has_primary_plane) {
+ drm_info(dev, "Multiple primary planes\n");
+ return false;
+ }
+
+ has_primary_plane = true;
+ } else if (type == DRM_PLANE_TYPE_CURSOR) {
+ if (has_cursor_plane) {
+ drm_info(dev, "Multiple cursor planes\n");
+ return false;
+ }
+
+ has_cursor_plane = true;
+ }
+ }
+ }
+
+ if (!has_primary_plane) {
+ drm_info(dev, "Primary plane not found\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_plane_possible_crtcs(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_plane *plane_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ if (xa_empty(&plane_cfg->possible_crtcs)) {
+ drm_info(dev, "All planes must have at least one possible CRTC\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool valid_crtc_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_crtcs;
+
+ n_crtcs = list_count_nodes((struct list_head *)&config->crtcs);
+ if (n_crtcs <= 0 || n_crtcs >= 32) {
+ drm_info(dev, "The number of CRTCs must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_encoder_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_encoders;
+
+ n_encoders = list_count_nodes((struct list_head *)&config->encoders);
+ if (n_encoders <= 0 || n_encoders >= 32) {
+ drm_info(dev, "The number of encoders must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_encoder_possible_crtcs(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ if (xa_empty(&encoder_cfg->possible_crtcs)) {
+ drm_info(dev, "All encoders must have at least one possible CRTC\n");
+ return false;
+ }
+ }
+
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ bool crtc_has_encoder = false;
+
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg,
+ idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ crtc_has_encoder = true;
+ }
+ }
+
+ if (!crtc_has_encoder) {
+ drm_info(dev, "All CRTCs must have at least one possible encoder\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool valid_connector_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_connectors;
+
+ n_connectors = list_count_nodes((struct list_head *)&config->connectors);
+ if (n_connectors <= 0 || n_connectors >= 32) {
+ drm_info(dev, "The number of connectors must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_connector_possible_encoders(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg) {
+ if (xa_empty(&connector_cfg->possible_encoders)) {
+ drm_info(dev,
+ "All connectors must have at least one possible encoder\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool vkms_config_is_valid(const struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ if (!valid_plane_number(config))
+ return false;
+
+ if (!valid_crtc_number(config))
+ return false;
+
+ if (!valid_encoder_number(config))
+ return false;
+
+ if (!valid_connector_number(config))
+ return false;
+
+ if (!valid_plane_possible_crtcs(config))
+ return false;
+
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (!valid_planes_for_crtc(config, crtc_cfg))
+ return false;
+ }
+
+ if (!valid_encoder_possible_crtcs(config))
+ return false;
+
+ if (!valid_connector_possible_encoders(config))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_is_valid);
+
+static int vkms_config_show(struct seq_file *m, void *data)
+{
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
+ const char *dev_name;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+
+ dev_name = vkms_config_get_device_name((struct vkms_config *)vkmsdev->config);
+ seq_printf(m, "dev_name=%s\n", dev_name);
+
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ seq_puts(m, "plane:\n");
+ seq_printf(m, "\ttype=%d\n",
+ vkms_config_plane_get_type(plane_cfg));
+ }
+
+ vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) {
+ seq_puts(m, "crtc:\n");
+ seq_printf(m, "\twriteback=%d\n",
+ vkms_config_crtc_get_writeback(crtc_cfg));
+ }
+
+ vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg)
+ seq_puts(m, "encoder\n");
+
+ vkms_config_for_each_connector(vkmsdev->config, connector_cfg)
+ seq_puts(m, "connector\n");
+
+ return 0;
+}
+
+static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
+ { "vkms_config", vkms_config_show, 0 },
+};
+
+void vkms_config_register_debugfs(struct vkms_device *vkms_device)
+{
+ drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
+ ARRAY_SIZE(vkms_config_debugfs_list));
+}
+
+struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+
+ plane_cfg = kzalloc(sizeof(*plane_cfg), GFP_KERNEL);
+ if (!plane_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ plane_cfg->config = config;
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
+ xa_init_flags(&plane_cfg->possible_crtcs, XA_FLAGS_ALLOC);
+
+ list_add_tail(&plane_cfg->link, &config->planes);
+
+ return plane_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_plane);
+
+void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg)
+{
+ xa_destroy(&plane_cfg->possible_crtcs);
+ list_del(&plane_cfg->link);
+ kfree(plane_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_plane);
+
+int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ u32 crtc_idx = 0;
+
+ if (plane_cfg->config != crtc_cfg->config)
+ return -EINVAL;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ return -EEXIST;
+ }
+
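+ /* Store crtc_cfg at the next free index of the possible_crtcs xarray */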
+ return xa_alloc(&plane_cfg->possible_crtcs, &crtc_idx, crtc_cfg,
+ xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_attach_crtc);
+
+void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ xa_erase(&plane_cfg->possible_crtcs, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_detach_crtc);
+
+struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ crtc_cfg = kzalloc(sizeof(*crtc_cfg), GFP_KERNEL);
+ if (!crtc_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ crtc_cfg->config = config;
+ vkms_config_crtc_set_writeback(crtc_cfg, false);
+
+ list_add_tail(&crtc_cfg->link, &config->crtcs);
+
+ return crtc_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_crtc);
+
+void vkms_config_destroy_crtc(struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg);
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg);
+
+ list_del(&crtc_cfg->link);
+ kfree(crtc_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_crtc);
+
+/**
+ * vkms_config_crtc_get_plane() - Return the first plane of a given type
+ * attached to a CRTC
+ * @config: Configuration containing the CRTC and the plane
+ * @crtc_cfg: Only find planes attached to this CRTC
+ * @type: Plane type to search
+ *
+ * Returns:
+ * The first plane found attached to @crtc_cfg with the type @type.
+ */
+static struct vkms_config_plane *vkms_config_crtc_get_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg,
+ enum drm_plane_type type)
+{
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *possible_crtc;
+ enum drm_plane_type current_type;
+ unsigned long idx = 0;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ current_type = vkms_config_plane_get_type(plane_cfg);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg && current_type == type)
+ return plane_cfg;
+ }
+ }
+
+ return NULL;
+}
+
+struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_PRIMARY);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_primary_plane);
+
+struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_CURSOR);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_cursor_plane);
+
+struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+
+ encoder_cfg = kzalloc(sizeof(*encoder_cfg), GFP_KERNEL);
+ if (!encoder_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ encoder_cfg->config = config;
+ xa_init_flags(&encoder_cfg->possible_crtcs, XA_FLAGS_ALLOC);
+
+ list_add_tail(&encoder_cfg->link, &config->encoders);
+
+ return encoder_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_encoder);
+
+void vkms_config_destroy_encoder(struct vkms_config *config,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg);
+
+ xa_destroy(&encoder_cfg->possible_crtcs);
+ list_del(&encoder_cfg->link);
+ kfree(encoder_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_encoder);
+
+int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ u32 crtc_idx = 0;
+
+ if (encoder_cfg->config != crtc_cfg->config)
+ return -EINVAL;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ return -EEXIST;
+ }
+
+ return xa_alloc(&encoder_cfg->possible_crtcs, &crtc_idx, crtc_cfg,
+ xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_attach_crtc);
+
+void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ xa_erase(&encoder_cfg->possible_crtcs, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_detach_crtc);
+
+struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ connector_cfg = kzalloc(sizeof(*connector_cfg), GFP_KERNEL);
+ if (!connector_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ connector_cfg->config = config;
+ xa_init_flags(&connector_cfg->possible_encoders, XA_FLAGS_ALLOC);
+
+ list_add_tail(&connector_cfg->link, &config->connectors);
+
+ return connector_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_connector);
+
+void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg)
+{
+ xa_destroy(&connector_cfg->possible_encoders);
+ list_del(&connector_cfg->link);
+ kfree(connector_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_connector);
+
+int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+ u32 encoder_idx = 0;
+
+ if (connector_cfg->config != encoder_cfg->config)
+ return -EINVAL;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg, idx,
+ possible_encoder) {
+ if (possible_encoder == encoder_cfg)
+ return -EEXIST;
+ }
+
+ return xa_alloc(&connector_cfg->possible_encoders, &encoder_idx,
+ encoder_cfg, xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_attach_encoder);
+
+void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg, idx,
+ possible_encoder) {
+ if (possible_encoder == encoder_cfg)
+ xa_erase(&connector_cfg->possible_encoders, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_detach_encoder);
diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h
new file mode 100644
index 000000000000..0118e3f99706
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_config.h
@@ -0,0 +1,437 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_CONFIG_H_
+#define _VKMS_CONFIG_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+#include "vkms_drv.h"
+
+/**
+ * struct vkms_config - General configuration for VKMS driver
+ *
+ * @dev_name: Name of the device
+ * @planes: List of planes configured for the device
+ * @crtcs: List of CRTCs configured for the device
+ * @encoders: List of encoders configured for the device
+ * @connectors: List of connectors configured for the device
+ * @dev: Used to store the current VKMS device. Only set when the device is instantiated.
+ */
+struct vkms_config {
+ const char *dev_name;
+ struct list_head planes;
+ struct list_head crtcs;
+ struct list_head encoders;
+ struct list_head connectors;
+ struct vkms_device *dev;
+};
+
+/**
+ * struct vkms_config_plane
+ *
+ * @link: Link to the other planes in vkms_config
+ * @config: The vkms_config this plane belongs to
+ * @type: Type of the plane. The creator of the configuration needs to ensure that
+ * at least one primary plane is present.
+ * @possible_crtcs: Array of CRTCs that can be used with this plane
+ * @plane: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS plane during
+ * device creation. This pointer is not managed by the configuration and
+ * must be managed by other means.
+ */
+struct vkms_config_plane {
+ struct list_head link;
+ struct vkms_config *config;
+
+ enum drm_plane_type type;
+ struct xarray possible_crtcs;
+
+ /* Internal usage */
+ struct vkms_plane *plane;
+};
+
+/**
+ * struct vkms_config_crtc
+ *
+ * @link: Link to the other CRTCs in vkms_config
+ * @config: The vkms_config this CRTC belongs to
+ * @writeback: If true, a writeback buffer can be attached to the CRTC
+ * @crtc: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS CRTC during
+ * device creation. This pointer is not managed by the configuration and
+ * must be managed by other means.
+ */
+struct vkms_config_crtc {
+ struct list_head link;
+ struct vkms_config *config;
+
+ bool writeback;
+
+ /* Internal usage */
+ struct vkms_output *crtc;
+};
+
+/**
+ * struct vkms_config_encoder
+ *
+ * @link: Link to the other encoders in vkms_config
+ * @config: The vkms_config this encoder belongs to
+ * @possible_crtcs: Array of CRTCs that can be used with this encoder
+ * @encoder: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS encoder
+ * during device creation. This pointer is not managed by the
+ * configuration and must be managed by other means.
+ */
+struct vkms_config_encoder {
+ struct list_head link;
+ struct vkms_config *config;
+
+ struct xarray possible_crtcs;
+
+ /* Internal usage */
+ struct drm_encoder *encoder;
+};
+
+/**
+ * struct vkms_config_connector
+ *
+ * @link: Link to the other connectors in vkms_config
+ * @config: The vkms_config this connector belongs to
+ * @possible_encoders: Array of encoders that can be used with this connector
+ * @connector: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS connector
+ * during device creation. This pointer is not managed by the
+ * configuration and must be managed by other means.
+ */
+struct vkms_config_connector {
+ struct list_head link;
+ struct vkms_config *config;
+
+ struct xarray possible_encoders;
+
+ /* Internal usage */
+ struct vkms_connector *connector;
+};
+
+/**
+ * vkms_config_for_each_plane - Iterate over the vkms_config planes
+ * @config: &struct vkms_config pointer
+ * @plane_cfg: &struct vkms_config_plane pointer used as cursor
+ */
+#define vkms_config_for_each_plane(config, plane_cfg) \
+ list_for_each_entry((plane_cfg), &(config)->planes, link)
+
+/**
+ * vkms_config_for_each_crtc - Iterate over the vkms_config CRTCs
+ * @config: &struct vkms_config pointer
+ * @crtc_cfg: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_for_each_crtc(config, crtc_cfg) \
+ list_for_each_entry((crtc_cfg), &(config)->crtcs, link)
+
+/**
+ * vkms_config_for_each_encoder - Iterate over the vkms_config encoders
+ * @config: &struct vkms_config pointer
+ * @encoder_cfg: &struct vkms_config_encoder pointer used as cursor
+ */
+#define vkms_config_for_each_encoder(config, encoder_cfg) \
+ list_for_each_entry((encoder_cfg), &(config)->encoders, link)
+
+/**
+ * vkms_config_for_each_connector - Iterate over the vkms_config connectors
+ * @config: &struct vkms_config pointer
+ * @connector_cfg: &struct vkms_config_connector pointer used as cursor
+ */
+#define vkms_config_for_each_connector(config, connector_cfg) \
+ list_for_each_entry((connector_cfg), &(config)->connectors, link)
+
+/**
+ * vkms_config_plane_for_each_possible_crtc - Iterate over the vkms_config_plane
+ * possible CRTCs
+ * @plane_cfg: &struct vkms_config_plane pointer
+ * @idx: Index of the cursor
+ * @possible_crtc: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) \
+ xa_for_each(&(plane_cfg)->possible_crtcs, idx, (possible_crtc))
+
+/**
+ * vkms_config_encoder_for_each_possible_crtc - Iterate over the
+ * vkms_config_encoder possible CRTCs
+ * @encoder_cfg: &struct vkms_config_encoder pointer
+ * @idx: Index of the cursor
+ * @possible_crtc: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) \
+ xa_for_each(&(encoder_cfg)->possible_crtcs, idx, (possible_crtc))
+
+/**
+ * vkms_config_connector_for_each_possible_encoder - Iterate over the
+ * vkms_config_connector possible encoders
+ * @connector_cfg: &struct vkms_config_connector pointer
+ * @idx: Index of the cursor
+ * @possible_encoder: &struct vkms_config_encoder pointer used as cursor
+ */
+#define vkms_config_connector_for_each_possible_encoder(connector_cfg, idx, possible_encoder) \
+ xa_for_each(&(connector_cfg)->possible_encoders, idx, (possible_encoder))
+
+/**
+ * vkms_config_create() - Create a new VKMS configuration
+ * @dev_name: Name of the device
+ *
+ * Returns:
+ * The new vkms_config or an error. Call vkms_config_destroy() to free the
+ * returned configuration.
+ */
+struct vkms_config *vkms_config_create(const char *dev_name);
+
+/**
+ * vkms_config_default_create() - Create the configuration for the default device
+ * @enable_cursor: Whether to create a cursor plane
+ * @enable_writeback: Whether to create a writeback connector
+ * @enable_overlay: Whether to create overlay planes
+ *
+ * Returns:
+ * The default vkms_config or an error. Call vkms_config_destroy() to free the
+ * returned configuration.
+ */
+struct vkms_config *vkms_config_default_create(bool enable_cursor,
+ bool enable_writeback,
+ bool enable_overlay);
+
+/**
+ * vkms_config_destroy() - Free a VKMS configuration
+ * @config: vkms_config to free
+ */
+void vkms_config_destroy(struct vkms_config *config);
+
+/**
+ * vkms_config_get_device_name() - Return the name of the device
+ * @config: Configuration to get the device name from
+ *
+ * Returns:
+ * The device name. Only valid while @config is valid.
+ */
+static inline const char *
+vkms_config_get_device_name(struct vkms_config *config)
+{
+ return config->dev_name;
+}
+
+/**
+ * vkms_config_get_num_crtcs() - Return the number of CRTCs in the configuration
+ * @config: Configuration to get the number of CRTCs from
+ */
+static inline size_t vkms_config_get_num_crtcs(struct vkms_config *config)
+{
+ return list_count_nodes(&config->crtcs);
+}
+
+/**
+ * vkms_config_is_valid() - Validate a configuration
+ * @config: Configuration to validate
+ *
+ * Returns:
+ * Whether the configuration is valid or not.
+ * For example, a configuration without primary planes is not valid.
+ */
+bool vkms_config_is_valid(const struct vkms_config *config);
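+
+/*
+ * Illustrative lifecycle sketch (hypothetical caller, not part of this
+ * header): a configuration is typically created, validated and destroyed
+ * like this:
+ *
+ * struct vkms_config *config;
+ *
+ * config = vkms_config_default_create(false, false, false);
+ * if (IS_ERR(config))
+ *         return PTR_ERR(config);
+ *
+ * if (!vkms_config_is_valid(config)) {
+ *         vkms_config_destroy(config);
+ *         return -EINVAL;
+ * }
+ *
+ * vkms_config_destroy(config);
+ */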
+
+/**
+ * vkms_config_register_debugfs() - Register a debugfs file to show the device's
+ * configuration
+ * @vkms_device: Device to register
+ */
+void vkms_config_register_debugfs(struct vkms_device *vkms_device);
+
+/**
+ * vkms_config_create_plane() - Add a new plane configuration
+ * @config: Configuration to add the plane to
+ *
+ * Returns:
+ * The new plane configuration or an error. Call vkms_config_destroy_plane() to
+ * free the returned plane configuration.
+ */
+struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_plane() - Remove and free a plane configuration
+ * @plane_cfg: Plane configuration to destroy
+ */
+void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg);
+
+/**
+ * vkms_config_plane_get_type() - Return the plane type
+ * @plane_cfg: Plane to get the type from
+ */
+static inline enum drm_plane_type
+vkms_config_plane_get_type(struct vkms_config_plane *plane_cfg)
+{
+ return plane_cfg->type;
+}
+
+/**
+ * vkms_config_plane_set_type() - Set the plane type
+ * @plane_cfg: Plane to set the type to
+ * @type: New plane type
+ */
+static inline void
+vkms_config_plane_set_type(struct vkms_config_plane *plane_cfg,
+ enum drm_plane_type type)
+{
+ plane_cfg->type = type;
+}
+
+/**
+ * vkms_config_plane_attach_crtc - Attach a plane to a CRTC
+ * @plane_cfg: Plane to attach
+ * @crtc_cfg: CRTC to attach @plane_cfg to
+ */
+int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_plane_detach_crtc - Detach a plane from a CRTC
+ * @plane_cfg: Plane to detach
+ * @crtc_cfg: CRTC to detach @plane_cfg from
+ */
+void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_crtc() - Add a new CRTC configuration
+ * @config: Configuration to add the CRTC to
+ *
+ * Returns:
+ * The new CRTC configuration or an error. Call vkms_config_destroy_crtc() to
+ * free the returned CRTC configuration.
+ */
+struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_crtc() - Remove and free a CRTC configuration
+ * @config: Configuration to remove the CRTC from
+ * @crtc_cfg: CRTC configuration to destroy
+ */
+void vkms_config_destroy_crtc(struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_crtc_get_writeback() - Return whether a writeback connector will be created
+ * @crtc_cfg: CRTC with or without a writeback connector
+ */
+static inline bool
+vkms_config_crtc_get_writeback(struct vkms_config_crtc *crtc_cfg)
+{
+ return crtc_cfg->writeback;
+}
+
+/**
+ * vkms_config_crtc_set_writeback() - Set whether a writeback connector will be created
+ * @crtc_cfg: Target CRTC
+ * @writeback: Enable or disable the writeback connector
+ */
+static inline void
+vkms_config_crtc_set_writeback(struct vkms_config_crtc *crtc_cfg,
+ bool writeback)
+{
+ crtc_cfg->writeback = writeback;
+}
+
+/**
+ * vkms_config_crtc_primary_plane() - Return the primary plane for a CRTC
+ * @config: Configuration containing the CRTC
+ * @crtc_cfg: Target CRTC
+ *
+ * Note that, if multiple primary planes are found, the first one is returned.
+ * In this case, the configuration will be invalid. See vkms_config_is_valid().
+ *
+ * Returns:
+ * The primary plane or NULL if none is assigned yet.
+ */
+struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_crtc_cursor_plane() - Return the cursor plane for a CRTC
+ * @config: Configuration containing the CRTC
+ * @crtc_cfg: Target CRTC
+ *
+ * Note that, if multiple cursor planes are found, the first one is returned.
+ * In this case, the configuration will be invalid. See vkms_config_is_valid().
+ *
+ * Returns:
+ * The cursor plane or NULL if none is assigned yet.
+ */
+struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_encoder() - Add a new encoder configuration
+ * @config: Configuration to add the encoder to
+ *
+ * Returns:
+ * The new encoder configuration or an error. Call vkms_config_destroy_encoder()
+ * to free the returned encoder configuration.
+ */
+struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_encoder() - Remove and free an encoder configuration
+ * @config: Configuration to remove the encoder from
+ * @encoder_cfg: Encoder configuration to destroy
+ */
+void vkms_config_destroy_encoder(struct vkms_config *config,
+ struct vkms_config_encoder *encoder_cfg);
+
+/**
+ * vkms_config_encoder_attach_crtc - Attach an encoder to a CRTC
+ * @encoder_cfg: Encoder to attach
+ * @crtc_cfg: CRTC to attach @encoder_cfg to
+ */
+int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_encoder_detach_crtc - Detach an encoder from a CRTC
+ * @encoder_cfg: Encoder to detach
+ * @crtc_cfg: CRTC to detach @encoder_cfg from
+ */
+void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_connector() - Add a new connector configuration
+ * @config: Configuration to add the connector to
+ *
+ * Returns:
+ * The new connector configuration or an error. Call
+ * vkms_config_destroy_connector() to free the returned connector configuration.
+ */
+struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_connector() - Remove and free a connector configuration
+ * @connector_cfg: Connector configuration to destroy
+ */
+void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg);
+
+/**
+ * vkms_config_connector_attach_encoder - Attach a connector to an encoder
+ * @connector_cfg: Connector to attach
+ * @encoder_cfg: Encoder to attach @connector_cfg to
+ */
+int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg);
+
+/**
+ * vkms_config_connector_detach_encoder - Detach a connector from an encoder
+ * @connector_cfg: Connector to detach
+ * @encoder_cfg: Encoder to detach @connector_cfg from
+ */
+void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg);
+
+#endif /* _VKMS_CONFIG_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_connector.c b/drivers/gpu/drm/vkms/vkms_connector.c
new file mode 100644
index 000000000000..48b10cba322a
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_connector.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+
+#include "vkms_connector.h"
+
+static const struct drm_connector_funcs vkms_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int vkms_conn_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ /* Use the default modes list from DRM */
+ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+
+ return count;
+}
+
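+/* VKMS has no encoder preference: report the first possible encoder as best */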
+static struct drm_encoder *vkms_conn_best_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
+}
+
+static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
+ .get_modes = vkms_conn_get_modes,
+ .best_encoder = vkms_conn_best_encoder,
+};
+
+struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+ struct vkms_connector *connector;
+ int ret;
+
+ connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drmm_connector_init(dev, &connector->base, &vkms_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_helper_add(&connector->base, &vkms_conn_helper_funcs);
+
+ return connector;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_connector.h b/drivers/gpu/drm/vkms/vkms_connector.h
new file mode 100644
index 000000000000..c9149c1b7af0
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_connector.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_CONNECTOR_H_
+#define _VKMS_CONNECTOR_H_
+
+#include "vkms_drv.h"
+
+/**
+ * struct vkms_connector - VKMS custom type wrapping around the DRM connector
+ *
+ * @base: Base DRM connector
+ */
+struct vkms_connector {
+ struct drm_connector base;
+};
+
+/**
+ * vkms_connector_init() - Initialize a connector
+ * @vkmsdev: VKMS device containing the connector
+ *
+ * Returns:
+ * The connector or an error on failure.
+ */
+struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev);
+
+#endif /* _VKMS_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 12034ec12029..8c9898b9055d 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -194,7 +194,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
i++;
}
- vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
+ vkms_state->active_planes = kcalloc(i, sizeof(*vkms_state->active_planes), GFP_KERNEL);
if (!vkms_state->active_planes)
return -ENOMEM;
vkms_state->num_active_planes = i;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index b6de91134a22..a24d1655f7b8 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -27,11 +27,9 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_vblank.h>
+#include "vkms_config.h"
#include "vkms_drv.h"
-#include <drm/drm_print.h>
-#include <drm/drm_debugfs.h>
-
#define DRIVER_NAME "vkms"
#define DRIVER_DESC "Virtual Kernel Mode Setting"
#define DRIVER_MAJOR 1
@@ -81,23 +79,6 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_cleanup_planes(dev, old_state);
}
-static int vkms_config_show(struct seq_file *m, void *data)
-{
- struct drm_debugfs_entry *entry = m->private;
- struct drm_device *dev = entry->dev;
- struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
-
- seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback);
- seq_printf(m, "cursor=%d\n", vkmsdev->config->cursor);
- seq_printf(m, "overlay=%d\n", vkmsdev->config->overlay);
-
- return 0;
-}
-
-static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
- { "vkms_config", vkms_config_show, 0 },
-};
-
static const struct drm_driver vkms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
.fops = &vkms_driver_fops,
@@ -170,8 +151,10 @@ static int vkms_create(struct vkms_config *config)
int ret;
struct platform_device *pdev;
struct vkms_device *vkms_device;
+ const char *dev_name;
- pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
+ dev_name = vkms_config_get_device_name(config);
+ pdev = platform_device_register_simple(dev_name, -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -198,7 +181,8 @@ static int vkms_create(struct vkms_config *config)
goto out_devres;
}
- ret = drm_vblank_init(&vkms_device->drm, 1);
+ ret = drm_vblank_init(&vkms_device->drm,
+ vkms_config_get_num_crtcs(config));
if (ret) {
DRM_ERROR("Failed to vblank\n");
goto out_devres;
@@ -208,8 +192,7 @@ static int vkms_create(struct vkms_config *config)
if (ret)
goto out_devres;
- drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
- ARRAY_SIZE(vkms_config_debugfs_list));
+ vkms_config_register_debugfs(vkms_device);
ret = drm_dev_register(&vkms_device->drm, 0);
if (ret)
@@ -231,17 +214,13 @@ static int __init vkms_init(void)
int ret;
struct vkms_config *config;
- config = kmalloc(sizeof(*config), GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- config->cursor = enable_cursor;
- config->writeback = enable_writeback;
- config->overlay = enable_overlay;
+ config = vkms_config_default_create(enable_cursor, enable_writeback, enable_overlay);
+ if (IS_ERR(config))
+ return PTR_ERR(config);
ret = vkms_create(config);
if (ret) {
- kfree(config);
+ vkms_config_destroy(config);
return ret;
}
@@ -275,7 +254,7 @@ static void __exit vkms_exit(void)
return;
vkms_destroy(default_config);
- kfree(default_config);
+ vkms_config_destroy(default_config);
}
module_init(vkms_init);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index abbb652be2b5..a74a7fc3a056 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -12,6 +12,8 @@
#include <drm/drm_encoder.h>
#include <drm/drm_writeback.h>
+#define DEFAULT_DEVICE_NAME "vkms"
+
#define XRES_MIN 10
#define YRES_MIN 10
@@ -189,20 +191,7 @@ struct vkms_output {
spinlock_t composer_lock;
};
-/**
- * struct vkms_config - General configuration for VKMS driver
- *
- * @writeback: If true, a writeback buffer can be attached to the CRTC
- * @cursor: If true, a cursor plane is created in the VKMS device
- * @overlay: If true, NUM_OVERLAY_PLANES will be created for the VKMS device
- * @dev: Used to store the current VKMS device. Only set when the device is instantiated.
- */
-struct vkms_config {
- bool writeback;
- bool cursor;
- bool overlay;
- struct vkms_device *dev;
-};
+struct vkms_config;
/**
* struct vkms_device - Description of a VKMS device
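
With struct vkms_config reduced to a forward declaration, vkms_drv.c can only reach the configuration through accessors such as vkms_config_get_device_name() and vkms_config_get_num_crtcs() used in the hunks above. A sketch of what that opaque-handle split looks like from the driver's side; the real prototypes live in vkms_config.h and may differ in detail:

    /* vkms_config.h side (illustrative): the type stays opaque to vkms_drv.c */
    struct vkms_config;

    struct vkms_config *vkms_config_default_create(bool enable_cursor,
                                                   bool enable_writeback,
                                                   bool enable_overlay);
    void vkms_config_destroy(struct vkms_config *config);
    const char *vkms_config_get_device_name(struct vkms_config *config);
    size_t vkms_config_get_num_crtcs(struct vkms_config *config);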
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 22f0d678af3a..8d7ca0cdd79f 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -1,121 +1,111 @@
// SPDX-License-Identifier: GPL-2.0+
+#include "vkms_config.h"
+#include "vkms_connector.h"
#include "vkms_drv.h"
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_edid.h>
#include <drm/drm_managed.h>
-#include <drm/drm_probe_helper.h>
-
-static const struct drm_connector_funcs vkms_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int vkms_conn_get_modes(struct drm_connector *connector)
-{
- int count;
-
- /* Use the default modes list from DRM */
- count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
- drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
-
- return count;
-}
-
-static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
- .get_modes = vkms_conn_get_modes,
-};
int vkms_output_init(struct vkms_device *vkmsdev)
{
struct drm_device *dev = &vkmsdev->drm;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct vkms_output *output;
- struct vkms_plane *primary, *overlay, *cursor = NULL;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
int ret;
int writeback;
- unsigned int n;
-
- /*
- * Initialize used plane. One primary plane is required to perform the composition.
- *
- * The overlay and cursor planes are not mandatory, but can be used to perform complex
- * composition.
- */
- primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
- if (IS_ERR(primary))
- return PTR_ERR(primary);
-
- if (vkmsdev->config->cursor) {
- cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
- if (IS_ERR(cursor))
- return PTR_ERR(cursor);
- }
- output = vkms_crtc_init(dev, &primary->base,
- cursor ? &cursor->base : NULL);
- if (IS_ERR(output)) {
- DRM_ERROR("Failed to allocate CRTC\n");
- return PTR_ERR(output);
- }
+ if (!vkms_config_is_valid(vkmsdev->config))
+ return -EINVAL;
- if (vkmsdev->config->overlay) {
- for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
- overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY);
- if (IS_ERR(overlay)) {
- DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
- return PTR_ERR(overlay);
- }
- overlay->base.possible_crtcs = drm_crtc_mask(&output->crtc);
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ enum drm_plane_type type;
+
+ type = vkms_config_plane_get_type(plane_cfg);
+
+ plane_cfg->plane = vkms_plane_init(vkmsdev, type);
+ if (IS_ERR(plane_cfg->plane)) {
+ DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
+ return PTR_ERR(plane_cfg->plane);
}
}
- connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
- if (!connector) {
- DRM_ERROR("Failed to allocate connector\n");
- return -ENOMEM;
- }
+ vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) {
+ struct vkms_config_plane *primary, *cursor;
- ret = drmm_connector_init(dev, connector, &vkms_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL, NULL);
- if (ret) {
- DRM_ERROR("Failed to init connector\n");
- return ret;
- }
+ primary = vkms_config_crtc_primary_plane(vkmsdev->config, crtc_cfg);
+ cursor = vkms_config_crtc_cursor_plane(vkmsdev->config, crtc_cfg);
- drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
+ crtc_cfg->crtc = vkms_crtc_init(dev, &primary->plane->base,
+ cursor ? &cursor->plane->base : NULL);
+ if (IS_ERR(crtc_cfg->crtc)) {
+ DRM_ERROR("Failed to allocate CRTC\n");
+ return PTR_ERR(crtc_cfg->crtc);
+ }
- encoder = drmm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL);
- if (!encoder) {
- DRM_ERROR("Failed to allocate encoder\n");
- return -ENOMEM;
+ /* Initialize the writeback component */
+ if (vkms_config_crtc_get_writeback(crtc_cfg)) {
+ writeback = vkms_enable_writeback_connector(vkmsdev, crtc_cfg->crtc);
+ if (writeback)
+ DRM_ERROR("Failed to init writeback connector\n");
+ }
}
- ret = drmm_encoder_init(dev, encoder, NULL,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- if (ret) {
- DRM_ERROR("Failed to init encoder\n");
- return ret;
+
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ plane_cfg->plane->base.possible_crtcs |=
+ drm_crtc_mask(&possible_crtc->crtc->crtc);
+ }
}
- encoder->possible_crtcs = drm_crtc_mask(&output->crtc);
- /* Attach the encoder and the connector */
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret) {
- DRM_ERROR("Failed to attach connector to encoder\n");
- return ret;
+ vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ encoder_cfg->encoder = drmm_kzalloc(dev, sizeof(*encoder_cfg->encoder), GFP_KERNEL);
+ if (!encoder_cfg->encoder) {
+ DRM_ERROR("Failed to allocate encoder\n");
+ return -ENOMEM;
+ }
+ ret = drmm_encoder_init(dev, encoder_cfg->encoder, NULL,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init encoder\n");
+ return ret;
+ }
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ encoder_cfg->encoder->possible_crtcs |=
+ drm_crtc_mask(&possible_crtc->crtc->crtc);
+ }
}
- /* Initialize the writeback component */
- if (vkmsdev->config->writeback) {
- writeback = vkms_enable_writeback_connector(vkmsdev, output);
- if (writeback)
- DRM_ERROR("Failed to init writeback connector\n");
+ vkms_config_for_each_connector(vkmsdev->config, connector_cfg) {
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+
+ connector_cfg->connector = vkms_connector_init(vkmsdev);
+ if (IS_ERR(connector_cfg->connector)) {
+ DRM_ERROR("Failed to init connector\n");
+ return PTR_ERR(connector_cfg->connector);
+ }
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg,
+ idx,
+ possible_encoder) {
+ ret = drm_connector_attach_encoder(&connector_cfg->connector->base,
+ possible_encoder->encoder);
+ if (ret) {
+ DRM_ERROR("Failed to attach connector to encoder\n");
+ return ret;
+ }
+ }
}
drm_mode_config_reset(dev);
- return ret;
+ return 0;
}
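
The rewritten vkms_output_init() builds every plane's and encoder's possible_crtcs mask by OR-ing drm_crtc_mask() over the CRTCs listed in the configuration, instead of hard-wiring a single CRTC. The idiom, reduced to a standalone helper; the plain loop stands in for the vkms_config_*_for_each_possible_crtc() iterators and is not actual VKMS API:

    #include <drm/drm_crtc.h>

    /* possible_crtcs is a bitmask with one bit per registered CRTC */
    static u32 example_possible_crtcs_mask(struct drm_crtc **crtcs, unsigned int n)
    {
        u32 mask = 0;
        unsigned int i;

        for (i = 0; i < n; i++)
            mask |= drm_crtc_mask(crtcs[i]);    /* BIT(drm_crtc_index(crtc)) */

        return mask;
    }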
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 6c3c2922ae8b..aab646b91ca9 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on (X86 && HYPERVISOR_GUEST) || ARM64
select DRM_CLIENT_SELECTION
select DRM_TTM
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 46a4ab688a7f..b168fd7fe9b3 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -10,6 +10,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
- vmwgfx_gem.o vmwgfx_vkms.o
+ vmwgfx_gem.o vmwgfx_vkms.o vmwgfx_cursor_plane.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 64bd7d74854e..fa5841fda659 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -429,7 +429,7 @@ static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
void *ptr = NULL;
int ret;
- if (bo->tbo.base.import_attach) {
+ if (drm_gem_is_imported(&bo->tbo.base)) {
ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
if (ret) {
drm_dbg_driver(&vmw->drm,
@@ -447,7 +447,7 @@ out:
static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
{
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_vunmap(bo->tbo.base.dma_buf, map);
else
vmw_bo_unmap(bo);
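
Both blit hunks swap a direct test of the import attachment for drm_gem_is_imported(), the GEM helper that answers "did this object come from another driver via PRIME?". A minimal sketch of the map-or-import pattern they converge on; vmwgfx routes the non-imported case through its own vmw_bo_map_and_cache(), the generic TTM helper is used below only to keep the sketch self-contained:

    #include <drm/drm_gem.h>
    #include <drm/drm_gem_ttm_helper.h>
    #include <linux/dma-buf.h>
    #include <linux/iosys-map.h>

    static int example_vmap(struct drm_gem_object *obj, struct iosys_map *map)
    {
        /* imported (PRIME) buffers must be mapped through their dma-buf */
        if (drm_gem_is_imported(obj))
            return dma_buf_vmap(obj->dma_buf, map);

        return drm_gem_ttm_vmap(obj, map);
    }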
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 9b5b8c1f063b..f031a312c783 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -36,8 +36,7 @@ static void vmw_bo_release(struct vmw_bo *vbo)
{
struct vmw_resource *res;
- WARN_ON(vbo->tbo.base.funcs &&
- kref_read(&vbo->tbo.base.refcount) != 0);
+ WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
xa_destroy(&vbo->detached_resources);
@@ -51,11 +50,13 @@ static void vmw_bo_release(struct vmw_bo *vbo)
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void)vmw_resource_reserve(res, false, true);
vmw_resource_mob_detach(res);
+ if (res->dirty)
+ res->func->dirty_free(res);
if (res->coherent)
vmw_bo_dirty_release(res->guest_memory_bo);
res->guest_memory_bo = NULL;
res->guest_memory_offset = 0;
- vmw_resource_unreserve(res, false, false, false, NULL,
+ vmw_resource_unreserve(res, true, false, false, NULL,
0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -73,9 +74,9 @@ static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
- WARN_ON(vbo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_release(vbo);
+ WARN_ON(vbo->dirty);
kfree(vbo);
}
@@ -467,6 +468,7 @@ int vmw_bo_create(struct vmw_private *vmw,
if (unlikely(ret != 0))
goto out_error;
+ (*p_bo)->tbo.base.funcs = &vmw_gem_object_funcs;
return ret;
out_error:
*p_bo = NULL;
@@ -848,9 +850,9 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
vmw_bo_placement_set(bo, domain, domain);
}
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
- xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
+ return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL));
}
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
@@ -887,3 +889,9 @@ out:
surf = vmw_res_to_srf(res);
return surf;
}
+
+s32 vmw_bo_mobid(struct vmw_bo *vbo)
+{
+ WARN_ON(vbo->tbo.resource->mem_type != VMW_PL_MOB);
+ return (s32)vbo->tbo.resource->start;
+}
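
vmw_bo_add_detached_resource() now reports allocation failures instead of silently dropping the entry: xa_store() returns either the previous entry or an encoded error pointer, and xa_err() converts that into 0 or a negative errno the caller can propagate. A minimal sketch of the idiom:

    #include <linux/xarray.h>

    static int example_track(struct xarray *xa, unsigned long index, void *entry)
    {
        /* 0 on success, -ENOMEM (or another errno) if the store failed */
        return xa_err(xa_store(xa, index, entry, GFP_KERNEL));
    }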
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 11e330c7c7f5..cf84a163bfcb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -141,7 +141,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);
@@ -204,12 +204,12 @@ static inline void vmw_bo_unreference(struct vmw_bo **buf)
*buf = NULL;
if (tmp_buf)
- ttm_bo_put(&tmp_buf->tbo);
+ drm_gem_object_put(&tmp_buf->tbo.base);
}
static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
- ttm_bo_get(&buf->tbo);
+ drm_gem_object_get(&buf->tbo.base);
return buf;
}
@@ -233,4 +233,6 @@ static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
return container_of((gobj), struct vmw_bo, tbo.base);
}
+s32 vmw_bo_mobid(struct vmw_bo *vbo);
+
#endif // VMWGFX_BO_H
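
vmw_bo_reference()/vmw_bo_unreference() now pin the object's lifetime to the GEM refcount of the embedded tbo.base rather than the TTM one, so every holder of a vmw_bo takes and drops the same counter. The shape of the change, reduced to a generic get/put pair; the example_* names are illustrative:

    #include <drm/drm_gem.h>

    static inline struct drm_gem_object *example_get(struct drm_gem_object *gobj)
    {
        drm_gem_object_get(gobj);    /* was ttm_bo_get() on the embedded TTM BO */
        return gobj;
    }

    static inline void example_put(struct drm_gem_object **gobj)
    {
        if (*gobj)
            drm_gem_object_put(*gobj);
        *gobj = NULL;
    }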
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index a7c07692262b..98331c4c0335 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
+ ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
goto out_done;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
new file mode 100644
index 000000000000..718832b08d96
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
@@ -0,0 +1,844 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+#include "vmwgfx_cursor_plane.h"
+
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmw_surface_cache.h"
+
+#include "drm/drm_atomic.h"
+#include "drm/drm_atomic_helper.h"
+#include "drm/drm_plane.h"
+#include <asm/page.h>
+
+#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
+#define VMW_CURSOR_SNOOP_WIDTH 64
+#define VMW_CURSOR_SNOOP_HEIGHT 64
+
+struct vmw_svga_fifo_cmd_define_cursor {
+ u32 cmd;
+ SVGAFifoCmdDefineAlphaCursor cursor;
+};
+
+/**
+ * vmw_send_define_cursor_cmd - queue a define cursor command
+ * @dev_priv: the private driver struct
+ * @image: buffer which holds the cursor image
+ * @width: width of the mouse cursor image
+ * @height: height of the mouse cursor image
+ * @hotspotX: the horizontal position of mouse hotspot
+ * @hotspotY: the vertical position of mouse hotspot
+ */
+static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct vmw_svga_fifo_cmd_define_cursor *cmd;
+ const u32 image_size = width * height * sizeof(*image);
+ const u32 cmd_size = sizeof(*cmd) + image_size;
+
+ /*
+ * Try to reserve fifocmd space and swallow any failures;
+ * such reservations cannot be left unconsumed for long
+	 * without risking clogging other fifocmd users, so
+	 * we treat reservations separately from the way we treat
+	 * other fallible KMS-atomic resources at prepare_fb.
+ */
+ cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
+
+ if (unlikely(!cmd))
+ return;
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(&cmd[1], image, image_size);
+
+ cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+ cmd->cursor.id = 0;
+ cmd->cursor.width = width;
+ cmd->cursor.height = height;
+ cmd->cursor.hotspotX = hotspotX;
+ cmd->cursor.hotspotY = hotspotY;
+
+ vmw_cmd_commit_flush(dev_priv, cmd_size);
+}
+
+static void
+vmw_cursor_plane_update_legacy(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+ s32 hotspot_x = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ s32 hotspot_y = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+
+ if (WARN_ON(!surface || !surface->snooper.image))
+ return;
+
+ if (vps->cursor.legacy.id != surface->snooper.id) {
+ vmw_send_define_cursor_cmd(vmw, surface->snooper.image,
+ vps->base.crtc_w, vps->base.crtc_h,
+ hotspot_x, hotspot_y);
+ vps->cursor.legacy.id = surface->snooper.id;
+ }
+}
+
+static enum vmw_cursor_update_type
+vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+
+ if (surface && surface->snooper.image)
+ return VMW_CURSOR_UPDATE_LEGACY;
+
+ if (vmw->has_mob) {
+ if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
+ return VMW_CURSOR_UPDATE_MOB;
+ }
+
+ return VMW_CURSOR_UPDATE_NONE;
+}
+
+static void vmw_cursor_update_mob(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ SVGAGBCursorHeader *header;
+ SVGAGBAlphaCursorHeader *alpha_header;
+ struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
+ u32 *image = vmw_bo_map_and_cache(bo);
+ const u32 image_size = vps->base.crtc_w * vps->base.crtc_h * sizeof(*image);
+
+ header = vmw_bo_map_and_cache(vps->cursor.mob);
+ alpha_header = &header->header.alphaHeader;
+
+ memset(header, 0, sizeof(*header));
+
+ header->type = SVGA_ALPHA_CURSOR;
+ header->sizeInBytes = image_size;
+
+ alpha_header->hotspotX = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ alpha_header->hotspotY = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+ alpha_header->width = vps->base.crtc_w;
+ alpha_header->height = vps->base.crtc_h;
+
+ memcpy(header + 1, image, image_size);
+ vmw_write(vmw, SVGA_REG_CURSOR_MOBID, vmw_bo_mobid(vps->cursor.mob));
+
+ vmw_bo_unmap(bo);
+ vmw_bo_unmap(vps->cursor.mob);
+}
+
+static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type,
+ u32 w, u32 h)
+{
+ switch (update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ case VMW_CURSOR_UPDATE_NONE:
+ return 0;
+ case VMW_CURSOR_UPDATE_MOB:
+ return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
+ }
+ return 0;
+}
+
+static void vmw_cursor_mob_destroy(struct vmw_bo **vbo)
+{
+ if (!(*vbo))
+ return;
+
+ ttm_bo_unpin(&(*vbo)->tbo);
+ vmw_bo_unreference(vbo);
+}
+
+/**
+ * vmw_cursor_mob_unmap - Unmaps the cursor mobs.
+ *
+ * @vps: state of the cursor plane
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_cursor_mob_unmap(struct vmw_plane_state *vps)
+{
+ int ret = 0;
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo || !vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
+ if (likely(ret == 0)) {
+ vmw_bo_unmap(vbo);
+ ttm_bo_unreserve(&vbo->tbo);
+ }
+
+ return ret;
+}
+
+static void vmw_cursor_mob_put(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ u32 i;
+
+ if (!vps->cursor.mob)
+ return;
+
+ vmw_cursor_mob_unmap(vps);
+
+ /* Look for a free slot to return this mob to the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (!vcp->cursor_mobs[i]) {
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Cache is full: See if this mob is bigger than an existing mob. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i]->tbo.base.size <
+ vps->cursor.mob->tbo.base.size) {
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Destroy it if it's not worth caching. */
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+}
+
+static int vmw_cursor_mob_get(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ u32 i;
+ u32 cursor_max_dim, mob_max_size;
+ struct vmw_fence_obj *fence = NULL;
+ int ret;
+
+ if (!dev_priv->has_mob ||
+ (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
+ return -EINVAL;
+
+ mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
+ cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
+
+ if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
+ vps->base.crtc_h > cursor_max_dim)
+ return -EINVAL;
+
+ if (vps->cursor.mob) {
+ if (vps->cursor.mob->tbo.base.size >= size)
+ return 0;
+ vmw_cursor_mob_put(vcp, vps);
+ }
+
+ /* Look for an unused mob in the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i] &&
+ vcp->cursor_mobs[i]->tbo.base.size >= size) {
+ vps->cursor.mob = vcp->cursor_mobs[i];
+ vcp->cursor_mobs[i] = NULL;
+ return 0;
+ }
+ }
+ /* Create a new mob if we can't find an existing one. */
+ ret = vmw_bo_create_and_populate(dev_priv, size, VMW_BO_DOMAIN_MOB,
+ &vps->cursor.mob);
+
+ if (ret != 0)
+ return ret;
+
+	/* Fence the mob creation so we are guaranteed to have the mob */
+ ret = ttm_bo_reserve(&vps->cursor.mob->tbo, false, false, NULL);
+ if (ret != 0)
+ goto teardown;
+
+ ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (ret != 0) {
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+ goto teardown;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+
+ return 0;
+
+teardown:
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+ return ret;
+}
+
+static void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y)
+{
+ const u32 svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
+ : SVGA_CURSOR_ON_HIDE;
+ u32 count;
+
+ spin_lock(&dev_priv->cursor_lock);
+ if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
+ } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+ count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
+ } else {
+ vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
+ }
+ spin_unlock(&dev_priv->cursor_lock);
+}
+
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ SVGA3dCopyBox *box;
+ u32 box_count;
+ void *virtual;
+ bool is_iomem;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int i, ret;
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+
+ /* No snooper installed, nothing to copy */
+ if (!srf->snooper.image)
+ return;
+
+ if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
+ DRM_ERROR("face and mipmap for cursors should never != 0\n");
+ return;
+ }
+
+ if (cmd->header.size < 64) {
+ DRM_ERROR("at least one full copy box must be given\n");
+ return;
+ }
+
+ box = (SVGA3dCopyBox *)&cmd[1];
+ box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
+ sizeof(SVGA3dCopyBox);
+
+ if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ box->x != 0 || box->y != 0 || box->z != 0 ||
+ box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+ box->d != 1 || box_count != 1 ||
+ box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
+		/* TODO handle non-page-aligned offsets */
+		/* TODO handle more dst & src != 0 */
+		/* TODO handle more than one copy */
+ DRM_ERROR("Can't snoop dma request for cursor!\n");
+ DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
+ box->srcx, box->srcy, box->srcz,
+ box->x, box->y, box->z,
+ box->w, box->h, box->d, box_count,
+ cmd->dma.guest.ptr.offset);
+ return;
+ }
+
+ kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
+ kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(bo, true, false, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+
+ if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
+ memcpy(srf->snooper.image, virtual,
+ VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
+ } else {
+ /* Image is unsigned pointer. */
+ for (i = 0; i < box->h; i++)
+ memcpy(srf->snooper.image + i * image_pitch,
+ virtual + i * cmd->dma.guest.pitch,
+ box->w * desc->pitchBytesPerBlock);
+ }
+ srf->snooper.id++;
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(bo);
+}
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ u32 i;
+
+ vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+
+ drm_plane_cleanup(plane);
+}
+
+/**
+ * vmw_cursor_mob_map - Maps the cursor mobs.
+ *
+ * @vps: plane_state
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_cursor_mob_map(struct vmw_plane_state *vps)
+{
+ int ret;
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo)
+ return -EINVAL;
+
+ if (vbo->tbo.base.size < size)
+ return -EINVAL;
+
+ if (vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, false, false, NULL);
+ if (unlikely(ret != 0))
+ return -ENOMEM;
+
+ vmw_bo_map_and_cache(vbo);
+
+ ttm_bo_unreserve(&vbo->tbo);
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_cleanup_fb - Unpins the plane surface
+ *
+ * @plane: cursor plane
+ * @old_state: contains the state to clean up
+ *
+ * Unmaps all cursor bo mappings and unpins the cursor surface
+ *
+ * Returns 0 on success
+ */
+void
+vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+
+ if (!vmw_user_object_is_null(&vps->uo))
+ vmw_user_object_unmap(&vps->uo);
+
+ vmw_cursor_mob_unmap(vps);
+ vmw_cursor_mob_put(vcp, vps);
+
+ vmw_du_plane_unpin_surf(vps);
+ vmw_user_object_unref(&vps->uo);
+}
+
+static bool
+vmw_cursor_buffer_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ struct vmw_bo *new_bo = vmw_user_object_buffer(&new_vps->uo);
+ struct vmw_bo *old_bo = vmw_user_object_buffer(&old_vps->uo);
+ struct vmw_surface *surf;
+ bool dirty = false;
+ int ret;
+
+ if (new_bo != old_bo)
+ return true;
+
+ if (new_bo) {
+ if (!old_bo) {
+ return true;
+ } else if (new_bo->dirty) {
+ vmw_bo_dirty_scan(new_bo);
+ dirty = vmw_bo_is_dirty(new_bo);
+ if (dirty) {
+ surf = vmw_user_object_surface(&new_vps->uo);
+ if (surf)
+ vmw_bo_dirty_transfer_to_res(&surf->res);
+ else
+ vmw_bo_dirty_clear(new_bo);
+ }
+ return dirty;
+ } else if (new_bo != old_bo) {
+ /*
+			 * Currently unused because the check at the top exits
+			 * right away. In most cases a different buffer means
+			 * different contents. For the few percent of cases
+			 * where that's not true, the cost of doing the memcmp
+			 * on all the others seems to outweigh the benefits.
+			 * Leave the conditional in place so it can be trivially
+			 * validated by removing the initial
+			 * if (new_bo != old_bo) at the start.
+ */
+ void *old_image;
+ void *new_image;
+ bool changed = false;
+ struct ww_acquire_ctx ctx;
+ const u32 size = new_vps->base.crtc_w *
+ new_vps->base.crtc_h * sizeof(u32);
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ttm_bo_unreserve(&old_bo->tbo);
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ old_image = vmw_bo_map_and_cache(old_bo);
+ new_image = vmw_bo_map_and_cache(new_bo);
+
+ if (old_image && new_image && old_image != new_image)
+ changed = memcmp(old_image, new_image, size) !=
+ 0;
+
+ ttm_bo_unreserve(&new_bo->tbo);
+ ttm_bo_unreserve(&old_bo->tbo);
+
+ ww_acquire_fini(&ctx);
+
+ return changed;
+ }
+ return false;
+ }
+
+ return false;
+}
+
+static bool
+vmw_cursor_plane_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
+ old_vps->base.crtc_h != new_vps->base.crtc_h)
+ return true;
+
+ if (old_vps->base.hotspot_x != new_vps->base.hotspot_x ||
+ old_vps->base.hotspot_y != new_vps->base.hotspot_y)
+ return true;
+
+ if (old_vps->cursor.legacy.hotspot_x !=
+ new_vps->cursor.legacy.hotspot_x ||
+ old_vps->cursor.legacy.hotspot_y !=
+ new_vps->cursor.legacy.hotspot_y)
+ return true;
+
+ if (old_vps->base.fb != new_vps->base.fb)
+ return true;
+
+ return false;
+}
+
+/**
+ * vmw_cursor_plane_prepare_fb - Readies the cursor by referencing it
+ *
+ * @plane: display plane
+ * @new_state: info on the new plane state, including the FB
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(plane->state);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ struct vmw_bo *bo = NULL;
+ struct vmw_surface *surface;
+ int ret = 0;
+
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ vmw_user_object_unmap(&vps->uo);
+ vmw_user_object_unref(&vps->uo);
+ }
+
+ if (fb) {
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
+ vps->uo.surface = NULL;
+ } else {
+ memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
+ }
+ vmw_user_object_ref(&vps->uo);
+ }
+
+ vps->cursor.update_type = vmw_cursor_update_type(vmw, vps);
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || vps->cursor.legacy.id == surface->snooper.id)
+ vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE;
+ break;
+ case VMW_CURSOR_UPDATE_MOB: {
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo) {
+ struct ttm_operation_ctx ctx = { false, false };
+
+ ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
+ if (ret != 0)
+ return -ENOMEM;
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret != 0)
+ return -ENOMEM;
+
+ /*
+ * vmw_bo_pin_reserved also validates, so to skip
+ * the extra validation use ttm_bo_pin directly
+ */
+ if (!bo->tbo.pin_count)
+ ttm_bo_pin(&bo->tbo);
+
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ const u32 size = new_state->crtc_w *
+ new_state->crtc_h *
+ sizeof(u32);
+
+ (void)vmw_bo_map_and_cache_size(bo, size);
+ } else {
+ vmw_bo_map_and_cache(bo);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+ }
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ if (!vmw_cursor_plane_changed(vps, old_vps) &&
+ !vmw_cursor_buffer_changed(vps, old_vps)) {
+ vps->cursor.update_type =
+ VMW_CURSOR_UPDATE_NONE;
+ } else {
+ vmw_cursor_mob_get(vcp, vps);
+ vmw_cursor_mob_map(vps);
+ }
+ }
+ }
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_atomic_check - check if the new state is okay
+ *
+ * @plane: cursor plane
+ * @state: info on the new plane state
+ *
+ * This is a chance to fail if the new cursor state does not fit
+ * our requirements.
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ int ret = 0;
+ struct drm_crtc_state *crtc_state = NULL;
+ struct vmw_surface *surface = NULL;
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ enum vmw_cursor_update_type update_type;
+ struct drm_framebuffer *fb = new_state->fb;
+
+ if (new_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING, true,
+ true);
+ if (ret)
+ return ret;
+
+ /* Turning off */
+ if (!fb)
+ return 0;
+
+ update_type = vmw_cursor_update_type(vmw, vps);
+ if (update_type == VMW_CURSOR_UPDATE_LEGACY) {
+ if (new_state->crtc_w != VMW_CURSOR_SNOOP_WIDTH ||
+ new_state->crtc_h != VMW_CURSOR_SNOOP_HEIGHT) {
+ drm_warn(&vmw->drm,
+ "Invalid cursor dimensions (%d, %d)\n",
+ new_state->crtc_w, new_state->crtc_h);
+ return -EINVAL;
+ }
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || !surface->snooper.image) {
+ drm_warn(&vmw->drm,
+ "surface not suitable for cursor\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void
+vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
+ struct vmw_private *dev_priv = vmw_priv(plane->dev);
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ s32 hotspot_x, hotspot_y, cursor_x, cursor_y;
+
+ /*
+ * Hide the cursor if the new bo is null
+ */
+ if (vmw_user_object_is_null(&vps->uo)) {
+ vmw_cursor_update_position(dev_priv, false, 0, 0);
+ return;
+ }
+
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ vmw_cursor_plane_update_legacy(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_MOB:
+ vmw_cursor_update_mob(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+ /*
+ * For all update types update the cursor position
+ */
+ cursor_x = new_state->crtc_x + du->set_gui_x;
+ cursor_y = new_state->crtc_y + du->set_gui_y;
+
+ hotspot_x = vps->cursor.legacy.hotspot_x + new_state->hotspot_x;
+ hotspot_y = vps->cursor.legacy.hotspot_y + new_state->hotspot_y;
+
+ vmw_cursor_update_position(dev_priv, true, cursor_x + hotspot_x,
+ cursor_y + hotspot_y);
+}
+
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_cursor_bypass_arg *arg = data;
+ struct vmw_display_unit *du;
+ struct vmw_plane_state *vps;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+ }
+
+ crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
+ if (!crtc) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata)
+{
+ if (!file_priv->atomic && metadata->scanout &&
+ metadata->num_sizes == 1 &&
+ metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
+ metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
+ metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
+ VMW_CURSOR_SNOOP_HEIGHT *
+ desc->pitchBytesPerBlock;
+ void *image = kzalloc(cursor_size_bytes, GFP_KERNEL);
+
+ if (!image) {
+ DRM_ERROR("Failed to allocate cursor_image\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ return image;
+ }
+ return NULL;
+}
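
For readers tracking the new file as a whole, vmw_cursor_update_type() above encodes a simple policy: a surface with a snooped image goes through the legacy DEFINE_ALPHA_CURSOR FIFO command, otherwise a cursor MOB is used when the device advertises SVGA_CAP2_CURSOR_MOB, and anything else is left alone. A condensed restatement with the vmw-specific types elided (the boolean parameters are illustrative stand-ins for the fields checked in the real function):

    static enum vmw_cursor_update_type
    example_update_type(bool has_snooped_image, bool has_mob, u32 capabilities2)
    {
        if (has_snooped_image)
            return VMW_CURSOR_UPDATE_LEGACY;
        if (has_mob && (capabilities2 & SVGA_CAP2_CURSOR_MOB))
            return VMW_CURSOR_UPDATE_MOB;
        return VMW_CURSOR_UPDATE_NONE;
    }

On the MOB path, vmw_cursor_mob_size() sizes the buffer as width * height * sizeof(u32) plus sizeof(SVGAGBCursorHeader); for the 64x64 legacy snoop dimensions that is 16 KiB of ARGB pixels preceded by the header.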
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
new file mode 100644
index 000000000000..40694925a70e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_CURSOR_PLANE_H
+#define VMWGFX_CURSOR_PLANE_H
+
+#include "device_include/svga3d_cmd.h"
+#include "drm/drm_file.h"
+#include "drm/drm_fourcc.h"
+#include "drm/drm_plane.h"
+
+#include <linux/types.h>
+
+struct SVGA3dCmdHeader;
+struct ttm_buffer_object;
+struct vmw_bo;
+struct vmw_cursor;
+struct vmw_private;
+struct vmw_surface;
+struct vmw_user_object;
+
+#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
+
+static const u32 __maybe_unused vmw_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+enum vmw_cursor_update_type {
+ VMW_CURSOR_UPDATE_NONE = 0,
+ VMW_CURSOR_UPDATE_LEGACY,
+ VMW_CURSOR_UPDATE_MOB,
+};
+
+struct vmw_cursor_plane_state {
+ enum vmw_cursor_update_type update_type;
+ bool changed;
+ bool surface_changed;
+ struct vmw_bo *mob;
+ struct {
+ s32 hotspot_x;
+ s32 hotspot_y;
+ u32 id;
+ } legacy;
+};
+
+/**
+ * struct vmw_cursor_plane - Derived class for cursor plane object
+ *
+ * @base: DRM plane object
+ * @cursor_mobs: Cursor mobs available for re-use
+ */
+struct vmw_cursor_plane {
+ struct drm_plane base;
+
+ struct vmw_bo *cursor_mobs[3];
+};
+
+struct vmw_surface_metadata;
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata);
+void vmw_cursor_cmd_dma_snoop(SVGA3dCmdHeader *header,
+ struct vmw_surface *srf,
+ struct ttm_buffer_object *bo);
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane);
+
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+#endif /* VMWGFX_CURSOR_PLANE_H */
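
The vmw_plane_to_vcp() macro above is the usual DRM derived-object downcast: the core only ever hands back the embedded struct drm_plane, and container_of() recovers the driver structure wrapped around it. A generic sketch of the pattern; the example_* names are illustrative:

    #include <linux/container_of.h>
    #include <drm/drm_plane.h>

    struct example_cursor_plane {
        struct drm_plane base;              /* embedded DRM object */
        struct vmw_bo *cursor_mobs[3];      /* small cache of reusable MOBs */
    };

    #define example_plane_to_vcp(p) \
        container_of(p, struct example_cursor_plane, base)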
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0f32471c8533..0695a342b1ef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,31 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
-
#include "vmwgfx_drv.h"
#include "vmwgfx_bo.h"
@@ -1324,9 +1304,6 @@ static void vmw_master_set(struct drm_device *dev,
static void vmw_master_drop(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
-
- vmw_kms_legacy_hotspot_clear(dev_priv);
}
bool vmwgfx_supported(struct vmw_private *vmw)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5275ef632d4b..594af8eb04c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,29 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#ifndef _VMWGFX_DRV_H_
@@ -58,7 +38,7 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 20
+#define VMWGFX_DRIVER_MINOR 21
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_NUM_DISPLAY_UNITS 8
@@ -100,10 +80,6 @@
#define VMW_RES_SHADER ttm_driver_type4
#define VMW_RES_HT_ORDER 12
-#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
-#define VMW_CURSOR_SNOOP_WIDTH 64
-#define VMW_CURSOR_SNOOP_HEIGHT 64
-
#define MKSSTAT_CAPACITY_LOG2 5U
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
@@ -201,7 +177,7 @@ enum vmw_cmdbuf_res_type {
struct vmw_cmdbuf_res_manager;
struct vmw_cursor_snooper {
- size_t age;
+ size_t id;
uint32_t *image;
};
@@ -846,9 +822,7 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
* GEM related functionality - vmwgfx_gem.c
*/
struct vmw_bo_params;
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo);
+extern const struct drm_gem_object_funcs vmw_gem_object_funcs;
extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -1050,7 +1024,6 @@ int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
@@ -1067,7 +1040,6 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);
@@ -1393,8 +1365,10 @@ int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/* Resource dirtying - vmwgfx_page_dirty.c */
+bool vmw_bo_is_dirty(struct vmw_bo *vbo);
void vmw_bo_dirty_scan(struct vmw_bo *vbo);
int vmw_bo_dirty_add(struct vmw_bo *vbo);
+void vmw_bo_dirty_clear(struct vmw_bo *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_bo *vbo);
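
The newly exported dirty-tracking helpers (vmw_bo_is_dirty(), vmw_bo_dirty_clear()) exist so the cursor code earlier in this diff can decide whether a buffer-backed cursor actually changed. A sketch of the consumption pattern, mirroring vmw_cursor_buffer_changed():

    static bool example_flush_dirty(struct vmw_bo *bo, struct vmw_resource *res)
    {
        bool dirty;

        vmw_bo_dirty_scan(bo);              /* refresh the dirty-page tracking */
        dirty = vmw_bo_is_dirty(bo);
        if (dirty) {
            if (res)
                vmw_bo_dirty_transfer_to_res(res);  /* hand ranges to the surface */
            else
                vmw_bo_dirty_clear(bo);
        }

        return dirty;
    }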
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2e52d73eba48..e831e324e737 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,29 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
+
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
@@ -4086,6 +4068,23 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
return 0;
}
+/*
+ * DMA fence callback to remove a seqno_waiter
+ */
+struct seqno_waiter_rm_context {
+ struct dma_fence_cb base;
+ struct vmw_private *dev_priv;
+};
+
+static void seqno_waiter_rm_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct seqno_waiter_rm_context *ctx =
+ container_of(cb, struct seqno_waiter_rm_context, base);
+
+ vmw_seqno_waiter_remove(ctx->dev_priv);
+ kfree(ctx);
+}
+
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands, void *kernel_commands,
@@ -4266,6 +4265,15 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else {
/* Link the fence with the FD created earlier */
fd_install(out_fence_fd, sync_file->file);
+			struct seqno_waiter_rm_context *ctx =
+				kmalloc(sizeof(*ctx), GFP_KERNEL);
+
+			if (ctx) {
+				ctx->dev_priv = dev_priv;
+				vmw_seqno_waiter_add(dev_priv);
+				if (dma_fence_add_callback(&fence->base, &ctx->base,
+							   seqno_waiter_rm_cb) < 0) {
+					vmw_seqno_waiter_remove(dev_priv);
+					kfree(ctx);
+				}
+			}
}
}
@@ -4512,8 +4520,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out;
- vmw_kms_cursor_post_execbuf(dev_priv);
-
out:
if (in_fence)
dma_fence_put(in_fence);
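
The seqno_waiter_rm_context added to vmw_execbuf_process() is the standard dma-fence callback pattern: embed a struct dma_fence_cb in a per-fence context, register it with dma_fence_add_callback(), and remember that a negative return (fence already signalled, or error) means the callback will never run, so the caller must undo its side effects itself. A generic sketch of the pattern, not vmwgfx API:

    #include <linux/dma-fence.h>
    #include <linux/slab.h>

    struct example_cb_ctx {
        struct dma_fence_cb base;
        void *priv;
    };

    static void example_cb(struct dma_fence *f, struct dma_fence_cb *cb)
    {
        struct example_cb_ctx *ctx = container_of(cb, struct example_cb_ctx, base);

        /* undo whatever was armed when the callback was registered */
        kfree(ctx);
    }

    static int example_arm(struct dma_fence *fence, void *priv)
    {
        struct example_cb_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
            return -ENOMEM;

        ctx->priv = priv;
        if (dma_fence_add_callback(fence, &ctx->base, example_cb) < 0)
            kfree(ctx);    /* fence already signalled: clean up inline */

        return 0;
    }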
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index ed5015ced392..c55382167c1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -84,11 +84,11 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
int ret;
- if (obj->import_attach) {
- ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
+ if (drm_gem_is_imported(obj)) {
+ ret = dma_buf_vmap(obj->dma_buf, map);
if (!ret) {
if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ dma_buf_vunmap(obj->dma_buf, map);
return -EIO;
}
}
@@ -101,8 +101,8 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
- if (obj->import_attach)
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ if (drm_gem_is_imported(obj))
+ dma_buf_vunmap(obj->dma_buf, map);
else
drm_gem_ttm_vunmap(obj, map);
}
@@ -111,7 +111,7 @@ static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
int ret;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
/*
* Reset both vm_ops and vm_private_data, so we don't end up with
* vm_ops pointing to our implementation if the dma-buf backend
@@ -140,7 +140,7 @@ static const struct vm_operations_struct vmw_vm_ops = {
.close = ttm_bo_vm_close,
};
-static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
+const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.free = vmw_gem_object_free,
.open = vmw_gem_object_open,
.close = vmw_gem_object_close,
@@ -154,20 +154,6 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.vm_ops = &vmw_vm_ops,
};
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo)
-{
- int ret = vmw_bo_create(vmw, params, p_vbo);
-
- if (ret != 0)
- goto out_no_bo;
-
- (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
-out_no_bo:
- return ret;
-}
-
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -183,7 +169,7 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
+ ret = vmw_bo_create(dev_priv, &params, p_vbo);
if (ret != 0)
goto out_no_bo;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 1912ac1cde6d..05b1c54a070c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,33 +1,15 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
+
#include "vmwgfx_kms.h"
#include "vmwgfx_bo.h"
+#include "vmwgfx_resource_priv.h"
#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
@@ -59,474 +41,6 @@ void vmw_du_cleanup(struct vmw_display_unit *du)
drm_connector_cleanup(&du->connector);
}
-/*
- * Display Unit Cursor functions
- */
-
-static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY);
-
-struct vmw_svga_fifo_cmd_define_cursor {
- u32 cmd;
- SVGAFifoCmdDefineAlphaCursor cursor;
-};
-
-/**
- * vmw_send_define_cursor_cmd - queue a define cursor command
- * @dev_priv: the private driver struct
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- struct vmw_svga_fifo_cmd_define_cursor *cmd;
- const u32 image_size = width * height * sizeof(*image);
- const u32 cmd_size = sizeof(*cmd) + image_size;
-
- /* Try to reserve fifocmd space and swallow any failures;
- such reservations cannot be left unconsumed for long
- under the risk of clogging other fifocmd users, so
- we treat reservations separtely from the way we treat
- other fallible KMS-atomic resources at prepare_fb */
- cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
-
- if (unlikely(!cmd))
- return;
-
- memset(cmd, 0, sizeof(*cmd));
-
- memcpy(&cmd[1], image, image_size);
-
- cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
- cmd->cursor.id = 0;
- cmd->cursor.width = width;
- cmd->cursor.height = height;
- cmd->cursor.hotspotX = hotspotX;
- cmd->cursor.hotspotY = hotspotY;
-
- vmw_cmd_commit_flush(dev_priv, cmd_size);
-}
-
-/**
- * vmw_cursor_update_image - update the cursor image on the provided plane
- * @dev_priv: the private driver struct
- * @vps: the plane state of the cursor plane
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_cursor_update_image(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- if (vps->cursor.bo)
- vmw_cursor_update_mob(dev_priv, vps, image,
- vps->base.crtc_w, vps->base.crtc_h,
- hotspotX, hotspotY);
-
- else
- vmw_send_define_cursor_cmd(dev_priv, image, width, height,
- hotspotX, hotspotY);
-}
-
-
-/**
- * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism
- *
- * Called from inside vmw_du_cursor_plane_atomic_update to actually
- * make the cursor-image live.
- *
- * @dev_priv: device to work with
- * @vps: the plane state of the cursor plane
- * @image: cursor source data to fill the MOB with
- * @width: source data width
- * @height: source data height
- * @hotspotX: cursor hotspot x
- * @hotspotY: cursor hotspot Y
- */
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- SVGAGBCursorHeader *header;
- SVGAGBAlphaCursorHeader *alpha_header;
- const u32 image_size = width * height * sizeof(*image);
-
- header = vmw_bo_map_and_cache(vps->cursor.bo);
- alpha_header = &header->header.alphaHeader;
-
- memset(header, 0, sizeof(*header));
-
- header->type = SVGA_ALPHA_CURSOR;
- header->sizeInBytes = image_size;
-
- alpha_header->hotspotX = hotspotX;
- alpha_header->hotspotY = hotspotY;
- alpha_header->width = width;
- alpha_header->height = height;
-
- memcpy(header + 1, image, image_size);
- vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
- vps->cursor.bo->tbo.resource->start);
-}
-
-
-static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
-{
- return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
-}
-
-/**
- * vmw_du_cursor_plane_acquire_image -- Acquire the image data
- * @vps: cursor plane state
- */
-static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
-{
- struct vmw_surface *surf;
-
- if (vmw_user_object_is_null(&vps->uo))
- return NULL;
-
- surf = vmw_user_object_surface(&vps->uo);
- if (surf && !vmw_user_object_is_mapped(&vps->uo))
- return surf->snooper.image;
-
- return vmw_user_object_map(&vps->uo);
-}
-
-static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
- struct vmw_plane_state *new_vps)
-{
- void *old_image;
- void *new_image;
- u32 size;
- bool changed;
-
- if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
- old_vps->base.crtc_h != new_vps->base.crtc_h)
- return true;
-
- if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
- old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
- return true;
-
- size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
-
- old_image = vmw_du_cursor_plane_acquire_image(old_vps);
- new_image = vmw_du_cursor_plane_acquire_image(new_vps);
-
- changed = false;
- if (old_image && new_image && old_image != new_image)
- changed = memcmp(old_image, new_image, size) != 0;
-
- return changed;
-}
-
-static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
-{
- if (!(*vbo))
- return;
-
- ttm_bo_unpin(&(*vbo)->tbo);
- vmw_bo_unreference(vbo);
-}
-
-static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- u32 i;
-
- if (!vps->cursor.bo)
- return;
-
- vmw_du_cursor_plane_unmap_cm(vps);
-
- /* Look for a free slot to return this mob to the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (!vcp->cursor_mobs[i]) {
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Cache is full: See if this mob is bigger than an existing mob. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i]->tbo.base.size <
- vps->cursor.bo->tbo.base.size) {
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Destroy it if it's not worth caching. */
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
-}
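The slot-reuse policy implemented above (park the mob in a free slot, otherwise displace a strictly smaller cached mob, otherwise destroy it) can be illustrated with a small stand-alone sketch. The struct, the free() calls and the three-slot array are assumptions made only for the example and do not use any vmwgfx types.

#include <stddef.h>
#include <stdlib.h>

struct mob { size_t size; };		/* stand-in for struct vmw_bo */

#define CURSOR_MOB_SLOTS 3

/* Return a mob to a small fixed cache: prefer an empty slot, otherwise
 * replace a smaller cached entry, otherwise free it outright. */
static void cursor_mob_cache_put(struct mob *cache[CURSOR_MOB_SLOTS],
				 struct mob *mob)
{
	size_t i;

	for (i = 0; i < CURSOR_MOB_SLOTS; i++) {
		if (!cache[i]) {
			cache[i] = mob;
			return;
		}
	}

	for (i = 0; i < CURSOR_MOB_SLOTS; i++) {
		if (cache[i]->size < mob->size) {
			free(cache[i]);
			cache[i] = mob;
			return;
		}
	}

	free(mob);		/* not worth caching */
}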
-
-static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- u32 i;
- u32 cursor_max_dim, mob_max_size;
- struct vmw_fence_obj *fence = NULL;
- int ret;
-
- if (!dev_priv->has_mob ||
- (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
- return -EINVAL;
-
- mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
- cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
-
- if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
- vps->base.crtc_h > cursor_max_dim)
- return -EINVAL;
-
- if (vps->cursor.bo) {
- if (vps->cursor.bo->tbo.base.size >= size)
- return 0;
- vmw_du_put_cursor_mob(vcp, vps);
- }
-
- /* Look for an unused mob in the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i] &&
- vcp->cursor_mobs[i]->tbo.base.size >= size) {
- vps->cursor.bo = vcp->cursor_mobs[i];
- vcp->cursor_mobs[i] = NULL;
- return 0;
- }
- }
- /* Create a new mob if we can't find an existing one. */
- ret = vmw_bo_create_and_populate(dev_priv, size,
- VMW_BO_DOMAIN_MOB,
- &vps->cursor.bo);
-
- if (ret != 0)
- return ret;
-
- /* Fence the mob creation so we are guaranteed to have the mob */
- ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
- if (ret != 0)
- goto teardown;
-
- ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- if (ret != 0) {
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- goto teardown;
- }
-
- dma_fence_wait(&fence->base, false);
- dma_fence_put(&fence->base);
-
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- return 0;
-
-teardown:
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
- return ret;
-}
-
-
-static void vmw_cursor_update_position(struct vmw_private *dev_priv,
- bool show, int x, int y)
-{
- const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
- : SVGA_CURSOR_ON_HIDE;
- uint32_t count;
-
- spin_lock(&dev_priv->cursor_lock);
- if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
- vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
- } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
- count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
- } else {
- vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
- }
- spin_unlock(&dev_priv->cursor_lock);
-}
-
-void vmw_kms_cursor_snoop(struct vmw_surface *srf,
- struct ttm_object_file *tfile,
- struct ttm_buffer_object *bo,
- SVGA3dCmdHeader *header)
-{
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_num;
- SVGA3dCopyBox *box;
- unsigned box_count;
- void *virtual;
- bool is_iomem;
- struct vmw_dma_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA dma;
- } *cmd;
- int i, ret;
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
-
- cmd = container_of(header, struct vmw_dma_cmd, header);
-
- /* No snooper installed, nothing to copy */
- if (!srf->snooper.image)
- return;
-
- if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
- DRM_ERROR("face and mipmap for cursors should never != 0\n");
- return;
- }
-
- if (cmd->header.size < 64) {
- DRM_ERROR("at least one full copy box must be given\n");
- return;
- }
-
- box = (SVGA3dCopyBox *)&cmd[1];
- box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
- sizeof(SVGA3dCopyBox);
-
- if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
- box->x != 0 || box->y != 0 || box->z != 0 ||
- box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
- box->d != 1 || box_count != 1 ||
- box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
- /* TODO handle non-page-aligned offsets */
- /* TODO handle more dst & src != 0 */
- /* TODO handle more than one copy */
- DRM_ERROR("Can't snoop dma request for cursor!\n");
- DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
- box->srcx, box->srcy, box->srcz,
- box->x, box->y, box->z,
- box->w, box->h, box->d, box_count,
- cmd->dma.guest.ptr.offset);
- return;
- }
-
- kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
- kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
-
- ret = ttm_bo_reserve(bo, true, false, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("reserve failed\n");
- return;
- }
-
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0))
- goto err_unreserve;
-
- virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
-
- if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
- memcpy(srf->snooper.image, virtual,
- VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
- } else {
- /* Image is unsigned pointer. */
- for (i = 0; i < box->h; i++)
- memcpy(srf->snooper.image + i * image_pitch,
- virtual + i * cmd->dma.guest.pitch,
- box->w * desc->pitchBytesPerBlock);
- }
-
- srf->snooper.age++;
-
- ttm_bo_kunmap(&map);
-err_unreserve:
- ttm_bo_unreserve(bo);
-}
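The snooping copy above has two paths: a single memcpy when the guest pitch matches the snooper image pitch, and a per-row copy otherwise. A minimal user-space sketch of that pitch-aware copy, with caller-supplied sizes in place of the SVGA cursor constants:

#include <stdint.h>
#include <string.h>

/* Copy a rows x row_bytes image between buffers whose line strides
 * (pitches) may differ; fall back to one memcpy when they match. */
static void copy_with_pitch(uint8_t *dst, size_t dst_pitch,
			    const uint8_t *src, size_t src_pitch,
			    size_t row_bytes, size_t rows)
{
	size_t i;

	if (dst_pitch == src_pitch && src_pitch == row_bytes) {
		memcpy(dst, src, rows * row_bytes);
		return;
	}

	for (i = 0; i < rows; i++)
		memcpy(dst + i * dst_pitch, src + i * src_pitch, row_bytes);
}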
-
-/**
- * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
- *
- * @dev_priv: Pointer to the device private struct.
- *
- * Clears all legacy hotspots.
- */
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- drm_modeset_lock_all(dev);
- drm_for_each_crtc(crtc, dev) {
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = 0;
- du->hotspot_y = 0;
- }
- drm_modeset_unlock_all(dev);
-}
-
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- mutex_lock(&dev->mode_config.mutex);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- if (!du->cursor_surface ||
- du->cursor_age == du->cursor_surface->snooper.age ||
- !du->cursor_surface->snooper.image)
- continue;
-
- du->cursor_age = du->cursor_surface->snooper.age;
- vmw_send_define_cursor_cmd(dev_priv,
- du->cursor_surface->snooper.image,
- VMW_CURSOR_SNOOP_WIDTH,
- VMW_CURSOR_SNOOP_HEIGHT,
- du->hotspot_x + du->core_hotspot_x,
- du->hotspot_y + du->core_hotspot_y);
- }
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- u32 i;
-
- vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
-
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
-
- drm_plane_cleanup(plane);
-}
-
void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
@@ -575,262 +89,6 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane,
/**
- * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
- *
- * @vps: plane_state
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
-{
- int ret;
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- struct ttm_buffer_object *bo;
-
- if (!vps->cursor.bo)
- return -EINVAL;
-
- bo = &vps->cursor.bo->tbo;
-
- if (bo->base.size < size)
- return -EINVAL;
-
- if (vps->cursor.bo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- vmw_bo_map_and_cache(vps->cursor.bo);
-
- ttm_bo_unreserve(bo);
-
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- return 0;
-}
-
-
-/**
- * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
- *
- * @vps: state of the cursor plane
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
-{
- int ret = 0;
- struct vmw_bo *vbo = vps->cursor.bo;
-
- if (!vbo || !vbo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
- if (likely(ret == 0)) {
- vmw_bo_unmap(vbo);
- ttm_bo_unreserve(&vbo->tbo);
- }
-
- return ret;
-}
-
-
-/**
- * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
- *
- * @plane: cursor plane
- * @old_state: contains the state to clean up
- *
- * Unmaps all cursor bo mappings and unpins the cursor surface
- *
- * Returns 0 on success
- */
-void
-vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
-
- if (!vmw_user_object_is_null(&vps->uo))
- vmw_user_object_unmap(&vps->uo);
-
- vmw_du_cursor_plane_unmap_cm(vps);
- vmw_du_put_cursor_mob(vcp, vps);
-
- vmw_du_plane_unpin_surf(vps);
- vmw_user_object_unref(&vps->uo);
-}
-
-
-/**
- * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
- *
- * @plane: display plane
- * @new_state: info on the new plane state, including the FB
- *
- * Returns 0 on success
- */
-int
-vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_framebuffer *fb = new_state->fb;
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct vmw_bo *bo = NULL;
- int ret = 0;
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- vmw_user_object_unmap(&vps->uo);
- vmw_user_object_unref(&vps->uo);
- }
-
- if (fb) {
- if (vmw_framebuffer_to_vfb(fb)->bo) {
- vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
- vps->uo.surface = NULL;
- } else {
- memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
- }
- vmw_user_object_ref(&vps->uo);
- }
-
- bo = vmw_user_object_buffer(&vps->uo);
- if (bo) {
- struct ttm_operation_ctx ctx = {false, false};
-
- ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
- if (ret != 0)
- return -ENOMEM;
-
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret != 0)
- return -ENOMEM;
-
- vmw_bo_pin_reserved(bo, true);
- if (vmw_framebuffer_to_vfb(fb)->bo) {
- const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
-
- (void)vmw_bo_map_and_cache_size(bo, size);
- } else {
- vmw_bo_map_and_cache(bo);
- }
- ttm_bo_unreserve(&bo->tbo);
- }
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- vmw_du_get_cursor_mob(vcp, vps);
- vmw_du_cursor_plane_map_cm(vps);
- }
-
- return 0;
-}
-
-
-void
-vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
- plane);
- struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
- struct vmw_bo *old_bo = NULL;
- struct vmw_bo *new_bo = NULL;
- struct ww_acquire_ctx ctx;
- s32 hotspot_x, hotspot_y;
- int ret;
-
- hotspot_x = du->hotspot_x + new_state->hotspot_x;
- hotspot_y = du->hotspot_y + new_state->hotspot_y;
-
- du->cursor_surface = vmw_user_object_surface(&vps->uo);
-
- if (vmw_user_object_is_null(&vps->uo)) {
- vmw_cursor_update_position(dev_priv, false, 0, 0);
- return;
- }
-
- vps->cursor.hotspot_x = hotspot_x;
- vps->cursor.hotspot_y = hotspot_y;
-
- if (du->cursor_surface)
- du->cursor_age = du->cursor_surface->snooper.age;
-
- ww_acquire_init(&ctx, &reservation_ww_class);
-
- if (!vmw_user_object_is_null(&old_vps->uo)) {
- old_bo = vmw_user_object_buffer(&old_vps->uo);
- ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
- if (ret != 0)
- return;
- }
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- new_bo = vmw_user_object_buffer(&vps->uo);
- if (old_bo != new_bo) {
- ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
- if (ret != 0) {
- if (old_bo) {
- ttm_bo_unreserve(&old_bo->tbo);
- ww_acquire_fini(&ctx);
- }
- return;
- }
- } else {
- new_bo = NULL;
- }
- }
- if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
- /*
- * If it hasn't changed, avoid making the device do extra
- * work by keeping the old cursor active.
- */
- struct vmw_cursor_plane_state tmp = old_vps->cursor;
- old_vps->cursor = vps->cursor;
- vps->cursor = tmp;
- } else {
- void *image = vmw_du_cursor_plane_acquire_image(vps);
- if (image)
- vmw_cursor_update_image(dev_priv, vps, image,
- new_state->crtc_w,
- new_state->crtc_h,
- hotspot_x, hotspot_y);
- }
-
- if (new_bo)
- ttm_bo_unreserve(&new_bo->tbo);
- if (old_bo)
- ttm_bo_unreserve(&old_bo->tbo);
-
- ww_acquire_fini(&ctx);
-
- du->cursor_x = new_state->crtc_x + du->set_gui_x;
- du->cursor_y = new_state->crtc_y + du->set_gui_y;
-
- vmw_cursor_update_position(dev_priv, true,
- du->cursor_x + hotspot_x,
- du->cursor_y + hotspot_y);
-
- du->core_hotspot_x = hotspot_x - du->hotspot_x;
- du->core_hotspot_y = hotspot_y - du->hotspot_y;
-}
-
-
-/**
* vmw_du_primary_plane_atomic_check - check if the new state is okay
*
* @plane: display plane
@@ -873,66 +131,6 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
return ret;
}
-
-/**
- * vmw_du_cursor_plane_atomic_check - check if the new state is okay
- *
- * @plane: cursor plane
- * @state: info on the new plane state
- *
- * This is a chance to fail if the new cursor state does not fit
- * our requirements.
- *
- * Returns 0 on success
- */
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- int ret = 0;
- struct drm_crtc_state *crtc_state = NULL;
- struct vmw_surface *surface = NULL;
- struct drm_framebuffer *fb = new_state->fb;
-
- if (new_state->crtc)
- crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
- new_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true, true);
- if (ret)
- return ret;
-
- /* Turning off */
- if (!fb)
- return 0;
-
- /* A lot of the code assumes this */
- if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
- DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
- new_state->crtc_w, new_state->crtc_h);
- return -EINVAL;
- }
-
- if (!vmw_framebuffer_to_vfb(fb)->bo) {
- surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
-
- WARN_ON(!surface);
-
- if (!surface ||
- (!surface->snooper.image && !surface->res.guest_memory_bo)) {
- DRM_ERROR("surface not suitable for cursor\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1076,7 +274,7 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
vps->pinned = 0;
vps->cpp = 0;
- memset(&vps->cursor, 0, sizeof(vps->cursor));
+ vps->cursor.mob = NULL;
/* Each ref counted resource needs to be acquired again */
vmw_user_object_ref(&vps->uo);
@@ -1221,7 +419,20 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
+ struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
+ struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
+ if (bo) {
+ vmw_bo_dirty_release(bo);
+ /*
+ * bo->dirty is reference counted, so if it is NULL the
+ * surface was not coherent to begin with and the dirty
+ * tracker stored in the vmw_resource has to be freed here.
+ */
+ if (!bo->dirty && surf && surf->res.dirty)
+ surf->res.func->dirty_free(&surf->res);
+ }
drm_framebuffer_cleanup(framebuffer);
vmw_user_object_unref(&vfbs->uo);
@@ -1375,6 +586,7 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
+ vmw_bo_dirty_release(vfbd->buffer);
drm_framebuffer_cleanup(framebuffer);
vmw_bo_unreference(&vfbd->buffer);
@@ -1505,6 +717,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_framebuffer *vfb = NULL;
struct vmw_user_object uo = {0};
+ struct vmw_bo *bo;
+ struct vmw_surface *surface;
int ret;
/* returns either a bo or surface */
@@ -1534,6 +748,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
err_out:
+ bo = vmw_user_object_buffer(&uo);
+ surface = vmw_user_object_surface(&uo);
/* vmw_user_object_lookup takes one ref so does new_fb */
vmw_user_object_unref(&uo);
@@ -1542,6 +758,14 @@ err_out:
return ERR_PTR(ret);
}
+ ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ ret = vmw_bo_dirty_add(bo);
+ if (!ret && surface && surface->res.func->dirty_alloc) {
+ surface->res.coherent = true;
+ ret = surface->res.func->dirty_alloc(&surface->res);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+
return &vfb->base;
}
@@ -1974,44 +1198,6 @@ int vmw_kms_close(struct vmw_private *dev_priv)
return ret;
}
-int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_cursor_bypass_arg *arg = data;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
- }
-
- mutex_unlock(&dev->mode_config.mutex);
- return 0;
- }
-
- crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
- if (!crtc) {
- ret = -ENOENT;
- goto out;
- }
-
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 4eab581883e2..511e29cdb987 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,40 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_
+#include "vmwgfx_cursor_plane.h"
+#include "vmwgfx_drv.h"
+
#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
-#include "vmwgfx_drv.h"
-
/**
* struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update
* @plane: Plane which is being updated.
@@ -235,16 +216,11 @@ static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
DRM_FORMAT_XRGB1555,
};
-static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
-
#define vmw_crtc_state_to_vcs(x) container_of(x, struct vmw_crtc_state, base)
#define vmw_plane_state_to_vps(x) container_of(x, struct vmw_plane_state, base)
#define vmw_connector_state_to_vcs(x) \
container_of(x, struct vmw_connector_state, base)
-#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
/**
* Derived class for crtc state object
@@ -255,11 +231,6 @@ struct vmw_crtc_state {
struct drm_crtc_state base;
};
-struct vmw_cursor_plane_state {
- struct vmw_bo *bo;
- s32 hotspot_x;
- s32 hotspot_y;
-};
/**
* Derived class for plane state object
@@ -283,7 +254,6 @@ struct vmw_plane_state {
/* For CPU Blit */
unsigned int cpp;
- bool surf_mapped;
struct vmw_cursor_plane_state cursor;
};
@@ -317,17 +287,6 @@ struct vmw_connector_state {
int gui_y;
};
-/**
- * Derived class for cursor plane object
- *
- * @base DRM plane object
- * @cursor.cursor_mobs Cursor mobs available for re-use
- */
-struct vmw_cursor_plane {
- struct drm_plane base;
-
- struct vmw_bo *cursor_mobs[3];
-};
/**
* Base class display unit.
@@ -343,17 +302,6 @@ struct vmw_display_unit {
struct drm_plane primary;
struct vmw_cursor_plane cursor;
- struct vmw_surface *cursor_surface;
- size_t cursor_age;
-
- int cursor_x;
- int cursor_y;
-
- int hotspot_x;
- int hotspot_y;
- s32 core_hotspot_x;
- s32 core_hotspot_y;
-
unsigned unit;
/*
@@ -403,8 +351,6 @@ struct vmw_display_unit {
*/
void vmw_du_init(struct vmw_display_unit *du);
void vmw_du_cleanup(struct vmw_display_unit *du);
-void vmw_du_crtc_save(struct drm_crtc *crtc);
-void vmw_du_crtc_restore(struct drm_crtc *crtc);
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t size,
@@ -460,19 +406,10 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
/* Atomic Helpers */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state);
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state);
-void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state);
-int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state);
-void vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state);
void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
void vmw_du_plane_reset(struct drm_plane *plane);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index f0b429525467..c23c9195f0dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -372,7 +372,7 @@ static const struct drm_plane_funcs vmw_ldu_plane_funcs = {
static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -383,10 +383,10 @@ static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_ldu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 7055cbefc768..d8204d4265d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -282,8 +282,7 @@ out_no_setup:
}
vmw_bo_unpin_unlocked(&batch->otable_bo->tbo);
- ttm_bo_put(&batch->otable_bo->tbo);
- batch->otable_bo = NULL;
+ vmw_bo_unreference(&batch->otable_bo);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 74ff2812d66a..7de20e56082c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -1,27 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2019-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
#include "vmwgfx_bo.h"
@@ -71,6 +52,11 @@ struct vmw_bo_dirty {
unsigned long bitmap[];
};
+bool vmw_bo_is_dirty(struct vmw_bo *vbo)
+{
+ return vbo->dirty && (vbo->dirty->start < vbo->dirty->end);
+}
+
/**
* vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
* @vbo: The buffer object to scan
@@ -341,6 +327,41 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
dirty->end = res_start;
}
+void vmw_bo_dirty_clear(struct vmw_bo *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t start, cur, end;
+ unsigned long res_start = 0;
+ unsigned long res_end = vbo->tbo.base.size;
+
+ WARN_ON_ONCE(res_start & ~PAGE_MASK);
+ res_start >>= PAGE_SHIFT;
+ res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
+
+ if (res_start >= dirty->end || res_end <= dirty->start)
+ return;
+
+ cur = max(res_start, dirty->start);
+ res_end = max(res_end, dirty->end);
+ while (cur < res_end) {
+ unsigned long num;
+
+ start = find_next_bit(&dirty->bitmap[0], res_end, cur);
+ if (start >= res_end)
+ break;
+
+ end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
+ cur = end + 1;
+ num = end - start;
+ bitmap_clear(&dirty->bitmap[0], start, num);
+ }
+
+ if (res_start <= dirty->start && res_end > dirty->start)
+ dirty->start = res_end;
+ if (res_start < dirty->end && res_end >= dirty->end)
+ dirty->end = res_start;
+}
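The new vmw_bo_dirty_clear() walks the page-granular dirty bitmap run by run with find_next_bit()/find_next_zero_bit() and clears each run. A simplified stand-alone version of that loop, using a plain bool array in place of the kernel bitmap helpers (an assumption made only for this sketch):

#include <stdbool.h>
#include <stddef.h>

/* Clear every run of set bits in dirty[start..end). Each inner loop
 * mirrors one of the kernel helpers noted in the comments. */
static void dirty_clear_runs(bool *dirty, size_t start, size_t end)
{
	size_t cur = start;

	while (cur < end) {
		size_t run_start, i;

		while (cur < end && !dirty[cur])	/* find_next_bit() */
			cur++;
		if (cur >= end)
			break;
		run_start = cur;

		while (cur < end && dirty[cur])		/* find_next_zero_bit() */
			cur++;

		for (i = run_start; i < cur; i++)	/* bitmap_clear() */
			dirty[i] = false;
	}
}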
+
/**
* vmw_bo_dirty_clear_res - Clear a resource's dirty region from
* its backing mob.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a73af8a355fb..388011696941 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -273,7 +273,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
goto out_bad_resource;
res = converter->base_obj_to_res(base);
- kref_get(&res->kref);
+ vmw_resource_reference(res);
*p_res = res;
ret = 0;
@@ -347,7 +347,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
return 0;
}
- ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
+ ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
if (unlikely(ret != 0))
goto out_no_bo;
@@ -531,9 +531,9 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
}
INIT_LIST_HEAD(&val_list);
- ttm_bo_get(&res->guest_memory_bo->tbo);
val_buf->bo = &res->guest_memory_bo->tbo;
val_buf->num_shared = 0;
+ drm_gem_object_get(&val_buf->bo->base);
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
@@ -557,7 +557,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
if (guest_memory_dirty)
vmw_user_bo_unref(&res->guest_memory_bo);
@@ -619,7 +619,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 32029d80b72b..5f5f5a94301f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -445,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer);
+ ret = vmw_bo_create(dev_priv, &bo_params, &vps->uo.buffer);
vmw_overlay_resume_all(dev_priv);
if (ret)
return ret;
@@ -764,7 +764,7 @@ static const struct drm_plane_funcs vmw_sou_plane_funcs = {
static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -775,10 +775,10 @@ static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index f5d2ed1b0a72..20aab725e53a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1482,7 +1482,7 @@ static const struct drm_plane_funcs vmw_stdu_plane_funcs = {
static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -1494,10 +1494,10 @@ static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_stdu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
@@ -1584,6 +1584,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(&cursor->base, &vmw_stdu_cursor_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(&cursor->base);
ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 5721c74da3e0..7e281c3c6bc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,32 +1,13 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#include "vmwgfx_bo.h"
+#include "vmwgfx_cursor_plane.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
@@ -658,7 +639,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
- WARN_ON_ONCE(res->dirty);
+ WARN_ON(res->dirty);
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
@@ -689,8 +670,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
* Dumb buffers own the resource and they'll unref the
* resource themselves
*/
- if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb)
- return;
+ WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb);
vmw_resource_unreference(&res);
}
@@ -818,25 +798,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
}
res->guest_memory_size = cur_bo_offset;
- if (!file_priv->atomic &&
- metadata->scanout &&
- metadata->num_sizes == 1 &&
- metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
- metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
- metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
- VMW_CURSOR_SNOOP_HEIGHT *
- desc->pitchBytesPerBlock;
- srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL);
- if (!srf->snooper.image) {
- DRM_ERROR("Failed to allocate cursor_image\n");
- ret = -ENOMEM;
- goto out_no_copy;
- }
- } else {
- srf->snooper.image = NULL;
+
+ srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata);
+ if (IS_ERR(srf->snooper.image)) {
+ ret = PTR_ERR(srf->snooper.image);
+ goto out_no_copy;
}
if (drm_is_primary_client(file_priv))
@@ -864,14 +830,17 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv,
- &params,
- &res->guest_memory_bo);
+ ret = vmw_bo_create(dev_priv, &params, &res->guest_memory_bo);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
- vmw_bo_add_detached_resource(res->guest_memory_bo, res);
}
tmp = vmw_resource_reference(&srf->res);
@@ -1670,6 +1639,14 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
+ if (res->guest_memory_bo) {
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+ }
+
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
VMW_RES_SURFACE,
@@ -1684,7 +1661,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->handle = user_srf->prime.base.handle;
rep->backup_size = res->guest_memory_size;
if (res->guest_memory_bo) {
- vmw_bo_add_detached_resource(res->guest_memory_bo, res);
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
rep->buffer_size = res->guest_memory_bo->tbo.base.size;
@@ -2358,12 +2334,19 @@ int vmw_dumb_create(struct drm_file *file_priv,
vbo = res->guest_memory_bo;
vbo->is_dumb = true;
vbo->dumb_surface = vmw_res_to_srf(res);
-
+ drm_gem_object_put(&vbo->tbo.base);
+ /*
+ * Unset the user surface dtor since this is not actually exposed
+ * to userspace. The surface is owned via the dumb_buffer's GEM handle.
+ */
+ struct vmw_user_surface *usurf = container_of(vbo->dumb_surface,
+ struct vmw_user_surface, srf);
+ usurf->prime.base.refcount_release = NULL;
err:
if (res)
vmw_resource_unreference(&res);
- if (ret)
- ttm_ref_object_base_unref(tfile, arg.rep.handle);
+
+ ttm_ref_object_base_unref(tfile, arg.rep.handle);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index e7625b3f71e0..7ee93e7191c7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -262,9 +262,8 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
bo_node->hash.key);
}
val_buf = &bo_node->base;
- val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
- if (!val_buf->bo)
- return -ESRCH;
+ vmw_bo_reference(vbo);
+ val_buf->bo = &vbo->tbo;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
}
@@ -656,7 +655,7 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
struct vmw_validation_res_node *val;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
- ttm_bo_put(entry->base.bo);
+ drm_gem_object_put(&entry->base.bo->base);
entry->base.bo = NULL;
}
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 5c2f459a2925..2169bc969ea1 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_XE
tristate "Intel Xe Graphics"
- depends on DRM && PCI && MMU && (m || (y && KUNIT=y))
+ depends on DRM && PCI && (m || (y && KUNIT=y))
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -39,7 +39,6 @@ config DRM_XE
select DRM_TTM_HELPER
select DRM_EXEC
select DRM_GPUVM
- select DRM_GPUSVM if !UML && DEVICE_PRIVATE
select DRM_SCHED
select MMU_NOTIFIER
select WANT_DEV_COREDUMP
@@ -74,9 +73,22 @@ config DRM_XE_DP_TUNNEL
If in doubt say "Y".
+config DRM_XE_GPUSVM
+ bool "Enable CPU to GPU address mirroring"
+ depends on DRM_XE
+ depends on !UML
+ depends on DEVICE_PRIVATE
+ default y
+ select DRM_GPUSVM
+ help
+ Enable this option if you want support for CPU to GPU address
+ mirroring.
+
+ If in doubt say "Y".
+
config DRM_XE_DEVMEM_MIRROR
bool "Enable device memory mirror"
- depends on DRM_XE
+ depends on DRM_XE_GPUSVM
select GET_FREE_REGION
default y
help
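The new DRM_XE_GPUSVM symbol is a bool, so besides gating xe_svm.o in the Makefile it can also be tested from C with IS_ENABLED(). The helper below is purely hypothetical and only shows the usual pattern for compiling an optional path out; it is not taken from the xe driver.

#include <linux/kconfig.h>

/* Hypothetical example: skip SVM setup entirely when the
 * CONFIG_DRM_XE_GPUSVM option is not built in. */
static int maybe_init_gpusvm(void)
{
	if (!IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
		return 0;	/* address mirroring not compiled in */

	/* ... real initialisation would call into xe_svm.o here ... */
	return 0;
}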
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 9699b08585f7..e4bf484d4121 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -125,12 +125,13 @@ xe-y += xe_bb.o \
xe_wopcm.o
xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
-xe-$(CONFIG_DRM_GPUSVM) += xe_svm.o
+xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
xe-$(CONFIG_PERF_EVENTS) += xe_pmu.o
+xe-$(CONFIG_CONFIGFS_FS) += xe_configfs.o
# graphics virtualization (SR-IOV) support
xe-y += \
@@ -185,7 +186,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/intel_fbdev_fb.o \
display/xe_display.o \
display/xe_display_misc.o \
- display/xe_display_rps.o \
+ display/xe_display_rpm.o \
display/xe_display_wa.o \
display/xe_dsb_buffer.o \
display/xe_fb_pin.o \
@@ -196,7 +197,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
# SOC code shared with i915
xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-soc/intel_dram.o \
- i915-soc/intel_pch.o \
i915-soc/intel_rom.o
# Display code shared with i915
@@ -271,6 +271,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_panel.o \
i915-display/intel_pfit.o \
i915-display/intel_pmdemand.o \
+ i915-display/intel_pch.o \
i915-display/intel_pps.o \
i915-display/intel_psr.o \
i915-display/intel_qp_tables.o \
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index ec516e838ee8..448afb86e05c 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -141,6 +141,7 @@ enum xe_guc_action {
XE_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
XE_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER = 0x550C,
+ XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER = 0x550D,
XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR = 0x6000,
XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC = 0x6002,
XE_GUC_ACTION_PAGE_FAULT_RES_DESC = 0x6003,
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index d633f1c739e4..7de8f827281f 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -367,6 +367,7 @@ enum xe_guc_klv_ids {
GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009,
GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO = 0x900a,
+ GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH = 0x900b,
};
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h b/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h
deleted file mode 100644
index 21fec9cc837c..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_RPS_H__
-#define __INTEL_RPS_H__
-
-#define gen5_rps_irq_handler(x) ({})
-
-#endif /* __INTEL_RPS_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index dfec5108d2c3..9b7572e06f34 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -13,7 +13,6 @@
#include <drm/drm_drv.h>
#include "i915_utils.h"
-#include "intel_runtime_pm.h"
#include "xe_device.h" /* for xe_device_has_flat_ccs() */
#include "xe_device_types.h"
@@ -22,28 +21,12 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
return container_of(dev, struct drm_i915_private, drm);
}
+/* compat platform checks only for soc/ usage */
#define IS_PLATFORM(xe, x) ((xe)->info.platform == x)
-#define INTEL_INFO(dev_priv) (&((dev_priv)->info))
-#define IS_I830(dev_priv) (dev_priv && 0)
-#define IS_I845G(dev_priv) (dev_priv && 0)
-#define IS_I85X(dev_priv) (dev_priv && 0)
-#define IS_I865G(dev_priv) (dev_priv && 0)
#define IS_I915G(dev_priv) (dev_priv && 0)
#define IS_I915GM(dev_priv) (dev_priv && 0)
-#define IS_I945G(dev_priv) (dev_priv && 0)
-#define IS_I945GM(dev_priv) (dev_priv && 0)
-#define IS_I965G(dev_priv) (dev_priv && 0)
-#define IS_I965GM(dev_priv) (dev_priv && 0)
-#define IS_G45(dev_priv) (dev_priv && 0)
-#define IS_GM45(dev_priv) (dev_priv && 0)
-#define IS_G4X(dev_priv) (dev_priv && 0)
#define IS_PINEVIEW(dev_priv) (dev_priv && 0)
-#define IS_G33(dev_priv) (dev_priv && 0)
-#define IS_IRONLAKE(dev_priv) (dev_priv && 0)
-#define IS_IRONLAKE_M(dev_priv) (dev_priv && 0)
-#define IS_SANDYBRIDGE(dev_priv) (dev_priv && 0)
#define IS_IVYBRIDGE(dev_priv) (dev_priv && 0)
-#define IS_IVB_GT1(dev_priv) (dev_priv && 0)
#define IS_VALLEYVIEW(dev_priv) (dev_priv && 0)
#define IS_CHERRYVIEW(dev_priv) (dev_priv && 0)
#define IS_HASWELL(dev_priv) (dev_priv && 0)
@@ -71,39 +54,10 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
-#define IS_BROADWELL_ULX(dev_priv) (dev_priv && 0)
#define IS_MOBILE(xe) (xe && 0)
-#define IS_TIGERLAKE_UY(xe) (xe && 0)
-#define IS_COMETLAKE_ULX(xe) (xe && 0)
-#define IS_COFFEELAKE_ULX(xe) (xe && 0)
-#define IS_KABYLAKE_ULX(xe) (xe && 0)
-#define IS_SKYLAKE_ULX(xe) (xe && 0)
-#define IS_HASWELL_ULX(xe) (xe && 0)
-#define IS_COMETLAKE_ULT(xe) (xe && 0)
-#define IS_COFFEELAKE_ULT(xe) (xe && 0)
-#define IS_KABYLAKE_ULT(xe) (xe && 0)
-#define IS_SKYLAKE_ULT(xe) (xe && 0)
-
-#define IS_DG2_G10(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G10)
-#define IS_DG2_G11(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G11)
-#define IS_DG2_G12(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G12)
-#define IS_RAPTORLAKE_U(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_ALDERLAKE_P_RPLU)
-#define IS_ICL_WITH_PORT_F(xe) (xe && 0)
#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe))
-
#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
-#ifdef CONFIG_ARM64
-/*
- * arm64 indirectly includes linux/rtc.h,
- * which defines a irq_lock, so include it
- * here before #define-ing it
- */
-#include <linux/rtc.h>
-#endif
-
-#define irq_lock irq.lock
-
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
deleted file mode 100644
index 274042bff1be..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_RUNTIME_PM_H__
-#define __INTEL_RUNTIME_PM_H__
-
-#include "intel_wakeref.h"
-#include "xe_device_types.h"
-#include "xe_pm.h"
-
-#define intel_runtime_pm xe_runtime_pm
-
-static inline void disable_rpm_wakeref_asserts(void *rpm)
-{
-}
-
-static inline void enable_rpm_wakeref_asserts(void *rpm)
-{
-}
-
-static inline bool
-intel_runtime_pm_suspended(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return pm_runtime_suspended(xe->drm.dev);
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return xe_pm_runtime_resume_and_get(xe) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return xe_pm_runtime_get_if_in_use(xe) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- xe_pm_runtime_get_noresume(xe);
-
- return INTEL_WAKEREF_DEF;
-}
-
-static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- xe_pm_runtime_put(xe);
-}
-
-static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_t wakeref)
-{
- if (wakeref)
- intel_runtime_pm_put_unchecked(pm);
-}
-
-#define intel_runtime_pm_get_raw intel_runtime_pm_get
-#define intel_runtime_pm_put_raw intel_runtime_pm_put
-#define assert_rpm_wakelock_held(x) do { } while (0)
-#define assert_rpm_raw_wakeref_held(x) do { } while (0)
-
-#define with_intel_runtime_pm(rpm, wf) \
- for ((wf) = intel_runtime_pm_get(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h
deleted file mode 100644
index 9c46556d33a4..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "../../../i915/soc/intel_pch.h"
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 3a1e505ff182..e8191562d122 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -45,7 +45,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT);
if (!IS_ERR(obj))
drm_info(&xe->drm, "Allocated fbdev into stolen\n");
else
@@ -56,7 +56,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT);
}
if (IS_ERR(obj)) {
@@ -79,11 +79,11 @@ err:
return ERR_CAST(fb);
}
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
- struct drm_gem_object *_obj, struct i915_vma *vma)
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+ struct drm_gem_object *_obj, struct i915_vma *vma)
{
struct xe_bo *obj = gem_to_xe_bo(_obj);
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
if (obj->flags & XE_BO_FLAG_STOLEN)
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 0b0aca7a25af..68f064f33d4b 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -133,9 +133,6 @@ int xe_display_init_early(struct xe_device *xe)
/* Fake uncore lock */
spin_lock_init(&xe->uncore.lock);
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(xe);
-
intel_display_driver_early_probe(display);
/* Early display init.. */
@@ -147,7 +144,7 @@ int xe_display_init_early(struct xe_device *xe)
*/
intel_dram_detect(xe);
- intel_bw_init_hw(xe);
+ intel_bw_init_hw(display);
intel_display_device_info_runtime_init(display);
@@ -173,7 +170,7 @@ static void xe_display_fini(void *arg)
struct xe_device *xe = arg;
struct intel_display *display = &xe->display;
- intel_hpd_poll_fini(xe);
+ intel_hpd_poll_fini(display);
intel_hdcp_component_fini(display);
intel_audio_deinit(display);
intel_display_driver_remove(display);
@@ -220,11 +217,13 @@ void xe_display_unregister(struct xe_device *xe)
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
if (master_ctl & DISPLAY_IRQ)
- gen11_display_irq_handler(xe);
+ gen11_display_irq_handler(display);
}
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
@@ -240,19 +239,23 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
void xe_display_irq_reset(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- gen11_display_irq_reset(xe);
+ gen11_display_irq_reset(display);
}
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
if (gt->info.id == XE_GT0)
- gen11_de_irq_postinstall(xe);
+ gen11_de_irq_postinstall(display);
}
static bool suspend_to_idle(void)
@@ -305,7 +308,7 @@ static void xe_display_enable_d3cold(struct xe_device *xe)
intel_dmc_suspend(display);
if (has_display(xe))
- intel_hpd_poll_enable(xe);
+ intel_hpd_poll_enable(display);
}
static void xe_display_disable_d3cold(struct xe_device *xe)
@@ -322,10 +325,10 @@ static void xe_display_disable_d3cold(struct xe_device *xe)
intel_display_driver_init_hw(display);
- intel_hpd_init(xe);
+ intel_hpd_init(display);
if (has_display(xe))
- intel_hpd_poll_disable(xe);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -355,7 +358,7 @@ void xe_display_pm_suspend(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
- intel_hpd_cancel_work(xe);
+ intel_hpd_cancel_work(display);
if (has_display(xe)) {
intel_display_driver_suspend_access(display);
@@ -385,7 +388,7 @@ void xe_display_pm_shutdown(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
intel_dp_mst_suspend(display);
- intel_hpd_cancel_work(xe);
+ intel_hpd_cancel_work(display);
if (has_display(xe))
intel_display_driver_suspend_access(display);
@@ -400,6 +403,8 @@ void xe_display_pm_shutdown(struct xe_device *xe)
void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
@@ -408,7 +413,7 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe)
return;
}
- intel_hpd_poll_enable(xe);
+ intel_hpd_poll_enable(display);
}
void xe_display_pm_suspend_late(struct xe_device *xe)
@@ -482,7 +487,7 @@ void xe_display_pm_resume(struct xe_device *xe)
if (has_display(xe))
intel_display_driver_resume_access(display);
- intel_hpd_init(xe);
+ intel_hpd_init(display);
if (has_display(xe)) {
intel_display_driver_resume(display);
@@ -491,7 +496,7 @@ void xe_display_pm_resume(struct xe_device *xe)
}
if (has_display(xe))
- intel_hpd_poll_disable(xe);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -502,6 +507,8 @@ void xe_display_pm_resume(struct xe_device *xe)
void xe_display_pm_runtime_resume(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
@@ -510,9 +517,9 @@ void xe_display_pm_runtime_resume(struct xe_device *xe)
return;
}
- intel_hpd_init(xe);
- intel_hpd_poll_disable(xe);
- skl_watermark_ipc_update(xe);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
+ skl_watermark_ipc_update(display);
}
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c
new file mode 100644
index 000000000000..1955153aadba
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "intel_display_rpm.h"
+#include "xe_device_types.h"
+#include "xe_pm.h"
+
+static struct xe_device *display_to_xe(struct intel_display *display)
+{
+ return container_of(display, struct xe_device, display);
+}
+
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+{
+ return intel_display_rpm_get(display);
+}
+
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_display_rpm_put(display, wakeref);
+}
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+{
+ return xe_pm_runtime_resume_and_get(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
+{
+ return xe_pm_runtime_get_if_in_use(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
+{
+ xe_pm_runtime_get_noresume(display_to_xe(display));
+
+ return INTEL_WAKEREF_DEF;
+}
+
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ if (wakeref)
+ xe_pm_runtime_put(display_to_xe(display));
+}
+
+void intel_display_rpm_put_unchecked(struct intel_display *display)
+{
+ xe_pm_runtime_put(display_to_xe(display));
+}
+
+bool intel_display_rpm_suspended(struct intel_display *display)
+{
+ struct xe_device *xe = display_to_xe(display);
+
+ return pm_runtime_suspended(xe->drm.dev);
+}
+
+void assert_display_rpm_held(struct intel_display *display)
+{
+ /* FIXME */
+}
+
+void intel_display_rpm_assert_block(struct intel_display *display)
+{
+ /* FIXME */
+}
+
+void intel_display_rpm_assert_unblock(struct intel_display *display)
+{
+ /* FIXME */
+}
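A usage sketch (illustrative only, not part of the patch) of how a display-side caller is expected to use the wrappers defined above; with this backend, intel_display_rpm_get() returns a non-NULL wakeref only when the underlying xe_pm_runtime_resume_and_get() succeeded:

static void example_display_op(struct intel_display *display)
{
	struct ref_tracker *wakeref;

	wakeref = intel_display_rpm_get(display);
	if (!wakeref)
		return;	/* device could not be runtime-resumed */

	/* safe to touch display hardware here */

	intel_display_rpm_put(display, wakeref);
}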
diff --git a/drivers/gpu/drm/xe/display/xe_display_rps.c b/drivers/gpu/drm/xe/display/xe_display_rps.c
deleted file mode 100644
index fa616f9688a5..000000000000
--- a/drivers/gpu/drm/xe/display/xe_display_rps.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "intel_display_rps.h"
-
-void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
- struct dma_fence *fence)
-{
-}
-
-void intel_display_rps_mark_interactive(struct intel_display *display,
- struct intel_atomic_state *state,
- bool interactive)
-{
-}
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
index 68e3d1959ad6..2933ca97d673 100644
--- a/drivers/gpu/drm/xe/display/xe_display_wa.c
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -10,7 +10,9 @@
#include <generated/xe_wa_oob.h>
-bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915)
+bool intel_display_needs_wa_16023588340(struct intel_display *display)
{
- return XE_WA(xe_root_mmio_gt(i915), 16023588340);
+ struct xe_device *xe = to_xe_device(display->drm);
+
+ return XE_WA(xe_root_mmio_gt(xe), 16023588340);
}
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 7c02323e9531..b35a6f201d4a 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -9,7 +9,6 @@
#include "abi/gsc_command_header_abi.h"
#include "intel_hdcp_gsc.h"
-#include "intel_hdcp_gsc_message.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
@@ -22,7 +21,8 @@
#define HECI_MEADDRESS_HDCP 18
-struct intel_hdcp_gsc_message {
+struct intel_hdcp_gsc_context {
+ struct xe_device *xe;
struct xe_bo *hdcp_bo;
u64 hdcp_cmd_in;
u64 hdcp_cmd_out;
@@ -30,14 +30,9 @@ struct intel_hdcp_gsc_message {
#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)
-bool intel_hdcp_gsc_cs_required(struct intel_display *display)
+bool intel_hdcp_gsc_check_status(struct drm_device *drm)
{
- return DISPLAY_VER(display) >= 14;
-}
-
-bool intel_hdcp_gsc_check_status(struct intel_display *display)
-{
- struct xe_device *xe = to_xe_device(display->drm);
+ struct xe_device *xe = to_xe_device(drm);
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_gt *gt = tile->media_gt;
struct xe_gsc *gsc = &gt->uc.gsc;
@@ -69,10 +64,9 @@ out:
}
/* This function helps allocate memory for the command that we will send to gsc cs */
-static int intel_hdcp_gsc_initialize_message(struct intel_display *display,
- struct intel_hdcp_gsc_message *hdcp_message)
+static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
+ struct intel_hdcp_gsc_context *gsc_context)
{
- struct xe_device *xe = to_xe_device(display->drm);
struct xe_bo *bo = NULL;
u64 cmd_in, cmd_out;
int ret = 0;
@@ -84,7 +78,7 @@ static int intel_hdcp_gsc_initialize_message(struct intel_display *display,
XE_BO_FLAG_GGTT);
if (IS_ERR(bo)) {
- drm_err(display->drm, "Failed to allocate bo for HDCP streaming command!\n");
+ drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
ret = PTR_ERR(bo);
goto out;
}
@@ -93,104 +87,60 @@ static int intel_hdcp_gsc_initialize_message(struct intel_display *display,
cmd_out = cmd_in + PAGE_SIZE;
xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);
- hdcp_message->hdcp_bo = bo;
- hdcp_message->hdcp_cmd_in = cmd_in;
- hdcp_message->hdcp_cmd_out = cmd_out;
+ gsc_context->hdcp_bo = bo;
+ gsc_context->hdcp_cmd_in = cmd_in;
+ gsc_context->hdcp_cmd_out = cmd_out;
+ gsc_context->xe = xe;
+
out:
return ret;
}
-static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display)
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
{
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct xe_device *xe = to_xe_device(drm);
+ struct intel_hdcp_gsc_context *gsc_context;
int ret;
- hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
-
- if (!hdcp_message)
- return -ENOMEM;
+ gsc_context = kzalloc(sizeof(*gsc_context), GFP_KERNEL);
+ if (!gsc_context)
+ return ERR_PTR(-ENOMEM);
/*
* NOTE: No need to lock the comp mutex here as it is already
* going to be taken before this function is called
*/
- ret = intel_hdcp_gsc_initialize_message(display, hdcp_message);
+ ret = intel_hdcp_gsc_initialize_message(xe, gsc_context);
if (ret) {
- drm_err(display->drm, "Could not initialize hdcp_message\n");
- kfree(hdcp_message);
- return ret;
+ drm_err(&xe->drm, "Could not initialize gsc_context\n");
+ kfree(gsc_context);
+ gsc_context = ERR_PTR(ret);
}
- display->hdcp.hdcp_message = hdcp_message;
- return ret;
-}
-
-static const struct i915_hdcp_ops gsc_hdcp_ops = {
- .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
- .verify_receiver_cert_prepare_km =
- intel_hdcp_gsc_verify_receiver_cert_prepare_km,
- .verify_hprime = intel_hdcp_gsc_verify_hprime,
- .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
- .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
- .verify_lprime = intel_hdcp_gsc_verify_lprime,
- .get_session_key = intel_hdcp_gsc_get_session_key,
- .repeater_check_flow_prepare_ack =
- intel_hdcp_gsc_repeater_check_flow_prepare_ack,
- .verify_mprime = intel_hdcp_gsc_verify_mprime,
- .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
- .close_hdcp_session = intel_hdcp_gsc_close_session,
-};
-
-int intel_hdcp_gsc_init(struct intel_display *display)
-{
- struct i915_hdcp_arbiter *data;
- int ret;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- mutex_lock(&display->hdcp.hdcp_mutex);
- display->hdcp.arbiter = data;
- display->hdcp.arbiter->hdcp_dev = display->drm->dev;
- display->hdcp.arbiter->ops = &gsc_hdcp_ops;
- ret = intel_hdcp_gsc_hdcp2_init(display);
- if (ret)
- kfree(data);
-
- mutex_unlock(&display->hdcp.hdcp_mutex);
-
- return ret;
+ return gsc_context;
}
-void intel_hdcp_gsc_fini(struct intel_display *display)
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
{
- struct intel_hdcp_gsc_message *hdcp_message =
- display->hdcp.hdcp_message;
- struct i915_hdcp_arbiter *arb = display->hdcp.arbiter;
-
- if (hdcp_message) {
- xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
- kfree(hdcp_message);
- display->hdcp.hdcp_message = NULL;
- }
+ if (!gsc_context)
+ return;
- kfree(arb);
- display->hdcp.arbiter = NULL;
+ xe_bo_unpin_map_no_vm(gsc_context->hdcp_bo);
+ kfree(gsc_context);
}
static int xe_gsc_send_sync(struct xe_device *xe,
- struct intel_hdcp_gsc_message *hdcp_message,
+ struct intel_hdcp_gsc_context *gsc_context,
u32 msg_size_in, u32 msg_size_out,
u32 addr_out_off)
{
- struct xe_gt *gt = hdcp_message->hdcp_bo->tile->media_gt;
- struct iosys_map *map = &hdcp_message->hdcp_bo->vmap;
+ struct xe_gt *gt = gsc_context->hdcp_bo->tile->media_gt;
+ struct iosys_map *map = &gsc_context->hdcp_bo->vmap;
struct xe_gsc *gsc = &gt->uc.gsc;
int ret;
- ret = xe_gsc_pkt_submit_kernel(gsc, hdcp_message->hdcp_cmd_in, msg_size_in,
- hdcp_message->hdcp_cmd_out, msg_size_out);
+ ret = xe_gsc_pkt_submit_kernel(gsc, gsc_context->hdcp_cmd_in, msg_size_in,
+ gsc_context->hdcp_cmd_out, msg_size_out);
if (ret) {
drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret);
return ret;
@@ -205,12 +155,12 @@ static int xe_gsc_send_sync(struct xe_device *xe,
return ret;
}
-ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len)
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len)
{
+ struct xe_device *xe = gsc_context->xe;
const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
- struct intel_hdcp_gsc_message *hdcp_message;
u64 host_session_id;
u32 msg_size_in, msg_size_out;
u32 addr_out_off, addr_in_wr_off = 0;
@@ -223,15 +173,14 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
- hdcp_message = xe->display.hdcp.hdcp_message;
addr_out_off = PAGE_SIZE;
host_session_id = xe_gsc_create_host_session_id();
xe_pm_runtime_get_noresume(xe);
- addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
+ addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap,
addr_in_wr_off, HECI_MEADDRESS_HDCP,
host_session_id, msg_in_len);
- xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off,
+ xe_map_memcpy_to(xe, &gsc_context->hdcp_bo->vmap, addr_in_wr_off,
msg_in, msg_in_len);
/*
* Keep sending request in case the pending bit is set no need to add
@@ -240,7 +189,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
* 20 times each message 50 ms apart
*/
do {
- ret = xe_gsc_send_sync(xe, hdcp_message, msg_size_in, msg_size_out,
+ ret = xe_gsc_send_sync(xe, gsc_context, msg_size_in, msg_size_out,
addr_out_off);
/* Only try again if gsc says so */
@@ -254,7 +203,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
if (ret)
goto out;
- xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap,
+ xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap,
addr_out_off + HDCP_GSC_HEADER_SIZE,
msg_out_len);
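The refactor above replaces the per-display hdcp_message plus i915_hdcp_arbiter registration with an opaque, self-contained GSC context. A hypothetical caller-side sketch of the new lifecycle (function name and minimal error handling are illustrative only):

static int example_hdcp_exchange(struct drm_device *drm,
				 void *msg_in, size_t in_len,
				 void *msg_out, size_t out_len)
{
	struct intel_hdcp_gsc_context *gsc_context;
	ssize_t sent;

	gsc_context = intel_hdcp_gsc_context_alloc(drm);
	if (IS_ERR(gsc_context))
		return PTR_ERR(gsc_context);

	sent = intel_hdcp_gsc_msg_send(gsc_context, msg_in, in_len,
				       msg_out, out_len);

	intel_hdcp_gsc_context_free(gsc_context);

	return sent < 0 ? sent : 0;
}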
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 4ca0cb571194..6502b8274173 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -83,7 +83,7 @@ initial_plane_bo(struct xe_device *xe,
if (plane_config->size == 0)
return NULL;
- flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
+ flags = XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
base = round_down(plane_config->base, page_size);
if (IS_DGFX(xe)) {
diff --git a/drivers/gpu/drm/xe/instructions/xe_alu_commands.h b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h
new file mode 100644
index 000000000000..2987b10d3e16
--- /dev/null
+++ b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_ALU_COMMANDS_H_
+#define _XE_ALU_COMMANDS_H_
+
+#include "instructions/xe_instr_defs.h"
+
+/* Instruction Opcodes */
+#define CS_ALU_OPCODE_NOOP 0x000
+#define CS_ALU_OPCODE_FENCE_RD 0x001
+#define CS_ALU_OPCODE_FENCE_WR 0x002
+#define CS_ALU_OPCODE_LOAD 0x080
+#define CS_ALU_OPCODE_LOADINV 0x480
+#define CS_ALU_OPCODE_LOAD0 0x081
+#define CS_ALU_OPCODE_LOAD1 0x481
+#define CS_ALU_OPCODE_LOADIND 0x082
+#define CS_ALU_OPCODE_ADD 0x100
+#define CS_ALU_OPCODE_SUB 0x101
+#define CS_ALU_OPCODE_AND 0x102
+#define CS_ALU_OPCODE_OR 0x103
+#define CS_ALU_OPCODE_XOR 0x104
+#define CS_ALU_OPCODE_SHL 0x105
+#define CS_ALU_OPCODE_SHR 0x106
+#define CS_ALU_OPCODE_SAR 0x107
+#define CS_ALU_OPCODE_STORE 0x180
+#define CS_ALU_OPCODE_STOREINV 0x580
+#define CS_ALU_OPCODE_STOREIND 0x181
+
+/* Instruction Operands */
+#define CS_ALU_OPERAND_REG(n) REG_FIELD_PREP(GENMASK(3, 0), (n))
+#define CS_ALU_OPERAND_REG0 0x0
+#define CS_ALU_OPERAND_REG1 0x1
+#define CS_ALU_OPERAND_REG2 0x2
+#define CS_ALU_OPERAND_REG3 0x3
+#define CS_ALU_OPERAND_REG4 0x4
+#define CS_ALU_OPERAND_REG5 0x5
+#define CS_ALU_OPERAND_REG6 0x6
+#define CS_ALU_OPERAND_REG7 0x7
+#define CS_ALU_OPERAND_REG8 0x8
+#define CS_ALU_OPERAND_REG9 0x9
+#define CS_ALU_OPERAND_REG10 0xa
+#define CS_ALU_OPERAND_REG11 0xb
+#define CS_ALU_OPERAND_REG12 0xc
+#define CS_ALU_OPERAND_REG13 0xd
+#define CS_ALU_OPERAND_REG14 0xe
+#define CS_ALU_OPERAND_REG15 0xf
+#define CS_ALU_OPERAND_SRCA 0x20
+#define CS_ALU_OPERAND_SRCB 0x21
+#define CS_ALU_OPERAND_ACCU 0x31
+#define CS_ALU_OPERAND_ZF 0x32
+#define CS_ALU_OPERAND_CF 0x33
+#define CS_ALU_OPERAND_NA 0 /* N/A operand */
+
+/* Command Streamer ALU Instructions */
+#define CS_ALU_INSTR(opcode, op1, op2) (REG_FIELD_PREP(GENMASK(31, 20), (opcode)) | \
+ REG_FIELD_PREP(GENMASK(19, 10), (op1)) | \
+ REG_FIELD_PREP(GENMASK(9, 0), (op2)))
+
+#define __CS_ALU_INSTR(opcode, op1, op2) CS_ALU_INSTR(CS_ALU_OPCODE_##opcode, \
+ CS_ALU_OPERAND_##op1, \
+ CS_ALU_OPERAND_##op2)
+
+#define CS_ALU_INSTR_NOOP __CS_ALU_INSTR(NOOP, NA, NA)
+#define CS_ALU_INSTR_LOAD(op1, op2) __CS_ALU_INSTR(LOAD, op1, op2)
+#define CS_ALU_INSTR_LOADINV(op1, op2) __CS_ALU_INSTR(LOADINV, op1, op2)
+#define CS_ALU_INSTR_LOAD0(op1) __CS_ALU_INSTR(LOAD0, op1, NA)
+#define CS_ALU_INSTR_LOAD1(op1) __CS_ALU_INSTR(LOAD1, op1, NA)
+#define CS_ALU_INSTR_ADD __CS_ALU_INSTR(ADD, NA, NA)
+#define CS_ALU_INSTR_SUB __CS_ALU_INSTR(SUB, NA, NA)
+#define CS_ALU_INSTR_AND __CS_ALU_INSTR(AND, NA, NA)
+#define CS_ALU_INSTR_OR __CS_ALU_INSTR(OR, NA, NA)
+#define CS_ALU_INSTR_XOR __CS_ALU_INSTR(XOR, NA, NA)
+#define CS_ALU_INSTR_STORE(op1, op2) __CS_ALU_INSTR(STORE, op1, op2)
+#define CS_ALU_INSTR_STOREINV(op1, op2) __CS_ALU_INSTR(STOREINV, op1, op2)
+
+#endif
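Each ALU instruction above is a single dword: the opcode sits in bits 31:20 and the two operands in bits 19:10 and 9:0. A short encoding sketch derived from the definitions above (illustrative, not part of the patch):

/* CS_ALU_INSTR_LOAD(SRCA, REG0) expands to
 *   CS_ALU_INSTR(CS_ALU_OPCODE_LOAD, CS_ALU_OPERAND_SRCA, CS_ALU_OPERAND_REG0)
 * i.e. 0x080 in bits 31:20, 0x20 in bits 19:10, 0x0 in bits 9:0.
 *
 * The classic "REG2 = REG0 + REG1" program is then four such dwords:
 */
u32 alu_add[] = {
	CS_ALU_INSTR_LOAD(SRCA, REG0),	/* SRCA <- GPR0 */
	CS_ALU_INSTR_LOAD(SRCB, REG1),	/* SRCB <- GPR1 */
	CS_ALU_INSTR_ADD,		/* ACCU <- SRCA + SRCB */
	CS_ALU_INSTR_STORE(REG2, ACCU),	/* GPR2 <- ACCU */
};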
diff --git a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
index 31d28a67ef6a..457881af8af9 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
@@ -137,6 +137,7 @@
#define CMD_3DSTATE_CLIP_MESH GFXPIPE_3D_CMD(0x0, 0x81)
#define CMD_3DSTATE_SBE_MESH GFXPIPE_3D_CMD(0x0, 0x82)
#define CMD_3DSTATE_CPSIZE_CONTROL_BUFFER GFXPIPE_3D_CMD(0x0, 0x83)
+#define CMD_3DSTATE_COARSE_PIXEL GFXPIPE_3D_CMD(0x0, 0x89)
#define CMD_3DSTATE_DRAWING_RECTANGLE GFXPIPE_3D_CMD(0x1, 0x0)
#define CMD_3DSTATE_CHROMA_KEY GFXPIPE_3D_CMD(0x1, 0x4)
diff --git a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
index a255946b6f77..8cfcd3360896 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
@@ -41,6 +41,7 @@
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
+#define PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE BIT(10) /* gen12 */
#define PIPE_CONTROL0_HDC_PIPELINE_FLUSH BIT(9) /* gen12 */
#define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29)
diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
index 167fb0f742de..e3f5e8bb3ebc 100644
--- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
@@ -32,6 +32,7 @@
#define MI_BATCH_BUFFER_END __MI_INSTR(0xA)
#define MI_TOPOLOGY_FILTER __MI_INSTR(0xD)
#define MI_FORCE_WAKEUP __MI_INSTR(0x1D)
+#define MI_MATH(n) (__MI_INSTR(0x1A) | XE_INSTR_NUM_DW((n) + 1))
#define MI_STORE_DATA_IMM __MI_INSTR(0x20)
#define MI_SDI_GGTT REG_BIT(22)
@@ -47,6 +48,10 @@
#define MI_LRI_FORCE_POSTED REG_BIT(12)
#define MI_LRI_LEN(x) (((x) & 0xff) + 1)
+#define MI_STORE_REGISTER_MEM (__MI_INSTR(0x24) | XE_INSTR_NUM_DW(4))
+#define MI_SRM_USE_GGTT REG_BIT(22)
+#define MI_SRM_ADD_CS_OFFSET REG_BIT(19)
+
#define MI_FLUSH_DW __MI_INSTR(0x26)
#define MI_FLUSH_DW_PROTECTED_MEM_EN REG_BIT(22)
#define MI_FLUSH_DW_STORE_INDEX REG_BIT(21)
@@ -61,6 +66,10 @@
#define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4))
#define MI_LRM_USE_GGTT REG_BIT(22)
+#define MI_LOAD_REGISTER_REG (__MI_INSTR(0x2a) | XE_INSTR_NUM_DW(3))
+#define MI_LRR_DST_CS_MMIO REG_BIT(19)
+#define MI_LRR_SRC_CS_MMIO REG_BIT(18)
+
#define MI_COPY_MEM_MEM (__MI_INSTR(0x2e) | XE_INSTR_NUM_DW(5))
#define MI_COPY_MEM_MEM_SRC_GGTT REG_BIT(22)
#define MI_COPY_MEM_MEM_DST_GGTT REG_BIT(21)
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index fb8ec317b6ee..7ade41e2b7b3 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -43,6 +43,10 @@
#define XEHPC_BCS8_RING_BASE 0x3ee000
#define GSCCS_RING_BASE 0x11a000
+#define ENGINE_ID(base) XE_REG((base) + 0x8c)
+#define ENGINE_INSTANCE_ID REG_GENMASK(9, 4)
+#define ENGINE_CLASS_ID REG_GENMASK(2, 0)
+
#define RING_TAIL(base) XE_REG((base) + 0x30)
#define TAIL_ADDR REG_GENMASK(20, 3)
@@ -154,6 +158,7 @@
#define STOP_RING REG_BIT(8)
#define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8)
+#define RING_CTX_TIMESTAMP_UDW(base) XE_REG((base) + 0x3ac)
#define CSBE_DEBUG_STATUS(base) XE_REG((base) + 0x3fc)
#define RING_FORCE_TO_NONPRIV(base, i) XE_REG(((base) + 0x4d0) + (i) * 4)
@@ -188,6 +193,10 @@
#define PREEMPT_GPGPU_LEVEL_MASK PREEMPT_GPGPU_LEVEL(1, 1)
#define PREEMPT_3D_OBJECT_LEVEL REG_BIT(0)
+#define CS_GPR_DATA(base, n) XE_REG((base) + 0x600 + (n) * 4)
+#define CS_GPR_REG(base, n) CS_GPR_DATA((base), (n) * 2)
+#define CS_GPR_REG_UDW(base, n) CS_GPR_DATA((base), (n) * 2 + 1)
+
#define VDBOX_CGCTL3F08(base) XE_REG((base) + 0x3f08)
#define CG3DDISHRS_CLKGATE_DIS REG_BIT(5)
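Taken together with MI_MATH() and MI_STORE_REGISTER_MEM from xe_mi_commands.h above, the new CS_GPR_* register helpers let an ALU program run entirely on the command streamer. A hedged batch-buffer sketch showing only the dword layout (`base` is assumed to be an engine MMIO base and `addr` a GGTT-visible address; neither comes from the patch):

u32 cmds[] = {
	MI_MATH(4),				/* 4 ALU dwords follow */
	CS_ALU_INSTR_LOAD(SRCA, REG0),
	CS_ALU_INSTR_LOAD(SRCB, REG1),
	CS_ALU_INSTR_SUB,			/* ACCU = REG0 - REG1 */
	CS_ALU_INSTR_STORE(REG2, ACCU),
	MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT,/* spill GPR2 low dword */
	CS_GPR_REG(base, 2).addr,
	lower_32_bits(addr),
	upper_32_bits(addr),
};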
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index da1f198ac107..5cd5ab8529c5 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -62,7 +62,6 @@
#define LE_SSE_MASK REG_GENMASK(18, 17)
#define LE_SSE(value) REG_FIELD_PREP(LE_SSE_MASK, value)
#define LE_COS_MASK REG_GENMASK(16, 15)
-#define LE_COS(value) REG_FIELD_PREP(LE_COS_MASK)
#define LE_SCF_MASK REG_BIT(14)
#define LE_SCF(value) REG_FIELD_PREP(LE_SCF_MASK, value)
#define LE_PFM_MASK REG_GENMASK(13, 11)
@@ -157,6 +156,7 @@
#define XEHPG_SC_INSTDONE_EXTRA2 XE_REG_MCR(0x7108)
#define COMMON_SLICE_CHICKEN4 XE_REG(0x7300, XE_REG_OPTION_MASKED)
+#define SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE REG_BIT(12)
#define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6)
#define COMMON_SLICE_CHICKEN3 XE_REG(0x7304, XE_REG_OPTION_MASKED)
@@ -392,6 +392,18 @@
#define XEHP_L3NODEARBCFG XE_REG_MCR(0xb0b4)
#define XEHP_LNESPARE REG_BIT(19)
+#define LSN_VC_REG2 XE_REG_MCR(0xb0c8)
+#define LSN_LNI_WGT_MASK REG_GENMASK(31, 28)
+#define LSN_LNI_WGT(value) REG_FIELD_PREP(LSN_LNI_WGT_MASK, value)
+#define LSN_LNE_WGT_MASK REG_GENMASK(27, 24)
+#define LSN_LNE_WGT(value) REG_FIELD_PREP(LSN_LNE_WGT_MASK, value)
+#define LSN_DIM_X_WGT_MASK REG_GENMASK(23, 20)
+#define LSN_DIM_X_WGT(value) REG_FIELD_PREP(LSN_DIM_X_WGT_MASK, value)
+#define LSN_DIM_Y_WGT_MASK REG_GENMASK(19, 16)
+#define LSN_DIM_Y_WGT(value) REG_FIELD_PREP(LSN_DIM_Y_WGT_MASK, value)
+#define LSN_DIM_Z_WGT_MASK REG_GENMASK(15, 12)
+#define LSN_DIM_Z_WGT(value) REG_FIELD_PREP(LSN_DIM_Z_WGT_MASK, value)
+
#define L3SQCREG2 XE_REG_MCR(0xb104)
#define COMPMEMRD256BOVRFETCHEN REG_BIT(20)
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 57944f90bbf6..994af591a2e8 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -11,7 +11,9 @@
#define CTX_RING_TAIL (0x06 + 1)
#define CTX_RING_START (0x08 + 1)
#define CTX_RING_CTL (0x0a + 1)
+#define CTX_BB_PER_CTX_PTR (0x12 + 1)
#define CTX_TIMESTAMP (0x22 + 1)
+#define CTX_TIMESTAMP_UDW (0x24 + 1)
#define CTX_INDIRECT_RING_STATE (0x26 + 1)
#define CTX_PDP0_UDW (0x30 + 1)
#define CTX_PDP0_LDW (0x32 + 1)
diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
index 8846eb9ce2a4..c7d5d782e3f9 100644
--- a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
@@ -21,6 +21,9 @@
#define BMG_PACKAGE_POWER_SKU XE_REG(0x138098)
#define BMG_PACKAGE_POWER_SKU_UNIT XE_REG(0x1380dc)
#define BMG_PACKAGE_ENERGY_STATUS XE_REG(0x138120)
+#define BMG_FAN_1_SPEED XE_REG(0x138140)
+#define BMG_FAN_2_SPEED XE_REG(0x138170)
+#define BMG_FAN_3_SPEED XE_REG(0x1381a0)
#define BMG_VRAM_TEMPERATURE XE_REG(0x1382c0)
#define BMG_PACKAGE_TEMPERATURE XE_REG(0x138434)
#define BMG_PACKAGE_RAPL_LIMIT XE_REG(0x138440)
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 9fde67ca989f..378dcd0fb414 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -60,7 +60,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
}
/* Evict to system. CCS data should be copied. */
- ret = xe_bo_evict(bo, true);
+ ret = xe_bo_evict(bo);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return ret;
@@ -252,7 +252,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
for_each_gt(__gt, xe, id)
xe_gt_sanitize(__gt);
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
/*
* Snapshotting the CTB and copying back a potentially old
* version seems risky, depending on what might have been
@@ -273,7 +273,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
goto cleanup_all;
}
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err) {
KUNIT_FAIL(test, "restore user err=%pe\n", ERR_PTR(err));
goto cleanup_all;
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index cedd3e88a6fb..c53f67ce4b0a 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -65,7 +65,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
* the exporter and the importer should be the same bo.
*/
swap(exported->ttm.base.dma_buf, dmabuf);
- ret = xe_bo_evict(exported, true);
+ ret = xe_bo_evict(exported);
swap(exported->ttm.base.dma_buf, dmabuf);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index d5fe0ea889ad..4a65e3103f77 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -202,8 +202,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
@@ -211,8 +210,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -222,8 +220,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(tiny)) {
KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
PTR_ERR(tiny));
@@ -512,7 +509,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
dma_fence_put(fence);
kunit_info(test, "Evict vram buffer object\n");
- ret = xe_bo_evict(vram_bo, true);
+ ret = xe_bo_evict(vram_bo);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return;
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index ef1e5256c56a..0e502feaca81 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -46,8 +46,11 @@ static void read_l3cc_table(struct xe_gt *gt,
unsigned int fw_ref, i;
u32 reg_val;
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n");
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n");
+ }
for (i = 0; i < info->num_mocs_regs; i++) {
if (!(i & 1)) {
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 64f9c936eea0..d99d91fe8aa9 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -55,6 +55,8 @@ static struct ttm_placement sys_placement = {
.placement = &sys_placement_flags,
};
+static struct ttm_placement purge_placement;
+
static const struct ttm_place tt_placement_flags[] = {
{
.fpfn = 0,
@@ -189,11 +191,18 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
static bool force_contiguous(u32 bo_flags)
{
+ if (bo_flags & XE_BO_FLAG_STOLEN)
+ return true; /* users expect this */
+ else if (bo_flags & XE_BO_FLAG_PINNED &&
+ !(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
+ return true; /* needs vmap */
+
/*
* For eviction / restore on suspend / resume objects pinned in VRAM
* must be contiguous, also only contiguous BOs support xe_bo_vmap.
*/
- return bo_flags & (XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+ return bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS &&
+ bo_flags & XE_BO_FLAG_PINNED;
}
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
@@ -281,6 +290,8 @@ int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
static void xe_evict_flags(struct ttm_buffer_object *tbo,
struct ttm_placement *placement)
{
+ struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm);
+ bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
struct xe_bo *bo;
if (!xe_bo_is_xe_bo(tbo)) {
@@ -290,7 +301,7 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
return;
}
- *placement = sys_placement;
+ *placement = device_unplugged ? purge_placement : sys_placement;
return;
}
@@ -300,6 +311,11 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
return;
}
+ if (device_unplugged && !tbo->base.dma_buf) {
+ *placement = purge_placement;
+ return;
+ }
+
/*
* For xe, sg bos that are evicted to system just triggers a
* rebind of the sg list upon subsequent validation to XE_PL_TT.
@@ -657,11 +673,20 @@ static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
ttm);
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
struct sg_table *sg;
xe_assert(xe, attach);
xe_assert(xe, ttm_bo->ttm);
+ if (device_unplugged && new_res->mem_type == XE_PL_SYSTEM &&
+ ttm_bo->sg) {
+ dma_resv_wait_timeout(ttm_bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
+ ttm_bo->sg = NULL;
+ }
+
if (new_res->mem_type == XE_PL_SYSTEM)
goto out;
@@ -898,79 +923,44 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
xe_pm_runtime_get_noresume(xe);
}
- if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
- /*
- * Kernel memory that is pinned should only be moved on suspend
- * / resume, some of the pinned memory is required for the
- * device to resume / use the GPU to move other evicted memory
- * (user memory) around. This likely could be optimized a bit
- * further where we find the minimum set of pinned memory
- * required for resume but for simplity doing a memcpy for all
- * pinned memory.
- */
- ret = xe_bo_vmap(bo);
- if (!ret) {
- ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);
-
- /* Create a new VMAP once kernel BO back in VRAM */
- if (!ret && resource_is_vram(new_mem)) {
- struct xe_vram_region *vram = res_to_mem_region(new_mem);
- void __iomem *new_addr = vram->mapping +
- (new_mem->start << PAGE_SHIFT);
+ if (move_lacks_source) {
+ u32 flags = 0;
- if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
- ret = -EINVAL;
- xe_pm_runtime_put(xe);
- goto out;
- }
+ if (mem_type_is_vram(new_mem->mem_type))
+ flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
+ else if (handle_system_ccs)
+ flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
- xe_assert(xe, new_mem->start ==
- bo->placements->fpfn);
-
- iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
- }
- }
+ fence = xe_migrate_clear(migrate, bo, new_mem, flags);
} else {
- if (move_lacks_source) {
- u32 flags = 0;
-
- if (mem_type_is_vram(new_mem->mem_type))
- flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
- else if (handle_system_ccs)
- flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
-
- fence = xe_migrate_clear(migrate, bo, new_mem, flags);
- }
- else
- fence = xe_migrate_copy(migrate, bo, bo, old_mem,
- new_mem, handle_system_ccs);
- if (IS_ERR(fence)) {
- ret = PTR_ERR(fence);
- xe_pm_runtime_put(xe);
- goto out;
- }
- if (!move_lacks_source) {
- ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
- true, new_mem);
- if (ret) {
- dma_fence_wait(fence, false);
- ttm_bo_move_null(ttm_bo, new_mem);
- ret = 0;
- }
- } else {
- /*
- * ttm_bo_move_accel_cleanup() may blow up if
- * bo->resource == NULL, so just attach the
- * fence and set the new resource.
- */
- dma_resv_add_fence(ttm_bo->base.resv, fence,
- DMA_RESV_USAGE_KERNEL);
+ fence = xe_migrate_copy(migrate, bo, bo, old_mem, new_mem,
+ handle_system_ccs);
+ }
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ xe_pm_runtime_put(xe);
+ goto out;
+ }
+ if (!move_lacks_source) {
+ ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true,
+ new_mem);
+ if (ret) {
+ dma_fence_wait(fence, false);
ttm_bo_move_null(ttm_bo, new_mem);
+ ret = 0;
}
-
- dma_fence_put(fence);
+ } else {
+ /*
+ * ttm_bo_move_accel_cleanup() may blow up if
+ * bo->resource == NULL, so just attach the
+ * fence and set the new resource.
+ */
+ dma_resv_add_fence(ttm_bo->base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ ttm_bo_move_null(ttm_bo, new_mem);
}
+ dma_fence_put(fence);
xe_pm_runtime_put(xe);
out:
@@ -1095,6 +1085,80 @@ out_unref:
}
/**
+ * xe_bo_notifier_prepare_pinned() - Prepare a pinned VRAM object to be backed
+ * up in system memory.
+ * @bo: The buffer object to prepare.
+ *
+ * On successful completion, the object backup pages are allocated. Expectation
+ * is that this is called from the PM notifier, prior to suspend/hibernation.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
+{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup;
+ int ret = 0;
+
+ xe_bo_lock(bo, false);
+
+ xe_assert(xe, !bo->backup_obj);
+
+ /*
+ * Since this is called from the PM notifier we might have raced with
+ * someone unpinning this after we dropped the pinned list lock and
+ * grabbing the above bo lock.
+ */
+ if (!xe_bo_is_pinned(bo))
+ goto out_unlock_bo;
+
+ if (!xe_bo_is_vram(bo))
+ goto out_unlock_bo;
+
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ goto out_unlock_bo;
+
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
+ if (IS_ERR(backup)) {
+ ret = PTR_ERR(backup);
+ goto out_unlock_bo;
+ }
+
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ ttm_bo_pin(&backup->ttm);
+ bo->backup_obj = backup;
+
+out_unlock_bo:
+ xe_bo_unlock(bo);
+ return ret;
+}
+
+/**
+ * xe_bo_notifier_unprepare_pinned() - Undo the previous prepare operation.
+ * @bo: The buffer object to undo the prepare for.
+ *
+ * Always returns 0. The backup object is removed, if still present. Expectation
+ * is that this is called from the PM notifier when undoing the prepare step.
+ *
+ * Return: Always returns 0.
+ */
+int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
+{
+ xe_bo_lock(bo, false);
+ if (bo->backup_obj) {
+ ttm_bo_unpin(&bo->backup_obj->ttm);
+ xe_bo_put(bo->backup_obj);
+ bo->backup_obj = NULL;
+ }
+ xe_bo_unlock(bo);
+
+ return 0;
+}
+
+/**
* xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
* @bo: The buffer object to move.
*
@@ -1107,59 +1171,99 @@ out_unref:
*/
int xe_bo_evict_pinned(struct xe_bo *bo)
{
- struct ttm_place place = {
- .mem_type = XE_PL_TT,
- };
- struct ttm_placement placement = {
- .placement = &place,
- .num_placement = 1,
- };
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .gfp_retry_mayfail = true,
- };
- struct ttm_resource *new_mem;
- int ret;
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup = bo->backup_obj;
+ bool backup_created = false;
+ bool unmap = false;
+ int ret = 0;
- xe_bo_assert_held(bo);
+ xe_bo_lock(bo, false);
- if (WARN_ON(!bo->ttm.resource))
- return -EINVAL;
+ if (WARN_ON(!bo->ttm.resource)) {
+ ret = -EINVAL;
+ goto out_unlock_bo;
+ }
- if (WARN_ON(!xe_bo_is_pinned(bo)))
- return -EINVAL;
+ if (WARN_ON(!xe_bo_is_pinned(bo))) {
+ ret = -EINVAL;
+ goto out_unlock_bo;
+ }
if (!xe_bo_is_vram(bo))
- return 0;
+ goto out_unlock_bo;
+
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ goto out_unlock_bo;
+
+ if (!backup) {
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
+ if (IS_ERR(backup)) {
+ ret = PTR_ERR(backup);
+ goto out_unlock_bo;
+ }
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ backup_created = true;
+ }
- ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
- if (ret)
- return ret;
+ if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
+ struct xe_migrate *migrate;
+ struct dma_fence *fence;
+
+ if (bo->tile)
+ migrate = bo->tile->migrate;
+ else
+ migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
+
+ ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
+ if (ret)
+ goto out_backup;
+
+ ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
+ if (ret)
+ goto out_backup;
- if (!bo->ttm.ttm) {
- bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0);
- if (!bo->ttm.ttm) {
- ret = -ENOMEM;
- goto err_res_free;
+ fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
+ backup->ttm.resource, false);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto out_backup;
}
- }
- ret = ttm_bo_populate(&bo->ttm, &ctx);
- if (ret)
- goto err_res_free;
+ dma_resv_add_fence(bo->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_resv_add_fence(backup->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
+ } else {
+ ret = xe_bo_vmap(backup);
+ if (ret)
+ goto out_backup;
- ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
- if (ret)
- goto err_res_free;
+ if (iosys_map_is_null(&bo->vmap)) {
+ ret = xe_bo_vmap(bo);
+ if (ret)
+ goto out_backup;
+ unmap = true;
+ }
- ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
- if (ret)
- goto err_res_free;
+ xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
+ bo->size);
+ }
- return 0;
+ if (!bo->backup_obj)
+ bo->backup_obj = backup;
-err_res_free:
- ttm_resource_free(&bo->ttm, &new_mem);
+out_backup:
+ xe_bo_vunmap(backup);
+ if (ret && backup_created)
+ xe_bo_put(backup);
+out_unlock_bo:
+ if (unmap)
+ xe_bo_vunmap(bo);
+ xe_bo_unlock(bo);
return ret;
}
@@ -1180,50 +1284,109 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
.interruptible = false,
.gfp_retry_mayfail = false,
};
- struct ttm_resource *new_mem;
- struct ttm_place *place = &bo->placements[0];
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup = bo->backup_obj;
+ bool unmap = false;
int ret;
- xe_bo_assert_held(bo);
+ if (!backup)
+ return 0;
- if (WARN_ON(!bo->ttm.resource))
- return -EINVAL;
+ xe_bo_lock(bo, false);
- if (WARN_ON(!xe_bo_is_pinned(bo)))
- return -EINVAL;
+ if (!xe_bo_is_pinned(backup)) {
+ ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx);
+ if (ret)
+ goto out_unlock_bo;
+ }
- if (WARN_ON(xe_bo_is_vram(bo)))
- return -EINVAL;
+ if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
+ struct xe_migrate *migrate;
+ struct dma_fence *fence;
- if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo)))
- return -EINVAL;
+ if (bo->tile)
+ migrate = bo->tile->migrate;
+ else
+ migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
- if (!mem_type_is_vram(place->mem_type))
- return 0;
+ ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
+ if (ret)
+ goto out_unlock_bo;
- ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
- if (ret)
- return ret;
+ ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
+ if (ret)
+ goto out_unlock_bo;
- ret = ttm_bo_populate(&bo->ttm, &ctx);
- if (ret)
- goto err_res_free;
+ fence = xe_migrate_copy(migrate, backup, bo,
+ backup->ttm.resource, bo->ttm.resource,
+ false);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto out_unlock_bo;
+ }
- ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
- if (ret)
- goto err_res_free;
+ dma_resv_add_fence(bo->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_resv_add_fence(backup->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
+ } else {
+ ret = xe_bo_vmap(backup);
+ if (ret)
+ goto out_unlock_bo;
- ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
- if (ret)
- goto err_res_free;
+ if (iosys_map_is_null(&bo->vmap)) {
+ ret = xe_bo_vmap(bo);
+ if (ret)
+ goto out_backup;
+ unmap = true;
+ }
- return 0;
+ xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,
+ bo->size);
+ }
+
+ bo->backup_obj = NULL;
-err_res_free:
- ttm_resource_free(&bo->ttm, &new_mem);
+out_backup:
+ xe_bo_vunmap(backup);
+ if (!bo->backup_obj) {
+ if (xe_bo_is_pinned(backup))
+ ttm_bo_unpin(&backup->ttm);
+ xe_bo_put(backup);
+ }
+out_unlock_bo:
+ if (unmap)
+ xe_bo_vunmap(bo);
+ xe_bo_unlock(bo);
return ret;
}
+int xe_bo_dma_unmap_pinned(struct xe_bo *bo)
+{
+ struct ttm_buffer_object *ttm_bo = &bo->ttm;
+ struct ttm_tt *tt = ttm_bo->ttm;
+
+ if (tt) {
+ struct xe_ttm_tt *xe_tt = container_of(tt, typeof(*xe_tt), ttm);
+
+ if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
+ dma_buf_unmap_attachment(ttm_bo->base.import_attach,
+ ttm_bo->sg,
+ DMA_BIDIRECTIONAL);
+ ttm_bo->sg = NULL;
+ xe_tt->sg = NULL;
+ } else if (xe_tt->sg) {
+ dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
+ DMA_BIDIRECTIONAL, 0);
+ sg_free_table(xe_tt->sg);
+ xe_tt->sg = NULL;
+ }
+ }
+
+ return 0;
+}
+
static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
unsigned long page_offset)
{
@@ -1371,6 +1534,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
struct xe_res_cursor cursor;
struct xe_vram_region *vram;
int bytes_left = len;
+ int err = 0;
xe_bo_assert_held(bo);
xe_device_assert_mem_access(xe);
@@ -1378,9 +1542,14 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
if (!mem_type_is_vram(ttm_bo->resource->mem_type))
return -EIO;
- /* FIXME: Use GPU for non-visible VRAM */
- if (!xe_ttm_resource_visible(ttm_bo->resource))
- return -EIO;
+ if (!xe_ttm_resource_visible(ttm_bo->resource) || len >= SZ_16K) {
+ struct xe_migrate *migrate =
+ mem_type_to_migrate(xe, ttm_bo->resource->mem_type);
+
+ err = xe_migrate_access_memory(migrate, bo, offset, buf, len,
+ write);
+ goto out;
+ }
vram = res_to_mem_region(ttm_bo->resource);
xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
@@ -1404,7 +1573,8 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
xe_res_next(&cursor, PAGE_SIZE);
} while (bytes_left);
- return len;
+out:
+ return err ?: len;
}
const struct ttm_device_funcs xe_ttm_funcs = {
@@ -1448,6 +1618,9 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
if (bo->vm && xe_bo_is_user(bo))
xe_vm_put(bo->vm);
+ if (bo->parent_obj)
+ xe_bo_put(bo->parent_obj);
+
mutex_lock(&xe->mem_access.vram_userfault.lock);
if (!list_empty(&bo->vram_userfault_link))
list_del(&bo->vram_userfault_link);
@@ -1947,7 +2120,7 @@ struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
flags |= XE_BO_FLAG_GGTT;
bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
- flags | XE_BO_FLAG_NEEDS_CPU_ACCESS,
+ flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
alignment);
if (IS_ERR(bo))
return bo;
@@ -2049,7 +2222,8 @@ int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, str
struct xe_bo *bo;
u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
- dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE;
+ dst_flags |= (*src)->flags & (XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
xe_assert(xe, IS_DGFX(xe));
xe_assert(xe, !(*src)->vmap.is_iomem);
@@ -2073,10 +2247,16 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
{
struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
- if (res->mem_type == XE_PL_STOLEN)
+ switch (res->mem_type) {
+ case XE_PL_STOLEN:
return xe_ttm_stolen_gpu_offset(xe);
-
- return res_to_mem_region(res)->dpa_base;
+ case XE_PL_TT:
+ case XE_PL_SYSTEM:
+ return 0;
+ default:
+ return res_to_mem_region(res)->dpa_base;
+ }
+ return 0;
}
/**
@@ -2102,12 +2282,9 @@ int xe_bo_pin_external(struct xe_bo *bo)
if (err)
return err;
- if (xe_bo_is_vram(bo)) {
- spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- }
+ spin_lock(&xe->pinned.lock);
+ list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
+ spin_unlock(&xe->pinned.lock);
}
ttm_bo_pin(&bo->ttm);
@@ -2149,25 +2326,12 @@ int xe_bo_pin(struct xe_bo *bo)
if (err)
return err;
- /*
- * For pinned objects in on DGFX, which are also in vram, we expect
- * these to be in contiguous VRAM memory. Required eviction / restore
- * during suspend / resume (force restore to same physical address).
- */
- if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- if (mem_type_is_vram(place->mem_type)) {
- xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
-
- place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
- vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
- place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
- }
- }
-
if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+ if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)
+ list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present);
+ else
+ list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present);
spin_unlock(&xe->pinned.lock);
}
@@ -2231,6 +2395,13 @@ void xe_bo_unpin(struct xe_bo *bo)
xe_assert(xe, !list_empty(&bo->pinned_link));
list_del_init(&bo->pinned_link);
spin_unlock(&xe->pinned.lock);
+
+ if (bo->backup_obj) {
+ if (xe_bo_is_pinned(bo->backup_obj))
+ ttm_bo_unpin(&bo->backup_obj->ttm);
+ xe_bo_put(bo->backup_obj);
+ bo->backup_obj = NULL;
+ }
}
ttm_bo_unpin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
@@ -2398,7 +2569,7 @@ static int gem_create_user_ext_set_property(struct xe_device *xe,
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -2435,7 +2606,7 @@ static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo,
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -2759,19 +2930,17 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
/**
* xe_bo_evict - Evict an object to evict placement
* @bo: The buffer object to migrate.
- * @force_alloc: Set force_alloc in ttm_operation_ctx
*
* On successful completion, the object memory will be moved to evict
* placement. This function blocks until the object has been fully moved.
*
* Return: 0 on success. Negative error code on failure.
*/
-int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
+int xe_bo_evict(struct xe_bo *bo)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
- .force_alloc = force_alloc,
.gfp_retry_mayfail = true,
};
struct ttm_placement placement;
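The xe_bo.c changes above rework pinned-object suspend/resume around a per-BO backup object instead of pushing the pinned BO itself through TTM placements. A condensed flow sketch of the two paths (summary of the code above, not additional logic):

/*
 * suspend (xe_bo_evict_pinned):           resume (xe_bo_restore_pinned):
 *   backup = system-memory BO               validate backup if not pinned
 *   if user BO or PINNED_LATE_RESTORE:      if user BO or PINNED_LATE_RESTORE:
 *       xe_migrate_copy(bo -> backup)           xe_migrate_copy(backup -> bo)
 *   else:                                   else:
 *       vmap both, memcpy bo -> backup          vmap both, memcpy backup -> bo
 *   bo->backup_obj = backup                 bo->backup_obj = NULL, drop backup
 */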
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index ec3e4446d027..02ada1fb8a23 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -39,20 +39,23 @@
#define XE_BO_FLAG_NEEDS_64K BIT(15)
#define XE_BO_FLAG_NEEDS_2M BIT(16)
#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
-#define XE_BO_FLAG_GGTT0 BIT(18)
-#define XE_BO_FLAG_GGTT1 BIT(19)
-#define XE_BO_FLAG_GGTT2 BIT(20)
-#define XE_BO_FLAG_GGTT3 BIT(21)
-#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
- XE_BO_FLAG_GGTT1 | \
- XE_BO_FLAG_GGTT2 | \
- XE_BO_FLAG_GGTT3)
-#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(22)
+#define XE_BO_FLAG_PINNED_NORESTORE BIT(18)
+#define XE_BO_FLAG_PINNED_LATE_RESTORE BIT(19)
+#define XE_BO_FLAG_GGTT0 BIT(20)
+#define XE_BO_FLAG_GGTT1 BIT(21)
+#define XE_BO_FLAG_GGTT2 BIT(22)
+#define XE_BO_FLAG_GGTT3 BIT(23)
+#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(24)
/* this one is trigger internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
#define XE_BO_FLAG_INTERNAL_64K BIT(31)
+#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
+ XE_BO_FLAG_GGTT1 | \
+ XE_BO_FLAG_GGTT2 | \
+ XE_BO_FLAG_GGTT3)
+
#define XE_BO_FLAG_GGTTx(tile) \
(XE_BO_FLAG_GGTT0 << (tile)->id)
@@ -271,11 +274,15 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res);
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
-int xe_bo_evict(struct xe_bo *bo, bool force_alloc);
+int xe_bo_evict(struct xe_bo *bo);
int xe_bo_evict_pinned(struct xe_bo *bo);
+int xe_bo_notifier_prepare_pinned(struct xe_bo *bo);
+int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);
+int xe_bo_dma_unmap_pinned(struct xe_bo *bo);
+
extern const struct ttm_device_funcs xe_ttm_funcs;
extern const char *const xe_mem_type_to_name[];
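The two new flags distinguish how a pinned VRAM object behaves across suspend: PINNED_NORESTORE marks contents that need not be preserved, while PINNED_LATE_RESTORE defers the restore until the migration engine is available again. A hypothetical allocation showing the intent (helper, size and flag choice are illustrative only):

	/* scratch-style object: keep it pinned in VRAM, but don't bother
	 * saving/restoring its contents across suspend */
	bo = xe_managed_bo_create_pin_map(xe, tile, SZ_64K,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);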
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 6a40eedd9db1..ed3746d32b27 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -10,28 +10,103 @@
#include "xe_ggtt.h"
#include "xe_tile.h"
+typedef int (*xe_pinned_fn)(struct xe_bo *bo);
+
+static int xe_bo_apply_to_pinned(struct xe_device *xe,
+ struct list_head *pinned_list,
+ struct list_head *new_list,
+ const xe_pinned_fn pinned_fn)
+{
+ LIST_HEAD(still_in_list);
+ struct xe_bo *bo;
+ int ret = 0;
+
+ spin_lock(&xe->pinned.lock);
+ while (!ret) {
+ bo = list_first_entry_or_null(pinned_list, typeof(*bo),
+ pinned_link);
+ if (!bo)
+ break;
+ xe_bo_get(bo);
+ list_move_tail(&bo->pinned_link, &still_in_list);
+ spin_unlock(&xe->pinned.lock);
+
+ ret = pinned_fn(bo);
+ if (ret && pinned_list != new_list) {
+ spin_lock(&xe->pinned.lock);
+ /*
+ * We might no longer be pinned, since PM notifier can
+ * call this. If the pinned link is now empty, keep it
+ * that way.
+ */
+ if (!list_empty(&bo->pinned_link))
+ list_move(&bo->pinned_link, pinned_list);
+ spin_unlock(&xe->pinned.lock);
+ }
+ xe_bo_put(bo);
+ spin_lock(&xe->pinned.lock);
+ }
+ list_splice_tail(&still_in_list, new_list);
+ spin_unlock(&xe->pinned.lock);
+
+ return ret;
+}
+
/**
- * xe_bo_evict_all - evict all BOs from VRAM
+ * xe_bo_notifier_prepare_all_pinned() - Pre-allocate the backing pages for all
+ * pinned VRAM objects which need to be saved.
+ * @xe: xe device
+ *
+ * Should be called from PM notifier when preparing for s3/s4.
*
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe)
+{
+ int ret;
+
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_notifier_prepare_pinned);
+ if (!ret)
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_notifier_prepare_pinned);
+
+ return ret;
+}
+
+/**
+ * xe_bo_notifier_unprepare_all_pinned() - Remove the backing pages for all
+ * pinned VRAM objects which have been restored.
* @xe: xe device
*
- * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
- * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
- * All eviction magic done via TTM calls.
+ * Should be called from PM notifier after exiting s3/s4 (either on success or
+ * failure).
+ */
+void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe)
+{
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_notifier_unprepare_pinned);
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_notifier_unprepare_pinned);
+}
+
+/**
+ * xe_bo_evict_all_user - evict all non-pinned user BOs from VRAM
+ * @xe: xe device
*
- * Evict == move VRAM BOs to temporary (typically system) memory.
+ * Evict non-pinned user BOs (via GPU).
*
- * This function should be called before the device goes into a suspend state
- * where the VRAM loses power.
+ * Evict == move VRAM BOs to temporary (typically system) memory.
*/
-int xe_bo_evict_all(struct xe_device *xe)
+int xe_bo_evict_all_user(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
- struct xe_bo *bo;
- struct xe_tile *tile;
- struct list_head still_in_list;
u32 mem_type;
- u8 id;
int ret;
/* User memory */
@@ -57,34 +132,38 @@ int xe_bo_evict_all(struct xe_device *xe)
}
}
- /* Pinned user memory in VRAM */
- INIT_LIST_HEAD(&still_in_list);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.external_vram,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &still_in_list);
- spin_unlock(&xe->pinned.lock);
+ return 0;
+}
- xe_bo_lock(bo, false);
- ret = xe_bo_evict_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret) {
- spin_lock(&xe->pinned.lock);
- list_splice_tail(&still_in_list,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- return ret;
- }
+/**
+ * xe_bo_evict_all - evict all BOs from VRAM
+ * @xe: xe device
+ *
+ * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
+ * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
+ * All eviction magic done via TTM calls.
+ *
+ * Evict == move VRAM BOs to temporary (typically system) memory.
+ *
+ * This function should be called before the device goes into a suspend state
+ * where the VRAM loses power.
+ */
+int xe_bo_evict_all(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ u8 id;
+ int ret;
- spin_lock(&xe->pinned.lock);
- }
- list_splice_tail(&still_in_list, &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
+ ret = xe_bo_evict_all_user(xe);
+ if (ret)
+ return ret;
+
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.evicted, xe_bo_evict_pinned);
+
+ if (!ret)
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.evicted, xe_bo_evict_pinned);
/*
* Wait for all user BO to be evicted as those evictions depend on the
@@ -93,32 +172,49 @@ int xe_bo_evict_all(struct xe_device *xe)
for_each_tile(tile, xe, id)
xe_tile_migrate_wait(tile);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.kernel_bo_present,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &xe->pinned.evicted);
- spin_unlock(&xe->pinned.lock);
+ if (ret)
+ return ret;
- xe_bo_lock(bo, false);
- ret = xe_bo_evict_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret)
- return ret;
+ return xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.evicted,
+ xe_bo_evict_pinned);
+}
- spin_lock(&xe->pinned.lock);
+static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ int ret;
+
+ ret = xe_bo_restore_pinned(bo);
+ if (ret)
+ return ret;
+
+ if (bo->flags & XE_BO_FLAG_GGTT) {
+ struct xe_tile *tile;
+ u8 id;
+
+ for_each_tile(tile, xe_bo_device(bo), id) {
+ if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
+ continue;
+
+ mutex_lock(&tile->mem.ggtt->lock);
+ xe_ggtt_map_bo(tile->mem.ggtt, bo);
+ mutex_unlock(&tile->mem.ggtt->lock);
+ }
}
- spin_unlock(&xe->pinned.lock);
+
+ /*
+ * We expect validate to trigger a move to VRAM and our move code
+ * should setup the iosys map.
+ */
+ xe_assert(xe, !(bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) ||
+ !iosys_map_is_null(&bo->vmap));
return 0;
}
/**
- * xe_bo_restore_kernel - restore kernel BOs to VRAM
+ * xe_bo_restore_early - restore early phase kernel BOs to VRAM
*
* @xe: xe device
*
@@ -128,111 +224,130 @@ int xe_bo_evict_all(struct xe_device *xe)
* This function should be called early, before trying to init the GT, on device
* resume.
*/
-int xe_bo_restore_kernel(struct xe_device *xe)
+int xe_bo_restore_early(struct xe_device *xe)
{
- struct xe_bo *bo;
- int ret;
+ return xe_bo_apply_to_pinned(xe, &xe->pinned.early.evicted,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_restore_and_map_ggtt);
+}
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.evicted,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
- spin_unlock(&xe->pinned.lock);
+/**
+ * xe_bo_restore_late - restore pinned late phase BOs
+ *
+ * @xe: xe device
+ *
+ * Move pinned user and kernel BOs which can use blitter from temporary
+ * (typically system) memory to VRAM. All moves done via TTM calls.
+ *
+ * This function should be called late, after GT init, on device resume.
+ */
+int xe_bo_restore_late(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ int ret, id;
- xe_bo_lock(bo, false);
- ret = xe_bo_restore_pinned(bo);
- xe_bo_unlock(bo);
- if (ret) {
- xe_bo_put(bo);
- return ret;
- }
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.evicted,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_restore_and_map_ggtt);
- if (bo->flags & XE_BO_FLAG_GGTT) {
- struct xe_tile *tile;
- u8 id;
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
- for_each_tile(tile, xe, id) {
- if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
- continue;
+ if (ret)
+ return ret;
- mutex_lock(&tile->mem.ggtt->lock);
- xe_ggtt_map_bo(tile->mem.ggtt, bo);
- mutex_unlock(&tile->mem.ggtt->lock);
- }
- }
+ if (!IS_DGFX(xe))
+ return 0;
- /*
- * We expect validate to trigger a move VRAM and our move code
- * should setup the iosys map.
- */
- xe_assert(xe, !iosys_map_is_null(&bo->vmap));
+ /* Pinned user memory in VRAM should be validated on resume */
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_restore_pinned);
- xe_bo_put(bo);
+ /* Wait for restore to complete */
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
- spin_lock(&xe->pinned.lock);
- }
- spin_unlock(&xe->pinned.lock);
+ return ret;
+}
- return 0;
+static void xe_bo_pci_dev_remove_pinned(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ unsigned int id;
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_dma_unmap_pinned);
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
}
/**
- * xe_bo_restore_user - restore pinned user BOs to VRAM
- *
- * @xe: xe device
+ * xe_bo_pci_dev_remove_all() - Handle bos when the pci_device is about to be removed
+ * @xe: The xe device.
*
- * Move pinned user BOs from temporary (typically system) memory to VRAM via
- * CPU. All moves done via TTM calls.
+ * On pci_device removal we need to drop all dma mappings and move
+ * the data of exported bos out to system. This includes SVM bos and
+ * exported dma-buf bos. This is done by evicting all bos, but
+ * the evict placement in xe_evict_flags() is chosen such that all
+ * bos except those mentioned are purged, and thus their memory
+ * is released.
*
- * This function should be called late, after GT init, on device resume.
+ * For pinned bos, we're unmapping dma.
*/
-int xe_bo_restore_user(struct xe_device *xe)
+void xe_bo_pci_dev_remove_all(struct xe_device *xe)
{
- struct xe_bo *bo;
- struct xe_tile *tile;
- struct list_head still_in_list;
- u8 id;
- int ret;
+ unsigned int mem_type;
- if (!IS_DGFX(xe))
- return 0;
+ /*
+ * Move pagemap bos and exported dma-buf to system, and
+ * purge everything else.
+ */
+ for (mem_type = XE_PL_VRAM1; mem_type >= XE_PL_TT; --mem_type) {
+ struct ttm_resource_manager *man =
+ ttm_manager_type(&xe->ttm, mem_type);
- /* Pinned user memory in VRAM should be validated on resume */
- INIT_LIST_HEAD(&still_in_list);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.external_vram,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- list_move_tail(&bo->pinned_link, &still_in_list);
- xe_bo_get(bo);
- spin_unlock(&xe->pinned.lock);
+ if (man) {
+ int ret = ttm_resource_manager_evict_all(&xe->ttm, man);
- xe_bo_lock(bo, false);
- ret = xe_bo_restore_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret) {
- spin_lock(&xe->pinned.lock);
- list_splice_tail(&still_in_list,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- return ret;
+ drm_WARN_ON(&xe->drm, ret);
}
-
- spin_lock(&xe->pinned.lock);
}
- list_splice_tail(&still_in_list, &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- /* Wait for restore to complete */
- for_each_tile(tile, xe, id)
- xe_tile_migrate_wait(tile);
+ xe_bo_pci_dev_remove_pinned(xe);
+}
- return 0;
+static void xe_bo_pinned_fini(void *arg)
+{
+ struct xe_device *xe = arg;
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_dma_unmap_pinned);
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_dma_unmap_pinned);
+}
+
+/**
+ * xe_bo_pinned_init() - Initialize pinned bo tracking
+ * @xe: The xe device.
+ *
+ * Initializes the lists and locks required for pinned bo
+ * tracking and registers a callback to dma-unmap
+ * any remaining pinned bos on pci device removal.
+ *
+ * Return: %0 on success, negative error code on error.
+ */
+int xe_bo_pinned_init(struct xe_device *xe)
+{
+ spin_lock_init(&xe->pinned.lock);
+ INIT_LIST_HEAD(&xe->pinned.early.kernel_bo_present);
+ INIT_LIST_HEAD(&xe->pinned.early.evicted);
+ INIT_LIST_HEAD(&xe->pinned.late.kernel_bo_present);
+ INIT_LIST_HEAD(&xe->pinned.late.evicted);
+ INIT_LIST_HEAD(&xe->pinned.late.external);
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_bo_pinned_fini, xe);
}
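
For orientation, a minimal sketch (not part of the patch) of how a resume path could sequence the split restore API above; the caller and example_gt_init() are hypothetical placeholders, and the ordering comes from the kernel-doc: xe_bo_restore_early() runs before GT init, xe_bo_restore_late() after it.

	/* Illustrative sketch only; example_resume()/example_gt_init() are placeholders. */
	static int example_resume(struct xe_device *xe)
	{
		int err;

		/* Kernel BOs needed to bring up the GT (incl. GGTT mappings) first. */
		err = xe_bo_restore_early(xe);
		if (err)
			return err;

		err = example_gt_init(xe);	/* placeholder for GT bring-up */
		if (err)
			return err;

		/* Blitter is usable now; restore late kernel and pinned user BOs. */
		return xe_bo_restore_late(xe);
	}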
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.h b/drivers/gpu/drm/xe/xe_bo_evict.h
index 746894798852..e8385cb7f5e9 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.h
+++ b/drivers/gpu/drm/xe/xe_bo_evict.h
@@ -9,7 +9,13 @@
struct xe_device;
int xe_bo_evict_all(struct xe_device *xe);
-int xe_bo_restore_kernel(struct xe_device *xe);
-int xe_bo_restore_user(struct xe_device *xe);
+int xe_bo_evict_all_user(struct xe_device *xe);
+int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe);
+void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe);
+int xe_bo_restore_early(struct xe_device *xe);
+int xe_bo_restore_late(struct xe_device *xe);
+void xe_bo_pci_dev_remove_all(struct xe_device *xe);
+
+int xe_bo_pinned_init(struct xe_device *xe);
#endif
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 15a92e3d4898..eb5e83c5f233 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -28,6 +28,10 @@ struct xe_vm;
struct xe_bo {
/** @ttm: TTM base buffer object */
struct ttm_buffer_object ttm;
+ /** @backup_obj: The backup object when pinned and suspended (vram only) */
+ struct xe_bo *backup_obj;
+ /** @parent_obj: Ref to parent bo if this is a backup_obj */
+ struct xe_bo *parent_obj;
/** @size: Size of this buffer object */
size_t size;
/** @flags: flags for this buffer object */
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
new file mode 100644
index 000000000000..cb9f175c89a1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/configfs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "xe_configfs.h"
+#include "xe_module.h"
+
+/**
+ * DOC: Xe Configfs
+ *
+ * Overview
+ * =========
+ *
+ * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a
+ * configfs subsystem called ``'xe'`` that creates a directory in the mounted
+ * configfs directory. The user can create devices under this directory and
+ * configure them as necessary.
+ * See Documentation/filesystems/configfs.rst for more information about how configfs works.
+ *
+ * Create devices
+ * ===============
+ *
+ * In order to create a device, the user has to create a directory inside ``'xe'``::
+ *
+ * mkdir /sys/kernel/config/xe/0000:03:00.0/
+ *
+ * Every device created is populated by the driver with entries that can be
+ * used to configure it::
+ *
+ * /sys/kernel/config/xe/
+ * .. 0000:03:00.0/
+ * ... survivability_mode
+ *
+ * Configure Attributes
+ * ====================
+ *
+ * Survivability mode:
+ * -------------------
+ *
+ * Enable survivability mode on supported cards. This setting only takes
+ * effect when probing the device. Example to enable it::
+ *
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
+ * # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind (Enters survivability mode if supported)
+ *
+ * Remove devices
+ * ==============
+ *
+ * The created device directories can be removed using ``rmdir``::
+ *
+ * rmdir /sys/kernel/config/xe/0000:03:00.0/
+ */
+
+struct xe_config_device {
+ struct config_group group;
+
+ bool survivability_mode;
+
+ /* protects attributes */
+ struct mutex lock;
+};
+
+static struct xe_config_device *to_xe_config_device(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct xe_config_device, group);
+}
+
+static ssize_t survivability_mode_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+
+ return sprintf(page, "%d\n", dev->survivability_mode);
+}
+
+static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ bool survivability_mode;
+ int ret;
+
+ ret = kstrtobool(page, &survivability_mode);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+ dev->survivability_mode = survivability_mode;
+ mutex_unlock(&dev->lock);
+
+ return len;
+}
+
+CONFIGFS_ATTR(, survivability_mode);
+
+static struct configfs_attribute *xe_config_device_attrs[] = {
+ &attr_survivability_mode,
+ NULL,
+};
+
+static void xe_config_device_release(struct config_item *item)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+
+ mutex_destroy(&dev->lock);
+ kfree(dev);
+}
+
+static struct configfs_item_operations xe_config_device_ops = {
+ .release = xe_config_device_release,
+};
+
+static const struct config_item_type xe_config_device_type = {
+ .ct_item_ops = &xe_config_device_ops,
+ .ct_attrs = xe_config_device_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *xe_config_make_device_group(struct config_group *group,
+ const char *name)
+{
+ unsigned int domain, bus, slot, function;
+ struct xe_config_device *dev;
+ struct pci_dev *pdev;
+ int ret;
+
+ ret = sscanf(name, "%04x:%02x:%02x.%x", &domain, &bus, &slot, &function);
+ if (ret != 4)
+ return ERR_PTR(-EINVAL);
+
+ pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
+ if (!pdev)
+ return ERR_PTR(-EINVAL);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&dev->group, name, &xe_config_device_type);
+
+ mutex_init(&dev->lock);
+
+ return &dev->group;
+}
+
+static struct configfs_group_operations xe_config_device_group_ops = {
+ .make_group = xe_config_make_device_group,
+};
+
+static const struct config_item_type xe_configfs_type = {
+ .ct_group_ops = &xe_config_device_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem xe_configfs = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "xe",
+ .ci_type = &xe_configfs_type,
+ },
+ },
+};
+
+static struct xe_config_device *configfs_find_group(struct pci_dev *pdev)
+{
+ struct config_item *item;
+ char name[64];
+
+ snprintf(name, sizeof(name), "%04x:%02x:%02x.%x", pci_domain_nr(pdev->bus),
+ pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+ mutex_lock(&xe_configfs.su_mutex);
+ item = config_group_find_item(&xe_configfs.su_group, name);
+ mutex_unlock(&xe_configfs.su_mutex);
+
+ if (!item)
+ return NULL;
+
+ return to_xe_config_device(item);
+}
+
+/**
+ * xe_configfs_get_survivability_mode - get configfs survivability mode attribute
+ * @pdev: pci device
+ *
+ * Find the configfs group that belongs to the pci device and return
+ * the survivability mode attribute.
+ *
+ * Return: survivability mode if config group is found, false otherwise
+ */
+bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+ bool mode;
+
+ if (!dev)
+ return false;
+
+ mode = dev->survivability_mode;
+ config_item_put(&dev->group.cg_item);
+
+ return mode;
+}
+
+/**
+ * xe_configfs_clear_survivability_mode - clear configfs survivability mode attribute
+ * @pdev: pci device
+ *
+ * Find the configfs group that belongs to the pci device and clear the
+ * survivability mode attribute.
+ */
+void xe_configfs_clear_survivability_mode(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+
+ if (!dev)
+ return;
+
+ mutex_lock(&dev->lock);
+ dev->survivability_mode = 0;
+ mutex_unlock(&dev->lock);
+
+ config_item_put(&dev->group.cg_item);
+}
+
+int __init xe_configfs_init(void)
+{
+ struct config_group *root = &xe_configfs.su_group;
+ int ret;
+
+ config_group_init(root);
+ mutex_init(&xe_configfs.su_mutex);
+ ret = configfs_register_subsystem(&xe_configfs);
+ if (ret) {
+ pr_err("Error %d while registering %s subsystem\n",
+ ret, root->cg_item.ci_namebuf);
+ return ret;
+ }
+
+ return 0;
+}
+
+void __exit xe_configfs_exit(void)
+{
+ configfs_unregister_subsystem(&xe_configfs);
+}
+
diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h
new file mode 100644
index 000000000000..d7d041ec2611
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_configfs.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#ifndef _XE_CONFIGFS_H_
+#define _XE_CONFIGFS_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
+int xe_configfs_init(void);
+void xe_configfs_exit(void);
+bool xe_configfs_get_survivability_mode(struct pci_dev *pdev);
+void xe_configfs_clear_survivability_mode(struct pci_dev *pdev);
+#else
+static inline int xe_configfs_init(void) { return 0; };
+static inline void xe_configfs_exit(void) {};
+static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; };
+static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) {};
+#endif
+
+#endif
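
A hedged usage sketch of the configfs helpers declared above, from a hypothetical probe-time caller; only the helper names and their bool/void signatures are taken from this header, the call site and one-shot clearing are assumptions.

	/* Sketch only: check and consume the per-device survivability request. */
	static void example_handle_survivability(struct pci_dev *pdev)
	{
		if (!xe_configfs_get_survivability_mode(pdev))
			return;

		/* ... switch the probe flow into survivability mode ... */

		/* Clearing afterwards is an assumption about intended one-shot use. */
		xe_configfs_clear_survivability_mode(pdev);
	}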
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 81b9d9bb3f57..7a8af2311318 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -80,7 +80,8 @@ static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
return &q->gt->uc.guc;
}
-static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
+static ssize_t __xe_devcoredump_read(char *buffer, ssize_t count,
+ ssize_t start,
struct xe_devcoredump *coredump)
{
struct xe_device *xe;
@@ -94,7 +95,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
ss = &coredump->snapshot;
iter.data = buffer;
- iter.start = 0;
+ iter.start = start;
iter.remain = count;
p = drm_coredump_printer(&iter);
@@ -168,12 +169,16 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
ss->vm = NULL;
}
+#define XE_DEVCOREDUMP_CHUNK_MAX (SZ_512M + SZ_1G)
+
static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
struct xe_devcoredump *coredump = data;
struct xe_devcoredump_snapshot *ss;
ssize_t byte_copied;
+ u32 chunk_offset;
+ ssize_t new_chunk_position;
if (!coredump)
return -ENODEV;
@@ -183,6 +188,9 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
/* Ensure delayed work is captured before continuing */
flush_work(&ss->work);
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
+ xe_pm_runtime_get(gt_to_xe(ss->gt));
+
mutex_lock(&coredump->lock);
if (!ss->read.buffer) {
@@ -195,12 +203,29 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
return 0;
}
+ new_chunk_position = div_u64_rem(offset,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ &chunk_offset);
+
+ if (offset >= ss->read.chunk_position + XE_DEVCOREDUMP_CHUNK_MAX ||
+ offset < ss->read.chunk_position) {
+ ss->read.chunk_position = new_chunk_position *
+ XE_DEVCOREDUMP_CHUNK_MAX;
+
+ __xe_devcoredump_read(ss->read.buffer,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ ss->read.chunk_position, coredump);
+ }
+
byte_copied = count < ss->read.size - offset ? count :
ss->read.size - offset;
- memcpy(buffer, ss->read.buffer + offset, byte_copied);
+ memcpy(buffer, ss->read.buffer + chunk_offset, byte_copied);
mutex_unlock(&coredump->lock);
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
+ xe_pm_runtime_put(gt_to_xe(ss->gt));
+
return byte_copied;
}
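
A worked example of the chunk arithmetic above, assuming a read at offset 2 GiB: XE_DEVCOREDUMP_CHUNK_MAX is SZ_512M + SZ_1G = 1.5 GiB, so div_u64_rem() yields chunk 1 with a 512 MiB remainder, the buffer is re-rendered starting at byte 1.5 GiB of the dump, and the copy is served from buffer + 512 MiB.

	u32 chunk_offset;
	ssize_t pos;

	/* offset = 2 GiB, chunk size = 1.5 GiB */
	pos = div_u64_rem(SZ_2G, XE_DEVCOREDUMP_CHUNK_MAX, &chunk_offset);
	/* pos == 1, chunk_offset == SZ_512M                                 */
	/* ss->read.chunk_position = 1 * XE_DEVCOREDUMP_CHUNK_MAX = 1.5 GiB  */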
@@ -254,17 +279,32 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
- xe_pm_runtime_put(xe);
+ ss->read.chunk_position = 0;
/* Calculate devcoredump size */
- ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
-
- ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
- if (!ss->read.buffer)
- return;
+ ss->read.size = __xe_devcoredump_read(NULL, LONG_MAX, 0, coredump);
+
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) {
+ ss->read.buffer = kvmalloc(XE_DEVCOREDUMP_CHUNK_MAX,
+ GFP_USER);
+ if (!ss->read.buffer)
+ goto put_pm;
+
+ __xe_devcoredump_read(ss->read.buffer,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ 0, coredump);
+ } else {
+ ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
+ if (!ss->read.buffer)
+ goto put_pm;
+
+ __xe_devcoredump_read(ss->read.buffer, ss->read.size, 0,
+ coredump);
+ xe_devcoredump_snapshot_free(ss);
+ }
- __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
- xe_devcoredump_snapshot_free(ss);
+put_pm:
+ xe_pm_runtime_put(xe);
}
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
@@ -425,7 +465,7 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffi
if (offset & 3)
drm_printf(p, "Offset not word aligned: %zu", offset);
- line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_KERNEL);
+ line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_ATOMIC);
if (!line_buff) {
drm_printf(p, "Failed to allocate line buffer\n");
return;
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index 1a1d16a96b2d..a174385a6d83 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -66,6 +66,8 @@ struct xe_devcoredump_snapshot {
struct {
/** @read.size: size of devcoredump in human readable format */
ssize_t size;
+ /** @read.chunk_position: position of devcoredump chunk */
+ ssize_t chunk_position;
/** @read.buffer: buffer of devcoredump in human readable format */
char *buffer;
} read;
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 00191227bc95..c02c4c4e9412 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -23,8 +23,10 @@
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
+#include "xe_bo_evict.h"
#include "xe_debugfs.h"
#include "xe_devcoredump.h"
+#include "xe_device_sysfs.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
@@ -467,10 +469,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xa_erase(&xe->usm.asid_to_vm, asid);
}
- spin_lock_init(&xe->pinned.lock);
- INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
- INIT_LIST_HEAD(&xe->pinned.external_vram);
- INIT_LIST_HEAD(&xe->pinned.evicted);
+ err = xe_bo_pinned_init(xe);
+ if (err)
+ goto err;
xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq",
WQ_MEM_RECLAIM);
@@ -505,7 +506,15 @@ ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */
static bool xe_driver_flr_disabled(struct xe_device *xe)
{
- return xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS;
+ if (IS_SRIOV_VF(xe))
+ return true;
+
+ if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
+ drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n");
+ return true;
+ }
+
+ return false;
}
/*
@@ -523,7 +532,7 @@ static bool xe_driver_flr_disabled(struct xe_device *xe)
*/
static void __xe_driver_flr(struct xe_device *xe)
{
- const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
+ const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
int ret;
@@ -569,10 +578,8 @@ static void __xe_driver_flr(struct xe_device *xe)
static void xe_driver_flr(struct xe_device *xe)
{
- if (xe_driver_flr_disabled(xe)) {
- drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
+ if (xe_driver_flr_disabled(xe))
return;
- }
__xe_driver_flr(xe);
}
@@ -706,7 +713,7 @@ int xe_device_probe_early(struct xe_device *xe)
sriov_update_device_info(xe);
err = xe_pcode_probe_early(xe);
- if (err) {
+ if (err || xe_survivability_mode_is_requested(xe)) {
int save_err = err;
/*
@@ -729,6 +736,7 @@ int xe_device_probe_early(struct xe_device *xe)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */
static int probe_has_flat_ccs(struct xe_device *xe)
{
@@ -908,6 +916,10 @@ int xe_device_probe(struct xe_device *xe)
if (err)
goto err_unregister_display;
+ err = xe_device_sysfs_init(xe);
+ if (err)
+ goto err_unregister_display;
+
xe_debugfs_register(xe);
err = xe_hwmon_register(xe);
@@ -932,6 +944,8 @@ void xe_device_remove(struct xe_device *xe)
xe_display_unregister(xe);
drm_dev_unplug(&xe->drm);
+
+ xe_bo_pci_dev_remove_all(xe);
}
void xe_device_shutdown(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index 7efbd4c52791..2e657692e5b5 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -3,14 +3,16 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
-#include <drm/drm_managed.h>
-
#include "xe_device.h"
#include "xe_device_sysfs.h"
+#include "xe_mmio.h"
+#include "xe_pcode_api.h"
+#include "xe_pcode.h"
#include "xe_pm.h"
/**
@@ -63,11 +65,94 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(vram_d3cold_threshold);
+/**
+ * DOC: PCIe Gen5 Limitations
+ *
+ * The default link speed of discrete GPUs is determined by configuration
+ * parameters stored in their flash memory, which are subject to override
+ * through user-initiated firmware updates. It has been observed that devices
+ * configured with PCIe Gen5 as their default link speed can run into link
+ * quality issues due to host or motherboard limitations and may have to
+ * auto-downgrade their link to PCIe Gen4 speed when faced with an unstable
+ * link at Gen5, which makes firmware updates rather risky on such setups. It
+ * is therefore required to ensure that the device is capable of
+ * auto-downgrading its link to PCIe Gen4 speed before pushing a firmware
+ * image with PCIe Gen5 as the default configuration. This can be done by
+ * reading the ``auto_link_downgrade_capable`` sysfs entry, which reports a
+ * boolean value of ``0`` or ``1``, meaning `incapable` or `capable`
+ * respectively.
+ *
+ * .. code-block:: shell
+ *
+ * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_capable
+ *
+ * Pushing a firmware image with PCIe Gen5 as the default configuration on an
+ * auto link downgrade incapable device and then facing link instability due
+ * to host or motherboard limitations can result in the driver failing to
+ * bind to the device, making further firmware updates impossible, with RMA
+ * as the only remaining resort.
+ *
+ * The link downgrade status of auto link downgrade capable devices is
+ * available through the ``auto_link_downgrade_status`` sysfs entry with a
+ * boolean output value of ``0`` or ``1``, where ``0`` means no
+ * auto-downgrading was required during link training (which is the optimal
+ * scenario) and ``1`` means the device has auto-downgraded its link to PCIe
+ * Gen4 speed due to an unstable Gen5 link.
+ *
+ * .. code-block:: shell
+ *
+ * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_status
+ */
+
+static ssize_t
+auto_link_downgrade_capable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ u32 cap, val;
+
+ xe_pm_runtime_get(xe);
+ val = xe_mmio_read32(xe_root_tile_mmio(xe), BMG_PCIE_CAP);
+ xe_pm_runtime_put(xe);
+
+ cap = REG_FIELD_GET(LINK_DOWNGRADE, val);
+ return sysfs_emit(buf, "%u\n", cap == DOWNGRADE_CAPABLE ? true : false);
+}
+static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_capable);
+
+static ssize_t
+auto_link_downgrade_status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ /* default the auto_link_downgrade status to 0 */
+ u32 val = 0;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = xe_pcode_read(xe_device_get_root_tile(xe),
+ PCODE_MBOX(DGFX_PCODE_STATUS, DGFX_GET_INIT_STATUS, 0),
+ &val, NULL);
+ xe_pm_runtime_put(xe);
+
+ return ret ?: sysfs_emit(buf, "%u\n", REG_FIELD_GET(DGFX_LINK_DOWNGRADE_STATUS, val));
+}
+static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_status);
+
+static const struct attribute *auto_link_downgrade_attrs[] = {
+ &dev_attr_auto_link_downgrade_capable.attr,
+ &dev_attr_auto_link_downgrade_status.attr,
+ NULL
+};
+
static void xe_device_sysfs_fini(void *arg)
{
struct xe_device *xe = arg;
- sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ if (xe->d3cold.capable)
+ sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+
+ if (xe->info.platform == XE_BATTLEMAGE)
+ sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs);
}
int xe_device_sysfs_init(struct xe_device *xe)
@@ -75,9 +160,17 @@ int xe_device_sysfs_init(struct xe_device *xe)
struct device *dev = xe->drm.dev;
int ret;
- ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
- if (ret)
- return ret;
+ if (xe->d3cold.capable) {
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ if (ret)
+ return ret;
+ }
+
+ if (xe->info.platform == XE_BATTLEMAGE) {
+ ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
+ if (ret)
+ return ret;
+ }
return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 72ef0b6fc425..c8fa2c011666 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -31,7 +31,6 @@
#endif
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
-#include "soc/intel_pch.h"
#include "intel_display_core.h"
#include "intel_display_device.h"
#endif
@@ -107,6 +106,9 @@ struct xe_vram_region {
resource_size_t actual_physical_size;
/** @mapping: pointer to VRAM mappable space */
void __iomem *mapping;
+ /** @ttm: VRAM TTM manager */
+ struct xe_ttm_vram_mgr ttm;
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
/** @pagemap: Used to remap device memory as ZONE_DEVICE */
struct dev_pagemap pagemap;
/**
@@ -120,8 +122,7 @@ struct xe_vram_region {
* This is generated when remap device memory as ZONE_DEVICE
*/
resource_size_t hpa_base;
- /** @ttm: VRAM TTM manager */
- struct xe_ttm_vram_mgr ttm;
+#endif
};
/**
@@ -314,6 +315,8 @@ struct xe_device {
u8 has_atomic_enable_pte_bit:1;
/** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
u8 has_device_atomics_on_smem:1;
+ /** @info.has_fan_control: Device supports fan control */
+ u8 has_fan_control:1;
/** @info.has_flat_ccs: Whether flat CCS metadata is used */
u8 has_flat_ccs:1;
/** @info.has_heci_cscfi: device has heci cscfi */
@@ -330,8 +333,12 @@ struct xe_device {
u8 has_sriov:1;
/** @info.has_usm: Device has unified shared memory support */
u8 has_usm:1;
+ /** @info.has_64bit_timestamp: Device supports 64-bit timestamps */
+ u8 has_64bit_timestamp:1;
/** @info.is_dgfx: is discrete device */
u8 is_dgfx:1;
+ /** @info.needs_scratch: needs scratch page for oob prefetch to work */
+ u8 needs_scratch:1;
/**
* @info.probe_display: Probe display hardware. If set to
* false, the driver will behave as if there is no display
@@ -418,12 +425,22 @@ struct xe_device {
struct {
/** @pinned.lock: protected pinned BO list state */
spinlock_t lock;
- /** @pinned.kernel_bo_present: pinned kernel BO that are present */
- struct list_head kernel_bo_present;
- /** @pinned.evicted: pinned BO that have been evicted */
- struct list_head evicted;
- /** @pinned.external_vram: pinned external BO in vram*/
- struct list_head external_vram;
+ /** @pinned.early: early pinned lists */
+ struct {
+ /** @pinned.early.kernel_bo_present: pinned kernel BO that are present */
+ struct list_head kernel_bo_present;
+ /** @pinned.early.evicted: pinned BO that have been evicted */
+ struct list_head evicted;
+ } early;
+ /** @pinned.late: late pinned lists */
+ struct {
+ /** @pinned.late.kernel_bo_present: pinned kernel BO that are present */
+ struct list_head kernel_bo_present;
+ /** @pinned.late.evicted: pinned BO that have been evicted */
+ struct list_head evicted;
+ /** @pinned.external: pinned external and dma-buf. */
+ struct list_head external;
+ } late;
} pinned;
/** @ufence_wq: user fence wait queue */
@@ -506,6 +523,9 @@ struct xe_device {
struct mutex lock;
} d3cold;
+ /** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */
+ struct notifier_block pm_notifier;
+
/** @pmt: Support the PMT driver callback interface */
struct {
/** @pmt.lock: protect access for telemetry data */
@@ -570,7 +590,6 @@ struct xe_device {
* migrating to the right sub-structs
*/
struct intel_display display;
- enum intel_pch pch_type;
struct dram_info {
bool wm_lv_0_adjust_needed;
@@ -585,6 +604,8 @@ struct xe_device {
INTEL_DRAM_DDR5,
INTEL_DRAM_LPDDR5,
INTEL_DRAM_GDDR,
+ INTEL_DRAM_GDDR_ECC,
+ __INTEL_DRAM_TYPE_MAX,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index f67803e15a0e..346f857f3837 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -145,10 +145,7 @@ static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- struct dma_buf *dma_buf = attach->dmabuf;
- struct xe_bo *bo = gem_to_xe_bo(dma_buf->priv);
-
- if (!xe_bo_is_vram(bo)) {
+ if (sg_page(sgt->sgl)) {
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt);
kfree(sgt);
@@ -236,7 +233,7 @@ static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct drm_gem_object *obj = attach->importer_priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
- XE_WARN_ON(xe_bo_evict(bo, false));
+ XE_WARN_ON(xe_bo_evict(bo));
}
static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index f2bb9168967c..96732613b4b7 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -52,6 +52,8 @@ struct xe_eu_stall_data_stream {
struct xe_gt *gt;
struct xe_bo *bo;
+ /* Lock to protect data buffer pointers */
+ struct mutex xecore_buf_lock;
struct per_xecore_buf *xecore_buf;
struct {
bool reported_to_user;
@@ -208,6 +210,9 @@ int xe_eu_stall_init(struct xe_gt *gt)
struct xe_device *xe = gt_to_xe(gt);
int ret;
+ if (!xe_eu_stall_supported_on_platform(xe))
+ return 0;
+
gt->eu_stall = kzalloc(sizeof(*gt->eu_stall), GFP_KERNEL);
if (!gt->eu_stall) {
ret = -ENOMEM;
@@ -278,7 +283,7 @@ static int xe_eu_stall_user_ext_set_property(struct xe_device *xe, u64 extension
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -308,7 +313,7 @@ static int xe_eu_stall_user_extensions(struct xe_device *xe, u64 extension,
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -378,7 +383,7 @@ static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream)
u16 group, instance;
unsigned int xecore;
- mutex_lock(&gt->eu_stall->stream_lock);
+ mutex_lock(&stream->xecore_buf_lock);
for_each_dss_steering(xecore, gt, group, instance) {
xecore_buf = &stream->xecore_buf[xecore];
read_ptr = xecore_buf->read;
@@ -396,7 +401,7 @@ static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream)
set_bit(xecore, stream->data_drop.mask);
xecore_buf->write = write_ptr;
}
- mutex_unlock(&gt->eu_stall->stream_lock);
+ mutex_unlock(&stream->xecore_buf_lock);
return min_data_present;
}
@@ -511,11 +516,13 @@ static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *st
unsigned int xecore;
int ret = 0;
+ mutex_lock(&stream->xecore_buf_lock);
if (bitmap_weight(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS)) {
if (!stream->data_drop.reported_to_user) {
stream->data_drop.reported_to_user = true;
xe_gt_dbg(gt, "EU stall data dropped in XeCores: %*pb\n",
XE_MAX_DSS_FUSE_BITS, stream->data_drop.mask);
+ mutex_unlock(&stream->xecore_buf_lock);
return -EIO;
}
stream->data_drop.reported_to_user = false;
@@ -527,6 +534,7 @@ static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *st
if (ret || count == total_size)
break;
}
+ mutex_unlock(&stream->xecore_buf_lock);
return total_size ?: (ret ?: -EAGAIN);
}
@@ -583,6 +591,7 @@ static void xe_eu_stall_stream_free(struct xe_eu_stall_data_stream *stream)
{
struct xe_gt *gt = stream->gt;
+ mutex_destroy(&stream->xecore_buf_lock);
gt->eu_stall->stream = NULL;
kfree(stream);
}
@@ -718,6 +727,7 @@ static int xe_eu_stall_stream_init(struct xe_eu_stall_data_stream *stream,
}
init_waitqueue_head(&stream->poll_wq);
+ mutex_init(&stream->xecore_buf_lock);
INIT_DELAYED_WORK(&stream->buf_poll_work, eu_stall_data_buf_poll_work_fn);
stream->per_xecore_buf_size = per_xecore_buf_size;
stream->sampling_rate_mult = props->sampling_rate_mult;
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.h b/drivers/gpu/drm/xe/xe_eu_stall.h
index ed9d0f233566..d1c76e503799 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.h
+++ b/drivers/gpu/drm/xe/xe_eu_stall.h
@@ -7,6 +7,7 @@
#define __XE_EU_STALL_H__
#include "xe_gt_types.h"
+#include "xe_sriov.h"
size_t xe_eu_stall_get_per_xecore_buf_size(void);
size_t xe_eu_stall_data_record_size(struct xe_device *xe);
@@ -19,6 +20,6 @@ int xe_eu_stall_stream_open(struct drm_device *dev,
static inline bool xe_eu_stall_supported_on_platform(struct xe_device *xe)
{
- return xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20;
+ return !IS_SRIOV_VF(xe) && (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20);
}
#endif
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index b75adfc99fb7..44364c042ad7 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -176,8 +176,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
if (xe_exec_queue_is_parallel(q)) {
- err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
- q->width);
+ err = copy_from_user(addresses, addresses_user, sizeof(u64) *
+ q->width);
if (err) {
err = -EFAULT;
goto err_syncs;
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 606922d9dd73..ce78cee5dec6 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -479,7 +479,7 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -518,7 +518,7 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -618,9 +618,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
return -EINVAL;
- err = __copy_from_user(eci, user_eci,
- sizeof(struct drm_xe_engine_class_instance) *
- len);
+ err = copy_from_user(eci, user_eci,
+ sizeof(struct drm_xe_engine_class_instance) * len);
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -830,7 +829,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
struct xe_device *xe = gt_to_xe(q->gt);
struct xe_lrc *lrc;
- u32 old_ts, new_ts;
+ u64 old_ts, new_ts;
int idx;
/*
diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
index 4f6784e5abf8..8a5cba22b586 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.c
+++ b/drivers/gpu/drm/xe/xe_force_wake.c
@@ -49,9 +49,6 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
fw->gt = gt;
spin_lock_init(&fw->lock);
- /* Assuming gen11+ so assert this assumption is correct */
- xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
-
if (xe->info.graphics_verx100 >= 1270) {
init_domain(fw, XE_FW_DOMAIN_ID_GT,
FORCEWAKE_GT,
@@ -67,9 +64,6 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
{
int i, j;
- /* Assuming gen11+ so assert this assumption is correct */
- xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
-
if (!xe_gt_is_media_type(gt))
init_domain(fw, XE_FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER,
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 5fcb2b4c2c13..7062115909f2 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -365,7 +365,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
* scratch entries, rather keep the scratch page in system memory on
* platforms where 64K pages are needed for VRAM.
*/
- flags = XE_BO_FLAG_PINNED;
+ flags = 0;
if (ggtt->flags & XE_GGTT_FLAGS_64K)
flags |= XE_BO_FLAG_SYSTEM;
else
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index fd41113f8572..0bcf97063ff6 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -555,6 +555,28 @@ void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc)
flush_work(&gsc->work);
}
+void xe_gsc_stop_prepare(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ int ret;
+
+ if (!xe_uc_fw_is_loadable(&gsc->fw) || xe_uc_fw_is_in_error_state(&gsc->fw))
+ return;
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GSC);
+
+ /*
+ * If the GSC FW load or the proxy init is interrupted, the only way
+ * to recover it is to do an FLR and reload the GSC from scratch.
+ * Therefore, let's wait for the init to complete before stopping
+ * operations. The proxy init is the last step, so we can just wait on
+ * that.
+ */
+ ret = xe_gsc_wait_for_proxy_init_done(gsc);
+ if (ret)
+ xe_gt_err(gt, "failed to wait for GSC init completion before uc stop\n");
+}
+
/*
* wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
* GSC engine reset by writing a notification bit in the GS1 register and then
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
index d99f66c38075..b8b8e0810ad9 100644
--- a/drivers/gpu/drm/xe/xe_gsc.h
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -16,6 +16,7 @@ struct xe_hw_engine;
int xe_gsc_init(struct xe_gsc *gsc);
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
+void xe_gsc_stop_prepare(struct xe_gsc *gsc);
void xe_gsc_load_start(struct xe_gsc *gsc);
void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index 8cf70b228ff3..d0519cd6704a 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -71,6 +71,17 @@ bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
HECI1_FWSTS1_PROXY_STATE_NORMAL;
}
+int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+
+ /* Proxy init can take up to 500ms, so wait double that for safety */
+ return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
+ HECI1_FWSTS1_CURRENT_STATE,
+ HECI1_FWSTS1_PROXY_STATE_NORMAL,
+ USEC_PER_SEC, NULL, false);
+}
+
static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
{
struct xe_gt *gt = gsc_to_gt(gsc);
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h
index fdef56995cd4..765602221dbc 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.h
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h
@@ -12,6 +12,7 @@ struct xe_gsc;
int xe_gsc_proxy_init(struct xe_gsc *gsc);
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc);
+int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc);
int xe_gsc_proxy_start(struct xe_gsc *gsc);
int xe_gsc_proxy_request_handler(struct xe_gsc *gsc);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 10a9e3c72b36..0e5d243c9451 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -12,8 +12,10 @@
#include <generated/xe_wa_oob.h>
+#include "instructions/xe_alu_commands.h"
#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
+#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
@@ -176,15 +178,6 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
return 0;
}
-/*
- * Convert back from encoded value to type-safe, only to be used when reg.mcr
- * is true
- */
-static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
-{
- return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
-}
-
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
struct xe_reg_sr *sr = &q->hwe->reg_lrc;
@@ -194,6 +187,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
struct xe_bb *bb;
struct dma_fence *fence;
long timeout;
+ int count_rmw = 0;
int count = 0;
if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
@@ -206,30 +200,32 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
if (IS_ERR(bb))
return PTR_ERR(bb);
- xa_for_each(&sr->xa, idx, entry)
- ++count;
+ /* count RMW registers as those will be handled separately */
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ ++count;
+ else
+ ++count_rmw;
+ }
- if (count) {
+ if (count || count_rmw)
xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);
+ if (count) {
+ /* emit single LRI with all non RMW regs */
+
bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
xa_for_each(&sr->xa, idx, entry) {
struct xe_reg reg = entry->reg;
- struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
u32 val;
- /*
- * Skip reading the register if it's not really needed
- */
if (reg.masked)
val = entry->clr_bits << 16;
- else if (entry->clr_bits + 1)
- val = (reg.mcr ?
- xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
- xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits);
- else
+ else if (entry->clr_bits == ~0)
val = 0;
+ else
+ continue;
val |= entry->set_bits;
@@ -239,6 +235,52 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
}
}
+ if (count_rmw) {
+ /* emit MI_MATH for each RMW reg */
+
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ continue;
+
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
+ bb->cs[bb->len++] = entry->reg.addr;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
+
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
+ MI_LRI_LRM_CS_MMIO;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
+ bb->cs[bb->len++] = entry->clr_bits;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
+ bb->cs[bb->len++] = entry->set_bits;
+
+ bb->cs[bb->len++] = MI_MATH(8);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOADINV(SRCB, REG1);
+ bb->cs[bb->len++] = CS_ALU_INSTR_AND;
+ bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCB, REG2);
+ bb->cs[bb->len++] = CS_ALU_INSTR_OR;
+ bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
+
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
+ bb->cs[bb->len++] = entry->reg.addr;
+
+ xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n",
+ entry->reg.addr, entry->clr_bits, entry->set_bits);
+ }
+
+ /* reset used GPR */
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) | MI_LRI_LRM_CS_MMIO;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
+ bb->cs[bb->len++] = 0;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
+ bb->cs[bb->len++] = 0;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
+ bb->cs[bb->len++] = 0;
+ }
+
xe_lrc_emit_hwe_state_instructions(q, bb);
job = xe_bb_create_job(q, bb);
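
In plain C, the ALU program emitted in the RMW path above amounts to a read-modify-write performed entirely on the command streamer; a sketch of the equivalent expression (not emitted code, variable names illustrative):

	/* GPR0 = current register value (MI_LOAD_REGISTER_REG)       */
	/* GPR1 = entry->clr_bits, GPR2 = entry->set_bits (MI_LRI)    */
	/* MI_MATH: GPR0 = (GPR0 & ~GPR1) | GPR2, then written back   */
	new_val = (old_val & ~entry->clr_bits) | entry->set_bits;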
@@ -857,7 +899,7 @@ void xe_gt_suspend_prepare(struct xe_gt *gt)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- xe_uc_stop_prepare(&gt->uc);
+ xe_uc_suspend_prepare(&gt->uc);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 2d63a69cbfa3..119a55bb7580 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -92,22 +92,23 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
unsigned int fw_ref;
+ int ret = 0;
xe_pm_runtime_get(xe);
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
- xe_pm_runtime_put(xe);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto fw_put;
}
for_each_hw_engine(hwe, gt, id)
xe_hw_engine_print(hwe, p);
+fw_put:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
- return 0;
+ return ret;
}
static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
@@ -299,20 +300,20 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
-static const struct drm_info_list debugfs_list[] = {
- {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
+/*
+ * only for GT debugfs files which can be safely used on the VF as well:
+ * - without access to the GT privileged registers
+ * - without access to the PF specific data
+ */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
{"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset},
{"force_reset_sync", .show = xe_gt_debugfs_simple_show, .data = force_reset_sync},
{"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info},
{"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
- {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
{"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
- {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
{"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
{"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
{"tunings", .show = xe_gt_debugfs_simple_show, .data = tunings},
- {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
- {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
{"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc},
{"default_lrc_ccs", .show = xe_gt_debugfs_simple_show, .data = ccs_default_lrc},
{"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
@@ -322,6 +323,15 @@ static const struct drm_info_list debugfs_list[] = {
{"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig},
};
+/* everything else should be added here */
+static const struct drm_info_list pf_only_debugfs_list[] = {
+ {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
+ {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
+ {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
+ {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
+ {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
+};
+
void xe_gt_debugfs_register(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -345,10 +355,15 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
*/
root->d_inode->i_private = gt;
- drm_debugfs_create_files(debugfs_list,
- ARRAY_SIZE(debugfs_list),
+ drm_debugfs_create_files(vf_safe_debugfs_list,
+ ARRAY_SIZE(vf_safe_debugfs_list),
root, minor);
+ if (!IS_SRIOV_VF(xe))
+ drm_debugfs_create_files(pf_only_debugfs_list,
+ ARRAY_SIZE(pf_only_debugfs_list),
+ root, minor);
+
xe_uc_debugfs_register(&gt->uc, root);
if (IS_SRIOV_PF(xe))
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 604bdc7c8173..868a5d2c1a52 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -56,9 +56,10 @@ dev_to_xe(struct device *dev)
return gt_to_xe(kobj_to_gt(dev->kobj.parent));
}
-static ssize_t act_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t act_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -68,11 +69,12 @@ static ssize_t act_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(act_freq);
+static struct kobj_attribute attr_act_freq = __ATTR_RO(act_freq);
-static ssize_t cur_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cur_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -85,11 +87,12 @@ static ssize_t cur_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(cur_freq);
+static struct kobj_attribute attr_cur_freq = __ATTR_RO(cur_freq);
-static ssize_t rp0_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rp0_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -99,11 +102,12 @@ static ssize_t rp0_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rp0_freq);
+static struct kobj_attribute attr_rp0_freq = __ATTR_RO(rp0_freq);
-static ssize_t rpe_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpe_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -113,11 +117,12 @@ static ssize_t rpe_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rpe_freq);
+static struct kobj_attribute attr_rpe_freq = __ATTR_RO(rpe_freq);
-static ssize_t rpa_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpa_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -127,20 +132,22 @@ static ssize_t rpa_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rpa_freq);
+static struct kobj_attribute attr_rpa_freq = __ATTR_RO(rpa_freq);
-static ssize_t rpn_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpn_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rpn_freq(pc));
}
-static DEVICE_ATTR_RO(rpn_freq);
+static struct kobj_attribute attr_rpn_freq = __ATTR_RO(rpn_freq);
-static ssize_t min_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t min_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -154,9 +161,10 @@ static ssize_t min_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
- const char *buff, size_t count)
+static ssize_t min_freq_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff, size_t count)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -173,11 +181,12 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR_RW(min_freq);
+static struct kobj_attribute attr_min_freq = __ATTR_RW(min_freq);
-static ssize_t max_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t max_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -191,9 +200,10 @@ static ssize_t max_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
- const char *buff, size_t count)
+static ssize_t max_freq_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff, size_t count)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -210,17 +220,17 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR_RW(max_freq);
+static struct kobj_attribute attr_max_freq = __ATTR_RW(max_freq);
static const struct attribute *freq_attrs[] = {
- &dev_attr_act_freq.attr,
- &dev_attr_cur_freq.attr,
- &dev_attr_rp0_freq.attr,
- &dev_attr_rpa_freq.attr,
- &dev_attr_rpe_freq.attr,
- &dev_attr_rpn_freq.attr,
- &dev_attr_min_freq.attr,
- &dev_attr_max_freq.attr,
+ &attr_act_freq.attr,
+ &attr_cur_freq.attr,
+ &attr_rp0_freq.attr,
+ &attr_rpa_freq.attr,
+ &attr_rpe_freq.attr,
+ &attr_rpn_freq.attr,
+ &attr_min_freq.attr,
+ &attr_max_freq.attr,
NULL
};
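
The conversions in this file all follow the same shape: a device_attribute show/store pair becomes a kobj_attribute pair that recovers the struct device via kobj_to_dev(); a minimal sketch with placeholder names (example_read_freq() is hypothetical):

	static ssize_t example_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
	{
		struct device *dev = kobj_to_dev(kobj);	/* kobject -> device */

		return sysfs_emit(buf, "%d\n", example_read_freq(dev));
	}
	static struct kobj_attribute attr_example = __ATTR_RO(example);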
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index fbbace7b0b12..c11206410a4d 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -249,9 +249,10 @@ int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
-static ssize_t name_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+static ssize_t name_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
ssize_t ret;
@@ -262,11 +263,12 @@ static ssize_t name_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR_RO(name);
+static struct kobj_attribute name_attr = __ATTR_RO(name);
-static ssize_t idle_status_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+static ssize_t idle_status_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
enum xe_gt_idle_state state;
@@ -277,6 +279,7 @@ static ssize_t idle_status_show(struct device *dev,
return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
}
+static struct kobj_attribute idle_status_attr = __ATTR_RO(idle_status);
u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle)
{
@@ -291,10 +294,11 @@ u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle)
return residency;
}
-static DEVICE_ATTR_RO(idle_status);
-static ssize_t idle_residency_ms_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+
+static ssize_t idle_residency_ms_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
u64 residency;
@@ -305,12 +309,12 @@ static ssize_t idle_residency_ms_show(struct device *dev,
return sysfs_emit(buff, "%llu\n", residency);
}
-static DEVICE_ATTR_RO(idle_residency_ms);
+static struct kobj_attribute idle_residency_attr = __ATTR_RO(idle_residency_ms);
static const struct attribute *gt_idle_attrs[] = {
- &dev_attr_name.attr,
- &dev_attr_idle_status.attr,
- &dev_attr_idle_residency_ms.attr,
+ &name_attr.attr,
+ &idle_status_attr.attr,
+ &idle_residency_attr.attr,
NULL,
};
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 605aad3554e7..d4d9730f0d2c 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -345,7 +345,8 @@ fallback:
* Some older platforms don't have tables or don't have complete tables.
* Newer platforms should always have the required info.
*/
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 2000)
+ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 2000 &&
+ !gt_to_xe(gt)->info.force_execlist)
xe_gt_err(gt, "Slice/Subslice counts missing from hwconfig table; using typical fallback values\n");
if (gt_to_xe(gt)->info.platform == XE_PVC)
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index c5ad9a0a89c2..10622ca471a2 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -240,7 +240,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
atomic = access_is_atomic(pf->access_type);
if (xe_vma_is_cpu_addr_mirror(vma))
- err = xe_svm_handle_pagefault(vm, vma, gt_to_tile(gt),
+ err = xe_svm_handle_pagefault(vm, vma, gt,
pf->page_addr, atomic);
else
err = handle_vma_pagefault(gt, vma, atomic);
@@ -435,9 +435,16 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
XE_MAX_EU_FUSE_BITS) * num_dss;
- /* user can issue separate page faults per EU and per CS */
+ /*
+ * The user can issue separate page faults per EU and per CS.
+ *
+ * XXX: Multiplier required as compute UMDs are getting PF queue errors
+ * without it. Follow up on why this multiplier is required.
+ */
+#define PF_MULTIPLIER 8
pf_queue->num_dw =
- (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
+ (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER;
+#undef PF_MULTIPLIER
pf_queue->gt = gt;
pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 10be109bf357..2420a548cacc 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1444,15 +1444,23 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
return 0;
xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_NEEDS_2M |
- XE_BO_FLAG_PINNED);
+ bo = xe_bo_create_locked(xe, tile, NULL,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_NEEDS_2M |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PINNED_LATE_RESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
+ err = xe_bo_pin(bo);
+ xe_bo_unlock(bo);
+ if (unlikely(err)) {
+ xe_bo_put(bo);
+ return err;
+ }
+
config->lmem_obj = bo;
if (xe_device_has_lmtt(xe)) {
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index b2521dd6ec42..0fe47f41b63c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -51,27 +51,18 @@ static unsigned int extract_vfid(struct dentry *d)
* /sys/kernel/debug/dri/0/
* ├── gt0
* │   ├── pf
- * │   │   ├── ggtt_available
- * │   │   ├── ggtt_provisioned
* │   │   ├── contexts_provisioned
* │   │   ├── doorbells_provisioned
* │   │   ├── runtime_registers
* │   │   ├── negotiated_versions
* │   │   ├── adverse_events
+ * ├── gt1
+ * │   ├── pf
+ * │   │   ├── ...
*/
static const struct drm_info_list pf_info[] = {
{
- "ggtt_available",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_available_ggtt,
- },
- {
- "ggtt_provisioned",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_ggtt,
- },
- {
"contexts_provisioned",
.show = xe_gt_debugfs_simple_show,
.data = xe_gt_sriov_pf_config_print_ctxs,
@@ -82,11 +73,6 @@ static const struct drm_info_list pf_info[] = {
.data = xe_gt_sriov_pf_config_print_dbs,
},
{
- "lmem_provisioned",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_lmem,
- },
- {
"runtime_registers",
.show = xe_gt_debugfs_simple_show,
.data = xe_gt_sriov_pf_service_print_runtime,
@@ -107,6 +93,42 @@ static const struct drm_info_list pf_info[] = {
* /sys/kernel/debug/dri/0/
* ├── gt0
* │   ├── pf
+ * │   │   ├── ggtt_available
+ * │   │   ├── ggtt_provisioned
+ */
+
+static const struct drm_info_list pf_ggtt_info[] = {
+ {
+ "ggtt_available",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_available_ggtt,
+ },
+ {
+ "ggtt_provisioned",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_ggtt,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
+ * │   │   ├── lmem_provisioned
+ */
+
+static const struct drm_info_list pf_lmem_info[] = {
+ {
+ "lmem_provisioned",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_lmem,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
* │   │   ├── reset_engine
* │   │   ├── sample_period
* │   │   ├── sched_if_idle
@@ -532,6 +554,16 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root)
pfdentry->d_inode->i_private = gt;
drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), pfdentry, minor);
+ if (!xe_gt_is_media_type(gt)) {
+ drm_debugfs_create_files(pf_ggtt_info,
+ ARRAY_SIZE(pf_ggtt_info),
+ pfdentry, minor);
+ if (IS_DGFX(gt_to_xe(gt)))
+ drm_debugfs_create_files(pf_lmem_info,
+ ARRAY_SIZE(pf_lmem_info),
+ pfdentry, minor);
+ }
+
pf_add_policy_attrs(gt, pfdentry);
pf_add_config_attrs(gt, pfdentry, PFID);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
index 4efde5f46b43..821cfcc34e6b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
@@ -112,7 +112,6 @@ static const struct xe_reg tgl_runtime_regs[] = {
XELP_GT_SLICE_ENABLE, /* _MMIO(0x9138) */
XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -124,7 +123,6 @@ static const struct xe_reg ats_m_runtime_regs[] = {
XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -136,7 +134,6 @@ static const struct xe_reg pvc_runtime_regs[] = {
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
- CTC_MODE, /* _MMIO(0xA26C) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -150,7 +147,6 @@ static const struct xe_reg ver_1270_runtime_regs[] = {
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -167,7 +163,6 @@ static const struct xe_reg ver_2000_runtime_regs[] = {
XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -185,7 +180,6 @@ static const struct xe_reg ver_3000_runtime_regs[] = {
XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 6155ea354432..30f942671c2b 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -27,6 +27,7 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr)
}
static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
+ "svm_pagefault_count",
"tlb_inval_count",
"vma_pagefault_count",
"vma_pagefault_kb",
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index d556771f99d6..be3244d7133c 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -7,6 +7,7 @@
#define _XE_GT_STATS_TYPES_H_
enum xe_gt_stats_id {
+ XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT,
XE_GT_STATS_ID_TLB_INVAL,
XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT,
XE_GT_STATS_ID_VMA_PAGEFAULT_KB,
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c
index 8db78d616b6f..aa962c783cdf 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle.c
@@ -114,115 +114,115 @@ static u32 read_reason_vr_tdc(struct xe_gt *gt)
return tdc;
}
-static ssize_t status_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t status_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool status = !!read_status(gt);
return sysfs_emit(buff, "%u\n", status);
}
-static DEVICE_ATTR_RO(status);
+static struct kobj_attribute attr_status = __ATTR_RO(status);
-static ssize_t reason_pl1_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl1_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl1 = !!read_reason_pl1(gt);
return sysfs_emit(buff, "%u\n", pl1);
}
-static DEVICE_ATTR_RO(reason_pl1);
+static struct kobj_attribute attr_reason_pl1 = __ATTR_RO(reason_pl1);
-static ssize_t reason_pl2_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl2_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl2 = !!read_reason_pl2(gt);
return sysfs_emit(buff, "%u\n", pl2);
}
-static DEVICE_ATTR_RO(reason_pl2);
+static struct kobj_attribute attr_reason_pl2 = __ATTR_RO(reason_pl2);
-static ssize_t reason_pl4_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl4_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl4 = !!read_reason_pl4(gt);
return sysfs_emit(buff, "%u\n", pl4);
}
-static DEVICE_ATTR_RO(reason_pl4);
+static struct kobj_attribute attr_reason_pl4 = __ATTR_RO(reason_pl4);
-static ssize_t reason_thermal_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_thermal_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool thermal = !!read_reason_thermal(gt);
return sysfs_emit(buff, "%u\n", thermal);
}
-static DEVICE_ATTR_RO(reason_thermal);
+static struct kobj_attribute attr_reason_thermal = __ATTR_RO(reason_thermal);
-static ssize_t reason_prochot_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_prochot_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool prochot = !!read_reason_prochot(gt);
return sysfs_emit(buff, "%u\n", prochot);
}
-static DEVICE_ATTR_RO(reason_prochot);
+static struct kobj_attribute attr_reason_prochot = __ATTR_RO(reason_prochot);
-static ssize_t reason_ratl_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_ratl_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool ratl = !!read_reason_ratl(gt);
return sysfs_emit(buff, "%u\n", ratl);
}
-static DEVICE_ATTR_RO(reason_ratl);
+static struct kobj_attribute attr_reason_ratl = __ATTR_RO(reason_ratl);
-static ssize_t reason_vr_thermalert_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_vr_thermalert_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool thermalert = !!read_reason_vr_thermalert(gt);
return sysfs_emit(buff, "%u\n", thermalert);
}
-static DEVICE_ATTR_RO(reason_vr_thermalert);
+static struct kobj_attribute attr_reason_vr_thermalert = __ATTR_RO(reason_vr_thermalert);
-static ssize_t reason_vr_tdc_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_vr_tdc_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool tdc = !!read_reason_vr_tdc(gt);
return sysfs_emit(buff, "%u\n", tdc);
}
-static DEVICE_ATTR_RO(reason_vr_tdc);
+static struct kobj_attribute attr_reason_vr_tdc = __ATTR_RO(reason_vr_tdc);
static struct attribute *throttle_attrs[] = {
- &dev_attr_status.attr,
- &dev_attr_reason_pl1.attr,
- &dev_attr_reason_pl2.attr,
- &dev_attr_reason_pl4.attr,
- &dev_attr_reason_thermal.attr,
- &dev_attr_reason_prochot.attr,
- &dev_attr_reason_ratl.attr,
- &dev_attr_reason_vr_thermalert.attr,
- &dev_attr_reason_vr_tdc.attr,
+ &attr_status.attr,
+ &attr_reason_pl1.attr,
+ &attr_reason_pl2.attr,
+ &attr_reason_pl4.attr,
+ &attr_reason_thermal.attr,
+ &attr_reason_prochot.attr,
+ &attr_reason_ratl.attr,
+ &attr_reason_vr_thermalert.attr,
+ &attr_reason_vr_tdc.attr,
NULL
};
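
The throttle attributes above all follow the same conversion pattern as the gt_idle ones earlier in the series: the show callback now takes a kobject, recovers the struct device via kobj_to_dev(), and the attribute is declared with __ATTR_RO() instead of DEVICE_ATTR_RO(). A minimal sketch of that pattern, where example_read_value() is a hypothetical stand-in for the driver's register readers:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	unsigned int example_read_value(struct device *dev);	/* assumed driver helper */

	static ssize_t example_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buff)
	{
		struct device *dev = kobj_to_dev(kobj);	/* kobject back to device */

		return sysfs_emit(buff, "%u\n", example_read_value(dev));
	}
	static struct kobj_attribute attr_example = __ATTR_RO(example);
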
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 03072e094991..084cbdeba8ea 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -322,6 +322,13 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
return 0;
}
+/*
+ * Ensure that roundup_pow_of_two(length) doesn't overflow.
+ * Note that roundup_pow_of_two() operates on unsigned long,
+ * not on u64.
+ */
+#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
+
/**
* xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
* address range
@@ -346,6 +353,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN 7
u32 action[MAX_TLB_INVALIDATION_LEN];
+ u64 length = end - start;
int len = 0;
xe_gt_assert(gt, fence);
@@ -358,11 +366,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
- if (!xe->info.has_range_tlb_invalidation) {
+ if (!xe->info.has_range_tlb_invalidation ||
+ length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
} else {
u64 orig_start = start;
- u64 length = end - start;
u64 align;
if (length < SZ_4K)
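
The new MAX_RANGE_TLB_INVALIDATION_LENGTH check exists because roundup_pow_of_two() takes an unsigned long, so a u64 range length close to ULONG_MAX could overflow when rounded up; such ranges are simply demoted to a full-GT invalidation. A small sketch of that guard (the example_ names are invented, not the driver's):

	#include <linux/log2.h>
	#include <linux/limits.h>
	#include <linux/types.h>

	/* Largest length for which roundup_pow_of_two() cannot overflow. */
	#define EXAMPLE_MAX_RANGE_LEN	rounddown_pow_of_two(ULONG_MAX)

	static bool example_use_ranged_invalidation(u64 length, bool has_range_inval)
	{
		/* Fall back to a full invalidation when rounding up would overflow. */
		return has_range_inval && length <= EXAMPLE_MAX_RANGE_LEN;
	}
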
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index bc5714a5b36b..bac5471a1a78 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -483,7 +483,8 @@ static int guc_g2g_alloc(struct xe_guc *guc)
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_GGTT_ALL |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1393,6 +1394,7 @@ proto:
/* Use data from the GuC response as our return value */
return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}
+ALLOW_ERROR_INJECTION(xe_guc_mmio_send_recv, ERRNO);
int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
@@ -1508,30 +1510,32 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
xe_uc_fw_print(&guc->fw, p);
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (!fw_ref)
- return;
+ if (!IS_SRIOV_VF(gt_to_xe(gt))) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
+
+ status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
+
+ drm_printf(p, "\nGuC status 0x%08x:\n", status);
+ drm_printf(p, "\tBootrom status = 0x%x\n",
+ REG_FIELD_GET(GS_BOOTROM_MASK, status));
+ drm_printf(p, "\tuKernel status = 0x%x\n",
+ REG_FIELD_GET(GS_UKERNEL_MASK, status));
+ drm_printf(p, "\tMIA Core status = 0x%x\n",
+ REG_FIELD_GET(GS_MIA_MASK, status));
+ drm_printf(p, "\tLog level = %d\n",
+ xe_guc_log_get_level(&guc->log));
+
+ drm_puts(p, "\nScratch registers:\n");
+ for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
+ drm_printf(p, "\t%2d: \t0x%x\n",
+ i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
+ }
- status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
-
- drm_printf(p, "\nGuC status 0x%08x:\n", status);
- drm_printf(p, "\tBootrom status = 0x%x\n",
- REG_FIELD_GET(GS_BOOTROM_MASK, status));
- drm_printf(p, "\tuKernel status = 0x%x\n",
- REG_FIELD_GET(GS_UKERNEL_MASK, status));
- drm_printf(p, "\tMIA Core status = 0x%x\n",
- REG_FIELD_GET(GS_MIA_MASK, status));
- drm_printf(p, "\tLog level = %d\n",
- xe_guc_log_get_level(&guc->log));
-
- drm_puts(p, "\nScratch registers:\n");
- for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
- drm_printf(p, "\t%2d: \t0x%x\n",
- i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
drm_puts(p, "\n");
xe_guc_ct_print(&guc->ct, p, false);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index e7c9e095a19f..44c1fa2fe7c8 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -376,6 +376,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET,
&offset, &remain);
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_WA(gt, 16026508708))
+ guc_waklv_enable_simple(ads,
+ GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH,
+ &offset, &remain);
+
size = guc_ads_waklv_size(ads) - remain;
if (!size)
return;
@@ -414,7 +419,8 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -490,24 +496,52 @@ static void fill_engine_enable_masks(struct xe_gt *gt,
engine_enable_mask(gt, XE_ENGINE_CLASS_OTHER));
}
-static void guc_prep_golden_lrc_null(struct xe_guc_ads *ads)
+/*
+ * Write the offsets corresponding to the golden LRCs. The actual data is
+ * populated later by guc_golden_lrc_populate()
+ */
+static void guc_golden_lrc_init(struct xe_guc_ads *ads)
{
struct xe_device *xe = ads_to_xe(ads);
+ struct xe_gt *gt = ads_to_gt(ads);
struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
offsetof(struct __guc_ads_blob, system_info));
- u8 guc_class;
+ size_t alloc_size, real_size;
+ u32 addr_ggtt, offset;
+ int class;
+
+ offset = guc_ads_golden_lrc_offset(ads);
+ addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
+
+ for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
+ u8 guc_class;
+
+ guc_class = xe_engine_class_to_guc_class(class);
- for (guc_class = 0; guc_class <= GUC_MAX_ENGINE_CLASSES; ++guc_class) {
if (!info_map_read(xe, &info_map,
engine_enabled_masks[guc_class]))
continue;
+ real_size = xe_gt_lrc_size(gt, class);
+ alloc_size = PAGE_ALIGN(real_size);
+
+ /*
+ * This interface is slightly confusing. We need to pass the
+ * base address of the full golden context and the size of just
+ * the engine state, which is the section of the context image
+ * that starts after the execlists LRC registers. This is
+ * required to allow the GuC to restore just the engine state
+ * when a watchdog reset occurs.
+ * We calculate the engine state size by removing the size of
+ * what comes before it in the context image (which is identical
+ * on all engines).
+ */
ads_blob_write(ads, ads.eng_state_size[guc_class],
- guc_ads_golden_lrc_size(ads) -
- xe_lrc_skip_size(xe));
+ real_size - xe_lrc_skip_size(xe));
ads_blob_write(ads, ads.golden_context_lrca[guc_class],
- xe_bo_ggtt_addr(ads->bo) +
- guc_ads_golden_lrc_offset(ads));
+ addr_ggtt);
+
+ addr_ggtt += alloc_size;
}
}
@@ -682,8 +716,8 @@ static int guc_capture_prep_lists(struct xe_guc_ads *ads)
}
if (ads->capture_size != PAGE_ALIGN(total_size))
- xe_gt_dbg(gt, "ADS capture alloc size changed from %d to %d\n",
- ads->capture_size, PAGE_ALIGN(total_size));
+ xe_gt_dbg(gt, "Updated ADS capture size %d (was %d)\n",
+ PAGE_ALIGN(total_size), ads->capture_size);
return PAGE_ALIGN(total_size);
}
@@ -857,7 +891,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
guc_policies_init(ads);
- guc_prep_golden_lrc_null(ads);
+ guc_golden_lrc_init(ads);
guc_mapping_table_init_invalid(gt, &info_map);
guc_doorbell_init(ads);
@@ -883,7 +917,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
guc_policies_init(ads);
fill_engine_enable_masks(gt, &info_map);
guc_mmio_reg_state_init(ads);
- guc_prep_golden_lrc_null(ads);
+ guc_golden_lrc_init(ads);
guc_mapping_table_init(gt, &info_map);
guc_capture_prep_lists(ads);
guc_doorbell_init(ads);
@@ -903,18 +937,22 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
guc_ads_private_data_offset(ads));
}
-static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
+/*
+ * After the golden LRC's are recorded for each engine class by the first
+ * submission, copy them to the ADS, as initialized earlier by
+ * guc_golden_lrc_init().
+ */
+static void guc_golden_lrc_populate(struct xe_guc_ads *ads)
{
struct xe_device *xe = ads_to_xe(ads);
struct xe_gt *gt = ads_to_gt(ads);
struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
offsetof(struct __guc_ads_blob, system_info));
size_t total_size = 0, alloc_size, real_size;
- u32 addr_ggtt, offset;
+ u32 offset;
int class;
offset = guc_ads_golden_lrc_offset(ads);
- addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
u8 guc_class;
@@ -931,26 +969,9 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
alloc_size = PAGE_ALIGN(real_size);
total_size += alloc_size;
- /*
- * This interface is slightly confusing. We need to pass the
- * base address of the full golden context and the size of just
- * the engine state, which is the section of the context image
- * that starts after the execlists LRC registers. This is
- * required to allow the GuC to restore just the engine state
- * when a watchdog reset occurs.
- * We calculate the engine state size by removing the size of
- * what comes before it in the context image (which is identical
- * on all engines).
- */
- ads_blob_write(ads, ads.eng_state_size[guc_class],
- real_size - xe_lrc_skip_size(xe));
- ads_blob_write(ads, ads.golden_context_lrca[guc_class],
- addr_ggtt);
-
xe_map_memcpy_to(xe, ads_to_map(ads), offset,
gt->default_lrc[class], real_size);
- addr_ggtt += alloc_size;
offset += alloc_size;
}
@@ -959,7 +980,7 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
{
- guc_populate_golden_lrc(ads);
+ guc_golden_lrc_populate(ads);
}
static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_offset)
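
The refactor above splits golden-LRC handling into two phases: guc_golden_lrc_init() records, at ADS population time, where each engine class's golden context will live and how large its engine state is, while guc_golden_lrc_populate() later copies the recorded default context images into those slots once they exist. A compressed sketch of the two-phase idea, using invented types and a flat buffer instead of the driver's iosys_map plumbing:

	#include <linux/align.h>	/* ALIGN() */
	#include <linux/string.h>	/* memcpy() */
	#include <linux/types.h>

	#define EXAMPLE_NUM_CLASSES 4	/* illustrative, not the real class count */

	struct example_ads {
		u32 lrca[EXAMPLE_NUM_CLASSES];		/* GGTT address per class */
		u32 state_size[EXAMPLE_NUM_CLASSES];	/* engine-state size per class */
		void *lrc_area;				/* backing storage for the images */
	};

	/* Phase 1: reserve space and record where each class's image will live. */
	static void example_golden_lrc_init(struct example_ads *ads, u32 base_ggtt,
					    const u32 *real_size)
	{
		u32 offset = 0;
		int c;

		for (c = 0; c < EXAMPLE_NUM_CLASSES; c++) {
			ads->lrca[c] = base_ggtt + offset;
			/* the driver actually stores real_size minus an LRC skip size */
			ads->state_size[c] = real_size[c];
			offset += ALIGN(real_size[c], 4096);	/* page-aligned slots */
		}
	}

	/* Phase 2: once default contexts are recorded, copy them into the slots. */
	static void example_golden_lrc_populate(struct example_ads *ads,
						void *const *default_lrc,
						const u32 *real_size)
	{
		size_t offset = 0;
		int c;

		for (c = 0; c < EXAMPLE_NUM_CLASSES; c++) {
			memcpy((char *)ads->lrc_area + offset,
			       default_lrc[c], real_size[c]);
			offset += ALIGN(real_size[c], 4096);
		}
	}
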
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
index f6d523e4c5fe..859a3ba91be5 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.c
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -105,49 +105,49 @@ struct __guc_capture_parsed_output {
* 3. Incorrect order will trigger XE_WARN.
*/
#define COMMON_XELP_BASE_GLOBAL \
- { FORCEWAKE_GT, REG_32BIT, 0, 0, "FORCEWAKE_GT"}
+ { FORCEWAKE_GT, REG_32BIT, 0, 0, 0, "FORCEWAKE_GT"}
#define COMMON_BASE_ENGINE_INSTANCE \
- { RING_HWSTAM(0), REG_32BIT, 0, 0, "HWSTAM"}, \
- { RING_HWS_PGA(0), REG_32BIT, 0, 0, "RING_HWS_PGA"}, \
- { RING_HEAD(0), REG_32BIT, 0, 0, "RING_HEAD"}, \
- { RING_TAIL(0), REG_32BIT, 0, 0, "RING_TAIL"}, \
- { RING_CTL(0), REG_32BIT, 0, 0, "RING_CTL"}, \
- { RING_MI_MODE(0), REG_32BIT, 0, 0, "RING_MI_MODE"}, \
- { RING_MODE(0), REG_32BIT, 0, 0, "RING_MODE"}, \
- { RING_ESR(0), REG_32BIT, 0, 0, "RING_ESR"}, \
- { RING_EMR(0), REG_32BIT, 0, 0, "RING_EMR"}, \
- { RING_EIR(0), REG_32BIT, 0, 0, "RING_EIR"}, \
- { RING_IMR(0), REG_32BIT, 0, 0, "RING_IMR"}, \
- { RING_IPEHR(0), REG_32BIT, 0, 0, "IPEHR"}, \
- { RING_INSTDONE(0), REG_32BIT, 0, 0, "RING_INSTDONE"}, \
- { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, "INDIRECT_RING_STATE"}, \
- { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, "ACTHD"}, \
- { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_BBADDR_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_BBADDR"}, \
- { RING_START(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_START_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_START"}, \
- { RING_DMA_FADD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_DMA_FADD_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_DMA_FADD"}, \
- { RING_EXECLIST_STATUS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_EXECLIST_STATUS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_STATUS"}, \
- { RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_SQ_CONTENTS"}
+ { RING_HWSTAM(0), REG_32BIT, 0, 0, 0, "HWSTAM"}, \
+ { RING_HWS_PGA(0), REG_32BIT, 0, 0, 0, "RING_HWS_PGA"}, \
+ { RING_HEAD(0), REG_32BIT, 0, 0, 0, "RING_HEAD"}, \
+ { RING_TAIL(0), REG_32BIT, 0, 0, 0, "RING_TAIL"}, \
+ { RING_CTL(0), REG_32BIT, 0, 0, 0, "RING_CTL"}, \
+ { RING_MI_MODE(0), REG_32BIT, 0, 0, 0, "RING_MI_MODE"}, \
+ { RING_MODE(0), REG_32BIT, 0, 0, 0, "RING_MODE"}, \
+ { RING_ESR(0), REG_32BIT, 0, 0, 0, "RING_ESR"}, \
+ { RING_EMR(0), REG_32BIT, 0, 0, 0, "RING_EMR"}, \
+ { RING_EIR(0), REG_32BIT, 0, 0, 0, "RING_EIR"}, \
+ { RING_IMR(0), REG_32BIT, 0, 0, 0, "RING_IMR"}, \
+ { RING_IPEHR(0), REG_32BIT, 0, 0, 0, "IPEHR"}, \
+ { RING_INSTDONE(0), REG_32BIT, 0, 0, 0, "RING_INSTDONE"}, \
+ { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, 0, "INDIRECT_RING_STATE"}, \
+ { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "ACTHD"}, \
+ { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_BBADDR_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_BBADDR"}, \
+ { RING_START(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_START_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_START"}, \
+ { RING_DMA_FADD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_DMA_FADD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_DMA_FADD"}, \
+ { RING_EXECLIST_STATUS_LO(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_EXECLIST_STATUS_HI(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_EXECLIST_STATUS"}, \
+ { RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_EXECLIST_SQ_CONTENTS"}
#define COMMON_XELP_RC_CLASS \
- { RCU_MODE, REG_32BIT, 0, 0, "RCU_MODE"}
+ { RCU_MODE, REG_32BIT, 0, 0, 0, "RCU_MODE"}
#define COMMON_XELP_RC_CLASS_INSTDONE \
- { SC_INSTDONE, REG_32BIT, 0, 0, "SC_INSTDONE"}, \
- { SC_INSTDONE_EXTRA, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA"}, \
- { SC_INSTDONE_EXTRA2, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA2"}
+ { SC_INSTDONE, REG_32BIT, 0, 0, 0, "SC_INSTDONE"}, \
+ { SC_INSTDONE_EXTRA, REG_32BIT, 0, 0, 0, "SC_INSTDONE_EXTRA"}, \
+ { SC_INSTDONE_EXTRA2, REG_32BIT, 0, 0, 0, "SC_INSTDONE_EXTRA2"}
#define XELP_VEC_CLASS_REGS \
- { SFC_DONE(0), 0, 0, 0, "SFC_DONE[0]"}, \
- { SFC_DONE(1), 0, 0, 0, "SFC_DONE[1]"}, \
- { SFC_DONE(2), 0, 0, 0, "SFC_DONE[2]"}, \
- { SFC_DONE(3), 0, 0, 0, "SFC_DONE[3]"}
+ { SFC_DONE(0), 0, 0, 0, 0, "SFC_DONE[0]"}, \
+ { SFC_DONE(1), 0, 0, 0, 0, "SFC_DONE[1]"}, \
+ { SFC_DONE(2), 0, 0, 0, 0, "SFC_DONE[2]"}, \
+ { SFC_DONE(3), 0, 0, 0, 0, "SFC_DONE[3]"}
/* XE_LP Global */
static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
@@ -352,15 +352,16 @@ static const struct __ext_steer_reg xehpg_extregs[] = {
static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
const struct __ext_steer_reg *extlist,
- int slice_id, int subslice_id)
+ u32 dss_id, u16 slice_id, u16 subslice_id)
{
if (!ext || !extlist)
return;
ext->reg = XE_REG(extlist->reg.__reg.addr);
ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1);
- ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
+ ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
+ ext->dss_id = dss_id;
ext->regname = extlist->name;
}
@@ -397,7 +398,7 @@ static void guc_capture_alloc_steered_lists(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
u16 slice, subslice;
- int iter, i, total = 0;
+ int dss, i, total = 0;
const struct __guc_mmio_reg_descr_group *lists = guc->capture->reglists;
const struct __guc_mmio_reg_descr_group *list;
struct __guc_mmio_reg_descr_group *extlists;
@@ -454,15 +455,15 @@ static void guc_capture_alloc_steered_lists(struct xe_guc *guc)
/* For steering registers, the list is generated at run-time */
extarray = (struct __guc_mmio_reg_descr *)extlists[0].list;
- for_each_dss_steering(iter, gt, slice, subslice) {
+ for_each_dss_steering(dss, gt, slice, subslice) {
for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) {
- __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+ __fill_ext_reg(extarray, &xe_extregs[i], dss, slice, subslice);
++extarray;
}
if (has_xehpg_extregs)
for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
- __fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
+ __fill_ext_reg(extarray, &xehpg_extregs[i], dss, slice, subslice);
++extarray;
}
}
@@ -1672,18 +1673,16 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_
{
struct xe_gt *gt = snapshot->hwe->gt;
struct xe_device *xe = gt_to_xe(gt);
- struct xe_guc *guc = &gt->uc.guc;
struct xe_devcoredump *devcoredump = &xe->devcoredump;
struct xe_devcoredump_snapshot *devcore_snapshot = &devcoredump->snapshot;
struct gcap_reg_list_info *reginfo = NULL;
u32 i, last_value = 0;
- bool is_ext, low32_ready = false;
+ bool low32_ready = false;
if (!list || !list->list || list->num_regs == 0)
return;
XE_WARN_ON(!devcore_snapshot->matched_node);
- is_ext = list == guc->capture->extlists;
reginfo = &devcore_snapshot->matched_node->reginfo[type];
/*
@@ -1749,17 +1748,12 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_
*/
XE_WARN_ON(low32_ready);
- if (is_ext) {
- int dss, group, instance;
-
- group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags);
- instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags);
- dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance);
-
- drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value);
- } else {
+ if (FIELD_GET(GUC_REGSET_STEERING_NEEDED, reg_desc->flags))
+ drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname,
+ reg_desc->dss_id, value);
+ else
drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value);
- }
+
break;
}
}
diff --git a/drivers/gpu/drm/xe/xe_guc_capture_types.h b/drivers/gpu/drm/xe/xe_guc_capture_types.h
index ca2d390ccbee..6cb439115597 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_capture_types.h
@@ -39,6 +39,8 @@ struct __guc_mmio_reg_descr {
u32 flags;
/** @mask: The mask to apply */
u32 mask;
+ /** @dss_id: Cached index for steered registers */
+ u32 dss_id;
/** @regname: Name of the register */
const char *regname;
};
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 72ad576fc18e..2447de0ebedf 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -238,7 +238,8 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1088,6 +1089,7 @@ int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
return guc_ct_send_recv(ct, action, len, response_buffer, false);
}
+ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 *response_buffer)
@@ -1828,10 +1830,10 @@ static void ct_dead_print(struct xe_dead_ct *dead)
return;
}
- drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason);
/* Can't generate a genuine core dump at this point, so just do the good bits */
drm_puts(&lp, "**** Xe Device Coredump ****\n");
+ drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
xe_device_snapshot_print(xe, &lp);
drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c
index c569ff456e74..0b102ab46c4d 100644
--- a/drivers/gpu/drm/xe/xe_guc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c
@@ -17,101 +17,130 @@
#include "xe_macros.h"
#include "xe_pm.h"
-static struct xe_guc *node_to_guc(struct drm_info_node *node)
-{
- return node->info_ent->data;
-}
-
-static int guc_info(struct seq_file *m, void *data)
+/*
+ * guc_debugfs_show - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * This callback can be used in struct drm_info_list to describe debugfs
+ * files that are &xe_guc specific, in a similar way to how we handle &xe_gt
+ * specific files using &xe_gt_debugfs_simple_show.
+ *
+ * It is assumed that those debugfs files will be created in a directory
+ * whose grandparent struct dentry's d_inode->i_private points to the &xe_gt.
+ *
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0 # dent->d_parent->d_parent (d_inode->i_private == gt)
+ * │   ├── uc # dent->d_parent
+ * │   │   ├── guc_info # dent
+ * │   │   ├── guc_...
+ *
+ * This function assumes that &m->private will be set to the &struct
+ * drm_info_node corresponding to the instance of the info on a given &struct
+ * drm_minor (see struct drm_info_list.show for details).
+ *
+ * This function also assumes that struct drm_info_list.data will point to the
+ * function that will actually print the file content::
+ *
+ * int (*print)(struct xe_guc *, struct drm_printer *)
+ *
+ * Example::
+ *
+ * int foo(struct xe_guc *guc, struct drm_printer *p)
+ * {
+ * drm_printf(p, "enabled %d\n", guc->submission_state.enabled);
+ * return 0;
+ * }
+ *
+ * static const struct drm_info_list bar[] = {
+ * { name = "foo", .show = guc_debugfs_show, .data = foo },
+ * };
+ *
+ * parent = debugfs_create_dir("uc", gtdir);
+ * drm_debugfs_create_files(bar, ARRAY_SIZE(bar), parent, minor);
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int guc_debugfs_show(struct seq_file *m, void *data)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct dentry *parent = node->dent->d_parent;
+ struct dentry *grandparent = parent->d_parent;
+ struct xe_gt *gt = grandparent->d_inode->i_private;
+ struct xe_device *xe = gt_to_xe(gt);
+ int (*print)(struct xe_guc *, struct drm_printer *) = node->info_ent->data;
+ int ret;
xe_pm_runtime_get(xe);
- xe_guc_print_info(guc, &p);
+ ret = print(&gt->uc.guc, &p);
xe_pm_runtime_put(xe);
- return 0;
+ return ret;
}
-static int guc_log(struct seq_file *m, void *data)
+static int guc_log(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pm_runtime_get(xe);
- xe_guc_log_print(&guc->log, &p);
- xe_pm_runtime_put(xe);
-
+ xe_guc_log_print(&guc->log, p);
return 0;
}
-static int guc_log_dmesg(struct seq_file *m, void *data)
+static int guc_log_dmesg(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
-
- xe_pm_runtime_get(xe);
xe_guc_log_print_dmesg(&guc->log);
- xe_pm_runtime_put(xe);
-
return 0;
}
-static int guc_ctb(struct seq_file *m, void *data)
+static int guc_ctb(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pm_runtime_get(xe);
- xe_guc_ct_print(&guc->ct, &p, true);
- xe_pm_runtime_put(xe);
-
+ xe_guc_ct_print(&guc->ct, p, true);
return 0;
}
-static int guc_pc(struct seq_file *m, void *data)
+static int guc_pc(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pm_runtime_get(xe);
- xe_guc_pc_print(&guc->pc, &p);
- xe_pm_runtime_put(xe);
-
+ xe_guc_pc_print(&guc->pc, p);
return 0;
}
-static const struct drm_info_list debugfs_list[] = {
- {"guc_info", guc_info, 0},
- {"guc_log", guc_log, 0},
- {"guc_log_dmesg", guc_log_dmesg, 0},
- {"guc_ctb", guc_ctb, 0},
- {"guc_pc", guc_pc, 0},
+/*
+ * only for GuC debugfs files which can be safely used on the VF as well:
+ * - without access to the GuC privileged registers
+ * - without access to the PF specific GuC objects
+ */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
+ { "guc_info", .show = guc_debugfs_show, .data = xe_guc_print_info },
+ { "guc_ctb", .show = guc_debugfs_show, .data = guc_ctb },
+};
+
+/* For GuC debugfs files that require the SLPC support */
+static const struct drm_info_list slpc_debugfs_list[] = {
+ { "guc_pc", .show = guc_debugfs_show, .data = guc_pc },
+};
+
+/* everything else should be added here */
+static const struct drm_info_list pf_only_debugfs_list[] = {
+ { "guc_log", .show = guc_debugfs_show, .data = guc_log },
+ { "guc_log_dmesg", .show = guc_debugfs_show, .data = guc_log_dmesg },
};
void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent)
{
- struct drm_minor *minor = guc_to_xe(guc)->drm.primary;
- struct drm_info_list *local;
- int i;
-
-#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
- local = drmm_kmalloc(&guc_to_xe(guc)->drm, DEBUGFS_SIZE, GFP_KERNEL);
- if (!local)
- return;
+ struct xe_device *xe = guc_to_xe(guc);
+ struct drm_minor *minor = xe->drm.primary;
- memcpy(local, debugfs_list, DEBUGFS_SIZE);
-#undef DEBUGFS_SIZE
+ drm_debugfs_create_files(vf_safe_debugfs_list,
+ ARRAY_SIZE(vf_safe_debugfs_list),
+ parent, minor);
- for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
- local[i].data = guc;
+ if (!IS_SRIOV_VF(xe)) {
+ drm_debugfs_create_files(pf_only_debugfs_list,
+ ARRAY_SIZE(pf_only_debugfs_list),
+ parent, minor);
- drm_debugfs_create_files(local,
- ARRAY_SIZE(debugfs_list),
- parent, minor);
+ if (!xe->info.skip_guc_pc)
+ drm_debugfs_create_files(slpc_debugfs_list,
+ ARRAY_SIZE(slpc_debugfs_list),
+ parent, minor);
+ }
}
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
index 2a457dcf31d5..0fb48f8f05d8 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.c
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
@@ -17,36 +17,61 @@
#include "xe_hw_engine.h"
#include "xe_map.h"
#include "xe_mmio.h"
+#include "xe_sriov_pf_helpers.h"
#include "xe_trace_guc.h"
#define TOTAL_QUANTA 0x8000
-static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe)
+static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int index)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
- struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+ struct engine_activity_buffer *buffer;
u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
size_t offset;
- offset = offsetof(struct guc_engine_activity_data,
+ if (engine_activity->num_functions) {
+ buffer = &engine_activity->function_buffer;
+ offset = sizeof(struct guc_engine_activity_data) * index;
+ } else {
+ buffer = &engine_activity->device_buffer;
+ offset = 0;
+ }
+
+ offset += offsetof(struct guc_engine_activity_data,
engine_activity[guc_class][hwe->logical_instance]);
return IOSYS_MAP_INIT_OFFSET(&buffer->activity_bo->vmap, offset);
}
-static struct iosys_map engine_metadata_map(struct xe_guc *guc)
+static struct iosys_map engine_metadata_map(struct xe_guc *guc,
+ unsigned int index)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
- struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+ struct engine_activity_buffer *buffer;
+ size_t offset;
- return buffer->metadata_bo->vmap;
+ if (engine_activity->num_functions) {
+ buffer = &engine_activity->function_buffer;
+ offset = sizeof(struct guc_engine_activity_metadata) * index;
+ } else {
+ buffer = &engine_activity->device_buffer;
+ offset = 0;
+ }
+
+ return IOSYS_MAP_INIT_OFFSET(&buffer->metadata_bo->vmap, offset);
}
static int allocate_engine_activity_group(struct xe_guc *guc)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
struct xe_device *xe = guc_to_xe(guc);
- u32 num_activity_group = 1; /* Will be modified for VF */
+ u32 num_activity_group;
+
+ /*
+ * An additional activity group is allocated for PF
+ */
+ num_activity_group = IS_SRIOV_PF(xe) ? xe_sriov_pf_get_totalvfs(xe) + 1 : 1;
engine_activity->eag = drmm_kcalloc(&xe->drm, num_activity_group,
sizeof(struct engine_activity_group), GFP_KERNEL);
@@ -60,10 +85,11 @@ static int allocate_engine_activity_group(struct xe_guc *guc)
}
static int allocate_engine_activity_buffers(struct xe_guc *guc,
- struct engine_activity_buffer *buffer)
+ struct engine_activity_buffer *buffer,
+ int count)
{
- u32 metadata_size = sizeof(struct guc_engine_activity_metadata);
- u32 size = sizeof(struct guc_engine_activity_data);
+ u32 metadata_size = sizeof(struct guc_engine_activity_metadata) * count;
+ u32 size = sizeof(struct guc_engine_activity_data) * count;
struct xe_gt *gt = guc_to_gt(guc);
struct xe_tile *tile = gt_to_tile(gt);
struct xe_bo *bo, *metadata_bo;
@@ -118,10 +144,11 @@ static bool is_engine_activity_supported(struct xe_guc *guc)
return true;
}
-static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe)
+static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe,
+ unsigned int index)
{
struct xe_guc *guc = &hwe->gt->uc.guc;
- struct engine_activity_group *eag = &guc->engine_activity.eag[0];
+ struct engine_activity_group *eag = &guc->engine_activity.eag[index];
u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
return &eag->engine[guc_class][hwe->logical_instance];
@@ -138,9 +165,10 @@ static u64 cpu_ns_to_guc_tsc_tick(ktime_t ns, u32 freq)
#define read_metadata_record(xe_, map_, field_) \
xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity_metadata, field_)
-static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int index)
{
- struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index);
struct guc_engine_activity *cached_activity = &ea->activity;
struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
@@ -151,8 +179,8 @@ static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
u64 active_ticks, gpm_ts;
u16 change_num;
- activity_map = engine_activity_map(guc, hwe);
- metadata_map = engine_metadata_map(guc);
+ activity_map = engine_activity_map(guc, hwe, index);
+ metadata_map = engine_metadata_map(guc, index);
global_change_num = read_metadata_record(xe, &metadata_map, global_change_num);
/* GuC has not initialized activity data yet, return 0 */
@@ -194,9 +222,9 @@ update:
return ea->total + ea->active;
}
-static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe, unsigned int index)
{
- struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index);
struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
struct guc_engine_activity *cached_activity = &ea->activity;
struct iosys_map activity_map, metadata_map;
@@ -205,8 +233,8 @@ static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
u64 numerator;
u16 quanta_ratio;
- activity_map = engine_activity_map(guc, hwe);
- metadata_map = engine_metadata_map(guc);
+ activity_map = engine_activity_map(guc, hwe, index);
+ metadata_map = engine_metadata_map(guc, index);
if (!cached_metadata->guc_tsc_frequency_hz)
cached_metadata->guc_tsc_frequency_hz = read_metadata_record(xe, &metadata_map,
@@ -245,12 +273,39 @@ static int enable_engine_activity_stats(struct xe_guc *guc)
return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}
-static void engine_activity_set_cpu_ts(struct xe_guc *guc)
+static int enable_function_engine_activity_stats(struct xe_guc *guc, bool enable)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
- struct engine_activity_group *eag = &engine_activity->eag[0];
+ u32 metadata_ggtt_addr = 0, ggtt_addr = 0, num_functions = 0;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ u32 action[6];
+ int len = 0;
+
+ if (enable) {
+ metadata_ggtt_addr = xe_bo_ggtt_addr(buffer->metadata_bo);
+ ggtt_addr = xe_bo_ggtt_addr(buffer->activity_bo);
+ num_functions = engine_activity->num_functions;
+ }
+
+ action[len++] = XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER;
+ action[len++] = num_functions;
+ action[len++] = metadata_ggtt_addr;
+ action[len++] = 0;
+ action[len++] = ggtt_addr;
+ action[len++] = 0;
+
+ /* Blocking here to ensure the buffers are ready before reading them */
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static void engine_activity_set_cpu_ts(struct xe_guc *guc, unsigned int index)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_group *eag = &engine_activity->eag[index];
int i, j;
+ xe_gt_assert(guc_to_gt(guc), index < engine_activity->num_activity_group);
+
for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++)
for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; j++)
eag->engine[i][j].last_cpu_ts = ktime_get();
@@ -265,34 +320,107 @@ static u32 gpm_timestamp_shift(struct xe_gt *gt)
return 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
}
+static bool is_function_valid(struct xe_guc *guc, unsigned int fn_id)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+
+ if (!IS_SRIOV_PF(xe) && fn_id)
+ return false;
+
+ if (engine_activity->num_functions && fn_id >= engine_activity->num_functions)
+ return false;
+
+ return true;
+}
+
+static int engine_activity_disable_function_stats(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ int ret;
+
+ if (!engine_activity->num_functions)
+ return 0;
+
+ ret = enable_function_engine_activity_stats(guc, false);
+ if (ret)
+ return ret;
+
+ free_engine_activity_buffers(buffer);
+ engine_activity->num_functions = 0;
+
+ return 0;
+}
+
+static int engine_activity_enable_function_stats(struct xe_guc *guc, int num_vfs)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ int ret, i;
+
+ if (!num_vfs)
+ return 0;
+
+ /* This includes 1 PF and num_vfs */
+ engine_activity->num_functions = num_vfs + 1;
+
+ ret = allocate_engine_activity_buffers(guc, buffer, engine_activity->num_functions);
+ if (ret)
+ return ret;
+
+ ret = enable_function_engine_activity_stats(guc, true);
+ if (ret) {
+ free_engine_activity_buffers(buffer);
+ engine_activity->num_functions = 0;
+ return ret;
+ }
+
+ /* skip PF as it was already setup */
+ for (i = 1; i < engine_activity->num_functions; i++)
+ engine_activity_set_cpu_ts(guc, i);
+
+ return 0;
+}
+
/**
* xe_guc_engine_activity_active_ticks - Get engine active ticks
* @guc: The GuC object
* @hwe: The hw_engine object
+ * @fn_id: function id to report on
*
* Return: accumulated ticks @hwe was active since engine activity stats were enabled.
*/
-u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id)
{
if (!xe_guc_engine_activity_supported(guc))
return 0;
- return get_engine_active_ticks(guc, hwe);
+ if (!is_function_valid(guc, fn_id))
+ return 0;
+
+ return get_engine_active_ticks(guc, hwe, fn_id);
}
/**
* xe_guc_engine_activity_total_ticks - Get engine total ticks
* @guc: The GuC object
* @hwe: The hw_engine object
+ * @fn_id: function id to report on
*
* Return: accumulated quanta of ticks allocated for the engine
*/
-u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id)
{
if (!xe_guc_engine_activity_supported(guc))
return 0;
- return get_engine_total_ticks(guc, hwe);
+ if (!is_function_valid(guc, fn_id))
+ return 0;
+
+ return get_engine_total_ticks(guc, hwe, fn_id);
}
/**
@@ -311,6 +439,25 @@ bool xe_guc_engine_activity_supported(struct xe_guc *guc)
}
/**
+ * xe_guc_engine_activity_function_stats - Enable/Disable per-function engine activity stats
+ * @guc: The GuC object
+ * @num_vfs: number of vfs
+ * @enable: true to enable, false otherwise
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+int xe_guc_engine_activity_function_stats(struct xe_guc *guc, int num_vfs, bool enable)
+{
+ if (!xe_guc_engine_activity_supported(guc))
+ return 0;
+
+ if (enable)
+ return engine_activity_enable_function_stats(guc, num_vfs);
+
+ return engine_activity_disable_function_stats(guc);
+}
+
+/**
* xe_guc_engine_activity_enable_stats - Enable engine activity stats
* @guc: The GuC object
*
@@ -327,7 +474,7 @@ void xe_guc_engine_activity_enable_stats(struct xe_guc *guc)
if (ret)
xe_gt_err(guc_to_gt(guc), "failed to enable activity stats%d\n", ret);
else
- engine_activity_set_cpu_ts(guc);
+ engine_activity_set_cpu_ts(guc, 0);
}
static void engine_activity_fini(void *arg)
@@ -360,7 +507,7 @@ int xe_guc_engine_activity_init(struct xe_guc *guc)
return ret;
}
- ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer);
+ ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer, 1);
if (ret) {
xe_gt_err(gt, "failed to allocate engine activity buffers (%pe)\n", ERR_PTR(ret));
return ret;
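
The per-function plumbing above keeps one activity record per function in a single buffer, the PF at index 0 followed by the VFs, so the record for function N simply starts N record-sizes into the buffer. A small sketch of that offset computation with an invented record layout (the real GuC ABI structs differ):

	#include <linux/types.h>

	struct example_activity_record {	/* invented, for illustration only */
		u64 active_ticks;
		u64 quanta_ratio;
	};

	/* PF is function 0, VF n is function n; device-global mode has no index. */
	static size_t example_record_offset(unsigned int fn_id, bool per_function)
	{
		return per_function ? sizeof(struct example_activity_record) * fn_id : 0;
	}
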
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.h b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
index a042d4cb404c..b32926c2d208 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.h
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
@@ -14,6 +14,9 @@ struct xe_guc;
int xe_guc_engine_activity_init(struct xe_guc *guc);
bool xe_guc_engine_activity_supported(struct xe_guc *guc);
void xe_guc_engine_activity_enable_stats(struct xe_guc *guc);
-u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe);
-u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe);
+int xe_guc_engine_activity_function_stats(struct xe_guc *guc, int num_vfs, bool enable);
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id);
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id);
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
index 5cdd034b6b70..48f69ddefa36 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
@@ -79,14 +79,24 @@ struct xe_guc_engine_activity {
/** @num_activity_group: number of activity groups */
u32 num_activity_group;
+ /** @num_functions: number of functions */
+ u32 num_functions;
+
/** @supported: indicates support for engine activity stats */
bool supported;
- /** @eag: holds the device level engine activity data */
+ /**
+ * @eag: holds the device level engine activity data in native mode.
+	 * In SRIOV mode, points to an array with entries which hold the engine
+	 * activity data for the PF and VFs
+ */
struct engine_activity_group *eag;
/** @device_buffer: buffer object for global engine activity */
struct engine_activity_buffer device_buffer;
+
+ /** @function_buffer: buffer object for per-function engine activity */
+ struct engine_activity_buffer function_buffer;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index 80514a446ba2..38039c411387 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -260,7 +260,8 @@ int xe_guc_log_init(struct xe_guc_log *log)
bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 85215313976c..18c623992035 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -462,6 +462,21 @@ static u32 get_cur_freq(struct xe_gt *gt)
}
/**
+ * xe_guc_pc_get_cur_freq_fw - With fw held, get requested frequency
+ * @pc: The GuC PC
+ *
+ * Returns: the requested frequency for that GT instance
+ */
+u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+
+ return get_cur_freq(gt);
+}
+
+/**
* xe_guc_pc_get_cur_freq - Get Current requested frequency
* @pc: The GuC PC
* @freq: A pointer to a u32 where the freq value will be returned
@@ -1070,6 +1085,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
+ ret = -EIO;
goto out;
}
@@ -1169,7 +1185,8 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
bo = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index 39102b79602f..0a2664d5c811 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -22,6 +22,7 @@ void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p);
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq);
+u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 31bc2022bfc2..2ad38f6b103e 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -300,6 +300,8 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
primelockdep(guc);
+ guc->submission_state.initialized = true;
+
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
@@ -834,6 +836,13 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
+ /*
+ * If device is being wedged even before submission_state is
+ * initialized, there's nothing to do here.
+ */
+ if (!guc->submission_state.initialized)
+ return;
+
err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
guc_submit_wedged_fini, guc);
if (err) {
@@ -941,7 +950,7 @@ static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
return xe_sched_invalidate_job(job, 2);
}
- ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
+ ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(q->lrc[0]));
ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
/*
@@ -1170,9 +1179,12 @@ trigger_reset:
process_name = q->vm->xef->process_name;
pid = q->vm->xef->pid;
}
- xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
- xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
- q->guc->id, q->flags, process_name, pid);
+
+ if (!exec_queue_killed(q))
+ xe_gt_notice(guc_to_gt(guc),
+ "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id, q->flags, process_name, pid);
trace_xe_sched_job_timedout(job);
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 63bac64429a5..1fde7614fcc5 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -89,6 +89,11 @@ struct xe_guc {
struct mutex lock;
/** @submission_state.enabled: submission is enabled */
bool enabled;
+ /**
+ * @submission_state.initialized: mark when submission state is
+ * even initialized - before that not even the lock is valid
+ */
+ bool initialized;
/** @submission_state.fini_wq: submit fini wait queue */
wait_queue_head_t fini_wq;
} submission_state;
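
The new initialized flag backs the guard added to xe_guc_submit_wedge() above: wedging can be triggered before submission state (and its lock) have been set up, so the wedge path has to bail out early instead of taking an uninitialized mutex. A stripped-down sketch of that guard, with an invented example_guc type in place of the real one:

	#include <linux/mutex.h>
	#include <linux/types.h>

	struct example_guc {			/* invented, minimal stand-in */
		struct {
			struct mutex lock;
			bool enabled;
			bool initialized;	/* set at the end of submit init */
		} submission_state;
	};

	static void example_submit_wedge(struct example_guc *guc)
	{
		/* Device may be wedged before submission init ran at all. */
		if (!guc->submission_state.initialized)
			return;

		mutex_lock(&guc->submission_state.lock);
		guc->submission_state.enabled = false;	/* ban further submissions */
		mutex_unlock(&guc->submission_state.lock);
	}
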
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index c3cc0fa105e8..57b71956ddf4 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -19,29 +19,6 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end)
return (end - start) >> PAGE_SHIFT;
}
-/**
- * xe_mark_range_accessed() - mark a range is accessed, so core mm
- * have such information for memory eviction or write back to
- * hard disk
- * @range: the range to mark
- * @write: if write to this range, we mark pages in this range
- * as dirty
- */
-static void xe_mark_range_accessed(struct hmm_range *range, bool write)
-{
- struct page *page;
- u64 i, npages;
-
- npages = xe_npages_in_range(range->start, range->end);
- for (i = 0; i < npages; i++) {
- page = hmm_pfn_to_page(range->hmm_pfns[i]);
- if (write)
- set_page_dirty_lock(page);
-
- mark_page_accessed(page);
- }
-}
-
static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
struct hmm_range *range, struct rw_semaphore *notifier_sem)
{
@@ -331,7 +308,6 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
if (ret)
goto out_unlock;
- xe_mark_range_accessed(&hmm_range, write);
userptr->sg = &userptr->sgt;
xe_hmm_userptr_set_mapped(uvma);
userptr->notifier_seq = hmm_range.notifier_seq;
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 8c05fd30b7df..93241fd0a4ba 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -389,12 +389,6 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
blit_cctl_val,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
- /* Use Fixed slice CCS mode */
- { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
- XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
- XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
- RCU_MODE_FIXED_SLICE_CCS_MODE))
- },
/* Disable WMTP if HW doesn't support it */
{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
@@ -461,6 +455,12 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
+ /* Use Fixed slice CCS mode */
+ { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
+ XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
+ XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
+ RCU_MODE_FIXED_SLICE_CCS_MODE))
+ },
};
xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
index b53e8d2accdb..640950172088 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
@@ -32,14 +32,61 @@ bool xe_hw_engine_timeout_in_range(u64 timeout, u64 min, u64 max)
return timeout >= min && timeout <= max;
}
-static void kobj_xe_hw_engine_release(struct kobject *kobj)
+static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
{
kfree(kobj);
}
+static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->show) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->show(kobj, kattr, buf);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->store) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->store(kobj, kattr, buf, count);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
+ .show = xe_hw_engine_class_sysfs_attr_show,
+ .store = xe_hw_engine_class_sysfs_attr_store,
+};
+
static const struct kobj_type kobj_xe_hw_engine_type = {
- .release = kobj_xe_hw_engine_release,
- .sysfs_ops = &kobj_sysfs_ops
+ .release = xe_hw_engine_sysfs_kobj_release,
+ .sysfs_ops = &xe_hw_engine_class_sysfs_ops,
+};
+
+static const struct kobj_type kobj_xe_hw_engine_type_def = {
+ .release = xe_hw_engine_sysfs_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
};
static ssize_t job_timeout_max_store(struct kobject *kobj,
@@ -543,7 +590,7 @@ static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
if (!kobj)
return -ENOMEM;
- kobject_init(kobj, &kobj_xe_hw_engine_type);
+ kobject_init(kobj, &kobj_xe_hw_engine_type_def);
err = kobject_add(kobj, parent, "%s", ".defaults");
if (err)
goto err_object;
@@ -558,58 +605,8 @@ err_object:
kobject_put(kobj);
return err;
}
+ALLOW_ERROR_INJECTION(xe_add_hw_engine_class_defaults, ERRNO); /* See xe_pci_probe() */
-static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
-{
- kfree(kobj);
-}
-
-static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct xe_device *xe = kobj_to_xe(kobj);
- struct kobj_attribute *kattr;
- ssize_t ret = -EIO;
-
- kattr = container_of(attr, struct kobj_attribute, attr);
- if (kattr->show) {
- xe_pm_runtime_get(xe);
- ret = kattr->show(kobj, kattr, buf);
- xe_pm_runtime_put(xe);
- }
-
- return ret;
-}
-
-static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t count)
-{
- struct xe_device *xe = kobj_to_xe(kobj);
- struct kobj_attribute *kattr;
- ssize_t ret = -EIO;
-
- kattr = container_of(attr, struct kobj_attribute, attr);
- if (kattr->store) {
- xe_pm_runtime_get(xe);
- ret = kattr->store(kobj, kattr, buf, count);
- xe_pm_runtime_put(xe);
- }
-
- return ret;
-}
-
-static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
- .show = xe_hw_engine_class_sysfs_attr_show,
- .store = xe_hw_engine_class_sysfs_attr_store,
-};
-
-static const struct kobj_type xe_hw_engine_sysfs_kobj_type = {
- .release = xe_hw_engine_sysfs_kobj_release,
- .sysfs_ops = &xe_hw_engine_class_sysfs_ops,
-};
static void hw_engine_class_sysfs_fini(void *arg)
{
@@ -640,7 +637,7 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt)
if (!kobj)
return -ENOMEM;
- kobject_init(kobj, &xe_hw_engine_sysfs_kobj_type);
+ kobject_init(kobj, &kobj_xe_hw_engine_type);
err = kobject_add(kobj, gt->sysfs, "engines");
if (err)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 48d80ffdf7bb..eb293aec36a0 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -5,6 +5,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
+#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/units.h>
@@ -27,6 +28,7 @@ enum xe_hwmon_reg {
REG_PKG_POWER_SKU_UNIT,
REG_GT_PERF_STATUS,
REG_PKG_ENERGY_STATUS,
+ REG_FAN_SPEED,
};
enum xe_hwmon_reg_operation {
@@ -42,6 +44,13 @@ enum xe_hwmon_channel {
CHANNEL_MAX,
};
+enum xe_fan_channel {
+ FAN_1,
+ FAN_2,
+ FAN_3,
+ FAN_MAX,
+};
+
/*
* SF_* - scale factors for particular quantities according to hwmon spec.
*/
@@ -62,6 +71,16 @@ struct xe_hwmon_energy_info {
};
/**
+ * struct xe_hwmon_fan_info - to cache previous fan reading
+ */
+struct xe_hwmon_fan_info {
+ /** @reg_val_prev: previous fan reg val */
+ u32 reg_val_prev;
+ /** @time_prev: previous timestamp */
+ u64 time_prev;
+};
+
+/**
* struct xe_hwmon - xe hwmon data structure
*/
struct xe_hwmon {
@@ -79,6 +98,8 @@ struct xe_hwmon {
int scl_shift_time;
/** @ei: Energy info for energyN_input */
struct xe_hwmon_energy_info ei[CHANNEL_MAX];
+ /** @fi: Fan info for fanN_input */
+ struct xe_hwmon_fan_info fi[FAN_MAX];
};
static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
@@ -144,6 +165,14 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
return PCU_CR_PACKAGE_ENERGY_STATUS;
}
break;
+ case REG_FAN_SPEED:
+ if (channel == FAN_1)
+ return BMG_FAN_1_SPEED;
+ else if (channel == FAN_2)
+ return BMG_FAN_2_SPEED;
+ else if (channel == FAN_3)
+ return BMG_FAN_3_SPEED;
+ break;
default:
drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
break;
@@ -454,6 +483,7 @@ static const struct hwmon_channel_info * const hwmon_info[] = {
HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT, HWMON_F_INPUT),
NULL
};
@@ -480,6 +510,19 @@ static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
(uval & POWER_SETUP_I1_DATA_MASK));
}
+static int xe_hwmon_pcode_read_fan_control(const struct xe_hwmon *hwmon, u32 subcmd, u32 *uval)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+
+ /* Platforms that don't return correct value */
+ if (hwmon->xe->info.platform == XE_DG2 && subcmd == FSC_READ_NUM_FANS) {
+ *uval = 2;
+ return 0;
+ }
+
+ return xe_pcode_read(root_tile, PCODE_MBOX(FAN_SPEED_CONTROL, subcmd, 0), uval, NULL);
+}
+
static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
long *value, u32 scale_factor)
{
@@ -706,6 +749,75 @@ xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
}
static umode_t
+xe_hwmon_fan_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
+{
+ u32 uval;
+
+ if (!hwmon->xe->info.has_fan_control)
+ return 0;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ if (xe_hwmon_pcode_read_fan_control(hwmon, FSC_READ_NUM_FANS, &uval))
+ return 0;
+
+ return channel < uval ? 0444 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int
+xe_hwmon_fan_input_read(struct xe_hwmon *hwmon, int channel, long *val)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
+ struct xe_hwmon_fan_info *fi = &hwmon->fi[channel];
+ u64 rotations, time_now, time;
+ u32 reg_val;
+ int ret = 0;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_FAN_SPEED, channel));
+ time_now = get_jiffies_64();
+
+ /*
+ * HW register value is accumulated count of pulses from PWM fan with the scale
+ * of 2 pulses per rotation.
+ */
+ rotations = (reg_val - fi->reg_val_prev) / 2;
+
+ time = jiffies_delta_to_msecs(time_now - fi->time_prev);
+ if (unlikely(!time)) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ /*
+ * Calculate fan speed in RPM by time averaging two subsequent readings in minutes.
+ * RPM = number of rotations * msecs per minute / time in msecs
+ */
+ *val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time);
+
+ fi->reg_val_prev = reg_val;
+ fi->time_prev = time_now;
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
+
+static int
+xe_hwmon_fan_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ return xe_hwmon_fan_input_read(hwmon, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t
xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -730,6 +842,9 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
case hwmon_energy:
ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
break;
+ case hwmon_fan:
+ ret = xe_hwmon_fan_is_visible(hwmon, attr, channel);
+ break;
default:
ret = 0;
break;
@@ -765,6 +880,9 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
case hwmon_energy:
ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
break;
+ case hwmon_fan:
+ ret = xe_hwmon_fan_read(hwmon, attr, channel, val);
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -842,7 +960,7 @@ static void
xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
- long energy;
+ long energy, fan_speed;
u64 val_sku_unit = 0;
int channel;
struct xe_reg pkg_power_sku_unit;
@@ -866,6 +984,11 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
for (channel = 0; channel < CHANNEL_MAX; channel++)
if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
xe_hwmon_energy_get(hwmon, channel, &energy);
+
+ /* Initialize 'struct xe_hwmon_fan_info' with initial fan register reading. */
+ for (channel = 0; channel < FAN_MAX; channel++)
+ if (xe_hwmon_is_visible(hwmon, hwmon_fan, hwmon_fan_input, channel))
+ xe_hwmon_fan_input_read(hwmon, channel, &fan_speed);
}
static void xe_hwmon_mutex_destroy(void *arg)
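
The RPM derivation in xe_hwmon_fan_input_read() above boils down to pulses -> rotations -> rotations per minute. A standalone user-space sketch of the same arithmetic, with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

/* Two pulses per rotation; delta_ms is the time between the two readings. */
static uint64_t pulses_to_rpm(uint32_t now, uint32_t prev, uint64_t delta_ms)
{
        uint64_t rotations = (now - prev) / 2;

        /* RPM = rotations * (ms per minute) / elapsed ms, rounded up */
        return (rotations * 60000 + delta_ms - 1) / delta_ms;
}

int main(void)
{
        /* 3000 pulses over 30 s -> 1500 rotations -> 3000 RPM */
        printf("%llu RPM\n", (unsigned long long)pulses_to_rpm(3000, 0, 30000));
        return 0;
}
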
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 89393dcb53d9..63db66df064b 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -71,7 +71,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
lmtt->ops->lmtt_pte_num(level)),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
- XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_NEEDS_64K);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index df3ceddede07..61a2e87990a9 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -24,6 +24,7 @@
#include "xe_hw_fence.h"
#include "xe_map.h"
#include "xe_memirq.h"
+#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_trace_lrc.h"
#include "xe_vm.h"
@@ -37,6 +38,7 @@
#define LRC_ENGINE_CLASS GENMASK_ULL(63, 61)
#define LRC_ENGINE_INSTANCE GENMASK_ULL(53, 48)
+#define LRC_PPHWSP_SIZE SZ_4K
#define LRC_INDIRECT_RING_STATE_SIZE SZ_4K
static struct xe_device *
@@ -50,19 +52,22 @@ size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
struct xe_device *xe = gt_to_xe(gt);
size_t size;
+ /* Per-process HW status page (PPHWSP) */
+ size = LRC_PPHWSP_SIZE;
+
+ /* Engine context image */
switch (class) {
case XE_ENGINE_CLASS_RENDER:
if (GRAPHICS_VER(xe) >= 20)
- size = 4 * SZ_4K;
+ size += 3 * SZ_4K;
else
- size = 14 * SZ_4K;
+ size += 13 * SZ_4K;
break;
case XE_ENGINE_CLASS_COMPUTE:
- /* 14 pages since graphics_ver == 11 */
if (GRAPHICS_VER(xe) >= 20)
- size = 3 * SZ_4K;
+ size += 2 * SZ_4K;
else
- size = 14 * SZ_4K;
+ size += 13 * SZ_4K;
break;
default:
WARN(1, "Unknown engine class: %d", class);
@@ -71,7 +76,7 @@ size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
case XE_ENGINE_CLASS_VIDEO_DECODE:
case XE_ENGINE_CLASS_VIDEO_ENHANCE:
case XE_ENGINE_CLASS_OTHER:
- size = 2 * SZ_4K;
+ size += 1 * SZ_4K;
}
/* Add indirect ring state page */
@@ -650,7 +655,7 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
#define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_PARALLEL_PPHWSP_OFFSET 2048
-#define LRC_PPHWSP_SIZE SZ_4K
+#define LRC_ENGINE_ID_PPHWSP_OFFSET 2096
u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
{
@@ -684,7 +689,7 @@ static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc)
static u32 __xe_lrc_ctx_job_timestamp_offset(struct xe_lrc *lrc)
{
- /* The start seqno is stored in the driver-defined portion of PPHWSP */
+ /* This is stored in the driver-defined portion of PPHWSP */
return xe_lrc_pphwsp_offset(lrc) + LRC_CTX_JOB_TIMESTAMP_OFFSET;
}
@@ -694,11 +699,21 @@ static inline u32 __xe_lrc_parallel_offset(struct xe_lrc *lrc)
return xe_lrc_pphwsp_offset(lrc) + LRC_PARALLEL_PPHWSP_OFFSET;
}
+static inline u32 __xe_lrc_engine_id_offset(struct xe_lrc *lrc)
+{
+ return xe_lrc_pphwsp_offset(lrc) + LRC_ENGINE_ID_PPHWSP_OFFSET;
+}
+
static u32 __xe_lrc_ctx_timestamp_offset(struct xe_lrc *lrc)
{
return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP * sizeof(u32);
}
+static u32 __xe_lrc_ctx_timestamp_udw_offset(struct xe_lrc *lrc)
+{
+ return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP_UDW * sizeof(u32);
+}
+
static inline u32 __xe_lrc_indirect_ring_offset(struct xe_lrc *lrc)
{
/* Indirect ring state page is at the very end of LRC */
@@ -726,8 +741,10 @@ DECL_MAP_ADDR_HELPERS(regs)
DECL_MAP_ADDR_HELPERS(start_seqno)
DECL_MAP_ADDR_HELPERS(ctx_job_timestamp)
DECL_MAP_ADDR_HELPERS(ctx_timestamp)
+DECL_MAP_ADDR_HELPERS(ctx_timestamp_udw)
DECL_MAP_ADDR_HELPERS(parallel)
DECL_MAP_ADDR_HELPERS(indirect_ring)
+DECL_MAP_ADDR_HELPERS(engine_id)
#undef DECL_MAP_ADDR_HELPERS
@@ -743,18 +760,37 @@ u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc)
}
/**
+ * xe_lrc_ctx_timestamp_udw_ggtt_addr() - Get ctx timestamp udw GGTT address
+ * @lrc: Pointer to the lrc.
+ *
+ * Returns: ctx timestamp udw GGTT address
+ */
+u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc)
+{
+ return __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc);
+}
+
+/**
* xe_lrc_ctx_timestamp() - Read ctx timestamp value
* @lrc: Pointer to the lrc.
*
* Returns: ctx timestamp value
*/
-u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
+u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
{
struct xe_device *xe = lrc_to_xe(lrc);
struct iosys_map map;
+ u32 ldw, udw = 0;
map = __xe_lrc_ctx_timestamp_map(lrc);
- return xe_map_read32(xe, &map);
+ ldw = xe_map_read32(xe, &map);
+
+ if (xe->info.has_64bit_timestamp) {
+ map = __xe_lrc_ctx_timestamp_udw_map(lrc);
+ udw = xe_map_read32(xe, &map);
+ }
+
+ return (u64)udw << 32 | ldw;
}
/**
@@ -864,7 +900,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe)
static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
{
- u64 desc = xe_vm_pdp4_descriptor(vm, lrc->tile);
+ u64 desc = xe_vm_pdp4_descriptor(vm, gt_to_tile(lrc->gt));
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_UDW, upper_32_bits(desc));
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_LDW, lower_32_bits(desc));
@@ -877,6 +913,65 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
xe_bo_unpin(lrc->bo);
xe_bo_unlock(lrc->bo);
xe_bo_put(lrc->bo);
+ xe_bo_unpin_map_no_vm(lrc->bb_per_ctx_bo);
+}
+
+/*
+ * xe_lrc_setup_utilization() - Setup wa bb to assist in calculating active
+ * context run ticks.
+ * @lrc: Pointer to the lrc.
+ *
+ * Context Timestamp (CTX_TIMESTAMP) in the LRC accumulates the run ticks of the
+ * context, but only gets updated when the context switches out. In order to
+ * check how long a context has been active before it switches out, two things
+ * are required:
+ *
+ * (1) Determine if the context is running:
+ * To do so, we program the WA BB to set an initial value for CTX_TIMESTAMP in
+ * the LRC. The value chosen is 1 since 0 is the initial value when the LRC is
+ * initialized. During a query, we just check for this value to determine if the
+ * context is active. If the context switched out, it would overwrite this
+ * location with the actual CTX_TIMESTAMP MMIO value. Note that WA BB runs as
+ * the last part of context restore, so reusing this LRC location will not
+ * clobber anything.
+ *
+ * (2) Calculate the time that the context has been active for:
+ * The CTX_TIMESTAMP ticks only when the context is active. If a context is
+ * active, we just use the CTX_TIMESTAMP MMIO as the new value of utilization.
+ * While doing so, we need to read the CTX_TIMESTAMP MMIO for the specific
+ * engine instance. Since we do not know which instance the context is running
+ * on until it is scheduled, we also read the ENGINE_ID MMIO in the WA BB and
+ * store it in the PPHSWP.
+ */
+#define CONTEXT_ACTIVE 1ULL
+static void xe_lrc_setup_utilization(struct xe_lrc *lrc)
+{
+ u32 *cmd;
+
+ cmd = lrc->bb_per_ctx_bo->vmap.vaddr;
+
+ *cmd++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
+ *cmd++ = ENGINE_ID(0).addr;
+ *cmd++ = __xe_lrc_engine_id_ggtt_addr(lrc);
+ *cmd++ = 0;
+
+ *cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
+ *cmd++ = __xe_lrc_ctx_timestamp_ggtt_addr(lrc);
+ *cmd++ = 0;
+ *cmd++ = lower_32_bits(CONTEXT_ACTIVE);
+
+ if (lrc_to_xe(lrc)->info.has_64bit_timestamp) {
+ *cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
+ *cmd++ = __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc);
+ *cmd++ = 0;
+ *cmd++ = upper_32_bits(CONTEXT_ACTIVE);
+ }
+
+ *cmd++ = MI_BATCH_BUFFER_END;
+
+ xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR,
+ xe_bo_ggtt_addr(lrc->bb_per_ctx_bo) | 1);
+
}
#define PVC_CTX_ASID (0x2e + 1)
@@ -893,31 +988,42 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
void *init_data = NULL;
u32 arb_enable;
u32 lrc_size;
+ u32 bo_flags;
int err;
kref_init(&lrc->refcount);
+ lrc->gt = gt;
lrc->flags = 0;
lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class);
if (xe_gt_has_indirect_ring_state(gt))
lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE;
+ bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE;
+ if (vm && vm->xef) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
/*
* FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
* via VM bind calls.
*/
lrc->bo = xe_bo_create_pin_map(xe, tile, vm, lrc_size,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ bo_flags);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
+ lrc->bb_per_ctx_bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
+ ttm_bo_type_kernel,
+ bo_flags);
+ if (IS_ERR(lrc->bb_per_ctx_bo)) {
+ err = PTR_ERR(lrc->bb_per_ctx_bo);
+ goto err_lrc_finish;
+ }
+
lrc->size = lrc_size;
- lrc->tile = gt_to_tile(hwe->gt);
lrc->ring.size = ring_size;
lrc->ring.tail = 0;
- lrc->ctx_timestamp = 0;
xe_hw_fence_ctx_init(&lrc->fence_ctx, hwe->gt,
hwe->fence_irq, hwe->name);
@@ -990,7 +1096,10 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
_MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE));
+ lrc->ctx_timestamp = 0;
xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP, 0);
+ if (lrc_to_xe(lrc)->info.has_64bit_timestamp)
+ xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP_UDW, 0);
if (xe->info.has_asid && vm)
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
@@ -1019,6 +1128,8 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
map = __xe_lrc_start_seqno_map(lrc);
xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
+ xe_lrc_setup_utilization(lrc);
+
return 0;
err_lrc_finish:
@@ -1238,6 +1349,21 @@ struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc)
return __xe_lrc_parallel_map(lrc);
}
+/**
+ * xe_lrc_engine_id() - Read engine id value
+ * @lrc: Pointer to the lrc.
+ *
+ * Returns: engine id value
+ */
+static u32 xe_lrc_engine_id(struct xe_lrc *lrc)
+{
+ struct xe_device *xe = lrc_to_xe(lrc);
+ struct iosys_map map;
+
+ map = __xe_lrc_engine_id_map(lrc);
+ return xe_map_read32(xe, &map);
+}
+
static int instr_dw(u32 cmd_header)
{
/* GFXPIPE "SINGLE_DW" opcodes are a single dword */
@@ -1445,6 +1571,7 @@ static int dump_gfxpipe_command(struct drm_printer *p,
MATCH3D(3DSTATE_CLIP_MESH);
MATCH3D(3DSTATE_SBE_MESH);
MATCH3D(3DSTATE_CPSIZE_CONTROL_BUFFER);
+ MATCH3D(3DSTATE_COARSE_PIXEL);
MATCH3D(3DSTATE_DRAWING_RECTANGLE);
MATCH3D(3DSTATE_CHROMA_KEY);
@@ -1684,7 +1811,7 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc);
snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset;
snapshot->lrc_snapshot = NULL;
- snapshot->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
+ snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc));
snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc);
return snapshot;
}
@@ -1784,22 +1911,74 @@ void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot)
kfree(snapshot);
}
+static int get_ctx_timestamp(struct xe_lrc *lrc, u32 engine_id, u64 *reg_ctx_ts)
+{
+ u16 class = REG_FIELD_GET(ENGINE_CLASS_ID, engine_id);
+ u16 instance = REG_FIELD_GET(ENGINE_INSTANCE_ID, engine_id);
+ struct xe_hw_engine *hwe;
+ u64 val;
+
+ hwe = xe_gt_hw_engine(lrc->gt, class, instance, false);
+ if (xe_gt_WARN_ONCE(lrc->gt, !hwe || xe_hw_engine_is_reserved(hwe),
+ "Unexpected engine class:instance %d:%d for context utilization\n",
+ class, instance))
+ return -1;
+
+ if (lrc_to_xe(lrc)->info.has_64bit_timestamp)
+ val = xe_mmio_read64_2x32(&hwe->gt->mmio,
+ RING_CTX_TIMESTAMP(hwe->mmio_base));
+ else
+ val = xe_mmio_read32(&hwe->gt->mmio,
+ RING_CTX_TIMESTAMP(hwe->mmio_base));
+
+ *reg_ctx_ts = val;
+
+ return 0;
+}
+
/**
* xe_lrc_update_timestamp() - Update ctx timestamp
* @lrc: Pointer to the lrc.
* @old_ts: Old timestamp value
*
* Populate @old_ts current saved ctx timestamp, read new ctx timestamp and
- * update saved value.
+ * update saved value. With support for active contexts, the calculation may be
+ * slightly racy, so follow a read-again logic to ensure that the context is
+ * still active before returning the right timestamp.
*
* Returns: New ctx timestamp value
*/
-u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts)
+u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts)
{
+ u64 lrc_ts, reg_ts;
+ u32 engine_id;
+
*old_ts = lrc->ctx_timestamp;
- lrc->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
+ lrc_ts = xe_lrc_ctx_timestamp(lrc);
+ /* CTX_TIMESTAMP mmio read is invalid on VF, so return the LRC value */
+ if (IS_SRIOV_VF(lrc_to_xe(lrc))) {
+ lrc->ctx_timestamp = lrc_ts;
+ goto done;
+ }
+
+ if (lrc_ts == CONTEXT_ACTIVE) {
+ engine_id = xe_lrc_engine_id(lrc);
+ if (!get_ctx_timestamp(lrc, engine_id, &reg_ts))
+ lrc->ctx_timestamp = reg_ts;
+
+ /* read lrc again to ensure context is still active */
+ lrc_ts = xe_lrc_ctx_timestamp(lrc);
+ }
+
+ /*
+ * If context switched out, just use the lrc_ts. Note that this needs to
+ * be a separate if condition.
+ */
+ if (lrc_ts != CONTEXT_ACTIVE)
+ lrc->ctx_timestamp = lrc_ts;
+done:
trace_xe_lrc_update_timestamp(lrc, *old_ts);
return lrc->ctx_timestamp;
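
A condensed sketch of the read-again sampling above (helper names are hypothetical; the real flow is in xe_lrc_update_timestamp()): the LRC slot holds 1 (CONTEXT_ACTIVE) while the context runs, so a live MMIO read is only trusted if the slot still reads 1 afterwards:

u64 ts = read_lrc_slot();               /* 1 while running, final value once switched out */
if (ts == CONTEXT_ACTIVE) {
        u64 mmio = read_engine_mmio();  /* live counter of the engine the context runs on */

        ts = read_lrc_slot();           /* re-read: did it switch out meanwhile? */
        if (ts == CONTEXT_ACTIVE)
                ts = mmio;              /* still active - use the live value */
        /* else: switched out - ts already holds the saved final value */
}
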
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index 0b40f349ab95..eb6e8de8c939 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -120,7 +120,8 @@ void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer
void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot);
u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc);
-u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
+u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc);
+u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
@@ -136,6 +137,6 @@ u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
*
* Returns the current LRC timestamp
*/
-u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts);
+u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts);
#endif
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
index 71ecb453f811..ae24cf6f8dd9 100644
--- a/drivers/gpu/drm/xe/xe_lrc_types.h
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -25,8 +25,8 @@ struct xe_lrc {
/** @size: size of lrc including any indirect ring state page */
u32 size;
- /** @tile: tile which this LRC belongs to */
- struct xe_tile *tile;
+ /** @gt: gt which this LRC belongs to */
+ struct xe_gt *gt;
/** @flags: LRC flags */
#define XE_LRC_FLAG_INDIRECT_RING_STATE 0x1
@@ -52,7 +52,10 @@ struct xe_lrc {
struct xe_hw_fence_ctx fence_ctx;
/** @ctx_timestamp: readout value of CTX_TIMESTAMP on last update */
- u32 ctx_timestamp;
+ u64 ctx_timestamp;
+
+ /** @bb_per_ctx_bo: buffer object for per context batch wa buffer */
+ struct xe_bo *bb_per_ctx_bo;
};
struct xe_lrc_snapshot;
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
index 404fa2a456d5..49c45ec3e83c 100644
--- a/drivers/gpu/drm/xe/xe_memirq.c
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -86,7 +86,7 @@ static const char *guc_name(struct xe_guc *guc)
* This object needs to be 4KiB aligned.
*
* - _`Interrupt Source Report Page`: this is the equivalent of the
- * GEN11_GT_INTR_DWx registers, with each bit in those registers being
+ * GT_INTR_DWx registers, with each bit in those registers being
* mapped to a byte here. The offsets are the same, just bytes instead
* of bits. This object needs to be cacheline aligned.
*
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index df4282c71bf0..8f8e9fdfb2a8 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -97,7 +97,7 @@ struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
return tile->migrate->q;
}
-static void xe_migrate_fini(struct drm_device *dev, void *arg)
+static void xe_migrate_fini(void *arg)
{
struct xe_migrate *m = arg;
@@ -209,7 +209,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED |
XE_BO_FLAG_PAGETABLE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -401,7 +400,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
struct xe_vm *vm;
int err;
- m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
+ m = devm_kzalloc(xe->drm.dev, sizeof(*m), GFP_KERNEL);
if (!m)
return ERR_PTR(-ENOMEM);
@@ -455,7 +454,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
might_lock(&m->job_mutex);
fs_reclaim_release(GFP_KERNEL);
- err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
+ err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
if (err)
return ERR_PTR(err);
@@ -670,6 +669,7 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
u32 mocs = 0;
u32 tile_y = 0;
+ xe_gt_assert(gt, !(pitch & 3));
xe_gt_assert(gt, size / pitch <= S16_MAX);
xe_gt_assert(gt, pitch / 4 <= S16_MAX);
xe_gt_assert(gt, pitch <= U16_MAX);
@@ -779,10 +779,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
bool dst_is_pltt = dst->mem_type == XE_PL_TT;
bool src_is_vram = mem_type_is_vram(src->mem_type);
bool dst_is_vram = mem_type_is_vram(dst->mem_type);
+ bool type_device = src_bo->ttm.type == ttm_bo_type_device;
+ bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
bool copy_ccs = xe_device_has_flat_ccs(xe) &&
xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
- bool use_comp_pat = xe_device_has_flat_ccs(xe) &&
+ bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) &&
GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
/* Copying CCS between two different BOs is not supported yet. */
@@ -839,6 +841,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
avail_pts, avail_pts);
if (copy_system_ccs) {
+ xe_assert(xe, type_device);
ccs_size = xe_device_ccs_bytes(xe, src_L0);
batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
&ccs_ofs, &ccs_pt, 0,
@@ -849,7 +852,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
/* Add copy commands size here */
batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
- ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0));
+ ((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));
bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb)) {
@@ -878,7 +881,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (!copy_only_ccs)
emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
- if (xe_migrate_needs_ccs_emit(xe))
+ if (needs_ccs_emit)
flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
IS_DGFX(xe) ? src_is_vram : src_is_pltt,
dst_L0_ofs,
@@ -1177,7 +1180,7 @@ err:
err_sync:
/* Sync partial copies if any. FIXME: job_mutex? */
if (fence) {
- dma_fence_wait(m->fence, false);
+ dma_fence_wait(fence, false);
dma_fence_put(fence);
}
@@ -1547,7 +1550,7 @@ void xe_migrate_wait(struct xe_migrate *m)
static u32 pte_update_cmd_size(u64 size)
{
u32 num_dword;
- u64 entries = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+ u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);
XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
/*
@@ -1558,7 +1561,7 @@ static u32 pte_update_cmd_size(u64 size)
* 2 dword for the page table's physical location
* 2*n dword for value of pte to fill (each pte entry is 2 dwords)
*/
- num_dword = (1 + 2) * DIV_ROUND_UP(entries, 0x1ff);
+ num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, 0x1ff);
num_dword += entries * 2;
return num_dword;
@@ -1601,55 +1604,63 @@ enum xe_migrate_copy_dir {
XE_MIGRATE_COPY_TO_SRAM,
};
+#define XE_CACHELINE_BYTES 64ull
+#define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1)
+
static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
- unsigned long npages,
+ unsigned long len,
+ unsigned long sram_offset,
dma_addr_t *sram_addr, u64 vram_addr,
const enum xe_migrate_copy_dir dir)
{
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
+ bool use_usm_batch = xe->info.has_usm;
struct dma_fence *fence = NULL;
u32 batch_size = 2;
u64 src_L0_ofs, dst_L0_ofs;
- u64 round_update_size;
struct xe_sched_job *job;
struct xe_bb *bb;
u32 update_idx, pt_slot = 0;
+ unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
+ unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
+ PAGE_SIZE : 4;
int err;
- if (npages * PAGE_SIZE > MAX_PREEMPTDISABLE_TRANSFER)
- return ERR_PTR(-EINVAL);
+ if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
+ (sram_offset | vram_addr) & XE_CACHELINE_MASK))
+ return ERR_PTR(-EOPNOTSUPP);
- round_update_size = npages * PAGE_SIZE;
- batch_size += pte_update_cmd_size(round_update_size);
+ xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
+
+ batch_size += pte_update_cmd_size(len);
batch_size += EMIT_COPY_DW;
- bb = xe_bb_new(gt, batch_size, true);
+ bb = xe_bb_new(gt, batch_size, use_usm_batch);
if (IS_ERR(bb)) {
err = PTR_ERR(bb);
return ERR_PTR(err);
}
build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
- sram_addr, round_update_size);
+ sram_addr, len + sram_offset);
if (dir == XE_MIGRATE_COPY_TO_VRAM) {
- src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
+ src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
} else {
src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
- dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
+ dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
}
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, round_update_size,
- XE_PAGE_SIZE);
+ emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
job = xe_bb_create_migration_job(m->q, bb,
- xe_migrate_batch_base(m, true),
+ xe_migrate_batch_base(m, use_usm_batch),
update_idx);
if (IS_ERR(job)) {
err = PTR_ERR(job);
@@ -1694,7 +1705,7 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
dma_addr_t *src_addr,
u64 dst_addr)
{
- return xe_migrate_vram(m, npages, src_addr, dst_addr,
+ return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
XE_MIGRATE_COPY_TO_VRAM);
}
@@ -1715,10 +1726,193 @@ struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
u64 src_addr,
dma_addr_t *dst_addr)
{
- return xe_migrate_vram(m, npages, dst_addr, src_addr,
+ return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
XE_MIGRATE_COPY_TO_SRAM);
}
+static void xe_migrate_dma_unmap(struct xe_device *xe, dma_addr_t *dma_addr,
+ int len, int write)
+{
+ unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ for (i = 0; i < npages; ++i) {
+ if (!dma_addr[i])
+ break;
+
+ dma_unmap_page(xe->drm.dev, dma_addr[i], PAGE_SIZE,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ }
+ kfree(dma_addr);
+}
+
+static dma_addr_t *xe_migrate_dma_map(struct xe_device *xe,
+ void *buf, int len, int write)
+{
+ dma_addr_t *dma_addr;
+ unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ dma_addr = kcalloc(npages, sizeof(*dma_addr), GFP_KERNEL);
+ if (!dma_addr)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < npages; ++i) {
+ dma_addr_t addr;
+ struct page *page;
+
+ if (is_vmalloc_addr(buf))
+ page = vmalloc_to_page(buf);
+ else
+ page = virt_to_page(buf);
+
+ addr = dma_map_page(xe->drm.dev,
+ page, 0, PAGE_SIZE,
+ write ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(xe->drm.dev, addr))
+ goto err_fault;
+
+ dma_addr[i] = addr;
+ buf += PAGE_SIZE;
+ }
+
+ return dma_addr;
+
+err_fault:
+ xe_migrate_dma_unmap(xe, dma_addr, len, write);
+ return ERR_PTR(-EFAULT);
+}
+
+/**
+ * xe_migrate_access_memory - Access memory of a BO via GPU
+ *
+ * @m: The migration context.
+ * @bo: buffer object
+ * @offset: access offset into buffer object
+ * @buf: pointer to caller memory to read into or write from
+ * @len: length of access
+ * @write: write access
+ *
+ * Access memory of a BO via GPU either reading in or writing from a passed in
+ * pointer. Pointer is dma mapped for GPU access and GPU commands are issued to
+ * read to or write from pointer.
+ *
+ * Returns:
+ * 0 if successful, negative error code on failure.
+ */
+int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
+ unsigned long offset, void *buf, int len,
+ int write)
+{
+ struct xe_tile *tile = m->tile;
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_res_cursor cursor;
+ struct dma_fence *fence = NULL;
+ dma_addr_t *dma_addr;
+ unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
+ int bytes_left = len, current_page = 0;
+ void *orig_buf = buf;
+
+ xe_bo_assert_held(bo);
+
+ /* Use bounce buffer for small access and unaligned access */
+ if (len & XE_CACHELINE_MASK ||
+ ((uintptr_t)buf | offset) & XE_CACHELINE_MASK) {
+ int buf_offset = 0;
+
+ /*
+ * Less than ideal for large unaligned access but this should be
+ * fairly rare, can fixup if this becomes common.
+ */
+ do {
+ u8 bounce[XE_CACHELINE_BYTES];
+ void *ptr = (void *)bounce;
+ int err;
+ int copy_bytes = min_t(int, bytes_left,
+ XE_CACHELINE_BYTES -
+ (offset & XE_CACHELINE_MASK));
+ int ptr_offset = offset & XE_CACHELINE_MASK;
+
+ err = xe_migrate_access_memory(m, bo,
+ offset &
+ ~XE_CACHELINE_MASK,
+ (void *)ptr,
+ sizeof(bounce), 0);
+ if (err)
+ return err;
+
+ if (write) {
+ memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);
+
+ err = xe_migrate_access_memory(m, bo,
+ offset & ~XE_CACHELINE_MASK,
+ (void *)ptr,
+ sizeof(bounce), 0);
+ if (err)
+ return err;
+ } else {
+ memcpy(buf + buf_offset, ptr + ptr_offset,
+ copy_bytes);
+ }
+
+ bytes_left -= copy_bytes;
+ buf_offset += copy_bytes;
+ offset += copy_bytes;
+ } while (bytes_left);
+
+ return 0;
+ }
+
+ dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
+ if (IS_ERR(dma_addr))
+ return PTR_ERR(dma_addr);
+
+ xe_res_first(bo->ttm.resource, offset, bo->size - offset, &cursor);
+
+ do {
+ struct dma_fence *__fence;
+ u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
+ cursor.start;
+ int current_bytes;
+
+ if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
+ current_bytes = min_t(int, bytes_left,
+ MAX_PREEMPTDISABLE_TRANSFER);
+ else
+ current_bytes = min_t(int, bytes_left, cursor.size);
+
+ if (fence)
+ dma_fence_put(fence);
+
+ __fence = xe_migrate_vram(m, current_bytes,
+ (unsigned long)buf & ~PAGE_MASK,
+ dma_addr + current_page,
+ vram_addr, write ?
+ XE_MIGRATE_COPY_TO_VRAM :
+ XE_MIGRATE_COPY_TO_SRAM);
+ if (IS_ERR(__fence)) {
+ if (fence)
+ dma_fence_wait(fence, false);
+ fence = __fence;
+ goto out_err;
+ }
+ fence = __fence;
+
+ buf += current_bytes;
+ offset += current_bytes;
+ current_page = (int)(buf - orig_buf) / PAGE_SIZE;
+ bytes_left -= current_bytes;
+ if (bytes_left)
+ xe_res_next(&cursor, current_bytes);
+ } while (bytes_left);
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+out_err:
+ xe_migrate_dma_unmap(xe, dma_addr, len + page_offset, write);
+ return IS_ERR(fence) ? PTR_ERR(fence) : 0;
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_migrate.c"
#endif
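
A user-space sketch (hypothetical helpers, write path only) of the cacheline bounce-buffer scheme used above for accesses that are not XE_CACHELINE_BYTES aligned — each piece is widened to a full cacheline read-modify-write:

#include <stdint.h>
#include <string.h>

#define CL_BYTES 64u
#define CL_MASK  (CL_BYTES - 1)

static void copy_unaligned(uint8_t *dst_obj, uint64_t offset,
                           const uint8_t *buf, size_t len)
{
        while (len) {
                uint8_t bounce[CL_BYTES];
                size_t in_cl = offset & CL_MASK;
                size_t chunk = CL_BYTES - in_cl < len ? CL_BYTES - in_cl : len;

                /* read the whole cacheline, patch the interesting bytes, write back */
                memcpy(bounce, dst_obj + (offset & ~(uint64_t)CL_MASK), CL_BYTES);
                memcpy(bounce + in_cl, buf, chunk);
                memcpy(dst_obj + (offset & ~(uint64_t)CL_MASK), bounce, CL_BYTES);

                buf += chunk;
                offset += chunk;
                len -= chunk;
        }
}
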
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 6ff9a963425c..fb9839c1bae0 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -112,6 +112,10 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct ttm_resource *dst,
bool copy_only_ccs);
+int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
+ unsigned long offset, void *buf, int len,
+ int write);
+
#define XE_MIGRATE_CLEAR_FLAG_BO_DATA BIT(0)
#define XE_MIGRATE_CLEAR_FLAG_CCS_DATA BIT(1)
#define XE_MIGRATE_CLEAR_FLAG_FULL (XE_MIGRATE_CLEAR_FLAG_BO_DATA | \
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 70a36e777546..7357458bc0d2 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -75,12 +75,12 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
* is fine as it's going to the root tile's mmio, that's
* guaranteed to be initialized earlier in xe_mmio_probe_early()
*/
- mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR);
+ mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
if (tile_count < xe->info.tile_count) {
drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
- xe->info.tile_count, tile_count);
+ xe->info.tile_count, tile_count);
xe->info.tile_count = tile_count;
/*
@@ -128,7 +128,7 @@ int xe_mmio_probe_early(struct xe_device *xe)
*/
xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);
- if (xe->mmio.regs == NULL) {
+ if (!xe->mmio.regs) {
drm_err(&xe->drm, "failed to map registers\n");
return -EIO;
}
@@ -138,6 +138,7 @@ int xe_mmio_probe_early(struct xe_device *xe)
return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}
+ALLOW_ERROR_INJECTION(xe_mmio_probe_early, ERRNO); /* See xe_pci_probe() */
/**
* xe_mmio_init() - Initialize an MMIO instance
@@ -204,8 +205,9 @@ void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));
- if (!reg.vf && mmio->sriov_vf_gt)
- xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val);
+ if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
+ xe_gt_sriov_vf_write32(mmio->sriov_vf_gt ?:
+ mmio->tile->primary_gt, reg, val);
else
writel(val, mmio->regs + addr);
}
@@ -218,8 +220,9 @@ u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
/* Wa_15015404425 */
mmio_flush_pending_writes(mmio);
- if (!reg.vf && mmio->sriov_vf_gt)
- val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg);
+ if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
+ val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt ?:
+ mmio->tile->primary_gt, reg);
else
val = readl(mmio->regs + addr);
@@ -309,8 +312,8 @@ u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
return (u64)udw << 32 | ldw;
}
-static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
- u32 *out_val, bool atomic, bool expect_match)
+static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
+ u32 timeout_us, u32 *out_val, bool atomic, bool expect_match)
{
ktime_t cur = ktime_get_raw();
const ktime_t end = ktime_add_us(cur, timeout_us);
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index 31dade91a089..0c737413fcb6 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -775,22 +775,23 @@ void xe_mocs_init(struct xe_gt *gt)
void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
+ enum xe_force_wake_domains domain;
struct xe_mocs_info table;
unsigned int fw_ref, flags;
flags = get_mocs_settings(xe, &table);
+ domain = flags & HAS_LNCF_MOCS ? XE_FORCEWAKE_ALL : XE_FW_GT;
xe_pm_runtime_get_noresume(xe);
- fw_ref = xe_force_wake_get(gt_to_fw(gt),
- flags & HAS_LNCF_MOCS ?
- XE_FORCEWAKE_ALL : XE_FW_GT);
- if (!fw_ref)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), domain);
+
+ if (!xe_force_wake_ref_has_domain(fw_ref, domain))
goto err_fw;
table.ops->dump(&table, flags, gt, p);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
err_fw:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
}
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index 9f4632e39a1a..e4742e27e2cd 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -11,6 +11,7 @@
#include <drm/drm_module.h>
#include "xe_drv.h"
+#include "xe_configfs.h"
#include "xe_hw_fence.h"
#include "xe_pci.h"
#include "xe_pm.h"
@@ -29,17 +30,14 @@ struct xe_modparam xe_modparam = {
module_param_named(svm_notifier_size, xe_modparam.svm_notifier_size, uint, 0600);
MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size(in MiB), must be power of 2");
-module_param_named(always_migrate_to_vram, xe_modparam.always_migrate_to_vram, bool, 0444);
-MODULE_PARM_DESC(always_migrate_to_vram, "Always migrate to VRAM on GPU fault");
-
module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
module_param_named(probe_display, xe_modparam.probe_display, bool, 0444);
MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched (default: true)");
-module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, uint, 0600);
-MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)");
+module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, int, 0600);
+MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size (in MiB) - <0=disable-resize, 0=max-needed-size[default], >0=force-size");
module_param_named(guc_log_level, xe_modparam.guc_log_level, int, 0600);
MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1..5=enable with verbosity min..max)");
@@ -89,6 +87,10 @@ static const struct init_funcs init_funcs[] = {
.init = xe_check_nomodeset,
},
{
+ .init = xe_configfs_init,
+ .exit = xe_configfs_exit,
+ },
+ {
.init = xe_hw_fence_module_init,
.exit = xe_hw_fence_module_exit,
},
diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
index 84339e509c80..5a3bfea8b7b4 100644
--- a/drivers/gpu/drm/xe/xe_module.h
+++ b/drivers/gpu/drm/xe/xe_module.h
@@ -12,7 +12,6 @@
struct xe_modparam {
bool force_execlist;
bool probe_display;
- bool always_migrate_to_vram;
u32 force_vram_bar_size;
int guc_log_level;
char *guc_firmware_path;
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 7ffc98f67e69..fb842fa0552e 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -1301,7 +1301,7 @@ static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_fr
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(oa->xe, err))
return -EFAULT;
@@ -1338,7 +1338,7 @@ static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from fro
if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(oa->xe, err))
return -EFAULT;
@@ -2221,6 +2221,7 @@ addr_err:
kfree(oa_regs);
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_oa_alloc_regs, ERRNO);
static ssize_t show_dynamic_id(struct kobject *kobj,
struct kobj_attribute *attr,
@@ -2280,7 +2281,7 @@ int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *fi
return -EACCES;
}
- err = __copy_from_user(&param, u64_to_user_ptr(data), sizeof(param));
+ err = copy_from_user(&param, u64_to_user_ptr(data), sizeof(param));
if (XE_IOCTL_DBG(oa->xe, err))
return -EFAULT;
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 818f023166d5..024175cfe61e 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -62,11 +62,13 @@ struct xe_device_desc {
u8 is_dgfx:1;
u8 has_display:1;
+ u8 has_fan_control:1;
u8 has_heci_gscfi:1;
u8 has_heci_cscfi:1;
u8 has_llc:1;
u8 has_pxp:1;
u8 has_sriov:1;
+ u8 needs_scratch:1;
u8 skip_guc_pc:1;
u8 skip_mtcfg:1;
u8 skip_pcode:1;
@@ -140,6 +142,7 @@ static const struct xe_graphics_desc graphics_xelpg = {
.has_indirect_ring_state = 1, \
.has_range_tlb_invalidation = 1, \
.has_usm = 1, \
+ .has_64bit_timestamp = 1, \
.va_bits = 48, \
.vm_max_level = 4, \
.hw_engine_mask = \
@@ -302,6 +305,7 @@ static const struct xe_device_desc dg2_desc = {
DG2_FEATURES,
.has_display = true,
+ .has_fan_control = true,
};
static const __maybe_unused struct xe_device_desc pvc_desc = {
@@ -329,6 +333,7 @@ static const struct xe_device_desc lnl_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_pxp = true,
+ .needs_scratch = true,
};
static const struct xe_device_desc bmg_desc = {
@@ -336,7 +341,9 @@ static const struct xe_device_desc bmg_desc = {
PLATFORM(BATTLEMAGE),
.dma_mask_size = 46,
.has_display = true,
+ .has_fan_control = true,
.has_heci_cscfi = 1,
+ .needs_scratch = true,
};
static const struct xe_device_desc ptl_desc = {
@@ -345,6 +352,7 @@ static const struct xe_device_desc ptl_desc = {
.has_display = true,
.has_sriov = true,
.require_force_probe = true,
+ .needs_scratch = true,
};
#undef PLATFORM
@@ -575,6 +583,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.dma_mask_size = desc->dma_mask_size;
xe->info.is_dgfx = desc->is_dgfx;
+ xe->info.has_fan_control = desc->has_fan_control;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
xe->info.has_heci_cscfi = desc->has_heci_cscfi;
xe->info.has_llc = desc->has_llc;
@@ -583,6 +592,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.skip_guc_pc = desc->skip_guc_pc;
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
+ xe->info.needs_scratch = desc->needs_scratch;
xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
xe_modparam.probe_display &&
@@ -668,6 +678,7 @@ static int xe_info_init(struct xe_device *xe,
xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
xe->info.has_usm = graphics_desc->has_usm;
+ xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;
for_each_remote_tile(tile, xe, id) {
int err;
@@ -733,7 +744,7 @@ static void xe_pci_remove(struct pci_dev *pdev)
return;
xe_device_remove(xe);
- xe_pm_runtime_fini(xe);
+ xe_pm_fini(xe);
}
/*
@@ -803,18 +814,17 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
err = xe_device_probe_early(xe);
- if (err) {
- /*
- * In Boot Survivability mode, no drm card is exposed and driver
- * is loaded with bare minimum to allow for firmware to be
- * flashed through mei. If early probe failed, but it managed to
- * enable survivability mode, return success.
- */
- if (xe_survivability_mode_is_enabled(xe))
- return 0;
+ /*
+ * In Boot Survivability mode, no drm card is exposed and driver
+ * is loaded with bare minimum to allow for firmware to be
+ * flashed through mei. Return success, if survivability mode
+ * is enabled due to pcode failure or configfs being set
+ */
+ if (xe_survivability_mode_is_enabled(xe))
+ return 0;
+ if (err)
return err;
- }
err = xe_info_init(xe, desc);
if (err)
@@ -920,6 +930,7 @@ static int xe_pci_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
index 09ee8a06fe2e..8813efdcafbb 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -7,6 +7,8 @@
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc_engine_activity.h"
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
@@ -111,6 +113,20 @@ static void pf_link_vfs(struct xe_device *xe, int num_vfs)
}
}
+static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs, bool enable)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret = 0;
+
+ for_each_gt(gt, xe, id) {
+ ret = xe_guc_engine_activity_function_stats(&gt->uc.guc, num_vfs, enable);
+ if (ret)
+ xe_gt_sriov_info(gt, "Failed to %s engine activity function stats (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(ret));
+ }
+}
+
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -145,6 +161,9 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
num_vfs, total_vfs, str_plural(total_vfs));
+
+ pf_engine_activity_stats(xe, num_vfs, true);
+
return num_vfs;
failed:
@@ -168,6 +187,8 @@ static int pf_disable_vfs(struct xe_device *xe)
if (!num_vfs)
return 0;
+ pf_engine_activity_stats(xe, num_vfs, false);
+
pci_disable_sriov(pdev);
pf_reset_vfs(xe, num_vfs);
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index e9b9bbc138d3..ca6b10d35573 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -21,6 +21,7 @@ struct xe_graphics_desc {
u8 has_indirect_ring_state:1;
u8 has_range_tlb_invalidation:1;
u8 has_usm:1;
+ u8 has_64bit_timestamp:1;
};
struct xe_media_desc {
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index 9333ce776a6e..cf955b3ed52c 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/error-injection.h>
#include <drm/drm_managed.h>
@@ -323,3 +324,4 @@ int xe_pcode_probe_early(struct xe_device *xe)
{
return xe_pcode_ready(xe, false);
}
+ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */
diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h
index 2bae9afdbd35..127d4d26c4cf 100644
--- a/drivers/gpu/drm/xe/xe_pcode_api.h
+++ b/drivers/gpu/drm/xe/xe_pcode_api.h
@@ -34,6 +34,7 @@
#define DGFX_PCODE_STATUS 0x7E
#define DGFX_GET_INIT_STATUS 0x0
#define DGFX_INIT_STATUS_COMPLETE 0x1
+#define DGFX_LINK_DOWNGRADE_STATUS REG_BIT(31)
#define PCODE_POWER_SETUP 0x7C
#define POWER_SETUP_SUBCOMMAND_READ_I1 0x4
@@ -49,6 +50,9 @@
/* Domain IDs (param2) */
#define PCODE_MBOX_DOMAIN_HBM 0x2
+#define FAN_SPEED_CONTROL 0x7D
+#define FSC_READ_NUM_FANS 0x4
+
#define PCODE_SCRATCH(x) XE_REG(0x138320 + ((x) * 4))
/* PCODE_SCRATCH0 */
#define AUXINFO_REG_OFFSET REG_GENMASK(17, 15)
@@ -63,6 +67,10 @@
/* Auxiliary info bits */
#define AUXINFO_HISTORY_OFFSET REG_GENMASK(31, 29)
+#define BMG_PCIE_CAP XE_REG(0x138340)
+#define LINK_DOWNGRADE REG_GENMASK(1, 0)
+#define DOWNGRADE_CAPABLE 2
+
struct pcode_err_decode {
int errno;
const char *str;
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 7b6b754ad6eb..ff749edc005b 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -16,7 +16,6 @@
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
-#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
@@ -188,7 +187,7 @@ int xe_pm_resume(struct xe_device *xe)
* This only restores pinned memory which is the memory required for the
* GT(s) to resume.
*/
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
if (err)
goto err;
@@ -199,7 +198,7 @@ int xe_pm_resume(struct xe_device *xe)
xe_display_pm_resume(xe);
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err)
goto err;
@@ -273,6 +272,7 @@ int xe_pm_init_early(struct xe_device *xe)
if (err)
return err;
+ xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
return 0;
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
@@ -286,6 +286,42 @@ static u32 vram_threshold_value(struct xe_device *xe)
return DEFAULT_VRAM_THRESHOLD;
}
+static int xe_pm_notifier_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
+ int err = 0;
+
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ xe_pm_runtime_get(xe);
+ err = xe_bo_evict_all_user(xe);
+ if (err) {
+ drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
+ xe_pm_runtime_put(xe);
+ break;
+ }
+
+ err = xe_bo_notifier_prepare_all_pinned(xe);
+ if (err) {
+ drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
+ xe_pm_runtime_put(xe);
+ }
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ xe_bo_notifier_unprepare_all_pinned(xe);
+ xe_pm_runtime_put(xe);
+ break;
+ }
+
+ if (err)
+ return NOTIFY_BAD;
+
+ return NOTIFY_DONE;
+}
+
/**
* xe_pm_init - Initialize Xe Power Management
* @xe: xe device instance
@@ -299,33 +335,31 @@ int xe_pm_init(struct xe_device *xe)
u32 vram_threshold;
int err;
+ xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
+ err = register_pm_notifier(&xe->pm_notifier);
+ if (err)
+ return err;
+
/* For now suspend/resume is only allowed with GuC */
if (!xe_device_uc_enabled(xe))
return 0;
- xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
-
if (xe->d3cold.capable) {
- err = xe_device_sysfs_init(xe);
- if (err)
- return err;
-
vram_threshold = vram_threshold_value(xe);
err = xe_pm_set_vram_threshold(xe, vram_threshold);
if (err)
- return err;
+ goto err_unregister;
}
xe_pm_runtime_init(xe);
-
return 0;
+
+err_unregister:
+ unregister_pm_notifier(&xe->pm_notifier);
+ return err;
}
-/**
- * xe_pm_runtime_fini - Finalize Runtime PM
- * @xe: xe device instance
- */
-void xe_pm_runtime_fini(struct xe_device *xe)
+static void xe_pm_runtime_fini(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
@@ -333,6 +367,18 @@ void xe_pm_runtime_fini(struct xe_device *xe)
pm_runtime_forbid(dev);
}
+/**
+ * xe_pm_fini - Finalize PM
+ * @xe: xe device instance
+ */
+void xe_pm_fini(struct xe_device *xe)
+{
+ if (xe_device_uc_enabled(xe))
+ xe_pm_runtime_fini(xe);
+
+ unregister_pm_notifier(&xe->pm_notifier);
+}
+
static void xe_pm_write_callback_task(struct xe_device *xe,
struct task_struct *task)
{
@@ -484,7 +530,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
* This only restores pinned memory which is the memory
* required for the GT(s) to resume.
*/
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
if (err)
goto out;
}
@@ -497,7 +543,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe_display_pm_runtime_resume(xe);
if (xe->d3cold.allowed) {
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err)
goto out;
}
@@ -641,7 +687,7 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
return dev->power.runtime_status == RPM_SUSPENDING ||
dev->power.runtime_status == RPM_RESUMING ||
- pm_suspend_target_state != PM_SUSPEND_ON;
+ pm_suspend_in_progress();
#else
return false;
#endif
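
The notifier wiring above follows a common shape; a minimal sketch for a hypothetical driver (struct demo_dev and its fields are assumptions, not part of the patch) — take a runtime-PM reference on *_PREPARE so the device cannot runtime-suspend while system suspend is being prepared, and drop it on POST_*:

#include <linux/pm_runtime.h>
#include <linux/suspend.h>

struct demo_dev {
        struct device *dev;
        struct notifier_block pm_nb;
};

static int demo_pm_notifier(struct notifier_block *nb, unsigned long action,
                            void *data)
{
        struct demo_dev *d = container_of(nb, struct demo_dev, pm_nb);

        switch (action) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                pm_runtime_get_sync(d->dev);    /* hold device awake for suspend prep */
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                pm_runtime_put(d->dev);         /* balance the reference after resume */
                break;
        }

        return NOTIFY_DONE;
}
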
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 998d1ed64556..59678b310e55 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -17,7 +17,7 @@ int xe_pm_resume(struct xe_device *xe);
int xe_pm_init_early(struct xe_device *xe);
int xe_pm_init(struct xe_device *xe);
-void xe_pm_runtime_fini(struct xe_device *xe);
+void xe_pm_fini(struct xe_device *xe);
bool xe_pm_runtime_suspended(struct xe_device *xe);
int xe_pm_runtime_suspend(struct xe_device *xe);
int xe_pm_runtime_resume(struct xe_device *xe);
diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c
index 4f62a6e515d6..69df0e3520a5 100644
--- a/drivers/gpu/drm/xe/xe_pmu.c
+++ b/drivers/gpu/drm/xe/xe_pmu.c
@@ -10,9 +10,11 @@
#include "xe_force_wake.h"
#include "xe_gt_idle.h"
#include "xe_guc_engine_activity.h"
+#include "xe_guc_pc.h"
#include "xe_hw_engine.h"
#include "xe_pm.h"
#include "xe_pmu.h"
+#include "xe_sriov_pf_helpers.h"
/**
* DOC: Xe PMU (Performance Monitoring Unit)
@@ -32,9 +34,10 @@
* gt[60:63] Selects gt for the event
* engine_class[20:27] Selects engine-class for event
* engine_instance[12:19] Selects the engine-instance for the event
+ * function[44:59] Selects the function of the event (SRIOV enabled)
*
* For engine specific events (engine-*), gt, engine_class and engine_instance parameters must be
- * set as populated by DRM_XE_DEVICE_QUERY_ENGINES.
+ * set as populated by DRM_XE_DEVICE_QUERY_ENGINES; the function parameter must also be set if SRIOV is enabled.
*
* For gt specific events (gt-*) gt parameter must be passed. All other parameters will be 0.
*
@@ -49,6 +52,7 @@
*/
#define XE_PMU_EVENT_GT_MASK GENMASK_ULL(63, 60)
+#define XE_PMU_EVENT_FUNCTION_MASK GENMASK_ULL(59, 44)
#define XE_PMU_EVENT_ENGINE_CLASS_MASK GENMASK_ULL(27, 20)
#define XE_PMU_EVENT_ENGINE_INSTANCE_MASK GENMASK_ULL(19, 12)
#define XE_PMU_EVENT_ID_MASK GENMASK_ULL(11, 0)
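As a minimal userspace sketch of the config encoding documented above (the helper names below are illustrative only and not part of this series; field positions follow the format attributes event config:0-11, engine_instance config:12-19, engine_class config:20-27, function config:44-59, gt config:60-63)::

    #include <stdint.h>

    /* Sketch only: compose perf_event_attr.config values per the documented layout. */
    static uint64_t example_gt_actual_freq_config(uint64_t gt)
    {
            /* event id 0x04 is gt-actual-frequency; all other fields stay 0 */
            return (0x04ULL << 0) | (gt << 60);
    }

    static uint64_t example_engine_active_ticks_config(uint64_t gt,
                                                       uint64_t engine_class,
                                                       uint64_t engine_instance,
                                                       uint64_t function)
    {
            /* event id 0x02 is engine-active-ticks; function selects PF(0) or a VF */
            return (0x02ULL << 0) | (engine_instance << 12) |
                   (engine_class << 20) | (function << 44) | (gt << 60);
    }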
@@ -58,6 +62,11 @@ static unsigned int config_to_event_id(u64 config)
return FIELD_GET(XE_PMU_EVENT_ID_MASK, config);
}
+static unsigned int config_to_function_id(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_FUNCTION_MASK, config);
+}
+
static unsigned int config_to_engine_class(u64 config)
{
return FIELD_GET(XE_PMU_EVENT_ENGINE_CLASS_MASK, config);
@@ -76,6 +85,8 @@ static unsigned int config_to_gt_id(u64 config)
#define XE_PMU_EVENT_GT_C6_RESIDENCY 0x01
#define XE_PMU_EVENT_ENGINE_ACTIVE_TICKS 0x02
#define XE_PMU_EVENT_ENGINE_TOTAL_TICKS 0x03
+#define XE_PMU_EVENT_GT_ACTUAL_FREQUENCY 0x04
+#define XE_PMU_EVENT_GT_REQUESTED_FREQUENCY 0x05
static struct xe_gt *event_to_gt(struct perf_event *event)
{
@@ -111,6 +122,14 @@ static bool is_engine_event(u64 config)
event_id == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
}
+static bool is_gt_frequency_event(struct perf_event *event)
+{
+ u32 id = config_to_event_id(event->attr.config);
+
+ return id == XE_PMU_EVENT_GT_ACTUAL_FREQUENCY ||
+ id == XE_PMU_EVENT_GT_REQUESTED_FREQUENCY;
+}
+
static bool event_gt_forcewake(struct perf_event *event)
{
struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
@@ -118,7 +137,7 @@ static bool event_gt_forcewake(struct perf_event *event)
struct xe_gt *gt;
unsigned int *fw_ref;
- if (!is_engine_event(config))
+ if (!is_engine_event(config) && !is_gt_frequency_event(event))
return true;
gt = xe_device_get_gt(xe, config_to_gt_id(config));
@@ -151,7 +170,7 @@ static bool event_supported(struct xe_pmu *pmu, unsigned int gt,
static bool event_param_valid(struct perf_event *event)
{
struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
- unsigned int engine_class, engine_instance;
+ unsigned int engine_class, engine_instance, function_id;
u64 config = event->attr.config;
struct xe_gt *gt;
@@ -161,16 +180,28 @@ static bool event_param_valid(struct perf_event *event)
engine_class = config_to_engine_class(config);
engine_instance = config_to_engine_instance(config);
+ function_id = config_to_function_id(config);
switch (config_to_event_id(config)) {
case XE_PMU_EVENT_GT_C6_RESIDENCY:
- if (engine_class || engine_instance)
+ case XE_PMU_EVENT_GT_ACTUAL_FREQUENCY:
+ case XE_PMU_EVENT_GT_REQUESTED_FREQUENCY:
+ if (engine_class || engine_instance || function_id)
return false;
break;
case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
if (!event_to_hwe(event))
return false;
+
+ /* Valid function ids are PF(0) through totalvfs when SRIOV is enabled */
+ if (IS_SRIOV_PF(xe)) {
+ if (function_id > xe_sriov_pf_get_totalvfs(xe))
+ return false;
+ } else if (function_id) {
+ return false;
+ }
+
break;
}
@@ -242,13 +273,17 @@ static int xe_pmu_event_init(struct perf_event *event)
static u64 read_engine_events(struct xe_gt *gt, struct perf_event *event)
{
struct xe_hw_engine *hwe;
- u64 val = 0;
+ unsigned int function_id;
+ u64 config, val = 0;
+
+ config = event->attr.config;
+ function_id = config_to_function_id(config);
hwe = event_to_hwe(event);
- if (config_to_event_id(event->attr.config) == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS)
- val = xe_guc_engine_activity_active_ticks(&gt->uc.guc, hwe);
+ if (config_to_event_id(config) == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS)
+ val = xe_guc_engine_activity_active_ticks(&gt->uc.guc, hwe, function_id);
else
- val = xe_guc_engine_activity_total_ticks(&gt->uc.guc, hwe);
+ val = xe_guc_engine_activity_total_ticks(&gt->uc.guc, hwe, function_id);
return val;
}
@@ -266,6 +301,10 @@ static u64 __xe_pmu_event_read(struct perf_event *event)
case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
return read_engine_events(gt, event);
+ case XE_PMU_EVENT_GT_ACTUAL_FREQUENCY:
+ return xe_guc_pc_get_act_freq(&gt->uc.guc.pc);
+ case XE_PMU_EVENT_GT_REQUESTED_FREQUENCY:
+ return xe_guc_pc_get_cur_freq_fw(&gt->uc.guc.pc);
}
return 0;
@@ -281,7 +320,14 @@ static void xe_pmu_event_update(struct perf_event *event)
new = __xe_pmu_event_read(event);
} while (!local64_try_cmpxchg(&hwc->prev_count, &prev, new));
- local64_add(new - prev, &event->count);
+ /*
+ * GT frequency is not a monotonically increasing counter, so add the
+ * instantaneous value instead.
+ */
+ if (is_gt_frequency_event(event))
+ local64_add(new, &event->count);
+ else
+ local64_add(new - prev, &event->count);
}
static void xe_pmu_event_read(struct perf_event *event)
@@ -351,6 +397,7 @@ static void xe_pmu_event_del(struct perf_event *event, int flags)
}
PMU_FORMAT_ATTR(gt, "config:60-63");
+PMU_FORMAT_ATTR(function, "config:44-59");
PMU_FORMAT_ATTR(engine_class, "config:20-27");
PMU_FORMAT_ATTR(engine_instance, "config:12-19");
PMU_FORMAT_ATTR(event, "config:0-11");
@@ -359,6 +406,7 @@ static struct attribute *pmu_format_attrs[] = {
&format_attr_event.attr,
&format_attr_engine_class.attr,
&format_attr_engine_instance.attr,
+ &format_attr_function.attr,
&format_attr_gt.attr,
NULL,
};
@@ -419,6 +467,10 @@ static ssize_t event_attr_show(struct device *dev,
XE_EVENT_ATTR_SIMPLE(gt-c6-residency, gt_c6_residency, XE_PMU_EVENT_GT_C6_RESIDENCY, "ms");
XE_EVENT_ATTR_NOUNIT(engine-active-ticks, engine_active_ticks, XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
XE_EVENT_ATTR_NOUNIT(engine-total-ticks, engine_total_ticks, XE_PMU_EVENT_ENGINE_TOTAL_TICKS);
+XE_EVENT_ATTR_SIMPLE(gt-actual-frequency, gt_actual_frequency,
+ XE_PMU_EVENT_GT_ACTUAL_FREQUENCY, "MHz");
+XE_EVENT_ATTR_SIMPLE(gt-requested-frequency, gt_requested_frequency,
+ XE_PMU_EVENT_GT_REQUESTED_FREQUENCY, "MHz");
static struct attribute *pmu_empty_event_attrs[] = {
/* Empty - all events are added as groups with .attr_update() */
@@ -434,6 +486,8 @@ static const struct attribute_group *pmu_events_attr_update[] = {
&pmu_group_gt_c6_residency,
&pmu_group_engine_active_ticks,
&pmu_group_engine_total_ticks,
+ &pmu_group_gt_actual_frequency,
+ &pmu_group_gt_requested_frequency,
NULL,
};
@@ -442,8 +496,11 @@ static void set_supported_events(struct xe_pmu *pmu)
struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
struct xe_gt *gt = xe_device_get_gt(xe, 0);
- if (!xe->info.skip_guc_pc)
+ if (!xe->info.skip_guc_pc) {
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_C6_RESIDENCY);
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_ACTUAL_FREQUENCY);
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_REQUESTED_FREQUENCY);
+ }
if (xe_guc_engine_activity_supported(&gt->uc.guc)) {
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index ffaf0d02dc7d..b04756a97cdc 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -103,6 +103,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
{
struct xe_pt *pt;
struct xe_bo *bo;
+ u32 bo_flags;
int err;
if (level) {
@@ -115,14 +116,16 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
if (!pt)
return ERR_PTR(-ENOMEM);
+ bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
+ XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
+ if (vm->xef) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
pt->level = level;
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
- XE_BO_FLAG_PINNED |
- XE_BO_FLAG_NO_RESV_EVICT |
- XE_BO_FLAG_PAGETABLE);
+ bo_flags);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto err_kfree;
@@ -269,8 +272,11 @@ struct xe_pt_update {
bool preexisting;
};
+/**
+ * struct xe_pt_stage_bind_walk - Walk state for the stage_bind walk.
+ */
struct xe_pt_stage_bind_walk {
- /** base: The base class. */
+ /** @base: The base class. */
struct xe_pt_walk base;
/* Input parameters for the walk */
@@ -278,15 +284,19 @@ struct xe_pt_stage_bind_walk {
struct xe_vm *vm;
/** @tile: The tile we're building for. */
struct xe_tile *tile;
- /** @default_pte: PTE flag only template. No address is associated */
- u64 default_pte;
+ /** @default_vram_pte: PTE flag only template for VRAM. No address is associated */
+ u64 default_vram_pte;
+ /** @default_system_pte: PTE flag only template for System. No address is associated */
+ u64 default_system_pte;
/** @dma_offset: DMA offset to add to the PTE. */
u64 dma_offset;
/**
- * @needs_64k: This address range enforces 64K alignment and
- * granularity.
+ * @needs_64K: This address range enforces 64K alignment and
+ * granularity on VRAM.
*/
bool needs_64K;
+ /** @clear_pt: clear page table entries during the bind walk */
+ bool clear_pt;
/**
* @vma: VMA being mapped
*/
@@ -299,6 +309,7 @@ struct xe_pt_stage_bind_walk {
u64 va_curs_start;
/* Output */
+ /** @wupd: Walk output data for page-table updates. */
struct xe_walk_update {
/** @wupd.entries: Caller provided storage. */
struct xe_vm_pgtable_update *entries;
@@ -316,7 +327,7 @@ struct xe_pt_stage_bind_walk {
u64 l0_end_addr;
/** @addr_64K: The start address of the current 64K chunk. */
u64 addr_64K;
- /** @found_64: Whether @add_64K actually points to a 64K chunk. */
+ /** @found_64K: Whether @addr_64K actually points to a 64K chunk. */
bool found_64K;
};
@@ -436,6 +447,10 @@ static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
if (xe_vma_is_null(xe_walk->vma))
return true;
+ /* If we are clearing the page table, there are no DMA addresses */
+ if (xe_walk->clear_pt)
+ return true;
+
/* Is the DMA address huge PTE size aligned? */
size = next - addr;
dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);
@@ -515,24 +530,35 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
struct xe_res_cursor *curs = xe_walk->curs;
bool is_null = xe_vma_is_null(xe_walk->vma);
+ bool is_vram = is_null ? false : xe_res_is_vram(curs);
XE_WARN_ON(xe_walk->va_curs_start != addr);
- pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
- xe_res_dma(curs) + xe_walk->dma_offset,
- xe_walk->vma, pat_index, level);
- pte |= xe_walk->default_pte;
+ if (xe_walk->clear_pt) {
+ pte = 0;
+ } else {
+ pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
+ xe_res_dma(curs) +
+ xe_walk->dma_offset,
+ xe_walk->vma,
+ pat_index, level);
+ if (!is_null)
+ pte |= is_vram ? xe_walk->default_vram_pte :
+ xe_walk->default_system_pte;
- /*
- * Set the XE_PTE_PS64 hint if possible, otherwise if
- * this device *requires* 64K PTE size for VRAM, fail.
- */
- if (level == 0 && !xe_parent->is_compact) {
- if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
- xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
- pte |= XE_PTE_PS64;
- } else if (XE_WARN_ON(xe_walk->needs_64K)) {
- return -EINVAL;
+ /*
+ * Set the XE_PTE_PS64 hint if possible, otherwise if
+ * this device *requires* 64K PTE size for VRAM, fail.
+ */
+ if (level == 0 && !xe_parent->is_compact) {
+ if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
+ xe_walk->vma->gpuva.flags |=
+ XE_VMA_PTE_64K;
+ pte |= XE_PTE_PS64;
+ } else if (XE_WARN_ON(xe_walk->needs_64K &&
+ is_vram)) {
+ return -EINVAL;
+ }
}
}
@@ -540,7 +566,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (unlikely(ret))
return ret;
- if (!is_null)
+ if (!is_null && !xe_walk->clear_pt)
xe_res_next(curs, next - addr);
xe_walk->va_curs_start = next;
xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
@@ -603,6 +629,44 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
.pt_entry = xe_pt_stage_bind_entry,
};
+/*
+ * Default atomic expectations for different allocation scenarios are as follows:
+ *
+ * 1. Traditional API: When the VM is not in LR mode:
+ * - Device atomics are expected to function with all allocations.
+ *
+ * 2. Compute/SVM API: When the VM is in LR mode:
+ * - Device atomics are the default behavior when the bo is placed in a single region.
+ * - In all other cases device atomics will be disabled with AE=0 until an application
+ * requests otherwise using an ioctl like madvise.
+ */
+static bool xe_atomic_for_vram(struct xe_vm *vm)
+{
+ return true;
+}
+
+static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
+{
+ struct xe_device *xe = vm->xe;
+
+ if (!xe->info.has_device_atomics_on_smem)
+ return false;
+
+ /*
+ * If a SMEM+LMEM allocation is backed by SMEM, a device
+ * atomic will cause a GPU page fault and the allocation
+ * then gets migrated to LMEM, so bind such allocations
+ * with device atomics enabled.
+ *
+ * TODO: Revisit this. Perhaps add something like a
+ * fault_on_atomics_in_system UAPI flag.
+ * Note that this also prohibits GPU atomics in LR mode for
+ * userptr and system memory on DGFX.
+ */
+ return (!IS_DGFX(xe) || (!xe_vm_in_lr_mode(vm) ||
+ (bo && xe_bo_has_single_placement(bo))));
+}
+
/**
* xe_pt_stage_bind() - Build a disconnected page-table tree for a given address
* range.
@@ -612,6 +676,7 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
* @entries: Storage for the update entries used for connecting the tree to
* the main tree at commit time.
* @num_entries: On output contains the number of @entries used.
+ * @clear_pt: Clear the page table entries.
*
* This function builds a disconnected page-table tree for a given address
* range. The tree is connected to the main vm tree for the gpu using
@@ -625,13 +690,13 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
static int
xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_svm_range *range,
- struct xe_vm_pgtable_update *entries, u32 *num_entries)
+ struct xe_vm_pgtable_update *entries,
+ u32 *num_entries, bool clear_pt)
{
struct xe_device *xe = tile_to_xe(tile);
struct xe_bo *bo = xe_vma_bo(vma);
- bool is_devmem = !xe_vma_is_userptr(vma) && bo &&
- (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo));
struct xe_res_cursor curs;
+ struct xe_vm *vm = xe_vma_vm(vma);
struct xe_pt_stage_bind_walk xe_walk = {
.base = {
.ops = &xe_pt_stage_bind_ops,
@@ -639,34 +704,31 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
.max_level = XE_PT_HIGHEST_LEVEL,
.staging = true,
},
- .vm = xe_vma_vm(vma),
+ .vm = vm,
.tile = tile,
.curs = &curs,
.va_curs_start = range ? range->base.itree.start :
xe_vma_start(vma),
.vma = vma,
.wupd.entries = entries,
+ .clear_pt = clear_pt,
};
- struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
+ struct xe_pt *pt = vm->pt_root[tile->id];
int ret;
if (range) {
/* Move this entire thing to xe_svm.c? */
- xe_svm_notifier_lock(xe_vma_vm(vma));
+ xe_svm_notifier_lock(vm);
if (!xe_svm_range_pages_valid(range)) {
xe_svm_range_debug(range, "BIND PREPARE - RETRY");
- xe_svm_notifier_unlock(xe_vma_vm(vma));
+ xe_svm_notifier_unlock(vm);
return -EAGAIN;
}
if (xe_svm_range_has_dma_mapping(range)) {
xe_res_first_dma(range->base.dma_addr, 0,
range->base.itree.last + 1 - range->base.itree.start,
&curs);
- is_devmem = xe_res_is_vram(&curs);
- if (is_devmem)
- xe_svm_range_debug(range, "BIND PREPARE - DMA VRAM");
- else
- xe_svm_range_debug(range, "BIND PREPARE - DMA");
+ xe_svm_range_debug(range, "BIND PREPARE - MIXED");
} else {
xe_assert(xe, false);
}
@@ -674,54 +736,21 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
* Note, when unlocking the resource cursor dma addresses may become
* stale, but the bind will be aborted anyway at commit time.
*/
- xe_svm_notifier_unlock(xe_vma_vm(vma));
+ xe_svm_notifier_unlock(vm);
}
- xe_walk.needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem;
+ xe_walk.needs_64K = (vm->flags & XE_VM_FLAG_64K);
+ if (clear_pt)
+ goto walk_pt;
- /**
- * Default atomic expectations for different allocation scenarios are as follows:
- *
- * 1. Traditional API: When the VM is not in LR mode:
- * - Device atomics are expected to function with all allocations.
- *
- * 2. Compute/SVM API: When the VM is in LR mode:
- * - Device atomics are the default behavior when the bo is placed in a single region.
- * - In all other cases device atomics will be disabled with AE=0 until an application
- * request differently using a ioctl like madvise.
- */
if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
- if (xe_vm_in_lr_mode(xe_vma_vm(vma))) {
- if (bo && xe_bo_has_single_placement(bo))
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
- /**
- * If a SMEM+LMEM allocation is backed by SMEM, a device
- * atomics will cause a gpu page fault and which then
- * gets migrated to LMEM, bind such allocations with
- * device atomics enabled.
- */
- else if (is_devmem)
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
- } else {
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
- }
-
- /**
- * Unset AE if the platform(PVC) doesn't support it on an
- * allocation
- */
- if (!xe->info.has_device_atomics_on_smem && !is_devmem)
- xe_walk.default_pte &= ~XE_USM_PPGTT_PTE_AE;
- }
-
- if (is_devmem) {
- xe_walk.default_pte |= XE_PPGTT_PTE_DM;
- xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0;
+ xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
+ xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
+ XE_USM_PPGTT_PTE_AE : 0;
}
- if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
- xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));
-
+ xe_walk.default_vram_pte |= XE_PPGTT_PTE_DM;
+ xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0;
if (!range)
xe_bo_assert_held(bo);
@@ -739,6 +768,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
curs.size = xe_vma_size(vma);
}
+walk_pt:
ret = xe_pt_walk_range(&pt->base, pt->level,
range ? range->base.itree.start : xe_vma_start(vma),
range ? range->base.itree.last + 1 : xe_vma_end(vma),
@@ -1103,12 +1133,14 @@ static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_svm_range *range,
- struct xe_vm_pgtable_update *entries, u32 *num_entries)
+ struct xe_vm_pgtable_update *entries,
+ u32 *num_entries, bool invalidate_on_bind)
{
int err;
*num_entries = 0;
- err = xe_pt_stage_bind(tile, vma, range, entries, num_entries);
+ err = xe_pt_stage_bind(tile, vma, range, entries, num_entries,
+ invalidate_on_bind);
if (!err)
xe_tile_assert(tile, *num_entries);
@@ -1420,6 +1452,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
return err;
}
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
{
struct xe_vm *vm = pt_update->vops->vm;
@@ -1453,6 +1486,7 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
return 0;
}
+#endif
struct invalidation_fence {
struct xe_gt_tlb_invalidation_fence base;
@@ -1791,7 +1825,7 @@ static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
- struct xe_vma *vma)
+ struct xe_vma *vma, bool invalidate_on_bind)
{
u32 current_op = pt_update_ops->current_op;
struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
@@ -1813,7 +1847,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
return err;
err = xe_pt_prepare_bind(tile, vma, NULL, pt_op->entries,
- &pt_op->num_entries);
+ &pt_op->num_entries, invalidate_on_bind);
if (!err) {
xe_tile_assert(tile, pt_op->num_entries <=
ARRAY_SIZE(pt_op->entries));
@@ -1835,11 +1869,11 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
* If !rebind, and scratch enabled VMs, there is a chance the scratch
* PTE is already cached in the TLB so it needs to be invalidated.
* On !LR VMs this is done in the ring ops preceding a batch, but on
- * non-faulting LR, in particular on user-space batch buffer chaining,
- * it needs to be done here.
+ * LR, in particular on user-space batch buffer chaining, it needs to
+ * be done here.
*/
if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
- xe_vm_in_preempt_fence_mode(vm)))
+ xe_vm_in_lr_mode(vm)))
pt_update_ops->needs_invalidation = true;
else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
/* We bump also if batch_invalidate_tlb is true */
@@ -1875,7 +1909,7 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
pt_op->rebind = BIT(tile->id) & range->tile_present;
err = xe_pt_prepare_bind(tile, vma, range, pt_op->entries,
- &pt_op->num_entries);
+ &pt_op->num_entries, false);
if (!err) {
xe_tile_assert(tile, pt_op->num_entries <=
ARRAY_SIZE(pt_op->entries));
@@ -1987,11 +2021,13 @@ static int op_prepare(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
+ if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
+ !op->map.invalidate_on_bind) ||
op->map.is_cpu_addr_mirror)
break;
- err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
+ err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
+ op->map.invalidate_on_bind);
pt_update_ops->wait_vm_kernel = true;
break;
case DRM_GPUVA_OP_REMAP:
@@ -2005,12 +2041,12 @@ static int op_prepare(struct xe_vm *vm,
if (!err && op->remap.prev) {
err = bind_op_prepare(vm, tile, pt_update_ops,
- op->remap.prev);
+ op->remap.prev, false);
pt_update_ops->wait_vm_bookkeep = true;
}
if (!err && op->remap.next) {
err = bind_op_prepare(vm, tile, pt_update_ops,
- op->remap.next);
+ op->remap.next, false);
pt_update_ops->wait_vm_bookkeep = true;
}
break;
@@ -2032,7 +2068,7 @@ static int op_prepare(struct xe_vm *vm,
if (xe_vma_is_cpu_addr_mirror(vma))
break;
- err = bind_op_prepare(vm, tile, pt_update_ops, vma);
+ err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
pt_update_ops->wait_vm_kernel = true;
break;
}
@@ -2115,7 +2151,7 @@ ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO);
static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma, struct dma_fence *fence,
- struct dma_fence *fence2)
+ struct dma_fence *fence2, bool invalidate_on_bind)
{
xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
@@ -2132,6 +2168,8 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
}
vma->tile_present |= BIT(tile->id);
vma->tile_staged &= ~BIT(tile->id);
+ if (invalidate_on_bind)
+ vma->tile_invalidated |= BIT(tile->id);
if (xe_vma_is_userptr(vma)) {
lockdep_assert_held_read(&vm->userptr.notifier_lock);
to_userptr_vma(vma)->userptr.initial_bind = true;
@@ -2193,7 +2231,7 @@ static void op_commit(struct xe_vm *vm,
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
- fence2);
+ fence2, op->map.invalidate_on_bind);
break;
case DRM_GPUVA_OP_REMAP:
{
@@ -2206,10 +2244,10 @@ static void op_commit(struct xe_vm *vm,
if (op->remap.prev)
bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
- fence, fence2);
+ fence, fence2, false);
if (op->remap.next)
bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
- fence, fence2);
+ fence, fence2, false);
break;
}
case DRM_GPUVA_OP_UNMAP:
@@ -2227,16 +2265,24 @@ static void op_commit(struct xe_vm *vm,
if (!xe_vma_is_cpu_addr_mirror(vma))
bind_op_commit(vm, tile, pt_update_ops, vma, fence,
- fence2);
+ fence2, false);
break;
}
case DRM_GPUVA_OP_DRIVER:
{
+ /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
+
if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
- op->map_range.range->tile_present |= BIT(tile->id);
- op->map_range.range->tile_invalidated &= ~BIT(tile->id);
+ WRITE_ONCE(op->map_range.range->tile_present,
+ op->map_range.range->tile_present |
+ BIT(tile->id));
+ WRITE_ONCE(op->map_range.range->tile_invalidated,
+ op->map_range.range->tile_invalidated &
+ ~BIT(tile->id));
} else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
- op->unmap_range.range->tile_present &= ~BIT(tile->id);
+ WRITE_ONCE(op->unmap_range.range->tile_present,
+ op->unmap_range.range->tile_present &
+ ~BIT(tile->id));
}
break;
}
@@ -2257,11 +2303,15 @@ static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
.pre_commit = xe_pt_userptr_pre_commit,
};
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
.populate = xe_vm_populate_pgtable,
.clear = xe_migrate_clear_pgtable_callback,
.pre_commit = xe_pt_svm_pre_commit,
};
+#else
+static const struct xe_migrate_pt_update_ops svm_migrate_ops;
+#endif
/**
* xe_pt_update_ops_run() - Run PT update operations
diff --git a/drivers/gpu/drm/xe/xe_pxp_debugfs.c b/drivers/gpu/drm/xe/xe_pxp_debugfs.c
index ccfbacf08efc..525a2f6bb076 100644
--- a/drivers/gpu/drm/xe/xe_pxp_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_pxp_debugfs.c
@@ -66,9 +66,18 @@ static int pxp_terminate(struct seq_file *m, void *data)
{
struct xe_pxp *pxp = node_to_pxp(m->private);
struct drm_printer p = drm_seq_file_printer(m);
+ int ready = xe_pxp_get_readiness_status(pxp);
- if (!xe_pxp_is_enabled(pxp))
- return -ENODEV;
+ if (ready < 0)
+ return ready; /* disabled or error occurred */
+ else if (!ready)
+ return -EBUSY; /* init still in progress */
+
+ /* no need for a termination if PXP is not active */
+ if (pxp->status != XE_PXP_ACTIVE) {
+ drm_printf(&p, "PXP not active\n");
+ return 0;
+ }
/* simulate a termination interrupt */
spin_lock_irq(&pxp->xe->irq.lock);
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 5e65830dad25..2dbf4066d86f 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -340,7 +340,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
- if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_GPUSVM))
+ if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
index 9475e3f74958..fc8447a838c4 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.c
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -173,6 +173,9 @@ void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt)
if (xa_empty(&sr->xa))
return;
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name);
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 917fc16de866..bc1689db4cd7 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -137,7 +137,8 @@ emit_pipe_control(u32 *dw, int i, u32 bit_group_0, u32 bit_group_1, u32 offset,
static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
int i)
{
- u32 flags = PIPE_CONTROL_CS_STALL |
+ u32 flags0 = 0;
+ u32 flags1 = PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_COMMAND_CACHE_INVALIDATE |
PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
@@ -148,11 +149,15 @@ static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
PIPE_CONTROL_STORE_DATA_INDEX;
if (invalidate_tlb)
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags1 |= PIPE_CONTROL_TLB_INVALIDATE;
- flags &= ~mask_flags;
+ flags1 &= ~mask_flags;
- return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
+ if (flags1 & PIPE_CONTROL_VF_CACHE_INVALIDATE)
+ flags0 |= PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE;
+
+ return emit_pipe_control(dw, i, flags0, flags1,
+ LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
}
static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
@@ -229,13 +234,10 @@ static u32 get_ppgtt_flag(struct xe_sched_job *job)
static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
{
- dw[i++] = MI_COPY_MEM_MEM | MI_COPY_MEM_MEM_SRC_GGTT |
- MI_COPY_MEM_MEM_DST_GGTT;
+ dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
+ dw[i++] = RING_CTX_TIMESTAMP(0).addr;
dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc);
dw[i++] = 0;
- dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc);
- dw[i++] = 0;
- dw[i++] = MI_NOOP;
return i;
}
diff --git a/drivers/gpu/drm/xe/xe_ring_ops_types.h b/drivers/gpu/drm/xe/xe_ring_ops_types.h
index 1ae56e2ee7b4..d7e3e150a9a5 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops_types.h
+++ b/drivers/gpu/drm/xe/xe_ring_ops_types.h
@@ -8,7 +8,7 @@
struct xe_sched_job;
-#define MAX_JOB_SIZE_DW 48
+#define MAX_JOB_SIZE_DW 58
#define MAX_JOB_SIZE_BYTES (MAX_JOB_SIZE_DW * 4)
/**
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 13bb62d3e615..29e694bb1219 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -258,9 +258,6 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
rtp_get_context(ctx, &hwe, &gt, &xe);
- if (IS_SRIOV_VF(xe))
- return;
-
xe_assert(xe, entries);
for (entry = entries; entry - entries < n_entries; entry++) {
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index f8fe61e25518..1d43e183ca21 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -60,7 +60,8 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3
bo = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
size / SZ_1K, bo);
diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c
index 8184390f9c7b..86d47aaf0358 100644
--- a/drivers/gpu/drm/xe/xe_shrinker.c
+++ b/drivers/gpu/drm/xe/xe_shrinker.c
@@ -227,7 +227,7 @@ struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
if (!shrinker)
return ERR_PTR(-ENOMEM);
- shrinker->shrink = shrinker_alloc(0, "xe system shrinker");
+ shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique);
if (!shrinker->shrink) {
kfree(shrinker);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
index cb813b337fd3..1f710b3fc599 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.c
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/sysfs.h>
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_heci_gsc.h"
@@ -28,20 +29,32 @@
* This is implemented by loading the driver with bare minimum (no drm card) to allow the firmware
* to be flashed through mei and collect telemetry. The driver's probe flow is modified
* such that it enters survivability mode when pcode initialization is incomplete and boot status
- * denotes a failure. The driver then populates the survivability_mode PCI sysfs indicating
- * survivability mode and provides additional information required for debug
+ * denotes a failure.
*
- * KMD exposes below admin-only readable sysfs in survivability mode
+ * Survivability mode can also be entered manually using the survivability mode attribute available
+ * through configfs, which is beneficial in several use cases. It can be used to address scenarios
+ * where pcode does not detect failure or for validation purposes. It can also be used in
+ * In-Field-Repair (IFR) to repair a single card without impacting the other cards in a node.
*
- * device/survivability_mode: The presence of this file indicates that the card is in survivability
- * mode. Also, provides additional information on why the driver entered
- * survivability mode.
+ * Use the below command to enable survivability mode manually::
*
- * Capability Information - Provides boot status
- * Postcode Information - Provides information about the failure
- * Overflow Information - Provides history of previous failures
- * Auxiliary Information - Certain failures may have information in
- * addition to postcode information
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
+ *
+ * Refer to :ref:`xe_configfs` for more details on how to use configfs
+ *
+ * Survivability mode is indicated by the below admin-only readable sysfs which provides additional
+ * debug information::
+ *
+ * /sys/bus/pci/devices/<device>/survivability_mode
+ *
+ * Capability Information:
+ * Provides boot status
+ * Postcode Information:
+ * Provides information about the failure
+ * Overflow Information:
+ * Provides history of previous failures
+ * Auxiliary Information:
+ * Certain failures may have information in addition to postcode information
*/
static u32 aux_history_offset(u32 reg_value)
@@ -133,6 +146,7 @@ static void xe_survivability_mode_fini(void *arg)
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct device *dev = &pdev->dev;
+ xe_configfs_clear_survivability_mode(pdev);
sysfs_remove_file(&dev->kobj, &dev_attr_survivability_mode.attr);
}
@@ -186,24 +200,41 @@ bool xe_survivability_mode_is_enabled(struct xe_device *xe)
return xe->survivability.mode;
}
-/*
- * survivability_mode_requested - check if it's possible to enable
- * survivability mode and that was requested by firmware
+/**
+ * xe_survivability_mode_is_requested - check if it's possible to enable survivability
+ * mode that was requested by firmware or userspace
+ * @xe: xe device instance
*
- * This function reads the boot status from Pcode.
+ * This function reads configfs and boot status from Pcode.
*
* Return: true if platform support is available and boot status indicates
- * failure, false otherwise.
+ * failure or if survivability mode is requested, false otherwise.
*/
-static bool survivability_mode_requested(struct xe_device *xe)
+bool xe_survivability_mode_is_requested(struct xe_device *xe)
{
struct xe_survivability *survivability = &xe->survivability;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
u32 data;
+ bool survivability_mode;
- if (!IS_DGFX(xe) || xe->info.platform < XE_BATTLEMAGE || IS_SRIOV_VF(xe))
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
return false;
+ survivability_mode = xe_configfs_get_survivability_mode(pdev);
+
+ if (xe->info.platform < XE_BATTLEMAGE) {
+ if (survivability_mode) {
+ dev_err(&pdev->dev, "Survivability Mode is not supported on this card\n");
+ xe_configfs_clear_survivability_mode(pdev);
+ }
+ return false;
+ }
+
+ /* Enable survivability mode if set via configfs */
+ if (survivability_mode)
+ return true;
+
data = xe_mmio_read32(mmio, PCODE_SCRATCH(0));
survivability->boot_status = REG_FIELD_GET(BOOT_STATUS, data);
@@ -226,7 +257,7 @@ int xe_survivability_mode_enable(struct xe_device *xe)
struct xe_survivability_info *info;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- if (!survivability_mode_requested(xe))
+ if (!xe_survivability_mode_is_requested(xe))
return 0;
survivability->size = MAX_SCRATCH_MMIO;
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.h b/drivers/gpu/drm/xe/xe_survivability_mode.h
index d7e64885570d..02231c2bf008 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.h
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.h
@@ -12,5 +12,6 @@ struct xe_device;
int xe_survivability_mode_enable(struct xe_device *xe);
bool xe_survivability_mode_is_enabled(struct xe_device *xe);
+bool xe_survivability_mode_is_requested(struct xe_device *xe);
#endif /* _XE_SURVIVABILITY_MODE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 3e829c87d7b4..6345896585de 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -4,6 +4,7 @@
*/
#include "xe_bo.h"
+#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_module.h"
@@ -15,8 +16,17 @@
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
- /* Not reliable without notifier lock */
- return range->base.flags.has_devmem_pages;
+ /*
+ * Advisory-only check of whether the range is currently backed by VRAM
+ * memory.
+ */
+
+ struct drm_gpusvm_range_flags flags = {
+ /* Pairs with WRITE_ONCE in drm_gpusvm.c */
+ .__flags = READ_ONCE(range->base.flags.__flags),
+ };
+
+ return flags.has_devmem_pages;
}
static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
@@ -79,7 +89,7 @@ xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
range = kzalloc(sizeof(*range), GFP_KERNEL);
if (!range)
- return ERR_PTR(-ENOMEM);
+ return NULL;
INIT_LIST_HEAD(&range->garbage_collector_link);
xe_vm_get(gpusvm_to_vm(gpusvm));
@@ -339,6 +349,8 @@ static void xe_svm_garbage_collector_work_func(struct work_struct *w)
up_write(&vm->lock);
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+
static struct xe_vram_region *page_to_vr(struct page *page)
{
return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
@@ -577,6 +589,8 @@ static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = {
.copy_to_ram = xe_svm_copy_to_ram,
};
+#endif
+
static const struct drm_gpusvm_ops gpusvm_ops = {
.range_alloc = xe_svm_range_alloc,
.range_free = xe_svm_range_free,
@@ -645,11 +659,19 @@ void xe_svm_fini(struct xe_vm *vm)
}
static bool xe_svm_range_is_valid(struct xe_svm_range *range,
- struct xe_tile *tile)
+ struct xe_tile *tile,
+ bool devmem_only)
{
- return (range->tile_present & ~range->tile_invalidated) & BIT(tile->id);
+ /*
+ * Advisory-only check of whether the range currently has a valid mapping;
+ * READ_ONCE pairs with WRITE_ONCE in xe_pt.c
+ */
+ return ((READ_ONCE(range->tile_present) &
+ ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) &&
+ (!devmem_only || xe_svm_range_in_vram(range));
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
{
return &tile->mem.vram;
@@ -696,11 +718,14 @@ retry:
list_for_each_entry(block, blocks, link)
block->private = vr;
+ xe_bo_get(bo);
err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base,
&bo->devmem_allocation, ctx);
- xe_bo_unlock(bo);
if (err)
- xe_bo_put(bo); /* Creation ref */
+ xe_svm_devmem_release(&bo->devmem_allocation);
+
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
unlock:
mmap_read_unlock(mm);
@@ -708,12 +733,50 @@ unlock:
return err;
}
+#else
+static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static bool supports_4K_migration(struct xe_device *xe)
+{
+ if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
+ return false;
+
+ return true;
+}
+
+static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
+ struct xe_vma *vma)
+{
+ struct xe_vm *vm = range_to_vm(&range->base);
+ u64 range_size = xe_svm_range_size(range);
+
+ if (!range->base.flags.migrate_devmem)
+ return false;
+
+ if (xe_svm_range_in_vram(range)) {
+ drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
+ return false;
+ }
+
+ if (range_size <= SZ_64K && !supports_4K_migration(vm->xe)) {
+ drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
+ return false;
+ }
+
+ return true;
+}
/**
* xe_svm_handle_pagefault() - SVM handle page fault
* @vm: The VM.
* @vma: The CPU address mirror VMA.
- * @tile: The tile upon the fault occurred.
+ * @gt: The gt upon which the fault occurred.
* @fault_addr: The GPU fault address.
* @atomic: The fault atomic access bit.
*
@@ -723,7 +786,7 @@ unlock:
* Return: 0 on success, negative error code on error.
*/
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_tile *tile, u64 fault_addr,
+ struct xe_gt *gt, u64 fault_addr,
bool atomic)
{
struct drm_gpusvm_ctx ctx = {
@@ -732,17 +795,25 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
.check_pages_threshold = IS_DGFX(vm->xe) &&
IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
+ .devmem_only = atomic && IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
+ .timeslice_ms = atomic && IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? 5 : 0,
};
struct xe_svm_range *range;
struct drm_gpusvm_range *r;
struct drm_exec exec;
struct dma_fence *fence;
+ int migrate_try_count = ctx.devmem_only ? 3 : 1;
+ struct xe_tile *tile = gt_to_tile(gt);
ktime_t end = 0;
int err;
lockdep_assert_held_write(&vm->lock);
xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
+
retry:
/* Always process UNMAPs first so view SVM ranges is current */
err = xe_svm_garbage_collector(vm);
@@ -755,24 +826,31 @@ retry:
if (IS_ERR(r))
return PTR_ERR(r);
+ if (ctx.devmem_only && !r->flags.migrate_devmem)
+ return -EACCES;
+
range = to_xe_range(r);
- if (xe_svm_range_is_valid(range, tile))
+ if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
return 0;
range_debug(range, "PAGE FAULT");
- /* XXX: Add migration policy, for now migrate range once */
- if (!range->skip_migrate && range->base.flags.migrate_devmem &&
- xe_svm_range_size(range) >= SZ_64K) {
- range->skip_migrate = true;
-
+ if (--migrate_try_count >= 0 &&
+ xe_svm_range_needs_migrate_to_vram(range, vma)) {
err = xe_svm_alloc_vram(vm, tile, range, &ctx);
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (err) {
- drm_dbg(&vm->xe->drm,
- "VRAM allocation failed, falling back to "
- "retrying fault, asid=%u, errno=%pe\n",
- vm->usm.asid, ERR_PTR(err));
- goto retry;
+ if (migrate_try_count || !ctx.devmem_only) {
+ drm_dbg(&vm->xe->drm,
+ "VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
+ vm->usm.asid, ERR_PTR(err));
+ goto retry;
+ } else {
+ drm_err(&vm->xe->drm,
+ "VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
+ vm->usm.asid, ERR_PTR(err));
+ return err;
+ }
}
}
@@ -780,15 +858,23 @@ retry:
err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
/* Corner where CPU mappings have changed */
if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
- if (err == -EOPNOTSUPP) {
- range_debug(range, "PAGE FAULT - EVICT PAGES");
- drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ if (migrate_try_count > 0 || !ctx.devmem_only) {
+ if (err == -EOPNOTSUPP) {
+ range_debug(range, "PAGE FAULT - EVICT PAGES");
+ drm_gpusvm_range_evict(&vm->svm.gpusvm,
+ &range->base);
+ }
+ drm_dbg(&vm->xe->drm,
+ "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ range_debug(range, "PAGE FAULT - RETRY PAGES");
+ goto retry;
+ } else {
+ drm_err(&vm->xe->drm,
+ "Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
}
- drm_dbg(&vm->xe->drm,
- "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
- vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
- range_debug(range, "PAGE FAULT - RETRY PAGES");
- goto retry;
}
if (err) {
range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
@@ -812,6 +898,7 @@ retry_bind:
drm_exec_fini(&exec);
err = PTR_ERR(fence);
if (err == -EAGAIN) {
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
range_debug(range, "PAGE FAULT - RETRY BIND");
goto retry;
}
@@ -822,9 +909,6 @@ retry_bind:
}
drm_exec_fini(&exec);
- if (xe_modparam.always_migrate_to_vram)
- range->skip_migrate = false;
-
dma_fence_wait(fence, false);
dma_fence_put(fence);
@@ -863,6 +947,7 @@ int xe_svm_bo_evict(struct xe_bo *bo)
}
#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+
static struct drm_pagemap_device_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
struct device *dev,
@@ -944,3 +1029,15 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
return 0;
}
#endif
+
+/**
+ * xe_svm_flush() - SVM flush
+ * @vm: The VM.
+ *
+ * Flush all SVM actions.
+ */
+void xe_svm_flush(struct xe_vm *vm)
+{
+ if (xe_vm_in_fault_mode(vm))
+ flush_work(&vm->svm.garbage_collector.work);
+}
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index e059590e5076..30fc78b85b30 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -6,16 +6,19 @@
#ifndef _XE_SVM_H_
#define _XE_SVM_H_
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+
#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>
#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
struct xe_bo;
-struct xe_vram_region;
+struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
+struct xe_vram_region;
/** struct xe_svm_range - SVM range */
struct xe_svm_range {
@@ -36,14 +39,8 @@ struct xe_svm_range {
* range. Protected by GPU SVM notifier lock.
*/
u8 tile_invalidated;
- /**
- * @skip_migrate: Skip migration to VRAM, protected by GPU fault handler
- * locking.
- */
- u8 skip_migrate :1;
};
-#if IS_ENABLED(CONFIG_DRM_GPUSVM)
/**
* xe_svm_range_pages_valid() - SVM range pages valid
* @range: SVM range
@@ -64,7 +61,7 @@ void xe_svm_fini(struct xe_vm *vm);
void xe_svm_close(struct xe_vm *vm);
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_tile *tile, u64 fault_addr,
+ struct xe_gt *gt, u64 fault_addr,
bool atomic);
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
@@ -72,7 +69,52 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
int xe_svm_bo_evict(struct xe_bo *bo);
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
+
+/**
+ * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
+ * @range: SVM range
+ *
+ * Return: True if SVM range has a DMA mapping, False otherwise
+ */
+static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
+{
+ lockdep_assert_held(&range->base.gpusvm->notifier_lock);
+ return range->base.flags.has_dma_mapping;
+}
+
+#define xe_svm_assert_in_notifier(vm__) \
+ lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_lock(vm__) \
+ drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
+
+#define xe_svm_notifier_unlock(vm__) \
+ drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+
+void xe_svm_flush(struct xe_vm *vm);
+
#else
+#include <linux/interval_tree.h>
+
+struct drm_pagemap_device_addr;
+struct xe_bo;
+struct xe_gt;
+struct xe_vm;
+struct xe_vma;
+struct xe_tile;
+struct xe_vram_region;
+
+#define XE_INTERCONNECT_VRAM 1
+
+struct xe_svm_range {
+ struct {
+ struct interval_tree_node itree;
+ const struct drm_pagemap_device_addr *dma_addr;
+ } base;
+ u32 tile_present;
+ u32 tile_invalidated;
+};
+
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
return false;
@@ -102,7 +144,7 @@ void xe_svm_close(struct xe_vm *vm)
static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_tile *tile, u64 fault_addr,
+ struct xe_gt *gt, u64 fault_addr,
bool atomic)
{
return 0;
@@ -124,27 +166,20 @@ static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}
-#endif
-/**
- * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
- * @range: SVM range
- *
- * Return: True if SVM range has a DMA mapping, False otherwise
- */
-static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
+#define xe_svm_assert_in_notifier(...) do {} while (0)
+#define xe_svm_range_has_dma_mapping(...) false
+
+static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
- lockdep_assert_held(&range->base.gpusvm->notifier_lock);
- return range->base.flags.has_dma_mapping;
}
-#define xe_svm_assert_in_notifier(vm__) \
- lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
-
-#define xe_svm_notifier_lock(vm__) \
- drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
-
-#define xe_svm_notifier_unlock(vm__) \
- drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
+{
+}
+static inline void xe_svm_flush(struct xe_vm *vm)
+{
+}
+#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_trace_lrc.h b/drivers/gpu/drm/xe/xe_trace_lrc.h
index 5c669a0b2180..d525cbee1e34 100644
--- a/drivers/gpu/drm/xe/xe_trace_lrc.h
+++ b/drivers/gpu/drm/xe/xe_trace_lrc.h
@@ -19,12 +19,12 @@
#define __dev_name_lrc(lrc) dev_name(gt_to_xe((lrc)->fence_ctx.gt)->drm.dev)
TRACE_EVENT(xe_lrc_update_timestamp,
- TP_PROTO(struct xe_lrc *lrc, uint32_t old),
+ TP_PROTO(struct xe_lrc *lrc, uint64_t old),
TP_ARGS(lrc, old),
TP_STRUCT__entry(
__field(struct xe_lrc *, lrc)
- __field(u32, old)
- __field(u32, new)
+ __field(u64, old)
+ __field(u64, new)
__string(name, lrc->fence_ctx.name)
__string(device_id, __dev_name_lrc(lrc))
),
@@ -36,7 +36,7 @@ TRACE_EVENT(xe_lrc_update_timestamp,
__assign_str(name);
__assign_str(device_id);
),
- TP_printk("lrc=:%p lrc->name=%s old=%u new=%u device_id:%s",
+ TP_printk("lrc=:%p lrc->name=%s old=%llu new=%llu device_id:%s",
__entry->lrc, __get_str(name),
__entry->old, __entry->new,
__get_str(device_id))
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index c14bd2282044..3a8751a8b92d 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -244,7 +244,7 @@ void xe_uc_gucrc_disable(struct xe_uc *uc)
void xe_uc_stop_prepare(struct xe_uc *uc)
{
- xe_gsc_wait_for_worker_completion(&uc->gsc);
+ xe_gsc_stop_prepare(&uc->gsc);
xe_guc_stop_prepare(&uc->guc);
}
@@ -278,6 +278,12 @@ again:
goto again;
}
+void xe_uc_suspend_prepare(struct xe_uc *uc)
+{
+ xe_gsc_wait_for_worker_completion(&uc->gsc);
+ xe_guc_stop_prepare(&uc->guc);
+}
+
int xe_uc_suspend(struct xe_uc *uc)
{
/* GuC submission not enabled, nothing to do */
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
index 3813c1ede450..c23e6f5e2514 100644
--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -18,6 +18,7 @@ int xe_uc_reset_prepare(struct xe_uc *uc);
void xe_uc_stop_prepare(struct xe_uc *uc);
void xe_uc_stop(struct xe_uc *uc);
int xe_uc_start(struct xe_uc *uc);
+void xe_uc_suspend_prepare(struct xe_uc *uc);
int xe_uc_suspend(struct xe_uc *uc);
int xe_uc_sanitize_reset(struct xe_uc *uc);
void xe_uc_declare_wedged(struct xe_uc *uc);
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index fb0eda3d5682..2741849bbf4d 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -92,6 +92,8 @@
struct uc_fw_entry {
enum xe_platform platform;
+ enum xe_gt_type gt_type;
+
struct {
const char *path;
u16 major;
@@ -106,32 +108,37 @@ struct fw_blobs_by_type {
u32 count;
};
-#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
- fw_def(BATTLEMAGE, major_ver(xe, guc, bmg, 70, 29, 2)) \
- fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 29, 2)) \
- fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 29, 2)) \
- fw_def(DG2, major_ver(i915, guc, dg2, 70, 29, 2)) \
- fw_def(DG1, major_ver(i915, guc, dg1, 70, 29, 2)) \
- fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 29, 2)) \
- fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 29, 2))
+/*
+ * Add an "ANY" define just to convey the meaning it's given here.
+ */
+#define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED
+
+#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
+ fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 44, 1)) \
+ fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 44, 1)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \
+ fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 44, 1)) \
+ fw_def(DG1, GT_TYPE_ANY, major_ver(i915, guc, dg1, 70, 44, 1)) \
+ fw_def(ALDERLAKE_N, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(ALDERLAKE_P, GT_TYPE_ANY, major_ver(i915, guc, adlp, 70, 44, 1)) \
+ fw_def(ALDERLAKE_S, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(ROCKETLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(TIGERLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1))
#define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \
- fw_def(BATTLEMAGE, no_ver(xe, huc, bmg)) \
- fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \
- fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \
- fw_def(DG1, no_ver(i915, huc, dg1)) \
- fw_def(ALDERLAKE_P, no_ver(i915, huc, tgl)) \
- fw_def(ALDERLAKE_S, no_ver(i915, huc, tgl)) \
- fw_def(ROCKETLAKE, no_ver(i915, huc, tgl)) \
- fw_def(TIGERLAKE, no_ver(i915, huc, tgl))
+ fw_def(BATTLEMAGE, GT_TYPE_ANY, no_ver(xe, huc, bmg)) \
+ fw_def(LUNARLAKE, GT_TYPE_ANY, no_ver(xe, huc, lnl)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, no_ver(i915, huc_gsc, mtl)) \
+ fw_def(DG1, GT_TYPE_ANY, no_ver(i915, huc, dg1)) \
+ fw_def(ALDERLAKE_P, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(ALDERLAKE_S, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(ROCKETLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(TIGERLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl))
/* for the GSC FW we match the compatibility version and not the release one */
#define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \
- fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 104, 1, 0)) \
- fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 102, 1, 0))
+ fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, gsc, lnl, 104, 1, 0)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, gsc, mtl, 102, 1, 0))
#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
__stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin"
@@ -159,12 +166,13 @@ struct fw_blobs_by_type {
a, b, c }
/* All blobs need to be declared via MODULE_FIRMWARE() */
-#define XE_UC_MODULE_FIRMWARE(platform__, fw_filename) \
+#define XE_UC_MODULE_FIRMWARE(platform__, gt_type__, fw_filename) \
MODULE_FIRMWARE(fw_filename);
-#define XE_UC_FW_ENTRY(platform__, entry__) \
+#define XE_UC_FW_ENTRY(platform__, gt_type__, entry__) \
{ \
.platform = XE_ ## platform__, \
+ .gt_type = XE_ ## gt_type__, \
entry__, \
},
@@ -222,30 +230,38 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw)
[XE_UC_FW_TYPE_HUC] = { entries_huc, ARRAY_SIZE(entries_huc) },
[XE_UC_FW_TYPE_GSC] = { entries_gsc, ARRAY_SIZE(entries_gsc) },
};
- static const struct uc_fw_entry *entries;
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
enum xe_platform p = xe->info.platform;
+ const struct uc_fw_entry *entries;
u32 count;
int i;
- xe_assert(xe, uc_fw->type < ARRAY_SIZE(blobs_all));
+ xe_gt_assert(gt, uc_fw->type < ARRAY_SIZE(blobs_all));
+ xe_gt_assert(gt, gt->info.type != XE_GT_TYPE_UNINITIALIZED);
+
entries = blobs_all[uc_fw->type].entries;
count = blobs_all[uc_fw->type].count;
for (i = 0; i < count && p <= entries[i].platform; i++) {
- if (p == entries[i].platform) {
- uc_fw->path = entries[i].path;
- uc_fw->versions.wanted.major = entries[i].major;
- uc_fw->versions.wanted.minor = entries[i].minor;
- uc_fw->versions.wanted.patch = entries[i].patch;
- uc_fw->full_ver_required = entries[i].full_ver_required;
-
- if (uc_fw->type == XE_UC_FW_TYPE_GSC)
- uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
- else
- uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE;
-
- break;
- }
+ if (p != entries[i].platform)
+ continue;
+
+ if (entries[i].gt_type != XE_GT_TYPE_ANY &&
+ entries[i].gt_type != gt->info.type)
+ continue;
+
+ uc_fw->path = entries[i].path;
+ uc_fw->versions.wanted.major = entries[i].major;
+ uc_fw->versions.wanted.minor = entries[i].minor;
+ uc_fw->versions.wanted.patch = entries[i].patch;
+ uc_fw->full_ver_required = entries[i].full_ver_required;
+
+ if (uc_fw->type == XE_UC_FW_TYPE_GSC)
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
+ else
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE;
+
+ break;
}
}
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 60303998bd61..79323c78130f 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2049,7 +2049,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
- args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
+ args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
+ !xe->info.needs_scratch))
return -EINVAL;
if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
@@ -2201,6 +2202,20 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
}
#endif
+static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags)
+{
+ if (!xe_vm_in_fault_mode(vm))
+ return false;
+
+ if (!xe_vm_has_scratch(vm))
+ return false;
+
+ if (bind_flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE)
+ return false;
+
+ return true;
+}
+
/*
* Create operations list from IOCTL arguments, setup operations fields so parse
* and commit steps are decoupled from IOCTL arguments. This step can fail.
@@ -2273,6 +2288,8 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
+ op->map.invalidate_on_bind =
+ __xe_vm_needs_clear_scratch_pages(vm, flags);
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
op->prefetch.region = prefetch_region;
}
@@ -2472,8 +2489,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return PTR_ERR(vma);
op->map.vma = vma;
- if ((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
- !op->map.is_cpu_addr_mirror)
+ if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
+ !op->map.is_cpu_addr_mirror) ||
+ op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
op->tile_mask);
break;
@@ -2726,9 +2744,10 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- err = vma_lock_and_validate(exec, op->map.vma,
- !xe_vm_in_fault_mode(vm) ||
- op->map.immediate);
+ if (!op->map.invalidate_on_bind)
+ err = vma_lock_and_validate(exec, op->map.vma,
+ !xe_vm_in_fault_mode(vm) ||
+ op->map.immediate);
break;
case DRM_GPUVA_OP_REMAP:
err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
@@ -3082,9 +3101,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
if (!*bind_ops)
return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
- err = __copy_from_user(*bind_ops, bind_user,
- sizeof(struct drm_xe_vm_bind_op) *
- args->num_binds);
+ err = copy_from_user(*bind_ops, bind_user,
+ sizeof(struct drm_xe_vm_bind_op) *
+ args->num_binds);
if (XE_IOCTL_DBG(xe, err)) {
err = -EFAULT;
goto free_bind_ops;
@@ -3109,7 +3128,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
if (XE_IOCTL_DBG(xe, is_cpu_addr_mirror &&
(!xe_vm_in_fault_mode(vm) ||
- !IS_ENABLED(CONFIG_DRM_GPUSVM)))) {
+ !IS_ENABLED(CONFIG_DRM_XE_GPUSVM)))) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -3243,7 +3262,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
- return -EINVAL;
+ return -EINVAL;
}
}
@@ -3251,7 +3270,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
if (bo->cpu_caching) {
if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
- return -EINVAL;
+ return -EINVAL;
}
} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
/*
@@ -3260,7 +3279,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
 		 * how it was mapped on the CPU. Just assume it is
* potentially cached on CPU side.
*/
- return -EINVAL;
+ return -EINVAL;
}
/* If a BO is protected it can only be mapped if the key is still valid */
@@ -3312,8 +3331,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
/* Ensure all UNMAPs visible */
- if (xe_vm_in_fault_mode(vm))
- flush_work(&vm->svm.garbage_collector.work);
+ xe_svm_flush(vm);
err = down_write_killable(&vm->lock);
if (err)
@@ -3847,6 +3865,9 @@ void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
}
drm_puts(p, "\n");
+
+ if (drm_coredump_printer_is_full(p))
+ return;
}
}
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 84fa41b9fa20..1662604c4486 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -330,6 +330,8 @@ struct xe_vma_op_map {
bool is_cpu_addr_mirror;
/** @dumpable: whether BO is dumped on GPU hang */
bool dumpable;
+	/** @invalidate_on_bind: invalidate the VMA before bind */
+ bool invalidate_on_bind;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;
};
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index b1f81dca610d..e421a74fb87c 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -49,7 +49,7 @@ _resize_bar(struct xe_device *xe, int resno, resource_size_t size)
*/
static void resize_vram_bar(struct xe_device *xe)
{
- u64 force_vram_bar_size = xe_modparam.force_vram_bar_size;
+ int force_vram_bar_size = xe_modparam.force_vram_bar_size;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct pci_bus *root = pdev->bus;
resource_size_t current_size;
@@ -66,6 +66,9 @@ static void resize_vram_bar(struct xe_device *xe)
if (!bar_size_mask)
return;
+ if (force_vram_bar_size < 0)
+ return;
+
/* set to a specific size? */
if (force_vram_bar_size) {
u32 bar_size_bit;
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 24f644c0a673..67196baa4249 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -230,6 +230,18 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
+ /* Xe2_HPG */
+
+ { XE_RTP_NAME("16025250150"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001)),
+ XE_RTP_ACTIONS(SET(LSN_VC_REG2,
+ LSN_LNI_WGT(1) |
+ LSN_LNE_WGT(1) |
+ LSN_DIM_X_WGT(1) |
+ LSN_DIM_Y_WGT(1) |
+ LSN_DIM_Z_WGT(1)))
+ },
+
/* Xe2_HPM */
{ XE_RTP_NAME("16021867713"),
@@ -815,6 +827,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
},
+ { XE_RTP_NAME("22021007897"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
+ },
/* Xe3_LPG */
{ XE_RTP_NAME("14021490052"),
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 0c738af24f7c..9efc5accd43d 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -32,8 +32,10 @@
GRAPHICS_VERSION(3001)
14022293748 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION_RANGE(3000, 3001)
22019794406 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION_RANGE(3000, 3001)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
@@ -55,3 +57,5 @@ no_media_l3 MEDIA_VERSION(3000)
GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0)
16023105232 GRAPHICS_VERSION_RANGE(2001, 3001)
MEDIA_VERSION_RANGE(1301, 3000)
+16026508708 GRAPHICS_VERSION_RANGE(1200, 3001)
+ MEDIA_VERSION_RANGE(1300, 3000)
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index dbecca9bdd54..cfabf5e2a0bb 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -22,6 +22,7 @@ config DRM_ZYNQMP_DPSUB_AUDIO
bool "ZynqMP DisplayPort Audio Support"
depends on DRM_ZYNQMP_DPSUB
depends on SND && SND_SOC
+ depends on SND_SOC=y || DRM_ZYNQMP_DPSUB=m
select SND_SOC_GENERIC_DMAENGINE_PCM
help
Choose this option to enable DisplayPort audio support in the ZynqMP
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index a6a4a871f197..238cbb49963e 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1481,6 +1481,7 @@ static void zynqmp_dp_disp_disable(struct zynqmp_dp *dp,
*/
static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
@@ -1494,7 +1495,7 @@ static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge,
}
if (dp->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, dp->next_bridge,
+ ret = drm_bridge_attach(encoder, dp->next_bridge,
bridge, flags);
if (ret < 0)
goto error;
@@ -2466,10 +2467,8 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
dp->reset = devm_reset_control_get(dp->dev, NULL);
if (IS_ERR(dp->reset)) {
- if (PTR_ERR(dp->reset) != -EPROBE_DEFER)
- dev_err(dp->dev, "failed to get reset: %ld\n",
- PTR_ERR(dp->reset));
- ret = PTR_ERR(dp->reset);
+ ret = dev_err_probe(dp->dev, PTR_ERR(dp->reset),
+ "failed to get reset\n");
goto err_free;
}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
index f07ff4eb3a6d..1a46a046103f 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
@@ -45,7 +45,6 @@ struct zynqmp_dpsub_audio {
struct {
struct snd_soc_dai_link_component cpu;
- struct snd_soc_dai_link_component codec;
struct snd_soc_dai_link_component platform;
} components[ZYNQMP_NUM_PCMS];
@@ -403,10 +402,8 @@ int zynqmp_audio_init(struct zynqmp_dpsub *dpsub)
link->num_cpus = 1;
link->cpus[0].dai_name = audio->dai_name;
- link->codecs = &audio->components[i].codec;
+ link->codecs = &snd_soc_dummy_dlc;
link->num_codecs = 1;
- link->codecs[0].name = "snd-soc-dummy";
- link->codecs[0].dai_name = "snd-soc-dummy-dai";
link->platforms = &audio->components[i].platform;
link->num_platforms = 1;
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 8e09d6d328d2..344cc9e741c1 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -41,7 +41,6 @@ static int host1x_subdev_add(struct host1x_device *device,
struct device_node *np)
{
struct host1x_subdev *subdev;
- struct device_node *child;
int err;
subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
@@ -56,13 +55,12 @@ static int host1x_subdev_add(struct host1x_device *device,
mutex_unlock(&device->subdevs_lock);
/* recursively add children */
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (of_match_node(driver->subdevs, child) &&
of_device_is_available(child)) {
err = host1x_subdev_add(device, driver, child);
if (err < 0) {
/* XXX cleanup? */
- of_node_put(child);
return err;
}
}
@@ -90,17 +88,14 @@ static void host1x_subdev_del(struct host1x_subdev *subdev)
static int host1x_device_parse_dt(struct host1x_device *device,
struct host1x_driver *driver)
{
- struct device_node *np;
int err;
- for_each_child_of_node(device->dev.parent->of_node, np) {
+ for_each_child_of_node_scoped(device->dev.parent->of_node, np) {
if (of_match_node(driver->subdevs, np) &&
of_device_is_available(np)) {
err = host1x_subdev_add(device, driver, np);
- if (err < 0) {
- of_node_put(np);
+ if (err < 0)
return err;
- }
}
}
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 407ed9b9cf64..ba2e572567c0 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -247,8 +247,6 @@ static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
CDMA_EVENT_PUSH_BUFFER_SPACE);
- host1x_hw_cdma_flush(host1x, cdma);
-
/* If somebody has managed to already start waiting, yield */
if (cdma->event != CDMA_EVENT_NONE) {
mutex_unlock(&cdma->lock);
@@ -591,7 +589,6 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
*/
void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
{
- struct host1x *host1x = cdma_to_host1x(cdma);
struct push_buffer *pb = &cdma->push_buffer;
u32 slots_free = cdma->slots_free;
@@ -599,11 +596,9 @@ void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
op1, op2);
- if (slots_free == 0) {
- host1x_hw_cdma_flush(host1x, cdma);
+ if (slots_free == 0)
slots_free = host1x_cdma_wait_locked(cdma,
CDMA_EVENT_PUSH_BUFFER_SPACE);
- }
cdma->slots_free = slots_free - 1;
cdma->slots_used++;
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index fa77e4e64f12..333f36e0a715 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1008,7 +1008,7 @@ int ipu_map_irq(struct ipu_soc *ipu, int irq)
{
int virq;
- virq = irq_linear_revmap(ipu->domain, irq);
+ virq = irq_find_mapping(ipu->domain, irq);
if (!virq)
virq = irq_create_mapping(ipu->domain, irq);
@@ -1169,8 +1169,8 @@ static int ipu_irq_init(struct ipu_soc *ipu)
};
int ret, i;
- ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
- &irq_generic_chip_ops, ipu);
+ ipu->domain = irq_domain_create_linear(of_fwnode_handle(ipu->dev->of_node), IPU_NUM_IRQS,
+ &irq_generic_chip_ops, ipu);
if (!ipu->domain) {
dev_err(ipu->dev, "failed to add irq domain\n");
return -ENODEV;
@@ -1219,7 +1219,7 @@ static void ipu_irq_exit(struct ipu_soc *ipu)
/* TODO: remove irq_domain_generic_chips */
for (i = 0; i < IPU_NUM_IRQS; i++) {
- irq = irq_linear_revmap(ipu->domain, i);
+ irq = irq_find_mapping(ipu->domain, i);
if (irq)
irq_dispose_mapping(irq);
}
diff --git a/drivers/gpu/nova-core/Kconfig b/drivers/gpu/nova-core/Kconfig
index ad0c06756516..8726d80d6ba4 100644
--- a/drivers/gpu/nova-core/Kconfig
+++ b/drivers/gpu/nova-core/Kconfig
@@ -3,6 +3,7 @@ config NOVA_CORE
depends on PCI
depends on RUST
depends on RUST_FW_LOADER_ABSTRACTIONS
+ select AUXILIARY_BUS
default n
help
Choose this if you want to build the Nova Core driver for Nvidia
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
index a08fb6599267..8c86101c26cb 100644
--- a/drivers/gpu/nova-core/driver.rs
+++ b/drivers/gpu/nova-core/driver.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{bindings, c_str, device::Core, pci, prelude::*};
+use kernel::{auxiliary, bindings, c_str, device::Core, pci, prelude::*};
use crate::gpu::Gpu;
@@ -8,6 +8,7 @@ use crate::gpu::Gpu;
pub(crate) struct NovaCore {
#[pin]
pub(crate) gpu: Gpu,
+ _reg: auxiliary::Registration,
}
const BAR0_SIZE: usize = 8;
@@ -38,6 +39,12 @@ impl pci::Driver for NovaCore {
let this = KBox::pin_init(
try_pin_init!(Self {
gpu <- Gpu::new(pdev, bar)?,
+ _reg: auxiliary::Registration::new(
+ pdev.as_ref(),
+ c_str!("nova-drm"),
+ 0, // TODO: Once it lands, use XArray; for now we don't use the ID.
+ crate::MODULE_NAME
+ )?,
}),
GFP_KERNEL,
)?;
diff --git a/drivers/gpu/nova-core/firmware.rs b/drivers/gpu/nova-core/firmware.rs
index 6e6361c59ca1..4b8a38358a4f 100644
--- a/drivers/gpu/nova-core/firmware.rs
+++ b/drivers/gpu/nova-core/firmware.rs
@@ -1,13 +1,49 @@
// SPDX-License-Identifier: GPL-2.0
-use crate::gpu;
+//! Contains structures and functions dedicated to the parsing, building and patching of firmwares
+//! to be loaded into a given execution unit.
+
+use kernel::device;
use kernel::firmware;
+use kernel::prelude::*;
+use kernel::str::CString;
+
+use crate::gpu;
+use crate::gpu::Chipset;
+
+pub(crate) const FIRMWARE_VERSION: &str = "535.113.01";
+
+/// Structure encapsulating the firmware blobs required for the GPU to operate.
+#[expect(dead_code)]
+pub(crate) struct Firmware {
+ booter_load: firmware::Firmware,
+ booter_unload: firmware::Firmware,
+ bootloader: firmware::Firmware,
+ gsp: firmware::Firmware,
+}
+
+impl Firmware {
+ pub(crate) fn new(dev: &device::Device, chipset: Chipset, ver: &str) -> Result<Firmware> {
+ let mut chip_name = CString::try_from_fmt(fmt!("{}", chipset))?;
+ chip_name.make_ascii_lowercase();
+
+ let request = |name_| {
+ CString::try_from_fmt(fmt!("nvidia/{}/gsp/{}-{}.bin", &*chip_name, name_, ver))
+ .and_then(|path| firmware::Firmware::request(&path, dev))
+ };
+
+ Ok(Firmware {
+ booter_load: request("booter_load")?,
+ booter_unload: request("booter_unload")?,
+ bootloader: request("bootloader")?,
+ gsp: request("gsp")?,
+ })
+ }
+}
pub(crate) struct ModInfoBuilder<const N: usize>(firmware::ModInfoBuilder<N>);
impl<const N: usize> ModInfoBuilder<N> {
- const VERSION: &'static str = "535.113.01";
-
const fn make_entry_file(self, chipset: &str, fw: &str) -> Self {
ModInfoBuilder(
self.0
@@ -17,7 +53,7 @@ impl<const N: usize> ModInfoBuilder<N> {
.push("/gsp/")
.push(fw)
.push("-")
- .push(Self::VERSION)
+ .push(FIRMWARE_VERSION)
.push(".bin"),
)
}
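
Editor's note: the firmware paths requested by the new Firmware::new() above follow the nvidia/<chipset>/gsp/<blob>-<version>.bin pattern built by ModInfoBuilder. A minimal standalone sketch of that formatting (plain Rust, outside the kernel crate; the chipset and blob names are only illustrative):

    // Sketch of the path format used by Firmware::new(); values are illustrative.
    fn gsp_firmware_path(chipset: &str, name: &str, ver: &str) -> String {
        format!("nvidia/{}/gsp/{}-{}.bin", chipset.to_ascii_lowercase(), name, ver)
    }

    fn main() {
        // Prints "nvidia/ga102/gsp/booter_load-535.113.01.bin"
        println!("{}", gsp_firmware_path("GA102", "booter_load", "535.113.01"));
    }
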
diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs
index 17c9660da450..60b86f370284 100644
--- a/drivers/gpu/nova-core/gpu.rs
+++ b/drivers/gpu/nova-core/gpu.rs
@@ -1,10 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{
- device, devres::Devres, error::code::*, firmware, fmt, pci, prelude::*, str::CString,
-};
+use kernel::{device, devres::Devres, error::code::*, pci, prelude::*};
use crate::driver::Bar0;
+use crate::firmware::{Firmware, FIRMWARE_VERSION};
use crate::regs;
use crate::util;
use core::fmt;
@@ -13,7 +12,7 @@ macro_rules! define_chipset {
({ $($variant:ident = $value:expr),* $(,)* }) =>
{
/// Enum representation of the GPU chipset.
- #[derive(fmt::Debug)]
+ #[derive(fmt::Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub(crate) enum Chipset {
$($variant = $value),*,
}
@@ -54,6 +53,7 @@ define_chipset!({
TU117 = 0x167,
TU116 = 0x168,
// Ampere
+ GA100 = 0x170,
GA102 = 0x172,
GA103 = 0x173,
GA104 = 0x174,
@@ -73,7 +73,7 @@ impl Chipset {
Self::TU102 | Self::TU104 | Self::TU106 | Self::TU117 | Self::TU116 => {
Architecture::Turing
}
- Self::GA102 | Self::GA103 | Self::GA104 | Self::GA106 | Self::GA107 => {
+ Self::GA100 | Self::GA102 | Self::GA103 | Self::GA104 | Self::GA106 | Self::GA107 => {
Architecture::Ampere
}
Self::AD102 | Self::AD103 | Self::AD104 | Self::AD106 | Self::AD107 => {
@@ -93,16 +93,29 @@ impl Chipset {
// For now, redirect to fmt::Debug for convenience.
impl fmt::Display for Chipset {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "{:?}", self)
+ write!(f, "{self:?}")
}
}
/// Enum representation of the GPU generation.
#[derive(fmt::Debug)]
pub(crate) enum Architecture {
- Turing,
- Ampere,
- Ada,
+ Turing = 0x16,
+ Ampere = 0x17,
+ Ada = 0x19,
+}
+
+impl TryFrom<u8> for Architecture {
+ type Error = Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ match value {
+ 0x16 => Ok(Self::Turing),
+ 0x17 => Ok(Self::Ampere),
+ 0x19 => Ok(Self::Ada),
+ _ => Err(ENODEV),
+ }
+ }
}
pub(crate) struct Revision {
@@ -111,10 +124,10 @@ pub(crate) struct Revision {
}
impl Revision {
- fn from_boot0(boot0: regs::Boot0) -> Self {
+ fn from_boot0(boot0: regs::NV_PMC_BOOT_0) -> Self {
Self {
- major: boot0.major_rev(),
- minor: boot0.minor_rev(),
+ major: boot0.major_revision(),
+ minor: boot0.minor_revision(),
}
}
}
@@ -133,45 +146,16 @@ pub(crate) struct Spec {
}
impl Spec {
- fn new(bar: &Devres<Bar0>) -> Result<Spec> {
- let bar = bar.try_access().ok_or(ENXIO)?;
- let boot0 = regs::Boot0::read(&bar);
+ fn new(bar: &Bar0) -> Result<Spec> {
+ let boot0 = regs::NV_PMC_BOOT_0::read(bar);
Ok(Self {
- chipset: boot0.chipset().try_into()?,
+ chipset: boot0.chipset()?,
revision: Revision::from_boot0(boot0),
})
}
}
-/// Structure encapsulating the firmware blobs required for the GPU to operate.
-#[expect(dead_code)]
-pub(crate) struct Firmware {
- booter_load: firmware::Firmware,
- booter_unload: firmware::Firmware,
- bootloader: firmware::Firmware,
- gsp: firmware::Firmware,
-}
-
-impl Firmware {
- fn new(dev: &device::Device, spec: &Spec, ver: &str) -> Result<Firmware> {
- let mut chip_name = CString::try_from_fmt(fmt!("{}", spec.chipset))?;
- chip_name.make_ascii_lowercase();
-
- let request = |name_| {
- CString::try_from_fmt(fmt!("nvidia/{}/gsp/{}-{}.bin", &*chip_name, name_, ver))
- .and_then(|path| firmware::Firmware::request(&path, dev))
- };
-
- Ok(Firmware {
- booter_load: request("booter_load")?,
- booter_unload: request("booter_unload")?,
- bootloader: request("bootloader")?,
- gsp: request("gsp")?,
- })
- }
-}
-
/// Structure holding the resources required to operate the GPU.
#[pin_data]
pub(crate) struct Gpu {
@@ -182,9 +166,13 @@ pub(crate) struct Gpu {
}
impl Gpu {
- pub(crate) fn new(pdev: &pci::Device, bar: Devres<Bar0>) -> Result<impl PinInit<Self>> {
- let spec = Spec::new(&bar)?;
- let fw = Firmware::new(pdev.as_ref(), &spec, "535.113.01")?;
+ pub(crate) fn new(
+ pdev: &pci::Device<device::Bound>,
+ devres_bar: Devres<Bar0>,
+ ) -> Result<impl PinInit<Self>> {
+ let bar = devres_bar.access(pdev.as_ref())?;
+ let spec = Spec::new(bar)?;
+ let fw = Firmware::new(pdev.as_ref(), spec.chipset, FIRMWARE_VERSION)?;
dev_info!(
pdev.as_ref(),
@@ -194,6 +182,10 @@ impl Gpu {
spec.revision
);
- Ok(pin_init!(Self { spec, bar, fw }))
+ Ok(pin_init!(Self {
+ spec,
+ bar: devres_bar,
+ fw
+ }))
}
}
diff --git a/drivers/gpu/nova-core/nova_core.rs b/drivers/gpu/nova-core/nova_core.rs
index a91cd924054b..618632f0abcc 100644
--- a/drivers/gpu/nova-core/nova_core.rs
+++ b/drivers/gpu/nova-core/nova_core.rs
@@ -8,6 +8,8 @@ mod gpu;
mod regs;
mod util;
+pub(crate) const MODULE_NAME: &kernel::str::CStr = <LocalModule as kernel::ModuleMetadata>::NAME;
+
kernel::module_pci_driver! {
type: driver::NovaCore,
name: "NovaCore",
diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs
index b1a25b86ef17..5a1273230306 100644
--- a/drivers/gpu/nova-core/regs.rs
+++ b/drivers/gpu/nova-core/regs.rs
@@ -1,55 +1,39 @@
// SPDX-License-Identifier: GPL-2.0
-use crate::driver::Bar0;
-
-// TODO
-//
-// Create register definitions via generic macros. See task "Generic register
-// abstraction" in Documentation/gpu/nova/core/todo.rst.
-
-const BOOT0_OFFSET: usize = 0x00000000;
-
-// 3:0 - chipset minor revision
-const BOOT0_MINOR_REV_SHIFT: u8 = 0;
-const BOOT0_MINOR_REV_MASK: u32 = 0x0000000f;
-
-// 7:4 - chipset major revision
-const BOOT0_MAJOR_REV_SHIFT: u8 = 4;
-const BOOT0_MAJOR_REV_MASK: u32 = 0x000000f0;
-
-// 23:20 - chipset implementation Identifier (depends on architecture)
-const BOOT0_IMPL_SHIFT: u8 = 20;
-const BOOT0_IMPL_MASK: u32 = 0x00f00000;
-
-// 28:24 - chipset architecture identifier
-const BOOT0_ARCH_MASK: u32 = 0x1f000000;
-
-// 28:20 - chipset identifier (virtual register field combining BOOT0_IMPL and
-// BOOT0_ARCH)
-const BOOT0_CHIPSET_SHIFT: u8 = BOOT0_IMPL_SHIFT;
-const BOOT0_CHIPSET_MASK: u32 = BOOT0_IMPL_MASK | BOOT0_ARCH_MASK;
-
-#[derive(Copy, Clone)]
-pub(crate) struct Boot0(u32);
-
-impl Boot0 {
- #[inline]
- pub(crate) fn read(bar: &Bar0) -> Self {
- Self(bar.read32(BOOT0_OFFSET))
- }
-
- #[inline]
- pub(crate) fn chipset(&self) -> u32 {
- (self.0 & BOOT0_CHIPSET_MASK) >> BOOT0_CHIPSET_SHIFT
- }
-
- #[inline]
- pub(crate) fn minor_rev(&self) -> u8 {
- ((self.0 & BOOT0_MINOR_REV_MASK) >> BOOT0_MINOR_REV_SHIFT) as u8
+// Required to retain the original register names used by OpenRM, which are all capital snake case
+// but are mapped to types.
+#![allow(non_camel_case_types)]
+
+#[macro_use]
+mod macros;
+
+use crate::gpu::{Architecture, Chipset};
+use kernel::prelude::*;
+
+/* PMC */
+
+register!(NV_PMC_BOOT_0 @ 0x00000000, "Basic revision information about the GPU" {
+ 3:0 minor_revision as u8, "Minor revision of the chip";
+ 7:4 major_revision as u8, "Major revision of the chip";
+ 8:8 architecture_1 as u8, "MSB of the architecture";
+ 23:20 implementation as u8, "Implementation version of the architecture";
+ 28:24 architecture_0 as u8, "Lower bits of the architecture";
+});
+
+impl NV_PMC_BOOT_0 {
+ /// Combines `architecture_0` and `architecture_1` to obtain the architecture of the chip.
+ pub(crate) fn architecture(self) -> Result<Architecture> {
+ Architecture::try_from(
+ self.architecture_0() | (self.architecture_1() << Self::ARCHITECTURE_0.len()),
+ )
}
- #[inline]
- pub(crate) fn major_rev(&self) -> u8 {
- ((self.0 & BOOT0_MAJOR_REV_MASK) >> BOOT0_MAJOR_REV_SHIFT) as u8
+ /// Combines `architecture` and `implementation` to obtain a code unique to the chipset.
+ pub(crate) fn chipset(self) -> Result<Chipset> {
+ self.architecture()
+ .map(|arch| {
+ ((arch as u32) << Self::IMPLEMENTATION.len()) | self.implementation() as u32
+ })
+ .and_then(Chipset::try_from)
}
}
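
Editor's note: a worked example of the chipset-code computation above, kept as a standalone sketch. The values come from the definitions in this series: Architecture::Ampere is 0x17, IMPLEMENTATION is the 4-bit field 23:20, and Chipset::GA102 is 0x172.

    // Illustrative only: mirrors NV_PMC_BOOT_0::chipset() with made-up field values.
    fn chipset_code(architecture: u32, implementation: u32) -> u32 {
        const IMPLEMENTATION_BITS: u32 = 4; // NV_PMC_BOOT_0 bits 23:20
        (architecture << IMPLEMENTATION_BITS) | implementation
    }

    fn main() {
        // Ampere (0x17) with implementation 0x2 yields 0x172, i.e. Chipset::GA102.
        assert_eq!(chipset_code(0x17, 0x2), 0x172);
    }
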
diff --git a/drivers/gpu/nova-core/regs/macros.rs b/drivers/gpu/nova-core/regs/macros.rs
new file mode 100644
index 000000000000..7ecc70efb3cd
--- /dev/null
+++ b/drivers/gpu/nova-core/regs/macros.rs
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Macro to define register layout and accessors.
+//!
+//! A single register typically includes several fields, which are accessed through a combination
+//! of bit-shift and mask operations that introduce a class of potential mistakes, notably because
+//! not all possible field values are necessarily valid.
+//!
+//! The macro in this module allows one to define, using an intuitive and readable syntax, a
+//! dedicated type for each register with its own field accessors that can return an error if a
+//! field's value is invalid.
+
+/// Defines a dedicated type for a register with an absolute offset, alongside getter and
+/// setter methods for its fields and methods to read and write it from an `Io` region.
+///
+/// Example:
+///
+/// ```no_run
+/// register!(BOOT_0 @ 0x00000100, "Basic revision information about the GPU" {
+/// 3:0 minor_revision as u8, "Minor revision of the chip";
+/// 7:4 major_revision as u8, "Major revision of the chip";
+/// 28:20 chipset as u32 ?=> Chipset, "Chipset model";
+/// });
+/// ```
+///
+/// This defines a `BOOT_0` type which can be read or written from offset `0x100` of an `Io`
+/// region. It is composed of 3 fields; for instance, `minor_revision` is made of the 4 least
+/// significant bits of the register. Each field can be accessed and modified using accessor
+/// methods:
+///
+/// ```no_run
+/// // Read from the register's defined offset (0x100).
+/// let boot0 = BOOT_0::read(&bar);
+/// pr_info!("chip revision: {}.{}", boot0.major_revision(), boot0.minor_revision());
+///
+/// // `Chipset::try_from` will be called with the value of the field and returns an error if the
+/// // value is invalid.
+/// let chipset = boot0.chipset()?;
+///
+/// // Update some fields and write the value back.
+/// boot0.set_major_revision(3).set_minor_revision(10).write(&bar);
+///
+/// // Or just read and update the register in a single step:
+/// BOOT_0::alter(&bar, |r| r.set_major_revision(3).set_minor_revision(10));
+/// ```
+///
+/// Fields can be defined as follows:
+///
+/// - `as <type>` simply returns the field value cast to the requested integer type, typically
+/// `u32`, `u16`, `u8` or `bool`. Note that `bool` fields must have a range of 1 bit.
+/// - `as <type> => <into_type>` calls `<into_type>`'s `From::<<type>>` implementation and returns
+/// the result.
+/// - `as <type> ?=> <try_into_type>` calls `<try_into_type>`'s `TryFrom::<<type>>` implementation
+/// and returns the result. This is useful on fields for which not all values are valid.
+///
+/// The documentation strings are optional. If present, they will be added to the type's
+/// definition, or to the field getter and setter methods they are attached to.
+///
+/// Putting a `+` before the address of the register makes it relative to a base: the `read` and
+/// `write` methods take a `base` argument that is added to the specified address before access,
+/// and `try_read` and `try_write` methods are also created, allowing access with offsets unknown
+/// at compile-time:
+///
+/// ```no_run
+/// register!(CPU_CTL @ +0x0000010, "CPU core control" {
+/// 0:0 start as bool, "Start the CPU core";
+/// });
+///
+/// // Flip the `start` switch for the CPU core whose base address is `CPU_BASE`.
+/// let cpuctl = CPU_CTL::read(&bar, CPU_BASE);
+/// pr_info!("CPU CTL: {:#x}", cpuctl);
+/// cpuctl.set_start(true).write(&bar, CPU_BASE);
+/// ```
+macro_rules! register {
+ // Creates a register at a fixed offset of the MMIO space.
+ (
+ $name:ident @ $offset:literal $(, $comment:literal)? {
+ $($fields:tt)*
+ }
+ ) => {
+ register!(@common $name $(, $comment)?);
+ register!(@field_accessors $name { $($fields)* });
+ register!(@io $name @ $offset);
+ };
+
+ // Creates a register at a relative offset from a base address.
+ (
+ $name:ident @ + $offset:literal $(, $comment:literal)? {
+ $($fields:tt)*
+ }
+ ) => {
+ register!(@common $name $(, $comment)?);
+ register!(@field_accessors $name { $($fields)* });
+ register!(@io$name @ + $offset);
+ };
+
+ // Defines the wrapper `$name` type, as well as its relevant implementations (`Debug`, `BitOr`,
+ // and conversion to regular `u32`).
+ (@common $name:ident $(, $comment:literal)?) => {
+ $(
+ #[doc=$comment]
+ )?
+ #[repr(transparent)]
+ #[derive(Clone, Copy, Default)]
+ pub(crate) struct $name(u32);
+
+ // TODO: display the raw hex value, then the value of all the fields. This requires
+        // matching the fields, which will complicate the syntax considerably...
+ impl ::core::fmt::Debug for $name {
+ fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
+ f.debug_tuple(stringify!($name))
+ .field(&format_args!("0x{0:x}", &self.0))
+ .finish()
+ }
+ }
+
+ impl core::ops::BitOr for $name {
+ type Output = Self;
+
+ fn bitor(self, rhs: Self) -> Self::Output {
+ Self(self.0 | rhs.0)
+ }
+ }
+
+ impl ::core::convert::From<$name> for u32 {
+ fn from(reg: $name) -> u32 {
+ reg.0
+ }
+ }
+ };
+
+    // Defines all the field getter/setter methods for `$name`.
+ (
+ @field_accessors $name:ident {
+ $($hi:tt:$lo:tt $field:ident as $type:tt
+ $(?=> $try_into_type:ty)?
+ $(=> $into_type:ty)?
+ $(, $comment:literal)?
+ ;
+ )*
+ }
+ ) => {
+ $(
+ register!(@check_field_bounds $hi:$lo $field as $type);
+ )*
+
+ #[allow(dead_code)]
+ impl $name {
+ $(
+ register!(@field_accessor $name $hi:$lo $field as $type
+ $(?=> $try_into_type)?
+ $(=> $into_type)?
+ $(, $comment)?
+ ;
+ );
+ )*
+ }
+ };
+
+ // Boolean fields must have `$hi == $lo`.
+ (@check_field_bounds $hi:tt:$lo:tt $field:ident as bool) => {
+ #[allow(clippy::eq_op)]
+ const _: () = {
+ kernel::build_assert!(
+ $hi == $lo,
+ concat!("boolean field `", stringify!($field), "` covers more than one bit")
+ );
+ };
+ };
+
+ // Non-boolean fields must have `$hi >= $lo`.
+ (@check_field_bounds $hi:tt:$lo:tt $field:ident as $type:tt) => {
+ #[allow(clippy::eq_op)]
+ const _: () = {
+ kernel::build_assert!(
+ $hi >= $lo,
+ concat!("field `", stringify!($field), "`'s MSB is smaller than its LSB")
+ );
+ };
+ };
+
+ // Catches fields defined as `bool` and convert them into a boolean value.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as bool => $into_type:ty
+ $(, $comment:literal)?;
+ ) => {
+ register!(
+ @leaf_accessor $name $hi:$lo $field as bool
+ { |f| <$into_type>::from(if f != 0 { true } else { false }) }
+ $into_type => $into_type $(, $comment)?;
+ );
+ };
+
+ // Shortcut for fields defined as `bool` without the `=>` syntax.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as bool $(, $comment:literal)?;
+ ) => {
+ register!(@field_accessor $name $hi:$lo $field as bool => bool $(, $comment)?;);
+ };
+
+ // Catches the `?=>` syntax for non-boolean fields.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt ?=> $try_into_type:ty
+ $(, $comment:literal)?;
+ ) => {
+ register!(@leaf_accessor $name $hi:$lo $field as $type
+ { |f| <$try_into_type>::try_from(f as $type) } $try_into_type =>
+ ::core::result::Result<
+ $try_into_type,
+ <$try_into_type as ::core::convert::TryFrom<$type>>::Error
+ >
+ $(, $comment)?;);
+ };
+
+ // Catches the `=>` syntax for non-boolean fields.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt => $into_type:ty
+ $(, $comment:literal)?;
+ ) => {
+ register!(@leaf_accessor $name $hi:$lo $field as $type
+ { |f| <$into_type>::from(f as $type) } $into_type => $into_type $(, $comment)?;);
+ };
+
+ // Shortcut for fields defined as non-`bool` without the `=>` or `?=>` syntax.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt
+ $(, $comment:literal)?;
+ ) => {
+ register!(@field_accessor $name $hi:$lo $field as $type => $type $(, $comment)?;);
+ };
+
+ // Generates the accessor methods for a single field.
+ (
+ @leaf_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:ty
+ { $process:expr } $to_type:ty => $res_type:ty $(, $comment:literal)?;
+ ) => {
+ kernel::macros::paste!(
+ const [<$field:upper>]: ::core::ops::RangeInclusive<u8> = $lo..=$hi;
+ const [<$field:upper _MASK>]: u32 = ((((1 << $hi) - 1) << 1) + 1) - ((1 << $lo) - 1);
+ const [<$field:upper _SHIFT>]: u32 = Self::[<$field:upper _MASK>].trailing_zeros();
+ );
+
+ $(
+ #[doc="Returns the value of this field:"]
+ #[doc=$comment]
+ )?
+ #[inline]
+ pub(crate) fn $field(self) -> $res_type {
+ kernel::macros::paste!(
+ const MASK: u32 = $name::[<$field:upper _MASK>];
+ const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
+ );
+ let field = ((self.0 & MASK) >> SHIFT);
+
+ $process(field)
+ }
+
+ kernel::macros::paste!(
+ $(
+ #[doc="Sets the value of this field:"]
+ #[doc=$comment]
+ )?
+ #[inline]
+ pub(crate) fn [<set_ $field>](mut self, value: $to_type) -> Self {
+ const MASK: u32 = $name::[<$field:upper _MASK>];
+ const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
+ let value = ((value as u32) << SHIFT) & MASK;
+ self.0 = (self.0 & !MASK) | value;
+
+ self
+ }
+ );
+ };
+
+ // Creates the IO accessors for a fixed offset register.
+ (@io $name:ident @ $offset:literal) => {
+ #[allow(dead_code)]
+ impl $name {
+ #[inline]
+ pub(crate) fn read<const SIZE: usize, T>(io: &T) -> Self where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ Self(io.read32($offset))
+ }
+
+ #[inline]
+ pub(crate) fn write<const SIZE: usize, T>(self, io: &T) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ io.write32(self.0, $offset)
+ }
+
+ #[inline]
+ pub(crate) fn alter<const SIZE: usize, T, F>(
+ io: &T,
+ f: F,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::read(io));
+ reg.write(io);
+ }
+ }
+ };
+
+    // Creates the IO accessors for a relative offset register.
+ (@io $name:ident @ + $offset:literal) => {
+ #[allow(dead_code)]
+ impl $name {
+ #[inline]
+ pub(crate) fn read<const SIZE: usize, T>(
+ io: &T,
+ base: usize,
+ ) -> Self where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ Self(io.read32(base + $offset))
+ }
+
+ #[inline]
+ pub(crate) fn write<const SIZE: usize, T>(
+ self,
+ io: &T,
+ base: usize,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ io.write32(self.0, base + $offset)
+ }
+
+ #[inline]
+ pub(crate) fn alter<const SIZE: usize, T, F>(
+ io: &T,
+ base: usize,
+ f: F,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::read(io, base));
+ reg.write(io, base);
+ }
+
+ #[inline]
+ pub(crate) fn try_read<const SIZE: usize, T>(
+ io: &T,
+ base: usize,
+ ) -> ::kernel::error::Result<Self> where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ io.try_read32(base + $offset).map(Self)
+ }
+
+ #[inline]
+ pub(crate) fn try_write<const SIZE: usize, T>(
+ self,
+ io: &T,
+ base: usize,
+ ) -> ::kernel::error::Result<()> where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ io.try_write32(self.0, base + $offset)
+ }
+
+ #[inline]
+ pub(crate) fn try_alter<const SIZE: usize, T, F>(
+ io: &T,
+ base: usize,
+ f: F,
+ ) -> ::kernel::error::Result<()> where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::try_read(io, base)?);
+ reg.try_write(io, base)
+ }
+ }
+ };
+}
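
Editor's note: to make the generated accessors easier to review, here is a rough hand-expansion of what register!() emits for a single field (major_revision, bits 7:4 of NV_PMC_BOOT_0); the real expansion additionally produces the range constant, the Debug/BitOr impls and the IO accessors shown above.

    // Approximate, hand-written equivalent of one field accessor; not the literal
    // macro output.
    #[allow(non_camel_case_types)]
    #[derive(Clone, Copy, Default)]
    struct NV_PMC_BOOT_0(u32);

    impl NV_PMC_BOOT_0 {
        // ((((1 << 7) - 1) << 1) + 1) - ((1 << 4) - 1) == 0xf0
        const MAJOR_REVISION_MASK: u32 = 0xf0;
        const MAJOR_REVISION_SHIFT: u32 = Self::MAJOR_REVISION_MASK.trailing_zeros();

        fn major_revision(self) -> u8 {
            ((self.0 & Self::MAJOR_REVISION_MASK) >> Self::MAJOR_REVISION_SHIFT) as u8
        }

        fn set_major_revision(mut self, value: u8) -> Self {
            let value = (u32::from(value) << Self::MAJOR_REVISION_SHIFT) & Self::MAJOR_REVISION_MASK;
            self.0 = (self.0 & !Self::MAJOR_REVISION_MASK) | value;
            self
        }
    }

    fn main() {
        let boot0 = NV_PMC_BOOT_0::default().set_major_revision(0xa);
        assert_eq!(boot0.major_revision(), 0xa); // field round-trips through mask/shift
    }
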
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a503252702b7..43859fc75747 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -151,6 +151,7 @@ config HID_APPLEIR
config HID_APPLETB_BL
tristate "Apple Touch Bar Backlight"
depends on BACKLIGHT_CLASS_DEVICE
+ depends on X86 || COMPILE_TEST
help
Say Y here if you want support for the backlight of Touch Bars on x86
MacBook Pros.
@@ -163,6 +164,7 @@ config HID_APPLETB_KBD
depends on USB_HID
depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
+ depends on X86 || COMPILE_TEST
select INPUT_SPARSEKMAP
select HID_APPLETB_BL
help
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index 25f0ebfcbd5f..0a9b44ce4904 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -83,6 +83,9 @@ static int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
case ALS_IDX:
privdata->dev_en.is_als_present = false;
break;
+ case SRA_IDX:
+ privdata->dev_en.is_sra_present = false;
+ break;
}
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
@@ -134,9 +137,6 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->sensor_sts[i] = SENSOR_DISABLED;
- if (cl_data->num_hid_devices == 1 && cl_data->sensor_idx[0] == SRA_IDX)
- break;
-
if (cl_data->sensor_idx[i] == SRA_IDX) {
info.sensor_idx = cl_data->sensor_idx[i];
writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0));
@@ -145,8 +145,10 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
(privdata, cl_data->sensor_idx[i], ENABLE_SENSOR);
cl_data->sensor_sts[i] = (status == 0) ? SENSOR_ENABLED : SENSOR_DISABLED;
- if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+ cl_data->is_any_sensor_enabled = true;
privdata->dev_en.is_sra_present = true;
+ }
continue;
}
@@ -238,6 +240,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
cleanup:
amd_sfh_hid_client_deinit(privdata);
for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_idx[i] == SRA_IDX)
+ continue;
devm_kfree(dev, cl_data->feature_report[i]);
devm_kfree(dev, in_data->input_report[i]);
devm_kfree(dev, cl_data->report_descr[i]);
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index 2e96ec6a3073..9a06f9b0e4ef 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -38,6 +38,9 @@ dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type
struct hid_bpf_ops *e;
int ret;
+ if (unlikely(hdev->bpf.destroyed))
+ return ERR_PTR(-ENODEV);
+
if (type >= HID_REPORT_TYPES)
return ERR_PTR(-EINVAL);
@@ -93,6 +96,9 @@ int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
struct hid_bpf_ops *e;
int ret, idx;
+ if (unlikely(hdev->bpf.destroyed))
+ return -ENODEV;
+
if (rtype >= HID_REPORT_TYPES)
return -EINVAL;
@@ -130,6 +136,9 @@ int dispatch_hid_bpf_output_report(struct hid_device *hdev,
struct hid_bpf_ops *e;
int ret, idx;
+ if (unlikely(hdev->bpf.destroyed))
+ return -ENODEV;
+
idx = srcu_read_lock(&hdev->bpf.srcu);
list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
srcu_read_lock_held(&hdev->bpf.srcu)) {
diff --git a/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c b/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c
index 1a0aeea6a081..a754710fc90b 100644
--- a/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c
+++ b/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c
@@ -157,6 +157,7 @@ static const __u8 fixed_rdesc_vendor[] = {
ReportCount(5) // padding
Input(Const)
// Byte 4 in report - just exists so we get to be a tablet pad
+ UsagePage_Digitizers
Usage_Dig_BarrelSwitch // BTN_STYLUS
ReportCount(1)
ReportSize(1)
diff --git a/drivers/hid/hid-appletb-kbd.c b/drivers/hid/hid-appletb-kbd.c
index 029ccbaa1d12..ef51b2c06872 100644
--- a/drivers/hid/hid-appletb-kbd.c
+++ b/drivers/hid/hid-appletb-kbd.c
@@ -172,7 +172,8 @@ static void appletb_inactivity_timer(struct timer_list *t)
if (!kbd->has_dimmed) {
backlight_device_set_brightness(kbd->backlight_dev, 1);
kbd->has_dimmed = true;
- mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_idle_timeout * 1000));
+ mod_timer(&kbd->inactivity_timer,
+ jiffies + secs_to_jiffies(appletb_tb_idle_timeout));
} else if (!kbd->has_turned_off) {
backlight_device_set_brightness(kbd->backlight_dev, 0);
kbd->has_turned_off = true;
@@ -188,7 +189,8 @@ static void reset_inactivity_timer(struct appletb_kbd *kbd)
kbd->has_dimmed = false;
kbd->has_turned_off = false;
}
- mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000));
+ mod_timer(&kbd->inactivity_timer,
+ jiffies + secs_to_jiffies(appletb_tb_dim_timeout));
}
}
@@ -407,7 +409,8 @@ static int appletb_kbd_probe(struct hid_device *hdev, const struct hid_device_id
} else {
backlight_device_set_brightness(kbd->backlight_dev, 2);
timer_setup(&kbd->inactivity_timer, appletb_inactivity_timer, 0);
- mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000));
+ mod_timer(&kbd->inactivity_timer,
+ jiffies + secs_to_jiffies(appletb_tb_dim_timeout));
}
kbd->inp_handler.event = appletb_kbd_inp_event;
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 46e3e42f9eb5..4b45e31f0bab 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -52,6 +52,10 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define FEATURE_KBD_LED_REPORT_ID1 0x5d
#define FEATURE_KBD_LED_REPORT_ID2 0x5e
+#define ROG_ALLY_REPORT_SIZE 64
+#define ROG_ALLY_X_MIN_MCU 313
+#define ROG_ALLY_MIN_MCU 319
+
#define SUPPORT_KBD_BACKLIGHT BIT(0)
#define MAX_TOUCH_MAJOR 8
@@ -84,6 +88,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_MEDION_E1239T BIT(10)
#define QUIRK_ROG_NKEY_KEYBOARD BIT(11)
#define QUIRK_ROG_CLAYMORE_II_KEYBOARD BIT(12)
+#define QUIRK_ROG_ALLY_XPAD BIT(13)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -534,9 +539,102 @@ static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev)
return !!(value & ASUS_WMI_DSTS_PRESENCE_BIT);
}
+/*
+ * We don't care about any other part of the string except the version section.
+ * Example strings: FGA80100.RC72LA.312_T01, FGA80100.RC71LS.318_T01
+ * The bytes "5a 05 03 31 00 1a 13" and possibly more come before the version
+ * string, and there may be additional bytes after the version string such as
+ * "75 00 74 00 65 00" or a postfix such as "_T01"
+ */
+static int mcu_parse_version_string(const u8 *response, size_t response_size)
+{
+ const u8 *end = response + response_size;
+ const u8 *p = response;
+ int dots, err, version;
+ char buf[4];
+
+ dots = 0;
+ while (p < end && dots < 2) {
+ if (*p++ == '.')
+ dots++;
+ }
+
+ if (dots != 2 || p >= end || (p + 3) >= end)
+ return -EINVAL;
+
+ memcpy(buf, p, 3);
+ buf[3] = '\0';
+
+ err = kstrtoint(buf, 10, &version);
+ if (err || version < 0)
+ return -EINVAL;
+
+ return version;
+}
+
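
Editor's note: the format described in the comment before mcu_parse_version_string() (take the three decimal digits that follow the second '.') can be illustrated with a small standalone sketch, written in Rust for brevity; this is not the driver's code, just a restatement of the rule using the example strings from that comment.

    // Illustration of the version-string rule; mirrors mcu_parse_version_string()
    // conceptually, not literally.
    fn parse_mcu_version(s: &str) -> Option<u32> {
        let after_second_dot = s.splitn(3, '.').nth(2)?; // e.g. "312_T01"
        after_second_dot.get(..3)?.parse().ok()          // -> Some(312)
    }

    fn main() {
        assert_eq!(parse_mcu_version("FGA80100.RC72LA.312_T01"), Some(312));
        assert_eq!(parse_mcu_version("FGA80100.RC71LS.318_T01"), Some(318));
    }
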
+static int mcu_request_version(struct hid_device *hdev)
+{
+ u8 *response __free(kfree) = kzalloc(ROG_ALLY_REPORT_SIZE, GFP_KERNEL);
+ const u8 request[] = { 0x5a, 0x05, 0x03, 0x31, 0x00, 0x20 };
+ int ret;
+
+ if (!response)
+ return -ENOMEM;
+
+ ret = asus_kbd_set_report(hdev, request, sizeof(request));
+ if (ret < 0)
+ return ret;
+
+ ret = hid_hw_raw_request(hdev, FEATURE_REPORT_ID, response,
+ ROG_ALLY_REPORT_SIZE, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret < 0)
+ return ret;
+
+ ret = mcu_parse_version_string(response, ROG_ALLY_REPORT_SIZE);
+ if (ret < 0) {
+ pr_err("Failed to parse MCU version: %d\n", ret);
+ print_hex_dump(KERN_ERR, "MCU: ", DUMP_PREFIX_NONE,
+ 16, 1, response, ROG_ALLY_REPORT_SIZE, false);
+ }
+
+ return ret;
+}
+
+static void validate_mcu_fw_version(struct hid_device *hdev, int idProduct)
+{
+ int min_version, version;
+
+ version = mcu_request_version(hdev);
+ if (version < 0)
+ return;
+
+ switch (idProduct) {
+ case USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY:
+ min_version = ROG_ALLY_MIN_MCU;
+ break;
+ case USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X:
+ min_version = ROG_ALLY_X_MIN_MCU;
+ break;
+ default:
+ min_version = 0;
+ }
+
+ if (version < min_version) {
+ hid_warn(hdev,
+ "The MCU firmware version must be %d or greater to avoid issues with suspend.\n",
+ min_version);
+ } else {
+ set_ally_mcu_hack(ASUS_WMI_ALLY_MCU_HACK_DISABLED);
+ set_ally_mcu_powersave(true);
+ }
+}
+
static int asus_kbd_register_leds(struct hid_device *hdev)
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+ struct usb_interface *intf;
+ struct usb_device *udev;
unsigned char kbd_func;
int ret;
@@ -560,6 +658,14 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
if (ret < 0)
return ret;
}
+
+ if (drvdata->quirks & QUIRK_ROG_ALLY_XPAD) {
+ intf = to_usb_interface(hdev->dev.parent);
+ udev = interface_to_usbdev(intf);
+ validate_mcu_fw_version(hdev,
+ le16_to_cpu(udev->descriptor.idProduct));
+ }
+
} else {
/* Initialize keyboard */
ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID);
@@ -1280,10 +1386,10 @@ static const struct hid_device_id asus_devices[] = {
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY),
- QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD | QUIRK_ROG_ALLY_XPAD},
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X),
- QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD | QUIRK_ROG_ALLY_XPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD),
QUIRK_ROG_CLAYMORE_II_KEYBOARD },
@@ -1327,4 +1433,5 @@ static struct hid_driver asus_driver = {
};
module_hid_driver(asus_driver);
+MODULE_IMPORT_NS("ASUS_WMI");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 4741ff626771..b348d0464314 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2396,6 +2396,9 @@ int hid_hw_open(struct hid_device *hdev)
ret = hdev->ll_driver->open(hdev);
if (ret)
hdev->ll_open_count--;
+
+ if (hdev->driver->on_hid_hw_open)
+ hdev->driver->on_hid_hw_open(hdev);
}
mutex_unlock(&hdev->ll_open_lock);
@@ -2415,8 +2418,12 @@ EXPORT_SYMBOL_GPL(hid_hw_open);
void hid_hw_close(struct hid_device *hdev)
{
mutex_lock(&hdev->ll_open_lock);
- if (!--hdev->ll_open_count)
+ if (!--hdev->ll_open_count) {
hdev->ll_driver->close(hdev);
+
+ if (hdev->driver->on_hid_hw_close)
+ hdev->driver->on_hid_hw_close(hdev);
+ }
mutex_unlock(&hdev->ll_open_lock);
}
EXPORT_SYMBOL_GPL(hid_hw_close);
diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
index afbd67aa9719..fee134a7eba3 100644
--- a/drivers/hid/hid-corsair-void.c
+++ b/drivers/hid/hid-corsair-void.c
@@ -507,7 +507,7 @@ static void corsair_void_status_work_handler(struct work_struct *work)
struct delayed_work *delayed_work;
int battery_ret;
- delayed_work = container_of(work, struct delayed_work, work);
+ delayed_work = to_delayed_work(work);
drvdata = container_of(delayed_work, struct corsair_void_drvdata,
delayed_status_work);
@@ -525,7 +525,7 @@ static void corsair_void_firmware_work_handler(struct work_struct *work)
struct delayed_work *delayed_work;
int firmware_ret;
- delayed_work = container_of(work, struct delayed_work, work);
+ delayed_work = to_delayed_work(work);
drvdata = container_of(delayed_work, struct corsair_void_drvdata,
delayed_firmware_work);
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index f4c8d981aa0a..234fa82eab07 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -17,11 +17,13 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/hid.h>
#include <linux/hidraw.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/nls.h>
#include <linux/string_choices.h>
#include <linux/usb/ch9.h>
@@ -185,7 +187,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -194,7 +196,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
if (ret >= 0)
ret = -EIO;
- goto exit;
+ return ret;
}
buf[1] &= ~BIT(offset);
@@ -207,25 +209,19 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
hid_err(hdev, "error setting GPIO config: %d\n", ret);
if (ret >= 0)
ret = -EIO;
- goto exit;
+ return ret;
}
- ret = 0;
-
-exit:
- mutex_unlock(&dev->lock);
- return ret;
+ return 0;
}
-static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int cp2112_gpio_set_unlocked(struct cp2112_device *dev,
+ unsigned int offset, int value)
{
- struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
-
buf[0] = CP2112_GPIO_SET;
buf[1] = value ? CP2112_GPIO_ALL_GPIO_MASK : 0;
buf[2] = BIT(offset);
@@ -236,7 +232,17 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (ret < 0)
hid_err(hdev, "error setting GPIO values: %d\n", ret);
- mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static int cp2112_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+
+ guard(mutex)(&dev->lock);
+
+ return cp2112_gpio_set_unlocked(dev, offset, value);
}
static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -246,23 +252,17 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_GET_LENGTH) {
hid_err(hdev, "error requesting GPIO values: %d\n", ret);
- ret = ret < 0 ? ret : -EIO;
- goto exit;
+ return ret < 0 ? ret : -EIO;
}
- ret = buf[1];
-
-exit:
- mutex_unlock(&dev->lock);
-
- return ret;
+ return buf[1];
}
static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -284,14 +284,14 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
- goto fail;
+ return ret < 0 ? ret : -EIO;
}
buf[1] |= 1 << offset;
@@ -302,22 +302,16 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
- goto fail;
+ return ret;
}
- mutex_unlock(&dev->lock);
-
/*
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
*/
- cp2112_gpio_set(chip, offset, value);
+ cp2112_gpio_set_unlocked(dev, offset, value);
return 0;
-
-fail:
- mutex_unlock(&dev->lock);
- return ret < 0 ? ret : -EIO;
}
static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
@@ -1205,7 +1199,11 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!dev->in_out_buffer)
return -ENOMEM;
- mutex_init(&dev->lock);
+ ret = devm_mutex_init(&hdev->dev, &dev->lock);
+ if (ret) {
+ hid_err(hdev, "mutex init failed\n");
+ return ret;
+ }
ret = hid_parse(hdev);
if (ret) {
@@ -1290,7 +1288,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.label = "cp2112_gpio";
dev->gc.direction_input = cp2112_gpio_direction_input;
dev->gc.direction_output = cp2112_gpio_direction_output;
- dev->gc.set = cp2112_gpio_set;
+ dev->gc.set_rv = cp2112_gpio_set;
dev->gc.get = cp2112_gpio_get;
dev->gc.base = -1;
dev->gc.ngpio = CP2112_GPIO_MAX_GPIO;
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 0fb210e40a41..9eafff0b6ea4 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -192,7 +192,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
goto cleanup;
input_device->report_desc_size = le16_to_cpu(
- desc->desc[0].wDescriptorLength);
+ desc->rpt_desc.wDescriptorLength);
if (input_device->report_desc_size == 0) {
input_device->dev_info_status = -EINVAL;
goto cleanup;
@@ -210,7 +210,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
memcpy(input_device->report_desc,
((unsigned char *)desc) + desc->bLength,
- le16_to_cpu(desc->desc[0].wDescriptorLength));
+ le16_to_cpu(desc->rpt_desc.wDescriptorLength));
/* Send the ack */
memset(&ack, 0, sizeof(struct mousevsc_prt_msg));
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 288a2b864cc4..e3fb4e2fe911 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -41,6 +41,10 @@
#define USB_VENDOR_ID_ACTIONSTAR 0x2101
#define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011
+#define USB_VENDOR_ID_ADATA_XPG 0x125f
+#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE 0x7505
+#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE 0x7506
+
#define USB_VENDOR_ID_ADS_TECH 0x06e1
#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155
@@ -92,6 +96,7 @@
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
#define USB_DEVICE_ID_APPLE_MAGICMOUSE2 0x0269
+#define USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC 0x0323
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 0x0265
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC 0x0324
diff --git a/drivers/hid/hid-kysona.c b/drivers/hid/hid-kysona.c
index d4c0406b3323..09bfe30d02cb 100644
--- a/drivers/hid/hid-kysona.c
+++ b/drivers/hid/hid-kysona.c
@@ -14,6 +14,7 @@
#define BATTERY_TIMEOUT_MS 5000
+#define ONLINE_REPORT_ID 3
#define BATTERY_REPORT_ID 4
struct kysona_drvdata {
@@ -80,11 +81,46 @@ static int kysona_battery_get_property(struct power_supply *psy,
return ret;
}
+static const char kysona_online_request[] = {
+ 0x08, ONLINE_REPORT_ID, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a
+};
+
static const char kysona_battery_request[] = {
0x08, BATTERY_REPORT_ID, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49
};
+static int kysona_m600_fetch_online(struct hid_device *hdev)
+{
+ u8 *write_buf;
+ int ret;
+
+ /* Request online information */
+ write_buf = kmemdup(kysona_online_request, sizeof(kysona_online_request), GFP_KERNEL);
+ if (!write_buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, kysona_online_request[0],
+ write_buf, sizeof(kysona_online_request),
+ HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ if (ret < (int)sizeof(kysona_online_request)) {
+ hid_err(hdev, "hid_hw_raw_request() failed with %d\n", ret);
+ ret = -ENODATA;
+ }
+ kfree(write_buf);
+ return ret;
+}
+
+static void kysona_fetch_online(struct hid_device *hdev)
+{
+ int ret = kysona_m600_fetch_online(hdev);
+
+ if (ret < 0)
+ hid_dbg(hdev,
+ "Online query failed (err: %d)\n", ret);
+}
+
static int kysona_m600_fetch_battery(struct hid_device *hdev)
{
u8 *write_buf;
@@ -121,6 +157,7 @@ static void kysona_battery_timer_tick(struct work_struct *work)
struct kysona_drvdata, battery_work.work);
struct hid_device *hdev = drv_data->hdev;
+ kysona_fetch_online(hdev);
kysona_fetch_battery(hdev);
schedule_delayed_work(&drv_data->battery_work,
msecs_to_jiffies(BATTERY_TIMEOUT_MS));
@@ -160,6 +197,7 @@ static int kysona_battery_probe(struct hid_device *hdev)
power_supply_powers(drv_data->battery, &hdev->dev);
INIT_DELAYED_WORK(&drv_data->battery_work, kysona_battery_timer_tick);
+ kysona_fetch_online(hdev);
kysona_fetch_battery(hdev);
schedule_delayed_work(&drv_data->battery_work,
msecs_to_jiffies(BATTERY_TIMEOUT_MS));
@@ -206,12 +244,16 @@ static int kysona_raw_event(struct hid_device *hdev,
{
struct kysona_drvdata *drv_data = hid_get_drvdata(hdev);
- if (drv_data->battery && size == sizeof(kysona_battery_request) &&
+ if (size == sizeof(kysona_online_request) &&
+ data[0] == 8 && data[1] == ONLINE_REPORT_ID) {
+ drv_data->online = data[6];
+ }
+
+ if (size == sizeof(kysona_battery_request) &&
data[0] == 8 && data[1] == BATTERY_REPORT_ID) {
drv_data->battery_capacity = data[6];
drv_data->battery_charging = data[7];
drv_data->battery_voltage = (data[8] << 8) | data[9];
- drv_data->online = true;
}
return 0;
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index c0a138f21ca4..445623dd1bd6 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -823,7 +823,7 @@ static ssize_t lg4ff_alternate_modes_show(struct device *dev, struct device_attr
for (i = 0; i < LG4FF_MODE_MAX_IDX; i++) {
if (entry->wdata.alternate_modes & BIT(i)) {
/* Print tag and full name */
- count += scnprintf(buf + count, PAGE_SIZE - count, "%s: %s",
+ count += sysfs_emit_at(buf, count, "%s: %s",
lg4ff_alternate_modes[i].tag,
!lg4ff_alternate_modes[i].product_id ? entry->wdata.real_name : lg4ff_alternate_modes[i].name);
if (count >= PAGE_SIZE - 1)
@@ -832,9 +832,9 @@ static ssize_t lg4ff_alternate_modes_show(struct device *dev, struct device_attr
/* Mark the currently active mode with an asterisk */
if (lg4ff_alternate_modes[i].product_id == entry->wdata.product_id ||
(lg4ff_alternate_modes[i].product_id == 0 && entry->wdata.product_id == entry->wdata.real_product_id))
- count += scnprintf(buf + count, PAGE_SIZE - count, " *\n");
+ count += sysfs_emit_at(buf, count, " *\n");
else
- count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
+ count += sysfs_emit_at(buf, count, "\n");
if (count >= PAGE_SIZE - 1)
return count;
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index adfa329e917b..d4d91e49bbe8 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -218,7 +218,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
int pressure = 0;
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
id = (tdata[6] << 2 | tdata[5] >> 6) & 0xf;
x = (tdata[1] << 28 | tdata[0] << 20) >> 20;
y = -((tdata[2] << 24 | tdata[1] << 16) >> 20);
@@ -370,7 +371,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
if (report_undeciphered) {
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC)
input_event(input, EV_MSC, MSC_RAW, tdata[7]);
else if (input->id.product !=
USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
@@ -497,7 +499,8 @@ static int magicmouse_raw_event(struct hid_device *hdev,
}
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
@@ -519,7 +522,8 @@ static int magicmouse_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct magicmouse_sc *msc = hid_get_drvdata(hdev);
- if (msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
+ if ((msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) &&
field->report->id == MOUSE2_REPORT_ID) {
/*
* magic_mouse_raw_event has done all the work. Skip hidinput.
@@ -540,7 +544,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(EV_KEY, input->evbit);
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
__set_bit(BTN_LEFT, input->keybit);
__set_bit(BTN_RIGHT, input->keybit);
if (emulate_3button)
@@ -625,7 +630,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
* inverse of the reported Y.
*/
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_MT_POSITION_X,
MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
@@ -741,19 +747,25 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev)
int ret;
int feature_size;
- if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
- hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
- if (hdev->vendor == BT_VENDOR_ID_APPLE) {
+ switch (hdev->product) {
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2:
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC:
+ switch (hdev->vendor) {
+ case BT_VENDOR_ID_APPLE:
feature_size = sizeof(feature_mt_trackpad2_bt);
feature = feature_mt_trackpad2_bt;
- } else { /* USB_VENDOR_ID_APPLE */
+ break;
+ default: /* USB_VENDOR_ID_APPLE */
feature_size = sizeof(feature_mt_trackpad2_usb);
feature = feature_mt_trackpad2_usb;
}
- } else if (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ break;
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2:
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC:
feature_size = sizeof(feature_mt_mouse2);
feature = feature_mt_mouse2;
- } else {
+ break;
+ default:
feature_size = sizeof(feature_mt);
feature = feature_mt;
}
@@ -787,6 +799,7 @@ static int magicmouse_fetch_battery(struct hid_device *hdev)
if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE ||
(hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
+ hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC &&
hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC))
return -1;
@@ -857,6 +870,7 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->vendor == USB_VENDOR_ID_APPLE &&
(id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC ||
((id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
hdev->type != HID_TYPE_USBMOUSE)))
@@ -868,21 +882,27 @@ static int magicmouse_probe(struct hid_device *hdev,
goto err_stop_hw;
}
- if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
- report = hid_register_report(hdev, HID_INPUT_REPORT,
- MOUSE_REPORT_ID, 0);
- else if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
- report = hid_register_report(hdev, HID_INPUT_REPORT,
- MOUSE2_REPORT_ID, 0);
- else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
- id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
- if (id->vendor == BT_VENDOR_ID_APPLE)
+ switch (id->product) {
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE:
+ report = hid_register_report(hdev, HID_INPUT_REPORT, MOUSE_REPORT_ID, 0);
+ break;
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2:
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC:
+ report = hid_register_report(hdev, HID_INPUT_REPORT, MOUSE2_REPORT_ID, 0);
+ break;
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2:
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC:
+ switch (id->vendor) {
+ case BT_VENDOR_ID_APPLE:
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD2_BT_REPORT_ID, 0);
- else /* USB_VENDOR_ID_APPLE */
+ break;
+ default:
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD2_USB_REPORT_ID, 0);
- } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ }
+ break;
+ default: /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD_REPORT_ID, 0);
report = hid_register_report(hdev, HID_INPUT_REPORT,
@@ -909,7 +929,8 @@ static int magicmouse_probe(struct hid_device *hdev,
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
- if (ret == -EIO && id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ if (ret == -EIO && (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC)) {
schedule_delayed_work(&msc->work, msecs_to_jiffies(500));
}
@@ -945,6 +966,7 @@ static const __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
if (hdev->vendor == USB_VENDOR_ID_APPLE &&
(hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC ||
hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
*rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) {
@@ -971,6 +993,10 @@ static const struct hid_device_id magic_mice[] = {
USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC), .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
diff --git a/drivers/hid/hid-mcp2200.c b/drivers/hid/hid-mcp2200.c
index bf57f7f6caa0..e6ea0a2140eb 100644
--- a/drivers/hid/hid-mcp2200.c
+++ b/drivers/hid/hid-mcp2200.c
@@ -127,8 +127,8 @@ static int mcp_cmd_read_all(struct mcp2200 *mcp)
return mcp->status;
}
-static void mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
struct mcp2200 *mcp = gpiochip_get_data(gc);
u8 value;
@@ -150,16 +150,20 @@ static void mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
if (status == sizeof(struct mcp_set_clear_outputs))
mcp->gpio_val = value;
+ else
+ status = -EIO;
mutex_unlock(&mcp->lock);
+
+ return status;
}
-static void mcp_set(struct gpio_chip *gc, unsigned int gpio_nr, int value)
+static int mcp_set(struct gpio_chip *gc, unsigned int gpio_nr, int value)
{
unsigned long mask = 1 << gpio_nr;
unsigned long bmap_value = value << gpio_nr;
- mcp_set_multiple(gc, &mask, &bmap_value);
+ return mcp_set_multiple(gc, &mask, &bmap_value);
}
static int mcp_get_multiple(struct gpio_chip *gc, unsigned long *mask,
@@ -263,9 +267,10 @@ static int mcp_direction_output(struct gpio_chip *gc, unsigned int gpio_nr,
bmap_value = value << gpio_nr;
ret = mcp_set_direction(gc, gpio_nr, MCP2200_DIR_OUT);
- if (!ret)
- mcp_set_multiple(gc, &mask, &bmap_value);
- return ret;
+ if (ret)
+ return ret;
+
+ return mcp_set_multiple(gc, &mask, &bmap_value);
}
static const struct gpio_chip template_chip = {
@@ -274,8 +279,8 @@ static const struct gpio_chip template_chip = {
.get_direction = mcp_get_direction,
.direction_input = mcp_direction_input,
.direction_output = mcp_direction_output,
- .set = mcp_set,
- .set_multiple = mcp_set_multiple,
+ .set_rv = mcp_set,
+ .set_multiple_rv = mcp_set_multiple,
.get = mcp_get,
.get_multiple = mcp_get_multiple,
.base = -1,
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index 0f93c22a479f..6c0ac14f11a6 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -624,10 +624,10 @@ static int mcp_gpio_get(struct gpio_chip *gc,
return ret;
}
-static void mcp_gpio_set(struct gpio_chip *gc,
- unsigned int offset, int value)
+static int mcp_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct mcp2221 *mcp = gpiochip_get_data(gc);
+ int ret;
memset(mcp->txbuf, 0, 18);
mcp->txbuf[0] = MCP2221_GPIO_SET;
@@ -638,8 +638,10 @@ static void mcp_gpio_set(struct gpio_chip *gc,
mcp->txbuf[mcp->gp_idx] = !!value;
mutex_lock(&mcp->lock);
- mcp_send_data_req_status(mcp, mcp->txbuf, 18);
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 18);
mutex_unlock(&mcp->lock);
+
+ return ret;
}
static int mcp_gpio_dir_set(struct mcp2221 *mcp,
@@ -1206,7 +1208,7 @@ static int mcp2221_probe(struct hid_device *hdev,
mcp->gc->direction_input = mcp_gpio_direction_input;
mcp->gc->direction_output = mcp_gpio_direction_output;
mcp->gc->get_direction = mcp_gpio_get_direction;
- mcp->gc->set = mcp_gpio_set;
+ mcp->gc->set_rv = mcp_gpio_set;
mcp->gc->get = mcp_gpio_get;
mcp->gc->ngpio = MCP_NGPIO;
mcp->gc->base = -1;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 7ac8e16e6158..ded0fef7d8c7 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1887,6 +1887,16 @@ static void mt_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
+static void mt_on_hid_hw_open(struct hid_device *hdev)
+{
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL);
+}
+
+static void mt_on_hid_hw_close(struct hid_device *hdev)
+{
+ mt_set_modes(hdev, HID_LATENCY_HIGH, TOUCHPAD_REPORT_NONE);
+}
+
/*
* This list contains only:
* - VID/PID of products not working with the default multitouch handling
@@ -2354,5 +2364,7 @@ static struct hid_driver mt_driver = {
.suspend = pm_ptr(mt_suspend),
.reset_resume = pm_ptr(mt_reset_resume),
.resume = pm_ptr(mt_resume),
+ .on_hid_hw_open = mt_on_hid_hw_open,
+ .on_hid_hw_close = mt_on_hid_hw_close,
};
module_hid_driver(mt_driver);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 646171598e41..7fefeb413ec3 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -27,6 +27,8 @@
static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS },
@@ -1061,7 +1063,7 @@ bool hid_ignore(struct hid_device *hdev)
}
if (hdev->type == HID_TYPE_USBMOUSE &&
- hid_match_id(hdev, hid_mouse_ignore_list))
+ hdev->quirks & HID_QUIRK_IGNORE_MOUSE)
return true;
return !!hid_match_id(hdev, hid_ignore_list);
@@ -1265,6 +1267,9 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
if (hid_match_id(hdev, hid_ignore_list))
quirks |= HID_QUIRK_IGNORE;
+ if (hid_match_id(hdev, hid_mouse_ignore_list))
+ quirks |= HID_QUIRK_IGNORE_MOUSE;
+
if (hid_match_id(hdev, hid_have_special_driver))
quirks |= HID_QUIRK_HAVE_SPECIAL_DRIVER;
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index dfd9d22ed559..949d307c66a8 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -1150,11 +1150,9 @@ static void steam_client_ll_close(struct hid_device *hdev)
struct steam_device *steam = hdev->driver_data;
unsigned long flags;
- bool connected;
spin_lock_irqsave(&steam->lock, flags);
steam->client_opened--;
- connected = steam->connected && !steam->client_opened;
spin_unlock_irqrestore(&steam->lock, flags);
schedule_work(&steam->unregister_work);
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index 3b81468a1df2..0bf70664c35e 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -174,6 +174,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
u8 ep_addr[2] = {b_ep, 0};
if (!usb_check_int_endpoints(usbif, ep_addr)) {
+ kfree(send_buf);
hid_err(hdev, "Unexpected non-int endpoint\n");
return;
}
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index a367df6ea01f..61a4019ddc74 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -142,11 +142,12 @@ static int uclogic_input_configured(struct hid_device *hdev,
suffix = "System Control";
break;
}
- }
-
- if (suffix)
+ } else {
hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
"%s %s", hdev->name, suffix);
+ if (!hi->input->name)
+ return -ENOMEM;
+ }
return 0;
}
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
index fa51155ebe39..8a8c4a46f927 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -82,15 +82,10 @@ static int quicki2c_acpi_get_dsd_property(struct acpi_device *adev, acpi_string
{
acpi_handle handle = acpi_device_handle(adev);
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object obj = { .type = type };
- struct acpi_object_list arg_list = {
- .count = 1,
- .pointer = &obj,
- };
union acpi_object *ret_obj;
acpi_status status;
- status = acpi_evaluate_object(handle, dsd_method_name, &arg_list, &buffer);
+ status = acpi_evaluate_object(handle, dsd_method_name, NULL, &buffer);
if (ACPI_FAILURE(status)) {
acpi_handle_err(handle,
"Can't evaluate %s method: %d\n", dsd_method_name, status);
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
index 4fc78b5a04b5..c105df7f6c87 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
@@ -1121,7 +1121,7 @@ EXPORT_SYMBOL_NS_GPL(thc_port_select, "INTEL_THC");
static u8 thc_get_spi_freq_div_val(struct thc_device *dev, u32 spi_freq_val)
{
- int frequency[] = {
+ static const int frequency[] = {
THC_SPI_FREQUENCY_7M,
THC_SPI_FREQUENCY_15M,
THC_SPI_FREQUENCY_17M,
@@ -1130,7 +1130,7 @@ static u8 thc_get_spi_freq_div_val(struct thc_device *dev, u32 spi_freq_val)
THC_SPI_FREQUENCY_31M,
THC_SPI_FREQUENCY_41M,
};
- u8 frequency_div[] = {
+ static const u8 frequency_div[] = {
THC_SPI_FRQ_DIV_2,
THC_SPI_FRQ_DIV_1,
THC_SPI_FRQ_DIV_7,
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 7d9297fad90e..d4cbecc668ec 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -984,12 +984,11 @@ static int usbhid_parse(struct hid_device *hid)
struct usb_host_interface *interface = intf->cur_altsetting;
struct usb_device *dev = interface_to_usbdev (intf);
struct hid_descriptor *hdesc;
+ struct hid_class_descriptor *hcdesc;
u32 quirks = 0;
unsigned int rsize = 0;
char *rdesc;
- int ret, n;
- int num_descriptors;
- size_t offset = offsetof(struct hid_descriptor, desc);
+ int ret;
quirks = hid_lookup_quirk(hid);
@@ -1011,20 +1010,19 @@ static int usbhid_parse(struct hid_device *hid)
return -ENODEV;
}
- if (hdesc->bLength < sizeof(struct hid_descriptor)) {
- dbg_hid("hid descriptor is too short\n");
+ if (!hdesc->bNumDescriptors ||
+ hdesc->bLength != sizeof(*hdesc) +
+ (hdesc->bNumDescriptors - 1) * sizeof(*hcdesc)) {
+ dbg_hid("hid descriptor invalid, bLen=%hhu bNum=%hhu\n",
+ hdesc->bLength, hdesc->bNumDescriptors);
return -EINVAL;
}
hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode;
- num_descriptors = min_t(int, hdesc->bNumDescriptors,
- (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
-
- for (n = 0; n < num_descriptors; n++)
- if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
- rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
+ if (hdesc->rpt_desc.bDescriptorType == HID_DT_REPORT)
+ rsize = le16_to_cpu(hdesc->rpt_desc.wDescriptorLength);
if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
dbg_hid("weird size of report descriptor (%u)\n", rsize);
@@ -1052,6 +1050,11 @@ static int usbhid_parse(struct hid_device *hid)
goto err;
}
+ if (hdesc->bNumDescriptors > 1)
+ hid_warn(intf,
+ "%u unsupported optional hid class descriptors\n",
+ (int)(hdesc->bNumDescriptors - 1));
+
hid->quirks |= quirks;
return 0;
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 1556d4287fa5..eaf099b2efdb 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -70,10 +70,16 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
{
while (!kfifo_is_empty(fifo)) {
int size = kfifo_peek_len(fifo);
- u8 *buf = kzalloc(size, GFP_KERNEL);
+ u8 *buf;
unsigned int count;
int err;
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf) {
+ kfifo_skip(fifo);
+ continue;
+ }
+
count = kfifo_out(fifo, buf, size);
if (count != size) {
// Hard to say what is the "right" action in this
@@ -81,6 +87,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
// to flush seems reasonable enough, however.
hid_warn(hdev, "%s: removed fifo entry with unexpected size\n",
__func__);
+ kfree(buf);
continue;
}
err = hid_report_raw_event(hdev, HID_INPUT_REPORT, buf, size, false);
@@ -2361,6 +2368,8 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
unsigned int connect_mask = HID_CONNECT_HIDRAW;
features->pktlen = wacom_compute_pktlen(hdev);
+ if (!features->pktlen)
+ return -ENODEV;
if (!devres_open_group(&hdev->dev, wacom, GFP_KERNEL))
return -ENOMEM;
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 6c1416167bd2..1cd188b73b74 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -5,17 +5,18 @@ menu "Microsoft Hyper-V guest support"
config HYPERV
tristate "Microsoft Hyper-V client drivers"
depends on (X86 && X86_LOCAL_APIC && HYPERVISOR_GUEST) \
- || (ACPI && ARM64 && !CPU_BIG_ENDIAN)
+ || (ARM64 && !CPU_BIG_ENDIAN)
select PARAVIRT
select X86_HV_CALLBACK_VECTOR if X86
select OF_EARLY_FLATTREE if OF
+ select SYSFB if !HYPERV_VTL_MODE
help
Select this option to run Linux as a Hyper-V client operating
system.
config HYPERV_VTL_MODE
bool "Enable Linux to boot in VTL context"
- depends on X86_64 && HYPERV
+ depends on (X86_64 || ARM64) && HYPERV
depends on SMP
default n
help
@@ -31,7 +32,7 @@ config HYPERV_VTL_MODE
Select this option to build a Linux kernel to run at a VTL other than
the normal VTL0, which currently is only VTL2. This option
- initializes the x86 platform for VTL2, and adds the ability to boot
+ initializes the kernel to run in VTL2, and adds the ability to boot
secondary CPUs directly into 64-bit context as required for VTLs other
than 0. A kernel built with this option must run at VTL2, and will
not run as a normal guest.
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index fb8cd8469328..35f26fa1ffe7 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -1077,68 +1077,10 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
EXPORT_SYMBOL(vmbus_sendpacket);
/*
- * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
- * packets using a GPADL Direct packet type. This interface allows you
- * to control notifying the host. This will be useful for sending
- * batched data. Also the sender can control the send flags
- * explicitly.
- */
-int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount, void *buffer, u32 bufferlen,
- u64 requestid)
-{
- int i;
- struct vmbus_channel_packet_page_buffer desc;
- u32 descsize;
- u32 packetlen;
- u32 packetlen_aligned;
- struct kvec bufferlist[3];
- u64 aligned_data = 0;
-
- if (pagecount > MAX_PAGE_BUFFER_COUNT)
- return -EINVAL;
-
- /*
- * Adjust the size down since vmbus_channel_packet_page_buffer is the
- * largest size we support
- */
- descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
- ((MAX_PAGE_BUFFER_COUNT - pagecount) *
- sizeof(struct hv_page_buffer));
- packetlen = descsize + bufferlen;
- packetlen_aligned = ALIGN(packetlen, sizeof(u64));
-
- /* Setup the descriptor */
- desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
- desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
- desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
- desc.length8 = (u16)(packetlen_aligned >> 3);
- desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
- desc.reserved = 0;
- desc.rangecount = pagecount;
-
- for (i = 0; i < pagecount; i++) {
- desc.range[i].len = pagebuffers[i].len;
- desc.range[i].offset = pagebuffers[i].offset;
- desc.range[i].pfn = pagebuffers[i].pfn;
- }
-
- bufferlist[0].iov_base = &desc;
- bufferlist[0].iov_len = descsize;
- bufferlist[1].iov_base = buffer;
- bufferlist[1].iov_len = bufferlen;
- bufferlist[2].iov_base = &aligned_data;
- bufferlist[2].iov_len = (packetlen_aligned - packetlen);
-
- return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
-}
-EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
-
-/*
- * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
+ * vmbus_sendpacket_mpb_desc - Send one or more multi-page buffer packets
* using a GPADL Direct packet type.
- * The buffer includes the vmbus descriptor.
+ * The desc argument must include space for the VMBus descriptor. The
+ * rangecount field must already be set.
*/
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct vmbus_packet_mpb_array *desc,
@@ -1160,7 +1102,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
desc->length8 = (u16)(packetlen_aligned >> 3);
desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc->reserved = 0;
- desc->rangecount = 1;
bufferlist[0].iov_base = desc;
bufferlist[0].iov_len = desc_size;
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 8351360bba16..be490c598785 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -207,10 +207,19 @@ int vmbus_connect(void)
mutex_init(&vmbus_connection.channel_mutex);
/*
+ * The following Hyper-V interrupt and monitor pages can be used by
+ * UIO for mapping to user-space, so they should always be allocated on
+ * system page boundaries. The system page size must be >= the Hyper-V
+ * page size.
+ */
+ BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
+
+ /*
* Setup the vmbus event connection for channel interrupt
* abstraction stuff
*/
- vmbus_connection.int_page = hv_alloc_hyperv_zeroed_page();
+ vmbus_connection.int_page =
+ (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (vmbus_connection.int_page == NULL) {
ret = -ENOMEM;
goto cleanup;
@@ -225,8 +234,8 @@ int vmbus_connect(void)
* Setup the monitor notification facility. The 1st page for
* parent->child and the 2nd page for child->parent
*/
- vmbus_connection.monitor_pages[0] = hv_alloc_hyperv_page();
- vmbus_connection.monitor_pages[1] = hv_alloc_hyperv_page();
+ vmbus_connection.monitor_pages[0] = (void *)__get_free_page(GFP_KERNEL);
+ vmbus_connection.monitor_pages[1] = (void *)__get_free_page(GFP_KERNEL);
if ((vmbus_connection.monitor_pages[0] == NULL) ||
(vmbus_connection.monitor_pages[1] == NULL)) {
ret = -ENOMEM;
@@ -342,21 +351,23 @@ void vmbus_disconnect(void)
destroy_workqueue(vmbus_connection.work_queue);
if (vmbus_connection.int_page) {
- hv_free_hyperv_page(vmbus_connection.int_page);
+ free_page((unsigned long)vmbus_connection.int_page);
vmbus_connection.int_page = NULL;
}
if (vmbus_connection.monitor_pages[0]) {
if (!set_memory_encrypted(
(unsigned long)vmbus_connection.monitor_pages[0], 1))
- hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
+ free_page((unsigned long)
+ vmbus_connection.monitor_pages[0]);
vmbus_connection.monitor_pages[0] = NULL;
}
if (vmbus_connection.monitor_pages[1]) {
if (!set_memory_encrypted(
(unsigned long)vmbus_connection.monitor_pages[1], 1))
- hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
+ free_page((unsigned long)
+ vmbus_connection.monitor_pages[1]);
vmbus_connection.monitor_pages[1] = NULL;
}
}
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index b3b11be11650..49898d10faff 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -105,45 +105,6 @@ void __init hv_common_free(void)
hv_synic_eventring_tail = NULL;
}
-/*
- * Functions for allocating and freeing memory with size and
- * alignment HV_HYP_PAGE_SIZE. These functions are needed because
- * the guest page size may not be the same as the Hyper-V page
- * size. We depend upon kmalloc() aligning power-of-two size
- * allocations to the allocation size boundary, so that the
- * allocated memory appears to Hyper-V as a page of the size
- * it expects.
- */
-
-void *hv_alloc_hyperv_page(void)
-{
- BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
-
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- return (void *)__get_free_page(GFP_KERNEL);
- else
- return kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page);
-
-void *hv_alloc_hyperv_zeroed_page(void)
-{
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- else
- return kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);
-
-void hv_free_hyperv_page(void *addr)
-{
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- free_page((unsigned long)addr);
- else
- kfree(addr);
-}
-EXPORT_SYMBOL_GPL(hv_free_hyperv_page);
-
static void *hv_panic_page;
/*
@@ -272,7 +233,7 @@ static void hv_kmsg_dump_unregister(void)
atomic_notifier_chain_unregister(&panic_notifier_list,
&hyperv_panic_report_block);
- hv_free_hyperv_page(hv_panic_page);
+ kfree(hv_panic_page);
hv_panic_page = NULL;
}
@@ -280,7 +241,7 @@ static void hv_kmsg_dump_register(void)
{
int ret;
- hv_panic_page = hv_alloc_hyperv_zeroed_page();
+ hv_panic_page = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!hv_panic_page) {
pr_err("Hyper-V: panic message page memory allocation failed\n");
return;
@@ -289,7 +250,7 @@ static void hv_kmsg_dump_register(void)
ret = kmsg_dump_register(&hv_kmsg_dumper);
if (ret) {
pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
- hv_free_hyperv_page(hv_panic_page);
+ kfree(hv_panic_page);
hv_panic_page = NULL;
}
}
@@ -307,7 +268,7 @@ void __init hv_get_partition_id(void)
local_irq_save(flags);
output = *this_cpu_ptr(hyperv_pcpu_input_arg);
- status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, &output);
+ status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output);
pt_id = output->partition_id;
local_irq_restore(flags);
@@ -317,6 +278,37 @@ void __init hv_get_partition_id(void)
pr_err("Hyper-V: failed to get partition ID: %#x\n",
hv_result(status));
}
+#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
+u8 __init get_vtl(void)
+{
+ u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
+ struct hv_input_get_vp_registers *input;
+ struct hv_output_get_vp_registers *output;
+ unsigned long flags;
+ u64 ret;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, struct_size(input, names, 1));
+ input->partition_id = HV_PARTITION_ID_SELF;
+ input->vp_index = HV_VP_INDEX_SELF;
+ input->input_vtl.as_uint8 = 0;
+ input->names[0] = HV_REGISTER_VSM_VP_STATUS;
+
+ ret = hv_do_hypercall(control, input, output);
+ if (hv_result_success(ret)) {
+ ret = output->values[0].reg8 & HV_VTL_MASK;
+ } else {
+ pr_err("Failed to get VTL(error: %lld) exiting...\n", ret);
+ BUG();
+ }
+
+ local_irq_restore(flags);
+ return ret;
+}
+#endif
int __init hv_common_init(void)
{
@@ -566,9 +558,11 @@ int hv_common_cpu_die(unsigned int cpu)
* originally allocated memory is reused in hv_common_cpu_init().
*/
- synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
- kfree(*synic_eventring_tail);
- *synic_eventring_tail = NULL;
+ if (hv_root_partition()) {
+ synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
+ kfree(*synic_eventring_tail);
+ *synic_eventring_tail = NULL;
+ }
return 0;
}
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 29780f3a7478..0b450e53161e 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -477,4 +477,10 @@ static inline int hv_debug_add_dev_dir(struct hv_device *dev)
#endif /* CONFIG_HYPERV_TESTING */
+/* Create and remove sysfs entry for memory mapped ring buffers for a channel */
+int hv_create_ring_sysfs(struct vmbus_channel *channel,
+ int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
+ struct vm_area_struct *vma));
+int hv_remove_ring_sysfs(struct vmbus_channel *channel);
+
#endif /* _HYPERV_VMBUS_H */
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8d3cff42bdbb..33b524b4eb5e 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -45,7 +45,8 @@ struct vmbus_dynid {
struct hv_vmbus_device_id id;
};
-static struct device *hv_dev;
+/* VMBus Root Device */
+static struct device *vmbus_root_device;
static int hyperv_cpuhp_online;
@@ -80,9 +81,15 @@ static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);
+struct device *hv_get_vmbus_root_device(void)
+{
+ return vmbus_root_device;
+}
+EXPORT_SYMBOL_GPL(hv_get_vmbus_root_device);
+
static int vmbus_exists(void)
{
- if (hv_dev == NULL)
+ if (vmbus_root_device == NULL)
return -ENODEV;
return 0;
@@ -707,7 +714,30 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(const struct hv_driver *
return id;
}
-/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
+/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices
+ *
+ * This function can race with vmbus_device_register(). This function is
+ * typically running on a user thread in response to writing to the "new_id"
+ * sysfs entry for a driver. vmbus_device_register() is running on a
+ * workqueue thread in response to the Hyper-V host offering a device to the
+ * guest. This function calls driver_attach(), which looks for an existing
+ * device matching the new id, and attaches the driver to which the new id
+ * has been assigned. vmbus_device_register() calls device_register(), which
+ * looks for a driver that matches the device being registered. If both
+ * operations are running simultaneously, the device driver probe function runs
+ * on whichever thread establishes the linkage between the driver and device.
+ *
+ * In most cases, it doesn't matter which thread runs the driver probe
+ * function. But if vmbus_device_register() does not find a matching driver,
+ * it proceeds to create the "channels" subdirectory and numbered per-channel
+ * subdirectory in sysfs. While that multi-step creation is in progress, this
+ * function could run the driver probe function. If the probe function checks
+ * for, or operates on, entries in the "channels" subdirectory, including by
+ * calling hv_create_ring_sysfs(), the operation may or may not succeed
+ * depending on the race. The race can't create a kernel failure in VMBus
+ * or device subsystem code, but probe functions in VMBus drivers doing such
+ * operations must be prepared for the failure case.
+ */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
struct vmbus_dynid *dynid;
@@ -861,7 +891,7 @@ static int vmbus_dma_configure(struct device *child_device)
* On x86/x64 coherence is assumed and these calls have no effect.
*/
hv_setup_dma_ops(child_device,
- device_get_dma_attr(hv_dev) == DEV_DMA_COHERENT);
+ device_get_dma_attr(vmbus_root_device) == DEV_DMA_COHERENT);
return 0;
}
@@ -1802,6 +1832,26 @@ static ssize_t subchannel_id_show(struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);
+static int hv_mmap_ring_buffer_wrapper(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct vmbus_channel *channel = container_of(kobj, struct vmbus_channel, kobj);
+
+ /*
+ * hv_(create|remove)_ring_sysfs implementation ensures that mmap_ring_buffer
+ * is not NULL.
+ */
+ return channel->mmap_ring_buffer(channel, vma);
+}
+
+static struct bin_attribute chan_attr_ring_buffer = {
+ .attr = {
+ .name = "ring",
+ .mode = 0600,
+ },
+ .mmap = hv_mmap_ring_buffer_wrapper,
+};
static struct attribute *vmbus_chan_attrs[] = {
&chan_attr_out_mask.attr,
&chan_attr_in_mask.attr,
@@ -1821,6 +1871,11 @@ static struct attribute *vmbus_chan_attrs[] = {
NULL
};
+static const struct bin_attribute *vmbus_chan_bin_attrs[] = {
+ &chan_attr_ring_buffer,
+ NULL
+};
+
/*
* Channel-level attribute_group callback function. Returns the permission for
* each attribute, and returns 0 if an attribute is not visible.
@@ -1841,9 +1896,34 @@ static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
+static umode_t vmbus_chan_bin_attr_is_visible(struct kobject *kobj,
+ const struct bin_attribute *attr, int idx)
+{
+ const struct vmbus_channel *channel =
+ container_of(kobj, struct vmbus_channel, kobj);
+
+ /* Hide ring attribute if channel's ring_sysfs_visible is set to false */
+ if (attr == &chan_attr_ring_buffer && !channel->ring_sysfs_visible)
+ return 0;
+
+ return attr->attr.mode;
+}
+
+static size_t vmbus_chan_bin_size(struct kobject *kobj,
+ const struct bin_attribute *bin_attr, int a)
+{
+ const struct vmbus_channel *channel =
+ container_of(kobj, struct vmbus_channel, kobj);
+
+ return channel->ringbuffer_pagecount << PAGE_SHIFT;
+}
+
static const struct attribute_group vmbus_chan_group = {
.attrs = vmbus_chan_attrs,
- .is_visible = vmbus_chan_attr_is_visible
+ .bin_attrs = vmbus_chan_bin_attrs,
+ .is_visible = vmbus_chan_attr_is_visible,
+ .is_bin_visible = vmbus_chan_bin_attr_is_visible,
+ .bin_size = vmbus_chan_bin_size,
};
static const struct kobj_type vmbus_chan_ktype = {
@@ -1851,6 +1931,64 @@ static const struct kobj_type vmbus_chan_ktype = {
.release = vmbus_chan_release,
};
+/**
+ * hv_create_ring_sysfs() - create "ring" sysfs entry corresponding to ring buffers for a channel.
+ * @channel: Pointer to vmbus_channel structure
+ * @hv_mmap_ring_buffer: function pointer for initializing the function to be called on mmap of
+ * channel's "ring" sysfs node, which is for the ring buffer of that channel.
+ * The function pointer has the following type:
+ * int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
+ * struct vm_area_struct *vma)
+ * It takes a pointer to the channel and a pointer to the vm_area_struct
+ * used for mmap as its arguments.
+ *
+ * The sysfs node for a channel's ring buffer is created along with the other attributes, but
+ * its visibility is disabled by default; it should only be made visible while the use case
+ * that needs it is active.
+ * For example, an HV_NIC device is driven by either uio_hv_generic or hv_netvsc at any given
+ * point in time, and the "ring" sysfs node is needed only while uio_hv_generic is bound to
+ * that device. To avoid exposing the ring buffer by default, this function is responsible for
+ * enabling its visibility for userspace to use.
+ * Note: Race conditions with userspace are possible, and creating new use cases for this is
+ * discouraged. This was added to maintain backward compatibility, while solving one of the
+ * race conditions in uio_hv_generic during sysfs creation. See comments with
+ * vmbus_add_dynid() and vmbus_device_register().
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int hv_create_ring_sysfs(struct vmbus_channel *channel,
+ int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
+ struct vm_area_struct *vma))
+{
+ struct kobject *kobj = &channel->kobj;
+
+ channel->mmap_ring_buffer = hv_mmap_ring_buffer;
+ channel->ring_sysfs_visible = true;
+
+ return sysfs_update_group(kobj, &vmbus_chan_group);
+}
+EXPORT_SYMBOL_GPL(hv_create_ring_sysfs);
+
+/**
+ * hv_remove_ring_sysfs() - remove ring sysfs entry corresponding to ring buffers for a channel.
+ * @channel: Pointer to vmbus_channel structure
+ *
+ * Hide "ring" sysfs for a channel by changing its is_visible attribute and updating sysfs group.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int hv_remove_ring_sysfs(struct vmbus_channel *channel)
+{
+ struct kobject *kobj = &channel->kobj;
+ int ret;
+
+ channel->ring_sysfs_visible = false;
+ ret = sysfs_update_group(kobj, &vmbus_chan_group);
+ channel->mmap_ring_buffer = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hv_remove_ring_sysfs);
+
/*
* vmbus_add_channel_kobj - setup a sub-directory under device/channels
*/
@@ -1930,7 +2068,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
&child_device_obj->channel->offermsg.offer.if_instance);
child_device_obj->device.bus = &hv_bus;
- child_device_obj->device.parent = hv_dev;
+ child_device_obj->device.parent = vmbus_root_device;
child_device_obj->device.release = vmbus_device_release;
child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
@@ -1948,6 +2086,20 @@ int vmbus_device_register(struct hv_device *child_device_obj)
return ret;
}
+ /*
+ * If device_register() found a driver to assign to the device, the
+ * driver's probe function has already run at this point. If that
+ * probe function accesses or operates on the "channels" subdirectory
+ * in sysfs, those operations will have failed because the "channels"
+ * subdirectory doesn't exist until the code below runs. Or if the
+ * probe function creates a /dev entry, a user space program could
+ * find and open the /dev entry, and then create a race by accessing
+ * the "channels" subdirectory while the creation steps are in progress
+ * here. The race can't result in a kernel failure, but the user space
+ * program may get an error in accessing "channels" or its
+ * subdirectories. See also comments with vmbus_add_dynid() about a
+ * related race condition.
+ */
child_device_obj->channels_kset = kset_create_and_add("channels",
NULL, kobj);
if (!child_device_obj->channels_kset) {
@@ -2305,7 +2457,7 @@ static int vmbus_acpi_add(struct platform_device *pdev)
struct acpi_device *ancestor;
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
- hv_dev = &device->dev;
+ vmbus_root_device = &device->dev;
/*
* Older versions of Hyper-V for ARM64 fail to include the _CCA
@@ -2358,6 +2510,31 @@ static int vmbus_acpi_add(struct platform_device *pdev)
}
#endif
+static int vmbus_set_irq(struct platform_device *pdev)
+{
+ struct irq_data *data;
+ int irq;
+ irq_hw_number_t hwirq;
+
+ irq = platform_get_irq(pdev, 0);
+ /* platform_get_irq() may not return 0. */
+ if (irq < 0)
+ return irq;
+
+ data = irq_get_irq_data(irq);
+ if (!data) {
+ pr_err("No interrupt data for VMBus virq %d\n", irq);
+ return -ENODEV;
+ }
+ hwirq = irqd_to_hwirq(data);
+
+ vmbus_irq = irq;
+ vmbus_interrupt = hwirq;
+ pr_debug("VMBus virq %d, hwirq %d\n", vmbus_irq, vmbus_interrupt);
+
+ return 0;
+}
+
static int vmbus_device_add(struct platform_device *pdev)
{
struct resource **cur_res = &hyperv_mmio;
@@ -2366,12 +2543,17 @@ static int vmbus_device_add(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int ret;
- hv_dev = &pdev->dev;
+ vmbus_root_device = &pdev->dev;
ret = of_range_parser_init(&parser, np);
if (ret)
return ret;
+ if (!__is_defined(HYPERVISOR_CALLBACK_VECTOR))
+ ret = vmbus_set_irq(pdev);
+ if (ret)
+ return ret;
+
for_each_of_range(&parser, &range) {
struct resource *res;
@@ -2679,7 +2861,7 @@ static int __init hv_acpi_init(void)
if (ret)
return ret;
- if (!hv_dev) {
+ if (!vmbus_root_device) {
ret = -ENODEV;
goto cleanup;
}
@@ -2710,7 +2892,7 @@ static int __init hv_acpi_init(void)
cleanup:
platform_driver_unregister(&vmbus_platform_driver);
- hv_dev = NULL;
+ vmbus_root_device = NULL;
return ret;
}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f91f713b0105..1b1d64493909 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -335,6 +335,26 @@ config SENSORS_K10TEMP
This driver can also be built as a module. If so, the module
will be called k10temp.
+config SENSORS_KBATT
+ tristate "KEBA battery controller support"
+ depends on KEBA_CP500
+ help
+ This driver supports the battery monitoring controller found in
+ KEBA system FPGA devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called kbatt.
+
+config SENSORS_KFAN
+ tristate "KEBA fan controller support"
+ depends on KEBA_CP500
+ help
+ This driver supports the fan controller found in KEBA system
+ FPGA devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called kfan.
+
config SENSORS_FAM15H_POWER
tristate "AMD Family 15h processor power"
depends on X86 && PCI && CPU_SUP_AMD
@@ -1308,6 +1328,15 @@ config SENSORS_MAX31790
This driver can also be built as a module. If so, the module
will be called max31790.
+config SENSORS_MAX77705
+ tristate "MAX77705 current and voltage sensor"
+ depends on MFD_MAX77705
+ help
+ If you say yes here you get support for MAX77705 sensors connected with I2C.
+
+ This driver can also be built as a module. If so, the module
+ will be called max77705-hwmon.
+
config SENSORS_MC34VR500
tristate "NXP MC34VR500 hardware monitoring driver"
depends on I2C
@@ -1795,17 +1824,6 @@ config SENSORS_NZXT_SMART2
source "drivers/hwmon/occ/Kconfig"
-config SENSORS_OXP
- tristate "OneXPlayer EC fan control"
- depends on ACPI_EC
- depends on X86
- help
- If you say yes here you get support for fan readings and control over
- OneXPlayer handheld devices. Only OneXPlayer mini AMD handheld variant
- boards are supported.
-
- Can also be built as a module. In that case it will be called oxp-sensors.
-
config SENSORS_PCF8591
tristate "Philips PCF8591 ADC/DAC"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 766c652ef22b..48e5866c0c9a 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -110,6 +110,8 @@ obj-$(CONFIG_SENSORS_IT87) += it87.o
obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
+obj-$(CONFIG_SENSORS_KBATT) += kbatt.o
+obj-$(CONFIG_SENSORS_KFAN) += kfan.o
obj-$(CONFIG_SENSORS_LAN966X) += lan966x-hwmon.o
obj-$(CONFIG_SENSORS_LENOVO_EC) += lenovo-ec-sensors.o
obj-$(CONFIG_SENSORS_LINEAGE) += lineage-pem.o
@@ -161,6 +163,7 @@ obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_MAX31827) += max31827.o
+obj-$(CONFIG_SENSORS_MAX77705) += max77705-hwmon.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MC34VR500) += mc34vr500.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
@@ -183,7 +186,6 @@ obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_NZXT_KRAKEN2) += nzxt-kraken2.o
obj-$(CONFIG_SENSORS_NZXT_KRAKEN3) += nzxt-kraken3.o
obj-$(CONFIG_SENSORS_NZXT_SMART2) += nzxt-smart2.o
-obj-$(CONFIG_SENSORS_OXP) += oxp-sensors.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
index 312ef3e98754..d1c55e2eb479 100644
--- a/drivers/hwmon/aht10.c
+++ b/drivers/hwmon/aht10.c
@@ -94,7 +94,7 @@ struct aht10_data {
unsigned int meas_size;
};
-/**
+/*
* aht10_init() - Initialize an AHT10/AHT20 chip
* @data: the data associated with this AHT10/AHT20 chip
* Return: 0 if successful, 1 if not
@@ -124,7 +124,7 @@ static int aht10_init(struct aht10_data *data)
return 0;
}
-/**
+/*
* aht10_polltime_expired() - check if the minimum poll interval has
* expired
* @data: the data containing the time to compare
@@ -140,7 +140,7 @@ static int aht10_polltime_expired(struct aht10_data *data)
DECLARE_CRC8_TABLE(crc8_table);
-/**
+/*
* crc8_check() - check crc of the sensor's measurements
* @raw_data: data frame received from sensor(including crc as the last byte)
* @count: size of the data frame
@@ -155,7 +155,7 @@ static int crc8_check(u8 *raw_data, int count)
return crc8(crc8_table, raw_data, count, CRC8_INIT_VALUE);
}
-/**
+/*
* aht10_read_values() - read and parse the raw data from the AHT10/AHT20
* @data: the struct aht10_data to use for the lock
* Return: 0 if successful, 1 if not
@@ -214,7 +214,7 @@ static int aht10_read_values(struct aht10_data *data)
return 0;
}
-/**
+/*
* aht10_interval_write() - store the given minimum poll interval.
* Return: 0 on success, -EINVAL if a value lower than the
* AHT10_MIN_POLL_INTERVAL is given
@@ -226,7 +226,7 @@ static ssize_t aht10_interval_write(struct aht10_data *data,
return 0;
}
-/**
+/*
* aht10_interval_read() - read the minimum poll interval
* in milliseconds
*/
@@ -237,7 +237,7 @@ static ssize_t aht10_interval_read(struct aht10_data *data,
return 0;
}
-/**
+/*
* aht10_temperature1_read() - read the temperature in millidegrees
*/
static int aht10_temperature1_read(struct aht10_data *data, long *val)
@@ -252,7 +252,7 @@ static int aht10_temperature1_read(struct aht10_data *data, long *val)
return 0;
}
-/**
+/*
* aht10_humidity1_read() - read the relative humidity in millipercent
*/
static int aht10_humidity1_read(struct aht10_data *data, long *val)
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 1e3c6acd8974..13a789cc85d2 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -23,9 +23,12 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
+#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <dt-bindings/pwm/pwm.h>
+
/*
* Addresses to scan.
*/
@@ -37,7 +40,7 @@ static const unsigned short normal_i2c[] = {0x18, 0x19, 0x1a, 0x2c, 0x2d, 0x2e,
* Insmod parameters
*/
-static int pwminv; /*Inverted PWM output. */
+static int pwminv = -1; /*Inverted PWM output. */
module_param(pwminv, int, 0444);
static int init = 1; /*Power-on initialization.*/
@@ -845,9 +848,43 @@ static int amc6821_detect(struct i2c_client *client, struct i2c_board_info *info
return 0;
}
-static int amc6821_init_client(struct amc6821_data *data)
+static enum pwm_polarity amc6821_pwm_polarity(struct i2c_client *client)
+{
+ enum pwm_polarity polarity = PWM_POLARITY_NORMAL;
+ struct of_phandle_args args;
+ struct device_node *fan_np;
+
+ /*
+ * For backward compatibility, the pwminv module parameter always
+ * takes precedence over any other device description.
+ */
+ if (pwminv == 0)
+ return PWM_POLARITY_NORMAL;
+ if (pwminv > 0)
+ return PWM_POLARITY_INVERSED;
+
+ fan_np = of_get_child_by_name(client->dev.of_node, "fan");
+ if (!fan_np)
+ return PWM_POLARITY_NORMAL;
+
+ if (of_parse_phandle_with_args(fan_np, "pwms", "#pwm-cells", 0, &args))
+ goto out;
+ of_node_put(args.np);
+
+ if (args.args_count != 2)
+ goto out;
+
+ if (args.args[1] & PWM_POLARITY_INVERTED)
+ polarity = PWM_POLARITY_INVERSED;
+out:
+ of_node_put(fan_np);
+ return polarity;
+}
+
+static int amc6821_init_client(struct i2c_client *client, struct amc6821_data *data)
{
struct regmap *regmap = data->regmap;
+ u32 regval;
int err;
if (init) {
@@ -864,11 +901,14 @@ static int amc6821_init_client(struct amc6821_data *data)
if (err)
return err;
+ regval = AMC6821_CONF1_START;
+ if (amc6821_pwm_polarity(client) == PWM_POLARITY_INVERSED)
+ regval |= AMC6821_CONF1_PWMINV;
+
err = regmap_update_bits(regmap, AMC6821_REG_CONF1,
AMC6821_CONF1_THERMOVIE | AMC6821_CONF1_FANIE |
AMC6821_CONF1_START | AMC6821_CONF1_PWMINV,
- AMC6821_CONF1_START |
- (pwminv ? AMC6821_CONF1_PWMINV : 0));
+ regval);
if (err)
return err;
}
@@ -916,7 +956,7 @@ static int amc6821_probe(struct i2c_client *client)
"Failed to initialize regmap\n");
data->regmap = regmap;
- err = amc6821_init_client(data);
+ err = amc6821_init_client(client, data);
if (err)
return err;
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 006ced5ab6e6..e0a95197c71b 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -169,7 +169,11 @@ enum board_family {
family_intel_600_series
};
-/* All the known sensors for ASUS EC controllers */
+/*
+ * All the known sensors for ASUS EC controllers. These arrays have to be sorted
+ * by the full ((bank << 8) + index) register index (see asus_ec_block_read() as
+ * to why).
+ */
static const struct ec_sensor_info sensors_family_amd_400[] = {
[ec_sensor_temp_chipset] =
EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
@@ -183,10 +187,10 @@ static const struct ec_sensor_info sensors_family_amd_400[] = {
EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
[ec_sensor_in_cpu_core] =
EC_SENSOR("CPU Core", hwmon_in, 2, 0x00, 0xa2),
- [ec_sensor_fan_cpu_opt] =
- EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xbc),
[ec_sensor_fan_vrm_hs] =
EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xbc),
[ec_sensor_fan_chipset] =
/* no chipset fans in this generation */
EC_SENSOR("Chipset", hwmon_fan, 0, 0x00, 0x00),
@@ -194,10 +198,10 @@ static const struct ec_sensor_info sensors_family_amd_400[] = {
EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xb4),
[ec_sensor_curr_cpu] =
EC_SENSOR("CPU", hwmon_curr, 1, 0x00, 0xf4),
- [ec_sensor_temp_water_in] =
- EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x0d),
[ec_sensor_temp_water_out] =
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x0b),
+ [ec_sensor_temp_water_in] =
+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x0d),
};
static const struct ec_sensor_info sensors_family_amd_500[] = {
@@ -239,19 +243,20 @@ static const struct ec_sensor_info sensors_family_amd_500[] = {
static const struct ec_sensor_info sensors_family_amd_600[] = {
[ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x30),
- [ec_sensor_temp_cpu_package] = EC_SENSOR("CPU Package", hwmon_temp, 1, 0x00, 0x31),
+ [ec_sensor_temp_cpu_package] =
+ EC_SENSOR("CPU Package", hwmon_temp, 1, 0x00, 0x31),
[ec_sensor_temp_mb] =
EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x32),
[ec_sensor_temp_vrm] =
EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x33),
[ec_sensor_temp_t_sensor] =
EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x36),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
[ec_sensor_temp_water_in] =
EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
[ec_sensor_temp_water_out] =
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
- [ec_sensor_fan_cpu_opt] =
- EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
};
static const struct ec_sensor_info sensors_family_intel_300[] = {
@@ -278,6 +283,14 @@ static const struct ec_sensor_info sensors_family_intel_600[] = {
[ec_sensor_temp_t_sensor] =
EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
[ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
+ [ec_sensor_fan_water_flow] =
+ EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xbe),
+ [ec_sensor_temp_water_in] =
+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
+ [ec_sensor_temp_water_out] =
+ EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
+ [ec_sensor_temp_water_block_in] =
+ EC_SENSOR("Water_Block_In", hwmon_temp, 1, 0x01, 0x02),
};
/* Shortcuts for common combinations */
@@ -300,6 +313,15 @@ struct ec_board_info {
enum board_family family;
};
+static const struct ec_board_info board_info_maximus_vi_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_intel_300_series,
+};
+
static const struct ec_board_info board_info_prime_x470_pro = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
@@ -402,6 +424,13 @@ static const struct ec_board_info board_info_maximus_xi_hero = {
.family = family_intel_300_series,
};
+static const struct ec_board_info board_info_maximus_z690_formula = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_SET_TEMP_WATER | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_intel_600_series,
+};
+
static const struct ec_board_info board_info_crosshair_viii_impact = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
@@ -507,6 +536,8 @@ static const struct ec_board_info board_info_tuf_gaming_x670e_plus = {
}
static const struct dmi_system_id dmi_table[] = {
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("MAXIMUS VI HERO",
+ &board_info_maximus_vi_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO",
&board_info_prime_x470_pro),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO",
@@ -537,6 +568,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_maximus_xi_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)",
&board_info_maximus_xi_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS Z690 FORMULA",
+ &board_info_maximus_z690_formula),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
&board_info_crosshair_viii_impact),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING",
@@ -933,6 +966,10 @@ static int asus_ec_hwmon_read_string(struct device *dev,
{
struct ec_sensors_data *state = dev_get_drvdata(dev);
int sensor_index = find_ec_sensor_index(state, type, channel);
+
+ if (sensor_index < 0)
+ return sensor_index;
+
*str = get_sensor_info(state, sensor_index)->label;
return 0;
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 79e5606e6d2f..1e2c8e284001 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1274,6 +1274,13 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
},
},
{
+ .ident = "Dell OptiPlex 7050",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7050"),
+ },
+ },
+ {
.ident = "Dell Precision",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 9ed2c4b6734e..8ecebea53651 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -143,8 +143,8 @@ static void do_read_registers_on_cu(void *_data)
*/
cu = topology_core_id(smp_processor_id());
- rdmsrl_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
- rdmsrl_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
+ rdmsrq_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
+ rdmsrq_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
data->cu_on[cu] = 1;
}
@@ -424,7 +424,7 @@ static int fam15h_power_init_data(struct pci_dev *f4,
*/
data->cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
- if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
+ if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
pr_err("Failed to read max compute unit power accumulator MSR\n");
return -ENODEV;
}
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index b779240328d5..516c34bb61c9 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -20,6 +20,9 @@
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <linux/thermal.h>
struct gpio_fan_speed {
@@ -42,6 +45,7 @@ struct gpio_fan_data {
bool pwm_enable;
struct gpio_desc *alarm_gpio;
struct work_struct alarm_work;
+ struct regulator *supply;
};
/*
@@ -125,13 +129,32 @@ static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
}
/* Must be called with fan_data->lock held, except during initialization. */
-static void set_fan_speed(struct gpio_fan_data *fan_data, int speed_index)
+static int set_fan_speed(struct gpio_fan_data *fan_data, int speed_index)
{
if (fan_data->speed_index == speed_index)
- return;
+ return 0;
+
+ if (fan_data->speed_index == 0 && speed_index > 0) {
+ int ret;
+
+ ret = pm_runtime_resume_and_get(fan_data->dev);
+ if (ret < 0)
+ return ret;
+ }
__set_fan_ctrl(fan_data, fan_data->speed[speed_index].ctrl_val);
+
+ if (fan_data->speed_index > 0 && speed_index == 0) {
+ int ret;
+
+ ret = pm_runtime_put_sync(fan_data->dev);
+ if (ret < 0)
+ return ret;
+ }
+
fan_data->speed_index = speed_index;
+
+ return 0;
}
static int get_fan_speed_index(struct gpio_fan_data *fan_data)
@@ -176,7 +199,7 @@ static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
unsigned long pwm;
int speed_index;
- int ret = count;
+ int ret;
if (kstrtoul(buf, 10, &pwm) || pwm > 255)
return -EINVAL;
@@ -189,12 +212,12 @@ static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
}
speed_index = DIV_ROUND_UP(pwm * (fan_data->num_speed - 1), 255);
- set_fan_speed(fan_data, speed_index);
+ ret = set_fan_speed(fan_data, speed_index);
exit_unlock:
mutex_unlock(&fan_data->lock);
- return ret;
+ return ret ? ret : count;
}
static ssize_t pwm1_enable_show(struct device *dev,
@@ -211,6 +234,7 @@ static ssize_t pwm1_enable_store(struct device *dev,
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
unsigned long val;
+ int ret = 0;
if (kstrtoul(buf, 10, &val) || val > 1)
return -EINVAL;
@@ -224,11 +248,11 @@ static ssize_t pwm1_enable_store(struct device *dev,
/* Disable manual control mode: set fan at full speed. */
if (val == 0)
- set_fan_speed(fan_data, fan_data->num_speed - 1);
+ ret = set_fan_speed(fan_data, fan_data->num_speed - 1);
mutex_unlock(&fan_data->lock);
- return count;
+ return ret ? ret : count;
}
static ssize_t pwm1_mode_show(struct device *dev,
@@ -279,7 +303,7 @@ static ssize_t set_rpm(struct device *dev, struct device_attribute *attr,
goto exit_unlock;
}
- set_fan_speed(fan_data, rpm_to_speed_index(fan_data, rpm));
+ ret = set_fan_speed(fan_data, rpm_to_speed_index(fan_data, rpm));
exit_unlock:
mutex_unlock(&fan_data->lock);
@@ -386,6 +410,7 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct gpio_fan_data *fan_data = cdev->devdata;
+ int ret;
if (!fan_data)
return -EINVAL;
@@ -395,11 +420,11 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
mutex_lock(&fan_data->lock);
- set_fan_speed(fan_data, state);
+ ret = set_fan_speed(fan_data, state);
mutex_unlock(&fan_data->lock);
- return 0;
+ return ret;
}
static const struct thermal_cooling_device_ops gpio_fan_cool_ops = {
@@ -499,6 +524,8 @@ static void gpio_fan_stop(void *data)
mutex_lock(&fan_data->lock);
set_fan_speed(data, 0);
mutex_unlock(&fan_data->lock);
+
+ pm_runtime_disable(fan_data->dev);
}
static int gpio_fan_probe(struct platform_device *pdev)
@@ -521,6 +548,11 @@ static int gpio_fan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fan_data);
mutex_init(&fan_data->lock);
+ fan_data->supply = devm_regulator_get(dev, "fan");
+ if (IS_ERR(fan_data->supply))
+ return dev_err_probe(dev, PTR_ERR(fan_data->supply),
+ "Failed to get fan-supply");
+
/* Configure control GPIOs if available. */
if (fan_data->gpios && fan_data->num_gpios > 0) {
if (!fan_data->speed || fan_data->num_speed <= 1)
@@ -548,6 +580,17 @@ static int gpio_fan_probe(struct platform_device *pdev)
return err;
}
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ /* If the fan is already running, mark runtime PM active to match */
+ if (fan_data->speed_index > 0) {
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+ }
+
/* Optional cooling device register for Device tree platforms */
fan_data->cdev = devm_thermal_of_cooling_device_register(dev, np,
"gpio-fan", fan_data, &gpio_fan_cool_ops);
@@ -565,41 +608,69 @@ static void gpio_fan_shutdown(struct platform_device *pdev)
set_fan_speed(fan_data, 0);
}
+static int gpio_fan_runtime_suspend(struct device *dev)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (fan_data->supply)
+ ret = regulator_disable(fan_data->supply);
+
+ return ret;
+}
+
+static int gpio_fan_runtime_resume(struct device *dev)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (fan_data->supply)
+ ret = regulator_enable(fan_data->supply);
+
+ return ret;
+}
+
static int gpio_fan_suspend(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
if (fan_data->gpios) {
fan_data->resume_speed = fan_data->speed_index;
mutex_lock(&fan_data->lock);
- set_fan_speed(fan_data, 0);
+ ret = set_fan_speed(fan_data, 0);
mutex_unlock(&fan_data->lock);
}
- return 0;
+ return ret;
}
static int gpio_fan_resume(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
if (fan_data->gpios) {
mutex_lock(&fan_data->lock);
- set_fan_speed(fan_data, fan_data->resume_speed);
+ ret = set_fan_speed(fan_data, fan_data->resume_speed);
mutex_unlock(&fan_data->lock);
}
- return 0;
+ return ret;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
+static const struct dev_pm_ops gpio_fan_pm = {
+ RUNTIME_PM_OPS(gpio_fan_runtime_suspend,
+ gpio_fan_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(gpio_fan_suspend, gpio_fan_resume)
+};
static struct platform_driver gpio_fan_driver = {
.probe = gpio_fan_probe,
.shutdown = gpio_fan_shutdown,
.driver = {
.name = "gpio-fan",
- .pm = pm_sleep_ptr(&gpio_fan_pm),
+ .pm = pm_ptr(&gpio_fan_pm),
.of_match_table = of_gpio_fan_match,
},
};
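
The gpio-fan changes tie the fan's speed index to runtime PM so the optional "fan" supply is only powered while the fan actually spins. A condensed sketch of that bracketing, assuming a hypothetical struct my_fan with dev and speed_index members (not the driver's exact code):

	static int my_fan_set_speed(struct my_fan *fan, int new_index)
	{
		int ret;

		if (fan->speed_index == new_index)
			return 0;

		/* leaving speed 0: power the supply up via runtime resume */
		if (fan->speed_index == 0 && new_index > 0) {
			ret = pm_runtime_resume_and_get(fan->dev);
			if (ret < 0)
				return ret;
		}

		my_fan_program_gpios(fan, new_index);	/* assumed helper */

		/* back to speed 0: drop the reference, the supply may be cut */
		if (fan->speed_index > 0 && new_index == 0)
			pm_runtime_put_sync(fan->dev);

		fan->speed_index = new_index;
		return 0;
	}

The runtime_suspend/resume callbacks then only have to disable/enable the regulator, as the hunks above show.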
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 6d1175a51832..2df4956296ed 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -15,6 +15,10 @@
#include <linux/kernel.h>
#include <linux/hwmon-vid.h>
+#ifdef CONFIG_X86
+#include <asm/msr.h>
+#endif
+
/*
* Common code for decoding VID pins.
*
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index 2d9f12f68d50..a4a41742786b 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -21,11 +21,14 @@
#define INA238_CONFIG 0x0
#define INA238_ADC_CONFIG 0x1
#define INA238_SHUNT_CALIBRATION 0x2
+#define SQ52206_SHUNT_TEMPCO 0x3
#define INA238_SHUNT_VOLTAGE 0x4
#define INA238_BUS_VOLTAGE 0x5
#define INA238_DIE_TEMP 0x6
#define INA238_CURRENT 0x7
#define INA238_POWER 0x8
+#define SQ52206_ENERGY 0x9
+#define SQ52206_CHARGE 0xa
#define INA238_DIAG_ALERT 0xb
#define INA238_SHUNT_OVER_VOLTAGE 0xc
#define INA238_SHUNT_UNDER_VOLTAGE 0xd
@@ -33,9 +36,12 @@
#define INA238_BUS_UNDER_VOLTAGE 0xf
#define INA238_TEMP_LIMIT 0x10
#define INA238_POWER_LIMIT 0x11
+#define SQ52206_POWER_PEAK 0x20
#define INA238_DEVICE_ID 0x3f /* not available on INA237 */
#define INA238_CONFIG_ADCRANGE BIT(4)
+#define SQ52206_CONFIG_ADCRANGE_HIGH BIT(4)
+#define SQ52206_CONFIG_ADCRANGE_LOW BIT(3)
#define INA238_DIAG_ALERT_TMPOL BIT(7)
#define INA238_DIAG_ALERT_SHNTOL BIT(6)
@@ -44,12 +50,13 @@
#define INA238_DIAG_ALERT_BUSUL BIT(3)
#define INA238_DIAG_ALERT_POL BIT(2)
-#define INA238_REGISTERS 0x11
+#define INA238_REGISTERS 0x20
#define INA238_RSHUNT_DEFAULT 10000 /* uOhm */
/* Default configuration of device on reset. */
#define INA238_CONFIG_DEFAULT 0
+#define SQ52206_CONFIG_DEFAULT 0x0005
/* 16 sample averaging, 1052us conversion time, continuous mode */
#define INA238_ADC_CONFIG_DEFAULT 0xfb6a
/* Configure alerts to be based on averaged value (SLOWALERT) */
@@ -87,14 +94,19 @@
* shunt = 0x4000 / (819.2 * 10^6) / 0.001 = 20000 uOhms (with 1mA/lsb)
*
* Current (mA) = register value * 20000 / rshunt / 4 * gain
- * Power (W) = 0.2 * register value * 20000 / rshunt / 4 * gain
+ * Power (mW) = 0.2 * register value * 20000 / rshunt / 4 * gain
+ * (Specific for SQ52206)
+ * Power (mW) = 0.24 * register value * 20000 / rshunt / 4 * gain
+ * Energy (mJ) = 16 * 0.24 * register value * 20000 / rshunt / 4 * gain
*/
#define INA238_CALIBRATION_VALUE 16384
#define INA238_FIXED_SHUNT 20000
#define INA238_SHUNT_VOLTAGE_LSB 5 /* 5 uV/lsb */
#define INA238_BUS_VOLTAGE_LSB 3125 /* 3.125 mV/lsb */
-#define INA238_DIE_TEMP_LSB 125 /* 125 mC/lsb */
+#define INA238_DIE_TEMP_LSB 1250000 /* 125.0000 mC/lsb */
+#define SQ52206_BUS_VOLTAGE_LSB 3750 /* 3.75 mV/lsb */
+#define SQ52206_DIE_TEMP_LSB 78125 /* 7.8125 mC/lsb */
static const struct regmap_config ina238_regmap_config = {
.max_register = INA238_REGISTERS,
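
As a worked example of the power scaling described above, take the driver defaults of rshunt = 10000 uOhm and gain = 4 together with the INA238 factor of 20 (0.2 scaled by 100) used later in the file:

	power_uW = regval * 1000 * 20000 * gain * 20 / (4 * 100 * rshunt)
	         = regval * 1000 * 20000 * 4 * 20 / (4 * 100 * 10000)
	         = regval * 400

so one POWER register LSB corresponds to 400 uW (0.4 mW), which agrees with "Power (mW) = 0.2 * register value * 20000 / rshunt / 4 * gain".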
@@ -102,7 +114,20 @@ static const struct regmap_config ina238_regmap_config = {
.val_bits = 16,
};
+enum ina238_ids { ina238, ina237, sq52206 };
+
+struct ina238_config {
+ bool has_power_highest; /* chip supports peak power readout */
+ bool has_energy; /* chip supports energy readout */
+ u8 temp_shift; /* fixed shift for temperature calculation */
+ u32 power_calculate_factor; /* fixed factor for power calculation */
+ u16 config_default; /* power-on default state */
+ int bus_voltage_lsb; /* bus voltage LSB, uV/lsb */
+ int temp_lsb; /* temperature LSB, in units of 1/10000 mC */
+};
+
struct ina238_data {
+ const struct ina238_config *config;
struct i2c_client *client;
struct mutex config_lock;
struct regmap *regmap;
@@ -110,6 +135,36 @@ struct ina238_data {
int gain;
};
+static const struct ina238_config ina238_config[] = {
+ [ina238] = {
+ .has_energy = false,
+ .has_power_highest = false,
+ .temp_shift = 4,
+ .power_calculate_factor = 20,
+ .config_default = INA238_CONFIG_DEFAULT,
+ .bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
+ .temp_lsb = INA238_DIE_TEMP_LSB,
+ },
+ [ina237] = {
+ .has_energy = false,
+ .has_power_highest = false,
+ .temp_shift = 4,
+ .power_calculate_factor = 20,
+ .config_default = INA238_CONFIG_DEFAULT,
+ .bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
+ .temp_lsb = INA238_DIE_TEMP_LSB,
+ },
+ [sq52206] = {
+ .has_energy = true,
+ .has_power_highest = true,
+ .temp_shift = 0,
+ .power_calculate_factor = 24,
+ .config_default = SQ52206_CONFIG_DEFAULT,
+ .bus_voltage_lsb = SQ52206_BUS_VOLTAGE_LSB,
+ .temp_lsb = SQ52206_DIE_TEMP_LSB,
+ },
+};
+
static int ina238_read_reg24(const struct i2c_client *client, u8 reg, u32 *val)
{
u8 data[3];
@@ -126,6 +181,24 @@ static int ina238_read_reg24(const struct i2c_client *client, u8 reg, u32 *val)
return 0;
}
+static int ina238_read_reg40(const struct i2c_client *client, u8 reg, u64 *val)
+{
+ u8 data[5];
+ u32 low;
+ int err;
+
+ /* 40-bit register read */
+ err = i2c_smbus_read_i2c_block_data(client, reg, 5, data);
+ if (err < 0)
+ return err;
+ if (err != 5)
+ return -EIO;
+ low = (data[1] << 24) | (data[2] << 16) | (data[3] << 8) | data[4];
+ *val = ((long long)data[0] << 32) | low;
+
+ return 0;
+}
+
static int ina238_read_in(struct device *dev, u32 attr, int channel,
long *val)
{
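
The new ina238_read_reg40() assembles a 40-bit big-endian value by hand. For illustration only, an equivalent formulation with the kernel's unaligned helpers (the header name assumes a tree that already has <linux/unaligned.h>):

	#include <linux/unaligned.h>

	static u64 be40_to_u64(const u8 data[5])
	{
		/* data[0] is the most significant byte of the 40-bit register */
		return ((u64)data[0] << 32) | get_unaligned_be32(&data[1]);
	}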
@@ -197,10 +270,10 @@ static int ina238_read_in(struct device *dev, u32 attr, int channel,
regval = (s16)regval;
if (channel == 0)
/* gain of 1 -> LSB / 4 */
- *val = (regval * INA238_SHUNT_VOLTAGE_LSB) /
- (1000 * (4 - data->gain + 1));
+ *val = (regval * INA238_SHUNT_VOLTAGE_LSB) *
+ data->gain / (1000 * 4);
else
- *val = (regval * INA238_BUS_VOLTAGE_LSB) / 1000;
+ *val = (regval * data->config->bus_voltage_lsb) / 1000;
break;
case hwmon_in_max_alarm:
case hwmon_in_min_alarm:
@@ -225,8 +298,8 @@ static int ina238_write_in(struct device *dev, u32 attr, int channel,
case 0:
/* signed value, clamp to max range +/-163 mV */
regval = clamp_val(val, -163, 163);
- regval = (regval * 1000 * (4 - data->gain + 1)) /
- INA238_SHUNT_VOLTAGE_LSB;
+ regval = (regval * 1000 * 4) /
+ (INA238_SHUNT_VOLTAGE_LSB * data->gain);
regval = clamp_val(regval, S16_MIN, S16_MAX);
switch (attr) {
@@ -242,7 +315,7 @@ static int ina238_write_in(struct device *dev, u32 attr, int channel,
case 1:
/* signed value, positive values only. Clamp to max 102.396 V */
regval = clamp_val(val, 0, 102396);
- regval = (regval * 1000) / INA238_BUS_VOLTAGE_LSB;
+ regval = (regval * 1000) / data->config->bus_voltage_lsb;
regval = clamp_val(regval, 0, S16_MAX);
switch (attr) {
@@ -297,8 +370,19 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
return err;
/* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
- power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT *
- data->gain, 20 * data->rshunt);
+ power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+ /* Clamp value to maximum value of long */
+ *val = clamp_val(power, 0, LONG_MAX);
+ break;
+ case hwmon_power_input_highest:
+ err = ina238_read_reg24(data->client, SQ52206_POWER_PEAK, &regval);
+ if (err)
+ return err;
+
+ /* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
+ power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -311,8 +395,8 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
* Truncated 24-bit compare register, lower 8-bits are
* truncated. Same conversion to/from uW as POWER register.
*/
- power = div_u64((regval << 8) * 1000ULL * INA238_FIXED_SHUNT *
- data->gain, 20 * data->rshunt);
+ power = div_u64((regval << 8) * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -344,8 +428,8 @@ static int ina238_write_power(struct device *dev, u32 attr, long val)
* register.
*/
regval = clamp_val(val, 0, LONG_MAX);
- regval = div_u64(val * 20ULL * data->rshunt,
- 1000ULL * INA238_FIXED_SHUNT * data->gain);
+ regval = div_u64(val * 4 * 100 * data->rshunt, data->config->power_calculate_factor *
+ 1000ULL * INA238_FIXED_SHUNT * data->gain);
regval = clamp_val(regval >> 8, 0, U16_MAX);
return regmap_write(data->regmap, INA238_POWER_LIMIT, regval);
@@ -362,17 +446,17 @@ static int ina238_read_temp(struct device *dev, u32 attr, long *val)
err = regmap_read(data->regmap, INA238_DIE_TEMP, &regval);
if (err)
return err;
-
- /* Signed, bits 15-4 of register, result in mC */
- *val = ((s16)regval >> 4) * INA238_DIE_TEMP_LSB;
+ /* Signed, result in mC */
+ *val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
+ (s64)data->config->temp_lsb, 10000);
break;
case hwmon_temp_max:
err = regmap_read(data->regmap, INA238_TEMP_LIMIT, &regval);
if (err)
return err;
-
- /* Signed, bits 15-4 of register, result in mC */
- *val = ((s16)regval >> 4) * INA238_DIE_TEMP_LSB;
+ /* Signed, result in mC */
+ *val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
+ (s64)data->config->temp_lsb, 10000);
break;
case hwmon_temp_max_alarm:
err = regmap_read(data->regmap, INA238_DIAG_ALERT, &regval);
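
A worked example of the per-chip temperature conversion above, for a raw DIETEMP reading of 0x0c80 (3200):

	INA238:  shift = 4, temp_lsb = 1250000
	         (3200 >> 4) * 1250000 / 10000 = 200 * 125 = 25000 mC = 25 degC
	SQ52206: shift = 0, temp_lsb = 78125
	         3200 * 78125 / 10000 = 25000 mC = 25 degC

Both parameterizations land on the same result because the SQ52206 simply uses all 16 bits at 7.8125 mC per LSB instead of bits 15..4 at 125 mC per LSB.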
@@ -396,13 +480,33 @@ static int ina238_write_temp(struct device *dev, u32 attr, long val)
if (attr != hwmon_temp_max)
return -EOPNOTSUPP;
- /* Signed, bits 15-4 of register */
- regval = (val / INA238_DIE_TEMP_LSB) << 4;
- regval = clamp_val(regval, S16_MIN, S16_MAX) & 0xfff0;
+ /* Signed */
+ regval = clamp_val(val, -40000, 125000);
+ regval = div_s64(regval * 10000, data->config->temp_lsb) << data->config->temp_shift;
+ regval = clamp_val(regval, S16_MIN, S16_MAX) & (0xffff << data->config->temp_shift);
return regmap_write(data->regmap, INA238_TEMP_LIMIT, regval);
}
+static ssize_t energy1_input_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int ret;
+ u64 regval;
+ u64 energy;
+
+ ret = ina238_read_reg40(data->client, SQ52206_ENERGY, &regval);
+ if (ret)
+ return ret;
+
+ /* result in mJ */
+ energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+
+ return sysfs_emit(buf, "%llu\n", energy);
+}
+
static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
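
With the same defaults (rshunt = 10000 uOhm, gain = 4) and the SQ52206 factor of 24, the energy scaling in energy1_input_show() works out to:

	energy_mJ = regval * 20000 * 4 * 16 * 24 / (4 * 100 * 10000)
	          = regval * 30720000 / 4000000	(about 7.68 mJ per LSB)

e.g. a raw 40-bit reading of 1000 reports 7680 mJ, matching the "Energy (mJ) = 16 * 0.24 * register value * 20000 / rshunt / 4 * gain" comment earlier in the file.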
@@ -422,7 +526,7 @@ static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
}
static int ina238_write(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long val)
+ u32 attr, int channel, long val)
{
struct ina238_data *data = dev_get_drvdata(dev);
int err;
@@ -452,6 +556,9 @@ static umode_t ina238_is_visible(const void *drvdata,
enum hwmon_sensor_types type,
u32 attr, int channel)
{
+ const struct ina238_data *data = drvdata;
+ bool has_power_highest = data->config->has_power_highest;
+
switch (type) {
case hwmon_in:
switch (attr) {
@@ -479,6 +586,10 @@ static umode_t ina238_is_visible(const void *drvdata,
return 0444;
case hwmon_power_max:
return 0644;
+ case hwmon_power_input_highest:
+ if (has_power_highest)
+ return 0444;
+ return 0;
default:
return 0;
}
@@ -512,7 +623,8 @@ static const struct hwmon_channel_info * const ina238_info[] = {
HWMON_C_INPUT),
HWMON_CHANNEL_INFO(power,
/* 0: power */
- HWMON_P_INPUT | HWMON_P_MAX | HWMON_P_MAX_ALARM),
+ HWMON_P_INPUT | HWMON_P_MAX |
+ HWMON_P_MAX_ALARM | HWMON_P_INPUT_HIGHEST),
HWMON_CHANNEL_INFO(temp,
/* 0: die temperature */
HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_ALARM),
@@ -530,20 +642,35 @@ static const struct hwmon_chip_info ina238_chip_info = {
.info = ina238_info,
};
+/* the energy register is 5 bytes (40 bits) wide, so it is read into a u64 */
+static DEVICE_ATTR_RO(energy1_input);
+
+static struct attribute *ina238_attrs[] = {
+ &dev_attr_energy1_input.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ina238);
+
static int ina238_probe(struct i2c_client *client)
{
struct ina2xx_platform_data *pdata = dev_get_platdata(&client->dev);
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct ina238_data *data;
+ enum ina238_ids chip;
int config;
int ret;
+ chip = (uintptr_t)i2c_get_match_data(client);
+
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = client;
+ /* set the device type */
+ data->config = &ina238_config[chip];
+
mutex_init(&data->config_lock);
data->regmap = devm_regmap_init_i2c(client, &ina238_regmap_config);
@@ -564,15 +691,21 @@ static int ina238_probe(struct i2c_client *client)
/* load shunt gain value */
if (device_property_read_u32(dev, "ti,shunt-gain", &data->gain) < 0)
data->gain = 4; /* Default of ADCRANGE = 0 */
- if (data->gain != 1 && data->gain != 4) {
+ if (data->gain != 1 && data->gain != 2 && data->gain != 4) {
dev_err(dev, "invalid shunt gain value %u\n", data->gain);
return -EINVAL;
}
/* Setup CONFIG register */
- config = INA238_CONFIG_DEFAULT;
- if (data->gain == 1)
+ config = data->config->config_default;
+ if (chip == sq52206) {
+ if (data->gain == 1)
+ config |= SQ52206_CONFIG_ADCRANGE_HIGH; /* ADCRANGE = 10/11 is /1 */
+ else if (data->gain == 2)
+ config |= SQ52206_CONFIG_ADCRANGE_LOW; /* ADCRANGE = 01 is /2 */
+ } else if (data->gain == 1) {
config |= INA238_CONFIG_ADCRANGE; /* ADCRANGE = 1 is /1 */
+ }
ret = regmap_write(data->regmap, INA238_CONFIG, config);
if (ret < 0) {
dev_err(dev, "error configuring the device: %d\n", ret);
@@ -605,7 +738,8 @@ static int ina238_probe(struct i2c_client *client)
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
&ina238_chip_info,
- NULL);
+ data->config->has_energy ?
+ ina238_groups : NULL);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
@@ -616,15 +750,27 @@ static int ina238_probe(struct i2c_client *client)
}
static const struct i2c_device_id ina238_id[] = {
- { "ina238" },
+ { "ina237", ina237 },
+ { "ina238", ina238 },
+ { "sq52206", sq52206 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ina238_id);
static const struct of_device_id __maybe_unused ina238_of_match[] = {
- { .compatible = "ti,ina237" },
- { .compatible = "ti,ina238" },
- { },
+ {
+ .compatible = "ti,ina237",
+ .data = (void *)ina237
+ },
+ {
+ .compatible = "ti,ina238",
+ .data = (void *)ina238
+ },
+ {
+ .compatible = "silergy,sq52206",
+ .data = (void *)sq52206
+ },
+ { }
};
MODULE_DEVICE_TABLE(of, ina238_of_match);
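
A generic sketch of the match-data pattern the probe now uses (my_config, my_probe and the values are illustrative): the enum stored in .data of both the OF table and the I2C ID table indexes a single per-chip configuration array, so no chip-specific branching is needed for scaling constants.

	struct my_config { int temp_lsb; u8 temp_shift; };

	enum my_ids { my_chip_a, my_chip_b };

	static const struct my_config my_configs[] = {
		[my_chip_a] = { .temp_lsb = 1250000, .temp_shift = 4 },
		[my_chip_b] = { .temp_lsb = 78125,   .temp_shift = 0 },
	};

	static int my_probe(struct i2c_client *client)
	{
		/* covers both OF (.data) and i2c_device_id (.driver_data) matches */
		enum my_ids chip = (uintptr_t)i2c_get_match_data(client);
		const struct my_config *cfg = &my_configs[chip];

		return my_init_chip(client, cfg);	/* assumed helper */
	}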
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 345fe7db9de9..bc3c1f7314b3 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -959,8 +959,12 @@ static int ina2xx_probe(struct i2c_client *client)
return PTR_ERR(data->regmap);
}
- ret = devm_regulator_get_enable(dev, "vs");
- if (ret)
+ /*
+ * The regulator core returns -ENODEV if no 'vs' supply is described,
+ * so -ENODEV must not be treated as a probe failure here.
+ */
+ ret = devm_regulator_get_enable_optional(dev, "vs");
+ if (ret < 0 && ret != -ENODEV)
return dev_err_probe(dev, ret, "failed to enable vs regulator\n");
ret = ina2xx_init(dev, data);
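
A small sketch of the optional-supply handling adopted above (enable_optional_supply() is illustrative, not an existing helper): a missing supply surfaces as -ENODEV and must not abort the probe, while any other failure still does.

	static int enable_optional_supply(struct device *dev, const char *id)
	{
		int ret;

		ret = devm_regulator_get_enable_optional(dev, id);
		if (ret == -ENODEV)
			return 0;	/* supply not described, run without it */
		if (ret < 0)
			return dev_err_probe(dev, ret,
					     "failed to enable %s supply\n", id);

		return 0;
	}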
diff --git a/drivers/hwmon/isl28022.c b/drivers/hwmon/isl28022.c
index 1fb9864635db..c2e559dde63f 100644
--- a/drivers/hwmon/isl28022.c
+++ b/drivers/hwmon/isl28022.c
@@ -154,6 +154,7 @@ static int isl28022_read_current(struct device *dev, u32 attr, long *val)
struct isl28022_data *data = dev_get_drvdata(dev);
unsigned int regval;
int err;
+ u16 sign_bit;
switch (attr) {
case hwmon_curr_input:
@@ -161,8 +162,9 @@ static int isl28022_read_current(struct device *dev, u32 attr, long *val)
ISL28022_REG_CURRENT, &regval);
if (err < 0)
return err;
- *val = ((long)regval * 1250L * (long)data->gain) /
- (long)data->shunt;
+ sign_bit = (regval >> 15) & 0x01;
+ *val = (((long)(((u16)regval) & 0x7FFF) - (sign_bit * 32768)) *
+ 1250L * (long)data->gain) / (long)data->shunt;
break;
default:
return -EOPNOTSUPP;
@@ -301,7 +303,7 @@ static const struct regmap_config isl28022_regmap_config = {
.writeable_reg = isl28022_is_writeable_reg,
.volatile_reg = isl28022_is_volatile_reg,
.val_format_endian = REGMAP_ENDIAN_BIG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.use_single_read = true,
.use_single_write = true,
};
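
The explicit sign-bit arithmetic in the isl28022 change is ordinary two's-complement sign extension. For illustration only, the same result can be written with the kernel helper (this is not what the patch does):

	/* e.g. regval = 0xfff6 -> sign_extend32(0xfff6, 15) = -10 */
	*val = (long)sign_extend32(regval, 15) * 1250L *
	       (long)data->gain / (long)data->shunt;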
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 3685906cc57c..babf2413d666 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
-#include <asm/amd_node.h>
+#include <asm/amd/node.h>
#include <asm/processor.h>
MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
@@ -503,6 +503,13 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
k10temp_get_ccd_support(data, 12);
break;
}
+ } else if (boot_cpu_data.x86 == 0x1a) {
+ switch (boot_cpu_data.x86_model) {
+ case 0x40 ... 0x4f: /* Zen5 Ryzen Desktop */
+ data->ccd_offset = 0x308;
+ k10temp_get_ccd_support(data, 8);
+ break;
+ }
}
for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
diff --git a/drivers/hwmon/kbatt.c b/drivers/hwmon/kbatt.c
new file mode 100644
index 000000000000..501b8f4ded33
--- /dev/null
+++ b/drivers/hwmon/kbatt.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 KEBA Industrial Automation GmbH
+ *
+ * Driver for KEBA battery monitoring controller FPGA IP core
+ */
+
+#include <linux/hwmon.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/misc/keba.h>
+#include <linux/mutex.h>
+
+#define KBATT "kbatt"
+
+#define KBATT_CONTROL_REG 0x4
+#define KBATT_CONTROL_BAT_TEST 0x01
+
+#define KBATT_STATUS_REG 0x8
+#define KBATT_STATUS_BAT_OK 0x01
+
+#define KBATT_MAX_UPD_INTERVAL (10 * HZ)
+#define KBATT_SETTLE_TIME_US (100 * USEC_PER_MSEC)
+
+struct kbatt {
+ /* update lock */
+ struct mutex lock;
+ void __iomem *base;
+
+ unsigned long next_update; /* in jiffies */
+ bool alarm;
+};
+
+static bool kbatt_alarm(struct kbatt *kbatt)
+{
+ mutex_lock(&kbatt->lock);
+
+ if (!kbatt->next_update || time_after(jiffies, kbatt->next_update)) {
+ /* switch load on */
+ iowrite8(KBATT_CONTROL_BAT_TEST,
+ kbatt->base + KBATT_CONTROL_REG);
+
+ /* wait some time to let things settle */
+ fsleep(KBATT_SETTLE_TIME_US);
+
+ /* check battery state */
+ if (ioread8(kbatt->base + KBATT_STATUS_REG) &
+ KBATT_STATUS_BAT_OK)
+ kbatt->alarm = false;
+ else
+ kbatt->alarm = true;
+
+ /* switch load off */
+ iowrite8(0, kbatt->base + KBATT_CONTROL_REG);
+
+ kbatt->next_update = jiffies + KBATT_MAX_UPD_INTERVAL;
+ }
+
+ mutex_unlock(&kbatt->lock);
+
+ return kbatt->alarm;
+}
+
+static int kbatt_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct kbatt *kbatt = dev_get_drvdata(dev);
+
+ *val = kbatt_alarm(kbatt) ? 1 : 0;
+
+ return 0;
+}
+
+static umode_t kbatt_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (channel == 0 && attr == hwmon_in_min_alarm)
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *kbatt_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ /* 0: input minimum alarm channel */
+ HWMON_I_MIN_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops kbatt_hwmon_ops = {
+ .is_visible = kbatt_is_visible,
+ .read = kbatt_read,
+};
+
+static const struct hwmon_chip_info kbatt_chip_info = {
+ .ops = &kbatt_hwmon_ops,
+ .info = kbatt_info,
+};
+
+static int kbatt_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct keba_batt_auxdev *kbatt_auxdev =
+ container_of(auxdev, struct keba_batt_auxdev, auxdev);
+ struct device *dev = &auxdev->dev;
+ struct device *hwmon_dev;
+ struct kbatt *kbatt;
+ int retval;
+
+ kbatt = devm_kzalloc(dev, sizeof(*kbatt), GFP_KERNEL);
+ if (!kbatt)
+ return -ENOMEM;
+
+ retval = devm_mutex_init(dev, &kbatt->lock);
+ if (retval)
+ return retval;
+
+ kbatt->base = devm_ioremap_resource(dev, &kbatt_auxdev->io);
+ if (IS_ERR(kbatt->base))
+ return PTR_ERR(kbatt->base);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, KBATT, kbatt,
+ &kbatt_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct auxiliary_device_id kbatt_devtype_aux[] = {
+ { .name = "keba.batt" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, kbatt_devtype_aux);
+
+static struct auxiliary_driver kbatt_driver_aux = {
+ .name = KBATT,
+ .id_table = kbatt_devtype_aux,
+ .probe = kbatt_probe,
+};
+module_auxiliary_driver(kbatt_driver_aux);
+
+MODULE_AUTHOR("Petar Bojanic <boja@keba.com>");
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA battery monitoring controller driver");
+MODULE_LICENSE("GPL");
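
The alarm path in kbatt_alarm() is deliberately rate limited: the test load is switched in at most once per KBATT_MAX_UPD_INTERVAL and the result cached, since each measurement blocks the reader for the 100 ms settle time. A stripped-down sketch of that jiffies-based caching (priv and measure_hardware() are illustrative names):

	if (!priv->next_update || time_after(jiffies, priv->next_update)) {
		priv->cached = measure_hardware(priv);	/* slow, ~100 ms */
		priv->next_update = jiffies + MAX_UPDATE_INTERVAL;
	}

	return priv->cached;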
diff --git a/drivers/hwmon/kfan.c b/drivers/hwmon/kfan.c
new file mode 100644
index 000000000000..f353acb66749
--- /dev/null
+++ b/drivers/hwmon/kfan.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 KEBA Industrial Automation GmbH
+ *
+ * Driver for KEBA fan controller FPGA IP core
+ *
+ */
+
+#include <linux/hwmon.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/misc/keba.h>
+
+#define KFAN "kfan"
+
+#define KFAN_CONTROL_REG 0x04
+
+#define KFAN_STATUS_REG 0x08
+#define KFAN_STATUS_PRESENT 0x01
+#define KFAN_STATUS_REGULABLE 0x02
+#define KFAN_STATUS_TACHO 0x04
+#define KFAN_STATUS_BLOCKED 0x08
+
+#define KFAN_TACHO_REG 0x0c
+
+#define KFAN_DEFAULT_DIV 2
+
+struct kfan {
+ void __iomem *base;
+ bool tacho;
+ bool regulable;
+
+ /* hwmon API configuration */
+ u32 fan_channel_config[2];
+ struct hwmon_channel_info fan_info;
+ u32 pwm_channel_config[2];
+ struct hwmon_channel_info pwm_info;
+ const struct hwmon_channel_info *info[3];
+ struct hwmon_chip_info chip;
+};
+
+static bool kfan_get_fault(struct kfan *kfan)
+{
+ u8 status = ioread8(kfan->base + KFAN_STATUS_REG);
+
+ if (!(status & KFAN_STATUS_PRESENT))
+ return true;
+
+ if (!kfan->tacho && (status & KFAN_STATUS_BLOCKED))
+ return true;
+
+ return false;
+}
+
+static unsigned int kfan_count_to_rpm(u16 count)
+{
+ if (count == 0 || count == 0xffff)
+ return 0;
+
+ return 5000000UL / (KFAN_DEFAULT_DIV * count);
+}
+
+static unsigned int kfan_get_rpm(struct kfan *kfan)
+{
+ unsigned int rpm;
+ u16 count;
+
+ count = ioread16(kfan->base + KFAN_TACHO_REG);
+ rpm = kfan_count_to_rpm(count);
+
+ return rpm;
+}
+
+static unsigned int kfan_get_pwm(struct kfan *kfan)
+{
+ return ioread8(kfan->base + KFAN_CONTROL_REG);
+}
+
+static int kfan_set_pwm(struct kfan *kfan, long val)
+{
+ if (val < 0 || val > 0xff)
+ return -EINVAL;
+
+ /* if non-regulable, only 0 or 0xff can be written */
+ if (!kfan->regulable && val > 0)
+ val = 0xff;
+
+ iowrite8(val, kfan->base + KFAN_CONTROL_REG);
+
+ return 0;
+}
+
+static int kfan_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct kfan *kfan = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ return kfan_set_pwm(kfan, val);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int kfan_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct kfan *kfan = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_fault:
+ *val = kfan_get_fault(kfan);
+ return 0;
+ case hwmon_fan_input:
+ *val = kfan_get_rpm(kfan);
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = kfan_get_pwm(kfan);
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t kfan_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ case hwmon_fan_fault:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops kfan_hwmon_ops = {
+ .is_visible = kfan_is_visible,
+ .read = kfan_read,
+ .write = kfan_write,
+};
+
+static int kfan_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct keba_fan_auxdev *kfan_auxdev =
+ container_of(auxdev, struct keba_fan_auxdev, auxdev);
+ struct device *dev = &auxdev->dev;
+ struct device *hwmon_dev;
+ struct kfan *kfan;
+ u8 status;
+
+ kfan = devm_kzalloc(dev, sizeof(*kfan), GFP_KERNEL);
+ if (!kfan)
+ return -ENOMEM;
+
+ kfan->base = devm_ioremap_resource(dev, &kfan_auxdev->io);
+ if (IS_ERR(kfan->base))
+ return PTR_ERR(kfan->base);
+
+ status = ioread8(kfan->base + KFAN_STATUS_REG);
+ if (status & KFAN_STATUS_REGULABLE)
+ kfan->regulable = true;
+ if (status & KFAN_STATUS_TACHO)
+ kfan->tacho = true;
+
+ /* fan */
+ kfan->fan_channel_config[0] = HWMON_F_FAULT;
+ if (kfan->tacho)
+ kfan->fan_channel_config[0] |= HWMON_F_INPUT;
+ kfan->fan_info.type = hwmon_fan;
+ kfan->fan_info.config = kfan->fan_channel_config;
+ kfan->info[0] = &kfan->fan_info;
+
+ /* PWM */
+ kfan->pwm_channel_config[0] = HWMON_PWM_INPUT;
+ kfan->pwm_info.type = hwmon_pwm;
+ kfan->pwm_info.config = kfan->pwm_channel_config;
+ kfan->info[1] = &kfan->pwm_info;
+
+ kfan->chip.ops = &kfan_hwmon_ops;
+ kfan->chip.info = kfan->info;
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, KFAN, kfan,
+ &kfan->chip, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct auxiliary_device_id kfan_devtype_aux[] = {
+ { .name = "keba.fan" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, kfan_devtype_aux);
+
+static struct auxiliary_driver kfan_driver_aux = {
+ .name = KFAN,
+ .id_table = kfan_devtype_aux,
+ .probe = kfan_probe,
+};
+module_auxiliary_driver(kfan_driver_aux);
+
+MODULE_AUTHOR("Petar Bojanic <boja@keba.com>");
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA fan controller driver");
+MODULE_LICENSE("GPL");
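
The tacho conversion in kfan_count_to_rpm() reads as a period count against a fixed reference, giving rpm = 5000000 / (2 * count) with the default divisor; 0 and 0xffff are treated as "no valid reading" and report 0. A quick worked example:

	count = 1250  ->  5000000 / (2 * 1250) = 2000 RPM
	count = 2500  ->  5000000 / (2 * 2500) = 1000 RPM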
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index d95a3c6c245c..9b4875e2fd8d 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -622,7 +622,7 @@ static int lm75_i3c_reg_read(void *context, unsigned int reg, unsigned int *val)
{
.rnw = true,
.len = 2,
- .data.out = data->val_buf,
+ .data.in = data->val_buf,
},
};
int ret;
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 75f09553fd67..c1f528e292f3 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1235,7 +1235,7 @@ static int lm90_update_alarms(struct lm90_data *data, bool force)
static void lm90_alert_work(struct work_struct *__work)
{
- struct delayed_work *delayed_work = container_of(__work, struct delayed_work, work);
+ struct delayed_work *delayed_work = to_delayed_work(__work);
struct lm90_data *data = container_of(delayed_work, struct lm90_data, alert_work);
/* Nothing to do if alerts are enabled */
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 541fa09dc6e7..a07e2eb93c71 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -256,33 +256,38 @@ static int ltc2992_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask
return 0;
}
-static void ltc2992_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int ltc2992_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ltc2992_state *st = gpiochip_get_data(chip);
unsigned long gpio_ctrl;
- int reg;
+ int reg, ret;
mutex_lock(&st->gpio_mutex);
reg = ltc2992_read_reg(st, ltc2992_gpio_addr_map[offset].ctrl, 1);
if (reg < 0) {
mutex_unlock(&st->gpio_mutex);
- return;
+ return reg;
}
gpio_ctrl = reg;
assign_bit(ltc2992_gpio_addr_map[offset].ctrl_bit, &gpio_ctrl, value);
- ltc2992_write_reg(st, ltc2992_gpio_addr_map[offset].ctrl, 1, gpio_ctrl);
+ ret = ltc2992_write_reg(st, ltc2992_gpio_addr_map[offset].ctrl, 1,
+ gpio_ctrl);
mutex_unlock(&st->gpio_mutex);
+
+ return ret;
}
-static void ltc2992_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int ltc2992_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
struct ltc2992_state *st = gpiochip_get_data(chip);
unsigned long gpio_ctrl_io = 0;
unsigned long gpio_ctrl = 0;
unsigned int gpio_nr;
+ int ret;
for_each_set_bit(gpio_nr, mask, LTC2992_GPIO_NR) {
if (gpio_nr < 3)
@@ -293,9 +298,14 @@ static void ltc2992_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mas
}
mutex_lock(&st->gpio_mutex);
- ltc2992_write_reg(st, LTC2992_GPIO_IO_CTRL, 1, gpio_ctrl_io);
- ltc2992_write_reg(st, LTC2992_GPIO_CTRL, 1, gpio_ctrl);
+ ret = ltc2992_write_reg(st, LTC2992_GPIO_IO_CTRL, 1, gpio_ctrl_io);
+ if (ret)
+ goto out;
+
+ ret = ltc2992_write_reg(st, LTC2992_GPIO_CTRL, 1, gpio_ctrl);
+out:
mutex_unlock(&st->gpio_mutex);
+ return ret;
}
static int ltc2992_config_gpio(struct ltc2992_state *st)
@@ -329,8 +339,8 @@ static int ltc2992_config_gpio(struct ltc2992_state *st)
st->gc.ngpio = ARRAY_SIZE(st->gpio_names);
st->gc.get = ltc2992_gpio_get;
st->gc.get_multiple = ltc2992_gpio_get_multiple;
- st->gc.set = ltc2992_gpio_set;
- st->gc.set_multiple = ltc2992_gpio_set_multiple;
+ st->gc.set_rv = ltc2992_gpio_set;
+ st->gc.set_multiple_rv = ltc2992_gpio_set_multiple;
ret = devm_gpiochip_add_data(&st->client->dev, &st->gc, st);
if (ret)
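
The ltc2992 conversion follows the GPIO subsystem's move to int-returning setters, so I2C write failures now propagate to the caller instead of being dropped. A minimal sketch of the new callback shape (my_gpio_set(), my_state and my_write_ctrl_bit() are illustrative):

	static int my_gpio_set(struct gpio_chip *chip, unsigned int offset,
			       int value)
	{
		struct my_state *st = gpiochip_get_data(chip);

		return my_write_ctrl_bit(st, offset, value);	/* 0 or -errno */
	}

	/* hooked up through the transitional *_rv members */
	st->gc.set_rv = my_gpio_set;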
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 32b4d54b2076..a06346496e1d 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -80,6 +80,7 @@ struct max6639_data {
/* Register values initialized only once */
u8 ppr[MAX6639_NUM_CHANNELS]; /* Pulses per rotation 0..3 for 1..4 ppr */
u8 rpm_range[MAX6639_NUM_CHANNELS]; /* Index in above rpm_ranges table */
+ u32 target_rpm[MAX6639_NUM_CHANNELS];
/* Optional regulator for FAN supply */
struct regulator *reg;
@@ -563,6 +564,10 @@ static int max6639_probe_child_from_dt(struct i2c_client *client,
if (!err)
data->rpm_range[i] = rpm_range_to_reg(val);
+ err = of_property_read_u32(child, "target-rpm", &val);
+ if (!err)
+ data->target_rpm[i] = val;
+
return 0;
}
@@ -573,6 +578,7 @@ static int max6639_init_client(struct i2c_client *client,
const struct device_node *np = dev->of_node;
struct device_node *child;
int i, err;
+ u8 target_duty;
/* Reset chip to default values, see below for GCONFIG setup */
err = regmap_write(data->regmap, MAX6639_REG_GCONFIG, MAX6639_GCONFIG_POR);
@@ -586,6 +592,8 @@ static int max6639_init_client(struct i2c_client *client,
/* default: 4000 RPM */
data->rpm_range[0] = 1;
data->rpm_range[1] = 1;
+ data->target_rpm[0] = 4000;
+ data->target_rpm[1] = 4000;
for_each_child_of_node(np, child) {
if (strcmp(child->name, "fan"))
@@ -639,8 +647,12 @@ static int max6639_init_client(struct i2c_client *client,
if (err)
return err;
- /* PWM 120/120 (i.e. 100%) */
- err = regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(i), 120);
+ /* Set PWM based on target RPM if specified */
+ if (data->target_rpm[i] > rpm_ranges[data->rpm_range[i]])
+ data->target_rpm[i] = rpm_ranges[data->rpm_range[i]];
+
+ target_duty = 120 * data->target_rpm[i] / rpm_ranges[data->rpm_range[i]];
+ err = regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(i), target_duty);
if (err)
return err;
}
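
Since the duty register spans 0..120 (100%), the target-rpm handling above maps linearly onto the selected RPM range. For example, with the default 4000 RPM range:

	target-rpm = 3000                     ->  duty = 120 * 3000 / 4000 = 90  (75% PWM)
	target-rpm = 4000 or above (clamped)  ->  duty = 120                     (100% PWM)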
diff --git a/drivers/hwmon/max77705-hwmon.c b/drivers/hwmon/max77705-hwmon.c
new file mode 100644
index 000000000000..990023e6474e
--- /dev/null
+++ b/drivers/hwmon/max77705-hwmon.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MAX77705 voltage and current hwmon driver.
+ *
+ * Copyright (C) 2025 Dzmitry Sankouski <dsankouski@gmail.com>
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/mfd/max77705-private.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct channel_desc {
+ u8 reg;
+ u8 avg_reg;
+ const char *const label;
+ /* register resolution: nanovolts for voltage, nanoamperes for current */
+ u32 resolution;
+};
+
+static const struct channel_desc current_channel_desc[] = {
+ {
+ .reg = IIN_REG,
+ .label = "IIN_REG",
+ .resolution = 125000
+ },
+ {
+ .reg = ISYS_REG,
+ .avg_reg = AVGISYS_REG,
+ .label = "ISYS_REG",
+ .resolution = 312500
+ }
+};
+
+static const struct channel_desc voltage_channel_desc[] = {
+ {
+ .reg = VBYP_REG,
+ .label = "VBYP_REG",
+ .resolution = 427246
+ },
+ {
+ .reg = VSYS_REG,
+ .label = "VSYS_REG",
+ .resolution = 156250
+ }
+};
+
+static int max77705_read_and_convert(struct regmap *regmap, u8 reg, u32 res,
+ bool is_signed, long *val)
+{
+ int ret;
+ u32 regval;
+
+ ret = regmap_read(regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
+
+ if (is_signed)
+ *val = mult_frac((long)sign_extend32(regval, 15), res, 1000000);
+ else
+ *val = mult_frac((long)regval, res, 1000000);
+
+ return 0;
+}
+
+static umode_t max77705_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_label:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_label:
+ return 0444;
+ case hwmon_curr_average:
+ if (current_channel_desc[channel].avg_reg)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int max77705_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **buf)
+{
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_label:
+ *buf = current_channel_desc[channel].label;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *buf = voltage_channel_desc[channel].label;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int max77705_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ u8 reg;
+ u32 res;
+
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ reg = current_channel_desc[channel].reg;
+ res = current_channel_desc[channel].resolution;
+
+ return max77705_read_and_convert(regmap, reg, res, true, val);
+ case hwmon_curr_average:
+ reg = current_channel_desc[channel].avg_reg;
+ res = current_channel_desc[channel].resolution;
+
+ return max77705_read_and_convert(regmap, reg, res, true, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ reg = voltage_channel_desc[channel].reg;
+ res = voltage_channel_desc[channel].resolution;
+
+ return max77705_read_and_convert(regmap, reg, res, false, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops max77705_hwmon_ops = {
+ .is_visible = max77705_is_visible,
+ .read = max77705_read,
+ .read_string = max77705_read_string,
+};
+
+static const struct hwmon_channel_info *max77705_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL
+ ),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_AVERAGE | HWMON_C_LABEL
+ ),
+ NULL
+};
+
+static const struct hwmon_chip_info max77705_chip_info = {
+ .ops = &max77705_hwmon_ops,
+ .info = max77705_info,
+};
+
+static int max77705_hwmon_probe(struct platform_device *pdev)
+{
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap)
+ return -ENODEV;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, "max77705", regmap,
+ &max77705_chip_info, NULL);
+ if (IS_ERR(hwmon_dev))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hwmon_dev),
+ "Unable to register hwmon device\n");
+
+ return 0;
+}
+
+static struct platform_driver max77705_hwmon_driver = {
+ .driver = {
+ .name = "max77705-hwmon",
+ },
+ .probe = max77705_hwmon_probe,
+};
+
+module_platform_driver(max77705_hwmon_driver);
+
+MODULE_AUTHOR("Dzmitry Sankouski <dsankouski@gmail.com>");
+MODULE_DESCRIPTION("MAX77705 monitor driver");
+MODULE_LICENSE("GPL");
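
A worked example of max77705_read_and_convert() for the ISYS channel (resolution 312500 nA per LSB), for a positive and a negative raw reading (current channels are read as signed via sign_extend32()):

	regval = 0x0400 (1024)   ->  1024 * 312500 / 1000000 = 320    (320 mA)
	regval = 0xff00          ->  sign_extend32(0xff00, 15) = -256
	                             -256 * 312500 / 1000000 = -80    (-80 mA)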
diff --git a/drivers/hwmon/nct7363.c b/drivers/hwmon/nct7363.c
index be7bf32f6e68..e13ab918b1ab 100644
--- a/drivers/hwmon/nct7363.c
+++ b/drivers/hwmon/nct7363.c
@@ -391,7 +391,7 @@ static const struct regmap_config nct7363_regmap_config = {
.val_bits = 8,
.use_single_read = true,
.use_single_write = true,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = nct7363_regmap_is_volatile,
};
diff --git a/drivers/hwmon/oxp-sensors.c b/drivers/hwmon/oxp-sensors.c
deleted file mode 100644
index 83730d931824..000000000000
--- a/drivers/hwmon/oxp-sensors.c
+++ /dev/null
@@ -1,716 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Platform driver for OneXPlayer, AOKZOE, AYANEO, and OrangePi Handhelds
- * that expose fan reading and control via hwmon sysfs.
- *
- * Old OXP boards have the same DMI strings and they are told apart by
- * the boot cpu vendor (Intel/AMD). Of these older models only AMD is
- * supported.
- *
- * Fan control is provided via pwm interface in the range [0-255].
- * Old AMD boards use [0-100] as range in the EC, the written value is
- * scaled to accommodate for that. Newer boards like the mini PRO and
- * AOKZOE are not scaled but have the same EC layout. Newer models
- * like the 2 and X1 are [0-184] and are scaled to 0-255. OrangePi
- * are [1-244] and scaled to 0-255.
- *
- * Copyright (C) 2022 Joaquín I. Aramendía <samsagax@gmail.com>
- * Copyright (C) 2024 Derek J. Clark <derekjohn.clark@gmail.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/dmi.h>
-#include <linux/hwmon.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/processor.h>
-
-/* Handle ACPI lock mechanism */
-static u32 oxp_mutex;
-
-#define ACPI_LOCK_DELAY_MS 500
-
-static bool lock_global_acpi_lock(void)
-{
- return ACPI_SUCCESS(acpi_acquire_global_lock(ACPI_LOCK_DELAY_MS, &oxp_mutex));
-}
-
-static bool unlock_global_acpi_lock(void)
-{
- return ACPI_SUCCESS(acpi_release_global_lock(oxp_mutex));
-}
-
-enum oxp_board {
- aok_zoe_a1 = 1,
- aya_neo_2,
- aya_neo_air,
- aya_neo_air_1s,
- aya_neo_air_plus_mendo,
- aya_neo_air_pro,
- aya_neo_flip,
- aya_neo_geek,
- aya_neo_kun,
- orange_pi_neo,
- oxp_2,
- oxp_fly,
- oxp_mini_amd,
- oxp_mini_amd_a07,
- oxp_mini_amd_pro,
- oxp_x1,
-};
-
-static enum oxp_board board;
-
-/* Fan reading and PWM */
-#define OXP_SENSOR_FAN_REG 0x76 /* Fan reading is 2 registers long */
-#define OXP_2_SENSOR_FAN_REG 0x58 /* Fan reading is 2 registers long */
-#define OXP_SENSOR_PWM_ENABLE_REG 0x4A /* PWM enable is 1 register long */
-#define OXP_SENSOR_PWM_REG 0x4B /* PWM reading is 1 register long */
-#define PWM_MODE_AUTO 0x00
-#define PWM_MODE_MANUAL 0x01
-
-/* OrangePi fan reading and PWM */
-#define ORANGEPI_SENSOR_FAN_REG 0x78 /* Fan reading is 2 registers long */
-#define ORANGEPI_SENSOR_PWM_ENABLE_REG 0x40 /* PWM enable is 1 register long */
-#define ORANGEPI_SENSOR_PWM_REG 0x38 /* PWM reading is 1 register long */
-
-/* Turbo button takeover function
- * Different boards have different values and EC registers
- * for the same function
- */
-#define OXP_TURBO_SWITCH_REG 0xF1 /* Mini Pro, OneXFly, AOKZOE */
-#define OXP_2_TURBO_SWITCH_REG 0xEB /* OXP2 and X1 */
-#define OXP_MINI_TURBO_SWITCH_REG 0x1E /* Mini AO7 */
-
-#define OXP_MINI_TURBO_TAKE_VAL 0x01 /* Mini AO7 */
-#define OXP_TURBO_TAKE_VAL 0x40 /* All other models */
-
-#define OXP_TURBO_RETURN_VAL 0x00 /* Common return val */
-
-static const struct dmi_system_id dmi_table[] = {
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AOKZOE"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AOKZOE A1 AR07"),
- },
- .driver_data = (void *)aok_zoe_a1,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AOKZOE"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AOKZOE A1 Pro"),
- },
- .driver_data = (void *)aok_zoe_a1,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"),
- },
- .driver_data = (void *)aya_neo_2,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR"),
- },
- .driver_data = (void *)aya_neo_air,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR 1S"),
- },
- .driver_data = (void *)aya_neo_air_1s,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AB05-Mendocino"),
- },
- .driver_data = (void *)aya_neo_air_plus_mendo,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR Pro"),
- },
- .driver_data = (void *)aya_neo_air_pro,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "FLIP"),
- },
- .driver_data = (void *)aya_neo_flip,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "GEEK"),
- },
- .driver_data = (void *)aya_neo_geek,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "KUN"),
- },
- .driver_data = (void *)aya_neo_kun,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "OrangePi"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "NEO-01"),
- },
- .driver_data = (void *)orange_pi_neo,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONE XPLAYER"),
- },
- .driver_data = (void *)oxp_mini_amd,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_MATCH(DMI_BOARD_NAME, "ONEXPLAYER 2"),
- },
- .driver_data = (void *)oxp_2,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER F1"),
- },
- .driver_data = (void *)oxp_fly,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER mini A07"),
- },
- .driver_data = (void *)oxp_mini_amd_a07,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER Mini Pro"),
- },
- .driver_data = (void *)oxp_mini_amd_pro,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1"),
- },
- .driver_data = (void *)oxp_x1,
- },
- {},
-};
-
-/* Helper functions to handle EC read/write */
-static int read_from_ec(u8 reg, int size, long *val)
-{
- int i;
- int ret;
- u8 buffer;
-
- if (!lock_global_acpi_lock())
- return -EBUSY;
-
- *val = 0;
- for (i = 0; i < size; i++) {
- ret = ec_read(reg + i, &buffer);
- if (ret)
- return ret;
- *val <<= i * 8;
- *val += buffer;
- }
-
- if (!unlock_global_acpi_lock())
- return -EBUSY;
-
- return 0;
-}
-
-static int write_to_ec(u8 reg, u8 value)
-{
- int ret;
-
- if (!lock_global_acpi_lock())
- return -EBUSY;
-
- ret = ec_write(reg, value);
-
- if (!unlock_global_acpi_lock())
- return -EBUSY;
-
- return ret;
-}
-
-/* Turbo button toggle functions */
-static int tt_toggle_enable(void)
-{
- u8 reg;
- u8 val;
-
- switch (board) {
- case oxp_mini_amd_a07:
- reg = OXP_MINI_TURBO_SWITCH_REG;
- val = OXP_MINI_TURBO_TAKE_VAL;
- break;
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- reg = OXP_TURBO_SWITCH_REG;
- val = OXP_TURBO_TAKE_VAL;
- break;
- case oxp_2:
- case oxp_x1:
- reg = OXP_2_TURBO_SWITCH_REG;
- val = OXP_TURBO_TAKE_VAL;
- break;
- default:
- return -EINVAL;
- }
- return write_to_ec(reg, val);
-}
-
-static int tt_toggle_disable(void)
-{
- u8 reg;
- u8 val;
-
- switch (board) {
- case oxp_mini_amd_a07:
- reg = OXP_MINI_TURBO_SWITCH_REG;
- val = OXP_TURBO_RETURN_VAL;
- break;
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- reg = OXP_TURBO_SWITCH_REG;
- val = OXP_TURBO_RETURN_VAL;
- break;
- case oxp_2:
- case oxp_x1:
- reg = OXP_2_TURBO_SWITCH_REG;
- val = OXP_TURBO_RETURN_VAL;
- break;
- default:
- return -EINVAL;
- }
- return write_to_ec(reg, val);
-}
-
-/* Callbacks for turbo toggle attribute */
-static umode_t tt_toggle_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- switch (board) {
- case aok_zoe_a1:
- case oxp_2:
- case oxp_fly:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- case oxp_x1:
- return attr->mode;
- default:
- break;
- }
- return 0;
-}
-
-static ssize_t tt_toggle_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- int rval;
- bool value;
-
- rval = kstrtobool(buf, &value);
- if (rval)
- return rval;
-
- if (value) {
- rval = tt_toggle_enable();
- } else {
- rval = tt_toggle_disable();
- }
- if (rval)
- return rval;
-
- return count;
-}
-
-static ssize_t tt_toggle_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int retval;
- u8 reg;
- long val;
-
- switch (board) {
- case oxp_mini_amd_a07:
- reg = OXP_MINI_TURBO_SWITCH_REG;
- break;
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- reg = OXP_TURBO_SWITCH_REG;
- break;
- case oxp_2:
- case oxp_x1:
- reg = OXP_2_TURBO_SWITCH_REG;
- break;
- default:
- return -EINVAL;
- }
-
- retval = read_from_ec(reg, 1, &val);
- if (retval)
- return retval;
-
- return sysfs_emit(buf, "%d\n", !!val);
-}
-
-static DEVICE_ATTR_RW(tt_toggle);
-
-/* PWM enable/disable functions */
-static int oxp_pwm_enable(void)
-{
- switch (board) {
- case orange_pi_neo:
- return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL);
- case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_2:
- case oxp_fly:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- case oxp_x1:
- return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL);
- default:
- return -EINVAL;
- }
-}
-
-static int oxp_pwm_disable(void)
-{
- switch (board) {
- case orange_pi_neo:
- return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO);
- case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_2:
- case oxp_fly:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- case oxp_x1:
- return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO);
- default:
- return -EINVAL;
- }
-}
-
-/* Callbacks for hwmon interface */
-static umode_t oxp_ec_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type, u32 attr, int channel)
-{
- switch (type) {
- case hwmon_fan:
- return 0444;
- case hwmon_pwm:
- return 0644;
- default:
- return 0;
- }
-}
-
-static int oxp_platform_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- int ret;
-
- switch (type) {
- case hwmon_fan:
- switch (attr) {
- case hwmon_fan_input:
- switch (board) {
- case orange_pi_neo:
- return read_from_ec(ORANGEPI_SENSOR_FAN_REG, 2, val);
- case oxp_2:
- case oxp_x1:
- return read_from_ec(OXP_2_SENSOR_FAN_REG, 2, val);
- case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_fly:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- return read_from_ec(OXP_SENSOR_FAN_REG, 2, val);
- default:
- break;
- }
- break;
- default:
- break;
- }
- break;
- case hwmon_pwm:
- switch (attr) {
- case hwmon_pwm_input:
- switch (board) {
- case orange_pi_neo:
- ret = read_from_ec(ORANGEPI_SENSOR_PWM_REG, 1, val);
- if (ret)
- return ret;
- /* scale from range [1-244] */
- *val = ((*val - 1) * 254 / 243) + 1;
- break;
- case oxp_2:
- case oxp_x1:
- ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
- if (ret)
- return ret;
- /* scale from range [0-184] */
- *val = (*val * 255) / 184;
- break;
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
- if (ret)
- return ret;
- /* scale from range [0-100] */
- *val = (*val * 255) / 100;
- break;
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- default:
- ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
- if (ret)
- return ret;
- break;
- }
- return 0;
- case hwmon_pwm_enable:
- switch (board) {
- case orange_pi_neo:
- return read_from_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, 1, val);
- case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_2:
- case oxp_fly:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- case oxp_x1:
- return read_from_ec(OXP_SENSOR_PWM_ENABLE_REG, 1, val);
- default:
- break;
- }
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- return -EOPNOTSUPP;
-}
-
-static int oxp_platform_write(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long val)
-{
- switch (type) {
- case hwmon_pwm:
- switch (attr) {
- case hwmon_pwm_enable:
- if (val == 1)
- return oxp_pwm_enable();
- else if (val == 0)
- return oxp_pwm_disable();
- return -EINVAL;
- case hwmon_pwm_input:
- if (val < 0 || val > 255)
- return -EINVAL;
- switch (board) {
- case orange_pi_neo:
- /* scale to range [1-244] */
- val = ((val - 1) * 243 / 254) + 1;
- return write_to_ec(ORANGEPI_SENSOR_PWM_REG, val);
- case oxp_2:
- case oxp_x1:
- /* scale to range [0-184] */
- val = (val * 184) / 255;
- return write_to_ec(OXP_SENSOR_PWM_REG, val);
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- /* scale to range [0-100] */
- val = (val * 100) / 255;
- return write_to_ec(OXP_SENSOR_PWM_REG, val);
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- return write_to_ec(OXP_SENSOR_PWM_REG, val);
- default:
- break;
- }
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- return -EOPNOTSUPP;
-}
-
-/* Known sensors in the OXP EC controllers */
-static const struct hwmon_channel_info * const oxp_platform_sensors[] = {
- HWMON_CHANNEL_INFO(fan,
- HWMON_F_INPUT),
- HWMON_CHANNEL_INFO(pwm,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
- NULL,
-};
-
-static struct attribute *oxp_ec_attrs[] = {
- &dev_attr_tt_toggle.attr,
- NULL
-};
-
-static struct attribute_group oxp_ec_attribute_group = {
- .is_visible = tt_toggle_is_visible,
- .attrs = oxp_ec_attrs,
-};
-
-static const struct attribute_group *oxp_ec_groups[] = {
- &oxp_ec_attribute_group,
- NULL
-};
-
-static const struct hwmon_ops oxp_ec_hwmon_ops = {
- .is_visible = oxp_ec_hwmon_is_visible,
- .read = oxp_platform_read,
- .write = oxp_platform_write,
-};
-
-static const struct hwmon_chip_info oxp_ec_chip_info = {
- .ops = &oxp_ec_hwmon_ops,
- .info = oxp_platform_sensors,
-};
-
-/* Initialization logic */
-static int oxp_platform_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device *hwdev;
-
- hwdev = devm_hwmon_device_register_with_info(dev, "oxpec", NULL,
- &oxp_ec_chip_info, NULL);
-
- return PTR_ERR_OR_ZERO(hwdev);
-}
-
-static struct platform_driver oxp_platform_driver = {
- .driver = {
- .name = "oxp-platform",
- .dev_groups = oxp_ec_groups,
- },
- .probe = oxp_platform_probe,
-};
-
-static struct platform_device *oxp_platform_device;
-
-static int __init oxp_platform_init(void)
-{
- const struct dmi_system_id *dmi_entry;
-
- dmi_entry = dmi_first_match(dmi_table);
- if (!dmi_entry)
- return -ENODEV;
-
- board = (enum oxp_board)(unsigned long)dmi_entry->driver_data;
-
- /*
- * Have to check for AMD processor here because DMI strings are the same
- * between Intel and AMD boards on older OneXPlayer devices, the only way
- * to tell them apart is the CPU. Old Intel boards have an unsupported EC.
- */
- if (board == oxp_mini_amd && boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
- return -ENODEV;
-
- oxp_platform_device =
- platform_create_bundle(&oxp_platform_driver,
- oxp_platform_probe, NULL, 0, NULL, 0);
-
- return PTR_ERR_OR_ZERO(oxp_platform_device);
-}
-
-static void __exit oxp_platform_exit(void)
-{
- platform_device_unregister(oxp_platform_device);
- platform_driver_unregister(&oxp_platform_driver);
-}
-
-MODULE_DEVICE_TABLE(dmi, dmi_table);
-
-module_init(oxp_platform_init);
-module_exit(oxp_platform_exit);
-
-MODULE_AUTHOR("Joaquín Ignacio Aramendía <samsagax@gmail.com>");
-MODULE_DESCRIPTION("Platform driver that handles EC sensors of OneXPlayer devices");
-MODULE_LICENSE("GPL");
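
For reference, the fan PWM paths removed above rescale between each board's native EC duty range and the 0-255 hwmon PWM range with plain integer arithmetic. A minimal standalone sketch of that arithmetic (hypothetical helpers, userspace demo, not part of any driver):

#include <stdio.h>

/* hwmon PWM <-> EC duty conversion, as in the removed read/write paths */
static long ec_to_hwmon(long ec_val, long ec_max)
{
	return (ec_val * 255) / ec_max;		/* e.g. [0-184] -> [0-255] */
}

static long hwmon_to_ec(long val, long ec_max)
{
	return (val * ec_max) / 255;		/* write path, integer truncation */
}

int main(void)
{
	printf("EC 92/184 -> hwmon %ld\n", ec_to_hwmon(92, 184));	/* 127 */
	printf("hwmon 255 -> EC %ld\n", hwmon_to_ec(255, 184));		/* 184 */
	return 0;
}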
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index c9b3c3149982..441f984a859d 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -218,6 +218,24 @@ config SENSORS_LM25066_REGULATOR
If you say yes here you get regulator support for National
Semiconductor LM25066, LM5064, and LM5066.
+config SENSORS_LT3074
+ tristate "Analog Devices LT3074"
+ help
+ If you say yes here you get hardware monitoring support for Analog
+ Devices LT3074.
+
+ This driver can also be built as a module. If so, the module will
+ be called lt3074.
+
+config SENSORS_LT3074_REGULATOR
+ tristate "Regulator support for LT3074"
+ depends on SENSORS_LT3074 && REGULATOR
+ help
+ If you say yes here you get regulator support for Analog Devices
+	  LT3074. The LT3074 is a low voltage, ultralow noise, high PSRR,
+	  low dropout linear regulator. The device supplies up to 3A with a
+ typical dropout voltage of 45mV.
+
config SENSORS_LT7182S
tristate "Analog Devices LT7182S"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 56f128c4653e..29cd8a3317d2 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_SENSORS_IR38064) += ir38064.o
obj-$(CONFIG_SENSORS_IRPS5401) += irps5401.o
obj-$(CONFIG_SENSORS_ISL68137) += isl68137.o
obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
+obj-$(CONFIG_SENSORS_LT3074) += lt3074.o
obj-$(CONFIG_SENSORS_LT7182S) += lt7182s.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 40b0dda32ea6..dd7275a67a0a 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -437,7 +437,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
#if IS_ENABLED(CONFIG_SENSORS_LM25066_REGULATOR)
static const struct regulator_desc lm25066_reg_desc[] = {
- PMBUS_REGULATOR_ONE("vout"),
+ PMBUS_REGULATOR_ONE_NODE("vout"),
};
#endif
diff --git a/drivers/hwmon/pmbus/lt3074.c b/drivers/hwmon/pmbus/lt3074.c
new file mode 100644
index 000000000000..3704dbe7b54a
--- /dev/null
+++ b/drivers/hwmon/pmbus/lt3074.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hardware monitoring driver for Analog Devices LT3074
+ *
+ * Copyright (C) 2025 Analog Devices, Inc.
+ */
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include "pmbus.h"
+
+#define LT3074_MFR_READ_VBIAS 0xc6
+#define LT3074_MFR_BIAS_OV_WARN_LIMIT 0xc7
+#define LT3074_MFR_BIAS_UV_WARN_LIMIT 0xc8
+#define LT3074_MFR_SPECIAL_ID 0xe7
+
+#define LT3074_SPECIAL_ID_VALUE 0x1c1d
+
+static const struct regulator_desc __maybe_unused lt3074_reg_desc[] = {
+ PMBUS_REGULATOR_ONE("regulator"),
+};
+
+static int lt3074_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ switch (reg) {
+ case PMBUS_VIRT_READ_VMON:
+ return pmbus_read_word_data(client, page, phase,
+ LT3074_MFR_READ_VBIAS);
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ return pmbus_read_word_data(client, page, phase,
+ LT3074_MFR_BIAS_UV_WARN_LIMIT);
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ return pmbus_read_word_data(client, page, phase,
+ LT3074_MFR_BIAS_OV_WARN_LIMIT);
+ default:
+ return -ENODATA;
+ }
+}
+
+static int lt3074_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ switch (reg) {
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ return pmbus_write_word_data(client, 0,
+ LT3074_MFR_BIAS_UV_WARN_LIMIT,
+ word);
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ return pmbus_write_word_data(client, 0,
+ LT3074_MFR_BIAS_OV_WARN_LIMIT,
+ word);
+ default:
+ return -ENODATA;
+ }
+}
+
+static struct pmbus_driver_info lt3074_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_VMON |
+ PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+ .read_word_data = lt3074_read_word_data,
+ .write_word_data = lt3074_write_word_data,
+#if IS_ENABLED(CONFIG_SENSORS_LT3074_REGULATOR)
+ .num_regulators = 1,
+ .reg_desc = lt3074_reg_desc,
+#endif
+};
+
+static int lt3074_probe(struct i2c_client *client)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_word_data(client, LT3074_MFR_SPECIAL_ID);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to read ID\n");
+
+ if (ret != LT3074_SPECIAL_ID_VALUE)
+ return dev_err_probe(dev, -ENODEV, "ID mismatch\n");
+
+ return pmbus_do_probe(client, &lt3074_info);
+}
+
+static const struct i2c_device_id lt3074_id[] = {
+ { "lt3074", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lt3074_id);
+
+static const struct of_device_id __maybe_unused lt3074_of_match[] = {
+ { .compatible = "adi,lt3074" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lt3074_of_match);
+
+static struct i2c_driver lt3074_driver = {
+ .driver = {
+ .name = "lt3074",
+ .of_match_table = of_match_ptr(lt3074_of_match),
+ },
+ .probe = lt3074_probe,
+ .id_table = lt3074_id,
+};
+module_i2c_driver(lt3074_driver);
+
+MODULE_AUTHOR("Cedric Encarnacion <cedricjustine.encarnacion@analog.com>");
+MODULE_DESCRIPTION("PMBus driver for Analog Devices LT3074");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index c9dda33831ff..56834d26f8ef 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -12,9 +12,26 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/delay.h>
#include "pmbus.h"
-enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
+enum chips {
+ adpm12160,
+ max34440,
+ max34441,
+ max34446,
+ max34451,
+ max34460,
+ max34461,
+};
+
+/*
+ * Firmware is sometimes not ready if we try to read data
+ * from the page immediately after setting it. Maxim
+ * recommends a 50us delay because the chip fails to clock
+ * stretch long enough here.
+ */
+#define MAX34440_PAGE_CHANGE_DELAY 50
#define MAX34440_MFR_VOUT_PEAK 0xd4
#define MAX34440_MFR_IOUT_PEAK 0xd5
@@ -34,16 +51,21 @@ enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
/*
* The whole max344* family have IOUT_OC_WARN_LIMIT and IOUT_OC_FAULT_LIMIT
* swapped from the standard pmbus spec addresses.
+ * For max34451, versions MAX34451ETNA6+ and later have this issue fixed.
*/
#define MAX34440_IOUT_OC_WARN_LIMIT 0x46
#define MAX34440_IOUT_OC_FAULT_LIMIT 0x4A
+#define MAX34451ETNA6_MFR_REV 0x0012
+
#define MAX34451_MFR_CHANNEL_CONFIG 0xe4
#define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f
struct max34440_data {
int id;
struct pmbus_driver_info info;
+ u8 iout_oc_warn_limit;
+ u8 iout_oc_fault_limit;
};
#define to_max34440_data(x) container_of(x, struct max34440_data, info)
@@ -60,11 +82,11 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_IOUT_OC_FAULT_LIMIT:
ret = pmbus_read_word_data(client, page, phase,
- MAX34440_IOUT_OC_FAULT_LIMIT);
+ data->iout_oc_fault_limit);
break;
case PMBUS_IOUT_OC_WARN_LIMIT:
ret = pmbus_read_word_data(client, page, phase,
- MAX34440_IOUT_OC_WARN_LIMIT);
+ data->iout_oc_warn_limit);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
ret = pmbus_read_word_data(client, page, phase,
@@ -75,7 +97,8 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
MAX34440_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_AVG:
- if (data->id != max34446 && data->id != max34451)
+ if (data->id != max34446 && data->id != max34451 &&
+ data->id != adpm12160)
return -ENXIO;
ret = pmbus_read_word_data(client, page, phase,
MAX34446_MFR_IOUT_AVG);
@@ -133,11 +156,11 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_IOUT_OC_FAULT_LIMIT:
- ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_FAULT_LIMIT,
+ ret = pmbus_write_word_data(client, page, data->iout_oc_fault_limit,
word);
break;
case PMBUS_IOUT_OC_WARN_LIMIT:
- ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_WARN_LIMIT,
+ ret = pmbus_write_word_data(client, page, data->iout_oc_warn_limit,
word);
break;
case PMBUS_VIRT_RESET_POUT_HISTORY:
@@ -159,7 +182,8 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
case PMBUS_VIRT_RESET_IOUT_HISTORY:
ret = pmbus_write_word_data(client, page,
MAX34440_MFR_IOUT_PEAK, 0);
- if (!ret && (data->id == max34446 || data->id == max34451))
+ if (!ret && (data->id == max34446 || data->id == max34451 ||
+ data->id == adpm12160))
ret = pmbus_write_word_data(client, page,
MAX34446_MFR_IOUT_AVG, 0);
@@ -235,9 +259,29 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
*/
int page, rv;
+ bool max34451_na6 = false;
+
+ rv = i2c_smbus_read_word_data(client, PMBUS_MFR_REVISION);
+ if (rv < 0)
+ return rv;
+
+ if (rv >= MAX34451ETNA6_MFR_REV) {
+ max34451_na6 = true;
+ data->info.format[PSC_VOLTAGE_IN] = direct;
+ data->info.format[PSC_CURRENT_IN] = direct;
+ data->info.m[PSC_VOLTAGE_IN] = 1;
+ data->info.b[PSC_VOLTAGE_IN] = 0;
+ data->info.R[PSC_VOLTAGE_IN] = 3;
+ data->info.m[PSC_CURRENT_IN] = 1;
+ data->info.b[PSC_CURRENT_IN] = 0;
+ data->info.R[PSC_CURRENT_IN] = 2;
+ data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT;
+ data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT;
+ }
for (page = 0; page < 16; page++) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ fsleep(MAX34440_PAGE_CHANGE_DELAY);
if (rv < 0)
return rv;
@@ -251,16 +295,30 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
case 0x20:
data->info.func[page] = PMBUS_HAVE_VOUT |
PMBUS_HAVE_STATUS_VOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_VIN |
+ PMBUS_HAVE_STATUS_INPUT;
break;
case 0x21:
data->info.func[page] = PMBUS_HAVE_VOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_VIN;
break;
case 0x22:
data->info.func[page] = PMBUS_HAVE_IOUT |
PMBUS_HAVE_STATUS_IOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_IIN |
+ PMBUS_HAVE_STATUS_INPUT;
break;
case 0x23:
data->info.func[page] = PMBUS_HAVE_IOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_IIN;
break;
default:
break;
@@ -271,6 +329,41 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
}
static struct pmbus_driver_info max34440_info[] = {
+ [adpm12160] = {
+ .pages = 19,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .m[PSC_CURRENT_IN] = 1,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = 2,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 2,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+		/* pages below [18] with no func entry are not used for monitoring */
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[4] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[5] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[6] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[7] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[8] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[9] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[10] = PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
[max34440] = {
.pages = 14,
.format[PSC_VOLTAGE_IN] = direct,
@@ -312,6 +405,7 @@ static struct pmbus_driver_info max34440_info[] = {
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34441] = {
.pages = 12,
@@ -355,6 +449,7 @@ static struct pmbus_driver_info max34440_info[] = {
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34446] = {
.pages = 7,
@@ -392,6 +487,7 @@ static struct pmbus_driver_info max34440_info[] = {
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34451] = {
.pages = 21,
@@ -415,6 +511,7 @@ static struct pmbus_driver_info max34440_info[] = {
.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34460] = {
.pages = 18,
@@ -445,6 +542,7 @@ static struct pmbus_driver_info max34440_info[] = {
.func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34461] = {
.pages = 23,
@@ -480,6 +578,7 @@ static struct pmbus_driver_info max34440_info[] = {
.func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
};
@@ -494,17 +593,23 @@ static int max34440_probe(struct i2c_client *client)
return -ENOMEM;
data->id = i2c_match_id(max34440_id, client)->driver_data;
data->info = max34440_info[data->id];
+ data->iout_oc_fault_limit = MAX34440_IOUT_OC_FAULT_LIMIT;
+ data->iout_oc_warn_limit = MAX34440_IOUT_OC_WARN_LIMIT;
if (data->id == max34451) {
rv = max34451_set_supported_funcs(client, data);
if (rv)
return rv;
+ } else if (data->id == adpm12160) {
+ data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT;
+ data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT;
}
return pmbus_do_probe(client, &data->info);
}
static const struct i2c_device_id max34440_id[] = {
+ {"adpm12160", adpm12160},
{"max34440", max34440},
{"max34441", max34441},
{"max34446", max34446},
diff --git a/drivers/hwmon/pmbus/mpq7932.c b/drivers/hwmon/pmbus/mpq7932.c
index c1e2d0cb2fd0..8f10e37a7a76 100644
--- a/drivers/hwmon/pmbus/mpq7932.c
+++ b/drivers/hwmon/pmbus/mpq7932.c
@@ -51,8 +51,8 @@ static const struct regulator_desc mpq7932_regulators_desc[] = {
};
static const struct regulator_desc mpq7932_regulators_desc_one[] = {
- PMBUS_REGULATOR_STEP_ONE("buck", MPQ7932_N_VOLTAGES,
- MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
+ PMBUS_REGULATOR_STEP_ONE_NODE("buck", MPQ7932_N_VOLTAGES,
+ MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
};
#endif
diff --git a/drivers/hwmon/pmbus/mpq8785.c b/drivers/hwmon/pmbus/mpq8785.c
index 331c274ca892..1f56aaf4dde8 100644
--- a/drivers/hwmon/pmbus/mpq8785.c
+++ b/drivers/hwmon/pmbus/mpq8785.c
@@ -4,10 +4,23 @@
*/
#include <linux/i2c.h>
+#include <linux/bitops.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/of_device.h>
#include "pmbus.h"
+#define MPM82504_READ_TEMPERATURE_1_SIGN_POS 9
+
+enum chips { mpm3695, mpm3695_25, mpm82504, mpq8785 };
+
+static u16 voltage_scale_loop_max_val[] = {
+ [mpm3695] = GENMASK(9, 0),
+ [mpm3695_25] = GENMASK(11, 0),
+ [mpm82504] = GENMASK(9, 0),
+ [mpq8785] = GENMASK(10, 0),
+};
+
static int mpq8785_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
@@ -34,6 +47,20 @@ static int mpq8785_identify(struct i2c_client *client,
return 0;
};
+static int mpm82504_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ int ret;
+
+ ret = pmbus_read_word_data(client, page, phase, reg);
+
+ if (ret < 0 || reg != PMBUS_READ_TEMPERATURE_1)
+ return ret;
+
+ /* Fix PMBUS_READ_TEMPERATURE_1 signedness */
+ return sign_extend32(ret, MPM82504_READ_TEMPERATURE_1_SIGN_POS) & 0xffff;
+}
+
static struct pmbus_driver_info mpq8785_info = {
.pages = 1,
.format[PSC_VOLTAGE_IN] = direct,
@@ -53,26 +80,74 @@ static struct pmbus_driver_info mpq8785_info = {
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
- .identify = mpq8785_identify,
-};
-
-static int mpq8785_probe(struct i2c_client *client)
-{
- return pmbus_do_probe(client, &mpq8785_info);
};
static const struct i2c_device_id mpq8785_id[] = {
- { "mpq8785" },
+ { "mpm3695", mpm3695 },
+ { "mpm3695-25", mpm3695_25 },
+ { "mpm82504", mpm82504 },
+ { "mpq8785", mpq8785 },
{ },
};
MODULE_DEVICE_TABLE(i2c, mpq8785_id);
static const struct of_device_id __maybe_unused mpq8785_of_match[] = {
- { .compatible = "mps,mpq8785" },
+ { .compatible = "mps,mpm3695", .data = (void *)mpm3695 },
+ { .compatible = "mps,mpm3695-25", .data = (void *)mpm3695_25 },
+ { .compatible = "mps,mpm82504", .data = (void *)mpm82504 },
+ { .compatible = "mps,mpq8785", .data = (void *)mpq8785 },
{}
};
MODULE_DEVICE_TABLE(of, mpq8785_of_match);
+static int mpq8785_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct pmbus_driver_info *info;
+ enum chips chip_id;
+ u32 voltage_scale;
+ int ret;
+
+ info = devm_kmemdup(dev, &mpq8785_info, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if (dev->of_node)
+ chip_id = (kernel_ulong_t)of_device_get_match_data(dev);
+ else
+ chip_id = (kernel_ulong_t)i2c_get_match_data(client);
+
+ switch (chip_id) {
+ case mpm3695:
+ case mpm3695_25:
+ case mpm82504:
+ info->format[PSC_VOLTAGE_OUT] = direct;
+ info->m[PSC_VOLTAGE_OUT] = 8;
+ info->b[PSC_VOLTAGE_OUT] = 0;
+ info->R[PSC_VOLTAGE_OUT] = 2;
+ info->read_word_data = mpm82504_read_word_data;
+ break;
+ case mpq8785:
+ info->identify = mpq8785_identify;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ if (!device_property_read_u32(dev, "mps,vout-fb-divider-ratio-permille",
+ &voltage_scale)) {
+ if (voltage_scale > voltage_scale_loop_max_val[chip_id])
+ return -EINVAL;
+
+ ret = i2c_smbus_write_word_data(client, PMBUS_VOUT_SCALE_LOOP,
+ voltage_scale);
+ if (ret)
+ return ret;
+ }
+
+ return pmbus_do_probe(client, info);
+};
+
static struct i2c_driver mpq8785_driver = {
.driver = {
.name = "mpq8785",
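
mpm82504_read_word_data() above fixes up READ_TEMPERATURE_1 because these chips report the temperature as a two's-complement value whose sign bit is bit 9 of the 16-bit word. A standalone illustration of the same sign extension (hypothetical helper, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Sign-extend a value whose sign bit sits at 'sign_pos' (0-based). */
static int32_t sign_extend(uint32_t val, unsigned int sign_pos)
{
	uint32_t shift = 31 - sign_pos;

	return (int32_t)(val << shift) >> shift;
}

int main(void)
{
	/* 0x3ff is -1 in 10-bit two's complement; keep it inside a 16-bit word */
	uint16_t word = sign_extend(0x3ff, 9) & 0xffff;

	printf("0x%04x\n", word);	/* prints 0xffff */
	return 0;
}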
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index ddb19c9726d6..d2e9bfb5320f 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -482,6 +482,7 @@ struct pmbus_driver_info {
*/
int access_delay; /* in microseconds */
int write_delay; /* in microseconds */
+ int page_change_delay; /* in microseconds */
};
/* Regulator ops */
@@ -508,11 +509,11 @@ int pmbus_regulator_init_cb(struct regulator_dev *rdev,
#define PMBUS_REGULATOR(_name, _id) PMBUS_REGULATOR_STEP(_name, _id, 0, 0, 0)
-#define PMBUS_REGULATOR_STEP_ONE(_name, _voltages, _step, _min_uV) \
+#define __PMBUS_REGULATOR_STEP_ONE(_name, _node, _voltages, _step, _min_uV) \
{ \
.name = (_name), \
.of_match = of_match_ptr(_name), \
- .regulators_node = of_match_ptr("regulators"), \
+ .regulators_node = of_match_ptr(_node), \
.ops = &pmbus_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
@@ -522,7 +523,19 @@ int pmbus_regulator_init_cb(struct regulator_dev *rdev,
.init_cb = pmbus_regulator_init_cb, \
}
-#define PMBUS_REGULATOR_ONE(_name) PMBUS_REGULATOR_STEP_ONE(_name, 0, 0, 0)
+/*
+ * _NODE macros are defined for historic reasons and MUST NOT be used in new
+ * drivers.
+ */
+#define PMBUS_REGULATOR_STEP_ONE_NODE(_name, _voltages, _step, _min_uV) \
+ __PMBUS_REGULATOR_STEP_ONE(_name, "regulators", _voltages, _step, _min_uV)
+
+#define PMBUS_REGULATOR_ONE_NODE(_name) PMBUS_REGULATOR_STEP_ONE_NODE(_name, 0, 0, 0)
+
+#define PMBUS_REGULATOR_STEP_ONE(_name, _voltages, _step, _min_uV) \
+ __PMBUS_REGULATOR_STEP_ONE(_name, NULL, _voltages, _step, _min_uV)
+
+#define PMBUS_REGULATOR_ONE(_name) PMBUS_REGULATOR_STEP_ONE(_name, 0, 0, 0)
/* Function declarations */
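
Only the regulators_node field differs between the two macro families above: the _NODE variants keep matching under a "regulators" child node, while the plain variants leave it NULL. A minimal sketch of what a driver declares in each case (assumes pmbus.h; expansion details elided):

/* Legacy bindings kept by existing drivers (lm25066, tda38640, ...): */
static const struct regulator_desc legacy_desc[] = {
	PMBUS_REGULATOR_ONE_NODE("vout"),	/* .regulators_node = "regulators" */
};

/* New drivers must use the plain variant: */
static const struct regulator_desc new_desc[] = {
	PMBUS_REGULATOR_ONE("vout"),		/* .regulators_node = NULL */
};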
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index cfeba2e4c5c3..be6d05def115 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -32,6 +32,13 @@
#define PMBUS_ATTR_ALLOC_SIZE 32
#define PMBUS_NAME_SIZE 24
+/*
+ * The type of operation used for picking the delay between
+ * successive pmbus operations.
+ */
+#define PMBUS_OP_WRITE BIT(0)
+#define PMBUS_OP_PAGE_CHANGE BIT(1)
+
static int wp = -1;
module_param(wp, int, 0444);
@@ -113,8 +120,8 @@ struct pmbus_data {
int vout_low[PMBUS_PAGES]; /* voltage low margin */
int vout_high[PMBUS_PAGES]; /* voltage high margin */
- ktime_t write_time; /* Last SMBUS write timestamp */
- ktime_t access_time; /* Last SMBUS access timestamp */
+
+ ktime_t next_access_backoff; /* Wait until at least this time */
};
struct pmbus_debugfs_entry {
@@ -169,32 +176,26 @@ EXPORT_SYMBOL_NS_GPL(pmbus_set_update, "PMBUS");
static void pmbus_wait(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
- const struct pmbus_driver_info *info = data->info;
- s64 delta;
+ s64 delay = ktime_us_delta(data->next_access_backoff, ktime_get());
- if (info->access_delay) {
- delta = ktime_us_delta(ktime_get(), data->access_time);
-
- if (delta < info->access_delay)
- fsleep(info->access_delay - delta);
- } else if (info->write_delay) {
- delta = ktime_us_delta(ktime_get(), data->write_time);
-
- if (delta < info->write_delay)
- fsleep(info->write_delay - delta);
- }
+ if (delay > 0)
+ fsleep(delay);
}
-/* Sets the last accessed timestamp for pmbus_wait */
-static void pmbus_update_ts(struct i2c_client *client, bool write_op)
+/* Sets the last operation timestamp for pmbus_wait */
+static void pmbus_update_ts(struct i2c_client *client, int op)
{
struct pmbus_data *data = i2c_get_clientdata(client);
const struct pmbus_driver_info *info = data->info;
+ int delay = info->access_delay;
+
+ if (op & PMBUS_OP_WRITE)
+ delay = max(delay, info->write_delay);
+ if (op & PMBUS_OP_PAGE_CHANGE)
+ delay = max(delay, info->page_change_delay);
- if (info->access_delay)
- data->access_time = ktime_get();
- else if (info->write_delay && write_op)
- data->write_time = ktime_get();
+ if (delay > 0)
+ data->next_access_backoff = ktime_add_us(ktime_get(), delay);
}
int pmbus_set_page(struct i2c_client *client, int page, int phase)
@@ -209,13 +210,13 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase)
data->info->pages > 1 && page != data->currpage) {
pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE | PMBUS_OP_PAGE_CHANGE);
if (rv < 0)
return rv;
pmbus_wait(client);
rv = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (rv < 0)
return rv;
@@ -229,7 +230,7 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase)
pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, PMBUS_PHASE,
phase);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
if (rv)
return rv;
}
@@ -249,7 +250,7 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
pmbus_wait(client);
rv = i2c_smbus_write_byte(client, value);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
return rv;
}
@@ -284,7 +285,7 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
pmbus_wait(client);
rv = i2c_smbus_write_word_data(client, reg, word);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
return rv;
}
@@ -405,7 +406,7 @@ int pmbus_read_word_data(struct i2c_client *client, int page, int phase, u8 reg)
pmbus_wait(client);
rv = i2c_smbus_read_word_data(client, reg);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
return rv;
}
@@ -468,7 +469,7 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
pmbus_wait(client);
rv = i2c_smbus_read_byte_data(client, reg);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
return rv;
}
@@ -484,7 +485,7 @@ int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, u8 value)
pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, reg, value);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
return rv;
}
@@ -520,7 +521,7 @@ static int pmbus_read_block_data(struct i2c_client *client, int page, u8 reg,
pmbus_wait(client);
rv = i2c_smbus_read_block_data(client, reg, data_buf);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
return rv;
}
@@ -2524,7 +2525,7 @@ static int pmbus_read_coefficients(struct i2c_client *client,
rv = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
I2C_SMBUS_WRITE, PMBUS_COEFFICIENTS,
I2C_SMBUS_BLOCK_PROC_CALL, &data);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
if (rv < 0)
return rv;
@@ -2728,7 +2729,7 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
if (!(data->flags & PMBUS_NO_CAPABILITY)) {
pmbus_wait(client);
ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) {
if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC))
@@ -2744,13 +2745,13 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
data->read_status = pmbus_read_status_word;
pmbus_wait(client);
ret = i2c_smbus_read_word_data(client, PMBUS_STATUS_WORD);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (ret < 0 || ret == 0xffff) {
data->read_status = pmbus_read_status_byte;
pmbus_wait(client);
ret = i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (ret < 0 || ret == 0xff) {
dev_err(dev, "PMBus status register not found\n");
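
The pmbus_core changes above collapse the separate access/write timestamps into a single "next allowed access" deadline: every operation records the largest delay that applies to it, and the next transfer sleeps for whatever remains. A standalone sketch of that backoff pattern (hypothetical names, monotonic clock, userspace demo):

#include <stdint.h>
#include <time.h>
#include <unistd.h>

static int64_t next_deadline_us;	/* next time a bus access is allowed */

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Call before every bus access: sleep until the deadline has passed. */
static void bus_wait(void)
{
	int64_t delay = next_deadline_us - now_us();

	if (delay > 0)
		usleep(delay);
}

/* Call after an access: push the deadline out by the largest applicable delay. */
static void bus_update(int64_t access_delay, int64_t op_delay)
{
	int64_t delay = access_delay > op_delay ? access_delay : op_delay;

	if (delay > 0)
		next_deadline_us = now_us() + delay;
}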
diff --git a/drivers/hwmon/pmbus/tda38640.c b/drivers/hwmon/pmbus/tda38640.c
index 07fe58c24485..d902d39f49f4 100644
--- a/drivers/hwmon/pmbus/tda38640.c
+++ b/drivers/hwmon/pmbus/tda38640.c
@@ -15,7 +15,7 @@
#include "pmbus.h"
static const struct regulator_desc __maybe_unused tda38640_reg_desc[] = {
- PMBUS_REGULATOR_ONE("vout"),
+ PMBUS_REGULATOR_ONE_NODE("vout"),
};
struct tda38640_data {
diff --git a/drivers/hwmon/pmbus/tps25990.c b/drivers/hwmon/pmbus/tps25990.c
index 0d2655e69549..c13edd7e1abf 100644
--- a/drivers/hwmon/pmbus/tps25990.c
+++ b/drivers/hwmon/pmbus/tps25990.c
@@ -333,7 +333,7 @@ static int tps25990_write_byte_data(struct i2c_client *client,
#if IS_ENABLED(CONFIG_SENSORS_TPS25990_REGULATOR)
static const struct regulator_desc tps25990_reg_desc[] = {
- PMBUS_REGULATOR_ONE("vout"),
+ PMBUS_REGULATOR_ONE_NODE("vout"),
};
#endif
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 9b0eadc81a2e..2bc8cccb01fd 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -212,8 +212,8 @@ static int ucd9000_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(ret & UCD9000_GPIO_CONFIG_STATUS);
}
-static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct i2c_client *client = gpiochip_get_data(gc);
int ret;
@@ -222,17 +222,17 @@ static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
if (ret < 0) {
dev_dbg(&client->dev, "failed to read GPIO %d config: %d\n",
offset, ret);
- return;
+ return ret;
}
if (value) {
if (ret & UCD9000_GPIO_CONFIG_STATUS)
- return;
+ return 0;
ret |= UCD9000_GPIO_CONFIG_STATUS;
} else {
if (!(ret & UCD9000_GPIO_CONFIG_STATUS))
- return;
+ return 0;
ret &= ~UCD9000_GPIO_CONFIG_STATUS;
}
@@ -244,7 +244,7 @@ static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
if (ret < 0) {
dev_dbg(&client->dev, "Failed to write GPIO %d config: %d\n",
offset, ret);
- return;
+ return ret;
}
ret &= ~UCD9000_GPIO_CONFIG_ENABLE;
@@ -253,6 +253,8 @@ static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
if (ret < 0)
dev_dbg(&client->dev, "Failed to write GPIO %d config: %d\n",
offset, ret);
+
+ return ret;
}
static int ucd9000_gpio_get_direction(struct gpio_chip *gc,
@@ -362,7 +364,7 @@ static void ucd9000_probe_gpio(struct i2c_client *client,
data->gpio.direction_input = ucd9000_gpio_direction_input;
data->gpio.direction_output = ucd9000_gpio_direction_output;
data->gpio.get = ucd9000_gpio_get;
- data->gpio.set = ucd9000_gpio_set;
+ data->gpio.set_rv = ucd9000_gpio_set;
data->gpio.can_sleep = true;
data->gpio.base = -1;
data->gpio.parent = &client->dev;
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index d506a5e7e033..2df294793f6e 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -620,8 +620,8 @@ static int pwm_fan_probe(struct platform_device *pdev)
if (tach->irq == -EPROBE_DEFER)
return tach->irq;
if (tach->irq > 0) {
- ret = devm_request_irq(dev, tach->irq, pulse_handler, 0,
- pdev->name, tach);
+ ret = devm_request_irq(dev, tach->irq, pulse_handler,
+ IRQF_NO_THREAD, pdev->name, tach);
if (ret) {
dev_err(dev,
"Failed to request interrupt: %d\n",
diff --git a/drivers/hwmon/qnap-mcu-hwmon.c b/drivers/hwmon/qnap-mcu-hwmon.c
index 29057514739c..e86e64c4d391 100644
--- a/drivers/hwmon/qnap-mcu-hwmon.c
+++ b/drivers/hwmon/qnap-mcu-hwmon.c
@@ -6,7 +6,6 @@
* Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
*/
-#include <linux/fwnode.h>
#include <linux/hwmon.h>
#include <linux/mfd/qnap-mcu.h>
#include <linux/module.h>
diff --git a/drivers/hwmon/spd5118.c b/drivers/hwmon/spd5118.c
index 358152868d96..5da44571b6a0 100644
--- a/drivers/hwmon/spd5118.c
+++ b/drivers/hwmon/spd5118.c
@@ -66,6 +66,9 @@ static const unsigned short normal_i2c[] = {
#define SPD5118_EEPROM_BASE 0x80
#define SPD5118_EEPROM_SIZE (SPD5118_PAGE_SIZE * SPD5118_NUM_PAGES)
+#define PAGE_ADDR0(page) (((page) & BIT(0)) << 6)
+#define PAGE_ADDR1_4(page) (((page) & GENMASK(4, 1)) >> 1)
+
/* Temperature unit in millicelsius */
#define SPD5118_TEMP_UNIT (MILLIDEGREE_PER_DEGREE / 4)
/* Representable temperature range in millicelsius */
@@ -75,6 +78,7 @@ static const unsigned short normal_i2c[] = {
struct spd5118_data {
struct regmap *regmap;
struct mutex nvmem_lock;
+ bool is_16bit;
};
/* hwmon */
@@ -305,51 +309,6 @@ static bool spd5118_vendor_valid(u8 bank, u8 id)
return id && id != 0x7f;
}
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int spd5118_detect(struct i2c_client *client, struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = client->adapter;
- int regval;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
-
- regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
- if (regval != 0x5118)
- return -ENODEV;
-
- regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
- if (regval < 0 || !spd5118_vendor_valid(regval & 0xff, regval >> 8))
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_CAPABILITY);
- if (regval < 0)
- return -ENODEV;
- if (!(regval & SPD5118_CAP_TS_SUPPORT) || (regval & 0xfc))
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CLR);
- if (regval)
- return -ENODEV;
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_ERROR_CLR);
- if (regval)
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_REVISION);
- if (regval < 0 || (regval & 0xc1))
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CONFIG);
- if (regval < 0)
- return -ENODEV;
- if (regval & ~SPD5118_TS_DISABLE)
- return -ENODEV;
-
- strscpy(info->type, "spd5118", I2C_NAME_SIZE);
- return 0;
-}
-
static const struct hwmon_channel_info *spd5118_info[] = {
HWMON_CHANNEL_INFO(chip,
HWMON_C_REGISTER_TZ),
@@ -376,11 +335,12 @@ static const struct hwmon_chip_info spd5118_chip_info = {
/* nvmem */
-static ssize_t spd5118_nvmem_read_page(struct regmap *regmap, char *buf,
+static ssize_t spd5118_nvmem_read_page(struct spd5118_data *data, char *buf,
unsigned int offset, size_t count)
{
- int addr = (offset >> SPD5118_PAGE_SHIFT) * 0x100 + SPD5118_EEPROM_BASE;
- int err;
+ int page = offset >> SPD5118_PAGE_SHIFT;
+ struct regmap *regmap = data->regmap;
+ int err, addr;
offset &= SPD5118_PAGE_MASK;
@@ -388,6 +348,12 @@ static ssize_t spd5118_nvmem_read_page(struct regmap *regmap, char *buf,
if (offset + count > SPD5118_PAGE_SIZE)
count = SPD5118_PAGE_SIZE - offset;
+ if (data->is_16bit) {
+ addr = SPD5118_EEPROM_BASE | PAGE_ADDR0(page) |
+ (PAGE_ADDR1_4(page) << 8);
+ } else {
+ addr = page * 0x100 + SPD5118_EEPROM_BASE;
+ }
err = regmap_bulk_read(regmap, addr + offset, buf, count);
if (err)
return err;
@@ -410,7 +376,7 @@ static int spd5118_nvmem_read(void *priv, unsigned int off, void *val, size_t co
mutex_lock(&data->nvmem_lock);
while (count) {
- ret = spd5118_nvmem_read_page(data->regmap, buf, off, count);
+ ret = spd5118_nvmem_read_page(data, buf, off, count);
if (ret < 0) {
mutex_unlock(&data->nvmem_lock);
return ret;
@@ -483,7 +449,7 @@ static bool spd5118_volatile_reg(struct device *dev, unsigned int reg)
}
}
-static const struct regmap_range_cfg spd5118_regmap_range_cfg[] = {
+static const struct regmap_range_cfg spd5118_i2c_regmap_range_cfg[] = {
{
.selector_reg = SPD5118_REG_I2C_LEGACY_MODE,
.selector_mask = SPD5118_LEGACY_PAGE_MASK,
@@ -495,7 +461,7 @@ static const struct regmap_range_cfg spd5118_regmap_range_cfg[] = {
},
};
-static const struct regmap_config spd5118_regmap_config = {
+static const struct regmap_config spd5118_regmap8_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x7ff,
@@ -503,89 +469,76 @@ static const struct regmap_config spd5118_regmap_config = {
.volatile_reg = spd5118_volatile_reg,
.cache_type = REGCACHE_MAPLE,
- .ranges = spd5118_regmap_range_cfg,
- .num_ranges = ARRAY_SIZE(spd5118_regmap_range_cfg),
+ .ranges = spd5118_i2c_regmap_range_cfg,
+ .num_ranges = ARRAY_SIZE(spd5118_i2c_regmap_range_cfg),
};
-static int spd5118_init(struct i2c_client *client)
-{
- struct i2c_adapter *adapter = client->adapter;
- int err, regval, mode;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+static const struct regmap_config spd5118_regmap16_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0x7ff,
+ .writeable_reg = spd5118_writeable_reg,
+ .volatile_reg = spd5118_volatile_reg,
+ .cache_type = REGCACHE_MAPLE,
+};
- regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
- if (regval < 0 || (regval && regval != 0x5118))
- return -ENODEV;
+static int spd5118_suspend(struct device *dev)
+{
+ struct spd5118_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ u32 regval;
+ int err;
/*
- * If the device type registers return 0, it is possible that the chip
- * has a non-zero page selected and takes the specification literally,
- * i.e. disables access to volatile registers besides the page register
- * if the page is not 0. Try to identify such chips.
+ * Make sure the configuration register in the regmap cache is current
+ * before bypassing it.
*/
- if (!regval) {
- /* Vendor ID registers must also be 0 */
- regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
- if (regval)
- return -ENODEV;
-
- /* The selected page in MR11 must not be 0 */
- mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE);
- if (mode < 0 || (mode & ~SPD5118_LEGACY_MODE_MASK) ||
- !(mode & SPD5118_LEGACY_PAGE_MASK))
- return -ENODEV;
+ err = regmap_read(regmap, SPD5118_REG_TEMP_CONFIG, &regval);
+ if (err < 0)
+ return err;
- err = i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE,
- mode & SPD5118_LEGACY_MODE_ADDR);
- if (err)
- return -ENODEV;
+ regcache_cache_bypass(regmap, true);
+ regmap_update_bits(regmap, SPD5118_REG_TEMP_CONFIG, SPD5118_TS_DISABLE,
+ SPD5118_TS_DISABLE);
+ regcache_cache_bypass(regmap, false);
- /*
- * If the device type registers are still bad after selecting
- * page 0, this is not a SPD5118 device. Restore original
- * legacy mode register value and abort.
- */
- regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
- if (regval != 0x5118) {
- i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE, mode);
- return -ENODEV;
- }
- }
+ regcache_cache_only(regmap, true);
+ regcache_mark_dirty(regmap);
- /* We are reasonably sure that this is really a SPD5118 hub controller */
return 0;
}
-static int spd5118_probe(struct i2c_client *client)
+static int spd5118_resume(struct device *dev)
{
- struct device *dev = &client->dev;
- unsigned int regval, revision, vendor, bank;
+ struct spd5118_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+
+ regcache_cache_only(regmap, false);
+ return regcache_sync(regmap);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(spd5118_pm_ops, spd5118_suspend, spd5118_resume);
+
+static int spd5118_common_probe(struct device *dev, struct regmap *regmap,
+ bool is_16bit)
+{
+ unsigned int capability, revision, vendor, bank;
struct spd5118_data *data;
struct device *hwmon_dev;
- struct regmap *regmap;
int err;
- err = spd5118_init(client);
- if (err)
- return err;
-
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- regmap = devm_regmap_init_i2c(client, &spd5118_regmap_config);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap), "regmap init failed\n");
-
- err = regmap_read(regmap, SPD5118_REG_CAPABILITY, &regval);
+ err = regmap_read(regmap, SPD5118_REG_CAPABILITY, &capability);
if (err)
return err;
- if (!(regval & SPD5118_CAP_TS_SUPPORT))
+ if (!(capability & SPD5118_CAP_TS_SUPPORT))
return -ENODEV;
+ data->is_16bit = is_16bit;
+
err = regmap_read(regmap, SPD5118_REG_REVISION, &revision);
if (err)
return err;
@@ -627,48 +580,176 @@ static int spd5118_probe(struct i2c_client *client)
return 0;
}
-static int spd5118_suspend(struct device *dev)
+/* I2C */
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int spd5118_detect(struct i2c_client *client, struct i2c_board_info *info)
{
- struct spd5118_data *data = dev_get_drvdata(dev);
- struct regmap *regmap = data->regmap;
- u32 regval;
- int err;
+ struct i2c_adapter *adapter = client->adapter;
+ int regval;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval != 0x5118)
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
+ if (regval < 0 || !spd5118_vendor_valid(regval & 0xff, regval >> 8))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_CAPABILITY);
+ if (regval < 0)
+ return -ENODEV;
+ if (!(regval & SPD5118_CAP_TS_SUPPORT) || (regval & 0xfc))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CLR);
+ if (regval)
+ return -ENODEV;
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_ERROR_CLR);
+ if (regval)
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_REVISION);
+ if (regval < 0 || (regval & 0xc1))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CONFIG);
+ if (regval < 0)
+ return -ENODEV;
+ if (regval & ~SPD5118_TS_DISABLE)
+ return -ENODEV;
+
+ strscpy(info->type, "spd5118", I2C_NAME_SIZE);
+ return 0;
+}
+
+static int spd5118_i2c_init(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int err, regval, mode;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval < 0 || (regval && regval != 0x5118))
+ return -ENODEV;
/*
- * Make sure the configuration register in the regmap cache is current
- * before bypassing it.
+ * If the device type registers return 0, it is possible that the chip
+ * has a non-zero page selected and takes the specification literally,
+ * i.e. disables access to volatile registers besides the page register
+ * if the page is not 0. The Renesas/ITD SPD5118 Hub Controller is known
+ * to show this behavior. Try to identify such chips.
*/
- err = regmap_read(regmap, SPD5118_REG_TEMP_CONFIG, &regval);
- if (err < 0)
- return err;
+ if (!regval) {
+ /* Vendor ID registers must also be 0 */
+ regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
+ if (regval)
+ return -ENODEV;
- regcache_cache_bypass(regmap, true);
- regmap_update_bits(regmap, SPD5118_REG_TEMP_CONFIG, SPD5118_TS_DISABLE,
- SPD5118_TS_DISABLE);
- regcache_cache_bypass(regmap, false);
+ /* The selected page in MR11 must not be 0 */
+ mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE);
+ if (mode < 0 || (mode & ~SPD5118_LEGACY_MODE_MASK) ||
+ !(mode & SPD5118_LEGACY_PAGE_MASK))
+ return -ENODEV;
- regcache_cache_only(regmap, true);
- regcache_mark_dirty(regmap);
+ err = i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE,
+ mode & SPD5118_LEGACY_MODE_ADDR);
+ if (err)
+ return -ENODEV;
+ /*
+ * If the device type registers are still bad after selecting
+ * page 0, this is not a SPD5118 device. Restore original
+ * legacy mode register value and abort.
+ */
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval != 0x5118) {
+ i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE, mode);
+ return -ENODEV;
+ }
+ }
+
+ /* We are reasonably sure that this is really a SPD5118 hub controller */
return 0;
}
-static int spd5118_resume(struct device *dev)
+/*
+ * 16-bit addressing note:
+ *
+ * If I2C_FUNC_I2C is not supported by an I2C adapter driver, regmap uses
+ * SMBus operations as an alternative. To simulate a read operation with a 16-bit
+ * address, it writes the address using i2c_smbus_write_byte_data(), followed
+ * by one or more calls to i2c_smbus_read_byte() to read the data.
+ * Per spd5118 standard, a read operation after writing the address must start
+ * with <Sr> (Repeat Start). However, a SMBus read byte operation starts with
+ * <S> (Start). This resets the register address in the spd5118 chip. As a result,
+ * i2c_smbus_read_byte() always returns data from register address 0x00.
+ *
+ * A working alternative to access chips with 16-bit register addresses in the
+ * absence of I2C_FUNC_I2C support is not known.
+ *
+ * For this reason, 16-bit addressing can only be supported with I2C if the
+ * adapter supports I2C_FUNC_I2C.
+ *
+ * For I2C, the addressing mode selected by the BIOS must not be changed.
+ * Experiments show that at least some PC BIOS versions will not change the
+ * addressing mode on a soft reboot and end up in setup, claiming that some
+ * configuration change happened. This will happen again after a power cycle,
+ * which does reset the addressing mode. To prevent this from happening,
+ * detect if 16-bit addressing is enabled and always use the currently
+ * configured addressing mode.
+ */
+
+static int spd5118_i2c_probe(struct i2c_client *client)
{
- struct spd5118_data *data = dev_get_drvdata(dev);
- struct regmap *regmap = data->regmap;
+ const struct regmap_config *config;
+ struct device *dev = &client->dev;
+ struct regmap *regmap;
+ int err, mode;
+ bool is_16bit;
- regcache_cache_only(regmap, false);
- return regcache_sync(regmap);
-}
+ err = spd5118_i2c_init(client);
+ if (err)
+ return err;
-static DEFINE_SIMPLE_DEV_PM_OPS(spd5118_pm_ops, spd5118_suspend, spd5118_resume);
+ mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE);
+ if (mode < 0)
+ return mode;
+
+ is_16bit = mode & SPD5118_LEGACY_MODE_ADDR;
+ if (is_16bit) {
+ /*
+ * See 16-bit addressing note above explaining why it is
+ * necessary to check for I2C_FUNC_I2C support here.
+ */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "Adapter does not support 16-bit register addresses\n");
+ return -ENODEV;
+ }
+ config = &spd5118_regmap16_config;
+ } else {
+ config = &spd5118_regmap8_config;
+ }
+
+ regmap = devm_regmap_init_i2c(client, config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "regmap init failed\n");
+
+ return spd5118_common_probe(dev, regmap, is_16bit);
+}
-static const struct i2c_device_id spd5118_id[] = {
+static const struct i2c_device_id spd5118_i2c_id[] = {
{ "spd5118" },
{ }
};
-MODULE_DEVICE_TABLE(i2c, spd5118_id);
+MODULE_DEVICE_TABLE(i2c, spd5118_i2c_id);
static const struct of_device_id spd5118_of_ids[] = {
{ .compatible = "jedec,spd5118", },
@@ -676,20 +757,20 @@ static const struct of_device_id spd5118_of_ids[] = {
};
MODULE_DEVICE_TABLE(of, spd5118_of_ids);
-static struct i2c_driver spd5118_driver = {
+static struct i2c_driver spd5118_i2c_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "spd5118",
.of_match_table = spd5118_of_ids,
.pm = pm_sleep_ptr(&spd5118_pm_ops),
},
- .probe = spd5118_probe,
- .id_table = spd5118_id,
+ .probe = spd5118_i2c_probe,
+ .id_table = spd5118_i2c_id,
.detect = IS_ENABLED(CONFIG_SENSORS_SPD5118_DETECT) ? spd5118_detect : NULL,
.address_list = IS_ENABLED(CONFIG_SENSORS_SPD5118_DETECT) ? normal_i2c : NULL,
};
-module_i2c_driver(spd5118_driver);
+module_i2c_driver(spd5118_i2c_driver);
MODULE_AUTHOR("René Rebe <rene@exactcode.de>");
MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 8af44a33055f..a02daa496c9c 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/of.h>
#define DRIVER_NAME "tmp102"
@@ -204,6 +205,10 @@ static int tmp102_probe(struct i2c_client *client)
return -ENODEV;
}
+ err = devm_regulator_get_enable_optional(dev, "vcc");
+ if (err < 0 && err != -ENODEV)
+ return dev_err_probe(dev, err, "Failed to enable regulator\n");
+
tmp102 = devm_kzalloc(dev, sizeof(*tmp102), GFP_KERNEL);
if (!tmp102)
return -ENOMEM;
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 2cdbd5f107a2..11c5d80428cd 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -103,8 +103,6 @@ struct xgene_hwmon_dev {
struct device *hwmon_dev;
bool temp_critical_alarm;
- phys_addr_t comm_base_addr;
- void *pcc_comm_addr;
unsigned int usecs_lat;
};
@@ -125,7 +123,8 @@ static u16 xgene_word_tst_and_clr(u16 *addr, u16 mask)
static int xgene_hwmon_pcc_rd(struct xgene_hwmon_dev *ctx, u32 *msg)
{
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
u32 *ptr = (void *)(generic_comm_base + 1);
int rc, i;
u16 val;
@@ -523,7 +522,8 @@ static void xgene_hwmon_rx_cb(struct mbox_client *cl, void *msg)
static void xgene_hwmon_pcc_rx_cb(struct mbox_client *cl, void *msg)
{
struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
struct slimpro_resp_msg amsg;
/*
@@ -649,7 +649,6 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
} else {
struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
- int version;
acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
&pdev->dev);
@@ -658,8 +657,6 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
goto out_mbox_free;
}
- version = (int)acpi_id->driver_data;
-
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx)) {
dev_err(&pdev->dev, "no pcc-channel property\n");
@@ -686,34 +683,6 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
/*
- * This is the shared communication region
- * for the OS and Platform to communicate over.
- */
- ctx->comm_base_addr = pcc_chan->shmem_base_addr;
- if (ctx->comm_base_addr) {
- if (version == XGENE_HWMON_V2)
- ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
- ctx->comm_base_addr,
- pcc_chan->shmem_size);
- else
- ctx->pcc_comm_addr = devm_memremap(&pdev->dev,
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WB);
- } else {
- dev_err(&pdev->dev, "Failed to get PCC comm region\n");
- rc = -ENODEV;
- goto out;
- }
-
- if (IS_ERR_OR_NULL(ctx->pcc_comm_addr)) {
- dev_err(&pdev->dev,
- "Failed to ioremap PCC comm region\n");
- rc = -ENOMEM;
- goto out;
- }
-
- /*
* pcc_chan->latency is just a Nominal value. In reality
* the remote processor could be much slower to reply.
* So add an arbitrary amount of wait on top of Nominal.
diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig
index 4b6359326ede..4f7d2b6d79e2 100644
--- a/drivers/hwtracing/intel_th/Kconfig
+++ b/drivers/hwtracing/intel_th/Kconfig
@@ -60,6 +60,7 @@ config INTEL_TH_STH
config INTEL_TH_MSU
tristate "Intel(R) Trace Hub Memory Storage Unit"
+ depends on MMU
help
Memory Storage Unit (MSU) trace output device enables
storing STP traces to system memory. It supports single
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index bf99d79a4192..7163950eb371 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
+#include <linux/pfn_t.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -976,7 +977,6 @@ static void msc_buffer_contig_free(struct msc *msc)
for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
struct page *page = virt_to_page(msc->base + off);
- page->mapping = NULL;
__free_page(page);
}
@@ -1158,9 +1158,6 @@ static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
int i;
for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
- struct page *page = msc_sg_page(sg);
-
- page->mapping = NULL;
dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
sg_virt(sg), sg_dma_address(sg));
}
@@ -1601,22 +1598,10 @@ static void msc_mmap_close(struct vm_area_struct *vma)
{
struct msc_iter *iter = vma->vm_file->private_data;
struct msc *msc = iter->msc;
- unsigned long pg;
if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
return;
- /* drop page _refcounts */
- for (pg = 0; pg < msc->nr_pages; pg++) {
- struct page *page = msc_buffer_get_page(msc, pg);
-
- if (WARN_ON_ONCE(!page))
- continue;
-
- if (page->mapping)
- page->mapping = NULL;
- }
-
/* last mapping -- drop user_count */
atomic_dec(&msc->user_count);
mutex_unlock(&msc->buf_mutex);
@@ -1626,16 +1611,14 @@ static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
struct msc_iter *iter = vmf->vma->vm_file->private_data;
struct msc *msc = iter->msc;
+ struct page *page;
- vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
- if (!vmf->page)
+ page = msc_buffer_get_page(msc, vmf->pgoff);
+ if (!page)
return VM_FAULT_SIGBUS;
- get_page(vmf->page);
- vmf->page->mapping = vmf->vma->vm_file->f_mapping;
- vmf->page->index = vmf->pgoff;
-
- return 0;
+ get_page(page);
+ return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page));
}
static const struct vm_operations_struct msc_mmap_ops = {
@@ -1676,7 +1659,7 @@ out:
atomic_dec(&msc->user_count);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY);
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY | VM_MIXEDMAP);
vma->vm_ops = &msc_mmap_ops;
return ret;
}
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 7a01f2687b4c..740066ceaea3 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -19,6 +19,7 @@
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pcf.h>
+#include <linux/string_choices.h>
#include "i2c-algo-pcf.h"
@@ -316,7 +317,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
pmsg = &msgs[i];
DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: Doing %s %d bytes to 0x%02x - %d of %d messages\n",
- pmsg->flags & I2C_M_RD ? "read" : "write",
+ str_read_write(pmsg->flags & I2C_M_RD),
pmsg->len, pmsg->addr, i + 1, num);)
ret = pcf_doAddress(adap, pmsg);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 83c88c79afe2..48c5ab832009 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -200,7 +200,7 @@ config I2C_ISMT
config I2C_PIIX4
tristate "Intel PIIX4 and compatible (ATI/AMD/Serverworks/Broadcom/SMSC)"
- depends on PCI && HAS_IOPORT
+ depends on PCI && HAS_IOPORT && X86
select I2C_SMBUS
help
If you say yes to this option, support will be included for the Intel
@@ -592,6 +592,17 @@ config I2C_DESIGNWARE_PLATFORM
This driver can also be built as a module. If so, the module
will be called i2c-designware-platform.
+config I2C_DESIGNWARE_AMDISP
+ tristate "Synopsys DesignWare Platform for AMDISP"
+ depends on DRM_AMD_ISP || COMPILE_TEST
+ depends on I2C_DESIGNWARE_CORE
+ help
+ If you say yes to this option, support will be included for the
+ AMDISP Synopsys DesignWare I2C adapter.
+
+ This driver can also be built as a module. If so, the module
+ will be called amd_isp_i2c_designware.
+
config I2C_DESIGNWARE_AMDPSP
bool "AMD PSP I2C semaphore support"
depends on ACPI
@@ -845,7 +856,7 @@ config I2C_LS2X
config I2C_MLXBF
tristate "Mellanox BlueField I2C controller"
- depends on MELLANOX_PLATFORM && ARM64
+ depends on (MELLANOX_PLATFORM && ARM64) || COMPILE_TEST
depends on ACPI
select I2C_SLAVE
help
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index c1252e2b779e..04db855fdfd6 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o
i2c-designware-platform-y := i2c-designware-platdrv.o
i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_AMDPSP) += i2c-designware-amdpsp.o
i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o
+obj-$(CONFIG_I2C_DESIGNWARE_AMDISP) += i2c-designware-amdisp.o
obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
i2c-designware-pci-y := i2c-designware-pcidrv.o
obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index ee3b469ddfb9..374fc50bb205 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -26,6 +26,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/string_choices.h>
#include "i2c-at91.h"
@@ -523,7 +524,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
*/
dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
- (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
+ str_read_write(dev->msg->flags & I2C_M_RD), dev->buf_len);
reinit_completion(&dev->cmd_complete);
dev->transfer_status = 0;
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 332a0fcca28d..63bc3c8f49d3 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -224,11 +224,6 @@ static void slave_rx_tasklet_fn(unsigned long);
| BIT(IS_S_TX_UNDERRUN_SHIFT) | BIT(IS_S_RX_FIFO_FULL_SHIFT)\
| BIT(IS_S_RX_THLD_SHIFT))
-static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave);
-static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave);
-static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
- bool enable);
-
static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 offset)
{
@@ -264,8 +259,8 @@ static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
}
}
-static void bcm_iproc_i2c_slave_init(
- struct bcm_iproc_i2c_dev *iproc_i2c, bool need_reset)
+static void bcm_iproc_i2c_slave_init(struct bcm_iproc_i2c_dev *iproc_i2c,
+ bool need_reset)
{
u32 val;
@@ -276,8 +271,8 @@ static void bcm_iproc_i2c_slave_init(
val |= BIT(CFG_RESET_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
- /* wait 100 usec per spec */
- udelay(100);
+ /* wait approximately 100 usec as per spec */
+ usleep_range(100, 200);
/* bring controller out of reset */
val &= ~(BIT(CFG_RESET_SHIFT));
@@ -316,6 +311,19 @@ static void bcm_iproc_i2c_slave_init(
iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
}
+static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
+ bool enable)
+{
+ u32 val;
+
+ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
+ if (enable)
+ val |= BIT(CFG_EN_SHIFT);
+ else
+ val &= ~BIT(CFG_EN_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
+}
+
static bool bcm_iproc_i2c_check_slave_status
(struct bcm_iproc_i2c_dev *iproc_i2c, u32 status)
{
@@ -438,7 +446,6 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 val;
u8 value;
-
if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
iproc_i2c->tx_underrun++;
if (iproc_i2c->tx_underrun == 1)
@@ -542,7 +549,7 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c)
{
struct i2c_msg *msg = iproc_i2c->msg;
- uint32_t val;
+ u32 val;
/* Read valid data from RX FIFO */
while (iproc_i2c->rx_bytes < msg->len) {
@@ -688,8 +695,8 @@ static void bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
val &= ~(BIT(CFG_EN_SHIFT));
iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
- /* wait 100 usec per spec */
- udelay(100);
+ /* wait approximately 100 usec as per spec */
+ usleep_range(100, 200);
/* bring controller out of reset */
val &= ~(BIT(CFG_RESET_SHIFT));
@@ -708,19 +715,6 @@ static void bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, 0xffffffff);
}
-static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
- bool enable)
-{
- u32 val;
-
- val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
- if (enable)
- val |= BIT(CFG_EN_SHIFT);
- else
- val &= ~BIT(CFG_EN_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
-}
-
static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
struct i2c_msg *msg)
{
@@ -734,31 +728,31 @@ static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
return 0;
case M_CMD_STATUS_LOST_ARB:
- dev_dbg(iproc_i2c->device, "lost bus arbitration\n");
+ dev_err(iproc_i2c->device, "lost bus arbitration\n");
return -EAGAIN;
case M_CMD_STATUS_NACK_ADDR:
- dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
+ dev_err(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
return -ENXIO;
case M_CMD_STATUS_NACK_DATA:
- dev_dbg(iproc_i2c->device, "NAK data\n");
+ dev_err(iproc_i2c->device, "NAK data\n");
return -ENXIO;
case M_CMD_STATUS_TIMEOUT:
- dev_dbg(iproc_i2c->device, "bus timeout\n");
+ dev_err(iproc_i2c->device, "bus timeout\n");
return -ETIMEDOUT;
case M_CMD_STATUS_FIFO_UNDERRUN:
- dev_dbg(iproc_i2c->device, "FIFO under-run\n");
+ dev_err(iproc_i2c->device, "FIFO under-run\n");
return -ENXIO;
case M_CMD_STATUS_RX_FIFO_FULL:
- dev_dbg(iproc_i2c->device, "RX FIFO full\n");
+ dev_err(iproc_i2c->device, "RX FIFO full\n");
return -ETIMEDOUT;
default:
- dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
+ dev_err(iproc_i2c->device, "unknown error code=%d\n", val);
/* re-initialize i2c for recovery */
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
@@ -833,7 +827,7 @@ static int bcm_iproc_i2c_xfer_wait(struct bcm_iproc_i2c_dev *iproc_i2c,
* The i2c quirks are set to enforce this rule.
*/
static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
- struct i2c_msg *msgs, bool process_call)
+ struct i2c_msg *msgs, bool process_call)
{
int i;
u8 addr;
@@ -842,8 +836,8 @@ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
struct i2c_msg *msg = &msgs[0];
/* check if bus is busy */
- if (!!(iproc_i2c_rd_reg(iproc_i2c,
- M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT))) {
+ if (iproc_i2c_rd_reg(iproc_i2c,
+ M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT)) {
dev_warn(iproc_i2c->device, "bus is busy\n");
return -EBUSY;
}
@@ -970,14 +964,14 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
ret = bcm_iproc_i2c_xfer_internal(iproc_i2c, msgs, process_call);
if (ret) {
- dev_dbg(iproc_i2c->device, "xfer failed\n");
+ dev_err(iproc_i2c->device, "xfer failed\n");
return ret;
}
return num;
}
-static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
+static u32 bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
{
u32 val;
@@ -989,6 +983,63 @@ static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
return val;
}
+static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
+
+ if (iproc_i2c->slave)
+ return -EBUSY;
+
+ if (slave->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ iproc_i2c->slave = slave;
+
+ tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
+ (unsigned long)iproc_i2c);
+
+ bcm_iproc_i2c_slave_init(iproc_i2c, false);
+
+ return 0;
+}
+
+static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
+ u32 tmp;
+
+ if (!iproc_i2c->slave)
+ return -EINVAL;
+
+ disable_irq(iproc_i2c->irq);
+
+ tasklet_kill(&iproc_i2c->slave_rx_tasklet);
+
+ /* disable all slave interrupts */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
+ IE_S_ALL_INTERRUPT_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
+
+ /* Erase the slave address programmed */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
+ tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
+
+ /* flush TX/RX FIFOs */
+ tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp);
+
+ /* clear all pending slave interrupts */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
+
+ iproc_i2c->slave = NULL;
+
+ enable_irq(iproc_i2c->irq);
+
+ return 0;
+}
+
static struct i2c_algorithm bcm_iproc_algo = {
.master_xfer = bcm_iproc_i2c_xfer,
.functionality = bcm_iproc_i2c_functionality,
@@ -1010,21 +1061,18 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
"clock-frequency", &bus_speed);
if (ret < 0) {
dev_info(iproc_i2c->device,
- "unable to interpret clock-frequency DT property\n");
+ "unable to interpret clock-frequency DT property\n");
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
}
- if (bus_speed < I2C_MAX_STANDARD_MODE_FREQ) {
- dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n",
- bus_speed);
- dev_err(iproc_i2c->device,
- "valid speeds are 100khz and 400khz\n");
- return -EINVAL;
- } else if (bus_speed < I2C_MAX_FAST_MODE_FREQ) {
+ if (bus_speed < I2C_MAX_STANDARD_MODE_FREQ)
+ return dev_err_probe(iproc_i2c->device, -EINVAL,
+ "%d Hz not supported (out of 100-400 kHz range)\n",
+ bus_speed);
+ else if (bus_speed < I2C_MAX_FAST_MODE_FREQ)
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- } else {
+ else
bus_speed = I2C_MAX_FAST_MODE_FREQ;
- }
iproc_i2c->bus_speed = bus_speed;
val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
@@ -1039,9 +1087,9 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
static int bcm_iproc_i2c_probe(struct platform_device *pdev)
{
- int irq, ret = 0;
struct bcm_iproc_i2c_dev *iproc_i2c;
struct i2c_adapter *adap;
+ int irq, ret;
iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c),
GFP_KERNEL);
@@ -1066,11 +1114,9 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
ret = of_property_read_u32(iproc_i2c->device->of_node,
"brcm,ape-hsls-addr-mask",
&iproc_i2c->ape_addr_mask);
- if (ret < 0) {
- dev_err(iproc_i2c->device,
- "'brcm,ape-hsls-addr-mask' missing\n");
- return -EINVAL;
- }
+ if (ret < 0)
+ return dev_err_probe(iproc_i2c->device, ret,
+ "'brcm,ape-hsls-addr-mask' missing\n");
spin_lock_init(&iproc_i2c->idm_lock);
@@ -1090,11 +1136,9 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
ret = devm_request_irq(iproc_i2c->device, irq,
bcm_iproc_i2c_isr, 0, pdev->name,
iproc_i2c);
- if (ret < 0) {
- dev_err(iproc_i2c->device,
- "unable to request irq %i\n", irq);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(iproc_i2c->device, ret,
+ "unable to request irq %i\n", irq);
iproc_i2c->irq = irq;
} else {
@@ -1106,9 +1150,8 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
adap = &iproc_i2c->adapter;
i2c_set_adapdata(adap, iproc_i2c);
- snprintf(adap->name, sizeof(adap->name),
- "Broadcom iProc (%s)",
- of_node_full_name(iproc_i2c->device->of_node));
+ snprintf(adap->name, sizeof(adap->name), "Broadcom iProc (%s)",
+ of_node_full_name(iproc_i2c->device->of_node));
adap->algo = &bcm_iproc_algo;
adap->quirks = &bcm_iproc_i2c_quirks;
adap->dev.parent = &pdev->dev;
@@ -1182,62 +1225,6 @@ static const struct dev_pm_ops bcm_iproc_i2c_pm_ops = {
.resume_early = &bcm_iproc_i2c_resume
};
-static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
-{
- struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
-
- if (iproc_i2c->slave)
- return -EBUSY;
-
- if (slave->flags & I2C_CLIENT_TEN)
- return -EAFNOSUPPORT;
-
- iproc_i2c->slave = slave;
-
- tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
- (unsigned long)iproc_i2c);
-
- bcm_iproc_i2c_slave_init(iproc_i2c, false);
- return 0;
-}
-
-static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
-{
- u32 tmp;
- struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
-
- if (!iproc_i2c->slave)
- return -EINVAL;
-
- disable_irq(iproc_i2c->irq);
-
- tasklet_kill(&iproc_i2c->slave_rx_tasklet);
-
- /* disable all slave interrupts */
- tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
- tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
- IE_S_ALL_INTERRUPT_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
-
- /* Erase the slave address programmed */
- tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
- tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
-
- /* flush TX/RX FIFOs */
- tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
- iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp);
-
- /* clear all pending slave interrupts */
- iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
-
- iproc_i2c->slave = NULL;
-
- enable_irq(iproc_i2c->irq);
-
- return 0;
-}
-
static const struct of_device_id bcm_iproc_i2c_of_match[] = {
{
.compatible = "brcm,iproc-i2c",
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 26a36a65521e..606ac071cb80 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -467,7 +467,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
return ret;
/* Alloc and register client IRQ */
- adap->irq_domain = irq_domain_add_linear(NULL, 1, &irq_domain_simple_ops, NULL);
+ adap->irq_domain = irq_domain_create_linear(NULL, 1, &irq_domain_simple_ops, NULL);
if (!adap->irq_domain)
return -ENOMEM;
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 43bf90d90eeb..208ce4f9e782 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -247,6 +247,9 @@ static int ec_i2c_probe(struct platform_device *pdev)
u32 remote_bus;
int err;
+ if (!ec)
+ return dev_err_probe(dev, -EPROBE_DEFER, "couldn't find parent EC device\n");
+
if (!ec->cmd_xfer) {
dev_err(dev, "Missing sendrecv\n");
return -EINVAL;
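
dev_err_probe(), as used above, returns the error code it was given after logging it; for -EPROBE_DEFER it records the reason (visible in /sys/kernel/debug/devices_deferred) instead of printing an error. A hypothetical sketch of the pattern (foo_check_parent is made up):

#include <linux/device.h>
#include <linux/errno.h>

/* Defer cleanly when a required parent device is not ready yet. */
static int foo_check_parent(struct device *dev, const void *parent)
{
        if (!parent)
                return dev_err_probe(dev, -EPROBE_DEFER,
                                     "couldn't find parent device\n");
        return 0;
}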
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 6a909d339681..6a3d4e9e07f4 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -551,7 +551,8 @@ out:
static u32 i2c_davinci_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_PROTOCOL_MANGLING;
}
static void terminate_read(struct davinci_i2c_dev *dev)
diff --git a/drivers/i2c/busses/i2c-designware-amdisp.c b/drivers/i2c/busses/i2c-designware-amdisp.c
new file mode 100644
index 000000000000..ad6f08338124
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware-amdisp.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Based on Synopsys DesignWare I2C adapter driver.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "i2c-designware-core.h"
+
+#define DRV_NAME "amd_isp_i2c_designware"
+#define AMD_ISP_I2C_INPUT_CLK 100 /* MHz */
+
+static void amd_isp_dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *i2c_dev)
+{
+ pm_runtime_disable(i2c_dev->dev);
+
+ if (i2c_dev->shared_with_punit)
+ pm_runtime_put_noidle(i2c_dev->dev);
+}
+
+static inline u32 amd_isp_dw_i2c_get_clk_rate(struct dw_i2c_dev *i2c_dev)
+{
+ return AMD_ISP_I2C_INPUT_CLK * 1000;
+}
+
+static int amd_isp_dw_i2c_plat_probe(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *isp_i2c_dev;
+ struct i2c_adapter *adap;
+ int ret;
+
+ isp_i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*isp_i2c_dev), GFP_KERNEL);
+ if (!isp_i2c_dev)
+ return -ENOMEM;
+ isp_i2c_dev->dev = &pdev->dev;
+
+ pdev->dev.init_name = DRV_NAME;
+
+ /*
+ * Use polling mode to send/receive data, because there is
+ * no IRQ connection from the ISP I2C controller
+ */
+ isp_i2c_dev->flags |= ACCESS_POLLING;
+ platform_set_drvdata(pdev, isp_i2c_dev);
+
+ isp_i2c_dev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(isp_i2c_dev->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(isp_i2c_dev->base),
+ "failed to get IOMEM resource\n");
+
+ isp_i2c_dev->get_clk_rate_khz = amd_isp_dw_i2c_get_clk_rate;
+ ret = i2c_dw_fw_parse_and_configure(isp_i2c_dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to parse i2c dw fwnode and configure\n");
+
+ i2c_dw_configure(isp_i2c_dev);
+
+ adap = &isp_i2c_dev->adapter;
+ adap->owner = THIS_MODULE;
+ ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
+ adap->dev.of_node = pdev->dev.of_node;
+ /* use dynamically allocated adapter id */
+ adap->nr = -1;
+
+ if (isp_i2c_dev->flags & ACCESS_NO_IRQ_SUSPEND)
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE);
+ else
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND);
+
+ device_enable_async_suspend(&pdev->dev);
+
+ if (isp_i2c_dev->shared_with_punit)
+ pm_runtime_get_noresume(&pdev->dev);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ ret = i2c_dw_probe(isp_i2c_dev);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "i2c_dw_probe failed\n");
+ goto error_release_rpm;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+
+error_release_rpm:
+ amd_isp_dw_i2c_plat_pm_cleanup(isp_i2c_dev);
+ pm_runtime_put_sync(&pdev->dev);
+ return ret;
+}
+
+static void amd_isp_dw_i2c_plat_remove(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *isp_i2c_dev = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ i2c_del_adapter(&isp_i2c_dev->adapter);
+
+ i2c_dw_disable(isp_i2c_dev);
+
+ pm_runtime_put_sync(&pdev->dev);
+ amd_isp_dw_i2c_plat_pm_cleanup(isp_i2c_dev);
+}
+
+static int amd_isp_dw_i2c_plat_prepare(struct device *dev)
+{
+ /*
+ * If the ACPI companion device object is present for this device, it
+ * may be accessed during suspend and resume of other devices via I2C
+ * operation regions, so tell the PM core and middle layers to avoid
+ * skipping system suspend/resume callbacks for it in that case.
+ */
+ return !has_acpi_companion(dev);
+}
+
+static int amd_isp_dw_i2c_plat_runtime_suspend(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+
+ if (i_dev->shared_with_punit)
+ return 0;
+
+ i2c_dw_disable(i_dev);
+ i2c_dw_prepare_clk(i_dev, false);
+
+ return 0;
+}
+
+static int amd_isp_dw_i2c_plat_suspend(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+ int ret;
+
+ if (!i_dev)
+ return -ENODEV;
+
+ ret = amd_isp_dw_i2c_plat_runtime_suspend(dev);
+ if (!ret)
+ i2c_mark_adapter_suspended(&i_dev->adapter);
+
+ return ret;
+}
+
+static int amd_isp_dw_i2c_plat_runtime_resume(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+
+ if (!i_dev)
+ return -ENODEV;
+
+ if (!i_dev->shared_with_punit)
+ i2c_dw_prepare_clk(i_dev, true);
+ if (i_dev->init)
+ i_dev->init(i_dev);
+
+ return 0;
+}
+
+static int amd_isp_dw_i2c_plat_resume(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+
+ amd_isp_dw_i2c_plat_runtime_resume(dev);
+ i2c_mark_adapter_resumed(&i_dev->adapter);
+
+ return 0;
+}
+
+static const struct dev_pm_ops amd_isp_dw_i2c_dev_pm_ops = {
+ .prepare = pm_sleep_ptr(amd_isp_dw_i2c_plat_prepare),
+ LATE_SYSTEM_SLEEP_PM_OPS(amd_isp_dw_i2c_plat_suspend, amd_isp_dw_i2c_plat_resume)
+ RUNTIME_PM_OPS(amd_isp_dw_i2c_plat_runtime_suspend, amd_isp_dw_i2c_plat_runtime_resume, NULL)
+};
+
+/* Work with hotplug and coldplug */
+MODULE_ALIAS("platform:amd_isp_i2c_designware");
+
+static struct platform_driver amd_isp_dw_i2c_driver = {
+ .probe = amd_isp_dw_i2c_plat_probe,
+ .remove = amd_isp_dw_i2c_plat_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = pm_ptr(&amd_isp_dw_i2c_dev_pm_ops),
+ },
+};
+module_platform_driver(amd_isp_dw_i2c_driver);
+
+MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter in AMD ISP");
+MODULE_IMPORT_NS("I2C_DW");
+MODULE_IMPORT_NS("I2C_DW_COMMON");
+MODULE_AUTHOR("Venkata Narendra Kumar Gutta <vengutta@amd.com>");
+MODULE_AUTHOR("Pratap Nirujogi <pratap.nirujogi@amd.com>");
+MODULE_AUTHOR("Bin Du <bin.du@amd.com>");
+MODULE_LICENSE("GPL");
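
The new adapter binds purely by platform-device name (DRV_NAME and the MODULE_ALIAS above), so the ISP side only needs to register a device called "amd_isp_i2c_designware" carrying the MMIO resource. A hypothetical parent-side sketch, not the actual ISP code (isp_add_i2c_adapter and the resource handling are assumptions):

#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct platform_device *isp_i2c_pdev;

/* Register the I2C adapter child; `res` describes its MMIO region. */
static int isp_add_i2c_adapter(const struct resource *res)
{
        isp_i2c_pdev = platform_device_register_simple("amd_isp_i2c_designware",
                                                       PLATFORM_DEVID_NONE,
                                                       res, 1);
        return PTR_ERR_OR_ZERO(isp_i2c_pdev);
}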
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 8eb7bd640f8d..5b1e8f74c4ac 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -572,8 +572,10 @@ u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev)
* Clock is not necessary if we got LCNT/HCNT values directly from
* the platform code.
*/
- if (WARN_ON_ONCE(!dev->get_clk_rate_khz))
+ if (!dev->get_clk_rate_khz) {
+ dev_dbg_once(dev->dev, "Callback get_clk_rate_khz() is not defined\n");
return 0;
+ }
return dev->get_clk_rate_khz(dev);
}
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 8e0267c7cc29..f21f9877c040 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -278,9 +278,11 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node);
- if (IS_ERR(dev->slave))
+ if (IS_ERR(dev->slave)) {
+ i2c_del_adapter(&dev->adapter);
return dev_err_probe(device, PTR_ERR(dev->slave),
"register UCSI failed\n");
+ }
}
pm_runtime_set_autosuspend_delay(device, 1000);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index d6e1ee935399..879719e91df2 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -34,7 +34,7 @@
static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
{
- return clk_get_rate(dev->clk) / KILO;
+ return clk_get_rate(dev->clk) / HZ_PER_KHZ;
}
#ifdef CONFIG_OF
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 5cd4a5f7a472..b936a240db0a 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -96,7 +96,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
i2c_dw_disable(dev);
synchronize_irq(dev->irq);
dev->slave = NULL;
- pm_runtime_put(dev->dev);
+ pm_runtime_put_sync_suspend(dev->dev);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 48e1af544b75..a7f89946dad4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1180,7 +1180,7 @@ static void i801_probe_optional_targets(struct i801_priv *priv)
#ifdef CONFIG_I2C_I801_MUX
if (!priv->mux_pdev)
#endif
- i2c_register_spd(&priv->adapter);
+ i2c_register_spd_write_enable(&priv->adapter);
}
#else
static void __init input_apanel_init(void) {}
@@ -1283,7 +1283,7 @@ static int i801_notifier_call(struct notifier_block *nb, unsigned long action,
return NOTIFY_DONE;
/* Call i2c_register_spd for muxed child segments */
- i2c_register_spd(to_i2c_adapter(dev));
+ i2c_register_spd_write_enable(to_i2c_adapter(dev));
return NOTIFY_OK;
}
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 0d4b3935e687..342d47e67586 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -1380,9 +1380,9 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
return 0;
rpm_disable:
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 9e5d454d8318..de01dfecb16e 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1711,11 +1711,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return irq;
+ return dev_err_probe(&pdev->dev, irq, "can't get IRQ\n");
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
- return PTR_ERR(base);
+ return dev_err_probe(&pdev->dev, PTR_ERR(base), "can't get IO memory\n");
phy_addr = (dma_addr_t)res->start;
i2c_imx = devm_kzalloc(&pdev->dev, sizeof(*i2c_imx), GFP_KERNEL);
@@ -1810,13 +1810,15 @@ static int i2c_imx_probe(struct platform_device *pdev)
*/
ret = i2c_imx_dma_request(i2c_imx, phy_addr);
if (ret) {
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ dev_err_probe(&pdev->dev, ret, "can't get DMA channels\n");
goto clk_notifier_unregister;
- else if (ret == -ENODEV)
+ } else if (ret == -ENODEV) {
dev_dbg(&pdev->dev, "Only use PIO mode\n");
- else
+ } else {
dev_warn(&pdev->dev, "Failed to setup DMA (%pe), only use PIO mode\n",
ERR_PTR(ret));
+ }
}
/* Add I2C adapter */
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index c93c02aa6ac8..7aaefb21416a 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -933,7 +933,7 @@ ismt_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_region(pdev, SMBBAR, ismt_driver.name);
+ err = pcim_request_region(pdev, SMBBAR, ismt_driver.name);
if (err) {
dev_err(&pdev->dev,
"Failed to request SMBus region 0x%lx-0x%lx\n",
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 6943a0de860a..ccd13c4fb83e 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -442,8 +442,13 @@ static int i2c_lpc2k_suspend(struct device *dev)
static int i2c_lpc2k_resume(struct device *dev)
{
struct lpc2k_i2c *i2c = dev_get_drvdata(dev);
+ int ret;
- clk_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock.\n");
+ return ret;
+ }
i2c_lpc2k_reset(i2c);
return 0;
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
index 5db73429125c..492bf4c34722 100644
--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -76,6 +76,8 @@
#define CORE_I2C_FREQ (0x14)
#define CORE_I2C_GLITCHREG (0x18)
#define CORE_I2C_SLAVE1_ADDR (0x1c)
+#define CORE_I2C_SMBUS_MSG_WR (0x0)
+#define CORE_I2C_SMBUS_MSG_RD (0x1)
#define PCLK_DIV_960 (CTRL_CR2)
#define PCLK_DIV_256 (0)
@@ -424,9 +426,109 @@ static u32 mchp_corei2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
+static int mchp_corei2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
+ char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct i2c_msg msgs[2];
+ struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
+ u8 tx_buf[I2C_SMBUS_BLOCK_MAX + 2];
+ u8 rx_buf[I2C_SMBUS_BLOCK_MAX + 1];
+ int num_msgs = 1;
+
+ msgs[CORE_I2C_SMBUS_MSG_WR].addr = addr;
+ msgs[CORE_I2C_SMBUS_MSG_WR].flags = 0;
+
+ if (read_write == I2C_SMBUS_READ && size <= I2C_SMBUS_BYTE)
+ msgs[CORE_I2C_SMBUS_MSG_WR].flags = I2C_M_RD;
+
+ if (read_write == I2C_SMBUS_WRITE && size <= I2C_SMBUS_WORD_DATA)
+ msgs[CORE_I2C_SMBUS_MSG_WR].len = size;
+
+ if (read_write == I2C_SMBUS_WRITE && size > I2C_SMBUS_BYTE) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = tx_buf;
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[0] = command;
+ }
+
+ if (read_write == I2C_SMBUS_READ && size >= I2C_SMBUS_BYTE_DATA) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = tx_buf;
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[0] = command;
+ msgs[CORE_I2C_SMBUS_MSG_RD].addr = addr;
+ msgs[CORE_I2C_SMBUS_MSG_RD].flags = I2C_M_RD;
+ num_msgs = 2;
+ }
+
+ if (read_write == I2C_SMBUS_READ && size > I2C_SMBUS_QUICK)
+ msgs[CORE_I2C_SMBUS_MSG_WR].len = 1;
+
+ switch (size) {
+ case I2C_SMBUS_QUICK:
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = NULL;
+ return 0;
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_WRITE)
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = &command;
+ else
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = &data->byte;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[1] = data->byte;
+ } else {
+ msgs[CORE_I2C_SMBUS_MSG_RD].len = size - 1;
+ msgs[CORE_I2C_SMBUS_MSG_RD].buf = &data->byte;
+ }
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[1] = data->word & 0xFF;
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[2] = (data->word >> 8) & 0xFF;
+ } else {
+ msgs[CORE_I2C_SMBUS_MSG_RD].len = size - 1;
+ msgs[CORE_I2C_SMBUS_MSG_RD].buf = rx_buf;
+ }
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ int data_len;
+
+ data_len = data->block[0];
+ msgs[CORE_I2C_SMBUS_MSG_WR].len = data_len + 2;
+ for (int i = 0; i <= data_len; i++)
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[i + 1] = data->block[i];
+ } else {
+ msgs[CORE_I2C_SMBUS_MSG_RD].len = I2C_SMBUS_BLOCK_MAX + 1;
+ msgs[CORE_I2C_SMBUS_MSG_RD].buf = rx_buf;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ mchp_corei2c_xfer(&idev->adapter, msgs, num_msgs);
+ if (read_write == I2C_SMBUS_WRITE || size <= I2C_SMBUS_BYTE_DATA)
+ return 0;
+
+ switch (size) {
+ case I2C_SMBUS_WORD_DATA:
+ data->word = (rx_buf[0] | (rx_buf[1] << 8));
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (rx_buf[0] > I2C_SMBUS_BLOCK_MAX)
+ rx_buf[0] = I2C_SMBUS_BLOCK_MAX;
+ /* Per the SMBus protocol, the first byte of the block is its length. */
+ for (int i = 0; i <= rx_buf[0]; i++)
+ data->block[i] = rx_buf[i];
+ break;
+ }
+
+ return 0;
+}
+
static const struct i2c_algorithm mchp_corei2c_algo = {
.master_xfer = mchp_corei2c_xfer,
.functionality = mchp_corei2c_func,
+ .smbus_xfer = mchp_corei2c_smbus_xfer,
};
static int mchp_corei2c_probe(struct platform_device *pdev)
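
With a .smbus_xfer callback in place, i2c_smbus_*() calls from client drivers are routed through it rather than being emulated on top of .master_xfer. A hypothetical client-side read showing what ends up exercising the new path (foo_read_word and register 0x00 are made up):

#include <linux/i2c.h>

/* Read a 16-bit register over SMBus. */
static int foo_read_word(struct i2c_client *client)
{
        s32 val = i2c_smbus_read_word_data(client, 0x00);

        if (val < 0)
                return val;
        return val & 0xffff;
}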
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index 280dde53d7f3..8345f7e6385d 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
/* Defines what functionality is present. */
#define MLXBF_I2C_FUNC_SMBUS_BLOCK \
@@ -197,6 +198,7 @@
#define MLXBF_I2C_MASK_8 GENMASK(7, 0)
#define MLXBF_I2C_MASK_16 GENMASK(15, 0)
+#define MLXBF_I2C_MASK_32 GENMASK(31, 0)
#define MLXBF_I2C_MST_ADDR_OFFSET 0x200
@@ -223,7 +225,7 @@
#define MLXBF_I2C_MASTER_ENABLE \
(MLXBF_I2C_MASTER_LOCK_BIT | MLXBF_I2C_MASTER_BUSY_BIT | \
- MLXBF_I2C_MASTER_START_BIT | MLXBF_I2C_MASTER_STOP_BIT)
+ MLXBF_I2C_MASTER_START_BIT)
#define MLXBF_I2C_MASTER_ENABLE_WRITE \
(MLXBF_I2C_MASTER_ENABLE | MLXBF_I2C_MASTER_CTL_WRITE_BIT)
@@ -337,6 +339,7 @@ enum {
MLXBF_I2C_F_SMBUS_BLOCK = BIT(5),
MLXBF_I2C_F_SMBUS_PEC = BIT(6),
MLXBF_I2C_F_SMBUS_PROCESS_CALL = BIT(7),
+ MLXBF_I2C_F_WRITE_WITHOUT_STOP = BIT(8),
};
/* Mellanox BlueField chip type. */
@@ -637,16 +640,19 @@ static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv,
}
static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
- u8 len, u8 block_en, u8 pec_en, bool read)
+ u8 len, u8 block_en, u8 pec_en, bool read,
+ bool stop)
{
- u32 command;
+ u32 command = 0;
/* Set Master GW control word. */
+ if (stop)
+ command |= MLXBF_I2C_MASTER_STOP_BIT;
if (read) {
- command = MLXBF_I2C_MASTER_ENABLE_READ;
+ command |= MLXBF_I2C_MASTER_ENABLE_READ;
command |= rol32(len, MLXBF_I2C_MASTER_READ_SHIFT);
} else {
- command = MLXBF_I2C_MASTER_ENABLE_WRITE;
+ command |= MLXBF_I2C_MASTER_ENABLE_WRITE;
command |= rol32(len, MLXBF_I2C_MASTER_WRITE_SHIFT);
}
command |= rol32(slave, MLXBF_I2C_MASTER_SLV_ADDR_SHIFT);
@@ -681,8 +687,10 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
u8 op_idx, data_idx, data_len, write_len, read_len;
struct mlxbf_i2c_smbus_operation *operation;
u8 read_en, write_en, block_en, pec_en;
- u8 slave, flags, addr;
+ bool stop_after_write = true;
+ u8 slave, addr;
u8 *read_buf;
+ u32 flags;
u32 bits;
int ret;
@@ -754,7 +762,16 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
memcpy(data_desc + data_idx,
operation->buffer, operation->length);
data_idx += operation->length;
+
+ /*
+ * The stop condition can be skipped when writing on the bus
+ * to implement a repeated start condition on the next read
+ * as required for several SMBus and I2C operations.
+ */
+ if (flags & MLXBF_I2C_F_WRITE_WITHOUT_STOP)
+ stop_after_write = false;
}
+
/*
* We assume that read operations are performed only once per
* SMBus transaction. *TBD* protect this statement so it won't
@@ -780,7 +797,7 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
if (write_en) {
ret = mlxbf_i2c_smbus_enable(priv, slave, write_len, block_en,
- pec_en, 0);
+ pec_en, 0, stop_after_write);
if (ret)
goto out_unlock;
}
@@ -790,7 +807,7 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
mlxbf_i2c_smbus_write_data(priv, (const u8 *)&addr, 1,
MLXBF_I2C_MASTER_DATA_DESC_ADDR, true);
ret = mlxbf_i2c_smbus_enable(priv, slave, read_len, block_en,
- pec_en, 1);
+ pec_en, 1, true);
if (!ret) {
/* Get Master GW data descriptor. */
mlxbf_i2c_smbus_read_data(priv, data_desc, read_len + 1,
@@ -896,6 +913,9 @@ mlxbf_i2c_smbus_i2c_block_func(struct mlxbf_i2c_smbus_request *request,
request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
request->operation[0].buffer = command;
+ if (read)
+ request->operation[0].flags |= MLXBF_I2C_F_WRITE_WITHOUT_STOP;
+
/*
* As specified in the standard, the max number of bytes to read/write
* per block operation is 32 bytes. In Golan code, the controller can
@@ -1063,7 +1083,7 @@ static u32 mlxbf_i2c_get_ticks(struct mlxbf_i2c_priv *priv, u64 nanoseconds,
* Frequency
*/
frequency = priv->frequency;
- ticks = (nanoseconds * frequency) / MLXBF_I2C_FREQUENCY_1GHZ;
+ ticks = div_u64(nanoseconds * frequency, MLXBF_I2C_FREQUENCY_1GHZ);
/*
* The number of ticks is rounded down and if minimum is equal to 1
* then add one tick.
@@ -1130,7 +1150,8 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF);
- timer = timings->timeout;
+ timer = mlxbf_i2c_set_timer(priv, timings->timeout, false,
+ MLXBF_I2C_MASK_32, MLXBF_I2C_SHIFT_0);
writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT);
}
@@ -1140,11 +1161,7 @@ enum mlxbf_i2c_timings_config {
MLXBF_I2C_TIMING_CONFIG_1000KHZ,
};
-/*
- * Note that the mlxbf_i2c_timings->timeout value is not related to the
- * bus frequency, it is impacted by the time it takes the driver to
- * complete data transmission before transaction abort.
- */
+/* Timing values are in nanoseconds */
static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
[MLXBF_I2C_TIMING_CONFIG_100KHZ] = {
.scl_high = 4810,
@@ -1159,8 +1176,8 @@ static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
.scl_fall = 50,
.hold_data = 300,
.buf = 20000,
- .thigh_max = 5000,
- .timeout = 106500
+ .thigh_max = 50000,
+ .timeout = 35000000
},
[MLXBF_I2C_TIMING_CONFIG_400KHZ] = {
.scl_high = 1011,
@@ -1175,24 +1192,24 @@ static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
.scl_fall = 50,
.hold_data = 300,
.buf = 20000,
- .thigh_max = 5000,
- .timeout = 106500
+ .thigh_max = 50000,
+ .timeout = 35000000
},
[MLXBF_I2C_TIMING_CONFIG_1000KHZ] = {
- .scl_high = 600,
- .scl_low = 1300,
+ .scl_high = 383,
+ .scl_low = 460,
.hold_start = 600,
- .setup_start = 600,
- .setup_stop = 600,
- .setup_data = 100,
+ .setup_start = 260,
+ .setup_stop = 260,
+ .setup_data = 50,
.sda_rise = 50,
.sda_fall = 50,
.scl_rise = 50,
.scl_fall = 50,
.hold_data = 300,
- .buf = 20000,
- .thigh_max = 5000,
- .timeout = 106500
+ .buf = 500,
+ .thigh_max = 50000,
+ .timeout = 35000000
}
};
@@ -1443,9 +1460,8 @@ static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_
* and PadFrequency, respectively.
*/
core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
- core_frequency /= (++core_r) * (++core_od);
- return core_frequency;
+ return div_u64(core_frequency, (++core_r) * (++core_od));
}
static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
@@ -1474,9 +1490,8 @@ static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_r
* and PadFrequency, respectively.
*/
corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
- corepll_frequency /= (++core_r) * (++core_od);
- return corepll_frequency;
+ return div_u64(corepll_frequency, (++core_r) * (++core_od));
}
static int mlxbf_i2c_calculate_corepll_freq(struct platform_device *pdev,
@@ -2038,21 +2053,21 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
read ? &data->byte : &command, read,
pec);
dev_dbg(&adap->dev, "smbus %s byte, slave 0x%02x.\n",
- read ? "read" : "write", addr);
+ str_read_write(read), addr);
break;
case I2C_SMBUS_BYTE_DATA:
mlxbf_i2c_smbus_data_byte_func(&request, &command, &data->byte,
read, pec);
dev_dbg(&adap->dev, "smbus %s byte data at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", command, addr);
+ str_read_write(read), command, addr);
break;
case I2C_SMBUS_WORD_DATA:
mlxbf_i2c_smbus_data_word_func(&request, &command,
(u8 *)&data->word, read, pec);
dev_dbg(&adap->dev, "smbus %s word data at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", command, addr);
+ str_read_write(read), command, addr);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
@@ -2060,7 +2075,7 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
mlxbf_i2c_smbus_i2c_block_func(&request, &command, data->block,
&byte_cnt, read, pec);
dev_dbg(&adap->dev, "i2c %s block data, %d bytes at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", byte_cnt, command, addr);
+ str_read_write(read), byte_cnt, command, addr);
break;
case I2C_SMBUS_BLOCK_DATA:
@@ -2068,7 +2083,7 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
mlxbf_i2c_smbus_block_func(&request, &command, data->block,
&byte_cnt, read, pec);
dev_dbg(&adap->dev, "smbus %s block data, %d bytes at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", byte_cnt, command, addr);
+ str_read_write(read), byte_cnt, command, addr);
break;
case I2C_FUNC_SMBUS_PROC_CALL:
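
The div_u64() conversions above matter because a plain 64-bit '/' is not guaranteed to link on 32-bit targets; div_u64() takes a u64 dividend and a 32-bit divisor. A small hypothetical helper in the same spirit (foo_ns_to_ticks is an assumption, and the caller is expected to keep ns small enough that the multiplication does not overflow):

#include <linux/math64.h>
#include <linux/time64.h>
#include <linux/types.h>

/* Convert a duration in nanoseconds to ticks of a clock running at hz. */
static u64 foo_ns_to_ticks(u64 ns, u32 hz)
{
        return div_u64(ns * hz, NSEC_PER_SEC);
}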
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index de713b5747fe..892e2d2988a7 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -1115,14 +1115,10 @@ static void npcm_i2c_master_abort(struct npcm_i2c *bus)
#if IS_ENABLED(CONFIG_I2C_SLAVE)
static u8 npcm_i2c_get_slave_addr(struct npcm_i2c *bus, enum i2c_addr addr_type)
{
- u8 slave_add;
-
if (addr_type > I2C_SLAVE_ADDR2 && addr_type <= I2C_SLAVE_ADDR10)
dev_err(bus->dev, "get slave: try to use more than 2 SA not supported\n");
- slave_add = ioread8(bus->reg + npcm_i2caddr[(int)addr_type]);
-
- return slave_add;
+ return ioread8(bus->reg + npcm_i2caddr[addr_type]);
}
static int npcm_i2c_remove_slave_addr(struct npcm_i2c *bus, u8 slave_add)
@@ -2178,10 +2174,14 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,
/* Check HW is OK: SDA and SCL should be high at this point. */
if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) {
- dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num);
- dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap),
- npcm_i2c_get_SCL(&bus->adap));
- return -ENXIO;
+ dev_warn(bus->dev, " I2C%d SDA=%d SCL=%d, attempting to recover\n", bus->num,
+ npcm_i2c_get_SDA(&bus->adap), npcm_i2c_get_SCL(&bus->adap));
+ if (npcm_i2c_recovery_tgclk(&bus->adap)) {
+ dev_err(bus->dev, "I2C%d init fail: SDA=%d SCL=%d\n",
+ bus->num, npcm_i2c_get_SDA(&bus->adap),
+ npcm_i2c_get_SCL(&bus->adap));
+ return -ENXIO;
+ }
}
npcm_i2c_int_enable(bus, true);
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index baf6b27f3752..93a49e4637ec 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -135,6 +135,32 @@ static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c)
octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
}
+static void octeon_i2c_block_enable(struct octeon_i2c *i2c)
+{
+ u64 mode;
+
+ if (i2c->block_enabled || !OCTEON_REG_BLOCK_CTL(i2c))
+ return;
+
+ i2c->block_enabled = true;
+ mode = __raw_readq(i2c->twsi_base + OCTEON_REG_MODE(i2c));
+ mode |= TWSX_MODE_BLOCK_MODE;
+ octeon_i2c_writeq_flush(mode, i2c->twsi_base + OCTEON_REG_MODE(i2c));
+}
+
+static void octeon_i2c_block_disable(struct octeon_i2c *i2c)
+{
+ u64 mode;
+
+ if (!i2c->block_enabled || !OCTEON_REG_BLOCK_CTL(i2c))
+ return;
+
+ i2c->block_enabled = false;
+ mode = __raw_readq(i2c->twsi_base + OCTEON_REG_MODE(i2c));
+ mode &= ~TWSX_MODE_BLOCK_MODE;
+ octeon_i2c_writeq_flush(mode, i2c->twsi_base + OCTEON_REG_MODE(i2c));
+}
+
/**
* octeon_i2c_hlc_wait - wait for an HLC operation to complete
* @i2c: The struct octeon_i2c
@@ -281,6 +307,7 @@ static int octeon_i2c_start(struct octeon_i2c *i2c)
u8 stat;
octeon_i2c_hlc_disable(i2c);
+ octeon_i2c_block_disable(i2c);
octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STA);
ret = octeon_i2c_wait(i2c);
@@ -605,6 +632,125 @@ err:
}
/**
+ * octeon_i2c_hlc_block_comp_read - high-level-controller composite block read
+ * @i2c: The struct octeon_i2c
+ * @msgs: msg[0] contains address, place read data into msg[1]
+ *
+ * i2c core command is constructed and written into the SW_TWSI register.
+ * The execution of the command will result in requested data being
+ * placed into a FIFO buffer, ready to be read.
+ * Used in the case where the i2c xfer is for greater than 8 bytes of read data.
+ *
+ * Returns: 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_hlc_block_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
+{
+ int ret;
+ u16 len, i;
+ u64 cmd;
+
+ octeon_i2c_hlc_enable(i2c);
+ octeon_i2c_block_enable(i2c);
+
+ /* Write (size - 1) into block control register */
+ len = msgs[1].len - 1;
+ octeon_i2c_writeq_flush((u64)len, i2c->twsi_base + OCTEON_REG_BLOCK_CTL(i2c));
+
+ /* Prepare core command */
+ cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR | SW_TWSI_OP_7_IA;
+ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+ /* Send core command */
+ ret = octeon_i2c_hlc_read_cmd(i2c, msgs[0], cmd);
+ if (ret)
+ goto err;
+
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
+ if ((cmd & SW_TWSI_R) == 0) {
+ octeon_i2c_block_disable(i2c);
+ return octeon_i2c_check_status(i2c, false);
+ }
+
+ /* read data in FIFO */
+ octeon_i2c_writeq_flush(TWSX_BLOCK_STS_RESET_PTR,
+ i2c->twsi_base + OCTEON_REG_BLOCK_STS(i2c));
+ for (i = 0; i <= len; i += 8) {
+ /* Byte-swap FIFO data and copy into msg buffer */
+ __be64 rd = cpu_to_be64(__raw_readq(i2c->twsi_base + OCTEON_REG_BLOCK_FIFO(i2c)));
+
+ memcpy(&msgs[1].buf[i], &rd, min(8, msgs[1].len - i));
+ }
+
+err:
+ octeon_i2c_block_disable(i2c);
+ return ret;
+}
+
+/**
+ * octeon_i2c_hlc_block_comp_write - high-level-controller composite block write
+ * @i2c: The struct octeon_i2c
+ * @msgs: msg[0] contains address, msg[1] contains data to be written
+ *
+ * i2c core command is constructed and write data is written into the FIFO buffer.
+ * The execution of the command will result in HW write, using the data in FIFO.
+ * Used in the case where the i2c xfer is for greater than 8 bytes of write data.
+ *
+ * Returns: 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_hlc_block_comp_write(struct octeon_i2c *i2c, struct i2c_msg *msgs)
+{
+ bool set_ext;
+ int ret;
+ u16 len, i;
+ u64 cmd, ext = 0;
+
+ octeon_i2c_hlc_enable(i2c);
+ octeon_i2c_block_enable(i2c);
+
+ /* Write (size - 1) into block control register */
+ len = msgs[1].len - 1;
+ octeon_i2c_writeq_flush((u64)len, i2c->twsi_base + OCTEON_REG_BLOCK_CTL(i2c));
+
+ /* Prepare core command */
+ cmd = SW_TWSI_V | SW_TWSI_SOVR | SW_TWSI_OP_7_IA;
+ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+ /* Set parameters for extended message (if required) */
+ set_ext = octeon_i2c_hlc_ext(i2c, msgs[0], &cmd, &ext);
+
+ /* Write msg into FIFO buffer */
+ octeon_i2c_writeq_flush(TWSX_BLOCK_STS_RESET_PTR,
+ i2c->twsi_base + OCTEON_REG_BLOCK_STS(i2c));
+ for (i = 0; i <= len; i += 8) {
+ __be64 buf = 0;
+
+ /* Copy 8 bytes or remaining bytes from message buffer */
+ memcpy(&buf, &msgs[1].buf[i], min(8, msgs[1].len - i));
+
+ /* Byte-swap message data and write into FIFO */
+ buf = cpu_to_be64(buf);
+ octeon_i2c_writeq_flush((u64)buf, i2c->twsi_base + OCTEON_REG_BLOCK_FIFO(i2c));
+ }
+ if (set_ext)
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + OCTEON_REG_SW_TWSI_EXT(i2c));
+
+ /* Send command to core (send data in FIFO) */
+ ret = octeon_i2c_hlc_cmd_send(i2c, cmd);
+ if (ret)
+ goto err;
+
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
+ if ((cmd & SW_TWSI_R) == 0) {
+ octeon_i2c_block_disable(i2c);
+ return octeon_i2c_check_status(i2c, false);
+ }
+
+err:
+ octeon_i2c_block_disable(i2c);
+ return ret;
+}
+
+/**
* octeon_i2c_xfer - The driver's xfer function
* @adap: Pointer to the i2c_adapter structure
* @msgs: Pointer to the messages to be processed
@@ -630,13 +776,21 @@ int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if ((msgs[0].flags & I2C_M_RD) == 0 &&
(msgs[1].flags & I2C_M_RECV_LEN) == 0 &&
msgs[0].len > 0 && msgs[0].len <= 2 &&
- msgs[1].len > 0 && msgs[1].len <= 8 &&
+ msgs[1].len > 0 &&
msgs[0].addr == msgs[1].addr) {
- if (msgs[1].flags & I2C_M_RD)
- ret = octeon_i2c_hlc_comp_read(i2c, msgs);
- else
- ret = octeon_i2c_hlc_comp_write(i2c, msgs);
- goto out;
+ if (msgs[1].len <= 8) {
+ if (msgs[1].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_comp_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_comp_write(i2c, msgs);
+ goto out;
+ } else if (msgs[1].len <= 1024 && OCTEON_REG_BLOCK_CTL(i2c)) {
+ if (msgs[1].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_block_comp_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_block_comp_write(i2c, msgs);
+ goto out;
+ }
}
}
}
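
The FIFO loops above move data eight bytes at a time and byte-swap each 64-bit word, with min() guarding the final partial word. A hypothetical helper isolating that copy step (foo_copy_fifo_word is made up):

#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Copy one 64-bit FIFO word into buf, limited to the bytes still needed. */
static void foo_copy_fifo_word(u8 *buf, u64 raw, int remaining)
{
        __be64 word = cpu_to_be64(raw);

        memcpy(buf, &word, min(8, remaining));
}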
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index b265e21189a1..32a44f2d6274 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -96,18 +96,28 @@ struct octeon_i2c_reg_offset {
unsigned int twsi_int;
unsigned int sw_twsi_ext;
unsigned int mode;
+ unsigned int block_ctl;
+ unsigned int block_sts;
+ unsigned int block_fifo;
};
#define OCTEON_REG_SW_TWSI(x) ((x)->roff.sw_twsi)
#define OCTEON_REG_TWSI_INT(x) ((x)->roff.twsi_int)
#define OCTEON_REG_SW_TWSI_EXT(x) ((x)->roff.sw_twsi_ext)
#define OCTEON_REG_MODE(x) ((x)->roff.mode)
+#define OCTEON_REG_BLOCK_CTL(x) ((x)->roff.block_ctl)
+#define OCTEON_REG_BLOCK_STS(x) ((x)->roff.block_sts)
+#define OCTEON_REG_BLOCK_FIFO(x) ((x)->roff.block_fifo)
-/* Set REFCLK_SRC and HS_MODE in TWSX_MODE register */
+/* TWSX_MODE register */
#define TWSX_MODE_REFCLK_SRC BIT(4)
+#define TWSX_MODE_BLOCK_MODE BIT(2)
#define TWSX_MODE_HS_MODE BIT(0)
#define TWSX_MODE_HS_MASK (TWSX_MODE_REFCLK_SRC | TWSX_MODE_HS_MODE)
+/* TWSX_BLOCK_STS register */
+#define TWSX_BLOCK_STS_RESET_PTR BIT(0)
+
/* Set BUS_MON_RST to reset bus monitor */
#define BUS_MON_RST_MASK BIT(3)
@@ -123,6 +133,7 @@ struct octeon_i2c {
void __iomem *twsi_base;
struct device *dev;
bool hlc_enabled;
+ bool block_enabled;
bool broken_irq_mode;
bool broken_irq_check;
void (*int_enable)(struct octeon_i2c *);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 16afb9ca19bb..876791d20ed5 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1454,7 +1454,7 @@ omap_i2c_probe(struct platform_device *pdev)
(1000 * omap->speed / 8);
}
- if (of_property_read_bool(node, "mux-states")) {
+ if (of_property_present(node, "mux-states")) {
struct mux_state *mux_state;
mux_state = devm_mux_state_get(&pdev->dev, NULL);
diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
index bd128ab2e2eb..f4eca44ed183 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.c
+++ b/drivers/i2c/busses/i2c-pasemi-core.c
@@ -5,22 +5,24 @@
* SMBus host driver for PA Semi PWRficient
*/
-#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
#include <linux/sched.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/io.h>
+#include <linux/stddef.h>
#include "i2c-pasemi-core.h"
/* Register offsets */
#define REG_MTXFIFO 0x00
#define REG_MRXFIFO 0x04
+#define REG_XFSTA 0x0c
#define REG_SMSTA 0x14
#define REG_IMASK 0x18
#define REG_CTL 0x1c
@@ -52,6 +54,12 @@
#define CTL_UJM BIT(8)
#define CTL_CLK_M GENMASK(7, 0)
+/*
+ * The hardware (supposedly) has a 25ms timeout for clock stretching, thus
+ * use 100ms here which should be plenty.
+ */
+#define PASEMI_TRANSFER_TIMEOUT_MS 100
+
static inline void reg_write(struct pasemi_smbus *smbus, int reg, int val)
{
dev_dbg(smbus->dev, "smbus write reg %x val %08x\n", reg, val);
@@ -71,7 +79,7 @@ static inline int reg_read(struct pasemi_smbus *smbus, int reg)
static void pasemi_reset(struct pasemi_smbus *smbus)
{
- u32 val = (CTL_MTR | CTL_MRR | (smbus->clk_div & CTL_CLK_M));
+ u32 val = (CTL_MTR | CTL_MRR | CTL_UJM | (smbus->clk_div & CTL_CLK_M));
if (smbus->hw_rev >= 6)
val |= CTL_EN;
@@ -80,43 +88,102 @@ static void pasemi_reset(struct pasemi_smbus *smbus)
reinit_completion(&smbus->irq_completion);
}
-static void pasemi_smb_clear(struct pasemi_smbus *smbus)
+static int pasemi_smb_clear(struct pasemi_smbus *smbus)
{
unsigned int status;
+ int ret;
+
+ /* First wait for the bus to go idle */
+ ret = readx_poll_timeout(ioread32, smbus->ioaddr + REG_SMSTA,
+ status, !(status & (SMSTA_XIP | SMSTA_JAM)),
+ USEC_PER_MSEC,
+ USEC_PER_MSEC * PASEMI_TRANSFER_TIMEOUT_MS);
+
+ if (ret < 0) {
+ dev_err(smbus->dev, "Bus is still stuck (status 0x%08x xfstatus 0x%08x)\n",
+ status, reg_read(smbus, REG_XFSTA));
+ return -EIO;
+ }
+
+ /* If any badness happened or there is data in the FIFOs, reset the FIFOs */
+ if ((status & (SMSTA_MRNE | SMSTA_JMD | SMSTA_MTO | SMSTA_TOM | SMSTA_MTN | SMSTA_MTA)) ||
+ !(status & SMSTA_MTE)) {
+ dev_warn(smbus->dev, "Issuing reset due to status 0x%08x (xfstatus 0x%08x)\n",
+ status, reg_read(smbus, REG_XFSTA));
+ pasemi_reset(smbus);
+ }
- status = reg_read(smbus, REG_SMSTA);
+ /* Clear the flags */
reg_write(smbus, REG_SMSTA, status);
+
+ return 0;
}
static int pasemi_smb_waitready(struct pasemi_smbus *smbus)
{
- int timeout = 100;
unsigned int status;
if (smbus->use_irq) {
reinit_completion(&smbus->irq_completion);
reg_write(smbus, REG_IMASK, SMSTA_XEN | SMSTA_MTN);
- wait_for_completion_timeout(&smbus->irq_completion, msecs_to_jiffies(100));
+ int ret = wait_for_completion_timeout(
+ &smbus->irq_completion,
+ msecs_to_jiffies(PASEMI_TRANSFER_TIMEOUT_MS));
reg_write(smbus, REG_IMASK, 0);
status = reg_read(smbus, REG_SMSTA);
+
+ if (ret < 0) {
+ dev_err(smbus->dev,
+ "Completion wait failed with %d, status 0x%08x\n",
+ ret, status);
+ return ret;
+ } else if (ret == 0) {
+ dev_err(smbus->dev, "Timeout, status 0x%08x\n", status);
+ return -ETIME;
+ }
} else {
- status = reg_read(smbus, REG_SMSTA);
- while (!(status & SMSTA_XEN) && timeout--) {
- msleep(1);
- status = reg_read(smbus, REG_SMSTA);
+ int ret = readx_poll_timeout(
+ ioread32, smbus->ioaddr + REG_SMSTA,
+ status, status & SMSTA_XEN,
+ USEC_PER_MSEC,
+ USEC_PER_MSEC * PASEMI_TRANSFER_TIMEOUT_MS);
+
+ if (ret < 0) {
+ dev_err(smbus->dev, "Timeout, status 0x%08x\n", status);
+ return -ETIME;
}
}
- /* Got NACK? */
- if (status & SMSTA_MTN)
- return -ENXIO;
+ /* Controller timeout? */
+ if (status & SMSTA_TOM) {
+ dev_err(smbus->dev, "Controller timeout, status 0x%08x\n", status);
+ return -EIO;
+ }
- if (timeout < 0) {
- dev_warn(smbus->dev, "Timeout, status 0x%08x\n", status);
- reg_write(smbus, REG_SMSTA, status);
+ /* Peripheral timeout? */
+ if (status & SMSTA_MTO) {
+ dev_err(smbus->dev, "Peripheral timeout, status 0x%08x\n", status);
return -ETIME;
}
+ /* Still stuck in a transaction? */
+ if (status & SMSTA_XIP) {
+ dev_err(smbus->dev, "Bus stuck, status 0x%08x\n", status);
+ return -EIO;
+ }
+
+ /* Arbitration loss? */
+ if (status & SMSTA_MTA) {
+ dev_err(smbus->dev, "Arbitration loss, status 0x%08x\n", status);
+ return -EBUSY;
+ }
+
+ /* Got NACK? */
+ if (status & SMSTA_MTN) {
+ dev_err(smbus->dev, "NACK, status 0x%08x\n", status);
+ return -ENXIO;
+ }
+
/* Clear XEN */
reg_write(smbus, REG_SMSTA, SMSTA_XEN);
@@ -177,9 +244,9 @@ static int pasemi_i2c_xfer(struct i2c_adapter *adapter,
struct pasemi_smbus *smbus = adapter->algo_data;
int ret, i;
- pasemi_smb_clear(smbus);
-
- ret = 0;
+ ret = pasemi_smb_clear(smbus);
+ if (ret)
+ return ret;
for (i = 0; i < num && !ret; i++)
ret = pasemi_i2c_xfer_msg(adapter, &msgs[i], (i == (num - 1)));
@@ -200,7 +267,9 @@ static int pasemi_smb_xfer(struct i2c_adapter *adapter,
addr <<= 1;
read_flag = read_write == I2C_SMBUS_READ;
- pasemi_smb_clear(smbus);
+ err = pasemi_smb_clear(smbus);
+ if (err)
+ return err;
switch (size) {
case I2C_SMBUS_QUICK:
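
readx_poll_timeout(), used above for both the bus-idle wait and the non-IRQ completion wait, repeatedly evaluates op(addr) until the condition holds or the timeout expires, returning 0 or -ETIMEDOUT. A hypothetical wait on a status bit in the same style (FOO_STA_BUSY and foo_wait_idle are assumptions):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/time64.h>

#define FOO_STA_BUSY        BIT(0)        /* hypothetical busy bit */

/* Poll about once per millisecond, giving up after 100 ms. */
static int foo_wait_idle(void __iomem *sta)
{
        unsigned int status;

        return readx_poll_timeout(ioread32, sta, status,
                                  !(status & FOO_STA_BUSY),
                                  USEC_PER_MSEC, 100 * USEC_PER_MSEC);
}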
diff --git a/drivers/i2c/busses/i2c-pasemi-pci.c b/drivers/i2c/busses/i2c-pasemi-pci.c
index 77f90c7436ed..b9ccb54ec77e 100644
--- a/drivers/i2c/busses/i2c-pasemi-pci.c
+++ b/drivers/i2c/busses/i2c-pasemi-pci.c
@@ -5,15 +5,15 @@
* SMBus host driver for PA Semi PWRficient
*/
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
#include <linux/sched.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/io.h>
+#include <linux/stddef.h>
#include "i2c-pasemi-core.h"
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index dd75916157f0..9d3a4dc2bd60 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -34,6 +34,7 @@
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/io.h>
+#include <asm/amd/fch.h>
#include "i2c-piix4.h"
@@ -80,12 +81,11 @@
#define SB800_PIIX4_PORT_IDX_MASK 0x06
#define SB800_PIIX4_PORT_IDX_SHIFT 1
-/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
-#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
-#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
+/* SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+#define SB800_PIIX4_PORT_IDX_KERNCZ (FCH_PM_DECODEEN + 0x02)
+#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ (FCH_PM_DECODEEN_SMBUS0SEL >> 16)
#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
-#define SB800_PIIX4_FCH_PM_ADDR 0xFED80300
#define SB800_PIIX4_FCH_PM_SIZE 8
#define SB800_ASF_ACPI_PATH "\\_SB.ASFC"
@@ -162,19 +162,19 @@ int piix4_sb800_region_request(struct device *dev, struct sb800_mmio_cfg *mmio_c
if (mmio_cfg->use_mmio) {
void __iomem *addr;
- if (!request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR,
+ if (!request_mem_region_muxed(FCH_PM_BASE,
SB800_PIIX4_FCH_PM_SIZE,
"sb800_piix4_smb")) {
dev_err(dev,
"SMBus base address memory region 0x%x already in use.\n",
- SB800_PIIX4_FCH_PM_ADDR);
+ FCH_PM_BASE);
return -EBUSY;
}
- addr = ioremap(SB800_PIIX4_FCH_PM_ADDR,
+ addr = ioremap(FCH_PM_BASE,
SB800_PIIX4_FCH_PM_SIZE);
if (!addr) {
- release_mem_region(SB800_PIIX4_FCH_PM_ADDR,
+ release_mem_region(FCH_PM_BASE,
SB800_PIIX4_FCH_PM_SIZE);
dev_err(dev, "SMBus base address mapping failed.\n");
return -ENOMEM;
@@ -201,7 +201,7 @@ void piix4_sb800_region_release(struct device *dev, struct sb800_mmio_cfg *mmio_
{
if (mmio_cfg->use_mmio) {
iounmap(mmio_cfg->addr);
- release_mem_region(SB800_PIIX4_FCH_PM_ADDR,
+ release_mem_region(FCH_PM_BASE,
SB800_PIIX4_FCH_PM_SIZE);
return;
}
@@ -971,7 +971,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
* This would allow the ee1004 to be probed incorrectly.
*/
if (port == 0)
- i2c_register_spd(adap);
+ i2c_register_spd_write_enable(adap);
*padap = adap;
return 0;
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 9a867c817db0..f99a2cc721a8 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -349,7 +349,7 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap,
/* Fill out the rest of the info structure */
info.addr = addr;
info.irq = irq_of_parse_and_map(node, 0);
- info.of_node = of_node_get(node);
+ info.fwnode = of_fwnode_handle(of_node_get(node));
newdev = i2c_new_client_device(adap, &info);
if (IS_ERR(newdev)) {
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 515a784c951c..ccea575fb783 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -71,7 +71,6 @@ enum geni_i2c_err_code {
<< 5)
#define I2C_AUTO_SUSPEND_DELAY 250
-#define KHZ(freq) (1000 * freq)
#define PACKING_BYTES_PW 4
#define ABORT_TIMEOUT HZ
@@ -148,18 +147,18 @@ struct geni_i2c_clk_fld {
* source_clock = 19.2 MHz
*/
static const struct geni_i2c_clk_fld geni_i2c_clk_map_19p2mhz[] = {
- {KHZ(100), 7, 10, 12, 26},
- {KHZ(400), 2, 5, 11, 22},
- {KHZ(1000), 1, 2, 8, 18},
- {},
+ { I2C_MAX_STANDARD_MODE_FREQ, 7, 10, 12, 26 },
+ { I2C_MAX_FAST_MODE_FREQ, 2, 5, 11, 22 },
+ { I2C_MAX_FAST_MODE_PLUS_FREQ, 1, 2, 8, 18 },
+ {}
};
/* source_clock = 32 MHz */
static const struct geni_i2c_clk_fld geni_i2c_clk_map_32mhz[] = {
- {KHZ(100), 8, 14, 18, 40},
- {KHZ(400), 4, 3, 11, 20},
- {KHZ(1000), 2, 3, 6, 15},
- {},
+ { I2C_MAX_STANDARD_MODE_FREQ, 8, 14, 18, 40 },
+ { I2C_MAX_FAST_MODE_FREQ, 4, 3, 11, 20 },
+ { I2C_MAX_FAST_MODE_PLUS_FREQ, 2, 3, 6, 15 },
+ {}
};
static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c)
@@ -812,7 +811,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
&gi2c->clk_freq_out);
if (ret) {
dev_info(dev, "Bus frequency not specified, default to 100kHz.\n");
- gi2c->clk_freq_out = KHZ(100);
+ gi2c->clk_freq_out = I2C_MAX_STANDARD_MODE_FREQ;
}
if (has_acpi_companion(dev))
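
The table rewrite above drops the local KHZ() macro in favour of the generic bus-frequency constants from <linux/i2c.h> (I2C_MAX_STANDARD_MODE_FREQ, I2C_MAX_FAST_MODE_FREQ, I2C_MAX_FAST_MODE_PLUS_FREQ, all expressed in Hz). A hypothetical lookup against such a zero-terminated table (struct foo_clk_fld and its divider values are made up for illustration):

#include <linux/i2c.h>
#include <linux/types.h>

struct foo_clk_fld {
        u32 clk_freq_out;        /* bus frequency in Hz, 0 terminates the table */
        u32 div;
};

static const struct foo_clk_fld foo_clk_map[] = {
        { I2C_MAX_STANDARD_MODE_FREQ, 26 },
        { I2C_MAX_FAST_MODE_FREQ, 22 },
        { I2C_MAX_FAST_MODE_PLUS_FREQ, 18 },
        {}
};

/* Return the divider for the requested bus frequency, or 0 if unsupported. */
static u32 foo_clk_div(u32 hz)
{
        const struct foo_clk_fld *itr;

        for (itr = foo_clk_map; itr->clk_freq_out; itr++)
                if (itr->clk_freq_out == hz)
                        return itr->div;
        return 0;
}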
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index d7dddd6c296a..23375f7fe3ad 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -52,6 +52,8 @@
#define ICCR1_ICE BIT(7)
#define ICCR1_IICRST BIT(6)
#define ICCR1_SOWP BIT(4)
+#define ICCR1_SCLO BIT(3)
+#define ICCR1_SDAO BIT(2)
#define ICCR1_SCLI BIT(1)
#define ICCR1_SDAI BIT(0)
@@ -151,11 +153,11 @@ static int riic_bus_barrier(struct riic_dev *riic)
ret = readb_poll_timeout(riic->base + riic->info->regs[RIIC_ICCR2], val,
!(val & ICCR2_BBSY), 10, riic->adapter.timeout);
if (ret)
- return ret;
+ return i2c_recover_bus(&riic->adapter);
if ((riic_readb(riic, RIIC_ICCR1) & (ICCR1_SDAI | ICCR1_SCLI)) !=
(ICCR1_SDAI | ICCR1_SCLI))
- return -EBUSY;
+ return i2c_recover_bus(&riic->adapter);
return 0;
}
@@ -439,6 +441,52 @@ static int riic_init_hw(struct riic_dev *riic)
return 0;
}
+static int riic_get_scl(struct i2c_adapter *adap)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ return !!(riic_readb(riic, RIIC_ICCR1) & ICCR1_SCLI);
+}
+
+static int riic_get_sda(struct i2c_adapter *adap)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ return !!(riic_readb(riic, RIIC_ICCR1) & ICCR1_SDAI);
+}
+
+static void riic_set_scl(struct i2c_adapter *adap, int val)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ if (val)
+ riic_clear_set_bit(riic, ICCR1_SOWP, ICCR1_SCLO, RIIC_ICCR1);
+ else
+ riic_clear_set_bit(riic, ICCR1_SOWP | ICCR1_SCLO, 0, RIIC_ICCR1);
+
+ riic_clear_set_bit(riic, 0, ICCR1_SOWP, RIIC_ICCR1);
+}
+
+static void riic_set_sda(struct i2c_adapter *adap, int val)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ if (val)
+ riic_clear_set_bit(riic, ICCR1_SOWP, ICCR1_SDAO, RIIC_ICCR1);
+ else
+ riic_clear_set_bit(riic, ICCR1_SOWP | ICCR1_SDAO, 0, RIIC_ICCR1);
+
+ riic_clear_set_bit(riic, 0, ICCR1_SOWP, RIIC_ICCR1);
+}
+
+static struct i2c_bus_recovery_info riic_bri = {
+ .recover_bus = i2c_generic_scl_recovery,
+ .get_scl = riic_get_scl,
+ .set_scl = riic_set_scl,
+ .get_sda = riic_get_sda,
+ .set_sda = riic_set_sda,
+};
+
static const struct riic_irq_desc riic_irqs[] = {
{ .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
{ .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
@@ -495,6 +543,7 @@ static int riic_i2c_probe(struct platform_device *pdev)
adap->algo = &riic_algo;
adap->dev.parent = dev;
adap->dev.of_node = dev->of_node;
+ adap->bus_recovery_info = &riic_bri;
init_completion(&riic->msg_done);
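[Illustrative sketch, not part of the patch: once an adapter has a populated i2c_bus_recovery_info like riic_bri above, a controller's bus-busy path can hand over to the core recovery helper instead of failing outright, which is what the riic_bus_barrier() change does. All foo_* names and register offsets below are assumed.]

#include <linux/bits.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define FOO_STATUS_REG		0x10	/* assumed register offset */
#define FOO_BUS_BUSY		BIT(0)	/* assumed bus-busy flag */

struct foo_i2c {			/* hypothetical driver state */
	void __iomem *base;
	struct i2c_adapter adap;	/* adap.bus_recovery_info set at probe time */
};

/* If the bus never goes idle, let the core clock SCL via the recovery
 * callbacks (as riic_bus_barrier() now does) instead of returning a bare
 * timeout error. */
static int foo_bus_barrier(struct foo_i2c *priv)
{
	u32 val;
	int ret;

	ret = readl_poll_timeout(priv->base + FOO_STATUS_REG, val,
				 !(val & FOO_BUS_BUSY), 10, 100000);
	if (ret)
		return i2c_recover_bus(&priv->adap);

	return 0;
}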
diff --git a/drivers/i2c/busses/i2c-rzv2m.c b/drivers/i2c/busses/i2c-rzv2m.c
index 53762cc56d28..b0e9c0b62429 100644
--- a/drivers/i2c/busses/i2c-rzv2m.c
+++ b/drivers/i2c/busses/i2c-rzv2m.c
@@ -402,7 +402,7 @@ static const struct i2c_adapter_quirks rzv2m_i2c_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
};
-static struct i2c_algorithm rzv2m_i2c_algo = {
+static const struct i2c_algorithm rzv2m_i2c_algo = {
.xfer = rzv2m_i2c_xfer,
.functionality = rzv2m_i2c_func,
};
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index efe29621b8d7..adfcee6c9fdc 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
/* Transmit operation: */
/* */
@@ -409,7 +410,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
pd->sr |= sr; /* remember state */
dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr,
- (pd->msg->flags & I2C_M_RD) ? "read" : "write",
+ str_read_write(pd->msg->flags & I2C_M_RD),
pd->pos, pd->msg->len);
/* Kick off TxDMA after preface was done */
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 87976e99e6d0..049b4d154c23 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1395,6 +1395,11 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], MSG_END_CONTINUE);
if (ret)
break;
+
+ /* Validate message length before proceeding */
+ if (msgs[i].buf[0] == 0 || msgs[i].buf[0] > I2C_SMBUS_BLOCK_MAX)
+ break;
+
/* Set the msg length from first byte */
msgs[i].len += msgs[i].buf[0];
dev_dbg(i2c_dev->dev, "reading %d bytes\n", msgs[i].len);
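[Sketch for context, not taken from the patch: the tegra hunk above bounds-checks the first received byte of an I2C_M_RECV_LEN transfer before using it. A minimal helper expressing the same check, with a hypothetical foo_ name:]

#include <linux/i2c.h>

/* The first byte of an I2C_M_RECV_LEN reply carries the block length; it
 * must be in 1..I2C_SMBUS_BLOCK_MAX before being added to msg->len, or a
 * misbehaving device could make the master overrun the caller's buffer. */
static bool foo_recv_len_is_valid(const struct i2c_msg *msg)
{
	return msg->buf[0] != 0 && msg->buf[0] <= I2C_SMBUS_BLOCK_MAX;
}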
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index 143d012fa43e..3959f23fc440 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -168,6 +168,9 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
i2c->roff.twsi_int = 0x1010;
i2c->roff.sw_twsi_ext = 0x1018;
i2c->roff.mode = 0x1038;
+ i2c->roff.block_ctl = 0x1048;
+ i2c->roff.block_sts = 0x1050;
+ i2c->roff.block_fifo = 0x1058;
i2c->dev = dev;
pci_set_drvdata(pdev, i2c);
@@ -175,7 +178,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pcim_request_all_regions(pdev, DRV_NAME);
if (ret)
return ret;
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 0f2ed181b266..a18eab0992a1 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
/* include interfaces to usb layer */
@@ -71,7 +72,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
dev_dbg(&adapter->dev,
" %d: %s (flags %d) %d bytes to 0x%02x\n",
- i, pmsg->flags & I2C_M_RD ? "read" : "write",
+ i, str_read_write(pmsg->flags & I2C_M_RD),
pmsg->flags, pmsg->len, pmsg->addr);
/* and directly send the message */
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index d877f5a1f579..ca0358e8f928 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -532,22 +532,16 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) {
- dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
- return -EINVAL;
- }
+ if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ)
+ return dev_err_probe(dev, -EINVAL, "invalid clock-frequency %d\n", bus_speed);
priv->clk = devm_clk_get_enabled(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to enable clock\n");
- return PTR_ERR(priv->clk);
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to enable clock\n");
clk_rate = clk_get_rate(priv->clk);
- if (!clk_rate) {
- dev_err(dev, "input clock rate should not be zero\n");
- return -EINVAL;
- }
+ if (!clk_rate)
+ return dev_err_probe(dev, -EINVAL, "input clock rate should not be zero\n");
priv->clk_cycle = clk_rate / bus_speed;
init_completion(&priv->comp);
@@ -565,10 +559,8 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, uniphier_fi2c_interrupt, 0,
pdev->name, priv);
- if (ret) {
- dev_err(dev, "failed to request irq %d\n", irq);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq %d\n", irq);
return i2c_add_adapter(&priv->adap);
}
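[Aside, a hedged sketch of the dev_err_probe() pattern that the uniphier, via, viapro, viperboard and virtio conversions above rely on; the foo_probe() driver is hypothetical:]

#include <linux/clk.h>
#include <linux/dev_printk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* dev_err_probe() logs the message, records the reason when the error is
 * -EPROBE_DEFER (visible in /sys/kernel/debug/devices_deferred), and returns
 * the error code, collapsing the old dev_err()-then-return pattern. */
static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;

	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to enable clock\n");

	if (!clk_get_rate(clk))
		return dev_err_probe(dev, -EINVAL,
				     "input clock rate should not be zero\n");

	return 0;
}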
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index b95d50d4d7db..9d49a3d5d612 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -327,22 +327,16 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) {
- dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
- return -EINVAL;
- }
+ if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ)
+ return dev_err_probe(dev, -EINVAL, "invalid clock-frequency %d\n", bus_speed);
priv->clk = devm_clk_get_enabled(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to enable clock\n");
- return PTR_ERR(priv->clk);
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to enable clock\n");
clk_rate = clk_get_rate(priv->clk);
- if (!clk_rate) {
- dev_err(dev, "input clock rate should not be zero\n");
- return -EINVAL;
- }
+ if (!clk_rate)
+ return dev_err_probe(dev, -EINVAL, "input clock rate should not be zero\n");
priv->clk_cycle = clk_rate / bus_speed;
init_completion(&priv->comp);
@@ -359,10 +353,8 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, uniphier_i2c_interrupt, 0, pdev->name,
priv);
- if (ret) {
- dev_err(dev, "failed to request irq %d\n", irq);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq %d\n", irq);
return i2c_add_adapter(&priv->adap);
}
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 7ed29992a97f..2c26a57883f2 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -89,10 +89,9 @@ static int vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
u8 rev;
int res;
- if (pm_io_base) {
- dev_err(&dev->dev, "i2c-via: Will only support one host\n");
- return -ENODEV;
- }
+ if (pm_io_base)
+ return dev_err_probe(&dev->dev, -ENODEV,
+ "Will only support one host\n");
pci_read_config_byte(dev, PM_CFG_REVID, &rev);
@@ -113,10 +112,10 @@ static int vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
pci_read_config_word(dev, base, &pm_io_base);
pm_io_base &= (0xff << 8);
- if (!request_region(I2C_DIR, IOSPACE, vt586b_driver.name)) {
- dev_err(&dev->dev, "IO 0x%x-0x%x already in use\n", I2C_DIR, I2C_DIR + IOSPACE);
- return -ENODEV;
- }
+ if (!request_region(I2C_DIR, IOSPACE, vt586b_driver.name))
+ return dev_err_probe(&dev->dev, -ENODEV,
+ "IO 0x%x-0x%x already in use\n",
+ I2C_DIR, I2C_DIR + IOSPACE);
outb(inb(I2C_DIR) & ~(I2C_SDA | I2C_SCL), I2C_DIR);
outb(inb(I2C_OUT) & ~(I2C_SDA | I2C_SCL), I2C_OUT);
diff --git a/drivers/i2c/busses/i2c-viai2c-wmt.c b/drivers/i2c/busses/i2c-viai2c-wmt.c
index 4eb740faf268..2cf3cc0165fb 100644
--- a/drivers/i2c/busses/i2c-viai2c-wmt.c
+++ b/drivers/i2c/busses/i2c-viai2c-wmt.c
@@ -44,16 +44,13 @@ static int wmt_i2c_reset_hardware(struct viai2c *i2c)
int err;
err = clk_prepare_enable(i2c->clk);
- if (err) {
- dev_err(i2c->dev, "failed to enable clock\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(i2c->dev, err, "failed to enable clock\n");
err = clk_set_rate(i2c->clk, 20000000);
if (err) {
- dev_err(i2c->dev, "failed to set clock = 20Mhz\n");
clk_disable_unprepare(i2c->clk);
- return err;
+ return dev_err_probe(i2c->dev, err, "failed to set clock = 20Mhz\n");
}
writew(0, i2c->base + VIAI2C_REG_CR);
@@ -121,10 +118,9 @@ static int wmt_i2c_probe(struct platform_device *pdev)
"failed to request irq %i\n", i2c->irq);
i2c->clk = of_clk_get(np, 0);
- if (IS_ERR(i2c->clk)) {
- dev_err(&pdev->dev, "unable to request clock\n");
- return PTR_ERR(i2c->clk);
- }
+ if (IS_ERR(i2c->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk),
+ "unable to request clock\n");
err = of_property_read_u32(np, "clock-frequency", &clk_rate);
if (!err && clk_rate == I2C_MAX_FAST_MODE_FREQ)
@@ -139,10 +135,8 @@ static int wmt_i2c_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
err = wmt_i2c_reset_hardware(i2c);
- if (err) {
- dev_err(&pdev->dev, "error initializing hardware\n");
+ if (err)
return err;
- }
err = i2c_add_adapter(adap);
if (err)
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 2cc7bba3b8bf..c58843609107 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -330,30 +330,27 @@ static int vt596_probe(struct pci_dev *pdev,
SMBHSTCFG = 0x84;
} else {
/* no matches at all */
- dev_err(&pdev->dev, "Cannot configure "
- "SMBus I/O Base address\n");
- return -ENODEV;
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Cannot configure "
+ "SMBus I/O Base address\n");
}
}
vt596_smba &= 0xfff0;
- if (vt596_smba == 0) {
- dev_err(&pdev->dev, "SMBus base address "
- "uninitialized - upgrade BIOS or use "
- "force_addr=0xaddr\n");
- return -ENODEV;
- }
+ if (vt596_smba == 0)
+ return dev_err_probe(&pdev->dev, -ENODEV, "SMBus base address "
+ "uninitialized - upgrade BIOS or use "
+ "force_addr=0xaddr\n");
found:
error = acpi_check_region(vt596_smba, 8, vt596_driver.name);
if (error)
return -ENODEV;
- if (!request_region(vt596_smba, 8, vt596_driver.name)) {
- dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n",
- vt596_smba);
- return -ENODEV;
- }
+ if (!request_region(vt596_smba, 8, vt596_driver.name))
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "SMBus region 0x%x already in use!\n",
+ vt596_smba);
pci_read_config_byte(pdev, SMBHSTCFG, &temp);
/* If force_addr is set, we program the new address here. Just to make
@@ -375,10 +372,10 @@ found:
pci_write_config_byte(pdev, SMBHSTCFG, temp | 0x01);
dev_info(&pdev->dev, "Enabling SMBus device\n");
} else {
- dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
- "controller not enabled! - upgrade BIOS or "
- "use force=1\n");
- error = -ENODEV;
+ error = dev_err_probe(&pdev->dev, -ENODEV,
+ "SMBUS: Error: Host SMBus "
+ "controller not enabled! - "
+ "upgrade BIOS or use force=1\n");
goto release_region;
}
}
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 503e2f4d6f84..1bd602852e35 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
@@ -278,7 +279,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
dev_dbg(&i2c->dev,
" %d: %s (flags %d) %d bytes to 0x%02x\n",
- i, pmsg->flags & I2C_M_RD ? "read" : "write",
+ i, str_read_write(pmsg->flags & I2C_M_RD),
pmsg->flags, pmsg->len, pmsg->addr);
mutex_lock(&vb->lock);
@@ -384,15 +385,13 @@ static int vprbrd_i2c_probe(struct platform_device *pdev)
VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT,
0x0000, 0x0000, &vb_i2c->bus_freq_param, 1,
VPRBRD_USB_TIMEOUT_MS);
- if (ret != 1) {
- dev_err(&pdev->dev, "failure setting i2c_bus_freq to %d\n",
- i2c_bus_freq);
- return -EIO;
- }
+ if (ret != 1)
+ return dev_err_probe(&pdev->dev, -EIO,
+ "failure setting i2c_bus_freq to %d\n",
+ i2c_bus_freq);
} else {
- dev_err(&pdev->dev,
- "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
- return -EIO;
+ return dev_err_probe(&pdev->dev, -EIO,
+ "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
}
vb_i2c->i2c.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 2a351f961b89..9b05ff53d3d7 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -192,10 +192,9 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
struct virtio_i2c *vi;
int ret;
- if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST)) {
- dev_err(&vdev->dev, "Zero-length request feature is mandatory\n");
- return -EINVAL;
- }
+ if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST))
+ return dev_err_probe(&vdev->dev, -EINVAL,
+ "Zero-length request feature is mandatory\n");
vi = devm_kzalloc(&vdev->dev, sizeof(*vi), GFP_KERNEL);
if (!vi)
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 663fe5604dd6..b29dec66b2c3 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -101,8 +101,6 @@ struct slimpro_i2c_dev {
struct completion rd_complete;
u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
u32 *resp_msg;
- phys_addr_t comm_base_addr;
- void *pcc_comm_addr;
};
#define to_slimpro_i2c_dev(cl) \
@@ -148,7 +146,8 @@ static void slimpro_i2c_rx_cb(struct mbox_client *cl, void *mssg)
static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg)
{
struct slimpro_i2c_dev *ctx = to_slimpro_i2c_dev(cl);
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
/* Check if platform sends interrupt */
if (!xgene_word_tst_and_clr(&generic_comm_base->status,
@@ -169,7 +168,8 @@ static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg)
static void slimpro_i2c_pcc_tx_prepare(struct slimpro_i2c_dev *ctx, u32 *msg)
{
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
u32 *ptr = (void *)(generic_comm_base + 1);
u16 status;
int i;
@@ -457,22 +457,18 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
cl->tx_block = true;
cl->rx_callback = slimpro_i2c_rx_cb;
ctx->mbox_chan = mbox_request_channel(cl, MAILBOX_I2C_INDEX);
- if (IS_ERR(ctx->mbox_chan)) {
- dev_err(&pdev->dev, "i2c mailbox channel request failed\n");
- return PTR_ERR(ctx->mbox_chan);
- }
+ if (IS_ERR(ctx->mbox_chan))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ctx->mbox_chan),
+ "i2c mailbox channel request failed\n");
} else {
struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
- int version = XGENE_SLIMPRO_I2C_V1;
acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
&pdev->dev);
if (!acpi_id)
return -EINVAL;
- version = (int)acpi_id->driver_data;
-
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx))
ctx->mbox_idx = MAILBOX_I2C_INDEX;
@@ -480,48 +476,19 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
cl->tx_block = false;
cl->rx_callback = slimpro_i2c_pcc_rx_cb;
pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
- if (IS_ERR(pcc_chan)) {
- dev_err(&pdev->dev, "PCC mailbox channel request failed\n");
- return PTR_ERR(pcc_chan);
- }
+ if (IS_ERR(pcc_chan))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pcc_chan),
+ "PCC mailbox channel request failed\n");
ctx->pcc_chan = pcc_chan;
ctx->mbox_chan = pcc_chan->mchan;
if (!ctx->mbox_chan->mbox->txdone_irq) {
- dev_err(&pdev->dev, "PCC IRQ not supported\n");
- rc = -ENOENT;
+ rc = dev_err_probe(&pdev->dev, -ENOENT,
+ "PCC IRQ not supported\n");
goto mbox_err;
}
- /*
- * This is the shared communication region
- * for the OS and Platform to communicate over.
- */
- ctx->comm_base_addr = pcc_chan->shmem_base_addr;
- if (ctx->comm_base_addr) {
- if (version == XGENE_SLIMPRO_I2C_V2)
- ctx->pcc_comm_addr = memremap(
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WT);
- else
- ctx->pcc_comm_addr = memremap(
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WB);
- } else {
- dev_err(&pdev->dev, "Failed to get PCC comm region\n");
- rc = -ENOENT;
- goto mbox_err;
- }
-
- if (!ctx->pcc_comm_addr) {
- dev_err(&pdev->dev,
- "Failed to ioremap PCC comm region\n");
- rc = -ENOMEM;
- goto mbox_err;
- }
}
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index dc1e46d834dc..6bc1575cea6c 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -1489,7 +1489,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
pdev->name, i2c);
if (ret < 0) {
- dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ dev_err_probe(&pdev->dev, ret, "Cannot claim IRQ\n");
goto err_pm_disable;
}
@@ -1510,7 +1510,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
ret = xiic_reinit(i2c);
if (ret < 0) {
- dev_err(&pdev->dev, "Cannot xiic_reinit\n");
+ dev_err_probe(&pdev->dev, ret, "Cannot xiic_reinit\n");
goto err_pm_disable;
}
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 4d6abd7e92ce..06cf221557f2 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -500,10 +500,8 @@ static int scx200_probe(struct platform_device *pdev)
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!res) {
- dev_err(&pdev->dev, "can't fetch device resource info\n");
- return -ENODEV;
- }
+ if (!res)
+ return dev_err_probe(&pdev->dev, -ENODEV, "can't fetch device resource info\n");
iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev);
if (!iface)
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
index 8fe9ddff8e96..be7d6d41e0b2 100644
--- a/drivers/i2c/i2c-atr.c
+++ b/drivers/i2c/i2c-atr.c
@@ -8,40 +8,73 @@
* Originally based on i2c-mux.c
*/
-#include <linux/fwnode.h>
#include <linux/i2c-atr.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/lockdep.h>
#define ATR_MAX_ADAPTERS 100 /* Just a sanity limit */
#define ATR_MAX_SYMLINK_LEN 11 /* Longest name is 10 chars: "channel-99" */
/**
- * struct i2c_atr_alias_pair - Holds the alias assigned to a client.
+ * struct i2c_atr_alias_pair - Holds the alias assigned to a client address.
* @node: List node
- * @client: Pointer to the client on the child bus
+ * @addr: Address of the client on the child bus.
* @alias: I2C alias address assigned by the driver.
* This is the address that will be used to issue I2C transactions
* on the parent (physical) bus.
+ * @fixed: Alias pair cannot be replaced during dynamic address attachment.
+ * This flag is necessary for situations where a single I2C transaction
+ * contains more distinct target addresses than the ATR channel can handle.
+ * It marks addresses that have already been attached to an alias so
+ * that their alias pair is not evicted by a subsequent address in the same
+ * transaction.
+ *
*/
struct i2c_atr_alias_pair {
struct list_head node;
- const struct i2c_client *client;
+ bool fixed;
+ u16 addr;
u16 alias;
};
/**
+ * struct i2c_atr_alias_pool - Pool of client aliases available for an ATR.
+ * @size: Total number of aliases
+ * @shared: Indicates if this alias pool is shared by multiple channels
+ *
+ * @lock: Lock protecting @aliases and @use_mask
+ * @aliases: Array of aliases, must hold exactly @size elements
+ * @use_mask: Mask of used aliases
+ */
+struct i2c_atr_alias_pool {
+ size_t size;
+ bool shared;
+
+ /* Protects aliases and use_mask */
+ spinlock_t lock;
+ u16 *aliases;
+ unsigned long *use_mask;
+};
+
+/**
* struct i2c_atr_chan - Data for a channel.
* @adap: The &struct i2c_adapter for the channel
* @atr: The parent I2C ATR
* @chan_id: The ID of this channel
- * @alias_list: List of @struct i2c_atr_alias_pair containing the
+ * @alias_pairs_lock: Mutex protecting @alias_pairs
+ * @alias_pairs_lock_key: Lock key for @alias_pairs_lock
+ * @alias_pairs: List of @struct i2c_atr_alias_pair containing the
* assigned aliases
+ * @alias_pool: Pool of available client aliases
+ *
* @orig_addrs_lock: Mutex protecting @orig_addrs
+ * @orig_addrs_lock_key: Lock key for @orig_addrs_lock
* @orig_addrs: Buffer used to store the original addresses during transmit
* @orig_addrs_size: Size of @orig_addrs
*/
@@ -50,10 +83,15 @@ struct i2c_atr_chan {
struct i2c_atr *atr;
u32 chan_id;
- struct list_head alias_list;
+ /* Lock alias_pairs during attach/detach */
+ struct mutex alias_pairs_lock;
+ struct lock_class_key alias_pairs_lock_key;
+ struct list_head alias_pairs;
+ struct i2c_atr_alias_pool *alias_pool;
/* Lock orig_addrs during xfer */
struct mutex orig_addrs_lock;
+ struct lock_class_key orig_addrs_lock_key;
u16 *orig_addrs;
unsigned int orig_addrs_size;
};
@@ -66,11 +104,10 @@ struct i2c_atr_chan {
* @priv: Private driver data, set with i2c_atr_set_driver_data()
* @algo: The &struct i2c_algorithm for adapters
* @lock: Lock for the I2C bus segment (see &struct i2c_lock_operations)
+ * @lock_key: Lock key for @lock
* @max_adapters: Maximum number of adapters this I2C ATR can have
- * @num_aliases: Number of aliases in the aliases array
- * @aliases: The aliases array
- * @alias_mask_lock: Lock protecting alias_use_mask
- * @alias_use_mask: Bitmask for used aliases in aliases array
+ * @flags: Flags for ATR
+ * @alias_pool: Optional common pool of available client aliases
* @i2c_nb: Notifier for remote client add & del events
* @adapter: Array of adapters
*/
@@ -84,27 +121,135 @@ struct i2c_atr {
struct i2c_algorithm algo;
/* lock for the I2C bus segment (see struct i2c_lock_operations) */
struct mutex lock;
+ struct lock_class_key lock_key;
int max_adapters;
+ u32 flags;
- size_t num_aliases;
- const u16 *aliases;
- /* Protects alias_use_mask */
- spinlock_t alias_mask_lock;
- unsigned long *alias_use_mask;
+ struct i2c_atr_alias_pool *alias_pool;
struct notifier_block i2c_nb;
struct i2c_adapter *adapter[] __counted_by(max_adapters);
};
+static struct i2c_atr_alias_pool *i2c_atr_alloc_alias_pool(size_t num_aliases, bool shared)
+{
+ struct i2c_atr_alias_pool *alias_pool;
+ int ret;
+
+ alias_pool = kzalloc(sizeof(*alias_pool), GFP_KERNEL);
+ if (!alias_pool)
+ return ERR_PTR(-ENOMEM);
+
+ alias_pool->size = num_aliases;
+
+ alias_pool->aliases = kcalloc(num_aliases, sizeof(*alias_pool->aliases), GFP_KERNEL);
+ if (!alias_pool->aliases) {
+ ret = -ENOMEM;
+ goto err_free_alias_pool;
+ }
+
+ alias_pool->use_mask = bitmap_zalloc(num_aliases, GFP_KERNEL);
+ if (!alias_pool->use_mask) {
+ ret = -ENOMEM;
+ goto err_free_aliases;
+ }
+
+ alias_pool->shared = shared;
+
+ spin_lock_init(&alias_pool->lock);
+
+ return alias_pool;
+
+err_free_aliases:
+ kfree(alias_pool->aliases);
+err_free_alias_pool:
+ kfree(alias_pool);
+ return ERR_PTR(ret);
+}
+
+static void i2c_atr_free_alias_pool(struct i2c_atr_alias_pool *alias_pool)
+{
+ bitmap_free(alias_pool->use_mask);
+ kfree(alias_pool->aliases);
+ kfree(alias_pool);
+}
+
+/* Must be called with alias_pairs_lock held */
+static struct i2c_atr_alias_pair *i2c_atr_create_c2a(struct i2c_atr_chan *chan,
+ u16 alias, u16 addr)
+{
+ struct i2c_atr_alias_pair *c2a;
+
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ c2a = kzalloc(sizeof(*c2a), GFP_KERNEL);
+ if (!c2a)
+ return NULL;
+
+ c2a->addr = addr;
+ c2a->alias = alias;
+
+ list_add(&c2a->node, &chan->alias_pairs);
+
+ return c2a;
+}
+
+/* Must be called with alias_pairs_lock held */
+static void i2c_atr_destroy_c2a(struct i2c_atr_alias_pair **pc2a)
+{
+ list_del(&(*pc2a)->node);
+ kfree(*pc2a);
+ *pc2a = NULL;
+}
+
+static int i2c_atr_reserve_alias(struct i2c_atr_alias_pool *alias_pool)
+{
+ unsigned long idx;
+ u16 alias;
+
+ spin_lock(&alias_pool->lock);
+
+ idx = find_first_zero_bit(alias_pool->use_mask, alias_pool->size);
+ if (idx >= alias_pool->size) {
+ spin_unlock(&alias_pool->lock);
+ return -EBUSY;
+ }
+
+ set_bit(idx, alias_pool->use_mask);
+
+ alias = alias_pool->aliases[idx];
+
+ spin_unlock(&alias_pool->lock);
+ return alias;
+}
+
+static void i2c_atr_release_alias(struct i2c_atr_alias_pool *alias_pool, u16 alias)
+{
+ unsigned int idx;
+
+ spin_lock(&alias_pool->lock);
+
+ for (idx = 0; idx < alias_pool->size; ++idx) {
+ if (alias_pool->aliases[idx] == alias) {
+ clear_bit(idx, alias_pool->use_mask);
+ spin_unlock(&alias_pool->lock);
+ return;
+ }
+ }
+
+ spin_unlock(&alias_pool->lock);
+}
+
static struct i2c_atr_alias_pair *
-i2c_atr_find_mapping_by_client(const struct list_head *list,
- const struct i2c_client *client)
+i2c_atr_find_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
{
struct i2c_atr_alias_pair *c2a;
- list_for_each_entry(c2a, list, node) {
- if (c2a->client == client)
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ list_for_each_entry(c2a, &chan->alias_pairs, node) {
+ if (c2a->addr == addr)
return c2a;
}
@@ -112,18 +257,107 @@ i2c_atr_find_mapping_by_client(const struct list_head *list,
}
static struct i2c_atr_alias_pair *
-i2c_atr_find_mapping_by_addr(const struct list_head *list, u16 phys_addr)
+i2c_atr_create_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
{
+ struct i2c_atr *atr = chan->atr;
struct i2c_atr_alias_pair *c2a;
+ u16 alias;
+ int ret;
- list_for_each_entry(c2a, list, node) {
- if (c2a->client->addr == phys_addr)
- return c2a;
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ ret = i2c_atr_reserve_alias(chan->alias_pool);
+ if (ret < 0)
+ return NULL;
+
+ alias = ret;
+
+ c2a = i2c_atr_create_c2a(chan, alias, addr);
+ if (!c2a)
+ goto err_release_alias;
+
+ ret = atr->ops->attach_addr(atr, chan->chan_id, c2a->addr, c2a->alias);
+ if (ret) {
+ dev_err(atr->dev, "failed to attach 0x%02x on channel %d: err %d\n",
+ addr, chan->chan_id, ret);
+ goto err_del_c2a;
}
+ return c2a;
+
+err_del_c2a:
+ i2c_atr_destroy_c2a(&c2a);
+err_release_alias:
+ i2c_atr_release_alias(chan->alias_pool, alias);
return NULL;
}
+static struct i2c_atr_alias_pair *
+i2c_atr_replace_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
+{
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_atr_alias_pair *c2a;
+ struct list_head *alias_pairs;
+ bool found = false;
+ u16 alias;
+ int ret;
+
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ alias_pairs = &chan->alias_pairs;
+
+ if (unlikely(list_empty(alias_pairs)))
+ return NULL;
+
+ list_for_each_entry_reverse(c2a, alias_pairs, node) {
+ if (!c2a->fixed) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return NULL;
+
+ atr->ops->detach_addr(atr, chan->chan_id, c2a->addr);
+ c2a->addr = addr;
+
+ list_move(&c2a->node, alias_pairs);
+
+ alias = c2a->alias;
+
+ ret = atr->ops->attach_addr(atr, chan->chan_id, c2a->addr, c2a->alias);
+ if (ret) {
+ dev_err(atr->dev, "failed to attach 0x%02x on channel %d: err %d\n",
+ addr, chan->chan_id, ret);
+ i2c_atr_destroy_c2a(&c2a);
+ i2c_atr_release_alias(chan->alias_pool, alias);
+ return NULL;
+ }
+
+ return c2a;
+}
+
+static struct i2c_atr_alias_pair *
+i2c_atr_get_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
+{
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_atr_alias_pair *c2a;
+
+ c2a = i2c_atr_find_mapping_by_addr(chan, addr);
+ if (c2a)
+ return c2a;
+
+ if (atr->flags & I2C_ATR_F_STATIC)
+ return NULL;
+
+ c2a = i2c_atr_create_mapping_by_addr(chan, addr);
+ if (c2a)
+ return c2a;
+
+ return i2c_atr_replace_mapping_by_addr(chan, addr);
+}
+
/*
* Replace all message addresses with their aliases, saving the original
* addresses.
@@ -136,7 +370,7 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
{
struct i2c_atr *atr = chan->atr;
static struct i2c_atr_alias_pair *c2a;
- int i;
+ int i, ret = 0;
/* Ensure we have enough room to save the original addresses */
if (unlikely(chan->orig_addrs_size < num)) {
@@ -152,25 +386,36 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
chan->orig_addrs_size = num;
}
+ mutex_lock(&chan->alias_pairs_lock);
+
for (i = 0; i < num; i++) {
chan->orig_addrs[i] = msgs[i].addr;
- c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list,
- msgs[i].addr);
+ c2a = i2c_atr_get_mapping_by_addr(chan, msgs[i].addr);
+
if (!c2a) {
+ if (atr->flags & I2C_ATR_F_PASSTHROUGH)
+ continue;
+
dev_err(atr->dev, "client 0x%02x not mapped!\n",
msgs[i].addr);
while (i--)
msgs[i].addr = chan->orig_addrs[i];
- return -ENXIO;
+ ret = -ENXIO;
+ goto out_unlock;
}
+ // Prevent c2a from being overwritten by another client in this transaction
+ c2a->fixed = true;
+
msgs[i].addr = c2a->alias;
}
- return 0;
+out_unlock:
+ mutex_unlock(&chan->alias_pairs_lock);
+ return ret;
}
/*
@@ -183,10 +428,24 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
static void i2c_atr_unmap_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
int num)
{
+ struct i2c_atr_alias_pair *c2a;
int i;
for (i = 0; i < num; i++)
msgs[i].addr = chan->orig_addrs[i];
+
+ mutex_lock(&chan->alias_pairs_lock);
+
+ if (unlikely(list_empty(&chan->alias_pairs)))
+ goto out_unlock;
+
+ // unfix c2a entries so that subsequent transfers can reuse their aliases
+ list_for_each_entry(c2a, &chan->alias_pairs, node) {
+ c2a->fixed = false;
+ }
+
+out_unlock:
+ mutex_unlock(&chan->alias_pairs_lock);
}
static int i2c_atr_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
@@ -224,14 +483,23 @@ static int i2c_atr_smbus_xfer(struct i2c_adapter *adap, u16 addr,
struct i2c_atr *atr = chan->atr;
struct i2c_adapter *parent = atr->parent;
struct i2c_atr_alias_pair *c2a;
+ u16 alias;
- c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list, addr);
- if (!c2a) {
+ mutex_lock(&chan->alias_pairs_lock);
+
+ c2a = i2c_atr_get_mapping_by_addr(chan, addr);
+
+ if (!c2a && !(atr->flags & I2C_ATR_F_PASSTHROUGH)) {
dev_err(atr->dev, "client 0x%02x not mapped!\n", addr);
+ mutex_unlock(&chan->alias_pairs_lock);
return -ENXIO;
}
- return i2c_smbus_xfer(parent, c2a->alias, flags, read_write, command,
+ alias = c2a ? c2a->alias : addr;
+
+ mutex_unlock(&chan->alias_pairs_lock);
+
+ return i2c_smbus_xfer(parent, alias, flags, read_write, command,
size, data);
}
@@ -273,112 +541,60 @@ static const struct i2c_lock_operations i2c_atr_lock_ops = {
.unlock_bus = i2c_atr_unlock_bus,
};
-static int i2c_atr_reserve_alias(struct i2c_atr *atr)
-{
- unsigned long idx;
-
- spin_lock(&atr->alias_mask_lock);
-
- idx = find_first_zero_bit(atr->alias_use_mask, atr->num_aliases);
- if (idx >= atr->num_aliases) {
- spin_unlock(&atr->alias_mask_lock);
- dev_err(atr->dev, "failed to find a free alias\n");
- return -EBUSY;
- }
-
- set_bit(idx, atr->alias_use_mask);
-
- spin_unlock(&atr->alias_mask_lock);
-
- return atr->aliases[idx];
-}
-
-static void i2c_atr_release_alias(struct i2c_atr *atr, u16 alias)
-{
- unsigned int idx;
-
- spin_lock(&atr->alias_mask_lock);
-
- for (idx = 0; idx < atr->num_aliases; ++idx) {
- if (atr->aliases[idx] == alias) {
- clear_bit(idx, atr->alias_use_mask);
- spin_unlock(&atr->alias_mask_lock);
- return;
- }
- }
-
- spin_unlock(&atr->alias_mask_lock);
-
- /* This should never happen */
- dev_warn(atr->dev, "Unable to find mapped alias\n");
-}
-
-static int i2c_atr_attach_client(struct i2c_adapter *adapter,
- const struct i2c_client *client)
+static int i2c_atr_attach_addr(struct i2c_adapter *adapter,
+ u16 addr)
{
struct i2c_atr_chan *chan = adapter->algo_data;
struct i2c_atr *atr = chan->atr;
struct i2c_atr_alias_pair *c2a;
- u16 alias;
- int ret;
+ int ret = 0;
- ret = i2c_atr_reserve_alias(atr);
- if (ret < 0)
- return ret;
+ mutex_lock(&chan->alias_pairs_lock);
- alias = ret;
+ c2a = i2c_atr_create_mapping_by_addr(chan, addr);
+ if (!c2a && !(atr->flags & I2C_ATR_F_STATIC))
+ c2a = i2c_atr_replace_mapping_by_addr(chan, addr);
- c2a = kzalloc(sizeof(*c2a), GFP_KERNEL);
if (!c2a) {
- ret = -ENOMEM;
- goto err_release_alias;
+ dev_err(atr->dev, "failed to find a free alias\n");
+ ret = -EBUSY;
+ goto out_unlock;
}
- ret = atr->ops->attach_client(atr, chan->chan_id, client, alias);
- if (ret)
- goto err_free;
-
- dev_dbg(atr->dev, "chan%u: client 0x%02x mapped at alias 0x%02x (%s)\n",
- chan->chan_id, client->addr, alias, client->name);
-
- c2a->client = client;
- c2a->alias = alias;
- list_add(&c2a->node, &chan->alias_list);
-
- return 0;
-
-err_free:
- kfree(c2a);
-err_release_alias:
- i2c_atr_release_alias(atr, alias);
+ dev_dbg(atr->dev, "chan%u: using alias 0x%02x for addr 0x%02x\n",
+ chan->chan_id, c2a->alias, addr);
+out_unlock:
+ mutex_unlock(&chan->alias_pairs_lock);
return ret;
}
-static void i2c_atr_detach_client(struct i2c_adapter *adapter,
- const struct i2c_client *client)
+static void i2c_atr_detach_addr(struct i2c_adapter *adapter,
+ u16 addr)
{
struct i2c_atr_chan *chan = adapter->algo_data;
struct i2c_atr *atr = chan->atr;
struct i2c_atr_alias_pair *c2a;
- atr->ops->detach_client(atr, chan->chan_id, client);
+ atr->ops->detach_addr(atr, chan->chan_id, addr);
+
+ mutex_lock(&chan->alias_pairs_lock);
- c2a = i2c_atr_find_mapping_by_client(&chan->alias_list, client);
+ c2a = i2c_atr_find_mapping_by_addr(chan, addr);
if (!c2a) {
- /* This should never happen */
- dev_warn(atr->dev, "Unable to find address mapping\n");
+ mutex_unlock(&chan->alias_pairs_lock);
return;
}
- i2c_atr_release_alias(atr, c2a->alias);
+ i2c_atr_release_alias(chan->alias_pool, c2a->alias);
dev_dbg(atr->dev,
- "chan%u: client 0x%02x unmapped from alias 0x%02x (%s)\n",
- chan->chan_id, client->addr, c2a->alias, client->name);
+ "chan%u: detached alias 0x%02x from addr 0x%02x\n",
+ chan->chan_id, c2a->alias, addr);
- list_del(&c2a->node);
- kfree(c2a);
+ i2c_atr_destroy_c2a(&c2a);
+
+ mutex_unlock(&chan->alias_pairs_lock);
}
static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
@@ -405,7 +621,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
switch (event) {
case BUS_NOTIFY_ADD_DEVICE:
- ret = i2c_atr_attach_client(client->adapter, client);
+ ret = i2c_atr_attach_addr(client->adapter, client->addr);
if (ret)
dev_err(atr->dev,
"Failed to attach remote client '%s': %d\n",
@@ -413,7 +629,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
break;
case BUS_NOTIFY_REMOVED_DEVICE:
- i2c_atr_detach_client(client->adapter, client);
+ i2c_atr_detach_addr(client->adapter, client->addr);
break;
default:
@@ -425,29 +641,43 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
static int i2c_atr_parse_alias_pool(struct i2c_atr *atr)
{
+ struct i2c_atr_alias_pool *alias_pool;
struct device *dev = atr->dev;
- unsigned long *alias_use_mask;
size_t num_aliases;
unsigned int i;
u32 *aliases32;
- u16 *aliases16;
int ret;
- ret = fwnode_property_count_u32(dev_fwnode(dev), "i2c-alias-pool");
- if (ret < 0) {
- dev_err(dev, "Failed to count 'i2c-alias-pool' property: %d\n",
- ret);
+ if (!fwnode_property_present(dev_fwnode(dev), "i2c-alias-pool")) {
+ num_aliases = 0;
+ } else {
+ ret = fwnode_property_count_u32(dev_fwnode(dev), "i2c-alias-pool");
+ if (ret < 0) {
+ dev_err(dev, "Failed to count 'i2c-alias-pool' property: %d\n",
+ ret);
+ return ret;
+ }
+
+ num_aliases = ret;
+ }
+
+ alias_pool = i2c_atr_alloc_alias_pool(num_aliases, true);
+ if (IS_ERR(alias_pool)) {
+ ret = PTR_ERR(alias_pool);
+ dev_err(dev, "Failed to allocate alias pool, err %d\n", ret);
return ret;
}
- num_aliases = ret;
+ atr->alias_pool = alias_pool;
- if (!num_aliases)
+ if (!alias_pool->size)
return 0;
aliases32 = kcalloc(num_aliases, sizeof(*aliases32), GFP_KERNEL);
- if (!aliases32)
- return -ENOMEM;
+ if (!aliases32) {
+ ret = -ENOMEM;
+ goto err_free_alias_pool;
+ }
ret = fwnode_property_read_u32_array(dev_fwnode(dev), "i2c-alias-pool",
aliases32, num_aliases);
@@ -457,48 +687,33 @@ static int i2c_atr_parse_alias_pool(struct i2c_atr *atr)
goto err_free_aliases32;
}
- aliases16 = kcalloc(num_aliases, sizeof(*aliases16), GFP_KERNEL);
- if (!aliases16) {
- ret = -ENOMEM;
- goto err_free_aliases32;
- }
-
for (i = 0; i < num_aliases; i++) {
if (!(aliases32[i] & 0xffff0000)) {
- aliases16[i] = aliases32[i];
+ alias_pool->aliases[i] = aliases32[i];
continue;
}
dev_err(dev, "Failed to parse 'i2c-alias-pool' property: I2C flags are not supported\n");
ret = -EINVAL;
- goto err_free_aliases16;
- }
-
- alias_use_mask = bitmap_zalloc(num_aliases, GFP_KERNEL);
- if (!alias_use_mask) {
- ret = -ENOMEM;
- goto err_free_aliases16;
+ goto err_free_aliases32;
}
kfree(aliases32);
- atr->num_aliases = num_aliases;
- atr->aliases = aliases16;
- atr->alias_use_mask = alias_use_mask;
-
- dev_dbg(dev, "i2c-alias-pool has %zu aliases", atr->num_aliases);
+ dev_dbg(dev, "i2c-alias-pool has %zu aliases\n", alias_pool->size);
return 0;
-err_free_aliases16:
- kfree(aliases16);
err_free_aliases32:
kfree(aliases32);
+err_free_alias_pool:
+ i2c_atr_free_alias_pool(alias_pool);
return ret;
}
struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
- const struct i2c_atr_ops *ops, int max_adapters)
+ const struct i2c_atr_ops *ops, int max_adapters,
+ u32 flags)
{
struct i2c_atr *atr;
int ret;
@@ -506,20 +721,21 @@ struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
if (max_adapters > ATR_MAX_ADAPTERS)
return ERR_PTR(-EINVAL);
- if (!ops || !ops->attach_client || !ops->detach_client)
+ if (!ops || !ops->attach_addr || !ops->detach_addr)
return ERR_PTR(-EINVAL);
atr = kzalloc(struct_size(atr, adapter, max_adapters), GFP_KERNEL);
if (!atr)
return ERR_PTR(-ENOMEM);
- mutex_init(&atr->lock);
- spin_lock_init(&atr->alias_mask_lock);
+ lockdep_register_key(&atr->lock_key);
+ mutex_init_with_key(&atr->lock, &atr->lock_key);
atr->parent = parent;
atr->dev = dev;
atr->ops = ops;
atr->max_adapters = max_adapters;
+ atr->flags = flags;
if (parent->algo->master_xfer)
atr->algo.master_xfer = i2c_atr_master_xfer;
@@ -534,15 +750,15 @@ struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
atr->i2c_nb.notifier_call = i2c_atr_bus_notifier_call;
ret = bus_register_notifier(&i2c_bus_type, &atr->i2c_nb);
if (ret)
- goto err_free_aliases;
+ goto err_free_alias_pool;
return atr;
-err_free_aliases:
- bitmap_free(atr->alias_use_mask);
- kfree(atr->aliases);
+err_free_alias_pool:
+ i2c_atr_free_alias_pool(atr->alias_pool);
err_destroy_mutex:
mutex_destroy(&atr->lock);
+ lockdep_unregister_key(&atr->lock_key);
kfree(atr);
return ERR_PTR(ret);
@@ -557,22 +773,22 @@ void i2c_atr_delete(struct i2c_atr *atr)
WARN_ON(atr->adapter[i]);
bus_unregister_notifier(&i2c_bus_type, &atr->i2c_nb);
- bitmap_free(atr->alias_use_mask);
- kfree(atr->aliases);
+ i2c_atr_free_alias_pool(atr->alias_pool);
mutex_destroy(&atr->lock);
+ lockdep_unregister_key(&atr->lock_key);
kfree(atr);
}
EXPORT_SYMBOL_NS_GPL(i2c_atr_delete, "I2C_ATR");
-int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
- struct device *adapter_parent,
- struct fwnode_handle *bus_handle)
+int i2c_atr_add_adapter(struct i2c_atr *atr, struct i2c_atr_adap_desc *desc)
{
+ struct fwnode_handle *bus_handle = desc->bus_handle;
struct i2c_adapter *parent = atr->parent;
+ char symlink_name[ATR_MAX_SYMLINK_LEN];
struct device *dev = atr->dev;
+ u32 chan_id = desc->chan_id;
struct i2c_atr_chan *chan;
- char symlink_name[ATR_MAX_SYMLINK_LEN];
- int ret;
+ int ret, idx;
if (chan_id >= atr->max_adapters) {
dev_err(dev, "No room for more i2c-atr adapters\n");
@@ -588,20 +804,23 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
if (!chan)
return -ENOMEM;
- if (!adapter_parent)
- adapter_parent = dev;
+ if (!desc->parent)
+ desc->parent = dev;
chan->atr = atr;
chan->chan_id = chan_id;
- INIT_LIST_HEAD(&chan->alias_list);
- mutex_init(&chan->orig_addrs_lock);
+ INIT_LIST_HEAD(&chan->alias_pairs);
+ lockdep_register_key(&chan->alias_pairs_lock_key);
+ lockdep_register_key(&chan->orig_addrs_lock_key);
+ mutex_init_with_key(&chan->alias_pairs_lock, &chan->alias_pairs_lock_key);
+ mutex_init_with_key(&chan->orig_addrs_lock, &chan->orig_addrs_lock_key);
snprintf(chan->adap.name, sizeof(chan->adap.name), "i2c-%d-atr-%d",
i2c_adapter_id(parent), chan_id);
chan->adap.owner = THIS_MODULE;
chan->adap.algo = &atr->algo;
chan->adap.algo_data = chan;
- chan->adap.dev.parent = adapter_parent;
+ chan->adap.dev.parent = desc->parent;
chan->adap.retries = parent->retries;
chan->adap.timeout = parent->timeout;
chan->adap.quirks = parent->quirks;
@@ -628,13 +847,26 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
fwnode_handle_put(atr_node);
}
+ if (desc->num_aliases > 0) {
+ chan->alias_pool = i2c_atr_alloc_alias_pool(desc->num_aliases, false);
+ if (IS_ERR(chan->alias_pool)) {
+ ret = PTR_ERR(chan->alias_pool);
+ goto err_fwnode_put;
+ }
+
+ for (idx = 0; idx < desc->num_aliases; idx++)
+ chan->alias_pool->aliases[idx] = desc->aliases[idx];
+ } else {
+ chan->alias_pool = atr->alias_pool;
+ }
+
atr->adapter[chan_id] = &chan->adap;
ret = i2c_add_adapter(&chan->adap);
if (ret) {
dev_err(dev, "failed to add atr-adapter %u (error=%d)\n",
chan_id, ret);
- goto err_fwnode_put;
+ goto err_free_alias_pool;
}
snprintf(symlink_name, sizeof(symlink_name), "channel-%u",
@@ -651,9 +883,15 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
return 0;
+err_free_alias_pool:
+ if (!chan->alias_pool->shared)
+ i2c_atr_free_alias_pool(chan->alias_pool);
err_fwnode_put:
fwnode_handle_put(dev_fwnode(&chan->adap.dev));
mutex_destroy(&chan->orig_addrs_lock);
+ mutex_destroy(&chan->alias_pairs_lock);
+ lockdep_unregister_key(&chan->orig_addrs_lock_key);
+ lockdep_unregister_key(&chan->alias_pairs_lock_key);
kfree(chan);
return ret;
}
@@ -683,10 +921,16 @@ void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id)
i2c_del_adapter(adap);
+ if (!chan->alias_pool->shared)
+ i2c_atr_free_alias_pool(chan->alias_pool);
+
atr->adapter[chan_id] = NULL;
fwnode_handle_put(fwnode);
mutex_destroy(&chan->orig_addrs_lock);
+ mutex_destroy(&chan->alias_pairs_lock);
+ lockdep_unregister_key(&chan->orig_addrs_lock_key);
+ lockdep_unregister_key(&chan->alias_pairs_lock_key);
kfree(chan->orig_addrs);
kfree(chan);
}
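[Sketch, not from this series: after the i2c-atr rework above, the backend callbacks operate on raw child-bus addresses (attach_addr/detach_addr) and i2c_atr_new() gains a flags argument. A minimal, hypothetical foo_* backend under the new API might look like this:]

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/i2c-atr.h>

/* Hypothetical translator backend: program or remove one translation entry.
 * The callbacks now receive the child-bus address, not a struct i2c_client. */
static int foo_attach_addr(struct i2c_atr *atr, u32 chan_id, u16 addr, u16 alias)
{
	/* write addr -> alias into the channel's translation table */
	return 0;
}

static void foo_detach_addr(struct i2c_atr *atr, u32 chan_id, u16 addr)
{
	/* clear the translation entry for addr */
}

static const struct i2c_atr_ops foo_atr_ops = {
	.attach_addr = foo_attach_addr,
	.detach_addr = foo_detach_addr,
};

static int foo_create_atr(struct i2c_adapter *parent, struct device *dev)
{
	struct i2c_atr *atr;

	/* flags = 0: aliases are assigned dynamically per transaction;
	 * I2C_ATR_F_STATIC or I2C_ATR_F_PASSTHROUGH select other policies. */
	atr = i2c_atr_new(parent, dev, &foo_atr_ops, 2, 0);
	if (IS_ERR(atr))
		return PTR_ERR(atr);

	return 0;
}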
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 7ad1ad5c8c3f..2ad2b1838f0f 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -26,14 +26,13 @@
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/irqflags.h>
+#include <linux/irq.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/pm_domain.h>
@@ -42,6 +41,7 @@
#include <linux/property.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "i2c-core.h"
@@ -490,6 +490,7 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
static int i2c_device_probe(struct device *dev)
{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct i2c_client *client = i2c_verify_client(dev);
struct i2c_driver *driver;
bool do_power_on;
@@ -508,11 +509,11 @@ static int i2c_device_probe(struct device *dev)
/* Keep adapter active when Host Notify is required */
pm_runtime_get_sync(&client->adapter->dev);
irq = i2c_smbus_host_notify_to_irq(client);
- } else if (dev->of_node) {
- irq = of_irq_get_byname(dev->of_node, "irq");
+ } else if (is_of_node(fwnode)) {
+ irq = fwnode_irq_get_byname(fwnode, "irq");
if (irq == -EINVAL || irq == -ENODATA)
- irq = of_irq_get(dev->of_node, 0);
- } else if (ACPI_COMPANION(dev)) {
+ irq = fwnode_irq_get(fwnode, 0);
+ } else if (is_acpi_device_node(fwnode)) {
bool wake_capable;
irq = i2c_acpi_get_irq(client, &wake_capable);
@@ -520,7 +521,7 @@ static int i2c_device_probe(struct device *dev)
client->flags |= I2C_CLIENT_WAKE;
}
if (irq == -EPROBE_DEFER) {
- status = irq;
+ status = dev_err_probe(dev, irq, "can't get irq\n");
goto put_sync_adapter;
}
@@ -546,9 +547,9 @@ static int i2c_device_probe(struct device *dev)
if (client->flags & I2C_CLIENT_WAKE) {
int wakeirq;
- wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
+ wakeirq = fwnode_irq_get_byname(fwnode, "wakeup");
if (wakeirq == -EPROBE_DEFER) {
- status = wakeirq;
+ status = dev_err_probe(dev, wakeirq, "can't get wakeirq\n");
goto put_sync_adapter;
}
@@ -567,7 +568,7 @@ static int i2c_device_probe(struct device *dev)
dev_dbg(dev, "probe\n");
- status = of_clk_set_defaults(dev->of_node, false);
+ status = of_clk_set_defaults(to_of_node(fwnode), false);
if (status < 0)
goto err_clear_wakeup_irq;
@@ -961,6 +962,7 @@ static void i2c_unlock_addr(struct i2c_adapter *adap, unsigned short addr,
struct i2c_client *
i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
{
+ struct fwnode_handle *fwnode = info->fwnode;
struct i2c_client *client;
bool need_put = false;
int status;
@@ -1001,18 +1003,18 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
client->dev.parent = &client->adapter->dev;
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
- client->dev.of_node = of_node_get(info->of_node);
- client->dev.fwnode = info->fwnode;
device_enable_async_suspend(&client->dev);
+ device_set_node(&client->dev, fwnode_handle_get(fwnode));
+
if (info->swnode) {
status = device_add_software_node(&client->dev, info->swnode);
if (status) {
dev_err(&adap->dev,
"Failed to add software node to client %s: %d\n",
client->name, status);
- goto out_err_put_of_node;
+ goto out_err_put_fwnode;
}
}
@@ -1031,8 +1033,8 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
out_remove_swnode:
device_remove_software_node(&client->dev);
need_put = true;
-out_err_put_of_node:
- of_node_put(info->of_node);
+out_err_put_fwnode:
+ fwnode_handle_put(fwnode);
out_err:
dev_err(&adap->dev,
"Failed to register i2c client %s at 0x%02x (%d)\n",
@@ -1054,16 +1056,17 @@ EXPORT_SYMBOL_GPL(i2c_new_client_device);
*/
void i2c_unregister_device(struct i2c_client *client)
{
+ struct fwnode_handle *fwnode;
+
if (IS_ERR_OR_NULL(client))
return;
- if (client->dev.of_node) {
- of_node_clear_flag(client->dev.of_node, OF_POPULATED);
- of_node_put(client->dev.of_node);
- }
-
- if (ACPI_COMPANION(&client->dev))
- acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
+ fwnode = dev_fwnode(&client->dev);
+ if (is_of_node(fwnode))
+ of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
+ else if (is_acpi_device_node(fwnode))
+ acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
+ fwnode_handle_put(fwnode);
device_remove_software_node(&client->dev);
device_unregister(&client->dev);
@@ -1209,11 +1212,9 @@ struct i2c_client *i2c_new_ancillary_device(struct i2c_client *client,
u32 addr = default_addr;
int i;
- if (np) {
- i = of_property_match_string(np, "reg-names", name);
- if (i >= 0)
- of_property_read_u32_index(np, "reg", i, &addr);
- }
+ i = of_property_match_string(np, "reg-names", name);
+ if (i >= 0)
+ of_property_read_u32_index(np, "reg", i, &addr);
dev_dbg(&client->adapter->dev, "Address for %s : 0x%x\n", name, addr);
return i2c_new_dummy_device(client->adapter, addr);
@@ -1651,12 +1652,10 @@ int i2c_add_adapter(struct i2c_adapter *adapter)
struct device *dev = &adapter->dev;
int id;
- if (dev->of_node) {
- id = of_alias_get_id(dev->of_node, "i2c");
- if (id >= 0) {
- adapter->nr = id;
- return __i2c_add_numbered_adapter(adapter);
- }
+ id = of_alias_get_id(dev->of_node, "i2c");
+ if (id >= 0) {
+ adapter->nr = id;
+ return __i2c_add_numbered_adapter(adapter);
}
mutex_lock(&core_lock);
@@ -2146,7 +2145,7 @@ static int i2c_quirk_error(struct i2c_adapter *adap, struct i2c_msg *msg, char *
{
dev_err_ratelimited(&adap->dev, "adapter quirk: %s (addr 0x%04x, size %u, %s)\n",
err_msg, msg->addr, msg->len,
- msg->flags & I2C_M_RD ? "read" : "write");
+ str_read_write(msg->flags & I2C_M_RD));
return -EOPNOTSUPP;
}
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index 02feee6c9ba9..eb7fb202355f 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -49,7 +49,6 @@ int of_i2c_get_board_info(struct device *dev, struct device_node *node,
}
info->addr = addr;
- info->of_node = node;
info->fwnode = of_fwnode_handle(node);
if (of_property_read_bool(node, "host-notify"))
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index faefe1dfa8e5..7ee6b992b835 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/of.h>
+#include <linux/property.h>
#include "i2c-core.h"
@@ -108,15 +109,18 @@ EXPORT_SYMBOL_GPL(i2c_slave_event);
*/
bool i2c_detect_slave_mode(struct device *dev)
{
- if (IS_BUILTIN(CONFIG_OF) && dev->of_node) {
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ if (is_of_node(fwnode)) {
+ struct fwnode_handle *child __free(fwnode_handle) = NULL;
u32 reg;
- for_each_child_of_node_scoped(dev->of_node, child) {
- of_property_read_u32(child, "reg", &reg);
+ fwnode_for_each_child_node(fwnode, child) {
+ fwnode_property_read_u32(child, "reg", &reg);
if (reg & I2C_OWN_SLAVE_ADDRESS)
return true;
}
- } else if (IS_BUILTIN(CONFIG_ACPI) && ACPI_HANDLE(dev)) {
+ } else if (is_acpi_device_node(fwnode)) {
dev_dbg(dev, "ACPI slave is not supported yet\n");
}
return false;
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index e73afbefe222..71eb1ef56f0c 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -16,6 +16,7 @@
#include <linux/i2c-smbus.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "i2c-core.h"
@@ -433,7 +434,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
case I2C_SMBUS_I2C_BLOCK_DATA:
if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
dev_err(&adapter->dev, "Invalid block %s size %d\n",
- read_write == I2C_SMBUS_READ ? "read" : "write",
+ str_read_write(read_write == I2C_SMBUS_READ),
data->block[0]);
return -EINVAL;
}
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 7d40e7aa3799..0316b347f9e7 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -372,12 +372,13 @@ EXPORT_SYMBOL_GPL(i2c_free_slave_host_notify_device);
* - Only works on systems with 1 to 8 memory slots
*/
#if IS_ENABLED(CONFIG_DMI)
-void i2c_register_spd(struct i2c_adapter *adap)
+static void i2c_register_spd(struct i2c_adapter *adap, bool write_disabled)
{
int n, slot_count = 0, dimm_count = 0;
u16 handle;
u8 common_mem_type = 0x0, mem_type;
u64 mem_size;
+ bool instantiate = true;
const char *name;
while ((handle = dmi_memdev_handle(slot_count)) != 0xffff) {
@@ -438,6 +439,7 @@ void i2c_register_spd(struct i2c_adapter *adap)
case 0x22: /* DDR5 */
case 0x23: /* LPDDR5 */
name = "spd5118";
+ instantiate = !write_disabled;
break;
default:
dev_info(&adap->dev,
@@ -461,6 +463,9 @@ void i2c_register_spd(struct i2c_adapter *adap)
addr_list[0] = 0x50 + n;
addr_list[1] = I2C_CLIENT_END;
+ if (!instantiate)
+ continue;
+
if (!IS_ERR(i2c_new_scanned_device(adap, &info, addr_list, NULL))) {
dev_info(&adap->dev,
"Successfully instantiated SPD at 0x%hx\n",
@@ -469,7 +474,19 @@ void i2c_register_spd(struct i2c_adapter *adap)
}
}
}
-EXPORT_SYMBOL_GPL(i2c_register_spd);
+
+void i2c_register_spd_write_disable(struct i2c_adapter *adap)
+{
+ i2c_register_spd(adap, true);
+}
+EXPORT_SYMBOL_GPL(i2c_register_spd_write_disable);
+
+void i2c_register_spd_write_enable(struct i2c_adapter *adap)
+{
+ i2c_register_spd(adap, false);
+}
+EXPORT_SYMBOL_GPL(i2c_register_spd_write_enable);
+
#endif
MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index 8a87f19bf5d5..c688af270a11 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -85,13 +85,13 @@ static int ltc4306_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(1 - offset));
}
-static void ltc4306_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int ltc4306_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ltc4306 *data = gpiochip_get_data(chip);
- regmap_update_bits(data->regmap, LTC_REG_CONFIG, BIT(5 - offset),
- value ? BIT(5 - offset) : 0);
+ return regmap_update_bits(data->regmap, LTC_REG_CONFIG,
+ BIT(5 - offset), value ? BIT(5 - offset) : 0);
}
static int ltc4306_gpio_get_direction(struct gpio_chip *chip,
@@ -164,7 +164,7 @@ static int ltc4306_gpio_init(struct ltc4306 *data)
data->gpiochip.direction_input = ltc4306_gpio_direction_input;
data->gpiochip.direction_output = ltc4306_gpio_direction_output;
data->gpiochip.get = ltc4306_gpio_get;
- data->gpiochip.set = ltc4306_gpio_set;
+ data->gpiochip.set_rv = ltc4306_gpio_set;
data->gpiochip.set_config = ltc4306_gpio_set_config;
data->gpiochip.owner = THIS_MODULE;
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index db95113a5b49..5bb26af0f532 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -442,9 +442,9 @@ static int pca954x_irq_setup(struct i2c_mux_core *muxc)
raw_spin_lock_init(&data->lock);
- data->irq = irq_domain_add_linear(client->dev.of_node,
- data->chip->nchans,
- &irq_domain_simple_ops, data);
+ data->irq = irq_domain_create_linear(of_fwnode_handle(client->dev.of_node),
+ data->chip->nchans,
+ &irq_domain_simple_ops, data);
if (!data->irq)
return -ENODEV;
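[Aside, a hedged sketch of the fwnode-based IRQ domain constructor used in the pca954x hunk above; foo_create_domain() is a hypothetical wrapper:]

#include <linux/device.h>
#include <linux/irqdomain.h>
#include <linux/property.h>

/* irq_domain_create_linear() takes a fwnode_handle, so the same call works
 * for OF and non-OF firmware descriptions; the replaced
 * irq_domain_add_linear() wanted a struct device_node. */
static struct irq_domain *foo_create_domain(struct device *dev,
					    unsigned int nirqs, void *data)
{
	return irq_domain_create_linear(dev_fwnode(dev), nirqs,
					&irq_domain_simple_ops, data);
}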
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 77da199c7413..7b30db3253af 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config CDNS_I3C_MASTER
tristate "Cadence I3C master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
help
@@ -9,7 +8,6 @@ config CDNS_I3C_MASTER
config DW_I3C_MASTER
tristate "Synospsys DesignWare I3C master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
# ALPHA and PARISC needs {read,write}sl()
@@ -38,7 +36,6 @@ config AST2600_I3C_MASTER
config SVC_I3C_MASTER
tristate "Silvaco I3C Dual-Role Master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
help
@@ -46,7 +43,6 @@ config SVC_I3C_MASTER
config MIPI_I3C_HCI
tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)"
- depends on I3C
depends on HAS_IOMEM
help
Support for hardware following the MIPI Aliance's I3C Host Controller
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index a71226d7ca59..bc4538694540 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -78,7 +78,7 @@
#define INTR_SIGNAL_ENABLE 0x28
#define INTR_FORCE 0x2c
#define INTR_HC_CMD_SEQ_UFLOW_STAT BIT(12) /* Cmd Sequence Underflow */
-#define INTR_HC_RESET_CANCEL BIT(11) /* HC Cancelled Reset */
+#define INTR_HC_SEQ_CANCEL BIT(11) /* HC Cancelled Transaction Sequence */
#define INTR_HC_INTERNAL_ERR BIT(10) /* HC Internal Error */
#define DAT_SECTION 0x30 /* Device Address Table */
@@ -590,26 +590,27 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
u32 val;
val = reg_read(INTR_STATUS);
+ reg_write(INTR_STATUS, val);
DBG("INTR_STATUS = %#x", val);
- if (val) {
- reg_write(INTR_STATUS, val);
- }
+ if (val)
+ result = IRQ_HANDLED;
- if (val & INTR_HC_RESET_CANCEL) {
- DBG("cancelled reset");
- val &= ~INTR_HC_RESET_CANCEL;
+ if (val & INTR_HC_SEQ_CANCEL) {
+ dev_dbg(&hci->master.dev,
+ "Host Controller Cancelled Transaction Sequence\n");
+ val &= ~INTR_HC_SEQ_CANCEL;
}
if (val & INTR_HC_INTERNAL_ERR) {
dev_err(&hci->master.dev, "Host Controller Internal Error\n");
val &= ~INTR_HC_INTERNAL_ERR;
}
- hci->io->irq_handler(hci);
-
if (val)
- dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
- else
+ dev_warn_once(&hci->master.dev,
+ "unexpected INTR_STATUS %#x\n", val);
+
+ if (hci->io->irq_handler(hci))
result = IRQ_HANDLED;
return result;
@@ -699,9 +700,14 @@ static int i3c_hci_init(struct i3c_hci *hci)
if (ret)
return -ENXIO;
- /* Disable all interrupts and allow all signal updates */
+ /* Disable all interrupts */
reg_write(INTR_SIGNAL_ENABLE, 0x0);
- reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+ /*
+	 * Only allow signal updates for bits 31:10 because
+	 * bits 0:9 are reserved in IP version >= 0.8, and
+	 * bits 0:5 are defined in IP version < 0.8 but not handled by the PIO code
+ */
+ reg_write(INTR_STATUS_ENABLE, GENMASK(31, 10));
/* Make sure our data ordering fits the host's */
regval = reg_read(HC_CONTROL);
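A tiny standalone check (not part of the patch) of the interrupt-status mask programmed above: GENMASK(31, 10) sets bits 31..10 and leaves the reserved/unhandled low bits clear.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = ~0u << 10;	/* GENMASK(31, 10) on a 32-bit register */

	printf("INTR_STATUS_ENABLE mask = %#010x\n", mask);	/* 0xfffffc00 */
	return 0;
}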
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 85e16de208d3..7e1a7cb94b43 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -201,11 +201,10 @@ struct svc_i3c_drvdata {
* @addrs: Array containing the dynamic addresses of each attached device
* @descs: Array of descriptors, one per attached device
* @hj_work: Hot-join work
- * @ibi_work: IBI work
* @irq: Main interrupt
- * @pclk: System clock
+ * @num_clks: I3C clock number
* @fclk: Fast clock (bus)
- * @sclk: Slow clock (other events)
+ * @clks: I3C clock array
* @xferqueue: Transfer queue structure
* @xferqueue.list: List member
* @xferqueue.cur: Current ongoing transfer
@@ -229,11 +228,10 @@ struct svc_i3c_master {
u8 addrs[SVC_I3C_MAX_DEVS];
struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
struct work_struct hj_work;
- struct work_struct ibi_work;
int irq;
- struct clk *pclk;
+ int num_clks;
struct clk *fclk;
- struct clk *sclk;
+ struct clk_bulk_data *clks;
struct {
struct list_head list;
struct svc_i3c_xfer *cur;
@@ -487,9 +485,8 @@ static int svc_i3c_master_handle_ibi_won(struct svc_i3c_master *master, u32 msta
return ret;
}
-static void svc_i3c_master_ibi_work(struct work_struct *work)
+static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
{
- struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
struct svc_i3c_i2c_dev_data *data;
unsigned int ibitype, ibiaddr;
struct i3c_dev_desc *dev;
@@ -504,7 +501,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
* schedule during the whole I3C transaction, otherwise, the I3C bus timeout may happen if
* any irq or schedule happen during transaction.
*/
- guard(spinlock_irqsave)(&master->xferqueue.lock);
+ guard(spinlock)(&master->xferqueue.lock);
/*
* IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
@@ -530,7 +527,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
if (ret) {
dev_err(master->dev, "Timeout when polling for IBIWON\n");
svc_i3c_master_emit_stop(master);
- goto reenable_ibis;
+ return;
}
status = readl(master->regs + SVC_I3C_MSTATUS);
@@ -574,17 +571,17 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
svc_i3c_master_emit_stop(master);
- goto reenable_ibis;
+ return;
}
/* Handle the non critical tasks */
switch (ibitype) {
case SVC_I3C_MSTATUS_IBITYPE_IBI:
+ svc_i3c_master_emit_stop(master);
if (dev) {
i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
master->ibi.tbq_slot = NULL;
}
- svc_i3c_master_emit_stop(master);
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
svc_i3c_master_emit_stop(master);
@@ -597,9 +594,6 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
default:
break;
}
-
-reenable_ibis:
- svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
}
static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
@@ -618,10 +612,12 @@ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
!SVC_I3C_MSTATUS_STATE_SLVREQ(active))
return IRQ_HANDLED;
- svc_i3c_master_disable_interrupts(master);
-
- /* Handle the interrupt in a non atomic context */
- queue_work(master->base.wq, &master->ibi_work);
+ /*
+ * The SDA line remains low until the request is processed.
+ * Receive the request in the interrupt context to respond promptly
+ * and restore the bus to idle state.
+ */
+ svc_i3c_master_ibi_isr(master);
return IRQ_HANDLED;
}
@@ -1281,9 +1277,9 @@ static int svc_i3c_master_write(struct svc_i3c_master *master,
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
bool rnw, unsigned int xfer_type, u8 addr,
u8 *in, const u8 *out, unsigned int xfer_len,
- unsigned int *actual_len, bool continued)
+ unsigned int *actual_len, bool continued, bool repeat_start)
{
- int retry = 2;
+ int retry = repeat_start ? 1 : 2;
u32 reg;
int ret;
@@ -1468,7 +1464,7 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
cmd->addr, cmd->in, cmd->out,
cmd->len, &cmd->actual_len,
- cmd->continued);
+ cmd->continued, i > 0);
/* cmd->xfer is NULL if I2C or CCC transfer */
if (cmd->xfer)
cmd->xfer->actual_len = cmd->actual_len;
@@ -1875,42 +1871,11 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = {
.set_speed = svc_i3c_master_set_speed,
};
-static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
-{
- int ret = 0;
-
- ret = clk_prepare_enable(master->pclk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(master->fclk);
- if (ret) {
- clk_disable_unprepare(master->pclk);
- return ret;
- }
-
- ret = clk_prepare_enable(master->sclk);
- if (ret) {
- clk_disable_unprepare(master->pclk);
- clk_disable_unprepare(master->fclk);
- return ret;
- }
-
- return 0;
-}
-
-static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
-{
- clk_disable_unprepare(master->pclk);
- clk_disable_unprepare(master->fclk);
- clk_disable_unprepare(master->sclk);
-}
-
static int svc_i3c_master_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct svc_i3c_master *master;
- int ret;
+ int ret, i;
master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
if (!master)
@@ -1924,30 +1889,33 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
- master->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(master->pclk))
- return PTR_ERR(master->pclk);
+ master->num_clks = devm_clk_bulk_get_all(dev, &master->clks);
+ if (master->num_clks < 0)
+ return dev_err_probe(dev, -EINVAL, "can't get I3C clocks\n");
- master->fclk = devm_clk_get(dev, "fast_clk");
+ for (i = 0; i < master->num_clks; i++) {
+ if (!strcmp(master->clks[i].id, "fast_clk"))
+ break;
+ }
+
+ if (i == master->num_clks)
+ return dev_err_probe(dev, -EINVAL,
+ "can't get I3C peripheral clock\n");
+
+ master->fclk = master->clks[i].clk;
if (IS_ERR(master->fclk))
return PTR_ERR(master->fclk);
- master->sclk = devm_clk_get(dev, "slow_clk");
- if (IS_ERR(master->sclk))
- return PTR_ERR(master->sclk);
-
master->irq = platform_get_irq(pdev, 0);
if (master->irq < 0)
return master->irq;
master->dev = dev;
-
- ret = svc_i3c_master_prepare_clks(master);
+ ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "can't enable I3C clocks\n");
INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
- INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
mutex_init(&master->lock);
ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
@@ -1998,7 +1966,7 @@ rpm_disable:
pm_runtime_set_suspended(&pdev->dev);
err_disable_clks:
- svc_i3c_master_unprepare_clks(master);
+ clk_bulk_disable_unprepare(master->num_clks, master->clks);
return ret;
}
@@ -2036,7 +2004,7 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
struct svc_i3c_master *master = dev_get_drvdata(dev);
svc_i3c_save_regs(master);
- svc_i3c_master_unprepare_clks(master);
+ clk_bulk_disable_unprepare(master->num_clks, master->clks);
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -2045,9 +2013,12 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
{
struct svc_i3c_master *master = dev_get_drvdata(dev);
+ int ret;
pinctrl_pm_select_default_state(dev);
- svc_i3c_master_prepare_clks(master);
+ ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
+ if (ret)
+ return ret;
svc_i3c_restore_regs(master);
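The probe rework above trades three named devm_clk_get() calls for devm_clk_bulk_get_all(), then scans the returned array for "fast_clk" because that clock's rate is still needed directly. A minimal kernel-style sketch of the same pattern; the demo_priv structure is hypothetical.

static int demo_get_clocks(struct device *dev, struct demo_priv *priv)
{
	int i, ret;

	priv->num_clks = devm_clk_bulk_get_all(dev, &priv->clks);
	if (priv->num_clks < 0)
		return priv->num_clks;

	ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
	if (ret)
		return ret;

	/* Pick out the one clock whose rate the driver queries later. */
	for (i = 0; i < priv->num_clks; i++)
		if (!strcmp(priv->clks[i].id, "fast_clk"))
			priv->fclk = priv->clks[i].clk;

	return priv->fclk ? 0 : -EINVAL;
}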
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 976f5be54e36..8ccb483204fa 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -48,14 +48,17 @@
#include <trace/events/power.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
+#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
-#include <asm/cpuid.h>
+#include <linux/sysfs.h>
+#include <asm/cpuid/api.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
+#include <asm/msr.h>
#include <asm/tsc.h>
#include <asm/fpu/api.h>
#include <asm/smp.h>
@@ -92,9 +95,15 @@ struct idle_cpu {
*/
unsigned long auto_demotion_disable_flags;
bool disable_promotion_to_c1e;
+ bool c1_demotion_supported;
bool use_acpi;
};
+static bool c1_demotion_supported;
+static DEFINE_MUTEX(c1_demotion_mutex);
+
+static struct device *sysfs_root __initdata;
+
static const struct idle_cpu *icpu __initdata;
static struct cpuidle_state *cpuidle_state_table __initdata;
@@ -1549,18 +1558,21 @@ static const struct idle_cpu idle_cpu_gmt __initconst = {
static const struct idle_cpu idle_cpu_spr __initconst = {
.state_table = spr_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
static const struct idle_cpu idle_cpu_gnr __initconst = {
.state_table = gnr_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
static const struct idle_cpu idle_cpu_gnrd __initconst = {
.state_table = gnrd_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
@@ -1599,12 +1611,14 @@ static const struct idle_cpu idle_cpu_snr __initconst = {
static const struct idle_cpu idle_cpu_grr __initconst = {
.state_table = grr_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
static const struct idle_cpu idle_cpu_srf __initconst = {
.state_table = srf_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
@@ -1928,35 +1942,35 @@ static void __init bxt_idle_state_table_update(void)
unsigned long long msr;
unsigned int usec;
- rdmsrl(MSR_PKGC6_IRTL, msr);
+ rdmsrq(MSR_PKGC6_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[2].exit_latency = usec;
bxt_cstates[2].target_residency = usec;
}
- rdmsrl(MSR_PKGC7_IRTL, msr);
+ rdmsrq(MSR_PKGC7_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[3].exit_latency = usec;
bxt_cstates[3].target_residency = usec;
}
- rdmsrl(MSR_PKGC8_IRTL, msr);
+ rdmsrq(MSR_PKGC8_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[4].exit_latency = usec;
bxt_cstates[4].target_residency = usec;
}
- rdmsrl(MSR_PKGC9_IRTL, msr);
+ rdmsrq(MSR_PKGC9_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[5].exit_latency = usec;
bxt_cstates[5].target_residency = usec;
}
- rdmsrl(MSR_PKGC10_IRTL, msr);
+ rdmsrq(MSR_PKGC10_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[6].exit_latency = usec;
@@ -1984,7 +1998,7 @@ static void __init sklh_idle_state_table_update(void)
if ((mwait_substates & (0xF << 28)) == 0)
return;
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr);
/* PC10 is not enabled in PKG C-state limit */
if ((msr & 0xF) != 8)
@@ -1996,7 +2010,7 @@ static void __init sklh_idle_state_table_update(void)
/* if SGX is present */
if (ebx & (1 << 2)) {
- rdmsrl(MSR_IA32_FEAT_CTL, msr);
+ rdmsrq(MSR_IA32_FEAT_CTL, msr);
/* if SGX is enabled */
if (msr & (1 << 18))
@@ -2015,7 +2029,7 @@ static void __init skx_idle_state_table_update(void)
{
unsigned long long msr;
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr);
/*
* 000b: C0/C1 (no package C-state support)
@@ -2068,7 +2082,7 @@ static void __init spr_idle_state_table_update(void)
* C6. However, if PC6 is disabled, we update the numbers to match
* core C6.
*/
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr);
/* Limit value 2 and above allow for PC6. */
if ((msr & 0x7) < 2) {
@@ -2082,8 +2096,8 @@ static void __init spr_idle_state_table_update(void)
*/
static void __init byt_cht_auto_demotion_disable(void)
{
- wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
- wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
+ wrmsrq(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
+ wrmsrq(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
}
static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
@@ -2241,27 +2255,27 @@ static void auto_demotion_disable(void)
{
unsigned long long msr_bits;
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
msr_bits &= ~auto_demotion_disable_flags;
- wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+ wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
}
static void c1e_promotion_enable(void)
{
unsigned long long msr_bits;
- rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ rdmsrq(MSR_IA32_POWER_CTL, msr_bits);
msr_bits |= 0x2;
- wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ wrmsrq(MSR_IA32_POWER_CTL, msr_bits);
}
static void c1e_promotion_disable(void)
{
unsigned long long msr_bits;
- rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ rdmsrq(MSR_IA32_POWER_CTL, msr_bits);
msr_bits &= ~0x2;
- wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ wrmsrq(MSR_IA32_POWER_CTL, msr_bits);
}
/**
@@ -2324,6 +2338,88 @@ static void __init intel_idle_cpuidle_devices_uninit(void)
cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
}
+static void intel_c1_demotion_toggle(void *enable)
+{
+ unsigned long long msr_val;
+
+ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_val);
+ /*
+ * Enable/disable C1 undemotion along with C1 demotion, as this is the
+ * most sensible configuration in general.
+ */
+ if (enable)
+ msr_val |= NHM_C1_AUTO_DEMOTE | SNB_C1_AUTO_UNDEMOTE;
+ else
+ msr_val &= ~(NHM_C1_AUTO_DEMOTE | SNB_C1_AUTO_UNDEMOTE);
+ wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_val);
+}
+
+static ssize_t intel_c1_demotion_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool enable;
+ int err;
+
+ err = kstrtobool(buf, &enable);
+ if (err)
+ return err;
+
+ mutex_lock(&c1_demotion_mutex);
+ /* Enable/disable C1 demotion on all CPUs */
+ on_each_cpu(intel_c1_demotion_toggle, (void *)enable, 1);
+ mutex_unlock(&c1_demotion_mutex);
+
+ return count;
+}
+
+static ssize_t intel_c1_demotion_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long long msr_val;
+
+ /*
+ * Read the MSR value for a CPU and assume it is the same for all CPUs. Any other
+ * configuration would be a BIOS bug.
+ */
+ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_val);
+ return sysfs_emit(buf, "%d\n", !!(msr_val & NHM_C1_AUTO_DEMOTE));
+}
+static DEVICE_ATTR_RW(intel_c1_demotion);
+
+static int __init intel_idle_sysfs_init(void)
+{
+ int err;
+
+ if (!c1_demotion_supported)
+ return 0;
+
+ sysfs_root = bus_get_dev_root(&cpu_subsys);
+ if (!sysfs_root)
+ return 0;
+
+ err = sysfs_add_file_to_group(&sysfs_root->kobj,
+ &dev_attr_intel_c1_demotion.attr,
+ "cpuidle");
+ if (err) {
+ put_device(sysfs_root);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __init intel_idle_sysfs_uninit(void)
+{
+ if (!sysfs_root)
+ return;
+
+ sysfs_remove_file_from_group(&sysfs_root->kobj,
+ &dev_attr_intel_c1_demotion.attr,
+ "cpuidle");
+ put_device(sysfs_root);
+}
+
static int __init intel_idle_init(void)
{
const struct x86_cpu_id *id;
@@ -2374,6 +2470,8 @@ static int __init intel_idle_init(void)
auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
if (icpu->disable_promotion_to_c1e)
c1e_promotion = C1E_PROMOTION_DISABLE;
+ if (icpu->c1_demotion_supported)
+ c1_demotion_supported = true;
if (icpu->use_acpi || force_use_acpi)
intel_idle_acpi_cst_extract();
} else if (!intel_idle_acpi_cst_extract()) {
@@ -2387,6 +2485,10 @@ static int __init intel_idle_init(void)
if (!intel_idle_cpuidle_devices)
return -ENOMEM;
+ retval = intel_idle_sysfs_init();
+ if (retval)
+		pr_warn("failed to initialize sysfs");
+
intel_idle_cpuidle_driver_init(&intel_idle_driver);
retval = cpuidle_register_driver(&intel_idle_driver);
@@ -2411,6 +2513,7 @@ hp_setup_fail:
intel_idle_cpuidle_devices_uninit();
cpuidle_unregister_driver(&intel_idle_driver);
init_driver_fail:
+ intel_idle_sysfs_uninit();
free_percpu(intel_idle_cpuidle_devices);
return retval;
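The new attribute is added to the "cpuidle" group under the CPU subsystem root, so on parts that set c1_demotion_supported it should surface as /sys/devices/system/cpu/cpuidle/intel_c1_demotion; that path is inferred from the code above rather than stated in the patch. A small user-space sketch reading it:

#include <stdio.h>

int main(void)
{
	/* Path inferred from bus_get_dev_root(&cpu_subsys) + the "cpuidle" group. */
	FILE *f = fopen("/sys/devices/system/cpu/cpuidle/intel_c1_demotion", "r");
	char buf[8];

	if (!f)
		return 1;	/* attribute absent: CPU model not in the supported list */
	if (fgets(buf, sizeof(buf), f))
		printf("C1 demotion enabled: %s", buf);	/* prints "0" or "1" */
	fclose(f);
	return 0;
}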
diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
index 8601b9a8b8e7..5127e58eebc7 100644
--- a/drivers/iio/accel/adis16201.c
+++ b/drivers/iio/accel/adis16201.c
@@ -211,9 +211,9 @@ static const struct iio_chan_spec adis16201_channels[] = {
BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12),
ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12),
IIO_CHAN_SOFT_TIMESTAMP(7)
};
diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c
index e8cd21fa77a6..cbac622ef821 100644
--- a/drivers/iio/accel/adxl355_core.c
+++ b/drivers/iio/accel/adxl355_core.c
@@ -231,7 +231,7 @@ struct adxl355_data {
u8 transf_buf[3];
struct {
u8 buf[14];
- s64 ts;
+ aligned_s64 ts;
} buffer;
} __aligned(IIO_DMA_MINALIGN);
};
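Several scan structures in this series switch the trailing timestamp from s64 to aligned_s64 so the field keeps 8-byte alignment even on 32-bit ABIs where a plain 64-bit integer may only be 4-byte aligned. A standalone illustration follows; the 12-byte payload is made up for the demo and is not the driver's real layout.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t aligned_s64 __attribute__((aligned(8)));

struct scan_plain   { uint8_t buf[12]; int64_t     ts; };
struct scan_aligned { uint8_t buf[12]; aligned_s64 ts; };

int main(void)
{
	/* On i386 (-m32) this prints 12 vs 16; on x86-64 both are already 16. */
	printf("plain ts offset:   %zu\n", offsetof(struct scan_plain, ts));
	printf("aligned ts offset: %zu\n", offsetof(struct scan_aligned, ts));
	return 0;
}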
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index add4053e7a02..0c04b2bb7efb 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -601,18 +601,14 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr)
if (ret)
return ret;
+ st->odr = odr;
+
/* Activity timers depend on ODR */
ret = _adxl367_set_act_time_ms(st, st->act_time_ms);
if (ret)
return ret;
- ret = _adxl367_set_inact_time_ms(st, st->inact_time_ms);
- if (ret)
- return ret;
-
- st->odr = odr;
-
- return 0;
+ return _adxl367_set_inact_time_ms(st, st->inact_time_ms);
}
static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr)
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 48e4282964a0..bf1d3923a181 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -1226,8 +1226,11 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq)
if (ret)
return ret;
- if (device_property_read_bool(dev, "wakeup-source"))
- device_init_wakeup(dev, true);
+ if (device_property_read_bool(dev, "wakeup-source")) {
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
+ }
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 18559757f908..7fef2727f89e 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -45,7 +45,7 @@ struct ad7266_state {
*/
struct {
__be16 sample[2];
- s64 timestamp;
+ aligned_s64 timestamp;
} data __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
index 4fcb49fdf566..aef85093eb16 100644
--- a/drivers/iio/adc/ad7380.c
+++ b/drivers/iio/adc/ad7380.c
@@ -1211,6 +1211,9 @@ static int ad7380_offload_buffer_predisable(struct iio_dev *indio_dev)
struct ad7380_state *st = iio_priv(indio_dev);
int ret;
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+ spi_unoptimize_message(&st->offload_msg);
+
if (st->seq) {
ret = regmap_update_bits(st->regmap,
AD7380_REG_ADDR_CONFIG1,
@@ -1222,10 +1225,6 @@ static int ad7380_offload_buffer_predisable(struct iio_dev *indio_dev)
st->seq = false;
}
- spi_offload_trigger_disable(st->offload, st->offload_trigger);
-
- spi_unoptimize_message(&st->offload_msg);
-
return 0;
}
@@ -1611,11 +1610,25 @@ static int ad7380_write_event_config(struct iio_dev *indio_dev,
return ret;
}
-static int ad7380_get_alert_th(struct ad7380_state *st,
+static int ad7380_get_alert_th(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
enum iio_event_direction dir,
int *val)
{
- int ret, tmp;
+ struct ad7380_state *st = iio_priv(indio_dev);
+ const struct iio_scan_type *scan_type;
+ int ret, tmp, shift;
+
+ scan_type = iio_get_current_scan_type(indio_dev, chan);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ /*
+	 * The register value is 12 bits wide and is compared against the most
+	 * significant bits of the raw value, so a shift is required to convert
+	 * it to the same scale as the raw value.
+ */
+ shift = scan_type->realbits - 12;
switch (dir) {
case IIO_EV_DIR_RISING:
@@ -1625,7 +1638,7 @@ static int ad7380_get_alert_th(struct ad7380_state *st,
if (ret)
return ret;
- *val = FIELD_GET(AD7380_ALERT_HIGH_TH, tmp);
+ *val = FIELD_GET(AD7380_ALERT_HIGH_TH, tmp) << shift;
return IIO_VAL_INT;
case IIO_EV_DIR_FALLING:
ret = regmap_read(st->regmap,
@@ -1634,7 +1647,7 @@ static int ad7380_get_alert_th(struct ad7380_state *st,
if (ret)
return ret;
- *val = FIELD_GET(AD7380_ALERT_LOW_TH, tmp);
+ *val = FIELD_GET(AD7380_ALERT_LOW_TH, tmp) << shift;
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -1648,7 +1661,6 @@ static int ad7380_read_event_value(struct iio_dev *indio_dev,
enum iio_event_info info,
int *val, int *val2)
{
- struct ad7380_state *st = iio_priv(indio_dev);
int ret;
switch (info) {
@@ -1656,7 +1668,7 @@ static int ad7380_read_event_value(struct iio_dev *indio_dev,
if (!iio_device_claim_direct(indio_dev))
return -EBUSY;
- ret = ad7380_get_alert_th(st, dir, val);
+ ret = ad7380_get_alert_th(indio_dev, chan, dir, val);
iio_device_release_direct(indio_dev);
return ret;
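A standalone check (not part of the patch) of the scaling described in the comment above: the 12-bit ALERT threshold is compared against the most significant bits of the sample, so reporting it on the raw scale needs a left shift of (realbits - 12). The 16-bit realbits value here is only an illustration.

#include <stdio.h>

int main(void)
{
	unsigned int realbits = 16;		/* hypothetical scan type */
	unsigned int reg_field = 0x800;		/* 12-bit value from ALERT_HIGH_TH */
	unsigned int raw_threshold = reg_field << (realbits - 12);

	printf("threshold on raw scale = %#x\n", raw_threshold);	/* 0x8000 */
	return 0;
}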
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index 1a314fddd7eb..703556eb7257 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -1236,9 +1236,11 @@ static int ad7616_sw_mode_setup(struct iio_dev *indio_dev)
st->write_scale = ad7616_write_scale_sw;
st->write_os = &ad7616_write_os_sw;
- ret = st->bops->sw_mode_config(indio_dev);
- if (ret)
- return ret;
+ if (st->bops->sw_mode_config) {
+ ret = st->bops->sw_mode_config(indio_dev);
+ if (ret)
+ return ret;
+ }
/* Activate Burst mode and SEQEN MODE */
return ad7606_write_mask(st, AD7616_CONFIGURATION_REGISTER,
@@ -1268,6 +1270,9 @@ static int ad7606b_sw_mode_setup(struct iio_dev *indio_dev)
st->write_scale = ad7606_write_scale_sw;
st->write_os = &ad7606_write_os_sw;
+ if (!st->bops->sw_mode_config)
+ return 0;
+
return st->bops->sw_mode_config(indio_dev);
}
diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
index 885bf0b68e77..179115e90988 100644
--- a/drivers/iio/adc/ad7606_spi.c
+++ b/drivers/iio/adc/ad7606_spi.c
@@ -131,7 +131,7 @@ static int ad7606_spi_reg_read(struct ad7606_state *st, unsigned int addr)
{
.tx_buf = &st->d16[0],
.len = 2,
- .cs_change = 0,
+ .cs_change = 1,
}, {
.rx_buf = &st->d16[1],
.len = 2,
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 5a863005aca6..5e0be36af0c5 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -168,7 +168,7 @@ struct ad7768_state {
union {
struct {
__be32 chan;
- s64 timestamp;
+ aligned_s64 timestamp;
} scan;
__be32 d32;
u8 d8[2];
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index a1e48a756a7b..359e26e3f5bc 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -466,7 +466,7 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct {
__le16 values[DLN2_ADC_MAX_CHANNELS];
- int64_t timestamp_space;
+ aligned_s64 timestamp_space;
} data;
struct dln2_adc_get_all_vals dev_data;
struct dln2_adc *dln2 = iio_priv(indio_dev);
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c
index 7fb8b2499a1d..b64a8a407168 100644
--- a/drivers/iio/adc/qcom-spmi-iadc.c
+++ b/drivers/iio/adc/qcom-spmi-iadc.c
@@ -543,7 +543,9 @@ static int iadc_probe(struct platform_device *pdev)
else
return ret;
} else {
- device_init_wakeup(iadc->dev, 1);
+ ret = devm_device_init_wakeup(iadc->dev);
+ if (ret)
+ return dev_err_probe(iadc->dev, ret, "Failed to init wakeup\n");
}
ret = iadc_update_offset(iadc);
diff --git a/drivers/iio/adc/qcom-spmi-rradc.c b/drivers/iio/adc/qcom-spmi-rradc.c
index 63ebaf13ef19..f61ad0510f04 100644
--- a/drivers/iio/adc/qcom-spmi-rradc.c
+++ b/drivers/iio/adc/qcom-spmi-rradc.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Linaro Limited.
- * Author: Caleb Connolly <caleb.connolly@linaro.org>
+ * Author: Casey Connolly <casey.connolly@linaro.org>
*
* This driver is for the Round Robin ADC found in the pmi8998 and pm660 PMICs.
*/
@@ -1016,5 +1016,5 @@ static struct platform_driver rradc_driver = {
module_platform_driver(rradc_driver);
MODULE_DESCRIPTION("QCOM SPMI PMIC RR ADC driver");
-MODULE_AUTHOR("Caleb Connolly <caleb.connolly@linaro.org>");
+MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 9a099df79518..5e28bd28b81a 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -520,15 +520,6 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
if (info->reset)
rockchip_saradc_reset_controller(info->reset);
- /*
- * Use a default value for the converter clock.
- * This may become user-configurable in the future.
- */
- ret = clk_set_rate(info->clk, info->data->clk_rate);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret,
- "failed to set adc clk rate\n");
-
ret = regulator_enable(info->vref);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
@@ -555,6 +546,14 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
if (IS_ERR(info->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
"failed to get adc clock\n");
+ /*
+ * Use a default value for the converter clock.
+ * This may become user-configurable in the future.
+ */
+ ret = clk_set_rate(info->clk, info->data->clk_rate);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to set adc clk rate\n");
platform_set_drvdata(pdev, indio_dev);
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 0914148d1a22..bd3458965bff 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -421,9 +421,10 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
return priv->irq[i];
}
- priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0,
- &stm32_adc_domain_ops,
- priv);
+ priv->domain = irq_domain_create_simple(of_fwnode_handle(np),
+ STM32_ADC_MAX_ADCS, 0,
+ &stm32_adc_domain_ops,
+ priv);
if (!priv->domain) {
dev_err(&pdev->dev, "Failed to add irq domain\n");
return -ENOMEM;
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
index d0bd94912e0a..e05ce1f12065 100644
--- a/drivers/iio/chemical/pms7003.c
+++ b/drivers/iio/chemical/pms7003.c
@@ -5,7 +5,6 @@
* Copyright (c) Tomasz Duszynski <tduszyns@gmail.com>
*/
-#include <linux/unaligned.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
@@ -19,6 +18,8 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/serdev.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
#define PMS7003_DRIVER_NAME "pms7003"
@@ -76,7 +77,7 @@ struct pms7003_state {
/* Used to construct scan to push to the IIO buffer */
struct {
u16 data[3]; /* PM1, PM2P5, PM10 */
- s64 ts;
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
index 6f4f2ba2c09d..a7888146188d 100644
--- a/drivers/iio/chemical/sps30.c
+++ b/drivers/iio/chemical/sps30.c
@@ -108,7 +108,7 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p)
int ret;
struct {
s32 data[4]; /* PM1, PM2P5, PM4, PM10 */
- s64 ts;
+ aligned_s64 ts;
} scan;
mutex_lock(&state->lock);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index ad1882f608c0..2055a03cbeb1 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -66,6 +66,10 @@ static struct {
{HID_USAGE_SENSOR_HUMIDITY, 0, 1000, 0},
{HID_USAGE_SENSOR_HINGE, 0, 0, 17453293},
{HID_USAGE_SENSOR_HINGE, HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293},
+
+ {HID_USAGE_SENSOR_HUMAN_PRESENCE, 0, 1, 0},
+ {HID_USAGE_SENSOR_HUMAN_PROXIMITY, 0, 1, 0},
+ {HID_USAGE_SENSOR_HUMAN_ATTENTION, 0, 1, 0},
};
static void simple_div(int dividend, int divisor, int *whole,
diff --git a/drivers/iio/imu/adis16550.c b/drivers/iio/imu/adis16550.c
index b14ea8937c7f..28f0dbd0226c 100644
--- a/drivers/iio/imu/adis16550.c
+++ b/drivers/iio/imu/adis16550.c
@@ -836,7 +836,7 @@ static irqreturn_t adis16550_trigger_handler(int irq, void *p)
u16 dummy;
bool valid;
struct iio_poll_func *pf = p;
- __be32 data[ADIS16550_MAX_SCAN_DATA];
+ __be32 data[ADIS16550_MAX_SCAN_DATA] __aligned(8);
struct iio_dev *indio_dev = pf->indio_dev;
struct adis16550 *st = iio_priv(indio_dev);
struct adis *adis = iio_device_get_drvdata(indio_dev);
diff --git a/drivers/iio/imu/bmi270/bmi270_core.c b/drivers/iio/imu/bmi270/bmi270_core.c
index a86be5af5ccb..2e4469f30d53 100644
--- a/drivers/iio/imu/bmi270/bmi270_core.c
+++ b/drivers/iio/imu/bmi270/bmi270_core.c
@@ -918,8 +918,7 @@ static int bmi270_configure_imu(struct bmi270_data *data)
FIELD_PREP(BMI270_ACC_CONF_ODR_MSK,
BMI270_ACC_CONF_ODR_100HZ) |
FIELD_PREP(BMI270_ACC_CONF_BWP_MSK,
- BMI270_ACC_CONF_BWP_NORMAL_MODE) |
- BMI270_PWR_CONF_ADV_PWR_SAVE_MSK);
+ BMI270_ACC_CONF_BWP_NORMAL_MODE));
if (ret)
return dev_err_probe(dev, ret, "Failed to configure accelerometer");
@@ -927,8 +926,7 @@ static int bmi270_configure_imu(struct bmi270_data *data)
FIELD_PREP(BMI270_GYR_CONF_ODR_MSK,
BMI270_GYR_CONF_ODR_200HZ) |
FIELD_PREP(BMI270_GYR_CONF_BWP_MSK,
- BMI270_GYR_CONF_BWP_NORMAL_MODE) |
- BMI270_PWR_CONF_ADV_PWR_SAVE_MSK);
+ BMI270_GYR_CONF_BWP_NORMAL_MODE));
if (ret)
return dev_err_probe(dev, ret, "Failed to configure gyroscope");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 3d3b27f28c9d..273196e647a2 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -50,7 +50,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
u16 fifo_count;
u32 fifo_period;
s64 timestamp;
- u8 data[INV_MPU6050_OUTPUT_DATA_SIZE];
+ u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] __aligned(8);
size_t i, nb;
mutex_lock(&st->lock);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 0a7cd8c1aa33..8a9d2593576a 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -392,6 +392,9 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK))
return 0;
+ if (!pattern_len)
+ pattern_len = ST_LSM6DSX_SAMPLE_SIZE;
+
fifo_len = (le16_to_cpu(fifo_status) & fifo_diff_mask) *
ST_LSM6DSX_CHAN_SIZE;
fifo_len = (fifo_len / pattern_len) * pattern_len;
@@ -623,6 +626,9 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw)
if (!fifo_len)
return 0;
+ if (!pattern_len)
+ pattern_len = ST_LSM6DSX_TAGGED_SAMPLE_SIZE;
+
for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
err = st_lsm6dsx_read_block(hw,
ST_LSM6DSX_REG_FIFO_OUT_TAG_ADDR,
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 4fdcc2acc94e..96c6106b95ee 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -2719,8 +2719,11 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
}
if (device_property_read_bool(dev, "wakeup-source") ||
- (pdata && pdata->wakeup_source))
- device_init_wakeup(dev, true);
+ (pdata && pdata->wakeup_source)) {
+ err = devm_device_init_wakeup(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to init wakeup\n");
+ }
return 0;
}
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 76b76d12b388..4c65b32d34ce 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -34,9 +34,9 @@ struct prox_state {
struct iio_chan_spec channels[MAX_CHANNELS];
u32 channel2usage[MAX_CHANNELS];
u32 human_presence[MAX_CHANNELS];
- int scale_pre_decml;
- int scale_post_decml;
- int scale_precision;
+ int scale_pre_decml[MAX_CHANNELS];
+ int scale_post_decml[MAX_CHANNELS];
+ int scale_precision[MAX_CHANNELS];
unsigned long scan_mask[2]; /* One entry plus one terminator. */
int num_channels;
};
@@ -116,13 +116,15 @@ static int prox_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
- *val = prox_state->scale_pre_decml;
- *val2 = prox_state->scale_post_decml;
- ret_type = prox_state->scale_precision;
+ if (chan->scan_index >= prox_state->num_channels)
+ return -EINVAL;
+
+ *val = prox_state->scale_pre_decml[chan->scan_index];
+ *val2 = prox_state->scale_post_decml[chan->scan_index];
+ ret_type = prox_state->scale_precision[chan->scan_index];
break;
case IIO_CHAN_INFO_OFFSET:
- *val = hid_sensor_convert_exponent(
- prox_state->prox_attr[chan->scan_index].unit_expo);
+ *val = 0;
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
@@ -249,6 +251,10 @@ static int prox_parse_report(struct platform_device *pdev,
st->prox_attr[index].size);
dev_dbg(&pdev->dev, "prox %x:%x\n", st->prox_attr[index].index,
st->prox_attr[index].report_id);
+ st->scale_precision[index] =
+ hid_sensor_format_scale(usage_id, &st->prox_attr[index],
+ &st->scale_pre_decml[index],
+ &st->scale_post_decml[index]);
index++;
}
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 65b295877b41..393a3d2fbe1d 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -788,8 +788,9 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
int ret;
bool wake_result_ready_queue = false;
enum iio_chan_type chan_type = opt->chip_info->chan_type;
+ bool ok_to_ignore_lock = opt->ok_to_ignore_lock;
- if (!opt->ok_to_ignore_lock)
+ if (!ok_to_ignore_lock)
mutex_lock(&opt->lock);
ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
@@ -826,7 +827,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
}
out:
- if (!opt->ok_to_ignore_lock)
+ if (!ok_to_ignore_lock)
mutex_unlock(&opt->lock);
if (wake_result_ready_queue)
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 08975c60e325..7bc341c69697 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -535,8 +535,8 @@ static int ak8974_detect(struct ak8974 *ak8974)
fab_data2, sizeof(fab_data2));
for (i = 0; i < 3; ++i) {
- static const char axis[3] = "XYZ";
- static const char pgaxis[6] = "ZYZXYX";
+ static const char axis[] = "XYZ";
+ static const char pgaxis[] = "ZYZXYX";
unsigned offz = le16_to_cpu(fab_data2[i]) & 0x7F;
unsigned fine = le16_to_cpu(fab_data1[i]);
unsigned sens = le16_to_cpu(fab_data1[i + 3]);
diff --git a/drivers/iio/pressure/mprls0025pa.h b/drivers/iio/pressure/mprls0025pa.h
index 9d5c30afa9d6..d62a018eaff3 100644
--- a/drivers/iio/pressure/mprls0025pa.h
+++ b/drivers/iio/pressure/mprls0025pa.h
@@ -34,16 +34,6 @@ struct iio_dev;
struct mpr_data;
struct mpr_ops;
-/**
- * struct mpr_chan
- * @pres: pressure value
- * @ts: timestamp
- */
-struct mpr_chan {
- s32 pres;
- s64 ts;
-};
-
enum mpr_func_id {
MPR_FUNCTION_A,
MPR_FUNCTION_B,
@@ -69,6 +59,8 @@ enum mpr_func_id {
* reading in a loop until data is ready
* @completion: handshake from irq to read
* @chan: channel values for buffered mode
+ * @chan.pres: pressure value
+ * @chan.ts: timestamp
* @buffer: raw conversion data
*/
struct mpr_data {
@@ -87,7 +79,10 @@ struct mpr_data {
struct gpio_desc *gpiod_reset;
int irq;
struct completion completion;
- struct mpr_chan chan;
+ struct {
+ s32 pres;
+ aligned_s64 ts;
+ } chan;
u8 buffer[MPR_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index c28a7a6dea5f..555a61e2f3fd 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -121,9 +121,9 @@ static const struct maxim_thermocouple_chip maxim_thermocouple_chips[] = {
struct maxim_thermocouple_data {
struct spi_device *spi;
const struct maxim_thermocouple_chip *chip;
+ char tc_type;
u8 buffer[16] __aligned(IIO_DMA_MINALIGN);
- char tc_type;
};
static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 142170473e75..8670e58675c6 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -36,6 +36,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
#define CM_DIRECT_RETRY_CTX ((void *) 1UL)
+#define CM_MRA_SETTING 24 /* 4.096us * 2^24 = ~68.7 seconds */
static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_NO_QP] = "no QP",
@@ -167,7 +168,7 @@ struct cm_port {
struct cm_device {
struct kref kref;
struct list_head list;
- spinlock_t mad_agent_lock;
+ rwlock_t mad_agent_lock;
struct ib_device *ib_device;
u8 ack_delay;
int going_down;
@@ -241,7 +242,6 @@ struct cm_id_private {
u8 initiator_depth;
u8 retry_count;
u8 rnr_retry_count;
- u8 service_timeout;
u8 target_ack_delay;
struct list_head work_list;
@@ -285,7 +285,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
if (!cm_id_priv->av.port)
return ERR_PTR(-EINVAL);
- spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
mad_agent = cm_id_priv->av.port->mad_agent;
if (!mad_agent) {
m = ERR_PTR(-EINVAL);
@@ -311,7 +311,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
m->ah = ah;
out:
- spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
return m;
}
@@ -1297,10 +1297,10 @@ static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
if (!cm_id_priv->av.port)
return cpu_to_be64(low_tid);
- spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
if (cm_id_priv->av.port->mad_agent)
hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
- spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
return cpu_to_be64(hi_tid | low_tid);
}
@@ -1872,7 +1872,7 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
static void cm_format_mra(struct cm_mra_msg *mra_msg,
struct cm_id_private *cm_id_priv,
- enum cm_msg_response msg_mraed, u8 service_timeout,
+ enum cm_msg_response msg_mraed,
const void *private_data, u8 private_data_len)
{
cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
@@ -1881,7 +1881,7 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
be32_to_cpu(cm_id_priv->id.local_id));
IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
be32_to_cpu(cm_id_priv->id.remote_id));
- IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
+ IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, CM_MRA_SETTING);
if (private_data && private_data_len)
IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
@@ -1960,7 +1960,7 @@ static void cm_dup_req_handler(struct cm_work *work,
switch (cm_id_priv->id.state) {
case IB_CM_MRA_REQ_SENT:
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
+ CM_MSG_RESPONSE_REQ,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
break;
@@ -2454,7 +2454,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
cm_id_priv->private_data_len);
else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
+ CM_MSG_RESPONSE_REP,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
else
@@ -3094,26 +3094,13 @@ out:
return -EINVAL;
}
-int ib_send_cm_mra(struct ib_cm_id *cm_id,
- u8 service_timeout,
- const void *private_data,
- u8 private_data_len)
+int ib_prepare_cm_mra(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
- struct ib_mad_send_buf *msg;
enum ib_cm_state cm_state;
enum ib_cm_lap_state lap_state;
- enum cm_msg_response msg_response;
- void *data;
unsigned long flags;
- int ret;
-
- if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
- return -EINVAL;
-
- data = cm_copy_private_data(private_data, private_data_len);
- if (IS_ERR(data))
- return PTR_ERR(data);
+ int ret = 0;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3122,58 +3109,33 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
case IB_CM_REQ_RCVD:
cm_state = IB_CM_MRA_REQ_SENT;
lap_state = cm_id->lap_state;
- msg_response = CM_MSG_RESPONSE_REQ;
break;
case IB_CM_REP_RCVD:
cm_state = IB_CM_MRA_REP_SENT;
lap_state = cm_id->lap_state;
- msg_response = CM_MSG_RESPONSE_REP;
break;
case IB_CM_ESTABLISHED:
if (cm_id->lap_state == IB_CM_LAP_RCVD) {
cm_state = cm_id->state;
lap_state = IB_CM_MRA_LAP_SENT;
- msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
fallthrough;
default:
- trace_icm_send_mra_unknown_err(&cm_id_priv->id);
+ trace_icm_prepare_mra_unknown_err(&cm_id_priv->id);
ret = -EINVAL;
goto error_unlock;
}
- if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
- msg = cm_alloc_msg(cm_id_priv);
- if (IS_ERR(msg)) {
- ret = PTR_ERR(msg);
- goto error_unlock;
- }
-
- cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- msg_response, service_timeout,
- private_data, private_data_len);
- trace_icm_send_mra(cm_id);
- ret = ib_post_send_mad(msg, NULL);
- if (ret)
- goto error_free_msg;
- }
-
cm_id->state = cm_state;
cm_id->lap_state = lap_state;
- cm_id_priv->service_timeout = service_timeout;
- cm_set_private_data(cm_id_priv, data, private_data_len);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- return 0;
+ cm_set_private_data(cm_id_priv, NULL, 0);
-error_free_msg:
- cm_free_msg(msg);
error_unlock:
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- kfree(data);
return ret;
}
-EXPORT_SYMBOL(ib_send_cm_mra);
+EXPORT_SYMBOL(ib_prepare_cm_mra);
static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
@@ -3377,7 +3339,6 @@ static int cm_lap_handler(struct cm_work *work)
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
CM_MSG_RESPONSE_OTHER,
- cm_id_priv->service_timeout,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
@@ -3786,7 +3747,8 @@ static void cm_process_send_error(struct cm_id_private *cm_id_priv,
spin_lock_irq(&cm_id_priv->lock);
if (msg != cm_id_priv->msg) {
spin_unlock_irq(&cm_id_priv->lock);
- cm_free_priv_msg(msg);
+ cm_free_msg(msg);
+ cm_deref_id(cm_id_priv);
return;
}
cm_free_priv_msg(msg);
@@ -4378,7 +4340,7 @@ static int cm_add_one(struct ib_device *ib_device)
return -ENOMEM;
kref_init(&cm_dev->kref);
- spin_lock_init(&cm_dev->mad_agent_lock);
+ rwlock_init(&cm_dev->mad_agent_lock);
cm_dev->ib_device = ib_device;
cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
cm_dev->going_down = 0;
@@ -4494,9 +4456,9 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
* The above ensures no call paths from the work are running,
* the remaining paths all take the mad_agent_lock.
*/
- spin_lock(&cm_dev->mad_agent_lock);
+ write_lock(&cm_dev->mad_agent_lock);
port->mad_agent = NULL;
- spin_unlock(&cm_dev->mad_agent_lock);
+ write_unlock(&cm_dev->mad_agent_lock);
ib_unregister_mad_agent(mad_agent);
ib_port_unregister_client_groups(ib_device, i,
cm_counter_groups);
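A quick standalone check (not part of the patch) of the comment next to the new CM_MRA_SETTING definition in cm.c above: the MRA service-timeout field encodes 4.096 us * 2^value, so 24 works out to roughly 68.7 seconds.

#include <stdio.h>

int main(void)
{
	double timeout_s = 4.096e-6 * (double)(1UL << 24);

	printf("MRA service timeout for 24: %.3f s\n", timeout_s);	/* ~68.719 */
	return 0;
}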
diff --git a/drivers/infiniband/core/cm_trace.h b/drivers/infiniband/core/cm_trace.h
index 944d9071245d..4a4987da69d4 100644
--- a/drivers/infiniband/core/cm_trace.h
+++ b/drivers/infiniband/core/cm_trace.h
@@ -229,7 +229,7 @@ DEFINE_CM_ERR_EVENT(send_drep);
DEFINE_CM_ERR_EVENT(dreq_unknown);
DEFINE_CM_ERR_EVENT(send_unknown_rej);
DEFINE_CM_ERR_EVENT(rej_unknown);
-DEFINE_CM_ERR_EVENT(send_mra_unknown);
+DEFINE_CM_ERR_EVENT(prepare_mra_unknown);
DEFINE_CM_ERR_EVENT(mra_unknown);
DEFINE_CM_ERR_EVENT(qp_init);
DEFINE_CM_ERR_EVENT(qp_rtr);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index fedcdb56fb6b..9b471548e7ae 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -46,7 +46,6 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
-#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 16
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
@@ -72,6 +71,8 @@ static const char * const cma_events[] = {
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
enum ib_gid_type gid_type);
+static void cma_netevent_work_handler(struct work_struct *_work);
+
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
size_t index = event;
@@ -144,19 +145,6 @@ struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
}
EXPORT_SYMBOL(rdma_iw_cm_id);
-/**
- * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
- * @res: rdma resource tracking entry pointer
- */
-struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
-{
- struct rdma_id_private *id_priv =
- container_of(res, struct rdma_id_private, res);
-
- return &id_priv->id;
-}
-EXPORT_SYMBOL(rdma_res_to_id);
-
static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
@@ -1047,6 +1035,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
id_priv->id.route.addr.dev_addr.net = get_net(net);
id_priv->seq_num &= 0x00ffffff;
+ INIT_WORK(&id_priv->id.net_work, cma_netevent_work_handler);
rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
if (parent)
@@ -2211,8 +2200,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
case IB_CM_REP_RECEIVED:
if (state == RDMA_CM_CONNECT &&
(id_priv->id.qp_type != IB_QPT_UD)) {
- trace_cm_send_mra(id_priv);
- ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ trace_cm_prepare_mra(id_priv);
+ ib_prepare_cm_mra(cm_id);
}
if (id_priv->id.qp) {
event.status = cma_rep_recv(id_priv);
@@ -2473,8 +2462,8 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
conn_id->id.qp_type != IB_QPT_UD) {
- trace_cm_send_mra(cm_id->context);
- ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ trace_cm_prepare_mra(cm_id->context);
+ ib_prepare_cm_mra(cm_id);
}
mutex_unlock(&conn_id->handler_mutex);
@@ -5241,9 +5230,9 @@ static int cma_netevent_callback(struct notifier_block *self,
if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
neigh->ha, ETH_ALEN))
continue;
- INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
cma_id_get(current_id);
- queue_work(cma_wq, &current_id->id.net_work);
+ if (!queue_work(cma_wq, &current_id->id.net_work))
+ cma_id_put(current_id);
}
out:
spin_unlock_irqrestore(&id_table_lock, flags);
diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h
index dc622f3778be..3456d5f3aa47 100644
--- a/drivers/infiniband/core/cma_trace.h
+++ b/drivers/infiniband/core/cma_trace.h
@@ -55,7 +55,7 @@ DECLARE_EVENT_CLASS(cma_fsm_class,
DEFINE_CMA_FSM_EVENT(send_rtu);
DEFINE_CMA_FSM_EVENT(send_rej);
-DEFINE_CMA_FSM_EVENT(send_mra);
+DEFINE_CMA_FSM_EVENT(prepare_mra);
DEFINE_CMA_FSM_EVENT(send_sidr_req);
DEFINE_CMA_FSM_EVENT(send_sidr_rep);
DEFINE_CMA_FSM_EVENT(disconnect);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index b4e3e4beb7f4..d4263385850a 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1352,6 +1352,9 @@ static void ib_device_notify_register(struct ib_device *device)
down_read(&devices_rwsem);
+ /* Mark for userspace that device is ready */
+ kobject_uevent(&device->dev.kobj, KOBJ_ADD);
+
ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
if (ret)
goto out;
@@ -1468,10 +1471,9 @@ int ib_register_device(struct ib_device *device, const char *name,
return ret;
}
dev_set_uevent_suppress(&device->dev, false);
- /* Mark for userspace that device is ready */
- kobject_uevent(&device->dev.kobj, KOBJ_ADD);
ib_device_notify_register(device);
+
ib_device_put(device);
return 0;
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index f4486cbd8f45..62410578dec3 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -368,12 +368,9 @@ EXPORT_SYMBOL(iw_cm_disconnect);
/*
* CM_ID <-- DESTROYING
*
- * Clean up all resources associated with the connection and release
- * the initial reference taken by iw_create_cm_id.
- *
- * Returns true if and only if the last cm_id_priv reference has been dropped.
+ * Clean up all resources associated with the connection.
*/
-static bool destroy_cm_id(struct iw_cm_id *cm_id)
+static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
struct ib_qp *qp;
@@ -442,20 +439,22 @@ static bool destroy_cm_id(struct iw_cm_id *cm_id)
iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
}
-
- return iwcm_deref_id(cm_id_priv);
}
/*
- * This function is only called by the application thread and cannot
- * be called by the event thread. The function will wait for all
- * references to be released on the cm_id and then kfree the cm_id
- * object.
+ * Destroy cm_id. If the cm_id still has other references, wait for all
+ * references to be released on the cm_id and then release the initial
+ * reference taken by iw_create_cm_id.
*/
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
- if (!destroy_cm_id(cm_id))
+ struct iwcm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ destroy_cm_id(cm_id);
+ if (refcount_read(&cm_id_priv->refcount) > 1)
flush_workqueue(iwcm_wq);
+ iwcm_deref_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
@@ -1035,8 +1034,10 @@ static void cm_work_handler(struct work_struct *_work)
if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
ret = process_event(cm_id_priv, &levent);
- if (ret)
- WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
+ if (ret) {
+ destroy_cm_id(&cm_id_priv->id);
+ WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
+ }
} else
pr_debug("dropping event %d\n", levent.event);
if (iwcm_deref_id(cm_id_priv))
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 8af0619a39cd..b4b10e8a6495 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -158,7 +158,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
recv_wc->recv_buf.grh, agent->port_num);
if (IS_ERR(ah))
- return (void *) ah;
+ return ERR_CAST(ah);
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
diff --git a/drivers/infiniband/core/ucaps.c b/drivers/infiniband/core/ucaps.c
index 6853c6d078f9..de5cb8bf0a61 100644
--- a/drivers/infiniband/core/ucaps.c
+++ b/drivers/infiniband/core/ucaps.c
@@ -170,7 +170,7 @@ int ib_create_ucap(enum rdma_user_cap type)
ucap->dev.class = &ucaps_class;
ucap->dev.devt = MKDEV(MAJOR(ucaps_base_dev), type);
ucap->dev.release = ucap_dev_release;
- ret = dev_set_name(&ucap->dev, ucap_names[type]);
+ ret = dev_set_name(&ucap->dev, "%s", ucap_names[type]);
if (ret)
goto err_device;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index e9fa22d31c23..c752ae9fad6c 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -41,65 +41,72 @@
#include <linux/hugetlb.h>
#include <linux/interval_tree.h>
#include <linux/hmm.h>
+#include <linux/hmm-dma.h>
#include <linux/pagemap.h>
#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
-static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
- const struct mmu_interval_notifier_ops *ops)
+static void ib_init_umem_implicit_odp(struct ib_umem_odp *umem_odp)
{
- int ret;
+ umem_odp->is_implicit_odp = 1;
+ umem_odp->umem.is_odp = 1;
+ mutex_init(&umem_odp->umem_mutex);
+}
+
+static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+ const struct mmu_interval_notifier_ops *ops)
+{
+ struct ib_device *dev = umem_odp->umem.ibdev;
+ size_t page_size = 1UL << umem_odp->page_shift;
+ struct hmm_dma_map *map;
+ unsigned long start;
+ unsigned long end;
+ size_t nr_entries;
+ int ret = 0;
umem_odp->umem.is_odp = 1;
mutex_init(&umem_odp->umem_mutex);
- if (!umem_odp->is_implicit_odp) {
- size_t page_size = 1UL << umem_odp->page_shift;
- unsigned long start;
- unsigned long end;
- size_t ndmas, npfns;
-
- start = ALIGN_DOWN(umem_odp->umem.address, page_size);
- if (check_add_overflow(umem_odp->umem.address,
- (unsigned long)umem_odp->umem.length,
- &end))
- return -EOVERFLOW;
- end = ALIGN(end, page_size);
- if (unlikely(end < page_size))
- return -EOVERFLOW;
-
- ndmas = (end - start) >> umem_odp->page_shift;
- if (!ndmas)
- return -EINVAL;
-
- npfns = (end - start) >> PAGE_SHIFT;
- umem_odp->pfn_list = kvcalloc(
- npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
- if (!umem_odp->pfn_list)
- return -ENOMEM;
-
- umem_odp->dma_list = kvcalloc(
- ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);
- if (!umem_odp->dma_list) {
+ start = ALIGN_DOWN(umem_odp->umem.address, page_size);
+ if (check_add_overflow(umem_odp->umem.address,
+ (unsigned long)umem_odp->umem.length, &end))
+ return -EOVERFLOW;
+ end = ALIGN(end, page_size);
+ if (unlikely(end < page_size))
+ return -EOVERFLOW;
+
+ nr_entries = (end - start) >> PAGE_SHIFT;
+ if (!(nr_entries * PAGE_SIZE / page_size))
+ return -EINVAL;
+
+ map = &umem_odp->map;
+ if (ib_uses_virt_dma(dev)) {
+ map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!map->pfn_list)
ret = -ENOMEM;
- goto out_pfn_list;
- }
+ } else
+ ret = hmm_dma_map_alloc(dev->dma_device, map,
+ (end - start) >> PAGE_SHIFT,
+ 1 << umem_odp->page_shift);
+ if (ret)
+ return ret;
- ret = mmu_interval_notifier_insert(&umem_odp->notifier,
- umem_odp->umem.owning_mm,
- start, end - start, ops);
- if (ret)
- goto out_dma_list;
- }
+ ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+ umem_odp->umem.owning_mm, start,
+ end - start, ops);
+ if (ret)
+ goto out_free_map;
return 0;
-out_dma_list:
- kvfree(umem_odp->dma_list);
-out_pfn_list:
- kvfree(umem_odp->pfn_list);
+out_free_map:
+ if (ib_uses_virt_dma(dev))
+ kfree(map->pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, map);
return ret;
}
@@ -118,7 +125,6 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
{
struct ib_umem *umem;
struct ib_umem_odp *umem_odp;
- int ret;
if (access & IB_ACCESS_HUGETLB)
return ERR_PTR(-EINVAL);
@@ -130,16 +136,10 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
umem->ibdev = device;
umem->writable = ib_access_writable(access);
umem->owning_mm = current->mm;
- umem_odp->is_implicit_odp = 1;
umem_odp->page_shift = PAGE_SHIFT;
umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
- ret = ib_init_umem_odp(umem_odp, NULL);
- if (ret) {
- put_pid(umem_odp->tgid);
- kfree(umem_odp);
- return ERR_PTR(ret);
- }
+ ib_init_umem_implicit_odp(umem_odp);
return umem_odp;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
@@ -260,74 +260,41 @@ err_put_pid:
}
EXPORT_SYMBOL(ib_umem_odp_get);
-void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
+static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
{
+ struct ib_device *dev = umem_odp->umem.ibdev;
+
/*
* Ensure that no more pages are mapped in the umem.
*
* It is the driver's responsibility to ensure, before calling us,
* that the hardware will not attempt to access the MR any more.
*/
- if (!umem_odp->is_implicit_odp) {
- mutex_lock(&umem_odp->umem_mutex);
- ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
- mutex_unlock(&umem_odp->umem_mutex);
- mmu_interval_notifier_remove(&umem_odp->notifier);
- kvfree(umem_odp->dma_list);
- kvfree(umem_odp->pfn_list);
- }
- put_pid(umem_odp->tgid);
- kfree(umem_odp);
+ mutex_lock(&umem_odp->umem_mutex);
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+ ib_umem_end(umem_odp));
+ mutex_unlock(&umem_odp->umem_mutex);
+ mmu_interval_notifier_remove(&umem_odp->notifier);
+ if (ib_uses_virt_dma(dev))
+ kfree(umem_odp->map.pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, &umem_odp->map);
}
-EXPORT_SYMBOL(ib_umem_odp_release);
-/*
- * Map for DMA and insert a single page into the on-demand paging page tables.
- *
- * @umem: the umem to insert the page to.
- * @dma_index: index in the umem to add the dma to.
- * @page: the page struct to map and add.
- * @access_mask: access permissions needed for this page.
- *
- * The function returns -EFAULT if the DMA mapping operation fails.
- *
- */
-static int ib_umem_odp_map_dma_single_page(
- struct ib_umem_odp *umem_odp,
- unsigned int dma_index,
- struct page *page,
- u64 access_mask)
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
- struct ib_device *dev = umem_odp->umem.ibdev;
- dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];
-
- if (*dma_addr) {
- /*
- * If the page is already dma mapped it means it went through
- * a non-invalidating trasition, like read-only to writable.
- * Resync the flags.
- */
- *dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
- return 0;
- }
+ if (!umem_odp->is_implicit_odp)
+ ib_umem_odp_free(umem_odp);
- *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
- DMA_BIDIRECTIONAL);
- if (ib_dma_mapping_error(dev, *dma_addr)) {
- *dma_addr = 0;
- return -EFAULT;
- }
- umem_odp->npages++;
- *dma_addr |= access_mask;
- return 0;
+ put_pid(umem_odp->tgid);
+ kfree(umem_odp);
}
+EXPORT_SYMBOL(ib_umem_odp_release);
/**
* ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
*
* Maps the range passed in the argument to DMA addresses.
- * The DMA addresses of the mapped pages is updated in umem_odp->dma_list.
* Upon success the ODP MR will be locked to let caller complete its device
* page table update.
*
@@ -355,9 +322,6 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
struct hmm_range range = {};
unsigned long timeout;
- if (access_mask == 0)
- return -EINVAL;
-
if (user_virt < ib_umem_start(umem_odp) ||
user_virt + bcnt > ib_umem_end(umem_odp))
return -EFAULT;
@@ -383,11 +347,11 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
if (fault) {
range.default_flags = HMM_PFN_REQ_FAULT;
- if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ if (access_mask & HMM_PFN_WRITE)
range.default_flags |= HMM_PFN_REQ_WRITE;
}
- range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]);
+ range.hmm_pfns = &(umem_odp->map.pfn_list[pfn_start_idx]);
timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
retry:
@@ -415,22 +379,17 @@ retry:
for (pfn_index = 0; pfn_index < num_pfns;
pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {
- if (fault) {
- /*
- * Since we asked for hmm_range_fault() to populate
- * pages it shouldn't return an error entry on success.
- */
- WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
- WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
- } else {
- if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
- WARN_ON(umem_odp->dma_list[dma_index]);
- continue;
- }
- access_mask = ODP_READ_ALLOWED_BIT;
- if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
- access_mask |= ODP_WRITE_ALLOWED_BIT;
- }
+ /*
+ * Since we asked for hmm_range_fault() to populate
+ * pages it shouldn't return an error entry on success.
+ */
+ WARN_ON(fault && range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
+ WARN_ON(fault && !(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
+ if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID))
+ continue;
+
+ if (range.hmm_pfns[pfn_index] & HMM_PFN_DMA_MAPPED)
+ continue;
hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
/* If a hugepage was detected and ODP wasn't set for, the umem
@@ -443,15 +402,6 @@ retry:
__func__, hmm_order, page_shift);
break;
}
-
- ret = ib_umem_odp_map_dma_single_page(
- umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
- access_mask);
- if (ret < 0) {
- ibdev_dbg(umem_odp->umem.ibdev,
- "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
- break;
- }
}
/* upon success lock should stay on hold for the callee */
if (!ret)
@@ -471,45 +421,38 @@ EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
u64 bound)
{
- dma_addr_t dma_addr;
- dma_addr_t dma;
- int idx;
- u64 addr;
struct ib_device *dev = umem_odp->umem.ibdev;
+ u64 addr;
lockdep_assert_held(&umem_odp->umem_mutex);
virt = max_t(u64, virt, ib_umem_start(umem_odp));
bound = min_t(u64, bound, ib_umem_end(umem_odp));
for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
- idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- dma = umem_odp->dma_list[idx];
-
- /* The access flags guaranteed a valid DMA address in case was NULL */
- if (dma) {
- unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
- struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
-
- dma_addr = dma & ODP_DMA_ADDR_MASK;
- ib_dma_unmap_page(dev, dma_addr,
- BIT(umem_odp->page_shift),
- DMA_BIDIRECTIONAL);
- if (dma & ODP_WRITE_ALLOWED_BIT) {
- struct page *head_page = compound_head(page);
- /*
- * set_page_dirty prefers being called with
- * the page lock. However, MMU notifiers are
- * called sometimes with and sometimes without
- * the lock. We rely on the umem_mutex instead
- * to prevent other mmu notifiers from
- * continuing and allowing the page mapping to
- * be removed.
- */
- set_page_dirty(head_page);
- }
- umem_odp->dma_list[idx] = 0;
- umem_odp->npages--;
+ u64 offset = addr - ib_umem_start(umem_odp);
+ size_t idx = offset >> umem_odp->page_shift;
+ unsigned long pfn = umem_odp->map.pfn_list[idx];
+
+ if (!hmm_dma_unmap_pfn(dev->dma_device, &umem_odp->map, idx))
+ goto clear;
+
+ if (pfn & HMM_PFN_WRITE) {
+ struct page *page = hmm_pfn_to_page(pfn);
+ struct page *head_page = compound_head(page);
+ /*
+ * set_page_dirty prefers being called with
+ * the page lock. However, MMU notifiers are
+ * called sometimes with and sometimes without
+ * the lock. We rely on the umem_mutex instead
+ * to prevent other mmu notifiers from
+ * continuing and allowing the page mapping to
+ * be removed.
+ */
+ set_page_dirty(head_page);
}
+ umem_odp->npages--;
+clear:
+ umem_odp->map.pfn_list[idx] &= ~HMM_PFN_FLAGS;
}
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 3c3bb670c805..bc9fe3ceca4d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -193,7 +193,7 @@ _ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
fd, attrs);
if (IS_ERR(uobj))
- return (void *)uobj;
+ return ERR_CAST(uobj);
uverbs_uobject_get(uobj);
uobj_put_read(uobj);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c5e78bbefbd0..75fde0fe9989 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -572,7 +572,7 @@ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
GFP_KERNEL : GFP_ATOMIC);
if (IS_ERR(slave)) {
rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
- return (void *)slave;
+ return ERR_CAST(slave);
}
ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
rdma_lag_put_ah_roce_slave(slave);
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.c b/drivers/infiniband/hw/bnxt_re/debugfs.c
index af91d16c3c77..e632f1661b92 100644
--- a/drivers/infiniband/hw/bnxt_re/debugfs.c
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.c
@@ -170,6 +170,9 @@ static int map_cc_config_offset_gen0_ext0(u32 offset, struct bnxt_qplib_cc_param
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP:
*val = ccparam->tcp_cp;
break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ *val = ccparam->inact_th;
+ break;
default:
return -EINVAL;
}
@@ -203,7 +206,7 @@ static ssize_t bnxt_re_cc_config_get(struct file *filp, char __user *buffer,
return simple_read_from_buffer(buffer, usr_buf_len, ppos, (u8 *)(buf), rc);
}
-static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val)
+static int bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val)
{
u32 modify_mask;
@@ -247,7 +250,9 @@ static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offs
ccparam->tcp_cp = val;
break;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TX_QUEUE:
+ return -EOPNOTSUPP;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ ccparam->inact_th = val;
break;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE:
ccparam->time_pph = val;
@@ -258,17 +263,20 @@ static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offs
}
ccparam->mask = modify_mask;
+ return 0;
}
static int bnxt_re_configure_cc(struct bnxt_re_dev *rdev, u32 gen_ext, u32 offset, u32 val)
{
struct bnxt_qplib_cc_param ccparam = { };
+ int rc;
- /* Supporting only Gen 0 now */
- if (gen_ext == CC_CONFIG_GEN0_EXT0)
- bnxt_re_fill_gen0_ext0(&ccparam, offset, val);
- else
- return -EINVAL;
+ if (gen_ext != CC_CONFIG_GEN0_EXT0)
+ return -EOPNOTSUPP;
+
+ rc = bnxt_re_fill_gen0_ext0(&ccparam, offset, val);
+ if (rc)
+ return rc;
bnxt_qplib_modify_cc(&rdev->qplib_res, &ccparam);
return 0;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 9082b3fd2b47..063801384b2b 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1774,10 +1774,7 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
ib_srq);
struct bnxt_re_dev *rdev = srq->rdev;
struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
- struct bnxt_qplib_nq *nq = NULL;
- if (qplib_srq->cq)
- nq = qplib_srq->cq->nq;
if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
free_page((unsigned long)srq->uctx_srq_page);
hash_del(&srq->hash_entry);
@@ -1785,8 +1782,6 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
ib_umem_release(srq->umem);
atomic_dec(&rdev->stats.res.srq_count);
- if (nq)
- nq->budget--;
return 0;
}
@@ -1827,7 +1822,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
struct ib_udata *udata)
{
struct bnxt_qplib_dev_attr *dev_attr;
- struct bnxt_qplib_nq *nq = NULL;
struct bnxt_re_ucontext *uctx;
struct bnxt_re_dev *rdev;
struct bnxt_re_srq *srq;
@@ -1873,7 +1867,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
- nq = &rdev->nqr->nq[0];
if (udata) {
rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
@@ -1908,8 +1901,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
goto fail;
}
}
- if (nq)
- nq->budget++;
active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
if (active_srqs > rdev->stats.res.srq_watermark)
rdev->stats.res.srq_watermark = active_srqs;
@@ -3079,7 +3070,6 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
ib_umem_release(cq->umem);
atomic_dec(&rdev->stats.res.cq_count);
- nq->budget--;
kfree(cq->cql);
return 0;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 457eecb99f96..be34c605d516 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1113,7 +1113,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
- if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
+ if (bnxt_ext_stats_supported(res->cctx, res->dattr->dev_cap_flags, res->is_vf))
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
req.qp_flags = cpu_to_le32(qp_flags);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index f231e886ad9d..9efd32a3dc55 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -846,7 +846,12 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
req.resp_addr = cpu_to_le64(sbuf.dma_addr);
- req.function_id = cpu_to_le32(fid);
+ if (bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx) && rcfw->res->is_vf)
+ req.function_id =
+ cpu_to_le32(CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID |
+ (fid << CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT));
+ else
+ req.function_id = cpu_to_le32(fid);
req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index b6e3141253c4..d6dde762921a 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -124,7 +124,6 @@ struct opa_mad_notice_attr {
} __packed ntc_2048;
};
- u8 class_data[];
};
#define IB_VLARB_LOWPRI_0_31 1
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 5a91cbda4aee..764286da2ce8 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1361,16 +1361,6 @@ void sc_flush(struct send_context *sc)
sc_wait_for_packet_egress(sc, 1);
}
-/* drop all packets on the context, no waiting until they are sent */
-void sc_drop(struct send_context *sc)
-{
- if (!sc)
- return;
-
- dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
- __func__, sc->sw_index, sc->hw_context);
-}
-
/*
* Start the software reaction to a context halt or SPC freeze:
* - mark the context as halted or frozen
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index d07cc6ea7c63..ab0f9a3a8d12 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -246,7 +246,6 @@ void sc_disable(struct send_context *sc);
int sc_restart(struct send_context *sc);
void sc_return_credits(struct send_context *sc);
void sc_flush(struct send_context *sc);
-void sc_drop(struct send_context *sc);
void sc_stop(struct send_context *sc, int bit);
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
pio_release_cb cb, void *arg);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 0d2b39b7c8b5..16a749d16ee9 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1521,24 +1521,6 @@ void sdma_all_running(struct hfi1_devdata *dd)
}
/**
- * sdma_all_idle() - called when the link goes down
- * @dd: hfi1_devdata
- *
- * This routine moves all engines to the idle state.
- */
-void sdma_all_idle(struct hfi1_devdata *dd)
-{
- struct sdma_engine *sde;
- unsigned int i;
-
- /* idle all engines */
- for (i = 0; i < dd->num_sdma; ++i) {
- sde = &dd->per_sdma[i];
- sdma_process_event(sde, sdma_event_e70_go_idle);
- }
-}
-
-/**
* sdma_start() - called to kick off state processing for all engines
* @dd: hfi1_devdata
*
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index d77246b48434..91dfd5d0c419 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -373,7 +373,6 @@ void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_clean(struct hfi1_devdata *dd, size_t num_engines);
void sdma_all_running(struct hfi1_devdata *dd);
-void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index cf2d29098406..62b4f16dab27 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -53,7 +53,7 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
int ret = 0;
fd->entry_to_rb = kcalloc(uctxt->expected_count,
- sizeof(struct rb_node *),
+ sizeof(*fd->entry_to_rb),
GFP_KERNEL);
if (!fd->entry_to_rb)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 7917af8e6380..baf592e6f21b 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -4,6 +4,7 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(src)
hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 4fc5b9d5fea8..307c35888b30 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -33,7 +33,6 @@
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
-#include "hnae3.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 560a1d9de408..1dcc9cbb4678 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1027,6 +1027,26 @@ struct hns_roce_dev {
atomic64_t *dfx_cnt;
};
+enum hns_roce_trace_type {
+ TRACE_SQ,
+ TRACE_RQ,
+ TRACE_SRQ,
+};
+
+static inline const char *trace_type_to_str(enum hns_roce_trace_type type)
+{
+ switch (type) {
+ case TRACE_SQ:
+ return "SQ";
+ case TRACE_RQ:
+ return "RQ";
+ case TRACE_SRQ:
+ return "SRQ";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
return container_of(ib_dev, struct hns_roce_dev, ib_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 160e8927d364..fa8747656f25 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -43,13 +43,15 @@
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
+#define CREATE_TRACE_POINTS
+#include "hns_roce_trace.h"
+
enum {
CMD_RST_PRC_OTHERS,
CMD_RST_PRC_SUCCESS,
@@ -738,6 +740,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
else
ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+ trace_hns_sq_wqe(qp->qpn, wqe_idx, wqe, 1 << qp->sq.wqe_shift,
+ wr->wr_id, TRACE_SQ);
if (unlikely(ret)) {
*bad_wr = wr;
goto out;
@@ -807,6 +811,9 @@ static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
+
+ trace_hns_rq_wqe(hr_qp->qpn, wqe_idx, wqe, 1 << hr_qp->rq.wqe_shift,
+ wr->wr_id, TRACE_RQ);
}
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
@@ -943,7 +950,7 @@ static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
static void update_srq_db(struct hns_roce_srq *srq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
- struct hns_roce_v2_db db;
+ struct hns_roce_v2_db db = {};
hr_reg_write(&db, DB_TAG, srq->srqn);
hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
@@ -984,6 +991,9 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
fill_wqe_idx(srq, wqe_idx);
srq->wrid[wqe_idx] = wr->wr_id;
+
+ trace_hns_srq_wqe(srq->srqn, wqe_idx, wqe, 1 << srq->wqe_shift,
+ wr->wr_id, TRACE_SRQ);
}
if (likely(nreq)) {
@@ -1311,6 +1321,8 @@ static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
tail = csq->head;
for (i = 0; i < num; i++) {
+ trace_hns_cmdq_req(hr_dev, &desc[i]);
+
csq->desc[csq->head++] = desc[i];
if (csq->head == csq->desc_num)
csq->head = 0;
@@ -1325,6 +1337,8 @@ static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
if (hns_roce_cmq_csq_done(hr_dev)) {
ret = 0;
for (i = 0; i < num; i++) {
+ trace_hns_cmdq_resp(hr_dev, &csq->desc[tail]);
+
/* check the result of hardware write back */
desc_ret = le16_to_cpu(csq->desc[tail++].retval);
if (tail == csq->desc_num)
@@ -4302,8 +4316,7 @@ static inline int get_pdn(struct ib_pd *ib_pd)
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
- struct hns_roce_v2_qp_context *context,
- struct hns_roce_v2_qp_context *qpc_mask)
+ struct hns_roce_v2_qp_context *context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -5122,7 +5135,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
- modify_qp_reset_to_init(ibqp, context, qpc_mask);
+ modify_qp_reset_to_init(ibqp, context);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
@@ -5313,6 +5326,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp,
return;
spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
+ trace_hns_sq_flush_cqe(hr_qp->qpn, hr_qp->sq.head, TRACE_SQ);
hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head);
hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);
hr_qp->state = IB_QPS_ERR;
@@ -5322,6 +5336,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp,
return;
spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
+ trace_hns_rq_flush_cqe(hr_qp->qpn, hr_qp->rq.head, TRACE_RQ);
hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX);
spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
@@ -6248,6 +6263,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
eq->sub_type = sub_type;
++eq->cons_index;
aeqe_found = IRQ_HANDLED;
+ trace_hns_ae_info(event_type, aeqe, eq->eqe_size);
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_AEQE_CNT]);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 91a5665465ff..bc7466830eaf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -34,6 +34,7 @@
#define _HNS_ROCE_HW_V2_H
#include <linux/bitops.h>
+#include "hnae3.h"
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index cf89a8db4f64..e7a497cc125c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -37,7 +37,6 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
@@ -763,7 +762,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
if (ret)
return ret;
}
- dma_set_max_seg_size(dev, UINT_MAX);
+ dma_set_max_seg_size(dev, SZ_2G);
ret = ib_register_device(ib_dev, "hns_%d", dev);
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 09da3496843b..93a48b41955b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -38,6 +38,7 @@
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
+#include "hns_roce_trace.h"
static u32 hw_index_to_key(int ind)
{
@@ -159,6 +160,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
+ trace_hns_mr(mr);
if (mr->type != MR_TYPE_FRMR)
ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
else
@@ -1146,6 +1148,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
+ trace_hns_buf_attr(buf_attr);
/* The caller has its own buffer list and invokes the hns_roce_mtr_map()
* to finish the MTT configuration.
*/
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 356d98816949..f637b73b946e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -4,7 +4,6 @@
#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_trace.h b/drivers/infiniband/hw/hns/hns_roce_trace.h
new file mode 100644
index 000000000000..59ceb591b3a1
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_trace.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2025 Hisilicon Limited.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns_roce
+
+#if !defined(__HNS_ROCE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HNS_ROCE_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <linux/string_choices.h>
+#include "hns_roce_device.h"
+#include "hns_roce_hw_v2.h"
+
+DECLARE_EVENT_CLASS(flush_head_template,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type),
+
+ TP_STRUCT__entry(__field(unsigned long, qpn)
+ __field(u32, pi)
+ __field(enum hns_roce_trace_type, type)
+ ),
+
+ TP_fast_assign(__entry->qpn = qpn;
+ __entry->pi = pi;
+ __entry->type = type;
+ ),
+
+ TP_printk("%s 0x%lx flush head 0x%x.",
+ trace_type_to_str(__entry->type),
+ __entry->qpn, __entry->pi)
+);
+
+DEFINE_EVENT(flush_head_template, hns_sq_flush_cqe,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type));
+DEFINE_EVENT(flush_head_template, hns_rq_flush_cqe,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type));
+
+#define MAX_SGE_PER_WQE 64
+#define MAX_WQE_SIZE (MAX_SGE_PER_WQE * HNS_ROCE_SGE_SIZE)
+DECLARE_EVENT_CLASS(wqe_template,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len,
+ u64 id, enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type),
+
+ TP_STRUCT__entry(__field(unsigned long, qpn)
+ __field(u32, idx)
+ __array(u32, wqe,
+ MAX_WQE_SIZE / sizeof(__le32))
+ __field(u32, len)
+ __field(u64, id)
+ __field(enum hns_roce_trace_type, type)
+ ),
+
+ TP_fast_assign(__entry->qpn = qpn;
+ __entry->idx = idx;
+ __entry->id = id;
+ __entry->len = len / sizeof(__le32);
+ __entry->type = type;
+ for (int i = 0; i < __entry->len; i++)
+ __entry->wqe[i] = le32_to_cpu(((__le32 *)wqe)[i]);
+ ),
+
+ TP_printk("%s 0x%lx wqe(0x%x/0x%llx): %s",
+ trace_type_to_str(__entry->type),
+ __entry->qpn, __entry->idx, __entry->id,
+ __print_array(__entry->wqe, __entry->len,
+ sizeof(__le32)))
+);
+
+DEFINE_EVENT(wqe_template, hns_sq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+DEFINE_EVENT(wqe_template, hns_rq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+DEFINE_EVENT(wqe_template, hns_srq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+
+TRACE_EVENT(hns_ae_info,
+ TP_PROTO(int event_type, void *aeqe, unsigned int len),
+ TP_ARGS(event_type, aeqe, len),
+
+ TP_STRUCT__entry(__field(int, event_type)
+ __array(u32, aeqe,
+ HNS_ROCE_V3_EQE_SIZE / sizeof(__le32))
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(__entry->event_type = event_type;
+ __entry->len = len / sizeof(__le32);
+ for (int i = 0; i < __entry->len; i++)
+ __entry->aeqe[i] = le32_to_cpu(((__le32 *)aeqe)[i]);
+ ),
+
+ TP_printk("event %2d aeqe: %s", __entry->event_type,
+ __print_array(__entry->aeqe, __entry->len, sizeof(__le32)))
+);
+
+TRACE_EVENT(hns_mr,
+ TP_PROTO(struct hns_roce_mr *mr),
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(__field(u64, iova)
+ __field(u64, size)
+ __field(u32, key)
+ __field(u32, pd)
+ __field(u32, pbl_hop_num)
+ __field(u32, npages)
+ __field(int, type)
+ __field(int, enabled)
+ ),
+
+ TP_fast_assign(__entry->iova = mr->iova;
+ __entry->size = mr->size;
+ __entry->key = mr->key;
+ __entry->pd = mr->pd;
+ __entry->pbl_hop_num = mr->pbl_hop_num;
+ __entry->npages = mr->npages;
+ __entry->type = mr->type;
+ __entry->enabled = mr->enabled;
+ ),
+
+ TP_printk("iova:0x%llx, size:%llu, key:%u, pd:%u, pbl_hop:%u, npages:%u, type:%d, status:%d",
+ __entry->iova, __entry->size, __entry->key,
+ __entry->pd, __entry->pbl_hop_num, __entry->npages,
+ __entry->type, __entry->enabled)
+);
+
+TRACE_EVENT(hns_buf_attr,
+ TP_PROTO(struct hns_roce_buf_attr *attr),
+ TP_ARGS(attr),
+
+ TP_STRUCT__entry(__field(unsigned int, region_count)
+ __field(unsigned int, region0_size)
+ __field(int, region0_hopnum)
+ __field(unsigned int, region1_size)
+ __field(int, region1_hopnum)
+ __field(unsigned int, region2_size)
+ __field(int, region2_hopnum)
+ __field(unsigned int, page_shift)
+ __field(bool, mtt_only)
+ ),
+
+ TP_fast_assign(__entry->region_count = attr->region_count;
+ __entry->region0_size = attr->region[0].size;
+ __entry->region0_hopnum = attr->region[0].hopnum;
+ __entry->region1_size = attr->region[1].size;
+ __entry->region1_hopnum = attr->region[1].hopnum;
+ __entry->region2_size = attr->region[2].size;
+ __entry->region2_hopnum = attr->region[2].hopnum;
+ __entry->page_shift = attr->page_shift;
+ __entry->mtt_only = attr->mtt_only;
+ ),
+
+ TP_printk("rg cnt:%u, pg_sft:0x%x, mtt_only:%s, rg 0 (sz:%u, hop:%u), rg 1 (sz:%u, hop:%u), rg 2 (sz:%u, hop:%u)\n",
+ __entry->region_count, __entry->page_shift,
+ str_yes_no(__entry->mtt_only),
+ __entry->region0_size, __entry->region0_hopnum,
+ __entry->region1_size, __entry->region1_hopnum,
+ __entry->region2_size, __entry->region2_hopnum)
+);
+
+DECLARE_EVENT_CLASS(cmdq,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc),
+
+ TP_STRUCT__entry(__string(dev_name, dev_name(hr_dev->dev))
+ __field(u16, opcode)
+ __field(u16, flag)
+ __field(u16, retval)
+ __array(u32, data, 6)
+ ),
+
+ TP_fast_assign(__assign_str(dev_name);
+ __entry->opcode = le16_to_cpu(desc->opcode);
+ __entry->flag = le16_to_cpu(desc->flag);
+ __entry->retval = le16_to_cpu(desc->retval);
+ for (int i = 0; i < 6; i++)
+ __entry->data[i] = le32_to_cpu(desc->data[i]);
+ ),
+
+ TP_printk("%s cmdq opcode:0x%x, flag:0x%x, retval:0x%x, data:%s\n",
+ __get_str(dev_name), __entry->opcode,
+ __entry->flag, __entry->retval,
+ __print_array(__entry->data, 6, sizeof(__le32)))
+);
+
+DEFINE_EVENT(cmdq, hns_cmdq_req,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc));
+DEFINE_EVENT(cmdq, hns_cmdq_resp,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc));
+
+#endif /* __HNS_ROCE_TRACE_H */
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hns_roce_trace
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 6aed6169c07d..99a7f1a6c0b5 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -3131,7 +3131,7 @@ int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
ibdev_dbg(to_ibdev(cqp->dev),
- "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n",
+ "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%p] cqp[%p] polarity[x%04x]\n",
cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
(u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
return 0;
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 1ee8969595d3..1e840bbd619d 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -1,10 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"
-#include "../../../net/ethernet/intel/ice/ice.h"
MODULE_ALIAS("i40iw");
-MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA");
MODULE_LICENSE("Dual BSD/GPL");
@@ -61,7 +59,7 @@ static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
}
static void irdma_fill_qos_info(struct irdma_l2params *l2params,
- struct iidc_qos_params *qos_info)
+ struct iidc_rdma_qos_params *qos_info)
{
int i;
@@ -85,12 +83,13 @@ static void irdma_fill_qos_info(struct irdma_l2params *l2params,
}
}
-static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event)
+static void irdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
{
- struct irdma_device *iwdev = dev_get_drvdata(&pf->adev->dev);
+ struct irdma_device *iwdev = dev_get_drvdata(&cdev_info->adev->dev);
struct irdma_l2params l2params = {};
- if (*event->type & BIT(IIDC_EVENT_AFTER_MTU_CHANGE)) {
+ if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
l2params.mtu = iwdev->netdev->mtu;
@@ -98,25 +97,26 @@ static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event
irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
irdma_change_l2params(&iwdev->vsi, &l2params);
}
- } else if (*event->type & BIT(IIDC_EVENT_BEFORE_TC_CHANGE)) {
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) {
if (iwdev->vsi.tc_change_pending)
return;
irdma_prep_tc_change(iwdev);
- } else if (*event->type & BIT(IIDC_EVENT_AFTER_TC_CHANGE)) {
- struct iidc_qos_params qos_info = {};
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) {
+ struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
if (!iwdev->vsi.tc_change_pending)
return;
l2params.tc_changed = true;
ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
- ice_get_qos_params(pf, &qos_info);
- irdma_fill_qos_info(&l2params, &qos_info);
+
+ irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
- iwdev->dcb_vlan_mode = qos_info.num_tc > 1 && !l2params.dscp_mode;
+ iwdev->dcb_vlan_mode =
+ l2params.num_tc > 1 && !l2params.dscp_mode;
irdma_change_l2params(&iwdev->vsi, &l2params);
- } else if (*event->type & BIT(IIDC_EVENT_CRIT_ERR)) {
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) {
ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
event->reg);
if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
@@ -151,10 +151,8 @@ static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event
*/
static void irdma_request_reset(struct irdma_pci_f *rf)
{
- struct ice_pf *pf = rf->cdev;
-
ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
- ice_rdma_request_reset(pf, IIDC_PFR);
+ ice_rdma_request_reset(rf->cdev, IIDC_FUNC_RESET);
}
/**
@@ -166,14 +164,15 @@ static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
- struct ice_pf *pf = iwdev->rf->cdev;
+ struct iidc_rdma_core_dev_info *cdev_info;
struct iidc_rdma_qset_params qset = {};
int ret;
+ cdev_info = iwdev->rf->cdev;
qset.qs_handle = tc_node->qs_handle;
qset.tc = tc_node->traffic_class;
qset.vport_id = vsi->vsi_idx;
- ret = ice_add_rdma_qset(pf, &qset);
+ ret = ice_add_rdma_qset(cdev_info, &qset);
if (ret) {
ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
return ret;
@@ -194,19 +193,20 @@ static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
- struct ice_pf *pf = iwdev->rf->cdev;
+ struct iidc_rdma_core_dev_info *cdev_info;
struct iidc_rdma_qset_params qset = {};
+ cdev_info = iwdev->rf->cdev;
qset.qs_handle = tc_node->qs_handle;
qset.tc = tc_node->traffic_class;
qset.vport_id = vsi->vsi_idx;
qset.teid = tc_node->l2_sched_node_id;
- if (ice_del_rdma_qset(pf, &qset))
+ if (ice_del_rdma_qset(cdev_info, &qset))
ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
}
-static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
+static int irdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
{
int i;
@@ -217,12 +217,12 @@ static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
return -ENOMEM;
for (i = 0; i < rf->msix_count; i++)
- if (ice_alloc_rdma_qvector(pf, &rf->msix_entries[i]))
+ if (ice_alloc_rdma_qvector(cdev, &rf->msix_entries[i]))
break;
if (i < IRDMA_MIN_MSIX) {
- for (; i > 0; i--)
- ice_free_rdma_qvector(pf, &rf->msix_entries[i]);
+ while (--i >= 0)
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
kfree(rf->msix_entries);
return -ENOMEM;
@@ -233,54 +233,65 @@ static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
return 0;
}
-static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
+static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
{
int i;
for (i = 0; i < rf->msix_count; i++)
- ice_free_rdma_qvector(pf, &rf->msix_entries[i]);
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
kfree(rf->msix_entries);
}
static void irdma_remove(struct auxiliary_device *aux_dev)
{
- struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
- struct iidc_auxiliary_dev,
- adev);
- struct ice_pf *pf = iidc_adev->pf;
struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+ struct iidc_rdma_core_auxiliary_dev *iidc_adev;
+ struct iidc_rdma_core_dev_info *cdev_info;
+
+ iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ cdev_info = iidc_adev->cdev_info;
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false);
irdma_ib_unregister_device(iwdev);
- ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false);
- irdma_deinit_interrupts(iwdev->rf, pf);
+ irdma_deinit_interrupts(iwdev->rf, cdev_info);
- pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
+ kfree(iwdev->rf);
+
+ pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(cdev_info->pdev->devfn));
}
-static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf,
- struct ice_vsi *vsi)
+static void irdma_fill_device_info(struct irdma_device *iwdev,
+ struct iidc_rdma_core_dev_info *cdev_info)
{
+ struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
struct irdma_pci_f *rf = iwdev->rf;
- rf->cdev = pf;
+ rf->sc_dev.hw = &rf->hw;
+ rf->iwdev = iwdev;
+ rf->cdev = cdev_info;
+ rf->hw.hw_addr = iidc_priv->hw_addr;
+ rf->pcidev = cdev_info->pdev;
+ rf->hw.device = &rf->pcidev->dev;
+ rf->pf_id = iidc_priv->pf_id;
rf->gen_ops.register_qset = irdma_lan_register_qset;
rf->gen_ops.unregister_qset = irdma_lan_unregister_qset;
- rf->hw.hw_addr = pf->hw.hw_addr;
- rf->pcidev = pf->pdev;
- rf->pf_id = pf->hw.pf_id;
- rf->default_vsi.vsi_idx = vsi->vsi_num;
- rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ?
- IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
+
+ rf->default_vsi.vsi_idx = iidc_priv->vport_id;
+ rf->protocol_used =
+ cdev_info->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
rf->rdma_ver = IRDMA_GEN_2;
rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
rf->gen_ops.request_reset = irdma_request_reset;
rf->limits_sel = 7;
rf->iwdev = iwdev;
+
mutex_init(&iwdev->ah_tbl_lock);
- iwdev->netdev = vsi->netdev;
- iwdev->vsi_num = vsi->vsi_num;
+
+ iwdev->netdev = iidc_priv->netdev;
+ iwdev->vsi_num = iidc_priv->vport_id;
iwdev->init_state = INITIAL_STATE;
iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
@@ -292,19 +303,18 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf
static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
{
- struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
- struct iidc_auxiliary_dev,
- adev);
- struct ice_pf *pf = iidc_adev->pf;
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct iidc_qos_params qos_info = {};
+ struct iidc_rdma_core_auxiliary_dev *iidc_adev;
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct iidc_rdma_priv_dev_info *iidc_priv;
+ struct irdma_l2params l2params = {};
struct irdma_device *iwdev;
struct irdma_pci_f *rf;
- struct irdma_l2params l2params = {};
int err;
- if (!vsi)
- return -EIO;
+ iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ cdev_info = iidc_adev->cdev_info;
+ iidc_priv = cdev_info->iidc_priv;
+
iwdev = ib_alloc_device(irdma_device, ibdev);
if (!iwdev)
return -ENOMEM;
@@ -314,10 +324,10 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
return -ENOMEM;
}
- irdma_fill_device_info(iwdev, pf, vsi);
+ irdma_fill_device_info(iwdev, cdev_info);
rf = iwdev->rf;
- err = irdma_init_interrupts(rf, pf);
+ err = irdma_init_interrupts(rf, cdev_info);
if (err)
goto err_init_interrupts;
@@ -326,8 +336,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
goto err_ctrl_init;
l2params.mtu = iwdev->netdev->mtu;
- ice_get_qos_params(pf, &qos_info);
- irdma_fill_qos_info(&l2params, &qos_info);
+ irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
@@ -339,7 +348,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
if (err)
goto err_ibreg;
- ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, true);
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, true);
ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
auxiliary_set_drvdata(aux_dev, iwdev);
@@ -351,7 +360,7 @@ err_ibreg:
err_rt_init:
irdma_ctrl_deinit_hw(rf);
err_ctrl_init:
- irdma_deinit_interrupts(rf, pf);
+ irdma_deinit_interrupts(rf, cdev_info);
err_init_interrupts:
kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
@@ -367,7 +376,7 @@ static const struct auxiliary_device_id irdma_auxiliary_id_table[] = {
MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table);
-static struct iidc_auxiliary_drv irdma_auxiliary_drv = {
+static struct iidc_rdma_core_auxiliary_drv irdma_auxiliary_drv = {
.adrv = {
.id_table = irdma_auxiliary_id_table,
.probe = irdma_probe,
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index bb0b6494ccb2..674acc952168 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -29,7 +29,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#endif
#include <linux/auxiliary_bus.h>
-#include <linux/net/intel/iidc.h>
+#include <linux/net/intel/iidc_rdma.h>
+#include <linux/net/intel/iidc_rdma_ice.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
index 4b4f78288d12..3f73ceacccb6 100644
--- a/drivers/infiniband/hw/irdma/osdep.h
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -5,8 +5,8 @@
#include <linux/pci.h>
#include <linux/bitfield.h>
-#include <linux/net/intel/iidc.h>
#include <rdma/ib_verbs.h>
+#include <net/dscp.h>
#define STATS_TIMER_DELAY 60000
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index e7ce6840755f..37ce35cb10e7 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -108,7 +108,7 @@ static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
chunk->vaddr = sd_entry->u.bp.addr.va + offset;
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
ibdev_dbg(to_ibdev(dev),
- "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n",
+ "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%p fpm_addr = %llx\n",
chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
return 0;
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 59b34afa867b..527c6da2c1ac 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -567,7 +567,7 @@ struct irdma_sc_vsi {
u8 qos_rel_bw;
u8 qos_prio_type;
u8 stats_idx;
- u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
+ u8 dscp_map[DSCP_MAX];
struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
bool dscp_mode:1;
@@ -695,7 +695,7 @@ struct irdma_l2params {
u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
u16 mtu;
u8 up2tc[IRDMA_MAX_USER_PRIORITY];
- u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
+ u8 dscp_map[DSCP_MAX];
u8 num_tc;
u8 vsi_rel_bw;
u8 vsi_prio_type;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index eeb932e58730..1e8c92826de2 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -4871,5 +4871,4 @@ void irdma_ib_dealloc_device(struct ib_device *ibdev)
irdma_rt_deinit_hw(iwdev);
irdma_ctrl_deinit_hw(iwdev->rf);
- kfree(iwdev->rf);
}
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 0fc4e2679218..28e154bbb50f 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -15,14 +15,12 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_device *ibdev = ibcq->device;
struct mana_ib_create_cq ucmd = {};
struct mana_ib_dev *mdev;
- struct gdma_context *gc;
bool is_rnic_cq;
u32 doorbell;
u32 buf_size;
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev_to_gc(mdev);
cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
cq->cq_handle = INVALID_MANA_HANDLE;
@@ -65,7 +63,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
ibdev_dbg(ibdev, "Failed to create kernel queue for create cq, %d\n", err);
return err;
}
- doorbell = gc->mana_ib.doorbell;
+ doorbell = mdev->gdma_dev->doorbell;
}
if (is_rnic_cq) {
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index b31089320aa5..165c0a1e67d1 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -101,103 +101,95 @@ static int mana_ib_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
+ struct gdma_context *gc = madev->mdev->gdma_context;
+ struct mana_context *mc = gc->mana.driver_data;
struct gdma_dev *mdev = madev->mdev;
struct net_device *ndev;
- struct mana_context *mc;
struct mana_ib_dev *dev;
u8 mac_addr[ETH_ALEN];
int ret;
- mc = mdev->driver_data;
-
dev = ib_alloc_device(mana_ib_dev, ib_dev);
if (!dev)
return -ENOMEM;
ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops);
-
- dev->ib_dev.phys_port_cnt = mc->num_ports;
-
- ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
- mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
-
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
-
- /*
- * num_comp_vectors needs to set to the max MSIX index
- * when interrupts and event queues are implemented
- */
- dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues;
- dev->ib_dev.dev.parent = mdev->gdma_context->dev;
-
- ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
- if (!ndev) {
- ret = -ENODEV;
- ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
- goto free_ib_device;
- }
- ether_addr_copy(mac_addr, ndev->dev_addr);
- addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
- ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
- /* mana_get_primary_netdev() returns ndev with refcount held */
- netdev_put(ndev, &dev->dev_tracker);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
- goto free_ib_device;
- }
-
- ret = mana_gd_register_device(&mdev->gdma_context->mana_ib);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register device, ret %d",
- ret);
- goto free_ib_device;
- }
- dev->gdma_dev = &mdev->gdma_context->mana_ib;
-
- dev->nb.notifier_call = mana_ib_netdev_event;
- ret = register_netdevice_notifier(&dev->nb);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
- ret);
- goto deregister_device;
- }
-
- ret = mana_ib_gd_query_adapter_caps(dev);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d",
- ret);
- goto deregister_net_notifier;
- }
-
- ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
-
- ret = mana_ib_create_eqs(dev);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
- goto deregister_net_notifier;
- }
-
- ret = mana_ib_gd_create_rnic_adapter(dev);
- if (ret)
- goto destroy_eqs;
-
+ dev->ib_dev.num_comp_vectors = gc->max_num_queues;
+ dev->ib_dev.dev.parent = gc->dev;
+ dev->gdma_dev = mdev;
xa_init_flags(&dev->qp_table_wq, XA_FLAGS_LOCK_IRQ);
- ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d",
- ret);
- goto destroy_rnic;
+
+ if (mana_ib_is_rnic(dev)) {
+ dev->ib_dev.phys_port_cnt = 1;
+ ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
+ if (!ndev) {
+ ret = -ENODEV;
+ ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
+ goto free_ib_device;
+ }
+ ether_addr_copy(mac_addr, ndev->dev_addr);
+ addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
+ ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ netdev_put(ndev, &dev->dev_tracker);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
+ goto free_ib_device;
+ }
+
+ dev->nb.notifier_call = mana_ib_netdev_event;
+ ret = register_netdevice_notifier(&dev->nb);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
+ ret);
+ goto free_ib_device;
+ }
+
+ ret = mana_ib_gd_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", ret);
+ goto deregister_net_notifier;
+ }
+
+ ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
+
+ ret = mana_ib_create_eqs(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
+ goto deregister_net_notifier;
+ }
+
+ ret = mana_ib_gd_create_rnic_adapter(dev);
+ if (ret)
+ goto destroy_eqs;
+
+ ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
+ goto destroy_rnic;
+ }
+ } else {
+ dev->ib_dev.phys_port_cnt = mc->num_ports;
+ ret = mana_eth_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query ETH device caps, ret %d", ret);
+ goto free_ib_device;
+ }
}
- dev->av_pool = dma_pool_create("mana_ib_av", mdev->gdma_context->dev,
- MANA_AV_BUFFER_SIZE, MANA_AV_BUFFER_SIZE, 0);
+ dev->av_pool = dma_pool_create("mana_ib_av", gc->dev, MANA_AV_BUFFER_SIZE,
+ MANA_AV_BUFFER_SIZE, 0);
if (!dev->av_pool) {
ret = -ENOMEM;
goto destroy_rnic;
}
- ret = ib_register_device(&dev->ib_dev, "mana_%d",
- mdev->gdma_context->dev);
+ ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
+ mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
+
+ ret = ib_register_device(&dev->ib_dev, mana_ib_is_rnic(dev) ? "mana_%d" : "manae_%d",
+ gc->dev);
if (ret)
goto deallocate_pool;
@@ -208,15 +200,16 @@ static int mana_ib_probe(struct auxiliary_device *adev,
deallocate_pool:
dma_pool_destroy(dev->av_pool);
destroy_rnic:
- xa_destroy(&dev->qp_table_wq);
- mana_ib_gd_destroy_rnic_adapter(dev);
+ if (mana_ib_is_rnic(dev))
+ mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
- mana_ib_destroy_eqs(dev);
+ if (mana_ib_is_rnic(dev))
+ mana_ib_destroy_eqs(dev);
deregister_net_notifier:
- unregister_netdevice_notifier(&dev->nb);
-deregister_device:
- mana_gd_deregister_device(dev->gdma_dev);
+ if (mana_ib_is_rnic(dev))
+ unregister_netdevice_notifier(&dev->nb);
free_ib_device:
+ xa_destroy(&dev->qp_table_wq);
ib_dealloc_device(&dev->ib_dev);
return ret;
}
@@ -227,25 +220,24 @@ static void mana_ib_remove(struct auxiliary_device *adev)
ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
+ if (mana_ib_is_rnic(dev)) {
+ mana_ib_gd_destroy_rnic_adapter(dev);
+ mana_ib_destroy_eqs(dev);
+ unregister_netdevice_notifier(&dev->nb);
+ }
xa_destroy(&dev->qp_table_wq);
- mana_ib_gd_destroy_rnic_adapter(dev);
- mana_ib_destroy_eqs(dev);
- unregister_netdevice_notifier(&dev->nb);
- mana_gd_deregister_device(dev->gdma_dev);
ib_dealloc_device(&dev->ib_dev);
}
static const struct auxiliary_device_id mana_id_table[] = {
- {
- .name = "mana.rdma",
- },
+ { .name = "mana.rdma", },
+ { .name = "mana.eth", },
{},
};
MODULE_DEVICE_TABLE(auxiliary, mana_id_table);
static struct auxiliary_driver mana_driver = {
- .name = "rdma",
.probe = mana_ib_probe,
.remove = mana_ib_remove,
.id_table = mana_id_table,
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index eda9c5b971de..41a24a186f9d 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -4,6 +4,7 @@
*/
#include "mana_ib.h"
+#include "linux/pci.h"
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
u32 port)
@@ -243,7 +244,6 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
struct mana_ib_queue *queue)
{
- struct gdma_context *gc = mdev_to_gc(mdev);
struct gdma_queue_spec spec = {};
int err;
@@ -252,7 +252,7 @@ int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_qu
spec.type = type;
spec.monitor_avl_buf = false;
spec.queue_size = size;
- err = mana_gd_create_mana_wq_cq(&gc->mana_ib, &spec, &queue->kmem);
+ err = mana_gd_create_mana_wq_cq(mdev->gdma_dev, &spec, &queue->kmem);
if (err)
return err;
/* take ownership into mana_ib from mana */
@@ -479,7 +479,7 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
{
unsigned long page_sz;
- page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
+ page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
@@ -494,7 +494,7 @@ int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_ume
unsigned long page_sz;
/* Hardware requires dma region to align to chosen page size */
- page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
+ page_sz = ib_umem_find_best_pgoff(umem, dev->adapter_caps.page_size_cap, 0);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
@@ -551,6 +551,7 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
struct ib_port_attr attr;
int err;
@@ -560,10 +561,12 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
- if (port_num == 1) {
- immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ if (mana_ib_is_rnic(dev)) {
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ } else {
+ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
}
return 0;
@@ -572,12 +575,14 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
- struct mana_ib_dev *dev = container_of(ibdev,
- struct mana_ib_dev, ib_dev);
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct pci_dev *pdev = to_pci_dev(mdev_to_gc(dev)->dev);
memset(props, 0, sizeof(*props));
+ props->vendor_id = pdev->vendor;
+ props->vendor_part_id = dev->gdma_dev->dev_id.type;
props->max_mr_size = MANA_IB_MAX_MR_SIZE;
- props->page_size_cap = PAGE_SZ_BM;
+ props->page_size_cap = dev->adapter_caps.page_size_cap;
props->max_qp = dev->adapter_caps.max_qp_count;
props->max_qp_wr = dev->adapter_caps.max_qp_wr;
props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
@@ -596,6 +601,8 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
props->max_ah = INT_MAX;
props->max_pkeys = 1;
props->local_ca_ack_delay = MANA_CA_ACK_DELAY;
+ if (!mana_ib_is_rnic(dev))
+ props->raw_packet_caps = IB_RAW_PACKET_CAP_IP_CSUM;
return 0;
}
@@ -603,6 +610,7 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
int mana_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
struct net_device *ndev = mana_ib_get_netdev(ibdev, port);
if (!ndev)
@@ -623,7 +631,7 @@ int mana_ib_query_port(struct ib_device *ibdev, u32 port,
props->active_width = IB_WIDTH_4X;
props->active_speed = IB_SPEED_EDR;
props->pkey_tbl_len = 1;
- if (port == 1) {
+ if (mana_ib_is_rnic(dev)) {
props->gid_tbl_len = 16;
props->port_cap_flags = IB_PORT_CM_SUP;
props->ip_gids = true;
@@ -696,6 +704,41 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
caps->max_recv_sge_count = resp.max_recv_sge_count;
caps->feature_flags = resp.feature_flags;
+ caps->page_size_cap = PAGE_SZ_BM;
+ if (mdev_to_gc(dev)->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB)
+ caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G);
+
+ return 0;
+}
+
+int mana_eth_query_adapter_caps(struct mana_ib_dev *dev)
+{
+ struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
+ struct gdma_query_max_resources_resp resp = {};
+ struct gdma_general_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
+ sizeof(req), sizeof(resp));
+
+ err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&dev->ib_dev,
+ "Failed to query adapter caps err %d", err);
+ return err;
+ }
+
+ caps->max_qp_count = min_t(u32, resp.max_sq, resp.max_rq);
+ caps->max_cq_count = resp.max_cq;
+ caps->max_mr_count = resp.max_mst;
+ caps->max_pd_count = 0x6000;
+ caps->max_qp_wr = min_t(u32,
+ 0x100000 / GDMA_MAX_SQE_SIZE,
+ 0x100000 / GDMA_MAX_RQE_SIZE);
+ caps->max_send_sge_count = 30;
+ caps->max_recv_sge_count = 15;
+ caps->page_size_cap = PAGE_SZ_BM;
+
return 0;
}
@@ -740,7 +783,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
spec.eq.msix_index = 0;
- err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
+ err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->fatal_err_eq);
if (err)
return err;
@@ -791,7 +834,7 @@ int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
req.hdr.req.msg_version = GDMA_MESSAGE_V2;
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.notify_eq_id = mdev->fatal_err_eq->id;
if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT)
@@ -816,7 +859,7 @@ int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
gc = mdev_to_gc(mdev);
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
@@ -843,7 +886,7 @@ int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_ADD;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
@@ -873,7 +916,7 @@ int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_REMOVE;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
@@ -896,7 +939,7 @@ int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = op;
copy_in_reverse(req.mac_addr, mac, ETH_ALEN);
@@ -917,8 +960,11 @@ int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 do
struct mana_rnic_create_cq_req req = {};
int err;
+ if (!mdev->eqs)
+ return -EINVAL;
+
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.gdma_region = cq->queue.gdma_region;
req.eq_id = mdev->eqs[cq->comp_vector]->id;
@@ -950,7 +996,7 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
return 0;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.cq_handle = cq->cq_handle;
@@ -976,7 +1022,7 @@ int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
int err, i;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.pd_handle = pd->pd_handle;
req.send_cq_handle = send_cq->cq_handle;
@@ -1012,7 +1058,7 @@ int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.rc_qp_handle = qp->qp_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
@@ -1035,7 +1081,7 @@ int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
int err, i;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.pd_handle = pd->pd_handle;
req.send_cq_handle = send_cq->cq_handle;
@@ -1070,7 +1116,7 @@ int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6903946677e5..42bebd6cd4f7 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -60,6 +60,7 @@ struct mana_ib_adapter_caps {
u32 max_recv_sge_count;
u32 max_inline_data_size;
u64 feature_flags;
+ u64 page_size_cap;
};
struct mana_ib_queue {
@@ -543,6 +544,11 @@ static inline void mana_put_qp_ref(struct mana_ib_qp *qp)
complete(&qp->free);
}
+static inline bool mana_ib_is_rnic(struct mana_ib_dev *mdev)
+{
+ return mdev->gdma_dev->dev_id.type == GDMA_DEVICE_MANA_IB;
+}
+
static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
@@ -642,6 +648,7 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
+int mana_eth_query_adapter_caps(struct mana_ib_dev *mdev);
int mana_ib_create_eqs(struct mana_ib_dev *mdev);
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index f99557ec7767..6d974d0a8400 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -5,8 +5,8 @@
#include "mana_ib.h"
-#define VALID_MR_FLAGS \
- (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)
+#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
+ IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)
#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)
@@ -24,6 +24,9 @@ mana_ib_verbs_to_gdma_access_flags(int access_flags)
if (access_flags & IB_ACCESS_REMOTE_READ)
flags |= GDMA_ACCESS_FLAG_REMOTE_READ;
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;
+
return flags;
}
@@ -48,7 +51,10 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
req.gva.virtual_address = mr_params->gva.virtual_address;
req.gva.access_flags = mr_params->gva.access_flags;
break;
-
+ case GDMA_MR_TYPE_ZBVA:
+ req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
+ req.zbva.access_flags = mr_params->zbva.access_flags;
+ break;
default:
ibdev_dbg(&dev->ib_dev,
"invalid param (GDMA_MR_TYPE) passed, type %d\n",
@@ -144,11 +150,18 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
- mr_params.mr_type = GDMA_MR_TYPE_GVA;
- mr_params.gva.dma_region_handle = dma_region_handle;
- mr_params.gva.virtual_address = iova;
- mr_params.gva.access_flags =
- mana_ib_verbs_to_gdma_access_flags(access_flags);
+ if (access_flags & IB_ZERO_BASED) {
+ mr_params.mr_type = GDMA_MR_TYPE_ZBVA;
+ mr_params.zbva.dma_region_handle = dma_region_handle;
+ mr_params.zbva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ } else {
+ mr_params.mr_type = GDMA_MR_TYPE_GVA;
+ mr_params.gva.dma_region_handle = dma_region_handle;
+ mr_params.gva.virtual_address = iova;
+ mr_params.gva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ }
err = mana_ib_gd_create_mr(dev, mr, &mr_params);
if (err)
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index c928af58f38b..14fd7d6c54a2 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -635,7 +635,6 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
{
struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
- struct gdma_context *gc = mdev_to_gc(mdev);
u32 doorbell, queue_size;
int i, err;
@@ -654,7 +653,7 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
goto destroy_queues;
}
}
- doorbell = gc->mana_ib.doorbell;
+ doorbell = mdev->gdma_dev->doorbell;
err = create_shadow_queue(&qp->shadow_rq, attr->cap.max_recv_wr,
sizeof(struct ud_rq_shadow_wqe));
@@ -736,7 +735,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
req.qp_state = attr->qp_state;
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 33f525b744f2..e279e69b9a51 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -43,7 +43,7 @@
#define MAX_VFS 80
#define MAX_PEND_REQS_PER_FUNC 4
-#define MAD_TIMEOUT_MS 2000
+#define MAD_TIMEOUT_SEC 2
#define mcg_warn(fmt, arg...) pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...) pr_err(fmt, ##arg)
@@ -270,7 +270,7 @@ static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad
if (!ret) {
/* calls mlx4_ib_mcg_timeout_handler */
queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
- msecs_to_jiffies(MAD_TIMEOUT_MS));
+ secs_to_jiffies(MAD_TIMEOUT_SEC));
}
return ret;
@@ -309,7 +309,7 @@ static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
if (!ret) {
/* calls mlx4_ib_mcg_timeout_handler */
queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
- msecs_to_jiffies(MAD_TIMEOUT_MS));
+ secs_to_jiffies(MAD_TIMEOUT_SEC));
}
return ret;
@@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
for (i = 0; i < MAX_VFS; ++i)
clean_vf_mcast(ctx, i);
- end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
+ end = jiffies + secs_to_jiffies(MAD_TIMEOUT_SEC + 3);
do {
count = 0;
mutex_lock(&ctx->mcg_table_lock);
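The mcg.c hunks are a pure units cleanup: the 2000 ms MAD timeout becomes 2 s, and the cleanup wait of MAD_TIMEOUT_MS + 3000 ms becomes MAD_TIMEOUT_SEC + 3 s, so every delay resolves to the same number of jiffies as before. A small stand-alone sketch of that equivalence (the SKETCH_* names and the fixed HZ value are illustrative, not the kernel's implementations):

/* Illustrative only: with any consistent HZ, 2000 ms and 2 s convert to
 * the same jiffy count, which is all the MAD_TIMEOUT change relies on. */
#define SKETCH_HZ 250UL

static unsigned long sketch_msecs_to_jiffies(unsigned long ms)
{
	return ms * SKETCH_HZ / 1000;
}

static unsigned long sketch_secs_to_jiffies(unsigned long s)
{
	return s * SKETCH_HZ;
}

/* sketch_msecs_to_jiffies(2000) == sketch_secs_to_jiffies(2) == 500 jiffies */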
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 251246c73b33..680627f1de33 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -1645,11 +1645,6 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
}
-enum {
- LEFTOVERS_MC,
- LEFTOVERS_UC,
-};
-
static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
struct ib_flow_attr *flow_attr,
@@ -1659,43 +1654,32 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
struct mlx5_ib_flow_handler *handler = NULL;
static struct {
- struct ib_flow_attr flow_attr;
struct ib_flow_spec_eth eth_flow;
- } leftovers_specs[] = {
- [LEFTOVERS_MC] = {
- .flow_attr = {
- .num_of_specs = 1,
- .size = sizeof(leftovers_specs[0])
- },
- .eth_flow = {
- .type = IB_FLOW_SPEC_ETH,
- .size = sizeof(struct ib_flow_spec_eth),
- .mask = {.dst_mac = {0x1} },
- .val = {.dst_mac = {0x1} }
- }
- },
- [LEFTOVERS_UC] = {
- .flow_attr = {
- .num_of_specs = 1,
- .size = sizeof(leftovers_specs[0])
- },
- .eth_flow = {
- .type = IB_FLOW_SPEC_ETH,
- .size = sizeof(struct ib_flow_spec_eth),
- .mask = {.dst_mac = {0x1} },
- .val = {.dst_mac = {} }
- }
- }
- };
+ struct ib_flow_attr flow_attr;
+ } leftovers_wc = { .flow_attr = { .num_of_specs = 1,
+ .size = sizeof(leftovers_wc) },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = { .dst_mac = { 0x1 } },
+ .val = { .dst_mac = { 0x1 } } } };
- handler = create_flow_rule(dev, ft_prio,
- &leftovers_specs[LEFTOVERS_MC].flow_attr,
- dst);
+ static struct {
+ struct ib_flow_spec_eth eth_flow;
+ struct ib_flow_attr flow_attr;
+ } leftovers_uc = { .flow_attr = { .num_of_specs = 1,
+ .size = sizeof(leftovers_uc) },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = { .dst_mac = { 0x1 } },
+ .val = { .dst_mac = {} } } };
+
+ handler = create_flow_rule(dev, ft_prio, &leftovers_wc.flow_attr, dst);
if (!IS_ERR(handler) &&
flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
handler_ucast = create_flow_rule(dev, ft_prio,
- &leftovers_specs[LEFTOVERS_UC].flow_attr,
- dst);
+ &leftovers_uc.flow_attr, dst);
if (IS_ERR(handler_ucast)) {
mlx5_del_flow_rules(handler->rule);
ft_prio->refcount--;
@@ -3461,7 +3445,6 @@ DECLARE_UVERBS_NAMED_OBJECT(
&UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY));
const struct uapi_definition mlx5_ib_flow_defs[] = {
-#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
MLX5_IB_OBJECT_FLOW_MATCHER),
UAPI_DEF_CHAIN_OBJ_TREE(
@@ -3472,7 +3455,6 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
MLX5_IB_OBJECT_STEERING_ANCHOR,
UAPI_DEF_IS_OBJ_SUPPORTED(mlx5_ib_shared_ft_allowed)),
-#endif
{},
};
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d07cacaa0abd..ce7610740412 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -485,6 +485,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_200GAUI_1_200GBASE_CR1_KR1):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_XDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_HDR;
@@ -493,10 +497,18 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_400GAUI_2_400GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_XDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_800GAUI_8_800GBASE_CR8_KR8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_800GAUI_4_800GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_XDR;
+ break;
default:
return -EINVAL;
}
@@ -4422,17 +4434,6 @@ static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
mlx5_core_native_port_num(dev->mdev) - 1);
}
-static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
-{
- dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
- return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
-}
-
-static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
-{
- mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
-}
-
static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
int err;
@@ -4662,9 +4663,6 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
mlx5_ib_stage_cong_debugfs_init,
mlx5_ib_stage_cong_debugfs_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_UAR,
- mlx5_ib_stage_uar_init,
- mlx5_ib_stage_uar_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
mlx5_ib_stage_bfrag_init,
mlx5_ib_stage_bfrag_cleanup),
@@ -4722,9 +4720,6 @@ const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
mlx5_ib_stage_cong_debugfs_init,
mlx5_ib_stage_cong_debugfs_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_UAR,
- mlx5_ib_stage_uar_init,
- mlx5_ib_stage_uar_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
mlx5_ib_stage_bfrag_init,
mlx5_ib_stage_bfrag_cleanup),
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index ace2df3e1d9f..fde859d207ae 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -351,6 +351,7 @@ struct mlx5_ib_flow_db {
#define MLX5_IB_UPD_XLT_PD BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)
+#define MLX5_IB_UPD_XLT_DOWNGRADE BIT(7)
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
*
@@ -1005,7 +1006,6 @@ enum mlx5_ib_stages {
MLX5_IB_STAGE_ODP,
MLX5_IB_STAGE_COUNTERS,
MLX5_IB_STAGE_CONG_DEBUGFS,
- MLX5_IB_STAGE_UAR,
MLX5_IB_STAGE_BFREG,
MLX5_IB_STAGE_PRE_IB_REG_UMR,
MLX5_IB_STAGE_WHITELIST_UID,
@@ -1473,8 +1473,8 @@ void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev);
-void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags);
+int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags);
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
@@ -1495,8 +1495,11 @@ static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{
return 0;
}
-static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags) {}
+static inline int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
+{
+ return -EOPNOTSUPP;
+}
static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 5fbebafc8774..6dd813bac5b2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -525,7 +525,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
ent->fill_to_high_water = false;
if (ent->pending)
queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
- msecs_to_jiffies(1000));
+ secs_to_jiffies(1));
else
mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
}
@@ -576,7 +576,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
"add keys command failed, err %d\n",
err);
queue_delayed_work(cache->wq, &ent->dwork,
- msecs_to_jiffies(1000));
+ secs_to_jiffies(1));
}
}
} else if (ent->mkeys_queue.ci > 2 * ent->limit) {
@@ -2051,7 +2051,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
ent->in_use--;
if (ent->is_tmp && !ent->tmp_cleanup_scheduled) {
mod_delayed_work(ent->dev->cache.wq, &ent->dwork,
- msecs_to_jiffies(30 * 1000));
+ secs_to_jiffies(30));
ent->tmp_cleanup_scheduled = true;
}
spin_unlock_irq(&ent->mkeys_queue.lock);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 86d8fa63bf69..eaa2f9f5f3a9 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -34,6 +34,9 @@
#include <linux/kernel.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
+#include <linux/hmm.h>
+#include <linux/hmm-dma.h>
+#include <linux/pci-p2pdma.h>
#include "mlx5_ib.h"
#include "cmd.h"
@@ -158,41 +161,50 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
}
}
-static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
-{
- u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
-
- if (umem_dma & ODP_READ_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_READ;
- if (umem_dma & ODP_WRITE_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_WRITE;
-
- return mtt_entry;
-}
-
-static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags)
+static int populate_mtt(__be64 *pas, size_t start, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- dma_addr_t pa;
+ bool downgrade = flags & MLX5_IB_UPD_XLT_DOWNGRADE;
+ struct pci_p2pdma_map_state p2pdma_state = {};
+ struct ib_device *dev = odp->umem.ibdev;
size_t i;
if (flags & MLX5_IB_UPD_XLT_ZAP)
- return;
+ return 0;
for (i = 0; i < nentries; i++) {
- pa = odp->dma_list[idx + i];
- pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+ unsigned long pfn = odp->map.pfn_list[start + i];
+ dma_addr_t dma_addr;
+
+ pfn = odp->map.pfn_list[start + i];
+ if (!(pfn & HMM_PFN_VALID))
+ /* ODP initialization */
+ continue;
+
+ dma_addr = hmm_dma_map_pfn(dev->dma_device, &odp->map,
+ start + i, &p2pdma_state);
+ if (ib_dma_mapping_error(dev, dma_addr))
+ return -EFAULT;
+
+ dma_addr |= MLX5_IB_MTT_READ;
+ if ((pfn & HMM_PFN_WRITE) && !downgrade)
+ dma_addr |= MLX5_IB_MTT_WRITE;
+
+ pas[i] = cpu_to_be64(dma_addr);
+ odp->npages++;
}
+ return 0;
}
-void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags)
+int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
{
if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
populate_klm(xlt, idx, nentries, mr, flags);
+ return 0;
} else {
- populate_mtt(xlt, idx, nentries, mr, flags);
+ return populate_mtt(xlt, idx, nentries, mr, flags);
}
}
@@ -303,8 +315,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
* estimate the cost of another UMR vs. the cost of bigger
* UMR.
*/
- if (umem_odp->dma_list[idx] &
- (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
+ if (umem_odp->map.pfn_list[idx] & HMM_PFN_VALID) {
if (!in_block) {
blk_start_idx = idx;
in_block = 1;
@@ -687,7 +698,7 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
{
int page_shift, ret, np;
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
- u64 access_mask;
+ u64 access_mask = 0;
u64 start_idx;
bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;
@@ -695,12 +706,14 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
if (flags & MLX5_PF_FLAGS_ENABLE)
xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
+ if (flags & MLX5_PF_FLAGS_DOWNGRADE)
+ xlt_flags |= MLX5_IB_UPD_XLT_DOWNGRADE;
+
page_shift = odp->page_shift;
start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
- access_mask = ODP_READ_ALLOWED_BIT;
if (odp->umem.writable && !downgrade)
- access_mask |= ODP_WRITE_ALLOWED_BIT;
+ access_mask |= HMM_PFN_WRITE;
np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
if (np < 0)
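To summarize the populate_mtt() rework above: the ODP path no longer reads pre-computed DMA addresses out of a dma_list, but walks odp->map.pfn_list, DMA-maps each valid HMM pfn on the fly, and derives the MTT permission bits from the HMM_PFN_* flags (write access only when the pfn is writable and no downgrade was requested). A reduced sketch of that per-entry translation, with names taken from the hunk (sketch only, error handling omitted):

#include <linux/types.h>
#include <linux/hmm.h>	/* HMM_PFN_WRITE */

/* Sketch of the per-entry logic in the new populate_mtt(): the pfn has
 * already been DMA-mapped; fold the HMM permission flags into the MTT
 * entry. MLX5_IB_MTT_READ/WRITE are the driver macros used in the hunk. */
static inline u64 sketch_mtt_entry(dma_addr_t dma_addr, unsigned long pfn,
				   bool downgrade)
{
	u64 mtt = dma_addr | MLX5_IB_MTT_READ;	/* read access is always granted */

	if ((pfn & HMM_PFN_WRITE) && !downgrade)
		mtt |= MLX5_IB_MTT_WRITE;	/* write only when HMM allows it */

	return mtt;
}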
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index d3dcc272200a..146d03ae40bd 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -21,8 +21,10 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
spin_lock_irqsave(&table->lock, flags);
common = radix_tree_lookup(&table->tree, rsn);
- if (common)
+ if (common && !common->invalid)
refcount_inc(&common->refcount);
+ else
+ common = NULL;
spin_unlock_irqrestore(&table->lock, flags);
@@ -178,6 +180,18 @@ static int create_resource_common(struct mlx5_ib_dev *dev,
return 0;
}
+static void modify_resource_common_state(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *qp,
+ bool invalid)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&table->lock, flags);
+ qp->common.invalid = invalid;
+ spin_unlock_irqrestore(&table->lock, flags);
+}
+
static void destroy_resource_common(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *qp)
{
@@ -609,8 +623,20 @@ err_destroy_rq:
int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *rq)
{
+ int ret;
+
+ /* RQ destruction may be retried if it fails, so mark the common
+ * resource as invalid first and only tear down the resources once
+ * FW destruction has completed successfully.
+ */
+ modify_resource_common_state(dev, rq, true);
+ ret = destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ if (ret) {
+ modify_resource_common_state(dev, rq, false);
+ return ret;
+ }
destroy_resource_common(dev, rq);
- return destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ return 0;
}
static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 793f3c5c4d01..5be4426a2884 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -840,7 +840,17 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
size_to_map = npages * desc_size;
dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
- mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ /*
+ * npages is the maximum number of pages to map, but we
+ * can't guarantee that all pages are actually mapped.
+ *
+ * For example, if a page is a P2P page of a type that is not
+ * supported for mapping, the number of pages mapped will be
+ * less than requested.
+ */
+ err = mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ if (err)
+ return err;
dma_sync_single_for_device(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
sg.length = ALIGN(size_to_map, MLX5_UMR_FLEX_ALIGNMENT);
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 192f83fd7c8a..dacb8ceeebe0 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -144,7 +144,7 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
- buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
+ buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
GFP_KERNEL);
buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
GFP_KERNEL);
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index b9f4a2937c3a..2098de762bf5 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -90,7 +90,7 @@ static int create_file(const char *name, umode_t mode,
int error;
inode_lock(d_inode(parent));
- *dentry = lookup_one_len(name, parent, strlen(name));
+ *dentry = lookup_noperm(&QSTR(name), parent);
if (!IS_ERR(*dentry))
error = qibfs_mknod(d_inode(parent), *dentry,
mode, fops, data);
@@ -433,7 +433,7 @@ static int remove_device_files(struct super_block *sb,
char unit[10];
snprintf(unit, sizeof(unit), "%u", dd->unit);
- dir = lookup_one_len_unlocked(unit, sb->s_root, strlen(unit));
+ dir = lookup_noperm_unlocked(&QSTR(unit), sb->s_root);
if (IS_ERR(dir)) {
pr_err("Lookup of %s failed\n", unit);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 4ddcd5860e0f..11eca39b73a9 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -397,7 +397,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
if (!us_ibdev) {
usnic_err("Device %s context alloc failed\n",
netdev_name(pci_get_drvdata(dev)));
- return ERR_PTR(-EFAULT);
+ return NULL;
}
us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
@@ -517,8 +517,8 @@ static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
}
us_ibdev = usnic_ib_device_add(parent_pci);
- if (IS_ERR_OR_NULL(us_ibdev)) {
- us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
+ if (!us_ibdev) {
+ us_ibdev = ERR_PTR(-EFAULT);
goto out;
}
@@ -586,10 +586,10 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
}
pf = usnic_ib_discover_pf(vf->vnic);
- if (IS_ERR_OR_NULL(pf)) {
- usnic_err("Failed to discover pf of vnic %s with err%ld\n",
- pci_name(pdev), PTR_ERR(pf));
- err = pf ? PTR_ERR(pf) : -EFAULT;
+ if (IS_ERR(pf)) {
+ err = PTR_ERR(pf);
+ usnic_err("Failed to discover pf of vnic %s with err%d\n",
+ pci_name(pdev), err);
goto out_clean_vnic;
}
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index f948b76f984d..3fbf99757b11 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -56,7 +56,7 @@ static int usnic_uiom_dma_fault(struct iommu_domain *domain,
unsigned long iova, int flags,
void *token)
{
- usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
+ usnic_err("Device %s iommu fault domain 0x%p va 0x%lx flags 0x%x\n",
dev_name(dev),
domain, iova, flags);
return -ENOSYS;
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index c180e7ebcfc5..1ed5b63f8afc 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
- depends on INET && PCI && INFINIBAND
+ depends on INET && PCI && INFINIBAND && 64BIT
depends on INFINIBAND_VIRT_DMA
select NET_UDP_TUNNEL
select CRC32
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index b248c68bf9b1..3a77d6db1720 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -101,6 +101,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev)
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_FLUSH;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC_WRITE;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index fec87c9030ab..fffd144d509e 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -56,11 +56,8 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
- if (err) {
- vfree(cq->queue->buf);
- kfree(cq->queue);
+ if (err)
return err;
- }
cq->is_user = uresp;
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index feb386d98d1d..876702058c84 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -70,9 +70,9 @@ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
void *addr, int length, enum rxe_mr_copy_dir dir);
int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
-int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val);
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
+enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
@@ -140,6 +140,12 @@ static inline int qp_mtu(struct rxe_qp *qp)
return IB_MTU_4096;
}
+static inline bool is_odp_mr(struct rxe_mr *mr)
+{
+ return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
+ mr->umem->is_odp;
+}
+
void free_rd_atomic_resource(struct resp_res *res);
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
@@ -187,13 +193,16 @@ static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
/* rxe_odp.c */
extern const struct mmu_interval_notifier_ops rxe_mn_ops;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+#if defined CONFIG_INFINIBAND_ON_DEMAND_PAGING
int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
u64 iova, int access_flags, struct rxe_mr *mr);
int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir);
-int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val);
+enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length);
+enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int
rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
@@ -206,9 +215,19 @@ static inline int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
{
return -EOPNOTSUPP;
}
-static inline int
+static inline enum resp_states
rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
+static inline int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length)
+{
+ return -EOPNOTSUPP;
+}
+static inline enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr,
+ u64 iova, u64 value)
{
return RESPST_ERR_UNSUPPORTED_OPCODE;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 868d2f0b74e9..bcb97b3ea58a 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -323,7 +323,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
return err;
}
- if (mr->umem->is_odp)
+ if (is_odp_mr(mr))
return rxe_odp_mr_copy(mr, iova, addr, length, dir);
else
return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
@@ -424,7 +424,7 @@ err1:
return err;
}
-int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
+static int rxe_mr_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
{
unsigned int page_offset;
unsigned long index;
@@ -433,16 +433,6 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
int err;
u8 *va;
- /* mr must be valid even if length is zero */
- if (WARN_ON(!mr))
- return -EINVAL;
-
- if (length == 0)
- return 0;
-
- if (mr->ibmr.type == IB_MR_TYPE_DMA)
- return -EFAULT;
-
err = mr_check_range(mr, iova, length);
if (err)
return err;
@@ -454,7 +444,7 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
if (!page)
return -EFAULT;
bytes = min_t(unsigned int, length,
- mr_page_size(mr) - page_offset);
+ mr_page_size(mr) - page_offset);
va = kmap_local_page(page);
arch_wb_cache_pmem(va + page_offset, bytes);
@@ -468,11 +458,33 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
return 0;
}
+int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 start, unsigned int length)
+{
+ int err;
+
+ /* mr must be valid even if length is zero */
+ if (WARN_ON(!mr))
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ if (mr->ibmr.type == IB_MR_TYPE_DMA)
+ return -EFAULT;
+
+ if (is_odp_mr(mr))
+ err = rxe_odp_flush_pmem_iova(mr, start, length);
+ else
+ err = rxe_mr_flush_pmem_iova(mr, start, length);
+
+ return err;
+}
+
/* Guarantee atomicity of atomic operations at the machine level. */
DEFINE_SPINLOCK(atomic_ops_lock);
-int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
{
unsigned int page_offset;
struct page *page;
@@ -524,27 +536,15 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
kunmap_local(va);
- return 0;
+ return RESPST_NONE;
}
-#if defined CONFIG_64BIT
-/* only implemented or called for 64 bit architectures */
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
{
unsigned int page_offset;
struct page *page;
u64 *va;
- /* ODP is not supported right now. WIP. */
- if (mr->umem->is_odp)
- return RESPST_ERR_UNSUPPORTED_OPCODE;
-
- /* See IBA oA19-28 */
- if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
- rxe_dbg_mr(mr, "mr not in valid state\n");
- return RESPST_ERR_RKEY_VIOLATION;
- }
-
if (mr->ibmr.type == IB_MR_TYPE_DMA) {
page_offset = iova & (PAGE_SIZE - 1);
page = ib_virt_dma_to_page(iova);
@@ -572,20 +572,12 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
}
va = kmap_local_page(page);
-
/* Do atomic write after all prior operations have completed */
smp_store_release(&va[page_offset >> 3], value);
-
kunmap_local(va);
- return 0;
-}
-#else
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
-{
- return RESPST_ERR_UNSUPPORTED_OPCODE;
+ return RESPST_NONE;
}
-#endif
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
index 9f6e2bb2a269..dbc5a5600eb7 100644
--- a/drivers/infiniband/sw/rxe/rxe_odp.c
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -4,6 +4,7 @@
*/
#include <linux/hmm.h>
+#include <linux/libnvdimm.h>
#include <rdma/ib_umem_odp.h>
@@ -26,7 +27,7 @@ static bool rxe_ib_invalidate_range(struct mmu_interval_notifier *mni,
start = max_t(u64, ib_umem_start(umem_odp), range->start);
end = min_t(u64, ib_umem_end(umem_odp), range->end);
- /* update umem_odp->dma_list */
+ /* update umem_odp->map.pfn_list */
ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
mutex_unlock(&umem_odp->umem_mutex);
@@ -44,12 +45,11 @@ static int rxe_odp_do_pagefault_and_lock(struct rxe_mr *mr, u64 user_va, int bcn
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
bool fault = !(flags & RXE_PAGEFAULT_SNAPSHOT);
- u64 access_mask;
+ u64 access_mask = 0;
int np;
- access_mask = ODP_READ_ALLOWED_BIT;
if (umem_odp->umem.writable && !(flags & RXE_PAGEFAULT_RDONLY))
- access_mask |= ODP_WRITE_ALLOWED_BIT;
+ access_mask |= HMM_PFN_WRITE;
/*
* ib_umem_odp_map_dma_and_lock() locks umem_mutex on success.
@@ -124,8 +124,8 @@ int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
return err;
}
-static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
- u64 iova, int length, u32 perm)
+static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, u64 iova,
+ int length)
{
bool need_fault = false;
u64 addr;
@@ -137,7 +137,7 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
while (addr < iova + length) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- if (!(umem_odp->dma_list[idx] & perm)) {
+ if (!(umem_odp->map.pfn_list[idx] & HMM_PFN_VALID)) {
need_fault = true;
break;
}
@@ -147,23 +147,28 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
return need_fault;
}
+static unsigned long rxe_odp_iova_to_index(struct ib_umem_odp *umem_odp, u64 iova)
+{
+ return (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
+}
+
+static unsigned long rxe_odp_iova_to_page_offset(struct ib_umem_odp *umem_odp, u64 iova)
+{
+ return iova & (BIT(umem_odp->page_shift) - 1);
+}
+
static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u32 flags)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
bool need_fault;
- u64 perm;
int err;
if (unlikely(length < 1))
return -EINVAL;
- perm = ODP_READ_ALLOWED_BIT;
- if (!(flags & RXE_PAGEFAULT_RDONLY))
- perm |= ODP_WRITE_ALLOWED_BIT;
-
mutex_lock(&umem_odp->umem_mutex);
- need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+ need_fault = rxe_check_pagefault(umem_odp, iova, length);
if (need_fault) {
mutex_unlock(&umem_odp->umem_mutex);
@@ -173,7 +178,7 @@ static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u
if (err < 0)
return err;
- need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+ need_fault = rxe_check_pagefault(umem_odp, iova, length);
if (need_fault)
return -EFAULT;
}
@@ -190,13 +195,13 @@ static int __rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
size_t offset;
u8 *user_va;
- idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- offset = iova & (BIT(umem_odp->page_shift) - 1);
+ idx = rxe_odp_iova_to_index(umem_odp, iova);
+ offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
while (length > 0) {
u8 *src, *dest;
- page = hmm_pfn_to_page(umem_odp->pfn_list[idx]);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
user_va = kmap_local_page(page);
if (!user_va)
return -EFAULT;
@@ -255,8 +260,9 @@ int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
return err;
}
-static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+static enum resp_states rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova,
+ int opcode, u64 compare,
+ u64 swap_add, u64 *orig_val)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
unsigned int page_offset;
@@ -277,9 +283,9 @@ static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
return RESPST_ERR_RKEY_VIOLATION;
}
- idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- page_offset = iova & (BIT(umem_odp->page_shift) - 1);
- page = hmm_pfn_to_page(umem_odp->pfn_list[idx]);
+ idx = rxe_odp_iova_to_index(umem_odp, iova);
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
if (!page)
return RESPST_ERR_RKEY_VIOLATION;
@@ -304,11 +310,11 @@ static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
kunmap_local(va);
- return 0;
+ return RESPST_NONE;
}
-int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
int err;
@@ -324,3 +330,91 @@ int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
return err;
}
+
+int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ unsigned int bytes;
+ int err;
+ u8 *va;
+
+ err = rxe_odp_map_range_and_lock(mr, iova, length,
+ RXE_PAGEFAULT_DEFAULT);
+ if (err)
+ return err;
+
+ while (length > 0) {
+ index = rxe_odp_iova_to_index(umem_odp, iova);
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+ if (!page) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ return -EFAULT;
+ }
+
+ bytes = min_t(unsigned int, length,
+ mr_page_size(mr) - page_offset);
+
+ va = kmap_local_page(page);
+ arch_wb_cache_pmem(va + page_offset, bytes);
+ kunmap_local(va);
+
+ length -= bytes;
+ iova += bytes;
+ page_offset = 0;
+ }
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return 0;
+}
+
+enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ int err;
+ u64 *va;
+
+ /* See IBA oA19-28 */
+ err = mr_check_range(mr, iova, sizeof(value));
+ if (unlikely(err)) {
+ rxe_dbg_mr(mr, "iova out of range\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ err = rxe_odp_map_range_and_lock(mr, iova, sizeof(value),
+ RXE_PAGEFAULT_DEFAULT);
+ if (err)
+ return RESPST_ERR_RKEY_VIOLATION;
+
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+ index = rxe_odp_iova_to_index(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+ if (!page) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+ /* See IBA A19.4.2 */
+ if (unlikely(page_offset & 0x7)) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ rxe_dbg_mr(mr, "misaligned address\n");
+ return RESPST_ERR_MISALIGNED_ATOMIC;
+ }
+
+ va = kmap_local_page(page);
+ /* Do atomic write after all prior operations have completed */
+ smp_store_release(&va[page_offset >> 3], value);
+ kunmap_local(va);
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return RESPST_NONE;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 003f681e5dc0..767870568372 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -53,12 +53,9 @@ enum rxe_device_param {
| IB_DEVICE_MEM_WINDOW
| IB_DEVICE_FLUSH_GLOBAL
| IB_DEVICE_FLUSH_PERSISTENT
-#ifdef CONFIG_64BIT
| IB_DEVICE_MEM_WINDOW_TYPE_2B
| IB_DEVICE_ATOMIC_WRITE,
-#else
- | IB_DEVICE_MEM_WINDOW_TYPE_2B,
-#endif /* CONFIG_64BIT */
+
RXE_MAX_SGE = 32,
RXE_MAX_WQE_SIZE = sizeof(struct rxe_send_wqe) +
sizeof(struct ib_sge) * RXE_MAX_SGE,
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 7975fb0e2782..f2af3e0aef35 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -811,7 +811,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
spin_unlock_irqrestore(&qp->state_lock, flags);
qp->qp_timeout_jiffies = 0;
- if (qp_type(qp) == IB_QPT_RC) {
+ /* timer_setup() initializes .function. If .function is NULL,
+ * timer_setup() has not been called and the timer was never
+ * initialized; otherwise it has been.
+ */
+ if (qp_type(qp) == IB_QPT_RC && qp->retrans_timer.function &&
+ qp->rnr_nak_timer.function) {
timer_delete_sync(&qp->retrans_timer);
timer_delete_sync(&qp->rnr_nak_timer);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 54ba9ee1acc5..711f73e0bbb1 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -649,10 +649,6 @@ static enum resp_states process_flush(struct rxe_qp *qp,
struct rxe_mr *mr = qp->resp.mr;
struct resp_res *res = qp->resp.res;
- /* ODP is not supported right now. WIP. */
- if (mr->umem->is_odp)
- return RESPST_ERR_UNSUPPORTED_OPCODE;
-
/* oA19-14, oA19-15 */
if (res && res->replay)
return RESPST_ACKNOWLEDGE;
@@ -706,7 +702,7 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
if (!res->replay) {
u64 iova = qp->resp.va + qp->resp.offset;
- if (mr->umem->is_odp)
+ if (is_odp_mr(mr))
err = rxe_odp_atomic_op(mr, iova, pkt->opcode,
atmeth_comp(pkt),
atmeth_swap_add(pkt),
@@ -753,7 +749,16 @@ static enum resp_states atomic_write_reply(struct rxe_qp *qp,
value = *(u64 *)payload_addr(pkt);
iova = qp->resp.va + qp->resp.offset;
- err = rxe_mr_do_atomic_write(mr, iova, value);
+ /* See IBA oA19-28 */
+ if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+ rxe_dbg_mr(mr, "mr not in valid state\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ if (is_odp_mr(mr))
+ err = rxe_odp_do_atomic_write(mr, iova, value);
+ else
+ err = rxe_mr_do_atomic_write(mr, iova, value);
if (err)
return err;
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 80332638d9e3..6f8f353e9583 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -85,17 +85,17 @@ static bool is_done(struct rxe_task *task)
/* do_task is a wrapper for the three tasks (requester,
* completer, responder) and calls them in a loop until
- * they return a non-zero value. It is called either
- * directly by rxe_run_task or indirectly if rxe_sched_task
- * schedules the task. They must call __reserve_if_idle to
- * move the task to busy before calling or scheduling.
- * The task can also be moved to drained or invalid
- * by calls to rxe_cleanup_task or rxe_disable_task.
- * In that case tasks which get here are not executed but
- * just flushed. The tasks are designed to look to see if
- * there is work to do and then do part of it before returning
- * here with a return value of zero until all the work
- * has been consumed then it returns a non-zero value.
+ * they return a non-zero value. It is called indirectly
+ * when rxe_sched_task schedules the task. They must
+ * call __reserve_if_idle to move the task to busy before
+ * calling or scheduling. The task can also be moved to
+ * drained or invalid by calls to rxe_cleanup_task or
+ * rxe_disable_task. In that case tasks which get here
+ * are not executed but just flushed. The tasks are
+ * designed to look to see if there is work to do and
+ * then do part of it before returning here with a return
+ * value of zero until all the work has been consumed then
+ * it returns a non-zero value.
* The number of times the task can be run is limited by
* max iterations so one task cannot hold the cpu forever.
* If the limit is hit and work remains the task is rescheduled.
@@ -234,24 +234,6 @@ void rxe_cleanup_task(struct rxe_task *task)
spin_unlock_irqrestore(&task->lock, flags);
}
-/* run the task inline if it is currently idle
- * cannot call do_task holding the lock
- */
-void rxe_run_task(struct rxe_task *task)
-{
- unsigned long flags;
- bool run;
-
- WARN_ON(rxe_read(task->qp) <= 0);
-
- spin_lock_irqsave(&task->lock, flags);
- run = __reserve_if_idle(task);
- spin_unlock_irqrestore(&task->lock, flags);
-
- if (run)
- do_task(task);
-}
-
/* schedule the task to run later as a work queue entry.
* the queue_work call can be called holding
* the lock.
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index a63e258b3d66..a8c9a77b6027 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -47,8 +47,6 @@ int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
/* cleanup task */
void rxe_cleanup_task(struct rxe_task *task);
-void rxe_run_task(struct rxe_task *task);
-
void rxe_sched_task(struct rxe_task *task);
/* keep a task from scheduling */
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index ae4a953e2a03..186f182b80e7 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -3,6 +3,7 @@ config RDMA_SIW
depends on INET && INFINIBAND
depends on INFINIBAND_VIRT_DMA
select CRC32
+ select NET_CRC32C
help
This driver implements the iWARP RDMA transport over
the Linux TCP/IP network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 385067e07faf..f5fd71717b80 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -693,29 +693,9 @@ static inline void siw_crc_oneshot(const void *data, size_t len, u8 out[4])
return siw_crc_final(&crc, out);
}
-static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum)
-{
- return (__force __wsum)crc32c((__force __u32)sum, buff, len);
-}
-
-static inline __wsum siw_csum_combine(__wsum csum, __wsum csum2, int offset,
- int len)
-{
- return (__force __wsum)crc32c_combine((__force __u32)csum,
- (__force __u32)csum2, len);
-}
-
static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
{
- const struct skb_checksum_ops siw_cs_ops = {
- .update = siw_csum_update,
- .combine = siw_csum_combine,
- };
- __wsum crc = (__force __wsum)srx->mpa_crc;
-
- crc = __skb_checksum(srx->skb, srx->skb_offset, len, crc,
- &siw_cs_ops);
- srx->mpa_crc = (__force u32)crc;
+ srx->mpa_crc = skb_crc32c(srx->skb, srx->skb_offset, len, srx->mpa_crc);
}
#define siw_dbg(ibdev, fmt, ...) \
@@ -738,7 +718,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
"MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
#define siw_dbg_cep(cep, fmt, ...) \
- ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
+ ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \
cep, __func__, ##__VA_ARGS__)
void siw_cq_flush(struct siw_cq *cq);
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index f3c2226aff94..25b3c741b66b 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -72,7 +72,7 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
wc->opcode = map_wc_opcode[cqe->opcode];
wc->status = map_cqe_status[cqe->status].ib;
siw_dbg_cq(cq,
- "idx %u, type %d, flags %2x, id 0x%pK\n",
+ "idx %u, type %d, flags %2x, id 0x%p\n",
cq->cq_get % cq->num_cqe, cqe->opcode,
cqe->flags, (void *)(uintptr_t)cqe->id);
} else {
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index dcb963607c8b..d5ddeb17bd22 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -18,30 +18,6 @@
#define SIW_STAG_MAX_INDEX 0x00ffffff
/*
- * The code avoids special Stag of zero and tries to randomize
- * STag values between 1 and SIW_STAG_MAX_INDEX.
- */
-int siw_mem_add(struct siw_device *sdev, struct siw_mem *m)
-{
- struct xa_limit limit = XA_LIMIT(1, SIW_STAG_MAX_INDEX);
- u32 id, next;
-
- get_random_bytes(&next, 4);
- next &= SIW_STAG_MAX_INDEX;
-
- if (xa_alloc_cyclic(&sdev->mem_xa, &id, m, limit, &next,
- GFP_KERNEL) < 0)
- return -ENOMEM;
-
- /* Set the STag index part */
- m->stag = id << 8;
-
- siw_dbg_mem(m, "new MEM object\n");
-
- return 0;
-}
-
-/*
* siw_mem_id2obj()
*
* resolves memory from stag given by id. might be called from:
@@ -181,10 +157,10 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
*/
if (addr < mem->va || addr + len > mem->va + mem->len) {
siw_dbg_pd(pd, "MEM interval len %d\n", len);
- siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
+ siw_dbg_pd(pd, "[0x%p, 0x%p] out of bounds\n",
(void *)(uintptr_t)addr,
(void *)(uintptr_t)(addr + len));
- siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
+ siw_dbg_pd(pd, "[0x%p, 0x%p] STag=0x%08x\n",
(void *)(uintptr_t)mem->va,
(void *)(uintptr_t)(mem->va + mem->len),
mem->stag);
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index e74cfcd6dbc1..8e769d30e2ac 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -12,7 +12,6 @@ void siw_umem_release(struct siw_umem *umem);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
-int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
enum ib_access_flags perms, int len);
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 32554eba1eac..a10820e33887 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -38,7 +38,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
p = siw_get_upage(umem, dest_addr);
if (unlikely(!p)) {
- pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
+ pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n",
__func__, qp_id(rx_qp(srx)),
(void *)(uintptr_t)dest_addr,
(void *)(uintptr_t)umem->fp_addr);
@@ -51,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
pg_off = dest_addr & ~PAGE_MASK;
bytes = min(len, (int)PAGE_SIZE - pg_off);
- siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
+ siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes);
dest = kmap_atomic(p);
rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -105,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
{
int rv;
- siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
+ siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len);
rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
if (unlikely(rv)) {
- pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
+ pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n",
qp_id(rx_qp(srx)), __func__, len, kva, rv);
return rv;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index fd7b266a221b..2b2a7b8e93b0 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -936,7 +936,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
rv = -EINVAL;
break;
}
- siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
+ siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n",
sqe->opcode, sqe->flags,
(void *)(uintptr_t)sqe->id);
@@ -1102,7 +1102,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
siw_dbg_qp(qp, "error %d\n", rv);
*bad_wr = wr;
}
- return rv > 0 ? 0 : rv;
+ return rv;
}
int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
@@ -1332,7 +1332,7 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
struct siw_device *sdev = to_siw_dev(pd->device);
int rv;
- siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
+ siw_dbg_pd(pd, "start: 0x%p, va: 0x%p, len: %llu\n",
(void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
(unsigned long long)len);
@@ -1525,7 +1525,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
mem->len = base_mr->length;
mem->va = base_mr->iova;
siw_dbg_mem(mem,
- "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
+ "%llu bytes, start 0x%p, %u SLE to %u entries\n",
mem->len, (void *)(uintptr_t)mem->va, num_sle,
pbl->num_buf);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index abe0522b7df4..91f866e3fb8b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -329,14 +329,6 @@ struct ipoib_dev_priv {
unsigned long flags;
- /*
- * This protects access to the child_intfs list.
- * To READ from child_intfs the RTNL or vlan_rwsem read side must be
- * held. To WRITE RTNL and the vlan_rwsem write side must be held (in
- * that order) This lock exists because we have a few contexts where
- * we need the child_intfs, but do not want to grab the RTNL.
- */
- struct rw_semaphore vlan_rwsem;
struct mutex mcast_mutex;
struct rb_root path_tree;
@@ -399,6 +391,9 @@ struct ipoib_dev_priv {
struct ib_event_handler event_handler;
struct net_device *parent;
+ /* 'child_intfs' and 'list' membership of all child devices are
+ * protected by the netdev instance lock of 'dev'.
+ */
struct list_head child_intfs;
struct list_head list;
int child_type;
@@ -512,6 +507,8 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *format,
void ipoib_ib_dev_flush_light(struct work_struct *work);
void ipoib_ib_dev_flush_normal(struct work_struct *work);
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
+void ipoib_queue_work(struct ipoib_dev_priv *priv,
+ enum ipoib_flush_level level);
void ipoib_ib_tx_timeout_work(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
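The new comment in ipoib.h makes the locking rule explicit: child_intfs is now serialized by the parent's netdev instance lock instead of vlan_rwsem. A minimal sketch of that convention (the helper name is illustrative, not part of the patch):

	/* Walk the child list under the parent's netdev instance lock. */
	static void walk_children(struct ipoib_dev_priv *priv)
	{
		struct ipoib_dev_priv *cpriv;

		netdev_lock(priv->dev);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			ipoib_dbg(cpriv, "child %s\n", cpriv->dev->name);
		netdev_unlock(priv->dev);
	}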
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5cde275daa94..10b0dbda6cd5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -40,6 +40,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <net/netdev_lock.h>
#include <rdma/ib_cache.h>
#include "ipoib.h"
@@ -781,16 +782,20 @@ static void ipoib_napi_enable(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- napi_enable(&priv->recv_napi);
- napi_enable(&priv->send_napi);
+ netdev_lock_ops_to_full(dev);
+ napi_enable_locked(&priv->recv_napi);
+ napi_enable_locked(&priv->send_napi);
+ netdev_unlock_full_to_ops(dev);
}
static void ipoib_napi_disable(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- napi_disable(&priv->recv_napi);
- napi_disable(&priv->send_napi);
+ netdev_lock_ops_to_full(dev);
+ napi_disable_locked(&priv->recv_napi);
+ napi_disable_locked(&priv->send_napi);
+ netdev_unlock_full_to_ops(dev);
}
int ipoib_ib_dev_stop_default(struct net_device *dev)
@@ -1172,24 +1177,11 @@ out:
}
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
- enum ipoib_flush_level level,
- int nesting)
+ enum ipoib_flush_level level)
{
- struct ipoib_dev_priv *cpriv;
struct net_device *dev = priv->dev;
int result;
- down_read_nested(&priv->vlan_rwsem, nesting);
-
- /*
- * Flush any child interfaces too -- they might be up even if
- * the parent is down.
- */
- list_for_each_entry(cpriv, &priv->child_intfs, list)
- __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
-
- up_read(&priv->vlan_rwsem);
-
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
level != IPOIB_FLUSH_HEAVY) {
/* Make sure the dev_addr is set even if not flushing */
@@ -1253,10 +1245,14 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
+ netdev_lock_ops(dev);
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_ib_dev_stop(dev);
- if (ipoib_ib_dev_open(dev))
+ result = ipoib_ib_dev_open(dev);
+ netdev_unlock_ops(dev);
+
+ if (result)
return;
if (netif_queue_stopped(dev))
@@ -1280,7 +1276,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_light);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}
void ipoib_ib_dev_flush_normal(struct work_struct *work)
@@ -1288,7 +1284,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_normal);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
@@ -1297,10 +1293,35 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
container_of(work, struct ipoib_dev_priv, flush_heavy);
rtnl_lock();
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
rtnl_unlock();
}
+void ipoib_queue_work(struct ipoib_dev_priv *priv,
+ enum ipoib_flush_level level)
+{
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ struct ipoib_dev_priv *cpriv;
+
+ netdev_lock(priv->dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ ipoib_queue_work(cpriv, level);
+ netdev_unlock(priv->dev);
+ }
+
+ switch (level) {
+ case IPOIB_FLUSH_LIGHT:
+ queue_work(ipoib_workqueue, &priv->flush_light);
+ break;
+ case IPOIB_FLUSH_NORMAL:
+ queue_work(ipoib_workqueue, &priv->flush_normal);
+ break;
+ case IPOIB_FLUSH_HEAVY:
+ queue_work(ipoib_workqueue, &priv->flush_heavy);
+ break;
+ }
+}
+
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
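ipoib_napi_enable()/ipoib_napi_disable() now take the device's full instance lock before calling the _locked NAPI variants. A sketch of that convention in isolation, with an illustrative helper name:

	/* napi_{enable,disable}_locked() require the netdev instance lock;
	 * netdev_lock_ops_to_full() upgrades from the ops-locked state.
	 */
	static void enable_one_napi(struct net_device *dev, struct napi_struct *napi)
	{
		netdev_lock_ops_to_full(dev);
		napi_enable_locked(napi);
		netdev_unlock_full_to_ops(dev);
	}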
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 3b463db8ce39..f2f5465f2a90 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -49,6 +49,7 @@
#include <linux/jhash.h>
#include <net/arp.h>
#include <net/addrconf.h>
+#include <net/netdev_lock.h>
#include <net/pkt_sched.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
@@ -132,6 +133,52 @@ static int ipoib_netdev_event(struct notifier_block *this,
}
#endif
+struct ipoib_ifupdown_work {
+ struct work_struct work;
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+ bool up;
+};
+
+static void ipoib_ifupdown_task(struct work_struct *work)
+{
+ struct ipoib_ifupdown_work *pwork =
+ container_of(work, struct ipoib_ifupdown_work, work);
+ struct net_device *dev = pwork->dev;
+ unsigned int flags;
+
+ rtnl_lock();
+ flags = dev->flags;
+ if (pwork->up)
+ flags |= IFF_UP;
+ else
+ flags &= ~IFF_UP;
+
+ if (dev->flags != flags)
+ dev_change_flags(dev, flags, NULL);
+ rtnl_unlock();
+ netdev_put(dev, &pwork->dev_tracker);
+ kfree(pwork);
+}
+
+static void ipoib_schedule_ifupdown_task(struct net_device *dev, bool up)
+{
+ struct ipoib_ifupdown_work *work;
+
+ if ((up && (dev->flags & IFF_UP)) ||
+ (!up && !(dev->flags & IFF_UP)))
+ return;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return;
+ work->dev = dev;
+ netdev_hold(dev, &work->dev_tracker, GFP_KERNEL);
+ work->up = up;
+ INIT_WORK(&work->work, ipoib_ifupdown_task);
+ queue_work(ipoib_workqueue, &work->work);
+}
+
int ipoib_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -154,17 +201,10 @@ int ipoib_open(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring up any child interfaces too */
- down_read(&priv->vlan_rwsem);
- list_for_each_entry(cpriv, &priv->child_intfs, list) {
- int flags;
-
- flags = cpriv->dev->flags;
- if (flags & IFF_UP)
- continue;
-
- dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
- }
- up_read(&priv->vlan_rwsem);
+ netdev_lock_ops_to_full(dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ ipoib_schedule_ifupdown_task(cpriv->dev, true);
+ netdev_unlock_full_to_ops(dev);
} else if (priv->parent) {
struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
@@ -199,17 +239,10 @@ static int ipoib_stop(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring down any child interfaces too */
- down_read(&priv->vlan_rwsem);
- list_for_each_entry(cpriv, &priv->child_intfs, list) {
- int flags;
-
- flags = cpriv->dev->flags;
- if (!(flags & IFF_UP))
- continue;
-
- dev_change_flags(cpriv->dev, flags & ~IFF_UP, NULL);
- }
- up_read(&priv->vlan_rwsem);
+ netdev_lock_ops_to_full(dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ ipoib_schedule_ifupdown_task(cpriv->dev, false);
+ netdev_unlock_full_to_ops(dev);
}
return 0;
@@ -426,17 +459,20 @@ static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
}
}
+ if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+ return matches;
+
/* Check child interfaces */
- down_read_nested(&priv->vlan_rwsem, nesting);
+ netdev_lock(priv->dev);
list_for_each_entry(child_priv, &priv->child_intfs, list) {
matches += ipoib_match_gid_pkey_addr(child_priv, gid,
- pkey_index, addr,
- nesting + 1,
- found_net_dev);
+ pkey_index, addr,
+ nesting + 1,
+ found_net_dev);
if (matches > 1)
break;
}
- up_read(&priv->vlan_rwsem);
+ netdev_unlock(priv->dev);
return matches;
}
@@ -531,9 +567,11 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
ipoib_warn(priv, "enabling connected mode "
"will cause multicast packet drops\n");
+ netdev_lock_ops(dev);
netdev_update_features(dev);
- dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
+ netif_set_mtu(dev, ipoib_cm_max_mtu(dev));
netif_set_real_num_tx_queues(dev, 1);
+ netdev_unlock_ops(dev);
rtnl_unlock();
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
@@ -543,9 +581,11 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
if (!strcmp(buf, "datagram\n")) {
clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+ netdev_lock_ops(dev);
netdev_update_features(dev);
- dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ netif_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
+ netdev_unlock_ops(dev);
rtnl_unlock();
ipoib_flush_paths(dev);
return (!rtnl_trylock()) ? -EBUSY : 0;
@@ -1212,6 +1252,7 @@ void ipoib_ib_tx_timeout_work(struct work_struct *work)
int err;
rtnl_lock();
+ netdev_lock_ops(priv->dev);
if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
goto unlock;
@@ -1226,6 +1267,7 @@ void ipoib_ib_tx_timeout_work(struct work_struct *work)
netif_tx_wake_all_queues(priv->dev);
unlock:
+ netdev_unlock_ops(priv->dev);
rtnl_unlock();
}
@@ -1992,9 +2034,9 @@ static int ipoib_ndo_init(struct net_device *ndev)
dev_hold(priv->parent);
- down_write(&ppriv->vlan_rwsem);
+ netdev_lock(priv->parent);
list_add_tail(&priv->list, &ppriv->child_intfs);
- up_write(&ppriv->vlan_rwsem);
+ netdev_unlock(priv->parent);
}
return 0;
@@ -2004,8 +2046,6 @@ static void ipoib_ndo_uninit(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- ASSERT_RTNL();
-
/*
* ipoib_remove_one guarantees the children are removed before the
* parent, and that is the only place where a parent can be removed.
@@ -2015,9 +2055,9 @@ static void ipoib_ndo_uninit(struct net_device *dev)
if (priv->parent) {
struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
- down_write(&ppriv->vlan_rwsem);
+ netdev_lock(ppriv->dev);
list_del(&priv->list);
- up_write(&ppriv->vlan_rwsem);
+ netdev_unlock(ppriv->dev);
}
ipoib_neigh_hash_uninit(dev);
@@ -2167,7 +2207,6 @@ static void ipoib_build_priv(struct net_device *dev)
priv->dev = dev;
spin_lock_init(&priv->lock);
- init_rwsem(&priv->vlan_rwsem);
mutex_init(&priv->mcast_mutex);
INIT_LIST_HEAD(&priv->path_list);
@@ -2372,10 +2411,10 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
netif_addr_unlock_bh(netdev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
- down_read(&priv->vlan_rwsem);
+ netdev_lock_ops_to_full(priv->dev);
list_for_each_entry(child_priv, &priv->child_intfs, list)
set_base_guid(child_priv, gid);
- up_read(&priv->vlan_rwsem);
+ netdev_unlock_full_to_ops(priv->dev);
}
}
@@ -2415,6 +2454,14 @@ static int ipoib_set_mac(struct net_device *dev, void *addr)
set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ struct ipoib_dev_priv *cpriv;
+
+ netdev_lock_ops_to_full(dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ queue_work(ipoib_workqueue, &cpriv->flush_light);
+ netdev_unlock_full_to_ops(dev);
+ }
queue_work(ipoib_workqueue, &priv->flush_light);
return 0;
@@ -2526,7 +2573,7 @@ static struct net_device *ipoib_add_port(const char *format,
ib_register_event_handler(&priv->event_handler);
/* call event handler to ensure pkey in sync */
- queue_work(ipoib_workqueue, &priv->flush_heavy);
+ ipoib_queue_work(priv, IPOIB_FLUSH_HEAVY);
ndev->rtnl_link_ops = ipoib_get_link_ops();
@@ -2624,9 +2671,11 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
rtnl_lock();
+ netdev_lock(priv->dev);
list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
list)
unregister_netdevice_queue(cpriv->dev, &head);
+ netdev_unlock(priv->dev);
unregister_netdevice_queue(priv->dev, &head);
unregister_netdevice_many(&head);
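Since ipoib_open()/ipoib_stop() can no longer toggle child interfaces synchronously while holding the parent's lock, the patch defers the IFF_UP change to a workqueue and pins the child with a netdevice tracker until the work runs. The shape of that pattern, with hypothetical names:

	struct deferred_updown {
		struct work_struct work;
		struct net_device *dev;
		netdevice_tracker tracker;
		bool up;
	};

	static void deferred_updown_task(struct work_struct *work)
	{
		struct deferred_updown *w =
			container_of(work, struct deferred_updown, work);

		/* toggle IFF_UP on w->dev under rtnl_lock(), as in the patch */
		netdev_put(w->dev, &w->tracker);	/* drop the pinned reference */
		kfree(w);
	}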
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 368e5d77416d..86983080d28b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -280,15 +280,15 @@ void ipoib_event(struct ib_event_handler *handler,
dev_name(&record->device->dev), record->element.port_num);
if (record->event == IB_EVENT_CLIENT_REREGISTER) {
- queue_work(ipoib_workqueue, &priv->flush_light);
+ ipoib_queue_work(priv, IPOIB_FLUSH_LIGHT);
} else if (record->event == IB_EVENT_PORT_ERR ||
record->event == IB_EVENT_PORT_ACTIVE ||
record->event == IB_EVENT_LID_CHANGE) {
- queue_work(ipoib_workqueue, &priv->flush_normal);
+ ipoib_queue_work(priv, IPOIB_FLUSH_NORMAL);
} else if (record->event == IB_EVENT_PKEY_CHANGE) {
- queue_work(ipoib_workqueue, &priv->flush_heavy);
+ ipoib_queue_work(priv, IPOIB_FLUSH_HEAVY);
} else if (record->event == IB_EVENT_GID_CHANGE &&
!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
- queue_work(ipoib_workqueue, &priv->flush_light);
+ ipoib_queue_work(priv, IPOIB_FLUSH_LIGHT);
}
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 562df2b3ef18..243e8f555eca 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -53,8 +53,7 @@ static bool is_child_unique(struct ipoib_dev_priv *ppriv,
struct ipoib_dev_priv *priv)
{
struct ipoib_dev_priv *tpriv;
-
- ASSERT_RTNL();
+ bool result = true;
/*
* Since the legacy sysfs interface uses pkey for deletion it cannot
@@ -73,13 +72,17 @@ static bool is_child_unique(struct ipoib_dev_priv *ppriv,
if (ppriv->pkey == priv->pkey)
return false;
+ netdev_lock(ppriv->dev);
list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
if (tpriv->pkey == priv->pkey &&
- tpriv->child_type == IPOIB_LEGACY_CHILD)
- return false;
+ tpriv->child_type == IPOIB_LEGACY_CHILD) {
+ result = false;
+ break;
+ }
}
+ netdev_unlock(ppriv->dev);
- return true;
+ return result;
}
/*
@@ -98,8 +101,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
int result;
struct rdma_netdev *rn = netdev_priv(ndev);
- ASSERT_RTNL();
-
/*
* We do not need to touch priv if register_netdevice fails, so just
* always use this flow.
@@ -267,6 +268,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
ppriv = ipoib_priv(pdev);
rc = -ENODEV;
+ netdev_lock(ppriv->dev);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
@@ -278,9 +280,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
goto out;
}
- down_write(&ppriv->vlan_rwsem);
list_del_init(&priv->list);
- up_write(&ppriv->vlan_rwsem);
work->dev = priv->dev;
INIT_WORK(&work->work, ipoib_vlan_delete_task);
queue_work(ipoib_workqueue, &work->work);
@@ -291,6 +291,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
}
out:
+ netdev_unlock(ppriv->dev);
rtnl_unlock();
return rc;
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index 2eaa25c9c68c..7622638e5bb8 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -48,7 +48,7 @@ struct magellan {
static int magellan_crunch_nibbles(unsigned char *data, int count)
{
- static unsigned char nibbles[16] = "0AB3D56GH9:K<MN?";
+ static const unsigned char nibbles[16] __nonstring = "0AB3D56GH9:K<MN?";
do {
if (data[count] == nibbles[data[count] & 0xf])
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index c33e6f33265b..c066a4da7c14 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -77,12 +77,13 @@
* xbox d-pads should map to buttons, as is required for DDR pads
* but we map them to axes when possible to simplify things
*/
-#define MAP_DPAD_TO_BUTTONS (1 << 0)
-#define MAP_TRIGGERS_TO_BUTTONS (1 << 1)
-#define MAP_STICKS_TO_NULL (1 << 2)
-#define MAP_SELECT_BUTTON (1 << 3)
-#define MAP_PADDLES (1 << 4)
-#define MAP_PROFILE_BUTTON (1 << 5)
+#define MAP_DPAD_TO_BUTTONS BIT(0)
+#define MAP_TRIGGERS_TO_BUTTONS BIT(1)
+#define MAP_STICKS_TO_NULL BIT(2)
+#define MAP_SHARE_BUTTON BIT(3)
+#define MAP_PADDLES BIT(4)
+#define MAP_PROFILE_BUTTON BIT(5)
+#define MAP_SHARE_OFFSET BIT(6)
#define DANCEPAD_MAP_CONFIG (MAP_DPAD_TO_BUTTONS | \
MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL)
@@ -104,6 +105,8 @@
#define PKT_XBE2_FW_5_EARLY 3
#define PKT_XBE2_FW_5_11 4
+#define FLAG_DELAY_INIT BIT(0)
+
static bool dpad_to_buttons;
module_param(dpad_to_buttons, bool, S_IRUGO);
MODULE_PARM_DESC(dpad_to_buttons, "Map D-PAD to buttons rather than axes for unknown pads");
@@ -126,6 +129,7 @@ static const struct xpad_device {
char *name;
u8 mapping;
u8 xtype;
+ u8 flags;
} xpad_device[] = {
/* Please keep this list sorted by vendor and product ID. */
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
@@ -135,14 +139,14 @@ static const struct xpad_device {
{ 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wireless */
{ 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
{ 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE },
- { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, /* v2 */
+ { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, /* v2 */
{ 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
- { 0x044f, 0xd01e, "ThrustMaster, Inc. ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
+ { 0x044f, 0xd01e, "ThrustMaster, Inc. ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
{ 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
@@ -159,7 +163,7 @@ static const struct xpad_device {
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0b00, "Microsoft X-Box One Elite 2 pad", MAP_PADDLES, XTYPE_XBOXONE },
{ 0x045e, 0x0b0a, "Microsoft X-Box Adaptive Controller", MAP_PROFILE_BUTTON, XTYPE_XBOXONE },
- { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
+ { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SHARE_BUTTON | MAP_SHARE_OFFSET, XTYPE_XBOXONE },
{ 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
@@ -205,13 +209,13 @@ static const struct xpad_device {
{ 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 },
{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
{ 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
+ { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 },
{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
{ 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
- { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE },
+ { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE },
{ 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
{ 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
@@ -240,7 +244,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
- { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", XTYPE_XBOXONE },
+ { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
@@ -281,6 +285,7 @@ static const struct xpad_device {
{ 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x0151, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
{ 0x0f0d, 0x0152, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
+ { 0x0f0d, 0x01b2, "HORI Taiko No Tatsujin Drum Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
@@ -288,6 +293,8 @@ static const struct xpad_device {
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE },
+ { 0x10f5, 0x7008, "Turtle Beach Recon Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
+ { 0x10f5, 0x7073, "Turtle Beach Stealth Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
{ 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
@@ -352,7 +359,10 @@ static const struct xpad_device {
{ 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 },
{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x2064, "PowerA Wired Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x20d6, 0x400b, "PowerA FUSION Pro 4 Wired Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
+ { 0x20d6, 0x890b, "PowerA MOGA XP-Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x2345, 0xe00b, "Machenike G5 Pro Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
@@ -384,13 +394,16 @@ static const struct xpad_device {
{ 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
{ 0x2993, 0x2001, "TECNO Pocket Go", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE },
+ { 0x2dc8, 0x200f, "8BitDo Ultimate 3-mode Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x310b, "8BitDo Ultimate 2 Wireless Controller", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 },
+ { 0x2e24, 0x0423, "Hyperkin DuchesS Xbox One pad", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE },
- { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
+ { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
@@ -406,6 +419,7 @@ static const struct xpad_device {
{ 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
{ 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE },
+ { 0x366c, 0x0005, "ByoWave Proteus Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE, FLAG_DELAY_INIT },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
@@ -561,6 +575,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */
XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
+ XPAD_XBOXONE_VENDOR(0x366c), /* ByoWave controllers */
XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */
{ }
};
@@ -589,6 +604,7 @@ struct xboxone_init_packet {
* - https://github.com/medusalix/xone/blob/master/bus/protocol.c
*/
#define GIP_CMD_ACK 0x01
+#define GIP_CMD_ANNOUNCE 0x02
#define GIP_CMD_IDENTIFY 0x04
#define GIP_CMD_POWER 0x05
#define GIP_CMD_AUTHENTICATE 0x06
@@ -663,20 +679,19 @@ static const u8 xboxone_hori_ack_id[] = {
};
/*
- * This packet is required for most (all?) of the PDP pads to start
- * sending input reports. These pads include: (0x0e6f:0x02ab),
- * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
+ * This packet is sent by default on Windows, and is required for some pads to
+ * start sending input reports, including most (all?) of the PDP. These pads
+ * include: (0x0e6f:0x02ab), (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
-static const u8 xboxone_pdp_led_on[] = {
- GIP_CMD_LED, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(3), 0x00, GIP_LED_ON, 0x14
-};
+static const u8 xboxone_led_on[] = { GIP_CMD_LED, GIP_OPT_INTERNAL, GIP_SEQ0,
+GIP_PL_LEN(3), 0x00, GIP_LED_ON, 0x14 };
/*
* This packet is required for most (all?) of the PDP pads to start
* sending input reports. These pads include: (0x0e6f:0x02ab),
* (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
-static const u8 xboxone_pdp_auth[] = {
+static const u8 xboxone_auth_done[] = {
GIP_CMD_AUTHENTICATE, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(2), 0x01, 0x00
};
@@ -713,10 +728,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init),
- XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on),
- XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on),
- XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth),
- XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth),
+ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_led_on),
+ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_auth_done),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
@@ -776,10 +789,13 @@ struct usb_xpad {
const char *name; /* name of the device */
struct work_struct work; /* init/remove device from callback */
time64_t mode_btn_down_ts;
+ bool delay_init; /* init packets should be delayed */
+ bool delayed_init_done;
};
static int xpad_init_input(struct usb_xpad *xpad);
static void xpad_deinit_input(struct usb_xpad *xpad);
+static int xpad_start_input(struct usb_xpad *xpad);
static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num);
static void xpad360w_poweroff_controller(struct usb_xpad *xpad);
@@ -1027,7 +1043,7 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha
* The report format was gleaned from
* https://github.com/kylelemons/xbox/blob/master/xbox.go
*/
-static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
+static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data, u32 len)
{
struct input_dev *dev = xpad->dev;
bool do_sync = false;
@@ -1064,12 +1080,27 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
do_sync = true;
}
+ } else if (data[0] == GIP_CMD_ANNOUNCE) {
+ int error;
+
+ if (xpad->delay_init && !xpad->delayed_init_done) {
+ xpad->delayed_init_done = true;
+ error = xpad_start_input(xpad);
+ if (error)
+ dev_warn(&xpad->dev->dev,
+ "unable to start delayed input: %d\n",
+ error);
+ }
} else if (data[0] == GIP_CMD_INPUT) { /* The main valid packet type for inputs */
/* menu/view buttons */
input_report_key(dev, BTN_START, data[4] & BIT(2));
input_report_key(dev, BTN_SELECT, data[4] & BIT(3));
- if (xpad->mapping & MAP_SELECT_BUTTON)
- input_report_key(dev, KEY_RECORD, data[22] & BIT(0));
+ if (xpad->mapping & MAP_SHARE_BUTTON) {
+ if (xpad->mapping & MAP_SHARE_OFFSET)
+ input_report_key(dev, KEY_RECORD, data[len - 26] & BIT(0));
+ else
+ input_report_key(dev, KEY_RECORD, data[len - 18] & BIT(0));
+ }
/* buttons A,B,X,Y */
input_report_key(dev, BTN_A, data[4] & BIT(4));
@@ -1217,7 +1248,7 @@ static void xpad_irq_in(struct urb *urb)
xpad360w_process_packet(xpad, 0, xpad->idata);
break;
case XTYPE_XBOXONE:
- xpadone_process_packet(xpad, 0, xpad->idata);
+ xpadone_process_packet(xpad, 0, xpad->idata, urb->actual_length);
break;
default:
xpad_process_packet(xpad, 0, xpad->idata);
@@ -1238,6 +1269,14 @@ static bool xpad_prepare_next_init_packet(struct usb_xpad *xpad)
if (xpad->xtype != XTYPE_XBOXONE)
return false;
+ /*
+ * Some dongles will discard init packets if they're sent before the
+ * controller connects. In these cases, we need to wait until we get
+ * an announce packet from them to send the init packet sequence.
+ */
+ if (xpad->delay_init && !xpad->delayed_init_done)
+ return false;
+
/* Perform initialization sequence for Xbox One pads that require it */
while (xpad->init_seq < ARRAY_SIZE(xboxone_init_packets)) {
init_packet = &xboxone_init_packets[xpad->init_seq++];
@@ -1944,7 +1983,7 @@ static int xpad_init_input(struct usb_xpad *xpad)
xpad->xtype == XTYPE_XBOXONE) {
for (i = 0; xpad360_btn[i] >= 0; i++)
input_set_capability(input_dev, EV_KEY, xpad360_btn[i]);
- if (xpad->mapping & MAP_SELECT_BUTTON)
+ if (xpad->mapping & MAP_SHARE_BUTTON)
input_set_capability(input_dev, EV_KEY, KEY_RECORD);
} else {
for (i = 0; xpad_btn[i] >= 0; i++)
@@ -2053,6 +2092,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
xpad->mapping = xpad_device[i].mapping;
xpad->xtype = xpad_device[i].xtype;
xpad->name = xpad_device[i].name;
+ if (xpad_device[i].flags & FLAG_DELAY_INIT)
+ xpad->delay_init = true;
+
xpad->packet_type = PKT_XB;
INIT_WORK(&xpad->work, xpad_presence_work);
@@ -2252,6 +2294,7 @@ static int xpad_resume(struct usb_interface *intf)
struct usb_xpad *xpad = usb_get_intfdata(intf);
struct input_dev *input = xpad->dev;
+ xpad->delayed_init_done = false;
if (xpad->xtype == XTYPE_XBOX360W)
return xpad360w_start_input(xpad);
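With MAP_SHARE_BUTTON the share/record bit is now located relative to the end of the GIP input report, so controllers that append extra payload still decode correctly. A hedged sketch of the lookup as a standalone helper (not part of the driver):

	/* Share button sits 18 bytes from the end of the report, or 26 when
	 * the device needs the extra offset (MAP_SHARE_OFFSET).
	 */
	static bool share_pressed(const u8 *data, u32 len, bool extra_offset)
	{
		u32 idx = len - (extra_offset ? 26 : 18);

		return data[idx] & BIT(0);
	}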
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index adf0f311996c..3ff2fcf05ad5 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -37,7 +37,7 @@ static int atkbd_set = 2;
module_param_named(set, atkbd_set, int, 0);
MODULE_PARM_DESC(set, "Select keyboard code set (2 = default, 3 = PS/2 native)");
-#if defined(__i386__) || defined(__x86_64__) || defined(__hppa__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__hppa__) || defined(__loongarch__)
static bool atkbd_reset;
#else
static bool atkbd_reset = true;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 5c39a217b94c..f9db86da0818 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -449,6 +449,8 @@ static enum hrtimer_restart gpio_keys_irq_timer(struct hrtimer *t)
release_timer);
struct input_dev *input = bdata->input;
+ guard(spinlock_irqsave)(&bdata->lock);
+
if (bdata->key_pressed) {
input_report_key(input, *bdata->code, 0);
input_sync(input);
@@ -486,7 +488,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
if (bdata->release_delay)
hrtimer_start(&bdata->release_timer,
ms_to_ktime(bdata->release_delay),
- HRTIMER_MODE_REL_HARD);
+ HRTIMER_MODE_REL);
out:
return IRQ_HANDLED;
}
@@ -628,7 +630,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
bdata->release_delay = button->debounce_interval;
hrtimer_setup(&bdata->release_timer, gpio_keys_irq_timer,
- CLOCK_REALTIME, HRTIMER_MODE_REL_HARD);
+ CLOCK_REALTIME, HRTIMER_MODE_REL);
isr = gpio_keys_irq_isr;
irqflags = 0;
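The release-timer callback now uses scope-based locking from <linux/cleanup.h>: guard(spinlock_irqsave) acquires bdata->lock and releases it automatically when the function returns, so every exit path stays covered. A minimal sketch of the idiom, with illustrative names:

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static void update_counter(spinlock_t *lock, int *counter)
	{
		guard(spinlock_irqsave)(lock);	/* lock taken here */
		(*counter)++;
	}					/* lock dropped on scope exit */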
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index e46473cb817c..e50a6fea9a60 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -104,6 +104,16 @@ static void disable_row_irqs(struct matrix_keypad *keypad)
disable_irq_nosync(keypad->row_irqs[i]);
}
+static uint32_t read_row_state(struct matrix_keypad *keypad)
+{
+ int row;
+ u32 row_state = 0;
+
+ for (row = 0; row < keypad->num_row_gpios; row++)
+ row_state |= row_asserted(keypad, row) ? BIT(row) : 0;
+ return row_state;
+}
+
/*
* This gets the keys from keyboard and reports it to input subsystem
*/
@@ -115,6 +125,10 @@ static void matrix_keypad_scan(struct work_struct *work)
const unsigned short *keycodes = input_dev->keycode;
uint32_t new_state[MATRIX_MAX_COLS];
int row, col, code;
+ u32 init_row_state, new_row_state;
+
+ /* read initial row state to detect changes between scan */
+ init_row_state = read_row_state(keypad);
/* de-activate all columns for scanning */
activate_all_cols(keypad, false);
@@ -129,9 +143,7 @@ static void matrix_keypad_scan(struct work_struct *work)
activate_col(keypad, col, true);
- for (row = 0; row < keypad->num_row_gpios; row++)
- new_state[col] |=
- row_asserted(keypad, row) ? BIT(row) : 0;
+ new_state[col] = read_row_state(keypad);
activate_col(keypad, col, false);
}
@@ -165,6 +177,18 @@ static void matrix_keypad_scan(struct work_struct *work)
keypad->scan_pending = false;
enable_row_irqs(keypad);
}
+
+ /* read new row state and detect if value has changed */
+ new_row_state = read_row_state(keypad);
+ if (init_row_state != new_row_state) {
+ guard(spinlock_irq)(&keypad->lock);
+ if (unlikely(keypad->scan_pending || keypad->stopped))
+ return;
+ disable_row_irqs(keypad);
+ keypad->scan_pending = true;
+ schedule_delayed_work(&keypad->work,
+ msecs_to_jiffies(keypad->debounce_ms));
+ }
}
static irqreturn_t matrix_keypad_interrupt(int irq, void *id)
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index 5ad6be914160..061d48350df6 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -147,8 +147,8 @@ static void mtk_pmic_keys_lp_reset_setup(struct mtk_pmic_keys *keys,
u32 value, mask;
int error;
- kregs_home = keys->keys[MTK_PMIC_HOMEKEY_INDEX].regs;
- kregs_pwr = keys->keys[MTK_PMIC_PWRKEY_INDEX].regs;
+ kregs_home = &regs->keys_regs[MTK_PMIC_HOMEKEY_INDEX];
+ kregs_pwr = &regs->keys_regs[MTK_PMIC_PWRKEY_INDEX];
error = of_property_read_u32(keys->dev->of_node, "power-off-time-sec",
&long_press_debounce);
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index bbf409dda89f..fe7398eeb828 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -27,6 +27,8 @@
#define SNVS_HPSR_BTN BIT(6)
#define SNVS_LPSR_SPO BIT(18)
#define SNVS_LPCR_DEP_EN BIT(5)
+#define SNVS_LPCR_BPT_SHIFT 16
+#define SNVS_LPCR_BPT_MASK (3 << SNVS_LPCR_BPT_SHIFT)
#define DEBOUNCE_TIME 30
#define REPEAT_INTERVAL 60
@@ -114,6 +116,8 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
struct device_node *np;
struct clk *clk;
int error;
+ unsigned int val;
+ unsigned int bpt;
u32 vid;
/* Get SNVS register Page */
@@ -148,6 +152,27 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
if (pdata->irq < 0)
return -EINVAL;
+ error = of_property_read_u32(np, "power-off-time-sec", &val);
+ if (!error) {
+ switch (val) {
+ case 0:
+ bpt = 0x3;
+ break;
+ case 5:
+ case 10:
+ case 15:
+ bpt = (val / 5) - 1;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "power-off-time-sec %d out of range\n", val);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(pdata->snvs, SNVS_LPCR_REG, SNVS_LPCR_BPT_MASK,
+ bpt << SNVS_LPCR_BPT_SHIFT);
+ }
+
regmap_read(pdata->snvs, SNVS_HPVIDR1_REG, &vid);
pdata->minor_rev = vid & 0xff;
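The new power-off-time-sec handling maps 5/10/15 seconds to BPT values 0/1/2 (val / 5 - 1), maps 0 to the value 0x3, and programs the two-bit field in LPCR. An equivalent formulation of the register write using FIELD_PREP, assuming the same SNVS_LPCR_* macros (illustrative only, not from the patch):

	#include <linux/bitfield.h>

	static int snvs_set_bpt(struct regmap *snvs, unsigned int bpt)
	{
		return regmap_update_bits(snvs, SNVS_LPCR_REG, SNVS_LPCR_BPT_MASK,
					  FIELD_PREP(SNVS_LPCR_BPT_MASK, bpt));
	}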
diff --git a/drivers/input/misc/hisi_powerkey.c b/drivers/input/misc/hisi_powerkey.c
index d3c293a95d32..d315017324d9 100644
--- a/drivers/input/misc/hisi_powerkey.c
+++ b/drivers/input/misc/hisi_powerkey.c
@@ -30,7 +30,7 @@ static irqreturn_t hi65xx_power_press_isr(int irq, void *q)
{
struct input_dev *input = q;
- pm_wakeup_event(input->dev.parent, MAX_HELD_TIME);
+ pm_wakeup_dev_event(input->dev.parent, MAX_HELD_TIME, true);
input_report_key(input, KEY_POWER, 1);
input_sync(input);
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index d9ee14b1f451..4581f1c53644 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -844,6 +844,12 @@ static int ims_pcu_flash_firmware(struct ims_pcu *pcu,
addr = be32_to_cpu(rec->addr) / 2;
len = be16_to_cpu(rec->len);
+ if (len > sizeof(pcu->cmd_buf) - 1 - sizeof(*fragment)) {
+ dev_err(pcu->dev,
+ "Invalid record length in firmware: %d\n", len);
+ return -EINVAL;
+ }
+
fragment = (void *)&pcu->cmd_buf[1];
put_unaligned_le32(addr, &fragment->addr);
fragment->len = len;
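The fix validates the record length taken from the firmware image before the payload is copied behind the one-byte report ID and the fragment header in the fixed-size command buffer. The general shape of the check, as a self-contained sketch with illustrative names:

	/* Reject lengths that would overflow a fixed command buffer once the
	 * report ID byte and the fragment header are accounted for.
	 */
	static int check_record_len(size_t buf_size, size_t hdr_size, size_t len)
	{
		if (len > buf_size - 1 - hdr_size)
			return -EINVAL;
		return 0;
	}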
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 8d7303fc13bc..1cfadd73829f 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -74,9 +74,14 @@ static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int
return -1;
switch (code) {
- case SND_BELL: if (value) value = 1000;
- case SND_TONE: break;
- default: return -1;
+ case SND_BELL:
+ if (value)
+ value = 1000;
+ break;
+ case SND_TONE:
+ break;
+ default:
+ return -1;
}
if (value > 20 && value < 32767)
@@ -109,9 +114,14 @@ static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned
return -1;
switch (code) {
- case SND_BELL: if (value) value = 1000;
- case SND_TONE: break;
- default: return -1;
+ case SND_BELL:
+ if (value)
+ value = 1000;
+ break;
+ case SND_TONE:
+ break;
+ default:
+ return -1;
}
if (value > 20 && value < 32767)
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 309c360aab55..c5c88a75a019 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -164,6 +164,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
#ifdef CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS
static const char * const smbus_pnp_ids[] = {
/* all of the topbuttonpad_pnp_ids are valid, we just add some extras */
+ "DLL060d", /* Dell Precision M3800 */
"LEN0048", /* X1 Carbon 3 */
"LEN0046", /* X250 */
"LEN0049", /* Yoga 11e */
@@ -190,11 +191,15 @@ static const char * const smbus_pnp_ids[] = {
"LEN2054", /* E480 */
"LEN2055", /* E580 */
"LEN2068", /* T14 Gen 1 */
+ "SYN1221", /* TUXEDO InfinityBook Pro 14 v5 */
+ "SYN3003", /* HP EliteBook 850 G1 */
"SYN3015", /* HP EliteBook 840 G2 */
"SYN3052", /* HP EliteBook 840 G4 */
"SYN3221", /* HP 15-ay000 */
"SYN323d", /* HP Spectre X360 13-w013dx */
"SYN3257", /* HP Envy 13-ad105ng */
+ "TOS01f6", /* Dynabook Portege X30L-G */
+ "TOS0213", /* Dynabook Portege X30-D */
NULL
};
#endif
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
index d760af4cc12e..f1947f03b06a 100644
--- a/drivers/input/rmi4/rmi_f34.c
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -4,6 +4,7 @@
* Copyright (C) 2016 Zodiac Inflight Innovations
*/
+#include "linux/device.h"
#include <linux/kernel.h>
#include <linux/rmi.h>
#include <linux/firmware.h>
@@ -289,39 +290,30 @@ static int rmi_f34_update_firmware(struct f34_data *f34,
return rmi_f34_flash_firmware(f34, syn_fw);
}
-static int rmi_f34_status(struct rmi_function *fn)
-{
- struct f34_data *f34 = dev_get_drvdata(&fn->dev);
-
- /*
- * The status is the percentage complete, or once complete,
- * zero for success or a negative return code.
- */
- return f34->update_status;
-}
-
static ssize_t rmi_driver_bootloader_id_show(struct device *dev,
struct device_attribute *dattr,
char *buf)
{
struct rmi_driver_data *data = dev_get_drvdata(dev);
- struct rmi_function *fn = data->f34_container;
+ struct rmi_function *fn;
struct f34_data *f34;
- if (fn) {
- f34 = dev_get_drvdata(&fn->dev);
-
- if (f34->bl_version == 5)
- return sysfs_emit(buf, "%c%c\n",
- f34->bootloader_id[0],
- f34->bootloader_id[1]);
- else
- return sysfs_emit(buf, "V%d.%d\n",
- f34->bootloader_id[1],
- f34->bootloader_id[0]);
- }
+ fn = data->f34_container;
+ if (!fn)
+ return -ENODEV;
- return 0;
+ f34 = dev_get_drvdata(&fn->dev);
+ if (!f34)
+ return -ENODEV;
+
+ if (f34->bl_version == 5)
+ return sysfs_emit(buf, "%c%c\n",
+ f34->bootloader_id[0],
+ f34->bootloader_id[1]);
+ else
+ return sysfs_emit(buf, "V%d.%d\n",
+ f34->bootloader_id[1],
+ f34->bootloader_id[0]);
}
static DEVICE_ATTR(bootloader_id, 0444, rmi_driver_bootloader_id_show, NULL);
@@ -334,13 +326,16 @@ static ssize_t rmi_driver_configuration_id_show(struct device *dev,
struct rmi_function *fn = data->f34_container;
struct f34_data *f34;
- if (fn) {
- f34 = dev_get_drvdata(&fn->dev);
+ fn = data->f34_container;
+ if (!fn)
+ return -ENODEV;
+
+ f34 = dev_get_drvdata(&fn->dev);
+ if (!f34)
+ return -ENODEV;
- return sysfs_emit(buf, "%s\n", f34->configuration_id);
- }
- return 0;
+ return sysfs_emit(buf, "%s\n", f34->configuration_id);
}
static DEVICE_ATTR(configuration_id, 0444,
@@ -356,10 +351,14 @@ static int rmi_firmware_update(struct rmi_driver_data *data,
if (!data->f34_container) {
dev_warn(dev, "%s: No F34 present!\n", __func__);
- return -EINVAL;
+ return -ENODEV;
}
f34 = dev_get_drvdata(&data->f34_container->dev);
+ if (!f34) {
+ dev_warn(dev, "%s: No valid F34 present!\n", __func__);
+ return -ENODEV;
+ }
if (f34->bl_version >= 7) {
if (data->pdt_props & HAS_BSR) {
@@ -485,10 +484,18 @@ static ssize_t rmi_driver_update_fw_status_show(struct device *dev,
char *buf)
{
struct rmi_driver_data *data = dev_get_drvdata(dev);
- int update_status = 0;
+ struct f34_data *f34;
+ int update_status = -ENODEV;
- if (data->f34_container)
- update_status = rmi_f34_status(data->f34_container);
+ /*
+ * The status is the percentage complete, or once complete,
+ * zero for success or a negative return code.
+ */
+ if (data->f34_container) {
+ f34 = dev_get_drvdata(&data->f34_container->dev);
+ if (f34)
+ update_status = f34->update_status;
+ }
return sysfs_emit(buf, "%d\n", update_status);
}
@@ -508,33 +515,21 @@ static const struct attribute_group rmi_firmware_attr_group = {
.attrs = rmi_firmware_attrs,
};
-static int rmi_f34_probe(struct rmi_function *fn)
+static int rmi_f34v5_probe(struct f34_data *f34)
{
- struct f34_data *f34;
- unsigned char f34_queries[9];
+ struct rmi_function *fn = f34->fn;
+ u8 f34_queries[9];
bool has_config_id;
- u8 version = fn->fd.function_version;
- int ret;
-
- f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL);
- if (!f34)
- return -ENOMEM;
-
- f34->fn = fn;
- dev_set_drvdata(&fn->dev, f34);
-
- /* v5 code only supported version 0, try V7 probe */
- if (version > 0)
- return rmi_f34v7_probe(f34);
+ int error;
f34->bl_version = 5;
- ret = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
- f34_queries, sizeof(f34_queries));
- if (ret) {
+ error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
+ f34_queries, sizeof(f34_queries));
+ if (error) {
dev_err(&fn->dev, "%s: Failed to query properties\n",
__func__);
- return ret;
+ return error;
}
snprintf(f34->bootloader_id, sizeof(f34->bootloader_id),
@@ -560,11 +555,11 @@ static int rmi_f34_probe(struct rmi_function *fn)
f34->v5.config_blocks);
if (has_config_id) {
- ret = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr,
- f34_queries, sizeof(f34_queries));
- if (ret) {
+ error = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr,
+ f34_queries, sizeof(f34_queries));
+ if (error) {
dev_err(&fn->dev, "Failed to read F34 config ID\n");
- return ret;
+ return error;
}
snprintf(f34->configuration_id, sizeof(f34->configuration_id),
@@ -573,12 +568,34 @@ static int rmi_f34_probe(struct rmi_function *fn)
f34_queries[2], f34_queries[3]);
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Configuration ID: %s\n",
- f34->configuration_id);
+ f34->configuration_id);
}
return 0;
}
+static int rmi_f34_probe(struct rmi_function *fn)
+{
+ struct f34_data *f34;
+ u8 version = fn->fd.function_version;
+ int error;
+
+ f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL);
+ if (!f34)
+ return -ENOMEM;
+
+ f34->fn = fn;
+
+ /* v5 code only supported version 0 */
+ error = version == 0 ? rmi_f34v5_probe(f34) : rmi_f34v7_probe(f34);
+ if (error)
+ return error;
+
+ dev_set_drvdata(&fn->dev, f34);
+
+ return 0;
+}
+
int rmi_f34_create_sysfs(struct rmi_device *rmi_dev)
{
return sysfs_create_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group);
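The reworked probe only publishes the F34 data with dev_set_drvdata() after the version-specific probe succeeded, which is why the sysfs handlers above can treat a NULL drvdata as -ENODEV instead of dereferencing a half-initialised object. The ordering in a generic sketch (my_data and my_init are hypothetical):

	static int example_probe(struct device *dev, struct my_data *d)
	{
		int error = my_init(d);		/* device-specific setup first */

		if (error)
			return error;

		dev_set_drvdata(dev, d);	/* publish only after success */
		return 0;
	}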
diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c
index eafe5a9b8964..071b7c9bf566 100644
--- a/drivers/input/touchscreen/cyttsp5.c
+++ b/drivers/input/touchscreen/cyttsp5.c
@@ -580,7 +580,7 @@ static int cyttsp5_power_control(struct cyttsp5 *ts, bool on)
int rc;
SET_CMD_REPORT_TYPE(cmd[0], 0);
- SET_CMD_REPORT_ID(cmd[0], HID_POWER_SLEEP);
+ SET_CMD_REPORT_ID(cmd[0], state);
SET_CMD_OPCODE(cmd[1], HID_CMD_SET_POWER);
rc = cyttsp5_write(ts, HID_COMMAND_REG, cmd, sizeof(cmd));
@@ -870,13 +870,16 @@ static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq,
ts->input->phys = ts->phys;
input_set_drvdata(ts->input, ts);
- /* Reset the gpio to be in a reset state */
+ /* Assert gpio to be in a reset state */
ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ts->reset_gpio)) {
error = PTR_ERR(ts->reset_gpio);
dev_err(dev, "Failed to request reset gpio, error %d\n", error);
return error;
}
+
+ fsleep(10); /* Ensure long-enough reset pulse (minimum 10us). */
+
gpiod_set_value_cansleep(ts->reset_gpio, 0);
/* Need a delay to have device up */
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index a94a1997f96b..af0fb38bcfdc 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -366,12 +366,7 @@ static struct platform_driver stmpe_ts_driver = {
};
module_platform_driver(stmpe_ts_driver);
-static const struct of_device_id stmpe_ts_ids[] = {
- { .compatible = "st,stmpe-ts", },
- { },
-};
-MODULE_DEVICE_TABLE(of, stmpe_ts_ids);
-
+MODULE_ALIAS("platform:stmpe-ts");
MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>");
MODULE_DESCRIPTION("STMPEXXX touchscreen driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index cd750f512dee..0a33d995d15d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -192,6 +192,7 @@ config MSM_IOMMU
If unsure, say N here.
source "drivers/iommu/amd/Kconfig"
+source "drivers/iommu/arm/Kconfig"
source "drivers/iommu/intel/Kconfig"
source "drivers/iommu/iommufd/Kconfig"
source "drivers/iommu/riscv/Kconfig"
@@ -199,7 +200,6 @@ source "drivers/iommu/riscv/Kconfig"
config IRQ_REMAP
bool "Support for Interrupt Remapping"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
- select DMAR_TABLE if INTEL_IOMMU
help
Supports Interrupt remapping for IO-APIC and MSI devices.
To use x2apic mode in the CPU's which support x2APIC enhancements or
@@ -314,150 +314,6 @@ config APPLE_DART
Say Y here if you are using an Apple SoC.
-# ARM IOMMU support
-config ARM_SMMU
- tristate "ARM Ltd. System MMU (SMMU) Support"
- depends on ARM64 || ARM || COMPILE_TEST
- depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select ARM_DMA_USE_IOMMU if ARM
- help
- Support for implementations of the ARM System MMU architecture
- versions 1 and 2.
-
- Say Y here if your SoC includes an IOMMU device implementing
- the ARM SMMU architecture.
-
-config ARM_SMMU_LEGACY_DT_BINDINGS
- bool "Support the legacy \"mmu-masters\" devicetree bindings"
- depends on ARM_SMMU=y && OF
- help
- Support for the badly designed and deprecated "mmu-masters"
- devicetree bindings. This allows some DMA masters to attach
- to the SMMU but does not provide any support via the DMA API.
- If you're lucky, you might be able to get VFIO up and running.
-
- If you say Y here then you'll make me very sad. Instead, say N
- and move your firmware to the utopian future that was 2016.
-
-config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
- bool "Default to disabling bypass on ARM SMMU v1 and v2"
- depends on ARM_SMMU
- default y
- help
- Say Y here to (by default) disable bypass streams such that
- incoming transactions from devices that are not attached to
- an iommu domain will report an abort back to the device and
- will not be allowed to pass through the SMMU.
-
- Any old kernels that existed before this KConfig was
- introduced would default to _allowing_ bypass (AKA the
- equivalent of NO for this config). However the default for
- this option is YES because the old behavior is insecure.
-
- There are few reasons to allow unmatched stream bypass, and
- even fewer good ones. If saying YES here breaks your board
- you should work on fixing your board. This KConfig option
- is expected to be removed in the future and we'll simply
- hardcode the bypass disable in the code.
-
- NOTE: the kernel command line parameter
- 'arm-smmu.disable_bypass' will continue to override this
- config.
-
-config ARM_SMMU_MMU_500_CPRE_ERRATA
- bool "Enable errata workaround for CPRE in SMMU reset path"
- depends on ARM_SMMU
- default y
- help
- Say Y here (by default) to apply workaround to disable
- MMU-500's next-page prefetcher for sake of 4 known errata.
-
- Say N here only when it is sure that any errata related to
- prefetch enablement are not applicable on the platform.
- Refer silicon-errata.rst for info on errata IDs.
-
-config ARM_SMMU_QCOM
- def_tristate y
- depends on ARM_SMMU && ARCH_QCOM
- select QCOM_SCM
- help
- When running on a Qualcomm platform that has the custom variant
- of the ARM SMMU, this needs to be built into the SMMU driver.
-
-config ARM_SMMU_QCOM_DEBUG
- bool "ARM SMMU QCOM implementation defined debug support"
- depends on ARM_SMMU_QCOM=y
- help
- Support for implementation specific debug features in ARM SMMU
- hardware found in QTI platforms. This include support for
- the Translation Buffer Units (TBU) that can be used to obtain
- additional information when debugging memory management issues
- like context faults.
-
- Say Y here to enable debug for issues such as context faults
- or TLB sync timeouts which requires implementation defined
- register dumps.
-
-config ARM_SMMU_V3
- tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
- depends on ARM64
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select GENERIC_MSI_IRQ
- select IOMMUFD_DRIVER if IOMMUFD
- help
- Support for implementations of the ARM System MMU architecture
- version 3 providing translation support to a PCIe root complex.
-
- Say Y here if your system includes an IOMMU device implementing
- the ARM SMMUv3 architecture.
-
-if ARM_SMMU_V3
-config ARM_SMMU_V3_SVA
- bool "Shared Virtual Addressing support for the ARM SMMUv3"
- select IOMMU_SVA
- select IOMMU_IOPF
- select MMU_NOTIFIER
- help
- Support for sharing process address spaces with devices using the
- SMMUv3.
-
- Say Y here if your system supports SVA extensions such as PCIe PASID
- and PRI.
-
-config ARM_SMMU_V3_IOMMUFD
- bool "Enable IOMMUFD features for ARM SMMUv3 (EXPERIMENTAL)"
- depends on IOMMUFD
- help
- Support for IOMMUFD features intended to support virtual machines
- with accelerated virtual IOMMUs.
-
- Say Y here if you are doing development and testing on this feature.
-
-config ARM_SMMU_V3_KUNIT_TEST
- tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
- depends on KUNIT
- depends on ARM_SMMU_V3_SVA
- default KUNIT_ALL_TESTS
- help
- Enable this option to unit-test arm-smmu-v3 driver functions.
-
- If unsure, say N.
-
-config TEGRA241_CMDQV
- bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3"
- depends on ACPI
- help
- Support for NVIDIA CMDQ-Virtualization extension for ARM SMMUv3. The
- CMDQ-V extension is similar to v3.3 ECMDQ for multi command queues
- support, except with virtualization capabilities.
-
- Say Y here if your system is NVIDIA Tegra241 (Grace) or it has the same
- CMDQ-V extension.
-endif
-
config S390_IOMMU
def_bool y if S390 && PCI
depends on S390 && PCI
@@ -494,18 +350,6 @@ config MTK_IOMMU_V1
if unsure, say N here.
-config QCOM_IOMMU
- # Note: iommu drivers cannot (yet?) be built as modules
- bool "Qualcomm IOMMU Support"
- depends on ARCH_QCOM || COMPILE_TEST
- depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
- select QCOM_SCM
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select ARM_DMA_USE_IOMMU
- help
- Support for IOMMU on certain Qualcomm SoCs.
-
config HYPERV_IOMMU
bool "Hyper-V IRQ Handling"
depends on HYPERV && X86
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 5e5a83c6c2aa..355294fa9033 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += amd/ intel/ arm/ iommufd/ riscv/
+obj-y += arm/ iommufd/
+obj-$(CONFIG_AMD_IOMMU) += amd/
+obj-$(CONFIG_INTEL_IOMMU) += intel/
+obj-$(CONFIG_RISCV_IOMMU) += riscv/
obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_IOMMU_SUPPORT) += iommu-pages.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index 9de33b2d42f5..59c04a67f398 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
+obj-y += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 220c598b7e14..29a8864381c3 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -147,6 +147,8 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev)
return PCI_SEG_DEVID_TO_SBDF(seg, devid);
}
+bool amd_iommu_ht_range_ignore(void);
+
/*
* This must be called after device probe completes. During probe
* use rlookup_amd_iommu() get the iommu.
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5089b58e528a..ccbab3a4811a 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -29,8 +29,6 @@
* some size calculation constants
*/
#define DEV_TABLE_ENTRY_SIZE 32
-#define ALIAS_TABLE_ENTRY_SIZE 2
-#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET 0x00
@@ -111,6 +109,7 @@
#define FEATURE_SNPAVICSUP GENMASK_ULL(7, 5)
#define FEATURE_SNPAVICSUP_GAM(x) \
(FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
+#define FEATURE_HT_RANGE_IGNORE BIT_ULL(11)
#define FEATURE_NUM_INT_REMAP_SUP GENMASK_ULL(9, 8)
#define FEATURE_NUM_INT_REMAP_SUP_2K(x) \
@@ -316,6 +315,7 @@
#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE 1ULL
+#define DTE_INTTAB_ALIGNMENT 128
#define DTE_INTTABLEN_MASK (0xfULL << 1)
#define DTE_INTTABLEN_VALUE_512 9ULL
#define DTE_INTTABLEN_512 (DTE_INTTABLEN_VALUE_512 << 1)
@@ -616,12 +616,6 @@ struct amd_iommu_pci_seg {
/* Size of the device table */
u32 dev_table_size;
- /* Size of the alias table */
- u32 alias_table_size;
-
- /* Size of the rlookup table */
- u32 rlookup_table_size;
-
/*
* device table virtual address
*
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index dd9e26b7b718..9c17dfa76703 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -243,17 +243,14 @@ static void init_translation_status(struct amd_iommu *iommu)
iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
-static inline unsigned long tbl_size(int entry_size, int last_bdf)
+int amd_iommu_get_num_iommus(void)
{
- unsigned shift = PAGE_SHIFT +
- get_order((last_bdf + 1) * entry_size);
-
- return 1UL << shift;
+ return amd_iommus_present;
}
-int amd_iommu_get_num_iommus(void)
+bool amd_iommu_ht_range_ignore(void)
{
- return amd_iommus_present;
+ return check_feature2(FEATURE_HT_RANGE_IGNORE);
}
/*
@@ -634,8 +631,8 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_
/* Allocate per PCI segment device table */
static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32,
+ pci_seg->dev_table_size);
if (!pci_seg->dev_table)
return -ENOMEM;
@@ -644,16 +641,16 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->dev_table,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = NULL;
}
/* Allocate per PCI segment IOMMU rlookup table. */
static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->rlookup_table_size));
+ pci_seg->rlookup_table = kvcalloc(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->rlookup_table),
+ GFP_KERNEL);
if (pci_seg->rlookup_table == NULL)
return -ENOMEM;
@@ -662,17 +659,15 @@ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->rlookup_table,
- get_order(pci_seg->rlookup_table_size));
+ kvfree(pci_seg->rlookup_table);
pci_seg->rlookup_table = NULL;
}
static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->rlookup_table_size));
- kmemleak_alloc(pci_seg->irq_lookup_table,
- pci_seg->rlookup_table_size, 1, GFP_KERNEL);
+ pci_seg->irq_lookup_table = kvcalloc(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->irq_lookup_table),
+ GFP_KERNEL);
if (pci_seg->irq_lookup_table == NULL)
return -ENOMEM;
@@ -681,9 +676,7 @@ static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_se
static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- kmemleak_free(pci_seg->irq_lookup_table);
- iommu_free_pages(pci_seg->irq_lookup_table,
- get_order(pci_seg->rlookup_table_size));
+ kvfree(pci_seg->irq_lookup_table);
pci_seg->irq_lookup_table = NULL;
}
@@ -691,8 +684,9 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
int i;
- pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->alias_table_size));
+ pci_seg->alias_table = kvmalloc_array(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->alias_table),
+ GFP_KERNEL);
if (!pci_seg->alias_table)
return -ENOMEM;
@@ -707,8 +701,7 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->alias_table,
- get_order(pci_seg->alias_table_size));
+ kvfree(pci_seg->alias_table);
pci_seg->alias_table = NULL;
}
@@ -719,8 +712,7 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
*/
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
- iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL,
- get_order(CMD_BUFFER_SIZE));
+ iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE);
return iommu->cmd_buf ? 0 : -ENOMEM;
}
@@ -817,20 +809,22 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu)
static void __init free_command_buffer(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
+ iommu_free_pages(iommu->cmd_buf);
}
void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
size_t size)
{
- int order = get_order(size);
- void *buf = iommu_alloc_pages(gfp, order);
+ void *buf;
- if (buf &&
- check_feature(FEATURE_SNP) &&
- set_memory_4k((unsigned long)buf, (1 << order))) {
- iommu_free_pages(buf, order);
- buf = NULL;
+ size = PAGE_ALIGN(size);
+ buf = iommu_alloc_pages_sz(gfp, size);
+ if (!buf)
+ return NULL;
+ if (check_feature(FEATURE_SNP) &&
+ set_memory_4k((unsigned long)buf, size / PAGE_SIZE)) {
+ iommu_free_pages(buf);
+ return NULL;
}
return buf;
@@ -873,14 +867,14 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu)
static void __init free_event_buffer(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
+ iommu_free_pages(iommu->evt_buf);
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
- iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE));
- iommu_free_pages(iommu->ga_log_tail, get_order(8));
+ iommu_free_pages(iommu->ga_log);
+ iommu_free_pages(iommu->ga_log_tail);
#endif
}
@@ -925,11 +919,11 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
return 0;
- iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE));
+ iommu->ga_log = iommu_alloc_pages_sz(GFP_KERNEL, GA_LOG_SIZE);
if (!iommu->ga_log)
goto err_out;
- iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8));
+ iommu->ga_log_tail = iommu_alloc_pages_sz(GFP_KERNEL, 8);
if (!iommu->ga_log_tail)
goto err_out;
@@ -950,7 +944,7 @@ static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
if (iommu->cmd_sem)
- iommu_free_page((void *)iommu->cmd_sem);
+ iommu_free_pages((void *)iommu->cmd_sem);
}
static void iommu_enable_xt(struct amd_iommu *iommu)
@@ -1024,8 +1018,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
if (!old_devtb)
return false;
- pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(pci_seg->dev_table_size));
+ pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz(
+ GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size);
if (pci_seg->old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
memunmap(old_devtb);
@@ -1599,9 +1593,9 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
pci_seg->last_bdf = last_bdf;
DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
- pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
- pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
- pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->dev_table_size =
+ max(roundup_pow_of_two((last_bdf + 1) * DEV_TABLE_ENTRY_SIZE),
+ SZ_4K);
pci_seg->id = id;
init_llist_head(&pci_seg->dev_data_list);
@@ -2030,9 +2024,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (!iommu->dev)
return -ENODEV;
- /* Prevent binding other PCI device drivers to IOMMU devices */
- iommu->dev->match_driver = false;
-
/* ACPI _PRT won't have an IRQ for IOMMU */
iommu->dev->irq_managed = 1;
@@ -2789,8 +2780,7 @@ static void early_enable_iommus(void)
for_each_pci_segment(pci_seg) {
if (pci_seg->old_dev_tbl_cpy != NULL) {
- iommu_free_pages(pci_seg->old_dev_tbl_cpy,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->old_dev_tbl_cpy);
pci_seg->old_dev_tbl_cpy = NULL;
}
}
@@ -2803,8 +2793,7 @@ static void early_enable_iommus(void)
pr_info("Copied DEV table from previous kernel.\n");
for_each_pci_segment(pci_seg) {
- iommu_free_pages(pci_seg->dev_table,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
}
@@ -3664,6 +3653,14 @@ found:
while (*uid == '0' && *(uid + 1))
uid++;
+ if (strlen(hid) >= ACPIHID_HID_LEN) {
+ pr_err("Invalid command line: hid is too long\n");
+ return 1;
+ } else if (strlen(uid) >= ACPIHID_UID_LEN) {
+ pr_err("Invalid command line: uid is too long\n");
+ return 1;
+ }
+
i = early_acpihid_map_size++;
memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 26cf562dde11..4d308c071134 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -47,14 +47,7 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
return fpte;
}
-static void free_pt_page(u64 *pt, struct list_head *freelist)
-{
- struct page *p = virt_to_page(pt);
-
- list_add_tail(&p->lru, freelist);
-}
-
-static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
+static void free_pt_lvl(u64 *pt, struct iommu_pages_list *freelist, int lvl)
{
u64 *p;
int i;
@@ -77,20 +70,20 @@ static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
if (lvl > 2)
free_pt_lvl(p, freelist, lvl - 1);
else
- free_pt_page(p, freelist);
+ iommu_pages_list_add(freelist, p);
}
- free_pt_page(pt, freelist);
+ iommu_pages_list_add(freelist, pt);
}
-static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
+static void free_sub_pt(u64 *root, int mode, struct iommu_pages_list *freelist)
{
switch (mode) {
case PAGE_MODE_NONE:
case PAGE_MODE_7_LEVEL:
break;
case PAGE_MODE_1_LEVEL:
- free_pt_page(root, freelist);
+ iommu_pages_list_add(freelist, root);
break;
case PAGE_MODE_2_LEVEL:
case PAGE_MODE_3_LEVEL:
@@ -121,7 +114,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
bool ret = true;
u64 *pte;
- pte = iommu_alloc_page_node(cfg->amd.nid, gfp);
+ pte = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp, SZ_4K);
if (!pte)
return false;
@@ -146,7 +139,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
out:
spin_unlock_irqrestore(&domain->lock, flags);
- iommu_free_page(pte);
+ iommu_free_pages(pte);
return ret;
}
@@ -213,7 +206,8 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
if (!IOMMU_PTE_PRESENT(__pte) ||
pte_level == PAGE_MODE_NONE) {
- page = iommu_alloc_page_node(cfg->amd.nid, gfp);
+ page = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp,
+ SZ_4K);
if (!page)
return NULL;
@@ -222,7 +216,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
/* pte could have been changed somewhere. */
if (!try_cmpxchg64(pte, &__pte, __npte))
- iommu_free_page(page);
+ iommu_free_pages(page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -299,7 +293,8 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
return pte;
}
-static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
+static void free_clear_pte(u64 *pte, u64 pteval,
+ struct iommu_pages_list *freelist)
{
u64 *pt;
int mode;
@@ -328,7 +323,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
int prot, gfp_t gfp, size_t *mapped)
{
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
bool updated = false;
u64 __pte, *pte;
int ret, i, count;
@@ -353,7 +348,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
for (i = 0; i < count; ++i)
free_clear_pte(&pte[i], pte[i], &freelist);
- if (!list_empty(&freelist))
+ if (!iommu_pages_list_empty(&freelist))
updated = true;
if (count > 1) {
@@ -524,7 +519,7 @@ static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
static void v1_free_pgtable(struct io_pgtable *iop)
{
struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
if (pgtable->mode == PAGE_MODE_NONE)
return;
@@ -541,7 +536,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
- pgtable->root = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
+ pgtable->root =
+ iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
if (!pgtable->root)
return NULL;
pgtable->mode = PAGE_MODE_3_LEVEL;
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index a56a27396305..b47941353ccb 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -121,10 +121,10 @@ static void free_pgtable(u64 *pt, int level)
if (level > 2)
free_pgtable(p, level - 1);
else
- iommu_free_page(p);
+ iommu_free_pages(p);
}
- iommu_free_page(pt);
+ iommu_free_pages(pt);
}
/* Allocate page table */
@@ -152,14 +152,14 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
}
if (!IOMMU_PTE_PRESENT(__pte)) {
- page = iommu_alloc_page_node(nid, gfp);
+ page = iommu_alloc_pages_node_sz(nid, gfp, SZ_4K);
if (!page)
return NULL;
__npte = set_pgtable_attr(page);
/* pte could have been changed somewhere. */
if (!try_cmpxchg64(pte, &__pte, __npte))
- iommu_free_page(page);
+ iommu_free_pages(page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -181,7 +181,7 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
if (pg_size == IOMMU_PAGE_SIZE_1G)
free_pgtable(__pte, end_level - 1);
else if (pg_size == IOMMU_PAGE_SIZE_2M)
- iommu_free_page(__pte);
+ iommu_free_pages(__pte);
}
return pte;
@@ -346,7 +346,7 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
int ias = IOMMU_IN_ADDR_BIT_SIZE;
- pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
+ pgtable->pgd = iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
if (!pgtable->pgd)
return NULL;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index be8761bbef0f..3117d99cf83d 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -241,7 +241,9 @@ static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- struct acpihid_map_entry *p;
+ struct acpihid_map_entry *p, *p1 = NULL;
+ int hid_count = 0;
+ bool fw_bug;
if (!adev)
return -ENODEV;
@@ -249,12 +251,33 @@ static inline int get_acpihid_device_id(struct device *dev,
list_for_each_entry(p, &acpihid_map, list) {
if (acpi_dev_hid_uid_match(adev, p->hid,
p->uid[0] ? p->uid : NULL)) {
- if (entry)
- *entry = p;
- return p->devid;
+ p1 = p;
+ fw_bug = false;
+ hid_count = 1;
+ break;
+ }
+
+ /*
+ * Count HID matches w/o UID, raise FW_BUG but allow exactly one match
+ */
+ if (acpi_dev_hid_match(adev, p->hid)) {
+ p1 = p;
+ hid_count++;
+ fw_bug = true;
}
}
- return -EINVAL;
+
+ if (!p1)
+ return -EINVAL;
+ if (fw_bug)
+ dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
+ hid_count, hid_count > 1 ? "s" : "");
+ if (hid_count > 1)
+ return -EINVAL;
+ if (entry)
+ *entry = p1;
+
+ return p1->devid;
}
static inline int get_device_sbdf_id(struct device *dev)
@@ -982,6 +1005,14 @@ int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
iommu_ga_log_notifier = notifier;
+ /*
+ * Ensure all in-flight IRQ handlers run to completion before returning
+ * to the caller, e.g. to ensure module code isn't unloaded while it's
+ * being executed in the IRQ handler.
+ */
+ if (!notifier)
+ synchronize_rcu();
+
return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
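
Editorial sketch (not part of the patch): the new comment in amd_iommu_register_ga_log_notifier() explains why unregistering (passing a NULL notifier) is followed by synchronize_rcu(). The same unregister-then-wait pattern, with illustrative names only:

/* Sketch only: clear the callback first, then wait for any in-flight IRQ
 * handler that may still be using the old pointer before the caller frees
 * or unloads the code behind it.  example_notifier is not a kernel symbol. */
static int (*example_notifier)(unsigned int data);

static void example_unregister_notifier(void)
{
	WRITE_ONCE(example_notifier, NULL);
	synchronize_rcu();	/* in-flight handlers have run to completion */
}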
@@ -1812,7 +1843,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
- iommu_free_page(ptr);
+ iommu_free_pages(ptr);
}
}
@@ -1845,7 +1876,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
/* Free per device domain ID */
pdom_id_free(gcr3_info->domid);
- iommu_free_page(gcr3_info->gcr3_tbl);
+ iommu_free_pages(gcr3_info->gcr3_tbl);
gcr3_info->gcr3_tbl = NULL;
}
@@ -1884,7 +1915,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
return -ENOSPC;
gcr3_info->domid = domid;
- gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
+ gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K);
if (gcr3_info->gcr3_tbl == NULL) {
pdom_id_free(domid);
return -ENOMEM;
@@ -2908,6 +2939,9 @@ static void amd_iommu_get_resv_regions(struct device *dev,
return;
list_add_tail(&region->list, head);
+ if (amd_iommu_ht_range_ignore())
+ return;
+
region = iommu_alloc_resv_region(HT_RANGE_START,
HT_RANGE_END - HT_RANGE_START + 1,
0, IOMMU_RESV_RESERVED, GFP_KERNEL);
@@ -2984,38 +3018,6 @@ static const struct iommu_dirty_ops amd_dirty_ops = {
.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
};
-static int amd_iommu_dev_enable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- int ret = 0;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- case IOMMU_DEV_FEAT_SVA:
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int amd_iommu_dev_disable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- int ret = 0;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- case IOMMU_DEV_FEAT_SVA:
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.blocked_domain = &blocked_domain,
@@ -3029,8 +3031,6 @@ const struct iommu_ops amd_iommu_ops = {
.get_resv_regions = amd_iommu_get_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.def_domain_type = amd_iommu_def_domain_type,
- .dev_enable_feat = amd_iommu_dev_enable_feature,
- .dev_disable_feat = amd_iommu_dev_disable_feature,
.page_response = amd_iommu_page_response,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
@@ -3129,7 +3129,7 @@ static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
return table;
}
-static struct irq_remap_table *__alloc_irq_table(int nid, int order)
+static struct irq_remap_table *__alloc_irq_table(int nid, size_t size)
{
struct irq_remap_table *table;
@@ -3137,7 +3137,8 @@ static struct irq_remap_table *__alloc_irq_table(int nid, int order)
if (!table)
return NULL;
- table->table = iommu_alloc_pages_node(nid, GFP_KERNEL, order);
+ table->table = iommu_alloc_pages_node_sz(
+ nid, GFP_KERNEL, max(DTE_INTTAB_ALIGNMENT, size));
if (!table->table) {
kfree(table);
return NULL;
@@ -3191,7 +3192,6 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
struct irq_remap_table *new_table = NULL;
struct amd_iommu_pci_seg *pci_seg;
unsigned long flags;
- int order = get_order(get_irq_table_size(max_irqs));
int nid = iommu && iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
u16 alias;
@@ -3211,7 +3211,7 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
spin_unlock_irqrestore(&iommu_table_lock, flags);
/* Nothing there yet, allocate new irq remapping table */
- new_table = __alloc_irq_table(nid, order);
+ new_table = __alloc_irq_table(nid, get_irq_table_size(max_irqs));
if (!new_table)
return NULL;
@@ -3246,7 +3246,7 @@ out_unlock:
spin_unlock_irqrestore(&iommu_table_lock, flags);
if (new_table) {
- iommu_free_pages(new_table->table, order);
+ iommu_free_pages(new_table->table);
kfree(new_table);
}
return table;
@@ -3869,6 +3869,9 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
struct iommu_dev_data *dev_data;
+ if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
+ return -EINVAL;
+
if (ir_data->iommu == NULL)
return -EINVAL;
@@ -3879,21 +3882,11 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
* we should not modify the IRTE
*/
if (!dev_data || !dev_data->use_vapic)
- return 0;
+ return -EINVAL;
ir_data->cfg = irqd_cfg(data);
pi_data->ir_data = ir_data;
- /* Note:
- * SVM tries to set up for VAPIC mode, but we are in
- * legacy mode. So, we force legacy mode instead.
- */
- if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
- pr_debug("%s: Fall back to using intr legacy remap\n",
- __func__);
- pi_data->is_guest_mode = false;
- }
-
pi_data->prev_ga_tag = ir_data->cached_ga_tag;
if (pi_data->is_guest_mode) {
ir_data->ga_root_ptr = (pi_data->base >> 12);
diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c
index 7c67d69f0b8c..e6767c057d01 100644
--- a/drivers/iommu/amd/ppr.c
+++ b/drivers/iommu/amd/ppr.c
@@ -48,7 +48,7 @@ void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE));
+ iommu_free_pages(iommu->ppr_log);
}
/*
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index e13501541fdd..757d24f67ad4 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -776,8 +776,7 @@ static void apple_dart_domain_free(struct iommu_domain *domain)
{
struct apple_dart_domain *dart_domain = to_dart_domain(domain);
- if (dart_domain->pgtbl_ops)
- free_io_pgtable_ops(dart_domain->pgtbl_ops);
+ free_io_pgtable_ops(dart_domain->pgtbl_ops);
kfree(dart_domain);
}
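
Editorial sketch (not part of the patch): the apple-dart cleanup above can drop the caller-side NULL check because free_io_pgtable_ops() itself tolerates a NULL argument, the same convention as kfree(). A tiny sketch of that idiom with illustrative names:

/* Sketch of a NULL-tolerant free helper; callers need no guard of their own. */
struct example_ops {
	void (*free)(struct example_ops *ops);
};

static void example_free_ops(struct example_ops *ops)
{
	if (!ops)
		return;
	ops->free(ops);
}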
diff --git a/drivers/iommu/arm/Kconfig b/drivers/iommu/arm/Kconfig
new file mode 100644
index 000000000000..ef42bbe07dbe
--- /dev/null
+++ b/drivers/iommu/arm/Kconfig
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# ARM IOMMU support
+config ARM_SMMU
+ tristate "ARM Ltd. System MMU (SMMU) Support"
+ depends on ARM64 || ARM || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select ARM_DMA_USE_IOMMU if ARM
+ help
+ Support for implementations of the ARM System MMU architecture
+ versions 1 and 2.
+
+ Say Y here if your SoC includes an IOMMU device implementing
+ the ARM SMMU architecture.
+
+if ARM_SMMU
+config ARM_SMMU_LEGACY_DT_BINDINGS
+ bool "Support the legacy \"mmu-masters\" devicetree bindings"
+ depends on ARM_SMMU=y && OF
+ help
+ Support for the badly designed and deprecated "mmu-masters"
+ devicetree bindings. This allows some DMA masters to attach
+ to the SMMU but does not provide any support via the DMA API.
+ If you're lucky, you might be able to get VFIO up and running.
+
+ If you say Y here then you'll make me very sad. Instead, say N
+ and move your firmware to the utopian future that was 2016.
+
+config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
+ bool "Disable unmatched stream bypass by default" if EXPERT
+ default y
+ help
+ If your firmware is broken and fails to describe StreamIDs which
+ Linux should know about in order to manage the SMMU correctly and
+ securely, and you don't want to boot with the 'arm-smmu.disable_bypass=0'
+ command line parameter, then as a last resort you can turn it off
+ by default here. But don't. This option may be removed at any time.
+
+ Note that 'arm-smmu.disable_bypass=1' will still take precedence.
+
+config ARM_SMMU_MMU_500_CPRE_ERRATA
+ bool "Enable errata workaround for CPRE in SMMU reset path"
+ default y
+ help
+ Say Y here (by default) to apply workaround to disable
+ MMU-500's next-page prefetcher for sake of 4 known errata.
+
+ Say N here only when it is sure that any errata related to
+ prefetch enablement are not applicable on the platform.
+ Refer silicon-errata.rst for info on errata IDs.
+
+config ARM_SMMU_QCOM
+ def_tristate y
+ depends on ARCH_QCOM
+ select QCOM_SCM
+ help
+ When running on a Qualcomm platform that has the custom variant
+ of the ARM SMMU, this needs to be built into the SMMU driver.
+
+config ARM_SMMU_QCOM_DEBUG
+ bool "ARM SMMU QCOM implementation defined debug support"
+ depends on ARM_SMMU_QCOM=y
+ help
+ Support for implementation specific debug features in ARM SMMU
+	  hardware found in QTI platforms. This includes support for
+ the Translation Buffer Units (TBU) that can be used to obtain
+ additional information when debugging memory management issues
+ like context faults.
+
+ Say Y here to enable debug for issues such as context faults
+	  or TLB sync timeouts which require implementation defined
+ register dumps.
+endif
+
+config ARM_SMMU_V3
+ tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
+ depends on ARM64
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select GENERIC_MSI_IRQ
+ select IOMMUFD_DRIVER if IOMMUFD
+ help
+ Support for implementations of the ARM System MMU architecture
+ version 3 providing translation support to a PCIe root complex.
+
+ Say Y here if your system includes an IOMMU device implementing
+ the ARM SMMUv3 architecture.
+
+if ARM_SMMU_V3
+config ARM_SMMU_V3_SVA
+ bool "Shared Virtual Addressing support for the ARM SMMUv3"
+ select IOMMU_SVA
+ select IOMMU_IOPF
+ select MMU_NOTIFIER
+ help
+ Support for sharing process address spaces with devices using the
+ SMMUv3.
+
+ Say Y here if your system supports SVA extensions such as PCIe PASID
+ and PRI.
+
+config ARM_SMMU_V3_IOMMUFD
+ bool "Enable IOMMUFD features for ARM SMMUv3 (EXPERIMENTAL)"
+ depends on IOMMUFD
+ help
+ Support for IOMMUFD features intended to support virtual machines
+ with accelerated virtual IOMMUs.
+
+ Say Y here if you are doing development and testing on this feature.
+
+config ARM_SMMU_V3_KUNIT_TEST
+ tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on ARM_SMMU_V3_SVA
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to unit-test arm-smmu-v3 driver functions.
+
+ If unsure, say N.
+
+config TEGRA241_CMDQV
+ bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3"
+ depends on ACPI
+ help
+ Support for NVIDIA CMDQ-Virtualization extension for ARM SMMUv3. The
+ CMDQ-V extension is similar to v3.3 ECMDQ for multi command queues
+ support, except with virtualization capabilities.
+
+ Say Y here if your system is NVIDIA Tegra241 (Grace) or it has the same
+ CMDQ-V extension.
+endif
+
+config QCOM_IOMMU
+ # Note: iommu drivers cannot (yet?) be built as modules
+ bool "Qualcomm IOMMU Support"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ select QCOM_SCM
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select ARM_DMA_USE_IOMMU
+ help
+ Support for IOMMU on certain Qualcomm SoCs.
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 9ba596430e7c..0601dece0a0d 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -13,8 +13,6 @@
#include "arm-smmu-v3.h"
#include "../../io-pgtable-arm.h"
-static DEFINE_MUTEX(sva_lock);
-
static void __maybe_unused
arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
{
@@ -257,84 +255,6 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
return true;
}
-bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
-{
- /* We're not keeping track of SIDs in fault events */
- if (master->num_streams != 1)
- return false;
-
- return master->stall_enabled;
-}
-
-bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
-{
- if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
- return false;
-
- /* SSID support is mandatory for the moment */
- return master->ssid_bits;
-}
-
-bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
-{
- bool enabled;
-
- mutex_lock(&sva_lock);
- enabled = master->sva_enabled;
- mutex_unlock(&sva_lock);
- return enabled;
-}
-
-static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
-{
- struct device *dev = master->dev;
-
- /*
- * Drivers for devices supporting PRI or stall should enable IOPF first.
- * Others have device-specific fault handlers and don't need IOPF.
- */
- if (!arm_smmu_master_iopf_supported(master))
- return 0;
-
- if (!master->iopf_enabled)
- return -EINVAL;
-
- return iopf_queue_add_device(master->smmu->evtq.iopf, dev);
-}
-
-static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
-{
- struct device *dev = master->dev;
-
- if (!master->iopf_enabled)
- return;
-
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
-}
-
-int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
-{
- int ret;
-
- mutex_lock(&sva_lock);
- ret = arm_smmu_master_sva_enable_iopf(master);
- if (!ret)
- master->sva_enabled = true;
- mutex_unlock(&sva_lock);
-
- return ret;
-}
-
-int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
-{
- mutex_lock(&sva_lock);
- arm_smmu_master_sva_disable_iopf(master);
- master->sva_enabled = false;
- mutex_unlock(&sva_lock);
-
- return 0;
-}
-
void arm_smmu_sva_notifier_synchronize(void)
{
/*
@@ -353,6 +273,9 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_cd target;
int ret;
+ if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+ return -EOPNOTSUPP;
+
/* Prevent arm_smmu_mm_release from being called while we are attaching */
if (!mmget_not_zero(domain->mm))
return -EINVAL;
@@ -406,11 +329,20 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
u32 asid;
int ret;
+ if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+ return ERR_PTR(-EOPNOTSUPP);
+
smmu_domain = arm_smmu_domain_alloc();
if (IS_ERR(smmu_domain))
return ERR_CAST(smmu_domain);
smmu_domain->domain.type = IOMMU_DOMAIN_SVA;
smmu_domain->domain.ops = &arm_smmu_sva_domain_ops;
+
+ /*
+ * Choose page_size as the leaf page size for invalidation when
+ * ARM_SMMU_FEAT_RANGE_INV is present
+ */
+ smmu_domain->domain.pgsize_bitmap = PAGE_SIZE;
smmu_domain->smmu = smmu;
ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index b4c21aaed126..10cc6dc26b7b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2720,6 +2720,7 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
static struct arm_smmu_master_domain *
arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
+ struct iommu_domain *domain,
struct arm_smmu_master *master,
ioasid_t ssid, bool nested_ats_flush)
{
@@ -2730,6 +2731,7 @@ arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
list_for_each_entry(master_domain, &smmu_domain->devices,
devices_elm) {
if (master_domain->master == master &&
+ master_domain->domain == domain &&
master_domain->ssid == ssid &&
master_domain->nested_ats_flush == nested_ats_flush)
return master_domain;
@@ -2756,6 +2758,58 @@ to_smmu_domain_devices(struct iommu_domain *domain)
return NULL;
}
+static int arm_smmu_enable_iopf(struct arm_smmu_master *master,
+ struct arm_smmu_master_domain *master_domain)
+{
+ int ret;
+
+ iommu_group_mutex_assert(master->dev);
+
+ if (!IS_ENABLED(CONFIG_ARM_SMMU_V3_SVA))
+ return -EOPNOTSUPP;
+
+ /*
+ * Drivers for devices supporting PRI or stall require iopf others have
+ * device-specific fault handlers and don't need IOPF, so this is not a
+ * failure.
+ */
+ if (!master->stall_enabled)
+ return 0;
+
+ /* We're not keeping track of SIDs in fault events */
+ if (master->num_streams != 1)
+ return -EOPNOTSUPP;
+
+ if (master->iopf_refcount) {
+ master->iopf_refcount++;
+ master_domain->using_iopf = true;
+ return 0;
+ }
+
+ ret = iopf_queue_add_device(master->smmu->evtq.iopf, master->dev);
+ if (ret)
+ return ret;
+ master->iopf_refcount = 1;
+ master_domain->using_iopf = true;
+ return 0;
+}
+
+static void arm_smmu_disable_iopf(struct arm_smmu_master *master,
+ struct arm_smmu_master_domain *master_domain)
+{
+ iommu_group_mutex_assert(master->dev);
+
+ if (!IS_ENABLED(CONFIG_ARM_SMMU_V3_SVA))
+ return;
+
+ if (!master_domain || !master_domain->using_iopf)
+ return;
+
+ master->iopf_refcount--;
+ if (master->iopf_refcount == 0)
+ iopf_queue_remove_device(master->smmu->evtq.iopf, master->dev);
+}
+
static void arm_smmu_remove_master_domain(struct arm_smmu_master *master,
struct iommu_domain *domain,
ioasid_t ssid)
@@ -2772,15 +2826,17 @@ static void arm_smmu_remove_master_domain(struct arm_smmu_master *master,
nested_ats_flush = to_smmu_nested_domain(domain)->enable_ats;
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- master_domain = arm_smmu_find_master_domain(smmu_domain, master, ssid,
- nested_ats_flush);
+ master_domain = arm_smmu_find_master_domain(smmu_domain, domain, master,
+ ssid, nested_ats_flush);
if (master_domain) {
list_del(&master_domain->devices_elm);
- kfree(master_domain);
if (master->ats_enabled)
atomic_dec(&smmu_domain->nr_ats_masters);
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ arm_smmu_disable_iopf(master, master_domain);
+ kfree(master_domain);
}
/*
@@ -2853,12 +2909,19 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
kfree(state->vmaster);
return -ENOMEM;
}
+ master_domain->domain = new_domain;
master_domain->master = master;
master_domain->ssid = state->ssid;
if (new_domain->type == IOMMU_DOMAIN_NESTED)
master_domain->nested_ats_flush =
to_smmu_nested_domain(new_domain)->enable_ats;
+ if (new_domain->iopf_handler) {
+ ret = arm_smmu_enable_iopf(master, master_domain);
+ if (ret)
+ goto err_free_master_domain;
+ }
+
/*
* During prepare we want the current smmu_domain and new
* smmu_domain to be in the devices list before we change any
@@ -2878,9 +2941,9 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
!arm_smmu_master_canwbs(master)) {
spin_unlock_irqrestore(&smmu_domain->devices_lock,
flags);
- kfree(master_domain);
kfree(state->vmaster);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iopf;
}
if (state->ats_enabled)
@@ -2899,6 +2962,12 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
wmb();
}
return 0;
+
+err_iopf:
+ arm_smmu_disable_iopf(master, master_domain);
+err_free_master_domain:
+ kfree(master_domain);
+ return ret;
}
/*
@@ -2953,7 +3022,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
smmu = master->smmu;
if (smmu_domain->smmu != smmu)
- return ret;
+ return -EINVAL;
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID);
@@ -3388,6 +3457,7 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
mutex_lock(&smmu->streams_mutex);
for (i = 0; i < fwspec->num_ids; i++) {
struct arm_smmu_stream *new_stream = &master->streams[i];
+ struct rb_node *existing;
u32 sid = fwspec->ids[i];
new_stream->id = sid;
@@ -3398,11 +3468,21 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
break;
/* Insert into SID tree */
- if (rb_find_add(&new_stream->node, &smmu->streams,
- arm_smmu_streams_cmp_node)) {
- dev_warn(master->dev, "stream %u already in tree\n",
- sid);
- ret = -EINVAL;
+ existing = rb_find_add(&new_stream->node, &smmu->streams,
+ arm_smmu_streams_cmp_node);
+ if (existing) {
+ struct arm_smmu_master *existing_master =
+ rb_entry(existing, struct arm_smmu_stream, node)
+ ->master;
+
+ /* Bridged PCI devices may end up with duplicated IDs */
+ if (existing_master == master)
+ continue;
+
+ dev_warn(master->dev,
+ "Aliasing StreamID 0x%x (from %s) unsupported, expect DMA to be broken\n",
+ sid, dev_name(existing_master->dev));
+ ret = -ENODEV;
break;
}
}
@@ -3499,8 +3579,7 @@ static void arm_smmu_release_device(struct device *dev)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- if (WARN_ON(arm_smmu_master_sva_enabled(master)))
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
+ WARN_ON(master->iopf_refcount);
/* Put the STE back to what arm_smmu_init_strtab() sets */
if (dev->iommu->require_direct)
@@ -3575,58 +3654,6 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
-static int arm_smmu_dev_enable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return -ENODEV;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- if (!arm_smmu_master_iopf_supported(master))
- return -EINVAL;
- if (master->iopf_enabled)
- return -EBUSY;
- master->iopf_enabled = true;
- return 0;
- case IOMMU_DEV_FEAT_SVA:
- if (!arm_smmu_master_sva_supported(master))
- return -EINVAL;
- if (arm_smmu_master_sva_enabled(master))
- return -EBUSY;
- return arm_smmu_master_enable_sva(master);
- default:
- return -EINVAL;
- }
-}
-
-static int arm_smmu_dev_disable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return -EINVAL;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- if (!master->iopf_enabled)
- return -EINVAL;
- if (master->sva_enabled)
- return -EBUSY;
- master->iopf_enabled = false;
- return 0;
- case IOMMU_DEV_FEAT_SVA:
- if (!arm_smmu_master_sva_enabled(master))
- return -EINVAL;
- return arm_smmu_master_disable_sva(master);
- default:
- return -EINVAL;
- }
-}
-
/*
* HiSilicon PCIe tune and trace device can be used to trace TLP headers on the
* PCIe link and save the data to memory by DMA. The hardware is restricted to
@@ -3659,8 +3686,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .dev_enable_feat = arm_smmu_dev_enable_feature,
- .dev_disable_feat = arm_smmu_dev_disable_feature,
.page_response = arm_smmu_page_response,
.def_domain_type = arm_smmu_def_domain_type,
.viommu_alloc = arm_vsmmu_alloc,
@@ -4429,6 +4454,8 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
if (FIELD_GET(IDR3_RIL, reg))
smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
+ if (FIELD_GET(IDR3_FWB, reg))
+ smmu->features |= ARM_SMMU_FEAT_S2FWB;
/* IDR5 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index dd1ad56ce863..ea41d790463e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -837,9 +837,8 @@ struct arm_smmu_master {
bool ats_enabled : 1;
bool ste_ats_enabled : 1;
bool stall_enabled;
- bool sva_enabled;
- bool iopf_enabled;
unsigned int ssid_bits;
+ unsigned int iopf_refcount;
};
/* SMMU private data for an IOMMU domain */
@@ -915,8 +914,14 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
struct arm_smmu_master_domain {
struct list_head devices_elm;
struct arm_smmu_master *master;
+ /*
+ * For nested domains the master_domain is threaded onto the S2 parent,
+ * this points to the IOMMU_DOMAIN_NESTED to disambiguate the masters.
+ */
+ struct iommu_domain *domain;
ioasid_t ssid;
bool nested_ats_flush : 1;
+ bool using_iopf : 1;
};
static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
@@ -995,11 +1000,6 @@ int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
-bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
-bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
-int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
-int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
-bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
void arm_smmu_sva_notifier_synchronize(void);
struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
struct mm_struct *mm);
@@ -1009,31 +1009,6 @@ static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
return false;
}
-static inline bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
-{
- return false;
-}
-
-static inline bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
-{
- return false;
-}
-
-static inline int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
-{
- return -ENODEV;
-}
-
-static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
-{
- return -ENODEV;
-}
-
-static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
-{
- return false;
-}
-
static inline void arm_smmu_sva_notifier_synchronize(void) {}
#define arm_smmu_sva_domain_alloc NULL
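
Editorial sketch (not part of the patch): the header change replaces the per-master sva_enabled/iopf_enabled flags with an iopf_refcount managed from the attach path by arm_smmu_enable_iopf()/arm_smmu_disable_iopf(). A generic sketch of the first-user-enables / last-user-disables pattern, with illustrative names rather than the driver's:

static int example_iopf_get(unsigned int *refcount, int (*enable)(void))
{
	int ret;

	if ((*refcount)++)
		return 0;	/* already enabled by an earlier attach */

	ret = enable();
	if (ret)
		(*refcount)--;	/* roll back on failure */
	return ret;
}

static void example_iopf_put(unsigned int *refcount, void (*disable)(void))
{
	if (--(*refcount) == 0)
		disable();
}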
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
index d525ab43a4ae..dd7d030d2e89 100644
--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -487,17 +487,6 @@ static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu)
/* VCMDQ Resource Helpers */
-static void tegra241_vcmdq_free_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
-{
- struct arm_smmu_queue *q = &vcmdq->cmdq.q;
- size_t nents = 1 << q->llq.max_n_shift;
- size_t qsz = nents << CMDQ_ENT_SZ_SHIFT;
-
- if (!q->base)
- return;
- dmam_free_coherent(vcmdq->cmdqv->smmu.dev, qsz, q->base, q->base_dma);
-}
-
static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
{
struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
@@ -560,7 +549,8 @@ static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
char header[64];
- tegra241_vcmdq_free_smmu_cmdq(vcmdq);
+ /* Note that the lvcmdq queue memory space is managed by devres */
+
tegra241_vintf_deinit_lvcmdq(vintf, lidx);
dev_dbg(vintf->cmdqv->dev,
@@ -768,13 +758,13 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
vintf = kzalloc(sizeof(*vintf), GFP_KERNEL);
if (!vintf)
- goto out_fallback;
+ return -ENOMEM;
/* Init VINTF0 for in-kernel use */
ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf);
if (ret) {
dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret);
- goto free_vintf;
+ return ret;
}
/* Preallocate logical VCMDQs to VINTF0 */
@@ -783,24 +773,12 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx);
if (IS_ERR(vcmdq))
- goto free_lvcmdq;
+ return PTR_ERR(vcmdq);
}
/* Now, we are ready to run all the impl ops */
smmu->impl_ops = &tegra241_cmdqv_impl_ops;
return 0;
-
-free_lvcmdq:
- for (lidx--; lidx >= 0; lidx--)
- tegra241_vintf_free_lvcmdq(vintf, lidx);
- tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx);
-free_vintf:
- kfree(vintf);
-out_fallback:
- dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
- smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
- tegra241_cmdqv_remove(smmu);
- return 0;
}
#ifdef CONFIG_IOMMU_DEBUGFS
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index d03b2239baad..65e0ef6539fe 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -406,6 +406,12 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
arm_smmu_print_context_fault_info(smmu, idx, &cfi);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
+
+ if (cfi.fsr & ARM_SMMU_CB_FSR_SS) {
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ret == -EAGAIN ? 0 : ARM_SMMU_RESUME_TERMINATE);
+ }
+
return IRQ_HANDLED;
}
@@ -416,6 +422,9 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
if (!tmp || tmp == -EBUSY) {
ret = IRQ_HANDLED;
resume = ARM_SMMU_RESUME_TERMINATE;
+ } else if (tmp == -EAGAIN) {
+ ret = IRQ_HANDLED;
+ resume = 0;
} else {
phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, cfi.iova, cfi.fsr);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 59d02687280e..62874b18f645 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -112,25 +112,39 @@ static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
{
struct arm_smmu_domain *smmu_domain = (void *)cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ u32 mask = BIT(cfg->cbndx);
+ bool stall_changed = !!(qsmmu->stall_enabled & mask) != enabled;
+ unsigned long flags;
if (enabled)
- qsmmu->stall_enabled |= BIT(cfg->cbndx);
+ qsmmu->stall_enabled |= mask;
else
- qsmmu->stall_enabled &= ~BIT(cfg->cbndx);
-}
+ qsmmu->stall_enabled &= ~mask;
-static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate)
-{
- struct arm_smmu_domain *smmu_domain = (void *)cookie;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- u32 reg = 0;
+ /*
+ * If the device is on and we changed the setting, update the register.
+ * The spec pseudocode says that CFCFG is resampled after a fault, and
+ * we believe that no implementations cache it in the TLB, so it should
+ * be safe to change it without a TLB invalidation.
+ */
+ if (stall_changed && pm_runtime_get_if_active(smmu->dev) > 0) {
+ u32 reg;
+
+ spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+ reg = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR);
+
+ if (enabled)
+ reg |= ARM_SMMU_SCTLR_CFCFG;
+ else
+ reg &= ~ARM_SMMU_SCTLR_CFCFG;
- if (terminate)
- reg |= ARM_SMMU_RESUME_TERMINATE;
+ arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR, reg);
+ spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
- arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
+ pm_runtime_put_autosuspend(smmu->dev);
+ }
}
static void qcom_adreno_smmu_set_prr_bit(const void *cookie, bool set)
@@ -337,7 +351,6 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
priv->set_stall = qcom_adreno_smmu_set_stall;
- priv->resume_translation = qcom_adreno_smmu_resume_translation;
priv->set_prr_bit = NULL;
priv->set_prr_addr = NULL;
@@ -356,6 +369,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,mdp4" },
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,qcm2290-mdss" },
+ { .compatible = "qcom,sar2130p-mdss" },
{ .compatible = "qcom,sc7180-mdss" },
{ .compatible = "qcom,sc7180-mss-pil" },
{ .compatible = "qcom,sc7280-mdss" },
@@ -585,6 +599,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
.tlb_sync = qcom_smmu_tlb_sync,
+ .context_fault_needs_threaded_irq = true,
};
static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
@@ -594,6 +609,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
.tlb_sync = qcom_smmu_tlb_sync,
+ .context_fault_needs_threaded_irq = true,
};
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 8f439c265a23..8d95b14c7d5a 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -474,6 +474,12 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
arm_smmu_print_context_fault_info(smmu, idx, &cfi);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
+
+ if (cfi.fsr & ARM_SMMU_CB_FSR_SS) {
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ret == -EAGAIN ? 0 : ARM_SMMU_RESUME_TERMINATE);
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index cb7e29dcac15..6c708fec48d1 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -27,6 +27,7 @@
#include <linux/msi.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
+#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
@@ -105,7 +106,7 @@ early_param("iommu.forcedac", iommu_dma_forcedac_setup);
struct iova_fq_entry {
unsigned long iova_pfn;
unsigned long pages;
- struct list_head freelist;
+ struct iommu_pages_list freelist;
u64 counter; /* Flush counter when this entry was added */
};
@@ -154,6 +155,8 @@ static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq
fq->entries[idx].iova_pfn,
fq->entries[idx].pages);
+ fq->entries[idx].freelist =
+ IOMMU_PAGES_LIST_INIT(fq->entries[idx].freelist);
fq->head = (fq->head + 1) & fq->mod_mask;
}
}
@@ -192,7 +195,7 @@ static void fq_flush_timeout(struct timer_list *t)
static void queue_iova(struct iommu_dma_cookie *cookie,
unsigned long pfn, unsigned long pages,
- struct list_head *freelist)
+ struct iommu_pages_list *freelist)
{
struct iova_fq *fq;
unsigned long flags;
@@ -231,7 +234,7 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
fq->entries[idx].iova_pfn = pfn;
fq->entries[idx].pages = pages;
fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
- list_splice(freelist, &fq->entries[idx].freelist);
+ iommu_pages_list_splice(freelist, &fq->entries[idx].freelist);
spin_unlock_irqrestore(&fq->lock, flags);
@@ -289,7 +292,8 @@ static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
spin_lock_init(&fq->lock);
for (i = 0; i < fq_size; i++)
- INIT_LIST_HEAD(&fq->entries[i].freelist);
+ fq->entries[i].freelist =
+ IOMMU_PAGES_LIST_INIT(fq->entries[i].freelist);
}
static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
@@ -1137,6 +1141,54 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
+static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iova_domain *iovad = &domain->iova_cookie->iovad;
+
+ if (!is_swiotlb_active(dev)) {
+ dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
+ return (phys_addr_t)DMA_MAPPING_ERROR;
+ }
+
+ trace_swiotlb_bounced(dev, phys, size);
+
+ phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir,
+ attrs);
+
+ /*
+ * Untrusted devices should not see padding areas with random leftover
+ * kernel data, so zero the pre- and post-padding.
+ * swiotlb_tbl_map_single() has initialized the bounce buffer proper to
+ * the contents of the original memory buffer.
+ */
+ if (phys != (phys_addr_t)DMA_MAPPING_ERROR && dev_is_untrusted(dev)) {
+ size_t start, virt = (size_t)phys_to_virt(phys);
+
+ /* Pre-padding */
+ start = iova_align_down(iovad, virt);
+ memset((void *)start, 0, virt - start);
+
+ /* Post-padding */
+ start = virt + size;
+ memset((void *)start, 0, iova_align(iovad, start) - start);
+ }
+
+ return phys;
+}
+
+/*
+ * Checks if a physical buffer has unaligned boundaries with respect to
+ * the IOMMU granule. Returns non-zero if either the start or end
+ * address is not aligned to the granule boundary.
+ */
+static inline size_t iova_unaligned(struct iova_domain *iovad, phys_addr_t phys,
+ size_t size)
+{
+ return iova_offset(iovad, phys | size);
+}
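
Editorial sketch (not part of the patch): the phys | size test above works because any low bit set in either the start address or the size survives the OR, so a single mask check covers both ends of the buffer. A standalone worked example with an assumed 4 KiB granule:

#include <assert.h>

#define EX_GRANULE_MASK 0xfffUL		/* assumed 4 KiB IOVA granule */

static unsigned long example_unaligned(unsigned long phys, unsigned long size)
{
	return (phys | size) & EX_GRANULE_MASK;
}

int main(void)
{
	assert(example_unaligned(0x2000, 0x3000) == 0);		/* both aligned */
	assert(example_unaligned(0x2200, 0x3000) == 0x200);	/* start offset */
	assert(example_unaligned(0x2000, 0x2800) == 0x800);	/* size not a multiple */
	return 0;
}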
+
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
@@ -1150,42 +1202,14 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
dma_addr_t iova, dma_mask = dma_get_mask(dev);
/*
- * If both the physical buffer start address and size are
- * page aligned, we don't need to use a bounce page.
+ * If both the physical buffer start address and size are page aligned,
+ * we don't need to use a bounce page.
*/
if (dev_use_swiotlb(dev, size, dir) &&
- iova_offset(iovad, phys | size)) {
- if (!is_swiotlb_active(dev)) {
- dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
- return DMA_MAPPING_ERROR;
- }
-
- trace_swiotlb_bounced(dev, phys, size);
-
- phys = swiotlb_tbl_map_single(dev, phys, size,
- iova_mask(iovad), dir, attrs);
-
- if (phys == DMA_MAPPING_ERROR)
+ iova_unaligned(iovad, phys, size)) {
+ phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
+ if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
-
- /*
- * Untrusted devices should not see padding areas with random
- * leftover kernel data, so zero the pre- and post-padding.
- * swiotlb_tbl_map_single() has initialized the bounce buffer
- * proper to the contents of the original memory buffer.
- */
- if (dev_is_untrusted(dev)) {
- size_t start, virt = (size_t)phys_to_virt(phys);
-
- /* Pre-padding */
- start = iova_align_down(iovad, virt);
- memset((void *)start, 0, virt - start);
-
- /* Post-padding */
- start = virt + size;
- memset((void *)start, 0,
- iova_align(iovad, start) - start);
- }
}
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ -1359,7 +1383,6 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
struct scatterlist *s, *prev = NULL;
int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
struct pci_p2pdma_map_state p2pdma_state = {};
- enum pci_p2pdma_map_type map;
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
@@ -1389,28 +1412,30 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
size_t s_length = s->length;
size_t pad_len = (mask - iova_len + 1) & mask;
- if (is_pci_p2pdma_page(sg_page(s))) {
- map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
- switch (map) {
- case PCI_P2PDMA_MAP_BUS_ADDR:
- /*
- * iommu_map_sg() will skip this segment as
- * it is marked as a bus address,
- * __finalise_sg() will copy the dma address
- * into the output segment.
- */
- continue;
- case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
- /*
- * Mapping through host bridge should be
- * mapped with regular IOVAs, thus we
- * do nothing here and continue below.
- */
- break;
- default:
- ret = -EREMOTEIO;
- goto out_restore_sg;
- }
+ switch (pci_p2pdma_state(&p2pdma_state, dev, sg_page(s))) {
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ /*
+ * Mapping through host bridge should be mapped with
+ * regular IOVAs, thus we do nothing here and continue
+ * below.
+ */
+ break;
+ case PCI_P2PDMA_MAP_NONE:
+ break;
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ /*
+ * iommu_map_sg() will skip this segment as it is marked
+ * as a bus address, __finalise_sg() will copy the dma
+ * address into the output segment.
+ */
+ s->dma_address = pci_p2pdma_bus_addr_map(&p2pdma_state,
+ sg_phys(s));
+ sg_dma_len(s) = sg->length;
+ sg_dma_mark_bus_address(s);
+ continue;
+ default:
+ ret = -EREMOTEIO;
+ goto out_restore_sg;
}
sg_dma_address(s) = s_iova_off;
@@ -1721,6 +1746,354 @@ size_t iommu_dma_max_mapping_size(struct device *dev)
return SIZE_MAX;
}
+/**
+ * dma_iova_try_alloc - Try to allocate an IOVA space
+ * @dev: Device to allocate the IOVA space for
+ * @state: IOVA state
+ * @phys: physical address
+ * @size: IOVA size
+ *
+ * Check if @dev supports the IOVA-based DMA API, and if yes allocate IOVA space
+ * for the given base address and size.
+ *
+ * Note: @phys is only used to calculate the IOVA alignment. Callers that always
+ * do PAGE_SIZE aligned transfers can safely pass 0 here.
+ *
+ * Returns %true if the IOVA-based DMA API can be used and IOVA space has been
+ * allocated, or %false if the regular DMA API should be used.
+ */
+bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t size)
+{
+ struct iommu_dma_cookie *cookie;
+ struct iommu_domain *domain;
+ struct iova_domain *iovad;
+ size_t iova_off;
+ dma_addr_t addr;
+
+ memset(state, 0, sizeof(*state));
+ if (!use_dma_iommu(dev))
+ return false;
+
+ domain = iommu_get_dma_domain(dev);
+ cookie = domain->iova_cookie;
+ iovad = &cookie->iovad;
+ iova_off = iova_offset(iovad, phys);
+
+ if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+ iommu_deferred_attach(dev, iommu_get_domain_for_dev(dev)))
+ return false;
+
+ if (WARN_ON_ONCE(!size))
+ return false;
+
+ /*
+	 * DMA_IOVA_USE_SWIOTLB is a flag set by the dma-iommu
+	 * internals; make sure the caller didn't set it and/or
+	 * didn't use this interface to map SIZE_MAX.
+ */
+ if (WARN_ON_ONCE((u64)size & DMA_IOVA_USE_SWIOTLB))
+ return false;
+
+ addr = iommu_dma_alloc_iova(domain,
+ iova_align(iovad, size + iova_off),
+ dma_get_mask(dev), dev);
+ if (!addr)
+ return false;
+
+ state->addr = addr + iova_off;
+ state->__size = size;
+ return true;
+}
+EXPORT_SYMBOL_GPL(dma_iova_try_alloc);
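
[Editorial aside, not part of the patch: a minimal caller-side sketch of the probe-and-fallback contract described in the kernel-doc above. The helper name my_setup_mapping() is purely illustrative.]

static bool my_setup_mapping(struct device *dev, struct dma_iova_state *state,
			     phys_addr_t phys, size_t len)
{
	if (dma_iova_try_alloc(dev, state, phys, len))
		return true;	/* IOVA space allocated; use dma_iova_link() next */

	/* No IOMMU-backed IOVA space; fall back to dma_map_page()/dma_map_sg(). */
	return false;
}
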
+
+/**
+ * dma_iova_free - Free an IOVA space
+ * @dev: Device to free the IOVA space for
+ * @state: IOVA state
+ *
+ * Undoes a successful dma_iova_try_alloc().
+ *
+ * Note that all dma_iova_link() calls need to be undone first. For callers
+ * that never call dma_iova_unlink(), dma_iova_destroy() can be used instead
+ * which unlinks all ranges and frees the IOVA space in a single efficient
+ * operation.
+ */
+void dma_iova_free(struct device *dev, struct dma_iova_state *state)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_start_pad = iova_offset(iovad, state->addr);
+ size_t size = dma_iova_size(state);
+
+ iommu_dma_free_iova(domain, state->addr - iova_start_pad,
+ iova_align(iovad, size + iova_start_pad), NULL);
+}
+EXPORT_SYMBOL_GPL(dma_iova_free);
+
+static int __dma_iova_link(struct device *dev, dma_addr_t addr,
+ phys_addr_t phys, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ bool coherent = dev_is_dma_coherent(dev);
+
+ if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(phys, size, dir);
+
+ return iommu_map_nosync(iommu_get_dma_domain(dev), addr, phys, size,
+ dma_info_to_prot(dir, coherent, attrs), GFP_ATOMIC);
+}
+
+static int iommu_dma_iova_bounce_and_link(struct device *dev, dma_addr_t addr,
+ phys_addr_t phys, size_t bounce_len,
+ enum dma_data_direction dir, unsigned long attrs,
+ size_t iova_start_pad)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iova_domain *iovad = &domain->iova_cookie->iovad;
+ phys_addr_t bounce_phys;
+ int error;
+
+ bounce_phys = iommu_dma_map_swiotlb(dev, phys, bounce_len, dir, attrs);
+ if (bounce_phys == DMA_MAPPING_ERROR)
+ return -ENOMEM;
+
+ error = __dma_iova_link(dev, addr - iova_start_pad,
+ bounce_phys - iova_start_pad,
+ iova_align(iovad, bounce_len), dir, attrs);
+ if (error)
+ swiotlb_tbl_unmap_single(dev, bounce_phys, bounce_len, dir,
+ attrs);
+ return error;
+}
+
+static int iommu_dma_iova_link_swiotlb(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t offset,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_start_pad = iova_offset(iovad, phys);
+ size_t iova_end_pad = iova_offset(iovad, phys + size);
+ dma_addr_t addr = state->addr + offset;
+ size_t mapped = 0;
+ int error;
+
+ if (iova_start_pad) {
+ size_t bounce_len = min(size, iovad->granule - iova_start_pad);
+
+ error = iommu_dma_iova_bounce_and_link(dev, addr, phys,
+ bounce_len, dir, attrs, iova_start_pad);
+ if (error)
+ return error;
+ state->__size |= DMA_IOVA_USE_SWIOTLB;
+
+ mapped += bounce_len;
+ size -= bounce_len;
+ if (!size)
+ return 0;
+ }
+
+ size -= iova_end_pad;
+ error = __dma_iova_link(dev, addr + mapped, phys + mapped, size, dir,
+ attrs);
+ if (error)
+ goto out_unmap;
+ mapped += size;
+
+ if (iova_end_pad) {
+ error = iommu_dma_iova_bounce_and_link(dev, addr + mapped,
+ phys + mapped, iova_end_pad, dir, attrs, 0);
+ if (error)
+ goto out_unmap;
+ state->__size |= DMA_IOVA_USE_SWIOTLB;
+ }
+
+ return 0;
+
+out_unmap:
+ dma_iova_unlink(dev, state, 0, mapped, dir, attrs);
+ return error;
+}
+
+/**
+ * dma_iova_link - Link a range of IOVA space
+ * @dev: DMA device
+ * @state: IOVA state
+ * @phys: physical address to link
+ * @offset: offset into the IOVA state to map into
+ * @size: size of the buffer
+ * @dir: DMA direction
+ * @attrs: attributes of mapping properties
+ *
+ * Link a range of IOVA space for the given IOVA state without IOTLB sync.
+ * This function is used to link multiple physical addresses in contiguous
+ * IOVA space without performing costly IOTLB sync.
+ *
+ * The caller is responsible for calling dma_iova_sync() to sync the IOTLB
+ * at the end of the linkage.
+ */
+int dma_iova_link(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_start_pad = iova_offset(iovad, phys);
+
+ if (WARN_ON_ONCE(iova_start_pad && offset > 0))
+ return -EIO;
+
+ if (dev_use_swiotlb(dev, size, dir) &&
+ iova_unaligned(iovad, phys, size))
+ return iommu_dma_iova_link_swiotlb(dev, state, phys, offset,
+ size, dir, attrs);
+
+ return __dma_iova_link(dev, state->addr + offset - iova_start_pad,
+ phys - iova_start_pad,
+ iova_align(iovad, size + iova_start_pad), dir, attrs);
+}
+EXPORT_SYMBOL_GPL(dma_iova_link);
+
+/**
+ * dma_iova_sync - Sync IOTLB
+ * @dev: DMA device
+ * @state: IOVA state
+ * @offset: offset into the IOVA state to sync
+ * @size: size of the buffer
+ *
+ * Sync IOTLB for the given IOVA state. This function should be called on
+ * the IOVA-contiguous range created by one or more dma_iova_link() calls
+ * to sync the IOTLB.
+ */
+int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ dma_addr_t addr = state->addr + offset;
+ size_t iova_start_pad = iova_offset(iovad, addr);
+
+ return iommu_sync_map(domain, addr - iova_start_pad,
+ iova_align(iovad, size + iova_start_pad));
+}
+EXPORT_SYMBOL_GPL(dma_iova_sync);
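
[Editorial aside, not part of the patch: a hypothetical sketch of the link-then-sync pattern, assuming dma_iova_try_alloc() already succeeded for at least nr_pages * PAGE_SIZE bytes. my_link_pages(), pages and nr_pages are illustrative names.]

static int my_link_pages(struct device *dev, struct dma_iova_state *state,
			 struct page **pages, unsigned int nr_pages,
			 enum dma_data_direction dir)
{
	size_t mapped = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < nr_pages; i++) {
		/* Each chunk lands at a growing offset inside the IOVA space. */
		ret = dma_iova_link(dev, state, page_to_phys(pages[i]),
				    mapped, PAGE_SIZE, dir, 0);
		if (ret)
			goto err_unlink;
		mapped += PAGE_SIZE;
	}

	/* A single IOTLB sync covers everything linked above. */
	ret = dma_iova_sync(dev, state, 0, mapped);
	if (ret)
		goto err_unlink;
	return 0;

err_unlink:
	/* The IOVA space stays allocated; the caller frees or reuses it. */
	if (mapped)
		dma_iova_unlink(dev, state, 0, mapped, dir, 0);
	return ret;
}
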
+
+static void iommu_dma_iova_unlink_range_slow(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_start_pad = iova_offset(iovad, addr);
+ dma_addr_t end = addr + size;
+
+ do {
+ phys_addr_t phys;
+ size_t len;
+
+ phys = iommu_iova_to_phys(domain, addr);
+ if (WARN_ON(!phys))
+			/* Something very horrible happened here */
+ return;
+
+ len = min_t(size_t,
+ end - addr, iovad->granule - iova_start_pad);
+
+ if (!dev_is_dma_coherent(dev) &&
+ !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_cpu(phys, len, dir);
+
+ swiotlb_tbl_unmap_single(dev, phys, len, dir, attrs);
+
+ addr += len;
+ iova_start_pad = 0;
+ } while (addr < end);
+}
+
+static void __iommu_dma_iova_unlink(struct device *dev,
+ struct dma_iova_state *state, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs,
+ bool free_iova)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ dma_addr_t addr = state->addr + offset;
+ size_t iova_start_pad = iova_offset(iovad, addr);
+ struct iommu_iotlb_gather iotlb_gather;
+ size_t unmapped;
+
+ if ((state->__size & DMA_IOVA_USE_SWIOTLB) ||
+ (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)))
+ iommu_dma_iova_unlink_range_slow(dev, addr, size, dir, attrs);
+
+ iommu_iotlb_gather_init(&iotlb_gather);
+ iotlb_gather.queued = free_iova && READ_ONCE(cookie->fq_domain);
+
+ size = iova_align(iovad, size + iova_start_pad);
+ addr -= iova_start_pad;
+ unmapped = iommu_unmap_fast(domain, addr, size, &iotlb_gather);
+ WARN_ON(unmapped != size);
+
+ if (!iotlb_gather.queued)
+ iommu_iotlb_sync(domain, &iotlb_gather);
+ if (free_iova)
+ iommu_dma_free_iova(domain, addr, size, &iotlb_gather);
+}
+
+/**
+ * dma_iova_unlink - Unlink a range of IOVA space
+ * @dev: DMA device
+ * @state: IOVA state
+ * @offset: offset into the IOVA state to unlink
+ * @size: size of the buffer
+ * @dir: DMA direction
+ * @attrs: attributes of mapping properties
+ *
+ * Unlink a range of IOVA space for the given IOVA state.
+ */
+void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ __iommu_dma_iova_unlink(dev, state, offset, size, dir, attrs, false);
+}
+EXPORT_SYMBOL_GPL(dma_iova_unlink);
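
[Editorial aside, not part of the patch: a sketch of partial unlinking, assuming a long-lived IOVA space carved into fixed PAGE_SIZE slots that are linked and unlinked independently. my_slot_map()/my_slot_unmap() and "slot" are illustrative.]

static int my_slot_map(struct device *dev, struct dma_iova_state *state,
		       unsigned int slot, struct page *page,
		       enum dma_data_direction dir)
{
	size_t offset = (size_t)slot * PAGE_SIZE;
	int ret;

	ret = dma_iova_link(dev, state, page_to_phys(page), offset,
			    PAGE_SIZE, dir, 0);
	if (ret)
		return ret;

	ret = dma_iova_sync(dev, state, offset, PAGE_SIZE);
	if (ret)
		dma_iova_unlink(dev, state, offset, PAGE_SIZE, dir, 0);
	return ret;
}

static void my_slot_unmap(struct device *dev, struct dma_iova_state *state,
			  unsigned int slot, enum dma_data_direction dir)
{
	/* Only this slot is torn down; the IOVA space itself stays allocated. */
	dma_iova_unlink(dev, state, (size_t)slot * PAGE_SIZE, PAGE_SIZE, dir, 0);
}
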
+
+/**
+ * dma_iova_destroy - Finish a DMA mapping transaction
+ * @dev: DMA device
+ * @state: IOVA state
+ * @mapped_len: number of bytes to unmap
+ * @dir: DMA direction
+ * @attrs: attributes of mapping properties
+ *
+ * Unlink the IOVA range up to @mapped_len and free the entire IOVA space. The
+ * IOVA range from @state->addr up to @mapped_len must all be linked, and must
+ * be the only linked range in @state.
+ */
+void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
+ size_t mapped_len, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ if (mapped_len)
+ __iommu_dma_iova_unlink(dev, state, 0, mapped_len, dir, attrs,
+ true);
+ else
+ /*
+		 * We can get here if the first call to dma_iova_link() failed
+		 * and there is nothing to unlink, so just free the IOVA space.
+ */
+ dma_iova_free(dev, state);
+}
+EXPORT_SYMBOL_GPL(dma_iova_destroy);
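
[Editorial aside, not part of the patch: a hypothetical end-to-end sketch for a single physically contiguous buffer, following the error-handling rules in the kernel-docs above. my_dma_map()/my_dma_unmap() are illustrative names.]

static dma_addr_t my_dma_map(struct device *dev, struct dma_iova_state *state,
			     phys_addr_t phys, size_t len,
			     enum dma_data_direction dir)
{
	if (!dma_iova_try_alloc(dev, state, phys, len))
		return DMA_MAPPING_ERROR;	/* caller falls back to dma_map_*() */

	if (dma_iova_link(dev, state, phys, 0, len, dir, 0)) {
		dma_iova_free(dev, state);	/* nothing linked yet */
		return DMA_MAPPING_ERROR;
	}
	if (dma_iova_sync(dev, state, 0, len)) {
		/* The range is already linked, so destroy unlinks and frees. */
		dma_iova_destroy(dev, state, len, dir, 0);
		return DMA_MAPPING_ERROR;
	}
	return state->addr;
}

static void my_dma_unmap(struct device *dev, struct dma_iova_state *state,
			 size_t len, enum dma_data_direction dir)
{
	/* Unlinks [0, len) and frees the whole IOVA space in one call. */
	dma_iova_destroy(dev, state, len, dir, 0);
}
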
+
void iommu_setup_dma_ops(struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -1754,7 +2127,7 @@ static size_t cookie_msi_granule(const struct iommu_domain *domain)
return PAGE_SIZE;
default:
BUG();
- };
+ }
}
static struct list_head *cookie_msi_pages(const struct iommu_domain *domain)
@@ -1766,7 +2139,7 @@ static struct list_head *cookie_msi_pages(const struct iommu_domain *domain)
return &domain->msi_cookie->msi_page_list;
default:
BUG();
- };
+ }
}
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 69e23e017d9e..fcb6a0f7c082 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -832,7 +832,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
- if (&data->domain->domain != &exynos_identity_domain) {
+ if (data->domain) {
dev_dbg(data->sysmmu, "saving state\n");
__sysmmu_disable(data);
}
@@ -850,7 +850,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
- if (&data->domain->domain != &exynos_identity_domain) {
+ if (data->domain) {
dev_dbg(data->sysmmu, "restoring state\n");
__sysmmu_enable(data);
}
@@ -902,11 +902,11 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
if (!domain)
return NULL;
- domain->pgtable = iommu_alloc_pages(GFP_KERNEL, 2);
+ domain->pgtable = iommu_alloc_pages_sz(GFP_KERNEL, SZ_16K);
if (!domain->pgtable)
goto err_pgtable;
- domain->lv2entcnt = iommu_alloc_pages(GFP_KERNEL, 1);
+ domain->lv2entcnt = iommu_alloc_pages_sz(GFP_KERNEL, SZ_8K);
if (!domain->lv2entcnt)
goto err_counter;
@@ -932,9 +932,9 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
return &domain->domain;
err_lv2ent:
- iommu_free_pages(domain->lv2entcnt, 1);
+ iommu_free_pages(domain->lv2entcnt);
err_counter:
- iommu_free_pages(domain->pgtable, 2);
+ iommu_free_pages(domain->pgtable);
err_pgtable:
kfree(domain);
return NULL;
@@ -975,8 +975,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
phys_to_virt(base));
}
- iommu_free_pages(domain->pgtable, 2);
- iommu_free_pages(domain->lv2entcnt, 1);
+ iommu_free_pages(domain->pgtable);
+ iommu_free_pages(domain->lv2entcnt);
kfree(domain);
}
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 30be786bff11..5f08523f97cb 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -64,7 +64,7 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
spin_lock_irqsave(&iommu_lock, flags);
ret = pamu_update_paace_stash(liodn, val);
if (ret) {
- pr_debug("Failed to update SPAACE for liodn %d\n ", liodn);
+ pr_debug("Failed to update SPAACE for liodn %d\n", liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
return ret;
}
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index 6c7528130cf9..ada651c4a01b 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DMAR_TABLE) += dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o prq.o
-obj-$(CONFIG_DMAR_TABLE) += trace.o
+obj-y += iommu.o pasid.o nested.o cache.o prq.o
+obj-$(CONFIG_DMAR_TABLE) += dmar.o trace.o
obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
-ifdef CONFIG_INTEL_IOMMU
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
-endif
obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index e540092d664d..b61d9ea27aa9 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1099,6 +1099,9 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
spin_lock_init(&iommu->device_rbtree_lock);
mutex_init(&iommu->iopf_lock);
iommu->node = NUMA_NO_NODE;
+ spin_lock_init(&iommu->lock);
+ ida_init(&iommu->domain_ida);
+ mutex_init(&iommu->did_lock);
ver = readl(iommu->reg + DMAR_VER_REG);
pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
@@ -1187,7 +1190,7 @@ static void free_iommu(struct intel_iommu *iommu)
}
if (iommu->qi) {
- iommu_free_page(iommu->qi->desc);
+ iommu_free_pages(iommu->qi->desc);
kfree(iommu->qi->desc_status);
kfree(iommu->qi);
}
@@ -1195,6 +1198,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);
+ ida_destroy(&iommu->domain_ida);
ida_free(&dmar_seq_ids, iommu->seq_id);
kfree(iommu);
}
@@ -1681,7 +1685,6 @@ int dmar_enable_qi(struct intel_iommu *iommu)
{
struct q_inval *qi;
void *desc;
- int order;
if (!ecap_qis(iommu->ecap))
return -ENOENT;
@@ -1702,8 +1705,9 @@ int dmar_enable_qi(struct intel_iommu *iommu)
* Need two pages to accommodate 256 descriptors of 256 bits each
* if the remapping hardware supports scalable mode translation.
*/
- order = ecap_smts(iommu->ecap) ? 1 : 0;
- desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order);
+ desc = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
+ ecap_smts(iommu->ecap) ? SZ_8K :
+ SZ_4K);
if (!desc) {
kfree(qi);
iommu->qi = NULL;
@@ -1714,7 +1718,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
if (!qi->desc_status) {
- iommu_free_page(qi->desc);
+ iommu_free_pages(qi->desc);
kfree(qi);
iommu->qi = NULL;
return -ENOMEM;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 6e67cc66a204..7aa3932251b2 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c